#' Predict method for \code{beeSurvFit} objects
#'
#' @description This is the generic \code{predict} S3 method for the \code{beeSurvFit}
#' class. It predicts the survival over time for the concentration profiles entered by the user.
#' No concentration reconstructions are performed here. The function [odeGUTS::predict_ode()]
#' from the \code{odeGUTS} package is used. This might be changed in a future update.
#'
#' @param object An object of class \code{beeSurvFit}
#' @param dataPredict Data to predict in the format of a dataframe containing the
#' following columns:
#' \itemize{
#'     \item \code{time}: A vector of time in days
#'     \item \code{conc}: A vector of concentrations of the same length
#'     \item \code{replicate}: A vector of replicate names
#' }
#' @param ... Additional arguments to be passed to the \code{predict.survFit} method from \code{odeGUTS} (e.g.
#' \code{mcmc_size = 1000} can be used to reduce the number of MCMC samples in order to speed up
#' the computation. \code{mcmc_size} is the number of selected iterations for one chain. Default
#' is 1000. If all MCMC samples are wanted, set the argument to \code{NULL}.
#' \code{hb_value = FALSE}: the background mortality \code{hb} is set to a fixed value.
#' If \code{TRUE}, parameter \code{hb} is taken from the posterior (this only works if
#' a single \code{hb} value was estimated). The default is \code{FALSE}.
#' \code{hb_valueFORCED = 0}: if \code{hb_value} is \code{FALSE}, this fixes \code{hb}. The default is \code{0}.)
#'
#' @return A \code{beeSurvPred} object containing the results of the forward prediction
#' @export
#'
#' @examples
#' \donttest{
#' dataPredict <- data.frame(time = c(1:5, 1:15),
#'                           conc = c(rep(5, 5), rep(15, 15)),
#'                           replicate = c(rep("rep1", 5), rep("rep2", 15)))
#' data(fitBetacyfluthrin_Chronic)
#' prediction <- predict(fitBetacyfluthrin_Chronic, dataPredict)
#' }
predict.beeSurvFit <- function(object, dataPredict, ...) {

  # Check for correct class
  if (!is(object, "beeSurvFit")) {
    stop("predict.beeSurvFit: an object of class 'beeSurvFit' is expected")
  }

  # Transform
  if (!exists("hb_value")) { # if no hb_value is defined
    # In this case, hb_value is set to FALSE by default in odeGUTS and the value
    # is forced to 0 by hb_valueFORCED in odeGUTS
    if (object$modelType == "SD") {
      morseObject <- list(mcmc = rstan::As.mcmc.list(object$stanFit,
                                                     pars = c("kd_log10", "zw_log10", "bw_log10")),
                          model_type = object$modelType)
      class(morseObject) <- "survFit"

      for (i in 1:object$setupMCMC$nChains) {
        colnames(morseObject$mcmc[[i]]) <- c("kd_log10", "z_log10", "kk_log10")
      }
    } else if (object$modelType == "IT") {
      morseObject <- list(mcmc = rstan::As.mcmc.list(object$stanFit,
                                                     pars = c("kd_log10", "mw_log10", "beta_log10")),
                          model_type = object$modelType)
      class(morseObject) <- "survFit"

      for (i in 1:object$setupMCMC$nChains) {
        colnames(morseObject$mcmc[[i]]) <- c("kd_log10", "alpha_log10", "beta_log10")
      }
    } else {
      stop("Wrong model type. Model type should be 'SD' or 'IT'")
    }
  } else { # if hb_value exists
    if (hb_value == FALSE) {
      # In this case the value of hb is set to 0 via hb_valueFORCED in odeGUTS
      if (object$modelType == "SD") {
        morseObject <- list(mcmc = rstan::As.mcmc.list(object$stanFit,
                                                       pars = c("kd_log10", "zw_log10", "bw_log10")),
                            model_type = object$modelType)
        class(morseObject) <- "survFit"

        for (i in 1:object$setupMCMC$nChains) {
          colnames(morseObject$mcmc[[i]]) <- c("kd_log10", "z_log10", "kk_log10")
        }
      } else if (object$modelType == "IT") {
        morseObject <- list(mcmc = rstan::As.mcmc.list(object$stanFit,
                                                       pars = c("kd_log10", "mw_log10", "beta_log10")),
                            model_type = object$modelType)
        class(morseObject) <- "survFit"

        for (i in 1:object$setupMCMC$nChains) {
          colnames(morseObject$mcmc[[i]]) <- c("kd_log10", "alpha_log10", "beta_log10")
        }
      } else {
        stop("Wrong model type. Model type should be 'SD' or 'IT'")
      }
    } else if (hb_value == TRUE) {
      ## TODO Need to include a check for hb_log10 length == 1
      if (object$modelType == "SD") {
        morseObject <- list(mcmc = rstan::As.mcmc.list(object$stanFit,
                                                       pars = c("hb_log10", "kd_log10", "zw_log10", "bw_log10")),
                            model_type = object$modelType)
        class(morseObject) <- "survFit"

        for (i in 1:object$setupMCMC$nChains) {
          colnames(morseObject$mcmc[[i]]) <- c("hb_log10", "kd_log10", "z_log10", "kk_log10")
        }
      } else if (object$modelType == "IT") {
        morseObject <- list(mcmc = rstan::As.mcmc.list(object$stanFit,
                                                       pars = c("hb_log10", "kd_log10", "mw_log10", "beta_log10")),
                            model_type = object$modelType)
        class(morseObject) <- "survFit"

        for (i in 1:object$setupMCMC$nChains) {
          colnames(morseObject$mcmc[[i]]) <- c("hb_log10", "kd_log10", "alpha_log10", "beta_log10")
        }
      } else {
        stop("Wrong model type. Model type should be 'SD' or 'IT'")
      }
    } else {
      stop("'hb_value' should be either TRUE or FALSE")
    }
  }

  # Perform predictions using the odeGUTS package
  outMorse <- odeGUTS::predict_ode(morseObject, dataPredict, ...)

  # Calculate summary to embed mean posterior values with the outputs
  invisible(utils::capture.output(outSummary <- summary(object)))

  # Return
  lsOut <- list(parsPost = outSummary$Qposteriors,
                modelType = object$modelType,
                unitData = object$data$unitData,
                beeSpecies = object$data$beeSpecies,
                setupMCMC = object$setupMCMC,
                sim = outMorse$df_quantile)
  class(lsOut) <- c("beeSurvPred", class(lsOut))

  return(lsOut)
}
/scratch/gouwar.j/cran-all/cranData/BeeGUTS/R/predictBeeGUTS.R
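A minimal usage sketch (not part of the package source) of the `...` pass-through documented above. It reuses the `fitBetacyfluthrin_Chronic` example object shipped with BeeGUTS; `mcmc_size` is the only option set, as documented.

library(BeeGUTS)
data(fitBetacyfluthrin_Chronic)
dataPredict <- data.frame(time = c(1:5, 1:15),
                          conc = c(rep(5, 5), rep(15, 15)),
                          replicate = c(rep("rep1", 5), rep("rep2", 15)))
# Reduced posterior sample (documented 'mcmc_size' option) to speed things up
prediction <- predict(fitBetacyfluthrin_Chronic, dataPredict, mcmc_size = 500)
head(prediction$sim)  # quantiles of the predicted survival over time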
#' Read and format the data for the BeeGUTS model
#'
#' @description Read data from a \code{text} or \code{csv} file and recalculate the
#' exposure profile depending on the type of experiment (acute oral, acute contact, chronic oral).
#'
#' @param file_location List of locations of text files, each containing two datasets: one for the survival data,
#' and one for the concentration data. Both datasets must be included in the same file and contain the same number of columns in the same order.
#' The following columns must be included in the survival dataset:
#' \itemize{
#'     \item \code{Survival time \[d\]}: a vector of time in days
#'     \item \code{Control}: a vector of the number of survivors for the control
#'     \item \code{T1} - \code{Tn}: vectors of the number of survivors for treatments
#'     T1 to Tn, one column per treatment.
#' }
#' A line containing the \code{Concentration unit} must be included directly after the end of
#' the last row of the survival data.
#'
#' The following columns must be included in the concentration dataset:
#' \itemize{
#'     \item \code{Concentration time \[d\]}: a vector of time in days.
#'     \item \code{Control}: a vector of concentrations for the control
#'     \item \code{T1} - \code{Tn}: vectors of concentrations for treatments
#'     T1 to Tn, one column per treatment.
#' }
#' For the \code{Acute_Oral} and \code{Acute_Contact} tests, only the initial
#' exposure concentration at time 0 is required.
#'
#' See the details section for an example.
#'
#' @param test_type list of test types amongst "Acute_Oral", "Acute_Contact", and "Chronic_Oral";
#' this list must have the same length as the list of file locations
#' @param bee_species the bee species amongst "Honey_Bee", "Bumble_Bee", "Osmia_bicornis", and "User_Bee". If "User_Bee" is selected,
#' optional arguments to be passed to the concentration reconstruction need to be defined.
#' @param NA_string a character vector of strings which are to be interpreted as NA values
#' @param ... Optional arguments to be passed to the concentration reconstruction (e.g.
#' \itemize{
#'     \item \code{k_sr =} stomach release rate (d-1), default is 0.625 for the honey bee,
#'     \item \code{k_ca =} contact availability rate (d-1), default is 0.4 for the honey bee, or
#'     \item \code{cTime =} the duration of exposure in days for the acute oral tests, default is 0.25 d
#'     \item \code{cstConcCal =} logical, recalculate concentrations in the Chronic_Oral test from mg a.s./kg feed to \eqn{\mu}g/bee (default is TRUE)
#'     \item \code{f_rate =} numerical vector, feeding rate used in the concentration recalculation for the Chronic_Oral test (default is 25 mg/bee/day for the honey bee)
#'     \item \code{targConc =} numerical scalar, target concentration unit in the recalculation for the Chronic_Oral test, 1 for \eqn{\mu}g/bee, 2 for ng/bee, 3 for mg/bee (default is 1).
#' }
#'
#' @return An object of class \code{beeSurvData}, which is a list with the following information:
#' \item{nDatasets}{Number of files passed to the function}
#' \item{survData}{A table containing the survival data as entered by the user in the input file}
#' \item{survData_long}{A data frame containing the survival data in long format for model purposes}
#' \item{concData}{A table containing the concentration data as entered by the user in the input file}
#' \item{concData_long}{A data frame containing the concentration data in long format}
#' \item{unitData}{A character vector containing the units of the data as entered in the line \code{Concentration unit}
#' of the input file}
#' \item{typeData}{A character vector containing the type of experiment}
#' \item{beeSpecies}{A character vector containing the bee species}
#' \item{concModel}{A data frame containing the concentration data as recalculated by the model}
#' \item{concModel_long}{A data frame containing the concentration data as recalculated by the model in long format}
#' Each element of the list is itself a list to account for the multiple files that can be passed as input.
#'
#' @details
#' The filename must begin with the name of the chemical substance being tested, and
#' each word of the filename should be separated by an underscore '_'.
#'
#' Example of formatting of the input file for a chronic oral study
#' \tabular{lllllll}{
#' Survival time \[d\] \tab Control \tab T1 \tab T2 \tab T3 \tab T4 \tab T5 \cr
#' 0 \tab 120 \tab 120 \tab 120 \tab 120 \tab 120 \tab 120 \cr
#' 1 \tab 120 \tab 118 \tab 117 \tab 112 \tab 115 \tab 94 \cr
#' 2 \tab 120 \tab 118 \tab 115 \tab 112 \tab 98 \tab 88 \cr
#' 3 \tab 120 \tab 118 \tab 114 \tab 106 \tab 83 \tab 27 \cr
#' 4 \tab 119 \tab 118 \tab 113 \tab 103 \tab 67 \tab 9 \cr
#' 5 \tab 119 \tab 118 \tab 112 \tab 100 \tab 43 \tab 3 \cr
#' Concentration unit: ug/bee/day \tab\tab\tab\tab\tab\tab \cr
#' Concentration time \[d\] \tab Control \tab T1 \tab T2 \tab T3 \tab T4 \tab T5 \cr
#' 0 \tab 0 \tab 3 \tab 7 \tab 12 \tab 41 \tab 68 \cr
#' 5 \tab 0 \tab 3 \tab 7 \tab 12 \tab 41 \tab 68
#' }
#'
#' @export
#'
#' @examples
#' \donttest{
#' file_location <- system.file("extdata", "betacyfluthrin_chronic_ug.txt", package = "BeeGUTS")
#' lsData <- dataGUTS(file_location = c(file_location),
#'                    test_type = c('Chronic_Oral'),
#'                    bee_species = "Honey_Bee",
#'                    cstConcCal = FALSE)
#' }
dataGUTS <- function(file_location = NULL,
                     test_type = NULL,
                     bee_species = "Honey_Bee",
                     NA_string = getOption("datatable.na.strings", "NA"),
                     ...) { # Possibility to add non-default k_sr and k_ca

  ## Initiate storage for warning messages
  msg <- data.frame()

  ## Check that file_location and test_type have the same length
  if (length(file_location) != length(test_type)) {
    stop("Mismatch between number of files and number of tests.")
  }

  name_chemical <- c() # array to store the name of the chemical extracted from the filename
  for (i in 1:length(file_location)) {
    # Ensure a correct filename and a correct test type are entered
    if (is.null(file_location[i]) || !file.exists(file_location[i]) ||
        (!grepl("\\.txt$", file_location[i]) && !grepl("\\.csv$", file_location[i]))) {
      stop("You need to specify a path to the correct 'file_location' with a '.txt' or '.csv' extension.")
    }
    if (is.null(test_type[i]) || !(test_type[i] %in% c("Acute_Oral", "Acute_Contact", "Chronic_Oral"))) {
      stop("You need to specify a correct data 'test_type' amongst 'Acute_Oral', 'Acute_Contact', or 'Chronic_Oral'.")
    }
    # Sanity check on the chemical species in the filename
    splitpath <- strsplit(strsplit(file_location[i], "_")[[1]], "/")[[1]]
    name_chemical <- append(name_chemical, splitpath[length(splitpath)])
  }

  # Check that there are no multiple entries in the bee_species argument
  if (length(bee_species) > 1) {
    msgTmp <- warning("You entered multiple entries for the bee species. Only one is required.
            Calibration on different species is not possible. Using only the first entry.")
    msg <- c(msg, msgTmp)
    bee_species <- bee_species[[1]] # to make sure that the entry is always a string even if a list is passed
  }

  # Check if a correct bee species is entered
  if (is.null(bee_species) || !(bee_species %in% c("Honey_Bee", "Bumble_Bee", "Osmia_bicornis", "User_Bee"))) {
    stop("You need to specify a correct 'bee_species' amongst 'Honey_Bee', 'Bumble_Bee',
         'Osmia_bicornis', and 'User_Bee'. Other types of bees are not yet implemented.")
  }

  if (bee_species == "Honey_Bee") {
    if (!exists("k_ca")) {
      k_ca <- 0.4 # Default value for honey bees
    } else {
      msgTmp <- warning("User defined 'k_ca' parameter for 'Honey_Bee' of ", k_ca, " d-1")
      msg <- c(msg, msgTmp)
    }
    if (!exists("k_sr")) {
      k_sr <- 0.625 # Default value for honey bees
    } else {
      msgTmp <- warning("User defined 'k_sr' parameter for 'Honey_Bee' of ", k_sr, " d-1")
      msg <- c(msg, msgTmp)
    }
    if (!exists("cTime")) {
      cTime <- 0.25 # Default value for honey bees
    } else {
      msgTmp <- warning("User defined 'cTime' parameter for 'Honey_Bee' of ", cTime, " d")
      msg <- c(msg, msgTmp)
    }
  } else if (bee_species == "Bumble_Bee") {
    if (!exists("k_ca")) {
      k_ca <- 0.4 # Default value for bumble bees
    } else {
      msgTmp <- warning("User defined 'k_ca' parameter for 'Bumble_Bee' of ", k_ca, " d-1")
      msg <- c(msg, msgTmp)
    }
    if (!exists("k_sr")) {
      k_sr <- 1 # Default value for bumble bees
    } else {
      msgTmp <- warning("User defined 'k_sr' parameter for 'Bumble_Bee' of ", k_sr, " d-1")
      msg <- c(msg, msgTmp)
    }
    if (!exists("cTime")) {
      cTime <- 0.25 # Default value for bumble bees
    } else {
      msgTmp <- warning("User defined 'cTime' parameter for 'Bumble_Bee' of ", cTime, " d")
      msg <- c(msg, msgTmp)
    }
  } else if (bee_species == "Osmia_bicornis") {
    if (!exists("k_ca")) {
      k_ca <- 2.0 # Default value for Osmia bicornis
    } else {
      msgTmp <- warning("User defined 'k_ca' parameter for 'Osmia_bicornis' of ", k_ca, " d-1")
      msg <- c(msg, msgTmp)
    }
    if (!exists("k_sr")) {
      k_sr <- 1.5 # Default value for Osmia bicornis
    } else {
      msgTmp <- warning("User defined 'k_sr' parameter for 'Osmia_bicornis' of ", k_sr, " d-1")
      msg <- c(msg, msgTmp)
    }
    if (!exists("cTime")) {
      cTime <- 0.25 # Default value for Osmia bicornis
    } else {
      msgTmp <- warning("User defined 'cTime' parameter for 'Osmia_bicornis' of ", cTime, " d")
      msg <- c(msg, msgTmp)
    }
  } else if (bee_species == "User_Bee") {
    # If a user defined bee type, check that the correct parameters are entered
    if (!exists("k_ca") || !exists("k_sr") || !exists("cTime")) {
      stop("'k_ca', 'k_sr', and 'cTime' arguments must be defined for a user defined bee")
    }
    msgTmp <- warning("User defined bee with parameters k_ca = ", k_ca,
                      ", k_sr = ", k_sr, ", cTime = ", cTime)
    msg <- c(msg, msgTmp)
    if (exists("cstConcCal")) {
      if (!exists("f_rate") || !exists("targConc")) {
        stop("'f_rate' and 'targConc' arguments must be defined when 'cstConcCal' is set to TRUE for a user defined bee")
      }
      msgTmp <- warning("f_rate = ", f_rate, ", targConc = ", targConc)
      msg <- c(msg, msgTmp)
    }
  }

  # Empty lists for the objects to be returned. Values for each test are appended
  tbSurv <- list()
  tbConc <- list()
  tbSurv_long <- list()
  tbConc_long <- list()
  chUnits <- list()
  # use a single value for now
  bee_species <- bee_species # as.list(rep(bee_species, length(file_location)))
  dfConcModel <- list()
  dfConcModel_long <- list()

  # Load the survival data from the file
  # Check where the survival data starts and ends for each file
  nDatasets <- length(file_location)
  for (i in 1:nDatasets) {
    rawData <- readLines(file_location[i])
    skipLine_surv <- grep("Survival", rawData)
    nrowLine_surv <- grep("Concentration unit", rawData)

    # Load the survival data
    tbSurv_aux <- data.table::fread(text = rawData, skip = skipLine_surv - 1L, header = T,
                                    nrow = nrowLine_surv - (skipLine_surv + 1L),
                                    na.strings = NA_string)
    colnames(tbSurv_aux)[1] <- c("SurvivalTime") # Set unique name for the time column
    tbSurv_aux$Dataset <- i
    tbSurv <- append(tbSurv, list(tbSurv_aux))

    # Load the concentration data from the file
    # Check where the concentration data starts and ends
    skipLine_conc <- grep("Concentration time", rawData)

    # Load the concentration data
    tbConc_aux <- data.table::fread(text = rawData, skip = skipLine_conc - 1L, header = T,
                                    na.strings = NA_string)
    tbConc_aux$Dataset <- i
    colnames(tbConc_aux)[1] <- c("SurvivalTime")
    tbConc <- append(tbConc, list(tbConc_aux))

    if (ncol(tbSurv_aux) != ncol(tbConc_aux)) {
      stop("The number of columns in the survival dataset differs from the number of columns in the concentration dataset")
    }

    # Load the units
    # TODO: insert code to extract the exact value of the units
    # TODO: recalculate if units are different for different tests
    # Remove all possible whitespace and substitute with a simple whitespace
    chUnits <- append(chUnits, gsub("[[:blank:]]", " ", rawData[nrowLine_surv]))

    # Recalculate the concentrations based on the experiment type
    if (test_type[i] == "Acute_Oral") {
      dfConcModel_aux <- concAO(tbConc_aux[1, -1], expTime = max(tbSurv_aux[, 1]), k_sr = k_sr, ...)
      dfConcModel_aux$Dataset <- i # reallocate the column because it gets overwritten
    } else if (test_type[i] == "Acute_Contact") {
      dfConcModel_aux <- concAC(tbConc_aux[1, -1], expTime = max(tbSurv_aux[, 1]), k_ca = k_ca, ...)
      dfConcModel_aux$Dataset <- i # reallocate the column because it gets overwritten
    } else {
      lsConcModel_aux <- concCst(tbConc_aux, ...)
      dfConcModel_aux <- lsConcModel_aux$Concentrations
      if (!is.null(lsConcModel_aux$Units)) {
        chUnits[[i]] <- lsConcModel_aux$Units
      }
    }
    dfConcModel <- append(dfConcModel, list(dfConcModel_aux))

    # Transform into long data
    tbSurv_long_aux <- tidyr::gather(tbSurv_aux, Treatment, NSurv, -SurvivalTime, -Dataset)
    tbSurv_long_aux <- na.omit(tbSurv_long_aux) # Remove NAs
    tbConc_long_aux <- tidyr::gather(tbConc_aux, Treatment, Conc, -SurvivalTime, -Dataset)
    tbConc_long_aux <- na.omit(tbConc_long_aux) # Remove NAs
    dfConcModel_long_aux <- tidyr::gather(dfConcModel_aux, Treatment, Conc, -SurvivalTime, -Dataset)
    dfConcModel_long_aux <- na.omit(dfConcModel_long_aux) # Remove NAs

    tbSurv_long <- append(tbSurv_long, list(tbSurv_long_aux))
    tbConc_long <- append(tbConc_long, list(tbConc_long_aux))
    dfConcModel_long <- append(dfConcModel_long, list(dfConcModel_long_aux))
  }

  ## TODO: This part has to be improved substantially. Needs to account for all
  ## possible ways to write the units in the experimental data file.
  units_check <- length(unique(chUnits)) == 1
  if (!units_check) {
    msgTmp <- warning("!!! IMPORTANT NOTE !!! Check the units in the data file.
        There seems to be a mismatch. You can continue with the fit, but the results
        might be incorrect. See 'object$unitData' for more information.")
    msg <- c(msg, msgTmp)
  }

  ## TODO: Find a good strategy for safety checks
  ## to ensure the same chemical species is used
  chemical_check <- length(unique(tolower(name_chemical))) == 1
  if (!chemical_check) {
    msgTmp <- warning("!!! IMPORTANT NOTE !!! Make sure you know what you are doing.
        The chemical species extracted from the filenames do not match. The data
        will continue to be read, but there might be inconsistencies.")
    msg <- c(msg, msgTmp)
  }

  # Check if messages exist, otherwise return NA
  if (length(msg) == 0) {
    msg <- c(msg, 'NA')
  }

  # Return
  lsOut <- list(nDatasets = nDatasets,
                survData = tbSurv,
                survData_long = tbSurv_long,
                concData = tbConc,
                concData_long = tbConc_long,
                unitData = chUnits,
                typeData = test_type,
                beeSpecies = bee_species,
                concModel = dfConcModel,
                concModel_long = dfConcModel_long,
                messages = msg)
  class(lsOut) <- "beeSurvData"

  # This is to keep compatibility with the current code if a single file is passed
  # TODO: Remove once all the changes have been implemented
  # if (length(file_location)==1){
  #   for (name in names(lsOut)) {lsOut[name]<-lsOut[name][[1]]}
  # }
  return(lsOut)
}

# Internal
# Sub-functions to recalculate the concentrations based on the type of test and species

# Acute oral tests
#' Recalculate the concentrations for the acute oral tests for bees
#'
#' @param cExt A dataframe of the concentrations applied at time 0
#' @param cTime The duration of exposure in days, default is 0.25 d
#' @param k_sr Stomach release rate (d-1), default is 0.625
#' @param expTime The duration of the experiment in days
#' @param ... Not used
#'
#' @return A data frame containing a column with the time points and a column with the
#' recalculated concentrations
#' @export
#'
#' @examples conc <- concAO(cExt = cbind(3.5, 6, 8, 10), cTime = 0.25, expTime = 4)
concAO <- function(cExt, cTime = 0.25, expTime, k_sr = 0.625, ...) {
  timePoint <- seq(0, expTime, 0.05)
  # Expand cExt to allow concentration calculation for all time points
  cExt <- cExt[rep(seq_len(nrow(cExt)), each = length(timePoint)), ]
  out <- (cExt * (timePoint / cTime) * (timePoint <= cTime)) +
    (cExt * exp(-k_sr * (timePoint - cTime)) * (timePoint > cTime))
  return(data.frame(SurvivalTime = timePoint, out))
}

# Acute contact test
#' Recalculate the concentrations for the acute contact tests for bees
#'
#' @param cExt The concentration applied
#' @param expTime The duration of the experiment in days
#' @param k_ca Contact availability rate (d-1), default is 0.4
#' @param ... Not used
#'
#' @return A data frame containing a column with the time points and a column with the
#' recalculated concentrations
#' @export
#'
#' @examples conc <- concAC(cbind(3.1, 4, 6, 8), 4)
concAC <- function(cExt, expTime, k_ca = 0.4, ...) {
  timePoint <- seq(0, expTime, 0.05)
  # Expand cExt to allow concentration calculation for all time points
  cExt <- cExt[rep(seq_len(nrow(cExt)), each = length(timePoint)), ]
  out <- cExt * exp(-k_ca * timePoint)
  return(data.frame(SurvivalTime = timePoint, out))
}

# Chronic oral test
#' Recalculate the concentrations for the chronic oral tests for bees from
#' mg a.s./kg feed to \eqn{\mu}g/bee
#'
#' @param f_rate A vector containing the feeding rates of the bees in mg/bee/day. If the vector
#' is of size 1, the same feeding rate is used for all test conditions. If the vector
#' is of size >1, it should be of the same size as the number of conditions and one
#' feeding rate must be provided per condition. Default is 25 mg/bee/day
#' @param cstConcCal Logical. Indicates if concentrations should be recalculated from
#' mg a.s./kg feed to \eqn{\mu}g/bee
#' @param cExt The concentration dataframe in mg a.s./kg feed
#' @param targConc A numerical scalar representing the unit of the target concentration amongst (default = 1)
#' \itemize{
#'     \item \code{1} for \eqn{\mu}g a.s./bee
#'     \item \code{2} for ng a.s./bee
#'     \item \code{3} for mg a.s./bee
#' }
#' @param ... Not used
#'
#' @return A data frame containing a column with the time points and a column with the
#' recalculated concentrations
#' @export
#'
#' @examples
#' cExt <- data.frame(SurvivalTime = c(0,10), Control = c(0,0),
#'                    T1 = c(1, 1), T2 = c(5, 5), Dataset = c(1, 1))
#' conc <- concCst(cExt, targConc = 2)
concCst <- function(cExt, f_rate = c(25), targConc = 1, cstConcCal = TRUE, ...) {
  if (cstConcCal == FALSE) {
    # If recalculating chronic concentrations is not necessary, return early
    return(list(Units = NULL,
                Concentrations = data.frame(SurvivalTime = cExt[, 1], cExt[, 2:ncol(cExt)])))
  }
  if (cstConcCal == TRUE) {
    tmpConc <- cExt[, 2:(ncol(cExt) - 1L)] # Remove the dataset number for the concentration calculations
    if (!(targConc %in% c(1, 2, 3))) {
      stop("targConc should be 1, 2, or 3")
    }
    concConvert <- switch(targConc, 1, 1000, 0.001) # Choose the correct target concentration
    concUnit <- switch(targConc, "\u00b5g/bee/day", "ng/bee/day", "mg/bee/day")
    if (length(f_rate) == 1) { # If only one feeding rate is provided, use it for all conditions
      f_rate <- rep(f_rate, times = length(tmpConc))
      out <- mapply('*', tmpConc, f_rate) / 1000 * concConvert # Corresponds to (f_rate/1000) * tmpConc
    } else if (length(f_rate) == length(tmpConc)) { # If more than one feeding rate is provided, it should be of the same length as the number of conditions
      out <- mapply('*', tmpConc, f_rate) / 1000 * concConvert # Corresponds to (f_rate/1000) * tmpConc
    } else {
      stop("Feeding rates should either be provided as a mean for the whole study or per treatment")
    }
    return(list(Units = concUnit,
                Concentrations = data.frame(SurvivalTime = cExt[, 1], out,
                                            Dataset = cExt[, ncol(cExt)])))
  }
}
/scratch/gouwar.j/cran-all/cranData/BeeGUTS/R/readData.R
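A small worked example (not from the package source) of the unit recalculation performed by concCst() for a chronic oral test: with the default feeding rate of 25 mg/bee/day and targConc = 1, a feed concentration of 10 mg a.s./kg becomes 10 * 25 / 1000 = 0.25 µg/bee/day. The concentration table below is hypothetical.

cExt <- data.frame(SurvivalTime = c(0, 10), Control = c(0, 0),
                   T1 = c(10, 10), T2 = c(50, 50), Dataset = c(1, 1))
out <- concCst(cExt, f_rate = 25, targConc = 1)
out$Units           # "µg/bee/day"
out$Concentrations  # T1 = 0.25 and T2 = 1.25 µg/bee/day at both time points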
# Generated by rstantools. Do not edit by hand.

# names of stan models
stanmodels <- c("GUTS_IT", "GUTS_SD")

# load each stan module
Rcpp::loadModule("stan_fit4GUTS_IT_mod", what = TRUE)
Rcpp::loadModule("stan_fit4GUTS_SD_mod", what = TRUE)

# instantiate each stanmodel object
stanmodels <- sapply(stanmodels, function(model_name) {
  # create C++ code for stan model
  stan_file <- if (dir.exists("stan")) "stan" else file.path("inst", "stan")
  stan_file <- file.path(stan_file, paste0(model_name, ".stan"))
  stanfit <- rstan::stanc_builder(stan_file,
                                  allow_undefined = TRUE,
                                  obfuscate_model_name = FALSE)
  stanfit$model_cpp <- list(model_cppname = stanfit$model_name,
                            model_cppcode = stanfit$cppcode)
  # create stanmodel object
  methods::new(Class = "stanmodel",
               model_name = stanfit$model_name,
               model_code = stanfit$model_code,
               model_cpp = stanfit$model_cpp,
               mk_cppmodule = function(x) get(paste0("rstantools_model_", model_name)))
})
/scratch/gouwar.j/cran-all/cranData/BeeGUTS/R/stanmodels.R
#' Summary of \code{beeSurvFit} objects
#'
#' @description This is the generic \code{summary} S3 method for the \code{beeSurvFit} class.
#' It shows the quantiles of the priors and posteriors of the parameters.
#'
#' @param object An object of class \code{beeSurvFit}
#' @param ... Additional arguments to be passed to the generic \code{summary} method (not used)
#'
#' @return A summary of the \code{beeSurvFit} object
#' @export
#'
#' @examples
#' \donttest{
#' data(fitBetacyfluthrin_Chronic)
#' summary(fitBetacyfluthrin_Chronic)
#' }
summary.beeSurvFit <- function(object, ...) {

  cat("Computing summary can take some time. Please be patient...")

  # Prepare the priors
  lsData_fit <- object$dataFit
  lsData_fit$nDatasets <- ifelse(is.null(lsData_fit$nDatasets), 1, lsData_fit$nDatasets)

  ## Common parameters
  hb <- 10^qnorm(p = c(0.5, 0.025, 0.975),
                 mean = lsData_fit$hbMean_log10,
                 sd = lsData_fit$hbSD_log10)
  kd <- 10^qnorm(p = c(0.5, 0.025, 0.975),
                 mean = lsData_fit$kdMean_log10,
                 sd = lsData_fit$kdSD_log10)

  ## Model specific parameters
  if (object$modelType == "SD") {
    zw <- 10^qnorm(p = c(0.5, 0.025, 0.975),
                   mean = lsData_fit$zwMean_log10,
                   sd = lsData_fit$zwSD_log10)
    bw <- 10^qnorm(p = c(0.5, 0.025, 0.975),
                   mean = lsData_fit$bwMean_log10,
                   sd = lsData_fit$bwSD_log10)
    outPrior <- data.frame(parameters = c("hb", "kd", "zw", "bw"),
                           median = c(hb[1], kd[1], zw[1], bw[1]),
                           Q2.5 = c(hb[2], kd[2], zw[2], bw[2]),
                           Q97.5 = c(hb[3], kd[3], zw[3], bw[3]))
  } else if (object$modelType == "IT") {
    mw <- 10^qnorm(p = c(0.5, 0.025, 0.975),
                   mean = lsData_fit$mwMean_log10,
                   sd = lsData_fit$mwSD_log10)
    beta <- 10^qunif(p = c(0.5, 0.025, 0.975),
                     min = lsData_fit$betaMin_log10,
                     max = lsData_fit$betaMax_log10)
    outPrior <- data.frame(parameters = c("hb", "kd", "mw", "beta"),
                           median = c(hb[1], kd[1], mw[1], beta[1]),
                           Q2.5 = c(hb[2], kd[2], mw[2], beta[2]),
                           Q97.5 = c(hb[3], kd[3], mw[3], beta[3]))
  }

  # Prepare the posteriors
  tmpRes <- rstan::monitor(object$stanFit, print = FALSE)

  ## Common parameters
  hb_med <- c()
  hb_inf95 <- c()
  hb_sup95 <- c()
  for (i in 1:lsData_fit$nDatasets) {
    parname <- ifelse(lsData_fit$nDatasets == 1, "hb_log10", paste0("hb_log10[", i, "]"))
    hb_med[i] <- 10^tmpRes[[parname, "50%"]]
    hb_inf95[i] <- 10^tmpRes[[parname, "2.5%"]]
    hb_sup95[i] <- 10^tmpRes[[parname, "97.5%"]]
  }
  kd_med <- 10^tmpRes[["kd_log10", "50%"]]
  kd_inf95 <- 10^tmpRes[["kd_log10", "2.5%"]]
  kd_sup95 <- 10^tmpRes[["kd_log10", "97.5%"]]

  ## Model specific parameters
  if (object$modelType == "SD") {
    zw_med <- 10^tmpRes[["zw_log10", "50%"]]
    zw_inf95 <- 10^tmpRes[["zw_log10", "2.5%"]]
    zw_sup95 <- 10^tmpRes[["zw_log10", "97.5%"]]
    bw_med <- 10^tmpRes[["bw_log10", "50%"]]
    bw_inf95 <- 10^tmpRes[["bw_log10", "2.5%"]]
    bw_sup95 <- 10^tmpRes[["bw_log10", "97.5%"]]
    outPost <- data.frame(parameters = c("kd", "zw", "bw"),
                          median = c(kd_med, zw_med, bw_med),
                          Q2.5 = c(kd_inf95, zw_inf95, bw_inf95),
                          Q97.5 = c(kd_sup95, zw_sup95, bw_sup95))
  } else if (object$modelType == "IT") {
    mw_med <- 10^tmpRes[["mw_log10", "50%"]]
    mw_inf95 <- 10^tmpRes[["mw_log10", "2.5%"]]
    mw_sup95 <- 10^tmpRes[["mw_log10", "97.5%"]]
    beta_med <- 10^tmpRes[["beta_log10", "50%"]]
    beta_inf95 <- 10^tmpRes[["beta_log10", "2.5%"]]
    beta_sup95 <- 10^tmpRes[["beta_log10", "97.5%"]]
    outPost <- data.frame(parameters = c("kd", "mw", "beta"),
                          median = c(kd_med, mw_med, beta_med),
                          Q2.5 = c(kd_inf95, mw_inf95, beta_inf95),
                          Q97.5 = c(kd_sup95, mw_sup95, beta_sup95))
  }

  hbNames <- c()
  for (i in 1:lsData_fit$nDatasets) {
    hbNames[i] <- paste0("hb[", i, "]")
  }
  outPost_hb <- data.frame(parameters = hbNames, median = hb_med,
                           Q2.5 = hb_inf95, Q97.5 = hb_sup95)

  # Format and output
  outPrior <- format(data.frame(outPrior), scientific = TRUE, digits = 6)
  outPost <- format(data.frame(outPost), scientific = TRUE, digits = 6)
  outPost_hb <- format(data.frame(outPost_hb), scientific = TRUE, digits = 6)

  maxRhat <- max(rstan::summary(object$stanFit)$summary[, "Rhat"], na.rm = TRUE)
  minBulk_ESS <- min(tmpRes$Bulk_ESS)
  minTail_ESS <- min(tmpRes$Tail_ESS)

  cat("Summary: \n\n")
  cat("Bayesian Inference performed with Stan.\n",
      "Model type:", object$modelType, "\n",
      "Bee species:", object$data$beeSpecies, "\n\n",
      "MCMC sampling setup (select with '$setupMCMC')\n",
      "Iterations:", object$setupMCMC$nIter, "\n",
      "Warmup iterations:", object$setupMCMC$nWarmup, "\n",
      "Thinning interval:", object$setupMCMC$thinInterval, "\n",
      "Number of chains:", object$setupMCMC$nChains)
  cat("\n\nPriors of the parameters (quantiles) (select with '$Qpriors'):\n\n")
  print(outPrior, row.names = FALSE)
  cat("\nPosteriors of the parameters (quantiles) (select with '$Qposteriors'):\n\n")
  print(outPost_hb, row.names = FALSE)
  print(outPost, row.names = FALSE)
  cat("\n\n Maximum Rhat computed (na.rm = TRUE):", maxRhat, "\n",
      "Minimum Bulk_ESS:", minBulk_ESS, "\n",
      "Minimum Tail_ESS:", minTail_ESS, "\n",
      "Bulk_ESS and Tail_ESS are crude measures of effective sample size for
      bulk and tail quantities respectively. An ESS > 100 per chain can be
      considered a good indicator. Rhat is an indicator of chain convergence.
      An Rhat <= 1.05 is a good indicator of convergence. For detailed results,
      one can call 'rstan::monitor(YOUR_beeSurvFit_OBJECT$stanFit)'")
  cat("\n\n EFSA Criteria (PPC, NRMSE, and SPPE) can be accessed via 'x$EFSA'")

  critEFSA <- criteriaCheck(object)

  invisible(list(setupMCMC = object$setupMCMC,
                 Qpriors = outPrior,
                 Qposteriors_hb = outPost_hb,
                 Qposteriors = outPost,
                 EFSA = critEFSA))
}

#' Summary of \code{LCx} objects
#'
#' @description This is the generic \code{summary} S3 method for the \code{LCx} class.
#' It shows the median and 95% credible interval of the calculated LCx.
#'
#' @param object An object of class \code{LCx}
#' @param ... Additional arguments to be passed to the generic \code{summary} method (not used)
#'
#' @return A summary of the \code{LCx} object
#' @export
#'
#' @examples
#' \donttest{
#' data(fitBetacyfluthrin_Chronic)
#' out <- LCx(fitBetacyfluthrin_Chronic)
#' summary(out)
#' }
summary.LCx <- function(object, ...) {
  cat("Summary: \n\n")
  cat("LC", object$X_prop * 100, " calculation. \n",
      "Time for which the LCx is calculated:", object$timeLCx, "\n",
      "Bee species:", object$beeSpecies, "\n",
      "Test type:", object$testType, "\n",
      "LCx:", "\n")
  print(object$dfLCx)
}
/scratch/gouwar.j/cran-all/cranData/BeeGUTS/R/summaryBeeGUTS.R
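Because summary.beeSurvFit() returns its tables invisibly, they can be captured and reused; a short sketch (not from the package source) using the example fit shipped with BeeGUTS:

data(fitBetacyfluthrin_Chronic)
s <- summary(fitBetacyfluthrin_Chronic)  # prints the summary described above
s$Qposteriors  # posterior quantile table
s$EFSA         # PPC, NRMSE, and SPPE criteria computed by criteriaCheck()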
#' @title Validation method for \code{beeSurvFit} objects
#'
#' @description This is a \code{validation} method for the
#' \code{beeSurvFit} object. It performs forward predictions for a specific concentration
#' profile and compares these predictions with the respective experimental data.
#'
#' @param object An object of class \code{beeSurvFit}
#' @param dataValidate Data to validate in the format of the experimental data used for the fit (see \code{dataGUTS})
#' @param ... Additional arguments to be passed to the \code{predict.survFit} method from \code{odeGUTS} (e.g.
#' \code{mcmc_size = 1000} can be used to reduce the number of MCMC samples in order to speed up
#' the computation. \code{mcmc_size} is the number of selected iterations for one chain. Default
#' is 1000. If all MCMC samples are wanted, set the argument to \code{NULL}.
#' \code{hb_value = TRUE}: the background mortality \code{hb} is taken from the posterior.
#' If \code{FALSE}, parameter \code{hb} is set to a fixed value (see \code{hb_valueFORCED}). The default is \code{FALSE}.
#' \code{hb_valueFORCED = 0}: if \code{hb_value} is \code{FALSE}, this fixes \code{hb}. The default is \code{0}.)
#'
#' @return An object of class \code{beeSurvValidation}.
#'
#' @export
validate <- function(object, dataValidate, ...) {
  UseMethod("validate")
}

#' Validate method for \code{beeSurvFit} objects
#'
#' @description This is the generic \code{validate} S3 method for the \code{beeSurvFit}
#' class. It predicts the survival over time for the concentration profiles entered by the user.
#'
#' @param object An object of class \code{beeSurvFit}
#' @param dataValidate Data to validate in the format of the experimental data used for the fit (see \code{dataGUTS})
#' @param ... Additional arguments to be passed to the \code{predict.survFit} method from \code{odeGUTS} (e.g.
#' \code{mcmc_size = 1000} can be used to reduce the number of MCMC samples in order to speed up
#' the computation. \code{mcmc_size} is the number of selected iterations for one chain. Default
#' is 1000. If all MCMC samples are wanted, set the argument to \code{NULL}.
#' \code{hb_value = TRUE}: the background mortality \code{hb} is taken from the posterior.
#' If \code{FALSE}, parameter \code{hb} is set to a fixed value (see \code{hb_valueFORCED}). The default is \code{FALSE}.
#' \code{hb_valueFORCED = 0}: if \code{hb_value} is \code{FALSE}, this fixes \code{hb}. The default is \code{0}.)
#'
#' @return A \code{beeSurvValidation} object with the results of the validation
#' @export
#'
#' @examples
#' \donttest{
#' data(betacyfluthrinChronic)
#' data(fitBetacyfluthrin_Chronic)
#' validation <- validate(fitBetacyfluthrin_Chronic, betacyfluthrinChronic)
#' }
validate.beeSurvFit <- function(object, dataValidate, ...) {

  # Check for correct class
  if (!is(object, "beeSurvFit")) {
    stop("validate.beeSurvFit: an object of class 'beeSurvFit' is expected")
  }

  # Ugly fix to keep the validate function the same.
  # Validation data is always a single file, so collapse the list
  if (length(dataValidate$nDatasets) == 1) {
    for (name in names(dataValidate)) {
      dataValidate[name] <- dataValidate[name][[1]]
    }
  }

  ### Prepare the experimental dataset
  dfData <- dplyr::full_join(dplyr::select(dataValidate$survData_long, !Dataset),
                             dplyr::select(dataValidate$concModel_long, !Dataset),
                             by = c("SurvivalTime", "Treatment"))
  colnames(dfData) <- c("time", "replicate", "Nsurv", "conc")
  dfData <- dfData[with(dfData, order(replicate, time)), ]

  ## Run the prediction with the odeGUTS::predict_Nsurv_ode function
  if (object$modelType == "SD") {
    morseObject <- list(mcmc = rstan::As.mcmc.list(object$stanFit,
                                                   pars = c("kd_log10", "zw_log10", "bw_log10")),
                        model_type = object$modelType)
    class(morseObject) <- "survFit"

    for (i in 1:object$setupMCMC$nChains) {
      colnames(morseObject$mcmc[[i]]) <- c("kd_log10", "z_log10", "kk_log10")
    }
  } else if (object$modelType == "IT") {
    morseObject <- list(mcmc = rstan::As.mcmc.list(object$stanFit,
                                                   pars = c("kd_log10", "mw_log10", "beta_log10")),
                        model_type = object$modelType)
    class(morseObject) <- "survFit"

    for (i in 1:object$setupMCMC$nChains) {
      colnames(morseObject$mcmc[[i]]) <- c("kd_log10", "alpha_log10", "beta_log10")
    }
  } else {
    stop("Wrong model type. Model type should be 'SD' or 'IT'")
  }

  # Perform predictions using the odeGUTS package
  outMorse <- odeGUTS::predict_Nsurv_ode(morseObject, dfData, ...)

  # Calculate the EFSA criteria using the odeGUTS package
  EFSA_Criteria <- odeGUTS::predict_Nsurv_check(outMorse, ...)

  # Calculate summary to embed mean posterior values with the outputs
  invisible(utils::capture.output(outSummary <- summary(object)))

  if (object$data$beeSpecies != dataValidate$beeSpecies) {
    warning("BeeGUTS was calibrated on a ", object$data$beeSpecies,
            " and is validated on a ", dataValidate$beeSpecies)
  }

  # Return
  lsOut <- list(parsPost = outSummary$Qposteriors,
                modelType = object$modelType,
                unitData = object$data$unitData,
                beeSpecies = object$data$beeSpecies,
                beeSpeciesVal = dataValidate$beeSpecies,
                typeData = dataValidate$typeData,
                setupMCMC = object$setupMCMC,
                sim = outMorse$df_quantile,
                EFSA = EFSA_Criteria,
                data = dataValidate$concData_long,
                dataModel = dataValidate$concModel_long)
  class(lsOut) <- "beeSurvValidation"

  return(lsOut)
}
/scratch/gouwar.j/cran-all/cranData/BeeGUTS/R/validateBeeGUTS.R
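A sketch (not from the package source) of a validation run with a reduced posterior sample. It reuses the calibration data shipped with the package, so the "validation" is purely illustrative, as in the @examples above; mcmc_size is the documented option forwarded to odeGUTS.

data(betacyfluthrinChronic)
data(fitBetacyfluthrin_Chronic)
validation <- validate(fitBetacyfluthrin_Chronic, betacyfluthrinChronic,
                       mcmc_size = 500)
validation$EFSA  # PPC, NRMSE, and SPPE as returned by odeGUTS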
## ----include = FALSE----------------------------------------------------------
knitr::opts_chunk$set(
  collapse = TRUE,
  comment = "#>",
  fig.path = "man/figures/Tutorial-",
  out.width = "100%"
)

## ----example------------------------------------------------------------------
library(BeeGUTS)
file_location <- system.file("extdata", "betacyfluthrin_chronic_ug.txt", package = "BeeGUTS") # Load the path to one of the example files
lsData <- dataGUTS(file_location = file_location, test_type = 'Chronic_Oral') # Read the example file
plot(lsData) # Plot the data
fit <- fitBeeGUTS(lsData, modelType = "SD", nIter = 2000, nChains = 1) # Fit an SD model. This can take some time...
traceplot(fit) # Produce a diagnostic plot of the fit
plot(fit) # Plot the fit results
summary(fit) # Gives a summary of the results
validation <- validate(fit, lsData) # Produce a validation of the fit (here it uses the same dataset as the calibration as an example, so not relevant...)
plot(validation) # Plot the validation results
dataPredict <- data.frame(time = c(1:5, 1:15),
                          conc = c(rep(5, 5), rep(15, 15)),
                          replicate = c(rep("rep1", 5), rep("rep3", 15))) # Prepare data for forward prediction
prediction <- predict(fit, dataPredict) # Perform forward prediction. At the moment, no concentration recalculation is performed in the forward prediction; the concentrations are taken as in a chronic test
plot(prediction) # Plot the prediction results
/scratch/gouwar.j/cran-all/cranData/BeeGUTS/inst/doc/Tutorial.R
--- title: "Tutorial" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{Tutorial} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>", fig.path = "man/figures/Tutorial-", out.width = "100%" ) ``` # BeeGUTS The goal of BeeGUTS is to analyse the survival toxicity tests performed for bee species. It can be used to fit a Toxicokinetic-Toxicodynamic (TKTD) model adapted for bee standard studies (acute oral, acute contact, and chronic oral studies). The TKTD model used is the General Unified Threshold model of Survival (GUTS). ## Installation You can install the released version of BeeGUTS from [CRAN](https://CRAN.R-project.org) with: ``` r install.packages("BeeGUTS") ``` And the development version from [GitHub](https://github.com/) with: ``` r # install.packages("devtools") devtools::install_github("bgoussen/BeeGUTS") ``` ## Example This is a basic example which shows you how to solve a common problem: ```{r example} library(BeeGUTS) file_location <- system.file("extdata", "betacyfluthrin_chronic_ug.txt", package = "BeeGUTS") # Load the path to one of the example file lsData <- dataGUTS(file_location = file_location, test_type = 'Chronic_Oral') # Read the example file plot(lsData) # Plot the data fit <- fitBeeGUTS(lsData, modelType = "SD", nIter = 2000, nChains = 1) # Fit a SD model. This can take some time... traceplot(fit) # Produce a diagnostic plot of the fit plot(fit) # Plot the fit results summary(fit) # Gives a summary of the results validation <- validate(fit, lsData) # produce a validation of the fit (here it uses the same dataset as calibration as an example, so not relevant…) plot(validation) # plot the validation results dataPredict <- data.frame(time = c(1:5, 1:15), conc = c(rep(5, 5), rep(15, 15)), replicate = c(rep("rep1", 5), rep("rep3", 15))) # Prepare data for forwards prediction prediction <- predict(fit, dataPredict) # Perform forwards prediction. At the moment, no concentration recalculation is performed in the forwards prediction. The concentrations are taken as in a chronic test plot(prediction) # Plot of the prediction results ```
/scratch/gouwar.j/cran-all/cranData/BeeGUTS/inst/doc/Tutorial.Rmd
--- title: "Tutorial" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{Tutorial} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>", fig.path = "man/figures/Tutorial-", out.width = "100%" ) ``` # BeeGUTS The goal of BeeGUTS is to analyse the survival toxicity tests performed for bee species. It can be used to fit a Toxicokinetic-Toxicodynamic (TKTD) model adapted for bee standard studies (acute oral, acute contact, and chronic oral studies). The TKTD model used is the General Unified Threshold model of Survival (GUTS). ## Installation You can install the released version of BeeGUTS from [CRAN](https://CRAN.R-project.org) with: ``` r install.packages("BeeGUTS") ``` And the development version from [GitHub](https://github.com/) with: ``` r # install.packages("devtools") devtools::install_github("bgoussen/BeeGUTS") ``` ## Example This is a basic example which shows you how to solve a common problem: ```{r example} library(BeeGUTS) file_location <- system.file("extdata", "betacyfluthrin_chronic_ug.txt", package = "BeeGUTS") # Load the path to one of the example file lsData <- dataGUTS(file_location = file_location, test_type = 'Chronic_Oral') # Read the example file plot(lsData) # Plot the data fit <- fitBeeGUTS(lsData, modelType = "SD", nIter = 2000, nChains = 1) # Fit a SD model. This can take some time... traceplot(fit) # Produce a diagnostic plot of the fit plot(fit) # Plot the fit results summary(fit) # Gives a summary of the results validation <- validate(fit, lsData) # produce a validation of the fit (here it uses the same dataset as calibration as an example, so not relevant…) plot(validation) # plot the validation results dataPredict <- data.frame(time = c(1:5, 1:15), conc = c(rep(5, 5), rep(15, 15)), replicate = c(rep("rep1", 5), rep("rep3", 15))) # Prepare data for forwards prediction prediction <- predict(fit, dataPredict) # Perform forwards prediction. At the moment, no concentration recalculation is performed in the forwards prediction. The concentrations are taken as in a chronic test plot(prediction) # Plot of the prediction results ```
/scratch/gouwar.j/cran-all/cranData/BeeGUTS/vignettes/Tutorial.Rmd
# $Id: Benchmarking.sR 258 2023-08-03 12:27:03Z larso $

.onAttach = function(...) {
   packageStartupMessage("\nLoading Benchmarking version 0.32h (Revision 263, 2024/03/13 15:04:04) ...")
   packageStartupMessage("Build 2024/03/13 15:05:00")
}
/scratch/gouwar.j/cran-all/cranData/Benchmarking/R/Benchmarking.R
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393

chol_LO <- function(A) {
    .Call(`_Benchmarking_chol_LO`, A)
}

chol_downdate <- function(L, v) {
    .Call(`_Benchmarking_chol_downdate`, L, v)
}

chol_downdate2 <- function(L, v) {
    .Call(`_Benchmarking_chol_downdate2`, L, v)
}

det_chol_downdate <- function(L, v) {
    .Call(`_Benchmarking_det_chol_downdate`, L, v)
}

chol_update <- function(L, v) {
    .Call(`_Benchmarking_chol_update`, L, v)
}

inverse_LO <- function(L) {
    .Call(`_Benchmarking_inverse_LO`, L)
}

matProdT_LO <- function(X) {
    .Call(`_Benchmarking_matProdT_LO`, X)
}

inverse_spd <- function(A, lower_triangel = FALSE) {
    .Call(`_Benchmarking_inverse_spd`, A, lower_triangel)
}

solve_LO <- function(L, d) {
    .Call(`_Benchmarking_solve_LO`, L, d)
}

det_downdate <- function(A, v, det) {
    .Call(`_Benchmarking_det_downdate`, A, v, det)
}

outlierCpp <- function(K, R, xy, ratio, imat, rmin) {
    invisible(.Call(`_Benchmarking_outlierCpp`, K, R, xy, ratio, imat, rmin))
}
/scratch/gouwar.j/cran-all/cranData/Benchmarking/R/RcppExports.R
# $Id: StoNED.R 223 2020-06-20 10:38:10Z lao $

################################################################
# Convex nonparametric least squares,
# here for a convex (cost) function
# with multiplicative error term:
#    Y = b'X * exp(e)
# and additive error term:
#    Y = b'X + e
################################################################

###############################################################
# USAGE: X are RHS, Y are LHS;
# COST specifies whether a cost function is estimated (COST=1)
#    or a production function (COST=0);
# MULT determines if a multiplicative (MULT=1) or additive (MULT=0)
#    model is estimated;
# RTS determines the returns-to-scale assumption: RTS="vrs", "drs",
#    "irs" and "crs" are possible for variable or constant returns to scale;
# METHOD specifies the way efficiency is estimated: MM for method of
#    moments and PSL for pseudo-likelihood estimation are possible
###############################################################

require(ucminf)
require(quadprog)

stoned <- function(X, Y, RTS = "vrs", COST = 0, MULT = 0, METHOD = "MM")
{
   ###############################################
   ###### Rows, columns and intercept for non-crs
   ###############################################
   if (!is.matrix(X)) X <- as.matrix(X)
   if (!is.matrix(Y)) Y <- as.matrix(Y)
   n <- nrow(X)  # Number of units
   m <- ncol(X)  # Number of parameters for each unit except for the intercept

   rts <- c("vrs", "drs", "crs", "irs")
   if (missing(RTS)) RTS <- "vrs"
   if (is.numeric(RTS)) {
      RTStemp <- rts[RTS] # there is no first fdh
      RTS <- RTStemp
   }
   RTS <- tolower(RTS)
   if (!(RTS %in% rts)) stop("Unknown scale of returns:", RTS)

   if (!is.null(METHOD) & !(METHOD %in% c("MM", "PSL"))) {
      stop("Unknown choice for METHOD: ", METHOD)
   }

   inter <- ifelse(RTS == "crs", 0, 1)
   if (is.null(METHOD)) METHOD <- "NONE"
   if (RTS == "crs") MULT <- 1
   if (RTS == "drs") MULT <- 1
   if (RTS == "irs") MULT <- 1
   if (RTS == "crs" | RTS == "irs" | RTS == "drs") {
      message("Multiplicative model induced for this scale assumption")
   }

   ###############################################
   ### Get the data together; create the A-matrix with
   ### an intercept if necessary, and the B-matrix
   ###############################################
   if (MULT == 1) {
      if (inter == 1) {
         Z <- diag(1 / c(Y))
         for (i in 1:m) {
            # Z <- cbind( Z, (diag(n) * as.matrix(X)[,i])/Y )
            Z <- cbind(Z, diag(c(X[, i] / Y)))
         }
      } else if (inter == 0) {
         Z <- NULL
         for (i in 1:m) {
            Z <- cbind(Z, diag(c(X[, i] / Y)))
         }
      }
      B <- rep(1, n)
   }
   if (MULT == 0) {
      if (inter == 1) {
         Z <- diag(n)
         for (i in 1:m) {
            Z <- cbind(Z, diag(X[, i]))
         }
      } else if (inter == 0) {
         Z <- NULL
         for (i in 1:m) {
            Z <- cbind(Z, diag(X[, i]))
         }
      }
      B <- Y
   }
   colnames(Z) <- outer(1:n, ifelse(inter == 1, 0, 1):m,
                        FUN = function(x, y) { paste(x, y, sep = "_") })

   ###############################################
   ###### Create non-negativity constraints
   ###############################################
   non_neg_beta <- cbind(matrix(0, nrow = m * n, ncol = n), diag(m * n))
   if (inter == 1) {
      if (RTS == "vrs") {
         sign_cons <- non_neg_beta
      } else if (RTS == "drs") {
         RTS_cons <- cbind(ifelse(COST == 0, 1, -1) * diag(m * n),
                           matrix(0, nrow = m * n, ncol = n))
         sign_cons <- rbind(non_neg_beta, RTS_cons)
      } else if (RTS == "irs") {
         RTS_cons <- cbind(ifelse(COST == 0, -1, 1) * diag(m * n),
                           matrix(0, nrow = m * n, ncol = n))
         sign_cons <- rbind(non_neg_beta, RTS_cons)
      }
   } else if (inter == 0) {
      ## 'crs' has no intercept
      sign_cons <- diag(m * n)
   }

   ###############################################
   ###### Create concavity constraints
   ###############################################
   alpha_matlist <- list()
   beta_matlist <- list()
   for (i in 1:n) {
      a_matr <- diag(n)
      a_matr[, i] <- a_matr[, i] - 1
      alpha_matlist[[i]] <- a_matr
      # Create beta
      b_matr <- matrix(0, ncol = m * n, nrow = n)
      for (j in 1:m) {
         b_matr[, i + n * (j - 1)] <- -X[i, j]
         for (k in 1:n) {
            b_matr[k, k + (j - 1) * n] <- b_matr[k, k + (j - 1) * n] + X[i, j]
         }
      }
      beta_matlist[[i]] <- b_matr
   }
   if (inter == 1) {
      sspot_cons <- cbind(do.call(rbind, alpha_matlist), do.call(rbind, beta_matlist))
   } else if (inter == 0) {
      sspot_cons <- cbind(do.call(rbind, beta_matlist))
   }

   #####################################
   # !!!!!!!!! Cost function !!!!!!!!!
   # Convexity -- not concavity -- constraints!
   #####################################
   if (COST == 1) {
      sspot_cons <- -sspot_cons
   }

   #####################################
   ### Getting the constraints together
   #####################################
   G <- rbind(sign_cons, sspot_cons)
   h <- rep(0, nrow(sign_cons) + nrow(sspot_cons))
   # Set column and row names for the constraints; should reuse any existing names
   colnames(G) <- outer(1:n, ifelse(inter == 1, 0, 1):m,
                        FUN = function(x, y) { paste(x, y, sep = "_") })
   rownames(G) <- names(h) <-
      c(outer(1:n, 1:m, FUN = function(x, y) { paste(x, y, sep = "_") }),
        outer(1:n, 1:n, FUN = function(x, y) { paste(y, x, sep = "") }))

   #####################################
   ### Run
   #####################################
   # Drop constraints where the whole row is zero
   helenul <- apply(G, 1, function(x) { all(x == 0) })
   G <- G[!helenul, ]
   h <- h[!helenul]

   dvec <- base::crossprod(Z, B)  # was: t(A)%*%B
   Dmat <- base::crossprod(Z, Z)  # t(A) %*% A
   # Make sure Dmat is positive definite
   Dmat <- Dmat + diag(1e-10 * max(Dmat), dim(Dmat)[1], dim(Dmat)[2])
   z_cnls <- solve.QP(Dmat, dvec, Amat = t(G), bvec = h)
   z_cnls$type <- "solve.QP"
   z_cnls$X <- z_cnls$solution
   value <- 0.5 * z_cnls$solution %*% Dmat %*% z_cnls$solution - c(dvec) %*% z_cnls$solution
   z_cnls$IsError <- FALSE
   slack <- G %*% z_cnls$X - h
   slackResidual <- -sum(slack[slack < 0])
   z_cnls$slackNorm <- slackResidual
   eps2 <- sum((Z %*% z_cnls$X - B)^2)
   z_cnls$solutionNorm <- eps2

   #####################################
   ### Collect results
   #####################################
   # First column is the intercept when 'inter==1'
   beta_matrix <- matrix(z_cnls$X, ncol = length(z_cnls$X) / n)
   if (inter == 0) {
      y_hat <- diag(beta_matrix %*% t(X))
   } else if (inter == 1) {
      y_hat <- diag(beta_matrix %*% t(cbind(1, X)))
   }
   if (MULT == 1) {
      resid <- log(Y) - log(y_hat)
      # Why both 'yhat' and 'fitted'?
      fitted <- Y / exp(resid)
   } else if (MULT == 0) {
      resid <- Y - y_hat
      fitted <- Y - resid
   }
   # Residuals calculated in another way
   restled <- Y - Z %*% z_cnls$X
   SSR <- crossprod(restled)  # t(restled) %*% restled

   #####################################
   ### Inefficiency estimation
   #####################################
   if (METHOD == "MM") {
      M2 <- sum((resid - mean(resid)) * (resid - mean(resid))) / length(resid)
      M3 <- sum((resid - mean(resid)) * (resid - mean(resid)) * (resid - mean(resid))) *
         (1 / length(resid))
      if (COST == 1) M3 <- -M3
      if (M3 > 0) print("Wrong skewness? Third moment of the residuals is greater than 0")
      # Sigma_u and sigma_v as functions of M3 and M2
      sigma_u <- (M3 / (sqrt(2 / pi) * (1 - 4 / pi)))^(1 / 3)
      sigma_v <- sqrt(M2 - ((pi - 2) / pi) * sigma_u * sigma_u)
      lamda <- sigma_u / sigma_v
      myy <- (sigma_u^2 / pi)^(1 / 2)
   } ## if (METHOD=="MM")

   if (METHOD == "PSL") {
      # Minus log-likelihood function
      ll <- function(lmd = 1, e) {
         sc <- sqrt(2 * lmd^2 / pi / (1 + lmd^2))
         si <- sqrt(mean(e^2) / (1 - sc^2))
         mu <- si * sc
         ep <- e - mu
         likelihood <- -length(ep) * log(si) +
            sum(pnorm(-ep * lmd / si, log.p = TRUE)) -
            0.5 * sum(ep^2) / si^2
         return(-likelihood)
      }
      if (COST == 0) {
         sol <- ucminf(par = 1, fn = ll, e = resid)
      } else if (COST == 1) {
         sol <- ucminf(par = 1, fn = ll, e = -resid)
      }
      if (sol$convergence < 0) {
         warning("Not necessarily converged, $convergence = ",
                 sol$convergence, "\n", sol$message)
      }
      lamda <- sol$par
      ## Use the estimate of lambda to calculate sigma^2
      sc <- sqrt(2 * lamda^2 / pi / (1 + lamda^2))
      sig.sq <- mean(resid^2) / (1 - sc^2)
      ## Now calculate sigma.u and sigma.v
      sigma_v <- sqrt(sig.sq / (1 + lamda^2))
      sigma_u <- sigma_v * lamda
   } ## if (METHOD=="PSL")

   if (METHOD == "NONE") {
      # If the variables below are not set to NA, missing variables cause
      # errors later in the program.
      sigma_v <- sigma_u <- NA
   }

   # Shift factor
   if (MULT == 0) {
      Expect_Ineff <- sigma_u * sqrt(2 / pi)
   } else if (MULT == 1) {
      Expect_Ineff <- 1 - exp(-sigma_u * sqrt(2 / pi))
   }
   sumsigma2 <- sigma_u^2 + sigma_v^2

   #### JLMS point estimator for inefficiency ####
   # Composite error
   if (COST == 0) {
      Comp_err <- resid - sigma_u * sqrt(2 / pi)
   } else if (COST == 1) {
      Comp_err <- resid + sigma_u * sqrt(2 / pi)
   }
   # mu star and sigma star
   mu_star <- -Comp_err * sigma_u * sigma_u / sumsigma2
   sigma_star <- sigma_u * sigma_u * sigma_v * sigma_v
   cond_mean <- mu_star + (sigma_u * sigma_v) *
      (dnorm(Comp_err / sigma_v * sigma_v) / (1 - pnorm(Comp_err / sigma_v * sigma_v)))
   # Be aware: for the calculation of the conditional mean I follow
   # Keshvari/Kuosmanen 2013, stating that the formula in
   # Kuosmanen/Kortelainen is not correct!

   if (MULT == 1) {
      eff_score <- exp(-cond_mean)
      if (COST == 0) {
         Frontier_reference_points <- fitted * exp(sigma_u * sqrt(2 / pi))
      } else if (COST == 1) {
         Frontier_reference_points <- fitted * exp(-sigma_u * sqrt(2 / pi))
      }
   } else if (MULT == 0) {
      eff_score <- 1 - cond_mean / Y
      if (COST == 1) {
         Frontier_reference_points <- fitted - sigma_u * sqrt(2 / pi)
      } else if (COST == 0) {
         Frontier_reference_points <- fitted + sigma_u * sqrt(2 / pi)
      }
   }

   return(list(residualNorm = z_cnls$slackNorm,
               solutionNorm = z_cnls$solutionNorm,
               error = z_cnls$IsError,
               # type = z_cnls$type,
               coef = beta_matrix,
               sol = z_cnls$X,
               residuals = resid,
               restled = restled,
               fit = fitted,
               yhat = y_hat,
               eff = eff_score,
               front = Frontier_reference_points,
               sigma_u = sigma_u,
               SSR = SSR
               # G=G,
               # H=h
               # Fitted_Values = yhat
               ))
} ## stoned
/scratch/gouwar.j/cran-all/cranData/Benchmarking/R/StoNED.R
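A simulated-data sketch (not part of the package source) of calling stoned(); the data-generating process below -- one input, multiplicative noise and half-normal inefficiency -- is invented for illustration.

set.seed(1)
x <- matrix(runif(25, 1, 10), ncol = 1)
u <- abs(rnorm(25, sd = 0.2))    # half-normal inefficiency term
v <- rnorm(25, sd = 0.05)        # noise term
y <- (5 + 2 * x) * exp(v - u)    # production data below the frontier
res <- stoned(x, y, RTS = "vrs", COST = 0, MULT = 1, METHOD = "MM")
summary(res$eff)  # JLMS efficiency scores
res$sigma_u       # method-of-moments estimate of the inefficiency scale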
# $Id: addModel.R 218 2020-05-21 21:28:28Z lao $

# Additive model, corresponds to eqs. 4.34-4.38 in Cooper et al., 2007

dea.add <- function(X, Y, RTS = "vrs", XREF = NULL, YREF = NULL,
                    FRONT.IDX = NULL, param = NULL, TRANSPOSE = FALSE, LP = FALSE) {

   rts <- c("fdh", "vrs", "drs", "crs", "irs", "irs", "add", "fdh+")
   if (is.numeric(RTS)) {
      if (LP) print(paste("Number '", RTS, "'", sep = ""), quote = F)
      RTStemp <- rts[1 + RTS]  # the first, fdh, is number 0
      RTS <- RTStemp
      if (LP) print(paste("' is '", RTS, "'\n", sep = ""), quote = F)
   }
   RTS <- tolower(RTS)
   if (!(RTS %in% rts)) stop("Unknown scale of returns:", RTS)

   if (TRANSPOSE) {
      K <- dim(X)[2]
   } else {
      K <- dim(X)[1]
   }

   if (RTS == "fdh+") {
      # Set the parameters low and high
      if (is.null(param)) {
         param <- .15
      }
      if (length(param) == 1) {
         low <- 1 - param
         high <- 1 + param
      } else {
         low <- param[1]
         high <- param[2]
      }
      param <- c(low = low, high = high)
   }

   e <- list(eff = rep(1, K), objval = NULL, RTS = RTS,
             ORIENTATION = "in", TRANSPOSE = TRANSPOSE, param = param)
   class(e) <- "Farrell"

   sl <- slack(X, Y, e, XREF = XREF, YREF = YREF, FRONT.IDX = FRONT.IDX, LP = LP)

   # Make slack the sum of slacks, not a logical variable
   # for whether there is slack or not
   # if (TRANSPOSE)
   #    sl$sum2 <- colSums(sl$sx) + colSums(sl$sy)
   # else
   #    sl$sum2 <- rowSums(sl$sx) + rowSums(sl$sy)

   return(sl)
} # dea.add
/scratch/gouwar.j/cran-all/cranData/Benchmarking/R/addModel.R
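A toy call of the additive model (data invented for illustration, not from the package source); the returned object is the result of slack(), whose $sum element holds the total slack per firm.

library(Benchmarking)
x <- matrix(c(1, 2, 3, 4, 5), ncol = 1)  # one input
y <- matrix(c(1, 3, 3, 4, 4), ncol = 1)  # one output
ad <- dea.add(x, y, RTS = "vrs")
ad$sum  # sum of input and output slacks; 0 for firms on the frontier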
# $Id: boot.fear.R 128 2014-06-14 16:19:18Z B002961 $

# Bootstrap DEA functions, a wrapper for FEAR::boot.sw98

# boot.sw98(XOBS, YOBS, NREP = 2000, DHAT = NULL,
#    RTS = 1, ORIENTATION = 1, alpha = 0.05, CI.TYPE=2,
#    XREF = NULL, YREF = NULL, DREF = NULL,
#    OUTPUT.FARRELL = FALSE, NOPRINT = FALSE, errchk = TRUE)

boot.fear <- function(X, Y, NREP = 200, EFF = NULL, RTS = "vrs", ORIENTATION = "in",
                      alpha = 0.05, XREF = NULL, YREF = NULL, EREF = NULL)
{
   if (!("FEAR" %in% .packages(T)))
      stop("Note: boot.fear only works if the package FEAR is installed")

   rts_ <- c("fdh", "vrs", "drs", "crs", "irs", "irs", "add")
   if (is.numeric(RTS)) {
      RTStemp <- rts_[1 + RTS]  # the first, fdh, is number 0
      RTS <- RTStemp
   }
   RTS <- tolower(RTS)
   rts <- which(RTS == rts_) - 1
   if (rts < 1 || 3 < rts)
      stop("Invalid value of RTS in call to boot.fear")

   orientation_ <- c("in-out", "in", "out", "graph")
   if (is.numeric(ORIENTATION)) {
      ORIENTATION_ <- orientation_[ORIENTATION + 1]  # "in-out" is number 0
      ORIENTATION <- ORIENTATION_
   }
   ORIENTATION <- tolower(ORIENTATION)
   orientation <- which(ORIENTATION == orientation_) - 1
   if (orientation < 1 || 3 < orientation)
      stop("Invalid value of ORIENTATION in call to boot.fear")

   if (!is.null(EREF) && (is.null(XREF) || is.null(YREF)))
      stop("When EREF is present then XREF and YREF must also be present")

   if (ORIENTATION == "out") farrell <- TRUE else farrell <- FALSE

   # print(paste("rts =",rts," orientation=",orientation))
   b <- NA
   if (!is.null(XREF)) XREF <- t(XREF)
   if (!is.null(YREF)) YREF <- t(YREF)
   if (!is.null(EFF))  EFF <- 1/EFF    # Convert to Shephard
   if (!is.null(EREF)) EREF <- 1/EREF  # Convert to Shephard
   ## tryCatch( b <- FEAR::boot.sw98(t(X), t(Y), NREP, EFF, rts,
   ##             orientation, alpha,,XREF, YREF, EREF,
   ##             OUTPUT.FARREL=farrell) ,
   ##    warning = function(w) print(w),
   ##    error = function(e) {
   ##       print(e)
   ##       stop("boot.fear aborted: Could be that FEAR is not installed")
   ##    }   # ,
   ##    # finaly=print("FEAR::boot.sw98 finished with bootstrap",quote=FALSE)
   ## )
   # boot.sw98(t(x), t(y), NREP=NREP)
   # print("FEAR done; now for calculating the aggregate statistics")

   if (farrell) {
      # bb <- b
      bb <- list(eff = b$dhat, eff.bc = b$dhat.bc, bias = b$bias, var = b$var,
                 conf.int = b$conf.int, eref = b$dref, boot = b$boot, fear = b)
   } else {
      # print("Converting to Farrell")
      # Convert to Farrell efficiencies when they are not already Farrell
      eff <- 1/b$dhat
      bias <- 1/b$dhat - 1/b$dhat.bc
      # bias0 <- rowMeans(1/b$boot) - eff
      # eff.bc <- eff - bias
      # eff.bc1 <- 2*b$eff - 1/rowMeans(b$boot)
      # boot_ <- b$boot - b$dhat
      # ci_ <-
      #    t(apply(boot_,1, quantile,
      #            probs=c(0.5*alpha, 1-0.5*alpha), type=9, na.rm=TRUE))
      # ci <- 1/(b$dhat- ci_)
      # rm(boot_, ci_)
      # First-order (Taylor/delta method) approximation to the variance
      var <- b$var/b$dhat^2
      var0 <- apply(1/b$boot, 1, var)
      if (is.null(b$dref))
         eref <- NULL
      else
         eref <- 1/b$dref
      sboot <- t(apply(1/b$boot, 1, sort))
      bb <- list(eff = eff, eff.bc = 1/b$dhat.bc,
                 # eff.bc0=eff.bc, eff.bc1=eff.bc1,
                 bias = bias,
                 # bias0=bias0,
                 var = var, var0 = var0,
                 conf.int = 1/b$conf.int[, c(2, 1)],
                 # conf.int0=ci,
                 eref = eref, boot = sboot, fear = b)
   }
   return(bb)
}
# $Id: bootStat.R 117 2011-05-17 10:17:07Z Lars $

# Calculates the critical value at level |alpha| for the vector of
# trials |s|
critValue <- function(s, alpha=0.05)  {
   if ( alpha <= 0 || alpha >= 1 )
      stop("The argument alpha must be between 0 and 1")
   ss_ <- sort(s)
   # Average of the two order statistics around the alpha quantile
   mean( c(ss_[floor(alpha*length(s))], ss_[ceiling(alpha*length(s))]),
         na.rm=TRUE )
}

# Calculate the probability of a smaller value than |shat| in the
# vector of trials |s|, i.e. the type I error when |shat| is used as
# the critical value and small values of the statistic lead to rejection
typeIerror <- function(shat, s)  {
   reject <- function(alfa)  {
      quantile(s, alfa, na.rm=TRUE, names=F) - shat
   }
   if ( reject(0) * reject(1) > 0 )  {
      # No solution for uniroot, so either 0% or 100%
      if ( shat <= min(s) )  return(0)
      if ( shat >= max(s) )  return(1)
   }
   uniroot(reject, c(0,1))$root
}
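# Illustrative sketch (simulated trials): with a vector 's' of
# bootstrapped statistics, critValue gives the critical value at a
# given level and typeIerror the probability of a smaller value than
# an observed statistic.
if (FALSE) {
    s <- rnorm(2000)           # stand-in for bootstrap replicates
    critValue(s, alpha=0.05)   # approximately the 5% quantile of s
    typeIerror(1.64, s)        # roughly 0.95 for standard normal trials
}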
# $Id: cost.R 235 2021-04-11 13:46:25Z lao $

# Function to calculate minimum cost input.
# Calculations are done with transposed matrices compared to R
# standards, but according to LP practice

cost.opt <- function(XREF, YREF, W, YOBS=NULL, RTS="vrs", param=NULL,
                     TRANSPOSE=FALSE, LP=FALSE, CONTROL=NULL, LPK=NULL)
{
   if ( missing(YOBS) )  {
      YOBS <- YREF
   }
   if (!TRANSPOSE) {
      XREF <- t(XREF)
      YREF <- t(YREF)
      W <- t(W)
      YOBS <- t(YOBS)
   }
   m = dim(XREF)[1]   # number of inputs
   n = dim(YREF)[1]   # number of outputs
   K = dim(YOBS)[2]   # number of units, firms, DMUs
   Kr = dim(XREF)[2]  # number of units in the reference technology
   if ( dim(W)[2] > 1 && dim(W)[2] != K )
      stop("Dimensions for W and YOBS are different")
   if ( Kr != dim(YREF)[2] )
      stop("Number of firms in XREF and YREF differ")
   if ( m != dim(W)[1] )
      stop("Number of inputs in W and XREF differ")
   if ( n != dim(YOBS)[1] )
      stop("Number of outputs in YREF and YOBS differ")

   XREF <- tjek_data(XREF)
   YREF <- tjek_data(YREF)
   W <- tjek_data(W)
   YOBS <- tjek_data(YOBS)

   rts <- c("fdh","vrs","drs","crs","irs","irs","add","fdh+")
   if ( is.numeric(RTS) )  {
      if (LP) cat(paste("Number '",RTS,"'",sep=""))
      RTStemp <- rts[1+RTS]  # the first, fdh, is number 0
      RTS <- RTStemp
      if (LP) cat(paste("' is '",RTS,"'\n",sep=""))
   }
   if ( !(RTS %in% rts) )  stop("Unknown scale of returns:", RTS)

   if ( RTS != "crs" && RTS != "add" )  {
      rlamb <- 2
   } else
      rlamb <- 0

   lps <- make.lp(m+n +rlamb, m+Kr)
   # Set lp options
   lp.control(lps,
      scaling=c("range", "equilibrate", "integers")  # default scaling is 'geometric'
   )  # which does not always give satisfactory results;
      # 'curtisreid' often does not work at all

   name.lp(lps, paste("DEA cost,",RTS,"technology"))

   # Set the rows in the restriction matrix; use 0's for the first
   # column as it will be changed for each firm anyway.
   dia <- diag(1,nrow=m)
   for ( h in 1:m )
       set.row(lps,h, c(dia[h,], -XREF[h,]))
   for ( h in 1:n)
       set.row(lps,m+h, c(rep(0,m), YREF[h,]))
   # restrictions on lambda
   if ( RTS != "crs" && RTS != "add" )  {
      set.row(lps, m+n+1, c(rep(0,m),rep(-1,Kr)))
      set.row(lps, m+n+2, c(rep(0,m),rep( 1,Kr)))
   }

   if ( RTS == "fdh" ) {
      set.type(lps,(m+1):(m+Kr),"binary")
      set.rhs(lps,-1, m+n+1)
      delete.constraint(lps, m+n+2)
      rlamb <- rlamb -1
   } else if ( RTS == "vrs" )  {
      set.rhs(lps, c(-1,1), (m+n+1):(m+n+2))
   } else if ( RTS == "drs" )  {
      set.rhs(lps, -1, m+n+1)
      delete.constraint(lps, m+n+2)
      rlamb <- rlamb -1
   } else if ( RTS == "irs" )  {
      set.rhs(lps, 1, m+n+2)
      delete.constraint(lps, m+n+1)
      rlamb <- rlamb -1
   } else if ( RTS == "add" )  {
      set.type(lps, (m+1):(m+Kr), "integer")  # lambda columns are (m+1):(m+Kr)
   } else if ( RTS == "fdh+" )  {
      # Set the parameters low and high
      if ( is.null(param) )  {
         param <- .15
      }
      if ( length(param) == 1 )  {
         low <- 1-param
         high <- 1+param
      } else {
         low <- param[1]
         high <- param[2]
      }
      param <- c(low=low, high=high)
      set.rhs(lps, c(-high, low), (m+n+1):(m+n+2))
      add.SOS(lps,"lambda", 1,1, (m+1):(m+Kr), rep(1, Kr))
   }

   set.objfn(lps, c(W[,1],rep(0,Kr)))
   set.constr.type(lps, rep(">=",m+n+rlamb))
   lp.control(lps, sense="min")
   if (!missing(CONTROL)) set_control(lps, CONTROL)

   xopt <- matrix(NA,m,K)
   lambda <- matrix(NA,nrow=Kr,ncol=K)
   cost <- rep(NA,K)
   for ( k in 1:K )  {
      if ( dim(W)[2] != 1 && k > 1 )  {
         set.objfn(lps, c(W[,k],rep(0,Kr)))
      }
      set.rhs(lps, YOBS[,k], (m+1):(m+n))
      if (LP) print(lps)
      set.basis(lps, default=TRUE)
      status <- solve(lps)
      if ( status != 0 ) {
         print(paste("Error in solving for firm",k,": Status =",status),
               quote=FALSE)
      }  else {
         cost[k] <- get.objective(lps)
         sol <- get.variables(lps)
         xopt[,k] <- sol[1:m]
         lambda[,k] <- sol[(m+1):(m+Kr)]
      }

      if ( !is.null(LPK) && k %in% LPK )  {
         write.lp(lps, paste(name.lp(lps),k,".mps",sep=""),
                  type="mps",use.names=TRUE)
      }
   }  # for ( k in 1:K )
   # delete.lp(lps)

   rownames(lambda) <- paste("L",1:Kr,sep="")
   names(cost) <- colnames(YOBS)

   if (!TRANSPOSE) {
      xopt <- t(xopt)
      # cost <- t(cost)
      lambda <- t(lambda)
   }

   svar <- list("xopt"=xopt, "cost"=cost, "lambda"=lambda,
                rts=RTS, TRANSPOSE=TRANSPOSE)
   class(svar) <- "cost.opt"
   return (svar)
}  # cost.opt

print.cost.opt  <- function(x, ...)  {
   a <- cbind("Optimal input"=x$x)
   print(a,...)
   invisible(a)
}  ## print.cost.opt

summary.cost.opt <- function(object, ...)  {
   cat("Optimal input:\n")
   print.cost.opt(object)
   cat("Cost:\n")
   print(object$cost,...)
   cat("Weights (lambda):\n")
   x <- lambda(object)
   xx <- round(unclass(x)*1000)/1000
   if (any(ina <- is.na(x)))
      xx[ina] <- ""
   if ( any(i0 <- !ina & abs(x) < 1e-9) )
      xx[i0] <- sub("0.0000", ".", xx[i0])
   print(xx, quote=FALSE, right=TRUE, ...)
   invisible(object)
   # printSpMatrix(Matrix(object$lambda),digits=4, col.names=T,...)
   # print(object$lambda,digits=4)
}  ## summary.cost.opt
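# Illustrative sketch (toy data): minimum cost and the cost-minimal
# input mix for three firms using two inputs with common unit prices.
if (FALSE) {
    x <- matrix(c(1, 2, 4,  4, 2, 1), ncol=2)  # 3 firms, 2 inputs
    y <- matrix(rep(1, 3), ncol=1)             # all firms produce 1 unit
    w <- matrix(c(1, 1), nrow=1)               # input prices
    co <- cost.opt(XREF=x, YREF=y, W=w, RTS="vrs")
    co$cost   # minimum cost for each firm's output level
    co$xopt   # the cost-minimal input combinations
}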
# $Id: dea.R 257 2023-06-07 09:51:25Z larso $ # DEA beregning via brug af lp_solveAPI. Fordelene ved lp_solveAPI er # faerre kald fra R med hele matricer for hver unit og dermed skulle # det gerne vaere en hurtigere metode. Maaske er det ogsaa lettere at # gennemskue hvad der bliver gjort en gang for alle og hvad der bliver # aendret ved beregning for hver unit. # Option FAST=TRUE giver en meget hurtigere beregning af efficienser, # men tilgaengaeld bliver der IKKE gemt de beregnede lambdaer. Det # betyder bl.a. at der ikke kan findes peers for de enkelte units. dea <- function(X,Y, RTS="vrs", ORIENTATION="in", XREF=NULL,YREF=NULL, FRONT.IDX=NULL, SLACK=FALSE, DUAL=FALSE, DIRECT=NULL, param=NULL, TRANSPOSE=FALSE, FAST=FALSE, LP=FALSE, CONTROL=NULL, LPK=NULL) { # XREF, YREF determines the technology # FRONT.IDX index for units that determine the technology # In the calculation in the method input/output matrices X and Y # are of the order good x units. # TRANSPOSE the restriction matrix is transposed, For TRUE then X and Y are # matrices of dimension inputs/ouputs times number of units, i.e. # goods are rows, and therefore X, Y etc must be transformed # as default in R is unit x good. if ( FAST ) { DUAL=FALSE; # SLACK=FALSE; # print("When FAST then neither DUAL nor SLACK") } #---------- # Tjek af RTS og ORIENTATION rts <- c("fdh","vrs","drs","crs","irs","irs2","add","fdh+","fdh++","fdh0", "vrs+") if ( missing(RTS) ) RTS <- "vrs" if ( is.numeric(RTS) ) { if (LP) print(paste("Number '",RTS,"'",sep=""),quote=F) RTStemp <- rts[1+RTS] # the first, fdh, is number 0 RTS <- RTStemp if (LP) print(paste("' is '",RTS,"'\n",sep=""),quote=F) } RTS <- tolower(RTS) if ( !(RTS %in% rts) ) stop("Unknown scale of returns:", RTS) orientation <- c("in-out","in","out","graph") if ( is.numeric(ORIENTATION) ) { ORIENTATION_ <- orientation[ORIENTATION+1] # "in-out" er nr. 
0 ORIENTATION <- ORIENTATION_ } ORIENTATION <- tolower(ORIENTATION) if ( !(ORIENTATION %in% orientation) ) { stop("Unknown value for ORIENTATION:",ORIENTATION) } #---------- # Tjek af input # Hvis data er en data.frame saa tjek om det er numerisk data og lav # dem i saa fald om til en matrix X <- tjek_data(X) Y <- tjek_data(Y) .xyref.missing <- FALSE if ( missing(XREF) || is.null(XREF) ) { .xyref.missing <- TRUE XREF <- X } if ( missing(YREF) || is.null(YREF) ) { .xyref.missing <- TRUE && .xyref.missing YREF <- Y } XREF <- tjek_data(XREF) YREF <- tjek_data(YREF) if ( TRANSPOSE ) { X <- t(X) Y <- t(Y) XREF <- t(XREF) YREF <- t(YREF) if ( !is.null(DIRECT) & is(DIRECT, "matrix") ) DIRECT <- t(DIRECT) } #------------ # Tjek af dimensioner orgKr <- dim(XREF) if ( length(FRONT.IDX) > 0 ) { if (LP) print("FRONT.IDX") if (LP) print(FRONT.IDX) if ( !is.vector(FRONT.IDX) & !(ifelse(is.matrix(FRONT.IDX), dim(FRONT.IDX)[2]==1, FALSE) )) stop("FRONT.IDX is not a vector or collumn matrix in 'dea'") XREF <- XREF[FRONT.IDX,, drop=FALSE] YREF <- YREF[FRONT.IDX,, drop=FALSE] } rNames <- rownames(XREF) if ( is.null(rNames) & !is.null(rownames(YREF)) ) rNames <- rownames(YREF) if ( is.null(rNames) ) rNames <- 1:dim(XREF)[1] m <- dim(X)[2] # number of inputs n <- dim(Y)[2] # number of outputs K <- dim(X)[1] # number of units, units, DMUs Ky <- dim(Y)[1] Kr <- dim(XREF)[1] # number of units,units in the reference technology oKr <- orgKr[1] # number of units in reference before use of FRONT.IDX if ( !is.null(DIRECT) ) { if ( is(DIRECT, "matrix") ) { md <- dim(DIRECT)[2] Kd <- dim(DIRECT)[1] } else { md <- length(DIRECT) Kd <- 0 } } else { Kd <- 0 } if (LP) cat("m n K Kr = ",m,n,K,Kr,"\n") if (LP & !is.null(DIRECT) ) cat("md, Kd =",md,Kd,"\n") if (m!=dim(XREF)[2]) stop("Number of inputs must be the same in X and XREF") if (n!=dim(YREF)[2]) stop("Number of outputs must be the same in Y and YREF") if (K!=Ky) stop("Number of units must be the same in X and Y") if (Kr!=dim(YREF)[1]) stop("Number of units must be the same in XREF and YREF") if ( !is.null(DIRECT) & all(DIRECT=="min") & ORIENTATION=="graph" ) # Kaldet kommer fra 'mea' og stop vil vise 'dea' kaldet som mea laver; derfor call.=FALSE stop("The option 'ORIENTATION=\"graph\"' cannot be used for 'mea'", call.=FALSE) if ( !is.null(DIRECT) & length(DIRECT) > 1 ) { if ( ORIENTATION=="in" & md!=m ) stop("Length of DIRECT must be the number of inputs") else if ( ORIENTATION=="out" & md!=n ) stop("Length of DIRECT must be the number of outputs") else if ( ORIENTATION=="in-out" & md!=m+n ) stop("Length of DIRECT must be the number of inputs plus outputs") if ( is(DIRECT, "matrix") & (Kd>0 & Kd!=K) ) stop("Number of units in DIRECT must equal units in X and Y") } if ( !is.null(DIRECT) & length(DIRECT) == 1 ) { if ( ORIENTATION=="in" & length(DIRECT)!=m ) DIRECT <- rep(DIRECT,m) else if ( ORIENTATION=="out" & length(DIRECT)!=n ) DIRECT <- rep(DIRECT,n) else if ( ORIENTATION=="in-out" & length(DIRECT)!=m+n ) DIRECT <- rep(DIRECT,m+n) } #------------ if ( RTS=="fdh" && ORIENTATION!="graph" && !FAST && DUAL==FALSE && all(DIRECT!="min") ) { e <- fdh(X,Y, ORIENTATION=ORIENTATION, XREF=XREF, YREF=YREF, FRONT.IDX=FRONT.IDX, DIRECT=DIRECT, TRANSPOSE=FALSE, oKr) if ( SLACK ) { warning("Run 'slack(X, Y, e)' to get slacks") } return(e) } #------------ if ( RTS=="fdh++" ) { e <- dea.fdhPlus(X, Y, ORIENTATION=ORIENTATION, XREF=XREF, YREF=YREF, FRONT.IDX=FRONT.IDX, DIRECT=DIRECT, param=param, TRANSPOSE=FALSE, oKr) if ( SLACK ) { warning("Run 'slack(X, Y, e)' to get slacks") } 
return(e) } #------------ if ( RTS != "crs" && RTS != "add" ) { rlamb <- 2 } else { rlamb <- 0 } #------------ # Lav LP objekt og saet alt der ikke aendres ved ny firm. # Initialiser LP objekt lps <- make.lp(m+n +rlamb,1+Kr) # Saet lp options lp.control(lps, scaling=c("range", "equilibrate", "integers") # default scalering er 'geometric' ) # og den giver ikke altid tilfredsstillende resultat; # curtisreid virker i mange tilfaelde slet ikke if ( is.null(DIRECT) ) dirStreng<-"" else dirStreng<-"Dir" name.lp(lps, paste(ifelse(is.null(DIRECT)|(DIRECT!="min"), "Dea", "Mea"), ORIENTATION,RTS,dirStreng,sep="-")) if (!missing(CONTROL)) set_control(lps, CONTROL) # saet raekker i matrix med restriktioner, saet 0'er for den foerste # soejle for den skal alligevel aendres for hver unit. # Bemaerk X og Y transponeres implicit naar de saettes i lp_solveAPI; # soejler i X og Y saettes som raekker i lp_solveAPI. # Foerste 'm' raekker med input for ( h in 1:m ) set.row(lps,h, c(0,-XREF[,h])) # Foelgende 'n' raekker med output for ( h in 1:n) set.row(lps,m+h, c(0,YREF[,h])) # restriktioner paa lambda if ( RTS != "crs" && RTS != "add" ) { set.row(lps, m+n+1, c(0,rep(-1,Kr))) set.row(lps, m+n+2, c(0,rep( 1,Kr))) } # Saet restriktioner for lambda, dvs. for RTS if ( RTS == "fdh" || RTS == "fdh0" ) { set.type(lps,2:(1+Kr),"binary") set.rhs(lps,-1, m+n+1) delete.constraint(lps, m+n+2) rlamb <- rlamb -1 } else if ( RTS == "vrs" ) { set.rhs(lps, c(-1,1), (m+n+1):(m+n+2)) } else if ( RTS == "vrs+" ) { # param: (lav, hoej, sum lav, sum hoej) # Saet parametrene low og high if ( is.null(param) ) { param <- c(.5, 2.) } if ( length(param) == 1 ) { low <- param high <- 1+(1-param) } else { low <- param[1] high <- param[2] } if (length(param) == 4) { # print("Graenser for sum af lambda sat") set.rhs(lps, c(-param[4], param[3]), (m+n+1):(m+n+2)) } else { set.rhs(lps, c(-1,1), (m+n+1):(m+n+2)) } param <- c(low=low, high=high) set.semicont(lps, 2:(1+Kr)) set.bounds(lps, lower=rep(low,Kr), upper=rep(high,Kr), columns=2:(1+Kr)) } else if ( RTS == "drs" ) { set.rhs(lps, -1, m+n+1) delete.constraint(lps, m+n+2) rlamb <- rlamb -1 } else if ( RTS == "irs" ) { set.rhs(lps, 1, m+n+2) delete.constraint(lps, m+n+1) rlamb <- rlamb -1 } else if ( RTS == "irs2" ) { set.rhs(lps, 1, m+n+2) delete.constraint(lps, m+n+1) rlamb <- rlamb -1 set.semicont(lps, 2:(1+Kr)) set.bounds(lps, lower=rep(1,Kr), columns=2:(1+Kr)) } else if ( RTS == "add" ) { set.type(lps,2:(1+Kr),"integer") } else if ( RTS == "fdh+" ) { # Saet parametrene low og high if ( is.null(param) ) { param <- .15 } if ( length(param) == 1 ) { low <- 1-param high <- 1+param } else { low <- param[1] high <- param[2] } param <- c(low=low, high=high) set.rhs(lps, c(-high, low), (m+n+1):(m+n+2)) add.SOS(lps,"lambda", 1,1, 2:(1+Kr), rep(1, Kr)) } if ( !is.null(DIRECT) & Kd<=1 & all(DIRECT!="min") ) { # print(Kd) # print(DIRECT) # Samme retning for alle enheder if ( ORIENTATION=="in" ) set.column(lps, 1, c(1,-DIRECT),0:m) else if ( ORIENTATION=="out" ) set.column(lps, 1, c(1,-DIRECT),c(0,(m+1):(m+n))) else if ( ORIENTATION=="in-out" ) set.column(lps, 1, c(1,-DIRECT),0:(m+n)) } if ( !is.null(DIRECT) ) { # Ved super efficiency for directional kan loesning vaere # negativ saa loensingen skal kunne vaere negativ, dvs. objval # kan vaere negativ. 
set.bounds(lps,lower=-Inf, columns=1) } set.objfn(lps, 1,1) # Baade in- og output modeller skal formuleres med ">=" set.constr.type(lps, rep(">=",m+n+rlamb)) if ( ORIENTATION %in% c("in","graph") ) { lp.control(lps, sense="min") } else if ( ORIENTATION == "out" ) { lp.control(lps, sense="max") } else if ( ORIENTATION == "in-out" & !is.null(DIRECT) ) { lp.control(lps, sense="max") } else stop("In 'dea' for ORIENTATION use only 'in', 'out', 'graph', or 'in-out (only for DIRECT)") # Ved brug af directional efficiency er der altid tale om et max-problem if ( !is.null(DIRECT) ) { lp.control(lps, sense="max") } if ( ORIENTATION == "graph" ) { oe <- graphEff(lps, X, Y, XREF, YREF, RTS, FRONT.IDX, rlamb, oKr, param=param, TRANSPOSE, SLACK, FAST, LP) # delete.lp(lps) return(oe) } objval <- rep(NA,K) # vector for the final efficiencies if ( FAST ) { lambda <- NULL primal <- NULL dual <- NULL } else { lambda <- matrix(NA, nrow=K, ncol=Kr) # lambdas one column per unit rownames(lambda) <- rownames(X) colnames(lambda) <- rNames if (DUAL) { dual <- matrix(NA, nrow=K, ncol=sum(dim(lps))+1) # primal <- matrix(NA, nrow=K, ncol=sum(dim(lps))+1) # solutions rownames(dual) <- rownames(X) rownames(primal) <- rownames(X) } else { primal <- NULL dual <- NULL } } if ( !is.null(DIRECT) & all(DIRECT == "min") ) { directMin <- TRUE if ( ORIENTATION=="in" ) { directMatrix <- matrix(NA, nrow=K, ncol=m) } else if ( ORIENTATION=="out" ) { directMatrix <- matrix(NA, nrow=K, ncol=n) } else if ( ORIENTATION=="in-out" ) { directMatrix <- matrix(NA, nrow=K, ncol=m+n) } } else { directMin <- FALSE } # The loop for each unit for ( k in 1:K) { if ( LP ) print(paste("Unit",k," -------------------"), quote=FALSE) # Af en eller anden grund saetter set.column ogsaa vaerdi for # kriteriefunktion og hvis der ikke er nogen vaerdi bliver den # automatisk sat til 0. Derfor maa 1-tallet for # kriteriefunktionen med for denne soejle og det er raekke 0. 
if ( directMin ) { # Saet hoejreside for enhedens input og output set.rhs(lps, c(-X[k,],Y[k,]), 1:(m+n)) # Find retningen og saet foerste soejle til den if ( ORIENTATION=="in" ) { DIRECT <- minDirection(lps, m, n, ORIENTATION, LP=LP) set.column(lps, 1, c(1,-DIRECT),0:m) } else if ( ORIENTATION=="out" ) { DIRECT <- minDirection(lps, m, n, ORIENTATION, LP=LP) set.column(lps, 1, c(1,-DIRECT), c(0,(m+1):(m+n)) ) } else if ( ORIENTATION=="in-out" ) { DIRECT <- minDirection(lps, m, n, ORIENTATION) set.column(lps, 1, c(1,-DIRECT),0:(m+n)) } directMatrix[k,] <- DIRECT if (LP) { print("Min DIRECT:"); print(DIRECT) } # Check om DIRECT er 0, hvis den er nul gaa til naeste unit lpcontr <- lp.control(lps) eps <- sqrt(lpcontr$epsilon["epsint"]) if (LP) print(paste("eps for minDirectin",eps), quote=FALSE) # Daarligt test for om direction er 0, tager ikke hensyn at X'er og # Y'er indbyrdes kan vaere forskellig stoerrelse if ( ORIENTATION=="in" ) { deltaDir <- DIRECT/( X[k,] + .Machine$double.xmin ) } else if ( ORIENTATION=="out" ) { deltaDir <- DIRECT/( Y[k,] + .Machine$double.xmin ) } else if ( ORIENTATION=="in-out" ) { deltaDir <- DIRECT/( c(X[k,],Y[k,]) + .Machine$double.xmin ) } if ( max(DIRECT) < eps && max(abs(deltaDir)) < eps ) { if (LP) print(paste("Direction 0 for unit",k)) objval[k] <- 0 if ( !FAST ) { lambda[k,] <- rep(0,Kr) lambda[k,k] <- 1 } next # ingen direction at gaa, tag naeste unit } } # if ( directMin ) if ( is.null(DIRECT) ) { if ( ORIENTATION == "in" ) { set.column(lps, 1, c(1,X[k,]),0:m) set.rhs(lps, Y[k,], (m+1):(m+n)) } else { set.column(lps, 1, c(1,-Y[k,]),c(0,(m+1):(m+n))) set.rhs(lps, -X[k,], 1:m) } } else { # print(Kd) # print(DIRECT) set.rhs(lps, c(-X[k,],Y[k,]), 1:(m+n)) if ( Kd > 1 ) { # retning for enheden if ( ORIENTATION=="in" ) set.column(lps, 1, c(1,-DIRECT[k,]),0:m) else if ( ORIENTATION=="out" ) set.column(lps, 1, c(1,-DIRECT[k,]), c(0,(m+1):(m+n)) ) else if ( ORIENTATION=="in-out" ) set.column(lps, 1, c(1,-DIRECT[k,]),0:(m+n)) } } if ( LP && k <= 10 ) print(lps) set.basis(lps, default=TRUE) status <- solve(lps) if ( status == 5 ) { # Numerical failure, reset basis og proev igen evt. med en # anden skalering; desvaerre virker det helt forkerte resultater # at aendre skalering, dvs. at bruge 'dynupdate'. set.basis(lps, default=TRUE) status <- solve(lps) } if (LP) print(paste("Status =",status)) if ( status != 0 ) { if ( status == 2 || status == 3 ) { # At unit ikke er i teknologimaengden svarer til 'Inf', det # er derfor unoedvendigt at give en advarsel. #cat("Unit ", k, " is not in the technology set. 
Status = ", status, # "\n", sep="") objval[k] <- ifelse(ORIENTATION=="in", Inf, -Inf) } else { cat("Error in solving for unit ", k, ": Status =",status, "\n", sep="") objval[k] <- NA } sol <- NA } else { objval[k] <- get.objective(lps) if ( !FAST ) sol <- get.variables(lps) } if ( !FAST ) { lambda[k,] <- sol[2:(1+Kr)] if ( DUAL ) { primal[k,] <- get.primal.solution(lps) dual[k,] <- get.dual.solution(lps) } } if (LP && status==0) { print(paste("Objval, unit",k)) print(get.objective(lps)) print("Solution/variables") print(get.variables(lps)) print("Primal solution") print(get.primal.solution(lps)) print("Dual solution:") print(get.dual.solution(lps)) } if ( !is.null(LPK) && k %in% LPK ) { write.lp(lps, paste(name.lp(lps),k,".mps",sep=""), type="mps",use.names=TRUE) } } # loop for each unit e <- objval lpcontr <- lp.control(lps) eps <- lpcontr$epsilon["epsint"] e[abs(e-1) < eps] <- 1 # 'e' taet ved 1 skal vaere 1 if ( !is.null(dimnames(X)[[1]]) ) { names(e) <- dimnames(X)[[1]] } lambda[abs(lambda-1) < eps] <- 1 # taet ved 1 lambda[abs(lambda) < eps] <- 0 # taet ved 0 # Faerdig med at bruge lps # delete.lp(lps) # if ( ORIENTATION == "in" ) { # names(e) <- "E" # } else if ( ORIENTATION == "out" ) { # names(e) <- "F" # } else if ( ORIENTATION == "graph" ) { # names(e) <- "G" # } if ( FAST ) { return(e) stop("Her skulle vi ikke kunne komme i 'dea'") } if (LP) print("Forbi retur fra FAST") if ( is.null(rownames(lambda)) ) { if ( length(FRONT.IDX)>0 ) { colnames(lambda) <- paste("L",(1:oKr)[FRONT.IDX],sep="") } else { colnames(lambda) <- paste("L",1:Kr,sep="") } } else { colnames(lambda) <- paste("L",rNames,sep="_") } if ( DUAL ) { if ( ORIENTATION == "out" ) sign <- -1 else sign <- 1 ux <- sign*dual[,2:(1+m),drop=FALSE] vy <- sign*dual[,(2+m):(1+m+n),drop=FALSE] colnames(ux) <- paste("u",1:m,sep="") colnames(vy) <- paste("v",1:n,sep="") if ( rlamb > 0 ) { gamma <- dual[,(1+m+n+1):(1+m+n+rlamb),drop=FALSE] if (rlamb==1) { colnames(gamma) <- "gamma" } else { colnames(gamma) <- paste("gamma",1:rlamb,sep="") } } else gamma <- NULL } else { ux <- vy <- NULL } ## if (DUAL) if (LP) print("DUAL faerdig") if ( directMin ) { DIRECT <- directMatrix } if ( TRANSPOSE ) { if ( is(e, "matrix") ) e <- t(e) lambda <- t(lambda) if (DUAL) { ux <- t(ux) vy <- t(vy) primal <- t(primal) dual <- t(dual) if ( !is.null(gamma) ) gamma <- t(gamma) } if ( !is.null(DIRECT) & is(DIRECT, "matrix") ) DIRECT <- t(DIRECT) } ## if (TRANSPOSE) oe <- list(eff=e, lambda=lambda, objval=objval, RTS=RTS, primal=primal, dual=dual, ux=ux, vy=vy, gamma=gamma, ORIENTATION=ORIENTATION, TRANSPOSE=TRANSPOSE, # slack=NULL, sx=NULL, sy=NULL, sum=NULL, param=param ) if (!is.null(DIRECT)) { oe$direct <- DIRECT } class(oe) <- "Farrell" if ( SLACK ) { if ( TRANSPOSE ) { # Transponer tilbage hvis de blev transponeret X <- t(X) Y <- t(Y) if (.xyref.missing) { XREF <- NULL YREF <- NULL } else { XREF <- t(XREF) YREF <- t(YREF) } } sl <- slack(X, Y, oe, XREF, YREF, FRONT.IDX, LP=LP) oe$slack <- sl$slack oe$sum <- sl$sum oe$sx <- sl$sx oe$sy <- sl$sy oe$lambda <- sl$lambda if (LP) { print("slack fra slack:") print(sl$slack) print("slack efter slack:") print(oe$slack) } } ## if (SLACK) return(oe) } # dea # Kontrol af om data er numerisk data_kontrol <- function(X) { if ( is.null(X) ) return(TRUE) if (!is.data.frame(X) && !is.numeric(X)) return(FALSE) if ( is.data.frame(X)) { nc <- dim(X)[2] for ( i in 1:nc ) { if ( !is.numeric(X[,i]) ) { return(FALSE) break } } } return(TRUE) } # data_kontrol # Tjek om data er matrix og hvis ikke lav om til matrix tjek_data 
<- function(X) { if (is.matrix(X) & is.numeric(X)) return(X) if (is.null(X) || !data_kontrol(X)) { stop("'", deparse(substitute(X)), "' is not a numeric matrix (or numeric data.frame)", call.=FALSE) } if (!is.data.frame(X) || !is.numeric(X)) { X <- as.matrix(X) } return(X) } # tjek_data # Saet option til lpsolveAPI; isaer relevant for skalering set_control <- function(lp, CONTROL=NULL) { if ( !is.null(CONTROL) ) { if( !is.list(CONTROL)) { stop( "argument 'CONTROL' must be a 'list' object") } do.call( lp.control, c( list( lprec = lp ), CONTROL ) ) } return(NULL) } # set_control
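# Illustrative sketch (toy data): Farrell input efficiency under VRS;
# lambda identifies the peer units each firm is compared against.
if (FALSE) {
    x <- matrix(c(2, 3, 5, 7), ncol=1)
    y <- matrix(c(1, 3, 4, 5), ncol=1)
    e <- dea(x, y, RTS="vrs", ORIENTATION="in")
    eff(e)      # Farrell input efficiencies, 1 = on the frontier
    e$lambda    # weights on the peer units for each firm
}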
# $Id: dea.boot.R 229 2020-07-04 13:39:18Z lao $

# Boot No FEAR: boot.nf
# Bootstrap of a DEA model a la Simar & Wilson 1998.

dea.boot <- function(X,Y, NREP=200, EFF=NULL, RTS="vrs", ORIENTATION="in",
         alpha=0.05, XREF=NULL, YREF=NULL, FRONT.IDX=NULL, EREF=NULL,
         DIRECT=NULL, TRANSPOSE=FALSE, SHEPHARD.INPUT=TRUE, LP=FALSE,
         CONTROL=NULL)
{
   if ( !is.null(DIRECT) )
      stop("Use of DIRECT does not yet work in dea.boot")

   rts_ <- c("fdh","vrs","drs","crs","irs","irs","add")
   if ( is.numeric(RTS) )  {
      RTStemp <- rts_[1+RTS]  # the first, fdh, is number 0
      RTS <- RTStemp
   }
   RTS <- tolower(RTS)
   rts <- which(RTS == rts_) -1   # 'fdh', the first in rts_, is number 0
   if ( rts < 1 || rts > 4 )
      stop("Invalid value of RTS in call to boot:", RTS)

   orientation <- c("in-out","in","out","graph")
   if ( is.numeric(ORIENTATION) )  {
      ORIENTATION_ <- orientation[ORIENTATION+1]  # "in-out" is no. 0
      ORIENTATION <- ORIENTATION_
   }
   ORIENTATION <- tolower(ORIENTATION)
   orientation <- which(ORIENTATION == orientation) -1
   if ( orientation < 1 || 3 < orientation )
      stop("Invalid value of ORIENTATION in call to boot")

   if (LP) print(paste("rts =",RTS," orientation=",ORIENTATION), quote=FALSE)
   if (LP) print(paste("rts =",rts," orientation=",orientation), quote=FALSE)

   if ( !is(X, "matrix") )
      stop("X is not a matrix")
   if ( !is(Y, "matrix") )
      stop("Y is not a matrix")
   if ( !is.null(XREF) && !is(XREF, "matrix") )
      stop("XREF is not a matrix")
   if ( !is.null(YREF) && !is(YREF, "matrix") )
      stop("YREF is not a matrix")
   if ( !is.null(EREF) && (is.null(XREF) || is.null(YREF)) )
      stop("When EREF is present then XREF and YREF must also be present")

   .xyref.missing <- FALSE
   if ( missing(XREF) || is.null(XREF) )  {
      .xyref.missing <- TRUE
      XREF <- X
   }
   if ( missing(YREF) || is.null(YREF) )  {
      .xyref.missing <- TRUE && .xyref.missing
      YREF <- Y
   }

   if ( TRANSPOSE )  {
      X <- t(X)
      Y <- t(Y)
      XREF <- t(XREF)
      YREF <- t(YREF)
      if ( !is.null(DIRECT) & is(DIRECT, "matrix") )
         DIRECT <- t(DIRECT)
   }

   if ( length(FRONT.IDX) > 0 )  {
      if ( !is.vector(FRONT.IDX) & !(ifelse(is.matrix(FRONT.IDX),
                        dim(FRONT.IDX)[2]==1, FALSE) ))
         stop("FRONT.IDX is not a vector or column matrix in 'dea'")
      XREF <- XREF[FRONT.IDX,, drop=FALSE]
      YREF <- YREF[FRONT.IDX,, drop=FALSE]
   }
   rNames <- rownames(XREF)
   if ( is.null(rNames) & !is.null(rownames(YREF)) )
      rNames <- rownames(YREF)

   K <- dim(X)[1]      # number of units, firms, DMUs
   Kr <- dim(XREF)[1]  # number of units in the reference technology

   if ( is.null(EFF) )
      eff <- dea(X,Y, RTS, ORIENTATION, XREF, YREF, FAST=TRUE, CONTROL=CONTROL)
   else if ( is(EFF, "Farrell") )
      eff <- EFF$eff
   else
      eff <- EFF
   if ( length(eff) != K )
      stop("The length of EFF must be the number of firms in X and Y")

   if ( is.null(EREF) )  {
      if ( identical(X, XREF) & identical(Y, YREF) )  {
         EREF <- eff
      } else {
         EREF <- dea(XREF, YREF, RTS, ORIENTATION, FAST=TRUE, CONTROL=CONTROL)
      }
   }

   # If eff or EREF has NA elements then X or XREF will get NA elements,
   # and then the LP problem contains NAs, which gives an error in
   # 'set.row' when dea is called. Therefore, check for NAs.
   if (!all(!is.na(eff))) {
       nr <- which(is.na(eff))
       msg <- paste0("The dea calculation has NA values for firms ",
            paste(nr, collapse=", "), ".\n",
   "For bootstrap either remove these firms from the data, use a (another) scaling\n",
   "of the data, or use a scaling option using CONTROL, cf. the manual for 'dea'.\n")
       stop(msg)
   }
   if (!all(!is.na(EREF))) {
       nr <- which(is.na(EREF))
       msg <- paste0("The dea calculation has NA values for firms ",
            paste(nr, collapse=", "), " in the reference technology.\n",
   "For bootstrap either remove these firms from the reference data, use a (another) scaling\n",
   "of the data, or use a scaling option using CONTROL, cf. the manual for 'dea'.\n")
       stop(msg)
   }

   # Bootstrap values above 1, i.e. Farrell output or Shephard input
   if (ORIENTATION=="out") farrell<-TRUE else farrell<-FALSE

   # Compute the window width for smoothing the distribution of the
   # efficiencies. Do it only for efficiencies at 1 or above, i.e. for
   # Farrell output or Shephard input oriented efficiencies, because
   # those are the ones that are sampled.
   # Remove the 1s so they do not dominate, and reflect the efficiencies
   # around 1; Daraio and Simar (2007, 61) equations (3.23) and (3.26)
   dist <- eff
   if ( !farrell )  dist <- 1/dist
   # Keep efficiencies above 1; drop the 1s when the width is calculated
   zeff <- eff[ dist > 1 + 1e-6 ]
   if ( length(zeff) == 0 )  {
      cat("No unit with efficiency different from 1.0000.\n")
      stop("The range of efficiencies is degenerate, 'dea.boot' stops.")
   }
   # Reflection around 1
   neff <- c(zeff, 2-zeff)
   # The ratio |adjust| that the width must be multiplied by, because the
   # reflected vector from which the bandwidth is calculated has too many
   # elements
   adjust <- sd(dist)/sd(neff) * (length(neff)/length(dist))^(1/5)
   # Equations (3.28), (3.30) and (3.31) in Silverman (1986, 45--47)
   std <- sd(neff)
   iqr <- IQR(neff)/1.349
   # If the spread is small the IQR can be 0 and then std must be used
   if ( iqr > 1e-6 && iqr < std )  std <- iqr
   # Why '0.9' and not '1.06' as written in Daraio and Simar (2007, 61)
   # equation (3.23)? Because Silverman page 48 recommends 0.9.
   h0 <- .9 * std * length(neff)^(-1/5)
   # h0 <- 1.06 * std * length(neff)^(-1/5)
   h <- adjust * h0
   if (LP) cat("Bandwidth =",h,"\n")

   # matrix to store the bootstrapped efficiencies
   boot <- matrix(NA, nrow=K, ncol=NREP)

   if ( ORIENTATION == "in" )  {
      for ( b in 1:NREP )  {
         # if (LP) print(paste(b," -----"))
         estar <- dea.sample(eff, h, Kr)
         # estar/eff is multiplied onto each column of X; this uses that
         # R stores data by column, byrow=FALSE is the default in R.
         # eff*XREF is on the frontier, and eff/estar*XREF becomes an
         # interior point
         xstar <- EREF/estar * XREF
         #### xstar <- eff/estar * XREF
         boot[,b] <- dea(X,Y, RTS, ORIENTATION, XREF=xstar, YREF,
                         FAST=TRUE, CONTROL=CONTROL)
      }   # b in 1:NREP
   } else if ( ORIENTATION == "out" )  {
      # farrell is TRUE
      for ( b in 1:NREP )  {
         # Farrell output efficiencies
         estar <- dea.sample(eff, h, Kr)
         ystar <- EREF/estar * YREF
         boot[,b] <- dea(X,Y, RTS, ORIENTATION, XREF, YREF=ystar,
                         FAST=TRUE, CONTROL=CONTROL)
      }   # b in 1:NREP
   } else if ( ORIENTATION == "graph" )  {
      for ( b in 1:NREP )  {
         estar <- dea.sample(eff, h, Kr)
         xstar <- EREF/estar * XREF
         ystar <- estar/EREF * YREF
         boot[,b] <- dea(X,Y, RTS, ORIENTATION, XREF=xstar, YREF=ystar,
                         FAST=TRUE, CONTROL=CONTROL)
      }   # b in 1:NREP
   } else {
      stop("Unknown ORIENTATION for dea.boot")
   }

   if ( SHEPHARD.INPUT & ORIENTATION != "out" )  {
      # Turn input efficiencies below 1 into values above 1
      eff <- 1/eff
      boot <- 1/boot
   }
   # Efficiencies larger than 1
   bias <- rowMeans(boot, na.rm=TRUE) - eff
   eff.bc <- eff - bias

   #ci <- t(apply(boot,1, quantile,
   #         probs=c(0.5*alpha, 1-0.5*alpha), type=9, na.rm=TRUE))
   # ci_ <- t(apply(eff - boot, 1, quantile,
   #         probs=c(0.5*alpha, 1-0.5*alpha), na.rm=TRUE, type=9))
   ci_ <- t(apply(eff-boot, 1, function(x)  {
          quantile(x, probs=c(0.5*alpha, 1-0.5*alpha), type=9, na.rm=TRUE)}))
   ci <- eff + ci_

   if (SHEPHARD.INPUT & ORIENTATION!="out")  {
      # Turn efficiencies etc. back to values below 1
      eff <- 1/eff
      boot <- 1/boot
      eff.bc <- 1/eff.bc
      ci <- t(apply(1/ci, 1, rev))  # swap the lower and upper bounds
      bias <- eff - eff.bc
   }

   var <- apply(boot,1, function(x) {var(x, na.rm=TRUE)})

   bb <- list(eff=eff, eff.bc=eff.bc, bias=bias, var=var, conf.int=ci,
              eref=EREF, boot=boot)
   return( bb )
}  # boot

# Bootstrap sample from efficiencies above 1,
# Shephard input or Farrell output
dea.sample <- function(e, h, K=NULL)  {
   # sample for a Shephard input distance function, i.e. values above 1
   if ( min(e) < 1-1e-6 )  {
      e <- 1/e
      farrell <- TRUE
   } else {
      farrell <- FALSE  # values above 1 are not Farrell, but Shephard
   }
   if ( is.null(K) )  K <- length(e)
   # reflection around 1
   espejl <- c(e, 2-e)
   beta <- sample(espejl, K, replace=TRUE)
   etilde <- beta + h * rnorm(K)
   # Adjust so the variance comes out right, Daraio and Simar (2007, 62) [3]
   estar <- mean(beta) + (etilde-mean(beta)) / sqrt(1+h^2/var(espejl))
   # Reflect back to get all values larger than 1
   estar <- ifelse(estar < 1, 2-estar, estar)
   if ( farrell )  {
      # Convert to Farrell
      estar <- 1/estar
   }
   return(estar)
}
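# Illustrative sketch (toy data): smoothed bootstrap of input
# efficiencies a la Simar & Wilson (1998); NREP is kept small here,
# real applications typically use NREP=2000.
if (FALSE) {
    x <- matrix(runif(25, 1, 10), ncol=1)
    y <- matrix(sqrt(x) + runif(25), ncol=1)
    b <- dea.boot(x, y, NREP=100, RTS="vrs", ORIENTATION="in")
    cbind(eff=b$eff, bc=b$eff.bc, b$conf.int)  # estimates and intervals
}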
# $Id: dea.direct.R 219 2020-05-21 23:58:50Z lao $

dea.direct <- function(X, Y, DIRECT, RTS="vrs", ORIENTATION="in",
                       XREF=NULL, YREF=NULL, FRONT.IDX=NULL, SLACK=FALSE,
                       param=NULL, TRANSPOSE=FALSE)
{
   orientation <- c("in-out","in","out","graph")
   if ( is.numeric(ORIENTATION) )  {
      ORIENTATION_ <- orientation[ORIENTATION+1]  # "in-out" is no. 0
      ORIENTATION <- ORIENTATION_
   }
   ORIENTATION <- tolower(ORIENTATION)

   # If data is a data.frame, check whether it is numeric and if so
   # turn it into a matrix
   X <- tjek_data(X)
   Y <- tjek_data(Y)

   if ( missing(XREF) || is.null(XREF) )  XREF <- X
   if ( missing(YREF) || is.null(YREF) )  YREF <- Y
   XREF <- tjek_data(XREF)
   YREF <- tjek_data(YREF)

   transpose <- FALSE
   if ( TRANSPOSE ) {
      transpose <- TRUE
      X <- t(X)
      Y <- t(Y)
      XREF <- t(XREF)
      YREF <- t(YREF)
      if ( !is.null(DIRECT) & is(DIRECT, "matrix") )
         DIRECT <- t(DIRECT)
   }

   m <- dim(X)[2]  # number of inputs
   n <- dim(Y)[2]  # number of outputs
   K <- dim(X)[1]  # number of units, firms, DMUs

   ee <- dea(X,Y, RTS=RTS, ORIENTATION=ORIENTATION, XREF=XREF, YREF=YREF,
         FRONT.IDX=FRONT.IDX, SLACK=SLACK, DUAL=FALSE, DIRECT=DIRECT,
         param=param, TRANSPOSE=FALSE)

   mmd <- switch(ORIENTATION, "in"=m, "out"=n, "in-out"=m+n)
   if ( is.null(ee$objval) )  ee$objval <- ee$eff
   ob <- matrix(ee$objval,nrow=K, ncol=mmd)
   if ( is(DIRECT, "matrix") && dim(DIRECT)[1] > 1 )  {
      dir <- DIRECT
   } else {
      dir <- matrix(DIRECT,nrow=K, ncol=mmd, byrow=TRUE)
   }

   if ( ORIENTATION=="in" )  {
      e <- 1 - ob*dir/X
   } else if ( ORIENTATION=="out" )  {
      e <- 1 + ob*dir/Y
   } else if ( ORIENTATION=="in-out" )  {
      e <- cbind(1 - dir[,1:m,drop=FALSE]*ob[,1:m,drop=FALSE]/X,
             1 + dir[,(m+1):(m+n),drop=FALSE]*ob[,(m+1):(m+n),drop=FALSE]/Y)
   } else {
      warning("Illegal ORIENTATION for argument DIRECT")
   }

   if ( is(e, "matrix") && dim(e)[2]==1 )
      e <- c(e)

   if ( transpose )  {
      transpose <- FALSE
      TRANSPOSE <- FALSE
   }

   if ( TRANSPOSE ) {
      if ( is(e, "matrix") )
         e <- t(e)
      ee$lambda <- t(ee$lambda)
      if ( !is.null(DIRECT) & is(DIRECT, "matrix") )
         DIRECT <- t(DIRECT)
   }

   ee$eff <- e
   ee$direct <- DIRECT
   ee$TRANSPOSE <- TRANSPOSE
   return(ee)
}
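# Illustrative sketch (toy data): directional efficiency with the
# direction d=1 in input space; the excess is rescaled so frontier
# firms get efficiency 1.
if (FALSE) {
    x <- matrix(c(2, 3, 5, 7), ncol=1)
    y <- matrix(c(1, 3, 4, 5), ncol=1)
    ed <- dea.direct(x, y, DIRECT=1, RTS="vrs", ORIENTATION="in")
    ed$eff   # 1 for frontier firms, below 1 otherwise
}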
# $Id: dea.dual.R 257 2023-06-07 09:51:25Z larso $ # In the calculation in the method input/output matrices X and Y are # of the order good x firms. Ie. X, Y etc must be transformed as # default in R is firm x good. # DUALIN og DUALOUT eller blot DUAL. Her er valgt DUAL saa DUAL skal # vaere en matrix saaledes at der en raekke i DUAL for hvert input og # hvert output, bortset fra det foerste input og det foerste output, # dvs m-1+n-1=m+n-2 raekker. Der skal vaere 2 soejler, foerste soejle # er den nedre graense, og anden soejle er den oevre graense. dea.dual <- function(X,Y, RTS="vrs", ORIENTATION="in", XREF=NULL,YREF=NULL, FRONT.IDX=NULL, DUAL=NULL, DIRECT=NULL, TRANSPOSE=FALSE, LP=FALSE, CONTROL=NULL, LPK=NULL) { # XREF, YREF determines the technology # FRONT.IDX index for units that determine the technology rts <- c("fdh","vrs","drs","crs","irs","irs","add","fdh+") if ( missing(RTS) ) RTS <- "vrs" if (LP) print(paste("Vaerdi af 'RTS' er ",RTS),quote=FALSE) if ( is.numeric(RTS) ) { if (LP) print(paste("Number '",RTS,"'",sep=""),quote=FALSE) RTStemp <- rts[1+RTS] # the first fdh is number 0 RTS <- RTStemp if (LP) print(paste("' is '",RTS,"'\n",sep=""),quote=FALSE) } RTS <- tolower(RTS) if ( !(RTS %in% rts) ) stop("Unknown scale of returns:", RTS) orientation <- c("in-out","in","out","graph") if ( is.numeric(ORIENTATION) ) { ORIENTATION_ <- orientation[ORIENTATION+1] # "in-out" er nr. 0 ORIENTATION <- ORIENTATION_ } ORIENTATION <- tolower(ORIENTATION) if ( !(ORIENTATION %in% orientation) ) { stop("Unknown value for ORIENTATION: ",ORIENTATION) } if ( RTS %in% c("fdh","add", "fdh+") ) stop("dea.dual does not work for \"fdh\", \"fdh+\" or \"add\"") if ( !ORIENTATION %in% c("in","out","in-out") ) stop("dea.dual does not work for \"graph\"") # Hvis data er en data.frame saa tjek om det er numerisk data og lav # dem i saa fald om til en matrix X <- tjek_data(X) Y <- tjek_data(Y) if ( missing(XREF) || is.null(XREF) ) { XREF <- X } if ( missing(YREF) || is.null(YREF) ) { YREF <- Y } XREF <- tjek_data(XREF) YREF <- tjek_data(YREF) if ( TRANSPOSE ) { X <- t(X) Y <- t(Y) XREF <- t(XREF) YREF <- t(YREF) if ( !is.null(DIRECT) & is(DIRECT, "matrix") ) DIRECT <- t(DIRECT) } orgKr <- dim(XREF) if ( length(FRONT.IDX) > 0 ) { if ( !is.vector(FRONT.IDX) ) stop("FRONT.IDX is not a vector in 'eff'") XREF <- XREF[FRONT.IDX,, drop=FALSE] YREF <- YREF[FRONT.IDX,, drop=FALSE] } m = dim(X)[2] # number of inputs n = dim(Y)[2] # number of outputs K = dim(X)[1] # number of units, firms, DMUs Kr = dim(XREF)[1] # number of units, firms, DMUs if ( !is.null(DIRECT) ) { if ( is(DIRECT, "matrix") ) { md <- dim(DIRECT)[2] Kd <- dim(DIRECT)[1] } else { md <- length(DIRECT) Kd <- 0 } } else { Kd <- 0 } if (LP) cat("m n k kr = ",m,n,K,Kr,"\n") if (LP & !is.null(DIRECT) ) cat("md, Kd =",md,Kd,"\n") if ( m != dim(XREF)[2] ) stop("Number of inputs must be the same in X and XREF") if ( n != dim(YREF)[2] ) stop("Number of outputs must be the same in Y and YREF") if ( K != dim(Y)[1] ) stop("Number of units must be the same in X and Y") if ( Kr != dim(YREF)[1] ) stop("Number of units must be the same in XREF and YREF") if ( !is.null(DUAL) && ( !is.matrix(DUAL) || dim(DUAL)[1] != (m+n-2) || dim(DUAL)[2] != 2 ) ) { stop("DUAL must be a (m+n-2) x 2 matrix with lower and upper bounds for restrictions") } if ( !is.null(DIRECT) & ORIENTATION=="graph" ) stop("DIRECT cannot not be used with ORIENTATION=\"graph\"") if ( !is.null(DIRECT) & length(DIRECT) > 1 ) { if ( ORIENTATION=="in" & md!=m ) stop("Length of DIRECT must be the number of 
inputs") else if ( ORIENTATION=="out" & md!=n ) stop("Length of DIRECT must be the number of outputs") else if ( ORIENTATION=="in-out" & md!=m+n ) stop("Length of DIRECT must be the number of inputs plus outputs") if ( is(DIRECT, "matrix") & (Kd>0 & Kd!=K) ) stop("Number of firms in DIRECT must equal firms in X and Y") } if ( !is.null(DIRECT) & length(DIRECT) == 1 ) { if ( ORIENTATION=="in" & length(DIRECT)!=m ) DIRECT <- rep(DIRECT,m) else if ( ORIENTATION=="out" & length(DIRECT)!=n ) DIRECT <- rep(DIRECT,n) else if ( ORIENTATION=="in-out" & length(DIRECT)!=m+n ) DIRECT <- rep(DIRECT,m+n) } if ( RTS == "vrs" ) { rlamb <- 2 } else if ( RTS == "drs" || RTS == "irs" ) { rlamb <- 1 } else if ( RTS == "crs" ) { rlamb <- 0 } else { stop("Unknown value for RTS in 'dea.dual': ", RTS) } if ( !missing(DUAL) && !is.null(DUAL) ) { # Make matrix for dual restrictions. # Foerst for input hvis der er mere end 1 input if ( m > 1 ) { Udiag <- diag(1,m-1) DL <- rbind(DUAL[1:(m-1),1], -Udiag) DU <- rbind(-DUAL[1:(m-1),2], Udiag) if (LP) { print("DL"); print(DL) print("DU"); print(DU) } ADin <- cbind(DL,DU) } else ADin <- NULL if (LP) { print("ADin"); print(ADin) } # og saa restriktioner for output hvis der er mere end 1 output if ( n > 1 ) { Udiag <- diag(1,n-1) DL <- rbind(DUAL[m:(m+n-2),1], -Udiag) DU <- rbind(-DUAL[m:(m+n-2),2], Udiag) ADout <- cbind(DL,DU) } else ADout <- NULL nulln <- matrix(0,nrow=n,ncol=2*(m-1)) nullm <- matrix(0,nrow=m,ncol=2*(n-1)) AD <- cbind(rbind(ADin,nulln),rbind(nullm,ADout)) # AD <- t(AD) if (LP) { print("AD"); print(AD) } } else { # AD <- matrix(0, nrow=m+n, ncol=2*(m+n-2)) AD <- NULL } if ( is.null(AD) ) { restr <- 0 } else { restr <- 2*(m-1 +n-1) } # Initialiser LP objekt lps <- make.lp(1+Kr+restr, m+n+rlamb ) # Saet lp options lp.control(lps, scaling=c("range", "equilibrate", "integers") # default scalering er 'geometric' ) # og den giver ikke altid tilfredsstillende resultat; # curtisreid virker i mange tilfaelde slet ikke name.lp(lps, paste(ifelse(is.null(AD),"Dual","DualAC"),ORIENTATION, RTS,sep="-")) # if ( LP==TRUE ) print(lps) # saet soejler i matrix med restriktioner, saet 0'er for den foerste # raekke for den skal alligevel aendres for hver firm. 
for ( h in 1:m ) set.column(lps,h, c( 0, -XREF[,h], AD[h,])) # if ( LP || !is.null(LPK) ) print(lps) for ( h in 1:n) set.column(lps,m+h, c( 0, YREF[,h], AD[m+h,])) # restriktioner paa lambda objgamma <- NULL if ( rlamb > 0 ) { set.column(lps, m+n+1, c(0,rep(-1,Kr)), 1:(Kr+1)) objgamma <- -1 } if ( rlamb > 1 ) { set.column(lps, m+n+2, c(0,rep( 1,Kr)), 1:(Kr+1)) objgamma <- c(-1,1) } if ( rlamb == 1 && RTS == "irs" ) { set.column(lps, m+n+1, c(0,rep(1,Kr)), 1:(Kr+1)) objgamma <- 1 } if ( !is.null(DIRECT) & Kd==0 ) { # print(Kd) # print(DIRECT) # Samme retning for alle enheder if ( ORIENTATION=="in" ) set.row(lps, 1, c(-DIRECT),1:m) else if ( ORIENTATION=="out" ) set.row(lps, 1, c(-DIRECT),(m+1):(m+n)) else if ( ORIENTATION=="in-out" ) set.row(lps, 1, c(-DIRECT),1:(m+n)) } set.constr.type(lps, rep("<=", 1+Kr+restr)) if ( ORIENTATION == "in" ) { lp.control(lps, sense="max") set.rhs(lps, 1, 1) } else if ( ORIENTATION == "out" ) { lp.control(lps, sense="min") set.rhs(lps, -1, 1) if ( !is.null(objgamma) ) objgamma <- -objgamma } if ( !is.null(DIRECT) ) { lp.control(lps, sense="min") set.rhs(lps, c(-1,rep(0,Kr))) if ( ORIENTATION == "in" | ORIENTATION == "in-out" ) if ( !is.null(objgamma) ) objgamma <- -objgamma } if (!missing(CONTROL)) set_control(lps, CONTROL) # if ( LP || !is.null(LPK) ) print(lps) u <- matrix(NA,K,m) # vector for the final efficiencies v <- matrix(NA,K,n) # vector for the final efficiencies objval <- rep(NA,K) # vector for the final efficiencies sol <- matrix(NA, K, 1 + sum(dim(lps)) ) gamma <- NULL if (rlamb > 0) { gamma <- matrix(NA,K,rlamb) } for ( k in 1:K ) { # Finds the efficiencies for each unit if ( is.null(DIRECT) ) { # object function and first collumn if ( ORIENTATION == "in" ) { objrow <- c(rep(0,m),Y[k,], objgamma) if (LP) { print(lps); print("objgamma:") print(objgamma) print("objrow:") print(objrow) } set.objfn(lps, objrow) set.row(lps, 1, c(X[k,]),1:m) } else { objrow <- c(X[k,], rep(0,n), objgamma) set.objfn(lps, objrow) set.row(lps, 1, c(-Y[k,]),(m+1):(m+n)) } } else { objrow <- c(X[k,],-Y[k,], objgamma) set.objfn(lps, objrow) # print(Kd) # print(DIRECT) if ( Kd > 1 ) { # retning for enheden if ( ORIENTATION=="in" ) set.row(lps, 1, -DIRECT[k,], 1:m) else if ( ORIENTATION=="out" ) set.row(lps, 1, -DIRECT[k,], (m+1):(m+n)) else if ( ORIENTATION=="in-out" ) set.row(lps, 1, -DIRECT[k,], 1:(m+n)) } } if ( LP ) print(paste("Firm",k), quote=FALSE) if ( LP && k == 1 ) print(lps) set.basis(lps, default=TRUE) status <- solve(lps) if ( status != 0 ) { if (status == 2 || status == 3) { # print(paste("Firm",k,"not in the technology set"), quote=F) objval[k] <- ifelse(ORIENTATION=="in",Inf,-Inf) } else { print(paste("Error in solving for firm",k,": Status =",status), quote=F) objval[k] <- NA } } else { objval[k] <- get.objective(lps) losning <- get.variables(lps) # sol[k,] <- get.variables(lps) sol[k,] <- get.primal.solution(lps) u[k,] <- losning[1:m] v[k,] <- losning[(m+1):(m+n)] if ( rlamb > 0 ) { gamma[k,] <- losning[(m+n+1):(m+n+rlamb)] } } if (LP && status==0) { print(paste("Objval, firm",k)) print(objval[k]) print("Solution") print(sol) print("Dual values:") print(u[k,]) print(v[k,]) print("get.variables") print(get.variables(lps)) print("Primal solution") print(get.primal.solution(lps)) print("Dual solution:") print(get.dual.solution(lps)) } if ( !is.null(LPK) && k %in% LPK ) { print(paste("Model",k,"(",name.lp(lps),")")) print(lps) write.lp(lps, paste(name.lp(lps),k,".mps",sep=""), type="mps",use.names=TRUE) } } # for ( k in 1:K ) if ( is.null(DIRECT) ) { eff <- 
objval } else { mmd <- switch(ORIENTATION, "in"=m, "out"=n, "in-out"=m+n) ob <- matrix(objval, nrow=K, ncol=mmd) if ( is(DIRECT, "matrix") && dim(DIRECT)[1] > 1 ) { dir <- DIRECT } else { dir <- matrix(DIRECT, nrow=K, ncol=mmd, byrow=TRUE) } if ( ORIENTATION=="in" ) { eff <- 1 - ob*dir/X } else if ( ORIENTATION=="out" ) { eff <- 1 + ob*dir/Y } else if ( ORIENTATION=="in-out" ) { eff <- cbind(1 - ob[,1:m,drop=FALSE]*dir[,1:m,drop=FALSE]/X, 1 + ob[,(m+1):(m+n),drop=FALSE]*dir[,(m+1):(m+n),drop=FALSE]/Y) } else { warning("Illegal ORIENTATION for argument DIRECT") } if ( is(eff, "matrix") && ( dim(eff)[1]==1 ) ) ##### || dim(eff)==1 ) ) eff <- c(eff) } # undgaa afrundingsfejl i e naar den er taet ved 1. lpcontr <- lp.control(lps) eps <- lpcontr$epsilon["epsint"] eff[abs(eff-1) < eps] <- 1 # Slut med at bruge lps # delete.lp(lps) colnames(u) <- paste("u",1:m,sep="") colnames(v) <- paste("v",1:n,sep="") if (rlamb==1) { colnames(gamma) <- "gamma" } else if (rlamb>=2) { colnames(gamma) <- paste("gamma",1:rlamb,sep="") } if ( TRANSPOSE ) { u <- t(u) v <- t(v) if ( is(eff, "matrix") ) eff <- t(eff) } oe <- list(eff=eff, objval=objval, RTS=RTS, ORIENTATION=ORIENTATION, TRANSPOSE=TRANSPOSE, u=u, v=v, gamma=gamma, sol=sol) class(oe) <- "Farrell" return (oe) } ## dea.dual ######################## # X = x # Y = y # RTS="crs" # ORIENTATION="in" # XREF=NULL # YREF=NULL # FRONT.IDX=NULL # DUAL=NULL # TRANSPOSE=FALSE # LP=F # CONTROL=NULL # LPK=NULL
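# Illustrative sketch (toy data): the dual/multiplier formulation; u
# and v are the input and output shadow prices that make each firm
# look as good as possible.
if (FALSE) {
    x <- matrix(c(2, 3, 5, 7), ncol=1)
    y <- matrix(c(1, 3, 4, 5), ncol=1)
    ed <- dea.dual(x, y, RTS="vrs", ORIENTATION="in")
    cbind(eff=ed$eff, ed$u, ed$v, ed$gamma)
}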
# $Id: dea.merge.R 229 2020-07-04 13:39:18Z lao $

dea.merge <- function(X, Y, M, RTS = "vrs", ORIENTATION = "in",
            XREF = NULL, YREF = NULL, FRONT.IDX = NULL,
            TRANSPOSE = FALSE, CONTROL = NULL)
{
   rts <- c("fdh","vrs","drs","crs","irs","irs","add")
   if ( is.numeric(RTS) )  {
      RTStemp <- rts[1+RTS]  # the first, fdh, is number 0
      RTS <- RTStemp
   }
   RTS <- tolower(RTS)
   if ( !(RTS %in% rts) )  stop("Unknown scale of returns: ", RTS)

   orientation <- c("in-out","in","out","graph")
   if ( is.numeric(ORIENTATION) )  {
      ORIENTATION_ <- orientation[ORIENTATION+1]  # "in-out" is no. 0
      ORIENTATION <- ORIENTATION_
   }
   ORIENTATION <- tolower(ORIENTATION)
   if ( !(ORIENTATION %in% orientation) ) {
      stop("Unknown value for ORIENTATION: ", ORIENTATION)
   }

   # Mergers should be measured against the original technology set;
   # therefore we must use XREF and YREF, otherwise it will be Xmerger
   # and Ymerger that determine the technology.
   .xyref.missing <- FALSE
   if ( missing(XREF) || is.null(XREF) )  {
      .xyref.missing <- TRUE
      XREF <- X
   }
   if ( missing(YREF) || is.null(YREF) )  {
      .xyref.missing <- TRUE && .xyref.missing
      YREF <- Y
   }

   Xmerger <- M %*% X
   Ymerger <- M %*% Y

   # Potential gains
   E <- dea(Xmerger, Ymerger, RTS=RTS, ORIENTATION=ORIENTATION,
            XREF = XREF, YREF = YREF, CONTROL = CONTROL,
            FRONT.IDX = FRONT.IDX, TRANSPOSE = TRANSPOSE, FAST = TRUE)

   # Individual efficiencies
   e <- dea(X, Y, RTS=RTS, ORIENTATION=ORIENTATION, XREF = XREF, YREF = YREF,
            FRONT.IDX = FRONT.IDX, TRANSPOSE = TRANSPOSE, FAST = TRUE,
            CONTROL = CONTROL)

   # Inputs of individual firms projected on the efficient frontier and
   # inputs of merged firms after elimination of individual inefficiency
   if ( ORIENTATION == "in" )  {
      Xeff <- diag(e) %*% X
      Xmerger_proj <- M %*% Xeff
      Ymerger_proj <- Ymerger
   } else if ( ORIENTATION == "out" )  {
      Yeff <- diag(e) %*% Y
      Xmerger_proj <- Xmerger
      Ymerger_proj <- M %*% Yeff
   } else if ( ORIENTATION == "graph" )  {
      Xeff <- diag(e) %*% X
      Yeff <- diag(e) %*% Y
      Xmerger_proj <- M %*% Xeff
      Ymerger_proj <- M %*% Yeff
   } else {
      stop("Unknown ORIENTATION: ", ORIENTATION,
           ". The function dea.merge stops.")
   }

   # Pure gains from mergers
   Estar <- dea(Xmerger_proj, Ymerger_proj, RTS=RTS, ORIENTATION=ORIENTATION,
                XREF = XREF, YREF = YREF, FRONT.IDX = FRONT.IDX,
                TRANSPOSE = TRANSPOSE, FAST = TRUE, CONTROL = CONTROL)

   # Learning effect
   LE <- E/Estar

   # Inputs and outputs for merged firms in the harmony calculation
   Xharm <- diag(1/rowSums(M), nrow=dim(M)[1]) %*% Xmerger_proj
   Yharm <- diag(1/rowSums(M), nrow=dim(M)[1]) %*% Ymerger_proj

   # Harmony effect
   HA <- dea(Xharm, Yharm, RTS=RTS, ORIENTATION=ORIENTATION,
             XREF = XREF, YREF = YREF, FRONT.IDX = FRONT.IDX,
             TRANSPOSE = TRANSPOSE, FAST = TRUE, CONTROL = CONTROL)

   # Size effect
   SI <- Estar/HA

   em <- list(Eff=E, Estar=Estar, learning=LE, harmony=HA, size=SI)
   return(em)
}
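# Illustrative sketch (toy data): gains from merging firms 1+2 and
# 3+4; the overall gain decomposes as Eff = learning * harmony * size.
if (FALSE) {
    x <- matrix(c(2, 3, 5, 7), ncol=1)
    y <- matrix(c(1, 3, 4, 5), ncol=1)
    M <- rbind(c(1, 1, 0, 0),   # merger of firms 1 and 2
               c(0, 0, 1, 1))   # merger of firms 3 and 4
    em <- dea.merge(x, y, M, RTS="vrs", ORIENTATION="in")
    cbind(E=em$Eff, LE=em$learning, HA=em$harmony, SI=em$size)
}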
# $Id: dea.plot.R 245 2022-05-11 22:52:42Z X052717 $ "dea.plot" <- function(x, y, RTS="vrs", ORIENTATION="in-out", txt=NULL, add=FALSE, wx=NULL, wy=NULL, TRANSPOSE = FALSE, fex=1, GRID=FALSE, RANGE=FALSE, param=NULL, ..., xlim, ylim, xlab, ylab) # x er input 1 og y er iput 2 eller output. # Hvis der flere varer i de to input/output bliver de lagt sammen som # vaegtet sum med vaegte wx og wy; default vaegte som vaere simpel addition. # { if ( sum(is.na(x)) + sum(is.na(y)) > 0 ) stop("There is one or more NA in 'x' or 'y'") rts <- c("fdh","vrs","drs","crs","irs","irs2","add","fdh+") if ( is.numeric(RTS) ) { cat("Number '",RTS,sep="") RTStemp <- rts[1+RTS] # the first fdh is number 0 RTS <- RTStemp cat("' is '",RTS,"'\n",sep="") } RTS <- tolower(RTS) if ( !(RTS %in% rts) ) stop("Unknown value for RTS: ", RTS) orientation <- c("in-out","in","out","graph") if ( is.numeric(ORIENTATION) ) { ORIENTATION_ <- orientation[ORIENTATION+1] # "in-out" er nr. 0 ORIENTATION <- ORIENTATION_ } ORIENTATION <- tolower(ORIENTATION) if ( !(ORIENTATION %in% orientation) ) { stop("Unknown value for ORIENTATION: ", ORIENTATION) } if ( RTS=="fdh+" && ORIENTATION!="in-out" ) stop("RTS=\"fdh+\" only works for ORIENTATION=\"in-out\"") if ( RTS=="add" && ORIENTATION!="in-out" ) stop("RTS=\"add\" only works for ORIENTATION=\"in-out\"") # Hvis data er en data.frame saa tjek om det er numerisk data og lav # dem i saa fald om til en matrix x <- tjek_data(x) y <- tjek_data(y) if (TRANSPOSE) { x <- t(x) y <- t(y) if ( !is.null(wx) ) { if ( is.matrix(wx) ) { wx <- t(wx) } } if ( !is.null(wy) ) { if ( is.matrix(wy) ) { wy <- t(wy) } } } if ( is.matrix(x) && dim(x)[2] > 1 ) { # x skal aggregeres if ( is.null(wx) ) { wx <- matrix(1, nrow=dim(x)[2] ,ncol=1) } x <- x %*% wx } if ( is.matrix(y) && dim(y)[2] > 1 ) { # y skal aggregeres if ( is.null(wy) ) { wy <- matrix(1, nrow=dim(y)[2] ,ncol=1) } y <- y %*% wy } if ( add == FALSE ) { dots = list(...) if (RANGE) { xlim <- 1.2*range(c(0,x)) +c(0,.01) ylim <- 1.2*range(c(0,y)) +c(0,.01) } if ( missing(xlim) ) xlim=c(0,1.2*max(x+.001)) if ( missing(ylim) ) ylim=c(0,1.2*max(y+.0011)) if ( missing(xlab) ) {xlab <- switch(ORIENTATION, "in"="x1", "out"="y1", "in-out"="X")} if (missing(ylab) ) {ylab <- switch(ORIENTATION, "in"="x2", "out"="y2", "in-out"="Y")} if ( RTS=="fdh+" ) { if ( is.null(param) ) { delta <- .15 low <- 1-delta high <- 1+delta } else { if ( length(param) == 1 ) { low <- 1-param high <- 1+param } else { low <- param[1] high <- param[2] } } xlim <- c(low,high)*xlim ylim <- c(low,high)*ylim } # plot points with axes plot(x,y,xlim=xlim,ylim=ylim,xaxs="i",yaxs="i",xlab=xlab,ylab=ylab, frame=FALSE,...) if ( GRID ) { grid(col="darkgray") box(col="grey") } if ( is(txt, "logical") && txt ) { if ( is(x, "matrix") ) { if ( !is.null(rownames(x)) ) { txt <- rownames(x) } else if ( !is.null(rownames(y)) ) { txt <- rownames(y) } else { txt <- 1:dim(x)[1] } } else { txt <- 1:length(x) } } if ( !is(txt, "logical") && length(txt) > 0 ) { # Evt. 
tekst paa punkter saettes lidt nede til hoejre text(x,y,txt,adj=c(-.75,.75),cex=fex) } } # if ( add == FALSE ) if ( RTS == "add" ) { # Lav alle mulige additive kombinatinoner af data # Find foerst randen i en fdh idx <- sort(x, index.return=TRUE) effektive <- rep(NA, length(x)) j <- 1 prev <- idx$ix[1] effektive[j] <- prev for ( i in idx$ix ) { if ( y[i] > y[prev] ) { j <- j+1 effektive[j] <- i prev <- i } } # Frontier/rand og antal firms i randen rand <- effektive[!is.na(effektive)] nr <- length(rand) # Hvor mange gange en firm optraeder foer vi er uden for plotrammen if ( missing(xlim) ) xlim=c(0,1.2*max(x+.001)) nx <- round(xlim[2]/x[rand]) # lav aggregerings matrix til alle kombinationer af data # Hoejst 5 gentagelser hvis nx er uendelig fordi x[rand] er 0 nx[is.infinite(nx)] <- 5 M <- matrix(NA, nrow=prod(1+nx), ncol=nr) M[,1] <- rep(0:nx[1], prod(1+nx[-1])) if ( nr>1) for ( j in 2:nr ) { M[,j] <- as.integer(gl(1+nx[j], prod(1+nx[1:(j-1)]))) -1 } # Drop foerste raekke med bar nuller M <- M[-1,] # Lav saa alle kombinationerne x <- M %*% as.matrix(x[rand]) y <- M %*% as.matrix(y[rand]) # Herefter kan plottet laves som var det fdh, bemaerk at firms # er allerede afsat som punkter saa kombinationerne kommer ikke # til at optraede som punkter RTS <- "fdh" } if ( RTS == "fdh+" ) { dea.plot.fdhPlus(x,y,param, ...) } # "fdh+" if ( ORIENTATION == "in" ) { # inputkravmaengde, input afstandsfunktion hpts=chull(c(x,min(x),2*max(x)),c(y,2*max(y),min(y))) if ( RTS != "fdh" ) { lines(x[hpts],y[hpts],...) } if ( RTS == "fdh" ) { # tegn linjer for fdh idx <- sort(x,index.return=T) prev <- idx$ix[1]; for ( i in idx$ix ) { if ( y[i] < y[prev] ) { lines(x[c(prev,i,i)],y[c(prev,prev,i)],...) prev <- i } } } x1fmax <- max(x[hpts],na.rm=T) x2fmax <- max(y[hpts],na.rm=T) lines(c(x1fmax,2*max(x)), c(min(y), min(y)),...) lines(c(min(x), min(x)), c(x2fmax,2*max(y)),...) } else if (ORIENTATION == "out") { # produktionsmulighedsomraade for output, output afstandsfunktion hpts=chull( c(x,0,0,max(x)) , c(y,0,max(y),0) ) # For at vaere sikre paa at alle linjer tegnes laves en lukket graf # eller kan der opstaa et hul i fronten hpts <- c(hpts, hpts[1]) if ( RTS != "fdh" ) { lines(x[hpts],y[hpts],...) # Problem hvis mindste x er 0 ved max(y) for saa bliver # min(x(hpts)) ikke 0 da det omtalte punkt ikke er i hpts som # del af x, men det ekstra punkt (0,max(y)). lines(c(0, min(x[hpts],na.rm=T) ), c(max(y),max(y[hpts],na.rm=T) ),...) lines( c(max(x),max(x)), c(0,min(y[hpts],na.rm=T)),...) } if ( RTS == "fdh" ) { # tegn linjer for fdh idy <- sort(y,index.return=T,decreasing=T) lines(c(0,x[idy$ix[1]]),c(idy$x[1],idy$x[1]),...) prev <- match(max(x),x) lines(c(max(x),max(x)),c(0,y[prev]),...) prev <- idy$ix[1]; for ( i in idy$ix[-1] ) { if ( x[i] > x[prev] ) { lines(x[c(prev,prev,i)],y[c(prev,i,i)],...) prev <- i } } } } else { # ORIENTATION == "in-out" # Et input og et output, "normal produktionsfunktion" # Foerst findes det konvekse hul af punkterne og linjer # for yderpunkter tegnes. # Punkterne udvides med noget stoerre end max for at tegne linjer # der peger mod uendelig (free disposability) if ( RTS == "crs" ) { # crs abline(0, max(y/x),...) } else if (RTS=="vrs" | RTS=="fdh") { # vrs # hpts=chull(c(x,min(x),max(x)+2),c(y,0,max(y))) hpts=chull( c(x,min(x),2*max(x),2*max(x)),c(pmax(y,0),0,max(y),0) ) lines(c(min(x),min(x)), c(0,min(y[hpts],na.rm=T)),...) lines(c(max(x[hpts],na.rm=T),2*max(x)),c(max(y),max(y)),...) } else if (RTS=="drs") { # vrs og (0,0), dvs. 
drs hpts=chull(c(x,0,2*max(x),2*max(x)),c(pmax(y,0),0,0,max(y))) lines(c(0,min(x[hpts],na.rm=T)), c(0,min(y[hpts],na.rm=T)),...) lines(c(max(x[hpts],na.rm=T),2*max(x)),c(max(y),max(y)),...) } else if (RTS=="irs") { # vrs plus infinity hpts=chull( c(x ,min(x),2*max(x), 2*max(x)), c(pmax(y,0),0, 2*max(x)*max(y/x),0) ) # get the unit with the largest slope id <- which.max(y/x) lines(c(min(x),min(x)), c(0,min(y[hpts],na.rm=T)),...) lines(c(x[id], 2*max(x)), c(y[id], 2*max(x)*y[id]/x[id]),...) } # For vrs, drs and irs draw the lines between the # extreme points in the convex hull if ( RTS == "vrs" | RTS == "drs" | RTS == "irs" ) lines(x[hpts],y[hpts],...) if ( RTS == "fdh" | RTS == "add" ) { # tegn linjer for fdh idx <- sort(x,index.return=TRUE) prev <- idx$ix[1]; for ( i in idx$ix ) { if ( y[i] > y[prev] ) { lines(x[c(prev,i,i)],y[c(prev,prev,i)],...) prev <- i } } } if ( RTS == "irs2" ) { # Lines for increasing returns to scale, irs2 # Plot first vertical part lines(c(min(x),min(x)), c(0,max(y[x==min(x)],na.rm=T)),...) # Plot the line for input going to infinity slopes <- y/x idmax <- which.max(y/x) lines(c(x[idmax],20*x[idmax]), c(y[idmax],20*slopes[idmax]*x[idmax]),...) # find inputs in increasing order that have increasing slopes sx <- sort(x,index.return=T) id <- which(x %in% min(x),y) idy <- id[which.max(y[id])] slope <- y[idy]/x[idy] hpts <- idy for( i in 1:length(x) ) { if ( slopes[sx$ix[i]] > slope ) { # A higher slope found, change level in technology hpts <- c(hpts,sx$ix[i]) slope <- slopes[sx$ix[i]] } } # print(hpts) # Plot the lines for increasing slopes, remember the jumps if ( length(hpts) > 1 ) { for ( i in 1:(length(hpts)-1) ) { lines(c(x[hpts[i]],x[hpts[i+1]],x[hpts[i+1]]), c(y[hpts[i]],slopes[hpts[i]]*x[hpts[i+1]],y[hpts[i+1]]),...) } } } # RTS=="irs2" } } dea.plot.frontier <- function(x, y, RTS="vrs",...) { dea.plot(x, y, RTS=RTS, ORIENTATION="in-out",...) } dea.plot.isoquant <- function(x1, x2, RTS="vrs",...) { dea.plot(x1, x2, RTS=RTS, ORIENTATION="in",...) } dea.plot.transform <- function(y1, y2, RTS="vrs",...) { dea.plot(y1, y2, RTS=RTS, ORIENTATION="out",...) }
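# Illustrative sketch (toy data): plot the VRS frontier for one input
# and one output and overlay the CRS frontier as a dashed line.
if (FALSE) {
    x <- matrix(c(2, 3, 5, 7), ncol=1)
    y <- matrix(c(1, 3, 4, 5), ncol=1)
    dea.plot(x, y, RTS="vrs", ORIENTATION="in-out", txt=TRUE)
    dea.plot(x, y, RTS="crs", ORIENTATION="in-out", add=TRUE, lty="dashed")
}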
# $Id: dea.web.R 244 2022-05-05 14:31:31Z X052717 $

# First attempt at a web graph for efficiency and the associated
# inputs/outputs

dea.web <- function(X, E, N=NULL, txt=NULL, add=FALSE, GRID=FALSE,
                    fex=1, # RANGE=FALSE,
                    param=NULL, ..., xlim)
# X is input or output for one firm or for firm n
{
   if (dim(X)[1] > 1 && missing(N) )
      stop("If X is for more firms then N must be present")

   sl <- 0
   if ( is(E, "Farrell") )  {
      # How do we tell whether sx or sy is the one that is needed?
      if ( !is.null(E$sx) && E$ORIENTATION=="in" )
         sl <- E$sx
      else if ( !is.null(E$sx) && E$ORIENTATION=="out" )
         sl <- -E$sy
      E <- eff(E)
   }

   if ( length(E) != dim(X)[1] )
      stop("Firms in X and E must be the same")

   if ( is(X, "matrix") ) {
      m <- dim(X)[2]
      X <- X[N,]
      E <- E[N]
   } else
      m <- length(X)
   # Now X is an array
   if ( is(sl, "matrix") )  {
      sl <- sl[N,]
   }

   step <- 2*pi/m
   angle <- seq(from=0, to=2*pi-step, by=step)

   dots = list(...)
   if ( missing(xlim) )
      xlim <- max(1,E) * (c(-max(X), max(X))+.01) * 1.1
   ylim <- xlim
   plot( c(X * cos(angle), X[1]), c(X * sin(angle),0), type="l",
        xlim=xlim, ylim=ylim)
   lines( c((E * X-sl) * cos(angle), E*X[1]-sl[1]),
          c((E * X-sl) * sin(angle),0))

   # Polar lines for each good
   segments(rep(0,m), rep(0,m), xlim[2]*cos(angle), xlim[2]*sin(angle),
      col="darkgray")
   text(xlim[2]*cos(angle)*1.05, xlim[2]*sin(angle)*1.05, 1:m)  #, col="darkgray")

   if ( GRID )  {
      grid(col="darkgray")
      box(col="grey")
   }

   if ( is(txt, "logical") && txt )  {
      if ( !is(X, "matrix") )  {
         if ( !is.null(rownames(X)) )  {
            txt <- rownames(X)
         } else {
            txt <- 1:dim(X)[1]
         }
      } else {
         txt <- 1:length(X)
      }
   }
   if ( !is(txt, "logical") && length(txt) > 0 )  {
      # Optional text at the points, placed slightly below and to the right
      text(X, X, txt, adj=c(-.75,.75), cex=fex)
   }
}  # dea.web
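# Illustrative sketch (toy data): web plot of firm 1's observed inputs
# against its efficiency-scaled inputs; N selects the firm when X
# holds several firms.
if (FALSE) {
    x <- matrix(runif(15, 1, 10), ncol=3)        # 5 firms, 3 inputs
    y <- matrix(rowSums(x) + runif(5), ncol=1)
    e <- dea(x, y, RTS="vrs", ORIENTATION="in")
    dea.web(x, e, N=1)   # observed vs. radially contracted inputs
}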
## End of file R/dea.web.R
# $Id: deaUtil.R 263 2024-03-13 14:04:04Z larso $ # Naesten alle funktioner transponerer lambda i forhold til normal. # Normal er lambda K x Kr, men i naesten alle funktioner laves den # til en Kr x K matrix; uvist af hvilken grund efficiencies <- function( object, ... ) { UseMethod( "efficiencies" ) } eff <- function( object, ... ) { UseMethod( "efficiencies" ) } eff.add <- function( object, ... ) { UseMethod( "efficiencies" ) } # default method efficiencies.default <- function( object, ... ) { return( object$eff ) } eff.default <- function( object, ... ) { return( object$eff ) } efficiencies.Farrell <- function(object, type="Farrell", ...) { # Returnerer efficencer som et array if ( type == "Farrell" ) return(object$eff) else if ( type == "Shephard" ) return(1/object$eff) else warning("Unknown type:", type) # e <- as.matrix(object$eff) # if ( object$ORIENTATION == "in" ) { # colnames(e) <- "E" # } else if ( object$ORIENTATION == "out" ) { # colnames(e) <- "F" # } else if ( object$ORIENTATION == "graph" ) { # colnames(e) <- "G" # } # if ( !is.null(names(object$eff)) ) { # rownames(e) <- names(object$eff) # } # if ( object$TRANSPOSE == TRUE ) { # e <- t(e) # } # return(e) } ## efficiency eff.Farrell <- function(object, type="Farrell", ...) { return( efficiencies.Farrell(object, type, ...) ) } print.Farrell <- function(x, digits=4, ...) { # a <- cbind("Efficiens"=x$eff) a <- x$eff # a <- x@eff print(a, digits=digits, ...) invisible(a) } ## print.Farrell summary.Farrell <- function(object, digits=4, ...) { eps <- 1e-7 eff <- object$eff cat("Summary of ", ifelse(is.null(object$direct),"","directional "), "efficiencies\n", sep="") cat(toupper(object$RTS)," technology and ",object$ORIENTATION, "put orientated efficiency\n", sep="") if ( sum(is.infinite(eff)) | sum(is.nan(eff)) ) { cat("Number of firms with infinite efficiency are ", sum(is.infinite(eff)),"; removed below\n", sep="") eff <- eff[is.finite(eff)] } if ( is.null(object$direct) ) cat("Number of firms with efficiency==1 are", sum(abs(eff-1) < eps, na.rm=TRUE), "out of", length(eff), "\nMean efficiency:", format(mean(eff, na.rm=TRUE),digit=3), "\n---" ) if ( object$ORIENTATION!="out" && is.null(object$direct) ) { # Orientering er in eller graph minE <- min(eff, na.rm=TRUE) minE <- floor( 10 * minE ) / 10 dec <- seq(from=minE, to=1, by=.1) Estr <- "<= E <" Eeff <- " E ==1 " n <- length(dec) estr <- rep(NA,n) for ( i in 1:(n-1) ) estr[i] <- paste(dec[i],Estr,dec[i+1]," ",sep="") estr[n-1] <- paste(estr[n-1]," ") estr[n] <- Eeff antal <- rep(NA,n) for ( i in 1:(n-1) ) antal[i] <- sum(dec[i]-eps <= eff & eff < dec[i+1]-eps, na.rm=TRUE) antal[n] <- sum(abs(eff-1) < eps, na.rm=TRUE) } else if ( is.null(object$direct) ) { # Orientering er out maxF <- max(eff, na.rm=TRUE) maxF <- ceiling( 10 * maxF ) / 10 dec <- seq(from=1, to=maxF, by=.1) Estr <- "< F =<" Eeff <- "F ==1 " n <- length(dec) if ( n > 10 ) { dec_ <- c(1,1.1,1.2,1.3,1.5,2.0,5.0,10.0,100.0,Inf) n <- length(dec_) while ( n>1 && dec_[n-1] > maxF ) n <- n - 1 dec <- dec_[1:n] } estr <- rep(NA,n) estr[1] <- paste(" ",Eeff) for ( i in 2:n ) estr[i] <- paste(format(dec[i-1],digits=2,width=3),Estr, format(dec[i],digits=3,width=3)," ",sep="") his <- hist(object$eff, breaks=dec, plot=FALSE) antal <- his$counts # Foerste gruppe skal vaere eff==1; fra dist er foerte gruppe eff mellem 1 og 1.1 antal[1] <- antal[1] - sum(abs(eff-1) < eps, na.rm=TRUE) antal <- c(sum(abs(eff-1) < eps, na.rm=TRUE), antal) } else { # directional er det saa cat("Number of firms with directional efficiency==0 are", 
sum(abs(eff) < eps, na.rm=TRUE), "\nMean efficiency:", format(mean(eff, na.rm=TRUE),digit=3), "\n---" ) his <- hist(object$eff, breaks=7, plot=FALSE) antal <- his$counts antal[1] <- antal[1] - sum( abs(eff) < eps , na.rm=TRUE) antal <- c(sum( abs(eff) < eps , na.rm=TRUE), antal) dec <- his$breaks Estr <- "< D =<" Eeff <- "D ==0 " estr <- rep(NA,length(his$counts)+1) estr[1] <- paste(" ",Eeff) for ( i in 1:length(his$counts) ) { estr[1+i] <- paste(format(dec[i],digits=2,width=3),Estr, format(dec[i+1],digits=3,width=3)," ",sep="") } } andel <- antal/sum(!is.na(eff)) a <- cbind(antal , 100*andel) dimnames(a) <- list(" Eff range"=estr,c( "#", "%")) print(a,digits=c(2,3),quote=F,...) print(summary(eff)) # if ( SLACK & !is.null(object$slack) ) { # sl <- object # class(sl) <- "slack" # summary(sl) # } invisible(object) } ## summary.Farrell # returns peers, i.e. numbers for units with positive lambda, # efficient units to be compared to peers <- function(object, NAMES=FALSE, N=1:dim(object$lambda)[1], LAMBDA=0) { # if ( object$TRANSPOSE ) { # print("Colnames i lambda") # print(colnames(object$lambda)) # } else { # print("Rownames i lambda") # print(rownames(object$lambda)) # } if ( !is(object, "Farrell") && !is(object, "slack") ) stop("'Object' is not of class 'Farrell' (or 'slack');", " you might have used FAST=TRUE in 'dea'.") if ( object$TRANSPOSE ) { # Gem kun de raekker/firms i lambda, der skal laves peers for lam <- t(object$lambda[,N,drop=FALSE]) } else { lam <- object$lambda[N,,drop=FALSE] #'lam' er NxKr matrix } # Fjern foranstillet L_ eller L i soejlenavne for lambda if ( all("L_" == substr(colnames(lam),1,2)) ) { colnames(lam) <- substring(colnames(lam),3) } if ( all("L" == substr(colnames(lam),1,1)) ) { colnames(lam) <- substring(colnames(lam),2) } # 'lam' er N x Kr matrix # Liste, for hver firm et array af peers pt_ <- apply(lam, 1, function(x){which(x>LAMBDA)}) if ( dim(lam)[1] == 1 ) { # Kun een firm; problem at 'pt_' bliver vektor, men # skal vaere liste derfor denne omskrivning pt_ <- list(c(pt_)) } # Lav liste om til matrix og transponer den saa firms er raekker # Bemaerk pt_ er indeks er bench ogsaa indeks i reference matrix, # og ikke navne. # Herunder er 'sapply(pt_, length)' antal positive i hver raekke bench <- t(mapply(function(x) x[1:max(sapply(pt_, length))], pt_)) # Hvis der kun er en peer for hver, bliver 'bench' et array, # en raekke-vektor, og ikke en soejle-vektor; derfor saettes # dim eksplicit. # Foerst finder vi det stoerste antal peers maxp <- max(sapply(pt_, length)) # Hvis er ingen loesning er paa LP problemet er lambda's elementer # alle NA, og saa skal peers vaere NA, dvs. mindst een peer. if ( maxp==0 | is.na(maxp) ) maxp <- 1 dim(bench) <- c(dim(lam)[1], maxp) rownames(bench) <- rownames(lam) # Skal der navne i matricen bench med peers i steder for blot indeks if ( is.logical(NAMES) && NAMES & (!is.null(colnames(lam)) || !is.null(names(object$eff))) ) { # der skal navne, og enten er der names paa lambda eller paa eff. bench_ <- matrix(colnames(lam)[bench], nrow=dim(bench)[1]) rownames(bench_) <- rownames(bench) bench <- bench_ } else if (is(NAMES, "character") | is(NAMES, "integer") & length(NAMES)==dim(bench)[1]) { # NAMES er et array med navne der bruges, dvs. character ell. 
# integer array
      bench_ <- matrix(NAMES[bench], nrow=dim(bench)[1])
      rownames(bench_) <- rownames(bench)
      bench <- bench_
   }

   if ( object$TRANSPOSE ) {
      bench <- t(bench)
   }

   colnames(bench) <- paste("peer",1:dim(bench)[2],sep="")
   return(bench)
} ## peers

# For each unit return lambda-values for peers
get.peers.lambda <- function(object, N=1:dim(object$lambda)[1], LAMBDA=0) {
   lambda <- object$lambda
   if (object$TRANSPOSE) {
      lambda <- t(lambda)
   }
   if ( is.null(rownames(lambda)) ) {
      colnames(lambda) <- rownames(lambda) <- 1:dim(lambda)[1]
   }
   bench <- apply(lambda[N,,drop=FALSE], 1, function(x) {x[x>LAMBDA]})
   if (0) {
      bench <- array(NA,dim=c(2,dim(lambda)[2],dim(lambda)[1]))
      maxj = 0  # largest number of peers for a unit
      for ( i in 1:dim(lambda)[1] ) {  # for each unit
         j = 0  # how many peers for unit 'i'
         for ( h in 1:dim(lambda)[2] ) {  # for each reference unit
            if ( lambda[i,h] > 0 ) {
               j = j+1  # found a peer
               bench[1,j,i] = h
               bench[2,j,i] = lambda[i,h]
            }
            maxj = max(maxj,j)
         }
      }
      bench <- bench[,1:maxj,]
   } ## if (0)
   return(bench)
} ## get.peers.lambda

print.peers <- function(x, ...) {
   a <- peers(x)
   print(a,...)
   invisible(a)
} ## print.peers

get.number.peers <- function(object, NAMES=FALSE, N=1:dim(object$lambda)[2], LAMBDA=0) {
   # For each peer count the number of units it is a peer for
   if ( object$TRANSPOSE ) {
      lam <- t(object$lambda)
   } else {
      lam <- object$lambda
   }
   # Keep only the columns of lambda that correspond to the wanted peers
   lam <- lam[,N,drop=FALSE]
   # Remove a leading L in the column names of lambda
   # if ( "L" %in% substr(rownames(lam),1,1) ) {
   #    rownames(lam) <- substring(rownames(lam),2)
   # }
   # rownames are superfluous because they appear in the first column; an
   # index is often easier for picking out rows
   # rownames(lam) <- NULL
   if ( all("L_" == substr(colnames(lam),1,2)) ) {
      colnames(lam) <- substring(colnames(lam),3)
   }
   # Which units are peers for any firm at all
   peer <- which(colSums(lam, na.rm=TRUE)>LAMBDA)
   # names(peer) <- NULL
   # Find how many units each 'peer' is a role model for
   count <- colSums(lam[,peer,drop=FALSE]>LAMBDA, na.rm=TRUE)
   np <- data.frame(peer,count)
   # if (NAMES) rownames(np) <- colnames(lam)[peer]
   if (NAMES) np$peer <- colnames(lam)[np$peer]
   return(np)
}  # get.number.peers

get.which.peers <- function(object, N=1:dim(object$lambda)[2], LAMBDA=0 ) {
   # Which units a peer is a peer for.
   # Problem: if N is not among the peers an R error is thrown; no way to
   # test for this and avoid the error has been found yet.
   if ( object$TRANSPOSE ) {
      lam <- t(object$lambda)
   } else {
      lam <- object$lambda
   }
   # Remove a leading L_ or L in the column names of lambda
   if ( all("L_" == substr(colnames(lam),1,2)) ) {
      colnames(lam) <- substring(colnames(lam),3)
   }
   # Now lambda is a K x Kr matrix.
   # A unit is a peer for another unit when that unit's lambda is positive,
   # i.e. there is a positive element in the column for N
   p <- apply(lam[,N,drop=FALSE] > LAMBDA, 2, which)
   p0 <- p[lapply(p,length) > 0]
   if ( length(p0) > 0 )
      return(p0)
   else
      return(NULL)
}  # get.which.peers

lambda.print <- function(x, KEEPREF=FALSE, ...) {
   if ( x$TRANSPOSE ) {
      lam <- t(x$lambda)
   } else {
      lam <- x$lambda
   }
   # print(class(lam))
   if (!KEEPREF && dim(lam)[1]>1 ) {
      lam <- lam[,rowSums(as.matrix(lam))>0]
   }
   xx <- round(unclass(lam)*1000)/1000
   if (any(ina <- is.na(lam)))
      xx[ina] <- ""
   if ( any(i0 <- !ina & abs(lam) < 1e-5) )
      xx[i0] <- sub("0.0000", ".", xx[i0])
   if ( x$TRANSPOSE )
      xx <- t(as.matrix(xx))
   print(xx, quote=FALSE, right=TRUE, ...)
   invisible(xx)
   # printSpMatrix(Matrix(lam),digits=4, col.names=T,...)
# invisible(lam) } ## print.lambda lambda <- function(object, KEEPREF=FALSE) { if ( object$TRANSPOSE ) { lam <- object$lambda } else { lam <- t(object$lambda) } if (!KEEPREF && dim(lam)[2]>1 ) { lam <- lam[rowSums(lam, na.rm=TRUE)>0,,drop=FALSE] } else if (!KEEPREF && dim(lam)[2]==1 ) { lam <- lam[lam>0,,drop=FALSE] } if ( !object$TRANSPOSE ) lam <- t(lam) return(lam) } # Calculate excess input or output excess <- function(object, X=NULL, Y=NULL) { if ( !is(object, "Farrell") ) stop("Only works for object of class/type 'Farrell'", " as output from dea and like functions") if ( is.null(object$direct) && is.null(X) && is.null(Y) ) stop("Either X or Y is needed in the arguments for", " objects with no direction") e <- object$objval if ( is.null(object$direct) ) { # no direction, must be Farrell so direction is set by X or Y if ( object$ORIENTATION == "in" && !is.null(X) ) ex <- X * (1-e) else if ( object$ORIENTATION == "out" && !is.null(Y) ) ex <- Y * (e-1) else if ( object$ORIENTATION == "graph"&& !is.null(X)&& !is.null(Y) ) ex <- cbind((1-e)*X, (1/e-1)*Y ) else # ( is.null(dir) ) stop("X/Y missing for ORIENTATION = ", object$ORIENTATION ) } else { if ( is(object$direct, "matrix") ) { ex <- apply(object$direct,2,"*",e) } else { dir <- matrix(object$direct, nrow=length(e), ncol=length(object$direct), byrow=TRUE ) ex <- e * dir } } # Afrund til 0 hvis ex er naer 0 eps <- sqrt(.Machine$double.eps) ex[abs(ex)<eps] <- 0 return(ex) } # excess eladder2 <- function(n, X, Y, RTS="vrs", ORIENTATION="in", XREF=NULL, YREF=NULL, DIRECT=NULL, param=NULL, MAXELAD=NULL) { if ( is.null(XREF) ) { XREF <- X } if ( is.null(YREF) ) { YREF <- Y } idx <- NULL if ( missing(MAXELAD) || is.null(MAXELAD) ) { MAXELAD <- dim(XREF)[1] } else { if( !is.numeric(MAXELAD) ) stop("MAXELAD must be an integer") MAXELAD <- min(abs(MAXELAD), dim(XREF)[1]) } elad <- rep(NA, MAXELAD) for ( i in 1:MAXELAD ) { # if (LP) print(paste("===> ",i),quote=FALSE) # if (LP) print("FRONT.IDX") # if (LP) print(idx) # print( length(idx) == dim(X)[1] ) if ( length(idx) == MAXELAD ) { break } # Brug FRONT.IDX for at kunne bruge de oprindelige indeks i X og Y e <- dea(X[n,,drop=F],Y[n,,drop=F], RTS=RTS, ORIENTATION=ORIENTATION, XREF=XREF, YREF=YREF, FRONT.IDX=idx, DIRECT=DIRECT, param=param, LP=FALSE) # if (LP) print(paste("Eff =",eff(e)), quote=FALSE) # if (LP) print(paste("Peers =",peers(e)), quote=FALSE) # if (LP) print(lambda(e)) # Er der nogen peers overhovedet ellers kan vi bare slutte nu if ( is.na(eff(e)) ) break if ( abs(eff(e)) == Inf ) break if ( is.na(peers(e)[1]) ) break elad[i] <- e$eff # Array nr. for den stoerste vaerdi af lambda # Bruger kun den foerste hvis der er flere p <- which.max(e$lambda) # firm number for array number p, firm numbers follow L in the colnames str <- substring(colnames(e$lambda)[p],2) # 'ip' er indeksnr. for peer lavet ud fra søjlenavnet suppressWarnings(ip <- as.integer(str)) # nok en firm der ikke laengere skal indgaa i referenceteknologien if ( is.na(ip) ) { # Søjlenavn mangler i lambda eller er ikke et nummer så ip er NA, # derfor forsøges de fundet ud fra søjlenavn i XREF. 
# Det er et navn/en streng saa den skal laves om til et indeks, # et heltal, for elleres kan den ikke bruges som indeks til FRONT.IDX str0 <- substring(str,2) # if (LP) print(str0) navne <- rownames(XREF) # if (LP) print(navne) if ( is.null(navne) ) navne <- rownames(YREF) # Find indeks for placering af str0 i rownames(XREF)/rownames(YREF) ip <- which( navne %in% str0 ) } # saa er ip et tal idx <- c(idx,-ip) if (eff(e) > 1) break } ## elad <- c(na.omit(elad)) # Er dette ikke overflødigt? Der er test for is.na(eff(e)) # Nej for elad er initialiseret til bar NA'er if ( is.null(idx) ) { idx <- NA } else { idx <- -idx } return(list(eff=elad, peer=idx, lastp=peers(e))) } ## eladder eladder.plot <- function(elad, peer, TRIM=NULL, xlab="Most influential peers", ylab="Efficiency", ...) { if ( all(is.na(elad)) ) stop("All values of first argument are NA") if ( !is.null(TRIM) & !is.numeric(TRIM) ) stop("TRIM must be an integer") if ( is.null(TRIM) ) { TRIM <- 0 for ( i in 1:length(peer) ) { TRIM <- max(TRIM, nchar(toString(peer[i]))) } } linje <- ifelse(TRIM==1,2,TRIM^(1/1.3)) opar <- par(mar=c(linje+2,4.1,4.1,2.1)) plot(elad, xaxt="n", xlab="", ylab=ylab, ...) mtext(xlab, side=1, line=linje+.5) if ( is(peer, "character") || is(peer, "factor") ) { axis(1, at=1:length(peer), labels=strtrim(peer,TRIM), las=ifelse(TRIM>1,2,0) ) } else { axis(1, at=1:length(peer), labels=peer, las=ifelse(TRIM>1,2,0) ) } abline(v=which(elad==1), lty=3) abline(h=1, lty=3) par(opar) } ## eladder.plot # Funktion til at droppe en eller flere units fra et # Farrrell objekt #dropUnit <- function(E, dmu) { # # Det foerste element er typisk eff og kan give # # antal units. # K <- length(E[[1]]) # for (n in 1:length(names(E))) { # if ( class(E[[n]]) == "matrix" ) # E[[n]] <- E[[n]][-dmu,,drop=FALSE] # else if ( class(E[[n]]) == "numeric" ) # E[[n]] <- E[[n]][-dmu] # } # return(E) #}
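## Small usage sketch of the utility functions above (illustrative only,
## made-up data):
if (FALSE) {
   X <- matrix(c(2, 3, 5, 4), ncol=1)
   Y <- matrix(c(1, 2, 3, 2), ncol=1)
   e <- dea(X, Y, RTS="vrs", ORIENTATION="in")
   eff(e)           # Farrell input efficiencies
   summary(e)       # distribution of the efficiencies
   peers(e)         # row numbers of the units with positive lambda
   lambda(e)        # the corresponding lambda weights
   excess(e, X=X)   # input excess (1-E)*X implied by the scores
}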
## End of file R/deaUtil.R
# $Id: eff.dens.R 244 2022-05-05 14:31:31Z X052717 $

# Plot of the density of efficiencies, using the reflection principle

eff.dens <- function(eff, bw="nrd0") {
   if ( is(eff, "Farrell") )
      E <- eff$eff
   else
      E <- eff
   if ( max(E) <= 1 )
      orient <- "in"
   else if ( min(E) >= 1 )
      orient <- "out"
   else
      stop("Efficiencies should be on one side of 1 only, not on both sides")
   # Reflection around the boundary 1
   refl <- c(E,2-E)
   sr <- sort(refl)
   if ( orient=="in" ) {
      den <- density(sr, bw=bw, from=0, to=1, na.rm=TRUE)
   } else {
      den <- density(sr, bw=bw, from=1, na.rm=TRUE)
   }
   x <- den$x
   y <- 2*den$y
   eden <- list(x=x, y=y, bw=den$bw, n=den$n, data.name=den$data.name)
   class(eden) <- "density"
   return (eden)
}  # eff.dens

eff.dens.plot <- function(obj, bw="nrd0", ..., xlim, ylim, xlab, ylab) {
   if ( !is(obj, "list") ) {
      o_ <- eff.dens(obj, bw=bw)
      obj <- o_
   }
   if ( missing(xlim) ) {
      if ( min(obj$x) < 1 ) {
         xl <- min(obj$x)
         xlim <- c(xl,1)
      } else {
         xr <- max(obj$x)
         xlim <- c(1,xr)
      }
   }
   if ( missing(ylim) ) {
      ylim <- c(0, max(obj$y))
   }
   if ( missing(xlab) ) xlab <- "Efficiency"
   if ( missing(ylab) ) ylab <- "Density"
   plot(obj$x, obj$y, type="l", xlim=xlim, ylim=ylim, ylab=ylab, xlab=xlab,
        frame=FALSE,...)
}  # eff.dens.plot
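## The reflection principle used above: input efficiencies live in (0,1], so an
## ordinary kernel estimate leaks mass past the boundary at 1; mirroring the
## scores around 1 and doubling the density on the original side removes that
## boundary bias. Usage sketch (illustrative only, made-up scores):
if (FALSE) {
   E <- c(.45, .55, .62, .70, .70, .83, .90, 1, 1)
   eff.dens.plot(eff.dens(E))
}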
## End of file R/eff.dens.R
# $Id: eladder.R 258 2023-08-03 12:27:03Z larso $ # Dropper succesivt den peer der giver størst stigning i eff.tallet. eladder <- function(n, X, Y, RTS="vrs", ORIENTATION="in", XREF=NULL, YREF=NULL, DIRECT=NULL, param=NULL, MAXELAD=NULL) { if ( is.null(XREF) ) { XREF <- X } if ( is.null(YREF) ) { YREF <- Y } idx <- NULL if ( missing(MAXELAD) || is.null(MAXELAD) ) { MAXELAD <- dim(XREF)[1] } else { if( !is.numeric(MAXELAD) ) stop("MAXELAD must be an integer") MAXELAD <- min(abs(MAXELAD), dim(XREF)[1]) } Xn <- X[n,,drop=FALSE] Yn <- Y[n,,drop=FALSE] # 'navne' bruges til at kunne referere til units i den oprindelige XREF # Efterhaanden som peers udgaar, svarer indeks i XREF ikke til de brugte # XREF via FRONT.IDX. Derfor bruges 'navne' til de oprindelige indeks. navne <- rownames(XREF) if ( is.null(navne) ) navne <- rownames(YREF) if ( is.null(navne) ) { # Hvis der ikke er nogen raekkenavne laves en loebende raekkenr. navne <- 1:dim(XREF)[1] } elad <- rep(NA, MAXELAD) for ( i in 1:MAXELAD ) { if ( length(idx) == MAXELAD ) { break } # Brug FRONT.IDX for at kunne bruge de oprindelige indeks i X og Y ## if (LP) cat("**", i, ": idx = ", paste(idx, collapse=", "), "\n", sep="") e <- dea(Xn, Yn, RTS=RTS, ORIENTATION=ORIENTATION, XREF=XREF, YREF=YREF, FRONT.IDX=idx, DIRECT=DIRECT, param=param, LP=FALSE) # Er der nogen peers overhovedet ellers kan vi bare slutte nu if ( is.na(eff(e)) ) break if ( abs(eff(e)) == Inf ) break if ( is.na(peers(e)[1]) ) break # Gem den relevante eff.værdi elad[i] <- eff(e) # Find alle peers og find så den mest betydningsfulde fb <- peers(e) # 'fb' er indeks i XREF[idx,] og skal laves om til indeks i XREF ## if (LP) print(fb) efb <- rep(NA, length(fb)) ip <- 0 for ( f in fb ) { # 'fb' og dermed 'f' er indeks i XREF[idx,] og skal laves om til indeks i XREF if (is.null(idx)) { str <- navne[f] } else { str <- navne[idx][f] } ff <- which( navne %in% str ) e <- dea(Xn, Yn, RTS=RTS, ORIENTATION=ORIENTATION, XREF=XREF, YREF=YREF, FRONT.IDX=c(idx, -ff), DIRECT=DIRECT, FAST=TRUE, param=param, LP=FALSE) ## if (LP) cat(i, ": str = ", str, ", f = ", f, ", ff = ", ff, "; e = ", e, "\n", sep="") ip <- ip + 1 efb[ip] <- e } # Peer med største effekt på eff.tal ip <- which.max(efb) # Find indeks for placering af fb[ip] i rownames(XREF)/rownames(YREF) if (is.null(idx)) { ipp <- which( navne %in% navne[fb[ip]] ) } else { ipp <- which( navne %in% navne[idx][fb[ip]] ) } idx <- c(idx, -ipp) ## if (LP) cat("*", i, ": ipp = ", ipp, ", f = ", fb[ip], "; e = ", efb[ip], "; idx = ", paste(idx, collapse=", "), "\n", sep="") # Hvis eff er over 1 så stopper videre beregninger; ville de give mening? if (efb[ip] > 1) break } elad <- elad[!is.na(elad)] e <- dea(Xn, Yn, RTS=RTS, ORIENTATION=ORIENTATION, XREF=XREF, YREF=YREF, FRONT.IDX=idx, DIRECT=DIRECT, param=param, LP=FALSE) if ( is.null(idx) ) { idx <- NA } else { idx <- -idx } return(list(eff=elad, peer=idx, lastp=peers(e))) } ## eladder
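## Usage sketch (illustrative only, made-up data): the ladder for firm 3 shows
## how its efficiency changes as its most influential peers are removed one
## at a time.
if (FALSE) {
   X <- matrix(c(2, 3, 5, 6, 8), ncol=1)
   Y <- matrix(c(1, 3, 4, 5, 6), ncol=1)
   el <- eladder(3, X, Y, RTS="vrs", ORIENTATION="in")
   el$eff    # efficiency after each successive removal
   el$peer   # the removed peers, as row numbers in X
   eladder.plot(el$eff, el$peer)
}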
## End of file R/eladder.R
# $Id: fdh.R 207 2019-12-16 20:14:51Z lao $ # FDH efficiency uden brug af LP. # Der er ingen kontrol af argumenter, den taenkes at blive kaldt fra dea.R # der udfoerer alle kontroller fdh <- function(X,Y, ORIENTATION="in", XREF=NULL, YREF=NULL, FRONT.IDX=NULL, DIRECT=NULL, TRANSPOSE=FALSE, oKr=0) { if ( ORIENTATION=="graph" ) stop("ORIENTATION==\"graph\" does not work for fdh,", "use dea( ...,RTS=\"fdh\", ... ) ") if ( missing(XREF) || is.null(XREF) ) { XREF <- X } if ( missing(YREF) || is.null(YREF) ) { YREF <- Y } if ( TRANSPOSE ) { X <- t(X) Y <- t(Y) XREF <- t(XREF) YREF <- t(YREF) if ( !is.null(DIRECT) & is(DIRECT, "matrix") ) DIRECT <- t(DIRECT) } orgKr <- dim(XREF) if ( length(FRONT.IDX) > 0 && oKr==dim(XREF)[1] ) { # Brug kun FRONT.IDX hvis den ikke allerede er brugt og saa vil # oKr vaare forskellig fra dim(XREF)[1] if ( !is.vector(FRONT.IDX) ) stop("FRONT.IDX is not a vector in 'dea'") XREF <- XREF[FRONT.IDX,, drop=FALSE] YREF <- YREF[FRONT.IDX,, drop=FALSE] } rNames <- rownames(XREF) if ( is.null(rNames) & !is.null(colnames(YREF)) ) rNames <- rownames(YREF) K <- dim(X)[1] m <- dim(X)[2] n <- dim(Y)[2] Kr <- dim(XREF)[1] # Saat kun oKr hvis den ikke allerede er sat i kaldet af fdh if (oKr==0) oKr <- orgKr[1] eff <- rep(NA, K) peer <- rep(NA, K) # Directional efficiency if ( !is.null(DIRECT) ) { if ( is(DIRECT, "matrix") && dim(DIRECT)[1] > 1 ) { if ( ORIENTATION=="in" ) { dirX <- DIRECT # matrix(DIRECT, nrow=K, ncol=m) # dirY <- matrix(.Machine$double.xmin, nrow=K, ncol=n) dirY <- matrix(NA, nrow=K, ncol=n) } else if ( ORIENTATION=="out" ) { # dirX <- matrix(.Machine$double.xmin, nrow=K, ncol=m) dirX <- matrix(NA, nrow=K, ncol=m) dirY <- DIRECT # matrix(DIRECT, nrow=K, ncol=n) } else if ( ORIENTATION=="in-out" ) { dirX <- DIRECT[,1:m,drop=FALSE] # matrix(DIRECT[,1:m], nrow=K, ncol=m) dirY <- DIRECT[,(m+1):(m+n), drop=FALSE] # matrix(DIRECT[,(m+1):(m+n)], nrow=K, ncol=n) } } else { # Her er DIRECT en vektor og derfor ens for alle firms, dvs. 
# all rows must be identical
      if ( ORIENTATION=="in" ) {
         dirX <- matrix(DIRECT, nrow=K, ncol=m, byrow=TRUE)
         # dirY <- matrix(.Machine$double.xmin, nrow=K, ncol=n)
         dirY <- matrix(NA, nrow=K, ncol=n)
      } else if ( ORIENTATION=="out" ) {
         # dirX <- matrix(.Machine$double.xmin, nrow=K, ncol=m)
         dirX <- matrix(NA, nrow=K, ncol=m)
         dirY <- matrix(DIRECT, nrow=K, ncol=n, byrow=TRUE)
      } else if ( ORIENTATION=="in-out" ) {
         dirX <- matrix(DIRECT[1:m], nrow=K, ncol=m, byrow=TRUE)
         dirY <- matrix(DIRECT[(m+1):(m+n)], nrow=K, ncol=n, byrow=TRUE)
      }
   }

   eps <- 1e-7
   for ( k in 1:K ) {
      # For each firm find max(XREF/X) over inputs (rows).
      # Only look at the firms that dominate firm k in the directions
      # that are not changed.
      # xk <- NULL # matrix(X[k,,drop=FALSE], nrow=Kr, ncol=m, byrow=TRUE)
      # yk <- NULL # matrix(Y[k,,drop=FALSE], nrow=Kr, ncol=n, byrow=TRUE)
      xk <- matrix(X[k,], nrow=Kr, ncol=m, byrow=TRUE)
      yk <- matrix(Y[k,], nrow=Kr, ncol=n, byrow=TRUE)
      if ( ORIENTATION=="in" ) {
         idx <- rowSums( (xk >= XREF-eps & dirX[k,]==0) | abs(dirX[k,])>0+eps) &
                rowSums(yk <= YREF+eps) == n
      } else if ( ORIENTATION=="out" ) {
         idx <- rowSums(xk >= XREF) == m &
                rowSums( (yk <= YREF+eps & dirY[k,]==0) | abs(dirY[k,])>0+eps)
      } else if ( ORIENTATION=="in-out" ) {
         idx <- rowSums( (xk >= XREF-eps & dirX[k,]==0) | abs(dirX[k,])>0+eps) &
                rowSums( (yk <= YREF+eps & dirY[k,]==0) | abs(dirY[k,])>0+eps)
      }
      if ( is.null(xk) )
         xk <- matrix(X[k,,drop=FALSE], nrow=sum(idx,na.rm=TRUE), ncol=m, byrow=TRUE)
      else
         xk <- xk[idx,,drop=FALSE]
      if ( is.null(yk) )
         yk <- matrix(Y[k,,drop=FALSE], nrow=sum(idx,na.rm=TRUE), ncol=n, byrow=TRUE)
      else
         yk <- yk[idx,,drop=FALSE]
      allDir <- cbind( (xk-XREF[idx,])/dirX[rep(k,sum(idx)),],
                       (YREF[idx,]-yk)/dirY[rep(k,sum(idx)),])
      minDir <- apply(allDir,1,min, na.rm=TRUE)
      # eff[k] <- max(minDir)
      # Suppress the warning when max() is taken over an empty set and
      # return Inf instead, as in the 'no solution' case further down.
      eff[k] <- tryCatch(max(minDir, na.rm=TRUE), warning = function(w) Inf)
      # there is only room for one peer per firm
      if ( !is.na(eff[k]) && abs(eff[k]) < Inf ) {
         peer[k] <- (1:Kr)[idx][which.max(minDir)]
      } else {
         peer[k] <- NA
      }
      # peer[k] <- (1:Kr)[idx][which.max(minDir)]
   }

   # we only need lambda to be able to call peers() to get peers.
   lam <- matrix(0, nrow=K, ncol=Kr)
   rownames(lam) <- rownames(X)
   colnames(lam) <- rNames
   for (k in 1:K) {
      if ( !is.na(peer[k]) )
         lam[k, peer[k]] <- 1
   }
   if ( is.null(rownames(lam)) ) {
      if ( length(FRONT.IDX)>0 ) {
         colnames(lam) <- paste("L",(1:oKr)[FRONT.IDX],sep="")
      } else {
         colnames(lam) <- paste("L",1:Kr,sep="")
      }
   } else {
      colnames(lam) <- paste("L",rNames,sep="_")
   }

   e <- list(eff=eff, objval=eff, peers=peer, lambda=lam, RTS="fdh",
             direct=DIRECT, ORIENTATION=ORIENTATION, TRANSPOSE=FALSE)
   class(e) <- "Farrell"
   return(e)
}  # if !is.null(DIRECT)

# Find efficiency when compared to each dominating firm; the actual
# efficiency is then the min/max over the dominating firms.
if ( ORIENTATION=="in" ) { for ( k in 1:K ) { # For each firm find max(XREF/X) over inputs (rows) yk <- matrix(Y[k,], nrow=Kr, ncol=n, byrow=TRUE) if ( dim(YREF)[1] > 0 ) idx <- rowSums(yk <= YREF) == n else idx <- 0 if ( sum(idx) == 0 ) { # Der er ingen loesning, eff er NA eff[k] <- Inf peer[k] <- NA next } maxIn <- apply( XREF[idx,,drop=FALSE] / matrix(X[k,], nrow=sum(idx), ncol=m, byrow=TRUE) , 1, max ) eff[k] <- min(maxIn) # der er kun gjort plads til een peer peer[k] <- (1:Kr)[idx][which.min(maxIn)] } } else { for ( k in 1:K ) { xk <- matrix(X[k,], nrow=Kr, ncol=m, byrow=TRUE) idx <- rowSums(xk >= XREF) == m if ( sum(idx) == 0 ) { # Der er ingen loesning, eff er NA eff[k] <- -Inf peer[k] <- NA next } minOut <- apply( YREF[idx,,drop=FALSE] / matrix(Y[k,,drop=FALSE], nrow=sum(idx), ncol=n, byrow=TRUE) , 1, min) eff[k] <- max(minOut) peer[k] <- (1:Kr)[idx][which.max(minOut)] } } # we only need lambda to be able to call peers() to get peers. lam <- matrix(0, nrow=K, ncol=Kr) rownames(lam) <- rownames(X) colnames(lam) <- rNames for (k in 1:K) { # print(peer[k]) if ( !is.na(peer[k]) ) lam[k, peer[k]] <- 1 } if ( is.null(rownames(lam)) ) { if ( length(FRONT.IDX)>0 ) { colnames(lam) <- paste("L",(1:oKr)[FRONT.IDX],sep="") } else { colnames(lam) <- paste("L",1:Kr,sep="") } } else { colnames(lam) <- paste("L",rNames,sep="_") } # print(lam) e <- list(eff=eff, objval=eff, peers=peer, lambda=lam, RTS="fdh", ORIENTATION=ORIENTATION, TRANSPOSE=FALSE) class(e) <- "Farrell" return(e) } # fdh function
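## The in-orientation loop above amounts to the closed-form FDH score
##    E_k = min over dominating j of ( max_i XREF[j,i] / X[k,i] ),
## where unit j dominates k when YREF[j,] >= Y[k,]. A small hand-check of the
## formula (illustrative only, made-up data):
if (FALSE) {
   X <- matrix(c(2, 4, 6, 3), ncol=1)
   Y <- matrix(c(1, 2, 3, 3), ncol=1)
   eff(dea(X, Y, RTS="fdh", ORIENTATION="in"))
   # firm 2 (x=4, y=2): dominating units are 2, 3, 4, so E = min(4/4, 6/4, 3/4) = 0.75
   # firm 3 (x=6, y=3): dominating units are 3 and 4,  so E = min(6/6, 3/6)      = 0.50
}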
## End of file R/fdh.R
# $Id: fdhPlus.R 218 2020-05-21 21:28:28Z lao $ # FDH+. Beregn foerst CRS efficiency med kun en mulig peer; se om # lambda ligger inden for graenserne; hvis er den goer er alt ok, # ellers saet lambda svarende til den overtradte graense og beregn # efficiency svarende. dea.fdhPlus <- function(X, Y, ORIENTATION="in", XREF=NULL, YREF=NULL, FRONT.IDX=NULL, DIRECT=NULL, param=0.15, TRANSPOSE=FALSE, oKr=0) { orientation <- c("in-out","in","out","graph") if ( is.numeric(ORIENTATION) ) { ORIENTATION_ <- orientation[ORIENTATION+1] # "in-out" er nr. 0 ORIENTATION <- ORIENTATION_ } ORIENTATION <- tolower(ORIENTATION) if ( !(ORIENTATION %in% orientation) ) { stop("Unknown value for ORIENTATION: ",ORIENTATION) } if ( ORIENTATION=="graph" ) stop("ORIENTATION==\"graph\" does not work for fdh+") if ( missing(XREF) || is.null(XREF) ) { XREF <- X } if ( missing(YREF) || is.null(YREF) ) { YREF <- Y } if ( length(FRONT.IDX) > 0 ) { if ( !is.vector(FRONT.IDX) ) stop("FRONT.IDX is not a vector in 'dea'") XREF <- XREF[,FRONT.IDX, drop=FALSE] YREF <- YREF[,FRONT.IDX, drop=FALSE] } rNames <- colnames(XREF) if ( is.null(rNames) & !is.null(colnames(YREF)) ) rNames <- colnames(YREF) # Saet parametrene low og high if ( is.null(param) ) { param <- .15 } if ( length(param) == 1 ) { low <- 1-param high <- 1+param } else { low <- param[1] high <- param[2] } m <- dim(X)[2] # number of inputs n <- dim(Y)[2] # number of outputs K <- dim(X)[1] # number of units, firms, DMUs Kr <- dim(XREF)[1] # number of units,firms in the reference technology # # Find dominating reference firms for each of the Kr reference firms # Dom <- list(NA,Kr) # list of dominating reference firms # for ( i in 1:Kr ) { # # Stoerre for # Dom[[i]] <- which( # rowSums(matrix(XREF[i,,drop=FALSE],nrow=Kr,ncol=m,byrow=TRUE) > # XREF) == m # & # rowSums(matrix(YREF[i,,drop=FALSE],nrow=Kr,ncol=n,byrow=TRUE) < # YREF) == n # ) # } E <- rep(NA,K) peer <- rep(NA, K) lambda <- matrix(0, nrow=K, ncol=Kr) lamR <- matrix(NA, nrow=K, ncol=Kr) # Directional efficiency if ( !is.null(DIRECT) ) { stop("Directional efficiency does not yet work for RTS='fdh+'") if ( is(DIRECT, "matrix") && dim(DIRECT)[1] > 1 ) { if ( ORIENTATION=="in" ) { dirX <- DIRECT # matrix(DIRECT, nrow=K, ncol=m) # dirY <- matrix(.Machine$double.xmin, nrow=K, ncol=n) dirY <- matrix(NA, nrow=K, ncol=n) } else if ( ORIENTATION=="out" ) { # dirX <- matrix(.Machine$double.xmin, nrow=K, ncol=m) dirX <- matrix(NA, nrow=K, ncol=m) dirY <- DIRECT # matrix(DIRECT, nrow=K, ncol=n) } else if ( ORIENTATION=="in-out" ) { dirX <- DIRECT[,1:m,drop=FALSE] # matrix(DIRECT[,1:m], nrow=K, ncol=m) dirY <- DIRECT[,(m+1):(m+n), drop=FALSE] # matrix(DIRECT[,(m+1):(m+n)], nrow=K, ncol=n) } } else { # Her er DIRECT en vektor og derfor ens for alle firms, dvs. 
# alle raekker skal vaere ens if ( ORIENTATION=="in" ) { dirX <- matrix(DIRECT, nrow=K, ncol=m, byrow=T) # dirY <- matrix(.Machine$double.xmin, nrow=K, ncol=n) dirY <- matrix(NA, nrow=K, ncol=n) } else if ( ORIENTATION=="out" ) { # dirX <- matrix(.Machine$double.xmin, nrow=K, ncol=m) dirX <- matrix(NA, nrow=K, ncol=m) dirY <- matrix(DIRECT, nrow=K, ncol=n, byrow=T) } else if ( ORIENTATION=="in-out" ) { dirX <- matrix(DIRECT[1:m], nrow=K, ncol=m, byrow=T) dirY <- matrix(DIRECT[(m+1):(m+n)], nrow=K, ncol=n, byrow=T) } } for ( k in 1:K ) { # For each firm find max(XREF/X) over inputs (rows) # Se kun for de firmaer der dominerer firm k i den retning der # ikke aendres xk <- NULL # matrix(X[k,,drop=FALSE], nrow=Kr, ncol=m, byrow=TRUE) yk <- NULL # matrix(Y[k,,drop=FALSE], nrow=Kr, ncol=n, byrow=TRUE) if ( ORIENTATION=="in" ) { yk <- matrix(Y[k,], nrow=Kr, ncol=n, byrow=TRUE) idx <- rowSums(yk <= YREF) == n } else if ( ORIENTATION=="out" ) { xk <- matrix(X[k,], nrow=Kr, ncol=m, byrow=TRUE) idx <- rowSums(xk >= XREF) == m } else if ( ORIENTATION=="in-out" ) { xk <- matrix(X[k,], nrow=Kr, ncol=m, byrow=TRUE) yk <- matrix(Y[k,], nrow=Kr, ncol=n, byrow=TRUE) idx <- rowSums(xk >= XREF) == m & rowSums(yk <= YREF) == n } if ( is.null(xk) ) xk <- matrix(X[k,,drop=FALSE], nrow=sum(idx), ncol=m, byrow=TRUE) else xk <- xk[idx,,drop=FALSE] if ( is.null(yk) ) yk <- matrix(Y[k,,drop=FALSE], nrow=sum(idx), ncol=n, byrow=TRUE) else yk <- yk[idx,,drop=FALSE] allDir <- cbind( (xk-XREF[idx,])/dirX[rep(k,sum(idx)),], (YREF[idx,]-yk)/dirY[rep(k,sum(idx)),]) minDir <- apply(allDir,1,min, na.rm=TRUE) eff[k] <- max(minDir) # der er kun gjort plads til een peer per firm peer[k] <- (1:Kr)[idx][which.max(minDir)] } # we only need lambda to be able to call peers() to get peers. lam <- matrix(0, nrow=K, ncol=Kr) for (k in 1:K) { lam[k, peer[k]] <- 1 } if ( !is.null(dimnames(X)[[1]]) ) { names(eff) <- dimnames(X)[[1]] } e <- list(eff=eff, objval=eff, peers=peer, lambda=lam, RTS="fdh", direct=DIRECT, ORIENTATION=ORIENTATION, TRANSPOSE=FALSE) class(e) <- "Farrell" return(e) } # if !is.null(DIRECT) for ( k in 1:K ) { # Loeb hver firm der skal beregnes for, igennem # xk <- X[k,,drop=FALSE] # yk <- Y[k,,drop=FALSE] xk <- X[k,] yk <- Y[k,] ek <- rep(NA,Kr) # For alle mulige reference firms for ( r in 1:Kr ) { xref <- XREF[r,,drop=FALSE] yref <- YREF[r,,drop=FALSE] # Drop this firm as reference if it does not dominate the firm at # question at the ends of the possible lambda interval. 
# This also works with super-efficiency.
         if ( ORIENTATION=="in" && any(yk > high*yref) ) next
         else if ( ORIENTATION=="out" && any(xk < low*xref) ) next
         etemp <- dea.csrOne(xk,yk, xref, yref, ORIENTATION)
         #etemp <- dea(xk,yk, RTS="crs", ORIENTATION, XREF=xref, YREF=yref)
         lamR[k,r] <- etemp$lambda
         ek[r] <- etemp$eff
         if ( ORIENTATION=="in" && etemp$lambda < low ) {
            ek[r] <- max( low*xref/xk )
            lamR[k,r] <- low
         } else if ( ORIENTATION=="out" && etemp$lambda > high ) {
            ek[r] <- min( high*yref/yk )
            lamR[k,r] <- high
         }
      }  # for r

      # Avoid printing a warning when min/max is taken over missing values
      # only; return Inf/-Inf directly instead.
      if ( ORIENTATION=="in" ) {
         E[k] <- tryCatch(min(ek, na.rm=TRUE), warning = function(w) Inf)
      } else {
         E[k] <- tryCatch(max(ek, na.rm=TRUE), warning = function(w) -Inf)
      }
      # print(ek)
      if ( !is.na(E[k]) && abs(E[k]) < Inf ) {
         # the peer attaining the optimum: min for input orientation,
         # max for output orientation
         peer[k] <- if ( ORIENTATION=="in" ) which.min(ek) else which.max(ek)
      } else {
         peer[k] <- NA
      }
   }  # for k

   for (k in 1:K) {
      if (!is.na(peer[k])) {
         lambda[k,peer[k]] <- lamR[k,peer[k]]
      }
   }

   if ( !is.null(dimnames(X)[[1]]) ) {
      names(E) <- dimnames(X)[[1]]
   }

   obj <- list(eff=E, lambda=lambda, RTS="fdh+", lamR=lamR, peers=peer,
               ORIENTATION=ORIENTATION, TRANSPOSE=FALSE)
   class(obj) <- "Farrell"
   return(obj)
}  # function

# dea.csrOne only works for one firm when there is ONE peer for that firm
dea.csrOne <- function(X,Y, XREF, YREF, ORIENTATION="in") {
   # X and Y are vectors of input and output for ONE firm
   m <- length(X)
   n <- length(Y)
   Kr <- dim(XREF)[1]
   # All combinations of x and y so that we can find all partial
   # productivities; the indices ix and iy let us simply use y[iy]/x[ix]
   ix <- gl(m,1,m*n)
   iy <- gl(n,m)
   # Productivity for each input-output combination
   yx0 <- Y[iy]/X[ix]
   ek <- rep(NA,Kr)
   for ( k in 1:Kr ) {
      # Find max relative productivity compared to each potential peer
      yxk <- YREF[k,iy] / XREF[k,ix]
      ek[k] <- max(yx0/yxk)
   }
   if ( ORIENTATION == "in" ) {
      peer <- which.min(ek)
      E <- ek[peer]
      lam <- max(Y/YREF[peer,])
   } else {
      peer <- which.min(1/ek)
      E <- 1/ek[peer]
      lam <- min(X/XREF[peer,])
   }
   # print(paste(h,":: E =",E[h],"; peer =", peer[h],"; lambda =",lam[h]),quote=FALSE)
   obj <- list(eff=E, lambda=lam, peer=peer)
   return(obj)
}  # dea.csrOne
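## Usage sketch (illustrative only, made-up data), assuming the dea() front end
## dispatches RTS="fdh+" to dea.fdhPlus as elsewhere in the package: each firm
## is compared with single peers scaled by a factor lambda restricted to
## [1-param, 1+param].
if (FALSE) {
   X <- matrix(c(2, 4, 6, 3), ncol=1)
   Y <- matrix(c(1, 2, 3, 3), ncol=1)
   e <- dea(X, Y, RTS="fdh+", ORIENTATION="in", param=0.15)
   eff(e)
}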
## End of file R/fdhPlus.R
# $Id: fdhPlus.plot.R 156 2015-07-08 13:34:15Z b002961 $ dea.plot.fdhPlus <- function(x, y, param = 0.15, ...) { # Vaer sikker paa at x og y er matricer x <- matrix(x) y <- matrix(y) # Saet parametrene low og high if ( is.null(param) ) { delta <- .15 low <- 1-delta high <- 1+delta } else { if ( length(param) == 1 ) { low <- 1-param high <- 1+param } else { low <- param[1] high <- param[2] } } # Find FDH frontier idx <- sort(x, index.return=TRUE) effektive <- rep(NA, dim(x)[1]) j <- 1 prev <- idx$ix[1] effektive[j] <- prev for ( i in idx$ix ) { if ( y[i] > y[prev] ) { j <- j+1 effektive[j] <- i prev <- i } } # Frontier/rand og antal firms i randen rand <- effektive[!is.na(effektive)] x <- matrix(x[rand]) y <- matrix(y[rand]) # kun (x,y) paa FDH frontier er nu tilbage sx <- sort(x,index.return=TRUE) ix <- sx$ix # crs-linjestykker starter i foerste soejles koordinat og # slutter i 2. soejles x0 <- t(outer(c(low,high),x[ix])) y0 <- t(outer(c(low,high),y[ix])) if (FALSE) { points(x,y,pch=16,col="red") text(x,y,1:dim(x)[1],adj=c(-.25,.75)) segments(x0[,1],y0[,1], x0[,2],y0[,2],col="lightgreen",lwd=1) points(t(x0),t(y0),pch=c(2,0)) } # text(x0[,1],y0[,1],1:dim(x0)[1],adj=c(-.75,.75)) # Det sidst tegnede linjestykke ender i (x_,y_) segments(x0[1,1],0, x0[1,1],y0[1,1] , ...) x_ <- x0[1,1] y_ <- y0[1,1] nx <- dim(x0)[1] for ( i in 1:(nx-1) ) { # Loeb alle de fdh effektive punkter igennem # Ved starten skal punkt vaere start eller punkt paa linjestykke # der skal tegnes # print(i) # if ( i==15 ) break if ( x_ > x0[i,2] ) next; # vi er kommet laengere xk <- x_ yk <- y_ # Foerst ser vi om der er en linje over, dvs om der er en linje der # starter foer den nuvaerende slutter h <- 0 while ( i+h+1<=nx && x0[i+h+1,1] < x0[i,2] ) { # Der er en linje foer det er slut, men er den ogsaa over? h <- h+1 lambda <- (x0[i+h,1]-x0[i,1])/(x0[i,2]-x0[i,1]) xk <- x0[i+h,1] yk <- (1-lambda)*y0[i,1] + lambda*y0[i,2] if ( y0[i+h,1] > yk ) break # Linjen ligger over } if ( h > 0 && y0[i+h,1] > yk ) { # Der var en linje foer slut og den ligger oven over # Er vi allerede forbi det punkt? if ( x_ > xk || y_ > yk ) next segments(x_,y_, xk,yk, ...) segments(xk,yk, x0[i+h,1],y0[i+h,1], ...) x_ <- x0[i+h,1] y_ <- y0[i+h,1] # vi er kommet til start paa ny linje } # Saa maa det vaere en lavere linje; find den forfra fordi den # maaske er sprunget over i soegen efter en hoejere # Den foerste linje maa vaere den lavere vi soeger # Er der en lavere linje? else if ( i+1<=nx && (x0[i+1,1] < x0[i,2] || y0[i+1,1] < y0[i,2]) ) { # der er en linje der starter lavere segments(x_,y_, x0[i,2],y0[i,2], ...) # Der er en lavere linje, find hvor den skal rammes vandret lambda <- (y0[i,2]-y0[i+1,1])/(y0[i+1,2]-y0[i+1,1]) xk <- (1-lambda)*x0[i+1,1] + lambda*x0[i+1,2] yk <- y0[i,2] # Er der en hoejere linje foer xk? h <- 0 while ( i+h<nx && x0[i+h+1,1] < xk ) { h <- h+1 if ( y0[i+h,1] > yk ) break } if ( h > 0 && y0[i+h,1] > yk ) { # der er en linje der starter over segments(x0[i,2],y0[i,2], x0[i+h,1], y0[i,2], ...) segments(x0[i+h,1], y0[i,2], x0[i+h,1], y0[i+h,1], ...) x_ <- x0[i+h,1] y_ <- y0[i+h,1] # Vi er paa starten af en nyt linjesykke } else { # Der maa saa vaere et stykke der starter under. 
# Find det foerste der starter under lambda <- (y0[i,2]-y0[i+1,1])/(y0[i+1,2]-y0[i+1,1]) xk <- (1-lambda)*x0[i+1,1] + lambda*x0[i+1,2] yk <- y0[i,2] h <- 1 xkmin <- xk while ( i+h+1<=nx ) { # det er ikke til at vide hvor de sidste numre rammer # fra neden h <- h+1 lambda <- (y0[i,2]-y0[i+h,1])/(y0[i+h,2]-y0[i+h,1]) if ( lambda < 0 || lambda > 1 ) break xk <- (1-lambda)*x0[i+h,1] + lambda*x0[i+h,2] if ( xk < xkmin ) { xkmin <- xk } } segments(x0[i,2],y0[i,2], xkmin,yk, ...) x_ <- xkmin y_ <- yk # Vi er paa nyt linjestykke } } else { # Der var ingen linjestykker foer slut saa tegn linjen til slut segments(x_,y_, x0[i,2],y0[i,2], ...) x_ <- x0[i,2] y_ <- y0[i,2] # Find saa en vandret streg if ( i+1 > nx ) break if ( y0[i,2] > y0[i+1,1] ) { # Det naeste punkt starter lavere lambda <- (y0[i,2]-y0[i+1,1])/(y0[i+1,2]-y0[i+1,1]) xk <- (1-lambda)*x0[i+1,1] + lambda*x0[i+1,2] yk <- y0[i,2] segments(x_,y_, xk,yk, ...) x_ <- xk y_ <- yk } else { # saa ma naeste punkt ligge over segments(x_,y_, x0[i+1,1],y_, ...) segments(x0[i+1,1],y_, x0[i+1,1],y0[i+1,1], ...) x_ <- x0[i+1,1] y_ <- y0[i+1,1] } } } segments(x_,y_, x0[nx,2],y0[nx,2], ...) segments(x0[nx,2],y0[nx,2], 2*x0[nx,2],y0[nx,2], ...) # } }
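## dea.plot.fdhPlus only adds segments to an existing device, so plot the
## observations first. Usage sketch (illustrative only, made-up data):
if (FALSE) {
   x <- c(2, 4, 6, 3)
   y <- c(1, 2, 3, 3)
   plot(x, y, xlim=c(0, 8), ylim=c(0, 4))
   dea.plot.fdhPlus(x, y, param=0.15)
}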
## End of file R/fdhPlus.plot.R
# $Id: graphEff.R 256 2023-04-26 16:29:58Z larso $ # Funktion til beregning af graf efficiens. Beregning sker via # bisection hvor der itereres mellem mulige og ikke-mulige loesninger # i et LP problem hvor venstreside er som in- og output orienteret # efficiens. blot er foerste soejle erstattet af rene nuller, og G*X og # (1/G)*Y optraeder paa hoejresiden. Minimering af 0 saa der blot soeges om # der er en mulig loesning. Da soejlen for efficiens er bar 0'er vil # justering af efficiens ud over G ikke ske, dvs. det er kun lambdaer # der tilpasses for at se, om der er en mulig loesning. graphEff <- function(lps, X, Y, XREF, YREF, RTS, FRONT.IDX, rlamb, oKr, param=param, TRANSPOSE=FALSE, SLACK=FALSE, FAST=FALSE, LP=FALSE, CONTROL=CONTROL) { m = dim(X)[2] # number of inputs n = dim(Y)[2] # number of outputs K = dim(X)[1] # number of units, firms, DMUs Kr = dim(YREF)[1] # number of units, firms, DMUs MAXITER <- 50 # Maksimale antal interationer i bysection if (LP) { cat("m=",m, ", n=",n, ", K=",K, ", Kr=", Kr, "\n", sep="") flush.console() } objval <- rep(NA,K) # vector for the final efficiencies if ( FAST ) { lambda <- NULL } else { lambda <- matrix(NA, nrow=K, ncol=Kr) # lambdas one column per unit } set.column(lps, 1, rep(0,dim(lps)[1])) lpcontr <- lp.control(lps) tol <- lpcontr$epsilon["epsint"] if (LP) {cat("tol = ", tol, "\n", sep="")} lp.control(lps, timeout=5, verbose="severe") if (!missing(CONTROL)) set_control(lps, CONTROL) for ( k in 1:K) { ## loekke for hver unit if ( LP ) { print(paste("Firm",k), quote=FALSE); flush.console()} # Lav bisection a <- 0 b <- 2 # medfoerer start med G=1 nIter <- 0 gFundet <- FALSE xIset <- TRUE # Lidt mystisk variabel; TRUE hvis interval SKAL findes # Er G==1 en mulighed? G <- 1 set.rhs(lps, c(-G*X[k,],Y[k,]/G), 1:(m+n)) set.basis(lps, default=TRUE) status <- solve(lps) #if (status==7) { print(paste("For G=1, status =",status)); flush.console()} if (LP) { print(paste("For G=1, status =",status)); flush.console()} if ( status == 0 ) { # G=1 er mulig; hvis G=1-tol ikke er mulig, er G=1 optimal loesning nIter <- nIter + 1 G <- 1 - tol if (LP) { print(paste("G korrigeret til", G, "; tol =", tol)); flush.console() } set.rhs(lps, c(-G*X[k,],Y[k,]/G), 1:(m+n)) #if (LP) write.lp(lps, filename="graphEff.lp") # Uden set.basis gaer 'solve' en sjaelden gang i en uendelig loekke set.basis(lps, default=TRUE) status <- solve(lps) #if (status==7) { print(paste("For G=1-eps, status =",status)); flush.console()} if (LP) { print(paste("For G=1-eps, status =",status)); flush.console()} if ( status != 0 ) { # G=1 er mulig og G=1-eps er ikke-mulig ==> G==1 G <- b <- 1 gFundet <- TRUE } } else { # G=1 er ikke mulig; unit uden for teknology, saa G > 1; eller # der er skeet en fejl i solve. 
if (LP) {warning("Firm outside technology set, firm =",k); flush.console()} # Bestem oevre graense, start med 2; 'b' er allerede sat til 2 while (status != 0 && nIter < MAXITER) { nIter <- nIter + 1 set.rhs(lps, c(-b*X[k,], Y[k,]/b), 1:(m+n)) set.basis(lps, default=TRUE) status <- solve(lps) if (LP) { print(paste0("For b = ", b, ", status = ",status)); flush.console()} #if (status==7) { print(paste0("b G = ", G, ", status =",status)); flush.console()} if (status==0) break # 'b' skal ikke aendres naar en loesning findes b <- b^2 } # nedre graense if ( b > 2 ) { a <- sqrt(b) # 'b' blev oeget anden potens og forrige # vaerdi var sqrt(b) som saa er en mulig # nedre graense } else { a <- 1 } xIset <- FALSE # Interval er nu sat } ## Nedre og Oevre graense, interval, er sat, men kun naar G>1 status <- 0 # status kunne godt have en anden vaerdi og saa # ville naeste loekke ikke blive gennemloebet if ( !gFundet && xIset && status == 0 ) { # Find interval naar G<1; # skal bruges ved start af bisection. # Der kraever normalt faerre iterationer at loebe intervaller igennem i # stedet for at bruge bisection straks, fordi loesning ofte er mellem 0.6 og 1.0. # G==1 er mulig og G er mindre end 1 dif <- .1 i <- 1 while ( status==0 && i < 10 ) { # Saet G til en mindre vaerdi saalaenge det er en mulig loesning. G <- 1 - i*dif set.rhs(lps, c(-G*X[k,],Y[k,]/G), 1:(m+n)) set.basis(lps, default=TRUE) status <- solve(lps) #if (status==7) { print(paste("Graense: G = ",G,"; status = ",status)); flush.console()} if (LP) { print(paste("G = ",G,"; status = ",status)); flush.console()} nIter <- nIter + 1 i <- i+1 } # enten er i==10 eller ogsaa er status!=0 if ( i==10 ) { a <- 0 b <- dif } else { a <- 1 - (i-1)*dif b <- 1 - (i-2)*dif } } # Bisection loekke if (LP) { print(paste("Bisection interval: [",a,",",b,"]")); flush.console()} while ( !gFundet && b-a > tol^1.5 && nIter < MAXITER ) { # if (LP) {cat("nIter =", nIter, "\n"); flush.console()} G <- (a+b)/2 # if (LP) { print(paste("Bisect: G = ",G,"(",k,")")); flush.console()} set.rhs(lps, c(-G*X[k,],Y[k,]/G), 1:(m+n)) set.basis(lps, default=TRUE) status <- solve(lps) #if (status==7) { print(paste("Bisect G = ",G,"(",k,"); status =",status)); flush.console()} if (LP) { print(paste("G = ",G,"(",k,"); status =",status)); flush.console()} if ( status == 0 ) { # loesning findes, saet ny oevre graense b <- G } else { # ellers ny nedre graense, hvis loesning ikke findes a <- G } nIter <- nIter + 1 } ## while (... if (LP) {print(paste0("nIter=",nIter,"; G=", G, "; status=",status)); flush.console()} if ( status != 0 ) { # Hvis den sidste vaerdi af G ikke var mulig bruger vi den # oevre graense. Det er noedvendigt med en mulig loesning for at # kunne faa lambdaer og duale vaerdier. 
G <- b set.rhs(lps, c(-G*X[k,],Y[k,]/G), 1:(m+n)) set.basis(lps, default=TRUE) status <- solve(lps) #if (status==7) { print(paste("Sidste G = ",G,"; status =",status)); flush.console()} } if (LP) { print(paste0("G = ",G," (",k,"); status = ",status)) # print(rlamb) # print("Solution") # print(get.variables(lps)) # print(lps) flush.console() } objval[k] <- G if ( LP && k == 1 ) print(lps) if ( !FAST ) { sol <- get.variables(lps) lambda[k,] <- sol[2:(1+Kr)] } if (LP && status==0) { print(paste("Objval, firm",k)) print(get.objective(lps)) # print("Solution/varaibles") # print(get.variables(lps)) # print("Primal solution") # print(get.primal.solution(lps)) # print("Dual solution:") # print(get.dual.solution(lps)) flush.console() } } # loop for each unit lp.control(lps, timeout=0, verbose="neutral") e <- objval e[abs(e-1) < tol] <- 1 lambda[abs(lambda-1) < tol] <- 1 # taet ved 1 lambda[abs(lambda) < tol] <- 0 # taet ved 0 if ( FAST ) { return(e) stop("Her skulle vi ikke kunne komme i 'dea'") } if ( length(FRONT.IDX)>0 ) { colnames(lambda) <- paste("L",(1:oKr)[FRONT.IDX],sep="") } else { colnames(lambda) <- paste("L",1:Kr,sep="") } primal <- dual <- NULL ux <- vy <- NULL if ( TRANSPOSE ) { lambda <- t(lambda) } oe <- list(eff=e, lambda=lambda, objval=objval, RTS=RTS, primal=primal, dual=dual, ux=ux, vy=vy, gamma=gamma, ORIENTATION="graph", TRANSPOSE=TRANSPOSE # ,slack=slack_, sx=sx, sy=sy , param=param ) class(oe) <- "Farrell" if ( SLACK ) { if ( TRANSPOSE ) { # Transponer tilbage hvis de blev transponeret X <- t(X) Y <- t(Y) XREF <- t(XREF) YREF <- t(YREF) } sl.x <- X*e - lambda %*% XREF sl.y <- -Y/e + lambda %*% YREF sl.x[abs(sl.x) < tol] <- 0 sl.y[abs(sl.y) < tol] <- 0 sum <- rowSums(sl.x) + rowSums(sl.y) ### sum[abs(sum) < tol] <- 0 colnames(sl.x) <- paste("sx",1:m,sep="") colnames(sl.y) <- paste("sy",1:n,sep="") oe$sx <- sl.x oe$sy <- sl.y oe$sum <- sum oe$slack <- sum > tol if (LP) { print("sum fra slack:") print(sum) print("slack efter slack:") print(oe$slack) flush.console() } } ## if (SLACK) return(oe) }
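## The core of graphEff is the monotone feasibility of the scaled point
## (G*x, y/G): if it is in the technology set for some G, it is also in it for
## any larger G, so a bisection on G converges to the graph (hyperbolic)
## efficiency. A schematic stand-in for the loop above, with a hypothetical
## feasible() function in place of the LP solve (illustrative only):
graph.bisect.sketch <- function(feasible, a, b, tol=1e-6) {
   # invariant: feasible(b) is TRUE, feasible(a) is FALSE, a < b
   while ( b-a > tol ) {
      G <- (a+b)/2
      if ( feasible(G) ) b <- G else a <- G
   }
   return(b)
}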
## End of file R/graphEff.R
# $Id: make.merge.R 244 2022-05-05 14:31:31Z X052717 $

make.merge <- function(grp, nFirm=NULL, X=NULL, names=NULL) {
   # Sets up the aggregation matrix that forms the groups in grp from X.
   # What is to be merged is given as indexes in a list of arrays where
   # each array holds the indexes of the units that enter a given group.
   if ( is(grp, "factor") ) {
      # print("factor")
      g <- nlevels(grp)
      K <- Kg <- length(grp)
      Kn <- -1
   } else if ( is(grp, "list") && is(grp[[1]], "character") ) {
      # print("list of names")
      g <- length(grp)
      Kn <- K <- length(names)
      Kg <- K
   } else {
      # print("list of numbers")
      g <- length(grp)
      Kg <- -1
   }
   if ( !is.null(nFirm) && !is(nFirm, "numeric") && !is(nFirm, "integer") )
      stop("The argument nFirm must be numeric or integer")
   if ( !is.null(X) && !is(X, "matrix") )
      stop("The argument X must be a matrix")
   # print(g)
   if ( Kg == -1 & is.null(X) & is.null(nFirm) ) {
      stop("Either X or nFirm must be in the call to make.merge or grp must be a factor")
   }
   Kx <- -1
   if ( !is.null(X) ) {
      K <- Kx <- dim(X)[1]
   }
   if ( !is.null(nFirm) )
      K <- nFirm
   if ( !is.null(names) )
      Kn <- length(names)
   if ( !is.null(nFirm) && !is.null(X) && Kx != K )
      stop("nFirm must be the number of rows in X")
   if (Kg!=-1 && !is.null(nFirm) && Kg!=K )
      stop("nFirm must be the length of the factor grp")
   if (Kg!=-1 && !is.null(X) && Kg!=Kx )
      stop("The length of the factor grp must be the number of rows in X")
   if ( !is.null(names) && K>0 && K!=Kn )
      stop("The length of names must be the number of firms")
   if ( is(grp, "list") && is(grp[[1]], "character") && Kn <= 0)
      stop("When grp is a list of names for mergers the argument names must also be supplied")
   if ( K < 0 && Kn > 0 ) K <- Kn

   Mer <- matrix(0, nrow=g, ncol=K)
   if ( is(grp, "factor") ) {
      for ( i in 1:g ) {
         # Set ones in the columns for the units to be merged
         Mer[i,as.numeric(grp)==i] <- 1
      }
   } else if ( is(grp, "list") && is(grp[[1]], "character") ) {
      for ( i in 1:g ) {
         Mer[i,which(names %in% grp[[i]])] <- 1
      }
   } else {
      for ( i in 1:g ) {
         # Set ones in the columns for the units to be merged
         Mer[i,grp[[i]]] <- 1
      }
   }
   if ( !is.null(names(grp)) ) rownames(Mer) <- names(grp)
   if ( !is.null(names) ) colnames(Mer) <- names
   return(Mer)  # returns the merger matrix
   # Mer %*% X  # returns the merged input/output data
}
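## Usage sketch (illustrative only, made-up grouping): a factor with one level
## per planned merger gives the aggregation matrix directly, and Mer %*% X
## yields the merged input matrix.
if (FALSE) {
   grp <- factor(c("a", "a", "b", "b", "b", "c"))
   Mer <- make.merge(grp)
   X <- matrix(1:12, nrow=6)   # 6 firms, 2 inputs
   Mer %*% X                   # inputs summed within each merger
}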
## End of file R/make.merge.R
# $Id: malmq.R 247 2022-09-15 22:38:27Z X052717 $

# Computes the Malmquist index and its decomposition from two periods.
# The two periods need not contain the same number of units, but the
# index is only computed for the units present in both periods.
# The method can be used on its own, but it is also meant to be called
# from another method that handles several periods.

malmq <- function(X0, Y0, ID0=NULL, X1,Y1, ID1=NULL, RTS="vrs", ORIENTATION="in",
            SAMEREF=FALSE, SLACK=FALSE, DUAL=FALSE, DIRECT=NULL, param=NULL,
            TRANSPOSE=FALSE, FAST=TRUE, LP=FALSE, CONTROL=NULL, LPK=NULL)
{
   # It is understood that the X's and Y's are matrices 'units x var'
   rts <- c("fdh","vrs","drs","crs","irs","irs2","add","fdh+","fdh++","fdh0")
   if ( missing(RTS) ) RTS <- "vrs"
   if ( is.numeric(RTS) ) {
      if (LP) print(paste("Number '",RTS,"'",sep=""),quote=F)
      RTStemp <- rts[1+RTS] # the first fdh is number 0
      RTS <- RTStemp
      if (LP) print(paste("' is '",RTS,"'\n",sep=""),quote=F)
   }
   RTS <- tolower(RTS)
   if ( !(RTS %in% rts) ) stop("Unknown scale of returns: ", RTS)

   orientation <- c("in-out","in","out","graph")
   if ( is.numeric(ORIENTATION) ) {
      ORIENTATION_ <- orientation[ORIENTATION+1] # "in-out" is number 0
      ORIENTATION <- ORIENTATION_
   }
   ORIENTATION <- tolower(ORIENTATION)
   if ( !(ORIENTATION %in% orientation) ) {
      stop("Unknown value for ORIENTATION: ", ORIENTATION)
   }

   m0 <- dim(X0)[2]  # number of inputs
   n0 <- dim(Y0)[2]  # number of outputs
   K0 <- dim(X0)[1]  # number of units, firms, DMUs
   m1 <- dim(X1)[2]  # number of inputs
   n1 <- dim(Y1)[2]  # number of outputs
   K1 <- dim(X1)[1]  # number of units, firms, DMUs

   # If IDs are missing they are set to 1,...,K
   if ( is.null(ID0) ) ID0 <- seq(1,K0)
   if ( is.null(ID1) ) ID1 <- seq(1,K1)
   # The length of ID must match the number of units in X and Y
   if ( K0 != length(ID0) || K0 != dim(Y0)[1] )
      stop("Number of units in X0 and Y0 must correspond to length of ID0")
   if ( K1 != length(ID1) || K1 != dim(Y1)[1] )
      stop("Number of units in X1 and Y1 must correspond to length of ID1")
   # The number of inputs must be the same in X0 and X1
   if (m0 != m1) stop("Number of inputs must be the same in X0 and X1")
   # The number of outputs must be the same in Y0 and Y1
   if (n0 != n1) stop("Number of outputs must be the same in Y0 and Y1")

   # Find the units present in both periods and the corresponding indexes
   idlab <- intersect(ID0, ID1)
   id0 <- ID0 %in% idlab
   id1 <- ID1 %in% idlab
   # Check for duplicates and that the two periods have the same number of units
   if ( sum(id0) != length(unique(ID0[id0])) || sum(id1) != length(unique(ID1[id1]))
         || sum(id0) != sum(id1) )
      stop("Units in ID are not unique for each period")

   # Input and output for the units present in both periods. The index must
   # be computed for the same units in both periods; otherwise a Malmquist
   # index makes no sense.
   x0 <- X0[id0,,drop=FALSE]
   y0 <- Y0[id0,,drop=FALSE]
   x1 <- X1[id1,,drop=FALSE]
   y1 <- Y1[id1,,drop=FALSE]

   # The reference technology may be determined by different units in the
   # two periods, but it can also be the same units that determine the
   # technology in each period.
   if (SAMEREF) {
      X0 <- X0[id0,,drop=FALSE]
      Y0 <- Y0[id0,,drop=FALSE]
      X1 <- X1[id1,,drop=FALSE]
      Y1 <- Y1[id1,,drop=FALSE]
   }
   # Should the technology in a period be determined by all units present in
   # that period, regardless of whether they are in the other period? As
   # written, the technology is determined by the units present in both
   # periods. By using X0, X1 etc. as reference the technology is determined
   # by all units in the period in question, even though the index is only
   # computed for units present in both periods.
e00_ <- dea(x0, y0, RTS=RTS, ORIENTATION=ORIENTATION, XREF=X0,YREF=Y0, SLACK=SLACK,
         DUAL=DUAL, DIRECT=DIRECT, param=param, TRANSPOSE=TRANSPOSE,
         FAST=TRUE, LP=LP, CONTROL=CONTROL, LPK=LPK)
   e10_ <- dea(x1, y1, RTS=RTS, ORIENTATION=ORIENTATION, XREF=X0,YREF=Y0, SLACK=SLACK,
         DUAL=DUAL, DIRECT=DIRECT, param=param, TRANSPOSE=TRANSPOSE,
         FAST=TRUE, LP=LP, CONTROL=CONTROL, LPK=LPK)
   e11_ <- dea(x1, y1, RTS=RTS, ORIENTATION=ORIENTATION, XREF=X1,YREF=Y1, SLACK=SLACK,
         DUAL=DUAL, DIRECT=DIRECT, param=param, TRANSPOSE=TRANSPOSE,
         FAST=TRUE, LP=LP, CONTROL=CONTROL, LPK=LPK)
   e01_ <- dea(x0, y0, RTS=RTS, ORIENTATION=ORIENTATION, XREF=X1,YREF=Y1, SLACK=SLACK,
         DUAL=DUAL, DIRECT=DIRECT, param=param, TRANSPOSE=TRANSPOSE,
         FAST=TRUE, LP=LP, CONTROL=CONTROL, LPK=LPK)

   # Arrange the order so that it corresponds to the order in idlab
   e00 <- rep(NA, length(idlab))
   e01 <- rep(NA, length(idlab))
   e10 <- rep(NA, length(idlab))
   e11 <- rep(NA, length(idlab))
   for( i in idlab) {
      e00[idlab==i] <- e00_[ID0[id0]==i]
      e01[idlab==i] <- e01_[ID0[id0]==i]
      e10[idlab==i] <- e10_[ID1[id1]==i]
      e11[idlab==i] <- e11_[ID1[id1]==i]
   }

   tc <- sqrt(e10/e11 * e00/e01)   # technical change; shift of the frontier
   ec <- e11/e00                   # change in efficiency
   m <- tc * ec
   mq <- sqrt(e10/e00 * e11/e01)   ## == m; the Malmquist index of productivity

   return( list(m=m, tc=tc, ec=ec, mq=mq, id=idlab, id0=id0, id1=id1,
                e00=e00, e10=e10, e11=e11, e01=e01) )
}
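## Two-period usage sketch (illustrative only, made-up balanced panel). The
## decomposition above satisfies mq = tc * ec by construction.
if (FALSE) {
   X0 <- matrix(c(2, 4, 5), ncol=1); Y0 <- matrix(c(1, 2, 2), ncol=1)
   X1 <- matrix(c(2, 3, 5), ncol=1); Y1 <- matrix(c(2, 2, 3), ncol=1)
   mq <- malmq(X0, Y0, X1=X1, Y1=Y1, RTS="vrs", ORIENTATION="in")
   cbind(m=mq$m, tc=mq$tc, ec=mq$ec)
   all.equal(mq$mq, mq$tc * mq$ec)   # the decomposition identity
}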
## End of file R/malmq.R
# $Id: malmquist.R 247 2022-09-15 22:38:27Z X052717 $

# Computes the Malmquist index for the units ID over the periods in TIME.
# It is assumed that there are no holes in TIME, i.e. the index is computed
# between consecutive values in TIME. If TIME is not numeric, its implicit
# order is assumed to be the desired one.
# Pairs of periods are handled by the function 'malmq'.

malmquist <- function(X, Y, ID, TIME, RTS="vrs", ORIENTATION="in",
            SAMEREF=FALSE, SLACK=FALSE, DUAL=FALSE, DIRECT=NULL, param=NULL,
            TRANSPOSE=FALSE, FAST=TRUE, LP=FALSE, CONTROL=NULL, LPK=NULL)
{
   # The periods present in the data
   time <- unique(TIME)
   unit <- unique(ID)
   # |time| is sorted if the variable is numeric; otherwise the implicit order
   # is used. Should all the data then not be sorted as well? No, because it
   # is not TIME that is sorted, only the unique TIME values, independently
   # of the units.
   if ( is.numeric(time) ) time <- sort(time)

   # Values for the first period in |time| remain NA.
   # The order is as in ID.
   Malm <- array(NA, dim=c(length(ID)))
   TC <- array(NA, dim=c(length(ID)))
   EC <- array(NA, dim=c(length(ID)))
   E00 <- array(NA, dim=c(length(ID)))
   E01 <- array(NA, dim=c(length(ID)))
   E10 <- array(NA, dim=c(length(ID)))
   E11 <- array(NA, dim=c(length(ID)))

   # Run through the periods and compute Malmquist for pairs of periods
   for ( t in 2:length(time) ) {
      cat("Period ",t,"\n")
      flush.console()
      # Find the units in period 0 and period 1
      id0 <- ID[time[t-1]==TIME]
      id1 <- ID[time[t]==TIME]
      X0 <- X[time[t-1]==TIME,, drop=FALSE]
      Y0 <- Y[time[t-1]==TIME,, drop=FALSE]
      X1 <- X[time[t]==TIME,, drop=FALSE]
      Y1 <- Y[time[t]==TIME,, drop=FALSE]
      m <- malmq(X0,Y0,id0, X1,Y1,id1, RTS=RTS, ORIENTATION=ORIENTATION,
            SAMEREF=SAMEREF, SLACK=SLACK, DUAL=DUAL, DIRECT=DIRECT, param=param,
            TRANSPOSE=TRANSPOSE, FAST=TRUE, LP=LP, CONTROL=CONTROL, LPK=LPK)
      # The order must be as in X and Y, i.e. as in ID
      for (i in m$id) {
         Malm[ID==i & TIME==time[t]] <- m$m[m$id==i]  # Malmquist index for change in productivity
         TC[ID==i & TIME==time[t]] <- m$tc[m$id==i]   # technical change, shift of the frontier
         EC[ID==i & TIME==time[t]] <- m$ec[m$id==i]   # change in efficiency
         E00[ID==i & TIME==time[t]] <- m$e00[m$id==i]
         E01[ID==i & TIME==time[t]] <- m$e01[m$id==i]
         E10[ID==i & TIME==time[t]] <- m$e10[m$id==i]
         E11[ID==i & TIME==time[t]] <- m$e11[m$id==i]
      }
      if ( t==2 ) {
         # The first period can be set
         for (i in m$id) {
            E11[ID==i & TIME==time[t-1]] <- m$e00[m$id==i]
         }
      }
   }  # for (t)

   return(list(m=Malm, tc=TC, ec=EC, id=ID, time=TIME,
               e00=E00, e10=E10, e11=E11, e01=E01))
}  # function
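## Multi-period usage sketch (illustrative only, made-up two-period panel):
## stack the periods and let ID and TIME identify the observations; rows for
## the first period keep NA since no earlier period exists.
if (FALSE) {
   X <- matrix(c(2, 4, 5,  2, 3, 5), ncol=1)
   Y <- matrix(c(1, 2, 2,  2, 2, 3), ncol=1)
   ID <- rep(1:3, times=2)
   TIME <- rep(1:2, each=3)
   mp <- malmquist(X, Y, ID, TIME, RTS="vrs", ORIENTATION="in")
   cbind(ID, TIME, m=mp$m, tc=mp$tc, ec=mp$ec)
}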
## End of file R/malmquist.R
# $Id: minDirection.R 229 2020-07-04 13:39:18Z lao $

# Function to calculate the min step for each input or max step for
# each output to the frontier, the directions in MEA. A series of LP
# problems.

minDirection <- function(lps, m, n, ORIENTATION, LP=FALSE, CONTROL=CONTROL)
{
    # 'md' is the number of elements/goods in the direction
    if ( ORIENTATION=="in" )     md <- m
    if ( ORIENTATION=="out" )    md <- n
    if ( ORIENTATION=="in-out" ) md <- m+n
    # Set the counter for the first good
    mn0 <- switch(ORIENTATION, "in"=0, "out"=m, "in-out"=0)

    if (!missing(CONTROL)) set_control(lps, CONTROL)

    Direct <- rep(NA, md)
    for ( h in 1:md )  {
        if (LP) print(paste(" --> Vare", h), quote=FALSE)
        # Put 1 in the 0'th row, the objective function -- default is 0 --
        # and -1 in the row for the relevant good/element
        set.column(lps, 1, c(1,-1), c(0, mn0+h))
        if (LP) print(lps)
        set.basis(lps, default=TRUE)
        status <- solve(lps)
        if (LP) print(paste("Status =", status), quote=FALSE)
        if (LP) print(get.objective(lps))
        Direct[h] <- get.objective(lps)
    }
    lpcontr <- lp.control(lps)
    eps <- lpcontr$epsilon["epsint"]
    if (LP) print(paste("eps =", eps))
    ## Direct[ abs(Direct) < eps ] <- 0
    if (LP) { print("Min direction:"); print(Direct) }
    return(Direct)
}

# mea is a wrapper for dea with a special value of DIRECT
mea <- function(X, Y, RTS="vrs", ORIENTATION="in", XREF=NULL, YREF=NULL,
                FRONT.IDX=NULL, param=NULL,
                TRANSPOSE=FALSE, LP=FALSE, CONTROL=NULL, LPK=NULL)
{
    e <- dea(X, Y, RTS, ORIENTATION, XREF, YREF, FRONT.IDX, SLACK=FALSE,
             DUAL=FALSE, DIRECT="min", param=param,
             TRANSPOSE=FALSE, LP=LP, CONTROL=CONTROL, LPK=LPK)
    return(e)
}

# Draw lines for MEA
mea.lines <- function(N, X, Y, ORIENTATION="in")
{
    orientation <- c("in-out","in","out")
    if ( is.numeric(ORIENTATION) )  {
        ORIENTATION_ <- orientation[ORIENTATION+1]  # "in-out" is no. 0
        ORIENTATION <- ORIENTATION_
    }
    ORIENTATION <- tolower(ORIENTATION)
    if ( !(ORIENTATION %in% orientation) )  {
        stop("Unknown value for ORIENTATION: ", ORIENTATION)
    }

    for ( n in N )  {
        if (ORIENTATION=="in") {
            abline(h=X[n,2], lty="dotted")
            abline(v=X[n,1], lty="dotted")
            dir1 <- c(X[n,1], 0)
            dir2 <- c(0, X[n,2])
        } else if (ORIENTATION=="out") {
            abline(h=Y[n,2], lty="dotted")
            abline(v=Y[n,1], lty="dotted")
            dir1 <- c(Y[n,1], 0)
            dir2 <- c(0, Y[n,2])
        } else if (ORIENTATION=="in-out") {
            abline(h=Y[n,1], lty="dotted")
            abline(v=X[n,1], lty="dotted")
            dir1 <- c(X[n,1], 0)
            dir2 <- c(0, Y[n,1])
        } else
            stop("Only directions in, out or in-out allowed in mea.lines")

        vn <- dea(X[n,,drop=FALSE], Y[n,,drop=FALSE], ORIENTATION=ORIENTATION,
                  XREF=X, YREF=Y, FAST=TRUE, DIRECT=dir1)
        hn <- dea(X[n,,drop=FALSE], Y[n,,drop=FALSE], ORIENTATION=ORIENTATION,
                  XREF=X, YREF=Y, FAST=TRUE, DIRECT=dir2)
        #print(paste("Nr",n,": vn =",vn,"; hn =",hn), quote=FALSE)
        #print(paste("dir = (",dir1,",",dir2,")"), quote=FALSE)

        if (ORIENTATION=="in") {
            abline(h=(1-hn)*X[n,2], lty="dotted")
            abline(v=(1-vn)*X[n,1], lty="dotted")
            # lines(c((1-vn)*X[n,1], X[n,1]), c(h=(1-hn)*X[n,2], X[n,2]), lw=2)
            arrows(X[n,1], X[n,2], (1-vn)*X[n,1], (1-hn)*X[n,2], lwd=2)
            dir <- c(vn*X[n,1], hn*X[n,2])
            #print(paste("dir =", dir), quote=FALSE)
            mm <- dea(X[n,,drop=FALSE], Y[n,,drop=FALSE], ORIENTATION=ORIENTATION,
                      XREF=X, YREF=Y, FAST=TRUE, DIRECT=dir)
            #print(paste("mm =",mm), quote=FALSE)
            points(X[n,1]-mm*dir[1], X[n,2]-mm*dir[2], pch=16, col="green")
            abline(0, X[n,2]/X[n,1], lty="dashed", col="red")
            abline(X[n,2] - X[n,1]*(X[n,2]-(1-hn)*X[n,2])/(X[n,1]-(1-vn)*X[n,1]),
                   (X[n,2] - (1-hn)*X[n,2])/(X[n,1] - (1-vn)*X[n,1]),
                   lty="dashed", col="blue")
        } else if (ORIENTATION=="out") {
            # print(paste("(1+vn)*Y =",(1+hn)*Y[n,2],"; (1+hn)*Y =",(1+vn)*Y[n,1]))
            abline(h=(1+hn)*Y[n,2], lty="dotted")
            abline(v=(1+vn)*Y[n,1], lty="dotted")
            arrows(Y[n,1], Y[n,2], (1+vn)*Y[n,1], (1+hn)*Y[n,2], lwd=2)
            dir <- c(vn*Y[n,1], hn*Y[n,2])
            #print(paste("dir =", dir), quote=FALSE)
            mm <- dea(X[n,,drop=FALSE], Y[n,,drop=FALSE], ORIENTATION=ORIENTATION,
                      XREF=X, YREF=Y, FAST=TRUE, DIRECT=dir)
            #print(paste("mm =",mm), quote=FALSE)
            points(Y[n,1]+mm*dir[1], Y[n,2]+mm*dir[2], pch=16, col="green")
            abline(0, Y[n,2]/Y[n,1], lty="dashed", col="red")
            abline(Y[n,2] - Y[n,1]*(Y[n,2]-(1-hn)*Y[n,2])/(Y[n,1]-(1-vn)*Y[n,1]),
                   (Y[n,2] - (1-hn)*Y[n,2])/(Y[n,1] - (1-vn)*Y[n,1]),
                   lty="dashed", col="blue")
        } else if (ORIENTATION=="in-out") {
            #print(paste("(1+vn)*Y =",(1+hn)*Y[n,1],"; (1+hn)*X =",(1+vn)*X[n,1]), quote=FALSE)
            abline(h=(1+hn)*Y[n,1], lty="dotted")
            abline(v=(1-vn)*X[n,1], lty="dotted")
            arrows(X[n,1], Y[n,1], (1-vn)*X[n,1], (1+hn)*Y[n,1], lwd=2)
            dir <- c(vn*X[n,], hn*Y[n,])
            #print(paste("dir =", dir), quote=FALSE)
            mm <- dea(X[n,,drop=FALSE], Y[n,,drop=FALSE], ORIENTATION=ORIENTATION,
                      XREF=X, YREF=Y, FAST=TRUE, DIRECT=dir)
            #print(mm)
            #print(paste("mm =",mm), quote=FALSE)
            #print(paste("x0 =",X[n,]+mm*dir[1], "; y0 =",Y[n,]+mm*dir[2]), quote=FALSE)
            points(X[n,]-mm*dir[1], Y[n,]+mm*dir[2], pch=16, col="green")
        }
    }
}

# smea <- function(X,Y, RTS="vrs", ORIENTATION="in", XREF=NULL, YREF=NULL,
#                  FRONT.IDX=NULL,
#                  TRANSPOSE=FALSE, LP=FALSE, CONTROL=NULL, LPK=NULL)
# {
#     e <- sdea(X,Y, RTS, ORIENTATION, DIRECT="min",
#               TRANSPOSE=FALSE, LP=LP)
#     return(e)
# }
/scratch/gouwar.j/cran-all/cranData/Benchmarking/R/minDirection.R
# $Id: outlier.R 226 2020-06-27 21:48:14Z lao $

# Check whether there are outliers in the data set. An outlier is one
# or more units that differ markedly from all other units. Here an
# outlier is defined as units that change the area/volume of the set
# spanned by all units, i.e. it is examined whether the area/volume
# changes markedly when one or more units are removed. The area/volume
# is calculated as the determinant of t(XY) %*% XY, an (m+n)x(m+n)
# matrix; for speed, crossprod(XY) is used.

# C:\prg\R\bin\x64\Rscript.exe --vanilla test_ap.R

outlier.ap <- function(X, Y, NDEL = 3L, NLEN = 25L, TRANSPOSE = FALSE)
{
    DEBUG=FALSE
    if (TRANSPOSE) {
        X <- t(X)
        Y <- t(Y)
    }
    X <- tjek_data(X)
    Y <- tjek_data(Y)
    if (dim(X)[1] != dim(Y)[1])
        stop("Number of firms differ in 'X' and 'Y'")

    xy <- cbind(X,Y)
    XY <- crossprod(xy)
    S <- det(XY)          # det(t(xy)%*%xy)
    R <- NDEL             # largest number of units to leave out
    K <- dim(X)[1]        # number of firms/observations
    last = min(NLEN, K)   # keep the 25 smallest values of RX
    ratio <- array(Inf, c(last,R))
    imat <- matrix(NA, nrow=R, ncol=R)
    rmin <- array(Inf, dim=R)
    if (DEBUG) { lbnr <- 1 }
    for ( r in 1L:R ) {
        if (DEBUG) print( paste("Remove ",r," observations",sep=""), quote=FALSE )
        # remove r observations
        RX <- rep(Inf,last)
        rrlast <- Inf
        if (K < r)
            stop("Number of units less than number to be removed; K < NDEL")
        e <- 0
        h <- r        # number of left-out units
        del <- 1L:r   # set of left-out units; the first set is the first 'r' units
        if (DEBUG) {
            cat(format(lbnr, width=5), "*: ", sep=""); lbnr <- lbnr + 1
            print(del)
        }
        Dxy = XY - crossprod(xy[del,,drop=FALSE])
        rr <- det(Dxy)/S   ## det( t(Dxy) %*% Dxy )/S
        RX[1] <- rr
        rrlast <- RX[last]
        # count <- as.integer(round(choose(K, r)))
        # print( paste("Number of combinations", count), quote=FALSE )
        nmmp1 <- K - r + 1L  # index for the unit one past the last possible included unit
        while (del[1L] != nmmp1) {
            if (e < K - h) {
                # there are more units still to be left out
                h <- 1L
                e <- del[r]
                j <- 1L
            } else {
                if (DEBUG) print("Skift at base")
                e <- del[r - h]
                h <- h + 1L
                j <- 1L:h
            }
            del[r - h + j] <- e + j
            if (DEBUG) {
                cat(format(lbnr, width=5), " : ", sep=""); lbnr <- lbnr + 1
                print(del)
            }
            Dxy = XY - crossprod(xy[del,,drop=FALSE])
            rr <- det(Dxy)/S   ## det( t(Dxy) %*% Dxy )/S
            if ( rr < rrlast ) {
                # keep the |last| smallest values
                if ( rr < min(RX) ) {
                    imat[r,1:r] <- del
                }
                RX[last] <- rr
                RX <- sort(RX)
                rrlast <- RX[last]
            }
        } ## while
        rmin[r] <- min(RX)
        Rratio <- log(RX/min(RX))
        ratio[,r] <- Rratio
    } ## for (r)
    return( list(ratio=ratio, imat=imat, r0=rmin) )
} # outlier.ap

outlier.ap.plot <- function(ratio, NLEN = 25L, xlab="Number of firms deleted",
                            ylab="Log ratio", ..., ylim)
{
    nlen <- min(NLEN, dim(ratio)[1])
    R <- dim(ratio)[2]
    if (missing(ylim)) ylim <- range(ratio[1:nlen,])
    ry <- matrix(1:R, nrow=nlen, ncol=R, byrow=TRUE)
    plot(1:R, rep(0,R), type="p", ylim=ylim, xaxt="n",
         ylab=ylab, xlab=xlab, ...)
    axis(1, at=1:R, labels=c(1:R))
    points(ry[-1,], ratio[2:nlen,])
    lines(1:R, ratio[2,], lty="dashed")
} # outlier.ap.plot

#----------------------------------------------------------------------
#----------------------------------------------------------------------

outlierC.ap <- function(X, Y, NDEL = 3, NLEN = 25, TRANSPOSE = FALSE)
{
    # Check input
    if (TRANSPOSE) {
        X <- t(X)
        Y <- t(Y)
    }
    X <- tjek_data(X)
    Y <- tjek_data(Y)
    if (dim(X)[1] != dim(Y)[1])
        stop("Number of firms differ in 'X' and 'Y'")

    # Data for outlierCpp
    xy <- cbind(X,Y)
    K <- dim(X)[1]        # number of firms/observations
    R <- min(K, NDEL)     # largest number of units to leave out
    nlen = min(NLEN, K)   # keep the 25 smallest values of RX

    # Variables to hold the output
    ratio <- array(Inf, dim = c(nlen, R))
    imat <- array(Inf, dim = c(R, R))   # Rcpp cannot handle 'NA' so
                                        # 'Inf' is used instead.
    rmin <- array(Inf, dim=R)

    outlierCpp(K, R, xy, ratio, imat, rmin)

    imat[is.infinite(imat)] <- NA
    return(list(ratio=ratio, imat=imat, r0=rmin))
}
#----------------------------------------------------------------------
#----------------------------------------------------------------------
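# Usage sketch (not run on source): the leave-out outlier search on
# invented data; assumes the package is attached so the internal helper
# tjek_data() is found.
if (FALSE) {
    set.seed(1)
    x <- matrix(runif(50, 1, 10), ncol=2)   # 25 firms, 2 inputs
    y <- matrix(runif(25, 1, 10), ncol=1)   # 1 output
    o <- outlier.ap(x, y, NDEL=2)
    o$imat                    # units whose removal shrinks the volume most
    outlier.ap.plot(o$ratio)  # log-ratio plot over number of deleted firms
}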
/scratch/gouwar.j/cran-all/cranData/Benchmarking/R/outlier.R
# $Id: profit.R 235 2021-04-11 13:46:25Z lao $

# Calculates optimal input and output to maximize profit for given
# input and output prices.

profit.opt <- function(XREF, YREF, W, P, RTS="vrs", param=NULL,
                       TRANSPOSE=FALSE, LP=FALSE, CONTROL=NULL, LPK=NULL)
{
    if (!TRANSPOSE) {
        XREF <- t(XREF)
        YREF <- t(YREF)
        W <- t(W)
        P <- t(P)
    }
    m = dim(XREF)[1]   # number of inputs
    n = dim(YREF)[1]   # number of outputs
    K = dim(W)[2]      # number of price sets
    Kr = dim(XREF)[2]  # number of units, firms, DMUs

    if ( dim(P)[2] > 1 && dim(P)[2] != K )
        stop("Dimensions for W and P are different")
    if ( Kr != dim(YREF)[2] )
        stop("Number of firms in XREF and YREF differ")
    if ( m != dim(W)[1] )
        stop("Number of inputs in W and XREF differ")
    if ( n != dim(P)[1] )
        stop("Number of outputs in P and YREF differ")

    XREF <- tjek_data(XREF)
    YREF <- tjek_data(YREF)
    W <- tjek_data(W)
    P <- tjek_data(P)

    rts <- c("fdh","vrs","drs","crs","irs","irs","add","fdh+")
    if ( missing(RTS) ) RTS <- "vrs"
    if ( is.numeric(RTS) )  {
        if (LP) cat(paste("Number '",RTS,"'",sep=""))
        RTStemp <- rts[1+RTS] # the first fdh is number 0
        RTS <- RTStemp
        if (LP) cat(paste("' is '",RTS,"'\n",sep=""))
    }
    RTS <- tolower(RTS)
    if ( !(RTS %in% rts) )  {
        print(paste("Unknown scale of returns:", RTS))
        print("continues assuming RTS = \"vrs\"\n")
        RTS <- "vrs"
    }

    if ( RTS != "crs" && RTS != "add" )  {
        rlamb <- 2
    } else
        rlamb <- 0

    lps <- make.lp(m+n +rlamb, m+n+Kr)
    # Set lp options
    lp.control(lps,
        scaling=c("range", "equilibrate", "integers")  # default scaling is 'geometric'
    )               # and it does not always give satisfactory results;
                    # 'curtisreid' often does not work at all
    name.lp(lps, paste("DEA profit,",RTS,"technology"))

    set.objfn(lps, c(-W[,1],P[,1], rep(0,Kr)))

    # Set rows in the constraint matrix,
    # first for inputs
    dia <- diag(1,nrow=m)
    for ( h in 1:m )
        set.row(lps,h, c(-dia[h,], rep(0,n), XREF[h,]))
    # then for outputs
    dia <- diag(1,nrow=n)
    for ( h in 1:n)
        set.row(lps,m+h, c(rep(0,m), dia[h,], -YREF[h,]))
    # restrictions on lambda
    if ( RTS != "crs" && RTS != "add" )  {
        set.row(lps, m+n+1, c(rep(0,m+n),rep(-1,Kr)))
        set.row(lps, m+n+2, c(rep(0,m+n),rep( 1,Kr)))
    }

    if ( RTS == "fdh" ) {
        set.type(lps,(m+n+1):(m+n+Kr),"binary")
        set.rhs(lps,1, m+n+2)
        delete.constraint(lps, m+n+1)
        rlamb <- rlamb -1
    } else if ( RTS == "vrs" )  {
        set.rhs(lps, c(-1,1), (m+n+1):(m+n+2))
    } else if ( RTS == "drs" )  {
        set.rhs(lps, 1, m+n+2)
        delete.constraint(lps, m+n+1)
        rlamb <- rlamb -1
    # } else if ( RTS == "crs" )  {
    #     # A mysterious restriction to force the solution to an existing firm
    #     add.constraint(lps, rep(1,Kr),">=", 1, (m+n+1):(m+n+Kr))
    } else if ( RTS == "irs" )  {
        set.rhs(lps, -1, m+n+1)
        delete.constraint(lps, m+n+2)
        rlamb <- rlamb -1
    } else if ( RTS == "add" )  {
        set.type(lps,(m+n+1):(m+n+Kr),"integer")
    } else if ( RTS == "fdh+" )  {
        # Set the parameters low and high
        if ( is.null(param) )  {
            param <- .15
        }
        if ( length(param) == 1 )  {
            low <- 1-param
            high <- 1+param
        } else {
            low <- param[1]
            high <- param[2]
        }
        param <- c(low=low, high=high)
        set.rhs(lps, c(-low,high), (m+n+1):(m+n+2))
        add.SOS(lps,"lambda", 1,1, (m+n+1):(m+n+Kr), rep(1, Kr))
    }

    set.constr.type(lps, rep("<=",m+n+rlamb), 1:(m+n+rlamb))
    lp.control(lps, sense="max")

    if (!missing(CONTROL)) set_control(lps, CONTROL)

    xopt <- matrix(NA,m,K)
    yopt <- matrix(NA,n,K)
    lambda <- matrix(NA,nrow=Kr,ncol=K)
    profit <- rep(NA,K)

    for ( k in 1:K )  {
        if ( dim(W)[2] != 1 && k > 1 ) {
            set.objfn(lps, c(-W[,k],P[,k], rep(0,Kr)))
        }
        if (LP) print(lps)
        set.basis(lps, default=TRUE)
        status <- solve(lps)
        if ( status == 3 ) {
            cat("\nProfit is unbounded for firm ",k,
                ". Input and output are not uniquely determined.\n", sep="")
            profit[k] <- Inf
            sol <- get.variables(lps)
            xopt[,k] <- sol[1:m]
            yopt[,k] <- sol[(m+1):(m+n)]
            lambda[,k] <- sol[(m+n+1):(m+n+Kr)]
        } else if ( status != 0 ) {
            cat("Error in solving for firm ", k,": Status = ",status, "\n", sep="")
        } else {
            profit[k] <- get.objective(lps)
            sol <- get.variables(lps)
            xopt[,k] <- sol[1:m]
            yopt[,k] <- sol[(m+1):(m+n)]
            lambda[,k] <- sol[(m+n+1):(m+n+Kr)]
        }
        if ( !is.null(LPK) && k %in% LPK )  {
            write.lp(lps, paste(name.lp(lps),k,".mps",sep=""),
                     type="mps",use.names=TRUE)
        }
    }  # for ( k in 1:K )
    # delete.lp(lps)

    rownames(lambda) <- paste("L",1:Kr,sep="")
    names(profit) <- colnames(W)

    if (!TRANSPOSE) {
        xopt <- t(xopt)
        yopt <- t(yopt)
        # profit <- t(profit)
        lambda <- t(lambda)
    }

    svar <- list("xopt"=xopt, yopt=yopt, profit=profit, "lambda"=lambda,
                 RTS=RTS, TRANSPOSE=TRANSPOSE)
    class(svar) <- "profit.opt"
    return (svar)
} # profit.opt

print.profit.opt  <- function(x, ...)  {
    a <- cbind("Optimal input"=x$xopt, "Optimal output"=x$yopt)
    print(a,...)
    invisible(a)
} ## print.profit.opt

summary.profit.opt <- function(object, ...)  {
    cat("Optimal input and output:\n")
    print.profit.opt(object)
    cat("Profit:\n")
    print(object$profit,...)
    cat("Weights (lambda):\n")
    x <- object$lambda
    xx <- format(unclass(x), digits=4)
    if (any(ina <- is.na(x)))
        xx[ina] <- ""
    if ( any(i0 <- !ina & abs(x) < 1e-9) )
        xx[i0] <- sub("0.0000", ".", xx[i0])
    print(xx, quote=FALSE, right=TRUE, ...)
    invisible(object)
    # printSpMatrix(Matrix(object$lambda),digits=4, col.names=T,...)
    # print(object$lambda,digits=4)
} ## summary.profit.opt
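# Usage sketch (not run on source), with invented prices; assumes the
# package and its lpSolveAPI backend are loaded.
if (FALSE) {
    x <- matrix(c(2, 4, 6, 8), ncol=1)     # 4 firms, 1 input
    y <- matrix(c(1, 3, 4, 4.5), ncol=1)   # 1 output
    w <- matrix(1.5, nrow=4, ncol=1)       # input price faced by each firm
    p <- matrix(4, nrow=4, ncol=1)         # output price faced by each firm
    po <- profit.opt(x, y, w, p, RTS="vrs")
    summary(po)   # optimal plans, maximal profit, and lambda weights
}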
/scratch/gouwar.j/cran-all/cranData/Benchmarking/R/profit.R
# $Id: rev.R 233 2020-08-10 16:43:17Z lao $

# Function to calculate maximum revenue for given input and given
# output prices.

# Calculations are done with matrices transposed compared to the R
# standard, but according to LP practice.

# The function cannot be called rev.opt because then print.rev.opt is
# confused with the method rev from base R.

revenue.opt <- function(XREF, YREF, P, XOBS=NULL, RTS="vrs", param=NULL,
                        TRANSPOSE=FALSE, LP=FALSE, CONTROL=NULL, LPK=NULL)
{
    if ( missing(XOBS) )  {
        XOBS <- XREF
    }
    if (!TRANSPOSE) {
        XREF <- t(XREF)
        YREF <- t(YREF)
        P <- t(P)
        XOBS <- t(XOBS)
    }
    m = dim(XREF)[1]   # number of inputs
    n = dim(YREF)[1]   # number of outputs
    K = dim(XOBS)[2]   # number of units, firms, DMUs
    Kr = dim(XREF)[2]  # number of units in the technology, firms, DMUs

    if ( dim(P)[2] > 1 && dim(P)[2] != K )
        stop("Dimensions for P and XOBS are different")
    if ( Kr != dim(YREF)[2] )
        stop("Number of firms in XREF and YREF differ")
    if ( n != dim(P)[1] )
        stop("Number of outputs in P and YREF differ")
    if ( m != dim(XOBS)[1] )
        stop("Number of inputs in XREF and XOBS differ")

    rts <- c("fdh","vrs","drs","crs","irs","irs","add","fdh+")
    if ( missing(RTS) ) RTS <- "vrs"
    if ( is.numeric(RTS) )  {
        if (LP) cat(paste("Number '",RTS,"'",sep=""))
        RTStemp <- rts[1+RTS] # the first fdh is number 0
        RTS <- RTStemp
        if (LP) cat(paste("' is '",RTS,"'\n",sep=""))
    }
    RTS <- tolower(RTS)
    if ( !(RTS %in% rts) )  {
        print(paste("Unknown scale of returns:", RTS))
        print("continues assuming RTS = \"vrs\"\n")
        RTS <- "vrs"
    }

    if ( RTS != "crs" && RTS != "add" )  {
        rlamb <- 2
    } else
        rlamb <- 0

    lps <- make.lp(m+n +rlamb, n+Kr)
    # Set lp options
    lp.control(lps,
        scaling=c("range", "equilibrate", "integers")  # default scaling is 'geometric'
    )               # and it does not always give satisfactory results;
                    # 'curtisreid' often does not work at all
    name.lp(lps, paste("DEA rev,",RTS,"technology"))

    # Set rows in the constraint matrix; put 0's in the input rows for
    # now, since the right-hand side is changed for each firm anyway.
    dia <- diag(1,nrow=n)
    for ( h in 1:m )
        set.row(lps,h, c(rep(0,n), XREF[h,]))
    for ( h in 1:n)
        set.row(lps,m+h, c(dia[h,], -YREF[h,]))
    # restrictions on lambda
    if ( RTS != "crs" && RTS != "add" )  {
        set.row(lps, m+n+1, c(rep(0,n),rep(1,Kr)))
        set.row(lps, m+n+2, c(rep(0,n),rep( -1,Kr)))
    }

    if ( RTS == "fdh" ) {
        set.type(lps,(n+1):(n+Kr),"binary")
        set.rhs(lps,1, m+n+1)
        delete.constraint(lps, m+n+2)
        rlamb <- rlamb -1
    } else if ( RTS == "vrs" )  {
        set.rhs(lps, c(1,-1), (m+n+1):(m+n+2))
    } else if ( RTS == "drs" )  {
        set.rhs(lps, 1, m+n+1)
        delete.constraint(lps, m+n+2)
        rlamb <- rlamb -1
    } else if ( RTS == "irs" )  {
        set.rhs(lps, -1, m+n+2)
        delete.constraint(lps, m+n+1)
        rlamb <- rlamb -1
    } else if ( RTS == "add" )  {
        set.type(lps,(n+1):(n+Kr),"integer")
    } else if ( RTS == "fdh+" )  {
        # Set the parameters low and high
        if ( is.null(param) )  {
            param <- .15
        }
        if ( length(param) == 1 )  {
            low <- 1-param
            high <- 1+param
        } else {
            low <- param[1]
            high <- param[2]
        }
        param <- c(low=low, high=high)
        set.rhs(lps, c(high, -low), (m+n+1):(m+n+2))
        add.SOS(lps,"lambda", 1,1, (n+1):(n+Kr), rep(1, Kr))
    }

    set.objfn(lps, c(P[,1],rep(0,Kr)))
    set.constr.type(lps, rep("<=",m+n+rlamb))
    lp.control(lps, sense="max")

    if (!missing(CONTROL)) set_control(lps, CONTROL)

    yopt <- matrix(NA,n,K)
    lambda <- matrix(NA,nrow=Kr,ncol=K)
    rev <- rep(NA,K)
    for ( k in 1:K )  {
        if (LP) print(paste("===> firm",k),quote=FALSE)
        if ( dim(P)[2] != 1 && k > 1 ) {
            set.objfn(lps, c(P[,k],rep(0,Kr)))
        }
        set.rhs(lps, XOBS[,k], 1:m)
        if (LP) print(lps)
        set.basis(lps, default=TRUE)
        status <- solve(lps)
        if ( status != 0 ) {
            print(paste("Error in solving for firm",k,": Status =",status),
                  quote=FALSE)
        } else {
            rev[k] <- get.objective(lps)
            sol <- get.variables(lps)
            yopt[,k] <- sol[1:n]
            lambda[,k] <- sol[(n+1):(n+Kr)]
        }
        if ( !is.null(LPK) && k %in% LPK )  {
            write.lp(lps, paste(name.lp(lps),k,".mps",sep=""),
                     type="mps",use.names=TRUE)
        }
    }  # for ( k in 1:K )
    # delete.lp(lps)

    rownames(lambda) <- paste("L",1:Kr,sep="")
    names(rev) <- colnames(XOBS)

    if (!TRANSPOSE) {
        yopt <- t(yopt)
        # rev <- t(rev)
        lambda <- t(lambda)
    }

    svar <- list("yopt"=yopt, "rev"=rev, "lambda"=lambda,
                 RTS=RTS, TRANSPOSE=TRANSPOSE)
    class(svar) <- "revenue.opt"
    return (svar)
} # revenue.opt

print.revenue.opt  <- function(x, ...)  {
    a <- cbind("Optimal output"=x$yopt)
    print(a,...)
    invisible(a)
} ## print.revenue.opt

summary.revenue.opt <- function(object, ...)  {
    cat("Optimal output:\n")
    print.revenue.opt(object)
    cat("Revenue:\n")
    print(object$rev,...)
    cat("Weights (lambda):\n")
    x <- object$lambda
    xx <- format(unclass(x), digits=4)
    if (any(ina <- is.na(x)))
        xx[ina] <- ""
    if ( any(i0 <- !ina & abs(x) < 1e-9) )
        xx[i0] <- sub("0.0000", ".", xx[i0])
    print(xx, quote=FALSE, right=TRUE, ...)
    invisible(object)
    # printSpMatrix(Matrix(object$lambda),digits=4, col.names=T,...)
    # print(object$lambda,digits=4)
} ## summary.revenue.opt
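# Usage sketch (not run on source): maximal revenue for the observed
# inputs under vrs, with invented data; XOBS defaults to XREF.
if (FALSE) {
    x <- matrix(c(2, 4, 6, 8), ncol=1)     # 4 firms, 1 input
    y <- matrix(c(1, 3, 4, 4.5), ncol=1)   # 1 output
    p <- matrix(5, nrow=4, ncol=1)         # output price faced by each firm
    ro <- revenue.opt(x, y, p)
    summary(ro)   # optimal outputs, revenue, and lambda weights
}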
/scratch/gouwar.j/cran-all/cranData/Benchmarking/R/rev.R
# $Id: sdea.R 229 2020-07-04 13:39:18Z lao $

# Calculates super efficiency
sdea <- function(X,Y, RTS="vrs", ORIENTATION="in", DIRECT=NULL, param=NULL,
                 TRANSPOSE=FALSE, LP=FALSE, CONTROL=NULL)
{
    # Input is as for the method eff

    # If data is a data.frame then check whether it is numeric data and
    # in that case turn it into a matrix
    X <- tjek_data(X)
    Y <- tjek_data(Y)

    # The number of firms in the data is K
    if ( TRANSPOSE ) {
        X <- t(X)
        Y <- t(Y)
        if ( !is.null(DIRECT) & is(DIRECT, "matrix") )
            DIRECT <- t(DIRECT)
    }
    K = dim(X)[1]
    if (LP) {
        print(paste("K =",K))
        print(dim(X))
        print(dim(Y))
    }
    lambda <- matrix(0,K,K)

    # The super efficiencies are stored in a K vector that we
    # initialize to NA.
    supereff = rep(NA,K)
    if ( !is.null(dimnames(X)[[1]]) )  {
        names(supereff) <- dimnames(X)[[1]]
    }

    rts <- c("fdh","vrs","drs","crs","irs","irs","add","fdh+","fdh++","fdh0")
    if ( missing(RTS) ) RTS <- "vrs"
    if ( is.numeric(RTS) )  {
        RTS_ <- rts[1+RTS] # the first fdh is number 0
        RTS <- RTS_
    }
    RTS <- tolower(RTS)
    if ( !(RTS %in% rts) )  {
        print(paste("Unknown value for RTS:",RTS),quote=F)
        RTS <- "vrs"
        print(paste("Continues with RTS =",RTS),quote=F)
    }

    orientation <- c("in-out","in","out","graph")
    if ( is.numeric(ORIENTATION) )  {
        ORIENTATION_ <- orientation[ORIENTATION+1]  # "in-out" is no. 0
        ORIENTATION <- ORIENTATION_
    }
    if ( !(ORIENTATION %in% orientation) )  {
        print(paste("Unknown value for ORIENTATION:",ORIENTATION),quote=F)
        ORIENTATION <- "in"
        print(paste("Continues with ORIENTATION =",ORIENTATION),quote=F)
    }

    direct <- NULL
    directMatrix <- FALSE
    if ( !is.null(DIRECT) ) {
        if ( is(DIRECT, "matrix") )
            directMatrix <- TRUE
        else
            direct <- DIRECT
    }

    for ( i in 1:K )  {
        if (LP) print(paste("=====>> Unit",i),quote=F)
        # For each unit i we do the calculation and put the result in
        # the i'th position of supereff.
        # The first use of X and Y is the data for the unit whose
        # efficiency is calculated, the i'th. The second use, as XREF
        # and YREF, says which technology is to be used; they define
        # the technology.
        if ( directMatrix ) direct <- DIRECT[i,]
        e <- dea(X[i,,drop=FALSE], Y[i,,drop=FALSE], RTS,ORIENTATION,
                 XREF=X[-i,,drop=FALSE], YREF=Y[-i,,drop=FALSE],
                 TRANSPOSE=FALSE, DIRECT=direct, param=param, LP=LP,
                 CONTROL=CONTROL)
        supereff[i] <- e$eff
        # print(dim(lambda))
        if (LP) print(dim(e$lambda))
        lambda[i,-i] <- e$lambda[1,]
    }
    if (LP) print("sdea: faerdig med gennemloeb")

    # print(colnames(X))
    if ( is.null(rownames(X)) )  {
        colnames(lambda) <- paste("L",1:K,sep="")
    } else {
        colnames(lambda) <- paste("L",rownames(X),sep="_")
    }
    rownames(lambda) <- rownames(X)

    if (TRANSPOSE) {
        lambda <- t(lambda)
        if ( !is.null(DIRECT) & is(DIRECT, "matrix") )
            DIRECT <- t(DIRECT)
    }
    if (LP) {
        print("sdea: Om lamda, dim og lambda")
        print(dim(lambda))
        print(rownames(lambda))
        print(colnames(lambda))
        print(lambda)
    }

    # return(supereff)
    objval <- NULL
    oe <- list(eff=supereff, lambda=lambda, objval=objval, RTS=RTS,
               ORIENTATION=ORIENTATION, TRANSPOSE=TRANSPOSE,
               slack=NULL, sx=NULL, sy=NULL)
    class(oe) <- "Farrell"
    return(oe)
} # sdea
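# Usage sketch (not run on source): super efficiency leaves the evaluated
# firm out of the reference set, so frontier firms can score above 1 in
# input orientation. Invented data; dea() from the package is assumed.
if (FALSE) {
    x <- matrix(c(2, 3, 5, 9), ncol=1)
    y <- matrix(c(1, 3, 4, 5), ncol=1)
    es <- sdea(x, y, RTS="vrs", ORIENTATION="in")
    es$eff
}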
/scratch/gouwar.j/cran-all/cranData/Benchmarking/R/sdea.R
# $Id: sfa.R 231 2020-07-26 20:56:26Z lao $
# \encoding{latin1}

sfa <- function(x, y, beta0=NULL, lambda0=1, resfun=ebeta,
                TRANSPOSE = FALSE, DEBUG=FALSE,
                control = list(), hessian=2)
{
    # Function: computes minus the log likelihood
    loglik <- function(parm) {
        N <- dim(x)[1]
        K <- dim(x)[2] + 1
        beta <- parm[1:K]
        lambda <- parm[K+1]
        # s <- parm[K+2]
        e <- resfun(x,y,beta)
        s <- sum(e^2)/length(e)
        z <- -lambda*e/sqrt(s)
        pz = pmax(pnorm(z),1e-323)  # avoid having to take the log of 0; the
                                    # probability is always positive because
                                    # the support is the whole axis, but
                                    # rounding can make it exactly 0
        l <- N/2*log(pi/2) +N/2*log(s) -sum(log(pz)) +N/2.0
        # cat(parm,":  ",l,"\n")
        # print(l)
        return(l)
    } # loglik

    if ( is(x, "data.frame") )  {
        x <- as.matrix(x)
    }
    if ( !is.matrix(x) ) {
        print("Input 'x' must be a matrix",quote=F)
        return(print("Function 'sfa' stops",quote=F))
    }
    if ( is.matrix(y) && ncol(y) > 1 )  {
        stop("Only one column (one variable) allowed for argument y in sfa")
    }

    # Remove observations with NA's
    cpc <- complete.cases(cbind(x,y))
    if ( sum(!cpc) > 0 )  {
        x <- x[cpc,,drop=FALSE]
        y <- y[cpc]
    }

    if ( TRANSPOSE )  {
        x <- t(x)
        y <- t(y)
    }
    K <- dim(x)[2] + 1
    if ( nrow(x) != length(y) )  {
        stop("Number of observations on x and y differ")
    }

    if ( missing(beta0) | is.null(beta0) )  {
        # 1. OLS estimate
        m <- lm( y ~ x)  # OLS estimate; linear model
        # print(logLik(m))
        beta0 <- m$coef
        # sigma0 <- deviance(m)/dim(x)[1]  # estimate of variance, ML estimate
        # print(dim(x))
        # print(loglik(c(m$coef,deviance(m)/dim(x)[1],0)))
    }

    # 2. Minimization of minus log likelihood
    parm = c(beta0,lambda0)
    # print(loglik(parm))
    #print("Before control is set"); print(control)
    if ( missing(control) )  {
        #print("control is missing")
        #control["maxeval"] <- 1000
        control["stepmax"] <- .1
    } else {
        # If 'control' is set, make sure stepmax gets a small value unless
        # it is set in 'control'; if it is set in 'control' that value is
        # used at the user's own risk.
        #print("control has values")
        #print(control)
        kontrol <- list()
        kontrol["stepmax"] <- .1
        for ( i in names(control) )  {
            kontrol[i] <- control[i]
        }
        control <- kontrol
    }
    if (DEBUG) {print("Efter control er sat",quote=F); print(control)}

    if (DEBUG) print("ucminf bliver kaldt",quote=F)
    o <- ucminf(parm, loglik, control=control, hessian=hessian)
    if (DEBUG) {
        print("ucminf er slut",quote=F)
        print(paste("Antal funktionskald",o$info["neval"]),quote=F)
        print(o$info)
        if ( hessian!= 0 & !is.null(o$hessian) )  {
            print("Hessian:",quote=F)
            print(o$hessian)
        }
        if ( hessian!= 0 & !is.null(o$invhessian) )  {
            print("Inverse hessian",quote=F)
            print(o$invhessian)
        }
    }

    if ( o$convergence < 0 )  {
        warning("Not necessarily converged, $convergence = ",
                o$convergence,"\n",o$message)
    }
    if ( DEBUG & o$convergence > 0 )  {
        warning("Converged, $convergence = ",
                o$convergence,"\n",o$message)
    }

    if (o$par[K+1] < 0)  {
        warning("lambda is negative.\n",
            "  This could indicate that there is no inefficiency,\n",
            "  or that the model is misspecified." )
    }

    sf <- o
    class(sf) <- "sfa"
    K <- dim(x)[2] + 1
    sf$beta <- o$par[1:K]
    sf$coef <- o$par[1:K]
    names(sf$coef) <- names(o$par[1:K])
    e <- resfun(x,y,sf$beta)
    sf$residuals <- e
    sf$fitted.values <- y - e
    sf$lambda <- o$par[K+1]
    sf$sigma2 <- sum(e^2)/length(e)
    names(sf$par)[K+1] <- "lambda"
    # names(sf$par)[K+2] <- "sigma2"
    sf$N <- dim(x)[1]
    sf$df <- dim(x)[2] + 3
    sf$loglik <- -o$val
    sf$vcov <- matrix(NA, nrow=dim(x)[2]+1, ncol=dim(x)[2]+1)
    if ( hessian == 2 || hessian == 3 )
        sf$vcov <- o$invhessian
    else if ( hessian == 1 )  {
        if ( !is.null(o$hessian) ) sf$vcov <- genInv(o$hessian)
    } else
        sf$vcov <- matrix(NA, nrow=dim(x)[2]+1, ncol=dim(x)[2]+1)

    # Standard error of all parameters
    if (DEBUG & !is.null(o$hessian))
        cat("Determinant for Hessian = ", det(o$hessian),"\n" )

    ## Standard error of the parameters in the production function
    ## std.err[1:3]
    # t-ratios
    if (sum(!is.na(sf$vcov)))  {
        sf$std.err <- sqrt(diag(sf$vcov))
        sf$t.value <- sf$par/sf$std.err
    } else {
        sf$std.err <- NA
        sf$t.value <- NA
    }
    # t-ratio for production function
    # (o$par/std.err)[1:(1+dim(x)[2])]
    # print(loglik(o$par))
    return(sf)
} ## sfa

sfa.cost <- function(W, Y, COST, beta0 = NULL, lambda0 = 1, resfun = ebeta,
                     TRANSPOSE = FALSE, DEBUG=FALSE,
                     control=list(), hessian=2)
{
    cWY <- -1 * cbind(W,Y)
    cCOST <- -1 * COST
    if ( missing(beta0) | is.null(beta0) )  {
        # 1. OLS estimate
        m <- lm( cCOST ~ cWY)  # OLS estimate; linear model
        beta0 <- m$coef
    }
    sol <- sfa(x=cWY, y=cCOST, beta0=beta0, lambda0=lambda0, resfun=resfun,
               TRANSPOSE=TRANSPOSE, DEBUG=DEBUG, control=control,
               hessian=hessian)
    sol$fitted.values <- -sol$fitted.values
    sol$residuals <- - sol$residuals
    class(sol) <- c(class(sol), "sfa.cost")
    return(sol)
} ## sfa.cost

ebeta <- function(x,y,beta)  {
    # Calculate residuals
    # There is no intercept in the x matrix
    # %*% is the inner matrix product
    y - beta[1] - x %*% beta[2:(dim(x)[2]+1)]
}

genInv <- function(X, tol = sqrt(.Machine$double.eps))  {
    ## Generalized Inverse of a Matrix
    ## Calculates the Moore-Penrose generalized inverse of a matrix X
    dnx <- dimnames(X)
    if(is.null(dnx)) dnx <- vector("list", 2)
    s <- svd(X)
    nz <- s$d > tol * s$d[1]
    structure(
        if(any(nz)) s$v[, nz] %*% (t(s$u[, nz])/s$d[nz]) else X,
        dimnames = dnx[2:1])
}

print.sfa  <- # function(x,...)  {
function (x, digits = max(3, getOption("digits") - 3), ...)
{
    # cat("\nCall:\n", deparse(x$call), "\n\n", sep = "")
    if (length(coef(x))) {
        cat("Coefficients:\n")
        print.default(format(coef(x), digits = digits), print.gap = 2,
            quote = FALSE)
    }
    else cat("No coefficients\n")
    cat("\n")
    invisible(x)
    # a <- cbind("Parameters"=x$par," Std.err"=x$std.err," t-value"=x$t.value)
    # if ( length(x$t.value) > 0 )  {
    #    a <- cbind(a," Pr(>|t|)"=as.integer(1000*
    #          2*pt(abs(x$t.value),x$N-x$df,lower.tail=F) )/1000)
    # }
    # print(a,digits=4,quote=F,...)
    # cat("sigma2 ",format(x$sigma2,digits=5),"\n")
    # invisible(a)
} ## print.sfa

summary.sfa <- function(object, ...)  {
    # print.sfa(object)
    a <- cbind("Parameters"=format(object$par, digits=4, nsmall=5, sci=FALSE),
               "",
               "Std.err"=format(object$std.err, digits=5, nsmall=5, sci=FALSE),
               "",
               "t-value"=format(object$t.value, digits=4, nsmall=3))
    if ( length(object$t.value) > 0 )  {
        a <- cbind(" ", a, " ",
                   "Pr(>|t|)"=format(as.integer(1000*
                       2*pt(abs(object$t.value),object$N-object$df,lower.tail=F) )/1000,
                       digits=3, sci=FALSE))
    }
    print(a, quote=FALSE, ...)
    cat("sigma2      ",format(object$sigma2,digits=5),"\n")
    cat("sigma2v = ", object$sigma2/(1 + object$lambda^2),
        ";  sigma2u = ", object$sigma2*object$lambda^2/(1 + object$lambda^2),"\n")
    cat("log likelihood = ",object$loglik,"\n")
    cat("Convergence = ",object$convergence,
        "; number of evaluations of likelihood function", object$info["neval"], "\n")
    cat("Max value of gradient:", object$info["maxgradient"], "\n")
    cat("Length of last step:", object$info["laststep"], "\n")
    cat("Final maximal allowed step length:", object$info["stepmax"], "\n")
    # print(object$count)
} ## summary.sfa

fitted.sfa <- function(object, ...)  {
    return(object$fitted.values)
}

residuals.sfa <- function(object, ...)  {
    val <- object$residuals
    attr(val, "nobs") <- object$N
    attr(val, "df") <- object$df
    class(val) <- "residuals"
    return(val)
} ## residuals.sfa

vcov.sfa <- function(object, ...)  {
    return(object$vcov)
}

logLik.sfa <- function(object, ...)  {
    val <- -object$value
    attr(val, "nobs") <- object$N
    attr(val, "df") <- object$df
    class(val) <- "logLik"
    return(val)
} ## logLik.sfa

coef.sfa <- function(object, ...)  {
    return(object$coef)
}

eff.sfa <- function(object, type="BC", ...)  {
    switch( type,
        "BC"   = return(te.sfa(object)),
        "Mode" = return(teMode.sfa(object)),
        "J"    = return(teJ.sfa(object)),
        "add"  = return(te.add.sfa(object)),
        warning("Unknown type:", type)
    )
}
efficiencies.sfa <- eff.sfa

## Computation of technical efficiency
te.sfa <- function(object)  {
    # Auxiliary variables
    lambda <- object$lambda
    s2 <- object$sigma2
    sign <- 1
    if (is(object, "sfa.cost")) sign <- -1
    ustar <- - sign * object$residuals*lambda^2/(1+lambda^2)
    sstar <- lambda/(1+lambda^2)*sqrt(s2)
    # Technical efficiency for each unit
    TE = pnorm(ustar/sstar -sstar)/pnorm(ustar/sstar) * exp(sstar^2/2 -ustar)
    colnames(TE) <- "te"
    return(array(TE))
    # class(TE) <- "te"
    # TE
}
teBC.sfa <- te.sfa

teMode.sfa <- function(object)  {
    # Auxiliary variables
    lambda <- object$lambda
    sign <- 1
    if (is(object, "sfa.cost")) sign <- -1
    ustar <- - sign * object$residuals*lambda^2/(1+lambda^2)
    # Technical efficiency for each unit
    TE1 = matrix(exp(pmin(0,-ustar)),ncol=1)
    colnames(TE1) <- "teM"
    return(array(TE1))
}
# te1.sfa <- teMode.sfa

teJ.sfa <- function(object)  {
    # Auxiliary variables
    sign <- 1
    if (is(object, "sfa.cost")) sign <- -1
    lambda <- object$lambda
    s2 <- object$sigma2
    ustar <- - sign * object$residuals*lambda^2/(1+lambda^2)
    sstar <- lambda/(1+lambda^2)*sqrt(s2)
    # Technical efficiency for each unit
    TE2 = exp(-ustar -sstar*( dnorm(ustar/sstar)/pnorm(ustar/sstar) ) )
    colnames(TE2) <- "teJ"
    return(array(TE2))
}
# te2.sfa <- teJ.sfa

te.add.sfa <- function(object, ...)  {
    sign <- 1
    if (is(object, "sfa.cost")) sign <- -1
    e <- sign * residuals.sfa(object)
    s2 <- sigma2.sfa(object)
    lambda <- lambda.sfa(object)
    # auxiliary variables
    sstar <- lambda/(1+lambda^2)*sqrt(s2)
    estar <- e * lambda / sqrt(s2)
    uJ <- sstar * (dnorm(estar)/(1 - pnorm(estar)) - estar)
    teAdd <- 1 - uJ/object$fitted.values
    class(teAdd) <- "matrix"
    colnames(teAdd) <- "teAdd"
    return(array(teAdd))
}
eff.add.sfa <- te.add.sfa

sigma2u.sfa <- function(object)  {
    s2u <- object$lambda^2 / (1+object$lambda^2) * object$sigma2
    names(s2u) <- "sigma2u"
    return(s2u)
}

sigma2v.sfa <- function(object)  {
    s2v <- object$sigma2 / (1+object$lambda^2)
    names(s2v) <- "sigma2v"
    return(s2v)
}

sigma2.sfa <- function(object)  {
    s2 <- object$sigma2
    names(s2) <- "sigma2"
    return(s2)
}

lambda.sfa <- function(object)  {
    lam <- object$lambda
    names(lam) <- "lambda"
    return(lam)
}
/scratch/gouwar.j/cran-all/cranData/Benchmarking/R/sfa.R
# $Id: slack.R 241 2022-03-16 14:00:23Z X052717 $

# Calculate slack at the efficient points.

# If the data are not transposed, x and y are transposed and all
# calculations are done on the transposed matrices. This means that the
# result must be transposed back before the function returns.

slack <- function(X, Y, e, XREF=NULL, YREF=NULL, FRONT.IDX=NULL,
                  LP=FALSE, CONTROL=NULL)
{
    if ( !methods::is(e,"Farrell") ) {
        stop("In call of slack: argument 'e' must be of class 'Farrell'")
    }
    RTS <- e$RTS
    if ( RTS == "fdh0" ) RTS <- "fdh"
    if (LP) print(paste("slack: RTS =",RTS),quote=F)

    # If data is a data.frame then check whether it is numeric data and
    # in that case turn it into a matrix
    X <- tjek_data(X)
    Y <- tjek_data(Y)
    if ( missing(XREF) || is.null(XREF) )  XREF <- X
    if ( missing(YREF) || is.null(YREF) )  YREF <- Y
    XREF <- tjek_data(XREF)
    YREF <- tjek_data(YREF)

    if ( e$TRANSPOSE ) {
        if (LP) print("X and Y are transposed")
        X <- t(X)
        Y <- t(Y)
        XREF <- t(XREF)
        YREF <- t(YREF)
    }

    okr <- dim(XREF)[1]
    if (LP) cat("okr =",okr,"\n")

    if ( FALSE && length(FRONT.IDX) == 0 )  {
        # Only units with eff==1 are used in the reference technology
        FRONT.IDX <- which( abs(e$eff -1) < 1e-5 )
    }
    if ( length(FRONT.IDX) > 0 )  {
        if ( !is.vector(FRONT.IDX) )
            stop("FRONT.IDX is not a vector in method 'slack'")
        XREF <- XREF[FRONT.IDX,, drop=FALSE]
        YREF <- YREF[FRONT.IDX,, drop=FALSE]
        if (LP) cat("FRONT.IDX =",FRONT.IDX,"\n")
    }

    K = dim(X)[1]      # number of units, firms, DMUs
    Kr = dim(XREF)[1]  # number of units in reference set
    m = dim(X)[2]      # number of inputs
    n = dim(Y)[2]      # number of outputs
    if (LP) cat("In slack: m n K Kr = ",m,n,K,Kr,"\n")

    if ( m != dim(XREF)[2] )
        stop("Number of inputs must be the same in X and XREF")
    if ( n != dim(YREF)[2] )
        stop("Number of outputs must be the same in Y and YREF")
    if ( K != dim(Y)[1] )
        stop("Number of units must be the same in X and Y")
    if ( Kr != dim(YREF)[1] )
        stop("Number of units must be the same in XREF and YREF")

    # To avoid rounding problems in connection with the slacks, inputs
    # and outputs are scaled to be in the neighborhood of 1
    mmm <- (colMeans(X))
    nnn <- (colMeans(Y))
    if ( min(mmm) < 1e-4 || max(mmm) > 1e4 ||
         min(nnn) < 1e-4 || max(nnn) > 1e4 )  {
        SKALERING <- TRUE
        X <- X / matrix(mmm, nrow=K, ncol=m, byrow=TRUE)
        XREF <- XREF / matrix(mmm, nrow=Kr, ncol=m, byrow=TRUE)
        Y <- Y / matrix(nnn, nrow=K, ncol=n, byrow=TRUE)
        YREF <- YREF / matrix(nnn, nrow=Kr, ncol=n, byrow=TRUE)
    } else {
        SKALERING <- FALSE
    }

    if ( e$ORIENTATION=="graph" )  {
        ## stop(paste0("The use of the method 'slack' does not work for orientation 'graph'\n",
        ##      "  Use option SLACK in method 'dea'"))
        lps <- make.lp(1, 1)
        lpcontr <- lp.control(lps)
        tol <- (lpcontr$epsilon["epsint"])
        # delete.lp(lps)
        rm(lps, lpcontr)
        objval <- eff <- e$eff
        sx <- X*eff - e$lambda %*% XREF
        sy <- -Y/eff + e$lambda %*% YREF
        sx[abs(sx) < tol] <- 0
        sy[abs(sy) < tol] <- 0
        sum <- rowSums(sx) + rowSums(sy)
        sum[abs(sum) < tol] <- 0
        colnames(sx) <- paste("sx",1:m,sep="")
        colnames(sy) <- paste("sy",1:n,sep="")
        if ( FALSE && e$TRANSPOSE ) {
            sx <- t(sx)
            sy <- t(sy)
        }
        if ( SKALERING ) {
            sx <- sx * matrix(mmm, nrow=K, ncol=m, byrow=TRUE)
            sy <- sy * matrix(nnn, nrow=K, ncol=n, byrow=TRUE)
        }
        oe <- list(eff=eff, slack=sum>tol, sum=sum, objval=objval, sx=sx,
                   sy=sy, lambda=e$lambda, RTS=e$RTS,
                   ORIENTATION=e$ORIENTATION, TRANSPOSE=e$TRANSPOSE)
        class(oe) <- "slack"
        return(oe)
    } ## if (e$ORIENTATION=="graph")

    if ( RTS != "crs" && RTS != "add" )  {
        rlamb <- 2
    } else {
        rlamb <- 0
    }

    if ( is.null(e$objval) )
        eff <- e$eff
    else
        eff <- e$objval

    if ( is.null(e$direct) )  {
        if ( e$ORIENTATION == "in" )  {
            E <- eff
            FF <- rep(1,K)  # It would be natural to use F, but F is FALSE in R
        } else if ( e$ORIENTATION == "out" )  {
            FF <- eff
            E <- rep(1,K)
        } else if ( e$ORIENTATION == "graph" )  {
            # The following does not always work, which is why the
            # calculation under "if ( e$ORIENTATION=="graph" )" above is
            # used instead.
            stop("We should never end here")
            lps <- make.lp(1, 1)
            lpcontr <- lp.control(lps)
            tol <- lpcontr$epsilon["epsint"]
            # delete.lp(lps)
            # Rounding of eff. can mean that the computed point on the
            # frontier actually lies outside the frontier. Therefore 'tol'
            # is added to the eff. value to make sure the point stays
            # inside the technology set.
            # tol <- 0
            E <- eff + tol
            FF <- 1/eff - tol
            rm(lps, lpcontr, tol)
        } else {
            stop("Unknown orientation in slack: ", e$ORIENTATION)
        }
    } else {
        mmd <- switch(e$ORIENTATION, "in"=m, "out"=n, "in-out"=m+n)
        ob <- matrix(e$objval,nrow=K, ncol=mmd)
        if ( is(e$direct, "matrix") && dim(e$direct)[1] > 1 )  {
            dir <- e$direct
        } else {
            dir <- matrix(e$direct,nrow=K, ncol=mmd, byrow=TRUE)
        }
        if ( e$ORIENTATION=="in" )  {
            dirRhs <- cbind(X - ob*dir, -Y)
        } else if ( e$ORIENTATION=="out" )  {
            dirRhs <- cbind(X, -Y - ob*dir)
        } else if ( e$ORIENTATION=="in-out" )  {
            dirRhs <- cbind(X -ob[,1:m,drop=FALSE]*dir[,1:m,drop=FALSE],
                -Y -ob[,(m+1):(m+n),drop=FALSE]*dir[,(m+1):(m+n),drop=FALSE])
        } else {
            warning("Illegal ORIENTATION for argument DIRECT in slacks",
                    immediate. = TRUE)
        }
    } ## if ( is.null(e$direct) )

    # Initialize the LP object
    lps <- make.lp(m+n +rlamb, m+n+Kr)
    lp.control(lps,
        scaling=c("range", "equilibrate", "integers")  # default scaling is 'geometric'
    )               # and it does not always give satisfactory results;
                    # 'curtisreid' often does not work at all
    name.lp(lps, paste("DEA-slack",RTS,",",e$ORIENTATION,"orientated",sep="-"))

    # set rows in the constraint matrix
    for ( h in 1:m )
        set.row(lps,h, XREF[,h], (m+n+1):(m+n+Kr) )
    for ( h in 1:n)
        set.row(lps,m+h, -YREF[,h], (m+n+1):(m+n+Kr) )
    for ( h in 1:(m+n) )
        set.mat(lps,h,h,1)
    # restrictions on lambda
    if ( RTS != "crs" && RTS != "add" )  {
        set.row(lps, m+n+1, c(rep(0,m+n),rep(-1,Kr)))
        set.row(lps, m+n+2, c(rep(0,m+n),rep( 1,Kr)))
    }
    set.constr.type(lps, c(rep("=",m+n), rep("<=",rlamb)))
    set.objfn(lps, rep(1, m+n), 1:(m+n))
    lp.control(lps, sense="max")

    if ( RTS == "fdh" ) {
        set.type(lps,(m+n+1):(m+n+Kr),"binary")
        delete.constraint(lps, m+n+1)
        set.rhs(lps,1, m+n+1)
        rlamb <- rlamb -1
    } else if ( RTS == "vrs" )  {
        set.rhs(lps, c(-1,1), (m+n+1):(m+n+2))
    } else if ( RTS == "drs" )  {
        set.rhs(lps, -1, m+n+1)
        delete.constraint(lps, m+n+2)
        rlamb <- rlamb -1
        set.constr.type(lps, ">=", m+n+1)
    } else if ( RTS == "irs" )  {
        set.rhs(lps, 1, m+n+2)
        delete.constraint(lps, m+n+1)
        rlamb <- rlamb -1
        set.constr.type(lps, ">=", m+n+1)
    } else if ( RTS == "irs2" )  {
        set.rhs(lps, 1, m+n+2)
        delete.constraint(lps, m+n+1)
        rlamb <- rlamb -1
        set.constr.type(lps, ">=", m+n+1)
        set.semicont(lps, 2:(1+Kr))
        set.bounds(lps, lower=rep(1,Kr), columns=(m+n+1):(m+n+Kr))
    } else if ( RTS == "add" )  {
        set.type(lps, (m+n+1):(m+n+Kr),"integer")
    } else if ( RTS == "fdh+" )  {
        param <- e$param
        low <- param["low"]
        high <- param["high"]
        set.rhs(lps, c(-low,high), (m+n+1):(m+n+2))
        add.SOS(lps,"lambda", 1,1, (m+n+1):(m+n+Kr), rep(1, Kr))
    }
    if (LP) {
        print("Right hand side not set yet")
        print(lps)
    }

    objval <- rep(NA,K)   # vector with sums of slacks
    sx <- matrix(NA,K,m)
    sy <- matrix(NA,K,n)
    lambda <- matrix(NA,K,Kr)

    if (!missing(CONTROL)) set_control(lps, CONTROL)

    for ( k in 1:K )  {
        # compute for each unit
        if (LP) cat("\n---\nFirm",k,"\n")
        # Here there is a problem with rounding: together with the
        # left-hand side it is not certain that the restriction holds
        # after rounding in the intermediate calculations.
        if ( is.null(e$direct) )  {
            rhs <- c(E[k] * X[k,], -FF[k] * Y[k,])
        } else {
            rhs <- dirRhs[k,]
        }
        set.rhs(lps, rhs, 1:(m+n))
        if (LP) {
            print(paste("Effektivitet er ", E[k]))
            print(paste("Hoejresiden for firm",k))
            if ( is.null(e$direct) )  {
                print(E[k] * X[k,])
                print( -FF[k] * Y[k,])
            } else {
                print(rhs)
            }
            print(lps)
        }

        if ( !is.null(LP) && k %in% LP )  {
            write.lp(lps, paste(name.lp(lps),k,".mps",sep=""),
                     type="mps",use.names=TRUE)
        }

        set.basis(lps, default=TRUE)
        status <- solve(lps)
        if (LP) print(paste0("status = ", status))
        if ( status != 0 ) {
            if ( status == 2 || status == 3 ) {
                if (LP) print(paste("Firm",k,"not in the technology set"), quote=F)
                if (LP) print(paste("Status =",status))
                objval[k] <- 0
                sol <- rep(0,m+n+Kr)
                sol[m+n+k] <- 1
            } else {
                print(paste("Error in solving for firm",k,": Status =",status),
                      quote=F)
                objval[k] <- NA
                sol <- NA
            }
        } else {
            objval[k] <- get.objective(lps)
            sol <- get.variables(lps)
        }
        sx[k,] <- sol[1:m]
        sy[k,] <- sol[(m+1):(m+n)]
        lambda[k,] <- sol[(m+n+1):(m+n+Kr)]

        if (LP)  {
            print(paste("Obj.value =",get.objective(lps)))
            cat(paste("Solutions for",k,": "))
            print(sol)
        }
    }  # loop for each firm; "for (k in 1:K)"

    # Drop columns in lambda that are all zeros, i.e. the lambda columns
    # should be reference firms only
    colnames(sx) <- paste("sx",1:m,sep="")
    colnames(sy) <- paste("sy",1:n,sep="")

    # To avoid computational inaccuracies producing non-zero slack, the
    # slacks are set to zero when they are smaller than the relative
    # accuracy of the inputs and outputs used in the calculations. This
    # means that the sum of the slacks is 'sum' while objval is that sum
    # plus computational noise; the two differ when X is much larger or
    # smaller than 1.
    lpcontr <- lp.control(lps)
    eps <- lpcontr$epsilon["epsint"]
    sx[abs(sx) < eps ] <- 0
    sy[abs(sy) < eps ] <- 0
    if ( SKALERING )  {
        sx <- sx * matrix(mmm, nrow=K, ncol=m, byrow=TRUE)
        sy <- sy * matrix(nnn, nrow=K, ncol=n, byrow=TRUE)
    }
    sum <- rowSums(sx) + rowSums(sy)

    lambda[abs(lambda-1) < eps] <- 1   # close to 1
    lambda[abs(lambda) < eps] <- 0     # close to 0

    if ( length(FRONT.IDX)>0 )  {
        colnames(lambda) <- paste("L",(1:okr)[FRONT.IDX],sep="")
    } else {
        colnames(lambda) <- paste("L",1:Kr,sep="")
    }
    if (FALSE && LP)  {
        print("Done with loop\nColumn names for lambda:")
        print(colnames(lambda))
        print(lambda)
    }

    if ( e$TRANSPOSE ) {
        sx <- t(sx)
        sy <- t(sy)
        lambda <- t(lambda)
    }

    if ( FALSE && LP )  {
        print("Faerdig med slack: lambda")
        print(colnames(lambda))
        print(lambda)
        print("slack:")
        print(paste("laengden af objval:",length(objval)))
        print(objval==0)
        # print(!slackZero)
    }

    oe <- list(eff=eff, slack=sum>eps, sum=sum, objval=objval, sx=sx,
               sy=sy, lambda=lambda, RTS=e$RTS,
               ORIENTATION=e$ORIENTATION, TRANSPOSE=e$TRANSPOSE)
    class(oe) <- "slack"
    return(oe)
} ## slack()

print.slack  <- function(x, digits=4, ...)  {
    sum <- x$sum
    print(sum, digits=digits, ...)
    invisible(sum)
} ## print.slack

summary.slack <- function(object, digits=4, ...)  {
    eps <- 1e-7
    cat("Efficiency and slacks:\n")
    # if ( object$ORIENTATION != "out" )
    #    a <- cbind(E=round(object$eff, digits=2),
    #               "sum"=object$sum, object$sx, object$sy)
    # else
    #    a <- cbind(F=round(object$eff,2),
    #               "sum"=object$sum, object$sx, object$sy)
    # print(a,...)
    cat("Number of firms with efficiency==1 and positive slacks:",
        sum(abs(object$eff-1) < eps & object$slack , na.rm=TRUE),"\n")
    if ( dim(object$sx)[2]==1 )  {
        SX <- object$sx > eps
    } else {
        SX <- rowSums(object$sx, na.rm=TRUE) > eps
    }
    if ( dim(object$sy)[2]==1 )  {
        SY <- object$sy > eps
    } else {
        SY <- rowSums(object$sy, na.rm=TRUE) > eps
    }
    cat("Number of firms with:\n")
    cat("  only x slacks: ", sum(SX & !SY, na.rm=TRUE), "\n")
    cat("  only y slacks: ", sum(!SX & SY, na.rm=TRUE), "\n")
    cat("  x and y slacks:", sum(SY & SX, na.rm=TRUE), "\n")
    cat("\n  x slacks: ", sum(SX, na.rm=TRUE), "\n")
    cat(  "  y slacks: ", sum(SY, na.rm=TRUE), "\n")
    cat(  "all slacks: ", sum(SX | SY, na.rm=TRUE), "\n")
    if (sum(is.na(SX) | is.na(SY)))
        cat("Number of firms Not Available for slacks: ",
            sum(is.na(SX)|is.na(SY)),"\n")
    # invisible(a)
    if ( FALSE && dim(lambda(object))[2] <= 10 )  {
        cat("Weights (lambda):\n")
        x <- lambda(object)
        xx <- round(x, digits)
        # xx <- format(unclass(x), digits=digits)
        if (any(ina <- is.na(x)))
            xx[ina] <- ""
        if ( any(i0 <- !ina & abs(x) < 1e-9) )
            xx[i0] <- sub("0", ".", xx[i0])
        print(xx, quote=FALSE, right=TRUE, ...)
        invisible(xx)
    }
} ## summary.slack()
/scratch/gouwar.j/cran-all/cranData/Benchmarking/R/slack.R
#
# Do not delete!
# File name     Benford_tests.R
# Part of:      BenfordTests (GNU R contributed package)
# Author:       Dieter William Joenssen
# Copyright:    Dieter William Joenssen
# Email:        [email protected]
# Created:      16 April 2013
# Last Update:  16 July 2015
# Description:  R code for Package BenfordTests. Implemented functions include the following:
# Actual Tests:
# -chisq.benftest      ~ Chi square test for Benford's law
# -ks.benftest         ~ Kolmogorov-Smirnov test for Benford's law
# -mdist.benftest      ~ Chebyshev distance based test for Benford's law
# -edist.benftest      ~ Euclidean distance based test for Benford's law
# -usq.benftest        ~ Freedman Watson U square test for Benford's law
# -meandigit.benftest  ~ mean digit test for Benford's law
# -jpsq.benftest       ~ Pearson correlation test for Benford's law (removed ability to choose "spearman" and "kendall" for correlation)
# -signifd.analysis    ~ function to (graphically) analyze each digit individually.
# -jointdigit.benftest ~ function to test all digit frequencies jointly
# Supporting functions:
# -signifd     ~ returns the specified number of first significant digits
# -signifd.seq ~ sequence of all possible k first digits (i.e., k=1 -> 1:9)
# -qbenf       ~ returns full cumulative probability function for Benford's distribution (first k digits)
# -pbenf       ~ returns full probability function for Benford's distribution (first k digits)
# -rbenf       ~ generates a random variable that satisfies Benford's law
# -simulateH0  ~ calculates the H0-distribution of all test statistics via simulation

## Tests for Benford's law

## Pearson's Chi squared test statistic
chisq.benftest<-function(x=NULL,digits=1,pvalmethod="asymptotic",pvalsims=10000)
{
    #some self-explanatory error checking
    if(!is.numeric(x)){stop("x must be numeric.")}
    pvalmethod <- pmatch(pvalmethod, c("asymptotic", "simulate"))
    if (is.na(pvalmethod)){stop("invalid 'pvalmethod' argument")}
    if((length(pvalsims)!=1)){stop("'pvalsims' argument takes only single integer!")}
    if((length(digits)!=1)){stop("'digits' argument takes only single integer!")}
    #reduce the data to the specified number of first digits
    first_digits<-signifd(x,digits)
    #get the number of values to be tested
    n<-length(first_digits)
    #the observed frequencies of all digits
    freq_of_digits<-table(c(first_digits,signifd.seq(digits)))-1
    #calculate the relative frequencies
    rel_freq_of_digits<-freq_of_digits/n
    #get the expected frequencies under the NULL
    rel_freq_of_digits_H0<-pbenf(digits)
    #calculate the chi square test statistic
    chi_square<-n*sum((rel_freq_of_digits-rel_freq_of_digits_H0)^2/rel_freq_of_digits_H0)
    #calc pval if using the asymptotic NULL-distribution
    if(pvalmethod==1)
    {
        pval<-1-pchisq(chi_square,df=length(signifd.seq(digits))-1)
    }
    if(pvalmethod==2)#calc pval if using the simulated NULL-distribution
    {
        #wrapper function for simulating the NULL distribution
        dist_chisquareH0<-simulateH0(teststatistic="chisq",n=n,digits=digits,pvalsims=pvalsims)
        #calculate the p-value by determining the share of values in the
        #NULL-distribution that are larger than the calculated chi_square value
        pval<-1-sum(dist_chisquareH0<=chi_square)/length(dist_chisquareH0)
    }
    #make a nice S3 object of type htest
    RVAL <- list(statistic = c(chisq = chi_square), p.value = pval,
                 method = "Chi-Square Test for Benford Distribution",
                 data.name = deparse(substitute(x)))
    class(RVAL) <- "htest"
    return(RVAL)
}

##Kolmogorov-Smirnov test (EDF type)
ks.benftest<-function(x=NULL,digits=1,pvalmethod="simulate",pvalsims=10000)
{
    #some self-explanatory error checking
    if(!is.numeric(x)){stop("x must be numeric.")}
    pvalmethod <- pmatch(pvalmethod, c("simulate"))
    if (is.na(pvalmethod)){stop("invalid 'pvalmethod' argument")}
    if((length(pvalsims)!=1)){stop("'pvalsims' argument takes only single integer!")}
    if((length(digits)!=1)){stop("'digits' argument takes only single integer!")}
    #reduce the data to the specified number of first digits
    first_digits<-signifd(x,digits)
    #get the number of values to be tested
    n<-length(first_digits)
    #the observed (relative) frequencies of all digits
    freq_of_digits<-table(c(first_digits,signifd.seq(digits)))-1
    rel_freq_of_digits<-freq_of_digits/n
    #get the expected frequencies under the NULL
    rel_freq_of_digits_H0<-pbenf(digits)
    #calculate the deviations in the cumulative sums
    cum_sum_Ds<-cumsum(rel_freq_of_digits)-cumsum(rel_freq_of_digits_H0)
    #calculate the K-S-D-statistic
    K_S_D<-max(max(cum_sum_Ds),abs(min(cum_sum_Ds)))*sqrt(n)
    if(pvalmethod==1)#calc pval if using the simulated NULL-distribution
    {
        #wrapper function for simulating the NULL distribution
        dist_K_S_D_H0<-simulateH0(teststatistic="ks",n=n,digits=digits,pvalsims=pvalsims)
        #calculate the p-value by determining the share of values in the
        #NULL-distribution that are larger than the calculated D value
        pval<-1-sum(dist_K_S_D_H0<=K_S_D)/length(dist_K_S_D_H0)
    }
    #make a nice S3 object of type htest
    RVAL <- list(statistic = c(D = K_S_D), p.value = pval,
                 method = "K-S Test for Benford Distribution",
                 data.name = deparse(substitute(x)))
    class(RVAL) <- "htest"
    return(RVAL)
}
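#usage sketch (not run on source): rbenf() below draws a sample that
#conforms to Benford's law, so neither test should reject; the simulated
#p-value of ks.benftest() needs the package's compiled C routines.
if(FALSE)
{
    set.seed(42)
    x<-rbenf(200)
    chisq.benftest(x,digits=1)              # asymptotic p-value
    ks.benftest(x,digits=1,pvalsims=2000)   # simulated p-value
}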
# Chebyshev Distance Test (crit values for one digit testing first by Morrow)
mdist.benftest<-function(x=NULL,digits=1,pvalmethod="simulate",pvalsims=10000)
{
    #some self-explanatory error checking
    if(!is.numeric(x)){stop("x must be numeric.")}
    pvalmethod <- pmatch(pvalmethod, c("simulate"))
    if (is.na(pvalmethod)){stop("invalid 'pvalmethod' argument")}
    if((length(pvalsims)!=1)){stop("'pvalsims' argument takes only single integer!")}
    if((length(digits)!=1)){stop("'digits' argument takes only single integer!")}
    #reduce the data to the specified number of first digits
    first_digits<-signifd(x,digits)
    #get the number of values to be tested
    n<-length(first_digits)
    #the observed (relative) frequencies of all digits
    freq_of_digits<-table(c(first_digits,signifd.seq(digits)))-1
    rel_freq_of_digits<-freq_of_digits/n
    #get the expected frequencies under the NULL
    rel_freq_of_digits_H0<-pbenf(digits)
    #calculate the m_star statistic
    #on a personal note, this isn't very different from the KSD.
    m_star<-sqrt(n)*max(abs(rel_freq_of_digits-rel_freq_of_digits_H0))
    if(pvalmethod==1)
    {
        #wrapper function for simulating the NULL distribution
        dist_m_star_H0<-simulateH0(teststatistic="mdist",n=n,digits=digits,pvalsims=pvalsims)
        #calculate the p-value by determining the share of values in the
        #NULL-distribution that are larger than the calculated m_star value
        pval<-1-sum(dist_m_star_H0<=m_star)/length(dist_m_star_H0)
    }
    #make a nice S3 object of type htest
    RVAL <- list(statistic = c(m_star = m_star), p.value = pval,
                 method = "Chebyshev Distance Test for Benford Distribution",
                 data.name = deparse(substitute(x)))
    class(RVAL) <- "htest"
    return(RVAL)
}

# Euclidean Distance Test (crit values for one digit testing first by Morrow)
edist.benftest<-function(x=NULL,digits=1,pvalmethod="simulate",pvalsims=10000)
{
    #some self-explanatory error checking
    if(!is.numeric(x)){stop("x must be numeric.")}
    pvalmethod <- pmatch(pvalmethod, c("simulate"))
    if (is.na(pvalmethod)){stop("invalid 'pvalmethod' argument")}
    if((length(pvalsims)!=1)){stop("'pvalsims' argument takes only single integer!")}
    if((length(digits)!=1)){stop("'digits' argument takes only single integer!")}
    #reduce the data to the specified number of first digits
    first_digits<-signifd(x,digits)
    #get the number of values to be tested
    n<-length(first_digits)
    #the observed (relative) frequencies of all digits
    freq_of_digits<-table(c(first_digits,signifd.seq(digits)))-1
    rel_freq_of_digits<-freq_of_digits/n
    #get the expected frequencies under the NULL
    rel_freq_of_digits_H0<-pbenf(digits)
    #calculate the d_star statistic
    #on a personal note, this isn't very different from the m_star statistic.
    d_star<-sqrt(n)*sqrt(sum((rel_freq_of_digits-rel_freq_of_digits_H0)^2))
    if(pvalmethod==1)
    {
        #wrapper function for simulating the NULL distribution
        dist_d_star_H0<-simulateH0(teststatistic="edist",n=n,digits=digits,pvalsims=pvalsims)
        #calculate the p-value by determining the share of values in the
        #NULL-distribution that are larger than the calculated d_star value
        pval<-1-sum(dist_d_star_H0<=d_star)/length(dist_d_star_H0)
    }
    #make a nice S3 object of type htest
    RVAL <- list(statistic = c(d_star = d_star), p.value = pval,
                 method = "Euclidean Distance Test for Benford Distribution",
                 data.name = deparse(substitute(x)))
    class(RVAL) <- "htest"
    return(RVAL)
}

# Freedman's modification of Watson's U^2 for the Benford distribution (originally 1 digit)
usq.benftest<-function(x=NULL,digits=1,pvalmethod="simulate",pvalsims=10000)
{
    #some self-explanatory error checking
    if(!is.numeric(x)){stop("x must be numeric.")}
    pvalmethod <- pmatch(pvalmethod, c("simulate"))
    if (is.na(pvalmethod)){stop("invalid 'pvalmethod' argument")}
    if((length(pvalsims)!=1)){stop("'pvalsims' argument takes only single integer!")}
    if((length(digits)!=1)){stop("'digits' argument takes only single integer!")}
    #reduce the data to the specified number of first digits
    first_digits<-signifd(x,digits)
    #get the number of values to be tested
    n<-length(first_digits)
    #the observed (relative) frequencies of all digits
    freq_of_digits<-table(c(first_digits,signifd.seq(digits)))-1
    rel_freq_of_digits<-freq_of_digits/n
    #get the expected frequencies under the NULL
    rel_freq_of_digits_H0<-pbenf(digits)
    #calculate the deviations between the cumulative sums
    cum_sum_Ds<-cumsum(rel_freq_of_digits-rel_freq_of_digits_H0)
    #calculate the U^2 test statistic
    U_square<-(n/length(rel_freq_of_digits))*(sum(cum_sum_Ds^2)-((sum(cum_sum_Ds)^2)/length(rel_freq_of_digits)))
    if(pvalmethod==1)
    {
        #wrapper function for simulating the NULL distribution
        dist_U_square_H0<-simulateH0(teststatistic="usq",n=n,digits=digits,pvalsims=pvalsims)
        #calculate the p-value by determining the share of values in the
        #NULL-distribution that are larger than the calculated U_square value
        pval<-1-sum(dist_U_square_H0<=U_square)/length(dist_U_square_H0)
    }
    #make a nice S3 object of type htest
    RVAL <- list(statistic = c(U_square = U_square), p.value = pval,
                 method = "Freedman-Watson U-squared Test for Benford Distribution",
                 data.name = deparse(substitute(x)))
    class(RVAL) <- "htest"
    return(RVAL)
}

#Normed mean deviation test for Benford's distribution, first proposed as a
#descriptive test statistic by Judge and Schechter
meandigit.benftest<-function(x=NULL,digits=1,pvalmethod="asymptotic",pvalsims=10000)
{
    #some self-explanatory error checking
    if(!is.numeric(x)){stop("x must be numeric.")}
    pvalmethod <- pmatch(pvalmethod, c("asymptotic", "simulate"))
    if (is.na(pvalmethod)){stop("invalid 'pvalmethod' argument")}
    if((length(pvalsims)!=1)){stop("'pvalsims' argument takes only single integer!")}
    if((length(digits)!=1)){stop("'digits' argument takes only single integer!")}
    #get the specified number of first digits and the number of values
    first_digits<-signifd(x,digits)
    n<-length(first_digits)
    #get the empirical mean digit value
    mu_emp<-mean(first_digits)
    #get the expected mean digit value under the NULL
    mu_bed<-sum(signifd.seq(digits)*pbenf(digits))
    #get the variance of the mean digit value under the NULL
    var_bed<-sum(((signifd.seq(digits)-mu_bed)^2)*pbenf(digits))
    #normalize to get a_star
    a_star<-abs(mu_emp-mu_bed)/(max(signifd.seq(digits))-mu_bed)
    #calc pval if using the asymptotic NULL-distribution
    if(pvalmethod==1)
    {
        pval<-(1-pnorm(a_star,mean=0,sd=sqrt(var_bed/n)/(9-mu_bed)))*2
    }
    if(pvalmethod==2)
    {
        #wrapper function for simulating the NULL distribution
        dist_a_star_H0<-simulateH0(teststatistic="meandigit",n=n,digits=digits,pvalsims=pvalsims)
        #calculate the p-value by determining the share of values in the
        #NULL-distribution that are larger than the calculated a_star value
        pval<-1-sum(dist_a_star_H0<=a_star)/length(dist_a_star_H0)
        #if this were a two-sided test, the p_value would be adjusted as follows:
        #if(pval>.5){pval<- (1- pval)*2}
        #else{pval<- pval*2}
    }
    #make a nice S3 object of type htest
    RVAL <- list(statistic = c(a_star = a_star), p.value = pval,
                 method = "Judge-Schechter Normed Deviation Test for Benford Distribution",
                 data.name = deparse(substitute(x)))
    class(RVAL) <- "htest"
    return(RVAL)
}
J_stat_squ<-sign(J_stat_squ)*(J_stat_squ^2) if(pvalmethod==1) { #wrapper function for simulating the NULL distribution dist_J_stat_H0<- simulateH0(teststatistic="jpsq",n=n,digits=digits,pvalsims=pvalsims) #calculate pvalue by determeninge the amount of values in the NULL-distribution that are larger than the calculated J_stat_square value pval<-sum(dist_J_stat_H0<=J_stat_squ)/length(dist_J_stat_H0) } #make a nice S3 object of type htest RVAL <- list(statistic = c(J_stat_squ = J_stat_squ), p.value = pval, method = "JP-Square Correlation Statistic Test for Benford Distribution", data.name = deparse(substitute(x))) class(RVAL) <- "htest" return(RVAL) } # Hotelling type test for Benford's distribution first proposed by Joenssen (2015) jointdigit.benftest<-function(x = NULL, digits = 1, eigenvalues="all", tol = 1e-15, pvalmethod = "asymptotic", pvalsims = 10000) { #some self-explanitory error checking if(!is.numeric(x)){stop("x must be numeric.")} pvalmethod <- pmatch(pvalmethod, c("asymptotic"))#, "simulate" if (is.na(pvalmethod)){stop("invalid 'pvalmethod' argument")} if((length(pvalsims)!=1)){stop("'pvalsims' argument takes only single integer!")} if((length(digits)!=1)){stop("'digits' argument takes only single integer!")} #Might need this in the future decompose=TRUE #reduce the data to the specified number of first digits first_digits<-signifd(x,digits) #get the amount of values that should be tested n<-length(first_digits) #the the observed frequencies of all digits freq_of_digits<-table(c(first_digits,signifd.seq(digits)))-1 #calculate the relative frequencies rel_freq_of_digits<-freq_of_digits/n #get the expected frequencies under the NULL rel_freq_of_digits_H0<-pbenf(digits) #calculate covariance matrix under the NULL covariance_matirx<-outer(rel_freq_of_digits_H0,rel_freq_of_digits_H0,"*")*-1 diag(covariance_matirx)<-rel_freq_of_digits_H0*(1-rel_freq_of_digits_H0) #ignore multiplication by n b/c only eigenvectors are desired #covariance_matirx<-covariance_matirx*n if(decompose) { eigenval_vect<-eigen(covariance_matirx,symmetric = TRUE) eigenval_vect_result<-eigenval_vect # identify which eigenvalues = 0 eigen_to_keep<-abs(eigenval_vect$values)>tol #toss out the eigenvalues... = 0; eigenval_vect$values<-eigenval_vect$values[eigen_to_keep] eigenval_vect$vectors<-eigenval_vect$vectors[,eigen_to_keep] #determine which nonzero eigenvalues to keep if(length(eigenvalues)>0) { if(is.character(eigenvalues)) { if(length(eigenvalues)==1) { eigenvalues <- pmatch(tolower(eigenvalues), c("all","kaiser")) if(eigenvalues == 1) { eigen_to_keep<-1:length(eigenval_vect$values) } if(eigenvalues == 2) { eigen_to_keep<-which(eigenval_vect$values>=mean(eigenval_vect$values)) } } else {stop("Error: 'is.character(eigenvalues) && length(eigenvalues)!=1', use only one string!")} } else { if(is.numeric(eigenvalues)&all(eigenvalues>=0,na.rm = TRUE)) { eigen_to_keep<-eigenvalues[!is.na(eigenvalues)] eigen_to_keep<-eigen_to_keep[eigen_to_keep<=length(eigenval_vect$values)] if(length(eigen_to_keep)<=0) {stop("Error: No eigenvalues remain.")} } else {stop("Error: non string value for eigenvalues must numeric vector of eigenvalue indexes! 
No negative indexing allowed.")} } }else{stop("Error: 'length(eigenvalues)<=0'!")} #reduce to selected eigenvalues; eigenval_vect$values<-eigenval_vect$values[eigen_to_keep] eigenval_vect$vectors<-eigenval_vect$vectors[,eigen_to_keep] principle_components<-rel_freq_of_digits%*%eigenval_vect$vectors true_components_means<-rel_freq_of_digits_H0%*%eigenval_vect$vectors if(length(eigenval_vect$values)==1) { hotelling_T<-(n/eigenval_vect$values)*((principle_components-true_components_means)^2) }else{ hotelling_T<-n*(principle_components-true_components_means)%*%solve(diag(eigenval_vect$values))%*%t(principle_components-true_components_means) } deg_free<-length(principle_components) }else{ hotelling_T<-n*(rel_freq_of_digits-rel_freq_of_digits_H0)%*%solve(covariance_matirx)%*%t(rel_freq_of_digits-rel_freq_of_digits_H0) deg_free<-length(rel_freq_of_digits) } #calc pval if using the asymptotic NULL-distribution if(pvalmethod==1) { #pval<-pchisq(q = hotelling_T,df = length(principle_components)) pval<-1-pchisq(q = hotelling_T,df = deg_free) } if(pvalmethod==2)#calc pval if using the simulated NULL-distribution { #Reserved for when implemented #wrapper function for simulating the NULL distribution #dist_chisquareH0<-simulateH0(teststatistic="chisq",n=n,digits=digits,pvalsims=pvalsims) #calculate pvalue by determeninge the amount of values in the NULL-distribution that are larger than the calculated chi_square value #pval<-1-sum(dist_chisquareH0<=chi_square)/length(dist_chisquareH0) } #make a nice S3 object of type htest RVAL <- list(statistic = c(Tsquare = hotelling_T), p.value = pval, method = "Joint Digits Test", data.name = deparse(substitute(x)),eigenvalues_tested=eigen_to_keep,eigen_val_vect=eigenval_vect_result) class(RVAL) <- "htest" return(RVAL) } ### Aditional functions provided ## returns first "digits" significant digits of numerical vector x signifd<-function(x=NULL, digits=1) { #some self-explanitory error checking if(!is.numeric(x)){stop("x needs to be numeric.")} #calculate the first significant digits x<-abs(x) return(trunc((10^((floor(log10(x))*-1)+digits-1))*x)) } ##returns the sequence of all possible leading digits for "digits" leading digits #ie 1-> 1:9; 2-> 10:99; 3-> 100:999 etc. 
##returns the sequence of all possible leading digits for "digits" leading digits
#ie 1-> 1:9; 2-> 10:99; 3-> 100:999 etc.
signifd.seq <- function(digits = 1)
{return(seq(from = 10^(digits - 1), to = (10^(digits)) - 1))}

# returns complete cumulative distribution function of Benford distribution for the given amount of significant digits
qbenf <- function(digits = 1)
{
	return(cumsum(pbenf(digits)))
}

# returns complete probability distribution function of Benford distribution for the given amount of significant digits
pbenf <- function(digits = 1)
{
	pbenf_for_seq <- function(leaddigit = 10)
	{
		return(log10(1 + (1/leaddigit)))
	}
	benf_table <- table(signifd.seq(digits)) - 1
	benf_table <- benf_table + sapply(signifd.seq(digits), FUN = pbenf_for_seq)
	return(benf_table)
}

#returns an n-long sample of random numbers satisfying Benford's law
rbenf <- function(n)
{
	return(10^(runif(n)))
}

#simulates the H0-distribution of various tests offered in BenfordTests
simulateH0 <- function(teststatistic = "chisq", n = 10, digits = 1, pvalsims = 10)
{
	teststatistic <- match.arg(arg = teststatistic,
	                           choices = c("chisq", "edist", "jpsq", "ks", "mdist", "meandigit", "usq"),
	                           several.ok = FALSE)
	if(teststatistic == "chisq")
	{
		H0_chi_square <- rep(0, pvalsims)
		H0_chi_square <- .C("compute_H0_chi_square", H0_chi_square = as.double(H0_chi_square),
		                    digits = as.integer(digits), pbenf = as.double(pbenf(digits)),
		                    qbenf = as.double(qbenf(digits)), n = as.integer(n),
		                    n_sim = as.integer(pvalsims))$H0_chi_square
		return(H0_chi_square)
	}
	if(teststatistic == "edist")
	{
		H0_dstar <- rep(0, pvalsims)
		H0_dstar <- .C("compute_H0_dstar", H0_dstar = as.double(H0_dstar),
		               digits = as.integer(digits), pbenf = as.double(pbenf(digits)),
		               qbenf = as.double(qbenf(digits)), n = as.integer(n),
		               n_sim = as.integer(pvalsims))$H0_dstar
		return(H0_dstar)
	}
	if(teststatistic == "jpsq")
	{
		H0_J_stat <- rep(0, pvalsims)
		H0_J_stat <- .C("compute_H0_J_stat", H0_J_stat = as.double(H0_J_stat),
		                digits = as.integer(digits), pbenf = as.double(pbenf(digits)),
		                qbenf = as.double(qbenf(digits)), n = as.integer(n),
		                n_sim = as.integer(pvalsims))$H0_J_stat
		return(H0_J_stat)
	}
	if(teststatistic == "ks")
	{
		H0_KSD <- rep(0, pvalsims)
		H0_KSD <- .C("compute_H0_KSD", H0_KSD = as.double(H0_KSD),
		             digits = as.integer(digits), pbenf = as.double(pbenf(digits)),
		             qbenf = as.double(qbenf(digits)), n = as.integer(n),
		             n_sim = as.integer(pvalsims))$H0_KSD
		return(H0_KSD)
	}
	if(teststatistic == "mdist")
	{
		H0_mstar <- rep(0, pvalsims)
		H0_mstar <- .C("compute_H0_mstar", H0_mstar = as.double(H0_mstar),
		               digits = as.integer(digits), pbenf = as.double(pbenf(digits)),
		               qbenf = as.double(qbenf(digits)), n = as.integer(n),
		               n_sim = as.integer(pvalsims))$H0_mstar
		return(H0_mstar)
	}
	if(teststatistic == "meandigit")
	{
		H0_astar <- rep(0, pvalsims)
		H0_astar <- .C("compute_H0_astar", H0_astar = as.double(H0_astar),
		               digits = as.integer(digits), pbenf = as.double(pbenf(digits)),
		               qbenf = as.double(qbenf(digits)), n = as.integer(n),
		               n_sim = as.integer(pvalsims))$H0_astar
		return(H0_astar)
	}
	if(teststatistic == "usq")
	{
		H0_U_square <- rep(0, pvalsims)
		H0_U_square <- .C("compute_H0_U_square", H0_U_square = as.double(H0_U_square),
		                  digits = as.integer(digits), pbenf = as.double(pbenf(digits)),
		                  qbenf = as.double(qbenf(digits)), n = as.integer(n),
		                  n_sim = as.integer(pvalsims))$H0_U_square
		return(H0_U_square)
	}
}
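
## Quick sanity check (illustrative only): pbenf(1) reproduces Benford's
## first-digit law P(d) = log10(1 + 1/d) and qbenf(1) its CDF; simulateH0()
## additionally requires the package's compiled C routines.
# round(pbenf(1), 4)
# all.equal(as.numeric(pbenf(1)), log10(1 + 1/(1:9)))  # TRUE
# qbenf(1)[9]                                          # 1, by construction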
#a method for graphically analyzing the first digits.
#Also gives pvalues for investigating each individual digit
signifd.analysis <- function(x = NULL, digits = 1, graphical_analysis = TRUE, freq = FALSE,
                             alphas = 20, tick_col = "red", ci_col = "darkgreen", ci_lines = c(.05))
{
	if(length(alphas) == 1)
	{
		if(alphas > 1)
		{
			alphas <- seq(from = 0, to = .5, length.out = alphas + 2)[-c(1, alphas + 2)]
		}
	}
	n <- length(x)
	first_digits <- signifd(x, digits)
	pdf_benf <- pbenf(digits)
	freq_of_digits <- table(c(first_digits, signifd.seq(digits))) - 1
	E_vals <- pdf_benf * n
	Var_vals <- pdf_benf * n * (1 - pdf_benf)
	Cov_vals <- outer(pdf_benf, pdf_benf) * -1 * n
	diag(Cov_vals) <- Var_vals
	pval <- rep(0, length(pdf_benf))
	for(i in 1:length(pval))
	{
		pval[i] <- pnorm(q = freq_of_digits[i], mean = E_vals[i], sd = sqrt(Var_vals[i]))
		if(pval[i] > .5)
		{pval[i] <- (1 - pval[i]) * 2}else{pval[i] <- pval[i] * 2}
	}
	if(graphical_analysis)
	{
		mids <- seq(from = 0, to = 1, length.out = length(E_vals) + 2)
		ci_line_length <- (mids[2] - mids[1]) * (2/5)
		mids <- mids[-c(1, length(mids))]
		## number formatting function
		numformat <- function(val, trailing = 4)
		{
			sub("^(-?)0.", "\\1.", sprintf(paste(sep = "", "%.", trailing, "f"), val))
		}
		trailing <- 0
		ci_cols <- colorRampPalette(colors = c("white", ci_col), interpolate = "linear")(length(alphas) + 1)[-1]
		ci_cols <- c(ci_cols, rev(ci_cols))
		alphas <- c(alphas/2, 0.5, rev(1 - (alphas/2)))
		cis <- sapply(alphas, FUN = qnorm, mean = E_vals, sd = sqrt(Var_vals))
		CIs <- t(cis)
		colnames(CIs) <- signifd.seq(digits)
		rownames(CIs) <- alphas
		if(!freq)
		{
			cis <- cis/n
			freq_of_digits <- freq_of_digits/n
			#if(sum(cis>1)>0){warning("n is small, normal approximation may not be accurate!\nSome confidence intervals > 1.",call.=FALSE)}
			cis[cis > 1] <- 1
			trailing <- 4
		}
		results <- list(summary = rbind(freq = freq_of_digits, pvals = pval), CIs = CIs)
		#if(sum(cis<0)>0){warning("n is small, normal approximation may not be accurate!\n Some confidence intervals <0.",call.=FALSE)}
		cis[cis < 0] <- 0
		lr_mid <- cbind(mids - ci_line_length, mids + ci_line_length)
		plot(x = 0, y = 0, xlim = c(0, 1), ylim = c(0, max(cis) * 1.3), type = "n",
		     axes = FALSE, xlab = "summary", ylab = "")
		for(i in 1:dim(cis)[1])
		{
			for(j in 1:(dim(cis)[2] - 1))
			{
				polygon(x = lr_mid[i, c(1, 1, 2, 2)], y = cis[i, c(j, j + 1, j + 1, j)],
				        col = ci_cols[j], border = FALSE)
			}
		}
		dim_cis <- dim(cis)
		dim(cis) <- NULL
		posy <- seq(from = 0, to = max(cis) * 1.3, length.out = 10)
		axis(side = 2, at = round(posy - 5 * (10^-(digits + 1)), digits), las = 1)
		if(any(ci_lines != FALSE))
		{
			if((!is.logical(ci_lines)) & (all(ci_lines < 1) & all(ci_lines > 0)))
			{
				ci_lines <- c(ci_lines/2, 0.5, rev(1 - (ci_lines/2)))
				cis <- sapply(ci_lines, FUN = qnorm, mean = E_vals, sd = sqrt(Var_vals))
				if(!freq)
				{cis <- cis/n}
				CIs <- t(cis)
				colnames(CIs) <- signifd.seq(digits)
				rownames(CIs) <- ci_lines
				results$CIs <- CIs
				dim_cis <- dim(cis)
				dim(cis) <- NULL
				if(!freq)
				{cis[cis > 1] <- 1}
				cis[cis < 0] <- 0
				j <- 1
				for(i in 1:length(cis))
				{
					lines(lr_mid[j, ], rep(cis[i], 2))
					if(j == dim(lr_mid)[1]) {j <- 1} else {j <- j + 1}
				}
			}
			else
			{
				j <- 1
				for(i in 1:length(cis))
				{
					lines(lr_mid[j, ], rep(cis[i], 2))
					if(j == dim(lr_mid)[1]) {j <- 1} else {j <- j + 1}
				}
			}
		}
		points(mids, freq_of_digits, col = tick_col, pch = 3)
		if(digits == 1)
		{
			mtext(c("digit: ", names(E_vals)), side = 1, line = 0, at = c(-1 * ci_line_length, mids))
			if(freq){mtext(c("freq: ", numformat(freq_of_digits, trailing)), side = 1, line = 1, at = c(-1 * ci_line_length, mids))}
			else{mtext(c("rel freq: ", numformat(freq_of_digits, trailing)), side = 1, line = 1, at = c(-1 * ci_line_length, mids))}
			mtext(c("pval: ", numformat(pval)), side = 1, line = 2, at = c(-1 * ci_line_length, mids))
		}
		dim(cis) <- dim_cis
		abline(h = 0)
	}
	if(!graphical_analysis)
	{
		if(any(ci_lines != FALSE) & (!is.logical(ci_lines)) & (all(ci_lines < 1) & all(ci_lines > 0)))
		{alphas <- c(ci_lines/2, 0.5, rev(1 - (ci_lines/2)))}
		else
		{alphas <- c(alphas/2, 0.5, rev(1 - (alphas/2)))}
		cis <- sapply(alphas, FUN = qnorm, mean = E_vals, sd = sqrt(Var_vals))
		if(!freq)
		{
			cis <- cis/n
			freq_of_digits <- freq_of_digits/n
			#if(sum(cis>1)>0){warning("n is small, normal approximation may not be accurate!\nSome confidence intervals > 1.",call.=FALSE)}
			#cis[cis>1]<-1
		}
		#if(sum(cis<0)>0){warning("n is small, normal approximation may not be accurate!\n Some confidence intervals <0.",call.=FALSE)}
		#cis[cis<0]<-0
		CIs <- t(cis)
		colnames(CIs) <- signifd.seq(digits)
		rownames(CIs) <- alphas
		results <- list(summary = rbind(freq = freq_of_digits, pvals = pval), CIs = CIs)
		return(results)
	}
	return(results)
}
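
## Illustrative call (assumes an attached package and an open graphics device):
## draws per-digit confidence regions and returns two-sided per-digit p-values.
# res <- signifd.analysis(rbenf(500), digits = 1, ci_lines = c(.05))
# res$summary  # relative frequencies and p-values per digit
# res$CIs      # normal-approximation confidence bounds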
# -- end of BenfordTests/R/Benford_tests.R --
.onAttach <- function(...)
{
	if (!interactive()){return()}
	the_startup_message <- paste(sep = "",
		"This is BenfordTests version ", utils::packageVersion("BenfordTests"), ".\n",
		"Academic users, please be sure to use: citation(\"BenfordTests\").\n",
		"Feel free to contact the Maintainer about licensing, feature requests, etc.\n",
		"Use suppressPackageStartupMessages to eliminate package startup messages.")
	packageStartupMessage(the_startup_message)
}
# -- end of BenfordTests/R/zzz.R --
#' Bayesian exponential random graph models #' #' \code{Bergm} provides a range of tools to analyse #' Bayesian exponential random graph models using advanced #' computational methods. #' #' @name Bergm-package #' @aliases Bergm #' @import ergm #' @import coda #' @import network #' @import mvtnorm #' @import MCMCpack #' @import Matrix #' @import statnet.common #' @import Rglpk #' @importFrom matrixcalc is.positive.definite #' @importFrom grDevices dev.new #' @importFrom graphics axis boxplot lines par plot title #' @importFrom stats cov dbinom density glm optim quantile runif simulate var vcov #' @importFrom utils capture.output setTxtProgressBar txtProgressBar #' @docType package NULL
# -- end of Bergm/R/Bergm-package.R --
#' Parameter estimation for Bayesian ERGMs #' #' Function to fit Bayesian exponential random graphs models #' using the approximate exchange algorithm. #' #' @param formula formula; #' an \code{\link[ergm]{ergm}} formula object, #' of the form <network> ~ <model terms> #' where <network> is a \code{\link[network]{network}} object #' and <model terms> are \code{ergm-terms}. #' #' @param prior.mean vector; #' mean vector of the multivariate Normal prior. #' By default set to a vector of 0's. #' #' @param prior.sigma square matrix; #' variance/covariance matrix for the multivariate Normal prior. #' By default set to a diagonal matrix with every diagonal entry equal to 100. #' #' @param burn.in count; #' number of burn-in iterations for every chain of the population. #' #' @param main.iters count; #' number of iterations for every chain of the population. #' #' @param aux.iters count; #' number of auxiliary iterations used for network simulation. #' #' @param nchains count; #' number of chains of the population MCMC. #' By default set to twice the model dimension (number of model terms). #' #' @param gamma scalar; #' parallel adaptive direction sampling move factor. #' #' @param V.proposal count; #' diagonal entry for the multivariate Normal proposal. #' By default set to 0.0025. #' #' @param startVals vector; #' optional starting values for the parameter estimation. #' #' @param offset.coef vector; #' A vector of coefficients for the offset terms. #' #' @param ... additional arguments, to be passed to lower-level functions. #' #' @references #' Caimo, A. and Friel, N. (2011), "Bayesian Inference for Exponential Random Graph Models," #' Social Networks, 33(1), 41-55. \url{https://arxiv.org/abs/1007.5192} #' #' Caimo, A. and Friel, N. (2014), "Bergm: Bayesian Exponential Random Graphs in R," #' Journal of Statistical Software, 61(2), 1-25. \url{https://www.jstatsoft.org/article/view/v061i02} #' #' @examples #' \dontrun{ #' # Load the florentine marriage network #' data(florentine) #' #' # Posterior parameter estimation: #' p.flo <- bergm(flomarriage ~ edges + kstar(2), #' burn.in = 50, #' aux.iters = 500, #' main.iters = 3000, #' gamma = 1.2) #' #' # Posterior summaries: #' summary(p.flo) #' } #' @export bergm <- function(formula, prior.mean = NULL, prior.sigma = NULL, burn.in = 100, main.iters = 1000, aux.iters = 1000, nchains = NULL, gamma = 0.5, V.proposal = 0.0025, startVals = NULL, offset.coef = NULL, ...) { y <- ergm.getnetwork(formula) model <- ergm_model(formula, y) specs <- unlist(sapply(model$terms, '[', 'coef.names'), use.names = FALSE) sy <- summary(formula) dim <- length(sy) if (dim == 1) stop("Model dimension must be greater than 1.") if (any(is.na(as.matrix.network(y)))) print("Network has missing data. Use bergmM() instead.") if (!is.null(offset.coef)) { if (any(offset.coef %in% c(Inf, -Inf, NaN, NA))) { stop("Inf, -Inf, NaN, NA are not allowed for offset.coef. \n If Inf or -Inf are required use large values instead (e.g., 1000 or -1000).") } } y0 <- simulate(formula, coef = rep(0, dim), nsim = 1, control = control.simulate(MCMC.burnin = 1, # !!! 
MCMC.interval = 1), return.args = "ergm_state")$object control <- control.ergm(MCMC.burnin = aux.iters, MCMC.interval = 1, MCMC.samplesize = 1) if (!is.null(control$init)) { if (length(control$init) != length(model$etamap$offsettheta)) { stop(paste("Invalid starting parameter vector control$init:", "wrong number of parameters.", "If you are passing output from another ergm run as control$init,", "in a model with curved terms, see help(enformulate.curved).")) } } else {control$init <- rep(NA, length(model$etamap$offsettheta))} if (!is.null(offset.coef)) { if (length(control$init[model$etamap$offsettheta]) != length(offset.coef)) { stop("Invalid offset parameter vector offset.coef: ", "wrong number of parameters: expected ", length(control$init[model$etamap$offsettheta]), " got ", length(offset.coef), ".")} control$init[model$etamap$offsettheta] <- offset.coef } if (any(is.na(control$init) & model$etamap$offsettheta)) { stop("The model contains offset terms whose parameter values have not been specified:", paste.and(specs[is.na(control$init) | model$offsettheta]), ".", sep = "") } proposal <- ergm_proposal(object = ~., constraints = ~., arguments = control$MCMC.prop.args, nw = y) if (is.null(prior.mean)) prior.mean <- rep(0, dim) if (is.null(prior.sigma)) prior.sigma <- diag(100, dim, dim) if (is.null(nchains)) nchains <- 2 * dim S.prop <- diag(V.proposal, dim, dim) Theta <- array(NA, c(main.iters, dim, nchains)) if (is.null(startVals)) { suppressMessages(mple <- ergm(formula, estimate = "MPLE", verbose = FALSE, offset.coef = offset.coef) |> stats::coef()) theta <- matrix(mple + runif(dim * nchains, min = -0.1, max = 0.1), dim, nchains) } else { theta <- matrix(startVals + runif(dim * nchains, min = -0.1, max = 0.1), dim, nchains) } theta[model$etamap$offsettheta,] <- offset.coef theta1 <- rep(NA, dim) tot.iters <- burn.in + main.iters clock.start <- Sys.time() message(" > MCMC start") for (k in 1:tot.iters) { for (h in 1:nchains) { theta1 <- theta[, h] + gamma * apply(theta[, sample(seq(1, nchains)[-h], 2)], 1, diff) + rmvnorm(1, sigma = S.prop)[1,] theta1[model$etamap$offsettheta] <- offset.coef delta <- ergm_MCMC_sample(y0, theta = theta1, control = control)$stats[[1]][1,] - sy pr <- dmvnorm(rbind(theta1, theta[, h]), mean = prior.mean, sigma = prior.sigma, log = TRUE) beta <- (theta[, h] - theta1) %*% delta + pr[1] - pr[2] if (beta >= log(runif(1))) theta[, h] <- theta1 } if (k > burn.in) Theta[k - burn.in, , ] <- theta } clock.end <- Sys.time() runtime <- difftime(clock.end, clock.start) FF <- mcmc(apply(Theta, 2, cbind)) AR <- round(1 - rejectionRate(FF)[1], 2) names(AR) <- NULL ess <- round(effectiveSize(FF), 0) names(ess) <- specs out = list(Time = runtime, formula = formula, specs = specs, dim = dim, Theta = FF, AR = AR, ess = ess) class(out) <- "bergm" return(out) }
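
## Post-processing sketch (illustrative; assumes a fitted object `p.flo` as in
## the roxygen example above). The posterior sample is a coda 'mcmc' object,
## so standard coda tools apply directly.
# plot(p.flo)                     # Bergm's MCMC diagnostics plots
# p.flo$AR                        # overall acceptance rate
# p.flo$ess                       # effective sample size per model term
# coda::HPDinterval(p.flo$Theta)  # highest posterior density intervals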
# -- end of Bergm/R/bergm.R --
#' Calibrating misspecified Bayesian ERGMs #' #' Function to transform a sample from the pseudo-posterior #' to one that is approximately sampled from the intractable #' posterior distribution. #' #' @param formula formula; an \code{\link[ergm]{ergm}} formula object, #' of the form <network> ~ <model terms> #' where <network> is a \code{\link[network]{network}} object #' and <model terms> are \code{ergm-terms}. #' #' @param prior.mean vector; mean vector of the multivariate Normal prior. #' By default set to a vector of 0's. #' #' @param prior.sigma square matrix; variance/covariance matrix for the multivariate Normal prior. #' By default set to a diagonal matrix with every diagonal entry equal to 100. #' #' @param burn.in count; number of burn-in iterations at the beginning of an MCMC run for the pseudo-posterior estimation. #' #' @param main.iters count; number of MCMC iterations after burn-in for the pseudo-posterior estimation. #' #' @param aux.iters count; number of auxiliary iterations used for drawing the first network from the ERGM likelihood (Robbins-Monro). See \code{\link[ergm]{control.simulate.formula}}. #' #' @param V.proposal count; diagonal entry for the multivariate Normal proposal. #' By default set to 1.5. #' #' @param thin count; thinning interval used in the simulation for the pseudo-posterior estimation. The number of MCMC iterations must be divisible by this value. #' #' @param rm.iters count; number of iterations for the Robbins-Monro stochastic approximation algorithm. #' #' @param rm.a scalar; constant for sequence alpha_n (Robbins-Monro). #' #' @param rm.alpha scalar; noise added to gradient (Robbins-Monro). #' #' @param n.aux.draws count; number of auxiliary networks drawn from the ERGM likelihood (Robbins-Monro). See \code{\link[ergm]{control.simulate.formula}}. #' #' @param aux.thin count; number of auxiliary iterations between network draws after the first network is drawn (Robbins-Monro). See \code{\link[ergm]{control.simulate.formula}}. #' #' @param estimate If "MLE" (the default), then an approximate maximum likelihood estimator is used as a starting point in the Robbins-Monro algorithm. If "CD" , the Monte-Carlo contrastive divergence estimate is returned. See \code{\link[ergm]{ergm}}. #' #' @param seed integer; seed for the random number generator. See \code{set.seed}. #' #' @param ... Additional arguments, to be passed to the ergm function. See \code{\link[ergm]{ergm}}. #' #' @references #' Bouranis, L., Friel, N., & Maire, F. (2017). Efficient Bayesian inference for exponential #' random graph models by correcting the pseudo-posterior distribution. #' Social Networks, 50, 98-108. 
\url{https://arxiv.org/abs/1510.00934} #' #' @examples #' \dontrun{ #' # Load the florentine marriage network #' data(florentine) #' #' # Calibrated pseudo-posterior: #' cpp.flo <- bergmC(flomarriage ~ edges + kstar(2), #' aux.iters = 500, #' burn.in = 500, #' main.iters = 10000, #' V.proposal = 2.5) #' #' # Posterior summaries: #' summary(cpp.flo) #'} #' #' @export #' bergmC <- function(formula, prior.mean = NULL, prior.sigma = NULL, burn.in = 1e04, main.iters = 4e04, aux.iters = 3000, V.proposal = 1.5, thin = 1, rm.iters = 500, rm.a = 0.001, rm.alpha = 0, n.aux.draws = 400, aux.thin = 50, estimate = c("MLE","CD"), seed = 1, ...){ y <- ergm.getnetwork(formula) model <- ergm_model(formula, y) specs <- unlist(sapply(model$terms, '[', 'coef.names'), use.names = FALSE) sy <- summary(formula) dim <- length(sy) if (is.null(prior.mean)) prior.mean <- rep(0, dim) if (is.null(prior.sigma)) prior.sigma <- diag(100, dim, dim) control <- control.ergm(MCMC.burnin = aux.iters, MCMC.interval = aux.thin, MCMC.samplesize = n.aux.draws, seed = seed) estimate <- match.arg(estimate) logpp_short <- function(theta, Y, X, weights, prior.mean, prior.sigma, optimPL){ xtheta <- c(X %*% theta) log.like <- sum(dbinom(weights * Y, weights, exp(xtheta) / (1 + exp(xtheta)), log = TRUE)) log.prior <- dmvnorm(theta, mean = prior.mean, prior.sigma, log = TRUE) if (optimPL == FALSE) out <- log.like + log.prior else out <- -(log.like + log.prior) return(out) } score_logPP <- function(theta, Y, X, weights, prior.mean, prior.sigma, optimPL){ score.log.prior <- -solve(prior.sigma, (theta - prior.mean)) p <- c( exp(as.matrix(X) %*% theta ) / ( 1 + exp(as.matrix(X) %*% theta) ) ) deriv.logpl <- c( t(X) %*% (weights*(Y - p)) ) if (optimPL == TRUE) out <- deriv.logpl + score.log.prior return(out) } rm_ergm <- function(formula, prior.mean, prior.sigma, y, model, sy, dim, control, rm.iters, rm.a, rm.alpha, init, aux.iters, n.aux.draws, aux.thin){ theta <- array(0, c(dim = rm.iters, dim)) theta[1, ] <- init for (i in 2:rm.iters) { y0 <- simulate(formula, coef = theta[i - 1,], nsim = 1, control = control.simulate(MCMC.burnin = 1, MCMC.interval = 1), return.args = "ergm_state")$object z <- as.matrix( ergm_MCMC_sample(y0, theta = theta[i - 1,], stats0 = sy, control = control)$stats[[1]] ) z <- sweep(z, 2, sy, '-') estgrad <- -apply(z, 2, mean) - solve(prior.sigma, (theta[i - 1,] - prior.mean)) theta[i, ] <- theta[i - 1, ] + ((rm.a/i) * (rm.alpha + estgrad)) } out <- list(Theta = theta) return(out) } mplesetup <- ergmMPLE(formula) data.glm.initial <- cbind(mplesetup$response, mplesetup$weights, mplesetup$predictor) colnames(data.glm.initial) <- c("responses", "weights", colnames(mplesetup$predictor)) Vcov.MPLE <- vcov(glm(mplesetup$response ~. 
- 1, data = data.frame(mplesetup$predictor), weights = mplesetup$weights, family = "binomial")) Sigma.proposal <- diag(rep(V.proposal, dim), dim, dim) S.prop <- Sigma.proposal %*% solve((solve(prior.sigma) + solve(Vcov.MPLE))) %*% Sigma.proposal suppressMessages(mple <- ergm(formula, estimate = "MPLE", verbose = FALSE) ) message(" > MCMC start") clock.start <- Sys.time() capture.output(unadj.sample <- MCMCmetrop1R(logpp_short, theta.init = mple$coefficients, Y = mplesetup$response, X = mplesetup$predictor, weights = mplesetup$weights, optimPL = FALSE, prior.mean = prior.mean, prior.sigma = prior.sigma, V = S.prop, thin = thin, mcmc = main.iters, burnin = burn.in, seed = seed, logfun = TRUE )) message(" > MAP estimation") suppressMessages(rob.mon.init <- ergm(formula, estimate = estimate, verbose = FALSE, control = control.ergm(seed = seed), ...)) theta.star <- rm_ergm(formula, rm.iters = rm.iters, rm.a = rm.a, rm.alpha = rm.alpha, init = rob.mon.init$coefficients, aux.iters = aux.iters, n.aux.draws = n.aux.draws, aux.thin = aux.thin, prior.mean = prior.mean, prior.sigma = prior.sigma, y = y, model = model, sy = sy, dim = dim, control = control ) theta.PLstar <- optim(par = summary(unadj.sample)$statistics[,1], fn = logpp_short, gr = score_logPP, lower = rob.mon.init$coefficients - 3, upper = rob.mon.init$coefficients + 3, Y = mplesetup$response, X = mplesetup$predictor, weights = mplesetup$weights, optimPL = TRUE, prior.mean = prior.mean, prior.sigma = prior.sigma, hessian = TRUE, method = "L-BFGS-B") message(" > Curvature Adjustment") y0 <- simulate(formula, coef = theta.star$Theta[rm.iters, ], nsim = 1, control = control.simulate(MCMC.burnin = 1, MCMC.interval = 1), return.args = "ergm_state")$object z <- as.matrix( ergm_MCMC_sample(y0, theta = theta.star$Theta[rm.iters, ], stats0 = sy, control = control)$stats[[1]] ) sim.samples <- z Hessian.post.truelike <- -cov(sim.samples) - solve(prior.sigma) chol.true.posterior <- chol(-Hessian.post.truelike) chol.PL.posterior <- chol(-theta.PLstar$hessian) W <- solve(chol.PL.posterior) %*% chol.true.posterior V <- solve(W) corrected.sample <- t(apply(data.frame(unadj.sample), 1, function(x) { c(V %*% (unlist(x) - theta.PLstar$par ) + theta.star$Theta[rm.iters, ])})) clock.end <- Sys.time() runtime <- difftime(clock.end, clock.start) FF <- mcmc(corrected.sample) ess <- round(effectiveSize(FF), 0) names(ess) <- specs AR <- round(1 - rejectionRate(FF)[1], 2) names(AR) <- NULL out <- list(Theta_star = theta.star$Theta[rm.iters, ], Theta = FF, Time = runtime, formula = formula, AR = AR, ess = ess, dim = dim, specs = specs) class(out) <- "bergm" return(out) }
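
## Illustrative notes (assume `cpp.flo` fitted as in the roxygen example):
## `Theta` holds the calibrated posterior sample, while `Theta_star` is the
## Robbins-Monro MAP-type estimate around which the sample was corrected.
# cpp.flo$Theta_star
# summary(cpp.flo)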
# -- end of Bergm/R/bergmC.R --
#' Parameter estimation for Bayesian ERGMs under missing data #' #' Function to fit Bayesian exponential random graphs models under missing data #' using the approximate exchange algorithm. #' #' @param formula formula; an \code{\link[ergm]{ergm}} formula object, #' of the form <network> ~ <model terms> #' where <network> is a \code{\link[network]{network}} object #' and <model terms> are \code{ergm-terms}. #' #' @param burn.in count; number of burn-in iterations for every chain of the population. #' #' @param main.iters count; number of iterations for every chain of the population. #' #' @param aux.iters count; number of auxiliary iterations used for network simulation. #' #' @param prior.mean vector; mean vector of the multivariate Normal prior. #' By default set to a vector of 0's. #' #' @param prior.sigma square matrix; variance/covariance matrix for the multivariate Normal prior. #' By default set to a diagonal matrix with every diagonal entry equal to 100. #' #' @param nchains count; number of chains of the population MCMC. #' By default set to twice the model dimension (number of model terms). #' #' @param gamma scalar; parallel adaptive direction sampling move factor. #' #' @param V.proposal count; diagonal entry for the multivariate Normal proposal. #' By default set to 0.0025. #' #' @param seed count; #' random number seed for the Bergm estimation. #' #' @param startVals vector; #' optional starting values for the parameter estimation. #' #' @param offset.coef vector; #' A vector of coefficients for the offset terms. #' #' @param nImp count; #' number of imputed networks to be returned. If null, no imputed network will be returned. #' #' @param missingUpdate count; #' number of tie updates in each imputation step. #' By default equal to number of missing ties. #' Smaller numbers increase speed. Larger numbers lead to better sampling. #' #' @param ... additional arguments, to be passed to lower-level functions. #' #' @references #' Caimo, A. and Friel, N. (2011), "Bayesian Inference for Exponential Random Graph Models," #' Social Networks, 33(1), 41-55. \url{https://arxiv.org/abs/1007.5192} #' #' Caimo, A. and Friel, N. (2014), "Bergm: Bayesian Exponential Random Graphs in R," #' Journal of Statistical Software, 61(2), 1-25. \url{https://www.jstatsoft.org/v61/i02} #' #' Koskinen, J.H., Robins, G.L., Pattison, P.E. (2010), "Analysing exponential #' random graph (p-star) models with missing data using Bayesian data augmentation," #' Statistical Methodology 7(3), 366-384. #' #' Krause, R.W., Huisman, M., Steglich, C., Snijders, T.A. (2020), "Missing data in #' cross-sectional networks-An extensive comparison of missing data treatment methods", #' Social Networks 62: 99-112. #' #' @examples #' \dontrun{ #' # Load the florentine marriage network #' data(florentine) #' #' # Create missing data #' set.seed(14021994) #' n <- dim(flomarriage[, ])[1] #' missNode <- sample(1:n, 1) #' flomarriage[missNode, ] <- NA #' flomarriage[, missNode] <- NA #' #' # Posterior parameter estimation: #' m.flo <- bergmM(flomarriage ~ edges + kstar(2), #' burn.in = 50, #' aux.iters = 500, #' main.iters = 1000, #' gamma = 1.2, #' nImp = 5) #' #' # Posterior summaries: #' summary(m.flo) #'} #' @export bergmM <- function(formula, burn.in = 100, main.iters = 1000, aux.iters = 1000, prior.mean = NULL, prior.sigma = NULL, nchains = NULL, gamma = 0.5, V.proposal = 0.0025, seed = NULL, startVals = NULL, offset.coef = NULL, nImp = NULL, missingUpdate = NULL, ...) 
{ if (is.null(seed)) set.seed(sample(1:999, 1)) else set.seed(seed) y <- ergm.getnetwork(formula) model <- ergm_model(formula, y) specs <- unlist(sapply(model$terms, '[', 'coef.names'), use.names = FALSE) sy <- summary(formula) dim <- length(sy) if (dim == 1) {stop("Model dimension must be greater than 1")} if (!is.null(offset.coef)) { if (any(offset.coef %in% c(Inf,-Inf,NaN,NA))) { stop("Inf,-Inf,NaN,NA are not allowed for offset.coef. \n If Inf or -Inf are required use large values instead (e.g., 1000 or -1000).") } } if (!any(is.na(as.matrix.network(y)))) { print("Network has no missing data. Use bergm() for faster estimation instead.") } impNets <- NULL if (!is.null(nImp)) { nImp <- max(0, min(nImp, main.iters)) thinImp <- as.integer(main.iters/nImp) impIter <- 1 impNets <- vector("list", nImp) } missingTies <- matrix(0, y$gal$n, y$gal$n) missingTies[is.na(as.matrix.network(y))] <- 1 missingTies <- as.edgelist(as.network(missingTies), n = y$gal$n) if (is.null(missingUpdate)) { missingUpdate <- sum(is.na(as.matrix.network(y))) } impNet <- y f <- as.character(formula) currentFormula <- formula(paste("impNet", f[3:length(f)], sep = " ~ ")) y0 <- simulate(currentFormula, coef = rep(0, dim), nsim = 1, control = control.simulate(MCMC.burnin = 1, # !!! MCMC.interval = 1), return.args = "ergm_state")$object control <- control.ergm(MCMC.burnin = aux.iters, MCMC.interval = 1, MCMC.samplesize = 1) if (!is.null(control$init)) { if (length(control$init) != length(model$etamap$offsettheta)) { stop(paste("Invalid starting parameter vector control$init:", "wrong number of parameters.", "If you are passing output from another ergm run as control$init,", "in a model with curved terms, see help(enformulate.curved).")) } } else { control$init <- rep(NA, length(model$etamap$offsettheta)) } if (!is.null(offset.coef)) { if (length(control$init[model$etamap$offsettheta]) != length(offset.coef)) { stop("Invalid offset parameter vector offset.coef: ", "wrong number of parameters: expected ", length(control$init[model$etamap$offsettheta]), " got ", length(offset.coef), ".")} control$init[model$etamap$offsettheta] <- offset.coef } if (any(is.na(control$init) & model$etamap$offsettheta)) { stop("The model contains offset terms whose parameter values have not been specified:", paste.and(specs[is.na(control$init) | model$offsettheta]), ".", sep = "") } proposal <- ergm_proposal(object = ~., constraints = ~., arguments = control$MCMC.prop.args, nw = y) if (is.null(prior.mean)) prior.mean <- rep(0, dim) if (is.null(prior.sigma)) prior.sigma <- diag(100, dim, dim) if (is.null(nchains)) nchains <- 2 * dim S.prop <- diag(V.proposal, dim, dim) Theta <- array(NA, c(main.iters, dim, nchains)) if (is.null(startVals)) { suppressMessages(mple <- ergm(formula, estimate = "MPLE", verbose = FALSE, offset.coef = offset.coef) |> stats::coef()) theta <- matrix(mple + runif(dim * nchains, min = -0.1, max = 0.1), dim, nchains) } else { theta <- matrix(startVals + runif(dim * nchains, min = -0.1, max = 0.1), dim, nchains) } theta[model$etamap$offsettheta,] <- offset.coef acc.counts <- rep(0L, nchains) theta1 <- rep(NA, dim) tot.iters <- burn.in + main.iters clock.start <- Sys.time() message(" > MCMC start") for (k in 1:tot.iters) { for (h in 1:nchains) { theta1 <- theta[, h] + gamma * apply(theta[, sample(seq(1, nchains)[-h], 2)], 1, diff) + rmvnorm(1, sigma = S.prop)[1,] theta1[model$etamap$offsettheta] <- offset.coef delta <- ergm_MCMC_sample(y0, theta = theta1, control = control)$stats[[1]][1,] - sy pr <- dmvnorm(rbind(theta1, 
theta[, h]), mean = prior.mean, sigma = prior.sigma, log = TRUE) beta <- (theta[, h] - theta1) %*% delta + pr[1] - pr[2] if (beta >= log(runif(1))) { theta[, h] <- theta1 if (k > burn.in) { acc.counts[h] <- acc.counts[h] + 1 } impNet <- simulate(currentFormula, coef = theta1, output = "network", basis = y, constraints = ~fixallbut(missingTies), nsim = 1, control = control.simulate( MCMC.burnin = missingUpdate)) y0 <- simulate(currentFormula, coef = rep(0, dim), nsim = 1, control = control.simulate(MCMC.burnin = 1, # !!! MCMC.interval = 1), return.args = "ergm_state")$object } } if (k > burn.in) Theta[k - burn.in, , ] <- theta if (!is.null(nImp)) { if ((k - burn.in) == impIter * thinImp) { impNets[[impIter]] <- impNet impIter <- impIter + 1 } } } clock.end <- Sys.time() runtime <- difftime(clock.end, clock.start) FF <- mcmc(apply(Theta, 2, cbind)) AR <- round(1 - rejectionRate(FF)[1], 2) names(AR) <- NULL ess <- round(effectiveSize(FF), 0) names(ess) <- specs out = list(Time = runtime, formula = formula, specs = specs, dim = dim, Theta = FF, AR = AR, ess = ess, impNets = impNets) class(out) <- "bergm" return(out) }
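
## Illustrative note (assumes `m.flo` fitted with nImp = 5 as in the roxygen
## example): imputed networks are returned as a list of 'network' objects.
# length(m.flo$impNets)  # 5
# m.flo$impNets[[1]]     # first imputed network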
# -- end of Bergm/R/bergmM.R --
#' Bayesian goodness-of-fit diagnostics for ERGMs #' #' Function to calculate summaries for degree, #' minimum geodesic distances, #' and edge-wise shared partner distributions #' to diagnose the Bayesian goodness-of-fit of #' exponential random graph models. #' #' @param x an \code{R} object of class \code{bergm}. #' #' @param sample.size count; number of networks #' to be simulated and compared to the observed network. #' #' @param aux.iters count; number of iterations used for network simulation. #' #' @param n.deg count; used to plot only the first #' \code{n.deg}-1 degree distributions. #' By default no restrictions on the number of degree #' distributions is applied. #' #' @param n.dist count; used to plot only the first #' \code{n.dist}-1 geodesic distances distributions. #' By default no restrictions on the number of geodesic #' distances distributions is applied. #' #' @param n.esp count; used to plot only the first #' \code{n.esp}-1 edge-wise shared partner distributions. #' By default no restrictions on the number of #' edge-wise shared partner distributions is applied. #' #' @param n.ideg count; used to plot only the first #' \code{n.ideg}-1 in-degree distributions. #' By default no restrictions on the number of #' in-degree distributions is applied. #' #' @param n.odeg count; used to plot only the first #' \code{n.odeg}-1 out-degree distributions. #' By default no restrictions on the number of #' out-degree distributions is applied. #' #' @param ... additional arguments, #' to be passed to lower-level functions. #' #' @references #' Caimo, A. and Friel, N. (2011), "Bayesian Inference for Exponential Random Graph Models," #' Social Networks, 33(1), 41-55. \url{https://arxiv.org/abs/1007.5192} #' #' Caimo, A. and Friel, N. (2014), "Bergm: Bayesian Exponential Random Graphs in R," #' Journal of Statistical Software, 61(2), 1-25. 
\url{https://www.jstatsoft.org/v61/i02} #' #' @examples #' \dontrun{ #' # Load the florentine marriage network #' data(florentine) #' #' # Posterior parameter estimation: #' p.flo <- bergm(flomarriage ~ edges + kstar(2), #' burn.in = 50, #' aux.iters = 500, #' main.iters = 1000, #' gamma = 1.2) #' #' # Bayesian goodness-of-fit test: #' bgof(p.flo, #' aux.iters = 500, #' sample.size = 30, #' n.deg = 10, #' n.dist = 9, #' n.esp = 6) #'} #' @export #' bgof <- function(x, sample.size = 100, aux.iters = 10000, n.deg = NULL, n.dist = NULL, n.esp = NULL, n.ideg = NULL, n.odeg = NULL, ...){ FF <- as.matrix(x$Theta[sample(dim(x$Theta)[1], sample.size), ]) DN <- is.directed(ergm.getnetwork(x$formula)) if (DN == FALSE) { # undirected for (i in 1:sample.size) { a <- gof(x$formula, coef = FF[i, ], verbose = FALSE, control = control.gof.formula(nsim = 2, MCMC.burnin = aux.iters, MCMC.interval = 1)) if (i == 1) A <- as.vector(a$pobs.deg) A <- cbind(A, as.vector(a$psim.deg[2, ])) if (i == 1) B <- as.vector(a$pobs.dist) B <- cbind(B,as.vector(a$psim.dist[2, ])) if (i == 1) C <- as.vector(a$pobs.espart) C <- cbind(C,as.vector(a$psim.espart[2, ])) } if (is.null(n.deg)) n.deg <- dim(A)[1] if (is.null(n.dist)) n.dist <- dim(B)[1] - 1 if (is.null(n.esp)) n.esp <- dim(C)[1] a5 <- apply(A[1:n.deg, -1], 1, quantile, probs = 0.05) b5 <- apply(B[-(n.dist:(dim(B)[1] - 1)), -1], 1, quantile, probs = 0.05) c5 <- apply(C[1:n.esp, -1], 1, quantile, probs = 0.05) a95 <- apply(A[1:n.deg, -1], 1, quantile, probs = 0.95) b95 <- apply(B[-(n.dist:(dim(B)[1] - 1)), -1], 1, quantile, probs = 0.95) c95 <- apply(C[1:n.esp, -1], 1, quantile, probs = 0.95) par(mfrow = c(1, 3), oma = c(0, 0, 3, 0), mar = c(4, 3, 1.5, 1)) boxplot(as.data.frame(t(A[1:n.deg, -1])), xaxt = "n", xlab = "degree", ylab = "proportion of nodes") axis(1, seq(1, n.deg), seq(0, n.deg - 1)) lines(A[1:n.deg, 1], lwd = 2, col = 2) lines(a5, col = "darkgray") lines(a95, col = "darkgray") title("Bayesian goodness-of-fit diagnostics", outer = TRUE) boxplot(as.data.frame(t(B[-(n.dist:(dim(B)[1] - 1)), -1])), xaxt = "n", xlab = "minimum geodesic distance", ylab = "proportion of dyads") axis(1, seq(1, n.dist), labels = c(seq(1, (n.dist - 1)), "NR")) lines(B[-(n.dist:(dim(B)[1] - 1)), 1],lwd = 2, col = 2) lines(b5, col = "darkgray") lines(b95, col = "darkgray") boxplot(as.data.frame(t(C[1:n.esp,-1])), xaxt = "n", xlab = "edge-wise shared partners", ylab = "proportion of edges") axis(1,seq(1, n.esp),seq(0, n.esp - 1)) lines(C[1:n.esp, 1],lwd = 2,col = 2) lines(c5,col = "darkgray") lines(c95,col = "darkgray") out = list(sim.degree = A[,-1], sim.dist = B[,-1], sim.esp = C[,-1], obs.degree = A[,1], obs.dist = B[,1], obs.esp = C[,1]) } else {# directed for (i in 1:sample.size) { a <- gof(x$formula, coef = FF[i,], verbose = FALSE, GOF = ~ idegree + odegree + espartners + distance, control = control.gof.formula(nsim = 2, MCMC.burnin = aux.iters, MCMC.interval = 1)) if (i == 1) A <- as.vector(a$pobs.ideg) A <- cbind(A, as.vector(a$psim.ideg[2, ])) if (i == 1) AA <- as.vector(a$pobs.odeg) AA <- cbind(AA, as.vector(a$psim.odeg[2, ])) if (i == 1) B <- as.vector(a$pobs.dist) B <- cbind(B, as.vector(a$psim.dist[2, ])) if (i == 1) C <- as.vector(a$pobs.espart) C <- cbind(C, as.vector(a$psim.espart[2, ])) } if (is.null(n.ideg)) n.ideg <- dim(A)[1] if (is.null(n.odeg)) n.odeg <- dim(AA)[1] if (is.null(n.dist)) n.dist <- dim(B)[1] - 1 if (is.null(n.esp)) n.esp <- dim(C)[1] a5 <- apply(A[1:n.ideg, -1], 1, quantile, probs = 0.05) aa5 <- apply(AA[1:n.odeg, -1], 1, quantile, probs = 0.05) b5 
<- apply(B[-(n.dist:(dim(B)[1] - 1)), -1], 1, quantile, probs = 0.05) c5 <- apply(C[1:n.esp, -1], 1, quantile, probs = 0.05) a95 <- apply(A[1:n.ideg, -1], 1, quantile, probs = 0.95) aa95 <- apply(AA[1:n.odeg, -1], 1, quantile, probs = 0.95) b95 <- apply(B[-(n.dist:(dim(B)[1] - 1)), -1], 1, quantile, probs = 0.95) c95 <- apply(C[1:n.esp, -1], 1, quantile, probs = 0.95) par(mfrow = c(2, 2), oma = c(0, 0, 3, 0), mar = c(4, 3, 1.5, 1)) boxplot(as.data.frame(t(A[1:n.ideg,-1])), xaxt = "n", xlab = "in degree", ylab = "proportion of nodes") axis(1, seq(1, n.ideg), seq(0, n.ideg - 1)) lines(A[1:n.ideg, 1], lwd = 2, col = 2) lines(a5, col = "darkgray") lines(a95, col = "darkgray") title("Bayesian goodness-of-fit diagnostics", outer = TRUE) boxplot(as.data.frame(t(AA[1:n.odeg, -1])), xaxt = "n", xlab = "out degree", ylab = "proportion of nodes") axis(1,seq(1, n.odeg),seq(0, n.odeg - 1)) lines(AA[1:n.odeg, 1],lwd = 2,col = 2) lines(aa5, col = "darkgray") lines(aa95, col = "darkgray") boxplot(as.data.frame(t(B[-(n.dist:(dim(B)[1] - 1)), -1])), xaxt = "n", xlab = "minimum geodesic distance", ylab = "proportion of dyads") axis(1, seq(1, n.dist), labels = c(seq(1, (n.dist - 1)), "NR")) lines(B[-(n.dist:(dim(B)[1] - 1)), 1], lwd = 2 , col = 2) lines(b5,col = "darkgray") lines(b95,col = "darkgray") boxplot(as.data.frame(t(C[1:n.esp, -1])), xaxt = "n", xlab = "edge-wise shared partners", ylab = "proportion of edges") axis(1, seq(1, n.esp), seq(0, n.esp - 1)) lines(C[1:n.esp, 1],lwd = 2, col = 2) lines(c5, col = "darkgray") lines(c95, col = "darkgray") out = list(sim.idegree = A[,-1], sim.odegree = AA[,-1], sim.dist = B[,-1], sim.esp = C[,-1], obs.degree = A[,1], obs.dist = B[,1], obs.esp = C[,1]) } }
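
## Illustrative note (assumes `p.flo` fitted as in bergm()'s roxygen example):
## besides the plots, bgof() returns the simulated and observed distributions
## for custom summaries.
# bg <- bgof(p.flo, sample.size = 30, aux.iters = 500)
# str(bg$sim.degree)  # one column of degree proportions per simulated network
# bg$obs.degree       # observed degree distribution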
# -- end of Bergm/R/bgof.R --
#' Adjustment of ERGM pseudolikelihood #' #' Function to estimate the transformation parameters for #' adjusting the pseudolikelihood function. #' #' @param formula formula; an \code{\link[ergm]{ergm}} formula object, #' of the form <network> ~ <model terms> #' where <network> is a \code{\link[network]{network}} object #' and <model terms> are \code{ergm-terms}. #' #' @param aux.iters count; number of auxiliary iterations used for drawing the first network from the ERGM likelihood. See \code{\link[ergm]{control.simulate.formula}}. #' #' @param n.aux.draws count; Number of auxiliary networks drawn from the ERGM likelihood. See \code{\link[ergm]{control.simulate.formula}}. #' #' @param aux.thin count; Number of auxiliary iterations between network draws after the first network is drawn. See \code{\link[ergm]{control.simulate.formula}}. #' #' @param ladder count; Length of temperature ladder (>=3). #' #' @param estimate If "MLE" (the default), then an approximate maximum likelihood estimator is returned. If "CD" , the Monte-Carlo contrastive divergence estimate is returned. See \code{\link[ergm]{ergm}}. #' #' @param seed integer; seed for the random number generator. See \code{set.seed}. #' #' @param ... Additional arguments, to be passed to the ergm function. See \code{\link[ergm]{ergm}}. #' #' @references #' Bouranis, L., Friel, N., & Maire, F. (2018). Bayesian model selection for exponential #' random graph models via adjusted pseudolikelihoods. #' Journal of Computational and Graphical Statistics, 27(3), 516-528. \url{https://arxiv.org/abs/1706.06344} #' #' @export #' ergmAPL <- function(formula, aux.iters = NULL, n.aux.draws = NULL, aux.thin = NULL, ladder = NULL, estimate = c("MLE","CD"), seed = 1, ...) { y <- ergm.getnetwork(formula) model <- ergm_model(formula, y) n <- dim(y[,])[1] sy <- summary(formula) dim <- length(sy) if (dim == 1) stop("Model dimension must be greater than 1") if (is.null(aux.iters)) aux.iters <- 3000 if (is.null(n.aux.draws)) n.aux.draws <- 50 if (is.null(aux.thin)) aux.thin <- 50 if (is.null(ladder)) ladder <- 50 estimate <- match.arg(estimate) control <- control.ergm(MCMC.burnin = aux.iters, MCMC.interval = aux.thin, MCMC.samplesize = n.aux.draws, MCMC.maxedges = Inf, seed = seed) adjusted_logPL <- function(theta, Y, X, weights, calibr.info) { theta_transf <- c(calibr.info$W %*% (theta - calibr.info$Theta_MLE) + calibr.info$Theta_PL) xtheta <- c(X %*% theta_transf) log.like <- sum(dbinom(weights * Y, weights, exp(xtheta)/(1 + exp(xtheta)), log = TRUE)) return(log.like) } Hessian_logPL <- function(theta, X, weights) { p <- exp(as.matrix(X) %*% theta)/(1 + exp(as.matrix(X) %*% theta)) W <- Diagonal(x = as.vector(weights * p * (1 - p))) Hessian <- -t(as.matrix(X)) %*% W %*% as.matrix(X) Hessian <- as.matrix(Hessian) return(Hessian) } mplesetup <- ergmMPLE(formula) data.glm.initial <- cbind(mplesetup$response, mplesetup$weights, mplesetup$predictor) colnames(data.glm.initial) <- c("responses", "weights", colnames(mplesetup$predictor)) path <- seq(0, 1, length.out = ladder) suppressMessages(mle <- ergm(formula, estimate = estimate, verbose = FALSE, control = control.ergm(seed = seed), ...)) suppressMessages(mple <- ergm(formula, estimate = "MPLE", verbose = FALSE)) if( any( c(-Inf, Inf) %in% mle$coefficients) | any( c(-Inf, Inf) %in% mple$coefficients) ) { if( any( c(-Inf, Inf) %in% mle$coefficients) ){ inf_term <- names(mle$coefficients[mle$coefficients %in% c(-Inf, Inf)]) inf_term <- paste(inf_term, collapse = " & ") } else if( any( c(-Inf, Inf) %in% 
mple$coefficients) ){ inf_term <- names(mple$coefficients[mple$coefficients %in% c(-Inf, Inf)]) inf_term <- paste(inf_term, collapse = " & ") } stop( paste0("The MLE/MPLE contains an infinite value for the following model terms: ", inf_term, ". Consider changing these model terms.") ) } y0 <- simulate(formula, coef = mle$coefficients, nsim = 1, control = control.simulate(MCMC.burnin = 1, MCMC.interval = 1), return.args = "ergm_state")$object z <- as.matrix( ergm_MCMC_sample(y0, theta = mle$coefficients, stats0 = sy, control = control )$stats[[1]] ) H <- -cov(z) HPL <- Hessian_logPL(theta = mple$coefficients, X = mplesetup$predictor, weights = mplesetup$weights) HPL <- round(HPL,5) mat <- -H mat <- round(mat,5) if( is.positive.definite(mat) == TRUE & is.positive.definite(HPL) == TRUE ) { chol.HPL<- chol(-HPL) chol.H <- chol(mat) W <- solve(chol.HPL) %*% chol.H } else if (is.positive.definite(mat) == FALSE & is.positive.definite(HPL) == TRUE){ suppressWarnings( chol.H <- chol(mat, pivot = TRUE) ) pivot <- attr(chol.H, "pivot") oo <- order(pivot) chol.HPL <- chol(-HPL) W <- solve(chol.HPL) %*% chol.H[, oo] } else if (is.positive.definite(mat) == TRUE & is.positive.definite(HPL) == FALSE){ chol.H <- chol(mat) suppressWarnings( chol.HPL <- chol(-HPL, pivot = TRUE) ) pivot <- attr(chol.HPL, "pivot") oo <- order(pivot) W <- solve(chol.HPL[, oo]) %*% chol.H } else { suppressWarnings( chol.H <- chol(mat, pivot = TRUE) ) pivot.H <- attr(chol.H, "pivot") oo.H <- order(pivot.H) suppressWarnings( chol.HPL <- chol(-HPL, pivot = TRUE) ) pivot.HPL <- attr(chol.HPL, "pivot") oo.HPL <- order(pivot.HPL) W <- solve(chol.HPL[, oo.HPL]) %*% chol.H[, oo.H] } adjust.info <- list(Theta_MLE = mle$coefficients, Theta_PL = mple$coefficients, W = W) E <- lapply(seq_along(path)[-length(path)], function(i) { y0E <- simulate(formula, coef = path[i] * adjust.info$Theta_MLE, nsim = 1, control = control.simulate(MCMC.burnin = 1, MCMC.interval = 1), return.args = "ergm_state")$object Monte.Carlo.samples <- as.matrix( ergm_MCMC_sample(y0E, theta = path[i] * adjust.info$Theta_MLE, stats0 = sy, control = control)$stats[[1]]) log( mean( exp( (path[i + 1] - path[i]) * adjust.info$Theta_MLE %*% t(Monte.Carlo.samples) ) ) ) }) E <- sum(unlist(E)) if (y$gal$directed == FALSE) logz0A <- choose(n, 2) * log(2) else logz0A <- (n * (n - 1)) * log(2) logztheta <- E + logz0A ll.true <- c(matrix(adjust.info$Theta_MLE, nrow = 1) %*% sy) - logztheta ll.adjpseudo <- adjusted_logPL(theta = adjust.info$Theta_MLE, Y = mplesetup$response, X = mplesetup$predictor, weights = mplesetup$weights, calibr.info = adjust.info) out <- list(formula = formula, Theta_MLE = mle$coefficients, Theta_PL = mple$coefficients, W = W, logC = ll.true - ll.adjpseudo, ll_true = ll.true, logztheta = logztheta, ll_adjpseudo = ll.adjpseudo) return(out) }
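
## Illustrative usage sketch (no roxygen example is provided for this function;
## this assumes the florentine data used elsewhere in the package):
# data(florentine)
# APL <- ergmAPL(flomarriage ~ edges + kstar(2))
# APL$logC          # log-constant relating adjusted pseudolikelihood and true likelihood
# APL$ll_true       # estimated true log-likelihood at the MLE
# APL$ll_adjpseudo  # adjusted pseudolikelihood at the MLE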
# -- end of Bergm/R/ergmAPL.R --
#' Wrapper function for evidence estimation #' #' Function to estimate the evidence (marginal likelihood) with Chib and Jeliazkov's method #' or Power posteriors, based on the adjusted pseudolikelihood function. #' #' @param evidence.method character; method to estimate the marginal likelihood. Options are: \code{"CJ"}, in #' which case the marginal likelihood is estimated with Chib and Jeliazkov's method; \code{"PP"}, in #' which case the marginal likelihood is estimated with Power posteriors. #' #' @param ... further arguments to be passed. #' See \code{evidenceCJ} and \code{evidencePP}. #' #' @references #' Bouranis, L., Friel, N., & Maire, F. (2018). Bayesian model selection for exponential #' random graph models via adjusted pseudolikelihoods. #' Journal of Computational and Graphical Statistics, 27(3), 516-528. #' \url{https://arxiv.org/abs/1706.06344} #' #' @examples #' \dontrun{ #' # Load the florentine marriage network: #' data(florentine) #' #' # MCMC sampling and evidence estimation: #' CJE <- evidence(evidence.method = "CJ", #' formula = flomarriage ~ edges + kstar(2), #' main.iters = 30000, #' burn.in = 2000, #' aux.iters = 1000, #' num.samples = 25000, #' V.proposal = 2.5, #' ladder = 100, #' seed = 1) #' #' # Posterior summaries: #' summary(CJE) #' #' # MCMC diagnostics plots: #' plot(CJE) #' #' # Log-evidence (marginal likelihood) estimate: #' CJE$log.evidence #'} #' #' @export #' evidence <- function (evidence.method = c("CJ", "PP"), ...) { if (!is.element(evidence.method, c("CJ", "PP"))) { stop("Select a valid evidence estimation method.\n") } call <- (as.list(match.call())[-1])[-1] if (evidence.method == "CJ") { do.call(evidenceCJ, call) } else if (evidence.method == "PP") { do.call(evidencePP, call) } }
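
## Model-comparison sketch (illustrative): `log.evidence` is on the log scale,
## so the Bayes factor between two fitted models is the exponentiated
## difference; `E1` and `E2` below are hypothetical evidence() results.
# E1 <- evidence(evidence.method = "CJ", formula = flomarriage ~ edges + kstar(2))
# E2 <- evidence(evidence.method = "CJ", formula = flomarriage ~ edges + triangle)
# exp(E1$log.evidence - E2$log.evidence)  # Bayes factor in favour of model 1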
# -- end of Bergm/R/evidence.R --
#' Evidence estimation via Chib and Jeliazkov's method #' #' Function to estimate the evidence (marginal likelihood) with Chib and Jeliazkov's method, #' based on the adjusted pseudolikelihood function. #' #' @param formula formula; an \code{\link[ergm]{ergm}} formula object, #' of the form <network> ~ <model terms> #' where <network> is a \code{\link[network]{network}} object #' and <model terms> are \code{ergm-terms}. #' #' @param prior.mean vector; #' mean vector of the multivariate Normal prior. #' By default set to a vector of 0's. #' #' @param prior.sigma square matrix; #' variance/covariance matrix for the multivariate Normal prior. #' By default set to a diagonal matrix with every diagonal entry equal to 100. #' #' @param aux.iters count; #' number of auxiliary iterations used for drawing the first network from the ERGM likelihood. #' See \code{\link[ergm]{control.simulate.formula}} and \code{\link[Bergm]{ergmAPL}}. #' #' @param n.aux.draws count; #' number of auxiliary networks drawn from the ERGM likelihood. #' See \code{\link[ergm]{control.simulate.formula}} and \code{\link[Bergm]{ergmAPL}}. #' #' @param aux.thin count; #' number of auxiliary iterations between network draws after the first network is drawn. #' See \code{\link[ergm]{control.simulate.formula}} and \code{\link[Bergm]{ergmAPL}}. #' #' @param ladder count; length of temperature ladder (>=3). #' See \code{\link[Bergm]{ergmAPL}}. #' #' @param main.iters count; #' number of MCMC iterations after burn-in for the adjusted pseudo-posterior estimation. #' #' @param burn.in count; #' number of burn-in iterations at the beginning of an MCMC run #' for the adjusted pseudo-posterior estimation. #' #' @param thin count; #' thinning interval used in the simulation for the adjusted pseudo-posterior estimation. #' The number of MCMC iterations must be divisible by this value. #' #' @param num.samples integer; #' number of samples used in the marginal likelihood estimate. #' Must be lower than \code{main.iters} - \code{burn.in}. #' #' @param V.proposal count; #' diagonal entry for the multivariate Normal proposal. #' By default set to 1.5. #' #' @param seed integer; seed for the random number generator. #' See \code{set.seed} and \code{\link[MCMCpack]{MCMCmetrop1R}}. #' #' @param estimate If "MLE" (the default), then an approximate maximum likelihood estimator is returned. If "CD", the Monte-Carlo contrastive divergence estimate is returned. See \code{\link[ergm]{ergm}}. #' #' @param ... additional arguments, to be passed to the ergm function. #' See \code{\link[ergm]{ergm}} and \code{\link[Bergm]{ergmAPL}}. #' #' @references #' Caimo, A., & Friel, N. (2013). Bayesian model selection for exponential random graph models. #' Social Networks, 35(1), 11-24. \url{https://arxiv.org/abs/1201.2337} #' #' Bouranis, L., Friel, N., & Maire, F. (2018). Bayesian model selection for exponential #' random graph models via adjusted pseudolikelihoods. #' Journal of Computational and Graphical Statistics, 27(3), 516-528.
#' \url{https://arxiv.org/abs/1706.06344} #' #' @examples #' \dontrun{ #' # Load the florentine marriage network: #' data(florentine) #' #' # MCMC sampling and evidence estimation: #' CJE <- evidenceCJ(flomarriage ~ edges + kstar(2), #' main.iters = 2000, #' burn.in = 200, #' aux.iters = 500, #' num.samples = 25000, #' V.proposal = 2.5) #' #' # Posterior summaries: #' summary(CJE) #' #' # MCMC diagnostics plots: #' plot(CJE) #' #' # Log-evidence (marginal likelihood) estimate: #' CJE$log.evidence #'} #' #' @export #' evidenceCJ <- function(formula, prior.mean = NULL, prior.sigma = NULL, aux.iters = 1000, n.aux.draws = 5, aux.thin = 50, ladder = 30, main.iters = 30000, burn.in = 5000, thin = 1, V.proposal = 1.5, num.samples = 25000, seed = 1, estimate = c("MLE","CD"), ...) { y <- ergm.getnetwork(formula) model <- ergm_model(formula, y) specs <- unlist(sapply(model$terms, '[', 'coef.names'), use.names = FALSE) sy <- summary(formula) dim <- length(sy) if (dim == 1) stop("Model dimension must be greater than 1") if (is.null(prior.mean)) prior.mean <- rep(0, dim) if (is.null(prior.sigma)) prior.sigma <- diag(100, dim, dim) estimate <- match.arg(estimate) expit <- function(x) exp(x)/(1 + exp(x)) adjusted_logPL <- function(theta, Y, X, weights, calibr.info) { theta_transf <- c(calibr.info$W %*% (theta - calibr.info$Theta_MLE) + calibr.info$Theta_PL) xtheta <- c(X %*% theta_transf) log.like <- sum(dbinom(weights * Y, weights, expit(xtheta), log = TRUE)) return(log.like) } adjusted_logPP <- function(theta, Y, X, weights, prior.mean, prior.sigma, calibr.info) { theta_transf <- c(calibr.info$W %*% (theta - calibr.info$Theta_MLE) + calibr.info$Theta_PL) xtheta <- c(X %*% theta_transf) log.like <- sum(dbinom(weights * Y, weights, expit(xtheta), log = TRUE)) log.prior <- dmvnorm(theta, prior.mean, prior.sigma, log = TRUE) return(log.like + log.prior) } Hessian_adjusted_logPP <- function(theta, X, weights, calibr.info) { p <- exp(as.matrix(X) %*% theta)/(1 + exp(as.matrix(X) %*% theta)) W <- Diagonal(x = as.vector(weights * p * (1 - p))) Hessian <- as.matrix(-t(as.matrix(X)) %*% W %*% as.matrix(X)) return(t(calibr.info$W) %*% Hessian %*% calibr.info$W) } clock.start <- Sys.time() message(" > Pseudolikelihood adjustment") info.adjustPL <- ergmAPL(formula = formula, aux.iters = aux.iters, n.aux.draws = n.aux.draws, aux.thin = aux.thin, ladder = ladder, estimate = estimate, seed = seed, ...) 
calibr.info <- list(Theta_MLE = info.adjustPL$Theta_MLE, Theta_PL = info.adjustPL$Theta_PL, W = info.adjustPL$W, logC = info.adjustPL$logC) mplesetup <- ergmMPLE(formula) data.glm.initial <- cbind(mplesetup$response, mplesetup$weights, mplesetup$predictor) colnames(data.glm.initial) <- c("responses", "weights", colnames(mplesetup$predictor)) Sigma.proposal <- diag(rep(V.proposal, dim), dim, dim) H <- Hessian_adjusted_logPP(calibr.info$Theta_PL, X = data.glm.initial[,-c(1, 2)], weights = data.glm.initial[, "weights"], calibr.info = calibr.info) S.prop <- Sigma.proposal %*% solve(solve(prior.sigma) - H) %*% Sigma.proposal message(" > MCMC run") capture.output(T0 <- MCMCmetrop1R(adjusted_logPP, theta.init = calibr.info$Theta_PL, Y = mplesetup$response, X = mplesetup$predictor, weights = mplesetup$weights, prior.mean = prior.mean, prior.sigma = prior.sigma, V = S.prop, thin = thin, mcmc = main.iters, burnin = burn.in, calibr.info = calibr.info, seed = NA, logfun = TRUE)) message(" > Model evidence estimation") T0 <- as.mcmc(T0[(burn.in + 1):main.iters, ]) AR <- round(1 - rejectionRate(T0)[1], 2) names(AR) <- NULL ess <- round(effectiveSize(T0), 0) names(ess) <- specs theta.sum <- summary(T0) thetahat <- theta.sum$statistics[, "Mean"] log.post <- adjusted_logPP(thetahat, Y = mplesetup$response, X = mplesetup$predictor, weights = mplesetup$weights, prior.mean = prior.mean, prior.sigma = prior.sigma, calibr.info = calibr.info) g <- sample(1:nrow(T0), num.samples, replace = TRUE) theta.g <- T0[g, ] q.g <- dmvnorm(theta.g, mean = thetahat, sigma = S.prop, log = FALSE) lik.star <- adjusted_logPL(thetahat, Y = mplesetup$response, X = mplesetup$predictor, weights = mplesetup$weights, calibr.info = calibr.info) lik.g <- apply(theta.g, 1, function(i) { adjusted_logPL(i, Y = mplesetup$response, X = mplesetup$predictor, weights = mplesetup$weights, calibr.info = calibr.info) }) alpha.g <- sapply(lik.g, function(l) min(1, exp(lik.star - l))) theta.j <- rmvnorm(num.samples, mean = thetahat, sigma = S.prop) lik.j <- apply(theta.j, 1, function(i) { adjusted_logPL(i, Y = mplesetup$response, X = mplesetup$predictor, weights = mplesetup$weights, calibr.info = calibr.info) }) alpha.j <- sapply(lik.j, function(l) min(1, exp(l - lik.star))) pi.hat <- mean(alpha.g * q.g)/mean(alpha.j) logEvidence <- calibr.info$logC + log.post - log(pi.hat) clock.end <- Sys.time() runtime <- difftime(clock.end, clock.start) out <- list(formula = formula, Theta = T0, AR = AR, ess = ess, log.evidence = logEvidence, dim = dim, specs = specs, Time = runtime) class(out) <- "bergm" return(out) }
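
## Illustrative note (assumes `CJE` from the roxygen example above): the output
## is a 'bergm' object, so the evidence estimate sits alongside the usual
## posterior summaries.
# CJE$log.evidence  # log marginal likelihood estimate
# CJE$AR; CJE$ess   # acceptance rate and effective sample sizes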
# -- end of Bergm/R/evidenceCJ.R --
#' Evidence estimation via power posteriors #' #' Function to estimate the evidence (marginal likelihood) with Power posteriors, #' based on the adjusted pseudolikelihood function. #' #' @param formula formula; an \code{\link[ergm]{ergm}} formula object, #' of the form <network> ~ <model terms> #' where <network> is a \code{\link[network]{network}} object #' and <model terms> are \code{ergm-terms}. #' #' @param prior.mean vector; mean vector of the multivariate Normal prior. #' By default set to a vector of 0's. #' #' @param prior.sigma square matrix; variance/covariance matrix for the multivariate Normal prior. #' By default set to a diagonal matrix with every diagonal entry equal to 100. #' #' @param aux.iters count; number of auxiliary iterations used for drawing the first network from the ERGM likelihood. See \code{\link[ergm]{control.simulate.formula}} and \code{\link[Bergm]{ergmAPL}}. #' #' @param n.aux.draws count; number of auxiliary networks drawn from the ERGM likelihood. See \code{\link[ergm]{control.simulate.formula}} and \code{\link[Bergm]{ergmAPL}}. #' #' @param aux.thin count; number of auxiliary iterations between network draws after the first network is drawn. See \code{\link[ergm]{control.simulate.formula}} and \code{\link[Bergm]{ergmAPL}}. #' #' @param ladder count; length of temperature ladder (>=3). See \code{\link[Bergm]{ergmAPL}}. #' #' @param main.iters count; number of MCMC iterations after burn-in for the adjusted pseudo-posterior estimation. #' #' @param burn.in count; number of burn-in iterations at the beginning of an MCMC run for the adjusted pseudo-posterior estimation. #' #' @param thin count; thinning interval used in the simulation for the adjusted pseudo-posterior estimation. The number of MCMC iterations must be divisible by this value. #' #' @param V.proposal count; diagonal entry for the multivariate Normal proposal. #' By default set to 1.5. #' #' @param seed integer; seed for the random number generator. #' See \code{set.seed} and \code{\link[MCMCpack]{MCMCmetrop1R}}. #' #' @param temps numeric vector; inverse temperature ladder, \eqn{t \in [0,1]}. #' #' @param estimate If "MLE" (the default), then an approximate maximum likelihood estimator is returned. If "CD" , the Monte-Carlo contrastive divergence estimate is returned. See \code{\link[ergm]{ergm}}. #' #' @param ... additional arguments, to be passed to the ergm function. #' See \code{\link[ergm]{ergm}} and \code{\link[Bergm]{ergmAPL}}. #' #' @references #' Bouranis, L., Friel, N., & Maire, F. (2018). Bayesian model selection for exponential #' random graph models via adjusted pseudolikelihoods. #' Journal of Computational and Graphical Statistics, 27(3), 516-528. #' \url{https://arxiv.org/abs/1706.06344} #' #' @examples #' \dontrun{ #' # Load the florentine marriage network: #' data(florentine) #' #' PPE <- evidencePP(flomarriage ~ edges + kstar(2), #' aux.iters = 500, #' aux.thin = 50, #' main.iters = 2000, #' burn.in = 100, #' V.proposal = 2.5) #' #' # Posterior summaries: #' summary(PPE) #' #' # MCMC diagnostics plots: #' plot(PPE) #' #' # Log-evidence (marginal likelihood) estimate: #' PPE$log.evidence #'} #' #' @export #' evidencePP <- function(formula, prior.mean = NULL, prior.sigma = NULL, aux.iters = 1000, n.aux.draws = 50, aux.thin = 50, ladder = 30, main.iters = 20000, burn.in = 5000, thin = 1, V.proposal = 1.5, seed = 1, temps = NULL, estimate = c("MLE", "CD"), ...) 
{ y <- ergm.getnetwork(formula) model <- ergm_model(formula, y) specs <- unlist(sapply(model$terms, '[', 'coef.names'), use.names = FALSE) sy <- summary(formula) dim <- length(sy) if (dim == 1) stop("Model dimension must be greater than 1") if (is.null(prior.mean)) prior.mean <- rep(0, dim) if (is.null(prior.sigma)) prior.sigma <- diag(100, dim, dim) if (is.null(temps)) temps <- seq(0, 1, length.out = 50)^5 estimate <- match.arg(estimate) expit <- function(x) exp(x)/(1 + exp(x)) adjusted_logPL <- function(theta, Y, X, weights, calibr.info) { theta_transf <- c(calibr.info$W %*% (theta - calibr.info$Theta_MLE) + calibr.info$Theta_PL) xtheta <- c(X %*% theta_transf) log.like <- sum(dbinom(weights * Y, weights, expit(xtheta), log = TRUE)) return(log.like) } adjusted_logPP <- function(theta, Y, X, weights, prior.mean, prior.sigma, temperature, calibr.info) { theta_transf <- c(calibr.info$W %*% (theta - calibr.info$Theta_MLE) + calibr.info$Theta_PL) xtheta <- c(X %*% theta_transf) p <- expit(xtheta) log.like <- sum(dbinom(weights * Y, weights, expit(xtheta), log = TRUE)) log.prior <- dmvnorm(theta, mean = prior.mean, prior.sigma, log = TRUE) return(temperature * log.like + log.prior) } score_logPPt <- function(theta, Y, X, weights, prior.mean, prior.sigma, calibr.info, temperature) { score.log.prior <- -solve(prior.sigma, (theta - prior.mean)) theta_transf <- c(calibr.info$W %*% (theta - calibr.info$Theta_MLE) + calibr.info$Theta_PL) p <- c(exp(as.matrix(X) %*% theta_transf)/(1 + exp(as.matrix(X) %*% theta_transf))) deriv.logpl <- c(t(X) %*% (weights * (Y - p))) out <- temperature * (t(calibr.info$W) %*% deriv.logpl) + score.log.prior return(out) } Hessian_adjusted_logPL <- function(theta, X, weights, calibr.info) { p <- exp(as.matrix(X) %*% theta)/(1 + exp(as.matrix(X) %*% theta)) W <- Diagonal(x = as.vector(weights * p * (1 - p))) Hessian <- -t(as.matrix(X)) %*% W %*% as.matrix(X) Hessian <- as.matrix(Hessian) return(t(calibr.info$W) %*% Hessian %*% calibr.info$W) } numtemp <- length(temps) pplist <- list() acceptances <- rep(0, numtemp) cv.ell.D2 <- rep(0, numtemp) cv.vll.D2 <- rep(0, numtemp) htheta.D2 <- list() clock.start <- Sys.time() message(" > Pseudolikelihood adjustment") info.adjustPL <- ergmAPL(formula = formula, aux.iters = aux.iters, n.aux.draws = n.aux.draws, aux.thin = aux.thin, ladder = ladder, estimate = estimate, seed = seed, ...) calibr.info <- list(Theta_MLE = info.adjustPL$Theta_MLE, Theta_PL = info.adjustPL$Theta_PL, W = info.adjustPL$W, logC = info.adjustPL$logC) mplesetup <- ergmMPLE(formula) data.glm.initial <- cbind(mplesetup$response, mplesetup$weights, mplesetup$predictor) colnames(data.glm.initial) <- c("responses", "weights", colnames(mplesetup$predictor)) Vcov.MPLE <- vcov(glm(mplesetup$response ~ . 
- 1, data = data.frame(mplesetup$predictor), weights = mplesetup$weights, family = "binomial")) Sigma.proposal <- diag(rep(V.proposal, dim), dim, dim) H <- Hessian_adjusted_logPL(theta = calibr.info$Theta_PL, X = data.glm.initial[, -c(1, 2)], weights = data.glm.initial[,"weights"], calibr.info = calibr.info) S.prop <- Sigma.proposal %*% solve(solve(prior.sigma) - H) %*% Sigma.proposal rownames(S.prop) <- colnames(S.prop) <- rownames(Vcov.MPLE) taup <- 1/sqrt(diag(S.prop)) tau0 <- solve(prior.sigma) a.tilde <- log(tau0[1, 1]/taup)/log(temps[2]) covar.temps <- list() for (i in numtemp:1) { covar.temps[[i]] <- S.prop diag(covar.temps[[i]]) <- diag(S.prop)/(temps[i]^a.tilde[1]) } covar.temps[[1]] <- covar.temps[[2]] message(" > MCMC run") for (l in numtemp:1) { if (l == 1) { pplist[[l]] <- rmvnorm(main.iters, mean = prior.mean, sigma = prior.sigma) acceptances[l] <- 1 } else { if (l == numtemp) l0 <- calibr.info$Theta_PL else l0 <- c(tail(red)[6, ]) capture.output(red <- MCMCmetrop1R(adjusted_logPP, mcmc = main.iters, burnin = burn.in, thin = thin, theta.init = l0, seed = NA, V = covar.temps[[l]], Y = mplesetup$response, X = mplesetup$predictor, weights = mplesetup$weights, calibr.info = calibr.info, prior.mean = prior.mean, prior.sigma = prior.sigma, temperature = temps[l], logfun = TRUE)) pplist[[l]] <- red acceptances[l] <- round(1 - rejectionRate(red)[1], 3) } } message(" > Model evidence estimation") names(acceptances) <- paste("Temp", "_", 1:numtemp, " = ", round(temps, 5), sep = "") grad.logpp.list <- logPL.list <- list() for (j in 1:numtemp) { if (j == 1) { grad.logpp.list[[j]] <- t(apply(data.frame(pplist[[j]]), 1, function(x) -solve(prior.sigma, (x - prior.mean)))) logPL.list[[j]] <- apply(pplist[[j]], 1, function(x) dmvnorm(x,prior.mean, prior.sigma, log = TRUE)) } else { grad.logpp.list[[j]] <- t(apply(data.frame(pplist[[j]]), 1, function(x) { score_logPPt(theta = x, Y = mplesetup$response, X = mplesetup$predictor, weights = mplesetup$weights, prior.mean = prior.mean, prior.sigma = prior.sigma, temperature = temps[j], calibr.info = calibr.info) })) logPL.list[[j]] <- apply(data.frame(pplist[[j]]), 1, function(x) { adjusted_logPL(theta = x, Y = mplesetup$response, X = mplesetup$predictor, weights = mplesetup$weights, calibr.info = calibr.info) }) } k <- dim * (dim + 3)/2 l <- 2 * dim + 1 w.mat <- matrix(0, nrow = main.iters, ncol = k) w.mat[, 1:dim] <- grad.logpp.list[[j]] w.mat[, (dim + 1):(2 * dim)] <- as.matrix(pplist[[j]]) * grad.logpp.list[[j]] + rep(1, main.iters) for (k1 in (1:(dim - 1))) { for (k2 in (k1 + 1):dim) { w.mat[, l] <- as.matrix(pplist[[j]])[, k1] * grad.logpp.list[[j]][, k2] + as.matrix(pplist[[j]])[,k2] * grad.logpp.list[[j]][, k1] l <- l + 1 } } phi.D2 <- c(-solve(var(w.mat)) %*% c(cov(w.mat, logPL.list[[j]]))) htheta.D2[[j]] <- c(phi.D2 %*% t(w.mat)) cv.ell.D2[j] <- mean(logPL.list[[j]] + htheta.D2[[j]]) cv.vll.D2[j] <- var(logPL.list[[j]] + htheta.D2[[j]]) } clock.end <- Sys.time() runtime <- difftime(clock.end, clock.start) ppml <- function(cv.ell.D2, cv.vll.D2, tempvec) { N <- length(cv.ell.D2) cv.res.D2.mts <- 0 for (i in 1:(N - 1)) { wts <- tempvec[i + 1] - tempvec[i] cv.res.D2.mts <- cv.res.D2.mts + wts * ((cv.ell.D2[i +1] + cv.ell.D2[i])/2) - ((wts^2)/12) * (cv.vll.D2[i + 1] - cv.vll.D2[i]) } return(cv.res.D2.mts) } pp.estimates <- ppml(cv.ell.D2 = cv.ell.D2, cv.vll.D2 = cv.vll.D2, tempvec = temps) log.evidence <- calibr.info$logC + pp.estimates ess <- round(effectiveSize(pplist[[numtemp]]), 0) names(ess) <- specs out <- list(formula = formula, Theta = 
pplist[[numtemp]], AR_temp = acceptances, AR = acceptances[numtemp], ess = ess, log.evidence = log.evidence, dim = dim, specs = specs, Time = runtime) class(out) <- "bergm" return(out) }
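# ---------------------------------------------------------------------------
# Illustration (not part of the package): ppml() above integrates the expected
# log-likelihood over the inverse-temperature ladder with a variance-corrected
# trapezoidal rule (cf. Friel, Hurn and Wyse, 2014):
#   log z ~ sum_i (t[i+1] - t[i]) * (E[i+1] + E[i]) / 2
#           - ((t[i+1] - t[i])^2 / 12) * (V[i+1] - V[i]),
# where E[i] and V[i] are the mean and variance of the log-likelihood under the
# power posterior at temperature t[i]. A standalone version with made-up inputs:
power_posterior_ml <- function(ell, vll, temps) {
  stopifnot(length(ell) == length(temps), length(vll) == length(temps))
  out <- 0
  for (i in seq_len(length(temps) - 1)) {
    w   <- temps[i + 1] - temps[i]
    out <- out + w * (ell[i + 1] + ell[i]) / 2 - (w^2 / 12) * (vll[i + 1] - vll[i])
  }
  out
}
power_posterior_ml(ell = c(-60, -45, -40), vll = c(6, 3, 1), temps = c(0, 0.5, 1))
# ---------------------------------------------------------------------------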
/scratch/gouwar.j/cran-all/cranData/Bergm/R/evidencePP.R
#' Lazega lawyers network data
#'
#' @source This network dataset comes from a network study of corporate law partnership
#' that was carried out in a Northeastern US corporate law firm
#' in New England from 1988 to 1991. It represents collaborative relations
#' among the 36 attorneys (partners and associates) of this firm.
#' Nodal attributes include: Age, Gender, Office, Practice, School, and Years.
#'
#' @format An object of class \code{network}.
#'
#' @references
#' Lazega, E. (2001), "The Collegial Phenomenon:
#' The Social Mechanisms of Cooperation Among Peers in a
#' Corporate Law Partnership," Oxford University Press.
#'
#' @examples
#' \dontrun{
#' par(mfrow = c(1, 2), oma = rep(0, 4))
#' CC <- hcl.colors(3, "Teal")
#' set.seed(22)
#' plot(lazega,
#'      vertex.col = CC[lazega %v% "Office"],
#'      vertex.cex = 2)
#' legend("topright",
#'        pch = 21,
#'        pt.bg = CC,
#'        legend = c("Boston", "Hartford", "Providence"),
#'        title = "OFFICE")
#' }
"lazega"
/scratch/gouwar.j/cran-all/cranData/Bergm/R/lazega.R
#' Plot BERGM posterior output #' #' This function creates MCMC diagnostic plots for \code{bergm} objects. #' #' @docType methods #' #' @param x an \code{R} object of class \code{bergm}. #' #' @param ... additional arguments, to be passed to lower-level functions. #' #' @examples #' \dontrun{ #' # Load the florentine marriage network #' data(florentine) #' #' # Posterior parameter estimation: #' p.flo <- bergm(flomarriage ~ edges + kstar(2), #' burn.in = 50, #' aux.iters = 500, #' main.iters = 1000, #' gamma = 1.2) #' #' # MCMC diagnostics plots: #' plot(p.flo) #' } #' #' @export #' @method plot bergm plot.bergm <- function(x, ...) { stopifnot(inherits(x, "bergm")) seqq <- 4 par(mfrow = c(min(4, x$dim), 3), oma = c(0, 0, 3, 0), mar = c(4, 3, 1.5, 1)) for (i in 1:x$dim) { if (i %in% c(5, 9, 13)) { dev.new() par(mfrow = c(min(4, x$dim - (i - 1)), 3), oma = c(0, 0, 3, 0), mar = c(4, 3, 1.5, 1)) } plot(density(x$Theta[, i]), main = "", axes = FALSE, xlab = bquote(paste(theta[.(i)], " (", .(x$specs[i]), ")")), ylab = "", lwd = 2) axis(1) axis(2) traceplot(x$Theta[, i], type = "l", xlab = "Iterations", ylab = "") autocorr.plot(x$Theta[, i], auto.layout = FALSE, ...) if (x$dim > 4) seqq <- seq(4, x$dim, 4) if (i %in% union(x$dim, seqq)) { title(paste("MCMC output for Model: y ~", x$formula[3]), outer = TRUE) } } }
/scratch/gouwar.j/cran-all/cranData/Bergm/R/plot.bergm.R
#' Summary of BERGM posterior output #' #' This function summarises MCMC output for \code{bergm} objects. #' #' @docType methods #' #' @param object an \code{R} object of class \code{bergm}. #' #' @param ... additional arguments, to be passed to lower-level functions. #' #' @export #' @method summary bergm summary.bergm <- function(object, ...) { x <- object stopifnot(inherits(x, "bergm")) cat("\n", "Posterior Density Estimate for Model: y ~", paste(x$formula[3]), "\n", "\n") Theta <- as.mcmc(x$Theta) quantiles <- c(0.025, 0.25, 0.5, 0.75, 0.975) statnames <- c("Mean", "SD", "Naive SE", "Time-series SE") varstats <- matrix(nrow = nvar(Theta), ncol = length(statnames), dimnames = list(varnames(Theta), statnames)) Thetavar <- apply(Theta, 2, var) Thetatsvar <- apply(Theta, 2, function(x) coda::spectrum0.ar(x)$spec) varquant <- t(apply(Theta, 2, quantile, quantiles)) varstats[, 1] <- apply(Theta, 2, mean) varstats[, 2] <- sqrt(Thetavar) varstats[, 3] <- sqrt(Thetavar / niter(Theta)) varstats[, 4] <- sqrt(Thetatsvar / niter(Theta)) table1 <- drop(varstats) table2 <- drop(varquant) rNames <- paste("theta", seq(1, x$dim), " (", x$specs[seq(1, x$dim)], ")", sep = "") rownames(table1) <- rownames(table2) <- rNames print(table1); cat("\n"); print(table2) cat("\n", "Acceptance rate:", x$AR, "\n", "\n", "\n") }
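# ---------------------------------------------------------------------------
# Illustration (not part of the package): summary.bergm() carries no @examples
# block, so here is a typical call, mirroring the plot.bergm() example above.
# Wrapped in `if (FALSE)` because it runs a short MCMC (same spirit as \dontrun):
if (FALSE) {
  data(florentine)
  p.flo <- bergm(flomarriage ~ edges + kstar(2),
                 burn.in    = 50,
                 aux.iters  = 500,
                 main.iters = 1000,
                 gamma      = 1.2)
  summary(p.flo)  # posterior means, SDs, naive/time-series SEs, quantiles, AR
}
# ---------------------------------------------------------------------------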
/scratch/gouwar.j/cran-all/cranData/Bergm/R/summary.bergm.R
#' Bayesian inference and model selection for stochastic epidemics #' #' \code{Bernadette} provides Bayesian analysis for stochastic extensions of dynamic non-linear systems using advanced computational algorithms. #' #' @docType package #' @name Bernadette-package #' @aliases Bernadette #' @useDynLib Bernadette, .registration=TRUE #' @import ggplot2 #' @importFrom grid unit.c #' @importFrom gridExtra grid.arrange arrangeGrob #' @importFrom magrittr `%>%` #' @import methods #' @rawNamespace import(Rcpp, except = c(LdFlags,.DollarNames,prompt)) #' @import RcppParallel #' @import rstantools #' @importFrom rstan optimizing sampling vb extract #' @importFrom scales percent pretty_breaks #' @rawNamespace import(stats, except = c(lag, filter)) #' @import utils #' #' @references #' Bouranis, L., Demiris, N. Kalogeropoulos, K. and Ntzoufras, I. (2022). Bayesian analysis of diffusion-driven multi-type epidemic models with application to COVID-19. arXiv: \url{https://arxiv.org/abs/2211.15229} #' #' Stan Development Team (2020). RStan: the R interface to Stan. R package version 2.21.3. \url{https://mc-stan.org} NULL
/scratch/gouwar.j/cran-all/cranData/Bernadette/R/Bernadette-package.R
#' Country-specific age distribution #' #' Function to extract the age distribution of a country for a given year, broken #' down by 5-year age bands and gender, following the United Nations 2019 Revision of #' World Population Prospects. #' #' @param country character; #' country identifier, following the List of United Nations Member States. See \link[Bernadette]{countries_un}. #' #' @param year numeric; #' calendar year. #' #' @return An object of class \emph{data.frame} that contains the age distribution. #' #' @references #' United Nations, Department of Economic and Social Affairs, Population Division (2019). World Population Prospects 2019, Online Edition. Rev. 1. #' #' Prem, K., van Zandvoort, K., Klepac, P. et al (2017). Projecting contact matrices in 177 geographical regions: an update and comparison with empirical data for the COVID-19 era. medRxiv 2020.07.22.20159772; doi: https://doi.org/10.1101/2020.07.22.20159772 #' #' @examples #' # Age distribution for Greece in 2020: #'age_distr <- age_distribution(country = "Greece", year = 2020) #' #' @export age_distribution <- function(country, year ){ if(country %in% countries_un() == FALSE) stop("The user-defined country name is not available. Please check countries_un().") tmp_env <- new.env() data_path <- paste0("https://github.com//kieshaprem//synthetic-contact-matrices//", "raw//master//generate_synthetic_matrices//input//pop//", "poptotal", ".rdata" ) load(base::url(data_path), envir = tmp_env) dem_table <- tmp_env$poptotal filter <- dem_table$countryname == country & dem_table$year == year dem_table <- unique( dem_table[filter, ] ) dem_table$total <- NULL dem_table_long <- stats::reshape(dem_table, direction = "long", varying = list(names(dem_table)[4:ncol(dem_table)]), v.names = "PopTotal", idvar = c("iso3c", "countryname", "year"), timevar = "AgeGrpStart", times = colnames(dem_table)[-c(1:3)]) rownames(dem_table_long) <- NULL dem_table_long$AgeGrpStart <- as.numeric( gsub("age", "", dem_table_long$AgeGrpStart)) dem_table_pre70 <- dem_table_long[dem_table_long$AgeGrpStart < 75,] dem_table_75plus <- dem_table_long[dem_table_long$AgeGrpStart >= 75,] dem_table_75plus <- cbind(dem_table_75plus[1, c("iso3c", "countryname", "year", "AgeGrpStart")], data.frame(PopTotal = sum(dem_table_75plus$PopTotal))) dem_table_75plus$AgeGrp <- "75+" dem_table_pre70$AgeGrp <- dem_table_pre70$AgeGrpStart + 4 dem_table_pre70$AgeGrp <- paste0(dem_table_pre70$AgeGrpStart, "-", dem_table_pre70$AgeGrp) #---------------- out <- rbind(dem_table_pre70, dem_table_75plus) out <- as.data.frame(out) out$iso3c <- as.character(out$iso3c) out$countryname <- as.character(out$countryname) colnames(out)[1:3] <- c("iso3c", "Location", "Time") out <- out[c("Location", "Time", "AgeGrp", "AgeGrpStart", "PopTotal")] return(out) }# End function
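# ---------------------------------------------------------------------------
# Illustration (not part of the package): the core reshaping step above keeps
# the 5-year bands below 75 and collapses everything from 75 upwards into a
# single "75+" band. The same pattern on a toy age table:
pop <- data.frame(AgeGrpStart = seq(0, 95, by = 5),
                  PopTotal    = rep(1000, 20))
under75 <- pop[pop$AgeGrpStart < 75, ]
over75  <- data.frame(AgeGrpStart = 75,
                      PopTotal    = sum(pop$PopTotal[pop$AgeGrpStart >= 75]))
rbind(under75, over75)  # 15 five-year bands plus one aggregated 75+ band
# ---------------------------------------------------------------------------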
/scratch/gouwar.j/cran-all/cranData/Bernadette/R/age_distribution.R
#' Aggregate the age distribution matrix #' #' Function to aggregate the age distribution according to user-defined age groups. #' #' @param x data.frame; #' an age distribution matrix. See \link[Bernadette]{age_distribution}. #' #' @param lookup_table data.frame; #' a user-defined dataframe which maps the sixteen 5-year age bands to a new set of age bands. #' #' @return An object of class \emph{data.frame} that contains the aggregated age distribution. #' #' @references #' United Nations, Department of Economic and Social Affairs, Population Division (2019). World Population Prospects 2019, Online Edition. Rev. 1. #' #' Prem, K., van Zandvoort, K., Klepac, P. et al (2020). Projecting contact matrices in 177 geographical regions: an update and comparison with empirical data for the COVID-19 era. medRxiv 2020.07.22.20159772; doi: https://doi.org/10.1101/2020.07.22.20159772 #' #' @examples #' # Import the age distribution for Greece in 2020: #' age_distr <- age_distribution(country = "Greece", year = 2020) #' #' # Lookup table: #' lookup_table <- data.frame(Initial = age_distr$AgeGrp, #' Mapping = c(rep("0-39", 8), #' rep("40-64", 5), #' rep("65+" , 3))) #' #' # Aggregate the age distribution table: #' aggr_age <- aggregate_age_distribution(age_distr, lookup_table) #' #' # Plot the aggregated age distribution matrix: #' plot_age_distribution(aggr_age) #' #' @export #' aggregate_age_distribution <- function(x, lookup_table ){ dt <- base::merge(x, lookup_table, by.x = "AgeGrp", by.y = "Initial", all.x = TRUE) %>% base::subset(select = !grepl("^AgeGrpStart$", names(.))) %>% stats::aggregate(PopTotal ~ Mapping, data = ., FUN = sum) %>% stats::setNames(c("AgeGrp", "PopTotal")) dt$Location <- rep(unique(x$Location), dim(dt)[1]) dt$Time <- rep(unique(x$Time), dim(dt)[1]) dt <- dt[c("Location", "Time", "AgeGrp", "PopTotal")] return(dt) }
/scratch/gouwar.j/cran-all/cranData/Bernadette/R/aggregate_age_distribution.R
#' Aggregate a contact matrix #' #' Function to aggregate a contact matrix according to user-defined age groups. #' #' @param object data.frame; #' a contact matrix. See \link[Bernadette]{contact_matrix}. #' #' @param lookup_table data.frame; #' a user-defined data.frame which maps the sixteen 5-year age bands to a new set of age bands. #' #' @param age_distr data.frame; #' the aggregated age distribution. See \link[Bernadette]{aggregate_contact_matrix}. #' #' @return An object of class \emph{data.frame}. #' #' @examples #' # Import the age distribution for Greece in 2020: #' age_distr <- age_distribution(country = "Greece", year = 2020) #' #' # Lookup table: #' lookup_table <- data.frame(Initial = age_distr$AgeGrp, #' Mapping = c(rep("0-39", 8), #' rep("40-64", 5), #' rep("65+" , 3))) #' #' # Aggregate the age distribution table: #' aggr_age <- aggregate_age_distribution(age_distr, lookup_table) #' #' # Import the projected contact matrix for Greece: #' conmat <- contact_matrix(country = "GRC") #' #' # Aggregate the contact matrix: #' aggr_cm <- aggregate_contact_matrix(conmat, lookup_table, aggr_age) #' #' # Plot the contact matrix: #' plot_contact_matrix(aggr_cm) #' #' @export #' aggregate_contact_matrix <- function(object, lookup_table, age_distr ){ if(!identical(unique(lookup_table$Mapping), age_distr$AgeGrp)) { stop("The mapped age group labels do not correspond to the age group labels of the aggregated age distribution matrix.\n") } indiv_age_df <- base::data.frame(indiv_age = lookup_table$Initial) object_df <- cbind(object, indiv_age_df) long_dt <- stats::reshape(object_df, varying = list(base::names(object_df)[-ncol(object_df)]), direction = "long", sep = "_", timevar = "contact_age", times = base::names(object_df)[-ncol(object_df)], v.names = "contact") long_dt$id <- NULL long_dt$indiv_age <- as.factor(long_dt$indiv_age) long_dt$contact_age <- as.factor(long_dt$contact_age) names(lookup_table) <- c("indiv_age", "indiv_age_agr") long_dt <- base::merge(long_dt, lookup_table, by = "indiv_age", all.x = TRUE) names(lookup_table) <- c("contact_age", "contact_age_agr") long_dt <- base::merge(long_dt, lookup_table, by = "contact_age", all.x = TRUE) long_dt$diag_element <- ifelse(long_dt$indiv_age == long_dt$contact_age, TRUE, FALSE) dt_aggregate <- stats::aggregate(long_dt$contact, by = list(long_dt$indiv_age_agr, long_dt$contact_age_agr, long_dt$diag_element), mean) names(dt_aggregate) <- c("indiv_age_agr", "contact_age_agr", "diag_element", "mean_cm") dt_aggregate <- stats::aggregate(dt_aggregate$mean_cm, by = list(dt_aggregate$indiv_age_agr, dt_aggregate$contact_age_agr), sum) names(dt_aggregate) <- c("indiv_age_agr", "contact_age_agr", "mean_cm") cm_to_rescale <- as.matrix( stats::reshape(dt_aggregate, idvar = "indiv_age_agr", timevar = "contact_age_agr", direction = "wide")) cm_to_rescale <- cm_to_rescale[,2:ncol(cm_to_rescale)] class(cm_to_rescale) <- "numeric" age_bands <- length(unique(age_distr$AgeGrp)) ret <- matrix(0, age_bands, age_bands) for (i in seq(1, age_bands, 1)) { for (j in seq(1, age_bands, 1)) { ret[i,j] <- 0.5*(1/age_distr$PopTotal[i])*(cm_to_rescale[i,j]*age_distr$PopTotal[i] + cm_to_rescale[j,i]*age_distr$PopTotal[j]) } } rownames(ret) <- colnames(ret) <- unique(long_dt$indiv_age_agr) ret <- as.data.frame(ret) return(ret) }
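# ---------------------------------------------------------------------------
# Illustration (not part of the package): the final double loop above enforces
# reciprocity of total contacts between age groups,
#   C[i, j] = 0.5 * (1 / N_i) * (c[i, j] * N_i + c[j, i] * N_j),
# which guarantees N_i * C[i, j] = N_j * C[j, i]. A 2x2 toy check:
sym_cm <- function(cm, pop) {
  A   <- nrow(cm)
  out <- matrix(0, A, A)
  for (i in seq_len(A))
    for (j in seq_len(A))
      out[i, j] <- 0.5 * (1 / pop[i]) * (cm[i, j] * pop[i] + cm[j, i] * pop[j])
  out
}
cm  <- matrix(c(10, 6,
                 2, 4), nrow = 2, byrow = TRUE)
pop <- c(100, 300)
C   <- sym_cm(cm, pop)
all.equal(pop[1] * C[1, 2], pop[2] * C[2, 1])  # TRUE: total contacts balance
# ---------------------------------------------------------------------------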
/scratch/gouwar.j/cran-all/cranData/Bernadette/R/aggregate_contact_matrix.R
#' Aggregate the Infection Fatality Ratio #' #' Function to aggregate the age-specific Infection Fatality Ratio (IFR) estimates reported by the REACT-2 large-scale community study of SARS-CoV-2 seroprevalence in England according to user-defined age groups. #' #' @param x data.frame; #' an age distribution matrix. See \link[Bernadette]{age_distribution}. #' #' @param user_AgeGrp vector; #' a user-defined vector which maps the four age groups considered in REACT-2 to a new set of age groups. #' #' @param data_cases data.frame; #' time series dataset containing the age-stratified infection counts. See \link[Bernadette]{age_specific_infection_counts}. #' #' @return A list of two data frames that contains the aggregated IFR estimates. #' #' @references #' Ward, H., Atchison, C., Whitaker, M. et al. (2021). SARS-CoV-2 antibody prevalence in England following the first peak of the pandemic. Nature Communications 12, 905 #' #' @examples #' # Import the age distribution for Greece in 2020: #' age_distr <- age_distribution(country = "Greece", year = 2020) #' # Lookup table: #' age_mapping <- c(rep("0-39", 8), #' rep("40-64", 5), #' rep("65+", 3)) #' #' data(age_specific_infection_counts) #' #' # Aggregate the IFR: #' aggr_age_ifr <- aggregate_ifr_react(age_distr, age_mapping, age_specific_infection_counts) #' #' @export #' aggregate_ifr_react <- function(x, user_AgeGrp, data_cases){ if( length(user_AgeGrp) != nrow(x) ) stop("The mapped age group labels do not correspond to the age group labels of the aggregated age distribution matrix.\n") ifr_react <- data.frame(AgeGrp = c("0-14","15-44", "45-64", "65-74", "75-100"), IFR = c(0, 0.03, 0.52, 3.13, 11.64)/100) ifr_react$AgeGrpStart <- sapply(1:nrow(ifr_react), function(x){min(as.numeric(strsplit(ifr_react$AgeGrp, "-")[[x]]))}) ifr_react$AgeGrpEnd <- sapply(1:nrow(ifr_react), function(x){max(as.numeric(strsplit(ifr_react$AgeGrp, "-")[[x]]))}) temp_x <- x temp_x$AgeGrp <- gsub("\\+", "-100", temp_x$AgeGrp) temp_x$AgeGrpEnd <- sapply(1:nrow(temp_x), function(x){max(as.numeric(strsplit(temp_x$AgeGrp, "-")[[x]]))}) temp_x$IFR <- rep(0,nrow(temp_x)) temp_x$Group_mapping <- user_AgeGrp output <- list() # English data (population Europe Website): if ("AgeGrpStart" %nin% colnames(temp_x) ) temp_x$AgeGrpStart <- sapply(1:nrow(temp_x), function(x){min(as.numeric(strsplit(temp_x$AgeGrp, "-")[[x]]))}) for (i in 1:nrow(temp_x)){ for (j in 1:nrow(ifr_react)) { if ( (temp_x$AgeGrpStart[i] >= ifr_react$AgeGrpStart[j]) & (temp_x$AgeGrpEnd[i] <= ifr_react$AgeGrpEnd[j]) ) temp_x$IFR[i] <- ifr_react$IFR[j] }# End for }# End for # Group data by Group_mapping and calculate PopPerc and AgrIFR using base R functions pop_total <- tapply(temp_x$PopTotal, temp_x$Group_mapping, sum) output[[1]] <- base::transform(temp_x, PopPerc = ave(PopTotal, Group_mapping, FUN = function(x) prop.table(x)) ) output[[1]] <- base::transform(output[[1]], AgrIFR = ave(IFR * PopPerc, Group_mapping, FUN = sum)) #---- Time-independent IFR: output[[2]] <- aggregate(AgrIFR ~ Group_mapping, output[[1]], FUN = function(x) x[1]) #---- Time-dependent IFR: if( any( c("Index", "Right", "Week_ID") %in% colnames(data_cases) ) ) { data_cases_weights <- data_cases[, !(colnames(data_cases) %in% c("Index", "Right", "Week_ID"))] } else data_cases_weights <- data_cases data_cases_weights <- cbind(data_cases_weights[, "Date", drop = FALSE], data.frame(sapply(data_cases_weights[, -1, drop = FALSE], function(x) x / data_cases$Total_Cases))) data_cases_weights$Date <- data_cases$Date data_cases_weights <- 
data_cases_weights[, !(names(data_cases_weights) %in% "Total_Cases")] colnames(data_cases_weights)[2:length(colnames(data_cases_weights))] <- output[[2]]$Group_mapping data_cases_weights <- stats::reshape(data_cases_weights, direction = "long", varying = list(names(data_cases_weights)[2:ncol(data_cases_weights)]), v.names = "Weight", idvar = c("Date"), timevar = "Group", times = colnames(data_cases_weights)[-1]) rownames(data_cases_weights) <- NULL data_cases_weights <- data_cases_weights[order(data_cases_weights$Date, data_cases_weights$Group), ] data_cases_weights <- base::merge(data_cases_weights, output[[2]], by.x = "Group", by.y = "Group_mapping", all.x = TRUE) data_cases_weights$Weighted_IFR <- with(data_cases_weights, Weight * AgrIFR) data_cases_weights <- data_cases_weights[, !(names(data_cases_weights) %in% c("Weight", "AgrIFR"))] output[[3]] <- stats::reshape(data_cases_weights, idvar = "Date", timevar = "Group", direction = "wide") colnames(output[[3]]) <- gsub("Weighted_IFR.", "", colnames(output[[3]])) output[[3]] <- output[[3]][order(output[[3]]$Date), ] return(output) }
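# ---------------------------------------------------------------------------
# Illustration (not part of the package): the time-dependent IFR computed above
# is a case-weighted average of the aggregated group-specific IFRs,
#   IFR_t = sum_g (cases[g, t] / cases[t]) * IFR[g].
# A single-date toy with three groups (all numbers made up):
ifr_g <- c(`0-39` = 0.0002, `40-64` = 0.004, `65+` = 0.07)
cases <- c(`0-39` = 120,    `40-64` = 60,    `65+` = 20)
sum(cases / sum(cases) * ifr_g)  # weighted IFR for that date
# ---------------------------------------------------------------------------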
/scratch/gouwar.j/cran-all/cranData/Bernadette/R/aggregate_ifr_react.R
#' Country-specific contact matrix
#'
#' A 16 by 16 contact matrix whose entry in row i and column j corresponds to the
#' number of contacts made by an individual in group i with an individual in group j.
#'
#' @param country
#' A character indicating the country identifier. See \link[Bernadette]{country_contact_matrices}.
#'
#' @return An object of class "data.frame".
#'
#' @references
#' Prem, K., van Zandvoort, K., Klepac, P. et al (2020). Projecting contact matrices in 177 geographical regions: an update and comparison with empirical data for the COVID-19 era. medRxiv 2020.07.22.20159772; doi: https://doi.org/10.1101/2020.07.22.20159772
#'
#' @examples
#' conmat <- contact_matrix(country = "GRC")
#'
#' @export
#'
contact_matrix <- function(country){

  if(country %in% country_contact_matrices() == FALSE) stop("The user-defined country name is not available. Please check country_contact_matrices().")

  tmp_env <- new.env()

  utils::data(contact_matrices, envir = tmp_env)

  contact_matrix <- data.frame( tmp_env$contact_matrices[[country]] )

  age_groups <- c("0-4",   "5-9",   "10-14", "15-19",
                  "20-24", "25-29", "30-34", "35-39",
                  "40-44", "45-49", "50-54", "55-59",
                  "60-64", "65-69", "70-74", "75+")

  rownames(contact_matrix) <- colnames(contact_matrix) <- age_groups

  return(contact_matrix)
}
/scratch/gouwar.j/cran-all/cranData/Bernadette/R/contact_matrix.R
#' Names of countries for which a contact matrix is available
#'
#' Function to extract the names of the countries whose projected contact matrix is available.
#'
#' @return A character vector of length 177 with the IDs of each of the 177 geographical regions.
#'
#' @references
#' Prem, K., van Zandvoort, K., Klepac, P. et al (2017). Projecting contact matrices in 177 geographical regions: an update and comparison with empirical data for the COVID-19 era. medRxiv 2020.07.22.20159772; doi: https://doi.org/10.1101/2020.07.22.20159772
#'
#' @examples
#' country_contact_matrices()
#'
#' @export
#'
country_contact_matrices <- function() {

  tmp_env <- new.env()

  utils::data(contact_matrices, envir = tmp_env)

  out <- tmp_env$contact_matrices

  country_initials <- names(out)

  return(country_initials)
}
/scratch/gouwar.j/cran-all/cranData/Bernadette/R/countries_contact_matrix.R
#' Names of countries with an available age distribution
#'
#' Function to extract the names of the countries whose discrete age distribution is
#' made available by the United Nations.
#'
#' @return A character vector that contains the full names of 201 countries/areas.
#'
#' @references
#' United Nations, Department of Economic and Social Affairs, Population Division (2019). World Population Prospects 2019, Online Edition. Rev. 1.
#'
#' Prem, K., van Zandvoort, K., Klepac, P. et al (2017). Projecting contact matrices in 177 geographical regions: an update and comparison with empirical data for the COVID-19 era. medRxiv 2020.07.22.20159772; doi: https://doi.org/10.1101/2020.07.22.20159772
#'
#' @examples
#' countries_un()
#'
#' @export
#'
countries_un <- function() {

  data_path <- paste0("https://github.com//kieshaprem//synthetic-contact-matrices//",
                      "raw//master//generate_synthetic_matrices//input//pop//",
                      "poptotal",
                      ".rdata")

  load(base::url(data_path))

  country_names <- as.vector( unique(poptotal$countryname) )

  return(country_names)
}
/scratch/gouwar.j/cran-all/cranData/Bernadette/R/countries_un.R
#' Contact matrices per country #' #' A list of 16 by 16 contact matrices for 177 countries. #' Row i of a column j of a contact matrix corresponds to the number of contacts #' made by an individual in group i with an individual in group j. #' #' @format A list of 16 by 16 dataframes for 177 countries. #' #' @return A list object of 16 by 16 dataframes for 177 countries. #' #' @usage data(contact_matrices) #' #' @section References: #' Prem, K., van Zandvoort, K., Klepac, P. et al (2020). Projecting contact matrices in 177 geographical regions: an update and comparison with empirical data for the COVID-19 era. medRxiv 2020.07.22.20159772; doi: https://doi.org/10.1101/2020.07.22.20159772 #' "contact_matrices" #' Age distribution of new reported deaths for Greece #' #' A dataset containing the age distribution of reported deaths in Greece from 2020-08-31 to 2021-03-28 (30 weeks). #' The dataset has been extracted from the Hellenic National Public Health Organization database. #' #' @format A data frame with 210 rows and 8 variables: #' \describe{ #' \item{Index}{integer; a sequence of integer numbers from 1 to 210} #' \item{Right}{numeric; Index + 1} #' \item{Date}{Date, format; date in the format "2020-08-31"} #' \item{Week_ID}{numeric; index of the week that each day falls into. A week is assumed to have 7 days} #' \item{New_Deaths}{numeric; count of new total reported deaths on a given date} #' \item{0-39}{numeric; count of new reported deaths on a given date for the age group "0-39"} #' \item{40-64}{numeric; count of new reported deaths on a given date for the age group "40-64"} #' \item{65+}{numeric; count of new reported deaths on a given date for the age group "65+"} #' } #' #' @return A data.frame object with 210 rows and 8 variables. #' #' @usage data(age_specific_mortality_counts) #' #' @section References: #' Sandbird (2022). Daily regional statistics for covid19 cases in Greece. #' #' @source \url{https://github.com/Sandbird/covid19-Greece/} #' "age_specific_mortality_counts" #' Age distribution of new reported infections for Greece #' #' A dataset containing the age distribution of new reported infections in Greece from 2020-08-31 to 2021-03-28 (30 weeks). #' The dataset has been extracted from the Hellenic National Public Health Organization database. #' #' @format A data frame with 210 rows and 8 variables: #' \describe{ #' \item{Index}{integer; a sequence of integer numbers from 1 to 210} #' \item{Right}{numeric; Index + 1} #' \item{Date}{Date, format; date in the format "2020-08-31"} #' \item{Week_ID}{numeric; index of the week that each day falls into. A week is assumed to have 7 days} #' \item{Total_Cases}{numeric; count of total reported infections on a given date} #' \item{0-39}{numeric; count of reported infections on a given date for the age group "0-39"} #' \item{40-64}{numeric; count of reported infections on a given date for the age group "40-64"} #' \item{65+}{numeric; count of reported infections on a given date for the age group "65+"} #' } #' #' @return A data.frame object with 210 rows and 8 variables. #' #' @usage data(age_specific_infection_counts) #' #' @section References: #' Sandbird (2022). Daily regional statistics for covid19 cases in Greece. #' #' @source \url{https://github.com/Sandbird/covid19-Greece/} #' "age_specific_infection_counts" #' Age distribution of cumulative reported infections for Greece #' #' A dataset containing the age distribution of cumulative reported infections in Greece from 2020-08-31 to 2021-03-28 (30 weeks). 
#' The dataset has been extracted from the Hellenic National Public Health Organization database. #' #' @format A data frame with 210 rows and 5 variables: #' \describe{ #' \item{Date}{Date, format; date in the format "2020-08-31"} #' \item{Total_Cases}{numeric; count of total cumulative reported infections on a given date} #' \item{0-39}{numeric; count of cumulative reported infections on a given date for the age group "0-39"} #' \item{40-64}{numeric; count of cumulative reported infections on a given date for the age group "40-64"} #' \item{65+}{numeric; count of cumulative reported infections on a given date for the age group "65+"} #' } #' #' @return A data.frame object with 210 rows and 5 variables. #' #' @usage data(age_specific_cusum_infection_counts) #' #' @section References: #' Sandbird (2022). Daily regional statistics for covid19 cases in Greece. #' #' @source \url{https://github.com/Sandbird/covid19-Greece/} #' "age_specific_cusum_infection_counts"
/scratch/gouwar.j/cran-all/cranData/Bernadette/R/data.R
# The names supplied below are of functions or other objects that # should be regarded as defined globally. # The user is advised to be mindful of this list of names when performing # an action that might conflict it. utils::globalVariables(c("diag_element", "AgeGrp", "Mapping", "mean_cm", "PopFemale", "PopMale", "PopTotal", "contact", "contact_age", "contact_age_agr", "contact_matrices", "poptotal", "indiv_age", "indiv_age_agr", "proportion", "stats", "Group_mapping", "IFR", "PopPerc", "Posterior", "Date", "low25", "high75", "low", "high", "." ))
/scratch/gouwar.j/cran-all/cranData/Bernadette/R/globals.R
#' Distribution of the time between infection and death #' #' Function to discretize the infection-to-death distribution #' #' @param ts_length integer; #' time from infection to death in days. #' #' @param gamma_mean numeric; #' mean of a gamma distribution, for a given shape and rate. See also \code{\link[stats]{GammaDist}}. #' #' @param gamma_cv numeric; #' coefficient of variation of a gamma distribution, for a given shape and rate. See also \code{\link[stats]{GammaDist}}. #' #' @param gamma_shape numeric; #' shape parameter of a gamma distribution. See also \code{\link[stats]{GammaDist}}. #' #' @param gamma_rate numeric; #' rate parameter of a gamma distribution. See also \code{\link[stats]{GammaDist}}. #' #' @references #' Flaxman et al (2020). Estimating the effects of non-pharmaceutical interventions on COVID-19 in Europe. #' Nature, 584, 257-261. #' #' @return A vector of length \emph{ts_length}. #' #' @examples #' # Age-specific mortality/incidence count time series: #' data(age_specific_mortality_counts) #' #' # Infection-to-death distribution: #' ditd <- itd_distribution(ts_length = nrow(age_specific_mortality_counts), #' gamma_mean = 24.19231, #' gamma_cv = 0.3987261) #' #' @export #' itd_distribution <- function(ts_length, gamma_mean = 24.19231, gamma_cv = 0.3987261, gamma_shape= 6.29, gamma_rate = 0.26 ){ if ( ( is.numeric(gamma_mean) & is.numeric(gamma_cv) & (is.null(gamma_shape) & is.null(gamma_rate) ) ) | ( is.numeric(gamma_mean) & is.numeric(gamma_cv) & (is.numeric(gamma_shape) & is.numeric(gamma_rate) ) ) ){ shape <- 1/(gamma_cv^2) rate <- shape/gamma_mean } else if( is.null(gamma_mean) & is.null(gamma_cv) & (is.numeric(gamma_shape) & is.numeric(gamma_rate) ) ){ shape <- gamma_shape rate <- gamma_rate } else if(is.null(gamma_mean) & is.null(gamma_cv) & (is.null(gamma_shape) & is.null(gamma_rate) )){ stop("Either the pair (gamma_mean, gamma_cv) or the pair (gamma_shape, gamma_rate) must be numeric.") } if (shape <= 0 ) stop("The shape parameter is not positive.") if (rate <= 0 ) stop("The rate parameter is not positive.") infection_death <- rep(0, ts_length) infection_death[1] <- stats::pgamma(1.5, shape = shape, rate = rate) - stats::pgamma(0, shape = shape, rate = rate) for(i in 2:ts_length) { infection_death[i] <- stats::pgamma(i+.5, shape = shape, rate = rate) - stats::pgamma(i-.5, shape = shape, rate = rate) } return(infection_death) }# End function
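# ---------------------------------------------------------------------------
# Illustration (not part of the package): the discretisation above gives day i
# the Gamma probability mass on [i - 0.5, i + 0.5] (day 1 takes [0, 1.5]), with
# shape = 1 / CV^2 and rate = shape / mean when the (mean, CV) pair is supplied.
# A quick sanity check that the daily masses nearly sum to one over a long
# horizon (the small remainder is the truncated right tail):
ditd <- itd_distribution(ts_length  = 210,
                         gamma_mean = 24.19231,
                         gamma_cv   = 0.3987261)
sum(ditd)  # close to 1
# ---------------------------------------------------------------------------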
/scratch/gouwar.j/cran-all/cranData/Bernadette/R/infection_to_death.R
# Set arguments for sampling
#
# Prepare a list of arguments to use with \code{rstan::sampling} via
# \code{do.call}.
#
# @param object The stanfit object to use for sampling.
#
# @param user_dots The contents of \code{...} from the user's call to
#   the \code{stan_*} modeling function.
#
# @param user_adapt_delta numeric; The value for \code{adapt_delta} specified by the user.
#
# @param user_max_treedepth numeric; The value for \code{max_treedepth} specified by the user.
#
# @param ... Other arguments to \code{\link[rstan]{sampling}} not coming from
#   \code{user_dots} (e.g. \code{data}, \code{pars}, \code{init}, etc.).
#
# @return A list of arguments to use for the \code{args} argument for
#   \code{do.call(sampling, args)}.
set_sampling_args <- function(object,
                              user_dots = list(),
                              user_adapt_delta = NULL,
                              user_max_treedepth = NULL,
                              ...) {
  args <- list(object = object, ...)
  unms <- names(user_dots)

  for (j in seq_along(user_dots)) args[[unms[j]]] <- user_dots[[j]]

  defaults <- default_stan_control(adapt_delta = user_adapt_delta,
                                   max_treedepth = user_max_treedepth)

  if (!"control" %in% unms) {
    # no user-specified 'control' argument
    args$control <- defaults
  } else {
    # user specifies a 'control' argument
    if (!is.null(user_adapt_delta)) {
      # if user specified adapt_delta argument to stan_* then
      # set control$adapt_delta to user-specified value
      args$control$adapt_delta <- user_adapt_delta
    } else {
      # use default adapt_delta for the user's chosen prior
      args$control$adapt_delta <- defaults$adapt_delta
    }

    if (!is.null(user_max_treedepth)) {
      # if user specified max_treedepth argument to stan_* then
      # set control$max_treedepth to user-specified value
      args$control$max_treedepth <- user_max_treedepth
    } else {
      # use default max_treedepth for the user's chosen prior
      args$control$max_treedepth <- defaults$max_treedepth
    }
  }
  args$save_warmup <- FALSE

  return(args)
}

# Default control arguments for sampling
#
# Called by set_sampling_args to set the default 'control' argument for
# \code{rstan::sampling} if none specified by user.
#
# @param adapt_delta numeric; User's \code{adapt_delta} argument.
# @param max_treedepth numeric; Default for \code{max_treedepth}.
# @return A list with \code{adapt_delta} and \code{max_treedepth}.
default_stan_control <- function(adapt_delta = NULL,
                                 max_treedepth = NULL) {
  if (is.null(adapt_delta)) adapt_delta <- 0.80
  if (is.null(max_treedepth)) max_treedepth <- 15L

  nlist(adapt_delta, max_treedepth)
}

# Test if an object is a stanigbm object
#
# @param x The object to test.
is.stanigbm <- function(x) inherits(x, "stanigbm")

# Throw error if object isn't a stanigbm object
#
# @param x The object to test.
validate_stanigbm_object <- function(x, call. = FALSE) {
  if (!is.stanigbm(x))
    stop("Object is not a stanigbm object.", call. = call.)
}

# Check that a stanfit object (or list returned by rstan::optimizing) is valid
check_stanfit <- function(x) {
  if (is.list(x)) {
    if (!all(c("par", "value") %in% names(x)))
      stop("Invalid object produced please report bug")
  } else {
    stopifnot(is(x, "stanfit"))
    if (x@mode != 0) stop("Invalid stanfit object produced please report bug")
  }
  return(TRUE)
}

# Convert mean and variance to alpha and beta parameters for a Beta(alpha, beta) distribution
#
# The calculations will only work (alpha, beta > 0) if the variance is less than mean*(1-mean).
#
# @param mu numeric; Mean of Beta distribution.
# @param var numeric; Variance of Beta distribution.
estBetaParams <- function(mu, var) {
  if (var >= mu * (1 - mu)) stop("Ensure that var < mu*(1-mu).")

  alpha <- ((1 - mu) / var - 1 / mu) * mu^2
  beta  <- alpha * (1 / mu - 1)

  return(c(alpha = alpha, beta = beta))
}# End function

# Create a named list using specified names or, if names are omitted, using the
# names of the objects in the list
#
# @param ... Objects to include in the list.
# @return A named list.
nlist <- function(...) {
  m <- match.call()
  out <- list(...)
  no_names <- is.null(names(out))
  has_name <- if (no_names) FALSE else nzchar(names(out))

  if (all(has_name)) return(out)

  nms <- as.character(m)[-1L]
  if (no_names) {
    names(out) <- nms
  } else {
    names(out)[!has_name] <- nms[!has_name]
  }

  return(out)
}

# Calculate the spectral radius of a matrix
#
# @param x a numeric or complex matrix whose spectral decomposition is to be computed.
#   Logical matrices are coerced to numeric.
eigen_mat <- function(x) base::max( base::Re( base::eigen(x)$values ) )

#---- Helpers:
`%nin%` <- Negate(`%in%`)
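# ---------------------------------------------------------------------------
# Illustration (not part of the package): estBetaParams() inverts the Beta
# moment formulas, valid only when var < mu * (1 - mu):
#   alpha = ((1 - mu) / var - 1 / mu) * mu^2,   beta = alpha * (1 / mu - 1).
# A quick round-trip check:
pars  <- estBetaParams(mu = 0.1, var = 0.005)   # alpha = 1.7, beta = 15.3
alpha <- pars[["alpha"]]; beta <- pars[["beta"]]
c(mean = alpha / (alpha + beta),                                  # 0.1
  var  = alpha * beta / ((alpha + beta)^2 * (alpha + beta + 1)))  # 0.005
# And the %nin% helper is simply the negation of %in%:
c("Index", "Date") %nin% c("Date", "Week_ID")  # TRUE FALSE
# ---------------------------------------------------------------------------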
/scratch/gouwar.j/cran-all/cranData/Bernadette/R/misc.R
#' Bar plot of the age distribution #' #' @param x data.frame; #' the age distribution matrix. See \link[Bernadette]{age_distribution} and \link[Bernadette]{aggregate_age_distribution}. #' #' @return A ggplot object that can be further customized using the #' \pkg{ggplot2} package. #' #' @references #' United Nations, Department of Economic and Social Affairs, Population Division (2019). World Population Prospects 2019, Online Edition. Rev. 1. #' #' @examples #' # Import the age distribution for Greece in 2020: #' age_distr <- age_distribution(country = "Greece", year = 2020) #' #' plot_age_distribution(age_distr) #' #' # Lookup table: #' lookup_table <- data.frame(Initial = age_distr$AgeGrp, #' Mapping = c(rep("0-39", 8), #' rep("40-64", 5), #' rep("65+" , 3))) #' #' # Aggregate the age distribution table: #' aggr_age <- aggregate_age_distribution(age_distr, lookup_table) #' #' # Plot the aggregated age distribution matrix: #' plot_age_distribution(aggr_age) #' #' @export #' plot_age_distribution <- function(x) { percentage_data <- x[c("AgeGrp", "PopTotal")] percentage_data$proportion = percentage_data$PopTotal/sum(percentage_data$PopTotal) percentage_data$AgeGrp <- x$AgeGrp levs <- as.factor( percentage_data$AgeGrp ) levs <- factor(levs, levels = percentage_data$AgeGrp) out <- percentage_data %>% ggplot2::ggplot(aes(x = AgeGrp, y = proportion)) + ggplot2::geom_bar(stat = "identity", fill = "steelblue4") + ggplot2::xlim(levs) + ggplot2::labs(x = "Age group (years)", y = "Proportion of the population") + ggplot2::scale_y_continuous(labels = scales::percent) + ggplot2::theme_bw() return(out) }
/scratch/gouwar.j/cran-all/cranData/Bernadette/R/plot_age_distribution.R
#' Contact matrix heatmap #' #' @param x data.frame; #' a contact matrix. See \link[Bernadette]{contact_matrix}. #' #' @return A ggplot object that can be further customized using the #' \pkg{ggplot2} package. #' #' @examples #' # Import the projected contact matrix for Greece: #' conmat <- contact_matrix(country = "GRC") #' #' plot_contact_matrix(conmat) #' #' @export #' plot_contact_matrix <- function(x) { levs <- as.factor( colnames(x) ) levs <- factor(levs, levels = colnames(x)) x$indiv_age <- rownames(x) plot_data <- stats::reshape(x, idvar = "indiv_age", timevar = "contact_age", v.names = "contact", varying = list(names(x)[1:(ncol(x)-1)]), times = colnames(x)[-ncol(x)], direction = "long") rownames(plot_data) <- NULL plot_data <- plot_data[order(plot_data$indiv_age, plot_data$contact_age), ] out <- ggplot2::ggplot(plot_data, ggplot2::aes(y = contact_age, x = indiv_age)) + ggplot2::geom_tile(ggplot2::aes(fill = contact)) + ggplot2::xlim(levs) + ggplot2::ylim(levs) + ggplot2::labs(x = "Age of individual", y = "Age of contact") + ggplot2::coord_cartesian(expand = FALSE) + ggplot2::scale_fill_continuous(guide = ggplot2::guide_legend(), type ="viridis") + ggplot2::guides(fill = ggplot2::guide_colourbar(barwidth = 0.5, title = "Contact rate") ) + ggplot2::theme_bw() return(out) }
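# ---------------------------------------------------------------------------
# Illustration (not part of the package): the stats::reshape() call above is
# the base-R equivalent of pivoting a wide contact matrix into long format,
# one row per (indiv_age, contact_age) pair. A 2x2 toy:
x <- data.frame(`0-39` = c(3, 1), `40+` = c(2, 4), check.names = FALSE)
x$indiv_age <- c("0-39", "40+")
long <- stats::reshape(x,
                       idvar     = "indiv_age",
                       timevar   = "contact_age",
                       v.names   = "contact",
                       varying   = list(names(x)[1:2]),
                       times     = names(x)[1:2],
                       direction = "long")
long  # four rows: every combination of individual and contact age group
# ---------------------------------------------------------------------------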
/scratch/gouwar.j/cran-all/cranData/Bernadette/R/plot_contact_matrix.R
#' Plot the posterior contact matrix #' #' @param object An object of class \code{stanigbm}. See \code{\link[Bernadette]{stan_igbm}}. #' #' @param y_data data.frame; #' age-specific mortality counts in time. See \code{data(age_specific_mortality_counts)}. #' #' @param ... Optional arguments passed to \code{\link[ggplot2]{theme}}. #' #' @return A \code{grid.arrange} object which can be further customised using the \pkg{gridExtra} package. #' #' @references #' Bouranis, L., Demiris, N. Kalogeropoulos, K. and Ntzoufras, I. (2022). Bayesian analysis of diffusion-driven multi-type epidemic models with application to COVID-19. arXiv: \url{https://arxiv.org/abs/2211.15229} #' #' @examples #' \donttest{ #' # Age-specific mortality/incidence count time series: #' data(age_specific_mortality_counts) #' data(age_specific_cusum_infection_counts) #' #' # Import the age distribution for Greece in 2020: #' age_distr <- age_distribution(country = "Greece", year = 2020) #' #' # Lookup table: #' lookup_table <- data.frame(Initial = age_distr$AgeGrp, #' Mapping = c(rep("0-39", 8), #' rep("40-64", 5), #' rep("65+" , 3))) #' #' # Aggregate the age distribution table: #' aggr_age <- aggregate_age_distribution(age_distr, lookup_table) #' #' # Import the projected contact matrix for Greece: #' conmat <- contact_matrix(country = "GRC") #' #' # Aggregate the contact matrix: #' aggr_cm <- aggregate_contact_matrix(conmat, lookup_table, aggr_age) #' #' # Aggregate the IFR: #' ifr_mapping <- c(rep("0-39", 8), rep("40-64", 5), rep("65+", 3)) #' #' aggr_age_ifr <- aggregate_ifr_react(age_distr, ifr_mapping, age_specific_cusum_infection_counts) #' #' # Infection-to-death distribution: #' ditd <- itd_distribution(ts_length = nrow(age_specific_mortality_counts), #' gamma_mean = 24.19231, #' gamma_cv = 0.3987261) #' #' # Posterior sampling: #' #' rstan::rstan_options(auto_write = TRUE) #' chains <- 1 #' options(mc.cores = chains) #' #' igbm_fit <- stan_igbm(y_data = age_specific_mortality_counts, #' contact_matrix = aggr_cm, #' age_distribution_population = aggr_age, #' age_specific_ifr = aggr_age_ifr[[3]], #' itd_distr = ditd, #' incubation_period = 3, #' infectious_period = 4, #' likelihood_variance_type = "linear", #' ecr_changes = 7, #' prior_scale_x0 = 1, #' prior_scale_x1 = 1, #' prior_scale_contactmatrix = 0.05, #' pi_perc = 0.1, #' prior_volatility = normal(location = 0, scale = 1), #' prior_nb_dispersion = exponential(rate = 1/5), #' algorithm_inference = "sampling", #' nBurn = 10, #' nPost = 30, #' nThin = 1, #' chains = chains, #' adapt_delta = 0.6, #' max_treedepth = 14, #' seed = 1) #' #' # Visualise the posterior distribution of the random contact matrix: #' plot_posterior_cm(object = igbm_fit, #' y_data = age_specific_mortality_counts) #'} #' @export #' plot_posterior_cm <- function(object, y_data, ...){ check <- check_stanfit(object) if (!isTRUE(check)) stop("Provide an object of class 'stanfit' using rstan::sampling() or rstan::vb()") if("theta_tilde" %in% names(object) ) stop("Perform MCMC sampling using rstan::sampling() or rstan::vb()") posterior_draws <- rstan::extract(object) age_grps <- ncol(y_data[,-c(1:5)]) if(ncol(posterior_draws$cm_sample) != age_grps) stop( paste0("The number of rows in the age distribution table must be equal to ", age_grps) ) niters <- nrow(posterior_draws[["cm_sample"]]) plot_cm_list <- list() plot_cm_indx <- 1 for (i in 1:age_grps){ for (j in 1:age_grps){ dt_cm_post <- data.frame(Posterior = posterior_draws[["cm_sample"]][,i, j]) p <- ggplot2::ggplot(dt_cm_post, ggplot2::aes(x 
= Posterior), fill = "gray") + ggplot2::geom_density(alpha = 0.8, fill = "gray") + ggplot2::scale_x_continuous(breaks = scales::pretty_breaks(n = 2)) + ggplot2::labs(x = "", y = "Density", title = paste0("C[",i,",",j,"]")) + ggplot2::theme(legend.position = "bottom", legend.title = ggplot2::element_blank(), ... ) plot_cm_list[[plot_cm_indx]] <- p # Increment the index of stored graph: plot_cm_indx <- plot_cm_indx + 1 }# End for }# End for plots_no_legend <- lapply(plot_cm_list[1:(age_grps^2)], function(x) x + ggplot2::theme(legend.position="none")) gridExtra::grid.arrange( gridExtra::arrangeGrob(grobs = plots_no_legend, nrow = age_grps) ) }
/scratch/gouwar.j/cran-all/cranData/Bernadette/R/posterior_contactmatrix.R
#' Summarize the posterior distribution of the infection counts #' #' @param object An object of class \code{stanigbm}. See \code{\link[Bernadette]{stan_igbm}}. #' #' @param y_data data.frame; #' age-specific mortality counts in time. See \code{data(age_specific_mortality_counts)}. #' #' @return #' A named list with elements \code{Age_specific} and \code{Aggregated} which can be visualized using \code{\link[Bernadette]{plot_posterior_infections}}. #' #' @references #' Bouranis, L., Demiris, N. Kalogeropoulos, K. and Ntzoufras, I. (2022). Bayesian analysis of diffusion-driven multi-type epidemic models with application to COVID-19. arXiv: \url{https://arxiv.org/abs/2211.15229} #' #' @examples #' \donttest{ #' # Age-specific mortality/incidence count time series: #' data(age_specific_mortality_counts) #' data(age_specific_cusum_infection_counts) #' #' # Import the age distribution for Greece in 2020: #' age_distr <- age_distribution(country = "Greece", year = 2020) #' #' # Lookup table: #' lookup_table <- data.frame(Initial = age_distr$AgeGrp, #' Mapping = c(rep("0-39", 8), #' rep("40-64", 5), #' rep("65+" , 3))) #' #' # Aggregate the age distribution table: #' aggr_age <- aggregate_age_distribution(age_distr, lookup_table) #' #' # Import the projected contact matrix for Greece: #' conmat <- contact_matrix(country = "GRC") #' #' # Aggregate the contact matrix: #' aggr_cm <- aggregate_contact_matrix(conmat, lookup_table, aggr_age) #' #' # Aggregate the IFR: #' ifr_mapping <- c(rep("0-39", 8), rep("40-64", 5), rep("65+", 3)) #' #' aggr_age_ifr <- aggregate_ifr_react(age_distr, ifr_mapping, age_specific_cusum_infection_counts) #' #' # Infection-to-death distribution: #' ditd <- itd_distribution(ts_length = nrow(age_specific_mortality_counts), #' gamma_mean = 24.19231, #' gamma_cv = 0.3987261) #' #' # Posterior sampling: #' #' rstan::rstan_options(auto_write = TRUE) #' chains <- 1 #' options(mc.cores = chains) #' #' igbm_fit <- stan_igbm(y_data = age_specific_mortality_counts, #' contact_matrix = aggr_cm, #' age_distribution_population = aggr_age, #' age_specific_ifr = aggr_age_ifr[[3]], #' itd_distr = ditd, #' incubation_period = 3, #' infectious_period = 4, #' likelihood_variance_type = "linear", #' ecr_changes = 7, #' prior_scale_x0 = 1, #' prior_scale_x1 = 1, #' prior_scale_contactmatrix = 0.05, #' pi_perc = 0.1, #' prior_volatility = normal(location = 0, scale = 1), #' prior_nb_dispersion = exponential(rate = 1/5), #' algorithm_inference = "sampling", #' nBurn = 10, #' nPost = 30, #' nThin = 1, #' chains = chains, #' adapt_delta = 0.6, #' max_treedepth = 14, #' seed = 1) #' #' post_inf_summary <- posterior_infections(object = igbm_fit, #' y_data = age_specific_mortality_counts) #' #' # Visualise the posterior distribution of the infection counts: #' plot_posterior_infections(post_inf_summary, type = "age-specific") #' plot_posterior_infections(post_inf_summary, type = "aggregated") #'} #' @export #' posterior_infections <- function(object, y_data){ check <- check_stanfit(object) if (!isTRUE(check)) stop("Provide an object of class 'stanfit' using rstan::sampling() or rstan::vb()") if("theta_tilde" %in% names(object) ) stop("Perform MCMC sampling using rstan::sampling() or rstan::vb()") posterior_draws <- rstan::extract(object) cov_data <- list() cov_data$y_data <- y_data[,-c(1:5)] cov_data$dates <- y_data$Date cov_data$A <- ncol(y_data[,-c(1:5)]) #---- Age-specific: output_age_cols <- c("Date", "Group", "median", "low", "high", "low25", "high75") output_age <- data.frame(matrix(ncol = 
length(output_age_cols), nrow = 0)) colnames(output_age) <- output_age_cols for (i in 1:cov_data$A){ fit_age <- posterior_draws$E_casesByAge[,,i] dt_infections_age_grp <- data.frame(Date = cov_data$dates, Group = rep( colnames(cov_data$y_data)[i], length(cov_data$dates) ) ) dt_infections_age_grp$median <- apply(fit_age, 2, median) dt_infections_age_grp$low <- apply(fit_age, 2, quantile, probs = c(0.025)) dt_infections_age_grp$high <- apply(fit_age, 2, quantile, probs = c(0.975)) dt_infections_age_grp$low25 <- apply(fit_age, 2, quantile, probs = c(0.25)) dt_infections_age_grp$high75 <- apply(fit_age, 2, quantile, probs = c(0.75)) output_age <- rbind(output_age, dt_infections_age_grp) } #---- Aggregated: fit_aggregated <- posterior_draws$E_cases output_aggregated <- data.frame(Date = cov_data$dates) output_aggregated$median <- apply(fit_aggregated, 2, median) output_aggregated$low <- apply(fit_aggregated, 2, quantile, probs = c(0.025)) output_aggregated$high <- apply(fit_aggregated, 2, quantile, probs = c(0.975)) output_aggregated$low25 <- apply(fit_aggregated, 2, quantile, probs = c(0.25)) output_aggregated$high75 <- apply(fit_aggregated, 2, quantile, probs = c(0.75)) output <- list(Age_specific = output_age, Aggregated = output_aggregated) return(output) } #' Plot the posterior distribution of the infection counts #' #' @param object #' A dataframe from \code{\link[Bernadette]{posterior_infections}}. #' #' @param type character; #' Plot the output for the 'age-specific' infection counts or the 'aggregated' infections. #' #' @param xlab character; #' title of x-axis. #' #' @param ylab character; #' title of y-axis. #' #' @param ... Optional arguments passed to \code{\link[ggplot2]{scale_x_date}}. #' #' @return A \code{ggplot} object which can be further customised using the \pkg{ggplot2} package. #' #' @seealso \code{\link{posterior_infections}}. 
#' #' @examples #' \donttest{ #' # Age-specific mortality/incidence count time series: #' data(age_specific_mortality_counts) #' data(age_specific_cusum_infection_counts) #' #' # Import the age distribution for Greece in 2020: #' age_distr <- age_distribution(country = "Greece", year = 2020) #' #' # Lookup table: #' lookup_table <- data.frame(Initial = age_distr$AgeGrp, #' Mapping = c(rep("0-39", 8), #' rep("40-64", 5), #' rep("65+" , 3))) #' #' # Aggregate the age distribution table: #' aggr_age <- aggregate_age_distribution(age_distr, lookup_table) #' #' # Import the projected contact matrix for Greece: #' conmat <- contact_matrix(country = "GRC") #' #' # Aggregate the contact matrix: #' aggr_cm <- aggregate_contact_matrix(conmat, lookup_table, aggr_age) #' #' # Aggregate the IFR: #' ifr_mapping <- c(rep("0-39", 8), rep("40-64", 5), rep("65+", 3)) #' #' aggr_age_ifr <- aggregate_ifr_react(age_distr, ifr_mapping, age_specific_cusum_infection_counts) #' #' # Infection-to-death distribution: #' ditd <- itd_distribution(ts_length = nrow(age_specific_mortality_counts), #' gamma_mean = 24.19231, #' gamma_cv = 0.3987261) #' #' # Posterior sampling: #' #' rstan::rstan_options(auto_write = TRUE) #' chains <- 1 #' options(mc.cores = chains) #' #' igbm_fit <- stan_igbm(y_data = age_specific_mortality_counts, #' contact_matrix = aggr_cm, #' age_distribution_population = aggr_age, #' age_specific_ifr = aggr_age_ifr[[3]], #' itd_distr = ditd, #' incubation_period = 3, #' infectious_period = 4, #' likelihood_variance_type = "linear", #' ecr_changes = 7, #' prior_scale_x0 = 1, #' prior_scale_x1 = 1, #' prior_scale_contactmatrix = 0.05, #' pi_perc = 0.1, #' prior_volatility = normal(location = 0, scale = 1), #' prior_nb_dispersion = exponential(rate = 1/5), #' algorithm_inference = "sampling", #' nBurn = 10, #' nPost = 30, #' nThin = 1, #' chains = chains, #' adapt_delta = 0.6, #' max_treedepth = 14, #' seed = 1) #' #' post_inf_summary <- posterior_infections(object = igbm_fit, #' y_data = age_specific_mortality_counts) #' #' # Visualise the posterior distribution of the infection counts: #' plot_posterior_infections(post_inf_summary, type = "age-specific") #' plot_posterior_infections(post_inf_summary, type = "aggregated") #'} #' @export #' plot_posterior_infections <- function(object, type = c("age-specific", "aggregated"), xlab = NULL, ylab = NULL, ...){ aggr_type <- match.arg(type) if (is.null(xlab)) xlab <- "Epidemiological Date" if (is.null(ylab)) ylab <- "New daily infection counts" if (aggr_type %nin% type){ stop("Please select a type of aggregation from ('age-specific', 'aggregated').") } else if (aggr_type == "age-specific"){ ret <- ggplot2::ggplot(object$Age_specific) + ggplot2::facet_wrap(. ~ Group, scales = "free_y") + ggplot2::geom_line(ggplot2::aes(x = Date, y = median, color = "Median"), size = 1.3) + ggplot2::geom_ribbon(ggplot2::aes(x = Date, ymin = low25, ymax = high75, fill = "50% CrI"), alpha = 0.5) + # ggplot2::geom_ribbon(ggplot2::aes(x = Date, # ymin = low, # ymax = high, # fill = "95% CrI"), # alpha = 0.5) + ggplot2::labs(x = xlab, y = ylab) + ggplot2::scale_x_date(...) 
+ ggplot2::scale_fill_manual(values = c("50% CrI" = "gray70"#, #"95% CrI" = "gray40" )) + ggplot2::scale_colour_manual(name = '', values = c('Median' = "black")) + ggplot2::theme_bw() + ggplot2::theme(legend.position = "bottom", legend.title = ggplot2::element_blank()) } else if (aggr_type == "aggregated"){ ret <- ggplot2::ggplot(object$Aggregated) + ggplot2::geom_line(ggplot2::aes(x = Date, y = median, color = "Median"), size = 1.3) + ggplot2::geom_ribbon(ggplot2::aes(x = Date, ymin = low25, ymax = high75, fill = "50% CrI"), alpha = 0.5) + # ggplot2::geom_ribbon(ggplot2::aes(x = Date, # ymin = low, # ymax = high, # fill = "95% CrI"), # alpha = 0.5) + ggplot2::labs(x = xlab, y = ylab) + ggplot2::scale_x_date(...) + ggplot2::scale_fill_manual(values = c("50% CrI" = "gray70"#, #"95% CrI" = "gray40" )) + ggplot2::scale_colour_manual(name = '', values = c('Median' = "black")) + ggplot2::theme_bw() + ggplot2::theme(legend.position = "bottom", legend.title = ggplot2::element_blank()) } ret }
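# ---------------------------------------------------------------------------
# Illustration (not part of the package): the bands drawn above come from
# reducing an iterations-by-time matrix of posterior draws to column-wise
# quantiles, the generic pattern being:
draws <- matrix(rnorm(200 * 30, mean = 50, sd = 5), nrow = 200)  # fake draws
band  <- data.frame(median = apply(draws, 2, stats::median),
                    low25  = apply(draws, 2, stats::quantile, probs = 0.25),
                    high75 = apply(draws, 2, stats::quantile, probs = 0.75))
head(band)  # one row per time point, ready for a geom_ribbon() layer
# ---------------------------------------------------------------------------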
/scratch/gouwar.j/cran-all/cranData/Bernadette/R/posterior_infections.R
#' Summarize the posterior distribution of the mortality counts #' #' @param object An object of class \code{stanigbm}. See \code{\link[Bernadette]{stan_igbm}}. #' #' @param y_data data.frame; #' age-specific mortality counts in time. See \code{data(age_specific_mortality_counts)}. #' #' @return #' #' A named list with elements \code{Age_specific} and \code{Aggregated} which can be visualised using \code{\link[Bernadette]{plot_posterior_mortality}}. #' #' @references #' Bouranis, L., Demiris, N. Kalogeropoulos, K. and Ntzoufras, I. (2022). Bayesian analysis of diffusion-driven multi-type epidemic models with application to COVID-19. arXiv: \url{https://arxiv.org/abs/2211.15229} #' #' @examples #' \donttest{ #' # Age-specific mortality/incidence count time series: #' data(age_specific_mortality_counts) #' data(age_specific_cusum_infection_counts) #' #' # Import the age distribution for Greece in 2020: #' age_distr <- age_distribution(country = "Greece", year = 2020) #' #' # Lookup table: #' lookup_table <- data.frame(Initial = age_distr$AgeGrp, #' Mapping = c(rep("0-39", 8), #' rep("40-64", 5), #' rep("65+" , 3))) #' #' # Aggregate the age distribution table: #' aggr_age <- aggregate_age_distribution(age_distr, lookup_table) #' #' # Import the projected contact matrix for Greece: #' conmat <- contact_matrix(country = "GRC") #' #' # Aggregate the contact matrix: #' aggr_cm <- aggregate_contact_matrix(conmat, lookup_table, aggr_age) #' #' # Aggregate the IFR: #' ifr_mapping <- c(rep("0-39", 8), rep("40-64", 5), rep("65+", 3)) #' #' aggr_age_ifr <- aggregate_ifr_react(age_distr, ifr_mapping, age_specific_cusum_infection_counts) #' #' # Infection-to-death distribution: #' ditd <- itd_distribution(ts_length = nrow(age_specific_mortality_counts), #' gamma_mean = 24.19231, #' gamma_cv = 0.3987261) #' #' # Posterior sampling: #' #' rstan::rstan_options(auto_write = TRUE) #' chains <- 1 #' options(mc.cores = chains) #' #' igbm_fit <- stan_igbm(y_data = age_specific_mortality_counts, #' contact_matrix = aggr_cm, #' age_distribution_population = aggr_age, #' age_specific_ifr = aggr_age_ifr[[3]], #' itd_distr = ditd, #' incubation_period = 3, #' infectious_period = 4, #' likelihood_variance_type = "linear", #' ecr_changes = 7, #' prior_scale_x0 = 1, #' prior_scale_x1 = 1, #' prior_scale_contactmatrix = 0.05, #' pi_perc = 0.1, #' prior_volatility = normal(location = 0, scale = 1), #' prior_nb_dispersion = exponential(rate = 1/5), #' algorithm_inference = "sampling", #' nBurn = 10, #' nPost = 30, #' nThin = 1, #' chains = chains, #' adapt_delta = 0.6, #' max_treedepth = 14, #' seed = 1) #' #' post_mortality_summary <- posterior_mortality(object = igbm_fit, #' y_data = age_specific_mortality_counts) #' #' # Visualise the posterior distribution of the mortality counts: #' plot_posterior_mortality(post_mortality_summary, type = "age-specific") #' plot_posterior_mortality(post_mortality_summary, type = "aggregated") #'} #' @export #' posterior_mortality <- function(object, y_data){ check <- check_stanfit(object) if (!isTRUE(check)) stop("Provide an object of class 'stanfit' using rstan::sampling() or rstan::vb()") if("theta_tilde" %in% names(object) ) stop("Perform MCMC sampling using rstan::sampling() or rstan::vb()") posterior_draws <- rstan::extract(object) cov_data <- list() cov_data$y_data <- y_data[,-c(1:5)] cov_data$dates <- y_data$Date cov_data$A <- ncol(y_data[,-c(1:5)]) #---- Age-specific: output_age_cols <- c("Date", "Group", "median", "low", "high", "low25", "high75") output_age <- 
data.frame(matrix(ncol = length(output_age_cols), nrow = 0)) colnames(output_age) <- output_age_cols for (i in 1:cov_data$A){ fit_age <- posterior_draws$E_deathsByAge[,,i] dt_deaths_age_grp <- data.frame(Date = cov_data$dates, Group = rep( colnames(cov_data$y_data)[i], length(cov_data$dates) ) ) dt_deaths_age_grp$median <- apply(fit_age, 2, median) dt_deaths_age_grp$low <- apply(fit_age, 2, quantile, probs = c(0.025)) dt_deaths_age_grp$high <- apply(fit_age, 2, quantile, probs = c(0.975)) dt_deaths_age_grp$low25 <- apply(fit_age, 2, quantile, probs = c(0.25)) dt_deaths_age_grp$high75 <- apply(fit_age, 2, quantile, probs = c(0.75)) output_age <- rbind(output_age, dt_deaths_age_grp) } #---- Aggregated: fit_aggregated <- posterior_draws$E_deaths output_aggregated <- data.frame(Date = cov_data$dates) output_aggregated$median <- apply(fit_aggregated, 2, median) output_aggregated$low <- apply(fit_aggregated, 2, quantile, probs = c(0.025)) output_aggregated$high <- apply(fit_aggregated, 2, quantile, probs = c(0.975)) output_aggregated$low25 <- apply(fit_aggregated, 2, quantile, probs = c(0.25)) output_aggregated$high75 <- apply(fit_aggregated, 2, quantile, probs = c(0.75)) output <- list(Age_specific = output_age, Aggregated = output_aggregated) return(output) } #' Plot the posterior distribution of the mortality counts #' #' @param object #' A dataframe from \code{\link[Bernadette]{posterior_mortality}}. #' #' @param type character; #' Plot the output for the 'age-specific' mortality counts or the 'aggregated' mortality counts. #' #' @param xlab character; #' title of x-axis. #' #' @param ylab character; #' title of y-axis. #' #' @param ... Optional arguments passed to \code{\link[ggplot2]{scale_x_date}}. #' #' @return A \code{ggplot} object which can be further customised using the \pkg{ggplot2} package. #' #' @seealso \code{\link{posterior_mortality}}. 
#' #' @examples #' \donttest{ #' # Age-specific mortality/incidence count time series: #' data(age_specific_mortality_counts) #' data(age_specific_cusum_infection_counts) #' #' # Import the age distribution for Greece in 2020: #' age_distr <- age_distribution(country = "Greece", year = 2020) #' #' # Lookup table: #' lookup_table <- data.frame(Initial = age_distr$AgeGrp, #' Mapping = c(rep("0-39", 8), #' rep("40-64", 5), #' rep("65+" , 3))) #' #' # Aggregate the age distribution table: #' aggr_age <- aggregate_age_distribution(age_distr, lookup_table) #' #' # Import the projected contact matrix for Greece: #' conmat <- contact_matrix(country = "GRC") #' #' # Aggregate the contact matrix: #' aggr_cm <- aggregate_contact_matrix(conmat, lookup_table, aggr_age) #' #' # Aggregate the IFR: #' ifr_mapping <- c(rep("0-39", 8), rep("40-64", 5), rep("65+", 3)) #' #' aggr_age_ifr <- aggregate_ifr_react(age_distr, ifr_mapping, age_specific_cusum_infection_counts) #' #' # Infection-to-death distribution: #' ditd <- itd_distribution(ts_length = nrow(age_specific_mortality_counts), #' gamma_mean = 24.19231, #' gamma_cv = 0.3987261) #' #' # Posterior sampling: #' #' rstan::rstan_options(auto_write = TRUE) #' chains <- 1 #' options(mc.cores = chains) #' #' igbm_fit <- stan_igbm(y_data = age_specific_mortality_counts, #' contact_matrix = aggr_cm, #' age_distribution_population = aggr_age, #' age_specific_ifr = aggr_age_ifr[[3]], #' itd_distr = ditd, #' incubation_period = 3, #' infectious_period = 4, #' likelihood_variance_type = "linear", #' ecr_changes = 7, #' prior_scale_x0 = 1, #' prior_scale_x1 = 1, #' prior_scale_contactmatrix = 0.05, #' pi_perc = 0.1, #' prior_volatility = normal(location = 0, scale = 1), #' prior_nb_dispersion = exponential(rate = 1/5), #' algorithm_inference = "sampling", #' nBurn = 10, #' nPost = 30, #' nThin = 1, #' chains = chains, #' adapt_delta = 0.6, #' max_treedepth = 14, #' seed = 1) #' #' post_mortality_summary <- posterior_mortality(object = igbm_fit, #' y_data = age_specific_mortality_counts) #' #' # Visualise the posterior distribution of the mortality counts: #' plot_posterior_mortality(post_mortality_summary, type = "age-specific") #' plot_posterior_mortality(post_mortality_summary, type = "aggregated") #'} #' @export #' plot_posterior_mortality <- function(object, type = c("age-specific", "aggregated"), xlab = NULL, ylab = NULL, ...){ aggr_type <- match.arg(type) if (is.null(xlab)) xlab <- "Epidemiological Date" if (is.null(ylab)) ylab <- "New daily mortality counts" if (aggr_type %nin% type){ stop("Please select a type of aggregation from ('age-specific', 'aggregated').") } else if (aggr_type == "age-specific"){ ret <- ggplot2::ggplot(object$Age_specific) + ggplot2::facet_wrap(. ~ Group, scales = "free_y") + ggplot2::geom_line(ggplot2::aes(x = Date, y = median, color = "Median"), size = 1.3) + ggplot2::geom_ribbon(ggplot2::aes(x = Date, ymin = low25, ymax = high75, fill = "50% CrI"), alpha = 0.5) + ggplot2::geom_ribbon(ggplot2::aes(x = Date, ymin = low, ymax = high, fill = "95% CrI"), alpha = 0.5) + ggplot2::labs(x = xlab, y = ylab) + ggplot2::scale_x_date(...) 
+ ggplot2::scale_fill_manual(values = c("50% CrI" = "gray70", "95% CrI" = "gray40")) + ggplot2::scale_colour_manual(name = '', values = c('Median' = "black")) + ggplot2::theme_bw() + ggplot2::theme(legend.position = "bottom", legend.title = ggplot2::element_blank()) } else if (aggr_type == "aggregated"){ ret <- ggplot2::ggplot(object$Aggregated) + ggplot2::geom_line(ggplot2::aes(x = Date, y = median, color = "Median"), size = 1.3) + ggplot2::geom_ribbon(ggplot2::aes(x = Date, ymin = low25, ymax = high75, fill = "50% CrI"), alpha = 0.5) + ggplot2::geom_ribbon(ggplot2::aes(x = Date, ymin = low, ymax = high, fill = "95% CrI"), alpha = 0.5) + ggplot2::labs(x = xlab, y = ylab) + ggplot2::scale_x_date(...) + ggplot2::scale_fill_manual(values = c("50% CrI" = "gray70", "95% CrI" = "gray40")) + ggplot2::scale_colour_manual(name = '', values = c('Median' = "black")) + ggplot2::theme_bw() + ggplot2::theme(legend.position = "bottom", legend.title = ggplot2::element_blank()) } ret }
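
## Usage sketch (not run; 'post_mortality_summary' as created in the example
## above): the dots are forwarded to ggplot2::scale_x_date(), so the date axis
## can be customised directly in the plotting call:
if (FALSE) {
  plot_posterior_mortality(post_mortality_summary,
                           type        = "aggregated",
                           date_breaks = "1 month",
                           date_labels = "%b %Y")
}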
/scratch/gouwar.j/cran-all/cranData/Bernadette/R/posterior_mortality.R
#' Estimate the effective reproduction number with the next generation matrix approach
#'
#' @param object An object of class \code{stanigbm}. See \code{\link[Bernadette]{stan_igbm}}.
#'
#' @param y_data data.frame;
#' age-specific mortality counts in time. See \code{data(age_specific_mortality_counts)}.
#'
#' @param age_distribution_population data.frame;
#' the age distribution of a given population. See \code{aggregate_age_distribution}.
#'
#' @param infectious_period integer;
#' length of infectious period in days. Must be >=1.
#'
#' @return A data.frame which can be visualised using \code{\link[Bernadette]{plot_posterior_rt}}.
#'
#' @references
#' Diekmann, O., Heesterbeek, J., and Roberts, M. (2010). The construction of next-generation matrices for compartmental epidemic models. \emph{J. R. Soc. Interface}, 7, 873-885.
#'
#' Bouranis, L., Demiris, N. Kalogeropoulos, K. and Ntzoufras, I. (2022). Bayesian analysis of diffusion-driven multi-type epidemic models with application to COVID-19. arXiv: \url{https://arxiv.org/abs/2211.15229}
#'
#' @examples
#' \donttest{
#' # Age-specific mortality/incidence count time series:
#' data(age_specific_mortality_counts)
#' data(age_specific_cusum_infection_counts)
#'
#' # Import the age distribution for Greece in 2020:
#' age_distr <- age_distribution(country = "Greece", year = 2020)
#'
#' # Lookup table:
#' lookup_table <- data.frame(Initial = age_distr$AgeGrp,
#'                            Mapping = c(rep("0-39", 8),
#'                                        rep("40-64", 5),
#'                                        rep("65+" , 3)))
#'
#' # Aggregate the age distribution table:
#' aggr_age <- aggregate_age_distribution(age_distr, lookup_table)
#'
#' # Import the projected contact matrix for Greece:
#' conmat <- contact_matrix(country = "GRC")
#'
#' # Aggregate the contact matrix:
#' aggr_cm <- aggregate_contact_matrix(conmat, lookup_table, aggr_age)
#'
#' # Aggregate the IFR:
#' ifr_mapping <- c(rep("0-39", 8), rep("40-64", 5), rep("65+", 3))
#'
#' aggr_age_ifr <- aggregate_ifr_react(age_distr, ifr_mapping, age_specific_cusum_infection_counts)
#'
#' # Infection-to-death distribution:
#' ditd <- itd_distribution(ts_length = nrow(age_specific_mortality_counts),
#'                          gamma_mean = 24.19231,
#'                          gamma_cv = 0.3987261)
#'
#' # Posterior sampling:
#'
#' rstan::rstan_options(auto_write = TRUE)
#' chains <- 1
#' options(mc.cores = chains)
#'
#' igbm_fit <- stan_igbm(y_data = age_specific_mortality_counts,
#'                       contact_matrix = aggr_cm,
#'                       age_distribution_population = aggr_age,
#'                       age_specific_ifr = aggr_age_ifr[[3]],
#'                       itd_distr = ditd,
#'                       incubation_period = 3,
#'                       infectious_period = 4,
#'                       likelihood_variance_type = "linear",
#'                       ecr_changes = 7,
#'                       prior_scale_x0 = 1,
#'                       prior_scale_x1 = 1,
#'                       prior_scale_contactmatrix = 0.05,
#'                       pi_perc = 0.1,
#'                       prior_volatility = normal(location = 0, scale = 1),
#'                       prior_nb_dispersion = exponential(rate = 1/5),
#'                       algorithm_inference = "sampling",
#'                       nBurn = 10,
#'                       nPost = 30,
#'                       nThin = 1,
#'                       chains = chains,
#'                       adapt_delta = 0.6,
#'                       max_treedepth = 14,
#'                       seed = 1)
#'
#' post_rt_summary <- posterior_rt(object = igbm_fit,
#'                                 y_data = age_specific_mortality_counts,
#'                                 age_distribution_population = aggr_age,
#'                                 infectious_period = 4)
#'
#' # Visualise the posterior distribution of the effective reproduction number:
#' plot_posterior_rt(post_rt_summary)
#'}
#' @export
#'
posterior_rt <- function(object,
                         y_data,
                         age_distribution_population,
                         infectious_period){

  check <- check_stanfit(object)

  if (!isTRUE(check))
    stop("Provide an object of class 'stanfit' using rstan::sampling() or rstan::vb()")

  if("theta_tilde" %in%
names(object) ) stop("Perform MCMC sampling using rstan::sampling() or rstan::vb()") posterior_draws <- rstan::extract(object) cov_data <- list() cov_data$y_data <- y_data[,-c(1:5)] cov_data$dates <- y_data$Date cov_data$pop_diag <- 1/(age_distribution_population$PopTotal) cov_data$infectious_period <- infectious_period age_grps <- ncol(y_data[,-c(1:5)]) if(ncol(posterior_draws$cm_sample) != age_grps) stop( paste0("The number of rows in the age distribution table must be equal to ", age_grps) ) zero_mat <- matrix(0L, nrow = age_grps, ncol = age_grps) identity_mat <- diag(age_grps) reciprocal_age_distr <- matrix(rep(cov_data$pop_diag, age_grps), ncol = age_grps, nrow = age_grps, byrow = TRUE) age_distr <- matrix(rep(1/cov_data$pop_diag, age_grps), ncol = age_grps, nrow = age_grps, byrow = FALSE) Q_inverse <- cov_data$infectious_period * identity_mat beta_draws <- posterior_draws$beta_trajectory chain_length <- nrow(beta_draws) ts_length <- dim(beta_draws)[2] R_eff_mat <- matrix(0L, nrow = chain_length, ncol = ts_length) for (i in 1:chain_length) { for (j in 1:ts_length){ B_eff_tmp <- matrix( rep(beta_draws[i,,][j,], age_grps), ncol = age_grps, nrow = age_grps, byrow = FALSE) * matrix( posterior_draws$cm_sample[i,,], nrow = age_grps, ncol = age_grps) * matrix(rep( posterior_draws$Susceptibles[i,,][j,], age_grps), ncol = age_grps, nrow = age_grps, byrow = FALSE) * reciprocal_age_distr BQinv_eff_tmp <- B_eff_tmp %*% Q_inverse R_eff_mat[i,j] <- eigen_mat(BQinv_eff_tmp) } } data_eff_repnumber <- data.frame(Date = cov_data$dates) data_eff_repnumber$median <- apply(R_eff_mat, 2, median) data_eff_repnumber$low0025 <- apply(R_eff_mat, 2, quantile, probs = c(0.025)) # c(0.025) data_eff_repnumber$low25 <- apply(R_eff_mat, 2, quantile, probs = c(0.25)) # c(0.025) data_eff_repnumber$high75 <- apply(R_eff_mat, 2, quantile, probs = c(0.75)) # c(0.975) data_eff_repnumber$high975 <- apply(R_eff_mat, 2, quantile, probs = c(0.975)) # c(0.975) return(data_eff_repnumber) } #' Plot the estimated effective reproduction number trajectory #' #' @param object A data frame from \code{\link[Bernadette]{posterior_rt}}. #' #' @param xlab character; #' Title of x-axis. #' #' @param ylab character; #' Title of y-axis. #' #' @param ... Optional arguments passed to \code{\link[ggplot2]{scale_x_date}} and \code{\link[ggplot2]{theme}}. #' #' @return A \code{ggplot} object which can be further customised using the \pkg{ggplot2} package. #' #' @seealso \code{\link{posterior_rt}}. 
#'
#' @examples
#' \donttest{
#' # Age-specific mortality/incidence count time series:
#' data(age_specific_mortality_counts)
#' data(age_specific_cusum_infection_counts)
#'
#' # Import the age distribution for Greece in 2020:
#' age_distr <- age_distribution(country = "Greece", year = 2020)
#'
#' # Lookup table:
#' lookup_table <- data.frame(Initial = age_distr$AgeGrp,
#'                            Mapping = c(rep("0-39", 8),
#'                                        rep("40-64", 5),
#'                                        rep("65+" , 3)))
#'
#' # Aggregate the age distribution table:
#' aggr_age <- aggregate_age_distribution(age_distr, lookup_table)
#'
#' # Import the projected contact matrix for Greece:
#' conmat <- contact_matrix(country = "GRC")
#'
#' # Aggregate the contact matrix:
#' aggr_cm <- aggregate_contact_matrix(conmat, lookup_table, aggr_age)
#'
#' # Aggregate the IFR:
#' ifr_mapping <- c(rep("0-39", 8), rep("40-64", 5), rep("65+", 3))
#'
#' aggr_age_ifr <- aggregate_ifr_react(age_distr, ifr_mapping, age_specific_cusum_infection_counts)
#'
#' # Infection-to-death distribution:
#' ditd <- itd_distribution(ts_length = nrow(age_specific_mortality_counts),
#'                          gamma_mean = 24.19231,
#'                          gamma_cv = 0.3987261)
#'
#' # Posterior sampling:
#'
#' rstan::rstan_options(auto_write = TRUE)
#' chains <- 1
#' options(mc.cores = chains)
#'
#' igbm_fit <- stan_igbm(y_data = age_specific_mortality_counts,
#'                       contact_matrix = aggr_cm,
#'                       age_distribution_population = aggr_age,
#'                       age_specific_ifr = aggr_age_ifr[[3]],
#'                       itd_distr = ditd,
#'                       incubation_period = 3,
#'                       infectious_period = 4,
#'                       likelihood_variance_type = "linear",
#'                       ecr_changes = 7,
#'                       prior_scale_x0 = 1,
#'                       prior_scale_x1 = 1,
#'                       prior_scale_contactmatrix = 0.05,
#'                       pi_perc = 0.1,
#'                       prior_volatility = normal(location = 0, scale = 1),
#'                       prior_nb_dispersion = exponential(rate = 1/5),
#'                       algorithm_inference = "sampling",
#'                       nBurn = 10,
#'                       nPost = 30,
#'                       nThin = 1,
#'                       chains = chains,
#'                       adapt_delta = 0.6,
#'                       max_treedepth = 14,
#'                       seed = 1)
#'
#' post_rt_summary <- posterior_rt(object = igbm_fit,
#'                                 y_data = age_specific_mortality_counts,
#'                                 age_distribution_population = aggr_age,
#'                                 infectious_period = 4)
#'
#' # Visualise the posterior distribution of the effective reproduction number:
#' plot_posterior_rt(post_rt_summary)
#'}
#' @export
#'
plot_posterior_rt <- function(object,
                              xlab = NULL,
                              ylab = NULL,
                              ...
                              ){

  if (is.null(xlab)) xlab <- "Epidemiological Date"
  if (is.null(ylab)) ylab <- "Effective reproduction number"

  ret <- ggplot2::ggplot(object) +
         ggplot2::geom_line(ggplot2::aes(x = Date, y = median, color = "Median"), size = 1.3) +
         ggplot2::geom_ribbon(ggplot2::aes(x = Date, ymin = low25, ymax = high75, fill = "50% CrI"),
                              alpha = 0.6) +
         ggplot2::geom_hline(yintercept = 1, color = "black") +
         ggplot2::labs(x = xlab, y = ylab) +
         ggplot2::scale_x_date(...) +
         ggplot2::scale_y_continuous(limits = c(0, max(object$high75)*1.1),
                                     breaks = c(seq(0, max(object$high75)*1.1, 0.2))
                                     ) +
         ggplot2::scale_fill_manual(values = c("50% CrI" = "gray40") ) +
         ggplot2::scale_colour_manual(name = '', values = c('Median' = "black")) +
         ggplot2::theme_bw() +
         ggplot2::theme(legend.position = "bottom",
                        legend.title = ggplot2::element_blank(),
                        ...)
  ret
}
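
## Minimal sketch (not run, made-up placeholder inputs) of the next generation
## matrix step inside posterior_rt() for one posterior draw and one time
## point: entry [a, b] of the effective transmission matrix is
## beta[a] * C[a, b] * S[a] / N[b], and R_eff(t) is the dominant eigenvalue of
## that matrix multiplied by Q^{-1} = infectious_period * identity.
if (FALSE) {
  A      <- 3                    # number of age groups
  beta_t <- rep(0.3, A)          # transmissibility draw at time t
  C      <- matrix(1, A, A)      # contact matrix draw
  S_t    <- c(9e5, 8e5, 7e5)     # susceptibles at time t
  N      <- rep(1e6, A)          # age-specific population sizes
  B_eff  <- diag(beta_t * S_t) %*% C %*% diag(1 / N)
  Q_inv  <- 4 * diag(A)          # infectious_period = 4
  R_eff  <- max(Re(eigen(B_eff %*% Q_inv, only.values = TRUE)$values))
}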
/scratch/gouwar.j/cran-all/cranData/Bernadette/R/posterior_rt.R
#' Estimate the age-specific transmission rate #' #' @param object #' An object of class \code{stanigbm}. See \code{\link[Bernadette]{stan_igbm}}. #' #' @param y_data data.frame; #' age-specific mortality counts in time. See \code{data(age_specific_mortality_counts)}. #' #' @return #' A data.frame which can be visualised using \code{\link[Bernadette]{plot_posterior_transmrate}}. #' #' @examples #' \donttest{ #' # Age-specific mortality/incidence count time series: #' data(age_specific_mortality_counts) #' data(age_specific_cusum_infection_counts) #' #' # Import the age distribution for Greece in 2020: #' age_distr <- age_distribution(country = "Greece", year = 2020) #' #' # Lookup table: #' lookup_table <- data.frame(Initial = age_distr$AgeGrp, #' Mapping = c(rep("0-39", 8), #' rep("40-64", 5), #' rep("65+" , 3))) #' #' # Aggregate the age distribution table: #' aggr_age <- aggregate_age_distribution(age_distr, lookup_table) #' #' # Import the projected contact matrix for Greece: #' conmat <- contact_matrix(country = "GRC") #' #' # Aggregate the contact matrix: #' aggr_cm <- aggregate_contact_matrix(conmat, lookup_table, aggr_age) #' #' # Aggregate the IFR: #' ifr_mapping <- c(rep("0-39", 8), rep("40-64", 5), rep("65+", 3)) #' #' aggr_age_ifr <- aggregate_ifr_react(age_distr, ifr_mapping, age_specific_cusum_infection_counts) #' #' # Infection-to-death distribution: #' ditd <- itd_distribution(ts_length = nrow(age_specific_mortality_counts), #' gamma_mean = 24.19231, #' gamma_cv = 0.3987261) #' #' # Posterior sampling: #' #' rstan::rstan_options(auto_write = TRUE) #' chains <- 1 #' options(mc.cores = chains) #' #' igbm_fit <- stan_igbm(y_data = age_specific_mortality_counts, #' contact_matrix = aggr_cm, #' age_distribution_population = aggr_age, #' age_specific_ifr = aggr_age_ifr[[3]], #' itd_distr = ditd, #' incubation_period = 3, #' infectious_period = 4, #' likelihood_variance_type = "linear", #' ecr_changes = 7, #' prior_scale_x0 = 1, #' prior_scale_x1 = 1, #' prior_scale_contactmatrix = 0.05, #' pi_perc = 0.1, #' prior_volatility = normal(location = 0, scale = 1), #' prior_nb_dispersion = exponential(rate = 1/5), #' algorithm_inference = "sampling", #' nBurn = 10, #' nPost = 30, #' nThin = 1, #' chains = chains, #' adapt_delta = 0.6, #' max_treedepth = 14, #' seed = 1) #' #' post_transmrate_summary <- posterior_transmrate(object = igbm_fit, #' y_data = age_specific_mortality_counts) #' #' # Visualise the posterior distribution of the age-specific transmission rate: #' plot_posterior_transmrate(post_transmrate_summary) #'} #' @export #' posterior_transmrate <- function(object, y_data){ check <- check_stanfit(object) if (!isTRUE(check)) stop("Provide an object of class 'stanfit' using rstan::sampling() or rstan::vb()") if("theta_tilde" %in% names(object) ) stop("Perform MCMC sampling using rstan::sampling() or rstan::vb()") posterior_draws <- rstan::extract(object) cov_data <- list() cov_data$y_data <- y_data[,-c(1:5)] cov_data$dates <- y_data$Date age_grps <- ncol(cov_data$y_data) if(ncol(posterior_draws$cm_sample) != age_grps) stop( paste0("The number of rows in the age distribution table must be equal to ", age_grps) ) beta_draws <- posterior_draws$beta_trajectory chain_length <- nrow(beta_draws) ts_length <- dim(beta_draws)[2] data_transmission_rate_cols <- c("Date", "Group", "median", "low0025", "low25", "high75", "high975") data_transmission_rate <- data.frame(matrix(ncol = length(data_transmission_rate_cols), nrow = 0)) colnames(data_transmission_rate) <- 
data_transmission_rate_cols for (k in 1:age_grps){ trans_rate_temp <- matrix(0L, nrow = chain_length, ncol = ts_length) data_trans_rate_age_grp <- data.frame(Date = cov_data$dates, Group = rep( colnames(cov_data$y_data)[k], length(cov_data$dates) )) for (j in 1:ts_length) trans_rate_temp[,j] <- beta_draws[,j,k] * posterior_draws$cm_sample[,k,k] data_trans_rate_age_grp$median <- apply(trans_rate_temp, 2, median) data_trans_rate_age_grp$low0025 <- apply(trans_rate_temp, 2, quantile, probs = c(0.025)) # c(0.025) data_trans_rate_age_grp$low25 <- apply(trans_rate_temp, 2, quantile, probs = c(0.25)) # c(0.025) data_trans_rate_age_grp$high75 <- apply(trans_rate_temp, 2, quantile, probs = c(0.75)) # c(0.975) data_trans_rate_age_grp$high975 <- apply(trans_rate_temp, 2, quantile, probs = c(0.975)) # c(0.975) data_transmission_rate <- rbind(data_transmission_rate, data_trans_rate_age_grp) }# End for return(data_transmission_rate) } #' Plot the estimated age-specific transmission rate #' #' @param object A dataframe from \code{\link[Bernadette]{posterior_transmrate}}. #' #' @param xlab character; Title of x-axis. #' #' @param ylab character; Title of y-axis. #' #' @param ... Optional arguments passed to \code{\link[ggplot2]{scale_x_date}}. #' #' @return A \code{ggplot} object which can be further customised using the \pkg{ggplot2} package. #' #' @seealso \code{\link{posterior_transmrate}}. #' #' @examples #' \donttest{ #' # Age-specific mortality/incidence count time series: #' data(age_specific_mortality_counts) #' data(age_specific_cusum_infection_counts) #' #' # Import the age distribution for Greece in 2020: #' age_distr <- age_distribution(country = "Greece", year = 2020) #' #' # Lookup table: #' lookup_table <- data.frame(Initial = age_distr$AgeGrp, #' Mapping = c(rep("0-39", 8), #' rep("40-64", 5), #' rep("65+" , 3))) #' #' # Aggregate the age distribution table: #' aggr_age <- aggregate_age_distribution(age_distr, lookup_table) #' #' # Import the projected contact matrix for Greece: #' conmat <- contact_matrix(country = "GRC") #' #' # Aggregate the contact matrix: #' aggr_cm <- aggregate_contact_matrix(conmat, lookup_table, aggr_age) #' #' # Aggregate the IFR: #' ifr_mapping <- c(rep("0-39", 8), rep("40-64", 5), rep("65+", 3)) #' #' aggr_age_ifr <- aggregate_ifr_react(age_distr, ifr_mapping, age_specific_cusum_infection_counts) #' #' # Infection-to-death distribution: #' ditd <- itd_distribution(ts_length = nrow(age_specific_mortality_counts), #' gamma_mean = 24.19231, #' gamma_cv = 0.3987261) #' #' # Posterior sampling: #' #' rstan::rstan_options(auto_write = TRUE) #' chains <- 1 #' options(mc.cores = chains) #' #' igbm_fit <- stan_igbm(y_data = age_specific_mortality_counts, #' contact_matrix = aggr_cm, #' age_distribution_population = aggr_age, #' age_specific_ifr = aggr_age_ifr[[3]], #' itd_distr = ditd, #' incubation_period = 3, #' infectious_period = 4, #' likelihood_variance_type = "linear", #' ecr_changes = 7, #' prior_scale_x0 = 1, #' prior_scale_x1 = 1, #' prior_scale_contactmatrix = 0.05, #' pi_perc = 0.1, #' prior_volatility = normal(location = 0, scale = 1), #' prior_nb_dispersion = exponential(rate = 1/5), #' algorithm_inference = "sampling", #' nBurn = 10, #' nPost = 30, #' nThin = 1, #' chains = chains, #' adapt_delta = 0.6, #' max_treedepth = 14, #' seed = 1) #' #' post_transmrate_summary <- posterior_transmrate(object = igbm_fit, #' y_data = age_specific_mortality_counts) #' #' # Visualise the posterior distribution of the age-specific transmission rate: #' 
plot_posterior_transmrate(post_transmrate_summary) #'} #' @export #' plot_posterior_transmrate <- function(object, xlab = NULL, ylab = NULL, ... ){ if (is.null(xlab)) xlab <- "Epidemiological Date" if (is.null(ylab)) ylab <- "Transmission rate" ret <- ggplot2::ggplot(object) + ggplot2::facet_wrap(. ~ Group, scales = "free_y") + ggplot2::geom_line(ggplot2::aes(x = Date, y = median, color = "Median"), size = 1.3) + ggplot2::geom_ribbon(ggplot2::aes(x = Date, ymin = low25, ymax = high75, fill = "50% CrI"), alpha = 0.6) + ggplot2::labs(x = xlab, y = ylab) + ggplot2::scale_x_date(...) + ggplot2::scale_fill_manual(values = c("50% CrI" = "gray40")) + ggplot2::scale_colour_manual(name = '', values = c('Median' = "black")) + ggplot2::theme_bw() + ggplot2::theme(legend.position = "bottom", legend.title = ggplot2::element_blank()) ret }
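
## Sketch (not run, placeholder arrays) of the quantity summarised by
## posterior_transmrate(): for age group k at time j, the draw-level
## transmission rate is the transmissibility draw times the within-group
## contact rate, and the plotted median/quantiles are taken over the draws.
if (FALSE) {
  n_draws <- 100; ts_len <- 10; A <- 3
  beta_draws <- array(rexp(n_draws * ts_len * A), dim = c(n_draws, ts_len, A))
  cm_sample  <- array(rexp(n_draws * A * A),      dim = c(n_draws, A, A))
  k <- 1; j <- 5
  trans_rate_draws <- beta_draws[, j, k] * cm_sample[, k, k]
  quantile(trans_rate_draws, probs = c(0.25, 0.5, 0.75))
}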
/scratch/gouwar.j/cran-all/cranData/Bernadette/R/posterior_transmrate.R
#' Prior distributions and options #' #' @name priors #' #' @description The functions described on this page are used to specify the #' prior-related arguments of the modeling functions in the #' \pkg{Bernadette} package. #' #' The default priors used in the \pkg{Bernadette} modeling functions #' are intended to be \emph{weakly informative}. For many applications the #' defaults will perform well, but prudent use of more informative priors is #' encouraged. Uniform prior distributions are possible (e.g. by setting #' \code{\link{stan_igbm}}'s \code{prior} argument to \code{NULL}) but, unless #' the data is very strong, they are not recommended and are \emph{not} #' non-informative, giving the same probability mass to implausible values as #' plausible ones. #' #' @param location #' Prior location. In most cases, this is the prior mean, but #' for \code{cauchy} (which is equivalent to \code{student_t} with #' \code{df=1}), the mean does not exist and \code{location} is the prior #' median. The default value is \eqn{0}. #' #' @param scale #' Prior scale. The default depends on the family (see \strong{Details}). #' #' @param df #' Degrees of freedom. The default is \eqn{1} for #' \code{student_t}, in which case it is equivalent to \code{cauchy}. #' #' @param shape #' Prior shape for the Gamma distribution. Defaults to \code{2}. #' #' @param rate #' Prior rate for the Gamma or the Exponential distribution. Defaults to \code{1}. #' #' @details The details depend on the family of the prior being used: #' \subsection{Student t family}{ #' Family members: #' \itemize{ #' \item \code{normal(location, scale)} #' \item \code{student_t(df, location, scale)} #' \item \code{cauchy(location, scale)} #' } #' #' As the degrees of freedom approaches infinity, the Student t distribution #' approaches the normal distribution and if the degrees of freedom are one, #' then the Student t distribution is the Cauchy distribution. #' If \code{scale} is not specified it will default to \eqn{2.5}. #' } #' #' @return A named list to be used internally by the \pkg{Bernadette} model #' fitting functions. #' #' @seealso The vignette for the \pkg{Bernadette} package discusses #' the use of some of the supported prior distributions. 
#'
#' @examples
#' \donttest{
#' # Age-specific mortality/incidence count time series:
#' data(age_specific_mortality_counts)
#' data(age_specific_infection_counts)
#'
#' # Import the age distribution for Greece in 2020:
#' age_distr <- age_distribution(country = "Greece", year = 2020)
#'
#' # Lookup table:
#' lookup_table <- data.frame(Initial = age_distr$AgeGrp,
#'                            Mapping = c(rep("0-39", 8),
#'                                        rep("40-64", 5),
#'                                        rep("65+" , 3)))
#'
#' # Aggregate the age distribution table:
#' aggr_age <- aggregate_age_distribution(age_distr, lookup_table)
#'
#' # Import the projected contact matrix for Greece:
#' conmat <- contact_matrix(country = "GRC")
#'
#' # Aggregate the contact matrix:
#' aggr_cm <- aggregate_contact_matrix(conmat, lookup_table, aggr_age)
#'
#' # Aggregate the IFR:
#' ifr_mapping <- c(rep("0-39", 8), rep("40-64", 5), rep("65+", 3))
#'
#' aggr_age_ifr <- aggregate_ifr_react(age_distr, ifr_mapping, age_specific_infection_counts)
#'
#' # Infection-to-death distribution:
#' ditd <- itd_distribution(ts_length = nrow(age_specific_mortality_counts),
#'                          gamma_mean = 24.19231,
#'                          gamma_cv = 0.3987261)
#'
#' # Can assign priors to names:
#' N05 <- normal(0, 5)
#' Gamma22 <- gamma(2,2)
#' igbm_fit <- stan_igbm(y_data = age_specific_mortality_counts,
#'                       contact_matrix = aggr_cm,
#'                       age_distribution_population = aggr_age,
#'                       age_specific_ifr = aggr_age_ifr[[3]],
#'                       itd_distr = ditd,
#'                       likelihood_variance_type = "quadratic",
#'                       prior_volatility = N05,
#'                       prior_nb_dispersion = Gamma22,
#'                       algorithm_inference = "optimizing")
#' }
NULL

#' @rdname priors
#' @export
normal <- function(location = 0, scale = NULL) {
  validate_parameter_value(scale)
  nlist(dist = "normal", df = NA, location, scale, shape = NA, rate = NA)
}

#' @rdname priors
#' @export
student_t <- function(df = 1, location = 0, scale = NULL) {
  validate_parameter_value(scale)
  validate_parameter_value(df)
  nlist(dist = "t", df, location, scale, shape = NA, rate = NA)
}

#' @rdname priors
#' @export
cauchy <- function(location = 0, scale = NULL) {
  student_t(df = 1, location = location, scale = scale)
}

#' @rdname priors
#' @export
#' @param shape Prior shape for the gamma distribution. Defaults to \code{2}.
#' @param rate Prior rate for the gamma distribution. Defaults to \code{1}.
gamma <- function(shape = 2, rate = 1) {
  stopifnot(length(shape) == 1)
  stopifnot(length(rate) == 1)
  validate_parameter_value(shape)
  validate_parameter_value(rate)
  nlist(dist = "gamma", df = NA, location = NA, scale = NA, shape, rate)
}

#' @rdname priors
#' @export
#' @param rate Prior rate for the exponential distribution. Defaults to
#' \code{1}. For the exponential distribution, the rate parameter is the
#' \emph{reciprocal} of the mean.
#'
exponential <- function(rate = 1) {
  stopifnot(length(rate) == 1)
  validate_parameter_value(rate)
  nlist(dist = "exponential", df = NA, location = NA, scale = NA, shape = NA, rate)
}

# internal ----------------------------------------------------------------

# Check for positive scale or df parameter (NULL OK)
#
# @param x The value to check.
# @return Either an error is thrown or \code{TRUE} is returned invisibly.
validate_parameter_value <- function(x) {
  nm <- deparse(substitute(x))
  if (!is.null(x)) {
    if (!is.numeric(x))
      stop(nm, " should be NULL or numeric", call. = FALSE)
    if (any(x <= 0))
      stop(nm, " should be positive", call.
= FALSE) } invisible(TRUE) } # Deal with priors # # @param prior A list # @param default_mean Default value to use for the mean if not specified by user. Set to \eqn{0}. # @param default_scale Default value to use to scale if not specified by user. Set to \eqn{2.5}. # @param default_df Default value to use for the degrees of freedom if not specified by user. Set to \eqn{1}. # @param default_shape Default value to use for the shape if not specified by user. Set to \eqn{2}. # @param default_rate Default value to use for the rate if not specified by user. Set to \eqn{1}. # @param ok_dists A list of admissible distributions. handle_prior <- function(prior, default_mean = 0, default_scale = 2.5, default_df = 1, default_shape = 2, default_rate = 1, ok_dists = nlist("normal", student_t = "t", "cauchy", "gamma", "exponential")) { if (!length(prior)) return(list(prior_dist = 1L, prior_mean = default_mean, prior_scale = default_scale, prior_df = default_df, prior_shape = default_shape, prior_rate = default_rate, prior_dist_name = "normal")) if (!is.list(prior)) stop(base::sQuote(deparse(substitute(prior))), " should be a named list") prior_dist_name <- prior$dist if (!prior_dist_name %in% unlist(ok_dists)) { stop("The prior distribution should be one of ", paste(names(ok_dists), collapse = ", ")) } else if (prior_dist_name %in% c("normal", "t", "cauchy", "gamma", "exponential")) { if (prior_dist_name == "normal") { prior_dist <- 1L } else if (prior_dist_name == "cauchy") { prior_dist <- 2L } else if (prior_dist_name == "t") { prior_dist <- 3L } else if (prior_dist_name == "gamma") { prior_dist <- 4L } else if (prior_dist_name == "exponential"){ prior_dist <- 5L } prior_mean <- prior$location prior_scale <- prior$scale prior_df <- prior$df prior_shape <- prior$shape prior_rate <- prior$rate prior_mean[is.na(prior_mean)] <- default_mean prior_scale[is.na(prior_scale)] <- default_scale prior_df[is.na(prior_df)] <- default_df prior_shape[is.na(prior_shape)] <- default_shape prior_rate[is.na(prior_rate)] <- default_rate }# End if nlist(prior_dist, prior_mean, prior_scale, prior_df, prior_shape, prior_rate, prior_dist_name) }
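
## Sketch (not run) of the encoding produced by the helpers above: each prior
## constructor returns a named list, and handle_prior() maps the distribution
## name to the integer code read by the .stan file (normal = 1, cauchy = 2,
## t = 3, gamma = 4, exponential = 5), substituting the documented defaults
## for any hyperparameter left as NA:
if (FALSE) {
  handle_prior(student_t(df = 3, location = 0, scale = 2.5))$prior_dist  # 3L
  handle_prior(exponential(rate = 1/5))$prior_rate                       # 0.2
}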
/scratch/gouwar.j/cran-all/cranData/Bernadette/R/priors.R
#' Bayesian diffusion-driven multi-type epidemic models via Stan
#'
#' A Bayesian evidence synthesis approach to model the age-specific transmission
#' dynamics of COVID-19 based on daily age-stratified mortality counts. The temporal
#' evolution of transmission rates in populations containing multiple types of individual
#' is reconstructed via independent diffusion processes assigned to the key epidemiological
#' parameters. A suitably tailored Susceptible-Exposed-Infected-Removed (SEIR) compartmental
#' model is used to capture the latent counts of infections and to account for fluctuations
#' in transmission influenced by phenomena like public health interventions and changes in human behaviour.
#'
#' @param y_data data.frame;
#' age-specific mortality counts in time. See \code{data(age_specific_mortality_counts)}.
#'
#' @param contact_matrix matrix;
#' a square matrix representing the number of contacts between age groups.
#'
#' @param age_distribution_population data.frame;
#' the age distribution of a given population. See \code{aggregate_age_distribution}.
#'
#' @param itd_distr vector;
#' Infection-to-death distribution. A vector of length \emph{ts_length}.
#'
#' @param age_specific_ifr data.frame;
#' time-varying age-specific infection-fatality ratio. See \code{aggregate_ifr_react}.
#'
#' @param incubation_period integer;
#' length of incubation period in days. Must be >=1.
#'
#' @param infectious_period integer;
#' length of infectious period in days. Must be >=1.
#'
#' @param likelihood_variance_type character;
#' one of \code{"quadratic"} (the default) or \code{"linear"}. If \code{"quadratic"},
#' the variance of the over-dispersed count model is a quadratic function of the mean;
#' if \code{"linear"}, the variance is a linear function of the mean.
#'
#' @param ecr_changes integer;
#' between 1 and 7, defaults to 1. Expresses the number of changes of the effective contact rate during the course of 7 days.
#'
#' @param prior_scale_x0 double;
#' scale parameter of a Normal prior distribution assigned to the age-specific log(transmissibility) at time \eqn{t = 0}.
#'
#' @param prior_scale_x1 double;
#' scale parameter of a Normal prior distribution assigned to the age-specific log(transmissibility) at time \eqn{t = 1}.
#'
#' @param prior_scale_contactmatrix double;
#' defaults to 0.05. A positive number that scales the informative Normal prior distribution assigned to the random contact matrix.
#'
#' @param pi_perc numeric;
#' between 0 and 1. It represents the proportion of Exposed individuals in each age group of a given population at time \eqn{t = 0},
#' while the rest, \eqn{100*(1-pi_perc)}\%, remain Susceptible.
#'
#' @param prior_volatility
#' Prior distribution for the volatility parameters of the age-specific diffusion processes.
#' \code{prior_volatility} can be a call to \code{exponential} to use an exponential distribution, \code{gamma}
#' to use a Gamma distribution or one of \code{normal}, \code{student_t} or \code{cauchy} to use a half-normal,
#' half-t, or half-Cauchy prior. See \code{priors} for details on these functions.
#'
#' @param prior_nb_dispersion
#' Prior distribution for the dispersion parameter \code{phi} of the over-dispersed count model.
#' Same options as for \code{prior_volatility}.
#'
#' @param algorithm_inference
#' One of the sampling algorithms that are implemented in Stan. See \code{\link[rstan]{stan}}.
#'
#' @param nBurn integer;
#' number of burn-in iterations at the beginning of an MCMC run. See \code{\link[rstan]{sampling}}.
#'
#' @param nPost integer;
#' number of MCMC iterations after burn-in. See \code{\link[rstan]{sampling}}.
#'
#' @param nThin integer;
#' a positive integer specifying the period for saving samples. The default is 1, which is usually the recommended value.
#' See \code{\link[rstan]{sampling}}.
#'
#' @param adapt_delta double;
#' between 0 and 1, defaults to 0.8. See \code{\link[rstan]{stan}}.
#'
#' @param max_treedepth integer;
#' defaults to 14. See \code{\link[rstan]{stan}}.
#'
#' @param seed integer;
#' seed for the random number generator. See \code{set.seed}.
#'
#' @param ... Additional arguments, to be passed to lower-level functions.
#'
#' @details
#' The \code{stan_igbm} function performs full Bayesian estimation (if
#' \code{algorithm_inference} is \code{"sampling"}) via MCMC. The Bayesian model adds
#' priors (i) on the diffusion processes used to express the time-varying transmissibility
#' of the virus, i.e. the probability that a contact between an infectious person in age
#' group alpha and a susceptible person in age group alpha' leads to
#' transmission at time \eqn{t}, and (ii) on a random contact matrix which represents
#' the average number of contacts between individuals of age group alpha and
#' age group alpha'. The \code{stan_igbm} function calls the workhorse
#' \code{stan_igbm.fit} function.
#'
#' @return An object of class \emph{stanigbm} representing the fitted results. Slot mode for this object indicates if the sampling is done or not.
#'
#' @references
#' Bouranis, L., Demiris, N. Kalogeropoulos, K. and Ntzoufras, I. (2022). Bayesian analysis of diffusion-driven multi-type epidemic models with application to COVID-19. arXiv: \url{https://arxiv.org/abs/2211.15229}
#'
#' @examples
#' \donttest{
#' # Age-specific mortality/incidence count time series:
#' data(age_specific_mortality_counts)
#' data(age_specific_cusum_infection_counts)
#'
#' # Import the age distribution for Greece in 2020:
#' age_distr <- age_distribution(country = "Greece", year = 2020)
#'
#' # Lookup table:
#' lookup_table <- data.frame(Initial = age_distr$AgeGrp,
#'                            Mapping = c(rep("0-39", 8),
#'                                        rep("40-64", 5),
#'                                        rep("65+" , 3)))
#'
#' # Aggregate the age distribution table:
#' aggr_age <- aggregate_age_distribution(age_distr, lookup_table)
#'
#' # Import the projected contact matrix for Greece:
#' conmat <- contact_matrix(country = "GRC")
#'
#' # Aggregate the contact matrix:
#' aggr_cm <- aggregate_contact_matrix(conmat, lookup_table, aggr_age)
#'
#' # Aggregate the IFR:
#' ifr_mapping <- c(rep("0-39", 8), rep("40-64", 5), rep("65+", 3))
#'
#' aggr_age_ifr <- aggregate_ifr_react(age_distr, ifr_mapping, age_specific_cusum_infection_counts)
#'
#' # Infection-to-death distribution:
#' ditd <- itd_distribution(ts_length = nrow(age_specific_mortality_counts),
#'                          gamma_mean = 24.19231,
#'                          gamma_cv = 0.3987261)
#'
#' # Posterior sampling:
#'
#' rstan::rstan_options(auto_write = TRUE)
#' chains <- 1
#' options(mc.cores = chains)
#'
#' igbm_fit <- stan_igbm(y_data = age_specific_mortality_counts,
#'                       contact_matrix = aggr_cm,
#'                       age_distribution_population = aggr_age,
#'                       age_specific_ifr = aggr_age_ifr[[3]],
#'                       itd_distr = ditd,
#'                       incubation_period = 3,
#'                       infectious_period = 4,
#'                       likelihood_variance_type = "linear",
#'                       ecr_changes = 7,
#'                       prior_scale_x0 = 1,
#'                       prior_scale_x1 = 1,
#'                       prior_scale_contactmatrix = 0.05,
#'                       pi_perc = 0.1,
#'                       prior_volatility = normal(location = 0, scale = 1),
#'                       prior_nb_dispersion = exponential(rate = 1/5),
#'                       algorithm_inference = "sampling",
#'                       nBurn = 10,
#'                       nPost = 30,
#' nThin = 1, #' chains = chains, #' adapt_delta = 0.6, #' max_treedepth = 14, #' seed = 1) #' #' # print_summary <- summary(object = igbm_fit, y_data = age_specific_mortality_counts)$summary #'} #' @export #' stan_igbm <- function(y_data, contact_matrix, age_distribution_population, age_specific_ifr, itd_distr, incubation_period = 3, infectious_period = 4, likelihood_variance_type = c("quadratic", "linear"), ecr_changes = 1, prior_scale_x0 = 1, prior_scale_x1 = 1, prior_scale_contactmatrix = 0.05, pi_perc = 0.1, # Assume that 10% of each age group are Exposed, rest 90% are Susceptible prior_volatility = normal(location = 0, scale = 2.5), prior_nb_dispersion = gamma(shape = 2, rate = 1), algorithm_inference = c("sampling", "optimizing", "meanfield", "fullrank"), nBurn = 500, nPost = 500, nThin = 1, adapt_delta = 0.8, max_treedepth = 14, seed = 1, ... ) { if( is.null(y_data) | is.null(contact_matrix) | is.null(age_distribution_population) | is.null(age_specific_ifr) ) stop("Please provide the data sources that are missing from ('y_data', 'contact_matrix', 'age_distribution_population', 'age_specific_ifr').") if( nrow(age_distribution_population) != ncol(contact_matrix) ) stop("Incorrect dimensions - The age distribution of the population and the contact matrix must refer to the same number of age groups.") if( nrow(age_specific_ifr) != nrow(y_data) ) stop("The number of rows of 'age_specific_ifr' must be equal to the number of rows of 'y_data'.") if( !identical(colnames(y_data[,-c(1:5)]), age_distribution_population$AgeGrp)) stop("The mortality counts dataset 'y_data' and the age distribution of the population 'age_distribution_population' must refer to the same age group labels.") if( !identical(colnames(y_data[,-c(1:5)]), colnames(age_specific_ifr[,-1]) ) ) stop("The mortality counts dataset 'y_data' and the age-specific IFR dataset 'age_specific_ifr' must refer to the same age group labels.") if( !identical(y_data$Date, age_specific_ifr$Date ) ) stop("The ordering of the dates between the dataset 'y_data' and the dataset 'age_specific_ifr' must be identical.") if( length(itd_distr) != nrow(y_data) ) stop("The length of 'itd_distr' must be equal to the number of rows of 'y_data'.") if( incubation_period == 0) stop("'incubation_period' must be set to a positive integer number.") if( infectious_period == 0) stop("'infectious_period' must be set to a positive integer number.") if( likelihood_variance_type %nin% c("quadratic", "linear") ) stop("'likelihood_variance_type' must be set to one of 'quadratic' or 'linear'.") if( algorithm_inference %nin% c("sampling", "optimizing", "meanfield", "fullrank") ) stop("'algorithm_inference' must be set to one of 'sampling', 'optimizing', 'meanfield', 'fullrank'.") if( pi_perc > 1 ) stop("'pi_perc' must be between 0 and 1.") if( (ecr_changes > 7) | (ecr_changes < 1) ) stop("'ecr_changes' must be between 1 and 7.") pi_prior_params <- lapply(pi_perc, function(x) estBetaParams(x, (0.05*x)) ) algorithm_inference <- match.arg(algorithm_inference) likelihood_variance_type <- match.arg(likelihood_variance_type) if( likelihood_variance_type == "quadratic") l_variance_type <- 0 else if(likelihood_variance_type == "linear") l_variance_type <- 1 standata_preproc <- nlist(Dates = y_data$Date, A = nrow(age_distribution_population), n_obs = nrow(y_data), y_data = y_data[,-c(1:5)], n_pop = sum(age_distribution_population$PopTotal), age_dist = age_distribution_population$PopTotal/sum(age_distribution_population$PopTotal), pop_diag = 
1/(age_distribution_population$PopTotal), ecr_changes = ecr_changes, n_difeq = length(c("S", "E", "E", "I", "I", "C")), L_cm = t( base::chol( base::diag(age_distribution_population$PopTotal) %*% as.matrix(contact_matrix) ) ), age_specific_ifr = age_specific_ifr[,-1], # Remove the Date column I_D = itd_distr, t0 = 0, ts = y_data$Index, left_t = y_data$Index, right_t = y_data$Right, E_deathsByAge_day1 = unlist(y_data[1,-c(1:5)]) + 0.001, incubation_period = incubation_period, infectious_period = infectious_period, prior_scale_x0 = prior_scale_x0, prior_scale_x1 = prior_scale_x1, prior_dist_pi = data.frame(do.call(rbind, pi_prior_params)), likelihood_variance_type = l_variance_type, prior_scale_contactmatrix = prior_scale_contactmatrix ) stanfit <- stan_igbm.fit(standata_preprocessed = standata_preproc, prior_volatility = prior_volatility, prior_nb_dispersion = prior_nb_dispersion, algorithm = algorithm_inference, nBurn = nBurn, nPost = nPost, nThin = nThin, adapt_delta = adapt_delta, max_treedepth = max_treedepth, seed = seed, ...) }
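
## Sketch of the moment-matching step behind 'pi_perc': stan_igbm() calls the
## internal helper estBetaParams(mu, var), defined elsewhere in the package,
## to turn the mean 'pi_perc' and variance 0.05 * pi_perc into Beta shape
## parameters. The closed form below is the standard method-of-moments
## solution and is an assumption about that helper, not a copy of it:
if (FALSE) {
  mu    <- 0.1          # pi_perc
  v     <- 0.05 * mu    # variance used in the call above
  alpha <- ((1 - mu) / v - 1 / mu) * mu^2
  beta  <- alpha * (1 / mu - 1)
  c(alpha = alpha, beta = beta)
}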
/scratch/gouwar.j/cran-all/cranData/Bernadette/R/stan_igbm.R
# Part of the Bernadette package for estimating model parameters # #' @rdname stan_igbm #' #' @param standata_preprocessed #' A named list providing the data for the model. See \code{\link[rstan]{sampling}}. #' #' @param algorithm #' See \code{algorithm} in \link[Bernadette]{stan_igbm}. #' #' @return An object of S4 class \emph{stanfit} representing the fitted results. Slot mode for this object indicates if the sampling is done or not. #' #' @export #' stan_igbm.fit <- function(standata_preprocessed, prior_volatility, prior_nb_dispersion, algorithm, nBurn, nPost, nThin, adapt_delta = NULL, max_treedepth = NULL, seed, ... ) { nIter <- nBurn + nPost nBurnin <- nBurn # standata_preprocessed <- list() # standata_preprocessed$ecr_changes <- 7 # standata_preprocessed$n_obs <- 210 # standata_preprocessed$A <- 3 n_changes <- ceiling(standata_preprocessed$n_obs / standata_preprocessed$ecr_changes) n_remainder <- (standata_preprocessed$n_obs - (n_changes-1)*standata_preprocessed$ecr_changes) L_raw_length <- (standata_preprocessed$A * (standata_preprocessed$A + 1)) / 2 #(n_changes-1)*standata_preprocessed$ecr_changes + n_remainder #---- Useless assignments to pass R CMD check prior_dist_volatility <- prior_dist_nb_dispersion <- prior_mean_volatility <- prior_scale_volatility <- prior_df_volatility <- prior_shape_volatility <- prior_rate_volatility <- prior_mean_nb_dispersion <- prior_scale_nb_dispersion <- prior_df_nb_dispersion <- prior_shape_nb_dispersion <- prior_rate_nb_dispersion <- NULL ok_dists <- nlist("normal", student_t = "t", "cauchy", "gamma", "exponential") #---- Prior distribution for the volatilities (handle_prior() from priors.R): prior_params_volatilities <- handle_prior(prior_volatility, ok_dists = ok_dists) names(prior_params_volatilities) <- paste0(names(prior_params_volatilities), "_volatility") for (i in names(prior_params_volatilities)) assign(i, prior_params_volatilities[[i]]) #---- Prior distribution for the dispersion (handle_prior() from priors.R): prior_params_dispersion <- handle_prior(prior_nb_dispersion, ok_dists = ok_dists) names(prior_params_dispersion) <- paste0(names(prior_params_dispersion), "_nb_dispersion") for (i in names(prior_params_dispersion)) assign(i, prior_params_dispersion[[i]]) #---- Create entries in the data block of the .stan file: standata <- c(standata_preprocessed, nlist( prior_dist_volatility = prior_dist_volatility, prior_mean_volatility = prior_mean_volatility, prior_scale_volatility = prior_scale_volatility, prior_df_volatility = prior_df_volatility, prior_shape_volatility = prior_shape_volatility, prior_rate_volatility = prior_rate_volatility, prior_dist_nb_dispersion = prior_dist_nb_dispersion, prior_mean_nb_dispersion = prior_mean_nb_dispersion, prior_scale_nb_dispersion = prior_scale_nb_dispersion, prior_df_nb_dispersion = prior_df_nb_dispersion, prior_shape_nb_dispersion = prior_shape_nb_dispersion, prior_rate_nb_dispersion = prior_rate_nb_dispersion, n_changes = n_changes, n_remainder = n_remainder, L_raw_length = L_raw_length )) #---- List of parameters that will be monitored: parameters <- c("pi", "phiD", "volatilities", "cm_sample", "beta0", "beta_trajectory", "E_casesByAge", "E_deathsByAge", "E_cases", "E_deaths", "Susceptibles", "log_lik", "deviance") #---- Call Stan to draw from posterior distribution: stanfit <- stanmodels$igbm #---- Optimizing: if (algorithm == "optimizing") { optimizing_args <- list(...) 
if (is.null(optimizing_args$draws)) optimizing_args$draws <- 1000L optimizing_args$object <- stanfit optimizing_args$data <- standata optimizing_args$seed <- seed out <- do.call(rstan::optimizing, args = optimizing_args) check_stanfit(out) return(out) #---- Sampling: } else { if (algorithm == "sampling") { sampling_args <- set_sampling_args(object = stanfit, user_dots = list(...), user_adapt_delta = adapt_delta, user_max_treedepth = max_treedepth, data = standata, pars = parameters, warmup = nBurnin, iter = nIter, thin = nThin, seed = seed, show_messages = FALSE ) stanfit <- do.call(rstan::sampling, sampling_args) #---- Variational Bayes: } else { # Algorithm either "meanfield" or "fullrank": stanfit <- rstan::vb(stanfit, data = standata, pars = parameters, seed = seed, algorithm = algorithm, ...) } #---- Production of output: check <- check_stanfit(stanfit) if (!isTRUE(check)) return(standata) return(stanfit) } }
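
## Sketch (not run, toy values) of the bookkeeping quantities computed at the
## top of stan_igbm.fit(): with n_obs daily observations and an effective
## contact rate that changes every 'ecr_changes' days, the sampler estimates
## n_changes rate values, the last one spanning n_remainder days, while
## L_raw_length counts the free entries of the lower-triangular Cholesky
## factor of the A x A contact matrix.
if (FALSE) {
  n_obs <- 210; ecr_changes <- 7; A <- 3
  n_changes    <- ceiling(n_obs / ecr_changes)             # 30
  n_remainder  <- n_obs - (n_changes - 1) * ecr_changes    # 7
  L_raw_length <- A * (A + 1) / 2                          # 6
}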
/scratch/gouwar.j/cran-all/cranData/Bernadette/R/stan_igbm.fit.R
# Generated by rstantools. Do not edit by hand. # names of stan models stanmodels <- c("igbm") # load each stan module Rcpp::loadModule("stan_fit4igbm_mod", what = TRUE) # instantiate each stanmodel object stanmodels <- sapply(stanmodels, function(model_name) { # create C++ code for stan model stan_file <- if(dir.exists("stan")) "stan" else file.path("inst", "stan") stan_file <- file.path(stan_file, paste0(model_name, ".stan")) stanfit <- rstan::stanc_builder(stan_file, allow_undefined = TRUE, obfuscate_model_name = FALSE) stanfit$model_cpp <- list(model_cppname = stanfit$model_name, model_cppcode = stanfit$cppcode) # create stanmodel object methods::new(Class = "stanmodel", model_name = stanfit$model_name, model_code = stanfit$model_code, model_cpp = stanfit$model_cpp, mk_cppmodule = function(x) get(paste0("rstantools_model_", model_name))) })
/scratch/gouwar.j/cran-all/cranData/Bernadette/R/stanmodels.R
#' Summary of stanigbm posterior output
#'
#' This function summarizes the MCMC output for \code{stanigbm} objects.
#'
#' @param object
#' An \code{R} object of class \code{stanigbm}.
#'
#' @param y_data data.frame;
#' age-specific mortality counts in time. See \code{data(age_specific_mortality_counts)}.
#'
#' @param ...
#' Additional arguments, to be passed to lower-level functions.
#'
#' @return A named list with elements \code{summary} and \code{c_summary},
#' which contain summaries for all Markov chains merged and individual chains,
#' respectively. See \code{\link[rstan]{stanfit-method-summary}}.
#'
#' @examples
#' \donttest{
#' # Age-specific mortality/incidence count time series:
#' data(age_specific_mortality_counts)
#' data(age_specific_cusum_infection_counts)
#'
#' # Import the age distribution for Greece in 2020:
#' age_distr <- age_distribution(country = "Greece", year = 2020)
#'
#' # Lookup table:
#' lookup_table <- data.frame(Initial = age_distr$AgeGrp,
#'                            Mapping = c(rep("0-39", 8),
#'                                        rep("40-64", 5),
#'                                        rep("65+" , 3)))
#'
#' # Aggregate the age distribution table:
#' aggr_age <- aggregate_age_distribution(age_distr, lookup_table)
#'
#' # Import the projected contact matrix for Greece:
#' conmat <- contact_matrix(country = "GRC")
#'
#' # Aggregate the contact matrix:
#' aggr_cm <- aggregate_contact_matrix(conmat, lookup_table, aggr_age)
#'
#' # Aggregate the IFR:
#' ifr_mapping <- c(rep("0-39", 8), rep("40-64", 5), rep("65+", 3))
#'
#' aggr_age_ifr <- aggregate_ifr_react(age_distr, ifr_mapping, age_specific_cusum_infection_counts)
#'
#' # Infection-to-death distribution:
#' ditd <- itd_distribution(ts_length = nrow(age_specific_mortality_counts),
#'                          gamma_mean = 24.19231,
#'                          gamma_cv = 0.3987261)
#'
#' # Posterior sampling:
#'
#' rstan::rstan_options(auto_write = TRUE)
#' chains <- 1
#' options(mc.cores = chains)
#'
#' igbm_fit <- stan_igbm(y_data = age_specific_mortality_counts,
#'                       contact_matrix = aggr_cm,
#'                       age_distribution_population = aggr_age,
#'                       age_specific_ifr = aggr_age_ifr[[3]],
#'                       itd_distr = ditd,
#'                       incubation_period = 3,
#'                       infectious_period = 4,
#'                       likelihood_variance_type = "linear",
#'                       ecr_changes = 7,
#'                       prior_scale_x0 = 1,
#'                       prior_scale_x1 = 1,
#'                       prior_scale_contactmatrix = 0.05,
#'                       pi_perc = 0.1,
#'                       prior_volatility = normal(location = 0, scale = 1),
#'                       prior_nb_dispersion = exponential(rate = 1/5),
#'                       algorithm_inference = "sampling",
#'                       nBurn = 10,
#'                       nPost = 30,
#'                       nThin = 1,
#'                       chains = chains,
#'                       adapt_delta = 0.6,
#'                       max_treedepth = 14,
#'                       seed = 1)
#'
#' # print_summary <- summary(object = igbm_fit, y_data = age_specific_mortality_counts)$summary
#'}
#'
#' @export
#'
#' @method summary stanigbm
#'
summary.stanigbm <- function(object, y_data, ...)
{ check <- check_stanfit(object) if (!isTRUE(check)) stop("Provide an object of class 'stanfit' using rstan::sampling() or rstan::vb()") if("theta_tilde" %in% names(object) ) stop("Perform MCMC sampling using rstan::sampling() or rstan::vb()") parameters <- c("pi", "phiD", "volatilities", "cm_sample", "beta0", "beta_trajectory", "E_casesByAge", "E_deathsByAge", "E_cases", "E_deaths", "Susceptibles") cov_data <- list() cov_data$A <- ncol(y_data[,-c(1:5)]) cov_data$n_obs <- nrow(y_data) rest_params <- c(parameters[1], parameters[2], paste0(parameters[3],"[", 1:cov_data$A,"]"), parameters[5] ) cm_params <- paste0(parameters[4], "[", apply(expand.grid(1:cov_data$A, 1:cov_data$A), 1, paste, collapse = ","), "]") beta_params <- paste0(parameters[6], "[", apply(expand.grid(1:cov_data$n_obs, 1:cov_data$A), 1, paste, collapse = ","), "]") E_casesAge_params <- paste0(parameters[7], "[", apply(expand.grid(1:cov_data$n_obs, 1:cov_data$A), 1, paste, collapse = ","), "]") E_deathsAge_params <- paste0(parameters[8], "[", apply(expand.grid(1:cov_data$n_obs, 1:cov_data$A), 1, paste, collapse = ","), "]") E_cases_params <- paste0(parameters[9], "[", 1:cov_data$n_obs, "]") E_deaths_params <- paste0(parameters[10], "[", 1:cov_data$n_obs, "]") out <- summary(object, pars = c("lp__", rest_params, cm_params, beta_params, E_casesAge_params, E_deathsAge_params, E_cases_params, E_deaths_params ), ...) return(out) }
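
## Sketch (not run) of how the indexed parameter names above are assembled:
## expand.grid() enumerates the (row, column) indices with the first index
## varying fastest, and paste0() builds the rstan parameter names.
if (FALSE) {
  A <- 2
  paste0("cm_sample[",
         apply(expand.grid(1:A, 1:A), 1, paste, collapse = ","), "]")
  ## "cm_sample[1,1]" "cm_sample[2,1]" "cm_sample[1,2]" "cm_sample[2,2]"
}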
/scratch/gouwar.j/cran-all/cranData/Bernadette/R/summary.stangbm.R
# Part of the Bernadette package for estimating model parameters # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 3 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. .onAttach <- function(...) { ver <- utils::packageVersion("Bernadette") packageStartupMessage("This is Bernadette version ", ver) packageStartupMessage("- See https://github.com/bernadette-eu/Bernadette for changes to default priors.") packageStartupMessage("- Default priors may change, so it's safest to specify priors, even if equivalent to the defaults.") packageStartupMessage("- For execution on a local, multicore CPU with excess RAM we recommend calling") packageStartupMessage(" options(mc.cores = parallel::detectCores()).") }
/scratch/gouwar.j/cran-all/cranData/Bernadette/R/zzz.R
## ---- include = FALSE--------------------------------------------------------- knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ## ----setup-------------------------------------------------------------------- library(Bernadette)
/scratch/gouwar.j/cran-all/cranData/Bernadette/inst/doc/Bernadette.R
--- title: "Bernadette" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{Bernadette} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ``` ```{r setup} library(Bernadette) ```
/scratch/gouwar.j/cran-all/cranData/Bernadette/inst/doc/Bernadette.Rmd
--- title: "Bernadette" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{Bernadette} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ``` ```{r setup} library(Bernadette) ```
/scratch/gouwar.j/cran-all/cranData/Bernadette/vignettes/Bernadette.Rmd
###--- This used to be part /u/maechler/R/MM/NUMERICS/bessel-fn.R #### "Modified" Bessel Function I_nu(x) #### ---------------------------------------------- ## besselI() - Definition as infinite sum -- working for "mpfr" numbers, too! ## --------- besselIs <- function(x, nu, nterm = 800, expon.scaled = FALSE, log = FALSE, Ceps = if(isNum) 8e-16 else 2^(- x@.Data[[1]]@prec)) { ## Purpose: besselI() primitively ## ---------------------------------------------------------------------- ## Arguments: (x,nu) as besselI; nterm: number of terms for "infinite" sum ## ---------------------------------------------------------------------- ## Author: Martin Maechler, Date: 21 Oct 2002, 21:59 if(length(nu) > 1) stop(" 'nu' must be scalar (length 1)!") n <- length(x) if(n == 0) return(x) j <- (nterm-1):0 # sum smallest first! if(is.numeric(x) && is(nu, "mpfr")) { x <- mpfr(x, precBits = max(64, .getPrec(nu))) isNum <- FALSE } else isNum <- is.numeric(x) || is.complex(x) l.s.j <- outer(j, (x/2), function(X,Y) X*2*log(Y))##-> {nterm x n} matrix ## ## improve accuracy for lgamma(j+1) for "mpfr" numbers ## -- this is very important [evidence: e.g. bI(10000, 1)] if(is(l.s.j, "mpfr")) j <- mpfr(j, precBits = max(sapply(l.s.j@.Data, slot, "prec"))) else if(!isNum) j <- as(j, class(x)) ## underflow (64-bit AMD) for x > 745.1332 ## for large x, this already overflows to Inf : ## s.j <- outer(j, (x^2/4), function(X,Y) Y^X) ##-> {nterm x n} matrix ## s.j <- s.j / (gamma(j+1) * gamma(nu+1 + j)) but without overflow : log.s.j <- if(expon.scaled) l.s.j - rep(abs(Re(x)), each=nterm) - lgamma(j+1) - lgamma(nu+1 + j) else l.s.j - lgamma(j+1) - lgamma(nu+1 + j) if(log) { s.j <- lsum(log.s.j) # == log(sum_{j} exp(log.s.j) ); NB: lsum() works on whole matrix if(any(i0 <- x == 0)) s.j[i0] <- 0 if(any(lrgS <- Re(log.s.j[1,]) > Re(log(Ceps) + s.j))) lapply(x[lrgS], function(x) warning(gettextf(" 'nterm=%d' may be too small for %s", nterm, paste(format(x), collapse=", ")), domain=NA)) nu*log(x/2) + s.j } else { ## !log s.j <- ## log I_nu(x) -- trying to avoid overflow/underflow for large x OR large nu ## log(s.j) ; e..x <- exp(-x) # subnormal for x > 1024*log(2) ~ 710; exp(log.s.j) s <- colSums(s.j) if(any(i0 <- x == 0)) s[i0] <- 0 if(!all(iFin <- is.finite(s))) stop(sprintf("infinite s for x=%g", x[!iFin][1])) if(any(lrgS <- Re(s.j[1,]) > Re(Ceps * s))) lapply(x[lrgS], function(x) warning(gettextf(" 'nterm=%d' may be too small for %s", nterm, paste(format(x), collapse=", ")), domain=NA)) (x/2)^nu * s } } ## old name [back compatibility]: bI <- function(x, nu, nterm = 800, expon.scaled = FALSE, log = FALSE, Ceps = if(isNum) 8e-16 else 2^(- x@.Data[[1]]@prec)) { .Deprecated("besselIs") isNum <- is.numeric(x) || is.complex(x) besselIs(x, nu, nterm=nterm, expon.scaled=expon.scaled, log=log, Ceps=Ceps) } ###--------------- besselI() for large x or also large nu --- ### ### see --> ~/R/MM/NUMERICS/bessel-large-x.R for code usage ### ================================ besselIasym <- function(x, nu, k.max = 10, expon.scaled=FALSE, log=FALSE) { ## Purpose: Asymptotic expansion of Bessel I_nu(x) function x -> oo ## by Abramowitz & Stegun (9.7.1), p.377 : ## ## I_a(z) = exp(z) / sqrt(2*pi*z) * f(z,..) where ## f(z,..) = 1 - (mu-1)/ (8*z) + (mu-1)(mu-9)/(2! (8z)^2) - ...
## = 1- (mu-1)/(8z)*(1- (mu-9)/(2(8z))*(1-(mu-25)/(3(8z))*..)) ## where mu = 4*a^2 *and* |arg(z)| < pi/2 ## ---------------------------------------------------------------------- ## Arguments: x, nu, expon.scaled: as besselI() ## ---------------------------------------------------------------------- ## Author: Martin Maechler, Date: 11 Apr, 21 Nov 2008 ## Note that for each x the series eventually *DIVERGES* ## it should be "stopped" as soon as it *converged* (:-) stopifnot(k.max == round(k.max)) ## I_\nu(z) = exp(z) / sqrt(2*pi*z) * f(z, nu) ## First compute f(x, nu) to order k.max ## f <- 1 d <- 0 ## d = 1 - f <==> f = 1 - d if(k.max >= 1) { ## m. <- 4*nu^2 x8 <- 8*x for(k in k.max:1) { ## mu <- 4*nu^2; d <- (1 - d)*(mu - (2*k-1)^2)/(k*x8) ## mu - (2k-1)^2 = (2nu - (2k-1)) (2nu + (2k-1)) = ## = (2(nu-k)+1)(2(nu+k)-1) d <- (1 - d)*((2*(nu-k)+1)*(2*(nu+k)-1))/(k*x8) } } if(expon.scaled) sx <- x - abs(if(is.complex(x)) Re(x) else x) pi2 <- 2* (if(inherits(x, "mpfr")) Rmpfr::Const("pi", max(.getPrec(x))) else pi) if(log) { ## f = 1 - d ==> log(f) = log1p(-d) : (if(expon.scaled) sx else x) + log1p(-d) - log(pi2*x) / 2 } else { exp(if(expon.scaled) sx else x) * (1-d) / sqrt(pi2*x) } } besselKasym <- function(x, nu, k.max = 10, expon.scaled=FALSE, log=FALSE) { ## Purpose: Asymptotic expansion of Bessel K_nu(x) function x -> oo ## by Abramowitz & Stegun (9.7.2), p.378 : ## ## K_nu(z) = exp(-z) * sqrt(pi/(2*z)) * f(z,..) where ## f(z,..) = 1 + (mu-1)/ (8*z) + (mu-1)(mu-9)/(2! (8z)^2) + ... ## = 1 + (mu-1)/(8z)*(1 + (mu-9)/(2(8z))*(1 + (mu-25)/(3(8z))*..)) ## where mu = 4*nu^2 *and* |arg(z)| < pi/2 ## ---------------------------------------------------------------------- ## Arguments: x, nu, expon.scaled: as besselK() ## ---------------------------------------------------------------------- ## Author: Martin Maechler, Date: 15. Dec 2018 ##__NB: This *very* similar to besselIasym(): 1) different initial factor; 2) *not* alternating ## Note that for each x the series eventually *DIVERGES* ## it should be "stopped" as soon as it *converged* (:-) stopifnot(k.max == round(k.max)) ## K_\nu(z) = exp(-z) * sqrt(pi/(2*z)) * f(z, nu) ## First compute f(x, nu) to order k.max ## f <- 1 -- via d := f - 1 <==> f = 1 + d d <- 0 if(k.max >= 1) { x8 <- 8*x for(k in k.max:1) { ## mu <- 4*nu^2; d <- (1 + d)*(mu - (2*k-1)^2)/(k*x8) ## mu - (2k-1)^2 = (2nu - (2k-1)) (2nu + (2k-1)) = ## = (2(nu-k)+1)(2(nu+k)-1) d <- (1 + d)*((2*(nu-k)+1)*(2*(nu+k)-1))/(k*x8) } } pi.2 <- (if(inherits(x, "mpfr")) Rmpfr::Const("pi", max(.getPrec(x))) else pi)/2 if(log) { ## f = 1 + d ==> log(f) = log1p(d) : (if(expon.scaled) 0 else -x ) + log1p(d) + (log(pi.2) - log(x)) / 2 } else { (if(expon.scaled) 1 else exp(-x)) * (1+d) * sqrt(pi.2/x) } } besselI.ftrms <- function(x, nu, K = 20) { ## Purpose: all *Terms* in besselIasym() ## by Abramowitz & Stegun (9.7.1), p.377 : ## f(x,..) = 1 - (mu-1)/ (8*x) + (mu-1)(mu-9)/(2! (8x)^2) - ... 
## = 1- (mu-1)/(8x)*(1- (mu-9)/(2(8x))*(1-(mu-25)/(3(8x))*..)) ## where mu = 4*nu^2 ## ---------------------------------------------------------------------- ## Arguments: x, nu: as besselI() ## ---------------------------------------------------------------------- ## Author: Martin Maechler, Date: 11 Apr, 21 Nov 2008 stopifnot(length(x) == 1, x > 0, length(nu) == 1, length(K) == 1, K == round(K), K >= 0) ## I_\nu(x) = exp(x) / sqrt(2*pi*x) * f(x, nu) kk <- seq_len(K) mu <- 4*nu^2 x8 <- 8*x multf <- - (mu - (2*kk-1)^2)/(kk*x8) ## ser <- cumprod(multf) ## now, for k-term approximation f_k of f(x,nu) we have ## f_k = 1 + sum(ser[1:k]), i.e. ## (f_k)_k = 1 + cumsum(c(0, ser))[k+1] for k= 0,1,...,K ## ser := cumprod(multf) } ###---------------------------------------------------------------------------- ### When BOTH nu and x are large : ## ^^^^ (in practice, works fine already in some cases of |x| large) besselI.nuAsym <- function(x, nu, k.max, expon.scaled=FALSE, log=FALSE) { ## Purpose: Asymptotic expansion of Bessel I_nu(x) function ## when BOTH nu and x are large ## Abramowitz & Stegun , p.378, __ 9.7.7. __ ## ## I_nu(nu * z) ~ 1/sqrt(2*pi*nu) * exp(nu*eta)/(1+z^2)^(1/4) * ## * {1 + u_1(t)/nu + u_2(t)/nu^2 + ... } ## where ## __ 9.7.11 __ ## t := 1 / sqrt(1 + z^2) = 1/sz ## eta := sqrt(1 + z^2) + log(z / (1 + sqrt(1+z^2))) = sz + log(z / (1 + sz)) ## with sz := sqrt(1 + z^2) ## ## and u_k(t) from p.366 __ 9.3.9 __ ## ## u0(t) = 1 ## u1(t) = (3*t - 5*t^3)/24 ## u2(t) = (81*t^2 - 462*t^4 + 385*t^6)/1152 ## ... up to u4(t) ## with recursion 9.3.10 for k = 0, 1, .... : ## ## u_{k+1}(t) = t^2/2 * (1 - t^2) * u'_k(t) + ## 1/8 \int_0^t (1 - 5*s^2)* u_k(s) ds ## ---------------------------------------------------------------------- ## Arguments: x, nu, expon.scaled: as besselI() ## ---------------------------------------------------------------------- ## Author: Martin Maechler, Date: 22 Nov 2008, 15:22 stopifnot(k.max == round(k.max), 0 <= k.max, k.max <= 5) z <- x/nu # -> computing I_nu(z * nu) sz <- sqrt(1 + z^2) ## << "FIXME": use hypot(.) t <- 1/sz ## if(expon.scaled) ## scale by * exp(-abs(Re(x))): ## sx <- x - abs(if(is.complex(x)) Re(x) else x) eta <- (if(expon.scaled) { ## <==> * exp(-|Re(x)|) == exp(- |Re(z) * nu|) <==> "eta - |Re(z)|" ## For real z, have ## sz - |z| = sqrt(1 + z^2) - |z| =!= 1/(sqrt(..) + |z|) = 1/(sz + |z|); ## in complex case sz - |x| = w/(sz + |x|) with w := 1-y^2 + 2xy i; z = x + iy w <- if(is.complex(z)) { x. <- Re(z); y <- Im(z) 1-y^2 + 2i*x.*y # complex(real = 1-y^2, imaginary = 2*x.*y) } else 1 w/(sz + abs(if(is.complex(z)) x. else z)) } else sz) + log(z / (1 + sz)) ## I_nu(nu * z) ~ 1/sqrt(2*pi*nu) * exp(nu*eta)/(1+z^2)^(1/4) * ## * {1 + u_1(t)/nu + u_2(t)/nu^2 + ... 
} if(k.max == 0) d <- 0 else { ## k.max >= 1 --- Find the Debye polynomials u_j(t) in ../misc/QRMlib_src_bessel.c ## or in a newer GSL, but much more hidden ..../gsl-2.5/specfunc/debye.c t2 <- t^2 u1.t <- (t*(3 - 5*t2))/24 d <- if(k.max == 1) { u1.t/nu } else { ## k.max >= 2 u2.t <- ##(81*t^2 - 462*t^4 + 385*t^6)/1152 t2*(81 + t2*(-462 + t2*385)) / 1152 if(k.max == 2) (u1.t + u2.t/nu)/nu else { ## k.max >= 3 u3.t <- t*t2*(30375 + t2*(-369603 + t2*(765765 - t2*425425)))/ 414720 if(k.max == 3) (u1.t + (u2.t + u3.t/nu)/nu)/nu else { ## k.max >= 4 t4 <- t2*t2 u4.t <- t4*(4465125 + t2*(-94121676 + t2*(349922430 + t2*(-446185740 + t2*185910725))))/39813120 if(k.max == 4) (u1.t + (u2.t + (u3.t + u4.t/nu)/nu)/nu)/nu else { ## k.max >= 5 u5.t <- t*t4*(1519035525 + t2*(-49286948607 + t2*(284499769554 + t2*(-614135872350 + t2*(566098157625 - t2*188699385875)) )))/6688604160 if(k.max == 5) (u1.t + (u2.t + (u3.t + (u4.t + u5.t/nu)/nu)/nu)/nu)/nu else stop("k.max > 5: not yet implemented (but should NOT happen)") } } } } } pi2 <- 2* (if(inherits(x, "mpfr")) Rmpfr::Const("pi", max(.getPrec(x))) else pi) if(log) { log1p(d) + nu*eta - (log(sz) + log(pi2*nu))/2 } else { (1+d) * exp(nu*eta) / sqrt(pi2*nu*sz) } } ## {besselI.nuAsym} besselK.nuAsym <- function(x, nu, k.max, expon.scaled=FALSE, log=FALSE) { ## Purpose: Asymptotic expansion of Bessel K_nu(x) function ## when BOTH nu and x are large ## Abramowitz & Stegun , p.378, __ 9.7.8. __ ## ## K_nu(nu * z) ~ sqrt(pi/(2*nu)) * exp(-nu*eta)/(1+z^2)^(1/4) * ## * {1 - u_1(t)/nu + u_2(t)/nu^2 - ... } ## { see besselI.nuAsym() above, for t, eta, u_k ...} ## ---------------------------------------------------------------------- ## Arguments: x, nu, expon.scaled: as besselK() ## ---------------------------------------------------------------------- ## Author: Martin Maechler, Date: 23 Nov 2009 stopifnot(k.max == round(k.max), 0 <= k.max, k.max <= 5) z <- x/nu # -> computing K_nu(z * nu) sz <- sqrt(1 + z^2) ## << "FIXME": use hypot(.) t <- 1/sz eta <- (if(expon.scaled) ## <==> * exp(-x) == exp(- z * nu) <==> "eta - z" ## sz - z = sqrt(1 + z^2) - z = 1/(sqrt(..) + z) 1/(z + sz) else sz) + log(z / (1 + sz)) ## K_nu(nu * z) ~ [see above] if(k.max == 0) d <- 0 else { ## k.max >= 1 --- Find the Debye polynomials u_j(t) ..... [see above!] t2 <- t^2 u1.t <- (t*(3 - 5*t2))/24 ## NB: Difference here for K() to I() above is *alternating signs*: "-" for odd uj() d <- if(k.max == 1) { - u1.t/nu } else { ## k.max >= 2 u2.t <- ##(81*t^2 - 462*t^4 + 385*t^6)/1152 t2*(81 + t2*(-462 + t2*385)) / 1152 if(k.max == 2) (- u1.t + u2.t/nu)/nu else { ## k.max >= 3 u3.t <- t*t2*(30375 + t2*(-369603 + t2*(765765 - t2*425425)))/ 414720 if(k.max == 3) (- u1.t + (u2.t - u3.t/nu)/nu)/nu else { ## k.max >= 4 t4 <- t2*t2 u4.t <- t4*(4465125 + t2*(-94121676 + t2*(349922430 + t2*(-446185740 + t2*185910725))))/39813120 if(k.max == 4) (- u1.t + (u2.t + (-u3.t + u4.t/nu)/nu)/nu)/nu else { ## k.max >= 5 u5.t <- t*t4*(1519035525 + t2*(-49286948607 + t2*(284499769554 + t2*(-614135872350 + t2*(566098157625 - t2*188699385875)) )))/6688604160 if(k.max == 5) (- u1.t + (u2.t + (-u3.t + (u4.t - u5.t/nu)/nu)/nu)/nu)/nu else stop("k.max > 5: not yet implemented (but should NOT happen)") } } } } } if(log) { log1p(d) - nu*eta - (log(sz) - log(pi/(2*nu)))/2 } else { (1+d) * exp(-nu*eta)*sqrt(pi/(2*nu * sz)) } } ## {besselK.nuAsym}
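## Consistency sketch (not run): for large arguments the expansions above
## should closely track base R's Bessel functions. A minimal check, assuming
## the default/maximal truncation orders are adequate at these arguments:
if(FALSE) {
  x <- 50
  besselIasym(x, nu = 1) / besselI(x, 1)   # ~ 1 (to near machine precision)
  besselKasym(x, nu = 1) / besselK(x, 1)   # ~ 1
  ## uniform (large nu AND large x) expansion on the log scale, where the
  ## unscaled value may be too large for double precision:
  besselI.nuAsym(x = 1000, nu = 800, k.max = 5, log = TRUE)
}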
/scratch/gouwar.j/cran-all/cranData/Bessel/R/I-fn.R
#### Bessel Function J_nu(x) #### ---------------------------------------------- ## It is possible to define the function by its Taylor series expansion around x = 0: ## J_\alpha(x) = \sum_{m=0}^\infty \frac{(-1)^m}{m! \, \Gamma(m+\alpha+1)} {\left(\frac{x}{2}\right)}^{2m+\alpha} ## besselJ() - Definition as infinite sum -- working for "mpfr" numbers, too! ## --------- besselJs <- function(x, nu, nterm = 800, log = FALSE, Ceps = if(isNum) 8e-16 else 2^(- x@.Data[[1]]@prec)) { ## Purpose: besselJ() primitively ## ---------------------------------------------------------------------- ## Arguments: (x,nu) as besselJ; nterm: number of terms for "infinite" sum ## ---------------------------------------------------------------------- ## Author: Martin Maechler, Date: Dec 2014 if(length(nu) > 1) stop(" 'nu' must be scalar (length 1)!") n <- length(x) if(n == 0) return(x) isNum <- is.numeric(x) || is.complex(x) if (nu < 0) { ## Using Abramowitz & Stegun 9.1.2 ## this may not be quite optimal (CPU and accuracy wise) na <- floor(nu) return(if(!log) (if(nu - na == 0.5) 0 else besselJs(x, -nu, nterm=nterm, Ceps=Ceps) * cospi(nu)) + (if(nu == na ) 0 else besselY (x, -nu ) * sinpi(nu)) ## TODO: besselYs() series ## (if(nu == na ) 0 else besselYs(x, -nu, nterm=nterm, Ceps=Ceps) * sinpi(nu)) else ## same on log scale ==> need lsum() ? stop("besselJs(*, nu < 0, log = TRUE) not yet implemented") ) } j <- (nterm-1):0 # sum smallest first! sgns <- rep_len(if(nterm %% 2 == 0) c(-1,1) else c(1,-1), nterm) has0 <- any(i0 <- x == 0) x. <- if(has0) x[!i0] else x if(is.numeric(x) && is(nu, "mpfr")) { x. <- mpfr(x., precBits = max(64, .getPrec(nu))) isNum <- FALSE } l.s.j <- outer(j, x./2, function(X,Y) X*2*log(Y))##-> {nterm x n} matrix ## ## improve accuracy for lgamma(j+1) for "mpfr" numbers ## -- this is very important [evidence: e.g. besselJs(10000, 1)] if(is(l.s.j, "mpfr")) j <- mpfr(j, precBits = max(sapply(l.s.j@.Data, slot, "prec"))) else if(!isNum) j <- as(j, class(x)) ## underflow (64-bit AMD) for x > 745.1332 ## for large x, this already overflows to Inf : ## s.j <- outer(j, (x^2/4), function(X,Y) Y^X) ##-> {nterm x n} matrix ## s.j <- s.j / (gamma(j+1) * gamma(nu+1 + j)) but without overflow : log.s.j <- l.s.j - lgamma(j+1) - lgamma(nu+1 + j) s.j <- if(log) # NB: lsum() works on whole matrix lssum(log.s.j, signs=sgns) # == log(sum_{j} exp(log.s.j) ) else ## log J_nu(x) -- trying to avoid overflow/underflow for large x OR large nu ## log(s.j) ; e..x <- exp(-x) # subnormal for x > 1024*log(2) ~ 710; exp(log.s.j) if(log) { if(any(lrgS <- log.s.j[1,] > log(Ceps) + s.j)) lapply(x.[lrgS], function(x) warning(gettextf(" 'nterm=%d' may be too small for x=%g", nterm, x), domain=NA)) if(has0) { sj <- x sj[!i0] <- s.j s.j <- sj } nu*log(x/2) + s.j } else { ## !log s <- colSums(sgns * s.j) if(!all(iFin <- is.finite(s))) stop(sprintf("infinite s for x=%g", x.[!iFin][1])) if(any(lrgS <- s.j[1,] > Ceps * s)) lapply(x.[lrgS], function(x) warning(gettextf(" 'nterm=%d' may be too small for x=%g", nterm, x), domain=NA)) if(has0) { sj <- x sj[!i0] <- s s <- sj } (x/2)^nu * s } }
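## Sanity sketch (not run): the series should agree with base R's besselJ()
## for double precision input, and extends to high-precision "mpfr" numbers
## (assuming the Rmpfr package is available):
if(FALSE) {
  x <- c(0.1, 1, 10)
  all.equal(besselJs(x, nu = 0.5), besselJ(x, 0.5))  # TRUE, up to ~1e-15
  if(requireNamespace("Rmpfr", quietly = TRUE))
    besselJs(Rmpfr::mpfr(10, precBits = 200), nu = 0.5)  # ~ 60 correct digits
}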
/scratch/gouwar.j/cran-all/cranData/Bessel/R/J-fn.R
### From copula package [not yet exported there]. as of Dec.2014: Re_ <- function(z) if(is.complex(z)) Re(z) else z ##' Properly compute log(x_1 + .. + x_n) for a given (n x d)-matrix of n row ##' vectors log(x_1),..,log(x_n) (each of dimension d) ##' Here, x_i > 0 for all i ##' @title Properly compute the logarithm of a sum ##' @param lx (n,d)-matrix containing the row vectors log(x_1),..,log(x_n) ##' each of dimension d --- can be "mpfrArray" !!!! ##' @param l.off the offset to subtract and re-add; ideally in the order of ##' the maximum of each column ##' @return log(x_1 + .. + x_n) [i.e., OF DIMENSION d!!!] computed via ##' log(sum(x)) = log(sum(exp(log(x)))) ##' = log(exp(log(x_max))*sum(exp(log(x)-log(x_max)))) ##' = log(x_max) + log(sum(exp(log(x)-log(x_max))))) ##' = lx.max + log(sum(exp(lx-lx.max))) ##' => VECTOR OF DIMENSION d ##' @author Marius Hofert, Martin Maechler lsum <- function(lx, l.off = apply(Re_(lx), 2, max)) { ## do not use cbind or rbind here, since it is not clear if the user specified ## only one vector log(x) or several vectors of dimension 1 !!! stopifnot(length(dim(lx)) == 2L) # is.matrix(.) generalized l.off + log(colSums(exp(lx - rep(l.off, each=nrow(lx))))) } ##' Properly compute log(x_1 + .. + x_n) for a given matrix of column vectors ##' log(|x_1|),.., log(|x_n|) and corresponding signs sign(x_1),.., sign(x_n) ##' Here, x_i is of arbitrary sign ##' @title compute logarithm of a sum with signed large coefficients ##' @param lxabs (d,n)-matrix containing the column vectors log(|x_1|),..,log(|x_n|) ##' each of dimension d ##' @param signs corresponding matrix of signs sign(x_1), .., sign(x_n) ##' @param l.off the offset to subtract and re-add; ideally in the order of max(.) ##' @param strict logical indicating if it should stop on some negative sums ##' @return log(x_1 + .. + x_n) [i.e., of dimension d] computed via ##' log(sum(x)) = log(sum(sign(x)*|x|)) = log(sum(sign(x)*exp(log(|x|)))) ##' = log(exp(log(x0))*sum(signs*exp(log(|x|)-log(x0)))) ##' = log(x0) + log(sum(signs* exp(log(|x|)-log(x0)))) ##' = l.off + log(sum(signs* exp(lxabs - l.off ))) ##' @author Marius Hofert and Martin Maechler lssum <- function (lxabs, signs, l.off = apply(Re_(lxabs), 2, max), strict = TRUE) { stopifnot(length(dim(lxabs)) == 2L) # is.matrix(.) generalized sum. <- colSums(signs * exp(lxabs - rep(l.off, each=nrow(lxabs)))) if(anyNA(sum.) || any(sum. <= 0)) (if(strict) stop else warning)("lssum found non-positive sums") l.off + log(sum.) } ##' Compute sqrt(1 + z^2) in a numerical stable way, ##' notably for |z| << 1 (|z| := Arg(z) = ph z for complex z) ##' ##' This is a special case of the more general `hypot(u,v) := sqrt(u^2 + v^2)` ##' (more details) ##' @title Compute sqrt(1 + z^2) in a numerical stable way, ##' @param z numeric or complex (or "mpfr" ..) vector (or matrix, array ..) ##' @return Numeric/complex/... vector (or array ..) with the same attributes as \code{z} ##' @author Martin Maechler sqrt1pSqr <- function(z) { if(!length(z)) return(z) z2 <- z^2 u <- 1+z2 r <- sqrt(u) # direct form for normal case if(length(sml <- which(1 - z2^2/8 == 1))) { # also "works" for mpfr z22 <- z2[sml]/2 r[sml] <- 1 + z22*(1 - z22/2) # 2-term approx 1 + z^2/2 - z^4/8 } r } ## TODO: check etc !!!!
==> ~/R/MM/NUMERICS/sqrt1pSqr-hypot.R ## ==== --------------------------------- ## "FIXME": From ~/R/MM/NUMERICS/complex.R , 15 Jan 2000 ((c) Martin Maechler): ## ----- This will be better & faster __only__ in the is.numeric() case hypot <- function(x,y) Mod(x + 1i*y) ## NB: Binomial series of (1+u)^(1/2), exact coefficients c_j / 2^(2j-1) : if(FALSE) { k <- 0:14; noquote(formatC(choose(1/2, k) * 2^(2*k-1), w=1, format="fg")) ## [1] 0.5 1 -1 2 -5 14 -42 132 -429 ## [10] 1430 -4862 16796 -58786 208012 -742900 }
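## Tiny illustration (not run) of why the shifted computation matters:
## direct exp() underflows for very negative logs, lsum()/lssum() do not.
if(FALSE) {
  lx <- matrix(c(-1000, -1001, -1002), 3, 1)  # log(x_1), log(x_2), log(x_3)
  log(sum(exp(lx)))  # -Inf : exp() underflows to 0 in double precision
  lsum(lx)           # -999.5923 == -1000 + log(1 + exp(-1) + exp(-2))
  ## signed version, here x = 2*exp(-1000) - exp(-1001) > 0 :
  lssum(matrix(c(-1000, -1000, -1001), 3, 1), signs = c(1, 1, -1))
}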
/scratch/gouwar.j/cran-all/cranData/Bessel/R/lsum.R
#### At first, simple interfaces to the main subroutines #### in ../src/zbsubs.f #### ~~~~~~~~~~~~~~~ ## 10: zbesh(zr, zi, fnu, kode, m, n, cyr, cyi, nz, ierr) ## 360: zbesi(zr, zi, fnu, kode, n, cyr, cyi, nz, ierr) ## 631: zbesj(zr, zi, fnu, kode, n, cyr, cyi, nz, ierr) ## 899: zbesk(zr, zi, fnu, kode, n, cyr, cyi, nz, ierr) ## 1182: zbesy(zr, zi, fnu, kode, n, cyr, cyi, nz, cwrkr, cwrki, ierr) ## ## 1467: zairy(zr, zi, id, kode, air, aii, nz, ierr) ## 1862: zbiry(zr, zi, id, kode, bir, bii, ierr) ## 360: zbesi(zr, zi, fnu, kode, n, cyr, cyi, nz, ierr) BesselI <- function(z, nu, expon.scaled = FALSE, nSeq = 1, verbose = 0) { nz <- length(z) if(nz == 0) return(z) isNum <- is.numeric(z) if(isNum) { zr <- as.double(z) zi <- numeric(nz) } else if(is.complex(z)) { zr <- Re(z) zi <- Im(z) } else stop("'z' must be complex or numeric") nu <- as.double(nu) stopifnot(length(nu) == 1, length(verbose) == 1, nSeq >= 1, nSeq == as.integer(nSeq)) if(nu < 0) { ## I(-nu,z) = I(nu,z) + (2/pi)*sin(pi*nu)*K(nu,z) [ = A.& S. 9.6.2, p.375 ] if(nu == round(nu)) ## <==> sin(pi*nu) == 0 return(BesselI(z, -nu, expon.scaled, nSeq=nSeq, verbose=verbose)) ## else nu. <- -nu + seq_len(nSeq) - 1 kf <- rep(2/pi*sin(pi*nu.), each=nz) if (expon.scaled) kf <- kf * rep(exp(-z-abs(zr)), nSeq) return( BesselI(z, -nu, expon.scaled, nSeq=nSeq, verbose=verbose) + kf*BesselK(z, -nu, expon.scaled, nSeq=nSeq, verbose=verbose)) } ## else nu >= 0 : r <- if(isNum) numeric(nz * nSeq) else complex(nz * nSeq) if(nSeq > 1) r <- matrix(r, nz, nSeq) for(i in seq_len(nz)) { ri <- .C(zbesi, zr[i], zi[i], fnu = nu, kode= as.integer(1L + as.logical(expon.scaled)), # 1 or 2, exactly as desired n = as.integer(nSeq), cyr = double(nSeq), cyi = double(nSeq), nz = integer(1), ierr = as.integer(verbose)) if(ri$ierr) { f.x <- sprintf("'zbesi(%g %s %gi, nu=%g)'", zr[i], c("-","+")[1+(zi[i] >= 0)], abs(zi[i]), nu) if(ri$ierr == 3) warning(gettextf( "%s large arguments -> precision loss (of at least half machine accuracy)", f.x), domain = NA) else if(ri$ierr == 2) { if(verbose) message(gettextf("%s -> overflow ; returning Inf\n", f.x)) ri$cyr <- ri$cyi <- Inf } else if(ri$ierr == 4) { warning(gettextf("%s -> ierr=4: |z| or nu too large\n", f.x), domain = NA) ## FIXME: In some cases, the answer should just be 'Inf' without any warning ri$cyr[] <- NaN ri$cyi[] <- if(isNum) 0 else NaN } else stop(gettextf("%s unexpected error 'ierr = %d'", f.x, ri$ierr), domain = NA) } rz <- if(isNum && all(!is.na(ri$cyi) & ri$cyi == 0.)) ri$cyr else complex(real = ri$cyr, imaginary = ri$cyi) if(nSeq > 1) r[i,] <- rz else r[i] <- rz } r } ## I() ## 631: zbesj(zr, zi, fnu, kode, n, cyr, cyi, nz, ierr) BesselJ <- function(z, nu, expon.scaled = FALSE, nSeq = 1, verbose = 0) { nz <- length(z) if(nz == 0) return(z) isNum <- is.numeric(z) if(isNum) { zr <- as.double(z) zi <- numeric(nz) } else if(is.complex(z)) { zr <- Re(z) zi <- Im(z) } else stop("'z' must be complex or numeric") nu <- as.double(nu) stopifnot(length(nu) == 1, length(verbose) == 1, nSeq >= 1, nSeq == as.integer(nSeq)) if(nu < 0) { ## J(-fnu,z) = J(fnu,z)*cos(pi*fnu) - Y(fnu,z)*sin(pi*fnu) if(expon.scaled) stop("'expon.scaled=TRUE' not yet implemented for nu < 0") pnu <- rep(pi*(-nu + seq_len(nSeq) - 1), each=nz) return(BesselJ(z, -nu, nSeq=nSeq, verbose=verbose)*cos(pnu) - if(nu == round(nu)) 0 else BesselY(z, -nu, nSeq=nSeq, verbose=verbose)*sin(pnu)) } ## else nu >= 0 : r <- if(isNum) numeric(nz * nSeq) else complex(nz * nSeq) if(nSeq > 1) r <- matrix(r, nz, nSeq) for(i in seq_len(nz)) { ri <- .C(zbesj, zr[i], 
zi[i], fnu = nu, kode= as.integer(1L + as.logical(expon.scaled)), # 1 or 2, exactly as desired n = as.integer(nSeq), cyr = double(nSeq), cyi = double(nSeq), nz = integer(1), ierr = as.integer(verbose)) if(ri$ierr) { f.x <- sprintf("'zbesj(%g %s %gi, nu=%g)'", zr[i], c("-","+")[1+(zi[i] >= 0)], abs(zi[i]), nu) if(ri$ierr == 3) warning(sprintf( "%s large arguments -> precision loss (of at least half machine accuracy)", f.x)) else if(ri$ierr == 2) { if(verbose) message(sprintf("%s -> overflow ; returning Inf\n", f.x)) ri$cyr <- ri$cyi <- Inf } else if(ri$ierr == 4) { warning(gettextf("%s -> ierr=4: |z| or nu too large\n", f.x), domain = NA) ## FIXME: In some cases, the answer should just be Inf or 0 (w/o warning!) ri$cyr[] <- NaN ri$cyi[] <- if(isNum) 0 else NaN } else stop(gettextf("%s unexpected error 'ierr = %d'", f.x, ri$ierr), domain = NA) } rz <- if(isNum) ri$cyr else complex(real = ri$cyr, imaginary = ri$cyi) if(nSeq > 1) r[i,] <- rz else r[i] <- rz } r } ## J() ## 899: zbesk(zr, zi, fnu, kode, n, cyr, cyi, nz, ierr) BesselK <- function(z, nu, expon.scaled = FALSE, nSeq = 1, verbose = 0) { nz <- length(z) if(nz == 0) return(z) isNum <- is.numeric(z) if(isNum) { zr <- as.double(z) zi <- numeric(nz) } else if(is.complex(z)) { zr <- Re(z) zi <- Im(z) } else stop("'z' must be complex or numeric") nu <- as.double(nu) stopifnot(length(nu) == 1, length(verbose) == 1, nSeq >= 1, nSeq == as.integer(nSeq)) if(nu < 0) { ## K(-nu,z) = K(nu,z) return(BesselK(z, -nu, expon.scaled, nSeq=nSeq, verbose=verbose)) } ## else nu >= 0 : r <- if(isNum) numeric(nz * nSeq) else complex(nz * nSeq) if(nSeq > 1) r <- matrix(r, nz, nSeq) for(i in seq_len(nz)) { ri <- .C(zbesk, zr[i], zi[i], fnu = nu, kode= as.integer(1L + as.logical(expon.scaled)), # 1 or 2, exactly as desired n = as.integer(nSeq), cyr = double(nSeq), cyi = double(nSeq), nz = integer(1), ierr = as.integer(verbose)) if(ri$ierr) { f.x <- sprintf("'zbesk(%g %s %gi, nu=%g)'", zr[i], c("-","+")[1+(zi[i] >= 0)], abs(zi[i]), nu) if(ri$ierr == 3) warning(sprintf( "%s large arguments -> precision loss (of at least half machine accuracy)", f.x)) else if(ri$ierr == 2) { if(verbose) message(sprintf("%s -> overflow ; returning Inf\n", f.x)) ri$cyr <- ri$cyi <- Inf } else if(ri$ierr == 4) { warning(gettextf("%s -> ierr=4: |z| or nu too large\n", f.x), domain = NA) ## FIXME: In some cases, the answer should just be Inf or 0 (w/o warning!) 
ri$cyr[] <- NaN ri$cyi[] <- if(isNum) 0 else NaN } else stop(gettextf("%s unexpected error 'ierr = %d'", f.x, ri$ierr), domain = NA) } rz <- if(isNum && all(!is.na(ri$cyi) & ri$cyi == 0.)) ri$cyr else complex(real = ri$cyr, imaginary = ri$cyi) if(nSeq > 1) r[i,] <- rz else r[i] <- rz } r } ## K() ## 1182: zbesy(zr, zi, fnu, kode, n, cyr, cyi, nz, cwrkr, cwrki, ierr) BesselY <- function(z, nu, expon.scaled = FALSE, nSeq = 1, verbose = 0) { nz <- length(z) if(nz == 0) return(z) isNum <- is.numeric(z) if(isNum) { zr <- as.double(z) zi <- numeric(nz) } else if(is.complex(z)) { zr <- Re(z) zi <- Im(z) } else stop("'z' must be complex or numeric") nu <- as.double(nu) stopifnot(length(nu) == 1, length(verbose) == 1, nSeq >= 1, nSeq == as.integer(nSeq)) if(nu < 0) { ## Y(-fnu,z) = Y(fnu,z)*cos(pi*fnu) + J(fnu,z)*sin(pi*fnu) if(expon.scaled) stop("'expon.scaled=TRUE' not yet implemented for nu < 0") pnu <- rep(pi*(-nu + seq_len(nSeq) - 1), each=nz) return(BesselY(z, -nu, nSeq=nSeq, verbose=verbose)*cos(pnu) + if(nu == round(nu)) 0 else BesselJ(z, -nu, nSeq=nSeq, verbose=verbose)*sin(pnu)) } ## else nu >= 0 : isNum <- isNum && all(zr >= 0) r <- if(isNum) numeric(nz * nSeq) else complex(nz * nSeq) if(nSeq > 1) r <- matrix(r, nz, nSeq) for(i in seq_len(nz)) { if(zr[i] == 0 && zi[i] == 0) { rz <- if(isNum) -Inf else ## A limit z -> 0 depends on the *direction*; it is ## ``a version of Inf"', i.e. the only *complex* Inf, if you think ## of the complex sphere. --> we use the same as 1/(0+0i): 1/(0+0i) } else { ## 1182: zbesy(zr, zi, fnu, kode, n, cyr, cyi, nz, cwrkr, cwrki, ierr) ri <- .C(zbesy, zr[i], zi[i], fnu = nu, kode= as.integer(1L + as.logical(expon.scaled)), # 1 or 2, exactly as desired n = as.integer(nSeq), cyr = double(nSeq), cyi = double(nSeq), nz = integer(1), cwrkr= double(nSeq), cwrki= double(nSeq), ierr = as.integer(verbose)) if(ri$ierr) { f.x <- sprintf("'zbesy(%g %s %gi, nu=%g)'", zr[i], c("-","+")[1+(zi[i] >= 0)], abs(zi[i]), nu) if(ri$ierr == 3) warning(sprintf( "%s large arguments -> precision loss (of at least half machine accuracy)", f.x)) else if(ri$ierr == 2) { if(verbose) message(sprintf("%s -> overflow ; returning Inf\n", f.x)) ri$cyr <- ri$cyi <- Inf } else if(ri$ierr == 4) { warning(gettextf("%s -> ierr=4: |z| or nu too large\n", f.x), domain = NA) ## FIXME: In some cases, the answer should just be Inf or 0 (w/o warning!) ri$cyr[] <- NaN ri$cyi[] <- if(isNum) 0 else NaN } else stop(gettextf("%s unexpected error 'ierr = %d'", f.x, ri$ierr), domain = NA) } rz <- if(isNum) ri$cyr else complex(real = ri$cyr, imaginary = ri$cyi) } if(nSeq > 1) r[i,] <- rz else r[i] <- rz } r } ## Y() ##---------------- Hankel function H() ------------------ ## 10: zbesh(zr, zi, fnu, kode, m, n, cyr, cyi, nz, ierr) BesselH <- function(m, z, nu, expon.scaled = FALSE, nSeq = 1, verbose = 0) { ## c***keywords H-bessel functions,bessel functions of complex argument, ## c bessel functions of third kind,hankel functions ## c***author amos, donald e., sandia national laboratories ## c***purpose to compute the H-bessel functions of a complex argument ## c***description ## c ## c ***a double precision routine*** ## c on kode=1, zbesh computes an n member sequence of complex ## c hankel (bessel) functions cy(j) = H(m,fnu+j-1,z) for kinds m=1 or 2, ## c real, nonnegative orders fnu+j-1, j=1,...,n, and complex ## c z != cmplx(0.,0.) in the cut plane -pi < arg(z) <= pi. ## c on kode=2, zbesh returns the scaled Hankel functions ## c ## c cy(i)= exp(-mm*z*i) * H(m,fnu+j-1,z) mm=3-2*m, i**2=-1. 
## c ## c which removes the exponential behavior in both the upper and ## c lower half planes. definitions and notation are found in the ## c nbs handbook of mathematical functions (ref. 1). m <- as.integer(m) stopifnot(length(m) == 1, m == 1 || m == 2) nz <- length(z) if(nz == 0) return(z) isNum <- is.numeric(z) if(isNum) { zr <- as.double(z) zi <- numeric(nz) } else if(is.complex(z)) { zr <- Re(z) zi <- Im(z) } else stop("'z' must be complex or numeric") nu <- as.double(nu) stopifnot(length(nu) == 1, length(verbose) == 1, nSeq >= 1, nSeq == as.integer(nSeq)) if(nu < 0) { ## H(1,-fnu,z) = H(1,fnu,z)*cexp( pi*fnu*i) ## H(2,-fnu,z) = H(2,fnu,z)*cexp(-pi*fnu*i) ; i^2=-1 if(expon.scaled) stop("'expon.scaled=TRUE' not yet implemented for nu < 0") pnu <- rep(c(1i,-1i)[m] * pi*(-nu + seq_len(nSeq) - 1), each=nz) return(BesselH(m, z, -nu, nSeq=nSeq, verbose=verbose)* exp(pnu)) } ## else nu >= 0 : r <- if(isNum) numeric(nz * nSeq) else complex(nz * nSeq) if(nSeq > 1) r <- matrix(r, nz, nSeq) for(i in seq_len(nz)) { ## zbesh(zr, zi, fnu, kode, m, n, cyr, cyi, nz, ierr) ri <- .C(zbesh, zr[i], zi[i], fnu = nu, kode= as.integer(1L + as.logical(expon.scaled)), # 1 or 2, exactly as desired m = m, n = as.integer(nSeq), cyr = double(nSeq), cyi = double(nSeq), nz = integer(1), ierr = as.integer(verbose)) if(ri$ierr) { f.x <- sprintf("'zbesh(%g %s %gi, nu=%g)'", zr[i], c("-","+")[1+(zi[i] >= 0)], abs(zi[i]), nu) if(ri$ierr == 3) warning(sprintf( "%s large arguments -> precision loss (of at least half machine accuracy)", f.x)) else if(ri$ierr == 2) { if(verbose) message(sprintf("%s -> overflow ; returning Inf\n", f.x)) ri$cyr <- ri$cyi <- Inf } else if(ri$ierr == 4) { warning(gettextf("%s -> ierr=4: |z| or nu too large\n", f.x), domain = NA) ## FIXME: In some cases, the answer should just be Inf or 0 (w/o warning!) 
ri$cyr[] <- NaN ri$cyi[] <- if(isNum) 0 else NaN } else stop(gettextf("%s unexpected error 'ierr = %d'", f.x, ri$ierr), domain = NA) } rz <- if(isNum && all(!is.na(ri$cyi) & ri$cyi == 0.)) ri$cyr else complex(real = ri$cyr, imaginary = ri$cyi) if(nSeq > 1) r[i,] <- rz else r[i] <- rz } r } ## H() ##---------------- Airy Functions Ai() Bi() ------------------ ## 1471: zairy(zr, zi, id, kode, air, aii, nz, ierr) ## 1867: zbiry(zr, zi, id, kode, bir, bii, ierr) AiryA <- function(z, deriv = 0, expon.scaled = FALSE, verbose = 0) { ## Purpose: to compute airy functions ai(z) and dai(z) for complex z ## airy function : "Bessel functions of order one third" deriv <- as.integer(deriv) stopifnot(length(deriv) == 1, deriv == 0 || deriv == 1, length(verbose) == 1) nz <- length(z) if(nz == 0) return(z) isNum <- is.numeric(z) if(isNum) { zr <- as.double(z) zi <- numeric(nz) } else if(is.complex(z)) { zr <- Re(z) zi <- Im(z) } else stop("'z' must be complex or numeric") r <- if(isNum) numeric(nz) else complex(nz) for(i in seq_len(nz)) { ## zairy(zr, zi, id, kode, air, aii, nz, ierr) ri <- .C(zairy, zr[i], zi[i], id = deriv, kode= as.integer(1L + as.logical(expon.scaled)), # 1 or 2, exactly as desired air = double(1), aii = double(1), nz = integer(1), ierr = as.integer(verbose)) if(ri$ierr) { f.x <- sprintf("'zairy(%g %s %gi, deriv=%d)'", zr[i], c("-","+")[1+(zi[i] >= 0)], abs(zi[i]), deriv) if(ri$ierr == 3) warning(sprintf( "%s large arguments -> precision loss (of at least half machine accuracy)", f.x)) else if(ri$ierr == 2) { if(verbose) message(sprintf("%s -> overflow ; returning Inf\n", f.x)) ri$air <- ri$aii <- Inf } else if(ri$ierr == 4) { warning(gettextf("%s -> ierr=4: |z| too large\n", f.x), domain = NA) ## FIXME: In some cases, the answer should just be Inf or 0 or .... (w/o warning!) ri$air <- NaN ri$aii <- if(isNum) 0 else NaN } else stop(gettextf("%s unexpected error 'ierr = %d'", f.x, ri$ierr), domain = NA) } r[i] <- if(isNum) ri$air else complex(real = ri$air, imaginary = ri$aii) } r } ## AiryA() AiryB <- function(z, deriv = 0, expon.scaled = FALSE, verbose = 0) { ## Purpose: to compute airy functions bi(z) and dbi(z) for complex z ## airy function : "Bessel functions of order one third" deriv <- as.integer(deriv) stopifnot(length(deriv) == 1, deriv == 0 || deriv == 1, length(verbose) == 1) nz <- length(z) if(nz == 0) return(z) isNum <- is.numeric(z) if(isNum) { zr <- as.double(z) zi <- numeric(nz) } else if(is.complex(z)) { zr <- Re(z) zi <- Im(z) } else stop("'z' must be complex or numeric") r <- if(isNum) numeric(nz) else complex(nz) for(i in seq_len(nz)) { ## zairy(zr, zi, id, kode, air, aii, nz, ierr) ri <- .C(zbiry, zr[i], zi[i], id = deriv, kode= as.integer(1L + as.logical(expon.scaled)), # 1 or 2, exactly as desired bir = double(1), bii = double(1), nz = integer(1), ierr = as.integer(verbose)) if(ri$ierr) { f.x <- sprintf("'zairy(%g %s %gi, deriv=%d)'", zr[i], c("-","+")[1+(zi[i] >= 0)], abs(zi[i]), deriv) if(ri$ierr == 3) warning(sprintf( "%s large arguments -> precision loss (of at least half machine accuracy)", f.x)) else if(ri$ierr == 2) { if(verbose) message(sprintf("%s -> overflow ; returning Inf\n", f.x)) ri$bir <- ri$bii <- Inf } else if(ri$ierr == 4) { warning(gettextf("%s -> ierr=4: |z| too large\n", f.x), domain = NA) ## FIXME: In some cases, the answer should just be Inf or 0 or .... (w/o warning!) 
ri$bir <- NaN ri$bii <- if(isNum) 0 else NaN } else stop(gettextf("%s unexpected error 'ierr = %d'", f.x, ri$ierr), domain = NA) } r[i] <- if(isNum) ri$bir else complex(real = ri$bir, imaginary = ri$bii) } r } ## AiryB()
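## Usage sketch (not run): the Amos-based interfaces above accept complex z
## and return a sequence of orders when nSeq > 1. Minimal checks:
if(FALSE) {
  all.equal(BesselI(2, 0.5), besselI(2, 0.5))  # TRUE for real z
  BesselJ(2 + 1i, nu = 1)        # complex argument
  BesselK(10, nu = 2, nSeq = 3)  # 1 x 3 matrix: K_2(10), K_3(10), K_4(10)
  BesselH(1, 5, nu = 0)          # Hankel H^(1)_0(5) = J_0(5) + 1i*Y_0(5)
  AiryA(0)                       # 3^(-2/3)/gamma(2/3) ~ 0.3550281
}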
/scratch/gouwar.j/cran-all/cranData/Bessel/R/toms644.R
### R code from vignette source 'other-Bessels.Rnw' ### Encoding: UTF-8 ################################################### ### code chunk number 1: preliminaries ################################################### options(width=75) library(Bessel) ################################################### ### code chunk number 2: Rmpfr-1 ################################################### suppressPackageStartupMessages(require("Rmpfr")) ################################################### ### code chunk number 3: gsl-do ################################################### library(gsl) ################################################### ### code chunk number 4: gsl-help (eval = FALSE) ################################################### ## ?bessel_Knu ## ?Airy ################################################### ### code chunk number 5: gsl-bessel-ls ################################################### igsl <- match("package:gsl", search()) aB <- apropos("Bessel", where=TRUE); unname(aB)[names(aB) == igsl] aA <- apropos("Airy", where=TRUE); unname(aA)[names(aA) == igsl] ################################################### ### code chunk number 6: bessel-real-nu ################################################### lst <- ls(patt="bessel_.*nu", pos="package:gsl") l2 <- sapply(lst, function(.) args(get(.)), simplify=FALSE) lnms <- setNames(format(lst), lst) arglst <- lapply(lst, ## a bit ugly, using deparse(.) function(nm) sub(" *$","", sub("^function", lnms[[nm]], deparse(l2[[nm]])[[1]]))) .tmp <- lapply(arglst, function(.) cat(format(.),"\n")) ################################################### ### code chunk number 7: bessel_Inu_scaled ################################################### x <- (1:500)*50000; b2 <- BesselI(x, pi, expo=TRUE) b1 <- bessel_Inu_scaled(pi, x) all.equal(b1,b2,tol=0) ## "Mean relative difference: 1.544395e-12" ## the accuracy is *as* limited (probably): b1 <- bessel_Inu_scaled(pi, x, give=TRUE) summary(b1$err) ################################################### ### code chunk number 8: bessel_Inu-relErr ################################################### range(b1$err/ b1$val) ################################################### ### code chunk number 9: Jnu-100 ################################################### bessel_Jnu(100, 2^seq(-5,1, by=1/4)) bessel_Jnu( 20, 2^seq(-50,-40, by=1/2)) bessel_Jnu( 5, 2^seq(-210,-200, by=.5)) ################################################### ### code chunk number 10: Jnu-underflow-status-ex ################################################### as.data.frame(bessel_Jnu( 20, 2^seq(-50,-40, by=1/2), give=TRUE, strict=FALSE)) ################################################### ### code chunk number 11: J-gsl ################################################### gslJ <- function(nu, f1 = .90, f2 = 1.10, nout = 512, give=FALSE, strict=FALSE) { stopifnot(is.numeric(nu), length(nu) == 1, nout >= 1, f1 <= 1, f2 >= 1) x <- seq(f1*nu, f2*nu, length.out = nout) list(x=x, Jnu.x = bessel_Jnu(nu, x, give=give, strict=strict)) } plJ <- function(nu, f1 =.90, f2=1.10, nout=512, col=2, lwd=2, main = bquote(nu == .(nu)), ...) { dJ <- gslJ(nu, f1=f1, f2=f2, nout=nout) plot(Jnu.x ~ x, data=dJ, type="l", col=col, lwd=lwd, main=main, ...) abline(h=0, lty=3, col=adjustcolor(1, 0.5)) invisible(dJ) } sfsmisc::mult.fig(4) plJ(500, f1=0) r1k <- plJ(1000, f1=0) head(as.data.frame(r1k)) # all 0 now (NaN's for 'strict=TRUE' !!) 
r10k <- plJ(10000, f1=0.5, f2=2) str( with(r10k, x[!is.finite(Jnu.x)]) ) # empty; had all NaN upto x = 8317 r1M <- plJ(1e6, f1=0.8) ################################################### ### code chunk number 12: require-again ################################################### ################################################### ### code chunk number 13: sessionInfo ################################################### toLatex(sessionInfo(), locale=FALSE) ################################################### ### code chunk number 14: show-date ################################################### cat(sprintf("Date (run in R): %s\n", format(Sys.Date())))
/scratch/gouwar.j/cran-all/cranData/Bessel/inst/doc/other-Bessels.R
# Generated by using Rcpp::compileAttributes() -> do not edit by hand # Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393 collectC <- function(xs, ys, n) { .Call('_Bestie_collectC', PACKAGE = 'Bestie', xs, ys, n) }
/scratch/gouwar.j/cran-all/cranData/Bestie/R/RcppExports.R
#' Exact estimation of intervention effects for a single DAG or a chain of sampled DAGs #' #' \code{DAGintervention} takes a DAG or a sampled chain of DAGs (for example from #' the \code{\link[BiDAG]{partitionMCMC}} function of the BiDAG package) and computes the #' intervention effect of each node on all others. For binary data, this is performed #' by exhaustively examining all possible binary states. This is exponentially complex in the #' number of variables, which should therefore be limited to around 20 or fewer. For more #' variables there is a Monte Carlo version \code{\link{DAGinterventionMC}} instead. #' For continuous data, the intervention estimation is performed by extracting the edge #' coefficients from their posterior distribution and using matrix inversion #' following arXiv:2010.00684. User-defined scores are also supported as long as #' the DAG parameters are analogous to the BDe/BGe cases, see \code{\link{DAGparameters}}. #' #' @param incidences a single adjacency matrix or a list of adjacency matrices of #' sampled DAGs, with entry [i,j] equal to 1 when a directed edge exists from #' node i to node j #' @param dataParams the data and parameters used to learn the DAGs derived from the #' \code{\link[BiDAG]{scoreparameters}} function of the BiDAG package #' @param sample logical indicating whether to sample the parameters of each node #' from the posterior (TRUE, default) or to take the expectation (FALSE) #' @param unrollDBN logical indicating whether to unroll a DBN to a full DAG over #' all time slices (TRUE, default) or to use the compact representation (FALSE) #' #' @return a single matrix or a list of matrices containing the full set of #' intervention effects for each input DAG. Entry [i,j] is the downstream #' effect on node j of intervening on node i #' (the difference observed at node j when setting node i to 1 and 0) #' #' @examples #' #' scoreParam <- BiDAG::scoreparameters("bde", BiDAG::Asia) #' causalmat <- DAGintervention(BiDAG::Asiamat, scoreParam) #' #' @export #' #' @seealso \code{\link[BiDAG]{scoreparameters}} DAGintervention <- function(incidences, dataParams, sample = TRUE, unrollDBN = TRUE){ # this wrapper takes in a chain of DAGs, computes their parameters and returns all intervention effects if (!is.list(incidences)) { # turn to list internally incidences <- list(incidences) } if (dataParams$type == "usr"){ localtype <- dataParams$pctesttype } else { localtype <- dataParams$type } n <- ncol(incidences[[1]]) # number of nodes in DAG if (dataParams$DBN && unrollDBN) { # number of nodes in DAG from unrolled DBN n <- dataParams$bgn + dataParams$nsmall*dataParams$slices } if (localtype == "bde") { # only for BDe version if (n > 20) { warning("Exhaustive enumeration may not be feasible") } allBinaryVecs <- matrix(0, 2^n, n) for (ii in 1:2^n) { allBinaryVecs[ii, ] <- as.integer(intToBits(ii-1))[n:1] } } numDAGs <- length(incidences) interventionMats <- vector("list", numDAGs) # to store the intervention effects for(kk in 1:numDAGs){ DAGparams <- DAGparameters(incidences[[kk]], dataParams, unrollDBN = unrollDBN) DAGparamsInternal <- DAGparams if (localtype == "bde") { # only for BDe version if(sample==TRUE){ # then we take a sample of parameters from the posterior instead of taking the expectation DAGparamsInternal$pmeans <- SampleParameters(DAGparams) } allLogScores <- BinaryScoreAgainstDAG(DAGparamsInternal, allBinaryVecs) interventionMats[[kk]] <- InterventionEstimation(allLogScores, allBinaryVecs) colnames(interventionMats[[kk]]) <-
colnames(DAGparamsInternal$DAG) rownames(interventionMats[[kk]]) <- rownames(DAGparamsInternal$DAG) } else { # bge version if(sample==TRUE){ # then we take a sample of parameters from the posterior instead of taking the expectation DAGparamsInternal$mus <- SampleParameters(DAGparams, type = "bge") } interventionMats[[kk]] <- InterventionEstimationBGe(DAGparamsInternal) colnames(interventionMats[[kk]]) <- colnames(DAGparamsInternal$DAG) rownames(interventionMats[[kk]]) <- rownames(DAGparamsInternal$DAG) } } if (numDAGs == 1) { # turn back to single matrix interventionMats <- interventionMats[[1]] } return(interventionMats) } # this function is no longer needed DAGinterventionparams <- function(DAGparams, sample = TRUE){ # this wrapper takes in a DAG with parameters and returns all intervention effects n <- ncol(DAGparams$DAG) # number of nodes in DAG allBinaryVecs <- matrix(0, 2^n, n) for (ii in 1:2^n) { allBinaryVecs[ii, ] <- as.integer(intToBits(ii-1))[n:1] } DAGparamsInternal <- DAGparams if(sample==TRUE){ # then we take a sample of parameters from the posterior instead of taking the expectation DAGparamsInternal$pmeans <- SampleParameters(DAGparams) } allLogScores <- BinaryScoreAgainstDAG(DAGparamsInternal, allBinaryVecs) interventionMat <- InterventionEstimation(allLogScores, allBinaryVecs) return(interventionMat) } InterventionEstimationBGe <- function(DAGparams){ # estimate intervention effects from edge coefficients n <- ncol(DAGparams$DAG) # number of nodes coeffMatrix <- matrix(0, nrow=n, ncol=n) for (j in 1:n) { parentNodes <- which(DAGparams$DAG[, j]==1) if (length(parentNodes) > 0) { coeffMatrix[parentNodes, j] <- DAGparams$mus[[j]] } } interventionMatrix <- solve(diag(n) - coeffMatrix) # adding all paths with the inverse return(interventionMatrix) } InterventionEstimation <- function(logScores, dataToScore){ # estimate intervention effects from the scores of all binary vectors n <- ncol(dataToScore) # number of nodes interventionMatrix <- matrix(0, nrow=n, ncol=n) for (j in 1:n) { interventionMatrix[j,] <- InterventionEstimationCore(j, logScores, dataToScore) } return(interventionMatrix) } InterventionEstimationCore <- function(fixedNode, logScores, dataToScore){ # core function involves setting one node to 0 and to 1 and removing its probability component upRows <- which(dataToScore[, fixedNode]==1) weightVecTemp <- rowSums(logScores[upRows, -fixedNode]) # probability of each vector with fixedNode set to one weightVec <- exp(weightVecTemp - max(weightVecTemp)) weightVec <- weightVec/sum(weightVec) probs1 <- colSums(weightVec*dataToScore[upRows, ]) downRows <- which(dataToScore[, fixedNode]==0) weightVecTemp <- rowSums(logScores[downRows, -fixedNode]) # probability of each vector with fixedNode set to zero weightVec <- exp(weightVecTemp - max(weightVecTemp)) weightVec <- weightVec/sum(weightVec) probs0 <- colSums(weightVec*dataToScore[downRows, ]) return(probs1-probs0) }
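## Sketch (not run) of the exhaustive state enumeration that drives the exact
## (BDe) estimation above: the 2^n binary vectors come from the bit patterns
## of 0, ..., 2^n - 1.
if(FALSE) {
  n <- 3
  allBinaryVecs <- matrix(0, 2^n, n)
  for (ii in 1:2^n) allBinaryVecs[ii, ] <- as.integer(intToBits(ii - 1))[n:1]
  allBinaryVecs  # rows 000, 001, 010, ..., 111 in increasing binary order
}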
/scratch/gouwar.j/cran-all/cranData/Bestie/R/interventionest.R
#' Monte Carlo estimation of intervention effects for a DAG or chain of sampled DAGs #' #' \code{DAGinterventionMC} takes a DAG or a sampled chain of DAGs (for example from #' the \code{\link[BiDAG]{partitionMCMC}} function of the BiDAG package) and computes, #' for binary data, a Monte Carlo estimate of the intervention effect of #' each node on all others by simulating data from the DAG. #' By default each node is intervened upon and the downstream effects #' estimated by further sampling. A faster but less robust and accurate version is #' also offered which reweights a single simulated dataset. #' #' @param incidences a single adjacency matrix or a list of adjacency matrices #' of sampled DAGs, with entry [i,j] equal to 1 when a directed edge exists from #' node i to node j #' @param dataParams the data and parameters used to learn the DAGs derived from the #' \code{\link[BiDAG]{scoreparameters}} function of the BiDAG package #' @param sampleSize the number of Monte Carlo samples to draw #' @param sample logical indicating whether to sample the parameters of each node #' from the posterior (TRUE, default) or to take the expectation (FALSE) #' @param fixNode logical indicating whether to intervene on each node (TRUE, default) #' and resample downstream nodes or to sample once and reweight the sample (FALSE) #' @param reducedVarianceSampling logical indicating whether to perform Bernoulli #' sampling for each node (FALSE) or to sample from a distribution with the same mean #' and lower variance (TRUE, default) #' @param unrollDBN logical indicating whether to unroll a DBN to a full DAG over #' all time slices (TRUE, default) or to use the compact representation (FALSE) #' #' @return a single matrix or a list of matrices containing the full set of #' intervention effects for each input DAG.
Entry [i,j] is the downstream #' effect on node j of intervening on node i #' (the difference observed at node j when setting node i to 1 and 0) #' #' @examples #' #' scoreParam <- BiDAG::scoreparameters("bde", BiDAG::Asia) #' causalmatMC <- DAGinterventionMC(BiDAG::Asiamat, scoreParam, 1e4) #' #' @export #' #' @seealso \code{\link[BiDAG]{scoreparameters}} DAGinterventionMC <- function(incidences, dataParams, sampleSize, sample = TRUE, fixNode = TRUE, reducedVarianceSampling = TRUE, unrollDBN = TRUE){ # this wrapper takes in a chain of DAG, computes their parameters and returns all MC intervention effects if (sampleSize < 1e3) { sampleSize <- 1e3 warning("Setting sample size to a minimum of 1000.") } if (dataParams$type != "bde") { stop("Only implemented for the BDe score.") } if (!is.list(incidences)) { # turn to list internally incidences <- list(incidences) } numDAGs <- length(incidences) interventionMats <- vector("list", numDAGs) # to store the intervention effects for(kk in 1:numDAGs){ DAGparams <- DAGparameters(incidences[[kk]], dataParams, unrollDBN = unrollDBN) interventionMats[[kk]] <- DAGinterventionMCparams(DAGparams, sampleSize, sample, fixNode, reducedVarianceSampling) colnames(interventionMats[[kk]]) <- colnames(DAGparams$DAG) rownames(interventionMats[[kk]]) <- rownames(DAGparams$DAG) } if (numDAGs == 1) { # turn back to single matrix interventionMats <- interventionMats[[1]] } return(interventionMats) } DAGinterventionMCparams <- function(DAGparams, sampleSize, sample = FALSE, fixNode = TRUE, reducedVarianceSampling = TRUE){ # this wrapper takes in a DAG with parameters and returns all intervention effects estimated via Monte Carlo # when fixNode is FALSE this will simply sample binary vectors from the DAG which can give problems with low probability events! 
  DAGparamsInternal <- DAGparams
  if (sample == TRUE) { # then we take a sample of parameters from the posterior instead of taking the expectation
    DAGparamsInternal$pmeans <- SampleParameters(DAGparams)
  }
  if (fixNode == FALSE) { # we sample the set of binary vectors and reweight
    sampledBinaryVecs <- BinarySampleFromDAG(DAGparamsInternal, sampleSize, reducedVarianceSampling)
    sampledLogScores <- BinaryScoreAgainstDAG(DAGparamsInternal, sampledBinaryVecs)
    interventionMat <- InterventionEstimationFromSample(sampledLogScores, sampledBinaryVecs)
  } else { # we fix each node in turn and calculate the downstream effects
    interventionMat <- InterventionEstimationMCfix(DAGparamsInternal, sampleSize, reducedVarianceSampling)
  }
  return(interventionMat)
}

BinarySampleFromDAG <- function(DAGparams, sampleSize, reducedVarianceSampling = TRUE) {
  # sample a set of binary vectors from a DAG with parameters
  n <- ncol(DAGparams$DAG)
  binarySample <- matrix(NA, sampleSize, n)
  nodeOrder <- DAGtoorder(DAGparams$DAG) # get topological order
  for (j in rev(nodeOrder)) { # start with outpoints etc
    parentNodes <- which(DAGparams$DAG[, j] == 1)
    binarySample[, j] <- BinarySampleFromDAGcore(j, parentNodes, DAGparams, binarySample, reducedVarianceSampling)
  }
  return(binarySample)
}

BinarySampleFromDAGcore <- function(j, parentNodes, DAGparams, binarySample, reducedVarianceSampling = TRUE) {
  # sample one variable for a set of binary vectors from a DAG with parameters
  sampleNode <- rep(NA, nrow(binarySample)) # store the sampled values
  lp <- length(parentNodes) # number of parents
  noParams <- 2^lp # number of binary states of the parents
  switch(as.character(lp),
         "0" = { # no parents
           theta <- DAGparams$pmeans[[j]] # the probability of each state
           sampleNode <- SampleBinaryVec(theta, nrow(binarySample), reducedVarianceSampling)
         },
         "1" = { # one parent
           summysfull <- binarySample[, parentNodes]
           for (i in 0:(noParams - 1)) {
             theta <- DAGparams$pmeans[[j]][i + 1] # the probability of each state
             toScore <- which(summysfull == i)
             if (length(toScore) > 0) {
               sampleNode[toScore] <- SampleBinaryVec(theta, length(toScore), reducedVarianceSampling)
             }
           }
         },
         { # more parents
           summysfull <- colSums(2^(c(0:(lp - 1))) * t(binarySample[, parentNodes]))
           Ns <- tabulate(summysfull + 1, noParams) # can use tabulate instead of collectC, but we need to add one
           tempScoreVec <- rep(NA, length(summysfull))
           localCounter <- 0
           for (i in 1:noParams) { # we run over the data size once
             if (Ns[i] > 0) { # only if there are states to consider
               theta <- DAGparams$pmeans[[j]][i] # the probability of each state
               tempScoreVec[localCounter + 1:Ns[i]] <- SampleBinaryVec(theta, Ns[i], reducedVarianceSampling)
               localCounter <- localCounter + Ns[i]
             }
           }
           sampleNode <- tempScoreVec[rank(summysfull, ties.method = "first")] # use the rank function to map scores to entries
         }
  )
  return(sampleNode)
}

SampleBinaryVec <- function(theta, vecSize, reducedVarianceSampling = TRUE) {
  # sample with the same expectation as Bernoulli sampling, but with much less variance,
  # or straightforward Bernoulli sampling if reducedVarianceSampling = FALSE
  if (reducedVarianceSampling == FALSE || vecSize == 1) {
    binarySample <- stats::rbinom(vecSize, 1, theta)
  } else {
    binaryVecTemp <- rep(0, vecSize)
    expectedOnes <- vecSize * theta
    certainOnes <- floor(expectedOnes)
    if (certainOnes > 0) {
      binaryVecTemp[1:certainOnes] <- 1
    }
    if (certainOnes < vecSize) {
      binaryVecTemp[certainOnes + 1] <- stats::rbinom(1, 1, expectedOnes - certainOnes)
    }
    binarySample <- sample(binaryVecTemp) # we need to have a random order!
  }
  return(binarySample)
}

InterventionEstimationFromSample <- function(logScores, dataToScore) {
  # estimate intervention effects from a set of binary vectors
  # which have been sampled proportionally to their score
  n <- ncol(dataToScore)
  interventionMatrix <- matrix(0, nrow = n, ncol = n)
  for (j in 1:n) {
    interventionMatrix[j, ] <- InterventionEstimationFromSampleCore(j, logScores, dataToScore)
  }
  return(interventionMatrix)
}

InterventionEstimationFromSampleCore <- function(fixedNode, logScores, dataToScore) {
  # core function involves setting one node to 0 and to 1 and removing its probability component
  upRows <- which(dataToScore[, fixedNode] == 1)
  if (length(upRows) > 1) {
    weightVecTemp <- -(logScores[upRows, fixedNode]) # probability of choosing fixedNode as one for each vector
    weightVec <- exp(weightVecTemp - max(weightVecTemp))
    weightVec <- weightVec / sum(weightVec)
    probs1 <- colSums(weightVec * dataToScore[upRows, ])
  } else {
    warning("Sample size too small!")
    probs1 <- rep(NA, ncol(logScores))
  }
  downRows <- which(dataToScore[, fixedNode] == 0)
  if (length(downRows) > 1) {
    weightVecTemp <- -(logScores[downRows, fixedNode]) # probability of choosing fixedNode as zero for each vector
    weightVec <- exp(weightVecTemp - max(weightVecTemp))
    weightVec <- weightVec / sum(weightVec)
    probs0 <- colSums(weightVec * dataToScore[downRows, ])
  } else {
    warning("Sample size too small!")
    probs0 <- rep(NA, ncol(logScores))
  }
  return(probs1 - probs0)
}

NodeDescendantsFromDAG <- function(fixedNode, DAG) {
  # compute the descendants of a node from the adjacency matrix
  # this could be more efficient
  descendants <- c()
  children <- which(DAG[fixedNode, ] == 1) # the children of the node
  newDescendants <- children
  while (length(newDescendants) > 0 && length(children) > 0) {
    newDescendants <- setdiff(children, descendants)
    if (length(newDescendants) > 1) {
      children <- which(colSums(DAG[newDescendants, ]) > 0)
    } else if (length(newDescendants) == 1) {
      children <- which(DAG[newDescendants, ] == 1)
    } else {
      children <- c()
    }
    descendants <- union(newDescendants, descendants)
  }
  return(descendants)
}

InterventionEstimationMCfix <- function(DAGparams, sampleSize, reducedVarianceSampling = TRUE) {
  # estimate intervention effects by sampling binary vectors in topological order
  # while fixing one node at a time to compute intervention effects
  n <- ncol(DAGparams$DAG) # number of nodes
  interventionMatrix <- matrix(0, nrow = n, ncol = n)
  binarySample <- matrix(NA, sampleSize, n)
  nodeOrder <- DAGtoorder(DAGparams$DAG) # get topological order
  for (j in rev(nodeOrder)) { # start with outpoints etc
    parentNodes <- which(DAGparams$DAG[, j] == 1)
    binarySample[, j] <- BinarySampleFromDAGcore(j, parentNodes, DAGparams, binarySample, reducedVarianceSampling)
  }
  binarySampleOriginal <- binarySample # we reuse the original sample to match across different MC estimates
  for (j in nodeOrder) { # start with the inpoints
    # use graph descent to get descendants, but it would be more efficient to build the descendant matrix
    descendants <- NodeDescendantsFromDAG(j, DAGparams$DAG)
    if (length(descendants) > 0) {
      binarySample[, j] <- 1 # upregulate
      for (kk in intersect(rev(nodeOrder), descendants)) { # resample descendants in the correct order
        parentNodes <- which(DAGparams$DAG[, kk] == 1)
        binarySample[, kk] <- BinarySampleFromDAGcore(kk, parentNodes, DAGparams, binarySample, reducedVarianceSampling)
      }
      probs1 <- colMeans(binarySample)
      binarySample[, j] <- 0 # downregulate
      for (kk in intersect(rev(nodeOrder), descendants)) { # resample descendants in the correct order
        parentNodes <- which(DAGparams$DAG[, kk] == 1)
        binarySample[, kk] <- BinarySampleFromDAGcore(kk, parentNodes, DAGparams, binarySample, reducedVarianceSampling)
      }
      probs0 <- colMeans(binarySample)
      interventionMatrix[j, ] <- probs1 - probs0
      binarySample <- binarySampleOriginal # reset binary vectors
    } else {
      interventionMatrix[j, j] <- 1
    }
  }
  return(interventionMatrix)
}

# this function takes in an adjacency matrix and returns the permutation
DAGtoorder <- function(incidence) {
  n <- ncol(incidence) # number of nodes
  permy <- numeric(n) # to store the permutation
  m <- n # counter
  while (m > 0) {
    topnodes <- which(colSums(incidence) == 0) # find the outpoints
    incidence[topnodes, ] <- 0 # remove their edges
    incidence[cbind(topnodes, topnodes)] <- 1 # add a one to their columns so they are no longer counted
    l <- length(topnodes) # the number of outpoints
    m <- m - l
    permy[m + 1:l] <- topnodes
  }
  return(permy)
}
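
# A minimal usage sketch (not part of the package) for the two helpers above;
# the 3-node chain `toyDAG` is a made-up example. SampleBinaryVec() keeps the
# Bernoulli expectation while pinning the number of ones to within one of
# vecSize * theta, which is why the reduced-variance mean is (almost) exact.
if (FALSE) {
  toyDAG <- matrix(0, 3, 3)
  toyDAG[1, 2] <- 1 # edge 1 -> 2
  toyDAG[2, 3] <- 1 # edge 2 -> 3
  DAGtoorder(toyDAG) # returns c(3, 2, 1); sources come last, hence the rev() when sampling

  set.seed(42)
  mean(SampleBinaryVec(0.3, 1000, reducedVarianceSampling = TRUE))  # exactly 0.3 here
  mean(SampleBinaryVec(0.3, 1000, reducedVarianceSampling = FALSE)) # 0.3 plus binomial noise
}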
# File: Bestie/R/interventionestMC.R
#' Augment a DAG with parameters
#'
#' \code{DAGparameters} takes a DAG and augments it with parameters.
#' For binary data these are the parameters of the posterior beta
#' distributions and their means. For continuous data, these are the parameters
#' of the posterior distributions of the edge coefficients from arXiv:2010.00684.
#' There is support for user-defined augmentation, with the caveat that it
#' must match the output format of either the binary or continuous case.
#'
#' @param incidence a single adjacency matrix with entry [i,j] equal to 1
#' when a directed edge exists from node i to node j
#' @param dataParams the data and parameters used to learn the DAGs, derived from the
#' \code{\link[BiDAG]{scoreparameters}} function of the BiDAG package
#' @param unrollDBN logical indicating whether to unroll a DBN to a full DAG over
#' all time slices (TRUE, default) or to use the compact representation (FALSE)
#'
#' @return the DAG and a list of parameters for each node given
#' its parents
#'
#' @examples
#'
#' scoreParam <- BiDAG::scoreparameters("bde", BiDAG::Asia)
#' AsiaParam <- DAGparameters(BiDAG::Asiamat, scoreParam)
#'
#' @export
DAGparameters <- function(incidence, dataParams, unrollDBN = TRUE) {
  # add parameters to an incidence matrix
  if (dataParams$DBN && !dataParams$type %in% c("bde", "bge")) {
    stop("The implementation for DBNs is currently only for the BDe and BGe score.")
  }
  bg_flag <- FALSE
  if (dataParams$type == "usr") {
    localtype <- dataParams$pctesttype
    if (!is.null(dataParams$bgremove)) {
      if (dataParams$bgremove && dataParams$bgn > 0) {
        bg_flag <- TRUE
      }
    }
  } else {
    localtype <- dataParams$type
  }
  if (!localtype %in% c("bde", "bge")) {
    stop("The implementation is currently only for the BDe and BGe score, or user-defined scores with the same structure.")
  }

  if (dataParams$DBN) {
    n <- dataParams$n # number of nodes including background
    nsmall <- dataParams$nsmall # number of nodes without background
    bgn <- dataParams$bgn # number of background nodes
    slices <- dataParams$slices # number of time slices

    ### First slice parameters
    # remember background nodes were placed at the end of the first slice, which we need to undo!
    dataParams_first <- dataParams$firstslice
    if (bgn > 0) { # place background nodes at the start again
      reorder <- c(1:bgn + nsmall, 1:nsmall)
      if (dataParams$type == "bde") { # bde score
        dataParams_first$data <- dataParams_first$data[, reorder]
        dataParams_first$d1 <- dataParams_first$d1[, reorder]
        dataParams_first$d0 <- dataParams_first$d0[, reorder]
      } else { # bge score
        dataParams_first$TN <- dataParams_first$TN[reorder, reorder]
      }
    }
    params_first <- DAGparameters(incidence[1:n, 1:n], dataParams_first)

    ## Other slices parameters
    # remember the later times were placed first, which we need to undo!
    dataParams_other <- dataParams$otherslices
    reorder <- c(1:n + nsmall, 1:nsmall) # put them back in order
    if (dataParams$type == "bde") { # bde score
      dataParams_other$data <- dataParams_other$data[, reorder]
      dataParams_other$d1 <- dataParams_other$d1[, reorder]
      dataParams_other$d0 <- dataParams_other$d0[, reorder]
    } else { # bge score
      dataParams_other$TN <- dataParams_other$TN[reorder, reorder]
    }
    params <- DAGparameters(incidence, dataParams_other)

    ### Combine parameters from the slices
    if (dataParams$type == "bde") { # bde score
      allalphas <- params$alphas
      allalphas[1:n] <- params_first$alphas # update the first slice
      allbetas <- params$betas
      allbetas[1:n] <- params_first$betas # update the first slice
      allpmeans <- params$pmeans
      allpmeans[1:n] <- params_first$pmeans # update the first slice
    } else { # bge score
      allmus <- params$mus
      allmus[1:n] <- params_first$mus # update the first slice
      allsigmas <- params$sigmas
      allsigmas[1:n] <- params_first$sigmas # update the first slice
      alldfs <- params$dfs
      alldfs[1:n] <- params_first$dfs # update the first slice
    }

    ### Need to unroll the DBN, if more than 2 slices and we choose to unroll
    if (slices > 2 && unrollDBN) {
      nbig <- n + nsmall * (slices - 1)
      incidence_unroll <- matrix(0, nbig, nbig)
      incidence_unroll[1:nrow(incidence), 1:ncol(incidence)] <- incidence
      inc_names_unroll <- paste(rep(colnames(incidence)[bgn + 1:nsmall], (slices - 2)),
                                rep(3:slices, each = nsmall), sep = "_")
      colnames(incidence_unroll) <- c(colnames(incidence), inc_names_unroll)
      rownames(incidence_unroll) <- c(colnames(incidence), inc_names_unroll)

      if (dataParams$type == "bde") { # bde score
        allalphas_unroll <- vector("list", nbig)
        allbetas_unroll <- vector("list", nbig)
        allpmeans_unroll <- vector("list", nbig)
        allalphas_unroll[1:ncol(incidence)] <- allalphas
        allbetas_unroll[1:ncol(incidence)] <- allbetas
        allpmeans_unroll[1:ncol(incidence)] <- allpmeans
      } else { # bge score
        allmus_unroll <- vector("list", nbig)
        allsigmas_unroll <- vector("list", nbig)
        alldfs_unroll <- vector("list", nbig)
        allmus_unroll[1:ncol(incidence)] <- allmus
        allsigmas_unroll[1:ncol(incidence)] <- allsigmas
        alldfs_unroll[1:ncol(incidence)] <- alldfs
      }

      for (ii in 1:(slices - 2)) {
        block_rows <- n - nsmall + 1:(2 * nsmall)
        block_cols <- n + 1:nsmall
        incidence_unroll[block_rows + nsmall * ii, block_cols + nsmall * ii] <- incidence[block_rows, block_cols]
        if (dataParams$type == "bde") { # bde score
          allalphas_unroll[block_cols + nsmall * ii] <- allalphas[block_cols]
          allbetas_unroll[block_cols + nsmall * ii] <- allbetas[block_cols]
          allpmeans_unroll[block_cols + nsmall * ii] <- allpmeans[block_cols]
        } else { # bge score
          allmus_unroll[block_cols + nsmall * ii] <- allmus[block_cols]
          allsigmas_unroll[block_cols + nsmall * ii] <- allsigmas[block_cols]
          alldfs_unroll[block_cols + nsmall * ii] <- alldfs[block_cols]
        }
        if (bgn > 0) { # if there are background nodes, repeat across slices
          block_rows <- 1:bgn
          incidence_unroll[block_rows, block_cols + nsmall * ii] <- incidence[block_rows, block_cols]
        }
      }

      incidence <- incidence_unroll
      if (dataParams$type == "bde") { # bde score
        allalphas <- allalphas_unroll
        allbetas <- allbetas_unroll
        allpmeans <- allpmeans_unroll
      } else { # bge score
        allmus <- allmus_unroll
        allsigmas <- allsigmas_unroll
        alldfs <- alldfs_unroll
      }
    }
  } else {
    n <- nrow(incidence) # number of nodes in DAG
    if (localtype == "bde") { # bde score
      allalphas <- vector("list", n)
      allbetas <- vector("list", n)
      allpmeans <- vector("list", n)
    } else { # bge score
      allmus <- vector("list", n)
      allsigmas <- vector("list", n)
      alldfs <- vector("list", n)
    }
    for (j in 1:n) {
      parentNodes <- which(incidence[, j] == 1)
      if (dataParams$type == "usr") {
        tempResult <- usrDAGparametersCore(j, parentNodes, dataParams)
      } else {
        tempResult <- DAGparametersCore(j, parentNodes, dataParams)
      }
      if (localtype == "bde") { # bde score
        allalphas[[j]] <- tempResult$alphas
        allbetas[[j]] <- tempResult$betas
        allpmeans[[j]] <- tempResult$pmeans
      } else { # bge score
        allmus[[j]] <- tempResult$mus
        allsigmas[[j]] <- tempResult$sigmas
        alldfs[[j]] <- tempResult$dfs
      }
    }
    if (bg_flag) { # remove all background nodes
      to_keep <- setdiff(1:n, dataParams$bgnodes)
      incidence <- incidence[to_keep, to_keep]
      if (localtype == "bde") { # bde score
        allalphas <- allalphas[to_keep]
        allbetas <- allbetas[to_keep]
        allpmeans <- allpmeans[to_keep]
      } else { # bge score
        allmus <- allmus[to_keep]
        allsigmas <- allsigmas[to_keep]
        alldfs <- alldfs[to_keep]
      }
    }
  }

  posteriorParams <- list()
  posteriorParams$DAG <- incidence
  if (localtype == "bde") { # bde score
    posteriorParams$alphas <- allalphas
    posteriorParams$betas <- allbetas
    posteriorParams$pmeans <- allpmeans
  } else { # bge score
    posteriorParams$mus <- allmus
    posteriorParams$sigmas <- allsigmas
    posteriorParams$dfs <- alldfs
  }
  return(posteriorParams)
}

usrDAGparametersCore <- function(j, parentNodes, param) {
  # this is a template function for computing the parameters
  # and their posterior distribution. It requires the output
  # to be in the corresponding BDe or BGe format
  param$type <- param$pctesttype # for the template we just use the BDe or BGe
  DAGparametersCore(j, parentNodes, param)
}

DAGparametersCore <- function(j, parentNodes, param) {
  # this function computes the parameters and their posterior distribution
  # for a given node with parent set and for the data
  switch(param$type,
         "bge" = {
           coreParams <- list()
           lp <- length(parentNodes) # number of parents
           if (lp > 0) { # otherwise no regression coefficients
             df <- param$awpN - param$n + lp + 1
             R11 <- param$TN[parentNodes, parentNodes]
             R12 <- param$TN[parentNodes, j]
             R11inv <- solve(R11) # could avoid inversions, but here for simplicity
             mb <- R11inv %*% R12 # mean part
             divisor <- param$TN[j, j] - R12 %*% mb
             coreParams$mus <- as.vector(mb)
             coreParams$sigmas <- as.numeric(divisor / df) * R11inv
             coreParams$dfs <- df
           } else {
             coreParams$mus <- NA
             coreParams$sigmas <- NA
             coreParams$dfs <- NA
           }
           return(coreParams)
         },
         "bde" = {
           lp <- length(parentNodes) # number of parents
           noParams <- 2^lp # number of binary states of the parents
           chi <- param$chi
           alphas <- rep(NA, noParams)
           betas <- rep(NA, noParams)
           switch(as.character(lp),
                  "0" = { # no parents
                    N1 <- sum(param$d1[, j])
                    N0 <- sum(param$d0[, j])
                    NT <- N0 + N1
                    alphas <- (N1 + chi / (2 * noParams))
                    betas <- (N0 + chi / (2 * noParams))
                  },
                  "1" = { # one parent
                    summys <- param$data[, parentNodes]
                    for (i in 0:(noParams - 1)) {
                      totest <- which(summys == i)
                      N1 <- sum(param$d1[totest, j])
                      N0 <- sum(param$d0[totest, j])
                      NT <- N0 + N1
                      alphas[i + 1] <- (N1 + chi / (2 * noParams))
                      betas[i + 1] <- (N0 + chi / (2 * noParams))
                    }
                  },
                  { # more parents
                    summys <- colSums(2^(c(0:(lp - 1))) * t(param$data[, parentNodes]))
                    N1s <- collectC(summys, param$d1[, j], noParams)
                    N0s <- collectC(summys, param$d0[, j], noParams)
                    NTs <- N1s + N0s
                    alphas <- (N1s + chi / (2 * noParams))
                    betas <- (N0s + chi / (2 * noParams))
                  }
           )
           coreParams <- list()
           coreParams$alphas <- alphas
           coreParams$betas <- betas
           coreParams$pmeans <- alphas / (alphas + betas)
           return(coreParams)
         }
  )
}

SampleParameters <- function(DAGparams, type = "bde") {
  # this function resamples the probability parameters from the posterior
  # beta distributions or from the posterior edge coefficient distribution
  # for an unrolled DBN they are sampled at each slice
  # rather than sampled once and copied over the slices
  if (type == "bde") {
    sampledps <- DAGparams$pmeans
  } else { # bge version
    sampledps <- DAGparams$mus
  }
  n <- length(sampledps)
  for (jj in 1:n) {
    if (type == "bde") {
      as <- DAGparams$alphas[[jj]]
      bs <- DAGparams$betas[[jj]]
      ps <- rep(0, length(as))
      for (ii in 1:length(as)) {
        ps[ii] <- stats::rbeta(1, as[ii], bs[ii])
      }
    } else { # bge version
      if (!is.na(DAGparams$dfs[[jj]])) {
        ps <- mvtnorm::rmvt(1, sigma = as.matrix(DAGparams$sigmas[[jj]]),
                            df = DAGparams$dfs[[jj]], delta = DAGparams$mus[[jj]])
      } else {
        ps <- NA
      }
    }
    sampledps[[jj]] <- ps
  }
  return(sampledps)
}

BinaryScoreAgainstDAG <- function(DAGparams, dataToScore) {
  # score a set of binary vectors against a DAG with parameters
  n <- nrow(DAGparams$DAG) # number of nodes
  logscoresagainstDAG <- matrix(NA, nrow(dataToScore), n)
  for (j in 1:n) {
    parentNodes <- which(DAGparams$DAG[, j] == 1)
    logscoresagainstDAG[, j] <- BinaryScoreAgainstDAGcore(j, parentNodes, DAGparams, dataToScore)
  }
  return(logscoresagainstDAG)
}

BinaryScoreAgainstDAGcore <- function(j, parentNodes, DAGparams, dataToScore) {
  # score of a single node of binary vectors against a DAG
  sampleNodeScores <- rep(NA, nrow(dataToScore)) # store the log scores
  lp <- length(parentNodes) # number of parents
  noParams <- 2^lp # number of binary states of the parents
  switch(as.character(lp),
         "0" = { # no parents
           theta <- DAGparams$pmeans[[j]] # the probability of each state
           sampleNodeScores[which(dataToScore[, j] == 1)] <- log(theta) # log scores of 1s
           sampleNodeScores[which(dataToScore[, j] == 0)] <- log(1 - theta) # log scores of 0s
         },
         "1" = { # one parent
           summysfull <- dataToScore[, parentNodes]
           for (i in 0:(noParams - 1)) {
             theta <- DAGparams$pmeans[[j]][i + 1] # the probability of each state
             toScore <- which(summysfull == i)
             sampleNodeScores[toScore[which(dataToScore[toScore, j] == 1)]] <- log(theta) # log scores of 1s
             sampleNodeScores[toScore[which(dataToScore[toScore, j] == 0)]] <- log(1 - theta) # log scores of 0s
           }
         },
         { # more parents
           summysfull <- colSums(2^(c(0:(lp - 1))) * t(dataToScore[, parentNodes]))
           # find the entries where the child is 1
           toScore <- which(dataToScore[, j] == 1)
           # Ns <- collectC(summysfull[toScore], rep(1, length(toScore)), noParams) # works like the table command
           Ns <- tabulate(summysfull[toScore] + 1, noParams) # can use tabulate instead of collectC, but we need to add one
           tempScoreVec <- rep(log(DAGparams$pmeans[[j]]), Ns) # make the relevant number of copies of each log score
           sampleNodeScores[toScore] <- tempScoreVec[rank(summysfull[toScore], ties.method = "first")] # use the rank function to map scores to entries
           # find the entries where the child is 0
           toScore <- which(dataToScore[, j] == 0)
           Ns <- tabulate(summysfull[toScore] + 1, noParams) # again we need to add one
           tempScoreVec <- rep(log(1 - DAGparams$pmeans[[j]]), Ns) # make the relevant number of copies of each log score
           sampleNodeScores[toScore] <- tempScoreVec[rank(summysfull[toScore], ties.method = "first")] # use the rank function to map scores to entries
         }
  )
  return(sampleNodeScores)
}
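
# Illustrative sketch (not package code): the BDe branch above is conjugate
# Bayes for a Bernoulli likelihood with a Beta(chi/(2*noParams), chi/(2*noParams))
# prior per parent state, so pmeans holds (N1 + chi/(2*noParams)) / (NT + chi/noParams).
# The counts below are made up.
if (FALSE) {
  chi <- 1; noParams <- 2 # one binary parent, so two parent states
  N1 <- 7; N0 <- 3        # child = 1 / child = 0 counts within one parent state
  alpha_post <- N1 + chi / (2 * noParams)
  beta_post <- N0 + chi / (2 * noParams)
  alpha_post / (alpha_post + beta_post) # the posterior mean stored in pmeans

  # The rep() + rank() idiom from BinaryScoreAgainstDAGcore: scatter one value
  # per parent state back onto the observations in a single pass.
  states <- c(2, 0, 1, 0, 2)  # parent state of each observation
  vals <- c(-0.1, -0.5, -0.9) # one value for each state 0, 1, 2
  Ns <- tabulate(states + 1, 3) # state counts (shifted by one as states start at 0)
  rep(vals, Ns)[rank(states, ties.method = "first")] # identical to vals[states + 1]
}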
# File: Bestie/R/posteriorparameters.R
#' The vector of letter frequencies in English.
#'
#' The vector with frequencies of the 26 English letters.
#' It is sorted by the frequency of usage.
#' May be used to refine the transliteration.
#'
#' @docType data
#' @keywords datasets
#' @name EnglishLetterFrequency
#' @usage data(EnglishLetterFrequency)
#' @format a named vector with 26 elements.
NULL
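
# Hypothetical usage sketch (not part of the documentation): since the vector
# is sorted by frequency of usage, its names give the ranking of English letters.
if (FALSE) {
  data(EnglishLetterFrequency)
  head(names(EnglishLetterFrequency), 5) # the five most frequent English letters
}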
# File: BetaBit/R/EnglishLetterFrequency.R
#' The data from the study of Polish upper-secondary schools students.
#'
#' It was first conducted at the same time as the PISA 2009 study, with use of the
#' same cognitive tests and questionnaires as in PISA 2009, but on a different
#' group of students: first grade students of upper-secondary schools (in Poland
#' most of the students in a regular PISA sample attend lower-secondary
#' schools). The students who participated in the first wave of the study were
#' followed in the 2nd grade of upper-secondary school within the research
#' program \emph{Our further study and work} (\emph{Nasza Dalsza Nauka i Praca}).
#' Both studies were conducted by the Institute of Philosophy and Sociology
#' of the Polish Academy of Sciences.
#'
#' \strong{The original data was changed a little, to better fit the purpose of
#' the game.}
#'
#' @docType data
#' @keywords datasets
#' @name FSW
#' @format data frame: 3796 obs. of 54 variables
NULL
# File: BetaBit/R/FSW.R
#' The history of recently executed commands.
#'
#' The character vector of recently executed commands.
#' Each element of the vector consists of a command's name and the command's
#' arguments separated with a space.
#'
#' @docType data
#' @keywords datasets
#' @name bash_history
#' @usage data(bash_history)
#' @format a character vector with 19913 elements.
NULL
# File: BetaBit/R/bash_history.R
#' The data from the study of Polish upper-secondary schools students.
#'
#' It was first conducted at the same time as the PISA 2009 study, with use of the
#' same cognitive tests and questionnaires as in PISA 2009, but on a different
#' group of students: first grade students of upper-secondary schools (in Poland
#' most of the students in a regular PISA sample attend lower-secondary
#' schools). The students who participated in the first wave of the study were
#' followed in the 2nd grade of upper-secondary school within the research
#' program \emph{Our further study and work} (\emph{Nasza Dalsza Nauka i Praca}).
#' Both studies were conducted by the Institute of Philosophy and Sociology
#' of the Polish Academy of Sciences.
#'
#' \strong{The original data was changed a little, to better fit the purpose of
#' the game.}
#'
#' @docType data
#' @keywords datasets
#' @name dataFSW
#' @format data frame: 3796 obs. of 54 variables
NULL
# File: BetaBit/R/dataFSW.R
#' The database with employees of the Faculty of Electronics and Information Technology of Warsaw University of Technology.
#'
#' The dataset describing names, surnames and logins of the faculty employees.
#' Note that it is an artificial dataset that imitates a real database.
#' The subsequent columns in this dataset describe:
#' \itemize{
#'   \item name. The name of an employee.
#'   \item surname. The surname of an employee.
#'   \item login. The login of an employee on the Proton server.
#' }
#'
#' @docType data
#' @keywords datasets
#' @name employees
#' @format a data frame with 541 rows and three columns.
NULL
# File: BetaBit/R/employees.R
#' The food data from Rijksinstituut voor Volksgezondheid en Milieu
#'
#' The Dutch institute Rijksinstituut voor Volksgezondheid en Milieu
#' has compiled a database with ingredient information on more than
#' 2,200 food products. The data in this package is only a processed
#' fraction of the huge and very interesting NEVO database available
#' at \url{https://www.rivm.nl/documenten/nevo-online-versie}.
#'
#' The preprocessed data can be used to reproduce the charts
#' from the book Wykresy od kuchni (Chart runners)
#' \url{https://github.com/BetaAndBit/Charts}
#'
#' Note that the data frames \code{food}, \code{food_max}, \code{food_mini}, \code{food_all}
#' have product names in English, while
#' \code{food_pl}, \code{food_max_pl}, \code{food_mini_pl}, \code{food_all_pl}
#' have product names in Polish.
#'
#' @examples
#' library("ggplot2")
#' head(food)
#'
#' \donttest{
#' library("ggthemes")
#' ggplot(data = food, aes(x = Energy)) +
#'   geom_histogram(color = "white") +
#'   facet_wrap(~Group) +
#'   labs(title = "Energy value of the products", subtitle = "per 100 g",
#'        x = "Energy value", y = "Number") +
#'   theme_economist()
#'
#' ggplot(data = food_mini, aes(x = Energy)) +
#'   geom_histogram(color = "white") +
#'   facet_wrap(~Group) +
#'   labs(title = "Energy value of the products", subtitle = "per 100 g",
#'        x = "Energy value", y = "Number") +
#'   theme_economist()
#'
#' ggplot(data = food, aes(x = Protein, y = Fats,
#'                         color = Group, size = Energy)) +
#'   geom_point() +
#'   scale_color_brewer(type = "qual", palette = "Dark2") +
#'   labs(title = "Share of protein and fats", subtitle = "per 100 g",
#'        y = "Fats [g]", x = "Protein [g]") +
#'   theme_gdocs()
#'
#' ggplot(data = food, aes(x = Group, y = Energy)) +
#'   geom_rug(sides = "l") +
#'   geom_violin(scale = "width", aes(fill = Group)) +
#'   geom_text(data = food_max, aes(label = Name),
#'             hjust = 0, vjust = 0, color = "blue4") +
#'   geom_boxplot(width = 0.2, coef = 100) +
#'   coord_flip() +
#'   labs(title = "Energy value distribution", subtitle = "per 100 g") +
#'   theme_gdocs() + theme(legend.position = "none")
#'
#' }
#'
#' @docType data
#' @keywords datasets
#' @name food
#' @aliases food food_max food_mini food_all food_pl food_max_pl food_mini_pl food_all_pl
#' @format data frame: 2207 obs. of 9 variables
NULL
# File: BetaBit/R/food.R
#' @title The Frequon (Frequency Analysis) Game
#'
#' @description
#' The \code{frequon} function is used for solving problems in the data-based game ,,The Frequon Game''.
#'
#' @param ... \code{frequon} function is called with different arguments, which vary depending
#' on the problem that Bit is trying to solve. See \code{Details} in order to learn more about the list of possible arguments.
#'
#' @details Every time some additional hints are needed, one should add the
#' \code{hint=TRUE} argument to the \code{frequon} function.
#'
#' In this game you are in contact with a group of people that are going to stop terrorists.
#' You can communicate with them through the \code{frequon} function.
#'
#' In each call add a \code{subject} parameter that will indicate which message you are answering.
#' Add a \code{content} parameter. Its value should match the request.
#'
#' ,,The Frequon Game'' is a free of charge, educational project of the SmarterPoland.pl Foundation.
#'
#' @author
#' \itemize{
#'   \item{Katarzyna Fak - the idea and the implementation,}
#'   \item{Przemyslaw Biecek - comments and the integration with the `BetaBit` package.}
#' }
#'
#' @examples
#' frequon()
#' frequon(hint=TRUE)
#' @rdname frequon
#' @importFrom stats filter
#' @importFrom stats na.omit
#' @export
frequon <- function(...) {
  args <- list(...)
  hintf <- function(level) {
    cat(txt[[paste0("hint", level)]])
  }
  taskf <- function(level) {
    cat(txt[[paste0("task", level)]])
  }
  txt <- as.list(dcode(.frequon.))

  # plain start
  if (length(args) == 0) {
    cat(txt$intro)
    .pouch$level <- 0
    return(invisible(TRUE))
  }

  subjects <- c('0' = 're: interested?',
                '1' = 're: frequencies',
                '2' = 're: transcription',
                '3' = 're: key',
                '4' = 're: next text',
                '5' = 're: lengths in the text',
                '6' = 're: language in and message',
                '7' = 're: password')

  if ("subject" %in% names(args)) {
    if (tolower(args$subject) == subjects[1]) {
      if (digest(args$content) == digest(BetaBit::roses)) {
        .pouch$level <- 1
        taskf(.pouch$level)
      } else if (.pouch$level == 0) cat(txt$errorIntro)
    }
    if (!(tolower(args$subject) %in% subjects)) {
      cat("Check the mail subject. Something is wrong there!")
      return(invisible(FALSE))
    }

    # 1. give a vector of frequencies
    if (tolower(args$subject) == subjects[2]) {
      if (is.null(names(args$content))) {
        cat(txt$errorNoNAMES)
      } else {
        if (any(is.na(args$content)) || any(is.na(names(args$content)))) {
          cat(txt$errorNAs)
        } else {
          if ((length(na.omit(names(args$content[letters]))) != length(letters)) ||
              (!all(names(args$content[letters]) == letters)))
            cat(txt$errorLetters)
          if (digest(as.numeric(args$content[letters])) == '8a37375e11927ab4564a62598ae764dd') {
            # a double assignment to be able to call hint on the proper level of the game
            .pouch$level <- 2
            taskf(.pouch$level)
            return(invisible(TRUE))
          } else cat(txt$errorFrequencies)
        }
      }
    }

    ## 2. substitute letters from EnglishLetterFrequency
    if (tolower(args$subject) == subjects[3]) {
      In <- gsub("[^A-Z]", "", toupper(args$content))
      if (digest(In) == 'd5bc6f3d64e0199e08d029ee25d835a2') {
        .pouch$level <- 3
        taskf(.pouch$level)
        return(invisible(TRUE))
      } else cat(txt$errorDecipher)
    }

    ## 3. translate ALL of the letters (find a complete key)
    if (tolower(args$subject) == subjects[4]) {
      if (length(args$content) != 2 || any(sort(names(args$content)) != c("new", "old"))) {
        cat(txt$errorKey1)
      } else {
        if (any(nchar(args$content) != c(26, 26))) {
          cat(txt$errorKey2)
        } else {
          ## I assume that a player may not give the letters in the indicated order "abc...xyz"
          ## if only they are correct.
          ord <- order(strsplit(args$content['old'], "")[[1]])
          In <- strsplit(args$content['new'], "")[[1]][ord]
          if (digest(In[-c(2, 21, 9, 11)]) == 'e81104a8409e08ab2eaaa41fe6645056') {
            .pouch$level <- 4
            taskf(.pouch$level)
            return(invisible(TRUE))
          } else cat(txt$errorBadKey)
        }
      }
    }

    ## 4. find another cipher
    if (tolower(args$subject) == subjects[5]) {
      if (strsplit(args$subject, " ")[[1]][3] %in% c('and', 'guns')) {
        cat(txt$errorBadX)
      } else {
        if (strsplit(args$subject, " ")[[1]][3] == "guns") {
          cat(txt$errorBadX2)
        }
      }
      In <- gsub("[^A-Z]", "", toupper(args$content))
      if (digest(In) %in% c('e876c26d9d8bad8028cefb95eb54df21',
                            '54bac78ea14b3ddd536318edbd629ad4',
                            digest(gsub("[^A-Z]", "", toupper(BetaBit::pcs))))) {
        .pouch$level <- 5
        taskf(.pouch$level)
        return(invisible(TRUE))
      } else cat(txt$errorBadAND)
    }

    ## 5. count the words' lengths
    if (tolower(args$subject) == subjects[6]) {
      if (!any(names(args) %in% "attachment")) {
        cat(txt$errorAttachment)
      } else {
        if (!all(names(args$attachment) %in% names(BetaBit::wikiquotes))) {
          cat(txt$errorWikiNames)
        }
        if (digest(as.numeric(args$content[as.character(6:10)])) == "a99031c077f98cd93b351882400b7dbd") {
          if (digest(as.numeric(args$attachment[["Czech"]][as.character(2:10)])) == "a976e39ca9e1eb15418cce4606575002") {
            .pouch$level <- 6
            taskf(.pouch$level)
            return(invisible(TRUE))
          } else cat(txt$errorLengths0)
        } else cat(txt$errorLengths)
      }
    }

    ## 6. give a language
    if (tolower(args$subject) == subjects[7]) {
      if (digest(tolower(args$content)) == '4f808eee5fb8c3a585d76daf132e3990') {
        .pouch$level <- 7
        taskf(.pouch$level)
        return(invisible(TRUE))
      } else cat(txt$errorLanguage)
    }

    ## 7. give a password
    if (tolower(args$subject) == subjects[8]) {
      if (digest(args$content) == 'ba61e613c2207f0a81e0697914f3dc96') {
        cat(txt$outro)
        return(invisible(TRUE))
      } else {
        cat(txt$errorEnd)
        return(invisible(FALSE))
      }
    }
  }

  if (length(args) > 0 && !("content" %in% names(args)) && !('hint' %in% names(args))) {
    cat(txt$errorContent)
    return(invisible(FALSE))
  }
  if (length(args) > 0 && !("subject" %in% names(args)) && !('hint' %in% names(args))) {
    cat("Did you send this message without a subject?\n")
    return(invisible(FALSE))
  }
  if (!is.null(args$hint) && args$hint == TRUE) {
    hintf(.pouch$level)
    return(invisible(TRUE))
  }
  return(invisible(FALSE))
}
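
# A hedged aside (not called by the game): a named frequency vector of the kind
# checked above can be built in base R; `msg` is a stand-in for whichever
# message the player is analysing.
if (FALSE) {
  msg <- "attack at dawn"
  chars <- strsplit(tolower(msg), "")[[1]]
  counts <- table(factor(chars[chars %in% letters], levels = letters))
  freqs <- as.numeric(counts) / sum(counts) # relative frequency of each letter
  names(freqs) <- letters
  freqs
}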
# File: BetaBit/R/frequon.R
#' The history of logs into the Proton server
#'
#' The dataset describing the history of logs: who, from where and when logged into the Proton server.
#' The subsequent columns in this dataset describe:
#' \itemize{
#'   \item login. The login of the user which logs into the Proton server.
#'   \item host. The IP address of the computer, from which the log into the Proton server was detected.
#'   \item date. The date of log into the Proton server. Rows are sorted by this column.
#' }
#'
#' @docType data
#' @keywords datasets
#' @name logs
#' @usage data(logs)
#' @format a data frame with 59366 rows and 3 columns.
NULL
# File: BetaBit/R/logs.R
#' The three messages to be decoded.
#'
#' The messages to be decoded in the game `frequon()`.
#' How to access them? You have to figure that out by yourself.
#'
#' @docType data
#' @keywords datasets
#' @name guns
#' @aliases and roses pistoale lyo pcs
NULL
# File: BetaBit/R/messages.R
#' @title The Proton Game
#'
#' @description
#' The \code{proton} function is used for solving problems in the data-based game ,,The Proton Game''.
#' Solve four data-based puzzles in order to crack into Pietraszko's account!
#'
#' @param ... \code{proton} function is called with different arguments, which vary depending
#' on the problem that Bit is trying to solve. See \code{Details} in order to learn more about the list of possible arguments.
#'
#' @details Every time some additional hints are needed, one should add the
#' \code{hint=TRUE} argument to the \code{proton} function.
#'
#' In order to get more information about a user on the Proton server,
#' one should pass \code{action = "login"}, \code{login="XYZ"} arguments
#' to the \code{proton} function.
#'
#' In order to log into the Proton server, one should pass \code{action = "login"},
#' \code{login="XYZ"}, \code{password="ABC"} arguments to the \code{proton} function.
#' If the password matches the login, then one will receive a message about a successful login.
#'
#' In order to log into a server different from Proton, one should pass
#' \code{action = "server"}, \code{host="XYZ"} arguments to the \code{proton} function.
#'
#' ,,The Proton Game'' is a free of charge, educational project of the SmarterPoland.pl Foundation.
#'
#' @author
#' Przemyslaw Biecek, \email{przemyslaw.biecek@@gmail.com}, SmarterPoland.pl Foundation.
#'
#' @examples
#' proton()
#' proton(hint=TRUE)
#' @rdname proton
#' @importFrom digest digest
#' @export
proton <- function(...) {
  args <- list(...)
  texts <- dcode(.proton.)

  # plain start
  if (length(args) == 0) {
    cat(texts["proton.init"])
    return(invisible(NULL))
  }
  if (length(args) == 1 && !is.null(args$hint) && args$hint) {
    cat(texts["proton.init"], "\n\nHINT:\n", texts["proton.init.w"], sep = "")
    return(invisible(NULL))
  }

  # action = server
  if (length(args) > 0 && !is.null(args$action) && args$action == "server") {
    if (!is.null(args$host) && digest(args$host) == "94265570be658d9fafa4861d7252afa9") {
      cat(texts["proton.host.instr"])
      if (!is.null(args$hint) && args$hint) {
        cat("\n\nHINT:\n", texts["proton.host.instr.w"], sep = "")
      }
      return(invisible(NULL))
    } else {
      cat("Bit spent some time to infiltrate this workstation. \nBut there is nothing interesting here.\nFind the workstation which Pietraszko is using most often to log into the Proton server and try again.")
    }
  }

  # action = login
  if (length(args) > 0 && !is.null(args$action) && args$action == "login") {
    # only the user is set to johnins
    if (!is.null(args$login) && args$login == texts["log.1"] && is.null(args$password)) {
      cat(texts["proton.login.init"])
      if (!is.null(args$hint) && args$hint) {
        cat("\nHINT:\n", texts["proton.login.init.w"], sep = "")
      }
      return(invisible(NULL))
    }
    if (is.null(args$login)) {
      cat("\nIf the action='login' argument is set then one should also set the login= argument.\n")
      return(invisible(NULL))
    }
    # user is set to janie and password is provided
    if (!is.null(args$login) && args$login == texts["log.1"] && !is.null(args$password)) {
      if (digest(args$password) == "bbfb4a474b61b80225fd49d7c67e5a01") {
        cat(texts["proton.login.pass.instr"])
        if (!is.null(args$hint) && args$hint) {
          cat("\nHINT:\n", texts["proton.login.pass.instr.w"], sep = "")
        }
        return(texts["proton.login.pass"])
      } else {
        return(texts["proton.login.fail"])
      }
    }
    # user is set to sl and password is provided
    if (!is.null(args$login) && args$login == texts["log.2"] && !is.null(args$password)) {
      if (digest(args$password) == "ce3494fef4545c1b6160e5430d7efe66") {
        cat(texts["proton.final"])
        return(texts["proton.login.pass"])
      } else {
        return(texts["proton.login.fail"])
      }
    }
    # only the user is set
    if (!is.null(args$login) && args$login != texts["log.1"]) {
      cat(texts["proton.login.weak"])
      return(invisible(NULL))
    }
  }
}
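
# Sketch of the answer-checking pattern used throughout this file (illustrative
# only): answers are never stored in plain text; their md5 hashes, computed by
# digest::digest, are compared instead, so reading the source does not reveal
# the solutions. The string "swordfish" is a made-up example.
if (FALSE) {
  library(digest)
  stored_hash <- digest("swordfish") # only the hash would appear in the code
  check_guess <- function(guess) digest(guess) == stored_hash
  check_guess("swordfish") # TRUE
  check_guess("letmein")   # FALSE
}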
# File: BetaBit/R/proton.R