#' @method updateRegistry ExperimentRegistry
#' @export
updateRegistry.ExperimentRegistry = function(reg) {
  # update the BatchJobs part first
  updated = NextMethod()
  is.updated = !identical(updated, FALSE)
  if (is.updated)
    reg = updated

  version.reg = reg$packages$BatchExperiments$version
  version.pkg = packageVersion("BatchExperiments")
  if (version.reg == version.pkg) {
    return(if (is.updated) reg else FALSE)
  }

  if (version.reg > version.pkg) {
    warningf("The registry has been used with BatchExperiments version %s, installed is version %s. You should update BatchExperiments on this machine.",
      version.reg, version.pkg)
    return(if (is.updated) reg else FALSE)
  }

  # do updates
  if (version.reg < package_version("1.0.767")) {
    # move static problem files to the new path layout
    path = file.path(reg$file.dir, "problems")
    src = list.files(path, full.names = TRUE, pattern = "_static\\.RData$")
    ids = sub("_static\\.RData$", "", basename(src))
    dest = vapply(ids, function(id) getProblemFilePaths(reg$file.dir, id)["static"], character(1L))
    file.rename(src, dest)

    # move dynamic problem files
    path = file.path(reg$file.dir, "problems")
    src = list.files(path, full.names = TRUE, pattern = "_dynamic\\.RData$")
    ids = sub("_dynamic\\.RData$", "", basename(src))
    dest = vapply(ids, function(id) getProblemFilePaths(reg$file.dir, id)["dynamic"], character(1L))
    file.rename(src, dest)

    # move algorithm files
    path = file.path(reg$file.dir, "algorithms")
    src = list.files(path, full.names = TRUE)
    ids = sub("\\.RData$", "", basename(src))
    dest = getAlgorithmFilePath(reg$file.dir, ids)
    file.rename(src, dest)
  }

  reg$packages$BatchExperiments$version = version.pkg
  reg
}
/scratch/gouwar.j/cran-all/cranData/BatchExperiments/R/updateExperimentRegistry.R
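This S3 method is dispatched from BatchJobs' generic `updateRegistry`, so it normally runs indirectly when an old registry is loaded rather than being called by hand. A minimal sketch of that flow, assuming a `file.dir` ("old-experiment-files" is a hypothetical path) created by an older BatchExperiments version:

```r
library(BatchExperiments)

# loadRegistry() (from BatchJobs) detects the version mismatch and calls
# updateRegistry(), which dispatches to updateRegistry.ExperimentRegistry()
# for ExperimentRegistry objects.
reg <- loadRegistry("old-experiment-files", adjust.paths = TRUE)
```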
#' @title The BatchExperiments package
#'
#' @description
#' Extends the BatchJobs package to run statistical experiments on
#' batch computing clusters.
#'
#' @section Additional information:
#'
#' \describe{
#'   \item{Homepage:}{\url{https://github.com/tudo-r/BatchExperiments}}
#'   \item{Wiki:}{\url{https://github.com/tudo-r/BatchExperiments/wiki}}
#' }
#'
#' @docType package
#' @name BatchExperiments
#' @import checkmate
#' @import BBmisc
#' @import DBI
#' @import RSQLite
#' @import BatchJobs
#' @import data.table
#' @importFrom stats setNames na.omit
#' @importFrom utils head capture.output packageVersion
NULL

.onLoad <- function(libname, pkgname) {
  backports::import(pkgname)
}

# re-bind unexported helpers from the BatchJobs namespace
addIntModulo = getFromNamespace("addIntModulo", "BatchJobs")
buffer = getFromNamespace("buffer", "BatchJobs")
checkDir = getFromNamespace("checkDir", "BatchJobs")
checkPart = getFromNamespace("checkPart", "BatchJobs")
createShardedDirs = getFromNamespace("createShardedDirs", "BatchJobs")
dbConnectToJobsDB = getFromNamespace("dbConnectToJobsDB", "BatchJobs")
dbCreateJobStatusTable = getFromNamespace("dbCreateJobStatusTable", "BatchJobs")
getJobInfoInternal = getFromNamespace("getJobInfoInternal", "BatchJobs")
getRandomSeed = getFromNamespace("getRandomSeed", "BatchJobs")
getResult = getFromNamespace("getResult", "BatchJobs")
isRegistryDir = getFromNamespace("isRegistryDir", "BatchJobs")
makeRegistryInternal = getFromNamespace("makeRegistryInternal", "BatchJobs")
saveRegistry = getFromNamespace("saveRegistry", "BatchJobs")
seeder = getFromNamespace("seeder", "BatchJobs")
/scratch/gouwar.j/cran-all/cranData/BatchExperiments/R/zzz.R
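The block above re-binds unexported BatchJobs helpers into the BatchExperiments namespace via `utils::getFromNamespace()`. The same pattern works for any package; a small illustration, where `someHelper` is a hypothetical name used only for the sketch:

```r
# getFromNamespace() errors if the object does not exist in the namespace,
# so wrap the lookup when the helper is optional.
someHelper <- tryCatch(
  utils::getFromNamespace("someHelper", "BatchJobs"),
  error = function(e) NULL
)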
#' Function to download financial data
#'
#' This function downloads financial data from Yahoo Finance using \code{\link[quantmod]{getSymbols}}.
#' Based on a set of tickers and a time period, the function downloads the data for each ticker and returns a report of the process, along with the actual data in the long dataframe format.
#' The main advantage of the function is that it automatically recognizes the source of the dataset from the ticker and structures the resulting data from different sources in the long format.
#' A caching system is also available, making it very fast.
#'
#' @section Warning:
#'
#' Notice that, since 2019, adjusted prices are no longer available from Google Finance.
#' When using this source, the function will output NA values for this column.
#'
#' Also, be aware that when using the cache system in a local folder (and not the default tempdir()), the aggregate price series might not match if
#' a split or dividend event happens in between cache files.
#'
#' @param tickers A vector of tickers. If not sure whether a ticker is available, check the websites of Google and Yahoo Finance. The source for downloading
#' the data can either be Google or Yahoo. The function automatically selects the source webpage based on the input ticker.
#' @param first.date The first date to download data (date or char as YYYY-MM-DD)
#' @param last.date The last date to download data (date or char as YYYY-MM-DD)
#' @param bench.ticker The ticker of the benchmark asset used to compare dates. My suggestion is to use the main stock index of the market from where the data is coming from (default = ^GSPC (SP500, US market))
#' @param type.return Type of price return to calculate: 'arit' (default) - arithmetic, 'log' - log returns.
#' @param freq.data Frequency of financial data ('daily', 'weekly', 'monthly', 'yearly')
#' @param how.to.aggregate Defines whether to aggregate the data using the first or the last observation of the aggregating period ('first', 'last').
#' For example, if freq.data = 'yearly' and how.to.aggregate = 'last', the last available day of the year will be used for all
#' aggregated values such as price.adjusted.
#' @param thresh.bad.data A percentage threshold for defining bad data. The dates of the benchmark ticker are compared to each asset. If the percentage of non-missing dates
#' with respect to the benchmark ticker is lower than thresh.bad.data, the function will ignore the asset (default = 0.75)
#' @param do.complete.data Return a complete/balanced dataset? If TRUE, all missing pairs of ticker-date will be replaced by NA or the closest price (see input do.fill.missing.prices). Default = FALSE.
#' @param do.fill.missing.prices Finds all missing prices and replaces them by their closest price, with preference for the previous price. This ensures a balanced dataset for all assets, without any NA. Default = TRUE.
#' @param do.cache Use cache system? (default = TRUE)
#' @param cache.folder Where to save cache files? (default = file.path(tempdir(), 'BGS_Cache') )
#' @param do.parallel Flag for using parallel computation or not (default = FALSE). Before using parallel, make sure you call function future::plan() first.
#' @param be.quiet Logical for printing statements (default = FALSE)
#' @return A list with the following items: \describe{
#' \item{df.control}{A dataframe containing the results of the download process for each asset}
#' \item{df.tickers}{A dataframe with the financial data for all valid tickers} }
#' @export
#' @import dplyr
#'
#' @seealso \link[quantmod]{getSymbols}
#'
#' @examples
#' tickers <- c('FB','MMM')
#'
#' first.date <- Sys.Date()-30
#' last.date <- Sys.Date()
#'
#' l.out <- BatchGetSymbols(tickers = tickers,
#'                          first.date = first.date,
#'                          last.date = last.date, do.cache=FALSE)
#'
#' print(l.out$df.control)
#' print(l.out$df.tickers)
BatchGetSymbols <- function(tickers,
                            first.date = Sys.Date()-30,
                            last.date = Sys.Date(),
                            thresh.bad.data = 0.75,
                            bench.ticker = '^GSPC',
                            type.return = 'arit',
                            freq.data = 'daily',
                            how.to.aggregate = 'last',
                            do.complete.data = FALSE,
                            do.fill.missing.prices = TRUE,
                            do.cache = TRUE,
                            cache.folder = file.path(tempdir(), 'BGS_Cache'),
                            do.parallel = FALSE,
                            be.quiet = FALSE) {

  # 20220501 DEPRECATION
  my_message <- stringr::str_glue(
    "2022-05-01: Package BatchGetSymbols will soon be replaced by yfR. \n",
    "More details about the change are available at github <www.github.com/msperlin/yfR>",
    "\nYou can install yfR by executing:\n\n",
    "remotes::install_github('msperlin/yfR')"
  )

  lifecycle::deprecate_soft(when = "2.6.4",
                            "BatchGetSymbols()",
                            "yfR::yf_get()",
                            details = my_message)

  # check for internet
  test.internet <- curl::has_internet()
  if (!test.internet) {
    stop('No internet connection found...')
  }

  # check cache folder
  if ( (do.cache) & (!dir.exists(cache.folder)) ) dir.create(cache.folder)

  # check options
  possible.values <- c('arit', 'log')
  if (!any(type.return %in% possible.values)) {
    stop(paste0('Input type.return should be one of:\n\n',
                paste0(possible.values, collapse = '\n')))
  }

  possible.values <- c('first', 'last')
  if (!any(how.to.aggregate %in% possible.values)) {
    stop(paste0('Input how.to.aggregate should be one of:\n\n',
                paste0(possible.values, collapse = '\n')))
  }

  # check for NA
  if (any(is.na(tickers))) {
    my.msg <- paste0('Found NA value in ticker vector. ',
                     'You need to remove it before running BatchGetSymbols.')
    stop(my.msg)
  }

  possible.values <- c('daily', 'weekly', 'monthly', 'yearly')
  if (!any(freq.data %in% possible.values)) {
    stop(paste0('Input freq.data should be one of:\n\n',
                paste0(possible.values, collapse = '\n')))
  }

  # check date class
  first.date <- as.Date(first.date)
  last.date <- as.Date(last.date)
  if (class(first.date) != 'Date') {
    stop('ERROR: Input first.date should be of class Date')
  }
  if (class(last.date) != 'Date') {
    stop('ERROR: Input last.date should be of class Date')
  }

  if (last.date <= first.date) {
    stop('The last.date is lower (less recent) than or equal to first.date. Check your dates!')
  }

  # check tickers
  if (!is.null(tickers)) {
    tickers <- as.character(tickers)
    if (class(tickers) != 'character') {
      stop('The input tickers should be a character object.')
    }
  }

  # check threshold
  if ( (thresh.bad.data < 0) | (thresh.bad.data > 1) ) {
    stop('Input thresh.bad.data should be a proportion between 0 and 1')
  }

  # build tickers.src (google tickers have : in their name)
  tickers.src <- ifelse(stringr::str_detect(tickers, ':'), 'google', 'yahoo')

  if (any(tickers.src == 'google')) {
    my.msg <- paste0('Google is no longer providing price data. ',
                     'You should be using tickers from Yahoo Finance.')
    stop(my.msg)
  }

  # fix for dates with google finance data
  # details: http://stackoverflow.com/questions/20472376/quantmod-empty-dates-in-getsymbols-from-google
  if (any(tickers.src == 'google')) {
    suppressWarnings({
      invisible(Sys.setlocale("LC_MESSAGES", "C"))
      invisible(Sys.setlocale("LC_TIME", "C"))
    })
  }

  # check if using do.parallel = TRUE
  # 20220501 Yahoo finance started setting limits to api calls, which
  # invalidates the use of any parallel computation
  if (do.parallel) {
    my_message <- stringr::str_glue(
      "Since 2022-04-25, Yahoo Finance started to set limits to api calls, ",
      "resulting in 401 errors. When using parallel computations for fetching ",
      "data, the limit is reached easily. That said, the parallel option is now",
      " disabled by default. Please set do.parallel = FALSE to use this function.",
      "\n\n",
      "Returning empty dataframe.")

    cli::cli_alert_danger(my_message)

    return(data.frame())
  }

  # disable dplyr group message
  options(dplyr.summarise.inform = FALSE)

  # first screen msgs
  if (!be.quiet) {
    message('\nRunning BatchGetSymbols for:', appendLF = FALSE)
    message('\n   tickers = ', paste0(tickers, collapse = ', '), appendLF = FALSE)
    message('\n   Downloading data for benchmark ticker', appendLF = FALSE)
  }

  # detect if bench.src is google or yahoo (google tickers have : in their name)
  bench.src <- ifelse(stringr::str_detect(bench.ticker, ':'), 'google', 'yahoo')

  df.bench <- myGetSymbols(ticker = bench.ticker,
                           i.ticker = 1,
                           length.tickers = 1,
                           src = bench.src,
                           first.date = first.date,
                           last.date = last.date,
                           do.cache = do.cache,
                           cache.folder = cache.folder,
                           be.quiet = be.quiet)

  # run fetching function for all tickers
  l.args <- list(ticker = tickers,
                 i.ticker = seq_along(tickers),
                 length.tickers = length(tickers),
                 src = tickers.src,
                 first.date = first.date,
                 last.date = last.date,
                 do.cache = do.cache,
                 cache.folder = cache.folder,
                 df.bench = rep(list(df.bench), length(tickers)),
                 thresh.bad.data = thresh.bad.data,
                 be.quiet = be.quiet)

  if (!do.parallel) {
    my.l <- purrr::pmap(.l = l.args, .f = myGetSymbols)
  } else {
    # find number of used cores
    formals.parallel <- formals(future::plan())
    used.workers <- formals.parallel$workers

    available.cores <- future::availableCores()

    if (!be.quiet) {
      message(paste0('\nRunning parallel BatchGetSymbols with ', used.workers,
                     ' cores (', available.cores, ' available)'),
              appendLF = FALSE)
      message('\n\n', appendLF = FALSE)
    }

    # test if plan() was called
    msg <- utils::capture.output(future::plan())
    flag <- stringr::str_detect(msg[1], 'sequential')

    if (flag) {
      stop(paste0('When using do.parallel = TRUE, you need to call future::plan() to configure your parallel settings. \n',
                  'As a suggestion, write the following lines:\n\n',
                  'future::plan(future::multisession, workers = floor(parallel::detectCores()/2))',
                  '\n\n',
                  'The last line should be placed just before calling BatchGetSymbols. ',
                  'Notice it will use half of your available cores so that your OS has some room to breathe.'))
    }

    my.l <- furrr::future_pmap(.l = l.args, .f = myGetSymbols, .progress = TRUE)
  }

  df.tickers <- dplyr::bind_rows(purrr::map(my.l, 1))
  df.control <- dplyr::bind_rows(purrr::map(my.l, 2))

  # remove tickers with bad data
  tickers.to.keep <- df.control$ticker[df.control$threshold.decision == 'KEEP']
  idx <- df.tickers$ticker %in% tickers.to.keep
  df.tickers <- df.tickers[idx, ]

  # do data manipulations
  if (do.complete.data) {
    ticker <- ref.date <- NULL # for cran check: "no visible binding for global..."
    df.tickers <- tidyr::complete(df.tickers, ticker, ref.date)

    l.out <- lapply(split(df.tickers, f = df.tickers$ticker), df.fill.na)
    df.tickers <- dplyr::bind_rows(l.out)
  }

  # change frequency of data
  if (freq.data != 'daily') {
    str.freq <- switch(freq.data,
                       'weekly' = '1 week',
                       'monthly' = '1 month',
                       'yearly' = '1 year')

    # find the first monday (see issue #19)
    # https://github.com/msperlin/BatchGetSymbols/issues/19
    temp_dates <- seq(as.Date(paste0(lubridate::year(min(df.tickers$ref.date)), '-01-01')),
                      as.Date(paste0(lubridate::year(max(df.tickers$ref.date)) + 1, '-12-31')),
                      by = '1 day')

    temp_weekdays <- lubridate::wday(temp_dates, week_start = 1)
    first_idx <- min(which(temp_weekdays == 1))
    first_monday <- temp_dates[first_idx]

    if (freq.data == 'weekly') {
      # make sure it starts on a monday
      week.vec <- seq(first_monday,
                      as.Date(paste0(lubridate::year(max(df.tickers$ref.date)) + 1, '-12-31')),
                      by = str.freq)
    } else {
      # every other case
      week.vec <- seq(as.Date(paste0(lubridate::year(min(df.tickers$ref.date)), '-01-01')),
                      as.Date(paste0(lubridate::year(max(df.tickers$ref.date)) + 1, '-12-31')),
                      by = str.freq)
    }

    df.tickers$time.groups <- cut(x = df.tickers$ref.date, breaks = week.vec, right = FALSE)

    # set NULL vars for CRAN check: "no visible binding..."
    time.groups <- volume <- price.open <- price.close <- price.adjusted <- NULL
    price.high <- price.low <- NULL

    if (how.to.aggregate == 'first') {
      df.tickers <- df.tickers %>%
        group_by(time.groups, ticker) %>%
        summarise(ref.date = min(ref.date),
                  volume = sum(volume, na.rm = TRUE),
                  price.open = first(price.open),
                  price.high = max(price.high),
                  price.low = min(price.low),
                  price.close = first(price.close),
                  price.adjusted = first(price.adjusted)) %>%
        ungroup() %>%
        arrange(ticker, ref.date)
    } else if (how.to.aggregate == 'last') {
      df.tickers <- df.tickers %>%
        group_by(time.groups, ticker) %>%
        summarise(ref.date = min(ref.date),
                  volume = sum(volume, na.rm = TRUE),
                  price.open = first(price.open),
                  price.high = max(price.high),
                  price.low = min(price.low),
                  price.close = last(price.close),
                  price.adjusted = last(price.adjusted)) %>%
        ungroup() %>%
        arrange(ticker, ref.date)
    }

    df.tickers$time.groups <- NULL
  }

  # calculate returns
  df.tickers$ret.adjusted.prices <- calc.ret(df.tickers$price.adjusted,
                                             df.tickers$ticker,
                                             type.return)
  df.tickers$ret.closing.prices <- calc.ret(df.tickers$price.close,
                                            df.tickers$ticker,
                                            type.return)

  # fix for issue with repeated rows (see git issue #16)
  # https://github.com/msperlin/BatchGetSymbols/issues/16
  df.tickers = unique(df.tickers)

  # remove rownames from output (see git issue #18)
  # https://github.com/msperlin/BatchGetSymbols/issues/18
  rownames(df.tickers) <- NULL

  my.l <- list(df.control = df.control,
               df.tickers = df.tickers)

  # check if cache folder is tempdir()
  flag <- stringr::str_detect(cache.folder, pattern = stringr::fixed(tempdir()))
  if (!flag) {
    warning(stringr::str_glue(
      '\nIt seems you are using a non-default cache folder at {cache.folder}. ',
      'Be aware that if any stock event -- split or dividend -- happens ',
      'in between cache files, the resulting aggregate cache data will not correspond to reality, as ',
      'some part of the price data will not be adjusted to the event. ',
      'For safety and reproducibility, my suggestion is to use the cache system only ',
      'for the current session with tempdir(), which is the default option.'))
  }

  # enable dplyr group message
  options(dplyr.summarise.inform = TRUE)

  return(my.l)
}
/scratch/gouwar.j/cran-all/cranData/BatchGetSymbols/R/BatchGetSymbols.R
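A sketch of a non-default call, combining documented arguments (monthly aggregation, log returns and a balanced panel); the tickers and dates are placeholders:

```r
library(BatchGetSymbols)

l.out <- BatchGetSymbols(
  tickers = c("AAPL", "MSFT"),
  first.date = as.Date("2019-01-01"),
  last.date = as.Date("2020-01-01"),
  freq.data = "monthly",        # aggregate daily prices to monthly
  how.to.aggregate = "last",    # keep the last observation of each month
  type.return = "log",          # log returns instead of arithmetic
  do.complete.data = TRUE,      # balanced ticker-date panel
  do.cache = FALSE
)

head(l.out$df.tickers)
```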
#' Function to download the current components of the FTSE100 index from Wikipedia
#'
#' This function scrapes the stocks that constitute the FTSE100 index from the wikipedia page at <https://en.wikipedia.org/wiki/FTSE_100_Index#List_of_FTSE_100_companies>.
#'
#' @inheritParams BatchGetSymbols
#'
#' @return A dataframe that includes a column with the list of tickers of companies that belong to the FTSE100 index
#' @export
#' @import rvest
#' @examples
#' \dontrun{
#' df.FTSE100 <- GetFTSE100Stocks()
#' print(df.FTSE100$tickers)
#' }
GetFTSE100Stocks <- function(do.cache = TRUE,
                             cache.folder = file.path(tempdir(), 'BGS_Cache')) {

  cache.file <- file.path(cache.folder,
                          paste0('FTSE100_Composition_', Sys.Date(), '.rds'))

  if (do.cache) {
    # check if file exists
    flag <- file.exists(cache.file)

    if (flag) {
      df.FTSE100Stocks <- readRDS(cache.file)
      return(df.FTSE100Stocks)
    }
  }

  my.url <- 'https://en.wikipedia.org/wiki/FTSE_100_Index'

  read_html <- 0 # fix for global variable nagging from BUILD
  my.xpath <- '//*[@id="mw-content-text"]/div/table[2]' # old xpath
  my.xpath <- '//*[@id="constituents"]'

  df.FTSE100Stocks <- my.url %>%
    read_html() %>%
    html_nodes(xpath = my.xpath) %>%
    html_table()

  df.FTSE100Stocks <- df.FTSE100Stocks[[1]]

  colnames(df.FTSE100Stocks) <- c('company', 'tickers', 'ICB.sector')

  if (do.cache) {
    if (!dir.exists(cache.folder)) dir.create(cache.folder)
    saveRDS(df.FTSE100Stocks, cache.file)
  }

  return(df.FTSE100Stocks)
}
/scratch/gouwar.j/cran-all/cranData/BatchGetSymbols/R/GetFTSE100Stocks.R
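The tickers column comes straight from Wikipedia, so it holds plain LSE codes. To feed them to BatchGetSymbols via Yahoo Finance you typically need an exchange suffix; a sketch, assuming the usual ".L" suffix Yahoo uses for London-listed stocks and a one-to-one mapping between the scraped codes and Yahoo symbols:

```r
df.FTSE100 <- GetFTSE100Stocks(do.cache = FALSE)

# assumption: Yahoo Finance identifies LSE stocks with a '.L' suffix
yahoo.tickers <- paste0(df.FTSE100$tickers, ".L")
head(yahoo.tickers)
```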
#' Function to download the current components of the SP500 index from Wikipedia
#'
#' This function scrapes the stocks that constitute the SP500 index from the wikipedia page at https://en.wikipedia.org/wiki/List_of_S%26P_500_companies.
#'
#' @inheritParams BatchGetSymbols
#'
#' @return A dataframe that includes a column with the list of tickers of companies that belong to the SP500 index
#' @export
#' @import rvest
#' @examples
#' \dontrun{
#' df.SP500 <- GetSP500Stocks()
#' print(df.SP500$tickers)
#' }
GetSP500Stocks <- function(do.cache = TRUE,
                           cache.folder = file.path(tempdir(), 'BGS_Cache')) {

  cache.file <- file.path(cache.folder,
                          paste0('SP500_Composition_', Sys.Date(), '.rds'))

  if (do.cache) {
    # check if file exists
    flag <- file.exists(cache.file)

    if (flag) {
      df.SP500Stocks <- readRDS(cache.file)
      return(df.SP500Stocks)
    }
  }

  my.url <- 'https://en.wikipedia.org/wiki/List_of_S%26P_500_companies'

  read_html <- 0 # fix for global variable nagging from BUILD
  my.xpath <- '//*[@id="constituents"]'

  df.SP500Stocks <- my.url %>%
    read_html() %>%
    html_nodes(xpath = my.xpath) %>%
    html_table(fill = TRUE)

  df.SP500Stocks <- df.SP500Stocks[[1]]

  colnames(df.SP500Stocks) <- c('Tickers', 'Company', 'SEC.filings', 'GICS.Sector',
                                'GICS.Sub.Industry', 'HQ.Location', 'Date.First.Added',
                                'CIK', 'Founded')

  if (do.cache) {
    if (!dir.exists(cache.folder)) dir.create(cache.folder)
    saveRDS(df.SP500Stocks, cache.file)
  }

  return(df.SP500Stocks)
}
/scratch/gouwar.j/cran-all/cranData/BatchGetSymbols/R/GetSP500Stocks.R
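Since the scraped table keeps the GICS columns, it can also be used to subset the index before downloading anything; a short sketch:

```r
df.SP500 <- GetSP500Stocks(do.cache = FALSE)

# subset the index composition by sector before any price download
idx <- df.SP500$GICS.Sector == "Information Technology"
tech.tickers <- df.SP500$Tickers[idx]
length(tech.tickers)
```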
#' Function to download the current components of the Ibovespa index from the Bovespa website
#'
#' This function scrapes the stocks that constitute the Ibovespa index from the Bovespa website at http://bvmf.bmfbovespa.com.br/indices/ResumoCarteiraTeorica.aspx?Indice=IBOV&idioma=pt-br.
#'
#' @param max.tries Maximum number of attempts to download the data
#' @inheritParams BatchGetSymbols
#'
#' @return A dataframe that includes a column with the list of tickers of companies that belong to the Ibovespa index
#' @export
#' @examples
#' \dontrun{
#' df.ibov <- GetIbovStocks()
#' print(df.ibov$tickers)
#' }
GetIbovStocks <- function(do.cache = TRUE,
                          cache.folder = file.path(tempdir(), 'BGS_Cache'),
                          max.tries = 10) {

  # warning note:
  # https://github.com/msperlin/BatchGetSymbols/issues/25
  warning(paste0("IBOV data is no longer available from the exchange site. ",
                 "If you know a different and RELIABLE source of Ibov composition, let me know at ",
                 "<https://github.com/msperlin/BatchGetSymbols/issues/25>. ",
                 "Also, you can manually download a csv file with the current composition from B3 at ",
                 "<https://www.b3.com.br/pt_br/market-data-e-indices/indices/indices-amplos/indice-ibovespa-ibovespa-composicao-da-carteira.htm>"))

  cache.file <- file.path(cache.folder,
                          paste0('Ibov_Composition_', Sys.Date(), '.rds'))

  # get list of ibovespa's tickers from website
  if (do.cache) {
    # check if file exists
    flag <- file.exists(cache.file)

    if (flag) {
      df.ibov.comp <- readRDS(cache.file)
      return(df.ibov.comp)
    }
  }

  for (i.try in seq(max.tries)) {
    myUrl <- 'http://bvmf.bmfbovespa.com.br/indices/ResumoCarteiraTeorica.aspx?Indice=IBOV&idioma=pt-br'

    df.ibov.comp <- as.data.frame(XML::readHTMLTable(myUrl))

    Sys.sleep(0.5)

    if (nrow(df.ibov.comp) > 0) break
  }

  names(df.ibov.comp) <- c('tickers', 'ticker.desc', 'type.stock',
                           'quantity', 'percentage.participation')

  # quantities use '.' as thousands separator; participation uses ',' as decimal mark
  df.ibov.comp$quantity <- as.numeric(stringr::str_replace_all(df.ibov.comp$quantity,
                                                               stringr::fixed('.'), ''))
  df.ibov.comp$percentage.participation <-
    as.numeric(stringr::str_replace_all(df.ibov.comp$percentage.participation,
                                        stringr::fixed(','), '.'))

  df.ibov.comp$ref.date <- Sys.Date()
  df.ibov.comp$tickers <- as.character(df.ibov.comp$tickers)

  if (do.cache) {
    if (!dir.exists(cache.folder)) dir.create(cache.folder)
    saveRDS(df.ibov.comp, cache.file)
  }

  return(df.ibov.comp)
}
/scratch/gouwar.j/cran-all/cranData/BatchGetSymbols/R/Get_Ibov_Stocks.R
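When the scrape does work, the returned tickers are plain B3 codes; Yahoo Finance expects a ".SA" suffix for Brazilian stocks, so a typical follow-up looks like this (the suffix convention is Yahoo's, not this function's):

```r
df.ibov <- GetIbovStocks(do.cache = FALSE)

# assumption: B3 tickers need the '.SA' suffix to be recognized by Yahoo Finance
yahoo.tickers <- paste0(df.ibov$tickers, ".SA")
head(yahoo.tickers)
```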
#' Fix name of ticker
#'
#' Removes bad symbols from names of tickers. This is useful for naming files with the cache system.
#'
#' @param ticker.in A bad ticker name
#' @return A good ticker name
#' @export
#' @examples
#' bad.ticker <- '^GSPC'
#' good.ticker <- fix.ticker.name(bad.ticker)
#' good.ticker
fix.ticker.name <- function(ticker.in) {
  ticker.in <- stringr::str_replace_all(ticker.in, stringr::fixed('.'), '')
  ticker.in <- stringr::str_replace_all(ticker.in, stringr::fixed('^'), '')
  return(ticker.in)
}

#' Get clean data from yahoo/google
#'
#' @param src Source of data (yahoo or google)
#' @inheritParams BatchGetSymbols
#'
#' @return A dataframe with the cleaned data
#' @export
#'
#' @examples
#' df.sp500 <- get.clean.data('^GSPC',
#'                            first.date = as.Date('2010-01-01'),
#'                            last.date = as.Date('2010-02-01'))
get.clean.data <- function(tickers, src = 'yahoo', first.date, last.date) {

  # don't push luck with yahoo servers
  # No problem in my testings, so far. You can safely leave it unrestricted
  #Sys.sleep(0.5)

  # set empty df for errors
  df.out <- data.frame()

  suppressMessages({
    suppressWarnings({
      try(df.out <- quantmod::getSymbols(Symbols = tickers,
                                         src = src,
                                         from = first.date,
                                         to = last.date,
                                         auto.assign = FALSE), silent = TRUE)
    })
  })

  if (nrow(df.out) == 0) return(df.out)

  df.out <- as.data.frame(df.out[!duplicated(zoo::index(df.out))])

  # adjust df for difference of columns from yahoo and google
  if (src == 'google') {
    colnames(df.out) <- c('price.open', 'price.high', 'price.low',
                          'price.close', 'volume')
    df.out$price.adjusted <- NA
  } else {
    colnames(df.out) <- c('price.open', 'price.high', 'price.low',
                          'price.close', 'volume', 'price.adjusted')
  }

  # get a nice column for dates and tickers
  df.out$ref.date <- as.Date(rownames(df.out))
  df.out$ticker <- tickers

  # remove rownames
  rownames(df.out) <- NULL

  # remove rows with NA
  idx <- !is.na(df.out$price.adjusted)
  df.out <- df.out[idx, ]

  if (nrow(df.out) == 0) return('Error in download')

  return(df.out)
}

#' Transforms a dataframe in the long format to a list of dataframes in the wide format
#'
#' @param df.tickers Dataframe in the long format
#'
#' @return A list with dataframes in the wide format
#' @export
#'
#' @examples
#'
#' my.f <- system.file( 'extdata/ExampleData.rds', package = 'BatchGetSymbols' )
#' df.tickers <- readRDS(my.f)
#' l.wide <- reshape.wide(df.tickers)
#' l.wide
reshape.wide <- function(df.tickers) {

  cols.to.keep <- c('ref.date', 'ticker')
  my.cols <- setdiff(names(df.tickers), cols.to.keep)

  fct.format.wide <- function(name.in, df.tickers) {
    temp.df <- df.tickers[, c('ref.date', 'ticker', name.in)]

    ticker <- NULL # fix for CHECK: "no visible binding..."
    temp.df.wide <- tidyr::spread(temp.df, ticker, name.in)
    return(temp.df.wide)
  }

  l.out <- lapply(my.cols, fct.format.wide, df.tickers = df.tickers)
  names(l.out) <- my.cols

  return(l.out)
}

#' Function to calculate returns from a price and ticker vector
#'
#' Created so that a return column is added to a dataframe with prices in the long (tidy) format.
#'
#' @param P Price vector
#' @param tickers Ticker of symbols (useful if working with a long dataframe)
#' @inheritParams BatchGetSymbols
#'
#' @return A vector of returns
#' @export
#'
#' @examples
#' P <- c(1,2,3)
#' R <- calc.ret(P)
calc.ret <- function(P,
                     tickers = rep('ticker', length(P)),
                     type.return = 'arit') {

  ret <- switch(type.return,
                'arit' = P/dplyr::lag(P) - 1,
                'log' = log(P/dplyr::lag(P)))

  # returns across different tickers are not meaningful, set them to NA
  idx <- (tickers != dplyr::lag(tickers))
  ret[idx] <- NA

  return(ret)
}

#' Replaces NA values in dataframe for closest price
#'
#' Helper function for BatchGetSymbols. Replaces NA values and returns the fixed dataframe.
#'
#' @param df.in Dataframe to be fixed
#'
#' @return A fixed dataframe.
#' @export
#'
#' @examples
#'
#' df <- data.frame(price.adjusted = c(NA, 10, 11, NA, 12, 12.5, NA),
#'                  volume = c(1, 10, 0, 2, 0, 1, 5))
#'
#' df.fixed.na <- df.fill.na(df)
#'
df.fill.na = function(df.in) {

  # find NAs or volume == 0
  idx.na <- which(is.na(df.in$price.adjusted) | df.in$volume == 0)

  if (length(idx.na) == 0) return(df.in)

  idx.not.na <- which(!is.na(df.in$price.adjusted))

  cols.to.adjust <- c("price.open", "price.high", "price.low",
                      "price.close", "price.adjusted")
  cols.to.adjust <- cols.to.adjust[cols.to.adjust %in% names(df.in)]

  # function for finding closest price
  fct.find.min.dist <- function(x, vec.comp) {
    if (x < min(vec.comp)) return(min(vec.comp))

    my.dist <- x - vec.comp
    my.dist <- my.dist[my.dist > 0]
    idx <- which.min(my.dist)[1]
    return(vec.comp[idx])
  }

  for (i.col in cols.to.adjust) {
    # adjust for NA by replacing values
    idx.to.use <- sapply(idx.na, fct.find.min.dist, vec.comp = idx.not.na)
    df.in[idx.na, i.col] <- unlist(df.in[idx.to.use, i.col])
  }

  # adjust volume for all NAs
  df.in$volume[idx.na] <- 0

  return(df.in)
}

.onAttach <- function(libname, pkgname) {

  do_color <- crayon::make_style("#FF4141")
  this_pkg <- 'BatchGetSymbols'

  if (interactive()) {
    msg <- paste0('\nWant to learn more about ',
                  do_color(this_pkg),
                  ' and other R packages for Finance and Economics?',
                  '\nThe second edition (2020) of ',
                  do_color('Analyzing Financial and Economic Data with R'),
                  ' is available at\n',
                  do_color('https://www.msperlin.com/afedR/'),
                  "\n\n",
                  "WARNING - Package BatchGetSymbols is **soft-deprecated** and will soon be substituted ",
                  "by yfR <https://github.com/msperlin/yfR>. You can still use BatchGetSymbols, ",
                  "but be aware that it will be removed from CRAN once yfR reaches a stable version and ",
                  "is submitted to CRAN. If you can, start using ",
                  "yfR in your new projects.",
                  '\n\n')
  } else {
    msg <- ''
  }

  packageStartupMessage(msg)
}
/scratch/gouwar.j/cran-all/cranData/BatchGetSymbols/R/Utils.R
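A short round trip through the helpers above: compute returns on a toy price vector covering two tickers, then pivot a long dataframe to the wide format as in the function's own example. The prices here are made up:

```r
# calc.ret() resets the return to NA wherever the ticker changes
P <- c(10, 11, 12, 20, 21)
tk <- c("A", "A", "A", "B", "B")
calc.ret(P, tickers = tk, type.return = "log")

# reshape.wide() returns one wide dataframe per price/return column
my.f <- system.file("extdata/ExampleData.rds", package = "BatchGetSymbols")
df.tickers <- readRDS(my.f)
l.wide <- reshape.wide(df.tickers)
names(l.wide)
```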
#' An improved version of function \code{\link[quantmod]{getSymbols}} from quantmod
#'
#' This is a helper function to \code{\link{BatchGetSymbols}} and it should normally not be called directly. The purpose of this function is to download financial data based on a ticker and a time period.
#' The main difference from \code{\link[quantmod]{getSymbols}} is that it imports the data as a dataframe with properly named columns and saves data locally with the caching system.
#'
#' @param ticker A single ticker to download data
#' @param src The source of the data ('google' or 'yahoo')
#' @param i.ticker An index for the stock that is being downloaded (for cat() purposes)
#' @param length.tickers Total number of stocks being downloaded (also for cat() purposes)
#' @param df.bench Data for benchmark ticker
#' @inheritParams BatchGetSymbols
#'
#' @return A dataframe with the financial data
#'
#' @export
#' @seealso \link[quantmod]{getSymbols} for the base function
#'
#' @examples
#' ticker <- 'FB'
#'
#' first.date <- Sys.Date()-30
#' last.date <- Sys.Date()
#'
#' \dontrun{
#' df.ticker <- myGetSymbols(ticker,
#'                           first.date = first.date,
#'                           last.date = last.date)
#' }
myGetSymbols <- function(ticker,
                         i.ticker,
                         length.tickers,
                         src = 'yahoo',
                         first.date,
                         last.date,
                         do.cache = TRUE,
                         cache.folder = file.path(tempdir(), 'BGS_Cache'),
                         df.bench = NULL,
                         be.quiet = FALSE,
                         thresh.bad.data) {

  if (!be.quiet) {
    message(paste0('\n', ticker, ' | ', src, ' (', i.ticker, '|', length.tickers, ')'),
            appendLF = FALSE)
  }

  # do cache
  if (do.cache) {
    # check if data is in cache files
    my.cache.files <- list.files(cache.folder, full.names = TRUE)

    if (length(my.cache.files) > 0) {
      l.out <- stringr::str_split(tools::file_path_sans_ext(basename(my.cache.files)), '_')

      df.cache.files <- dplyr::tibble(f.name = my.cache.files,
                                      ticker = sapply(l.out, function(x) x[1]),
                                      src = sapply(l.out, function(x) x[2]),
                                      first.date = as.Date(sapply(l.out, function(x) x[3])),
                                      last.date = as.Date(sapply(l.out, function(x) x[4])))
    } else {
      # empty df
      df.cache.files <- dplyr::tibble(f.name = '',
                                      ticker = '',
                                      src = '',
                                      first.date = first.date,
                                      last.date = last.date)
    }

    # check dates
    fixed.ticker <- fix.ticker.name(ticker)

    temp.cache <- dplyr::filter(df.cache.files,
                                ticker == fixed.ticker,
                                src == src)

    if (nrow(temp.cache) > 1) {
      stop(paste0('Found more than one file in cache for ', ticker,
                  '\nYou must manually remove one of \n\n',
                  paste0(temp.cache$f.name, collapse = '\n')))
    }

    if (nrow(temp.cache) != 0) {
      df.cache <- data.frame()
      flag.dates <- TRUE

      if (!be.quiet) {
        message(' | Found cache file', appendLF = FALSE)
      }

      df.cache <- readRDS(temp.cache$f.name)

      # check if data matches
      max.diff.dates <- 0
      flag.dates <- ((first.date - temp.cache$first.date) < -max.diff.dates) |
        ((last.date - temp.cache$last.date) > max.diff.dates)

      df.out <- data.frame()
      if (flag.dates) {
        if (!be.quiet) {
          message(' | Need new data', appendLF = FALSE)
        }

        # download only the missing stretch before the cached window
        flag.date.bef <- ((first.date - temp.cache$first.date) < -max.diff.dates)
        df.out.bef <- data.frame()
        if (flag.date.bef) {
          df.out.bef <- get.clean.data(ticker, src,
                                       first.date,
                                       temp.cache$first.date)
        }

        # and the missing stretch after it
        flag.date.aft <- ((last.date - temp.cache$last.date) > max.diff.dates)
        df.out.aft <- data.frame()
        if (flag.date.aft) {
          df.out.aft <- get.clean.data(ticker, src,
                                       temp.cache$last.date,
                                       last.date)
        }

        df.out <- rbind(df.out.bef, df.out.aft)
      }

      # merge with cache
      df.out <- unique(rbind(df.cache, df.out))

      # sort it
      if (nrow(df.out) > 0) {
        idx <- order(df.out$ticker, df.out$ref.date)
        df.out <- df.out[idx, ]
      }

      # remove old file and save the extended window under a new name
      file.remove(temp.cache$f.name)
      my.f.out <- paste0(fixed.ticker, '_', src, '_',
                         min(c(temp.cache$first.date, first.date)), '_',
                         max(c(temp.cache$last.date, last.date)), '.rds')
      saveRDS(df.out, file = file.path(cache.folder, my.f.out))

      # filter for dates
      ref.date <- NULL
      df.out <- dplyr::filter(df.out,
                              ref.date >= first.date,
                              ref.date <= last.date)

    } else {
      if (!be.quiet) {
        message(' | Not Cached', appendLF = FALSE)
      }

      my.f.out <- paste0(fixed.ticker, '_', src, '_', first.date, '_', last.date, '.rds')

      df.out <- get.clean.data(ticker, src,
                               first.date,
                               last.date)

      # only saves if there is data
      if (nrow(df.out) > 1) {
        if (!be.quiet) {
          message(' | Saving cache', appendLF = FALSE)
        }
        saveRDS(df.out, file = file.path(cache.folder, my.f.out))
      }
    }
  } else {
    df.out <- get.clean.data(ticker, src,
                             first.date,
                             last.date)
  }

  # control for error in download
  if (nrow(df.out) == 0) {
    download.status = 'NOT OK'
    total.obs = 0
    perc.benchmark.dates = 0
    threshold.decision = 'OUT'
    df.out <- data.frame()

    if (!be.quiet) {
      message(' - Error in download..', appendLF = FALSE)
    }
  } else {
    # control for returning data when importing bench ticker
    if (is.null(df.bench)) return(df.out)

    download.status = 'OK'
    total.obs = nrow(df.out)
    perc.benchmark.dates = sum(df.out$ref.date %in% df.bench$ref.date)/length(df.bench$ref.date)

    if (perc.benchmark.dates >= thresh.bad.data) {
      threshold.decision = 'KEEP'
    } else {
      threshold.decision = 'OUT'
    }

    morale.boost <- c(rep(c('OK!', 'Got it!', 'Nice!', 'Good stuff!', 'Looking good!',
                            'Good job!', 'Well done!', 'Feels good!', 'You got it!',
                            'Youre doing good!'), 10),
                      'Boa!', 'Mas bah tche, que coisa linda!',
                      'Mais contente que cusco de cozinheira!',
                      'Feliz que nem lambari de sanga!',
                      'Mais faceiro que guri de bombacha nova!')

    if (!be.quiet) {
      if (threshold.decision == 'KEEP') {
        message(paste0(' - ', 'Got ', scales::percent(perc.benchmark.dates), ' of valid prices | ',
                       sample(morale.boost, 1)),
                appendLF = FALSE)
      } else {
        message(paste0(' - ', 'Got ', scales::percent(perc.benchmark.dates), ' of valid prices | ',
                       'OUT: not enough data (thresh.bad.data = ',
                       scales::percent(thresh.bad.data), ')'),
                appendLF = FALSE)
      }
    }
  }

  # build the control row for both the success and the error branch, so that
  # failed tickers also show up in df.control with download.status = 'NOT OK'
  df.control <- tibble::tibble(ticker = ticker,
                               src = src,
                               download.status,
                               total.obs,
                               perc.benchmark.dates,
                               threshold.decision)

  l.out <- list(df.tickers = df.out,
                df.control = df.control)

  return(l.out)
}
/scratch/gouwar.j/cran-all/cranData/BatchGetSymbols/R/myGetSymbols.R
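Calling the function twice with `do.cache = TRUE` illustrates the caching path above: the first call downloads and writes an .rds file; the second reads it and, if the date window grew, only downloads the missing stretch. A sketch with a placeholder ticker (with `df.bench = NULL`, the function returns the raw dataframe instead of the list):

```r
# first call: downloads and saves a cache file under tempdir()
df1 <- myGetSymbols("MMM", i.ticker = 1, length.tickers = 1,
                    first.date = Sys.Date() - 30, last.date = Sys.Date(),
                    do.cache = TRUE, thresh.bad.data = 0.75)

# second call: hits the cache file instead of Yahoo Finance
df2 <- myGetSymbols("MMM", i.ticker = 1, length.tickers = 1,
                    first.date = Sys.Date() - 30, last.date = Sys.Date(),
                    do.cache = TRUE, thresh.bad.data = 0.75)
```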
## ----example1-----------------------------------------------------------------
if (!require(BatchGetSymbols)) install.packages('BatchGetSymbols')
library(BatchGetSymbols)

# set dates
first.date <- Sys.Date() - 60
last.date <- Sys.Date()
freq.data <- 'daily'
# set tickers
tickers <- c('FB','MMM','PETR4.SA','abcdef')

l.out <- BatchGetSymbols(tickers = tickers,
                         first.date = first.date,
                         last.date = last.date,
                         freq.data = freq.data,
                         cache.folder = file.path(tempdir(),
                                                  'BGS_Cache') ) # cache in tempdir()

## ----example2-----------------------------------------------------------------
print(l.out$df.control)

## ----plot.prices, fig.width=7, fig.height=2.5---------------------------------
library(ggplot2)

p <- ggplot(l.out$df.tickers, aes(x = ref.date, y = price.close))
p <- p + geom_line()
p <- p + facet_wrap(~ticker, scales = 'free_y')
print(p)

## ----example3,eval=FALSE------------------------------------------------------
# library(BatchGetSymbols)
#
# first.date <- Sys.Date()-365
# last.date <- Sys.Date()
#
# df.SP500 <- GetSP500Stocks()
# tickers <- df.SP500$Tickers
#
# l.out <- BatchGetSymbols(tickers = tickers,
#                          first.date = first.date,
#                          last.date = last.date)
#
# print(l.out$df.control)
# print(l.out$df.tickers)
/scratch/gouwar.j/cran-all/cranData/BatchGetSymbols/inst/doc/BatchGetSymbols-vignette.R
---
title: "How to use BatchGetSymbols"
author: "Marcelo Perlin"
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
  %\VignetteIndexEntry{How to use BatchGetSymbols}
  %\VignetteEngine{knitr::rmarkdown}
  %\VignetteEncoding{UTF-8}
---

## Motivation

One of the great things about working in finance is that financial datasets from capital markets are freely available from sources such as Yahoo Finance. This is an excellent feature for building up-to-date content for classes and conducting academic research.

In the past I have used function GetSymbols from the CRAN package [quantmod](https://cran.r-project.org/package=quantmod) in order to download end-of-day trade data for several stocks in the financial market. The problem with using GetSymbols is that it does not aggregate or clean the financial data for several tickers. When using GetSymbols, each stock gets its own `xts` object with different column names, and this makes it harder to store data from several tickers in a single dataframe.

Package BatchGetSymbols is my solution to this problem. Based on a list of tickers and a time period, BatchGetSymbols will download price data from Yahoo Finance and organize it so that you don't need to worry about cleaning it yourself.

## Main features:

- Organizes data in a tabular format, returning prices and returns
- A cache system was implemented in version 2.0, meaning that the data is saved locally and only missing portions of the data are downloaded, if needed.
- All dates are compared to a benchmark ticker such as SP500. You can choose to ignore tickers with a high number of missing dates.
- Users can choose a complete/balanced dataset output. The package uses a benchmark ticker for date comparison (e.g. SP500 - ^GSPC). Days with missing prices and traded volume equal to zero are found, and prices are either set to NA or replaced by the closest available value.
- Allows the choice of the wide format, with tickers as columns
- Users can choose the frequency of the resulting dataset (daily, weekly, monthly, yearly)

## A simple example

As a simple exercise, let's download data for three stocks, Facebook (FB), 3M (MMM) and PETR4.SA (Petrobras), plus abcdef, a ticker I just made up. We will use the last 60 days as the time period. This example will show the simple interface of the package and how it handles invalid tickers.

```{r example1}
if (!require(BatchGetSymbols)) install.packages('BatchGetSymbols')
library(BatchGetSymbols)

# set dates
first.date <- Sys.Date() - 60
last.date <- Sys.Date()
freq.data <- 'daily'
# set tickers
tickers <- c('FB','MMM','PETR4.SA','abcdef')

l.out <- BatchGetSymbols(tickers = tickers,
                         first.date = first.date,
                         last.date = last.date,
                         freq.data = freq.data,
                         cache.folder = file.path(tempdir(),
                                                  'BGS_Cache') ) # cache in tempdir()
```

After downloading the data, we can check the success of the process for each ticker. Notice that the last ticker does not exist in Yahoo Finance and therefore results in an error. All information regarding the download process is provided in the dataframe df.control:

```{r example2}
print(l.out$df.control)
```

Moreover, we can plot the daily closing prices using ggplot2:

```{r plot.prices, fig.width=7, fig.height=2.5}
library(ggplot2)

p <- ggplot(l.out$df.tickers, aes(x = ref.date, y = price.close))
p <- p + geom_line()
p <- p + facet_wrap(~ticker, scales = 'free_y')
print(p)
```

## Downloading data for all tickers in the SP500 index

The package was designed for large scale download of financial data. An example is downloading all stocks in the current composition of the SP500 stock index. The package also includes a function that downloads the current composition of the SP500 index from the internet. By using this function along with BatchGetSymbols, we can easily import end-of-day data for all assets in the index.

In the following code we download data for the SP500 stocks for the last year. The code is not executed in this vignette given its long running time, but you can copy and paste it into its own R script in order to check the results. On my computer it takes around 5 minutes to download the whole dataset.

```{r example3, eval=FALSE}
library(BatchGetSymbols)

first.date <- Sys.Date() - 365
last.date <- Sys.Date()

df.SP500 <- GetSP500Stocks()
tickers <- df.SP500$Tickers

l.out <- BatchGetSymbols(tickers = tickers,
                         first.date = first.date,
                         last.date = last.date)

print(l.out$df.control)
print(l.out$df.tickers)
```
/scratch/gouwar.j/cran-all/cranData/BatchGetSymbols/inst/doc/BatchGetSymbols-vignette.Rmd
---
title: "How to use BatchGetSymbols"
author: "Marcelo Perlin"
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
  %\VignetteIndexEntry{How to use BatchGetSymbols}
  %\VignetteEngine{knitr::rmarkdown}
  %\VignetteEncoding{UTF-8}
---

## Motivation

One of the great things about working in finance is that financial datasets from capital markets are freely available from sources such as Yahoo Finance. This is an excellent feature for building up-to-date content for classes and conducting academic research.

In the past I have used function GetSymbols from the CRAN package [quantmod](https://cran.r-project.org/package=quantmod) in order to download end-of-day trade data for several stocks in the financial market. The problem with using GetSymbols is that it does not aggregate or clean the financial data for several tickers. When using GetSymbols, each stock gets its own `xts` object with different column names, and this makes it harder to store data from several tickers in a single dataframe.

Package BatchGetSymbols is my solution to this problem. Based on a list of tickers and a time period, BatchGetSymbols will download price data from Yahoo Finance and organize it so that you don't need to worry about cleaning it yourself.

## Main features:

- Organizes data in a tabular format, returning prices and returns
- A cache system was implemented in version 2.0, meaning that the data is saved locally and only missing portions of the data are downloaded, if needed.
- All dates are compared to a benchmark ticker such as SP500. You can choose to ignore tickers with a high number of missing dates.
- Users can choose a complete/balanced dataset output. The package uses a benchmark ticker for date comparison (e.g. SP500 - ^GSPC). Days with missing prices and traded volume equal to zero are found, and prices are either set to NA or replaced by the closest available value.
- Allows the choice of the wide format, with tickers as columns
- Users can choose the frequency of the resulting dataset (daily, weekly, monthly, yearly)

## A simple example

As a simple exercise, let's download data for three stocks, Facebook (FB), 3M (MMM) and PETR4.SA (Petrobras), plus abcdef, a ticker I just made up. We will use the last 60 days as the time period. This example will show the simple interface of the package and how it handles invalid tickers.

```{r example1}
if (!require(BatchGetSymbols)) install.packages('BatchGetSymbols')
library(BatchGetSymbols)

# set dates
first.date <- Sys.Date() - 60
last.date <- Sys.Date()
freq.data <- 'daily'
# set tickers
tickers <- c('FB','MMM','PETR4.SA','abcdef')

l.out <- BatchGetSymbols(tickers = tickers,
                         first.date = first.date,
                         last.date = last.date,
                         freq.data = freq.data,
                         cache.folder = file.path(tempdir(),
                                                  'BGS_Cache') ) # cache in tempdir()
```

After downloading the data, we can check the success of the process for each ticker. Notice that the last ticker does not exist in Yahoo Finance and therefore results in an error. All information regarding the download process is provided in the dataframe df.control:

```{r example2}
print(l.out$df.control)
```

Moreover, we can plot the daily closing prices using ggplot2:

```{r plot.prices, fig.width=7, fig.height=2.5}
library(ggplot2)

p <- ggplot(l.out$df.tickers, aes(x = ref.date, y = price.close))
p <- p + geom_line()
p <- p + facet_wrap(~ticker, scales = 'free_y')
print(p)
```

## Downloading data for all tickers in the SP500 index

The package was designed for large scale download of financial data. An example is downloading all stocks in the current composition of the SP500 stock index. The package also includes a function that downloads the current composition of the SP500 index from the internet. By using this function along with BatchGetSymbols, we can easily import end-of-day data for all assets in the index.

In the following code we download data for the SP500 stocks for the last year. The code is not executed in this vignette given its long running time, but you can copy and paste it into its own R script in order to check the results. On my computer it takes around 5 minutes to download the whole dataset.

```{r example3, eval=FALSE}
library(BatchGetSymbols)

first.date <- Sys.Date() - 365
last.date <- Sys.Date()

df.SP500 <- GetSP500Stocks()
tickers <- df.SP500$Tickers

l.out <- BatchGetSymbols(tickers = tickers,
                         first.date = first.date,
                         last.date = last.date)

print(l.out$df.control)
print(l.out$df.tickers)
```
/scratch/gouwar.j/cran-all/cranData/BatchGetSymbols/vignettes/BatchGetSymbols-vignette.Rmd
#' @title Load exported R data objects.
#'
#' @description
#' Loads exported \code{RData} object files in the \dQuote{exports} subdirectory of your \code{file.dir}
#' and assigns the objects to the global environment.
#'
#' @template arg_reg
#' @param what [\code{character}]\cr
#'   Names of objects to load. Defaults to all objects exported.
#' @return [\code{character}]. Invisibly returns a character vector of loaded objects.
#' @family exports
#' @export
loadExports = function(reg, what = NULL) {
  checkRegistry(reg, writeable = FALSE)
  if (!is.null(what))
    assertCharacter(what, any.missing = FALSE)
  path = getExportDir(reg$file.dir)
  fns = list.files(path, pattern = "\\.RData$", all.files = TRUE)
  keys = gsub("\\.RData$", "", fns)

  if (!is.null(what)) {
    i = which(keys %in% what)
    fns = fns[i]
    keys = keys[i]
  }

  if (length(fns) > 0L) {
    messagef("Loading RData files: %s", collapse(keys))
    for (i in seq_along(fns))
      assign(keys[i], load2(file.path(path, fns[i]), envir = .GlobalEnv))
  }
  invisible(keys)
}

#' @title Export R object to be available on the slaves.
#'
#' @description
#' Saves objects as \code{RData} files in the \dQuote{exports} subdirectory of your \code{file.dir}
#' to be later loaded on the slaves.
#'
#' @template arg_reg
#' @param ... [any]\cr
#'   Objects to export. You must provide a valid name.
#' @param li [\code{list}]\cr
#'   More objects to export provided as a named list.
#' @param overwrite [\code{logical(1)}]\cr
#'   If set to \code{FALSE} (default), exported objects are protected from being overwritten
#'   by multiple calls of this function. Setting this to \code{TRUE} disables this check.
#' @return [\code{character}]. Invisibly returns a character vector of exported objects.
#' @family exports
#' @export
batchExport = function(reg, ..., li = list(), overwrite = FALSE) {
  checkRegistry(reg, writeable = FALSE)
  ddd = list(...)
  assertList(li, names = "strict")
  assertList(ddd, names = "strict")
  assertFlag(overwrite)

  keys = c(names(li), names(ddd))
  dup = anyDuplicated(keys)
  if (dup > 0L)
    stopf("Object for export provided more than once: '%s'", keys[dup])

  path = getExportDir(reg$file.dir)
  if (!overwrite) {
    fns = list.files(path, pattern = "\\.RData$", all.files = TRUE)
    old.keys = gsub("\\.RData$", "", fns)
    collision = which.first(keys %in% old.keys)
    if (length(collision) > 0L)
      stopf("Object named '%s' already exported and 'overwrite' is set to FALSE", keys[collision])
  }

  objs = list2env(c(li, ddd))
  for (i in seq_along(keys))
    save(list = keys[i], envir = objs, file = file.path(path, sprintf("%s.RData", keys[i])))
  invisible(keys)
}

#' @title Unload exported R objects.
#'
#' @description
#' Removes \code{RData} files from the \dQuote{exports} subdirectory of your \code{file.dir}
#' and thereby prevents loading on the slave.
#'
#' @template arg_reg
#' @param what [\code{character}]\cr
#'   Names of objects to remove.
#' @return [\code{character}]. Invisibly returns a character vector of unexported objects.
#' @family exports
#' @export
batchUnexport = function(reg, what) {
  checkRegistry(reg, writeable = FALSE)
  assertCharacter(what, any.missing = FALSE)

  path = getExportDir(reg$file.dir)
  fns = list.files(path, pattern = "\\.RData$", all.files = TRUE)
  keys = gsub("\\.RData$", "", fns)

  i = which(keys %in% what)
  file.remove(file.path(path, fns[i]))
  invisible(keys[i])
}
/scratch/gouwar.j/cran-all/cranData/BatchJobs/R/Exports.R
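A minimal sketch of the export round trip on a throwaway registry, assuming BatchJobs is installed and configured with its defaults:

```r
library(BatchJobs)
reg <- makeRegistry(id = "exports_demo", file.dir = tempfile())

# save objects as RData files under <file.dir>/exports
batchExport(reg, my.const = 42, li = list(my.vec = 1:3))

# later (e.g. on a slave) pull them into the global environment
loadExports(reg)

# and remove one of them again
batchUnexport(reg, what = "my.const")
```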
#' Creates a job description.
#'
#' Usually you will not do this manually.
#' Every object is a list that contains the passed arguments of the constructor.
#'
#' @param id [\code{integer(1)}]\cr
#'   Job id, determined by DB autoincrement.
#'   Default is \code{NA}.
#' @param fun [\code{function}]\cr
#'   Job function to apply on parameters.
#' @param fun.id [\code{character(1)}]\cr
#'   Id used to store function on disk.
#'   Default is \code{digest(fun)}.
#' @param pars [\code{list}]\cr
#'   Parameter list for job function.
#' @param name [\code{character(1)}]\cr
#'   Alias name for this job.
#' @param seed [\code{integer(1)}]\cr
#'   Random seed for job.
#' @aliases Job
#' @export
makeJob = function(id = NA_integer_, fun, fun.id = digest(fun), pars, name, seed) {
  setClasses(list(id = id, fun = fun, fun.id = fun.id, pars = pars,
    name = name, seed = seed), "Job")
}

#' Get number of jobs in registry.
#' @template arg_reg
#' @return [\code{integer(1)}].
#' @export
getJobNr = function(reg) {
  checkRegistry(reg, writeable = FALSE)
  dbGetJobCount(reg)
}

#' Get ids of jobs in registry.
#' @template arg_reg
#' @return [\code{integer}].
#' @export
getJobIds = function(reg) {
  checkRegistry(reg, writeable = FALSE)
  dbGetJobIds(reg)
}

#' @export
print.Job = function(x, ...) {
  cat("BatchJobs job:\n")
  catf("  Job id: %s", x$id)
  catf("  Fun id: %s", x$fun.id)
  if (!is.null(x$fun) && is.function(x$fun))
    catf("  Fun formals: %s", collapse(names(formals(x$fun))))
  catf("  Name: %s", x$name)
  catf("  Seed: %i", x$seed)
  catf("  Pars: %s", convertToShortString(x$pars))
}
/scratch/gouwar.j/cran-all/cranData/BatchJobs/R/Job.R
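Jobs are normally created by higher-level functions such as batchMap(); afterwards the accessors above report on them. A minimal sketch:

```r
library(BatchJobs)
reg <- makeRegistry(id = "job_demo", file.dir = tempfile())

# define three jobs without running them
batchMap(reg, function(x) x^2, 1:3)

getJobNr(reg)   # 3
getJobIds(reg)  # ids assigned by the DB autoincrement
```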
#' @title Add packages to registry.
#'
#' @description
#' Mutator function for \code{packages} in \code{\link{makeRegistry}}.
#'
#' @template arg_reg
#' @param packages [\code{character}]\cr
#'   Packages to add to registry.
#' @template ret_reg_mut
#' @family exports
#' @export
addRegistryPackages = function(reg, packages) {
  checkRegistry(reg, writeable = TRUE)
  assertCharacter(packages, any.missing = FALSE)
  # only add packages which are not registered yet
  packages = setdiff(packages, names(reg$packages))
  packages = setNames(lapply(packages, function(pkg) list(version = packageVersion(pkg))), packages)
  reg$packages = c(reg$packages, packages)
  saveRegistry(reg)
}

#' @title Remove packages from registry.
#'
#' @description
#' Mutator function for \code{packages} in \code{\link{makeRegistry}}.
#'
#' @template arg_reg
#' @param packages [\code{character}]\cr
#'   Packages to remove from registry.
#' @template ret_reg_mut
#' @family exports
#' @export
removeRegistryPackages = function(reg, packages) {
  checkRegistry(reg, writeable = TRUE)
  assertCharacter(packages, any.missing = FALSE)
  # mandatory packages (e.g. BatchJobs itself) may not be removed
  mandatory = names(filterNull(extractSubList(reg$packages, "mandatory")))
  reg$packages = reg$packages[names(reg$packages) %nin% setdiff(packages, mandatory)]
  saveRegistry(reg)
}

#' @title Set packages for a registry.
#'
#' @description
#' Mutator function for \code{packages} in \code{\link{makeRegistry}}.
#'
#' @template arg_reg
#' @param packages [\code{character}]\cr
#'   Character vector of package names to load.
#' @template ret_reg_mut
#' @family exports
#' @export
setRegistryPackages = function(reg, packages) {
  checkRegistry(reg, writeable = TRUE)
  assertCharacter(packages, any.missing = FALSE)
  # mandatory packages are always kept
  mandatory = names(filterNull(extractSubList(reg$packages, "mandatory")))
  packages = unique(c(mandatory, packages))
  reg$packages = setNames(lapply(packages, function(pkg) list(version = packageVersion(pkg))), packages)
  saveRegistry(reg)
}
/scratch/gouwar.j/cran-all/cranData/BatchJobs/R/Packages.R
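A short sketch of the three mutators on a fresh registry, assuming the named packages are installed locally:

```r
library(BatchJobs)
reg <- makeRegistry(id = "pkg_demo", file.dir = tempfile())

reg <- addRegistryPackages(reg, "MASS")      # record MASS and its version
reg <- removeRegistryPackages(reg, "MASS")   # drop it again
reg <- setRegistryPackages(reg, "stats")     # replace all non-mandatory packages
names(reg$packages)                          # BatchJobs is kept as mandatory
```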
makeRegistryInternal = function(id, file.dir, sharding, work.dir, multiple.result.files, seed, packages, src.dirs, src.files) { checkIdValid(id, allow.minus = FALSE) assertString(file.dir) checkDir(file.dir, create = TRUE, check.empty = TRUE, check.posix = TRUE, msg = TRUE) file.dir = sanitizePath(file.dir, make.absolute = TRUE) if (missing(work.dir)) work.dir = getwd() else assertString(work.dir) checkDir(work.dir, check.posix = TRUE) work.dir = sanitizePath(work.dir, make.absolute = TRUE) assertFlag(sharding) assertFlag(multiple.result.files) seed = if (missing(seed)) getRandomSeed() else asInt(seed) assertCharacter(packages, any.missing = FALSE) packages = unique(c("BatchJobs", packages)) requirePackages(packages, stop = TRUE, suppress.warnings = TRUE, default.method = "attach") assertCharacter(src.dirs, any.missing = FALSE) src.dirs = sanitizePath(src.dirs, make.absolute = FALSE) assertCharacter(src.files, any.missing = FALSE) src.files = sanitizePath(src.files, make.absolute = FALSE) # make paths absolute to be sure. otherwise cfSSH wont work for example # also check the dirs # file dir # job dir job.dir = getJobParentDir(file.dir) checkDir(job.dir, create = TRUE, check.empty = TRUE) # fun dir fun.dir = getFunDir(file.dir) checkDir(fun.dir, create = TRUE, check.empty = TRUE) # resources, pending, exports, work.dir checkDir(getResourcesDir(file.dir), create = TRUE, check.empty = TRUE) checkDir(getPendingDir(file.dir), create = TRUE, check.empty = TRUE) checkDir(getExportDir(file.dir), create = TRUE, check.empty = TRUE) sourceRegistryFilesInternal(work.dir, src.dirs, src.files) packages = setNames(lapply(packages, function(pkg) list(version = packageVersion(pkg))), packages) packages$BatchJobs$mandatory = TRUE conf = getConfig() setClasses(list( id = id, read.only = FALSE, version = R.version, RNGkind = RNGkind(), db.driver = conf$db.driver, db.options = conf$db.options, seed = seed, file.dir = file.dir, sharding = sharding, work.dir = work.dir, src.dirs = src.dirs, src.files = src.files, multiple.result.files = multiple.result.files, packages = packages ), "Registry") } #' Construct a registry object. #' #' Note that if you don't want links in your paths (\code{file.dir}, \code{work.dir}) to get resolved and have #' complete control over the way the path is used internally, pass an absolute path which begins with \dQuote{/}. #' #' Every object is a list that contains the passed arguments of the constructor. # #' @param id [\code{character(1)}]\cr #' Name of registry. Displayed e.g. in mails or in cluster queue. #' @param file.dir [\code{character(1)}]\cr #' Path where files regarding the registry / jobs should be saved. #' Default is \dQuote{<id>-files} in current working directory if \code{id} is set. #' @param sharding [\code{logical(1)}]\cr #' Enable sharding to distribute result files into different subdirectories? #' Important if you have many experiments. #' Default is \code{TRUE}. #' @param work.dir [\code{character(1)}]\cr #' Working directory for R process when experiment is executed. #' Default is the current working directory when registry is created. #' @param multiple.result.files [\code{logical(1)}]\cr #' Should a result file be generated for every list element of the #' returned list of the job function? #' Note that the function provided to \code{\link{batchMap}} or #' \code{\link{batchReduce}} must return a named list if this is set to \code{TRUE}. #' The result file will be named \dQuote{<id>-result-<element name>.RData} #' instead of \dQuote{<id>-result.RData}. 
#' Default is \code{FALSE}. #' @param seed [\code{integer(1)}]\cr #' Start seed for experiments. The first experiment in the registry will use this #' seed, for the subsequent ones the seed is incremented by 1. #' Default is a random number from 1 to \code{.Machine$integer.max/2}. #' @param packages [\code{character}]\cr #' Packages that will always be loaded on each node. #' Default is \code{character(0)}. #' @param src.dirs [\code{character}]\cr #' Directories containing R scripts #' to be sourced on registry load (both on slave and master). #' Files not matching the pattern \dQuote{\\.[Rr]$} are ignored. #' Useful if you have many helper functions that are needed during the execution of your jobs. #' These files should only contain function definitions and no executable code. #' Default is \code{character(0)}. #' @param src.files [\code{character}]\cr #' R scripts files #' to be sourced on registry load (both on slave and master). #' Useful if you have many helper functions that are needed during the execution of your jobs. #' These files should only contain function and constant definitions and no long running, executable code. #' These paths are considered to be relative to your \code{work.dir}. #' As a last remedy in problematic cases you can use absolute paths, by passing paths that #' start with \dQuote{/}, see the comment about \code{file.dir} and \code{work.dir} above, #' where we allow the same thing. #' Note that this is a less portable approach and therefore usually a less good idea. #' Default is \code{character(0)}. #' @param skip [\code{logical(1)}]\cr #' Skip creation of a new registry if a registry is found in \code{file.dir}. #' Defaults to \code{TRUE}. #' @return [\code{\link{Registry}}] #' @aliases Registry #' @export #' @examples #' reg = makeRegistry(id = "BatchJobsExample", file.dir = tempfile(), seed = 123) #' print(reg) makeRegistry = function(id, file.dir, sharding = TRUE, work.dir, multiple.result.files = FALSE, seed, packages = character(0L), src.dirs = character(0L), src.files = character(0L), skip = TRUE) { if (missing(file.dir)) file.dir = file.path(getwd(), paste0(id, "-files")) assertFlag(skip) if (skip && isRegistryDir(file.dir)) return(loadRegistry(file.dir = file.dir)) reg = makeRegistryInternal(id, file.dir, sharding, work.dir, multiple.result.files, seed, packages, src.dirs, src.files) dbCreateJobStatusTable(reg) dbCreateJobDefTable(reg) saveRegistry(reg) reg } #' @export print.Registry = function(x, ...) { cat("Job registry: ", x$id, "\n") cat(" Number of jobs: ", dbGetJobCount(x), "\n") cat(" Files dir:", x$file.dir, "\n") cat(" Work dir:", x$work.dir, "\n") cat(" Multiple result files:", x$multiple.result.files, "\n") cat(" Seed:", x$seed, "\n") cat(" Required packages:", collapse(names(x$packages), ", "), "\n") } saveRegistry = function(reg) { fn = getRegistryFilePath(reg$file.dir) info("Saving registry: %s", fn) save(file = fn, reg) reg } isRegistryDir = function(dir) { isDirectory(dir) && file.exists(getRegistryFilePath(dir)) } checkRegistry = function(reg, strict = FALSE, writeable = TRUE) { cl = class(reg) expected = "Registry" if (strict) { if (head(cl, 1L) != expected) stopf("Registry class mismatch: Expected argument with first class '%s'", expected) } else { if (expected %nin% cl) stopf("Registry class mismatch: Expected argument of class '%s'", expected) } if (writeable && isTRUE(reg$read.only)) stop("Registry is read-only. Operation not permitted.") invisible(TRUE) } #' Remove a registry object. 
#'
#' If there are no live/running jobs, the registry will be closed
#' and all of its files will be removed from the file system.
#' If there are live/running jobs, an informative error is generated.
#' The default is to prompt the user for confirmation.
#'
#' @template arg_reg
#' @param ask [\code{character(1)}]\cr
#'   If \code{"yes"} the user is prompted to confirm the action.
#'   If trying to prompt the user this way in a non-interactive
#'   session, then an informative error is generated.
#'   If \code{"no"}, the registry will be removed without
#'   further confirmation.
#'
#' @return [\code{logical[1]}]
#'
#' @export
removeRegistry = function(reg, ask = c("yes", "no")) {
  checkRegistry(reg, writeable = TRUE)
  syncRegistry(reg)

  ask = match.arg(ask)

  if (ask == "yes") {
    if (!interactive())
      stopf("removeRegistry(..., ask = \"yes\") only works in interactive sessions.")
    prompt = sprintf("Are you sure you wish to delete BatchJobs registry '%s' and all of its files in directory '%s'? [y/N]: ", reg$id, reg$file.dir)
    ans = 2L
    repeat {
      ans = tolower(readline(prompt))
      ans = gsub("[ ]", "", ans)
      if (ans == "") ans = "no"
      ans = pmatch(ans, table = c("yes", "no"), nomatch = 0L)
      if (ans > 0L) break
    }
    if (ans != 1L) return(invisible(FALSE))
  }

  running = findOnSystem(reg)
  if (length(running) > 0L)
    stopf("Can't remove registry, because there are %d live jobs on the system.", length(running))

  removeDirs(reg$file.dir, recursive = TRUE, must.work = TRUE)
}
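# Usage sketch (not part of the original source; illustrative only):
# reg = makeRegistry(id = "tmp_example", file.dir = tempfile())
# removeRegistry(reg, ask = "no")  # delete non-interactively, e.g. in scripts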
/scratch/gouwar.j/cran-all/cranData/BatchJobs/R/Registry.R
resrc = function(res.new) {
  res.old = getBatchJobsConf()$default.resources
  if (!isProperlyNamed(res.new) || !isProperlyNamed(res.old))
    stop("Resources must all be uniquely named!")
  insert(res.old, res.new)
}

saveResources = function(reg, resources, timestamp = now()) {
  fn = getResourcesFilePath(reg, timestamp)
  save2(file = fn, resources = resources)
  return(timestamp)
}

#' Function to get job resources in job function.
#'
#' Return the list passed to \code{\link{submitJobs}}, e.g.
#' nodes, walltime, etc.
#'
#' Can only be called in job function during job execution on slave.
#'
#' @return [\code{list}].
#' @export
getResources = function() {
  if (!isOnSlave())
    stop("getResources can only be called during job execution on slave!")
  load2(getOption("BatchJobs.resources.path"))
}

#' Function to get the resources that were submitted for some jobs.
#'
#' Throws an error if called for unsubmitted jobs.
#'
#' @template arg_reg
#' @param ids [\code{integer}]\cr
#'   Ids of jobs.
#'   Default is all submitted jobs.
#' @param as.list [\code{logical(1)}]\cr
#'   If \code{FALSE} a data.frame will be returned.
#'   Default is \code{TRUE}.
#' @return [\code{list} | \code{data.frame}]. List (or data.frame) of resource lists as passed to \code{\link{submitJobs}}.
#' @export
getJobResources = function(reg, ids, as.list = TRUE) {
  checkRegistry(reg, writeable = FALSE)
  syncRegistry(reg)
  if (missing(ids)) {
    ids = dbFindSubmitted(reg)
  } else {
    ids = checkIds(reg, ids)
    nsub = dbFindSubmitted(reg, ids, negate = TRUE, limit = 1L)
    if (length(nsub) > 0L)
      stopf("Some of your jobs have not been submitted, so no resources are available, e.g. for id=%i", nsub)
  }
  query = sprintf("SELECT job_id, resources_timestamp FROM %s_job_status", reg$id)
  df = dbSelectWithIds(reg, query, ids)

  res = namedList(df$job_id)
  for (ts in unique(df$resources_timestamp)) {
    res[df$resources_timestamp == ts] = load2(getResourcesFilePath(reg, ts), simplify = FALSE)
  }

  if (!as.list) {
    res = rbindlist(res)
    setDF(res, rownames = as.character(df$job_id))
  }
  return(res)
}
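# Usage sketch (not part of the original source; assumes jobs were already
# submitted, e.g. via submitJobs(reg, resources = list(walltime = 3600))):
# getJobResources(reg)                   # named list of resource lists, one per job
# getJobResources(reg, as.list = FALSE)  # same information as a data.frame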
/scratch/gouwar.j/cran-all/cranData/BatchJobs/R/Resources.R
# ******************** Constructors ********************

# Abstract base class constructor for general workers.
#
# @param ssh [\code{logical(1)}]\cr
#   Use ssh for remote node?
# @param nodename [\code{character(1)}]\cr
#   Host name of node.
# @param ssh.cmd [\code{character(1)}]\cr
#   CLI command to ssh into remote node.
# @param ssh.args [\code{character}]\cr
#   CLI args for \code{ssh.cmd}.
# @param rhome [\code{character(1)}]\cr
#   Path to R installation on worker.
#   \dQuote{} means R installation on the PATH is used.
# @param r.options [\code{character}]\cr
#   Options for R and Rscript, one option per element of the vector,
#   a la \dQuote{--vanilla}.
# @param script [\code{character(1)}]\cr
#   Path to helper script on worker.
#   Default means to call \code{\link{findHelperScriptLinux}}.
# @param ncpus [\code{integer(1)}]\cr
#   Number of VPUs of worker.
#   Default means to query the worker via \code{\link{getWorkerNumberOfCPUs}}.
# @param max.jobs [\code{integer(1)}]\cr
#   Maximal number of jobs that can run concurrently for the current registry.
#   Default is \code{ncpus}.
# @param max.load [\code{numeric(1)}]\cr
#   Load average (of the last 5 min) at which the worker is considered occupied,
#   so that no job can be submitted.
#   Default is the number of cores of the machine minus 1, where the number of
#   cores is taken from option \code{mc.cores} or \code{\link[parallel]{detectCores}}.
# @param nice [\code{integer(1)}]\cr
#   Process priority to run R with set via nice. Integers between -20 and 19 are allowed.
#   If missing, processes are not nice'd and the system default applies (usually 0).
# @param classes [\code{character}]\cr
#   Extra classes, more specific than \dQuote{Worker}.
#   Will be added to the class attribute of the object.
# @return [\code{\link{Worker}}].
makeWorker = function(ssh, nodename, ssh.cmd, ssh.args, rhome,
  r.options = c("--no-save", "--no-restore", "--no-init-file", "--no-site-file"),
  script, ncpus, max.jobs, max.load, nice, classes) {

  assertFlag(ssh)
  assertString(nodename)
  if (!is.null(ssh.cmd))
    assertString(ssh.cmd)
  if (!is.null(ssh.args))
    assertCharacter(ssh.args, any.missing = FALSE)
  assertString(rhome)
  assertCharacter(r.options, any.missing = FALSE)
  if (missing(script)) {
    # FIXME: don't use Linux-specific code in base class
    script = findHelperScriptLinux(rhome, r.options, ssh, ssh.cmd, ssh.args, nodename)
  } else {
    assertString(script)
  }

  # construct object partially so we can query ncpus
  w = as.environment(list(
    ssh = ssh,
    ssh.cmd = ssh.cmd,
    ssh.args = ssh.args,
    nodename = nodename,
    rhome = rhome,
    r.options = r.options,
    script = script,
    last.update = -Inf,
    available = "A", # worker is available, we can submit, set in update loop in scheduleWorkerJobs.R
    status = NULL))
  class(w) = c(classes, "Worker")

  if (missing(ncpus)) {
    ncpus = getWorkerNumberOfCPUs(w)
    messagef("Setting for worker %s: ncpus=%i", w$nodename, ncpus)
  } else {
    ncpus = asCount(ncpus)
  }
  if (missing(max.jobs)) {
    max.jobs = ncpus
  } else {
    max.jobs = asCount(max.jobs)
  }
  if (missing(max.load)) {
    max.load = max(getOption("mc.cores", parallel::detectCores()) - 1L, 1L)
  } else {
    assertNumber(max.load, lower = 0)
  }
  if (missing(nice)) {
    nice = ""
  } else {
    nice = asInt(nice, lower = -20, upper = 19)
  }
  w$ncpus = ncpus
  w$max.jobs = max.jobs
  w$max.load = max.load
  w$nice = nice
  return(w)
}

# ******************** Interface definition ********************

# Return number of cores on worker.
# @param worker [\code{\link{Worker}}].
#   Worker.
# @return [\code{integer(1)}].
getWorkerNumberOfCPUs = function(worker) {
  UseMethod("getWorkerNumberOfCPUs")
}

# Return 4 numbers to describe worker status.
# - load average of last 1 min, as given by e.g. uptime
# - number of R processes by _all_ users
# - number of R processes by _all_ users which have a load of >= 50%
# - number of R processes by current user which match $FILEDIR/jobs in the cmd call of R
# @param worker [\code{\link{Worker}}].
#   Worker.
# @param file.dir [\code{character(1)}].
#   File dir of registry.
# @return [named \code{list} of \code{numeric(1)}].
getWorkerStatus = function(worker, file.dir) {
  UseMethod("getWorkerStatus")
}

# Start a job on worker, probably with R CMD BATCH.
# @param worker [\code{\link{Worker}}].
#   Worker.
# @param rfile [\code{character(1)}].
#   Path to R file to execute.
# @param outfile [\code{character(1)}].
#   Path to log file for R process.
# @return [\code{character(1)}]. Relevant process id.
startWorkerJob = function(worker, rfile, outfile) {
  UseMethod("startWorkerJob")
}

# Kill a job on worker. Really do it.
# @param worker [\code{\link{Worker}}].
#   Worker.
# @param pid [\code{character(1)}].
#   Process id from DB/batch.job.id to kill.
# @return Nothing.
killWorkerJob = function(worker, pid) {
  UseMethod("killWorkerJob")
}

# List all jobs on worker belonging to the current registry.
# @param worker [\code{\link{Worker}}].
#   Worker.
# @param file.dir [\code{character(1)}].
#   File dir of registry.
# @return [\code{character}]. Vector of process ids.
listWorkerJobs = function(worker, file.dir) {
  UseMethod("listWorkerJobs")
}
/scratch/gouwar.j/cran-all/cranData/BatchJobs/R/Worker.R
# ******************** Constructors ********************

# Construct a remote worker for a Linux machine via SSH.
makeWorkerRemoteLinux = function(nodename, ssh.cmd, ssh.args, rhome,
  r.options, script, ncpus, max.jobs, max.load, nice) {
  makeWorker(ssh = TRUE, nodename, ssh.cmd, ssh.args, rhome, r.options, script,
    ncpus, max.jobs, max.load, nice, c("WorkerRemoteLinux", "WorkerLinux"))
}

# Construct a worker for local Linux machine to spawn parallel jobs.
makeWorkerLocalLinux = function(r.options, script, ncpus, max.jobs, max.load, nice) {
  makeWorker(ssh = FALSE, nodename = "localhost", ssh.cmd = NULL, ssh.args = NULL,
    R.home(), r.options, script, ncpus, max.jobs, max.load, nice,
    c("WorkerLocalLinux", "WorkerLinux"))
}

# ******************** Interface implementation ********************

#' @export
getWorkerNumberOfCPUs.WorkerLinux = function(worker) {
  as.integer(runWorkerCommand(worker, "number-of-cpus"))
}

#' @export
getWorkerStatus.WorkerLinux = function(worker, file.dir) {
  res = runWorkerCommand(worker, "status", file.dir)
  setNames(
    as.list(as.numeric(stri_split_regex(res, "\\s+")[[1L]])),
    c("load", "n.rprocs", "n.rprocs.50", "n.jobs")
  )
}

#' @export
startWorkerJob.WorkerLinux = function(worker, rfile, outfile) {
  runWorkerCommand(worker, "start-job", c(worker$rhome, worker$nice, worker$r.options, rfile, outfile))
}

#' @export
killWorkerJob.WorkerLinux = function(worker, pid) {
  runWorkerCommand(worker, "kill-job", pid)
}

#' @export
listWorkerJobs.WorkerLinux = function(worker, file.dir) {
  res = runWorkerCommand(worker, "list-jobs", file.dir)
  gsub("^\\s+|\\s+$", "", res)
}

# ******************** Run commands on OS ********************

# Runs a command, either via SSH or directly on localhost.
# @param cmd [\code{character(1)}]
#   System command to run.
# @param args [\code{character}]
#   System command arguments.
#   Default is \code{character(0)}.
# @param stdin [\code{character(1)}]
#   See \code{\link{system3}}.
#   Default is \dQuote{}.
# @param stop.on.exit.code [\code{logical(1)}]
#   See \code{\link{system3}}.
#   Default is \code{TRUE}.
# @param ssh [\code{logical(1)}]
#   Use SSH?
#   Default is \code{FALSE}.
# @param nodename [\code{character(1)}]
#   Nodename for SSH.
# @return See \code{\link{system3}}.
runOSCommandLinux = function(cmd, args = character(0L), stdin = "",
  stop.on.exit.code = TRUE, ssh = FALSE, ssh.cmd, ssh.args, nodename) {
  conf = getBatchJobsConf()
  if (ssh) {
    sys.cmd = ssh.cmd
    sys.args = c(ssh.args, nodename, sprintf("'%s'", collapse(c(cmd, args), sep = " ")))
  } else {
    sys.cmd = cmd
    sys.args = args
  }
  if (conf$debug) {
    catf("OS cmd: %s %s", sys.cmd, collapse(sys.args, " "))
    res = try(system3(sys.cmd, sys.args, stdin = stdin, stdout = TRUE, stderr = TRUE,
      wait = TRUE, stop.on.exit.code = stop.on.exit.code))
    catf("OS result:")
    print(res)
  } else {
    res = system3(sys.cmd, sys.args, stdin = stdin, stdout = TRUE, stderr = TRUE,
      wait = TRUE, stop.on.exit.code = stop.on.exit.code)
  }
  if (is.error(res))
    stopf("Error in runOSCommandLinux: %s (cmd: %s || args: %s)", as.character(res), sys.cmd, collapse(sys.args))
  res
}

# Find helper script on a Linux machine in package dir.
# @param rhome [\code{character(1)}]
#   RHOME dir.
# @param r.options [\code{character}]
#   Options for R and Rscript, one option per element of the vector,
#   a la "--vanilla".
# @param ssh [\code{logical(1)}]
#   Use SSH?
#   Default is \code{FALSE}.
# @param nodename [\code{character(1)}]
#   Nodename for SSH.
# @return [\code{character(1)}]. Path of script.
findHelperScriptLinux = function(rhome, r.options, ssh = FALSE, ssh.cmd, ssh.args, nodename) {
  # I think we don't need to quote anything here, because system2 uses shQuote
  if (rhome == "")
    rscript = "Rscript"
  else
    rscript = file.path(rhome, "bin", "Rscript")
  minus.e = "-e \"message(normalizePath(system.file(\\\"bin/linux-helper\\\", package = \\\"BatchJobs\\\")))\""
  args = c(r.options, minus.e)
  # only take the last line, if stuff is printed by Rscript before our message
  tail(runOSCommandLinux(rscript, args, ssh = ssh, ssh.cmd = ssh.cmd, ssh.args = ssh.args, nodename = nodename)$output, 1L)
}

# Perform a batch helper command on a Linux machine.
# @param worker [\code{\link{WorkerLinux}}]
#   Worker.
# @param command [\code{character(1)}]
#   Helper command.
# @param args [\code{character}]
#   Arguments for helper command.
#   See documentation of linux-helper.
runWorkerCommand = function(worker, command, args = character(0L)) {
  # paths can contain whitespace and other bad stuff, quote it!
  args = sprintf("\"%s\"", args)
  script.args = c(command, args)
  runOSCommandLinux(worker$script, script.args, ssh = worker$ssh, ssh.cmd = worker$ssh.cmd,
    ssh.args = worker$ssh.args, nodename = worker$nodename)$output
}
/scratch/gouwar.j/cran-all/cranData/BatchJobs/R/WorkerLinux.R
saveFunction = function(reg, fun, more.args) { fun = checkUserFunction(fun) fun.id = digest(list(fun, more.args)) save2(file = getFunFilePath(reg, fun.id), fun = fun, more.args = more.args) return(fun.id) }
/scratch/gouwar.j/cran-all/cranData/BatchJobs/R/addJob.R
#' applyJobFunction #' ONLY FOR INTERNAL USAGE. #' @template arg_reg #' @param job [\code{\link{Job}}]\cr #' Job. #' @param cache [\code{FileCache}]\cr #' Instance of \code{\link[BBmisc]{makeFileCache}}. #' @return [any]. Result of job. #' @keywords internal #' @export applyJobFunction = function(reg, job, cache) { UseMethod("applyJobFunction") } #' @method applyJobFunction Registry #' @export applyJobFunction.Registry = function(reg, job, cache) { fn = file.path(getFunDir(reg$file.dir), sprintf("%s.RData", job$fun.id)) stuff = cache(fn, parts = c("fun", "more.args"), simplify = FALSE) do.call(stuff$fun, c(job$pars, stuff$more.args)) }
/scratch/gouwar.j/cran-all/cranData/BatchJobs/R/applyJobFunction.R
# @title Apply a function over margins of matrices or arrays
#
# A simple wrapper around batchMap to define jobs as an application of a function
# over margins of matrices or arrays.
#
# @param reg [\code{\link{Registry}}]\cr
#   Empty Registry that will store jobs for the mapping.
# @param X [\code{\link[base]{matrix}} | \code{\link[base]{array}}]\cr
#   A matrix or an array.
# @param margin [\code{integer(1)}]\cr
#   Margin of the matrix. 1 for rows, 2 for columns.
# @param fun [\code{function}]\cr
#   Function to map over \code{...}.
# @param chunk.size [\code{integer(1)}]\cr
#   Preferred number of jobs in each chunk.
#   Can not be used in combination with \code{n.chunks}.
#   Default is \code{chunk.size = 1} if \code{n.chunks} is also not provided and
#   results in \code{\link{nrow}} or \code{\link{ncol}} jobs, respectively.
# @param n.chunks [\code{integer(1)}]\cr
#   Preferred number of chunks.
#   Can not be used in combination with \code{chunk.size}.
# @param ... [any]\cr
#   Additional arguments passed to \code{fun}.
# @param use.names [\code{logical(1)}]\cr
#   Use the dimnames of the respective margin of \code{X} to name the jobs?
#   Default is \code{FALSE}.
# @return [\code{integer}]. Ids of created jobs.
# @examples
# reg = makeRegistry(id = "BatchJobsExample", file.dir = tempfile(), seed = 123)
# X = matrix(1:16, 4)
# # Define two jobs to calculate the row sums:
# batchApply(reg, X, 1, sum, n.chunks = 2)
# submitJobs(reg)
# waitForJobs(reg)
# reduceResultsVector(reg, use.names = FALSE) == rowSums(X)
batchApply = function(reg, X, margin, fun, chunk.size, n.chunks, ..., use.names = FALSE) {
  checkRegistry(reg, strict = TRUE, writeable = TRUE)
  assert(checkMatrix(X), checkArray(X))
  dX = dim(X)
  margin = asInt(margin, lower = 1L, upper = length(dX))
  fun = checkUserFunction(fun)
  more.args = list(...)
  checkMoreArgs(more.args, c(".X", ".inds", ".user.fun"))
  assertFlag(use.names)

  inds = chunk(seq_len(dX[margin]), chunk.size = chunk.size, n.chunks = n.chunks, shuffle = FALSE)
  if (use.names && !is.null(dimnames(X)[[margin]]))
    names(inds) = dimnames(X)[[margin]]
  more.args = c(more.args, list(.X = aperm(X, c(margin, seq_along(dX)[-margin])), .user.fun = fun))
  batchMap(reg, batchApplyWrapper, .inds = inds, more.args = more.args, use.names = use.names)
}

batchApplyWrapper = function(.inds, .X, .user.fun, ...) {
  apply(.X[.inds,, drop = FALSE], 1L, .user.fun, ...)
}

# FIXME: we should split the matrix on the master first.
# here is a piece of code which allows this, but there is no sufficient
# mechanism in BJ to find these files again on the slave?
# maybe store in work.dir?
# splitX = function(X, margin, chunks) {
#   n = length(chunks)
#   X = aperm(X, c(margin, seq_along(dim(X))[-margin]))
#   dn = file.path(reg$file.dir, "data-chunks")
#   dir = checkDir(dn, create = TRUE)
#   dest = file.path(dn, sprintf("chunk-%i.RData", seq_len(n)))
#
#   info("Splitting input into %i files ...", n)
#   for (i in seq_len(n)) {
#     save2(.X = X[chunks[[i]],, drop = FALSE], file = dest[i])
#   }
#   dest
# }
/scratch/gouwar.j/cran-all/cranData/BatchJobs/R/batchApply.R
#' @title Map function over all combinations.
#'
#' @description
#' Maps an n-ary function over a list of all combinations which are given by some vectors.
#' Internally \code{\link{expand.grid}} is used to compute the combinations, then
#' \code{\link{batchMap}} is called.
#'
#' @param reg [\code{\link{Registry}}]\cr
#'   Empty Registry that will store jobs for the mapping.
#' @param fun [\code{function}]\cr
#'   Function to map over the combinations.
#' @param ... [any]\cr
#'   Vectors that are used to compute all combinations.
#'   If the arguments are named, these names are used to bind to arguments of \code{fun}.
#' @param more.args [\code{list}]\cr
#'   A list of other arguments passed to \code{fun}.
#'   Default is empty list.
#' @return [\code{data.frame}]. Expanded grid of combinations produced by \code{\link{expand.grid}}.
#' @export
#' @examples
#' reg = makeRegistry(id = "BatchJobsExample", file.dir = tempfile(), seed = 123)
#' f = function(x, y, z) x * y + z
#' # lets store the param grid
#' grid = batchExpandGrid(reg, f, x = 1:2, y = 1:3, more.args = list(z = 10))
#' submitJobs(reg)
#' waitForJobs(reg)
#' y = reduceResultsVector(reg)
#' # later, we can always access the param grid like this
#' grid = getJobParamDf(reg)
#' cbind(grid, y = y)
batchExpandGrid = function(reg, fun, ..., more.args = list()) {
  checkRegistry(reg, strict = TRUE, writeable = TRUE)
  assertFunction(fun)
  args = list(...)
  ns = names(args)
  if (length(args) == 0L)
    return(invisible(integer(0L)))
  if (!all(vlapply(args, is.vector)))
    stop("All args in '...' must be vectors!")
  checkMoreArgs(more.args)
  reserved = c("KEEP.OUT.ATTRS", "stringsAsFactors")
  if (any(reserved %in% ns))
    stopf("You cannot use the reserved arg names %s in ... args!", collapse(reserved))
  args$KEEP.OUT.ATTRS = FALSE
  args$stringsAsFactors = FALSE
  grid = do.call(expand.grid, args)
  if (is.null(ns))
    colnames(grid) = NULL
  do.call(batchMap, c(as.list(grid), list(reg = reg, fun = fun, more.args = more.args)))
  return(setRowNames(grid, as.character(getJobIds(reg))))
}
/scratch/gouwar.j/cran-all/cranData/BatchJobs/R/batchExpandGrid.R
#' @title Maps a function over lists or vectors, adding jobs to a registry. #' #' @description #' You can then submit these jobs to the batch system. #' #' @param reg [\code{\link{Registry}}]\cr #' Empty Registry that will store jobs for the mapping. #' @param fun [\code{function}]\cr #' Function to map over \code{...}. #' @param ... [any]\cr #' Arguments to vectorize over (list or vector). #' @param more.args [\code{list}]\cr #' A list of other arguments passed to \code{fun}. #' Default is empty list. #' @param use.names [\code{logical(1)}]\cr #' Store parameter names to enable named results in \code{\link{loadResults}} and some other functions. #' Default is \code{FALSE}. #' @return Vector of type \code{integer} with job ids. #' @examples #' reg = makeRegistry(id = "BatchJobsExample", file.dir = tempfile(), seed = 123) #' f = function(x) x^2 #' batchMap(reg, f, 1:10) #' print(reg) #' @export batchMap = function(reg, fun, ..., more.args = list(), use.names = FALSE) { checkRegistry(reg, strict = TRUE, writeable = TRUE) assertFunction(fun) ddd = list(...) if (length(ddd) == 0L) return(invisible(integer(0L))) n = unique(viapply(ddd, length)) if(length(n) != 1L) stop("All args in '...' must be of the same length!") if (n == 0L) return(invisible(integer(0L))) checkMoreArgs(more.args) assertFlag(use.names) if (dbGetJobCount(reg) > 0L) stop("Registry is not empty!") info("Adding %i jobs to DB.", n) # create seeds seed = reg$seed seeds = addIntModulo(seed, seq(0L, n-1L)) # serialize pars to char vector pars = mapply(function(...) { rawToChar(serialize(list(...), connection = NULL, ascii = TRUE)) }, ..., USE.NAMES = FALSE) fun.id = saveFunction(reg, fun, more.args) # generate jobnames col if (use.names) { jobname = getArgNames(ddd) if (is.null(jobname)) jobname = rep.int(NA_character_, n) } else { jobname = rep.int(NA_character_, n) } # add jobs to DB n = dbAddData(reg, "job_def", data = data.frame(fun_id = fun.id, pars = pars, jobname = jobname, stringsAsFactors = FALSE)) job.def.ids = dbGetLastAddedIds(reg, "job_def", "job_def_id", n) n = dbAddData(reg, "job_status", data = data.frame(job_def_id = job.def.ids, seed = seeds, stringsAsFactors = FALSE)) job.ids = dbGetLastAddedIds(reg, "job_status", "job_id", n) # we can only create the dir after we have obtained the ids from the DB createShardedDirs(reg, job.ids) invisible(job.ids) }
/scratch/gouwar.j/cran-all/cranData/BatchJobs/R/batchMap.R
#' Combination of makeRegistry, batchMap and submitJobs.
#'
#' @description
#' Combination of \code{\link{makeRegistry}}, \code{\link{batchMap}}
#' and \code{\link{submitJobs}}
#' for quick computations on the cluster.
#' Should only be used by skilled users who know what they are doing.
#' Creates the file.dir, maps function, potentially chunks jobs and submits them.
#'
#' @param fun [\code{function}]\cr
#'   Function to map over \code{...}.
#' @param ... [any]\cr
#'   Arguments to vectorize over (list or vector).
#' @param more.args [\code{list}]\cr
#'   A list of other arguments passed to \code{fun}.
#'   Default is empty list.
#' @param file.dir [\code{character}]\cr
#'   See \code{\link{makeRegistry}}.
#'   Default is \code{NULL}, which means that it is created in the current directory under the name
#'   \dQuote{bmq_[random alphanumerics]}.
#' @param packages [\code{character}]\cr
#'   See \code{\link{makeRegistry}}.
#' @param chunk.size [\code{integer(1)}]\cr
#'   Preferred number of jobs in each chunk.
#'   Can not be used in combination with \code{n.chunks}.
#'   Note that the ids will get shuffled to balance out possible run time differences.
#'   Default is not to use chunking.
#' @param n.chunks [\code{integer(1)}]\cr
#'   Preferred number of chunks.
#'   Can not be used in combination with \code{chunk.size}.
#'   Note that the ids will get shuffled to balance out possible run time differences.
#'   Default is not to use chunking.
#' @param chunks.as.arrayjobs [\code{logical(1)}]\cr
#'   Submit chunks as array jobs?
#'   Default is \code{FALSE}.
#' @param inds [\code{integer}]\cr
#'   Indices of ids / chunks to submit.
#'   Default is all. If ids get chunked, this subsets the list of shuffled ids.
#' @param resources [\code{list}]\cr
#'   Required resources for all batch jobs.
#'   Default is empty list.
#' @return [\code{\link{Registry}}]
#' @export
batchMapQuick = function(fun, ..., more.args = list(), file.dir = NULL,
  packages = character(0L), chunk.size, n.chunks, chunks.as.arrayjobs = FALSE,
  inds, resources = list()) {

  if (is.null(file.dir)) {
    # create name for temp file dir in current dir
    file.dir = tempfile(pattern = "bmq_", tmpdir = getwd())
  } else {
    assertString(file.dir)
  }
  id = basename(file.dir)
  reg = makeRegistry(id = id, file.dir = file.dir, packages = packages)
  on.exit(messagef("Interrupted. You can find your registry in %s.", reg$file.dir))
  # we want to return the reg in any case
  # otherwise we cannot look at it / do anything with it in case of errors
  try({
    ids = batchMap(reg, fun, ..., more.args = more.args)
    if (!missing(chunk.size) || !missing(n.chunks))
      ids = chunk(ids, chunk.size = chunk.size, n.chunks = n.chunks, shuffle = TRUE)
    if (!missing(inds))
      ids = ids[inds]
    submitJobs(reg, ids, resources = resources)
  }, silent = FALSE)
  on.exit(NULL)
  return(reg)
}
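# Usage sketch (not part of the original source; creates a "bmq_..." registry
# directory under the current working directory):
# reg = batchMapQuick(function(x) x^2, 1:10, n.chunks = 2)
# waitForJobs(reg)
# reduceResultsVector(reg)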
/scratch/gouwar.j/cran-all/cranData/BatchJobs/R/batchMapQuick.R
#' Maps a function over the results of a registry by using batchMap.
#'
#' @param reg [\code{\link{Registry}}]\cr
#'   Registry whose results should be mapped by \code{fun}.
#' @param reg2 [\code{\link{Registry}}]\cr
#'   Empty registry that should store the job for the mapping.
#' @param fun [\code{function(job, res, ...)}]\cr
#'   Function to map over results of \code{reg}.
#'   Further arguments come from ... of \code{batchMapResults} and \code{more.args}.
#' @param ... [any]\cr
#'   Further arguments to vectorize over (list or vector).
#'   Must all be the same length as number of results in \code{reg}.
#' @param ids [\code{integer}]\cr
#'   Ids of jobs whose results should be mapped with \code{fun}.
#'   Default is all jobs.
#' @param part [\code{character}]
#'   Only useful for multiple result files, then defines which result file part(s) should be loaded.
#'   \code{NA} means all parts are loaded, which is the default.
#' @param more.args [\code{list}]\cr
#'   A list of other arguments passed to \code{fun}.
#'   Default is empty list.
#' @return Vector of type \code{integer} with job ids.
#' @export
#' @examples
#' reg1 = makeRegistry(id = "BatchJobsExample1", file.dir = tempfile(), seed = 123)
#' # square some numbers
#' f = function(x) x^2
#' batchMap(reg1, f, 1:10)
#'
#' # submit jobs and wait for the jobs to finish
#' submitJobs(reg1)
#' waitForJobs(reg1)
#'
#' # look at results
#' reduceResults(reg1, fun = function(aggr,job,res) c(aggr, res))
#'
#' reg2 = makeRegistry(id = "BatchJobsExample2", file.dir = tempfile(), seed = 123)
#'
#' # define function to transform results, we simply do the inverse of the squaring
#' g = function(job, res) sqrt(res)
#' batchMapResults(reg1, reg2, fun = g)
#'
#' # submit jobs and wait for the jobs to finish
#' submitJobs(reg2)
#' waitForJobs(reg2)
#'
#' # check results
#' reduceResults(reg2, fun = function(aggr,job,res) c(aggr, res))
batchMapResults = function(reg, reg2, fun, ..., ids, part = NA_character_, more.args = list()) {
  # FIXME conserve jobnames
  checkRegistry(reg, writeable = FALSE)
  syncRegistry(reg)
  checkRegistry(reg2, writeable = TRUE)
  syncRegistry(reg2)
  assertFunction(fun, c("job", "res"))
  if (missing(ids)) {
    ids = dbGetJobIdsIfAllDone(reg)
  } else {
    ids = checkIds(reg, ids)
    if (length(dbFindDone(reg, ids, negate = TRUE, limit = 1L)) > 0L)
      stop("Not all jobs with corresponding ids finished (yet)!")
  }
  checkMoreArgs(more.args, reserved = c(".reg", ".fun", ".part"))
  if (dbGetJobCount(reg2) > 0L)
    stop("Registry 'reg2' is not empty!")
  if (reg$file.dir == reg2$file.dir)
    stop("Both registries cannot point to the same file dir. Files would get overwritten!")
  reg2$packages = insert(reg2$packages, reg$packages)
  saveRegistry(reg2)
  batchMap(reg2, batchMapResultsWrapper, ids, ...,
    more.args = c(more.args, list(.reg = reg, .fun = fun, .part = part)))
}

batchMapResultsWrapper = function(id, ..., .reg, .fun, .part) {
  .fun(job = getJob(.reg, id, check.id = FALSE),
    res = getResult(.reg, id, part = .part), ...)
}
/scratch/gouwar.j/cran-all/cranData/BatchJobs/R/batchMapResults.R
#' Manually query the BatchJobs database #' #' @template arg_reg #' @param query [\code{character(1)}]\cr #' SQL query to send to the database. #' @param flags [\code{character(1)}]\cr #' One of \dQuote{ro}, \dQuote{rw} or \dQuote{rwc} which is translated #' to \code{SQLITE_RO}, \code{SQLITE_RW} or \code{SQLITE_RWC}, respectively. #' See \link[RSQLite]{SQLITE_RO} for more info. #' @return [\code{data.frame}] Result of the query. #' @export #' @examples #' reg = makeRegistry("test", file.dir = tempfile()) #' batchMap(reg, identity, i = 1:10) #' batchQuery(reg, "SELECT * FROM test_job_status") batchQuery = function(reg, query, flags = "ro") { assertString(query) assertChoice(flags, c("ro", "rw", "rwc")) dbDoQuery(reg, query = query, flags = flags, max.retries = 3L) }
/scratch/gouwar.j/cran-all/cranData/BatchJobs/R/batchQuery.R
#' Reduces via a binary function over a list adding jobs to a registry.
#'
#' @description
#' Each job reduces a certain number of elements on one slave.
#' You can then submit these jobs to the batch system.
#' @param reg [\code{\link{Registry}}]\cr
#'   Empty Registry.
#' @param fun [\code{function(aggr, x, ...)}]\cr
#'   Function to reduce \code{xs} with.
#' @param xs [\code{vector}]\cr
#'   Vector to reduce.
#' @param init [any]\cr
#'   Initial object for reducing.
#' @param block.size [\code{integer(1)}]\cr
#'   Number of elements of \code{xs} reduced in one job.
#' @param more.args [\code{list}]\cr
#'   A list of other arguments passed to \code{fun}.
#'   Default is empty list.
#' @return Vector of type \code{integer} with job ids.
#' @export
#' @examples
#' # define function to reduce on slave, we want to sum a vector
#' f = function(aggr, x) aggr + x
#' reg = makeRegistry(id = "BatchJobsExample", file.dir = tempfile(), seed = 123)
#'
#' # sum 5 numbers on each slave process, i.e. 20 jobs
#' batchReduce(reg, fun = f, 1:100, init = 0, block.size = 5)
#' submitJobs(reg)
#' waitForJobs(reg)
#'
#' # now reduce one final time on master
#' reduceResults(reg, fun = function(aggr,job,res) f(aggr, res))
batchReduce = function(reg, fun, xs, init, block.size, more.args = list()) {
  checkRegistry(reg, strict = TRUE, writeable = FALSE)
  syncRegistry(reg)
  assertFunction(fun, c("aggr", "x"))
  if (!is.vector(xs))
    stop("Argument xs must be a vector")
  block.size = asCount(block.size, positive = TRUE)
  if (dbGetJobCount(reg) > 0L)
    stop("Registry is not empty!")
  checkMoreArgs(more.args, reserved = c(".fun", ".init"))

  xs.blocks = chunk(xs, chunk.size = block.size, shuffle = FALSE)
  more.args = c(more.args, list(.fun = fun, .init = init))
  batchMap(reg, batchReduceWrapper, xs.blocks, more.args = more.args)
}

batchReduceWrapper = function(xs.block, .fun, .init, ...) {
  fun = function(aggr, x) .fun(aggr, x, ...)
  Reduce(fun, xs.block, init = .init)
}
/scratch/gouwar.j/cran-all/cranData/BatchJobs/R/batchReduce.R
#' Reduces results via a binary function and adds jobs for this to a registry.
#'
#' @description
#' Each job reduces a certain number of results on one slave.
#' You can then submit these jobs to the batch system.
#' Later, you can do a final reduction with \code{\link{reduceResults}} on the master.
#' @param reg [\code{\link{Registry}}]\cr
#'   Registry whose results should be reduced by \code{fun}.
#' @param reg2 [\code{\link{Registry}}]\cr
#'   Empty registry that should store the job for the mapping.
#' @param fun [\code{function(aggr, job, res, ...)}]\cr
#'   Function to reduce results with.
#' @param ids [\code{integer}]\cr
#'   Ids of jobs whose results should be reduced with \code{fun}.
#'   Default is all jobs.
#' @param part [\code{character}]
#'   Only useful for multiple result files, then defines which result file part(s) should be loaded.
#'   \code{NA} means all parts are loaded, which is the default.
#' @param init [any]\cr
#'   Initial object for reducing.
#' @param block.size [\code{integer(1)}]\cr
#'   Number of results reduced in one job.
#' @param more.args [\code{list}]\cr
#'   A list of other arguments passed to \code{fun}.
#'   Default is empty list.
#' @return Vector of type \code{integer} with job ids.
#' @export
#' @examples
#' # generating example results:
#' reg1 = makeRegistry(id = "BatchJobsExample1", file.dir = tempfile(), seed = 123)
#' f = function(x) x^2
#' batchMap(reg1, f, 1:20)
#' submitJobs(reg1)
#' waitForJobs(reg1)
#'
#' # define function to reduce on slave, we want to sum the squares
#' myreduce = function(aggr, job, res) aggr + res
#'
#' # sum 5 results on each slave process, i.e. 4 jobs
#' reg2 = makeRegistry(id = "BatchJobsExample2", file.dir = tempfile(), seed = 123)
#' batchReduceResults(reg1, reg2, fun = myreduce, init = 0, block.size = 5)
#' submitJobs(reg2)
#' waitForJobs(reg2)
#'
#' # now reduce one final time on master
#' reduceResults(reg2, fun = myreduce)
batchReduceResults = function(reg, reg2, fun, ids, part = NA_character_, init, block.size, more.args = list()) {
  checkRegistry(reg, writeable = FALSE)
  checkRegistry(reg2, writeable = TRUE)
  syncRegistry(reg)
  syncRegistry(reg2)
  assertFunction(fun, c("aggr", "job", "res"))
  if (missing(ids)) {
    ids = dbGetJobIdsIfAllDone(reg)
  } else {
    ids = checkIds(reg, ids)
    if (length(dbFindDone(reg, ids, negate = TRUE, limit = 1L)) > 0L)
      stop("Not all jobs with corresponding ids finished (yet)!")
  }
  block.size = asCount(block.size)
  checkMoreArgs(more.args, reserved = c("..reg", "..fun", "..part"))
  if (dbGetJobCount(reg2) > 0L)
    stop("Registry 'reg2' is not empty!")
  if (reg$file.dir == reg2$file.dir)
    stop("Both registries cannot point to the same file dir. Files would get overwritten!")
  reg2$packages = insert(reg2$packages, reg$packages)
  saveRegistry(reg2)
  batchReduce(reg2, batchReduceResultsWrapper, ids, init = init, block.size = block.size,
    more.args = c(more.args, list(..reg = reg, ..fun = fun, ..part = part)))
}

batchReduceResultsWrapper = function(aggr, x, ..reg, ..fun, ..part) {
  # x is id
  # use lazy evaluation, if fun doesn't access job or res (unlikely)
  ..fun(aggr = aggr,
    job = getJob(..reg, x, check.id = FALSE),
    res = getResult(..reg, x, ..part))
}
/scratch/gouwar.j/cran-all/cranData/BatchJobs/R/batchReduceResults.R
# a simple preallocated stack. buffer = function(type = "list", capacity = 0L, value = TRUE, init = NULL, ...) { if (is.null(init)) { st = vector(type, capacity) n = 0L } else { st = init n = length(init) } rm(type) rm(init) force(capacity) force(value) ddd = list(...) clear = function() { if (is.function(value)) ret = do.call(value, c(list(head(st, n)), ddd)) else ret = value n <<- 0L ret } list( get = function() { head(st, n) }, clear = clear, push = function(x) { if (n == capacity) clear() n <<- n + 1L st[[n]] <<- x }, pos = function() { n }, empty = function() { n == 0L } ) }
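# Usage sketch for this internal helper (not part of the original source):
# pushing into a full buffer first flushes it via 'value', then stores the new
# element, so elements are processed in blocks of 'capacity'.
# b = buffer("list", capacity = 3L, value = function(x) print(x))
# for (i in 1:7) b$push(i)  # prints list(1, 2, 3) and then list(4, 5, 6)
# b$clear()                 # flushes the remaining element, list(7)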
/scratch/gouwar.j/cran-all/cranData/BatchJobs/R/buffer.R
#' Call an arbitrary function on specified SSH workers.
#'
#' @description
#' Calls can be made in parallel or consecutively,
#' the function waits until all calls have finished and
#' returns call results.
#' In consecutive mode the output on the workers can also be shown on the master
#' during computation.
#'
#' Please read and understand the comments for argument \code{dir}.
#'
#' Note that this function should only be used for short administrative
#' tasks or information gathering on the workers, the true workhorse for
#' real computation is \code{\link{submitJobs}}.
#'
#' In \code{\link{makeSSHWorker}} various options for load
#' management are possible. Note that these will be
#' ignored for the current call so that it executes immediately.
#'
#' @param nodenames [\code{character}]\cr
#'   Nodenames of workers to call function on.
#'   Only workers which were specified in your
#'   \code{\link{makeClusterFunctionsSSH}} configuration can be used.
#' @param fun [\code{function}]\cr
#'   Function to call on workers.
#' @param ... [any]\cr
#'   Arguments for \code{fun}.
#' @param consecutive [\code{logical(1)}]\cr
#'   Do calls consecutively and always wait until each worker is done.
#'   Default is \code{FALSE}.
#' @param show.output [\code{logical(1)}]\cr
#'   Show output of workers on master during call.
#'   Can be useful to see what is happening.
#'   Can only be used in consecutive mode.
#'   Default is \code{consecutive}.
#' @param simplify [\code{logical(1)}]\cr
#'   Should the result be simplified? See \code{\link{sapply}}.
#'   Default is \code{TRUE}.
#' @param use.names [\code{logical(1)}]\cr
#'   Name results by \code{nodenames}.
#'   Default is \code{TRUE}.
#' @param dir [\code{character(1)}]\cr
#'   Directory under which a temporary registry will be
#'   created in a subdirectory for communication.
#'   This has to be somewhere on the shared
#'   filesystem. The created subdirectory will be cleaned up on exit.
#'   Default is current working directory.
#' @return Results of function calls, either a list or simplified.
#' @export
callFunctionOnSSHWorkers = function(nodenames, fun, ...,
  consecutive = FALSE, show.output = consecutive, simplify = TRUE, use.names = TRUE, dir = getwd()) {

  assertCharacter(nodenames, any.missing = FALSE)
  assertFunction(fun)
  assertFlag(consecutive)
  assertFlag(show.output)
  assertFlag(simplify)
  assertFlag(use.names)
  if (!consecutive && show.output)
    stop("show.output = TRUE can only be used in consecutive mode.")
  conf = getBatchJobsConf()
  cf = conf$cluster.functions
  mail.old = c(conf$mail.start, conf$mail.done, conf$mail.error)
  if (cf$name != "SSH")
    stop("callFunctionOnSSHWorkers can only be used in SSH mode!")

  # create dummy registry, we submit our command from this
  regid = sprintf("BatchJobs_callFunctionOnSSHWorkers_%i", as.integer(Sys.time()))
  regdir = file.path(dir, regid)

  # we will change mailing and cluster funs, reset them on exit
  # also kill all still running jobs and remove reg dir
  on.exit({
    conf$cluster.functions = cf
    conf$mail.start = mail.old[1L]
    conf$mail.done = mail.old[2L]
    conf$mail.error = mail.old[3L]
  })

  # no mails for the following jobs, also get the ssh workers for the nodenames
  conf$mail.start = conf$mail.done = conf$mail.error = "none"
  workers = environment(cf$submitJob)$workers
  d = setdiff(nodenames, names(workers))
  if (length(d) > 0L)
    stopf("For some nodenames no workers exist: %s", collapse(d))
  workers = workers[nodenames]

  # ignore load constraints
  old.worker.settings = list()
  wsettings = c("ncpus", "max.jobs", "max.load")
  for (wn in names(workers)) {
    w = workers[[wn]]
    old.worker.settings[[wn]] = mget(wsettings, w)
    for (ws in wsettings)
      w[[ws]] = Inf
  }
  on.exit({
    # reset load settings
    for (wn in names(old.worker.settings)) {
      for (ws in wsettings)
        workers[[wn]][[ws]] = old.worker.settings[[wn]][[ws]]
    }
  }, add = TRUE)

  args = if (consecutive) list(fun) else replicate(length(nodenames), fun)
  suppressMessages({
    reg = makeRegistry(regid, file.dir = regdir, sharding = FALSE)
    more.args = list(...)
    batchMap(reg, function(fun, ...) {
      print("###logstart###")
      res = fun(...)
print("###logend###") res }, args, more.args = more.args) }) on.exit({ if (length(findOnSystem(reg)) > 0L) killJobs(reg, getJobIds(reg)) if (file.exists(regdir)) unlink(regdir, recursive = TRUE) }, add = TRUE) # read log as char string, get part between stamps and print only new chars printLog = function(log.old) { log.fn = getLogFiles(reg, 1L) log.new = readChar(log.fn, file.info(log.fn)$size) j = gregexpr("###logstart###", log.new)[[1L]][1L] if (j == -1) { # start not found log.new = "" } else { # start found, clip log.new = substr(log.new, j+15, nchar(log.new)) j = gregexpr("###logend###", log.new)[[1L]][1L] if (j != -1L) { # end found, clip log.new = substr(log.new, 1L, j-7L) } } cat(substr(log.new, nchar(log.old)+1L, nchar(log.new))) log.new } mysubmit = function(nodenames, reg) { messagef("Calling function on: %s.", collapse(nodenames)) conf$cluster.functions = makeClusterFunctionsSSH(workers = workers) capture.output(suppressMessages(submitJobs(reg, getJobIds(reg)))) } # while job not done, sleep and maybe print log waitTillJobsDone = function(reg) { log.old = "" while(length(findOnSystem(reg) > 0L)) { if (show.output) log.old = printLog(log.old) Sys.sleep(1) } if (show.output) { log.old = printLog(log.old) cat("\n\n") } } # if error, throw it on master checkJobErrors = function(reg, nodenames) { errids = findErrors(reg) if (length(errids) > 0L) { j = errids[1L] stopf("Error on %s: %s", nodenames[j], getErrorMessages(reg, j)) } } doit = function(reg, nodenames) { mysubmit(nodenames, reg) waitTillJobsDone(reg) checkJobErrors(reg, nodenames) } if (consecutive) { # loop though nodes individually and call function results = lapply(nodenames, function(nn) { log.old = "" doit(reg, nn) loadResult(reg, 1L) }) } else { doit(reg, nodenames) results = loadResults(reg, simplify = FALSE, use.names = FALSE) } if (use.names) names(results) = nodenames if (simplify) results = simplify2array(results) return(results) }
/scratch/gouwar.j/cran-all/cranData/BatchJobs/R/callFunctionOnSSHWorkers.R
# check for a valid registry id:
# must start with an alphabetic character,
# then alpha-numerics, underscores (and optionally minus) are allowed,
# i.e. pattern = "^[a-zA-Z]+[0-9a-zA-Z_-]*$"
# we must be quite restrictive here because we prefix
# the table name with the registry name
checkIdValid = function(id, allow.minus = TRUE) {
  assertString(id)
  if (allow.minus)
    pattern = "^[a-zA-Z]+[0-9a-zA-Z_-]*$"
  else
    pattern = "^[a-zA-Z]+[0-9a-zA-Z_]*$"
  if (!grepl(pattern, id))
    stopf("Id does not comply with pattern %s: %s", pattern, id)
}
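# Illustrative calls (not part of the original source):
# checkIdValid("exp1_run-2")                       # ok
# checkIdValid("exp1_run-2", allow.minus = FALSE)  # error: minus not allowed
# checkIdValid("1exp")                             # error: must start with a letter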
/scratch/gouwar.j/cran-all/cranData/BatchJobs/R/checkIdValid.R
#' Check job ids.
#'
#' @description
#' Simply checks if the provided vector of job ids is valid and
#' throws an error if something is odd.
#'
#' @template arg_reg
#' @param ids [\code{integer}]\cr
#'   Vector of job ids.
#' @param check.present [\code{logical(1)}]\cr
#'   Check if the ids are present in the database? Default is \code{TRUE}.
#' @param len [\code{integer(1)}]\cr
#'   Expected length of id vector. Passed to \code{\link[checkmate]{asInteger}}.
#' @return Invisibly returns the vector of ids, converted to integer.
#' @export
checkIds = function(reg, ids, check.present = TRUE, len = NULL) {
  ids = asInteger(ids, any.missing = FALSE, unique = TRUE, len = len)
  if (check.present)
    dbCheckJobIds(reg, ids)
  invisible(ids)
}
/scratch/gouwar.j/cran-all/cranData/BatchJobs/R/checkIds.R
#' Create a SubmitJobResult object.
#'
#' @description
#' Use this function in your implementation of \code{\link{makeClusterFunctions}}
#' to create a return value for the \code{submitJob} function.
#' @param status [\code{integer(1)}]\cr
#'   Launch status of job.
#'   0 means success, codes between 1 and 100 are temporary
#'   errors and any error greater than 100 is a permanent failure.
#' @param batch.job.id [\code{character(1)}]\cr
#'   Unique id of this job on batch system. Note that this is not the usual job id used in BatchJobs!
#'   Must be globally unique so that the job can be terminated
#'   using just this information.
#' @param msg [\code{character(1)}]\cr
#'   Optional error message in case \code{status} is not equal to 0.
#'   Default is \dQuote{OK}, \dQuote{TEMPERR}, \dQuote{ERROR}, depending on \code{status}.
#' @param ... [\code{any}]\cr
#'   Currently unused.
#' @return [\code{\link{SubmitJobResult}}]. A list, containing
#'   \code{status}, \code{batch.job.id} and \code{msg}.
#' @export
#' @aliases SubmitJobResult
makeSubmitJobResult = function(status, batch.job.id, msg, ...) {
  if (missing(msg)) {
    msg = if (status == 0L)
      "OK"
    else if (status <= 100L)
      "TEMPERR"
    else
      "ERROR"
  }
  setClasses(list(status = status, batch.job.id = batch.job.id, msg = msg),
    "SubmitJobResult")
}

#' @export
#' @method print SubmitJobResult
print.SubmitJobResult = function(x, ...) {
  cat("Job submission result:\n")
  catf("  ID     : '%s'", x$batch.job.id)
  catf("  Status : %i", x$status)
  catf("  Msg    : %s", x$msg)
}

#' Create a ClusterFunctions object.
#'
#' Use this function when you implement a backend for a batch system.
#' You must define the functions specified in the arguments.
#' @param name [\code{character(1)}]\cr
#'   Name of cluster functions.
#' @param submitJob [\code{function(conf, reg, job.name, rscript, log.file, job.dir, resources, ...)}]\cr
#'   Function to submit a new job.
#'   Must return a \code{\link{SubmitJobResult}} object.\cr
#'   The arguments are:\cr
#'   conf [\code{environment}]: The user configuration.\cr
#'   reg [\code{\link{Registry}}]: The registry.\cr
#'   job.name [\code{character(1)}]: Name of job, used if the job is displayed on the batch system. This is just for display and not an id!\cr
#'   rscript [\code{character(1)}]: File path to R script that is used to execute the job.\cr
#'   log.file [\code{character(1)}]: File path where log file (.Rout) has to be placed.\cr
#'   job.dir [\code{character(1)}]: Directory where all files relating to this job are placed.\cr
#'   resources [\code{list}]: Freely definable list of required resources for this job, e.g. walltime or memory.
#' @param killJob [\code{function(conf, reg, batch.job.id)}]\cr
#'   Function to kill a job on the batch system.
#'   Make sure that you definitely kill the job!
#'   Return value is currently ignored.\cr
#'   The arguments are:\cr
#'   conf [\code{environment}]: The user configuration.\cr
#'   reg [\code{\link{Registry}}]: The registry.\cr
#'   batch.job.id [\code{character(1)}]: Batch job id, as produced by \code{submitJob}.\cr
#'   Set \code{killJob} to \code{NULL} if killing jobs cannot be supported.
#' @param listJobs [\code{function(conf, reg)}]\cr
#'   List all jobs on the batch system for the current user / registry.
#'   This includes queued, running, held, idle, etc. jobs.
#'   Must return an integer vector of batch job ids, same format as they are produced by \code{submitJob}.
#'   It does not matter if you return a few job ids too many (e.g. all for the current user instead
#'   of all for the current registry), but you have to include all relevant ones.
#' The arguments are:\cr #' conf [\code{environment}]: The user configuration.\cr #' reg [\code{\link{Registry}}]: The registry.\cr #' Set \code{listJobs} to \code{NULL} if listing jobs cannot be supported. #' @param getArrayEnvirName [\code{function()}]\cr #' Returns the name of the environment variable specifying the array ID. #' Should return \code{NA} if not supported. #' @param class [\code{character(1)}]\cr #' Optional class name for cluster functions object. #' Useful to provide a nice print method #' which might show additional information about the workers. #' Default is \code{NULL}. #' @param ... [\code{any}]\cr #' Currently ignored. #' @export #' @aliases ClusterFunctions #' @family clusterFunctions makeClusterFunctions = function(name, submitJob, killJob, listJobs, getArrayEnvirName, class = NULL, ...) { assertString(name) assertFunction(submitJob, c("conf", "reg", "job.name", "rscript", "log.file", "job.dir", "resources")) if (!is.null(killJob)) assertFunction(killJob, c("conf", "reg", "batch.job.id")) if (!is.null(listJobs)) assertFunction(listJobs, c("conf", "reg")) if (!is.null(getArrayEnvirName)) assertFunction(getArrayEnvirName, character(0L)) setClasses(list(name = name, submitJob = submitJob, killJob = killJob, listJobs = listJobs, getArrayEnvirName = getArrayEnvirName), c("ClusterFunctions", class)) } #' @export #' @method print ClusterFunctions print.ClusterFunctions = function(x, ...) { catf("%s cluster functions.", x$name) }
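# Minimal sketch of a custom backend built from these constructors (not part
# of the original source; a real implementation must talk to a scheduler and
# fill in killJob/listJobs where supported):
# makeClusterFunctionsDummy = function() {
#   submitJob = function(conf, reg, job.name, rscript, log.file, job.dir, resources, ...) {
#     # a real backend would build and run the submit command here
#     makeSubmitJobResult(status = 101L, batch.job.id = NA_character_,
#       msg = "dummy backend: submission not implemented")
#   }
#   makeClusterFunctions(name = "Dummy", submitJob = submitJob, killJob = NULL,
#     listJobs = NULL, getArrayEnvirName = function() NA_character_)
# }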
/scratch/gouwar.j/cran-all/cranData/BatchJobs/R/clusterFunctions.R
#' Cluster functions helper: Read in your brew template file.
#'
#' @description
#' This function is only intended for use in your own cluster functions implementation.
#'
#' Simply reads your template and returns it as a character vector.
#' If you do this in the constructor of your cluster functions once, you can avoid this
#' repeated file access later on.
#'
#' @param template.file [\code{character(1)}]\cr
#'   File path.
#' @return [\code{character}].
#' @export
cfReadBrewTemplate = function(template.file) {
  assertFileExists(template.file, "r")
  tmpl = readLines(template.file)
  if (length(tmpl) == 0L)
    stopf("Error reading template '%s' or empty template", template.file)
  collapse(tmpl, "\n")
}

#' Cluster functions helper: Brew your template into a job description file.
#'
#' @description
#' This function is only intended for use in your own cluster functions implementation.
#'
#' Calls brew silently on your template; any error will lead to an exception.
#' If debug mode is turned on in the configuration, the file is stored at the same place as the
#' corresponding R script in the \dQuote{jobs}-subdir of your files directory,
#' otherwise in the temp dir via \code{\link{tempfile}}.
#'
#' @param conf [\code{environment}]\cr
#'   BatchJobs configuration.
#' @param template [\code{character(1)}]\cr
#'   Job description template as a character vector,
#'   possibly read in via \code{\link{cfReadBrewTemplate}}.
#' @param rscript [\code{character(1)}]\cr
#'   File path to your corresponding R script for the job.
#' @param extension [\code{character(1)}]\cr
#'   Extension for the job description file, e.g. \dQuote{pbs}.
#' @return [\code{character(1)}]. File path of result.
#' @export
cfBrewTemplate = function(conf, template, rscript, extension) {
  assertEnvironment(conf)
  assertString(template)
  assertString(rscript)
  assertString(extension)

  if (conf$debug) {
    # if debug, place in jobs dir
    outfile = sub("\\.R$", sprintf(".%s", extension), rscript)
  } else {
    outfile = tempfile("template")
  }
  pf = parent.frame()
  old = getOption("show.error.messages")
  on.exit(options(show.error.messages = old))
  options(show.error.messages = FALSE)
  z = suppressAll(try(brew(text = template, output = outfile, envir = pf), silent = TRUE))
  if (is.error(z))
    stopf("Error brewing template: %s", as.character(z))
  waitForFiles(outfile, conf$fs.timeout)
  return(outfile)
}

#' Cluster functions helper: Handle an unknown error during job submission.
#'
#' @description
#' This function is only intended for use in your own cluster functions implementation.
#'
#' Simply constructs a \code{\link{SubmitJobResult}} object with status code 101,
#' NA as batch job id and an informative error message containing the output of the OS command in \code{output}.
#'
#' @param cmd [\code{character(1)}]\cr
#'   OS command used to submit the job, e.g. qsub.
#' @param exit.code [\code{integer(1)}]\cr
#'   Exit code of the OS command, should not be 0.
#' @param output [\code{character}]\cr
#'   Output of the OS command, hopefully an informative error message.
#'   If these are multiple lines in a vector, they are automatically pasted together.
#' @return [\code{\link{SubmitJobResult}}].
#' @export
cfHandleUnknownSubmitError = function(cmd, exit.code, output) {
  assertString(cmd)
  exit.code = asInt(exit.code)
  assertCharacter(output, any.missing = FALSE)
  msg = sprintf("%s produced exit code %i; output %s",
    cmd, exit.code, collapse(output, sep = "\n"))
  makeSubmitJobResult(status = 101L, batch.job.id = NA_character_, msg = msg)
}

#' Cluster functions helper: Kill a batch job via OS command
#'
#' @description
#' This function is only intended for use in your own cluster functions implementation.
#'
#' Calls the OS command to kill a job via \code{system} like this: \dQuote{cmd batch.job.id}.
#' If the command returns an exit code > 0, the command is repeated
#' after a 1 second sleep \code{max.tries-1} times.
#' If the command failed in all tries, an exception is generated.
#'
#' @param cmd [\code{character(1)}]\cr
#'   OS command, e.g. \dQuote{qdel}.
#' @param batch.job.id [\code{character(1)}]\cr
#'   Id of the batch job on the batch system.
#' @param max.tries [\code{integer(1)}]\cr
#'   Total number of times the OS command is tried in case of failure.
#'   Default is \code{3}.
#' @return Nothing.
#' @export
cfKillBatchJob = function(cmd, batch.job.id, max.tries = 3L) {
  assertString(cmd)
  assertString(batch.job.id)
  max.tries = asCount(max.tries)

  for (tmp in seq_len(max.tries)) {
    res = runOSCommandLinux(cmd, batch.job.id, stop.on.exit.code = FALSE)
    if (res$exit.code == 0L)
      return()
    Sys.sleep(1)
  }
  stopf("Really tried to kill job, but could not do it. batch job id is %s.\nMessage: %s",
    batch.job.id, collapse(res$output, sep = "\n"))
}
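# Typical flow inside a backend built on these helpers (not part of the
# original source; "qsub"/"qdel" and the template path are placeholders):
# template = cfReadBrewTemplate("my.tmpl")  # once, e.g. in the cf constructor
# outfile = cfBrewTemplate(conf, template, rscript, "job")
# res = runOSCommandLinux("qsub", outfile, stop.on.exit.code = FALSE)
# if (res$exit.code > 0L)
#   cfHandleUnknownSubmitError("qsub", res$exit.code, res$output)
# cfKillBatchJob("qdel", batch.job.id)      # later, to kill the job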
/scratch/gouwar.j/cran-all/cranData/BatchJobs/R/clusterFunctionsHelpers.R
#' Create cluster functions for sequential execution in same session.
#'
#' @description
#' All jobs executed under these cluster functions are executed
#' sequentially, in the same interactive R process that you currently are.
#' That is, \code{submitJob} does not return until the
#' job has finished. The main use of this \code{ClusterFunctions}
#' implementation is to test and debug programs on a local computer.
#'
#' Listing jobs returns an empty vector (as no jobs can be running when you call this)
#' and \code{killJob} returns at once (for the same reason).
#'
#' @param write.logs [\code{logical(1)}]\cr
#'   Sink the output to log files. Turning logging off can increase the speed of
#'   calculations but makes it next to impossible to debug.
#'   Default is \code{TRUE}.
#' @return [\code{\link{ClusterFunctions}}].
#' @family clusterFunctions
#' @export
makeClusterFunctionsInteractive = function(write.logs = TRUE) {
  assertFlag(write.logs)
  submitJob = if (write.logs) {
    function(conf, reg, job.name, rscript, log.file, job.dir, resources, arrayjobs) {
      # open log file and sink both output and message streams into it
      fn = file(log.file, open = "wt")
      sink(fn, type = "output")
      sink(fn, type = "message")
      on.exit({
        sink(NULL, type = "output")
        sink(NULL, type = "message")
        close(fn)
      })
      # execute the job script
      try(sys.source(rscript, envir = new.env(), keep.source = FALSE))
      # return job result (always successful)
      makeSubmitJobResult(status = 0L, batch.job.id = "cfInteractive", msg = "")
    }
  } else {
    function(conf, reg, job.name, rscript, log.file, job.dir, resources) {
      suppressAll(try(sys.source(rscript, envir = new.env(), keep.source = FALSE)))
      makeSubmitJobResult(status = 0L, batch.job.id = "cfInteractive", msg = "")
    }
  }

  killJob = function(conf, reg, batch.job.id) NULL
  listJobs = function(conf, reg) integer(0L)
  getArrayEnvirName = function() NA_character_

  makeClusterFunctions(name = "Interactive", submitJob = submitJob, killJob = killJob,
    listJobs = listJobs, getArrayEnvirName = getArrayEnvirName)
}
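# Usage sketch (not part of the original source): select this backend in your
# BatchJobs configuration file (.BatchJobs.R), e.g.
# cluster.functions = makeClusterFunctionsInteractive(write.logs = FALSE)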
/scratch/gouwar.j/cran-all/cranData/BatchJobs/R/clusterFunctionsInteractive.R
#' @title Create cluster functions for LSF systems.
#'
#' @description
#' Job files are created based on the brew template
#' \code{template.file}. This file is processed with brew and then
#' submitted to the queue using the \code{bsub} command. Jobs are
#' killed using the \code{bkill} command and the list of running jobs
#' is retrieved using \code{bjobs -u $USER -w}. The user must have the
#' appropriate privileges to submit, delete and list jobs on the
#' cluster (this is usually the case).
#'
#' The template file can access all arguments passed to the
#' \code{submitJob} function, see here \code{\link{ClusterFunctions}}.
#' It is the template file's job to choose a queue for the job
#' and handle the desired resource allocations.
#' Examples can be found on
#' \url{https://github.com/tudo-r/BatchJobs/tree/master/examples/cfLSF}.
#'
#' @template arg_template
#' @template arg_list_jobs_cmd
#' @template ret_cf
#' @family clusterFunctions
#' @export
makeClusterFunctionsLSF = function(template.file, list.jobs.cmd = c("bjobs", "-u $USER", "-w")) {
  assertCharacter(list.jobs.cmd, min.len = 1L, any.missing = FALSE)
  template = cfReadBrewTemplate(template.file)

  # When LSB_BJOBS_CONSISTENT_EXIT_CODE = Y, the bjobs command exits with 0 only
  # when unfinished jobs are found, and 255 when no jobs are found,
  # or a non-existent job ID is entered.
  Sys.setenv(LSB_BJOBS_CONSISTENT_EXIT_CODE = "Y")

  submitJob = function(conf, reg, job.name, rscript, log.file, job.dir, resources, arrayjobs) {
    outfile = cfBrewTemplate(conf, template, rscript, "job")
    # returns: "Job <128952> is submitted to default queue <s_amd>."
    res = runOSCommandLinux("bsub", stdin = outfile, stop.on.exit.code = FALSE)

    # FIXME filled queues
    if (res$exit.code > 0L) {
      cfHandleUnknownSubmitError("bsub", res$exit.code, res$output)
    } else {
      # collapse output strings and first number in string is batch.job.id
      batch.job.id = stri_extract_first_regex(collapse(res$output, sep = " "), "\\d+")
      makeSubmitJobResult(status = 0L, batch.job.id = batch.job.id)
    }
  }

  killJob = function(conf, reg, batch.job.id) {
    cfKillBatchJob("bkill", batch.job.id)
  }

  listJobs = function(conf, reg) {
    # JOBID  USER   STAT  QUEUE  FROM_HOST  EXEC_HOST  JOB_NAME  SUBMIT_TIME
    # 106560 rogon  UNKWN m_amd  hpc84      hpc25      QScript   Mar 19 12:18
    res = runOSCommandLinux(list.jobs.cmd[1L], list.jobs.cmd[-1L], stop.on.exit.code = FALSE)
    if (res$exit.code == 255L && grepl("No unfinished job found", res$output, fixed = TRUE))
      return(character(0L))
    if (res$exit.code > 0L)
      stopf("bjobs produced exit code %i; output %s", res$exit.code, res$output)
    # drop first header line of output
    out = tail(res$output, -1L)
    # first number in strings are batch.job.ids
    stri_extract_first_regex(out, "\\d+")
  }

  getArrayEnvirName = function() "LSB_JOBINDEX"

  makeClusterFunctions(name = "LSF", submitJob = submitJob, killJob = killJob,
    listJobs = listJobs, getArrayEnvirName = getArrayEnvirName)
}
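# Usage sketch (not part of the original source; "lsf.tmpl" is a placeholder
# for your site-specific brew template, set in your .BatchJobs.R):
# cluster.functions = makeClusterFunctionsLSF("lsf.tmpl")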
/scratch/gouwar.j/cran-all/cranData/BatchJobs/R/clusterFunctionsLSF.R
#' Create cluster functions for sequential execution on local host. #' #' @description #' All jobs executed under these cluster functions are executed #' sequentially, but in an independent, new R session. #' That is, \code{submitJob} does not return until the #' job has finished. The main use of this \code{ClusterFunctions} #' implementation is to test and debug programs on a local computer. #' #' Listing jobs returns an empty vector (as no jobs can be running when you call this) #' and \code{killJob} returns at once (for the same reason). #' #' @return [\code{\link{ClusterFunctions}}]. #' @family clusterFunctions #' @export makeClusterFunctionsLocal = function() { submitJob = function(conf, reg, job.name, rscript, log.file, job.dir, resources, arrayjobs) { system2(command = file.path(R.home("bin"), "Rscript"), args = rscript, stdout = log.file, stderr = log.file, wait = TRUE) makeSubmitJobResult(status = 0L, batch.job.id = "cfLocal") } killJob = function(conf, reg, batch.job.id) NULL listJobs = function(conf, reg) integer(0L) getArrayEnvirName = function() NA_character_ makeClusterFunctions(name = "Local", submitJob = submitJob, killJob = killJob, listJobs = listJobs, getArrayEnvirName = getArrayEnvirName) }
/scratch/gouwar.j/cran-all/cranData/BatchJobs/R/clusterFunctionsLocal.R
#' Use multiple cores on local Linux machine to spawn parallel jobs. #' #' @description #' Jobs are spawned by starting multiple R sessions on the commandline #' (similar like on true batch systems). #' Packages \code{parallel} or \code{multicore} are not used in any way. #' #' @param ncpus [\code{integer(1)}]\cr #' Number of VPUs of worker. #' Default is to use all cores but one, where total number of cores #' "available" is given by option \code{\link[base:options]{mc.cores}} #' and if that is not set it is inferred by #' \code{\link[parallel]{detectCores}}. #' @param max.jobs [\code{integer(1)}]\cr #' Maximal number of jobs that can run concurrently for the current registry. #' Default is \code{ncpus}. #' @param max.load [\code{numeric(1)}]\cr #' Load average (of the last 5 min) at which the worker is considered occupied, #' so that no job can be submitted. #' Default is inferred by \code{\link[parallel]{detectCores}}, cf. argument \code{ncpus}. #' @param nice [\code{integer(1)}]\cr #' Process priority to run R with set via nice. Integers between -20 and 19 are allowed. #' If missing, processes are not nice'd and the system default applies (usually 0). #' @param r.options [\code{character}] #' Options for R and Rscript, one option per element of the vector, #' a la \dQuote{--vanilla}. #' Default is \code{c("--no-save", "--no-restore", "--no-init-file", "--no-site-file")}. #' @param script [\code{character(1)}]\cr #' Path to helper bash script which interacts with the worker. #' You really should not have to touch this, as this would imply that we have screwed up and #' published an incompatible version for your system. #' This option is only provided as a last resort for very experienced hackers. #' Note that the path has to be absolute. #' This is what is done in the package: #' \url{https://github.com/tudo-r/BatchJobs/blob/master/inst/bin/linux-helper} #' Default means to take it from package directory. #' @return [\code{\link{ClusterFunctions}}]. #' @family clusterFunctions #' @export makeClusterFunctionsMulticore = function(ncpus = max(getOption("mc.cores", parallel::detectCores()) - 1L, 1L), max.jobs, max.load, nice, r.options = c("--no-save", "--no-restore", "--no-init-file", "--no-site-file"), script) { if (isWindows()) stop("ClusterFunctionsMulticore do not work in Windows") worker = makeWorkerLocalLinux(r.options, script, ncpus, max.jobs, max.load, nice) submitJob = function(conf, reg, job.name, rscript, log.file, job.dir, resources, arrayjobs) { updateWorker(worker, reg$file.dir, tdiff = 0L) s = worker$available if (s != "A") { makeSubmitJobResult(status = 1L, batch.job.id = NULL, msg = sprintf("Multicore busy: %s", s)) } else { pid = try(startWorkerJob(worker, rscript, log.file)) if (is.error(pid)) makeSubmitJobResult(status = 101L, batch.job.id = NULL, msg = "Submit failed.") else makeSubmitJobResult(status = 0L, batch.job.id = pid) } } killJob = function(conf, reg, batch.job.id) { killWorkerJob(worker, batch.job.id) } listJobs = function(conf, reg) { listWorkerJobs(worker, reg$file.dir) } getArrayEnvirName = function() NA_character_ makeClusterFunctions(name = "Multicore", submitJob = submitJob, killJob = killJob, listJobs = listJobs, getArrayEnvirName = getArrayEnvirName) }
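
# A usage sketch (not part of the package sources), assuming a Linux machine:
# use 4 cores, but allow at most 2 concurrent jobs for this registry and stop
# submitting once the 5-min load average reaches 3.
if (FALSE) {
  setConfig(cluster.functions = makeClusterFunctionsMulticore(
    ncpus = 4, max.jobs = 2, max.load = 3))
}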
/scratch/gouwar.j/cran-all/cranData/BatchJobs/R/clusterFunctionsMulticore.R
#' @title Create cluster functions for OpenLava systems.
#'
#' @description
#' Job files are created based on the brew template
#' \code{template.file}. This file is processed with brew and then
#' submitted to the queue using the \code{bsub} command. Jobs are
#' killed using the \code{bkill} command and the list of running jobs
#' is retrieved using \code{bjobs -u $USER -w}. The user must have the
#' appropriate privileges to submit, delete and list jobs on the
#' cluster (this is usually the case).
#'
#' The template file can access all arguments passed to the
#' \code{submitJob} function, see here \code{\link{ClusterFunctions}}.
#' It is the template file's job to choose a queue for the job
#' and handle the desired resource allocations.
#' Examples can be found on
#' \url{https://github.com/tudo-r/BatchJobs/tree/master/examples/cfOpenLava}.
#'
#' @template arg_template
#' @template arg_list_jobs_cmd
#' @template ret_cf
#' @family clusterFunctions
#' @export
makeClusterFunctionsOpenLava = function(template.file, list.jobs.cmd = c("bjobs", "-u $USER", "-w")) {
  assertCharacter(list.jobs.cmd, min.len = 1L, any.missing = FALSE)
  template = cfReadBrewTemplate(template.file)

  # When LSB_BJOBS_CONSISTENT_EXIT_CODE = Y, the bjobs command exits with 0 only
  # when unfinished jobs are found, and 255 when no jobs are found,
  # or a non-existent job ID is entered.
  Sys.setenv(LSB_BJOBS_CONSISTENT_EXIT_CODE = "Y")

  submitJob = function(conf, reg, job.name, rscript, log.file, job.dir, resources, arrayjobs) {
    outfile = cfBrewTemplate(conf, template, rscript, "job")
    # returns: "Job <128952> is submitted to default queue <s_amd>."
    res = runOSCommandLinux("bsub", stdin = outfile, stop.on.exit.code = FALSE)
    # FIXME filled queues
    if (res$exit.code > 0L) {
      cfHandleUnknownSubmitError("bsub", res$exit.code, res$output)
    } else {
      # collapse output strings and first number in string is batch.job.id
      batch.job.id = stri_extract_first_regex(collapse(res$output, sep = " "), "\\d+")
      makeSubmitJobResult(status = 0L, batch.job.id = batch.job.id)
    }
  }

  killJob = function(conf, reg, batch.job.id) {
    cfKillBatchJob("bkill", batch.job.id)
  }

  listJobs = function(conf, reg) {
    # JOBID USER STAT QUEUE FROM_HOST EXEC_HOST JOB_NAME SUBMIT_TIME
    # 106560 rogon UNKWN m_amd hpc84 hpc25 QScript Mar 19 12:18
    # res = runOSCommandLinux("bjobs", c("-u $USER", "-w"), stop.on.exit.code = FALSE)
    res = runOSCommandLinux(list.jobs.cmd[1L], list.jobs.cmd[-1L], stop.on.exit.code = FALSE)
    if (res$exit.code == 255L && grepl("No unfinished job found", res$output, fixed = TRUE))
      return(character(0L))
    if (res$exit.code > 0L)
      stopf("bjobs produced exit code %i; output %s", res$exit.code, res$output)
    # drop first header line of output
    out = tail(res$output, -1L)
    # first number in strings are batch.job.ids
    stri_extract_first_regex(out, "\\d+")
  }

  getArrayEnvirName = function() "LSB_JOBINDEX"

  makeClusterFunctions(name = "OpenLava", submitJob = submitJob, killJob = killJob,
    listJobs = listJobs, getArrayEnvirName = getArrayEnvirName)
}
/scratch/gouwar.j/cran-all/cranData/BatchJobs/R/clusterFunctionsOpenLava.R
#' @title Create cluster functions for Sun Grid Engine systems.
#'
#' @description
#' Job files are created based on the brew template
#' \code{template.file}. This file is processed with brew and then
#' submitted to the queue using the \code{qsub} command. Jobs are
#' killed using the \code{qdel} command and the list of running jobs
#' is retrieved using \code{qstat}. The user must have the
#' appropriate privileges to submit, delete and list jobs on the
#' cluster (this is usually the case).
#'
#' The template file can access all arguments passed to the
#' \code{submitJob} function, see here \code{\link{ClusterFunctions}}.
#' It is the template file's job to choose a queue for the job
#' and handle the desired resource allocations.
#' Examples can be found on
#' \url{https://github.com/tudo-r/BatchJobs/tree/master/examples/cfSGE}.
#'
#' @template arg_template
#' @template arg_list_jobs_cmd
#' @template ret_cf
#' @family clusterFunctions
#' @export
makeClusterFunctionsSGE = function(template.file, list.jobs.cmd = c("qstat", "-u $USER")) {
  assertCharacter(list.jobs.cmd, min.len = 1L, any.missing = FALSE)
  template = cfReadBrewTemplate(template.file)

  submitJob = function(conf, reg, job.name, rscript, log.file, job.dir, resources, arrayjobs) {
    outfile = cfBrewTemplate(conf, template, rscript, "job")
    # returns: "Your job 240933 (\"sleep 60\") has been submitted"
    res = runOSCommandLinux("qsub", outfile, stop.on.exit.code = FALSE)
    # FIXME filled queues
    if (res$exit.code > 0L) {
      cfHandleUnknownSubmitError("qsub", res$exit.code, res$output)
    } else {
      # collapse output strings and first number in string is batch.job.id
      batch.job.id = stri_extract_first_regex(collapse(res$output, sep = " "), "\\d+")
      makeSubmitJobResult(status = 0L, batch.job.id = batch.job.id)
    }
  }

  killJob = function(conf, reg, batch.job.id) {
    cfKillBatchJob("qdel", batch.job.id)
  }

  listJobs = function(conf, reg) {
    # looks like this
    # job-ID prior name user state submit/start at queue slots ja-task-ID
    #-----------------------------------------------------------------------------------------------------------------
    # 240935 0.00000 sleep 60 matthias qw 04/03/2012 15:45:54 1
    # res = runOSCommandLinux("qstat", "-u $USER")
    res = runOSCommandLinux(list.jobs.cmd[1L], list.jobs.cmd[-1L])
    # drop first 2 header lines
    out = tail(res$output, -2L)
    # first number in strings are batch.job.ids
    stri_extract_first_regex(out, "\\d+")
  }

  getArrayEnvirName = function() "SGE_TASK_ID"

  makeClusterFunctions(name = "SGE", submitJob = submitJob, killJob = killJob,
    listJobs = listJobs, getArrayEnvirName = getArrayEnvirName)
}
/scratch/gouwar.j/cran-all/cranData/BatchJobs/R/clusterFunctionsSGE.R
#' @title Create cluster functions for SLURM-based systems. #' #' @description #' Job files are created based on the brew template #' \code{template.file}. This file is processed with brew and then #' submitted to the queue using the \code{sbatch} command. Jobs are #' killed using the \code{scancel} command and the list of running jobs #' is retrieved using \code{squeue}. The user must have the #' appropriate privileges to submit, delete and list jobs on the #' cluster (this is usually the case). #' #' The template file can access all arguments passed to the #' \code{submitJob} function, see here \code{\link{ClusterFunctions}}. #' It is the template file's job to choose a queue for the job #' and handle the desired resource allocations. #' Examples can be found on #' \url{https://github.com/tudo-r/BatchJobs/tree/master/examples/cfSLURM}. #' #' @template arg_template #' @template arg_list_jobs_cmd #' @template ret_cf #' @family clusterFunctions #' @export makeClusterFunctionsSLURM = function(template.file, list.jobs.cmd = c("squeue", "-h", "-o %i", "-u $USER")) { assertCharacter(list.jobs.cmd, min.len = 1L, any.missing = FALSE) template = cfReadBrewTemplate(template.file) submitJob = function(conf, reg, job.name, rscript, log.file, job.dir, resources, arrayjobs) { outfile = cfBrewTemplate(conf, template, rscript, "sb") res = runOSCommandLinux("sbatch", outfile, stop.on.exit.code = FALSE) max.jobs.msg = "sbatch: error: Batch job submission failed: Job violates accounting policy (job submit limit, user's size and/or time limits)" temp.error = "Socket timed out on send/recv operation" output = collapse(res$output, sep = "\n") if (grepl(max.jobs.msg, output, fixed = TRUE)) { makeSubmitJobResult(status = 1L, batch.job.id = NA_character_, msg = max.jobs.msg) } else if (grepl(temp.error, output, fixed = TRUE)) { # another temp error we want to catch makeSubmitJobResult(status = 2L, batch.job.id = NA_character_, msg = temp.error) } else if (res$exit.code > 0L) { cfHandleUnknownSubmitError("sbatch", res$exit.code, res$output) } else { makeSubmitJobResult(status = 0L, batch.job.id = stri_trim_both(stri_split_fixed(output, " ")[[1L]][4L])) } } killJob = function(conf, reg, batch.job.id) { cfKillBatchJob("scancel", batch.job.id) } listJobs = function(conf, reg) { # Result is lines of fully quantified batch.job.ids jids = runOSCommandLinux(list.jobs.cmd[1L], list.jobs.cmd[-1L])$output stri_extract_first_regex(jids, "[0-9]+") } getArrayEnvirName = function() "SLURM_ARRAY_TASK_ID" makeClusterFunctions(name = "SLURM", submitJob = submitJob, killJob = killJob, listJobs = listJobs, getArrayEnvirName = getArrayEnvirName) }
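
# A hypothetical sketch for SLURM (not from the package sources): the template
# path is a placeholder, and the resource names (walltime, memory) are only
# meaningful if your brew template reads them from the 'resources' list that
# submitJobs() passes through.
if (FALSE) {
  setConfig(cluster.functions = makeClusterFunctionsSLURM("/path/to/slurm.tmpl"))
  reg = makeRegistry(id = "slurm_demo", file.dir = tempfile())
  batchMap(reg, sqrt, 1:100)
  submitJobs(reg, resources = list(walltime = 3600, memory = 1024))
}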
/scratch/gouwar.j/cran-all/cranData/BatchJobs/R/clusterFunctionsSLURM.R
#' Create SSH worker for SSH cluster functions.
#'
#' @param nodename [\code{character(1)}]\cr
#'   Host name of node.
#' @param ssh.cmd [\code{character(1)}]\cr
#'   CLI command to ssh into remote node.
#'   Default is \dQuote{ssh}.
#' @param ssh.args [\code{character}]\cr
#'   CLI args for \code{ssh.cmd}.
#'   Default is none.
#' @param rhome [\code{character(1)}]\cr
#'   Path to R installation on worker.
#'   \dQuote{} means R installation on the PATH is used,
#'   of course this implies that it must be on the PATH
#'   (also for non-interactive shells)!
#'   Default is \dQuote{}.
#' @param ncpus [\code{integer(1)}]\cr
#'   Number of VPUs of worker.
#'   Default means to query the worker via \dQuote{/proc/cpuinfo}.
#' @param max.jobs [\code{integer(1)}]\cr
#'   Maximal number of jobs that can run concurrently for the current registry.
#'   Default is \code{ncpus}.
#' @param max.load [\code{numeric(1)}]\cr
#'   Load average (of the last 5 min) at which the worker is considered occupied,
#'   so that no job can be submitted.
#'   Default is \code{ncpus-1}.
#' @param nice [\code{integer(1)}]\cr
#'   Process priority to run R with set via nice. Integers between -20 and 19 are allowed.
#'   If missing, processes are not nice'd and the system default applies (usually 0).
#'   Default is no niceing.
#' @param r.options [\code{character}]\cr
#'   Options for R and Rscript, one option per element of the vector,
#'   a la \dQuote{--vanilla}.
#'   Default is \code{c("--no-save", "--no-restore", "--no-init-file", "--no-site-file")}.
#' @param script [\code{character(1)}]\cr
#'   Path to helper bash script which interacts with the worker.
#'   You really should not have to touch this, as this would imply that we have screwed up and
#'   published an incompatible version for your system.
#'   This option is only provided as a last resort for very experienced hackers.
#'   Note that the path has to be absolute.
#'   This is what is done in the package:
#'   \url{https://github.com/tudo-r/BatchJobs/blob/master/inst/bin/linux-helper}
#'   Default means to take it from package directory.
#' @return [\code{\link{SSHWorker}}].
#' @export
#' @aliases SSHWorker
makeSSHWorker = function(nodename, ssh.cmd = "ssh", ssh.args = character(0L), rhome = "",
  ncpus, max.jobs, max.load, nice,
  r.options = c("--no-save", "--no-restore", "--no-init-file", "--no-site-file"), script) {
  worker = makeWorkerRemoteLinux(nodename, ssh.cmd, ssh.args, rhome, r.options, script,
    ncpus, max.jobs, max.load, nice)
  class(worker) = c("SSHWorker", class(worker))
  return(worker)
}

#' Create an SSH cluster to execute jobs.
#'
#' Worker nodes must share the same file system and be accessible by ssh
#' without manually entering passwords (e.g. by ssh-agent or passwordless pubkey).
#' Note that you can also use this function to parallelize on multiple cores on your local machine.
#' But you still have to run an ssh server and provide passwordless access to
#' localhost.
#'
#' @param ... [\code{\link{SSHWorker}}]\cr
#'   Worker objects, all created with \code{\link{makeSSHWorker}}.
#' @param workers [list of \code{\link{SSHWorker}}]\cr
#'   Alternative way to pass workers.
#' @return [\code{ClusterFunctions}].
#'
#' @examples \dontrun{
#'
#' # Assume you have three nodes larry, curley and moe. All have 6
#' # cpu cores. On curley and moe R is installed under
#' # "/opt/R/R-current" and on larry R is installed under
#' # "/usr/local/R/". larry should not be used extensively because
#' # somebody else wants to compute there as well.
#' # Then a call to 'makeClusterFunctionsSSH'
#' # might look like this:
#'
#' cluster.functions = makeClusterFunctionsSSH(
#'   makeSSHWorker(nodename = "larry", rhome = "/usr/local/R", max.jobs = 2),
#'   makeSSHWorker(nodename = "curley", rhome = "/opt/R/R-current"),
#'   makeSSHWorker(nodename = "moe", rhome = "/opt/R/R-current"))
#' }
#' @export
#' @family clusterFunctions
#' @seealso \code{\link{makeSSHWorker}}
makeClusterFunctionsSSH = function(..., workers) {
  args = list(...)
  if (!xor(length(args) > 0L, !missing(workers)))
    stop("You must use exactly one of: '...', 'workers'!")
  if (missing(workers))
    workers = args
  checkListElementClass(workers, "SSHWorker")
  if (length(workers) == 0L)
    stop("You must pass at least 1 SSH worker!")
  nodenames = extractSubList(workers, "nodename")
  dup = duplicated(nodenames)
  if (any(dup))
    stopf("Multiple definitions for worker nodenames: %s!", collapse(nodenames[dup]))
  names(workers) = nodenames
  rm(nodenames, dup)

  submitJob = function(conf, reg, job.name, rscript, log.file, job.dir, resources, arrayjobs) {
    worker = findWorker(workers, reg$file.dir, tdiff = 5L)
    if (is.null(worker)) {
      states = collapse(extractSubList(workers, "available", simplify = TRUE), sep = "")
      makeSubmitJobResult(status = 1L, batch.job.id = NULL,
        msg = sprintf("Workers busy: %s", states))
    } else {
      pid = try(startWorkerJob(worker, rscript, log.file))
      if (is.error(pid))
        makeSubmitJobResult(status = 101L, batch.job.id = NULL, msg = "Submit failed.")
      else
        makeSubmitJobResult(status = 0L, batch.job.id = paste0(worker$nodename, "#", pid))
    }
  }

  killJob = function(conf, reg, batch.job.id) {
    parts = stri_split_fixed(batch.job.id, "#")[[1L]]
    nodename = parts[1L]
    pid = parts[2L]
    worker = workers[[nodename]]
    if (is.null(worker))
      stopf("Unknown worker node '%s'.", nodename)
    killWorkerJob(worker, pid)
  }

  listJobs = function(conf, reg) {
    res = NULL
    for (worker in workers) {
      nodename = worker[["nodename"]]
      pids = listWorkerJobs(worker, reg$file.dir)
      if (length(pids) > 0L) {
        res = c(res, paste0(nodename, "#", pids))
      }
    }
    res
  }

  getArrayEnvirName = function() NA_character_

  makeClusterFunctions(name = "SSH", submitJob = submitJob, killJob = killJob,
    listJobs = listJobs, getArrayEnvirName = getArrayEnvirName)
}
/scratch/gouwar.j/cran-all/cranData/BatchJobs/R/clusterFunctionsSSH.R
#' @title Create cluster functions for torque-based systems. #' #' @description #' Job files are created based on the brew template #' \code{template.file}. This file is processed with brew and then #' submitted to the queue using the \code{qsub} command. Jobs are #' killed using the \code{qdel} command and the list of running jobs #' is retrieved using \code{qselect}. The user must have the #' appropriate privileges to submit, delete and list jobs on the #' cluster (this is usually the case). #' #' The template file can access all arguments passed to the #' \code{submitJob} function, see here \code{\link{ClusterFunctions}}. #' It is the template file's job to choose a queue for the job #' and handle the desired resource allocations. #' Examples can be found on #' \url{https://github.com/tudo-r/BatchJobs/tree/master/examples/cfTorque}. #' #' @template arg_template #' @template arg_list_jobs_cmd #' @template ret_cf #' @family clusterFunctions #' @export makeClusterFunctionsTorque = function(template.file, list.jobs.cmd = c("qselect", "-u $USER", "-s EHQRTW")) { assertCharacter(list.jobs.cmd, min.len = 1L, any.missing = FALSE) template = cfReadBrewTemplate(template.file) submitJob = function(conf, reg, job.name, rscript, log.file, job.dir, resources, arrayjobs) { outfile = cfBrewTemplate(conf, template, rscript, "pbs") res = runOSCommandLinux("qsub", outfile, stop.on.exit.code = FALSE) max.jobs.msg = "Maximum number of jobs already in queue" output = collapse(res$output, sep = "\n") if (grepl(max.jobs.msg, output, fixed = TRUE)) { makeSubmitJobResult(status = 1L, batch.job.id = NA_character_, msg = max.jobs.msg) } else if (res$exit.code > 0L) { cfHandleUnknownSubmitError("qsub", res$exit.code, res$output) } else { makeSubmitJobResult(status = 0L, batch.job.id = stri_trim_both(output)) } } killJob = function(conf, reg, batch.job.id) { cfKillBatchJob("qdel", batch.job.id) } listJobs = function(conf, reg) { # Result is lines of fully quantified batch.job.ids batch.ids = runOSCommandLinux(list.jobs.cmd[1L], list.jobs.cmd[-1L])$output # simplify batch ids of array jobs, i.e. remove the array id from the batch id unique(gsub("\\[[[:digit:]]\\]", "[]", batch.ids)) } getArrayEnvirName = function() "PBS_ARRAYID" makeClusterFunctions(name = "Torque", submitJob = submitJob, killJob = killJob, listJobs = listJobs, getArrayEnvirName = getArrayEnvirName) }
/scratch/gouwar.j/cran-all/cranData/BatchJobs/R/clusterFunctionsTorque.R
#' BatchJobs configuration.
#'
#' In order to understand how the package should be configured
#' please read
#' \url{https://github.com/tudo-r/BatchJobs/wiki/Configuration}.
#'
#' @name configuration
#' @rdname configuration
#' @family conf
#' @aliases .BatchJobs.R
NULL

# sources 1 config file and returns the envir
sourceConfFile = function(conffile) {
  assertFileExists(conffile)

  conf = new.env()
  x = try(sys.source(conffile, envir = conf))
  if (is.error(x))
    stopf("There was an error in sourcing your configuration file '%s': %s!", conffile, as.character(x))
  checkConf(conf)
  do.call(checkConfElements, as.list(conf))
  return(conf)
}

# sources multiple config files, the later overwrite the first, and returns the envir
sourceConfFiles = function(conffiles) {
  conf = new.env()
  for (cf in conffiles) {
    conf2 = sourceConfFile(cf)
    lapply(ls(conf2), function(x) assign(x, conf2[[x]], envir = conf))
  }
  return(conf)
}

# assigns a conf to namespace
assignConf = function(conf) {
  conf.in.ns = getBatchJobsConf()
  lapply(ls(conf), function(x) assign(x, conf[[x]], envir = conf.in.ns))
}

# locates package conf, userhome conf, working dir conf
findConfigs = function(path = find.package("BatchJobs")) {
  fn.pack = file.path(path, "etc", "BatchJobs_global_config.R")
  fn.user = path.expand("~/.BatchJobs.R")
  fn.wd = suppressWarnings(normalizePath(".BatchJobs.R"))
  Filter(file.exists, unique(c(fn.pack, fn.user, fn.wd)))
}

# reads available config files and assigns them to namespace
readConfs = function(path = find.package("BatchJobs")) {
  conffiles = findConfigs(path)
  if (length(conffiles) == 0L) {
    warning("No configuration found at all. Not in package, not in user.home, not in work dir!")
    return(character(0L))
  }

  # really do this in 2 steps
  # otherwise weird things might happen due to lazy eval combined with envirs
  # and we might not see the error msg triggered in the checking of the config file
  conf = sourceConfFiles(conffiles)
  assignConf(conf)
  invisible(conffiles)
}

# loads conf into namespace on slave
loadConf = function(reg) {
  fn = getConfFilePath(reg)
  info("Loading conf: %s", fn)
  loaded = load2(fn, simplify = FALSE)$conf
  # assign all stuff to conf in namespace
  conf = getBatchJobsConf()
  lapply(ls(loaded), function(x) assign(x, loaded[[x]], envir = conf))
  invisible(conf)
}

getBatchJobsConf = function() {
  .BatchJobs.conf
}

saveConf = function(reg) {
  fn = getConfFilePath(reg)
  info("Saving conf: %s", fn)
  conf = getBatchJobsConf()
  save(file = fn, conf)
}

getConfNames = function() {
  c("cluster.functions", "mail.start", "mail.done", "mail.error",
    "mail.from", "mail.to", "mail.control", "db.driver", "db.options",
    "default.resources", "debug", "raise.warnings", "staged.queries",
    "max.concurrent.jobs", "fs.timeout", "measure.mem")
}

checkConf = function(conf) {
  ns = if (is.list(conf)) names(conf) else ls(conf, all.names = TRUE)
  ns2 = getConfNames()
  if (any(ns %nin% ns2))
    stopf("You are only allowed to define the following R variables in your config:\n%s\nBut you also had:\n%s",
      collapse(ns2, sep = ", "), collapse(setdiff(ns, ns2), sep = ", "))
}

checkConfElements = function(cluster.functions, mail.to, mail.from,
  mail.start, mail.done, mail.error, mail.control, db.driver, db.options,
  default.resources, debug, raise.warnings, staged.queries,
  max.concurrent.jobs, fs.timeout, measure.mem) {
  mail.choices = c("none", "first", "last", "first+last", "all")

  if (!missing(cluster.functions)) assertClass(cluster.functions, "ClusterFunctions")
  if (!missing(mail.from)) assertString(mail.from)
  if (!missing(mail.to)) assertString(mail.to)
  if (!missing(mail.start)) assertChoice(mail.start, mail.choices)
  if (!missing(mail.done)) assertChoice(mail.done, mail.choices)
  if (!missing(mail.error)) assertChoice(mail.error, mail.choices)
  if (!missing(mail.control)) assertList(mail.control)
  if (!missing(db.driver)) assertString(db.driver)
  if (!missing(db.options)) assertList(db.options, names = "named")
  if (!missing(default.resources)) assertList(default.resources, names = "named")
  if (!missing(debug)) assertFlag(debug)
  if (!missing(raise.warnings)) assertFlag(raise.warnings)
  if (!missing(staged.queries)) assertFlag(staged.queries)
  if (!missing(max.concurrent.jobs)) assertCount(max.concurrent.jobs)
  if (!missing(fs.timeout)) assertNumber(fs.timeout)
  if (!missing(measure.mem)) assertFlag(measure.mem)
}

getClusterFunctions = function(conf) {
  conf$cluster.functions
}

# Function which returns a printable string describing the config
# Used in packageStartupMessage and in print.Config
printableConf = function(conf) {
  x = as.list(conf)
  x[setdiff(getConfNames(), names(x))] = ""
  fmt = paste(
    "BatchJobs configuration:",
    "  cluster functions: %s",
    "  mail.from: %s",
    "  mail.to: %s",
    "  mail.start: %s",
    "  mail.done: %s",
    "  mail.error: %s",
    "  default.resources: %s",
    "  debug: %s",
    "  raise.warnings: %s",
    "  staged.queries: %s",
    "  max.concurrent.jobs: %s",
    "  fs.timeout: %s",
    "  measure.mem: %s\n", sep = "\n")
  sprintf(fmt, x$cluster.functions$name, x$mail.from, x$mail.to,
    x$mail.start, x$mail.done, x$mail.error,
    convertToShortString(x$default.resources), x$debug, x$raise.warnings,
    x$staged.queries, x$max.concurrent.jobs, x$fs.timeout, x$measure.mem)
}

#' @export
print.Config = function(x, ...) {
  cat(printableConf(x))
}

#' Load a specific configuration file.
#'
#' @param conffile [\code{character(1)}]\cr
#'   Location of the configuration file to load.
#'   Default is \dQuote{.BatchJobs.R} in the current working directory.
#' @return Invisibly returns a list of configuration settings.
#' @family conf
#' @export
loadConfig = function(conffile = ".BatchJobs.R") {
  # checks are done in sourceConfFile
  conf = sourceConfFile(conffile)
  assignConf(conf)
  invisible(setClasses(as.list(conf), "Config"))
}

#' Set and overwrite configuration settings
#'
#' @param conf [\code{Config} or \code{list}]\cr
#'   List of configuration parameters as returned by \code{\link{loadConfig}} or \code{\link{getConfig}}.
#' @param ... [\code{ANY}]\cr
#'   Named configuration parameters. Overwrites parameters in \code{conf}, if provided.
#' @return Invisibly returns a list of configuration settings.
#' @family conf
#' @export
setConfig = function(conf = list(), ...) {
  if (!is.list(conf) && !inherits(conf, "Config"))
    stopf("Argument 'conf' must be of class 'list' or 'Config', not %s", head(class(conf), 1L))
  overwrites = insert(conf, list(...))
  if (length(overwrites) == 0L)
    return(invisible(getConfig()))
  if (!isProperlyNamed(overwrites))
    stopf("All configuration arguments in '...' must be properly named")
  checkConf(overwrites)
  conf = insert(as.list(getBatchJobsConf()), overwrites)
  assignConf(as.environment(conf))
  invisible(setClasses(conf, "Config"))
}

#' Returns a list of BatchJobs configuration settings
#'
#' @return \code{list} of current configuration variables with class \dQuote{Config}.
#' @family conf
#' @export
getConfig = function() {
  setClasses(as.list(getBatchJobsConf()), "Config")
}
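
# A sketch of a user-side configuration file (~/.BatchJobs.R or ./.BatchJobs.R);
# only the variables listed in getConfNames() may be defined, otherwise
# sourceConfFile() aborts via checkConf(). The values below are illustrative.
if (FALSE) {
  cluster.functions = makeClusterFunctionsLocal()
  mail.start = "none"
  mail.done = "none"
  mail.error = "none"
  default.resources = list(walltime = 3600, memory = 1024)
  debug = FALSE
}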
/scratch/gouwar.j/cran-all/cranData/BatchJobs/R/conf.R
############################################
### Common database functions
############################################

sqlQuote = function(x) {
  sprintf("'%s'", x)
}

dbGetConnection = function(drv, reg, ...) {
  # method dispatch to support different DBMS
  UseMethod("dbGetConnection")
}

dbGetConnection.SQLiteDriver = function(drv, reg, flags = "ro", ...) {
  flags = switch(flags, "ro" = SQLITE_RO, "rw" = SQLITE_RW, "rwc" = SQLITE_RWC)
  opts = list(dbname = file.path(reg$file.dir, "BatchJobs.db"), flags = flags, drv = drv)
  con = do.call(dbConnect, args = c(dropNamed(reg$db.options, "pragmas"), opts))
  for (pragma in reg$db.options$pragmas)
    dbClearResult(dbSendQuery(con, sprintf("PRAGMA %s", pragma)))
  return(con)
}

dbConnectToJobsDB = function(reg, flags = "ro") {
  drv = do.call(reg$db.driver, list())
  dbGetConnection(drv, reg, flags)
}

dbDoQueries = function(reg, queries, flags = "ro", max.retries = 100L, sleep = function(r) 1.025^r) {
  for (i in seq_len(max.retries)) {
    con = try(dbConnectToJobsDB(reg, flags), silent = TRUE)
    if (is.error(con)) {
      if (!grepl("(lock|i/o|readonly)", tolower(con)))
        stopf("Error while establishing the connection: %s", as.character(con))
    } else {
      ok = try({
        dbBegin(con)
        for (query in queries) {
          if (startsWith(query, "SELECT")) {
            ress = dbGetQuery(con, query)
          } else {
            ress = dbSendQuery(con, query)
            if (dbHasCompleted(ress))
              dbClearResult(ress)
          }
        }
      }, silent = TRUE)
      if (!is.error(ok)) {
        # this can fail because DB is locked
        ok2 = dbCommit(con)
        if (ok2) {
          dbDisconnect(con)
          return(ress)
        } else {
          dbRollback(con)
          dbDisconnect(con)
        }
      } else {
        ok = as.character(ok)
        dbRollback(con)
        dbDisconnect(con)
        # catch known temporary errors:
        # - database is still locked
        # - disk I/O error
        # - database is only readable
        if (!grepl("(lock|i/o|readonly)", tolower(ok)))
          stopf("Error in dbDoQueries. Displaying only 1st query. %s (%s)", ok, queries[1L])
      }
    }
    # if we reach this here, DB was locked or temporary I/O error
    Sys.sleep(runif(1L, min = 1, max = sleep(i)))
  }
  stopf("dbDoQueries: max retries (%i) reached, database is still locked!", max.retries)
}

dbDoQuery = function(reg, query, flags = "ro", max.retries = 100L, sleep = function(r) 1.025^r) {
  for (i in seq_len(max.retries)) {
    con = try(dbConnectToJobsDB(reg, flags), silent = TRUE)
    if (is.error(con)) {
      if (!grepl("(lock|i/o|readonly)", tolower(con)))
        stopf("Error while establishing the connection: %s", as.character(con))
    } else {
      if (startsWith(query, "SELECT")) {
        res = try(dbGetQuery(con, query), silent = TRUE)
      } else {
        res = try(dbSendQuery(con, query), silent = TRUE)
        if (!is.error(res))
          dbClearResult(res)
      }
      dbDisconnect(con)
      if (!is.error(res))
        return(res)
      res = as.character(res)
      if (!grepl("(lock|i/o|readonly)", tolower(res))) {
        stopf("Error in dbDoQuery. %s (%s)", res, query)
      }
    }
    # if we reach this here, DB was locked or temporary I/O error
    Sys.sleep(runif(1L, min = 1, max = sleep(i)))
  }
  stopf("dbDoQuery: max retries (%i) reached, database is still locked!", max.retries)
}

dbAddData = function(reg, tab, data) {
  query = sprintf("INSERT INTO %s_%s (%s) VALUES(%s)", reg$id, tab,
    collapse(colnames(data)), collapse(rep.int("?", ncol(data))))
  con = dbConnectToJobsDB(reg, flags = "rw")
  on.exit(dbDisconnect(con))
  dbBegin(con)
  res = dbSendQuery(con, query)
  for (i in seq_row(data)) {
    row = unname(as.list(data[i, ]))
    ok = try(dbBind(res, row))
    if (is.error(ok)) {
      dbClearResult(res)
      dbRollback(con)
      stopf("Error in dbAddData: %s", as.character(ok))
    }
  }
  dbClearResult(res)
  dbCommit(con)
  as.integer(dbGetQuery(con, "SELECT total_changes()"))
}

dbSelectWithIds = function(reg, query, ids, where = TRUE, group.by, limit = NULL, reorder = TRUE) {
  if (!missing(ids))
    query = sprintf("%s %s job_id IN (%s)", query, ifelse(where, "WHERE", "AND"), collapse(ids))
  if (!missing(group.by))
    query = sprintf("%s GROUP BY %s", query, collapse(group.by))
  if (!is.null(limit))
    query = sprintf("%s LIMIT %i", query, limit)

  res = dbDoQuery(reg, query)
  if (missing(ids) || !reorder)
    return(res)
  return(res[na.omit(match(ids, res$job_id)),, drop = FALSE])
}

############################################
### CREATE
############################################

#' ONLY FOR INTERNAL USAGE.
#' @param reg [\code{\link{Registry}}]\cr
#'   Registry.
#' @return Nothing.
#' @keywords internal
#' @export
dbCreateJobDefTable = function(reg) {
  UseMethod("dbCreateJobDefTable")
}

#' @export
dbCreateJobDefTable.Registry = function(reg) {
  query = sprintf("CREATE TABLE %s_job_def (job_def_id INTEGER PRIMARY KEY, fun_id TEXT, pars TEXT, jobname TEXT)", reg$id)
  dbDoQuery(reg, query, flags = "rwc")
  dbCreateExpandedJobsView(reg)
}

dbCreateJobStatusTable = function(reg, extra.cols = "", constraints = "") {
  query = sprintf(paste("CREATE TABLE %s_job_status (job_id INTEGER PRIMARY KEY, job_def_id INTEGER,",
    "first_job_in_chunk_id INTEGER, seed INTEGER, resources_timestamp INTEGER, memory REAL, submitted INTEGER,",
    "started INTEGER, batch_job_id TEXT, node TEXT, r_pid INTEGER,",
    "done INTEGER, error TEXT, error_time INTEGER %s %s)"), reg$id, extra.cols, constraints)
  dbDoQuery(reg, query, flags = "rwc")
  query = sprintf("CREATE INDEX job_def_id ON %s_job_status(job_def_id)", reg$id)
  dbDoQuery(reg, query, flags = "rw")
  return(invisible(TRUE))
}

dbCreateExpandedJobsView = function(reg) {
  query = sprintf("CREATE VIEW %1$s_expanded_jobs AS SELECT * FROM %1$s_job_status LEFT JOIN %1$s_job_def USING(job_def_id)", reg$id)
  dbDoQuery(reg, query, flags = "rw")
}

############################################
### SELECT
############################################

#' ONLY FOR INTERNAL USAGE.
#' @template arg_reg
#' @param ids [\code{integer}]\cr
#'   Ids of selected jobs.
#' @return [list of \code{\link{Job}}]. Retrieved jobs from DB.
#' @keywords internal
#' @export
dbGetJobs = function(reg, ids) {
  UseMethod("dbGetJobs")
}

# note that this does not load the job function from disk to increase speed
#' @method dbGetJobs Registry
#' @export
dbGetJobs.Registry = function(reg, ids) {
  query = sprintf("SELECT job_id, fun_id, pars, jobname, seed FROM %s_expanded_jobs", reg$id)
  tab = dbSelectWithIds(reg, query, ids)
  lapply(seq_row(tab), function(i) {
    makeJob(id = tab$job_id[i], fun.id = tab$fun_id[i], fun = NULL,
      pars = unserialize(charToRaw(tab$pars[i])), name = tab$jobname[i], seed = tab$seed[i])
  })
}

dbGetExpandedJobsTable = function(reg, ids, cols = "*") {
  # Note: job_id must be in cols!
  query = sprintf("SELECT %s FROM %s_expanded_jobs", collapse(cols), reg$id)
  tab = dbSelectWithIds(reg, query, ids)
  setRowNames(tab, tab$job_id)
}

dbGetJobStatusTable = function(reg, ids, cols = "*") {
  # Note: job_id must be in cols!
  query = sprintf("SELECT %s FROM %s_job_status", collapse(cols), reg$id)
  tab = dbSelectWithIds(reg, query, ids)
  setRowNames(tab, tab$job_id)
}

dbGetJobCount = function(reg) {
  query = sprintf("SELECT COUNT(*) AS count FROM %s_job_status", reg$id)
  dbDoQuery(reg, query)$count
}

dbGetJobId = function(reg) {
  query = sprintf("SELECT job_id FROM %s_job_status LIMIT 1", reg$id)
  as.integer(dbDoQuery(reg, query)$job_id)
}

dbGetJobIds = function(reg) {
  query = sprintf("SELECT job_id FROM %s_job_status", reg$id)
  dbDoQuery(reg, query)$job_id
}

dbCheckJobIds = function(reg, ids) {
  not.found = setdiff(ids, dbGetJobIds(reg))
  if (length(not.found) > 0L)
    stopf("Ids not present in registry: %s", collapse(not.found))
}

dbGetJobIdsIfAllDone = function(reg) {
  query = sprintf("SELECT job_id, done FROM %s_job_status", reg$id)
  res = dbDoQuery(reg, query)
  if (all(!is.na(res$done)))
    return(res$job_id)
  stop("Not all jobs finished (yet)!")
}

dbGetLastAddedIds = function(reg, tab, id.col, n) {
  query = sprintf("SELECT %s AS id_col FROM %s_%s ORDER BY %s DESC LIMIT %i", id.col, reg$id, tab, id.col, n)
  rev(dbDoQuery(reg, query)$id_col)
}

dbFindDone = function(reg, ids, negate = FALSE, limit = NULL) {
  query = sprintf("SELECT job_id FROM %s_job_status WHERE %s (done IS NOT NULL)",
    reg$id, if (negate) "NOT" else "")
  dbSelectWithIds(reg, query, ids, where = FALSE, limit = limit)$job_id
}

dbFindErrors = function(reg, ids, negate = FALSE, limit = NULL) {
  query = sprintf("SELECT job_id FROM %s_job_status WHERE %s (error IS NOT NULL)",
    reg$id, if (negate) "NOT" else "")
  dbSelectWithIds(reg, query, ids, where = FALSE, limit = limit)$job_id
}

dbFindTerminated = function(reg, ids, negate = FALSE, limit = NULL) {
  query = sprintf("SELECT job_id FROM %s_job_status WHERE %s (done IS NOT NULL OR error IS NOT NULL)",
    reg$id, if (negate) "NOT" else "")
  dbSelectWithIds(reg, query, ids, where = FALSE, limit = limit)$job_id
}

dbFindSubmitted = function(reg, ids, negate = FALSE, limit = NULL) {
  query = sprintf("SELECT job_id FROM %s_job_status WHERE %s (submitted IS NOT NULL)",
    reg$id, if (negate) "NOT" else "")
  dbSelectWithIds(reg, query, ids, where = FALSE, limit = limit)$job_id
}

dbFindStarted = function(reg, ids, negate = FALSE, limit = NULL) {
  query = sprintf("SELECT job_id FROM %s_job_status WHERE %s (started IS NOT NULL)",
    reg$id, if (negate) "NOT" else "")
  dbSelectWithIds(reg, query, ids, where = FALSE, limit = limit)$job_id
}

dbFindOnSystem = function(reg, ids, negate = FALSE, limit = NULL, batch.ids) {
  if (missing(batch.ids))
    batch.ids = getBatchIds(reg, "Cannot find jobs on system")
  query = sprintf("SELECT job_id FROM %s_job_status WHERE %s (batch_job_id IN (%s))",
    reg$id, if (negate) "NOT" else "", collapse(sqlQuote(batch.ids)))
  dbSelectWithIds(reg, query, ids, where = FALSE, limit = limit)$job_id
}

dbFindSubmittedNotTerminated = function(reg, ids, negate = FALSE, limit = NULL) {
  query = sprintf("SELECT job_id FROM %s_job_status WHERE %s (submitted IS NOT NULL AND done IS NULL AND error IS NULL)",
    reg$id, if (negate) "NOT" else "")
  dbSelectWithIds(reg, query, ids, where = FALSE, limit = limit)$job_id
}

dbFindRunning = function(reg, ids, negate = FALSE, limit = NULL, batch.ids) {
  if (missing(batch.ids))
    batch.ids = getBatchIds(reg, "Cannot find jobs on system")
  query = sprintf("SELECT job_id FROM %s_job_status WHERE %s (batch_job_id IN (%s) AND started IS NOT NULL AND done IS NULL AND error IS NULL)",
    reg$id, if (negate) "NOT" else "", collapse(sqlQuote(batch.ids)))
  dbSelectWithIds(reg, query, ids, where = FALSE, limit = limit)$job_id
}

dbFindExpiredJobs = function(reg, ids, negate = FALSE, limit = NULL, batch.ids) {
  if (missing(batch.ids))
    batch.ids = getBatchIds(reg, "Cannot find jobs on system")
  # started, not terminated, not running
  query = sprintf("SELECT job_id FROM %s_job_status WHERE %s (started IS NOT NULL AND done IS NULL AND error is NULL AND batch_job_id NOT IN (%s))",
    reg$id, if (negate) "NOT" else "", collapse(sqlQuote(batch.ids)))
  dbSelectWithIds(reg, query, ids, where = FALSE, limit = limit)$job_id
}

dbFindDisappeared = function(reg, ids, negate = FALSE, limit = NULL, batch.ids) {
  if (missing(batch.ids))
    batch.ids = getBatchIds(reg, "Cannot find jobs on system")
  query = sprintf("SELECT job_id FROM %s_job_status WHERE %s (submitted IS NOT NULL AND started IS NULL AND batch_job_id NOT IN (%s))",
    reg$id, if (negate) "NOT" else "", collapse(sqlQuote(batch.ids)))
  dbSelectWithIds(reg, query, ids, where = FALSE, limit = limit)$job_id
}

dbGetFirstJobInChunkIds = function(reg, ids) {
  query = sprintf("SELECT job_id, first_job_in_chunk_id FROM %s_job_status", reg$id)
  dbSelectWithIds(reg, query, ids)$first_job_in_chunk_id
}

dbGetErrorMsgs = function(reg, ids, filter = FALSE, limit = NULL) {
  query = sprintf("SELECT job_id, error from %s_job_status", reg$id)
  if (filter)
    query = sprintf("%s WHERE error IS NOT NULL", query)
  dbSelectWithIds(reg, query, ids, where = !filter, limit = limit)
}

dbGetStats = function(reg, ids, running = FALSE, expired = FALSE, times = FALSE, batch.ids) {
  cols = c(n = "COUNT(job_id)",
    submitted = "COUNT(submitted)",
    started = "COUNT(started)",
    done = "COUNT(done)",
    error = "COUNT(error)",
    running = "NULL",
    expired = "NULL",
    t_min = "NULL",
    t_avg = "NULL",
    t_max = "NULL")
  if (missing(batch.ids) && (expired || running))
    batch.ids = getBatchIds(reg, "Cannot find jobs on system")
  if (running)
    cols["running"] = sprintf("SUM(started IS NOT NULL AND done IS NULL AND error IS NULL AND batch_job_id IN (%s))", collapse(sqlQuote(batch.ids)))
  if (expired)
    cols["expired"] = sprintf("SUM(started IS NOT NULL AND done IS NULL AND error IS NULL AND batch_job_id NOT IN (%s))", collapse(sqlQuote(batch.ids)))
  if (times)
    cols[c("t_min", "t_avg", "t_max")] = c("MIN(done - started)", "AVG(done - started)", "MAX(done - started)")
  query = sprintf("SELECT %s FROM %s_job_status", collapse(paste(cols, "AS", names(cols)), sep = ", "), reg$id)
  df = dbSelectWithIds(reg, query, ids, reorder = FALSE)
  # Convert to correct type. Null has no type and casts tend to not work properly with RSQLite
  x = c("n", "submitted", "started", "done", "error", "running", "expired")
  df[x] = lapply(df[x], as.integer)
  x = c("t_min", "t_avg", "t_max")
  df[x] = lapply(df[x], as.double)
  df
}

dbGetJobNames = function(reg, ids) {
  query = sprintf("SELECT job_id, jobname FROM %s_expanded_jobs", reg$id)
  as.character(dbSelectWithIds(reg, query, ids)$jobname)
}

dbMatchJobNames = function(reg, ids, jobnames) {
  query = sprintf("SELECT job_id FROM %s_expanded_jobs WHERE jobname IN (%s)", reg$id, collapse(sqlQuote(jobnames)))
  dbSelectWithIds(reg, query, ids, where = FALSE)$job_id
}

############################################
### Messages
############################################

dbSendMessage = function(reg, msg, staged = useStagedQueries(), fs.timeout = NA_real_) {
  ## AD HOC/FIXME: Avoid partial matching; some functions pass 'msg' with
  ## field 'msgs' and some with field 'msg' (e.g. dbMakeMessageError()).
  msgT <- if ("msgs" %in% names(msg)) msg$msgs else msg$msg
  if (staged) {
    fn = getPendingFile(reg, msg$type, msg$ids[1L])
    writeSQLFile(msgT, fn)
    waitForFiles(fn, timeout = fs.timeout)
  } else {
    dbDoQuery(reg, msgT, flags = "rw")
  }
}

dbSendMessages = function(reg, msgs, max.retries = 200L, sleep = function(r) 1.025^r,
  staged = useStagedQueries(), fs.timeout = NA_real_) {
  if (length(msgs) == 0L)
    return(TRUE)

  if (staged) {
    chars = .OrderChars
    # reorder messages in sublist
    msgs = split(msgs, extractSubList(msgs, "type"))
    msgs = msgs[order(match(names(msgs), names(chars)))]
    fns = vcapply(msgs, function(cur) {
      first = cur[[1L]]
      fn = getPendingFile(reg, first$type, first$ids[1L], chars[first$type])
      writeSQLFile(extractSubList(cur, "msg"), fn)
      fn
    })
    waitForFiles(fns, timeout = fs.timeout)
  } else {
    ok = try(dbDoQueries(reg, extractSubList(msgs, "msg"), flags = "rw", max.retries, sleep))
    if (is.error(ok)) {
      ok = as.character(ok)
      # match the stopf() message raised by dbDoQueries() (wrapped by try(),
      # so compare via grepl() instead of string equality)
      if (grepl("database is still locked", ok, fixed = TRUE)) {
        return(FALSE)
      } else {
        # throw exception again
        stopf("Error in dbSendMessages: %s", ok)
      }
    }
  }
  return(TRUE)
}

dbMakeMessageSubmitted = function(reg, job.ids, time = now(), batch.job.id,
  first.job.in.chunk.id = NULL, resources.timestamp, type = "submitted") {
  if (is.null(first.job.in.chunk.id))
    first.job.in.chunk.id = "NULL"
  updates = sprintf("first_job_in_chunk_id=%s, submitted=%i, batch_job_id='%s', resources_timestamp=%i",
    first.job.in.chunk.id, time, batch.job.id, resources.timestamp)
  list(msg = sprintf("UPDATE %s_job_status SET %s WHERE job_id in (%s)", reg$id, updates, collapse(job.ids)),
    ids = job.ids, type = type)
}

dbMakeMessageStarted = function(reg, job.ids, time = now(), type = "started") {
  node = gsub("'", "\"", Sys.info()["nodename"], fixed = TRUE)
  updates = sprintf("started=%i, node='%s', r_pid=%i, error=NULL, done=NULL", time, node, Sys.getpid())
  list(msg = sprintf("UPDATE %s_job_status SET %s WHERE job_id in (%s)", reg$id, updates, collapse(job.ids)),
    ids = job.ids, type = type)
}

dbMakeMessageError = function(reg, job.ids, err.msg, time = now(), memory = -1, type = "error") {
  # FIXME how to escape ticks (')? Just replaced with double quotes for the moment
  err.msg = gsub("'", "\"", err.msg, fixed = TRUE)
  err.msg = gsub("[^[:print:]]", " ", err.msg)
  updates = sprintf("error='%s', error_time=%i, done=NULL, memory='%.4f'", err.msg, time, memory)
  list(msg = sprintf("UPDATE %s_job_status SET %s WHERE job_id in (%s)", reg$id, updates, collapse(job.ids)),
    ids = job.ids, type = type)
}

dbMakeMessageDone = function(reg, job.ids, time = now(), memory = -1, type = "done") {
  updates = sprintf("done=%i, error=NULL, memory='%.04f'", time, memory)
  list(msg = sprintf("UPDATE %s_job_status SET %s WHERE job_id in (%s)", reg$id, updates, collapse(job.ids)),
    ids = job.ids, type = type)
}

dbMakeMessageKilled = function(reg, job.ids, type = "last") {
  updates = "resources_timestamp=NULL, memory=NULL, submitted=NULL, started=NULL, batch_job_id=NULL, node=NULL, r_pid=NULL, done=NULL, error=NULL"
  list(msgs = sprintf("UPDATE %s_job_status SET %s WHERE job_id in (%s)", reg$id, updates, collapse(job.ids)),
    ids = job.ids, type = type)
}

dbConvertNumericToPOSIXct = function(x) {
  now = Sys.time()
  as.POSIXct(x, origin = now - as.integer(now))
}

dbSetJobFunction = function(reg, ids, fun.id) {
  query = sprintf("UPDATE %1$s_job_def SET fun_id = '%2$s' WHERE job_def_id IN (SELECT job_def_id FROM %1$s_job_status WHERE job_id IN (%3$s))",
    reg$id, fun.id, collapse(ids))
  dbDoQuery(reg, query, flags = "rw")
}

dbSetJobNames = function(reg, ids, jobnames) {
  queries = sprintf("UPDATE %1$s_job_def SET jobname = '%2$s' WHERE job_def_id IN (SELECT job_def_id FROM %1$s_job_status WHERE job_id IN (%3$i))",
    reg$id, jobnames, ids)
  dbDoQueries(reg, queries, flags = "rw")
}

# this is used in parallelMap :/
dbRemoveJobs = function(reg, ids) {
  query = sprintf("DELETE FROM %s_job_status WHERE job_id IN (%s)", reg$id, collapse(ids))
  dbDoQuery(reg, query, flags = "rw")
  query = sprintf("DELETE FROM %1$s_job_def WHERE job_def_id NOT IN (SELECT DISTINCT job_def_id FROM %1$s_job_status)", reg$id)
  dbDoQuery(reg, query, flags = "rw")
  return(invisible(TRUE))
}
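
# For illustration (not part of the package): the dbFind* helpers above all
# compose plain SQL over the per-registry job status table. For a registry
# with id "myreg", dbFindDone(reg, ids = 1:3) runs roughly
#
#   SELECT job_id FROM myreg_job_status WHERE (done IS NOT NULL) AND job_id IN (1,2,3)
#
# via dbSelectWithIds(), which afterwards reorders the result to match 'ids'.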
/scratch/gouwar.j/cran-all/cranData/BatchJobs/R/database.R
#' Helper function to debug multicore mode.
#'
#' @description
#' Useful in case of severe errors.
#' Tries different operations of increasing difficulty
#' and provides debug output on the console.
#'
#' @param r.options [\code{character}]\cr
#'   Options for R and Rscript, one option per element of the vector,
#'   a la \dQuote{--vanilla}.
#'   Default is \code{c("--no-save", "--no-restore", "--no-init-file", "--no-site-file")}.
#' @return Nothing.
#' @family debug
#' @export
debugMulticore = function(r.options) {
  if (missing(r.options))
    r.options = c("--no-save", "--no-restore", "--no-init-file", "--no-site-file")
  conf = getBatchJobsConf()
  conf$debug = TRUE
  conf$mail.start = conf$mail.done = conf$mail.error = "none"
  rhome = R.home()

  messagef("*** System info: ***")
  print(Sys.info())
  catf("\n")

  messagef("*** which R: ***")
  res = runOSCommandLinux("which", "R")
  messagef("which R result: %s", res$output)
  catf("\n")

  messagef("*** Find helper script: ***")
  script = findHelperScriptLinux(rhome = rhome, r.options = r.options)
  messagef("Find helper script result: %s", script)
  catf("\n")

  messagef("*** Auto-detecting ncpus: ***")
  worker = makeWorkerLocalLinux(r.options = r.options, script = script, ncpus = 1)
  ncpus = runWorkerCommand(worker, "number-of-cpus")
  messagef("Auto-detecting ncpus result: %s", ncpus)
  catf("\n")

  messagef("*** Query worker status: ***")
  res = runWorkerCommand(worker, "status", args = "")
  messagef("Query worker status result: %s", res)
  catf("\n")

  messagef("*** Submitting 1 job: ***")
  conf$cluster.functions = makeClusterFunctionsMulticore()
  fd = tempfile()
  reg = makeRegistry(id = "debug_multicore", file.dir = fd, sharding = FALSE)
  batchMap(reg, identity, 1)
  submitJobs(reg)
  Sys.sleep(3)
  messagef("Submitting 1 job result: %i", loadResult(reg, 1))
  messagef("Query worker status:")
  res = runWorkerCommand(worker, "status", args = reg$file.dir)
  messagef("Query worker status result: %s", res)
  catf("\n")

  messagef("*** Killing 2 jobs: ***")
  fd = tempfile()
  reg = makeRegistry(id = "debug_multicore", file.dir = fd, sharding = FALSE)
  f = function(i) if (i <= 1) i else f(i-1) + f(i-2)
  xs = 50 + seq(1, 2)
  ids = 1:2
  batchMap(reg, f, xs)
  submitJobs(reg)
  Sys.sleep(3)
  messagef("Query worker status:")
  res = runWorkerCommand(worker, "status", args = reg$file.dir)
  messagef("Query worker status result: %s", res)
  messagef("Running jobs: %s", collapse(findRunning(reg)))
  killJobs(reg, ids)
  messagef("Query worker status:")
  res = runWorkerCommand(worker, "status", args = reg$file.dir)
  messagef("Query worker status result: %s", res)
  messagef("Running jobs: %s", collapse(findRunning(reg)))
  catf("\n")
}
/scratch/gouwar.j/cran-all/cranData/BatchJobs/R/debugMulticore.R
#' @title Helper function to debug SSH mode.
#'
#' @description
#' Useful in case of configuration problems.
#' Tries different operations of increasing difficulty
#' and provides debug output on the console.
#'
#' Note that this function does not access nor use information specified for
#' your cluster functions in your configuration.
#'
#' @param nodename [\code{character(1)}]\cr
#'   Node on which worker should be constructed for the test.
#' @param ssh.cmd [\code{character(1)}]\cr
#'   CLI command to ssh into remote node.
#'   Default is \dQuote{ssh}.
#' @param ssh.args [\code{character}]\cr
#'   CLI args for \code{ssh.cmd}.
#'   Default is none.
#' @param rhome [\code{character(1)}]\cr
#'   Path to R installation on the worker.
#'   \dQuote{} means R installation on the PATH is used,
#'   of course this implies that it must be on the PATH
#'   (also for non-interactive shells)!
#'   Default is \dQuote{}.
#' @param r.options [\code{character}]\cr
#'   Options for R and Rscript, one option per element of the vector,
#'   a la \dQuote{--vanilla}.
#'   Default is \code{c("--no-save", "--no-restore", "--no-init-file", "--no-site-file")}.
#' @param dir [\code{character(1)}]\cr
#'   Path where internally used test registries can be created.
#'   Note that this must be shared for the worker.
#'   Default is current working directory.
#' @return Nothing.
#' @family debug
#' @export
debugSSH = function(nodename, ssh.cmd = "ssh", ssh.args = character(0L), rhome = "",
  r.options = c("--no-save", "--no-restore", "--no-init-file", "--no-site-file"),
  dir = getwd()) {
  assertString(ssh.cmd)
  assertCharacter(ssh.args, any.missing = FALSE)
  assertString(nodename)
  assertString(rhome)
  assertString(dir)
  conf = getBatchJobsConf()
  conf$debug = TRUE
  conf$mail.start = conf$mail.done = conf$mail.error = "none"
  wd = dir

  messagef("*** System info on master: ***")
  print(Sys.info())
  catf("\n")

  messagef("*** which R on slave: ***")
  res = runOSCommandLinux(cmd = "which", args = "R", ssh = TRUE, ssh.cmd = ssh.cmd,
    ssh.args = ssh.args, nodename = nodename, stop.on.exit.code = TRUE)
  messagef("which R result:")
  print(res)
  catf("\n")

  messagef("*** Find helper script on slave: ***")
  res = findHelperScriptLinux(rhome, r.options, ssh = TRUE, ssh.cmd = ssh.cmd,
    ssh.args = ssh.args, nodename = nodename)
  messagef("Find helper script result:")
  print(res)
  catf("\n")

  messagef("*** Auto-detecting ncpus for slave: ***")
  worker = makeWorkerRemoteLinux(ssh.cmd = ssh.cmd, ssh.args = ssh.args, nodename = nodename,
    rhome = rhome, r.options = r.options, ncpus = 1)
  res = runWorkerCommand(worker = worker, command = "number-of-cpus")
  messagef("Auto-detecting ncpus result:")
  print(res)
  catf("\n")

  queryWorkerStatus = function() {
    messagef("*** Query worker status: ***")
    res = runWorkerCommand(worker = worker, command = "status", args = "")
    messagef("Query worker status result:")
    message("load n.rprocs n.rprocs.50 n.jobs")
    print(res)
    catf("\n")
  }

  queryWorkerStatus()

  messagef("*** Submitting 1 job: ***")
  ssh.workers = list(makeSSHWorker(ssh.cmd = ssh.cmd, ssh.args = ssh.args,
    nodename = nodename, rhome = rhome, r.options = r.options))
  conf$cluster.functions = do.call(makeClusterFunctionsSSH, ssh.workers)
  id = "debug_ssh_1"
  reg = makeRegistry(id = id, file.dir = file.path(dir, id), work.dir = wd, sharding = FALSE)
  batchMap(reg, identity, 1)
  submitJobs(reg)
  Sys.sleep(3)
  messagef("Submitting 1 job result: %i", loadResult(reg, 1))
  queryWorkerStatus()

  messagef("*** Killing 2 jobs: ***")
  id = "debug_ssh_2"
  reg = makeRegistry(id = id, file.dir = file.path(dir, id), work.dir = wd, sharding = FALSE)
  f = function(i) if (i <= 1) i else f(i-1) + f(i-2)
  xs = 50 + seq(1, 2)
  ids = 1:2
  batchMap(reg, f, xs)
  submitJobs(reg)
  Sys.sleep(3)
  queryWorkerStatus()
  messagef("Running jobs: %s", collapse(findRunning(reg)))
  killJobs(reg, ids)
  queryWorkerStatus()
  messagef("Running jobs: %s", collapse(findRunning(reg)))
  catf("\n")
}
/scratch/gouwar.j/cran-all/cranData/BatchJobs/R/debugSSH.R
doJob = function(reg, ids, multiple.result.files, staged, disable.mail, first, last, array.id) {
  saveOne = function(result, name) {
    fn = getResultFilePath(reg, job$id, name)
    message("Writing result file: ", fn)
    save2(file = fn, result = result)
  }

  # Get the conf
  conf = loadConf(reg)

  # Say hi.
  messagef("%s: Starting job on node %s.", Sys.time(), Sys.info()["nodename"])
  messagef("Auto-mailer settings: start=%s, done=%s, error=%s.", conf$mail.start, conf$mail.done, conf$mail.error)

  # We need to see all warnings immediately
  if (conf$cluster.functions$name != "Testing")
    options(warning.length = 8170L, warn = 1L + conf$raise.warnings)

  # Go to the work.dir
  wd = switchWd(reg)
  on.exit({
    wd$reset()
    message("Memory usage according to gc:")
    print(gc())
  })

  if (!is.na(array.id)) {
    # FIXME better send error to database here, we don't see those errors on the master :(
    array.id = asInt(as.integer(array.id), lower = 1L, upper = length(ids))
    messagef("Processing array id %s", array.id)
    ids = ids[array.id]
  }

  n = length(ids)
  results = character(n)
  error = logical(n)
  mail.extra.msg = ""
  cache = makeFileCache(use.cache = n > 1L)

  # notify status
  sendMail(reg, ids, results, "", disable.mail, condition = "start", first, last)

  # create buffer of started messages
  msg.buf = buffer(capacity = 2L * n)
  next.flush = 0L

  if (staged) {
    fn = getJobFile(reg, first)
    messagef("Loading jobs from file '%s'", fn)
    jobs = readRDS(fn)
  } else {
    jobs = getJobs(reg, ids, check.ids = FALSE)
  }

  for (i in seq_len(n)) {
    job = jobs[[i]]
    messagef("########## Executing jid=%s ##########", job$id)
    started = Sys.time()
    msg.buf$push(dbMakeMessageStarted(reg, ids[i], time = as.integer(started)))
    messagef("Timestamp: %s", started)
    print(job)

    if (now() > next.flush) {
      if (dbSendMessages(reg, msg.buf$get(), staged = staged))
        msg.buf$clear()
      next.flush = now() + as.integer(runif(1L, 1200L, 24001L))
    }

    message("Setting seed: ", job$seed)
    seed = seeder(reg, job$seed)
    if (conf$measure.mem)
      gc(reset = TRUE)
    result = try(applyJobFunction(reg, job, cache), silent = TRUE)
    mem.used = if (conf$measure.mem) sum(gc()[, 6L]) else -1
    seed$reset()
    catf("Result:")
    print(str(result, max.level = 1L, list.len = 5L))

    error[i] = is.error(result)
    if (error[i]) {
      results[i] = as.character(result)
    } else if (multiple.result.files) {
      if (!is.list(result)) {
        results[i] = "multiple.result.files is TRUE, but your algorithm did not return a list!"
        error[i] = TRUE
      } else if (!isProperlyNamed(result)) {
        results[i] = "multiple.result.files is TRUE, but the returned list is not fully, distinctly named!"
error[i] = TRUE } } if (error[i]) { msg.buf$push(dbMakeMessageError(reg, job$id, err.msg = results[i], memory = mem.used)) message("Error occurred: ", results[i]) } else { results[i] = paste0(capture.output(str(result)), collapse = "\n") msg.buf$push(dbMakeMessageDone(reg, job$id, memory = mem.used)) if (multiple.result.files) { Map(saveOne, result = result, name = names(result)) } else { saveOne(result, NA_character_) } } } # try to flush the remaining msgs at the end for (i in seq_len(10L)) { if (dbSendMessages(reg, msg.buf$get(), staged = staged)) { msg.buf$clear() break } Sys.sleep(round(runif(1L, 30, 120))) } # check if there are still remaining messages if (!msg.buf$empty()) { mail.extra.msg = paste("Some DB messages could not be flushed.", "This indicates some DB problem or too much communication with the DB.", "Everything should still be ok, you only might have to resubmit some jobs as they are not recorded as 'done'.", sep = "\n") warningf(mail.extra.msg) } sendMail(reg, ids, results, mail.extra.msg, disable.mail, condition = ifelse(any(error), "error", "done"), first, last) messagef("%s: All done.", Sys.time()) return(!any(error)) }
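
# For clarity (illustrative only, not package code): with multiple.result.files
# = TRUE the job function must return a fully and distinctly named list;
# saveOne() above then writes one "<id>-result-<name>.RData" file per element.
if (FALSE) {
  f = function(x) list(squared = x^2, doubled = 2 * x)
}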
/scratch/gouwar.j/cran-all/cranData/BatchJobs/R/doJob.R
checkDir = function(path, create = FALSE, check.empty = FALSE, check.posix = FALSE, msg = FALSE) { if (create) { if (file.exists(path)) { if (!isDirectory(path)) stop("File in place where dir should be created: ", path) } else { if (msg) info("Creating dir: %s", path) if (!dir.create(path, recursive = TRUE)) stopf("Could not create dir: %s", path) } } if (!isDirectory(path)) stopf("Directory '%s' does not exists", path) if (!is.accessible(path)) stopf("Directory '%s' is not readable/writable!", path) if (check.empty && any(list.files(path, all.files = TRUE) %nin% c(".", ".."))) stopf("Directory '%s' does not seem to be empty: %s", path, paste(setdiff(list.files(path, all.files = TRUE), c(".", "..")), collapse=", ")) if (check.posix && getOption("BatchJobs.check.posix", TRUE)) { path.abs = sanitizePath(path, make.absolute = TRUE) if(! grepl("^[[:alnum:]:/_.-]+$", path.abs)) stopf("Directory '%s' contains characters that are not fully portable according to POSIX standards. Allowed: a-z A-Z 0-9 : / . - _", path.abs) } } checkDirs = function(paths, ...) { vcapply(paths, checkDir) } createShardedDirs = function(reg, ids) { if (reg$sharding) { lapply(getJobDirs(reg, ids, unique = TRUE), checkDir, create = TRUE) } } # tests a directory for read and write permissions # uses a heuristic for windows is.accessible = function(path) { if (isWindows()) { # Workaround: No POSIX file system informations available, use a heuristic rnd = basename(tempfile("")) tf1 = file.path(path, sprintf("test_write_access_file_%s", rnd)) td1 = file.path(path, sprintf("test_write_access_dir_%s", rnd)) tf2 = file.path(td1, "test_write_access_subfile") td2 = file.path(td1, "test_write_access_subdir") # on exit, try to clean up the mess we might have caused on.exit(try(removeDirs(c(tf2, td2, tf1, td1), recursive = TRUE))) fileCreate = function(pathname) { if (!file.create(pathname)) return(FALSE) msg0 = "Hello world!" cat(file=pathname, msg0) msg = readLines(pathname, warn=FALSE) if (identical(msg, msg0)) return(TRUE) stop(sprintf("Created test file does not contain expected content ('%s'): '%s'", msg, msg0)) } # perform the checks ok = try({ fileCreate(tf1) && dir.create(td1) && dir.create(td2) && length(list.files(td1)) == 1L && fileCreate(tf2) && all(removeDirs(c(tf2, td2, tf1, td1), recursive = TRUE)) }) if (is.error(ok) || !isTRUE(ok)) return(FALSE) # we don't need the on exit handler anymore, everything should be fine on.exit(NULL) return(TRUE) } return(file.access(path, mode = c(2L, 4L)) == 0L) } # a more trusty version than file.exists() fileExists = function(path) { if (file.exists(path)) return(TRUE) ## Double check with what dir() reports, because (at least) on Windows ## there seems to be a delay between a file/directory being removed ## and the directory listings being updated. /HB 2015-02-08 filename = basename(path) dirs = dir(path = dirname(path), all.files = TRUE, include.dirs = TRUE) return(filename %in% dirs) } # a more robust and insisting version than unlink() removeDirs = function(paths, recursive=FALSE, ..., must.work=TRUE, max.tries=30L, interval=0.2) { exists = vlapply(paths, fileExists) for (ii in which(exists)) { path = paths[ii] for (kk in seq_len(max.tries)) { unlink(path, recursive=recursive, ...) 
      exists[ii] = fileExists(path)
      if (!exists[ii])
        break
      if (kk < max.tries)
        Sys.sleep(interval)
    }
  }

  exists = vlapply(paths, fileExists)
  failed = paths[exists]
  if (must.work && length(failed) > 0L)
    stop("Failed to remove files/directories: ", paste(sQuote(failed), collapse = ", "))
  return(!exists)
}

isPathFromRoot = function(path) {
  (isWindows() & grepl("^[[:alpha:]]:", path)) | grepl("^[/\\]", path)
}

getJobDirs = function(reg, ids, unique = FALSE) {
  if (reg$sharding) {
    shards = sprintf("%02i", ids %% 100L)
    if (unique)
      shards = unique(shards)
    return(file.path(reg$file.dir, "jobs", shards))
  }
  file.path(reg$file.dir, "jobs")
}

getFilePaths = function(reg, id, suffix, ext) {
  fn = sprintf("%i%s.%s", id, ifelse(is.null(suffix), "", paste0("-", suffix)), ext)
  file.path(getJobDirs(reg, id), fn)
}

getJobParentDir = function(file.dir)
  file.path(file.dir, "jobs")

getFunDir = function(file.dir)
  file.path(file.dir, "functions")

getFunFilePath = function(reg, fun.id)
  file.path(getFunDir(reg$file.dir), sprintf("%s.RData", fun.id))

getJobFile = function(reg, id)
  getFilePaths(reg, id, NULL, "rds")

getConfFilePath = function(reg)
  file.path(reg$file.dir, "conf.RData")

getRegistryFilePath = function(file.dir)
  file.path(file.dir, "registry.RData")

getRScriptFilePath = function(reg, id)
  getFilePaths(reg, id, NULL, "R")

getLogFilePath = function(reg, id)
  getFilePaths(reg, id, NULL, "out")

getResultFilePath = function(reg, id, part = NA_character_) {
  s = if (is.na(part)) "result" else paste0("result-", part)
  getFilePaths(reg, id, s, "RData")
}

getResourcesDir = function(file.dir)
  file.path(file.dir, "resources")

getResourcesFilePath = function(reg, timestamp)
  file.path(getResourcesDir(reg$file.dir), sprintf("resources_%i.RData", timestamp))

getPendingDir = function(file.dir)
  file.path(file.dir, "pending")

getExportDir = function(file.dir)
  file.path(file.dir, "exports")

getPendingFile = function(reg, type, id, char = .OrderChars[type]) {
  file.path(getPendingDir(reg$file.dir), sprintf("pending_%s_%s_%i.sql", char, type, id))
}
/scratch/gouwar.j/cran-all/cranData/BatchJobs/R/filenames.R
#' Find all results where a specific condition is true. #' #' @template arg_reg #' @param ids [\code{integer}]\cr #' Ids of jobs whose results you want to test for the condition. #' Default is all jobs for which results are available. #' @param fun [\code{fun(job, res)}]\cr #' Predicate function that returns \code{TRUE} or \code{FALSE}. #' @param ... [any]\cr #' Additional arguments to \code{fun}. #' @return [\code{integer}]. Ids of jobs where \code{fun(job, result)} returns \code{TRUE}. #' @export #' @examples #' reg = makeRegistry(id = "BatchJobsExample", file.dir = tempfile(), seed = 123) #' f = function(x) x^2 #' batchMap(reg, f, 1:10) #' submitJobs(reg) #' waitForJobs(reg) #' #' # which square numbers are even: #' filterResults(reg, fun = function(job, res) res %% 2 == 0) filterResults = function(reg, ids, fun, ...) { checkRegistry(reg, writeable = FALSE) syncRegistry(reg) assertFunction(fun, c("job", "res")) if (missing(ids)) ids = dbFindDone(reg) else ids = checkIds(reg, ids) Filter(function(id) { fun(job = getJob(reg, id, check.id = FALSE), res = getResult(reg, id), ...) }, ids) }
/scratch/gouwar.j/cran-all/cranData/BatchJobs/R/filterResults.R
#' Finds ids of jobs that match a query.
#'
#' @template arg_reg
#' @param ids [\code{integer}]\cr
#'   Subset of job ids to restrict the result to.
#'   Default is all jobs.
#' @param pars [R expression]\cr
#'   All jobs whose parameters match the given expression are selected.
#'   This implies that you have named the parameters when you passed the vectors.
#'   If you forgot to do this you can use \code{.arg1}, \code{.arg2}, etc., to refer to
#'   the unnamed ones.
#' @param jobnames [\code{character}]\cr
#'   Restrict to jobs with stored names. Exact matching is used.
#' @return [\code{integer}]. Ids for jobs which match the query.
#' @export
#' @examples
#' reg = makeRegistry(id = "BatchJobsExample", file.dir = tempfile(), seed = 123)
#' f = function(x, y) x * y
#' batchExpandGrid(reg, f, x = 1:2, y = 1:3)
#' findJobs(reg, pars = (y > 2))
findJobs = function(reg, ids, pars, jobnames) {
  checkRegistry(reg, strict = TRUE, writeable = FALSE)
  syncRegistry(reg)
  if (!missing(ids))
    checkIds(reg, ids)
  if (missing(pars) && missing(jobnames))
    return(getJobIds(reg))

  if (!missing(jobnames)) {
    assertCharacter(jobnames, any.missing = FALSE)
    ids = dbMatchJobNames(reg, ids, jobnames)
  }

  if (!missing(pars)) {
    jobs = dbGetJobs(reg, ids)
    rename = function(pars) {
      ns = names(pars)
      if (is.null(ns)) {
        ns = rep.int("", length(pars))
      }
      j = which(is.na(ns) | ns == "")
      ns[j] = paste0(".arg", seq_along(j))
      setNames(pars, ns)
    }
    ind = vlapply(jobs, function(job, pars, ee) eval(pars, rename(job$pars), ee),
      pars = substitute(pars), ee = parent.frame())
    ids = extractSubList(jobs[!is.na(ind) & ind], "id", element.value = integer(1L))
  }
  ids
}
/scratch/gouwar.j/cran-all/cranData/BatchJobs/R/findJobs.R
findState = function(reg, ids, fun, negate, limit) {
  checkRegistry(reg, writeable = FALSE)
  syncRegistry(reg)
  if (!missing(ids))
    ids = checkIds(reg, ids)
  if (!is.null(limit))
    assertCount(limit)
  fun(reg, ids, negate, limit)
}

#' Find jobs depending on computational state.
#'
#' \code{findDone}: Find jobs which successfully terminated.
#' @template arg_reg
#' @param ids [\code{integer}]\cr
#'   Subset of job ids to restrict the result to.
#'   Default is all jobs.
#' @param limit [\code{integer(1)}]\cr
#'   Limit the number of returned ids.
#'   Default is all ids.
#' @return [\code{integer}]. Ids of jobs.
#' @export
#' @rdname findState
findDone = function(reg, ids, limit = NULL) {
  findState(reg, ids, dbFindDone, FALSE, limit)
}

#' \code{findNotDone}: Find jobs for which results are still missing.
#' @export
#' @rdname findState
findNotDone = function(reg, ids, limit = NULL) {
  findState(reg, ids, dbFindDone, TRUE, limit)
}

#' \code{findMissingResults}: Deprecated. Alias for findNotDone.
#' @export
#' @rdname findState
findMissingResults = function(reg, ids, limit = NULL) {
  findNotDone(reg, ids, limit)
}

#' \code{findErrors}: Find jobs where errors occurred.
#' @export
#' @rdname findState
findErrors = function(reg, ids, limit = NULL) {
  findState(reg, ids, dbFindErrors, FALSE, limit)
}

#' \code{findNotErrors}: Find jobs where no errors occurred.
#' @export
#' @rdname findState
findNotErrors = function(reg, ids, limit = NULL) {
  findState(reg, ids, dbFindErrors, TRUE, limit)
}

#' \code{findTerminated}: Find jobs which have terminated (done / error).
#' @export
#' @rdname findState
findTerminated = function(reg, ids, limit = NULL) {
  findState(reg, ids, dbFindTerminated, FALSE, limit)
}

#' \code{findNotTerminated}: Find jobs which have not terminated (not done / no error).
#' @export
#' @rdname findState
findNotTerminated = function(reg, ids, limit = NULL) {
  findState(reg, ids, dbFindTerminated, TRUE, limit)
}

#' \code{findSubmitted}: Find jobs which have been submitted.
#' @export
#' @rdname findState
findSubmitted = function(reg, ids, limit = NULL) {
  findState(reg, ids, dbFindSubmitted, FALSE, limit)
}

#' \code{findNotSubmitted}: Find jobs which have not been submitted.
#' @export
#' @rdname findState
findNotSubmitted = function(reg, ids, limit = NULL) {
  findState(reg, ids, dbFindSubmitted, TRUE, limit)
}

#' \code{findOnSystem}: Find jobs which are present on the batch system at the moment.
#' @export
#' @rdname findState
findOnSystem = function(reg, ids, limit = NULL) {
  findState(reg, ids, dbFindOnSystem, FALSE, limit)
}

#' \code{findNotOnSystem}: Find jobs which are not present on the batch system at the moment.
#' @export
#' @rdname findState
findNotOnSystem = function(reg, ids, limit = NULL) {
  findState(reg, ids, dbFindOnSystem, TRUE, limit)
}

#' \code{findRunning}: Find jobs which are running.
#' @export
#' @rdname findState
findRunning = function(reg, ids, limit = NULL) {
  findState(reg, ids, dbFindRunning, FALSE, limit)
}

#' \code{findNotRunning}: Find jobs which are not running.
#' @export
#' @rdname findState
findNotRunning = function(reg, ids, limit = NULL) {
  findState(reg, ids, dbFindRunning, TRUE, limit)
}

#' \code{findStarted}: Find jobs which have been started on the batch system.
#' @export
#' @rdname findState
findStarted = function(reg, ids, limit = NULL) {
  findState(reg, ids, dbFindStarted, FALSE, limit)
}

#' \code{findNotStarted}: Find jobs which have not been started on the batch system.
#' @export #' @rdname findState findNotStarted = function(reg, ids, limit = NULL) { findState(reg, ids, dbFindStarted, TRUE, limit) } #' \code{findExpired}: Find jobs where walltime was probably hit. #' Right now the heuristic is as follows: #' Find all jobs that have started, did not abort with an error, #' did not complete with a result and are not submitted or running anymore. #' Note that this heuristic does not include jobs the scheduler looses before starting. #' @export #' @rdname findState findExpired = function(reg, ids, limit = NULL) { findState(reg, ids, dbFindExpiredJobs, FALSE, limit) } #' \code{findDisappeared}: Find jobs which disappeared from the system. #' Right now the heuristic is as follows: #' Find all jobs that are submitted but not started nor on the system anymore. #' @export #' @rdname findState findDisappeared = function(reg, ids, limit = NULL) { findState(reg, ids, dbFindDisappeared, FALSE, limit) }
/scratch/gouwar.j/cran-all/cranData/BatchJobs/R/findStatus.R
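# Usage sketch for the find* helpers above (not part of the package sources).
# Assumes a local setup where the default cluster functions can run jobs,
# mirroring the @examples style used elsewhere in this package. Guarded with
# `if (FALSE)` so it never runs when the file is sourced.
if (FALSE) {
  reg = makeRegistry(id = "findStateExample", file.dir = tempfile(), seed = 123)
  batchMap(reg, function(x) if (x == 3L) stop("boom") else x, 1:4)
  submitJobs(reg)
  waitForJobs(reg)
  findDone(reg)                # successfully terminated jobs: 1, 2, 4
  findErrors(reg)              # job 3
  findTerminated(reg)          # done and errors combined
  findNotDone(reg, limit = 1L) # at most one id without a result
}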
#' Get error messages of jobs. #' @template arg_reg #' @param ids [\code{integer}]\cr #' Ids of jobs. #' Default is all jobs with errors. #' @return [\code{character}]. Error messages for jobs as character vector\cr #' \code{NA} if job has terminated successfully. #' @family debug #' @export getErrorMessages = function(reg, ids) { checkRegistry(reg, writeable = FALSE) syncRegistry(reg) if (missing(ids)) ids = dbFindErrors(reg) else ids = checkIds(reg, ids) tab = dbGetErrorMsgs(reg, ids, filter = FALSE) setNames(tab$error, tab$job_id) }
/scratch/gouwar.j/cran-all/cranData/BatchJobs/R/getErrorMessages.R
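# Hedged usage sketch for getErrorMessages() (not part of the package sources);
# assumes jobs can be run locally with the default cluster functions.
if (FALSE) {
  reg = makeRegistry(id = "errMsgExample", file.dir = tempfile(), seed = 123)
  batchMap(reg, function(x) if (x %% 2L == 0L) stop("even input: ", x) else x, 1:4)
  submitJobs(reg)
  waitForJobs(reg)
  getErrorMessages(reg)             # messages for all jobs with errors
  getErrorMessages(reg, ids = 1:2)  # NA for the successful job, message for the failed one
}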
#' Get job from registry by id. #' @template arg_reg #' @param id [\code{integer(1)}]\cr #' Id of job. #' @param check.id [\code{logical(1)}]\cr #' Check the job id? #' Default is \code{TRUE}. #' @return [\code{\link{Job}}]. #' @export getJob = function(reg, id, check.id = TRUE) { if (check.id) id = checkIds(reg, id, len = 1L) getJobs(reg, id, check.ids = FALSE)[[1L]] } #' Get jobs from registry by id. #' @template arg_reg #' @param ids [\code{integer}]\cr #' Ids of jobs. #' Default is all jobs. #' @param check.ids [\code{logical(1)}]\cr #' Check the job ids? #' Default is \code{TRUE}. #' @return [list of \code{\link{Job}}]. #' @export getJobs = function(reg, ids, check.ids = TRUE) { checkRegistry(reg, writeable = FALSE) # syncRegistry(reg) NOT! assertFlag(check.ids) UseMethod("getJobs") } #' @method getJobs Registry #' @export getJobs.Registry = function(reg, ids, check.ids = TRUE) { if (!missing(ids) && check.ids) ids = checkIds(reg, ids) dbGetJobs(reg, ids) }
/scratch/gouwar.j/cran-all/cranData/BatchJobs/R/getJob.R
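# Illustrative sketch for getJob()/getJobs() (not from the package sources).
if (FALSE) {
  reg = makeRegistry(id = "getJobExample", file.dir = tempfile(), seed = 123)
  batchMap(reg, function(x) x^2, 1:3)
  j = getJob(reg, 1L)
  j$pars                  # parameters stored for this job
  js = getJobs(reg, 2:3)  # list of Job objects for the remaining ids
}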
getJobInfoInternal = function(reg, ids, select, unit = "seconds", columns) {
  if (!missing(ids))
    ids = checkIds(reg, ids)
  assertChoice(unit, c("seconds", "minutes", "hours", "days", "weeks"))

  select.db = c("submitted", "started", "done",
    "CASE WHEN error IS NULL THEN done - started ELSE error_time - started END AS time_running",
    "memory", "started - submitted AS time_queued",
    "error", "error_time", "node", "batch_job_id", "r_pid", "seed")
  select.cns = c("time.submitted", "time.started", "time.done", "time.running", "memory",
    "time.queued", "error.msg", "time.error", "nodename", "batch.id", "r.pid", "seed")
  columns = c(columns, setNames(select.db, select.cns))

  if (!missing(select)) {
    assertSubset(select, c("id", select.cns))
    columns = columns[names(columns) %in% c("id", select)]
  }

  tab = setNames(dbGetExpandedJobsTable(reg, ids, columns), names(columns))
  if (nrow(tab) == 0L)
    return(tab)

  # convert times to POSIX
  if (!is.null(tab$time.submitted))
    tab$time.submitted = dbConvertNumericToPOSIXct(tab$time.submitted)
  if (!is.null(tab$time.started))
    tab$time.started = dbConvertNumericToPOSIXct(tab$time.started)
  if (!is.null(tab$time.done))
    tab$time.done = dbConvertNumericToPOSIXct(tab$time.done)
  if (!is.null(tab$time.error))
    tab$time.error = dbConvertNumericToPOSIXct(tab$time.error)

  # shorten error messages
  if (!is.null(tab$error.msg))
    tab$error.msg = vcapply(tab$error.msg, clipString, len = 30L)

  # convert time diffs
  div = setNames(c(1L, 60L, 3600L, 86400L, 604800L),
    c("seconds", "minutes", "hours", "days", "weeks"))[unit]
  if (!is.null(tab$time.running))
    tab$time.running = as.numeric(tab$time.running) / div
  if (!is.null(tab$time.queued))
    tab$time.queued = as.numeric(tab$time.queued) / div

  return(tab)
}

#' @title Get computational information of jobs.
#'
#' @description
#' Returns time stamps (submitted, started, done, error),
#' time running, approximate memory usage (in Mb),
#' error messages (shortened, see \code{\link{showLog}} for detailed error messages),
#' time in queue, hostname of the host the job was executed on,
#' assigned batch ID, the R PID and the seed of the job.
#'
#' To estimate memory usage the sum of the last column of \code{\link[base]{gc}} is used.
#'
#' Column \dQuote{time.running} displays the time until either the job was done or an error occurred;
#' it will be \code{NA} in case of time outs or hard R crashes.
#'
#' @template arg_reg
#' @template arg_ids
#' @param pars [\code{logical(1)}]\cr
#'   Include job parameters in the output?
#'   Default is \code{FALSE}.
#' @param prefix.pars [\code{logical(1)}]\cr
#'   Should a prefix be added to job parameter names (column names) to avoid name clashes?
#'   Default is \code{FALSE}.
#' @param select [\code{character}]\cr
#'   Select only a subset of columns.
#'   Usually this is not required and you can subset yourself,
#'   but in some rare cases it may be advantageous to not query all information.
#'   Note that the column \dQuote{id} (job id) is always selected.
#'   If not provided, all columns are queried and returned.
#' @param unit [\code{character(1)}]\cr
#'   Unit to convert execution and queueing times to.
#'   Possible values: \dQuote{seconds}, \dQuote{minutes}, \dQuote{hours},
#'   \dQuote{days} and \dQuote{weeks}.
#'   Default is \dQuote{seconds}.
#' @return [\code{data.frame}].
#' @family debug #' @export getJobInfo = function(reg, ids, pars = FALSE, prefix.pars = FALSE, select, unit = "seconds") { UseMethod("getJobInfo") } #' @method getJobInfo Registry #' @export getJobInfo.Registry = function(reg, ids, pars = FALSE, prefix.pars = FALSE, select, unit = "seconds") { checkRegistry(reg, writeable = FALSE) syncRegistry(reg) assertFlag(pars) columns = c(id = "job_id") if (pars) columns = c(columns, c(pars = "pars")) tab = getJobInfoInternal(reg, ids = ids, select = select, unit = unit, columns = columns) if (pars && nrow(tab) > 0L && !is.null(tab$pars)) { pars = deserialize(tab$pars) if (prefix.pars) setnames(pars, sprintf("job.par.%s", names(pars))) tab = cbind(dropNamed(tab, "pars"), pars) } return(tab) }
/scratch/gouwar.j/cran-all/cranData/BatchJobs/R/getJobInfo.R
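# Usage sketch for getJobInfo() (not part of the package sources); assumes
# local execution. The 'select' values are the column names documented above.
if (FALSE) {
  reg = makeRegistry(id = "jobInfoExample", file.dir = tempfile(), seed = 123)
  batchMap(reg, function(x) { Sys.sleep(x); x }, c(1, 2))
  submitJobs(reg)
  waitForJobs(reg)
  getJobInfo(reg)                                        # all columns
  getJobInfo(reg, select = c("time.running", "memory"))  # only id + these columns
  getJobInfo(reg, pars = TRUE, unit = "minutes")         # append job parameters
}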
#' Get the physical location of job files on the hard disk. #' @template arg_reg #' @template arg_ids #' @export #' @return [\code{character}] Vector of directories. getJobLocation = function(reg, ids) { checkRegistry(reg) getJobDirs(reg, ids) }
/scratch/gouwar.j/cran-all/cranData/BatchJobs/R/getJobLocation.R
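# Minimal sketch for getJobLocation() (not from the package sources): with
# sharding enabled, jobs are spread over subdirectories of <file.dir>/jobs.
if (FALSE) {
  reg = makeRegistry(id = "jobLocExample", file.dir = tempfile(), seed = 123)
  batchMap(reg, identity, 1:3)
  getJobLocation(reg, 1:3)   # directories holding the files of jobs 1..3
}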
#' @title Retrieve Job Parameters. #' #' @description #' Returns parameters for all jobs as the rows of a data.frame. #' #' @template arg_reg #' @template arg_ids #' @return [\code{data.frame}]. Rows are named with job ids. #' @export #' @examples #' # see batchExpandGrid getJobParamDf = function(reg, ids) { checkRegistry(reg, strict = TRUE, writeable = FALSE) syncRegistry(reg) if (!missing(ids)) ids = checkIds(reg, ids) tab = dbGetExpandedJobsTable(reg, ids, cols = c("job_id", "pars")) res = deserialize(tab$pars) setDF(res, rownames = rownames(tab)) return(res) }
/scratch/gouwar.j/cran-all/cranData/BatchJobs/R/getJobParamDf.R
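# Usage sketch for getJobParamDf() (not part of the package sources), using
# batchExpandGrid as referenced in the @examples note above.
if (FALSE) {
  reg = makeRegistry(id = "paramDfExample", file.dir = tempfile(), seed = 123)
  batchExpandGrid(reg, function(x, y) x * y, x = 1:2, y = 1:3)
  getJobParamDf(reg)   # one row per job, columns 'x' and 'y', row names are job ids
}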
#' Get log file paths for jobs. #' @template arg_reg #' @param ids [\code{integer}]\cr #' Ids of jobs. #' Default is all jobs. #' @return [\code{character}]. Vector of file paths to log files. #' @family debug #' @export getLogFiles = function(reg, ids) { checkRegistry(reg, writeable = FALSE) syncRegistry(reg) if (missing(ids)) ids = getJobIds(reg) else ids = checkIds(reg, ids) fids = dbGetFirstJobInChunkIds(reg, ids) getLogFilePath(reg, ifelse(is.na(fids), ids, fids)) }
/scratch/gouwar.j/cran-all/cranData/BatchJobs/R/getLogFiles.R
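# Hedged sketch for getLogFiles() (not from the package sources); assumes
# local execution so that log files exist after waitForJobs().
if (FALSE) {
  reg = makeRegistry(id = "logFilesExample", file.dir = tempfile(), seed = 123)
  batchMap(reg, function(x) { message("processing ", x); x }, 1:2)
  submitJobs(reg)
  waitForJobs(reg)
  fns = getLogFiles(reg)
  readLines(fns[1L])   # inspect the raw log of the first job
}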
#' Print and return R installation and other information for SSH workers. #' #' @description #' Workers are queried in parallel via \code{\link{callFunctionOnSSHWorkers}}. #' #' The function will display a warning if the first lib path on the worker #' is not writable as this indicates potential problems in the configuration #' and \code{\link{installPackagesOnSSHWorkers}} will not work. #' #' @param nodenames [\code{character}]\cr #' Nodenames of workers. #' @return [\code{list}]. Displayed information as a list named by nodenames. #' @export #' @seealso \code{\link{callFunctionOnSSHWorkers}} getSSHWorkersInfo = function(nodenames) { fun = function() { lib.paths = .libPaths() list( r.home = R.home(), session.info = sessionInfo(), lib.paths = lib.paths, is.lib.path.writeable = is.accessible(head(lib.paths, 1L)) ) } res = callFunctionOnSSHWorkers(nodenames, fun = fun, consecutive = FALSE, show.output = FALSE, use.names = TRUE, simplify = FALSE) for (nn in nodenames) { r = res[[nn]] messagef("Node: %s", nn) messagef(r$session.info$R.version$version.string) messagef("Platform: %s", r$session.info$platform) messagef("R Home: %s", r$r.home) messagef("First lib path: %s", head(r$lib.paths, 1L)) messagef("") } notok = names(Filter(function(r) !r$is.lib.path.writeable, res)) if (length(notok) > 0L) warningf("On the following nodes the first lib path is not writeable: %s", collapse(notok)) invisible(res) }
/scratch/gouwar.j/cran-all/cranData/BatchJobs/R/getSSHWorkersInfo.R
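# Usage sketch for getSSHWorkersInfo() (not part of the package sources);
# the nodenames are placeholders and require a working SSH worker setup.
if (FALSE) {
  info = getSSHWorkersInfo(c("node1", "node2"))
  info[["node1"]]$lib.paths   # library paths reported by that worker
}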
#' Grep log files for a pattern.
#'
#' @description
#' Searches for occurrence of \code{pattern} in log files.
#'
#' @template arg_reg
#' @param ids [\code{integer}]\cr
#'   Ids of jobs to grep.
#'   Default is all terminated jobs (done + errors).
#' @param pattern [\code{character(1)}]\cr
#'   Pattern to search for. See \code{\link{grep}}.
#'   Default is \code{"warn"}.
#' @param ignore.case [\code{logical(1)}]\cr
#'   Ignore case. See \code{\link{grep}}.
#'   Default is \code{TRUE}.
#' @param verbose [\code{logical(1)}]\cr
#'   Print matches.
#'   Default is \code{FALSE}.
#' @param range [\code{integer(1)}]\cr
#'   If \code{verbose} is set to \code{TRUE}, print \code{range}
#'   leading and trailing lines for contextual information about the warning.
#'   Default is \code{2}.
#' @return [\code{integer}]. Ids of jobs where pattern was found in the log file.
#' @family debug
#' @export
grepLogs = function(reg, ids, pattern = "warn", ignore.case = TRUE, verbose = FALSE, range = 2L) {
  checkRegistry(reg, writeable = FALSE)
  syncRegistry(reg)
  if (missing(ids)) {
    ids = dbFindTerminated(reg)
  } else {
    nterminated = dbFindTerminated(reg, ids, negate = TRUE, limit = 1L)
    if (length(nterminated) > 0L)
      stopf("Not all jobs with provided ids have finished yet and therefore possess no log file, e.g. id=%i.", nterminated[1L])
  }
  assertString(pattern)
  assertFlag(ignore.case)
  assertFlag(verbose)
  range = asCount(range)

  fids = dbGetFirstJobInChunkIds(reg, ids)
  fns = getLogFilePath(reg, ifelse(is.na(fids), ids, fids))
  matched = logical(length(ids))

  getLines = function(lines, match, range) {
    start = max(1L, match - range)
    stop = min(length(lines), match + range)
    collapse(lines[start:stop], "\n")
  }

  for (i in seq_along(fns)) {
    if (!file.exists(fns[i]))
      stopf("File '%s' does not exist.", fns[i])

    # read lines from log and trim to output of job with id 'ids[i]'
    lines = readLines(fns[i])
    start = grep(sprintf("^########## Executing jid=%i ##########$", ids[i]), lines)
    if (length(start) != 1L)
      stopf("The output of the job with id=%i could not be found in file '%s' or was found more than once", ids[i], fns[i])
    end = head(grep("^########## Executing jid=[0-9]+ ##########$", tail(lines, -start)), 1L)
    lines = lines[start:min(start + end, length(lines))]

    matches = grep(pattern, lines, ignore.case = ignore.case)
    matched[i] = (length(matches) > 0L)
    if (verbose && matched[i]) {
      messagef("\n##### Matches for job with id=%i (%s) #####", ids[i], basename(fns[i]))
      message(collapse(vcapply(matches, getLines, lines = lines, range = range), "\n---\n"))
    }
  }
  ids[matched]
}
/scratch/gouwar.j/cran-all/cranData/BatchJobs/R/grepLogs.R
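# Hedged sketch for grepLogs() (not from the package sources); assumes local
# execution. Warnings emitted by jobs typically end up in the log files.
if (FALSE) {
  reg = makeRegistry(id = "grepLogsExample", file.dir = tempfile(), seed = 123)
  batchMap(reg, function(x) { if (x > 2L) warning("large input"); x }, 1:4)
  submitJobs(reg)
  waitForJobs(reg)
  grepLogs(reg)                                                       # default pattern "warn"
  grepLogs(reg, pattern = "large input", verbose = TRUE, range = 1L)  # show context lines
}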
checkMoreArgs = function(more.args, reserved) { assertList(more.args, names = "strict") n = names(more.args) if(is.null(n) || missing(reserved)) return(invisible(TRUE)) check = reserved %in% n if (any(check)) stopf("more.args uses element names which are internally reserved: %s", collapse(reserved[check])) return(invisible(TRUE)) } checkPart = function(reg, part) { if (reg$multiple.result.files) { if (!testScalarNA(part) && !testCharacter(part, any.missing = FALSE)) stop("'part' must be NA or a character vector without NAs!") } else { if (!testScalarNA(part)) stop("'part' must be NA because multiple.result.files is FALSE!") } } getListJobs = function(msg = NULL) { conf = getBatchJobsConf() cf = getClusterFunctions(conf) fun = cf$listJobs if (is.null(fun) && !is.null(msg)) stopf("%s because %s cluster functions do not support listing of jobs!", msg, cf$name) return(fun) } getKillJob = function(msg = NULL) { conf = getBatchJobsConf() cf = getClusterFunctions(conf) fun = cf$killJob if (is.null(fun) && !is.null(msg)) stopf("%s because %s cluster functions do not support killing of jobs!", msg, cf$name) return(fun) } getBatchIds = function(reg, msg = NULL) { fun = getListJobs(msg) fun(getBatchJobsConf(), reg) } getRandomSeed = function(n = 1L) { as.integer(runif(n, 1, .Machine$integer.max / 2L)) } seeder = function(reg, seed) { if(!exists(".Random.seed", envir = .GlobalEnv)) runif(1L) prev.seed = get(".Random.seed", envir = .GlobalEnv) prev.kind = RNGkind() set.seed(seed, kind = reg$RNGkind[1L], normal.kind = reg$RNGkind[2L]) return(list( reset = function() { RNGkind(kind = prev.kind[1L], normal.kind = prev.kind[2L]) assign(".Random.seed", prev.seed, envir = .GlobalEnv) })) } switchWd = function(reg) { cur = getwd() message("Setting work dir: ", reg$work.dir) setwd(reg$work.dir) return(list(reset = function() { message("Setting work back to: ", cur) setwd(cur) })) } addIntModulo = function(x, y, mod = .Machine$integer.max) { as.integer((as.double(x) + as.double(y)) %% mod) } isOnSlave = function() { getOption("BatchJobs.on.slave", default = FALSE) } setOnSlave = function(x, resources.path = as.character(NA)) { options(BatchJobs.on.slave = x) options(BatchJobs.resources.path = resources.path) } now = function() { as.integer(Sys.time()) } getArgNames = function(args) { if (!length(args)) return(NULL) if (is.null(names(args[[1L]])) && is.character(args[[1L]])) return(args[[1L]]) return(names(args[[1L]])) } convertUseNames = function(use.names) { if (is.character(use.names) && length(use.names) == 1L && use.names %in% c("none", "ids", "names")) return(use.names) assertFlag(use.names) c("none", "ids")[use.names+1L] } waitForFiles = function(fn, timeout = NA_real_, sleep = 1) { if (is.na(timeout)) return(invisible(TRUE)) fn = fn[!file.exists(fn)] if (length(fn)) { start = now() repeat { Sys.sleep(sleep) fn = fn[!file.exists(fn)] if (!length(fn)) break if (now() - start > timeout) stopf("Error waiting for file system. File '%s' timed out after %.1f seconds", head(fn, 1L), timeout) } } invisible(TRUE) } info = function(...) { if (getOption("BatchJobs.verbose", default = TRUE)) message(sprintf(...)) } getProgressBar = function(condition, ...) { if (condition) { pb = makeProgressBar(...) 
    pb$set()
  } else {
    pb = makeProgressBar(style = "off")
  }
  pb
}

checkUserFunction = function(fun) {
  fun = match.fun(fun)
  # default to FALSE in case the option has not been initialized
  if (getOption("BatchJobs.clear.function.env", FALSE)) {
    environment(fun) = .GlobalEnv
  } else {
    ee = environment(fun)
    if (!is.null(ee) && !isNamespace(ee)) {
      nn = ls(ee, all.names = TRUE)
      if (sum(vnapply(nn, function(nn) object.size(ee[[nn]])) / 1024^2) > 10)
        warning("The environment of the provided function exceeds 10Mb.")
    }
  }
  fun
}

deserialize = function(pars) {
  pars = lapply(pars, function(x) unserialize(charToRaw(x)))
  if (length(pars) == 0L)
    return(data.table())
  pn = make.names(names2(pars[[1L]], missing.val = ""), unique = TRUE)
  pars = rbindlist(pars)
  setnames(pars, pn)
  pars
}
/scratch/gouwar.j/cran-all/cranData/BatchJobs/R/helpers.R
#' Install packages on SSH workers.
#'
#' @description
#' Installation is done via \code{\link{callFunctionOnSSHWorkers}}
#' and \code{\link{install.packages}}.
#'
#' Note that as usual the function tries to install
#' the packages into the first path of \code{.libPaths()}
#' of each worker.
#'
#' @param nodenames [\code{character}]\cr
#'   Nodenames of workers.
#' @param pkgs [\code{character}]\cr
#'   See \code{\link{install.packages}}.
#' @param repos [\code{character}]\cr
#'   See \code{\link{install.packages}}.
#'   If the user must be queried this is of course done on the master.
#' @param consecutive [\code{logical(1)}]\cr
#'   See \code{\link{callFunctionOnSSHWorkers}}.
#'   Default is \code{TRUE}.
#' @param show.output [\code{logical(1)}]\cr
#'   See \code{\link{callFunctionOnSSHWorkers}}.
#'   Default is \code{consecutive}.
#' @param ... [any]\cr
#'   Passed to \code{\link{install.packages}}.
#' @return Nothing.
#' @export
#' @seealso \code{\link{callFunctionOnSSHWorkers}}
installPackagesOnSSHWorkers = function(nodenames, pkgs,
  repos = getOption("repos"), consecutive = TRUE,
  show.output = consecutive, ...) {

  assertCharacter(pkgs, min.len = 1L, any.missing = FALSE)
  if ("@CRAN@" %in% repos) {
    # chooseCRANmirror() sets the repos option but returns NULL,
    # so re-read the option instead of assigning its return value
    chooseCRANmirror()
    repos = getOption("repos")
  }
  callFunctionOnSSHWorkers(nodenames, fun = install.packages,
    pkgs = pkgs, repos = repos, consecutive = consecutive,
    show.output = show.output, ...)
  invisible(NULL)
}
/scratch/gouwar.j/cran-all/cranData/BatchJobs/R/installPackagesOnSSHWorkers.R
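# Usage sketch for installPackagesOnSSHWorkers() (not part of the package
# sources); nodenames are placeholders and require configured SSH workers.
if (FALSE) {
  installPackagesOnSSHWorkers(c("node1", "node2"), pkgs = "checkmate")
}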
#' Kill some jobs on the batch system. #' #' @description #' Kill jobs which have already been submitted to the batch system. #' If a job is killed its internal state is reset as if it had not been submitted at all. #' #' The function informs if #' (a) the job you want to kill has not been submitted, #' (b) the job has already terminated, #' (c) for some reason no batch job id is available. #' In all 3 cases above, nothing is changed for the state of this job and no call #' to the internal kill cluster function is generated. #' #' In case of an error when killing, the function tries - after a short sleep - to kill the remaining #' batch jobs again. If this fails again for some jobs, the function gives up. Only jobs that could be #' killed are reset in the DB. #' #' @template arg_reg #' @param ids [\code{integer}]\cr #' Ids of jobs to kill. #' Default is none. #' @template arg_progressbar #' @return [\code{integer}]. Ids of killed jobs. #' @export #' @family debug #' @examples #' \dontrun{ #' reg = makeRegistry(id = "BatchJobsExample", file.dir = tempfile(), seed = 123) #' f = function(x) Sys.sleep(x) #' batchMap(reg, f, 1:10 + 5) #' submitJobs(reg) #' waitForJobs(reg) #' #' # kill all jobs currently _running_ #' killJobs(reg, findRunning(reg)) #' # kill all jobs queued or running #' killJobs(reg, findNotTerminated(reg)) #' } killJobs = function(reg, ids, progressbar = TRUE) { checkRegistry(reg, writeable = TRUE) syncRegistry(reg) if (missing(ids)) return(invisible(integer(0L))) else ids = checkIds(reg, ids) assertFlag(progressbar) conf = getBatchJobsConf() killfun = getKillJob("Cannot kill jobs") # FIXME select and order (see below) could be done more efficiently in SQLite data = dbGetJobStatusTable(reg, ids = dbFindOnSystem(reg, ids), cols = c("job_id", "batch_job_id", "submitted", "started", "done", "error")) # print first summary information on jobs to kill info("Trying to kill %i jobs.", length(ids)) info("Jobs on system: %i", nrow(data)) info("Of these: %i not submitted, %i with no batch.job.id, %i already terminated", sum(is.na(data$submitted)), sum(is.na(data$batch_job_id)), sum(!is.na(data$done) | !is.na(data$error))) # subset data: restrict to jobs submitted, not done, no error, has bji # other jobs can be ignored -> overwrite ids data = subset(data, !is.na(data$submitted) & is.na(data$done) & is.na(data$error) & !is.na(data$batch_job_id), select = c("job_id", "batch_job_id", "started")) # kill queued jobs first, otherwise they might get started while killing running jobs data = data[order(data$started, na.last = FALSE),, drop = FALSE] ids = data$job_id bjids = unique(data$batch_job_id) info("Killing real batch jobs: %i", length(bjids)) if (length(bjids) == 0L) { info("No batch jobs to kill.") return(invisible(integer(0L))) } doKill = function(bjids) { n = length(bjids) old.warn = getOption("warn") bar = getProgressBar(progressbar, min = 0L, max = n, label = "killJobs") on.exit({ options(warn = old.warn); bar$kill() }) options(warn = 0L) bar$set() notkilled = logical(n) for (i in seq_len(n)) { ok = try(killfun(conf, reg, bjids[i])) if (is.error(ok)) { notkilled[i] = TRUE warning(as.character(ok)) } bar$set(i) } bjids[notkilled] } # first try to kill info(clipString(collapse(bjids), 200L, ",...")) bjids.notkilled = doKill(bjids) # second try to kill if (length(bjids.notkilled) > 0L) { info("Could not kill %i batch jobs, trying again.", length(bjids.notkilled)) Sys.sleep(2) bjids.notkilled = doKill(bjids.notkilled) } # second try also not successful if (length(bjids.notkilled) > 0L) 
{ fn = file.path(reg$file.dir, sprintf("killjobs_failed_ids_%i", now())) warningf("Could not kill %i batch jobs, kill them manually!\nTheir ids have been saved in %s.", length(bjids.notkilled), fn) writeLines(as.character(bjids.notkilled), con = fn) } # reset killed jobs ids = ids[data$batch_job_id %nin% bjids.notkilled] info("Resetting %i jobs in DB.", length(ids)) dbSendMessage(reg, dbMakeMessageKilled(reg, ids, type = "last")) invisible(ids) }
/scratch/gouwar.j/cran-all/cranData/BatchJobs/R/killJobs.R
#' @title Load a previously saved registry.
#'
#' @description
#' Loads a previously created registry from the file system.
#' The \code{file.dir} is automatically updated upon load if \code{adjust.paths} is set to
#' \code{TRUE}, so be careful if you use the registry on multiple machines simultaneously,
#' e.g. via sshfs or a samba share.
#'
#' There is a heuristic included which tries to detect if the location of the registry
#' has changed and returns a read-only registry if necessary.
#'
#' @param file.dir [\code{character(1)}]\cr
#'   Location of the file.dir to load the registry from.
#' @param work.dir [\code{character(1)}]\cr
#'   Location of the work. Unchanged if missing.
#' @param adjust.paths [\code{logical(1)}]\cr
#'   If set to \code{FALSE} (default), the paths for the \code{file.dir} and \code{work.dir}
#'   will not be updated. Set to \code{TRUE} if you moved the directory to another system
#'   \emph{after} all computations finished.
#' @return [\code{\link{Registry}}].
#' @export
loadRegistry = function(file.dir, work.dir, adjust.paths = FALSE) {
  assertString(file.dir)
  assertFlag(adjust.paths)

  fn = getRegistryFilePath(file.dir)
  if (!file.exists(fn))
    stopf("No registry found in '%s'", file.dir)

  info("Loading registry: %s", fn)
  reg = load2(fn, "reg")
  requirePackages(names(reg$packages), why = sprintf("registry %s", reg$id), default.method = "attach")

  if (!isOnSlave()) {
    save.reg = FALSE
    read.only = FALSE

    adjusted = adjustRegistryPaths(reg, file.dir, work.dir)
    if (!isFALSE(adjusted)) {
      if (!adjust.paths) {
        warning("It seems like you've moved the registry to a new location or system. ",
          "To update the paths, call 'loadRegistry' with option 'adjust.paths' set to TRUE. ",
          "But make sure that there are no jobs running on the system. ",
          "Returning a read-only registry, and not updating the database to the latest layout, ",
          "i.e. your registry may be defunct.")
        read.only = TRUE
      }
      reg = adjusted
      save.reg = TRUE
    }

    if (!read.only) {
      updated = updateRegistry(reg)
      if (!isFALSE(updated)) {
        reg = updated
        save.reg = TRUE
      }
    }

    if (save.reg) {
      if (read.only)
        reg$read.only = TRUE
      else
        saveRegistry(reg)
    }
  }

  loadExports(reg)
  sourceRegistryFiles(reg)
  return(reg)
}
/scratch/gouwar.j/cran-all/cranData/BatchJobs/R/loadRegistry.R
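# Hedged sketch for loadRegistry() (not from the package sources): re-attach
# to an existing file.dir, e.g. from a fresh R session.
if (FALSE) {
  fd = tempfile()
  reg = makeRegistry(id = "loadRegExample", file.dir = fd, seed = 123)
  batchMap(reg, identity, 1:3)
  rm(reg)
  reg = loadRegistry(fd)
  # after moving the file.dir to another system (and with no jobs running):
  # reg = loadRegistry("/new/location/of/file.dir", adjust.paths = TRUE)
}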
#' Loads a specific result file. #' @template arg_reg #' @param id [\code{integer(1)}]\cr #' Id of job. #' @param part [\code{character}] #' Only useful for multiple result files, then defines which result file part(s) should be loaded. #' \code{NA} means all parts are loaded, which is the default. #' @param missing.ok [\code{logical(1)}]\cr #' If \code{FALSE} an error is thrown if no result file is found. #' Otherwise \code{NULL} is returned. #' Default is \code{FALSE}. #' @param impute.val [any]\cr #' The value to return when no result is available. #' Defaults to \code{NULL} (the previous behavior) #' @return [any]. Result of job. #' @seealso \code{\link{reduceResults}} #' @export loadResult = function(reg, id, part = NA_character_, missing.ok = FALSE, impute.val = NULL) { checkRegistry(reg, writeable = FALSE) syncRegistry(reg) id = checkIds(reg, id, len = 1L) checkPart(reg, part) assertFlag(missing.ok) getResult(reg, id, part, missing.ok, impute.val) } getResult = function(reg, id, part = NA_character_, missing.ok = FALSE, impute.val = NULL) { getResults(reg, id, part, missing.ok, impute.val)[[1L]] }
/scratch/gouwar.j/cran-all/cranData/BatchJobs/R/loadResult.R
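# Minimal sketch for loadResult() (not part of the package sources).
if (FALSE) {
  reg = makeRegistry(id = "loadResultExample", file.dir = tempfile(), seed = 123)
  batchMap(reg, function(x) x^2, 1:3)
  submitJobs(reg)
  waitForJobs(reg)
  loadResult(reg, 2L)                                      # 4
  loadResult(reg, 2L, missing.ok = TRUE, impute.val = NA)  # NA if the result were missing
}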
#' Loads result files for id vector.
#' @template arg_reg
#' @param ids [\code{integer}]\cr
#'   Ids of jobs.
#'   Default is all done jobs.
#' @param part [\code{character}]
#'   Only useful for multiple result files, then defines which result file part(s) should be loaded.
#'   \code{NA} means all parts are loaded, which is the default.
#' @param simplify [\code{logical(1)}]\cr
#'   Should the result be simplified to a vector, matrix or higher dimensional array if possible?
#'   Default is \code{TRUE}.
#' @param use.names [\code{character(1)}]\cr
#'   Name the results with job ids (\dQuote{ids}), stored job names (\dQuote{names})
#'   or return an unnamed result (\dQuote{none}).
#'   Default is \code{ids}.
#' @param missing.ok [\code{logical(1)}]\cr
#'   If \code{FALSE} an error is thrown if the results are not found.
#'   Otherwise missing results are imputed to \code{NULL}.
#'   Default is \code{FALSE}.
#' @param impute.val [any]\cr
#'   The value to return when no result is available.
#'   Defaults to \code{NULL} (the previous behavior)
#' @return [\code{list}]. Results of jobs as list, possibly named by ids.
#' @seealso \code{\link{reduceResults}}
#' @export
loadResults = function(reg, ids, part = NA_character_, simplify = FALSE, use.names = "ids", missing.ok = FALSE, impute.val = NULL) {
  checkRegistry(reg, writeable = FALSE)
  syncRegistry(reg)
  if (missing(ids)) {
    ids = dbFindDone(reg)
  } else {
    ids = checkIds(reg, ids)
  }
  checkPart(reg, part)
  assertFlag(simplify)
  use.names = convertUseNames(use.names)
  assertFlag(missing.ok)

  res = getResults(reg, ids, part, missing.ok, impute.val = impute.val)
  names(res) = switch(use.names,
    "none" = NULL,
    "ids" = as.character(ids),
    "names" = dbGetJobNames(reg, ids))
  # 'simplify' is a flag here, so never simplify to higher dimensional arrays
  if (simplify && length(res) > 0L)
    res = simplify2array(res, higher = FALSE)
  return(res)
}

getResults = function(reg, ids, part = NA_character_, missing.ok = FALSE, impute.val = NULL) {
  if (reg$multiple.result.files) {
    read.files = function(id, dir, pattern) {
      fns = list.files(dir, pattern, full.names = TRUE)
      found.parts = sub(".+-(.+)\\.RData$", "\\1", basename(fns))
      if (length(found.parts) == 0L) {
        if (missing.ok)
          return(list())
        stop("No partial result files found for job with id ", id)
      }
      setNames(lapply(fns, load2, "result"), found.parts)
    }

    dirs = getJobDirs(reg, ids)
    if (length(part) == 1L && is.na(part)) {
      patterns = sprintf("^%i-result-.+\\.RData$", ids)
    } else {
      patterns = sprintf("^%i-result-(%s)\\.RData$", ids, collapse(part, "|"))
    }
    return(mapply(read.files, id = ids, dir = dirs, pattern = patterns,
      SIMPLIFY = FALSE, USE.NAMES = FALSE))
  }

  fns = getResultFilePath(reg, ids, part)
  miss = !file.exists(fns)
  if (any(miss)) {
    if (!missing.ok)
      stopf("Some job result files do not exist, showing up to first 10:\n%s", collapse(head(fns[miss], 10L), "\n"))
    ret = rep.int(list(impute.val), times = length(ids))
    ret = replace(ret, !miss, lapply(fns[!miss], load2, "result"))
  } else {
    ret = lapply(fns, load2, "result")
  }
  ret
}
/scratch/gouwar.j/cran-all/cranData/BatchJobs/R/loadResults.R
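# Usage sketch for loadResults() (not from the package sources).
if (FALSE) {
  reg = makeRegistry(id = "loadResultsExample", file.dir = tempfile(), seed = 123)
  batchMap(reg, function(x) x^2, 1:4)
  submitJobs(reg)
  waitForJobs(reg)
  loadResults(reg)                       # named list, names are job ids
  loadResults(reg, simplify = TRUE)      # simplified to a vector here
  loadResults(reg, use.names = "none")   # unnamed list
}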
#' Reduce results from result directory.
#'
#' @description
#' The following functions provide ways to reduce result files into either specific R objects (like
#' vectors, lists, matrices or data.frames) or to arbitrarily aggregate them, which is a more general
#' operation.
#'
#' @template arg_reg
#' @param ids [\code{integer}]\cr
#'   Ids of selected jobs.
#'   Default is all jobs for which results are available.
#' @param part [\code{character}]
#'   Only useful for multiple result files, then defines which result file part(s) should be loaded.
#'   \code{NA} means all parts are loaded, which is the default.
#' @param fun [\code{function}]\cr
#'   For \code{reduceResults}, a function \code{function(aggr, job, res, ...)} to reduce things,
#'   for all others, a function \code{function(job, res, ...)} to select stuff.
#'   Here, \code{job} is the current job descriptor (see \code{\link{Job}}), \code{result} is the current result object and
#'   \code{aggr} are the so far aggregated results. When using \code{reduceResults},
#'   your function should add the stuff you want to have from \code{job} and
#'   \code{result} to \code{aggr} and return that.
#'   When using the other reductions, you should select the stuff you want to have from \code{job} and
#'   \code{result} and return something that can be coerced to an element of the selected return data structure
#'   (reasonable conversion is tried internally).
#'   Default behavior for this argument is to return \code{res}, except for \code{reduceResults} where no
#'   default is available.
#' @param init [\code{ANY}]\cr
#'   Initial element, as used in \code{\link{Reduce}}.
#'   Default is first result.
#' @param ... [any]\cr
#'   Additional arguments to \code{fun}.
#' @param use.names [\code{character(1)}]\cr
#'   Name the results with job ids (\dQuote{ids}), stored job names (\dQuote{names})
#'   or return an unnamed result (\dQuote{none}).
#'   Default is \code{ids}.
#' @param impute.val [any]\cr
#'   For \code{reduceResults}: If not missing, the value of \code{impute.val} is passed to function \code{fun}
#'   as argument \code{res} for jobs with missing results.\cr
#'   For the specialized reduction functions \code{reduceResults[Type]}: If not missing, \code{impute.val} is
#'   used as a replacement for the return value of \code{fun} on missing results.
#' @template arg_progressbar
#' @param rows [\code{logical(1)}]\cr
#'   Should the selected vectors be used as rows (or columns) in the result matrix?
#'   Default is \code{TRUE}.
#' @param strings.as.factors [\code{logical(1)}]
#'   Should all character columns in result be converted to factors?
#'   Default is \code{FALSE}.
#' @return Aggregated results, return type depends on function. If \code{ids} is empty: \code{reduceResults}
#'   returns \code{init} (if available) or \code{NULL}, \code{reduceResultsVector} returns \code{c()},
#'   \code{reduceResultsList} returns \code{list()}, \code{reduceResultsMatrix} returns \code{matrix(0,0,0)},
#'   \code{reduceResultsDataFrame} returns \code{data.frame()}.
#' @export
#' @examples
#' # generate results:
#' reg = makeRegistry(id = "BatchJobsExample", file.dir = tempfile(), seed = 123)
#' f = function(x) x^2
#' batchMap(reg, f, 1:5)
#' submitJobs(reg)
#' waitForJobs(reg)
#'
#' # reduce results to a vector
#' reduceResultsVector(reg)
#' # reduce results to sum
#' reduceResults(reg, fun = function(aggr, job, res) aggr+res)
#'
#' reg = makeRegistry(id = "BatchJobsExample", file.dir = tempfile(), seed = 123)
#' f = function(x) list(a = x, b = as.character(2*x), c = x^2)
#' batchMap(reg, f, 1:5)
#' submitJobs(reg)
#' waitForJobs(reg)
#'
#' # reduce results to a vector
#' reduceResultsVector(reg, fun = function(job, res) res$a)
#' reduceResultsVector(reg, fun = function(job, res) res$b)
#' # reduce results to a list
#' reduceResultsList(reg)
#' # reduce results to a matrix
#' reduceResultsMatrix(reg, fun = function(job, res) res[c(1,3)])
#' reduceResultsMatrix(reg, fun = function(job, res) c(foo = res$a, bar = res$c), rows = TRUE)
#' reduceResultsMatrix(reg, fun = function(job, res) c(foo = res$a, bar = res$c), rows = FALSE)
#' # reduce results to a data.frame
#' print(str(reduceResultsDataFrame(reg)))
#' # reduce results to a sum
#' reduceResults(reg, fun = function(aggr, job, res) aggr+res$a, init = 0)
reduceResults = function(reg, ids, part = NA_character_, fun, init, impute.val, progressbar = TRUE, ...) {
  checkRegistry(reg, writeable = FALSE)
  syncRegistry(reg)
  if (missing(ids)) {
    ids = done = dbFindDone(reg)
    with.impute = FALSE
  } else {
    ids = checkIds(reg, ids)
    done = dbFindDone(reg, ids)
    with.impute = !missing(impute.val)
    if (!with.impute) {
      if (length(ids) > length(done))
        stopf("No results available for jobs with ids: %s", collapse(setdiff(ids, done)))
    }
  }
  assertFunction(fun, c("aggr", "job", "res"))
  assertFlag(progressbar)

  n = length(ids)
  info("Reducing %i results...", n)
  if (n == 0L) {
    if (missing(init))
      return(NULL)
    return(init)
  }

  bar = getProgressBar(progressbar, max = n, label = "reduceResults")
  tryCatch({
    if (missing(init)) {
      # fetch first result as init
      aggr = getResult(reg, ids[1L], part)
      ids = tail(ids, -1L)
      bar$inc(1L)
    } else {
      aggr = init
    }
    not.done = (ids %nin% done)
    for (i in seq_along(ids)) {
      # use lazy evaluation
      aggr = fun(aggr,
        job = dbGetJobs(reg, ids[i])[[1L]],
        res = if (with.impute && not.done[i]) impute.val else getResult(reg, ids[i], part),
        ...)
      bar$inc(1L)
    }
  }, error = bar$error)
  return(aggr)
}

#' @export
#' @rdname reduceResults
reduceResultsList = function(reg, ids, part = NA_character_, fun, ..., use.names = "ids", impute.val, progressbar = TRUE) {
  checkRegistry(reg, writeable = FALSE)
  syncRegistry(reg)
  if (missing(ids)) {
    ids = done = dbFindDone(reg)
    with.impute = FALSE
  } else {
    ids = checkIds(reg, ids)
    done = dbFindDone(reg, ids)
    with.impute = !missing(impute.val)
    if (!with.impute) {
      if (length(ids) > length(done))
        stopf("No results available for jobs with ids: %s", collapse(setdiff(ids, done)))
    }
  }
  if (missing(fun))
    fun = function(job, res) res
  else
    assertFunction(fun, c("job", "res"))
  use.names = convertUseNames(use.names)
  assertFlag(progressbar)

  n = length(ids)
  info("Reducing %i results...", n)
  if (n == 0L)
    return(list())

  res = vector("list", n)
  if (with.impute) {
    res = replace(res, ids %nin% done, impute.val)
    it = match(done, ids)
  } else {
    it = seq_len(n)
  }

  bar = getProgressBar(progressbar, max = n, label = "reduceResults")
  tryCatch({
    for (i in it) {
      # use lazy evaluation!
      tmp = fun(job = dbGetJobs(reg, ids[i])[[1L]],
        res = getResult(reg, ids[i], part), ...)
if (!is.null(tmp)) res[[i]] = tmp bar$inc(1L) } }, error = bar$error) names(res) = switch(use.names, "none" = NULL, "ids" = as.character(ids), "names" = dbGetJobNames(reg, ids)) return(res) } #' @export #' @rdname reduceResults reduceResultsVector = function(reg, ids, part = NA_character_, fun, ..., use.names = "ids", impute.val) { unlist(reduceResultsList(reg, ids, part, fun, ..., use.names = use.names, impute.val = impute.val)) } #' @export #' @rdname reduceResults reduceResultsMatrix = function(reg, ids, part = NA_character_, fun, ..., rows = TRUE, use.names = "ids", impute.val) { assertFlag(rows) use.names = convertUseNames(use.names) res = reduceResultsList(reg, ids, part, fun, ..., use.names = use.names, impute.val = impute.val) if (length(res) == 0L) return(matrix(0, nrow = 0L, ncol = 0L)) n = length(res) dn = if (use.names != "none") list(names(res), names(res[[1L]])) else NULL res = unlist(res, use.names = FALSE) if (rows) matrix(res, nrow = n, byrow = TRUE, dimnames = dn) else matrix(res, ncol = n, byrow = FALSE, dimnames = rev(dn)) } #' @export #' @rdname reduceResults reduceResultsDataFrame = function(reg, ids, part = NA_character_, fun, ..., use.names = "ids", impute.val, strings.as.factors = FALSE) { assertFlag(strings.as.factors) res = reduceResultsList(reg, ids, part, fun, ..., use.names = use.names, impute.val = impute.val) if (!length(res)) return(data.frame()) convertListOfRowsToDataFrame(res, strings.as.factors = strings.as.factors) } #' @export #' @rdname reduceResults reduceResultsDataTable = function(reg, ids, part = NA_character_, fun, ..., use.names = "ids", impute.val) { res = reduceResultsList(reg, ids, part, fun, ..., use.names = use.names, impute.val = impute.val) if (!length(res)) return(data.table()) rbindlist(res, fill = TRUE) }
/scratch/gouwar.j/cran-all/cranData/BatchJobs/R/reduceResults.R
#' Reset computational state of jobs.
#'
#' @description
#' Reset state of jobs in the database. Useful under two circumstances:
#' Either to re-submit them because of changes in e.g. external
#' data or to resolve rare issues when jobs are killed in an unfortunate state
#' and therefore blocking your registry.
#'
#' The function internally lists all jobs on the batch system and
#' if those include some of the jobs you want to reset, it informs you to kill them first by raising
#' an exception.
#' If you really know what you are doing, you may set \code{force} to \code{TRUE} to omit this sanity check.
#' Note that this is a dangerous operation to perform which may harm
#' the database integrity. In this case you HAVE to make externally sure that none of the jobs
#' you want to reset are still running.
#'
#' @template arg_reg
#' @param ids [\code{integer}]\cr
#'   Ids of jobs to reset.
#'   Default is none.
#' @param force [\code{logical(1)}]\cr
#'   Reset jobs without checking whether they are currently running.
#'   READ THE DETAILS SECTION!
#'   Default is \code{FALSE}.
#' @return Vector of reset job ids.
#' @family debug
#' @export
resetJobs = function(reg, ids, force = FALSE) {
  checkRegistry(reg, writeable = TRUE)
  syncRegistry(reg)
  if (missing(ids) || length(ids) == 0L)
    return(integer(0L))
  ids = checkIds(reg, ids)
  assertFlag(force)

  if (!force) {
    if (is.null(getListJobs()) || is.null(getKillJob())) {
      stop("Listing or killing of jobs not supported by your cluster functions\n",
        "You need to set force = TRUE to reset jobs, but see the warning in ?resetJobs")
    }
    running = dbFindOnSystem(reg, ids, limit = 10L)
    if (length(running) > 0L)
      stopf("Can't reset jobs which are live on system. You have to kill them first!\nIds: %s", collapse(running))
  }

  info("Resetting %i jobs in DB.", length(ids))
  dbSendMessage(reg, dbMakeMessageKilled(reg, ids), staged = FALSE)
  invisible(ids)
}
/scratch/gouwar.j/cran-all/cranData/BatchJobs/R/resetJobs.R
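# Hedged recovery sketch for resetJobs() (not part of the package sources):
# a typical pattern for re-running expired jobs; assumes the affected jobs
# are no longer on the batch system, otherwise kill them first.
if (FALSE) {
  expired = findExpired(reg)
  resetJobs(reg, expired)
  submitJobs(reg, expired)
}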
#' Sanitize a path #' #' @description #' Replaces backward slashes with forward slashes and optionally #' normalizes the path. #' #' @param path [\code{character}]\cr #' Vector of paths to sanitize. #' @param make.absolute [\code{logical}]\cr #' If \code{TRUE} convert to an absolute path. #' @param normalize.absolute [\code{logical}]\cr #' Also call \code{\link[base]{normalizePath}} on absolute paths? #' This will immediately resolve symlinks. #' @return \code{character} with sanitized paths. #' @export sanitizePath = function(path, make.absolute = TRUE, normalize.absolute = FALSE) { assertCharacter(path, any.missing = FALSE) assertFlag(make.absolute) assertFlag(normalize.absolute) if (make.absolute) { normalize = if (normalize.absolute) rep.int(TRUE, length(path)) else !isPathFromRoot(path) path[normalize] = normalizePath(path[normalize], mustWork = FALSE, winslash = "/") } gsub("\\", "/", path, fixed = TRUE) }
/scratch/gouwar.j/cran-all/cranData/BatchJobs/R/sanitizePath.R
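# Minimal sketch for sanitizePath() (not from the package sources).
if (FALSE) {
  sanitizePath("foo\\bar\\baz.R", make.absolute = FALSE)  # "foo/bar/baz.R"
  sanitizePath("subdir/file.R")                           # absolute path with forward slashes
}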
# is a worker busy, see rules below
getWorkerSchedulerStatus = function(worker) {
  # we have already used up our maximal load on this node
  if (worker$status$n.jobs >= worker$max.jobs)
    return("J")
  # should not have too much load average
  if (worker$status$load[1L] > worker$max.load)
    return("L")
  # there are already ncpus expensive R jobs running on the node
  if (worker$status$n.rprocs.50 >= worker$ncpus)
    return("R")
  # should not have too many R sessions open
  if (worker$status$n.rprocs >= 3 * worker$ncpus)
    return("r")
  # else all clear, submit the job!
  return("A")
}

# update status of worker IN PLACE
updateWorker = function(worker, file.dir, tdiff) {
  time = now()
  if (worker$available == "A" || time - worker$last.update >= tdiff) {
    worker$last.update = time
    worker$status = getWorkerStatus(worker, file.dir)
    worker$available = getWorkerSchedulerStatus(worker)
  }
}

# find an available worker via getWorkerSchedulerStatus and update workers while looking
# workers with a low load are more likely to be selected when there are
# multiple workers available
findWorker = function(workers, file.dir, tdiff) {
  lapply(workers, updateWorker, file.dir = file.dir, tdiff = tdiff)
  rload = vnapply(workers, function(w) w$status$load / w$ncpus)
  Find(function(w) w$available == "A",
    sample(workers, prob = 1 / (rload + 0.1)),
    nomatch = NULL)
}
/scratch/gouwar.j/cran-all/cranData/BatchJobs/R/scheduleWorkerJobs.R
sendMail = function(reg, ids, result.str, extra.msg = "", disable.mail, condition, first, last) {
  if (disable.mail)
    return(invisible(NULL))

  conf = getBatchJobsConf()
  ischunk = (length(ids) > 1L)
  first.id = if (ischunk) ids[[1L]] else ids

  # should we mail
  mail.conds = list(start = conf$mail.start, done = conf$mail.done, error = conf$mail.error)
  mail.cond = mail.conds[[condition]]
  if (mail.cond == "all" ||
      (mail.cond %in% c("first", "first+last") && first.id == first) ||
      (mail.cond %in% c("last", "first+last") && first.id == last)) {

    myformat = function(title, lines) {
      collapse(c(sprintf("### %s", title), lines, "", ""), "\n")
    }

    cstr = switch(condition, "start" = "started", "done" = "finished", "error" = "terminated with exception")
    subj = sprintf("[%s]: %s %s has %s", reg$id, ifelse(ischunk, "Chunk with first job", "Job"), first.id, cstr)
    msg = myformat("Ids", ids)

    # append result and status information
    if (condition != "start") {
      if (extra.msg != "")
        msg = paste0(msg, myformat("Message", extra.msg))
      msg = paste0(msg, myformat("Results", result.str))
      if (first.id == last)
        msg = paste0(msg, myformat("Status", capture.output(showStatus(reg, run.and.exp = FALSE))))
    }

    # if a mail problem occurs, we only warn but do not terminate
    ok = try(sendmail(conf$mail.from, conf$mail.to, subj, msg, control = conf$mail.control))
    if (is.error(ok)) {
      warningf("Could not send mail to signal condition '%s'!\nFrom: %s\nTo: %s\nControl: %s\nError message: %s",
        condition, conf$mail.from, conf$mail.to, convertToShortString(conf$mail.control), as.character(ok))
    } else {
      messagef("Mail signaling condition '%s' sent to %s", condition, conf$mail.to)
    }
  }
  invisible(NULL)
}
/scratch/gouwar.j/cran-all/cranData/BatchJobs/R/sendMail.R
#' Sets the job function for already existing jobs. #' #' @description #' Use this function only as last measure when there is a bug #' in a part of your job function and you have already computed a large number #' of (unaffected) results. This function allows you to fix the error and to #' associate the jobs with the corrected function. #' #' Note that by default the computational state of the affected jobs is also reset. #' #' @template arg_reg #' @param ids [\code{integer}]\cr #' Ids of jobs. #' Default is all jobs. #' @param fun [\code{function}]\cr #' Replacement function. #' @param more.args [\code{list}]\cr #' A list of other arguments passed to \code{fun}. #' Default is empty list. #' @param reset [\code{logical(1)}]\cr #' Reset job status via \code{\link{resetJobs}}. #' Default is \code{TRUE}. #' @param force [\code{logical(1)}]\cr #' See \code{\link{resetJobs}}. #' Default is \code{FALSE}. #' @return Nothing. #' @family debug #' @export setJobFunction = function(reg, ids, fun, more.args = list(), reset = TRUE, force = FALSE) { checkRegistry(reg, strict = FALSE, writeable = TRUE) syncRegistry(reg) assertFunction(fun) checkMoreArgs(more.args) assertFlag(reset) assertFlag(force) UseMethod("setJobFunction") } #' @export setJobFunction.Registry = function(reg, ids, fun, more.args = list(), reset = TRUE, force = FALSE) { if (missing(ids)) { # nothing to do ... return(invisible(NULL)) } else { ids = checkIds(reg, ids) } fun.id = saveFunction(reg, fun, more.args) dbSetJobFunction(reg, ids, fun.id) if (reset) resetJobs(reg, ids, force = force) invisible(NULL) }
/scratch/gouwar.j/cran-all/cranData/BatchJobs/R/setJobFunction.R
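# Hedged sketch for setJobFunction() (not part of the package sources):
# swap in a corrected function for the failed jobs only, keeping the results
# of the unaffected jobs; 'fixed' is a hypothetical replacement function.
if (FALSE) {
  err = findErrors(reg)
  fixed = function(x) x^2   # corrected implementation of the buggy function
  setJobFunction(reg, ids = err, fun = fixed)  # also resets these jobs by default
  submitJobs(reg, err)
}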
#' Set job names. #' #' @param reg [\code{\link{Registry}}]\cr #' Registry. #' @param ids [\code{integer}]\cr #' Ids of jobs. #' Default is all jobs. #' @param jobnames [\code{character}]\cr #' Character vector with length equal to \code{length(ids)}. #' \code{NA} removes the names stored in the registry. #' A single \code{NA} is replicated to match the length of ids provided. #' @return Named vector of job ids. #' @export setJobNames = function(reg, ids, jobnames) { checkRegistry(reg, strict = TRUE, writeable = TRUE) if (missing(ids)) ids = dbGetJobIds(reg) else ids = checkIds(reg, ids) if (isScalarNA(jobnames)) jobnames = rep.int(NA_character_, length(ids)) else assertCharacter(jobnames, any.missing = FALSE) if (length(ids) != length(jobnames)) stop("Arguments 'ids' and 'jobnames' must have same length") dbSetJobNames(reg, ids, jobnames) setNames(ids, jobnames) }
/scratch/gouwar.j/cran-all/cranData/BatchJobs/R/setJobNames.R
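# Usage sketch for setJobNames() (not from the package sources): stored names
# can later be used for lookup with findJobs(jobnames = ...).
if (FALSE) {
  reg = makeRegistry(id = "jobNamesExample", file.dir = tempfile(), seed = 123)
  batchMap(reg, function(x) x^2, 1:3)
  setJobNames(reg, 1:3, c("small", "medium", "large"))
  findJobs(reg, jobnames = "medium")   # id 2
  setJobNames(reg, 1:3, NA)            # a single NA clears all names again
}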
#' Show information about available computational resources on cluster.
#'
#' @description
#' Currently only supported for multicore and SSH mode.
#' Displays: Name of node, current load, number of running R processes, number of R processes
#' with more than 50% load, number of BatchJobs jobs running.
#' The latter counts either jobs belonging to \code{reg} or all BatchJobs jobs if reg was not passed.
#'
#' @param reg [\code{\link{Registry}}]\cr
#'   Registry.
#'   May be omitted, which is the default.
#' @return [\code{data.frame}].
#' @export
showClusterStatus = function(reg) {
  if (missing(reg)) {
    file.dir = ""
  } else {
    checkRegistry(reg, writeable = FALSE)
    syncRegistry(reg)
    file.dir = reg$file.dir
  }
  conf = getBatchJobsConf()
  cf = conf$cluster.functions
  if (cf$name %nin% c("Multicore", "SSH"))
    stop("showClusterStatus can only be used in multicore or SSH mode!")
  workers = environment(cf$submitJob)$workers
  data = rbindlist(lapply(workers, getWorkerStatus, file.dir = file.dir))
  data$ncpus = extractSubList(workers, "ncpus")
  setDF(data, rownames = names(workers))
  return(data)
}
/scratch/gouwar.j/cran-all/cranData/BatchJobs/R/showClusterStatus.R
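# Minimal sketch for showClusterStatus() (not part of the package sources);
# only meaningful with multicore or SSH cluster functions configured.
if (FALSE) {
  showClusterStatus()      # counts all BatchJobs jobs on the workers
  showClusterStatus(reg)   # restrict the job count to this registry
}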
#' Display the contents of a log file.
#'
#' @description
#' Display the contents of a log file, useful in case of errors.
#'
#' Note this rare special case: When you use chunking, submit some jobs, some jobs fail,
#' then you resubmit these jobs again in different chunks, the log files will contain the log
#' of the old, failed job as well. \code{showLog} tries to jump to the correct part
#' of the new log file with a supported pager.
#'
#' @template arg_reg
#' @param id [\code{integer(1)}]\cr
#'   Id of selected job.
#'   Default is first id in registry.
#' @param pager [\code{any}]\cr
#'   Pager to use to display the log. Defaults to \code{getOption("pager")}.
#'   This option is passed to \code{file.show} and is highly OS dependant and GUI dependant.
#'   If either R's pager or the environment variable \dQuote{PAGER} is set to \dQuote{less}
#'   or \dQuote{vim}, the correct part of the log file will be shown.
#'   Otherwise you find information about the correct part in the beginning of the displayed file.
#' @return [\code{character(1)}]. Invisibly returns path to log file.
#' @family debug
#' @export
showLog = function(reg, id, pager = getOption("pager")) {
  checkRegistry(reg, writeable = FALSE)
  syncRegistry(reg)
  if (missing(id)) {
    id = dbGetJobId(reg)
    if (length(id) == 0L)
      stop("No jobs in registry!")
  } else {
    id = checkIds(reg, id, len = 1L)
  }
  fn = getLogFiles(reg, id)
  if (!file.exists(fn))
    stopf("Log file does not exist: %s", fn)

  exact1 = sprintf("Executing jid=%i", id)
  exact2 = sprintf("########## %s ##########", exact1)
  header = c(sprintf("Showing log file for job with id=%i", id),
    sprintf("The string '%s' indicates start of requested job", exact1))

  if (!is.function(pager)) {
    sys.pager = Sys.getenv("PAGER")
    if ((grepl("/pager$", pager) || pager == "internal") && sys.pager != "") {
      # prefer the system pager
      pager = sys.pager
    }
    bn = basename(stri_split_regex(pager, "\\s+")[[1L]][1L])

    # check for less or vim as pager
    # if we find the pattern, we jump to the matching line
    if (bn %in% c("less", "vim")) {
      pos = grep(exact2, readLines(fn), fixed = TRUE)
      if (length(pos) == 1L) {
        pos = pos + length(header) + 1L
        if (bn == "less")
          pager = function(files, header, title, delete.file) system2("less", c(sprintf("+%ig", pos), files))
        else
          pager = function(files, header, title, delete.file) system2("vim", c(sprintf("+%i", pos), files))
      }
    }
  }

  file.show(fn, pager = pager, header = collapse(header, sep = "\n"))
  invisible(fn)
}
/scratch/gouwar.j/cran-all/cranData/BatchJobs/R/showLog.R
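A short sketch of the typical showLog() workflow after a failed run; the registry and the failing function are made up for illustration.

reg = makeRegistry(id = "logDemo", file.dir = tempfile(), seed = 1)
batchMap(reg, function(x) if (x == 2L) stop("boom") else x, 1:3)
submitJobs(reg)
waitForJobs(reg)                       # returns FALSE, job 2 failed
err = findErrors(reg)
showLog(reg, err[1L])                  # uses getOption("pager")
showLog(reg, err[1L], pager = "less")  # 'less' jumps to the job's section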
#' @title Retrieve or show status information about jobs.
#'
#' @description
#' E.g.: How many there are, how many are done, any errors, etc.
#' \code{showStatus} displays on the console, \code{getStatus} returns an informative result
#' without console output.
#'
#' @template arg_reg
#' @param ids [\code{integer}]\cr
#'   Ids of selected jobs.
#'   Default is all jobs.
#' @param errors [\code{integer(1)}]\cr
#'   How many of the error messages should be displayed if any errors occurred in the jobs?
#'   Default is 10.
#' @param run.and.exp [\code{logical(1)}]\cr
#'   Show running and expired jobs?
#'   Requires listing the jobs on the batch system. If that is not possible, because
#'   the respective cluster function is not available, this option is ignored anyway.
#'   Default is \code{TRUE}.
#' @return [\code{list}]. List of absolute job counts. \code{showStatus} returns it
#'   invisibly.
#' @export
#' @examples
#' reg = makeRegistry(id = "BatchJobsExample", file.dir = tempfile(), seed = 123)
#' f = function(x) x^2
#' batchMap(reg, f, 1:10)
#' submitJobs(reg)
#' waitForJobs(reg)
#'
#' # should show 10 submitted jobs, which are all done.
#' showStatus(reg)
showStatus = function(reg, ids, run.and.exp = TRUE, errors = 10L) {
  checkRegistry(reg, writeable = FALSE)
  if (!missing(ids))
    ids = checkIds(reg, ids)
  assertFlag(run.and.exp)
  errors = asCount(errors)

  stats = getStatus(reg, ids = ids, run.and.exp = run.and.exp)

  percent = function(x, n) {
    if (is.na(x))
      return("")
    p = ifelse(n == 0L, 0, x / n * 100)
    sprintf("(%6.2f%%)", p)
  }

  output = collapse(c("Status for %%i jobs at %%s",
                      "Submitted: %%%1$ii %%s",
                      "Started: %%%1$ii %%s",
                      "Running: %%%1$ii %%s",
                      "Done: %%%1$ii %%s",
                      "Errors: %%%1$ii %%s",
                      "Expired: %%%1$ii %%s",
                      "Time: min=%%.2fs avg=%%.2fs max=%%.2fs"), "\n")
  output = sprintf(output, min(4L, nchar(sprintf("%i", stats$n + 1L))))
  with(stats, catf(output, n, Sys.time(),
                   submitted, percent(submitted, n),
                   started, percent(started, n),
                   running, percent(running, n),
                   done, percent(done, n),
                   error, percent(error, n),
                   expired, percent(expired, n),
                   t_min, t_avg, t_max))

  m = min(errors, stats$error)
  if (m > 0L) {
    msgs = dbGetErrorMsgs(reg, ids, filter = TRUE, limit = m)
    catf("\nShowing first %i errors:", m)
    cat(sprintf("Error in %i: %s", msgs$job_id, msgs$error), sep = "\n")
  }

  return(invisible(stats))
}

#' @rdname showStatus
#' @export getStatus
getStatus = function(reg, ids, run.and.exp = TRUE) {
  checkRegistry(reg, writeable = FALSE)
  syncRegistry(reg)
  assertFlag(run.and.exp)
  if (!missing(ids))
    ids = checkIds(reg, ids)
  run.and.exp = run.and.exp && !is.null(getListJobs())
  dbGetStats(reg, ids, running = run.and.exp, expired = run.and.exp, times = TRUE)
}
/scratch/gouwar.j/cran-all/cranData/BatchJobs/R/showStatus.R
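Since getStatus() returns its counts as a plain result object, it can drive control flow in scripts; a small sketch, reusing the fields (n, done, error) that showStatus() itself accesses above.

reg = makeRegistry(id = "statusDemo", file.dir = tempfile(), seed = 1)
batchMap(reg, function(x) x^2, 1:10)
submitJobs(reg)
waitForJobs(reg)
st = getStatus(reg)              # silent counterpart of showStatus()
if (st$error > 0L)
  warning(sprintf("%i of %i jobs failed", st$error, st$n))
if (st$done == st$n)
  message("all jobs finished cleanly")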
#' Source registry files
#'
#' @description
#' Sources all files found in \code{src.dirs} and specified via \code{src.files}.
#'
#' @template arg_reg
#' @param envir [\code{environment}]\cr
#'   Environment to source the files into. Default is the global environment.
#' @return Nothing.
#' @export
sourceRegistryFiles = function(reg, envir = .GlobalEnv) {
  checkRegistry(reg, writeable = FALSE)
  assertEnvironment(envir)
  sourceRegistryFilesInternal(reg$work.dir, reg$src.dirs, reg$src.files, envir = envir)
}

sourceRegistryFilesInternal = function(work.dir, dirs, files, envir = .GlobalEnv) {
  # add work dir if not /foo/bar path
  w = !isPathFromRoot(files)
  files[w] = file.path(work.dir, files[w])
  w = which.first(!file.exists(files))
  if (length(w))
    stopf("Files to source not found, e.g. %s", files[w])

  w = !isPathFromRoot(dirs)
  dirs[w] = file.path(work.dir, dirs[w])
  w = which.first(!isDirectory(dirs))
  if (length(w))
    stopf("Directories to source not found, e.g. %s", dirs[w])

  lapply(c(getRScripts(dirs), files), sys.source, envir = envir)
  invisible(TRUE)
}

getRScripts = function(dirs) {
  if (length(dirs)) {
    ok = isDirectory(dirs)
    if (any(!ok))
      stopf("Directories not found: %s", collapse(dirs[!ok]))
    unlist(lapply(dirs, list.files, pattern = "\\.[Rr]$", full.names = TRUE))
  } else {
    character(0L)
  }
}

#' @title Add source files to registry.
#'
#' @description
#' Mutator function for \code{src.files} in \code{\link{makeRegistry}}.
#'
#' @template arg_reg
#' @param src.files [\code{character}]\cr
#'   Paths to add to registry.
#'   See \code{\link{makeRegistry}}.
#' @param src.now [\code{logical(1)}]\cr
#'   Source files now on master?
#'   Default is \code{TRUE}.
#' @template ret_reg_mut
#' @family exports
#' @export
addRegistrySourceFiles = function(reg, src.files, src.now = TRUE) {
  checkRegistry(reg, writeable = TRUE)
  assertCharacter(src.files, any.missing = FALSE)
  assertFlag(src.now)
  src.files = sanitizePath(src.files, make.absolute = FALSE)
  if (src.now)
    sourceRegistryFilesInternal(reg$work.dir, character(0L), src.files)
  reg$src.files = union(reg$src.files, src.files)
  saveRegistry(reg)
}

#' @title Add source dirs to registry.
#'
#' @description
#' Mutator function for \code{src.dirs} in \code{\link{makeRegistry}}.
#'
#' @template arg_reg
#' @param src.dirs [\code{character}]\cr
#'   Paths to add to registry.
#'   See \code{\link{makeRegistry}}.
#' @param src.now [\code{logical(1)}]\cr
#'   Source files now on master?
#'   Default is \code{TRUE}.
#' @template ret_reg_mut
#' @family exports
#' @export
addRegistrySourceDirs = function(reg, src.dirs, src.now = TRUE) {
  checkRegistry(reg, writeable = TRUE)
  assertCharacter(src.dirs, any.missing = FALSE)
  assertFlag(src.now)
  src.dirs = sanitizePath(src.dirs, make.absolute = FALSE)
  if (src.now)
    sourceRegistryFilesInternal(reg$work.dir, src.dirs, character(0L))
  reg$src.dirs = union(reg$src.dirs, src.dirs)
  saveRegistry(reg)
}

#' @title Remove source files from registry.
#'
#' @description
#' Mutator function for \code{src.files} in \code{\link{makeRegistry}}.
#'
#' @template arg_reg
#' @param src.files [\code{character}]\cr
#'   Paths to remove from registry.
#' @template ret_reg_mut
#' @family exports
#' @export
removeRegistrySourceFiles = function(reg, src.files) {
  checkRegistry(reg, writeable = TRUE)
  assertCharacter(src.files, any.missing = FALSE)
  reg$src.files = setdiff(reg$src.files, src.files)
  saveRegistry(reg)
}

#' @title Remove source dirs from registry.
#'
#' @description
#' Mutator function for \code{src.dirs} in \code{\link{makeRegistry}}.
#' #' @template arg_reg #' @param src.dirs [\code{character}]\cr #' Paths to remove from registry. #' @template ret_reg_mut #' @family exports #' @export removeRegistrySourceDirs = function(reg, src.dirs) { checkRegistry(reg, writeable = TRUE) assertCharacter(src.dirs, any.missing = FALSE) reg$src.dirs = setdiff(reg$src.dirs, src.dirs) saveRegistry(reg) }
/scratch/gouwar.j/cran-all/cranData/BatchJobs/R/sourceRegistryFiles.R
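A sketch of the mutator workflow: register a helper script so it is sourced on the master now and on every slave later. The file name and its contents are hypothetical.

reg = makeRegistry(id = "srcDemo", file.dir = tempfile(), seed = 1)
writeLines("square = function(x) x^2", "helpers.R")  # hypothetical helper script
reg = addRegistrySourceFiles(reg, "helpers.R")       # sourced now (src.now = TRUE)
batchMap(reg, function(x) square(x), 1:3)            # slaves source helpers.R too
reg = removeRegistrySourceFiles(reg, "helpers.R")    # detach it again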
#' @title Submit jobs or chunks of jobs to batch system via cluster function.
#'
#' @description
#' If the internal submit cluster function completes successfully, the \code{retries}
#' counter is set back to 0 and the next job or chunk is submitted.
#' If the internal submit cluster function returns a fatal error, the submit process
#' is completely stopped and an exception is thrown.
#' If the internal submit cluster function returns a temporary error, the submit process
#' waits for a certain time, which is determined by calling the user-defined
#' \code{wait}-function with the current \code{retries} counter, the counter is
#' increased by 1 and the same job is submitted again. If \code{max.retries} is
#' reached the function simply terminates.
#'
#' Potential temporary submit warnings and errors are logged inside your file
#' directory in the file \dQuote{submit.log}.
#' To keep track you can use \code{tail -f [file.dir]/submit.log} in another
#' terminal.
#'
#' @template arg_reg
#' @param ids [\code{integer}]\cr
#'   Vector of job ids or list of vectors of chunked job ids.
#'   Only corresponding jobs are submitted. Chunked jobs will get executed
#'   sequentially as a single job for the scheduler.
#'   Default is all jobs which were not yet submitted to the batch system.
#' @param resources [\code{list}]\cr
#'   Required resources for all batch jobs. The elements of this list
#'   (e.g. \dQuote{walltime} or \dQuote{nodes}) are defined by your template job file.
#'   Defaults can be specified in your config file.
#'   Default is empty list.
#' @param wait [\code{function(retries)}]\cr
#'   Function that defines how many seconds should be waited in case of a temporary error.
#'   Default is exponential back-off with \code{10*2^retries}.
#' @param max.retries [\code{integer(1)}]\cr
#'   Number of times to submit one job again in case of a temporary error
#'   (like filled queues). Each time \code{wait} is called to wait a certain
#'   number of seconds.
#'   Default is 10 times.
#' @param chunks.as.arrayjobs [\code{logical(1)}]\cr
#'   If ids are passed as a list of chunked job ids, execute jobs in a chunk
#'   as array jobs. Note that your scheduler and your template must be adjusted to
#'   use this option. Default is \code{FALSE}.
#' @param job.delay [\code{function(n, i)} or \code{logical(1)}]\cr
#'   Function that defines how many seconds a job should be delayed before it starts.
#'   This is an expert option and only necessary to change when you want to submit
#'   extremely many jobs. We then delay the jobs a bit to write the submit messages as
#'   early as possible to avoid writer starvation.
#'   \code{n} is the number of jobs and \code{i} the number of the ith job.
#'   The default function used with \code{job.delay} set to \code{TRUE} is no delay for
#'   100 jobs or less and otherwise \code{runif(1, 0.1*n, 0.2*n)}.
#'   If set to \code{FALSE} (the default) delaying jobs is disabled.
#' @template arg_progressbar
#' @return [\code{integer}]. Vector of submitted job ids.
#' @export
#' @examples
#' reg = makeRegistry(id = "BatchJobsExample", file.dir = tempfile(), seed = 123)
#' f = function(x) x^2
#' batchMap(reg, f, 1:10)
#' submitJobs(reg)
#' waitForJobs(reg)
#'
#' # Submit the 10 jobs again, now randomized into 2 chunks:
#' chunked = chunk(getJobIds(reg), n.chunks = 2, shuffle = TRUE)
#' submitJobs(reg, chunked)
submitJobs = function(reg, ids, resources = list(), wait, max.retries = 10L,
  chunks.as.arrayjobs = FALSE, job.delay = FALSE, progressbar = TRUE) {

  ### helper function to calculate the delay
  getDelays = function(cf, job.delay, n) {
    if (is.logical(job.delay)) {
      if (job.delay && n > 100L && cf$name %nin% c("Interactive", "Multicore", "SSH")) {
        return(runif(n, n*0.1, n*0.2))
      }
      return(rep.int(0, n))
    }
    vnapply(seq_along(ids), job.delay, n = n)
  }

  ### argument checks on registry and ids
  checkRegistry(reg, writeable = TRUE)
  syncRegistry(reg)
  if (missing(ids)) {
    ids = dbFindSubmitted(reg, negate = TRUE)
    if (length(ids) == 0L) {
      info("All jobs submitted, nothing to do!")
      return(invisible(integer(0L)))
    }
  } else {
    if (is.list(ids)) {
      ids = lapply(ids, checkIds, reg = reg, check.present = FALSE)
      dbCheckJobIds(reg, unlist(ids))
    } else if (is.numeric(ids)) {
      ids = checkIds(reg, ids)
    } else {
      stop("Parameter 'ids' must be an integer vector of job ids or a list of chunked job ids (list of integer vectors)!")
    }
  }

  ### initialization of some helping vars
  conf = getBatchJobsConf()
  cf = getClusterFunctions(conf)
  limit.concurrent.jobs = is.finite(conf$max.concurrent.jobs)
  n = length(ids)

  ### argument checks for other parameters
  assertList(resources)
  resources = resrc(resources)
  if (missing(wait))
    wait = function(retries) 10 * 2^retries
  else
    assertFunction(wait, "retries")
  if (is.finite(max.retries))
    max.retries = asCount(max.retries)
  assertFlag(chunks.as.arrayjobs)
  if (chunks.as.arrayjobs && is.na(cf$getArrayEnvirName())) {
    warningf("Cluster functions '%s' do not support array jobs, falling back on chunks", cf$name)
    chunks.as.arrayjobs = FALSE
  }
  assert(checkFlag(job.delay), checkFunction(job.delay, c("n", "i")))
  assertFlag(progressbar)

  if (!is.null(cf$listJobs)) {
    ### check for running jobs
    ids.intersect = intersect(unlist(ids), dbFindOnSystem(reg, unlist(ids)))
    if (length(ids.intersect) > 0L) {
      stopf("Some of the jobs you submitted are already present on the batch system! E.g. id=%i.", ids.intersect[1L])
    }
  }

  if (limit.concurrent.jobs && (cf$name %in% c("Interactive", "Local", "Multicore", "SSH") || is.null(cf$listJobs))) {
    warning("Option 'max.concurrent.jobs' is enabled, but your cluster functions implementation does not support the listing of system jobs.\n",
            "Option disabled, sleeping 5 seconds for safety reasons.")
    limit.concurrent.jobs = FALSE
    Sys.sleep(5)
  }

  ### quick sanity check
  if (n > 5000L) {
    warningf(collapse(c("You are about to submit %i jobs.",
                        "Consider chunking them to avoid heavy load on the scheduler.",
                        "Sleeping 5 seconds for safety reasons."), sep = "\n"), n)
    Sys.sleep(5)
  }

  ### save config, start the work
  saveConf(reg)
  is.chunked = is.list(ids)
  info("Submitting %i chunks / %i jobs.", n, if (is.chunked) sum(viapply(ids, length)) else n)
  info("Cluster functions: %s.", cf$name)
  info("Auto-mailer settings: start=%s, done=%s, error=%s.", conf$mail.start, conf$mail.done, conf$mail.error)

  # use staged queries on master if fs.timeout is set
  # -> this way we are relatively sure that db transactions are performed in the intended order
  fs.timeout = conf$fs.timeout
  staged = useStagedQueries() || !is.na(fs.timeout)

  interrupted = FALSE
  submit.msgs = buffer(type = "list", capacity = 1000L, value = dbSendMessages,
                       reg = reg, max.retries = 10000L, sleep = function(r) 5,
                       staged = staged, fs.timeout = fs.timeout)
  logger = makeSimpleFileLogger(file.path(reg$file.dir, "submit.log"), touch = FALSE, keep = 1L)
  resources.timestamp = saveResources(reg, resources)

  ### set on exit handler to avoid inconsistencies caused by user interrupts
  on.exit({
    # we need the second case for errors in brew (e.g. resources)
    if (interrupted && exists("batch.result", inherits = FALSE)) {
      submit.msgs$push(dbMakeMessageSubmitted(reg, id, time = submit.time,
                                              batch.job.id = batch.result$batch.job.id,
                                              first.job.in.chunk.id = if (is.chunked) id1 else NULL,
                                              resources.timestamp = resources.timestamp))
    }
    # send remaining msgs now
    info("Sending %i submit messages...\nMight take some time, do not interrupt this!", submit.msgs$pos())
    submit.msgs$clear()
    # message the existence of the log file
    if (logger$getSize())
      messagef("%i temporary submit errors logged to file '%s'.\nFirst message: %s",
               logger$getSize(), logger$getLogfile(), logger$getMessages(1L))
  })

  ### reset status of jobs: delete errors, done, ...
  dbSendMessage(reg, dbMakeMessageKilled(reg, unlist(ids), type = "first"), staged = staged, fs.timeout = fs.timeout)

  ### initialize progress bar
  bar = getProgressBar(progressbar, max = n, label = "SubmitJobs")
  bar$set()
  delays = getDelays(cf, job.delay, n)

  tryCatch({
    for (i in seq_along(ids)) {
      id = ids[[i]]
      id1 = id[1L]
      retries = 0L

      ### write R scripts
      files = writeFiles(reg, cf, id, chunks.as.arrayjobs, resources.timestamp, disable.mail = FALSE, staged = staged, delay = delays[i])
      waitForFiles(files, timeout = fs.timeout)

      repeat { # max.retries may be Inf
        if (limit.concurrent.jobs && length(cf$listJobs(conf, reg)) >= conf$max.concurrent.jobs) {
          # emulate a temporary erroneous batch result
          batch.result = makeSubmitJobResult(status = 10L, batch.job.id = NA_character_, "Max concurrent jobs exhausted")
        } else {
          # try to submit the job
          interrupted = TRUE
          submit.time = now()
          batch.result = cf$submitJob(
            conf = conf,
            reg = reg,
            job.name = sprintf("%s-%i", reg$id, id1),
            rscript = files[1L],
            log.file = getLogFilePath(reg, id1),
            job.dir = getJobDirs(reg, id1),
            resources = resources,
            arrayjobs = if (chunks.as.arrayjobs) length(id) else 1L
          )
        }

        ### validate status returned from cluster functions
        if (batch.result$status == 0L) {
          submit.msgs$push(dbMakeMessageSubmitted(reg, id, time = submit.time,
                                                  batch.job.id = batch.result$batch.job.id,
                                                  first.job.in.chunk.id = if (is.chunked) id1 else NULL,
                                                  resources.timestamp = resources.timestamp))
          interrupted = FALSE
          bar$inc(1L)
          break
        }

        ### submitJob was not successful, handle the return status
        interrupted = FALSE

        if (batch.result$status > 0L && batch.result$status <= 100L) {
          if (is.finite(max.retries) && retries > max.retries)
            stopf("Retried already %i times to submit. Aborting.", max.retries)
          # temp error, wait and increase retries, then submit again in next iteration
          Sys.sleep(wait(retries))
          # log message to file
          logger$log(batch.result$msg)
          retries = retries + 1L
        } else if (batch.result$status > 100L && batch.result$status <= 200L) {
          # fatal error, abort at once
          stopf("Fatal error occurred: %i. %s", batch.result$status, batch.result$msg)
        } else {
          # illegal status code
          stopf("Illegal status code %s returned from cluster functions!", batch.result$status)
        }
      }
    }
  }, error = bar$error)

  ### return ids (on.exit handler kicks now in to submit the remaining messages)
  return(invisible(ids))
}
/scratch/gouwar.j/cran-all/cranData/BatchJobs/R/submitJobs.R
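A sketch showing the knobs discussed above: chunking via BBmisc::chunk(), a custom exponential back-off, and a resources list (which only has an effect when your template file actually uses the named resources).

reg = makeRegistry(id = "submitDemo", file.dir = tempfile(), seed = 1)
batchMap(reg, function(x) x^2, 1:100)
ids = chunk(getJobIds(reg), n.chunks = 4, shuffle = TRUE)  # 4 scheduler jobs
submitJobs(reg, ids,
  resources   = list(walltime = 3600, memory = 1024),  # names must match your template
  wait        = function(retries) 5 * 2^retries,       # gentler back-off than the default
  max.retries = 5L)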
#' @title Sweep obsolete files from the file system.
#'
#' @description
#' Removes R scripts, log files, resource information and temporarily stored configuration files
#' from the registry's file directory. Assuming all your jobs completed successfully, none of these are needed
#' for further work. This operation potentially releases quite a lot of disk space, depending on the number of your jobs.
#' BUT A HUGE WORD OF WARNING:
#' IF you later notice something strange and need to determine the reason for it, you are at a huge disadvantage.
#' Only do this at your own risk and when you are sure that you have successfully completed a project and only
#' want to archive your produced experiments and results.
#'
#' @template arg_reg
#' @param sweep [\code{character}]\cr
#'   Possible choices:
#'   temporary R scripts of jobs,
#'   really not needed for anything other than execution (\dQuote{scripts}),
#'   log files of jobs,
#'   think about whether you later want to inspect them (\dQuote{logs}),
#'   BatchJobs configuration files which are temporarily stored on submit,
#'   really not needed for anything other than execution (\dQuote{conf}),
#'   resource lists of \code{\link{submitJobs}} which are temporarily stored on submit,
#'   think about whether you later want to inspect them (\dQuote{resources}).
#'   Default is \code{c("scripts", "conf")}.
#' @return [\code{logical}]. Invisibly returns \code{TRUE} on success and \code{FALSE}
#'   if some files could not be removed.
#' @export
sweepRegistry = function(reg, sweep = c("scripts", "conf")) {
  checkRegistry(reg, writeable = TRUE)
  syncRegistry(reg)
  assertSubset(sweep, c("scripts", "logs", "resources", "conf"))
  if (length(dbFindRunning(reg, limit = 1L)) > 0L)
    stop("Can't sweep registry while jobs are running")

  fd = reg$file.dir
  jd = getJobParentDir(fd)
  rd = getResourcesDir(fd)

  # failed kill ids are always obsolete because no jobs are running anymore
  files = list.files(fd, pattern = "^killjobs_failed_ids_", full.names = TRUE)

  # sweep configuration
  if ("conf" %in% sweep)
    files = c(files, list.files(fd, pattern = "^conf\\.RData$", full.names = TRUE))

  # sweep resources
  if ("resources" %in% sweep)
    files = c(files, list.files(rd, full.names = TRUE))

  # sweep logs and scripts (in one go if possible)
  if (all(c("logs", "scripts") %in% sweep)) {
    files = c(files, list.files(jd, pattern = "^[0-9]+\\.(out|R)$", recursive = TRUE, full.names = TRUE))
  } else {
    if ("logs" %in% sweep)
      files = c(files, list.files(jd, pattern = "^[0-9]+\\.out$", recursive = TRUE, full.names = TRUE))
    if ("scripts" %in% sweep)
      files = c(files, list.files(jd, pattern = "^[0-9]+\\.R$", recursive = TRUE, full.names = TRUE))
  }

  info("Removing %i files ...", length(files))
  ok = all(file.remove(files))
  if (!ok)
    warning("Not all files could be deleted. Check file permissions and try again")
  return(invisible(ok))
}
/scratch/gouwar.j/cran-all/cranData/BatchJobs/R/sweepRegistry.R
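A sketch of a typical end-of-project sequence: collect the results, then sweep scripts, configs and logs while keeping the stored resource lists.

reg = makeRegistry(id = "sweepDemo", file.dir = tempfile(), seed = 1)
batchMap(reg, function(x) x^2, 1:10)
submitJobs(reg)
waitForJobs(reg)
total = reduceResults(reg, fun = function(aggr, job, res) aggr + res, init = 0)
sweepRegistry(reg, sweep = c("scripts", "conf", "logs"))  # 'resources' kept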
#' Synchronize staged queries into the registry.
#'
#' @description
#' If the option \dQuote{staged.queries} is enabled, all communication from the nodes
#' to the master is done via files in the subdirectory \dQuote{pending} of the \code{file.dir}.
#' This function checks for such files and merges the information into the database.
#' Usually you do not have to call this function yourself.
#'
#' @template arg_reg
#' @return Invisibly returns \code{TRUE} on success.
#' @export
syncRegistry = function(reg) {
  conf = getBatchJobsConf()
  if (useStagedQueries()) {
    if (conf$debug && isOnSlave())
      stop("SQL query sent from Worker")
    fns = lsort(list.files(getPendingDir(reg$file.dir), full.names = TRUE))
    if (length(fns) == 0L)
      return(invisible(TRUE))

    info("Syncing registry ...")
    queries = lapply(fns, readSQLFile)
    ok = !vlapply(queries, isFALSE)

    tryCatch(dbDoQueries(reg, unlist(queries[ok]), "rw"),
             error = function(e) stopf("Error syncing registry (%s)", e))

    fns = fns[ok]
    ok = file.remove(fns)
    if (!all(ok))
      warningf("Some pending result sets could not be removed, e.g. '%s'", head(fns[!ok], 1L))
  }
  invisible(TRUE)
}

readSQLFile = function(con) {
  x = try(readLines(con), silent = TRUE)
  n = length(x)
  if (is.error(x) || n == 0L || x[n] != "--EOF--")
    return(FALSE)
  x = x[-n]
  substr(x, 1L, nchar(x) - 1L)
}

writeSQLFile = function(x, con) {
  writeLines(c(paste0(x, ";"), "--EOF--"), con = con)
}

useStagedQueries = function() {
  isTRUE(getBatchJobsConf()$staged.queries)
}

.OrderChars = setNames(letters[1L:6L], c("first", "submitted", "started", "done", "error", "last"))
/scratch/gouwar.j/cran-all/cranData/BatchJobs/R/syncRegistry.R
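Normally other functions call syncRegistry() for you (compare showStatus() above); the explicit call below is only a sketch of what happens before any status query when staged queries are enabled.

reg = makeRegistry(id = "syncDemo", file.dir = tempfile(), seed = 1)
batchMap(reg, identity, 1:3)
submitJobs(reg)
syncRegistry(reg)   # merge pending node messages into the SQLite DB
showStatus(reg)     # would have synced implicitly anyway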
#' Tests a job by running it with Rscript in a new process.
#'
#' @description
#' Useful for debugging.
#' Note that neither the registry, database or file directory are changed.
#' @template arg_reg
#' @param id [\code{integer(1)}]\cr
#'   Id of job to test.
#'   Default is first job id of registry.
#' @param external [\code{logical(1)}]\cr
#'   Run test in an independent external R session instead of current.
#'   The former allows uncovering missing variable definitions (which may
#'   accidentally be defined in the current global environment) and the latter
#'   is useful to get traceable exceptions.
#'   Default is \code{TRUE}.
#' @param resources [\code{list}]\cr
#'   Usually not needed, unless you call the function \code{\link{getResources}} in your job.
#'   See \code{\link{submitJobs}}.
#'   Default is empty list.
#' @return [any]. Result of job. If the job did not complete because of an error, NULL is returned.
#' @family debug
#' @export
#' @examples
#' reg = makeRegistry(id = "BatchJobsExample", file.dir = tempfile(), seed = 123)
#' f = function(x) if (x==1) stop("oops") else x
#' batchMap(reg, f, 1:2)
#' testJob(reg, 2)
testJob = function(reg, id, resources = list(), external = TRUE) {
  checkRegistry(reg, writeable = FALSE)
  #syncRegistry(reg)
  if (missing(id)) {
    id = dbGetJobId(reg)
    if (length(id) == 0L)
      stop("Registry is empty!")
    messagef("Testing job with id=%i ...", id)
  } else {
    id = checkIds(reg, id, len = 1L)
  }
  assertList(resources)
  resources = resrc(resources)
  assertFlag(external)

  if (external) {
    # we don't want to change anything in the true registry / file dir / DB
    # so we have to copy stuff a little bit
    r = reg
    # get a unique, unused tempdir. tempdir() always stays the same per session
    td = tempfile(pattern = "")
    construct = sprintf("make%s", class(r)[1L])

    # copy reg
    reg = do.call(construct, list(id = reg$id, seed = r$seed, file.dir = td,
      work.dir = r$work.dir, sharding = FALSE,
      multiple.result.files = r$multiple.result.files,
      packages = names(reg$packages), src.dirs = reg$src.dirs, src.files = reg$src.files))

    # copy DB
    file.copy(from = file.path(r$file.dir, "BatchJobs.db"), to = file.path(td, "BatchJobs.db"), overwrite = TRUE)

    # copy conf
    saveConf(reg)

    # copy job stuff
    copyRequiredJobFiles(r, reg, id)

    # copy exports
    file.copy(from = getExportDir(r$file.dir), to = td, recursive = TRUE)

    # write r script
    resources.timestamp = saveResources(reg, resources)
    writeFiles(reg, getClusterFunctions(getBatchJobsConf()), id, chunks.as.arrayjobs = FALSE,
      resources.timestamp = resources.timestamp, disable.mail = TRUE, staged = FALSE, delay = 0)

    # execute
    now = Sys.time()
    message("### Output of new R process starts here ###")
    r.interp = file.path(R.home("bin"), sprintf("%s%s", "Rscript", ifelse(isWindows(), ".exe", "")))
    system3(r.interp, getRScriptFilePath(reg, id), wait = TRUE)
    message("### Output of new R process ends here ###")
    dt = difftime(Sys.time(), now)
    messagef("### Approximate running time: %.2f %s", as.double(dt), units(dt))

    res = try(getResult(reg, id))
    if (is.error(res))
      return(NULL)
  } else {
    setOnSlave(TRUE)
    on.exit(setOnSlave(FALSE))
    # FIXME: stuff we might need to store before: resources
    saveConf(reg)
    # trigger loadExports, sourceRegistryFiles, ...
    loadRegistry(reg$file.dir)
    res = applyJobFunction(reg, getJob(reg, id), makeFileCache(FALSE))
  }
  return(res)
}

#' ONLY FOR INTERNAL USAGE.
#' @param reg1 [\code{\link{Registry}}]\cr
#'   Source registry.
#' @param reg2 [\code{\link{Registry}}]\cr
#'   Destination registry.
#' @param id [\code{character(1)}]\cr
#'   Job id.
#' @return Nothing.
#' @keywords internal #' @export copyRequiredJobFiles = function(reg1, reg2, id) { UseMethod("copyRequiredJobFiles") } #' @export copyRequiredJobFiles.Registry = function(reg1, reg2, id) { job = getJob(reg1, id, check.id = FALSE) file.copy(getFunFilePath(reg1, job$fun.id), getFunFilePath(reg2, job$fun.id)) }
/scratch/gouwar.j/cran-all/cranData/BatchJobs/R/testJob.R
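The roxygen example above runs the external variant; the sketch below contrasts it with external = FALSE, where an error surfaces in the current session so traceback() works.

reg = makeRegistry(id = "testDemo", file.dir = tempfile(), seed = 1)
batchMap(reg, function(x) if (x == 1L) stop("oops") else x, 1:2)
testJob(reg, 2)                    # new Rscript process, returns 2
testJob(reg, 2, external = FALSE)  # runs in this session instead
# testJob(reg, 1, external = FALSE) would raise "oops" here; call traceback() after it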
#' ONLY FOR INTERNAL USAGE.
#' @template arg_reg
#' @return [any]. Updated \code{\link{Registry}} or \code{FALSE} if no updates were performed.
#' @keywords internal
#' @export
updateRegistry = function(reg) {
  UseMethod("updateRegistry")
}

#' @method updateRegistry Registry
#' @export
updateRegistry.Registry = function(reg) {
  # Fix for missing package version (package versions < 1.0.527)
  if ("BatchJobs" %nin% names(reg$packages)) {
    reg$packages$BatchJobs = list(version = package_version("1.0.527"))
  }
  version.reg = reg$packages$BatchJobs$version
  version.pkg = packageVersion("BatchJobs")
  if (version.reg == version.pkg) {
    return(FALSE)
  }
  if (version.reg > version.pkg) {
    warningf("The registry has been used with BatchJobs version %s, installed is version %s. You should update BatchJobs on this machine.", version.reg, version.pkg)
    return(FALSE)
  }

  # update registry
  info("Updating Registry and DB to newer version.")
  if (version.reg < package_version("1.0.606")) {
    # create new resources dir
    resources.dir = getResourcesDir(reg$file.dir)
    checkDir(resources.dir, create = TRUE, check.empty = TRUE)
    query = sprintf("ALTER TABLE %s_job_status ADD COLUMN resources_timestamp INTEGER", reg$id)
    dbDoQuery(reg, query, flags = "rwc")
    # save dummy resources
    query = sprintf("UPDATE %s_job_status SET resources_timestamp = 0 WHERE submitted IS NOT NULL", reg$id)
    dbDoQuery(reg, query, flags = "rwc")
    saveResources(reg, resources = list(), timestamp = 0L)
  }
  if (version.reg < package_version("1.0.723")) {
    checkDir(getPendingDir(reg$file.dir), create = TRUE)
  }
  if (version.reg < package_version("1.0.1071")) {
    checkDir(getExportDir(reg$file.dir), create = TRUE)
  }
  if (version.reg < package_version("1.1")) {
    query = sprintf("ALTER TABLE %s_job_def ADD COLUMN jobname TEXT", reg$id)
    dbDoQuery(reg, query, flags = "rwc")
    reg$src.dirs = character(0L)
  }
  if (version.reg < package_version("1.2")) {
    reg$src.files = character(0L)
  }
  if (version.reg < package_version("1.4")) {
    query = sprintf("ALTER TABLE %s_job_status ADD COLUMN memory REAL", reg$id)
    dbDoQuery(reg, query, flags = "rwc")
  }
  if (version.reg < package_version("1.7")) {
    query = sprintf("ALTER TABLE %s_job_status ADD COLUMN error_time INTEGER", reg$id)
    dbDoQuery(reg, query, flags = "rwc")
    reg$read.only = FALSE
  }

  reg$packages$BatchJobs$version = version.pkg
  reg
}

adjustRegistryPaths = function(reg, file.dir, work.dir) {
  adjusted = FALSE

  # adjust file dir if necessary
  file.dir = sanitizePath(file.dir, make.absolute = TRUE)
  if (!isDirectory(file.dir))
    stopf("file.dir does not exist or is not a directory: %s", file.dir)
  if (reg$file.dir != file.dir) {
    reg$file.dir = file.dir
    adjusted = TRUE
  }

  # adjust work dir if necessary
  if (missing(work.dir)) {
    if (!isDirectory(reg$work.dir))
      warningf("The currently set work.dir '%s' does not exist. Use option 'work.dir' in loadRegistry to change it.", reg$work.dir)
  } else {
    work.dir = sanitizePath(work.dir, make.absolute = TRUE)
    if (!isDirectory(work.dir))
      stopf("work.dir does not exist or is not a directory: %s", work.dir)
    reg$work.dir = work.dir
    adjusted = TRUE
  }

  if (adjusted) reg else FALSE
}
/scratch/gouwar.j/cran-all/cranData/BatchJobs/R/updateRegistry.R
#' Wait for termination of jobs on the batch system.
#'
#' @description
#' Waits for termination of jobs while displaying a progress bar
#' containing summary information about the jobs.
#' The following abbreviations are used in the progress bar:
#' \dQuote{S} for number of jobs on system, \dQuote{D} for number of
#' jobs successfully terminated, \dQuote{E} for number of jobs terminated
#' with an R exception and \dQuote{R} for number of jobs currently running
#' on the system.
#'
#' @template arg_reg
#' @param ids [\code{integer}]\cr
#'   Vector of job ids.
#'   Default is all submitted jobs not yet terminated.
#' @param sleep [\code{numeric(1)}]\cr
#'   Seconds to sleep between status updates. Default is \code{10}.
#' @param timeout [\code{numeric(1)}]\cr
#'   After waiting \code{timeout} seconds, show a message and return \code{FALSE}.
#'   This argument may be required on some systems where, e.g., expired jobs or jobs on hold
#'   are problematic to detect. If you don't want a timeout, set this to \code{Inf}.
#'   Default is \code{604800} (one week).
#' @param stop.on.error [\code{logical(1)}]\cr
#'   Immediately return if a job terminates with an error? Default is \code{FALSE}.
#' @template arg_progressbar
#' @return [\code{logical(1)}]. Returns \code{TRUE} if all jobs terminated successfully
#'   and \code{FALSE} if either an error occurred or the timeout is reached.
#' @export
waitForJobs = function(reg, ids, sleep = 10, timeout = 604800, stop.on.error = FALSE, progressbar = TRUE) {
  checkRegistry(reg, writeable = FALSE)
  syncRegistry(reg)
  if (missing(ids)) {
    ids = dbFindSubmittedNotTerminated(reg)
  } else {
    ids = checkIds(reg, ids)
    not.submitted = dbFindSubmitted(reg, ids, negate = TRUE, limit = 1L)
    if (length(not.submitted) > 0L)
      stopf("Not all jobs have been submitted, e.g. job with id %i", not.submitted)
  }
  assertNumber(sleep, lower = 1)
  if (is.infinite(sleep))
    stop("Argument 'sleep' must be finite")
  assertNumber(timeout, lower = sleep)
  assertFlag(stop.on.error)
  assertFlag(progressbar)

  n = length(ids)
  if (n == 0L)
    return(TRUE)

  timeout = now() + timeout
  batch.ids = getBatchIds(reg, "Cannot find jobs on system")
  i = 1L

  bar = getProgressBar(progressbar, min = 0L, max = n, label = "Waiting ")
  on.exit(bar$kill())

  repeat {
    stats = dbGetStats(reg, ids, running = TRUE, expired = FALSE, times = FALSE, batch.ids = batch.ids)
    n.sys = n - stats$done - stats$error
    bar$set(n - n.sys, msg = sprintf("Waiting [S:%i D:%i E:%i R:%i]", n.sys, stats$done, stats$error, stats$running))

    if (stop.on.error && stats$error) {
      err = dbGetErrorMsgs(reg, ids, filter = TRUE, limit = 1L)
      warningf("Job %i terminated with an error: %s", err$job_id, err$error)
      return(FALSE)
    }
    if (n.sys == 0L)
      return(stats$error == 0L)

    if (i %% 5L == 0L) {
      # update batch ids
      batch.ids = getBatchIds(reg, "Cannot find jobs on system")

      # check if there are still jobs on the system and none has mystically disappeared
      # NOTE it seems like some schedulers are "laggy", we should not do this operation
      # in the first loop w/o a sleep
      if (length(dbFindOnSystem(reg, ids, limit = 1L, batch.ids = batch.ids)) == 0L) {
        if (length(dbFindDisappeared(reg, ids, limit = 1L, batch.ids = batch.ids)) > 0L)
          bar$error(stop("Some jobs disappeared, i.e. were submitted but are now gone. Check your configuration and template file."))
        return(stats$error == 0L)
      }
    }

    if (is.finite(timeout) && now() > timeout) {
      warningf("Timeout reached. %i jobs still on system.", n.sys)
      return(FALSE)
    }

    Sys.sleep(sleep)
    i = i + 1L
    suppressMessages(syncRegistry(reg))
  }
}
/scratch/gouwar.j/cran-all/cranData/BatchJobs/R/waitForJobs.R
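A short sketch combining the arguments above: poll every second, abort on the first error, and cap the total wait.

reg = makeRegistry(id = "waitDemo", file.dir = tempfile(), seed = 1)
batchMap(reg, function(x) { Sys.sleep(1); x }, 1:4)
submitJobs(reg)
ok = waitForJobs(reg, sleep = 1, timeout = 600, stop.on.error = TRUE)
if (!ok) warning("a job failed or the timeout was hit")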
writeFiles = function(reg, cf, ids, chunks.as.arrayjobs, resources.timestamp, disable.mail, staged, delay) {
  ### write r script files
  template = paste(
    "Sys.sleep(%f)",
    "options(BatchJobs.on.slave = TRUE, BatchJobs.resources.path = '%s')",
    "library(BatchJobs)",
    "res = BatchJobs:::doJob(",
    "\treg = loadRegistry('%s'),",
    "\tids = c(%s),",
    "\tmultiple.result.files = %s,",
    "\tstaged = %s,",
    "\tdisable.mail = %s,",
    "\tfirst = %iL,",
    "\tlast = %iL,",
    "\tarray.id = %s)",
    "BatchJobs:::setOnSlave(FALSE)",
    sep = "\n")

  first = head(ids, 1L)
  last = tail(ids, 1L)

  # print the constant arguments (of length 1) into the template
  resources.path = getResourcesFilePath(reg, resources.timestamp)
  array.str = if (chunks.as.arrayjobs) sprintf("Sys.getenv(\"%s\", NA)", cf$getArrayEnvirName()) else NA
  template = sprintf(template, delay, resources.path, reg$file.dir, collapse(paste0(ids, "L")),
                     reg$multiple.result.files, staged, disable.mail, first, last, array.str)
  r.file = getRScriptFilePath(reg, first)
  cat(template, file = r.file)

  ### if staged is TRUE, also write the jobs to the file system
  if (staged) {
    job.file = getJobFile(reg, first)
    saveRDS(getJobs(reg, ids, check.ids = FALSE), file = job.file)
  } else {
    job.file = character(0L)
  }

  invisible(c(r.file, job.file))
}
/scratch/gouwar.j/cran-all/cranData/BatchJobs/R/writeFiles.R
#' @title The BatchJobs package
#'
#' @description
#' Provides Map, Reduce and Filter variants to generate jobs on batch
#' computing systems like PBS/Torque, LSF, SLURM and Sun Grid Engine.
#' Multicore and SSH systems are also supported. For further details see the
#' project web page.
#'
#' @section Additional information:
#'
#' \describe{
#' \item{Homepage:}{\url{https://github.com/tudo-r/BatchJobs}}
#' \item{Wiki:}{\url{https://github.com/tudo-r/BatchJobs/wiki}}
#' \item{FAQ:}{\url{https://github.com/tudo-r/BatchJobs/wiki/FAQ}}
#' \item{Configuration:}{\url{https://github.com/tudo-r/BatchJobs/wiki/Configuration}}
#' }
#'
#' The package currently supports the following further R options, which you can set
#' either in your R profile file or a script via \code{\link{options}}:
#'
#' \describe{
#' \item{BatchJobs.verbose}{This boolean flag can be set to \code{FALSE} to reduce the
#'   console output of the package operations. Usually you want to see this output in interactive
#'   work, but when you use the package in e.g. knitr documents,
#'   it clutters the resulting document too much.}
#' \item{BatchJobs.check.posix}{If this boolean flag is enabled, the package checks your
#'   registry file dir (and related user-defined directories) quite strictly to be POSIX compliant.
#'   Usually this is a good idea, you do not want to have strange chars in your file paths,
#'   as this might result in problems when these paths get passed to the scheduler or other
#'   command-line tools that the package interoperates with.
#'   But on some OS this check might be too strict and cause problems.
#'   Setting the flag to \code{FALSE} disables the check entirely.
#'   The default is \code{FALSE} on Windows systems and \code{TRUE} otherwise.}
#' }
#'
#' @docType package
#' @name BatchJobs
NULL

#' @import utils
#' @import stats
#' @import methods
#' @import checkmate
#' @import data.table
#' @import DBI
#' @import RSQLite
#' @importFrom digest digest
#' @importFrom brew brew
#' @importFrom sendmailR sendmail
#' @importFrom stringi stri_extract_first_regex
#' @importFrom stringi stri_trim_both
#' @importFrom stringi stri_split_fixed
#' @importFrom stringi stri_split_regex
#' @importFrom BBmisc %nin%
#' @importFrom BBmisc chunk
#' @importFrom BBmisc checkListElementClass
#' @importFrom BBmisc clipString
#' @importFrom BBmisc collapse
#' @importFrom BBmisc convertToShortString
#' @importFrom BBmisc convertListOfRowsToDataFrame
#' @importFrom BBmisc dropNamed
#' @importFrom BBmisc extractSubList
#' @importFrom BBmisc filterNull
#' @importFrom BBmisc insert
#' @importFrom BBmisc isDirectory
#' @importFrom BBmisc is.error
#' @importFrom BBmisc isProperlyNamed
#' @importFrom BBmisc isScalarNA
#' @importFrom BBmisc isWindows
#' @importFrom BBmisc lsort
#' @importFrom BBmisc namedList
#' @importFrom BBmisc names2
#' @importFrom BBmisc makeFileCache
#' @importFrom BBmisc makeProgressBar
#' @importFrom BBmisc makeSimpleFileLogger
#' @importFrom BBmisc save2 load2
#' @importFrom BBmisc requirePackages
#' @importFrom BBmisc setClasses setColNames setRowNames
#' @importFrom BBmisc seq_col seq_row
#' @importFrom BBmisc suppressAll
#' @importFrom BBmisc system3
#' @importFrom BBmisc vcapply viapply vlapply vnapply
#' @importFrom BBmisc warningf stopf messagef catf
#' @importFrom BBmisc which.first
NULL

.BatchJobs.conf = new.env(parent = emptyenv())
.BatchJobs.conf$cluster.functions = makeClusterFunctionsInteractive()
.BatchJobs.conf$mail.start = "none"
.BatchJobs.conf$mail.done = "none"
.BatchJobs.conf$mail.error = "none"
.BatchJobs.conf$db.driver = "SQLite" .BatchJobs.conf$db.options = list(pragmas = "busy_timeout=5000") .BatchJobs.conf$default.resources = list() .BatchJobs.conf$debug = FALSE .BatchJobs.conf$raise.warnings = FALSE .BatchJobs.conf$staged.queries = TRUE .BatchJobs.conf$max.concurrent.jobs = Inf .BatchJobs.conf$fs.timeout = NA_real_ .BatchJobs.conf$measure.mem = TRUE .BatchJobs.conffiles = character(0L) .onAttach = function(libname, pkgname) { packageStartupMessage("The development of BatchJobs and BatchExperiments is discontinued.") packageStartupMessage("Consider switching to 'batchtools' for new features and improved stability") if (getOption("BatchJobs.verbose", default = TRUE)) { cf = .BatchJobs.conffiles packageStartupMessage(sprintf("Sourced %i configuration files: ", length(cf))) for (i in seq_along(cf)) packageStartupMessage(sprintf(" %i: %s", i, cf[i])) conf = getConfig() packageStartupMessage(printableConf(conf)) } } .onLoad = function(libname, pkgname) { options(BatchJobs.check.posix = getOption("BatchJobs.check.posix", default = !isWindows())) options(BatchJobs.clear.function.env = getOption("BatchJobs.clear.function.env", default = FALSE)) backports::import(pkgname) if (!isOnSlave()) { if (getOption("BatchJobs.load.config", TRUE)) { pkg = if(missing(libname) || missing(pkgname)) find.package(package = "BatchJobs") else file.path(libname, pkgname) .BatchJobs.conffiles <<- readConfs(pkg) } } }
/scratch/gouwar.j/cran-all/cranData/BatchJobs/R/zzz.R
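The two options documented above can be set per session or in a profile; a minimal sketch:

# e.g. at the top of a knitr document or in ~/.Rprofile
options(BatchJobs.verbose = FALSE)      # silence console chatter
options(BatchJobs.check.posix = FALSE)  # relax strict path checking if needed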
cluster.functions = makeClusterFunctionsInteractive() mail.start = "none" mail.done = "none" mail.error = "none" db.driver = "SQLite" db.options = list() debug = FALSE
/scratch/gouwar.j/cran-all/cranData/BatchJobs/inst/etc/BatchJobs_global_config.R
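The file above holds the package's shipped defaults; a user-level ~/.BatchJobs.R overriding them might look like the following sketch (values are illustrative).

# hypothetical ~/.BatchJobs.R for a 4-core workstation
cluster.functions = makeClusterFunctionsMulticore(ncpus = 4)
mail.start = "none"
mail.done = "none"
mail.error = "none"
staged.queries = TRUE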
#' @useDynLib Bayenet, .registration = TRUE
#' @importFrom Rcpp sourceCpp
NULL

#' @docType package
#' @keywords overview
#' @name Bayenet-package
#' @title Bayesian Quantile Elastic Net for Genetic Study
#' @aliases Bayenet-package
#' @description In this package, we provide a set of robust Bayesian quantile variable selection methods for genetic analysis. A Bayesian formulation of the quantile regression has been adopted to accommodate data contamination and heavy-tailed distributions in the response.
#' The proposed method conducts a robust quantile variable selection by accounting for structural sparsity. In particular, the spike-and-slab priors are imposed to identify important genetic effects.
#' In addition to the default method, users can also choose different structures (robust or non-robust) and penalties (lasso or elastic net) with or without spike-and-slab priors.
#'
#' @details The user-friendly, integrated interface \strong{Bayenet()} allows users to flexibly choose the fitting methods they prefer. There are three arguments in Bayenet() that control the fitting method:
#' robust: whether to use robust methods; sparse: whether to use the spike-and-slab priors to create sparsity; penalty: use lasso or elastic net as penalty.
#' The function Bayenet() returns a Bayenet object that contains the posterior estimates of each coefficient.
#' predict.Bayenet() and print.Bayenet() are implemented for Bayenet objects.
#' predict.Bayenet() takes a Bayenet object and returns the predicted values for new observations.
#'
#' @references
#' Lu, X. and Wu, C. (2023). Bayesian quantile elastic net with spike-and-slab priors.
#'
#' Lu, X., Fan, K., Ren, J., and Wu, C. (2021). Identifying Gene–Environment Interactions With Robust Marginal Bayesian Variable Selection.
#' {\emph{Frontiers in Genetics}, 12:667074} \doi{10.3389/fgene.2021.667074}
#'
#' Zhou, F., Ren, J., Lu, X., Ma, S. and Wu, C. (2020). Gene–Environment Interaction: a Variable Selection Perspective. Epistasis. Methods in Molecular Biology.
#' {\emph{Humana Press} (Accepted)} \url{https://arxiv.org/abs/2003.02930}
#'
#' Wu, C., Cui, Y., and Ma, S. (2014). Integrative analysis of gene–environment interactions under a multi–response partially linear varying coefficient model.
#' {\emph{Statistics in Medicine}, 33(28), 4988–4998} \doi{10.1002/sim.6287}
#'
#' Li, Q. and Lin, N. (2010). The Bayesian elastic net. {\emph{Bayesian Anal}, 5(1): 151-170} \doi{10.1214/10-BA506}
#'
#' Li, Q., Xi, R. and Lin, N. (2010). The Bayesian regularized quantile regression. {\emph{Bayesian Analysis}, 5(3): 533-556}
#' \doi{10.1214/10-BA521}
#'
#' @seealso \code{\link{Bayenet}}
NULL
/scratch/gouwar.j/cran-all/cranData/Bayenet/R/Bayenet-package.R
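A sketch of the three switches described above, using the packaged toy data; run lengths are kept short for illustration only.

library(Bayenet)
data(dat)
fit1 = Bayenet(X, Y, clin, max.steps = 5000)  # robust + sparse + elastic net (defaults)
fit2 = Bayenet(X, Y, clin, max.steps = 5000,
               robust = FALSE, sparse = FALSE, penalty = "lasso")  # non-robust lasso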
#' @useDynLib Bayenet, .registration = TRUE
#' @importFrom Rcpp sourceCpp
NULL

#' Fit a robust Bayesian elastic net variable selection model for genetic study.
#' @keywords models
#' @param X the matrix of predictors (genetic factors). Each row should be an observation vector.
#' @param Y the continuous response variable.
#' @param clin a matrix of clinical variables. Clinical variables are not subject to penalization. Clinical variables will be centered and a column of 1s will be added to the clinical matrix as the intercept.
#' @param max.steps the number of MCMC iterations.
#' @param robust logical flag. If TRUE, robust methods will be used.
#' @param sparse logical flag. If TRUE, spike-and-slab priors will be used to shrink coefficients of irrelevant covariates to zero exactly.
#' @param penalty two choices are available. "lasso" for lasso penalty. "elastic net" for elastic net penalty.
#' @param debugging logical flag. If TRUE, progress will be output to the console and extra information will be returned.
#' @return an object of class `Bayenet' is returned, which is a list with components:
#' \item{posterior}{the posterior samples of coefficients from the MCMC.}
#' \item{coefficient}{the estimated value of coefficients.}
#' \item{burn.in}{the total number of burn-ins.}
#' \item{iterations}{the total number of iterations.}
#' \item{design}{the design matrix of all effects.}
#'
#' @details Consider the data model described in "\code{\link{dat}}":
#' \deqn{Y_{i} = \alpha_{0} + \sum_{k=1}^{q}\gamma_{k}C_{ik}+\sum_{j=1}^{p}\beta_{j}X_{ij}+\epsilon_{i},}
#' where \eqn{\alpha_{0}} is the intercept, \eqn{\gamma_{k}}'s and \eqn{\beta_{j}}'s are the regression coefficients corresponding to effects of clinical factors and genetic variants, respectively.
#'
#' When \code{penalty="elastic net"} (default), the elastic net penalty is adopted. If \code{penalty="lasso"}, the lasso penalty is used.
#'
#' When \code{sparse=TRUE} (default), spike-and-slab priors are imposed to identify important main and interaction effects. If \code{sparse=FALSE}, Laplacian shrinkage will be used.
#'
#' When \code{robust=TRUE} (default), the distribution of \eqn{\epsilon_{i}} is defined as a Laplace distribution with density
#' \eqn{
#' f(\epsilon_{i}|\nu) = \frac{\nu}{2}\exp\left\{-\nu |\epsilon_{i}|\right\}
#' }, (\eqn{i=1,\dots,n}), which leads to a Bayesian formulation of LAD regression. If \code{robust=FALSE}, \eqn{\epsilon_{i}} follows a normal distribution.
#'
#' Both \eqn{X} and \eqn{clin} will be standardized before the generation of interaction terms to avoid the multicollinearity between main effects and interaction terms.
#'
#' Please check the references for more details about the prior distributions.
#'
#' @references
#' Lu, X. and Wu, C. (2023). Bayesian quantile elastic net with spike-and-slab priors.
#'
#' @seealso \code{\link{Selection}}
#'
#' @examples
#' data(dat)
#'
#' max.steps = 5000
#' fit = Bayenet(X, Y, clin, max.steps, penalty="lasso")
#'
#' ## coefficients of parameters
#' fit$coefficient
#'
#' ## Estimated values of main G effects
#' fit$coefficient$G
#'
#' ## Estimated values of clinical effects
#' fit$coefficient$clin
#'
#'
#' @export
Bayenet <- function(X, Y, clin, max.steps=10000, robust=TRUE, sparse=TRUE, penalty=c("lasso","elastic net"), debugging=FALSE)
{
  dat = DataMatrix(X, Y, clin, intercept=TRUE, debugging=FALSE)
  c = dat$c; g = dat$g; y = dat$y
  n = dat$n; p = dat$p; q = ncol(c)
  G.names = dat$G.names
  clin.names = dat$clin.names

  if(robust){
    out = robust(X, Y, clin, max.steps, sparse, penalty, debugging=FALSE)
  }else{
    out = nonrobust(X, Y, clin, max.steps, sparse, penalty, debugging=FALSE)
  }

  BI = max.steps/2
  coeff.clin = apply(out$GS.alpha[-(1:BI),,drop=FALSE], 2, stats::median); names(coeff.clin) = c("IC", clin.names);
  coeff.G = apply(out$GS.beta[-(1:BI),,drop=FALSE], 2, stats::median); names(coeff.G) = G.names;

  coefficient = list(clin=coeff.clin, G=coeff.G)
  out = list(GS.C=out$GS.alpha, GS.G=out$GS.beta)

  fit = list(posterior = out, coefficient=coefficient, burn.in = BI, iterations=max.steps, design=list(G=g, CLC=c))
  class(fit) = "Bayenet"
  fit
}
/scratch/gouwar.j/cran-all/cranData/Bayenet/R/Bayenet.R
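A sketch of working with the returned object beyond the roxygen example: the posterior component holds the raw MCMC draws, the coefficient component their medians after burn-in.

data(dat)
fit = Bayenet(X, Y, clin, max.steps = 5000)
dim(fit$posterior$GS.G)   # max.steps draws x p genetic effects
fit$burn.in               # draws discarded when forming the medians
head(fit$coefficient$G)   # posterior median of each genetic effect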
DataMatrix <- function(X, Y, clin, intercept=TRUE, debugging=FALSE)
{
  g = as.matrix(X); y = Y
  n = nrow(g); p = ncol(g)
  CLC = NULL
  clin.names = G.names = NULL

  g = scale(g, center = TRUE, scale=FALSE)

  if(!is.null(y)){
    if(length(y) != n) stop("Length of Y does not match the number of rows of X.");
  }

  if(!is.null(clin)){
    clin = as.matrix(clin)
    if(nrow(clin) != n) stop("clin has a different number of rows than X.");
    if(is.null(colnames(clin))){colnames(clin)=paste("clin.", 1:ncol(clin), sep="")}
    CLC = clin
    clin.names = colnames(clin)
  }

  if(intercept){ # add intercept
    CLC = cbind(matrix(1,n,1,dimnames=list(NULL, "IC")), CLC)
  }

  if(is.null(colnames(g))){
    G.names = paste("G", 1:p, sep="")
  }else{
    G.names = colnames(g)
  }

  dat = list(y=y, c=CLC, g=g, n=n, p=p, G.names=G.names, clin.names=clin.names)
  return(dat)
}
/scratch/gouwar.j/cran-all/cranData/Bayenet/R/DataMatrix.R
Qenet = function(y,x,c, theta,max.steps) { n = nrow(x) p = ncol(x) q1 <- ncol(c) betasample = matrix(0,max.steps,p) tsample = matrix(0,max.steps,p) tausample = matrix(0,max.steps,1) eta1sample = matrix(0,max.steps,1) eta2sample = matrix(0,max.steps,1) vsample = matrix(0,max.steps,n) Gammasamples <- matrix(0,max.steps,q1) #theta = 0.5 xi1 = (1 - 2*theta) / (theta*(1-theta)) xi2 = sqrt(2 / (theta*(1-theta))) beta = rep(1,p) t = rep(2,p) v = rep(1,n) tau = 1 eta1 = 1 eta2 = 1 a = 1e-1 b = 1e-1 c1 = 1e-1 d1 = 1e-1 c2 = 1e-1 d2 = 1e-1 Gamma = rep(1,q1) gamma0 = 1 for(k in 1:max.steps){ #sample Gamma for(j in 1:q1) { A = c[,j]^2/v invsigma2 = tau*sum(A)/(xi2^2)+1/gamma0 sigma2 = 1/invsigma2 y_j = as.vector(y -c[,-j]%*%Gamma[-j]-x%*%beta-xi1*v) B = y_j*c[,j]/v mu = tau*sum(B)*sigma2/(xi2^2) Gamma[j] = stats::rnorm(1,mean=mu,sd=sqrt(sigma2)) } Gammasamples[k,] <- Gamma #sample v res = y-x%*%beta-c%*%Gamma lambda = xi1^2*tau/(xi2^2) + 2*tau mu = sqrt(lambda * xi2^2/(tau*res^2)) index = 1:n flag=1 while(flag){ inv_v= SuppDists::rinvGauss(length(index),nu=mu[index],lambda=lambda) flag = any(inv_v<=0)|any(is.na(inv_v)) v[index[inv_v>0]] = 1/inv_v[inv_v>0] index = base::setdiff(index,index[inv_v>0]) } vsample[k,] = v #sample beta for(j in 1:p){ A = x[,j]^2/v invsigma2 = tau*sum(A)/xi2^2 + 2*eta2*t[j]/(t[j]-1) sigma2 = 1/invsigma2 y_j = as.vector(y -xi1*v -x[,-j]%*%beta[-j]-c%*%Gamma) B = y_j*x[,j]/v mu = tau*sum(B)*sigma2/xi2^2 beta[j] = stats::rnorm(1,mean=mu,sd=sqrt(sigma2)) } betasample[k,]=beta #sample tau res = y-x%*%beta-xi1*v-c%*%Gamma vec = res^2/(2*xi2^2*v)+v rate = sum(vec)+b shape = a+3*n/2 tau = stats::rgamma(1,shape=shape,rate=rate) tausample[k,] = tau #sample t temp.lambda = 2 * eta1 temp.nu = sqrt(temp.lambda / (2*eta2*beta^2)) index = 1:p flag = 1 while (flag) { temp.s = SuppDists::rinvGauss(length(index), lambda = temp.lambda, nu = temp.nu[index]) flag = any(temp.s <= 0) | any(is.na(temp.s)) t[index[temp.s>0]] = 1 / temp.s[temp.s>0] + 1 index = base::setdiff(index, index[temp.s>0]) } tsample[k,] = t #sample eta1 rejections = 0 temp.shape = p + c1 temp.rate = sum(t-1) + d1 temp.eta1 = stats::rgamma(1, shape=temp.shape, rate=temp.rate) temp = p*log(stats::pgamma(eta1,shape=1/2,lower=F) / stats::pgamma(temp.eta1,shape=1/2,lower=F)) + p/2 * log(eta1/temp.eta1) + p*(eta1 - temp.eta1) temp = base::min(temp, 0) u = log(stats::runif(1)) if(u <= temp) {eta1 = temp.eta1} else {rejections = rejections + 1} eta1sample[k] = eta1 #sample eta2 shape2 = p/2 + c2 rate2 = sum(t/(t-1)*beta^2) + d2 eta2 = stats::rgamma(1, shape=shape2, rate=rate2) eta2sample[k] = eta2 } dat = list(GS.beta=betasample,t=tsample,tau=tausample,GS.b=Gammasamples,eta1=eta1sample,eta2=eta2sample,v=vsample) return(dat) }
/scratch/gouwar.j/cran-all/cranData/Bayenet/R/Qenet.R
Qenetss = function(y,x,c, theta, max.steps) { n = nrow(x) p = ncol(x) q1 <- ncol(c) betasample = matrix(0,max.steps,p) tsample = matrix(0,max.steps,p) tausample = matrix(0,max.steps,1) eta1sample = matrix(0,max.steps,1) eta2sample = matrix(0,max.steps,1) vsample = matrix(0,max.steps,n) pisample = matrix(0,max.steps,1) SS = matrix(0,max.steps,p) Gammasamples <- matrix(0,max.steps,q1) #theta = 0.5 xi1 = (1 - 2*theta) / (theta*(1-theta)) xi2 = sqrt(2 / (theta*(1-theta))) beta = rep(1,p) t = rep(2,p) v = rep(1,n) tau = 1 eta1 = 1 eta2 = 1 a = 1e-1 b = 1e-1 c1 = 1e-1 d1 = 1e-1 c2 = 1e-1 d2 = 1e-1 r1=1 u1=1 pi <- 1/2 Gamma = rep(1,q1) gamma0 = 1 for(k in 1:max.steps){ #sample Gamma for(j in 1:q1) { A = c[,j]^2/v invsigma2 = tau*sum(A)/xi2+1/gamma0 sigma2 = 1/invsigma2 y_j = as.vector(y -c[,-j]%*%Gamma[-j]-x%*%beta-xi1*v) B = y_j*c[,j]/v mu = tau*sum(B)*sigma2/xi2 Gamma[j] = stats::rnorm(1,mean=mu,sd=sqrt(sigma2)) } Gammasamples[k,] <- Gamma #sample v res = y-x%*%beta-c%*%Gamma lambda = xi1^2*tau/(xi2^2) + 2*tau mu = sqrt(lambda * xi2^2/(tau*res^2)) index = 1:n flag=1 while(flag){ inv_v= SuppDists::rinvGauss(length(index),nu=mu[index],lambda=lambda) flag = any(inv_v<=0)|any(is.na(inv_v)) v[index[inv_v>0]] = 1/inv_v[inv_v>0] index = base::setdiff(index,index[inv_v>0]) } vsample[k,] = v #sample beta z <- rep(0,p) sg <- rep(0,p) for(j in 1:p){ A = x[,j]^2/v invsigma2 = tau*sum(A)/xi2^2 + 2*eta2*t[j]/(t[j]-1) sigma2 = 1/invsigma2 y_j = as.vector(y -xi1*v -x[,-j]%*%beta[-j]-c%*%Gamma) B = y_j*x[,j]/v mu = tau*sum(B)*sigma2/xi2^2 BB = tau*sum(B)/xi2^2 d = (2*eta2*t[j]/(t[j]-1))^(1/2)*sqrt(sigma2)*exp((1/2)*(sqrt(sigma2)*BB)^2) l = pi/(pi+(1-pi)*d) u = stats::runif(1) if(u<l){ beta[j] = 0; z[j]=0; sg[j]=0 } else{ beta[j] = stats::rnorm(1,mean=mu,sd=sqrt(sigma2)); z[j]=1; sg[j]=1 } } betasample[k,]=beta SS[k,] <- sg #sample tau res = y-x%*%beta-xi1*v-c%*%Gamma vec = res^2/(2*xi2^2*v)+v rate = sum(vec)+b shape = a+3*n/2 tau = stats::rgamma(1,shape=shape,rate=rate) tausample[k,] = tau #sample t for(j in 1:p){ if(beta[j]==0){ flag = 1 while (flag) { temp.t = hbmem::rtgamma(1,shape= 1/2,scale = 1/eta1,a=1, b=Inf) flag = (temp.t <=1)|(temp.t == Inf) } t[j]=temp.t } else{ temp.lambda = 2 * eta1 temp.nu = sqrt(temp.lambda / (2*eta2*beta[j]^2)) flag = 1 while (flag) { temp.s = SuppDists::rinvGauss(1, lambda = temp.lambda, nu = temp.nu) flag = (temp.s<=0)|(is.na(temp.s))|(temp.s == Inf) } t[j] = 1 / temp.s + 1 } } tsample[k,] = t #sample eta1 rejections = 0 temp.shape = p + c1 temp.rate = sum(t-1) + d1 temp.eta1 = stats::rgamma(1, shape=temp.shape, rate=temp.rate) temp = p*log(stats::pgamma(eta1,shape=1/2,lower=F) / stats::pgamma(temp.eta1,shape=1/2,lower=F)) + p/2 * log(eta1/temp.eta1) + p*(eta1 - temp.eta1) temp = base::min(temp, 0) u = log(stats::runif(1)) if(u <= temp) {eta1 = temp.eta1} else {rejections = rejections + 1} eta1sample[k] = eta1 #sample eta2 shape2 = c2 + 1/2*sum(z) rate2 = sum(t/(t-1)*beta^2*z) + d2 eta2 = stats::rgamma(1, shape=shape2, rate=rate2) eta2sample[k] = eta2 #sample pi shape1 <- r1 + p - sum(z) shape2 <- u1 + sum(z) pi <- stats::rbeta(1,shape1, shape2) pisample[k] <- pi } dat = list(GS.beta=betasample,t=tsample,tau=tausample,GS.b=Gammasamples,eta1=eta1sample,eta2=eta2sample, v=vsample,pi=pisample, GS.SS=SS) return(dat) }
/scratch/gouwar.j/cran-all/cranData/Bayenet/R/Qenetss.R
# Generated by using Rcpp::compileAttributes() -> do not edit by hand # Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393 BL <- function(y, x, c, maxSteps, hatEta, hatb, hatInvTauSq2, invSigb0, hatLambdaSqStar2, hatSigmaSq, aStar, bStar, alpha, gamma, progress) { .Call(`_Bayenet_BL`, y, x, c, maxSteps, hatEta, hatb, hatInvTauSq2, invSigb0, hatLambdaSqStar2, hatSigmaSq, aStar, bStar, alpha, gamma, progress) } BLSS <- function(y, x, c, maxSteps, hatEta, hatb, hatInvTauSq2, sg2, hatPiEta, invSigb0, hatLambdaSqStar2, hatSigmaSq, aStar, bStar, alpha, gamma, mu0, nu0, progress) { .Call(`_Bayenet_BLSS`, y, x, c, maxSteps, hatEta, hatb, hatInvTauSq2, sg2, hatPiEta, invSigb0, hatLambdaSqStar2, hatSigmaSq, aStar, bStar, alpha, gamma, mu0, nu0, progress) } QBL <- function(y, x, c, maxSteps, hatb, hatEta, hatTau, hatV, hatSg2, invSigb0, hatEtaSq2, theta, r, a, b, progress) { .Call(`_Bayenet_QBL`, y, x, c, maxSteps, hatb, hatEta, hatTau, hatV, hatSg2, invSigb0, hatEtaSq2, theta, r, a, b, progress) } QBLSS <- function(y, x, c, maxSteps, hatAlpha, hatBeta, hatTau, hatV, hatSg, ss2, invSigAlpha0, hatPi, hatEtaSq, theta, r1, a, b, sh1, sh0, progress) { .Call(`_Bayenet_QBLSS`, y, x, c, maxSteps, hatAlpha, hatBeta, hatTau, hatV, hatSg, ss2, invSigAlpha0, hatPi, hatEtaSq, theta, r1, a, b, sh1, sh0, progress) }
/scratch/gouwar.j/cran-all/cranData/Bayenet/R/RcppExports.R
Selection.NonSparse=function(obj, prob=0.95){ BI=obj$burn.in GS.beta = obj$posterior$GS.G GS.beta = GS.beta[-c(1:BI),] coef_G = c() for(j in 1:ncol(GS.beta)) { t12 = GS.beta[,j] coef_G = c(coef_G,quanfun(t12,prob)) } names(coef_G) = names(obj$coefficient$G) Main.G=coef_G method = paste(prob*100,"% credible interval", sep = "") out = list(method=method, Main.G=Main.G) class(out) = "Selection" out }
/scratch/gouwar.j/cran-all/cranData/Bayenet/R/Selection.NonSparse.R
#' Variable selection for a Bayenet object
#'
#' @param obj Bayenet object.
#' @param sparse logical flag. If TRUE, spike-and-slab priors will be used to shrink coefficients of irrelevant covariates to zero exactly.
#' @details For class `Sparse', the inclusion probability is used to indicate the importance of predictors.
#' Here we use a binary indicator \eqn{\phi} to denote the membership of the non-spike distribution.
#' Take the main effect of the \eqn{j}th genetic factor, \eqn{X_{j}}, as an example.
#' Suppose we have collected H posterior samples from MCMC after burn-ins. The \eqn{j}th G factor is included
#' in the final model at the \eqn{h}th MCMC iteration if the corresponding indicator is 1, i.e., \eqn{\phi_j^{(h)} = 1}.
#' Subsequently, the posterior probability of retaining the \eqn{j}th genetic main effect in the final model is defined as the average of all the indicators for the \eqn{j}th G factor among the H posterior samples.
#' That is, \eqn{p_j = \hat{\pi} (\phi_j = 1|y) = \frac{1}{H} \sum_{h=1}^{H} \phi_j^{(h)}, \; j = 1, \dots, p.}
#' A larger posterior inclusion probability of the \eqn{j}th genetic main effect indicates stronger empirical evidence that it has a non-zero coefficient, i.e., a stronger association with the phenotypic trait.
#' Here, we use 0.5 as a cut-off point. If \eqn{p_j > 0.5}, then the \eqn{j}th genetic main effect is included in the final model. Otherwise, the \eqn{j}th genetic main effect is excluded from the final model.
#' For class `NonSparse', variable selection is based on the 95\% credible interval.
#' Please check the references for more details about the variable selection.
#'
#' @references
#' Lu, X. and Wu, C. (2023). Bayesian quantile elastic net with spike-and-slab priors.
#'
#' @rdname Selection
#' @return an object of class `Selection' is returned, which is a list with components:
#' \item{method}{method used for identifying important effects.}
#' \item{effects}{a list of indicators of selected effects.}
#'
#' @seealso \code{\link{Bayenet}}
#' @examples
#' data(dat)
#' max.steps = 5000
#' fit = Bayenet(X, Y, clin, max.steps, penalty="lasso")
#' selected = Selection(fit, sparse=TRUE)
#' selected$Main.G
#'
#'
#' @export
Selection <- function(obj, sparse){
  if(sparse){
    out = Selection.Sparse(obj)
  }else{
    out = Selection.NonSparse(obj, prob=0.95)
  }
  out
}
/scratch/gouwar.j/cran-all/cranData/Bayenet/R/Selection.R
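The inclusion probability \eqn{p_j} defined above can also be computed by hand from the stored posterior draws; a sketch that mirrors what Selection() does internally for the sparse case:

data(dat)
fit = Bayenet(X, Y, clin, max.steps = 5000)
GS = fit$posterior$GS.G[-(1:fit$burn.in), ]  # keep post burn-in draws
p.incl = colMeans(GS != 0)                   # share of draws with phi_j = 1
names(p.incl) = names(fit$coefficient$G)
p.incl[p.incl > 0.5]                         # effects kept by the 0.5 cut-off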
Selection.Sparse = function(obj){
  BI = obj$burn.in
  GS.beta = obj$posterior$GS.G
  GS.beta = GS.beta[-c(1:BI),]

  coef_G = c()
  for(j in 1:ncol(GS.beta))
  {
    t12 = as.numeric(GS.beta[,j] != 0)  # inclusion indicator per draw
    q_beta = mpm(t12)
    coef_G = c(coef_G, q_beta)
  }
  names(coef_G) = names(obj$coefficient$G)
  Main.G = coef_G

  method = "posterior inclusion probability"
  out = list(method=method, Main.G=Main.G)
  class(out) = "Selection"
  out
}
/scratch/gouwar.j/cran-all/cranData/Bayenet/R/Selection.Sparse.R
#' Simulated data for demonstrating the features of Bayenet.
#'
#' Simulated gene expression data for demonstrating the features of Bayenet.
#'
#' @docType data
#' @keywords datasets
#' @name dat
#' @aliases dat X Y clin coef
#' @usage data("dat")
#' @format dat consists of four components: X, Y, clin, coef.
#' @details
#'
#' \strong{The data model for generating Y}
#'
#' Use subscript \eqn{i} to denote the \eqn{i}th subject. Let \eqn{(Y_{i}, X_{i}, clin_{i})} (\eqn{i=1,\ldots,n}) be
#' independent and identically distributed random vectors. \eqn{Y_{i}} is a continuous response variable representing the
#' cancer outcome and disease phenotype. \eqn{X_{i}} is the \eqn{p}--dimensional vector of genetic factors. The clinical factors
#' are denoted by the \eqn{q}-dimensional vector \eqn{clin_{i}}.
#' The random error \eqn{\epsilon} follows a heavy-tailed distribution. Consider the following model:
#' \deqn{Y_{i} = \alpha_{0} + \sum_{k=1}^{q}\gamma_{k}C_{ik}+\sum_{j=1}^{p}\beta_{j}X_{ij}+\epsilon_{i},}
#' where \eqn{\alpha_{0}} is the intercept, and \eqn{\gamma_{k}}'s and \eqn{\beta_{j}}'s are the regression coefficients corresponding to the effects of clinical factors and genetic variants, respectively.
#' Denote \eqn{\gamma=(\gamma_{1}, \ldots, \gamma_{q})^{T}}, \eqn{\beta=(\beta_{1}, \ldots, \beta_{p})^{T}}.
#' The model can then be written as
#' \deqn{Y_{i} = C_{i}\gamma + X_{i}\beta + \epsilon_{i}.}
#'
#' @examples
#' data(dat)
#' dim(X)
#' @seealso \code{\link{Bayenet}}
NULL
/scratch/gouwar.j/cran-all/cranData/Bayenet/R/data.R
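A minimal sketch of generating data under the documented model, Y = alpha0 + C gamma + X beta + epsilon. The dimensions, effect sizes, and the t(2) error below are illustrative assumptions, not the settings used to create the shipped dat object.

set.seed(2023)
n <- 100; p <- 50; q <- 3
X    <- matrix(rnorm(n * p), n, p)      # genetic factors
clin <- matrix(rnorm(n * q), n, q)      # clinical factors
beta  <- c(1.5, -1, 2, rep(0, p - 3))   # sparse genetic effects
gamma <- c(0.5, -0.5, 1)                # clinical effects
Y <- 0.5 + clin %*% gamma + X %*% beta + rt(n, df = 2)  # heavy-tailed error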
# Gibbs sampler for the Bayesian elastic net (non-sparse, least squares loss).
# y: response vector; x: genetic factors; c: clinical covariates.
enet = function(y, x, c, max.steps)
{
  n  <- nrow(x)
  p  <- ncol(x)
  q1 <- ncol(c)

  XtX <- t(x) %*% x  # precomputed for the full conditional of beta

  betaSamples   <- matrix(0, max.steps, p)
  sigma2Samples <- matrix(0, max.steps, 1)
  TauSamples    <- matrix(0, max.steps, p)
  eta1sample    <- matrix(0, max.steps, 1)
  eta2sample    <- matrix(0, max.steps, 1)
  GammaSamples  <- matrix(0, max.steps, q1)

  # initial values
  beta   <- rep(1, p)
  sigma2 <- 1
  Tau    <- rep(1.5, p)  # must stay > 1 since the conditionals use Tau/(Tau - 1)
  eta1   <- 1
  eta2   <- 1
  Gamma  <- rep(1, q1)   # coefficients of the clinical factors

  # hyperparameters of the gamma priors on eta1 and eta2
  c1 = 1e-1
  d1 = 1e-1
  c2 = 1e-1
  d2 = 1e-1

  k <- 0
  while (k < max.steps) {
    k <- k + 1

    # sample beta from its multivariate normal full conditional
    AA = XtX + eta2 * diag(Tau / (Tau - 1))
    invA = solve(AA)
    mean_beta = invA %*% t(x) %*% (y - c %*% Gamma)
    varcov_beta = sigma2 * invA
    beta <- MASS::mvrnorm(1, mean_beta, varcov_beta)
    betaSamples[k, ] <- beta

    # sample Gamma (clinical coefficients)
    invsig1 = solve(diag(1, nrow = q1))
    B = invsig1 + t(c) %*% c / sigma2
    varcov1 = solve(B)
    res1 = y - x %*% beta
    mean1 = varcov1 %*% t(t(res1) %*% c / sigma2)
    Gamma = MASS::mvrnorm(1, mean1, varcov1)
    GammaSamples[k, ] <- Gamma

    # sample Tau: 1/(Tau - 1) has an inverse-Gaussian full conditional
    nu.prime = sqrt(eta1 / (eta2 * (beta^2)))
    lambda.prime = eta1 / sigma2
    for (j in 1:p) {
      flag = 1
      while (flag) {
        temp = VGAM::rinv.gaussian(n = 1, mu = nu.prime[j], lambda = lambda.prime)
        flag = (temp <= 0)
      }
      Tau[j] = 1 + 1 / temp
    }
    TauSamples[k, ] <- Tau

    # sample sigma2 by rejection from an inverse-gamma proposal
    a.temp = n/2 + p
    b.temp = 1/2 * sum((y - x %*% beta - c %*% Gamma)^2) +
      1/2 * eta2 * sum((Tau / (Tau - 1)) * beta^2) + 1/2 * eta1 * sum(Tau)
    flag.temp = 1
    while (flag.temp) {
      z.temp = MCMCpack::rinvgamma(n = 1, shape = a.temp, scale = b.temp)
      u.temp = stats::runif(1)
      if (log(u.temp) <= p * log(base::gamma(0.5)) - p * log(gsl::gamma_inc(1/2, eta1 / (2 * z.temp)))) {
        sigma2 = z.temp
        flag.temp = 0
      }
    }
    sigma2Samples[k] <- sigma2

    # sample eta1 with a Metropolis-Hastings step (gamma proposal)
    temp.shape = p + c1
    temp.rate = sum(Tau - 1) + (2 * sigma2) * d1
    temp.eta1 = stats::rgamma(1, shape = temp.shape, rate = temp.rate)
    temp = p * log(stats::pgamma(eta1, shape = 1/2, lower.tail = FALSE) /
                     stats::pgamma(temp.eta1, shape = 1/2, lower.tail = FALSE)) +
      p/2 * log(eta1 / temp.eta1) + p * (eta1 - temp.eta1)
    temp = min(temp, 0)
    u = log(stats::runif(1))
    if (u <= temp) { eta1 = temp.eta1 }
    eta1sample[k] = eta1 * 2 * sigma2

    # sample eta2 from its gamma full conditional
    shape2 = p/2 + c2
    rate2 = sum(Tau / (Tau - 1) * beta^2) / (2 * sigma2) + d2
    eta2 = stats::rgamma(1, shape = shape2, rate = rate2)
    eta2sample[k] = eta2
  }

  list(GS.beta = betaSamples, sigma2 = sigma2Samples, tau = TauSamples,
       GS.b = GammaSamples, eta1 = eta1sample, eta2 = eta2sample)
}
/scratch/gouwar.j/cran-all/cranData/Bayenet/R/enet.R
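enet() is an internal sampler (normally reached through Bayenet()), so the direct call below only works within the package source; the toy data and chain length are assumptions for illustration.

set.seed(1)
n <- 50; p <- 10; q <- 2
x <- matrix(rnorm(n * p), n, p)
c <- matrix(rnorm(n * q), n, q)
y <- x[, 1] - x[, 2] + c %*% c(0.5, -0.5) + rnorm(n)
fit <- enet(y, x, c, max.steps = 500)
colMeans(fit$GS.beta[-(1:250), ])  # posterior means after discarding burn-in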
# Gibbs sampler for the Bayesian elastic net with spike-and-slab priors
# (non-robust, least squares loss). y: response vector; x: genetic factors;
# c: clinical covariates.
enetss = function(y, x, c, max.steps)
{
  n  <- nrow(x)
  p  <- ncol(x)
  q1 <- ncol(c)

  betaSamples   <- matrix(0, max.steps, p)
  sigma2Samples <- matrix(0, max.steps, 1)
  TauSamples    <- matrix(0, max.steps, p)
  piSamples     <- matrix(0, max.steps, 1)
  SS            <- matrix(0, max.steps, p)  # spike/slab inclusion indicators
  eta1sample    <- matrix(0, max.steps, 1)
  eta2sample    <- matrix(0, max.steps, 1)
  GammaSamples  <- matrix(0, max.steps, q1)

  # initial values
  lambda1 = 1
  lambda2 = 1
  beta   <- rep(1, p)
  sigma2 <- 1
  Tau    <- rep(1.5, p)
  r = 1
  u = 1        # Beta(r, u) prior on pi; uniform draws below use u1/u2 so
               # they do not overwrite this hyperparameter
  pi   <- 1/2  # prior probability of the spike (beta_j = 0)
  eta1 = lambda1^2 / (4 * lambda2)
  eta2 = lambda2
  Gamma = rep(1, q1)  # coefficients of the clinical factors

  # hyperparameters of the gamma priors on eta1 and eta2
  c1 = 1e-1
  d1 = 1e-1
  c2 = 1e-1
  d2 = 1e-1

  k <- 0
  while (k < max.steps) {
    k <- k + 1
    z <- rep(0, p)  # inclusion indicators at this iteration

    # sample beta coordinate-wise from its spike-and-slab full conditional
    for (j in 1:p) {
      inv_Tau = (Tau[j] * eta2) / (Tau[j] - 1)
      A <- t(x[, j]) %*% x[, j] + inv_Tau
      inv_A <- 1 / A
      res <- y - x[, -j] %*% beta[-j] - c %*% Gamma
      mean_b <- inv_A * t(x[, j]) %*% res
      var_b <- sigma2 * inv_A
      L <- inv_A * t(res) %*% x[, j] %*% t(x[, j]) %*% res
      l0 <- pi + (1 - pi) * (inv_Tau^(1/2)) * sqrt(abs(inv_A)) * exp((1 / (2 * sigma2)) * L)
      l <- pi / l0  # conditional probability of the spike
      u1 <- stats::runif(1)
      if (u1 <= l) {
        beta[j] <- 0
        z[j] = 0
      } else {
        beta[j] <- stats::rnorm(1, mean_b, sqrt(var_b))
        z[j] = 1
      }
    }
    betaSamples[k, ] <- beta
    SS[k, ] <- z

    # sample Gamma (clinical coefficients)
    invsig1 = solve(diag(1, nrow = q1))
    B = invsig1 + t(c) %*% c / sigma2
    varcov1 = solve(B)
    res1 = y - x %*% beta
    mean1 = varcov1 %*% t(t(res1) %*% c / sigma2)
    Gamma = MASS::mvrnorm(1, mean1, varcov1)
    GammaSamples[k, ] <- Gamma

    # sample Tau: truncated gamma when beta_j is in the spike,
    # shifted inverse-Gaussian otherwise
    for (j in 1:p) {
      nu.prime = sqrt(eta1 / (eta2 * (beta[j]^2)))
      lambda.prime = eta1 / sigma2
      if (z[j] == 0) {
        flag = 1
        while (flag) {
          temp <- hbmem::rtgamma(1, shape = 1/2, scale = (2 * sigma2) / eta1, a = 1, b = Inf)
          flag = (temp <= 1) | (temp == Inf)
        }
        Tau[j] = temp
      } else {
        flag = 1
        while (flag) {
          temp = VGAM::rinv.gaussian(n = 1, mu = nu.prime, lambda = lambda.prime)
          flag = (temp <= 0)
        }
        Tau[j] = 1 + 1 / temp
      }
    }
    TauSamples[k, ] <- Tau

    # sample pi from its conjugate beta full conditional
    shape1 <- r + p - sum(z)
    shape2 <- u + sum(z)
    pi <- stats::rbeta(1, shape1, shape2)
    piSamples[k] <- pi

    # sample sigma2 by rejection from an inverse-gamma proposal
    a.temp = n/2 + p/2 + sum(z)/2
    b.temp = 1/2 * sum((y - x %*% beta - c %*% Gamma)^2) +
      1/2 * eta2 * sum((Tau / (Tau - 1)) * (beta^2) * z) + 1/2 * eta1 * sum(Tau)
    flag.temp = 1
    while (flag.temp) {
      z.temp = 1 / stats::rgamma(1, shape = a.temp, rate = b.temp)
      u.temp = stats::runif(1)
      if (log(u.temp) <= p * log(base::gamma(0.5)) - p * log(gsl::gamma_inc(1/2, eta1 / (2 * z.temp)))) {
        sigma2 = z.temp
        flag.temp = 0
      }
    }
    sigma2Samples[k] <- sigma2

    # sample eta1 with a Metropolis-Hastings step (gamma proposal)
    temp.shape = p + c1
    temp.rate = sum(Tau - 1) + (2 * sigma2) * d1
    temp.eta1 = stats::rgamma(1, shape = temp.shape, rate = temp.rate)
    temp = p * log(stats::pgamma(eta1, shape = 1/2, lower.tail = FALSE) /
                     stats::pgamma(temp.eta1, shape = 1/2, lower.tail = FALSE)) +
      p/2 * log(eta1 / temp.eta1) + p * (eta1 - temp.eta1)
    temp = min(temp, 0)
    u2 = log(stats::runif(1))
    if (u2 <= temp) { eta1 = temp.eta1 }
    eta1sample[k] = eta1 * 2 * sigma2

    # sample eta2 from its gamma full conditional
    shape2 = c2 + 1/2 * sum(z)
    rate2 = sum(Tau / (Tau - 1) * beta^2 * z) / (2 * sigma2) + d2
    eta2 = stats::rgamma(1, shape = shape2, rate = rate2)
    eta2sample[k] = eta2
  }

  list(GS.beta = betaSamples, sigma2 = sigma2Samples, tau = TauSamples,
       GS.b = GammaSamples, pi = piSamples, GS.SS = SS,
       eta1 = eta1sample, eta2 = eta2sample)
}
/scratch/gouwar.j/cran-all/cranData/Bayenet/R/enetss.R
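The pi update above is a standard conjugate step: with a Beta(r, u) prior and pi parameterized as the spike probability, the full conditional is Beta(r + p - sum(z), u + sum(z)), so inclusions count toward the second shape parameter. A toy check with assumed values:

r <- 1; u <- 1; p <- 10
z <- c(1, 1, 0, 0, 0, 0, 0, 0, 0, 0)  # 2 of 10 effects currently in the slab
draws <- stats::rbeta(1e4, r + p - sum(z), u + sum(z))
mean(draws)  # close to (r + p - sum(z)) / (r + u + p) = 9/12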
# Median probability model rule: returns 1 if the inclusion indicator equals 1
# in at least half of the posterior samples, and 0 otherwise.
mpm <- function(x) {
  if (mean(x) >= 0.5) 1 else 0
}
/scratch/gouwar.j/cran-all/cranData/Bayenet/R/mpm.R
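A quick usage note for mpm(); the indicator vectors below are hypothetical.

phi <- c(1, 1, 0, 1)  # indicator draws for one effect across 4 samples
mpm(phi)              # 1: inclusion probability 0.75 >= 0.5
mpm(c(0, 0, 1, 0))    # 0: inclusion probability 0.25 < 0.5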
# Fit the non-robust (least squares) Bayesian models. Dispatches to the Rcpp
# samplers (BL/BLSS) for the lasso penalty and to the pure-R samplers
# (enet/enetss) for the elastic net.
nonrobust <- function(X, Y, clin, max.steps, sparse, penalty, debugging = FALSE)
{
  dat = DataMatrix(X, Y, clin, intercept = TRUE, debugging = debugging)
  c = dat$c; g = dat$g; y = dat$y
  n = dat$n; p = dat$p; q = ncol(c)

  # initial values and hyperparameters passed to the samplers
  hatb = rep(1, q); hatEta = rep(1, p); invSigb0 = diag(rep(1, q))
  hatInvTauSq2 = rep(1, p)
  sg2 = rep(1, p); hatLambdaSqStar2 = 1; hatSigmaSq = 1
  aStar = 1; bStar = 1; alpha = 1; gamma = 1
  progress = 0; hatPiEta = 1/2; mu0 = 1; nu0 = 1

  if(sparse){
    fit = switch(penalty,
      "lasso" = BLSS(y, g, c, max.steps, hatEta, hatb, hatInvTauSq2, sg2, hatPiEta, invSigb0,
                     hatLambdaSqStar2, hatSigmaSq, aStar, bStar, alpha, gamma, mu0, nu0, progress),
      "elastic net" = enetss(y, g, c, max.steps)
    )
  }else{
    fit = switch(penalty,
      "lasso" = BL(y, g, c, max.steps, hatEta, hatb, hatInvTauSq2, invSigb0,
                   hatLambdaSqStar2, hatSigmaSq, aStar, bStar, alpha, gamma, progress),
      "elastic net" = enet(y, g, c, max.steps)
    )
  }

  out = list(GS.alpha = fit$GS.b,
             GS.beta = fit$GS.beta)

  if(sparse){
    class(out) = c("Sparse", "BVS")
  }else{
    class(out) = c("NonSparse", "BVS")
  }
  out
}
/scratch/gouwar.j/cran-all/cranData/Bayenet/R/nonrobust.R
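nonrobust() is an internal entry point (DataMatrix is defined elsewhere in the package), so the direct call below only works within the package source; it is a hedged usage sketch with the shipped example data, and the chain length is an assumption.

data(dat)
res <- nonrobust(X, Y, clin, max.steps = 1000, sparse = TRUE, penalty = "lasso")
dim(res$GS.beta)  # max.steps draws for each genetic main effect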