getStandardSectionNames <- function(sort_b_1 = FALSE) {
  ic <- InputContext(FunctionParameterName('z_l'))
  v <- ic$retrieveStrategy()$strategy$section_name
  stopifnot(length(unique(v)) == length(v))
  if (sort_b_1) sort(v) else v
}
/scratch/gouwar.j/cran-all/cranData/wyz.code.rdoc/R/getStandardSectionNames.R
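A minimal usage sketch for the accessor above, assuming wyz.code.rdoc and its wyz.code.offensiveProgramming dependency are installed; the exact section names returned depend on the strategy data shipped with the package.

```r
library(wyz.code.rdoc)

getStandardSectionNames()                 # section names in strategy order
getStandardSectionNames(sort_b_1 = TRUE)  # same names, sorted alphabetically
```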
getTypeLabel <- function(functionParameterName_o) {
  getAdj <- function(x_s, capitalize_b = FALSE)
    ifelse(grepl('^[aeiouy]', x_s, perl = TRUE), 'an', 'a')

  if (functionParameterName_o$isEllipsis()) return('additional arguments.')

  s <- functionParameterName_o$getTypeSuffix()
  tf <- wyz.code.offensiveProgramming::retrieveFactory()
  suffix <- NULL # nse data.table
  if (tf$checkSuffix(s)) {
    type <- tf$getType(s)
    kind <- if (tf$dt[suffix == s]$category %in% c(tf$type_classes$basic,
                                                   tf$type_classes$numeric,
                                                   tf$type_classes$math)) 'values' else 'objects'
  } else {
    type <- if (functionParameterName_o$isPolymorphic()) 'variable type' else 'unknown'
    kind <- 'objects'
  }

  lu <- functionParameterName_o$getLengthSuffix()
  ll <- functionParameterName_o$getLengthModifier()
  constraint <- if (is.na(lu)) 'unconstrained' else {
    if (!is.na(ll)) {
      paste0('length-', switch(ll,
                               'n' = paste('1 or', lu),
                               'l' = paste(lu, 'or less'),
                               'm' = paste(lu, 'or more')))
    } else {
      if (lu == 1L) 'single' else paste0('length-', lu)
    }
  }

  be <- beautify()
  single <- !is.na(lu) && is.na(ll) && lu == 1L
  paste0(getAdj(constraint, TRUE), ' ', constraint, ' ',
         if (type == 'list') be$italicCode(type) else
           paste0(ifelse(single, '', paste(be$italicCode('vector'), 'of ')),
                  be$italicCode(type), ' ',
                  ifelse(single, substr(kind, 1L, nchar(kind) - 1L), kind)),
         ' representing the ',
         paste(tolower(gsub('([A-Z]+)', ' \\1',
                            functionParameterName_o$getParameterName(), perl = TRUE)))
  )
}
/scratch/gouwar.j/cran-all/cranData/wyz.code.rdoc/R/getTypeLabel.R
identifyReplacementVariables <- function(filename_s) {
  l <- lapply(filename_s, function(f) {
    if (!file.exists(f)) abort(paste(f, 'is not an existing file'))
    r <- readLines(f, warn = FALSE)
    unlist(
      Filter(function(e) length(e) > 0,
             regmatches(r, gregexpr('XXX_[\\d]{3}', r, perl = TRUE)))
    )
  })
  names(l) <- filename_s
  l
}
/scratch/gouwar.j/cran-all/cranData/wyz.code.rdoc/R/identifyReplacementVariables.R
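A hedged illustration of the placeholder scan above. The file name and its content are invented for the example; only the three-digit XXX_ pattern comes from the function.

```r
# hypothetical .Rd draft containing generated placeholders
rd <- file.path(tempdir(), 'draft.Rd')
writeLines(c('\\title{XXX_001}', '\\description{XXX_002 then XXX_003}'), rd)

# named list, one entry per scanned file, listing every remaining placeholder
identifyReplacementVariables(rd)
```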
interpretResults <- function(manualPageGenerationResults_l) {
  m <- ManualPageBuilder(InputContext(NULL))
  m$interpretResults(manualPageGenerationResults_l)
}
/scratch/gouwar.j/cran-all/cranData/wyz.code.rdoc/R/interpretResults.R
manageSingleStrings <- function(anything_) {
  if (typeof(anything_) == 'language')
    return(as.character(as.expression(anything_)))

  l <- length(anything_)
  if (typeof(anything_) == 'integer' && l != 1 && !is.na(anything_[1]))
    return(paste0(as.character(as.expression(anything_)), 'L'))
  if (l != 1) return(as.character(anything_))
  if (is.function(anything_)) abort('functions not managed')

  if (is.character(anything_) || is.complex(anything_) || is.numeric(anything_)) {
    if (is.na(anything_)[1]) {
      if (is.character(anything_)[1]) return('NA_character_')
      if (is.numeric(anything_)[1]) {
        if (is.double(anything_)[1]) return('NA_real_')
        return('NA_integer_')
      }
      if (is.complex(anything_)[1]) return('NA_complex_')
      return('NA')
    }
  }

  if (!is.character(anything_)) {
    e <- as.character(as.expression(anything_))
    if (is.integer(anything_)) return(paste0(e, 'L'))
    return(e)
  }

  paste0('"', gsub('"', '\\"', anything_, fixed = TRUE), '"')
}
/scratch/gouwar.j/cran-all/cranData/wyz.code.rdoc/R/manageSingleStrings.R
opRdocInformation <- function() {
  stratum <- buildIdentityList(c('core', paste0('layer_', 1:3)))
  phasing <- buildIdentityList(c('design', 'build', 'test', 'run', 'maintain', 'evolve', 'transversal'))
  intent <- buildIdentityList(c('parts_building', 'parts_assembly', 'quality_control', 'statistics',
                                'feedback', 'content_generation', 'utilities'))
  category <- buildIdentityList(c('function', 'class', 'data'))
  nature <- buildIdentityList(c('exported', 'internal'))

  buildList <- function(name_s_1, category_s_1, nature_s_1, stratum_s_1, phasing_s_1, intent_s_1) {
    list(name = name_s_1, category = category_s_1, nature = nature_s_1,
         stratum = stratum_s_1, phasing = phasing_s_1, intent = intent_s_1)
  }

  dt <- data.table::rbindlist(list(
    buildList("beautify", category$FUNCTION, nature$EXPORTED, stratum$LAYER_2, phasing$BUILD, intent$PARTS_BUILDING),
    buildList("Colorizer", category$CLASS, nature$INTERNAL, stratum$LAYER_1, phasing$TRANSVERSAL, intent$FEEDBACK),
    buildList("computeDocumentationStatistics", category$FUNCTION, nature$EXPORTED, stratum$LAYER_3, phasing$RUN, intent$STATISTICS),
    buildList("convertExamples", category$FUNCTION, nature$EXPORTED, stratum$LAYER_3, phasing$RUN, intent$PARTS_ASSEMBLY),
    buildList("extractClassificationInformation", category$FUNCTION, nature$INTERNAL, stratum$LAYER_1, phasing$BUILD, intent$CONTENT_GENERATION),
    buildList("extractEnvObjectInformation", category$FUNCTION, nature$INTERNAL, stratum$LAYER_1, phasing$BUILD, intent$CONTENT_GENERATION),
    buildList("extractObjectOPInformation", category$FUNCTION, nature$INTERNAL, stratum$LAYER_1, phasing$BUILD, intent$CONTENT_GENERATION),
    buildList("extractR6ObjectInformation", category$FUNCTION, nature$INTERNAL, stratum$LAYER_1, phasing$BUILD, intent$CONTENT_GENERATION),
    buildList("extractRCObjectInformation", category$FUNCTION, nature$INTERNAL, stratum$LAYER_1, phasing$BUILD, intent$CONTENT_GENERATION),
    buildList("extractS3ObjectInformation", category$FUNCTION, nature$INTERNAL, stratum$LAYER_1, phasing$BUILD, intent$CONTENT_GENERATION),
    buildList("extractS4ObjectInformation", category$FUNCTION, nature$INTERNAL, stratum$LAYER_1, phasing$BUILD, intent$CONTENT_GENERATION),
    buildList("generateMarkup", category$FUNCTION, nature$EXPORTED, stratum$LAYER_1, phasing$BUILD, intent$PARTS_BUILDING),
    buildList("generateEnc", category$FUNCTION, nature$EXPORTED, stratum$LAYER_1, phasing$BUILD, intent$PARTS_BUILDING),
    buildList("generateEnumeration", category$FUNCTION, nature$EXPORTED, stratum$LAYER_1, phasing$BUILD, intent$PARTS_BUILDING),
    buildList("generateLabel", category$FUNCTION, nature$INTERNAL, stratum$LAYER_1, phasing$BUILD, intent$PARTS_BUILDING),
    buildList("generateParagraph", category$FUNCTION, nature$EXPORTED, stratum$LAYER_1, phasing$BUILD, intent$PARTS_BUILDING),
    buildList("generateParagraphCR", category$FUNCTION, nature$EXPORTED, stratum$LAYER_1, phasing$BUILD, intent$PARTS_BUILDING),
    buildList("generateParagraph2NL", category$FUNCTION, nature$EXPORTED, stratum$LAYER_1, phasing$BUILD, intent$PARTS_BUILDING),
    buildList("generateReference", category$FUNCTION, nature$EXPORTED, stratum$LAYER_1, phasing$BUILD, intent$PARTS_BUILDING),
    buildList("generateS3MethodSignature", category$FUNCTION, nature$EXPORTED, stratum$LAYER_1, phasing$BUILD, intent$PARTS_BUILDING),
    buildList("generateSection", category$FUNCTION, nature$EXPORTED, stratum$LAYER_1, phasing$BUILD, intent$PARTS_BUILDING),
    buildList("generateOptionLink", category$FUNCTION, nature$EXPORTED, stratum$LAYER_1, phasing$BUILD, intent$PARTS_BUILDING),
    buildList("generateOptionSexpr", category$FUNCTION, nature$EXPORTED, stratum$LAYER_1, phasing$BUILD, intent$PARTS_BUILDING),
    buildList("generateTable", category$FUNCTION, nature$EXPORTED, stratum$LAYER_1, phasing$BUILD, intent$PARTS_BUILDING),
    buildList("GenerationContext", category$CLASS, nature$EXPORTED, stratum$CORE, phasing$BUILD, intent$CONTENT_GENERATION),
    buildList("getFunctionSignature", category$FUNCTION, nature$INTERNAL, stratum$LAYER_1, phasing$BUILD, intent$CONTENT_GENERATION),
    buildList("getObjectConstructor", category$FUNCTION, nature$INTERNAL, stratum$LAYER_1, phasing$BUILD, intent$CONTENT_GENERATION),
    buildList("getObjectMethodSignature", category$FUNCTION, nature$INTERNAL, stratum$LAYER_1, phasing$BUILD, intent$CONTENT_GENERATION),
    buildList("getObjectSignature", category$FUNCTION, nature$INTERNAL, stratum$LAYER_1, phasing$BUILD, intent$CONTENT_GENERATION),
    buildList("getSectionContent", category$FUNCTION, nature$INTERNAL, stratum$LAYER_1, phasing$BUILD, intent$CONTENT_GENERATION),
    buildList("identifyReplacementVariables", category$FUNCTION, nature$EXPORTED, stratum$LAYER_3, phasing$RUN, intent$FEEDBACK),
    buildList("InputContext", category$CLASS, nature$EXPORTED, stratum$CORE, phasing$BUILD, intent$CONTENT_GENERATION),
    buildList("manageSingleStrings", category$FUNCTION, nature$INTERNAL, stratum$LAYER_1, phasing$BUILD, intent$CONTENT_GENERATION),
    buildList("generatePublicFieldParagraph", category$FUNCTION, nature$INTERNAL, stratum$LAYER_1, phasing$BUILD, intent$CONTENT_GENERATION),
    buildList("generatePublicMethodParagraph", category$FUNCTION, nature$INTERNAL, stratum$LAYER_1, phasing$BUILD, intent$CONTENT_GENERATION),
    buildList("ManualPageBuilder", category$CLASS, nature$EXPORTED, stratum$CORE, phasing$BUILD, intent$PARTS_ASSEMBLY),
    buildList("opRdocInformation", category$FUNCTION, nature$EXPORTED, stratum$LAYER_3, phasing$RUN, intent$FEEDBACK),
    buildList("ProcessingContext", category$CLASS, nature$EXPORTED, stratum$CORE, phasing$BUILD, intent$CONTENT_GENERATION),
    buildList("produceDocumentationFile", category$FUNCTION, nature$EXPORTED, stratum$LAYER_1, phasing$BUILD, intent$CONTENT_GENERATION),
    buildList("producePackageLink", category$FUNCTION, nature$EXPORTED, stratum$LAYER_3, phasing$BUILD, intent$PARTS_BUILDING),
    buildList("rdocKeywords", category$FUNCTION, nature$EXPORTED, stratum$CORE, phasing$RUN, intent$FEEDBACK),
    buildList("sentensize", category$FUNCTION, nature$EXPORTED, stratum$LAYER_1, phasing$BUILD, intent$PARTS_BUILDING),
    buildList("auditDocumentationFiles", category$FUNCTION, nature$EXPORTED, stratum$LAYER_1, phasing$BUILD, intent$QUALITY_CONTROL),
    buildList("completeManualPage", category$FUNCTION, nature$EXPORTED, stratum$LAYER_3, phasing$BUILD, intent$CONTENT_GENERATION),
    buildList("escapeContent", category$FUNCTION, nature$EXPORTED, stratum$LAYER_1, phasing$BUILD, intent$CONTENT_GENERATION),
    buildList("getStandardSectionNames", category$FUNCTION, nature$EXPORTED, stratum$LAYER_1, phasing$BUILD, intent$CONTENT_GENERATION),
    buildList("produceAllManualPagesFromObject", category$FUNCTION, nature$EXPORTED, stratum$LAYER_3, phasing$RUN, intent$QUALITY_CONTROL),
    buildList("produceManualPage", category$FUNCTION, nature$EXPORTED, stratum$LAYER_3, phasing$RUN, intent$QUALITY_CONTROL),
    buildList("interpretResults", category$FUNCTION, nature$EXPORTED, stratum$LAYER_3, phasing$RUN, intent$QUALITY_CONTROL),
    buildList("verifyDocumentationFile", category$FUNCTION, nature$EXPORTED, stratum$LAYER_1, phasing$RUN, intent$QUALITY_CONTROL),
    buildList("dummy", category$DATA, nature$EXPORTED, stratum$LAYER_3, phasing$BUILD, intent$UTILITIES),
    buildList("family", category$DATA, nature$EXPORTED, stratum$LAYER_3, phasing$BUILD, intent$UTILITIES)
  ))

  name <- NULL # nse
  dt[order(name)]
}
/scratch/gouwar.j/cran-all/cranData/wyz.code.rdoc/R/opRdocInformation.R
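The tutorial vignette further below queries this inventory by stratum; a compact sketch of that usage, with data.table attached as in the vignettes:

```r
library(data.table)

dt <- wyz.code.rdoc::opRdocInformation()

# exported entry points of the core layer, as shown in the tutorial vignette
sort(dt[stratum == 'CORE' & nature == 'EXPORTED']$name)

# rough breakdown of the inventory
dt[, .N, by = .(category, nature)]
```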
abort <- function(msg_s_1, ...) {
  stop(paste(msg_s_1, ...))
}

capitalize <- function(text_s) {
  if (missing(text_s)) return(NA_character_)
  sapply(text_s, function(e) {
    if (is.na(e)) return(e)
    if (!is.character(e)) return(e)
    n <- nchar(e)
    if (n == 0) return(e)
    if (n == 1) return(toupper(e))
    paste0(toupper(substr(e, 1, 1)), substring(e, 2))
  }, USE.NAMES = !is.null(names(text_s)))
}

catn <- function(...) cat(..., '\n')

ensureFilenameExtension <- function(filename_s_1m, extension_s_1m) {
  stopifnot(length(extension_s_1m) >= 1)
  is_valuedS <- function(x_s_1) !is.na(x_s_1) && !is.null(x_s_1) && nchar(x_s_1) > 0
  is_valued <- Vectorize(is_valuedS)
  ext <- ifelse(substr(extension_s_1m, 1, 1) == '.',
                paste0('\\', extension_s_1m),
                paste0('\\.', extension_s_1m))
  special_files <- filename_s_1m %in% c('.', '..')
  wf <- function(x_s_1) which(filename_s_1m == x_s_1)
  ifelse(special_files, filename_s_1m, {
    ex <- if (length(ext) == 1) ext[1] else ext[wf(filename_s_1m)]
    ifelse(is_valued(ex),
           suppressWarnings(paste0(sub(paste0(ex, '$'), '', filename_s_1m, perl = TRUE),
                                   substring(ex, 2))),
           filename_s_1m)
  })
}

removeFilenameExtension <- function(filename_s_1m) {
  b <- stringr::str_count(filename_s_1m, '\\.')
  special_files <- grepl('^\\.+$', filename_s_1m, perl = TRUE)
  rfe <- function(fn_s_1) {
    s <- strsplit(fn_s_1, '.', fixed = TRUE)[[1]]
    paste0(s[-length(s)], collapse = '.')
  }
  vrfe <- Vectorize(rfe)
  ifelse(b != 0 & !special_files, vrfe(filename_s_1m), filename_s_1m)
}

guardExecution <- function(yourExpression_ex, instrumentWarnings_b = TRUE) {
  if (instrumentWarnings_b) {
    tryCatch(yourExpression_ex, error = function(e) e, warning = function(w) w)
  } else {
    tryCatch(yourExpression_ex, error = function(e) e)
  }
}

normalizeSpaces <- function(text_s) {
  p <- '[\\s\\b]'
  gsub(paste0('^', p, '+', '|', p, '+', '$'), '',
       gsub(paste0(p, '+'), ' ', text_s, perl = TRUE), perl = TRUE)
}

strBracket <- function(text_s_n) {
  paste0('[', text_s_n, ']')
}

strBrace <- function(text_s_n) {
  paste0('{', text_s_n, '}')
}

strJoin <- function(text_s, join_s_n = ', ') {
  paste(text_s, sep = '', collapse = join_s_n)
}

strParenthesis <- function(text_s_n) {
  paste0('(', text_s_n, ')')
}

buildIdentityList <- function(entries_s) {
  d <- toupper(entries_s)
  names(d) <- gsub('[^A-Z0-9_]', '', d, perl = TRUE)
  as.list(d)
}

normalizeFilename <- function(filename_s) {
  n <- nchar(filename_s)
  p <- digest::digest(filename_s)
  s <- gsub('[^a-zA-Z0-9_\\.-]', '', filename_s, perl = TRUE)
  if (n == 0 || nchar(s) != n) return(paste0(s, '_', p))
  s
}
/scratch/gouwar.j/cran-all/cranData/wyz.code.rdoc/R/outOfPackage.R
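A few hedged calls against the filename helpers above, reached through ::: since they are not listed in the exported inventory; the expected results in the comments are derived from the definitions, not from running the package.

```r
wyz.code.rdoc:::ensureFilenameExtension('manual-page', '.Rd')    # expected "manual-page.Rd"
wyz.code.rdoc:::ensureFilenameExtension('manual-page.Rd', 'Rd')  # expected unchanged
wyz.code.rdoc:::removeFilenameExtension('archive.tar.gz')        # expected "archive.tar"
wyz.code.rdoc:::capitalize('lorem ipsum')                        # expected "Lorem ipsum"
```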
produceAllManualPagesFromObject <- function(object_o_1,
                                            processingContext_o_1 = ProcessingContext(),
                                            generationContext_o_1 = GenerationContext(),
                                            packageName_s_1 = NA_character_) {
  ic <- InputContext(object_o_1, packageName_s_1 = packageName_s_1)
  if (ic$kind != 2) abort('object_o_1 does not seem to be a class object')
  cl <- produceManualPage(ic, processingContext_o_1, generationContext_o_1)
  l <- lapply(getObjectFunctionNames(object_o_1), function(fn) {
    ic <- InputContext(object_o_1, fn, packageName_s_1 = packageName_s_1)
    rv <- produceManualPage(ic, processingContext_o_1, generationContext_o_1)
    if (generationContext_o_1$verbosity_b_1)
      cat('produced file', rv$context$filename, '\n')
    rv
  })
  list(class = cl, methods = l)
}
/scratch/gouwar.j/cran-all/cranData/wyz.code.rdoc/R/produceAllManualPagesFromObject.R
produceDocumentationFile <- function(filename_s_1, content_s, generationContext_o_1) {
  fn <- file.path(generationContext_o_1$targetFolder_s_1,
                  ensureFilenameExtension(filename_s_1, '.Rd'))
  b <- generationContext_o_1$overwrite_b_1 || !file.exists(fn)
  overwritten <- FALSE
  if (b) {
    writeLines(content_s, con = fn)
    overwritten <- TRUE
    if (generationContext_o_1$verbosity_b_1) catn('wrote file', fn)
  }
  list(filename = fn, overwritten = overwritten)
}
/scratch/gouwar.j/cran-all/cranData/wyz.code.rdoc/R/produceDocumentationFile.R
produceManualPage <- function(inputContext_o_1,
                              processingContext_o_1 = ProcessingContext(),
                              generationContext_o_1 = GenerationContext()) {
  if (generationContext_o_1$verbosity_b_1)
    cat('\n', paste(rep('-', 78), collapse = ''), '\n', sep = '')
  m <- ManualPageBuilder(inputContext_o_1, processingContext_o_1, generationContext_o_1)
  res <- m$buildManualPage()
  if (generationContext_o_1$verbosity_b_1) m$interpretResults(res)
  verifyDocumentationFile(res$context$filename)
  res
}
/scratch/gouwar.j/cran-all/cranData/wyz.code.rdoc/R/produceManualPage.R
rdocKeywords <- function(asList_b_1 = FALSE) {
  # Taken from the "Writing R Extensions" manual and "Parsing_Rd_files"
  kw <- list(
    # apparently a mix of what (object kind) and how (presentation format)
    markingTest = c('acronym', 'bold', 'cite', 'code', 'command', 'dfn', 'dQuote', 'email',
                    'emph', 'env', 'file', 'href', 'kbd', 'option', 'pkg', 'preformatted',
                    'samp', 'sQuote', 'special', 'strong', 'url', 'var', 'verb'),
    math = c('deqn', 'eqn'),
    insertions = c('dots', 'enc', 'ldots', 'R'),
    indices = c('concept', 'keyword'),
    sectionning = c('cr', 'section', 'tab', 'subsection'),
    listAndTables = c('describe', 'enumerate', 'item', 'itemize', 'tabular'),
    figures = c('figures'),
    crossReferences = c('link', 'linkS4class'),
    documentingFunctions = c('alias', 'docType', 'arguments', 'author', 'description',
                             'details', 'dontrun', 'dontshow', 'donttest', 'testonly',
                             'examples', 'method', 'name', 'note', 'references', 'seealso',
                             'S3method', 'S4method', 'title', 'usage', 'Rdversion',
                             'synopsis', 'encoding', 'value'),
    conditionalText = c('if', 'ifelse', 'out'),
    dynamiquePages = c('RdOpts', 'Sexpr'),
    userDefinedMacros = c('CRANpkg', 'doi', 'packageAuthor', 'packageDescription',
                          'packageDESCRIPTION', 'packageIndices', 'packageMaintainer',
                          'packageTitle', 'newcommand', 'renewcommand', 'sspace'),
    documentingDataSets = c('format', 'source')
  )
  # some usage knowledge
  # code    ==> e.g. \code{\link{help}}
  # section ==> to declare a new section, such as Warning, with \section{Warning}{.....}
  # itemize ==> does not seem to be necessary under value and arguments, but required elsewhere
  # item    ==> to emphasize items: \item{comp1}{Description of 'comp1'} - might need a space in between
  # concept ==> use instead of keyword if no keyword matches (see RShowDoc('KEYWORDS') to get the list of keywords)
  # keywords require no tilde in R documentation files
  # href    ==> e.g. \href{https://www.google.com}{google search}
  if (asList_b_1) return(kw)
  sort(unlist(kw, use.names = FALSE))
}
/scratch/gouwar.j/cran-all/cranData/wyz.code.rdoc/R/rdocKeywords.R
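A brief sketch of how the keyword inventory above can be consulted; the grouped values come straight from the list literal in the function.

```r
# flat, sorted vector of all known Rd keywords
head(rdocKeywords())

# grouped form, e.g. the math-related markup commands
rdocKeywords(asList_b_1 = TRUE)$math  # c("deqn", "eqn")
```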
sentensize <- function(x_s, ..., punctuationCharacter_s_1 = '.') {
  s <- normalizeSpaces(paste(x_s, ...))
  sapply(s, function(p) {
    n <- nchar(p)
    if (n == 0) return(p)
    last <- substr(p, n, n)
    if (last != punctuationCharacter_s_1) p <- paste0(p, punctuationCharacter_s_1)
    first <- substr(p, 1, 1)
    paste0(toupper(first), substring(p, 2))
  }, USE.NAMES = FALSE)
}
/scratch/gouwar.j/cran-all/cranData/wyz.code.rdoc/R/sentensize.R
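A hedged example of the sentence normalization performed above; the expected outputs follow from the definition (whitespace collapsed, capitalization and terminal punctuation enforced).

```r
sentensize('  add   elements to a vector ')               # expected "Add elements to a vector."
sentensize('is it done', punctuationCharacter_s_1 = '?')  # expected "Is it done?"
```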
shortcuts <- function(arguments_s = character(), doubleEscape_b_1 = TRUE) {
  valuesToNamedList <- function(values_l) {
    names(values_l) <- tolower(as.character(values_l))
    values_l[order(names(values_l))]
  }
  doubleEscape <- function(..., function_f_1) {
    result <- do.call(function_f_1, list(...))
    if (doubleEscape_b_1) return(gsub('\\', '\\\\', result, fixed = TRUE))
    result
  }
  doubleEscapeItalic <- function(...) doubleEscape(..., function_f_1 = b$italicCode)
  doubleEscapeBold <- function(...) doubleEscape(..., function_f_1 = b$boldCode)

  b <- beautify()
  list(
    doc = lapply(valuesToNamedList(list('R', 'dots', 'ldots')),
                 function(e) generateMarkup(keyword_s_1 = e)),
    constant = lapply(valuesToNamedList(list(NULL, TRUE, FALSE)), doubleEscapeItalic),
    types = lapply(valuesToNamedList(list('vector', 'list', 'function', 'boolean', 'logical',
                                          'character', 'array', 'environment', 'S3', 'S4',
                                          'R6', 'RC', 'numeric', 'table', 'raw', 'warning',
                                          'error', 'simpleError', 'string', 'integer',
                                          'double', 'complex', 'data.frame', 'matrix',
                                          'data.table')),
                   doubleEscapeItalic),
    arg = sapply(arguments_s, doubleEscapeBold, simplify = FALSE)
  )
}
/scratch/gouwar.j/cran-all/cranData/wyz.code.rdoc/R/shortcuts.R
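A hedged peek at the shortcut lists built above. The top-level names follow from the list construction; the exact markup inside each entry depends on beautify(), so it is not spelled out here.

```r
sc <- shortcuts(arguments_s = c('filename_s_1'))

names(sc)            # "doc" "constant" "types" "arg"
sc$types$list        # italic-code markup for the 'list' type, backslashes doubled
sc$arg$filename_s_1  # bold-code markup for the declared argument name
```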
verifyDocumentationFile <- function(filename_s_1) {
  rv <- tryCatch(tools::checkRd(filename_s_1),
                 error = function(e) e)
  colorizer <- Colorizer()
  msg <- paste('File', filename_s_1)
  if (is(rv, 'error')) {
    msg <- paste('ERROR:', msg, 'is erroneous')
    cat(colorizer$error(msg), '\n')
    print(rv)
  } else {
    if (length(rv) > 0) {
      msg <- paste('WARNING:', msg)
      cat(colorizer$warning(msg), '\n')
      print(rv)
    } else {
      msg <- paste(msg, 'passes standard documentation checks')
      cat(colorizer$info(msg), '\n')
    }
  }
}
/scratch/gouwar.j/cran-all/cranData/wyz.code.rdoc/R/verifyDocumentationFile.R
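A small sketch tying the checker above to a generated page, mirroring the vignette flow further below; note that produceManualPage already calls verifyDocumentationFile itself, so the explicit call is only illustrative.

```r
gc <- GenerationContext(tempdir(), overwrite = TRUE)
ic <- InputContext(NULL, 'append')
rv <- produceManualPage(ic, generationContext = gc)

# re-run the tools::checkRd based verification on the produced file
verifyDocumentationFile(rv$context$filename)
```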
dummy <- data.table::data.table(x = 1:9, y = LETTERS[1:9])
/scratch/gouwar.j/cran-all/cranData/wyz.code.rdoc/data/dummy.R
## ----setup, include = FALSE---------------------------------------------------
knitr::opts_chunk$set(
  collapse = TRUE,
  comment = "#>"
)

## ---- eval = TRUE, echo = FALSE-----------------------------------------------
pkn <- 'wyz.code.rdoc'
/scratch/gouwar.j/cran-all/cranData/wyz.code.rdoc/inst/doc/documentation.R
--- title: "wyz.code.rdoc documentation" author: "Fabien GELINEAU" date: "Last update 2020 January" output: rmarkdown::html_vignette: css: style.css vignette: > %\VignetteIndexEntry{wyz.code.rdoc documentation} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- <link rel="stylesheet" href="style.css"> <img src='images/rdoc-hex.png' alt='offensive programming - R documentation' style='width:30%'/> ```{r setup, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ``` ```{r, eval = TRUE, echo = FALSE} pkn <- 'wyz.code.rdoc' ``` Package **`r pkn`** aims to generate **R documentation** <cite class='comment'>manual pages</cite> directly from R code, being or not offensive programming instrumented. # Documents ## Offensive programming book Read [Offensive programming book](https://neonira.github.io/offensiveProgrammingBook/) to get introduction and advanced knowledge on offensive programming advantages, practice and ecosystem. Main chapter related to [documentation generation](https://neonira.github.io/offensiveProgrammingBook_v1.2.1/generating-r-documentation.html) might be of first interest. ## Vignettes [tutorial](tutorial.html) [use cases](use-cases.html) [tips and tricks](tips-and-tricks.html) [release notes](release-notes.html)
/scratch/gouwar.j/cran-all/cranData/wyz.code.rdoc/inst/doc/documentation.Rmd
## ----setup, include = FALSE---------------------------------------------------
knitr::opts_chunk$set(
  collapse = TRUE,
  comment = "#>"
)
source('common-style.R')
/scratch/gouwar.j/cran-all/cranData/wyz.code.rdoc/inst/doc/release-notes.R
---
title: Release notes of package wyz.code.rdoc
author: Fabien GELINEAU
date: 2020-04-22
output:
  rmarkdown::html_vignette:
    number_sections: false
    toc: false
    css: style.css
vignette: >
  %\VignetteIndexEntry{ Release notes of package wyz.code.rdoc }
  %\VignetteEngine{knitr::rmarkdown}
  %\VignetteEncoding{UTF-8}
---

```{r setup, include = FALSE}
knitr::opts_chunk$set(
  collapse = TRUE,
  comment = "#>"
)
source('common-style.R')
```

<img src='images/rdoc-hex.png' alt='offensive programming - R documentation' style='width:30%'/>

# name [awesome-asterion-tau] package-version [1.1.19] timestamp [2021-10-05 20:13:20]

1. CRAN information on obsolescence of lubridate - the dependency needs to be removed.
1. lubridate::date() was used in file `r citefile("test_outOfPackage.R")`.
1. Test, Duration: `r citefigure('22.5s')`, OK: `r citefigure('824')`
1. `r citeexec('R CMD check')`, Duration: `r citefigure('53.3s')`, 0 errors ✓ | 0 warnings ✓ | 0 notes ✓
1. `r citeval('Commercial software release management')`
1. `r citefolder('vignette')` content update
1. `r citeop("git")` alignment

# name [awesome-asterion-sigma] package-version [1.1.18] timestamp [2020-11-09 20:06:21]

1. enforced R 4.0
1. Test, Duration: `r citefigure('21.5s')`, OK: `r citefigure('824')`
1. `r citeexec('R CMD check')`, Duration: `r citefigure('56.4s')`, 0 errors ✓ | 0 warnings ✓ | 0 notes ✓
1. `r citeval('Commercial software release management')`
1. `r citefolder('vignette')` content update
1. `r citeop("git")` alignment

# Release awesome-asterion-rho - 1.1.17 - 2020-05-04

1. Test, Duration: `r citefigure('20.6s')`, OK: `r citefigure('824')`
1. `r citeexec('R CMD check')`, Duration: `r citefigure('56.4s')`, 0 errors ✓ | 1 warning ✓ | 0 notes ✓
1. `r citeval('Commercial software release management')`
1. `r citefolder('vignette')` structure update
1. `r citefolder('vignette')` content update

# Release 1.1.16 - April 22nd 2020

1. added `r citefun('shortcuts')` function
1. added `r citefun('getTypeLabel')` function
1. added test for `r citefun('shortcuts')` function
1. modified `r citefun('InputContext')` function
1. modified `r citefun('beautify')` function
1. modified `r citefun('extractObjectOPInformation')` function
1. modified `r citefun('generatePublicFieldParagraph')` function
1. modified `r citefun('generatePublicMethodParagraph')` function
1. modified `r citefun('GenerateTable')` function
1. added `r citefile('shortcuts.Rd')` manual page
1. updated all documentation files
1. upgraded and updated vignette files
1. test, `r citefigure('20.6s')`, OK: `r citefigure('824')`
1. R CMD check, `r citefigure('54.6s')`, 0 errors ✓ | 0 warnings ✓ | 0 notes ✓

# Release 1.1.8 - January 2020

Main improvements are

1. rebuilt manual page generation entirely
1. added a way to generate a manual page from a standard R function
1. added manual page post-processing to allow automated tuning of the resulting page
1. added manual page statistics - see the **computeDocumentStatistics** function manual page
1. enforced capitalization of new section names created in manual pages
1. completed and cleaned up R code - 33 exported functions - 17 internals
1. completed manual pages - 35 manual pages
1. enhanced manual page documentation: reviewed all contents and made corrections
1. completed vignettes - 5 vignettes
1. added a data folder to allow data manual page production and testing
1. fixed issues in vignettes
1. completed the test panel - 45 test files - 811 tests
1. completed business use cases - 10 BUC
1. worked on test coverage to reach a level higher than 99%
1. **packageFunctionsInformation** verified and upgraded
1. timing for tests 26s, checks 51s

# Release 1.1.7 - October 2019

Main improvements are

1. corrected many typographic errors in i/o with the end user
2. completed unit tests
3. enforced higher code coverage (from 75% up to 99.28%)
4. changed the visibility of many functions from hidden (internal to the package) to visible (available to the end user)
5. completed documentation
6. cleaned up package dependencies

This release fully replaces older ones, which are now considered obsolete. Keep the pace, and upgrade your packages if you do not use this version or a higher one!
/scratch/gouwar.j/cran-all/cranData/wyz.code.rdoc/inst/doc/release-notes.Rmd
## ----setup, include = FALSE--------------------------------------------------- knitr::opts_chunk$set( collapse = TRUE, comment = "" ) source('common-style.R') ## ----context, eval = TRUE, echo = FALSE--------------------------------------- library("data.table") library("wyz.code.offensiveProgramming") library("wyz.code.rdoc", warn.conflicts = FALSE) ## ----parameter_naming_pluralize, echo=TRUE, eval=TRUE------------------------- wyz.code.rdoc:::getTypeLabel(FunctionParameterName('countryFlag_b_3')) # wrong wyz.code.rdoc:::getTypeLabel(FunctionParameterName('countryFlags_b_3')) # right ## ----parameter_naming_singular, echo=TRUE, eval=TRUE-------------------------- wyz.code.rdoc:::getTypeLabel(FunctionParameterName('countryFlags_b_1')) # wrong wyz.code.rdoc:::getTypeLabel(FunctionParameterName('countryFlag_b_1')) # right ## ----parameter_naming, echo=TRUE, eval=TRUE----------------------------------- wyz.code.rdoc:::getTypeLabel(FunctionParameterName('countryFlag_b')) # wrong wyz.code.rdoc:::getTypeLabel(FunctionParameterName('countryFlags_b')) # right ## ----echo=TRUE, eval=TRUE----------------------------------------------------- pc <- ProcessingContext( extraneous_l = list( details = 'It is worth to know bla bla bla', concept = paste0('concept-', 1:3) ) ) ## ----echo=TRUE, eval=TRUE----------------------------------------------------- pc <- ProcessingContext( postProcessing_l = list( title = function(content_s) { paste(content_s, sentensize('some complimentary content')) } ) ) ## ----echo=TRUE, eval=TRUE----------------------------------------------------- pc <- ProcessingContext( postProcessing_l = list( details = function(content_s) NULL ) ) ## ----content_escape, echo=TRUE, eval=TRUE------------------------------------- content <- 'function(x) { x + 1 }' # To be use in a code section content generateMarkup(content) # To be used in a text section content paste('Some R code:', generateMarkup(content, escapeBraces_b_1 = TRUE)) ## ----echo=TRUE, eval=TRUE----------------------------------------------------- # The function to test divide <- function(x_n, y_n) x_n / y_n # The examples to consider examples <- list( function() { divide(1:3, 1:3 + 13L) }, function() { divide(0L, c(Inf, -Inf)) }, function() { divide(c(Inf, -Inf), 0L) }, function() { divide(0L, 0L) } ) # your documentation complementary parts to consider # and manual page generation context setup ic <- InputContext(NULL, 'divide') pc <- ProcessingContext( extraneous_l = list( examples = convertExamples(examples, captureOutput_b_1n = TRUE) ) ) gc <- GenerationContext(tempdir(), overwrite = TRUE) # The generation of the manual page rv <- produceManualPage(ic, pc, gc) ## ----echo=TRUE, eval=TRUE----------------------------------------------------- readLines(rv$context$filename) ## ----echo=TRUE, eval=TRUE----------------------------------------------------- generateEnumeration(paste('case', 1:4)) ## ----echo=TRUE, eval=TRUE----------------------------------------------------- generateEnumeration(paste('case', 1:4), TRUE) ## ----echo=TRUE, eval=TRUE----------------------------------------------------- dt <- data.table::data.table(x = paste0('XY_', 1:3), y = letters[1:3]) # as-is generateTable(dt) # with row numbering generateTable(dt, numberRows_b_1 = TRUE) ## ----echo=TRUE, eval=TRUE----------------------------------------------------- b <- beautify() names(b) ## ----echo=TRUE, eval=TRUE----------------------------------------------------- b$bold('lorem ipsum') ## ----echo=TRUE, 
eval=TRUE----------------------------------------------------- b$file('/tmp/result.txt') ## ----echo=TRUE, eval=TRUE----------------------------------------------------- b$acronym('CRAN') ## ----echo=TRUE, eval=TRUE----------------------------------------------------- co <- '{ x %% y }' b$code(co) # very probably wrong e <- beautify(TRUE) e$code(co) # much more probably right ## ----echo=TRUE, eval=TRUE----------------------------------------------------- # link to another package b$code(producePackageLink('ggplot2', 'aes_string')) # link to same package b$codeLink('generateTable') # link to same package with enhanced presentation b$enhanceCodeLink('generateTable')
/scratch/gouwar.j/cran-all/cranData/wyz.code.rdoc/inst/doc/tips-and-tricks.R
--- title: "wyz.code.rdoc Tips and Tricks" author: "Fabien GELINEAU" date: "Last update 2020 January" output: html_document: number_sections: true toc: true css: style.css vignette: > %\VignetteIndexEntry{wyz.code.rdoc Tips and Tricks} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- <img src='images/rdoc-hex.png' alt='offensive programming - R documentation' style='width:30%'/> ```{r setup, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "" ) source('common-style.R') ``` ```{r context, eval = TRUE, echo = FALSE} library("data.table") library("wyz.code.offensiveProgramming") library("wyz.code.rdoc", warn.conflicts = FALSE) ``` This use-case vignette is dedicated to some common manual page use case generation using package `r rdoc`. It may make sense for newcommers to read the tutorial vignette prior to read this vignette. # Tips and tricks about R code {.tabset .tabset-fade .tabset-pills} ## Parameter naming Unless you specify a length of `r citefigure('1')` or `r citefigure('1l')`, pluralize the parameter name. For example, avoid `r citeop('countryFlag_b_3')`, prefer `r citeop('countryFlags_b_3')`. Reason is quite simple, with the first one, you will have to correct produce documentation text as it is less likely to be correct in singular form than pluralized. When using no length specification, pluralizing that parameter name is the best practice. Understanding following examples, worth the time. ```{r parameter_naming_pluralize, echo=TRUE, eval=TRUE} wyz.code.rdoc:::getTypeLabel(FunctionParameterName('countryFlag_b_3')) # wrong wyz.code.rdoc:::getTypeLabel(FunctionParameterName('countryFlags_b_3')) # right ``` ```{r parameter_naming_singular, echo=TRUE, eval=TRUE} wyz.code.rdoc:::getTypeLabel(FunctionParameterName('countryFlags_b_1')) # wrong wyz.code.rdoc:::getTypeLabel(FunctionParameterName('countryFlag_b_1')) # right ``` ```{r parameter_naming, echo=TRUE, eval=TRUE} wyz.code.rdoc:::getTypeLabel(FunctionParameterName('countryFlag_b')) # wrong wyz.code.rdoc:::getTypeLabel(FunctionParameterName('countryFlags_b')) # right ``` # Tips and tricks about sections {.tabset .tabset-fade .tabset-pills} ## add a section Some sections have to be unique `r cmt('title, description, examples, ...')` some others not `r cmt('keyword, concept, alias, ...')`. You must respect the implicit contract of the standard `r R` documentation. See [writing `r R` extensions](https://cran.r-project.org/doc/manuals/r-release/R-exts.html) for more information. To add a section, just set its content to the one you desired. Content may contain format directives or not. See below, paragraph 'about-format'. My only advice is to keep content as simple and sharp as possible. Using non ambiguous terms and clear sentences helps a lot. For example, to add a `r citefun('details')` section and three `r citefun('concept')` sections, you could do something similar to ```{r echo=TRUE, eval=TRUE} pc <- ProcessingContext( extraneous_l = list( details = 'It is worth to know bla bla bla', concept = paste0('concept-', 1:3) ) ) ``` ## complete a section Activate post processing for sections you want to complete. For example, to complete the `r citefun('title')` section, you could do ```{r echo=TRUE, eval=TRUE} pc <- ProcessingContext( postProcessing_l = list( title = function(content_s) { paste(content_s, sentensize('some complimentary content')) } ) ) ``` ## remove a section Just set its content to `r citecode('NULL')`. 
```{r echo=TRUE, eval=TRUE} pc <- ProcessingContext( postProcessing_l = list( details = function(content_s) NULL ) ) ``` # Tips and tricks about content ## content escaping Content escaping is sometimes fully necessary, sometimes partially necessary and sometimes unneeded. Quite difficult to have a systematic approach, as content varies at lot according to section intent, section nature `r cmt('code, text, equation, ...')` and also according to surrounding context. To ease handling of content escape, `r rdoc` offers several functions: a high-level function `r citefun("generateMarkup")` and a low-level function `r citefun("escapeContent")`. By default content used is only partially escaped. Characters, `r citechar("'@'")` and `r citechar("'%'")` are systematically escaped, but not characters `r citechar("'{'")` and `r citechar("'}'")`. To escape those last, you must set argument `r citearg("escapeBraces_b_1")` to `r citecode("TRUE")` while using one or the other of those functions. ```{r content_escape, echo=TRUE, eval=TRUE} content <- 'function(x) { x + 1 }' # To be use in a code section content generateMarkup(content) # To be used in a text section content paste('Some R code:', generateMarkup(content, escapeBraces_b_1 = TRUE)) ``` ## Content generation for `r R` documentation section {.tabset .tabset-fade .tabset-pills} As a end-user you should rely on [use cases](use-cases.html). As a programmer, you may need to create your own generation scheme to fulfil some special requirements. Following function could be useful to do so. function name | intent :--------------------|:--------------------------------------------------------- `r citefun('generateSection')` | generate a `r R` documentation section `r citefun('generateParagraph')` | generate a paragraph collating all your inputs with a single new line by default. `r citefun('generateParagraphCR')` | generate a paragraph collating all your inputs with '\\cr' `r citefun('generateParagraph2NL')` | generate a paragraph collating all your inputs with two new lines. ## Content generation for `r citefun('examples')` section {.tabset .tabset-fade .tabset-pills} Examples are a really important part of the documentation. It is also a quite tricky part when handcrafting documentation. This is due an inherent complexity related to contextual processing that has to take into consideration, testing time, necessary testing resources, test execution path, and so on. In order to increase productivity and simplify the `r citefun('examples')` section, `r rdoc` provides a dedicated function that turns pure `r R` code into content. Here is the pattern to follow. 1. create a variable that holds a list of functions taking no arguments. The body of each function must be legal `r R` code, embodying the example 1. use function `r citefun('convertExamples')` to convert examples. You have the opportunity to pass along some keywords in order to manage test that should not be ran, should not be tested, should not be shown. You also have the opportunity to capture the example output and to introduce it automatically into the content. 
Let's see a sample session to do so ### code ```{r echo=TRUE, eval=TRUE} # The function to test divide <- function(x_n, y_n) x_n / y_n # The examples to consider examples <- list( function() { divide(1:3, 1:3 + 13L) }, function() { divide(0L, c(Inf, -Inf)) }, function() { divide(c(Inf, -Inf), 0L) }, function() { divide(0L, 0L) } ) # your documentation complementary parts to consider # and manual page generation context setup ic <- InputContext(NULL, 'divide') pc <- ProcessingContext( extraneous_l = list( examples = convertExamples(examples, captureOutput_b_1n = TRUE) ) ) gc <- GenerationContext(tempdir(), overwrite = TRUE) # The generation of the manual page rv <- produceManualPage(ic, pc, gc) ``` ### content generated ```{r echo=TRUE, eval=TRUE} readLines(rv$context$filename) ``` ### generated content as picture ![generated content](images/divide.png) # Tips and tricks about format {.tabset .tabset-fade .tabset-pills} ## enumeration format Function `r citefun('generateEnumeration')` eases enumeration management. ```{r echo=TRUE, eval=TRUE} generateEnumeration(paste('case', 1:4)) ``` ## list format Function `r citefun('generateEnumeration')` also eases item list management. ```{r echo=TRUE, eval=TRUE} generateEnumeration(paste('case', 1:4), TRUE) ``` ## table format To format a table, use function `r citefun('generateTable')`. ```{r echo=TRUE, eval=TRUE} dt <- data.table::data.table(x = paste0('XY_', 1:3), y = letters[1:3]) # as-is generateTable(dt) # with row numbering generateTable(dt, numberRows_b_1 = TRUE) ``` ## special formats Specification of `r R` documentation is quite complex. There are many variants possible and many ways to achieve a result. Following functions try to provide one convenient solution for some common needs. function name | intent :--------------------|:--------------------------------------------------------- `r citefun('generateOptionLink')` | When you need to generate a cross package documentation link use this function. If you need an intra package documentation link use function `r citefun('beautify()$link')`. You could also use `r citefun('producePackageLink')` to generate a cross package documentation link, but you won't be able to customize the labels. `r citefun('generateOptionSexpr')` | When you need to generate a `r citecode('Sexpr')`, use `r citefun('generateMarkup')` when you don't need options, otherwise use funciton `r citefun('generateOptionSexpr')`. `r citefun('generateEnc')` | generate a locale text encoding and ASCII equivalence. Not to be confused with `r citefun('generateEncoding')` that set encoding for the full manual page. `r citefun('generateReference')` | generate the text for a documentary or web reference. Refer to dedicated manual pages for more information. # Tips and tricks about presentation {.tabset .tabset-fade .tabset-pills} Many typographic enhancements are available. They are all grouped behind a facade name `r citefun('beautify')`. 
```{r echo=TRUE, eval=TRUE} b <- beautify() names(b) ``` ```{r echo=TRUE, eval=TRUE} b$bold('lorem ipsum') ``` ```{r echo=TRUE, eval=TRUE} b$file('/tmp/result.txt') ``` ```{r echo=TRUE, eval=TRUE} b$acronym('CRAN') ``` ```{r echo=TRUE, eval=TRUE} co <- '{ x %% y }' b$code(co) # very probably wrong e <- beautify(TRUE) e$code(co) # much more probably right ``` and the very convenient ```{r echo=TRUE, eval=TRUE} # link to another package b$code(producePackageLink('ggplot2', 'aes_string')) # link to same package b$codeLink('generateTable') # link to same package with enhanced presentation b$enhanceCodeLink('generateTable') ```
/scratch/gouwar.j/cran-all/cranData/wyz.code.rdoc/inst/doc/tips-and-tricks.Rmd
## ----setup, include = FALSE---------------------------------------------------
knitr::opts_chunk$set(
  collapse = TRUE,
  comment = ""
)
source('common-style.R')

## ----eval = TRUE, echo = TRUE-------------------------------------------------
dt <- wyz.code.rdoc::opRdocInformation()

## ----eval = TRUE, echo = TRUE-------------------------------------------------
sort(dt[stratum == 'CORE' & nature == 'EXPORTED']$name)

## ----eval = TRUE, echo = TRUE-------------------------------------------------
sort(dt[stratum == 'LAYER_1' & nature == 'EXPORTED']$name)

## ----eval = TRUE, echo = TRUE-------------------------------------------------
sort(dt[stratum == 'LAYER_2' & nature == 'EXPORTED']$name)

## ----eval = TRUE, echo = TRUE-------------------------------------------------
sort(dt[stratum == 'LAYER_3' & nature == 'EXPORTED']$name)
/scratch/gouwar.j/cran-all/cranData/wyz.code.rdoc/inst/doc/tutorial.R
--- title: "wyz.code.rdoc Tutorial" author: "Fabien GELINEAU" date: "Last update 2019.08.27" output: rmarkdown::html_vignette: number_sections: true toc: false css: style.css vignette: > %\VignetteIndexEntry{wyz.code.rdoc Tutorial} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- <link rel="stylesheet" href="style.css"> <img src='images/rdoc-hex.png' alt='offensive programming - R documentation' style='width:30%'/> ```{r setup, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "" ) source('common-style.R') ``` Package `r rdoc` aims to ease manual page creation in a very flexible way. It aims to free you from learning `r R` documentation specific language and its arcanes, while providing good and reliable results in a quick and reproducible way. # Understanding manual page generation {.tabset .tabset-fade .tabset-pills} Manual pages are associated either to describe processing functions or to describe data. Both are important, and each comes with its own specification set. From a pratical point of view, it exists several ways to produce manual pages. Indeed two are currently the most commonly used. First one is based on handcrafted manual pages. Second one is based on generated manual pages. ## Handcrafted manual page Standard `r R` documentation tools to generate manual pages belongs to the first approach. The global manual page generation process looks like following one. <img src="images/diagrams-1-trn.png" alt="handcrafted manual page"></img> In this is a two steps process, you first generate a manual page template `r cmt('once and only once')`, and then fill-in the blanks with the desired content. In practice, you end up repeating a variable number of times the fill-in phase for each manual page. Moreover, it requires you to get acknowledge about `r R` documentation arcanes, and this is quite complex due to syntax issues, character escaping and some other not so simple to fulfil needs. ## `r roxy` generated manual page Package `r roxy` meets the second approach. Its manual page generation process looks like following one <img src="images/diagrams-2-trn.png" alt="handcrafted manual page"></img> Theoritically, this is a two steps process, where you first fill-in the code comments according to `r roxy` specification, and then generate on-demand the related manual pages. This is a much more industrial approach. In practice, compliance with code comments specification is not so easy and still requires deep understanding of `r R` documentation scheme. Manual page generation although robust and fast, may sometimes be cumbersome. ## `r rdoc` generated manual page Package `r rdoc` meets the already presentend second approach. Its manual page generation process looks like following one. <img src="images/diagrams-3-trn.png" alt="handcrafted manual page"></img> Theoritically, this is a three steps process 1. create your manual page customization 1. generate the related manual page 1. edit the resulting manual page In practice, this is often a two steps process, as editing resulting manual page is an optional step, only required when the cost of the modification is higher to be achieved by code than by hand. 
## Comparison of the three approaches approach | pros | cons :----------------:|:------------------------------|:------------------------------ handcrafted manual page | `r brkfun(c('easy to understand', 'straightforward process'))` | `r brkfun(c('hyper repetitive task', 'difficult to master', 'time consuming activity', 'great variability of the result from person to person'))` `r roxy` generated manual page | `r brkfun(c('fast', 'robust', 'high quality of result', 'Hadley powered'))` | `r brkfun(c('code commenting', 'sometimes tricky', 'repetitive task', 'time consuming activity'))` `r rdoc` generated manual page | `r brkfun(c('pure code, only code', 'highly customizable result', 'time saving activity', 'highly reproducible results', 'high reuse of customization as code'))` | `r brkfun(c('requires some experimentation to feel at ease with', 'still repetitive task, although less'))` # Package `r rdoc` approach ## Why another manual page generation tool? Mainly for following reasons. First, documentation production is an activity consuming too much time. We should reduce the amount of time spent on documentation generation while garanteing a high level of quality of produced documentation. Second, documentation is mandatory. So we need very powerful tools to alleviate the burden and to reduce variability of documentation quality. Third, I do not believe that standard `r R` or `r roxy` ways are the right ones. They are for sure helpful but to my opinion clearly not enough. I wish I could write documentation from code instead of writing documentation. I should be doing so using a high level interface, not requiring me to know much about final `r R` documentation format. Thus will allow me to produce better documentation as I will only have to focus on the content and the style, not on the format of the documentation. Fourth, current level of industrialization of documentation generation provided by the two presented approaches is insufficient too me. I wish to be able to reuse one already generated part from one manual page into another one. This is possible and quite easy to achieve if I use code, difficult otherwise. I also wish to be possible to produce a complete manual page, whatever its format and content, in a fully reproducible and replayable way. ## What can actually be generated? Manual page can be generated 1. from a single `r R` function 1. from a `r R` object instanciated from a `r R` class 1. for a package 1. from a data set Currently, version 1.1.8 of `r rdoc` allows to generate manual pages for each of these cases. See [use cases](use-cases.html) to know more. ## Code organization Package `r rdoc` provides low, medium and high level tools <cite class='comment'>functions</cite> to generate manual pages. You can discover them using following `r R` sequence. ```{r eval = TRUE, echo = TRUE} dt <- wyz.code.rdoc::opRdocInformation() ``` Core level tools deals mainly with deep package internals. ```{r eval = TRUE, echo = TRUE} sort(dt[stratum == 'CORE' & nature == 'EXPORTED']$name) ``` Low level tools deal with `r R` documentation format. ```{r eval = TRUE, echo = TRUE} sort(dt[stratum == 'LAYER_1' & nature == 'EXPORTED']$name) ``` Medium level tools deal essentially with presentation and beautifying. ```{r eval = TRUE, echo = TRUE} sort(dt[stratum == 'LAYER_2' & nature == 'EXPORTED']$name) ``` High level tools deal with end-user facilities to ease manual page generation, manage end-user customizations, and increase productivity. 
```{r eval = TRUE, echo = TRUE} sort(dt[stratum == 'LAYER_3' & nature == 'EXPORTED']$name) ```
/scratch/gouwar.j/cran-all/cranData/wyz.code.rdoc/inst/doc/tutorial.Rmd
## ----setup, include = FALSE--------------------------------------------------- knitr::opts_chunk$set( collapse = TRUE, comment = "" ) source('common-style.R') ## ----context, eval = TRUE, echo = FALSE--------------------------------------- library("data.table") library("wyz.code.offensiveProgramming") library("wyz.code.rdoc", warn.conflicts = FALSE) ## ----function_mpg, eval = TRUE, echo = TRUE----------------------------------- b <- beautify() examples <- list( function() { append(1:5, 0:1, after = 3) } ) ic <- InputContext(NULL, 'append') pc <- ProcessingContext( extraneous_l = list( title = 'Vector Merging', description = sentensize('add elements to a vector'), details = paste('If the parameter', b$code('after'), 'is higher than' , b$code('x'), 'length,\n then insertion is done at the', 'end of the data structure'), references = paste('Becker, R. A., Chambers, J. M. and Wilks, A. R. (1988)', 'The New S Language. Wadsworth & Brooks/Cole.'), examples = convertExamples(examples, captureOutput_b_1n = FALSE) ), postProcessing_l = list( arguments = function(content_s) { s <- sub('XXX_001', sentensize('the vector the values are to be appended to'), content_s, fixed = TRUE) s <- sub('XXX_002', sentensize('to be included in the modified vector'), s, fixed = TRUE) s <- sub('XXX_003', 'a subscript, after which the values are to be appended', s, fixed = TRUE) s } ) ) td <- tempdir() gc <- GenerationContext(td, overwrite = TRUE) rv <- produceManualPage(ic, pc, gc) ## ----function_mpg_rv, eval = TRUE, echo = TRUE-------------------------------- readLines(rv$context$filename) ## ----op, eval = TRUE, echo = TRUE--------------------------------------------- iterateOverSet <- function(set_s, enforceUniqueness_b_1 = TRUE) { NULL } ## ----op_mpg, eval = TRUE, echo = TRUE----------------------------------------- examples <- list( function() { iterateOverSet(sample(LETTERS, 35, TRUE), FALSE) }, function() { iterateOverSet(as.character(1:9)) } ) ic <- InputContext(NULL, 'iterateOverSet') pc <- ProcessingContext( extraneous_l = list( examples = convertExamples(examples, captureOutput_b_1n = FALSE) ) ) rv_op <- produceManualPage(ic, pc, gc) ## ----op_mpg_rv, eval = TRUE, echo = TRUE-------------------------------------- readLines(rv_op$context$filename) ## ----eval = TRUE, echo = TRUE------------------------------------------------- source(findFilesInPackage('classes', 'wyz.code.offensiveProgramming')[1]) ic <- InputContext(MeltingPot_Env()) rv <- produceManualPage(ic, generationContext = gc) ## ----eval = TRUE, echo = TRUE------------------------------------------------- readLines(rv$context$filename) ## ----eval = TRUE, echo = TRUE------------------------------------------------- ic <- InputContext(Zorg()) rv <- produceManualPage(ic, generationContext = gc) ## ----eval = TRUE, echo = TRUE------------------------------------------------- readLines(rv$context$filename) ## ----eval = TRUE, echo = TRUE------------------------------------------------- ic <- InputContext(Bu_S3()) rv <- produceManualPage(ic, generationContext = gc) ## ----eval = TRUE, echo = TRUE------------------------------------------------- readLines(rv$context$filename) ## ----eval = TRUE, echo = TRUE------------------------------------------------- source(findFilesInPackage('Addition_TCFI_Partial_S3.R', 'wyz.code.offensiveProgramming')[1]) ic <- InputContext(Addition_TCFI_Partial_S3()) rv <- produceManualPage(ic, generationContext = gc) ## ----eval = TRUE, echo = TRUE------------------------------------------------- 
readLines(rv$context$filename) ## ----eval = TRUE, echo = TRUE------------------------------------------------- ic <- InputContext(new('Person_S4', name = 'neonira')) rv <- produceManualPage(ic, generationContext = gc) ## ----eval = TRUE, echo = TRUE------------------------------------------------- readLines(rv$context$filename) ## ----eval = TRUE, echo = TRUE------------------------------------------------- source(findFilesInPackage('Addition_TCFI_Partial_S4.R', 'wyz.code.offensiveProgramming')[1]) ic <- InputContext(new('Addition_TCFI_Partial_S4')) rv <- produceManualPage(ic, generationContext = gc) ## ----eval = TRUE, echo = TRUE------------------------------------------------- readLines(rv$context$filename) ## ----eval = TRUE, echo = TRUE------------------------------------------------- ic <- InputContext(new('Person_RC', name = 'neonira')) rv <- produceManualPage(ic, generationContext = gc) ## ----eval = TRUE, echo = TRUE------------------------------------------------- readLines(rv$context$filename) ## ----eval = TRUE, echo = TRUE------------------------------------------------- source(findFilesInPackage('Addition_TCFI_Partial_RC.R', 'wyz.code.offensiveProgramming')[1]) ic <- InputContext(new('Addition_TCFI_Partial_RC')) rv <- produceManualPage(ic, generationContext = gc) ## ----eval = TRUE, echo = TRUE------------------------------------------------- readLines(rv$context$filename) ## ----eval = TRUE, echo = TRUE------------------------------------------------- ic <- InputContext(Accumulator_R6$new()) rv <- produceManualPage(ic, generationContext = gc) ## ----eval = TRUE, echo = TRUE------------------------------------------------- readLines(rv$context$filename) ## ----eval = TRUE, echo = TRUE------------------------------------------------- source(findFilesInPackage('Addition_TCFI_Partial_R6.R', 'wyz.code.offensiveProgramming')[1]) ic <- InputContext(Addition_TCFI_Partial_R6$new()) rv <- produceManualPage(ic, generationContext = gc) ## ----eval = TRUE, echo = TRUE------------------------------------------------- readLines(rv$context$filename) ## ----eval = TRUE, echo = TRUE------------------------------------------------- ic <- InputContext(NULL, package = 'wyz.code.rdoc') # using an pc <- ProcessingContext( postProcessing_l = list( details = function(content_s) NULL ) ) rv <- produceManualPage(ic, pc, gc) ## ----eval = TRUE, echo = TRUE------------------------------------------------- readLines(rv$context$filename) ## ----eval = TRUE, echo = TRUE------------------------------------------------- ic <- InputContext(dummy, dataFilename = 'dummy.csv') pc <- ProcessingContext( extraneous_l = list( description = 'a dummy datafile for demonstration purpose', format = 'a data.frame 9x2', source = 'fake data - used only for demo' ), postProcessing_l = list( classification = function(content_s) NULL ) ) rv <- produceManualPage(ic, pc, gc) ## ----eval = TRUE, echo = TRUE------------------------------------------------- readLines(rv$context$filename)
/scratch/gouwar.j/cran-all/cranData/wyz.code.rdoc/inst/doc/use-cases.R
--- title: "wyz.code.rdoc use cases" author: "Fabien GELINEAU" date: "Last update 2020 - Q1" output: rmarkdown::html_vignette: number_sections: true toc: false css: style.css vignette: > %\VignetteIndexEntry{wyz.code.rdoc use cases} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- <link rel="stylesheet" href="style.css"> <img src='images/rdoc-hex.png' alt='offensive programming - R documentation' style='width:30%'/> ```{r setup, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "" ) source('common-style.R') ``` ```{r context, eval = TRUE, echo = FALSE} library("data.table") library("wyz.code.offensiveProgramming") library("wyz.code.rdoc", warn.conflicts = FALSE) ``` This use-case vignette is dedicated to some common manual page use case generation using package `r rdoc`. It may make sense for newcommers to read the tutorial vignette prior to read this vignette. # Manual page generation from a function ## Standard `r R` function {.tabset .tabset-fade .tabset-pills} I will consider function `r citecode('append')` from base package as example. Any other `r R` function may work as well. ### code ```{r function_mpg, eval = TRUE, echo = TRUE} b <- beautify() examples <- list( function() { append(1:5, 0:1, after = 3) } ) ic <- InputContext(NULL, 'append') pc <- ProcessingContext( extraneous_l = list( title = 'Vector Merging', description = sentensize('add elements to a vector'), details = paste('If the parameter', b$code('after'), 'is higher than' , b$code('x'), 'length,\n then insertion is done at the', 'end of the data structure'), references = paste('Becker, R. A., Chambers, J. M. and Wilks, A. R. (1988)', 'The New S Language. Wadsworth & Brooks/Cole.'), examples = convertExamples(examples, captureOutput_b_1n = FALSE) ), postProcessing_l = list( arguments = function(content_s) { s <- sub('XXX_001', sentensize('the vector the values are to be appended to'), content_s, fixed = TRUE) s <- sub('XXX_002', sentensize('to be included in the modified vector'), s, fixed = TRUE) s <- sub('XXX_003', 'a subscript, after which the values are to be appended', s, fixed = TRUE) s } ) ) td <- tempdir() gc <- GenerationContext(td, overwrite = TRUE) rv <- produceManualPage(ic, pc, gc) ``` **NOTA BENE**: Embedded new line characters in previous text are present for vignette format purpose only here, in particular to avoid horizontal scrollbars and to ease reading. Indeed, sometimes you will have to do so, to avoid producing too long lines in `r R` documentations files, as this will generate a warning when running `r citefun('R CMD check')`. If you plan to publish your package on `r citefun('CRAN')`, you will have to resolve such warning cases. ### generated `r citecode('.Rd')` file ```{r function_mpg_rv, eval = TRUE, echo = TRUE} readLines(rv$context$filename) ``` ### generated content as picture ![generated append documentation](images/append.png) ### original content ![base R append documentation](images/append-orig.png) ## Semantic naming function {.tabset .tabset-fade .tabset-pills} Let's consider following function, respectful of semantic naming as defined by offensive programming. Refer to [Offensive programming book](https://neonira.github.io/offensiveProgrammingBook/) to get introduction and advanced knowledge on offensive programming advantages, practice and ecosystem. 
```{r op, eval = TRUE, echo = TRUE} iterateOverSet <- function(set_s, enforceUniqueness_b_1 = TRUE) { NULL } ``` The body is set to `r citecode('NULL')` for demonstration purpose and to prove that it does not interfere with documentation generation. ### code ```{r op_mpg, eval = TRUE, echo = TRUE} examples <- list( function() { iterateOverSet(sample(LETTERS, 35, TRUE), FALSE) }, function() { iterateOverSet(as.character(1:9)) } ) ic <- InputContext(NULL, 'iterateOverSet') pc <- ProcessingContext( extraneous_l = list( examples = convertExamples(examples, captureOutput_b_1n = FALSE) ) ) rv_op <- produceManualPage(ic, pc, gc) ``` ### generated `r citecode('.Rd')` file ```{r op_mpg_rv, eval = TRUE, echo = TRUE} readLines(rv_op$context$filename) ``` ### generated content as picture ![generated append documentation](images/iterateOverSet.png) ### Main differences Main differences with standard `r R` function generation are, using semantic naming scheme, you do not have to 1. provide content for arguments. It is automatically deduced from the semantic name you use 1. provide a title. It is deduced from the function name. 1. provide a description. It is also deduced from the function name. You can still change or adapt provided content by a post processing whenever and wherever needed to meet sharper documentation level. # Manual page generation from an environment object ## Standard `r R` object {.tabset .tabset-fade .tabset-pills} I will consider class named `r citecode('MeltingPot_Env')` to produce an object based on an environment containing several functions. ### code ```{r eval = TRUE, echo = TRUE} source(findFilesInPackage('classes', 'wyz.code.offensiveProgramming')[1]) ic <- InputContext(MeltingPot_Env()) rv <- produceManualPage(ic, generationContext = gc) ``` **NOTA BENE**: I use default processing context here. This means, only defaultly valuable documentation sections will be filled-in. ### generated `r citecode('.Rd')` file ```{r eval = TRUE, echo = TRUE} readLines(rv$context$filename) ``` ### generated content as picture ![generated append documentation](images/r-env.png) ## Offensive programming object {.tabset .tabset-fade .tabset-pills} I will consider class named `r citecode('Zorg')` to produce an object based on an environment containing several functions. This class is partially offensive programming instrumented. ### code ```{r eval = TRUE, echo = TRUE} ic <- InputContext(Zorg()) rv <- produceManualPage(ic, generationContext = gc) ``` ### generated `r citecode('.Rd')` file ```{r eval = TRUE, echo = TRUE} readLines(rv$context$filename) ``` ### generated content as picture ![generated append documentation](images/op-env.png) ### Main differences There exists only very few differences between standard `r R` and `r citefun('offensive programming')` class documentation generation. Using the later allows to benefit from instrumentation and allows to run dynamic checks instead of static checks. # Manual page generation from an S3 object ## Standard `r R` object {.tabset .tabset-fade .tabset-pills} I will consider class named `r citecode('Bu_S3')` to produce an object based on S3 class scheme containing several functions. 
### code ```{r eval = TRUE, echo = TRUE} ic <- InputContext(Bu_S3()) rv <- produceManualPage(ic, generationContext = gc) ``` ### generated `r citecode('.Rd')` file ```{r eval = TRUE, echo = TRUE} readLines(rv$context$filename) ``` ### generated content as picture ![generated append documentation](images/r-s3.png) ## Offensive programming object {.tabset .tabset-fade .tabset-pills} I will consider class named `r citecode('Addition_TCFI_Partial_S3')` to produce an object based on a S3 class containing several functions. This class is partially offensive programming instrumented. ### code ```{r eval = TRUE, echo = TRUE} source(findFilesInPackage('Addition_TCFI_Partial_S3.R', 'wyz.code.offensiveProgramming')[1]) ic <- InputContext(Addition_TCFI_Partial_S3()) rv <- produceManualPage(ic, generationContext = gc) ``` ### generated `r citecode('.Rd')` file ```{r eval = TRUE, echo = TRUE} readLines(rv$context$filename) ``` ### generated content as picture ![generated append documentation](images/op-s3.png) ### Main differences There exists only very few differences between standard `r R` and `r citefun('offensive programming')` class documentation generation. Using the later allows to benefit from instrumentation and allows to run dynamic checks instead of static checks. # Manual page generation from an S4 object ## Standard `r R` object {.tabset .tabset-fade .tabset-pills} I will consider class named `r citecode('Person_S4')` to produce an object based on S4 class scheme containing several functions. ### code ```{r eval = TRUE, echo = TRUE} ic <- InputContext(new('Person_S4', name = 'neonira')) rv <- produceManualPage(ic, generationContext = gc) ``` ### generated `r citecode('.Rd')` file ```{r eval = TRUE, echo = TRUE} readLines(rv$context$filename) ``` ### generated content as picture ![generated append documentation](images/r-s4.png) ## Offensive programming object {.tabset .tabset-fade .tabset-pills} I will consider class named `r citecode('Addition_TCFI_Partial_S4')` to produce an object based on a S4 class containing several functions. This class is partially offensive programming instrumented. ### code ```{r eval = TRUE, echo = TRUE} source(findFilesInPackage('Addition_TCFI_Partial_S4.R', 'wyz.code.offensiveProgramming')[1]) ic <- InputContext(new('Addition_TCFI_Partial_S4')) rv <- produceManualPage(ic, generationContext = gc) ``` ### generated `r citecode('.Rd')` file ```{r eval = TRUE, echo = TRUE} readLines(rv$context$filename) ``` ### generated content as picture ![generated append documentation](images/op-s4.png) ### Main differences There exists only very few differences between standard `r R` and `r citefun('offensive programming')` class documentation generation. Using the later allows to benefit from instrumentation and allows to run dynamic checks instead of static checks. # Manual page generation from an RC object ## Standard `r R` object {.tabset .tabset-fade .tabset-pills} I will consider class named `r citecode('Person_RC')` to produce an object based on RC class scheme containing several functions. 
### code ```{r eval = TRUE, echo = TRUE} ic <- InputContext(new('Person_RC', name = 'neonira')) rv <- produceManualPage(ic, generationContext = gc) ``` ### generated `r citecode('.Rd')` file ```{r eval = TRUE, echo = TRUE} readLines(rv$context$filename) ``` ### generated content as picture ![generated append documentation](images/r-rc.png) ## Offensive programming object {.tabset .tabset-fade .tabset-pills} I will consider class named `r citecode('Addition_TCFI_Partial_RC')` to produce an object based on an RC class containing several functions. This class is partially offensive programming instrumented. ### code ```{r eval = TRUE, echo = TRUE} source(findFilesInPackage('Addition_TCFI_Partial_RC.R', 'wyz.code.offensiveProgramming')[1]) ic <- InputContext(new('Addition_TCFI_Partial_RC')) rv <- produceManualPage(ic, generationContext = gc) ``` ### generated `r citecode('.Rd')` file ```{r eval = TRUE, echo = TRUE} readLines(rv$context$filename) ``` ### generated content as picture ![generated append documentation](images/op-rc.png) ### Main differences There exists only very few differences between standard `r R` and `r citefun('offensive programming')` class documentation generation. Using the later allows to benefit from instrumentation and allows to run dynamic checks instead of static checks. # Manual page generation from an R6 object ## Standard `r R` object {.tabset .tabset-fade .tabset-pills} I will consider class named `r citecode('Accumulator_R6')` to produce an object based on R6 class scheme containing several functions. ### code ```{r eval = TRUE, echo = TRUE} ic <- InputContext(Accumulator_R6$new()) rv <- produceManualPage(ic, generationContext = gc) ``` ### generated `r citecode('.Rd')` file ```{r eval = TRUE, echo = TRUE} readLines(rv$context$filename) ``` ### generated content as picture ![generated append documentation](images/r-r6.png) ## Offensive programming object {.tabset .tabset-fade .tabset-pills} I will consider class named `r citecode('Addition_TCFI_Partial_R6')` to produce an object based on an R6 class containing several functions. This class is partially offensive programming instrumented. ### code ```{r eval = TRUE, echo = TRUE} source(findFilesInPackage('Addition_TCFI_Partial_R6.R', 'wyz.code.offensiveProgramming')[1]) ic <- InputContext(Addition_TCFI_Partial_R6$new()) rv <- produceManualPage(ic, generationContext = gc) ``` ### generated `r citecode('.Rd')` file ```{r eval = TRUE, echo = TRUE} readLines(rv$context$filename) ``` ### generated content as picture ![generated append documentation](images/op-r6.png) # Manual page generation for a package ## Standard `r R` object {.tabset .tabset-fade .tabset-pills} I will consider package named `r citecode('wyz.code.rdoc')` as example to generate a package manual page. ### code ```{r eval = TRUE, echo = TRUE} ic <- InputContext(NULL, package = 'wyz.code.rdoc') # using an pc <- ProcessingContext( postProcessing_l = list( details = function(content_s) NULL ) ) rv <- produceManualPage(ic, pc, gc) ``` ### generated `r citecode('.Rd')` file ```{r eval = TRUE, echo = TRUE} readLines(rv$context$filename) ``` ### generated content as picture ![generated append documentation](images/pkg.png) # Manual page generation for data ## Standard `r R` object {.tabset .tabset-fade .tabset-pills} I will consider data named `r citecode('dummy')` to generate documentation from. 
### code ```{r eval = TRUE, echo = TRUE} ic <- InputContext(dummy, dataFilename = 'dummy.csv') pc <- ProcessingContext( extraneous_l = list( description = 'a dummy datafile for demonstration purpose', format = 'a data.frame 9x2', source = 'fake data - used only for demo' ), postProcessing_l = list( classification = function(content_s) NULL ) ) rv <- produceManualPage(ic, pc, gc) ``` ### generated `r citecode('.Rd')` file ```{r eval = TRUE, echo = TRUE} readLines(rv$context$filename) ``` ### generated content as picture ![generated append documentation](images/data.png)
/scratch/gouwar.j/cran-all/cranData/wyz.code.rdoc/inst/doc/use-cases.Rmd
library("data.table") library("wyz.code.offensiveProgramming") library("wyz.code.rdoc") gc <- GenerationContext('inst/man-generated', overwrite_b_1 = TRUE, verbosity_b_1 = FALSE, useMarkers_b_1 = FALSE) target_package_name <- 'wyz.code.rdoc' pc <- ProcessingContext( extraneous_l = list( keyword = 'documentation', concept = 'documentation generation' ) ) options('rdhoc_hack' = TRUE) # 1. ProcessingContext r1 <- produceAllManualPagesFromObject(pc, pc, gc, target_package_name) # not an error pc, pc, gc - desired # 2. ManualPageBuilder m <- ManualPageBuilder(InputContext(NULL)) r2 <- produceAllManualPagesFromObject(m, pc, gc, target_package_name)
/scratch/gouwar.j/cran-all/cranData/wyz.code.rdoc/inst/unit-testing/ut-bmp-class-full-generation.R
library("data.table") library("wyz.code.offensiveProgramming") library("wyz.code.rdoc") gc <- GenerationContext('inst/man-generated', overwrite_b_1 = TRUE, verbosity_b_1 = FALSE, useMarkers_b_1 = FALSE) target_package_name <- 'wyz.code.rdoc' # beautifier <- beautify() # # pc <- ProcessingContext( # extraneous_l = list( # seealso = sentensize(paste('class', beautifier$codelink('ProcessingContext'), # 'and class', beautifier$codelink('InputContext'))), # keyword = 'documentation', # concept = 'documentation generation' # ) # ) source(findFilesInPackage('AdditionTCFIP.R', 'wyz.code.offensiveProgramming')[1]) ic <- InputContext(AdditionTCFIP(), packageName_s_1 = target_package_name) r <- produceManualPage(ic, gene = gc) interpretResults(r) source(findFilesInPackage('Addition_TCFI_Partial_R6.R', 'wyz.code.offensiveProgramming')[1]) ic <- InputContext(Addition_TCFI_Partial_R6$new(), packageName_s_1 = target_package_name) r <- produceManualPage(ic, gene = gc) interpretResults(r) source(findFilesInPackage('Addition_TCFI_Partial_S3.R', 'wyz.code.offensiveProgramming')[1]) ic <- InputContext(Addition_TCFI_Partial_S3(), packageName_s_1 = target_package_name) r <- produceManualPage(ic, gene = gc) interpretResults(r) source(findFilesInPackage('Addition_TCFI_Partial_S4.R', 'wyz.code.offensiveProgramming')[1]) ic <- InputContext(new('Addition_TCFI_Partial_S4'), packageName_s_1 = target_package_name) r <- produceManualPage(ic, gene = gc) interpretResults(r) source(findFilesInPackage('Addition_TCFI_Partial_RC.R', 'wyz.code.offensiveProgramming')[1]) ic <- InputContext(new('Addition_TCFI_Partial_RC'), packageName_s_1 = target_package_name) r <- produceManualPage(ic, gene = gc) interpretResults(r)
/scratch/gouwar.j/cran-all/cranData/wyz.code.rdoc/inst/unit-testing/ut-bmp-class-op.R
library("data.table") library("wyz.code.offensiveProgramming") library("wyz.code.rdoc") gc <- GenerationContext('inst/man-generated', overwrite_b_1 = TRUE, verbosity_b_1 = FALSE, useMarkers_b_1 = FALSE) target_package_name <- 'wyz.code.rdoc' beautifier <- beautify() pc <- ProcessingContext( extraneous_l = list( seealso = sentensize(paste('class', beautifier$codelink('ProcessingContext'), 'and class', beautifier$codelink('InputContext'))), keyword = 'documentation', concept = 'documentation generation' ) ) # 1. ProcessingContext ic <- InputContext(pc, packageName_s_1 = 'wyz.code.rdoc') r1 <- produceManualPage(ic, pc, gc) # 2. ManualPageBuilder m <- ManualPageBuilder(InputContext(NULL)) pc <- ProcessingContext( extraneous_l = list( seealso = sentensize(paste('class', beautifier$codelink('InputContext'), 'class', beautifier$codelink('ProcessingContext'), 'and class', beautifier$codelink('GenerationContext'))), keyword = 'documentation', concept = 'documentation generation' ) ) ic <- InputContext(m, packageName_s_1 = 'wyz.code.rdoc') r2 <- produceManualPage(ic, pc, gc) interpretResults(r2)
/scratch/gouwar.j/cran-all/cranData/wyz.code.rdoc/inst/unit-testing/ut-bmp-class.R
library("data.table") library("wyz.code.offensiveProgramming") library("wyz.code.rdoc") gc <- GenerationContext('inst/man-samples', overwrite_b_1 = TRUE, verbosity_b_1 = FALSE, useMarkers_b_1 = FALSE) pc <- ProcessingContext( extraneous_l = list( description = "Data set to be used as example for demo purpose.", source = 'Data set generated by NEONIRA', concept = 'documentation generation' ) ) target_package_name <- 'wyz.code.rdoc' # 1. file dummy.R ic <- InputContext(dummy, packageName_s_1 = target_package_name, dataFilename_s_1 = 'dummy.R') r1 <- produceManualPage(ic, pc, gc) # 2. file family.csv ic <- InputContext(family, packageName_s_1 = target_package_name, dataFilename_s_1 = 'family.csv') r2 <- produceManualPage(ic, pc, gc)
/scratch/gouwar.j/cran-all/cranData/wyz.code.rdoc/inst/unit-testing/ut-bmp-data.R
library("data.table") library("wyz.code.offensiveProgramming") library("wyz.code.rdoc") gc <- GenerationContext('inst/man-generated', overwrite_b_1 = TRUE, verbosity_b_1 = FALSE, useMarkers_b_1 = FALSE) target_package_name <- 'wyz.code.rdoc' beautifier <- beautify() pc <- ProcessingContext( extraneous_l = list( description = 'A fake description', keyword = 'documentation', concept = 'documentation generation' ) ) # 1. append ic <- InputContext(NULL, 'append', packageName_s_1 = 'wyz.code.rdoc') r1 <- produceManualPage(ic, pc, gc) # 2. sum ic <- InputContext(NULL, 'sum', packageName_s_1 = 'wyz.code.rdoc') r2 <- produceManualPage(ic, pc, gc) # 3. opfun opfun <- function(x_l, x_s, x_b_1) { NA } ic <- InputContext(NULL, 'opfun', packageName_s_1 = 'wyz.code.rdoc') r3 <- produceManualPage(ic, pc, gc)
/scratch/gouwar.j/cran-all/cranData/wyz.code.rdoc/inst/unit-testing/ut-bmp-function.R
library("data.table") library("wyz.code.offensiveProgramming") library("wyz.code.rdoc") gc <- GenerationContext('inst/man-generated', overwrite_b_1 = TRUE, verbosity_b_1 = FALSE, useMarkers_b_1 = FALSE) target_package_name <- 'wyz.code.rdoc' beautifier <- beautify() pc <- ProcessingContext( extraneous_l = list( seealso = sentensize(paste('class', beautifier$codelink('ProcessingContext'), 'and class', beautifier$codelink('InputContext'))), keyword = 'documentation', concept = 'documentation generation' ) ) # 1. ProcessingContext ic <- InputContext(pc, 'verifyPostProcessing', packageName_s_1 = 'wyz.code.rdoc') r1 <- produceManualPage(ic, pc, gc) # 2. ManualPageBuilder # ic <- InputContext(m, 'interpretResults', packageName_s_1 = 'wyz.code.rdoc') # r2 <- produceManualPage(ic, pc, gc) WeirdNames <- function() { self <- environment() class(self) <- append('WeirdNames', class(self)) `%*%` <- function(x_, y_) { Inf } # weird function name f <- function(x_d) x_d self } # 3. Weirdnames r3 <- produceAllManualPagesFromObject(WeirdNames(), pc, gc, packageName_s_1 = 'wyz.code.rdoc')
/scratch/gouwar.j/cran-all/cranData/wyz.code.rdoc/inst/unit-testing/ut-bmp-method.R
library("data.table") library("wyz.code.offensiveProgramming") library("wyz.code.rdoc") gc <- GenerationContext('inst/man-generated', overwrite_b_1 = TRUE, verbosity_b_1 = FALSE, useMarkers_b_1 = FALSE) target_package_name <- 'wyz.code.rdoc' pc <- ProcessingContext( extraneous_l = list( keyword = 'documentation', concept = 'documentation generation' ) ) # 1. ProcessingContext ic <- InputContext(NULL, packageName_s_1 = 'wyz.code.rdoc') r1 <- produceManualPage(ic, pc, gc)
/scratch/gouwar.j/cran-all/cranData/wyz.code.rdoc/inst/unit-testing/ut-bmp-package.R
library("data.table") library("wyz.code.offensiveProgramming") library("wyz.code.rdoc") source_package_name <- target_package_name <- 'wyz.code.rdoc' pc <- ProcessingContext(postProcessing_l = list( title = function(content_s) { 'a patched title'}, concept = function(content_s) { 'zorg'}, details = function(content_s) { 'details' } ) ) file.copy('inst/man-generated/append.Rd', 'inst/man-generated/append-1.Rd') rv_add <- wyz.code.rdoc:::completeManualPageBis('inst/man-generated/append-1.Rd', pc) file.copy('inst/man-generated/append.Rd', 'inst/man-generated/append-2.Rd') rv_patch <- wyz.code.rdoc:::completeManualPageBis('inst/man-generated/append-2.Rd', pc, FALSE)
/scratch/gouwar.j/cran-all/cranData/wyz.code.rdoc/inst/unit-testing/ut-complete-manual-page.R
traceenv <- function(localisator_s_1, n_ui_1, environement_e_1) {
  cat(n_ui_1, '. localisator=', localisator_s_1,
      ' name=', environmentName(environement_e_1), '\n', sep = '')
}

f <- function() {
  n <- 1
  p <- e <- environment()
  traceenv('function env', n, e)
  repeat {
    n <- n + 1
    p <- parent.env(p)
    traceenv('parent env', n, p)
    if (environmentName(p) == 'R_EmptyEnv') break
  }
}

cat('search length', length(search()), '\n')
print(search())
f()
/scratch/gouwar.j/cran-all/cranData/wyz.code.rdoc/inst/unit-testing/ut-env.R
library("data.table") library("wyz.code.offensiveProgramming") library("wyz.code.rdoc") source_package_name <- target_package_name <- 'wyz.code.rdoc' gc <- GenerationContext(targetFolder_s_1 = 'inst/man-regenerated', overwrite_b_1 = TRUE, verbosity_b_1 = FALSE, useMarkers_b_1 = FALSE) beautifier <- beautify() ex_class <- ex_fun <- list( seealso = sentensize(paste('class', beautifier$codelink('InputContext'), 'class', beautifier$codelink('ProcessingContext'), 'class', beautifier$codelink('GenerationContext'), 'and class', beautifier$codelink('ManualPageBuilder'))), keyword = 'documentation', concept = 'documentation generation' ) ex_fun$seealso <- NULL options('rdhoc_hack' = TRUE) dt <- wyz.code.rdoc::packageFunctionsInformation() fn <- dt[nature == 'EXPORTED' & category != "DATA"]$name color <- Colorizer() objs <- list( InputContext = InputContext(color, packageName_s_1 = target_package_name), GenerationContext = gc, ProcessingContext = ProcessingContext(), ManualPageBuilder = NULL ) objs$ManualPageBuilder <- ManualPageBuilder(objs$InputContext, objs$ProcessingContext, objs$GenerationContext) res <- lapply(fn, function(e) { is_class <- grepl('^[A-Z]', e, perl = TRUE) cat(color$other(paste('processing', e)), '\n') if (is_class) { produceAllManualPagesFromObject(objs[[e]], ProcessingContext(ex_class), gc, packageName_s_1 = target_package_name) } else { produceManualPage(InputContext(NULL, e, packageName_s_1 = target_package_name), ProcessingContext(ex_fun), gc) } }) rf <- sapply(c('generateEnc', 'generateParagraph', 'generateParagraph2NL', 'generateParagraphCR'), function(e) { ic <- InputContext(NULL, e, packageName_s_1 = target_package_name) produceManualPage(ic, pc, gc) }) pc <- ProcessingContext( extraneous_l = list( description = "Data set to be used as example for demo purpose.", source = 'Data set generated by NEONIRA', concept = 'documentation generation' ) ) # 1. file dummy.R ic <- InputContext(dummy, packageName_s_1 = target_package_name, dataFilename_s_1 = 'dummy.R') r1 <- produceManualPage(ic, pc, gc) # 2. file family.csv ic <- InputContext(family, packageName_s_1 = target_package_name, dataFilename_s_1 = 'family.csv') r2 <- produceManualPage(ic, pc, gc)
/scratch/gouwar.j/cran-all/cranData/wyz.code.rdoc/inst/unit-testing/ut-regenerate-manual-pages.R
citeit <- function(x_s) paste0('<cite class="itb">', x_s, '</cite>')
citefun <- function(x_s) paste0('<cite class="it">', x_s, '</cite>')
citeop <- function(x_s) paste0('<cite class="op">', x_s, '</cite>')
citearg <- function(x_s) paste0('<cite class="os">', x_s, '</cite>')
citeval <- function(x_s) paste0('<cite class="ea">', x_s, '</cite>')
citesection <- function(x_s) paste0('<cite class="bj">', x_s, '</cite>')
citecode <- function(x_s) paste0('<cite class="oc">', x_s, '</cite>')
citechar <- function(x_s) paste0('<cite class="isa">', x_s, '</cite>')
cmt <- function(x_s) paste0('<cite class="comment">', x_s, '</cite>')
citefigure <- function(x_s) paste0('<cite class="figure">', x_s, '</cite>')
citetime <- function(x_s) paste0('<cite class="time">', x_s, '</cite>')
citefile <- function(x_s) paste0('<cite class="file">', x_s, '</cite>')
citefolder <- function(x_s) paste0('<cite class="folder">', x_s, '</cite>')
citeexec <- function(x_s) paste0('<cite class="exec">', x_s, '</cite>')

citeEA <- function() {
  n <- 0
  function(x_s) {
    n <<- n + 1
    paste0('<cite class="oc"> EA#', n, ' ', x_s, '</cite>')
  }
}

rdoc <- citeval('wyz.code.rdoc')
roxy <- citeval('roxygen2')
op <- citeval('wyz.code.offensiveProgramming')
R <- citeit('R')

brkfun <- function(x_s) {
  paste(sapply(x_s, function(e) paste('\u25b6', e, '<br/>')), collapse = '')
}

showTable <- function(x_dt_1) {
  DT::datatable(x_dt_1, options = list(pageLength = 25))
}
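# Examples of the markup these helpers emit (illustration only):
#   citefun('produceManualPage')  ->  <cite class="it">produceManualPage</cite>
#   citefigure('22.5s')           ->  <cite class="figure">22.5s</cite>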
/scratch/gouwar.j/cran-all/cranData/wyz.code.rdoc/vignettes/common-style.R
--- title: "wyz.code.rdoc documentation" author: "Fabien GELINEAU" date: "Last update 2020 January" output: rmarkdown::html_vignette: css: style.css vignette: > %\VignetteIndexEntry{wyz.code.rdoc documentation} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- <link rel="stylesheet" href="style.css"> <img src='images/rdoc-hex.png' alt='offensive programming - R documentation' style='width:30%'/> ```{r setup, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ``` ```{r, eval = TRUE, echo = FALSE} pkn <- 'wyz.code.rdoc' ``` Package **`r pkn`** aims to generate **R documentation** <cite class='comment'>manual pages</cite> directly from R code, being or not offensive programming instrumented. # Documents ## Offensive programming book Read [Offensive programming book](https://neonira.github.io/offensiveProgrammingBook/) to get introduction and advanced knowledge on offensive programming advantages, practice and ecosystem. Main chapter related to [documentation generation](https://neonira.github.io/offensiveProgrammingBook_v1.2.1/generating-r-documentation.html) might be of first interest. ## Vignettes [tutorial](tutorial.html) [use cases](use-cases.html) [tips and tricks](tips-and-tricks.html) [release notes](release-notes.html)
/scratch/gouwar.j/cran-all/cranData/wyz.code.rdoc/vignettes/documentation.Rmd
---
title: Release notes of package wyz.code.rdoc
author: Fabien GELINEAU
date: 2020-04-22
output:
  rmarkdown::html_vignette:
    number_sections: false
    toc: false
    css: style.css
vignette: >
  %\VignetteIndexEntry{ Release notes of package wyz.code.rdoc }
  %\VignetteEngine{knitr::rmarkdown}
  %\VignetteEncoding{UTF-8}
---

```{r setup, include = FALSE}
knitr::opts_chunk$set(
  collapse = TRUE,
  comment = "#>"
)
source('common-style.R')
```

<img src='images/rdoc-hex.png' alt='offensive programming - R documentation' style='width:30%'/>

# name [awesome-asterion-tau] package-version [1.1.19] timestamp [2021-10-05 20:13:20]

1. CRAN information on obsolescence of lubridate - need to remove the dependency.
1. lubridate::date() was used in file `r citefile("test_outOfPackage.R")`.
1. Test, Duration: `r citefigure('22.5s')`, OK: `r citefigure('824')`
1. `r citeexec('R CMD check')`, Duration: `r citefigure('53.3s')`, 0 errors ✓ | 0 warnings ✓ | 0 notes ✓
1. `r citeval('Commercial software release management')`
1. `r citefolder('vignette')` content update
1. `r citeop("git")` alignment

# name [awesome-asterion-sigma] package-version [1.1.18] timestamp [2020-11-09 20:06:21]

1. enforced R 4.0
1. Test, Duration: `r citefigure('21.5s')`, OK: `r citefigure('824')`
1. `r citeexec('R CMD check')`, Duration: `r citefigure('56.4s')`, 0 errors ✓ | 0 warnings ✓ | 0 notes ✓
1. `r citeval('Commercial software release management')`
1. `r citefolder('vignette')` content update
1. `r citeop("git")` alignment

# Release awesome-asterion-rho - 1.1.17 - 2020-05-04

1. Test, Duration: `r citefigure('20.6s')`, OK: `r citefigure('824')`
1. `r citeexec('R CMD check')`, Duration: `r citefigure('56.4s')`, 0 errors ✓ | 1 warning ✓ | 0 notes ✓
1. `r citeval('Commercial software release management')`
1. `r citefolder('vignette')` structure update
1. `r citefolder('vignette')` content update

# Release 1.1.16 - April 22nd 2020

1. added `r citefun('shortcuts')` function
1. added `r citefun('getTypeLabel')` function
1. added test for `r citefun('shortcuts')` function
1. modified `r citefun('InputContext')` function
1. modified `r citefun('beautify')` function
1. modified `r citefun('extractObjectOPInformation')` function
1. modified `r citefun('generatePublicFieldParagraph')` function
1. modified `r citefun('generatePublicMethodParagraph')` function
1. modified `r citefun('generateTable')` function
1. added `r citefile('shortcuts.Rd')` manual page
1. updated all documentation files
1. upgraded and updated vignette files
1. test, `r citefigure('20.6s')`, OK: `r citefigure('824')`
1. R CMD check, `r citefigure('54.6s')`, 0 errors ✓ | 0 warnings ✓ | 0 notes ✓

# Release 1.1.8 - January 2020

Main improvements are

1. rebuilt manual page generation entirely
1. added a way to generate a manual page from a standard R function
1. added manual page post processing to allow automated tuning of the resulting page
1. added manual page statistics - see the **computeDocumentationStatistics** function manual page
1. enforced capitalization on new section name creation in manual pages
1. completed and cleaned up R code - 33 exported functions - 17 internals
1. completed manual pages - 35 manual pages
1. enhanced manual page documentation: review of all contents and corrections
1. completed vignettes - 5 vignettes
1. added a data folder to allow for data manual page production and test
1. fixed issues in vignettes
1. completed the test panel - 45 test files - 811 tests
1. completed business use cases - 10 BUC
1. worked on test coverage to reach a level higher than 99%
1. **packageFunctionsInformation** verified and upgraded
1. timing for tests 26s, checks 51s

# Release 1.1.7 - October 2019

Main improvements are

1. corrected many typographic errors in end-user facing inputs and outputs
2. completed unit tests
3. enforced higher code coverage (from 75% up to 99.28%)
4. changed the visibility of many functions from hidden (internal to the package) to visible (end-user available)
5. documentation completion
6. cleaned up package dependencies

This release fully replaces older ones, which are now considered obsolete. Keep the pace, and upgrade your packages if you do not use this version or a higher one!
/scratch/gouwar.j/cran-all/cranData/wyz.code.rdoc/vignettes/release-notes.Rmd
--- title: "wyz.code.rdoc Tips and Tricks" author: "Fabien GELINEAU" date: "Last update 2020 January" output: html_document: number_sections: true toc: true css: style.css vignette: > %\VignetteIndexEntry{wyz.code.rdoc Tips and Tricks} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- <img src='images/rdoc-hex.png' alt='offensive programming - R documentation' style='width:30%'/> ```{r setup, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "" ) source('common-style.R') ``` ```{r context, eval = TRUE, echo = FALSE} library("data.table") library("wyz.code.offensiveProgramming") library("wyz.code.rdoc", warn.conflicts = FALSE) ``` This use-case vignette is dedicated to some common manual page use case generation using package `r rdoc`. It may make sense for newcommers to read the tutorial vignette prior to read this vignette. # Tips and tricks about R code {.tabset .tabset-fade .tabset-pills} ## Parameter naming Unless you specify a length of `r citefigure('1')` or `r citefigure('1l')`, pluralize the parameter name. For example, avoid `r citeop('countryFlag_b_3')`, prefer `r citeop('countryFlags_b_3')`. Reason is quite simple, with the first one, you will have to correct produce documentation text as it is less likely to be correct in singular form than pluralized. When using no length specification, pluralizing that parameter name is the best practice. Understanding following examples, worth the time. ```{r parameter_naming_pluralize, echo=TRUE, eval=TRUE} wyz.code.rdoc:::getTypeLabel(FunctionParameterName('countryFlag_b_3')) # wrong wyz.code.rdoc:::getTypeLabel(FunctionParameterName('countryFlags_b_3')) # right ``` ```{r parameter_naming_singular, echo=TRUE, eval=TRUE} wyz.code.rdoc:::getTypeLabel(FunctionParameterName('countryFlags_b_1')) # wrong wyz.code.rdoc:::getTypeLabel(FunctionParameterName('countryFlag_b_1')) # right ``` ```{r parameter_naming, echo=TRUE, eval=TRUE} wyz.code.rdoc:::getTypeLabel(FunctionParameterName('countryFlag_b')) # wrong wyz.code.rdoc:::getTypeLabel(FunctionParameterName('countryFlags_b')) # right ``` # Tips and tricks about sections {.tabset .tabset-fade .tabset-pills} ## add a section Some sections have to be unique `r cmt('title, description, examples, ...')` some others not `r cmt('keyword, concept, alias, ...')`. You must respect the implicit contract of the standard `r R` documentation. See [writing `r R` extensions](https://cran.r-project.org/doc/manuals/r-release/R-exts.html) for more information. To add a section, just set its content to the one you desired. Content may contain format directives or not. See below, paragraph 'about-format'. My only advice is to keep content as simple and sharp as possible. Using non ambiguous terms and clear sentences helps a lot. For example, to add a `r citefun('details')` section and three `r citefun('concept')` sections, you could do something similar to ```{r echo=TRUE, eval=TRUE} pc <- ProcessingContext( extraneous_l = list( details = 'It is worth to know bla bla bla', concept = paste0('concept-', 1:3) ) ) ``` ## complete a section Activate post processing for sections you want to complete. For example, to complete the `r citefun('title')` section, you could do ```{r echo=TRUE, eval=TRUE} pc <- ProcessingContext( postProcessing_l = list( title = function(content_s) { paste(content_s, sentensize('some complimentary content')) } ) ) ``` ## remove a section Just set its content to `r citecode('NULL')`. 
```{r echo=TRUE, eval=TRUE}
pc <- ProcessingContext(
  postProcessing_l = list(
    details = function(content_s) NULL
  )
)
```

# Tips and tricks about content

## content escaping

Content escaping is sometimes fully necessary, sometimes partially necessary and sometimes unneeded. It is quite difficult to have a systematic approach, as content varies a lot according to section intent, section nature `r cmt('code, text, equation, ...')` and also according to the surrounding context.

To ease the handling of content escaping, `r rdoc` offers several functions: a high-level function `r citefun("generateMarkup")` and a low-level function `r citefun("escapeContent")`. By default content is only partially escaped. Characters `r citechar("'@'")` and `r citechar("'%'")` are systematically escaped, but not characters `r citechar("'{'")` and `r citechar("'}'")`. To escape the latter, you must set argument `r citearg("escapeBraces_b_1")` to `r citecode("TRUE")` while using one or the other of those functions.

```{r content_escape, echo=TRUE, eval=TRUE}
content <- 'function(x) { x + 1 }'

# To be used in a code section content
generateMarkup(content)

# To be used in a text section content
paste('Some R code:', generateMarkup(content, escapeBraces_b_1 = TRUE))
```

## Content generation for `r R` documentation sections {.tabset .tabset-fade .tabset-pills}

As an end-user you should rely on [use cases](use-cases.html). As a programmer, you may need to create your own generation scheme to fulfil some special requirements. The following functions could be useful to do so.

function name | intent
:--------------------|:---------------------------------------------------------
`r citefun('generateSection')` | generate a `r R` documentation section
`r citefun('generateParagraph')` | generate a paragraph collating all your inputs with a single new line by default
`r citefun('generateParagraphCR')` | generate a paragraph collating all your inputs with '\\cr'
`r citefun('generateParagraph2NL')` | generate a paragraph collating all your inputs with two new lines

## Content generation for the `r citefun('examples')` section {.tabset .tabset-fade .tabset-pills}

Examples are a really important part of the documentation. They are also a quite tricky part when handcrafting documentation. This is due to an inherent complexity related to contextual processing, which has to take into consideration testing time, necessary testing resources, test execution paths, and so on.

In order to increase productivity and simplify the `r citefun('examples')` section, `r rdoc` provides a dedicated function that turns pure `r R` code into content. Here is the pattern to follow.

1. create a variable that holds a list of functions taking no arguments. The body of each function must be legal `r R` code, embodying the example
1. use function `r citefun('convertExamples')` to convert the examples. You have the opportunity to pass along some keywords in order to manage tests that should not be run, should not be tested, or should not be shown. You also have the opportunity to capture the example output and to introduce it automatically into the content.
Let's see a sample session that does so.

### code

```{r echo=TRUE, eval=TRUE}
# The function to test
divide <- function(x_n, y_n) x_n / y_n

# The examples to consider
examples <- list(
  function() { divide(1:3, 1:3 + 13L) },
  function() { divide(0L, c(Inf, -Inf)) },
  function() { divide(c(Inf, -Inf), 0L) },
  function() { divide(0L, 0L) }
)

# your documentation complementary parts to consider
# and manual page generation context setup
ic <- InputContext(NULL, 'divide')
pc <- ProcessingContext(
  extraneous_l = list(
    examples = convertExamples(examples, captureOutput_b_1n = TRUE)
  )
)
gc <- GenerationContext(tempdir(), overwrite = TRUE)

# The generation of the manual page
rv <- produceManualPage(ic, pc, gc)
```

### content generated

```{r echo=TRUE, eval=TRUE}
readLines(rv$context$filename)
```

### generated content as picture

![generated content](images/divide.png)

# Tips and tricks about format {.tabset .tabset-fade .tabset-pills}

## enumeration format

Function `r citefun('generateEnumeration')` eases enumeration management.

```{r echo=TRUE, eval=TRUE}
generateEnumeration(paste('case', 1:4))
```

## list format

Function `r citefun('generateEnumeration')` also eases item list management.

```{r echo=TRUE, eval=TRUE}
generateEnumeration(paste('case', 1:4), TRUE)
```

## table format

To format a table, use function `r citefun('generateTable')`.

```{r echo=TRUE, eval=TRUE}
dt <- data.table::data.table(x = paste0('XY_', 1:3), y = letters[1:3])

# as-is
generateTable(dt)

# with row numbering
generateTable(dt, numberRows_b_1 = TRUE)
```

## special formats

The specification of `r R` documentation is quite complex. There are many possible variants and many ways to achieve a result. The following functions try to provide one convenient solution for some common needs.

function name | intent
:--------------------|:---------------------------------------------------------
`r citefun('generateOptionLink')` | When you need to generate a cross-package documentation link, use this function. If you need an intra-package documentation link, use function `r citefun('beautify()$link')`. You could also use `r citefun('producePackageLink')` to generate a cross-package documentation link, but you won't be able to customize the labels.
`r citefun('generateOptionSexpr')` | When you need to generate a `r citecode('Sexpr')`, use `r citefun('generateMarkup')` when you don't need options, otherwise use function `r citefun('generateOptionSexpr')`.
`r citefun('generateEnc')` | generate a locale text encoding and its ASCII equivalence. Not to be confused with `r citefun('generateEncoding')`, which sets the encoding for the full manual page.
`r citefun('generateReference')` | generate the text for a documentary or web reference.

Refer to the dedicated manual pages for more information.

# Tips and tricks about presentation {.tabset .tabset-fade .tabset-pills}

Many typographic enhancements are available. They are all grouped behind a facade named `r citefun('beautify')`.
```{r echo=TRUE, eval=TRUE}
b <- beautify()
names(b)
```

```{r echo=TRUE, eval=TRUE}
b$bold('lorem ipsum')
```

```{r echo=TRUE, eval=TRUE}
b$file('/tmp/result.txt')
```

```{r echo=TRUE, eval=TRUE}
b$acronym('CRAN')
```

```{r echo=TRUE, eval=TRUE}
co <- '{ x %% y }'
b$code(co) # very probably wrong

e <- beautify(TRUE)
e$code(co) # much more probably right
```

and the very convenient

```{r echo=TRUE, eval=TRUE}
# link to another package
b$code(producePackageLink('ggplot2', 'aes_string'))

# link to same package
b$codeLink('generateTable')

# link to same package with enhanced presentation
b$enhanceCodeLink('generateTable')
```
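These helpers combine naturally with the processing context seen earlier. As a small sketch, reusing the `r citecode('divide')` function, the `r citecode('b')` beautifier and the generation context `r citecode('gc')` defined above in this vignette, and picking `r citecode('base::append')` as an arbitrary link target:

```{r presentation_seealso, echo=TRUE, eval=FALSE}
pc <- ProcessingContext(
  extraneous_l = list(
    seealso = b$code(producePackageLink('base', 'append'))
  )
)
rv <- produceManualPage(InputContext(NULL, 'divide'), pc, gc)
```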
/scratch/gouwar.j/cran-all/cranData/wyz.code.rdoc/vignettes/tips-and-tricks.Rmd
--- title: "wyz.code.rdoc Tutorial" author: "Fabien GELINEAU" date: "Last update 2019.08.27" output: rmarkdown::html_vignette: number_sections: true toc: false css: style.css vignette: > %\VignetteIndexEntry{wyz.code.rdoc Tutorial} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- <link rel="stylesheet" href="style.css"> <img src='images/rdoc-hex.png' alt='offensive programming - R documentation' style='width:30%'/> ```{r setup, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "" ) source('common-style.R') ``` Package `r rdoc` aims to ease manual page creation in a very flexible way. It aims to free you from learning `r R` documentation specific language and its arcanes, while providing good and reliable results in a quick and reproducible way. # Understanding manual page generation {.tabset .tabset-fade .tabset-pills} Manual pages are associated either to describe processing functions or to describe data. Both are important, and each comes with its own specification set. From a pratical point of view, it exists several ways to produce manual pages. Indeed two are currently the most commonly used. First one is based on handcrafted manual pages. Second one is based on generated manual pages. ## Handcrafted manual page Standard `r R` documentation tools to generate manual pages belongs to the first approach. The global manual page generation process looks like following one. <img src="images/diagrams-1-trn.png" alt="handcrafted manual page"></img> In this is a two steps process, you first generate a manual page template `r cmt('once and only once')`, and then fill-in the blanks with the desired content. In practice, you end up repeating a variable number of times the fill-in phase for each manual page. Moreover, it requires you to get acknowledge about `r R` documentation arcanes, and this is quite complex due to syntax issues, character escaping and some other not so simple to fulfil needs. ## `r roxy` generated manual page Package `r roxy` meets the second approach. Its manual page generation process looks like following one <img src="images/diagrams-2-trn.png" alt="handcrafted manual page"></img> Theoritically, this is a two steps process, where you first fill-in the code comments according to `r roxy` specification, and then generate on-demand the related manual pages. This is a much more industrial approach. In practice, compliance with code comments specification is not so easy and still requires deep understanding of `r R` documentation scheme. Manual page generation although robust and fast, may sometimes be cumbersome. ## `r rdoc` generated manual page Package `r rdoc` meets the already presentend second approach. Its manual page generation process looks like following one. <img src="images/diagrams-3-trn.png" alt="handcrafted manual page"></img> Theoritically, this is a three steps process 1. create your manual page customization 1. generate the related manual page 1. edit the resulting manual page In practice, this is often a two steps process, as editing resulting manual page is an optional step, only required when the cost of the modification is higher to be achieved by code than by hand. 
## Comparison of the three approaches

approach | pros | cons
:----------------:|:------------------------------|:------------------------------
handcrafted manual page | `r brkfun(c('easy to understand', 'straightforward process'))` | `r brkfun(c('hyper repetitive task', 'difficult to master', 'time consuming activity', 'great variability of the result from person to person'))`
`r roxy` generated manual page | `r brkfun(c('fast', 'robust', 'high quality of result', 'Hadley powered'))` | `r brkfun(c('code commenting', 'sometimes tricky', 'repetitive task', 'time consuming activity'))`
`r rdoc` generated manual page | `r brkfun(c('pure code, only code', 'highly customizable result', 'time saving activity', 'highly reproducible results', 'high reuse of customization as code'))` | `r brkfun(c('requires some experimentation to feel at ease with', 'still repetitive task, although less'))`

# Package `r rdoc` approach

## Why another manual page generation tool?

Mainly for the following reasons.

First, documentation production is an activity that consumes too much time. We should reduce the amount of time spent on documentation generation while guaranteeing a high level of quality of the produced documentation.

Second, documentation is mandatory. So we need very powerful tools to alleviate the burden and to reduce the variability of documentation quality.

Third, I do not believe that the standard `r R` or `r roxy` ways are the right ones. They are for sure helpful but in my opinion clearly not enough. I wish I could write documentation from code instead of writing it by hand. I should be able to do so using a high level interface that does not require me to know much about the final `r R` documentation format. This will allow me to produce better documentation, as I will only have to focus on the content and the style, not on the format of the documentation.

Fourth, the current level of industrialization of documentation generation provided by the two presented approaches is insufficient to me. I wish to be able to reuse an already generated part from one manual page in another one. This is possible and quite easy to achieve if I use code, difficult otherwise. I also wish it to be possible to produce a complete manual page, whatever its format and content, in a fully reproducible and replayable way.

## What can actually be generated?

A manual page can be generated

1. from a single `r R` function
1. from an `r R` object instantiated from an `r R` class
1. for a package
1. from a data set

Currently, version 1.1.8 of `r rdoc` allows you to generate manual pages for each of these cases. See [use cases](use-cases.html) to know more.

## Code organization

Package `r rdoc` provides low, medium and high level tools <cite class='comment'>functions</cite> to generate manual pages. You can discover them using the following `r R` sequence.

```{r eval = TRUE, echo = TRUE}
dt <- wyz.code.rdoc::opRdocInformation()
```

Core level tools deal mainly with deep package internals.

```{r eval = TRUE, echo = TRUE}
sort(dt[stratum == 'CORE' & nature == 'EXPORTED']$name)
```

Low level tools deal with the `r R` documentation format.

```{r eval = TRUE, echo = TRUE}
sort(dt[stratum == 'LAYER_1' & nature == 'EXPORTED']$name)
```

Medium level tools deal essentially with presentation and beautifying.

```{r eval = TRUE, echo = TRUE}
sort(dt[stratum == 'LAYER_2' & nature == 'EXPORTED']$name)
```

High level tools deal with end-user facilities to ease manual page generation, manage end-user customizations, and increase productivity.
```{r eval = TRUE, echo = TRUE}
sort(dt[stratum == 'LAYER_3' & nature == 'EXPORTED']$name)
```
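For a quick overview of how the exported functions spread across strata, the same `r citecode('dt')` object created above can be aggregated:

```{r eval = TRUE, echo = TRUE}
dt[nature == 'EXPORTED', .N, by = stratum]
```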
/scratch/gouwar.j/cran-all/cranData/wyz.code.rdoc/vignettes/tutorial.Rmd
--- title: "wyz.code.rdoc use cases" author: "Fabien GELINEAU" date: "Last update 2020 - Q1" output: rmarkdown::html_vignette: number_sections: true toc: false css: style.css vignette: > %\VignetteIndexEntry{wyz.code.rdoc use cases} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- <link rel="stylesheet" href="style.css"> <img src='images/rdoc-hex.png' alt='offensive programming - R documentation' style='width:30%'/> ```{r setup, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "" ) source('common-style.R') ``` ```{r context, eval = TRUE, echo = FALSE} library("data.table") library("wyz.code.offensiveProgramming") library("wyz.code.rdoc", warn.conflicts = FALSE) ``` This use-case vignette is dedicated to some common manual page use case generation using package `r rdoc`. It may make sense for newcommers to read the tutorial vignette prior to read this vignette. # Manual page generation from a function ## Standard `r R` function {.tabset .tabset-fade .tabset-pills} I will consider function `r citecode('append')` from base package as example. Any other `r R` function may work as well. ### code ```{r function_mpg, eval = TRUE, echo = TRUE} b <- beautify() examples <- list( function() { append(1:5, 0:1, after = 3) } ) ic <- InputContext(NULL, 'append') pc <- ProcessingContext( extraneous_l = list( title = 'Vector Merging', description = sentensize('add elements to a vector'), details = paste('If the parameter', b$code('after'), 'is higher than' , b$code('x'), 'length,\n then insertion is done at the', 'end of the data structure'), references = paste('Becker, R. A., Chambers, J. M. and Wilks, A. R. (1988)', 'The New S Language. Wadsworth & Brooks/Cole.'), examples = convertExamples(examples, captureOutput_b_1n = FALSE) ), postProcessing_l = list( arguments = function(content_s) { s <- sub('XXX_001', sentensize('the vector the values are to be appended to'), content_s, fixed = TRUE) s <- sub('XXX_002', sentensize('to be included in the modified vector'), s, fixed = TRUE) s <- sub('XXX_003', 'a subscript, after which the values are to be appended', s, fixed = TRUE) s } ) ) td <- tempdir() gc <- GenerationContext(td, overwrite = TRUE) rv <- produceManualPage(ic, pc, gc) ``` **NOTA BENE**: Embedded new line characters in previous text are present for vignette format purpose only here, in particular to avoid horizontal scrollbars and to ease reading. Indeed, sometimes you will have to do so, to avoid producing too long lines in `r R` documentations files, as this will generate a warning when running `r citefun('R CMD check')`. If you plan to publish your package on `r citefun('CRAN')`, you will have to resolve such warning cases. ### generated `r citecode('.Rd')` file ```{r function_mpg_rv, eval = TRUE, echo = TRUE} readLines(rv$context$filename) ``` ### generated content as picture ![generated append documentation](images/append.png) ### original content ![base R append documentation](images/append-orig.png) ## Semantic naming function {.tabset .tabset-fade .tabset-pills} Let's consider following function, respectful of semantic naming as defined by offensive programming. Refer to [Offensive programming book](https://neonira.github.io/offensiveProgrammingBook/) to get introduction and advanced knowledge on offensive programming advantages, practice and ecosystem. 
```{r op, eval = TRUE, echo = TRUE}
iterateOverSet <- function(set_s, enforceUniqueness_b_1 = TRUE) {
  NULL
}
```

The body is set to `r citecode('NULL')` for demonstration purposes and to prove that it does not interfere with documentation generation.

### code

```{r op_mpg, eval = TRUE, echo = TRUE}
examples <- list(
  function() { iterateOverSet(sample(LETTERS, 35, TRUE), FALSE) },
  function() { iterateOverSet(as.character(1:9)) }
)

ic <- InputContext(NULL, 'iterateOverSet')
pc <- ProcessingContext(
  extraneous_l = list(
    examples = convertExamples(examples, captureOutput_b_1n = FALSE)
  )
)
rv_op <- produceManualPage(ic, pc, gc)
```

### generated `r citecode('.Rd')` file

```{r op_mpg_rv, eval = TRUE, echo = TRUE}
readLines(rv_op$context$filename)
```

### generated content as picture

![generated iterateOverSet documentation](images/iterateOverSet.png)

### Main differences

The main differences with standard `r R` function generation are that, using the semantic naming scheme, you do not have to

1. provide content for arguments: it is automatically deduced from the semantic names you use
1. provide a title: it is deduced from the function name
1. provide a description: it is also deduced from the function name

You can still change or adapt the provided content through post processing, whenever and wherever needed, to reach a sharper documentation level.

# Manual page generation from an environment object

## Standard `r R` object {.tabset .tabset-fade .tabset-pills}

I will consider the class named `r citecode('MeltingPot_Env')` to produce an object based on an environment containing several functions.

### code

```{r eval = TRUE, echo = TRUE}
source(findFilesInPackage('classes', 'wyz.code.offensiveProgramming')[1])
ic <- InputContext(MeltingPot_Env())
rv <- produceManualPage(ic, generationContext = gc)
```

**NOTA BENE**: I use the default processing context here. This means only the documentation sections that can be valued by default will be filled in.

### generated `r citecode('.Rd')` file

```{r eval = TRUE, echo = TRUE}
readLines(rv$context$filename)
```

### generated content as picture

![generated environment class documentation](images/r-env.png)

## Offensive programming object {.tabset .tabset-fade .tabset-pills}

I will consider the class named `r citecode('Zorg')` to produce an object based on an environment containing several functions. This class is partially offensive programming instrumented.

### code

```{r eval = TRUE, echo = TRUE}
ic <- InputContext(Zorg())
rv <- produceManualPage(ic, generationContext = gc)
```

### generated `r citecode('.Rd')` file

```{r eval = TRUE, echo = TRUE}
readLines(rv$context$filename)
```

### generated content as picture

![generated offensive programming environment documentation](images/op-env.png)

### Main differences

There exist only very few differences between standard `r R` and `r citefun('offensive programming')` class documentation generation. Using the latter allows you to benefit from instrumentation and to run dynamic checks instead of static checks.

# Manual page generation from an S3 object

## Standard `r R` object {.tabset .tabset-fade .tabset-pills}

I will consider the class named `r citecode('Bu_S3')` to produce an object based on the S3 class scheme, containing several functions.
### code

```{r eval = TRUE, echo = TRUE}
ic <- InputContext(Bu_S3())
rv <- produceManualPage(ic, generationContext = gc)
```

### generated `r citecode('.Rd')` file

```{r eval = TRUE, echo = TRUE}
readLines(rv$context$filename)
```

### generated content as picture

![generated S3 class documentation](images/r-s3.png)

## Offensive programming object {.tabset .tabset-fade .tabset-pills}

I will consider the class named `r citecode('Addition_TCFI_Partial_S3')` to produce an object based on an S3 class containing several functions. This class is partially offensive programming instrumented.

### code

```{r eval = TRUE, echo = TRUE}
source(findFilesInPackage('Addition_TCFI_Partial_S3.R', 'wyz.code.offensiveProgramming')[1])
ic <- InputContext(Addition_TCFI_Partial_S3())
rv <- produceManualPage(ic, generationContext = gc)
```

### generated `r citecode('.Rd')` file

```{r eval = TRUE, echo = TRUE}
readLines(rv$context$filename)
```

### generated content as picture

![generated offensive programming S3 class documentation](images/op-s3.png)

### Main differences

There exist only very few differences between standard `r R` and `r citefun('offensive programming')` class documentation generation. Using the latter allows you to benefit from instrumentation and to run dynamic checks instead of static checks.

# Manual page generation from an S4 object

## Standard `r R` object {.tabset .tabset-fade .tabset-pills}

I will consider the class named `r citecode('Person_S4')` to produce an object based on the S4 class scheme, containing several functions.

### code

```{r eval = TRUE, echo = TRUE}
ic <- InputContext(new('Person_S4', name = 'neonira'))
rv <- produceManualPage(ic, generationContext = gc)
```

### generated `r citecode('.Rd')` file

```{r eval = TRUE, echo = TRUE}
readLines(rv$context$filename)
```

### generated content as picture

![generated S4 class documentation](images/r-s4.png)

## Offensive programming object {.tabset .tabset-fade .tabset-pills}

I will consider the class named `r citecode('Addition_TCFI_Partial_S4')` to produce an object based on an S4 class containing several functions. This class is partially offensive programming instrumented.

### code

```{r eval = TRUE, echo = TRUE}
source(findFilesInPackage('Addition_TCFI_Partial_S4.R', 'wyz.code.offensiveProgramming')[1])
ic <- InputContext(new('Addition_TCFI_Partial_S4'))
rv <- produceManualPage(ic, generationContext = gc)
```

### generated `r citecode('.Rd')` file

```{r eval = TRUE, echo = TRUE}
readLines(rv$context$filename)
```

### generated content as picture

![generated offensive programming S4 class documentation](images/op-s4.png)

### Main differences

There exist only very few differences between standard `r R` and `r citefun('offensive programming')` class documentation generation. Using the latter allows you to benefit from instrumentation and to run dynamic checks instead of static checks.

# Manual page generation from an RC object

## Standard `r R` object {.tabset .tabset-fade .tabset-pills}

I will consider the class named `r citecode('Person_RC')` to produce an object based on the RC class scheme, containing several functions.
### code

```{r eval = TRUE, echo = TRUE}
ic <- InputContext(new('Person_RC', name = 'neonira'))
rv <- produceManualPage(ic, generationContext = gc)
```

### generated `r citecode('.Rd')` file

```{r eval = TRUE, echo = TRUE}
readLines(rv$context$filename)
```

### generated content as picture

![generated RC class documentation](images/r-rc.png)

## Offensive programming object {.tabset .tabset-fade .tabset-pills}

I will consider the class named `r citecode('Addition_TCFI_Partial_RC')` to produce an object based on an RC class containing several functions. This class is partially offensive programming instrumented.

### code

```{r eval = TRUE, echo = TRUE}
source(findFilesInPackage('Addition_TCFI_Partial_RC.R', 'wyz.code.offensiveProgramming')[1])
ic <- InputContext(new('Addition_TCFI_Partial_RC'))
rv <- produceManualPage(ic, generationContext = gc)
```

### generated `r citecode('.Rd')` file

```{r eval = TRUE, echo = TRUE}
readLines(rv$context$filename)
```

### generated content as picture

![generated offensive programming RC class documentation](images/op-rc.png)

### Main differences

There exist only very few differences between standard `r R` and `r citefun('offensive programming')` class documentation generation. Using the latter allows you to benefit from instrumentation and to run dynamic checks instead of static checks.

# Manual page generation from an R6 object

## Standard `r R` object {.tabset .tabset-fade .tabset-pills}

I will consider the class named `r citecode('Accumulator_R6')` to produce an object based on the R6 class scheme, containing several functions.

### code

```{r eval = TRUE, echo = TRUE}
ic <- InputContext(Accumulator_R6$new())
rv <- produceManualPage(ic, generationContext = gc)
```

### generated `r citecode('.Rd')` file

```{r eval = TRUE, echo = TRUE}
readLines(rv$context$filename)
```

### generated content as picture

![generated R6 class documentation](images/r-r6.png)

## Offensive programming object {.tabset .tabset-fade .tabset-pills}

I will consider the class named `r citecode('Addition_TCFI_Partial_R6')` to produce an object based on an R6 class containing several functions. This class is partially offensive programming instrumented.

### code

```{r eval = TRUE, echo = TRUE}
source(findFilesInPackage('Addition_TCFI_Partial_R6.R', 'wyz.code.offensiveProgramming')[1])
ic <- InputContext(Addition_TCFI_Partial_R6$new())
rv <- produceManualPage(ic, generationContext = gc)
```

### generated `r citecode('.Rd')` file

```{r eval = TRUE, echo = TRUE}
readLines(rv$context$filename)
```

### generated content as picture

![generated offensive programming R6 class documentation](images/op-r6.png)

# Manual page generation for a package

## Standard `r R` object {.tabset .tabset-fade .tabset-pills}

I will consider the package named `r citecode('wyz.code.rdoc')` as an example to generate a package manual page.

### code

```{r eval = TRUE, echo = TRUE}
ic <- InputContext(NULL, package = 'wyz.code.rdoc') # using only a package name
pc <- ProcessingContext(
  postProcessing_l = list(
    details = function(content_s) NULL
  )
)
rv <- produceManualPage(ic, pc, gc)
```

### generated `r citecode('.Rd')` file

```{r eval = TRUE, echo = TRUE}
readLines(rv$context$filename)
```

### generated content as picture

![generated package documentation](images/pkg.png)

# Manual page generation for data

## Standard `r R` object {.tabset .tabset-fade .tabset-pills}

I will consider the data set named `r citecode('dummy')` to generate documentation from.
### code ```{r eval = TRUE, echo = TRUE} ic <- InputContext(dummy, dataFilename = 'dummy.csv') pc <- ProcessingContext( extraneous_l = list( description = 'a dummy data file for demonstration purposes', format = 'a data.frame 9x2', source = 'fake data - used only for demo' ), postProcessing_l = list( classification = function(content_s) NULL ) ) rv <- produceManualPage(ic, pc, gc) ``` ### generated `r citecode('.Rd')` file ```{r eval = TRUE, echo = TRUE} readLines(rv$context$filename) ``` ### generated content as picture ![generated data documentation](images/data.png)
/scratch/gouwar.j/cran-all/cranData/wyz.code.rdoc/vignettes/use-cases.Rmd
gautfo <- generateAllUnitTestsFromObject <- function(object_o_1, sourceFile_s_1, sourcePackage_s_1, targetFolder_s_1, overwriteFile_b_1 = TRUE, verbose_b_1 = FALSE) { if (!dir.exists(targetFolder_s_1)) abort('target folder', strBracket(targetFolder_s_1), 'does not exit. You must create it.') tcd <- copy(retrieveTestCaseDefinitions(object_o_1)) frt <- copy(retrieveFunctionReturnTypes(object_o_1)) cn <- setdiff(class(object_o_1), c('environment', 'R6'))[1] if (!is.data.table(tcd)) return(paste('Class', strBracket(cn), 'apparently owns no test instrumentation.', 'No test created.')) if (!is.data.table(frt)) return(paste('Class', strBracket(cn), 'apparently owns no function return type instrumentation.', 'No test created.')) uf <- unique(tcd$function_name) filenames <- file.path(targetFolder_s_1, paste0(cn, '-', uf)) names(filenames) <- uf objectreification <- reifyObject(object_o_1, sourceFile_s_1, sourcePackage_s_1) src_uni <- list( objectreification$to_source, call('<-', quote(object_o_1), objectreification$to_reify) ) function_name <- NULL # data.table NSE issue with Rcmd check testthatFactory_f_1 = testthatFactory() tcd[, `:=`(k = .I)] ut <- sapply(uf, function(fn) { ff <- tcd[function_name == fn] sapply(defineEvaluationModes()[c(1, 3)], function(de) { sapply(seq_len(nrow(ff)), function(k) { b <- de == defineEvaluationModes()[1] em <- if (b) { ff[k]$standard_evaluation } else { ff[k]$type_checking_enforcement } dx <- call('do.call', object_o_1[[fn]], ff[k]$test_case[[1]]$getParams()) dd <- call('EvaluationMode', de) df <- call('<-', ifelse(b, quote(emsre), quote(emtce)), dd) dc <- call('runTestCase', quote(object_o_1), ff[k]$k, ifelse(b, quote(emsre), quote(emtce))) rtcname <- paste0('rtc', ifelse(b, 'sre', 'tce'), ff[k]$k) di <- call('<-', as.symbol(rtcname), dc) dz <- call('$', call('[[', call('[[', as.symbol(rtcname), 1), 1), 'value') syn <- call('$', as.symbol(rtcname), 'synthesis') st <- call('$', syn, 'status') cs <- if (em != 'failure') { list(st, call('$', syn, 'value_check')) } else { list(st, call('$', syn, 'execution_evaluation'), call('&&', call('$', syn, 'function_return_check'), call('$', syn, 'parameter_check')) ) } rtc <- testthatFactory_f_1(cs, em) list( pre = if (k == 1) c(df, di) else list(di), label = fn, #paste(fn, de, sep = ' - '), comment = paste(paste0('test ', ff[k]$k), ff[k]$test_case[[1]]$getDescription(), em, sep = ' - '), rtc = rtc) }, simplify = FALSE, USE.NAMES = FALSE) }, simplify = FALSE, USE.NAMES = FALSE) }, simplify = FALSE, USE.NAMES = FALSE) call2text <- function(z) as.character(as.expression(z)) rv <- sapply(seq_len(length(ut)), function(k) { entries <- unlist(lapply(ut[[k]], function(l) { c( unlist(lapply(l, function(p) { sapply(p$pre, call2text) })), "", # for presentation purpose generateTestthatEnvelope(unlist(lapply(l, function(p) { c(paste('\n#', p$comment), call2text(p$rtc[[1]]), call2text(p$rtc[[2]]), '') })), l[[1]]$label) ) })) entries <- c(unlist(lapply(src_uni, call2text)), entries) generateUnitTestFile(filenames[k], entries, overwriteFile_b_1, verbose_b_1) }, simplify = FALSE) list(class = cn, filenames = rbindlist(rv)) }
/scratch/gouwar.j/cran-all/cranData/wyz.code.testthat/R/generateAllUnitTestsFromObject.R
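A hedged usage sketch for generateAllUnitTestsFromObject() / gautfo() defined above. It assumes an offensive-programming instrumented class such as Addition_TCFI_Partial_R6 shipped with wyz.code.offensiveProgramming; if the object carries no test case or function return type instrumentation, the function simply returns an explanatory message instead of writing files. The relative source path passed as sourceFile_s_1 is an assumption about where the class file sits inside the installed package.

```r
library(wyz.code.offensiveProgramming)
library(wyz.code.testthat)

# assumption: this instrumented demo class ships with wyz.code.offensiveProgramming
source(findFilesInPackage('Addition_TCFI_Partial_R6.R', 'wyz.code.offensiveProgramming')[1])
obj <- Addition_TCFI_Partial_R6$new()

target <- file.path(tempdir(), 'unit-tests')  # target folder must already exist
dir.create(target, showWarnings = FALSE)

rv <- gautfo(obj,
             'Addition_TCFI_Partial_R6.R',    # path relative to the installed package (assumption)
             'wyz.code.offensiveProgramming',
             target,
             overwriteFile_b_1 = TRUE, verbose_b_1 = TRUE)
rv  # either a list with the class name and a data.table of generated test_*.R files, or an explanatory message
```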
generateTestthatEnvelope <- function(entries_s, label_s_1 = 'some tests') { c( paste0("test_that('", label_s_1, "', {"), entries_s, '})\n' ) }
/scratch/gouwar.j/cran-all/cranData/wyz.code.testthat/R/generateTestthatEnvelope.R
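For illustration, generateTestthatEnvelope() simply wraps pre-formatted expectation lines in a test_that() block; the expectation strings below are hypothetical, and ::: is used because the function is internal.

```r
wyz.code.testthat:::generateTestthatEnvelope(
  c("expect_true(1 + 1 == 2)", "expect_false(1 == 2)"),
  "arithmetic sanity checks"
)
# c("test_that('arithmetic sanity checks', {",
#   "expect_true(1 + 1 == 2)",
#   "expect_false(1 == 2)",
#   "})\n")
```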
generateUnitTestFile <- function(filename_s_1, content_s, overwrite_b_1 = FALSE, verbose_b_1 = TRUE) { applyFilenameConvention <- function() { desired_suffix <- '.R' desired_prefix <- 'test_' bn <- basename(filename_s_1) dn <- dirname(filename_s_1) good_prefix <- grepl(paste0('^', desired_prefix), bn, perl = TRUE) good_suffix <- grepl(paste0(desired_suffix, '$'), bn, perl = TRUE) if (good_prefix && good_suffix) return(filename_s_1) if (good_prefix) return(file.path(dn, paste0(bn, desired_suffix))) if (good_suffix) return(file.path(dn, paste0(desired_prefix, bn))) file.path(dn, paste0(desired_prefix, bn, desired_suffix)) } fn <- applyFilenameConvention() b <- overwrite_b_1 || !file.exists(fn) if (b) { writeLines(content_s, con = fn) if (verbose_b_1) catn('wrote file', fn) } list(filename = fn, overwritten = b) }
/scratch/gouwar.j/cran-all/cranData/wyz.code.testthat/R/generateUnitTestFile.R
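A small sketch of the filename convention enforced by generateUnitTestFile(): whatever name is supplied, the written file ends up with a test_ prefix and a .R suffix. The content lines are hypothetical.

```r
out <- wyz.code.testthat::generateUnitTestFile(
  file.path(tempdir(), "myClass-add"),
  c("# generated test", "test_that('dummy', { expect_true(TRUE) })"),
  overwrite_b_1 = TRUE, verbose_b_1 = TRUE
)
out$filename     # .../test_myClass-add.R
out$overwritten  # TRUE here, since overwrite_b_1 was TRUE (or the file did not exist yet)
```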
opTestthatInformation <- function() { stratum <- buildIdentityList(c('core', paste0('layer_', 1:3))) phasing <- buildIdentityList(c('design', 'build', 'test', 'run', 'maintain', 'evolve', 'transversal')) intent <- buildIdentityList(c('parts_building', 'parts_assembly', 'quality_control', 'statistics', 'feedback', 'content_generation', 'utilities')) category <- buildIdentityList(c('function', 'class', 'data')) nature <- buildIdentityList(c('exported', 'internal')) buildList <- function(name_s_1, category_s_1, nature_s_1, stratum_s_1, phasing_s_1, intent_s_1) { list(name = name_s_1, category = category_s_1, nature = nature_s_1, stratum = stratum_s_1, phasing = phasing_s_1, intent = intent_s_1 ) } dt <- data.table::rbindlist(list( buildList("generateAllUnitTestsFromObject", category$FUNCTION, nature$EXPORTED, stratum$CORE, phasing$BUILD, intent$PARTS_ASSEMBLY), buildList("gautfo", category$FUNCTION, nature$EXPORTED, stratum$CORE, phasing$BUILD, intent$PARTS_ASSEMBLY), buildList("generateTestthatEnvelope", category$FUNCTION, nature$INTERNAL, stratum$LAYER_2, phasing$RUN, intent$PARTS_ASSEMBLY), buildList("generateUnitTestFile", category$FUNCTION, nature$EXPORTED, stratum$LAYER_3, phasing$RUN, intent$PARTS_ASSEMBLY), buildList("reifyObject", category$FUNCTION, nature$INTERNAL, stratum$LAYER_1, phasing$BUILD, intent$PARTS_BUILDING), buildList("testthatFactory", category$FUNCTION, nature$EXPORTED, stratum$LAYER_1, phasing$BUILD, intent$CONTENT_GENERATION), buildList("transformValuesInCode", category$FUNCTION, nature$INTERNAL, stratum$LAYER_1, phasing$BUILD, intent$PARTS_BUILDING), buildList("opTestthatInformation",category$FUNCTION, nature$EXPORTED, stratum$LAYER_3, phasing$RUN, intent$FEEDBACK) )) name <- NULL # nse dt[order(name)] }
/scratch/gouwar.j/cran-all/cranData/wyz.code.testthat/R/opTestthatInformation.R
abort <- function(msg_s_1, ...) { stop(paste(msg_s_1, ...)) } catn <- function(...) cat(..., '\n') strBracket <- function(text_s_n) { paste0('[', text_s_n, ']') } guardExecution <- function(yourExpression_ex, instrumentWarnings_b = TRUE) { if (instrumentWarnings_b) { tryCatch(yourExpression_ex, error = function(e) e, warning = function(w) w) } else { tryCatch(yourExpression_ex, error = function(e) e) } } buildIdentityList <- function(entries_s) { d <- toupper(entries_s) names(d) <- gsub('[^A-Z0-9_]', '', d, perl = TRUE) as.list(d) }
/scratch/gouwar.j/cran-all/cranData/wyz.code.testthat/R/outOfPackage.R
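Small illustrations of the helpers above; the results follow directly from the definitions (::: is used because these helpers are internal to wyz.code.testthat).

```r
wyz.code.testthat:::strBracket("x12path")                    # "[x12path]"
wyz.code.testthat:::buildIdentityList(c('core', 'layer_1'))  # list(CORE = "CORE", LAYER_1 = "LAYER_1")
wyz.code.testthat:::guardExecution(log(-1))                  # returns the warning condition instead of raising it
wyz.code.testthat:::guardExecution(stop("boom"))             # returns the error condition
```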
reifyObject <- function(object_o_1, sourceFile_s_1, sourcePackage_s_1) { on <- getObjectClassKind(object_o_1) if (is.na(on)) return(NA) classnames <- class(object_o_1) cn <- setdiff(classnames, c('environment', 'R6'))[1] fn <- guardExecution({get(cn)}) # beware R6 constructor is not a function # but an environment, so do not use mode = 'function' with get here. if (on == 'R6') fn <- fn$new #cat('class', strBracket(cn), 'classnames', # paste(classnames, sep = '', collapse = ', '), '\n') if (!is.function(fn)) abort('unable to retrieve object signature for object', strBracket(cn)) fo <- formals(fn) list(to_source = call('source', call('system.file', sourceFile_s_1, package = sourcePackage_s_1)), to_reify = if (length(fo) > 0) call(cn, unlist(fo)) else call(cn) ) }
/scratch/gouwar.j/cran-all/cranData/wyz.code.testthat/R/reifyObject.R
testthatFactory <- function() { manageCorrect <- function(call_ca_2) { c( call('expect_true', call_ca_2[[1]]), call('expect_true', call_ca_2[[2]]) ) } manageErroneous <- function(call_ca_2) { c( call('expect_true', call_ca_2[[1]]), call('expect_false', call_ca_2[[2]]) ) } manageFailure <- function(call_ca_3) { c( call('expect_equal', call_ca_3[[1]], call_ca_3[[3]]), call('expect_equal', call_ca_3[[2]], 'failure') ) } function(call_ca_2m, testTarget_s_1) { # if (!is.call(call_ca_1)) # abort('parameter call_ca_1 must be a call') fn <- switch(testTarget_s_1, 'correct' = manageCorrect, 'erroneous' = manageErroneous, manageFailure ) fn(call_ca_2m) } }
/scratch/gouwar.j/cran-all/cranData/wyz.code.testthat/R/testthatFactory.R
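A sketch of the factory above: given two (or, for the failure case, three) quoted expressions it emits the matching testthat expectation calls. The syn$... expressions are hypothetical placeholders for the synthesis fields used in generateAllUnitTestsFromObject().

```r
f <- wyz.code.testthat::testthatFactory()
checks <- list(quote(syn$status), quote(syn$value_check))
f(checks, 'correct')
# list(expect_true(syn$status), expect_true(syn$value_check))
f(checks, 'erroneous')
# list(expect_true(syn$status), expect_false(syn$value_check))
```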
transformValuesInCode <- function(values_) { gsub('^\\s*list\\(\\s*(.*?)\\s*\\)\\s*$', '\\1', deparse(substitute(values_)), perl = TRUE) }
/scratch/gouwar.j/cran-all/cranData/wyz.code.testthat/R/transformValuesInCode.R
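For illustration, transformValuesInCode() deparses its unevaluated argument and strips the surrounding list(...) wrapper (internal function, hence :::).

```r
wyz.code.testthat:::transformValuesInCode(list(1L, "a", TRUE))  # '1L, "a", TRUE'
wyz.code.testthat:::transformValuesInCode(list(x = 1:3))        # 'x = 1:3'
```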
## ----setup, include = FALSE--------------------------------------------------- knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) source('common-style.R')
/scratch/gouwar.j/cran-all/cranData/wyz.code.testthat/inst/doc/release-notes.R
--- title: Release notes of package wyz.code.testthat author: Fabien GELINEAU date: 2020-04-08 output: rmarkdown::html_vignette: number_sections: false toc: false css: style.css vignette: > %\VignetteIndexEntry{ Release notes of package wyz.code.testthat } %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} %\usepackage[utf8]{inputenc} %\declareUnicodeCharacter{25B6}{\blacktriangleright} --- ```{r setup, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) source('common-style.R') ``` <img src='images/testthat-hex.png' alt='offensive programming - R documentation' style='width:30%'/> # name [awesome-asterion-upsilon] package-version [1.1.20] timestamp [2021-10-05 20:29:24] 1. CRAN information on the obsolescence of lubridate - the dependency had to be removed. 1. lubridate::date() was not used but was specified in `r citefile("DESCRIPTION")` and `r citefile("NAMESPACE")` 1. Test, Duration: `r citefigure('0.7s')`, OK: `r citefigure('21')` 1. `r citeexec('R CMD check')`, Duration: `r citefigure('21.5s')`, 0 errors βœ“ | 0 warnings βœ“ | 0 notes βœ“ 1. `r citeval('Commercial software release management')` 1. `r citefolder('vignette')` content update 1. `r citeop("git")` alignment # name [awesome-asterion-tau] package-version [1.1.19] timestamp [2020-11-09 20:13:02] 1. enforced R 4.0 1. Test, Duration: `r citefigure('0.6s')`, OK: `r citefigure('21')` 1. `r citeexec('R CMD check')`, Duration: `r citefigure('23.9s')`, 0 errors βœ“ | 0 warnings βœ“ | 0 notes βœ“ 1. `r citeval('Commercial software release management')` 1. `r citefolder('vignette')` content update 1. `r citeop("git")` alignment # 1.1.18 - awesome-asterion-sigma - 2020-05-04 1. Test, Duration: `r citefigure('0.6s')`, OK: `r citefigure('21')` 1. `r citeexec('R CMD check')`, Duration: `r citefigure('23.2s')`, 0 errors βœ“ | 1 warning βœ“ | 0 notes βœ“ 1. `r citeval('Commercial software release management')` 1. `r citefolder('vignette')` structure update 1. `r citefolder('vignette')` content update # 1.1.17 - April, 22nd 2020 1. updated all documentation 1. Upgraded and updated vignette 1. Test, Duration: `r citefigure('0.6s')`, OK: `r citefigure('21')` 1. `r citeexec('R CMD check')`, Duration: `r citefigure('24.1s')`, 0 errors βœ“ | 0 warnings βœ“ | 0 notes βœ“ # 1.1.9 - January 2020 Main improvements are 1. `r citefun("packageFunctionsInformation")` renamed to `r citefun("opTestthatInformation")` 1. `r citefun("opTestthatInformation")` verified and upgraded 1. completed manual pages - 4 manual pages 1. enhanced manual pages documentation: review of all contents and corrections 1. upgraded vignette - 1 vignette 1. completed test panel - 5 test files - 20 tests 1. Worked on test coverage to reach a level higher than 99% 1. Timing for tests 1s, checks 25s # 1.1.6 Main improvements are 1. corrected erroneous writing outside of the tempdir folder 1. completed unit tests 1. enforced higher code coverage (up to 99.40%) 1. cleaned up package dependencies. This release fully replaces all older ones, which are now considered obsolete. Keep up the pace, and upgrade your packages if you do not use this version or a higher one!
/scratch/gouwar.j/cran-all/cranData/wyz.code.testthat/inst/doc/release-notes.Rmd
citeit <- function(x_s) paste0('<cite class="itb">', x_s, '</cite>') citefun <- function(x_s) paste0('<cite class="it">', x_s, '</cite>') citeop <- function(x_s) paste0('<cite class="op">', x_s, '</cite>') citearg <- function(x_s) paste0('<cite class="os">', x_s, '</cite>') citeval <- function(x_s) paste0('<cite class="ea">', x_s, '</cite>') citesection <- function(x_s) paste0('<cite class="bj">', x_s, '</cite>') citecode <- function(x_s) paste0('<cite class="oc">', x_s, '</cite>') citechar <- function(x_s) paste0('<cite class="isa">', x_s, '</cite>') cmt <- function(x_s) paste0('<cite class="comment">', x_s, '</cite>') citefigure <- function(x_s) paste0('<cite class="figure">', x_s, '</cite>') citetime <- function(x_s) paste0('<cite class="time">', x_s, '</cite>') citefile <- function(x_s) paste0('<cite class="file">', x_s, '</cite>') citefolder <- function(x_s) paste0('<cite class="folder">', x_s, '</cite>') citeexec <- function(x_s) paste0('<cite class="exec">', x_s, '</cite>') citeEA <- function() { n <- 0 function(x_s) { n <<- n + 1 paste0('<cite class="oc"> EA#', n, ' ', x_s, '</cite>') } } cmt <- function(x_s) paste0('<cite class="comment">', x_s, '</cite>') rdoc <- citeval('wyz.code.rdoc') roxy <- citeval('roxygen2') op <- citeval('wyz.code.offensiveProgramming') R <- citeit('R') brkfun <- function(x_s) { paste(sapply(x_s, function(e) paste('\u25b6', e, '<br/>')), collapse = '') } showTable <- function(x_dt_1) { DT::datatable(x_dt_1, options = list(pageLength = 25)) }
/scratch/gouwar.j/cran-all/cranData/wyz.code.testthat/vignettes/common-style.R
--- title: Release notes of package wyz.code.testthat author: Fabien GELINEAU date: 2020-04-08 output: rmarkdown::html_vignette: number_sections: false toc: false css: style.css vignette: > %\VignetteIndexEntry{ Release notes of package wyz.code.testthat } %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} %\usepackage[utf8]{inputenc} %\declareUnicodeCharacter{25B6}{\blacktriangleright} --- ```{r setup, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) source('common-style.R') ``` <img src='images/testthat-hex.png' alt='offensive programming - R documentation' style='width:30%'/> # name [awesome-asterion-upsilon] package-version [1.1.20] timestamp [2021-10-05 20:29:24] 1. CRAN information on the obsolescence of lubridate - the dependency had to be removed. 1. lubridate::date() was not used but was specified in `r citefile("DESCRIPTION")` and `r citefile("NAMESPACE")` 1. Test, Duration: `r citefigure('0.7s')`, OK: `r citefigure('21')` 1. `r citeexec('R CMD check')`, Duration: `r citefigure('21.5s')`, 0 errors βœ“ | 0 warnings βœ“ | 0 notes βœ“ 1. `r citeval('Commercial software release management')` 1. `r citefolder('vignette')` content update 1. `r citeop("git")` alignment # name [awesome-asterion-tau] package-version [1.1.19] timestamp [2020-11-09 20:13:02] 1. enforced R 4.0 1. Test, Duration: `r citefigure('0.6s')`, OK: `r citefigure('21')` 1. `r citeexec('R CMD check')`, Duration: `r citefigure('23.9s')`, 0 errors βœ“ | 0 warnings βœ“ | 0 notes βœ“ 1. `r citeval('Commercial software release management')` 1. `r citefolder('vignette')` content update 1. `r citeop("git")` alignment # 1.1.18 - awesome-asterion-sigma - 2020-05-04 1. Test, Duration: `r citefigure('0.6s')`, OK: `r citefigure('21')` 1. `r citeexec('R CMD check')`, Duration: `r citefigure('23.2s')`, 0 errors βœ“ | 1 warning βœ“ | 0 notes βœ“ 1. `r citeval('Commercial software release management')` 1. `r citefolder('vignette')` structure update 1. `r citefolder('vignette')` content update # 1.1.17 - April, 22nd 2020 1. updated all documentation 1. Upgraded and updated vignette 1. Test, Duration: `r citefigure('0.6s')`, OK: `r citefigure('21')` 1. `r citeexec('R CMD check')`, Duration: `r citefigure('24.1s')`, 0 errors βœ“ | 0 warnings βœ“ | 0 notes βœ“ # 1.1.9 - January 2020 Main improvements are 1. `r citefun("packageFunctionsInformation")` renamed to `r citefun("opTestthatInformation")` 1. `r citefun("opTestthatInformation")` verified and upgraded 1. completed manual pages - 4 manual pages 1. enhanced manual pages documentation: review of all contents and corrections 1. upgraded vignette - 1 vignette 1. completed test panel - 5 test files - 20 tests 1. Worked on test coverage to reach a level higher than 99% 1. Timing for tests 1s, checks 25s # 1.1.6 Main improvements are 1. corrected erroneous writing outside of the tempdir folder 1. completed unit tests 1. enforced higher code coverage (up to 99.40%) 1. cleaned up package dependencies. This release fully replaces all older ones, which are now considered obsolete. Keep up the pace, and upgrade your packages if you do not use this version or a higher one!
/scratch/gouwar.j/cran-all/cranData/wyz.code.testthat/vignettes/release-notes.Rmd
#main function: builds and runs the Perl command that calls the main parsing script options("encoding" = "UTF-8") xparse <- function(json_path = "",verbose=FALSE) { path = paste(.libPaths()[1], "x.ent/Perl", sep='/') if(json_path == "") { command = paste("perl -I \"",path, "\" \"",path, "/", "Main.pl\"", sep='') } else { command = paste("perl -I \"",path, "\" \"",path, "/", "Main.pl\""," \"",json_path,"\"", sep='') } print(command) print("Please wait ....") test <- try(system(command, intern=TRUE,wait=TRUE)) #rm(list=ls()) gc(verbose=T) #garbage collection to free up memory print(test) print("Use the functions xshow(...), xhist(...), xplot(...) to view the results.") } xentity <- function() { conf = fromJSON(paste(.libPaths()[1], "x.ent/www/config/ini.json", sep='/')) lst_tag <- c(); #dico if(length(conf$dico$tag) > 0) { for(i in 1:length(conf$dico$tag)) { lst_tag <-add_unique(lst_tag,conf$dico$tag[i]) } } #unitex if(length(conf$unitex$result$tag) > 0) { for(i in 1:length(conf$unitex$result$tag)) { lst_tag <-add_unique(lst_tag,conf$unitex$result$tag[i]) } } return(lst_tag) } xrelation<- function() { conf = fromJSON(paste(.libPaths()[1], "x.ent/www/config/ini.json", sep='/')) lst_tag <- c(); #relation if(length(conf$relation$link) > 0) { for(i in 1:length(conf$relation$link)) { lst_tag <-add_unique(lst_tag,conf$relation$link[i]) } } return(lst_tag) }
/scratch/gouwar.j/cran-all/cranData/x.ent/R/parse.R
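A hedged usage sketch of the parsing entry points above, assuming Perl and the ini.json configuration shipped with x.ent are in place; the custom JSON path is hypothetical.

```r
library(x.ent)
xparse()                          # parse the corpus declared in the default configuration
xparse("path/to/my_config.json")  # or point it at a custom configuration (hypothetical path)
xentity()                         # entity tags declared in the configuration
xrelation()                       # relation tags declared in the configuration
```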
#count the number of occurrences of a substring within a string options(encoding="utf-8") str_count <- function(x, pattern, sep=""){ unlist(lapply( strsplit(x, sep), function(z) na.omit(length(grep(pattern, z))) )) } #check the current list; if the element doesn't exist in the list then add it add_unique<-function(list,value) { if(!is.null(value)) { ifelse (value %in% list, return(list) , return(c(list,value)) ) } } #show all files in the result xfile <- function(sep=":") { tryCatch( { conf = fromJSON(paste(.libPaths()[1], "x.ent/www/config/ini.json", sep='/')) data <- readLines(conf$result$file) lst <- c(); #read line by line for(i in 1:length(data)) { if(nchar(data[i]) > 0) { v <- unlist(strsplit(data[i], sep))[1] lst <- add_unique(lst,v) } } return(lst) }, error=function(cond) { message("There are problems in the paths, please use the command xconfig() to verify your parameters!") return(NA) }, warning=function(cond) { message("There are problems in the paths, please use the command xconfig() to verify your parameters!") return(NULL) }, finally={ rm(list=ls()) }) }
/scratch/gouwar.j/cran-all/cranData/x.ent/R/stat.R
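Small illustrations of the helpers above; str_count() and add_unique() are treated as internal (hence :::, assuming they are not exported), and xfile() requires a corpus that has already been parsed with xparse().

```r
library(x.ent)
x.ent:::str_count("banana", "a")      # 3 (sep = "" splits the string into single characters)
x.ent:::add_unique(c("a", "b"), "b")  # c("a", "b")      - already present
x.ent:::add_unique(c("a", "b"), "c")  # c("a", "b", "c") - appended
xfile()                               # names of all documents listed in the result file
```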
upload_dico <- function(file){ if(!grepl(".csv$", file) && !grepl(".txt$", file)){ stop("Uploaded file must be a .csv or .txt file!") } file.copy(file,paste(.libPaths()[1],"x.ent/dico/",sep="/")) }
/scratch/gouwar.j/cran-all/cranData/x.ent/R/upload.R
# returns string w/o leading or trailing whitespace trim <- function (x) gsub("^\\s+|\\s+$", "", x)
/scratch/gouwar.j/cran-all/cranData/x.ent/R/utils.R
xdata <- function(e=NULL) { tryCatch( { #create a data frame conf = fromJSON(paste(.libPaths()[1], "x.ent/www/config/ini.json", sep='/')) path = conf$result$file; lst_f <- xfile(sep=":") #get all tags in the file config lst_tag <- xentity(); lst_tag <-add_unique(lst_tag,xrelation()) dta <- data.frame() reg_ent = ":\\$:" reg_rel = ":\\$\\$:" dta <- data.frame(file=lst_f) #read file output.txt text <- readLines(path) if(is.null(e)) { for(i in 1:length(lst_tag)) { dta[,lst_tag[i]] <- "N/A" } for(i in 1:length(text)) { #get name of file f <- unlist(strsplit(text[i],":"))[1] #find entity if(grepl(pattern=reg_ent,x=text[i]))#entity { data_ele <- "" eles <- unlist(strsplit(text[i],":")) if(length(eles) == 4) { data_ele <- eles[4] } else if(length(eles) >= 5) { data_ele <- eles[4] for(j in 5:length(eles)) { data_ele <- paste(data_ele,eles[j],sep="; ") } } dta[dta$file == f,eles[2]] <- data_ele } #relation if(grepl(pattern=reg_rel,x=text[i])) { eles <- unlist(strsplit(text[i],reg_rel)) col = sub(pattern = paste(f,":",sep=""), replacement = "",x = eles[1]) col = gsub(":$", "",col, perl=TRUE)#delete ":" at the end of sentence if(col %in% names(dta)) { if(dta[dta$file == f,col] == "N/A") { dta[dta$file == f,col] <- eles[2] } else { dta[dta$file == f,col] <- paste(dta[dta$file == f,col],eles[2],sep =";") } } } } } else { for(i in 1:length(e)) { dta[,e[i]] <- "N/A" } for(i in 1:length(text)) { #get name of file f <- unlist(strsplit(text[i],":"))[1] for(j in 1:length(e)) { #entity reg = paste(f,":",e[j],reg_ent,sep="") #find all if(grepl(pattern=reg,x=text[i]))#entity { data_ele <- "" eles <- unlist(strsplit(text[i],":")) if(length(eles) == 4) { data_ele <- eles[4] } else if(length(eles) >= 5) { data_ele <- eles[4] for(j in 5:length(eles)) { data_ele <- paste(data_ele,eles[j],sep="; ") } } dta[dta$file == f,eles[2]] <- data_ele } #relation reg = paste(f,":",e[j],reg_rel,sep="") if(grepl(pattern=reg,x=text[i]))#relation { result = gsub(reg, "",text[i], perl=TRUE) if(e[j] %in% names(dta)) { if(dta[dta$file == f,e[j]] == "N/A") { dta[dta$file == f,e[j]] <- result } else { dta[dta$file == f,e[j]] <- paste(dta[dta$file == f,e[j]],result,sep =";") } } } } } } return(dta) }, error=function(cond) { message("Parameters are incorrect or there are problems in paths, please check your parameters!") }, warning=function(cond) { message("Parameters are incorrect or there are problems in paths, please check your parameters!") }, finally={ rm(list=ls()) }) }
/scratch/gouwar.j/cran-all/cranData/x.ent/R/xdata.R
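A hedged sketch of xdata(): with no argument it returns one row per parsed document and one column per configured tag; passing tag names restricts the columns. The tags 'p' and 't' are hypothetical and must exist in the configuration.

```r
library(x.ent)
d  <- xdata()             # every entity and relation tag
d2 <- xdata(c("p", "t"))  # only the hypothetical tags 'p' and 't'
head(d2)
```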
xdata_value <- function(v, sort = "a") { tryCatch( { if(is.null(v)) { print("You must enter an entity represented in the results, please!") } else { conf = fromJSON(paste(.libPaths()[1], "x.ent/www/config/ini.json", sep='/')) path = conf$result$file; lst_f <- xfile(sep=":") #get all tags in the file config lst_tag <- xentity(); lst_tag <-add_unique(lst_tag,xrelation()) dta <- data.frame() reg_ent = ":\\$:" reg_rel = ":\\$\\$:" if(v %in% lst_tag)#only a column { lst <- c() #data frame, local parameter, use as hash dt <- data.frame(value=character(0),freq=integer(0),stringsAsFactors=FALSE)#store values of column and frequency found text <- readLines(path) for(i in 1:length(text)) { #get name of file f_name <- unlist(strsplit(text[i],":"))[1] #creat a regular expression ex=> filename:p:$: reg = paste(f_name,":",v,reg_ent,sep="") if(grepl(pattern=reg,x=text[i]))#entity { #get value eles <- unlist(strsplit(text[i],":")) for(j in 4:length(eles)) { #lst <- add_unique(lst,eles[j]) if(length(dt$value[dt$value %in% eles[j]]) > 0) { dt[dt$value == eles[j],2] = dt[dt$value == eles[j],2] + 1; } else#add new value { dt <- rbind(dt,data.frame(value=eles[j],freq=1)) } } next } #creat a regular expression ex=> filename:p:$$: reg = paste(f_name,":",v,reg_rel,sep="") if(grepl(pattern=reg,x=text[i]))#relation { #replace le result file:p:s:$$:e1:e2:1 result = gsub(reg, "",text[i], perl=TRUE) #result = gsub(":$", "",result, perl=TRUE) #lst <- add_unique(lst,result) if(length(dt$value[dt$value %in% result]) > 0) { dt[dt$value == result,2] = dt[dt$value == result,2] + 1; } else#add new value { dt <- rbind(dt,data.frame(value=result,freq=1)) } } } #convert to vector value <- as.vector(dt$value) freq <- as.vector(dt$freq) dt <- data.frame(value=value,freq=freq) if(sort == "a") { dt <- dt[order(dt$value,-dt$freq),] } else { dt <- dt[order(-dt$freq,dt$value),] } lst <- dt$value frq <- dt$freq dta <- data.frame(value=lst,freq=frq)#global parameter if(nrow(dta)>0) { dta$freq = formatC(dta$freq,digits=0,format="f") } } else { print("The tag isn't valid!") return(NULL) } return(dta) } }, error=function(cond) { message("Parameters are incorrect or there are problems in paths, please check your parameters!") }, warning=function(cond) { message("Parameters are incorrect or there are problems in paths, please check your parameters!") }, finally={ rm(list=ls()) }) }
/scratch/gouwar.j/cran-all/cranData/x.ent/R/xdata_value.R
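A hedged sketch of xdata_value(): it tabulates the distinct values extracted for a single tag together with their frequencies; the tag 'p' is hypothetical and must be one of the tags returned by xentity() or xrelation().

```r
library(x.ent)
xdata_value("p")              # data.frame(value, freq), sorted alphabetically by value
xdata_value("p", sort = "f")  # any value other than "a" sorts by decreasing frequency
```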
#draw a graph: histogram xhist <- function(v="") { tryCatch( { #create a data frame conf = fromJSON(paste(.libPaths()[1], "x.ent/www/config/ini.json", sep='/')) lst_f <- xfile(sep=":") value <- c() date <- c() for(i in 1:length(lst_f)) { value <- c(value,0) date <- c(date,0) } d <- data.frame(file=lst_f,date=date,value_date=value,visible=value) data <- readLines(conf$result$file) #check entity reg = ":\\$:" for(i in 1:length(data)) { #get name of file f <- unlist(strsplit(data[i],":"))[1] #find all on data if(v == "") { #update date #format dd.mm.yyyyy reg_date1 <- ":([[:digit:]]{2}).([[:digit:]]{2}).([[:digit:]]{4}):" if(grepl(pattern = reg_date1, x = data[i])) { date <- str_extract(data[i],reg_date1) year <- sub(pattern = reg_date1, replacement = "\\3", x = date) month <- sub(pattern = reg_date1, replacement = "\\2", x = date) d[d$file == f,2] <- paste(month,year,sep=".") d[d$file == f,3] <- as.numeric(paste(year,month,sep="")) } #format mm.yyyyy reg_date1 <- ":([[:digit:]]{2}).([[:digit:]]{4}):" if(grepl(pattern = reg_date1, x = data[i])) { date <- str_extract(data[i],reg_date1) year <- sub(pattern = reg_date1, replacement = "\\2", x = date) month <- sub(pattern = reg_date1, replacement = "\\1", x = date) d[d$file == f,2] <- paste(month,year,sep=".") d[d$file == f,3] <- as.numeric(paste(year,month,sep="")) } if(!is.na(f)) { d[d$file == f,4] <- 1 } } else { #format dd.mm.yyyyy reg_date1 <- ":([[:digit:]]{2}).([[:digit:]]{2}).([[:digit:]]{4}):" if(grepl(pattern = reg_date1, x = data[i])) { date <- str_extract(data[i],reg_date1) year <- sub(pattern = reg_date1, replacement = "\\3", x = date) month <- sub(pattern = reg_date1, replacement = "\\2", x = date) d[d$file == f,2] <- paste(month,year,sep=".") d[d$file == f,3] <- as.numeric(paste(year,month,sep="")) } #format mm.yyyyy reg_date1 <- ":([[:digit:]]{2}).([[:digit:]]{4}):" if(grepl(pattern = reg_date1, x = data[i])) { date <- str_extract(data[i],reg_date1) year <- sub(pattern = reg_date1, replacement = "\\2", x = date) month <- sub(pattern = reg_date1, replacement = "\\1", x = date) d[d$file == f,2] <- paste(month,year,sep=".") d[d$file == f,3] <- as.numeric(paste(year,month,sep="")) } #lst <- c(lst,str_count(x = tolower(data[i]), pattern = reg1 , sep="\n")) #check 0 or 1 at the end of relation if(grepl(pattern = ":1$", x = v) || grepl(pattern = ":0$", x = v)) { reg_entity = paste(":",v,sep="") } else { reg_entity = paste(":",v,":",sep="") } count <- str_count(x = tolower(data[i]), pattern = tolower(reg_entity) , sep="\n") if(count > 0) { #update value in the d[d$file == f,4] <- count } } } if(v=="") { ylabel = "All documents" } else { ylabel = paste("Documents contain the key:",v,sep = " ") } if(length(d[!is.na(d$value_date) & (d$value_date > 0) & (d$visible > 0),3])>0) { par(las = 0)#load default par(mfrow=c(1,1))#1 row, 1 column h1 = hist(d[!is.na(d$value_date) & (d$value_date > 0) & (d$visible > 0),3],plot = FALSE) hist(d[!is.na(d$value_date) & (d$value_date > 0) & (d$visible > 0),3], ylim=c(0,max(h1$count)+2) ,breaks = 12, col="blue",labels=TRUE,xaxt="n",border = "pink",main="Histogram of bulletin: date",xlab="Date",ylab= ylabel) axis(side=1, at = d$value_date, labels= d$date) } else { print("No data available") } #return a data frame for users check return(d) }, error=function(cond) { message("Error: Parameters are incorrect or there are problems in the paths, please check your parameters!") }, warning=function(cond) { message("Warning: Parameters are incorrect or there are problems in the paths, please check your parameters!") }, 
finally={ rm(list=ls()) }) }
/scratch/gouwar.j/cran-all/cranData/x.ent/R/xhist.R
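A sketch of xhist(): without an argument it draws a histogram of the bulletin dates over all documents; with a key it restricts the count to documents containing that key (the key below is hypothetical). The data frame used for plotting is returned.

```r
library(x.ent)
xhist()                  # all documents carrying a recognisable date
d <- xhist("maladie_x")  # only documents containing the hypothetical key 'maladie_x'
head(d)
```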
#draw graph: plot xplot <- function(v1="",v2="",t="") { tryCatch( { #create a data frame conf = fromJSON(paste(.libPaths()[1], "x.ent/www/config/ini.json", sep='/')) lst_f <- xfile(sep=":") init <- c() init_pos <- c() for(i in 1:length(lst_f)) { init <- c(init,0) init_pos <- c(init_pos,1) } #create a table for stocking data d <- data.frame(file=lst_f,date=init,value_date=init,visible=init_pos) data <- readLines(conf$result$file) #check entity e1 #check entity reg = ":\\$:" reg_req = ""; if((v1 == "") && (v2 == "")) { stop("Entity v1 or v2 must have a vulue") } if(length(v1) > 1) { stop("Entity v1 has only 0 or 1 value") } if((v1 != "") && (v2 == "")) { #find only a field v1 reg_req = paste(":",v1,":",sep="") d[,v1] <- init } if((v1 != "") && (v2 != "")) { for(j in 1:length(v2)) { d[,paste(v1,"-",v2[j],sep="")] <- init } } if((v1 == "") && (v2 != "")) { for(j in 1:length(v2)) { d[,v2[j]] <- init } } for(i in 1:length(data)) { #get name of file f <- unlist(strsplit(data[i],":"))[1] #fill data to data frame #add value of year #format dd.mm.yyyy reg_date1 <- ":([[:digit:]]{2}).([[:digit:]]{2}).([[:digit:]]{4}):" if(grepl(pattern = reg_date1, x = data[i])) { date <- str_extract(data[i],reg_date1) year <- sub(pattern = reg_date1, replacement = "\\3", x = date) month <- sub(pattern = reg_date1, replacement = "\\2", x = date) d[d$file == f,2] <- paste(month,year,sep=".") d[d$file == f,3] <- as.numeric(paste(year,month,sep="")) } #format mm.yyyyy reg_date1 <- ":([[:digit:]]{2}).([[:digit:]]{4}):" if(grepl(pattern = reg_date1, x = data[i])) { date <- str_extract(data[i],reg_date1) year <- sub(pattern = reg_date1, replacement = "\\2", x = date) month <- sub(pattern = reg_date1, replacement = "\\1", x = date) d[d$file == f,2] <- paste(month,year,sep=".") d[d$file == f,3] <- as.numeric(paste(year,month,sep="")) } #add value of entity if((v1 != "") && (v2 == "")) { #find only a field v1 if(grepl(pattern=reg,x=data[i])) { count <- str_count(x = tolower(data[i]), pattern = tolower(reg_req) , sep="\n") if(count > 0) { #update value in the d[d$file == f,5] <- count } } } else if((v1 == "") && (v2 != "")) { if(grepl(pattern=reg,x=data[i])) { for(j in 1:length(v2)) { reg_req = paste(":",v2[j],":",sep="") count <- str_count(x = tolower(data[i]), pattern = tolower(reg_req) , sep="\n") if(count > 0) { #update value in the d[d$file == f,j+4] <- count } } } } else { #more than deux entities for(j in 1:length(v2)) { reg_req = paste(":",v1,":",v2[j],":",sep="") count <- str_count(x = tolower(data[i]), pattern = tolower(reg_req) , sep="\n") if(count > 0) { #update value in the d[d$file == f,j+4] <- count #d[d$file == f,j+4] + count } } } } #filte the data following the time #if time if(any(t != "")) { reg_date = "([[:digit:]]{2}).([[:digit:]]{4})" if(length(t) == 1) #a precise day { if(grepl(pattern = reg_date, x = t)) { month1 <- sub(pattern = reg_date, replacement = "\\1", x = t[1]) year1 <- sub(pattern = reg_date, replacement = "\\2", x = t[1]) date1 = as.numeric(paste(year1,month1,sep="")) if(!is.na(date1)) { d[d$value_date != date1,4] <- 0 } } else { stop("Format of year (mm.yyyy) isn't valid, please check again!") } } if(length(t) == 2) { if((grepl(pattern=reg_date, x=t[1],perl=FALSE)) && (grepl(pattern=reg_date, x=t[2],perl=FALSE))) { month1 <- sub(pattern = reg_date, replacement = "\\1", x = t[1]) year1 <- sub(pattern = reg_date, replacement = "\\2", x = t[1]) date1 = as.numeric(paste(year1,month1,sep="")) month2 <- sub(pattern = reg_date, replacement = "\\1", x = t[2]) year2 <- sub(pattern = reg_date, 
replacement = "\\2", x = t[2]) date2 = as.numeric(paste(year2,month2,sep="")) if(!is.na(date1) & !is.na(date2)) { d[d$value_date < date1 | d$value_date > date2 ,4] <- 0 } } else { stop("Format of year (mm.yyyy) isn't valid, please check again!") } } if(length(t) > 2)#interval of date { stop("value of date isn't valid, there are two choise: a date (ex 02.2010) or interval of date (02.2010,02.2011) , please check again!") } } #draw graphe par(las = 0)#load default par(mfcol = c(ncol(d)-3,1),mar = c(0.5, 4.0, 0.5, 0.5), oma=c(1, 1, 4, 2)) test = d[d$value_date > 0 & d$visible == 1,] test <- test[order(test$value_date),] label_h = 0 for(i in 5:ncol(d)) { if(nrow(test) > 0) { plot(test[,i], axes = TRUE, col = ifelse(test[test$value_date > 0,i] > 0, "red","purple"), xaxt="n", ylim=c(0,1), xlab = "",cex=2.0, ylab = colnames(test)[i] ,pch=15 , lty=5) if(label_h == 0) { axis(3, at= 1:length(test[test$value_date>0,3]),labels=test$date,col = "violet", las = 2,col.axis = "blue",cex.lab=0.7,cex=0.7,cex.axis=0.7) label_h <- 1 } } else { print("No data available") } } if(nrow(test) > 0) { axis(1, at= 1:length(test[test$value_date>0,1]),labels=test$file, col = "violet", las = 2,col.axis = "blue",cex.lab=0.7,cex=0.7,cex.axis=0.7) #title("Comparison of every entity in documents") #legend("bottomleft", inset=.05, title="Visible", # c("0","1"), fill=terrain.colors(3), horiz=TRUE) } return(d) }, error=function(cond) { message("Error: Parameters are incorrect or there are problems in the paths, please check your parameters!") }, warning=function(cond) { message("Warning: Parameters are incorrect or there are problems in the paths, please check your parameters!") }, finally={ rm(list=ls()) }) }
/scratch/gouwar.j/cran-all/cranData/x.ent/R/xplot.R
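A hedged sketch of xplot(): v1 takes at most one entity value, v2 one or more values to cross with it, and t either a single month or an interval in mm.yyyy format. The entity values below are hypothetical; real ones can be looked up with xdata_value().

```r
library(x.ent)
xplot(v1 = "maladie_x", v2 = c("region_a", "region_b"))
xplot(v2 = c("region_a", "region_b"), t = c("01.2010", "12.2012"))
```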
#draw a graph: proposition xprop <- function(v1,v2,type=1) { tryCatch( { #this is a dataframe that stores the results extracted if(is.null(v1) || is.null(v2)) { print("Two arguments are required") } else { dta <- xdata() #list file in the corpus lst_f <- xfile(sep=":") df <- NULL #this is a variable that stores for xprop for(i in 1:length(v1)) { #add columns df1 <- data.frame("val" <- 0) df1[,v2] <- 0 df1[,"cat"] <- 0 for(j in 1:length(v2)) { str <- paste(v1[i],":",v2[j],":",sep="") for(k in 1:ncol(dta)) { rows <- grep(str,dta[,k]) #if column val is egal 0, update value this column, else add new row if(length(rows) > 0) { for(l in 1:length(rows)) { df1[1,"val"] <- v1[i] df1[1,v2[j]] <- 1 df1[1,"cat"] <- v2[j] if(is.null(df)) { df= df1 } else { df <- rbind(df,df1) } #reset df1[1,"val"] <- 0 df1[1,v2] <- 0 df1[1,"cat"] <- 0 } } } } } df = df[rowSums(df[v2]) == 1, ] if(type == 1) { bp = ggplot(df,aes(x =val ,fill = cat)) + geom_bar(position = "fill") bp + coord_flip() } else { bp = ggplot(df,aes(x = cat ,fill = val)) + geom_bar(position = "fill") bp + coord_flip() } } }, error=function(cond) { message("Parameters are incorrect or there are problems in the paths, please check your parameters!") }, warning=function(cond) { message("Parameters are incorrect or there are problems in the paths, please check your parameters!") }, finally={ rm(list=ls()) }) }
/scratch/gouwar.j/cran-all/cranData/x.ent/R/xprop.R
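A hedged sketch of xprop(): it cross-tabulates two sets of extracted values and draws stacked proportion bars with ggplot2; type selects whether the v1 values or the v2 categories form the axis. The values are hypothetical.

```r
library(x.ent)
xprop(c("maladie_x", "maladie_y"), c("region_a", "region_b"))            # one bar per v1 value
xprop(c("maladie_x", "maladie_y"), c("region_a", "region_b"), type = 2)  # one bar per v2 category
```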
xshow <- function(e=NULL,sort="a") { tryCatch( { dta <- data.frame() if(is.null(e))#argument default, get all data { #all columns dta = xdata(); } else#get a column from user { if(length(e) == 1)#if 1 argument or argument default { dta = xdata_value(e,sort) } else { dta = xdata(e) } } path = paste(.libPaths()[1],"x.ent/www/output.html",sep="/") html= print(xtable(dta),"html",file=path) html = gsub(";", "<br/>",html, perl=TRUE) html = paste("<meta http-equiv=Content-Type content=text/html; charset=utf-8>",html,sep="") write(html, file=path) browseURL(path) }, error=function(cond) { message("Parameters are incorrect or there are problems in paths, please check your parameters!") }, warning=function(cond) { message("Parameters are incorrect or there are problems in paths, please check your parameters!") }, finally={ rm(list=ls()) }) }
/scratch/gouwar.j/cran-all/cranData/x.ent/R/xshow.R
xtest <- function(v1,v2) { if((length(v1) < 1) || (length(v2) < 2)) { print("v1 must contain at least 1 element and v2 at least 2 elements") } else { #get the result as a data frame dt <- xdata() rows <- nrow(dt) #build a data frame to contain the result df <- NULL for(i in 1:length(v1)) { for(j in 1:length(v2)) { reg <- paste(v1[i],":",v2[j],sep="") df1 <- data.frame(entity = reg,stringsAsFactors=FALSE) reg <- paste(reg,":",sep="") for(r in 1:rows) { name_col <- paste("d",r,sep="") df1[,name_col] <- 0 #search each row of the data frame cols <- grep(reg,dt[r,]) if(length(cols) > 0) { df1[1,name_col] <- 1 } } if(is.null(df)) { df= df1 } else { df <- rbind(df,df1) } } } #transpose the data frame names <- df$entity df <- as.data.frame(t(df[,-1])) colnames(df) <- names n <- ncol(df) col_names <- names(df) #create a data frame to store every relation test result <- NULL if(n >= 2) { #pairwise comparisons for(i in 1:(n-1)) { for(j in (i+1):n) { mat <- NULL v1 = df[,i] v2 = df[,j] mat <- data.matrix(v1) mat <- cbind(mat,v2) #print labels label = paste(col_names[i],"/",col_names[j],sep= "") count1 <- 0 count2 <- 0 v <- c() if(sum(mat[,1]) > 0 && sum(mat[,2]) > 0) { rs1 <- data.frame(relation = label,stringsAsFactors=FALSE) #KOLMOGOROV test test <- ks.test(mat[,1], mat[,2]) rs1[,"KOLMOGOROV"] <- test$p.value #v <- c(v,test$p.value) #print(paste("KOLMOGOROV test:",test$p.value,sep="")) #WILCOXON test test = wilcox.test(mat[,1], mat[,2]) rs1[,"WILCOXON"] <- test$p.value #v <- c(v,test$p.value) #print(paste("WILCOXON test:",test$p.value,sep="")) #STUDENT test test = t.test(mat[,1], mat[,2]) rs1[,"STUDENT"] <- test$p.value #v <- c(v,test$p.value) #print(paste("STUDENT test:",test$p.value,sep="")) rs1[,"GrowthCurves"] <- NA tryCatch({ #growth curve comparison test test = compareGrowthCurves(mat[,1], as.matrix(mat[,2])) rs1[1,"GrowthCurves"] <- test$P.Value #v <- c(v,test$P.Value) #print(paste("growth curve comparison test:",test$P.Value,sep="")) #count1 = count1 + 1 }, error=function(cond) { message("Less than 2 groups to compare!") }, warning=function(cond) { message("Less than 2 groups to compare!") }, finally={ #rm(list=ls()) }) ############## if(is.null(result)) { result= rs1 } else { result <- rbind(result,rs1) } } } } } path = paste(.libPaths()[1],"x.ent/www/statistics.html",sep="/") html= print(xtable(result),"html",file=path) html = paste("<meta http-equiv=Content-Type content=text/html; charset=utf-8>",html,sep="") write(html, file=path) browseURL(path) return(df) } }
/scratch/gouwar.j/cran-all/cranData/x.ent/R/xtest.R
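A hedged sketch of xtest(): for every v1/v2 pair it builds a 0/1 presence indicator per document, runs Kolmogorov-Smirnov, Wilcoxon, Student and growth-curve comparison tests, writes the p-values to an HTML table and returns the indicator data frame. The values are hypothetical.

```r
library(x.ent)
ind <- xtest(c("maladie_x"), c("region_a", "region_b"))
head(ind)   # one 0/1 column per v1/v2 pair, one row per document
```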
### Class definitions ### ## Types for representations setClassUnion("listOrNULL", c("list", "NULL")) setClassUnion("tsOrNULL", c("ts", "NULL")) setClassUnion("matrixOrNULL", c("matrix", "NULL")) setClassUnion("dfOrNULL", c("data.frame", "NULL")) setClassUnion("characterOrNULL", c("character", "NULL")) setClassUnion("numericOrNULL", c("numeric", "NULL")) setClassUnion("listOrNULLOrnumeric", c("list", "numeric","NULL")) setClassUnion("listOrNULLOrcharacter", c("list", "character","NULL")) setClassUnion("numericOrNULLOrcharacter", c("numeric", "character","NULL")) ### Parameter Object class: x12Parameter ### setClass( Class="x12Parameter", representation=representation( #period="numeric", series.span="numericOrNULLOrcharacter", series.modelspan="numericOrNULLOrcharacter", #series.type="characterOrNULL", #decimals="numeric", transform.function="character", transform.power="numericOrNULL", transform.adjust="characterOrNULL", regression.variables="characterOrNULL", regression.user="characterOrNULL", regression.file="characterOrNULL", regression.usertype="characterOrNULL", regression.centeruser="characterOrNULL", regression.start="numericOrNULLOrcharacter", regression.aictest="characterOrNULL", #outlier="logical", outlier.types="characterOrNULL", outlier.critical="listOrNULLOrnumeric", outlier.span="numericOrNULLOrcharacter", outlier.method="characterOrNULL", identify="logical", identify.diff="numericOrNULL", identify.sdiff="numericOrNULL", identify.maxlag="numericOrNULL", arima.model="numericOrNULL", arima.smodel="numericOrNULL", arima.ar="numericOrNULLOrcharacter", arima.ma="numericOrNULLOrcharacter", automdl="logical", automdl.acceptdefault="logical", automdl.balanced="logical", automdl.maxorder="numeric", automdl.maxdiff="numeric", forecast_years="numericOrNULL", backcast_years="numericOrNULL", forecast_conf="numeric", forecast_save="character", estimate="logical", estimate.outofsample="logical", check="logical", check.maxlag="numericOrNULL", slidingspans="logical", slidingspans.fixmdl="characterOrNULL", slidingspans.fixreg="characterOrNULL", slidingspans.length="numericOrNULL", slidingspans.numspans="numericOrNULL", slidingspans.outlier="characterOrNULL", slidingspans.additivesa="characterOrNULL", slidingspans.start="numericOrNULLOrcharacter", history="logical", history.estimates="characterOrNULL", history.fixmdl="logical", history.fixreg="characterOrNULL", history.outlier="characterOrNULL", history.sadjlags="numericOrNULL", history.trendlags="numericOrNULL", history.start="numericOrNULLOrcharacter", history.target="characterOrNULL", x11.sigmalim="numericOrNULL", x11.type="characterOrNULL",#vorher onlytd="logical" x11.sfshort="logical", x11.samode="characterOrNULL", x11.seasonalma="characterOrNULL", x11.trendma="numericOrNULL", x11.appendfcst="logical", x11.appendbcst="logical", x11.calendarsigma="characterOrNULL", x11.excludefcst="logical", x11.final="character", x11regression="logical" # seats="logical", # seatsparameter="characterOrNULL" ), prototype=prototype( series.span=NULL, series.modelspan=NULL, #series.type=NULL, transform.function="auto", transform.power=NULL, transform.adjust=NULL, regression.variables=NULL, regression.user=NULL, regression.file=NULL, regression.usertype=NULL, regression.centeruser=NULL, regression.start=NULL, regression.aictest=NULL, #outlier=FALSE, outlier.types=NULL, outlier.critical=NULL, outlier.span=NULL, outlier.method=NULL, identify=FALSE, identify.diff=NULL, identify.sdiff=NULL, identify.maxlag=NULL, arima.model=NULL, arima.smodel=NULL, arima.ar=NULL, 
arima.ma=NULL, automdl=TRUE, automdl.acceptdefault=FALSE, automdl.balanced=TRUE, automdl.maxorder=c(3,2), automdl.maxdiff=c(1,1), forecast_years=1, backcast_years=NULL, forecast_conf=.95, forecast_save="ftr", estimate=FALSE, estimate.outofsample=TRUE, check=TRUE, check.maxlag=NULL, slidingspans=FALSE, slidingspans.fixmdl=NULL, slidingspans.fixreg=NULL, slidingspans.length=NULL, slidingspans.numspans=NULL, slidingspans.outlier=NULL, slidingspans.additivesa=NULL, slidingspans.start=NULL, history=FALSE, history.estimates=NULL, history.fixmdl=FALSE, history.fixreg=NULL, history.outlier=NULL, history.sadjlags=NULL, history.trendlags=NULL, history.start=NULL, history.target=NULL, x11.sigmalim=c(1.5,2.5), x11.type=NULL, x11.sfshort=FALSE, x11.samode=NULL, x11.seasonalma=NULL, x11.trendma=NULL, x11.appendfcst=TRUE, x11.appendbcst=FALSE, x11.calendarsigma=NULL, x11.excludefcst=FALSE, x11.final="user", x11regression=FALSE # seats=FALSE, # seatsparameter=NULL ), validity=function(object) { return(TRUE) } ) setClass( Class="spectrum", representation=representation( frequency="numeric", spectrum="numeric" ),prototype= prototype( frequency=new("numeric"), spectrum=new("numeric") ), validity=function(object) { length(object@spectrum)==length(object@frequency) } ) setClass( Class="fbcast", representation=representation( estimate="ts", lowerci="ts", upperci="ts" ),prototype= prototype( estimate=new("ts"), lowerci=new("ts"), upperci=new("ts") ), validity=function(object) { length(object@estimate)==length(object@lowerci)&&length(object@estimate)==length(object@upperci) } ) setClass( Class="x12BaseInfo", representation=representation( x12path = "characterOrNULL", #x13path = "characterOrNULL", use = "character", showWarnings = "logical" ),prototype= prototype( x12path = NULL, #x13path = NULL, use = "x12", showWarnings = FALSE ), validity=function(object) { (!is.null(object@x12path))&&object@use%in%c("x12") } ) setClass(Class="diagnostics",contains="list") ### Output Object class: x12Output ### setClass( Class="x12Output", representation=representation( a1="ts", d10="ts", d11="ts", d12="ts", d13="ts", d16="ts", c17="ts", d9="ts", e2="ts", d8="ts", b1="ts", td="tsOrNULL", otl="tsOrNULL", sp0="spectrum", sp1="spectrum", sp2="spectrum", spr="spectrum", forecast="fbcast", backcast="fbcast", dg="list", # seats="logical", file="character", tblnames="character", Rtblnames="character" ), prototype=prototype( a1=new("ts"), d10=new("ts"), d11=new("ts"), d12=new("ts"), d13=new("ts"), d16=new("ts"), c17=new("ts"), d9=new("ts"), e2=new("ts"), d8=new("ts"), b1=new("ts"), # td=new("ts"), # otl=new("ts"), sp0=new("spectrum"), sp1=new("spectrum"), sp2=new("spectrum"), spr=new("spectrum"), forecast=new("fbcast"), backcast=new("fbcast"), dg=new("diagnostics"), # seats=new("logical"), file=new("character"), tblnames=new("character"), Rtblnames=new("character") ), validity=function(object) { return(TRUE) } ) setClass( Class="x12Single", representation=representation( ts="ts", x12Parameter="x12Parameter", x12Output="x12Output", x12OldParameter="list", x12OldOutput="list", tsName="characterOrNULL", firstRun="logical" ), prototype=prototype( ts=new("ts"), x12Parameter=new("x12Parameter"), x12Output=new("x12Output"), x12OldParameter=new("list"), x12OldOutput=new("list"), tsName=NULL, firstRun=FALSE ), validity=function(object) { return(TRUE) } ) setClass(Class="x12List",contains="list",validity=function(object){ all(lapply(object,class),"x12Single") }) setClass( Class="x12Batch", representation=representation( x12List="x12List", 
x12BaseInfo="x12BaseInfo" ), prototype=prototype( x12List=new("x12List"), x12BaseInfo=new("x12BaseInfo",use="x12",x12path="x12adummy") ), validity=function(object) { return(TRUE) } ) setMethod( f='initialize', signature=signature(.Object = "x12Batch"), definition=function(.Object,tsList,tsName=NULL,x12BaseInfo=new("x12BaseInfo")) { if (is.null(tsName)) { tsName <- paste0("Series_",1:length(tsList)) } res <- list(); length(res) <- length(tsList) for(i in 1:length(tsList)){ if(is(tsList[[i]],"x12Single")) { res[[i]] <- tsList[[i]] } else{ res[[i]] <- new("x12Single",ts=tsList[[i]],tsName=tsName[i]) } } .Object@[email protected] <- res .Object@x12BaseInfo <- x12BaseInfo return(.Object) } ) ###Handling of x12path setMethod( f='initialize', signature=signature(.Object = "x12BaseInfo"), definition=function(.Object,x12path=NULL, #x13path=NULL, use=NULL,showWarnings=FALSE) { if(is.null(x12path)&&!existd("x12path")) stop("Please use the function x12path() to define the path to the binaries.") if(is.null(x12path)&&existd("x12path")){ if(file.exists(getd("x12path"))) .Object@x12path <- getd("x12path") else stop("file specified in global variable x12path does not exist!\n") } # if(is.null(x13path)&&existd("x13path")){ # if(file.exists(getd("x13path"))) # .Object@x13path <- getd("x13path") # else # stop("file specified in global variable x13path does not exist!\n") # } if(!is.null(x12path)){ if(file.exists(x12path)||x12path=="x12adummy") .Object@x12path <- x12path else stop("file specified in argument x12path does not exist!\n") } # if(!is.null(x13path)){ # if(file.exists(x13path)) # .Object@x13path <- x13path # else # stop("file specified in argument x13path does not exist!\n") # } if(is.null(.Object@x12path)){ stop("Please use the function x12path() to define the paths to the binaries.") } .Object@use <- "x12" .Object@showWarnings <- showWarnings return(.Object) } ) #Basic methods for x12Batch and x12Single setMethod( f='dim', signature=signature(x = "x12Batch"), definition=function(x) { return(length(x@x12List)) } ) setMethod( f='length', signature=signature(x = "x12Batch"), definition=function(x) { return(length(x@x12List)) } ) setMethod( f='print', signature=signature(x = "x12Batch"), definition=function(x) { cat("A batch of time series of length ",length(x@x12List),".\n") for(i in 1:length(x@x12List)) print(x@x12List[[i]]) } ) setMethod( f='print', signature=signature(x = "x12Single"), definition=function(x) { cat("Name: ",x@tsName,"\n") cat("processed with x12: ",x@firstRun,"\n") print(x@ts) } ) setClass(Class="crossValidation", representation=representation( backcast="dfOrNULL", forecast="dfOrNULL"), prototype=prototype( backcast=NULL, forecast=NULL), # validity=function(object) { # length(object@spectrum)==length(object@frequency) # } )
/scratch/gouwar.j/cran-all/cranData/x12/R/class.R
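For orientation, a short sketch of how the classes above fit together, using the built-in AirPassengers series; x12path() (defined in env_functions.R further down) must have registered an X-12/X-13 binary, which is assumed here to be provided by the x13binary package.

```r
library(x12)
x12path()   # register the binary shipped with x13binary (assumption: x13binary is installed)

s <- new("x12Single", ts = AirPassengers, tsName = "air")
b <- new("x12Batch", list(AirPassengers, AirPassengers), tsName = c("air1", "air2"))
length(b)   # 2, via the length() method defined above
print(s)    # name, firstRun flag and the series itself
```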
setGeneric("crossVal", function(object, x12Parameter=new("x12Parameter"), x12BaseInfo=new("x12BaseInfo"),...) { standardGeneric("crossVal")} ) setMethod( f='crossVal', signature=signature(object = "ts"), definition=function(object, x12Parameter,x12BaseInfo, showCI=FALSE,main="Cross Validation", col_original="black",col_fc="#2020ff",col_bc="#2020ff", col_ci="#d1d1ff",col_cishade="#d1d1ff", lty_original=1,lty_fc=2,lty_bc=2,lty_ci=1, lwd_original=1,lwd_fc=1,lwd_bc=1,lwd_ci=1,ytop=1, points_bc=FALSE,points_fc=FALSE,points_original=FALSE, showLine=TRUE,col_line="grey",lty_line=3, ylab="Value",xlab="Date",ylim=NULL,span=NULL) { fbcastP<-getP(x12Parameter,whichP=list("forecast_years","backcast_years")) forecast_years<-fbcastP$forecast_years backcast_years<-fbcastP$backcast_years start_ts <- start(object) freq_ts<-frequency(object) orig_ts<-object end_ts <- end(object) if(!is.null(forecast_years) && forecast_years!=0){ forecast_years<-forecast_years*freq_ts object <- object[-((length(object)-forecast_years+1):length(object))] object <- ts(object,start=start_ts,frequency=freq_ts) end_ts <- end(object) forecast=TRUE if(points_fc) addpoints_fc<-TRUE }else{ forecast=FALSE forecast_years=0 if(points_fc) addpoints_fc<-FALSE } if(!is.null(backcast_years) && backcast_years!=0){ backcast_years<-backcast_years*freq_ts object <- object[-(1:backcast_years)] object <- ts(object,end=end_ts,freq=freq_ts) if(points_bc) addpoints_bc<-TRUE backcast=TRUE}else{ backcast=FALSE backcast_years=0 if(points_bc) addpoints_bc<-FALSE } # x12Paramter<-setP(x12Parameter,listP=list(forecast_years=forecast_years,backcast_years=backcast_years)) #if(any(file.exists(grep(basename("Rout"),list.files(dirname("Rout")),value=TRUE)))) # cat(file.exists(grep(basename("Rout"),list.files(dirname("Rout")),value=TRUE))) #new.env # file.copy(from=grep(basename("Rout"),list.files(dirname("Rout")),value=TRUE),to=paste(basename("Rout"),"Temp",gsub("Rout","",grep(basename("Rout"),list.files(dirname("Rout")),value=TRUE)),sep="")) #print(getwd()) #dir.create("M:/Meraner/Workspace/Saisonbereinigung_Test/x12Test/tmp") #file.copy(from=grep(basename("Rout"),list.files(dirname("Rout")),value=TRUE)[1],to="M:/Meraner/Workspace/Saisonbereinigung_Test/x12Test") olddir<-getwd() setwd(tempdir()) tryCatch(cvout <- x12(object,x12Parameter,x12BaseInfo), finally={if(!exists("cvout")) cat("=> No cross validation can be performed!\n") }) setwd(olddir) bc<-cvout@backcast@estimate fc<-cvout@forecast@estimate object <- object freq_ts<-freq_ts ts_plot<- ts(c(bc,object,fc),start=start(bc),end=end(fc),frequency=freq_ts) ts.lower<-ts(c(cvout@backcast@lowerci,object,cvout@forecast@lowerci),start=start(cvout@backcast@lowerci),end=end(cvout@forecast@lowerci),frequency=freq_ts) ts.upper<-ts(c(cvout@backcast@upperci,object,cvout@forecast@upperci),start=start(cvout@backcast@upperci),end=end(cvout@forecast@upperci),frequency=freq_ts) if(is.null(span)){ xlim <- NULL if(!showCI){ limits.y.lower<-min(orig_ts,bc,fc,na.rm=TRUE) limits.y.upper<-max(orig_ts,bc,fc,na.rm=TRUE) limits.y<-c(limits.y.lower,limits.y.upper) } if(showCI){ limits.y.lower<-min(orig_ts,bc,fc,cvout@forecast@lowerci,cvout@backcast@lowerci,cvout@forecast@upperci,cvout@backcast@upperci,na.rm=TRUE) limits.y.upper<-max(orig_ts,bc,fc,cvout@forecast@lowerci,cvout@backcast@lowerci,cvout@forecast@upperci,cvout@backcast@upperci,na.rm=TRUE) limits.y<-c(limits.y.lower,limits.y.upper) } }else{ if(length(span)==4){ if(any(!c(span[2],span[4]))%in%c(1:12)) stop("Span argument wrong!") xlim <- 
c(span[1]+(span[2]-1)/freq_ts,span[3]+(span[4]-1)/freq_ts) limits.y<-c(min(window(ts_plot,span[1:2],span[3:4]),window(orig_ts,span[1:2],span[3:4]),na.rm=TRUE),max(window(ts_plot,span[1:2],span[3:4]),window(orig_ts,span[1:2],span[3:4]),na.rm=TRUE)*ytop) if(showCI) limits.y<-c(min(limits.y[1],window(ts.lower,span[1:2],span[3:4]),window(ts.upper,span[1:2],span[3:4]),na.rm=TRUE), max(limits.y[2],window(ts.lower,span[1:2],span[3:4]),window(ts.upper,span[1:2],span[3:4]),na.rm=TRUE)) }else if(length(span)==2){ xlim <- span limits.y <- c(min(window(ts_plot,c(span[1],1),c(span[2],1)),window(orig_ts,c(span[1],1),c(span[2],1)),na.rm=TRUE),max(window(ts_plot,span[1],span[2]),window(orig_ts,span[1],span[2]),na.rm=TRUE)*ytop) if(showCI) limits.y<-c(min(limits.y[1],window(ts.lower,c(span[1],1),c(span[2],1)),window(ts.upper,c(span[1],1),c(span[2],1)),na.rm=TRUE), max(limits.y[2],window(ts.lower,span[1],span[2]),window(ts.upper,span[1],span[2]),na.rm=TRUE)) }else stop("Span argument wrong!") } if(!is.null(ylim)) limits.y<-ylim ts<-plotFbcast(cvout,backcast=backcast,forecast=forecast, showCI=showCI,main=main, col_original=col_original,col_fc=col_fc,col_bc=col_bc, col_ci=col_ci,col_cishade=col_cishade, lty_original=lty_original,lty_fc=lty_fc,lty_bc=lty_bc,lty_ci=lty_ci, lwd_original=lwd_original,lwd_fc=lwd_fc,lwd_bc=lwd_bc,lwd_ci=lwd_ci, ytop=ytop,points_bc=points_bc,points_fc=points_fc, showLine=showLine,col_line=col_line,lty_line=lty_line, ylab=ylab,xlab=xlab,points_original=points_original,ylim=limits.y,xlim=xlim) lines(orig_ts,col=col_original) if(!points_fc) addpoints_fc<-FALSE if(!points_bc) addpoints_bc<-FALSE if(addpoints_fc) points(x=time(cvout@forecast@estimate),y=orig_ts[((length(orig_ts)-forecast_years+1):length(orig_ts))],col=col_original) if(addpoints_bc) points(time(cvout@backcast@estimate),orig_ts[(1:backcast_years)],col=col_original) aT <- aL <- axTicks(1) if(!is.null(xlim)) tp <- expand.grid(floor(xlim[1]):ceiling(xlim[2]),(0:(frequency(ts)-1))/frequency(ts)) else tp <- expand.grid(floor(time(ts)[1]):ceiling(time(ts)[length(ts)]),(0:(frequency(ts)-1))/frequency(ts)) mm <- round(tp[,2]*frequency(ts)) yy <- tp[,1] tp <- tp[,1]+tp[,2] for(i in 1:length(aT)){ ii <- which.min(abs(tp-aT[i])) aT[i] <- tp[ii] if(mm[ii]<9) aL[i] <- yy[ii]+(mm[ii]+1)/10 else aL[i] <- yy[ii]+(mm[ii]+1)/100 } axis(1,at=aT,labels=aL) if(backcast){ res.bc<-as.data.frame(rbind(orig_ts[(1:backcast_years)],cvout@backcast@estimate),row.names=c("original","backcast")) colnames(res.bc)<-(1:backcast_years) } if(forecast){ res.fc<-as.data.frame(rbind(orig_ts[((length(orig_ts)-forecast_years+1):length(orig_ts))],cvout@forecast@estimate),row.names=c("original","forecast")) colnames(res.fc)<-((length(orig_ts)-forecast_years+1):length(orig_ts)) } #file.remove(grep(basename("Rout"),list.files(dirname("Rout")),value=TRUE,fixed=TRUE)) #if(any(file.exists(grep(basename("RoutTemp"),list.files(dirname("RoutTemp")),value=TRUE)))) #file.rename(from=grep(basename("RoutTemp"),list.files(dirname("RoutTemp")),value=TRUE),to=paste("Rout",gsub("RoutTemp","",grep(basename("RoutTemp"),list.files(dirname("RoutTemp")),value=TRUE)),sep="")) crossVal <- new("crossValidation") if(backcast) crossVal@backcast<-res.bc if(forecast) crossVal@forecast<-res.fc invisible(crossVal) } ) setMethod( f='crossVal', signature=signature(object = "x12Single"), definition=function(object,x12BaseInfo=new("x12BaseInfo"), showCI=FALSE,main="Cross Validation", col_original="black",col_fc="#2020ff",col_bc="#2020ff", col_ci="#d1d1ff",col_cishade="#d1d1ff", 
lty_original=1,lty_fc=2,lty_bc=2,lty_ci=1, lwd_original=1,lwd_fc=1,lwd_bc=1,lwd_ci=1,ytop=1, points_bc=FALSE,points_fc=FALSE,points_original=FALSE, showLine=TRUE,col_line="grey",lty_line=3, ylab="Value",xlab="Date",ylim=NULL,span=NULL) { crossVal(object@ts,object@x12Parameter, showCI=showCI,main=main, col_original=col_original,col_fc=col_fc,col_bc=col_bc, col_ci=col_ci,col_cishade=col_cishade, lty_original=lty_original,lty_fc=lty_fc,lty_bc=lty_bc,lty_ci=lty_ci, lwd_original=lwd_original,lwd_fc=lwd_fc,lwd_bc=lwd_bc,lwd_ci=lwd_ci, ytop=ytop,points_bc=points_bc,points_fc=points_fc, showLine=showLine,col_line=col_line,lty_line=lty_line, ylab=ylab,xlab=xlab,points_original=points_original,ylim=ylim,span=span) } )
/scratch/gouwar.j/cran-all/cranData/x12/R/crossVal-methods.R
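A hedged usage sketch of crossVal(): it re-runs x12 on the series with the last forecast_years of observations held out and plots the forecasts against the held-out values; AirPassengers is used purely as an illustration and the registered X-12/X-13 binary is assumed to run successfully.

```r
library(x12)
x12path()   # assumption: x13binary provides the binary
p  <- setP(new("x12Parameter"), listP = list(forecast_years = 1))
cv <- crossVal(AirPassengers, p, new("x12BaseInfo"), showCI = TRUE)
cv@forecast   # data.frame comparing the held-out original values with the forecasts
```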
x12env <- new.env()

# small helpers to store/retrieve objects in the package-internal environment x12env
putd <- function(x, value) {
  assign(x, value, envir=x12env)      # add object to x12env
}
rmd <- function(x) {
  rm(list=x, envir=x12env)            # remove object from x12env
}
getd <- function(x, mode="any") {
  get(x, envir=x12env, mode=mode, inherits=FALSE)      # get object from x12env
}
existd <- function(x, mode="any") {
  exists(x, envir=x12env, mode=mode, inherits=FALSE)   # check whether object exists in x12env
}

# set or query the path to the x13as binary; without an argument the binary
# shipped with the 'x13binary' package is used
x12path <- function(path=NULL){
  if(is.null(path)){
    x13p <- x13binary::x13path()
    files <- list.files(x13p)
    path <- file.path(x13p, files[head(grep("x13as", files), 1)])
  }
  pathWork("x12path", path)
  invisible(path)
}
#x13path <- function(path=NULL){
#  pathWork("x13path",path)
#}
pathWork <- function(name, path){
  if(is.null(path) || length(path)!=1){
    if(existd(name))
      return(getd(name))
    else
      cat("Not defined!\n")
  }else{
    if(!file.exists(path))
      stop(paste(path, " - does not exist.", sep=""))
    putd(name, path)
  }
}
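
# Illustrative sketch (not executed) of how the helpers above are used:
# x12path() without an argument registers the binary shipped with the
# 'x13binary' package, while a user-supplied path is stored in x12env after an
# existence check. The path shown below is a hypothetical example.
if (FALSE) {
  x12path()                        # auto-detect via x13binary::x13path()
  existd("x12path")                # TRUE once a path has been registered
  getd("x12path")                  # retrieve the stored path
  x12path("/opt/x13/x13ashtml")    # register a user-installed binary (hypothetical path)
}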
/scratch/gouwar.j/cran-all/cranData/x12/R/env_functions.R
# Placeholder generics for planned ggplot2-based plotting methods; the
# x12Output method of lineX12 is currently only a stub.
setGeneric("lineX12", function(x, param1, param2, ...) {
  standardGeneric("lineX12")
})
setMethod(
  f='lineX12',
  signature=signature(x = "x12Output"), definition=function(x, param1, param2, ...) {
    plot(1)   # stub, no ggplot2 implementation yet
  })
setMethod(
  f='lineX12',
  signature=signature(x = "x12Single"), definition=function(x, param1, param2, ...) {
    lineX12(x@x12Output, param1=param1, param2=param2, ...)
  })
#x <- s1@x12Output
setGeneric("specPlotX12", function(x, param1, param2, ...) {
  standardGeneric("specPlotX12")
})
setGeneric("acfPlotX12", function(x, param1, param2, ...) {
  standardGeneric("acfPlotX12")
})
setGeneric("seasFacX12", function(x, param1, param2, ...) {
  standardGeneric("seasFacX12")
})
/scratch/gouwar.j/cran-all/cranData/x12/R/ggplot2Fn.R
# Generic function setP, set Parameter setGeneric("setP", function(object, ...) { standardGeneric("setP")} ) # Generic function getP, get Parameter setGeneric("getP", function(object, ...) { standardGeneric("getP")} ) #Methods for signature x12Parameter setMethod( f='setP', signature=signature(object = "x12Parameter"), definition=function(object, listP) { paras <- c( #"period", "series.span", "series.modelspan", #"series.type", #"decimals", "transform.function", "transform.power", "transform.adjust", "regression.variables", "regression.user", "regression.file", "regression.usertype", "regression.centeruser", "regression.start", "regression.aictest", #"outlier", "outlier.types", "outlier.critical", "outlier.span", "outlier.method", "identify", "identify.diff", "identify.sdiff", "identify.maxlag", "arima.model", "arima.smodel", "arima.ar", "arima.ma", "automdl", "automdl.acceptdefault", "automdl.balanced", "automdl.maxorder", "automdl.maxdiff", "forecast_years", "backcast_years", "forecast_conf", "forecast_save", "estimate", "estimate.outofsample", "check", "check.maxlag", "slidingspans", "slidingspans.fixmdl", "slidingspans.fixreg", "slidingspans.length", "slidingspans.numspans", "slidingspans.outlier", "slidingspans.additivesa", "slidingspans.start", "history", "history.estimates", "history.fixmdl", "history.fixreg", "history.outlier", "history.sadjlags", "history.trendlags", "history.start", "history.target", "x11.sigmalim", "x11.type", "x11.sfshort", "x11.samode", "x11.seasonalma", "x11.trendma", "x11.appendfcst", "x11.appendbcst", "x11.calendarsigma", "x11.excludefcst", "x11.final", "x11regression" #"tblnames", #"Rtblnames", #"seats", #"seatsparameter" ) mn <- names(listP)%in%paras if(any(!mn)){ warning("The following parameters could not be matched: ",paste(names(listP)[!mn],collapse=" , ")) } mn <- names(listP)[mn] for(nam in mn){ slot(object,nam) <- listP[[nam]] } return(object) } ) setMethod( f='getP', signature=signature(object = "x12Parameter"), definition=function(object, whichP) { paras <- c( #"period", "series.span", "series.modelspan", #"series.type", #"decimals", "transform.function", "transform.power", "transform.adjust", "regression.variables", "regression.user", "regression.file", "regression.usertype", "regression.centeruser", "regression.start", "regression.aictest", #"outlier", "outlier.types", "outlier.critical", "outlier.span", "outlier.method", "identify", "identify.diff", "identify.sdiff", "identify.maxlag", "arima.model", "arima.smodel", "arima.ar", "arima.ma", "automdl", "automdl.acceptdefault", "automdl.balanced", "automdl.maxorder", "automdl.maxdiff", "forecast_years", "backcast_years", "forecast_conf", "forecast_save", "estimate", "estimate.outofsample", "check", "check.maxlag", "slidingspans", "slidingspans.fixmdl", "slidingspans.fixreg", "slidingspans.length", "slidingspans.numspans", "slidingspans.outlier", "slidingspans.additivesa", "slidingspans.start", "history", "history.estimates", "history.fixmdl", "history.fixreg", "history.outlier", "history.sadjlags", "history.trendlags", "history.start", "history.target", "x11.sigmalim", "x11.type", "x11.sfshort", "x11.samode", "x11.seasonalma", "x11.trendma", "x11.appendfcst", "x11.appendbcst", "x11.calendarsigma", "x11.excludefcst", "x11.final", "x11regression" #"tblnames", #"Rtblnames", #"seats", #"seatsparameter" ) mn <- whichP%in%paras if(any(!mn)){ warning("The following parameters could not be matched: ",paste(whichP[!mn],collapse=" , ")) } mn <- whichP[mn] ret <- list() for(nam in mn){ ret[[nam]] <- 
slot(object,nam) } return(ret) } ) #Methods for signature x12Single setMethod( f='getP', signature=signature(object = "x12Single"),definition=function(object, whichP) { getP(object@x12Parameter,whichP=whichP) }) setMethod( f='setP', signature=signature(object = "x12Single"),definition=function(object, listP) { object@x12Parameter <- setP(object@x12Parameter,listP=listP) return(object) }) #Methods for signature x12Batch setMethod( f='getP', signature=signature(object = "x12Batch"),definition=function(object, whichP,index=NULL) { ret <- list() if(is.null(index)){##changing all cat("The parameters for all objects are shown.\n") for(i in 1:length(object@x12List)){ ret[[length(ret)+1]] <- getP(object@x12List[[i]],whichP=whichP) } }else{ if(is.integer(index)){ if(min(index)>0&max(index)<=length(object@x12List)){ for(i in index){ ret[[length(ret)+1]] <- getP(object@x12List[[i]],whichP=whichP) } }else stop("argument index is out of bounds!\n") }else if(is.character(index)){ namTS <- vector() for(i in 1:length(object@x12List)){ namTS <- c(namTS,object@x12List[[i]]@tsName) } if(all(index%in%namTS)){ for(nam in index){ ind <- which(nam==namTS) ret[[length(ret)+1]] <- getP(object@x12List[[ind]],whichP=whichP) } }else stop("argument index contained names not found in the series names!\n") }else stop("argument index must be either integer or character!\n") } return(ret) }) setMethod( f='setP', signature=signature(object = "x12Batch"),definition=function(object, listP,index=NULL) { res <- object@[email protected] if(is.null(index)){##changing all cat("The parameters for all objects are changed.\n") for(i in 1:length(object@x12List)){ res[[i]] <- setP(res[[i]],listP=listP) } }else{ if(is.numeric(index)){ if(min(index)>0&max(index)<=length(object@x12List)){ for(i in index){ res[[i]] <- setP(res[[i]],listP=listP) } }else stop("argument index is out of bounds!\n") }else if(is.character(index)){ namTS <- vector() for(i in 1:length(object@x12List)){ namTS <- c(namTS,res[[i]]@tsName) } if(all(index%in%namTS)){ for(nam in index){ ind <- which(nam==namTS) res[[ind]] <- setP(res[[ind]],listP=listP) } }else stop("argument index contained names not found in the series names!\n") }else stop("argument index must be either integer or character!\n") } object@[email protected] <- res return(object) }) #Goto previous parameter setting and output # Generic function prev, cleanArchive setGeneric("prev", function(object, ...) { standardGeneric("prev")} ) setMethod( f='prev', signature=signature(object = "x12Single"),definition=function(object,n=NULL) { if(is.null(n)) ind <- length(object@x12OldParameter) else if(n%in%c(1:length(object@x12OldParameter))) ind <- n else stop("Please provide an index corresponding to a previous run. 
(see summary with oldOutput>0)") object@x12Output <- object@x12OldOutput[[ind]] object@x12Parameter <- object@x12OldParameter[[ind]] oldout <- list() oldpar <- list() for(i in 1:length(object@x12OldParameter)){ if(i!=ind){ oldout[[length(oldout)+1]] <- object@x12OldOutput[[i]] oldpar[[length(oldpar)+1]] <- object@x12OldParameter[[i]] } } object@x12OldOutput <- oldout object@x12OldParameter <- oldpar return(object) }) setMethod( f='prev', signature=signature(object = "x12Batch"),definition=function(object,index=NULL,n=NULL) { if(is.null(index)){##changing all cat("All current parameters and outputs are replaced by the previous ones.\n") for(i in 1:length(object@x12List)){ object@x12List[[i]] <- prev(object@x12List[[i]],n=n) } }else{ if(is.numeric(index)){ if(min(index)>0&max(index)<=length(object@x12List)){ for(i in index){ object@x12List[[i]] <- prev(object@x12List[[i]],n=n) } }else stop("argument index is out of bounds!\n") }else if(is.character(index)){ namTS <- vector() for(i in 1:length(object@x12List)){ namTS <- c(namTS,object@x12List[[i]]@tsName) } if(all(index%in%namTS)){ for(nam in index){ ind <- which(nam==namTS) object@x12List[[ind]] <- prev(object@x12List[[ind]],n=n) } }else stop("argument index contained names not found in the series names!\n") }else stop("argument index must be either integer or character!\n") } return(object) }) setGeneric("cleanArchive", function(object, ...) { standardGeneric("cleanArchive")} ) setGeneric("cleanHistory", function(object, ...) { .Deprecated("cleanArchive") cleanArchive(object,...) } ) setMethod( f='cleanArchive', signature=signature(object = "x12Single"),definition=function(object) { object@x12OldParameter <- object@x12OldOutput <- list() return(object) }) setMethod( f='cleanArchive', signature=signature(object = "x12Batch"),definition=function(object,index=NULL) { if(is.null(index)){##changing all cat("All previous parameters and outputs are deleted.\n") for(i in 1:length(object@x12List)){ object@x12List[[i]] <- cleanArchive(object@x12List[[i]]) } }else{ if(is.numeric(index)){ if(min(index)>0&max(index)<=length(object@x12List)){ for(i in index){ object@x12List[[i]] <- cleanArchive(object@x12List[[i]]) } }else stop("argument index is out of bounds!\n") }else if(is.character(index)){ namTS <- vector() for(i in 1:length(object@x12List)){ namTS <- c(namTS,object@x12List[[i]]@tsName) } if(all(index%in%namTS)){ for(nam in index){ ind <- which(nam==namTS) object@x12List[[ind]] <- cleanArchive(object@x12List[[ind]]) } }else stop("argument index contained names not found in the series names!\n") }else stop("argument index must be either integer or character!\n") } return(object) }) ####SAVE setGeneric("saveP", function(object, file="x12Parameter.RData") { standardGeneric("saveP")} ) setGeneric("loadP", function(object, file) { standardGeneric("loadP")} ) setMethod( f='saveP', signature=signature(object = "x12Parameter"), definition=function(object,file) { save(object,file=file) } ) setMethod( f='saveP', signature=signature(object = "x12Single"), definition=function(object,file) { out=object@x12Parameter save(out,file=file) } ) setMethod( f='saveP', signature=signature(object = "x12Batch"), definition=function(object,file) { x12ParList <- list() for(i in 1:length(object@x12List)){ x12ParList[[object@x12List[[i]]@tsName]] <- object@x12List[[i]]@x12Parameter } save(x12ParList,file=file) } ) setMethod( f='loadP', signature=signature(object = "x12Parameter"), definition=function(object,file) { par <- get(load(file=file)) if(!is(par,"x12Parameter")) 
      stop("no parameter settings found in the file!\n")
    return(par)
  }
)
setMethod(
  f='loadP',
  signature=signature(object = "x12Single"),
  definition=function(object, file) {
    par <- get(load(file=file))
    if(!is(par,"x12Parameter"))
      stop("no parameter settings found in the file!\n")
    object@x12Parameter <- par
    return(object)
  }
)
setMethod(
  f='loadP',
  signature=signature(object = "x12Batch"),
  definition=function(object, file) {
    parList <- get(load(file=file))
    if(is(parList,"x12Parameter")){
      warning("All parameters will be overwritten with one loaded parameter configuration")
      for(i in 1:length(object@x12List)){
        object@x12List[[i]]@x12Parameter <- parList
      }
    }else{
      if(length(parList)!=length(object@x12List))
        stop("loaded parameter list does not match the x12Batch object\n")
      for(i in 1:length(parList)){
        if(!is(parList[[i]],"x12Parameter"))
          stop("The file does not contain a list of x12Parameter objects!")
        object@x12List[[i]]@x12Parameter <- parList[[i]]
      }
    }
    return(object)
  }
)
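
# Illustrative sketch (not executed) of the parameter accessors defined above.
# Construction via new(), the chosen parameter values and the file name are
# assumptions for demonstration only.
if (FALSE) {
  s <- new("x12Single", ts = AirPassengers, tsName = "air")
  s <- setP(s, list(transform.function = "log", arima.model = c(0, 1, 1)))
  getP(s, c("transform.function", "arima.model"))
  saveP(s, file = "airParameter.RData")   # stores the embedded x12Parameter object
  s <- loadP(s, file = "airParameter.RData")
  s <- cleanArchive(s)                    # drop archived previous parameters/outputs
}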
/scratch/gouwar.j/cran-all/cranData/x12/R/parameter-methods.R
setMethod(f='plot', signature=signature(x = "x12Output"), definition=function(x,original=TRUE,sa=FALSE,trend=FALSE, log_transform=FALSE, ylab="Value",xlab="Date", main="TS", col_original="black",col_sa="blue",col_trend="green", lwd_original=1,lwd_sa=1,lwd_trend=1,lty_sa=1,lty_trend=1, ytop=1,showAllout=FALSE,showAlloutLines=FALSE,showOut=NULL,annComp=TRUE,annCompTrend=TRUE, col_ao="red",col_ls="red",col_tc="red",col_annComp="grey",lwd_out=1,cex_out=1.5, pch_ao=4,pch_ls=2,pch_tc=23,plot_legend=TRUE,legend_horiz=TRUE,legend_bty="o", ### implement plotFbcast forecast=FALSE,backcast=FALSE,showCI=TRUE, col_fc="#2020ff",col_bc="#2020ff",col_ci="#d1d1ff",col_cishade="#d1d1ff", lty_original=1,lty_fc=2,lty_bc=2,lty_ci=1,lwd_fc=1,lwd_bc=1,lwd_ci=1, points_bc=FALSE,points_fc=FALSE,points_original=FALSE, showLine=FALSE,col_line="grey",lty_line=3,ylim=NULL,span=NULL,... ) { if(showAllout) legend_horiz <- FALSE if(is.null(span)) xlim <- NULL else{ if(length(span)==4){ if(any(!c(span[2],span[4]))%in%c(1:12)) stop("Span argument wrong!") xlim <- c(span[1]+(span[2]-1)/frequency(x@a1),span[3]+(span[4]-1)/frequency(x@a1)) if(is.null(ylim)){ ylim <- c(min(window(x@a1,span[1:2],span[3:4]),na.rm=TRUE),max(window(x@a1,span[1:2],span[3:4]),na.rm=TRUE)*ytop) if(log_transform) ylim <- c(min(log(window(x@a1,span[1:2],span[3:4])),na.rm=TRUE),max(log(window(x@a1,span[1:2],span[3:4])),na.rm=TRUE)*ytop) } }else if(length(span)==2){ xlim <- span ylim <- c(min(window(x@a1,c(span[1],1),c(span[2],1)),na.rm=TRUE),max(window(x@a1,span[1],span[2]),na.rm=TRUE)*ytop) if(log_transform) ylim <- c(min(log(window(x@a1,c(span[1],1),c(span[2],1))),na.rm=TRUE),max(log(window(x@a1,span[1],span[2])),na.rm=TRUE)*ytop) }else stop("Span argument wrong!") } #Achtung: plotFbcast #keine Option log_transform #object und nicht x ## # c(object,showCI=TRUE, # main="Time Series",forecast=TRUE,backcast=TRUE, # col_original="black",col_fc="#2020ff",col_bc="#2020ff", # col_ci="#d1d1ff",col_cishade="#d1d1ff", # lty_original=1,lty_fc=1,lty_bc=1,lty_ci=1, # lwd_original=1,lwd_fc=1,lwd_bc=1,lwd_ci=1,ytop=1, # points_bc=FALSE,points_fc=FALSE,points_original=FALSE, # showLine=FALSE,col_line="grey",lty_line=3, # ylab="Value",xlab="Date",ylim=NULL,...) 
showWarnings=TRUE # fuer plotFbcast object <- x leg.txt <- vector() leg.col <- vector() leg.lty <- vector() user.main.logical<-FALSE if(main!="TS"){ user.main.logical<-TRUE user.main<-main} # if(!is.null(ylim)) # span<-ylim if(!is.null(showOut)) showAllout<-FALSE gp<-par() for(i in c("cin","cra","csi","cxy","din","pin")){ gp <- gp[-which(names(gp)%in%i)] } tryCatch({ if(original){ if(!log_transform){ ts <- object@a1 main<-main.orig <- "Original Series" leg.txt <- c(leg.txt,"Original") leg.col <- c(leg.col,col_original) leg.lty <- c(leg.lty,lty_original) }else{ ts <- log(object@a1) main<-main.orig<- "Log transformed Original Series" leg.txt <- c(leg.txt,"Original") leg.col <- c(leg.col,col_original) leg.lty <- c(leg.lty,lty_original) }} if(sa){ if(!log_transform){ ts.sa <- object@d11 main<-"Seasonally Adjusted Series" leg.txt <- c(leg.txt,"Seasonally Adjusted") leg.col <- c(leg.col,col_sa) leg.lty <- c(leg.lty,lty_sa) }else{ ts.sa <- log(ts.sa <- object@d11) main<-"Log transformed Seasonally Adjusted Series" leg.txt<- c(leg.txt,"Seasonally Adjusted") leg.col <- c(leg.col,col_sa) leg.lty <- c(leg.lty,lty_sa) }} if(trend){ if(!log_transform){ ts.trend <- object@d12 main<-"Trend" leg.txt <- c(leg.txt,"Trend") leg.col <- c(leg.col,col_trend) leg.lty <- c(leg.lty,lty_trend) }else{ ts.trend <- log(ts.trend <- object@d12) main<-"Log transformed Trend" leg.txt <- c(leg.txt,"Trend") leg.col <- c(leg.col,col_trend) leg.lty <- c(leg.lty,lty_trend) }} if(sa && trend &! original){ if(!log_transform) main <- "Seasonally Adjusted Series and Trend" else main <- "Log transformed Seasonally Adjusted Series and Trend" } if(original && sa &!trend) main <- paste(main.orig,"and Seasonally Adjusted Series") if(original &! sa &&trend) main <- paste(main.orig,"and Trend") if(original && sa && trend) main <- paste(main.orig,", Seasonally Adjusted Series and Trend",sep="") if(user.main.logical) main<-user.main if(forecast && backcast &! is.na(object@forecast@estimate[1]) &! is.na(object@backcast@estimate[1])) main<-"Time Series with Back- and Forecasts" if(forecast &! backcast &! is.na(object@forecast@estimate[1])) main<-"Time Series with Forecasts" if(!forecast && backcast &! 
is.na(object@backcast@estimate[1])) main<-"Time Series with Backcasts" if(forecast && backcast && is.na(object@forecast@estimate[1])) main<-"Time Series with Backcasts" if(forecast && backcast && is.na(object@backcast@estimate[1])) main<-"Time Series with Forecasts" #Falls nur SA/nur Trend geplottet werden soll if((sa &!original &!trend) | (sa && trend &!original)){ ts<-ts.sa col_original <- col_sa lwd_original <- lwd_sa lty_original <- lty_sa } if(trend &!original &!sa){ ts <- ts.trend col_original <- col_trend lwd_original <- lwd_trend lty_original <- lty_trend } ts.plot<-ts #if(sa && trend &!original){ # ts<-ts.sa # col_original <- col_sa # lwd_original <- lwd_sa #} if(showAllout && object@dg$outlier=="-" && object@dg$autoout=="-") showAllout=FALSE if(showAllout | !is.null(showOut)){ if(showAllout){ if(any(object@dg$outlier!="-")){ names.out <- names(object@dg$outlier) names.out <- tolower(gsub("outlier_","",names.out))} if(any(object@dg$autoout!="-")){ if(!exists("names.out")) names.out <- tolower(gsub("autooutlier_","",names(object@dg$autoout))) else names.out <- tolower(c(names.out,gsub("autooutlier_","",names(object@dg$autoout)))) }} if(!is.null(showOut)){ names.out <- tolower(showOut) } months <- c("jan","feb","mar","apr","may","jun","jul","aug","sep","oct","nov","dec") num.months <- sapply(names.out,function(y) which(unlist(sapply(months,function(x) grepl(x,y,fixed=TRUE))))) num.months <- as.numeric(num.months) years <- suppressWarnings(as.numeric(sapply(strsplit(names.out,""),function(x)paste(as.vector(na.omit(as.numeric(x)))[1:4],collapse="")))) rest.months <- suppressWarnings(as.numeric(sapply(strsplit(names.out,""),function(x){ l.mon<-5:ifelse(length(unlist(x))==8, 5, 6) paste(as.vector(na.omit(as.numeric(x)))[l.mon],collapse="")}))) months <- na.omit(c(rbind(num.months,rest.months))) out <- cbind(years,months) pch.out <- col.out <- names.out2 <- sapply(1:(dim(out)[1]),function(i) unlist(strsplit(names.out[i],as.character(years)[i]))[1]) # col.out <- names.out2 col.out <- replace(col.out,which(col.out%in%"ao"),col_ao) col.out <- replace(col.out,which(col.out%in%"ls"),col_ls) col.out <- replace(col.out,which(col.out%in%"tc"),col_tc) pch.out <- replace(pch.out,which(pch.out%in%"ao"),pch_ao) pch.out <- replace(pch.out,which(pch.out%in%"ls"),pch_ls) pch.out <- as.numeric(replace(pch.out,which(pch.out%in%"tc"),pch_tc)) } if(plot_legend){ par(mar = c(4, 4, 4, 2) + 0.1) layout(matrix(c(rep(1,16),2,2),nrow=9,byrow=TRUE))#,heights = c(1, 6), respect = FALSE) if(forecast | backcast){ if(!original){ cat("Fore-/Backcasts are only available for 'original' (log transformed) time series!\n") if(is.null(xlim)){ plot(ts,ylim=ylim,xlab=xlab,ylab=ylab,main=main,lwd=lwd_original,col=col_original,lty=lty_original,xaxt="n",...) }else{ plot(ts,xlim=xlim,ylim=ylim,xlab=xlab,ylab=ylab,main=main,lwd=lwd_original,col=col_original,lty=lty_original,xaxt="n",...) } }else{ ts.plot <- plotFbcast(object=object,showCI=showCI, main=main,forecast=forecast,backcast=backcast,log_transform=log_transform, col_original=col_original,col_fc=col_fc,col_bc=col_bc, col_ci=col_ci,col_cishade=col_cishade,points_original=points_original, lty_original=lty_original,lty_fc=lty_fc,lty_bc=lty_bc,lty_ci=lty_ci, lwd_original=lwd_original,lwd_fc=lwd_fc,lwd_bc=lwd_bc,lwd_ci=lwd_ci, ytop=ytop,points_bc=points_bc,points_fc=points_fc, showLine=showLine,col_line=col_line,lty_line=lty_line, ylab=ylab,xlab=xlab,ylim=ylim,xlim=xlim,showWarnings=showWarnings,...) 
} }else if(is.null(xlim)){ plot(ts,ylim=ylim,xlab=xlab,ylab=ylab,main=main,lwd=lwd_original,col=col_original,lty=lty_original,xaxt="n",...) }else{ plot(ts,ylim=ylim,xlim=xlim,xlab=xlab,ylab=ylab,main=main,lwd=lwd_original,col=col_original,lty=lty_original,xaxt="n",...) } if(original && sa) lines(ts.sa,col=col_sa,type="l",lwd=lwd_sa,lty=lty_sa) if(original && trend) lines(ts.trend,col=col_trend,type="l",lwd=lwd_trend,lty=lty_trend) if(sa && trend &!original) lines(ts.trend,col=col_trend,type="l",lwd=lwd_trend,lty=lty_trend) if((!forecast || !backcast) && points_original) points(ts.plot,col=col_original,lwd=lwd_original) ts<-ts.plot aT <- aL <- axTicks(1) if(!is.null(xlim)) tp <- expand.grid(floor(xlim[1]):ceiling(xlim[2]),(0:(frequency(ts)-1))/frequency(ts)) else tp <- expand.grid(floor(time(ts)[1]):ceiling(time(ts)[length(ts)]),(0:(frequency(ts)-1))/frequency(ts)) mm <- round(tp[,2]*frequency(ts)) yy <- tp[,1] tp <- tp[,1]+tp[,2] for(i in 1:length(aT)){ ii <- which.min(abs(tp-aT[i])) aT[i] <- tp[ii] if(mm[ii]<9) aL[i] <- yy[ii]+(mm[ii]+1)/10 else aL[i] <- yy[ii]+(mm[ii]+1)/100 } axis(1,at=aT,labels=aL) if(is.null(showOut) &! showAllout){ par(mar = c(0, 0, 0, 0)) if(forecast | backcast){ ts.plot <- plotFbcast(object=object,showCI=showCI, main=main,forecast=forecast,backcast=backcast,log_transform=log_transform, col_original=NA,col_fc=NA,col_bc=NA, col_ci=NA,col_cishade=NA, ytop=ytop, col_line=NA, ylim=ylim,xlim=xlim,showWarnings=FALSE,type = "n", axes = FALSE, bty="n", ann = FALSE,...) }else{ if(is.null(xlim)){ plot(ts,ylim=ylim,type = "n", axes = FALSE, bty="n", ann = FALSE, xaxt="n", ...) }else{ plot(ts,xlim=xlim,ylim=ylim,type = "n", axes = FALSE, bty="n", ann = FALSE, xaxt="n", ...) } } if(length(leg.txt)>1){ legend("center",legend=leg.txt,col=leg.col,lty=leg.lty,bg="white",horiz=legend_horiz,bty=legend_bty) } } if(!is.null(showOut)){ out.type <- toupper(unlist(strsplit(names.out,out[1]))[1]) out.trend <- out[2]+frequency(ts)*c(0:(floor(length(ts)/ frequency(ts))-1)) out.trend.x <- time(ts)[out.trend] out.trend.y <- ts[out.trend] col.out.trend <- rep(col_annComp,length(out.trend)) col.out.trend[grep(out[1],out.trend.x)]<-col.out if(annComp && annCompTrend){ lines(out.trend.x,out.trend.y,type="l",lty=1,lwd=lwd_out,col=col_annComp) lapply(1:length(out.trend),function(i)abline(v=out.trend.x[i],col=col.out.trend[i],lwd=lwd_out,lty=3)) points(out.trend.x[grep(out[1],out.trend.x)],out.trend.y[grep(out[1],out.trend.x)],col=col.out,pch=pch.out,cex=cex_out,lwd=lwd_out) } if(!annComp &! annCompTrend){ #points(window(ts, out, out, frequency=frequency(ts)),pch=pch.out,cex=cex_out,lwd=lwd_out,col=col.out) abline(v=out.trend.x[grep(out[1],out.trend.x)],col=col.out,lty=3,lwd=lwd_out) points(out.trend.x[grep(out[1],out.trend.x)],out.trend.y[grep(out[1],out.trend.x)],col=col.out,pch=pch.out,cex=cex_out,lwd=lwd_out) } if(!annComp && annCompTrend){ lines(out.trend.x,out.trend.y,type="l",lty=1,lwd=lwd_out,col=col_annComp) abline(v=out.trend.x[grep(out[1],out.trend.x)],col=col.out,lty=3,lwd=lwd_out) points(out.trend.x[grep(out[1],out.trend.x)],out.trend.y[grep(out[1],out.trend.x)],col=col.out,pch=pch.out,cex=cex_out,lwd=lwd_out) # points(window(ts, out, out, frequency=frequency(ts)),pch=pch.out,cex=cex_out,lwd=lwd_out,col=col.out) } if(annComp &! 
annCompTrend){ # lines(out.trend.x,out.trend.y,type="p",lty=1,lwd=lwd_out,col=col.out,pch=pch.out.trend,cex=cex_out) #lapply(1:(dim(out.othermonths)[1]),function(x)points(window(ts, out.othermonths[x,], out.othermonths[x,], frequency=frequency(ts)),pch=4,cex=cex_out,lwd=lwd_out,col=col.out)) lapply(1:length(out.trend),function(i)abline(v=out.trend.x[i],col=col.out.trend[i],lwd=lwd_out,lty=3)) points(out.trend.x[grep(out[1],out.trend.x)],out.trend.y[grep(out[1],out.trend.x)],col=col.out,pch=pch.out,cex=cex_out,lwd=lwd_out) } par(mar = c(0, 0, 0, 0)) if(forecast | backcast){ ts.plot <- plotFbcast(object=object,showCI=showCI, main=main,forecast=forecast,backcast=backcast,log_transform=log_transform, col_original=NA,col_fc=NA,col_bc=NA, col_ci=NA,col_cishade=NA, ytop=ytop, col_line=NA,showWarnings=FALSE, ylim=ylim,xlim=xlim,type = "n", axes = FALSE, bty="n", ann = FALSE,...) }else{ if(is.null(xlim)){ plot(ts,ylim=ylim,type = "n", axes = FALSE, bty="n", ann = FALSE,xaxt="n",...) }else{ plot(ts,xlim=xlim,ylim=ylim,type = "n", axes = FALSE, bty="n", ann = FALSE,xaxt="n",...) } } if(annComp && annCompTrend){ if(original && sa && trend) legend("center",legend=c(leg.txt[1],out.type,leg.txt[2],"Annual comparison",leg.txt[3]),col=c(leg.col[1],col.out,leg.col[2],col_annComp,leg.col[3]),bg="white", lty=c(leg.lty[1],3,leg.lty[2],3,leg.lty[3]),pch=c(NA,pch.out,NA,NA,NA),ncol=3,horiz=legend_horiz,bty=legend_bty) else if((original &! sa &! trend) | (!original && sa &! trend) | (!original && trend &! sa)) legend("center",legend=c(leg.txt[1],out.type,"","Annual comparison"),col=c(leg.col[1],col.out,NA,col_annComp), bg="white",lty=c(leg.lty[1],3,NA,3),pch=c(NA,pch.out,NA,NA),ncol=2,horiz=legend_horiz,bty=legend_bty) else if((original && sa &! trend) | (original &! sa && trend) | (sa && trend &!original)) legend("center",legend=c(leg.txt[1],out.type,leg.txt[2],"Annual comparison"), col=c(leg.col[1],col.out,leg.col[2],col_annComp),bg="white", lty=c(leg.lty[1],3,leg.lty[2],3),pch=c(NA,pch.out,NA,NA),ncol=2,horiz=legend_horiz,bty=legend_bty) } if(!annComp && annCompTrend){ if(original && sa && trend) legend("center",legend=c(leg.txt[1],out.type,leg.txt[2],"Annual comparison",leg.txt[3]),col=c(leg.col[1],col.out,leg.col[2],col_annComp,leg.col[3]), bg="white",lty=c(leg.lty[1],3,leg.lty[2],1,leg.lty[3]), pch=c(NA,pch.out,NA,NA,NA),ncol=3,horiz=legend_horiz,bty=legend_bty) else if((original &! sa &! trend) | (!original && sa &! trend) | (!original && trend &! sa)) legend("center",legend=c(leg.txt[1],out.type,"","Annual comparison"),col=c(leg.col[1],col.out,NA,col_annComp),bg="white", lty=c(leg.lty[1],3,NA,1),pch=c(NA,pch.out,NA,NA),ncol=2,horiz=legend_horiz,bty=legend_bty) else if((original && sa &! trend) | (original &! sa && trend) | (sa && trend &!original)) legend("center",legend=c(leg.txt[1],out.type,leg.txt[2],"Annual comparison"), col=c(leg.col[1],col.out,leg.col[2],col_annComp),bg="white", lty=c(leg.lty[1],3,leg.lty[2],1),pch=c(NA,pch.out,NA,NA),ncol=2,horiz=legend_horiz,bty=legend_bty) } if(annComp &! annCompTrend){ if(original && sa && trend) legend("center",legend=c(leg.txt[1],out.type,leg.txt[2],"Annual comparison",leg.txt[3]), col=c(leg.col[1],col.out,leg.col[2],col_annComp,leg.col[3]),bg="white", lty=c(leg.lty[1],3,leg.lty[2],3,leg.lty[3]),pch=c(NA,pch.out,NA,NA,NA),ncol=3, horiz=legend_horiz,bty=legend_bty) else if((original &! sa &! trend) | (!original && sa &! trend) | (!original && trend &! 
sa)) legend("center",legend=c(leg.txt[1],out.type,"","Annual comparison"), col=c(leg.col[1],col.out,NA,col_annComp),bg="white",lty=c(leg.lty[1],3,NA,3), pch=c(NA,pch.out,NA,NA),ncol=2,horiz=legend_horiz,bty=legend_bty) else if((original && sa &! trend) | (original &! sa && trend) | (sa && trend &!original)) legend("center",legend=c(leg.txt[1],out.type,leg.txt[2],"Annual comparison"), col=c(leg.col[1],col.out,leg.col[2],col_annComp),bg="white", lty=c(leg.lty[1],3,leg.lty[2],3),pch=c(NA,pch.out,NA,NA),ncol=2,horiz=legend_horiz,bty=legend_bty) } if(!annComp &! annCompTrend){ if(original && sa && trend) legend("center",legend=c(leg.txt[1],out.type,leg.txt[2],"",leg.txt[3]),col=c(leg.col[1],col.out,leg.col[2],NA,leg.col[3]),bg="white",lty=c(leg.lty[1],3,leg.lty[2],NA,leg.lty[3]), pch=c(NA,pch.out,NA,NA,NA),ncol=3,horiz=legend_horiz,bty=legend_bty) else if((original &! sa &! trend) | (!original && sa &! trend) | (!original && trend &! sa)) legend("center",legend=c(leg.txt[1],out.type),col=c(leg.col[1],col.out), bg="white",lty=c(leg.lty[1],3),pch=c(NA,pch.out),horiz=legend_horiz,bty=legend_bty) else if((original && sa &! trend) | (original &! sa && trend) | (sa && trend &!original)) legend("center",legend=c(leg.txt[1],out.type,leg.txt[2],""),col=c(leg.col[1],col.out,leg.col[2],NA), bg="white",lty=c(leg.lty[1],3,leg.lty[2],NA),pch=c(NA,pch.out,NA,NA),ncol=2,horiz=legend_horiz,bty=legend_bty) } } if(showAllout){ lapply(1:(dim(out)[1]),function(x)points(window(ts, out[x,], out[x,], frequency=frequency(ts)),cex=cex_out,lwd=lwd_out,col=col.out[x],pch=pch.out[x])) out.type.ind<- unlist(lapply(c("ao","ls","tc"),function(x) any(grepl(x,names.out)))) out.type<-rep("",3) out.type.col<-rep(NA,3) out.type.pch<-rep(NA,3) if(length(which(out.type.ind))>0){ out.type[1:length(which(out.type.ind))] <- toupper(c("ao","ls","tc")[out.type.ind]) out.type.col<-c(col_ao,col_ls,col_tc)[out.type.ind] out.type.pch<-c(pch_ao,pch_ls,pch_tc)[out.type.ind] } if(showAlloutLines) lapply(1:(dim(out)[1]),function(x)abline(v=time(window(ts, out[x,], out[x,], frequency=frequency(ts))),col=col.out[x],lwd=lwd_out,lty=3)) # par(mar = c(0, 0, 0, 0)) # plot(ts,ylim=c(min(ts,na.rm=TRUE),max(ts,na.rm=TRUE)*ytop),type = "n", axes = FALSE, ann = FALSE) # if(original && sa && trend) # legend("center",legend=c(leg.txt[1],"AO",leg.txt[2],"LS",leg.txt[3],"TC"),col=c(leg.col[1],col_ao,leg.col[2],col_ls,leg.col[3],col_tc),bg="white",lty=c(1,NA,1,NA,1,NA),pch=c(NA,pch_ao,NA,pch_ls,NA,pch_tc),pt.cex=c(NA,2,NA,2,NA,2),ncol=3) # else if((original &! sa &! trend) | (!original && sa &! trend) | (!original && trend &! sa)) # legend("center",legend=c("AO","LS","TC"),col=c(col_ao,col_ls,col_tc),bg="white",pch=c(pch_ao,pch_ls,pch_tc),pt.cex=2,horiz=legend_horiz) # else if((original && sa &! trend) | (original &! sa && trend) | (sa && trend &!original)) # legend("center",legend=c(leg.txt[1],"AO",leg.txt[2],"LS","","TC"),col=c(leg.col[1],col_ao,leg.col[2],col_ls,NA,col_tc),bg="white",lty=c(1,NA,1,NA,NA,NA),pch=c(NA,pch_ao,NA,pch_ls,NA,pch_tc),pt.cex=c(NA,2,NA,2,NA,2),ncol=3) par(mar = c(0, 0, 0, 0)) if(forecast | backcast){ ts.plot <- plotFbcast(object=object,showCI=showCI, main=main,forecast=forecast,backcast=backcast,log_transform=log_transform, col_original=NA,col_fc=NA,col_bc=NA, col_ci=NA,col_cishade=NA, ytop=ytop, col_line=NA,showWarnings=FALSE, ylim=ylim,xlim=xlim,type = "n", axes = FALSE, bty="n", ann = FALSE,...) }else{ if(is.null(xlim)){ plot(ts,ylim=ylim,type = "n", axes = FALSE, bty="n", ann = FALSE,xaxt="n",...) 
}else{ plot(ts,xlim=xlim,ylim=ylim,type = "n", axes = FALSE, bty="n", ann = FALSE,xaxt="n",...) } } if(original && sa && trend) legend("center",legend=c(leg.txt[1],out.type[1],leg.txt[2],out.type[2],leg.txt[3],out.type[3]), col=c(leg.col[1],out.type.col[1],leg.col[2],out.type.col[2],leg.col[3],out.type.col[3]), bg="white",lty=c(leg.lty[1],NA,leg.lty[2],NA,leg.lty[3],NA),pch=c(NA,out.type.pch[1],NA,out.type.pch[2],NA,out.type.pch[3]),pt.cex=c(NA,2,NA,2,NA,2), ncol=3,horiz=legend_horiz,bty=legend_bty) else if((original &! sa &! trend) | (!original && sa &! trend) | (!original && trend &! sa)) legend("center",legend=c(out.type[1],out.type[2],out.type[3]), col=c(out.type.col[1],out.type.col[2],out.type.col[3]),bg="white", pch=c(out.type.pch[1],out.type.pch[2],out.type.pch[3]), pt.cex=2,horiz=legend_horiz,bty=legend_bty) else if((original && sa &! trend) | (original &! sa && trend) | (sa && trend &!original)) legend("center",legend=c(leg.txt[1],out.type[1],leg.txt[2],out.type[2],"",out.type[3]), col=c(leg.col[1],out.type.col[1],leg.col[2],out.type.col[2],NA,out.type.col[3]),bg="white",lty=c(leg.lty[1],NA,leg.lty[2],NA,NA,NA),pch=c(NA,out.type.pch[1],NA,out.type.pch[2],NA,out.type.pch[3]),pt.cex=c(NA,2,NA,2,NA,2), ncol=3,horiz=legend_horiz,bty=legend_bty) } #end if plot legend }else{ if(forecast | backcast){ if(!original){ cat("Fore-/Backcasts are only available for 'original' (log transformed) time series!\n") if(is.null(xlim)){ plot(ts,ylim=ylim,xlab=xlab,ylab=ylab,main=main,lwd=lwd_original,col=col_original,lty=lty_original,xaxt="n",...) }else{ plot(ts,xlim=xlim,ylim=ylim,xlab=xlab,ylab=ylab,main=main,lwd=lwd_original,col=col_original,lty=lty_original,xaxt="n",...) } }else{ ts.plot <- plotFbcast(object=object,showCI=showCI, main=main,forecast=forecast,backcast=backcast,log_transform=log_transform, col_original=col_original,col_fc=col_fc,col_bc=col_bc, col_ci=col_ci,col_cishade=col_cishade,points_original=points_original, lty_original=lty_original,lty_fc=lty_fc,lty_bc=lty_bc,lty_ci=lty_ci, lwd_original=lwd_original,lwd_fc=lwd_fc,lwd_bc=lwd_bc,lwd_ci=lwd_ci, ytop=ytop,points_bc=points_bc,points_fc=points_fc, showLine=showLine,col_line=col_line,lty_line=lty_line, ylab=ylab,xlab=xlab,ylim=ylim,xlim=xlim,...) } }else if(is.null(xlim)){ plot(ts,ylim=ylim,xlab=xlab,ylab=ylab,main=main,lwd=lwd_original,col=col_original,lty=lty_original,xaxt="n",...) }else{ plot(ts,xlim=xlim,ylim=ylim,xlab=xlab,ylab=ylab,main=main,lwd=lwd_original,col=col_original,lty=lty_original,xaxt="n",...) } if(original && sa) lines(ts.sa,col=col_sa,type="l",lwd=lwd_sa,lty=lty_sa) if(original && trend) lines(ts.trend,col=col_trend,type="l",lwd=lwd_trend,lty=lty_trend) if(sa && trend &!original) lines(ts.trend,col=col_trend,type="l",lwd=lwd_trend,lty=lty_trend) if(!is.null(showOut)){ out.trend <- out[2]+frequency(ts)*c(0:(floor(length(ts)/ frequency(ts))-1)) out.trend.x <- time(ts)[out.trend] out.trend.y <- ts[out.trend] col.out.trend <- rep(col_annComp,length(out.trend)) col.out.trend[grep(out[1],out.trend.x)]<-col.out if(annComp && annCompTrend){ lines(out.trend.x,out.trend.y,type="l",lty=1,lwd=lwd_out,col=col_annComp) lapply(1:length(out.trend),function(i)abline(v=out.trend.x[i],col=col.out.trend[i],lwd=lwd_out,lty=3)) points(out.trend.x[grep(out[1],out.trend.x)],out.trend.y[grep(out[1],out.trend.x)],col=col.out,pch=pch.out,cex=ceiling(cex_out/2),lwd=lwd_out) } if(!annComp &! 
annCompTrend){ abline(v=out.trend.x[grep(out[1],out.trend.x)],col=col.out,lty=3,lwd=lwd_out) points(out.trend.x[grep(out[1],out.trend.x)],out.trend.y[grep(out[1],out.trend.x)],col=col.out,pch=pch.out,cex=ceiling(cex_out/2),lwd=lwd_out) } if(!annComp && annCompTrend){ lines(out.trend.x,out.trend.y,type="l",lty=1,lwd=lwd_out,col=col_annComp) abline(v=out.trend.x[grep(out[1],out.trend.x)],col=col.out,lty=3,lwd=lwd_out) points(out.trend.x[grep(out[1],out.trend.x)],out.trend.y[grep(out[1],out.trend.x)],col=col.out,pch=pch.out,cex=ceiling(cex_out/2),lwd=lwd_out) } if(annComp &! annCompTrend){ lapply(1:length(out.trend),function(i)abline(v=out.trend.x[i],col=col.out.trend[i],lwd=lwd_out,lty=3)) points(out.trend.x[grep(out[1],out.trend.x)],out.trend.y[grep(out[1],out.trend.x)],col=col.out,pch=pch.out,cex=ceiling(cex_out/2),lwd=lwd_out) } }else if(showAllout){ points.temp<-lapply(1:(dim(out)[1]),function(x)points(window(ts, out[x,], out[x,], frequency=frequency(ts)),cex=ceiling(cex_out/2),lwd=lwd_out,col=col.out[x],pch=pch.out[x])) if(showAlloutLines) lines.temp<-lapply(1:(dim(out)[1]),function(x)abline(v=time(window(ts, out[x,], out[x,], frequency=frequency(ts))),col=col.out[x],lwd=lwd_out,lty=3)) } #else if(is.null(showOut) &! showAllout) { # # #} if showAllout | !is.null(showOut) # # plot(ts,ylim=c(min(ts,na.rm=TRUE),max(ts,na.rm=TRUE)*ytop),xlab=xlab,ylab=ylab,main=main,lwd=lwd_original,col=col_original) # if(original && sa) # lines(ts.sa,col=col_sa,type="l",lwd=lwd_sa) # if(original && trend) # lines(ts.trend,col=col_trend,type="l",lwd=lwd_trend) # if(sa && trend &!original) # lines(ts.trend,col=col_trend,type="l",lwd=lwd_trend) # # } ts<-ts.plot aT <- aL <- axTicks(1) if(!is.null(xlim)) tp <- expand.grid(floor(xlim[1]):ceiling(xlim[2]),(0:(frequency(ts)-1))/frequency(ts)) else tp <- expand.grid(floor(time(ts)[1]):ceiling(time(ts)[length(ts)]),(0:(frequency(ts)-1))/frequency(ts)) mm <- round(tp[,2]*frequency(ts)) yy <- tp[,1] tp <- tp[,1]+tp[,2] for(i in 1:length(aT)){ ii <- which.min(abs(tp-aT[i])) aT[i] <- tp[ii] if(mm[ii]<9) aL[i] <- yy[ii]+(mm[ii]+1)/10 else aL[i] <- yy[ii]+(mm[ii]+1)/100 } axis(1,at=aT,labels=aL) } if((!forecast || !backcast) && points_original && !plot_legend){ points(ts.plot,col=col_original,lwd=lwd_original) ts<-ts.plot aT <- aL <- axTicks(1) if(!is.null(xlim)) tp <- expand.grid(floor(xlim[1]):ceiling(xlim[2]),(0:(frequency(ts)-1))/frequency(ts)) else tp <- expand.grid(floor(time(ts)[1]):ceiling(time(ts)[length(ts)]),(0:(frequency(ts)-1))/frequency(ts)) mm <- round(tp[,2]*frequency(ts)) yy <- tp[,1] tp <- tp[,1]+tp[,2] for(i in 1:length(aT)){ ii <- which.min(abs(tp-aT[i])) aT[i] <- tp[ii] if(mm[ii]<9) aL[i] <- yy[ii]+(mm[ii]+1)/10 else aL[i] <- yy[ii]+(mm[ii]+1)/100 } axis(1,at=aT,labels=aL) } gp.new<-par() invisible(gp.new) gp <- gp[-which(names(gp)=="page")] par(gp) },finally=par(gp)) } ) setMethod(f='plot', signature=signature(x = "x12Single"), definition=function(x,original=TRUE,sa=FALSE,trend=FALSE, log_transform=FALSE, ylab="Value",xlab="Date", main="TS", col_original="black",col_sa="blue",col_trend="green", lwd_original=1,lwd_sa=1,lwd_trend=1,lty_sa=1,lty_trend=1, ytop=1,showAllout=FALSE,showAlloutLines=FALSE,showOut=NULL,annComp=TRUE,annCompTrend=TRUE, col_ao="red",col_ls="red",col_tc="red",col_annComp="grey",lwd_out=1,cex_out=1.5, pch_ao=4,pch_ls=2,pch_tc=23,plot_legend=TRUE,legend_horiz=TRUE,legend_bty="o", ### implement plotFbcast forecast=FALSE,backcast=FALSE,showCI=TRUE, col_fc="#2020ff",col_bc="#2020ff",col_ci="#d1d1ff",col_cishade="#d1d1ff", 
lty_original=1,lty_fc=2,lty_bc=2,lty_ci=1,lwd_fc=1,lwd_bc=1,lwd_ci=1, points_bc=FALSE,points_fc=FALSE,points_original=FALSE, showLine=FALSE,col_line="grey",lty_line=3,ylim=NULL,span=NULL,...){ plot(x@x12Output,original=original,sa=sa,trend=trend, log_transform=log_transform, ylab=ylab,xlab=xlab, main=main, col_original=col_original,col_sa=col_sa,col_trend=col_trend, lwd_original=lwd_original,lwd_sa=lwd_sa,lwd_trend=lwd_trend,lty_sa=lty_sa,lty_trend=lty_sa, ytop=ytop,showAllout=showAllout,showAlloutLines=showAlloutLines,showOut=showOut,annComp=annComp,annCompTrend=annCompTrend, col_ao=col_ao,col_ls=col_ls,col_tc=col_tc,col_annComp=col_annComp,lwd_out=lwd_out,cex_out=cex_out, pch_ao=pch_ao,pch_ls=pch_ls,pch_tc=pch_tc,plot_legend=plot_legend, legend_horiz=legend_horiz,legend_bty=legend_bty, ### implement plotFbcast forecast=forecast,backcast=backcast,showCI=showCI, col_fc=col_fc,col_bc=col_bc,col_ci=col_ci,col_cishade=col_cishade, lty_original=lty_original,lty_fc=lty_fc,lty_bc=lty_bc,lty_ci=lty_ci,lwd_fc=lwd_fc,lwd_bc=lwd_bc,lwd_ci=lwd_ci, points_bc=points_bc,points_fc=points_fc,points_original=points_original, showLine=showLine,col_line=col_line,lty_line=lty_line,ylim=ylim,span=span,...) } ) setMethod(f='plot', signature=signature(x = "x12Batch"), definition=function(x,what="ask",original=TRUE,sa=FALSE,trend=FALSE, log_transform=FALSE, ylab="Value",xlab="Date", main="TS", col_original="black",col_sa="blue",col_trend="green", lwd_original=1,lwd_sa=1,lwd_trend=1,lty_sa=1,lty_trend=1, ytop=1,showAllout=FALSE,showAlloutLines=FALSE,showOut=NULL,annComp=TRUE,annCompTrend=TRUE, col_ao="red",col_ls="red",col_tc="red",col_annComp="grey",lwd_out=1,cex_out=1.5, pch_ao=4,pch_ls=2,pch_tc=23,plot_legend=TRUE,legend_horiz=TRUE,legend_bty="o", ### implement plotFbcast forecast=FALSE,backcast=FALSE,showCI=TRUE, col_fc="#2020ff",col_bc="#2020ff",col_ci="#d1d1ff",col_cishade="#d1d1ff", lty_original=1,lty_fc=2,lty_bc=2,lty_ci=1,lwd_fc=1,lwd_bc=1,lwd_ci=1, points_bc=FALSE,points_fc=FALSE,points_original=FALSE, showLine=FALSE,col_line="grey",lty_line=3,ylim=NULL,span=NULL,...){ n <- length(x@x12List) if(what=="ask"){ ask=par("ask") par(ask=TRUE) for(i in 1:n){ mainB <- paste(x@x12List[[i]]@tsName,main,sep="-") plot(x@x12List[[i]]@x12Output,original=original,sa=sa,trend=trend, log_transform=log_transform, ylab=ylab,xlab=xlab, main=mainB, col_original=col_original,col_sa=col_sa,col_trend=col_trend, lwd_original=lwd_original,lwd_sa=lwd_sa,lwd_trend=lwd_trend,lty_sa=lty_sa,lty_trend=lty_sa, ytop=ytop,showAllout=showAllout,showAlloutLines=showAlloutLines,showOut=showOut,annComp=annComp,annCompTrend=annCompTrend, col_ao=col_ao,col_ls=col_ls,col_tc=col_tc,col_annComp=col_annComp,lwd_out=lwd_out,cex_out=cex_out, pch_ao=pch_ao,pch_ls=pch_ls,pch_tc=pch_tc,plot_legend=plot_legend, legend_horiz=legend_horiz,legend_bty=legend_bty, ### implement plotFbcast forecast=forecast,backcast=backcast,showCI=showCI, col_fc=col_fc,col_bc=col_bc,col_ci=col_ci,col_cishade=col_cishade, lty_original=lty_original,lty_fc=lty_fc,lty_bc=lty_bc,lty_ci=lty_ci,lwd_fc=lwd_fc,lwd_bc=lwd_bc,lwd_ci=lwd_ci, points_bc=points_bc,points_fc=points_fc,points_original=points_original, showLine=showLine,col_line=col_line,lty_line=lty_line,ylim=ylim,span=span,...) } } par(ask=ask) } ) setGeneric("plotRsdAcf", function(x, ...) 
{ standardGeneric("plotRsdAcf")} ) setMethod( f='plotRsdAcf', signature=signature(x = "x12Output"),definition=function(x,which="acf", xlab="Lag",ylab="ACF", main="default",col_acf="darkgrey",lwd_acf=4, col_ci="blue",lt_ci=2,ylim="default",...) { x <-x@dg if(which=="acf") which <- "rsd.acf" else if(which=="pacf") which <- "rsd.pacf" else if(which=="acf2") which <- "rsd.acf2" #lwd_bar=4,plot_legend=TRUE){ if(main=="default"){ if(which=="rsd.acf"){main <- "Autocorrelations of the Residuals"} else if(which=="rsd.pacf"){main <- "Partial Autocorrelations of the Residuals"} else if(which=="rsd.acf2"){main <- "Autocorrelations of the Squared Residuals"} } if(!is.null(x[[which]])){ if(ylim=="default"){ ylim<-c(-max(abs(x[[which]][[grep("sample",names(x[[which]]),value=TRUE)]]),2*x[[which]][[grep("stderr",names(x[[which]]),value=TRUE)]]),max(abs(x[[which]][[grep("sample",names(x[[which]]),value=TRUE)]]),2*x[[which]][[grep("stderr",names(x[[which]]),value=TRUE)]])) } if(which=="rsd.pacf") ylab="Partial ACF" plot(x[[which]]$lag,x[[which]][[grep("sample",names(x[[which]]),value=TRUE)]],type="h",xlab=xlab,ylab=ylab,main=main,col=col_acf,ylim=ylim,lwd=lwd_acf,xaxt="n",...) if(length(x[[which]]$lag)%in%c(12,24)){ aT <- c(6,12,18,24) axis(side=1,at=aT) }else{ aT <- c(4,8,12,16) axis(side=1,at=aT) } abline(h=0,col="black") lines(x[[which]]$lag,2*x[[which]][[grep("stderr",names(x[[which]]),value=TRUE)]],type="l",col=col_ci,lty=lt_ci) lines(x[[which]]$lag,-2*x[[which]][[grep("stderr",names(x[[which]]),value=TRUE)]],type="l",col=col_ci,lty=lt_ci) } else{ plot(1:10, type = "n", xaxt="n", yaxt="n", xlab="", ylab="", main=main) text(5.5,5.5,"Not Available") } } ) setMethod(f='plotRsdAcf', signature=signature(x = "x12Single"), definition=function(x,which="acf", xlab="Lag",ylab="ACF", main="default",col_acf="darkgrey",lwd_acf=4, col_ci="blue",lt_ci=2,ylim="default",...){ plotRsdAcf(x@x12Output,which=which, xlab=xlab,ylab=ylab, main=main,col_acf=col_acf,lwd_acf=lwd_acf, col_ci=col_ci,lt_ci=lt_ci,ylim=ylim,...) } ) setGeneric("plotSeasFac", function(x, ...) { standardGeneric("plotSeasFac")} ) setMethod( f='plotSeasFac', signature=signature(x = "x12Output"),definition=function(x,SI_Ratios=TRUE,ylab="Value",xlab="", lwd_seasonal=1,col_seasonal="black",lwd_mean=1,col_mean="blue",col_siratio="darkgreen", col_replaced="red",cex_siratio=.9,cex_replaced=.9,SI_Ratios_replaced=TRUE,plot_legend=TRUE, legend_horiz=FALSE,legend_bty="o", ...) 
{ if(!SI_Ratios) v <- as.vector(x@d10) # Seasonal Factors else v <- as.vector(x@d10)[1:length(x@d8)] # Seasonal Factors without forecast f <- frequency(x@d10) dif <- length(v)%%f if(dif>0) v[length(v)+(1:(f-dif))]<-NA out_matrix <- matrix(v,ncol=f,byrow=TRUE) if(f==12){ lab <- c("Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec") }else if(f==4){ lab <- c("Qtr1","Qtr2","Qtr3","Qtr4") }else if(f==2){ lab <- c("1st Half","2nd Half") }else{ lab <- 1:f } if("main"%in%names(list(...))){ main <- list(...)[["main"]] }else{ if(SI_Ratios){ main="Seasonal Factors by period and SI Ratios" }else{ main="Seasonal Factors by period" } } #ylim <- c(min(v,na.rm=TRUE)*.95,max(v,na.rm=TRUE)*1.09) if(!"ylim"%in%names(list(...))) ylim <- c(min(v,na.rm=TRUE)*.99,max(v,na.rm=TRUE)*1.01) xlim <- c(0,f) gp<-par() for(i in c("cin","cra","csi","cxy","din")){ gp <- gp[-which(names(gp)%in%i)] } tryCatch({ if(plot_legend){ par(mar = c(4, 4, 4, 2) + 0.1) layout(matrix(c(rep(1,16),2,2),nrow=9,byrow=TRUE)) cex_siratio<-cex_siratio*1.5 cex_replaced <-cex_replaced*1.5 } if(!"ylim"%in%names(list(...))){ if(!"main"%in%names(list(...))) plot(1,type="n",main=main,xlim=xlim,ylim=ylim,xaxt="n",ylab=ylab,xlab=xlab,cex=cex_siratio,...) else plot(1,type="n",xlim=xlim,ylim=ylim,xaxt="n",ylab=ylab,xlab=xlab,cex=cex_siratio,...) }else{ if(!"main"%in%names(list(...))) plot(1,type="n",main=main,xlim=xlim,xaxt="n",ylab=ylab,xlab=xlab,cex=cex_siratio,...) else plot(1,type="n",xlim=xlim,xaxt="n",ylab=ylab,xlab=xlab,cex=cex_siratio,...) } axis(1,at=(1:f)-1/2,labels=lab) for(i in 0:(f)){ abline(v=i,col="grey") } if(SI_Ratios){ vv <- as.vector(x@d8) #final unmodified SI Ratios dif <- length(vv)%%f if(dif>0) vv[length(vv)+(1:(f-dif))]<-NA out_matrix2 <- matrix(vv,ncol=f,byrow=TRUE) vvv <- as.vector(x@d9) # final replacement for SI Ratios dif <- length(vvv)%%f if(dif>0) vvv[length(vvv)+(1:(f-dif))]<-NA out_matrix3 <- matrix(vvv,ncol=f,byrow=TRUE) } for(i in 0:(f-1)){ s <- seq(.1+i,(i+1)-.1,l=nrow(out_matrix)) m <- mean(out_matrix[,i+1],na.rm=TRUE) points(rep(m,2)~c(s[1],s[length(s)]),type="l",col=col_mean,lwd=lwd_mean) points(out_matrix[,i+1]~s,type="l",col=col_seasonal,lwd=lwd_seasonal) if(SI_Ratios){ points(out_matrix2[,i+1]~s,pch=20,cex=cex_siratio,col=col_siratio) if(SI_Ratios_replaced) points(out_matrix3[,i+1]~s,pch=20,cex=cex_replaced,col=col_replaced) } } if(plot_legend){ par(mar = c(0, 0, 0, 0)) if(!"ylim"%in%names(list(...))) plot(1,xlim=xlim,ylim=ylim,type = "n", axes = FALSE, bty="n", ann = FALSE, ...) else plot(1,xlim=xlim,type = "n", axes = FALSE, bty="n", ann = FALSE, ...) 
if(SI_Ratios){ if(SI_Ratios_replaced) legend("center",legend=c("Seasonal Factors","Mean","SI Ratio","Replaced SI Ratio"),col=c(col_seasonal,col_mean,col_siratio,col_replaced),pch=c(NA,NA,20,20), lty=c(1,1,NA,NA),bg="white",pt.cex=1.4,horiz=legend_horiz,bty=legend_bty) else legend("center",legend=c("Seasonal Factors","Mean","SI Ratio"), col=c(col_seasonal,col_mean,col_siratio),pch=c(NA,NA,20), lty=c(1,1,NA),bg="white",pt.cex=1.4,horiz=legend_horiz,bty=legend_bty) }else legend("center",legend=c("Seasonal Factors","Mean"),col=c(col_seasonal,col_mean), lty=c(1,1),bg="white",horiz=legend_horiz,bty=legend_bty) } gp.new<-par() invisible(gp.new) gp <- gp[-which(names(gp)=="page")] par(gp)},finally=par(gp)) } ) setMethod(f='plotSeasFac', signature=signature(x = "x12Single"), definition=function(x,SI_Ratios=TRUE,ylab="Value",xlab="", lwd_seasonal=1,col_seasonal="black",lwd_mean=1,col_mean="blue",col_siratio="darkgreen", col_replaced="red",cex_siratio=.9,cex_replaced=.9,SI_Ratios_replaced=TRUE,plot_legend=TRUE,legend_horiz=FALSE,legend_bty="o",...){ plotSeasFac(x@x12Output,SI_Ratios=SI_Ratios,ylab=ylab,xlab=xlab, lwd_seasonal=lwd_seasonal,col_seasonal=col_seasonal,lwd_mean=lwd_mean,col_mean=col_mean, col_siratio=col_siratio,col_replaced=col_replaced,cex_siratio=cex_siratio,cex_replaced=cex_replaced, SI_Ratios_replaced=SI_Ratios_replaced,plot_legend=plot_legend,legend_horiz=legend_horiz,legend_bty=legend_bty,...) } ) setGeneric("plotSpec", function(x, ...) { standardGeneric("plotSpec")} ) setMethod( f='plotSpec', signature=signature(x = "x12Output"),definition=function(x,which="sa", xlab="Frequency",ylab="Decibels", main="Spectrum",highlight=TRUE, col_bar="darkgrey",col_seasonal="red",col_td="blue", lwd_bar=4,lwd_seasonal=4,lwd_td=4,plot_legend=TRUE, legend_horiz=TRUE,legend_bty="o", ...) { if(main=="Spectrum"){ if(which=="sa") main <- "Spectrum of the Seasonally Adjusted Series" else if(which=="original") main <- "Spectrum of the Original Series" else if(which=="irregular") main <- "Spectrum of the Irregular" else if(which=="residuals") main <- "Spectrum of the RegARIMA Residuals" } f<-frequency(x@a1) if(which=="sa"){ which <- "sp1" x <- slot(x,which) }else if(which=="original"){ which <- "sp0" x <- slot(x,which) }else if(which=="irregular"){ which <- "sp2" x <- slot(x,which) }else if(which=="residuals"){ which <- "spr" x <- slot(x,which) } #out[[which]]$frequency plot_spectrum_work(x@frequency,x@spectrum,xlab=xlab,ylab=ylab,f=f, main=main,highlight=highlight, col_bar=col_bar,col_seasonal=col_seasonal,col_td=col_td, lwd_bar=lwd_bar,lwd_seasonal=lwd_seasonal,lwd_td=lwd_td,plot_legend=plot_legend, legend_horiz=legend_horiz,legend_bty=legend_bty, ...) }) setMethod(f='plotSpec', signature=signature(x = "x12Single"), definition=function(x,which="sa", xlab="Frequency",ylab="Decibels", main="Spectrum",highlight=TRUE, col_bar="darkgrey",col_seasonal="red",col_td="blue", lwd_bar=4,lwd_seasonal=4,lwd_td=4,plot_legend=TRUE, legend_horiz=TRUE,legend_bty="o", ...){ plotSpec(x@x12Output,which=which, xlab=xlab,ylab=ylab, main=main,highlight=highlight, col_bar=col_bar,col_seasonal=col_seasonal,col_td=col_td, lwd_bar=lwd_bar,lwd_seasonal=lwd_seasonal,lwd_td=lwd_td,plot_legend=plot_legend, legend_horiz=legend_horiz,legend_bty=legend_bty, ...) 
} ) setMethod( f='plotSpec', signature=signature(x = "spectrum"),definition=function(x,frequency, xlab="Frequency",ylab="Decibels", main="Spectrum",highlight=TRUE, col_bar="darkgrey",col_seasonal="red",col_td="blue", lwd_bar=4,lwd_seasonal=4,lwd_td=4,plot_legend=TRUE, legend_horiz=TRUE,legend_bty="o", ...) { # myfun <- function(x) deparse(substitute(x)) # which=NULL # if(main=="Spectrum" && !is.null(which)){ # if(which=="sa") # main <- "Spectrum of the Seasonally Adjusted Series" # else if(which=="original") # main <- "Spectrum of the Original Series" # else if(which=="irregular") # main <- "Spectrum of the Irregular" # else if(which=="residuals") # main <- "Spectrum of the RegARIMA Residuals" # } f<-frequency #out[[which]]$frequency plot_spectrum_work(x@frequency,x@spectrum,xlab=xlab,ylab=ylab,f=f, main=main,highlight=highlight, col_bar=col_bar,col_seasonal=col_seasonal,col_td=col_td, lwd_bar=lwd_bar,lwd_seasonal=lwd_seasonal,lwd_td=lwd_td,plot_legend=plot_legend, legend_horiz=legend_horiz,legend_bty=legend_bty, ...) }) setGeneric("plotFbcast", function(object, ...) { standardGeneric("plotFbcast")} ) setMethod( f='plotFbcast', signature=signature(object = "x12Output"),definition=function(object,showCI=TRUE, main="Time Series",forecast=TRUE,backcast=TRUE,log_transform=FALSE, col_original="black",col_fc="#2020ff",col_bc="#2020ff", col_ci="#d1d1ff",col_cishade="#d1d1ff", lty_original=1,lty_fc=2,lty_bc=2,lty_ci=1, lwd_original=1,lwd_fc=1,lwd_bc=1,lwd_ci=1,ytop=1, points_bc=FALSE,points_fc=FALSE,points_original=FALSE, showLine=FALSE,col_line="grey",lty_line=3, ylab="Value",xlab="Date",ylim=NULL,xlim=NULL,showWarnings=TRUE,...) { ts.plot <- ts <- object@a1 fc <- object@forecast@estimate bc <- object@backcast@estimate lci_fc <- object@forecast@lowerci uci_fc <- object@forecast@upperci lci_bc <- object@backcast@lowerci uci_bc <- object@backcast@upperci if(log_transform){ ts.plot <- ts <- log(ts) fc <- log(fc) bc <- log(bc) lci_fc <- log(lci_fc) uci_fc <- log(uci_fc) lci_bc <- log(lci_bc) uci_bc <- log(uci_bc) } if((!forecast &! backcast) | (forecast && is.na(fc[1]) &! backcast) | (backcast && is.na(bc[1]) &! forecast) | (forecast && is.na(fc[1]) && backcast && is.na(bc[1]))){ if(is.null(ylim)) ylim<-c(min(ts,na.rm=TRUE),max(ts,na.rm=TRUE)*ytop) plot(ts,xlim=xlim,ylim=ylim,main=main,xlab=xlab,ylab=ylab,lwd=lwd_original,col=col_original,lty=lty_original,xaxt="n",...) if(showLine) abline(v=time(ts)[length(ts)],col=col_line,lty=lty_line) if(points_original) points(ts,col=col_original,lwd=lwd_original) if(showWarnings) cat("No forecasts or backcasts plotted!\n") } if(forecast && is.na(fc[1]) && showWarnings) cat("Warning: No forecasts available for plotting.\n") if(backcast && is.na(bc[1]) && showWarnings) cat("Warning: No backcasts available for plotting.\n") if(forecast &! 
is.na(fc[1]) && (!backcast | is.na(bc[1]))){ ts.fc<-ts(c(ts[length(ts)],fc),start=end(ts),end=end(fc),frequency=frequency(ts)) ts.plot<- ts(c(ts,fc),start=start(ts),end=end(fc),frequency=frequency(ts)) if(main=="Time Series") main<-"Time Series with Forecasts" if(showCI){ limits.y<-c(min(ts,ts.fc,lci_fc,uci_fc,na.rm=TRUE),max(ts,ts.fc,lci_fc,uci_fc,na.rm=TRUE)*ytop) limits.x<-c(min(time(ts),time(ts.fc),na.rm=TRUE),max(time(ts),time(ts.fc),na.rm=TRUE)) }else{ limits.y<-c(min(ts,ts.fc,na.rm=TRUE),max(ts,ts.fc,na.rm=TRUE)*ytop) limits.x<-c(min(time(ts),time(ts.fc),na.rm=TRUE),max(time(ts),time(ts.fc),na.rm=TRUE)) } # leg.txt <- c(leg.txt,"Original TS") # leg.col <- c(leg.col,col_original) if(is.null(ylim)){ if(is.null(xlim)){ plot(ts,ylim=limits.y,xlim=limits.x,main=main,xlab=xlab,ylab=ylab,lwd=lwd_original,col=col_original,lty=lty_original,xaxt="n",...) }else{ plot(ts,xlim=xlim,ylim=limits.y,main=main,xlab=xlab,ylab=ylab,lwd=lwd_original,col=col_original,lty=lty_original,xaxt="n",...) } }else{ #ylim<-c(min(limits.y[1],ylim[1]),max(limits.y[2],ylim[2])) if(is.null(xlim)){ plot(ts,ylim=ylim,xlim=limits.x,main=main,xlab=xlab,ylab=ylab,lwd=lwd_original,col=col_original,lty=lty_original,xaxt="n",...) }else{ plot(ts,xlim=xlim,ylim=ylim,main=main,xlab=xlab,ylab=ylab,lwd=lwd_original,col=col_original,lty=lty_original,xaxt="n",...) } } if(showCI){ yy <- as.numeric(uci_fc) yCI=c(as.numeric(lci_fc),yy[length(time(yy)):1]) xCI=c(time(lci_fc),time(lci_fc)[length(time(yy)):1]) # yCI=c(yCI[length(yCI)],yCI)#,yCI[1]) # xCI=c(xCI[length(xCI)],xCI)#,xCI[1]) polygon(xCI,yCI,col=col_cishade,border=NA) lines(lci_fc,col=col_ci,lty=lty_ci,lwd=lwd_ci) lines(uci_fc,col=col_ci,lty=lty_ci,lwd=lwd_ci) if(length(lci_fc)==1){ lines(x=rep(time(ts.fc)[length(ts.fc)],2),y=c(ts.fc[length(ts.fc)],lci_fc),col=col_ci,lty=lty_ci,lwd=lwd_ci,type="o",pch=c(NA,"_") ) lines(x=rep(time(ts.fc)[length(ts.fc)],2),y=c(ts.fc[length(ts.fc)],uci_fc),col=col_ci,lty=lty_ci,lwd=lwd_ci,type="o",pch=c(NA,"_")) } } lines(ts.fc,col=col_fc,lty=lty_fc,lwd=lwd_fc) if(showLine) abline(v=time(ts)[length(ts)],col=col_line,lty=lty_line) if(points_fc) points(fc,col=col_fc,lwd=lwd_fc) if(points_original) points(ts,col=col_original,lwd=lwd_original) } if(backcast &! is.na(bc[1]) && (!forecast | is.na(fc[1]))){ ts.bc<-ts(c(bc,ts[1]),start=start(bc),end=start(ts),frequency=frequency(ts)) ts.plot<- ts(c(bc,ts),start=start(bc),end=end(ts),frequency=frequency(ts)) if(main=="Time Series") main<-"Time Series with Backcasts" # leg.txt <- c(leg.txt,"Original TS") # leg.col <- c(leg.col,col_original) if(showCI){ limits.y<-c(min(ts,ts.bc,lci_bc,na.rm=TRUE),max(ts,ts.bc,uci_bc,na.rm=TRUE)*ytop) limits.x<-c(min(time(ts),time(ts.bc),na.rm=TRUE),max(time(ts),time(ts.bc),na.rm=TRUE)) }else{ limits.y<-c(min(ts,ts.bc,na.rm=TRUE),max(ts,ts.bc,na.rm=TRUE)*ytop) limits.x<-c(min(time(ts),time(ts.bc),na.rm=TRUE),max(time(ts),time(ts.bc),na.rm=TRUE)) } if(is.null(ylim)){ if(is.null(xlim)){ plot(ts,ylim=limits.y,xlim=limits.x,main=main,xlab=xlab,ylab=ylab,lwd=lwd_original,col=col_original,lty=lty_original,xaxt="n",...) }else{ plot(ts,xlim=xlim,ylim=limits.y,main=main,xlab=xlab,ylab=ylab,lwd=lwd_original,col=col_original,lty=lty_original,xaxt="n",...) } }else{ #ylim<-c(min(limits.y[1],ylim[1]),max(limits.y[2],ylim[2])) if(is.null(xlim)){ plot(ts,ylim=ylim,xlim=limits.x,main=main,xlab=xlab,ylab=ylab,lwd=lwd_original,col=col_original,lty=lty_original,xaxt="n",...) 
}else{ plot(ts,xlim=xlim,ylim=ylim,main=main,xlab=xlab,ylab=ylab,lwd=lwd_original,col=col_original,lty=lty_original,xaxt="n",...) } } if(showLine) abline(v=time(ts)[1],col=col_line,lty=lty_line) if(showCI){ yy <- as.numeric(uci_bc) yy <- yy[length(yy):1] yCI=c(as.numeric(lci_bc),yy) xCI=c(time(lci_bc),time(lci_bc)[length(yy):1]) polygon(xCI,yCI,col=col_cishade,border=NA) lines(lci_bc,col=col_ci,lty=lty_ci,lwd=lwd_ci) lines(uci_bc,col=col_ci,lty=lty_ci,lwd=lwd_ci) if(length(lci_bc)==1){ lines(x=rep(time(ts.bc)[length(ts.bc)],2),y=c(ts.bc[length(ts.bc)],lci_bc),col=col_ci,lty=lty_ci,lwd=lwd_ci,type="o",pch=c(NA,"_")) lines(x=rep(time(ts.bc)[length(ts.bc)],2),y=c(ts.bc[length(ts.bc)],uci_bc),col=col_ci,lty=lty_ci,lwd=lwd_ci,type="o",pch=c(NA,"_")) } } lines(ts.bc,col=col_bc,lty=lty_bc,lwd=lwd_bc) if(points_bc) points(bc,col=col_bc,lwd=lwd_bc) if(points_original) points(ts,col=col_original,lwd=lwd_original) } if(forecast &! is.na(fc[1]) && backcast &! is.na(bc[1])){ ts.fc<-ts(c(ts[length(ts)],fc),start=end(ts),end=end(fc),frequency=frequency(ts)) ts.bc<-ts(c(bc,ts[1]),start=start(bc),end=start(ts),frequency=frequency(ts)) ts.plot<- ts(c(bc,ts,fc),start=start(bc),end=end(fc),frequency=frequency(ts)) if(main=="Time Series") main<-"Time Series with Back- and Forecasts" # leg.txt <- c(leg.txt,"Original TS") # leg.col <- c(leg.col,col_original) if(showCI){ limits.y<-c(min(ts,ts.fc,ts.bc,lci_bc,lci_bc,lci_fc,uci_fc,na.rm=TRUE),max(ts,ts.fc,ts.bc,lci_bc,lci_bc,lci_fc,uci_fc,na.rm=TRUE)*ytop) limits.x<-c(min(time(ts),time(ts.fc),time(ts.bc),na.rm=TRUE),max(time(ts),time(ts.fc),time(ts.bc),na.rm=TRUE)) }else{ limits.y<-c(min(ts,ts.fc,ts.bc,na.rm=TRUE),max(ts,ts.fc,ts.bc,na.rm=TRUE)*ytop) limits.x<-c(min(time(ts),time(ts.fc),time(ts.bc),na.rm=TRUE),max(time(ts),time(ts.fc),time(ts.bc),na.rm=TRUE)) } if(is.null(ylim)){ if(is.null(xlim)){ plot(ts,ylim=limits.y,xlim=limits.x,main=main,xlab=xlab,ylab=ylab,lwd=lwd_original,col=col_original,lty=lty_original,xaxt="n",...) }else{ plot(ts,xlim=xlim,ylim=limits.y,main=main,xlab=xlab,ylab=ylab,lwd=lwd_original,col=col_original,lty=lty_original,xaxt="n",...) } }else{ #ylim<-c(min(limits.y[1],ylim[1]),max(limits.y[2],ylim[2])) if(is.null(xlim)){ plot(ts,ylim=ylim,xlim=limits.x,main=main,xlab=xlab,ylab=ylab,lwd=lwd_original,col=col_original,lty=lty_original,xaxt="n",...) }else{ plot(ts,xlim=xlim,ylim=ylim,main=main,xlab=xlab,ylab=ylab,lwd=lwd_original,col=col_original,lty=lty_original,xaxt="n",...) 
} } if(showLine){ abline(v=time(ts)[length(ts)],col=col_line,lty=lty_line) abline(v=time(ts)[1],col=col_line,lty=lty_line) } if(showCI){ yy.fc <- as.numeric(uci_fc) yy.fc <- yy.fc[length(yy.fc):1] yCI.fc=c(as.numeric(lci_fc),yy.fc) xCI.fc=c(time(lci_fc),time(lci_fc)[length(yy.fc):1]) polygon(xCI.fc,yCI.fc,col=col_cishade,border=NA) yy.bc <- as.numeric(uci_bc) yy.bc <- yy.bc[length(yy.bc):1] yCI.bc=c(as.numeric(lci_bc),yy.bc) xCI.bc=c(time(lci_bc),time(lci_bc)[length(yy.bc):1]) polygon(xCI.bc,yCI.bc,col=col_cishade,border=NA) lines(lci_fc,col=col_ci,lty=lty_ci,lwd=lwd_ci) lines(uci_fc,col=col_ci,lty=lty_ci,lwd=lwd_ci) lines(lci_bc,col=col_ci,lty=lty_ci,lwd=lwd_ci) lines(uci_bc,col=col_ci,lty=lty_ci,lwd=lwd_ci) if(length(lci_bc)==1){ lines(x=rep(time(ts.bc)[length(ts.bc)],2),y=c(ts.bc[length(ts.bc)],lci_bc),col=col_ci,lty=lty_ci,lwd=lwd_ci,type="o",pch=c(NA,"_")) lines(x=rep(time(ts.bc)[length(ts.bc)],2),y=c(ts.bc[length(ts.bc)],uci_bc),col=col_ci,lty=lty_ci,lwd=lwd_ci,type="o",pch=c(NA,"_")) } if(length(lci_fc)==1){ lines(x=rep(time(ts.fc)[length(ts.fc)],2),y=c(ts.fc[length(ts.fc)],lci_fc),col=col_ci,lty=lty_ci,lwd=lwd_ci,type="o",pch=c(NA,"_") ) lines(x=rep(time(ts.fc)[length(ts.fc)],2),y=c(ts.fc[length(ts.fc)],uci_fc),col=col_ci,lty=lty_ci,lwd=lwd_ci,type="o",pch=c(NA,"_")) } } lines(ts.fc,col=col_fc,lty=lty_fc,lwd=lwd_fc) lines(ts.bc,col=col_bc,lty=lty_bc,lwd=lwd_bc) if(points_fc) points(fc,col=col_fc,lwd=lwd_fc) if(points_bc) points(bc,col=col_bc,lwd=lwd_bc) if(points_original) points(ts,col=col_original,lwd=lwd_original) } invisible(ts.plot) } ) setMethod(f='plotFbcast', signature=signature(object = "x12Single"), definition=function(object,showCI=TRUE, main="Time Series",forecast=TRUE,backcast=TRUE,log_transform=FALSE, col_original="black",col_fc="#2020ff",col_bc="#2020ff", col_ci="#d1d1ff",col_cishade="#d1d1ff", lty_original=1,lty_fc=2,lty_bc=2,lty_ci=1, lwd_original=1,lwd_fc=1,lwd_bc=1,lwd_ci=1,ytop=1, points_bc=FALSE,points_fc=FALSE,points_original=FALSE, showLine=FALSE,col_line="grey",lty_line=3, ylab="Value",xlab="Date",ylim=NULL,xlim=NULL,showWarnings=TRUE,...){ plotFbcast(object@x12Output,showCI=showCI, main=main,forecast=forecast,backcast=backcast,log_transform=log_transform, col_original=col_original,col_fc=col_fc,col_bc=col_bc, col_ci=col_ci,col_cishade=col_cishade,points_original=points_original, lty_original=lty_original,lty_fc=lty_fc,lty_bc=lty_bc,lty_ci=lty_ci, lwd_original=lwd_original,lwd_fc=lwd_fc,lwd_bc=lwd_bc,lwd_ci=lwd_ci, ytop=ytop,points_bc=points_bc,points_fc=points_fc, showLine=showLine,col_line=col_line,lty_line=lty_line, ylab=ylab,xlab=xlab,ylim=ylim,xlim=xlim,showWarnings=showWarnings,...) } )
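## ---------------------------------------------------------------------------
## Minimal usage sketch for the plotFbcast() methods defined above, kept in an
## `if (FALSE)` block so it is never executed when the file is sourced.  The
## AirPassengers data set and the parameter values are assumptions for
## illustration only; the X-12-ARIMA binary must be installed and its path
## configured for the package before x12() can run.
if (FALSE) {
  p <- new("x12Parameter")
  p <- setP(p, list(forecast_years = 2, backcast_years = 1))
  s <- new("x12Single", ts = AirPassengers, x12Parameter = p, tsName = "air")
  s <- x12(s)   # run the X-12-ARIMA binary first
  # original series plus fore-/backcasts with shaded confidence bands
  plotFbcast(s, forecast = TRUE, backcast = TRUE, showCI = TRUE,
             points_fc = TRUE, showLine = TRUE)
}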
/scratch/gouwar.j/cran-all/cranData/x12/R/plot-methods.R
plot.x12work<-function(x,plots=c(1:9),...){ #plots 1: Original #plots 2: Original Trend Adjusted #plots 3: Log Original #plots 4: Seasonal Factors #plots 5: Seasonal Factors with SI Ratios #plots 6: Spectrum Adjusted Orig #plots 7: Spectrum Seasonal Adjusted #plots 8: Spectrum Irregular #plots 9: Spectrum Residulas par(ask=TRUE) if(x$seats) plots <- plots [apply(cbind(!plots==3,!plots==4,!plots==5),1,all)] if(any(plots==1)){ plot_original(x) } if(any(plots==2)){ plot_original_seasonal_trend(x) } if(any(plots==3)){ plot_original(x,log_transform=TRUE) } if(any(plots==4)){ plot_seasonal_factors(x,SI_Ratios=FALSE) } if(any(plots==5)){ plot_seasonal_factors(x) } if(any(plots==6)){ plot_spectrum(x,which="original") } if(any(plots==7)){ plot_spectrum(x,which="seasonaladj") } if(any(plots==8)){ plot_spectrum(x,which="irregular") } if(any(plots==9)){ plot_spectrum(x,which="residuals") } par(ask=FALSE) } plot_seasonal_factors <- function(out,SI_Ratios=TRUE,ylab="Value",xlab="",lwd_seasonal=1, col_seasonal="black",lwd_mean=1,col_mean="blue",col_siratio="darkgreen",col_replaced="red", cex_siratio=.9,cex_replaced=.9,SI_Ratios_replaced=TRUE, plot_legend=TRUE,legend_horiz=FALSE,legend_bty="o", ...){ if(!SI_Ratios) v <- as.vector(out[["d10"]]) # Seasonal Factors else v <- as.vector(out[["d10"]])[1:length(out[["d8"]])] # Seasonal Factors without forecast f <- frequency(out[["d10"]]) dif <- length(v)%%f if(dif>0) v[length(v)+(1:(f-dif))]<-NA out_matrix <- matrix(v,ncol=f,byrow=TRUE) if(f==12){ lab <- c("Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec") }else if(f==4){ lab <- c("Qtr1","Qtr2","Qtr3","Qtr4") }else if(f==2){ lab <- c("1st Half","2nd Half") }else{ lab <- 1:f } if(SI_Ratios){ main="Seasonal Factors by period and SI Ratios" }else{ main="Seasonal Factors by period" } ylim <- c(min(v,na.rm=TRUE)*.95,max(v,na.rm=TRUE)*1.09) xlim <- c(0,f) plot(1,type="n",main=main,xlim=xlim,ylim=ylim,xaxt="n",ylab=ylab,xlab=xlab,cex=cex_siratio,...) 
axis(1,at=(1:f)-1/2,labels=lab) for(i in 0:(f)){ abline(v=i,col="grey") } if(SI_Ratios){ vv <- as.vector(out[["d8"]]) #final unmodified SI Ratios dif <- length(vv)%%f if(dif>0) vv[length(vv)+(1:(f-dif))]<-NA out_matrix2 <- matrix(vv,ncol=f,byrow=TRUE) vvv <- as.vector(out[["d9"]]) # final replacement for SI Ratios dif <- length(vvv)%%f if(dif>0) vvv[length(vvv)+(1:(f-dif))]<-NA out_matrix3 <- matrix(vvv,ncol=f,byrow=TRUE) } for(i in 0:(f-1)){ s <- seq(.1+i,(i+1)-.1,l=nrow(out_matrix)) m <- mean(out_matrix[,i+1],na.rm=TRUE) points(rep(m,2)~c(s[1],s[length(s)]),type="l",col=col_mean,lwd=lwd_mean) points(out_matrix[,i+1]~s,type="l",col=col_seasonal,lwd=lwd_seasonal) if(SI_Ratios){ points(out_matrix2[,i+1]~s,pch=20,cex=cex_siratio,col=col_siratio) if(SI_Ratios_replaced) points(out_matrix3[,i+1]~s,pch=20,cex=cex_replaced,col=col_replaced) } } if(plot_legend){ if(SI_Ratios){ if(SI_Ratios_replaced) legend(x=(f/2)-1,y=ylim[2],legend=c("Seasonal Factors","Mean","SI Ratio","Replaced SI Ratio"), col=c(col_seasonal,col_mean,col_siratio,col_replaced),pch=c(NA,NA,20,20), lty=c(1,1,NA,NA),bg="white",horiz=legend_horiz,bty=legend_bty) else legend(x=(f/2)-1,y=ylim[2],legend=c("Seasonal Factors","Mean","SI Ratio"), col=c(col_seasonal,col_mean,col_siratio),pch=c(NA,NA,20), lty=c(1,1,NA),bg="white",horiz=legend_horiz,bty=legend_bty) }else legend(x=(f/2)-1,y=ylim[2],legend=c("Seasonal Factors","Mean"),col=c(col_seasonal,col_mean), lty=c(1,1),bg="white",horiz=legend_horiz,bty=legend_bty) } } plot_original <- function(out,ylab="Value",xlab="Date", main=if(!log_transform){"Original Series"}else{"Logs of the Original Series"}, col="black",ytop=1,log_transform=FALSE,...){ if(!log_transform) ts <- out[["a1"]] else ts <- log(out[["a1"]]) plot(ts,ylim=c(min(ts,na.rm=TRUE),max(ts,na.rm=TRUE)*ytop),xlab=xlab,ylab=ylab,main=main,col=col,...) } plot_original_seasonal_trend <- function(out,ylab="Value",xlab="Date", main="Original Series, Seasonally Adjusted Series and Trend", col_original="black",col_seasonaladj="blue",col_trend="green", lwd_original=1,lwd_seasonaladj=1,lwd_trend=1, seasonaladj=TRUE,trend=TRUE,original=TRUE,plot_legend=TRUE, legend_horiz=TRUE,legend_bty="o", log_transform=FALSE,...){ if(original) plot_original(out,ytop=1.1,col=col_original,main=main,xlab=xlab,ylab=ylab,lwd=lwd_original, log_transform=log_transform,...) else plot_original(out,ytop=1.1,col=col_original,main=main,xlab=xlab,ylab=ylab, lwd=lwd_original,log_transform=log_transform,type="n",...) 
text_leg <- vector() col_leg <- vector() if(original){ text_leg <- "Original" col_leg <- c(col_original) } if(seasonaladj){ ts_adj <- out[["d11"]] if(log_transform) ts_adj <- log(ts_adj) points(ts_adj,col=col_seasonaladj,type="l",lwd=lwd_seasonaladj) text_leg <- c(text_leg,"Seasonally Adjusted") col_leg <- c(col_leg,col_seasonaladj) } if(trend){ ts_trend <- out[["d12"]] if(log_transform) ts_trend <- log(ts_trend) points(ts_trend,col=col_trend,type="l",lwd=lwd_trend) text_leg <- c(text_leg,"Trend") col_leg <- c(col_leg,col_trend) } lty <- rep(1,length(col_leg)) if(plot_legend){ if(!log_transform){ legend(x=start(out[["a1"]])[1],y=max(out[["a1"]]*1.05,na.rm=TRUE)*1.05,lty=lty,legend=text_leg, col=col_leg,horiz=legend_horiz,bty=legend_bty) }else{ legend(x=start(out[["a1"]])[1],y=log(max(out[["a1"]]*1.05,na.rm=TRUE))*1.05,lty=lty, legend=text_leg,col=col_leg,horiz=legend_horiz,bty=legend_bty) } } } plot_spectrum <- function(out,which="seasonaladj",xlab="Frequency",ylab="Decibels", main="default", col_bar="darkgrey",col_seasonal="red",col_td="blue", lwd_bar=4,lwd_seasonal=4,lwd_td=4,plot_legend=TRUE, legend_horiz=TRUE,legend_bty="o", ...){ if(which=="seasonaladj") which <- "sp1" else if(which=="original") which <- "sp0" else if(which=="irregular") which <- "sp2" else if(which=="residuals") which <- "spr" if(main=="default"){ if(which=="sp1") main <- "Spectrum of the Seasonally Adjusted Series" else if(which=="sp0") main <- "Spectrum of the Original Series" else if(which=="sp2") main <- "Spectrum of the Irregular" else if(which=="spr") main <- "Spectrum of the RegARIMA Residuals" } plot_spectrum_work(out[[which]]$frequency,out[[which]]$spectrum,xlab=xlab,ylab=ylab, main=main, col_bar=col_bar,col_seasonal=col_seasonal,col_td=col_td, lwd_bar=lwd_bar,lwd_seasonal=lwd_seasonal,lwd_td=lwd_td,plot_legend=plot_legend, legend_horiz=legend_horiz,legend_bty=legend_bty, ...) } plot_spectrum_work <- function(frequency,spectrum,xlab="Frequency",ylab="Decibels", f=12,main="default",highlight=TRUE, col_bar="darkgrey",col_seasonal="red",col_td="blue", lwd_bar=4,lwd_seasonal=4,lwd_td=4,plot_legend=TRUE, legend_horiz=TRUE,legend_bty="o", ...) { gp<-par() for(i in c("cin","cra","csi","cxy","din")){ gp <- gp[-which(names(gp)%in%i)] } tryCatch({ par(mar = c(4, 4, 4, 2) + 0.1) layout(matrix(c(rep(1,16),2,2),nrow=9,byrow=TRUE))#,heights = c(1, 6), respect = FALSE) plot(frequency,spectrum,type="n",xlab=xlab,ylab=ylab,main=main,col=col_bar,xaxt="n",...) 
#f <- 12#frequency(out[["a1"]]) # abline(v=(1:(f/2))*1/f,col=col_seasonal,lwd=lwd_seasonal) # if(f==12) # abline(v=frequency[c(43,53)],col=col_td,lwd=lwd_td) coord <- par("usr")[3] if(highlight){ for(i in 1:length(frequency)){ points(x=rep(frequency[i],2),y=c(spectrum[i],coord),type="l",col=col_bar,lwd=lwd_bar) } if(f==12){ for(i in seq(11,61,10)){ points(x=rep(frequency[i],2),y=c(spectrum[i],coord),type="l",col=col_seasonal,lwd=lwd_seasonal) } for(i in c(43,53)){ points(x=rep(frequency[i],2),y=c(spectrum[i],coord),type="l",col=col_td,lwd=lwd_td) } }else if(f==4){ for(i in c(31,61)){ points(x=rep(frequency[i],2),y=c(spectrum[i],coord),type="l",col=col_seasonal,lwd=lwd_seasonal) } } }else{ abline(v=(1:(f/2))*1/f,col=col_seasonal,lwd=lwd_seasonal) if(f==12) abline(v=frequency[c(43,53)],col=col_td,lwd=lwd_td) coord <- par("usr")[3] for(i in 1:length(frequency)){ points(x=rep(frequency[i],2),y=c(spectrum[i],coord),type="l",col=col_bar,lwd=lwd_bar) } } if(f==12){ aT <- frequency[c(seq(11,61,10),43,53)] aL <- paste(round(aT*12),"/",12,sep="") aL[(length(aL)-1):length(aL)] <- c("0.348","0.432") axis(side=1,at=aT,labels=aL,las=2) }else{ aT <- frequency[c(31,61)] aL <- paste(aT*12,"/",12,sep="") axis(side=1,at=aT,labels=aL) } par(mar = c(0, 0, 0, 0)) plot(frequency,spectrum,type = "n", axes = FALSE, ann = FALSE,...) if(plot_legend){ if(f==12) legend("center",legend=c("Spectrum","Seasonal Freq.","Trading Day Freq."), lty=rep(1,3),col=c(col_bar,col_seasonal,col_td),bg="white",horiz=legend_horiz,bty=legend_bty,lwd=lwd_bar) else legend("center",legend=c("Spectrum","Seasonal Freq."),lty=rep(1,2), col=c(col_bar,col_seasonal),bg="white",horiz=legend_horiz,bty=legend_bty,lwd=lwd_bar) } gp <- gp[-which(names(gp)=="page")] par(gp) },finally=par(gp)) } plot_rsd_acf <- function(out,which="acf",xlab="Lag",ylab="ACF", main="default", col_acf="black",col_ci="blue",lt_ci=2,ylim="default"){ x <-out$dg if(which=="acf") which <- "rsd.acf" else if(which=="pacf") which <- "rsd.pacf" else if(which=="acf2") which <- "rsd.acf2" #lwd_bar=4,plot_legend=TRUE){ if(main=="default"){ if(which=="rsd.acf"){main <- "Autocorrelations of the Residuals"} else if(which=="rsd.pacf"){main <- "Partial Autocorrelations of the Residuals"} else if(which=="rsd.acf2"){main <- "Autocorrelations of the Squared Residuals"} } if(ylim=="default"){ ylim<-c(-max(x[[which]][[grep("sample",names(x[[which]]),value=TRUE)]],2*x[[which]][[grep("stderr",names(x[[which]]),value=TRUE)]]),max(x[[which]][[grep("sample",names(x[[which]]),value=TRUE)]],2*x[[which]][[grep("stderr",names(x[[which]]),value=TRUE)]])) } if(which=="rsd.pacf") ylab="Partial ACF" plot(x[[which]]$lag,x[[which]][[grep("sample",names(x[[which]]),value=TRUE)]],type="h",xlab=xlab,ylab=ylab,main=main,col=col_acf,ylim=ylim,xaxt="n") abline(h=0,col=col_acf) if(length(x[[which]]$lag)%in%c(12,24)){ aT <- c(6,12,18,24) axis(side=1,at=aT) }else{ aT <- c(4,8,12,16) axis(side=1,at=aT) } lines(x[[which]]$lag,2*x[[which]][[grep("stderr",names(x[[which]]),value=TRUE)]],type="l",col=col_ci,lty=lt_ci) lines(x[[which]]$lag,-2*x[[which]][[grep("stderr",names(x[[which]]),value=TRUE)]],type="l",col=col_ci,lty=lt_ci) }
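## ---------------------------------------------------------------------------
## Minimal usage sketch for the low-level plot helpers in this file, assuming
## `out` is an object of class "x12work", i.e. the list assembled by
## readx12Out() (tables a1, d10, d11, ..., spectra sp0/sp1/sp2 and the
## diagnostics list out$dg).  Wrapped in `if (FALSE)` so nothing is executed.
if (FALSE) {
  plot(out, plots = c(1, 2, 5))              # dispatches to plot.x12work()
  plot_seasonal_factors(out)                 # seasonal factors and SI ratios
  plot_original_seasonal_trend(out)          # original, adjusted and trend
  plot_spectrum(out, which = "seasonaladj")  # needs the sp1 table (X-11 runs)
  plot_rsd_acf(out, which = "acf")           # needs out$dg$rsd.acf
}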
/scratch/gouwar.j/cran-all/cranData/x12/R/plotFunctions.R
readSpc <- function(file,filename=TRUE){ if(length(file)==1) return(readSpcS(file)) else{ tsl <- list() for(i in 1:length(file)){ tsl[[i]] <- readSpcS(file[i]) } return(new("x12Batch",tsList=tsl)) } } readSpcS <- function(file,filename=TRUE){ whichgrep <- function(x,txt){ which(unlist(lapply(x,function(y)length(grep(txt,y,ignore.case=TRUE))>0))) } gsub1 <- function(x,word){ x <- gsub(word,"",x,ignore.case=TRUE) x <- gsub("=","",x) x <- gsub("\\(","",x) x <- gsub("\\)"," ",x) x <- gsub("\\'","",x) x <- gsub("\\\"","",x) x <- gsub("\\{","",x) x <- gsub("\\}","",x) x <- unlist(strsplit(x," ")) x[x!=""] } gsub2 <- function(x,word){ x <- gsub(word,"",x,ignore.case=TRUE) x <- gsub("=","",x) x <- gsub("\\(","",x) x <- gsub("\\)","",x) x <- gsub("\\{","",x) x <- gsub("\\}","",x) x <- gsub("\\'","",x) x <- gsub("\\\"","",x) gsub(" ","",x) } yes <- function(x) length(gregexpr("yes",x,ignore.case=TRUE))>0 getPart <- function(x,word){ startreg <- whichgrep(x,word) if(length(startreg>0)){ endreg <- whichgrep(x,"}") endreg <- endreg[endreg>=startreg][1] return(c(startreg,endreg)) }else return(vector()) } para <- new("x12Parameter") Lines <- readLines(file) ###CLEANING COMMENTS #comment line todel <- NULL for(i in 1:length(Lines)){ Lines[i] <- str_trim(Lines[i]) if(substr(Lines[i],0,1)=="#") todel <- c(todel,i) } if(!is.null(todel)) Lines <- Lines[-todel] #end of line comments commentX <- whichgrep(Lines,"#") for(l in commentX){ Lines[l] <- substr(Lines[l],0,which(unlist(strsplit(Lines[l],""))=="#")-1) } ###SERIES ind <- getPart(Lines,"series") series <- Lines[ind[1]:ind[2]] Lines <- Lines[-c(ind[1]:ind[2])] #Series_name if(!filename){ namX <- whichgrep(series,"name") namX2 <- whichgrep(series,"title") if(length(namX)>0){ name <- gsub2(series[namX],"name") }else if(length(namX2)>0){ name <- gsub2(series[namX2],"title") }else{ name <- "Series_1" } }else{ name <- substr(file,0,nchar(file)-4) } perX <- whichgrep(series,"period") if(length(perX)>0){ period <- as.numeric(gsub1(series[perX],"period")) if(!period%in%c(4,12)) stop("Period argument wring?!") }else{ period <- 12 } startX <- whichgrep(series,"start") if(length(startX)>0){ start <- gsub1(series[startX],"start") start <- unlist(strsplit(start,"\\.")) if(!period%in%c(4,12)) stop("Period argument wrong?!") }else{ start <- 1 } start <- as.numeric(start) dataX <- whichgrep(substring(trimws(series),1,4),"data") fileX <- whichgrep(substring(trimws(series),1,4),"file") if(length(dataX)>0){ dataXEnd <- whichgrep(series,")")[1] dataXEnd <- dataXEnd[dataXEnd>=dataX][1] data <- c() for(i in dataX:dataXEnd){ line <- gsub1(series[i],"data") if(length(line)>0){ data <- c(data,unlist(strsplit(line," "))) } } data <- as.numeric(data[data!=""]) }else if(length(fileX)>0){ dataFile <- gsub2(series[fileX] ,"file") dataFile <- gsub("\\\\","/",dataFile) dataFile <- gsub("//","/",dataFile) data <- scan(dataFile) }else{ warning("No data found, only the x12Parameter object will be created") } data <- ts(data,start=start,frequency=period) mspanX <- whichgrep(series,"modelspan") if(length(mspanX)>0){ modelspan <- gsub2(series[mspanX],"modelspan") modelspan <- unlist(lapply(unlist(strsplit(modelspan,",")),function(x)strsplit(x,"\\."))) para <- setP(para,list(series.modelspan=as.numeric(modelspan))) } spanX <- whichgrep(series,"span") spanX <- spanX[!spanX%in%mspanX] if(length(spanX)>0){ span <- gsub2(series[spanX],"span") span <- unlist(lapply(unlist(strsplit(span,",")),function(x)strsplit(x,"\\."))) para <- setP(para,list(series.span=as.numeric(span))) } ###TRANSFORM ind <- 
getPart(Lines,"transform") if(length(ind)>0){ trans <- Lines[ind[1]:ind[2]] Lines <- Lines[-c(ind[1]:ind[2])] func <- whichgrep(trans,"function") transF <- gsub2(trans[func],"function") para <- setP(para,list(transform.function=transF)) adj <- whichgrep(trans,"adjust") trans <- gsub2(trans[adj],"adjust") if(length(trans>0)) para <- setP(para,list(transform.adjust=trans)) } ###OUTLIER ind <- getPart(Lines,"outlier") if(length(ind)>0){ outlier <- Lines[ind[1]:ind[2]] Lines <- Lines[-c(ind[1]:ind[2])] typX <- whichgrep(outlier,"types") if(length(typX)>0){ types <- outlier[typX] types <- gsub("types","",types,ignore.case=TRUE) types <- gsub("=","",types) types <- gsub("\\(","",types) types <- gsub("\\)","",types) types <- unlist(strsplit(types," ")) types <- types[types!=""] para <- setP(para,list(outlier.types=types)) } spanX <- whichgrep(outlier,"span") if(length(spanX)>0){ out_span <- gsub2(outlier[spanX],"span") out_span <- unlist(lapply(unlist(strsplit(out_span,",")),function(x)strsplit(x,"\\."))) para <- setP(para,list(outlier.span=as.numeric(out_span))) } methodX <- whichgrep(outlier,"method") if(length(methodX)>0){ out_meth <- gsub2(outlier[methodX],"method") para <- setP(para,list(outlier.method=out_meth)) } critX <- whichgrep(outlier,"critical") if(length(critX)>0){ crit <- gsub1(outlier[critX],"critical") if(length(crit)==1) crit <- as.numeric(crit) else{ crit <- as.list(as.numeric(crit)) names(crit) <- c("AO","LS","TC")[1:length(crit)] } para <- setP(para,list(outlier.critical=crit)) } } #REGRESSION ind <- getPart(Lines,"regression") if(length(ind)>0){ regression <- Lines[ind[1]:ind[2]] Lines <- Lines[-c(ind[1]:ind[2])] varX <- whichgrep(regression,"variables") if(length(varX)>0){ variables <- gsub1(regression[varX],"variables") para <- setP(para,list(regression.variables=variables)) } centeruserX <- whichgrep(regression,"centeruser") if(length(centeruserX)>0){ centeruser <- gsub1(regression[centeruserX],"centeruser") para <- setP(para,list(regression.centeruser=centeruser)) regression <- regression[-centeruserX] } usertypeX <- whichgrep(regression,"usertype") if(length(usertypeX)>0){ usertype <- gsub1(regression[centeruserX],"usertype") para <- setP(para,list(regression.usertype=usertype)) regression <- regression[-usertypeX] } userX <- whichgrep(regression,"user") if(length(userX)>0){ reguser <- gsub1(regression[userX],"user") para <- setP(para,list(regression.user=reguser)) } fileX <- whichgrep(regression,"file") if(length(fileX)>0){ regfile <- gsub1(regression[fileX],"file") para <- setP(para,list(regression.file=regfile)) } } #ARIMA ind <- getPart(Lines,"arima") if(length(ind)>0){ arima <- Lines[ind[1]:ind[2]] Lines <- Lines[-c(ind[1]:ind[2])] modelX <- whichgrep(arima,"model") if(length(modelX)>0){ #aa <<- arima[modelX] arr <- arima[modelX] arr <- gsub(","," ",arr) model <- as.numeric(gsub1(arr,"model")) if(length(model)==6) para <- setP(para,list(arima.model=model[1:3],arima.smodel=model[4:6])) else if(length(model)==3) para <- setP(para,list(arima.model=model[1:3],arima.smodel=c(0,0,0))) else stop("Problem in reading ARIMA specification") } } #AUTOMDL ind <- getPart(Lines,"automdl") if(length(ind)>0){ para <- setP(para,list=(automdl=TRUE)) automdl <- Lines[ind[1]:ind[2]] Lines <- Lines[-c(ind[1]:ind[2])] orderX <- whichgrep(automdl,"automdl.maxorder") if(length(orderX)>0){ maxorder <- as.numeric(unlist(strsplit(gsub1(automdl[orderX],"maxorder"),","))) para <- setP(para,list(maxorder=maxorder)) } diffX <- whichgrep(automdl,"automdl.maxdiff") if(length(diffX)>0){ maxdiff <- 
as.numeric(unlist(strsplit(gsub1(automdl[diffX],"maxdiff"),","))) para <- setP(para,list(maxdiff=maxdiff)) } acceptX <- whichgrep(automdl,"acceptdefault") if(length(acceptX)>0){ accept <- yes(gsub1(automdl[acceptX],"acceptdefault")) para <- setP(para,list(automdl.acceptdefault=accept)) } balancedX <- whichgrep(automdl,"balanced") if(length(balancedX)>0){ balanced <- yes(gsub1(automdl[balancedX],"balanced")) para <- setP(para,list(automdl.balanced=balanced)) } }else{ para <- setP(para,list(automdl=FALSE)) } #ESTIMATE ind <- getPart(Lines,"estimate") if(length(ind)>0){ para <- setP(para,list=(estimate=TRUE)) estimate <- Lines[ind[1]:ind[2]] Lines <- Lines[-c(ind[1]:ind[2])] oosX <- whichgrep(estimate,"outofsample") if(length(oosX)>0){ oos <- yes(gsub1(estimate[oosX],"outofsample")) if(oos) para <- setP(para,list(estimate=TRUE)) para <- setP(para,list(estimate.outofsample=oos)) } } #SLIDINGSSPAN ind <- getPart(Lines,"slidingspans") if(length(ind)>0){ para <- setP(para,list=(slidingspans=TRUE)) Lines <- Lines[-c(ind[1]:ind[2])] } #Forecast ind <- getPart(Lines,"forecast") if(length(ind)>0){ forecast <- Lines[c(ind[1]:ind[2])] Lines <- Lines[-c(ind[1]:ind[2])] maxleadX <- whichgrep(forecast,"maxlead") if(length(maxleadX)>0){ fc_y <- as.numeric(gsub1(forecast[maxleadX],"maxlead"))/period para <- setP(para,list(forecast_years=fc_y)) } maxbackX <- whichgrep(forecast,"maxback") if(length(maxbackX)>0){ bc_y <- as.numeric(gsub1(forecast[maxbackX],"maxback"))/period para <- setP(para,list(backcast_years=bc_y)) } probX <- whichgrep(forecast,"PROBABILITY") if(length(probX)>0){ prob_conf <- as.numeric(gsub1(forecast[probX],"PROBABILITY")) para <- setP(para,list(forecast_conf=prob_conf)) } } #X11 ind <- getPart(Lines,"x11") if(length(ind)>0){ x11 <- Lines[c(ind[1]:ind[2])] Lines <- Lines[-c(ind[1]:ind[2])] sigmalimX <- whichgrep(x11,"sigmalim") if(length(sigmalimX)>0){ sigmalim <- as.numeric(unlist(strsplit(gsub2(x11[sigmalimX],"sigmalim"),","))) para <- setP(para,list(x11.sigmalim=sigmalim)) } calsigX <- whichgrep(x11,"calendarsigma") if(length(calsigX)>0){ calendarsigma <- gsub1(x11[calsigX],"calendarsigma") para <- setP(para,list(x11.calendarsigma=calendarsigma)) } modeX <- whichgrep(x11,"mode") if(length(modeX)>0){ samode <- gsub1(x11[modeX],"mode") para <- setP(para,list(x11.samode=samode)) } seasonalmaX <- whichgrep(x11,"seasonalma") if(length(seasonalmaX)>0){ seasonalma <- gsub1(x11[seasonalmaX],"seasonalma") para <- setP(para,list(x11.seasonalma=seasonalma)) } trendmaX <- whichgrep(x11,"trendma") if(length(trendmaX)>0){ trendma <- gsub1(x11[trendmaX],"trendma") para <- setP(para,list(x11.trendma=trendma)) } appendfcstX <- whichgrep(x11,"appendfcst") if(length(appendfcstX)>0){ appendfcst <- yes(gsub1(x11[appendfcstX],"appendfcst")) para <- setP(para,list(x11.appendfcst=appendfcst)) } appendbcstX <- whichgrep(x11,"appendbcst") if(length(appendbcstX)>0){ appendbcst <- yes(gsub1(x11[appendbcstX],"appendbcst")) para <- setP(para,list(x11.appendbcst=appendbcst)) } excludefcstX <- whichgrep(x11,"excludefcst") if(length(excludefcstX)>0){ excludefcst <- yes(gsub1(x11[excludefcstX],"excludefcst")) para <- setP(para,list(x11.excludefcst=excludefcst)) } finalX <- whichgrep(x11,"final") if(length(finalX)>0){ final <- gsub1(x11[finalX],"final") para <- setP(para,list(x11.final=final)) } } cat("File:",file,"processed\n") return(new("x12Single",ts=data,x12Parameter=para,tsName=name)) }
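## ---------------------------------------------------------------------------
## Minimal usage sketch for readSpc(), kept in `if (FALSE)` so it never runs.
## The .spc file names are placeholders (assumptions for illustration); a
## single file yields an "x12Single", several files an "x12Batch".
if (FALSE) {
  s <- readSpc("series1.spc")                    # one file -> x12Single
  b <- readSpc(c("series1.spc", "series2.spc"))  # several  -> x12Batch
  s@tsName                                       # name derived from the file name
  s@x12Parameter                                 # parameters recovered from the spc
}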
/scratch/gouwar.j/cran-all/cranData/x12/R/readSpc.R
readx12Out <- function(file,tblnames=NULL,Rtblnames=NULL,freq_series,start_series,end_series, seats=FALSE,transform,slidingspans,history,x11regress,outlier,showWarnings,keep_x12out){ # cat("start_series: ") # print(start_series) readAndCatError <- function(file,noWarnings=FALSE){ errorfile <- readLines(con=paste(file,"_","err.html",sep=""),n=-1) errorfile <- errorfile[(which(errorfile=="<body>")+1):(which(errorfile=="</body>")-1)] if(noWarnings){ errorfile <- errorfile[grep("ERROR:",errorfile):length(errorfile)] } for(i in seq_along(errorfile)){ cat(errorfile[i],"\n") } } dirgra <- paste("gra_",gsub("\\.","_",basename(file)),sep="") out<-list() Rtblnames <- c("Original series", "Final seasonal factors", "Final seasonally adjusted data", "Final trend cycle", "Final irregular components","Combined adjustment factors","Final weights for irregular component", "Final replacements for SI ratios", "Differenced, transformed, seasonally adjusted data","Final unmodified SI Ratios","Orig2","Trading day component", Rtblnames) if(seats==TRUE) tblnames <- c("a1", "s10", "s11", "s12", "s13","s16","c17","s9","e2","d8","b1","td", tblnames) else tblnames <- c("a1", "d10", "d11", "d12", "d13","d16","c17","d9","e2","d8","b1","td", tblnames) if(!(file=="Example_for_X1")){ sp_file <- strsplit(file,"/")[[1]] filename <- paste(paste(sp_file[-length(sp_file)],collapse="/"),"/",dirgra,"/",sp_file[length(sp_file)],sep="") if(substring(filename,1,1)=="/") filename <- substring(filename,2) }else filename <- paste(searchpaths()[grep("x12",searchpaths())],"/doc/Rout",sep="") ############ #if(file.exists(paste(file,".","err",sep="")) && file.exists(paste(file,".","spc",sep=""))){#evt unnoetige Abfrage # x1<- file.info(paste(filename,".","udg",sep="")) # cat("udg:",str(x1),"\n") # x2<- file.info(paste(file,".","err",sep="")) # cat("err:",str(x2),"\n") # ind<- !identical(x1$mtime,x2$mtime) # cat("ind:",ind,"\n") # udgtime<-file.info(paste(filename,".","udg",sep=""))$mtime # errtime<-file.info(paste(file,".","err",sep=""))$mtime # if(ind){ # #cat("udg:",file.info(paste(filename,".","udg",sep=""))$mtime,"\n") # #cat("err:",file.info(paste(file,".","err",sep=""))$mtime,"\n") # errorfile <- readLines(con=paste(file,".","err",sep=""),n=-1) # for(i in 1:length(errorfile)){ # cat(errorfile[i],"\n") # } # stop("Error! No proper run of x12! Check your parameter settings.\n=> Beware that files in \"gra\" directory do not represent current x12 output") # } if(!file.exists(paste(filename,".","udg",sep=""))){ readAndCatError(file) if(!keep_x12out) unlink(paste(dirname(file),"/",dirgra,sep=""),recursive=TRUE) stop("Error! No proper run of x12! Check your parameter settings.") } udg <- readLines(con=paste(filename,".","udg",sep=""),n=-1) if(showWarnings && file.exists(paste(file,"_","err.html",sep=""))){ readAndCatError(file) } if(any(any(grepl("errorstop: yes",udg)),length(udg)==0)){ if(!showWarnings){ readAndCatError(file,noWarnings=TRUE) if(!keep_x12out) unlink(paste(dirname(file),"/",dirgra,sep=""),recursive=TRUE) stop("An error occured when running x12! Program halted!","\n") }else{ if(!keep_x12out) unlink(paste(dirname(file),"/",dirgra,sep=""),recursive=TRUE) stop("An error occured when running x12! Program halted!","\n") } #stop("Errorstop! 
(Check error file \"",file,".err\")",sep="") } ############ for(i in seq_along(tblnames)){ if(file.exists(paste(filename,".",tblnames[i],sep=""))) out[[tblnames[i]]] <- ts(read.table(paste(filename,".",tblnames[i],sep=""),header=FALSE,skip=2,sep=" ",na.strings="-999")[,2],frequency=freq_series,start=start_series) } # if(!x11regress){ # spnames <- c("Spectrum_AdjOri","Spectrum_SA","Spectrum_Irr","Spectrum_Rsd") # sptblnames <- c("sp0", "sp1", "sp2","spr") # }else{ # spnames <- c("Spectrum_AdjOri","Spectrum_SA","Spectrum_Irr") # sptblnames <- c("sp0", "sp1", "sp2") # } sptblnames <- vector() for(i in c("sp0", "sp1", "sp2","spr")){ if(file.exists(paste(filename,".",i,sep=""))) sptblnames <- c(sptblnames,i) } if(!seats){ for(i in seq_along(sptblnames)){ out[[sptblnames[i]]] <- read.table(paste(filename,".",sptblnames[i],sep=""),header=FALSE,skip=2,sep=" ")[,2:3] names(out[[sptblnames[i]]]) <- c("frequency","spectrum") } } out[["d9"]][out[["d9"]]==-999]<-NA if(!x11regress){ ### Forecasts: if(file.exists(paste(filename,".","fct",sep=""))){ out[["forecast"]] <- list() fct <- read.table(paste(filename,".","fct",sep=""),header=FALSE,skip=2,sep=" ") if((freq_series==12 && end_series[2]==12) | (freq_series==4 && end_series[2]==4)){ start_forecast <- c(end_series[1]+1,1) # }else if(freq_series==4 && end_series[2]==4){ # start_forecast <- c(end_series[1]+1,1)} }else{ start_forecast <- end_series start_forecast[2] <- start_forecast[2]+1 } out[["forecast"]]$estimate <-ts(fct[,2],frequency=freq_series,start=start_forecast) out[["forecast"]]$lowerci <-ts(fct[,3],frequency=freq_series,start=start_forecast) out[["forecast"]]$upperci <-ts(fct[,4],frequency=freq_series,start=start_forecast) } ### Backcasts: if(file.exists(paste(filename,".","bct",sep=""))){ out[["backcast"]] <- list() bct <- read.table(paste(filename,".","bct",sep=""),header=FALSE,skip=2,sep=" ") if(start_series[2]==1){ if(freq_series==12) #start_backcast <- c(start_series[1]-1,12) end_backcast <- c(start_series[1]-1,12) if(freq_series==4) end_backcast <- c(start_series[1]-1,4) }else{ end_backcast <- start_series end_backcast[2] <- end_backcast[2]-1 } out[["backcast"]]$estimate <-ts(bct[,2],frequency=freq_series,end=end_backcast) out[["backcast"]]$lowerci <-ts(bct[,3],frequency=freq_series,end=end_backcast) out[["backcast"]]$upperci <-ts(bct[,4],frequency=freq_series,end=end_backcast) } } #Testbeispiele: #filename <- "M:/Meraner/Workspace/Saisonbereinigung_Test/gra/Air" #filename <- "M:/Meraner/Saisonbereinigung/x12probierfiles/loge_d" #filename <- "M:/Meraner/Saisonbereinigung/x12probierfiles/a" #filename <- "M:/Meraner/Saisonbereinigung/x12probierfiles/b05" #file<-"M:/Meraner/Workspace/Saisonbereinigung_Test/Air" #filename <- "M:/Meraner/Workspace/Saisonbereinigung_Test/gra/Rout" #udg <- readLines(con=paste(filename,".","udg",sep=""),n=-1) if("x11regress: no" %in% udg){ #Informationen die aus udg File eingelesen werden sollen: dglist <- c("x11regress:","transform:","samode:","finmode:","seasonalma:","trendma:","sfmsr:", "arimamdl:","automdl:", "finalreg", "outlier.total:","autoout:","nalmostout:", "almostoutlier$","crit:", "Outlier$","User-defined$","AutoOutlier$", #Autokorrelationen #"sfmsr:","finaltrendma:", "peaks.seas:","peaks.td:", "f2.idseasonal:", "d11.f:", "spcrsd", "spcori", "spcsa", "spcirr", "f3.m01:","f3.m02:","f3.m03:","f3.m04:","f3.m05:","f3.m06:", "f3.m07:","f3.m08:","f3.m09:","f3.m10:","f3.m11:", "f3.q:","f3.qm2:","f3.fail:", "loglikelihood:","aic:","aicc:","bic:","hq:","aape") #Hier die gewuenschten Variablennamen eintragen: 
dglistnames <- c("x11regress","transform","samode","finalsamode","seasonalma","trendma","finalseasonalma", "arimamdl","automdl", "regmdl", "nout","nautoout","nalmostout", "almostoutlier","crit", "outlier","userdefined","autooutlier", #Autokorrelationen #"sfmsr","finaltrendma", "peaks.seas","peaks.td", "id.seas", "id.rsdseas", "spcrsd", "spcori", "spcsa", "spcirr", "m1","m2","m3","m4","m5","m6","m7","m8", "m9","m10","m11", "q","q2","nmfail", "loglikelihood","aic","aicc","bic","hq","aape") if(transform=="auto"){# &&!x11regress --- x11regress Abfrage hier eigentl nicht mehr notw dglist[length(dglist)+1]<-"aictrans:" dglistnames[length(dglistnames)+1]<-"autotransform" } numvariables <- c("nout","nautoout","nalmostout", "crit","spcrsd", "spcori", "spcsa", "spcirr", "m1","m2","m3","m4","m5","m6","m7","m8", "m9","m10","m11", "q","q2","nmfail","seasonalma","trendma","finalseasonalma", "loglikelihood","aic","aicc","bic","hq","aape") if(slidingspans){ dglist[(length(dglist)+1):(length(dglist)+8)]<-c("ssa:","ssfstab:","ssfmov:","ssm7:","ssident:", "ssran.","s2.","s3.") dglistnames[(length(dglistnames)+1):(length(dglistnames)+8)]<-c("ss.options","ss.stabseas","ss.movseas","ss.m7","ss.idseas", "ss.S1","ss.S2","ss.S3") } if(history){ dglist[(length(dglist)+1):(length(dglist)+7)]<-c("historytarget", "r01.lag","r02.lag","r04.lag","r05.lag","r06","meanssfe") dglistnames[(length(dglistnames)+1):(length(dglistnames)+7)]<-c("h.target", "h.R1","h.R2","h.R4","h.R5","h.R6","h.meanssfe") } dg <- lapply(dglist,function(x)grep(x,udg,value=TRUE,fixed=TRUE)) #Extrawurst fuer Regression variables Teil 1: if(length(dg[[which(dglist=="finalreg")]])>1){ dg[[which(dglist=="finalreg")]]<-dg[[which(dglist=="finalreg")]][-grep("nfinalreg",dg[[which(dglist=="finalreg")]])] } regvar<-unlist(strsplit(dg[[which(dglist=="finalreg")]],": ")) if(any(grepl("+",regvar,fixed=TRUE))){ regvar <- strsplit(regvar,"+",fixed=TRUE) regvar<-unlist(lapply(regvar,function(x)gsub("^\\s+|\\s+$", "", x))[-(grep("finalreg",regvar))]) if(""%in%regvar){ regvar<-regvar[-which(regvar=="")]} } othername<-which(regvar %in% c("User-defined","Automatically Identified Outliers")) if(length(othername)>0){ regvar <- regvar[-othername]} #End Extrawurst Teil 1 empty <- which(lapply(dg,function(x)length(x))==0) dglist[which(dglist%in%grep("$",grep(":",dglist[empty],invert=TRUE,fixed=TRUE,value=TRUE),invert=TRUE,fixed=TRUE,value=TRUE))]<-paste(grep("$",grep(":",dglist[empty],invert=TRUE,fixed=TRUE,value=TRUE),invert=TRUE,fixed=TRUE,value=TRUE),":",sep="") dg[empty] <- paste(gsub("$",replacement=":",dglist[empty],fixed=TRUE),"-") dg <- lapply(dg,function(x)strsplit(x,": ")) names(dg)<- dglistnames if(length(which(dg[["outlier"]]%in%dg[["autooutlier"]]))>0){ dg[["outlier"]]<-dg[["outlier"]][-which(dg[["outlier"]]%in%dg[["autooutlier"]])]} if(length(dg[["outlier"]])==0){ dg["outlier"] <- list(strsplit(paste("outlier","-")," ",fixed=TRUE)) empty <- c(empty,which(names(dg)=="outlier")) } grone <- which(lapply(1:length(dg),function(x){length(dg[[x]])})>1) grone.nodoll <- unlist(lapply(grep("$",dglist[grone],fixed=TRUE,value=TRUE,invert=TRUE),function(x){ grep(x,dglist,fixed=TRUE)})) for(i in c(1:length(dg))[-grone.nodoll]){ # cat("i=",i,"\n") # cat("name=",names(dg)[i],"\n") names(dg[[i]])<-lapply(1:length(dg[[i]]),function(x){ if(grepl("$",dg[[i]][[x]][1],fixed=TRUE)){ gsub(grep("$",dglist[i],fixed=TRUE,value=TRUE),replacement=paste(dglistnames[i],"_",sep=""),dg[[i]][[x]][1],fixed=TRUE) }else{ gsub(dg[[i]][[x]][1],replacement=dglistnames[i],dg[[i]][[x]][1],fixed=TRUE) 
}}) for(j in 1:length(dg[[i]])){ dg[[i]][[j]] <- dg[[i]][[j]][-1] # returns string w/o leading or trailing whitespace: dg[[i]][[j]] <- gsub("^\\s+|\\s+$", "", dg[[i]][[j]]) }} #Extrawurst fuer Regression variables Teil 2: if(length(regvar)!=0 && head(regvar,1)!="none"){ reglist <- lapply(1:length(regvar),function(x)grep(paste(regvar[x],"$",sep=""),grep(regvar[x],udg,value=TRUE,fixed=TRUE),fixed=TRUE,value=TRUE)) reglist <- lapply(reglist,function(x)strsplit(x,"$",fixed=TRUE)) if(length(which(sapply(reglist,function(x)!length(x)>0)))!=0){ reglist <- reglist[-(which(sapply(reglist,function(x)!length(x)>0)))]} if(length(reglist)!=0){ reglistnames <- vector() for(i in 1:length(reglist)){ regsublistnames<-vector() inregvar<-which(sapply(1:length(reglist[[i]]),function(x)!reglist[[i]][[x]][1]%in%regvar)) if(length(inregvar)>0){ reglist[[i]] <- reglist[[i]][-(inregvar)] } reglistnames[i] <- reglist[[i]][[1]][1] reglist[[i]]<-lapply(1:length(reglist[[i]]),function(x){ reglist[[i]][[x]]<-reglist[[i]][[x]][-1] strsplit(reglist[[i]][[x]],": ")}) regsublistnames <-lapply(1:length(reglist[[i]]),function(x){ regsublistnames <- reglist[[i]][[x]][[1]][1] if("Leap Year"%in%regsublistnames){ regsublistnames<-gsub("Leap Year",replacement="leapyear",regsublistnames,fixed=TRUE) } if("Trading Day"%in%regsublistnames){ regsublistnames<-gsub("Trading Day",replacement="td",regsublistnames,fixed=TRUE) } regsublistnames<-regsublistnames }) # reglist[[i]]<-lapply(1:length(reglist[[i]]),function(x){reglist[[i]][[x]][[1]]<-reglist[[i]][[x]][[1]][-1] reglist[[i]][[x]][[1]] <- gsub("^\\s+|\\s+$", "", reglist[[i]][[x]][[1]])}) if("Leap Year"%in%reglistnames){ reglistnames<-gsub("Leap Year",replacement="leapyear",reglistnames,fixed=TRUE) } if("Trading Day"%in%reglistnames){ reglistnames<-gsub("Trading Day",replacement="td",reglistnames,fixed=TRUE) } names(reglist[[i]])<-regsublistnames } dg[(length(dg)+1):(length(dg)+length(reglist))]<-reglist names(dg)<-dglistnames<-c(dglistnames,reglistnames) }else{ names(dg)<- dglistnames reglistnames <- NULL }}else{ names(dg)<- dglistnames reglistnames <- NULL } #End Extrawurst Teil 2 #Checking for derived parameter estimates: if(any(grepl("nregderived:",udg,fixed=TRUE))){ regderived<-grep("nregderived:",udg,value=TRUE,fixed=TRUE) indnrd<-which(udg==regderived) nrd<-as.numeric(strsplit(regderived,": ",fixed=TRUE)[[1]][2]) regder<-udg[(indnrd+1):(indnrd+nrd)] derived.coef<-sapply(strsplit(gsub("$",replacement="_",regder,fixed=TRUE),": ",fixed=TRUE),function(x) gsub("\\s+",replacement="",x[[1]])) } } #End RegARIMA Option # x11Regression Option: else{ #Informationen die aus udg File eingelesen werden sollen: dglist <- c("x11regress:", "samode:","finmode:","seasonalma:","trendma:","sfmsr:", "finalxreg","x11irrcrtval:", #"$", "$AO","User-defined$","Automatically Identified Outliers$", #"sfmsr:","finaltrendma:", "peaks.seas:","peaks.td:", "f2.idseasonal:","d11.f:", "spcori", "spcsa", "spcirr", "f3.m01:","f3.m02:","f3.m03:","f3.m04:","f3.m05:","f3.m06:", "f3.m07:","f3.m08:","f3.m09:","f3.m10:","f3.m11:", "f3.q:","f3.qm2:","f3.fail:") #Neue Variablennamen: dglistnames <- c("x11regress", "samode","finalsamode","seasonalma","trendma","finalseasonalma", "regmdl","crit","outlier","userdefined","autooutlier", #Autokorrelationen #"sfmsr","finaltrendma", "peaks.seas","peaks.td", "id.seas","id.rsdseas", "spcori", "spcsa", "spcirr", "m1","m2","m3","m4","m5","m6","m7","m8", "m9","m10","m11", "q","q2","nmfail") numvariables <- c("crit", "spcori", "spcsa", "spcirr", 
"m1","m2","m3","m4","m5","m6","m7","m8", "m9","m10","m11", "q","q2","nmfail","seasonalma","trendma","finalseasonalma") if(slidingspans){ dglist[(length(dglist)+1):(length(dglist)+8)]<-c("ssa:","ssfstab:","ssfmov:","ssm7:","ssident:", "ssran.","s2.","s3.") dglistnames[(length(dglistnames)+1):(length(dglistnames)+8)]<-c("ss.options","ss.stabseas","ss.movseas","ss.m7","ss.idseas", "ss.S1","ss.S2","ss.S3") } if(history){ dglist[(length(dglist)+1):(length(dglist)+7)]<-c("historytarget", "r01.lag","r02.lag","r04.lag","r05.lag","r06","meanssfe") dglistnames[(length(dglistnames)+1):(length(dglistnames)+7)]<-c("h.target", "h.R1","h.R2","h.R4","h.R5","h.R6","h.meanssfe") } dg <- lapply(dglist,function(x)grep(x,udg,value=TRUE,fixed=TRUE)) #Extrawurst fuer Regression variables Teil 1: if(length(dg[[which(dglist=="finalxreg")]])>1){ dg[[which(dglist=="finalxreg")]]<-dg[[which(dglist=="finalxreg")]][-grep("nfinalxreg",dg[[which(dglist=="finalxreg")]])] } regvar<-unlist(strsplit(dg[[which(dglist=="finalxreg")]],": ")) if(any(grepl("+",regvar,fixed=TRUE))){ regvar <- strsplit(regvar,"+",fixed=TRUE) regvar<-unlist(lapply(regvar,function(x)gsub("^\\s+|\\s+$", "", x))[-(grep("finalxreg",regvar))]) if(""%in%regvar){ regvar<-regvar[-which(regvar=="")]} } othername<-which(regvar %in% c("User-defined","Automatically Identified Outliers")) if(length(othername)>0){ regvar <- regvar[-othername]} #End Extrawurst Teil 1 empty <- which(lapply(dg,function(x)length(x))==0) dglist[which(dglist%in%grep("$",grep(":",dglist[empty],invert=TRUE,fixed=TRUE,value=TRUE),invert=TRUE,fixed=TRUE,value=TRUE))]<-paste(grep("$",grep(":",dglist[empty],invert=TRUE,fixed=TRUE,value=TRUE),invert=TRUE,fixed=TRUE,value=TRUE),":",sep="") dg[empty] <- paste(gsub("$",replacement=":",dglist[empty],fixed=TRUE),"-") dg <- lapply(dg,function(x)strsplit(x,": ")) names(dg)<- dglistnames if(length(which(dg[["outlier"]]%in%dg[["autooutlier"]]))>0){ dg[["outlier"]]<-dg[["outlier"]][-which(dg[["outlier"]]%in%dg[["autooutlier"]])]} if(length(dg[["outlier"]])==0){ dg["outlier"] <- list(strsplit(paste("outlier","-")," ",fixed=TRUE)) empty <- c(empty,which(names(dg)=="outlier")) } grone <- which(lapply(1:length(dg),function(x){length(dg[[x]])})>1) grone.nodoll <- unlist(lapply(grep("$",dglist[grone],fixed=TRUE,value=TRUE,invert=TRUE),function(x){ grep(x,dglist,fixed=TRUE)})) for(i in c(1:length(dg))[-grone.nodoll]){ names(dg[[i]])<-lapply(1:length(dg[[i]]),function(x){ if(grepl("$",dg[[i]][[x]][1],fixed=TRUE)){ strsplit(dg[[i]][[x]][1],"$",fixed=TRUE) paste(dglistnames[i],"_",strsplit(dg[[i]][[x]][1],"$",fixed=TRUE)[[1]][2],sep="") #gsub(grep("$",dglist[i],fixed=TRUE,value=TRUE),replacement=paste(dglistnames[i],"_",sep=""),dg[[i]][[x]][1],fixed=TRUE) }else{ gsub(dg[[i]][[x]][1],replacement=dglistnames[i],dg[[i]][[x]][1],fixed=TRUE) }}) for(j in 1:length(dg[[i]])){ dg[[i]][[j]] <- dg[[i]][[j]][-1] # returns string w/o leading or trailing whitespace: dg[[i]][[j]] <- gsub("^\\s+|\\s+$", "", dg[[i]][[j]]) }} #Extrawurst fuer Regression variables Teil 2: if(length(regvar)!=0 &&regvar!="none"){ reglist <- lapply(1:length(regvar),function(x)grep(paste(regvar[x],"$",sep=""),grep(regvar[x],udg,value=TRUE),fixed=TRUE,value=TRUE)) reglist <- lapply(reglist,function(x)strsplit(x,"$",fixed=TRUE)) if(length(which(sapply(reglist,function(x)!length(x)>0)))!=0){ reglist <- reglist[-(which(sapply(reglist,function(x)!length(x)>0)))]}} if(length(reglist)!=0){ reglistnames <- vector() for(i in 1:length(reglist)){ regsublistnames<-vector() 
inregvar<-which(sapply(1:length(reglist[[i]]),function(x)!reglist[[i]][[x]][1]%in%regvar)) if(length(inregvar)!=0){ reglist[[i]] <- reglist[[i]][-(inregvar)] } reglistnames[i] <- reglist[[i]][[1]][1] reglist[[i]]<-lapply(1:length(reglist[[i]]),function(x){ reglist[[i]][[x]]<-reglist[[i]][[x]][-1] strsplit(reglist[[i]][[x]],": ")}) regsublistnames <-lapply(1:length(reglist[[i]]),function(x){ regsublistnames <- reglist[[i]][[x]][[1]][1] if(any(grepl("AO",regsublistnames))){ regsublistnames<-paste("outlier_",grep("AO",regsublistnames,value=TRUE),sep="")} if("Leap Year"%in%regsublistnames){ regsublistnames<-gsub("Leap Year",replacement="leapyear",regsublistnames,fixed=TRUE) } if("Trading Day"%in%regsublistnames){ regsublistnames<-gsub("Trading Day",replacement="td",regsublistnames,fixed=TRUE) } regsublistnames<-regsublistnames }) reglist[[i]]<-lapply(1:length(reglist[[i]]),function(x){reglist[[i]][[x]][[1]]<-reglist[[i]][[x]][[1]][-1] reglist[[i]][[x]][[1]] <- gsub("^\\s+|\\s+$", "", reglist[[i]][[x]][[1]])}) if("Leap Year"%in%reglistnames){ reglistnames<-gsub("Leap Year",replacement="leapyear",reglistnames,fixed=TRUE) } if("Trading Day"%in%reglistnames){ reglistnames<-gsub("Trading Day",replacement="td",reglistnames,fixed=TRUE) } if(any(grepl("AO",reglistnames))){ reglistnames[i]<-"outlier"} names(reglist[[i]])<-regsublistnames } dg[(length(dg)+1):(length(dg)+length(reglist))]<-reglist names(dg)<-dglistnames<-c(dglistnames,reglistnames) }else{ names(dg)<- dglistnames reglistnames <- NULL } #End Extrawurst Teil 2 #Checking for derived parameter estimates: if(any(grepl("nxregderived:",udg,fixed=TRUE))){ regderived<-grep("nxregderived:",udg,value=TRUE,fixed=TRUE) indnrd<-which(udg==regderived) nrd<-as.numeric(strsplit(regderived,": ",fixed=TRUE)[[1]][2]) regder<-udg[(indnrd+1):(indnrd+nrd)] derived.coef<-sapply(strsplit(gsub("$",replacement="_",regder,fixed=TRUE),": ",fixed=TRUE),function(x) gsub("\\s+",replacement="",x[[1]])) } } #End x11Regression Option #Extrawurst fuer Critical Value und die Spektren for(i in grone.nodoll){ names(dg[[i]])<-lapply(1:length(dg[[i]]),function(x){names <- dg[[i]][[x]][1]}) for(j in 1:length(dg[[i]])){ dg[[i]][[j]] <- dg[[i]][[j]][-1] dg[[i]][[j]] <- gsub("^\\s+|\\s+$", "", dg[[i]][[j]]) } } #Numerische Daten auch als solche ausgeben lassen: suppressWarnings(for(i in which(names(dg) %in% numvariables)){ for(j in 1:length(dg[[i]])){ num.possible <- as.numeric(dg[[i]][[j]]) if(length(which(is.na(num.possible)))==0){ dg[[i]][[j]] <- num.possible }else{ num.split <- strsplit(dg[[i]][[j]],"\\s+") if(is(num.split,"list")){ ex.num <- as.numeric(unlist(num.split)) num.split <- as.list(num.split[[1]]) }else{ ex.num <- as.numeric(num.split) } if(length(which(!is.na(ex.num)))>0){ dg[[i]][[j]]<- as.list(num.split) dg[[i]][[j]][[which(!is.na(ex.num))]]<- ex.num[which(!is.na(ex.num))] } } }}) sel <- c("userdefined","autooutlier","outlier",reglistnames) #werden extra behandelt for(i in which(names(dg) %in% sel)[!which(names(dg) %in% sel)%in%empty]){ for(j in 1:length(dg[[i]])){ #(Regular Expressions as used in R) #+ :The preceding item will be matched one or more times. #Symbols \d, \s, \D and \S denote the digit and space classes and their negations. 
dg[[i]][[j]] <- strsplit(dg[[i]][[j]],"\\s+") dg[[i]][[j]][[1]]<-as.numeric(dg[[i]][[j]][[1]]) names(dg[[i]][[j]][[1]])<- c("coef","stderr","tval")#"coef" oder "estimate" }} if("almostoutlier"%in%names(dg)){ if(!which(names(dg)=="almostoutlier")%in%empty){ for(j in 1:length(dg[["almostoutlier"]])){ #cat("j=",j, "\n") dg[["almostoutlier"]][[j]] <- strsplit(dg[["almostoutlier"]][[j]],"\\s+") dg[["almostoutlier"]][[j]][[1]]<-as.numeric(dg[["almostoutlier"]][[j]][[1]]) names(dg[["almostoutlier"]][[j]][[1]])<- c("tval_AO","tval_LS","tval_TC") }} } if(slidingspans){ ### S 0. if(dg[[which(names(dg) %in% "ss.options")]][[1]]!="-"){ ss.options <- as.list(as.numeric(unlist(strsplit(dg[[which(names(dg) %in% "ss.options")]][[1]],"\\s+")))) names(ss.options)<-c("nSpans","lSpans","period1span1","year1span1") dg[[which(names(dg) %in% "ss.options")]] <- ss.options } #ss.out <- c("ss.stabseas","ss.movseas","ss.m7","ss.idseas") #suppressWarnings(for(i in which(names(dg) %in% ss.out)){ # out.vec <- unlist(strsplit(dg[[i]][[1]],"\\s+")) # if(length(which(is.na(as.numeric(out.vec))))==0){ # dg[[i]][[1]] <- as.numeric(out.vec) # }else{ # dg[[i]][[1]] <- out.vec # } # }) ss.out <- c("ss.stabseas","ss.movseas","ss.m7","ss.idseas") ss.S0 <-dg[which(names(dg) %in% ss.out)] rn.S0<-names(dg)[which(names(dg) %in% ss.out)] ss.S0 <- strsplit(unlist(ss.S0),"\\s+") dims <- lapply(ss.S0,length) if(any(dims>1)){ rn.S0 <- rn.S0[which(dims>1)] ss.S0 <- as.data.frame(t(data.frame(do.call(rbind,ss.S0[which(dims>1)])))) ss.S0[,-which(rn.S0=="ss.idseas")]<-apply(ss.S0[,-which(rn.S0=="ss.idseas")],2,as.numeric) row.names(ss.S0)<- paste("span",1:(dim(ss.S0)[1]),sep="") colnames(ss.S0)<-gsub("ss.","",rn.S0) names(dg)[[which(names(dg)=="ss.stabseas")]]<-"ss.seasTests" dg <- dg[-which(names(dg) %in% c("ss.movseas","ss.m7","ss.idseas"))] dg[[which(names(dg)=="ss.seasTests")]] <- ss.S0 } ### S 1. if(any(dg[[which(names(dg)=="ss.S1")]]!="-")){ ss.S1split <- dg[[which(names(dg)=="ss.S1")]] ss.S1split <- strsplit(unlist(ss.S1split),"\\s+") dims <- lapply(ss.S1split,length) ss.S1 <- data.frame(do.call(rbind,ss.S1split[which(dims==unique(dims)[[1]])])) ss.S1[,1]<-as.character(ss.S1[,1]) ss.S1[,-1]<-apply(ss.S1[,-1],2,as.numeric) row.names(ss.S1)<-NULL colnames(ss.S1)<-c("period", paste("span",1:(unique(dims)[[1]]-3),sep=""), "maxPercDiff", "allSpans") ss.summaryS1 <- data.frame(do.call(rbind,ss.S1split[which(dims==unique(dims)[[2]])]),stringsAsFactors=FALSE) ss.summaryS1<-apply(ss.summaryS1,2,as.numeric) row.names(ss.summaryS1) <- c(paste("span",1:(dim(ss.summaryS1)[1]-1),sep=""),"allSpans") colnames(ss.summaryS1)<-c("min","max","range") dg[[which(names(dg)=="ss.S1")]] <- list(ss.S1=ss.S1,ss.summaryS1=ss.summaryS1) } ### S 2. if(any(dg[[which(names(dg)=="ss.S2")]]!="-")){ ss.S2 <- dg[[which(names(dg)=="ss.S2")]] ss.S2split <- dg[[which(names(dg)=="ss.S2")]] ss.S2split <- strsplit(unlist(ss.S2split),"\\s+") dims <- lapply(ss.S2split,length) if(any(dims>1)){ ss.S2 <- data.frame(apply(do.call(rbind,ss.S2split[which(dims>1)]),2,as.numeric)) rn.S2 <- which(c("s2.a.per", "s2.b.per", "s2.c.per", "s2.d.per", "s2.e.per")%in%names(ss.S2split)) row.names(ss.S2) <- c("a.seasFac","b.td","c.SA","d.period-period","e.year-year")[rn.S2] colnames(ss.S2)<-c("nUnstable","nPeriods","percUnstable") dg[[which(names(dg)=="ss.S2")]] <- ss.S2 } } ### S 3. 
#remove histograms #remove hinges (no standard format) if(any(dg[[which(names(dg)=="ss.S3")]]!="-")){ ss.S3 <- dg[[which(names(dg)=="ss.S3")]] ss.S3 <- ss.S3[-grep("thist",names(ss.S3))] ss.S3 <- ss.S3[-grep("hinge",names(ss.S3))] ss.S3split <- strsplit(unlist(ss.S3),"\\s+") #dims <- lapply(ss.S3split,length) ss.S3 <- data.frame(do.call(rbind,ss.S3split),stringsAsFactors=FALSE) ss.S3[,-1] <- apply(ss.S3[,-1],2,as.numeric) colnames(ss.S3) <- c("periodYear","nBreakdowns","ampd") ss.S3.new <- list() ss.S3.names <- vector() for( i in 1:5){ s3.names <- c("s3.a.brk", "s3.b.brk", "s3.c.brk", "s3.d.brk", "s3.e.brk") ss.S3.new.obj <-ss.S3[grep(s3.names[i],rownames(ss.S3)),] if(dim(ss.S3.new.obj)[1]!=0){ rownames(ss.S3.new.obj)<-NULL ss.S3.new[[length(ss.S3.new)+1]] <-ss.S3.new.obj ss.S3.names[length(ss.S3.names)+1] <- c("a.seasFac","b.td","c.SA","d.period-period","e.year-year")[i] } } ss.S3 <- ss.S3.new names(ss.S3) <- ss.S3.names dg[[which(names(dg)=="ss.S3")]] <- ss.S3 } } if(history){ ### R 1. - R 6. h.tablenames <- c("h.R1","h.R2","h.R4","h.R5","h.R6") for(i in 1:length(h.tablenames)){ if(any(unlist(dg[h.tablenames[i]])!="-")){ lags <- unique(substr(names(dg[h.tablenames[i]][[1]]),1,9)) if(any(grepl("aarmo",lags))) lags <- lags[-grep("aarmo",lags)] colnames.lags <- gsub(paste("r0",substr(h.tablenames[i],4,4),".lag0",sep=""),"lag",lags) colnames.lags <- gsub(paste("r0",substr(h.tablenames[i],4,4),".lag",sep=""),"lag",colnames.lags) colnames.lags <- gsub(paste("r0",substr(h.tablenames[i],4,4),".proj.",sep=""),"proj",colnames.lags) dates <- date.list <- total.list <- hingeValues.list <- hinges <- list() for(j in 1:length(lags)){ table <- strsplit(unlist(dg[h.tablenames[i]][[1]]),"\\s+") table <- table[grep(lags[j],names(table))] date <- table[-c(grep(".all",names(table)),grep(".hinge",names(table)))] date <- do.call(rbind,date) rownames(date)<-NULL date.list[[length(date.list)+1]]<-date[,2] dates[[length(dates)+1]] <- date[,1] total <- unlist(table[grep(".all",names(table))]) rownames(total)<-NULL total.list[[length(total.list)+1]]<-total hingeValues <- table[grep(".hinge",names(table))] hingeValues <- do.call(rbind,hingeValues) hinges[[length(hinges)+1]] <- do.call(rbind,strsplit(rownames(hingeValues),"hinge."))[,2] #rownames(hingeValues)<-NULL hingeValues.list[[length(hingeValues.list)+1]]<-hingeValues } if(length(lags)>1){ #date if(all(lapply(dates,length)==length(unique(unlist(dates))))){ date <- as.data.frame(cbind(dates[[1]],do.call(cbind,date.list)),stringsAsFactors=FALSE) colnames(date)<-c("date",colnames.lags) date[,-1]<-apply(date[,-1],2,as.numeric) rownames(date)<-NULL }else{ all.dates.sorted <- all.dates <- unique(unlist(dates)) months <-c("Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec") if(any(all.dates%in%months)){ all.dates.sorted[1:length(suppressWarnings(which(is.na(as.numeric(all.dates)))))] <- all.dates[suppressWarnings(which(is.na(as.numeric(all.dates))))][match(months,all.dates[suppressWarnings(which(is.na(as.numeric(all.dates))))])] }else{ quarters <- c("1st", "2nd", "3rd", "4th") all.dates.sorted[1:length(suppressWarnings(which(is.na(as.numeric(all.dates)))))] <- all.dates[suppressWarnings(which(is.na(as.numeric(all.dates))))][match(quarters,all.dates[suppressWarnings(which(is.na(as.numeric(all.dates))))])] } all.dates.sorted[(length(suppressWarnings(which(is.na(as.numeric(all.dates)))))+1):length(all.dates.sorted)] <- as.character(sort(suppressWarnings(as.numeric(all.dates[which(!is.na(as.numeric(all.dates)))])))) all.dates <- 
all.dates.sorted for(k in 1:length(dates)){ date.list[[k]] <- c(date.list[[k]],rep(NA,length(all.dates[which(!all.dates%in%dates[[k]])]))) dates[[k]] <- c(dates[[k]],all.dates[which(!all.dates%in%dates[[k]])]) date.list[[k]] <- date.list[[k]][ match(all.dates,dates[[k]])] } date <- as.data.frame(cbind(all.dates,do.call(cbind,date.list)),stringsAsFactors=FALSE) colnames(date)<-c("date",colnames.lags) date[,-1]<-apply(date[,-1],2,as.numeric) rownames(date)<-NULL } #total total <- as.data.frame(do.call(cbind,total.list),stringsAsFactors=FALSE) total[,1:(dim(total)[2])] <- as.numeric(total) rownames(total)<-NULL colnames(total)<-colnames.lags #hingeValues if(length(unique(unlist(hinges)))==length(unlist(hinges))/length(lags)){ hingeValues <- as.data.frame(cbind(unlist(hinges[[1]]),do.call(cbind,hingeValues.list)),stringsAsFactors=FALSE) rownames(hingeValues)<-NULL colnames(hingeValues)<-c("hinges",colnames.lags) hingeValues[,-1]<-apply(hingeValues[,-1],2,as.numeric) }else{ hingeValues <- as.data.frame(do.call(cbind,lapply(1:length(lags),function(x)cbind(hinges[[x]],hingeValues.list[[x]]))),stringsAsFactors=FALSE) rownames(hingeValues)<-NULL colnames(hingeValues)[seq(from=1,by=2,length.out=length(lags))]<-rep("hinges",length(lags)) colnames(hingeValues)[seq(from=2,by=2,length.out=length(lags))]<-colnames.lags } }else{ #date date <- as.data.frame(cbind(unlist(dates),unlist(date.list)),stringsAsFactors=FALSE) colnames(date)<-c("date",colnames.lags) date[,1]<-as.character(date[,1]) date[,2]<-as.numeric(date[,2]) rownames(date)<-NULL #total total <- as.data.frame(do.call(cbind,total.list),stringsAsFactors=FALSE) total[1]<-as.numeric(total) colnames(total)<-colnames.lags rownames(total)<-NULL #hingeValues hingeValues <- as.data.frame(cbind(unlist(hinges[[1]]),do.call(cbind,hingeValues.list)),stringsAsFactors=FALSE) rownames(hingeValues)<-NULL colnames(hingeValues)<-c("hinges",colnames.lags) hingeValues[,-1]<-as.numeric(hingeValues[,-1]) } dg[[which(names(dg) %in% h.tablenames[i])]] <- list(date=date,total=total,hingeValues=hingeValues) }} ### R 7. if(file.exists(paste(filename,".","lkh",sep=""))){ h.R7 <- readLines(con=paste(filename,".","lkh",sep=""),n=-1) names.h.R7 <- c("date","loglikelihood","aicc") h.R7 <- data.frame(do.call(rbind,lapply(strsplit(h.R7[-(1:2)],"\t"),as.numeric))) colnames(h.R7)<-names.h.R7 dg[[length(dg)+1]]<-h.R7 names(dg)[[length(dg)]]<-"h.R7" } ### R 8. 
if(file.exists(paste(filename,".","fce",sep=""))){ h.R8 <- readLines(con=paste(filename,".","fce",sep=""),n=-1) names.h.R8 <- gsub("Sum","sum",gsub("\\(|\\)","",unlist(strsplit(h.R8[1],"\t")))) h.R8 <- data.frame(do.call(rbind,lapply(strsplit(h.R8[-(1:2)],"\t"),as.numeric))) colnames(h.R8)<-names.h.R8 rownames(h.R8)<-NULL h.meanssfe <- as.data.frame(t(as.numeric(unlist(strsplit(unlist(dg[["h.meanssfe"]]),"\\s+"))))) colnames(h.meanssfe)<-gsub("sumSqFcstError","lead",names.h.R8[-1]) rownames(h.meanssfe)<-NULL dg[[length(dg)+1]]<-list(h.R8=h.R8,meanSumSqFcstError=h.meanssfe) names(dg)[[length(dg)]]<-"h.R8" dg <- dg[-which(names(dg) %in% "h.meanssfe")] } }#end history ### identify: autocorrealtions of the residuals if(file.exists(paste(filename,".","iac",sep=""))){ iac <- readLines(con=paste(filename,".","iac",sep=""),n=-1) i.tables <- grep("\\$diff",iac) #grep("\\$sdiff",iac,value=TRUE) name.table <- iac.list <- list() for(i in 1:length(i.tables)){ name.table[[length(name.table)+1]] <- gsub("= ","",gsub("\\$","",paste(iac[i.tables[i]],iac[i.tables[i]+1],sep="_"))) #names.iac <- gsub("\\(|\\)","",unlist(strsplit(iac[i.tables[i]+2],"\t"))) names.iac <- c("lag","sample.acf","stderr.acf","Ljung-Box.q","df.q","pval" ) if(i.tables[i]!=i.tables[length(i.tables)]) iac.table <- data.frame(do.call(rbind,lapply(strsplit(iac[(i.tables[i]+4):(i.tables[i+1]-1)],"\t"),as.numeric))) else iac.table <- data.frame(do.call(rbind,lapply(strsplit(iac[(i.tables[i]+4):length(iac)],"\t"),as.numeric))) colnames(iac.table)<-names.iac rownames(iac.table)<-NULL iac.list[[length(iac.list)+1]] <- iac.table } names(iac.list)<-name.table dg[[length(dg)+1]]<-iac.list names(dg)[[length(dg)]]<-"rsd.iac" } ### identify: partial autocorrealtions of the residuals if(file.exists(paste(filename,".","ipc",sep=""))){ ipc <- readLines(con=paste(filename,".","ipc",sep=""),n=-1) i.tables <- grep("\\$diff",ipc) #grep("\\$sdiff",ipc,value=TRUE) name.table <- ipc.list <- list() for(i in 1:length(i.tables)){ name.table[[length(name.table)+1]] <- gsub("= ","",gsub("\\$","",paste(ipc[i.tables[i]],ipc[i.tables[i]+1],sep="_"))) #names.ipc <- gsub("\\(|\\)","",unlist(strsplit(ipc[i.tables[i]+2],"\t"))) names.ipc <- c("lag","sample.pacf","stderr.pacf") if(i.tables[i]!=i.tables[length(i.tables)]) ipc.table <- data.frame(do.call(rbind,lapply(strsplit(ipc[(i.tables[i]+4):(i.tables[i+1]-1)],"\t"),as.numeric))) else ipc.table <- data.frame(do.call(rbind,lapply(strsplit(ipc[(i.tables[i]+4):length(ipc)],"\t"),as.numeric))) colnames(ipc.table)<-names.ipc rownames(ipc.table)<-NULL ipc.list[[length(ipc.list)+1]] <- ipc.table } names(ipc.list)<-name.table dg[[length(dg)+1]]<-ipc.list names(dg)[[length(dg)]]<-"rsd.ipc" } if(transform=="auto" && dg[["transform"]]!="Automatic selection" &&!x11regress){ dg[["autotransform"]] <- dg[["transform"]] names(dg[["autotransform"]]) <-"autotransform" dg[["transform"]][[1]]<-"Automatic selection" names(dg[["transform"]])<-"transform" } if(any(grepl("derived.coef",ls()))){ dg[[length(dg)+1]]<-derived.coef names(dg)[[length(dg)]]<-"derived.coef" } if(dg[["finalseasonalma"]]!="-"){ dg[["seasonalma"]] <- c(dg[["seasonalma"]],dg[["finalseasonalma"]]) } dg <- dg[-which(names(dg)=="finalseasonalma")] if(dg[["finalsamode"]]!="-"){ dg[["samode"]] <- c(dg[["samode"]],dg[["finalsamode"]]) } dg <- dg[-which(names(dg)=="finalsamode")] if(dg[["id.rsdseas"]]=="-") dg[["id.rsdseas"]]<-"Residual seasonality present" if(is.null(outlier) &&!x11regress){ dg[[length(dg)+1]]<-"No outlier detection performed" names(dg)[[length(dg)]]<-"ifout" 
}else if(!is.null(outlier) &&!x11regress){ dg[[length(dg)+1]]<-"Outlier detection performed" names(dg)[[length(dg)]]<-"ifout" } #filename <- "M:/Meraner/Workspace/Saisonbereinigung_Test/gra/Rout" #file<-"M:/Meraner/Workspace/Saisonbereinigung_Test/Rout" #!if exists notwendig if(file.exists(paste(filename,".","acf",sep=""))){ #sample autocorrelations of residuals acf <- readLines(con=paste(filename,".","acf",sep=""),n=-1) #names.acf <- unlist(strsplit(acf[1],"\t")) names.acf <- c("lag","sample.acf","stderr.acf","Ljung-Box.q","df.q","pval") acf <- data.frame(do.call(rbind,lapply(strsplit(acf[-(1:2)],"\t"),as.numeric))) colnames(acf)<-names.acf dg[[length(dg)+1]]<-acf names(dg)[[length(dg)]]<-"rsd.acf" } if(file.exists(paste(filename,".","pcf",sep=""))){ #sample partial autocorrelations of residuals pacf <- readLines(con=paste(filename,".","pcf",sep=""),n=-1) #names.pacf <- unlist(strsplit(pacf[1],"\t")) names.pacf <- c("lag","sample.pacf","stderr.pacf") pacf <- data.frame(do.call(rbind,lapply(strsplit(pacf[-(1:2)],"\t"),as.numeric))) colnames(pacf)<-names.pacf dg[[length(dg)+1]]<-pacf names(dg)[[length(dg)]]<-"rsd.pacf" } if(file.exists(paste(filename,".","ac2",sep=""))){ #sample autocorrelations of squared residuals acf2 <-readLines(con=paste(filename,".","ac2",sep=""),n=-1) names.acf2 <- unlist(strsplit(acf2[1],"\t")) names.acf2 <- c("lag","sample.acf2","stderr.acf2","Ljung-Box.q","df.q","pval") acf2 <- data.frame(do.call(rbind,lapply(strsplit(acf2[-(1:2)],"\t"),as.numeric))) colnames(acf2)<-names.acf2[1:dim(acf2)[2]]#Box Ljung fehlt fuer acf2 #Box.test(,lag=1,type="Ljung-Box",) dg[[length(dg)+1]]<-acf2 names(dg)[[length(dg)]]<-"rsd.acf2" } ##Forecasts als data.frame #if(file.exists(paste(filename,".","fct",sep=""))){ ##sample autocorrelations of residuals # fct <- readLines(con=paste(filename,".","fct",sep=""),n=-1) # names.fct <- unlist(strsplit(fct[1],"\t")) ## names.fct <- c("..") # fct <- data.frame(do.call(rbind,lapply(strsplit(fct[-(1:2)],"\t"),as.numeric))) # colnames(fct)<-names.fct ## dg[[length(dg)+1]]<-fct ## names(dg)[[length(dg)]]<-"fct" #out$forecast <- fct #} # ##Backcasts als data.frame #if(file.exists(paste(filename,".","bct",sep=""))){ ##sample autocorrelations of residuals # bct <- readLines(con=paste(filename,".","bct",sep=""),n=-1) # names.bct <- unlist(strsplit(bct[1],"\t")) ## names.bct <- c("..") # bct <- data.frame(do.call(rbind,lapply(strsplit(bct[-(1:2)],"\t"),as.numeric))) # colnames(bct)<-names.bct ## dg[[length(dg)+1]]<-bct ## names(dg)[[length(dg)]]<-"bct" #out$backcast <- bct #} dg[[length(dg)+1]]<- file names(dg)[[length(dg)]]<-"tsName" #out <- list() dg[[length(dg)+1]]<- freq_series names(dg)[[length(dg)]]<-"frequency" if(any(grepl("span:",udg,fixed=TRUE))){ span <- grep("span:",udg,fixed=TRUE,value=TRUE) if(length(span)>1){ dg[[length(dg)+1]]<- span names(dg)[[length(dg)]]<-"span" }else{ span <- str_trim(unlist(strsplit(span,":"))) dg[[length(dg)+1]]<- span[2] names(dg)[[length(dg)]]<-"span" } } out[["dg"]] <- list() out[["dg"]] <- dg # out$seats <- seats out$file <- file out$tblnames <- tblnames out$Rtblnames <- Rtblnames class(out) <- "x12work" out }
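## Illustrative usage sketch (added for clarity, not part of the original source;
## the object name 'out' is hypothetical). The list assembled above is returned
## with class "x12work"; its "dg" component holds the parsed diagnostics and is
## what summaryworkhorse()/summary.x12work() consume (see summary.x12work.R):
# str(out$dg, max.level = 1)   # e.g. transform, arimamdl, q, m1..m11, rsd.acf, ...
# out$tblnames                 # x12 output tables that were read into 'out'
# summary(out)                 # S3 dispatch to summary.x12work() (assuming the method is registered)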
/scratch/gouwar.j/cran-all/cranData/x12/R/readx12Out.R
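## summary() methods for the S4 classes "x12Output", "x12Single" and "x12Batch".
## (Descriptive header added for orientation; not part of the original source.)
## They delegate the actual work to summaryworkhorse() (console output) and
## summary.output.workhorse() (data.frame output) and, for x12Single/x12Batch,
## can additionally report previous runs via the 'oldOutput' argument.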
setMethod("summary", signature(object = "x12Output"), function (object, fullSummary=FALSE, spectra.detail=FALSE, almostout=FALSE, rsd.autocorr=NULL, quality.stat=FALSE, likelihood.stat=FALSE, aape=FALSE, id.rsdseas=FALSE, slidingspans=FALSE, history=FALSE, identify=FALSE, print=TRUE) { if(length(object@dg)>0){ if(print) summaryworkhorse(object@dg,fullSummary=fullSummary,spectra.detail=spectra.detail, almostout=almostout,rsd.autocorr=rsd.autocorr,quality.stat=quality.stat, likelihood.stat=likelihood.stat,aape=aape,id.rsdseas=id.rsdseas, slidingspans=slidingspans,history=history,identify=identify) invisible(summary.output.workhorse(object@dg,fullSummary=fullSummary, spectra.detail=spectra.detail,almostout=almostout,rsd.autocorr=rsd.autocorr, quality.stat=quality.stat,likelihood.stat=likelihood.stat,aape=aape, id.rsdseas=id.rsdseas,slidingspans=slidingspans,history=history,identify=identify)) }else{ cat("You need to run x12 before viewing a summary!\n") } } ) setMethod("summary", signature(object = "x12Single"), function (object, fullSummary=FALSE, spectra.detail=FALSE, almostout=FALSE, rsd.autocorr=NULL, quality.stat=FALSE, likelihood.stat=FALSE, aape=FALSE, id.rsdseas=FALSE,slidingspans=FALSE, history=FALSE, identify=FALSE, oldOutput=NULL,print=TRUE) { ############ # sumout<-unlist(summary.output[,1]) # names.sumout<-unique(sumout) # length.names.sumout<-unlist(lapply(names.sumout,function(x)length(grep(x,sumout)))) ########### # summary.output<-summary(object@x12Output,fullSummary=fullSummary,spectra.detail=spectra.detail,almostout=almostout,rsd.autocorr=rsd.autocorr,quality.stat=quality.stat,likelihood.stat=likelihood.stat,aape=aape,id.rsdseas=id.rsdseas,slidingspans=slidingspans,history=history,identify=identify) if(is.null(oldOutput)){ if(!is.null(object@tsName)){ if(print){ cat("-------------------------- ",object@tsName," ------------------------------------\n") cat("-----------------------------------------------------------------------------------\n") } summary.output<-summary(object@x12Output,fullSummary=fullSummary, spectra.detail=spectra.detail,almostout=almostout,rsd.autocorr=rsd.autocorr, quality.stat=quality.stat,likelihood.stat=likelihood.stat,aape=aape, id.rsdseas=id.rsdseas,slidingspans=slidingspans,history=history, identify=identify,print=print) names(summary.output)[2]<-object@tsName }else{ if(print){ cat("-------------------------- Rout ------------------------------------\n") cat("-----------------------------------------------------------------------------------\n") } summary.output<-summary(object@x12Output,fullSummary=fullSummary, spectra.detail=spectra.detail,almostout=almostout,rsd.autocorr=rsd.autocorr, quality.stat=quality.stat,likelihood.stat=likelihood.stat,aape=aape, id.rsdseas=id.rsdseas,slidingspans=slidingspans,history=history, identify=identify,print=print) } }else{ nprev <- min(length(object@x12OldOutput),oldOutput) if(!is.null(object@tsName)){ if(print){ cat("-------------------------- ",object@tsName," ------------------------------------\n") cat("-----------------------------------------------------------------------------------\n") } summary.output<-summary(object@x12Output,fullSummary=fullSummary, spectra.detail=spectra.detail,almostout=almostout,rsd.autocorr=rsd.autocorr, quality.stat=quality.stat,likelihood.stat=likelihood.stat,aape=aape, id.rsdseas=id.rsdseas,slidingspans=slidingspans,history=history, identify=identify,print=print) names(summary.output)[2]<-object@tsName }else{ if(print){ cat("-------------------------- Rout 
------------------------------------\n") cat("-----------------------------------------------------------------------------------\n") } summary.output<-summary(object@x12Output,fullSummary=fullSummary, spectra.detail=spectra.detail,almostout=almostout,rsd.autocorr=rsd.autocorr, quality.stat=quality.stat,likelihood.stat=likelihood.stat,aape=aape, id.rsdseas=id.rsdseas,slidingspans=slidingspans,history=history, identify=identify,print=print) } if(nprev>0){ for(i in nprev:1){ if(i==length(object@x12OldOutput)) TF <- !identical(object@x12Output,object@x12OldOutput[[i]]) else if(i!=nprev) TF <- !identical(object@x12OldOutput[[i]],object@x12OldOutput[[i+1]]) else TF <- TRUE if(TF){ if(print) cat("\n--------------------------- RUN ",i," ----------------------------------------\n") summary.oldout<-summary(object@x12OldOutput[[i]],fullSummary=fullSummary, spectra.detail=spectra.detail,almostout=almostout,rsd.autocorr=rsd.autocorr, quality.stat=quality.stat,likelihood.stat=likelihood.stat,aape=aape, id.rsdseas=id.rsdseas,slidingspans=slidingspans,history=history, identify=identify,print=print) names(summary.oldout)<-names(summary.output) summary.oldout<-rbind(c(paste("OLD OUTPUT",i),paste("Run",i)),summary.oldout) summary.output<-rbind(summary.output,summary.oldout) }else{ if(print) cat("--- No valid previous runs. ---\n") } } } } invisible(summary.output) } ) setMethod("summary", signature(object = "x12Batch"), function (object, fullSummary=FALSE, spectra.detail=FALSE, almostout=FALSE, rsd.autocorr=NULL, quality.stat=FALSE, likelihood.stat=FALSE, aape=FALSE, id.rsdseas=FALSE, slidingspans=FALSE,history=FALSE,identify=FALSE,oldOutput=NULL,print=TRUE) { summary.output<-list() for(i in 1:length(object@x12List)){ if(print) cat("-----------------------------------------------------------------------------------\n") summary.output[[length(summary.output)+1]]<-summary(object@x12List[[i]],fullSummary=fullSummary,spectra.detail=spectra.detail,almostout=almostout,rsd.autocorr=rsd.autocorr,quality.stat=quality.stat,likelihood.stat=likelihood.stat,aape=aape,id.rsdseas=id.rsdseas,slidingspans=slidingspans,history=history,identify=identify,oldOutput=oldOutput,print=print) } if(is.null(oldOutput)){ summary.output<-fun.table(summary.output,object=object) invisible(summary.output) }else{ new.out<-list() old.out<-list() for(i in 1:length(object@x12List)){ # cat("i=",i,"\n") if(any(grep("OLD OUTPUT",summary.output[[i]][,1]))){ new.out[[i]]<-summary.output[[i]][1:(min(grep("OLD OUTPUT",summary.output[[i]][,1]))-1),] old.out.sub<-list() ind.old.out<-grep("OLD OUTPUT",summary.output[[i]][,1]) if(length(ind.old.out)>1){ for(j in 1:(length(ind.old.out)-1)){ old.out.sub[[length(old.out.sub)+1]]<- summary.output[[i]][ind.old.out[j]:(ind.old.out[j+1]-1),] }} old.out.sub[[length(old.out.sub)+1]]<- summary.output[[i]][ind.old.out[length(ind.old.out)]:length(summary.output[[i]][,1]),] old.out[[length(old.out)+1]]<-old.out.sub }else{ new.out[[i]]<-summary.output[[i]] old.out[[length(old.out)+1]]<-NA } } # summary.output<-fun.table(new.out,object=object) noo.values<-sort(unique(grep("OLD OUTPUT",unlist(old.out),value=TRUE)),decreasing=TRUE) noo <- length(noo.values) new.list<-list() new.list<-lapply(1:length(old.out),function(x) new.list[[length(new.list)+1]]<-rep(data.frame(NA),noo)) for(i in 1:length(old.out)){ if(noo>0){ for(j in 1:noo){ if(any(grepl(noo.values[j],old.out[[i]]))) new.list[[i]][[j]]<-old.out[[i]][[grep(noo.values[j],old.out[[i]])]] } } } old.out<-new.list new.list<-list() if(noo>0){ for(i in 1:noo){ 
sub.new.list<-list() new.list[[length(new.list)+1]]<-lapply(old.out,function(x){ if(all(is.na(x[[i]]))){ x[[i]]<-list(c(paste("OLD OUTPUT",c(noo:1)[i]),"No previous run")) x[[i]]<-unlist(x[[i]]) } sub.new.list[[length(sub.new.list)+1]]<-x[[i]] }) } for(i in 1:noo){ summary.output<-rbind(summary.output,fun.table(new.list[[i]],object=object)) } } } invisible(summary.output) } ) fun.table <- function(y,object){ row.names<-unique(unlist(lapply(y,function(x){ if(is.character(x)) x[1] else x[,1]}))) new.out<-cbind(row.names,rep(NA,length(row.names))) col.names<-vector() for(i in 1:length(object@x12List)){ new.out2<-new.out if(is.character(y[[i]])){ new.out2<-new.out2[,2] col.names[length(col.names)+1]<-object@x12List[[i]]@tsName } else{ new.out2<-apply(new.out2,1,function(x){ if(x[1]%in%y[[i]][,1]) x[2]<-y[[i]][which(y[[i]][,1]%in%x[1]),2] else x[2]<-x[2] }) col.names[length(col.names)+1]<-names(y[[i]])[2] } if(i==1) new.out3<-data.frame(cbind(new.out[,1],unlist(new.out2))) else new.out3<-cbind(new.out3,unlist(new.out2)) } y<-as.data.frame(new.out3) colnames(y)<-c("DIAGNOSTICS",col.names) g <- grep("variable",as.character(y[,1])) if(length(g)>1){ ind1 <- 1:(g[1]-1) y2 <- y[ind1,] ind2 <- g y <- rbind(y2,y[ind2,],y[-c(ind1,ind2),]) rownames(y) <- 1:nrow(y) } return(y) }
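## Illustrative usage sketch (not part of the original source; 'xb' is a
## hypothetical x12Batch object on which x12() has already been run):
# summary(xb)                                     # condensed diagnostics table, printed
# summary(xb, fullSummary = TRUE)                 # include all optional diagnostic blocks
# s <- summary(xb, oldOutput = 2, print = FALSE)  # also collect up to 2 previous runs, no console output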
/scratch/gouwar.j/cran-all/cranData/x12/R/summary-methods.R
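## summary.output.workhorse(): builds the two-column diagnostics data.frame
## ("DIAGNOSTICS" / value) that the summary() methods return invisibly.
## (Descriptive header added for orientation; not part of the original source.)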
#summary.output.x12 <- function(object,fullSummary=FALSE,spectra.detail=FALSE,almostout=FALSE,rsd.autocorr=NULL,q2=FALSE,likelihood.stat=FALSE,aape=FALSE,id.rsdseas=FALSE,slidingspans=FALSE,...){ # summaryworkhorse(object$dg,fullSummary=fullSummary,spectra.detail=spectra.detail,almostout=almostout,rsd.autocorr=rsd.autocorr,quality.stat=quality.stat,likelihood.stat=likelihood.stat,aape=aape,id.rsdseas=id.rsdseas,slidingspans=slidingspans,history=history,identify=identify) #} summary.output.workhorse <- function(x,fullSummary=FALSE,spectra.detail=FALSE,almostout=FALSE,rsd.autocorr=NULL,quality.stat=FALSE,likelihood.stat=FALSE,aape=FALSE,id.rsdseas=FALSE,slidingspans=FALSE,history=FALSE,identify=FALSE){ #cat("File: \"",x$file,"\"",sep="","\n") if(length(nchar(unlist(x$outlier)))==0) x$outlier<-"-" if(fullSummary){ spectra.detail=TRUE almostout=TRUE rsd.autocorr=c("acf","pacf","acf2") quality.stat=TRUE likelihood.stat=TRUE aape=TRUE id.rsdseas=TRUE slidingspans=TRUE history=TRUE identify=TRUE } summary.output<-data.frame() summary.output[dim(summary.output)[1]+1,1]<-"Frequency" summary.output[dim(summary.output)[1],2]<-x$frequency if(length(x$span)>1){ span <- str_trim(unlist(strsplit(x$span,":"))) span.index <- which(span=="span") summary.output[dim(summary.output)[1]+1,1]<-"Span" summary.output[dim(summary.output)[1],2]<-span[span.index+1] span.index <- which(span=="modelspan") if(length(span.index)>0) modelspan <- c("Model Span",span[span.index+1]) span.index <- which(span=="outlierspan") if(length(span.index)>0) outlierspan <- c("Outlier Span",span[span.index+1]) }else{ summary.output[dim(summary.output)[1]+1,1]<-"Span" summary.output[dim(summary.output)[1],2]<-x$span } if(x$x11regress=="no"){ # colnames(summary.output)<-c("Diagnostic","Series") summary.output[dim(summary.output)[1]+1,1]<-"X11 Regression" summary.output[dim(summary.output)[1],2]<-"FALSE" summary.output[dim(summary.output)[1]+1,1]<-"Model Definition" if(x$automdl!="-"){ summary.output[dim(summary.output)[1],2]<-paste("ARIMA Model:",unlist(x$arimamdl),"(Automatic Model Choice)") }else{ summary.output[dim(summary.output)[1],2]<- paste("ARIMA Model:",unlist(x$arimamdl)) } if(exists("modelspan")) summary.output[dim(summary.output)[1]+1,] <- modelspan if(x$transform=="Automatic selection"){ summary.output[dim(summary.output)[1]+1,1]<-"Transformation" summary.output[dim(summary.output)[1],2]<-paste(unlist(x$transform),":",unlist(x$autotransform)) }else{ summary.output[dim(summary.output)[1]+1,1]<-"Transformation" summary.output[dim(summary.output)[1],2]<-paste(unlist(x$transform)) } summary.output[dim(summary.output)[1]+1,1]<-"Regression Model" summary.output[dim(summary.output)[1],2] <- paste(unlist(x$regmdl),collapse=" ") # cat("\n\tOutlier Detection\n") if(x$ifout=="Outlier detection performed"){ summary.output[dim(summary.output)[1]+1,1]<-"Outlier detection performed" summary.output[dim(summary.output)[1],2]<-paste("TRUE") if(exists("outlierspan")) summary.output[dim(summary.output)[1]+1,] <- outlierspan # cat("Critical |t| for outliers:\t\n") for(i in 1:length(names(x$crit))){ summary.output[dim(summary.output)[1]+1,1]<-names(x$crit)[[i]] summary.output[dim(summary.output)[1],2]<-paste(x$crit[[i]],collapse=" ") } summary.output[dim(summary.output)[1]+1,1]<-"Total Number of Outliers" summary.output[dim(summary.output)[1],2]<-paste(unlist(x$nout)) summary.output[dim(summary.output)[1]+1,1]<-"Nr of Automatically Identified Outliers" summary.output[dim(summary.output)[1],2]<-paste(unlist(x$nautoout)) if(almostout){ 
summary.output[dim(summary.output)[1]+1,1]<-"Nr of Almost Outliers" summary.output[dim(summary.output)[1],2]<-paste(unlist(x$nalmostout)) } } else{ summary.output[dim(summary.output)[1]+1,1]<-"Outlier detection performed" summary.output[dim(summary.output)[1],2]<-paste("FALSE") } rest<-unlist(lapply(strsplit(as.character(x$regmdl),"+",fixed=TRUE),function(x)gsub("^\\s+|\\s+$", "",x))) rest<-names(x)[which(names(x)%in%rest)] if(almostout){ liste <- c("outlier","userdefined","leapyear","td","autooutlier",rest,"almostoutlier") }else liste <- c("outlier","userdefined","leapyear","td",rest,"autooutlier") liste<-liste[which(liste%in%names(x))] empty <- which(unlist(lapply(1:length(x),function(y)any(x[[y]]=="-")))) res <- as.data.frame(do.call(rbind,lapply(which(!liste %in% names(x[empty])),function(j){ if(!any(grepl(names(x[liste[j]]),names(x[[liste[j]]])))){ names(x[[liste[j]]])<-paste(names(x[liste[j]]),"_",names(x[[liste[j]]]),sep="") } do.call(rbind,lapply(1:length(x[[liste[j]]]),function(i){ c(names(x[[liste[j]]][i]),unlist(x[[liste[j]]][[i]]))}))}))) if(all(dim(res))>0){ res[,2:4] <- apply(res[,2:4],2,function(x)as.numeric(formatC(as.numeric(as.character(x)),digits=3,format="f"))) colnames(res)[1]<-"variable" res2 <- cbind(paste(1:length(res[,1]),"variable, coef, stderr, tval"),apply(res,1,paste,collapse=", ")) summary.output<-rbind(summary.output,res2) if(!is.null(x[["derived.coef"]])){ summary.output[dim(summary.output)[1]+1,1]<-"* Derived parameter estimates" summary.output[dim(summary.output)[1],2]<-paste(x[["derived.coef"]]) } } if(likelihood.stat){ # cat("\n\tLikelihood Statistics\n") lstat<-matrix(c("AIC","AICC","BIC","HQ ","Log Likelihood",x$aic,x$aicc,x$bic,x$hq,x$loglikelihood),ncol=2) summary.output <- rbind(summary.output,lstat) } if(aape && length(x$aape)>1){ # cat("\nAverage absolute percentage error\n") mode<-ifelse(x$aape$aape.mode=="outofsample","out of sample","within sample") summary.output[dim(summary.output)[1]+1,1]<-"AAPE mode" summary.output[dim(summary.output)[1],2]<-paste(mode) aape.mat<-matrix(c("AAPE Last year","AAPE Last-1 year","AAPE Last-2 year","AAPE Last 3 years",x$aape$aape.0,x$aape$aape.1,x$aape$aape.2,x$aape$aape.3),ncol=2) summary.output <- rbind(summary.output,aape.mat) } # cat("\n\tSeasonal Adjustment\n\n") summary.output[dim(summary.output)[1]+1,1]<-"Identifiable Seasonality" summary.output[dim(summary.output)[1],2]<-paste(unlist(x$id.seas)) if(id.rsdseas){ summary.output[dim(summary.output)[1]+1,1]<-"Residual Seasonality" if(x$id.rsdseas=="none") summary.output[dim(summary.output)[1],2]<-"none" else summary.output[dim(summary.output)[1],2]<-"yes" } summary.output[dim(summary.output)[1]+1,1]<-"Seasonal Peaks" summary.output[dim(summary.output)[1],2]<-paste(unlist(x$peaks.seas)) summary.output[dim(summary.output)[1]+1,1]<-"Trading Day Peaks" summary.output[dim(summary.output)[1],2]<-paste(unlist(x$peaks.td)) summary.output[dim(summary.output)[1]+1,1]<-"Q Statistic" summary.output[dim(summary.output)[1],2]<-paste(unlist(x$q)) if(quality.stat){ summary.output[dim(summary.output)[1]+1,1]<-"Q2 Statistic" summary.output[dim(summary.output)[1],2]<-paste(unlist(x$q2)) summary.output[dim(summary.output)[1]+1,1]<-"M1" summary.output[dim(summary.output)[1],2]<-paste(unlist(x$m1)) summary.output[dim(summary.output)[1]+1,1]<-"M2" summary.output[dim(summary.output)[1],2]<-paste(unlist(x$m2)) summary.output[dim(summary.output)[1]+1,1]<-"M3" summary.output[dim(summary.output)[1],2]<-paste(unlist(x$m3)) summary.output[dim(summary.output)[1]+1,1]<-"M4" 
summary.output[dim(summary.output)[1],2]<-paste(unlist(x$m4)) summary.output[dim(summary.output)[1]+1,1]<-"M5" summary.output[dim(summary.output)[1],2]<-paste(unlist(x$m5)) summary.output[dim(summary.output)[1]+1,1]<-"M6" summary.output[dim(summary.output)[1],2]<-paste(unlist(x$m6)) summary.output[dim(summary.output)[1]+1,1]<-"M7" summary.output[dim(summary.output)[1],2]<-paste(unlist(x$m7)) summary.output[dim(summary.output)[1]+1,1]<-"M8" summary.output[dim(summary.output)[1],2]<-paste(unlist(x$m8)) summary.output[dim(summary.output)[1]+1,1]<-"M9" summary.output[dim(summary.output)[1],2]<-paste(unlist(x$m9)) summary.output[dim(summary.output)[1]+1,1]<-"M10" summary.output[dim(summary.output)[1],2]<-paste(unlist(x$m10)) summary.output[dim(summary.output)[1]+1,1]<-"M11" summary.output[dim(summary.output)[1],2]<-paste(unlist(x$m11)) } summary.output[dim(summary.output)[1]+1,1]<-"Nr of M stats outside limits" summary.output[dim(summary.output)[1],2]<-paste(unlist(x$nmfail)) if(spectra.detail){ new.names<-function(z){ for(i in 1:length(x[[z]])){ if(length(x[[z]][[i]])>1){ x[[z]][[i]] <- do.call(paste,x[[z]][[i]]) }} y<-as.data.frame(x[[z]],row.names="") colnames(y)<-gsub(paste(z,".",sep=""),replacement="",names(unlist(x[[z]])),fixed=TRUE) return(y)} summary.output[dim(summary.output)[1]+1,1]<-"Spectrum of the original series" summary.output[dim(summary.output)[1],2]<-paste(names(new.names("spcori")),":",new.names("spcori"),collapse=",",sep="") summary.output[dim(summary.output)[1]+1,1]<-"Spectrum of the regARIMA model residuals" if(any(x$spcrsd!="-")) summary.output[dim(summary.output)[1],2]<-paste(names(new.names("spcrsd")),":",new.names("spcrsd"),collapse=",",sep="") else summary.output[dim(summary.output)[1],2]<-"-" summary.output[dim(summary.output)[1]+1,1]<-"Spectrum of differenced seasonally adjusted series" summary.output[dim(summary.output)[1],2]<-paste(names(new.names("spcsa")),":",new.names("spcsa"),collapse=",",sep="") summary.output[dim(summary.output)[1]+1,1]<-"Spectrum of modified irregular series" summary.output[dim(summary.output)[1],2]<-paste(names(new.names("spcirr")),":",new.names("spcirr"),collapse=",",sep="") } # cat("\n\tSeasonal and Trend Moving Averages\n\n") summary.output[dim(summary.output)[1]+1,1]<-"SA decomposition" summary.output[dim(summary.output)[1],2]<-paste(x$samode[[length(x$samode)]]) if(x$seasonalma[[1]]=="M.S.R."){ summary.output[dim(summary.output)[1]+1,1]<-"Seasonal moving average" summary.output[dim(summary.output)[1],2]<-paste(x$seasonalma[[length(x$seasonalma)]],"(Based on msr size)",sep=" ") }else{ summary.output[dim(summary.output)[1]+1,1]<-"Seasonal moving average" summary.output[dim(summary.output)[1],2]<-paste(x$seasonalma[[length(x$seasonalma)]]) } summary.output[dim(summary.output)[1]+1,1]<-"Henderson filter" summary.output[dim(summary.output)[1],2]<-paste(x$trendma[[length(x$trendma)]],"-term",sep="") if(!is.null(rsd.autocorr)){ #rsd.autocorr=c("rsd.acf","rsd.pacf","rsd.acf2") if("acf"%in%rsd.autocorr && !is.null(x$rsd.acf)){ sig<-rep("",dim(x$rsd.acf)[1]) sig[which(x$rsd.acf$pval<0.05 & x$rsd.acf$df.q>0)]<-"*" rsd.acf<-cbind(round(x$rsd.acf,digits=3),sig) summary.output<-rbind(summary.output,cbind(rep("acf: lag, sample.acf, stderr.acf, Ljung-Box.q, df.q, pval",dim(rsd.acf)[1]),paste(apply(rsd.acf[,-dim(rsd.acf)[2]],1,paste,collapse=", "),rsd.acf[,dim(rsd.acf)[2]]))) } if("pacf"%in%rsd.autocorr && !is.null(x$rsd.pacf)){ summary.output<-rbind(summary.output,cbind(rep("pacf: lag, sample.pacf, 
stderr.pacf",dim(x$rsd.pacf)[1]),apply(round(x$rsd.pacf,digits=3),1,paste,collapse=", "))) } if("acf2"%in%rsd.autocorr && !is.null(x$rsd.acf2)){ summary.output<-rbind(summary.output,cbind(rep("acf2: lag, sample.acf2, stderr.acf2",dim(x$rsd.acf2)[1]),apply(round(x$rsd.acf2,digits=3),1,paste,collapse=", "))) } } if(slidingspans){ if(length(grep("ss.",names(x)))>0){ summary.output[dim(summary.output)[1]+1,1]<-"Sliding spans analysis performed" summary.output[dim(summary.output)[1],2]<-"TRUE" if(all(x$ss.options!="-")){ ss.options<-cbind(c("Nr of spans","Length of spans","First period in first span","First year in first span"),as.character(x$ss.options)) summary.output<-rbind(summary.output,ss.options) } if(any(x$ss.seasTests!="-")){ seasTests <- x$ss.seasTests seasTests <- cbind(paste(rownames(seasTests),": M7, Identifiable Seasonality",sep=""), as.character(apply(seasTests[which(colnames(seasTests)%in%c("m7","idseas"))],1,function(x)paste(x,collapse=", ")))) summary.output <- rbind(summary.output,seasTests) } if(all(x$ss.S1!="-")){ summary.output[dim(summary.output)[1]+1,1]<-"S1 Table generated (Period means of seasonal factors)" summary.output[dim(summary.output)[1],2]<-"TRUE" ss.S1 <- x$ss.S1[[1]] ss.S1 <- cbind(paste("S1: ",paste(colnames(ss.S1),collapse=", "),sep=""), apply(ss.S1,1,function(x)paste(x,collapse=", "))) summary.output <- rbind(summary.output,ss.S1) ss.S1 <- x$ss.S1[[2]] ss.S1 <- cbind(paste("S1.summary",paste(row.names(ss.S1),paste(colnames(ss.S1),collapse=", "),sep=": "),sep="."), as.character(apply(ss.S1,1,function(x)paste(x,collapse=", ")))) summary.output <- rbind(summary.output,ss.S1) }else{ summary.output[dim(summary.output)[1]+1,1]<-"S1 Table generated (Period means of seasonal factors)" summary.output[dim(summary.output)[1],2]<-"FALSE" } if(all(x$ss.S2!="-")){ summary.output[dim(summary.output)[1]+1,1]<-"S2 Table generated (Percentage of unstable periods)" summary.output[dim(summary.output)[1],2]<-"TRUE" ss.S2 <- x$ss.S2 rn.S2 <- which(c("a.seasFac","b.td","c.SA","d.period-period","e.year-year")%in%row.names(ss.S2)) ss.S2<-cbind(paste(paste("S2.",row.names(ss.S2)[rn.S2],sep=""),": nUnstable, nPeriods, percUnstable",sep=""), as.character(apply(ss.S2,1,function(x)paste(x,collapse=", ")))) summary.output <- rbind(summary.output,ss.S2) }else{ summary.output[dim(summary.output)[1]+1,1]<-"S2 Table generated (Percentage of unstable periods)" summary.output[dim(summary.output)[1],2]<-"FALSE" } if(all(x$ss.S3!="-")){ summary.output[dim(summary.output)[1]+1,1]<-"S3 Table(s) generated (Breakdown of unstable periods)" summary.output[dim(summary.output)[1],2]<-"TRUE" ss.S3 <- x$ss.S3 rn.S3 <- c("a.seasFac","b.td","c.SA","d.period-period","e.year-year") rn.S3.index <- which(rn.S3%in%names(ss.S3)) for(i in 1:length(rn.S3.index)){ table.ss.S3 <- cbind(paste("S3.",rn.S3[rn.S3.index[i]],": ",paste(colnames(ss.S3[[i]]),collapse=", "),sep=""), as.character(apply(ss.S3[[i]],1,function(x)paste(x,collapse=", ")))) summary.output <- rbind(summary.output,table.ss.S3) } }else{ summary.output[dim(summary.output)[1]+1,1]<-"S3 Table(s) generated (Breakdown of unstable periods)" summary.output[dim(summary.output)[1],2]<-"FALSE" } }else{ summary.output[dim(summary.output)[1]+1,1]<-"Sliding spans analysis performed" summary.output[dim(summary.output)[1],2]<-"FALSE" } } if(history){ if(length(grep("h.",names(x),fixed=TRUE))>0){ summary.output[dim(summary.output)[1]+1,1]<-"History analysis performed" summary.output[dim(summary.output)[1],2]<-"TRUE" ## R1 if(all(x$h.R1!="-")){ 
summary.output[dim(summary.output)[1]+1,1]<-"R1 Summary table generated (SA series)" summary.output[dim(summary.output)[1],2]<-"TRUE" h.R1 <- x$h.R1[[1]] h.R1 <- cbind(paste("R1: ",paste(colnames(h.R1),collapse=", "),sep=""), apply(h.R1,1,function(x)paste(x,collapse=", "))) summary.output <- rbind(summary.output,h.R1) h.R1 <- x$h.R1[[2]] h.R1 <- cbind(paste("R1: total, ",paste(colnames(h.R1),collapse=", "),sep=""), paste("total",paste(h.R1,collapse=", "),sep=", ")) summary.output <- rbind(summary.output,h.R1) h.R1 <- x$h.R1[[3]] h.R1 <- cbind(paste("R1: ",paste(colnames(h.R1),collapse=", "),sep=""), apply(h.R1,1,function(x)paste(x,collapse=", "))) summary.output <- rbind(summary.output,h.R1) }else{ summary.output[dim(summary.output)[1]+1,1]<-"R1 Summary table generated (SA series)" summary.output[dim(summary.output)[1],2]<-"FALSE" } ## R2 if(all(x$h.R2!="-")){ summary.output[dim(summary.output)[1]+1,1]<-"R2 Summary table generated (period-period changes in SA)" summary.output[dim(summary.output)[1],2]<-"TRUE" h.R2 <- x$h.R2[[1]] h.R2 <- cbind(paste("R2: ",paste(colnames(h.R2),collapse=", "),sep=""), apply(h.R2,1,function(x)paste(x,collapse=", "))) summary.output <- rbind(summary.output,h.R2) h.R2 <- x$h.R2[[2]] h.R2 <- cbind(paste("R2: total, ",paste(colnames(h.R2),collapse=", "),sep=""), paste("total",paste(h.R2,collapse=", "),sep=", ")) summary.output <- rbind(summary.output,h.R2) h.R2 <- x$h.R2[[3]] h.R2 <- cbind(paste("R2: ",paste(colnames(h.R2),collapse=", "),sep=""), apply(h.R2,1,function(x)paste(x,collapse=", "))) summary.output <- rbind(summary.output,h.R2) }else{ summary.output[dim(summary.output)[1]+1,1]<-"R2 Summary table generated (period-period changes in SA)" summary.output[dim(summary.output)[1],2]<-"FALSE" } ## R4 if(all(x$h.R4!="-")){ summary.output[dim(summary.output)[1]+1,1]<-"R4 Summary table generated (Henderson trend component)" summary.output[dim(summary.output)[1],2]<-"TRUE" h.R4 <- x$h.R4[[1]] h.R4 <- cbind(paste("R4: ",paste(colnames(h.R4),collapse=", "),sep=""), apply(h.R4,1,function(x)paste(x,collapse=", "))) summary.output <- rbind(summary.output,h.R4) h.R4 <- x$h.R4[[2]] h.R4 <- cbind(paste("R4: total, ",paste(colnames(h.R4),collapse=", "),sep=""), paste("total",paste(h.R4,collapse=", "),sep=", ")) summary.output <- rbind(summary.output,h.R4) h.R4 <- x$h.R4[[3]] h.R4 <- cbind(paste("R4: ",paste(colnames(h.R4),collapse=", "),sep=""), apply(h.R4,1,function(x)paste(x,collapse=", "))) summary.output <- rbind(summary.output,h.R4) }else{ summary.output[dim(summary.output)[1]+1,1]<-"R4 Summary table generated (Henderson trend component))" summary.output[dim(summary.output)[1],2]<-"FALSE" } ## R5 if(all(x$h.R5!="-")){ summary.output[dim(summary.output)[1]+1,1]<-"R5 Summary table generated (period-period changes in trend)" summary.output[dim(summary.output)[1],2]<-"TRUE" h.R5 <- x$h.R5[[1]] h.R5 <- cbind(paste("R5: ",paste(colnames(h.R5),collapse=", "),sep=""), apply(h.R5,1,function(x)paste(x,collapse=", "))) summary.output <- rbind(summary.output,h.R5) h.R5 <- x$h.R5[[2]] h.R5 <- cbind(paste("R5: total, ",paste(colnames(h.R5),collapse=", "),sep=""), paste("total",paste(h.R5,collapse=", "),sep=", ")) summary.output <- rbind(summary.output,h.R5) h.R5 <- x$h.R5[[3]] h.R5 <- cbind(paste("R5: ",paste(colnames(h.R5),collapse=", "),sep=""), apply(h.R5,1,function(x)paste(x,collapse=", "))) summary.output <- rbind(summary.output,h.R5) }else{ summary.output[dim(summary.output)[1]+1,1]<-"R5 Summary table generated (period-period changes in trend)" 
summary.output[dim(summary.output)[1],2]<-"FALSE" } ## R6 if(all(x$h.R6!="-")){ summary.output[dim(summary.output)[1]+1,1]<-"R6 Summary table generated (conc. and proj. seasonal component)" summary.output[dim(summary.output)[1],2]<-"TRUE" h.R6 <- x$h.R6[[1]] h.R6 <- cbind(paste("R6: ",paste(colnames(h.R6),collapse=", "),sep=""), apply(h.R6,1,function(x)paste(x,collapse=", "))) summary.output <- rbind(summary.output,h.R6) h.R6 <- x$h.R6[[2]] h.R6 <- cbind(paste("R6: total, ",paste(colnames(h.R6),collapse=", "),sep=""), paste("total",paste(h.R6,collapse=", "),sep=", ")) summary.output <- rbind(summary.output,h.R6) h.R6 <- x$h.R6[[3]] h.R6 <- cbind(paste("R6: ",paste(colnames(h.R6),collapse=", "),sep=""), apply(h.R6,1,function(x)paste(x,collapse=", "))) summary.output <- rbind(summary.output,h.R6) }else{ summary.output[dim(summary.output)[1]+1,1]<-"R6 Summary table generated (conc. and proj. seasonal component)" summary.output[dim(summary.output)[1],2]<-"FALSE" } ## R7 if(!is.null(x$h.R7)){ summary.output[dim(summary.output)[1]+1,1]<-"R7 Table generated (Likelihood stats from estimating model)" summary.output[dim(summary.output)[1],2]<-"TRUE" h.R7 <- x$h.R7 h.R7 <- cbind(paste("R7: ",paste(colnames(h.R7),collapse=", "),sep=""), apply(h.R7,1,function(x)paste(x,collapse=", "))) summary.output <- rbind(summary.output,h.R7) }else{ summary.output[dim(summary.output)[1]+1,1]<-"R7 Table generated (Likelihood stats from estimating model)))" summary.output[dim(summary.output)[1],2]<-"FALSE" } ## R8 if(!is.null(x$h.R8)){ summary.output[dim(summary.output)[1]+1,1]<-"R8 Table generated (Cum SumSq Fcst Errors at spec leads)" summary.output[dim(summary.output)[1],2]<-"TRUE" h.R8 <- x$h.R8[[1]] h.R8 <- cbind(paste("R8: ",paste(colnames(h.R8),collapse=", "),sep=""), apply(h.R8,1,function(x)paste(x,collapse=", "))) summary.output <- rbind(summary.output,h.R8) h.R8 <- x$h.R8[[2]] h.R8 <- cbind(paste("R8: mean, ",paste(colnames(h.R8),collapse=", "),sep=""), paste("mean",paste(h.R8,collapse=", "),sep=", ")) summary.output <- rbind(summary.output,h.R8) }else{ summary.output[dim(summary.output)[1]+1,1]<-"R8 Table generated (SumSq Fcst Errors at spec leads)" summary.output[dim(summary.output)[1],2]<-"FALSE" } }else{ summary.output[dim(summary.output)[1]+1,1]<-"History analysis performed" summary.output[dim(summary.output)[1],2]<-"FALSE" } } # identify if(identify){ if(!is.null(x$rsd.iac)){ for(i in 1:length(x$rsd.iac)){ sig<-rep("",dim(x$rsd.iac[[i]])[1]) sig[which(x$rsd.iac[[i]]$pval<0.05 & x$rsd.iac[[i]]$df.q>0)]<-"*" rsd.iac<-cbind(round(x$rsd.iac[[i]],digits=3),sig) summary.output<-rbind(summary.output,cbind(rep(paste("iac_",names(x$rsd.iac)[[i]],": lag, sample.iac, stderr.iac, Ljung-Box.q, df.q, pval",sep=""),dim(rsd.iac)[1]),paste(apply(rsd.iac[,-dim(rsd.iac)[2]],1,paste,collapse=", "),rsd.iac[,dim(rsd.iac)[2]]))) } } if(!is.null(x$rsd.ipc)){ for(i in 1:length(x$rsd.ipc)){ sig<-rep("",dim(x$rsd.ipc[[i]])[1]) sig[which(x$rsd.ipc[[i]]$pval<0.05 & x$rsd.ipc[[i]]$df.q>0)]<-"*" rsd.ipc<-cbind(round(x$rsd.ipc[[i]],digits=3),sig) summary.output<-rbind(summary.output,cbind(rep(paste("ipc_",names(x$rsd.ipc)[[i]],": lag, sample.ipc, stderr.ipc, Ljung-Box.q, df.q, pval",sep=""),dim(rsd.ipc)[1]),paste(apply(rsd.ipc[,-dim(rsd.ipc)[2]],1,paste,collapse=", "),rsd.ipc[,dim(rsd.ipc)[2]]))) } } } # names.sumout<-unique(summary.output[,1]) }else{ # cat("\n\tX11 Regression\n\n") summary.output[dim(summary.output)[1]+1,1]<-"X11 Regression" summary.output[dim(summary.output)[1],2]<-"TRUE" 
summary.output[dim(summary.output)[1]+1,1]<-"Regression Model" summary.output[dim(summary.output)[1],2]<-paste(unlist(x$regmdl)) # cat("\n\tOutlier Detection\n") # if(x$ifout=="Outlier detection performed"){ # summary.output[dim(summary.output)[1]+1,1]<-"Outlier detection performed" # summary.output[dim(summary.output)[1],2]<-paste("TRUE") # cat("Critical |t| for outliers:\t\n") summary.output[dim(summary.output)[1]+1,1]<-"aocrit" summary.output[dim(summary.output)[1],2]<-paste(unlist(x$crit)) summary.output[dim(summary.output)[1]+1,1]<-"Total Number of Outliers" summary.output[dim(summary.output)[1],2]<-paste(length(x$out)+length(x$autooutlier)-length(which(x$out=="-"))-length(which(x$autooutlier=="-"))) summary.output[dim(summary.output)[1]+1,1]<-"Nr of Automatically Identified Outliers" summary.output[dim(summary.output)[1],2]<-paste(length(x$autooutlier)-length(which(x$autooutlier=="-"))) # } # else{ # summary.output[dim(summary.output)[1]+1,1]<-"Outlier detection performed" # summary.output[dim(summary.output)[1],2]<-paste("FALSE") # } # cat("\n\tRegression Model\n") rest<-unlist(lapply(strsplit(as.character(x$regmdl),"+",fixed=TRUE),function(x)gsub("^\\s+|\\s+$", "",x))) rest<-names(x)[which(names(x)%in%rest)] liste <- c("outlier","userdefined","leapyear","td",rest,"autooutlier")#,"almostoutlier") liste<-liste[which(liste%in%names(x))] empty <- which(unlist(lapply(1:length(x),function(y)any(x[[y]]=="-")))) res <- as.data.frame(do.call(rbind,lapply(which(!liste %in% names(x[empty])),function(j){ if(!any(grepl(names(x[liste[j]]),names(x[[liste[j]]])))){ names(x[[liste[j]]])<-paste(names(x[liste[j]]),"_",names(x[[liste[j]]]),sep="") } do.call(rbind,lapply(1:length(x[[liste[j]]]),function(i){ c(names(x[[liste[j]]][i]),unlist(x[[liste[j]]][[i]]))}))}))) if(all(dim(res))>0){ res[,2:4] <- apply(res[,2:4],2,function(x)as.numeric(formatC(as.numeric(as.character(x)),digits=3,format="f"))) colnames(res)[1]<-"variable" res2 <- cbind(paste(1:length(res[,1]),"variable, coef, stderr, tval"),apply(res,1,paste,collapse=", ")) summary.output<-rbind(summary.output,res2) if(!is.null(x[["derived.coef"]])){ summary.output[dim(summary.output)[1]+1,1]<-"* Derived parameter estimates" summary.output[dim(summary.output)[1],2]<-paste(x[["derived.coef"]]) } } # cat("\n\tSeasonal Adjustment\n\n") summary.output[dim(summary.output)[1]+1,1]<-"Identifiable Seasonality" summary.output[dim(summary.output)[1],2]<-paste(unlist(x$id.seas)) if(id.rsdseas){ summary.output[dim(summary.output)[1]+1,1]<-"Residual Seasonality" if(x$id.rsdseas=="none") summary.output[dim(summary.output)[1],2]<-"none" else summary.output[dim(summary.output)[1],2]<-"yes" } summary.output[dim(summary.output)[1]+1,1]<-"Seasonal Peaks" summary.output[dim(summary.output)[1],2]<-paste(unlist(x$peaks.seas)) summary.output[dim(summary.output)[1]+1,1]<-"Trading Day Peaks" summary.output[dim(summary.output)[1],2]<-paste(unlist(x$peaks.td)) summary.output[dim(summary.output)[1]+1,1]<-"Q Statistic" summary.output[dim(summary.output)[1],2]<-paste(unlist(x$q)) if(quality.stat){ summary.output[dim(summary.output)[1]+1,1]<-"Q2 Statistic" summary.output[dim(summary.output)[1],2]<-paste(unlist(x$q2)) summary.output[dim(summary.output)[1]+1,1]<-"M1" summary.output[dim(summary.output)[1],2]<-paste(unlist(x$m1)) summary.output[dim(summary.output)[1]+1,1]<-"M2" summary.output[dim(summary.output)[1],2]<-paste(unlist(x$m2)) summary.output[dim(summary.output)[1]+1,1]<-"M3" summary.output[dim(summary.output)[1],2]<-paste(unlist(x$m3)) 
summary.output[dim(summary.output)[1]+1,1]<-"M4" summary.output[dim(summary.output)[1],2]<-paste(unlist(x$m4)) summary.output[dim(summary.output)[1]+1,1]<-"M5" summary.output[dim(summary.output)[1],2]<-paste(unlist(x$m5)) summary.output[dim(summary.output)[1]+1,1]<-"M6" summary.output[dim(summary.output)[1],2]<-paste(unlist(x$m6)) summary.output[dim(summary.output)[1]+1,1]<-"M7" summary.output[dim(summary.output)[1],2]<-paste(unlist(x$m7)) summary.output[dim(summary.output)[1]+1,1]<-"M8" summary.output[dim(summary.output)[1],2]<-paste(unlist(x$m8)) summary.output[dim(summary.output)[1]+1,1]<-"M9" summary.output[dim(summary.output)[1],2]<-paste(unlist(x$m9)) summary.output[dim(summary.output)[1]+1,1]<-"M10" summary.output[dim(summary.output)[1],2]<-paste(unlist(x$m10)) summary.output[dim(summary.output)[1]+1,1]<-"M11" summary.output[dim(summary.output)[1],2]<-paste(unlist(x$m11)) } summary.output[dim(summary.output)[1]+1,1]<-"Nr of M stats outside limits" summary.output[dim(summary.output)[1],2]<-paste(unlist(x$nmfail)) if(spectra.detail){ new.names<-function(z){ for(i in 1:length(x[[z]])){ if(length(x[[z]][[i]])>1){ x[[z]][[i]] <- do.call(paste,x[[z]][[i]]) }} y<-as.data.frame(x[[z]],row.names="") colnames(y)<-gsub(paste(z,".",sep=""),replacement="",names(unlist(x[[z]])),fixed=TRUE) return(y)} summary.output[dim(summary.output)[1]+1,1]<-"Spectrum of the original series" summary.output[dim(summary.output)[1],2]<-paste(names(new.names("spcori")),":",new.names("spcori"),collapse=",",sep="") summary.output[dim(summary.output)[1]+1,1]<-"Spectrum of differenced seasonally adjusted series" summary.output[dim(summary.output)[1],2]<-paste(names(new.names("spcsa")),":",new.names("spcsa"),collapse=",",sep="") summary.output[dim(summary.output)[1]+1,1]<-"Spectrum of modified irregular series" summary.output[dim(summary.output)[1],2]<-paste(names(new.names("spcirr")),":",new.names("spcirr"),collapse=",",sep="") } # cat("\n\tSeasonal and Trend Moving Averages\n\n") summary.output[dim(summary.output)[1]+1,1]<-"SA decomposition" summary.output[dim(summary.output)[1],2]<-paste(x$samode[[length(x$samode)]]) if(x$seasonalma[[1]]=="M.S.R."){ summary.output[dim(summary.output)[1]+1,1]<-"Seasonal moving average" summary.output[dim(summary.output)[1],2]<-paste(x$seasonalma[[length(x$seasonalma)]],"(Based on msr size)",sep=" ") }else{ summary.output[dim(summary.output)[1]+1,1]<-"Seasonal moving average" summary.output[dim(summary.output)[1],2]<-paste(x$seasonalma[[length(x$seasonalma)]]) } summary.output[dim(summary.output)[1]+1,1]<-"Henderson filter" summary.output[dim(summary.output)[1],2]<-paste(x$trendma[[length(x$trendma)]],"-term",sep="") if(slidingspans){ if(length(grep("ss.",names(x)))>0){ summary.output[dim(summary.output)[1]+1,1]<-"Sliding spans analysis performed" summary.output[dim(summary.output)[1],2]<-"TRUE" if(all(x$ss.options!="-")){ ss.options<-cbind(c("Nr of spans","Length of spans","First period in first span","First year in first span"),as.character(x$ss.options)) summary.output<-rbind(summary.output,ss.options) } if(any(x$ss.seasTests!="-")){ seasTests <- x$ss.seasTests seasTests <- cbind(paste(rownames(seasTests),": M7, Identifiable Seasonality",sep=""), as.character(apply(seasTests[which(colnames(seasTests)%in%c("m7","idseas"))],1,function(x)paste(x,collapse=", ")))) summary.output <- rbind(summary.output,seasTests) } if(all(x$ss.S1!="-")){ summary.output[dim(summary.output)[1]+1,1]<-"S1 Table generated (Period means of seasonal factors)" summary.output[dim(summary.output)[1],2]<-"TRUE" 
ss.S1 <- x$ss.S1[[1]] ss.S1 <- cbind(paste("S1: ",paste(colnames(ss.S1),collapse=", "),sep=""), apply(ss.S1,1,function(x)paste(x,collapse=", "))) summary.output <- rbind(summary.output,ss.S1) ss.S1 <- x$ss.S1[[2]] ss.S1 <- cbind(paste("S1.summary",paste(row.names(ss.S1),paste(colnames(ss.S1),collapse=", "),sep=": "),sep="."), as.character(apply(ss.S1,1,function(x)paste(x,collapse=", ")))) summary.output <- rbind(summary.output,ss.S1) }else{ summary.output[dim(summary.output)[1]+1,1]<-"S1 Table generated (Period means of seasonal factors)" summary.output[dim(summary.output)[1],2]<-"FALSE" } if(all(x$ss.S2!="-")){ summary.output[dim(summary.output)[1]+1,1]<-"S2 Table generated (Percentage of unstable periods)" summary.output[dim(summary.output)[1],2]<-"TRUE" ss.S2 <- x$ss.S2 rn.S2 <- which(c("a.seasFac","b.td","c.SA","d.period-period","e.year-year")%in%row.names(ss.S2)) ss.S2<-cbind(paste(paste("S2.",row.names(ss.S2)[rn.S2],sep=""),": nUnstable, nPeriods, percUnstable",sep=""), as.character(apply(ss.S2,1,function(x)paste(x,collapse=", ")))) summary.output <- rbind(summary.output,ss.S2) }else{ summary.output[dim(summary.output)[1]+1,1]<-"S2 Table generated (Percentage of unstable periods)" summary.output[dim(summary.output)[1],2]<-"FALSE" } if(all(x$ss.S3!="-")){ summary.output[dim(summary.output)[1]+1,1]<-"S3 Table(s) generated (Breakdown of unstable periods)" summary.output[dim(summary.output)[1],2]<-"TRUE" ss.S3 <- x$ss.S3 rn.S3 <- c("a.seasFac","b.td","c.SA","d.period-period","e.year-year") rn.S3.index <- which(rn.S3%in%names(ss.S3)) for(i in 1:length(rn.S3.index)){ table.ss.S3 <- cbind(paste("S3.",rn.S3[rn.S3.index[i]],": ",paste(colnames(ss.S3[[i]]),collapse=", "),sep=""), as.character(apply(ss.S3[[i]],1,function(x)paste(x,collapse=", ")))) summary.output <- rbind(summary.output,table.ss.S3) } }else{ summary.output[dim(summary.output)[1]+1,1]<-"S3 Table(s) generated (Breakdown of unstable periods)" summary.output[dim(summary.output)[1],2]<-"FALSE" } }else{ summary.output[dim(summary.output)[1]+1,1]<-"Sliding spans analysis performed" summary.output[dim(summary.output)[1],2]<-"FALSE" } } if(history){ if(length(grep("h.",names(x),fixed=TRUE))>0){ summary.output[dim(summary.output)[1]+1,1]<-"History analysis performed" summary.output[dim(summary.output)[1],2]<-"TRUE" ## R1 if(all(x$h.R1!="-")){ summary.output[dim(summary.output)[1]+1,1]<-"R1 Summary table generated (SA series)" summary.output[dim(summary.output)[1],2]<-"TRUE" h.R1 <- x$h.R1[[1]] h.R1 <- cbind(paste("R1: ",paste(colnames(h.R1),collapse=", "),sep=""), apply(h.R1,1,function(x)paste(x,collapse=", "))) summary.output <- rbind(summary.output,h.R1) h.R1 <- x$h.R1[[2]] h.R1 <- cbind(paste("R1: total, ",paste(colnames(h.R1),collapse=", "),sep=""), paste("total",paste(h.R1,collapse=", "),sep=", ")) summary.output <- rbind(summary.output,h.R1) h.R1 <- x$h.R1[[3]] h.R1 <- cbind(paste("R1: ",paste(colnames(h.R1),collapse=", "),sep=""), apply(h.R1,1,function(x)paste(x,collapse=", "))) summary.output <- rbind(summary.output,h.R1) }else{ summary.output[dim(summary.output)[1]+1,1]<-"R1 Summary table generated (SA series)" summary.output[dim(summary.output)[1],2]<-"FALSE" } ## R2 if(all(x$h.R2!="-")){ summary.output[dim(summary.output)[1]+1,1]<-"R2 Summary table generated (period-period changes in SA)" summary.output[dim(summary.output)[1],2]<-"TRUE" h.R2 <- x$h.R2[[1]] h.R2 <- cbind(paste("R2: ",paste(colnames(h.R2),collapse=", "),sep=""), apply(h.R2,1,function(x)paste(x,collapse=", "))) summary.output <- rbind(summary.output,h.R2) h.R2 <- 
x$h.R2[[2]] h.R2 <- cbind(paste("R2: total, ",paste(colnames(h.R2),collapse=", "),sep=""), paste("total",paste(h.R2,collapse=", "),sep=", ")) summary.output <- rbind(summary.output,h.R2) h.R2 <- x$h.R2[[3]] h.R2 <- cbind(paste("R2: ",paste(colnames(h.R2),collapse=", "),sep=""), apply(h.R2,1,function(x)paste(x,collapse=", "))) summary.output <- rbind(summary.output,h.R2) }else{ summary.output[dim(summary.output)[1]+1,1]<-"R2 Summary table generated (period-period changes in SA)" summary.output[dim(summary.output)[1],2]<-"FALSE" } ## R4 if(all(x$h.R4!="-")){ summary.output[dim(summary.output)[1]+1,1]<-"R4 Summary table generated (Henderson trend component)" summary.output[dim(summary.output)[1],2]<-"TRUE" h.R4 <- x$h.R4[[1]] h.R4 <- cbind(paste("R4: ",paste(colnames(h.R4),collapse=", "),sep=""), apply(h.R4,1,function(x)paste(x,collapse=", "))) summary.output <- rbind(summary.output,h.R4) h.R4 <- x$h.R4[[2]] h.R4 <- cbind(paste("R4: total, ",paste(colnames(h.R4),collapse=", "),sep=""), paste("total",paste(h.R4,collapse=", "),sep=", ")) summary.output <- rbind(summary.output,h.R4) h.R4 <- x$h.R4[[3]] h.R4 <- cbind(paste("R4: ",paste(colnames(h.R4),collapse=", "),sep=""), apply(h.R4,1,function(x)paste(x,collapse=", "))) summary.output <- rbind(summary.output,h.R4) }else{ summary.output[dim(summary.output)[1]+1,1]<-"R4 Summary table generated (Henderson trend component))" summary.output[dim(summary.output)[1],2]<-"FALSE" } ## R5 if(all(x$h.R5!="-")){ summary.output[dim(summary.output)[1]+1,1]<-"R5 Summary table generated (period-period changes in trend)" summary.output[dim(summary.output)[1],2]<-"TRUE" h.R5 <- x$h.R5[[1]] h.R5 <- cbind(paste("R5: ",paste(colnames(h.R5),collapse=", "),sep=""), apply(h.R5,1,function(x)paste(x,collapse=", "))) summary.output <- rbind(summary.output,h.R5) h.R5 <- x$h.R5[[2]] h.R5 <- cbind(paste("R5: total, ",paste(colnames(h.R5),collapse=", "),sep=""), paste("total",paste(h.R5,collapse=", "),sep=", ")) summary.output <- rbind(summary.output,h.R5) h.R5 <- x$h.R5[[3]] h.R5 <- cbind(paste("R5: ",paste(colnames(h.R5),collapse=", "),sep=""), apply(h.R5,1,function(x)paste(x,collapse=", "))) summary.output <- rbind(summary.output,h.R5) }else{ summary.output[dim(summary.output)[1]+1,1]<-"R5 Summary table generated (period-period changes in trend)" summary.output[dim(summary.output)[1],2]<-"FALSE" } ## R6 if(all(x$h.R6!="-")){ summary.output[dim(summary.output)[1]+1,1]<-"R6 Summary table generated (conc. and proj. seasonal component)" summary.output[dim(summary.output)[1],2]<-"TRUE" h.R6 <- x$h.R6[[1]] h.R6 <- cbind(paste("R6: ",paste(colnames(h.R6),collapse=", "),sep=""), apply(h.R6,1,function(x)paste(x,collapse=", "))) summary.output <- rbind(summary.output,h.R6) h.R6 <- x$h.R6[[2]] h.R6 <- cbind(paste("R6: total, ",paste(colnames(h.R6),collapse=", "),sep=""), paste("total",paste(h.R6,collapse=", "),sep=", ")) summary.output <- rbind(summary.output,h.R6) h.R6 <- x$h.R6[[3]] h.R6 <- cbind(paste("R6: ",paste(colnames(h.R6),collapse=", "),sep=""), apply(h.R6,1,function(x)paste(x,collapse=", "))) summary.output <- rbind(summary.output,h.R6) }else{ summary.output[dim(summary.output)[1]+1,1]<-"R6 Summary table generated (conc. and proj. 
seasonal component)" summary.output[dim(summary.output)[1],2]<-"FALSE" } ## R7 if(!is.null(x$h.R7)){ summary.output[dim(summary.output)[1]+1,1]<-"R7 Table generated (Likelihood stats from estimating model)" summary.output[dim(summary.output)[1],2]<-"TRUE" h.R7 <- x$h.R7 h.R7 <- cbind(paste("R7: ",paste(colnames(h.R7),collapse=", "),sep=""), apply(h.R7,1,function(x)paste(x,collapse=", "))) summary.output <- rbind(summary.output,h.R7) }else{ summary.output[dim(summary.output)[1]+1,1]<-"R7 Table generated (Likelihood stats from estimating model)))" summary.output[dim(summary.output)[1],2]<-"FALSE" } ## R8 if(!is.null(x$h.R8)){ summary.output[dim(summary.output)[1]+1,1]<-"R8 Table generated (Cum SumSq Fcst Errors at spec leads)" summary.output[dim(summary.output)[1],2]<-"TRUE" h.R8 <- x$h.R8[[1]] h.R8 <- cbind(paste("R8: ",paste(colnames(h.R8),collapse=", "),sep=""), apply(h.R8,1,function(x)paste(x,collapse=", "))) summary.output <- rbind(summary.output,h.R8) h.R8 <- x$h.R8[[2]] h.R8 <- cbind(paste("R8: mean, ",paste(colnames(h.R8),collapse=", "),sep=""), paste("mean",paste(h.R8,collapse=", "),sep=", ")) summary.output <- rbind(summary.output,h.R8) }else{ summary.output[dim(summary.output)[1]+1,1]<-"R8 Table generated (SumSq Fcst Errors at spec leads)" summary.output[dim(summary.output)[1],2]<-"FALSE" } }else{ summary.output[dim(summary.output)[1]+1,1]<-"History analysis performed" summary.output[dim(summary.output)[1],2]<-"FALSE" } } # identify if(identify){ if(!is.null(x$rsd.iac)){ for(i in 1:length(x$rsd.iac)){ sig<-rep("",dim(x$rsd.iac[[i]])[1]) sig[which(x$rsd.iac[[i]]$pval<0.05 & x$rsd.iac[[i]]$df.q>0)]<-"*" rsd.iac<-cbind(round(x$rsd.iac[[i]],digits=3),sig) summary.output<-rbind(summary.output,cbind(rep(paste("iac_",names(x$rsd.iac)[[i]],": lag, sample.iac, stderr.iac, Ljung-Box.q, df.q, pval",sep=""),dim(rsd.iac)[1]),paste(apply(rsd.iac[,-dim(rsd.iac)[2]],1,paste,collapse=", "),rsd.iac[,dim(rsd.iac)[2]]))) } } if(!is.null(x$rsd.ipc)){ for(i in 1:length(x$rsd.ipc)){ sig<-rep("",dim(x$rsd.ipc[[i]])[1]) sig[which(x$rsd.ipc[[i]]$pval<0.05 & x$rsd.ipc[[i]]$df.q>0)]<-"*" rsd.ipc<-cbind(round(x$rsd.ipc[[i]],digits=3),sig) summary.output<-rbind(summary.output,cbind(rep(paste("ipc_",names(x$rsd.ipc)[[i]],": lag, sample.ipc, stderr.ipc, Ljung-Box.q, df.q, pval",sep=""),dim(rsd.ipc)[1]),paste(apply(rsd.ipc[,-dim(rsd.ipc)[2]],1,paste,collapse=", "),rsd.ipc[,dim(rsd.ipc)[2]]))) } } } } # sumout<-unlist(summary.output[,1]) # names.sumout<-unique(sumout) # length.names.sumout<-unlist(lapply(names.sumout,function(x)length(grep(x,sumout)))) names(summary.output)<-c("DIAGNOSTICS","--- Rout ---") spl<-split(summary.output[,1,drop=FALSE],factor(summary.output[,1],levels=unique(summary.output[,1]))) ind <- which(sapply(spl, nrow) > 1) v <- lapply(ind, function(x) {data.frame(DIAGNOSTICS=paste(1:nrow(spl[[x]]), names(spl)[x]),stringsAsFactors=FALSE)} ) spl[ind] <- v new.col<-do.call("rbind",spl) summary.output[,1]<-new.col return(summary.output) # list(summary.output=summary.output,names.sumout=names.sumout,length.names.sumout=length.names.sumout) }
/scratch/gouwar.j/cran-all/cranData/x12/R/summary.output.x12.R
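## summary.x12work() / summaryworkhorse(): console (cat-based) counterpart of
## summary.output.workhorse() above; prints the same diagnostics grouped into
## "Time Series", "Model Definition", "Outlier Detection", "Seasonal Adjustment",
## slidingspans, history and model identification blocks.
## (Descriptive header added for orientation; not part of the original source.)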
summary.x12work <- function(object,fullSummary=FALSE,spectra.detail=FALSE,almostout=FALSE,rsd.autocorr=NULL,quality.stat=FALSE,likelihood.stat=FALSE,aape=FALSE,id.rsdseas=FALSE,slidingspans=FALSE,history=FALSE,identify=FALSE,...){ summaryworkhorse(object$dg,fullSummary=fullSummary,spectra.detail=spectra.detail,almostout=almostout,rsd.autocorr=rsd.autocorr,quality.stat=quality.stat,likelihood.stat=likelihood.stat,aape=aape,id.rsdseas=id.rsdseas,slidingspans=slidingspans,history=history,identify=identify) } summaryworkhorse <- function(x,fullSummary=FALSE,spectra.detail=FALSE,almostout=FALSE,rsd.autocorr=NULL,quality.stat=FALSE,likelihood.stat=FALSE,aape=FALSE,id.rsdseas=FALSE,slidingspans=FALSE,history=FALSE,identify=FALSE){ #cat("File: \"",x$file,"\"",sep="","\n") if(length(nchar(unlist(x$outlier)))==0) x$outlier<-"-" if(fullSummary){ spectra.detail=TRUE almostout=TRUE rsd.autocorr=c("acf","pacf","acf2") quality.stat=TRUE likelihood.stat=TRUE aape=TRUE id.rsdseas=TRUE slidingspans=TRUE history=TRUE identify=TRUE } cat("\n\tTime Series\n\n") cat("Frequency:",x$frequency,"\n") if(length(x$span)>1){ span <- str_trim(unlist(strsplit(x$span,":"))) span.index <- which(span=="span") cat("Span:",span[span.index+1],"\n") }else{ cat("Span:",x$span,"\n")} span.index <- which(span=="modelspan") if(length(span.index)>0) modelspan <- span[span.index+1] span.index <- which(span=="outlierspan") if(length(span.index)>0) outlierspan <- span[span.index+1] ### RegARIMA Option: if(x$x11regress=="no"){ cat("\n\tModel Definition\n\n") if(x$automdl!="-"){ cat("ARIMA Model:",unlist(x$arimamdl),"(Automatic Model Choice)\n") }else{ cat("ARIMA Model:",unlist(x$arimamdl),"\n")#automdl erwaehnen } #zz <- data.frame("(row names)"= c("aaaaa", "b"), check.names=FALSE) if(exists("modelspan")) cat("Model Span:",span[span.index+1],"\n") if(x$transform=="Automatic selection"){ cat("Transformation:",unlist(x$transform),":",unlist(x$autotransform),"\n") }else{ cat("Transformation:",unlist(x$transform),"\n") } cat("Regression Model:",unlist(x$regmdl),"\n") cat("\n\tOutlier Detection\n\n") if(x$ifout=="Outlier detection performed"){ if(exists("outlierspan")) cat("Outlier Span:",span[span.index+1],"\n") cat("Critical |t| for outliers:\t\n") print(unlist(x$crit)) cat("Total Number of Outliers:",unlist(x$nout),"\n") cat("Automatically Identified Outliers:",unlist(x$nautoout),"\n") if(almostout){ cat("Number of ts values that were almost identified as outliers:",unlist(x$nalmostout),"\n") }} else{ cat("No outlier detection performed\n") } rest<-unlist(lapply(strsplit(as.character(x$regmdl),"+",fixed=TRUE),function(x)gsub("^\\s+|\\s+$", "",x))) rest<-names(x)[which(names(x)%in%rest)] if(almostout){ liste <- c("outlier","userdefined","leapyear","td","autooutlier",rest,"almostoutlier") }else liste <- c("outlier","userdefined","leapyear","td",rest,"autooutlier") liste<-liste[which(liste%in%names(x))] empty <- which(unlist(lapply(1:length(x),function(y)any(x[[y]]=="-")))) res <- as.data.frame(do.call(rbind,lapply(which(!liste %in% names(x[empty])),function(j){ if(!any(grepl(names(x[liste[j]]),names(x[[liste[j]]])))){ names(x[[liste[j]]])<-paste(names(x[liste[j]]),"_",names(x[[liste[j]]]),sep="") } do.call(rbind,lapply(1:length(x[[liste[j]]]),function(i){ c(names(x[[liste[j]]][i]),unlist(x[[liste[j]]][[i]]))}))}))) if(all(dim(res))>0){ res[,2:4] <- apply(res[,2:4],2,function(x)as.numeric(formatC(as.numeric(as.character(x)),digits=3,format="f"))) colnames(res)[1]<-"variable" cat("\n\tRegression Model\n") print(res) 
if(!is.null(x[["derived.coef"]])){ cat("* Derived parameter estimates: ",x[["derived.coef"]],"\n") } } if(likelihood.stat){ cat("\n\tLikelihood Statistics\n") # cat("\n") # lstat<-as.data.frame(matrix(c("Log Likelihood","AIC","AICC","BIC","HQ",x$loglikelihood,x$aic,x$aicc,x$bic,x$hq),ncol=2)) # colnames(lstat)<-c("Likelihood Statistics"," ") # print(lstat) lstat<-matrix(c("AIC:","AICC:","BIC: ","HQ: ","Log Likelihood:",x$aic,x$aicc,x$bic,x$hq,x$loglikelihood),ncol=2) write.table(lstat,quote=FALSE,row.names=FALSE, col.names=FALSE,sep="\t") #write.table(lstat,quote=FALSE,row.names=FALSE, col.names=FALSE) } if(aape && length(x$aape)>1){ cat("\nAverage absolute percentage error\n") mode<-ifelse(x$aape$aape.mode=="outofsample","out of sample","within sample") cat("\tin ",mode," forecasts",sep="","\n") aape.mat<-matrix(c("Last year: ","Last-1 year:","Last-2 year:","Last 3 years:",x$aape$aape.0,x$aape$aape.1,x$aape$aape.2,x$aape$aape.3),ncol=2) write.table(aape.mat,quote=FALSE,row.names=FALSE, col.names=FALSE,sep="\t") } cat("\n\tSeasonal Adjustment\n\n") cat("Identifiable Seasonality:",unlist(x$id.seas),"\n") if(id.rsdseas){ if(x$id.rsdseas=="none") cat("Residual Seasonality: none\n") else cat("Residual Seasonality: yes\n") } cat("Seasonal Peaks:",unlist(x$peaks.seas),"\n") cat("Trading Day Peaks:",unlist(x$peaks.td),"\n") cat("Overall Index of Quality of SA\n(Acceptance Region from 0 to 1)\nQ:",unlist(x$q),"\n") if(quality.stat){ cat("Q2:",unlist(x$q2),"\n")#(Q Statistic computed w/o the M2 Quality Control Statistic)\n") cat("Quality Control Statistics\n") cat("M1:",unlist(x$m1),"\n") cat("M2:",unlist(x$m2),"\n") cat("M3:",unlist(x$m3),"\n") cat("M4:",unlist(x$m4),"\n") cat("M5:",unlist(x$m5),"\n") cat("M6:",unlist(x$m6),"\n") cat("M7:",unlist(x$m7),"\n") cat("M8:",unlist(x$m8),"\n") cat("M9:",unlist(x$m9),"\n") cat("M10:",unlist(x$m10),"\n") cat("M11:",unlist(x$m11),"\n") } cat("Number of M statistics outside the limits:",unlist(x$nmfail),"\n") if(spectra.detail){ new.names<-function(z){ for(i in 1:length(x[[z]])){ if(length(x[[z]][[i]])>1){ x[[z]][[i]] <- do.call(paste,x[[z]][[i]]) }} y<-as.data.frame(x[[z]],row.names="") colnames(y)<-gsub(paste(z,".",sep=""),replacement="",names(unlist(x[[z]])),fixed=TRUE) return(y)} cat("Spectrum of the original series\n") print(new.names("spcori")) cat("Spectrum of the regARIMA model residuals\n") if(any(x$spcrsd!="-")) print(new.names("spcrsd")) else cat("\t- Not available -\n") cat("Spectrum of differenced seasonally adjusted series\n") print(new.names("spcsa")) cat("Spectrum of modified irregular series\n") print(new.names("spcirr")) } # cat("\n\tSeasonal and Trend Moving Averages\n\n") cat("\nSA decomposition:",x$samode[[length(x$samode)]],"\n") if(x$seasonalma[[1]]=="M.S.R."){ cat("Seasonal moving average used for the final iteration: \n",x$seasonalma[[length(x$seasonalma)]], " (Based on the size of the global moving seasonality ratio (msr))\n",sep="") }else{ cat("Moving average used to estimate the seasonal factors:",x$seasonalma[[length(x$seasonalma)]],"\n") } cat("Moving average used to estimate the final trend-cycle: ",x$trendma[[length(x$trendma)]],"-term Henderson filter\n",sep="") if(!is.null(rsd.autocorr)){ #rsd.autocorr=c("rsd.acf","rsd.pacf","rsd.acf2") if("acf"%in%rsd.autocorr){ cat("\n\tSample Autocorrelations of the Residuals\n") if(!is.null(x$rsd.acf)){ #cat("p-values approximate the probability of observing a q-value at least this #large when the model fitted is correct.") #!!Diese Schranke (<0.05) kann sich noch aendern falls pickmodel 
spec implementiert wird cat("(Small p-values (<0.05) indicate model inadequacy (for df.q >0))\n\n") sig<-rep("",dim(x$rsd.acf)[1]) sig[which(x$rsd.acf$pval<0.05 & x$rsd.acf$df.q>0)]<-"*" rsd.acf<-cbind(round(x$rsd.acf,digits=3),sig) colnames(rsd.acf)<-c(colnames(x$rsd.acf),"") print(rsd.acf)}else{ cat("\n\t- Not available -\n\n") } } if("pacf"%in%rsd.autocorr){ cat("\n\tSample Partial Autocorrelations of the Residuals\n\n") if(!is.null(x$rsd.pacf)) print(round(x$rsd.pacf,digits=3)) else cat("\t- Not available -\n\n") } if("acf2"%in%rsd.autocorr){ cat("\n\tSample Autocorrelations of the Squared Residuals\n\n") if(!is.null(x$rsd.acf2)) print(round(x$rsd.acf2,digits=3)) else cat("\t- Not available -\n\n") } } # if(slidingspans){ # if(length(grep("ss.",names(x)))>0){ # if(!is.null(x$ss.)) # ## if(length(grep("rsd.acf",names(x)))>0) # ## rsd.autocorr=c("rsd.acf","rsd.pacf","rsd.acf2") ## if("acf"%in%rsd.autocorr){ ## cat("\n\tSample Autocorrelations of the Residuals\n") ## if(!is.null(x$rsd.acf)){ #write.table(ss.options,quote=FALSE,row.names=FALSE,sep="\t") # }} if(slidingspans){ cat("\n\tSlidingspans\n\n") if(length(grep("ss.",names(x),fixed=TRUE))>0){ ## S0 cat("Summary of options selected for this run:") if(all(x$ss.options!="-")){ ss.options<-as.data.frame((t(as.data.frame(x$ss.options)))) row.names(ss.options)<-c("Number of spans:","Length of spans:","First period in first span:","First year in first span:") colnames(ss.options)<-"" print(ss.options) }else{cat("\t- Not available -\n")} cat("\nSeasonality:\n") if(any(x$ss.seasTests!="-")){ # F statistics not printed (might change this if requested) # cat("stabseas (F-Value of test for the presence of seasonality assuming stability)\n", # "movseas (F-Value of moving seasonality test)\n") seasTests <- x$ss.seasTests seasTests <- seasTests[which(colnames(seasTests)%in%c("m7","idseas"))] colnames(seasTests)<-gsub("m7","M7",colnames(seasTests)) colnames(seasTests)<-gsub("idseas","Identifiable Seasonality",colnames(seasTests)) print(seasTests)}else{cat("\n\t- Not available -\n")} ## S1 cat("\nS1 Period means of Seasonal Factors\n(Movements within a period should be small)\n") if(all(x$ss.S1!="-")){ print(x$ss.S1[[1]]) cat("\nSummary statistics for mean seasonal factor\n") print(x$ss.S1[[2]]) }else{cat("\t- Not available -\n")} ## S2 cat("\nS2 Percentage of unstable periods\n") if(all(x$ss.S2!="-")){ ss.S2 <- x$ss.S2 rn.S2 <- which(c("a.seasFac","b.td","c.SA","d.period-period","e.year-year")%in%row.names(ss.S2)) row.names(ss.S2) <- c("a.Seasonal Factors","b.Trading Days","c.Final SA series","d.Period to period changes","e.Year on year changes")[rn.S2] print(ss.S2) }else{cat("\t- Not available -\n")} ## S3 cat("\nS3 Breakdown of unstable periods\n") if(all(x$ss.S3!="-")){ ss.S3 <- x$ss.S3 rn.S3 <- which(c("a.seasFac","b.td","c.SA","d.period-period","e.year-year")%in%names(ss.S3)) new.rn.S3 <- c("a.Seasonal Factors","b.Trading Days","c.Final SA series","d.Period to period changes","e.Year on year changes") for(i in 1:length(rn.S3)){ cat("\n",new.rn.S3[rn.S3[i]],"\n") print(x$ss.S3[[i]]) } }else{cat("\t- Not available -\n\n")} }else{cat("\t- Not available -\n\n")} } if(history){ cat("\n\tHistory\n") if(length(grep("h.",names(x),fixed=TRUE))>0){ ## R1 cat("\nR1 Average absolute percent revisions of the seasonal adjusments\n(Deviation from",unlist(x$h.target),"estimate defines revisions of seasonal adjustments calculated at lags)\n") if(all(x$h.R1!="-")){ x$h.R1[[1]][,-1] <- round(x$h.R1[[1]][,-1],digits=3) x$h.R1[[2]] <- round(x$h.R1[[2]],digits=3) 
x$h.R1[[3]][,-1] <- round(x$h.R1[[3]][,-1],digits=3) print(rbind(x$h.R1[[1]], rep("",dim(x$h.R1[[1]])[2]), c("total",unlist(x$h.R1[[2]])),rep("",dim(x$h.R1[[1]])[2]))) print(x$h.R1[[3]]) }else{cat("\t- Not available -\n")} ## R2 cat("\nR2 Average absolute percent revisions of the period-to-period percent change of the ajusments\n(Deviation from",unlist(x$h.target),"estimate defines revisions of seasonal adjustments calculated at lags)\n") if(all(x$h.R2!="-")){ x$h.R2[[1]][,-1] <- round(x$h.R2[[1]][,-1],digits=3) x$h.R2[[2]] <- round(x$h.R2[[2]],digits=3) x$h.R2[[3]][,-1] <- round(x$h.R2[[3]][,-1],digits=3) print(rbind(x$h.R2[[1]], rep("",dim(x$h.R2[[1]])[2]), c("total",unlist(x$h.R2[[2]])),rep("",dim(x$h.R2[[1]])[2]))) print(x$h.R2[[3]]) }else{cat("\t- Not available -\n")} ## R4 cat("\nR4 Average absolute percent revisions of the final Henderson trend component\n(Deviation from",unlist(x$h.target),"estimate defines revisions of seasonal adjustments calculated at lags)\n") if(all(x$h.R4!="-")){ x$h.R4[[1]][,-1] <- round(x$h.R4[[1]][,-1],digits=3) x$h.R4[[2]] <- round(x$h.R4[[2]],digits=3) x$h.R4[[3]][,-1] <- round(x$h.R4[[3]][,-1],digits=3) print(rbind(x$h.R4[[1]], rep("",dim(x$h.R4[[1]])[2]), c("total",unlist(x$h.R4[[2]])),rep("",dim(x$h.R4[[1]])[2]))) print(x$h.R4[[3]]) }else{cat("\t- Not available -\n")} ## R5 cat("\nR5 Average absolute percent revisions of period-to-period percent change of the trend-cycle\n(Deviation from",unlist(x$h.target),"estimate defines revisions of seasonal adjustments calculated at lags)\n") if(all(x$h.R5!="-")){ x$h.R5[[1]][,-1] <- round(x$h.R5[[1]][,-1],digits=3) x$h.R5[[2]] <- round(x$h.R5[[2]],digits=3) x$h.R5[[3]][,-1] <- round(x$h.R5[[3]][,-1],digits=3) print(rbind(x$h.R5[[1]], rep("",dim(x$h.R5[[1]])[2]), c("total",unlist(x$h.R5[[2]])),rep("",dim(x$h.R5[[1]])[2]))) print(x$h.R5[[3]]) }else{cat("\t- Not available -\n")} ## R6 cat("\nR6 Average absolute percent revisions of the concurrent and projected seasonal component\n") if(all(x$h.R6!="-")){ x$h.R6[[1]][,-1] <- round(x$h.R6[[1]][,-1],digits=3) x$h.R6[[2]] <- round(x$h.R6[[2]],digits=3) x$h.R6[[3]][,-1] <- round(x$h.R6[[3]][,-1],digits=3) print(rbind(x$h.R6[[1]], rep("",dim(x$h.R6[[1]])[2]), c("total",unlist(x$h.R6[[2]]),rep("",dim(x$h.R6[[1]])[2])))) print(x$h.R6[[3]]) }else{cat("\t- Not available -\n")} ## R7 cat("\nR7 Likelihood statistics from estimating regARIMA model") if(!is.null(x$h.R7)){ cat(" over spans with ending dates",paste(substr(x$h.R7[1,1],1,4),".",substr(x$h.R7[1,1],5,6),sep=""),"to",paste(substr(x$h.R7[dim(x$h.R7)[1],1],1,4),".",substr(x$h.R7[dim(x$h.R7)[1],1],5,6),sep=""),"\n") x$h.R7[dim(x$h.R7)[1],1] print(x$h.R7) }else{cat("\n\t- Not available -\n")} ## R8 cat("\nR8 Sum of squared forecast errors at specified leads from the end of each span") if(!is.null(x$h.R8)){ R8 <- x$h.R8[[1]] colnames(R8) <-gsub("sumSqFcstError","lead",colnames(R8)) colnames(R8) <- gsub("date","forecast.date",colnames(R8)) cat("\nCumulative sum of squared forecast errors\n") print(R8) cat("\nMean sum of squared forecast errors\n") print(x$h.R8[[2]]) }else{cat("\n\t- Not available -\n")} }else{cat("\n\t- Not available -\n\n")} } if(identify){ cat("\n\tModel Identification\n") #iac cat("\nSample Autocorrelations of the Residuals\n") if(!is.null(x$rsd.iac)){ cat("(Small p-values (<0.05) indicate model inadequacy (for df.q >0))\n\n") for(i in 1:length(x$rsd.iac)){ sig<-rep("",dim(x$rsd.iac[[i]])[1]) sig[which(x$rsd.iac[[i]]$pval<0.05 & x$rsd.iac[[i]]$df.q>0)]<-"*" 
rsd.iac<-cbind(round(x$rsd.iac[[i]],digits=3),sig) colnames(rsd.iac)<-c(colnames(x$rsd.iac[[i]]),"") cat("\n",names(x$rsd.iac)[[i]],"\n") print(rsd.iac) } }else{cat("\t- Not available -\n")} #ipc cat("\nSample Partial Autocorrelations of the Residuals\n") if(!is.null(x$rsd.ipc)){ cat("(Small p-values (<0.05) indicate model inadequacy (for df.q >0))\n\n") for(i in 1:length(x$rsd.ipc)){ sig<-rep("",dim(x$rsd.ipc[[i]])[1]) sig[which(x$rsd.ipc[[i]]$pval<0.05 & x$rsd.ipc[[i]]$df.q>0)]<-"*" rsd.ipc<-cbind(round(x$rsd.ipc[[i]],digits=3),sig) colnames(rsd.ipc)<-c(colnames(x$rsd.ipc[[i]]),"") cat("\n",names(x$rsd.ipc)[[i]],"\n") print(rsd.ipc) } }else{cat("\t- Not available -\n")} } }else{ #End RegARIMA Option # x11Regression Option: cat("\n\tX11 Regression\n\n") cat("Regression Model:",unlist(x$regmdl),"\n") cat("\n\tOutlier Detection\n") cat("Critical |t| for outliers:",unlist(x$crit),"\t\n") cat("Total Number of Outliers:",length(x$out)+length(x$autooutlier)-length(which(x$out=="-"))-length(which(x$autooutlier=="-")),"\n") cat("Automatically Identified Outliers:",length(x$autooutlier)-length(which(x$autooutlier=="-")),"\n") cat("\n\tRegression Model\n") rest<-unlist(lapply(strsplit(as.character(x$regmdl),"+",fixed=TRUE),function(x)gsub("^\\s+|\\s+$", "",x))) rest<-names(x)[which(names(x)%in%rest)] liste <- c("outlier","userdefined","leapyear","td",rest,"autooutlier")#,"almostoutlier") liste<-liste[which(liste%in%names(x))] empty <- which(unlist(lapply(1:length(x),function(y)any(x[[y]]=="-")))) res <- as.data.frame(do.call(rbind,lapply(which(!liste %in% names(x[empty])),function(j){ if(!any(grepl(names(x[liste[j]]),names(x[[liste[j]]])))){ names(x[[liste[j]]])<-paste(names(x[liste[j]]),"_",names(x[[liste[j]]]),sep="") } do.call(rbind,lapply(1:length(x[[liste[j]]]),function(i){ c(names(x[[liste[j]]][i]),unlist(x[[liste[j]]][[i]]))}))}))) res[,2:4] <- apply(res[,2:4],2,function(x)as.numeric(formatC(as.numeric(as.character(x)),digits=3,format="f"))) colnames(res)[1]<-"variable" print(res) if(!is.null(x[["derived.coef"]])){ cat("* Derived parameter estimates: ",x[["derived.coef"]],"\n") } cat("\n\tSeasonal Adjustment\n\n") cat("Identifiable Seasonality:",unlist(x$id.seas),"\n") if(id.rsdseas){ if(x$id.rsdseas=="none") cat("Residual Seasonality: none\n") else cat("Residual Seasonality: yes\n") } cat("Seasonal Peaks:",unlist(x$peaks.seas),"\n") cat("Trading Day Peaks:",unlist(x$peaks.td),"\n") cat("Overall Index of Quality of SA\n(Acceptance region from 0 to 1)\nQ:",unlist(x$q),"\n") if(quality.stat){ cat("Q2:",unlist(x$q2),"(Q statistic computed w/o the M2 quality control statistic)\n") cat("Quality Control Statistics\n") cat("M1:",unlist(x$m1),"\n") cat("M2:",unlist(x$m2),"\n") cat("M3:",unlist(x$m3),"\n") cat("M4:",unlist(x$m4),"\n") cat("M5:",unlist(x$m5),"\n") cat("M6:",unlist(x$m6),"\n") cat("M7:",unlist(x$m7),"\n") cat("M8:",unlist(x$m8),"\n") cat("M9:",unlist(x$m9),"\n") cat("M10:",unlist(x$m10),"\n") cat("M11:",unlist(x$m11),"\n") } cat("Number of M statistics outside the limits:",unlist(x$nmfail),"\n") if(spectra.detail){ new.names<-function(z){ for(i in 1:length(x[[z]])){ if(length(x[[z]][[i]])>1){ x[[z]][[i]] <- do.call(paste,x[[z]][[i]]) }} y<-as.data.frame(x[[z]],row.names="") colnames(y)<-gsub(paste(z,".",sep=""),replacement="",names(unlist(x[[z]])),fixed=TRUE) return(y)} cat("Spectrum of the original series\n") print(new.names("spcori")) cat("Spectrum of differenced seasonally adjusted series\n") print(new.names("spcsa")) cat("Spectrum of modified irregular series\n") 
print(new.names("spcirr")) } # cat("\n\tSeasonal and Trend Moving Averages\n\n") cat("\nSA decomposition:",x$samode[[length(x$samode)]],"\n") if(x$seasonalma[[1]]=="M.S.R."){ cat("Seasonal moving average used for the final iteration: \n",x$seasonalma[[length(x$seasonalma)]], " (Based on the size of the global moving seasonality ratio (msr))\n",sep="") }else{ cat("Moving average used to estimate the seasonal factors:",x$seasonalma[[length(x$seasonalma)]],"\n") } cat("Moving average used to estimate the final trend-cycle: ",x$trendma[[length(x$trendma)]],"-term Henderson filter\n",sep="") if(slidingspans){ cat("\n\tSlidingspans\n\n") if(length(grep("ss.",names(x),fixed=TRUE))>0){ ## S0 cat("Summary of options selected for this run:") if(all(x$ss.options!="-")){ ss.options<-as.data.frame((t(as.data.frame(x$ss.options)))) row.names(ss.options)<-c("Number of spans:","Length of spans:","First period in first span:","First year in first span:") colnames(ss.options)<-"" print(ss.options) }else{cat("\t- Not available -\n")} cat("\nSeasonality:\n") if(any(x$ss.seasTests!="-")){ # F statistics not printed (might change this if requested) # cat("stabseas (F-Value of test for the presence of seasonality assuming stability)\n", # "movseas (F-Value of moving seasonality test)\n") seasTests <- x$ss.seasTests seasTests <- seasTests[which(colnames(seasTests)%in%c("m7","idseas"))] colnames(seasTests)<-gsub("m7","M7",colnames(seasTests)) colnames(seasTests)<-gsub("idseas","Identifiable Seasonality",colnames(seasTests)) print(seasTests)}else{cat("\n\t- Not available -\n")} ## S1 cat("\nS1 Period means of Seasonal Factors\n(Movements within a period should be small)\n") if(all(x$ss.S1!="-")){ print(x$ss.S1[[1]]) cat("\nSummary statistics for mean seasonal factor\n") print(x$ss.S1[[2]]) }else{cat("\t- Not available -\n")} ## S2 cat("\nS2 Percentage of unstable periods\n") if(all(x$ss.S2!="-")){ ss.S2 <- x$ss.S2 rn.S2 <- which(c("a.seasFac","b.td","c.SA","d.period-period","e.year-year")%in%row.names(ss.S2)) row.names(ss.S2) <- c("a.Seasonal Factors","b.Trading Days","c.Final SA series","d.Period to period changes","e.Year on year changes")[rn.S2] print(ss.S2) }else{cat("\t- Not available -\n")} ## S3 cat("\nS3 Breakdown of unstable periods\n") if(all(x$ss.S3!="-")){ ss.S3 <- x$ss.S3 rn.S3 <- which(c("a.seasFac","b.td","c.SA","d.period-period","e.year-year")%in%names(ss.S3)) new.rn.S3 <- c("a.Seasonal Factors","b.Trading Days","c.Final SA series","d.Period to period changes","e.Year on year changes") for(i in 1:length(rn.S3)){ cat("\n",new.rn.S3[rn.S3[i]],"\n") print(x$ss.S3[[i]]) } }else{cat("\t- Not available -\n\n")} }else{cat("\t- Not available -\n\n")} } if(history){ cat("\n\tHistory\n") if(length(grep("h.",names(x),fixed=TRUE))>0){ ## R1 cat("\nR1 Average absolute percent revisions of the seasonal adjusments\n(Deviation from",unlist(x$h.target),"estimate defines revisions of seasonal adjustments calculated at lags)\n") if(all(x$h.R1!="-")){ x$h.R1[[1]][,-1] <- round(x$h.R1[[1]][,-1],digits=3) x$h.R1[[2]] <- round(x$h.R1[[2]],digits=3) x$h.R1[[3]][,-1] <- round(x$h.R1[[3]][,-1],digits=3) print(rbind(x$h.R1[[1]], rep("",dim(x$h.R1[[1]])[2]), c("total",unlist(x$h.R1[[2]])),rep("",dim(x$h.R1[[1]])[2]))) print(x$h.R1[[3]]) }else{cat("\t- Not available -\n")} ## R2 cat("\nR2 Average absolute percent revisions of the period-to-period percent change of the ajusments\n(Deviation from",unlist(x$h.target),"estimate defines revisions of seasonal adjustments calculated at lags)\n") if(all(x$h.R2!="-")){ x$h.R2[[1]][,-1] 
<- round(x$h.R2[[1]][,-1],digits=3) x$h.R2[[2]] <- round(x$h.R2[[2]],digits=3) x$h.R2[[3]][,-1] <- round(x$h.R2[[3]][,-1],digits=3) print(rbind(x$h.R2[[1]], rep("",dim(x$h.R2[[1]])[2]), c("total",unlist(x$h.R2[[2]])),rep("",dim(x$h.R2[[1]])[2]))) print(x$h.R2[[3]]) }else{cat("\t- Not available -\n")} ## R4 cat("\nR4 Average absolute percent revisions of the final Henderson trend component\n(Deviation from",unlist(x$h.target),"estimate defines revisions of seasonal adjustments calculated at lags)\n") if(all(x$h.R4!="-")){ x$h.R4[[1]][,-1] <- round(x$h.R4[[1]][,-1],digits=3) x$h.R4[[2]] <- round(x$h.R4[[2]],digits=3) x$h.R4[[3]][,-1] <- round(x$h.R4[[3]][,-1],digits=3) print(rbind(x$h.R4[[1]], rep("",dim(x$h.R4[[1]])[2]), c("total",unlist(x$h.R4[[2]])),rep("",dim(x$h.R4[[1]])[2]))) print(x$h.R4[[3]]) }else{cat("\t- Not available -\n")} ## R5 cat("\nR5 Average absolute percent revisions of period-to-period percent change of the trend-cycle\n(Deviation from",unlist(x$h.target),"estimate defines revisions of seasonal adjustments calculated at lags)\n") if(all(x$h.R5!="-")){ x$h.R5[[1]][,-1] <- round(x$h.R5[[1]][,-1],digits=3) x$h.R5[[2]] <- round(x$h.R5[[2]],digits=3) x$h.R5[[3]][,-1] <- round(x$h.R5[[3]][,-1],digits=3) print(rbind(x$h.R5[[1]], rep("",dim(x$h.R5[[1]])[2]), c("total",unlist(x$h.R5[[2]])),rep("",dim(x$h.R5[[1]])[2]))) print(x$h.R5[[3]]) }else{cat("\t- Not available -\n")} ## R6 cat("\nR6 Average absolute percent revisions of the concurrent and projected seasonal component\n") if(all(x$h.R6!="-")){ x$h.R6[[1]][,-1] <- round(x$h.R6[[1]][,-1],digits=3) x$h.R6[[2]] <- round(x$h.R6[[2]],digits=3) x$h.R6[[3]][,-1] <- round(x$h.R6[[3]][,-1],digits=3) print(rbind(x$h.R6[[1]], rep("",dim(x$h.R6[[1]])[2]), c("total",unlist(x$h.R6[[2]]),rep("",dim(x$h.R6[[1]])[2])))) print(x$h.R6[[3]]) }else{cat("\t- Not available -\n")} ## R7 cat("\nR7 Likelihood statistics from estimating regARIMA model") if(!is.null(x$h.R7)){ cat(" over spans with ending dates",paste(substr(x$h.R7[1,1],1,4),".",substr(x$h.R7[1,1],5,6),sep=""),"to",paste(substr(x$h.R7[dim(x$h.R7)[1],1],1,4),".",substr(x$h.R7[dim(x$h.R7)[1],1],5,6),sep=""),"\n") x$h.R7[dim(x$h.R7)[1],1] print(x$h.R7) }else{cat("\n\t- Not available -\n")} ## R8 cat("\nR8 Sum of squared forecast errors at specified leads from the end of each span") if(!is.null(x$h.R8)){ R8 <- x$h.R8[[1]] colnames(R8) <-gsub("sumSqFcstError","lead",colnames(R8)) colnames(R8) <- gsub("date","forecast.date",colnames(R8)) cat("\nCumulative sum of squared forecast errors\n") print(R8) cat("\nMean sum of squared forecast errors\n") print(x$h.R8[[2]]) }else{cat("\n\t- Not available -\n")} }else{cat("\n\t- Not available -\n\n")} } if(identify){ cat("\n\tModel Identification\n") #iac cat("\nSample Autocorrelations of the Residuals\n") if(!is.null(x$rsd.iac)){ cat("(Small p-values (<0.05) indicate model inadequacy (for df.q >0))\n\n") for(i in 1:length(x$rsd.iac)){ sig<-rep("",dim(x$rsd.iac[[i]])[1]) sig[which(x$rsd.iac[[i]]$pval<0.05 & x$rsd.iac[[i]]$df.q>0)]<-"*" rsd.iac<-cbind(round(x$rsd.iac[[i]],digits=3),sig) colnames(rsd.iac)<-c(colnames(x$rsd.iac[[i]]),"") cat("\n",names(x$rsd.iac)[[i]],"\n") print(rsd.iac) } }else{cat("\t- Not available -\n")} #ipc cat("\nSample Partial Autocorrelations of the Residuals\n") if(!is.null(x$rsd.ipc)){ cat("(Small p-values (<0.05) indicate model inadequacy (for df.q >0))\n\n") for(i in 1:length(x$rsd.ipc)){ sig<-rep("",dim(x$rsd.ipc[[i]])[1]) sig[which(x$rsd.ipc[[i]]$pval<0.05 & x$rsd.ipc[[i]]$df.q>0)]<-"*" 
rsd.ipc<-cbind(round(x$rsd.ipc[[i]],digits=3),sig) colnames(rsd.ipc)<-c(colnames(x$rsd.ipc[[i]]),"") cat("\n",names(x$rsd.ipc)[[i]],"\n") print(rsd.ipc) } }else{cat("\t- Not available -\n")} } }}
/scratch/gouwar.j/cran-all/cranData/x12/R/summary.x12.R
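# ---------------------------------------------------------------------------
# Hedged usage sketch (NOT part of summary.x12.R above): the body above prints
# additional blocks depending on flags such as likelihood.stat, quality.stat,
# spectra.detail, slidingspans, history and identify. It is an assumption here
# that these flags are exposed through the summary() method for "x12Output"
# objects and that the X-13/X-12 binary is available (as in the package tests).
if (requireNamespace("x12", quietly = TRUE) && interactive()) {
  library(x12)
  xts <- x12(AirPassengers)                 # run the seasonal adjustment
  summary(xts)                              # basic summary table
  summary(xts,
          likelihood.stat = TRUE,           # AIC/AICC/BIC/HQ block
          quality.stat    = TRUE,           # M1-M11 quality statistics
          spectra.detail  = TRUE)           # spectra of original/residual/SA/irregular
}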
setGeneric("times", function(x) { standardGeneric("times")} ) setMethod(f='times', signature=signature(x = "x12Output"), definition=function(x){ ret <- list() ret$original <- c(start(x@a1),end(x@a1)) if(!all(is.na(x@forecast@estimate))) ret$forecast <- c(start(x@forecast@estimate),end(x@forecast@estimate)) if(!all(is.na(x@backcast@estimate))) ret$backcast <- c(start(x@backcast@estimate),end(x@backcast@estimate)) else ret$backcast <- NULL return(ret) }) setMethod(f='times', signature=signature(x = "x12Single"), definition=function(x)times(x@x12Output))
/scratch/gouwar.j/cran-all/cranData/x12/R/times.R
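# ---------------------------------------------------------------------------
# Hedged usage sketch (NOT part of times.R above): times() returns a list of
# start/end pairs for the original series and, when available, for forecasts
# and backcasts. Assumes a completed x12 run as in the package tests.
if (requireNamespace("x12", quietly = TRUE) && interactive()) {
  library(x12)
  out <- x12(AirPassengers)   # "x12Output" object
  times(out)$original         # c(start, end) of the input series
  times(out)$forecast         # NULL unless forecasts were produced
}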
setGeneric("x12", function(object, x12Parameter=new("x12Parameter"), x12BaseInfo=new("x12BaseInfo"),...) { standardGeneric("x12")} ) setMethod( f='x12', signature=signature(object = "ts"), definition=function(object, x12Parameter=new("x12Parameter"),x12BaseInfo=new("x12BaseInfo")) { Par <- slotNames(x12Parameter) pp <- vector() for(p in Par){ pp <- c(pp,(paste(p,"=x12Parameter@",p,sep=""))) } Par <- slotNames(x12BaseInfo) for(p in Par){ pp <- c(pp,(paste(p,"=x12BaseInfo@",p,sep=""))) } if(!is.null(getOption("x12.delete"))){ if(getOption("x12.delete")) keep_x12out <- paste("keep_x12out=FALSE") else keep_x12out <- paste("keep_x12out=TRUE") }else keep_x12out <- paste("keep_x12out=TRUE") out <- NULL pp <- paste("out <- x12work(tso=object,",paste(pp,collapse=","),",tblnames=\"otl\",Rtblnames=\"regressor\",",keep_x12out,")",sep="") eval(parse(text=pp)) classout <- new("x12Output") Par <- slotNames(classout) for(p in Par){ if(is(slot(classout,p),"spectrum")){ if(p%in%names(out)){ slot(classout,p)@frequency <- out[[p]]$frequency slot(classout,p)@spectrum <- out[[p]]$spectrum } }else if(is(slot(classout,p),"fbcast")){ if(p%in%names(out)){ slot(classout,p)@estimate <- out[[p]][["estimate"]] slot(classout,p)@lowerci <- out[[p]][["lowerci"]] slot(classout,p)@upperci <- out[[p]][["upperci"]] } }else slot(classout,p)<-out[[p]] } return(classout) } ) setMethod( f='x12', signature=signature(object = "x12Single"), definition=function(object,x12BaseInfo=new("x12BaseInfo"),forceRun=FALSE) { if(length(object@x12OldParameter)>0) TF <- !identical(object@x12Parameter,object@x12OldParameter[[length(object@x12OldParameter)]]) else TF <- TRUE if(!object@firstRun||forceRun||TF){ x12Parameter <- object@x12Parameter if(object@firstRun){ object@x12OldParameter[[length(object@x12OldParameter)+1]] <- object@x12Parameter object@x12OldOutput[[length(object@x12OldOutput)+1]] <- object@x12Output } object@firstRun <- TRUE Par <- slotNames(x12Parameter) pp <- vector() for(p in Par){ pp <- c(pp,(paste(p,"=x12Parameter@",p,sep=""))) } Par <- slotNames(x12BaseInfo) for(p in Par){ pp <- c(pp,(paste(p,"=x12BaseInfo@",p,sep=""))) } if(!is.null(object@tsName)) pp <- c(pp, paste("file=\"",object@tsName,"\"",sep="")) if(!is.null(getOption("x12.delete"))){ if(getOption("x12.delete")) keep_x12out <- paste("keep_x12out=FALSE") else keep_x12out <- paste("keep_x12out=TRUE") }else keep_x12out <- paste("keep_x12out=TRUE") out <- NULL pp <- paste("out <- x12work(tso=object@ts,",paste(pp,collapse=","),",tblnames=\"otl\",Rtblnames=\"regressor\",",keep_x12out,")",sep="") eval(parse(text=pp)) classout <- new("x12Output") Par <- slotNames(classout) for(p in Par){ if(is(slot(classout,p),"spectrum")){ if(p%in%names(out)){ slot(classout,p)@frequency <- out[[p]]$frequency slot(classout,p)@spectrum <- out[[p]]$spectrum } }else if(is(slot(classout,p),"fbcast")){ if(p%in%names(out)){ slot(classout,p)@estimate <- out[[p]][["estimate"]] slot(classout,p)@lowerci <- out[[p]][["lowerci"]] slot(classout,p)@upperci <- out[[p]][["upperci"]] } }else slot(classout,p)<-out[[p]] } object@x12Output <- classout } return(object) } ) setMethod( f='x12', signature=signature(object = "x12Batch"), definition=function(object,forceRun=FALSE) { starting.time <- Sys.time() if(existd("x12path")) object@x12BaseInfo@x12path <- getd("x12path") else stop("Please enter an x12path") ## Parallelization implemented after the pattern used in the survey package by Thomas Lumley. 
if (is.null(getOption("x12.parallel")) | !require("parallel", quietly=TRUE)){ tmpList <- lapply(object@x12List,function(x)try(x12(x,x12BaseInfo=object@x12BaseInfo,forceRun=forceRun))) }else{ tmpList <- mclapply(object@x12List,function(x)try(x12(x,x12BaseInfo=object@x12BaseInfo,forceRun=forceRun)),mc.cores=getOption("x12.parallel")) } for(i in 1:length(tmpList)) object@x12List[[i]] <- tmpList[[i]] print(Sys.time()-starting.time) return(object) } )
/scratch/gouwar.j/cran-all/cranData/x12/R/x12-methods.R
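# ---------------------------------------------------------------------------
# Hedged usage sketch (NOT part of x12-methods.R above): the x12() generic
# dispatches on "ts", "x12Single" and "x12Batch". The calls below mirror the
# package tests; by default the binary shipped with x13binary is used.
if (requireNamespace("x12", quietly = TRUE) && interactive()) {
  library(x12)
  single <- x12(new("x12Single", ts = AirPassengers))
  batch  <- x12(new("x12Batch", list(AirPassengers, AirPassengers)))
  batch  <- setP(batch, list(automdl = FALSE), 1)   # change a parameter of series 1
  batch  <- x12(batch, forceRun = TRUE)             # re-run all series
  summary(batch)
}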
# Underlying S3 function x12work <- function(tso,period=frequency(tso),file="Rout", series.span=NULL,series.modelspan=NULL, transform.function="auto",transform.power=NULL,transform.adjust=NULL, regression.variables=NULL,regression.user=NULL,regression.file=NULL, regression.usertype=NULL,regression.centeruser=NULL,regression.start=NULL, regression.aictest=NULL, outlier.types=NULL,outlier.critical=NULL,outlier.span=NULL,outlier.method=NULL, identify=FALSE,identify.diff=NULL,identify.sdiff=NULL,identify.maxlag=NULL, arima.model=NULL,arima.smodel=NULL,arima.ar=NULL,arima.ma=NULL, automdl=FALSE,automdl.acceptdefault=FALSE,automdl.balanced=TRUE, automdl.maxorder=c(3,2),automdl.maxdiff=c(1,1), forecast_years=NULL,backcast_years=NULL,forecast_conf=.95, forecast_save="ftr", estimate=FALSE,estimate.outofsample=TRUE, check=TRUE,check.maxlag=NULL, slidingspans=FALSE, slidingspans.fixmdl=NULL,slidingspans.fixreg=NULL, slidingspans.length=NULL,slidingspans.numspans=NULL, slidingspans.outlier=NULL, slidingspans.additivesa=NULL,slidingspans.start=NULL, history=FALSE, history.estimates=NULL,history.fixmdl=FALSE, history.fixreg=NULL,history.outlier=NULL, history.sadjlags=NULL,history.trendlags=NULL,history.start=NULL,history.target=NULL, x11.sigmalim=c(1.5,2.5),x11.type=NULL,x11.sfshort=FALSE,x11.samode=NULL, x11.seasonalma=NULL,x11.trendma=NULL, x11.appendfcst=TRUE,x11.appendbcst=FALSE,x11.calendarsigma=NULL, x11.excludefcst=TRUE,x11.final="user", x11regression=FALSE, tblnames=NULL,Rtblnames=NULL, x12path=NULL, # x13path=NULL, use="x12", keep_x12out=TRUE,showWarnings=TRUE ){ ### Quick Fix: Rename the parameters to previous version: seats=FALSE seatsparameter=NULL span <- series.span modelspan <-series.modelspan transform<-transform.function regvariables <- regression.variables reguser <- regression.user regfile <- regression.file usertype <- regression.usertype centeruser <- regression.centeruser regfilestart <- regression.start aictest<-regression.aictest # outlier.detection <- outlier outlier <- outlier.types critical <- outlier.critical outlier_span <- outlier.span outlier_method <- outlier.method arima <- arima.model sarima <- arima.smodel acceptdefault <- automdl.acceptdefault balanced <- automdl.balanced maxorder <- automdl.maxorder maxdiff <- automdl.maxdiff estOutofsample <- estimate.outofsample sigmalim <- x11.sigmalim onlytd <- x11.type sfshort <- x11.sfshort samode <- x11.samode seasonalma <- x11.seasonalma trendma <- x11.trendma x11appendfcst <- x11.appendfcst x11appendbcst <- x11.appendbcst x11calendarsigma <- x11.calendarsigma x11excludefcst <- x11.excludefcst x11final <- x11.final x11regress <- x11regression basename(file) ext <- c("out","err","spc","otl","dat","a1","b1","d10","d11","d12","ftr","log") for(e in ext){ f <- paste(basename(file),".",e,sep="") if(file.exists(f)) file.remove(f) } dirgra <- paste("gra_",gsub("\\.","_",basename(file)),sep="") unlink(paste(dirname(file),"/",dirgra,sep=""),recursive=TRUE) if((length(tso)/period)>15 && !is.null(backcast_years) && !showWarnings){ cat("\nWarning: x12 cannot produce backcasts for time series that are more than 15 years long!\n") } header <- vector() header[length(header)+1] <- "series{" header[length(header)+1] <- "save=(a1 b1)" header[length(header)+1] <- 'title="R Output for x12a"' header[length(header)+1] <- paste("start=",paste(start(tso),collapse="."),sep="") if(!is.null(span)){ topaste<-span tocollapse<-c(".",".") if(any(is.na(span))){ topaste[which(is.na(span))]<-"" tocollapse[which(is.na(span))[2]/2]<-"" } header[length(header)+1] 
<- paste("span=(",paste(topaste[1:2],collapse=tocollapse[1]),",",paste(topaste[3:4],collapse=tocollapse[2]),")",sep="") } if(!is.null(modelspan)){ topaste<-modelspan tocollapse<-c(".",".") if(any(is.na(modelspan))){ topaste[which(is.na(modelspan))]<-"" tocollapse[which(is.na(modelspan))[2]/2]<-"" } header[length(header)+1] <- paste("modelspan=(",paste(topaste[1:2],collapse=tocollapse[1]),",",paste(topaste[3:4],collapse=tocollapse[2]),")",sep="") } # if(!is.null(series.comptype)){ # header[length(header)+1] <- paste("comptype=",series.comptype,sep="") # } # if(!is.null(series.compwt)){ # header[length(header)+1] <- paste("compwt=",series.compwt,sep="") # } header[length(header)+1] <- paste("period=",period,sep="") # if(!is.null(series.type)){ # header[length(header)+1] <- paste("type=",series.type,sep="") # } #ERROR: Argument name "type" not found header[length(header)+1] <- "DECIMALS=5" header[length(header)+1] <- paste("file = \"",file,".dat\"",sep="") tsoout <- as.character(round(as.vector(tso),5)) tsoout[is.na(tsoout)] <- "-99999" write(tsoout,file=paste(file,".dat",sep=""),ncolumns =1) #header[length(header)+1] <- "data=(" #datarows<-as.vector(tso) #datarows[length(datarows)+1] <- ")" #datarows[length(datarows)+1] <- "}" header[length(header)+1] <- "}" addcommands <- vector() if(!x11regress){#transform ausschalten falls x11 Regression addcommands[length(addcommands)+1] <- paste("transform{") if(is.null(transform.power)) addcommands[length(addcommands)+1] <- paste("function=",transform,sep="") else addcommands[length(addcommands)+1] <- paste("power=",transform.power,sep="") if(!is.null(transform.adjust)) addcommands[length(addcommands)+1] <- paste("adjust=",transform.adjust,sep="") addcommands[length(addcommands)+1] <- "}" } if(!is.null(c(arima,sarima,arima.ar,arima.ma))&&!x11regress){ arima <- paste("(",paste(arima,collapse=","),")",sep="") if(!is.null(sarima)) sarima <- paste("(",paste(sarima,collapse=","),")",sep="") addcommands[length(addcommands)+1] <- paste("arima{") addcommands[length(addcommands)+1] <- paste("model=",arima,sarima,sep="") if(!is.null(arima.ar)){ arima.ar[is.na(arima.ar)]<-" " addcommands[length(addcommands)+1] <- paste("ar=",paste("(",paste(arima.ar,collapse=","),")",sep=""),sep="") } if(!is.null(arima.ma)){ arima.ma[is.na(arima.ma)]<-" " addcommands[length(addcommands)+1] <- paste("ma=",paste("(",paste(arima.ma,collapse=","),")",sep=""),sep="") } addcommands[length(addcommands)+1] <- "}" } if(!is.null(c(arima,sarima,arima.ar,arima.ma))&&automdl&&!x11regress) cat("Warning: 'automdl' is ignored because an ARIMA model has been specified! \n") #cat("Arima and Sarima model specifications are ignored, because automdl is activated! \n") if(any(!is.null(c(regvariables,reguser,regfile,aictest,regfilestart,usertype,centeruser))) &&! 
x11regress){ addcommands[length(addcommands)+1] <- "regression{" addcommands[length(addcommands)+1] <- "save=otl" if(!is.null(regvariables)) addcommands[length(addcommands)+1] <- paste("variables=(",paste(regvariables,collapse=" "),")",sep="") if(!is.null(aictest)) addcommands[length(addcommands)+1] <- paste("aictest=(",paste(aictest,collapse=" "),") savelog= aictest",sep="") if(!is.null(reguser)){ forbidden.regression.user <- c("x11regress:", "samode:","finmode:","seasonalma:","trendma:","sfmsr:", "finalxreg","x11irrcrtval:", "$AO","User-defined$","Automatically Identified Outliers$", "peaks.seas:","peaks.td:","f2.idseasonal:","d11.f:", "spcori","spcsa","spcirr", "f3.m01:","f3.m02:","f3.m03:","f3.m04:","f3.m05:","f3.m06:", "f3.m07:","f3.m08:","f3.m09:","f3.m10:","f3.m11:", "f3.q:","f3.qm2:","f3.fail:", "ssa:","ssfstab:","ssfmov:","ssm7:","ssident:","ssran.","s2.","s3.", "historytarget","r01.lag","r02.lag","r04.lag","r05.lag","r06","meanssfe") if(!any(unlist(lapply(forbidden.regression.user,function(x)grepl(x,reguser))))){ addcommands[length(addcommands)+1] <- paste("user=(",paste(reguser,collapse=" "),")",sep="") }else{ bad.name.regression.user <- unlist(lapply(forbidden.regression.user,function(x)grep(x,reguser,value=TRUE))) bad.name.index <- which(reguser%in%bad.name.regression.user) reguser[bad.name.index]<-paste("user_",1:length(bad.name.regression.user),sep="") cat("Warning: the user paramter/s",bad.name.regression.user,"in the regression argument 'regression.user' has/have been renamed to",reguser[bad.name.index],"due to conflicts! \n") addcommands[length(addcommands)+1] <- paste("user=(",paste(reguser,collapse=" "),")",sep="") } } if(!is.null(regfile)) addcommands[length(addcommands)+1] <- paste("file='",regfile,"'",sep="") if(!is.null(regfilestart)) addcommands[length(addcommands)+1] <- paste("start=",paste(regfilestart,collapse="."),"",sep="") if(!is.null(usertype)) addcommands[length(addcommands)+1] <- paste("usertype=(",paste(usertype,collapse=" "),")",sep="") if(!is.null(centeruser)) addcommands[length(addcommands)+1] <- paste("centeruser=",centeruser,sep="") addcommands[length(addcommands)+1] <- "}" } if(!is.null(outlier) &&! x11regress){ # if((outlier.detection || !is.null(outlier)) &&! 
x11regress){ addcommands[length(addcommands)+1] <- "outlier {" # if(!is.null(outlier)){ # outlier.detection <- TRUE if(all(outlier=="all")) addcommands[length(addcommands)+1] <- "types=(all)" else addcommands[length(addcommands)+1] <- paste("types=(",paste(outlier,collapse=" "),")",sep="") # } if(!is.null(critical)){ if(is.list(critical)){ names(critical)<-toupper(names(critical)) critval <- vector() ifelse(is.null(critical$AO),critval[1] <- "",critval[1] <- critical$AO) ifelse(is.null(critical$LS),critval[2] <- "",critval[2] <- critical$LS) ifelse(is.null(critical$TC),critval[3] <- "",critval[3] <- critical$TC) addcommands[length(addcommands)+1] <- paste("critical=(",paste(critval,collapse=","),")",sep="") }else{addcommands[length(addcommands)+1] <- paste("critical=(",paste(critical,collapse=","),")",sep="") } } if(!is.null(outlier_span)){ topaste<-outlier_span tocollapse<-c(".",".") if(any(is.na(outlier_span))){ topaste[which(is.na(outlier_span))]<-"" tocollapse[which(is.na(outlier_span))[2]/2]<-"" } addcommands[length(addcommands)+1] <- paste("span=(",paste(topaste[1:2],collapse=tocollapse[1]),",",paste(topaste[3:4],collapse=tocollapse[2]),")",sep="") } # if(!is.null(outlier_span)) # addcommands[length(addcommands)+1] <- paste("span=(",paste(outlier_span,collapse=","),")",sep="") addcommands[length(addcommands)+1] <- "print=(default)" if(!is.null(outlier_method) &&! x11regress){ addcommands[length(addcommands)+1] <- paste("method=",paste(outlier_method,collapse=","),sep="") } addcommands[length(addcommands)+1] <- "}" } if(identify){ addcommands[length(addcommands)+1] <- "identify {" if(!is.null(identify.diff)) addcommands[length(addcommands)+1] <- paste("diff=",paste("(",paste(identify.diff,collapse=","),")",sep=""),sep="") if(!is.null(identify.sdiff)) addcommands[length(addcommands)+1] <- paste("sdiff=",paste("(",paste(identify.sdiff,collapse=","),")",sep=""),sep="") if(!is.null(identify.maxlag)) addcommands[length(addcommands)+1] <- paste("maxlag=",identify.maxlag,sep="") addcommands[length(addcommands)+1] <- "}" } if(slidingspans){ addcommands[length(addcommands)+1] <- "slidingspans{" if(!is.null(slidingspans.fixmdl)) addcommands[length(addcommands)+1] <- paste("fixmdl=",slidingspans.fixmdl,sep="") if(!is.null(slidingspans.fixreg)) addcommands[length(addcommands)+1] <- paste("fixreg=(",paste(slidingspans.fixreg,collapse=" "),")",sep="") if(!is.null(slidingspans.length)) addcommands[length(addcommands)+1] <- paste("length=",slidingspans.length,sep="") if(!is.null(slidingspans.numspans)) addcommands[length(addcommands)+1] <- paste("numspans=",slidingspans.numspans,sep="") if(!is.null(slidingspans.outlier)) addcommands[length(addcommands)+1] <- paste("outlier=",slidingspans.outlier,sep="") if(!is.null(slidingspans.start)) addcommands[length(addcommands)+1] <- paste("start=",paste(slidingspans.start,collapse="."),"",sep="") if(!is.null(slidingspans.additivesa)) addcommands[length(addcommands)+1] <- paste("additivesa=",slidingspans.additivesa,sep="") addcommands[length(addcommands)+1] <- "}" } if(history){ addcommands[length(addcommands)+1] <- "history{" if(!is.null(history.estimates)) addcommands[length(addcommands)+1] <- paste("estimates=(",paste(history.estimates,collapse=" "),")",sep="") if(history.fixmdl) addcommands[length(addcommands)+1] <- "fixmdl=yes" if(!is.null(history.fixreg)) addcommands[length(addcommands)+1] <- paste("fixreg=(",paste(history.fixreg,collapse=" "),")",sep="") if(!is.null(history.outlier)) addcommands[length(addcommands)+1] <- 
paste("outlier=",history.outlier,sep="") if(!is.null(history.sadjlags)) addcommands[length(addcommands)+1] <- paste("sadjlags=",paste("(",paste(history.sadjlags,collapse=","),")",sep=""),sep="") if(!is.null(history.trendlags)) addcommands[length(addcommands)+1] <- paste("trendlags=",paste("(",paste(history.trendlags,collapse=","),")",sep=""),sep="") if(!is.null(history.start)) addcommands[length(addcommands)+1] <- paste("start=",paste(history.start,collapse="."),"",sep="") if(!is.null(history.target)) addcommands[length(addcommands)+1] <- paste("target=",history.target,sep="") addcommands[length(addcommands)+1] <- "}" } if(!x11regress){#nicht bei x11 Regression if(estimate){ addcommands[length(addcommands)+1] <- "estimate {" if(estOutofsample){ addcommands[length(addcommands)+1] <- "outofsample=yes"} addcommands[length(addcommands)+1] <- "print=(default + rts)" addcommands[length(addcommands)+1] <- "savelog=(aic bic afc)" addcommands[length(addcommands)+1] <- "}" if(check){ addcommands[length(addcommands)+1] <- "check{" if(!is.null(check.maxlag)) addcommands[length(addcommands)+1] <- paste("maxlag=",check.maxlag,sep="") #addcommands[length(addcommands)+1] <- "print=(default+specresidual+pacfplot)" addcommands[length(addcommands)+1] <- "savelog=(nrm lbq)" addcommands[length(addcommands)+1] <- "}" } } if(automdl && is.null(c(arima,sarima,arima.ar,arima.ma))){ addcommands[length(addcommands)+1] <- "automdl{" if(acceptdefault) addcommands[length(addcommands)+1] <- "acceptdefault=yes" else addcommands[length(addcommands)+1] <- "acceptdefault=no" if(balanced) addcommands[length(addcommands)+1] <- "balanced=yes" else addcommands[length(addcommands)+1] <- "balanced=no" maxorder[is.na(maxorder)]<-" " addcommands[length(addcommands)+1] <- paste("maxorder=",paste("(",paste(maxorder,collapse=","),")",sep=""),sep="") maxdiff[is.na(maxdiff)]<-" " addcommands[length(addcommands)+1] <- paste("maxdiff=",paste("(",paste(maxdiff,collapse=","),")",sep=""),sep="") addcommands[length(addcommands)+1] <- "savelog=(adf amd b5m mu)" addcommands[length(addcommands)+1] <- "}" } #Forecasts Backcasts if(!is.null(forecast_years) | !is.null(backcast_years)){ addcommands[length(addcommands)+1] <- "forecast {" addcommands[length(addcommands)+1] <- paste0("save=",forecast_save) if(!is.null(forecast_years)){ addcommands[length(addcommands)+1] <- paste("maxlead=",forecast_years*frequency(tso),sep="") } if(!is.null(backcast_years)){ addcommands[length(addcommands)+1] <- paste("maxback=",backcast_years*frequency(tso),sep="") } addcommands[length(addcommands)+1] <- "}" } }#end nicht bei x11 Regression if(!seats){ addcommands[length(addcommands)+1] <- "x11{" addcommands[length(addcommands)+1] <- "save=(d10 d11 d12)" if(!is.null(onlytd)){ addcommands[length(addcommands)+1] <- paste("type=",onlytd,sep="") } if(sfshort) addcommands[length(addcommands)+1] <- "sfshort=yes" if(!is.null(sigmalim)){ sigmalim <- paste("(",sigmalim[1],",",sigmalim[2],")",sep="") addcommands[length(addcommands)+1] <- paste("sigmalim=",sigmalim,sep="") } if(!is.null(samode)) addcommands[length(addcommands)+1] <- paste("mode=",samode,sep="") if(!is.null(seasonalma)){ addcommands[length(addcommands)+1] <- paste("seasonalma=(",paste(seasonalma,collapse=" "),")",sep="")} if(!is.null(trendma)){ addcommands[length(addcommands)+1] <- paste("trendma=",trendma,sep="") } if(!is.null(x11calendarsigma)) addcommands[length(addcommands)+1] <- paste("calendarsigma=",x11calendarsigma,sep="") if(x11excludefcst) addcommands[length(addcommands)+1] <- "excludefcst=yes" 
if(x11appendbcst) addcommands[length(addcommands)+1] <- "appendbcst=yes" ###backcast if(x11final!="none") addcommands[length(addcommands)+1] <- paste("final=(",paste(x11final,collapse=" "),")",sep="") if(x11appendfcst) addcommands[length(addcommands)+1] <- "appendfcst=yes" ###forecast addcommands[length(addcommands)+1] <- "savelog=all" addcommands[length(addcommands)+1] <- "}" }else{ addcommands[length(addcommands)+1] <- paste("seats{",seatsparameter,"}",sep="") } if(x11regress){ #start: The start date for the values of the user-defined regression variables. # The default is the start date of the series. # Valid values are any date up to the start date of the series # (or up to the start date of the span specified by the span argument of the series spec, if present). addcommands[length(addcommands)+1] <- "x11regression{" if(!is.null(regfilestart)) addcommands[length(addcommands)+1] <- paste("start=",paste(regfilestart,collapse="."),sep="") else addcommands[length(addcommands)+1] <- paste("start=",paste(start(tso),collapse="."),sep="") if(!is.null(critical)){ if(is.list(critical) & length(critical)>1 &!"AO"%in%names(critical)){ cat("X11 Regression only allows for the detection of Additive Outliers (AO)! \n")} else addcommands[length(addcommands)+1] <- paste("critical=",critical,sep="") } if(!is.null(outlier_method)){ addcommands[length(addcommands)+1] <- paste("outliermethod=",paste(outlier_method,collapse=","),sep="") } if(!is.null(outlier_span)){ topaste<-outlier_span tocollapse<-c(".",".") if(any(is.na(outlier_span))){ topaste[which(is.na(outlier_span))]<-"" tocollapse[which(is.na(outlier_span))[2]/2]<-"" } addcommands[length(addcommands)+1] <- paste("outlierspan=(",paste(topaste[1:2],collapse=tocollapse[1]),",",paste(topaste[3:4],collapse=tocollapse[2]),")",sep="") } if(!is.null(regvariables)) addcommands[length(addcommands)+1] <- paste("variables=(",paste(regvariables,collapse=" "),")",sep="") if(!is.null(reguser)){ forbidden.regression.user <- c("x11regress:", "samode:","finmode:","seasonalma:","trendma:","sfmsr:", "finalxreg","x11irrcrtval:", "$AO","User-defined$","Automatically Identified Outliers$", "peaks.seas:","peaks.td:","f2.idseasonal:","d11.f:", "spcori","spcsa","spcirr", "f3.m01:","f3.m02:","f3.m03:","f3.m04:","f3.m05:","f3.m06:", "f3.m07:","f3.m08:","f3.m09:","f3.m10:","f3.m11:", "f3.q:","f3.qm2:","f3.fail:", "ssa:","ssfstab:","ssfmov:","ssm7:","ssident:","ssran.","s2.","s3.", "historytarget","r01.lag","r02.lag","r04.lag","r05.lag","r06","meanssfe") if(!any(unlist(lapply(forbidden.regression.user,function(x)grepl(x,reguser))))){ addcommands[length(addcommands)+1] <- paste("user=(",paste(reguser,collapse=" "),")",sep="") }else{ bad.name.regression.user <- unlist(lapply(forbidden.regression.user,function(x)grep(x,reguser,value=TRUE))) bad.name.index <- which(reguser%in%bad.name.regression.user) reguser[bad.name.index]<-paste("user_",1:length(bad.name.regression.user),sep="") cat("Warning: the user paramter/s",bad.name.regression.user,"in the regression argument 'regression.user' has/have been renamed to",reguser[bad.name.index],"due to conflicts! 
\n") addcommands[length(addcommands)+1] <- paste("user=(",paste(reguser,collapse=" "),")",sep="") } } if(!is.null(regfile)) addcommands[length(addcommands)+1] <- paste("file='",regfile,"'",sep="") if(!is.null(centeruser)) addcommands[length(addcommands)+1] <- paste("centeruser=",centeruser,sep="") if(!is.null(usertype)) addcommands[length(addcommands)+1] <- paste("usertype=(",paste(usertype,collapse=" "),")",sep="") addcommands[length(addcommands)+1] <- "}" } con <- file(paste(file,".spc",sep="")) #writeLines(c(header,datarows,addcommands),con) writeLines(c(header,addcommands),con) close(con) # Rewritten, for not using sh or bat files (Suggestions by Peter Ellis) if(Sys.info()[1]=="Windows"){ #con1 <- file("run.bat") #mdcommand <- "md gra" file_1 <- gsub("/","\\\\",file) if((!is.null(x12path)) && use=="x12"){ x12path_1 <- gsub("/","\\\\",x12path) command <- paste("\"",x12path_1,"\" ",file_1," -g ",dirgra,sep="") }else if((!is.null(x13path)) && use!="x12"){ x13path_1 <- gsub("/","\\\\",x13path) command <- paste("\"","x13path_1","\" ",file_1," -g ",dirgra,sep="") }else stop("Please define the path to the X12 binaries!") }else{ #con1 <- file("run.sh") #mdcommand <- "mkdir gra" quiet <-ifelse(is.null(options("x12.message")$x12.message),"", " -q ") if((!is.null(x12path)) && use=="x12"){ command <- paste(x12path," ",file, quiet, " -r -g ",dirgra,sep="") }else if((!is.null(x13path)) && use!="x12"){ command <- paste(x13path," ",file, quiet, " -r -g ",dirgra,sep="") }else stop("Please define the path to the X12 binaries!") } #writeLines(c(mdcommand,command),con1) #close(con1) if(!file.exists(dirgra)) dir.create(dirgra) if(is.null(options("x12.message")$x12.message)){ system(command) }else{ system(command, ignore.stdout = TRUE, ignore.stderr = TRUE) } # if(Sys.info()[1]=="Windows"){ # system("run.bat") # }else{ # system("chmod 744 run.sh") # system("./run.sh") # } # out <- list() start_series <- start(tso) end_series <- end(tso) if(!is.null(span)){ if(!any(is.na(span[1:2]))) start_series <- as.numeric(span[1:2]) if(!any(is.na(span[3:4]))) end_series <- as.numeric(span[3:4]) } out <- readx12Out(file,freq_series=frequency(tso),start_series=start_series,end_series=end_series,tblnames=tblnames,Rtblnames=Rtblnames,transform=transform,slidingspans=slidingspans,history=history,x11regress=x11regress,outlier=outlier,showWarnings=showWarnings,keep_x12out=keep_x12out) # Rtblnames <- c("Original series", "Final seasonal factors", "Final seasonally adjusted data", "Final trend cycle", # "Final irregular components","Combined adjustment factors","Final weights for irregular component", # "Final replacements for SI ratios", # "Differenced, transformed, seasonally adjusted data", # Rtblnames) # if(seats==TRUE) # tblnames <- c("a1", "s10", "s11", "s12", "s13","s16","c17","s9","e2", tblnames) # else # tblnames <- c("a1", "d10", "d11", "d12", "d13","d16","c17","d9","e2", tblnames) # for(i in 1:length(tblnames)){ # if(file.exists(paste("gra\\",file,".",tblnames[i],sep=""))) # out[[tblnames[i]]] <- ts(read.table(paste("gra\\",file,".",tblnames[i],sep=""),header=FALSE,skip=2,sep=" ",na.strings="-999")[,2],frequency=frequency(tso),start=start(tso)) # } # spnames <- c("Spectrum_AdjOri","Spectrum_SA","Spectrum_Irr") # sptblnames <- c("sp0", "sp1", "sp2") # if(!seats){ # for(i in 1:length(sptblnames)){ # out[[spnames[i]]] <- read.table(paste("gra\\",file,".",sptblnames[i],sep=""),header=FALSE,skip=2,sep=" ")[,2:3] # names(out[[spnames[i]]]) <- c("frequency","spectrum") # } # } # out[["d9"]][out[["d9"]]==-999]<-NA # 
out[["Forecast with CI"]] <- list() # fct <- read.table(paste("gra\\",file,".","fct",sep=""),header=FALSE,skip=2,sep=" ") # out[["Forecast with CI"]]$estimate <-ts(fct[,2],frequency=frequency(tso),start=end(tso)) # out[["Forecast with CI"]]$lower <-ts(fct[,3],frequency=frequency(tso),start=end(tso)) # out[["Forecast with CI"]]$upper <-ts(fct[,4],frequency=frequency(tso),start=end(tso)) # out$seats <- seats # out$file <- file # out$tblnames <- tblnames # out$Rtblnames <- Rtblnames # class(out) <- "x12" ext <- c(".out",".err",".spc",".otl",".dat",".a1",".b1",".d10",".d11",".d12",".ftr",".log",".html","_err.html","_Profiler.txt","_log.html") if(!keep_x12out){ for(e in ext){ f <- paste(basename(file),e,sep="") if(file.exists(f)) file.remove(f) } } if(!keep_x12out) unlink(paste(dirname(file),"/",dirgra,sep=""),recursive=TRUE) out } #print.x12work <- function(x,editor=getOption("editor"),...){ # if(!(x$file=="Example_for_X12")) # filename <- paste(x$file,".out",sep="") # else # filename <- paste(paste(searchpaths()[grep("x12",searchpaths())],"/doc/Rout",sep=""),".out",sep="") # edit(file=filename,editor=editor,...) #}
/scratch/gouwar.j/cran-all/cranData/x12/R/x12work.R
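# ---------------------------------------------------------------------------
# Hedged usage sketch (NOT part of x12work.R above): x12work() is the low-level
# workhorse wrapped by x12(). It writes <file>.spc and <file>.dat, runs the
# binary given via x12path=, and reads the gra_* output back in. The binary
# path below is a placeholder, not a real location, and it is assumed that
# x12work() is callable directly.
if (interactive()) {
  res <- x12work(AirPassengers,
                 file           = file.path(tempdir(), "Rout"),
                 automdl        = TRUE,
                 forecast_years = 1,
                 x12path        = "/path/to/x12a",   # placeholder
                 keep_x12out    = FALSE)
  str(res, max.level = 1)
}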
.onAttach <- function(...) {
  # data(AirPassengers)
  if (x13binary::supportedPlatform()) {
    packageStartupMessage("x12 is ready to use.")
    packageStartupMessage("Use the package x12GUI for a Graphical User Interface. \n")
    x12path()
    packageStartupMessage("By default the X13-ARIMA-SEATS binaries provided by the R package x13binary\n")
    packageStartupMessage("are used but this can be changed with x12path(validpath) \n")
    packageStartupMessage("---------------\n")
    packageStartupMessage("Suggestions and bug-reports can be submitted at: https://github.com/statistikat/x12/issues")
  } else {
    packageStartupMessage("The X13-ARIMA-SEATS binaries from the R package x13binary are not available.\n")
    packageStartupMessage("Please provide the path to your own binary through the function x12path.\n")
  }
}
/scratch/gouwar.j/cran-all/cranData/x12/R/zzz.R
library(x12)
setwd(tempdir())

# AirPassenger ts x12 run
xts <- x12(AirPassengers)
s <- summary(xts)
expect_true(class(xts) == "x12Output")
expect_true(s[12, 2] == "0.26")

# AirPassenger x12Single x12 run
xs <- x12(new("x12Single", ts = AirPassengers))
s <- summary(xs)
expect_true(class(xs) == "x12Single")
expect_true(s[12, 2] == "0.26")

# AirPassenger x12Batch x12 run
xb <- x12(new("x12Batch", list(AirPassengers, AirPassengers, AirPassengers)))
xbs <- summary(xb)
expect_true(class(xb) == "x12Batch")
expect_true(all(sapply(xb@x12List, class) == "x12Single"))
expect_true(all(xbs[12, 2:4] == "0.26"))

# Create new batch object with 4 time series
xb <- new("x12Batch", list(AirPassengers, AirPassengers, AirPassengers, AirPassengers))
# change the automdl to FALSE in all 4 elements
xb <- setP(xb, list(automdl = FALSE))
# change the arima.model and arima.smodel setting for the first ts object
xb <- setP(xb, list(arima.model = c(1, 1, 0), arima.smodel = c(1, 1, 0)), 1)
# change the arima.model and arima.smodel setting for the second ts object
xb <- setP(xb, list(arima.model = c(0, 1, 1), arima.smodel = c(0, 1, 1)), 2)
# change the arima.model and arima.smodel setting for the third ts object
xb <- setP(xb, list(arima.model = c(0, 1, 1), arima.smodel = c(1, 1, 1)), 3)
# change the arima.model and arima.smodel setting for the fourth ts object
xb <- setP(xb, list(arima.model = c(1, 1, 1), arima.smodel = c(1, 1, 1)), 4)
# run x12 on all series
xb <- x12(xb)
xbs <- summary(xb)
expect_true(class(xb) == "x12Batch")
expect_true(all(sapply(xb@x12List, class) == "x12Single"))
expect_true(all(xbs[12, 2:5] == "0.26"))

# Set automdl=TRUE for the first ts
xb <- setP(xb, list(automdl = TRUE), 1)
# rerun x12 on all series (the binaries will only run on the first one)
xb <- x12(xb)
# summary with oldOutput
xbs <- summary(xb, oldOutput = 10)
# Change the parameter and output of the first series back to the first run
xb <- prev(xb, index = 1, n = 1)
# summary with oldOutput (--- No valid previous runs. ---)
summary(xb, oldOutput = 10)
/scratch/gouwar.j/cran-all/cranData/x12/inst/tinytest/test_AirPassengers.R
#' Check if X-13ARIMA-SEATS Runs Properly #' #' Performs a test run of X-13ARIMA-SEATS. Fails if no output is produced. #' #' @param fail.unsupported logical, whether being on an unsupported platform #' leads to an error. #' @param verbose logical, should a message be returned on success? #' @examples #' checkX13binary() #' #' @export checkX13binary <- function(fail.unsupported = FALSE, verbose = TRUE) { if (supportedPlatform()) { if (.Platform$OS.type == "windows") { x13.bin <- system.file("bin", "x13ashtml.exe", package="x13binary") } else { x13.bin <- system.file("bin", "x13ashtml", package="x13binary") } if (x13.bin == "") { stop("X-13 binary file not found") } dir.create(tdir <- tempfile(pattern="x13binary__", fileext="__dir")) on.exit(unlink(tdir, recursive=TRUE, force=TRUE)) file.copy(system.file("testdata", "Testairline.spc", package="x13binary"), tdir) if (.Platform$OS.type == "windows") { ## change wd on win as X-13 writes `fort.6` to it owd <- getwd() on.exit(setwd(owd), add=TRUE) setwd(tdir) ## shell() gives a more verbose output on windows sout <- shell(paste0("\"", normalizePath(x13.bin), "\"", " Testairline"), intern = TRUE) if (isTRUE(attr(sout,"status") != 0)) { if (verbose) { packageStartupMessage("Rerunning with full console output:") shell(paste0("\"", normalizePath(x13.bin), "\"", " Testairline")) } stop("When running\n\n ", x13.bin, "\n\nCommand Prompt returned the following message:\n\n", paste(strwrap(sout, indent = 2, exdent = 2), collapse = "\n"), "\n\n") } } else { sout <- system(paste(x13.bin, file.path(tdir, "Testairline")), intern = TRUE) if (isTRUE(attr(sout,"status") != 0)) { stop("When running\n\n ", x13.bin, "\n\nthe system returned the following message:\n\n", sout, "\n\n") } ## drop error if output contains the word ERROR ## (This does not necessarily lead to a non zero exit status) if (inherits(sout, "character")) { if (any(grepl("ERROR", sout))) { stop("When running\n\n ", x13.bin, "\n\nthe system returned the following message:\n\n", sout, "\n\n") } } } if (!file.exists(file.path(tdir, "Testairline.html"))) { if (inherits(sout, "character")) { stop("X-13 has not produced Testairline.html.\n", "When running\n\n ", x13.bin, "\n\nthe system returned the following message:\n\n", sout, "\n\n") } else { stop("X-13 has not produced Testairline.html.\n", "When running\n\n ", x13.bin, "\n\nthe system has returned no message.\n\n") } } if (verbose) { packageStartupMessage("x13binary is working properly") } } else { ifelse(fail.unsupported, stop, packageStartupMessage)( "Unsupported platform: ", Sys.info()["sysname"], Sys.info()["release"], "\nFor this platform, there are currently no binaries of X-13ARIMA-SEATS.") return(invisible(FALSE)) } invisible(TRUE) }
/scratch/gouwar.j/cran-all/cranData/x13binary/R/checkX13binary.R
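# ---------------------------------------------------------------------------
# Hedged usage sketch (NOT part of checkX13binary.R above): a guarded call that
# treats any error from the test run as "binary not usable".
if (requireNamespace("x13binary", quietly = TRUE)) {
  ok <- tryCatch(x13binary::checkX13binary(verbose = FALSE),
                 error = function(e) FALSE)
  if (isTRUE(ok)) message("X-13ARIMA-SEATS binary is usable")
}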
## empty -- nothing to do here
## or maybe check for binary in expected location ?
/scratch/gouwar.j/cran-all/cranData/x13binary/R/init.R
#' Test Platform Support
#'
#' Always returns \code{TRUE}.
#'
#' @examples
#' supportedPlatform()
#'
#' @export
#' @import utils
supportedPlatform <- function() {
  TRUE
}
/scratch/gouwar.j/cran-all/cranData/x13binary/R/supportedPlatform.R
#' @title Provide the \sQuote{X13-ARIMA-SEATS} Seasonal Adjustment Program
#'
#' @description The US Census provides a seasonal adjustment program now called
#' 'X-13ARIMA-SEATS' building on both earlier Census programs called X11 and
#' X12 as well as the SEATS program by the Bank of Spain. Census offers both
#' source and binary versions. This package integrates these for use by other R
#' packages.
#'
#' @name x13binary-package
#' @aliases x13binary
#' @docType package
#' @author Dirk Eddelbuettel \email{edd@@debian.org} and Christoph Sax
#' @references \url{https://www.census.gov/data/software/x13as.X-13ARIMA-SEATS.html}
#' @keywords package
NULL
/scratch/gouwar.j/cran-all/cranData/x13binary/R/x13binary-package.R
#' Full Path to X-13ARIMA-SEATS
#'
#' Returns the full path to the X-13ARIMA-SEATS binary contained in the
#' package, or \code{""} if the platform is unsupported.
#'
#' @examples
#' x13path()
#'
#' @export
x13path <- function() {
  system.file("bin", package = "x13binary")
}
/scratch/gouwar.j/cran-all/cranData/x13binary/R/x13path.R
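# ---------------------------------------------------------------------------
# Hedged usage sketch (NOT part of x13path.R above): x13path() returns the bin/
# directory of the installed package, so the executable itself can be located as:
if (requireNamespace("x13binary", quietly = TRUE)) {
  exe <- if (.Platform$OS.type == "windows") "x13ashtml.exe" else "x13ashtml"
  bin <- file.path(x13binary::x13path(), exe)
  file.exists(bin)   # TRUE on supported platforms
}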
## This file prevents R from looking for a shared library
## We use it here to (on macOS) update the dynamic library information of the just-created binary

if (grepl("darwin", R.Version()$platform)) {
  links <- trimws(readLines(pipe("otool -L ../inst/bin/x13ashtml")))
  for (lib in c("libgfortran", "libquadmath")) {
    libexpr <- paste0("/.*/", lib, ".*dylib")
    input <- unique(regmatches(links, regexpr(libexpr, links)))
    if (length(input)) {
      output <- file.path(R.home('lib'), basename(input))
      if (file.exists(output)) {
        system2('install_name_tool', c('-change', input, output, "../inst/bin/x13ashtml"))
      }
    }
  }
}
/scratch/gouwar.j/cran-all/cranData/x13binary/src/install.libs.R
scale_to_pix <- function(x3p, which, values) { scale <- 1 if (which == "x") { scale <- x3p$header.info$incrementX return(round(values / scale + 1, 0)) } if (which == "y") { scale <- x3p$header.info$incrementY return(x3p$header.info$sizeY - round(values / scale + 1, 0)) } } #' Add vertical line to the mask of an x3p object #' #' Add vertical lines to overlay the surface of an x3p object. Lines are added to a mask. In case no mask exists, one is created. #' @param x3p x3p object #' @param xintercept (vector of) numerical values for the position of the lines. #' @param size width (in pixels) of the line #' @param color (vector of) character values to describe color of lines #' @return x3p object with added vertical lines in the mask #' @export #' @examples #' \dontrun{ #' logo <- x3p_read(system.file("csafe-logo.x3p", package = "x3ptools")) #' logo_color <- magick::image_read(system.file("csafe-color.png", package = "x3ptools")) #' logoplus <- x3p_add_mask(logo, as.raster(logo_color)) #' # ten vertical lines across: #' logoplus <- x3p_add_vline(logo, seq(0, 740 * 6.4500e-7, length = 5), size = 3) #' x3p_image(logoplus, size = c(741, 419), zoom = 0.5) #' } x3p_add_vline <- function(x3p, xintercept, size = 5, color = "#e6bf98") { stopifnot("x3p" %in% class(x3p)) if (!exists("mask", x3p)) x3p <- x3p_add_mask(x3p) xs <- scale_to_pix(x3p, "x", xintercept) xindex <- rep(xs, each = size) + 1:size - 1 - size %/% 2 colors <- rep(color, each = size, length = length(xindex)) valid <- (xindex > 0) & (xindex <= x3p$header.info$sizeX) # browser() x3p$mask[, xindex[valid]] <- colors[valid] x3p } #' Add horizontal line to the mask of an x3p object #' #' Add horizontal lines to overlay the surface of an x3p object. Lines are added to a mask. In case no mask exists, one is created. #' @param x3p x3p object #' @param yintercept (vector of) numerical values for the position of the lines. #' @param size width (in pixels) of the line #' @param color (vector of) character values to describe color of lines #' @return x3p object with added vertical lines in the mask #' @export #' @examples #' \dontrun{ #' logo <- x3p_read(system.file("csafe-logo.x3p", package = "x3ptools")) #' color_logo <- magick::image_read(system.file("csafe-color.png", package = "x3ptools")) #' logoplus <- x3p_add_mask(logo, as.raster(color_logo)) #' # five horizontal lines at equal intervals: #' logoplus <- x3p_add_hline(logo, seq(0, 418 * 6.4500e-7, length = 5), size = 3) #' x3p_image(logoplus, size = c(741, 419), zoom = 0.5) #' } x3p_add_hline <- function(x3p, yintercept, size = 5, color = "#e6bf98") { stopifnot("x3p" %in% class(x3p)) if (!exists("mask", x3p)) x3p <- x3p_add_mask(x3p) ys <- scale_to_pix(x3p, "y", yintercept) yindex <- rep(ys, each = size) + 1:size - 1 - size %/% 2 colors <- rep(color, each = size, length = length(yindex)) valid <- (yindex > 0) & (yindex <= x3p$header.info$sizeY) # browser() x3p$mask[yindex[valid], ] <- colors[valid] x3p } #' Calculate grid spacing #' #' Helper function, not exported. 
#' @param x3p x3p file #' @param spaces space between grid lines #' @param axis axis to calculate, as character #' @return vector of line locations #' @keywords internal calculate_spacing <- function(x3p, spaces, axis = "y") { increment <- x3p_show_xml(x3p, paste0("increment", toupper(axis)))[[1]] size <- x3p_show_xml(x3p, paste0("size", toupper(axis)))[[1]] lines <- seq(from = increment, to = size, by = spaces) } #' Add a grid of helper lines to the mask of an x3p object #' #' Add a grid of lines to overlay the surface of an x3p object. #' Lines are added to a mask. In case no mask exists, one is created. #' @param x3p x3p object #' @param spaces space between grid lines, doubled for x #' @param size width (in pixels) of the lines #' @param color (vector of) character values to describe color of lines #' @return x3p object with added vertical lines in the mask #' @export #' @examples #' \dontrun{ #' logo <- x3p_read(system.file("csafe-logo.x3p", package = "x3ptools")) #' # ten vertical lines across: #' logoplus <- x3p_add_grid(logo, #' spaces = 50e-6, size = c(1, 3, 5), #' color = c("grey50", "black", "darkred") #' ) #' x3p_image(logoplus, size = c(741, 419), zoom = 0.5) #' } x3p_add_grid <- function(x3p, spaces, size = c(1, 3, 5), color = c("grey50", "black", "darkred")) { stopifnot("x3p" %in% class(x3p)) if (!exists("mask", x3p)) x3p <- x3p_add_mask(x3p) # ensure that both size and color are the same length if (length(color) < length(size)) color <- rep(color, length(size)) if (length(color) > length(size)) size <- rep(size, length(color)) maxX <- x3p$header.info$sizeX maxY <- x3p$header.info$sizeY yintercepts <- calculate_spacing(x3p, spaces, "Y") xintercepts <- calculate_spacing(x3p, spaces * 2, "X") if (length(yintercepts) == 1 | length(xintercepts) == 1) { warning(paste( "Line spacing does not map to x3p file correctly.", sprintf( "Spaces are at %f x %f, scan has size of %e x %e. ", 2 * spaces, spaces, maxX, maxY ), "Remember to specify grid spacing in measured units, not pixels." )) } x3plus <- x3p m <- 1 for (i in 1:length(size)) { if (i > 1 & i %% 2 == 0) { # take every fifth value m <- m * 5 } if (i > 1 & i %% 2 == 1) { # take every second value m <- m * 2 } xidx <- seq.int(1, length(xintercepts), by = m) yidx <- seq.int(1, length(yintercepts), by = m) x3plus <- x3p_add_vline(x3plus, xintercept = xintercepts[xidx], size = size[i], color = color[i]) x3plus <- x3p_add_hline(x3plus, yintercept = yintercepts[yidx], size = size[i], color = color[i]) } x3plus }
/scratch/gouwar.j/cran-all/cranData/x3ptools/R/add_lines_x3p.R
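# ---------------------------------------------------------------------------
# Hedged usage sketch (NOT part of add_lines_x3p.R above): combine the helpers
# to draw a coarse grid plus one highlighted horizontal line on a scan's mask.
# Uses the csafe-logo.x3p sample shipped with x3ptools, as in the roxygen examples.
if (requireNamespace("x3ptools", quietly = TRUE) && interactive()) {
  library(x3ptools)
  logo <- x3p_read(system.file("csafe-logo.x3p", package = "x3ptools"))
  logo <- x3p_add_grid(logo, spaces = 50e-6,
                       size = c(1, 3), color = c("grey70", "black"))
  logo <- x3p_add_hline(logo, yintercept = 100e-6, size = 5, color = "red")
  x3p_image(logo, size = c(741, 419), zoom = 0.5)
}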
#' Add/change xml meta information in x3p object
#'
#' Use a specified template to overwrite the general info in the x3p object
#' (and structure of the feature info, if needed).
#' @param x3p x3p object
#' @param template file path to xml template, use NULL for in-built package template
#' @export
#' @examples
#' logo <- x3p_read(system.file("csafe-logo.x3p", package = "x3ptools"))
#' # exchange meta information for general x3p information:
#' logo <- x3p_add_meta(logo, template = system.file("templateXML.xml", package = "x3ptools"))
#' logo$general.info
x3p_add_meta <- function(x3p, template = NULL) {
  if (is.null(template)) {
    template <- system.file("templateXML.xml", package = "x3ptools")
  }
  a1 <- read_xml(template)
  a1list <- as_list(a1)

  x3p$general.info <- a1list[[1]]$Record2
  if (is.null(x3p$feature.info)) {
    x3p$feature.info <- a1list[[1]]$Record1
    x3p$feature.info$Axes$CX$Increment <- x3p$header.info$incrementX
    x3p$feature.info$Axes$CY$Increment <- x3p$header.info$incrementY
  }
  x3p
}

#' @rdname x3p_add_meta
#' @export
addtemplate_x3p <- function(x3p, template = NULL) {
  x3p_add_meta(x3p = x3p, template = template)
}
/scratch/gouwar.j/cran-all/cranData/x3ptools/R/addtemplate_x3p.R
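# Usage sketch (assumes templateXML.xml ships with x3ptools, as in the roxygen
# example above): passing template = NULL falls back to that built-in template.
library(x3ptools)
logo <- x3p_read(system.file("csafe-logo.x3p", package = "x3ptools"))
logo <- x3p_add_meta(logo)
names(logo$general.info)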
#' Convert x3p header information to microns from meters
#'
#' ISO standard 5436_2 asks for specification of values in meters.
#' For topographic surfaces collected by microscopes values in microns are
#' more readable. Besides scaling the values in the surface matrix, corresponding
#' increments are changed to microns as well.
#' @param x3p x3p object with header information in meters
#' @return x3p object with header information in microns
#' @export
#' @import assertthat
x3p_m_to_mum <- function(x3p) {
  x3p_scale_unit(x3p = x3p, scale_by = 10^6)
}

#' Scale x3p object by given unit
#'
#' x3p objects can be presented in different units. ISO standard 5436_2 asks for specification of values in meters.
#' For topographic surfaces collected by microscopes values in microns are
#' more readable. This function allows conversion between different units.
#' @param x3p object in x3p format, 3d topographic surface.
#' @param scale_by numeric value. Factor by which surface values and increments are scaled. While not enforced, values of `scale_by` make most sense as multiples of 10 (for a metric system).
#' @return x3p object with header information and surface matrix scaled by `scale_by`
#' @export
#' @examples
#' logo <- x3p_read(system.file("csafe-logo.x3p", package = "x3ptools"))
#' logo # measurements in meters
#' x3p_scale_unit(logo, scale_by = 10^6) # measurements in microns
x3p_scale_unit <- function(x3p, scale_by) {
  assert_that("x3p" %in% class(x3p))

  # scale surface values and both increments by the same factor
  x3p$surface.matrix <- x3p$surface.matrix * scale_by
  x3p$header.info$incrementY <- x3p$header.info$incrementY * scale_by
  x3p$header.info$incrementX <- x3p$header.info$incrementX * scale_by

  x3p
}

#' Check resolution of a scan
#'
#' Scans in x3p format capture 3d topographic surfaces. According to ISO standard
#' ISO5436 - 2000 scans are supposed to be captured in meters. For microscopic images
#' capture in meters might be impractical.
#' @param x3p x3p object
#' @return numeric value of the resolution: size per pixel (in the scan's unit) along the x axis
#' @export
x3p_get_scale <- function(x3p) {
  assert_that("x3p" %in% class(x3p))

  x3p$header.info$incrementX
}
/scratch/gouwar.j/cran-all/cranData/x3ptools/R/convert.R
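# Usage sketch: convert a scan from meters to microns and back. x3p_m_to_mum()
# is a thin wrapper around x3p_scale_unit(scale_by = 10^6); scaling by 10^-6
# undoes it. x3p_get_scale() reports the x-increment in the current unit.
library(x3ptools)
logo <- x3p_read(system.file("csafe-logo.x3p", package = "x3ptools"))
x3p_get_scale(logo)                         # increment in meters per pixel
logo_mum <- x3p_m_to_mum(logo)
x3p_get_scale(logo_mum)                     # same increment expressed in microns
logo_back <- x3p_scale_unit(logo_mum, scale_by = 10^-6)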
#' Subsampled wire cut scan
#'
#' An example part of a wire cut in `x3p` format. The wire cut is part of a CSAFE study
#' involving 1.5 mm Aluminium wires cut by Kaiweet wire-cutters.
#'
#' @format `x3p` object
"wire"

#' Subsampled scan of a land-engraved area
#'
#' LEAs (land-engraved areas) are created on the outside of a bullet during
#' the firing process. Depending on the rifling inside the barrel, multiple lands
#' exist for each barrel. Striation marks in these land engraved areas are used
#' in forensic labs to determine whether two bullets were fired from the same firearm.
#'
#' @format `x3p` object
#' @examples
#' data(lea)
#' image(lea)
#' if (interactive()) x3p_image(lea)
"lea"
/scratch/gouwar.j/cran-all/cranData/x3ptools/R/data.R
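# Usage sketch based on the documentation above: `wire` and `lea` are bundled
# x3p scans and can be inspected like any other x3p object.
library(x3ptools)
data(lea)
dim(lea$surface.matrix)
image(lea)                        # 2d raster view, no rgl needed
if (interactive()) x3p_image(lea) # interactive rgl rendering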
#' Convert an x3p file into a data frame #' #' An x3p file consists of a list with meta info and a 2d matrix with scan depths. #' fortify turns the matrix into a data frame, using the parameters of the header as necessary. #' @param x3p a file in x3p format as returned by function x3p_read #' @return data frame with variables x, y, and value and meta function in attribute #' @export #' @importFrom dplyr select #' @examples #' logo <- x3p_read(system.file("csafe-logo.x3p", package="x3ptools")) #' logo_df <- x3p_to_df(logo) #' head(logo_df) x3p_to_df <- function(x3p) { info <- x3p$header.info if (is.null(info$sizeX)) { if (!is.null(info$num_obs_per_profile)) { info$sizeX <- info$num_obs_per_profile } else { warning("Assuming X is represented by rows of the surface matrix because it is not specified in header information") info$sizeX <- nrow(x3p$surface.matrix) } } if (is.null(info$sizeY)) { if (!is.null(info$num_profiles)) { info$sizeY <- info$num_profiles } else { warning("Assuming Y is represented by columns of the surface matrix because it is not specified in header information") info$sizeY <- ncol(x3p$surface.matrix) } } if (is.null(info$incrementY)) { if (!is.null(info$profile_inc)) { info$incrementY <- info$profile_inc } else { warning("Assuming Y increment is 1 - not specified") info$incrementY <- 1 } } if (is.null(info$incrementX)) { if (!is.null(info$obs_inc)) { info$incrementX <- info$obs_inc } else { warning("Assuming X increment is 1 - not specified") info$incrementX <- 1 } } # expand.grid creates grid with first variable the fastest df <- data.frame(expand.grid( x = 1:info$sizeX, y = info$sizeY:1 ), value = as.vector(x3p$surface.matrix) ) df$y <- (df$y - 1) * info$incrementY df$x <- (df$x - 1) * info$incrementX if (!is.null(x3p$mask)) { df$mask <- as.vector(x3p$mask) annotations <- x3p_mask_legend(x3p) if (!is.null(annotations)) { # browser() annotations <- tolower(annotations) rev_annotations <- tolower(names(annotations)) names(rev_annotations) <- annotations # make sure the hex code is lower case and only 6 digits wide (7 including the hash) df$maskmerge <- tolower(substr(df$mask, 1, 7)) df$annotation <- rev_annotations[df$maskmerge] # preserve all annotations, not just the ones in use df$mask <- factor(df$maskmerge, levels=annotations) df$annotation <- factor(df$annotation, levels=names(annotations)) df <- select(df, -"maskmerge") } } attr(df, "header.info") <- info attr(df, "feature.info") <- x3p$feature.info attr(df, "general.info") <- x3p$general.info df } #' Convert a data frame into an x3p file #' #' @param dframe data frame. `dframe` must have the columns x, y, and value. #' @return x3p object #' @param var name of the variable containing the surface measurements. Defaults to "value". #' @importFrom stats median #' @importFrom dplyr pull #' @export df_to_x3p <- function(dframe, var = "value") { x3p <- attributes(dframe)[-(1:3)] # first three attributes are names, row.names and class, we want to keep the others if (var != "value") dframe$value <- pull(dframe, var) # dframe must have columns x, y, and value stopifnot(!is.null(dframe$x), !is.null(dframe$y), !is.null(dframe$value)) ny <- length(unique(dframe$y)) nx <- length(unique(dframe$x)) if (nrow(dframe) != nx * ny) { message("dframe has missing values ... 
they will be expanded") df2 <- expand.grid(x = unique(dframe$x), y = unique(dframe$y)) df2 <- merge(df2, dframe, by = c("x", "y"), all.x = TRUE) dframe <- df2 } dframe$y <- (-1) * dframe$y idx <- order(dframe$x, dframe$y) dframe <- dframe[idx, ] x3p[["surface.matrix"]] <- matrix(dframe$value, # nrow = ny, ncol = nx, byrow = TRUE) nrow = nx, ncol = ny, byrow = TRUE ) if (is.null(x3p$header.info)) { x3p$header.info <- list( sizeX = nx, sizeY = ny, incrementX = median(diff(unique(dframe$x))), incrementY = median(diff(unique(dframe$y))) ) } if (is.null(x3p$matrix.info)) { x3p$matrix.info <- list(MatrixDimension = list(SizeX = nx, SizeY = ny, SizeZ = 1)) } class(x3p) <- "x3p" if ("mask" %in% names(dframe)) { x3p <- x3p %>% x3p_add_mask(mask = matrix(dframe$mask, nrow = dim(x3p$surface.matrix)[2])) # browser() if("annotation" %in% names(dframe)) { if(is.factor(dframe$mask) & is.factor(dframe$annotation)) { annotations <- data.frame( mask = levels(dframe$mask), annotation = levels(dframe$annotation) ) } else annotations <- unique(dframe[,c("mask", "annotation")]) for (i in 1:nrow(annotations)) { x3p <- x3p %>% x3p_add_annotation( color = annotations$mask[i], annotation=annotations$annotation[i]) } } } x3p }
/scratch/gouwar.j/cran-all/cranData/x3ptools/R/df_to_x3p.R
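# Usage sketch: round-trip between the x3p format and a long-form data frame.
# x3p_to_df() stores the header records as attributes, which df_to_x3p() uses to
# rebuild an x3p object of the same dimensions; centering the heights is just an
# illustrative transformation.
library(x3ptools)
logo <- x3p_read(system.file("csafe-logo.x3p", package = "x3ptools"))
logo_df <- x3p_to_df(logo)
head(logo_df)
logo_df$value <- logo_df$value - mean(logo_df$value, na.rm = TRUE)
logo2 <- df_to_x3p(logo_df)
dim(logo2$surface.matrix) == dim(logo$surface.matrix)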
#' Show meta information of an x3p file
#'
#' `head.x3p` expands the generic head method for x3p objects. It prints a summary
#' of the most relevant x3p meta information and shows the top left corner of the
#' surface matrix.
#' @param x x3p object
#' @param n number of rows/columns of the matrix
#' @param ... extra parameters passed to head.matrix()
#' @return head of the surface matrix: the first `n` rows of the first `n` columns
#' @importFrom utils head
#' @importFrom utils head.matrix
#' @export
#' @examples
#' logo <- x3p_read(system.file("csafe-logo.x3p", package = "x3ptools"))
#' head(logo)
#' @method head x3p
head.x3p <- function(x, n = 6L, ...) {
  cat(sprintf(
    "size (width x height): %d x %d in pixel \n",
    x$header.info$sizeX, x$header.info$sizeY
  ))
  cat(sprintf(
    "resolution: %.4e x %.4e \n",
    x$header.info$incrementX, x$header.info$incrementY
  ))
  head.matrix(x$surface.matrix[, 1:n], n = n, ...)
}
/scratch/gouwar.j/cran-all/cranData/x3ptools/R/head_x3p.R
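# Usage sketch: head() on an x3p object prints size and resolution and returns
# the top-left n x n corner of the surface matrix.
library(x3ptools)
logo <- x3p_read(system.file("csafe-logo.x3p", package = "x3ptools"))
head(logo, n = 3)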
#' Plot x3p object as an image #' #' Plot an interactive surface plot of the x3p matrix. This implementation uses the `rgl` package. #' In case `rgl.useNULL` is set to TRUE (i.e. no separate window will be opened), an rgl widget #' can be used to show the surface in the viewer window (see the example). #' @param x3p x3p object #' @param file file name for saving, if file is NULL the opengl device stays open. #' The file extension determines the type of output. Possible extensions are png, stl (suitable for 3d printing), or svg. #' @param col color specification #' @param size vector of width and height. If only one value is given, height or width will be adjusted proportionally to the dimensions of the surface matrix of the scan to reach an upper bound of size. #' @param zoom numeric value indicating the amount of zoom #' @param multiply exaggerate the relief by factor multiply #' @param update Boolean value indicating whether a scene should be updated (defaults to FALSE). If FALSE, a new rgl device is opened. #' @param ... not used #' @export #' @import rgl #' @importFrom rgl snapshot3d r3dDefaults #' @examples #' save <- getOption("rgl.useNULL") #' options(rgl.useNULL=TRUE) #' #' logo <- x3p_read(system.file("csafe-logo.x3p", package="x3ptools")) #' x3p_image(logo, size = c(741, 419), zoom=0.5) #' # add crosscut: #' logoplus <- x3p_add_hline(logo, yintercept = 50*.645e-6, color = "#e6bf98", size = 5) #' x3p_image(logoplus, size = c(741, 419), zoom=0.5) #' widget <- rgl::rglwidget() #' if (interactive()) #' widget #' #' options(rgl.useNULL=save) x3p_image <- function(x3p, file = NULL, col = "#cd7f32", # crosscut = NA, # ccParam = list( # color = "#e6bf98", # radius = 5 # ), size = 750, zoom = 0.35, multiply = 5, update = FALSE, ...) { stopifnot("x3p" %in% class(x3p), is.numeric(size)) surface <- x3p$surface.matrix z <- multiply * surface # Exaggerate the relief yidx <- ncol(z):1 y <- x3p$header.info$incrementY * yidx # x <- x3p$header.info$incrementX * (1:nrow(z)) # if (all(is.na(size)) | length(size) == 1) { # set size to be proportional to value given if size is only one value if (length(size) == 1 & all(is.na(size))) size <- 750 dims <- dim(surface) ratio <- dims[2]/dims[1] if (ratio < 1) size <- c(size[1], round(size[1]*dims[2]/dims[1])) else size <- c(round(size[1]*dims[1]/dims[2]), size[1]) } params <- rgl::r3dDefaults # params$viewport <- c(0,0, 750, 250) # params$windowRect <- c(40, 125, 40 + size[1], 125 + size[2]) params$userMatrix <- diag(c(1, 1, 1, 1)) params$zoom <- zoom # xyz <- matrix(c(-2000, mean(y), max(z, na.rm=TRUE)), ncol = 3) xyz <- matrix(c( min(y) - diff(range(y)), mean(y), max(z, na.rm = TRUE) ), ncol = 3) if (!update) { open3d(params = params) pop3d("lights") light3d( x = xyz, diffuse = "gray40", specular = "gray40", ambient = "grey10", viewpoint.rel = TRUE ) light3d(diffuse = "gray20", specular = "gray20") } else { # pop3d() } # if (!is.na(crosscut)) { # .Deprecated("x3p_add_hline", msg = "Use of crosscut is deprecated. Use x3p_add_hline instead.") # crosscutidx <- which.min(abs(crosscut - y)) # # colmat <- matrix(rep(col, length(z)), nrow = nrow(z), ncol = ncol(z)) # if (exists("mask", x3p)) colmat <- as.vector(x3p$mask) # # if (length(crosscutidx) > 0) { # coloridx <- pmax(crosscutidx - ccParam$radius, 0):pmin(crosscutidx + ccParam$radius, ncol(z)) # colmat[coloridx] <- ccParam$color # # I changed this to be [coloridx] instead of [, coloridx] because of the # # as.vector() call in the block above this if statement. 
# } else { # warning(sprintf("Crosscut does not map to x3p file correctly. Crosscut is at %f, scan has height of %f", crosscut, max(y))) # } # # if (crosscut > max(y)) { # warning(sprintf("Crosscut does not map to x3p file correctly. Crosscut is at %f, scan has height of %f", crosscut, max(y))) # } # # # p <- surface3d(x, y, z, color = colmat, back = "fill") # } else { if (exists("mask", x3p)) col <- as.vector(x3p$mask) p <- surface3d(x, y, z, color = col, back = "fill") # } p if (!is.null(file)) { x3p_snapshot(file) close3d() } invisible(p) } #' @export #' @rdname x3p_image image_x3p <- function(x3p, file = NULL, col = "#cd7f32", # crosscut = NA, # ccParam = list( # color = "#e6bf98", # radius = 5 # ), size = c(750, 250), zoom = 0.35, multiply = 5, ...) { x3p_image( x3p = x3p, file = file, col = col, #crosscut = crosscut, ccParam = ccParam, size = size, zoom = zoom, multiply = multiply, ... ) } #' Take a snapshot of the active rgl device and save in a file #' #' Make a snapshot of the current rgl device and save it to file. Options for file formats are png, svg, and stl (for 3d printing). #' @param file file name for saving. #' The file extension determines the type of output. Possible extensions are png, stl (suitable for 3d printing), or svg. #' @export x3p_snapshot <- function(file) { if (!is.null(file)) { splits <- strsplit(file, split = "\\.") extension <- splits[[1]][length(splits[[1]])] if (extension == "png") { rgl.snapshot(filename = file, top = TRUE) } if (extension == "svg") { rgl.postscript(filename = file, fmt = "svg") } if (extension == "stl") { writeSTL(con = file) } } } #' Raster image of an x3p surface #' #' `image.x3p` expands the generic image method for x3p objects. #' This image function creates a raster image to show the surface of an x3p file. #' Due to some inconsistency in the mapping of the origin (0,0), (choice between top left or bottom left) image functions from different packages will result in different images. #' @param x an x3p object #' @param ... parameters passed into image #' @importFrom graphics image #' @method image x3p #' @export #' @examples #' logo <- x3p_read(system.file("csafe-logo.x3p", package="x3ptools")) #' image(logo) image.x3p <- function(x, ...) { graphics::image(x$surface.matrix, ylim = c(1, 0), ...) }
/scratch/gouwar.j/cran-all/cranData/x3ptools/R/image_x3p.R
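# Usage sketch: render the surface with rgl and save a snapshot. Writing a
# snapshot needs an open rgl device, so the block is guarded with interactive();
# the temporary png file name is only an illustration.
library(x3ptools)
logo <- x3p_read(system.file("csafe-logo.x3p", package = "x3ptools"))
if (interactive()) {
  x3p_image(logo, size = c(741, 419), zoom = 0.5, multiply = 5)
  x3p_snapshot(file.path(tempdir(), "logo.png")) # extension selects the output type
}
image(logo) # quick 2d raster alternative, no rgl needed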
#' Interpolate from an x3p object
#'
#' An interpolated scan is created at specified resolutions `resx`, `resy` in x and y direction.
#' The interpolation is based on `na.approx` from the `zoo` package. It is possible to create interpolations at a higher resolution than the one specified in the data itself, but it is not recommended to do so.
#' `x3p_interpolate` can also be used as a way to linearly interpolate any missing values in an existing scan without changing the resolution.
#' @param x3p x3p object
#' @param resx numeric value specifying the new resolution for the x axis.
#' @param resy numeric value specifying the new resolution for the y axis.
#' @param maxgap integer variable used in `na.approx` to specify the maximum number of NAs to be interpolated, defaults to 1.
#' @return interpolated x3p object
#' @importFrom zoo na.approx
#' @export
#' @examples
#' logo <- x3p_read(system.file("csafe-logo.x3p", package = "x3ptools"))
#' # resolution:
#' logo$header.info$incrementX
#' # change resolution to 1 micron = 1e-6 meters
#' logo2 <- x3p_interpolate(logo, resx = 1e-6)
#' logo2$header.info$incrementX
x3p_interpolate <- function(x3p, resx = 1e-6, resy = resx, maxgap = 1) {
  stopifnot("x3p" %in% class(x3p))
  if ((resx < x3p$header.info$incrementX) | (resy < x3p$header.info$incrementY)) {
    warning("New resolution is higher than the old. Proceed with caution.\n")
  }
  if (!is.null(x3p$mask)) {
    warning("Mask will be deleted during interpolation. Use x3p_sample to preserve mask.")
    x3p <- x3p_delete_mask(x3p)
  }

  sizes <- dim(x3p$surface.matrix)
  newsize <- round(sizes * c(x3p$header.info$incrementX / resx, x3p$header.info$incrementY / resy))
  newsize <- pmax(newsize, c(1, 1))

  # interpolate along x (down the columns of the surface matrix) ...
  seqx <- seq.int(from = 1, to = sizes[1])
  newseq <- seq.int(from = 1, to = newsize[1]) * resx / x3p$header.info$incrementX
  matrix1 <- apply(x3p$surface.matrix, 2, na.approx,
    x = seqx, xout = newseq, na.rm = FALSE, maxgap = maxgap
  )

  # ... then along y (across the rows)
  seqy <- seq.int(from = 1, to = sizes[2])
  newseq <- seq.int(from = 1, to = newsize[2]) * resy / x3p$header.info$incrementY
  matrix2 <- t(apply(matrix1, 1, na.approx,
    x = seqy, xout = newseq, na.rm = FALSE, maxgap = maxgap
  ))

  if (!all(dim(matrix2) == newsize)) {
    warning(sprintf("Setting matrix dimensions to %d x %d", newsize[1], newsize[2]))
    dim(matrix2) <- newsize
  }

  x3p$surface.matrix <- matrix2
  x3p$header.info$sizeX <- nrow(x3p$surface.matrix)
  x3p$header.info$sizeY <- ncol(x3p$surface.matrix)
  x3p$header.info$incrementX <- resx
  x3p$header.info$incrementY <- resy

  x3p$matrix.info$MatrixDimension$SizeX[[1]] <- x3p$header.info$sizeX
  x3p$matrix.info$MatrixDimension$SizeY[[1]] <- x3p$header.info$sizeY

  x3p
}

#' @rdname x3p_interpolate
#' @export
interpolate_x3p <- function(x3p, resx = 1e-6, resy = resx, maxgap = 1) {
  x3p_interpolate(x3p = x3p, resx = resx, resy = resy, maxgap = maxgap)
}
/scratch/gouwar.j/cran-all/cranData/x3ptools/R/interpolate_x3p.R
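# Usage sketch: resample the logo scan to half its native resolution. The new
# increments are written back into the header; maxgap limits how many
# consecutive NAs na.approx() is allowed to fill.
library(x3ptools)
logo <- x3p_read(system.file("csafe-logo.x3p", package = "x3ptools"))
res <- x3p_get_scale(logo)
logo_coarse <- x3p_interpolate(logo, resx = 2 * res, maxgap = 1)
c(x3p_get_scale(logo), x3p_get_scale(logo_coarse))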
#' Add/Exchange a mask for an x3p object #' #' Create a mask for an x3p object in case it does not have a mask yet. #' Masks are used for overlaying colors on the bullets surface. #' @param x3p x3p object #' @param mask raster matrix of colors with the same dimensions as the x3p surface. If NULL, an object of the right size will be created. #' @return x3p object with added/changed mask #' @export #' @importFrom grDevices as.raster #' @examples #' x3p <- x3p_read(system.file("sample-land.x3p", package="x3ptools")) #' # x3p file has mask consisting color raster image: #' x3p$mask[1:5,1:5] #' \dontrun{ #' logo <- x3p_read(system.file("csafe-logo.x3p", package="x3ptools")) #' color_logo <- png::readPNG(system.file("csafe-color.png", package="x3ptools")) #' logoplus <- x3p_add_mask(logo, as.raster(color_logo)) #' x3p_image(logoplus, multiply=50, size = c(741, 419),zoom = 0.5) #' } x3p_add_mask <- function(x3p, mask = NULL) { stopifnot("x3p" %in% class(x3p)) # This is necessary so that mask information can be added # HH: I'm not sure how this could happen. Both df_to_x3p and x3p_read create the matrix if (!"matrix.info" %in% names(x3p)) { x3p$matrix.info <- list(MatrixDimension = list( SizeX = dim(x3p$surface.matrix)[1], SizeY = dim(x3p$surface.matrix)[2], SizeZ = 1 )) } dims <- rev(dim(x3p$surface.matrix)) if (is.null(mask)) { if (!"Mask" %in% names(x3p$matrix.info)) { x3p$matrix.info$Mask <- list( Background = if (length(unique(mask)) == 1) { list(unique(mask)) } else { list("#cd7f32") }, Annotations = list() ) } mask <- as.raster(matrix("#cd7f32", dims[1], dims[2])) } else { mask <- as.raster(mask) # Fix matrix/array rasters # check that the mask has the right dimensions if (!all(dim(mask) == dims)) { dm <- dim(mask) # SVP: This warning doesn't handle extra matrix dimensions :) warning(sprintf("Mask does not have the right dimensions. Mask has dimensions %d x %d should be %d x %d.", dm[1], dm[2], dims[1], dims[2])) } } x3p$mask <- mask x3p } #' Delete mask from an x3p object #' #' Deletes mask and its annotations from an x3p file. #' @param x3p x3p object #' @return x3p object without the mask #' @export x3p_delete_mask <- function(x3p) { x3p$mask <- NULL # delete mask # also delete annotations from XML file if ("Mask" %in% names(x3p$matrix.info)) { x3p$matrix.info$Mask <- NULL } x3p } #' Add annotations to an x3p object #' #' Annotations in an x3p object are legend entries for each color of a mask. 
#' @param x3p x3p object #' @param color name or hex value of color #' @param annotation character value describing the region #' @return x3p object with the added annotations #' @export #' @examples #' \dontrun{ #' logo <- x3p_read(system.file("csafe-logo.x3p", package="x3ptools")) #' color_logo <- png::readPNG(system.file("csafe-color.png", package="x3ptools")) #' logoplus <- x3p_add_mask(logo, as.raster(color_logo)) #' x3p_image(logoplus, multiply=50, size = c(741, 419),zoom = 0.5) #' logoplus <- x3p_add_annotation(logoplus, "#FFFFFFFF", "background") #' logoplus <- x3p_add_annotation(logoplus, "#818285FF", "text") #' logoplus <- x3p_add_annotation(logoplus, "#F6BD47FF", "fingerprint") #' logoplus <- x3p_add_annotation(logoplus, "#D2202FFF", "fingerprint") #' logoplus <- x3p_add_annotation(logoplus, "#92278FFF", "fingerprint") #' #' x3p_add_legend(logoplus) #' } x3p_add_annotation <- function(x3p, color, annotation) { if (!("Mask" %in% names(x3p$matrix.info))) { x3p$matrix.info$Mask <- list("Annotations") } len <- length(x3p$matrix.info$Mask$Annotations) x3p$matrix.info$Mask$Annotations[[len + 1]] <- list() x3p$matrix.info$Mask$Annotations[[len + 1]][[1]] <- annotation attr(x3p$matrix.info$Mask$Annotations[[len + 1]], "color") <- color x3p }
/scratch/gouwar.j/cran-all/cranData/x3ptools/R/mask_x3p.R
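# Usage sketch: attach a default single-color mask to a scan and record what the
# color means. The annotation label is an arbitrary choice for illustration.
library(x3ptools)
logo <- x3p_read(system.file("csafe-logo.x3p", package = "x3ptools"))
logo <- x3p_add_mask(logo)        # default bronze-colored ("#cd7f32") mask
logo <- x3p_add_annotation(logo, color = "#cd7f32", annotation = "region of interest")
logo$matrix.info$Mask$Annotations
logo <- x3p_delete_mask(logo)     # drops the mask and its annotations again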
#' Modify xml elements meta information in x3p object #' #' Identify xml fields in the meta file of an x3p object by name and modify content if uniquely described. #' @param x3p x3p object #' @param element character or integer. In case of character, name of xml field in the meta file. Note that element can contain regular expressions, e.g. `"*"` returns all meta fields. #' In case of integer, element is used as an index for the meta fields. #' @param value character. Value to be given to the xml field in the meta file. #' @return x3p object with changed meta information #' @export #' @examples #' logo <- x3p_read(system.file("csafe-logo.x3p", package="x3ptools")) #' x3p_show_xml(logo, "creator") #' x3p_modify_xml(logo, "creator", "I did that") #' x3p_show_xml(logo, 20) #' x3p_modify_xml(logo, 20, "I did that, too") x3p_modify_xml <- function(x3p, element, value) { stopifnot(inherits(x3p, "x3p"), length(value)==1, length(element)==1) find_element <- helper_identify_xml(x3p, element) # idx <- find_element[[1]] res <- find_element[[2]] if (length(res) == 0) stop(sprintf("No element found matching <%s>", element)) if (length(res) > 1) stop(sprintf("More than one element matching <%s> found: %s", element, paste(names(res), collapse = ", "))) # we found exactly one matching meta element record <- find_element[[3]] obj_str <- sprintf("x3p$%s$%s[[1]] <- value", record, gsub("\\.", "$", names(res))) eval(parse(text = obj_str)) x3p } helper_identify_xml <- function(x3p, element) { # there are four records to look for names rec1 <- x3p$header.info rec2 <- unlist(x3p$feature.info) rec3 <- unlist(x3p$general.info) rec4 <- unlist(x3p$matrix.info) other <- unlist(x3p$other.info) allrecords <- c(rec1, rec2, rec3, rec4, other) n <- c(length(rec1), length(rec2), length(rec3), length(rec4), length(other)) idx <- NULL if (is.character(element)) { idx <- grep(tolower(element), tolower(names(allrecords))) } if (is.numeric(element)) { idx <- element[element > 0 & element < length(allrecords)] } if (length(idx) == 1) { firstneg <- which(idx - cumsum(n) <= 0) record <- c("header.info", "feature.info", "general.info", "matrix.info", "other.info")[firstneg[1]] firstidx <- (idx - cumsum(c(0, n)))[firstneg[1]] } res <- lapply(idx, function(k) { firstneg <- which(k - cumsum(n) <= 0) record <- c("header.info", "feature.info", "general.info", "matrix.info", "other.info")[firstneg[1]] firstidx <- (k - cumsum(c(0, n)))[firstneg[1]] list(firstidx, allrecords[k], record) }) firstidx <- sapply(res, function(x) x[[1]]) allrecords <- sapply(res, function(x) x[[2]]) record <- sapply(res, function(x) x[[3]]) list(firstidx, allrecords, record) } #' Show xml elements from meta information in x3p object #' #' Identify xml fields by name and show content. #' @param x3p x3p object #' @param element character or integer (vector). In case of character, name of xml field in the meta file. Note that element can contain regular expressions, e.g. `"*"` returns all meta fields. #' In case of integer, element is used as an index vector for the meta fields. 
#' @return list of exact field names and their contents #' @export #' @examples #' logo <- x3p_read(system.file("csafe-logo.x3p", package="x3ptools")) #' x3p_show_xml(logo, "creator") # all fields containing the word "creator" #' x3p_show_xml(logo, "axis") #' x3p_show_xml(logo, "CZ.AxisType") #' # show all fields: #' x3p_show_xml(logo, "*") #' # show first five fields #' x3p_show_xml(logo, 1:5) x3p_show_xml <- function(x3p, element) { res <- helper_identify_xml(x3p, element) if (length(res[[2]]) == 0) warning(sprintf("no fields containing \"%s\" found. Try `element=\"*\" to see all fields.", as.character(element))) res[[2]] }
/scratch/gouwar.j/cran-all/cranData/x3ptools/R/modify_xml.R
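# Usage sketch: inspect and edit meta fields by (partial) name. Matching is
# case-insensitive and only a uniquely matched field can be modified; the new
# creator value below is an example only.
library(x3ptools)
logo <- x3p_read(system.file("csafe-logo.x3p", package = "x3ptools"))
x3p_show_xml(logo, "creator")
logo <- x3p_modify_xml(logo, "creator", "Jane Analyst")
x3p_show_xml(logo, "creator")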
#' Show meta information of an x3p file
#'
#' `print.x3p` expands the generic print method for x3p objects. It gives a summary of the most relevant x3p meta information and returns the object invisibly.
#' @param x x3p object
#' @param ... ignored
#' @export
#' @examples
#' logo <- x3p_read(system.file("csafe-logo.x3p", package = "x3ptools"))
#' print(logo)
#' @method print x3p
print.x3p <- function(x, ...) {
  cat("x3p object\n")
  cat(sprintf("Instrument: %s \n", x$general.info$Instrument$Manufacturer[[1]]))
  cat(sprintf(
    "size (width x height): %d x %d in pixel \n",
    x$header.info$sizeX, x$header.info$sizeY
  ))
  cat(sprintf(
    "resolution: %.4e x %.4e \n",
    x$header.info$incrementX, x$header.info$incrementY
  ))
  cat(sprintf("Creator: %s \n", x$general.info$Creator[[1]]))
  cat(sprintf("Comment: %s \n", x$general.info$Comment[[1]]))
  invisible(x)
}
/scratch/gouwar.j/cran-all/cranData/x3ptools/R/print_x3p.R
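# Usage sketch: the print method summarizes header and general info; the same
# fields can be pulled out directly when the values are needed programmatically.
library(x3ptools)
logo <- x3p_read(system.file("csafe-logo.x3p", package = "x3ptools"))
print(logo)
c(logo$header.info$sizeX, logo$header.info$sizeY)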
flatten <- function(list) { unlist(list, recursive = FALSE, use.names=TRUE) } #' Read an x3p file into an x3p object #' #' Read file in x3p format. x3p formats describe 3d topological surface according to #' ISO standard ISO5436 – 2000. #' x3p files are a container format implemented as a zip archive of a folder #' consisting of an xml file of meta #' information and a binary matrix of numeric surface measurements. #' @param file The file path to the x3p file, or an url to an x3p file #' @param quiet for url downloads, show download progress? #' @param size size in bytes to use for reading the binary file. If not specified, default is used. Will be overwritten if specified in the xml meta file. #' @param tmpdir temporary directory to use to extract the x3p file (default NULL uses tempdir() to set a directory). #' @return x3p object consisting of a list of the surface matrix and the four records as specified in the ISO standard #' @export #' @import xml2 #' @importFrom utils unzip download.file #' #' @examples #' logo <- x3p_read(system.file("csafe-logo.x3p", package="x3ptools")) x3p_read <- function(file, size = NA, quiet = T, tmpdir = NULL) { if (grepl("http|www", file)) { fname <- tempfile(fileext = ".x3p") download.file(file, destfile = fname, quiet = quiet, mode = "wb") on.exit(file.remove(fname)) } else { fname <- file } if (!file.exists(fname)) stop(sprintf("File %s not found.\n", fname)) ## Create a temp directory to unzip x3p file if (!is.null(tmpdir)) { mydir <- tmpdir } else { mydir <- tempdir() } try_unzip <- try({result <- unzip(fname, exdir = mydir)}, silent=TRUE) if (length(result) == 0) stop(sprintf("File %s is not an x3p file", fname)) # unzipping didn't work ## see what we got: data <- grep("data.bin$", result) # data has extension .bin meta <- grep(".xml$", result) # meta info has extension .xml mask <- grep(".png$", result, value = TRUE) # mask has extension .png # for CSAFE # browser() cadre <- FALSE if (length(mask)==0) { mask <- grep("mask.bin$", result, value = TRUE) # mask has extension .png # for Cadre if (length(mask) > 0) cadre <- TRUE } # if we have not exactly one of each we have a problem: stopifnot(length(data) == 1) # nice error messages would be good ## Should contain data.bin and valid.bin bullet_data_dir <- file.path(mydir, "bindata", dir(file.path(mydir, "bindata"))) bullet_data <- result[data] ## Get the meta information bullet_info <- lapply(result[meta], read_xml) bullet_children <- lapply(bullet_info, xml_children) bullet_childinfo <- lapply(bullet_children, xml_children) ## Convert to a list bullet_info_list <- lapply(bullet_childinfo, as_list) bullet_info_list <- flatten(bullet_info_list) bullet_info_unlist <- flatten(bullet_info_list) ## Read the data matrix sizes <- as.numeric(c(bullet_info_unlist$SizeX[[1]], bullet_info_unlist$SizeY[[1]], bullet_info_unlist$SizeZ[[1]])) increments <- as.numeric( c( bullet_info_unlist$CX$Increment[[1]], bullet_info_unlist$CY$Increment[[1]], ifelse(length(bullet_info_unlist$CZ$Increment) == 0, 1, bullet_info_unlist$CZ$Increment[[1]]) ) ) # use a default of 1 in case the Z increment is not included size2 <- NA if (bullet_info_unlist$CZ$DataType[[1]] == "F") size2 <- 4 if (bullet_info_unlist$CZ$DataType[[1]] == "D") size2 <- 8 if (!is.na(size2) & !(is.na(size))) { if (size != size2) warning(sprintf("Number of bytes specified (%d bytes) in x3p file different from requested (%d bytes)", size2, size)) } if (is.na(size)) size <- size2 # only use xml when size is not specified datamat <- matrix(readBin(bullet_data, what 
= numeric(), size = size, n = prod(sizes[1:2]) ), nrow = sizes[1], ncol = sizes[2] ) ## Store some metadata bullet_metadata <- list( sizeY = sizes[2], sizeX = sizes[1], incrementY = increments[2], incrementX = increments[1] ) input.info <- flatten(lapply(bullet_info, as_list)) if (!("Record1" %in% names(input.info))) { names(input.info) <- NULL input.info <- flatten(input.info) } # Let's make sure we have Records 1, 2, 3, and 4 record1 <- input.info$Record1 record2 <- input.info$Record2 record3 <- input.info$Record3 if (any(is.null(record1), is.null(record2), is.null(record3))) { warning("One of the crucial record files is missing, double check that the x3p is valid. Found Records named <", paste0(names(input.info), collapse = ","),">") } # is there missing info in general.info? any_empty_info <- sapply(record2, function(x) !length(x)) if (any(any_empty_info)) { idx <- which(any_empty_info) record2[idx] <- lapply(record2[idx], function(x) { x <- list("") }) } # is there any other information? other <- setdiff(names(input.info), c("Record1", "Record2", "Record3", "Record4")) record_other <- NULL if (length(other) > 0) { record_other <- input.info[other] } res <- list( header.info = bullet_metadata, surface.matrix = datamat, feature.info = record1, general.info = record2, matrix.info = record3, other.info = record_other ) # bullet_info = bullet_info) # browser() class(res) <- "x3p" if (length(mask) > 0) { # png <- magick::image_read(mask) png <- png::readPNG(mask, native = FALSE) if (cadre) { nc <- ncol(png) png <- png[,nc:1] } raster <- as.raster(png) if (!(is.na(dim(png)[3]))) { if (dim(png)[3] == 4) { # bit of a workaround - not sure why #rrggbb00 is not recognized as transparent automatically raster[png[, , 4] == 0] <- "transparent" } } # browser() res <- x3p_add_mask(res, mask = raster) } return(res) } #' @rdname x3p_read #' @export read_x3p <- x3p_read
/scratch/gouwar.j/cran-all/cranData/x3ptools/R/read_x3p.R
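# Usage sketch: x3p_read() accepts local paths and urls (urls are downloaded to
# a temporary file first); the url below is a placeholder, not a real location.
library(x3ptools)
logo <- x3p_read(system.file("csafe-logo.x3p", package = "x3ptools"))
names(logo) # surface matrix plus the header/feature/general/matrix records
# x3p_read("https://example.org/some-scan.x3p")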
#' Sample from an x3p object
#'
#' Down-sample the surface matrix (and mask, if present) of an x3p object by
#' keeping every mth value in x direction and every mYth value in y direction.
#' @param x3p x3p object
#' @param m integer value - every mth value is included in the sample in x direction
#' @param mY integer value - every mYth value is included in the sample in y direction (defaults to m)
#' @param offset integer value between 0 and m-1 to specify the offset of the sample in x direction
#' @param offsetY integer value between 0 and mY-1 to specify a different offset in y direction
#' @return down-sampled x3p object
#' @export
#' @examples
#' logo <- x3p_read(system.file("csafe-logo.x3p", package = "x3ptools"))
#' dim(logo$surface.matrix)
#' # down-sample to one-fourth of the image:
#' logo4 <- x3p_sample(logo, m = 4)
#' dim(logo4$surface.matrix)
#' \dontrun{
#' x3p_image(logo)
#' x3p_image(logo4)
#' }
x3p_sample <- function(x3p, m = 2, mY = m, offset = 0, offsetY = offset) {
  stopifnot("x3p" %in% class(x3p))
  sizes <- dim(x3p$surface.matrix)

  seqx <- seq.int(from = 1 + offset, to = sizes[1], by = m)
  seqy <- seq.int(from = 1 + offsetY, to = sizes[2], by = mY)

  x3p$surface.matrix <- x3p$surface.matrix[seqx, seqy]
  if (!is.null(x3p$mask)) {
    # the mask is stored as a raster: rows correspond to y, columns to x
    x3p$mask <- x3p$mask[seqy, seqx]
  }

  x3p$header.info$sizeX <- length(seqx)
  x3p$header.info$sizeY <- length(seqy)
  x3p$header.info$incrementX <- m * x3p$header.info$incrementX
  x3p$header.info$incrementY <- mY * x3p$header.info$incrementY

  x3p$matrix.info$MatrixDimension$SizeX[[1]] <- x3p$header.info$sizeX
  x3p$matrix.info$MatrixDimension$SizeY[[1]] <- x3p$header.info$sizeY
  x3p
}

#' @rdname x3p_sample
#' @export
sample_x3p <- function(x3p, m = 2, mY = m, offset = 0, offsetY = offset) {
  x3p_sample(x3p = x3p, m = m, mY = mY, offset = offset, offsetY = offsetY)
}
/scratch/gouwar.j/cran-all/cranData/x3ptools/R/sample_x3p.R
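# Usage sketch: down-sample with different rates and offsets in x and y.
# Unlike x3p_interpolate(), x3p_sample() keeps an existing mask (the bundled
# sample-land.x3p scan ships with one).
library(x3ptools)
land <- x3p_read(system.file("sample-land.x3p", package = "x3ptools"))
land_small <- x3p_sample(land, m = 4, mY = 2, offset = 1)
dim(land$surface.matrix)
dim(land_small$surface.matrix)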