#' Save coverage statistics to multi-worksheet Excel file.
#'
#' @param project.directory Path to project directory
#' @param file.name Name of output file
#' @param overwrite Logical indicating whether to overwrite existing file if it exists.
#'
#' @return None
#'
save.coverage.excel <- function(project.directory, file.name, overwrite = TRUE) {

    ### INPUT TESTS ###########################################################

    assertthat::assert_that( is.character(project.directory) );
    assertthat::assert_that( 1 == length(project.directory) );
    assertthat::assert_that( dir.exists(project.directory) );

    ### MAIN ##################################################################

    # get data
    coverage.sample <- get.coverage.by.sample.statistics(project.directory);
    coverage.amplicon <- get.coverage.by.amplicon(project.directory);

    # define header style for bolding
    header.style <- openxlsx::createStyle(textDecoration = 'Bold');

    workbook <- openxlsx::createWorkbook();

    # add coverage by sample
    sheet.name <- 'Coverage by sample';

    openxlsx::addWorksheet(workbook, sheetName = sheet.name);
    openxlsx::setColWidths(
        workbook,
        sheet = sheet.name,
        cols = 1:ncol(coverage.sample),
        widths = pmax(10, nchar(names(coverage.sample)) + 1)
    );
    openxlsx::writeData(
        workbook,
        sheet = sheet.name,
        coverage.sample,
        headerStyle = header.style
    );

    # add coverage by amplicon
    sheet.name <- 'Coverage by amplicon';

    openxlsx::addWorksheet(workbook, sheetName = sheet.name);
    openxlsx::setColWidths(
        workbook,
        sheet = sheet.name,
        cols = 1:ncol(coverage.amplicon),
        widths = pmax(10, nchar(names(coverage.amplicon)) + 1)
    );
    openxlsx::writeData(
        workbook,
        sheet = sheet.name,
        coverage.amplicon,
        headerStyle = header.style
    );

    ### SAVE TO FILE

    openxlsx::saveWorkbook(
        workbook,
        file.name,
        overwrite = overwrite
    );
}
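# Usage sketch (the project directory is hypothetical and must already
# contain the per-sample coverage output the getter functions expect):
#
#   save.coverage.excel(
#       project.directory = '/path/to/project',
#       file.name = 'coverage_statistics.xlsx'
#   );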
/scratch/gouwar.j/cran-all/cranData/varitas/R/save.coverage.excel.R
#' Save variants to Excel.
#'
#' @description
#' Makes an Excel workbook with variant calls. If filters are provided, these will
#' be saved to an additional worksheet within the same file.
#'
#' @param variants
#'  Data frame containing variants
#' @param file.name
#'  Name of output file
#' @param filters
#'  Optional list of filters to be saved
#' @param overwrite
#'  Logical indicating whether to overwrite existing file if it exists. Defaults to TRUE for consistency with other R functions.
#'
save.variants.excel <- function(
    variants,
    file.name,
    filters = NULL,
    overwrite = TRUE
    ) {

    ### INPUT TESTS ###########################################################

    assertthat::assert_that( is.data.frame(variants) );

    ### MAIN ##################################################################

    # define header style for bolding
    header.style <- openxlsx::createStyle(textDecoration = 'Bold');

    workbook <- openxlsx::createWorkbook();

    ## ADD VARIANTS

    sheet.name <- 'Variants';

    openxlsx::addWorksheet(workbook, sheetName = sheet.name);
    openxlsx::setColWidths(
        workbook,
        sheet = sheet.name,
        cols = 1:ncol(variants),
        widths = pmax(10, nchar(names(variants)) + 1)
    );
    openxlsx::writeData(
        workbook,
        sheet = sheet.name,
        variants,
        headerStyle = header.style
    );

    ## ADD FILTERS (IF REQUESTED)

    if( !is.null(filters) ) {

        sheet.name <- 'Filters';

        # force conversion to character vector
        filters$DUMMY <- 'DUMMY';
        filter.vector <- unlist(filters);
        filter.vector <- filter.vector[ 'DUMMY' != filter.vector ];

        filter.data <- data.frame(
            'Filter' = names(filter.vector),
            'Value' = filter.vector,
            stringsAsFactors = FALSE
        );

        # add to workbook
        openxlsx::addWorksheet(workbook, sheetName = sheet.name);
        openxlsx::setColWidths(
            workbook,
            sheet = sheet.name,
            cols = 1:2,
            widths = c(max(nchar(filter.data$Filter)), 10)
        );
        openxlsx::writeData(
            workbook,
            sheet = sheet.name,
            filter.data,
            headerStyle = header.style
        );
    }

    ### SAVE TO FILE

    openxlsx::saveWorkbook(
        workbook,
        file.name,
        overwrite = overwrite
    );
}
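# Usage sketch (hypothetical variant data frame; the filter list mirrors the
# filter names used in the package config):
#
#   variants <- data.frame(
#       sample.id = 'A', CHROM = '1', POS = 1234L, REF = 'C', ALT = 'T',
#       stringsAsFactors = FALSE
#   );
#   save.variants.excel(
#       variants = variants,
#       file.name = 'variants.xlsx',
#       filters = list(min_tumour_depth = 10, remove_exac = TRUE)
#   );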
/scratch/gouwar.j/cran-all/cranData/varitas/R/save.variants.excel.R
#' Set options for varitas pipeline.
#'
#' @description
#' Set or overwrite options for the VariTAS pipeline. Nested options should be separated by a dot.
#' For example, to update the reference genome for grch38, use reference_genome.grch38
#'
#' @param \dots options to set
#' @return None
#'
#' @examples
#' \dontrun{
#' set.varitas.options(reference_build = 'grch38');
#' set.varitas.options(
#'   filters.mutect.min_normal_depth = 10,
#'   filters.vardict.min_normal_depth = 10
#' );
#' }
#' @export
set.varitas.options <- function(...) {

    # TO DO:
    #   - implement this 'options'-style, i.e. allow for arguments passed directly

    updated.options <- get.varitas.options();
    options.to.add <- list(...);

    # currently don't support default filters - not sure how I could implement those
    if( any( grepl('^filters.default', names(options.to.add) )) ) {
        error.message <- paste(
            'Currently cannot set default filters with set.varitas.options.',
            'Please use overwrite.varitas.options instead'
        );
        stop( error.message );
    }

    # if one of the settings is mode, update that first so other settings will take precedence
    if( 'mode' %in% names(options.to.add) ) {

        option.value <- tolower( options.to.add[[ 'mode' ]] );

        # only ctDNA and tumour supported
        if( !( option.value %in% c('ctdna', 'tumour') ) ) {
            stop('mode must be either ctDNA or tumour');
        }

        # warn user that settings are being overwritten
        warning.message <- paste(
            'Setting mode to',
            option.value,
            '- overwriting any previous filters'
        );
        warning( warning.message );

        # read mode defaults from file
        mode.default.file <- system.file(
            paste0(option.value, '_defaults.yaml'),
            package = get.varitas.options('pkgname')
        );
        mode.defaults <- yaml::yaml.load_file( mode.default.file );

        # update settings for each
        for( setting.name in names(mode.defaults) ) {
            updated.options <- add.option(
                name = setting.name,
                value = mode.defaults[[ setting.name ]],
                old.options = updated.options
            );
        }

        options.to.add <- options.to.add[ 'mode' != names(options.to.add) ];
    }

    for( i in seq_along(options.to.add) ) {

        option.name <- names(options.to.add)[i];
        option.value <- options.to.add[[i]];

        updated.options <- add.option(
            name = option.name,
            value = option.value,
            old.options = updated.options
        );
    }

    options(varitas = updated.options);
}
/scratch/gouwar.j/cran-all/cranData/varitas/R/set.varitas.options.R
# Set config.yaml as VariTAS options
.onLoad <- function(libname, pkgname) {

    # add package name to enable loading it later
    # can't use add.option for this since we haven't added any varitas options yet!
    options(varitas = list(pkgname = pkgname));

    # set options according to default config file
    config.file <- system.file('config.yaml', package = pkgname);
    overwrite.varitas.options(config.file);
}
/scratch/gouwar.j/cran-all/cranData/varitas/R/setup.R
#' Make barplot of trinucleotide substitutions
#'
#' @param variants Data frame with variants
#' @param file.name Name of output file
#'
#' @return None
#'
#' @importFrom magrittr "%>%"
#'
trinucleotide.barplot <- function(variants, file.name) {

    ### INPUT TESTS ###########################################################

    if( !all(c('sample.id', 'REF', 'ALT') %in% names(variants)) ) {
        stop('Variant data frame missing required columns');
    }

    ### MAIN ##################################################################

    variants$substitution <- get.base.substitution(ref = variants$REF, alt = variants$ALT);

    counts <- variants %>%
        dplyr::group_by(substitution) %>%
        dplyr::summarise(n = n());

    barplot.data <- as.matrix( t( stats::xtabs(n ~ ., counts) ) );

    if( !is.null(file.name) ) {
        grDevices::png(
            file.name,
            width = 7,
            height = 5,
            units = 'in',
            res = 400
        );
    }

    graphics::par(
        mar = c(4, 4, 0.5, 0.5),
        cex.axis = 0.8,
        font.axis = 2,
        oma = c(0, 0, 0, 0),
        tcl = -0.2,
        las = 2,
        mgp = c(3, 0.25, 0)
    );

    graphics::barplot(
        barplot.data,
        ylab = 'Variants detected'
    );

    if( !is.null(file.name) ) grDevices::dev.off();
}
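# Usage sketch with a small synthetic variant table (values are hypothetical;
# get.base.substitution() is the internal helper, used above, that maps
# REF/ALT pairs to substitution classes):
#
#   variants <- data.frame(
#       sample.id = c('A', 'A', 'B'),
#       REF = c('C', 'T', 'C'),
#       ALT = c('T', 'G', 'A'),
#       stringsAsFactors = FALSE
#   );
#   trinucleotide.barplot(variants, file.name = 'substitutions.png');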
/scratch/gouwar.j/cran-all/cranData/varitas/R/trinucleotide.barplot.R
#' Make barplot of variant recurrence across samples, coloured by caller
#'
#' @param variants Data frame with variants
#' @param file.name Name of output file
#'
#' @return None
#'
#' @importFrom magrittr "%>%"
#'
variant.recurrence.barplot <- function(variants, file.name) {

    ### INPUT TESTS ###########################################################

    if( !is.data.frame(variants) ) {
        stop('variants must be a data frame');
    }

    if( !all(c('CHROM', 'POS', 'REF', 'ALT', 'caller') %in% names(variants)) ) {
        stop('variants must contain the columns CHROM, POS, REF, ALT, and caller');
    }

    ### MAIN ##################################################################

    # add a label for each variant
    variants$position.id <- paste0(variants$CHROM, ':', variants$POS, ' | ', variants$REF, '->', variants$ALT);

    # prepare data for barplot
    position.counts <- variants %>%
        dplyr::group_by(position.id, caller) %>%
        dplyr::summarise(n = n()) %>%
        dplyr::ungroup() %>%
        tidyr::complete(position.id, caller, fill = list(n = 0));

    counts.wide <- position.counts %>% tidyr::spread(position.id, n);

    counts.matrix <- as.matrix(counts.wide[, 'caller' != names(counts.wide) ]);
    rownames(counts.matrix) <- counts.wide$caller;

    if( nrow(counts.matrix) > 1 ) {
        # should only be ordered if there is more than one row
        counts.matrix <- counts.matrix[, order(colSums(counts.matrix), decreasing = TRUE) ];
    }

    # colour scheme
    # use London Underground colours
    colour.scheme <- c(
        'Central' = '#E32017',
        'Circle' = '#FFD300',
        'Hammersmith and City' = '#F3A9BB',
        'Jubilee' = '#A0A5A9',
        'Waterloo and City' = '#95CDBA',
        'Metropolitan' = '#9B0056',
        'Northern' = '#000000',
        'Piccadilly' = '#003688',
        'DLR' = '#00A4A7',
        'Overground' = '#EE7C0E',
        'Victoria' = '#0098D4',
        'Tramlink' = '#84B817',
        'Cable Car' = '#E21836',
        'Crossrail' = '#7156A5',
        'District' = '#00782A',
        'Bakerloo' = '#B36305'
    );

    if( !is.null(file.name) ) {
        grDevices::png(
            file.name,
            width = 8,
            height = 5,
            units = 'in',
            res = 300
        );
    }

    graphics::par(
        mar = c(8, 4, 0.5, 8),
        cex.axis = 0.7,
        font.axis = 2,
        oma = c(0, 0, 0, 0),
        tcl = -0.2,
        las = 2,
        mgp = c(3, 0.25, 0)
    );

    graphics::barplot(
        counts.matrix[, 1:min(25, ncol(counts.matrix)) ],
        ylab = 'Samples with variant',
        col = colour.scheme[ 1:nrow(counts.matrix) ],
        legend.text = rownames(counts.matrix),
        args.legend = list(
            x = graphics::par('usr')[2],
            y = graphics::par('usr')[4]/2,
            xjust = 0,
            yjust = 0.5,
            xpd = TRUE,
            bty = 'n'
        )
    );

    if( !is.null(file.name) ) grDevices::dev.off();
}
/scratch/gouwar.j/cran-all/cranData/varitas/R/variant.recurrence.barplot.R
#' Make barplot of variants per caller
#'
#' @param variants
#'  Data frame with variants
#' @param file.name
#'  Name of output file
#' @param group.by
#'  Optional grouping variable for barplot
#'
#' @return None
#'
#' @importFrom magrittr "%>%"
#'
variants.caller.barplot <- function(
    variants,
    file.name,
    group.by = NULL
    ) {

    ### INPUT TESTS ###########################################################

    if( !all(c('sample.id', 'REF', 'ALT') %in% names(variants)) ) {
        stop('Variant data frame missing required columns');
    }

    ### MAIN ##################################################################

    # colour scheme from COSMIC
    trinucleotide.colours <- c(
        'C>A' = '#5CBDEB',
        'C>G' = '#050708',
        'C>T' = '#D23C32',
        'T>A' = '#CBCACB',
        'T>C' = '#ABCD72',
        'T>G' = '#E7C9C6'
    );

    if( !is.null(group.by) && 'substitution' == group.by && !('substitution' %in% names(variants)) ) {
        variants$substitution <- get.base.substitution(ref = variants$REF, alt = variants$ALT);
    } else if( !is.null(group.by) && 'type' == group.by && !('type' %in% names(variants)) ) {
        variants$type <- classify.variant(ref = variants$REF, alt = variants$ALT);
    }

    split.variants <- split.on.column(
        variants,
        column = 'caller',
        split.character = ':'
    );

    # capitalise caller names properly
    split.variants$caller <- capitalize.caller(split.variants$caller);

    grouping.variables <- 'caller';
    if( !is.null(group.by) ) {
        grouping.variables <- c(grouping.variables, group.by);
    }

    counts <- split.variants %>%
        dplyr::group_by_at(grouping.variables) %>%
        dplyr::summarise(n = n());

    barplot.data <- as.matrix( t(stats::xtabs(n ~ ., counts)) );

    colour.scheme <- c('#0039A6', '#FF6319', '#6CBE45', '#996633', '#A7A9AC', '#FCCC0A');
    if( !is.null(group.by) && 'substitution' == group.by ) {
        colour.scheme <- trinucleotide.colours[ rownames(barplot.data) ];
    }

    if( !is.null(file.name) ) {
        grDevices::png(
            file.name,
            width = 6,
            height = 5,
            units = 'in',
            res = 300
        );
    }

    graphics::par(
        mar = c(4.5, 4, 0.5, 5),
        cex.axis = 0.9,
        font.axis = 2,
        oma = c(0, 0, 0, 0),
        tcl = -0.2,
        las = 2,
        mgp = c(3, 0.25, 0)
    );

    graphics::barplot(
        barplot.data,
        ylab = 'Variants detected',
        col = colour.scheme,
        legend.text = rownames(barplot.data),
        args.legend = list(
            x = graphics::par('usr')[2],
            y = graphics::par('usr')[4]/2,
            xjust = 0,
            yjust = 0.5,
            xpd = TRUE,
            bty = 'n'
        )
    );

    if( !is.null(file.name) ) grDevices::dev.off();
}
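# Usage sketch: group per-caller counts by substitution class to use the
# COSMIC colours above (values are hypothetical; callers are colon-separated
# when a variant was reported by more than one tool, and split.on.column()
# separates them into one row per caller):
#
#   variants <- data.frame(
#       sample.id = 'A', REF = 'C', ALT = 'T', caller = 'vardict:mutect',
#       stringsAsFactors = FALSE
#   );
#   variants.caller.barplot(variants, file.name = 'callers.png', group.by = 'substitution');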
/scratch/gouwar.j/cran-all/cranData/varitas/R/variants.caller.barplot.R
#' Make barplot of variants per sample
#'
#' @param variants Data frame with variants
#' @param file.name Name of output file
#'
#' @return None
#'
#' @importFrom magrittr "%>%"
#'
variants.sample.barplot <- function(variants, file.name) {

    ### INPUT TESTS ###########################################################

    if( !all(c('sample.id', 'Type') %in% names(variants)) ) {
        stop('Variant data frame missing required columns');
    }

    ### MAIN ##################################################################

    variants$Type <- factor(
        variants$Type,
        levels = c('SNV', 'MNV', 'indel')
    );

    counts <- variants %>%
        dplyr::group_by(sample.id, Type) %>%
        dplyr::summarise(n = n()) %>%
        dplyr::ungroup() %>%
        tidyr::complete(sample.id, Type, fill = list(n = 0));

    counts.wide <- counts %>% tidyr::spread(sample.id, n);

    barplot.data <- as.matrix( counts.wide[, 'Type' != names(counts.wide)] );
    rownames(barplot.data) <- counts.wide$Type;

    totals <- apply(barplot.data, 2, sum);
    max.value <- max(totals);

    # try to select a decent value for the axis font size
    cex.axis <- 1.2;
    if( length( unique(counts$sample.id) ) > 30 ) cex.axis <- 0.6;

    print(barplot.data);

    if( !is.null(file.name) ) {
        grDevices::png(
            file.name,
            width = 9,
            height = 5,
            units = 'in',
            res = 300
        );
    }

    graphics::par(
        mar = c(4.5, 2, 0.5, 0.5),
        cex.axis = 0.7,
        font.axis = 1,
        oma = c(0, 0, 0, 0),
        tcl = -0.2,
        las = 2,
        mgp = c(3, 0.25, 0)
    );

    graphics::barplot(
        barplot.data,
        ylim = c(0, max.value * 1.15),
        ylab = 'Variants detected',
        col = c('#E32017', '#FFD300', '#F3A9BB'),
        legend.text = rownames(barplot.data),
        args.legend = list(
            x = 'top',
            ncol = 3,
            xjust = 0,
            yjust = 0.5,
            xpd = TRUE,
            bty = 'n'
        )
    );

    if( !is.null(file.name) ) grDevices::dev.off();
}
/scratch/gouwar.j/cran-all/cranData/varitas/R/variants.sample.barplot.R
#' varitas: VariTAS
#'
NULL

if( getRversion() >= "2.15.1" ) {
    utils::globalVariables(
        c(
            'sample.id', 'Type', 'n', 'substitution', '.',
            'position.id', 'caller', 'gene'
        )
    );
}
/scratch/gouwar.j/cran-all/cranData/varitas/R/varitas-pkg.R
#' Check that sample specification data frame matches expected format, and that all files exist
#'
#' @param bam.specification Data frame containing columns sample.id and tumour.bam, and optionally a column normal.bam.
#'
#' @return None
#'
verify.bam.specification <- function(bam.specification) {

    ## check type
    if( !is.data.frame(bam.specification) ) {
        stop('bam.specification is not a data frame.');
    }

    ## check data frame dimensions - expect between 2 and 4 columns
    if( ncol(bam.specification) < 2 ) {
        stop('bam.specification has fewer than 2 columns.');
    }

    if( ncol(bam.specification) > 4 ) {
        stop('bam.specification has more than 4 columns.');
    }

    ## check column headers
    for( label in c('sample.id', 'tumour.bam') ) {
        if( !(label %in% names(bam.specification)) ) {
            error.message <- paste('No column named', label, 'in bam.specification');
            stop(error.message);
        }
    }

    # can't check that the files exist, since they might be created later

    # check for white space in sample IDs
    if( any( grepl('\\s', bam.specification$sample.id) ) ) {
        stop('Sample IDs can not contain whitespace.');
    }
}
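# Usage sketch, mirroring the unmatched example from the package vignette
# (file names are hypothetical; a matched tumour/normal specification would
# add a normal.bam column):
#
#   bam.specification <- data.frame(
#       sample.id = c('Z', 'Y'),
#       tumour.bam = c('Z.bam', 'Y.bam'),
#       stringsAsFactors = FALSE
#   );
#   verify.bam.specification(bam.specification); # silent if the format is valid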
/scratch/gouwar.j/cran-all/cranData/varitas/R/verify.bam.specification.R
#' verify.bwa.index
#'
#' @inheritParams verify.fasta.index
#'
#' @description
#' Verify that bwa index files exist for a fasta file
#'
#' @return index.files.exist Logical indicating if bwa index files were found (only returned if error set to FALSE)
#'
verify.bwa.index <- function(fasta.file, error = FALSE) {

    bwa.index.files <- paste0(fasta.file, '.', c('amb', 'ann', 'bwt', 'pac', 'sa'));
    index.files.exist <- all( file.exists(bwa.index.files) );

    if( error ) {
        if( !index.files.exist ) {
            error.message <- paste('Index files not found for reference genome file', fasta.file, '- try running bwa index.');
            stop(error.message);
        }
    } else {
        return(index.files.exist);
    }
}
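# Usage sketch (hypothetical reference path). With the default error = FALSE
# the function returns a logical, so it can gate an indexing step:
#
#   if( !verify.bwa.index('/path/to/genome.fa') ) {
#       system('bwa index /path/to/genome.fa');
#   }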
/scratch/gouwar.j/cran-all/cranData/varitas/R/verify.bwa.index.R
#' verify.fasta.index
#'
#' @description
#' Verify that fasta index files exist for a given fasta file.
#'
#' @param fasta.file Fasta file to check
#' @param error Logical indicating whether to throw an (informative) error if verification fails
#'
#' @return faidx.exists Logical indicating if fasta index files were found (only returned if error set to FALSE)
#'
verify.fasta.index <- function(fasta.file, error = FALSE) {

    faidx.file <- paste0(fasta.file, '.fai');
    faidx.exists <- file.exists(faidx.file);

    if( error ) {
        assertthat::assert_that(
            file.exists(faidx.file),
            msg = paste('Fasta index file not found for file', fasta.file, '\nTry running samtools faidx.')
        );
    } else {
        return(faidx.exists);
    }
}
/scratch/gouwar.j/cran-all/cranData/varitas/R/verify.fasta.index.R
#' Check that FASTQ specification data frame matches expected format, and that all files exist
#'
#' @param fastq.specification Data frame containing columns sample.id and reads, and optionally a column mates
#' @param paired.end Logical indicating whether paired end reads are used
#' @param files.ready Logical indicating if the files already exist on disk. If there are job dependencies, this should be set to FALSE.
#'
#' @return None
#'
verify.fastq.specification <- function(
    fastq.specification,
    paired.end = FALSE,
    files.ready = FALSE
    ) {

    ## check type
    if( !is.data.frame(fastq.specification) ) {
        stop('fastq.specification is not a data frame.');
    }

    ## check data frame dimensions - expect at least 2 columns
    if( ncol(fastq.specification) < 2 ) {
        stop('fastq.specification has fewer than 2 columns.');
    }

    ## check column headers
    for( label in c('sample.id', 'reads') ) {
        if( !(label %in% names(fastq.specification)) ) {
            error.message <- paste('No column named', label, 'in fastq.specification');
            stop(error.message);
        }
    }

    if( paired.end && !( 'mates' %in% names(fastq.specification) ) ) {
        stop('No column named mates in fastq.specification');
    }

    # if files are supposed to be ready on disk,
    # check that files exist
    if( files.ready ) {

        read.files <- fastq.specification$reads;
        if( paired.end ) {
            read.files <- c(read.files, fastq.specification$mates);
        }

        reads.exist <- file.exists(read.files);
        if( !all(reads.exist) ) {
            error.message <- paste0(
                'The following FASTQ files do not exist:\n',
                paste(read.files[!reads.exist], collapse = '\n')
            );
            stop(error.message);
        }

        # check for white space in sample IDs
        if( any(grepl('\\s', fastq.specification$sample.id)) ) {
            stop('Sample IDs can not contain whitespace.');
        }
    }
}
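# Usage sketch, mirroring the paired-end example from the package vignette
# (file names are hypothetical; with the default files.ready = FALSE the
# paths are not required to exist yet):
#
#   fastq.specification <- data.frame(
#       sample.id = c('A', 'B'),
#       reads = c('A_1.fq', 'B_1.fq'),
#       mates = c('A_2.fq', 'B_2.fq'),
#       stringsAsFactors = FALSE
#   );
#   verify.fastq.specification(fastq.specification, paired.end = TRUE);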
/scratch/gouwar.j/cran-all/cranData/varitas/R/verify.fastq.specification.R
#' verify.sequence.dictionary
#'
#' @inheritParams verify.fasta.index
#'
#' @description
#' Verify that sequence dictionary exists for a fasta file.
#'
#' @return dict.exists Logical indicating if sequence dictionary files were found (only returned if error set to FALSE)
#'
verify.sequence.dictionary <- function(fasta.file, error = FALSE) {

    fasta.file.extension <- tools::file_ext(fasta.file);
    dict.file <- gsub(
        pattern = paste0('.', fasta.file.extension, '$'),
        replacement = '.dict',
        fasta.file
    );

    dict.exists <- file.exists(dict.file);

    if( error ) {
        if( !dict.exists ) {
            error.message <- paste('Sequence dictionary not found for file', fasta.file, '- try running GATK CreateSequenceDictionary.');
            stop(error.message);
        }
    } else {
        return(dict.exists);
    }
}
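# Usage sketch: for a hypothetical /path/to/genome.fa, the expected
# dictionary file is /path/to/genome.dict (the extension is swapped for .dict):
#
#   verify.sequence.dictionary('/path/to/genome.fa');                # returns TRUE/FALSE
#   verify.sequence.dictionary('/path/to/genome.fa', error = TRUE);  # stops if missing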
/scratch/gouwar.j/cran-all/cranData/varitas/R/verify.sequence.dictionary.R
#' Check against common errors in the VariTAS options.
#'
#' @description
#' Check against common errors in the VariTAS options before launching into pipeline
#'
#' @param stages.to.run
#'  Vector indicating which stages should be run. Defaults to all possible stages.
#'  If only running a subset of stages, only checks corresponding to the desired stages are run
#' @param variant.callers
#'  Vector indicating which variant callers to run. Only used if calling is in \code{stages.to.run}.
#' @param varitas.options
#'  Optional file path or list of VariTAS options.
#'
#' @return None
#'
verify.varitas.options <- function(
    stages.to.run = c('alignment', 'qc', 'calling', 'annotation', 'merging'),
    variant.callers = c('mutect', 'vardict', 'ides', 'varscan', 'lofreq', 'muse'),
    varitas.options = NULL
    ) {

    ## TO DO:
    #   - ordering of reference genome and VCF files for MuTect
    #   - chr-compatibility of target panel and reference genome
    #   - check if QC needs any specific settings

    stages.to.run <- tolower(stages.to.run);

    ### INPUT TESTS ###########################################################

    # NOTE:
    #  - Merging currently does not require any specific config settings, so no checks are run.
    #    For ease-of-use, the stage is still included as an option.
    supported.stages <- c('alignment', 'qc', 'calling', 'annotation', 'merging');

    if( !all(stages.to.run %in% supported.stages) ) {
        unrecognized.stages <- stages.to.run[ !(stages.to.run %in% supported.stages) ];

        error.message <- paste('The following stages are not supported:', paste(unrecognized.stages, collapse = ', '));
        stop(error.message);
    }

    assertthat::assert_that(
        is.null(varitas.options) || is.list(varitas.options) || is.character(varitas.options),
        msg = 'varitas.options must be a list of options or a string giving the path to the config YAML file.'
    );

    assertthat::assert_that(
        !is.character(varitas.options) || file.exists(varitas.options),
        msg = paste('varitas.options file', varitas.options, 'does not exist')
    );

    ### MAIN ##################################################################

    if( is.null(varitas.options) ) {
        varitas.options <- get.varitas.options();
    } else if( is.character(varitas.options) ) {
        # NOTE:
        # this is a bit messy, but the idea is that it should be possible to verify
        # a possible set of options BEFORE setting them.
        # => allow both list, character, and no input
        varitas.options <- yaml::yaml.load_file(varitas.options);
    }

    ### GENERIC SETTINGS

    # is this needed for merging actually?
    assertthat::assert_that(
        'reference_build' %in% names(varitas.options),
        msg = 'config must include reference_build'
    );

    reference.build <- varitas.options$reference_build;
    assertthat::assert_that(
        reference.build %in% c('grch37', 'grch38'),
        msg = 'reference_build must be either grch37 or grch38'
    );

    ### REFERENCE GENOME

    # all stages except merging use reference genome
    # => check existence here
    if( !identical(stages.to.run, 'merging') ) {

        # reference genome exists and has necessary derivative files
        reference.genome <- varitas.options$reference_genome[[ reference.build ]];

        assertthat::assert_that(
            file.exists(reference.genome),
            msg = paste('Reference genome file', reference.genome, 'not found')
        );

        # reference genome has fa or fasta extension
        # (this is probably stricter than necessary)
        reference.genome.extension <- tools::file_ext(reference.genome);
        assertthat::assert_that(
            tolower(reference.genome.extension) %in% c('fa', 'fasta'),
            msg = paste('Reference genome file', reference.genome, 'does not have extension .fa or .fasta')
        );

        # needed for future steps
        reference.genome.chromosomes <- get.fasta.chromosomes(reference.genome);

        # there's an upper limit on how long the error message can be
        # => truncate the chromosome list if needed
        if( length(reference.genome.chromosomes) > 25 ) {
            reference.chromosome.string <- paste(c(reference.genome.chromosomes[1:25], '...'), collapse = ' ');
        } else {
            reference.chromosome.string <- paste(reference.genome.chromosomes, collapse = ' ');
        }
    }

    ### TARGET PANEL

    # needed for both alignment and variant calling
    if( 'alignment' %in% stages.to.run || 'calling' %in% stages.to.run ) {

        assertthat::assert_that(
            'target_panel' %in% names(varitas.options),
            msg = 'target_panel must be provided for alignment and variant calling stages'
        );

        target.panel <- varitas.options$target_panel;
        assertthat::assert_that(
            file.exists( target.panel ),
            msg = paste('target_panel file', target.panel, 'does not exist')
        );

        panel.chromosomes <- get.bed.chromosomes(target.panel);

        assertthat::assert_that(
            all(panel.chromosomes %in% reference.genome.chromosomes),
            msg = paste(
                'Mismatch between reference genome and target panel.\n',
                'Reference genome chromosomes:', reference.chromosome.string, '\n',
                'Target panel chromosomes:', paste(panel.chromosomes, collapse = ' ')
            )
        );

        bed.data <- utils::read.table(target.panel);

        gene.column <- 5;
        if( !(any( grepl('GENE_ID', bed.data[, gene.column] ))) ) {
            gene.column <- 6;
        }
        if( !(any( grepl('GENE_ID', bed.data[, gene.column] ))) ) {
            stop('Target panel must contain gene/feature IDs in the 5th or 6th column\n In the format of GENE_ID=...;etc.');
        }
    }

    ### ALIGNMENT-SPECIFIC OPTIONS
    if( 'alignment' %in% stages.to.run ) {

        # check that bwa index has been run on the reference genome
        verify.bwa.index(reference.genome, error = TRUE);
        verify.sequence.dictionary(reference.genome, error = TRUE);
        verify.fasta.index(reference.genome, error = TRUE);

        # Picard jar file
        assertthat::assert_that(
            'picard_jar' %in% names(varitas.options),
            msg = 'config must contain picard_jar when running alignment'
        );

        picard.jar <- varitas.options$picard_jar;
        assertthat::assert_that(
            file.exists(picard.jar),
            msg = paste('picard_jar file', picard.jar, 'does not exist')
        );

        # GATK jar file
        assertthat::assert_that(
            'gatk_jar' %in% names(varitas.options),
            msg = 'config must contain gatk_jar if running alignment'
        );

        gatk.jar <- varitas.options$gatk_jar;
        assertthat::assert_that(
            file.exists(gatk.jar),
            msg = paste('gatk_jar file', gatk.jar, 'does not exist')
        );
    }

    ### VARIANT CALLING
    if( 'calling' %in% stages.to.run ) {

        verify.sequence.dictionary(reference.genome, error = TRUE);
        verify.fasta.index(reference.genome, error = TRUE);

        if( 'mutect' %in% variant.callers ) {

            # TO DO:
            #   - parameterize whether to use dbSNP and cosmic files

            # GATK jar file
            assertthat::assert_that(
                'gatk_jar' %in% names(varitas.options),
                msg = 'config must contain gatk_jar if running MuTect'
            );

            gatk.jar <- varitas.options$gatk_jar;
            assertthat::assert_that(
                file.exists(gatk.jar),
                msg = paste('gatk_jar file', gatk.jar, 'does not exist')
            );
        }

        if( 'vardict' %in% variant.callers ) {

            assertthat::assert_that(
                'vardict_path' %in% names(varitas.options),
                msg = 'config must include vardict_path if running VarDict'
            );

            vardict.path <- varitas.options$vardict_path;
            assertthat::assert_that(
                dir.exists( vardict.path ),
                msg = paste('vardict_path directory', vardict.path, 'does not exist or is not a directory')
            );
        }

        if( 'varscan' %in% variant.callers ) {

            assertthat::assert_that(
                'varscan_path' %in% names(varitas.options),
                msg = 'config must include varscan_path if running VarScan'
            );

            varscan.path <- varitas.options$varscan_path;
            assertthat::assert_that(
                file.exists( varscan.path ),
                msg = paste('varscan_path file', varscan.path, 'does not exist')
            );
        }

        if( 'lofreq' %in% variant.callers ) {

            assertthat::assert_that(
                'lofreq_path' %in% names(varitas.options),
                msg = 'config must include lofreq_path if running LoFreq'
            );

            lofreq.path <- varitas.options$lofreq_path;
            assertthat::assert_that(
                file.exists( lofreq.path ),
                msg = paste('lofreq_path file', lofreq.path, 'does not exist')
            );
        }

        if( 'muse' %in% variant.callers ) {

            assertthat::assert_that(
                'muse_path' %in% names(varitas.options),
                msg = 'config must include muse_path if running MuSE'
            );

            muse.path <- varitas.options$muse_path;
            assertthat::assert_that(
                file.exists( muse.path ),
                msg = paste('muse_path file', muse.path, 'does not exist')
            );
        }
    }

    ### ANNOTATION
    if( 'annotation' %in% stages.to.run ) {

        # ANNOVAR path
        assertthat::assert_that(
            'annovar_path' %in% names(varitas.options),
            msg = 'config must include annovar_path when running annotation'
        );

        annovar.path <- varitas.options$annovar_path;
        assertthat::assert_that(
            dir.exists(annovar.path),
            msg = paste('annovar_path directory', annovar.path, 'does not exist or is not a directory')
        );

        # ANNOVAR database
        annovar.database <- varitas.options$annovar_database[[ reference.build ]];
        assertthat::assert_that(
            dir.exists(annovar.database),
            msg = paste('annovar_database directory', annovar.database, 'does not exist or is not a directory')
        );

        # TO DO: annotation-specific database
        required.databases <- c(
            'cytoBand', 'cosmic70', 'sites\\.2015_08', 'exac03nontcga',
            'clinvar_20170130', 'nci60', 'icgc21', 'dbnsfp30a', 'dbnsfp31a_interpro'
        );

        for( db in required.databases ) {
            matched.files <- list.files(pattern = db, path = annovar.database);

            # should this be > 0 or > 1? Will it work without .idx files
            assertthat::assert_that(
                length(matched.files) > 0,
                msg = paste('Missing required database', db, 'in', annovar.database)
            );
        }
    }
}
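# Usage sketch: verify a candidate config before loading it, restricted to
# the stages you plan to run (the YAML path is hypothetical):
#
#   verify.varitas.options(
#       stages.to.run = c('annotation', 'merging'),
#       varitas.options = 'varitas_config.yaml'
#   );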
/scratch/gouwar.j/cran-all/cranData/varitas/R/verify.varitas.options.R
#' verify.vcf.specification
#'
#' @description
#' Verify that VCF specification data frame fits expected format
#'
#' @param vcf.specification
#'  VCF specification data frame
#'
#' @return None
#'
verify.vcf.specification <- function(vcf.specification) {

    # check type
    if( !is.data.frame(vcf.specification) ) {
        stop('vcf.specification is not a data frame.');
    }

    # check data frame dimensions - expect at least 2 columns
    if( ncol(vcf.specification) < 2 ) {
        stop('vcf.specification has fewer than 2 columns.');
    }

    # check that sample.id and vcf columns exist
    if( !( 'sample.id' %in% names(vcf.specification) ) ) {
        stop('vcf.specification is missing required column sample.id');
    }

    if( !( 'vcf' %in% names(vcf.specification) ) ) {
        stop('vcf.specification is missing required column vcf');
    }

    # check for white space in sample IDs
    if( any( grepl('\\s', vcf.specification$sample.id) ) ) {
        stop('Sample IDs can not contain whitespace.');
    }
}
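# Usage sketch, mirroring the hybrid example from the package vignette
# (VCF paths are hypothetical; a caller column is optional here but used
# downstream for filtering and job naming):
#
#   vcf.specification <- data.frame(
#       sample.id = c('1', '2'),
#       vcf = c('Sample_1.vcf', 'Sample_2.vcf'),
#       caller = 'pgm',
#       stringsAsFactors = FALSE
#   );
#   verify.vcf.specification(vcf.specification);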
/scratch/gouwar.j/cran-all/cranData/varitas/R/verify.vcf.specification.R
## ----setup, include=FALSE-----------------------------------------------------
knitr::opts_knit$set(root.dir = '../')
/scratch/gouwar.j/cran-all/cranData/varitas/inst/doc/errors.R
--- title: "What Does This Error Mean?" author: "Adam Mills" date: "`r Sys.Date()`" output: html_document: toc: yes theme: united highlight: kate toc_float: collapsed: true smooth_scroll: true pdf_document: toc: yes bibliography: references.bib vignette: > %\VignetteIndexEntry{Errors} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r setup, include=FALSE} knitr::opts_knit$set(root.dir = '../') ``` ## The Purpose of This Guide As any bioinformatician knows, there are few things more frustrating than trying to understand how to use someone else's program. I struggled with this myself while working on this package. However, in the realm of scientific research, we must learn to appreciate the stringency of our frequently used tools. I will not tell you to ignore the various warnings and errors produced by VariTAS in this vignette, because they are essential to ensure that the pipeline produces statistically robust, reproducible results. That being said, I empathise with the frustration of trying to use a new tool only to be met with a barrage of errors and incompatible data. So to minimise the amount of time you have to spend interpreting laconic error messages and resubmitting processes, I have written this guide. I hope that it helps to explain why these errors are thrown and more importantly, how to make them go away. ## Verifying VariTAS Options These are errors thrown when the pipeline is verifying the various options and parameters submitted to it through the config file. This includes a number of 'file ____ does not exist'-type errors that I have omitted for what I hope are obvious reasons. ### The following stages are not supported: ____ An incompatible stage has been submitted to the main pipeline function. The only supported stages are 'alignment', 'qc', 'calling', 'annotation', and 'merging'. #### Solution Ensure that the `start.stage` parameter is set to one of the allowed stages. ### `varitas.options` must be a list of options or a string giving the path to the config YAML file Whatever you have tried to use as the VariTAS options file is incorrect. You shouldn't see this error if you're following the template in the Introduction vignette. #### Solution Ensure that you are pointing to the correct file when submitting it to `overwrite.varitas.options`. It should be based on the `config.yaml` file contained in the `inst` directory of this package. ### config must include `reference_build` There must be a `reference_build` parameter set somewhere in the config file so that the script knows which version of the genome you are using. This setting is present in the `config.yaml` file found in the `inst` directory of this package. #### Solution Add a parameter to the config file called `reference_build` and make sure it's set to either 'grch37' or 'grch38' (anything else will cause you to run into the next error). ### `reference_build` must be either grch37 or grch38 The `reference_build` parameter in the config file can only be set to either 'grch37' or 'grch38', which are the two versions of the human genome supported by the pipeline. See also the previous error. #### Solution Ensure that `reference_build` is set to your version of the genome, in the form of either 'grch37' or 'grch38'. ### Reference genome file ____ does not have extension .fa or .fasta Only reference genomes in the FASTA format are supported by the various tools used in this pipeline. 
Of course, your genome might already be in FASTA format with a different file extension, but it's better to be sure. #### Solution Use a reference in FASTA format with the .fa or .fasta file extension. ### `target_panel` must be provided for alignment and variant calling stages As VariTAS is meant to be run on data from amplicon sequencing experiments, some of the stages require a file detailing the target panel. This should be in the form of a BED file, the format of which is described [here](https://www.ensembl.org/info/website/upload/bed.html). #### Solution Ensure that you have a properly formatted BED file supplied as the `target_panel` parameter in the config file. ### Mismatch between reference genome and target panel Followed by "Reference genome chromosomes: \____ Target panel chromosomes: \____". This error probably looks familiar if you've ever had the great priviledge of working with GATK. Essentially, the chromosomes listed in your target panel don't match up with those in the reference genome. In practice, it means you have one or more chromosomes in the target panel that are not in the reference. #### Solution This issue can arise from a few different places, so be sure to check that it's not something very simple first. 1. There is too much whitespace at the end of the target panel BED file. In this case, simply delete the empty lines at the bottom of the file. This is probably the cause if the chromosome names otherwise seem identical. 2. Your chromosomes have names like 'chr1, chr2, chr3, etc.' in one file and '1, 2, 3' in the other file. This is likely the case if your target panel and reference genome are from different builds/assemblies of the human genome. To resolve this, either liftover the BED file using a utilty like [liftOver](https://genome.ucsc.edu/cgi-bin/hgLiftOver) to convert it the correct reference build or (if you're sure they refer to the same build) edit your BED file so that the chromosome names match those in the reference file. ### Index files not found for reference genome file ____ - try running bwa index. This issue and the next two are related to preparing the reference genome file. Various tools require that large FASTA files are indexed and have sequence dictionaries so that they can be parsed quickly. Once you fix these issues, they shouldn't come up again as long as the index files are in the same directory as the reference. #### Solution Run `bwa index` on the indicated file. ### Sequence dictionary not found for file ____ - try running GATK CreateSequenceDictionary. See above #### Solution Run `gatk CreateSequenceDictionary` on the indicated file. ### Fasta index file not found for file ____ Try running samtools faidx. See above (x2) #### Solution Run `samtools faidx` on the indicated file.
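
### Checking Reference Files Up Front

The three reference-preparation errors above can be caught before any jobs are submitted by calling the package's verification helpers yourself. A minimal sketch, assuming a hypothetical reference path; these helpers are internal (not exported), hence the `:::` access:

```r
reference.genome <- '/path/to/genome.fa'  # hypothetical path

# each helper returns TRUE/FALSE when called with the default error = FALSE
varitas:::verify.bwa.index(reference.genome)           # .amb/.ann/.bwt/.pac/.sa from bwa index
varitas:::verify.sequence.dictionary(reference.genome) # .dict from GATK CreateSequenceDictionary
varitas:::verify.fasta.index(reference.genome)         # .fai from samtools faidx
```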
/scratch/gouwar.j/cran-all/cranData/varitas/inst/doc/errors.Rmd
## ----setup, include=FALSE-----------------------------------------------------
knitr::opts_knit$set(root.dir = '../')

## ----fastq--------------------------------------------------------------------
library(varitas);

output.directory <- '';

fastq.specification <- data.frame(
    sample.id = c('A', 'B', 'C', 'D'),
    patient.id = c('X', 'X', 'Y', 'Y'),
    tissue = c('tumour', 'normal', 'tumour', 'normal'),
    reads = c('A_1.fq', 'B_1.fq', 'C_1.fq', 'D_1.fq'),
    mates = c('A_2.fq', 'B_2.fq', 'C_2.fq', 'D_2.fq')
);

print(fastq.specification);

## ----alignment, results="hide"------------------------------------------------
matched.bam.specification <- run.alignment(
    fastq.specification = fastq.specification,
    output.directory = output.directory,
    paired.end = TRUE,
    quiet = TRUE # only for testing, does not submit jobs to cluster
);

## -----------------------------------------------------------------------------
print(matched.bam.specification);

## ----variants1------------------------------------------------------------------
unmatched.bam.specification <- data.frame(
    sample.id = c('Z', 'Y'),
    tumour.bam = c('Z.bam', 'Y.bam')
);

print(unmatched.bam.specification);

## ----variants2, results = FALSE--------------------------------------------------
vcf.specification <- run.variant.calling(
    matched.bam.specification,
    output.directory = output.directory,
    variant.caller = c('vardict', 'mutect'),
    quiet = TRUE # only for testing, does not submit jobs to cluster
);

## -----------------------------------------------------------------------------
print(vcf.specification);

## ---- results = FALSE------------------------------------------------------------
variant.specification <- run.annotation(
    vcf.specification,
    output.directory = output.directory,
    quiet = TRUE # testing only
);

## ---- eval = FALSE---------------------------------------------------------------
# print(variant.specification);

## ---- results = FALSE------------------------------------------------------------
run.post.processing(
    variant.specification = variant.specification,
    output.directory = output.directory,
    quiet = TRUE
);

## ---- results = FALSE------------------------------------------------------------

## ---- results = FALSE------------------------------------------------------------
vcf.specification$job.dependency <- NULL;

run.varitas.pipeline(
    file.details = vcf.specification,
    output.directory = output.directory,
    start.stage = 'annotation',
    quiet = TRUE
);

## ---- results = FALSE------------------------------------------------------------
run.varitas.pipeline(
    file.details = vcf.specification,
    output.directory = output.directory,
    start.stage = 'annotation',
    email = '[email protected]',
    quiet = TRUE
);

## ----wrapper1-------------------------------------------------------------------
library(varitas)

output.directory <- '.'
fastq.directory <- 'inst/extdata/fastq'

fastq.files <- list.files(
    pattern = 'R1.*\\.fastq',
    path = fastq.directory,
    full.names = TRUE
)
fastq.mate.files <- list.files(
    pattern = 'R2.*\\.fastq',
    path = fastq.directory,
    full.names = TRUE
)

fastq.specification <- data.frame(
    # Extract the sample ID from the filename
    sample.id = gsub('.*Sample0(\\d\\d).*', '\\1', basename(fastq.files)),
    reads = fastq.files,
    mates = fastq.mate.files,
    stringsAsFactors = FALSE
)

print(fastq.specification)

## ----wrapper2, eval=FALSE, results=FALSE------------------------------------------
# set.varitas.options(filters.vardict.min_tumour_depth = 10)

## ----wrapper3, eval=FALSE, results=FALSE------------------------------------------
# config <- 'inst/extdata/varitas_config.yaml'
# overwrite.varitas.options(config)

## ----wrapper4, eval=FALSE, results=FALSE------------------------------------------
# run.varitas.pipeline(
#     file.details = fastq.specification,
#     output.directory = output.directory,
#     variant.callers = c('mutect', 'vardict'),
#     quiet = FALSE,
#     run.name = 'EXAMPLE',
#     email = '[email protected]'
# )

## ----wrapper5, eval=FALSE, results=FALSE------------------------------------------
# ###############################################################################
# ## VariTAS Wrapper Script
# ##
# ###############################################################################
# ## Author:
# ##   Adam Mills
# ###############################################################################
# ## Libraries:
# library(varitas)
# ###############################################################################
# ## Main
#
# output.directory <- '.'
#
# fastq.directory <- 'inst/extdata/fastq'
# fastq.files <- list.files(
#     pattern = 'R1.*\\.fastq',
#     path = fastq.directory,
#     full.names = TRUE
# )
# fastq.mate.files <- list.files(
#     pattern = 'R2.*\\.fastq',
#     path = fastq.directory,
#     full.names = TRUE
# )
#
# fastq.specification <- data.frame(
#     sample.id = gsub('.*Sample0(\\d\\d).*', '\\1', basename(fastq.files)),
#     reads = fastq.files,
#     mates = fastq.mate.files,
#     stringsAsFactors = FALSE
# )
#
# config <- 'inst/extdata/varitas_config.yaml'
# overwrite.varitas.options(config)
#
# run.varitas.pipeline(
#     file.details = fastq.specification,
#     output.directory = output.directory,
#     variant.callers = c('mutect', 'vardict'),
#     quiet = FALSE,
#     run.name = 'EXAMPLE',
#     email = '[email protected]'
# )

## ----matched1---------------------------------------------------------------------
fastq.specification <- data.frame(
    sample.id = gsub('.*Sample0(\\d\\d).*', '\\1', basename(fastq.files)),
    patient.id = c('X', 'X', 'Y', 'Y'),
    tissue = c('tumour', 'normal', 'tumour', 'normal'),
    reads = fastq.files,
    mates = fastq.mate.files,
    stringsAsFactors = FALSE
)

print(fastq.specification)

## ----hybrid1--------------------------------------------------------------------
bam.directory <- 'inst/extdata/bam'
bam.files <- list.files(
    pattern = 'Sample.*\\.bam',
    path = bam.directory,
    full.names = TRUE
)

vcf.directory <- 'inst/extdata/vcf'
vcf.files <- list.files(
    pattern = 'Sample.*\\.vcf',
    path = vcf.directory,
    full.names = TRUE
)

bam.specification <- data.frame(
    sample.id = gsub('^Sample_(\\d+).*', '\\1', basename(bam.files)),
    tumour.bam = bam.files,
    stringsAsFactors = FALSE
)
vcf.specification <- data.frame(
    sample.id = gsub('^Sample_(\\d+).*', '\\1', basename(vcf.files)),
    vcf = vcf.files,
    caller = rep('pgm', length(vcf.files)),
    stringsAsFactors = FALSE
)

print(bam.specification)
print(vcf.specification)

## ----hybrid2, eval=FALSE, results=FALSE--------------------------------------------
# run.varitas.pipeline.hybrid(
#     bam.specification = bam.specification,
#     vcf.specification = vcf.specification,
#     output.directory = 'inst/extdata/output/',
#     proton = TRUE,
#     run.name = 'EXAMPLE',
#     quiet = FALSE,
#     email = '[email protected]'
# );

## ----hybrid3--------------------------------------------------------------------
miniseq.sheet <- 'inst/extdata/miniseq/Example_template.csv'
miniseq.directory <- 'inst/extdata/miniseq'

miniseq.info <- prepare.miniseq.specifications(miniseq.sheet, miniseq.directory)

fastq.specification <- miniseq.info[[ 1 ]]
vcf.specification <- miniseq.info[[ 2 ]]
vcf.specification['caller'] <- rep('miniseq', nrow(vcf.specification))

print(fastq.specification)
print(vcf.specification)

## ----hybrid4, eval=FALSE, results=FALSE---------------------------------------------
# run.varitas.pipeline.hybrid(
#     fastq.specification = fastq.specification,
#     vcf.specification = vcf.specification,
#     output.directory = 'inst/extdata/output/',
#     run.name = 'EXAMPLE',
#     quiet = FALSE,
#     email = '[email protected]'
# )
/scratch/gouwar.j/cran-all/cranData/varitas/inst/doc/introduction.R
--- title: "The VariTAS Pipeline" author: "Erle Holgersen and Adam Mills" date: "`r Sys.Date()`" output: html_document: toc: yes theme: united highlight: kate toc_float: collapsed: true smooth_scroll: true pdf_document: toc: yes bibliography: references.bib vignette: > %\VignetteIndexEntry{Introduction} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r setup, include=FALSE} knitr::opts_knit$set(root.dir = '../') ``` # Pipeline Overview The VariTAS pipeline is an R package for processing amplicon-based targeted sequencing. It supports alignment, somatic variant calling (with and without matched normal), and variant annotation, and the pipeline can start from any stage. Both Illumina sequencing (typically MiniSeq) and Ion Torrent systems are supported by the pipeline, but they require different configurations. For Illumina runs, the FASTQ files are used to start the pipeline at the alignment stage. For Ion Torrent sequencing, the aligned BAM files from the machine are used as input. The pipeline is designed to be fully automated. Once the pipeline is launched, cluster jobs will be submitted for all tasks. In the case that some jobs depend on others, these job dependencies will be included in the script and handled by the cluster. Each stage of the pipeline is associated with a file specification data frame. This data frame contains paths to the files to be processed at that stage, and information on any job dependencies. In turn, each pipeline stage will return a data frame that can be used for the next stage in the pipeline. File paths, run parameters, HPC settings, and other options are controlled by a config file. See the [Updating Settings](#settings) section below for more details. To start using the pipeline quickly, see the [Examples](#examples) section. ## Third-Party Software There are several essential programs that the VariTAS pipeline requires. The table below provides essential information about each of them. The version number indicates the latest version tested with the pipeline. |Program|Version|Download Link| |:------|:------|:-----------------------------| |BWA|0.7.12|http://bio-bwa.sourceforge.net/| |bedtools|2.25.0|https://bedtools.readthedocs.io/en/latest/| |Samtools|1.5|http://www.htslib.org/| |Picard|2.1.0|https://broadinstitute.github.io/picard/| |Vardict (Java)|1.4.6|https://github.com/AstraZeneca-NGS/VarDictJava| |FastQC|0.11.4|https://www.bioinformatics.babraham.ac.uk/projects/fastqc/| ## Directory Structure ``` Note that only the top output directory needs to be manually created . # Supplied output directory |-2018-11-12-plots # Contains generated plots used in report |---sample-coverage # Coverage plots generated per-sample |-2018-11-12-variant-data # Final output files, including the PDF report |-78 # Directory for each sample, containing intermediary files |---mutect # Files produced by MuTect for each sample |---vardict # Files produced by VarDict for each sample |-code # Bash scripts used to submit jobs to HPC scheduler |-log # stdout and stderr for each job ``` ## Stages There are four stages to the VariTAS pipeline: alignment, variant calling, annotation, and merging. |Stage |Description| |:------|:-------| |Alignment|Align FASTQ files to reference genome| |Variant Calling|Run variant callers on aligned BAM files| |Annotation|Annotate variants with ANNOVAR| |Merging|Merge files from all variant callers and produce reports/ plots| ### Alignment Alignment consists of two main steps: alignment with bwa, and coverage quality control. 
For Illumina sequencing runs, both steps will typically be necessary. For Proton runs, the machine does the alignment against UCSC hg19. While the machine also outputs FASTQ files, realigning these yourself is not recommended as read quality information is lost[^1]. The main function for running alignment is `run.alignment()`. It takes a FASTQ specification data frame as input, submits one alignment job per sample, and returns a BAM specification data frame. ```{r fastq} library(varitas); output.directory <- ''; fastq.specification <- data.frame( sample.id = c('A', 'B', 'C', 'D'), patient.id = c('X', 'X', 'Y', 'Y'), tissue = c('tumour', 'normal', 'tumour', 'normal'), reads = c('A_1.fq', 'B_1.fq', 'C_1.fq', 'D_1.fq'), mates = c('A_2.fq', 'B_2.fq', 'C_2.fq', 'D_2.fq') ); print(fastq.specification); ``` The FASTQ specification must have columns **sample.id** and **reads**. Optionally, it can contain a column **mates** (for paired end reads), and columns **patient.ID** and **tissue**. If provided, the patient ID and tissue information will be used to do matched normal somatic variant calling in later stages of the pipeline. After creating the FASTQ specification data frame, we are ready to run the alignment step of the pipeline. ```{r alignment, results="hide"} matched.bam.specification <- run.alignment( fastq.specification = fastq.specification, output.directory = output.directory, paired.end = TRUE, quiet = TRUE # only for testing, does not submit jobs to cluster ); ``` The alignment step returns a BAM specification data frame that can be used for the variant calling. When patient ID and tissue information is provided in the input data frame, the output data frame will contain tumour and normal BAM files for each tumour sample. When no patient ID/ tissue information is provided, all samples are assumed to be tumours, and variant calling without matched normal is performed in the subsequent step. ```{r} print(matched.bam.specification); ``` ### Variant Calling Variant calling is performed through the `run.variant.calling()` function. The form of the input BAM specification depends on whether matched normals are available. If no matched normals are available, the only two required columns are **sample.id** and **tumour.bam**. ```{r variants1} unmatched.bam.specification <- data.frame( sample.id = c('Z', 'Y'), tumour.bam = c('Z.bam', 'Y.bam') ); print(unmatched.bam.specification); ``` In addition to the bam specification data frame, `run.variant.calling()` takes the variant callers as an argument. To run VarDict and MuTect 2 on the previous matched normal example, you can use the following code. ```{r variants2, results = FALSE} vcf.specification <- run.variant.calling( matched.bam.specification, output.directory = output.directory, variant.caller = c('vardict', 'mutect'), quiet = TRUE # only for testing, does not submit jobs to cluster ); ``` ```{r} print(vcf.specification); ``` The VCF specification includes information on the variant caller used to produce the VCF file. This is needed for downstream filtering steps, and used to create unique job names for annotation jobs. #### VarDict [VarDict](https://github.com/AstraZeneca-NGS/VarDict) [@vardict] is a variant caller optimized for deep sequencing. As performance scales linearly with depth, downsampling reads is not necessary, and VarDict has greater sensitivity for detecting variants present at low allele frequencies compared to other callers. 
#### MuTect [MuTect](https://software.broadinstitute.org/cancer/cga/mutect) [@mutect] is most commonly used for calling variants from whole genome and whole exome sequencing data. It is not optimized for amplicon data, and downsamples to depth 1,000 when it encounters deep sequencing data. When detecting variants in circulating DNA, this downsampling can result in mutations being lost, and running MuTect is not recommended. However, when sequencing solid tumours the variant allele frequencies are higher and there is less concern about losing mutations. ### Annotation Variant file annotation is done with ANNOVAR, and annotated variants are saved to a tab-separated file. The config file specifies the fields to be included in the final tab-separated file. More fields can be added as long as they are included in the ANNOVAR databases. ```{r, results = FALSE} variant.specification <- run.annotation( vcf.specification, output.directory = output.directory, quiet = TRUE # testing only ); ``` ```{r, eval = FALSE} print(variant.specification); ``` ### Merging The main function for submitting the post-processing job to the cluster is `run.post.processing()`. Similar to the alignment, variant calling, and variant annotation stages, this function will submit a cluster job with job dependencies as specified by the variant specification. However, unlike the other stages, the post processing stage does not rely on any command line tools. If there are no job dependencies, the post-processing stage can be run directly through the `post.processing()` function. ```{r, results = FALSE} run.post.processing( variant.specification = variant.specification, output.directory = output.directory, quiet = TRUE ); ``` There are three main parts to the post-processing stage: 1. Variant merging 2. Summary plots and PDF report 3. Quality control Excel sheet The output is split between two date-stamped subdirectories of the project directory. The `variant-data` directory contains files that are meant to be sent to collaborators: filtered variants in Excel and text formats, coverage statistics in Excel format, and a PDF report. Additionally, the PNG format plots are saved to the `plots` directory. The final page of the PDF report contains details on the pipeline run, including the path to the directory on scratch where the rest of the files can be found. ``` ## VariTAS version 0.7.0 ## Date: 2018-04-26 ## User: username ## Raw/intermediate files can be found in ## /data/analysis_dir ``` ## Running the Full Pipeline In most cases, all steps in the pipeline can be executed with a single function call. `run.varitas.pipeline()` is the main function for launching the full pipeline. By default, this will run all stages from alignment to post-processing. To start the pipeline at a later stage, adjust the `start.stage` argument of the function. Whatever start stage you provide must match the files provided in the `file.details` data frame. For example, if starting the pipeline at the variant annotation stage, the `file.details` data frame should contain paths to VCF files containing the variant calls, and be formatted in a way that passes the `verify.variant.specification()` check. Running the `run.varitas.pipeline()` function will submit jobs for all stages at once, with appropriate job dependencies. To see which jobs that would be submitted, run `run.varitas.pipeline()` with the argument `quiet = TRUE`. This will print out all of the Perl calls instead of submitting them as system calls. 
Each Perl call corresponds to one job submitted to the cluster. ```{r, results = FALSE} ``` When starting the pipeline at a later stage, earlier jobs are dropped and job dependencies are adjusted accordingly. ```{r, results = FALSE} vcf.specification$job.dependency <- NULL; run.varitas.pipeline( file.details = vcf.specification, output.directory = output.directory, start.stage = 'annotation', quiet = TRUE ); ``` The merging stage of the pipeline supports email notifications. As merging is the last stage of the pipeline, the email notification can be used to let you know when the pipeline run finishes. ```{r, results = FALSE} run.varitas.pipeline( file.details = vcf.specification, output.directory = output.directory, start.stage = 'annotation', email = '[email protected]', quiet = TRUE ); ``` ## Updating Settings {#settings} The VariTAS pipeline comes with a set of default options specified in the `config.yaml` file. These are loaded into R by default, and will enable you to run the pipeline. The settings include both cluster-specific settings that are unlikely to change once they have been set for your HPC system and run-specific settings that are more likely to change. Examples of run-specific settings are the target panel, sequencing platform, and variant filters. In most cases you will want to make changes to the default settings. There are two ways of doing this. 1. Create your own config file, and overwrite all config options with the `overwrite.varitas.options()` function. 2. Update individual options with the `set.varitas.options()` function. ### Variant Filters Variant filters are specified as part of the settings. All these settings should start with the prefix `filters` (e.g. be nested under `filters` in the YAML file), and be further grouped by variant caller. For example, to set a MuTect-specific filter `FILTER_NAME`, use the command `set.varitas.options(filters.mutect.FILTER_NAME = TRUE)`. To specify a filter for all variant callers, list them under `default` in the config YAML file. These filters are set first and overwritten by any caller-specific filters. For example, the YAML code below would set the `remove_exac` filter for all variant callers and a `min_tumour_depth` filter of 10 for all callers except VarDict. The VarDict minimum tumour depth filter is set to 20. ``` filters: default: min_tumour_depth: 10 remove_exac: true vardict: min_tumour_depth: 20 ``` The `set.varitas.options()` function currently does not support default filters. These must be specified through a config YAML file that's passed to the `overwrite.varitas.options()` function. The table below describes all filters currently supported. Variants that do not meet all of these criteria will be filtered out. Note that filters with "normal" in the name are only applied if the samples are paired tumour/normal. 
To make it easier to specify filters, the pipeline comes with different sets of default options. These are split into defaults for ctDNA and solid tumours, and can be set by `mode: ctdna` and `mode: tumour`, respectively. Any filters specified separately will take precedence over the mode default settings. For example, the following YAML code will use the ctDNA default settings, but update the `min_tumour_variant_reads` filter to 20 for all callers.

```
mode: ctDNA
filters:
  default:
    min_tumour_variant_reads: 20
```

#### Solid Tumour Mode

The default settings for the solid tumour mode can be found in the `tumour_defaults.yaml` file in the package directory.

```
filters:
  default:
    min_normal_depth: 5
    min_tumour_variant_reads: 5
    min_tumour_allele_frequency: 0.05
    max_normal_allele_frequency: 0.02
    ct_min_tumour_allele_frequency: 0.1
    indel_min_tumour_allele_frequency: 0.1
    remove_1000_genomes: true
    remove_exac: true
  vardict:
    remove_germline_status: true
```

#### ctDNA Mode

Defaults for variant calling on ctDNA can be found in the `ctdna_defaults.yaml` file. Due to low purity, variant allele frequencies in circulating DNA will typically be much lower than those in solid tumour samples. To allow for this, the minimum allele frequency filters are decreased.

```
filters:
  default:
    min_tumour_variant_reads: 5
    min_tumour_allele_frequency: 0.01
    ct_min_tumour_allele_frequency: 0.05
    indel_min_tumour_allele_frequency: 0.05
    min_normal_depth: 5
    max_normal_allele_frequency: 0
    remove_1000_genomes: true
    remove_exac: true
  pgm:
    indel_min_tumour_allele_frequency: 0.02
  vardict:
    remove_germline_status: true
  isis:
    indel_min_tumour_allele_frequency: 0.02
```

# Examples and Use Cases

## Generic Wrapper Script {#examples}

Any call to the VariTAS pipeline requires data to be passed in the form of a dataframe, so the easiest way to interact with it is to create a simple wrapper R script. The goals of the wrapper are to collect the relevant input files in a dataframe, change any necessary VariTAS options, and call the relevant pipeline function.

We can start by arranging the FASTQ files:
```{r wrapper1}
library(varitas)

output.directory <- '.'
fastq.directory <- 'inst/extdata/fastq'

fastq.files <- list.files(
  pattern = 'R1.*\\.fastq',
  path = fastq.directory,
  full.names = TRUE
)
fastq.mate.files <- list.files(
  pattern = 'R2.*\\.fastq',
  path = fastq.directory,
  full.names = TRUE
)

fastq.specification <- data.frame(
  # Extract the sample ID from the filename
  sample.id = gsub('.*Sample0(\\d\\d).*', '\\1', basename(fastq.files)),
  reads = fastq.files,
  mates = fastq.mate.files,
  stringsAsFactors = FALSE
)

print(fastq.specification)
```

Often, you will need to change settings in the VariTAS config file. As shown in the Introduction, this can be done in one of two ways. The first is to use `set.varitas.options()` within your wrapper script like so:

```{r wrapper2, eval=FALSE, results=FALSE}
set.varitas.options(filters.vardict.min_tumour_depth = 10)
```

This is suitable for smaller changes, but it is usually more convenient to have a copy of the VariTAS config file for each project or run of the pipeline. This way, all of the settings that are unlikely to change can be easily set, and other users will be able to clearly see the config options you used.

```{r wrapper3, eval=FALSE, results=FALSE}
config <- 'inst/extdata/varitas_config.yaml'
overwrite.varitas.options(config)
```

Once the above steps are completed, you are ready to call the main function of the pipeline.

```{r wrapper4, eval=FALSE, results=FALSE}
run.varitas.pipeline(
  file.details = fastq.specification,
  output.directory = output.directory,
  variant.callers = c('mutect', 'vardict'),
  quiet = FALSE,
  run.name = 'EXAMPLE',
  email = '[email protected]'
)
```

And those are all the necessary steps to run the pipeline. It will notify you by email when it is finished if you provide an address. On the first attempt, it is advisable to set the `quiet` parameter to `TRUE`, which prevents any of the tasks from running. This way, any potential problems can be fixed before a large number of jobs are created.

A full wrapper script template is provided below for completeness and ease of copying-and-pasting.

```{r wrapper5, eval=FALSE, results=FALSE}
###############################################################################
## VariTAS Wrapper Script                                                    ##
###############################################################################
## Author:
##     Adam Mills
###############################################################################
## Libraries:
library(varitas)
###############################################################################
## Main

output.directory <- '.'
fastq.directory <- 'inst/extdata/fastq'

fastq.files <- list.files(
  pattern = 'R1.*\\.fastq',
  path = fastq.directory,
  full.names = TRUE
)
fastq.mate.files <- list.files(
  pattern = 'R2.*\\.fastq',
  path = fastq.directory,
  full.names = TRUE
)

fastq.specification <- data.frame(
  sample.id = gsub('.*Sample0(\\d\\d).*', '\\1', basename(fastq.files)),
  reads = fastq.files,
  mates = fastq.mate.files,
  stringsAsFactors = FALSE
)

config <- 'inst/extdata/varitas_config.yaml'
overwrite.varitas.options(config)

run.varitas.pipeline(
  file.details = fastq.specification,
  output.directory = output.directory,
  variant.callers = c('mutect', 'vardict'),
  quiet = FALSE,
  run.name = 'EXAMPLE',
  email = '[email protected]'
)
```

## Variant Calling with Matched Normal

Data from normal tissue can be used for matched somatic variant calling in the pipeline. When creating your FASTQ specification dataframe, include the columns `patient.id` and `tissue`, and the pipeline will submit matched normal data to the variant callers.
```{r matched1}
fastq.specification <- data.frame(
  sample.id = gsub('.*Sample0(\\d\\d).*', '\\1', basename(fastq.files)),
  patient.id = c('X', 'X', 'Y', 'Y'),
  tissue = c('tumour', 'normal', 'tumour', 'normal'),
  reads = fastq.files,
  mates = fastq.mate.files,
  stringsAsFactors = FALSE
)

print(fastq.specification)
```

## Ion PGM Data

Data produced by an Ion PGM system can also be processed by this pipeline using a different function. If you'd like to incorporate the variants called by the machine in the pipeline, simply pass both the BAM files and the VCF files into `run.varitas.pipeline.hybrid()`. Data from Ion Proton systems can be used in the same way by setting the `proton` parameter to `TRUE`.

```{r hybrid1}
bam.directory <- 'inst/extdata/bam'
bam.files <- list.files(
  pattern = 'Sample.*\\.bam',
  path = bam.directory,
  full.names = TRUE
)

vcf.directory <- 'inst/extdata/vcf'
vcf.files <- list.files(
  pattern = 'Sample.*\\.vcf',
  path = vcf.directory,
  full.names = TRUE
)

bam.specification <- data.frame(
  sample.id = gsub('^Sample_(\\d+).*', '\\1', basename(bam.files)),
  tumour.bam = bam.files,
  stringsAsFactors = FALSE
)
vcf.specification <- data.frame(
  sample.id = gsub('^Sample_(\\d+).*', '\\1', basename(vcf.files)),
  vcf = vcf.files,
  caller = rep('pgm', length(vcf.files)),
  stringsAsFactors = FALSE
)

print(bam.specification)
print(vcf.specification)
```

```{r hybrid2, eval=FALSE, results=FALSE}
run.varitas.pipeline.hybrid(
  bam.specification = bam.specification,
  vcf.specification = vcf.specification,
  output.directory = 'inst/extdata/output/',
  proton = TRUE,
  run.name = 'EXAMPLE',
  quiet = FALSE,
  email = '[email protected]'
);
```

In this version of the pipeline, the alignment stage is skipped and the Ion PGM variant data will be incorporated into the final reports.

## MiniSeq Data

To enable users to quickly build file specifications for MiniSeq runs, the VariTAS pipeline has a function `prepare.miniseq.specifications()`. When passed a MiniSeq sample sheet and the path to a MiniSeq directory, the function will parse through the directory and look for FASTQ/BAM/VCF files for each of the samples. By default the `Sample_ID` column of the MiniSeq sample sheet, up to the first dash, is taken as the sample ID (see the short illustration after the example below).

`prepare.miniseq.specifications()` returns a list with elements corresponding to the different file types that have been found. For example, if VCF files were present in the VCF directory, a VCF specification will be named as `vcf` in the result. Note that you will have to add a column `caller` to the VCF specification before it can be used in the pipeline.

```{r hybrid3}
miniseq.sheet <- 'inst/extdata/miniseq/Example_template.csv'
miniseq.directory <- 'inst/extdata/miniseq'

miniseq.info <- prepare.miniseq.specifications(miniseq.sheet, miniseq.directory)

fastq.specification <- miniseq.info[[ 1 ]]
vcf.specification <- miniseq.info[[ 2 ]]
vcf.specification['caller'] <- rep('miniseq', nrow(vcf.specification))

print(fastq.specification)
print(vcf.specification)
```
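For reference, the default sample-ID extraction described above amounts to truncating `Sample_ID` at the first dash; a minimal sketch with made-up IDs:

```{r, eval = FALSE}
# hypothetical Sample_ID values; everything from the first dash onwards is dropped
sub('-.*', '', c('Sample01-dilution', 'Sample02-repeat'))
## [1] "Sample01" "Sample02"
```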
### Incorporating MiniSeq Variant Calls

The dataframes generated by the `prepare.miniseq.specifications` function can be fed into the standard pipeline, or they can be used in the hybrid pipeline. In the latter case, you are able to pass the VCF files much like the Ion PGM scenario in Example 2. By doing so, the pipeline will include the MiniSeq variant calls in the final output.

```{r hybrid4, eval=FALSE, results=FALSE}
run.varitas.pipeline.hybrid(
  fastq.specification = fastq.specification,
  vcf.specification = vcf.specification,
  output.directory = 'inst/extdata/output/',
  run.name = 'EXAMPLE',
  quiet = FALSE,
  email = '[email protected]'
)
```

## References

[^1]: Ion machines use SFF files, which are then converted back to FASTQ. This results in the loss of information on read quality. Another problem with aligning the Ion Torrent FASTQs is the distinct homopolymer error profiles of ion semiconductor sequencing. This is accounted for with the machine aligner, but not by BWA.

[^2]: Data from phase 3 of the 1,000 genomes project is obtained through ANNOVAR.
/scratch/gouwar.j/cran-all/cranData/varitas/inst/doc/introduction.Rmd
--- title: "VariTAS Pipeline Report" date: "`r format(Sys.time(), '%d %B, %Y')`" output: pdf_document: toc: true --- ```{r setup, include = FALSE} library(varitas); knitr::opts_chunk$set( echo = TRUE, fig.height = 4, dev.args = list(bg = 'white') ); ``` \newpage # Sample QC ## Coverage per Sample ```{r, echo = FALSE, results = FALSE} coverage.sample <- get.coverage.by.sample.statistics(project.directory); plot.coverage.by.sample(coverage.sample[order(-coverage.sample$mean.coverage),], file.name = NULL, statistic = 'mean'); plot.coverage.by.sample(coverage.sample[order(-coverage.sample$median.coverage),], file.name = NULL, statistic = 'median'); ``` ## Ontarget Percent ```{r, echo = FALSE, results = FALSE} plot.ontarget.percent( coverage.sample, file.name = NULL ); ``` ## Paired Percent ```{r, echo = FALSE, results = FALSE} plot.paired.percent( coverage.sample, file.name = NULL ); ``` \newpage # Variants ## Variants per Sample ```{r, echo = FALSE, results = FALSE} varitas:::variants.sample.barplot( filtered.variants, file.name = NULL ); ``` ## Variants per Caller ```{r, echo = FALSE, results = FALSE} varitas:::variants.caller.barplot( filtered.variants, file.name = NULL, group.by = 'type' ); ``` ## Trinucleotide Substitutions ```{r, echo = FALSE, results = FALSE} varitas:::trinucleotide.barplot( filtered.variants, file.name = NULL ); ``` ## Trinucleotide Substitutions by Caller ```{r, echo = FALSE, results = FALSE} varitas:::variants.caller.barplot( filtered.variants, file.name = NULL, group.by = 'substitution' ); ``` ## Recurrent Variants ```{r, echo = FALSE, results = FALSE} varitas:::variant.recurrence.barplot( filtered.variants, file.name = NULL ); ``` ## Concordance Between Callers ```{r, echo = FALSE, results = FALSE, fig.height = 6} # Caller overlap venn diagram all.callers <- stringr::str_split(filtered.variants$caller, pattern = ':'); unique.callers <- unique( unlist(all.callers) ); # create ID field to uniquely identify variants filtered.variants$id <- paste0( filtered.variants$sample.id, '-', filtered.variants$CHROM, ':', filtered.variants$POS, '-', filtered.variants$REF, '>', filtered.variants$ALT ); caller.results <- lapply( unique.callers, function(caller, variants) { return(variants$id[ grepl(caller, variants$caller) ]) }, variants = filtered.variants ); names(caller.results) <- varitas:::capitalize.caller(unique.callers); # turn off log files futile.logger::flog.threshold(futile.logger::ERROR, name = "VennDiagramLogger"); colour.scheme <- c( '#0039A6', '#FF6319', '#6CBE45', '#996633', '#A7A9AC', '#FCCC0A', '#B933AD', '#EE352E', '#808183', '#00933C' ); venn.object <- VennDiagram::venn.diagram( caller.results, filename = NULL, fill = colour.scheme[ 1:length(unique.callers) ], ext.text = FALSE, ext.percent = rep(0.01, 3), cat.pos = 0 ); plot.new(); grid::grid.draw(venn.object); ``` \newpage # Coverage by Amplicon ```{r, echo = FALSE, results = FALSE} coverage.statistics <- get.coverage.by.amplicon(project.directory); first.sample.column <- 4; while( !is.numeric(coverage.statistics[, first.sample.column]) && first.sample.column <= ncol(coverage.statistics) ) { if( ncol(coverage.statistics) == first.sample.column ) { stop('Cannot find first sample column'); } first.sample.column <- first.sample.column + 1; } genes <- get.gene(coverage.statistics); gene.start <- vapply( unique(genes), function(x, genes) match(x, genes), genes = genes, FUN.VALUE = 0 ); gene.end <- c(gene.start[-1], nrow(coverage.statistics) + 1); midpoints <- gene.start + (gene.end - gene.start)/2; chr.nums <- 
chr.nums <- sapply(coverage.statistics$chr, function(x) substr(x, 4, nchar(x)))
to.remain <- sapply(chr.nums, function(x) x != 'X' && x != 'Y')

old.names <- names(coverage.statistics)
coverage.statistics <- cbind(genes, chr.nums, coverage.statistics)
names(coverage.statistics) <- c('gene', 'chr.no', old.names)
first.sample.column <- first.sample.column + 2

# convert factor columns to character
for (i in which(sapply(coverage.statistics, class) == "factor")) {
  coverage.statistics[[i]] <- as.character(coverage.statistics[[i]])
}

colours <- c()
shapes <- c()
chr.palette <- c(
  '#8DD3C7', '#081D58', '#BEBADA', '#FB8072', '#CCEBC5',
  '#FDB462', '#999999', '#FCCDE5', '#FC8D59', '#35978F',
  '#F781BF', '#FFED6F', '#E41A1C', '#377EB8', '#4DAF4A',
  '#984EA3', '#A65628', '#80B1D3', '#252525', '#A6761D',
  '#B3DE69', '#F0027F', '#FFFFCC', '#FDDBC7', '#004529'
)

# Sort by chromosome and position (sex chromosomes at the end)
sex.chr.rows <- coverage.statistics[!to.remain, ]
coverage.statistics <- coverage.statistics[to.remain, ]
coverage.statistics$chr.no <- as.integer(coverage.statistics$chr.no)

coverage.order <- order(coverage.statistics$chr.no, coverage.statistics$start, coverage.statistics$end);
coverage.statistics <- coverage.statistics[ coverage.order, ];

sex.chr.order <- order(sex.chr.rows$chr.no, sex.chr.rows$start, sex.chr.rows$end);
sex.chr.rows <- sex.chr.rows[ sex.chr.order, ];

coverage.statistics <- rbind(coverage.statistics, sex.chr.rows)

genes <- unique(coverage.statistics$gene)
chr.list <- coverage.statistics$chr.no

# Alternating red and blue for each chromosome present
red <- TRUE
prev.chr <- ''
for (j in 1:length(chr.list)) {
  chr.ending <- chr.list[j]
  if (chr.ending != prev.chr){
    if (red) {
      colours <- c(colours, 'red')
      shapes <- c(shapes, 21)
      red <- FALSE
    } else {
      colours <- c(colours, 'blue')
      shapes <- c(shapes, 22)
      red <- TRUE
    }
  } else {
    colours <- c(colours, colours[length(colours)])
    shapes <- c(shapes, shapes[length(shapes)])
  }
  prev.chr <- chr.ending
}
```

## Median

```{r, echo = FALSE, results = FALSE, fig.height = 4}
if (first.sample.column < ncol(coverage.statistics)) {
  avg.coverage.stats <- aggregate.data.frame(
    coverage.statistics[, first.sample.column:ncol(coverage.statistics)],
    list(coverage.statistics$gene),
    sum
  );
  avg.coverage.stats <- avg.coverage.stats[match(genes, avg.coverage.stats$Group.1), ]
  median.coverage <- apply(
    avg.coverage.stats[, 2:ncol(avg.coverage.stats)],
    1,
    stats::median
  );
} else {
  # Only one sample
  median.coverage <- stats::median(coverage.statistics[, first.sample.column])
}

graphics::par(
  mar = c(3.4, 4, 1.2, 0.2),
  cex.axis = 0.6,
  font.axis = 1,
  oma = c(0, 0, 0, 0),
  las = 2,
  tcl = -0.2
);

graphics::plot(
  x = jitter(seq_along(median.coverage), 0.15),
  y = median.coverage,
  main = 'Median Coverage',
  cex = 0.8,
  pch = 21,
  bg = 'grey',
  col = 'black',
  xlab = '',
  ylab = 'Coverage',
  xaxt = 'n',
  xaxs = 'r'
);

#graphics::abline(v = gene.start[-1], col = 'grey', lty = 'dashed');
graphics::axis(1, at = 1:length(unique(genes)), labels = unique(genes), font = 2);
```

```{r, echo = FALSE, results = 'asis', fig.height = 4 }
graphics::par(
  mar = c(3.4, 4, 1.2, 0.2),
  cex.axis = 0.6,
  font.axis = 1,
  oma = c(0, 0, 0, 0),
  las = 2,
  tcl = -0.2
);

# loop over columns and plot all of them
for(i in first.sample.column:ncol(coverage.statistics) ) {
  sample.id <- names(coverage.statistics)[i];
  sample.coverage <- coverage.statistics[, i];

  # map each amplicon to the index of its gene for the x-axis
  # (a separate loop variable avoids clobbering the column index i)
  x <- c()
  for (g in 1:length(unique(genes))) {
    for (k in 1:length(which(coverage.statistics$gene == unique(genes)[g]))) {
      x <- c(x, g)
    }
  }

  cat('## ', sample.id, '\n');
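  # Scatter plot of per-amplicon coverage for this sample: points are jittered
  # horizontally within each gene, coloured/shaped by chromosome.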
  graphics::plot(
    x = jitter(x, amount = 0.15),
    y = sample.coverage,
    main = sample.id,
    cex = 0.8,
    pch = shapes,
    # pch = 21,
    bg = colours,
    col = 'black',
    xlab = '',
    ylab = 'Coverage',
    xaxt = 'n',
    xaxs = 'r'
  );

  #graphics::abline(v = gene.start[-1], col = 'grey', lty = 'dashed');
  graphics::axis(1, at = 1:length(unique(genes)), labels = unique(genes), font = 2);

  cat('\n\n');
}
```

\newpage

# Plot Descriptions

## Variants

*Variants per Sample* \newline
The number of variants called by sample, broken down into SNV/MNV/indel. Copy number variants are filtered out as part of the pipeline.

*Variants per Caller* \newline
The number of variants called by caller, coloured by type of variant.

*Trinucleotide Substitutions* \newline
Number of variants called by trinucleotide substitution, across all callers.

*Trinucleotide Substitutions by Caller* \newline
Caller-level variant counts broken down by trinucleotide substitution.

*Recurrent Variants* \newline
Recurrent variants in the cohort, grouped by caller(s).

*Concordance Between Callers* \newline
Overlap between variants called by different callers.

## Sample QC

Every plot shows a quality control metric per sample. Each bar represents a sample, and the y-axis shows the relevant quality control metric. The raw data used for these plots can be found in the file **Coverage_statistics.xlsx** (*Coverage by sample* sheet).

## Coverage by Amplicon

Each plot shows the coverage per amplicon per sample. The amplicons are ordered by genomic position (lexicographical order). Due to limited space, not all gene labels are shown. The raw data used for these plots can be found in the file **Coverage_statistics.xlsx** (*Coverage by amplicon* sheet).

\newpage

# Additional Files

## Filtered_variants.xlsx

Variants called by sample, with ANNOVAR annotation. The *Interpro_domain* column contains the predicted protein domain of the mutations. If a gene has several transcripts, the protein domains are ordered from worst to best predicted impact. For more details about the ANNOVAR annotation fields, see the [ANNOVAR website](http://annovar.openbioinformatics.org/en/latest/user-guide/filter/).

The *Filters* sheet contains the variant calling filters that were used in the pipeline run.

## Coverage_statistics.xlsx

Per sample and per amplicon coverage. The workbook contains two sheets. **Coverage by sample** contains summarised coverage statistics per sample. **Coverage by amplicon** contains the number of reads mapped to each amplicon in each sample.

## filtered_variants.txt

Filtered variants in tab-delimited format. The content is the same as the *Variants* sheet of **Filtered_variants.xlsx**.

\newpage

# Pipeline Details

```{r, echo = FALSE}
cat(
  'varitas version', as.character( packageVersion('varitas') ), '\n',
  'Date:', format(Sys.Date(), format = '%Y-%m-%d'), '\n',
  'User:', Sys.info()['user'], '\n\n'
);

# need to find a way to wrap this...
cat('Raw/intermediate files can be found in\n', project.directory);
```
/scratch/gouwar.j/cran-all/cranData/varitas/inst/report_template.Rmd
--- title: "What Does This Error Mean?" author: "Adam Mills" date: "`r Sys.Date()`" output: html_document: toc: yes theme: united highlight: kate toc_float: collapsed: true smooth_scroll: true pdf_document: toc: yes bibliography: references.bib vignette: > %\VignetteIndexEntry{Errors} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r setup, include=FALSE} knitr::opts_knit$set(root.dir = '../') ``` ## The Purpose of This Guide As any bioinformatician knows, there are few things more frustrating than trying to understand how to use someone else's program. I struggled with this myself while working on this package. However, in the realm of scientific research, we must learn to appreciate the stringency of our frequently used tools. I will not tell you to ignore the various warnings and errors produced by VariTAS in this vignette, because they are essential to ensure that the pipeline produces statistically robust, reproducible results. That being said, I empathise with the frustration of trying to use a new tool only to be met with a barrage of errors and incompatible data. So to minimise the amount of time you have to spend interpreting laconic error messages and resubmitting processes, I have written this guide. I hope that it helps to explain why these errors are thrown and more importantly, how to make them go away. ## Verifying VariTAS Options These are errors thrown when the pipeline is verifying the various options and parameters submitted to it through the config file. This includes a number of 'file ____ does not exist'-type errors that I have omitted for what I hope are obvious reasons. ### The following stages are not supported: ____ An incompatible stage has been submitted to the main pipeline function. The only supported stages are 'alignment', 'qc', 'calling', 'annotation', and 'merging'. #### Solution Ensure that the `start.stage` parameter is set to one of the allowed stages. ### `varitas.options` must be a list of options or a string giving the path to the config YAML file Whatever you have tried to use as the VariTAS options file is incorrect. You shouldn't see this error if you're following the template in the Introduction vignette. #### Solution Ensure that you are pointing to the correct file when submitting it to `overwrite.varitas.options`. It should be based on the `config.yaml` file contained in the `inst` directory of this package. ### config must include `reference_build` There must be a `reference_build` parameter set somewhere in the config file so that the script knows which version of the genome you are using. This setting is present in the `config.yaml` file found in the `inst` directory of this package. #### Solution Add a parameter to the config file called `reference_build` and make sure it's set to either 'grch37' or 'grch38' (anything else will cause you to run into the next error). ### `reference_build` must be either grch37 or grch38 The `reference_build` parameter in the config file can only be set to either 'grch37' or 'grch38', which are the two versions of the human genome supported by the pipeline. See also the previous error. #### Solution Ensure that `reference_build` is set to your version of the genome, in the form of either 'grch37' or 'grch38'. ### Reference genome file ____ does not have extension .fa or .fasta Only reference genomes in the FASTA format are supported by the various tools used in this pipeline. 
### Index files not found for reference genome file ____ - try running bwa index.

This issue and the next two are related to preparing the reference genome file. Various tools require that large FASTA files are indexed and have sequence dictionaries so that they can be parsed quickly. Once you fix these issues, they shouldn't come up again as long as the index files are in the same directory as the reference.

#### Solution

Run `bwa index` on the indicated file.

### Sequence dictionary not found for file ____ - try running GATK CreateSequenceDictionary.

See above

#### Solution

Run `gatk CreateSequenceDictionary` on the indicated file.

### Fasta index file not found for file ____ - try running samtools faidx.

See above (x2)

#### Solution

Run `samtools faidx` on the indicated file.
/scratch/gouwar.j/cran-all/cranData/varitas/vignettes/errors.Rmd
--- title: "The VariTAS Pipeline" author: "Erle Holgersen and Adam Mills" date: "`r Sys.Date()`" output: html_document: toc: yes theme: united highlight: kate toc_float: collapsed: true smooth_scroll: true pdf_document: toc: yes bibliography: references.bib vignette: > %\VignetteIndexEntry{Introduction} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r setup, include=FALSE} knitr::opts_knit$set(root.dir = '../') ``` # Pipeline Overview The VariTAS pipeline is an R package for processing amplicon-based targeted sequencing. It supports alignment, somatic variant calling (with and without matched normal), and variant annotation, and the pipeline can start from any stage. Both Illumina sequencing (typically MiniSeq) and Ion Torrent systems are supported by the pipeline, but they require different configurations. For Illumina runs, the FASTQ files are used to start the pipeline at the alignment stage. For Ion Torrent sequencing, the aligned BAM files from the machine are used as input. The pipeline is designed to be fully automated. Once the pipeline is launched, cluster jobs will be submitted for all tasks. In the case that some jobs depend on others, these job dependencies will be included in the script and handled by the cluster. Each stage of the pipeline is associated with a file specification data frame. This data frame contains paths to the files to be processed at that stage, and information on any job dependencies. In turn, each pipeline stage will return a data frame that can be used for the next stage in the pipeline. File paths, run parameters, HPC settings, and other options are controlled by a config file. See the [Updating Settings](#settings) section below for more details. To start using the pipeline quickly, see the [Examples](#examples) section. ## Third-Party Software There are several essential programs that the VariTAS pipeline requires. The table below provides essential information about each of them. The version number indicates the latest version tested with the pipeline. |Program|Version|Download Link| |:------|:------|:-----------------------------| |BWA|0.7.12|http://bio-bwa.sourceforge.net/| |bedtools|2.25.0|https://bedtools.readthedocs.io/en/latest/| |Samtools|1.5|http://www.htslib.org/| |Picard|2.1.0|https://broadinstitute.github.io/picard/| |Vardict (Java)|1.4.6|https://github.com/AstraZeneca-NGS/VarDictJava| |FastQC|0.11.4|https://www.bioinformatics.babraham.ac.uk/projects/fastqc/| ## Directory Structure ``` Note that only the top output directory needs to be manually created . # Supplied output directory |-2018-11-12-plots # Contains generated plots used in report |---sample-coverage # Coverage plots generated per-sample |-2018-11-12-variant-data # Final output files, including the PDF report |-78 # Directory for each sample, containing intermediary files |---mutect # Files produced by MuTect for each sample |---vardict # Files produced by VarDict for each sample |-code # Bash scripts used to submit jobs to HPC scheduler |-log # stdout and stderr for each job ``` ## Stages There are four stages to the VariTAS pipeline: alignment, variant calling, annotation, and merging. |Stage |Description| |:------|:-------| |Alignment|Align FASTQ files to reference genome| |Variant Calling|Run variant callers on aligned BAM files| |Annotation|Annotate variants with ANNOVAR| |Merging|Merge files from all variant callers and produce reports/ plots| ### Alignment Alignment consists of two main steps: alignment with bwa, and coverage quality control. 
For Illumina sequencing runs, both steps will typically be necessary. For Proton runs, the machine does the alignment against UCSC hg19. While the machine also outputs FASTQ files, realigning these yourself is not recommended as read quality information is lost[^1]. The main function for running alignment is `run.alignment()`. It takes a FASTQ specification data frame as input, submits one alignment job per sample, and returns a BAM specification data frame. ```{r fastq} library(varitas); output.directory <- ''; fastq.specification <- data.frame( sample.id = c('A', 'B', 'C', 'D'), patient.id = c('X', 'X', 'Y', 'Y'), tissue = c('tumour', 'normal', 'tumour', 'normal'), reads = c('A_1.fq', 'B_1.fq', 'C_1.fq', 'D_1.fq'), mates = c('A_2.fq', 'B_2.fq', 'C_2.fq', 'D_2.fq') ); print(fastq.specification); ``` The FASTQ specification must have columns **sample.id** and **reads**. Optionally, it can contain a column **mates** (for paired end reads), and columns **patient.ID** and **tissue**. If provided, the patient ID and tissue information will be used to do matched normal somatic variant calling in later stages of the pipeline. After creating the FASTQ specification data frame, we are ready to run the alignment step of the pipeline. ```{r alignment, results="hide"} matched.bam.specification <- run.alignment( fastq.specification = fastq.specification, output.directory = output.directory, paired.end = TRUE, quiet = TRUE # only for testing, does not submit jobs to cluster ); ``` The alignment step returns a BAM specification data frame that can be used for the variant calling. When patient ID and tissue information is provided in the input data frame, the output data frame will contain tumour and normal BAM files for each tumour sample. When no patient ID/ tissue information is provided, all samples are assumed to be tumours, and variant calling without matched normal is performed in the subsequent step. ```{r} print(matched.bam.specification); ``` ### Variant Calling Variant calling is performed through the `run.variant.calling()` function. The form of the input BAM specification depends on whether matched normals are available. If no matched normals are available, the only two required columns are **sample.id** and **tumour.bam**. ```{r variants1} unmatched.bam.specification <- data.frame( sample.id = c('Z', 'Y'), tumour.bam = c('Z.bam', 'Y.bam') ); print(unmatched.bam.specification); ``` In addition to the bam specification data frame, `run.variant.calling()` takes the variant callers as an argument. To run VarDict and MuTect 2 on the previous matched normal example, you can use the following code. ```{r variants2, results = FALSE} vcf.specification <- run.variant.calling( matched.bam.specification, output.directory = output.directory, variant.caller = c('vardict', 'mutect'), quiet = TRUE # only for testing, does not submit jobs to cluster ); ``` ```{r} print(vcf.specification); ``` The VCF specification includes information on the variant caller used to produce the VCF file. This is needed for downstream filtering steps, and used to create unique job names for annotation jobs. #### VarDict [VarDict](https://github.com/AstraZeneca-NGS/VarDict) [@vardict] is a variant caller optimized for deep sequencing. As performance scales linearly with depth, downsampling reads is not necessary, and VarDict has greater sensitivity for detecting variants present at low allele frequencies compared to other callers. 
#### MuTect [MuTect](https://software.broadinstitute.org/cancer/cga/mutect) [@mutect] is most commonly used for calling variants from whole genome and whole exome sequencing data. It is not optimized for amplicon data, and downsamples to depth 1,000 when it encounters deep sequencing data. When detecting variants in circulating DNA, this downsampling can result in mutations being lost, and running MuTect is not recommended. However, when sequencing solid tumours the variant allele frequencies are higher and there is less concern about losing mutations. ### Annotation Variant file annotation is done with ANNOVAR, and annotated variants are saved to a tab-separated file. The config file specifies the fields to be included in the final tab-separated file. More fields can be added as long as they are included in the ANNOVAR databases. ```{r, results = FALSE} variant.specification <- run.annotation( vcf.specification, output.directory = output.directory, quiet = TRUE # testing only ); ``` ```{r, eval = FALSE} print(variant.specification); ``` ### Merging The main function for submitting the post-processing job to the cluster is `run.post.processing()`. Similar to the alignment, variant calling, and variant annotation stages, this function will submit a cluster job with job dependencies as specified by the variant specification. However, unlike the other stages, the post processing stage does not rely on any command line tools. If there are no job dependencies, the post-processing stage can be run directly through the `post.processing()` function. ```{r, results = FALSE} run.post.processing( variant.specification = variant.specification, output.directory = output.directory, quiet = TRUE ); ``` There are three main parts to the post-processing stage: 1. Variant merging 2. Summary plots and PDF report 3. Quality control Excel sheet The output is split between two date-stamped subdirectories of the project directory. The `variant-data` directory contains files that are meant to be sent to collaborators: filtered variants in Excel and text formats, coverage statistics in Excel format, and a PDF report. Additionally, the PNG format plots are saved to the `plots` directory. The final page of the PDF report contains details on the pipeline run, including the path to the directory on scratch where the rest of the files can be found. ``` ## VariTAS version 0.7.0 ## Date: 2018-04-26 ## User: username ## Raw/intermediate files can be found in ## /data/analysis_dir ``` ## Running the Full Pipeline In most cases, all steps in the pipeline can be executed with a single function call. `run.varitas.pipeline()` is the main function for launching the full pipeline. By default, this will run all stages from alignment to post-processing. To start the pipeline at a later stage, adjust the `start.stage` argument of the function. Whatever start stage you provide must match the files provided in the `file.details` data frame. For example, if starting the pipeline at the variant annotation stage, the `file.details` data frame should contain paths to VCF files containing the variant calls, and be formatted in a way that passes the `verify.variant.specification()` check. Running the `run.varitas.pipeline()` function will submit jobs for all stages at once, with appropriate job dependencies. To see which jobs that would be submitted, run `run.varitas.pipeline()` with the argument `quiet = TRUE`. This will print out all of the Perl calls instead of submitting them as system calls. 
Each Perl call corresponds to one job submitted to the cluster. ```{r, results = FALSE} ``` When starting the pipeline at a later stage, earlier jobs are dropped and job dependencies are adjusted accordingly. ```{r, results = FALSE} vcf.specification$job.dependency <- NULL; run.varitas.pipeline( file.details = vcf.specification, output.directory = output.directory, start.stage = 'annotation', quiet = TRUE ); ``` The merging stage of the pipeline supports email notifications. As merging is the last stage of the pipeline, the email notification can be used to let you know when the pipeline run finishes. ```{r, results = FALSE} run.varitas.pipeline( file.details = vcf.specification, output.directory = output.directory, start.stage = 'annotation', email = '[email protected]', quiet = TRUE ); ``` ## Updating Settings {#settings} The VariTAS pipeline comes with a set of default options specified in the `config.yaml` file. These are loaded into R by default, and will enable you to run the pipeline. The settings include both cluster-specific settings that are unlikely to change once they have been set for your HPC system and run-specific settings that are more likely to change. Examples of run-specific settings are the target panel, sequencing platform, and variant filters. In most cases you will want to make changes to the default settings. There are two ways of doing this. 1. Create your own config file, and overwrite all config options with the `overwrite.varitas.options()` function. 2. Update individual options with the `set.varitas.options()` function. ### Variant Filters Variant filters are specified as part of the settings. All these settings should start with the prefix `filters` (e.g. be nested under `filters` in the YAML file), and be further grouped by variant caller. For example, to set a MuTect-specific filter `FILTER_NAME`, use the command `set.varitas.options(filters.mutect.FILTER_NAME = TRUE)`. To specify a filter for all variant callers, list them under `default` in the config YAML file. These filters are set first and overwritten by any caller-specific filters. For example, the YAML code below would set the `remove_exac` filter for all variant callers and a `min_tumour_depth` filter of 10 for all callers except VarDict. The VarDict minimum tumour depth filter is set to 20. ``` filters: default: min_tumour_depth: 10 remove_exac: true vardict: min_tumour_depth: 20 ``` The `set.varitas.options()` function currently does not support default filters. These must be specified through a config YAML file that's passed to the `overwrite.varitas.options()` function. The table below describes all filters currently supported. Variants that do not meet all of these criteria will be filtered out. Note that filters with "normal" in the name are only applied if the samples are paired tumour/normal. 
|Name |Value |Description | |:------|:-----|:-------------------------------------------------| |min_tumour_variant_reads|numeric|Minimum number of reads supporting a variant| |max_normal_variant_reads|numeric|Maximum number of reads in supporting a variant in normal| |min_tumour_depth|numeric|Minimum depth in tumour| |min_normal_depth|numeric|Minimum depth in normal| |min_tumour_allele_frequency|numeric|Minimum tumour allele frequency| |max_normal_allele_frequency|numeric|Maximum normal allele frequency| |indel_min_tumour_allele_frequency|numeric|Minimum tumour allele frequency for indels| |min_quality|numeric|Minimum base quality| |ct_min_tumour_allele_frequency|numeric|Minimum tumour allele frequency for C>T mutations. Intended as an FFPE filter| |remove_1000_genomes|logical|Flag for removing all variants found in 1,000 genomes[^2]| |remove_exac|logical|Flag for removing variants found at AF>0.01 in the Exome Aggregation Consortium| |remove_germline_status|logical|Flag for removing all variants with a status field set to "Germline". Intended to be used with VarDict| To make it easier to specify filters, the pipeline comes with different sets of default options. These are split into defaults for ctDNA and solid tumours, and can be set by `mode: ctdna` and `mode: tumour`, respectively. Any filters specified separately will take precedence over the mode default settings. For example, the following YAML code will use the ctDNA default settings, but update the `min_tumour_variant_reads` filter to 20 for all callers. ``` mode: ctDNA filters: default: min_tumour_variant_reads: 20 ``` #### Solid Tumour Mode The default settings for the solid tumour mode can be found in the `tumour_defaults.yaml` file in the package directory. ``` filters: default: min_normal_depth: 5 min_tumour_variant_reads: 5 min_tumour_allele_frequency: 0.05 max_normal_allele_frequency: 0.02 ct_min_tumour_allele_frequency: 0.1 indel_min_tumour_allele_frequency: 0.1 remove_1000_genomes: true remove_exac: true vardict: remove_germline_status: true ``` #### ctDNA Mode Defaults for variant calling on ctDNA can be found in the `ctdna_defaults.yaml` file. Due to low purity, variant allele frequencies in circulating DNA will typically be much lower than those in solid tumour samples. To allow for this, the minimum allele frequency filters are decreased. ``` filters: default: min_tumour_variant_reads: 5 min_tumour_allele_frequency: 0.01 ct_min_tumour_allele_frequency: 0.05 indel_min_tumour_allele_frequency: 0.05 min_normal_depth: 5 max_normal_allele_frequency: 0 remove_1000_genomes: true remove_exac: true pgm: indel_min_tumour_allele_frequency: 0.02 vardict: remove_germline_status: true isis: indel_min_tumour_allele_frequency: 0.02 ``` # Examples and Use Cases ## Generic Wrapper Script {#examples} Any call to the VariTAS pipeline requires data to be passed in the form of a dataframe, so the easiest way to interact with it is to create a simple wrapper R script. The goals of the wrapper are to collect the relevant input files in a dataframe, change any necessary VariTAS options, and call the relevant pipeline function. We can start by arranging the FASTQ files: ```{r wrapper1} library(varitas) output.directory <- '.' 
fastq.directory <- 'inst/extdata/fastq' fastq.files <- list.files( pattern = 'R1.*\\.fastq', path = fastq.directory, full.names = TRUE ) fastq.mate.files <- list.files( pattern = 'R2.*\\.fastq', path = fastq.directory, full.names = TRUE ) fastq.specification <- data.frame( # Extract the sample ID from the filename sample.id = gsub('.*Sample0(\\d\\d).*', '\\1', basename(fastq.files)), reads = fastq.files, mates = fastq.mate.files, stringsAsFactors = FALSE ) print(fastq.specification) ``` Often, you will need to change settings in the VariTAS config file. As shown in the Introduction, this can be done in one of two ways. The first is to use `set.varitas.options()` within your wrapper script like so: ```{r wrapper2, eval=FALSE, results=FALSE} set.varitas.options(filters.vardict.min_tumour_depth = 10) ``` This is suitable for smaller changes, but it is usually more convenient to have a copy of the VariTAS config file for each project or run of the pipeline. This way, all of the settings that are unlikely to change can be easily set and other users will be able to clearly see the config options you used. ```{r wrapper3, eval=FALSE, results=FALSE} config <- 'inst/extdata/varitas_config.yaml' overwrite.varitas.options(config) ``` Once the above steps are completed, you are ready to call the main function of the pipeline. ```{r wrapper4, eval=FALSE, results=FALSE} run.varitas.pipeline( file.details = fastq.specification, output.directory = output.directory, variant.callers = c('mutect', 'vardict'), quiet = FALSE, run.name = 'EXAMPLE', email = '[email protected]' ) ``` And those are all the necessary steps to run the pipeline. It will notify you by email when it is finished if you provide an address. On the first attempt, it is advisable to set the `quiet` parameter to `TRUE`, which prevents any of the tasks from running. This way, any potential problems can be fixed before a large number of jobs are created. A full wrapper script template is provided below for completeness and ease of copying-and-pasting. ```{r wrapper5, eval=FALSE, results=FALSE} ############################################################################### ## VariTAS Wrapper Script ## ############################################################################### ## Author: ## Adam Mills ############################################################################### ## Libraries: library(varitas) ############################################################################### ## Main output.directory <- '.' fastq.directory <- 'inst/extdata/fastq' fastq.files <- list.files( pattern = 'R1.*\\.fastq', path = fastq.directory, full.names = TRUE ) fastq.mate.files <- list.files( pattern = 'R2.*\\.fastq', path = fastq.directory, full.names = TRUE ) fastq.specification <- data.frame( sample.id = gsub('.*Sample0(\\d\\d).*', '\\1', basename(fastq.files)), reads = fastq.files, mates = fastq.mate.files, stringsAsFactors = FALSE ) config <- 'inst/extdata/varitas_config.yaml' overwrite.varitas.options(config) run.varitas.pipeline( file.details = fastq.specification, output.directory = output.directory, variant.callers = c('mutect', 'vardict'), quiet = FALSE, run.name = 'EXAMPLE', email = '[email protected]' ) ``` ## Variant Calling with Matched Normal Data from normal tissue can be used for matched somatic variant calling in the pipeline. When creating your FASTQ specification dataframe, include the columns `patient.id` and `tissue` and the pipeline will submit matched normal data to the variant callers. 
```{r matched1} fastq.specification <- data.frame( sample.id = gsub('.*Sample0(\\d\\d).*', '\\1', basename(fastq.files)), patient.id = c('X', 'X', 'Y', 'Y'), tissue = c('tumour', 'normal', 'tumour', 'normal'), reads = fastq.files, mates = fastq.mate.files, stringsAsFactors = FALSE ) print(fastq.specification) ``` ## Ion PGM Data Data produced by an Ion PGM system can also be processed by this pipeline using a different function. If you'd like to incorporate the variants called by the machine in the pipeline, simply pass both the BAM files and the VCF files into `run.varitas.pipeline.hybrid()`. Data from Ion Proton systems can be used in the same way by setting the `proton` parameter to `TRUE`. ```{r hybrid1} bam.directory <- 'inst/extdata/bam' bam.files <- list.files( pattern = 'Sample.*\\.bam', path = bam.directory, full.names = TRUE ) vcf.directory <- 'inst/extdata/vcf' vcf.files <- list.files( pattern = 'Sample.*\\.vcf', path = vcf.directory, full.names = TRUE ) bam.specification <- data.frame( sample.id = gsub('^Sample_(\\d+).*', '\\1', basename(bam.files)), tumour.bam = bam.files, stringsAsFactors = FALSE ) vcf.specification <- data.frame( sample.id = gsub('^Sample_(\\d+).*', '\\1', basename(vcf.files)), vcf = vcf.files, caller = rep('pgm', length(vcf.files)), stringsAsFactors = FALSE ) print(bam.specification) print(vcf.specification) ``` ```{r hybrid2, eval=FALSE, results=FALSE} run.varitas.pipeline.hybrid( bam.specification = bam.specification, vcf.specification = vcf.specification, output.directory = 'inst/extdata/output/', proton = TRUE, run.name = 'EXAMPLE', quiet = FALSE, email = '[email protected]' ); ``` In this version of the pipeline, the alignment stage is skipped and the Ion PGM variant data will be incorporated into the final reports. ## MiniSeq Data To enable users to quickly build file specifications for MiniSeq runs, the VariTAS pipeline has a function `prepare.miniseq.specifications()`. When passed a MiniSeq sample sheet and the path to a MiniSeq directory, the function will parse through the directory and look for FASTQ/ BAM/ VCF files for each of the samples. By default the `Sample_ID` column of the MiniSeq sample sheet, up to the first dash, is taken as the sample ID. `prepare.miniseq.specifications()` returns a list with elements corresponding to the different file types that have been found. For example, if VCF files were present in the VCF directory, a VCF specification will be named as `vcf` in the result. Note that you will have to add a column `caller` to the VCF specification before it can be used in the pipeline. ```{r hybrid3} miniseq.sheet <- 'inst/extdata/miniseq/Example_template.csv' miniseq.directory <- 'inst/extdata/miniseq' miniseq.info <- prepare.miniseq.specifications(miniseq.sheet, miniseq.directory) fastq.specification <- miniseq.info[[ 1 ]] vcf.specification <- miniseq.info[[ 2 ]] vcf.specification['caller'] <- rep('miniseq', nrow(vcf.specification)) print(fastq.specification) print(vcf.specification) ``` ### Incorporating MiniSeq Variant Calls The dataframes generated by the `prepare.miniseq.specifications` function can be fed into the standard pipeline, or they can be used in the hybrid pipeline. In the latter case, you are able to pass the VCF files much like the Ion PGM scenario in Example 2. By doing so, the pipeline will include the MiniSeq variant calls in the final output. 
```{r hybrid4, eval=FALSE, results=FALSE} run.varitas.pipeline.hybrid( fastq.specification = fastq.specification, vcf.specification = vcf.specification, output.directory = 'inst/extdata/output/', run.name = 'EXAMPLE', quiet = FALSE, email = '[email protected]' ) ``` ## References [^1]: Ion machines use SFF files, which are then converted back to FASTQ. This results in the loss of information on read quality. Another problem with aligning the Ion Torrent FASTQs is the distinct homopolymer error profiles of ion semiconductor sequencing. This is accounted for with the machine aligner, but not by BWA. [^2]: Data from phase 3 of the 1,000 genomes project is obtained through ANNOVAR.
/scratch/gouwar.j/cran-all/cranData/varitas/vignettes/introduction.Rmd
#' Estimate the covariance of estimated parameters using a bootstrap based method
#'
#' \code{bootcovjmcm} gives the estimation of the covariance of estimated parameters returned by \code{jmcm} by using a bootstrap based method.
#'
#' @param object a fitted joint mean-covariance model of class "jmcmMod", returned by the function \code{jmcm}.
#' @param mydata the data frame used in fitting the model.
#' @param numboot the number of bootstrap replications
#' @return an estimated covariance matrix of the estimated parameters.
#' @references [1] Liu, R.Y. (1988) "Bootstrap Procedure under Some Non-i.i.d. Models." Annals of Statistics, 16, 1696-1708.
#' @examples
#' cattleA <- cattle[cattle$group=='A', ]
#' fit.mcd <- jmcm(weight|id|I(ceiling(day/14+1))~1|1,
#'                 data = cattleA,
#'                 cov.method = "mcd",
#'                 triple = c(1,1,1))
#' bootcovjmcm(fit.mcd, cattleA, 5)
#' ## A larger number of replications is needed to achieve accuracy;
#' ## however, it may take hours.
#' \donttest{bootcovjmcm(fit.mcd, cattleA, 500)}
#' @import jmcm
#' @importFrom Matrix bdiag
#' @importFrom stats cov as.formula
#' @export
bootcovjmcm <- function(object, mydata, numboot){
  ## object is the fitted jmcm model
  ## numboot is the number of bootstrap replications
  ## mydata is the original data
  if (missing(object))
    stop("missing object.")
  if (missing(mydata))
    stop("mydata can't be missing.")
  if (missing(numboot))
    stop("Need to specify the number of replications.")
  if (class(object)[1] != "jmcmMod")
    stop("Object must be of class 'jmcmMod'.")

  SS <- as.list(x = c(1:length(object@args$m)))
  mean_X <- getJMCM(object, name = "X")
  mean_beta <- getJMCM(object, name = "beta")
  objectformula <- object@call$formula
  YYY <- as.character(objectformula[[2]][[2]][[2]])
  res <- mydata[YYY] - mean_X %*% mean_beta

  ## Get the block diagonal matrix of per-subject Sigma^{1/2}
  getSigma <- function(i){
    getJMCM(object, name = "Sigma", sub.num = i)
  }
  listsigma <- lapply(X = SS, getSigma)
  sqrtsigma <- lapply(X = listsigma, expm::sqrtm)
  bdsigma <- bdiag(sqrtsigma)

  ## Now standardise the residuals by premultiplying with Sigma^{-1/2}
  res <- as.matrix(res)
  stdres <- solve(bdsigma, res)

  ## stdres is the i.i.d. residual to be bootstrapped
  booted <- sapply(1:numboot, function(o) sample(x = stdres, size = length(stdres), replace = TRUE))
  bootedls <- as.data.frame(booted)

  ## bootedls is a data.frame such that bootedls[[b]] contains the
  ## b-th bootstrapped iid residual; now multiply each [[b]] with the matrix
  trans_res <- function(Xdata){
    bdsigma %*% Xdata
  }
  tran <- lapply(X = bootedls, FUN = trans_res)

  ## tran is a list of transformed-back residuals that are
  ## ready to be added to Xb = Y
  Yboot <- function(res){
    mean_X %*% mean_beta + res
  }
  Ynew <- lapply(X = tran, FUN = Yboot)

  ## refit the model on each bootstrapped response and extract theta
  m2boot <- function(Yn2){
    mydata[as.character(objectformula[[2]][[2]][[2]])] <- as.numeric(Yn2)
    m2 <- jmcm(formula = as.formula(objectformula),
               data = mydata,
               cov.method = object@call$cov.method,
               triple = object@triple)
    getJMCM(m2, name = "theta")
  }
  betanew <- with(data = mydata, lapply(X = Ynew, FUN = m2boot))

  betacov <- cov(t(as.data.frame(betanew)))
  betacov
}
/scratch/gouwar.j/cran-all/cranData/varjmcm/R/bootcovjmcm.R
#' Estimate the covariance of estimated parameters using the explicit formula
#'
#' \code{covjmcm} is a combination of \code{covjmcm_mcd}, \code{covjmcm_acd}, and \code{covjmcm_hpc}.
#' It identifies the corresponding type of the model, i.e. MCD, ACD, or HPC, and calculates the estimation
#' of the covariance of estimated parameters using the explicit formula,
#' which is the inverse of the estimated Fisher's information matrix.
#'
#' @param object a fitted joint mean-covariance model of class "jmcmMod", returned by the function \code{jmcm}.
#' @return an estimated covariance matrix of the estimated parameters.
#' @references [1] Pourahmadi, M., "Maximum likelihood estimation of generalised linear models for multivariate normal covariance matrix," Biometrika
#' 87(2), 425-435 (2000).
#' @references [2] M. Maadooliat, M. Pourahmadi and J. Z. Huang, "Robust estimation of the correlation
#' matrix of longitudinal data", Statistics and Computing 23, 17-28, (2013).
#' @references [3] W. Zhang, C. Leng, and C. Y. Tang (2015), "A joint modelling approach for longitudinal studies,"
#' Journal of the Royal Statistical Society. Series B. 77, 219-238.
#' @examples
#' ## balanced data
#' cattleA <- cattle[cattle$group=='A', ]
#' fit.mcd <- jmcm(weight|id|I(ceiling(day/14+1))~1|1,
#'                 data = cattleA, cov.method = "mcd",
#'                 triple = c(8,3,4))
#' cov.mcd <- covjmcm(fit.mcd) ## same as covjmcm_mcd(fit.mcd)
#' ## unbalanced data
#' ## This may take about 1.25 min.
#' \donttest{
#' fit.hpc <- jmcm(I(sqrt(cd4)) | id | time ~ 1 | 1,
#'                 data = aids, triple = c(8,1,1),
#'                 cov.method = "hpc")
#' cov.hpc <- covjmcm(fit.hpc) ## same as covjmcm_hpc(fit.hpc)}
#' @seealso \code{\link{covjmcm_mcd}}, \code{\link{covjmcm_acd}}, and \code{\link{covjmcm_hpc}}
#' @export
covjmcm <- function(object){
  ## object is a fitted jmcm model
  if (missing(object))
    stop("missing object.")
  if (class(object)[1] != "jmcmMod")
    stop("Object must be of class 'jmcmMod'.")

  method <- object@call$cov.method
  if (method == "mcd")
    cov <- covjmcm_mcd(object)
  else if (method == "acd")
    cov <- covjmcm_acd(object)
  else if (method == "hpc")
    cov <- covjmcm_hpc(object)
  else
    stop("Method must be one of 'mcd', 'acd', or 'hpc'.")
  cov
}
/scratch/gouwar.j/cran-all/cranData/varjmcm/R/covjmcm.R
#' Calculate the estimation of the covariance of estimated parameters in a ACD model, via the explicit formula. #' #' \code{covjmcm_acd} calculates the estimation of the covariance of estimated parameters in a ACD model using #' the explicit formula, which is the inverse of the estimated Fisher's information matrix. #' #' @param object a fitted joint mean-covariance model of class "jmcmMod", returned by the function \code{jmcm}. #' @return an estimated covariance matrix of the estimated parameters in a ACD model. #' @references [1] M. Maadooliat, M. Pourahmadi and J. Z. Huang, "Robust estimation of the correlation #' matrix of longitudinal data", Statistics and Computing 23, 17-28, (2013). #' @examples #' ##This may take more than 5s. #' \donttest{ #' cattleA <- cattle[cattle$group=='A', ] #' fit.acd <- jmcm(weight|id|I(ceiling(day/14+1))~1|1, #' data = cattleA, cov.method = "acd", #' triple = c(8,3,4)) #' cov.acd <- covjmcm_acd(fit.acd)} #' @seealso \code{\link{covjmcm}}, \code{\link{covjmcm_mcd}}, and \code{\link{covjmcm_hpc}} #' @importFrom MASS ginv #' @export covjmcm_acd <- function(object){ ##object is a fitted jmcm model if (missing(object)) stop("missing object.") if(object@call$cov.method!="acd") stop("Method must be acd") m <- getJMCM(object, name="m") n <- length(m) q <- length(getJMCM(object,"gamma")) p <- length(getJMCM(object,"beta")) d <- length(getJMCM(object,"lambda")) p1 <- p+d p2 <- p+q+d I <- matrix(0, p2, p2) L1order<-function(Wi,r){ #1order-derivative for gamma(s) l1 <- matrix(0,m[i],m[i]) for(j in 2:m[i]){ for(k in 1:(j-1)) l1[j,k] <- Wi[,r][(sum(1:(j-1))-(j-1)+k)] } l1 } for(i in 1:n){ Xi <- getJMCM(object, name="X",sub.num=i) Zi <- getJMCM(object, name="Z",sub.num=i) Wi <- getJMCM(object, name="W",sub.num=i) Sigma <- getJMCM(object, name="Sigma", sub.num=i) Li <- getJMCM(object, name="T", sub.num=i) Ti <- ginv(Li) Di <- as.vector(diag(getJMCM(object, name="D", sub.num=i))) I11 <- t(Xi) %*% ginv(Sigma) %*% Xi I22 <- matrix(0, d, d) if(m[i]==1) { for(r in 1:d){ for (s in 1:d){ temp1 <- tcrossprod(Li)%*%(Zi[,r]*tcrossprod(Ti))*Zi[,s] I22[r,s] <- (1/4)*(crossprod(Zi[,r],Zi[,s]) + temp1) } } I33 <- matrix(0,q, q) I32 <- matrix(0, q, d) } if(m[i]>1){ for(r in 1:d){ for (s in 1:d){ temp1 <- tcrossprod(Li)%*%(Zi[,r]*tcrossprod(Ti))%*%diag(Zi[,s]) I22[r,s] <- (1/4)*(crossprod(Zi[,r],Zi[,s]) + sum(diag(temp1))) } } I33 <- matrix(0, q, q) for(r in 1:q){ for(s in 1:q){ temp2 <- tcrossprod(L1order(Wi, r), L1order(Wi,s))%*%crossprod(Ti) I33[r,s] <- sum(diag(temp2)) } } ##alternative formula, not simplified, same result ##for(r in 1:q){ ##for(s in 1:q){ ##temp21 <- (tcrossprod(L1order(Wi, r), Li)+tcrossprod(Li,L1order(Wi, r)))%*% ##crossprod(Ti)%*%(tcrossprod(L1order(Wi, s), Li)+tcrossprod(Li,L1order(Wi, s))) ##temp22 <- tcrossprod(L1order(Wi, r), L1order(Wi,s))+tcrossprod(L1order(Wi, s), L1order(Wi,r)) ##temp2 <- (2*temp21-temp22) %*% crossprod(Ti) ##I33[r,s] <- sum(diag(temp2))/2 ##} ##} I32 <- matrix(0,q,d) for(r in 1:q){ for(s in 1:d){ temp3 <- Di*tcrossprod(L1order(Wi,r),Li)%*%(Zi[,s]*diag(Di))%*%ginv(Sigma) I32[r,s] <- sum(diag(temp3))/2 } } ## alternative formula, not simplified, same result ## for(r in 1:q){ ## for(s in 1:d){ ## temp3 <- diag(Di)%*%(tcrossprod(L1order(Wi,r),Li)+tcrossprod(Li, L1order(Wi,r)))%*% ## diag(Zi[,s])%*%diag(Di)%*%ginv(Sigma) ## I32[r,s] <- sum(diag(temp3))/2}} } I[1:p,1:p] <- I[1:p,1:p] + I11 I[(p+1):p1,(p+1):p1] <- I[(p+1):p1,(p+1):p1] + I22 I[(p1+1):p2,(p1+1):p2] <- I[(p1+1):p2,(p1+1):p2] + I33 I[(p1+1):p2,(p+1):p1] <- I[(p1+1):p2,(p+1):p1] + 
I32 } I[(p+1):p1,(p1+1):p2] <- t(I[(p1+1):p2,(p+1):p1]) cov <- ginv(I) cov }
/scratch/gouwar.j/cran-all/cranData/varjmcm/R/covjmcm_acd.R
#' Calculate the estimation of the covariance of estimated parameters in a HPC model, via the explicit formula.
#'
#' \code{covjmcm_hpc} gives the estimation of the covariance of estimated parameters in a HPC model using
#' the explicit formula, which is the inverse of the estimated Fisher's information matrix.
#'
#' @param object a fitted joint mean-covariance model of class "jmcmMod", returned by the function \code{jmcm}.
#' @return an estimated covariance matrix of the estimated parameters in a HPC model.
#' @references [1] W. Zhang, C. Leng, and C. Y. Tang (2015), "A joint modelling approach for longitudinal studies,"
#' Journal of the Royal Statistical Society. Series B. 77, 219-238.
#' @examples
#' ##This may take more than 1 min.
#' \donttest{
#' cattleA <- cattle[cattle$group=='A', ]
#' fit.hpc <- jmcm(weight|id|I(ceiling(day/14+1))~1|1,
#'                 data = cattleA, cov.method = "hpc",
#'                 triple = c(8,3,4))
#' cov.hpc <- covjmcm_hpc(fit.hpc)}
#' @seealso \code{\link{covjmcm}}, \code{\link{covjmcm_mcd}}, and \code{\link{covjmcm_acd}}
#' @importFrom MASS ginv
#' @export

covjmcm_hpc <- function(object){
  ##object is a fitted jmcm model
  if (missing(object))
    stop("missing object.")
  if(object@call$cov.method!="hpc")
    stop("Method must be hpc")
  m <- getJMCM(object, name="m")
  n <- length(m)
  gamma <- getJMCM(object,"gamma")
  q <- length(gamma)
  p <- length(getJMCM(object,"beta"))
  d <- length(getJMCM(object,"lambda"))
  p1 <- p+d
  p2 <- p+q+d
  I <- matrix(0, p2, p2)
  for(i in 1:n){
    Xi <- getJMCM(object, name="X", sub.num=i)
    Zi <- getJMCM(object, name="Z", sub.num=i)
    Wi <- getJMCM(object, name="W", sub.num=i)
    Sigma <- getJMCM(object, name="Sigma", sub.num=i)
    Ti <- getJMCM(object, name="T", sub.num=i)
    D <- as.vector(diag(getJMCM(object, name="D", sub.num=i)))
    R <- Ti %*% t(Ti)
    I11 <- t(Xi) %*% ginv(Sigma) %*% Xi
    I22 <- (1/4)*(t(Zi)%*%(diag(rep(1,m[i]))+ginv(R)*R)%*%Zi)
    if(m[i]==1){
      I33 <- matrix(0, q, q)
      I32 <- matrix(0, q, d)
    }
    phi <- Wi %*% gamma
    Ai <- ginv(Ti)
    Ei <- (1/tan(as.vector(phi))) * Wi
    ##dTijj/dgamma
    Gi <- matrix(0, m[i], q)
    ##dTijk/dgamma
    Fi <- matrix(0, sum(1:(m[i]-1)), q)
    ##b_{ijk}
    Bi <- matrix(0, sum(1:(m[i]-1)), q)
    if(m[i]>1){
      for(j in 2:m[i]){
        temp <- Ei[(sum(1:(j-1))-(j-1)+1):sum(1:(j-1)), ]
        Gi[j,] <- switch(class(temp)[1],
                         "numeric" = Ti[j,j] * temp,
                         "matrix" = Ti[j,j] * colSums(temp))
        for(k in 1:(j-1)){
          k1 <- sum(1:(j-1))-(j-1)+k
          if(k==1)
            Fi[k1, ] <- -Ti[j,k] * tan(phi[k1,]) * Wi[k1,]
          temp0 <- Ei[(k1-k+1):(k1-1), ]
          if(k>1)
            Fi[k1, ] <- switch(class(temp0)[1],
                               "numeric" = Ti[j,k]*(-tan(phi[k1,])*Wi[k1,] + temp0),
                               "matrix" = Ti[j,k]*(-tan(phi[k1,])*Wi[k1,] + colSums(temp0)))
        }
      }
      for(j in 2:m[i]){
        for(k in 1:(j-1)){
          k1 <- sum(1:(j-1))-(j-1)+k
          for(l in (k+1):j)
            Bi[k1, ] <- Bi[k1, ] + Ai[j,l]*Fi[(sum(1:(l-1))-(l-1)+k), ]
          Bi[k1,] <- Bi[k1,] + Ai[j, k]*Gi[k, ]
        }
      }
      I33 <- matrix(0, q, q)
      for(j in 2:m[i]){
        temp1 <- Bi[(sum(1:(j-1))-(j-1)+1):sum(1:(j-1)),]
        temp2 <- switch(class(temp1)[1],
                        "numeric" = tcrossprod(Bi[(sum(1:(j-1))-(j-1)+1):sum(1:(j-1)),]),
                        "matrix" = crossprod(Bi[(sum(1:(j-1))-(j-1)+1):sum(1:(j-1)),]))
        I33 <- I33 + 2*tcrossprod(Gi[j, ]/Ti[j,j]) + temp2
      }
      I32 <- matrix(0, q, d)
      for(j in 2:m[i]){
        b <- matrix(0, q, d)
        for(k in 1:(j-1)){
          a <- rep(0, d)
          for(l in k:j)
            a <- a + Ti[l,k]*Ai[j,k]*Zi[l,]
          b <- b + as.matrix(Bi[sum(1:(j-1))-(j-1)+k,]) %*% t(as.matrix(a))
        }
        I32 <- I32 + as.matrix(Gi[j, ]/Ti[j,j]) %*% Zi[j, ] + (1/2)*b
      }
    }
    I[1:p,1:p] <- I[1:p,1:p] + I11
    I[(p+1):p1,(p+1):p1] <- I[(p+1):p1,(p+1):p1] + I22
    I[(p1+1):p2,(p1+1):p2] <- I[(p1+1):p2,(p1+1):p2] + I33
    I[(p+1):p1,(p1+1):p2] <- I[(p+1):p1,(p1+1):p2] + t(I32)
  }
  I[(p1+1):p2,(p+1):p1] <- t(I[(p+1):p1,(p1+1):p2])
  cov <- ginv(I)
  cov
}
/scratch/gouwar.j/cran-all/cranData/varjmcm/R/covjmcm_hpc.R
#' Calculate the estimation of the covariance of estimated parameters in a MCD model, via the explicit formula. #' #' \code{covjmcm_mcd} gives an estimation of the covariance of estimated parameters in a MCD model using #' the explicit formula, which is the inverse of the estimated Fisher's information matrix. #' #' @param object a fitted joint mean-covariance model of class "jmcmMod", returned by the function \code{jmcm}. #' @return an estimated covariance matrix of the estimated parameters in a MCD model. #' @examples #' cattleA <- cattle[cattle$group=='A', ] #' fit.mcd <- jmcm(weight|id|I(ceiling(day/14+1))~1|1, #' data = cattleA, cov.method = "mcd", #' triple = c(8,3,4)) #' cov.mcd <- covjmcm_mcd(fit.mcd) #' @references [1] Pourahmadi, M., "Maximum likelihood estimation of generalised linear models for multivariate normal covariance matrix," Biometrika #' 87(2), 425–435 (2000). #' @seealso \code{\link{covjmcm}}, \code{\link{covjmcm_acd}}, and \code{\link{covjmcm_hpc}} #' @importFrom MASS ginv #' @export covjmcm_mcd <- function(object){ ##object is a fitted jmcm model if (missing(object)) stop("missing object.") if(object@call$cov.method!="mcd") stop("Method must be mcd") m <- getJMCM(object, name="m") n <- length(m) q <- length(getJMCM(object,"gamma")) p <- length(getJMCM(object,"beta")) d <- length(getJMCM(object,"lambda")) p1 <- p+d p2 <- p+q+d I <- matrix(0, p2, p2) for(i in 1:n){ Xi <- getJMCM(object, name="X",sub.num=i) Zi <- getJMCM(object, name="Z",sub.num=i) Wi <- getJMCM(object, name="W",sub.num=i) Sigma <- getJMCM(object, name="Sigma", sub.num=i) T <- getJMCM(object, name="T", sub.num=i) D <- as.vector(diag(getJMCM(object, name="D", sub.num=i))) I11 <- t(Xi) %*% ginv(Sigma) %*% Xi I22 <- (1/2)*(t(Zi)%*%Zi) if(m[i]==1){I33 <- matrix(0,q,q) I23 <- matrix(0,d,q)} if(m[i]>1){ wi <- array(0, dim=c(q , q, m[i])) wi[,,1] <- matrix(0, q, q) for(j in 2:m[i]){ for(k in 1:(j-1)){ for(l in 1:(j-1)) wi[,,j] <- wi[,,j] + (Sigma[k,l]/D[j])* (tcrossprod(Wi[sum(1:(j-1))-(j-1)+k, ],Wi[sum(1:(j-1))-(j-1)+l, ])) } } I33 <- apply(wi,c(1,2),sum) A <- Sigma %*% t(T) B <- matrix(0, m[i], q) for (j in 2:m[i]) { for (k in 1:(j-1)) B[j,] <- B[j,] + A[k,j] * Wi[sum(1:(j-1))-(j-1)+k, ] } I23 <- t(Zi) %*% ((1/D)*B) } I[1:p,1:p] <- I[1:p,1:p] + I11 I[(p+1):p1,(p+1):p1] <- I[(p+1):p1,(p+1):p1] + I22 I[(p1+1):p2,(p1+1):p2] <- I[(p1+1):p2,(p1+1):p2] + I33 I[(p+1):p1,(p1+1):p2] <- I[(p+1):p1,(p1+1):p2] + I23 } I[(p1+1):p2,(p+1):p1] <- t(I[(p+1):p1,(p1+1):p2]) cov <- ginv(I) cov }
/scratch/gouwar.j/cran-all/cranData/varjmcm/R/covjmcm_mcd.R
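Across the three covjmcm_* functions above, the per-subject contributions are assembled into a Fisher information with the same block layout before inversion. Schematically, as a sketch in the code's own block indices, with $p$, $d$ and $q$ the lengths of beta, lambda and gamma:

$$
\mathcal{I} \;=\; \sum_{i=1}^{n}
\begin{pmatrix}
I_{11}^{(i)} & 0 & 0 \\
0 & I_{22}^{(i)} & I_{23}^{(i)} \\
0 & I_{23}^{(i)\top} & I_{33}^{(i)}
\end{pmatrix},
\qquad
\widehat{\operatorname{Cov}}\big(\hat\beta, \hat\lambda, \hat\gamma\big) \;=\; \mathcal{I}^{-1}.
$$

The mean parameters are information-orthogonal to the covariance parameters; the only cross block ($I_{23}$, called I32 in the ACD and HPC code) links the variance and dependence parameters, and ginv() is used for the final inversion.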
#' varjmcm: Estimations for the Covariance of Estimated Parameters in Joint Mean-Covariance Models
#'
#' The package provides estimations of the covariance of estimated parameters in joint
#' mean-covariance models, as fitted with the 'jmcm' package. Two methods are available.
#' \code{bootcovjmcm} calculates the covariance estimation via a bootstrap based method. \code{covjmcm} uses an explicit formula, i.e. the inverse of the estimated Fisher's information, to calculate the covariance estimation.
#' The bootstrap method may need a large number of replications and thus may be time consuming.
#' The explicit formula in the second method is asymptotically correct, and thus is valid only when the sample size is large.
#'
#' @seealso \code{\link{covjmcm}} and \code{\link{bootcovjmcm}} for more details and examples.
#'
#' @references [1] Pan J, Pan Y (2017). "jmcm: An R Package for Joint Mean-Covariance Modeling of Longitudinal Data." Journal of Statistical Software, 82(9), 1--29.
#' @references [2] Pourahmadi, M., "Maximum likelihood estimation of generalised linear models for multivariate normal covariance matrix," Biometrika
#' 87(2), 425–435 (2000).
#' @references [3] M. Maadooliat, M. Pourahmadi and J. Z. Huang, "Robust estimation of the correlation
#' matrix of longitudinal data", Statistics and Computing 23, 17-28, (2013).
#' @references [4] W. Zhang, C. Leng, and C. Y. Tang (2015), "A joint modelling approach for longitudinal studies,"
#' Journal of the Royal Statistical Society. Series B. 77, 219-238.
#' @docType package
#' @name varjmcm
NULL
/scratch/gouwar.j/cran-all/cranData/varjmcm/R/package-varjmcm.R
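A short usage sketch combining the two estimators, reusing the calls from the examples above; it assumes the jmcm package and its bundled cattle data are available, and the small number of replications is for illustration only.

library(jmcm)
cattleA <- cattle[cattle$group == 'A', ]
fit.mcd <- jmcm(weight | id | I(ceiling(day/14 + 1)) ~ 1 | 1,
                data = cattleA, cov.method = "mcd", triple = c(8, 3, 4))
cov.explicit <- covjmcm(fit.mcd)                 # inverse of the estimated Fisher information
cov.boot     <- bootcovjmcm(fit.mcd, cattleA, 5) # residual bootstrap, B = 5 for illustration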
#2. amd amd.cal=function(y,x,res1){ res1$tpm=res1$fpm=res1$amd=NA M=nrow(res1) x1=x[y==1] x0=x[y==0] for(m in 1:M){ x11=x1[x1>res1$th[m]] x01=x0[x0>res1$th[m]] n.tpm=length(x11) n.fpm=length(x01) tpm=fpm=NA if(n.tpm==0){; tpm=max(x1) }else{; tpm=mean(x11) } if(n.fpm==0){; fpm=max(x0) }else{; fpm=mean(x01) } res1$tpm[m]=tpm res1$fpm[m]=fpm } res1$amd=res1$tpm-res1$fpm #3.return return(res1) }
/scratch/gouwar.j/cran-all/cranData/varoc/R/01.amd.cal.R
#3.iamd iamd.cal=function(res1,auc){ M=nrow(res1) K=M*10 fpf.k=seq(0,1,length.out=K) amd.k=tpm.k=fpm.k=rep(NA,K) for(k in 1:K){ which.k=min(which(res1$fpf<=fpf.k[k])) if(length(which.k)){ amd.k[k]=res1$amd[which.k] tpm.k[k]=res1$tpm[which.k] fpm.k[k]=res1$fpm[which.k] } } iamd=mean(amd.k,na.rm=TRUE) ccm.idx=!(is.na(tpm.k)|is.na(fpm.k)) #considered when both tpm.k and fpm.k are not missing itpm=mean(tpm.k[ccm.idx]) ifpm=mean(fpm.k[ccm.idx]) res3=data.frame(auc=auc,itpm=itpm,ifpm=ifpm,iamd=iamd) #3.return return(res3) }
/scratch/gouwar.j/cran-all/cranData/varoc/R/01.iamd.cal.R
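In the code's notation, iamd.cal approximates the integral of the above-threshold mean difference over the false positive fraction on an equally spaced grid of $K = 10M$ points; a sketch, not a quoted formula:

$$
\mathrm{IAMD} \;=\; \int_0^1 \mathrm{AMD}(t)\, dt \;\approx\; \frac{1}{K} \sum_{k=1}^{K} \mathrm{AMD}(t_k),
\qquad \mathrm{AMD}(t) = \mathrm{TPM}(t) - \mathrm{FPM}(t),
$$

where, for each grid value $t_k$, the threshold used is the first one whose FPF does not exceed $t_k$; grid values where the lookup fails yield NA and are dropped (na.rm = TRUE), and the integrated TPM/FPM are averaged over the grid points where both are available.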
#1. ROC stsp.cal=function(y,x){ roc1=pROC::roc(response=y,predictor=x,levels=c(0,1),direction="<") res1=data.frame(th=roc1$thresholds, tpf=roc1$sensitivities, fpf=(1-roc1$specificities), tpm=NA,fpm=NA,amd=NA) auc=as.numeric(roc1$auc) return(list(res1=res1,auc=auc)) }
/scratch/gouwar.j/cran-all/cranData/varoc/R/01.stsp.cal.R
amd=function(y,x,fpf=0.3,pval="no",alternative="greater",B=2000,conf.level=0.95){

  #0.data frame
  df=data.frame(y=y,x=x)
  n=nrow(df)

  #1. ROC & amd at all points
  roc.fit=stsp.cal(y=y,x=x)
  res1=roc.fit$res1
  auc1=roc.fit$auc
  res1=amd.cal(y=y,x=x,res1=res1)           #amd at all th
  res2=res1[tail(which(res1$fpf>=fpf),1),]  #amd at fpf
  res3=iamd.cal(res1=res1,auc=auc1)         #iamd

  ###
  #3. amd, bootstrap
  ###
  if(pval=="yes"){
    RES1=matrix(NA,B,nrow(res1))
    RES2=RES3=rep(NA,B)
    for(b in 1:B){
      df.b=df[sample(1:n,replace=TRUE),]
      y.b=df.b$y
      x.b=df.b$x

      RES1[b,]=amd.cal(y=y.b,x=x.b,res1=res1)$amd #res1, not res1.b, since we use the same cutoff

      roc.fit.b=stsp.cal(y=y.b,x=x.b)
      res1.b=roc.fit.b$res1
      auc1.b=roc.fit.b$auc
      res1.b=amd.cal(y=y.b,x=x.b,res1=res1.b) #res1.b: cutoffs recomputed on the bootstrap sample
      RES2[b]=res1.b[tail(which(res1.b$fpf>=fpf),1),]$amd
      RES3[b]=iamd.cal(res1=res1.b,auc=NA)$iamd
    }
    res1.se.b=sqrt(apply(RES1,2,var,na.rm=TRUE))
    res2.se.b=sqrt(var(RES2,na.rm=TRUE))
    res3.se.b=sqrt(var(RES3,na.rm=TRUE))

    res1$lcl=NA; res1$ucl=NA; res1$z=NA; res1$pvalue=NA
    res2$lcl=NA; res2$ucl=NA; res2$z=NA; res2$pvalue=NA
    res3$lcl=NA; res3$ucl=NA; res3$z=NA; res3$pvalue=NA

    res1$z=res1$amd/res1.se.b
    res2$z=res2$amd/res2.se.b
    res3$z=res3$iamd/res3.se.b

    if(alternative=="two.sided"){
      crit=qnorm(1-(1-conf.level)/2)
      res1$lcl=res1$amd-crit*res1.se.b;  res1$ucl=res1$amd+crit*res1.se.b
      res2$lcl=res2$amd-crit*res2.se.b;  res2$ucl=res2$amd+crit*res2.se.b
      res3$lcl=res3$iamd-crit*res3.se.b; res3$ucl=res3$iamd+crit*res3.se.b

      res1$pvalue=2*pnorm(abs(res1$z),lower.tail=FALSE)
      res2$pvalue=2*pnorm(abs(res2$z),lower.tail=FALSE)
      res3$pvalue=2*pnorm(abs(res3$z),lower.tail=FALSE)
    }else if(alternative=="greater"){
      crit=qnorm(conf.level)
      res1$lcl=res1$amd-crit*res1.se.b;  res1$ucl=Inf
      res2$lcl=res2$amd-crit*res2.se.b;  res2$ucl=Inf
      res3$lcl=res3$iamd-crit*res3.se.b; res3$ucl=Inf

      res1$pvalue=pnorm(res1$z,lower.tail=FALSE)
      res2$pvalue=pnorm(res2$z,lower.tail=FALSE)
      res3$pvalue=pnorm(res3$z,lower.tail=FALSE)
    }else if(alternative=="less"){
      crit=qnorm(conf.level)
      res1$lcl=-Inf; res1$ucl=res1$amd+crit*res1.se.b
      res2$lcl=-Inf; res2$ucl=res2$amd+crit*res2.se.b
      res3$lcl=-Inf; res3$ucl=res3$iamd+crit*res3.se.b

      res1$pvalue=pnorm(res1$z)
      res2$pvalue=pnorm(res2$z)
      res3$pvalue=pnorm(res3$z)
    }

    colnames(res1)[colnames(res1)=="z"]="zAMD"
    colnames(res2)[colnames(res2)=="z"]="zAMD"
    colnames(res3)[colnames(res3)=="z"]="zIAMD"
  }

  #4. summary
  rownames(res1)=NULL
  rownames(res2)=NULL
  rownames(res3)=NULL

  fit=list(df=df, fpf=fpf, res=res1, amd=res2, iamd=res3)
  return(fit)
}
/scratch/gouwar.j/cran-all/cranData/varoc/R/amd.R
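An end-to-end sketch of the workflow built from the functions above, on simulated data; it assumes the pROC and corrplot packages used internally are installed, and all numbers are illustrative.

set.seed(1)
y <- rbinom(200, 1, 0.4)            # disease status
x <- rnorm(200, mean = y)           # marker shifted upwards in cases
fit <- amd(y = y, x = x, fpf = 0.3) # AMD at FPF = 0.3, plus IAMD
fit$amd                             # threshold, TPF/FPF, TPM/FPM and AMD at the chosen FPF
fit$iamd                            # AUC, integrated TPM/FPM and IAMD
jdp(fit)                            # jittered dot plot with the selected threshold
varoc(fit)                          # ROC curve colored by AMD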
jdp=function(fit, min=NULL,max=NULL,eps=0.2,seed=1, main="JDP",ylab="x",xlab=c("y=0","y=1"), col=c("blue","red","gray","gray"), legend="top",lwd=1,lty=3, cex.main=1,cex.pt=1.5,cex.lab=1,cex.axis=1,cex.legend=1,digits=2){ set.seed(seed) fpf=fit$fpf #1. data y=fit$df$x #x is plotted as y-value d=fit$df$y #y is plotted as x-value y1=y[d==1] y0=y[d==0] #2. th at fpf res=fit$amd th=res$th tpf=res$tpf fpf=res$fpf tpm=res$tpm fpm=res$fpm amd=res$amd #3. plot #3.1. base plot ylim=range(y) if(!is.null(min)) ylim[1]=min if(!is.null(max)) ylim[2]=max n=length(d) d.jitter=d+stats::runif(n,-eps,eps) plot(y~d.jitter,xlim=c(0-eps,1+eps),ylim=ylim, main=main,cex.main=cex.main, ylab=ylab,xlab="",xaxt='n', cex=cex.pt,cex.lab=cex.lab,cex.axis=cex.axis) graphics::axis(1,xlab,at=c(0,1), cex.axis=cex.lab) abline(h=th,lwd=lwd,lty=lty) #3.2. color #_{disease , marker} d11=d.jitter[d==1&y>th]; d10=d.jitter[d==1&y<=th] d01=d.jitter[d==0&y>th]; d00=d.jitter[d==0&y<=th] y11=y1[y1>th]; y10=y1[y1<=th] y01=y0[y0>th]; y00=y0[y0<=th] points(y01~d01,col=col[1],pch=19,cex=cex.pt) points(y00~d00,col=col[3],pch=19,cex=cex.pt) points(y10~d10,col=col[4],pch=19,cex=cex.pt) points(y11~d11,col=col[2],pch=19,cex=cex.pt) #3.3. Add horizontal lines for TPM, FPM eps2=(eps)*0.9 graphics::arrows(-eps2, fpm, eps2, fpm, code=0,lwd=3) graphics::arrows(-eps2+1,tpm, eps2+1, tpm, code=0,lwd=3) #3.4. adding points again points(y~d.jitter,cex=cex.pt) #3.5.legend if(legend%in%c("bottomleft", "topleft", "topright", "bottomright", "left", "right", "top", "bottom", "center")){ #tpf.text=paste0("TPF=",format(round(tpf,digits),nsmall=digits)) #amd.text=paste0("AMD=",format(round(amd,digits),nsmall=digits)) #graphics::legend(legend,paste0(tpf.text,"\n",amd.text),bty='n',cex=cex.legend) tpf.text=paste0("TPF=",format(round(tpf,2),nsmall=2)) fpf.text=paste0("FPF=",format(round(fpf,2),nsmall=2)) amd.text=paste0("AMD=",format(round(amd,2),nsmall=2)) legend(legend,paste0(tpf.text,"\n", fpf.text,"\n", amd.text,"\n" ),bty='n',cex=cex.legend) } }
/scratch/gouwar.j/cran-all/cranData/varoc/R/jdp.R
varoc=function(fit,
               mzr="AMD",mzr.min=NULL,mzr.max=NULL,
               main="VAROC",ylab="True positive fraction",xlab="False positive fraction",
               col=c("#9932cc","#87ceeb","#ffe135","#f56642"),
               legend="right",lwd=1,
               cex.main=1,cex.axis=1,cex.lab=1,cex.legend=1,
               digits=2){

  #1.data
  y=fit$df$y
  x=fit$df$x

  res=fit$res
  tpf=res$tpf
  fpf=res$fpf
  M=nrow(res)
  auc=fit$iamd$auc

  MZR=toupper(mzr)
  if(is.null(res$z)) MZR="AMD"

  if(MZR=="ZAMD"){
    mzr=res$zAMD
    mzr.title="zAMD"
    iamd=fit$iamd$zIAMD
    iamd.title="zIAMD="
  }else{
    mzr=res$amd #overlap
    mzr.title="AMD"
    iamd=fit$iamd$iamd
    iamd.title="IAMD="
  }

  #2. mzr range
  mzr.min0=min(mzr,na.rm=TRUE)
  mzr.max0=max(mzr,na.rm=TRUE)
  if(is.null(mzr.min)){
    mzr.min=mzr.min0
  }else{
    mzr.min=min(mzr.min,mzr.min0)
  }
  if(is.null(mzr.max)){
    mzr.max=mzr.max0
  }else{
    mzr.max=max(mzr.max,mzr.max0)
  }

  #3.plot
  #3.1. range
  mzr.order=round((mzr-mzr.min)/(mzr.max-mzr.min)*M,0) #kind of normalization, match AMD to color
                                                       #map from mzr to [0,1]*M
  mzr.order[mzr.order==0]=1
  col0=grDevices::colorRampPalette(col)(M)
  col1=col0[mzr.order] #line colors
  M2=length(col)
  col2 = grDevices::colorRampPalette(col)(M2) #legend colors

  plot(NA,col=col1,type='p',pch=19,
       ylab=ylab,xlab=xlab,
       main=main,cex.main=cex.main,
       cex.axis=cex.axis,cex.lab=cex.lab,lwd=lwd,xlim=c(0,1),ylim=c(0,1))
  graphics::abline(a=0,b=1,col="darkgray",lwd=lwd)
  for(m in 2:M) graphics::lines(c(tpf[m],tpf[m-1])~c(fpf[m],fpf[m-1]),col=col1[m],type='S',lwd=lwd)
  for(m in 1:M) graphics::points(tpf[m]~fpf[m],col=col1[m],cex=1,pch=19)

  #3.2. mzr legend
  mzr.cut=round(seq(from=mzr.min,to=mzr.max,length.out=M2+1),digits)
  mzr.cut=format(mzr.cut)

  rt=0.05 #move to the right because of the anti-diagonal line (rt=0 means no shift)
  text(0.5+rt,0.15,mzr.title,cex=cex.legend)
  corrplot::colorlegend(
    colbar=col2,
    labels=mzr.cut,
    at = seq(0, 1, len = M2+1),
    xlim = c(0.05, 0.90)+rt,
    ylim = c(0, 0.1),
    vertical = FALSE,
    cex=cex.legend)

  if(legend%in%c("bottomleft", "topleft", "topright", "bottomright",
                 "left", "right", "top", "bottom", "center")){
    AUC.text=paste0("AUC=",format(round(auc,2),nsmall=2))
    IAMD.text=paste0(iamd.title,format(round(iamd,2),nsmall=2))
    graphics::legend(legend,paste0(AUC.text,"\n",IAMD.text),adj=0,cex=cex.legend,bty='n')
  }
}
/scratch/gouwar.j/cran-all/cranData/varoc/R/varoc.R
############################################################################### ## infotheo-toolbox.R --- ## Author : Gilles Kratzer ## Document created : 07/09/2017 ## : 08/09/2017 ############################################################################### ##--------------------------------------------------------------------------------------------------------- ## Discretization of an arbitrary large number of variables joint variables depending on their distribution ## Implemented discretization methods: ## -user defined ## -fd ## -doane ## -sqrt ## -sturges ## -rice ## -scott ## -cencov ## -kmeans ## -terrell-scott ##--------------------------------------------------------------------------------------------------------- discretization <- function(data.df = NULL, discretization.method = "cencov", frequency = FALSE){ #tests: if(is.character(discretization.method)) { discretization.method <- tolower(discretization.method) discretization.method <- c("fd","doane","cencov","sturges","rice","scott","kmeans","terrell-scott")[pmatch(discretization.method,c("fd","doane","cencov","sturges","rice","scott","kmeans","terrell-scott"))] if ( (is.na(discretization.method)) || (length(discretization.method)>1) ){ discretization.method <- "cencov" warning("Discretization method not recognised; set to 'cencov'")} } else { if (!is.numeric(discretization.method) || discretization.method < 2 ) stop("For equal binning, discretization.method (as numeric) >=2 is required.") #if(!(discretization.method %in% c("fd","doane","cencov","sturges","rice","scott","kmeans","terrell-scott") | is.numeric(discretization.method))){stop("Wrong definition of the discretization.method's parameter")} } ##end of tests data.df <- as.data.frame(data.df) nobs <- nrow(data.df) nvar <- ncol(data.df) df.tab <- data.df ind <- sapply(data.df, is.numeric) ##=============================================================== ## Discrete cutoff provided ##=============================================================== if(is.numeric(discretization.method)){ fun.discrete <- function(df){ df.out <- as.factor(cut(as.numeric(df), breaks = discretization.method, include.lowest = TRUE)) return(as.factor(df.out)) } df.tab[ind] <- lapply(X = data.df[ind], FUN = fun.discrete) } ##=============================================================== ## kmeans ##=============================================================== if(discretization.method=="kmeans"){ fun.kmeans <- function(df){ wss <- (nobs-1)*sum(stats::var(df)) j <- 2 ratio <- 1 while(ratio>.2 & j<nobs) { set.seed(42) wss1 <- (stats::kmeans(df,centers=j,iter.max = 100000,algorithm = "MacQueen")$tot.withinss) ratio <- abs((wss1-wss)/wss) wss <- wss1 j <- j+1 } set.seed(42) df.out <- stats::kmeans(x = df,centers = j,algorithm = "MacQueen",iter.max = 100000)$cluster return(as.factor(df.out)) } df.tab[ind] <- lapply(X = data.df[ind], FUN = fun.kmeans) } ##=============================================================== ##Freedman-Diaconis rule ##=============================================================== if(discretization.method=="fd"){ fun.fd <- function(df){ cut.index <- seq(from = range(df,na.rm = TRUE)[1], to = range(df,na.rm = TRUE)[2], length.out = ceiling(abs(diff(range(df,na.rm = TRUE))/(2*stats::IQR(df) * nobs^(-1/3))))) df.out <- as.factor(cut(as.numeric(df), breaks = cut.index, include.lowest = TRUE)) } df.tab[ind] <- lapply(X = data.df[ind], FUN = fun.fd) } ##=============================================================== ##Doane’s formula 
##=============================================================== if(discretization.method=="doane"){ fun.doane <- function(df){ cut.index <- seq(from = range(df,na.rm = TRUE)[1], to = range(df,na.rm = TRUE)[2], length.out = ceiling(abs(1 + log2(nobs) + log2( 1 + (abs(skewness(df))/sqrt((6*(nobs)-2)/((nobs+1)*(nobs+2)))))))) df.out <- as.factor(cut(as.numeric(df), breaks = cut.index, include.lowest = TRUE)) return(df.out) } df.tab[ind] <- lapply(X = data.df[ind], FUN = fun.doane) } ##=============================================================== ##cencov ##=============================================================== if(discretization.method=="cencov"){ fun.sqrt <- function(df){ cut.index <- seq(from = range(df,na.rm = TRUE)[1], to = range(df,na.rm = TRUE)[2], length.out = ceiling(abs(nobs^(1/3)))) df.out <- as.factor(cut(as.numeric(df), breaks = cut.index, include.lowest = TRUE)) return(df.out) } df.tab[ind] <- lapply(X = data.df[ind], FUN = fun.sqrt) } ##=============================================================== ##Sturges ##=============================================================== if(discretization.method=="sturges"){ fun.sturges <- function(df){ cut.index <- seq(from = range(df,na.rm = TRUE)[1], to = range(df,na.rm = TRUE)[2], length.out = ceiling(abs(log(x = nobs,base = 2)))) df.out <- as.factor(cut(as.numeric(df), breaks = cut.index, include.lowest = TRUE)) return(df.out) } df.tab[ind] <- lapply(X = data.df[ind], FUN = fun.sturges) } ##=============================================================== ##Rice Rule ##=============================================================== if(discretization.method=="rice"){ fun.rice <- function(df){ cut.index <- seq(from = range(df,na.rm = TRUE)[1], to = range(df,na.rm = TRUE)[2], length.out = ceiling(abs(2*nobs^(1/3)))) df.out <- as.factor(cut(as.numeric(df), breaks = cut.index, include.lowest = TRUE)) return(df.out) } df.tab[ind] <- lapply(X = data.df[ind], FUN = fun.rice) } ##=============================================================== ##Scott rule ##=============================================================== if(discretization.method=="scott"){ fun.scott <- function(df){ cut.index <- seq(from = range(df,na.rm = TRUE)[1], to = range(df,na.rm = TRUE)[2], length.out = ceiling(abs(diff(range(df))/(3.5 * sd(df) / nobs^(1/3))))) df.out <- as.factor(cut(as.numeric(df), breaks = cut.index, include.lowest = TRUE)) return(df.out) } df.tab[ind] <- lapply(X = data.df[ind], FUN = fun.scott) } ##=============================================================== ##Terrell-Scott ##=============================================================== if(discretization.method=="terrell-scott"){ fun.terrell.scott <- function(df){ cut.index <- seq(from = range(df,na.rm = TRUE)[1], to = range(df,na.rm = TRUE)[2], length.out = ceiling(abs(2*nobs)^(1/3))) df.out <- as.factor(cut(as.numeric(df), breaks = cut.index, include.lowest = TRUE)) return(df.out) } df.tab[ind] <- lapply(X = data.df[ind], FUN = fun.terrell.scott) } df.tab <- as.data.frame(df.tab) names(df.tab) <- names(data.df) if(frequency){ return(list(table=table(df.tab),df.discr=df.tab)) }else{ return(df.tab) } } ##------------------------------------------------------------------------- ## Shanon Entropy ##------------------------------------------------------------------------- entropy.data <- function(freqs.table){ #normalization freqs.joint <- prop.table(freqs.table) #computing log part log.part <- ifelse(freqs.joint > 0, log(freqs.joint,base = 2), 0) #computing entropy entropy.out <- 
sum(freqs.joint * log.part) return(-entropy.out) } ##------------------------------------------------------------------------------------ ## Mutual Information ## Function that returns Mutual Information of a possibly discretized set of variables ##------------------------------------------------------------------------------------ mi.data <- function(X,Y,discretization.method=NULL,k=NULL){ ##H(X)+H(Y)-H(X,Y) if(!is.null(discretization.method)){ Hx <- entropy.data(discretization(data.df = X,discretization.method = discretization.method,frequency = TRUE)[[1]]) Hy <- entropy.data(discretization(data.df = Y,discretization.method = discretization.method,frequency = TRUE)[[1]]) Hxy <- entropy.data(discretization(data.df = cbind(X,Y),discretization.method = discretization.method,frequency = TRUE)[[1]]) mi <- Hx+Hy-Hxy } if(!is.null(k)){ mi <- FNN::mutinfo(X = X,Y = Y,k = k) } if(is.null(discretization.method) & is.null(k)){mi <- NA} return(mi) } ##------------------------------------------------------------------------------------ ## Mutual Information ## Function that returns Mutual Information of a discretized set of variables (optimized for discrete RV) ##------------------------------------------------------------------------------------ mi.data.discr <- function(X,Y){ ##H(X)+H(Y)-H(X,Y) Hx <- entropy.data(freqs.table = table(X)) Hy <- entropy.data(freqs.table = table(Y)) Hxy <- entropy.data(freqs.table = table(cbind(as.data.frame(X),as.data.frame(Y)))) mi <- Hx+Hy-Hxy return(mi) } ##------------------------------------------------------------------------------------ ## Table function for handling more than 2^31 elements ## Function that returns optimized table ##------------------------------------------------------------------------------------ # table.varrank <- function(x){ # # } ##EOF
/scratch/gouwar.j/cran-all/cranData/varrank/R/infotheo-toolbox.R
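A brief sketch of the helpers defined above, on simulated data with illustrative variable names:

set.seed(1)
x <- data.frame(a = rnorm(500), b = runif(500))
head(discretization(data.df = x, discretization.method = "sturges"))  # one factor per column
tab <- discretization(data.df = x, discretization.method = "kmeans", frequency = TRUE)$table
entropy.data(freqs.table = tab)                          # joint Shannon entropy (base 2)
mi.data(X = x$a, Y = x$b, discretization.method = "fd")  # close to 0 for independent variables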
############################################################################### ## internal-toolbox.R --- ## Author : Gilles Kratzer ## Document created : 08/09/2017 ############################################################################### ##------------------------------------------------------------------------- ## Function that computes skewness of a distribution ##------------------------------------------------------------------------- skewness <- function(x){ n <- length(x) (sum((x-mean(x))^3)/n)/(sum((x-mean(x))^2)/n)^(3/2) } ##EOF
/scratch/gouwar.j/cran-all/cranData/varrank/R/internal-toolbox.R
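The moment-based skewness above feeds Doane's rule in discretization(); a quick sanity check on simulated data:

set.seed(1)
skewness(rnorm(1e4))  # about 0 for a symmetric sample
skewness(rexp(1e4))   # about 2 for a right-skewed exponential sample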
############################################################################### ## plot.varrank.R --- ## Author : Gilles Kratzer ## Last modified : 13/12/2017 ## : 06/02/2018 (S3method) ############################################################################### plot.varrank <- function(x, ## block sepration colsep = TRUE, rowsep = TRUE, sepcol ="white", sepwidth=c(0.005,0.005), ## cell labeling cellnote = TRUE, notecex = 1.5, notecol = "black", digitcell = 3, ## Row/Column Labeling margins = c(6, 6, 4, 2), labelscex = 1.2, ## color key + density info colkey = NULL, densadj = 0.25, textlines = 2, ## plot labels main = NULL, maincex = 1, ... ){ ##scaling to [0,1]: scale201 <- function(x, low=min(x), high=max(x) ) return( (x-low)/(high - low)) x.algo <- x$algorithm x.scheme <- x$scheme x <- x[[2]] if(length(dimx <- dim(x)) != 2 || !is.numeric(x)) stop("varrank object 'x' must be a numeric matrix.") n <- dimx[1] n.2 <- dimx[2] if(n <= 1) stop("varrank object 'x' must have at least 2 rows and 2 columns.") if(!is.numeric(margins) || length(margins) != 4) stop("'margins' must be a numeric vector of length 4.") if(is.null(colkey)){ ##color rainbow ##definition of mypalette cool = rainbow(50, start=rgb2hsv(col2rgb('cyan'))[1], end=rgb2hsv(col2rgb('blue'))[1]) warm = rainbow(50, start=rgb2hsv(col2rgb('red'))[1], end=rgb2hsv(col2rgb('yellow'))[1]) cols = c(rev(cool), rev(warm)) mypalette <- colorRampPalette(cols)(255) }else{ mypalette <- colkey } op <- par(no.readonly = TRUE) on.exit(par(op)) ##layout if(x.algo=="forward"){ layout(matrix(c(1,1,1,1,1,1,2,2,2,2, 1,1,1,1,1,1,2,2,2,2, 1,1,1,1,1,1,2,2,2,2, 1,1,1,1,1,1,2,2,2,2, 1,1,1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1,1,1), nrow = 10, ncol = 10, byrow = TRUE)) } if(x.algo=="backward"){ layout(matrix(c(1,1,1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1,1,1, 1,1,1,1,1,1,2,2,2,2, 1,1,1,1,1,1,2,2,2,2, 1,1,1,1,1,1,2,2,2,2, 1,1,1,1,1,1,2,2,2,2), nrow = 10, ncol = 10, byrow = TRUE)) } par(mar = margins) extreme <- max(abs(x), na.rm=TRUE) breaks <- length(mypalette) breaks <- seq( -extreme, extreme, length=breaks + 1) image(1:n.2, 1:n, t(x[n:1,]), xlim = 0.5 + c(0, n), ylim = 0.5 + c(0, n), axes = FALSE, xlab = "", ylab = "", col=mypalette, breaks = breaks, ...) 
if(x.algo=="forward"){ ## add 'background' colored spaces to visually separate sections if(colsep) { rect(xleft =.5, ybottom=0, xright=.5+sepwidth[1], ytop=nrow(x)+1.5, lty=1, lwd=1, col=sepcol, border=sepcol) for(csep in 1:min(n.2,n-1)){ rect(xleft =csep+0.5, ybottom=0, xright=csep+0.5+sepwidth[1], ytop=nrow(x)+1.5-csep, lty=1, lwd=1, col=sepcol, border=sepcol) } } if(rowsep) { for(rsep in 1:n){ rect(xleft =0.5, ybottom= (nrow(x)+1-rsep)-0.5, xright=1.5+min(rsep,n.2-1,n-2), ytop = (nrow(x)+1-rsep)-0.5 - sepwidth[2], lty=1, lwd=1, col=sepcol, border=sepcol) } rect(xleft =0.5, ybottom= (nrow(x)+1)-0.5, xright=1.5 , ytop = (nrow(x)+1)-0.5 - sepwidth[2], lty=1, lwd=1, col=sepcol, border=sepcol) } axis(1, 1:n, labels= rownames(x[1:n,]), las= 2, tick= 0, cex.axis= labelscex ) } if(x.algo=="backward"){ ############### n.2 <- n.2 - 1 ## add 'background' colored spaces to visually separate sections if(colsep){ rect(xleft =.5, ybottom=0.5, xright=.5+sepwidth[1], ytop=nrow(x)+.5, lty=1, lwd=1, col=sepcol, border=sepcol) for(csep in 1:min(n.2,n-1)){ rect(xleft =csep+0.5, ybottom=-0.5+csep, xright=csep+0.5+sepwidth[1], ytop=nrow(x)+.5, lty=1, lwd=1, col=sepcol, border=sepcol) } } if(rowsep) { for(rsep in 1:n){ rect(xleft =0.5, ybottom= (nrow(x)+1-rsep)-0.5, xright=1.5+min(n-rsep, n.2-1), ytop = (nrow(x)+1-rsep)-0.5 - sepwidth[2], lty=1, lwd=1, col=sepcol, border=sepcol) } rect(xleft =0.5, ybottom= (nrow(x)+1)-0.5, xright=n.2+.5 , ytop = (nrow(x)+1)-0.5 - sepwidth[2], lty=1, lwd=1, col=sepcol, border=sepcol) } axis(3, 1:n, labels= rownames(x[n:1,]), las= 2, tick= 0, cex.axis= labelscex ) } axis(2, 1:n, labels= rownames(x[n:1,]), las= 2, tick= 0, cex.axis= labelscex ) if(cellnote){ cellnote <- round(x = x,digits = digitcell)[n:1,] text(x=c(col(cellnote)), y=c(row(cellnote)), labels=c(cellnote), col=notecol, cex=notecex) } ## title if(!is.null(main)) title(main, cex.main = 1.5*maincex) ##key zlim <- max(abs(min(breaks)),abs(max(breaks))) z <- seq(from = -zlim,to = zlim, length=length(mypalette)) image(z=matrix(z, ncol=1), col=mypalette,breaks = breaks, xaxt="n", yaxt="n",ylim = c(0,1)) ##density dens <- density(x, adjust=densadj, na.rm=TRUE,from=min(breaks), to=max(breaks)) dens$x <- scale201(dens$x, min(breaks), max(breaks)) lines(dens$x, dens$y / max(dens$y) * 0.95, col="black", lwd=1) yargs <- list(at=pretty(dens$y/max(dens$y)) * 0.95, labels=pretty(dens$y/max(dens$y))) yargs$side <- 2 do.call(axis, yargs) title("Score density") lv <- pretty(breaks) xv <- scale201(as.numeric(lv), min(breaks), max(breaks)) xargs <- list(at=xv, labels=lv) xargs$side <- 1 do.call(axis, xargs) mtext(side=1, "Redundancy Relevancy", line = textlines, cex = 0.8*maincex) invisible() }#EOF
/scratch/gouwar.j/cran-all/cranData/varrank/R/plot-varrank.R
############################################################################### ## print.varrank.R --- ## Author : Gilles Kratzer ## Last modified : 05/02/2018 ## : ############################################################################### print.varrank <- function(x, digits=5, ...){ out <- as.matrix(round(t(diag(x[[2]])),digits = digits)) if(!is.null(rownames(out))) { rownames(out)<-c("Scores") cat("Ordered variables (decreasing importance):\n \n") print(out, ...) }else{ print(x[[1]], ...) } invisible(x) }#EOF
/scratch/gouwar.j/cran-all/cranData/varrank/R/print-varrank.R
############################################################################### ## summary.varrank.R --- ## Author : Gilles Kratzer ## Last modified : 05/02/2018 ## : ############################################################################### summary.varrank <- function(object, digits=3, ...){ if(object$algorithm=="forward") out1 <- as.matrix(round(t(diag(object$distance.m)),digits = digits)) if(object$algorithm=="backward") out1 <- as.matrix(round(t(diag(object$distance.m[dim(object$distance.m)[2]:1,])),digits = digits)) out2<-as.matrix(round(object$distance.m,digits = digits)) out2[!is.finite(out2)] <- " " out2 <- as.data.frame(out2) rownames(out1)<-c("Scores") cat("Number of variables ranked: ",dim(out1)[2],"\n",sep = "") cat(object[[3]], " search using ", object[[4]]," method \n",sep = "") cat("(",object[[5]]," scheme) \n \n",sep = "") if(object[[3]]=="forward") cat("Ordered variables (decreasing importance):\n") if(object[[3]]=="backward") cat("Ordered variables (increasing importance):\n") print(out1, ...) cat("\n ---") cat("\n \n Matrix of scores: \n") print(out2, ...) }#EOF
/scratch/gouwar.j/cran-all/cranData/varrank/R/summary-varrank.R
############################################################################### ## varrank.backward.R --- ## Author : Gilles Kratzer ## Last modified : 23/11/2017 ############################################################################### varrank.backward <- function(data.df = NULL, variable.important = NULL, method = NULL, scheme=NULL, discretization.method = NULL, k = NULL, ratio = NULL, n.var=NULL, verbose=TRUE){ nvar <- dim(data.df)[2] n.adjust <- nvar-length(variable.important) if(n.adjust==1) return(list(names(data.df)[!(names(data.df) %in% variable.important)],0)) n.important <- nvar-n.adjust if(is.null(n.var)){n.var <- n.adjust} ##Progress bar if(verbose==TRUE) pbPrint <- txtProgressBar(min = 0, max = n.var-1, style = 3) data.df.tmp.s <- data.df[,!(names(data.df) %in% variable.important)] data.df.tmp.i <- data.df[,(names(data.df) %in% variable.important)] data.df.tmp.s <- varrank::discretization(data.df = data.df.tmp.s,discretization.method = discretization.method,frequency = FALSE) data.df.tmp.i <- varrank::discretization(data.df = data.df.tmp.i,discretization.method = discretization.method,frequency = FALSE) score.out <- matrix(data = NA,nrow = n.var,ncol = n.var) distance.matrix <- matrix(data = NA,nrow = n.adjust,ncol = n.var) ##trick to keep the order in the final matrix names.index <- rep(TRUE, length(names(data.df.tmp.s))) names.selected <- names.to.order <- colnames(data.df.tmp.s) names.output <- NULL vect.relevance <- matrix(data = 0, nrow = n.adjust,ncol = 1) ##++++++++++++++++++++++++++++++++ ##Computing relevance - redundancy ##++++++++++++++++++++++++++++++++ ##Relevance for(i in 1:n.adjust){ vect.relevance[i] <- mi.data.discr(X = data.df.tmp.s[,i],Y = (data.df.tmp.i)) } for(j in 0:(n.var-1)){ ##Redundancy vect.redundancy <- matrix(data = 0, nrow = n.adjust-j,ncol = 1) for(i in 1:(n.adjust-j)){ tmp.sum <- 0 for(z in 1:length(names.selected)){ tmp <- mi.data.discr(X = data.df.tmp.s[,names.selected[z]],Y = data.df.tmp.s[,i]) ##alpha computation switch(method, battiti={alpha=1}, kwak={alpha=mi.data.discr(X = data.df.tmp.i,Y = data.df.tmp.s[,names.selected[z]])/(10^-6+entropy.data(freqs.table = table(data.df[,names.selected[z]])))}, peng={alpha=1}, estevez={alpha = 1/(min(entropy.data(freqs.table = table(data.df.tmp.s[,i])),entropy.data(freqs.table = table(data.df.tmp.s[,-i])))+10^-6)} ) tmp.sum <- tmp.sum + alpha * tmp } #beta computation switch(method, battiti={beta=ratio}, kwak={beta=ratio}, peng={beta=1/(length(names.selected)+1)}, estevez={beta=1/(length(names.selected)+1)} ) vect.redundancy[i,] <- beta * tmp.sum } if(scheme=="mid"){score.tmp <- vect.relevance - vect.redundancy} if(scheme=="miq"){score.tmp <- vect.relevance / (vect.redundancy+10^-6)} tmp.name <- max.col(t(-score.tmp)) vect.relevance <- as.matrix(vect.relevance[-tmp.name,]) names.selected <- names(data.df.tmp.s)[-tmp.name] if(dim(data.df.tmp.s)[2]==1){names.output <- c(names.output,colnames(data.df.tmp.s)[tmp.name]) }else{ names.output <- c(names.output,names(data.df.tmp.s)[tmp.name]) } if(j != (n.var-1)) distance.matrix[names.index,j+1] <- t(score.tmp) if(dim(data.df.tmp.s)[2]>2){data.df.tmp.s <- data.df.tmp.s[,-tmp.name];names.index[which(names.index==TRUE)][tmp.name] <- FALSE }else{ #if(dim(data.df.tmp.s)[2]==1){} name.df.tmp <- colnames(data.df.tmp.s) data.df.tmp.s <- matrix(data = data.df.tmp.s[,-tmp.name],ncol = 1) colnames(data.df.tmp.s) <- name.df.tmp[-tmp.name] names.index[which(names.index==TRUE)][tmp.name] <- FALSE } if(verbose==TRUE){setTxtProgressBar(pb = pbPrint, value = j)} } 
colnames(distance.matrix) <- names.output rownames(distance.matrix) <- names.to.order distance.matrix <- distance.matrix[c(names.to.order[!(names.to.order %in% names.output)],rev(names.output)),] return(list(names.output, distance.matrix)) }
/scratch/gouwar.j/cran-all/cranData/varrank/R/varrank-backward.R
############################################################################### ## varrank.forward.R --- ## Author : Gilles Kratzer ## Last modified : 23/11/2017 ## : 24/11/2017 ############################################################################### varrank.forward <- function(data.df = NULL, variable.important = NULL, method = NULL, scheme=NULL, discretization.method = NULL,k=NULL, ratio=NULL, n.var=NULL,verbose=TRUE){ nvar <- dim(data.df)[2] n.adjust <- nvar-length(variable.important) if(n.adjust==1) return(list(names(data.df)[!(names(data.df) %in% variable.important)],0)) n.important <- nvar-n.adjust if(is.null(n.var)){n.var <- n.adjust} if(n.var==1 | n.var==2) verbose <- FALSE ##Progress bar if(verbose==TRUE) pbPrint <- txtProgressBar(min = 0, max = n.var-2, style = 3) ##++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ ##Computing relevance (first variable is selected by maximizing relevance) ##++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ vect.relevance <- matrix(data = 0, nrow = n.adjust,ncol = 1) data.df.tmp <- data.df[,!(names(data.df) %in% variable.important)] names.to.order <- names(data.df.tmp) ##trick to keep the order in the final matrix names.index <- rep(TRUE, length(names(data.df.tmp))) ##discretization data.df <- varrank::discretization(data.df = data.df,discretization.method = discretization.method,frequency = FALSE) data.df.tmp <- varrank::discretization(data.df = data.df.tmp,discretization.method = discretization.method,frequency = FALSE) for(i in 1:n.adjust){ vect.relevance[i] <- mi.data.discr(X = data.df.tmp[,i] ,Y = data.df[,variable.important]) } first.add <- max.col(t(vect.relevance)) ## scoring names.selected <- names(data.df.tmp)[first.add] score <- matrix(data = 0,nrow = n.adjust,ncol = 1) distance.matrix <- matrix(data = NA,nrow = n.adjust,ncol = n.adjust) distance.matrix[,1] <- vect.relevance if(n.var==1){ distance.matrix <- distance.matrix[1] names.selected <- names(data.df)[!(names(data.df) %in% variable.important)] names.selected <- names.selected[first.add] names(distance.matrix) <- names.selected return(list(names.selected, distance.matrix)) } names.selected <- names(data.df.tmp)[first.add] ##return for 3 variables if(dim(data.df.tmp)[2]<3){ names.selected <- c(names.selected,names(data.df.tmp)[-first.add]) names.index[first.add] <- FALSE colnames(distance.matrix) <- names.selected rownames(distance.matrix) <- names.to.order distance.matrix <- distance.matrix[names.selected,] return(list(names.selected, distance.matrix)) }else{ data.df.tmp <- data.df.tmp[,-c(first.add)] names.index[first.add] <- FALSE } vect.relevance <- as.matrix(vect.relevance[-first.add,]) ##++++++++++++++++++++++++++++++++ ##Computing relevance - redundancy ##++++++++++++++++++++++++++++++++ ##Relevance ##precomputing entropy for(j in 1:(n.adjust-2)){ vect.redundancy <- matrix(data = 0, nrow = n.adjust-j,ncol = 1) for(i in 1:(n.adjust-j)){ tmp.sum <- 0 for(z in 1:length(names.selected)){ tmp <- mi.data.discr(X = data.df[,names.selected[z]],Y = data.df.tmp[,i]) ##alpha computation switch(method, battiti={alpha=1}, kwak={alpha=mi.data.discr(X = data.df[,variable.important],Y = data.df[,names.selected[z]])/(10^-6+entropy.data(freqs.table = table(data.df[,names.selected[z]])))}, peng={alpha=1}, estevez={alpha = 1/(min(entropy.data(freqs.table = table(data.df[,i])),entropy.data(freqs.table = table(data.df[,names.selected])))+10^-6)} ) tmp.sum <- tmp.sum + alpha * tmp } #beta computation switch(method, battiti={beta=ratio}, 
kwak={beta=ratio}, peng={beta=1/length(names.selected)}, estevez={beta=1/length(names.selected)} ) vect.redundancy[i,] <- beta * tmp.sum } if(scheme=="mid"){score.tmp <- vect.relevance - vect.redundancy} if(scheme=="miq"){score.tmp <- vect.relevance / (vect.redundancy+10^-6)} tmp.name <- max.col(t(score.tmp)) vect.relevance <- as.matrix(vect.relevance[-tmp.name,]) names.selected <- c(names.selected,names(data.df.tmp)[tmp.name]) distance.matrix[names.index,j+1] <- t(score.tmp) if(dim(data.df.tmp)[2]!=2) data.df.tmp <- data.df.tmp[,-tmp.name];names.index[which(names.index==TRUE)][tmp.name] <- FALSE if(length(names.selected)==n.var){ distance.matrix <- distance.matrix[,1:n.var] colnames(distance.matrix) <- names.selected rownames(distance.matrix) <- names.to.order names.selected.tmp <- c(names.selected,names(data.df.tmp)) distance.matrix <- distance.matrix[names.selected.tmp,] return(list(names.selected, distance.matrix))} if(verbose==TRUE){setTxtProgressBar(pb = pbPrint, value = j)} } names.selected <- c(names.selected,names(data.df.tmp)[-c(tmp.name)]) colnames(distance.matrix) <- names.selected rownames(distance.matrix) <- names.to.order distance.matrix <- distance.matrix[names.selected,] return(list(names.selected, distance.matrix)) }
/scratch/gouwar.j/cran-all/cranData/varrank/R/varrank-forward.R
###############################################################################
## varrank.R ---
## Author : Gilles Kratzer
## Last modified : 07/09/2017
##               : 27/11/2017 greedy search removed
##               : 15/03/2018 code cleaning
###############################################################################

varrank <- function(data.df = NULL,
                    variable.important = NULL,
                    method = c("battiti", "kwak", "peng", "estevez"),
                    algorithm = c("forward","backward"),
                    scheme = c("mid", "miq"),
                    discretization.method = NULL,
                    ratio = NULL, n.var = NULL,
                    verbose = TRUE){

  ##Tests of common sense
  if(!exists("method"))stop("A method has to be provided")
  if(!exists("algorithm"))stop("An algorithm has to be provided")
  #if(length(scheme)!=1)stop("A scheme have to be provided")
  if(is.null(discretization.method))stop("A discretization method has to be provided")

  method <- tolower(method)
  algorithm <- tolower(algorithm)
  scheme <- tolower(scheme)

  # Validation of discretization.method is deferred to discretization
  # if(is.character(discretization.method)) {
  # discretization.method <- tolower(discretization.method)

  method <- c("battiti", "kwak", "peng", "estevez")[pmatch(method,c("battiti", "kwak", "peng", "estevez"))]
  if ( (is.na(method)) || (length(method)>1) ){ method <- "peng" ; warning("'method' not recognised; set to 'peng'")}

  algorithm <- c("forward","backward")[pmatch(algorithm,c("forward","backward"))]
  if ( (is.na(algorithm)) || (length(algorithm)>1) ){ algorithm <- "forward"; warning("'algorithm' not recognised; set to 'forward'")}

  scheme <- c("mid", "miq")[pmatch( tolower(scheme), c("mid", "miq"))]
  if ( (is.na(scheme)) || (length(scheme)>1) ){ scheme <- "mid"; warning("'scheme' not recognised; set to 'mid'")}

  # discretization.method <- c("fd","doane","cencov","sturges","rice","scott","kmeans","terrell-scott")[pmatch(discretization.method,c("fd","doane","cencov","sturges","rice","scott","kmeans","terrell-scott"))]

  if(length(variable.important)<1)stop("At least one variable of importance should be given")
  if(dim(data.df)[2]<length(variable.important))stop("More variables of importance than columns in the data.frame")
  if(dim(data.df)[1]<2)stop("Not enough observations to compute information theory metrics")

  #if(!(method %in% c("battiti", "kwak", "peng", "estevez")) & algorithm %in% c("forward")){method <- "peng" ; warning("Method not recognised; method assigned to peng")}
  #if(is.null(algorithm) & length(n.var)==1){algorithm <- "forward"; warning("Algorithm not recognised; algorithm assigned to forward")}
  #if(!(algorithm %in% c("forward","backward"))){stop("Algorithm not recognised")}

  if(is.numeric(n.var)) if(n.var>(dim(data.df)[2]-length(variable.important))) warning("n.var too large; assigned to max possible value")

  if(!is.null(n.var)){
    if(length(n.var)>1){stop("Wrong format of n.var. It should be a single integer: the number of variables to rank.")}
  }
  ##end of tests

  ##forward & backward scheme
  if(algorithm=="forward"){tmp <- varrank.forward(data.df = data.df ,variable.important = variable.important,method = method,discretization.method = discretization.method,scheme=scheme,ratio = ratio,n.var=n.var, verbose=verbose)}
  if(algorithm=="backward"){tmp <- varrank.backward(data.df = data.df ,variable.important = variable.important,method = method,discretization.method = discretization.method,scheme=scheme,ratio = ratio,n.var=n.var, verbose=verbose)}

  out <- list(ordered.var=tmp[[1]], distance.m=tmp[[2]], algorithm=algorithm, method=method, scheme=scheme)

  class(out) <- "varrank"

  return(out)
}#EOF
/scratch/gouwar.j/cran-all/cranData/varrank/R/varrank.R
## ----setup, include = FALSE, cache = FALSE------------------------------------ knitr::opts_chunk$set( collapse = TRUE, comment = "#>", fig.width = 6, fig.height = 6, fig.align = "center" ) options(digits = 3) ## ----eval=FALSE--------------------------------------------------------------- # install.packages("varrank") ## ----eval=TRUE---------------------------------------------------------------- library(varrank) ## ---- warning = FALSE, message = FALSE---------------------------------------- data(PimaIndiansDiabetes, package = "mlbench") varrank.PimaIndiansDiabetes <- varrank(data.df = PimaIndiansDiabetes, method = "estevez", variable.important = "diabetes", discretization.method = "sturges", algorithm = "forward", scheme="mid", verbose = FALSE) summary(varrank.PimaIndiansDiabetes) ## ----------------------------------------------------------------------------- plot(varrank.PimaIndiansDiabetes) ## ----------------------------------------------------------------------------- library(caret) library(e1071) # prepare training scheme control <- trainControl(method = "repeatedcv", number = 10, repeats = 3) # train the model model <- train(diabetes~., data = PimaIndiansDiabetes, method = "lvq", preProcess = "scale", trControl = control) # estimate variable importance importance <- varImp(model, useModel = FALSE) # summarize importance print(importance) # plot importance plot(importance) ## ----------------------------------------------------------------------------- library(Boruta) out.boruta <- Boruta(diabetes~., data = PimaIndiansDiabetes) print(out.boruta) plot(out.boruta, cex.axis = 0.8, las=1) ## ---- warning = FALSE, message = FALSE---------------------------------------- library(varSelRF) rf <- randomForest(diabetes~., data = PimaIndiansDiabetes, ntree = 200, importance = TRUE) rf.rvi <- randomVarImpsRF(xdata = PimaIndiansDiabetes[, 1:8], Class = PimaIndiansDiabetes[, 9], forest = rf, numrandom = 20, usingCluster = FALSE) randomVarImpsRFplot(rf.rvi, rf, show.var.names = TRUE, cexPoint = 0.3,cex.axis=0.3) ## ----------------------------------------------------------------------------- library(FSelector) weights <- information.gain(diabetes~., data = PimaIndiansDiabetes) row.names(weights)[order(weights$attr_importance, decreasing = TRUE)] ## ---- fig.width = 3, fig.height = 3------------------------------------------- ### 1D example #### # sample from continuous uniform distribution x1 = runif(1000) hist(x1, xlim = c(0, 1)) #True entropy value: H(X) = log(1000) = 6.91 entropy.data(freqs.table = table(discretization(data.df = data.frame(x1), discretization.method = "rice", freq = FALSE))) # sample from a non-uniform distribution x2 = rnorm(n = 10000, mean = 0, sd = 1) hist(x2) #differential entropy: H(x) = log(1*sqrt(2*pi*exp(1))) = 1.42 entropy.data(freqs.table = table(discretization(data.df = data.frame(x2), discretization.method = "sturges", freq = FALSE))) ### 2D example #### # two independent random variables x1 <- runif(100) x2 <- runif(100) ## Theoretical entropy: 2*log(100) = 9.21 entropy.data(freqs.table = table(discretization(data.df = data.frame(x1, x2), discretization.method = "sturges", freq = FALSE))) ## ----------------------------------------------------------------------------- # mutual information for 2 uniform random variables x1 <- runif(10000) x2 <- runif(10000) # approximately zero mi.data(X = x1, Y = x2, discretization.method = "kmeans") # MI computed directely mi.data(X = x2, Y = x2, discretization.method = "kmeans") # MI computed with entropies: ##MI(x,y) = 
H(x)+H(y)-H(x, y) for x=y; ##MI(x,x) = 2 * H(x) - H(x,x) 2 * entropy.data(freqs.table = table(discretization(data.df = data.frame(x2), discretization.method = "kmeans", freq = FALSE))) - entropy.data(freqs.table = table(discretization(data.df = data.frame(x2, x2), discretization.method = "kmeans", freq = FALSE))) ## ----------------------------------------------------------------------------- output <- varrank(data.df = PimaIndiansDiabetes, method = "battiti", variable.important = "diabetes", discretization.method = "sturges", ratio = 0.6, algorithm = "forward", scheme="mid", verbose = FALSE) ##print output ##summary summary(output) ## ----------------------------------------------------------------------------- output <- varrank(data.df = PimaIndiansDiabetes, method = "battiti", variable.important = "diabetes", discretization.method = "sturges", ratio = 0.6, algorithm = "forward", scheme="mid", verbose = FALSE) plot(output) ## ----------------------------------------------------------------------------- output<-varrank(data.df = PimaIndiansDiabetes, method = "battiti", variable.important = "diabetes", discretization.method = "sturges", ratio = 0.6, algorithm = "backward",scheme="mid", verbose = FALSE) plot(output) ## ----------------------------------------------------------------------------- pairs(swiss, panel = panel.smooth, main = "Swiss Data", col = 3 + (swiss$Catholic > 80), gap = 0) summary(lm(Fertility ~ . , data = swiss)) ## ----------------------------------------------------------------------------- swiss.varrank <- varrank(data.df = swiss, method = "estevez", variable.important = "Fertility", discretization.method = "sturges", algorithm = "forward", scheme = "mid", verbose = FALSE) swiss.varrank plot(swiss.varrank) ## ----------------------------------------------------------------------------- pairs(longley, main = "Longley Data", gap = 0) summary(fm1 <- lm(Employed ~ ., data = longley)) ## ----------------------------------------------------------------------------- longley.varrank <- varrank(data.df = longley, method = "estevez", variable.important = "Employed", discretization.method = "sturges", algorithm = "forward", scheme = "mid", verbose = FALSE) longley.varrank plot(longley.varrank) ## ----------------------------------------------------------------------------- pairs(airquality, panel = panel.smooth, main = "Air Quality Data", gap = 0) ## ----------------------------------------------------------------------------- airquality.varrank <- varrank(data.df = (data.frame(lapply(airquality[complete.cases(airquality), ], as.numeric))), method = "estevez", variable.important = "Ozone", discretization.method = "sturges", algorithm = "forward", scheme = "mid", verbose = FALSE) airquality.varrank plot(airquality.varrank) ## ----------------------------------------------------------------------------- data(nassCDS) nassCDS.varrank <- varrank(data.df = nassCDS, method = "peng", variable.important = "dead", discretization.method = "sturges", algorithm = "forward", scheme = "mid", verbose = FALSE) nassCDS.varrank plot(nassCDS.varrank, notecex = 0.5)
/scratch/gouwar.j/cran-all/cranData/varrank/inst/doc/varrank.R
---
title: "varrank: An R Package for Variable Ranking Based on Mutual Information with Applications to Systems Epidemiology"
author: "Gilles Kratzer, Reinhard Furrer"
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
#output: pdf_document
vignette: >
  %\VignetteIndexEntry{varrank}
  %\VignetteEngine{knitr::knitr}
  %\usepackage[utf8]{inputenc}
bibliography: bib_varrank.bib
---

```{r setup, include = FALSE, cache = FALSE}
knitr::opts_chunk$set(
  collapse = TRUE,
  comment = "#>",
  fig.width = 6,
  fig.height = 6,
  fig.align = "center"
)
options(digits = 3)
```

**What is the package design for?**

The two main problems addressed by this package are **selection of the most representative variable** within a group of variables of interest (i.e. dimension reduction) and **variable ranking** with respect to a set of features of interest.

**How does it work?**

The *varrank* R package takes a named dataset as input. It transforms the continuous variables into categorical ones using discretization rules. Then a *varrank* analysis sequentially compares relevance with redundancy. The final rank is based on the measure of this score.

**What are the R package functionalities?**

The workhorse of the R package is the *varrank()* function. It computes the rank of variables and it returns a varrank class object. This object can be summarized with a comprehensive verbal description or a plot.

**What is the structure of this document?**

We first illustrate the package with a simple example. In a second step we compare the output of the main function *varrank()* with alternative approaches. Then some details are provided. For a full description of the technical details we refer to the original publication [@gk2018].

# Simple *varrank* example

The package is available through [CRAN](https://CRAN.R-project.org/package=varrank) and can be installed directly in R:

```{r,eval=FALSE}
install.packages("varrank")
```

Once installed, the *varrank* R package can be loaded using:

```{r,eval=TRUE}
library(varrank)
```

Let us start with a ranking example from the *mlbench* R package [@leischmlbench]. The *PimaIndiansDiabetes* dataset contains 768 observations on 9 clinical variables. To run the *varrank()* function, one needs to choose a score function model ("battiti", "kwak", "peng", "estevez"), a discretization method (see discretization for details) and a search algorithm ("forward", "backward"). For the first search, it is advised to use either "peng" (faster) or "estevez" (more reliable but much slower for large datasets) and, in case the number of variables is large (>100), restrict the "forward" search to "n.var = 100." The progress bar will give you an idea of the remaining run time.

```{r, warning = FALSE, message = FALSE}
data(PimaIndiansDiabetes, package = "mlbench")
varrank.PimaIndiansDiabetes <- varrank(data.df = PimaIndiansDiabetes, method = "estevez", variable.important = "diabetes", discretization.method = "sturges", algorithm = "forward", scheme="mid", verbose = FALSE)

summary(varrank.PimaIndiansDiabetes)
```

The function *varrank()* returns a list with multiple entries: "names.selected" and "distance.m". If `algorithm = "forward"`, "names.selected" contains an ordered list of the variable names in **decreasing** order of importance relative to the set in "variable.important" and, for a backward search, the list is in increasing order of importance. The scheme of comparison between relevance and redundancy should be set to "mid" (Mutual Information Difference) or "miq" (Mutual Information Quotient).
In order to visually assess *varrank()* output, one can plot it: ```{r} plot(varrank.PimaIndiansDiabetes) ``` Basically, the *varrank()* function sequentially compares the relevancy/redundancy balance of information across the set of variables. There is a legend containing the color code (cold blue for redundancy, hot red for relevancy) and the distribution of the scores. The columns of the triangular matrix contain the scores at each selection step. The variable selected at each step is the one with the highest score (the variables are ordered in the plot). The scores at selection can thus be read from the diagonal. A negative score indicates that redundancy dominates the final trade-off of information, and a positive score indicates that relevance dominates. ## Comparison with other R packages ### Caret Here is the output of the *caret* R package [@kuhn2014caret] applied to the same *PimaIndiansDiabetes* dataset. *caret* allows one to perform a model-free variable selection search. In such a case, the importance of each predictor is evaluated individually using a filter approach. ```{r} library(caret) library(e1071) # prepare training scheme control <- trainControl(method = "repeatedcv", number = 10, repeats = 3) # train the model model <- train(diabetes~., data = PimaIndiansDiabetes, method = "lvq", preProcess = "scale", trControl = control) # estimate variable importance importance <- varImp(model, useModel = FALSE) # summarize importance print(importance) # plot importance plot(importance) ``` ### R Package *Boruta* An alternative for variable selection is the *Boruta* R package [@kursa2010feature]. This compares the importance of the original attributes with the importance achievable at random, estimated using permuted copies of the attributes in the Random Forest method. The *Boruta* output from the analysis of the same dataset is as follows. ```{r} library(Boruta) out.boruta <- Boruta(diabetes~., data = PimaIndiansDiabetes) print(out.boruta) plot(out.boruta, cex.axis = 0.8, las=1) ``` ### R Package varSelRF *varSelRF* [@diaz2007genesrf] is a random forest-based R package that performs variable ranking. The output of the same analysis is as follows. ```{r, warning = FALSE, message = FALSE} library(varSelRF) rf <- randomForest(diabetes~., data = PimaIndiansDiabetes, ntree = 200, importance = TRUE) rf.rvi <- randomVarImpsRF(xdata = PimaIndiansDiabetes[, 1:8], Class = PimaIndiansDiabetes[, 9], forest = rf, numrandom = 20, usingCluster = FALSE) randomVarImpsRFplot(rf.rvi, rf, show.var.names = TRUE, cexPoint = 0.3, cex.axis = 0.3) ``` ### FSelector The *FSelector* R package [@romanski2016fselector] contains a large number of implemented techniques for generating rank weights for features. Below is an example based on an entropy-based filter using information gain applied to the same example. ```{r} library(FSelector) weights <- information.gain(diabetes~., data = PimaIndiansDiabetes) row.names(weights)[order(weights$attr_importance, decreasing = TRUE)] ``` # Underlying Theory of *varrank* ## Information theory metrics The *varrank* R package is based on the estimation of information theory metrics, namely, the *entropy* and the *mutual information*. Intuitively, the *entropy* is defined as the average amount of information produced by a stochastic source of data. The *mutual information* is defined as the mutual dependence between two variables. 
## Entropy from observational data Formally, for a continuous random variable $X$ with probability density function $P(X)$ the entropy $\text{H}(X)$ is defined as [see @cover2012elements for details] $$ \text{H}(X) = \textbf{E} [ - \log{P(X)} ]. $$ The entropy $\text{H}(X)$ of a discrete random variable $X$ is defined as [see @cover2012elements for details] $$ \text{H}(X) = - \sum^{N}_{n = 1} P(x_n) \log{P(x_n)}, $$ where $N$ is the number of states of the random variable $X$. The latter definition can easily be extended for an arbitrarily large set of random variables. For $M$ random variables with $N_1$ to $N_M$ possible states respectively, the joint entropy is defined as \begin{equation} \text{H}(X_1, \dots, X_M) = - \sum^{N_1}_{x_1 = 1} \dots \sum^{N_M}_{x_M = 1}P(x_1, \dots, x_M) \log{P(x_1, \dots, x_M)}. \end{equation} We now illustrate the calculation of entropy for some simple cases and give the true/theoretical values, as well. ```{r, fig.width = 3, fig.height = 3} ### 1D example #### # sample from continuous uniform distribution x1 = runif(1000) hist(x1, xlim = c(0, 1)) #True entropy value: H(X) = log(1000) = 6.91 entropy.data(freqs.table = table(discretization(data.df = data.frame(x1), discretization.method = "rice", freq = FALSE))) # sample from a non-uniform distribution x2 = rnorm(n = 10000, mean = 0, sd = 1) hist(x2) #differential entropy: H(x) = log(1*sqrt(2*pi*exp(1))) = 1.42 entropy.data(freqs.table = table(discretization(data.df = data.frame(x2), discretization.method = "sturges", freq = FALSE))) ### 2D example #### # two independent random variables x1 <- runif(100) x2 <- runif(100) ## Theoretical entropy: 2*log(100) = 9.21 entropy.data(freqs.table = table(discretization(data.df = data.frame(x1, x2), discretization.method = "sturges", freq = FALSE))) ``` ## Mutual information from observational data The mutual information $\text{MI}(X;Y)$ of two discrete random variables $X$ and $Y$ is defined as [see @cover2012elements for details] \begin{equation} \text{MI}(X;Y) = \sum^{N}_{n = 1} \sum^{M}_{m = 1}P(x_n,y_m) \log{\frac{P(x_n,y_m)}{P(x_n)P(y_m)} }, \end{equation} where $N$ and $M$ are the number of states of the random variables $X$ and $Y$, respectively. The extension to continuous variables is straightforward. The MI can also be expressed using entropy as [see @cover2012elements for details] \begin{equation}\label{eq:mi_entropy} \text{MI}(X;Y) = \text{H}(X) + \text{H}(Y) -\text{H}(X, Y). \end{equation} ```{r} # mutual information for 2 uniform random variables x1 <- runif(10000) x2 <- runif(10000) # approximately zero mi.data(X = x1, Y = x2, discretization.method = "kmeans") # MI computed directly mi.data(X = x2, Y = x2, discretization.method = "kmeans") # MI computed with entropies: ##MI(x,y) = H(x)+H(y)-H(x, y) for x=y; ##MI(x,x) = 2 * H(x) - H(x,x) 2 * entropy.data(freqs.table = table(discretization(data.df = data.frame(x2), discretization.method = "kmeans", freq = FALSE))) - entropy.data(freqs.table = table(discretization(data.df = data.frame(x2, x2), discretization.method = "kmeans", freq = FALSE))) ``` # Variable ranking/feature ranking Mutual information is very appealing when one wants to compute the degree of dependency between multiple random variables. Indeed, as it is based on the joint and marginal probability density functions (pdfs), it is very effective in measuring any kind of relationship [@cover2012elements]. 
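To make the last point concrete, the following added sketch contrasts the Pearson correlation with the mutual information on a purely nonlinear dependence. By symmetry the correlation of $x$ and $x^2$ is close to zero, while the MI estimate remains clearly positive (its exact value depends on the chosen discretization):

```{r}
x <- runif(10000, min = -1, max = 1)
y <- x^2
# linear measure: close to zero by symmetry
cor(x, y)
# mutual information: detects the deterministic, nonlinear link
mi.data(X = x, Y = y, discretization.method = "kmeans")
```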
The Minimum Redundancy Maximum Relevance (mRMR) algorithm can be described as an ensemble of models [@van2010increasing], originally proposed by @battiti1994using and coined by @peng2005feature. A general formulation of the ensemble of the mRMR technique is as follows. Given a set of features $\textbf{F}$, a subset of important features $\textbf{C}$, a candidate feature $f_i$ and possibly some already selected features $f_s \in \textbf{S}$, the local score function for a scheme in difference (Mutual Information Difference) is expressed as: \begin{equation} g(\alpha, \beta, \textbf{C}, \textbf{S}, \textbf{F}) = \text{MI}(f_i;\textbf{C}) - \beta \sum_{f_s \in \textbf{S}} \alpha(f_i, f_s, \textbf{C}) ~\text{MI}(f_i; f_s). \label{eq:mRMR} \end{equation} This equation is called the mRMRe equation. Model names and their corresponding functions $\alpha$ and parameters $\beta$ are listed below in historical order of publication: 1. $\beta>0$ is a user defined parameter and $\alpha(f_i,f_s,\textbf{C})=1$, named mutual information feature selector (MIFS). This method is called \textit{battiti} in varrank and presented in @battiti1994using. 2. $\beta>0$ is a user defined parameter and $\alpha(f_i,f_s,\textbf{C})={\text{MI}(f_s;\textbf{C})}/{\text{H}(f_s)}$, named MIFS-U. This method is called \textit{kwak} in varrank and presented in @kwak2002input. 3. $\beta={1}/{|\textbf{S}|}$ and $\alpha(f_i,f_s,\textbf{C})=1$, which is named min-redundancy max-relevance (mRMR). This method is called \textit{peng} in varrank and presented in @peng2005feature. 4. $\beta={1}/{|\textbf{S}|}$ and $\alpha(f_i,f_s,\textbf{C})={1}/{\text{min}(\text{H}(f_i),\text{H}(f_s))}$ named Normalized MIFS (NMIFS). This method is called \textit{estevez} in varrank and presented in @estevez2009normalized. The two terms on the right-hand side of the definition of the local score function above are local proxies for the relevance and the redundancy, respectively. Redundancy is a penalty included to avoid selecting features highly correlated with previously selected ones. Local proxies are needed because computing the joint MI between high dimensional vectors is computationally expensive. The function $\alpha$ and the parameter $\beta$ attempt to balance both terms to the same scale. In @peng2005feature and @estevez2009normalized, the ratio of comparison is adaptively chosen as $\beta = 1/{|S|}$ to control the second term, which is a cumulative sum and increases quickly as the cardinality of $\textbf{S}$ increases. The function $\alpha$ tends to normalize the right side. One can remark that $0 \leq \text{MI}(f_i;f_s) \leq \min(\text{H}(f_i), \text{H}(f_s))$. ## Software implementation A common characteristic of data from systems epidemiology is that it contains both discrete and continuous variables. Thus, a popular and efficient choice for computing information metrics is to discretize the continuous variables and then deal with only discrete variables. A recent survey of discretization techniques can be found in @garcia2013survey. Some static univariate unsupervised splitting approaches are implemented in the package. In the current implementation, one can give a user-defined number of bins (not recommended) or use a histogram-based approach. Popular choices for the latter are: Cencov's rule [@cencov1962estimation], Freedman-Diaconis' rule [@freedman1981histogram], Scott's rule [@scott], Sturges' rule [@sturges], Doane's formula [@doane] and Rice's rule. 
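As a small added illustration of how the choice of rule matters (using only functions shown above), one can count the number of non-empty bins that two of these rules produce on the same sample:

```{r}
x <- rnorm(1000)
# number of non-empty bins under Sturges' rule
length(table(discretization(data.df = data.frame(x), discretization.method = "sturges", freq = FALSE)))
# number of non-empty bins under Rice's rule
length(table(discretization(data.df = data.frame(x), discretization.method = "rice", freq = FALSE)))
```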
The MI is estimated through entropy using the mRMRe equation and the counts of the empirical frequencies. This is a plug-in estimator. Another approach is to use a clustering approach with the elbow method to determine the optimal number of clusters. Finally, one very popular MI estimation compatible only with continuous variables is based on nearest neighbors [@kraskov2004estimating]. The workhorse of *varrank* is the forward/backward implementation of the mRMRe equation. The sequential forward variable ranking algorithm is described in [@gk2018]. ## Software functionalities The package *varrank* exploits the **object oriented functionalities** of R through the S3 class. The function *varrank()* returns an object of class *varrank*, a list with multiple entries. At present, three S3 methods have been implemented: the print method displaying a condensed output, the summary method displaying the full output and a plot method. The plot method is an adapted version of *heatmap.2()* from the R package *gplots*. ```{r} output <- varrank(data.df = PimaIndiansDiabetes, method = "battiti", variable.important = "diabetes", discretization.method = "sturges", ratio = 0.6, algorithm = "forward", scheme="mid", verbose = FALSE) ##print output ##summary summary(output) ``` The plot display depends on whether the algorithm is run as a forward or a backward search. ```{r} output <- varrank(data.df = PimaIndiansDiabetes, method = "battiti", variable.important = "diabetes", discretization.method = "sturges", ratio = 0.6, algorithm = "forward", scheme="mid", verbose = FALSE) plot(output) ``` ```{r} output <- varrank(data.df = PimaIndiansDiabetes, method = "battiti", variable.important = "diabetes", discretization.method = "sturges", ratio = 0.6, algorithm = "backward", scheme = "mid", verbose = FALSE) plot(output) ``` ## Examples based on different datasets Below are some examples of the *varrank* methodology applied to classical R datasets. ### Swiss Fertility and Socioeconomic Indicators (1888) Data The Swiss fertility dataset [@R-Core-Team:2017aa] consists of six continuous variables with 47 observations. Exploratory data analysis: ```{r} pairs(swiss, panel = panel.smooth, main = "Swiss Data", col = 3 + (swiss$Catholic > 80), gap = 0) summary(lm(Fertility ~ . , data = swiss)) ``` Forward *varrank* analysis: ```{r} swiss.varrank <- varrank(data.df = swiss, method = "estevez", variable.important = "Fertility", discretization.method = "sturges", algorithm = "forward", scheme = "mid", verbose = FALSE) swiss.varrank plot(swiss.varrank) ``` ### Longley This is a data frame with seven continuous economic variables from the US, observed yearly from 1947 to 1962. This dataset is known to be highly collinear [@R-Core-Team:2017aa]. Exploratory data analysis: ```{r} pairs(longley, main = "Longley Data", gap = 0) summary(fm1 <- lm(Employed ~ ., data = longley)) ``` Forward *varrank* analysis: ```{r} longley.varrank <- varrank(data.df = longley, method = "estevez", variable.important = "Employed", discretization.method = "sturges", algorithm = "forward", scheme = "mid", verbose = FALSE) longley.varrank plot(longley.varrank) ``` ### Air Quality dataset Daily air quality measurements in New York from May to September 1973. This dataset [@R-Core-Team:2017aa] contains 6 continuous variables with 153 observations. A complete case analysis is performed. 
Exploratory data analysis: ```{r} pairs(airquality, panel = panel.smooth, main = "Air Quality Data", gap = 0) ``` Forward *varrank* analysis: ```{r} airquality.varrank <- varrank(data.df = (data.frame(lapply(airquality[complete.cases(airquality), ], as.numeric))), method = "estevez", variable.important = "Ozone", discretization.method = "sturges", algorithm = "forward", scheme = "mid", verbose = FALSE) airquality.varrank plot(airquality.varrank) ``` ### Airbags and other influences on accident fatalities This is US data (1997-2002) from police-reported car crashes in which there was a harmful event (people or property) and from which at least one vehicle was towed. This dataset [@maindonald2014daag] contains 15 variables with 26,217 observations. The *varrank* forward search is performed using the "peng" model. ```{r} data(nassCDS) nassCDS.varrank <- varrank(data.df = nassCDS, method = "peng", variable.important = "dead", discretization.method = "sturges", algorithm = "forward", scheme = "mid", verbose = FALSE) nassCDS.varrank plot(nassCDS.varrank, notecex = 0.5) ``` # Bibliography
/scratch/gouwar.j/cran-all/cranData/varrank/inst/doc/varrank.Rmd
--- title: "varrank: An R Package for Variable Ranking Based on Mutual Information with Applications to Systems Epidemiology" author: "Gilles Kratzer, Reinhard Furrer" date: "`r Sys.Date()`" output: rmarkdown::html_vignette #output: pdf_document vignette: > %\VignetteIndexEntry{varrank} %\VignetteEngine{knitr::knitr} %\usepackage[utf8]{inputenc} bibliography: bib_varrank.bib --- ```{r setup, include = FALSE, cache = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>", fig.width = 6, fig.height = 6, fig.align = "center" ) options(digits = 3) ``` **What is the package design for?** The two main problems addressed by this package are **selection of the most representative variable** within a group of variables of interest (i.e. dimension reduction) and **variable ranking** with respect to a set of features of interest. **How does it work?** The *varrank* R package takes a named dataset as input. It transforms the continuous variables into categorical ones using discretization rules. Then a *varrank* analysis sequentially compares relevance with redundancy. The final rank is based on the measure of this score. **What are the R package functionalities?** The workhorse of the R package is the *varrank()* function. It computes the rank of variables and it returns a varrank class object. This object can be summarized with a comprehensive verbal description or a plot. **What is the structure of this document?** We first illustrate the package with a simple example. In a second step we compare the output of the main function *varrank()* with alternative approaches. Then some details are provided. For a full description of the technical details we refer to the original publication [@gk2018]. # Simple *varrank* example The package is available through [CRAN](https://CRAN.R-project.org/package=varrank) and can be installed directly in R: ```{r,eval=FALSE} install.packages("varrank") ``` Once installed, the *varrank* R package can be loaded using: ```{r,eval=TRUE} library(varrank) ``` Let us start with a ranking example from the *mlbench* R package [@leischmlbench]. The *PimaIndiansDiabetes* dataset contains 768 observations on 9 clinical variables. To run the *varrank()* function, one needs to choose a score function model ("battiti", "kwak", "peng", "estevez"), a discretization method (see discretization for details) and an algorithm scheme ("forward", "backward"). For the first search, it is advised to use either "peng" (faster) or "estevez" (more reliable but much slower for large datasets) and, in case the number of variables is large (>100), restrict the "forward" search to "n.var = 100." The progress bar will give you an idea of the remaining run time. ```{r, warning = FALSE, message = FALSE} data(PimaIndiansDiabetes, package = "mlbench") varrank.PimaIndiansDiabetes <- varrank(data.df = PimaIndiansDiabetes, method = "estevez", variable.important = "diabetes", discretization.method = "sturges", algorithm = "forward", scheme="mid", verbose = FALSE) summary(varrank.PimaIndiansDiabetes) ``` The function *varrank()* returns a list with multiple entries: "names.selected" and "distance.m". If `algorithm = "forward"`, "names.selected" contains an ordered list of the variable names in **decreasing** order of importance relative to the set in "variable.important" and, for a backward search, the list is in increasing order of importance. The scheme of comparison between relevance and redundance should be set to "mid" (Mutual Information Difference) or "miq" (Mutual Information Quotient). 
In order to visually assess *varrank()* output, one can plot it: ```{r} plot(varrank.PimaIndiansDiabetes) ``` Basically, the *varrank()* function sequentially compares the relevancy/redundancy balance of information across the set of variables. There is a legend containing the color code (cold blue for redundancy, hot red for relevancy) and the distribution of the scores. The columns of the triangular matrix contain the scores at each selection step. The variable selected at each step is the one with the highest score (the variables are ordered in the plot). The scores at selection can thus be read from the diagonal. A negative score indicates a redundancy final trade of information and a positive score indicates a relevancy final trade of information. ## Comparison with other R packages ### Caret Here is the output of the *caret* R package [@kuhn2014caret] applied to the same *PimaIndiansDiabetes* dataset. *caret* allows one to perform a model-free variable selection search. In such a case, the importance of each predictor is evaluated individually using a filter approach. ```{r} library(caret) library(e1071) # prepare training scheme control <- trainControl(method = "repeatedcv", number = 10, repeats = 3) # train the model model <- train(diabetes~., data = PimaIndiansDiabetes, method = "lvq", preProcess = "scale", trControl = control) # estimate variable importance importance <- varImp(model, useModel = FALSE) # summarize importance print(importance) # plot importance plot(importance) ``` ### R Package *Boruta* An alternative for variable selection is the *Boruta* R package [@kursa2010feature]. This compares the original attributes' importance with the importance achieved by random, estimated using their permuted copies in the Random Forest method. The *Boruta* output from the analysis of the same dataset is as follows. ```{r} library(Boruta) out.boruta <- Boruta(diabetes~., data = PimaIndiansDiabetes) print(out.boruta) plot(out.boruta, cex.axis = 0.8, las=1) ``` ### R Package varSelRF *varSelRF* [@diaz2007genesrf] is a random forest-based R package that performs variable ranking. The output of the same analysis is as follows. ```{r, warning = FALSE, message = FALSE} library(varSelRF) rf <- randomForest(diabetes~., data = PimaIndiansDiabetes, ntree = 200, importance = TRUE) rf.rvi <- randomVarImpsRF(xdata = PimaIndiansDiabetes[, 1:8], Class = PimaIndiansDiabetes[, 9], forest = rf, numrandom = 20, usingCluster = FALSE) randomVarImpsRFplot(rf.rvi, rf, show.var.names = TRUE, cexPoint = 0.3,cex.axis=0.3) ``` ### FSelector The *FSelector* R package [@romanski2016fselector] contains a large number of implemented techniques for generating rank weights for features. Below is an example based on an entropy-based filter using information gain applied to the same example. ```{r} library(FSelector) weights <- information.gain(diabetes~., data = PimaIndiansDiabetes) row.names(weights)[order(weights$attr_importance, decreasing = TRUE)] ``` # Underlying Theory of *varrank* ## Information theory metrics The *varrank* R package is based on the estimation of information theory metrics, namely, the *entropy* and the *mututal information*. Intuitivelly, the *entropy* is defined as the average amount of information produced by a stochastic source of data. The *mutual information* is defined as the mutual dependence between the two variables. 
## Entropy from observational data Formally, for a continuous random variable $X$ with probability density function $P(X)$ the entropy $\text{H}(X)$ is defined as [see @cover2012elements for details] $$ \text{H}(X) = \textbf{E} [ - \log{P(X)} ]. $$ The entropy $\text{H}(X)$ of a discrete random variable $X$ is defined as [see @cover2012elements for details] $$ \text{H}(X) = \sum^{N}_{n = 1} P(x_n) \log{P(x_n)}, $$ where $N$ is the number of states of the random variable $X$. The latter definition can easily be extended for an arbitrarily large set of random variables. For $M$ random variables with $N_1$ to $N_M$ possible states respectively, the joint entropy is defined as \begin{equation} \text{H}(X_1, \dots, X_M) = \sum^{N_1}_{x_1 = 1} \dots \sum^{N_M}_{x_M = 1}P(x_1, \dots, x_M) \log{P(x_1, \dots, x_M)}. \end{equation} We now illustrate the calculation of entropy for some simple cases and give the true/theoretical values, as well. ```{r, fig.width = 3, fig.height = 3} ### 1D example #### # sample from continuous uniform distribution x1 = runif(1000) hist(x1, xlim = c(0, 1)) #True entropy value: H(X) = log(1000) = 6.91 entropy.data(freqs.table = table(discretization(data.df = data.frame(x1), discretization.method = "rice", freq = FALSE))) # sample from a non-uniform distribution x2 = rnorm(n = 10000, mean = 0, sd = 1) hist(x2) #differential entropy: H(x) = log(1*sqrt(2*pi*exp(1))) = 1.42 entropy.data(freqs.table = table(discretization(data.df = data.frame(x2), discretization.method = "sturges", freq = FALSE))) ### 2D example #### # two independent random variables x1 <- runif(100) x2 <- runif(100) ## Theoretical entropy: 2*log(100) = 9.21 entropy.data(freqs.table = table(discretization(data.df = data.frame(x1, x2), discretization.method = "sturges", freq = FALSE))) ``` ## Mutual information from observational data The mutual information $\text{MI}(X;Y)$ of two discrete random variables $X$ and $Y$ is defined as [see @cover2012elements for details] \begin{equation} \text{MI}(X;Y) = \sum^{N}_{n = 1} \sum^{M}_{m = 1}P(x_n,y_m) \log{\frac{P(x_ny_,m)}{P(x_n)P(y_m)} }, \end{equation} where $N$ and $M$ are the number of states of the random variables $X$ and $Y$, respectively. The extension to continuous variables is straightforward. The MI can also be expressed using entropy as [see @cover2012elements for details] \begin{equation}\label{eq:mi_entropy} \text{MI}(X;Y) = \text{H}(X) + \text{H}(Y) -\text{H}(X, Y). \end{equation} ```{r} # mutual information for 2 uniform random variables x1 <- runif(10000) x2 <- runif(10000) # approximately zero mi.data(X = x1, Y = x2, discretization.method = "kmeans") # MI computed directely mi.data(X = x2, Y = x2, discretization.method = "kmeans") # MI computed with entropies: ##MI(x,y) = H(x)+H(y)-H(x, y) for x=y; ##MI(x,x) = 2 * H(x) - H(x,x) 2 * entropy.data(freqs.table = table(discretization(data.df = data.frame(x2), discretization.method = "kmeans", freq = FALSE))) - entropy.data(freqs.table = table(discretization(data.df = data.frame(x2, x2), discretization.method = "kmeans", freq = FALSE))) ``` # Variables ranking/feature ranking Mutual information is very appealing when one wants to compute the degree of dependency between multiple random variables. Indeed, as it is based on the joint and marginal probability density functions (pdfs), it is very effective in measuring any kind of relationship [@cover2012elements]. 
The Minimum Redundancy Maximum Relevance (mRMR) algorithm can be described as an ensemble of models [@van2010increasing], originally proposed by @battiti1994using and coined by @peng2005feature. A general formulation of the ensemble of the mRMR technique is as follows. Given a set of features $\textbf{F}$, a subset of important features $\textbf{C}$, a candidate feature $f_i$ and possibly some already selected features $f_s \in \textbf{S}$, the local score function for a scheme in difference (Mutual Information Difference) is expressed as: \begin{equation} g(\alpha, \beta, \textbf{C}, \textbf{S}, \textbf{F}) = \text{MI}(f_i;\textbf{C}) - \beta \sum_{f_s \in \textbf{S}} \alpha(f_i, f_s, \textbf{C}) ~\text{MI}(f_i; f_s). \end{equation}\label{eq:mRMR} This equation is called the mRMRe equation. Model names and their corresponding functions $\alpha$ and parameters $\beta$ are listed below in historical order of publication: 1. $\beta>0$ is a user defined parameter and $\alpha(f_i,f_s,\textbf{C})=1$, named mutual information feature selector (MIFS). This method is called \textit{biattiti} in varrank and presented in @battiti1994using. 2. $\beta>0$ is a user defined parameter and $\alpha(f_i,f_s,\textbf{C})={\text{MI}(f_s;\textbf{C})}/{\text{H}(f_s)}$, named MIFS-U. This method is called \textit{kwak} in varrank and presented in @kwak2002input. 3. $\beta={1}/{|\textbf{S}|}$ and $\alpha(f_i,f_s,\textbf{C})=1$, which is named min-redundancy max-relevance (mRMR). This method is called \textit{peng} in varrank and presented in @peng2005feature. 4. $\beta={1}/{|\textbf{S}|}$ and $\alpha(f_i,f_s,\textbf{C})={1}/{\text{min}(\text{H}(f_i),\text{H}(f_s))}$ named Normalized MIFS (NMIFS). This method is called \textit{estevez} in varrank and presented in @estevez2009normalized. The two terms on the right-hand side of the definition of the local score function above are local proxies for the relevance and the redundancy, respectively. Redundancy is a penalty included to avoid selecting features highly correlated with previously selected ones. Local proxies are needed because computing the joint MI between high dimensional vectors is computationally expensive. The function $\alpha$ and the parameter $\beta$ attempt to balance both terms to the same scale. In @peng2005feature and @estevez2009normalized, the ratio of comparison is adaptively chosen as $\beta = 1/{|S|}$ to control the second term, which is a cumulative sum and increases quickly as the cardinality of $\textbf{S}$ increases. The function $\alpha$ tends to normalize the right side. One can remark that $0 \leq \text{MI}(f_i;f_s) \leq \min(\text{H}(f_i), \text{H}(f_s))$. ## Software implementation A common characteristic of data from systems epidemiology is that it contains both discrete and continuous variables. Thus, a common popular and efficient choice for computing information metrics is to discretize the continuous variables and then deal with only discrete variables. A recent survey of discretization techniques can be found in @garcia2013survey. Some static univariate unsupervised splitting approaches are implemented in the package. In the current implementation, one can give a user-defined number of bins (not recommended) or use a histogram-based approach. Popular choices for the latter are: Cencov's rule [@cencov1962estimation], Freedman-Diaconis' rule [@freedman1981histogram], Scott's rule [@scott], Sturges' rule [@sturges], Doane's formula [@doane] and Rice's rule. 
The MI is estimated through entropy using mRMRe equation and the count of the empirical frequencies. This is a plug-in estimator. Another approach is to use a clustering approach with the elbow method to determine the optimal number of clusters. Finally, one very popular MI estimation compatible only with continuous variables is based on nearest neighbors [@kraskov2004estimating]. The workhorse of *varrank* is the forward/backward implementation of mRMRe equation. The sequential forward variable ranking algorithm is described in [@gk2018]. ## Software functionalities The package *varrank* exploits the **object oriented functionalities** of R through the S3 class. The function *varrank()* returns an object of class *varrank*, a list with multiple entries. At present, three S3 methods have been implemented: the print method displaying a condensed output, the summary method displaying the full output and a plot method. The plot method is an adapted version of *heatmap.2()* from the R package *gplots*. ```{r} output <- varrank(data.df = PimaIndiansDiabetes, method = "battiti", variable.important = "diabetes", discretization.method = "sturges", ratio = 0.6, algorithm = "forward", scheme="mid", verbose = FALSE) ##print output ##summary summary(output) ``` The plot display depends on whether the algorithm is run in a forward search or backward search. ```{r} output <- varrank(data.df = PimaIndiansDiabetes, method = "battiti", variable.important = "diabetes", discretization.method = "sturges", ratio = 0.6, algorithm = "forward", scheme="mid", verbose = FALSE) plot(output) ``` ```{r} output<-varrank(data.df = PimaIndiansDiabetes, method = "battiti", variable.important = "diabetes", discretization.method = "sturges", ratio = 0.6, algorithm = "backward",scheme="mid", verbose = FALSE) plot(output) ``` ## Examples based on different datasets Below are some examples of the *varrank* methodology applied to classical R datasets. ### Swiss Fertility and Socioeconomic Indicators (1888) Data The swiss fertility dataset [@R-Core-Team:2017aa] consists of six continuous variables with 47 observations. Exploratory data analysis: ```{r} pairs(swiss, panel = panel.smooth, main = "Swiss Data", col = 3 + (swiss$Catholic > 80), gap = 0) summary(lm(Fertility ~ . , data = swiss)) ``` Forward *varrank* analysis: ```{r} swiss.varrank <- varrank(data.df = swiss, method = "estevez", variable.important = "Fertility", discretization.method = "sturges", algorithm = "forward", scheme = "mid", verbose = FALSE) swiss.varrank plot(swiss.varrank) ``` ### Longley This is a data frame with seven continuous economical variables from the US, observed yearly from 1947 to 1962. This dataset is known to be highly collinear [@R-Core-Team:2017aa]. The exploratory data analysis: ```{r} pairs(longley, main = "Longley Data", gap = 0) summary(fm1 <- lm(Employed ~ ., data = longley)) ``` Forward *varrank* analysis: ```{r} longley.varrank <- varrank(data.df = longley, method = "estevez", variable.important = "Employed", discretization.method = "sturges", algorithm = "forward", scheme = "mid", verbose = FALSE) longley.varrank plot(longley.varrank) ``` ### Air Quality dataset Daily air quality measurements in New York from May to September 1973. This dataset [@R-Core-Team:2017aa] contains 6 continuous variables with 154 observations. A complete case analysis is performed. 
Exploratory data analysis: ```{r} pairs(airquality, panel = panel.smooth, main = "Air Quality Data", gap = 0) ``` Forward *varrank* analysis ```{r} airquality.varrank <- varrank(data.df = (data.frame(lapply(airquality[complete.cases(airquality), ], as.numeric))), method = "estevez", variable.important = "Ozone", discretization.method = "sturges", algorithm = "forward", scheme = "mid", verbose = FALSE) airquality.varrank plot(airquality.varrank) ``` ### Airbags and other influences on accident fatalities This is US data (1997-2002) from police-reported car crashes in which there was a harmful event (people or property) and from which at least one vehicle was towed. This dataset [@maindonald2014daag] contains 15 variables with 26'217 observations. The *varrank* forward search is performed using "peng" model. ```{r} data(nassCDS) nassCDS.varrank <- varrank(data.df = nassCDS, method = "peng", variable.important = "dead", discretization.method = "sturges", algorithm = "forward", scheme = "mid", verbose = FALSE) nassCDS.varrank plot(nassCDS.varrank, notecex = 0.5) ``` # Bibliography
/scratch/gouwar.j/cran-all/cranData/varrank/vignettes/varrank.Rmd
"Acoef" <- function (x) { if (!is(x, "varest")) { stop("\nPlease provide an object of class 'varest', generated by 'var()'.\n") } K <- x$K p <- x$p A <- Bcoef(x)[, 1:(K*p)] As <- list() start <- seq(1, p * K, K) end <- seq(K, p * K, K) for (i in 1:p) { As[[i]] <- matrix(A[, start[i]:end[i]], nrow = K, ncol = K) rownames(As[[i]]) <- rownames(A) colnames(As[[i]]) <- colnames(A[, start[i]:end[i]]) } return(As) } A <- function(x){ .Deprecated("Acoef", package = "vars", msg = "Function 'A' is deprecated; use 'Acoef' instead.\nSee help(\"vars-deprecated\") and help(\"A-deprecated\") for more information.") Acoef(x = x) }
/scratch/gouwar.j/cran-all/cranData/vars/R/A.R
"Bcoef" <- function(x){ if(!is(x, "varest")){ stop("\nPlease provide an object of class 'varest', generated by 'var()'.\n") } y.names <- colnames(x$datamat[, c(1 : x$K)]) Z <- x$datamat[, -c(1 : x$K)] B <- matrix(0, nrow = x$K, ncol = ncol(Z)) if(is.null(x$restriction)){ for(i in 1 : x$K){ B[i, ] <- coef(x$varresult[[i]]) } }else if(!(is.null(x$restriction))){ for(i in 1 : x$K){ restrictions <- x$restrictions restrictions[i, restrictions[i, ] == TRUE] <- coef(x$varresult[[i]]) temp <- restrictions[i, ] B[i, ] <- temp } } colnames(B) <- colnames(Z) rownames(B) <- y.names return(B) } B <- function(x){ .Deprecated("Bcoef", package = "vars", msg = "Function 'B' is deprecated; use 'Acoef' instead.\nSee help(\"vars-deprecated\") and help(\"B-deprecated\") for more information.") Bcoef(x = x) }
/scratch/gouwar.j/cran-all/cranData/vars/R/B.R
"BQ" <- function(x){ if (!is(x, "varest")) { stop("\nPlease, provide an object of class 'varest',\n generated by function 'VAR()' as input for 'x'.\n") } Amats <- Acoef(x) P <- x$p Ident <- diag(x$K) mat1 <- matrix(0, x$K, x$K) mat2 <- mat1 for(i in 1:P){ mat1 <- mat1 - Amats[[i]] mat2 <- mat2 - t(Amats[[i]]) } mat1 <- Ident + mat1 mat2 <- Ident + mat2 df <- summary(x$varresult[[1]])$df[2] SigmaU <- crossprod(resid(x)) / df eval <- solve(mat1) %*% SigmaU %*% solve(mat2) lrim <- t(chol(eval)) colnames(lrim) <- colnames(x$y) rownames(lrim) <- colnames(lrim) cim <- mat1 %*% lrim colnames(cim) <- colnames(lrim) rownames(cim) <- colnames(lrim) result <- list(A = Ident, Ase = NULL, B = cim, Bse = NULL, LRIM = lrim, Sigma.U = SigmaU * 100, LR = NULL, opt = NULL, start = NULL, type = "Blanchard-Quah", var = x, call = match.call()) class(result) <- "svarest" return(result) }
/scratch/gouwar.j/cran-all/cranData/vars/R/BQ.R
"Phi" <- function(x, nstep=10, ...){ UseMethod("Phi", x) }
/scratch/gouwar.j/cran-all/cranData/vars/R/Phi.R
"Phi.svarest" <- function(x, nstep = 10, ...){ if(!is(x, "svarest")){ stop("\nPlease provide an object of class 'svarest', generated by 'SVAR()'.\n") } nstep <- abs(as.integer(nstep)) Ainv <- solve(x$A) Phi <- Phi(x$var, nstep = nstep) for(i in 1: dim(Phi)[3]){ Phi[, , i] <- Phi[, , i] %*% Ainv %*% x$B } return(Phi) }
/scratch/gouwar.j/cran-all/cranData/vars/R/Phi.svarest.R
"Phi.svecest" <- function(x, nstep = 10, ...){ if(!is(x, "svecest")){ stop("\nPlease provide an object of class 'svecest', generated by 'SVEC()'.\n") } nstep <- abs(as.integer(nstep)) varlevel <- vec2var(x$var, r = x$r) Phi <- Phi(varlevel, nstep = nstep) for(i in 1: dim(Phi)[3]){ Phi[, , i] <- Phi[, , i] %*% x$SR } return(Phi) }
/scratch/gouwar.j/cran-all/cranData/vars/R/Phi.svecest.R
"Phi.varest" <- function(x, nstep=10, ...){ if(!is(x, "varest")){ stop("\nPlease provide an object of class 'varest', generated by 'VAR()'.\n") } nstep <- abs(as.integer(nstep)) K <- x$K p <- x$p A <- as.array(Acoef(x)) if(nstep >= p){ As <- array(0, dim = c(K, K, nstep + 1)) for(i in (p + 1):(nstep + 1)){ As[, , i] <- matrix(0, nrow = K, ncol = K) } } else { As <- array(0, dim = c(K, K, p)) } for(i in 1:p){ As[, , i] <- A[[i]] } Phi <- array(0, dim=c(K, K, nstep + 1)) Phi[, ,1] <- diag(K) Phi[, , 2] <- Phi[, , 1] %*% As[, , 1] if (nstep > 1) { for (i in 3:(nstep + 1)) { tmp1 <- Phi[, , 1] %*% As[, , i-1] tmp2 <- matrix(0, nrow = K, ncol = K) idx <- (i - 2):1 for (j in 1:(i - 2)) { tmp2 <- tmp2 + Phi[, , j+1] %*% As[, , idx[j]] } Phi[, , i] <- tmp1 + tmp2 } } return(Phi) }
/scratch/gouwar.j/cran-all/cranData/vars/R/Phi.varest.R
"Phi.vec2var" <- function(x, nstep=10, ...){ if(!is(x, "vec2var")){ stop("\nPlease provide an object of class 'vec2var', generated by 'vec2var()'.\n") } nstep <- abs(as.integer(nstep)) K <- x$K p <- x$p A <- as.array(x$A) if(nstep >= p){ As <- array(0, dim = c(K, K, nstep + 1)) for(i in (p + 1):(nstep + 1)){ As[, , i] <- matrix(0, nrow = K, ncol = K) } } else { As <- array(0, dim = c(K, K, p)) } for(i in 1:p){ As[, , i] <- A[[i]] } Phi <- array(0, dim=c(K, K, nstep + 1)) Phi[, ,1] <- diag(K) Phi[, , 2] <- Phi[, , 1] %*% As[, , 1] if (nstep > 1) { for (i in 3:(nstep + 1)) { tmp1 <- Phi[, , 1] %*% As[, , i-1] tmp2 <- matrix(0, nrow = K, ncol = K) idx <- (i - 2):1 for (j in 1:(i - 2)) { tmp2 <- tmp2 + Phi[, , j+1] %*% As[, , idx[j]] } Phi[, , i] <- tmp1 + tmp2 } } return(Phi) }
/scratch/gouwar.j/cran-all/cranData/vars/R/Phi.vec2var.R
"Psi" <- function(x, nstep=10, ...){ UseMethod("Psi", x) }
/scratch/gouwar.j/cran-all/cranData/vars/R/Psi.R
"Psi.varest" <- function(x, nstep=10, ...){ if(!is(x, "varest")){ stop("\nPlease provide an object of class 'varest', generated by 'VAR()'.\n") } nstep <- abs(as.integer(nstep)) Phi <- Phi(x, nstep = nstep) Psi <- array(0, dim=dim(Phi)) params <- ncol(x$datamat[, -c(1:x$K)]) sigma.u <- crossprod(resid(x)) / (x$obs - params) P <- t(chol(sigma.u)) dim3 <- dim(Phi)[3] for(i in 1:dim3){ Psi[, , i] <- Phi[, , i] %*% P } return(Psi) }
/scratch/gouwar.j/cran-all/cranData/vars/R/Psi.varest.R
"Psi.vec2var" <- function(x, nstep=10, ...){ if(!is(x, "vec2var")){ stop("\nPlease provide an object of class 'vec2var', generated by 'vec2var()'.\n") } nstep <- abs(as.integer(nstep)) Phi <- Phi(x, nstep = nstep) Psi <- array(0, dim=dim(Phi)) params <- ncol(x$datamat[, -c(1:x$K)]) sigma.u <- crossprod(resid(x)) / x$obs P <- t(chol(sigma.u)) dim3 <- dim(Phi)[3] for(i in 1:dim3){ Psi[, , i] <- Phi[, , i] %*% P } return(Psi) }
/scratch/gouwar.j/cran-all/cranData/vars/R/Psi.vec2var.R
"SVAR" <- function (x, estmethod = c("scoring", "direct"), Amat = NULL, Bmat = NULL, start = NULL, max.iter = 100, conv.crit = 1e-07, maxls = 1, lrtest = TRUE, ...) { if (!is(x, "varest")) { stop("\nPlease, provide an object of class 'varest',\n generated by function 'VAR()' as input for 'x'.\n") } call <- match.call() estmethod <- match.arg(estmethod) if ((is.null(Amat)) && (is.null(Bmat))) { stop("\nAt least one matrix, either 'Amat' or 'Bmat', must be non-null.\n") } if ((is.null(Amat)) && !(is.null(Bmat))) { Amat <- diag(x$K) svartype <- "B-model" } else if ((is.null(Bmat)) && !(is.null(Amat))) { Bmat <- diag(x$K) svartype <- "A-model" } else { svartype <- "AB-model" diag(Amat) <- 1 freeA <- which(is.na(c(Amat))) freeB <- which(is.na(c(Bmat))) if (any(freeA %in% freeB)) { stop("\nSVAR not identified. Free parameters at the same positions in Amat and Bmat.\n") } } if (!any(is.na(cbind(Amat, Bmat)))) { stop("\nNo parameters provided for optimisation, i.e.\nneither 'Amat' nor 'Bmat' do contain NA elements.\n") } K <- x$K obs <- x$obs df <- summary(x$varresult[[1]])$df[2] sigma <- crossprod(resid(x)) / df params.A <- sum(is.na(Amat)) params.B <- sum(is.na(Bmat)) params <- params.A + params.B if ((svartype == "B-model") || (svartype == "A-model")) { if (K^2 - params < K * (K - 1)/2) { stop("\nModel is not identified,\nchoose different settings for 'Amat' and/or 'Bmat'.\n") } } else if (svartype == "AB-model") { if (2 * K^2 - params < K^2 + K * (K - 1)/2) { stop("\nModel is not identified,\nchoose different settings for 'Amat' and/or 'Bmat'.\n") } } if (is.null(start)) start <- rep(0.1, params) start <- as.vector(start) if (!(length(start) == params)) { stop("\nWrong count of starting values provided in 'start'.\nLength of 'start' must be equal to the count of 'na' in 'Amat' and 'Bmat'.\n") } if (estmethod == "direct") { param.Aidx <- which(is.na(Amat), arr.ind = TRUE) param.Bidx <- which(is.na(Bmat), arr.ind = TRUE) logLc <- function(coef) { if (svartype == "B-model") { Bmat[param.Bidx] <- coef } else if (svartype == "A-model") { Amat[param.Aidx] <- coef } else if (svartype == "AB-model") { if (length(param.Aidx) > 0) { Amat[param.Aidx] <- coef[c(1:nrow(param.Aidx))] if (length(param.Bidx) > 0) { Bmat[param.Bidx] <- coef[-c(1:nrow(param.Aidx))] } } else if (length(param.Aidx) == 0) { Bmat[param.Bidx] <- coef } } logLc <- -1 * (K * obs/2) * log(2 * pi) + obs/2 * log(det(Amat)^2) - obs/2 * log(det(Bmat)^2) - obs/2 * sum(diag(t(Amat) %*% solve(t(Bmat)) %*% solve(Bmat) %*% Amat %*% sigma)) return(-logLc) } opt <- optim(start, logLc, ...) 
iter <- opt$counts[1] Asigma <- matrix(0, nrow = K, ncol = K) Bsigma <- matrix(0, nrow = K, ncol = K) if (!(is.null(opt$hessian))) { Sigma <- sqrt(diag(solve(opt$hessian))) } if (svartype == "B-model") { Bmat[param.Bidx] <- opt$par if (!(is.null(opt$hessian))) { Bsigma[param.Bidx] <- Sigma } } else if (svartype == "A-model") { Amat[param.Aidx] <- opt$par if (!(is.null(opt$hessian))) { Asigma[param.Aidx] <- Sigma } } else if (svartype == "AB-model") { if (length(param.Aidx) > 0) { Amat[param.Aidx] <- head(opt$par, nrow(param.Aidx)) if (!(is.null(opt$hessian))) { Asigma[param.Aidx] <- head(Sigma, nrow(param.Aidx)) } } else { Amat <- Amat } if (length(param.Bidx) > 0) { Bmat[param.Bidx] <- tail(opt$par, nrow(param.Bidx)) if (!(is.null(opt$hessian))) { Bsigma[param.Bidx] <- tail(Sigma, nrow(param.Bidx)) } } else { Bmat <- Bmat } } } if (estmethod == "scoring") { gamma <- start Ksq <- K^2 if (svartype == "A-model") { rb <- c(diag(K)) ra <- c(Amat) pos <- which(is.na(ra)) cols <- length(pos) Ra <- matrix(0, nrow = Ksq, ncol = cols) for (i in 1:cols) Ra[pos[i], i] <- 1 ra[pos] <- 0 } if (svartype == "B-model") { ra <- c(diag(K)) rb <- c(Bmat) pos <- which(is.na(rb)) cols <- length(pos) Rb <- matrix(0, nrow = Ksq, ncol = cols) for (i in 1:cols) Rb[pos[i], i] <- 1 rb[pos] <- 0 } if (svartype == "AB-model") { ra <- c(Amat) pos <- which(is.na(ra)) cols <- length(pos) Ra <- matrix(0, nrow = Ksq, ncol = cols) for (i in 1:cols) Ra[pos[i], i] <- 1 ra[pos] <- 0 rb <- c(Bmat) pos <- which(is.na(rb)) cols <- length(pos) Rb <- matrix(0, nrow = Ksq, ncol = cols) for (i in 1:cols) Rb[pos[i], i] <- 1 rb[pos] <- 0 } R <- matrix(0, nrow = 2 * Ksq, ncol = params) if (identical(as.integer(params.A), as.integer(0))) { R[(Ksq + 1):(2 * Ksq), 1:params] <- Rb } else if (identical(as.integer(params.B), as.integer(0))) { R[1:Ksq, 1:params] <- Ra } else if ((!(is.null(params.A)) && (!(is.null(params.B))))) { R[1:Ksq, 1:params.A] <- Ra R[(Ksq + 1):(2 * Ksq), (params.A + 1):params] <- Rb } r <- c(ra, rb) Kkk <- diag(Ksq)[, c(sapply(1:K, function(i) seq(i, Ksq, K)))] IK2 <- diag(Ksq) IK <- diag(K) iters <- 0 cvcrit <- conv.crit + 1 while (cvcrit > conv.crit) { z <- gamma vecab <- R %*% gamma + r Amat <- matrix(vecab[1:Ksq], nrow = K, ncol = K) Bmat <- matrix(vecab[(Ksq + 1):(2 * Ksq)], nrow = K, ncol = K) Binv <- solve(Bmat) Btinv <- solve(t(Bmat)) BinvA <- Binv %*% Amat infvecab.mat1 <- rbind(kronecker(solve(BinvA), Btinv), -1 * kronecker(IK, Btinv)) infvecab.mat2 <- IK2 + Kkk infvecab.mat3 <- cbind(kronecker(t(solve(BinvA)), Binv), -1 * kronecker(IK, Binv)) infvecab <- obs * (infvecab.mat1 %*% infvecab.mat2 %*% infvecab.mat3) infgamma <- t(R) %*% infvecab %*% R infgammainv <- solve(infgamma) scorevecBinvA <- obs * c(solve(t(BinvA))) - obs * (kronecker(sigma, IK) %*% c(BinvA)) scorevecAB.mat <- rbind(kronecker(IK, Btinv), -1 * kronecker(BinvA, Btinv)) scorevecAB <- scorevecAB.mat %*% scorevecBinvA scoregamma <- t(R) %*% scorevecAB direction <- infgammainv %*% scoregamma length <- max(abs(direction)) ifelse(length > maxls, lambda <- maxls/length, lambda <- 1) gamma <- gamma + lambda * direction iters <- iters + 1 z <- z - gamma cvcrit <- max(abs(z)) if (iters >= max.iter) { warning(paste("Convergence not achieved after", iters, "iterations. 
Convergence value:", cvcrit, ".")) break } } iter <- iters - 1 abSigma <- sqrt(diag((R %*% solve(infgamma) %*% t(R)))) Asigma <- matrix(abSigma[1:Ksq], nrow = K, ncol = K) Bsigma <- matrix(abSigma[(Ksq + 1):(2 * Ksq)], nrow = K, ncol = K) opt <- NULL } colnames(Amat) <- colnames(x$y) rownames(Amat) <- colnames(Amat) colnames(Bmat) <- colnames(Amat) rownames(Bmat) <- colnames(Amat) colnames(Asigma) <- colnames(Amat) rownames(Asigma) <- colnames(Amat) colnames(Bsigma) <- colnames(Amat) rownames(Bsigma) <- colnames(Amat) ## ## Normalize sign of Amat and Bmat if applicable ## if(svartype == "AB-model"){ if(any(diag(Amat) < 0)){ ind <- which(diag(Amat) < 0) Amat[, ind] <- -1 * Amat[, ind] } if(any(diag(Bmat) < 0)){ ind <- which(diag(Bmat) < 0) Bmat[, ind] <- -1 * Bmat[, ind] } } if(svartype == "B-model"){ if(any(diag(solve(Amat) %*% Bmat) < 0)){ ind <- which(diag(solve(Amat) %*% Bmat) < 0) Bmat[, ind] <- -1 * Bmat[, ind] } } if(svartype == "A-model"){ if(any(diag(solve(Amat) %*% Bmat) < 0)){ ind <- which(diag(solve(Amat) %*% Bmat) < 0) Amat[, ind] <- -1 * Amat[, ind] } } Sigma.U <- solve(Amat) %*% Bmat %*% t(Bmat) %*% t(solve(Amat)) LRover <- NULL if (lrtest) { degrees <- 2 * K^2 - params - 2 * K^2 + 0.5 * K * (K + 1) if (identical(degrees, 0)) { warning(paste("The", svartype, "is just identified. No test possible.")) } else { STATISTIC <- obs * (log(det(Sigma.U)) - log(det(sigma))) names(STATISTIC) <- "Chi^2" PARAMETER <- 2 * K^2 - params - 2 * K^2 + 0.5 * K * (K + 1) names(PARAMETER) <- "df" PVAL <- 1 - pchisq(STATISTIC, df = PARAMETER) METHOD <- "LR overidentification" LRover <- list(statistic = STATISTIC, parameter = PARAMETER, p.value = PVAL, method = METHOD, data.name = x$call$y) class(LRover) <- "htest" } } result <- list(A = Amat, Ase = Asigma, B = Bmat, Bse = Bsigma, LRIM = NULL, Sigma.U = Sigma.U * 100, LR = LRover, opt = opt, start = start, type = svartype, var = x, iter = iter, call = call) class(result) <- "svarest" return(result) }
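## Usage sketch (added for illustration; the restriction pattern below is an
## example of an identified A-model, not a canonical economic model).
library(vars)
data(Canada)
var.2c <- VAR(Canada, p = 2, type = "const")
amat <- diag(4)
diag(amat) <- NA   # free diagonal elements
amat[2, 1] <- NA   # two additional free off-diagonal elements
amat[4, 1] <- NA
SVAR(var.2c, estmethod = "scoring", Amat = amat, Bmat = NULL,
     max.iter = 100, maxls = 1000, conv.crit = 1.0e-8)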
/scratch/gouwar.j/cran-all/cranData/vars/R/SVAR.R
"SVEC" <- function(x, LR = NULL, SR = NULL, r = 1, start = NULL, max.iter = 100, conv.crit = 1e-07, maxls = 1, lrtest = TRUE, boot = FALSE, runs = 100){ if (!is(x, "ca.jo")) { stop("\nPlease, provide object of class 'ca.jo' as 'z'.\n") } if((is.null(LR)) || (is.null(SR))){ stop("Please, provide matrix objects for 'LR' and 'SR'.\n") } r <- as.integer(r) if(!({1 <= r} && {r < ncol(x@x)})){ stop(paste("\nThe cointegration rank 'r' must be in the interval [1:", ncol(z@x) - 1, "].\n", sep = "")) } ## ## Setting parameters and identity matrices ## K <- x@P K2 <- K^2 P <- x@lag - 1 IK <- diag(K) IK2 <- diag(K2) Kkk <- diag(K2)[, c(sapply(1:K, function(i) seq(i, K2, K)))] svartype <- "B-model" ## ## Checking for correct dimensions of LR and SR ## if(!identical(dim(LR), as.integer(c(K, K)))){ stop(paste("Dimension of 'LR' must be (", K, "x", K, ").")) } if(!identical(dim(SR), as.integer(c(K, K)))){ stop(paste("Dimension of 'SR' must be (", K, "x", K, ").")) } SRorig <- SR LRorig <- LR ## ## Obtaining restricted VECM with normalised beta ## vecr <- cajorls(z = x, r = r) ## ## Obtaining Sigma of vecr ## Resids <- residuals(vecr$rlm) obs <- nrow(Resids) Sigma <- crossprod(Resids) / obs ## ## Obtaining Xi matrix ## Coef.vecr <- coef(vecr$rlm) alpha <- t(Coef.vecr[1:r, ]) ifelse(r == 1, alpha.orth <- Null(t(alpha)), alpha.orth <- Null(alpha)) beta <- vecr$beta[1:K, ] beta.orth <- Null(beta) Gamma.array <- array(c(t(tail(coef(vecr$rlm), K*P))), c(K, K, P)) Gamma <- apply(Gamma.array, c(1, 2), sum) Xi <- beta.orth %*% solve(t(alpha.orth) %*% (IK - Gamma) %*% beta.orth) %*% t(alpha.orth) ## ## S-Matrix for explicit form ## Lrres <- sum(is.na(LR)) SRres <- sum(is.na(SR)) R0 <- diag(K^2) select <- c(apply(SR, c(1, 2), function(x) ifelse(identical(x, 0.0), TRUE, FALSE))) R.B <- R0[select, ] select <- c(apply(LR, c(1, 2), function(x) ifelse(identical(x, 0.0), TRUE, FALSE))) R.C1 <- R0[select, ] if(identical(nrow(R.C1), as.integer(0))){ R.C1 <- matrix(0, nrow = K2, ncol = K2) nResC1 <- 0 } R.C1 <- R.C1 %*% kronecker(IK, Xi) nResC1 <- qr(R.C1)$rank ## ## Setting up the R matrix (implicit form) ## if(identical(nrow(R.B), as.integer(0))){ R <- R.C1 nResB <- 0 } else { R <- rbind(R.C1, R.B) nResB <- qr(R.B)$rank } ## ## Obtaining the S matrix and s vector (explicit form) ## Sb <- Null(t(R)) S <- rbind(matrix(0, nrow = K^2, ncol = ncol(Sb)), Sb) l<- ncol(S) s <- c(c(diag(K)), rep(0, K^2)) ## ## Test of unidentification ## if((nResB + nResC1) < (K * (K - 1) / 2)){ stop("The model is not identified. Use less free parameters.") } ## ## Test identification numerically ## ifelse(is.null(start), gamma <- start <- rnorm(l), gamma <- start) vecab <- S %*% gamma + s A <- matrix(vecab[1:K2], nrow = K, ncol = K) B <- matrix(vecab[(K2 + 1):(2 * K2)], nrow = K, ncol = K) v1 <- (IK2 + Kkk) %*% kronecker(t(solve(A) %*% B), solve(B)) v2 <- -1 * (IK2 + Kkk) %*% kronecker(IK, solve(B)) v <- cbind(v1, v2) idmat <- v %*% S ms <- t(v) %*% v auto <- eigen(ms)$values rni <- 0 for (i in 1:l) { if (auto[i] < 1e-11) rni <- rni + 1 } if (identical(rni, 0)) { if (identical(l, as.integer(K * (K + 1)/2))) { ident <- paste("The", svartype, "is just identified.") } else { ident <- paste("The", svartype, "is over identified.") } } else { ident <- paste("The", svartype, "is unidentified. 
The non-identification rank is", rni, ".") stop(ident) } ## ## Scoring Algorithm ## iters <- 0 cvcrit <- conv.crit + 1 while (cvcrit > conv.crit) { z <- gamma vecab <- S %*% gamma + s A <- matrix(vecab[1:K2], nrow = K, ncol = K) B <- matrix(vecab[(K2 + 1):(2 * K2)], nrow = K, ncol = K) Binv <- solve(B) Btinv <- solve(t(B)) BinvA <- Binv %*% A infvecab.mat1 <- rbind(kronecker(solve(BinvA), Btinv), -1 * kronecker(IK, Btinv)) infvecab.mat2 <- IK2 + Kkk infvecab.mat3 <- cbind(kronecker(t(solve(BinvA)), Binv), -1 * kronecker(IK, Binv)) infvecab <- obs * (infvecab.mat1 %*% infvecab.mat2 %*% infvecab.mat3) infgamma <- t(S) %*% infvecab %*% S infgammainv <- solve(infgamma) scorevecBinvA <- obs * c(solve(t(BinvA))) - obs * (kronecker(Sigma, IK) %*% c(BinvA)) scorevecAB.mat <- rbind(kronecker(IK, Btinv), -1 * kronecker(BinvA, Btinv)) scorevecAB <- scorevecAB.mat %*% scorevecBinvA scoregamma <- t(S) %*% scorevecAB direction <- infgammainv %*% scoregamma length <- max(abs(direction)) ifelse(length > maxls, lambda <- maxls/length, lambda <- 1) gamma <- gamma + lambda * direction iters <- iters + 1 z <- z - gamma cvcrit <- max(abs(z)) if (iters >= max.iter) { warning(paste("Convergence not achieved after", iters, "iterations. Convergence value:", cvcrit, ".")) break } } iter <- iters - 1 vecab <- S %*% gamma + s SR <- B ## ## Normalising the sign of SR ## select <- which(diag(solve(A) %*% B) < 0) SR[, select] <- -1 * SR[, select] ## ## Computing LR and Sigma.U ## LR <- Xi %*% SR Sigma.U <- solve(A) %*% B %*% t(B) %*% t(solve(A)) colnames(SR) <- colnames(x@x) rownames(SR) <- colnames(SR) colnames(LR) <- colnames(SR) rownames(LR) <- colnames(SR) colnames(Sigma.U) <- colnames(SR) rownames(Sigma.U) <- colnames(SR) ## ## LR overidentification test ## LRover <- NULL if (lrtest) { degrees <- K * (K + 1) / 2 - l if(identical(degrees, 0)) { warning(paste("The SVEC is just identified. 
No test possible.")) } else { rSigma <- solve(A) %*% B %*% t(B) %*% t(solve(A)) det1 <- det(rSigma) det2 <- det(Sigma) STATISTIC <- (log(det1) - log(det2)) * obs names(STATISTIC) <- "Chi^2" PARAMETER <- degrees names(PARAMETER) <- "df" PVAL <- 1 - pchisq(STATISTIC, df = PARAMETER) METHOD <- "LR overidentification" LRover <- list(statistic = STATISTIC, parameter = PARAMETER, p.value = PVAL, method = METHOD, data.name = deparse(substitute(x))) class(LRover) <- "htest" } } BOOTVAL <- NULL SRse <- NULL LRse <- NULL if(boot){ BOOTVAL <- .bootsvec(x = x, LRorig = LRorig, SRorig = SRorig, r = r, runs = runs, K = K, conv.crit = conv.crit, maxls = maxls, max.iter = max.iter) ## ## Calculating the standard deviations for parameters ## SRboot <- BOOTVAL[1:K2, ] SRse <- matrix(sqrt(apply((SRboot - c(SR))^2, 1, mean)), nrow = K, ncol = K) idxnull <- which(abs(SRse) < 0.1e-8, arr.ind = TRUE) SRse[idxnull] <- 0.0 LRboot <- BOOTVAL[-c(1:K2), ] LRse <- matrix(sqrt(apply((LRboot - c(LR))^2, 1, mean)), nrow = K, ncol = K) idxnull <- which(abs(LRse) < 0.1e-8, arr.ind = TRUE) LRse[idxnull] <- 0.0 colnames(SRse) <- colnames(SR) rownames(SRse) <- rownames(SR) colnames(LRse) <- colnames(LR) rownames(LRse) <- rownames(LR) } ## ## Setting near zero elements to zero ## idxnull <- which(abs(LR) < 0.1e-8, arr.ind = TRUE) LR[idxnull] <- 0.0 idxnull <- which(abs(SR) < 0.1e-8, arr.ind = TRUE) SR[idxnull] <- 0.0 ## ## Assembling svecest object ## result <- list(SR = SR, SRse = SRse, LR = LR, LRse = LRse, Sigma.U = Sigma.U * 100, Restrictions = c(nResC1, nResB), LRover = LRover, start = start, type = svartype, var = x, LRorig = LRorig, SRorig = SRorig, r = r, iter = iter, call = match.call()) class(result) <- "svecest" return(result) }
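## Usage sketch (added for illustration; the restriction pattern mirrors the
## structure of the Canada example in the package documentation, but should
## be treated as illustrative and adapted to the application at hand).
library(vars)  # loads urca, which provides ca.jo()
data(Canada)
vecm <- ca.jo(Canada[, c("prod", "e", "U", "rw")], type = "trace",
              ecdet = "trend", K = 3, spec = "transitory")
SR <- matrix(NA, nrow = 4, ncol = 4)
SR[4, 2] <- 0        # one contemporaneous (short-run) zero restriction
LR <- matrix(NA, nrow = 4, ncol = 4)
LR[1, 2:4] <- 0      # long-run zero restrictions
LR[2:4, 4] <- 0
SVEC(vecm, LR = LR, SR = SR, r = 1, lrtest = FALSE, boot = FALSE)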
/scratch/gouwar.j/cran-all/cranData/vars/R/SVEC.R
"VAR" <- function (y, p = 1, type = c("const", "trend", "both", "none"), season = NULL, exogen = NULL, lag.max = NULL, ic = c("AIC", "HQ", "SC", "FPE")) { y <- as.matrix(y) if (any(is.na(y))) stop("\nNAs in y.\n") if (ncol(y) < 2) stop("The matrix 'y' should contain at least two variables. For univariate analysis consider ar() and arima() in package stats.\n") if (is.null(colnames(y))) { colnames(y) <- paste("y", 1:ncol(y), sep = "") warning(paste("No column names supplied in y, using:", paste(colnames(y), collapse = ", "), ", instead.\n")) } colnames(y) <- make.names(colnames(y)) y.orig <- y type <- match.arg(type) obs <- dim(y)[1] K <- dim(y)[2] if(!is.null(lag.max)){ lag.max <- abs(as.integer(lag.max)) ic <- paste(match.arg(ic), "(n)", sep = "") p <- VARselect(y, lag.max = lag.max, type = type, season = season, exogen = exogen)$selection[ic] } sample <- obs - p ylags <- embed(y, dimension = p + 1)[, -(1:K)] temp1 <- NULL for (i in 1:p) { temp <- paste(colnames(y), ".l", i, sep = "") temp1 <- c(temp1, temp) } colnames(ylags) <- temp1 yend <- y[-c(1:p), ] if (type == "const") { rhs <- cbind(ylags, rep(1, sample)) colnames(rhs) <- c(colnames(ylags), "const") } else if (type == "trend") { rhs <- cbind(ylags, seq(p + 1, length = sample)) colnames(rhs) <- c(colnames(ylags), "trend") } else if (type == "both") { rhs <- cbind(ylags, rep(1, sample), seq(p + 1, length = sample)) colnames(rhs) <- c(colnames(ylags), "const", "trend") } else if (type == "none") { rhs <- ylags colnames(rhs) <- colnames(ylags) } if (!(is.null(season))) { season <- abs(as.integer(season)) dum <- (diag(season) - 1/season)[, -season] dums <- dum while (nrow(dums) < obs) { dums <- rbind(dums, dum) } dums <- dums[1:obs, ] colnames(dums) <- paste("sd", 1:ncol(dums), sep = "") rhs <- cbind(rhs, dums[-c(1:p), ]) } if (!(is.null(exogen))) { exogen <- as.matrix(exogen) if (!identical(nrow(exogen), nrow(y))) { stop("\nDifferent row size of y and exogen.\n") } if (is.null(colnames(exogen))) { colnames(exogen) <- paste("exo", 1:ncol(exogen), sep = "") warning(paste("No column names supplied in exogen, using:", paste(colnames(exogen), collapse = ", "), ", instead.\n")) } colnames(exogen) <- make.names(colnames(exogen)) tmp <- colnames(rhs) rhs <- cbind(rhs, exogen[-c(1:p), ]) colnames(rhs) <- c(tmp, colnames(exogen)) } datamat <- as.data.frame(rhs) colnames(datamat) <- colnames(rhs) equation <- list() for (i in 1:K) { y <- yend[, i] equation[[colnames(yend)[i]]] <- lm(y ~ -1 + ., data = datamat) if(any(c("const", "both") %in% type)){ attr(equation[[colnames(yend)[i]]]$terms, "intercept") <- 1 } } call <- match.call() if("season" %in% names(call)) call$season <- eval(season) result <- list(varresult = equation, datamat = data.frame(cbind(yend, rhs)), y = y.orig, type = type, p = p, K = K, obs = sample, totobs = sample + p, restrictions = NULL, call = call) class(result) <- "varest" return(result) }
/scratch/gouwar.j/cran-all/cranData/vars/R/VAR.R
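## Illustrative usage sketch (not part of the package sources): fit a
## VAR(2) with a constant to the quarterly 'Canada' data set shipped with
## the package and inspect the equation-wise OLS summaries. The lag order
## p = 2 is an arbitrary choice for demonstration.
library(vars)
data(Canada)
var.2c <- VAR(Canada, p = 2, type = "const")
summary(var.2c)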
"VARselect" <- function (y, lag.max = 10, type = c("const", "trend", "both", "none"), season = NULL, exogen = NULL) { y <- as.matrix(y) if (any(is.na(y))) stop("\nNAs in y.\n") colnames(y) <- make.names(colnames(y)) K <- ncol(y) lag.max <- abs(as.integer(lag.max)) type <- match.arg(type) lag <- abs(as.integer(lag.max + 1)) ylagged <- embed(y, lag)[, -c(1:K)] yendog <- y[-c(1:lag.max), ] sample <- nrow(ylagged) rhs <- switch(type, const = rep(1, sample), trend = seq(lag.max + 1, length = sample), both = cbind(rep(1, sample), seq(lag.max + 1, length = sample)), none = NULL) if (!(is.null(season))) { season <- abs(as.integer(season)) dum <- (diag(season) - 1/season)[, -season] dums <- dum while (nrow(dums) < sample) { dums <- rbind(dums, dum) } dums <- dums[1:sample, ] rhs <- cbind(rhs, dums) } if (!(is.null(exogen))) { exogen <- as.matrix(exogen) if (!identical(nrow(exogen), nrow(y))) { stop("\nDifferent row size of y and exogen.\n") } if (is.null(colnames(exogen))) { colnames(exogen) <- paste("exo", 1:ncol(exogen), sep = "") warning(paste("No column names supplied in exogen, using:", paste(colnames(exogen), collapse = ", "), ", instead.\n")) } colnames(exogen) <- make.names(colnames(exogen)) rhs <- cbind(rhs, exogen[-c(1:lag.max), ]) } idx <- seq(K, K * lag.max, K) if(!is.null(rhs)){ detint <- ncol(as.matrix(rhs)) } else { detint <- 0 } criteria <- matrix(NA, nrow = 4, ncol = lag.max) rownames(criteria) <- c("AIC(n)", "HQ(n)", "SC(n)", "FPE(n)") colnames(criteria) <- paste(seq(1:lag.max)) for (i in 1:lag.max) { ys.lagged <- cbind(ylagged[, c(1:idx[i])], rhs) sampletot <- nrow(y) nstar <- ncol(ys.lagged) resids <- lm.fit(x=ys.lagged, y=yendog)$residuals sigma.det <- det(crossprod(resids)/sample) criteria[1, i] <- log(sigma.det) + (2/sample) * (i * K^2 + K * detint) criteria[2, i] <- log(sigma.det) + (2 * log(log(sample))/sample) * (i * K^2 + K * detint) criteria[3, i] <- log(sigma.det) + (log(sample)/sample) * (i * K^2 + K * detint) criteria[4, i] <- ((sample + nstar)/(sample - nstar))^K * sigma.det } order <- apply(criteria, 1, which.min) return(list(selection = order, criteria = criteria)) }
/scratch/gouwar.j/cran-all/cranData/vars/R/VARselect.R
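## Illustrative sketch: let the four information criteria pick the lag
## order up to lag.max = 8 (a ceiling assumed for the example), then use
## the lag.max/ic shortcut of VAR() to fit the AIC-selected model directly.
library(vars); data(Canada)
VARselect(Canada, lag.max = 8, type = "const")$selection
var.aic <- VAR(Canada, type = "const", lag.max = 8, ic = "AIC")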
"arch.test" <- function(x, lags.single = 16, lags.multi = 5, multivariate.only = TRUE){ if(!(is(x, "varest") || is(x, "vec2var"))){ stop("\nPlease provide an object of class 'varest', generated by 'var()', or an object of class 'vec2var' generated by 'vec2var()'.\n") } obj.name <- deparse(substitute(x)) lags.single <- abs(as.integer(lags.single)) lags.multi <- abs(as.integer(lags.multi)) K <- x$K obs <- x$obs resid <- resid(x) resids <- scale(resid) ## ARCH test (multivariate) archm.resids <- .arch.multi(resids, lags.multi = lags.multi, K = K, obs = obs, obj.name = obj.name) if(multivariate.only){ result <- list(resid=resid, arch.mul = archm.resids) } else { ## ARCH test (univariate) archs.resids <- apply(resids, 2, function(x) .arch.uni(x, lags.single = lags.single)) for(i in 1 : K) archs.resids[[i]][5] <- paste("Residual of", colnames(resids)[i], "equation") result <- list(resid=resid, arch.uni = archs.resids, arch.mul = archm.resids) } class(result) <- "varcheck" return(result) } arch <- function(x, lags.single = 16, lags.multi = 5, multivariate.only = TRUE){ .Deprecated("arch.test", package = "vars", msg = "Function 'arch' is deprecated; use 'arch.test' instead.\nSee help(\"vars-deprecated\") and help(\"arch-deprecated\") for more information.") arch.test(x = x, lags.single = lags.single, lags.multi = lags.multi, multivariate.only = multivariate.only) }
/scratch/gouwar.j/cran-all/cranData/vars/R/arch.R
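## Illustrative sketch: multivariate ARCH-LM test on the residuals of a
## fitted VAR; lags.multi = 5 repeats the default for clarity, and
## multivariate.only = FALSE would add the equation-wise tests.
library(vars); data(Canada)
var.2c <- VAR(Canada, p = 2, type = "const")
arch.test(var.2c, lags.multi = 5)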
causality <- function(x, cause = NULL, vcov.=NULL, boot=FALSE, boot.runs=100){ if(!is(x, "varest")){ stop("\nPlease provide an object of class 'varest', generated by 'var()'.\n") } K <- x$K p <- x$p obs <- x$obs type <- x$type obj.name <- deparse(substitute(x)) y <- x$y y.names <- colnames(x$y) if(is.null(cause)){ cause <- y.names[1] warning("\nArgument 'cause' has not been specified;\nusing first variable in 'x$y' (", cause, ") as cause variable.\n") } else { if(!all(cause%in%y.names)) stop("Argument cause does not match variables names.\n") } y1.names <- subset(y.names, subset = y.names %in% cause) y2.names <- subset(y.names, subset = !(y.names %in% cause)) Z <- x$datamat[, -c(1 : K)] xMlm<-toMlm(x) PI <- coef(xMlm) PI.vec <- as.vector(PI) ###Restriction matrix R for Granger causality #build matrix of same size as coef matrix indicating which to be restricted R2<-matrix(0, ncol=ncol(PI), nrow=nrow(PI)) g<-which(gsub("\\.l\\d+", "", rownames(PI))%in%cause) #select cause regressors j<-which(colnames(PI)%in%cause) #select cause regressand R2[g,-j]<-1 #select coef to be tested #If the model already has restriction, overlay with the new ones if (!is.null(x$restrictions)) { xr <- t(x$restrictions) xr <- abs(xr - 1) # match positions of variables rownames(xr)[rownames(xr) == "const"] <- "(Intercept)" xr <- xr[rownames(PI), colnames(PI)] # overlay xr <- xr + R2 xr[xr == 2] <- 1 R2 <- xr } w<-which(as.vector(R2)!=0) #build corresponding matrix as coef are not vectorized N <- length(w) R<-matrix(0, ncol=ncol(PI)*nrow(PI), nrow=N) #matrix of restrictions for(i in 1:N) R[i,w[i]]<-1 ## ## Granger-causality ## if (is.null(vcov.)) { sigma.pi <- vcov(xMlm) } else if (is.function(vcov.)) { sigma.pi <- vcov.(xMlm) } else { sigma.pi <- vcov. } df1 <- p * length(y1.names) * length(y2.names) df2 <- K * obs - length(PI)#K^2 * p - detcoeff STATISTIC <- t(R %*% PI.vec) %*% solve(R %*% sigma.pi %*% t(R)) %*% R %*% PI.vec / N ###bootstrap procedure if(boot){ ###Restricted model: estimation under null of Granger non-causality co.names<-Bcoef(x) #needs to rebuild another restriction matrix for restrict(), as disposition of coef is different k<-which(gsub("\\.l\\d+", "", colnames(co.names))%in%cause) #select cause regressors l<-which(rownames(co.names)%in%cause) #select cause regressand R2inv<-matrix(1, ncol=nrow(PI), nrow=ncol(PI)) #exact inverse steps as R2 R2inv[-l,k]<-0 #select coef to be tested #If the model already has restriction, overlay with the new ones if (!is.null(x$restrictions)) { xr <- x$restrictions # match positions of variables xr <- xr[rownames(co.names), colnames(co.names)] # overlay R2inv <- xr * R2inv } xres<-restrict(x, method = "man", resmat = R2inv) pred<-sapply(xres$varresult,predict) res<-residuals(xres) #bootstrap function for homo case: use more efficient low-level, as XX-1 already computed if(is.null(vcov.)){ if (is.null(x$restrictions)) { #haven't figured out how to adjust these lines to account for existing restrictions Zmlm<-model.matrix(xMlm) cross<-crossprod(Zmlm) inside<-solve(R %*% sigma.pi %*% t(R)) boot.fun<-function(x=1){ Ynew<-pred+res*rnorm(n=obs, mean=0, sd=x) PI.boot<-solve(cross, crossprod(Zmlm,Ynew)) #this could be made more efficent: compute only interest coefs, PI.boot.vec<-as.vector(PI.boot) t(R %*% PI.boot.vec) %*% inside %*% (R %*% PI.boot.vec) / N } } else { #if restrictions already exist; reestimate models (slower), use vcov by default xtmp <- x boot.fun <- function(x = 1) { xtmp$datamat[,1:K] <- pred + res * rnorm(n = obs, mean = 0, sd = x) xMlm.boot <- 
toMlm(xtmp) sigma.pi.boot <- vcov(xMlm.boot) PI.boot.vec <- as.vector(coef(xMlm.boot)) t(R %*% PI.boot.vec) %*% solve(R %*% sigma.pi.boot %*% t(R)) %*% R %*% PI.boot.vec / N } } } else { #two next lines as needed as x<-freeny.x; mylm<-lm(freeny.y~x); rm(x);update(mylm) #does not work xtmp <- x # X<-x$datamat # if(x$type%in%c("const", "both")) X<-X[, -grep("const", colnames(X))] boot.fun<-function(x=1){ xtmp$datamat[,1:K]<-pred+res*rnorm(n=obs, sd=x, mean=0) #workaround as calling it ynew and putting in update() fails # xMlm.boot<-update(xMlm, .~.) #replace with the row below to account for possible restrictions xMlm.boot <- toMlm(xtmp) if (is.function(vcov.)) { sigma.pi.boot <- vcov.(xMlm.boot) } else { sigma.pi.boot <- vcov. warning("vcov. should be function, not an object, when used with boot=TRUE") } PI.boot.vec <- as.vector(coef(xMlm.boot)) t(R %*% PI.boot.vec) %*% solve(R %*% sigma.pi.boot %*% t(R)) %*% R %*% PI.boot.vec / N } } res.rep<-replicate(boot.runs, boot.fun(x=1)) pval<-mean(res.rep>as.numeric(STATISTIC)) } names(STATISTIC) <- "F-Test" if(!boot){ PARAMETER1 <- df1 PARAMETER2 <- df2 names(PARAMETER1) <- "df1" names(PARAMETER2) <- "df2" PVAL <- 1 - pf(STATISTIC, PARAMETER1, PARAMETER2) PARAM<-c(PARAMETER1, PARAMETER2) } else { PARAMETER1 <- boot.runs names(PARAMETER1) <- "boot.runs" PVAL <- pval PARAM<-PARAMETER1 } METHOD <- paste("Granger causality H0:", paste(y1.names, collapse=" "), "do not Granger-cause", paste(y2.names, collapse=" ")) result1 <- list(statistic = STATISTIC, parameter = PARAM, p.value = PVAL, method = METHOD, data.name = paste("VAR object", obj.name)) class(result1) <- "htest" ## ## Instantaneous Causality ## sigma.u <- crossprod(resid(x)) / (obs - ncol(Z)) colnames(sigma.u) <- y.names rownames(sigma.u) <- y.names select <- sigma.u[rownames(sigma.u) %in% y2.names, colnames(sigma.u) %in% y1.names ] sig.vech <- sigma.u[lower.tri(sigma.u, diag = TRUE)] index <- which(sig.vech %in% select) N <- length(index) Cmat <- matrix(0, nrow = N, ncol = length(sig.vech)) for(i in 1 : N){ Cmat[i, index[i]] <- 1 } Dmat <- .duplicate(K) Dinv <- MASS::ginv(Dmat) lambda.w <- obs %*% t(sig.vech) %*% t(Cmat) %*% solve(2 * Cmat %*% Dinv %*% kronecker(sigma.u, sigma.u) %*% t(Dinv) %*% t(Cmat)) %*% Cmat %*% sig.vech STATISTIC <- lambda.w names(STATISTIC) <- "Chi-squared" PARAMETER <- N names(PARAMETER) <- "df" PVAL <- 1 - pchisq(STATISTIC, PARAMETER) METHOD <- paste("H0: No instantaneous causality between:", paste(y1.names, collapse=" "), "and", paste(y2.names, collapse=" ")) result2 <- list(statistic = STATISTIC, parameter = PARAMETER, p.value = PVAL, method = METHOD, data.name = paste("VAR object", obj.name)) class(result2) <- "htest" result2 return(list(Granger = result1, Instant = result2)) }
/scratch/gouwar.j/cran-all/cranData/vars/R/causality.R
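## Illustrative sketch: test whether employment 'e' Granger-causes (and is
## instantaneously related to) the remaining variables, once with the
## asymptotic F distribution and once with the residual-based bootstrap.
library(vars); data(Canada)
var.2c <- VAR(Canada, p = 2, type = "const")
causality(var.2c, cause = "e")
causality(var.2c, cause = "e", boot = TRUE, boot.runs = 100)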
"coef.varest" <- function(object, ...){ return(lapply(lapply(object$varresult, summary), coef)) }
/scratch/gouwar.j/cran-all/cranData/vars/R/coef.varest.R
"fanchart" <- function(x, colors = NULL, cis = NULL, names = NULL, main = NULL, ylab = NULL, xlab = NULL, col.y = NULL, nc, plot.type = c("multiple", "single"), mar = par("mar"), oma = par("oma"), ...){ if(!is(x, "varprd")){ stop("\nPlease provide an object of class 'varprd',\ngenerated by predict-method for objects of class 'varest'.\n") } if(is.null(colors)) colors <- gray(sqrt(seq(from = 0.05, to = 1.0, length = 9))) if(is.null(cis)){ cis <- seq(0.1, 0.9, by = 0.1) } else { if((min(cis) <= 0) || (max(cis) >= 1)) stop("\nValues of confidence intervals must be in(0, 1).\n") if(length(cis) > length(colors)) stop("\nSize of 'colors' vector must be at least as long as\nsize of 'cis' vector\n") } n.regions <- length(cis) n.ahead <- nrow(x$fcst[[1]]) K <- ncol(x$endog) e.sample <- nrow(x$endog) endog <- x$endog fcst <- NULL for(j in 1:n.regions){ fcst[[j]] <- predict(x$model, n.ahead = n.ahead, ci = cis[j], dumvar = x$exo.fcst)$fcst } xx <- seq(e.sample, length.out = n.ahead + 1) xx <- c(xx, rev(xx)) op <- par(no.readonly = TRUE) plot.type <- match.arg(plot.type) ynames <- colnames(endog) if (is.null(names)) { names <- ynames } else { names <- as.character(names) if (!(all(names %in% ynames))) { warning("\nInvalid variable name(s) supplied, using first variable.\n") names <- ynames[1] } } nv <- length(names) ifelse(is.null(main), main <- paste("Fanchart for variable", names), main <- rep(main, nv)[1:nv]) ifelse(is.null(ylab), ylab <- "", ylab <- ylab) ifelse(is.null(xlab), xlab <- "", xlab <- xlab) ifelse(is.null(col.y), col.y <- "black", col.y <- col.y) if(plot.type == "single") { if(nv > 1) par(ask = TRUE) par(mar = mar, oma = oma) } else if(plot.type == "multiple"){ if (missing(nc)) { nc <- ifelse(nv > 4, 2, 1) } nr <- ceiling(nv/nc) par(mfcol = c(nr, nc), mar = mar, oma = oma) } for(i in 1 : nv){ ymax <- max(c(fcst[[n.regions]][names[i]][[1]][, 3]), endog[, names[i]]) ymin <- min(c(fcst[[n.regions]][names[i]][[1]][, 2]), endog[, names[i]]) yy1 <- c(endog[e.sample, names[i]], fcst[[1]][names[i]][[1]][, 2], rev(c(endog[e.sample, names[i]], fcst[[1]][names[i]][[1]][, 3]))) plot.ts(c(endog[, names[i]], rep(NA, n.ahead)), main = main[i], ylim = c(ymin, ymax), ylab = ylab, xlab = xlab, col = col.y, ...) polygon(xx, yy1, col = colors[1], border = colors[1]) if(n.regions > 1){ for(l in 2:n.regions){ yyu <- c(endog[e.sample, names[i]], fcst[[l]][names[i]][[1]][, 3], rev(c(endog[e.sample, names[i]], fcst[[l-1]][names[i]][[1]][, 3]))) yyl <- c(endog[e.sample, names[i]], fcst[[l-1]][names[i]][[1]][, 2], rev(c(endog[e.sample, names[i]], fcst[[l]][names[i]][[1]][, 2]))) polygon(xx, yyu, col = colors[l], border = colors[l]) polygon(xx, yyl, col = colors[l], border = colors[l]) } } } on.exit(par(op)) }
/scratch/gouwar.j/cran-all/cranData/vars/R/fanchart.R
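## Illustrative sketch: fanchart() expects a 'varprd' object, i.e. the
## value of predict() applied to a fitted VAR; by default it shades
## confidence regions from 10% to 90% in steps of 10%.
library(vars); data(Canada)
var.2c <- VAR(Canada, p = 2, type = "const")
fanchart(predict(var.2c, n.ahead = 12, ci = 0.95))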
"fevd" <- function(x, n.ahead = 10, ...){ UseMethod("fevd", x) }
/scratch/gouwar.j/cran-all/cranData/vars/R/fevd.R
"fevd.svarest" <- function(x, n.ahead=10, ...){ if(!is(x, "svarest")){ stop("\nPlease provide an object of class 'svarest', generated by 'SVAR()'.\n") } n.ahead <- abs(as.integer(n.ahead)) K <- x$var$K p <- x$var$p ynames <- colnames(x$var$datamat[, 1 : K]) msey <- .fecovsvar(x, n.ahead = n.ahead) Phi <- Phi(x, nstep = n.ahead) mse <- matrix(NA, nrow = n.ahead, ncol = K) Omega <- array(0, dim = c(n.ahead, K, K)) for(i in 1 : n.ahead){ mse[i, ] <- diag(msey[, , i]) temp <- matrix(0, K, K) for(j in 1 : i){ temp <- temp + Phi[ , , j]^2 } temp <- temp / mse[i, ] for(j in 1 : K){ Omega[i, ,j] <- temp[j, ] } } result <- list() for(i in 1 : K){ result[[i]] <- matrix(Omega[, , i], nrow = n.ahead, ncol = K) colnames(result[[i]]) <- ynames } names(result) <- ynames class(result) <- "varfevd" return(result) }
/scratch/gouwar.j/cran-all/cranData/vars/R/fevd.svarest.R
"fevd.svecest" <- function(x, n.ahead=10, ...){ if(!is(x, "svecest")){ stop("\nPlease provide an object of class 'svecest', generated by 'SVEC()'.\n") } n.ahead <- abs(as.integer(n.ahead)) ifelse(is.null(x$call$r), r <- 1, r <- x$call$r) varlevel <- vec2var(x$var, r = r) K <- varlevel$K p <- varlevel$p ynames <- colnames(varlevel$datamat[, 1 : K]) msey <- .fecovsvec(x, n.ahead = n.ahead, K = K) Phi <- Phi(x, nstep = n.ahead) mse <- matrix(NA, nrow = n.ahead, ncol = K) Omega <- array(0, dim = c(n.ahead, K, K)) for(i in 1 : n.ahead){ mse[i, ] <- diag(msey[, , i]) temp <- matrix(0, K, K) for(j in 1 : i){ temp <- temp + Phi[ , , j]^2 } temp <- temp / mse[i, ] for(j in 1 : K){ Omega[i, ,j] <- temp[j, ] } } result <- list() for(i in 1 : K){ result[[i]] <- matrix(Omega[, , i], nrow = n.ahead, ncol = K) colnames(result[[i]]) <- ynames } names(result) <- ynames class(result) <- "varfevd" return(result) }
/scratch/gouwar.j/cran-all/cranData/vars/R/fevd.svecest.R
"fevd.varest" <- function(x, n.ahead=10, ...){ if(!is(x, "varest")){ stop("\nPlease provide an object of class 'varest', generated by 'VAR()'.\n") } n.ahead <- abs(as.integer(n.ahead)) K <- x$K p <- x$p ynames <- colnames(x$datamat[, 1 : K]) msey <- .fecov(x, n.ahead = n.ahead) Psi <- Psi(x, nstep = n.ahead) mse <- matrix(NA, nrow = n.ahead, ncol = K) Omega <- array(0, dim = c(n.ahead, K, K)) for(i in 1 : n.ahead){ mse[i, ] <- diag(msey[, , i]) temp <- matrix(0, K, K) for(l in 1 : K){ for(m in 1 : K){ for(j in 1 : i){ temp[l, m] <- temp[l, m] + Psi[l , m, j]^2 } } } temp <- temp / mse[i, ] for(j in 1 : K){ Omega[i, ,j] <- temp[j, ] } } result <- list() for(i in 1 : K){ result[[i]] <- matrix(Omega[, , i], nrow = n.ahead, ncol = K) colnames(result[[i]]) <- ynames } names(result) <- ynames class(result) <- "varfevd" return(result) }
/scratch/gouwar.j/cran-all/cranData/vars/R/fevd.varest.R
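## Illustrative sketch: forecast error variance decomposition of a fitted
## VAR ten steps ahead (the default); the result is a list with one
## decomposition matrix per endogenous variable.
library(vars); data(Canada)
var.2c <- VAR(Canada, p = 2, type = "const")
fevd.2c <- fevd(var.2c, n.ahead = 10)
fevd.2c$e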
"fevd.vec2var" <- function(x, n.ahead=10, ...){ if(!is(x, "vec2var")){ stop("\nPlease provide an object of class 'vec2var', generated by 'vec2var()'.\n") } n.ahead <- abs(as.integer(n.ahead)) K <- x$K p <- x$p ynames <- colnames(x$datamat[, 1 : K]) msey <- .fecovvec2var(x, n.ahead = n.ahead) Psi <- Psi(x, nstep = n.ahead) mse <- matrix(NA, nrow = n.ahead, ncol = K) Omega <- array(0, dim = c(n.ahead, K, K)) for(i in 1 : n.ahead){ mse[i, ] <- diag(msey[, , i]) temp <- matrix(0, K, K) for(l in 1 : K){ for(m in 1 : K){ for(j in 1 : i){ temp[l, m] <- temp[l, m] + Psi[l , m, j]^2 } } } temp <- temp / mse[i, ] for(j in 1 : K){ Omega[i, ,j] <- temp[j, ] } } result <- list() for(i in 1 : K){ result[[i]] <- matrix(Omega[, , i], nrow = n.ahead, ncol = K) colnames(result[[i]]) <- ynames } names(result) <- ynames class(result) <- "varfevd" return(result) }
/scratch/gouwar.j/cran-all/cranData/vars/R/fevd.vec2var.R
"fitted.varest" <- function(object, ...){ return(sapply(object$varresult, fitted)) }
/scratch/gouwar.j/cran-all/cranData/vars/R/fitted.varest.R
"fitted.vec2var" <- function(object, ...){ if (!is(object, "vec2var")) { stop("\nPlease, provide object of class 'vec2var' as 'object'.\n") } resids <- resid(object) fitted <- object$datamat[, colnames(object$y)] - resids colnames(fitted) <- paste("fit of", colnames(object$y)) return(fitted) }
/scratch/gouwar.j/cran-all/cranData/vars/R/fitted.vec2var.R
require(MASS) require(strucchange) ## ## Forecast variance-covariance matrix (VAR) ## ".fecov" <- function(x, n.ahead) { n.par<-sapply(x$varresult, function(x) summary(x)$df[2]) sigma.u <- crossprod(resid(x))/n.par Sigma.yh <- array(NA, dim = c(x$K, x$K, n.ahead)) Sigma.yh[, , 1] <- sigma.u Phi <- Phi(x, nstep = n.ahead) if (n.ahead > 1) { for (i in 2:n.ahead) { temp <- matrix(0, nrow = x$K, ncol = x$K) for (j in 2:i) { temp <- temp + Phi[, , j] %*% sigma.u %*% t(Phi[, , j]) } Sigma.yh[, , i] <- temp + Sigma.yh[, , 1] } } return(Sigma.yh) } ## ## Forecast variance-covariance matrix (SVAR) ## ".fecovsvar" <- function(x, n.ahead) { Sigma.yh <- array(NA, dim = c(x$var$K, x$var$K, n.ahead)) Phi <- Phi(x, nstep = n.ahead) Sigma.yh[, , 1] <- Phi[, , 1]%*%t(Phi[, , 1]) if (n.ahead > 1) { for (i in 2:n.ahead) { temp <- matrix(0, nrow = x$var$K, ncol = x$var$K) for (j in 2:i) { temp <- temp + Phi[, , j]%*%t(Phi[, , j]) } Sigma.yh[, , i] <- temp + Sigma.yh[, , 1] } } return(Sigma.yh) } ## ## Forecast variance-covariance matrix (vec2var) ## ".fecovvec2var" <- function(x, n.ahead) { sigma.u <- crossprod(resid(x))/x$obs Sigma.yh <- array(NA, dim = c(x$K, x$K, n.ahead)) Sigma.yh[, , 1] <- sigma.u Phi <- Phi(x, nstep = n.ahead) if (n.ahead > 1) { for (i in 2:n.ahead) { temp <- matrix(0, nrow = x$K, ncol = x$K) for (j in 2:i) { temp <- temp + Phi[, , j] %*% sigma.u %*% t(Phi[, , j]) } Sigma.yh[, , i] <- temp + Sigma.yh[, , 1] } } return(Sigma.yh) } ## ## Forecast variance-covariance matrix (SVEC) ## ".fecovsvec" <- function(x, n.ahead, K) { Sigma.yh <- array(NA, dim = c(K, K, n.ahead)) Phi <- Phi(x, nstep = n.ahead) Sigma.yh[, , 1] <- Phi[, , 1]%*%t(Phi[, , 1]) if (n.ahead > 1) { for (i in 2:n.ahead) { temp <- matrix(0, nrow = K, ncol = K) for (j in 2:i) { temp <- temp + Phi[, , j]%*%t(Phi[, , j]) } Sigma.yh[, , i] <- temp + Sigma.yh[, , 1] } } return(Sigma.yh) } ## ## irf (internal) ## ".irf" <- function(x, impulse, response, y.names, n.ahead, ortho, cumulative){ if(is(x, "varest") || is(x, "vec2var")){ if(ortho){ irf <- Psi(x, nstep = n.ahead) } else { irf <- Phi(x, nstep = n.ahead) } } else if(is(x, "svarest") || is(x, "svecest")){ irf <- Phi(x, nstep = n.ahead) } dimnames(irf) <- list(y.names, y.names, NULL) idx <- length(impulse) irs <- list() for(i in 1 : idx){ irs[[i]] <- matrix(t(irf[response , impulse[i], 1 : (n.ahead + 1)]), nrow = n.ahead+1) colnames(irs[[i]]) <- response if(cumulative){ if(length(response) > 1) irs[[i]] <- apply(irs[[i]], 2, cumsum) if(length(response) == 1){ tmp <- matrix(cumsum(irs[[i]])) colnames(tmp) <- response irs[[i]] <- tmp } } } names(irs) <- impulse result <- irs return(result) } ## ## Bootstrapping IRF for VAR and SVAR ## ".boot" <- function(x, n.ahead, runs, ortho, cumulative, impulse, response, ci, seed, y.names){ if(!(is.null(seed))) set.seed(abs(as.integer(seed))) if(is(x, "varest")){ VAR <- eval.parent(x) }else if(is(x, "svarest")){ VAR <- eval.parent(x$var) } else { stop("Bootstrap not implemented for this class.\n") } p <- VAR$p K <- VAR$K obs <- VAR$obs total <- VAR$totobs type <- VAR$type B <- Bcoef(VAR) BOOT <- vector("list", runs) ysampled <- matrix(0, nrow = total, ncol = K) colnames(ysampled) <- colnames(VAR$y) Zdet <- NULL if(ncol(VAR$datamat) > (K * (p+1))){ Zdet <- as.matrix(VAR$datamat[, (K * (p + 1) + 1):ncol(VAR$datamat)]) } resorig <- scale(resid(VAR), scale = FALSE) B <- Bcoef(VAR) for(i in 1:runs){ booted <- sample(c(1 : obs), replace=TRUE) resid <- resorig[booted, ] lasty <- c(t(VAR$y[p : 1, ])) ysampled[c(1 : p), ] <- VAR$y[c(1 : p), ] for(j 
in 1 : obs){ lasty <- lasty[1 : (K * p)] Z <- c(lasty, Zdet[j, ]) ysampled[j + p, ] <- B %*% Z + resid[j, ] lasty <- c(ysampled[j + p, ], lasty) } varboot <- update(VAR, y = ysampled) if(is(x, "svarest")){ varboot <- update(x, x = varboot) } BOOT[[i]] <- .irf(x = varboot, n.ahead = n.ahead, ortho = ortho, cumulative = cumulative, impulse = impulse, response = response, y.names=y.names) } lower <- ci / 2 upper <- 1 - ci / 2 mat.l <- matrix(NA, nrow = n.ahead + 1, ncol = length(response)) mat.u <- matrix(NA, nrow = n.ahead + 1, ncol = length(response)) Lower <- list() Upper <- list() idx1 <- length(impulse) idx2 <- length(response) idx3 <- n.ahead + 1 temp <- rep(NA, runs) for(j in 1 : idx1){ for(m in 1 : idx2){ for(l in 1 : idx3){ for(i in 1 : runs){ if(idx2 > 1){ temp[i] <- BOOT[[i]][[j]][l, m] } else { temp[i] <- matrix(BOOT[[i]][[j]])[l, m] } } mat.l[l, m] <- quantile(temp, lower, na.rm = TRUE) mat.u[l, m] <- quantile(temp, upper, na.rm = TRUE) } } colnames(mat.l) <- response colnames(mat.u) <- response Lower[[j]] <- mat.l Upper[[j]] <- mat.u } names(Lower) <- impulse names(Upper) <- impulse result <- list(Lower = Lower, Upper = Upper) return(result) } ## ## Bootstrapping coefficients SVEC ## .bootsvec <- function(x, LRorig, SRorig, r, runs, K, conv.crit, maxls, max.iter){ ## ## Obtaining level-VAR ## varlevel <- vec2var(x, r = r) Resids <- scale(varlevel$resid, scale = FALSE) obs <- varlevel$obs totobs <- varlevel$totobs P <- totobs - obs ## ## Fixing beta ## betafix <- matrix(x@V[, 1:r], ncol = r) ## ## Computing the coefficient matrix ## coeffmat <- cbind(varlevel$deterministic, matrix(unlist(varlevel$A), nrow = K)) ## ## Initialising the BOOT matrix, the sampled y ## and the deterministic regressors ## BOOT <- matrix(0, nrow = 2*K^2, ncol = runs) ysampled <- matrix(0, nrow = totobs, ncol = K) Zdet <- varlevel$datamat[, -c(1:K)] nrhs <- ncol(Zdet) ndet <- nrhs - K*P Zdet <- matrix(Zdet[, 1:ndet], nrow = obs, ncol = ndet) ## ## Conducting the Bootstrap ## for(i in 1:runs){ ## ## Sampling of the residuals ## booted <- sample(c(1 : obs), replace=TRUE) resid <- Resids[booted, ] ## ## Setting the starting values for y ## lasty <- c(t(varlevel$y[P : 1, ])) ysampled[c(1 : P), ] <- varlevel$y[c(1 : P), ] for(j in 1 : obs){ lasty <- lasty[1 : (K * P)] Z <- c(Zdet[j, ], lasty) ysampled[j + P, ] <- coeffmat %*% Z + resid[j, ] lasty <- c(ysampled[j + P, ], lasty) } colnames(ysampled) <- colnames(x@x) ## ## Re-estimating the VECM ## ifelse(is.null(x@call$K), Korig <- 2, Korig <- x@call$K) ifelse(is.null(x@call$spec), specorig <- "longrun", specorig <- x@call$spec) if(is.null(x@call$season)){ seasonorig <- NULL }else { seasonorig <- x@call$season } if(is.null(x@call$dumvar)){ dumvarorig <- NULL }else { dumvarorig <- x@call$dumvar } ifelse(is.null(x@call$ecdet), ecdetorig <- "none", ecdetorig <- x@call$ecdet) vecm <- ca.jo(x = ysampled, K = Korig, spec = specorig, season = seasonorig, dumvar = dumvarorig, ecdet = ecdetorig) vecm@V <- betafix ## ## Re-estimating the SVEC ## svec <- SVEC(x = vecm, LR = LRorig, SR = SRorig, r = r, max.iter = max.iter, conv.crit = conv.crit, maxls = maxls, boot = FALSE, lrtest = FALSE) SRboot <- c(svec$SR) LRboot <- c(svec$LR) bootvals <- c(SRboot, LRboot) ## ## Storing the parameters in BOOT ## BOOT[, i] <- bootvals } return(BOOT) } ## ## Bootstrapping IRF for Level-VECM ## .bootirfvec2var <- function(x = x, n.ahead = n.ahead, runs = runs, ortho = ortho, cumulative = cumulative, impulse = impulse, response = response, ci = ci, seed = seed, y.names = y.names){ ## ## 
Obtaining VECM arguments ## vecm <- x$vecm vecm.ecdet <- vecm@ecdet vecm.season <- vecm@season vecm.dumvar <- vecm@dumvar vecm.K <- vecm@lag vecm.spec <- vecm@spec ## ## Getting VAR-level coefficients ## Zdet <- matrix(x$datamat[ , colnames(x$deterministic)], ncol = ncol(x$deterministic)) B <- x$deterministic for(i in 1:x$p){ B <- cbind(B, x$A[[i]]) } p <- x$p K <- x$K obs <- x$obs total <- x$totobs ## ## Initialising Bootstrap ## BOOT <- vector("list", runs) ysampled <- matrix(0, nrow = total, ncol = K) colnames(ysampled) <- colnames(x$y) resorig <- scale(resid(x), scale = FALSE) ## ## Conducting the bootstrapping ## for(i in 1:runs){ booted <- sample(c(1 : obs), replace=TRUE) resid <- resorig[booted, ] lasty <- c(t(x$y[p : 1, ])) ysampled[c(1 : p), ] <- x$y[c(1 : p), ] ## ## Obtaining the new y ## for(j in 1 : obs){ lasty <- lasty[1 : (K * p)] Z <- c(Zdet[j, ], lasty) ysampled[j + p, ] <- B %*% Z + resid[j, ] lasty <- c(ysampled[j + p, ], lasty) } vec <- ca.jo(ysampled, ecdet = vecm.ecdet, season = vecm.season, dumvar = vecm.dumvar, K = vecm.K, spec = vecm.spec) varboot <- vec2var(vec, r = x$r) BOOT[[i]] <- .irf(x = varboot, n.ahead = n.ahead, ortho = ortho, cumulative = cumulative, impulse = impulse, response = response, y.names = y.names) } ## ## Obtaining the lower and upper bounds ## lower <- ci / 2 upper <- 1 - ci / 2 mat.l <- matrix(NA, nrow = n.ahead + 1, ncol = length(response)) mat.u <- matrix(NA, nrow = n.ahead + 1, ncol = length(response)) Lower <- list() Upper <- list() idx1 <- length(impulse) idx2 <- length(response) idx3 <- n.ahead + 1 temp <- rep(NA, runs) for(j in 1 : idx1){ for(m in 1 : idx2){ for(l in 1 : idx3){ for(i in 1 : runs){ if(idx2 > 1){ temp[i] <- BOOT[[i]][[j]][l, m] } else { temp[i] <- matrix(BOOT[[i]][[j]])[l, m] } } mat.l[l, m] <- quantile(temp, lower, na.rm = TRUE) mat.u[l, m] <- quantile(temp, upper, na.rm = TRUE) } } colnames(mat.l) <- response colnames(mat.u) <- response Lower[[j]] <- mat.l Upper[[j]] <- mat.u } result <- list(Lower = Lower, Upper = Upper) return(result) } ## ## Bootstrapping IRF for SVEC ## .bootirfsvec <- function(x = x, n.ahead = n.ahead, runs = runs, ortho = ortho, cumulative = cumulative, impulse = impulse, response = response, ci = ci, seed = seed, y.names = y.names){ ## ## Obtaining VECM arguments ## vecm <- x$var vecm.ecdet <- vecm@ecdet vecm.season <- vecm@season vecm.dumvar <- vecm@dumvar vecm.K <- vecm@lag vecm.spec <- vecm@spec vecm.beta <- vecm@V ## ## Obtaining arguments for SVEC ## svec.r <- x$r if(is.null(x$call$start)){ svec.start <- NULL } else { svec.start <- x$call$start } ifelse(is.null(x$call$max.iter), svec.maxiter <- 100, svec.maxiter <- x$call$max.iter) ifelse(is.null(x$call$conv.crit), svec.convcrit <- 1e-07, svec.convcrit <- x$call$conv.crit) ifelse(is.null(x$call$maxls), svec.maxls <- 1.0, svec.maxls <- x$call$maxls) ## ## Getting VAR-level coefficients ## varlevel <- vec2var(vecm, r = svec.r) Zdet <- matrix(varlevel$datamat[ , colnames(varlevel$deterministic)], ncol = ncol(varlevel$deterministic)) B <- varlevel$deterministic for(i in 1:varlevel$p){ B <- cbind(B, varlevel$A[[i]]) } p <- varlevel$p K <- varlevel$K obs <- varlevel$obs total <- varlevel$totobs ## ## Initialising Bootstrap ## BOOT <- vector("list", runs) ysampled <- matrix(0, nrow = total, ncol = K) colnames(ysampled) <- colnames(varlevel$y) resorig <- scale(varlevel$resid, scale = FALSE) ## ## Conducting the bootstrapping ## for(i in 1:runs){ booted <- sample(c(1 : obs), replace=TRUE) resid <- resorig[booted, ] lasty <- c(t(varlevel$y[p : 1, ])) 
ysampled[c(1 : p), ] <- varlevel$y[c(1 : p), ] ## ## Obtaining the new y ## for(j in 1 : obs){ lasty <- lasty[1 : (K * p)] Z <- c(Zdet[j, ], lasty) ysampled[j + p, ] <- B %*% Z + resid[j, ] lasty <- c(ysampled[j + p, ], lasty) } vec <- ca.jo(ysampled, ecdet = vecm.ecdet, season = vecm.season, dumvar = vecm.dumvar, K = vecm.K, spec = vecm.spec) ##vec@V <- vecm.beta svec <- SVEC(x = vec, LR = x$LRorig, SR = x$SRorig, r = svec.r, max.iter = svec.maxiter, maxls = svec.maxls, lrtest = FALSE, boot = FALSE) BOOT[[i]] <- .irf(x = svec, n.ahead = n.ahead, cumulative = cumulative, ortho = ortho, impulse = impulse, response = response, y.names = y.names) } ## ## Obtaining the lower and upper bounds ## lower <- ci / 2 upper <- 1 - ci / 2 mat.l <- matrix(NA, nrow = n.ahead + 1, ncol = length(response)) mat.u <- matrix(NA, nrow = n.ahead + 1, ncol = length(response)) Lower <- list() Upper <- list() idx1 <- length(impulse) idx2 <- length(response) idx3 <- n.ahead + 1 temp <- rep(NA, runs) for(j in 1 : idx1){ for(m in 1 : idx2){ for(l in 1 : idx3){ for(i in 1 : runs){ if(idx2 > 1){ temp[i] <- BOOT[[i]][[j]][l, m] } else { temp[i] <- matrix(BOOT[[i]][[j]])[l, m] } } mat.l[l, m] <- quantile(temp, lower, na.rm = TRUE) mat.u[l, m] <- quantile(temp, upper, na.rm = TRUE) } } colnames(mat.l) <- response colnames(mat.u) <- response Lower[[j]] <- mat.l Upper[[j]] <- mat.u } result <- list(Lower = Lower, Upper = Upper) return(result) } ## ## univariate ARCH test ## ".arch.uni" <- function(x, lags.single){ lags.single <- lags.single + 1 mat <- embed(scale(x)^2, lags.single) arch.lm <- summary(lm(mat[, 1] ~ mat[, -1])) STATISTIC <- arch.lm$r.squared*length(resid(arch.lm)) names(STATISTIC) <- "Chi-squared" PARAMETER <- lags.single - 1 names(PARAMETER) <- "df" PVAL <- 1 - pchisq(STATISTIC, df = PARAMETER) METHOD <- "ARCH test (univariate)" result <- list(statistic = STATISTIC, parameter = PARAMETER, p.value = PVAL, method = METHOD, data.name = deparse(substitute(x))) class(result) <- "htest" return(result) } ## ## multivariate ARCH test ## ".arch.multi" <- function(x, lags.multi, obj.name, K, obs){ col.arch.df <- 0.5 * K * (K + 1) arch.df <- matrix(NA, nrow = obs, ncol = col.arch.df) for( i in 1 : obs){ temp <- outer(x[i,], x[i,]) arch.df[i,] <- temp[lower.tri(temp, diag=TRUE)] } lags.multi <- lags.multi + 1 arch.df <- embed(arch.df, lags.multi) archm.lm0 <- lm(arch.df[ , 1:col.arch.df] ~ 1) archm.lm0.resids <- resid(archm.lm0) omega0 <- cov(archm.lm0.resids) archm.lm1 <- lm(arch.df[ , 1 : col.arch.df] ~ arch.df[ , -(1 : col.arch.df)]) archm.lm1.resids <- resid(archm.lm1) omega1 <- cov(archm.lm1.resids) R2m <- 1 - (2 / (K * (K + 1))) * sum(diag(omega1 %*% solve(omega0))) n <- nrow(archm.lm1.resids) STATISTIC <- 0.5 * n * K * (K+1) * R2m names(STATISTIC) <- "Chi-squared" lags.multi <- lags.multi - 1 PARAMETER <- lags.multi * K^2 * (K + 1)^2 / 4 names(PARAMETER) <- "df" PVAL <- 1 - pchisq(STATISTIC, df = PARAMETER) METHOD <- "ARCH (multivariate)" result <- list(statistic = STATISTIC, parameter = PARAMETER, p.value = PVAL, method = METHOD, data.name = paste("Residuals of VAR object", obj.name)) class(result) <- "htest" return(result) } ## ## univariate normality test ## ".jb.uni" <- function(x, obs){ x <- as.vector(x) m1 <- sum(x) / obs m2 <- sum((x - m1)^2) / obs m3 <- sum((x - m1)^3) / obs m4 <- sum((x - m1)^4) / obs b1 <- (m3 / m2^(3 / 2))^2 b2 <- (m4/m2^2) STATISTIC <- obs * b1 / 6 + obs * (b2 - 3)^2 / 24 names(STATISTIC) <- "Chi-squared" PARAMETER <- 2 names(PARAMETER) <- "df" PVAL <- 1 - pchisq(STATISTIC, df = 2) 
METHOD <- "JB-Test (univariate)" result <- list(statistic = STATISTIC, parameter = PARAMETER, p.value = PVAL, method = METHOD, data.name = deparse(substitute(x))) class(result) <- "htest" return(result) } ## ## multivariate normality test ## ".jb.multi" <- function(x, obs, K, obj.name){ P <- chol(crossprod(x) / obs) resids.std <- x %*% solve(P) b1 <- apply(resids.std, 2, function(x) sum(x^3) / obs) b2 <- apply(resids.std, 2, function(x) sum(x^4) / obs) s3 <- obs * t(b1) %*% b1 / 6 s4 <- obs * t(b2 - rep(3, K)) %*% (b2 - rep(3, K)) / 24 STATISTIC <- s3 + s4 names(STATISTIC) <- "Chi-squared" PARAMETER <- 2 * K names(PARAMETER) <- "df" PVAL <- 1 - pchisq(STATISTIC, df = PARAMETER) METHOD <- "JB-Test (multivariate)" result1 <- list(statistic = STATISTIC, parameter = PARAMETER, p.value = PVAL, method = METHOD, data.name = paste("Residuals of VAR object", obj.name)) class(result1) <- "htest" STATISTIC <- s3 names(STATISTIC) <- "Chi-squared" PARAMETER <- K names(PARAMETER) <- "df" PVAL <- 1 - pchisq(STATISTIC, df = PARAMETER) METHOD <- "Skewness only (multivariate)" result2 <- list(statistic = STATISTIC, parameter = PARAMETER, p.value = PVAL, method = METHOD, data.name = paste("Residuals of VAR object", obj.name)) class(result2) <- "htest" STATISTIC <- s4 names(STATISTIC) <- "Chi-squared" PARAMETER <- K names(PARAMETER) <- "df" PVAL <- 1 - pchisq(STATISTIC, df = PARAMETER) METHOD <- "Kurtosis only (multivariate)" result3 <- list(statistic = STATISTIC, parameter = PARAMETER, p.value = PVAL, method = METHOD, data.name = paste("Residuals of VAR object", obj.name)) class(result3) <- "htest" result <- list(JB = result1, Skewness = result2, Kurtosis = result3) return(result) } ## ## Convenience function for computing lagged x ## ".matlag1" <- function(x, lag = 1){ totcols <- ncol(x) nas <- matrix(NA, nrow = lag, ncol = totcols) x <- rbind(nas, x) totrows <- nrow(x) x <- x[-c((totrows - lag + 1) : totrows), ] return(x) } ## ## Multivariate Portmanteau Statistic ## ".pt.multi" <- function(x, K, obs, lags.pt, obj.name, resids){ C0 <- crossprod(resids) / obs C0inv <- solve(C0) tracesum <- rep(NA, lags.pt) for(i in 1 : lags.pt){ Ut.minus.i <- .matlag1(resids, lag = i)[-c(1 : i), ] Ut <- resids[-c(1 : i), ] Ci <- crossprod(Ut, Ut.minus.i) / obs tracesum[i] <- sum(diag(t(Ci) %*% C0inv %*% Ci %*% C0inv)) } vec.adj <- obs - (1 : lags.pt) Qh <- obs * sum(tracesum) Qh.star <- obs^2 * sum(tracesum / vec.adj) nstar <- K^2 * x$p ## htest objects for Qh and Qh.star STATISTIC <- Qh names(STATISTIC) <- "Chi-squared" if(identical(class(x), "varest")){ PARAMETER <- (K^2 * lags.pt - nstar) } else { PARAMETER <- (K^2 * lags.pt - nstar + x$K) } names(PARAMETER) <- "df" PVAL <- 1 - pchisq(STATISTIC, df = PARAMETER) METHOD <- "Portmanteau Test (asymptotic)" PT1 <- list(statistic = STATISTIC, parameter = PARAMETER, p.value = PVAL, method = METHOD, data.name = paste("Residuals of VAR object", obj.name)) class(PT1) <- "htest" STATISTIC <- Qh.star names(STATISTIC) <- "Chi-squared" if(identical(class(x), "varest")){ PARAMETER <- (K^2 * lags.pt - nstar) } else { PARAMETER <- (K^2 * lags.pt - nstar + x$K) } names(PARAMETER) <- "df" PVAL <- 1 - pchisq(STATISTIC, df = PARAMETER) METHOD <- "Portmanteau Test (adjusted)" PT2 <- list(statistic = STATISTIC, parameter = PARAMETER, p.value = PVAL, method = METHOD, data.name = paste("Residuals of VAR object", obj.name)) class(PT2) <- "htest" result <- list(PT1 = PT1, PT2 = PT2) return(result) } ## ## Breusch-Godfrey and Edgerton-Shukur Test ## ".bgserial" <- function(x, K, obs, lags.bg, 
obj.name, resids){ ylagged <- as.matrix(x$datamat[, -c(1 : K)]) resids.l <- .matlag2(resids, lag = lags.bg) if(is.null(x$restrictions)){ regressors <- as.matrix(cbind(ylagged, resids.l)) lm0 <- lm(resids ~ -1 + regressors) lm1 <- lm(resids ~ -1 + ylagged) sigma.1 <- crossprod(resid(lm1)) / obs sigma.0 <- crossprod(resid(lm0)) / obs } else { resid0 <- matrix(NA, ncol = K, nrow = obs) resid1 <- matrix(NA, ncol = K, nrow = obs) for(i in 1 : K){ datares <- data.frame(ylagged[, which(x$restrictions[i, ] == 1)]) regressors <- data.frame(cbind(datares, resids.l)) lm0 <- lm(resids[, i] ~ -1 + ., data=regressors) lm1 <- lm(resids[, i] ~ -1 + ., data=datares) resid0[, i] <- resid(lm0) resid1[, i] <- resid(lm1) sigma.0 <- crossprod(resid0) / obs sigma.1 <- crossprod(resid1) / obs } } LMh.stat <- obs * (K - sum(diag(crossprod(solve(sigma.1), sigma.0)))) STATISTIC <- LMh.stat names(STATISTIC) <- "Chi-squared" PARAMETER <- lags.bg * K^2 names(PARAMETER) <- "df" PVAL <- 1 - pchisq(STATISTIC, df = PARAMETER) METHOD <- "Breusch-Godfrey LM test" LMh <- list(statistic = STATISTIC, parameter = PARAMETER, p.value = PVAL, method = METHOD, data.name = paste("Residuals of VAR object", obj.name)) class(LMh) <- "htest" ## small sample correction of Edgerton Shukur R2r <- 1 - det(sigma.0) / det(sigma.1) m <- K * lags.bg q <- 0.5 * K * m - 1 n <- ncol(x$datamat) - K N <- obs - n - m - 0.5 * (K - m + 1) r <- sqrt((K^2 * m^2 - 4)/(K^2 + m^2 - 5)) LMFh.stat <- (1 - (1 - R2r)^(1 / r))/(1 - R2r)^(1 / r) * (N * r - q) / (K * m) STATISTIC <- LMFh.stat names(STATISTIC) <- "F statistic" PARAMETER1 <- lags.bg * K^2 names(PARAMETER1) <- "df1" PARAMETER2 <- floor(N * r - q) names(PARAMETER2) <- "df2" PVAL <- 1 - pf(LMFh.stat, PARAMETER1, PARAMETER2) METHOD <- "Edgerton-Shukur F test" LMFh <- list(statistic = STATISTIC, parameter = c(PARAMETER1, PARAMETER2), p.value = PVAL, method = METHOD, data.name = paste("Residuals of VAR object", obj.name)) class(LMFh) <- "htest" return(list(LMh = LMh, LMFh = LMFh)) } ## ## Duplication matrix ## ".duplicate" <- function(n){ D <- matrix(0, nrow = n^2, ncol = n * (n + 1) / 2) count <- 0 for(j in 1 : n){ D[(j - 1) * n + j, count + j] <- 1 if((j + 1) <= n){ for(i in (j + 1):n){ D[(j - 1) * n + i, count + i] <- 1 D[(i - 1) * n + j, count + i] <- 1 } } count <- count + n - j } return(D) } ## ## Convenience function for computing lagged residuals ## ".matlag2" <- function(x, lag = 1){ K <- ncol(x) obs <- nrow(x) zeromat <- matrix(0, nrow = obs, ncol = K * lag) idx1 <- seq(1, K * lag, K) idx2 <- seq(K, K * lag, K) for(i in 1:lag){ lag <- i + 1 res.tmp <- embed(x, lag)[, -c(1 : (K * i))] zeromat[-c(1 : i), idx1[i] : idx2[i]] <- res.tmp } resids.l <- zeromat return(resids.l) }
/scratch/gouwar.j/cran-all/cranData/vars/R/internal.R
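## Illustrative sketch of the duplication matrix built by the internal
## helper .duplicate(): for a symmetric K x K matrix S it maps vech(S)
## back onto vec(S), i.e. D %*% vech(S) equals c(S). The triple-colon
## access is needed because the helper is not exported.
K <- 3
S <- crossprod(matrix(rnorm(K * K), K, K))  # symmetric test matrix
D <- vars:::.duplicate(K)
all.equal(c(D %*% S[lower.tri(S, diag = TRUE)]), c(S))  # TRUE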
"irf" <- function(x, impulse=NULL, response=NULL, n.ahead=10, ortho=TRUE, cumulative=FALSE, boot=TRUE, ci=0.95, runs=100, seed=NULL, ...){ UseMethod("irf", x) }
/scratch/gouwar.j/cran-all/cranData/vars/R/irf.R
"irf.svarest" <- function(x, impulse=NULL, response=NULL, n.ahead=10, ortho=TRUE, cumulative=FALSE, boot=TRUE, ci=0.95, runs=100, seed=NULL, ...){ if(!is(x, "svarest")){ stop("\nPlease provide an object of class 'svarest', generated by 'SVAR()'.\n") } y.names <- colnames(x$var$y) if(is.null(impulse)){ impulse <- y.names } else { impulse <- as.vector(as.character(impulse)) if(any(!(impulse %in% y.names))) { stop("\nPlease provide variables names in impulse\nthat are in the set of endogenous variables.\n") } impulse <- subset(y.names, subset = y.names %in% impulse) } if(is.null(response)){ response <- y.names } else { response <- as.vector(as.character(response)) if(any(!(response %in% y.names))){ stop("\nPlease provide variables names in response\nthat are in the set of endogenous variables.\n") } response <- subset(y.names, subset = y.names %in% response) } ## Getting the irf irs <- .irf(x = x, impulse = impulse, response = response, y.names = y.names, n.ahead = n.ahead, ortho = ortho, cumulative = cumulative) ## Bootstrapping Lower <- NULL Upper <- NULL if(boot){ ci <- as.numeric(ci) if((ci <= 0)|(ci >= 1)){ stop("\nPlease provide a number between 0 and 1 for the confidence interval.\n") } ci <- 1 - ci BOOT <- .boot(x = x, n.ahead = n.ahead, runs = runs, ortho = ortho, cumulative = cumulative, impulse = impulse, response = response, ci = ci, seed = seed, y.names = y.names) Lower <- BOOT$Lower Upper <- BOOT$Upper } result <- list(irf=irs, Lower=Lower, Upper=Upper, response=response, impulse=impulse, ortho=ortho, cumulative=cumulative, runs=runs, ci=ci, boot=boot, model = class(x)) class(result) <- "varirf" return(result) }
/scratch/gouwar.j/cran-all/cranData/vars/R/irf.svarest.R
"irf.svecest" <- function(x, impulse=NULL, response=NULL, n.ahead=10, ortho=TRUE, cumulative=FALSE, boot=TRUE, ci=0.95, runs=100, seed=NULL, ...){ if(!is(x, "svecest")){ stop("\nPlease provide an object of class 'svecest', generated by 'SVEC()'.\n") } y.names <- colnames(x$var@x) if(is.null(impulse)){ impulse <- y.names } else { impulse <- as.vector(as.character(impulse)) if(any(!(impulse %in% y.names))) { stop("\nPlease provide variables names in impulse\nthat are in the set of endogenous variables.\n") } impulse <- subset(y.names, subset = y.names %in% impulse) } if(is.null(response)){ response <- y.names } else { response <- as.vector(as.character(response)) if(any(!(response %in% y.names))){ stop("\nPlease provide variables names in response\nthat are in the set of endogenous variables.\n") } response <- subset(y.names, subset = y.names %in% response) } ## Getting the irf irs <- .irf(x = x, impulse = impulse, response = response, y.names = y.names, n.ahead = n.ahead, ortho = ortho, cumulative = cumulative) ## Bootstrapping Lower <- NULL Upper <- NULL if(boot){ ci <- as.numeric(ci) if((ci <= 0)|(ci >= 1)){ stop("\nPlease provide a number between 0 and 1 for the confidence interval.\n") } ci <- 1 - ci if(!(is.null(seed))) set.seed(abs(as.integer(seed))) BOOT <- .bootirfsvec(x = x, n.ahead = n.ahead, runs = runs, ortho = ortho, cumulative = cumulative, impulse = impulse, response = response, ci = ci, seed = seed, y.names = y.names) Lower <- BOOT$Lower Upper <- BOOT$Upper names(Lower) <- impulse names(Upper) <- impulse } result <- list(irf=irs, Lower=Lower, Upper=Upper, response=response, impulse=impulse, ortho=ortho, cumulative=cumulative, runs=runs, ci=ci, boot=boot, model = class(x)) class(result) <- "varirf" return(result) }
/scratch/gouwar.j/cran-all/cranData/vars/R/irf.svecest.R
"irf.varest" <- function(x, impulse=NULL, response=NULL, n.ahead=10, ortho=TRUE, cumulative=FALSE, boot=TRUE, ci=0.95, runs=100, seed=NULL, ...){ if(!is(x, "varest")){ stop("\nPlease provide an object of class 'varest', generated by 'VAR()'.\n") } y.names <- colnames(x$y) if(is.null(impulse)){ impulse <- y.names } else { impulse <- as.vector(as.character(impulse)) if(any(!(impulse %in% y.names))) { stop("\nPlease provide variables names in impulse\nthat are in the set of endogenous variables.\n") } impulse <- subset(y.names, subset = y.names %in% impulse) } if(is.null(response)){ response <- y.names } else { response <- as.vector(as.character(response)) if(any(!(response %in% y.names))){ stop("\nPlease provide variables names in response\nthat are in the set of endogenous variables.\n") } response <- subset(y.names, subset = y.names %in% response) } ## Getting the irf irs <- .irf(x = x, impulse = impulse, response = response, y.names = y.names, n.ahead = n.ahead, ortho = ortho, cumulative = cumulative) ## Bootstrapping Lower <- NULL Upper <- NULL if(boot){ ci <- as.numeric(ci) if((ci <= 0)|(ci >= 1)){ stop("\nPlease provide a number between 0 and 1 for the confidence interval.\n") } ci <- 1 - ci BOOT <- .boot(x = x, n.ahead = n.ahead, runs = runs, ortho = ortho, cumulative = cumulative, impulse = impulse, response = response, ci = ci, seed = seed, y.names = y.names) Lower <- BOOT$Lower Upper <- BOOT$Upper } result <- list(irf=irs, Lower=Lower, Upper=Upper, response=response, impulse=impulse, ortho=ortho, cumulative=cumulative, runs=runs, ci=ci, boot=boot, model = class(x)) class(result) <- "varirf" return(result) }
/scratch/gouwar.j/cran-all/cranData/vars/R/irf.varest.R
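## Illustrative sketch: orthogonalised impulse response of unemployment
## 'U' to a shock in employment 'e', with 95% bootstrap bands; the seed is
## fixed only to make the example reproducible.
library(vars); data(Canada)
var.2c <- VAR(Canada, p = 2, type = "const")
irf.e <- irf(var.2c, impulse = "e", response = "U", n.ahead = 20,
             boot = TRUE, ci = 0.95, runs = 100, seed = 12345)
plot(irf.e)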
"irf.vec2var" <- function(x, impulse=NULL, response=NULL, n.ahead=10, ortho=TRUE, cumulative=FALSE, boot=TRUE, ci=0.95, runs=100, seed=NULL, ...){ if(!is(x, "vec2var")){ stop("\nPlease provide an object of class 'vec2var', generated by 'vec2var()'.\n") } y.names <- colnames(x$y) if(is.null(impulse)){ impulse <- y.names } else { impulse <- as.vector(as.character(impulse)) if(any(!(impulse %in% y.names))) { stop("\nPlease provide variables names in impulse\nthat are in the set of endogenous variables.\n") } impulse <- subset(y.names, subset = y.names %in% impulse) } if(is.null(response)){ response <- y.names } else { response <- as.vector(as.character(response)) if(any(!(response %in% y.names))){ stop("\nPlease provide variables names in response\nthat are in the set of endogenous variables.\n") } response <- subset(y.names, subset = y.names %in% response) } ## Getting the irf irs <- .irf(x = x, impulse = impulse, response = response, y.names = y.names, n.ahead = n.ahead, ortho = ortho, cumulative = cumulative) ## Bootstrapping Lower <- NULL Upper <- NULL if(boot){ ci <- as.numeric(ci) if((ci <= 0)|(ci >= 1)){ stop("\nPlease provide a number between 0 and 1 for the confidence interval.\n") } ci <- 1 - ci if(!(is.null(seed))) set.seed(abs(as.integer(seed))) BOOT <- .bootirfvec2var(x = x, n.ahead = n.ahead, runs = runs, ortho = ortho, cumulative = cumulative, impulse = impulse, response = response, ci = ci, seed = seed, y.names = y.names) Lower <- BOOT$Lower Upper <- BOOT$Upper names(Lower) <- impulse names(Upper) <- impulse } result <- list(irf=irs, Lower=Lower, Upper=Upper, response=response, impulse=impulse, ortho=ortho, cumulative=cumulative, runs=runs, ci=ci, boot=boot, model = class(x)) class(result) <- "varirf" return(result) }
/scratch/gouwar.j/cran-all/cranData/vars/R/irf.vec2var.R
logLik.svarest <- function(object, ...){ obs <- object$var$obs K <- object$var$K A <- object$A B <- object$B Sigma <- object$Sigma.U / 100 r <- -(K * obs/2) * log(2 * pi) + obs/2 * log(det(A)^2) - obs/2 * log(det(B)^2) - obs/2 * sum(diag(t(A) %*% solve(t(B)) %*% solve(B) %*% A %*% Sigma)) class(r) <- "logLik" return(r) }
/scratch/gouwar.j/cran-all/cranData/vars/R/logLik.svarest.R
"logLik.svecest" <- function (object, ...) { K <- object$var@P A <- diag(K) B <- object$SR obs <- nrow(object$var@Z0) Sigma <- object$Sigma.U / 100 r <- -(K * obs/2) * log(2 * pi) + obs/2 * log(det(A)^2) - obs/2 * log(det(B)^2) - obs/2 * sum(diag(t(A) %*% solve(t(B)) %*% solve(B) %*% A %*% Sigma)) class(r) <- "logLik" return(r) }
/scratch/gouwar.j/cran-all/cranData/vars/R/logLik.svecest.R
logLik.varest <- function(object, ...){
  obs <- object$obs
  K <- object$K
  resids <- resid(object)
  Sigma <- crossprod(resids) / obs
  r <- -(obs * K / 2) * log(2 * pi) - (obs / 2) * log(det(Sigma)) -
    (1 / 2) * sum(diag(resids %*% solve(Sigma) %*% t(resids)))
  class(r) <- "logLik"
  params <- sum(unlist(lapply(object$varresult, function(x) length(coef(x)))))
  attr(r, "df") <- params
  attr(r, "nobs") <- object$obs
  return(r)
}
/scratch/gouwar.j/cran-all/cranData/vars/R/logLik.varest.R
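## Illustrative sketch: because the "logLik" object carries "df" and
## "nobs" attributes, the stats generics AIC() and BIC() work directly on
## a fitted VAR.
library(vars); data(Canada)
var.2c <- VAR(Canada, p = 2, type = "const")
logLik(var.2c)
AIC(var.2c)
BIC(var.2c)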
logLik.vec2var <- function(object, ...){ obs <- object$obs K <- object$K resids <- resid(object) Sigma <- crossprod(resids) / obs r <- -(obs * K/2) * log(2 * pi) - (obs/2) * log(det(Sigma)) - (1/2) * sum(diag(resids %*% solve(Sigma) %*% t(resids))) class(r) <- "logLik" params <- length(unlist(object$A)) + length(object$deterministic) attr(r, "df") <- params attr(r, "nobs") <- object$obs return(r) }
/scratch/gouwar.j/cran-all/cranData/vars/R/logLik.vec2var.R
"normality.test" <- function(x, multivariate.only = TRUE){ if(!(is(x, "varest") || is(x, "vec2var"))){ stop("\nPlease provide an object of class 'varest', generated by 'var()', or an object of class 'vec2var' generated by 'vec2var()'.\n") } obj.name <- deparse(substitute(x)) K <- x$K obs <- x$obs resid <- resid(x) resids <- scale(resid, scale=FALSE) ## Jarque Bera Test (multivariate) jbm.resids <- .jb.multi(resids, obs = obs, K = K, obj.name = obj.name) if(multivariate.only){ result <- list(resid = resid, jb.mul = jbm.resids) } else { ## Jarque Bera Test (univariate) jbu.resids <- apply(resids, 2, function(x) .jb.uni(x, obs = obs)) for(i in 1 : K) jbu.resids[[i]][5] <- paste("Residual of", colnames(resids)[i], "equation") result <- list(resid = resid, jb.uni = jbu.resids, jb.mul = jbm.resids) } class(result) <- "varcheck" return(result) } normality <- function(x, multivariate.only = TRUE){ .Deprecated("normality.test", package = "vars", msg = "Function 'normality' is deprecated; use 'normality.test' instead.\nSee help(\"vars-deprecated\") and help(\"normality-deprecated\") for more information.") normality.test(x = x, multivariate.only = multivariate.only) }
/scratch/gouwar.j/cran-all/cranData/vars/R/normality.R
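## Illustrative sketch: multivariate Jarque-Bera test, with separate
## skewness and kurtosis components, on the residuals of a fitted VAR.
library(vars); data(Canada)
var.2c <- VAR(Canada, p = 2, type = "const")
normality.test(var.2c)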
"plot.varcheck" <- function (x, names = NULL, main.resid = NULL, main.hist = NULL, main.acf = NULL, main.pacf = NULL, main.acf2 = NULL, main.pacf2 = NULL, ylim.resid = NULL, ylim.hist = NULL, ylab.resid = NULL, xlab.resid = NULL, xlab.acf = NULL, lty.resid = NULL, lwd.resid = NULL, col.resid = NULL, col.edf = NULL, lag.acf = NULL, lag.pacf = NULL, lag.acf2 = NULL, lag.pacf2 = NULL, mar = par("mar"), oma = par("oma"), ...) { op <- par(no.readonly = TRUE) rnames <- colnames(x$resid) if (is.null(names)) { names <- rnames } else { names <- as.character(names) if (!(all(names %in% rnames))) { warning("\nInvalid residual name(s) supplied, using residuals of first variable.\n") names <- rnames[1] } } nv <- length(names) resids <- matrix(x$resid[, names], ncol = nv) ifelse(is.null(main.resid), main.resid <- paste("Residuals of", names), main.resid <- rep(main.resid, nv)[1:nv]) ifelse(is.null(main.hist), main.hist <- rep("Histogram and EDF", nv), main.hist <- rep(main.hist, nv)[1:nv]) ifelse(is.null(main.acf), main.acf <- rep("ACF of Residuals", nv), main.acf <- rep(main.acf, nv)[1:nv]) ifelse(is.null(main.pacf), main.pacf <- rep("PACF of Residuals", nv), main.pacf <- rep(main.pacf, nv)[1:nv]) ifelse(is.null(main.acf2), main.acf2 <- rep("ACF of squared Residuals", nv), main.acf2 <- rep(main.acf2, nv)[1:nv]) ifelse(is.null(main.pacf2), main.pacf2 <- rep("PACF of squared Residuals", nv), main.pacf2 <- rep(main.pacf2, nv)[1:nv]) ifelse(is.null(ylab.resid), ylab.resid <- rep("", nv), ylab.resid <- rep(ylab.resid, nv)[1:nv]) ifelse(is.null(xlab.resid), xlab.resid <- rep("", nv), xlab.resid <- rep(xlab.resid, nv)[1:nv]) ifelse(is.null(xlab.acf), xlab.acf <- rep("", nv), xlab.acf <- rep(xlab.acf, nv)[1:nv]) ifelse(is.null(lty.resid), lty.resid <- c(1, 1), lty.resid <- rep(lty.resid, 2)[1:2]) ifelse(is.null(lwd.resid), lwd.resid <- c(1, 1), lwd.resid <- rep(lwd.resid, 2)[1:2]) ifelse(is.null(col.resid), col.resid <- c("black", "red"), col.resid <- rep(col.resid, 2)[1:2]) ifelse(is.null(col.edf), col.edf <- "blue", col.edf <- col.edf[1]) ifelse(is.null(lag.acf), lag.acf <- 12, lag.acf <- lag.acf) ifelse(is.null(lag.pacf), lag.pacf <- 12, lag.pacf <- lag.pacf) ifelse(is.null(lag.acf2), lag.acf2 <- 12, lag.acf2 <- lag.acf2) ifelse(is.null(lag.pacf2), lag.pacf2 <- 12, lag.pacf2 <- lag.pacf2) plotcheck <- function(resid, main.resid, ylab.resid, xlab.resid, main.hist, main.acf, main.pacf, main.acf2, main.pacf2){ ifelse(is.null(ylim.resid), ylim.resid <- c(min(resid), max(resid)), ylim.resid <- ylim.resid) ifelse(is.null(ylim.hist), ylim.hist <- c(0, 1), ylim.hist <- ylim.hist) plot.ts(resid, main = main.resid, ylim = ylim.resid, ylab = ylab.resid, xlab = xlab.resid, lty = lty.resid[1], lwd = lwd.resid[2], col = col.resid[1], ...) 
abline(h = 0, col = col.resid[2], lty = lty.resid[2], lwd = lwd.resid[2]) acf(resid, main = main.acf, ylab = "", xlab = xlab.acf, lag.max = lag.acf) acf(resid^2, main = main.acf2, ylab = "", xlab = xlab.acf, lag.max = lag.acf2) hist(resid, main = main.hist, freq = FALSE, xlab = "", ylim = ylim.hist) lines(density(resid), col = col.edf) pacf(resid, main = main.pacf, ylab = "", xlab = xlab.acf, lag.max = lag.pacf) pacf(resid^2, main = main.pacf2, ylab = "", xlab = xlab.acf, lag.max = lag.pacf2) } par(mfcol = c(3, 2), mar = mar, oma = oma) if (nv > 1) par(ask = TRUE) for (i in 1:nv) { plotcheck(resid = resids[, i], main.resid = main.resid[i], ylab.resid = ylab.resid[i], xlab.resid = xlab.resid[i], main.hist = main.hist[i], main.acf = main.acf[i], main.pacf = main.pacf[i], main.acf2 = main.acf2[i], main.pacf2 = main.pacf2[i]) } on.exit(par(op)) }
/scratch/gouwar.j/cran-all/cranData/vars/R/plot.varcheck.R
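## Illustrative sketch: 'varcheck' objects returned by arch.test() and
## normality.test() share this plot method, which shows the residual
## series, its ACF/PACF, the ACF/PACF of the squared residuals and a
## histogram per equation.
library(vars); data(Canada)
var.2c <- VAR(Canada, p = 2, type = "const")
plot(arch.test(var.2c))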
"plot.varest" <- function (x, names = NULL, main.fit = NULL, main.acf = NULL, main.pacf = NULL, ylim.fit = NULL, ylim.resid = NULL, lty.fit = NULL, lty.resid = NULL, lwd.fit = NULL, lwd.resid = NULL, lag.acf = NULL, lag.pacf = NULL, col.fit = NULL, col.resid = NULL, ylab.fit = NULL, ylab.resid = NULL, ylab.acf = NULL, ylab.pacf = NULL, xlab.fit = NULL, xlab.resid = NULL, nc, mar = par("mar"), oma = par("oma"), adj.mtext = NA, padj.mtext = NA, col.mtext = NA, ...) { op <- par(no.readonly = TRUE) K <- x$K resids <- resid(x) fitted <- fitted(x) y <- x$datamat[, 1:K] ynames <- colnames(y) if (is.null(names)) { names <- ynames } else { names <- as.character(names) if (!(all(names %in% ynames))) { warning("\nInvalid variable name(s) supplied, using first variable.\n") names <- ynames[1] } } nv <- length(names) ifelse(is.null(main.fit), main.fit <- paste("Diagram of fit and residuals for", names), main.fit <- rep(main.fit, nv)[1:nv]) ifelse(is.null(main.acf), main.acf <- rep("ACF Residuals", nv), main.acf <- rep(main.acf, nv)[1:nv]) ifelse(is.null(main.pacf), main.pacf <- rep("PACF Residuals", nv), main.pacf <- rep(main.pacf, nv)[1:nv]) ifelse(is.null(lty.fit), lty.fit <- c(1, 2), lty.fit <- rep(lty.fit, 2)[1:2]) ifelse(is.null(lty.resid), lty.resid <- c(1, 1), lty.resid <- rep(lty.resid, 2)[1:2]) ifelse(is.null(lwd.fit), lwd.fit <- c(1, 1), lwd.fit <- rep(lwd.fit, 2)[1:2]) ifelse(is.null(lwd.resid), lwd.resid <- c(1, 1), lwd.resid <- rep(lwd.resid, 2)[1:2]) ifelse(is.null(lag.acf), lag.acf <- 12, lag.acf <- lag.acf) ifelse(is.null(lag.pacf), lag.pacf <- 12, lag.pacf <- lag.pacf) ifelse(is.null(col.fit), col.fit <- c("black", "blue"), col.fit <- rep(col.fit, 2)[1:2]) ifelse(is.null(col.resid), col.resid <- c("black", "red"), col.resid <- rep(col.resid, 2)[1:2]) ifelse(is.null(ylab.fit), ylab.fit <- rep("", nv), ylab.fit <- rep(ylab.fit, nv)[1:nv]) ifelse(is.null(ylab.resid), ylab.resid <- rep("", nv), ylab.resid <- rep(ylab.resid, nv)[1:nv]) ifelse(is.null(ylab.acf), ylab.acf <- rep("", nv), ylab.acf <- rep(ylab.acf, nv)[1:nv]) ifelse(is.null(ylab.pacf), ylab.pacf <- rep("", nv), ylab.pacf <- rep(ylab.pacf, nv)[1:nv]) ifelse(is.null(xlab.fit), xlab.fit <- rep("", nv), xlab.fit <- rep(xlab.fit, nv)[1:nv]) ifelse(is.null(xlab.resid), xlab.resid <- rep("", nv), xlab.resid <- rep(xlab.resid, nv)[1:nv]) plotest <- function(y, fitted, resids, main.fit, main.acf, main.pacf, ylab.fit, ylab.resid, ylab.acf, ylab.pacf, xlab.fit, xlab.resid, adj.mtext, padj.mtext, col.mtext, ...){ ifelse(is.null(ylim.fit), ylim.fit <- c(min(c(y, fitted)), max(c(y, fitted))), ylim.fit <- ylim.fit) ifelse(is.null(ylim.resid), ylim.resid <- c(min(resids), max(resids)), ylim.resid <- ylim.resid) layout(matrix(c(1, 1, 2, 2, 3, 4), nrow = 3, ncol = 2, byrow = TRUE)) par(oma = c(6, 0, 5, 0), mar = c(0, 5.1, 0, 2.1)) plot.ts(y, main = "", ylim = ylim.fit, ylab = ylab.fit, xlab = xlab.fit, lty = lty.fit[1], lwd = lwd.fit[1], col = col.fit[1], axes = FALSE, ...) lines(fitted, col = col.fit[2], lty = lty.fit[2], lwd = lwd.fit[2]) box() axis(2, pretty(c(y, fitted))[-1], ...) mtext(main.fit, side = 3, line = 3, adj = adj.mtext, padj = padj.mtext, col = col.mtext, ...) plot.ts(resids, main = "", ylim = ylim.resid, ylab = ylab.resid, xlab = xlab.resid, lty = lty.resid[1], lwd = lwd.resid[1], col = col.resid[1], ...) abline(h = 0, col = col.resid[2], lty = lty.resid[2], lwd = lwd.resid[2]) par(mar=c(1, 5.1, 7, 2.1)) acf(resids, main = main.acf, ylab = ylab.acf, lag.max = lag.acf, ...) 
pacf(resids, main = main.pacf, ylab = ylab.pacf, lag.max = lag.pacf, ...) } par(mar = mar, oma = oma) for (i in 1:nv) { plotest(y = y[, names[i]], fitted = fitted[, names[i]], resids = resids[, names[i]], main.fit = main.fit[i], main.acf = main.acf[i], main.pacf = main.pacf[i], ylab.fit = ylab.fit[i], ylab.resid = ylab.resid[i], ylab.acf = ylab.acf[i], ylab.pacf = ylab.pacf[i], xlab.fit = xlab.fit[i], xlab.resid = xlab.resid[i], adj.mtext = adj.mtext, padj.mtext = padj.mtext, col.mtext = col.mtext, ...) if (nv > 1) par(ask = TRUE) } on.exit(par(op)) }
/scratch/gouwar.j/cran-all/cranData/vars/R/plot.varest.R
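## Illustrative sketch: the plot method for 'varest' objects draws, per
## equation, the fitted-versus-actual series, the residuals and their
## ACF/PACF.
library(vars); data(Canada)
var.2c <- VAR(Canada, p = 2, type = "const")
plot(var.2c)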
"plot.varfevd" <- function (x, plot.type = c("multiple", "single"), names = NULL, main = NULL, col = NULL, ylim = NULL, ylab = NULL, xlab = NULL, legend = NULL, names.arg = NULL, nc, mar = par("mar"), oma = par("oma"), addbars = 1, ...) { K <- length(x) ynames <- names(x) plot.type <- match.arg(plot.type) if (is.null(names)) { names <- ynames } else { names <- as.character(names) if (!(all(names %in% ynames))) { warning("\nInvalid variable name(s) supplied, using first variable.\n") names <- ynames[1] } } nv <- length(names) op <- par(no.readonly = TRUE) ifelse(is.null(main), main <- paste("FEVD for", names), main <- rep(main, nv)[1:nv]) ifelse(is.null(col), col <- gray.colors(K), col <- rep(col, K)[1:K]) ifelse(is.null(ylab), ylab <- rep("Percentage", nv), ylab <- rep(ylab, nv)[1:nv]) ifelse(is.null(xlab), xlab <- rep("Horizon", nv), xlab <- rep(xlab, nv)[1:nv]) ifelse(is.null(ylim), ylim <- c(0, 1), ylim <- ylim) ifelse(is.null(legend), legend <- ynames, legend <- legend) if(is.null(names.arg)) names.arg <- c(paste(1:nrow(x[[1]])), rep(NA, addbars)) plotfevd <- function(x, main, col, ylab, xlab, names.arg, ylim, ...){ addbars <- as.integer(addbars) if(addbars > 0){ hmat <- matrix(0, nrow = K, ncol = addbars) xvalue <- cbind(t(x), hmat) barplot(xvalue, main = main, col = col, ylab = ylab, xlab = xlab, names.arg = names.arg, ylim = ylim, legend.text = legend, ...) abline(h = 0) } else { xvalue <- t(x) barplot(xvalue, main = main, col = col, ylab = ylab, xlab = xlab, names.arg = names.arg, ylim = ylim, ...) abline(h = 0) } } if (plot.type == "single") { par(mar = mar, oma = oma) if (nv > 1) par(ask = TRUE) for (i in 1:nv) { plotfevd(x = x[[names[i]]], main = main[i], col = col, ylab = ylab[i], xlab = xlab[i], names.arg = names.arg, ylim = ylim, ...) } } else if (plot.type == "multiple") { if (missing(nc)) { nc <- ifelse(nv > 4, 2, 1) } nr <- ceiling(nv / nc) par(mfcol = c(nr, nc), mar = mar, oma = oma) for (i in 1:nv) { plotfevd(x = x[[names[i]]], main = main[i], col = col, ylab = ylab[i], xlab = xlab[i], names.arg = names.arg, ylim = ylim, ...) } } on.exit(par(op)) }
/scratch/gouwar.j/cran-all/cranData/vars/R/plot.varfevd.R
"plot.varirf" <- function (x, plot.type = c("multiple", "single"), names = NULL, main = NULL, sub = NULL, lty = NULL, lwd = NULL, col = NULL, ylim = NULL, ylab = NULL, xlab = NULL, nc, mar.multi = c(0, 4, 0, 4), oma.multi = c(6, 4, 6, 4), adj.mtext = NA, padj.mtext = NA, col.mtext = NA, ...) { op <- par(no.readonly = TRUE) on.exit(par(op)) ## ## Checking of arguments ## plot.type <- match.arg(plot.type) inames <- x$impulse rnames <- x$response if (is.null(names)) { names <- inames } else { names <- as.character(names) if (!(all(names %in% inames))) { warning("\nInvalid variable name(s) supplied, using first variable.\n") inames <- inames[1] } else { inames <- names } } nvi <- length(inames) nvr <- length(rnames) ## ## Presetting certain plot-argument ## ifelse(is.null(lty), lty <- c(1, 1, 2, 2), lty <- rep(lty, 4)[1:4]) ifelse(is.null(lwd), lwd <- c(1, 1, 1, 1), lwd <- rep(lwd, 4)[1:4]) ifelse(is.null(col), col <- c("black", "gray", "red", "red"), col <- rep(col, 4)[1:4]) ## ## Extract data from object for plotting per iname ## dataplot <- function(x, iname){ impulses <- x$irf[[iname]] range <- range(impulses) upper <- NULL lower <- NULL if(x$boot){ upper <- x$Upper[[iname]] lower <- x$Lower[[iname]] range <- range(cbind(impulses, upper, lower)) } if ((x$model == "varest") || (x$model == "vec2var")) { if (x$ortho) { text1 <- paste("Orthogonal Impulse Response from", iname, sep = " ") } else { text1 <- paste("Impulse Response from", iname, sep = " ") } } else if (x$model == "svarest") { text1 <- paste("SVAR Impulse Response from", iname, sep = " ") } else if (x$model == "svecest") { text1 <- paste("SVECM Impulse Response from", iname, sep = " ") } if (x$cumulative) text1 <- paste(text1, "(cumulative)", sep = " ") text2 <- "" if (x$boot) text2 <- paste((1 - x$ci) * 100, "% Bootstrap CI, ", x$runs, "runs") result <- list(impulses = impulses, upper = upper, lower = lower, range = range, text1 = text1, text2 = text2) return(result) } ## ## Plot function for irf per impulse and response ## plot.single <- function(x, iname, rname, ...) { ifelse(is.null(main), main <- x$text1, main <- main) ifelse(is.null(sub), sub <- x$text2, sub <- sub) xy <- xy.coords(x$impulse[, rname]) ifelse(is.null(ylab), ylabel <- rname, ylabel <- ylab) ifelse(is.null(xlab), xlabel <- "", xlabel <- xlab) ifelse(is.null(ylim), ylim <- x$range, ylim <- ylim) plot(xy, type = "l", ylim = ylim, col = col[1], lty = lty[1], lwd = lwd[1], axes = FALSE, ylab = paste(ylabel), xlab = paste(xlab), ...) title(main = main, sub = sub, ...) axis(1, at = xy$x, labels = c(0:(length(xy$x) - 1))) axis(2, ...) box() if (!is.null(x$upper)) lines(x$upper[, rname], col = col[3], lty = lty[3], lwd = lwd[3]) if (!is.null(x$lower)) lines(x$lower[, rname], col = col[3], lty = lty[3], lwd = lwd[3]) abline(h = 0, col = col[2], lty = lty[2], lwd = lwd[2]) } ## ## Plot function per impulse ## plot.multiple <- function(dp, nc = nc, ...){ x <- dp$impulses y <- dp$upper z <- dp$lower ifelse(is.null(main), main <- dp$text1, main <- main) ifelse(is.null(sub), sub <- dp$text2, sub <- sub) ifelse(is.null(ylim), ylim <- dp$range, ylim <- ylim) range <- range(c(x, y, z)) nvr <- ncol(x) if (missing(nc)) { nc <- ifelse(nvr > 4, 2, 1) } nr <- ceiling(nvr/nc) par(mfrow = c(nr, nc), mar = mar.multi, oma = oma.multi) if(nr > 1){ for(i in 1:(nvr - nc)){ ifelse(is.null(ylab), ylabel <- colnames(x)[i], ylabel <- ylab) xy <- xy.coords(x[, i]) plot(xy, axes = FALSE, type = "l", ylab = ylabel, ylim = ylim, col = col[1], lty = lty[1], lwd = lwd[1], ...) 
axis(2, at = pretty(range)[-1]) abline(h = 0, col = "red") if(!is.null(y)) lines(y[, i], col = col[3], lty = lty[3], lwd = lwd[3]) if(!is.null(z)) lines(z[, i], col = col[3], lty = lty[3], lwd = lwd[3]) box() } for(j in (nvr - nc + 1):nvr){ ifelse(is.null(ylab), ylabel <- colnames(x)[j], ylabel <- ylab) xy <- xy.coords(x[, j]) plot(xy, axes = FALSE, type = "l", ylab = ylabel, ylim = ylim, col = col[1], lty = lty[1], lwd = lwd[1], ...) axis(2, at = pretty(range)[-1]) axis(1, at = 1:(nrow(x)), labels = c(0:(nrow(x) - 1))) box() abline(h = 0, col = "red") if(!is.null(y)) lines(y[, j], col = col[3], lty = lty[3], lwd = lwd[3]) if(!is.null(z)) lines(z[, j], col = col[3], lty = lty[3], lwd = lwd[3]) } mtext(main, 3, line = 2, outer = TRUE, adj = adj.mtext, padj = padj.mtext, col = col.mtext, ...) mtext(sub, 1, line = 4, outer = TRUE, adj = adj.mtext, padj = padj.mtext, col = col.mtext, ...) } else { for(j in 1:nvr){ ifelse(is.null(ylab), ylabel <- colnames(x)[j], ylabel <- ylab) xy <- xy.coords(x[, j]) plot(xy, type = "l", ylab = ylabel, ylim = ylim, col = col[1], lty = lty[1], lwd = lwd[1], ...) if(!is.null(y)) lines(y[, j], col = col[3], lty = lty[3], lwd = lwd[3]) if(!is.null(z)) lines(z[, j], col = col[3], lty = lty[3], lwd = lwd[3]) abline(h = 0, col = "red") } mtext(main, 3, line = 2, outer = TRUE, adj = adj.mtext, padj = padj.mtext, col = col.mtext, ...) mtext(sub, 1, line = 4, outer = TRUE, adj = adj.mtext, padj = padj.mtext, col = col.mtext, ...) } } ## ## Plot for type = single ## if (plot.type == "single") { for(i in 1:nvi){ dp <- dataplot(x, iname = inames[i]) for(j in 1:nvr){ plot.single(dp, iname = inames[i], rname = rnames[j], ...) if (nvr > 1) par(ask = TRUE) } } } ## ## Plot for type = multiple ## if (plot.type == "multiple") { for (i in 1:nvi) { dp <- dataplot(x, iname = inames[i]) plot.multiple(dp, nc = nc, ...) if (nvi > 1) par(ask = TRUE) } } }
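## Usage sketch (comment only; bootstrap bands are drawn when irf() was
## called with boot = TRUE):
##   var.2c <- VAR(Canada, p = 2, type = "const")
##   irfs <- irf(var.2c, impulse = "e", response = c("prod", "U"),
##               n.ahead = 10, boot = TRUE, runs = 100)
##   plot(irfs)                         # one page per impulse variable
##   plot(irfs, plot.type = "single")   # one panel per impulse/response pair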
/scratch/gouwar.j/cran-all/cranData/vars/R/plot.varirf.R
"plot.varprd" <- function(x, plot.type = c("multiple", "single"), names = NULL, main = NULL, col = NULL, lty = NULL, lwd = NULL, ylim = NULL, ylab = NULL, xlab = NULL, nc, mar = par("mar"), oma = par("oma"), ...){ op <- par(no.readonly = TRUE) ynames <- colnames(x$endog) smpl <- nrow(x$endog) K <- ncol(x$endog) plot.type <- match.arg(plot.type) if(is.null(names)){ names <- ynames } else { names <- as.character(names) if(!(all(names %in% ynames))){ warning("\nInvalid variable name(s) supplied, using first variable.\n") names <- ynames[1] } } nv <- length(names) ifelse(is.null(main), main <- paste("Forecast of series", names), main <- rep(main, nv)[1:nv]) ifelse(is.null(col), col <- c("blue", "black", "red", "red", "grey"), col <- rep(col, 5)[1:5]) ifelse(is.null(lty), lty <- c(2, 1, 3, 3, 4), lty <- rep(lty, 5)[1:5]) ifelse(is.null(lwd), lwd <- rep(1, 5), lwd <- rep(lwd, 5)[1:5]) ifelse(is.null(ylab), ylab <- rep("", nv), ylab <- rep(ylab, nv)[1:nv]) ifelse(is.null(xlab), xlab <- rep("", nv), xlab <- rep(xlab, nv)[1:nv]) plotprd <- function(x, name, main, col, lty, lwd, ylab, xlab, ...){ fcsty <- c(rep(NA, smpl - 1), x$endog[smpl, name], x$fcst[[name]][, 1]) fcstl <- c(rep(NA, smpl - 1), x$endog[smpl, name], x$fcst[[name]][, 2]) fcstu <- c(rep(NA, smpl - 1), x$endog[smpl, name], x$fcst[[name]][, 3]) smply <- c(x$endog[, name], rep(NA, length(x$fcst[[name]][, 1]))) if(is.null(ylim)){ min.y <- min(na.omit(c(fcsty, fcstl, fcstu, smply))) max.y <- max(na.omit(c(fcsty, fcstl, fcstu, smply))) ylim <- c(min.y, max.y) } plot.ts(fcsty, main = main, ylab = ylab, xlab = xlab, ylim = ylim, col = col[1], lty = lty[1], lwd = lwd[1], ...) lines(smply, col = col[2], lty = lty[2], lwd = lwd[2]) lines(fcstl, col = col[3], lty = lty[3], lwd = lwd[3]) lines(fcstu, col = col[4], lty = lty[4], lwd = lwd[4]) abline(v = smpl, col = col[5], lty = lty[5], lwd = lwd[5]) } if(plot.type == "single"){ par(mar = mar, oma = oma) if(nv > 1) par(ask = TRUE) for(i in 1:nv){ plotprd(x = x, name = names[i], main = main[i], col = col, lty = lty, lwd = lwd, ylab = ylab[i], xlab = xlab[i], ...) } } else if(plot.type == "multiple"){ if(missing(nc)){ nc <- ifelse(nv > 4, 2, 1) } nr <- ceiling(nv / nc) par(mfcol = c(nr, nc), mar = mar, oma = oma) for(i in 1:nv){ plotprd(x = x, name = names[i], main = main[i], col = col, lty = lty, lwd = lwd, ylab = ylab[i], xlab = xlab[i], ...) } } on.exit(par(op)) }
/scratch/gouwar.j/cran-all/cranData/vars/R/plot.varprd.R
"plot.varstabil" <- function (x, plot.type = c("multiple", "single"), names = NULL, main = NULL, nc, mar = par("mar"), oma = par("oma"), ...) { op <- par(no.readonly = TRUE) plot.type <- match.arg(plot.type) ynames <- x$names if (is.null(names)) { names <- ynames } else { names <- as.character(names) if (!(all(names %in% ynames))) { warning("\nInvalid variable name(s) supplied, using first variable.\n") names <- ynames[1] } } nv <- length(names) ifelse(is.null(main), main <- paste(x$stability[[1]]$type, "of equation", names), main <- rep(main, nv)[1:nv]) if (plot.type == "single") { par(mar = mar, oma = oma) if (nv > 1) par(ask = TRUE) for (i in 1:nv) { plot(x[[1]][[names[i]]], main = main[i], ...) } } else if(plot.type == "multiple") { if (missing(nc)) { nc <- ifelse(nv > 4, 2, 1) } nr <- ceiling(nv/nc) par(mfcol = c(nr, nc), mar = mar, oma = oma) for (i in 1:nv) { plot(x[[1]][[names[i]]], main = main[i], ...) } } on.exit(par(op)) }
/scratch/gouwar.j/cran-all/cranData/vars/R/plot.varstabil.R
"plot.vec2var" <- function(x, ...){ plot.varest(x, ...) }
/scratch/gouwar.j/cran-all/cranData/vars/R/plot.vec2var.R
"predict.varest" <- function(object, ..., n.ahead = 10, ci = 0.95, dumvar = NULL){ K <- object$K p <- object$p obs <- object$obs type <- object$type data.all <- object$datamat ynames <- colnames(object$y) n.ahead <- as.integer(n.ahead) Z <- object$datamat[, -c(1 : K)] B <- Bcoef(object) ## ## Deterministic and lagged y's ## Retrieval of A in matrix (whole) ## Deterministic variables in Zdet ## if(type == "const"){ Zdet <- matrix(rep(1, n.ahead), nrow = n.ahead, ncol = 1) colnames(Zdet) <- "const" }else if(type == "trend"){ trdstart <- nrow(Z) + 1 + p Zdet <- matrix(seq(trdstart, length = n.ahead), nrow = n.ahead, ncol = 1) colnames(Zdet) <- "trend" }else if(type == "both"){ trdstart <- nrow(Z) + 1 + p Zdet <- matrix(c(rep(1, n.ahead), seq(trdstart, length = n.ahead)), nrow = n.ahead, ncol = 2) colnames(Zdet) <- c("const", "trend") }else if(type == "none"){ Zdet <- NULL } ## Include seasonal if applicable if(!is.null(eval(object$call$season))){ season <- eval(object$call$season) seas.names <- paste("sd", 1:(season-1), sep = "") cycle <- tail(data.all[, seas.names], season) seasonal <- as.matrix(cycle, nrow = season, ncol = season - 1) if(nrow(seasonal) >= n.ahead){ seasonal <- as.matrix(cycle[1:n.ahead, ], nrow = n.ahead, ncol = season -1 ) } else { while(nrow(seasonal) < n.ahead){ seasonal <- rbind(seasonal, cycle) } seasonal <- seasonal[1:n.ahead, ] } rownames(seasonal) <- seq(nrow(data.all) + 1, length = n.ahead) if(!is.null(Zdet)){ Zdet <- as.matrix(cbind(Zdet, seasonal)) } else { Zdet <- as.matrix(seasonal) } } ## Include exogenous variables if applicable if(!is.null(eval(object$call$exogen))){ if(is.null(dumvar)){ stop("\nNo matrix for dumvar supplied, but object varest contains exogenous variables.\n") } if(!all(colnames(dumvar) %in% colnames(data.all))){ stop("\nColumn names of dumvar do not coincide with exogen.\n") } if(!identical(nrow(dumvar), n.ahead)){ stop("\nRow number of dumvar is unequal to n.ahead.\n") } if(!is.null(Zdet)){ Zdet <- as.matrix(cbind(Zdet, dumvar)) } else { Zdet <- as.matrix(dumvar) } } ## Retrieving predetermined y variables Zy <- as.matrix(object$datamat[, 1:(K * (p + 1))]) yse <- matrix(NA, nrow = n.ahead, ncol = K) sig.y <- .fecov(x = object, n.ahead = n.ahead) for(i in 1 : n.ahead){ yse[i, ] <- sqrt(diag(sig.y[, , i])) } yse <- -1 * qnorm((1 - ci) / 2) * yse colnames(yse) <- paste(ci, "of", ynames) ## forecast recursion forecast <- matrix(NA, ncol = K, nrow = n.ahead) lasty <- c(Zy[nrow(Zy), ]) for(i in 1 : n.ahead){ lasty <- lasty[1 : (K * p)] Z <- c(lasty, Zdet[i, ]) forecast[i, ] <- B %*% Z temp <- forecast[i, ] lasty <- c(temp, lasty) } colnames(forecast) <- paste(ynames, ".fcst", sep="") lower <- forecast - yse colnames(lower) <- paste(ynames, ".lower", sep="") upper <- forecast + yse colnames(upper) <- paste(ynames, ".upper", sep="") forecasts <- list() for(i in 1 : K){ forecasts[[i]] <- cbind(forecast[, i], lower[, i], upper[, i], yse[, i]) colnames(forecasts[[i]]) <- c("fcst", "lower", "upper", "CI") } names(forecasts) <- ynames result <- list(fcst = forecasts, endog = object$y, model = object, exo.fcst = dumvar) class(result) <- "varprd" return(result) }
/scratch/gouwar.j/cran-all/cranData/vars/R/predict.varest.R
"predict.vec2var" <- function(object, ..., n.ahead = 10, ci = 0.95, dumvar = NULL){ n.ahead <- as.integer(n.ahead) K <- object$K p <- object$p obs <- object$obs data.all <- object$datamat ynames <- colnames(object$y) Z <- object$datamat[, -c(1 : K)] B <- object$deterministic for(i in 1:object$p){ B <- cbind(B, object$A[[i]]) } ## Deterministic and lagged y's ## Retrieval of A in matrix (whole) Zdet <- matrix(rep(1, n.ahead), nrow = n.ahead, ncol = 1) rownames(Zdet) <- seq(nrow(data.all) + 1, length = n.ahead) if(eval(object$vecm@ecdet) == "trend"){ trendf <- seq(obs + p, length = n.ahead) Zdet <- cbind(Zdet, trendf) } if(!is.null(eval(object$vecm@season))){ season <- eval(object$vecm@season) seas.names <- paste("sd", 1:(season-1), sep = "") cycle <- tail(data.all[, seas.names], season) seasonal <- matrix(cycle, nrow = season, ncol = season - 1) if(nrow(seasonal) >= n.ahead){ seasonal <- matrix(cycle[1:n.ahead, ], nrow = n.ahead, ncol = season -1 ) } else { while(nrow(seasonal) < n.ahead){ seasonal <- rbind(seasonal, cycle) } seasonal <- seasonal[1:n.ahead, ] } rownames(seasonal) <- seq(nrow(data.all) + 1, length = n.ahead) Zdet <- cbind(Zdet, seasonal) } if(!is.null(eval(object$vecm@dumvar))){ if(is.null(dumvar)){ stop(paste("\nPlease, provide a matrix x for argument 'dumvar' with", n.ahead, "rows.\n", sep = " ")) } if(!identical(nrow(dumvar), n.ahead)){ stop("\nNumber of rows of 'dumvar' is not equal to 'n.ahead'.\n") } testsum <- sum((colnames(dumvar) %in% colnames(B))) if(!(testsum == ncol(dumvar))){ stop("\nColumn names of 'dumvar' do not match with column names in 'object$datamat'.\n") } Zdet <- cbind(Zdet, dumvar) } exogen.cols <- which(colnames(data.all) %in% colnames(object$deterministic)) Zy <- data.all[, -exogen.cols] yse <- matrix(NA, nrow = n.ahead, ncol = K) sig.y <- .fecovvec2var(x = object, n.ahead = n.ahead) for(i in 1 : n.ahead){ yse[i, ] <- sqrt(diag(sig.y[, , i])) } yse <- -1 * qnorm((1 - ci) / 2) * yse colnames(yse) <- paste(ci, "of", ynames) ## forecast recursion forecast <- matrix(NA, ncol = K, nrow = n.ahead) lasty <- c(Zy[nrow(Zy), ]) for(i in 1 : n.ahead){ lasty <- lasty[1 : (K * p)] Z <- c(Zdet[i, ], lasty) forecast[i, ] <- B %*% Z temp <- forecast[i, ] lasty <- c(temp, lasty) } colnames(forecast) <- paste(ynames, ".fcst", sep="") lower <- forecast - yse colnames(lower) <- paste(ynames, ".lower", sep="") upper <- forecast + yse colnames(upper) <- paste(ynames, ".upper", sep="") forecasts <- list() for(i in 1 : K){ forecasts[[i]] <- cbind(forecast[, i], lower[, i], upper[, i], yse[, i]) colnames(forecasts[[i]]) <- c("fcst", "lower", "upper", "CI") } names(forecasts) <- ynames result <- list(fcst = forecasts, endog = object$y, model = object, exo.fcst = dumvar) class(result) <- "varprd" return(result) }
/scratch/gouwar.j/cran-all/cranData/vars/R/predict.vec2var.R
"print.svarest" <- function(x, digits = max(3, getOption("digits") - 3), ...){ text1 <- "SVAR Estimation Results:" cat(paste("\n", text1, "\n", sep = "")) row <- paste(rep("=", nchar(text1)), collapse = "") cat(row, "\n") cat("\n") if(identical(x$type, "Blanchard-Quah")){ cat("\nEstimated contemporaneous impact matrix:\n") print(x$B, digits = digits, ...) cat("\nEstimated identified long run impact matrix:\n") print(x$LRIM, digits = digits, ...) } else if(identical(x$type, "A-model")){ cat("\nEstimated A matrix:\n") print(x$A, digits = digits, ...) } else if(identical(x$type, "B-model")){ cat("\nEstimated B matrix:\n") print(x$B, digits = digits, ...) } else if(identical(x$type, "AB-model")){ cat("\nEstimated A matrix:\n") print(x$A, digits = digits, ...) cat("\nEstimated B matrix:\n") print(x$B, digits = digits, ...) } invisible(x) }
/scratch/gouwar.j/cran-all/cranData/vars/R/print.svarest.R
"print.svarsum" <- function(x, digits = max(3, getOption("digits") - 3), ...){ text1 <- "SVAR Estimation Results:" cat(paste("\n", text1, "\n", sep = "")) row <- paste(rep("=", nchar(text1)), collapse = "") cat(row, "\n") cat("\nCall:\n") print(x$call) cat("\n") cat(paste("Type:", x$type, "\n")) cat(paste("Sample size:", x$obs, "\n")) cat(paste("Log Likelihood:", round(x$logLik, 3), "\n")) if(!(x$type == "Blanchard-Quah")){ cat(paste("Method:", x$call$estmethod, "\n")) cat(paste("Number of iterations:", x$iter, "\n")) if(x$call$estmethod == "direct"){ cat(paste("Convergence code:", x$opt$convergence, "\n")) if(!is.null(x$opt$message)){ print(x$opt$message) } } } if(!is.null(x$LR)){ cat("\nLR overidentification test:\n") print(x$LR, digits = digits, ...) } if(identical(x$type, "Blanchard-Quah")){ cat("\nEstimated contemporaneous impact matrix:\n") print(x$B, digits = digits, ...) cat("\nEstimated identified long run impact matrix:\n") print(x$LRIM, digits = digits, ...) } else { cat("\nEstimated A matrix:\n") print(x$A, digits = digits, ...) if(any(c(x$Ase) != 0)){ cat("\nEstimated standard errors for A matrix:\n") print(x$Ase, digits = digits, ...) } cat("\nEstimated B matrix:\n") print(x$B, digits = digits, ...) if(any(c(x$Bse) != 0)){ cat("\nEstimated standard errors for B matrix:\n") print(x$Bse, digits = digits, ...) } } cat("\nCovariance matrix of reduced form residuals (*100):\n") print(x$Sigma.U, digits = digits, ...) invisible(x) }
/scratch/gouwar.j/cran-all/cranData/vars/R/print.svarsum.R
"print.svecest" <- function(x, digits = max(3, getOption("digits") - 3), ...){ text1 <- "SVEC Estimation Results:" cat(paste("\n", text1, "\n", sep = "")) row <- paste(rep("=", nchar(text1)), collapse = "") cat(row, "\n") cat("\n") cat("\nEstimated contemporaneous impact matrix:\n") print(x$SR, digits = digits, ...) cat("\nEstimated long run impact matrix:\n") print(x$LR, digits = digits, ...) invisible(x) }
/scratch/gouwar.j/cran-all/cranData/vars/R/print.svecest.R
"print.svecsum" <- function(x, digits = max(3, getOption("digits") - 3), ...){ text1 <- "SVEC Estimation Results:" cat(paste("\n", text1, "\n", sep = "")) row <- paste(rep("=", nchar(text1)), collapse = "") cat(row, "\n") cat("\nCall:\n") print(x$call) cat("\n") cat(paste("Type:", x$type, "\n")) cat(paste("Sample size:", x$obs, "\n")) cat(paste("Log Likelihood:", round(x$logLik, 3), "\n")) cat(paste("Number of iterations:", x$iter, "\n")) if(!is.null(x$LRover)){ cat("\nLR overidentification test:\n") print(x$LRover, digits = digits, ...) } cat("\nEstimated contemporaneous impact matrix:\n") print(x$SR, digits = digits, ...) if(!is.null(x$SRse)){ cat("\nEstimated standard errors for impact matrix:\n") print(x$SRse, digits = digits, ...) } cat("\nEstimated long run impact matrix:\n") print(x$LR, digits = digits, ...) if(!is.null(x$LRse)){ cat("\nEstimated standard errors for long-run matrix:\n") print(x$LRse, digits = digits, ...) } cat("\nCovariance matrix of reduced form residuals (*100):\n") print(x$Sigma.U, digits = digits, ...) invisible(x) }
/scratch/gouwar.j/cran-all/cranData/vars/R/print.svecsum.R
"print.varcheck" <- function(x, ...){ lapply(x[-1], print, ...) }
/scratch/gouwar.j/cran-all/cranData/vars/R/print.varcheck.R
"print.varest" <- function(x, digits = max(3, getOption("digits") - 3), ...){ dim <- length(x$varresult) names <- colnames(x$y) text1 <- "VAR Estimation Results:" cat(paste("\n", text1, "\n", sep = "")) row <- paste(rep("=", nchar(text1)), collapse = "") cat(row, "\n") cat("\n") for(i in 1:dim){ result <- coef(x$varresult[[i]]) text1 <- paste("Estimated coefficients for equation ", names[i], ":", sep = "") cat(text1, "\n") row <- paste(rep("=", nchar(text1)), collapse="") cat(row, "\n") text2 <- paste("Call:\n", names[i], " = ", paste(names(result), collapse = " + "), sep = "") cat(text2, "\n\n") print(result, ...) cat("\n\n") } invisible(x) }
/scratch/gouwar.j/cran-all/cranData/vars/R/print.varest.R
"print.varfevd" <- function(x, ...){ print(x[], ...) invisible(x) }
/scratch/gouwar.j/cran-all/cranData/vars/R/print.varfevd.R
"print.varirf" <- function(x, ...){ cat("\nImpulse response coefficients\n") print(x[[1]], ...) if(x$boot){ cat(paste("\nLower Band, CI=", 1 - x$ci, "\n")) print(x[[2]], ...) cat(paste("\nUpper Band, CI=", 1 - x$ci, "\n")) print(x[[3]], ...) } }
/scratch/gouwar.j/cran-all/cranData/vars/R/print.varirf.R