# Xpose 4
# An R-based population pharmacokinetic/
# pharmacodynamic model building aid for NONMEM.
# Copyright (C) 1998-2004 E. Niclas Jonsson and Mats Karlsson.
# Copyright (C) 2005-2008 Andrew C. Hooker, Justin J. Wilkins,
# Mats O. Karlsson and E. Niclas Jonsson.
# Copyright (C) 2009-2010 Andrew C. Hooker, Mats O. Karlsson and
# E. Niclas Jonsson.

# This file is a part of Xpose 4.
# Xpose 4 is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public License
# as published by the Free Software Foundation, either version 3
# of the License, or (at your option) any later version.

# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.

# You should have received a copy of the GNU Lesser General Public License
# along with this program. A copy can be found in the R installation
# directory under \share\licenses. If not, see http://www.gnu.org/licenses/.

##stack.xpose <- function (object, select,rep,...) {

#' @rdname addid
xpose.stack <- function(data, object, select, rep, subset = NULL, ...) {

  x <- data
  xx <- data
  nl <- as.list(1:ncol(x))
  names(nl) <- names(x)
  vars <- eval(substitute(select), nl, parent.frame())

  if (is.null(subset)) {
    x <- x[, vars, drop = FALSE]
    facnams <- xlabel(names(x), object)
    names(x) <- facnams
    tmp <- data.frame(values = unlist(unname(x)),
                      ind = factor(rep.int(names(x), lapply(x, length)),
                                   levels = names(x)))
    if (!missing(rep)) {
      for (yy in rep) {
        # labs[yy] <- xlabel(yy,object)
        tmp[, yy] <- rep(xx[, yy], length(select))
      }
    }
  } else {
    tmp <- c()
    if (length(subset) != length(select)) subset <- rep(subset, length = length(select))
    for (i in 1:length(select)) {
      if (subset[i] == "NULL") {
        x_tmp <- x[, c(select[i], rep)]
      } else {
        x_tmp <- x[eval(parse(text = paste("x$", subset[i], sep = ""))), c(select[i], rep)]
      }
      facnams_tmp <- xlabel(select[i], object)
      tmp_tmp <- data.frame(x_tmp,
                            ind = rep.int(facnams_tmp, dim(x_tmp)[[1]]))
      names(tmp_tmp)[1] <- "values"
      tmp <- rbind(tmp, tmp_tmp)
    }
  }

  # labs <- c()
  # labs["values"] <- paste(select,sep="/",collapse="/")
  # labs["ind"] <- "ind"

  ## xpobj <- new("xpose")
  ## xpobj@Data <- tmp
  ## xpobj@Prefs@Labels <- as.list(labs)
  ## xpobj@Runno <- object@Runno
  ## return(xpobj)
  return(tmp)
}
/scratch/gouwar.j/cran-all/cranData/xpose4/R/xpose.stack.R
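# A minimal usage sketch for the xpose.stack() helper above, assuming the
# simpraz.xpdb example database shipped with xpose4 and that the selected
# column names exist in its Data slot; the column names are illustrative only.
# xpose.stack() may be internal in some versions, so it is called via ::: here.
# The result is a long-format data frame with a "values" column and an "ind"
# factor naming the stacked variable, which is convenient for lattice plotting.
library(xpose4)
xpdb <- simpraz.xpdb
long <- xpose4:::xpose.stack(Data(xpdb), xpdb,
                             select = c("PRED", "IPRED"),  # columns to stack into "values"
                             rep    = c("ID", "TIME"))     # columns repeated for each stacked variable
head(long)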
# Xpose 4
# An R-based population pharmacokinetic/
# pharmacodynamic model building aid for NONMEM.
# Copyright (C) 1998-2004 E. Niclas Jonsson and Mats Karlsson.
# Copyright (C) 2005-2008 Andrew C. Hooker, Justin J. Wilkins,
# Mats O. Karlsson and E. Niclas Jonsson.
# Copyright (C) 2009-2010 Andrew C. Hooker, Mats O. Karlsson and
# E. Niclas Jonsson.

# This file is a part of Xpose 4.
# Xpose 4 is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public License
# as published by the Free Software Foundation, either version 3
# of the License, or (at your option) any later version.

# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.

# You should have received a copy of the GNU Lesser General Public License
# along with this program. A copy can be found in the R installation
# directory under \share\licenses. If not, see http://www.gnu.org/licenses/.

#' Print a pretty string.
#'
#' Print a string with a certain number of characters per row.
#'
#' @param value The text to print.
#' @param fill How wide the text should be per row.
#' @param file Where to print. \code{""} means to the screen.
#' @author Niclas Jonsson and Andrew C. Hooker
#' @keywords methods
#' @export xpose.string.print
xpose.string.print <- function(value, fill = 60, file = "") {
  cat(unlist(strsplit(value, " ")), fill = fill, file = file)
}
/scratch/gouwar.j/cran-all/cranData/xpose4/R/xpose.string.print.R
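# A small illustration of xpose.string.print() above: the text is split on
# spaces and re-wrapped by cat() so that no printed row exceeds 'fill' characters.
library(xpose4)
xpose.string.print(
  "This rather long sentence will be re-wrapped so that every printed row stays within the requested width.",
  fill = 40
)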
#' @export
#' @describeIn export.graph.par export graphics settings for a specified Xpose data object to
#' a file.
xpose.write <- function(object, file = "xpose.ini") {

  # build matrix (75 options)
  prefs <- matrix(1:150, ncol = 2)

  # General
  prefs[1,]  <- c("Miss", object@Prefs@Miss)
  prefs[2,]  <- c("Cat.levels", object@Prefs@Cat.levels)
  prefs[3,]  <- c("DV.Cat.levels", object@Prefs@DV.Cat.levels)

  # Plotting
  prefs[4,]  <- c("type", object@Prefs@Graph.prefs$type)
  prefs[5,]  <- c("cex", object@Prefs@Graph.prefs$cex)
  prefs[6,]  <- c("lty", object@Prefs@Graph.prefs$lty)
  prefs[7,]  <- c("lwd", object@Prefs@Graph.prefs$lwd)
  prefs[8,]  <- c("col", object@Prefs@Graph.prefs$col)
  prefs[9,]  <- c("pch", object@Prefs@Graph.prefs$pch)
  prefs[10,] <- c("grid", object@Prefs@Graph.prefs$grid)
  prefs[11,] <- c("aspect", object@Prefs@Graph.prefs$aspect)

  # Conditioning
  prefs[12,] <- c("byordfun", object@Prefs@Graph.prefs$byordfun)
  prefs[13,] <- c("shingnum", object@Prefs@Graph.prefs$shingnum)
  prefs[14,] <- c("shingol", object@Prefs@Graph.prefs$shingol)

  # abline
  prefs[15,] <- c("ablcol", object@Prefs@Graph.prefs$ablcol)
  prefs[16,] <- c("abllty", object@Prefs@Graph.prefs$abllty)
  prefs[17,] <- c("abllwd", object@Prefs@Graph.prefs$abllwd)

  # lmline
  prefs[18,] <- c("lmcol", object@Prefs@Graph.prefs$lmcol)
  prefs[19,] <- c("lmlty", object@Prefs@Graph.prefs$lmlty)
  prefs[20,] <- c("lmlwd", object@Prefs@Graph.prefs$lmlwd)

  # smooth
  prefs[21,] <- c("smooth", object@Prefs@Graph.prefs$smooth)
  prefs[22,] <- c("smcol", object@Prefs@Graph.prefs$sucol)
  prefs[23,] <- c("smlty", object@Prefs@Graph.prefs$sulty)
  prefs[24,] <- c("smlwd", object@Prefs@Graph.prefs$sulwd)
  prefs[25,] <- c("smspan", object@Prefs@Graph.prefs$suspan)
  prefs[26,] <- c("smdegr", object@Prefs@Graph.prefs$sudegr)

  # suline
  prefs[27,] <- c("sucol", object@Prefs@Graph.prefs$sucol)
  prefs[28,] <- c("sulty", object@Prefs@Graph.prefs$sulty)
  prefs[29,] <- c("sulwd", object@Prefs@Graph.prefs$sulwd)
  prefs[30,] <- c("suspan", object@Prefs@Graph.prefs$suspan)
  prefs[31,] <- c("sudegr", object@Prefs@Graph.prefs$sudegr)

  # Labelling
  prefs[32,] <- c("ids", object@Prefs@Graph.prefs$ids)
  prefs[33,] <- c("idsext", object@Prefs@Graph.prefs$idsext)
  prefs[34,] <- c("idscex", object@Prefs@Graph.prefs$idscex)
  prefs[35,] <- c("idsdir", object@Prefs@Graph.prefs$idsdir)

  # Dilution
  prefs[36,] <- c("dilfrac", object@Prefs@Graph.prefs$dilfrac)
  prefs[37,] <- c("dilci", object@Prefs@Graph.prefs$dilci)

  # Prediction intervals
  prefs[38,] <- c("PIuplty", object@Prefs@Graph.prefs$PIuplty)
  prefs[39,] <- c("PIdolty", object@Prefs@Graph.prefs$PIdolty)
  prefs[40,] <- c("PImelty", object@Prefs@Graph.prefs$PImelty)
  prefs[41,] <- c("PIuptyp", object@Prefs@Graph.prefs$PIuptyp)
  prefs[42,] <- c("PIdotyp", object@Prefs@Graph.prefs$PIdotyp)
  prefs[43,] <- c("PImetyp", object@Prefs@Graph.prefs$PImetyp)
  prefs[44,] <- c("PIupcol", object@Prefs@Graph.prefs$PIupcol)
  prefs[45,] <- c("PIdocol", object@Prefs@Graph.prefs$PIdocol)
  prefs[46,] <- c("PImecol", object@Prefs@Graph.prefs$PImecol)
  prefs[47,] <- c("PIuplwd", object@Prefs@Graph.prefs$PIuplwd)
  prefs[48,] <- c("PIdolwd", object@Prefs@Graph.prefs$PIdolwd)
  prefs[49,] <- c("PImelwd", object@Prefs@Graph.prefs$PImelwd)
  prefs[50,] <- c("PIuplimit", object@Prefs@Graph.prefs$PIlimits[2])
  prefs[51,] <- c("PIdolimit", object@Prefs@Graph.prefs$PIlimits[1])
  prefs[52,] <- c("PIarcol", object@Prefs@Graph.prefs$PImelwd)

  # B&W plots
  prefs[53,] <- c("bwhoriz", object@Prefs@Graph.prefs$bwhoriz)
  prefs[54,] <- c("bwratio", object@Prefs@Graph.prefs$bwratio)
  prefs[55,] <- c("bwvarwid", object@Prefs@Graph.prefs$bwvarwid)
  prefs[56,] <- c("bwdotpch", object@Prefs@Graph.prefs$bwdotpch)
  prefs[57,] <- c("bwdotcol", object@Prefs@Graph.prefs$bwdotcol)
  prefs[58,] <- c("bwdotcex", object@Prefs@Graph.prefs$bwdotcex)
  prefs[59,] <- c("bwrecfill", object@Prefs@Graph.prefs$bwrecfill)
  prefs[60,] <- c("bwreccol", object@Prefs@Graph.prefs$bwreccol)
  prefs[61,] <- c("bwreclty", object@Prefs@Graph.prefs$bwreclty)
  prefs[62,] <- c("bwreclwd", object@Prefs@Graph.prefs$bwreclwd)
  prefs[63,] <- c("bwumbcol", object@Prefs@Graph.prefs$bwumbcol)
  prefs[64,] <- c("bwumblty", object@Prefs@Graph.prefs$bwumblty)
  prefs[65,] <- c("bwumblwd", object@Prefs@Graph.prefs$bwumblwd)
  prefs[66,] <- c("bwoutpch", object@Prefs@Graph.prefs$bwoutpch)
  prefs[67,] <- c("bwoutcol", object@Prefs@Graph.prefs$bwoutcol)
  prefs[68,] <- c("bwoutcex", object@Prefs@Graph.prefs$bwoutcex)

  # Histograms
  prefs[69,] <- c("hiborder", object@Prefs@Graph.prefs$hiborder)
  prefs[70,] <- c("hicol", object@Prefs@Graph.prefs$hicol)
  prefs[71,] <- c("hilty", object@Prefs@Graph.prefs$hilty)
  prefs[72,] <- c("hilwd", object@Prefs@Graph.prefs$hilwd)
  prefs[73,] <- c("hidcol", object@Prefs@Graph.prefs$hidcol)
  prefs[74,] <- c("hidlty", object@Prefs@Graph.prefs$hidlty)
  prefs[75,] <- c("hidlwd", object@Prefs@Graph.prefs$hidlwd)

  for (i in 1:nrow(prefs)) {
    if (prefs[i,1] == prefs[i,2]) {
      prefs[i,2] = "NULL"
    }
  }

  # save matrix
  write.table(prefs, file = file, sep = "\t",
              col.names = FALSE, row.names = FALSE, quote = FALSE)
}
/scratch/gouwar.j/cran-all/cranData/xpose4/R/xpose.write.R
#' The Xpose Package
#'
#' Xpose is an R-based model building aid for population analysis using NONMEM.
#' It facilitates data set checkout, exploration and visualization, model
#' diagnostics, candidate covariate identification and model comparison.
#'
#' Xpose takes NONMEM and/or PsN output and generates graphs or other analyses.
#' It is assumed that each NONMEM run can be uniquely identified by a run
#' number (see the section below on how to generate the appropriate input to
#' Xpose). Xpose is implemented using the lattice graphics library.
#'
#' The Xpose package can be divided up into six subsections (functions
#' associated with each of the different subsections are linked in the "See
#' Also" section):
#' \describe{
#' \item{Data Functions}{Functions for managing the
#' input data and manipulating the Xpose database.}
#' \item{Generic Functions}{Generic wrapper functions around the lattice
#' functions. These functions can be invoked by the user but require quite
#' detailed instructions to generate the desired output.}
#' \item{Specific Functions}{These functions are single purpose functions
#' that generate specific output given only the Xpose database as input. The
#' behavior can, to some extent, be influenced by the user.}
#' \item{Classic Functions}{Xpose has a text based menu interface to make it
#' simple for the user to invoke the Xpose specific functions. This interface is
#' called Xpose Classic. Given the limitations a text based interface imposes,
#' Xpose Classic is not very flexible but may be useful for quick assessment of
#' a model and for learning to use Xpose.}
#' \item{PsN Functions}{These functions are the interface between Xpose and
#' PsN, i.e. they do not post-process NONMEM output but rather PsN output.}
#' \item{GAM Functions}{Functions that take an Xpose object and perform a
#' generalized additive model (GAM) stepwise search for influential covariates
#' on a single model parameter.}
#' }
#'
#' @name xpose4-package
#' @aliases xpose4-package xpose
#' @section How to make NONMEM generate input to Xpose: Xpose recognizes NONMEM
#'   runs, and the files associated with a particular run, through the run
#'   number. This is a number that is used in the name of NONMEM model files,
#'   output files and table files. The fundamental input to Xpose is one or more
#'   NONMEM table files. These table files should be named as below, followed by
#'   the run number, for example xptab1 for run number 1. Xpose looks for files
#'   according to the following pattern, where * is your run number:
#'
#'   \bold{sdtab*} Standard table file, containing ID, IDV, DV, PRED, IPRED,
#'   WRES, IWRES, RES, IRES, etc.
#'
#'   \bold{patab*} Parameter table, containing model parameters - THETAs, ETAs
#'   and EPSes
#'
#'   \bold{catab*} Categorical covariates, e.g. SEX, RACE
#'
#'   \bold{cotab*} Continuous covariates, e.g. WT, AGE
#'
#'   \bold{extra*, mutab*, mytab*, xptab*, cwtab*} Other variables you might
#'   need to have available to Xpose
#'
#'   \bold{run*.mod} Model specification file
#'
#'   \bold{run*.lst} NONMEM output
#'
#'   Strictly, only one table file is needed for Xpose (for example sdtab* or
#'   xptab*). However, using patab*, cotab*, catab* will influence the way that
#'   Xpose interprets the data and is recommended to get full benefit from
#'   Xpose.
#'
#'   You can use code in NONMEM similar to the following to generate the tables
#'   you need. NONMEM automatically appends DV, PRED, WRES and RES unless
#'   NOAPPEND is specified. Don't forget to leave at least one blank line at
#'   the end of the NONMEM model specification file.
#'
#'   \code{$TABLE ID TIME IPRED IWRES EVID MDV NOPRINT ONEHEADER FILE=sdtab1}
#'
#'   \code{$TABLE ID CL V2 KA K SLP KENZ NOPRINT ONEHEADER FILE=patab1}
#'
#'   \code{$TABLE ID WT HT AGE BMI PKG NOPRINT ONEHEADER FILE=cotab1}
#'
#'   \code{$TABLE ID SEX SMOK ALC NOPRINT ONEHEADER FILE=catab1}
#' @author E. Niclas Jonsson, Mats Karlsson, Justin Wilkins and Andrew Hooker
#' @references \href{https://uupharmacometrics.github.io/PsN/}{PsN}
#' @keywords methods package
#' @examples
#'
#' \dontrun{
#' # run the classic interface
#' library(xpose4)
#' xpose4()
#'
#' # command line interface
#' library(xpose4)
#' xpdb <- xpose.data(5)
#' basic.gof(xpdb)
#' }
#'
#' @family data functions
#' @family generic functions
#' @family specific functions
#' @family classic functions
#' @family PsN functions
#' @family GAM functions
#'
#' @import lattice
#' @import grid
#' @import gam
#' @import methods
#'
#' @importFrom stats aggregate anova as.formula coefficients
#'   density dfbetas dnorm formula lm
#'   lm.influence loess median predict preplot
#'   qchisq quantile quasi reorder reshape
#'   residuals sd summary.glm update
#' @importFrom grDevices bmp dev.cur dev.new dev.off jpeg pdf png postscript rgb tiff
#' @importFrom graphics hist par
#' @importFrom utils capture.output citation count.fields
#'   data head installed.packages menu
#'   packageDescription packageVersion read.csv
#'   read.table setTxtProgressBar tail txtProgressBar
#'   write.table
"_PACKAGE"
/scratch/gouwar.j/cran-all/cranData/xpose4/R/xpose4-package.R
# Xpose 4
# An R-based population pharmacokinetic/
# pharmacodynamic model building aid for NONMEM.
# Copyright (C) 1998-2004 E. Niclas Jonsson and Mats Karlsson.
# Copyright (C) 2005-2008 Andrew C. Hooker, Justin J. Wilkins,
# Mats O. Karlsson and E. Niclas Jonsson.
# Copyright (C) 2009-2010 Andrew C. Hooker, Mats O. Karlsson and
# E. Niclas Jonsson.

# This file is a part of Xpose 4.
# Xpose 4 is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public License
# as published by the Free Software Foundation, either version 3
# of the License, or (at your option) any later version.

# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.

# You should have received a copy of the GNU Lesser General Public License
# along with this program. A copy can be found in the R installation
# directory under \share\licenses. If not, see http://www.gnu.org/licenses/.

#' Classic menu system for Xpose 4
#'
#' @return NULL
#' @author Andrew Hooker
#' @keywords methods
#' @examples
#'
#' \dontrun{
#' xpose4()
#' }
#'
#' @export
#' @family classic functions
#' @importFrom utils menu
xpose4 <- function() {

  ## check that classes are present
  # if (length(findClass("xpose.data")) < 1) {
  #   createXposeClasses()
  # }
  if (!isClass("xpose.data") || !isClass("xpose.prefs")) {
    createXposeClasses()
  }

  ## THIS messes up the menu system! Leave it out!
  ##
  ## Set error handling options such that it always returns to the main
  ## menu. This may not necessarily be the best and a more elaborate
  ## error handling may be needed. See help for 'stop', 'try' and
  ## 'invokeRestart'.
  ##oldopts <- options(error=main.menu)
  ##on.exit(options(oldopts))

  cat("
Welcome to Xpose!

Xpose is a population analysis model building aid for NONMEM developed by:
Andrew C. Hooker, Justin J. Wilkins, Mats O. Karlsson and E. Niclas Jonsson

Pharmacometrics research group,
Department of Pharmaceutical Biosciences,
Uppsala University, Sweden.

Version: Xpose ")
  cat(packageDescription("xpose4", fields = c("Version")), ".", "\n\n", sep = "")
  cat(packageDescription("xpose4", fields = c("URL")), "\n\n")
  cat("Please report bugs at", packageDescription("xpose4", fields = c("BugReports")), "\n\n")
  cat("Xpose, Copyright (C)", packageDescription("xpose4", fields = c("Copyright")), "\n\n")
  cat("Xpose is free software and comes with ABSOLUTELY NO WARRANTY.
Xpose is made available under the terms of the GNU Lesser General Public
License (LGPL), version 3 or later. You are welcome to redistribute it
under the conditions described therein.
http://www.gnu.org/licenses/
")

  ## Get the data
  change.xp.obj()

  ## Start the menus
  main.menu()

  ##return()
}
/scratch/gouwar.j/cran-all/cranData/xpose4/R/xpose4.R
#' Extract or set the value of the Subset slot.
#'
#' Extract or set the value of the Subset slot of an "xpose.data" object.
#'
#' The subset string has the same syntax as the subset argument to, e.g.,
#' \code{panel.xyplot}. Note, however, that the "xpose.data" subset is not used
#' as an argument to \code{panel.xyplot}. It is intended as the subset argument
#' to the \code{Data} and \code{SData} functions.
#'
#' @aliases xsubset xsubset<-
#' @param object An "xpose.data" object.
#' @param value A string with the subset expression.
#' @return A string representing the subset expression.
#' @author Niclas Jonsson
#' @seealso \code{\link{Data}}, \code{\link{SData}}
#' @keywords methods
#' @examples
#' xpdb <- simpraz.xpdb
#' xsubset(xpdb) <- "DV > 0"
#' xsubset(xpdb)
#'
#' @export
#' @family data functions
xsubset <- function(object) {
  return(object@Prefs@Subset)
}

#' @describeIn xsubset assign a value with a string representing the subset expression
#' @export
"xsubset<-" <- function(object, value) {
  if (is.null(value)) return(object)
  object@Prefs@Subset <- value
  return(object)
}
/scratch/gouwar.j/cran-all/cranData/xpose4/R/xsubset.R
# Xpose 4
# An R-based population pharmacokinetic/
# pharmacodynamic model building aid for NONMEM.
# Copyright (C) 1998-2004 E. Niclas Jonsson and Mats Karlsson.
# Copyright (C) 2005-2008 Andrew C. Hooker, Justin J. Wilkins,
# Mats O. Karlsson and E. Niclas Jonsson.
# Copyright (C) 2009-2010 Andrew C. Hooker, Mats O. Karlsson and
# E. Niclas Jonsson.

# This file is a part of Xpose 4.
# Xpose 4 is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public License
# as published by the Free Software Foundation, either version 3
# of the License, or (at your option) any later version.

# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.

# You should have received a copy of the GNU Lesser General Public License
# along with this program. A copy can be found in the R installation
# directory under \share\licenses. If not, see http://www.gnu.org/licenses/.

#' Extract and set Xpose variable definitions.
#'
#' This function extracts and sets Xpose variable definitions in "xpose.data"
#' objects.
#'
#' The Xpose variable definitions are used to map particular variable types to
#' column names in the data.frame in the Data slot of the "xpose.data" object.
#' The single-valued Xpose variable definitions are: \code{id, idlab, idv, occ,
#' dv, pred, ipred, iwres, res}. The (potentially) vector-valued Xpose variable
#' definitions are: \code{parms, covariates, ranpar, tvparms} (parameters,
#' covariates, random effects parameters=etas, typical value parameters). The
#' default values of these can be found in the \code{createXposeClasses}
#' function.
#'
#' @param x The name of an xpose variable (see below).
#' @param object An \code{xpose.data} object.
#' @param value A two element vector of which the first element is the name of
#' the variable and the second the column name in the Data slot of the object.
#' @return Returns a string with the name of the data variable defined as the
#' Xpose data variable.
#' @author Niclas Jonsson
#' @seealso \code{\link{xpose.data-class}}, \code{\link{xpose.prefs-class}}
#' @examples
#' xpdb <- simpraz.xpdb
#'
#' ## get the column name in the Data slot of object xpdb
#' ## corresponding to the label dv
#' xvardef("dv", xpdb)
#'
#' ## reset which column the label dv points to in the Data slot of
#' ## object xpdb
#' xvardef(xpdb) <- c("dv", "DVA")
#'
#' @export
xvardef <- function(x, object) {
  return(object@Prefs@Xvardef[[x]])
}

#' @describeIn xvardef reset which column the label dv points to in the Data slot of
#' the xpose database object
#' @export
"xvardef<-" <- function(object, value) {
  ## value is a two element vector of which the first element is the
  ## name of the variable and the second the label
  object@Prefs@Xvardef[value[1]] <- value[2]
  return(object)
}
/scratch/gouwar.j/cran-all/cranData/xpose4/R/xvardef.R
# Xpose 4
# An R-based population pharmacokinetic/
# pharmacodynamic model building aid for NONMEM.
# Copyright (C) 1998-2004 E. Niclas Jonsson and Mats Karlsson.
# Copyright (C) 2005-2008 Andrew C. Hooker, Justin J. Wilkins,
# Mats O. Karlsson and E. Niclas Jonsson.
# Copyright (C) 2009-2010 Andrew C. Hooker, Mats O. Karlsson and
# E. Niclas Jonsson.

# This file is a part of Xpose 4.
# Xpose 4 is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public License
# as published by the Free Software Foundation, either version 3
# of the License, or (at your option) any later version.

# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.

# You should have received a copy of the GNU Lesser General Public License
# along with this program. A copy can be found in the R installation
# directory under \share\licenses. If not, see http://www.gnu.org/licenses/.

.onLoad <- function(libname, pkgname) {
  #createXposeClasses()
  #create.xpose.plot.classes()

  ## Define the methods
  #setMethod("print",signature(x="xpose.multiple.plot"),print.xpose.multiple.plot)
  #setMethod("show","xpose.multiple.plot",function(object) print(x=object))
}
/scratch/gouwar.j/cran-all/cranData/xpose4/R/zzz.R
#' Analyze a dataset and search for anomalies #' #' If any anomalous columns are found, they are reported as a warning and returned in a data.frame. To interpret the output, we are getting these anomalies: #' \itemize{ #' \item NA values: NA #' \item 0 values: Zero #' \item Blank strings: Blank #' \item Infinite numbers: Inf #' } #' #' All of these value are reported in columns prefixed by q (quantity), indicating the rows with the anomaly, and p (percentage), indicating percent of total rows with the anomaly. #' #' And, also any columns with only one distinct value, which means the column doesn't bring information to the table (If all rows are equal, why bother having that column?). We report the number of distinct values in qDistinct. #' #' @param data_analyze a data frame or tibble to analyze #' @param anomaly_threshold the minimum percentage of anomalous rows for the column to be problematic #' @param distinct_threshold the minimum amount of distinct values the column has to have to not be problematic, usually you want to keep this at it's default value. #' #' @examples #' #' library(xray) #' anomalies(mtcars, anomaly_threshold=0.5) #' #' @export #' @import dplyr anomalies <- function(data_analyze, anomaly_threshold = 0.8, distinct_threshold = 2) { if(anomaly_threshold < 0 || anomaly_threshold > 1){ warning("anomaly_threshold can be between 0 and 1, which mean 0% and 100%.") return() } if(inherits(data_analyze, 'tbl_sql')){ # Collect up to 100k samples print("Remote data source, collecting up to 100k sample rows") data_analyze = collect(data_analyze, n=100000) } varNames = names(data_analyze) # Check for anomalies by column and summarize them analyze = data_analyze %>% mutate_all(funs( case_when( is.na(.) ~ 'NA', . == Inf | . == -Inf ~ 'Infinite', . == 0 ~ 'Zero', as.character(.) == '' ~ 'Blank', TRUE ~ 'Value' ) ))%>% summarize_all(funs( qNA=sum(.=='NA'), qZero=sum(.=='Zero'), qBlank=sum(.=='Blank'), qValues=sum(.=='Value'), qInf=sum(.=='Infinite'), q=n() )) %>% collect() # Distinct amount of values inside each column analyzeDistinct = data_analyze %>% summarize_all(funs(qDistinct=n_distinct(.))) %>% collect() # Generate summary analyzeOut=data.frame() for(var in varNames){ outRow = data.frame(Variable=var) if(ncol(data_analyze)==1){ prefix='' }else{ prefix=paste0(var,'_') } q=analyze[[paste0(prefix,'q')]] outRow$q=q outRow$qNA=analyze[[paste0(prefix,'qNA')]] outRow$pNA=as.double(outRow$qNA/q) outRow$qZero=analyze[[paste0(prefix,'qZero')]] outRow$pZero=as.double(outRow$qZero/q) outRow$qBlank=analyze[[paste0(prefix,'qBlank')]] outRow$pBlank=as.double(outRow$qBlank/q) outRow$qInf=analyze[[paste0(prefix,'qInf')]] outRow$pInf=as.double(outRow$qInf/q) outRow$qDistinct=analyzeDistinct[[paste0(prefix,'qDistinct')]] if(ncol(analyzeOut)==0){ analyzeOut=outRow }else{ analyzeOut=rbind(analyzeOut,outRow) } } descriptions = getColumnDescriptions(data_analyze) analyzeOut$type=descriptions # Calculate percent anomalous finalReport = analyzeOut %>% mutate(anomalous_percent=pNA+pZero+pBlank+pInf) %>% arrange(-anomalous_percent, qDistinct) # Detect problematic variables problem_vars = filter(finalReport, anomalous_percent > anomaly_threshold | qDistinct < distinct_threshold) %>% mutate(problems=trimws(paste0( ifelse(anomalous_percent > anomaly_threshold, paste0('Anomalies present in ', xpercent(anomalous_percent), ' of the rows. '),''), ifelse(qDistinct < distinct_threshold, paste0('Less than ', distinct_threshold, ' distinct values. 
'),'') ))) %>% mutate_at(c('pNA', 'pZero','pBlank','pInf', 'anomalous_percent') , xpercent) finalReport = mutate_at(finalReport, c('pNA', 'pZero','pBlank','pInf', 'anomalous_percent') , xpercent) if(nrow(problem_vars) > 0){ warning("Found ", nrow(problem_vars), ' possible problematic variables: \n', paste0(problem_vars$Variable, collapse=', ')) } # Return the result return(list( variables=finalReport, problem_variables=problem_vars )) } colToDescription <- function(col) { col_class = class(col)[[1]] switch(col_class, logical = 'Logical', numeric = 'Numeric', integer = 'Integer', Date = 'Date', POSIXct = 'Timestamp', POSIXlt = 'Timestamp', factor = "Factor", character = "Character", "Unknown" ) } getColumnDescriptions <- function(df) { return( vapply( df, FUN = colToDescription, character(1) ) ) } xpercent = function(x){ ifelse(x==0, '-',paste0(round(x*100,2), '%')) }
/scratch/gouwar.j/cran-all/cranData/xray/R/anomalies.R
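# A short sketch of how the list returned by anomalies() above can be used;
# the threshold value is illustrative only.
library(xray)
res <- anomalies(mtcars, anomaly_threshold = 0.5)
res$variables          # per-variable counts/percentages of NA, zero, blank and Inf values
res$problem_variables  # only the variables flagged as problematic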
#' Analyze each variable and generate a histogram describing it's distribution. #' #' Also returns a table of all numeric variables describind it's percentiles 1, 10, 25, 50 (median), 75, 90 and 99. #' #' @param data_analyze a data frame to analyze #' @param outdir an optional output directory to save the resulting plots as png images #' @param charts set this to false to avoid generating charts, useful for batch script usage #' #' #' @examples #' library(xray) #' distributions(mtcars) #' #' @export #' @import dplyr #' @import ggplot2 #' @importFrom graphics plot #' @importFrom stats dnorm #' @importFrom stats sd #' @importFrom utils setTxtProgressBar #' @importFrom utils txtProgressBar distributions <- function(data_analyze, outdir, charts=T) { # If it's remote, bring it home if(inherits(data_analyze, 'tbl_sql')){ # Collect up to 100k samples print("Remote data source, collecting up to 100k sample rows") data_analyze = collect(data_analyze, n=100000) } # Obtain metadata for the dataset varMetadata = suppressWarnings(anomalies(data_analyze)$variables) if(charts==T){ # Start rolling baby! i=0 pb <- txtProgressBar(0, nrow(varMetadata)) # Progress bar resVars = c() results = foreach::foreach(i=seq_len(nrow(varMetadata))) %do% { var=varMetadata[i,] varName=as.character(var$Variable) setTxtProgressBar(pb, i) #Ignore unsupported types if(!var$type %in% c('Integer', 'Logical', 'Numeric', 'Factor', 'Character')){ warning('Ignoring variable ', varName, ': Unsupported type for visualization.') }else{ resVars=c(resVars,as.character(varName)) if(var$type %in% c('Integer', 'Numeric')){ varAnalyze = data.frame(dat=as.double(data_analyze[[varName]])) range=max(varAnalyze$dat,na.rm=T)-min(varAnalyze$dat,na.rm=T) # Histogram for numeric variables with at least 10 distinct values if(var$qDistinct > 10){ bins = case_when( nrow(varAnalyze) > 1000 & var$qDistinct > 50 ~ 20, nrow(varAnalyze) > 5000 & var$qDistinct > 30 ~ 15, TRUE ~ 10 ) ggplot(varAnalyze, aes(dat)) + geom_histogram(aes(y=..density..), bins=bins,show.legend = FALSE, col='grey', fill='#5555ee') + scale_fill_discrete(h = c(180, 250), l=50) + stat_function(fun = dnorm, args = list(mean = mean(varAnalyze$dat, na.rm=TRUE), sd = sd(varAnalyze$dat, na.rm=TRUE)), col = 'red') + theme_minimal() + labs(x = varName, y = "Rows") + ggtitle(paste("Histogram of", varName)) }else{ # Plot a bar chart if less than or equal to 10 distinct values varAnalyze = data.frame(dat=as.character(data_analyze[[varName]])) ggplot(varAnalyze, aes(dat, fill=dat)) + geom_bar(show.legend = FALSE) + scale_fill_discrete(h = c(180, 250), l=50) + theme_minimal() + labs(x = varName, y = "Rows") + ggtitle(paste("Bar Chart of", varName)) + theme(axis.text.x = element_text(angle = 45, hjust = 1)) } }else{ # Plot a grouped bar chart for character values varAnalyze = data.frame(dat=as.character(data_analyze[[varName]])) topvars = group_by(varAnalyze, dat) %>% count() %>% arrange(-n) topten=topvars if(nrow(topvars) > 10){ topten=head(topvars, 10) warning("On variable ", varName, ", more than 10 distinct variables found, only using top 10 for visualization.") others = anti_join(varAnalyze, topten, by='dat') %>% count() %>% mutate(dat='Others') %>% select(dat, n) } grouped = group_by(varAnalyze, dat) %>% semi_join(topten, by='dat') %>% count() %>% arrange(-n) %>% ungroup() if(nrow(topvars)>10){ grouped = rbind(grouped, others) } ggplot(grouped, aes(x=dat, y=n, fill=dat)) + geom_bar(stat='identity', show.legend = FALSE) + coord_flip() + scale_fill_discrete(h = c(180, 250), l=50) + 
theme_minimal() + labs(x = varName, y = "Rows") + ggtitle(paste0("Bar Chart of ", varName)) + theme(axis.text.x = element_text(angle = 45, hjust = 1)) } } } close(pb) results[vapply(results, is.null, logical(1))] <- NULL batches = ceiling(length(results)/4) foreach::foreach(i=seq_len(batches)) %do% { firstPlot=((i-1)*4)+1 lastPlot=min(firstPlot+3, length(results), na.rm=T) if(lastPlot==firstPlot){ plot(results[[firstPlot]]) }else{ grid::grid.newpage() grid::pushViewport(grid::viewport(layout = grid::grid.layout(2,2))) row=1 col=1 for (j in firstPlot:lastPlot) { print(results[[j]], vp = grid::viewport(layout.pos.row = row, layout.pos.col = col)) if(row==2){ row=1 col=col+1 }else{ row=row+1 } } } } if(!missing(outdir)){ foreach::foreach(i=seq_along(results)) %do% { ggsave(filename=file.path(outdir, paste0(gsub('[^a-z0-9 ]','_', tolower(resVars[[i]])), '.png')), plot=results[[i]]) } } } distTable=foreach::foreach(i=seq_len(nrow(varMetadata)), .combine=rbind) %do% { var=varMetadata[i,] varName=as.character(var$Variable) if(var$type %in% c('Integer', 'Numeric')){ data.frame( cbind(varName, t(round(quantile(data_analyze[[varName]], probs=c(.01, .1, .25, .5, .75, .9, .99), na.rm=T), 4)) )) } } distTable=setNames(distTable, c('Variable', 'p_1', 'p_10', 'p_25', 'p_50', 'p_75', 'p_90', 'p_99')) distTable }
/scratch/gouwar.j/cran-all/cranData/xray/R/distributions.R
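# A sketch of distributions() above: chart generation can be suppressed for
# batch use, and the returned value is a data frame with the 1/10/25/50/75/90/99
# percentiles of each numeric variable. The output directory below is hypothetical.
library(xray)
pct <- distributions(mtcars, charts = FALSE)  # skip plotting, keep the percentile table
pct
# distributions(mtcars, outdir = "plots")     # would also save each chart as a PNG under ./plots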
#' Analyze each variable in respect to a time variable #' #' @param data_analyze a data frame to analyze #' @param date_variable the variable (length one character vector or bare expression) that will be used to pivot all other variables #' @param time_unit the time unit to use if not automatically #' @param nvals_num_to_cat numeric numeric values with this many or fewer distinct values will be treated as categorical #' @param outdir an optional output directory to save the resulting plots as png images #' #' @examples #' library(xray) #' data(longley) #' longley$Year=as.Date(paste0(longley$Year,'-01-01')) #' timebased(longley, 'Year') #' #' @export #' @import dplyr #' @import ggplot2 #' @importFrom grDevices boxplot.stats #' @importFrom utils head #' @importFrom stats quantile #' @importFrom stats setNames #' @importFrom utils setTxtProgressBar #' @importFrom utils txtProgressBar #' timebased <- function(data_analyze, date_variable, time_unit="auto", nvals_num_to_cat=2,outdir) { # Remove nulls data_analyze = filter(data_analyze, !is.na(!!date_variable)) if(inherits(data_analyze, 'tbl_sql')){ # Collect up to 100k samples print("Remote data source, collecting up to 100k sample rows") data_analyze = collect(data_analyze, n=100000) } # Obtain metadata for the dataset varMetadata = suppressWarnings(anomalies(data_analyze)$variables) dateData = pull(data_analyze, date_variable) if(inherits(dateData, 'POSIXct') || inherits(dateData, 'POSIXlt')){ # Remove timezone attr(dateData, "tzone") <- "UTC" }else if(! inherits(dateData, 'Date')){ # Not a Date nor a POSIXct/POSIXlt, what are you giving me? if(is.character(dateData) || is.factor(dateData)){ #Try to convert strings dateData = as.Date(as.character(dateData)) }else{ warning('You need to specify a date variable as the second parameter.') return() } } #Determine time unit if(time_unit == 'auto'){ timeRange = as.double(difftime(max(dateData, na.rm=TRUE), min(dateData, na.rm=TRUE), units='secs')) min=60 hour=min*60 day=hour*24 year=day*365 time_unit = case_when( timeRange > year*2 ~ 'year', timeRange > day*35 ~ 'month', timeRange > hour*6 ~ 'hour', timeRange > min*10 ~ 'minute', TRUE ~ 'second' ) } dateData=lubridate::floor_date(dateData, unit=time_unit) # Start rolling baby! 
i=0 pb <- txtProgressBar(0, nrow(varMetadata)) # Progress bar resVars = c() results = foreach::foreach(i=seq_len(nrow(varMetadata))) %do% { var=varMetadata[i,] varName=as.character(var$Variable) setTxtProgressBar(pb, i) if(var$pNA=='100%'){ #All null warning("The variable ", varName, " is completely NA, can't plot that.") return() }else if(var$Variable == quo_name(date_variable)) { #Do nothing when date var return() }else if(!var$type %in% c('Integer', 'Logical', 'Numeric', 'Factor', 'Character')){ #Do not try to plot anything warning('Ignoring variable ', varName, ': Unsupported type for visualization.') return() }else{ resVars=c(resVars,varName) if(var$type %in% c('Numeric','Integer') & var$qDistinct > nvals_num_to_cat){ # Box plot for visualizing difference in distribution among time varAnalyze = data.frame(dat=as.double(data_analyze[[varName]]), date=as.factor(dateData)) ylim1 = boxplot.stats(varAnalyze$dat)$stats[c(1, 5)] yrange = ylim1[2]-ylim1[1] ggplot(varAnalyze, aes(date, dat)) + geom_boxplot(fill='#ccccff', outlier.color = 'red', outlier.shape=1, na.rm=TRUE) + theme_minimal() + labs(x = varName, y = "Rows") + coord_cartesian(ylim = ylim1+c(-0.1*yrange,0.1*yrange)) + ggtitle(paste("Histogram of", var$Variable)) + theme(axis.text.x = element_text(angle = 45, hjust = 1)) }else{ # 100% stacked barchart showing difference in categorical composition varAnalyze = data.frame(dat=as.character(data_analyze[[varName]]), date=dateData) topvars = group_by(varAnalyze, dat) %>% count() %>% arrange(-n) %>% ungroup() topten=topvars if(nrow(topvars) > 10){ topten=head(topvars, 10) warning("On variable ", varName, ", more than 10 distinct variables found, only using top 10 for visualization.") others = anti_join(varAnalyze, topten, by='dat') %>% group_by(date) %>% count() %>% ungroup() %>% mutate(dat='Others') %>% select(date, dat, n) } grouped = group_by(varAnalyze, date, dat) %>% semi_join(topten, by='dat') %>% count() %>% arrange(date, -n) %>% ungroup() if(nrow(topvars)>10){ grouped = rbind(grouped, others) } abbr = function (x) {return (abbreviate(x, minlength = 10))} ggplot(grouped, aes(x=date, y=n, fill=dat)) + geom_bar(position='fill', stat='identity') + scale_y_continuous(labels = scales::percent_format()) + scale_fill_brewer(palette='Paired', label=abbr) + theme_minimal() + labs(x = var$Variable, y = "Rows", fill=varName) + ggtitle(paste("Evolution of variable", varName)) + theme(axis.text.x = element_text(angle = 45, hjust = 1)) } } } close(pb) results[vapply(results, is.null, logical(1))] <- NULL batches = ceiling(length(results)/4) foreach::foreach(i=seq_len(batches)) %do% { firstPlot=((i-1)*4)+1 lastPlot=min(firstPlot+3, length(results), na.rm=T) if(lastPlot==firstPlot){ plot(results[[firstPlot]]) }else{ grid::grid.newpage() grid::pushViewport(grid::viewport(layout = grid::grid.layout(2,2))) row=1 col=1 for (j in firstPlot:lastPlot) { print(results[[j]], vp = grid::viewport(layout.pos.row = row, layout.pos.col = col)) if(row==2){ row=1 col=col+1 }else{ row=row+1 } } } } if(!missing(outdir)){ foreach::foreach(i=seq_along(results)) %do% { ggsave(filename=file.path(outdir, paste0(gsub('[^a-z0-9 ]','_', tolower(resVars[[i]])), '.png')), plot=results[[i]]) } } message(length(results), " charts have been generated.") }
/scratch/gouwar.j/cran-all/cranData/xray/R/timebased.R
#' \code{xray} package
#'
#' X-Ray - Dataset Analyzer
#'
#' @docType package
#' @name xray
#' @importFrom dplyr %>%
#' @importFrom foreach %do%
NULL

## quiets concerns of R CMD check
utils::globalVariables(c(".", "n", "pNA", "pZero", "pBlank", 'pInf',
                         'anomalous_percent', 'qDistinct', 'dat', '..density..'))
/scratch/gouwar.j/cran-all/cranData/xray/R/xray.R
# glmnot quite glmnet # this wrapper allows training & prediction from a data.frame, and provides mechanisms for handling new levels in prediction # for current release, this class is not exposed in public API coef.glmnot <- function(object, ...) { coef(object$model, ...) } # Accepts a data frame or design matrix for newdata, dependent on what the model was trained with. # If a model was trained with a data.frame, it can be predicted with either a matrix or a data.frame # #' @importFrom Matrix sparse.model.matrix predict.glmnot <- function(object, newdata, sparse = TRUE, lambda = 'lambda.min', type = 'response') { # convert a data-frame to a matrix matching the expected design matrix # this is really clunky design, but so be it if (is.data.frame(newdata)) { if (is.null(object$xlev)) { stop('Cannot predict from a dataframe using a glmnot object which was trained on a matrix - train using a data frame, or predict from a matrix of the same form') } design_matrix_method <- if (sparse) sparse.model.matrix else model.matrix no_response_formula <- delete.response(terms(object$formula)) # handle the case of new levels in newdata not in original data xlev for (column in all.vars(no_response_formula)) { column_data <- newdata[[column]] if(!is.numeric(column_data)) { # to avoid factor / character confusion, coerce everything to character and take the performance penalty column_data <- as.character(column_data) reference_level <- object$xlev[[column]][1] newdata[[column]] <- ifelse(column_data %in% object$xlev[[column]], column_data, reference_level) } } newdata <- design_matrix_method(no_response_formula, newdata, xlev = object$xlev) # this case for when newdata is missing levels in the original dataframe } predict(object$model, newx = newdata, s = lambda, type = type) } glmnot <- function(object, ...) { UseMethod('glmnot', object) } # X is a design matrix (sparse or otherwise) consisting of predictors to train on # y is a vector of responses to train on # alpha is the elastic net parameter # formula is the optional formula specifying the design matrix # #' @import glmnet glmnot.default <- function(X, y, family, alpha = 1, formula = NULL, xlev = NULL, glm_control = list() ) { cv.glmnet.args <- list(X, y, family = family, alpha = alpha) cv.glmnet.args <- append(cv.glmnet.args, glm_control) m <- do.call(cv.glmnet, cv.glmnet.args) # repair call by removing data m$call[["x"]] <- quote(x) m$call[["y"]] <- quote(y) m$call[[1]] <- quote(glmnet) structure(list(model = m, formula = formula, xlev = xlev), class = 'glmnot') } #' @importFrom Matrix sparse.model.matrix #' @import dplyr glmnot.formula <- function(formula, data, family, alpha = 1, type.measure = 'auc', sparse = TRUE, glm_control ) { constant_factors <- colnames( data %>% select_if(function(column) { if(!is.numeric(column)) { return(n_distinct(column) < 2) } else { return(FALSE) } }) ) # terms() produces a formula object which represents the expanded formula (i.e. if a '.' is included) final_formula <- terms(formula, data = data) constant_factors_in_formula <- intersect(constant_factors, all.vars(final_formula)) terms_matrix <- attr(final_formula, "factors") for (column in colnames(terms_matrix)) { # todo technically need to check if sum == order of term if (sum(terms_matrix[constant_factors_in_formula, column]) > 0) { final_formula <- update(final_formula, paste('. ~ . 
-', column)) } } # ensure there are at least two terms left stopifnot(length(all.vars(final_formula)) > 1) response_variable <- get_response(final_formula) columns <- get_predictors(final_formula) factor_column_indicator <- sapply(columns, function(column) { !is.numeric(data[[column]]) && column != response_variable }) factor_columns <- columns[factor_column_indicator] xlev <- lapply(data %>% select(factor_columns), function(x) { # respect original ordering of levels if factor, otherwise arbitrary ordering if(is.factor(x)) levels(x) else as.character(unique(x)) }) model_mat_method <- if (sparse) sparse.model.matrix else model.matrix X <- model_mat_method(final_formula, data, xlev = xlev) y <- data[[response_variable]] stopifnot(nrow(X) == length(y)) # this can be expected to occur when there are NA cases present in the original data (model.matrix will drop such rows) glmnot(X, y, family, alpha = alpha, formula = final_formula, xlev = xlev, glm_control = glm_control) }
/scratch/gouwar.j/cran-all/cranData/xrf/R/glmnot.R
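# A hedged sketch of the internal glmnot wrapper above (not exposed in the
# public API, hence the ::: calls). The formula, data and control settings are
# illustrative: cv.glmnet is fit on the design matrix built from the data frame,
# and prediction from a data frame reuses the stored factor levels.
library(xrf)
m <- xrf:::glmnot(am ~ mpg + wt + cyl, mtcars, family = 'binomial',
                  glm_control = list(type.measure = 'deviance', nfolds = 5))
head(xrf:::predict.glmnot(m, mtcars))  # predicted probabilities at lambda.min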
########################################################### # functions adapting xrf rulesets to generic volumes & back ########################################################### features_to_space_identifier <- function(features) { # note, this is guaranteed unique because xrf preconditions that feature names do not include `,` ordered_features <- sort(unique(features)) return(paste(ordered_features, collapse=',')) } resolve_splits_to_bounding <- function(split, less_than) { lower_indices <- which(less_than) upper_indices <- which(!less_than) lower_bound <- max(c(-Inf, split[upper_indices])) upper_bound <- min(c(Inf, split[lower_indices])) list(lower_bound = lower_bound, upper_bound = upper_bound) } #' @import dplyr build_volumes_from_xrf_rules <- function(rules) { # turn all rules into bounds. for singly split dimensions, this means adding the appropriate Inf bound # for dimensions split many times, shrink to smallest bound (since a rule is a conjunction) rules_as_bounds <- rules %>% group_by(.data$rule_id, .data$feature) %>% summarize( lower_bound = resolve_splits_to_bounding(.data$split, .data$less_than)$lower_bound, upper_bound = resolve_splits_to_bounding(.data$split, .data$less_than)$upper_bound ) # create a logical grouping of volumes that occupy the same space (i.e. could feasbily be overlapped) space_assigned_volumes <- rules_as_bounds %>% group_by(.data$rule_id) %>% mutate( space_id = features_to_space_identifier(.data$feature) ) %>% ungroup() # rename columns for generic algo to solve space_assigned_volumes %>% mutate( dimension = .data$feature, volume_id = .data$rule_id, min = .data$lower_bound, max = .data$upper_bound ) %>% select(.data$dimension, .data$volume_id, .data$min, .data$max, .data$space_id) } # xrf uses single splits as opposed to [min, max] intervals, so convert intervals to splits build_xrf_rules_from_volumes <- function(volumes) { rbind( volumes %>% filter(is.finite(.data$min)) %>% mutate( rule_id = make.names(.data$volume_id), feature = .data$dimension, split = .data$min, less_than = FALSE ), volumes %>% filter(is.finite(.data$max)) %>% mutate( rule_id = make.names(.data$volume_id), feature = .data$dimension, split = .data$max, less_than = TRUE ), stringsAsFactors = FALSE ) } ########################################################### # generic deoverlapping algo using input volumes ########################################################### build_fully_partitioned_space <- function(volumes) { volumes %>% mutate(bound = .data$min) %>% select(.data$dimension, .data$bound) %>% rbind( volumes %>% mutate(bound = .data$max) %>% select(.data$dimension, .data$bound), stringsAsFactors = FALSE ) } generate_volumes_from_partitioned_space <- function(partitioned_space, id_starter = 1) { if (nrow(partitioned_space) == 0) { return(data.frame()) } # pick an arbtirary first dimension dimension_of_interest <- partitioned_space$dimension[1] dimension_bounds <- partitioned_space %>% filter(.data$dimension == dimension_of_interest) %>% # this is a small optimization - equal bounds are redundant distinct() %>% arrange(.data$bound) # there should always be 2 or more, since each bound corresponds to hyperrectangle edge stopifnot(nrow(dimension_bounds) > 1) # subspace meaning everything outside the dimension of interest partitioned_subspace <- partitioned_space %>% filter(.data$dimension != dimension_of_interest) # recursively build ranges from the subspace before tacking on ranges for the dimension of interest in this stack frame subspace_volumes <- 
generate_volumes_from_partitioned_space(partitioned_subspace, id_starter = id_starter) # "expanded" by the dimension of interest, that is expanded_volumes <- data.frame() for (bound_ix in 1:(nrow(dimension_bounds) - 1)) { # note that we are iterating on the sorted bounds lower_bound <- dimension_bounds$bound[bound_ix] upper_bound <- dimension_bounds$bound[bound_ix + 1] if (nrow(subspace_volumes) == 0) { # case this is the first dimension - there's nothing to add onto volume_id <- paste0(id_starter, '_', dimension_of_interest, '_', bound_ix) new_dimension_bounds <- list(volume_id = volume_id, min = lower_bound, max = upper_bound, dimension = dimension_of_interest) } else { # case this is after the first dimension - create a new volume for each subspace volume with the new bounds added (cartesian product) new_dimension_bounds <- lapply(unique(subspace_volumes$volume_id), function(volume_id) { list(volume_id = paste0(volume_id, '_', dimension_of_interest, '_', bound_ix), # TODO this scheme may not produce unique ids for carefully constructed feature names min = lower_bound, max = upper_bound, dimension = dimension_of_interest) }) %>% bind_rows() %>% rbind(subspace_volumes %>% mutate(volume_id = paste0(.data$volume_id, '_', dimension_of_interest, '_', bound_ix)), stringsAsFactors= FALSE) } expanded_volumes <- rbind(expanded_volumes, new_dimension_bounds, stringsAsFactors = FALSE) } return(expanded_volumes) } #' @import fuzzyjoin prune_noncovering_volumes <- function(new_volumes, original_volumes) { # we left join because not all new volumes belong to all old volumes # the range join prescribes that the original volumes contains the new volume original_to_new_volumes <- fuzzy_left_join(original_volumes, new_volumes, by = c('min' = 'min', 'max' = 'max', 'dimension' = 'dimension'), match_fun = c(`<=`, `>=`, `==`)) %>% # renaming some things in a reasonable way mutate(dimension = .data$dimension.x) %>% select(-.data$dimension.x, -.data$dimension.y) covering_volumes <- data.frame() for (new_volume_id_to_check in unique(new_volumes$volume_id)) { volume <- new_volumes %>% filter(.data$volume_id == new_volume_id_to_check) in_covering_space <- FALSE for (original_volume_id_to_check in unique(original_volumes$volume_id)) { original_volume_to_check <- original_to_new_volumes %>% filter(.data$volume_id.x == original_volume_id_to_check) # here we make sure all dimensions are contained volume_dimensions_contained <- original_to_new_volumes %>% filter(.data$volume_id.x == original_volume_id_to_check & .data$volume_id.y == new_volume_id_to_check) %>% pull(.data$dimension) %>% setequal(original_volume_to_check$dimension) if (volume_dimensions_contained) { in_covering_space <- TRUE break } } if (in_covering_space) { covering_volumes <- rbind(covering_volumes, volume, stringsAsFactors = FALSE) } } covering_volumes } fuse_abutted_hyperrectangles <- function(volumes, original_volumes) { dimensionality <- n_distinct(volumes$dimension) fused_volumes <- volumes fuses_possible <- TRUE fused_volume_unique_id <- 1 while (fuses_possible) { fuses_possible <- FALSE candidate_fuses <- fused_volumes %>% inner_join(fused_volumes, by = c('dimension' = 'dimension', 'max' = 'min'), suffix = c('.left', '.right')) %>% filter(.data$volume_id.left != .data$volume_id.right) %>% # this should only happen if a range is of size 0 mutate( max = .data$max.right # since the left max (where the abuttment happens on the right min) must be less than the right max ) %>% distinct(.data$dimension, .data$volume_id.left, .data$volume_id.right, 
.data$min, .data$max) # note this is a one to many maping, since the originals are overlapped current_volumes_to_original <- fused_volumes %>% fuzzy_inner_join(original_volumes, by = c('min' = 'min', 'max' = 'max', 'dimension' = 'dimension'), match_fun = c(`>=`, `<=`, `==`)) %>% group_by(.data$volume_id.x, .data$volume_id.y) %>% filter(n_distinct(.data$dimension.x) == dimensionality) %>% summarize( volume_id = .data$volume_id.x[1], original_volume_id = .data$volume_id.y[1] ) %>% ungroup() %>% select(.data$volume_id, .data$original_volume_id) for (candidate_fuse_ix in seq_len(nrow(candidate_fuses))) { candidate_fuse <- candidate_fuses[candidate_fuse_ix, ] # subvolume because we ignore the dimension of the fuse subvolume_left <- fused_volumes %>% filter(.data$volume_id == candidate_fuse$volume_id.left & .data$dimension != candidate_fuse$dimension) subvolume_right <- fused_volumes %>% filter(.data$volume_id == candidate_fuse$volume_id.right & .data$dimension != candidate_fuse$dimension) # this case implies the volume has already been joined # meaning the candidate fuse may not be valid any longer - catch it next iteration if (nrow(subvolume_left) == 0 || nrow(subvolume_right) == 0) { next() } stopifnot(nrow(subvolume_left) == nrow(subvolume_right) && n_distinct(subvolume_left$dimension) == n_distinct(subvolume_right$dimension)) dimension_matches <- subvolume_left %>% inner_join(subvolume_right, by = c('dimension' = 'dimension', 'min' = 'min', 'max' = 'max')) original_volume_counts <- current_volumes_to_original %>% filter(.data$volume_id %in% c(candidate_fuse$volume_id.left, candidate_fuse$volume_id.right)) %>% group_by(.data$original_volume_id) %>% count() %>% pull(.data$n) if (nrow(dimension_matches) == dimensionality - 1 && all(original_volume_counts == 2)) { # fuses_possible <- TRUE # add in the new volume fused_volume <- rbind( dimension_matches %>% select(.data$min, .data$max, .data$dimension), candidate_fuse %>% select(.data$min,.data$max, .data$dimension), stringsAsFactors = FALSE) fused_volume$volume_id <- paste0(candidate_fuse$volume_id.left, '_', candidate_fuse$volume_id.right, '_', as.character(fused_volume_unique_id)) fused_volume_unique_id <- fused_volume_unique_id + 1 fused_volumes <- rbind(fused_volumes, fused_volume, stringsAsFactors = FALSE) # clean up the old volumes fused_volumes <- fused_volumes %>% filter(.data$volume_id != candidate_fuse$volume_id.left & .data$volume_id != candidate_fuse$volume_id.right) } } } return(fused_volumes) } deoverlap_hyperrectangles <- function(volumes) { partitioned_space <- build_fully_partitioned_space(volumes) new_volumes <- generate_volumes_from_partitioned_space(partitioned_space) solution <- prune_noncovering_volumes(new_volumes, volumes) fuse_abutted_hyperrectangles(solution, volumes) } xrf_deoverlap_rules <- function(rules) { volumes <- build_volumes_from_xrf_rules(rules) deoverlapped_volumes <- data.frame() for (deoverlap_space_id in unique(volumes$space_id)) { volumes_in_space <- volumes %>% filter(deoverlap_space_id == .data$space_id) %>% select(-.data$space_id) deoverlapped_volumes <- rbind(deoverlapped_volumes, deoverlap_hyperrectangles(volumes_in_space), stringsAsFactors = FALSE) } build_xrf_rules_from_volumes(deoverlapped_volumes) }
/scratch/gouwar.j/cran-all/cranData/xrf/R/hyperrectangle_overlap.R
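# A hedged sketch of the rule format consumed by xrf_deoverlap_rules() above
# (internal helpers, hence :::). Each row is one boolean clause of a conjunctive
# rule, evaluated as: feature (less_than ? < : >=) split. The two rules below
# overlap on x in [2, 5) and come back re-expressed as non-overlapping splits.
library(xrf)
rules <- data.frame(
  rule_id   = c("r1", "r1", "r2"),
  feature   = c("x", "x", "x"),
  split     = c(1, 5, 2),            # r1: x >= 1 & x < 5;  r2: x >= 2
  less_than = c(FALSE, TRUE, FALSE),
  stringsAsFactors = FALSE
)
xrf:::xrf_deoverlap_rules(rules)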
get_response <- function(formula) {
  # this code may execute before xrf_preconditions, so the exception originates here
  if (length(formula) != 3) {
    stop('Supplied formula does not appear to follow expected form of response ~ predictors')
  }
  all.vars(formula[[2]])
}

get_predictors <- function(formula) {
  all.vars(formula[[3]])
}

expand_formula <- function(formula, data) {
  expanded_formula <- terms(formula, data = data)
  formula_terms <- attr(expanded_formula, 'term.labels')
  response <- all.vars(expanded_formula)[1]
  as.formula(paste0(response, '~', paste0(formula_terms, collapse = '+')))
}

add_predictors <- function(base_formula, new_predictors) {
  if (length(new_predictors) == 0) {
    return(base_formula)
  }
  new_part <- paste(new_predictors, collapse = ' + ')
  base_formula_char <- Reduce(paste, deparse(base_formula))
  as.formula(paste0(as.character(base_formula_char), ' + ', new_part))
}

startsWith <- function(base, prefix) {
  substr(base, 1, nchar(prefix)) == prefix
}

lstrip <- function(full, to_remove) {
  sub(sprintf("^%s", to_remove), "", full)
}
/scratch/gouwar.j/cran-all/cranData/xrf/R/utils.R
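# A brief illustration of the formula helpers above (internal to xrf, hence :::).
# add_predictors() appends extra terms to an existing formula, and
# expand_formula() resolves a '.' against the columns of a data frame; the
# "rule1"/"rule2" terms are placeholder names.
library(xrf)
f <- mpg ~ wt + hp
xrf:::add_predictors(f, c("rule1", "rule2"))   # mpg ~ wt + hp + rule1 + rule2
xrf:::expand_formula(mpg ~ ., mtcars)          # '.' expanded to the remaining columns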
############################################# ## functions for preconditions on user input ############################################# condition_xgb_control <- function(family, xgb_control, data, response_var, prefit_xgb) { # this is a duplicated but necessary check if (!(response_var %in% colnames(data))) { stop(paste0('Response variable "', response_var, '" not present in supplied data')) } data_mutated <- data if (family == 'multinomial' && is.null(xgb_control$num_class) && is.null(prefit_xgb)) { n_classes <- n_distinct(data[[response_var]]) warning(paste0('Detected ', as.character(n_classes), ' classes to set num_class xgb_control parameter')) xgb_control$num_class <- n_distinct(data[[response_var]]) } # xgboost expects multinomial labels to be 0:num_class if (family == 'multinomial' && (is.factor(data[[response_var]]) || is.character(data[[response_var]]))) { integer_response <- as.integer(as.factor(data[[response_var]])) data_mutated[[response_var]] <- integer_response - min(integer_response) } else if (family == 'binomial' && is.factor(data[[response_var]]) || is.character(data[[response_var]])) { integer_response <- as.integer(as.factor(data[[response_var]])) data_mutated[[response_var]] <- integer_response - min(integer_response) } list(xgb_control = xgb_control, data = data_mutated) } xrf_preconditions <- function(family, xgb_control, glm_control, data, response_var, prefit_xgb) { supported_families <- c('gaussian', 'binomial', 'multinomial') if (!(family %in% supported_families)) { stop(paste0('Family "', family, '" is not currently supported. Supported families are: ', paste0(supported_families, collapse = ', '))) } if (!('nrounds' %in% names(xgb_control))) { stop('Must supply an "nrounds" list element to the xgb_control argument') } if ('objective' %in% names(xgb_control)) { stop('User may not supply an "objective" list element to the xgb_control argument') } if (!(response_var %in% colnames(data))) { stop(paste0('Response variable "', response_var, '" not present in supplied data')) } if (any(is.na(data[[response_var]]))) { stop('Response variable contains missing values which is not allowed') } if (n_distinct(data[[response_var]]) <= 1) { # TODO cv.glmnet will still warn/fail when there is a very small number of observations per class for logistic regression stop('Response variable shows no variation, model cannot be fit') } if (family == 'multinomial' && ((is.null(xgb_control$num_class) || n_distinct(data[[response_var]]) != xgb_control$num_class) && is.null(prefit_xgb))) { stop('Must supply a num_class list element in xgb_control when using multinomial objective') } if (length(intersect(c('type.measure', 'nfolds', 'foldid'), names(glm_control))) < 2) { stop('Must supply "type.measure" and ("nfolds" or "foldid") as glm_control parameters') } allowed_tree_ensemble_classes <- c('xgb.Booster') if (!is.null(prefit_xgb) && length(intersect(allowed_tree_ensemble_classes, class(prefit_xgb))) == 0) { stop('Prefit tree ensemble must be of class {', paste0(allowed_tree_ensemble_classes, collapse = ','), "}") } features_with_commas <- grepl(',', colnames(data), fixed = TRUE) if (any(features_with_commas)) { feature_names <- colnames(data)[features_with_commas] stop(paste0('The following column names contain illegal characters: ', paste0("'", features_with_commas, "'", collapse = ','))) } } ## the choice of ensemble loss is currently hidden from the api to protect implementation details ## this may be exposed to the user in the future get_xgboost_objective <- function(family) { if 
(family == 'gaussian') { return('reg:squarederror') } else if (family == 'binomial') { return('binary:logistic') } else if (family == 'multinomial') { return('multi:softmax') } stop(paste0('Unrecognized family ', family, ' which should have failed fast in preconditions')) } ############################################# ## functions for extracting xgboost rule sets ############################################# augment_rules <- function(row, rule_ids, less_than) { bind_rows( lapply(rule_ids, function(rule_id) { list( split_id = row$ID, rule_id = rule_id, feature = row$Feature, split = row$Split, less_than = less_than)} ) ) } # this is of course slow, but it shouldn't be a bottleneck due to ensembles generally small and tree depth < 6 rule_traverse <- function(row, tree) { if (row$Feature == 'Leaf') { return(data.frame( split_id = row$ID, rule_id = paste0('r', gsub('-', '_', row$ID)), # leaf nodes uniquely identify a rule feature = NA, split = NA, less_than = NA, stringsAsFactors = FALSE)) } else { # the Yes/No obfuscates the simplicity of the algo - in order tree traversal left_child <- tree[tree$ID == row$Yes,] stopifnot(nrow(left_child) == 1) # this can be trusted from xgboost, but fail if that changes right_child <- tree[tree$ID == row$No,] stopifnot(nrow(right_child) == 1) # recursion will bubble up the conjunctive rule to this split left_rules <- rule_traverse(left_child, tree) right_rules <- rule_traverse(right_child, tree) left_rules_augmented <- augment_rules(row, unique(left_rules$rule_id), less_than = TRUE) right_rules_augmented <- augment_rules(row, unique(right_rules$rule_id), less_than = FALSE) return(rbind(left_rules_augmented, right_rules_augmented, left_rules, right_rules, stringsAsFactors = FALSE)) } } # note that xgboost produces nrounds * classes number of trees for multi-class classification - we proceed by extracting all rules from all trees, treating them equally # this function produces a data frame with columns rule_id (which idenitifies a total conjunction), feature, split, and less_than # each row represents an individual boolean clause evaluated by feature (less_than ? 
< : >=) split extract_xgb_rules <- function(m) { rules <- xgb.model.dt.tree(model = m) %>% group_by(.data$Tree) %>% arrange(.data$Node) %>% # put the root at the top of each tree group do(harvested_rules = rule_traverse(.data[1, ], .data) %>% filter(!is.na(.data$feature))) %>% pull(.data$harvested_rules) %>% lapply(drop_zero_row_tbl) %>% bind_rows() rules } drop_zero_row_tbl <- function(tbl) { if (nrow(tbl) == 0) { return(NULL) } tbl } ################################################## ## functions for parsing out model matrix features ################################################## build_feature_metadata <- function(data) { all_features <- data.frame(feature_name = colnames(data), stringsAsFactors = FALSE) feature_metadata <- all_features %>% mutate( is_continuous = sapply(.data$feature_name, function(fname){ is.numeric(data[[fname]]) }) ) xlev <- data %>% select_if(function(x) { !is.numeric(x) }) %>% lapply(function(x) { if(is.factor(x)) levels(x) else as.character(unique(x)) }) list( xlev = xlev, feature_metadata = feature_metadata ) } has_matching_level <- function(feature_name, level_remainder, xlev) { for (ix in seq_along(feature_name)) { fn <- feature_name[ix] lr <- level_remainder[ix] if (lr %in% xlev[[fn]]) { return(TRUE) } } return(FALSE) } correct_xgb_sparse_categoricals <- function(rules, feature_metadata, xlev, # .5 matches what xgboost does with dense matrices categorical_split_value = .5) { if (nrow(rules) == 0) { return(rules) } for (row_ix in 1:nrow(rules)) { feature_level <- rules[row_ix, 'feature'] classified_features <- feature_metadata %>% mutate( level_remainder = sapply(.data$feature_name, function(fn){ lstrip(feature_level, fn) }), may_be_rule_feature = sapply(.data$feature_name, function(fn) { !startsWith(feature_level, fn) }) ) feature_level_matches <- classified_features %>% filter(!.data$may_be_rule_feature) %>% filter(.data$level_remainder == '' | has_matching_level(.data$feature_name, .data$level_remainder, xlev)) if (nrow(feature_level_matches) > 1) { # this means that several feaures and their levels may be concatenated to produce the same column name # e.g. feature "ora" with level "nge" and another feature "oran" with level "ge". or even a continuous with name "orange" stop(paste0('In attempting to parse sparse design matrix columns, several feature/level matches found for: "', feature_level, '". Conservatively failing to user to change feature/level names or use dense matrices.')) } else if (nrow(feature_level_matches) == 0) { # the feature couldn't be found. this is usually because a transformation was applied via the formula stop(paste0('In attempting to parse sparse design matrix columns, no feature/level matches found for: "', feature_level, '". This is often caused by supplying a transformation in the input formula. User may either transform source data and use main effects only formula or set argument sparse=FALSE.')) } if (!feature_level_matches$is_continuous) { # xgb always makes the split value negative, so that "Missing" (= 0 one-hot) really maps to "Yes" (the left, less than split) # and the right, greater than split (1 one-hot) maps to "No" # as such, we don't have to invert the inequality ("less_than") # of course, this is reliant on, as far as I can tell, undocumented/unspecified behavior in XGBoost. So the durability isn't great, but: # 1. it doesn't seem liable to change (https://github.com/dmlc/xgboost/issues/1112) # 2. 
that lack of specification (dare I call it a bug) is the whole reason we have to do this exercise in xrf rules[row_ix, 'split'] <- categorical_split_value } } rules } ############################################# ## functions for evaluating rulesets ############################################# evaluate_rules <- function(rules, data) { per_rule_evaluation <- rules %>% group_by(.data$rule_id) %>% do ( rule_evaluation = sapply(1:nrow(.data), function(split_ix) { split <- .data[split_ix, ] feature_ix <- which(split$feature == colnames(data)) if (length(feature_ix) == 0) { stop(paste0('Feature "', split$feature, '" from ruleset is not present in the input data to be evaluated')) } else if (length(feature_ix) > 1) { stop(paste0('Unexpectedly found two columns with the same name in input data (user must resolve): ', split$feature)) } split_comparison <- data[, feature_ix] < split$split return(split_comparison == split$less_than) }) %>% apply(1, all) %>% as.integer() %>% data.frame() ) rule_features <- bind_cols(per_rule_evaluation$rule_evaluation, .name_repair = "minimal") colnames(rule_features) <- per_rule_evaluation$rule_id rule_features } evaluate_rules_dense_only <- function(rules, data) { data_df <- as.data.frame(data) per_rule_evaluation <- rules %>% group_by(.data$rule_id) %>% do ( # yes, this is gross # yes, this is fast rule_evaluation = eval(parse(text=paste0( paste0('`', .data$feature, '`'), ifelse(.data$less_than, ' < ', ' >= '), .data$split, collapse = ' & ' )), data_df) %>% as.integer() %>% data.frame() ) rule_features <- bind_cols(per_rule_evaluation$rule_evaluation, .name_repair = "minimal") colnames(rule_features) <- per_rule_evaluation$rule_id rule_features } ############################################# ## functions for cleaning up evaluated rules ############################################# # returns the list of rules with non-zero variance # if, by an unexpected outcome of the tree fitting process, a rule shows no variance, remove it remove_no_variance_rules <- function(evaluated_rules) { keep_columns <- sapply(evaluated_rules, function(feature) { length(unique(feature)) > 1 }) return(colnames(evaluated_rules)[keep_columns]) } # removes any exactly equal rules dedupe_train_rules <- function(evaluated_rules) { as.matrix(evaluated_rules) %>% unique(MARGIN=2) %>% colnames() } #' Fit an eXtreme RuleFit model #' #' S3 method for building an "eXtreme RuleFit" model. #' See \code{\link{xrf.formula}} for preferred entry point #' #' @param object an object describing the model to be fit #' @param ... additional arguments #' #' @examples #' m <- xrf(Petal.Length ~ ., iris, #' xgb_control = list(nrounds = 2, max_depth = 2), #' family = 'gaussian') #' #' @export xrf <- function(object, ...) { UseMethod('xrf', object) } #' Fit an eXtreme RuleFit model #' #' See Friedman & Popescu (2008) for a description of the general RuleFit algorithm. #' This method uses XGBoost to fit a tree ensemble, extracts a ruleset as the conjunction of tree #' traversals, and fits a sparse linear model to the resulting feature set #' (including the original feature set) using glmnet. #' #' @param object a formula prescribing features to use in the model. transformation of the response variable is not supported. when using transformations on the input features (not suggested in general) it is suggested to set sparse=F #' @param data a data frame with columns corresponding to the formula #' @param family the family of the fitted model. 
one of 'gaussian', 'binomial', 'multinomial' #' @param xgb_control a list of parameters for xgboost. must supply an nrounds argument #' @param glm_control a list of parameters for the glmnet fit. must supply a type.measure and nfolds arguments (for the lambda cv) #' @param sparse whether a sparse design matrix should be used #' @param prefit_xgb an xgboost model (of class xgb.Booster) to be used instead of the model that \code{xrf} would normally fit #' @param deoverlap if true, the tree derived rules are deoverlapped, in that the deoverlapped rule set contains no overlapped rules #' @param ... ignored arguments #' #' @importFrom xgboost xgboost #' @importFrom xgboost xgb.model.dt.tree #' @import dplyr #' @importFrom Matrix sparse.model.matrix #' @importFrom rlang .data #' @importFrom stats as.formula #' @importFrom stats coef #' @importFrom stats delete.response #' @importFrom stats model.matrix #' @importFrom stats predict #' @importFrom stats terms #' @importFrom stats update #' #' @references #' Friedman, J. H., & Popescu, B. E. (2008). Predictive learning via rule #' ensembles. \emph{The Annals of Applied Statistics, 2}(3), 916-954. #' #' @examples #' m <- xrf(Petal.Length ~ ., iris, #' xgb_control = list(nrounds = 2, max_depth = 2), #' family = 'gaussian') #' #' @export xrf.formula <- function(object, data, family, xgb_control = list(nrounds = 100, max_depth = 3), glm_control = list(type.measure = 'deviance', nfolds = 5), sparse = TRUE, prefit_xgb = NULL, deoverlap = FALSE, ...) { expanded_formula <- expand_formula(object, data) response_var <- get_response(expanded_formula) xgboost_conditioned <- condition_xgb_control(family, xgb_control, data, response_var, prefit_xgb) xgb_control <- xgboost_conditioned$xgb_control data <- xgboost_conditioned$data xrf_preconditions(family, xgb_control, glm_control, data, response_var, prefit_xgb) model_matrix_method <- if (sparse) sparse.model.matrix else model.matrix design_matrix <- model_matrix_method(expanded_formula, data) nrounds <- xgb_control$nrounds # necessary to remove from params to avoid false positive warnings xgb_control <- within(xgb_control, rm(nrounds)) if (is.null(prefit_xgb)) { m_xgb <- xgboost(data = design_matrix, label = data[[response_var]], nrounds = nrounds, objective = get_xgboost_objective(family), params = xgb_control, verbose = 0) rules <- extract_xgb_rules(m_xgb) } else { m_xgb <- prefit_xgb rules <- extract_xgb_rules(m_xgb) if (length(setdiff(rules$feature, colnames(design_matrix))) > 0) { stop('prefit_xgb contains features (or factor-levels) not present in the input training data. This is currently not supported.') # one simple approach would be to simply remove these feature splits from the rules # but that potentially dilutes the power of this method. 
for now, it's on the user to rectify this issue } } if (sparse) { feature_metadata <- build_feature_metadata(data) rules <- correct_xgb_sparse_categoricals(rules, feature_metadata$feature_metadata, feature_metadata$xlev) } if (deoverlap){ rules <- xrf_deoverlap_rules(rules) %>% select(.data$rule_id, .data$feature, .data$split, .data$less_than) } rule_features <- if (sparse) evaluate_rules(rules, design_matrix) else evaluate_rules_dense_only(rules, design_matrix) varying_rules <- remove_no_variance_rules(rule_features) rule_features <- rule_features[, varying_rules] rules <- rules %>% filter(.data$rule_id %in% varying_rules) non_duplicate_rules <- dedupe_train_rules(rule_features) rule_features <- rule_features[, non_duplicate_rules] rules <- rules %>% filter(.data$rule_id %in% non_duplicate_rules) overlapped_feature_names <- intersect(colnames(rule_features), colnames(data)) if (length(overlapped_feature_names) > 0) { warning(paste0('Found the following overlapped raw feature & rule names (the rule features will be dropped): ', paste(overlapped_feature_names, collapse = ','))) rule_features <- rule_features[, !(colnames(rule_features) %in% overlapped_feature_names)] } # todo we already have a design matrix, so re-generating it with glmnot is a bit wasteful full_data <- cbind(data, rule_features, stringsAsFactors = FALSE) # todo glmnet is a bottleneck on data size - it may be interesting to fit the glm to much larger data, e.g. with spark or biglasso full_formula <- add_predictors(expanded_formula, colnames(rule_features)) # glmnet automatically adds an intercept full_formula <- update(full_formula, . ~ . -1) m_glm <- glmnot(full_formula, full_data, family = family, alpha = 1, # this specifies the LASSO sparse = sparse, glm_control = glm_control) structure(list(glm = m_glm, xgb = m_xgb, base_formula = expanded_formula, rule_augmented_formula = full_formula, rules = rules), class = 'xrf') } #' Generate the design matrix from an eXtreme RuleFit model #' #' @param object an object of class "xrf" #' @param data data to generate design matrix from #' @param sparse a logical indicating whether a sparse design matrix should be used #' @param ... ignored arguments #' #' @importFrom Matrix sparse.model.matrix #' #' @examples #' m <- xrf(Petal.Length ~ ., iris, #' xgb_control = list(nrounds = 2, max_depth = 2), #' family = 'gaussian') #' design <- model.matrix(m, iris, sparse = FALSE) #' #' @export model.matrix.xrf <- function(object, data, sparse = TRUE, ...) { # TODO: handle missing factor levels more elegantly (both for rule evaluation & glmnet) # TODO: when rules have a zero coefficient and we just want to predict, we don't need to evaluate them stopifnot(is.data.frame(data)) trms <- terms(object$base_formula) trms <- delete.response(trms) design_matrix_method <- if (sparse) sparse.model.matrix else model.matrix raw_design_matrix <- design_matrix_method(trms, data) rules_features <- if (sparse) evaluate_rules(object$rules, raw_design_matrix) else evaluate_rules_dense_only(object$rules, raw_design_matrix) full_data <- cbind(data, rules_features, stringsAsFactors = FALSE) full_data } #' Draw predictions from a RuleFit xrf model #' #' @param object an object of class "xrf" #' @param newdata data to predict on #' @param sparse a logical indicating whether a sparse design matrix should be used #' @param lambda the lasso penalty parameter to be applied #' @param type the type of predicted value produced #' @param ... 
ignored arguments #' #' @examples #' m <- xrf(Petal.Length ~ ., iris, #' xgb_control = list(nrounds = 2, max_depth = 2), #' family = 'gaussian') #' predictions <- predict(m, iris) #' #' @export predict.xrf <- function(object, newdata, sparse = TRUE, lambda = 'lambda.min', type = 'response', ...) { stopifnot(is.data.frame(newdata)) full_data <- model.matrix(object, newdata, sparse) predict(object$glm, newdata = full_data, sparse = sparse, lambda = lambda, type = type) } synthesize_conjunctions <- function(rules) { rules %>% group_by(.data$rule_id)%>% arrange(.data$feature, .data$split) %>% summarize( conjunction = paste0( .data$feature, ifelse(.data$less_than, '<', '>='), format(.data$split, scientific = FALSE, digits = 4), collapse = ' & ' ) ) } #' Produce rules & coefficients for the RuleFit model #' #' @param object an object of class "xrf" #' @param lambda the lasso penalty parameter to be applied as in 'glmnet' #' @param ... ignored arguments #' #' @examples #' m <- xrf(Petal.Length ~ ., iris, #' xgb_control = list(nrounds = 2, max_depth = 2), #' family = 'gaussian') #' linear_model_coefficients <- coef(m, lambda = 'lambda.1se') #' #' @export coef.xrf <- function(object, lambda = 'lambda.min', ...) { rule_conjunctions <- synthesize_conjunctions(object$rules) glm_coefficients <- coef(object$glm, s = lambda) glm_df <- as.data.frame(as.matrix(glm_coefficients)) colnames(glm_df) <- sapply(lambda, function(lambda_value) { paste0('coefficient_', lambda) }) glm_df$term <- rownames(glm_df) rownames(glm_df) <- NULL glm_df %>% left_join(rule_conjunctions, by = c('term' = 'rule_id')) %>% arrange_at(colnames(glm_df[1])) %>% mutate( rule = .data$conjunction ) %>% select(-.data$conjunction) } #' Summarize an eXtreme RuleFit model #' #' @param object an object of class "xrf" #' @param ... ignored arguments #' #' @import dplyr #' @importFrom methods show #' #' @examples #' m <- xrf(Petal.Length ~ ., iris, #' xgb_control = list(nrounds = 2, max_depth = 2), #' family = 'gaussian') #' summary(m) #' #' @export summary.xrf <- function(object, ...) { cat(paste0('An eXtreme RuleFit model of ', n_distinct(object$rules$rule_id), ' rules.')) cat(paste0('\n\nOriginal Formula:\n\n')) cat(smaller_formula(object$base_formula)) cat('\n\nTree model:\n\n') show(summary(object$xgb)) cat('\n\nGLM:\n\n') show(summary(object$glm)) } #' Print an eXtreme RuleFit model #' #' @param x an object of class "xrf" #' @param ... ignored arguments #' #' @examples #' m <- xrf(Petal.Length ~ ., iris, #' xgb_control = list(nrounds = 2, max_depth = 2), #' family = 'gaussian') #' print(m) #' #' @export print.xrf <- function(x, ...) { cat(paste0('An eXtreme RuleFit model of ', n_distinct(x$rules$rule_id), ' rules.')) cat(paste0('\n\nOriginal Formula:\n\n')) cat(smaller_formula(x$base_formula), "\n") } smaller_formula <- function(x, ...) { chr_form <- deparse(x, width.cutoff = getOption("width") - 12) if (length(chr_form) > 1) { chr_form <- paste0(chr_form[1], "[truncated]") } chr_form }
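## Illustrative end-to-end sketch, appended here for exposition only (not part of the
## package sources). It mirrors the roxygen examples above: fit an eXtreme RuleFit model,
## pull the LASSO-selected rules via coef(), and predict on the training data.
library(xrf)

m_example <- xrf(Petal.Length ~ ., iris,
                 xgb_control = list(nrounds = 20, max_depth = 2),
                 family = 'gaussian')

# coef() returns one row per term; the first column holds the penalized coefficient,
# and the 'rule' column spells out the conjunction behind each rule feature.
rule_coefs <- coef(m_example, lambda = 'lambda.1se')
selected_rules <- rule_coefs[rule_coefs[, 1] != 0, ]
head(selected_rules)

# Predictions at the default penalty value ('lambda.min').
preds <- predict(m_example, iris)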
/scratch/gouwar.j/cran-all/cranData/xrf/R/xrf.R
# Generated by using Rcpp::compileAttributes() -> do not edit by hand # Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393 computeResponseRcpp <- function(X, mattype_x, Fixed, beta0, betas, gammas, response_type, family) { .Call(`_xrnet_computeResponseRcpp`, X, mattype_x, Fixed, beta0, betas, gammas, response_type, family) } fitModelCVRcpp <- function(x, mattype_x, y, ext, is_sparse_ext, fixed, weights_user, intr, stnd, penalty_type, cmult, quantiles, num_penalty, penalty_ratio, penalty_user, penalty_user_ext, lower_cl, upper_cl, family, user_loss, test_idx, thresh, maxit, ne, nx) { .Call(`_xrnet_fitModelCVRcpp`, x, mattype_x, y, ext, is_sparse_ext, fixed, weights_user, intr, stnd, penalty_type, cmult, quantiles, num_penalty, penalty_ratio, penalty_user, penalty_user_ext, lower_cl, upper_cl, family, user_loss, test_idx, thresh, maxit, ne, nx) } fitModelRcpp <- function(x, mattype_x, y, ext, is_sparse_ext, fixed, weights_user, intr, stnd, penalty_type, cmult, quantiles, num_penalty, penalty_ratio, penalty_user, penalty_user_ext, lower_cl, upper_cl, family, thresh, maxit, ne, nx) { .Call(`_xrnet_fitModelRcpp`, x, mattype_x, y, ext, is_sparse_ext, fixed, weights_user, intr, stnd, penalty_type, cmult, quantiles, num_penalty, penalty_ratio, penalty_user, penalty_user_ext, lower_cl, upper_cl, family, thresh, maxit, ne, nx) }
/scratch/gouwar.j/cran-all/cranData/xrnet/R/RcppExports.R
#' Get coefficient estimates from "tune_xrnet" model object #' #' @description Returns coefficients from 'xrnet' model. Note that we currently only support returning #' coefficient estimates that are in the original path(s). #' #' @param object A \code{\link{tune_xrnet}} object #' @param p vector of penalty values to apply to predictor variables. #' Default is optimal value in tune_xrnet object. #' @param pext vector of penalty values to apply to external data variables. #' Default is optimal value in tune_xrnet object. #' @param ... pass other arguments to xrnet function (if needed) #' #' @return A list with coefficient estimates at each of the requested penalty combinations #' \item{beta0}{matrix of first-level intercepts indexed by penalty values, NULL if no first-level intercept in original model fit} #' \item{betas}{3-dimensional array of first-level penalized coefficients indexed by penalty values} #' \item{gammas}{3-dimensional array of first-level non-penalized coefficients indexed by penalty values, NULL if unpen NULL in original model fit} #' \item{alpha0}{matrix of second-level intercepts indexed by penalty values, NULL if no second-level intercept in original model fit} #' \item{alphas}{3-dimensional array of second-level external data coefficients indexed by penalty values, NULL if external NULL in original model fit} #' @examples #' ## cross validation of hierarchical linear regression model #' data(GaussianExample) #' #' ## 5-fold cross validation #' cv_xrnet <- tune_xrnet( #' x = x_linear, #' y = y_linear, #' external = ext_linear, #' family = "gaussian", #' control = xrnet.control(tolerance = 1e-6) #' ) #' #' ## Get coefficient estimates at optimal penalty combination #' coef_opt <- coef(cv_xrnet) #' #' @export coef.tune_xrnet <- function(object, p = "opt", pext = "opt", ...) { if (p == "opt") p <- object$opt_penalty if (pext == "opt") pext <- object$opt_penalty_ext predict( object$fitted_model, newdata = NULL, newdata_fixed = NULL, p = p, pext = pext, type = "coefficients", ... ) }
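## Illustrative sketch (exposition only): besides the optimal penalty pair, coefficients
## can be extracted at any other penalty values, provided they lie on the originally
## fitted path (values off the path are not supported, as noted above).
library(xrnet)
data(GaussianExample)

cv_xrnet_ex <- tune_xrnet(x = x_linear, y = y_linear, external = ext_linear,
                          family = "gaussian",
                          control = xrnet.control(tolerance = 1e-6))

p_alt    <- cv_xrnet_ex$fitted_model$penalty[5]      # a first-level penalty on the path
pext_alt <- cv_xrnet_ex$fitted_model$penalty_ext[5]  # a second-level penalty on the path
coef_alt <- coef(cv_xrnet_ex, p = p_alt, pext = pext_alt)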
/scratch/gouwar.j/cran-all/cranData/xrnet/R/coef_tune_xrnet.R
#' Get coefficient estimates from "xrnet" model object #' #' @description Returns coefficients from 'xrnet' model. Note that we currently only support returning #' coefficient estimates that are in the original path(s). #' #' @param object A \code{\link{xrnet}} object #' @param p vector of penalty values to apply to predictor variables. #' @param pext vector of penalty values to apply to external data variables. #' @param ... pass other arguments to xrnet function (if needed) #' #' @return A list with coefficient estimates at each of the requested penalty combinations #' \item{beta0}{matrix of first-level intercepts indexed by penalty values, NULL if no first-level intercept in original model fit} #' \item{betas}{3-dimensional array of first-level penalized coefficients indexed by penalty values} #' \item{gammas}{3-dimensional array of first-level non-penalized coefficients indexed by penalty values, NULL if unpen NULL in original model fit} #' \item{alpha0}{matrix of second-level intercepts indexed by penalty values, NULL if no second-level intercept in original model fit} #' \item{alphas}{3-dimensional array of second-level external data coefficients indexed by penalty values, NULL if external NULL in original model fit} #' @examples #' data(GaussianExample) #' #' fit_xrnet <- xrnet( #' x = x_linear, #' y = y_linear, #' external = ext_linear, #' family = "gaussian" #' ) #' #' lambda1 <- fit_xrnet$penalty[10] #' lambda2 <- fit_xrnet$penalty_ext[10] #' #' coef_xrnet <- coef( #' fit_xrnet, #' p = lambda1, #' pext = lambda2, #' ) #' #' #' @export coef.xrnet <- function(object, p = NULL, pext = NULL, ...) { predict( object, newdata = NULL, newdata_fixed = NULL, p = p, pext = pext, type = "coefficients", ... ) }
/scratch/gouwar.j/cran-all/cranData/xrnet/R/coef_xrnet.R
#' Simulated example data for hierarchical regularized linear regression
#'
#' @format A matrix with 100 rows and 50 variables
"x_linear"

#' Simulated outcome data
#'
#' @format A vector with 100 elements
"y_linear"

#' Simulated external data
#'
#' @format A matrix with 50 rows and 4 columns
"ext_linear"
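## Quick illustrative check of the documented dimensions (exposition only; assumes the
## GaussianExample dataset bundled with xrnet, as used in the examples elsewhere).
library(xrnet)
data(GaussianExample)
dim(x_linear)     # 100 x 50 predictor matrix
length(y_linear)  # outcome vector of length 100
dim(ext_linear)   # 50 x 4 external data matrix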
/scratch/gouwar.j/cran-all/cranData/xrnet/R/data.R
#' Define regularization object for predictor and external data #' #' @description Defines regularization for predictors and external data variables in \code{\link{xrnet}} fitting. #' Use helper functions define_lasso, define_ridge, or define_enet to specify a common penalty on x or external. #' #' @param penalty_type type of regularization. Default is 1 (Lasso). #' Can supply either a scalar value or vector with length equal to the number of variables the matrix. #' \itemize{ #' \item 0 = Ridge #' \item (0,1) = Elastic-Net #' \item 1 = Lasso / Quantile #' } #' @param quantile specifies quantile for quantile penalty. Default of 0.5 reduces to lasso (currently not implemented). #' @param num_penalty number of penalty values to fit in grid. Default is 20. #' @param penalty_ratio ratio between minimum and maximum penalty for x. #' Default is 1e-04 if \eqn{n > p} and 0.01 if \eqn{n <= p}. #' @param user_penalty user-defined vector of penalty values to use in penalty path. #' @param custom_multiplier variable-specific penalty multipliers to apply to overall penalty. #' Default is 1 for all variables. 0 is no penalization. #' #' @return A list object with regularization settings that are used to define the regularization #' for predictors or external data in \code{\link{xrnet}} and \code{\link{tune_xrnet}}: #' \item{penalty_type}{The penalty type, scalar with value in range [0, 1].} #' \item{quantile}{Quantile for quantile penalty, 0.5 defaults to lasso (not currently implemented).} #' \item{num_penalty}{The number of penalty values in the penalty path.} #' \item{penalty_ratio}{The ratio of the minimum penalty value compared to the maximum penalty value.} #' \item{user_penalty}{User-defined numeric vector of penalty values, NULL if not provided by user.} #' \item{custom_multiplier}{User-defined feature-specific penalty multipliers, NULL if not provided by user.} #' #' @examples #' #' # define ridge penalty with penalty grid split into 30 values #' my_penalty <- define_penalty(penalty_type = 0, num_penalty = 30) #' #' # define elastic net (0.5) penalty with user-defined penalty #' my_custom_penalty <- define_penalty(penalty_type = 0.5, user_penalty = c(100, 50, 10, 1, 0.1)) #' @export define_penalty <- function(penalty_type = 1, quantile = 0.5, num_penalty = 20, penalty_ratio = NULL, user_penalty = NULL, custom_multiplier = NULL) { if (any(penalty_type < 0) || any(penalty_type > 1)) { stop("Invalid penalty type") } else { penalty_type <- as.double(penalty_type) } if (quantile < 0 || quantile > 1) { stop("Invalid value for quantile, must be between 0 and 1") } else { quantile <- as.double(quantile) } if (is.null(user_penalty)) { user_penalty <- as.double(0) num_penalty <- as.integer(num_penalty) if (!is.null(penalty_ratio)) { if (penalty_ratio <= 0 | penalty_ratio >= 1) { stop("penalty_ratio should be between 0 and 1") } else { penalty_ratio <- as.double(penalty_ratio) } } } else { penalty_ratio <- as.double(0) if (any(user_penalty < 0)) { stop("user_penalty can only contain non-negative values") } user_penalty <- as.double(rev(sort(user_penalty))) num_penalty <- as.integer(length(user_penalty)) } if (!is.null(custom_multiplier) && any(custom_multiplier < 0)) { stop("custom_multiplier can only contain non-negative values") } penalty_obj <- list( penalty_type = penalty_type, quantile = quantile, num_penalty = num_penalty, penalty_ratio = penalty_ratio, user_penalty = user_penalty, custom_multiplier = custom_multiplier ) } #' Define lasso regularization object for predictor and external data #' #' 
@description Helper function to define a lasso penalty regularization object. #' See \code{define_penalty} for more details. #' #' @param num_penalty number of penalty values to fit in grid. Default is 20. #' @param penalty_ratio ratio between minimum and maximum penalty for x. #' Default is 1e-04 if \eqn{n > p} and 0.01 if \eqn{n <= p}. #' @param user_penalty user-defined vector of penalty values to use in penalty path. #' @param custom_multiplier variable-specific penalty multipliers to apply to overall penalty. #' Default is 1 for all variables. 0 is no penalization. #' @return A list object with regularization settings that are used to define the regularization #' for predictors or external data in \code{\link{xrnet}} and \code{\link{tune_xrnet}}. The list #' elements will match those returned by \code{\link{define_penalty}}, but with the penalty_type #' automatically set to 1. #' @export define_lasso <- function(num_penalty = 20, penalty_ratio = NULL, user_penalty = NULL, custom_multiplier = NULL) { define_penalty( penalty_type = 1, quantile = 0.5, num_penalty = num_penalty, penalty_ratio = penalty_ratio, user_penalty = user_penalty, custom_multiplier = custom_multiplier ) } #' Define ridge regularization object for predictor and external data #' #' @description Helper function to define a ridge penalty regularization object. #' See \code{define_penalty} for more details. #' #' @param num_penalty number of penalty values to fit in grid. Default is 20. #' @param penalty_ratio ratio between minimum and maximum penalty for x. #' Default is 1e-04 if \eqn{n > p} and 0.01 if \eqn{n <= p}. #' @param user_penalty user-defined vector of penalty values to use in penalty path. #' @param custom_multiplier variable-specific penalty multipliers to apply to overall penalty. #' Default is 1 for all variables. 0 is no penalization. #' #' @return A list object with regularization settings that are used to define the regularization #' for predictors or external data in \code{\link{xrnet}} and \code{\link{tune_xrnet}}. The list #' elements will match those returned by \code{\link{define_penalty}}, but with the penalty_type #' automatically set to 0. #' @export define_ridge <- function(num_penalty = 20, penalty_ratio = NULL, user_penalty = NULL, custom_multiplier = NULL) { define_penalty( penalty_type = 0, quantile = 0.5, num_penalty = num_penalty, penalty_ratio = penalty_ratio, user_penalty = user_penalty, custom_multiplier = custom_multiplier ) } #' Define elastic net regularization object for predictor and external data #' #' @description Helper function to define a elastic net penalty regularization object. #' See \code{define_penalty} for more details. #' #' @param en_param elastic net parameter, between 0 and 1 #' @param num_penalty number of penalty values to fit in grid. Default is 20. #' @param penalty_ratio ratio between minimum and maximum penalty for x. #' Default is 1e-04 if \eqn{n > p} and 0.01 if \eqn{n <= p}. #' @param user_penalty user-defined vector of penalty values to use in penalty path. #' @param custom_multiplier variable-specific penalty multipliers to apply to overall penalty. #' Default is 1 for all variables. 0 is no penalization. #' #' @return A list object with regularization settings that are used to define the regularization #' for predictors or external data in \code{\link{xrnet}} and \code{\link{tune_xrnet}}. The list #' elements will match those returned by \code{\link{define_penalty}}, but with the penalty_type #' set to match the value of \code{en_param}. 
#' @export define_enet <- function(en_param = 0.5, num_penalty = 20, penalty_ratio = NULL, user_penalty = NULL, custom_multiplier = NULL) { define_penalty( penalty_type = en_param, quantile = 0.5, num_penalty = num_penalty, penalty_ratio = penalty_ratio, user_penalty = user_penalty, custom_multiplier = custom_multiplier ) }
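## Illustrative sketch (exposition only): custom_multiplier scales the overall penalty
## per variable, so a multiplier of 0 leaves that variable unpenalized. The dimensions
## below assume the bundled GaussianExample data (50 predictors, 4 external variables).
library(xrnet)
data(GaussianExample)

# Ridge on the predictors, with the first 5 predictors left unpenalized
pen_x <- define_ridge(num_penalty = 20,
                      custom_multiplier = c(rep(0, 5), rep(1, 45)))

# Lasso on the external data
pen_ext <- define_lasso(num_penalty = 20)

fit_custom <- xrnet(x = x_linear, y = y_linear, external = ext_linear,
                    family = "gaussian",
                    penalty_main = pen_x, penalty_external = pen_ext)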
/scratch/gouwar.j/cran-all/cranData/xrnet/R/define_penalty.R
#' Plot k-fold cross-validation error grid #' #' @description Generates plots to visualize the mean cross-validation error. If no external #' data was used in the model fit, a plot of the cross-validated error with standard error #' bars is generated for all penalty values. If external data was used in the model fit, a #' contour plot of the cross-validated errors is created. Error curves can also be #' generated for a fixed value of the primary penalty on x (p) or the external penalty (pext) when #' external data is used. #' #' @param x A tune_xrnet class object #' @param p (optional) penalty value for x (for generating an error curve across external penalties). #' Use value "opt" to use the optimal penalty value. #' @param pext (optional) penalty value for external (for generating an error curve across primary penalties) #' Use value "opt" to use the optimal penalty value. #' @param ... Additional graphics parameters #' #' @return None #' #' @details The parameter values p and pext can be used to generate profiled error curves by fixing either #' the penalty on x or the penalty on external to a fixed value. You cannot specify #' both at the same time as this would only return a single point. #' #' @examples #' #' ## load example data #' data(GaussianExample) #' #' ## 5-fold cross validation #' cv_xrnet <- tune_xrnet( #' x = x_linear, #' y = y_linear, #' external = ext_linear, #' family = "gaussian", #' control = xrnet.control(tolerance = 1e-6) #' ) #' #' ## contour plot of cross-validated error #' plot(cv_xrnet) #' #' ## error curve of external penalties at optimal penalty value #' plot(cv_xrnet, p = "opt") #' #' @export #' @importFrom graphics filled.contour axis points #' @importFrom grDevices colorRampPalette plot.tune_xrnet <- function(x, p = NULL, pext = NULL, ...) 
{ if (is.null(x$fitted_model$alphas) || !is.null(p) || !is.null(pext)) { if (is.null(x$fitted_model$alphas)) { xval <- log(as.numeric(rownames(x$cv_mean))) cverr <- x$cv_mean[, 1] cvsd <- x$cv_sd[, 1] xlab <- "log(Penalty)" xopt_val <- log(x$opt_penalty) } else { if (!is.null(p) && !is.null(pext)) { stop("Please only specify either penalty or penalty_ext, cannot specify both at the same time") } else if (!is.null(p)) { if (p == "opt") { p <- x$opt_penalty } p_idx <- match(p, x$fitted_model$penalty) if (is.na(p_idx)) { stop("The penalty value 'p' is not in the fitted model") } xval <- log(as.numeric(colnames(x$cv_mean))) cverr <- x$cv_mean[p_idx, ] cvsd <- x$cv_sd[p_idx, ] xlab <- "log(External Penalty)" xopt_val <- log(x$opt_penalty_ext) } else { if (pext == "opt") { pext <- x$opt_penalty_ext } pext_idx <- match(pext, x$fitted_model$penalty_ext) if (is.na(pext_idx)) { stop("The penalty value 'p' is not in the fitted model") } xval <- log(as.numeric(rownames(x$cv_mean))) cverr <- x$cv_mean[, pext_idx] cvsd <- x$cv_sd[, pext_idx] xlab <- "log(Penalty)" xopt_val <- log(x$opt_penalty) } } graphics::plot( x = xval, y = cverr, ylab = paste0("Mean CV Error (", x$loss, ")"), xlab = xlab, ylim=range(c(cverr - cvsd, cverr + cvsd)), type = "n" ) graphics::arrows( xval, cverr - cvsd, xval, cverr + cvsd, length = 0.025, angle = 90, code = 3, col = "lightgray" ) graphics::points( x = xval, y = cverr, col = "dodgerblue4", pch = 16, ) graphics::abline(v = xopt_val, col = "firebrick") } else { cvgrid <- x$cv_mean cvgrid <- cvgrid[rev(seq_len(nrow(cvgrid))), ] cvgrid <- cvgrid[ , rev(seq_len(ncol(cvgrid)))] minx <- log(x$opt_penalty_ext) miny <- log(x$opt_penalty) contour_colors <- c("#014636", "#016C59", "#02818A", "#3690C0", "#67A9CF", "#A6BDDB", "#D0D1E6", "#ECE2F0", "#FFF7FB") graphics::filled.contour( x = log(as.numeric(colnames(cvgrid))), y = log(as.numeric(rownames(cvgrid))), z = t(cvgrid), col = colorRampPalette(contour_colors)(25), xlab = "log(External Penalty)", ylab = "log(Penalty)", plot.axes = {axis(1); axis(2); points(minx, miny, col = "red", pch = 16)} ) } }
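## Illustrative sketch (exposition only): the complementary profile to the roxygen example
## above -- fix the external penalty at its optimal value to trace the error curve across
## the first-level penalty path (cv_xrnet as fitted in that example).
plot(cv_xrnet, pext = "opt")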
/scratch/gouwar.j/cran-all/cranData/xrnet/R/plot_tune_xrnet.R
#' Predict function for "tune_xrnet" object #' #' @description Extract coefficients or predict response in new data using fitted model from a \code{\link{tune_xrnet}} object. #' Note that we currently only support returning results that are in the original path(s). #' #' @param object A \code{\link{tune_xrnet}} object #' @param newdata matrix with new values for penalized variables #' @param newdata_fixed matrix with new values for unpenalized variables #' @param p vector of penalty values to apply to predictor variables. #' Default is optimal value in tune_xrnet object. #' @param pext vector of penalty values to apply to external data variables. #' Default is optimal value in tune_xrnet object. #' @param type type of prediction to make using the xrnet model, options include: #' \itemize{ #' \item response #' \item link (linear predictor) #' \item coefficients #' } #' @param ... pass other arguments to xrnet function (if needed) #' #' @return The object returned is based on the value of type as follows: #' \itemize{ #' \item response: An array with the response predictions based on the data for each penalty combination #' \item link: An array with linear predictions based on the data for each penalty combination #' \item coefficients: A list with the coefficient estimates for each penalty combination. See \code{\link{coef.xrnet}}. #' } #' #' @examples #' data(GaussianExample) #' #' ## 5-fold cross validation #' cv_xrnet <- tune_xrnet( #' x = x_linear, #' y = y_linear, #' external = ext_linear, #' family = "gaussian", #' control = xrnet.control(tolerance = 1e-6) #' ) #' #' ## Get coefficients and predictions at optimal penalty combination #' coef_xrnet <- predict(cv_xrnet, type = "coefficients") #' pred_xrnet <- predict(cv_xrnet, newdata = x_linear, type = "response") #' #' @export predict.tune_xrnet <- function(object, newdata = NULL, newdata_fixed = NULL, p = "opt", pext = "opt", type = c("response", "link", "coefficients"), ...) { if (p == "opt") p <- object$opt_penalty if (pext == "opt") pext <- object$opt_penalty_ext predict(object$fitted_model, newdata = newdata, newdata_fixed = newdata_fixed, p = p, pext = pext, type = type, ... ) }
/scratch/gouwar.j/cran-all/cranData/xrnet/R/predict_tune_xrnet.R
#' Predict function for "xrnet" object #' #' @description Extract coefficients or predict response in new data using fitted model from an \code{\link{xrnet}} object. #' Note that we currently only support returning coefficient estimates that are in the original path(s). #' #' @param object A \code{\link{xrnet}} object #' @param newdata matrix with new values for penalized variables #' @param newdata_fixed matrix with new values for unpenalized variables #' @param p vector of penalty values to apply to predictor variables #' @param pext vector of penalty values to apply to external data variables #' @param type type of prediction to make using the xrnet model, options include #' \itemize{ #' \item response #' \item link (linear predictor) #' \item coefficients #' } #' @param ... pass other arguments to xrnet function (if needed) #' #' @return The object returned is based on the value of type as follows: #' \itemize{ #' \item response: An array with the response predictions based on the data for each penalty combination #' \item link: An array with linear predictions based on the data for each penalty combination #' \item coefficients: A list with the coefficient estimates for each penalty combination. See \code{\link{coef.xrnet}}. #' } #' @examples #' data(GaussianExample) #' #' fit_xrnet <- xrnet( #' x = x_linear, #' y = y_linear, #' external = ext_linear, #' family = "gaussian" #' ) #' #' lambda1 <- fit_xrnet$penalty[10] #' lambda2 <- fit_xrnet$penalty_ext[10] #' #' coef_xrnet <- predict( #' fit_xrnet, #' p = lambda1, #' pext = lambda2, #' type = "coefficients" #' ) #' #' pred_xrnet <- predict( #' fit_xrnet, #' p = lambda1, #' pext = lambda2, #' newdata = x_linear, #' type = "response" #' ) #' #' @export predict.xrnet <- function(object, newdata = NULL, newdata_fixed = NULL, p = NULL, pext = NULL, type = c("response", "link", "coefficients"), ...) 
{ if (missing(type)) { type <- "response" } else { type <- match.arg(type) } if (missing(newdata) && !match(type, c("coefficients"), FALSE)) { stop("newdata needs to be specified") } if (is.null(p)){ stop("p not specified") } if (!is.null(object$penalty_ext) && is.null(pext)) { stop("pext not specified") } if (!(all(p %in% object$penalty)) || !(all(pext %in% object$penalty_ext))) { stop("Not all penalty values in path(s), please refit xrnet() model with desired penalty values") } p <- rev(sort(p)) idxl1 <- which(object$penalty %in% p) if (!is.null(object$penalty_ext)) { pext <- rev(sort(pext)) idxl2 <- which(object$penalty_ext %in% pext) } else { idxl2 <- 1 } beta0 <- object$beta0[idxl1, idxl2, drop = F] betas <- object$betas[ , idxl1, idxl2, drop = F] gammas <- object$gammas[, idxl1, idxl2, drop = F] alpha0 <- object$alpha0[idxl1, idxl2, drop = F] alphas <- object$alphas[ , idxl1, idxl2, drop = F] if (type == "coefficients") { return(list( beta0 = beta0, betas = betas, gammas = gammas, alpha0 = alpha0, alphas = alphas, penalty = p, penalty_ext = pext )) } if (type %in% c("link", "response")) { if (is(newdata, "matrix")) { if (typeof(newdata) != "double") { stop("newdata must be of type double") } mattype_x <- 1 } else if (is.big.matrix(newdata)) { if (bigmemory::describe(newdata)@description$type != "double") { stop("newdata must be of type double") } mattype_x <- 2 } else if ("dgCMatrix" %in% class(newdata)) { if (typeof(newdata@x) != "double") { stop("newdata must be of type double") } mattype_x <- 3 } else { stop("newdata must be a matrix, big.matrix, filebacked.big.matrix, or dgCMatrix") } beta0 <- as.vector(beta0) betas <- `dim<-`( aperm(betas, c(1, 3, 2)), c(dim(betas)[1], dim(betas)[2] * dim(betas)[3]) ) if (!is.null(gammas)) { gammas <- `dim<-`( aperm(gammas, c(1, 3, 2)), c(dim(gammas)[1], dim(gammas)[2] * dim(gammas)[3]) ) } else { gammas <- matrix(vector("numeric", 0), 0, 0) newdata_fixed <- matrix(vector("numeric", 0), 0, 0) } result <- computeResponseRcpp( newdata, mattype_x, newdata_fixed, beta0, betas, gammas, type, object$family ) if (length(pext) > 1) { dim(result) <- c(NROW(result), length(pext), length(p)) result <- aperm(result, c(1, 3, 2)) } return(drop(result)) } }
/scratch/gouwar.j/cran-all/cranData/xrnet/R/predict_xrnet.R
#' k-fold cross-validation for hierarchical regularized regression #' #' @importFrom foreach foreach #' @importFrom foreach %dopar% #' @importFrom bigmemory describe #' @importFrom bigmemory attach.big.matrix #' #' @description k-fold cross-validation for hierarchical regularized regression \code{\link{xrnet}} #' #' @param x predictor design matrix of dimension \eqn{n x p}, matrix options include: #' \itemize{ #' \item matrix #' \item big.matrix #' \item filebacked.big.matrix #' \item sparse matrix (dgCMatrix) #' } #' @param y outcome vector of length \eqn{n} #' @param external (optional) external data design matrix of dimension \eqn{p x q}, matrix options include: #' \itemize{ #' \item matrix #' \item sparse matrix (dgCMatrix) #' } #' @param unpen (optional) unpenalized predictor design matrix, matrix options include: #' \itemize{ #' \item matrix #' } #' @param family error distribution for outcome variable, options include: #' \itemize{ #' \item "gaussian" #' \item "binomial" #' } #' @param penalty_main specifies regularization object for x. See \code{\link{define_penalty}} for more details. #' @param penalty_external specifies regularization object for external. See \code{\link{define_penalty}} for more details. #' See \code{\link{define_penalty}} for more details. #' @param weights optional vector of observation-specific weights. #' Default is 1 for all observations. #' @param standardize indicates whether x and/or external should be standardized. #' Default is c(TRUE, TRUE). #' @param intercept indicates whether an intercept term is included for x and/or external. #' Default is c(TRUE, FALSE). #' @param loss loss function for cross-validation. Options include: #' \itemize{ #' \item "deviance" #' \item "mse" (Mean Squared Error) #' \item "mae" (Mean Absolute Error) #' \item "auc" (Area under the curve) #' } #' @param nfolds number of folds for cross-validation. Default is 5. #' @param foldid (optional) vector that identifies user-specified fold for each observation. #' If NULL, folds are automatically generated. #' @param parallel use \code{foreach} function to fit folds in parallel if TRUE, #' must register cluster (\code{doParallel}) before using. #' @param control specifies xrnet control object. See \code{\link{xrnet.control}} for more details. #' #' @return A list of class \code{tune_xrnet} with components #' \item{cv_mean}{mean cross-validated error for each penalty combination. Object returned is #' a vector if there is no external data (external = NULL) and matrix if there is external data.} #' \item{cv_sd}{estimated standard deviation for cross-validated errorsObject returned is #' a vector if there is no external data (external = NULL) and matrix if there is external data.} #' \item{loss}{loss function used to compute cross-validation error} #' \item{opt_loss}{the value of the loss function for the optimal cross-validated error} #' \item{opt_penalty}{first-level penalty value that achieves the optimal loss} #' \item{opt_penalty_ext}{second-level penalty value that achieves the optimal loss (if external data is present)} #' \item{fitted_model}{fitted xrnet object using all data, see \code{\link{xrnet}} for details of object} #' #' @details k-fold cross-validation is used to determine the 'optimal' combination of hyperparameter values, where #' optimal is based on the optimal value obtained for the user-selected loss function across the k folds. 
To efficiently traverse all possible #' combinations of the hyperparameter values, 'warm-starts' are used to traverse the penalty from largest #' to smallest penalty value(s). Note that the penalty grid for the folds is generated #' by fitting the model on the entire training data. Parallelization is enabled through the \code{foreach} and #' \code{doParallel} R packages. To use parallelization, \code{parallel = TRUE}, you must first create the cluster #' \code{makeCluster} and then register the cluster \code{registerDoParallel}. See the \code{parallel}, \code{foreach}, #' and/or \code{doParallel} R packages for more details on how to setup parallelization. #' #' @examples #' ## cross validation of hierarchical linear regression model #' data(GaussianExample) #' #' ## 5-fold cross validation #' cv_xrnet <- tune_xrnet( #' x = x_linear, #' y = y_linear, #' external = ext_linear, #' family = "gaussian", #' control = xrnet.control(tolerance = 1e-6) #' ) #' #' ## contour plot of cross-validated error #' plot(cv_xrnet) #' #' @export tune_xrnet <- function(x, y, external = NULL, unpen = NULL, family = c("gaussian", "binomial"), penalty_main = define_penalty(), penalty_external = define_penalty(), weights = NULL, standardize = c(TRUE, TRUE), intercept = c(TRUE, FALSE), loss = c("deviance", "mse", "mae", "auc"), nfolds = 5, foldid = NULL, parallel = FALSE, control = list()) { # function call this_call <- match.call() # Check family argument family <- match.arg(family) # Set measure used to assess model prediction performance if (missing(loss)) { if (family == "gaussian") { loss <- "mse" } else if (family == "binomial") { loss <- "auc" } } else { loss <- match.arg(loss) loss_available <- TRUE if (family == "gaussian" && !(loss %in% c("deviance", "mse", "mae"))) { loss_available <- FALSE } else if (family == "binomial" && !(loss %in% c("deviance", "auc"))) { loss_available <- FALSE } if (!loss_available) { stop(paste0("loss = '", loss, "' is not available for family = '", family,"'")) } } # check type of x matrix if (is(x, "matrix")) { if (!(typeof(x) %in% c("integer", "double"))) { stop("x contains non-numeric values") } mattype_x <- 1 } else if (is.big.matrix(x)) { if (!(bigmemory::describe(x)@description$type %in% c("integer", "double"))) { stop("x contains non-numeric values") } mattype_x <- 2 } else if ("dgCMatrix" %in% class(x)) { if (!(typeof(x@x) %in% c("integer", "double"))) { stop("x contains non-numeric values") } mattype_x <- 3 } else { stop("x must be a standard R matrix, big.matrix, filebacked.big.matrix, or dgCMatrix") } # check external type is_sparse_ext <- is(external, "sparseMatrix") # check y type y <- drop(as.numeric(y)) # Get arguments to tune_xrnet() function and filter for calls to fitting procedure xrnet_call <- match.call(expand.dots = TRUE) cv_args <- match(c("loss", "nfolds", "foldid", "parallel"), names(xrnet_call), FALSE) if (any(cv_args)) { xrnet_call <- xrnet_call[-cv_args] } xrnet_call[[1]] <- as.name("xrnet") # Set sample size / weights n <- length(y) if (is.null(weights)) { weights <- rep(1, n) } # Fit model on all training data xrnet_object <- xrnet( x = x, y = y, external = external, unpen = unpen, family = family, weights = weights, standardize = standardize, intercept = intercept, penalty_main = penalty_main, penalty_external = penalty_external, control = control ) xrnet_object$call <- xrnet_call # Check whether fixed and external are empty if (is.null(unpen)) { unpen <- matrix(vector("numeric", 0), 0, 0) nc_unpen <- as.integer(0) } else { nc_unpen <- NCOL(unpen) 
} if (is.null(external)) { external <- matrix(vector("numeric", 0), 0, 0) nc_ext <- as.integer(0) } else { nc_ext <- NCOL(external) } # Prepare penalty and control object for folds penalty_main_fold <- penalty_main penalty_external_fold <- penalty_external penalty_main_fold$user_penalty <- xrnet_object$penalty if (is.null(xrnet_object$penalty_ext)) { penalty_external_fold$user_penalty <- as.double(0.0) } else { penalty_external_fold$user_penalty <- xrnet_object$penalty_ext } penalty_fold <- initialize_penalty( penalty_main = penalty_main_fold, penalty_external = penalty_external_fold, nr_x = NROW(x), nc_x = NCOL(x), nc_unpen = nc_unpen, nr_ext = NROW(external), nc_ext = nc_ext, intercept = intercept ) num_pen <- penalty_fold$num_penalty num_pen_ext <- penalty_fold$num_penalty_ext control <- do.call("xrnet.control", control) control <- initialize_control( control_obj = control, nc_x = NCOL(x), nc_unpen = nc_unpen, nc_ext = nc_ext, intercept = intercept ) # Randomly sample observations into folds / check nfolds if (is.null(foldid)) { if (nfolds < 2) { stop("number of folds (nfolds) must be at least 2") } foldid <- sample(rep(seq(nfolds), length = n)) } else { if (length(foldid) != n) { stop("length of foldid (", length(foldid), ") not equal to number of observations (", n, ")") } foldid <- as.numeric(factor(foldid)) nfolds <- length(unique(foldid)) if (nfolds < 2) { stop("number of folds (nfolds) must be at least 2") } } # Run k-fold CV if (parallel) { if (is.big.matrix(x)) { xdesc <- describe(x) errormat <- foreach(k = 1L:nfolds, .packages = c("xrnet", "bigmemory"), .combine = cbind) %dopar% { weights_train <- weights weights_train[foldid == k] <- 0.0 test_idx <- as.integer(which(foldid == k) - 1) xref <- attach.big.matrix(xdesc) error_vec <- fitModelCVRcpp( x = xref, mattype_x = mattype_x, y = y, ext = external, is_sparse_ext = is_sparse_ext, fixed = unpen, weights_user = weights_train, intr = intercept, stnd = standardize, penalty_type = penalty_fold$ptype, cmult = penalty_fold$cmult, quantiles = c(penalty_fold$quantile, penalty_fold$quantile_ext), num_penalty = c(penalty_fold$num_penalty, penalty_fold$num_penalty_ext), penalty_ratio = c(penalty_fold$penalty_ratio, penalty_fold$penalty_ratio_ext), penalty_user = penalty_fold$user_penalty, penalty_user_ext = penalty_fold$user_penalty_ext, lower_cl = control$lower_limits, upper_cl = control$upper_limits, family = family, user_loss = loss, test_idx = test_idx, thresh = control$tolerance, maxit = control$max_iterations, ne = control$dfmax, nx = control$pmax ) } } else { errormat <- foreach(k = 1L:nfolds, .packages = c("xrnet", "Matrix"), .combine = cbind) %dopar% { weights_train <- weights weights_train[foldid == k] <- 0.0 test_idx <- as.integer(which(foldid == k) - 1) # Get errors for k-th fold error_vec <- fitModelCVRcpp( x = x, mattype_x = mattype_x, y = y, ext = external, is_sparse_ext = is_sparse_ext, fixed = unpen, weights_user = weights_train, intr = intercept, stnd = standardize, penalty_type = penalty_fold$ptype, cmult = penalty_fold$cmult, quantiles = c(penalty_fold$quantile, penalty_fold$quantile_ext), num_penalty = c(penalty_fold$num_penalty, penalty_fold$num_penalty_ext), penalty_ratio = c(penalty_fold$penalty_ratio, penalty_fold$penalty_ratio_ext), penalty_user = penalty_fold$user_penalty, penalty_user_ext = penalty_fold$user_penalty_ext, lower_cl = control$lower_limits, upper_cl = control$upper_limits, family = family, user_loss = loss, test_idx = test_idx, thresh = control$tolerance, maxit = control$max_iterations, ne = 
control$dfmax, nx = control$pmax ) } } } else { errormat <- matrix(NA, nrow = num_pen * num_pen_ext, ncol = nfolds) for (k in 1:nfolds) { # Split into test and train for k-th fold weights_train <- weights weights_train[foldid == k] <- 0.0 test_idx <- as.integer(which(foldid == k) - 1) # Fit model on k-th training fold errormat[, k] <- fitModelCVRcpp( x = x, mattype_x = mattype_x, y = y, ext = external, is_sparse_ext = is_sparse_ext, fixed = unpen, weights_user = weights_train, intr = intercept, stnd = standardize, penalty_type = penalty_fold$ptype, cmult = penalty_fold$cmult, quantiles = c(penalty_fold$quantile, penalty_fold$quantile_ext), num_penalty = c(penalty_fold$num_penalty, penalty_fold$num_penalty_ext), penalty_ratio = c(penalty_fold$penalty_ratio, penalty_fold$penalty_ratio_ext), penalty_user = penalty_fold$user_penalty, penalty_user_ext = penalty_fold$user_penalty_ext, lower_cl = control$lower_limits, upper_cl = control$upper_limits, family = family, user_loss = loss, test_idx = test_idx, thresh = control$tolerance, maxit = control$max_iterations, ne = control$dfmax, nx = control$pmax ) } } cv_mean <- rowMeans(errormat) cv_sd <- sqrt(rowSums((errormat - cv_mean)^2) / nfolds) cv_mean <- matrix(cv_mean, nrow = num_pen, byrow = TRUE) cv_sd <- matrix(cv_sd, nrow = num_pen, byrow = TRUE) rownames(cv_mean) <- rev(sort(xrnet_object$penalty)) rownames(cv_sd) <- rev(sort(xrnet_object$penalty)) if (num_pen_ext > 1) { colnames(cv_mean) <- rev(sort(xrnet_object$penalty_ext)) colnames(cv_sd) <- rev(sort(xrnet_object$penalty_ext)) } if (loss %in% c("deviance", "mse", "mae")) { opt_loss <- min(cv_mean, na.rm = TRUE) optIndex <- which(opt_loss == cv_mean, arr.ind = TRUE) } else { opt_loss <- max(cv_mean, na.rm = TRUE) optIndex <- which(opt_loss == cv_mean, arr.ind = TRUE) } if (is.null(dim(optIndex))) { opt_penalty <- xrnet_object$penalty[optIndex[1]] opt_penalty_ext <- xrnet_object$penalty_ext[optIndex[2]] } else { opt_penalty <- xrnet_object$penalty[optIndex[1, 1]] opt_penalty_ext <- xrnet_object$penalty_ext[optIndex[1, 2]] } cvfit <- list( cv_mean = cv_mean, cv_sd = cv_sd, loss = loss, opt_loss = opt_loss, opt_penalty = opt_penalty, opt_penalty_ext = opt_penalty_ext, fitted_model = xrnet_object, call = this_call ) class(cvfit) <- "tune_xrnet" return(cvfit) }
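## Illustrative sketch of the parallel workflow described in the details above (exposition
## only): register a doParallel backend, then pass parallel = TRUE so the k folds are fit
## concurrently across workers.
library(xrnet)
library(doParallel)

cl <- makeCluster(2)        # create a small cluster
registerDoParallel(cl)      # register it as the foreach backend

data(GaussianExample)
cv_parallel <- tune_xrnet(x = x_linear, y = y_linear, external = ext_linear,
                          family = "gaussian", nfolds = 5, parallel = TRUE)

stopCluster(cl)             # release the workers when done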
/scratch/gouwar.j/cran-all/cranData/xrnet/R/tune_xrnet.R
#' @useDynLib xrnet, .registration = TRUE #' @importFrom Rcpp sourceCpp #' @importFrom stats predict #' @importFrom bigmemory is.big.matrix #' @importFrom methods is NULL #' Fit hierarchical regularized regression model #' #' @description Fits hierarchical regularized regression model that enables the incorporation of external data #' for predictor variables. Both the predictor variables and external data can be regularized #' by the most common penalties (lasso, ridge, elastic net). #' Solutions are computed across a two-dimensional grid of penalties (a separate penalty path is computed #' for the predictors and external variables). Currently support regularized linear and logistic regression, #' future extensions to other outcomes (i.e. Cox regression) will be implemented in the next major update. #' #' @param x predictor design matrix of dimension \eqn{n x p}, matrix options include: #' \itemize{ #' \item matrix #' \item big.matrix #' \item filebacked.big.matrix #' \item sparse matrix (dgCMatrix) #' } #' @param y outcome vector of length \eqn{n} #' @param external (optional) external data design matrix of dimension \eqn{p x q}, matrix options include: #' \itemize{ #' \item matrix #' \item sparse matrix (dgCMatrix) #' } #' @param unpen (optional) unpenalized predictor design matrix, matrix options include: #' \itemize{ #' \item matrix #' } #' @param family error distribution for outcome variable, options include: #' \itemize{ #' \item "gaussian" #' \item "binomial" #' } #' @param penalty_main specifies regularization object for x. See \code{\link{define_penalty}} for more details. #' @param penalty_external specifies regularization object for external. See \code{\link{define_penalty}} for more details. #' @param weights optional vector of observation-specific weights. Default is 1 for all observations. #' @param standardize indicates whether x and/or external should be standardized. Default is c(TRUE, TRUE). #' @param intercept indicates whether an intercept term is included for x and/or external. #' Default is c(TRUE, FALSE). #' @param control specifies xrnet control object. See \code{\link{xrnet.control}} for more details. #' #' @details This function extends the coordinate descent algorithm of the R package \code{glmnet} to allow the #' type of regularization (i.e. ridge, lasso) to be feature-specific. This extension is used to enable fitting #' hierarchical regularized regression models, where external information for the predictors can be included in the #' \code{external=} argument. In addition, elements of the R package \code{biglasso} are utilized to enable #' the use of standard R matrices, memory-mapped matrices from the \code{bigmemory} package, or sparse matrices from the \code{Matrix} package. #' #' @references #' Jerome Friedman, Trevor Hastie, Robert Tibshirani (2010). #' Regularization Paths for Generalized Linear Models via Coordinate Descent. #' Journal of Statistical Software, 33(1), 1-22. URL http://www.jstatsoft.org/v33/i01/. #' #' @references #' Zeng, Y., and Breheny, P. (2017). #' The biglasso Package: A Memory- and Computation-Efficient Solver for Lasso Model Fitting with Big Data in R. #' arXiv preprint arXiv:1701.05936. URL https://arxiv.org/abs/1701.05936. #' #' @references #' Michael J. Kane, John Emerson, Stephen Weston (2013). #' Scalable Strategies for Computing with Massive Data. #' Journal of Statistical Software, 55(14), 1-19. URL http://www.jstatsoft.org/v55/i14/. 
#' #' @return A list of class \code{xrnet} with components: #' \item{beta0}{matrix of first-level intercepts indexed by penalty values} #' \item{betas}{3-dimensional array of first-level penalized coefficients indexed by penalty values} #' \item{gammas}{3-dimensional array of first-level non-penalized coefficients indexed by penalty values} #' \item{alpha0}{matrix of second-level intercepts indexed by penalty values} #' \item{alphas}{3-dimensional array of second-level external data coefficients indexed by penalty values} #' \item{penalty}{vector of first-level penalty values} #' \item{penalty_ext}{vector of second-level penalty values} #' \item{family}{error distribution for outcome variable} #' \item{num_passes}{total number of passes over the data in the coordinate descent algorithm} #' \item{status}{error status for xrnet fitting} #' \itemize{ #' \item 0 = OK #' \item 1 = Error/Warning #' } #' \item{error_msg}{description of error} #' #' @examples #' ### hierarchical regularized linear regression ### #' data(GaussianExample) #' #' ## define penalty for predictors and external variables #' ## default is ridge for predictors and lasso for external #' ## see define_penalty() function for more details #' #' penMain <- define_penalty(0, num_penalty = 20) #' penExt <- define_penalty(1, num_penalty = 20) #' #' ## fit model with defined regularization #' fit_xrnet <- xrnet( #' x = x_linear, #' y = y_linear, #' external = ext_linear, #' family = "gaussian", #' penalty_main = penMain, #' penalty_external = penExt #' ) #' @export xrnet <- function(x, y, external = NULL, unpen = NULL, family = c("gaussian", "binomial"), penalty_main = define_penalty(), penalty_external = define_penalty(), weights = NULL, standardize = c(TRUE, TRUE), intercept = c(TRUE, FALSE), control = list()) { # function call this.call <- match.call() # check error distribution for y family <- match.arg(family) ## Prepare x and y ## # check type of x matrix if (is(x, "matrix")) { if (typeof(x) != "double") stop("x must be of type double") mattype_x <- 1 } else if (is.big.matrix(x)) { if (bigmemory::describe(x)@description$type != "double") stop("x must be of type double") mattype_x <- 2 } else if ("dgCMatrix" %in% class(x)) { if (typeof(x@x) != "double") stop("x must be of type double") mattype_x <- 3 } else { stop("x must be a standard R matrix, big.matrix, filebacked.big.matrix, or dgCMatrix") } # check type of y y <- as.double(drop(y)) # check dimensions of x and y nr_x <- NROW(x) nc_x <- NCOL(x) y_len <- NROW(y) if (y_len != nr_x) { stop( paste( "Length of y (", y_len, ") not equal to the number of rows of x (", nr_x,")", sep = "" ) ) } ## Prepare external ## is_sparse_ext = FALSE if (!is.null(external)) { # check if external is a sparse matrix if (is(external, "sparseMatrix")) { is_sparse_ext = TRUE } else { # convert to matrix if (!("matrix" %in% class(external))) { external <- as.matrix(external) } if (typeof(external) != "double") { stop("external must be of type double") } } # check dimensions nr_ext <- NROW(external) nc_ext <- NCOL(external) if (nc_x != nr_ext) { stop( paste("Number of columns in x (", nc_x, ") not equal to the number of rows in external (", nr_ext, ")", sep = "" ) ) } } else { external <- matrix(vector("numeric", 0), 0, 0) nr_ext <- as.integer(0) nc_ext <- as.integer(0) } ## Prepare unpenalized covariates ## if (!is.null(unpen)) { # check dimensions nc_unpen <- NCOL(unpen) if (y_len != NROW(unpen)) { stop( paste( "Length of y (", y_len, ") not equal to the number of rows of unpen (", NROW(unpen),
")", sep = "" ) ) } # convert unpen to matrix if (!("matrix" %in% class(unpen))) { unpen <- as.matrix(unpen) } if (typeof(unpen) != "double") { stop("unpen must be a numeric matrix of type 'double'") } } else { unpen <- matrix(vector("numeric", 0), 0, 0) nc_unpen <- as.integer(0) } # set weights if (is.null(weights)) { weights <- as.double(rep(1, nr_x)) } else if (length(weights) != y_len) { stop( paste( "Length of weights (", length(weights), ") not equal to length of y (", y_len, ")", sep = "" ) ) } else if (any(weights < 0)) { stop("weights can only contain non-negative values") } else { weights <- as.double(weights) } # check penalty objects penalty <- initialize_penalty( penalty_main = penalty_main, penalty_external = penalty_external, nr_x = nr_x, nc_x = nc_x, nc_unpen = nc_unpen, nr_ext = nr_ext, nc_ext = nc_ext, intercept = intercept ) # check control object control <- do.call("xrnet.control", control) control <- initialize_control( control_obj = control, nc_x = nc_x, nc_unpen = nc_unpen, nc_ext = nc_ext, intercept = intercept ) # fit model fit <- fitModelRcpp( x = x, mattype_x = mattype_x, y = y, ext = external, is_sparse_ext = is_sparse_ext, fixed = unpen, weights_user = weights, intr = intercept, stnd = standardize, penalty_type = penalty$ptype, cmult = penalty$cmult, quantiles = c(penalty$quantile, penalty$quantile_ext), num_penalty = c(penalty$num_penalty, penalty$num_penalty_ext), penalty_ratio = c(penalty$penalty_ratio, penalty$penalty_ratio_ext), penalty_user = penalty$user_penalty, penalty_user_ext = penalty$user_penalty_ext, lower_cl = control$lower_limits, upper_cl = control$upper_limits, family = family, thresh = control$tolerance, maxit = control$max_iterations, ne = control$dfmax, nx = control$pmax ) # check status of model fit if (fit$status %in% c(0, 1)) { if (fit$status == 0) { fit$status <- "0 (OK)" } else if (fit$status == 1) { fit$status <- "1 (Error/Warning)" fit$error_msg <- "Max number of iterations reached" warning("Max number of iterations reached") } # Create arrays ordering coefficients by 1st level penalty / 2nd level penalty fit$beta0 <- matrix( fit$beta0, nrow = penalty$num_penalty, ncol = penalty$num_penalty_ext, byrow = TRUE ) dim(fit$betas) <- c(nc_x, penalty$num_penalty_ext, penalty$num_penalty) fit$betas <- aperm(fit$betas, c(1, 3, 2)) if (intercept[2]) { fit$alpha0 <- matrix( fit$alpha0, nrow = penalty$num_penalty, ncol = penalty$num_penalty_ext, byrow = TRUE ) } else { fit$alpha0 <- NULL } if (nc_ext > 0) { dim(fit$alphas) <- c(nc_ext, penalty$num_penalty_ext, penalty$num_penalty) fit$alphas <- aperm(fit$alphas, c(1, 3, 2)) } else { fit$alphas <- NULL fit$penalty_ext <- NULL } if (nc_unpen > 0) { dim(fit$gammas) <- c(nc_unpen, penalty$num_penalty_ext, penalty$num_penalty) fit$gammas <- aperm(fit$gammas, c(1, 3, 2)) } else { fit$gammas <- NULL } } fit$call <- this.call class(fit) <- "xrnet" return(fit) } #' Control function for xrnet fitting #' #' @description Control function for \code{\link{xrnet}} fitting. #' #' @param tolerance positive convergence criterion. Default is 1e-08. #' @param max_iterations maximum number of iterations to run coordinate gradient descent #' across all penalties before returning an error. Default is 1e+05. #' @param dfmax maximum number of variables allowed in model. Default #' is \eqn{ncol(x) + ncol(unpen) + ncol(external) + intercept[1] + intercept[2]}. #' @param pmax maximum number of variables with nonzero coefficient estimate. 
#' Default is \eqn{min(2 * dfmax + 20, ncol(x) + ncol(unpen) + ncol(external) + intercept[2])}. #' @param lower_limits vector of lower limits for each coefficient. Default is -Inf for all variables. #' @param upper_limits vector of upper limits for each coefficient. Default is Inf for all variables. #' #' @return A list object with the following components: #' \item{tolerance}{The coordinate descent stopping criterion.} #' \item{dfmax}{The maximum number of variables that will be allowed in the model.} #' \item{pmax}{The maximum number of variables with nonzero coefficient estimate.} #' \item{lower_limits}{Feature-specific numeric vector of lower bounds for coefficient estimates} #' \item{upper_limits}{Feature-specific numeric vector of upper bounds for coefficient estimates} #' @export xrnet.control <- function(tolerance = 1e-08, max_iterations = 1e+05, dfmax = NULL, pmax = NULL, lower_limits = NULL, upper_limits = NULL) { if (tolerance <= 0) { stop("tolerance must be greater than 0") } if (max_iterations <= 0 || as.integer(max_iterations) != max_iterations) { stop("max_iterations must be a positive integer") } control_obj <- list( tolerance = tolerance, max_iterations = max_iterations, dfmax = dfmax, pmax = pmax, lower_limits = lower_limits, upper_limits = upper_limits ) } initialize_penalty <- function(penalty_main, penalty_external, nr_x, nc_x, nc_unpen, nr_ext, nc_ext, intercept) { names(penalty_external) <- c( "penalty_type_ext", "quantile_ext", "num_penalty_ext", "penalty_ratio_ext", "user_penalty_ext", "custom_multiplier_ext" ) penalty_obj <- c(penalty_main, penalty_external) # check penalty object for x if (length(penalty_obj$penalty_type) > 1) { if (length(penalty_obj$penalty_type) != nc_x) { stop( "Length of penalty_type (", length(penalty_obj$penalty_type), ") not equal to number of columns in x (", nc_x,")" ) } } else { penalty_obj$penalty_type <- rep(penalty_obj$penalty_type, nc_x) } if (is.null(penalty_obj$penalty_ratio)) { if (penalty_obj$user_penalty[1] == 0) { if (nr_x > nc_x) { penalty_obj$penalty_ratio <- 1e-04 } else { penalty_obj$penalty_ratio <- 0.01 } if (penalty_obj$num_penalty < 3) { penalty_obj$num_penalty <- 3 stop("num_penalty must be at least 3 when automatically computing penalty path") } } else { penalty_obj$user_penalty <- rev(sort(penalty_obj$user_penalty)) penalty_obj$penalty_ratio <- 0.0 } } if (is.null(penalty_obj$custom_multiplier)) { penalty_obj$custom_multiplier <- rep(1.0, nc_x) } else if (length(penalty_obj$custom_multiplier) != nc_x) { stop( "Length of custom_multiplier (", length(penalty_obj$custom_multiplier), ") not equal to number of columns in x (", nc_x, ")" ) } # check penalty object for external if (nc_ext > 0) { if (length(penalty_obj$penalty_type_ext) > 1) { if (length(penalty_obj$penalty_type_ext) != nc_ext) { stop( "Length of penalty_type_ext (", length(penalty_obj$penalty_type_ext), ") not equal to number of columns in external (", nc_ext, ")" ) } } else { penalty_obj$penalty_type_ext <- rep(penalty_obj$penalty_type_ext, nc_ext) } if (is.null(penalty_obj$penalty_ratio_ext)) { if (penalty_obj$user_penalty_ext[1] == 0) { if (nr_ext > nc_ext) { penalty_obj$penalty_ratio_ext <- 1e-04 } else { penalty_obj$penalty_ratio_ext <- 0.01 } if (penalty_obj$num_penalty_ext < 3) { penalty_obj$num_penalty_ext <- 3 stop("num_penalty_ext must be at least 3 when automatically computing penalty path") } } else { penalty_obj$user_penalty_ext <- rev(sort(penalty_obj$user_penalty_ext)) penalty_obj$penalty_ratio_ext <- 0.0 } } if 
(is.null(penalty_obj$custom_multiplier_ext)) { penalty_obj$custom_multiplier_ext <- rep(1.0, nc_ext) } else if (length(penalty_obj$custom_multiplier_ext) != nc_ext && nc_ext > 0) { stop( "Length of custom_multiplier_ext (", length(penalty_obj$custom_multiplier_ext), ") not equal to number of columns in external (", nc_ext, ")" ) } } else { penalty_obj$penalty_type_ext <- NULL penalty_obj$num_penalty_ext <- 1 penalty_obj$penalty_ratio_ext <- 0 penalty_obj$custom_multiplier_ext <- numeric(0) } # vectors holding penalty type and multipliers across all variables if (intercept[2]) { penalty_obj$ptype <- c( penalty_obj$penalty_type, rep(0.0, nc_unpen), 0.0, penalty_obj$penalty_type_ext ) penalty_obj$cmult <- c( penalty_obj$custom_multiplier, rep(0.0, nc_unpen), 0.0, penalty_obj$custom_multiplier_ext ) } else { penalty_obj$ptype <- c( penalty_obj$penalty_type, rep(0.0, nc_unpen), penalty_obj$penalty_type_ext ) penalty_obj$cmult <- c( penalty_obj$custom_multiplier, rep(0.0, nc_unpen), penalty_obj$custom_multiplier_ext ) } return(penalty_obj) } initialize_control <- function(control_obj, nc_x, nc_unpen, nc_ext, intercept) { if (is.null(control_obj$dfmax)) { control_obj$dfmax <- as.integer(nc_x + nc_ext + nc_unpen + intercept[1] + intercept[2]) } else if (control_obj$dfmax <= 0 || as.integer(control_obj$dfmax) != control_obj$dfmax) { stop("dfmax can only contain positive integers") } if (is.null(control_obj$pmax)) { control_obj$pmax <- as.integer(min(2 * control_obj$dfmax + 20, nc_x + nc_ext + nc_unpen + intercept[2])) } else if (control_obj$pmax <= 0 || as.integer(control_obj$pmax) != control_obj$pmax) { stop("pmax can only contain positive integers") } if (is.null(control_obj$lower_limits)) { control_obj$lower_limits <- rep(-Inf, nc_x + nc_ext + nc_unpen + intercept[2]) } else if (length(control_obj$lower_limits) != nc_x + nc_ext + nc_unpen) { stop( "Length of lower_limits (", length(control_obj$lower_limits), ") not equal to sum of number of columns in x, unpen, and external (", nc_x + nc_ext + nc_unpen, ")" ) } else if (intercept[2]) { control_obj$lower_limits <- c( control_obj$lower_limits[1:(nc_x + nc_unpen)], -Inf, control_obj$lower_limits[(nc_x + nc_unpen + 1):length(control_obj$lower_limits)] ) } if (is.null(control_obj$upper_limits)) { control_obj$upper_limits <- rep(Inf, nc_x + nc_ext + nc_unpen + intercept[2]) } else if (length(control_obj$upper_limits) != nc_x + nc_ext + nc_unpen) { stop( "Length of upper_limits (", length(control_obj$upper_limits), ") not equal to sum of number of columns in x, unpen, and external (", nc_x + nc_ext + nc_unpen, ")" ) } else if (intercept[2]) { control_obj$upper_limits <- c( control_obj$upper_limits[1:(nc_x + nc_unpen)], Inf, control_obj$upper_limits[(nc_x + nc_unpen + 1):length(control_obj$upper_limits)] ) } return(control_obj) }
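## Illustrative usage sketch, not exported documentation: passing control
## settings to xrnet() via xrnet.control(). The data objects come from the
## bundled GaussianExample used in the xrnet() examples above; the control
## values shown here are arbitrary.
if (FALSE) {
    data(GaussianExample)
    ctrl <- xrnet.control(tolerance = 1e-10, max_iterations = 2e+05)
    fit <- xrnet(
        x = x_linear,
        y = y_linear,
        external = ext_linear,
        family = "gaussian",
        control = ctrl
    )
}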
/scratch/gouwar.j/cran-all/cranData/xrnet/R/xrnet.R
# Generated by using Rcpp::compileAttributes() -> do not edit by hand # Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393 doc_xslt_apply <- function(doc, xslt, params) { .Call('_xslt_doc_xslt_apply', PACKAGE = 'xslt', doc, xslt, params) } libxml2_version <- function() { .Call('_xslt_libxml2_version', PACKAGE = 'xslt') } libxslt_version <- function() { .Call('_xslt_libxslt_version', PACKAGE = 'xslt') }
/scratch/gouwar.j/cran-all/cranData/xslt/R/RcppExports.R
#' XSLT 1.0 Transformations #' #' Transform an XML document by applying an XSL stylesheet. Usually returns the #' transformed [xml_document][xml2::xml_new_document], unless the stylesheet has #' `<xsl:output method="text">` in which case we return a text string. #' #' This implementation supports XSLT 1.0 features plus most of the EXSLT set of #' processor-portable extensions functions. Unfortunately XSLT 2.0 or 3.0 features #' are only available in proprietary libraries and currently unsupported. However #' XSLT 2.0 is not widely adopted anyway because it is unavailable in most browsers. #' #' @export #' @rdname xslt #' @name xslt #' @useDynLib xslt #' @importFrom xml2 read_xml #' @importFrom Rcpp sourceCpp #' @param doc xml document as returned by [xml2::read_xml] #' @param stylesheet another xml document containing the XSL stylesheet #' @param params named list or vector with additional XSLT parameters #' @examples doc <- read_xml(system.file("examples/cd_catalog.xml", package = "xslt")) #' style <- read_xml(system.file("examples/cd_catalog.xsl", package = "xslt")) #' html <- xml_xslt(doc, style) #' cat(as.character(html)) xml_xslt <- function(doc, stylesheet, params){ UseMethod("xml_xslt") } #' @export xml_xslt.xml_document <- function(doc, stylesheet, params = NULL){ as_xml2 <- utils::getFromNamespace("xml_document", "xml2") stopifnot(inherits(stylesheet, "xml_document")) paramstr <- c(rbind(names(params), vapply(params, deparse, character(1)))) out <- doc_xslt_apply(doc$doc, stylesheet$doc, paramstr) if(is.character(out)) return(out) as_xml2(out) } #' @export #' @rdname xslt xslt_version <- function(){ list( xml2 = as.package_version(libxml2_version()), xslt = as.package_version(libxslt_version()) ) }
/scratch/gouwar.j/cran-all/cranData/xslt/R/xml_xslt.R
# Build against static libraries from rwinlib VERSION <- commandArgs(TRUE) if(!file.exists(sprintf("../windows/libxml2-%s/include/libxml2/libxml/parser.h", VERSION))){ if(getRversion() < "3.3.0") setInternet2() download.file(sprintf("https://github.com/rwinlib/libxml2/archive/v%s.zip", VERSION), "lib.zip", quiet = TRUE) dir.create("../windows", showWarnings = FALSE) unzip("lib.zip", exdir = "../windows") unlink("lib.zip") }
/scratch/gouwar.j/cran-all/cranData/xslt/tools/winlibs.R
#'Calculate chi-square periodogram #' #'@param activityDF data frame containing time and activity values #'@param res time resolution (in hours) for calculating chi-squared statistics #' #'@return data frame of three columns (testPeriod (h), Qp.act, Qp.sig) #' #'@export chiSqPeriodogram <- function(activityDF, res=0.1){ testPerVec <- seq(5,35,by=res) qpArray <- apply(as.array(testPerVec), 1, function(x) calcQp(activityDF$value,x)) sigArray <- apply(as.array(testPerVec), 1, function(x) qchisq(0.99^(1/length(testPerVec)), round(x*60))) data.frame(testPeriod=testPerVec, Qp.act=qpArray, Qp.sig=sigArray) } #'calculate Qp #' #'@param values activity values (each value represents the measured activity in a minute) #'@param varPer a period at which the chi-squared statistic is to be calculated #' #'@return a numeric of the calculated chi-squared statistic at the given varPer calcQp <- function(values, varPer){ colNum <- round(varPer*60) rowNum <- floor(length(values)/colNum) foldedValues <- matrix(values[1:(colNum*rowNum)], ncol=colNum, byrow=T) avgAll <- mean(foldedValues); avgP <- apply(foldedValues, 2, mean) numerator <- sum((avgP-avgAll)^2) denom <- sum((values-avgAll)^2)/(rowNum*colNum*rowNum) qp <- numerator/denom return(qp) }
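## Illustrative usage sketch, not exported documentation. It mirrors the
## example in chiSqPeriodogramPlot.R: ten days of minute-resolution activity
## following a 24 h sine wave should produce a Qp.act peak near a test
## period of 24 h.
if (FALSE) {
    oscillation <- sin(seq(0, 2 * pi * 10, by = 2 * pi / 1440))
    oscillation.df <- data.frame(dateTime = 1:length(oscillation),
                                 value = oscillation)
    prdgm <- chiSqPeriodogram(oscillation.df, res = 0.5)
    prdgm[which.max(prdgm$Qp.act), ]  # row with the largest Qp statistic
}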
/scratch/gouwar.j/cran-all/cranData/xsp/R/chiSqPeriodogram.R
#'Draw a graph of chi-square periodogram #' #'@param chiSqPrdgmDF data frame containing three column (testPerVec, Qp.act, Qp.sig) #' #'@return ggplot object #' #'@import ggplot2 #'@import reshape2 #' #'@importFrom stats lm qchisq #' #'@examples #'oscillation <- sin(seq(0, 2 * pi * 10, by = 2 * pi / 1440)) #'oscillation.df <- data.frame(dateTime = 1:length(oscillation), value = oscillation) #'chiSqPeriodogramPlot(chiSqPeriodogram(oscillation.df)) #' #'@export chiSqPeriodogramPlot <- function(chiSqPrdgmDF) { testPeriod <- NULL value <- NULL variable <- NULL pos <- NULL df <- melt(chiSqPrdgmDF, id.vars="testPeriod") pk <- getPeak(chiSqPrdgmDF$testPeriod, chiSqPrdgmDF$Qp.act, chiSqPrdgmDF$Qp.sig, 2) .e <- environment() p <- ggplot(df, environment=.e) + geom_line(aes(x=testPeriod, y=value, colour=variable)) p <- p + scale_color_manual(values=c("deepskyblue2","red2")) p <- p + geom_vline(data=pk, aes(xintercept=pos), linetype="dotted") p <- p + scale_x_continuous(breaks=seq(4,36,by=4)) p <- p + geom_text(aes(x=Inf, y=Inf, label=sprintf("Period: %2.2f[h]",pk$pos)), vjust=1, hjust=1) p + xlab("period [h]") + ylab("Qp") } #'find the peak value from chi-squared periodogram #' #'@param x times at which each chi-squared statistics is calculated #'@param y chi-squared statistics calculated from an activity data #'@param z chi-squared statistics calculated from a null-hypothesis #'@param p number of points to be used for fitting a quadratic function #' #'@return data frame with five numerics getPeak <- function(x, y, z, p){ parStoreLen <- length((1+p):(length(x)-p)) parMat <- data.frame( const =rep(0,parStoreLen), first =rep(0,parStoreLen), second=rep(0,parStoreLen), value =rep(0,parStoreLen), pos =rep(0,parStoreLen)) for (i in (1+p):(length(x)-p) ){ fit <- lm(y~x+I(x^2),data.frame(x=x[(i-p):(i+p)], y=y[(i-p):(i+p)])) parMat[i,] = c(fit$coefficients[1], fit$coefficients[2], fit$coefficients[3], fit$coefficients[1] + fit$coefficients[2]*x[i] + fit$coefficients[3]*x[i]^2 - z[i], -fit$coefficients[2]/(2*fit$coefficients[3]) ) } parMatValid <- parMat[parMat[,3]<0,] peakFitPar <- parMatValid[order(parMatValid$value, decreasing=T)[1],c(1,2,3,4,5)] peakFitPar }
/scratch/gouwar.j/cran-all/cranData/xsp/R/chiSqPeriodogramPlot.R
autoformat <- function(xtab, zap = getOption("digits")) { align(xtab) <- xalign(xtab) digits(xtab) <- xdigits(xtab, zap = zap) display(xtab) <- xdisplay(xtab) return(xtab) } xalign <- function(x, pad = TRUE) { lr <- function(v) if(is.numeric(v)) "r" else "l" is.2d <- length(dim(x)) == 2 alignment <- if(is.2d) sapply(as.data.frame(x), lr) else lr(x) output <- if(is.2d && pad) c("l", alignment) else alignment return(output) } xdigits <- function(x, pad = TRUE, zap = getOption("digits")) { dig <- function(v) { if(is.numeric(v)) { v <- na.omit(v) v <- zapsmall(abs(v - floor(v)), zap) dec <- if(any(v > 0)) max(nchar(v) - 2L) else 0L } else { dec <- 0L } return(dec) } is.2d <- length(dim(x)) == 2 decimals <- if(is.2d) sapply(as.data.frame(x), dig) else dig(x) output <- if(is.2d && pad) c(0L, decimals) else decimals return(output) } xdisplay <- function(x, pad = TRUE) { type <- function(v) { if(is.numeric(v)) { tp <- if(xdigits(v) == 0) "d" else "f" } else { tp <- "s" } return(tp) } is.2d <- length(dim(x)) == 2 disp <- if(is.2d) sapply(as.data.frame(x), type) else type(x) output <- if(is.2d && pad) c("s", disp) else disp return(output) }
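## Illustrative usage sketch, not exported documentation: autoformat()
## combines the xalign/xdigits/xdisplay helpers above to set the alignment,
## digits and display attributes of an existing xtable object. Uses the
## base R mtcars data set.
if (FALSE) {
    tab <- xtable(head(mtcars))
    tab <- autoformat(tab)
    align(tab)   # right-aligned numeric columns, "l" padding for row names
    digits(tab)  # per-column decimal places inferred from the data
    display(tab) # "d" for whole-number columns, "f" otherwise
}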
/scratch/gouwar.j/cran-all/cranData/xtable/R/autoformat.R
### xtable package ### ### Produce LaTeX and HTML tables from R objects. ### ### Copyright 2000-2013 David B. Dahl <[email protected]> ### ### Maintained by David Scott <[email protected]> ### ### This file is part of the `xtable' library for R and related languages. ### It is made available under the terms of the GNU General Public ### License, version 2, or at your option, any later version, ### incorporated herein by reference. ### ### This program is distributed in the hope that it will be ### useful, but WITHOUT ANY WARRANTY; without even the implied ### warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR ### PURPOSE. See the GNU General Public License for more ### details. ### ### You should have received a copy of the GNU General Public ### License along with this program; if not, write to the Free ### Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, ### MA 02111-1307, USA print.xtable <- function(x, type = getOption("xtable.type", "latex"), file = getOption("xtable.file", ""), append = getOption("xtable.append", FALSE), floating = getOption("xtable.floating", TRUE), floating.environment = getOption("xtable.floating.environment", "table"), table.placement = getOption("xtable.table.placement", "ht"), caption.placement = getOption("xtable.caption.placement", "bottom"), caption.width = getOption("xtable.caption.width", NULL), latex.environments = getOption("xtable.latex.environments", c("center")), tabular.environment = getOption("xtable.tabular.environment", "tabular"), size = getOption("xtable.size", NULL), hline.after = getOption("xtable.hline.after", c(-1,0,nrow(x))), NA.string = getOption("xtable.NA.string", ""), include.rownames = getOption("xtable.include.rownames", TRUE), include.colnames = getOption("xtable.include.colnames", TRUE), only.contents = getOption("xtable.only.contents", FALSE), add.to.row = getOption("xtable.add.to.row", NULL), sanitize.text.function = getOption("xtable.sanitize.text.function", NULL), sanitize.rownames.function = getOption("xtable.sanitize.rownames.function", sanitize.text.function), sanitize.colnames.function = getOption("xtable.sanitize.colnames.function", sanitize.text.function), math.style.negative = getOption("xtable.math.style.negative", FALSE), math.style.exponents = getOption("xtable.math.style.exponents", FALSE), html.table.attributes = getOption("xtable.html.table.attributes", "border=1"), print.results = getOption("xtable.print.results", TRUE), format.args = getOption("xtable.format.args", NULL), rotate.rownames = getOption("xtable.rotate.rownames", FALSE), rotate.colnames = getOption("xtable.rotate.colnames", FALSE), booktabs = getOption("xtable.booktabs", FALSE), scalebox = getOption("xtable.scalebox", NULL), width = getOption("xtable.width", NULL), comment = getOption("xtable.comment", TRUE), timestamp = getOption("xtable.timestamp", date()), ...) 
{ ## If caption is length 2, treat the second value as the "short caption" caption <- attr(x,"caption",exact = TRUE) short.caption <- NULL if (!is.null(caption) && length(caption) > 1){ short.caption <- caption[2] caption <- caption[1] } ## Claudio Agostinelli <[email protected]> dated 2006-07-28 hline.after ## By default it print an \hline before and after the columns names ## independently they are printed or not and at the end of the table ## Old code that set hline.after should include c(-1, 0, nrow(x)) in the ## hline.after vector ## If you do not want any \hline inside the data, set hline.after to NULL ## PHEADER instead the string '\\hline\n' is used in the code ## Now hline.after counts how many time a position appear ## I left an automatic PHEADER in the longtable is this correct? ## Claudio Agostinelli <[email protected]> dated 2006-07-28 include.rownames, ## include.colnames pos <- 0 if (include.rownames) pos <- 1 ## Claudio Agostinelli <[email protected]> dated 2006-07-28 ## hline.after checks if (any(hline.after < -1) | any(hline.after > nrow(x))) { stop("'hline.after' must be inside [-1, ", nrow(x), "]") } ## Claudio Agostinelli <[email protected]> dated 2006-07-28 ## add.to.row checks if (!is.null(add.to.row)) { if (is.list(add.to.row) && length(add.to.row) == 2) { if (is.null(names(add.to.row))) { names(add.to.row) <- c('pos', 'command') } else if (any(sort(names(add.to.row))!= c('command', 'pos'))) { stop("the names of the elements of 'add.to.row' must be 'pos' and 'command'") } if (is.list(add.to.row$pos) && is.vector(add.to.row$command, mode = 'character')) { if ((npos <- length(add.to.row$pos)) != length(add.to.row$command)) { stop("the length of 'add.to.row$pos' must be equal to the length of 'add.to.row$command'") } if (any(unlist(add.to.row$pos) < -1) | any(unlist(add.to.row$pos) > nrow(x))) { stop("the values in add.to.row$pos must be inside the interval [-1, ", nrow(x), "]") } } else { stop("the first argument ('pos') of 'add.to.row' must be a list, the second argument ('command') must be a vector of mode character") } } else { stop("'add.to.row' argument must be a list of length 2") } } else { add.to.row <- list(pos = list(), command = vector(length = 0, mode = "character")) npos <- 0 } ## Claudio Agostinelli <[email protected]> dated 2006-07-28 add.to.row ## Add further commands at the end of rows if (type == "latex") { ## Original code before changes in version 1.6-1 ## PHEADER <- "\\hline\n" ## booktabs code from Matthieu Stigler <[email protected]>, ## 1 Feb 2012 if(!booktabs){ PHEADER <- "\\hline\n" } else { ## This code replaced to fix bug #2309, David Scott, 8 Jan 2014 ## PHEADER <- ifelse(-1%in%hline.after, "\\toprule\n", "") ## if(0%in%hline.after) { ## PHEADER <- c(PHEADER, "\\midrule\n") ## } ## if(nrow(x)%in%hline.after) { ## PHEADER <- c(PHEADER, "\\bottomrule\n") ## } if (is.null(hline.after)){ PHEADER <- "" } else { hline.after <- sort(hline.after) PHEADER <- rep("\\midrule\n", length(hline.after)) if (hline.after[1] == -1) { PHEADER[1] <- "\\toprule\n" } if (hline.after[length(hline.after)] == nrow(x)) { PHEADER[length(hline.after)] <- "\\bottomrule\n" } } } } else { PHEADER <- "" } lastcol <- rep(" ", nrow(x)+2) if (!is.null(hline.after)) { ## booktabs change - Matthieu Stigler: fill the hline arguments ## separately, 1 Feb 2012 ## ## Code before booktabs change was: ## add.to.row$pos[[npos+1]] <- hline.after if (!booktabs){ add.to.row$pos[[npos+1]] <- hline.after } else { for(i in 1:length(hline.after)) { add.to.row$pos[[npos+i]] <- 
hline.after[i] } } add.to.row$command <- c(add.to.row$command, PHEADER) } if ( length(add.to.row$command) > 0 ) { for (i in 1:length(add.to.row$command)) { addpos <- add.to.row$pos[[i]] freq <- table(addpos) addpos <- unique(addpos) for (j in 1:length(addpos)) { lastcol[addpos[j]+2] <- paste(lastcol[addpos[j]+2], paste(rep(add.to.row$command[i], freq[j]), sep = "", collapse = ""), sep = " ") } } } if (length(type)>1) stop("\"type\" must have length 1") type <- tolower(type) if (!all(!is.na(match(type, c("latex","html"))))) { stop("\"type\" must be in {\"latex\", \"html\"}") } ## Disabling the check on known floating environments as many users ## want to use additional environments. ## if (!all(!is.na(match(floating.environment, ## c("table","table*","sidewaystable", ## "margintable"))))) { ## stop("\"type\" must be in {\"table\", \"table*\", \"sidewaystable\", \"margintable\"}") ## } if (("margintable" %in% floating.environment) & (!is.null(table.placement))) { warning("margintable does not allow for table placement; setting table.placement to NULL") table.placement <- NULL } if (!is.null(table.placement) && !all(!is.na(match(unlist(strsplit(table.placement, split = "")), c("H","h","t","b","p","!"))))) { stop("\"table.placement\" must contain only elements of {\"h\",\"t\",\"b\",\"p\",\"!\"}") } if (!all(!is.na(match(caption.placement, c("bottom","top"))))) { stop("\"caption.placement\" must be either {\"bottom\",\"top\"}") } if (type == "latex") { BCOMMENT <- "% " ECOMMENT <- "\n" ## See e-mail from "John S. Walker <[email protected]>" dated 5-19-2003 ## regarding "texfloat" ## See e-mail form "Fernando Henrique Ferraz P. da Rosa" ## <[email protected]>" dated 10-28-2005 regarding "longtable" if ( tabular.environment == "longtable" & floating == TRUE ) { warning("Attempt to use \"longtable\" with floating = TRUE. Changing to FALSE.") floating <- FALSE } if ( floating == TRUE ) { ## See e-mail from "Pfaff, Bernhard <[email protected]>" ## dated 7-09-2003 regarding "suggestion for an amendment of ## the source" ## See e-mail from "Mitchell, David" ## <[email protected]>" dated 2003-07-09 regarding ## "Additions to R xtable package" ## See e-mail from "Garbade, Sven" ## <[email protected]> dated 2006-05-22 ## regarding the floating environment. BTABLE <- paste("\\begin{", floating.environment, "}", ifelse(!is.null(table.placement), paste("[", table.placement, "]", sep = ""), ""), "\n", sep = "") if ( is.null(latex.environments) || (length(latex.environments) == 0) ) { BENVIRONMENT <- "" EENVIRONMENT <- "" } else { BENVIRONMENT <- "" EENVIRONMENT <- "" if ("center" %in% latex.environments){ BENVIRONMENT <- paste(BENVIRONMENT, "\\centering\n", sep = "") } for (i in 1:length(latex.environments)) { if (latex.environments[i] == "") next if (latex.environments[i] != "center"){ BENVIRONMENT <- paste(BENVIRONMENT, "\\begin{", latex.environments[i], "}\n", sep = "") EENVIRONMENT <- paste("\\end{", latex.environments[i], "}\n", EENVIRONMENT, sep = "") } } } ETABLE <- paste("\\end{", floating.environment, "}\n", sep = "") } else { BTABLE <- "" ETABLE <- "" BENVIRONMENT <- "" EENVIRONMENT <- "" } tmp.index.start <- 1 if ( ! 
include.rownames ) { while ( attr(x, "align", exact = TRUE)[tmp.index.start] == '|' ) tmp.index.start <- tmp.index.start + 1 tmp.index.start <- tmp.index.start + 1 } ## Added "width" argument for use with "tabular*" or ## "tabularx" environments - CR, 7/2/12 if (is.null(width)){ WIDTH <-"" } else if (is.element(tabular.environment, c("tabular", "longtable"))){ warning("Ignoring 'width' argument. The 'tabular' and 'longtable' environments do not support a width specification. Use another environment such as 'tabular*' or 'tabularx' to specify the width.") WIDTH <- "" } else { WIDTH <- paste("{", width, "}", sep = "") } BTABULAR <- paste("\\begin{", tabular.environment, "}", WIDTH, "{", paste(c(attr(x, "align", exact = TRUE)[ tmp.index.start:length(attr(x, "align", exact = TRUE))], "}\n"), sep = "", collapse = ""), sep = "") ## fix 10-26-09 ([email protected]) the following ## 'if' condition is added here to support ## a caption on the top of a longtable if (tabular.environment == "longtable" && caption.placement == "top") { if (is.null(short.caption)){ BCAPTION <- "\\caption{" } else { BCAPTION <- paste("\\caption[", short.caption, "]{", sep = "") } ECAPTION <- "} \\\\ \n" if ((!is.null(caption)) && (type == "latex")) { BTABULAR <- paste(BTABULAR, BCAPTION, caption, ECAPTION, sep = "") } } ## Claudio Agostinelli <[email protected]> dated 2006-07-28 ## add.to.row position -1 BTABULAR <- paste(BTABULAR, lastcol[1], sep = "") ## the \hline at the end, if present, is set in full matrix ETABULAR <- paste("\\end{", tabular.environment, "}\n", sep = "") ## Add scalebox - CR, 7/2/12 if (!is.null(scalebox)){ BTABULAR <- paste("\\scalebox{", scalebox, "}{\n", BTABULAR, sep = "") ETABULAR <- paste(ETABULAR, "}\n", sep = "") } ## BSIZE contributed by Benno <[email protected]> in e-mail ## dated Wednesday, December 01, 2004 if (is.null(size) || !is.character(size)) { BSIZE <- "" ESIZE <- "" } else { if(length(grep("^\\\\", size)) == 0){ size <- paste("\\", size, sep = "") } ## Change suggested by Claudius Loehnert reported in Bug #6260 ## BSIZE <- paste("{", size, "\n", sep = "") ## ESIZE <- "{\n" BSIZE <- paste("\\begingroup", size, "\n", sep = "") ESIZE <- "\\endgroup\n" } BLABEL <- "\\label{" ELABEL <- "}\n" ## Added caption width ([email protected]) if(!is.null(caption.width)){ BCAPTION <- paste("\\parbox{",caption.width,"}{",sep="") ECAPTION <- "}" } else { BCAPTION <- NULL ECAPTION <- NULL } if (is.null(short.caption)){ BCAPTION <- paste(BCAPTION,"\\caption{",sep="") } else { BCAPTION <- paste(BCAPTION,"\\caption[", short.caption, "]{", sep="") } ECAPTION <- paste(ECAPTION,"} \n",sep="") BROW <- "" EROW <- " \\\\ \n" BTH <- "" ETH <- "" STH <- " & " BTD1 <- " & " BTD2 <- "" BTD3 <- "" ETD <- "" } else { BCOMMENT <- "<!-- " ECOMMENT <- " -->\n" BTABLE <- paste("<table ", html.table.attributes, ">\n", sep = "") ETABLE <- "</table>\n" BENVIRONMENT <- "" EENVIRONMENT <- "" BTABULAR <- "" ETABULAR <- "" BSIZE <- "" ESIZE <- "" BLABEL <- "<a name=" ELABEL <- "></a>\n" BCAPTION <- paste("<caption align=\"", caption.placement, "\"> ", sep = "") ECAPTION <- " </caption>\n" BROW <- "<tr>" EROW <- " </tr>\n" BTH <- " <th> " ETH <- " </th> " STH <- " </th> <th> " BTD1 <- " <td align=\"" align.tmp <- attr(x, "align", exact = TRUE) align.tmp <- align.tmp[align.tmp!="|"] if (nrow(x) == 0) { BTD2 <- matrix(nrow = 0, ncol = ncol(x)+pos) } else { BTD2 <- matrix(align.tmp[(2-pos):(ncol(x)+1)], nrow = nrow(x), ncol = ncol(x)+pos, byrow = TRUE) } ## Based on contribution from Jonathan Swinton <[email protected]> ## in 
e-mail dated Wednesday, January 17, 2007 BTD2[regexpr("^p", BTD2)>0] <- "left" BTD2[BTD2 == "r"] <- "right" BTD2[BTD2 == "l"] <- "left" BTD2[BTD2 == "c"] <- "center" BTD3 <- "\"> " ETD <- " </td>" } result <- string("", file = file, append = append) info <- R.Version() ## modified Claudio Agostinelli <[email protected]> dated 2006-07-28 ## to set automatically the package version if (comment){ result <- result + BCOMMENT + type + " table generated in " + info$language + " " + info$major + "." + info$minor + " by xtable " + packageDescription('xtable')$Version + " package" + ECOMMENT if (!is.null(timestamp)){ result <- result + BCOMMENT + timestamp + ECOMMENT } } ## Claudio Agostinelli <[email protected]> dated 2006-07-28 only.contents if (!only.contents) { result <- result + BTABLE result <- result + BENVIRONMENT if ( floating == TRUE ) { if ((!is.null(caption)) && (type == "html" ||caption.placement == "top")) { result <- result + BCAPTION + caption + ECAPTION } if (!is.null(attr(x, "label", exact = TRUE)) && (type == "latex" && caption.placement == "top")) { result <- result + BLABEL + attr(x, "label", exact = TRUE) + ELABEL } } result <- result + BSIZE result <- result + BTABULAR } ## Claudio Agostinelli <[email protected]> dated 2006-07-28 ## include.colnames, include.rownames if (include.colnames) { result <- result + BROW + BTH if (include.rownames) { result <- result + STH } ## David G. Whiting in e-mail 2007-10-09 if (is.null(sanitize.colnames.function)) { CNAMES <- sanitize(names(x), type = type) } else { CNAMES <- sanitize.colnames.function(names(x)) } if (rotate.colnames) { ##added by Markus Loecher, 2009-11-16 CNAMES <- paste("\\begin{sideways}", CNAMES, "\\end{sideways}") } result <- result + paste(CNAMES, collapse = STH) result <- result + ETH + EROW } cols <- matrix("", nrow = nrow(x), ncol = ncol(x)+pos) if (include.rownames) { ## David G. Whiting in e-mail 2007-10-09 if (is.null(sanitize.rownames.function)) { RNAMES <- sanitize(row.names(x), type = type) } else { RNAMES <- sanitize.rownames.function(row.names(x)) } if (rotate.rownames) { ##added by Markus Loecher, 2009-11-16 RNAMES <- paste("\\begin{sideways}", RNAMES, "\\end{sideways}") } cols[, 1] <- RNAMES } ## Begin vectorizing the formatting code by Ian Fellows [[email protected]] ## 06 Dec 2011 ## ## disp <- function(y) { ## if (is.factor(y)) { ## y <- levels(y)[y] ## } ## if (is.list(y)) { ## y <- unlist(y) ## } ## return(y) ## } varying.digits <- is.matrix( attr( x, "digits", exact = TRUE ) ) ## Code for letting "digits" be a matrix was provided by ## Arne Henningsen <[email protected]> ## in e-mail dated 2005-06-04. 
##if( !varying.digits ) { ## modified Claudio Agostinelli <[email protected]> dated 2006-07-28 ## attr(x,"digits") <- matrix( attr( x, "digits",exact=TRUE ), ## nrow = nrow(x), ncol = ncol(x)+1, byrow = TRUE ) ##} for(i in 1:ncol(x)) { xcol <- x[, i] if(is.factor(xcol)) xcol <- as.character(xcol) if(is.list(xcol)) xcol <- sapply(xcol, unlist) ina <- is.na(xcol) is.numeric.column <- is.numeric(xcol) if(is.character(xcol)) { cols[, i+pos] <- xcol } else { if (is.null(format.args)){ format.args <- list() } if (is.null(format.args$decimal.mark)){ format.args$decimal.mark <- options()$OutDec } if(!varying.digits){ curFormatArgs <- c(list( x = xcol, format = ifelse(attr(x, "digits", exact = TRUE )[i+1] < 0, "E", attr(x, "display", exact = TRUE )[i+1]), digits = abs(attr(x, "digits", exact = TRUE )[i+1])), format.args) cols[, i+pos] <- do.call("formatC", curFormatArgs) }else{ for( j in 1:nrow( cols ) ) { curFormatArgs <- c(list( x = xcol[j], format = ifelse(attr(x, "digits", exact = TRUE )[j, i+1] < 0, "E", attr(x, "display", exact = TRUE )[i+1]), digits = abs(attr(x, "digits", exact = TRUE )[j, i+1])), format.args) cols[j, i+pos] <- do.call("formatC", curFormatArgs) } } } ## End Ian Fellows changes if ( any(ina) ) cols[ina, i+pos] <- NA.string ## Based on contribution from Jonathan Swinton <[email protected]> ## in e-mail dated Wednesday, January 17, 2007 if ( is.numeric.column ) { cols[, i+pos] <- sanitize.numbers(cols[, i+pos], type = type, math.style.negative = math.style.negative, math.style.exponents = math.style.exponents) } else { if (is.null(sanitize.text.function)) { cols[, i+pos] <- sanitize(cols[, i+pos], type = type) } else { cols[, i+pos] <- sanitize.text.function(cols[, i+pos]) } } } multiplier <- 5 full <- matrix("", nrow = nrow(x), ncol = multiplier*(ncol(x)+pos)+2) full[, 1] <- BROW full[, multiplier*(0:(ncol(x)+pos-1))+2] <- BTD1 full[, multiplier*(0:(ncol(x)+pos-1))+3] <- BTD2 full[, multiplier*(0:(ncol(x)+pos-1))+4] <- BTD3 full[, multiplier*(0:(ncol(x)+pos-1))+5] <- cols full[, multiplier*(0:(ncol(x)+pos-1))+6] <- ETD full[, multiplier*(ncol(x)+pos)+2] <- paste(EROW, lastcol[-(1:2)], sep = " ") if (type == "latex") full[, 2] <- "" result <- result + lastcol[2] + paste(t(full), collapse = "") if (!only.contents) { if (tabular.environment == "longtable") { ## booktabs change added the if() - 1 Feb 2012 if(!booktabs) { result <- result + PHEADER } ## fix 10-27-09 Liviu Andronic ([email protected]) the ## following 'if' condition is inserted in order to avoid ## that bottom caption interferes with a top caption of a longtable if(caption.placement == "bottom"){ if ((!is.null(caption)) && (type == "latex")) { result <- result + BCAPTION + caption + ECAPTION } } if (!is.null(attr(x, "label", exact = TRUE))) { result <- result + BLABEL + attr(x, "label", exact = TRUE) + ELABEL } ETABULAR <- "\\end{longtable}\n" } result <- result + ETABULAR result <- result + ESIZE if ( floating == TRUE ) { if ((!is.null(caption)) && (type == "latex" && caption.placement == "bottom")) { result <- result + BCAPTION + caption + ECAPTION } if (!is.null(attr(x, "label", exact = TRUE)) && caption.placement == "bottom") { result <- result + BLABEL + attr(x, "label", exact = TRUE) + ELABEL } } result <- result + EENVIRONMENT result <- result + ETABLE } result <- sanitize.final(result, type = type) if (print.results){ print(result) } return(invisible(result$text)) } "+.string" <- function(x, y) { x$text <- paste(x$text, as.string(y)$text, sep = "") return(x) } print.string <- function(x, ...) 
{ cat(x$text, file = x$file, append = x$append) return(invisible()) } string <- function(text, file = "", append = FALSE) { x <- list(text = text, file = file, append = append) class(x) <- "string" return(x) } as.string <- function(x, file = "", append = FALSE) { if (is.null(attr(x, "class", exact = TRUE))) switch(data.class(x), character = return(string(x, file, append)), numeric = return(string(as.character(x), file, append)), stop("Cannot coerce argument to a string")) if (class(x) == "string") return(x) stop("Cannot coerce argument to a string") } is.string <- function(x) { return(class(x) == "string") }
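## Illustrative usage sketch, not exported documentation: typical calls to
## print.xtable() using arguments from the signature above. Uses the base R
## mtcars data set; the caption and label values are arbitrary.
if (FALSE) {
    tab <- xtable(head(mtcars), caption = "Motor Trend road tests",
                  label = "tab:mtcars")
    ## LaTeX output with booktabs rules and the caption above the table
    print(tab, booktabs = TRUE, caption.placement = "top",
          include.rownames = FALSE)
    ## the same table as HTML
    print(tab, type = "html")
}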
/scratch/gouwar.j/cran-all/cranData/xtable/R/print.xtable.R
sanitize <- function(str, type = "latex") { if(type == "latex"){ result <- str result <- gsub("\\\\", "SANITIZE.BACKSLASH", result) result <- gsub("$", "\\$", result, fixed = TRUE) result <- gsub(">", "$>$", result, fixed = TRUE) result <- gsub("<", "$<$", result, fixed = TRUE) result <- gsub("|", "$|$", result, fixed = TRUE) result <- gsub("{", "\\{", result, fixed = TRUE) result <- gsub("}", "\\}", result, fixed = TRUE) result <- gsub("%", "\\%", result, fixed = TRUE) result <- gsub("&", "\\&", result, fixed = TRUE) result <- gsub("_", "\\_", result, fixed = TRUE) result <- gsub("#", "\\#", result, fixed = TRUE) result <- gsub("^", "\\verb|^|", result, fixed = TRUE) result <- gsub("~", "\\~{}", result, fixed = TRUE) result <- gsub("SANITIZE.BACKSLASH", "$\\backslash$", result, fixed = TRUE) return(result) } else { result <- str result <- gsub("&", "&amp;", result, fixed = TRUE) result <- gsub(">", "&gt;", result, fixed = TRUE) result <- gsub("<", "&lt;", result, fixed = TRUE) return(result) } } sanitize.numbers <- function(str, type, math.style.negative = FALSE, math.style.exponents = FALSE){ if (type == "latex"){ result <- str if ( math.style.negative ) { for(i in 1:length(str)) { result[i] <- gsub("-", "$-$", result[i], fixed = TRUE) } } if ( math.style.exponents ) { if (is.logical(math.style.exponents) && ! math.style.exponents ) { } else if (is.logical(math.style.exponents) && math.style.exponents || math.style.exponents == "$$" ) { for(i in 1:length(str)) { result[i] <- gsub("^\\$?(-?)\\$?([0-9.]+)[eE]\\$?(-?)\\+?\\$?0*(\\d+)$", "$\\1\\2 \\\\times 10^{\\3\\4}$", result[i]) } } else if (math.style.exponents == "ensuremath") { for(i in 1:length(str)) { result[i] <- gsub("^\\$?(-?)\\$?([0-9.]+)[eE]\\$?(-?)\\+?\\$?0*(\\d+)$", "\\\\ensuremath{\\1\\2 \\\\times 10^{\\3\\4}}", result[i]) } } else if (math.style.exponents == "UTF8" || math.style.exponents == "UTF-8") { for(i in 1:length(str)) { ## this code turns 1e5 into a UTF-8 representation of 1\times10^5 if (all(grepl("^\\$?(-?)\\$?([0-9.]+)[eE]\\$?(-?)\\+?\\$?0*(\\d+)$", result[i]))) { temp <- strsplit(result[i], "[eE]")[[1]] result[i] <- paste0(temp[1], "\u00d710", chartr("-1234567890", "\u207b\u00b9\u00b2\u00b3\u2074\u2075\u2076\u2077\u2078\u2079\u2070", temp[2])) } } } } return(result) } else { return(str) } } sanitize.final <- function(str, type){ if (type == "latex"){ return(str) } else { str$text <- gsub(" *", " ", str$text, fixed = TRUE) str$text <- gsub(' align="left"', "", str$text, fixed = TRUE) return(str) } } ### Some trivial helper functions ### Suggested by Stefan Edwards, [email protected] ### Helper function for disabling sanitizing as.is <- function(str) {str} ### Helper function for embedding names in a math environment as.math <- function(str, ...) { paste0('$',str,'$', ...) }
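## Illustrative usage sketch, not exported documentation: the sanitizers
## above escape characters that are special in LaTeX or HTML, and
## as.is()/as.math() can be supplied as sanitize.*.function arguments to
## print.xtable() to bypass or wrap that escaping. No exact output strings
## are asserted here.
if (FALSE) {
    sanitize(c("A & B", "50%", "beta_1"), type = "latex")  # escapes &, %, _
    sanitize("A & B", type = "html")                       # uses &amp; etc.
    sanitize.numbers("-1.2e-05", type = "latex",
                     math.style.negative = TRUE,
                     math.style.exponents = TRUE)
    print(xtable(head(mtcars)), sanitize.rownames.function = as.is)
}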
/scratch/gouwar.j/cran-all/cranData/xtable/R/sanitize.R
### xtable package ### ### Produce LaTeX and HTML tables from R objects. ### ### Copyright 2000-2013 David B. Dahl <[email protected]> ### ### This file is part of the `xtable' library for R and related languages. ### It is made available under the terms of the GNU General Public ### License, version 2, or at your option, any later version, ### incorporated herein by reference. ### ### This program is distributed in the hope that it will be ### useful, but WITHOUT ANY WARRANTY; without even the implied ### warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR ### PURPOSE. See the GNU General Public License for more ### details. ### ### You should have received a copy of the GNU General Public ### License along with this program; if not, write to the Free ### Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, ### MA 02111-1307, USA "caption<-" <- function(x, value) UseMethod("caption<-") "caption<-.xtable" <- function(x, value) { if (length(value) > 2) stop("\"caption\" must have length 1 or 2") attr(x, "caption") <- value return(x) } caption <- function(x, ...) UseMethod("caption") caption.xtable <- function(x, ...) { return(attr(x, "caption", exact = TRUE)) } "label<-" <- function(x, value) UseMethod("label<-") "label<-.xtable" <- function(x, value) { if (length(value) > 1) stop("\"label\" must have length 1") attr(x, "label") <- value return(x) } label <- function(x, ...) UseMethod("label") label.xtable <- function(x, ...) { return(attr(x, "label", exact = TRUE)) } "align<-" <- function(x, value) UseMethod("align<-") ### Based on contribution from Jonathan Swinton <[email protected]> ### in e-mail dated Wednesday, January 17, 2007 .alignStringToVector <- function(aString) { ## poor mans parsing - separating string of form "l{2in}llr|p{1in}c|{1in}" ## into "l{2in}" "l" "l" "r" "|" "p{1in}" "c" "|{1in}" aString.Align <- character(0); aString.Width <- character(0); wString <- aString while( nchar(wString) > 0) { aString.Align <- c(aString.Align, substr(wString, 1, 1)) ## is it followed by a brace? thisWidth <- "" if ( nchar(wString) > 1 & substr(wString, 2, 2) == "{") { beforeNextBrace <- regexpr("[^\\]\\}", wString) if (beforeNextBrace <0 ) { stop("No closing } in align string") } thisWidth <- substr(wString, 2, beforeNextBrace + 1) wString <- substr(wString, beforeNextBrace + 2, nchar(wString)) } else { wString <- substr(wString, 2, nchar(wString)) } aString.Width <- c(aString.Width, thisWidth) } alignAllowed <- c("l","r","p","c","|","X") if (any( !(aString.Align %in% alignAllowed))) { warning("Nonstandard alignments in align string") } res <- paste(aString.Align, aString.Width, sep = "") res } ###.alignStringToVector ("l{2in}llr|p{1in}c|{1in}") ###.alignStringToVector ("l{2in}llr|p{1in}c|") ### latex syntax error, but gives wrong alignment ###.alignStringToVector ("{2in}llr|p{1in}c|") ###.alignStringToVector("llllp{3cm}") "align<-.xtable" <- function(x, value) { ### Based on contribution from Benno <[email protected]> ### in e-mail dated Wednesday, December 01, 2004 ### Based on contribution from Jonathan Swinton <[email protected]> ### in e-mail dated Wednesday, January 17, 2007 ## cat("%", value, "\n") if ( (!is.null(value)) && ( is.character(value) ) && ( length(value) == 1 ) && ( nchar(value) > 1 ) ) { value <- .alignStringToVector(value) } ## That should have checked we had only lrcp| ## but what if the "if statement" is false? ## For simplicity, deleting check present in version 1.4-2 and earlier. 
c.value <- if (any(!is.na(match(value, "|")))) { value[-which(value == '|')] } else { value } if (length(c.value) != ncol(x) + 1) stop(paste("\"align\" must have length equal to", ncol(x) + 1, "( ncol(x) + 1 )")) attr(x, "align") <- value return(x) } align <- function(x, ...) UseMethod("align") align.xtable <- function(x, ...) { return(attr(x, "align", exact = TRUE)) } "digits<-" <- function(x, value) UseMethod("digits<-") "digits<-.xtable" <- function(x, value) { if( is.matrix( value ) ) { if( ncol( value ) != ncol(x) + 1 || nrow( value ) != nrow(x) ) { stop( "if argument 'digits' is a matrix, it must have columns equal", " to ", ncol(x) + 1, " ( ncol(x) + 1 ) and rows equal to ", nrow(x), " ( nrow( x )" ) } } else { if( length(value) == 1 ) { value <- rep(value, ncol(x) + 1) } if( length( value ) > 1 & length( value ) != ncol(x) + 1 ) { stop( "if argument 'digits' is a vector of length more than one, it must have length equal", " to ", ncol(x) + 1, " ( ncol(x) + 1 )" ) } } if (!is.numeric(value)) stop("\"digits\" must be numeric") attr(x, "digits") <- value return(x) } digits <- function(x, ...) UseMethod("digits") digits.xtable <- function(x, ...) { return(attr(x, "digits", exact = TRUE)) } "display<-" <- function(x, value) UseMethod("display<-") "display<-.xtable" <- function(x, value) { if (length(value) != ncol(x) + 1) stop(paste("\"display\" must have length equal to", ncol(x) + 1, "( ncol(x) + 1 )")) if (!all(!is.na(match(value, c("d","f","e","E","g","G","fg","s"))))) stop("\"display\" must be in {\"d\",\"f\",\"e\",\"E\",\"g\",\"G\", \"fg\", \"s\"}") attr(x, "display") <- value return(x) } display <- function(x, ...) UseMethod("display") display.xtable <- function(x, ...) { return(attr(x, "display", exact = TRUE)) }
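## Illustrative usage sketch, not exported documentation: the replacement
## functions above let the attributes of an existing xtable be adjusted after
## creation. Uses the base R mtcars data set; the caption/label text is
## arbitrary.
if (FALSE) {
    tab <- xtable(head(mtcars))
    align(tab)   <- c("l", rep("r", ncol(tab)))  # row names left, data right
    digits(tab)  <- 3                            # recycled across columns
    caption(tab) <- c("Motor Trend road tests", "mtcars")  # long and short captions
    label(tab)   <- "tab:mtcars"
    print(tab)
}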
/scratch/gouwar.j/cran-all/cranData/xtable/R/table.attributes.R
### xtable package ### ### Produce LaTeX and HTML tables from R objects. ### ### Copyright 2000-2013 David B. Dahl <[email protected]> ### ### Maintained by David Scott <[email protected]> ### ### This file is part of the `xtable' library for R and related languages. ### It is made available under the terms of the GNU General Public ### License, version 2, or at your option, any later version, ### incorporated herein by reference. ### ### This program is distributed in the hope that it will be ### useful, but WITHOUT ANY WARRANTY; without even the implied ### warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR ### PURPOSE. See the GNU General Public License for more ### details. ### ### You should have received a copy of the GNU General Public ### License along with this program; if not, write to the Free ### Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, ### MA 02111-1307, USA ### The generic for toLatex() is declared in the base package "utils" toLatex.xtable <- function(object, ...){ ## Initially just capturing the output of print.xtable(). At some ## point this could be refactored to have print.xtable() call ## toLatex() instead. - CR, 30/01/2012 dotArgs <- list(...) dotArgs$x <- object dotArgs$type <- "latex" dotArgs$print.results <- FALSE z <- do.call("print.xtable", dotArgs) z <- strsplit(z, split="\n")[[1]] class(z) <- "Latex" z }
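## Illustrative usage sketch, not exported documentation: toLatex() captures
## the print.xtable() output as a character vector of class "Latex", which
## can be written out or included in Sweave/knitr documents.
if (FALSE) {
    z <- toLatex(xtable(head(mtcars)))
    class(z)      # "Latex"
    writeLines(z) # the LaTeX code, one line per element
}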
/scratch/gouwar.j/cran-all/cranData/xtable/R/toLatex.R
### xtable package ### ### Produce LaTeX and HTML tables from R objects. ### ### Copyright 2000-2013 David B. Dahl <[email protected]> ### ### This file is part of the `xtable' library for R and related languages. ### It is made available under the terms of the GNU General Public ### License, version 2, or at your option, any later version, ### incorporated herein by reference. ### ### This program is distributed in the hope that it will be ### useful, but WITHOUT ANY WARRANTY; without even the implied ### warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR ### PURPOSE. See the GNU General Public License for more ### details. ### ### You should have received a copy of the GNU General Public ### License along with this program; if not, write to the Free ### Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, ### MA 02111-1307, USA xtable <- function(x, caption = NULL, label = NULL, align = NULL, digits = NULL, display = NULL, auto = FALSE, ...) { UseMethod("xtable") } ### data.frame and matrix objects xtable.data.frame <- function(x, caption = NULL, label = NULL, align = NULL, digits = NULL, display = NULL, auto = FALSE, ...) { logicals <- unlist(lapply(x, is.logical)) ##x[, logicals] <- lapply(x[, logicals], as.character) ## Patch for logicals bug, no 1911 ## David Scott, <[email protected]>, 2012-08-10 x[, logicals] <- lapply(x[, logicals, drop = FALSE], as.character) characters <- unlist(lapply(x, is.character)) factors <- unlist(lapply(x, is.factor)) ints <- sapply(x, is.integer) class(x) <- c("xtable","data.frame") caption(x) <- caption label(x) <- label if(auto && is.null(align)) align <- xalign(x) if(auto && is.null(digits)) digits <- xdigits(x) if(auto && is.null(display)) display <- xdisplay(x) align(x) <- switch(1+is.null(align), align, c("r",c("r","l")[(characters|factors)+1])) digits(x) <- switch(1+is.null(digits), digits, c(0,rep(2,ncol(x)))) ## Patch from Seth Falcon <[email protected]>, 18-May-2007 if (is.null(display)) { display <- rep("f", ncol(x)) display[ints] <- "d" display[characters | factors] <- "s" display <- c("s", display) } display(x) <- display return(x) } xtable.matrix <- function(x, caption = NULL, label = NULL, align = NULL, digits = NULL, display = NULL, auto = FALSE, ...) { return(xtable.data.frame(data.frame(x, check.names = FALSE), caption = caption, label = label, align = align, digits = digits, display = display, auto = auto, ...)) } ### table objects (of 1 or 2 dimensions) by Guido Gay, 9 Feb 2007 ### Fixed to pass R checks by DBD, 9 May 2007 xtable.table <- function(x, caption = NULL, label = NULL, align = NULL, digits = NULL, display = NULL, auto = FALSE, ...) { if (length(dim(x)) == 1) { return(xtable.matrix(matrix(x, dimnames = list(rownames(x), names(dimnames(x)))), caption = caption, label = label, align = align, digits = digits, display = display, auto = auto, ...)) } else if (length(dim(x))==2) { return(xtable.matrix(matrix(x, ncol = dim(x)[2], nrow = dim(x)[1], dimnames = list(rownames(x), colnames(x))), caption = caption, label = label, align = align, digits = digits, display = display, auto = auto, ...)) } else { stop("xtable.table is not implemented for tables of > 2 dimensions") } } ### anova objects xtable.anova <- function(x, caption = NULL, label = NULL, align = NULL, digits = NULL, display = NULL, auto = FALSE, ...) 
{ suggested.digits <- c(0,rep(2, ncol(x))) suggested.digits[grep("Pr\\(>", names(x))+1] <- 4 suggested.digits[grep("P\\(>", names(x))+1] <- 4 suggested.digits[grep("Df", names(x))+1] <- 0 class(x) <- c("xtable","data.frame") caption(x) <- caption label(x) <- label if(auto && is.null(align)) align <- xalign(x) if(auto && is.null(digits)) digits <- xdigits(x) if(auto && is.null(display)) display <- xdisplay(x) align(x) <- switch(1+is.null(align), align, c("l",rep("r", ncol(x)))) digits(x) <- switch(1+is.null(digits), digits, suggested.digits) display(x) <- switch(1+is.null(display), display, c("s",rep("f", ncol(x)))) return(x) } ### aov objects xtable.aov <- function(x, caption = NULL, label = NULL, align = NULL, digits = NULL, display = NULL, auto = FALSE, ...) { return(xtable.anova(anova(x, ...), caption = caption, label = label, align = align, digits = digits, display = display, auto = auto, ...)) } xtable.summary.aov <- function(x, caption = NULL, label = NULL, align = NULL, digits = NULL, display = NULL, auto = FALSE, ...) { return(xtable.anova(x[[1]], caption = caption, label = label, align = align, digits = digits, display = display, auto = auto, ...)) } xtable.summary.aovlist <- function(x, caption = NULL, label = NULL, align = NULL, digits = NULL, display = NULL, auto = FALSE, ...) { for (i in 1:length(x)) { if (i == 1) { result <- xtable.summary.aov(x[[i]], caption = caption, label = label, align = align, digits = digits, display = display, auto = auto, ...) } else { result <- rbind(result, xtable.anova(x[[i]][[1]], caption = caption, label = label, align = align, digits = digits, display = display, auto = auto, ...)) } } return(result) } xtable.aovlist <- function(x, caption = NULL, label = NULL, align = NULL, digits = NULL, display = NULL, auto = FALSE, ...) { return(xtable.summary.aovlist(summary(x), caption = caption, label = label, align = align, digits = digits, display = display, auto = auto, ...)) } ### lm objects xtable.lm <- function(x, caption = NULL, label = NULL, align = NULL, digits = NULL, display = NULL, auto = FALSE, ...) { return(xtable.summary.lm(summary(x), caption = caption, label = label, align = align, digits = digits, display = display, auto = auto, ...)) } xtable.summary.lm <- function(x, caption = NULL, label = NULL, align = NULL, digits = NULL, display = NULL, auto = FALSE, ...) { x <- data.frame(x$coef, check.names = FALSE) class(x) <- c("xtable","data.frame") caption(x) <- caption label(x) <- label if(auto && is.null(align)) align <- xalign(x) if(auto && is.null(digits)) digits <- xdigits(x) if(auto && is.null(display)) display <- xdisplay(x) align(x) <- switch(1+is.null(align), align, c("r","r","r","r","r")) digits(x) <- switch(1+is.null(digits), digits, c(0,4,4,2,4)) display(x) <- switch(1+is.null(display), display, c("s","f","f","f","f")) return(x) } ### glm objects xtable.glm <- function(x, caption = NULL, label = NULL, align = NULL, digits = NULL, display = NULL, auto = FALSE, ...) { return(xtable.summary.glm(summary(x), caption = caption, label = label, align = align, digits = digits, display = display, auto = auto, ...)) } xtable.summary.glm <- function(x, caption = NULL, label = NULL, align = NULL, digits = NULL, display = NULL, auto = FALSE, ...) { return(xtable.summary.lm(x, caption = caption, label = label, align = align, digits = digits, display = display, auto = auto, ...)) } ### prcomp objects xtable.prcomp <- function(x, caption = NULL, label = NULL, align = NULL, digits = NULL, display = NULL, auto = FALSE, ...) 
{ x <- data.frame(x$rotation, check.names = FALSE) class(x) <- c("xtable","data.frame") caption(x) <- caption label(x) <- label if(auto && is.null(align)) align <- xalign(x) if(auto && is.null(digits)) digits <- xdigits(x) if(auto && is.null(display)) display <- xdisplay(x) align(x) <- switch(1+is.null(align), align, c("r",rep("r", ncol(x)))) digits(x) <- switch(1+is.null(digits), digits, c(0,rep(4, ncol(x)))) display(x) <- switch(1+is.null(display), display, c("s",rep("f", ncol(x)))) return(x) } xtable.summary.prcomp <- function(x, caption = NULL, label = NULL, align = NULL, digits = NULL, display = NULL, auto = FALSE, ...) { x <- data.frame(x$importance, check.names = FALSE) class(x) <- c("xtable","data.frame") caption(x) <- caption label(x) <- label if(auto && is.null(align)) align <- xalign(x) if(auto && is.null(digits)) digits <- xdigits(x) if(auto && is.null(display)) display <- xdisplay(x) align(x) <- switch(1+is.null(align), align, c("r",rep("r", ncol(x)))) digits(x) <- switch(1+is.null(digits), digits, c(0,rep(4, ncol(x)))) display(x) <- switch(1+is.null(display), display, c("s",rep("f", ncol(x)))) return(x) } ### Slightly modified version of xtable.coxph contributed on r-help by ### Date: Wed, 2 Oct 2002 17:47:56 -0500 (CDT) ### From: Jun Yan <[email protected]> ### Subject: Re: [R] xtable for Cox model output xtable.coxph <- function (x, caption = NULL, label = NULL, align = NULL, digits = NULL, display = NULL, auto = FALSE, ...) { cox <- x beta <- cox$coef se <- sqrt(diag(cox$var)) if (is.null(cox$naive.var)) { tmp <- cbind(beta, exp(beta), se, beta/se, 1 - pchisq((beta/se)^2, 1)) dimnames(tmp) <- list(names(beta), c("coef", "exp(coef)", "se(coef)", "z", "p")) } else { tmp <- cbind( beta, exp(beta), se, beta/se, signif(1 - pchisq((beta/se)^2, 1), digits - 1)) dimnames(tmp) <- list(names(beta), c("coef", "exp(coef)", "robust se", "z", "p")) } return(xtable(tmp, caption = caption, label = label, align = align, digits = digits, display = display, auto = auto, ...)) } ### Additional method: xtable.ts ### Contributed by David Mitchell ([email protected]) ### Date: July 2003 xtable.ts <- function(x, caption = NULL, label = NULL, align = NULL, digits = NULL, display = NULL, auto = FALSE, ...) { if (inherits(x, "ts") && !is.null(ncol(x))) { ## COLNAMES <- paste(colnames(x)); tp.1 <- trunc(time(x)) tp.2 <- trunc(cycle(x)) day.abb <- c("Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat") ROWNAMES <- switch(frequency(x), tp.1, "Arg2", "Arg3", # Dummy arguments paste(tp.1, c("Q1", "Q2", "Q3", "Q4")[tp.2], sep = " "), "Arg5", "Arg6", paste("Wk.", tp.1, " ", day.abb[tp.2], sep = ""), "Arg8", "Arg9", "Arg10", "Arg11", paste(tp.1, month.abb[tp.2], sep = " ")) tmp <- data.frame(x, row.names = ROWNAMES); } else if (inherits(x, "ts") && is.null(ncol(x))) { COLNAMES <- switch(frequency(x), "Value", "Arg2", "Arg3", # Dummy arguments c("Q1", "Q2", "Q3", "Q4"), "Arg5", "Arg6", day.abb, "Arg8", "Arg9", "Arg10", "Arg11", month.abb) ROWNAMES <- seq(from = start(x)[1], to = end(x)[1]) tmp <- data.frame(matrix(c(rep(NA, start(x)[2] - 1), x, rep(NA, frequency(x) - end(x)[2])), ncol = frequency(x), byrow = TRUE), row.names = ROWNAMES) names(tmp) <- COLNAMES } return(xtable(tmp, caption = caption, label = label, align = align, digits = digits, display = display, auto = auto, ...)) } ### Suggested by Ajay Narottam Shah <[email protected]> in e-mail 2006/07/22 xtable.zoo <- function(x, caption = NULL, label = NULL, align = NULL, digits = NULL, display = NULL, auto = FALSE, ...) 
{ return(xtable(as.ts(x), caption = caption, label = label, align = align, digits = digits, display = display, auto = auto, ...)) } ### Date: Fri, 29 May 2015 11:41:04 +0200 ### From: Martin G. <[email protected]> ### Subject: [xtable] Code for spdep, splm and sphet objects outputs ### package spdep ### sarlm objects ## xtable.sarlm <- function(x, caption = NULL, label = NULL, align = NULL, ## digits = NULL, display = NULL, auto = FALSE, ...) { ## return(xtable.summary.sarlm(summary(x), caption = caption, label = label, ## align = align, digits = digits, ## display = display, auto = auto, ...)) ## } ## xtable.summary.sarlm <- function(x, caption = NULL, label = NULL, align = NULL, ## digits = NULL, display = NULL, auto = FALSE, ## ...) { ## x <- data.frame(x$Coef, check.names = FALSE) ## class(x) <- c("xtable","data.frame") ## caption(x) <- caption ## label(x) <- label ## if(auto && is.null(align)) align <- xalign(x) ## if(auto && is.null(digits)) digits <- xdigits(x) ## if(auto && is.null(display)) display <- xdisplay(x) ## align(x) <- switch(1+is.null(align), align, c("r","r","r","r","r")) ## digits(x) <- switch(1+is.null(digits), digits, c(0,4,4,2,4)) ## display(x) <- switch(1+is.null(display), display, c("s","f","f","f","f")) ## return(x) ## } ## ### spautolm objects: added by David Scott, 6/1/2016, after suggestion by ## ### Guido Schulz ## ### Date: Wed, 29 Apr 2015 10:45:16 +0200 ## ### Guido Schulz <[email protected]> ## xtable.spautolm <- function(x, caption = NULL, label = NULL, align = NULL, ## digits = NULL, display = NULL, auto = FALSE, ...) { ## return(xtable.summary.sarlm(summary(x), caption = caption, label = label, ## align = align, digits = digits, ## display = display, auto = auto, ...)) ## } ## xtable.summary.spautolm <- function(x, caption = NULL, label = NULL, ## align = NULL, digits = NULL, ## display = NULL, auto = FALSE, ...) { ## return(xtable.summary.sarlm(summary(x), caption = caption, label = label, ## align = align, digits = digits, ## display = display, auto = auto, ...)) ## } ## ### gmsar objects ## xtable.gmsar <- function(x, caption = NULL, label = NULL, align = NULL, ## digits = NULL, display = NULL, auto = FALSE, ...) { ## return(xtable.summary.sarlm(summary(x), caption = caption, label = label, ## align = align, digits = digits, ## display = display, auto = auto, ...)) ## } ## xtable.summary.gmsar <- function(x, caption = NULL, label = NULL, align = NULL, ## digits = NULL, display = NULL, ## auto = FALSE, ...) { ## return(xtable.summary.sarlm(x, caption = caption, label = label, ## align = align, digits = digits, ## display = display, auto = auto, ...)) ## } ## ### stsls objects ## xtable.stsls <- function(x, caption = NULL, label = NULL, align = NULL, ## digits = NULL, display = NULL, auto = FALSE, ...) { ## return(xtable.summary.sarlm(summary(x), caption = caption, label = label, ## align = align, digits = digits, ## display = display, auto = auto, ...)) ## } ## xtable.summary.stsls <- function(x, caption = NULL, label = NULL, align = NULL, ## digits = NULL, display = NULL, ## auto = FALSE, ...) { ## return(xtable.summary.sarlm(x, caption = caption, label = label, ## align = align, digits = digits, ## display = display, auto = auto, ...)) ## } ## ### pred.sarlm objects ## xtable.sarlm.pred <- function(x, caption = NULL, label = NULL, align = NULL, ## digits = NULL, display = NULL, ## auto = FALSE, ...) 
{ ## return(xtable(as.data.frame(x), caption = caption, label = label, ## align = align, digits = digits, ## display = display, auto = auto, ...)) ## } ## ### lagImpact objects ## xtable.lagImpact <- function(x, caption = NULL, label = NULL, align = NULL, ## digits = NULL, display = NULL, ## auto = FALSE, ...) { ## requireNamespace('spdep') ## lagImpactMat <- get('lagImpactMat', environment(spdep::spdep)) ## xtable(lagImpactMat(x), caption = caption, label = label, ## align = align, digits = digits, ## display = display, auto = auto, ...) ## } ### package splm ### splm objects ## xtable.splm <- function(x, caption = NULL, label = NULL, align = NULL, ## digits = NULL, display = NULL, auto = FALSE, ...) { ## return(xtable.summary.splm(summary(x), caption = caption, label = label, ## align = align, digits = digits, ## display = display, auto = auto, ...)) ## } ## xtable.summary.splm <- function(x, caption = NULL, label = NULL, align = NULL, ## digits = NULL, display = NULL, auto = FALSE, ## ...) { ## x <- data.frame(x$CoefTable, check.names = FALSE) ## class(x) <- c("xtable","data.frame") ## caption(x) <- caption ## label(x) <- label ## if(auto && is.null(align)) align <- xalign(x) ## if(auto && is.null(digits)) digits <- xdigits(x) ## if(auto && is.null(display)) display <- xdisplay(x) ## align(x) <- switch(1+is.null(align), align, c("r","r","r","r","r")) ## digits(x) <- switch(1+is.null(digits), digits, c(0,4,4,2,4)) ## display(x) <- switch(1+is.null(display), display, c("s","f","f","f","f")) ## return(x) ## } ### package sphet ### sphet objects ## xtable.sphet <- function(x, caption = NULL, label = NULL, align = NULL, ## digits = NULL, display = NULL, auto = FALSE, ...) { ## return(xtable.summary.splm(summary(x), caption = caption, label = label, ## align = align, digits = digits, ## display = display, auto = auto, ...)) ## } ## xtable.summary.sphet <- function(x, caption = NULL, label = NULL, align = NULL, ## digits = NULL, display = NULL, ## auto = FALSE, ...) { ## return(xtable.summary.splm(x, caption = caption, label = label, ## align = align, digits = digits, ## display = display, auto = auto, ...)) ## }
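## ---------------------------------------------------------------------------
## Illustrative usage (editorial addition, not part of the original source):
## a minimal sketch of the data.frame and lm methods defined above, assuming
## the 'xtable' package is installed so that its 'tli' data set is available.
## Wrapped in `if (FALSE)` so it never runs when this file is sourced.
if (FALSE) {
  library(xtable)
  data(tli)
  ## data.frame method: set caption, label and digits
  tab <- xtable(tli[1:5, ], caption = "First rows of tli",
                label = "tab:tli", digits = 0)
  print(tab, include.rownames = FALSE)
  ## lm method: coefficient table produced via xtable.summary.lm
  fm <- lm(tlimth ~ sex + grade, data = tli)
  print(xtable(fm), booktabs = TRUE)
}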
/scratch/gouwar.j/cran-all/cranData/xtable/R/xtable.R
### ftable objects, requested by Charles Roosen ### Feature request #2248, 2/9/2012 xtableFtable <- function(x, caption = NULL, label = NULL, align = NULL, digits = 0, display = NULL, quote = FALSE, method = c("non.compact", "row.compact", "col.compact", "compact"), lsep = " $\\vert$ ", ...) { method <- match.arg(method) saveMethod <- method xDim <- dim(x) nRowVars <- length(attr(x, "row.vars")) nColVars <- length(attr(x, "col.vars")) if (nRowVars == 0){ if (method =="col.compact"){ method <- "non.compact" } else if (method == "compact"){ method <- "row.compact" } } if (nColVars == 0){ if (method =="row.compact"){ method <- "non.compact" } else if (method == "compact"){ method <- "col.compact" } } if (method == "non.compact"){ nCharCols <- nRowVars + 2 nCharRows <- nColVars + 1 } if (method == "row.compact"){ nCharCols <- nRowVars + 2 nCharRows <- nColVars } if (method == "col.compact"){ nCharCols <- nRowVars + 1 nCharRows <- nColVars + 1 } if (method == "compact"){ nCharCols <- nRowVars + 1 nCharRows <- nColVars } if(is.null(align)) { align <- c(rep("l", nCharCols - 1), "l |", rep("r", xDim[2])) } if(is.null(display)) { display <- c(rep("s", nCharCols), rep("d", xDim[2])) } attr(x, "ftableCaption") <- caption attr(x, "ftableLabel") <- label attr(x, "ftableAlign") <- align attr(x, "ftableDigits") <- digits attr(x, "quote") <- quote attr(x, "ftableDisplay") <- display attr(x, "method") <- method attr(x, "lsep") <- lsep attr(x, "nChars") <- c(nCharRows, nCharCols) class(x) <- c("xtableFtable", "ftable") return(x) } print.xtableFtable <- function(x, type = getOption("xtable.type", "latex"), file = getOption("xtable.file", ""), append = getOption("xtable.append", FALSE), floating = getOption("xtable.floating", TRUE), floating.environment = getOption("xtable.floating.environment", "table"), table.placement = getOption("xtable.table.placement", "ht"), caption.placement = getOption("xtable.caption.placement", "bottom"), caption.width = getOption("xtable.caption.width", NULL), latex.environments = getOption("xtable.latex.environments", c("center")), tabular.environment = getOption("xtable.tabular.environment", "tabular"), size = getOption("xtable.size", NULL), hline.after = getOption("xtable.hline.after", NULL), NA.string = getOption("xtable.NA.string", ""), only.contents = getOption("xtable.only.contents", FALSE), add.to.row = getOption("xtable.add.to.row", NULL), sanitize.text.function = getOption("xtable.sanitize.text.function", as.is), sanitize.rownames.function = getOption("xtable.sanitize.rownames.function", sanitize.text.function), sanitize.colnames.function = getOption("xtable.sanitize.colnames.function", sanitize.text.function), math.style.negative = getOption("xtable.math.style.negative", FALSE), math.style.exponents = getOption("xtable.math.style.exponents", FALSE), html.table.attributes = getOption("xtable.html.table.attributes", "border=1"), print.results = getOption("xtable.print.results", TRUE), format.args = getOption("xtable.format.args", NULL), rotate.rownames = getOption("xtable.rotate.rownames", FALSE), rotate.colnames = getOption("xtable.rotate.colnames", FALSE), booktabs = getOption("xtable.booktabs", FALSE), scalebox = getOption("xtable.scalebox", NULL), width = getOption("xtable.width", NULL), comment = getOption("xtable.comment", TRUE), timestamp = getOption("xtable.timestamp", date()), ...) 
{ if (type == "latex"){ ## extract the information in the attributes caption <- attr(x, "ftableCaption") label <- attr(x, "ftableLabel") align <- attr(x, "ftableAlign") digits <- attr(x, "ftableDigits") quote <- attr(x, "quote") digits <- attr(x, "ftabelDigits") method <- attr(x, "method") lsep <- attr(x, "lsep") nCharRows <- attr(x, "nChars")[1] nCharCols <- attr(x, "nChars")[2] nRowVars <- length(attr(x, "row.vars")) nColVars <- length(attr(x, "col.vars")) ## change class so format method will find format.ftable ## even though format.ftable is not exported from 'stats' class(x) <- "ftable" fmtFtbl <- format(x, quote = quote, digits = digits, method = method, lsep = lsep) attr(fmtFtbl, "caption") <- caption attr(fmtFtbl, "label") <- label ## sanitization is possible for row names and/or column names ## row names if (is.null(sanitize.rownames.function)) { fmtFtbl[nCharRows, 1:nRowVars] <- sanitize(fmtFtbl[nCharRows, 1:nRowVars], type = type) } else { fmtFtbl[nCharRows, 1:nRowVars] <- sanitize.rownames.function(fmtFtbl[nCharRows, 1:nRowVars]) } ## column names if (is.null(sanitize.colnames.function)) { fmtFtbl[1:nColVars, nCharCols - 1] <- sanitize(fmtFtbl[1:nColVars, nCharCols - 1], type = type) } else { fmtFtbl[1:nColVars, nCharCols - 1] <- sanitize.colnames.function(fmtFtbl[1:nColVars, nCharCols - 1]) } ## rotations are possible if (rotate.rownames){ fmtFtbl[1:dim(fmtFtbl)[1], 1:(nCharCols - 1)] <- paste0("\\begin{sideways} ", fmtFtbl[1:dim(fmtFtbl)[1], 1:(nCharCols - 1)], "\\end{sideways}") } if (rotate.colnames){ if (rotate.rownames){ fmtFtbl[1:(nCharRows), (nCharCols):dim(fmtFtbl)[2]] <- paste0("\\begin{sideways} ", fmtFtbl[1:(nCharRows), (nCharCols):dim(fmtFtbl)[2]], "\\end{sideways}") } else { fmtFtbl[1:(nCharRows), 1:dim(fmtFtbl)[2]] <- paste0("\\begin{sideways} ", fmtFtbl[1:(nCharRows), 1:dim(fmtFtbl)[2]], "\\end{sideways}") } } ## booktabs is incompatible with vertical lines in tables if (booktabs) align <- gsub("|","", align, fixed = TRUE) attr(fmtFtbl, "align") <- align attr(fmtFtbl, "digits") <- digits attr(fmtFtbl, "quote") <- quote attr(fmtFtbl, "display") <- display ## labels should be left aligned for (i in 1:nCharRows){ fmtFtbl[i, nCharCols:dim(fmtFtbl)[2]] <- paste0("\\multicolumn{1}{l}{ ", fmtFtbl[i, nCharCols:dim(fmtFtbl)[2]], "}") } if(is.null(hline.after)) { hline.after <- c(-1, nCharRows, dim(fmtFtbl)[1]) } print.xtable(fmtFtbl, hline.after = hline.after, include.rownames = FALSE, include.colnames = FALSE, booktabs = booktabs, sanitize.text.function = as.is, file = file, append = append, floating = floating, floating.environment = floating.environment, table.placement = table.placement, caption.placement = caption.placement, caption.width = caption.width, latex.environments = latex.environments, tabular.environment = tabular.environment, size = size, NA.string = NA.string, only.contents = only.contents, add.to.row = add.to.row,, math.style.negative = math.style.negative, math.style.exponents = math.style.exponents, print.results = print.results, format.args = format.args, scalebox = scalebox, width = width, comment = comment, timestamp = timestamp, ...) } else { stop("print.xtableFtable not yet implemented for this type") } }
/scratch/gouwar.j/cran-all/cranData/xtable/R/xtableFtable.R
### Function to create lists of tables xtableList <- function(x, caption = NULL, label = NULL, align = NULL, digits = NULL, display = NULL, ...) { if (is.null(digits)){ digitsList <- vector("list", length(x)) } else { if (!is.list(digits)){ digitsList <- vector("list", length(x)) for (i in 1:length(x)) digitsList[[i]] <- digits } } if (is.null(display)){ displayList <- vector("list", length(x)) } else { if (!is.list(display)){ displayList <- vector("list", length(x)) for (i in 1:length(x)) displayList[[i]] <- display } } xList <- vector("list", length(x)) for (i in 1:length(x)){ xList[[i]] <- xtable(x[[i]], caption = caption, label = label, align = align, digits = digitsList[[i]], display = displayList[[i]], ...) attr(xList[[i]], 'subheading') <- attr(x, 'subheadings')[[i]] } attr(xList, "message") <- attr(x, "message") attr(xList, "caption") <- caption attr(xList, "label") <- label class(xList) <- c("xtableList") return(xList) } print.xtableList <- function(x, type = getOption("xtable.type", "latex"), file = getOption("xtable.file", ""), append = getOption("xtable.append", FALSE), floating = getOption("xtable.floating", TRUE), floating.environment = getOption("xtable.floating.environment", "table"), table.placement = getOption("xtable.table.placement", "ht"), caption.placement = getOption("xtable.caption.placement", "bottom"), caption.width = getOption("xtable.caption.width", NULL), latex.environments = getOption("xtable.latex.environments", c("center")), tabular.environment = getOption("xtable.tabular.environment", "tabular"), size = getOption("xtable.size", NULL), hline.after = NULL, NA.string = getOption("xtable.NA.string", ""), include.rownames = getOption("xtable.include.rownames", TRUE), colnames.format = "single", only.contents = getOption("xtable.only.contents", FALSE), add.to.row = NULL, sanitize.text.function = getOption("xtable.sanitize.text.function", NULL), sanitize.rownames.function = getOption("xtable.sanitize.rownames.function", sanitize.text.function), sanitize.colnames.function = getOption("xtable.sanitize.colnames.function", sanitize.text.function), sanitize.subheadings.function = getOption("xtable.sanitize.subheadings.function", sanitize.text.function), sanitize.message.function = getOption("xtable.sanitize.message.function", sanitize.text.function), math.style.negative = getOption("xtable.math.style.negative", FALSE), math.style.exponents = getOption("xtable.math.style.exponents", FALSE), html.table.attributes = getOption("xtable.html.table.attributes", "border=1"), print.results = getOption("xtable.print.results", TRUE), format.args = getOption("xtable.format.args", NULL), rotate.rownames = getOption("xtable.rotate.rownames", FALSE), rotate.colnames = getOption("xtable.rotate.colnames", FALSE), booktabs = getOption("xtable.booktabs", FALSE), scalebox = getOption("xtable.scalebox", NULL), width = getOption("xtable.width", NULL), comment = getOption("xtable.comment", TRUE), timestamp = getOption("xtable.timestamp", date()), ...) 
{ ## Get number of rows for each table in list of tables nCols <- dim(x[[1]])[2] rowNums <- sapply(x, dim)[1,] combinedRowNums <- cumsum(rowNums) combined <- do.call(rbind, x) if (type == "latex"){ ## Special treatment if using booktabs if (booktabs){ tRule <- "\\toprule" mRule <- "\\midrule" bRule <- "\\bottomrule" } else { tRule <- "\\hline" mRule <- "\\hline" bRule <- "\\hline" } ## Sanitize subheadings if required if (!is.null(sanitize.subheadings.function)) { for (i in 1:length(x)){ attr(x[[i]], 'subheading') <- sanitize.subheadings.function(attr(x[[i]], 'subheading')) } } ## Sanitize message if required if (!is.null(sanitize.message.function)) { xMessage <- attr(x, 'message') xMessage <- sapply(xMessage, sanitize.message.function) attr(x, 'message') <- xMessage } if (colnames.format == "single"){ add.to.row <- list(pos = NULL, command = NULL) add.to.row$pos <- as.list(c(0, combinedRowNums[-length(x)], dim(combined)[1])) command <- sapply(x, attr, "subheading") for (i in 1:length(x)){ if( !is.null(command[[i]]) ){ add.to.row$command[i] <- paste0(mRule,"\n\\multicolumn{", nCols + include.rownames, "}{l}{", command[[i]], "}\\\\\n") } else { add.to.row$command[i] <- paste0(mRule, "\n") } } ## Changed at request of Russ Lenth ## add.to.row$command[1:length(x)] <- ## paste0(mRule,"\n\\multicolumn{", nCols, "}{l}{", command, "}\\\\\n") if ( (booktabs) & length(attr(x, "message") > 0) ){ attr(x, "message")[1] <- paste0("\\rule{0em}{2.5ex}", attr(x, "message")[1]) } add.to.row$command[length(x) + 1] <- paste0("\n\\multicolumn{", nCols + include.rownames, "}{l}{", attr(x, "message"), "}\\\\\n", collapse = "") add.to.row$command[length(x) + 1] <- paste0(bRule, add.to.row$command[length(x) + 1]) class(combined) <- c("xtableList", "data.frame") hline.after <- c(-1) include.colnames <- TRUE } ## Create headings for columns if multiple headings are needed if (colnames.format == "multiple"){ if (is.null(sanitize.colnames.function)) { colHead <- names(x[[1]]) } else { colHead <- sanitize.colnames.function(names(x[[1]])) } if (rotate.colnames) { colHead <- paste("\\begin{sideways}", colHead, "\\end{sideways}") } colHead <- paste0(colHead, collapse = " & ") if (include.rownames) { colHead <- paste0(" & ", colHead) } colHead <- paste0(tRule, "\n", colHead, " \\\\", mRule, "\n") add.to.row <- list(pos = NULL, command = NULL) add.to.row$pos <- as.list(c(0, c(combinedRowNums[1:length(x)]))) command <- sapply(x, attr, "subheading") add.to.row$command[1] <- if( !is.null(command[[1]]) ){ add.to.row$command[1] <- paste0("\n\\multicolumn{", nCols + include.rownames, "}{l}{", command[[1]], "}\\\\ \n", colHead, "\n") } else { add.to.row$command[1] <- colHead } for (i in 2:length(x)) { add.to.row$command[i] <- if( !is.null(command[[i]]) ) { paste0(bRule, "\\\\ \n\\multicolumn{", nCols + include.rownames, "}{l}{", command[[i]], "}", "\\\\ \n", colHead) } else { add.to.row$command[i] <- paste0("\n", colHead) } } ## Changed at request of Russ Lenth ## add.to.row$command[1] <- ## paste0("\n\\multicolumn{", nCols, "}{l}{", command[1], ## "}", " \\\\ \n", ## colHead) ## add.to.row$command[2:length(x)] <- ## paste0(bRule, ## "\\\\ \n\\multicolumn{", nCols, "}{l}{", ## command[2:length(x)], "}", ## "\\\\ \n", ## colHead) if ( (booktabs) & length(attr(x, "message") > 0) ){ attr(x, "message")[1] <- paste0("\\rule{0em}{2.5ex}", attr(x, "message")[1]) } add.to.row$command[length(x) + 1] <- paste0("\n\\multicolumn{", nCols + include.rownames, "}{l}{", attr(x, "message"), "}\\\\\n", collapse = "") add.to.row$command[length(x) + 
1] <- paste0(bRule, add.to.row$command[length(x) + 1]) class(combined) <- c("xtableList", "data.frame") hline.after <- NULL include.colnames <- FALSE } print.xtable(combined, type = type, floating = floating, floating.environment = floating.environment, table.placement = table.placement, caption.placement = caption.placement, caption.width = caption.width, latex.environments = latex.environments, tabular.environment = tabular.environment, size = size, hline.after = hline.after, NA.string = NA.string, include.rownames = include.rownames, include.colnames = include.colnames, only.contents = only.contents, add.to.row = add.to.row, sanitize.text.function = sanitize.text.function, sanitize.rownames.function = sanitize.rownames.function, sanitize.colnames.function = sanitize.colnames.function, math.style.negative = math.style.negative, math.style.exponents = math.style.exponents, html.table.attributes = html.table.attributes, print.results = print.results, format.args = format.args, rotate.rownames = rotate.rownames, rotate.colnames = rotate.colnames, booktabs = booktabs, scalebox = scalebox, width = width, comment = comment, timestamp = timestamp, ...) } else { stop("print.xtableList not yet implemented for this type") } } ### Uses xtableList xtableLSMeans <- function(x, caption = NULL, label = NULL, align = NULL, digits = NULL, display = NULL, auto = FALSE, ...){ if (attr(x, "estName") == "lsmean"){ xList <- split(x, f = x[, 2]) for (i in 1:length(xList)){ xList[[i]] <- as.data.frame(xList[[i]][, -2]) } attr(xList, "subheadings") <- paste0(dimnames(x)[[2]][2], " = ", levels(x[[2]])) attr(xList, "message") <- c("", attr(x, "mesg")) xList <- xtableList(xList, caption = caption, label = label, align = align, digits = digits, display = display, auto = auto, ...) } else { xList <- x xList <- xtable.data.frame(xList, caption = caption, label = label, align = align, digits = digits, display = display, auto = auto, ...) } return(xList) }
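## Illustrative usage (editorial addition, not part of the original source):
## a minimal sketch of xtableList()/print.xtableList() on a list of data
## frames split from the base 'mtcars' data, mirroring the package vignette.
## Guarded by `if (FALSE)` so it is not executed when this file is sourced.
if (FALSE) {
  library(xtable)
  mtcarsList <- split(mtcars[, 1:4], f = mtcars$cyl)
  attr(mtcarsList, "subheadings") <- paste0("Number of cylinders = ",
                                            names(mtcarsList))
  attr(mtcarsList, "message") <- "Source: mtcars"
  xl <- xtableList(mtcarsList, digits = c(0, 1, 0, 1, 0))
  print.xtableList(xl, colnames.format = "multiple", booktabs = TRUE)
}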
/scratch/gouwar.j/cran-all/cranData/xtable/R/xtableList.R
### xtableMatharray object ### To deal with numeric arrays such as a variance-covariance matrix ### From a request by James Curran, 16 October 2015 xtableMatharray <- function(x, caption = NULL, label = NULL, align = NULL, digits = NULL, display = NULL, auto = FALSE, ...) { class(x) <- c("xtableMatharray","matrix") xtbl <- xtable.matrix(x, caption = caption, label = label, align = align, digits = digits, display = display, auto = auto, ...) class(xtbl) <- c("xtableMatharray","xtable","data.frame") return(xtbl) } print.xtableMatharray <- function(x, print.results = TRUE, format.args = getOption("xtable.format.args", NULL), scalebox = getOption("xtable.scalebox", NULL), comment = FALSE, timestamp = NULL, ...) { class(x) <- c("xtableMatharray","data.frame") print.xtable(x, floating = FALSE, tabular.environment = 'array', include.rownames = FALSE, include.colnames = FALSE, hline.after = NULL, print.results = print.results, format.args = format.args, scalebox = scalebox, comment = comment, timestamp = timestamp, ...) }
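## Illustrative usage (editorial addition, not part of the original source):
## a minimal sketch typesetting a small, made-up covariance matrix inside a
## LaTeX math environment via xtableMatharray(). Guarded by `if (FALSE)`.
if (FALSE) {
  library(xtable)
  V <- matrix(c(1.00, 0.25, 0.25, 2.00), nrow = 2)
  ma <- xtableMatharray(V, digits = 3)
  print(ma)   # dispatches to print.xtableMatharray(): bare 'array' environment
}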
/scratch/gouwar.j/cran-all/cranData/xtable/R/xtableMatharray.R
## ----set, include=FALSE-------------------------------------------------- library(knitr) opts_chunk$set(fig.path = 'Figures/other', debug = TRUE, echo = TRUE) opts_chunk$set(out.width = '0.9\\textwidth') ## ----package, results='asis'------------------------------ library(xtable) options(xtable.floating = FALSE) options(xtable.timestamp = "") options(width = 60) set.seed(1234) ## ----zoo, results = 'asis'-------------------------------- library(zoo) xDate <- as.Date("2003-02-01") + c(1, 3, 7, 9, 14) - 1 as.ts(xDate) x <- zoo(rnorm(5), xDate) xtable(x) ## ----zoots, results = 'asis'------------------------------ tempTs <- ts(cumsum(1 + round(rnorm(100), 0)), start = c(1954, 7), frequency = 12) tempTable <- xtable(tempTs, digits = 0) tempTable tempZoo <- as.zoo(tempTs) xtable(tempZoo, digits = 0) ## ----survival, results = 'asis'--------------------------- library(survival) test1 <- list(time=c(4,3,1,1,2,2,3), status=c(1,1,1,0,1,1,0), x=c(0,2,1,1,1,0,0), sex=c(0,0,0,0,1,1,1)) coxFit <- coxph(Surv(time, status) ~ x + strata(sex), test1) xtable(coxFit)
/scratch/gouwar.j/cran-all/cranData/xtable/inst/doc/OtherPackagesGallery.R
## ----set, include=FALSE-------------------------------------------------- library(knitr) opts_chunk$set(fig.path='Figures/list', debug=TRUE, echo=TRUE) opts_chunk$set(out.width='0.9\\textwidth') ## ----package, results='asis'------------------------------ library(xtable) options(xtable.floating = FALSE) options(xtable.timestamp = "") options(width = 60) ## ----data------------------------------------------------- require(xtable) data(mtcars) mtcars <- mtcars[, 1:6] mtcarsList <- split(mtcars, f = mtcars$cyl) ### Reduce the size of the list elements mtcarsList[[1]] <- mtcarsList[[1]][1,] mtcarsList[[2]] <- mtcarsList[[2]][1:2,] mtcarsList[[3]] <- mtcarsList[[3]][1:3,] attr(mtcarsList, "subheadings") <- paste0("Number of cylinders = ", names(mtcarsList)) attr(mtcarsList, "message") <- c("Line 1 of Message", "Line 2 of Message") str(mtcarsList) attributes(mtcarsList) ## ----xtablelist------------------------------------------- xList <- xtableList(mtcarsList) str(xList) ## ----xtablelist1------------------------------------------ xList1 <- xtableList(mtcarsList, digits = c(0,2,0,0,0,1,2)) str(xList1) ## ----xtablelist2------------------------------------------ xList2 <- xtableList(mtcarsList, digits = c(0,2,0,0,0,1,2), caption = "Caption to List", label = "tbl:xtableList") str(xList2) ## ----xtablelist3------------------------------------------ attr(mtcarsList, "subheadings") <- NULL xList3 <- xtableList(mtcarsList) str(xList3) ## ----xtablelist4------------------------------------------ attr(mtcarsList, "message") <- NULL xList4 <- xtableList(mtcarsList) str(xList4) ## ----singledefault, results='asis'------------------------ print.xtableList(xList) ## ----singlebooktabs, results='asis'----------------------- print.xtableList(xList, booktabs = TRUE) ## ----singlebooktabs1, results='asis'---------------------- print.xtableList(xList1, booktabs = TRUE) ## ----sanitize--------------------------------------------- large <- function(x){ paste0('{\\Large{\\bfseries ', x, '}}') } italic <- function(x){ paste0('{\\emph{ ', x, '}}') } bold <- function(x){ paste0('{\\bfseries ', x, '}') } red <- function(x){ paste0('{\\color{red} ', x, '}') } ## ----sanitizesingle, results='asis'----------------------- print.xtableList(xList, sanitize.rownames.function = italic, sanitize.colnames.function = large, sanitize.subheadings.function = bold, sanitize.message.function = red, booktabs = TRUE) ## ----singlecaption, results='asis'------------------------ print.xtableList(xList2, floating = TRUE) ## ----singlerotated, results='asis'------------------------ print.xtableList(xList, rotate.colnames = TRUE) ## ----nosubheadings, results='asis'------------------------ print.xtableList(xList3) ## ----nomessage, results='asis'---------------------------- print.xtableList(xList4) ## ----multipledefault, results='asis'---------------------- print.xtableList(xList, colnames.format = "multiple") ## ----multiplebooktabs, results='asis'--------------------- print.xtableList(xList, colnames.format = "multiple", booktabs = TRUE) ## ----sanitizemultiple, results='asis'--------------------- print.xtableList(xList, colnames.format = "multiple", sanitize.rownames.function = italic, sanitize.colnames.function = large, sanitize.subheadings.function = bold, sanitize.message.function = red, booktabs = TRUE) ## ----multiplecaption, results='asis'---------------------- print.xtableList(xList2, colnames.format = "multiple", floating = TRUE) ## ----multiplerotated, results='asis'---------------------- print.xtableList(xList, 
colnames.format = "multiple", rotate.colnames = TRUE) ## ----multiplenosubheadings, results='asis'---------------- print.xtableList(xList3, colnames.format = "multiple") ## ----multiplenomessage, results='asis'-------------------- print.xtableList(xList4, colnames.format = "multiple")
/scratch/gouwar.j/cran-all/cranData/xtable/inst/doc/listOfTablesGallery.R
## ----include=FALSE------------------------------------------------------- library(knitr) ## ------------------------------------------------------------------------ library(xtable) x <- matrix(rnorm(6), ncol = 2) x.small <- xtable(x, label = 'tabsmall', caption = 'A margin table') ## ----results='asis'------------------------------------------------------ print(x.small,floating.environment='margintable', latex.environments = "", table.placement = NULL)
/scratch/gouwar.j/cran-all/cranData/xtable/inst/doc/margintable.R
## ----include=FALSE------------------------------------------------------- library(knitr) opts_chunk$set(fig.path='figdir/fig', debug=TRUE, echo=TRUE) set.seed(1234) ## ----results='asis'------------------------------------------------------ library(xtable) options(xtable.floating = FALSE) options(xtable.timestamp = "") ## ----results='asis'------------------------------------------------------ data(tli) xtable(tli[1:10, ]) ## ----results='asis'------------------------------------------------------ design.matrix <- model.matrix(~ sex*grade, data = tli[1:10, ]) xtable(design.matrix, digits = 0) ## ----results='asis'------------------------------------------------------ fm1 <- aov(tlimth ~ sex + ethnicty + grade + disadvg, data = tli) xtable(fm1) ## ----results='asis'------------------------------------------------------ fm2 <- lm(tlimth ~ sex*ethnicty, data = tli) xtable(fm2) ## ----results='asis'------------------------------------------------------ xtable(anova(fm2)) ## ----results='asis'------------------------------------------------------ fm2b <- lm(tlimth ~ ethnicty, data = tli) xtable(anova(fm2b, fm2)) ## ----aovlist------------------------------------------------------------- Block <- gl(8, 4) A <- factor(c(0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1, 0,1,0,1,0,1,0,1,0,1,0,1)) B <- factor(c(0,0,1,1,0,0,1,1,0,1,0,1,1,0,1,0,0,0,1,1, 0,0,1,1,0,0,1,1,0,0,1,1)) C <- factor(c(0,1,1,0,1,0,0,1,0,0,1,1,0,0,1,1,0,1,0,1, 1,0,1,0,0,0,1,1,1,1,0,0)) Yield <- c(101, 373, 398, 291, 312, 106, 265, 450, 106, 306, 324, 449, 272, 89, 407, 338, 87, 324, 279, 471, 323, 128, 423, 334, 131, 103, 445, 437, 324, 361, 302, 272) aovdat <- data.frame(Block, A, B, C, Yield) old <- getOption("contrasts") options(contrasts = c("contr.helmert", "contr.poly")) (fit <- aov(Yield ~ A*B*C + Error(Block), data = aovdat)) class(fit) summary(fit) options(contrasts = old) ## ----xtableaovlist, results='asis'--------------------------------------- xtable(fit) ## ----results='asis'------------------------------------------------------ fm3 <- glm(disadvg ~ ethnicty*grade, data = tli, family = binomial) xtable(fm3) ## ----results='asis'------------------------------------------------------ xtable(anova(fm3)) ## ----results='asis'------------------------------------------------------ pr1 <- prcomp(USArrests) xtable(pr1) ## ----results='asis'------------------------------------------------------ xtable(summary(pr1)) ## ----include=FALSE------------------------------------------------------- # pr2 <- princomp(USArrests) # xtable(pr2) ## ----results='asis'------------------------------------------------------ temp.ts <- ts(cumsum(1 + round(rnorm(100), 0)), start = c(1954, 7), frequency = 12) temp.table <- xtable(temp.ts, digits = 0) temp.table ## ----ftable-------------------------------------------------------------- data(mtcars) mtcars$cyl <- factor(mtcars$cyl, levels = c("4","6","8"), labels = c("four","six","eight")) tbl <- ftable(mtcars$cyl, mtcars$vs, mtcars$am, mtcars$gear, row.vars = c(2, 4), dnn = c("Cylinders", "V/S", "Transmission", "Gears")) tbl ## ----ftablecheck--------------------------------------------------------- xftbl <- xtableFtable(tbl, method = "compact") print.xtableFtable(xftbl, booktabs = TRUE) ## ----ftable1, results = 'asis'------------------------------------------- xftbl <- xtableFtable(tbl) print.xtableFtable(xftbl) ## ----ftable2, results = 'asis'------------------------------------------- xftbl <- xtableFtable(tbl, method = "col.compact") print.xtableFtable(xftbl, rotate.rownames = TRUE) ## 
----ftable3, results = 'asis'------------------------------------------- xftbl <- xtableFtable(tbl, method = "compact") print.xtableFtable(xftbl, booktabs = TRUE) ## ----ftable4, results = 'asis'------------------------------------------- italic <- function(x){ paste0('{\\emph{', x, '}}') } mtcars$cyl <- factor(mtcars$cyl, levels = c("four","six","eight"), labels = c("four",italic("six"),"eight")) large <- function(x){ paste0('{\\Large ', x, '}') } bold <- function(x){ paste0('{\\bfseries ', x, '}') } tbl <- ftable(mtcars$cyl, mtcars$vs, mtcars$am, mtcars$gear, row.vars = c(2, 4), dnn = c("Cylinders", "V/S", "Transmission", "Gears")) xftbl <- xtableFtable(tbl, method = "row.compact") print.xtableFtable(xftbl, sanitize.rownames.function = large, sanitize.colnames.function = bold, rotate.colnames = TRUE, rotate.rownames = TRUE) ## ----include=FALSE------------------------------------------------------- # ## Demonstrate saving to file # for(i in c("latex", "html")) { # outFileName <- paste("xtable.", ifelse(i=="latex", "tex", i), sep = "") # print(xtable(lm.D9), type = i, file = outFileName, append = TRUE, # latex.environments = NULL) # print(xtable(lm.D9), type = i, file = outFileName, append = TRUE, # latex.environments = "") # print(xtable(lm.D9), type = i, file = outFileName, append = TRUE, # latex.environments = "center") # print(xtable(anova(glm.D93, test = "Chisq")), # type = i, file = outFileName, # append = TRUE) # print(xtable(anova(glm.D93)), hline.after = c(1), # size = "small", type = i, # file = outFileName, append = TRUE) # # print(xtable(pr2), type = i, file = outFileName, append = TRUE) # } ## ----results='asis'------------------------------------------------------ data(mtcars) dat <- mtcars[1:3, 1:6] x <- xtable(dat) x ## ----results='asis'------------------------------------------------------ align(x) <- xalign(x) digits(x) <- xdigits(x) display(x) <- xdisplay(x) x ## ----results='asis'------------------------------------------------------ xtable(dat, auto = TRUE) ## ----results='asis'------------------------------------------------------ x <- xtable(dat) autoformat(x) ## ----results='asis'------------------------------------------------------ print(xtable(data.frame(text = c("foo","bar"), googols = c(10e10,50e10), small = c(8e-24,7e-5), row.names = c("A","B")), display = c("s","s","g","g")), math.style.exponents = TRUE) ## ----results='asis'------------------------------------------------------ insane <- data.frame(Name = c("Ampersand","Greater than","Less than", "Underscore","Per cent","Dollar", "Backslash","Hash","Caret","Tilde", "Left brace","Right brace"), Character = I(c("&",">","<","_","%","$", "\\","#","^","~","{","}"))) colnames(insane)[2] <- paste(insane[, 2], collapse = "") xtable(insane) ## ----results='asis'------------------------------------------------------ wanttex <- xtable(data.frame(Column = paste("Value_is $10^{-",1:3,"}$", sep = ""))) print(wanttex, sanitize.text.function = function(str) gsub("_", "\\_", str, fixed = TRUE)) ## ----sanitize3----------------------------------------------------------- dat <- mtcars[1:3, 1:6] large <- function(x){ paste0('{\\Large{\\bfseries ', x, '}}') } italic <- function(x){ paste0('{\\emph{ ', x, '}}') } ## ----sanitize4, results = 'asis'----------------------------------------- print(xtable(dat), sanitize.rownames.function = italic, sanitize.colnames.function = large, booktabs = TRUE) ## ----results='asis'------------------------------------------------------ mat <- round(matrix(c(0.9, 0.89, 200, 0.045, 2.0), c(1, 
5)), 4) rownames(mat) <- "$y_{t-1}$" colnames(mat) <- c("$R^2$", "$\\bar{x}$", "F-stat", "S.E.E", "DW") mat <- xtable(mat) print(mat, sanitize.text.function = function(x) {x}) ## ----results='asis'------------------------------------------------------ money <- matrix(c("$1,000","$900","$100"), ncol = 3, dimnames = list("$\\alpha$", c("Income (US$)","Expenses (US$)", "Profit (US$)"))) print(xtable(money), sanitize.rownames.function = function(x) {x}) ## ----results='asis'------------------------------------------------------ print(xtable(anova(fm3), caption = "\\tt latex.environments = \"\""), floating = TRUE, latex.environments = "") print(xtable(anova(fm3), caption = "\\tt latex.environments = \"center\""), floating = TRUE, latex.environments = "center") ## ----results='asis'------------------------------------------------------ tli.table <- xtable(tli[1:10, ]) align(tli.table) <- rep("r", 6) tli.table ## ----results='asis'------------------------------------------------------ align(tli.table) <- "|rrl|l|lr|" tli.table ## ----results='asis'------------------------------------------------------ align(tli.table) <- "|rr|lp{3cm}l|r|" tli.table ## ----results='asis'------------------------------------------------------ display(tli.table)[c(2,6)] <- "f" digits(tli.table) <- 3 tli.table ## ----results='asis'------------------------------------------------------ digits(tli.table) <- 1:(ncol(tli)+1) tli.table ## ----results='asis'------------------------------------------------------ digits(tli.table) <- matrix(0:4, nrow = 10, ncol = ncol(tli)+1) tli.table ## ----results='asis'------------------------------------------------------ tli.table <- xtable(tli[1:10, ]) print(tli.table, include.rownames = FALSE) ## ----results='asis'------------------------------------------------------ align(tli.table) <- "|r|r|lp{3cm}l|r|" print(tli.table, include.rownames = FALSE) ## ------------------------------------------------------------------------ align(tli.table) <- "|rr|lp{3cm}l|r|" ## ----results='asis'------------------------------------------------------ print(tli.table, include.colnames = FALSE) ## ----results='asis'------------------------------------------------------ print(tli.table, include.colnames = FALSE, hline.after = c(0,nrow(tli.table))) ## ----results='asis'------------------------------------------------------ print(tli.table, include.colnames = FALSE, include.rownames = FALSE) ## ----results='asis'------------------------------------------------------ print(tli.table, rotate.rownames = TRUE, rotate.colnames = TRUE) ## ----results='asis'------------------------------------------------------ print(xtable(anova(fm3)), hline.after = c(1)) ## ----results='asis'------------------------------------------------------ tli.table <- xtable(tli[1:10, ]) print(tli.table, include.rownames = FALSE, booktabs = TRUE) ## ----results='asis'------------------------------------------------------ bktbs <- xtable(matrix(1:10, ncol = 2)) hlines <- c(-1, 0, 1, nrow(bktbs)) print(bktbs, booktabs = TRUE, hline.after = hlines) ## ----results='asis'------------------------------------------------------ print(xtable(anova(fm3)), size = "large") ## ----results='asis'------------------------------------------------------ print(xtable(anova(fm3)), size = "\\setlength{\\tabcolsep}{12pt}") ## ----results='asis'------------------------------------------------------ x <- matrix(rnorm(1000), ncol = 10) x.big <- xtable(x, caption = "A \\code{longtable} spanning several pages") print(x.big, hline.after=c(-1, 0), 
tabular.environment = "longtable") ## ----results='asis'------------------------------------------------------ add.to.row <- list(pos = list(0), command = NULL) command <- paste0("\\hline\n\\endhead\n", "\\hline\n", "\\multicolumn{", dim(x)[2] + 1, "}{l}", "{\\footnotesize Continued on next page}\n", "\\endfoot\n", "\\endlastfoot\n") add.to.row$command <- command print(x.big, hline.after=c(-1), add.to.row = add.to.row, tabular.environment = "longtable") ## ------------------------------------------------------------------------ Grade3 <- c("A","B","B","A","B","C","C","D","A","B", "C","C","C","D","B","B","D","C","C","D") Grade6 <- c("A","A","A","B","B","B","B","B","C","C", "A","C","C","C","D","D","D","D","D","D") Cohort <- table(Grade3, Grade6) Cohort ## ----results='asis'------------------------------------------------------ xtable(Cohort) ## ----results='asis'------------------------------------------------------ addtorow <- list() addtorow$pos <- list(0, 0) addtorow$command <- c("& \\multicolumn{4}{c}{Grade 6} \\\\\n", "Grade 3 & A & B & C & D \\\\\n") print(xtable(Cohort), add.to.row = addtorow, include.colnames = FALSE) ## ----results='asis'------------------------------------------------------ x <- x[1:30, ] x.side <- xtable(x, caption = "A sideways table") print(x.side, floating = TRUE, floating.environment = "sidewaystable") ## ----results='asis'------------------------------------------------------ x <- x[1:20, ] x.rescale <- xtable(x) print(x.rescale, scalebox = 0.7) ## ----results='asis'------------------------------------------------------ df <- data.frame(name = c("A","B"), right = c(1.4, 34.6), left = c(1.4, 34.6), text = c("txt1","txt2")) print(xtable(df, align = c("l", "|c", "|R{3cm}", "|L{3cm}", "| p{3cm}|")), floating = FALSE, include.rownames = FALSE) ## ----results='asis'------------------------------------------------------ df.width <- data.frame(One = c("item 1", "A"), Two = c("item 2", "B"), Three = c("item 3", "C"), Four = c("item 4", "D")) x.width <- xtable(df.width) align(x.width) <- "|l|X|l|l|l|" print(x.width, tabular.environment = "tabularx", width = "\\textwidth") ## ------------------------------------------------------------------------ x.out <- print(tli.table, print.results = FALSE) ## ------------------------------------------------------------------------ x.ltx <- toLatex(tli.table) class(x.ltx) x.ltx ## ----results='asis'------------------------------------------------------ toLatex(sessionInfo())
/scratch/gouwar.j/cran-all/cranData/xtable/inst/doc/xtableGallery.R
### input fac for factors in character ### condition the matrix setClass(Class="Design", representation=representation(volume="numeric",stock="data.frame",portion="array","VIRTUAL") ) setGeneric("writeTecan",function(object,fileName,source,destination,liquidType){standardGeneric("writeTecan")}) setGeneric("design2Screen",function(object){standardGeneric("design2Screen")}) setMethod( f="design2Screen", signature='Design', def=function(object){ stock<-object@stock fac<-colnames(stock) portion<-object@portion col<-rep(1:dim(portion)[1],dim(portion)[2]) row<-rep(LETTERS[1:dim(portion)[2]],each=dim(portion)[1]) position=cbind(row,col) dim(portion)<-c(dim(portion)[1]*dim(portion)[2],dim(portion)[3]) condition<-matrix(nrow=dim(portion)[1],ncol=length(fac)) colnames(condition)<-fac rownames(condition)<-paste(row,col) weightSum<-function(x1,x2) sum(x1*x2) for (i in seq(along=fac)) { condition[,fac[i]]<-apply(portion,1,weightSum,x2=stock[,fac[i]]) } screen(fac=fac,condition=condition,position=position) } )
/scratch/gouwar.j/cran-all/cranData/xtal/R/Design.R
### subclass of 'Design' ### divide 96-well in four quardants as the last dimension ### for 3 dimension grid(6X4X4), ### the portion of vertex/stock1 in each dimension input as list dim setClass(Class='Design8Vertex', # representation=representation(dim='list'), contains='Design' ) design8Vertex=function(volume,stock,dim){ portion=array(dim=c(12,8,8),dimnames=list(col=c(1:12),row=LETTERS[1:8],vertex=c(1:8))) portion[1:6,1,1]=dim[[1]] portion[1,1:4,1]=dim[[2]] portion[1:6,1:4,1]=portion[1:6,1,1]%o%portion[1,1:4,1] portion[1:6,1,2]=1-dim[[1]] portion[6,1:4,2]=dim[[2]] portion[1:6,1:4,2]=portion[1:6,1,2]%o%portion[6,1:4,2] portion[1:6,4,3]=dim[[1]] portion[1,1:4,3]=1-dim[[2]] portion[1:6,1:4,3]=portion[1:6,4,3]%o%portion[1,1:4,3] portion[1:6,4,4]=1-dim[[1]] portion[6,1:4,4]=1-dim[[2]] portion[1:6,1:4,4]=portion[1:6,4,4]%o%portion[6,1:4,4] portion[7:12,1:4,1:4]=portion[1:6,1:4,1:4]*dim[[3]][2] portion[1:6,5:8,1:4]=portion[1:6,1:4,1:4]*dim[[3]][3] portion[7:12,5:8,1:4]=portion[1:6,1:4,1:4]*dim[[3]][4] portion[7:12,5:8,5:8]=portion[1:6,1:4,1:4] portion[1:6,1:4,5:8]=portion[7:12,5:8,5:8]*(1-dim[[3]][1]) portion[7:12,1:4,5:8]=portion[7:12,5:8,5:8]*(1-dim[[3]][2]) portion[1:6,5:8,5:8]=portion[7:12,5:8,5:8]*(1-dim[[3]][3]) return(new(Class='Design8Vertex',volume=volume,stock=stock,portion=portion)) } setMethod( f="writeTecan", signature=c(object="Design8Vertex"), def=function(object,fileName,source='Source1',destination='Destination',liquidType){ tecan=object@portion*object@volume for (i in 1:8) { # i for vertex stock id for (k in 1:12) { # k for col for (j in 1:8) { #j for row if (tecan[k,j,i]!=0) { well=(k-1)*8+j out=paste(source,i, destination,well,tecan[k,j,i],liquidType[i],sep=',') write(out,file=fileName,append=TRUE) } } } } } ) setMethod( f="writeTecan", signature=c(object="Design8Vertex",liquidType="missing"), def=function(object,fileName,source='Source1',destination='Destination',liquidType){ liquidType=rep('B',length(object@stock)) tecan=object@portion*object@volume for (i in 1:8) { # i for vertex stock id for (k in 1:12) { # k for col for (j in 1:8) { #j for row if (tecan[k,j,i]!=0) { well=(k-1)*8+j out=paste(source,i, destination,well,tecan[k,j,i],liquidType[i],sep=',') write(out,file=fileName,append=TRUE) } } } } } )
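## Illustrative usage (editorial addition, not part of the original source):
## a minimal sketch of building an 8-vertex screen design, converting it to a
## Screen object and exporting worklists. All stock compositions and gradient
## fractions below are made-up values for illustration, not recommendations.
## Guarded by `if (FALSE)` so it is not executed when this file is sourced.
if (FALSE) {
  stock <- data.frame(peg  = c(30, 10, 30, 10, 30, 10, 30, 10),
                      salt = c(0.20, 0.20, 0.05, 0.05, 0.20, 0.20, 0.05, 0.05),
                      ph   = c(6.5, 6.5, 6.5, 6.5, 8.5, 8.5, 8.5, 8.5))
  dsgn <- design8Vertex(volume = 100, stock = stock,
                        dim = list(seq(1, 0, length.out = 6),  # column gradient
                                   seq(1, 0, length.out = 4),  # row gradient
                                   c(1, 0.75, 0.5, 0.25)))     # quadrant gradient
  scr <- design2Screen(dsgn)                     # per-well factor levels
  screenCsv(scr, tempfile(fileext = ".csv"))     # write the screen layout
  writeTecan(dsgn, tempfile(fileext = ".gwl"),   # Tecan worklist, one line per transfer
             liquidType = rep("B", 8))
}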
/scratch/gouwar.j/cran-all/cranData/xtal/R/Design8Vertex.R
### input fac for factors in character ### condition the matrix column: fac ### condition the matrix row: well position (col first) ### position the matrix of (wellRow,wellCol) setClass(Class="Screen", representation=representation(fac="character",condition="matrix",position="matrix"), validity=function(object){if(length(object@fac)!=ncol(object@condition)){ stop ("[Screen: validation] the number of factors does not correspond") }else{} return(TRUE)} ) screen<-function(fac,condition,position) { colnames(condition)=fac rownames(condition)=paste(position[,1],position[,2]) return(new(Class='Screen',fac=fac,condition=condition,position=position)) } setGeneric("screenCsv",function(object,fileName){standardGeneric("screenCsv")}) setMethod(f="screenCsv", signature=c(object='Screen',fileName='character'), def=function(object,fileName){ data=data.frame(object@position,object@condition,row.names=NULL) write.csv(data,fileName) return(invisible()) } ) setGeneric("getCondition",function(object){standardGeneric("getCondition")}) setMethod(f="getCondition", signature=c(object='Screen'), def=function(object){ return(object@condition) } ) ### model response cureve of crystal score #source('Screen.R') setClass(Class='Exp', representation=representation(screen='Screen',score='numeric'), validity=function(object){if(nrow(getCondition(object@screen))!=length(object@score)){ stop ("[Screen: validation] the number of scores does not correspond") }else{} return(TRUE)} ) setGeneric('getOptimal',function(zga){standardGeneric('getOptimal')}) setMethod(f='getOptimal', signature='Exp', def=function(zga){ data=data.frame(score=zga@score) condition=getCondition(zga@screen) for (i in 1:ncol(condition)) { data=cbind(data,as.numeric(condition[,i])) } colnames(data)[2:ncol(data)]<-colnames(condition) mod=loess(data[,1]~data[,2]+data[,3]+data[,4],degree=2,model=TRUE) opt=which(predict(mod)==max(predict(mod))) return(condition[opt,]) } )
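## Illustrative usage (editorial addition, not part of the original source):
## a minimal sketch of scoring a 96-well screen and asking getOptimal() for
## the condition with the highest predicted score. The factor names, ranges
## and random scores are made up purely for illustration; getOptimal() as
## written expects exactly three factors. Guarded by `if (FALSE)`.
if (FALSE) {
  pos  <- cbind(rep(LETTERS[1:8], each = 12), rep(1:12, times = 8))
  cond <- cbind(peg  = runif(96, 5, 30),
                salt = runif(96, 0, 0.2),
                ph   = runif(96, 6, 9))
  scr  <- screen(fac = c("peg", "salt", "ph"), condition = cond, position = pos)
  scores <- as.numeric(sample(0:9, 96, replace = TRUE))  # e.g. crystal quality scores
  ex <- new("Exp", screen = scr, score = scores)
  getOptimal(ex)   # condition at the maximum of the fitted response surface
}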
/scratch/gouwar.j/cran-all/cranData/xtal/R/Exp.R
### input fac for factors in character ### condition the matrix column: fac ### condition the matrix row: well position (col first) ### position the matrix of (wellRow,wellCol) setClass(Class="Screen", representation=representation(fac="character",condition="matrix",position="matrix"), validity=function(object){if(length(object@fac)!=ncol(object@condition)){ stop ("[Screen: validation] the number of factors does not correspond") }else{} return(TRUE)} ) screen<-function(fac,condition,position) { colnames(condition)=fac rownames(condition)=paste(position[,1],position[,2]) return(new(Class='Screen',fac=fac,condition=condition,position=position)) } setGeneric("screenCsv",function(object,fileName){standardGeneric("screenCsv")}) setMethod(f="screenCsv", signature=c(object='Screen',fileName='character'), def=function(object,fileName){ data=data.frame(object@position,object@condition,row.names=NULL) write.csv(data,fileName) return(invisible()) } ) setGeneric("getCondition",function(object){standardGeneric("getCondition")}) setMethod(f="getCondition", signature=c(object='Screen'), def=function(object){ return(object@condition) } )
/scratch/gouwar.j/cran-all/cranData/xtal/R/Screen.R
#' Computes Counting Betweenness #' #' Counting Betweenness implemented as in DePaolis et al(2022) #' @param A The adjacency matrix of the network to be analyzed. It must be square. #' @return The vector containing the values of Counting Betweenness of the network.. #' @examples cbet(exmpl_matrix) #' @export cbet <- function(A) { A <- as.matrix(A) ## Reads the A-matrix; removes row/column with zeros; records their row/column number m = nrow(A) rrss = rowSums(A) retain.vector <- vector(mode="numeric", length=0) if (0.0 %in% rrss){ ## Checks if there is a row with all zeros retain.vector <- row(as.matrix(rrss))[which(as.matrix(rrss) == 0)] AA1 = A[-retain.vector,-retain.vector] ## this is the A-matrix without row/columns of zeros } else { AA1 = A } d = diag(rowSums(AA1)) n = nrow(AA1) ones = matrix(1, n, 1) ## this is a vector of "n" rows by 1 col of "1" re = matrix(0, n, 1 ) ## this is a vector of "n" rows by 1 col of "0" for (p in 1:n){ atemp = AA1[-p,-p] T = solve(d[-p,-p] - atemp, tol = 1e-29) for (s in 1:n){ if (s != p){ if (s < p){ indx = s } else if (s > p) { indx = s - 1 } N = as.matrix(diag(T[indx,])) %*% atemp I = abs(N + t(N)) / 2 re[-p,1] = re[-p,1] + 0.5*((t(colSums(I))) + rowSums(I)) } } } re2 = (re + 2 * (n-1) * ones) / ((n) * (n-1)) res = matrix(0, m, 1) # restore one or more rows/columns of zeros to their original positions if (length(retain.vector)!=0) { res[-retain.vector] <- re2 } else res <- re2 return(res) }
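## Illustrative usage (editorial addition, not part of the original source):
## counting betweenness of a small, made-up weighted directed network, in
## addition to the bundled 'exmpl_matrix' shown in the roxygen example.
## Guarded by `if (FALSE)` so it is not executed when this file is sourced.
if (FALSE) {
  A <- matrix(c(0, 2, 1, 0,
                0, 0, 3, 1,
                0, 0, 0, 2,
                1, 0, 0, 0), nrow = 4, byrow = TRUE)
  cbet(A)        # one counting-betweenness value per node
  cbet_norm(A)   # the same values rescaled to [0, 1]
}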
/scratch/gouwar.j/cran-all/cranData/xtranat/R/cbet.R
#' Computes Counting Betweenness in normalized format #' #' A normalized version of Counting Betweenness implemented as in DePaolis et al(2022) #' @param A The adjacency matrix of the network to be analyzed.It must be square. #' @return The vector containing the normalized values (between 0 and 1) of Counting Betweenness of the network. #' @examples cbet_norm(exmpl_matrix) #' @export cbet_norm <- function(A) { ## Reads the A-matrix; removes row/column with all zeros; records their row/column number A <- as.matrix(A) m = nrow(A) rrss = rowSums(A) retain.vector <- vector(mode="numeric", length=0) if (0.0 %in% rrss){ ## Checks if there is a row with all zeros retain.vector <- row(as.matrix(rrss))[which(as.matrix(rrss) == 0)] AA1 = A[-retain.vector,-retain.vector] ## this is the A-matrix without row/columns of zeros } else { AA1 = A } d = diag(rowSums(AA1)) n = nrow(AA1) ones = matrix(1, n, 1) ## this is a vector of "n" rows by 1 col of "1" re = matrix(0, n, 1 ) ## this is a vector of "n" rows by 1 col of "0" for (p in 1:n){ atemp = AA1[-p,-p] T = solve(d[-p,-p] - atemp, tol = 1e-29) for (s in 1:n){ if (s != p){ if (s < p){ indx = s } else if (s > p) { indx = s - 1 } N = as.matrix(diag(T[indx,])) %*% atemp I = abs(N + t(N)) / 2 re[-p,1] = re[-p,1] + 0.5*((t(colSums(I))) + rowSums(I)) } } } re2 = (re + 2 * (n-1) * ones) / ((n) * (n-1)) res = matrix(0, m, 1) # restore one or more rows/columns of zeros to their original positions if (length(retain.vector)!=0) { res[-retain.vector] <- re2 } else res <- re2 res <- (res - min(res)) / (max(res) - min(res)) # This is a normalized version with values between "0" and "1" return(res) }
/scratch/gouwar.j/cran-all/cranData/xtranat/R/cbet_norm.R
#' Data to showcase the functions in the xtranat package #' #' Contains a randomly created adjacency matrix #' #' It is a 10 by 10 matrix with some values in the diagonal to represent loops #' #' @format A 10 by 10 square matrix #' #' @source {Created by the author as an example} #' #' @examples #' data(exmpl_matrix) "exmpl_matrix"
/scratch/gouwar.j/cran-all/cranData/xtranat/R/data.R
#' Computes the mean first-passage time (MFPT) matrix
#'
#' Mean first-passage time implemented as in DePaolis et al(2022)
#' @param A The adjacency matrix of the network to be analyzed
#' @return The matrix of mean first-passage times between the nodes of the network.
#' @export
mfpt <- function(A) {
  A <- as.matrix(A)
  n = nrow(A)
  rrss = rowSums(A)
  for (i in 1:n) {
    if (rrss[i] != 0) {
      rrss[i] = 1/rrss[i]
    }
  }
  AA = diag(n) - diag(rrss) %*% A  # identity minus the row-normalized transition matrix
  H = matrix(0, n, n)
  I = solve(AA[-1,-1])  ## inverse of AA without 1st column & 1st row
  ones = matrix(1, n-1, 1)  ## vector of "1"s of length 'n-1'
  for (i in 1:n) {
    H[-i,i] = I %*% ones  ## matrix product; otherwise, non-conformable
    if (i < n){
      u = AA[-(i+1),i] - AA[-i, (i+1)]
      I = I - ((I*u) * I[i,]) / (1 + (I[i,] * u))
      v = AA[i, -(i+1)] - AA[(i+1), -i]
      I = I - ((I[,i] * (v * I)) / 1 + v * I[,i])
      if (AA[(i+1),(i+1)]!=1){
        I = solve(AA[-(i+1),-(i+1)], tol = 1e-29)
      }
      if (any(is.infinite(I))) {
        I[is.infinite(I)] <- 0
      }
    }
  }
  return(H)  # H is used by rwc() and rwc_norm()
}
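## Illustrative usage (editorial addition, not part of the original source):
## the mean first-passage time matrix underlying rwc()/rwc_norm(), computed
## for the bundled example network. Loosely, entry [i, j] is the expected
## number of steps for a random walk started at node i to first reach node j.
## Guarded by `if (FALSE)` so it is not executed when this file is sourced.
if (FALSE) {
  library(xtranat)
  data(exmpl_matrix)
  H <- mfpt(exmpl_matrix)
  round(H[1:4, 1:4], 2)
  10 / colSums(H)   # should reproduce rwc(exmpl_matrix) for this 10-node network
}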
/scratch/gouwar.j/cran-all/cranData/xtranat/R/mfpt.R
#' Computes Random Walk Centrality
#'
#' Random Walk Centrality implemented as in DePaolis et al(2022)
#' @param A The adjacency matrix of the network to be analyzed. It must be square.
#' @return The vector containing the values of Random Walk Centrality of the network.
#' @examples rwc(exmpl_matrix)
#' @export
rwc <- function(A) {
  ## Computes the centrality of each node from the mean first-passage time matrix
  nn = nrow(A)
  cen = matrix(0,nn,1)
  m <- mfpt(A)  # mean first-passage time matrix H returned by mfpt()
  for (j in 1:nn) {
    if (all(A[j,] == (c(rep(1,(j-1)),0,rep(1,(nn-j)))))) {  # compares row j of A with a row of 1s and a 0 on the diagonal
      cen[j] = 0  # if TRUE (row j of A is all 1s off the diagonal), the centrality is set to zero
    } else {
      cen[j] = nn / sum(m[,j])
    }
  }
  return(cen)
}
/scratch/gouwar.j/cran-all/cranData/xtranat/R/rwc.R
#' Computes Random Walk Centrality in normalized format
#'
#' A normalized version of Random Walk Centrality implemented as in DePaolis et al(2022)
#' @param A The adjacency matrix of the network to be analyzed. It must be square.
#' @return The vector containing the normalized values (between 0 and 1) of Random Walk Centrality of the network.
#' @examples rwc_norm(exmpl_matrix)
#' @export
rwc_norm <- function(A) {
  ## Computes the centrality of each node from the mean first-passage time matrix,
  ## then rescales the result to the interval [0, 1]
  nn = nrow(A)
  cen = matrix(0,nn,1)
  m <- mfpt(A)  # mean first-passage time matrix H returned by mfpt()
  for (j in 1:nn) {
    if (all(A[j,] == (c(rep(1,(j-1)),0,rep(1,(nn-j)))))) {  # compares row j of A with a row of 1s and a 0 on the diagonal
      cen[j] = 0  # if TRUE (row j of A is all 1s off the diagonal), the centrality is set to zero
    } else {
      cen[j] = nn / sum(m[,j])
    }
  }
  cen <- (cen - min(cen)) / (max(cen) - min(cen))  # normalize to values between "0" and "1"
  return(cen)
}
/scratch/gouwar.j/cran-all/cranData/xtranat/R/rwc_norm.R
## ---- include = FALSE--------------------------------------------------------- knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ## ----setup-------------------------------------------------------------------- library(xtranat)
/scratch/gouwar.j/cran-all/cranData/xtranat/inst/doc/xtranat.R
--- title: "xtranat" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{xtranat} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ``` ```{r setup} library(xtranat) ```
/scratch/gouwar.j/cran-all/cranData/xtranat/inst/doc/xtranat.Rmd
--- title: "xtranat" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{xtranat} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ``` ```{r setup} library(xtranat) ```
/scratch/gouwar.j/cran-all/cranData/xtranat/vignettes/xtranat.Rmd
# # xts: eXtensible time-series # # Copyright (C) 2008 Jeffrey A. Ryan jeff.a.ryan @ gmail.com # # Contributions from Joshua M. Ulrich # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. `as.xts.Date` <- function(x,...) { xts(x=NULL,order.by=x,...) }
/scratch/gouwar.j/cran-all/cranData/xts/R/Date.R
# # xts: eXtensible time-series # # Copyright (C) 2008 Jeffrey A. Ryan jeff.a.ryan @ gmail.com # # Contributions from Joshua M. Ulrich # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # This code adapted from Ops.zoo.R cumsum.xts <- function(x) { if( NCOL(x) == 1 ) { x[] <- cumsum(coredata(x)) } else x[] <- apply(coredata(x), 2, function(y) cumsum(y)) x } cumprod.xts <- function(x) { if( NCOL(x) == 1 ) { x[] <- cumprod(coredata(x)) } else x[] <- apply(coredata(x), 2, function(y) cumprod(y)) x } cummin.xts <- function(x) { if( NCOL(x) == 1 ) { x[] <- cummin(coredata(x)) } else x[] <- apply(coredata(x), 2, function(y) cummin(y)) x } cummax.xts <- function(x) { if( NCOL(x) == 1 ) { x[] <- cummax(coredata(x)) } else x[] <- apply(coredata(x), 2, function(y) cummax(y)) x } mean.xts <- function(x,...) { if(is.vector(x) ||is.null(ncol(x)) || ncol(x)==1){ x<-as.numeric(x) mean(x,...) } else apply(x,2,mean.xts,...) } sd.xts <- function(x,na.rm=FALSE) { if(is.vector(x) || is.null(ncol(x)) || ncol(x)==1){ x<-as.numeric(x) sd(x,na.rm=na.rm) } else apply(x,2,sd,na.rm=na.rm) }
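## Illustrative usage sketch (assumes the xts package is attached). The
## cumulative functions above operate column by column and keep the time index
## intact; 'x_demo' is a made-up two-column daily series.
x_demo <- xts(matrix(1:6, ncol = 2), order.by = as.Date("2024-01-01") + 0:2)
cumsum(x_demo)   # each column is accumulated separately; the dates are preserved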
/scratch/gouwar.j/cran-all/cranData/xts/R/Math.xts.R
# # xts: eXtensible time-series # # Copyright (C) 2008 Jeffrey A. Ryan jeff.a.ryan @ gmail.com # # Contributions from Joshua M. Ulrich # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # functions from quantmod to check for OHLC style/columns # NOT TO BE EXPORTED # `OHLCV` <- function (x) { if (is.OHLCV(x)) return(x[, has.OHLCV(x, 1)]) NULL } `is.OHLCV` <- function(x) { all(has.Op(x),has.Hi(x),has.Lo(x),has.Cl(x),has.Vo(x)) } `has.OHLCV` <- function(x,which=FALSE) { if(which) { c(has.Op(x,1),has.Hi(x,1),has.Lo(x,1),has.Cl(x,1),has.Vo(x,1)) } else { c(has.Op(x),has.Hi(x),has.Lo(x),has.Cl(x),has.Vo(x)) } } `OHLC` <- function (x) { if (is.OHLC(x)) return(x[, has.OHLC(x, 1)]) NULL } `is.OHLC` <- function(x) { all(has.Op(x),has.Hi(x),has.Lo(x),has.Cl(x)) } `has.OHLC` <- function(x,which=FALSE) { if(which) { c(has.Op(x,1),has.Hi(x,1),has.Lo(x,1),has.Cl(x,1)) } else { c(has.Op(x),has.Hi(x),has.Lo(x),has.Cl(x)) } } `HLC` <- function (x) { if (is.HLC(x)) return(x[, has.HLC(x, 1)]) NULL } `is.HLC` <- function(x) { all(has.Hi(x),has.Lo(x),has.Cl(x)) } `has.HLC` <- function(x,which=FALSE) { if(which) { c(has.Hi(x,1),has.Lo(x,1),has.Cl(x,1)) } else { c(has.Hi(x),has.Lo(x),has.Cl(x)) } } `Op` <- function(x) { if(has.Op(x)) return(x[,grep('Open',colnames(x),ignore.case=TRUE)]) NULL } `has.Op` <- function(x,which=FALSE) { loc <- grep('Open',colnames(x),ignore.case=TRUE) if(!identical(loc,integer(0))) return(ifelse(which,loc,TRUE)) ifelse(which,loc,FALSE) } `Hi` <- function(x) { if(has.Hi(x)) return(x[,grep('High',colnames(x),ignore.case=TRUE)]) NULL } `has.Hi` <- function(x,which=FALSE) { loc <- grep('High',colnames(x),ignore.case=TRUE) if(!identical(loc,integer(0))) return(ifelse(which,loc,TRUE)) ifelse(which,loc,FALSE) } `Lo` <- function(x) { if(has.Lo(x)) return(x[,grep('Low',colnames(x),ignore.case=TRUE)]) NULL } `has.Lo` <- function(x,which=FALSE) { loc <- grep('Low',colnames(x),ignore.case=TRUE) if(!identical(loc,integer(0))) return(ifelse(which,loc,TRUE)) ifelse(which,loc,FALSE) } `Cl` <- function(x) { if(has.Cl(x)) return(x[,grep('Close',colnames(x),ignore.case=TRUE)]) NULL } `has.Cl` <- function(x,which=FALSE) { loc <- grep('Close',colnames(x),ignore.case=TRUE) if(!identical(loc,integer(0))) return(ifelse(which,loc,TRUE)) ifelse(which,loc,FALSE) } `Vo` <- function(x) { #vo <- grep('Volume',colnames(x)) #if(!identical(vo,integer(0))) if(has.Vo(x)) return(x[,grep('Volume',colnames(x),ignore.case=TRUE)]) NULL } `has.Vo` <- function(x,which=FALSE) { loc <- grep('Volume',colnames(x),ignore.case=TRUE) if(!identical(loc,integer(0))) return(ifelse(which,loc,TRUE)) ifelse(which,loc,FALSE) } `Ad` <- function(x) { if(has.Ad(x)) return(x[,grep('Adjusted',colnames(x),ignore.case=TRUE)]) NULL } `has.Ad` <- function(x,which=FALSE) { loc <- grep('Adjusted',colnames(x),ignore.case=TRUE) if(!identical(loc,integer(0))) return(ifelse(which,loc,TRUE)) ifelse(which,loc,FALSE) }
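## Illustrative usage sketch (assumes the xts package is attached). The
## extractors above match on column names (case-insensitively), so an object
## with Open/High/Low/Close columns is recognized as OHLC data.
ohlc_demo <- xts(matrix(c(10, 12, 9, 11), nrow = 1,
                        dimnames = list(NULL, c("Open", "High", "Low", "Close"))),
                 order.by = as.Date("2024-01-02"))
is.OHLC(ohlc_demo)   # TRUE: all four price columns are present
Cl(ohlc_demo)        # returns just the Close column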
/scratch/gouwar.j/cran-all/cranData/xts/R/OHLC.R
# # xts: eXtensible time-series # # Copyright (C) 2008 Jeffrey A. Ryan jeff.a.ryan @ gmail.com # # Contributions from Joshua M. Ulrich # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. `Ops.xts` <- function(e1, e2) { # determine and output class # use 'e1' first because e2 is missing for unary +/-/! if(inherits(e1, "xts")) { # e1 could be a derived class; use its class for output # NOTE: we want the output to be an xts object even if e2 is a derived # class, because Ops.xts() might not create an appropriate derived class # object out_class <- class(e1) } else { # if 'e1' isn't xts, then e2 must be xts or a derived class, otherwise # this method wouldn't have been called out_class <- class(e2) } e <- if (missing(e2)) { .Class <- "matrix" NextMethod(.Generic) } else if (any(nchar(.Method) == 0)) { .Class <- "matrix" NextMethod(.Generic) } else { if( NROW(e1)==NROW(e2) && identical(.index(e1),.index(e2)) ) { .Class <- "matrix" NextMethod(.Generic) } else { tmp.e1 <- merge.xts(e1, e2, all=FALSE, retclass=FALSE, retside=c(TRUE,FALSE), check.names=FALSE) e2 <- merge.xts(e2, e1, all=FALSE, retclass=FALSE, retside=c(TRUE,FALSE), check.names=FALSE) e1 <- tmp.e1 .Class <- "matrix" NextMethod(.Generic) } } # These return an object the same class as input(s); others return a logical object if(.Generic %in% c("+","-","*","/","^","%%","%/%")) { e <- .Call(C_add_class, e, out_class) } if(length(e)==0) { if(is.xts(e1)) { idx <- .index(e1) } else { idx <- .index(e2) } idx[] <- idx[0] attr(e,'index') <- idx } dn <- dimnames(e) if(!is.null(dn[[1L]])) { if(is.null(dn[[2L]])) { attr(e, "dimnames") <- NULL } else { dimnames(e) <- list(NULL, dn[[2L]]) } } if(is.null(attr(e,'index'))) { if(is.xts(e1)) { e <- .xts(e, .index(e1), tclass(e1), tzone(e1), tformat = tformat(e1)) } else if(is.xts(e2)) { e <- .xts(e, .index(e2), tclass(e2), tzone(e2), tformat = tformat(e2)) } else { # neither have class = ('xts', 'zoo'), because they were overwritten # by the result of merge(..., retclass = FALSE). But they still have # an 'index' attribute. ix <- .index(e1) if (is.null(ix)) { ix <- .index(e2) } e <- .xts(e, ix, tclass(ix), tzone(ix), tformat = tformat(ix)) } if(is.null(dim(e1)) && is.null(dim(e2))) dim(e) <- NULL } attr(e, "names") <- NULL e }
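## Illustrative usage sketch (assumes the xts package is attached). When two
## xts objects have different indexes, the method above first merges them on
## the intersection of their timestamps, so only shared dates appear in the
## result; 'a_demo' and 'b_demo' are made up for demonstration.
a_demo <- xts(1:3, order.by = as.Date("2024-01-01") + 0:2)
b_demo <- xts(10 * (1:3), order.by = as.Date("2024-01-02") + 0:2)
a_demo + b_demo   # two rows (2024-01-02 and 2024-01-03) with values 12 and 23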
/scratch/gouwar.j/cran-all/cranData/xts/R/Ops.xts.R
# # xts: eXtensible time-series # # Copyright (C) 2008 Jeffrey A. Ryan jeff.a.ryan @ gmail.com # # Contributions from Joshua M. Ulrich # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. as.xts.POSIXt <- function(x, ...) { xts(NULL, order.by=x) }
/scratch/gouwar.j/cran-all/cranData/xts/R/POSIX.R
# # xts: eXtensible time-series # # Copyright (C) 2008 Jeffrey A. Ryan jeff.a.ryan @ gmail.com # # Contributions from Joshua M. Ulrich # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. adj.time <- function(x, ...) { tr <- match.call(expand.dots=FALSE)$... if(length(tr) < 1) return(x) oClass <- class(x) x <- as.POSIXlt(x) ntime <- as.environment(unclass(x)) lapply(tr, function(T) { assign(all.vars(T), with(x, eval(T)), envir=ntime) }) x <- structure(list( sec=ntime$sec, min=ntime$min, hour=ntime$hour, mday=ntime$mday, month=ntime$mon, year=ntime$year, wday=ntime$wday, yday=ntime$yday,isdst=ntime$isdst), tzone=attr(x,"tzone"), class=c("POSIXlt","POSIXt")) do.call(paste('as',oClass[1],sep='.'), list(x)) }
/scratch/gouwar.j/cran-all/cranData/xts/R/adj.time.R
# # xts: eXtensible time-series # # Copyright (C) 2008 Jeffrey A. Ryan jeff.a.ryan @ gmail.com # # Contributions from Joshua M. Ulrich # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. align.time <- function(x, ...) { UseMethod("align.time") } align.time.xts <- function(x, n=60, ...) { if(n <= 0) stop("'n' must be positive") .xts(x, .index(x) + (n-.index(x) %% n), tzone=tzone(x), tclass=tclass(x)) } align.time.POSIXct <- function(x, n=60, ...) { if(n <= 0) stop("'n' must be positive") structure(unclass(x) + (n - unclass(x) %% n),class=c("POSIXct","POSIXt")) } align.time.POSIXlt <- function(x, n=60, ...) { if(n <= 0) stop("'n' must be positive") as.POSIXlt(align.time(as.POSIXct(x),n=n,...)) } shift.time <- function(x, n=60, ...) { UseMethod("shift.time") } shift.time.xts <- function(x, n=60, ...) { .xts(x, .index(x) + n, tzone=tzone(x), tclass=tclass(x)) } is.index.unique <- is.time.unique <- function(x) { UseMethod("is.time.unique") } is.time.unique.xts <- function(x) { isOrdered(.index(x), strictly=TRUE) } is.time.unique.zoo <- function(x) { isOrdered(index(x), strictly=TRUE) } make.index.unique <- make.time.unique <- function(x, eps=0.000001, drop=FALSE, fromLast=FALSE, ...) { UseMethod("make.index.unique") } make.index.unique.xts <- function(x, eps=0.000001, drop=FALSE, fromLast=FALSE, ...) { if( !drop) { .Call(C_make_index_unique, x, eps) } else { x[.Call(C_non_duplicates, .index(x), fromLast)] } } make.index.unique.numeric <- function(x, eps=0.000001, drop=FALSE, fromLast=FALSE, ...) { if( !drop) { .Call(C_make_unique, x, eps) } else { x[.Call(C_non_duplicates, x, fromLast)] } } make.index.unique.POSIXct <- function(x, eps=0.000001, drop=FALSE, fromLast=FALSE, ...) { if( !drop) { .Call(C_make_unique, x, eps) } else { x[.Call(C_non_duplicates, x, fromLast)] } }
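## Illustrative usage sketch (assumes the xts package is attached).
## align.time() pushes each timestamp forward to the next multiple of 'n'
## seconds, here the next whole minute.
t_demo <- as.POSIXct("2024-01-02 09:30:17", tz = "UTC")
align.time(t_demo, n = 60)   # moved forward to 2024-01-02 09:31:00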
/scratch/gouwar.j/cran-all/cranData/xts/R/align.time.R
# # xts: eXtensible time-series # # Copyright (C) 2019 Joshua M. Ulrich # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. all.equal.xts <- function(target, current, ..., check.attributes = TRUE) { if (isTRUE(check.attributes)) { # Remove potential index attributes on the objects attrNames <- c(".indexCLASS", ".indexTZ", "tclass", "tzone") for (aname in attrNames) { attr(target, aname) <- NULL attr(current, aname) <- NULL } # Order the object attributes a <- attributes(target) attributes(target) <- a[sort(names(a))] a <- attributes(current) attributes(current) <- a[sort(names(a))] # Order the index attributes a <- attributes(.index(target)) attributes(.index(target)) <- a[sort(names(a))] a <- attributes(.index(current)) attributes(.index(current)) <- a[sort(names(a))] } NextMethod("all.equal") }
/scratch/gouwar.j/cran-all/cranData/xts/R/all.equal.R
# # xts: eXtensible time-series # # Copyright (C) 2008 Jeffrey A. Ryan jeff.a.ryan @ gmail.com # # Contributions from Joshua M. Ulrich # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. as.environment.xts <- function(x) { e <- new.env() lapply(1:NCOL(x), function(.) assign(colnames(x)[.], x[,.],envir=e)) e }
/scratch/gouwar.j/cran-all/cranData/xts/R/as.environment.xts.R
# # xts: eXtensible time-series # # Copyright (C) 2008 Jeffrey A. Ryan jeff.a.ryan @ gmail.com # # Contributions from Joshua M. Ulrich # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. as.numeric.xts <- function(x, drop=TRUE, ...) { if(drop) return(as.numeric(coredata(x))) .xts(matrix(as.numeric(coredata(x)),ncol=NCOL(x)), .index(x)) } as.xts.numeric <- function(x,order.by,dateFormat="POSIXct",frequency=NULL,...) { # jmu if(missing(order.by)) { if(is.null(names(x))) stop("order.by must be either 'names()' or otherwise specified") else # added '...' args to allow for tz specification order.by <- do.call(paste('as',dateFormat,sep='.'),list(names(x))) } xx <- xts(x, order.by=order.by, frequency=frequency, .CLASS='numeric', ...) return(xx) } re.numeric <- function(x,...) { if( !is.null(dim(x))) return(as.matrix(x)) # jmu y <- as.numeric(x,...) names(y) <- index(x) return(y) } as.integer.xts <- function(x, drop=TRUE, ...) { if(drop) return(as.integer(coredata(x))) .xts(matrix(as.integer(coredata(x)),ncol=NCOL(x)), .index(x)) } as.xts.integer <- function(x,order.by,dateFormat="POSIXct",frequency=NULL,...) { # jmu if(missing(order.by)) { if(is.null(names(x))) stop("order.by must be either 'names()' or otherwise specified") else # added '...' args to allow for tz specification order.by <- do.call(paste('as',dateFormat,sep='.'),list(names(x))) } xx <- xts(x, order.by=order.by, frequency=frequency, .CLASS='integer', ...) return(xx) } re.integer <- function(x,...) { if( !is.null(dim(x))) return(as.matrix(x)) # jmu y <- as.integer(x,...) names(y) <- index(x) return(y) } as.double.xts <- function(x, drop=TRUE, ...) { if(drop) return(as.double(coredata(x))) .xts(matrix(as.double(coredata(x)),ncol=NCOL(x)), .index(x)) } as.xts.double <- function(x,order.by,dateFormat="POSIXct",frequency=NULL,...) { # jmu if(missing(order.by)) { if(is.null(names(x))) stop("order.by must be either 'names()' or otherwise specified") else # added '...' args to allow for tz specification order.by <- do.call(paste('as',dateFormat,sep='.'),list(names(x))) } xx <- xts(x, order.by=order.by, frequency=frequency, .CLASS='double', ...) return(xx) } re.double <- function(x,...) { if( !is.null(dim(x))) return(as.matrix(x)) # jmu y <- as.double(x,...) names(y) <- index(x) return(y) } as.complex.xts <- function(x, drop=TRUE, ...) { if(drop) return(as.complex(coredata(x))) .xts(matrix(as.complex(coredata(x)),ncol=NCOL(x)), .index(x)) } as.xts.complex <- function(x,order.by,dateFormat="POSIXct",frequency=NULL,...) { # jmu if(missing(order.by)) { if(is.null(names(x))) stop("order.by must be either 'names()' or otherwise specified") else # added '...' args to allow for tz specification order.by <- do.call(paste('as',dateFormat,sep='.'),list(names(x))) } xx <- xts(x, order.by=order.by, frequency=frequency, .CLASS='complex', ...) return(xx) } re.complex <- function(x,...) { if( !is.null(dim(x))) return(as.matrix(x)) # jmu y <- as.complex(x,...) 
names(y) <- index(x) return(y) } as.logical.xts <- function(x, drop=TRUE, ...) { if(drop) return(as.logical(coredata(x))) .xts(matrix(as.logical(coredata(x)),ncol=NCOL(x)), .index(x)) } as.xts.logical <- function(x,order.by,dateFormat="POSIXct",frequency=NULL,...) { # jmu if(missing(order.by)) { if(is.null(names(x))) stop("order.by must be either 'names()' or otherwise specified") else # added '...' args to allow for tz specification order.by <- do.call(paste('as',dateFormat,sep='.'),list(names(x))) } xx <- xts(x, order.by=order.by, frequency=frequency, .CLASS='logical', ...) return(xx) } re.logical <- function(x,...) { if( !is.null(dim(x))) return(as.matrix(x)) # jmu y <- as.logical(x,...) names(y) <- index(x) return(y) }
/scratch/gouwar.j/cran-all/cranData/xts/R/as.numeric.R
# # xts: eXtensible time-series # # Copyright (C) 2008 Jeffrey A. Ryan jeff.a.ryan @ gmail.com # # Contributions from Joshua M. Ulrich # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. axTicksByTime <- function(x, ticks.on = "auto", k = 1, labels = TRUE, format.labels = TRUE, ends = TRUE, gt = 2, lt = 30) { # if a vector of times/dates, convert to dummy xts object if (timeBased(x)) { x <- xts(rep(1, length(x)), x) } ticks.on <- ticks.on[1L] # special-case for "secs" and "mins" if (ticks.on == "secs" || ticks.on == "mins") { ticks.on <- substr(ticks.on, 1L, 3L) } tick.opts <- c("years", "quarters", "months", "weeks", "days", "hours", "minutes", "seconds") ticks.on <- match.arg(ticks.on, c("auto", tick.opts)) if (ticks.on == "auto") { tick.k.opts <- c(10, 5, 2, 1, 3, 6, 1, 1, 1, 4, 2, 1, 30, 15, 1, 1) tick.opts <- rep(tick.opts, c(4, 1, 2, 1, 1, 3, 3, 1)) is <- structure(rep(0, length(tick.opts)), .Names = tick.opts) for (i in 1:length(tick.opts)) { ep <- endpoints(x, tick.opts[i], tick.k.opts[i]) is[i] <- length(ep) - 1 if (is[i] > lt) { break } } loc <- rev(which(is > gt & is < lt))[1L] cl <- tick.opts[loc] ck <- tick.k.opts[loc] } else { cl <- ticks.on[1L] ck <- k } if (is.null(cl) || is.na(cl) || is.na(ck)) { ep <- c(0, NROW(x)) } else { ep <- endpoints(x, cl, ck) } if (ends) { ep <- ep + c(rep(1, length(ep) - 1), 0) } if (labels) { if (is.logical(format.labels) || is.character(format.labels)) { # format by platform... unix <- (.Platform$OS.type == "unix") # ...and level of time detail fmt <- switch(periodicity(x)$scale, weekly = , daily = if (unix) "%b %d%n%Y" else "%b %d %Y", minute = , hourly = if (unix) "%b %d%n%H:%M" else "%b %d %H:%M", seconds = if (unix) "%b %d%n%H:%M:%S" else "%b %d %H:%M:%S", if (unix) "%n%b%n%Y" else "%b %Y") # special case yearqtr index if (inherits(index(x), "yearqtr")) { fmt <- "%Y-Q%q" } if (is.character(format.labels)) { fmt <- format.labels } names(ep) <- format(index(x)[ep], fmt) } else { names(ep) <- as.character(index(x)[ep]) } } ep }
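## Illustrative usage sketch (assumes the xts package is attached). With the
## default ticks.on = "auto", the function scans from coarse to fine spacings
## until it finds one giving between 'gt' and 'lt' ticks; for this made-up
## 90-day daily series that works out to roughly weekly ticks.
x_demo <- xts(rnorm(90), order.by = as.Date("2024-01-01") + 0:89)
axTicksByTime(x_demo)   # named integer vector of row positions to place ticks at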
/scratch/gouwar.j/cran-all/cranData/xts/R/axTicksByTime.R
# # xts: eXtensible time-series # # Copyright (C) 2008 Jeffrey A. Ryan jeff.a.ryan @ gmail.com # # Contributions from Joshua M. Ulrich # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. cbind.xts <- function(..., all=TRUE, fill=NA, suffixes=NULL) { # mc <- match.call(call=sys.call(sys.parent())) # mc[[1]] <- as.name("merge.xts") # eval(mc) merge.xts(..., all=all, fill=fill, suffixes=suffixes) } # # convert the call to a list to better manipulate it # mc <- as.list(match.call(call=sys.call(-1))) # # # remove deparse.level arg if called via cbind 'generic' # if(as.character(mc[[1]]) == "cbind") # mc <- mc[-2] # # # check if any default args are missing from the call, # # and add them to the call with the cbind defaults # if(missing(all)) mc <- c(mc,all=all) # if(missing(fill)) mc <- c(mc,fill=fill) # if(missing(suffixes)) mc <- c(mc,suffixes=suffixes) # # # replace the call to cbind.xts with merge.xts # mc[[1]] <- as.name('merge.xts') # # # convert the list into a call and evaluate it # mc <- as.call(mc) # eval(mc) #} # sc <- sys.call(sys.parent()) # mc <- gsub('cbind|cbind.xts','merge.xts',deparse(match.call(call=sc))) # return(eval(parse(text=mc))) # dots <- mc$... # length.args <- sum(.External("number_of_cols",...,PACKAGE="xts")) # if(is.null(suffixes)) # suffixes <- all.vars(match.call(call=sc), unique=FALSE)[1:length.args] # # if( length(suffixes) != length.args ) { # warning("length of suffixes and does not match number of merged objects") # suffixes <- rep(suffixes, length.out=length.args) # } # # merge.xts(..., all=all, fill=fill, suffixes=suffixes) # # dat <- list(...) # x <- dat[[1]]; dat <- dat[-1] # while( length(dat) > 0 ) { # y <- dat[[1]] # if( length(dat) > 0 ) # dat <- dat[-1] # x <- merge.xts(x, y, all=TRUE, fill=NA, suffixes=NULL, retclass="xts") # } # x #} `c.xts` <- function(...) { .External(C_rbindXts, dup=FALSE, ...) } rbind.xts <- function(..., deparse.level=1) { .External(C_rbindXts, dup=FALSE, ...) } `.rbind.xts` <- function(..., deparse.level=1) { dots <- list(...) if(length(dots) < 2) return(dots[[1]]) x <- dots[[1]] dots <- dots[-1] while( length(dots) > 0 ) { y <- dots[[1]] if( length(dots) > 0) dots <- dots[-1] if(!is.null(colnames(y)) && colnames(x) != colnames(y)) warning('column names differ') x <- .Call(C_do_rbind_xts,x,y,FALSE) } return(x) }
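## Illustrative usage sketch (assumes the xts package is attached). rbind.xts()
## combines rows from several xts objects and keeps the result ordered by time,
## even when the pieces are supplied out of order.
jan_demo <- xts(1:2, order.by = as.Date(c("2024-01-01", "2024-01-02")))
feb_demo <- xts(3:4, order.by = as.Date(c("2024-02-01", "2024-02-02")))
rbind(feb_demo, jan_demo)   # four rows, January observations before February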
/scratch/gouwar.j/cran-all/cranData/xts/R/bind.R
# # xts: eXtensible time-series # # Copyright (C) 2008 Jeffrey A. Ryan jeff.a.ryan @ gmail.com # # Contributions from Joshua M. Ulrich # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. coredata.xts <- function(x, fmt=FALSE, ...) { x.attr <- attributes(x) if(is.character(fmt)) { tformat(x) <- fmt fmt <- TRUE } if(length(x) > 0 && fmt) { if(!is.null(tformat(x))) { x.attr$dimnames <- list(format(index(x), format=tformat(x)), dimnames(x)[[2]]) tformat(x) <- NULL # remove before printing } else { x.attr$dimnames <- list(format(index(x)),dimnames(x)[[2]]) } #attributes not to be kept original.attr <- x.attr[!names(x.attr) %in% c('dim','dimnames')] if(is.null(dim(x))) { xx <- structure(coredata(x), names=x.attr$dimnames[[1]]) } else { xx <- structure(coredata(x), dim=dim(x), dimnames=x.attr$dimnames) } for(i in names(original.attr)) { attr(xx,i) <- NULL } return(xx) } if(length(x) == 0) { xx <- NextMethod(x) attr(xx, ".indexCLASS") <- NULL attr(xx, "tclass") <- NULL # Remove tz attrs (object created before 0.10-3) attr(xx, ".indexTZ") <- NULL attr(xx, "tzone") <- NULL return(xx) } else return(.Call(C_coredata_xts, x)) } `xcoredata.default` <- function(x,...) { x.attr <- attributes(x) original.attr <- x.attr[!names(x.attr) %in% c('dim','dimnames')] original.attr } `xcoredata` <- function(x,...) 
{ UseMethod('xcoredata') } `xcoredata<-` <- function(x,value) { UseMethod('xcoredata<-') } `xcoredata<-.default` <- function(x,value) { if(is.null(value)) { return(coredata(x)) } else { for(att in names(value)) { if(!att %in% c('dim','dimnames')) attr(x,att) <- value[[att]] } return(x) } } `xtsAttributes` <- function(x, user=NULL) { # get all additional attributes not standard to xts object #stopifnot(is.xts(x)) rm.attr <- c('dim','dimnames','index','class','names') x.attr <- attributes(x) if(is.null(user)) { # Both xts and user attributes rm.attr <- c(rm.attr,'.CLASS','.CLASSnames','.ROWNAMES', '.indexCLASS', '.indexFORMAT', '.indexTZ', 'tzone', 'tclass') xa <- x.attr[!names(x.attr) %in% rm.attr] } else if(user) { # Only user attributes rm.attr <- c(rm.attr,'.CLASS','.CLASSnames','.ROWNAMES', '.indexCLASS', '.indexFORMAT','.indexTZ','tzone','tclass', x.attr$.CLASSnames) xa <- x.attr[!names(x.attr) %in% rm.attr] } else { # Only xts attributes xa <- x.attr[names(x.attr) %in% x.attr$.CLASSnames] } if(length(xa) == 0) return(NULL) xa } `xtsAttributes<-` <- function(x,value) { UseMethod('xtsAttributes<-') } `xtsAttributes<-.xts` <- function(x,value) { if(is.null(value)) { for(nm in names(xtsAttributes(x))) { attr(x,nm) <- NULL } } else for(nv in names(value)) { if(!nv %in% c('dim','dimnames','index','class','.CLASS','.ROWNAMES','.CLASSnames')) attr(x,nv) <- value[[nv]] } # Remove tz attrs (object created before 0.10-3) attr(x, ".indexTZ") <- NULL attr(x, "tzone") <- NULL # Remove index class attrs (object created before 0.10-3) attr(x, ".indexCLASS") <- NULL attr(x, "tclass") <- NULL # Remove index format attr (object created before 0.10-3) attr(x, ".indexFORMAT") <- NULL x }
/scratch/gouwar.j/cran-all/cranData/xts/R/coredata.xts.R
# # xts: eXtensible time-series # # Copyright (C) 2008 Jeffrey A. Ryan jeff.a.ryan @ gmail.com # # Contributions from Joshua M. Ulrich # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # functions to handle data.frame <--> xts conversions `re.data.frame` <- function(x,...) { data.frame(x,...) } `as.xts.data.frame` <- function(x, order.by, dateFormat = "POSIXct", frequency = NULL, ..., .RECLASS = FALSE) { # Should allow 'order.by' to be a vector of dates or a scaler # representing the column number to use. if(missing(order.by)) { order_by_ <- try({ coerce.rownames <- paste("as", dateFormat, sep = ".") do.call(coerce.rownames, list(rownames(x))) }, silent = TRUE) if(inherits(order_by_, "try-error")) { # parsing row names failed, so look for a time-based column time.based.col <- vapply(x, is.timeBased, logical(1)) if(any(time.based.col)) { # use the first time-based column which.col <- which.max(time.based.col) order_by_ <- x[[which.col]] x <- x[, -which.col, drop = FALSE] } else { stop("could not convert row names to a date-time and could not find a time-based column") } } } else { order_by_ <- order.by } if(.RECLASS) { xx <- xts(x, order.by=order_by_, frequency=frequency, .CLASS='data.frame', ...) } else { xx <- xts(x, order.by=order_by_, frequency=frequency, ...) } xx } `as.data.frame.xts` <- function(x,row.names=NULL,optional=FALSE,...) { if(missing(row.names)) row.names <- as.character(index(x)) as.data.frame(coredata(x),row.names,optional,...) }
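## Illustrative usage sketch (assumes the xts package is attached). When
## 'order.by' is missing, as.xts.data.frame() first tries to parse the row
## names as date-times and, failing that, falls back to the first time-based
## column; 'df_demo' is made up for demonstration.
df_demo <- data.frame(value = c(1.5, 2.5, 3.5),
                      row.names = as.character(as.Date("2024-01-01") + 0:2))
as.xts(df_demo)   # the row names become a POSIXct index; 'value' is the data column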
/scratch/gouwar.j/cran-all/cranData/xts/R/data.frame.R
# # xts: eXtensible time-series # # Copyright (C) 2008 Jeffrey A. Ryan jeff.a.ryan @ gmail.com # # Contributions from Joshua M. Ulrich # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # dimnames will return the actual dimnames of the xts object # dimnames<-.xts will force the rownames to always be NULL `dimnames.xts` <- function(x) { #list(NULL, colnames(unclass(x))) .Call(C_dimnames_zoo,x); #list(as.character(index(x)), colnames(unclass(x))) } `dimnames<-.xts` <- function(x, value) { .Call(C_xts_set_dimnames, x, value) }
/scratch/gouwar.j/cran-all/cranData/xts/R/dimnames.R
# # xts: eXtensible time-series # # Copyright (C) 2008 Jeffrey A. Ryan jeff.a.ryan @ gmail.com # # Contributions from Joshua M. Ulrich # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. endpoints <- function(x,on='months',k=1) { if(k < 1) { stop("'k' must be > 0") } if(timeBased(x)) { NR <- length(x) x <- xts(, order.by=x) } else NR <- NROW(x) addlast <- TRUE # remove automatic NR last value if(!is.xts(x)) x <- try.xts(x, error='must be either xts-coercible or timeBased') # special-case "secs" and "mins" for back-compatibility if(on == "secs" || on == "mins") on <- substr(on, 1L, 3L) on <- match.arg(on, c("years", "quarters", "months", "weeks", "days", "hours", "minutes", "seconds", "milliseconds", "microseconds", "ms", "us")) # posixltindex is costly in memory (9x length of time) # make sure we really need it if(on %in% c('years','quarters','months','weeks','days')) posixltindex <- as.POSIXlt(.POSIXct(.index(x)),tz=tzone(x)) include_last <- function(x, k) { len <- length(x) i <- seq(1L ,len, k) if(i[length(i)] != len) { i <- c(i, len) } ep[i] } switch(on, "years" = { as.integer(c(0, which(diff(posixltindex$year %/% k + 1) != 0), NR)) }, "quarters" = { ixyear <- posixltindex$year * 100L + 190000L ixqtr <- ixyear + posixltindex$mon %/% 3L + 1L ep <- c(0L, which(diff(ixqtr) != 0L), NR) if(k > 1) { ep <- include_last(ep, k) } ep }, "months" = { ixmon <- posixltindex$year * 100L + 190000L + posixltindex$mon ep <- .Call(C_endpoints, ixmon, 1L, 1L, addlast) if(k > 1) { ep <- include_last(ep, k) } ep }, "weeks" = { .Call(C_endpoints, .index(x)+3L*86400L, 604800L, k, addlast) }, "days" = { ixyday <- posixltindex$year * 1000L + 1900000L + posixltindex$yday .Call(C_endpoints, ixyday, 1L, k, addlast) }, # non-date slicing should be indifferent to TZ and DST, so use math instead "hours" = { .Call(C_endpoints, .index(x), 3600L, k, addlast) }, "minutes" = { .Call(C_endpoints, .index(x), 60L, k, addlast) }, "seconds" = { .Call(C_endpoints, .index(x), 1L, k, addlast) }, "ms" = , "milliseconds" = { sec2ms <- .index(x) * 1e3 .Call(C_endpoints, sec2ms, 1L, k, addlast) }, "us" = , "microseconds" = { sec2us <- .index(x) * 1e6 .Call(C_endpoints, sec2us, 1L, k, addlast) } ) } `startof` <- function(x,by='months', k=1) { ep <- endpoints(x,on=by, k=k) (ep+1)[-length(ep)] } `endof` <- function(x,by='months', k=1) { endpoints(x,on=by, k=k)[-1] } `firstof` <- function(year=1970,month=1,day=1,hour=0,min=0,sec=0,tz="") { ISOdatetime(year,month,day,hour,min,sec,tz) } lastof <- function (year = 1970, month = 12, day = 31, hour = 23, min = 59, sec = 59, subsec=.99999, tz = "") { if(!missing(sec) && sec %% 1 != 0) subsec <- 0 sec <- ifelse(year < 1970, sec, sec+subsec) # <1970 asPOSIXct bug workaround #sec <- sec + subsec mon.lengths <- c(31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31) if (missing(day)) { day <- ifelse(month %in% 2, ifelse(((year%%4 %in% 0 & !year%%100 %in% 0) | (year%%400 %in% 0)), 29, 28), mon.lengths[month]) } # strptime has an 
issue (bug?) which returns NA when passed # 1969-12-31-23-59-59; pass 58.9 secs instead. sysTZ <- Sys.getenv("TZ") if (length(c(year, month, day, hour, min, sec)) == 6 && all(c(year, month, day, hour, min, sec) == c(1969, 12, 31, 23, 59, 59)) && (sysTZ == "" || isUTC(sysTZ))) sec <- sec-1 ISOdatetime(year, month, day, hour, min, sec, tz) }
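## Illustrative usage sketch (assumes the xts package is attached). endpoints()
## returns the row numbers of the last observation in each period, always
## starting with 0 so that consecutive pairs give (start, end] slices.
x_demo <- xts(1:10, order.by = as.Date("2024-01-25") + 0:9)
endpoints(x_demo, on = "months")   # c(0, 7, 10): January ends at row 7, February at row 10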
/scratch/gouwar.j/cran-all/cranData/xts/R/endpoints.R
# # xts: eXtensible time-series # # Copyright (C) 2008 Jeffrey A. Ryan jeff.a.ryan @ gmail.com # # Contributions from Joshua M. Ulrich # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. `fillIndex` <- function(x) { p <- periodicity(x) xx <- xts(matrix(rep(NA,NCOL(x)),nrow=1), seq(start(x),end(x),by=p$units)) xx[index(xx) %in% index(x)] <- x colnames(xx) <- colnames(x) xx }
/scratch/gouwar.j/cran-all/cranData/xts/R/fillIndex.R
# # xts: eXtensible time-series # # Copyright (C) 2008 Jeffrey A. Ryan jeff.a.ryan @ gmail.com # # Contributions from Joshua M. Ulrich # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. `first` <- function(x,...) { UseMethod("first") } `first.default` <- function(x,n=1,keep=FALSE,...) { if(length(x) == 0) return(x) if(is.character(n)) { xx <- try.xts(x, error=FALSE) if(is.xts(xx)) { xx <- first.xts(x, n=n, keep=keep, ...) return(reclass(xx)) } } if(is.null(dim(x))) { if(n > 0) { sub <- seq_len(min(n, length(x))) xx <- x[sub] if(keep) xx <- structure(xx,keep=x[(-(-n)+1):NROW(x)]) xx } else if(n < 0) { sub <- seq.int(to = length(x), length.out = max(length(x)-(-n), 0L)) xx <- x[sub] if(keep) xx <- structure(xx,keep=x[1:(-n)]) xx } else { xx <- x[0] if(keep) xx <- structure(xx,keep=x[0]) xx } } else { if(n > 0) { sub <- seq_len(min(n, NROW(x))) xx <- x[sub,,drop=FALSE] if(keep) xx <- structure(xx,keep=x[(-(-n)+1):NROW(x),]) xx } else if(n < 0) { sub <- seq.int(to = NROW(x), length.out = max(NROW(x)-(-n), 0L)) xx <- x[sub,,drop=FALSE] if(keep) xx <- structure(xx,keep=x[1:(-n),]) xx } else { xx <- x[0,,drop=FALSE] if(keep) xx <- structure(xx,keep=x[0,]) xx } } } `first.xts` <- function(x,n=1,keep=FALSE,...) { if(length(x) == 0) return(x) if(is.character(n)) { # n period set np <- strsplit(n," ",fixed=TRUE)[[1]] if(length(np) > 2 || length(np) < 1) stop(paste("incorrectly specified",sQuote("n"),sep=" ")) # series periodicity sp <- periodicity(x) # requested periodicity$units sp.units <- sp[["units"]] rpu <- np[length(np)] rpf <- ifelse(length(np) > 1, as.numeric(np[1]), 1) if(rpu == sp.units) { n <- rpf } else { # if singular - add an s to make it work if(substr(rpu,length(strsplit(rpu,'')[[1]]),length(strsplit(rpu,'')[[1]])) != 's') rpu <- paste(rpu,'s',sep='') u.list <- list(secs=4,seconds=4,mins=3,minutes=3,hours=2,days=1, weeks=1,months=1,quarters=1,years=1) dt.options <- c('seconds','secs','minutes','mins','hours','days', 'weeks','months','quarters','years') if(!rpu %in% dt.options) stop(paste("n must be numeric or use",paste(dt.options,collapse=','))) dt <- dt.options[pmatch(rpu,dt.options)] if(u.list[[dt]] > u.list[[sp.units]]) { # req is for higher freq data period e.g. 
100 mins of daily data stop(paste("At present, without some sort of magic, it isn't possible", "to resolve",rpu,"from",sp$scale,"data")) } ep <- endpoints(x,dt) if(rpf > length(ep)-1) { rpf <- length(ep)-1 warning("requested length is greater than original") } if(rpf > 0) { n <- ep[rpf+1] if(is.null(dim(x))) { xx <- x[1:n] } else { xx <- x[1:n,,drop=FALSE] } if(keep) xx <- structure(xx,keep=x[(ep[-(-rpf)+1]+1):NROW(x)]) return(xx) } else if(rpf < 0) { n <- ep[-rpf+1]+1 if(is.null(dim(x))) { xx <- x[n:NROW(x)] } else { xx <- x[n:NROW(x),,drop=FALSE] } if(keep) xx <- structure(xx,keep=x[1:(ep[-rpf+1])]) return(xx) } else { if(is.null(dim(x))) { xx <- x[0] } else { xx <- x[0,,drop=FALSE] } if(keep) xx <- structure(xx,keep=x[0]) return(xx) } } } if(length(n) != 1) stop("n must be of length 1") if(n > 0) { n <- min(n, NROW(x)) if(is.null(dim(x))) { xx <- x[1:n] } else { xx <- x[1:n,,drop=FALSE] } if(keep) xx <- structure(xx,keep=x[(-(-n)+1):NROW(x)]) xx } else if(n < 0) { if(abs(n) >= NROW(x)) return(x[0]) if(is.null(dim(x))) { xx <- x[(-n+1):NROW(x)] } else { xx <- x[(-n+1):NROW(x),,drop=FALSE] } if(keep) xx <- structure(xx,keep=x[1:(-n)]) xx } else { if(is.null(dim(x))) { xx <- x[0] } else { xx <- x[0,,drop=FALSE] } if(keep) xx <- structure(xx,keep=x[0]) xx } }
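## Illustrative usage sketch (assumes the xts package is attached). 'n' may be
## a row count or a period string, so "1 week" keeps every observation that
## falls in the first calendar week of this made-up daily series.
x_demo <- xts(1:10, order.by = as.Date("2024-01-01") + 0:9)
first(x_demo, 3)          # the first three rows
first(x_demo, "1 week")   # rows for Mon 2024-01-01 through Sun 2024-01-07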
/scratch/gouwar.j/cran-all/cranData/xts/R/first.R
# # xts: eXtensible time-series # # Copyright (C) 2008 Jeffrey A. Ryan jeff.a.ryan @ gmail.com # # Contributions from Joshua M. Ulrich # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. index.xts <- time.xts <- function(x, ...) { value <- tclass(x) if(is.null(value) || !nzchar(value[1L])) { warning("index does not have a ", sQuote("tclass"), " attribute\n", " returning c(\"POSIXct\", \"POSIXt\")") ix <- .index(x) attr(ix, "tclass") <- attr(ix, "class") <- c("POSIXct", "POSIXt") return(ix) } # if tclass is Date, POSIXct time is set to 00:00:00 GMT. Convert here # to avoid ugly and hard to debug TZ conversion. What will this break? if(value[[1]] == "Date") #return( as.Date(.index(x)/86400) ) return( structure(.index(x) %/% 86400, class="Date")) #x.index <- structure(.index(x), class=c("POSIXct","POSIXt")) x.index <- .POSIXct(.index(x), tz=attr(.index(x), "tzone")) if(!is.list(value)) value <- as.list(value) switch(value[[1]], multitime = as.Date(as.character(x.index)), POSIXt = { # get specific ct/lt value do.call(paste('as',value[[2]],sep='.'),list(x.index)) }, POSIXct = as.POSIXct(x.index), POSIXlt = as.POSIXlt(x.index), timeDate = { if(!requireNamespace("timeDate", quietly=TRUE)) stop("package:",dQuote("timeDate"),"cannot be loaded.") timeDate::as.timeDate(x.index) }, chron = , dates = { if(!requireNamespace("chron", quietly=TRUE)) stop("package:",dQuote("chron"),"cannot be loaded.") chron::as.chron(format(x.index)) }, #Date = as.Date(as.character(x.index)), # handled above yearmon = as.yearmon(x.index), yearqtr = as.yearqtr(x.index), stop(paste('unsupported',sQuote('tclass'),'indexing type:',value[[1]])) ) } `time<-.xts` <- `index<-.xts` <- function(x, value) { if(length(index(x)) != length(value)) stop('length of index vectors does not match') if( !timeBased(value) ) stop(paste('unsupported',sQuote('index'), 'index type of class',sQuote(class(value)))) # copy original index attributes ixattr <- attributes(attr(x, 'index')) # set index to the numeric value of the desired index class if(inherits(value,"Date")) attr(x, 'index') <- structure(unclass(value)*86400, tclass="Date", tzone="UTC") else attr(x, 'index') <- as.numeric(as.POSIXct(value)) # ensure new index is sorted if(!isOrdered(.index(x), strictly=FALSE)) stop("new index needs to be sorted") # set tclass attribute to the end-user specified class attr(attr(x, 'index'), 'tclass') <- class(value) # set tzone attribute if(isClassWithoutTZ(object = value)) { attr(attr(x, 'index'), 'tzone') <- 'UTC' } else { if (is.null(attr(value, 'tzone'))) { # ensure index has tzone attribute if value does not attr(attr(x, 'index'), 'tzone') <- ixattr[["tzone"]] } else { attr(attr(x, 'index'), 'tzone') <- attr(value, 'tzone') } } return(x) } `.index` <- function(x, ...) 
{ if(is.list(attr(x, "index"))) { attr(x, 'index')[[1]] } else attr(x, "index") } `.index<-` <- function(x, value) { if(timeBased(value)) { if(inherits(value, 'Date')) { attr(x, 'index') <- as.numeric(value) } else { attr(x, 'index') <- as.numeric(as.POSIXct(value)) } } else if(is.numeric(value)) { attr(value, 'tclass') <- tclass(x) attr(value, 'tzone') <- tzone(x) attr(x, 'index') <- value } else stop(".index is used for low level operations - data must be numeric or timeBased") return(x) } `.indexsec` <- function(x) { as.POSIXlt(.POSIXct(.index(x), tz=tzone(x)))$sec } `.indexmin` <- function(x) { as.POSIXlt(.POSIXct(.index(x), tz=tzone(x)))$min } `.indexhour` <- function(x) { as.POSIXlt(.POSIXct(.index(x), tz=tzone(x)))$hour } `.indexmday` <- function(x) { as.POSIXlt(.POSIXct(.index(x), tz=tzone(x)))$mday } `.indexmon` <- function(x) { as.POSIXlt(.POSIXct(.index(x), tz=tzone(x)))$mon } `.indexyear` <- function(x) { as.POSIXlt(.POSIXct(.index(x), tz=tzone(x)))$year } `.indexwday` <- function(x) { as.POSIXlt(.POSIXct(.index(x), tz=tzone(x)))$wday } `.indexbday` <- function(x) { # is business day T/F as.POSIXlt(.POSIXct(.index(x), tz=tzone(x)))$wday %% 6 > 0 } `.indexyday` <- function(x) { as.POSIXlt(.POSIXct(.index(x), tz=tzone(x)))$yday } `.indexisdst` <- function(x) { as.POSIXlt(.POSIXct(.index(x), tz=tzone(x)))$isdst } `.indexDate` <- `.indexday` <- function(x) { .index(x) %/% 86400L } `.indexweek` <- function(x) { (.index(x) + (3 * 86400)) %/% 86400 %/% 7 } `.indexyweek` <- function(x) { ((.index(x) + (3 * 86400)) %/% 86400 %/% 7) - ((startOfYear() * 86400 + (3 * 86400)) %/% 86400 %/% 7)[.indexyear(x) + 1] } .update_index_attributes <- function(x) { suppressWarnings({ tclass(x) <- tclass(x) tzone(x) <- tzone(x) }) return(x) }
/scratch/gouwar.j/cran-all/cranData/xts/R/index.R
# # xts: eXtensible time-series # # Copyright (C) 2008 Jeffrey A. Ryan jeff.a.ryan @ gmail.com # # Contributions from Joshua M. Ulrich # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # methods for tseries::irts `re.irts` <- function(x,...) { if(!requireNamespace('tseries', quietly=TRUE)) { irts <- function(...) message("package 'tseries' is required for re.irts") } else { irts <- tseries::irts } tclass(x) <- "POSIXct" xx <- coredata(x) # rownames(xx) <- attr(x,'irts.rownames') irts(index(x),xx) } `as.xts.irts` <- function(x,order.by,frequency=NULL,...,.RECLASS=FALSE) { if(.RECLASS) { xx <- xts(x=x$value, order.by=x$time, frequency=frequency, .CLASS='irts', # irts.rownames=rownames(x$value), ...) } else { xx <- xts(x=x$value, order.by=x$time, frequency=frequency, ...) } xx }
/scratch/gouwar.j/cran-all/cranData/xts/R/irts.R
# # xts: eXtensible time-series # # Copyright (C) 2008 Jeffrey A. Ryan jeff.a.ryan @ gmail.com # # Contributions from Joshua M. Ulrich # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. `isOrdered` <- function(x, increasing=TRUE, strictly=TRUE) { # x must be of type double or integer. Checked in the C code. if(is.character(x)) stop('character ordering unsupported') if(!is.numeric(x)) x = as.numeric(x) .Call(C_do_is_ordered, x = x, increasing = as.logical(increasing), strictly = as.logical(strictly)) }
/scratch/gouwar.j/cran-all/cranData/xts/R/isOrdered.R
# # xts: eXtensible time-series # # Copyright (C) 2008 Jeffrey A. Ryan jeff.a.ryan @ gmail.com # # Contributions from Joshua M. Ulrich # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. `Lag.xts` <- function(x, k=1, na.action=na.pass, ...) { x <- try.xts(x, error=FALSE) if(!is.xts(x)) x <- as.matrix(x) xx <-sapply(k, function(k) { apply(x, 2, function(x) { if(k==0) return(as.matrix(x)) as.matrix(c(rep(NA, k), x[-((length(x) - k + 1):length(x))])) } )} ) xx <- matrix(as.numeric(xx),nrow=NROW(x)) colnames(xx) <- c(paste(colnames(x)[(rep(1:NCOL(x),length(k)))], 'lag', rep(k, each=NCOL(x)), sep = ".")) as.function(na.action)(reclass(xx,x)) } `Next.xts` <- function(x, k=1, na.action=na.pass, ...) { x <- try.xts(x, error=FALSE) if(!is.xts(x)) x <- as.matrix(x) xx <-sapply(k, function(k) { apply(x, 2, function(x) { if(k==0) return(as.matrix(x)) as.matrix(c(x[-(1:k)],rep(NA, k))) } )} ) xx <- matrix(as.numeric(xx),nrow=NROW(x)) colnames(xx) <- c(paste(colnames(x)[(rep(1:NCOL(x),length(k)))], 'next', rep(k, each=NCOL(x)), sep = ".")) as.function(na.action)(reclass(xx,x)) } lag.xts <- function(x, k=1, na.pad=TRUE, ...) { zooCompat <- getOption('xts.compat.zoo.lag') if(is.logical(zooCompat) && zooCompat) { k <- -k if(missing(na.pad)) na.pad <- FALSE } if(length(k) > 1) { if(is.null(names(k))) names(k) <- paste("lag",k,sep="") return(do.call("merge.xts", lapply(k, lag.xts, x=x, na.pad=na.pad,...))) } .Call(C_lag_xts, x, k, na.pad) } lagts.xts <- function(x, k=1, na.pad=TRUE, ...) { if(length(k) > 1) { if(is.null(names(k))) names(k) <- paste("lag",k,sep="") return(do.call("merge.xts", lapply(k, lag.xts, x=x, na.pad=na.pad,...))) } .Call(C_lag_xts, x, k, na.pad) } diff.xts <- function(x, lag=1, differences=1, arithmetic=TRUE, log=FALSE, na.pad=TRUE, ...) { if(!is.integer(lag) && any(is.na(as.integer(lag)))) stop("'lag' must be integer") differences <- as.integer(differences[1L]) if(is.na(differences)) stop("'differences' must be integer") if(is.logical(x)) { x <- .xts(matrix(as.integer(x), ncol=NCOL(x)), .index(x), tclass(x), dimnames=dimnames(x)) } if(lag < 1 || differences < 1) stop("'diff.xts' defined only for positive lag and differences arguments") zooCompat <- getOption('xts.compat.zoo.lag') if(is.logical(zooCompat) && zooCompat) { # this has to negated to satisfy the test in lag.xts... oh my lag <- -lag if(missing(na.pad)) na.pad <- FALSE } if(differences > 1) { if(arithmetic && !log) { #log is FALSE or missing x <- x - lag.xts(x, k=lag, na.pad=na.pad) } else { if(log) { x <- log(x/lag.xts(x, k=lag, na.pad=na.pad)) } else x <- x/lag.xts(x, k=lag, na.pad=na.pad) } diff(x, lag, differences=differences-1, arithmetic=arithmetic, log=log, na.pad=na.pad, ...) } else { if(arithmetic && !log) { x - lag.xts(x, k=lag, na.pad=na.pad) } else { if(log) { log(x/lag.xts(x, k=lag, na.pad=na.pad)) } else x/lag.xts(x, k=lag, na.pad=na.pad) } } }
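## Illustrative usage sketch (assumes the xts package is attached, with the
## default options). Unlike lag.zoo(), a positive 'k' shifts observations
## forward in time (the value from t-1 shows up at t), and diff() pads with NA
## so the result keeps its original length.
x_demo <- xts(c(100, 102, 101, 105), order.by = as.Date("2024-01-01") + 0:3)
lag(x_demo, k = 1)    # first value becomes NA; the series is shifted down one row
diff(x_demo)          # NA, 2, -1, 4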
/scratch/gouwar.j/cran-all/cranData/xts/R/lag.xts.R
# # xts: eXtensible time-series # # Copyright (C) 2008 Jeffrey A. Ryan jeff.a.ryan @ gmail.com # # Contributions from Joshua M. Ulrich # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. `last` <- function(x,...) { UseMethod("last") } `last.default` <- function(x,n=1,keep=FALSE,...) { if(length(x) == 0) return(x) if(is.character(n)) { xx <- try.xts(x, error=FALSE) if(is.xts(xx)) { xx <- last.xts(x, n=n, keep=keep, ...) return(reclass(xx)) } } if(is.null(dim(x))) { if(n > 0) { sub <- seq.int(to = length(x), length.out = min(n, length(x))) xx <- x[sub] if(keep) xx <- structure(xx,keep=x[1:(NROW(x)+(-n))]) xx } else if(n < 0) { sub <- seq_len(max(length(x) + n, 0L)) xx <- x[sub] if(keep) xx <- structure(xx,keep=x[((NROW(x)-(-n)+1):NROW(x))]) xx } else { xx <- x[0] if(keep) xx <- structure(xx,keep=x[0]) xx } } else { if(n > 0) { sub <- seq.int(to = NROW(x), length.out = min(n, NROW(x))) xx <- x[sub,,drop=FALSE] if(keep) xx <- structure(xx,keep=x[1:(NROW(x)+(-n)),]) xx } else if(n < 0) { sub <- seq_len(max(NROW(x) + n, 0L)) xx <- x[sub,,drop=FALSE] if(keep) xx <- structure(xx,keep=x[((NROW(x)-(-n)+1):NROW(x)),]) xx } else { xx <- x[0,,drop=FALSE] if(keep) xx <- structure(xx,keep=x[0,]) xx } } } `last.xts` <- function(x,n=1,keep=FALSE,...) { if(length(x) == 0) return(x) if(is.character(n)) { # n period set np <- strsplit(n," ",fixed=TRUE)[[1]] if(length(np) > 2 || length(np) < 1) stop(paste("incorrectly specified",sQuote("n"),sep=" ")) # series periodicity sp <- periodicity(x) sp.units <- sp[["units"]] # requested periodicity$units rpu <- np[length(np)] rpf <- ifelse(length(np) > 1, as.numeric(np[1]), 1) if(rpu == sp.units) { n <- rpf } else { # if singular - add an s to make it work if(substr(rpu,length(strsplit(rpu,'')[[1]]),length(strsplit(rpu,'')[[1]])) != 's') rpu <- paste(rpu,'s',sep='') u.list <- list(secs=4,seconds=4,mins=3,minutes=3,hours=2,days=1, weeks=1,months=1,quarters=1,years=1) dt.options <- c('seconds','secs','minutes','mins','hours','days', 'weeks','months','quarters','years') if(!rpu %in% dt.options) stop(paste("n must be numeric or use",paste(dt.options,collapse=','))) dt <- dt.options[pmatch(rpu,dt.options)] if(u.list[[dt]] > u.list[[sp.units]]) { # req is for higher freq data period e.g. 
100 mins of daily data stop(paste("At present, without some sort of magic, it isn't possible", "to resolve",rpu,"from",sp$scale,"data")) } ep <- endpoints(x,dt) if(rpf > length(ep)-1) { rpf <- length(ep)-1 warning("requested length is greater than original") } if(rpf > 0) { n <- ep[length(ep)-rpf]+1 if(is.null(dim(x))) { xx <- x[n:NROW(x)] } else { xx <- x[n:NROW(x),,drop=FALSE] } if(keep) xx <- structure(xx,keep=x[1:(ep[length(ep)+(-rpf)])]) return(xx) } else if(rpf < 0) { n <- ep[length(ep)+rpf] if(is.null(dim(x))) { xx <- x[1:n] } else { xx <- x[1:n,,drop=FALSE] } if(keep) xx <- structure(xx,keep=x[(ep[length(ep)-(-rpf)]+1):NROW(x)]) return(xx) } else { if(is.null(dim(x))) { xx <- x[0] } else { xx <- x[0,,drop=FALSE] } if(keep) xx <- structure(xx,keep=x[0]) return(xx) } } } if(length(n) != 1) stop("n must be of length 1") if(n > 0) { n <- min(n, NROW(x)) if(is.null(dim(x))) { xx <- x[(NROW(x)-n+1):NROW(x)] } else { xx <- x[(NROW(x)-n+1):NROW(x),,drop=FALSE] } if(keep) xx <- structure(xx,keep=x[1:(NROW(x)+(-n))]) xx } else if(n < 0) { if(abs(n) >= NROW(x)) return(x[0]) if(is.null(dim(x))) { xx <- x[1:(NROW(x)+n)] } else { xx <- x[1:(NROW(x)+n),,drop=FALSE] } if(keep) xx <- structure(xx,keep=x[((NROW(x)-(-n)+1):NROW(x))]) xx } else { if(is.null(dim(x))) { xx <- x[0] } else { xx <- x[0,,drop=FALSE] } if(keep) xx <- structure(xx,keep=x[0]) xx } }
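# ---------------------------------------------------------------------------
# Editor's note -- illustrative usage sketch, not part of the original xts
# sources.  It shows last() with numeric and period-based 'n'; the sample
# data are made up and require the xts package to be installed.
# ---------------------------------------------------------------------------
library(xts)

x <- xts(1:10, order.by = as.Date("2020-01-01") + 0:9)

last(x)             # the final observation
last(x, 3)          # the final three observations
last(x, -2)         # everything except the final two observations
last(x, "1 week")   # period-based selection, resolved via endpoints()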
/scratch/gouwar.j/cran-all/cranData/xts/R/last.R
#
#   xts: eXtensible time-series
#
#   Copyright (C) 2008  Jeffrey A. Ryan jeff.a.ryan @ gmail.com
#
#   Contributions from Joshua M. Ulrich
#
#   This program is free software: you can redistribute it and/or modify
#   it under the terms of the GNU General Public License as published by
#   the Free Software Foundation, either version 2 of the License, or
#   (at your option) any later version.
#
#   This program is distributed in the hope that it will be useful,
#   but WITHOUT ANY WARRANTY; without even the implied warranty of
#   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#   GNU General Public License for more details.
#
#   You should have received a copy of the GNU General Public License
#   along with this program.  If not, see <http://www.gnu.org/licenses/>.

as.list.xts <- function(x, ...) {
  # a single-column object is returned as a one-element (possibly named) list
  if( NCOL(x) == 1 )
    return(structure(list(x), .Names = colnames(x)))

  cindex <- cnames <- colnames(x)
  if(is.null(cnames)) {
    # unnamed columns get positional fallback names "x.1", "x.2", ...
    cindex <- 1:NCOL(x)
    cnames <- paste("x", cindex, sep = ".")
  }
  names(cindex) <- cnames

  # one single-column xts object per column; element names come from 'cindex'
  lapply(cindex, function(f) x[, f], ...)
}
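# ---------------------------------------------------------------------------
# Editor's note -- illustrative usage sketch, not part of the original xts
# sources.  It shows as.list() splitting a multi-column xts object into a
# named list; the sample data are made up and require xts to be installed.
# ---------------------------------------------------------------------------
library(xts)

x <- xts(cbind(a = 1:3, b = 4:6), order.by = as.Date("2020-01-01") + 0:2)

as.list(x)                  # list with elements "a" and "b"
sapply(as.list(x), mean)    # operate column-by-column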
/scratch/gouwar.j/cran-all/cranData/xts/R/list.R
#
#   xts: eXtensible time-series
#
#   Copyright (C) 2008  Jeffrey A. Ryan jeff.a.ryan @ gmail.com
#
#   Contributions from Joshua M. Ulrich
#
#   This program is free software: you can redistribute it and/or modify
#   it under the terms of the GNU General Public License as published by
#   the Free Software Foundation, either version 2 of the License, or
#   (at your option) any later version.
#
#   This program is distributed in the hope that it will be useful,
#   but WITHOUT ANY WARRANTY; without even the implied warranty of
#   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#   GNU General Public License for more details.
#
#   You should have received a copy of the GNU General Public License
#   along with this program.  If not, see <http://www.gnu.org/licenses/>.

# functions for matrix <--> xts conversions

`as.matrix.xts` <- function(x, ...) {
  # This function follows the pattern of as.matrix.zoo()
  cd <- coredata(x)
  y <- as.matrix(cd, ...)

  if (length(cd) == 0) {
    dim(y) <- c(0, 0)
  }

  # colnames
  if (length(y) > 0) {
    cnx <- colnames(x)
    if (length(cnx) > 0) {
      colnames(y) <- cnx
    } else {
      cn <- deparse(substitute(x), width.cutoff = 100, nlines = 1)
      if (NCOL(x) == 1) {
        colnames(y) <- cn
      } else {
        colnames(y) <- paste(cn, 1:ncol(x), sep = ".")
      }
    }
  } else if (nrow(y) != length(.index(x))) {
    dim(y) <- c(length(.index(x)), 0)
  }

  # rownames
  if (!is.null(y) && nrow(y) > 0 && is.null(rownames(y))) {
    rownames(y) <- as.character(index(x))
  }

  y
}

`re.matrix` <- function(x,...) {
  as.matrix(x,...)
}

`as.xts.matrix` <- function(x,order.by,dateFormat="POSIXct",frequency=NULL,...,.RECLASS=FALSE) {
  # Should allow 'order.by' to be a vector of dates or a scalar
  # representing the column number to use.
  if(missing(order.by)) {
    # The 'index' of zoo objects is set to 'rownames' when converted with 'as.matrix',
    # but it is of class 'Date', not 'POSIXct'... - jmu
    if(is.null(rownames(x)))
      stop("order.by must be either 'rownames()' or otherwise specified")
    else
      # added '...' args to allow for tz specification
      order.by <- do.call(paste('as',dateFormat,sep='.'),list(rownames(x)))
  }

  if(.RECLASS) {
    xx <- xts(x,
              order.by=order.by,
              frequency=frequency,
              .CLASS='matrix',
              ...)
  } else {
    xx <- xts(x,
              order.by=order.by,
              frequency=frequency,
              ...)
  }
  xx
}
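# ---------------------------------------------------------------------------
# Editor's note -- illustrative usage sketch, not part of the original xts
# sources.  It shows round-tripping between xts and plain matrices; sample
# data are made up and require the xts package to be installed.
# ---------------------------------------------------------------------------
library(xts)

x <- xts(matrix(1:6, ncol = 2, dimnames = list(NULL, c("a", "b"))),
         order.by = as.Date("2020-01-01") + 0:2)

m <- as.matrix(x)                     # plain matrix; row names come from the index
y <- as.xts(m)                        # back to xts, parsing rownames (POSIXct by default)
z <- as.xts(m, dateFormat = "Date")   # keep a Date index instead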
/scratch/gouwar.j/cran-all/cranData/xts/R/matrix.R
# # xts: eXtensible time-series # # Copyright (C) 2008 Jeffrey A. Ryan jeff.a.ryan @ gmail.com # # Contributions from Joshua M. Ulrich # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. merge.xts <- function(..., all=TRUE, fill=NA, suffixes=NULL, join="outer", retside=TRUE, retclass="xts", tzone=NULL, drop=NULL, check.names=NULL) { if(is.null(check.names)) { check.names <- TRUE } if(is.logical(retclass) && !retclass) { setclass=FALSE } else setclass <- TRUE fill.fun <- NULL if(is.function(fill)) { fill.fun <- fill fill <- NA } # as.list(substitute(list(...))) # this is how zoo handles colnames - jar mc <- match.call(expand.dots=FALSE) dots <- mc$... if(is.null(suffixes)) { syms <- names(dots) if(is.null(syms)) { # Based on makeNames() in merge.zoo() syms <- substitute(alist(...))[-1L] nm <- names(syms) fixup <- if (is.null(nm)) seq_along(syms) else !nzchar(nm) dep <- sapply(syms[fixup], function(x) deparse(x, nlines = 1L)) if(is.null(nm)) { nm <- dep } else if(any(fixup)) { nm[fixup] <- dep } syms <- nm } else { have.symnames <- nzchar(syms) if(any(!have.symnames)) { syms[!have.symnames] <- as.character(dots[!have.symnames]) } } } else if(length(suffixes) != length(dots)) { warning("length of suffixes and does not match number of merged objects") syms <- as.character(dots) # should we ignore suffixes here? #suffixes <- NULL } else { syms <- as.character(suffixes) } .times <- .External(C_number_of_cols, ...) # moved call to make.names inside of mergeXts/do_merge_xts symnames <- rep(syms, .times) suffixes <- rep(suffixes, .times) if(length(dots) == 1) { # this is for compat with zoo; one object AND a name if(!is.null(names(dots))) { x <- list(...)[[1]] if(is.null(colnames(x))) colnames(x) <- symnames return(x) } } if( !missing(join) ) { # join logic applied to index: # inspired by: http://blogs.msdn.com/craigfr/archive/2006/08/03/687584.aspx # # (full) outer - all cases, equivelant to all=c(TRUE,TRUE) # left - all x, && y's that match x # right - all ,y && x's that match y # inner - only x and y where index(x)==index(y) all <- switch(pmatch(join,c("outer","left","right","inner")), c(TRUE, TRUE ), # outer c(TRUE, FALSE), # left c(FALSE, TRUE ), # right c(FALSE, FALSE) # inner ) if( length(dots) > 2 ) { all <- all[1] warning("'join' only applicable to two object merges") } } if( length(all) != 2 ) { if( length(all) > 2 ) warning("'all' must be of length two") all <- rep(all[1], 2) } if( length(dots) > 2 ) retside <- TRUE if( length(retside) != 2 ) retside <- rep(retside[1], 2) x <- .External(C_mergeXts, all=all[1:2], fill=fill, setclass=setclass, symnames=symnames, suffixes=suffixes, retside=retside, env=new.env(), tzone=tzone, check.names=check.names, ...) 
if(!is.logical(retclass) && retclass != 'xts') { asFun <- paste("as", retclass, sep=".") if(!exists(asFun)) { warning(paste("could not locate",asFun,"returning 'xts' object instead")) return(x) } xx <- try(do.call(asFun, list(x))) if(!inherits(xx,'try-error')) { return(xx) } } if(!is.null(fill.fun)) { fill.fun(x) } else return(x) }
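# ---------------------------------------------------------------------------
# Editor's note -- illustrative usage sketch, not part of the original xts
# sources.  It shows the 'join' and 'fill' arguments of merge.xts(); sample
# data are made up and require the xts package to be installed.
# ---------------------------------------------------------------------------
library(xts)

x <- xts(1:4,   order.by = as.Date("2020-01-01") + 0:3)
y <- xts(11:13, order.by = as.Date("2020-01-03") + 0:2)

merge(x, y)                            # outer join: union of both indexes, NA-filled
merge(x, y, join = "inner")            # only time stamps common to both series
merge(x, y, join = "left", fill = 0)   # keep x's index; missing y values become 0
merge(x, y, join = "right")            # keep y's index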
/scratch/gouwar.j/cran-all/cranData/xts/R/merge.R
#
#   xts: eXtensible time-series
#
#   Copyright (C) 2009-2015  Jeffrey A. Ryan jeff.a.ryan @ gmail.com
#
#   Contributions from Ross Bennett and Joshua M. Ulrich
#
#   This program is free software: you can redistribute it and/or modify
#   it under the terms of the GNU General Public License as published by
#   the Free Software Foundation, either version 2 of the License, or
#   (at your option) any later version.
#
#   This program is distributed in the hope that it will be useful,
#   but WITHOUT ANY WARRANTY; without even the implied warranty of
#   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#   GNU General Public License for more details.
#
#   You should have received a copy of the GNU General Public License
#   along with this program.  If not, see <http://www.gnu.org/licenses/>.

modify.args <- function(formals, arglist, ..., dots=FALSE)
{
  # modify.args function from quantstrat

  # avoid evaluating '...' to make things faster
  dots.names <- eval(substitute(alist(...)))

  if(missing(arglist))
    arglist <- NULL
  arglist <- c(arglist, dots.names)

  # see 'S Programming' p. 67 for this matching

  # nothing to do if arglist is empty; return formals as a list
  if(!length(arglist))
    return(as.list(formals))

  argnames <- names(arglist)
  if(!is.list(arglist) && !is.null(argnames) && !any(argnames == ""))
    stop("'arglist' must be a *named* list, with no names == \"\"")

  .formals <- formals
  onames <- names(.formals)

  pm <- pmatch(argnames, onames, nomatch = 0L)
  #if(any(pm == 0L))
  #  message(paste("some arguments stored for", fun, "do not match"))
  names(arglist[pm > 0L]) <- onames[pm]
  .formals[pm] <- arglist[pm > 0L]

  # include all elements from arglist if function formals contain '...'
  if(dots && !is.null(.formals$...)) {
    dotnames <- names(arglist[pm == 0L])
    .formals[dotnames] <- arglist[dotnames]
    #.formals$... <- NULL  # should we assume we matched them all?
  }

  # return a list (not a pairlist)
  as.list(.formals)
}

# This is how it is used in quantstrat in applyIndicators()
#
# # replace default function arguments with indicator$arguments
# .formals <- formals(indicator$name)
# .formals <- modify.args(.formals, indicator$arguments, dots=TRUE)
# # now add arguments from parameters
# .formals <- modify.args(.formals, parameters, dots=TRUE)
# # now add dots
# .formals <- modify.args(.formals, NULL, ..., dots=TRUE)
# # remove ... to avoid matching multiple args
# .formals$`...` <- NULL
#
# tmp_val <- do.call(indicator$name, .formals)
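# ---------------------------------------------------------------------------
# Editor's note -- illustrative sketch, not part of the original xts sources.
# modify.args() is an un-exported internal helper (borrowed from quantstrat),
# so the triple-colon access below is purely for demonstration; the function
# 'f' and its arguments are made up.  Requires xts to be installed.
# ---------------------------------------------------------------------------
f <- function(n, mean = 0, sd = 1) rnorm(n, mean, sd)

.formals <- formals(f)
.formals <- xts:::modify.args(.formals, arglist = list(sd = 2), n = 5)
# 'n' and 'sd' are now set; 'mean' keeps its default of 0
do.call(f, .formals)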
/scratch/gouwar.j/cran-all/cranData/xts/R/modify.args.R
# # xts: eXtensible time-series # # Copyright (C) 2008 Jeffrey A. Ryan jeff.a.ryan @ gmail.com # # Contributions from Joshua M. Ulrich # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. na.omit.xts <- function(object, ...) { xx <- .Call(C_na_omit_xts, object) if(length(xx)==0) return(structure(xts(,),.Dim=c(0,NCOL(object)))) naa <- attr(xx,'na.action') if(length(naa) == 0) return(xx) naa.index <- .index(object)[naa] ROWNAMES <- attr(object,'.ROWNAMES') if(!is.null(ROWNAMES)) { naa.rownames <- ROWNAMES[naa] } else naa.rownames <- NULL attr(xx,'na.action') <- structure(naa, index=naa.index, .ROWNAMES=naa.rownames) return(xx) } na.exclude.xts <- function(object, ...) { xx <- .Call(C_na_omit_xts, object) naa <- attr(xx,'na.action') if(length(naa) == 0) return(xx) naa.index <- .index(object)[naa] ROWNAMES <- attr(object,'.ROWNAMES') if(!is.null(ROWNAMES)) { naa.rownames <- ROWNAMES[naa] } else naa.rownames <- NULL attr(xx,'na.action') <- structure(naa, class="exclude", index=naa.index, .ROWNAMES=naa.rownames) return(xx) } na.restore <- function(object, ...) { UseMethod("na.restore") } na.restore.xts <- function(object, ...) { if(is.null(na.action(object))) return(object) structure(merge(structure(object,na.action=NULL), .xts(,attr(na.action(object),"index"))), .Dimnames=list(NULL, colnames(object))) } na.replace <- function(x) { .Deprecated("na.restore") if(is.null(xtsAttributes(x)$na.action)) return(x) # Create 'NA' xts object tmp <- xts(matrix(rep(NA,NCOL(x)*NROW(x)), ncol=NCOL(x)), attr(xtsAttributes(x)$na.action, 'index')) # Ensure xts 'NA' object has *all* the same attributes # as the object 'x'; this is necessary for rbind to # work correctly CLASS(tmp) <- CLASS(x) xtsAttributes(tmp) <- xtsAttributes(x) attr(x,'na.action') <- attr(tmp,'na.action') <- NULL colnames(tmp) <- colnames(x) rbind(x,tmp) } na.locf.xts <- function(object, na.rm=FALSE, fromLast=FALSE, maxgap=Inf, ...) { maxgap <- min(maxgap, NROW(object)) if(length(object) == 0) return(object) if(hasArg("x") || hasArg("xout")) return(NextMethod(.Generic)) x <- .Call(C_na_locf, object, fromLast, maxgap, Inf) if(na.rm) { return(structure(na.omit(x),na.action=NULL)) } else x } na.fill.xts <- function(object, fill, ix, ...) { if (length(fill) == 1 && missing(ix)) { # na.fill0() may change the storage type of 'object' # make sure 'fill' argument is same type as 'object' fill. <- fill storage.mode(fill.) <- storage.mode(object) return(na.fill0(object, fill.)) } else { NextMethod(.Generic) } }
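# ---------------------------------------------------------------------------
# Editor's note -- illustrative usage sketch, not part of the original xts
# sources.  It shows the xts methods for missing-value handling defined
# above; sample data are made up and require xts to be installed.
# ---------------------------------------------------------------------------
library(xts)

x <- xts(c(1, NA, NA, 4, NA), order.by = as.Date("2020-01-01") + 0:4)

na.locf(x)                    # last observation carried forward
na.locf(x, fromLast = TRUE)   # next observation carried backward
na.locf(x, maxgap = 1)        # leave NA runs longer than 'maxgap' untouched
na.omit(x)                    # drop NA rows; removed positions kept in 'na.action'
na.fill(x, fill = 0)          # replace NAs with a constant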
/scratch/gouwar.j/cran-all/cranData/xts/R/na.R
#
#   xts: eXtensible time-series
#
#   Copyright (C) 2008  Jeffrey A. Ryan jeff.a.ryan @ gmail.com
#
#   Contributions from Joshua M. Ulrich
#
#   This program is free software: you can redistribute it and/or modify
#   it under the terms of the GNU General Public License as published by
#   the Free Software Foundation, either version 2 of the License, or
#   (at your option) any later version.
#
#   This program is distributed in the hope that it will be useful,
#   but WITHOUT ANY WARRANTY; without even the implied warranty of
#   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#   GNU General Public License for more details.
#
#   You should have received a copy of the GNU General Public License
#   along with this program.  If not, see <http://www.gnu.org/licenses/>.

`nseconds` <- function(x) {
  length(endpoints(x,on='seconds'))-1
}

`nminutes` <- function(x) {
  length(endpoints(x,on='minutes'))-1
}

`nhours` <- function(x) {
  length(endpoints(x,on='hours'))-1
}

`ndays` <- function(x) {
  length(endpoints(x,on='days'))-1
}

`nweeks` <- function(x) {
  length(endpoints(x,on='weeks'))-1
}

`nmonths` <- function(x) {
  length(endpoints(x,on='months'))-1
}

`nquarters` <- function(x) {
  length(endpoints(x,on='quarters'))-1
}

`nyears` <- function(x) {
  length(endpoints(x,on='years'))-1
}
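# ---------------------------------------------------------------------------
# Editor's note -- illustrative usage sketch, not part of the original xts
# sources.  It shows the period-counting helpers above; sample data are made
# up and require the xts package to be installed.
# ---------------------------------------------------------------------------
library(xts)

x <- xts(1:90, order.by = as.Date("2020-01-01") + 0:89)

ndays(x)       # 90 distinct days
nweeks(x)      # number of calendar weeks touched by the index
nmonths(x)     # 3  (January, February, March 2020)
nquarters(x)   # 1  (all observations fall in Q1)
nyears(x)      # 1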
/scratch/gouwar.j/cran-all/cranData/xts/R/nperiods.R
# # xts: eXtensible time-series # # Copyright (C) 2008 Jeffrey A. Ryan jeff.a.ryan @ gmail.com # # Contributions from Joshua M. Ulrich # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # fixes for R new/broken as.Date, as.POSIXlt and as.POSIXct # hopefully to be removed when remedied in R # taken directly from 'base', with origin set to '1970-01-01' (1970-01-01) `as.Date.numeric` <- function(x, origin='1970-01-01', ...) { as.Date(origin,...) + x } `as.POSIXct.numeric` <- function(x, tz="", origin='1970-01-01', ...) { structure(x, class=c("POSIXct", "POSIXt")) } `as.POSIXlt.numeric` <- function(x, tz="", origin='1970-01-01', ...) { as.POSIXlt(as.POSIXct(origin,tz="UTC",...) + x, tz=tz) } as.POSIXct.Date <- function(x, ...) { as.POSIXct(as.character(x)) } as.Date.POSIXct <- function(x, ...) { as.Date(strftime(x)) # z <- floor(unclass((x - unclass(as.POSIXct('1970-01-01'))))/86400) # attr(z, 'tzone') <- NULL # structure(z, class="Date") } as.POSIXlt.Date <- function(x, ...) { as.POSIXlt(as.POSIXct.Date(x)) } #as.POSIXct.yearmon <- function(x, ...) #{ # structure(as.POSIXct("1970-01-01") + unclass(as.Date(x))*86400, # class=c("POSIXct","POSIXt")) #} # #as.POSIXlt.yearmon <- function(x, ...) #{ # as.POSIXlt(xts:::as.POSIXct.yearmon(x)) #} # as.POSIXct.dates <- function(x, ...) { # need to implement our own method to correctly handle TZ #as.POSIXct(as.character(as.POSIXlt(x,tz="GMT"))) structure(as.POSIXct(as.POSIXlt(x, tz="GMT"), tz="GMT"),class=c("POSIXct","POSIXt")) } as.chron.POSIXct <- function(x, ...) { if(!requireNamespace('chron', quietly=TRUE)) as.chron <- function(...) message("package 'chron' required") structure(as.chron(as.POSIXlt(as.character(x)))) }
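# ---------------------------------------------------------------------------
# Editor's note -- illustrative sketch, not part of the original xts sources.
# The shims above are meant to let numeric values convert without an explicit
# 'origin', anchored at 1970-01-01.  Recent base R versions supply similar
# defaults, so which method is dispatched may depend on the R version; the
# calls below pass 'origin' explicitly and work either way.
# ---------------------------------------------------------------------------
as.Date(18262, origin = "1970-01-01")   # "2020-01-01": 18262 days after the epoch
as.Date(0,     origin = "1970-01-01")   # the epoch itself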
/scratch/gouwar.j/cran-all/cranData/xts/R/origin.fix.R
# # xts: eXtensible time-series # # Copyright (C) 2008 Jeffrey A. Ryan jeff.a.ryan @ gmail.com # # Contributions from Joshua M. Ulrich # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # This function corresponds to the ISO 8601 standard # for specifying dates and times as described in # the ISO 8601:2004e standard. # # See: # http://en.wikipedia.org/wiki/ISO_8601 # http://www.iso.org/iso/support/faqs/faqs_widely_used_standards/widely_used_standards_other/date_and_time_format.htm # # This implementation is currently restricted # to interval based parsing, with basic or # extended formats, and duration strings. # Currently the duration must be in basic format # e.g. PnnYnnMnnDTnnHnnMnnS # # The return value is a list of start and # end times, in POSIXt space. # # Copyright 2009. Jeffrey A. Ryan. All rights reserved. # This is licensed under the GPL version 2 or later .makeISO8601 <- function(x) { paste(start(x), end(x), sep = "/") } .parseISO8601 <- function(x, start, end, tz="") { # x: character vector of length 1 in ISO8601:2004(e) format # start: optional earliest time # end: optional latest time # tz: optional tzone to create with as_numeric <- function(.x) { # simple helper function if(gsub(" ","",.x)=="") NULL else as.numeric(.x) } x <- gsub("NOW",format(Sys.time(),"%Y%m%dT%H%M%S"),x) x <- gsub("TODAY",format(Sys.Date(),"%Y%m%d"),x) if(identical(grep("/|(--)|(::)", x), integer(0))) { x <- paste(x,x,sep="/") } intervals <- unlist(strsplit(x, "/|(--)|(::)")) # e.g. "/2009": "" "xxx" end of defined, needs context # e.g. 
"2009/": "xxx" start of defined, needs context # check for duration specification DURATION <- "" if(length(intervals)==2L) { if(substr(intervals[1],0,1)=="P") { # duration on LHS DURATION <- intervals[1] DURATION_LHS <- TRUE intervals[1] <- "" } if(substr(intervals[2],0,1)=="P") { # duration on RHS DURATION <- intervals[2] DURATION_LHS <- FALSE intervals <- intervals[1] } # leave alone if no duration } parse.side <- function(x, startof) { if( is.na(x) || !nzchar(x)) return(c(NULL)) basic <- gsub(":|-", "", x, perl=TRUE) #, extended=TRUE) date.time <- unlist(strsplit(basic, " |T")) # dates date <- date.time[1] if(!missing(startof) && nchar(basic)==2L) { startof <- gsub(":|-", "", startof, perl=TRUE) #, extended=TRUE) if(nchar(startof) - nchar(date) >= 4) { # FIXME 200901/2009 needs to work, fix is ex-post now # pad to last place of startof # with startof values sstartof <- substr(startof,0,nchar(startof)-nchar(date)) date <- paste(sstartof,date,sep="") } } date <- sprintf("%-8s", date) YYYY <- substr(date,0,4) MM <- substr(date,5,6) DD <- substr(date,7,8) # times time <- date.time[2] if( !is.na(time)) { time <- sprintf("%-6s", time) H <- substr(time,0,2) M <- substr(time,3,4) S <- substr(time,5,10000L) } else H<-M<-S<-"" # return as list c(as.list(c( year=as_numeric(YYYY), month=as_numeric(MM), day=as_numeric(DD), hour=as_numeric(H), min=as_numeric(M), sec=as_numeric(S) ) ),tz=tz) } s <- e <- NA if(nzchar(intervals[1])) # LHS s <- as.POSIXlt(do.call(firstof, parse.side(intervals[1]))) if(length(intervals) == 2L) { # RHS e <- as.POSIXlt(do.call(lastof, parse.side(intervals[2],intervals[1]))) if(is.na(e)) e <- as.POSIXlt(do.call(lastof, parse.side(intervals[2]))) } if(is.na(s) && is.na(e) && !nzchar(DURATION) && intervals[1L] != "") { warning("cannot determine first and last time from ", x) return(list(first.time=NA_real_,last.time=NA_real_)) } if(!missing(start)) { start <- as.numeric(start) #s <- as.POSIXlt(structure(max(start, as.numeric(s), na.rm=TRUE), # class=c("POSIXct","POSIXt"),tz=tz)) s <- as.POSIXlt(.POSIXct(max(start, as.numeric(s), na.rm=TRUE),tz=tz)) } if(!missing(end)) { end <- as.numeric(end) #e <- as.POSIXlt(structure(min(end, as.numeric(e), na.rm=TRUE), # class=c("POSIXct","POSIXt"),tz=tz)) e <- as.POSIXlt(.POSIXct(min(end, as.numeric(e), na.rm=TRUE),tz=tz)) } if(nzchar(DURATION)) { parse_duration <- function(P) { # TODO: # strip leading P from string # convert second M (min) to 'm' IFF following a T # remove/ignore T # convert extended format (PYYYYMMDD) to basic format (PnnYnnMnnD) P <- gsub("P","",P) P <- gsub("T(.*)M","\\1m",P) n <- unlist(strsplit(P, "[[:alpha:]]")) d <- unlist(strsplit(gsub("[[:digit:]]", "", P),"")) dur.vec <- list(as.numeric(n),unname(c(Y=6,M=5,D=4,H=3,m=2,S=1)[d])) init.vec <- rep(0, 9) init.vec[dur.vec[[2]]] <- dur.vec[[1]] init.vec } if(DURATION_LHS) { s <- as.POSIXct(structure(as.list(mapply(`-`,e,parse_duration(DURATION))), class=c("POSIXlt","POSIXt"), tzone=attr(e,"tzone"))) } else { e <- as.POSIXct(structure(as.list(mapply(`+`,s,parse_duration(DURATION))), class=c("POSIXlt","POSIXt"), tzone=attr(e,"tzone"))) } } list(first.time=as.POSIXct(s),last.time=as.POSIXct(e)) }
/scratch/gouwar.j/cran-all/cranData/xts/R/parse8601.R
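# ---------------------------------------------------------------------------
# Editor's note -- illustrative usage sketch, not part of the original xts
# sources.  It shows .parseISO8601() (defined in parse8601.R above) turning
# ISO-8601 range strings into first/last POSIXct times, and the same grammar
# driving xts subsetting.  Sample data are made up; requires xts.
# ---------------------------------------------------------------------------
library(xts)

.parseISO8601("2020")                 # the whole calendar year 2020
.parseISO8601("2020-01/2020-03")      # January through March 2020
.parseISO8601("2020-01-01T08:30")     # a single minute on 2020-01-01

x <- xts(1:366, order.by = as.Date("2020-01-01") + 0:365)
nrow(x["2020-02"])                    # 29 rows: February 2020 via the same parser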
# # xts: eXtensible time-series # # Copyright (C) 2008 Jeffrey A. Ryan jeff.a.ryan @ gmail.com # # Contributions from Joshua M. Ulrich # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # optimized periodic apply functions # `is.timeBased` <- `timeBased` <- function(x) { time.classes <- c("Date", "POSIXt", "chron", "dates", "times", "timeDate", "yearmon", "yearqtr", "xtime") inherits(x, time.classes) } make.timeBased <- function(x, class='POSIXct', ...) { do.call(class, list(x,...)) } `period.sum` <- function(x,INDEX) { if(NCOL(x) > 1) stop("single column data only") if(min(INDEX) < 0 || max(INDEX) > NROW(x)) stop("INDEX must be >= 0 and <= nrow(x)") ep <- as.integer(INDEX) if(ep[1L] != 0L) ep <- c(0L,ep) if(ep[length(ep)] != NROW(x)) ep <- c(ep,NROW(x)) xx <- as.double(x) xa <- .Call(C_xts_period_sum, xx, ep) if(timeBased(index(x))) { tz <- xts(xa, index(x)[ep[-1]]) } else { tz <- zoo(xa, index(x)[ep[-1]]) } tz } `period.prod` <- function(x,INDEX) { if(NCOL(x) > 1) stop("single column data only") if(min(INDEX) < 0 || max(INDEX) > NROW(x)) stop("INDEX must be >= 0 and <= nrow(x)") ep <- as.integer(INDEX) if(ep[1] != 0L) ep <- c(0L,ep) if(ep[length(ep)] != NROW(x)) ep <- c(ep,NROW(x)) xx <- as.double(x) xa <- .Call(C_xts_period_prod, xx, ep) if(timeBased(index(x))) { tz <- xts(xa, index(x)[ep[-1]]) } else { tz <- zoo(xa, index(x)[ep[-1]]) } tz } `period.max` <- function(x,INDEX) { if(NCOL(x) > 1) stop("single column data only") if(min(INDEX) < 0 || max(INDEX) > NROW(x)) stop("INDEX must be >= 0 and <= nrow(x)") ep <- as.integer(INDEX) if(ep[1] != 0L) ep <- c(0L,ep) if(ep[length(ep)] != NROW(x)) ep <- c(ep,NROW(x)) xx <- as.double(x) xa <- .Call(C_xts_period_max, xx, ep) if(timeBased(index(x))) { tz <- xts(xa, index(x)[ep[-1]]) } else { tz <- zoo(xa, index(x)[ep[-1]]) } tz } `period.min` <- function(x,INDEX) { if(NCOL(x) > 1) stop("single column data only") if(min(INDEX) < 0 || max(INDEX) > NROW(x)) stop("INDEX must be >= 0 and <= nrow(x)") ep <- as.integer(INDEX) if(ep[1] != 0L) ep <- c(0L,ep) if(ep[length(ep)] != NROW(x)) ep <- c(ep,NROW(x)) xx <- as.double(x) xa <- .Call(C_xts_period_min, xx, ep) if(timeBased(index(x))) { tz <- xts(xa, index(x)[ep[-1]]) } else { tz <- zoo(xa, index(x)[ep[-1]]) } tz }
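# ---------------------------------------------------------------------------
# Editor's note -- illustrative usage sketch, not part of the original xts
# sources.  The optimized period.* reducers above take a single column plus
# an endpoints() index.  Sample data are made up; requires xts.
# ---------------------------------------------------------------------------
library(xts)

x  <- xts(c(1, 2, 3, 4, 5, 6), order.by = as.Date("2020-01-29") + 0:5)
ep <- endpoints(x, "months")   # c(0, 3, 6): row numbers ending each month

period.sum(x, ep)    # 6 at 2020-01-31, 15 at 2020-02-03
period.max(x, ep)
period.min(x, ep)
period.prod(x, ep)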
/scratch/gouwar.j/cran-all/cranData/xts/R/period.R
# # xts: eXtensible time-series # # Copyright (C) 2008 Jeffrey A. Ryan jeff.a.ryan @ gmail.com # # Contributions from Joshua M. Ulrich # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. .mean_by_column_message <- function(caller) { if (getOption("xts.message.period.apply.mean", TRUE)) { message("NOTE: `", caller, "(..., FUN = mean)` operates by column, unlike other math\n ", "functions (e.g. median, sum, var, sd). Please use `FUN = colMeans` instead,\n ", "and use `FUN = function(x) mean(x)` to take the mean of all columns. Set\n ", "`options(xts.message.period.apply.mean = FALSE)` to suppress this message.") } # changing this behavior will break code in the following dependencies: # # ATAforecasting/R/ATA_Find_Multi_Freq.R # bidask/R/utils.R # dsa/R/HelperFunctions.R # {.tomonth} # RavenR/inst/doc/Introduction_to_RavenR.R # RavenR/inst/doc/Introduction_to_RavenR.Rmd # RavenR/R/rvn_apply_wyearly.R # RavenR/R/rvn_monthly_vbias.R # rts/man/apply.monthly.Rd # rts/man/period.apply.Rd # RWDataPlyr/R/xts_helperFunctions.R } `period.apply` <- function(x, INDEX, FUN, ...) { if (deparse(substitute(FUN))[1] == "mean") { .mean_by_column_message("period.apply") } x <- try.xts(x, error = FALSE) FUN <- match.fun(FUN) if(!isOrdered(INDEX)) { # isOrdered returns FALSE if there are duplicates INDEX <- sort(unique(INDEX)) } if(INDEX[1] != 0) { INDEX <- c(0, INDEX) } if(last(INDEX) != NROW(x)) { INDEX <- c(INDEX, NROW(x)) } xx <- sapply(1:(length(INDEX) - 1), function(y) { FUN(x[(INDEX[y] + 1):INDEX[y + 1]], ...) }) if(is.vector(xx)) xx <- t(xx) xx <- t(xx) if(is.null(colnames(xx)) && NCOL(x)==NCOL(xx)) colnames(xx) <- colnames(x) reclass(xx, x[INDEX]) } `period.apply.original` <- function (x, INDEX, FUN, ...) { x <- use.xts(x,error=FALSE) if(!is.xts(x)) { FUN <- match.fun(FUN) xx <- sapply(1:(length(INDEX) - 1), function(y) { FUN(x[(INDEX[y] + 1):INDEX[y + 1]], ...) }) } else { FUN <- match.fun(FUN) new.index <- index(x)[INDEX] xx <- sapply(1:(length(INDEX) - 1), function(y) { FUN(x[(INDEX[y] + 1):INDEX[y + 1]], ...) }) xx <- xts(xx,new.index) CLASS(xx) <- CLASS(x) xtsAttributes(xx) <- xtsAttributes(x) xx <- reclass(xx) } xx } `apply.daily` <- function(x,FUN, ...) { if (deparse(substitute(FUN))[1] == "mean") { .mean_by_column_message("apply.daily") } ep <- endpoints(x,'days') period.apply(x,ep,FUN, ...) } `apply.weekly` <- function(x,FUN, ...) { if (deparse(substitute(FUN))[1] == "mean") { .mean_by_column_message("apply.weekly") } ep <- endpoints(x,'weeks') period.apply(x,ep,FUN, ...) } `apply.monthly` <- function(x,FUN, ...) { if (deparse(substitute(FUN))[1] == "mean") { .mean_by_column_message("apply.monthly") } ep <- endpoints(x,'months') period.apply(x,ep,FUN, ...) } `apply.quarterly` <- function(x,FUN, ...) { if (deparse(substitute(FUN))[1] == "mean") { .mean_by_column_message("apply.quarterly") } ep <- endpoints(x,'quarters') period.apply(x,ep,FUN, ...) } `apply.yearly` <- function(x,FUN, ...) 
{ if (deparse(substitute(FUN))[1] == "mean") { .mean_by_column_message("apply.yearly") } ep <- endpoints(x,'years') period.apply(x,ep,FUN, ...) } period_apply <- function(x, INDEX, FUN, ...) { fun <- substitute(FUN) e <- new.env() if (INDEX[1] != 0) { INDEX <- c(0, INDEX) } if (INDEX[length(INDEX)] != NROW(x)) { INDEX <- c(INDEX, NROW(x)) } pl <- .Call(C_xts_period_apply, x, INDEX, fun, e) .xts(do.call(rbind, pl), .index(x)[INDEX], tclass = tclass(x), tzone = tzone(x)) }
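# ---------------------------------------------------------------------------
# Editor's note -- illustrative usage sketch, not part of the original xts
# sources.  It shows period.apply() and the apply.* wrappers above; sample
# data are made up and require the xts package to be installed.
# ---------------------------------------------------------------------------
library(xts)

x <- xts(1:6, order.by = as.Date("2020-01-29") + 0:5)

apply.monthly(x, sum)                          # one result per calendar month
apply.weekly(x, max)
period.apply(x, endpoints(x, "months"), sd)    # same machinery, explicit endpoints

# As the note above explains, FUN = mean operates by column; use colMeans for
# per-column means of multi-column objects.
xy <- merge(a = x, b = x * 2)
apply.monthly(xy, colMeans)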
/scratch/gouwar.j/cran-all/cranData/xts/R/period.apply.R
# # xts: eXtensible time-series # # Copyright (C) 2008 Jeffrey A. Ryan jeff.a.ryan @ gmail.com # # Contributions from Joshua M. Ulrich # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. time_frequency <- function(x) { x <- gsub(":|/|-| ", "", x) nc <- nchar(x) if(nc < 4) stop("unrecognizable time.scale") if(nc == 4) res <- 2678400 * 12 #"yearly" if(nc > 4) res <- 2678400 #"monthly" if(nc > 6) res <- 86400 #"daily" if(nc > 8) res <- 3600 #"hourly" if(nc > 10) res <- 60 #"minute" if(nc > 12) res <- 1 #"seconds" return(res) } periodicity <- function(x, ...) { if( timeBased(x) ) { if( anyNA(x) ) { warning("removing NA in 'x' to calculate periodicity") x <- x[!is.na(x)] } x <- try.xts(x, error = "cannot convert 'x' to xts") } if (!is.xts(x)) { x <- try.xts(x, error = "cannot convert 'x' to xts") } n <- length(.index(x)) if( n < 2 ) { res <- list(difftime = structure(0, units='secs', class='difftime'), frequency = 0, start = NA, end = NA, units = 'secs', scale = 'seconds', label = 'second') res <- structure(res, class='periodicity') if( n == 0 ) { warning("can not calculate periodicity of empty object") } else { warning("can not calculate periodicity of 1 observation") res$start <- start(x) res$end <- end(x) } return(res) } p <- median(diff( .index(x) )) # Date and POSIXct if(p < 60) { units <- "secs" scale <- "seconds" label <- "second" } else if(p < 3600) { units <- "mins" scale <- "minute" label <- "minute" p <- p/60L } else if(p < 86400) { # < 1 day units <- "hours" scale <- "hourly" label <- "hour" } else if(p == 86400) { units <- "days" scale <- "daily" label <- "day" } else if(p <= 604800) { # 86400 * 7 units <- "days" scale <- "weekly" label <- "week" } else if(p <= 2678400) { # 86400 * 31 units <- "days" scale <- "monthly" label <- "month" } else if(p <= 7948800) { # 86400 * 92 units <- "days" scale <- "quarterly" label <- "quarter" } else { # years units <- "days" scale <- "yearly" label <- "year" } structure(list(difftime = as.difftime(p, units = units), frequency = p, start = start(x), end = end(x), units = units, scale = scale, label = label), class = 'periodicity') } `periodicity.old` <- function (x, ...) { if(!is.xts(x)) x <- as.xts(x) # convert if necessary to usable format if(!tclass(x)[[1]] %in% c('Date','POSIXt')) tclass(x) <- "POSIXct" # this takes a long time on big data - possibly use some sort of sampling instead??? 
p <- median(diff(time(x))) if (is.na(p)) stop("cannot calculate periodicity from one observation") p.numeric <- as.numeric(p) units <- attr(p, "units") if (units == "secs") { scale <- "seconds" } if (units == "mins") { scale <- "minute" if (p.numeric > 59) scale <- "hourly" } if (units == "hours") { scale <- "hourly" } if (units == "days") { scale <- "daily" if (p.numeric > 1) scale <- "weekly" if (p.numeric > 7) scale <- "monthly" if (p.numeric > 31) scale <- "quarterly" if (p.numeric > 91) scale <- "yearly" } structure(list(difftime = p, frequency = p.numeric, start = index(first(x)), end = index(last(x)), units = units, scale = scale),class="periodicity") # class(xx) <- "periodicity" # xx # used when structure was assigned to xx, useless now, remain until testing is done though -jar } `print.periodicity` <- function (x, ...) { x.freq <- ifelse(x$scale %in% c("minute", "seconds"), x$frequency, "") if (x.freq == "") { cap.scale <- paste(toupper(substring(x$scale, 1, 1)), substring(x$scale, 2), sep = "") cat(paste(cap.scale, "periodicity from", x$start, "to", x$end, "\n", sep = " ")) } else { cat(paste(x.freq, x$scale, "periodicity from", x$start, "to", x$end, "\n", sep = " ")) } }
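# ---------------------------------------------------------------------------
# Editor's note -- illustrative usage sketch, not part of the original xts
# sources.  periodicity() estimates the sampling frequency from the median
# index spacing.  Sample data are made up; requires xts.
# ---------------------------------------------------------------------------
library(xts)

x.daily  <- xts(1:60, order.by = as.Date("2020-01-01") + 0:59)
x.hourly <- xts(1:48, order.by = as.POSIXct("2020-01-01", tz = "UTC") + 3600 * (0:47))

periodicity(x.daily)          # Daily periodicity from 2020-01-01 to 2020-02-29
periodicity(x.hourly)         # hourly periodicity
periodicity(x.daily)$scale    # "daily"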
/scratch/gouwar.j/cran-all/cranData/xts/R/periodicity.R
# # xts: eXtensible time-series # # Copyright (C) 2009-2015 Jeffrey A. Ryan jeff.a.ryan @ gmail.com # # Contributions from Ross Bennett and Joshua M. Ulrich # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. current.xts_chob <- function() invisible(get(".xts_chob",.plotxtsEnv)) # Current design # # There is a main plot object that contains the plot title (and optional # timespan), the x-axis labels and tick marks, and a list of 'panel' objects. # The main plot object contains the objects/functions below. # # * Env: an environment holds all the plot information. # * add_main_header(): add the main plot header # * add_main_xaxis(): add the x-axis labels and ticks to the main plot. # * new_panel(): create a new panel and add it to the plot. # * get_xcoords(): get the x-coordinate values for the plot. # * get_panel(): get a specific panel. # * get_last_action_panel(): get the panel that had the last rendered action. # * new_environment: create a new environment with 'Env' as its parent. # Functions that aren't intended to be called externally: # # * update_panels(): re-calculate the x-axis and y-axis values. # * render_panels(): render all the plot panels. # * x_grid_lines(): plot the x-axis grid lines. # * create_ylim(): create y-axis max/min, handling when max(x) == min(x). # The panel object is composed of the following fields: # # * id: the numeric index of the panel in the plot's list of panels. # * asp: the x/y aspect ratio for the panel (relative vertical size). # * ylim: the ylim of the panel when it was created. # * ylim_render: the ylim of the panel to use when rendering. # * use_fixed_ylim: do not update the panel ylim based on all panels data # * header: the panel title. # * actions: a list of expressions used to render the panel. # * add_action(): a function to add an action to the list. # # The panel has the 'yaxis_expr' expression for rendering the y-axis min/max # values, labels, and grid lines/ticks. It also contains the x-axis grid # expression because we need the y-axis min/max values to know where to draw # the x-axis grid lines on the panel. # Other notes # # Environments created by new_environment() (e.g. the 'lenv') are children of # Env, so expressions evaluated in 'lenv' will look in Env for anything not # found in 'lenv'. 
# # Visual representation of plot structure # # ____________________________________________________________________________ # / \ # | plot object / window | # | | # | ______________________________________________________________________ | # | / \ | # | | panel #1 | | # | | __________________________________________________________________ | | # | | / \ | | # | | | header frame | | | # | | \__________________________________________________________________/ | | # | | __________________________________________________________________ | | # | | / \ | | # | | | series frame | | | # | | | | | | # | | | | | | # | | | | | | # | | | | | | # | | | | | | # | | | | | | # | | | | | | # | | | | | | # | | | | | | # | | | | | | # | | | | | | # | | | | | | # | | | | | | # | | | | | | # | | | | | | # | | \__________________________________________________________________/ | | # | \______________________________________________________________________/ | # | | # | ______________________________________________________________________ | # | / \ | # | | panel #2 | | # | | __________________________________________________________________ | | # | | / \ | | # | | | header frame | | | # | | \__________________________________________________________________/ | | # | | __________________________________________________________________ | | # | | / \ | | # | | | series frame | | | # | | | | | | # | | | | | | # | | | | | | # | | | | | | # | | \__________________________________________________________________/ | | # | \______________________________________________________________________/ | # | | # \____________________________________________________________________________/ # # Currently not necessary, but potentially very useful: # http://www.fromthebottomoftheheap.net/2011/07/23/passing-non-graphical-parameters-to-graphical-functions-using/ chart.lines <- function(x, type="l", lty=1, lwd=2, lend=1, col=NULL, up.col=NULL, dn.col=NULL, legend.loc=NULL, log=FALSE, ...){ xx <- current.xts_chob() switch(type, h={ # use up.col and dn.col if specified if (!is.null(up.col) && !is.null(dn.col)){ colors <- ifelse(x[,1] < 0, dn.col, up.col) } else { colors <- if (is.null(col)) 1 else col } if (length(colors) < nrow(x[,1])) colors <- colors[1] # x-coordinates for this column xcoords <- xx$get_xcoords(x[,1]) lines(xcoords,x[,1],lwd=2,col=colors,lend=lend,lty=1,type="h",...) }, p=, l=, b=, c=, o=, s=, S=, n={ if(is.null(col)) col <- xx$Env$theme$col if(length(lty) < NCOL(x)) lty <- rep(lty, length.out = NCOL(x)) if(length(lwd) < NCOL(x)) lwd <- rep(lwd, length.out = NCOL(x)) if(length(col) < NCOL(x)) col <- rep(col, length.out = NCOL(x)) for(i in NCOL(x):1) { # x-coordinates for this column xcoords <- xx$get_xcoords(x[,i]) xi <- x[,i] if (isTRUE(log)) xi <- log(xi) lines(xcoords, xi, type=type, lend=lend, col=col[i], lty=lty[i], lwd=lwd[i], ...) } }, { # default case warning(paste(type, "not recognized. Type must be one of 'p', 'l', 'b, 'c', 'o', 'h', 's', 'S', 'n'. plot.xts supports the same types as plot.default, see ?plot for valid arguments for type")) } ) if(!is.null(legend.loc)){ lc <- legend.coords(legend.loc, xx$Env$xlim, range(x, na.rm=TRUE)) legend(x=lc$x, y=lc$y, legend=colnames(x), xjust=lc$xjust, yjust=lc$yjust, fill=col[1:NCOL(x)], bty="n") } } add.par.from.dots <- function(call., ...) 
{ stopifnot(is.call(call.)) # from graphics:::.Pars parnames <- c("xlog","ylog","adj","ann","ask","bg","bty","cex","cex.axis", "cex.lab","cex.main","cex.sub","cin","col","col.axis","col.lab", "col.main","col.sub","cra","crt","csi","cxy","din","err", "family", "fg","fig","fin","font","font.axis","font.lab", "font.main","font.sub","lab","las","lend","lheight","ljoin", "lmitre","lty","lwd","mai","mar","mex","mfcol","mfg","mfrow", "mgp","mkh","new","oma","omd","omi","page","pch","pin","plt", "ps","pty","smo","srt","tck","tcl","usr","xaxp","xaxs","xaxt", "xpd","yaxp","yaxs","yaxt","ylbias") dots <- list(...) argnames <- names(dots) pm <- match(argnames, parnames, nomatch = 0L) call.list <- as.list(call.) # only pass the args from dots ('...') that are in parnames as.call(c(call.list, dots[pm > 0L])) } isNullOrFalse <- function(x) { is.null(x) || identical(x, FALSE) } # Main plot.xts method. # author: Ross Bennett (adapted from Jeffrey Ryan's chart_Series) plot.xts <- function(x, y=NULL, ..., subset="", panels=NULL, multi.panel=FALSE, col=1:8, up.col=NULL, dn.col=NULL, bg="#FFFFFF", type="l", lty=1, lwd=2, lend=1, main=deparse(substitute(x)), main.timespan=TRUE, observation.based=FALSE, log=FALSE, ylim=NULL, yaxis.same=TRUE, yaxis.left=TRUE, yaxis.right=TRUE, yaxis.ticks=5, major.ticks="auto", minor.ticks=NULL, grid.ticks.on="auto", grid.ticks.lwd=1, grid.ticks.lty=1, grid.col="darkgray", labels.col="#333333", format.labels=TRUE, grid2="#F5F5F5", legend.loc=NULL, extend.xaxis=FALSE){ # Small multiples with multiple pages behavior occurs when multi.panel is # an integer. (i.e. multi.panel=2 means to iterate over the data in a step # size of 2 and plot 2 panels on each page # Make recursive calls and return if(is.numeric(multi.panel)){ multi.panel <- min(NCOL(x), multi.panel) idx <- seq.int(1L, NCOL(x), 1L) chunks <- split(idx, ceiling(seq_along(idx)/multi.panel)) # allow color and line attributes for each panel in a multi.panel plot if(length(lty) < ncol(x)) lty <- rep(lty, length.out = ncol(x)) if(length(lwd) < ncol(x)) lwd <- rep(lwd, length.out = ncol(x)) if(length(col) < ncol(x)) col <- rep(col, length.out = ncol(x)) if(!is.null(panels) && nchar(panels) > 0){ # we will plot the panels, but not plot the data by column multi.panel <- FALSE } else { # we will plot the data by column, but not the panels multi.panel <- TRUE panels <- NULL # set the ylim based on the data passed into the x argument if(yaxis.same) ylim <- range(x[subset], na.rm=TRUE) } for(i in 1:length(chunks)){ tmp <- chunks[[i]] p <- plot.xts(x=x[,tmp], y=y, ...=..., subset=subset, panels=panels, multi.panel=multi.panel, col=col[tmp], up.col=up.col, dn.col=dn.col, bg=bg, type=type, lty=lty[tmp], lwd=lwd[tmp], lend=lend, main=main, observation.based=observation.based, log=log, ylim=ylim, yaxis.same=yaxis.same, yaxis.left=yaxis.left, yaxis.right=yaxis.right, yaxis.ticks=yaxis.ticks, major.ticks=major.ticks, minor.ticks=minor.ticks, grid.ticks.on=grid.ticks.on, grid.ticks.lwd=grid.ticks.lwd, grid.ticks.lty=grid.ticks.lty, grid.col=grid.col, labels.col=labels.col, format.labels=format.labels, grid2=grid2, legend.loc=legend.loc, extend.xaxis=extend.xaxis) if(i < length(chunks)) print(p) } # NOTE: return here so we don't draw another chart return(p) } cs <- new.replot_xts() # major.ticks shouldn't be null so we'll set major.ticks here if it is null if(is.null(major.ticks)) { xs <- x[subset] mt <- c(years=nyears(xs), months=nmonths(xs), days=ndays(xs)) major.ticks <- names(mt)[rev(which(mt < 30))[1]] } # add theme and charting 
parameters to Env plot.call <- match.call(expand.dots=TRUE) cs$Env$theme <- list(up.col = up.col, dn.col = dn.col, col = col, rylab = yaxis.right, lylab = yaxis.left, bg = bg, grid = grid.col, grid2 = grid2, labels = labels.col, # String rotation in degrees. See comment about 'crt'. Only supported by text() srt = if (hasArg("srt")) eval.parent(plot.call$srt) else 0, # Rotation of axis labels: # 0: parallel to the axis (default), # 1: horizontal, # 2: perpendicular to the axis, # 3: vertical las = if (hasArg("las")) eval.parent(plot.call$las) else 0, # magnification for axis annotation relative to current 'cex' value cex.axis = if (hasArg("cex.axis")) eval.parent(plot.call$cex.axis) else 0.9) # /theme # multiplier to magnify plotting text and symbols cs$Env$cex <- if (hasArg("cex")) eval.parent(plot.call$cex) else 0.6 # lines of margin to the 4 sides of the plot: c(bottom, left, top, right) cs$Env$mar <- if (hasArg("mar")) eval.parent(plot.call$mar) else c(3,2,0,2) # check for colorset or col argument # if col has a length of 1, replicate to NCOL(x) so we can keep it simple # and color each line by its index in col if(hasArg("colorset")) col <- eval.parent(plot.call$colorset) if(length(col) < ncol(x)) col <- rep(col, length.out = ncol(x)) cs$Env$format.labels <- format.labels cs$Env$yaxis.ticks <- yaxis.ticks cs$Env$major.ticks <- if (isTRUE(major.ticks)) "auto" else major.ticks cs$Env$minor.ticks <- if (isTRUE(minor.ticks)) "auto" else minor.ticks cs$Env$grid.ticks.on <- if (isTRUE(grid.ticks.on)) "auto" else grid.ticks.on cs$Env$grid.ticks.lwd <- grid.ticks.lwd cs$Env$grid.ticks.lty <- grid.ticks.lty cs$Env$type <- type # if lty or lwd has a length of 1, replicate to NCOL(x) so we can keep it # simple and draw each line with attributes by index if(length(lty) < ncol(x)) lty <- rep(lty, length.out = ncol(x)) if(length(lwd) < ncol(x)) lwd <- rep(lwd, length.out = ncol(x)) cs$Env$lty <- lty cs$Env$lwd <- lwd cs$Env$lend <- lend cs$Env$legend.loc <- legend.loc cs$Env$extend.xaxis <- extend.xaxis cs$Env$observation.based <- observation.based cs$Env$log <- isTRUE(log) # Do some checks on x if(is.character(x)) stop("'x' must be a time-series object") # Raw returns data passed into function cs$Env$xdata <- x cs$Env$xsubset <- subset cs$Env$column_names <- colnames(x) cs$Env$nobs <- NROW(cs$Env$xdata) cs$Env$main <- main cs$Env$main.timespan <- main.timespan cs$Env$ylab <- if (hasArg("ylab")) eval.parent(plot.call$ylab) else "" xdata_ylim <- cs$create_ylim(cs$Env$xdata[subset,]) if(isTRUE(multi.panel)){ n_cols <- NCOL(cs$Env$xdata) asp <- ifelse(n_cols > 1, n_cols, 3) if (hasArg("yaxis.same") && hasArg("ylim") && !is.null(ylim)) { warning("only 'ylim' or 'yaxis.same' should be provided; using 'ylim'") } for(i in seq_len(n_cols)) { # create a local environment for each panel lenv <- cs$new_environment() lenv$xdata <- cs$Env$xdata[subset,i] lenv$type <- cs$Env$type if (is.null(ylim)) { if (yaxis.same) { lenv$ylim <- xdata_ylim # set panel ylim using all columns lenv$use_fixed_ylim <- FALSE # update panel ylim when rendering } else { panel_ylim <- cs$create_ylim(lenv$xdata) lenv$ylim <- panel_ylim # set panel ylim using this column lenv$use_fixed_ylim <- TRUE # do NOT update panel ylim when rendering } } else { lenv$ylim <- ylim # use the ylim argument value lenv$use_fixed_ylim <- TRUE # do NOT update panel ylim when rendering } # allow color and line attributes for each panel in a multi.panel plot lenv$lty <- cs$Env$lty[i] lenv$lwd <- cs$Env$lwd[i] lenv$col <- cs$Env$theme$col[i] lenv$log <- 
isTRUE(log) exp <- quote(chart.lines(xdata[xsubset], type=type, lty=lty, lwd=lwd, lend=lend, col=col, log=log, up.col=theme$up.col, dn.col=theme$dn.col, legend.loc=legend.loc)) exp <- as.expression(add.par.from.dots(exp, ...)) # create the panels this_panel <- cs$new_panel(lenv$ylim, asp = asp, envir = lenv, header = cs$Env$column_names[i], draw_left_yaxis = yaxis.left, draw_right_yaxis = yaxis.right, use_fixed_ylim = lenv$use_fixed_ylim, use_log_yaxis = log) # plot data this_panel$add_action(exp, env = lenv) } } else { if(type == "h" && NCOL(x) > 1) warning("only the univariate series will be plotted") if (is.null(ylim)) { yrange <- xdata_ylim # set ylim using all columns use_fixed_ylim <- FALSE # update panel ylim when rendering } else { yrange <- ylim # use the ylim argument value use_fixed_ylim <- TRUE # do NOT update panel ylim when rendering } # create the chart's main panel main_panel <- cs$new_panel(ylim = yrange, asp = 3, envir = cs$Env, header = "", use_fixed_ylim = use_fixed_ylim, draw_left_yaxis = yaxis.left, draw_right_yaxis = yaxis.right, use_log_yaxis = log) exp <- quote(chart.lines(xdata[xsubset], type=type, lty=lty, lwd=lwd, lend=lend, col=theme$col, log=log, up.col=theme$up.col, dn.col=theme$dn.col, legend.loc=legend.loc)) exp <- as.expression(add.par.from.dots(exp, ...)) main_panel$add_action(exp) assign(".xts_chob", cs, .plotxtsEnv) } # Plot the panels or default to a simple line chart if(!is.null(panels) && nchar(panels) > 0) { panels <- parse(text=panels, srcfile=NULL) for( p in 1:length(panels)) { if(length(panels[p][[1]][-1]) > 0) { cs <- eval(panels[p]) } else { cs <- eval(panels[p]) } } } assign(".xts_chob", cs, .plotxtsEnv) cs } # apply a function to the xdata in the xts chob and add a panel with the result addPanel <- function(FUN, main="", on=NA, type="l", col=NULL, lty=1, lwd=1, pch=1, ...){ # get the chob and the raw data (i.e. xdata) chob <- current.xts_chob() # xdata will be passed as first argument to FUN xdata <- chob$Env$xdata fun <- match.fun(FUN) .formals <- formals(fun) if("..." %in% names(.formals)) { # Just call do.call if FUN has '...' x <- try(do.call(fun, c(list(xdata), list(...)), quote=TRUE), silent=TRUE) } else { # Otherwise, ensure we only pass relevant args to FUN .formals <- modify.args(formals=.formals, arglist=list(...)) .formals[[1]] <- quote(xdata) x <- try(do.call(fun, .formals), silent=TRUE) } if(inherits(x, "try-error")) { message(paste("FUN function failed with message", x)) return(NULL) } addSeriesCall <- quote(addSeries(x = x, main = main, on = on, type = type, col = col, lty = lty, lwd = lwd, pch = pch)) addSeriesCall <- add.par.from.dots(addSeriesCall, ...) 
eval(addSeriesCall) } # Add a time series to an existing xts plot # author: Ross Bennett addSeries <- function(x, main="", on=NA, type="l", col=NULL, lty=1, lwd=1, pch=1, ...){ plot_object <- current.xts_chob() lenv <- plot_object$new_environment() lenv$plot_lines <- function(x, ta, on, type, col, lty, lwd, pch, ...){ xdata <- x$Env$xdata xsubset <- x$Env$xsubset xDataSubset <- xdata[xsubset] # we can add points that are not necessarily at the points # on the main series, but need to ensure the new series only # has index values within the xdata subset if(xsubset == "") { subset.range <- xsubset } else { fmt <- "%Y-%m-%d %H:%M:%OS6" subset.range <- paste(format(start(xDataSubset), fmt), format(end(xDataSubset), fmt), sep = "/") } xds <- .xts(, .index(xDataSubset), tzone=tzone(xdata)) ta.y <- merge(ta, xds)[subset.range] if (!isTRUE(x$Env$extend.xaxis)) { xi <- .index(ta.y) xc <- .index(xds) xsubset <- which(xi >= xc[1] & xi <= xc[length(xc)]) ta.y <- ta.y[xsubset] } chart.lines(ta.y, type=type, col=col, lty=lty, lwd=lwd, pch=pch, ...) } # get tag/value from dots expargs <- substitute(alist(ta=x, on=on, type=type, col=col, lty=lty, lwd=lwd, pch=pch, ...)) # capture values from caller, so we don't need to copy objects to lenv, # since this gives us evaluated versions of all the object values expargs <- lapply(expargs[-1L], eval, parent.frame()) exp <- as.call(c(quote(plot_lines), x = quote(current.xts_chob()), expargs)) xdata <- plot_object$Env$xdata xsubset <- plot_object$Env$xsubset lenv$xdata <- merge(x,xdata,retside=c(TRUE,FALSE)) if(hasArg("ylim")) { ylim <- eval.parent(substitute(alist(...))$ylim) } else { ylim <- range(lenv$xdata[xsubset], na.rm=TRUE) if(all(ylim == 0)) ylim <- c(-1, 1) } lenv$ylim <- ylim if(is.na(on[1])){ # add series to a new panel use_log <- isTRUE(eval.parent(substitute(alist(...))$log)) this_panel <- plot_object$new_panel(lenv$ylim, asp = 1, envir = lenv, header = main, use_log_yaxis = use_log) # plot data this_panel$add_action(exp, env = lenv) } else { for(i in on) { plot_object$add_panel_action(i, exp, lenv) } } plot_object } # Add time series of lines to an existing xts plot # author: Ross Bennett lines.xts <- function(x, ..., main="", on=0, col=NULL, type="l", lty=1, lwd=1, pch=1){ if(!is.na(on[1])) if(on[1] == 0) on[1] <- current.xts_chob()$get_last_action_panel()$id addSeries(x, ...=..., main=main, on=on, type=type, col=col, lty=lty, lwd=lwd, pch=pch) } # Add time series of points to an existing xts plot # author: Ross Bennett points.xts <- function(x, ..., main="", on=0, col=NULL, pch=1){ if(!is.na(on[1])) if(on[1] == 0) on[1] <- current.xts_chob()$get_last_action_panel()$id addSeries(x, ...=..., main=main, on=on, type="p", col=col, pch=pch) } # Add vertical lines to an existing xts plot # author: Ross Bennett addEventLines <- function(events, main="", on=0, lty=1, lwd=1, col=1, ...){ events <- try.xts(events) plot_object <- current.xts_chob() if(!is.na(on[1])) if(on[1] == 0) on[1] <- plot_object$get_last_action_panel()$id if(nrow(events) > 1){ if(length(lty) == 1) lty <- rep(lty, nrow(events)) if(length(lwd) == 1) lwd <- rep(lwd, nrow(events)) if(length(col) == 1) col <- rep(col, nrow(events)) } lenv <- plot_object$new_environment() lenv$plot_event_lines <- function(x, events, on, lty, lwd, col, ...){ xdata <- x$Env$xdata xsubset <- x$Env$xsubset ypos <- x$get_panel(on)$ylim[2] * 0.995 # we can add points that are not necessarily at the points on the main series subset.range <- paste(format(start(xdata[xsubset]), "%Y%m%d %H:%M:%OS6"), 
format(end(xdata[xsubset]), "%Y%m%d %H:%M:%OS6"), sep = "/") ta.adj <- merge(n=.xts(1:NROW(xdata[xsubset]), .index(xdata[xsubset]), tzone=tzone(xdata)), .xts(rep(1, NROW(events)),# use numeric for the merge .index(events)))[subset.range] # should we not merge and only add events that are in index(xdata)? ta.y <- ta.adj[,-1] # the merge should result in NAs for any object that is not in events event.ind <- which(!is.na(ta.y)) abline(v=x$get_xcoords()[event.ind], col=col, lty=lty, lwd=lwd) text(x=x$get_xcoords()[event.ind], y=ypos, labels=as.character(events[,1]), col=x$Env$theme$labels, ...) } # get tag/value from dots expargs <- substitute(alist(events=events, on=on, lty=lty, lwd=lwd, col=col, ...)) # capture values from caller, so we don't need to copy objects to lenv, # since this gives us evaluated versions of all the object values expargs <- lapply(expargs[-1L], eval, parent.frame()) exp <- as.call(c(quote(plot_event_lines), x = quote(current.xts_chob()), expargs)) if(is.na(on[1])){ xdata <- plot_object$Env$xdata xsubset <- plot_object$Env$xsubset lenv$xdata <- xdata ylim <- range(xdata[xsubset], na.rm=TRUE) lenv$ylim <- ylim # add series to a new panel this_panel <- plot_object$new_panel(lenv$ylim, asp = 1, envir = lenv, header = main) # plot data this_panel$add_action(exp, env = lenv) } else { for(i in on) { plot_object$add_panel_action(i, exp, lenv) } } plot_object } # Add legend to an existing xts plot # author: Ross Bennett addLegend <- function(legend.loc="topright", legend.names=NULL, col=NULL, ncol=1, on=0, ...){ plot_object <- current.xts_chob() if(!is.na(on[1])) if(on[1] == 0) on[1] <- plot_object$get_last_action_panel()$id lenv <- plot_object$new_environment() lenv$plot_legend <- function(x, legend.loc, legend.names, col, ncol, on, bty, text.col, ...){ if(is.na(on[1])){ yrange <- c(0, 1) } else { yrange <- x$get_panel(on)$ylim } # this just gets the data of the main plot # TODO: get the data of panels[on] if(is.null(ncol)){ ncol <- NCOL(x$Env$xdata) } if(is.null(col)){ col <- x$Env$theme$col[1:NCOL(x$Env$xdata)] } if(is.null(legend.names)){ legend.names <- x$Env$column_names } if(missing(bty)){ bty <- "n" } if(missing(text.col)){ text.col <- x$Env$theme$labels } lc <- legend.coords(legend.loc, x$Env$xlim, yrange) legend(x=lc$x, y=lc$y, legend=legend.names, xjust=lc$xjust, yjust=lc$yjust, ncol=ncol, col=col, bty=bty, text.col=text.col, ...) 
} # get tag/value from dots expargs <- substitute(alist(legend.loc=legend.loc, legend.names=legend.names, col=col, ncol=ncol, on=on, ...)) # capture values from caller, so we don't need to copy objects to lenv, # since this gives us evaluated versions of all the object values expargs <- lapply(expargs[-1L], eval, parent.frame()) exp <- as.call(c(quote(plot_legend), x = quote(current.xts_chob()), expargs)) # if on[1] is NA, then add a new frame for the legend if(is.na(on[1])){ # add legend to a new panel this_panel <- plot_object$new_panel(ylim = c(0, 1), asp = 0.8, envir = lenv, header = "") # legend data this_panel$add_action(exp, env = lenv) } else { for(i in on) { plot_object$add_panel_action(i, exp, lenv) } } plot_object } # Determine legend coordinates based on legend location, # range of x values and range of y values legend.coords <- function(legend.loc, xrange, yrange) { switch(legend.loc, topleft = list(xjust = 0, yjust = 1, x = xrange[1], y = yrange[2]), left = list(xjust = 0, yjust = 0.5, x = xrange[1], y = sum(yrange) / 2), bottomleft = list(xjust = 0, yjust = 0, x = xrange[1], y = yrange[1]), top = list(xjust = 0.5, yjust = 1, x = (xrange[1] + xrange[2]) / 2, y = yrange[2]), center = list(xjust = 0.5, yjust = 0.5, x = (xrange[1] + xrange[2]) / 2, y = sum(yrange) / 2), bottom = list(xjust = 0.5, yjust = 0, x = (xrange[1] + xrange[2]) / 2, y = yrange[1]), topright = list(xjust = 1, yjust = 1, x = xrange[2], y = yrange[2]), right = list(xjust = 1, yjust = 0.5, x = xrange[2], y = sum(yrange) / 2), bottomright = list(xjust = 1, yjust = 0, x = xrange[2], y = yrange[1]) ) } # Add a polygon to an existing xts plot # author: Ross Bennett addPolygon <- function(x, y=NULL, main="", on=NA, col=NULL, ...){ # add polygon to xts plot based on http://dirk.eddelbuettel.com/blog/2011/01/16/ # some simple checks x <- try.xts(x) if(!is.null(y)) stop("y is not null") if(ncol(x) > 2) warning("more than 2 columns detected in x, only the first 2 will be used") plot_object <- current.xts_chob() lenv <- plot_object$new_environment() lenv$plot_lines <- function(x, ta, on, col, ...){ xdata <- x$Env$xdata xsubset <- x$Env$xsubset xDataSubset <- xdata[xsubset] if(is.null(col)) col <- x$Env$theme$col # we can add points that are not necessarily at the points # on the main series, but need to ensure the new series only # has index values within the xdata subset if(xsubset == "") { subset.range <- xsubset } else { fmt <- "%Y-%m-%d %H:%M:%OS6" subset.range <- paste(format(start(xDataSubset), fmt), format(end(xDataSubset), fmt), sep = "/") } xds <- .xts(, .index(xDataSubset), tzone=tzone(xdata)) ta.y <- merge(ta, xds)[subset.range] # NAs in the coordinates break the polygon which is not the behavior we want ta.y <- na.omit(ta.y) # x coordinates n <- seq_len(NROW(ta.y)) xx <- x$get_xcoords(ta.y)[c(1, n, rev(n))] # y coordinates upper and lower # assume first column is upper and second column is lower y coords for # initial prototype yu <- as.vector(coredata(ta.y[,1])) yl <- as.vector(coredata(ta.y[,2])) polygon(x=xx, y=c(yl[1], yu, rev(yl)), border=NA, col=col, ...) 
} # get tag/value from dots expargs <- substitute(alist(ta=x, col=col, on=on, ...)) # capture values from caller, so we don't need to copy objects to lenv, # since this gives us evaluated versions of all the object values expargs <- lapply(expargs[-1L], eval, parent.frame()) exp <- as.call(c(quote(plot_lines), x = quote(current.xts_chob()), expargs)) xdata <- plot_object$Env$xdata xsubset <- plot_object$Env$xsubset lenv$xdata <- merge(x,xdata,retside=c(TRUE,FALSE)) if(hasArg("ylim")) { ylim <- eval.parent(substitute(alist(...))$ylim) } else { ylim <- range(lenv$xdata[xsubset], na.rm=TRUE) if(all(ylim == 0)) ylim <- c(-1, 1) } lenv$ylim <- ylim if(is.na(on[1])){ # add series to a new panel this_panel <- plot_object$new_panel(ylim = lenv$ylim, asp = 1, envir = lenv, header = main) # plot data this_panel$add_action(exp, env = lenv) } else { for(i in on) { plot_object$add_panel_action(i, exp, lenv) } } plot_object }# polygon # Based on quantmod/R/replot.R new.replot_xts <- function(panel=1,asp=1,xlim=c(1,10),ylim=list(structure(c(1,10),fixed=FALSE))) { # global variables # 'Env' is mainly the environment for the plot window, but some elements are for panels/frames Env <- new.env() Env$active_panel_i <- panel Env$asp <- 1 Env$xlim <- xlim # vector: c(min, max) (same for every panel) Env$last_action_panel_id <- 1 # getters get_ylim <- function() { update_panels(); get_active_panel()[["ylim_render"]] } get_xlim <- function() { update_panels(); Env$xlim } get_active_panel <- function() { get_panel(Env$active_panel_i) } get_last_action_panel <- function() { get_panel(Env$last_action_panel_id) } get_panel <- function(n) { if (n == 0) { get_last_action_panel() } else if (n > 0) { Env$panels[[n]] } else { stop("'n' must be a positive integer") } } add_panel_action <- function(id, expr, env, clip = TRUE, where = c("last", "first", "background"), ...) { if (id < 0) { where <- "first" } else { where <- match.arg(where) } this_panel <- get_panel(abs(id)) this_panel$add_action(expr, env, clip, where, ...) } create_ylim <- function(x, const_y_mult = 0.2) { # Create y-axis limits from 'x'. Jitter the max/min limits by # 'const_y_mult' if the max/min values are the same. 
lim <- range(x, na.rm = TRUE) if(isTRUE(all.equal(lim[1L], lim[2L]))) { # if max and min are the same if(lim[1L] == 0) { lim <- c(-1, 1) } else { lim <- lim[1L] * c(1 - const_y_mult, 1 + const_y_mult) } } return(lim) } # loop over panels and then actions render_panels <- function() { update_panels() # all panel header/series asp pairs all_asp <- lapply(Env$panels, function(p) p[["asp"]]) all_asp <- do.call(c, all_asp) # panel header asp is always 5% of the total asp panel_header_asp <- 0.05 * sum(all_asp) # update panel header asp values header_loc <- seq(1, length(all_asp), by = 2) all_asp[header_loc] <- panel_header_asp # main header asp is always 4% of the grand total asp main_title_asp <- 0.04 * sum(all_asp) all_asp <- c(main_title_asp, all_asp) n_asp <- length(all_asp) # render main plot header and x-axis plot.window(Env$xlim, c(0, 1)) clip(par("usr")[1], par("usr")[2], 0, 1) eval(Env$main_header_expr, Env) # header eval(Env$main_xaxis_expr, Env) # x-axis # render each panel for (panel_n in seq_along(Env$panels)) { panel <- Env$panels[[panel_n]] # set the current active panel for the entire plot Env$active_panel_i <- panel_n is_header <- TRUE # header is always the first action for (action in panel$actions) { if (is_header) { is_header <- FALSE asp <- panel_header_asp asp_n <- 2 * panel_n ylim <- c(0, 1) } else { asp <- panel$asp["series"] asp_n <- 2 * panel_n + 1 ylim <- panel$ylim_render } # scaled ylim ylim_scale <- all_asp / asp * abs(diff(ylim)) ymin_adj <- sum(ylim_scale[-seq_len(asp_n)]) ymax_adj <- sum(ylim_scale[-(asp_n:n_asp)]) scaled_ylim <- c(ylim[1] - ymin_adj, ylim[2] + ymax_adj) plot.window(Env$xlim, scaled_ylim) if (attr(action, "clip")) { clip(par("usr")[1], par("usr")[2], ylim[1], ylim[2]) } action_env <- attr(action, "env") eval(action, action_env) } } } get_xcoords <- function(xts_object = NULL, at_posix = FALSE) { # unique index for all series (always POSIXct) xcoords <- Env$xycoords$x if (!is.null(xts_object)) { # get the x-coordinates for the observations in xts_object temp_xts <- .xts(seq_along(xcoords), xcoords, tzone = tzone(xts_object)) xcoords <- merge(temp_xts, xts_object, fill = na.locf, # for duplicate index values join = "right", retside = c(TRUE, FALSE)) if (!isTRUE(Env$extend.xaxis)) { xc <- Env$xycoords$x xi <- .index(xcoords) xsubset <- which(xi >= xc[1] & xi <= xc[length(xc)]) xcoords <- xcoords[xsubset] } if(Env$observation.based && !at_posix) { result <- drop(coredata(xcoords)) } else { result <- .index(xcoords) } } else { if(Env$observation.based && !at_posix) { result <- seq_along(xcoords) } else { result <- xcoords } } return(result) } # main plot header Env$main_header_expr <- expression({ local({ text(x = xlim[1], y = 0.98, labels = main, adj = NULL, pos = 4, offset = 0, cex = 1.1, col = theme$labels, font = 2) if (main.timespan) { text(x = xlim[2], y = 0.98, labels = paste(start(xdata[xsubset]), end(xdata[xsubset]), sep = " / "), adj = c(0, 0), pos = 2, offset = 0.5, cex = 1, col = theme$labels, font = NULL) } }, new.env(TRUE, Env)) }) # main plot x-axis Env$main_xaxis_expr <- expression({ local({ # add observation level ticks on x-axis if < 400 obs. 
if (NROW(xdata[xsubset]) < 400) { axis(1, at = get_xcoords(), labels = FALSE, las = theme$las, lwd.ticks = NULL, mgp = NULL, tcl = 0.3, cex.axis = theme$cex.axis, col = theme$labels, col.axis = theme$grid2) } # and major and/or minor x-axis ticks and labels xcoords <- get_xcoords() x_index <- get_xcoords(at_posix = TRUE) x_data <- .xts(, x_index, tzone = tzone(xdata))[xsubset] use_major <- !isNullOrFalse(major.ticks) use_minor <- !isNullOrFalse(minor.ticks) types <- c("major", "minor")[c(use_major, use_minor)] for (type in types) { if (type== "major") { axt <- axTicksByTime(x_data, ticks.on = major.ticks, format.labels = format.labels) labels <- names(axt) lwd.ticks <- 1.5 } else { axt <- axTicksByTime(x_data, ticks.on = minor.ticks, format.labels = format.labels) labels <- FALSE lwd.ticks <- 0.75 } axis(1, at = xcoords[axt], labels = labels, las = theme$las, lwd.ticks = lwd.ticks, mgp = c(3,1.5,0), tcl = -0.4, cex.axis = theme$cex.axis, col = theme$labels, col.axis = theme$labels) } }, new.env(TRUE, Env)) }) # panel functionality Env$panels <- list() new_panel <- function(ylim, asp, envir, header, ..., use_fixed_ylim = FALSE, draw_left_yaxis = NULL, draw_right_yaxis = NULL, use_log_yaxis = FALSE, title_timespan = FALSE) { panel <- new.env(TRUE, envir) panel$id <- length(Env$panels) + 1 panel$asp <- c(header = 0.25, series = asp) panel$ylim <- ylim panel$ylim_render <- ylim panel$use_fixed_ylim <- isTRUE(use_fixed_ylim) panel$draw_left_yaxis <- ifelse(is.null(draw_left_yaxis), Env$theme$lylab, draw_left_yaxis) panel$draw_right_yaxis <- ifelse(is.null(draw_right_yaxis), Env$theme$rylab, draw_right_yaxis) panel$use_log_yaxis <- isTRUE(use_log_yaxis) panel$header <- header ### actions panel$actions <- list() panel$add_action <- function(expr, env = Env, clip = TRUE, where = c("last", "first", "background"), ...) { if (!is.expression(expr)) { expr <- as.expression(expr) } action <- structure(expr, clip = clip, env = env, ...) 
panel$actions <- switch(match.arg(where), last = { # after all the existing actions append(panel$actions, list(action)) }, first = { # after the header and grid lines append(panel$actions, list(action), after = 3) }, background = { # after the header (which must be the 1st panel action) append(panel$actions, list(action), after = 1) }) Env$last_action_panel_id <<- panel$id } ### header # NOTE: this must be the 1st action for a panel header_expr <- expression({ text(x = xlim[1], y = 0.3, labels = header, adj = c(0, 0), pos = 4, offset = 0, cex = 0.9, col = theme$labels, font = NULL) }) panel$add_action(header_expr, env = panel) ### y-axis yaxis_expr <- expression({ if (use_fixed_ylim) { # use the ylim argument yl <- ylim } else { # use the updated ylim based on all panel data yl <- ylim_render } # y-axis grid line labels and locations if (use_log_yaxis) { ylim_series <- exp(ylim_render) # labels are based on the raw series values grid_lbl <- pretty(ylim_series, Env$yaxis.ticks) grid_lbl <- grid_lbl[grid_lbl >= ylim_series[1] & grid_lbl <= ylim_series[2]] # locations are based on the log series values grid_loc <- log(grid_lbl) } else { grid_loc <- pretty(yl, Env$yaxis.ticks) grid_loc <- grid_loc[grid_loc >= yl[1] & grid_loc <= yl[2]] grid_lbl <- grid_loc } # draw y-axis grid lines segments(x0 = xlim[1], y0 = grid_loc, x1 = xlim[2], y1 = grid_loc, col = theme$grid, lwd = grid.ticks.lwd, lty = grid.ticks.lty) # draw left y-axis grid labels if (draw_left_yaxis) { text(x = xlim[1], y = grid_loc, labels = format(grid_lbl, justify = "right"), col = theme$labels, srt = theme$srt, offset = 0.5, pos = 2, cex = theme$cex.axis, xpd = TRUE) } # draw right y-axis grid labels if (draw_right_yaxis) { text(x = xlim[2], y = grid_loc, labels = format(grid_lbl, justify = "right"), col = theme$labels, srt = theme$srt, offset = 0.5, pos = 4, cex = theme$cex.axis, xpd = TRUE) } # draw y-axis label title(ylab = ylab[1], mgp = c(1, 1, 0)) }) panel$add_action(yaxis_expr, env = panel) # x-axis grid xaxis_action <- expression(x_grid_lines(xdata, grid.ticks.on, par("usr")[3:4])) panel$add_action(xaxis_action, env = panel) # append the new panel to the panel list Env$panels <- append(Env$panels, list(panel)) return(panel) } update_panels <- function(headers=TRUE) { # Recalculate each panel's 'ylim_render' value based on the # 'xdata' of every action in the panel for (panel_n in seq_along(Env$panels)) { panel <- get_panel(panel_n) if (!panel$use_fixed_ylim) { # set 'ylim_render' to +/-Inf when ylim is NOT fixed, so # it will be updated to include all the panel's data panel$ylim_render <- c(Inf, -Inf) # calculate a new ylim based on all the panel's data for (action in panel$actions) { action_env <- attr(action, "env") action_data <- action_env$xdata if (!is.null(action_data)) { # some actions (e.g. addLegend) do not have 'xdata' dat.range <- create_ylim(action_data[Env$xsubset]) # calculate new ylim based on the combination of the panel's # original ylim and the action's 'xdata' ylim new_ylim <- c(min(panel$ylim[1], dat.range, na.rm = TRUE), max(panel$ylim[2], dat.range, na.rm = TRUE)) # set to new ylim values panel$ylim_render <- new_ylim } } } if (panel$use_log_yaxis) { panel$ylim_render <- log(panel$ylim_render) } } update_xaxis <- function(panel, x_axis) { # Create x-axis values using index values from data from all panels for (action in panel$actions) { action_env <- attr(action, "env") action_data <- action_env$xdata if (!is.null(action_data)) { # some actions (e.g. 
addLegend) do not have 'xdata' action_xaxis <- .index(action_data[Env$xsubset]) new_xaxis <- sort(unique(c(x_axis, action_xaxis))) if (isTRUE(Env$extend.xaxis)) { result <- new_xaxis } else { xaxis_rng <- range(x_axis, na.rm = TRUE) result <- new_xaxis[new_xaxis >= xaxis_rng[1L] & new_xaxis <= xaxis_rng[2L]] } } } return(result) } x_axis <- .index(Env$xdata[Env$xsubset]) for (panel in Env$panels) { x_axis <- update_xaxis(panel, x_axis) } # Create x/y coordinates using the combined x-axis index Env$xycoords <- xy.coords(x_axis, seq_along(x_axis)) if (Env$observation.based) { Env$xlim <- c(1, length(get_xcoords())) } else { Env$xlim <- range(get_xcoords(), na.rm = TRUE) } } # return replot_env <- new.env() class(replot_env) <- c("replot_xts","environment") replot_env$Env <- Env replot_env$new_panel <- new_panel replot_env$get_xcoords <- get_xcoords replot_env$update_panels <- update_panels replot_env$render_panels <- render_panels replot_env$get_panel <- get_panel replot_env$add_panel_action <- add_panel_action replot_env$get_xlim <- get_xlim replot_env$get_ylim <- get_ylim replot_env$create_ylim <- create_ylim replot_env$get_last_action_panel <- get_last_action_panel replot_env$new_environment <- function() { new.env(TRUE, Env) } # function to plot the x-axis grid lines replot_env$Env$x_grid_lines <- function(x, ticks.on, ylim) { if (isNullOrFalse(ticks.on)) { invisible() } else { if (isTRUE(ticks.on)) ticks.on <- "auto" xcoords <- get_xcoords() x_index <- get_xcoords(at_posix = TRUE) atbt <- axTicksByTime(.xts(, x_index, tzone = tzone(x)), ticks.on = ticks.on) segments(xcoords[atbt], ylim[1L], xcoords[atbt], ylim[2L], col = Env$theme$grid, lwd = Env$grid.ticks.lwd, lty = Env$grid.ticks.lty) } } return(replot_env) } str.replot_xts <- function(object, ...) { print(str(unclass(object))) } print.replot_xts <- function(x, ...) plot(x,...) plot.replot_xts <- function(x, ...) { # must set the background color before calling plot.new obg <- par(bg = x$Env$theme$bg) plot.new() assign(".xts_chob",x,.plotxtsEnv) # only reasonable way to fix X11/quartz issue ocex <- par(cex = if(.Device == "X11") x$Env$cex else x$Env$cex * 1.5) omar <- par(mar = x$Env$mar) oxpd <- par(xpd = FALSE) usr <- par("usr") # reset par on.exit(par(xpd = oxpd$xpd, cex = ocex$cex, mar = omar$mar, bg = obg$bg)) x$render_panels() do.call("clip", as.list(usr)) # reset clipping region invisible(x$Env$actions) }
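# Usage sketch for the annotation helpers defined above (addPolygon,
# addEventLines, addLegend). Illustrative only and wrapped in `if (FALSE)` so
# nothing executes when this file is sourced; 'sample_matrix' is the example
# data set shipped with xts, and the event dates below are made up.
if (FALSE) {
  data(sample_matrix, package = "xts")
  x <- as.xts(sample_matrix)
  plot(x[, "Close"], main = "Close")
  # draw a High/Low band behind the main series (on = -1 plots underneath)
  addPolygon(x[, c("High", "Low")], on = -1, col = "lightgrey")
  # mark two hypothetical events with labelled vertical lines
  ev <- xts(c("event A", "event B"), as.Date(c("2007-03-15", "2007-05-01")))
  addEventLines(ev, srt = 90, pos = 2)
  # add a legend to the last panel drawn on
  addLegend("topleft", legend.names = "Close", col = 1, lty = 1)
}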
# ---- end of R/plot.R ----
# # xts: eXtensible time-series # # Copyright (C) 2008 Jeffrey A. Ryan jeff.a.ryan @ gmail.com # # Contributions from Joshua M. Ulrich # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. print.xts <- function(x, fmt, ..., show.rows = 10, max.rows = 100) { check.TZ(x) nr <- NROW(x) nc <- NCOL(x) dots <- list(...) if (missing(max.rows)) { # the user didn't specify a value; use the global option value if it's # set; if it's not set, use the default value max.rows <- getOption("xts.print.max.rows", max.rows) } # 'max' in print.default() takes precedence over 'show.rows' if (hasArg("max")) { # 'max' is the number of *elements* (not rows) to print if (nr < 1) { show.rows <- 0 } else { # convert 'max' to 'show.rows' if (!is.null(dots$max)) { show.rows <- trunc(dots$max / nc) } } } else if (missing(show.rows)) { # the user didn't specify a value; use the global option value if it's # set; if it's not set, use the default value show.rows <- getOption("xts.print.show.rows", show.rows) } if (missing(fmt)) { fmt <- tformat(x) } if (is.null(fmt)) { fmt <- TRUE } if (!hasArg("quote")) { dots$quote <- FALSE } if (!hasArg("right")) { dots$right <- TRUE } if (nr > max.rows && nr > 2 * show.rows) { # 'show.rows' can't be more than 2*nrow(x) or observations will be printed # twice, once before the "..." and once after. seq.row <- seq_len(show.rows) seq.col <- seq_len(nc) seq.n <- (nr - show.rows + 1):nr # format all the index values that will be printed, # so every row will have the same number of characters index <- format(index(x)[c(seq.row, seq.n)]) # combine the index values with the '...' separator index <- c(index[seq.row], "...", index[-c(seq.row, tail(seq.row, 1))]) # as.matrix() to ensure we have dims # unclass() avoids as.matrix() method dispatch m <- as.matrix(unclass(x)) # convert to data.frame to format each column individually m <- data.frame(m[c(seq.row, seq.n), seq.col, drop = FALSE]) m[] <- lapply(m, format) m <- as.matrix(m) # insert blank row between top and bottom rows y <- rbind(utils::head(m, show.rows), rep("", nc), utils::tail(m, show.rows)) rownames(y) <- format(index, justify = "right") colnames(y) <- colnames(m[, seq.col, drop = FALSE]) } else { y <- coredata(x, fmt) } if (length(y) == 0) { if (!is.null(dim(x))) { p <- structure(vector(storage.mode(y)), dim = dim(x), dimnames = list(format(index(x)), colnames(x))) print(p) } else { cat('Data:\n') print(vector(storage.mode(y))) cat('\n') cat('Index:\n') index <- index(x) if (length(index) == 0) { print(index) } else { print(str(index)) } } } else { # ensure 'y' has dims and row names if (is.null(dim(y))) { y_names <- as.character(index(x)) y <- matrix(y, nrow = length(y), dimnames = list(y_names, NULL)) } # Create column names as column indexes. if (is.null(colnames(y))) { colnames(y) <- paste0("[,", seq_len(ncol(y)), "]") } do.call("print", c(list(y), dots)) } invisible(x) }
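# Usage sketch for the truncated printing implemented above. Illustrative only
# and wrapped in `if (FALSE)` so it never executes; 'x' is a hypothetical
# series created just for the demonstration.
if (FALSE) {
  x <- xts(rnorm(1000), as.Date("2020-01-01") + 0:999)
  print(x)                          # truncated: 10 head rows, "...", 10 tail rows
  print(x, show.rows = 3)           # 3 rows at each end instead
  print(x, max.rows = 2000)         # nrow(x) <= max.rows, so print everything
  options(xts.print.show.rows = 5)  # session-wide default used by print.xts()
}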
# ---- end of R/print.R ----
# # xts: eXtensible time-series # # Copyright (C) 2008 Jeffrey A. Ryan jeff.a.ryan @ gmail.com # # Contributions from Joshua M. Ulrich # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. rollapply.xts <- function(data, width, FUN, ..., by=1, by.column=TRUE, fill=if(na.pad) NA, na.pad=TRUE, partial=TRUE, align=c("right","center","left")) { if (!missing(na.pad)) { warning("na.pad argument is deprecated") } if (!missing(partial)) { warning("partial argument is not currently supported") } data <- try.xts(data) # jmu: is this necessary? # Code taken/adapted from rollapply.zoo from the 'zoo' package # embedi <- function(n, k, by = 1, ascending = FALSE) { # n = no of time points, k = number of columns # by = increment. normally = 1 but if = b calc every b-th point # ascending If TRUE, points passed in ascending order else descending. # Note that embed(1:n, k) corresponds to embedi(n, k, by = 1, rev = TRUE) # e.g. embedi(10, 3) # s <- seq(1, n-k+1, by) # lens <- length(s) # cols <- 1:k # if(!ascending) cols <- rev(cols) # matrix(s + rep(cols, rep(lens,k))-1, lens) # } # xts doesn't currently have these functions # if(by.column && by == 1 && ascending && length(list(...)) < 1) # switch(deparse(substitute(FUN)), # mean = return(rollmean(data, width, na.pad = na.pad, align = align)), # max = return(rollmax(data, width, na.pad = na.pad, align = align)), # median = return(rollmedian(data, width, na.pad = na.pad, align = align))) nr <- NROW(data) nc <- NCOL(data) width <- as.integer(width)[1] stopifnot( width > 0, width <= nr ) ## process alignment align <- match.arg(align) n1 <- switch(align, "left" = { width - 1}, "center" = { floor(width/2) }, "right" = { 0 }) idx <- index(data) tt <- index(data)[seq((width-n1), (nr-n1), by)] #tt <- idx[seq((width-n1), (nr-n1), 1)] ## evaluate FUN only on coredata(data) #data <- coredata(data) FUN <- match.fun(FUN) ind <- as.matrix(seq.int(width,nr,by)) #e <- embedi(nr, width, by, ascending) if( nc==1 ) { #xx <- apply(e, 1, function(i) FUN(data[i,],...)) #xx <- sapply(1:NROW(e), function(i) FUN(data[e[i,],],...)) ##xx <- sapply(ind, function(i) FUN(data[(i-width+1):i,],...)) xx <- sapply(ind, function(i) FUN(.subset_xts(data,(i-width+1):i),...)) if(!is.null(dim(xx))) xx <- t(xx) res <- xts(xx, tt, if (by == 1) attr(data, "frequency")) } else if( by.column ) { res <- xts( sapply( 1:NCOL(data), function(j) #apply(e, 1, function(i) FUN(data[i,j],...)) ), #apply(ind, 1, function(i) FUN(data[(i-width+1):i,j],...)) ), apply(ind, 1, function(i) FUN(.subset_xts(data,(i-width+1):i,j),...)) ), tt, if (by == 1) attr(data, "frequency") ) } else { #xx <- apply(e, 1, function(i) FUN(data[i,],...)) ##xx <- apply(ind, 1, function(i) FUN(data[(i-width+1):i,],...)) xx <- apply(ind, 1, function(i) FUN(.subset_xts(data,(i-width+1):i),...)) if(!is.null(dim(xx))) xx <- t(xx) res <- xts(xx, tt, if (by == 1) attr(data, "frequency")) } ix <- index(data) %in% index(res) tmp <- merge(res, xts(,idx, attr(data, 
"frequency"))) if(is.null(colnames(res))) { # remove dimnames (xts objects don't have rownames) dimnames(tmp) <- NULL } res <- na.fill(tmp, fill, ix) if( by.column && !is.null(dim(data)) ) { colnames(res) <- colnames(data) } return(res) } rollsum.xts <- function (x, k, fill=if(na.pad) NA, na.pad=TRUE, align=c("right", "center", "left"), ...) { ## FIXME: align and fill are not respected! # from rollapply.xts; is this necessary? x <- try.xts(x) # from rollmean.zoo if (!missing(na.pad)) warning("na.pad is deprecated. Use fill.") # process alignment align <- match.arg(align) #n1 <- switch(align, # "left" = { k - 1 }, # "center" = { floor(k/2) }, # "right" = { 0 }) #ix <- index(x)[seq((k-n1), (nrow(x)-n1), 1)] res <- .Call(C_roll_sum, x, k) res } rollmean.xts <- function (x, k, fill=if(na.pad) NA, na.pad=TRUE, align=c("right", "center", "left"), ...) { rollsum.xts(x=x, k=k, fill=fill, align=align, ...) / k } rollmax.xts <- function (x, k, fill=if(na.pad) NA, na.pad=TRUE, align=c("right", "center", "left"), ...) { ## FIXME: align and fill are not respected! # from rollapply.xts; is this necessary? x <- try.xts(x) # from rollmean.zoo if (!missing(na.pad)) warning("na.pad is deprecated. Use fill.") # process alignment align <- match.arg(align) #n1 <- switch(align, # "left" = { k - 1 }, # "center" = { floor(k/2) }, # "right" = { 0 }) #ix <- index(x)[seq((k-n1), (nrow(x)-n1), 1)] res <- .Call(C_roll_max, x, k) res } rollmin.xts <- function (x, k, fill=if(na.pad) NA, na.pad=TRUE, align=c("right", "center", "left"), ...) { ## FIXME: align and fill are not respected! # from rollapply.xts; is this necessary? x <- try.xts(x) # from rollmean.zoo if (!missing(na.pad)) warning("na.pad is deprecated. Use fill.") # process alignment align <- match.arg(align) #n1 <- switch(align, # "left" = { k - 1 }, # "center" = { floor(k/2) }, # "right" = { 0 }) #ix <- index(x)[seq((k-n1), (nrow(x)-n1), 1)] res <- .Call(C_roll_min, x, k) res } rollcov.xts <- function (x, y, k, fill=if(na.pad) NA, na.pad=TRUE, align=c("right", "center", "left"), sample=TRUE, ...) { ## FIXME: align and fill are not respected! # from rollapply.xts; is this necessary? x <- try.xts(x) y <- try.xts(y) # from rollmean.zoo if (!missing(na.pad)) warning("na.pad is deprecated. Use fill.") # process alignment align <- match.arg(align) #n1 <- switch(align, # "left" = { k - 1 }, # "center" = { floor(k/2) }, # "right" = { 0 }) #ix <- index(x)[seq((k-n1), (nrow(x)-n1), 1)] res <- .Call(C_roll_cov, x, y, k, sample) res }
# ---- end of R/rollapply.xts.R ----
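# Usage sketch for the rolling helpers in R/rollapply.xts.R above. Illustrative
# only and wrapped in `if (FALSE)` so it never executes; 'x' is a hypothetical
# series. Note the FIXME comments above: the C-level rollsum/rollmean/rollmax/
# rollmin/rollcov methods do not yet honour 'align' or 'fill'.
if (FALSE) {
  x <- xts(rnorm(20), as.Date("2020-01-01") + 0:19)
  rollapply(x, width = 5, FUN = mean)           # rolling 5-obs mean, right-aligned
  rollapply(x, width = 5, FUN = mean, by = 5)   # evaluate only every 5th point
  rollapply(x, width = 5, FUN = max, align = "center")
}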
# # xts: eXtensible time-series # # Copyright (C) 2008 Jeffrey A. Ryan jeff.a.ryan @ gmail.com # # Contributions from Joshua M. Ulrich # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. `sort.xts` <- function(x, decreasing=FALSE, MARGIN=1, ...) { if(NCOL(x) > 1) { as.matrix(x)[order(x[,MARGIN],decreasing=decreasing,...),] } else as.matrix(x)[order(x,decreasing=decreasing,...),] }
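# Usage sketch for sort.xts() above. Illustrative only and wrapped in
# `if (FALSE)`; 'x' is hypothetical. Note that the method returns a plain
# matrix ordered by the chosen column (via as.matrix), not an xts object.
if (FALSE) {
  x <- xts(cbind(a = c(3, 1, 2), b = c(30, 10, 20)), as.Date("2020-01-01") + 0:2)
  sort(x)                    # rows ordered by column 1 (MARGIN = 1 is the default)
  sort(x, MARGIN = 2)        # rows ordered by column 2
  sort(x, decreasing = TRUE)
}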
# ---- end of R/sort.xts.R ----
# # xts: eXtensible time-series # # Copyright (C) 2008 Jeffrey A. Ryan jeff.a.ryan @ gmail.com # # Contributions from Joshua M. Ulrich # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. split.xts <- function(x, f="months", drop=FALSE, k=1, ...) { if(is.character(f) && length(f) == 1L) { ep <- endpoints(x, on=f, k=k) sp <- (ep + 1)[-length(ep)] ep <- ep[-1] out <- lapply(seq_along(ep), function(X) x[sp[X]:ep[X]]) if(f == "secs" || f == "mins") { f <- substr(f, 1L, 3L) } f <- match.arg(f, c("years", "quarters", "months", "weeks", "days", "hours", "minutes", "seconds", "milliseconds", "microseconds", "ms", "us")) obs.for.names <- index(x)[sp] outnames <- switch(f, "years" = format(obs.for.names, "%Y"), "quarters" = as.character(as.yearqtr(as.POSIXlt(obs.for.names))), "months" = format(obs.for.names, "%b %Y"), "weeks" = format(obs.for.names, "%Y-%m-%d"), "days" = format(obs.for.names, "%Y-%m-%d"), "hours" = format(obs.for.names, "%Y-%m-%d %H:00:00"), "minutes" = format(obs.for.names, "%Y-%m-%d %H:%M:00"), "seconds" = format(obs.for.names, "%Y-%m-%d %H:%M:%S"), "milliseconds" = , "ms" = format(obs.for.names, "%Y-%m-%d %H:%M:%OS3"), "microseconds" = , "us" = format(obs.for.names, "%Y-%m-%d %H:%M:%OS6")) setNames(out, outnames) } else NextMethod("split") }
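# Usage sketch for split.xts() above, using the bundled 'sample_matrix' data
# set. Illustrative only and wrapped in `if (FALSE)`.
if (FALSE) {
  data(sample_matrix, package = "xts")
  x <- as.xts(sample_matrix)
  m <- split(x, f = "months")    # list with one xts object per month
  names(m)                       # e.g. "Jan 2007", "Feb 2007", ...
  split(x, f = "quarters")       # names like "2007 Q1", "2007 Q2"
  # a non-character 'f' falls through to the next split() method
  split(x, f = rep(1:2, length.out = nrow(x)))
}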
# ---- end of R/split.R ----
# # xts: eXtensible time-series # # Copyright (C) 2008 Jeffrey A. Ryan jeff.a.ryan @ gmail.com # # Contributions from Joshua M. Ulrich # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. `start.xts` <- function(x, ...) { index(x[1,]) } `end.xts` <- function(x, ...) { if(length(x)==0) { index(x[length(.index(x)),]) } else index(x[NROW(x),]) }
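# Usage sketch for the start()/end() methods above. Illustrative only and
# wrapped in `if (FALSE)`; 'x' is hypothetical.
if (FALSE) {
  x <- xts(1:5, as.Date("2020-01-01") + 0:4)
  start(x)   # first index value: 2020-01-01
  end(x)     # last index value:  2020-01-05
}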
# ---- end of R/start.R ----
# # xts: eXtensible time-series # # Copyright (C) 2008 Jeffrey A. Ryan jeff.a.ryan @ gmail.com # # Contributions from Joshua M. Ulrich # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. `startOfYear` <- function(from=1900, to=2200, origin=1970) { .Call(C_do_startofyear, from = as.integer(from), to = as.integer(to), origin = as.integer(origin)) }
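# Usage sketch for startOfYear() above. Illustrative only and wrapped in
# `if (FALSE)`. As read from the wrapper, it returns offsets (relative to the
# 'origin' year) for 1 January of each year in [from, to]; the exact values
# are computed in C and are not asserted here.
if (FALSE) {
  startOfYear()              # years 1900..2200, origin 1970 (the defaults)
  startOfYear(2020, 2022)    # just 2020, 2021 and 2022
}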
# ---- end of R/startOfYear.R ----
# # xts: eXtensible time-series # # Copyright (C) 2008 Jeffrey A. Ryan jeff.a.ryan @ gmail.com # # Contributions from Joshua M. Ulrich # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. `str.xts` <- function(object, ..., ncols = 5) { is.data.empty <- is.null(dim(object)) || sum(dim(object)) == 0 is.zero.index <- (length(.index(object)) == 0) nr <- NROW(object) nc <- ifelse(is.data.empty, 0, NCOL(object)) # "zero-length" xts # * index length == 0, but tclass and tzone are set # * NROW == 0 # * NCOL > 0 and may have column names # examples: # str(.xts(1, 1)["1900"]) # str(.xts(cbind(a = 1, b = 2), 1)["1900"]) is.zero.length <- (is.zero.index && nr == 0 && !is.data.empty) # "zero-width" xts # * index length > 0 # * NROW == 0 # * NCOL == 0 # example: # str(.xts(, 1:5)) is.zero.width <- (!is.zero.index && is.data.empty) # "empty" xts # * index length == 0, but tclass and tzone are set # * NROW == 0 # * NCOL == 0 # example: # str(.xts(, numeric(0))) # str(.xts(matrix()[0,0], numeric(0))) is.empty <- (is.zero.index && is.data.empty) if (is.empty) { header <- "An empty xts object" } else if (is.zero.length) { header <- "A zero-length xts object" } else { # zero-width and regular xts objects if (is.zero.width) { header <- "A zero-width xts object on" } else { header <- "An xts object on" } time.range <- sub("/", " / ", .makeISO8601(object), fixed = TRUE) header <- paste(header, time.range, "containing:") } cat(header, "\n") # Data cat(sprintf(" Data: %s [%d, %d]\n", storage.mode(object), nr, nc)) # Column names cnames <- colnames(object) if (!is.null(cnames)) { if (nc > ncols) { more <- nc - ncols cname.str <- sprintf("%s ... with %d more %s", paste(cnames[seq_len(ncols)], collapse = ", "), more, ifelse(more > 1, "columns", "column")) } else { cname.str <- paste(colnames(object), collapse = ", ") } cat(sprintf(" Columns: %s\n", cname.str)) } # Index cat(sprintf(" Index: %s [%d] (TZ: \"%s\")\n", paste(tclass(object), collapse = ","), length(.index(object)), tzone(object))) if (!is.null(CLASS(object))) { cat(sprintf(" Original class: '%s'\n", CLASS(object))) } xts.attr <- xtsAttributes(object) if (!is.null(xts.attr)) { cat(" xts Attributes:\n") str(xts.attr, ..., comp.str = " $ ", no.list = TRUE) } invisible(NULL) }
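# Usage sketch for str.xts() above, covering the object shapes described in
# the comments: regular, zero-width, empty and zero-length. Illustrative only
# and wrapped in `if (FALSE)`.
if (FALSE) {
  str(.xts(cbind(a = 1:3, b = 4:6), 1:3))   # regular: data, columns, index
  str(.xts(, 1:5))                          # zero-width: index but no data
  str(.xts(, numeric(0)))                   # empty
  str(.xts(1, 1)["1900"])                   # zero-length
}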
# ---- end of R/str.R ----
# # xts: eXtensible time-series # # Copyright (C) 2008 Jeffrey A. Ryan jeff.a.ryan @ gmail.com # # Contributions from Joshua M. Ulrich # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. `convertIndex` <- function(x,value) { tclass(x) <- value x } tclass <- function(x, ...) { UseMethod('tclass') } tclass.default <- function(x, ...) { attr(x, "tclass") } tclass.xts <- function(x, ...) { tclass <- attr(attr(x, "index"), "tclass") # For xts objects created pre-0.10.3 if (is.null(tclass)) { # no tclass on the index sq_tclass <- sQuote("tclass") sq_both <- paste(sq_tclass, "or", sQuote(".indexCLASS")) warn_msg <- paste0("index does not have a ", sq_tclass, " attribute") tclass <- attr(x, "tclass") if (is.null(tclass)) { # no tclass on the xts object, look for .indexCLASS tclass <- attr(x, ".indexCLASS") } if (is.null(tclass)) { # no .indexCLASS on the xts object tc <- c("POSIXct", "POSIXt") warn_msg <- paste0(warn_msg, "\n and xts object does not have a ", sq_both, " attribute\n", " returning ", dQuote(tc)) warning(warn_msg) return(tc) } sym <- deparse(substitute(x)) warning(warn_msg, "\n use ", sym, " <- xts:::.update_index_attributes(", sym, ") to update the object") } return(tclass) } `tclass<-` <- function(x,value) { UseMethod('tclass<-') } `tclass<-.default` <- function(x, value) { if (!is.null(value)) { value <- as.character(value) } attr(x, "tclass") <- value x } indexClass <- function(x) { .Deprecated("tclass", "xts") tclass(x) } `indexClass<-` <- function(x, value) { .Deprecated("tclass<-", "xts") `tclass<-`(x, value) } `tclass<-.xts` <- function(x, value) { if(!is.character(value) && length(value) != 1) stop('improperly specified value for tclass') # remove 'POSIXt' from value, to prevent tclass(x) <- 'POSIXt' value <- value[!value %in% "POSIXt"] if(length(value)==0L) stop(paste('unsupported',sQuote('tclass'),'indexing type: POSIXt')) if(!value[1] %in% c('dates','chron','POSIXlt','POSIXct','Date','timeDate','yearmon','yearqtr','xtime') ) stop(paste('unsupported',sQuote('tclass'),'indexing type:',as.character(value[[1]]))) # Add 'POSIXt' virtual class if(value %in% c('POSIXlt','POSIXct')) value <- c(value,'POSIXt') # all index related meta-data will be stored in the index # as attributes if(isClassWithoutTZ(value)) { attr(attr(x,'index'), 'tzone') <- 'UTC' } attr(attr(x,'index'), 'tclass') <- value x_has_tz <- !isClassWithoutTZ(x) if(x_has_tz && isClassWithoutTZ(value)) { # update index values to midnight UTC (this also changes the tzone) index(x) <- index(x) } # Remove class attrs (object created before 0.10-3) attr(x, ".indexCLASS") <- NULL attr(x, "tclass") <- NULL x }
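# Usage sketch for tclass(), tclass<-() and convertIndex() above. Illustrative
# only and wrapped in `if (FALSE)`; 'x' is hypothetical.
if (FALSE) {
  x <- xts(1:3, as.Date("2020-01-01") + 0:2)
  tclass(x)                   # "Date"
  tclass(x) <- "POSIXct"      # the 'POSIXt' virtual class is added automatically
  tclass(x)                   # "POSIXct" "POSIXt"
  tclass(x) <- "Date"         # tz-less class: index values move to midnight UTC
  y <- convertIndex(x, "yearmon")   # convenience wrapper for tclass<-
  tclass(y)
}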
# ---- end of R/tclass.R ----