e0M_supplemental <- read.delim(file='e0M_supplemental.txt', comment.char='#', check.names=FALSE)
/scratch/gouwar.j/cran-all/cranData/wpp2019/data/e0M_supplemental.R
e0Mproj <- read.delim(file='e0Mproj.txt', comment.char='#', check.names=FALSE)
/scratch/gouwar.j/cran-all/cranData/wpp2019/data/e0Mproj.R
e0Mproj80l <- read.delim(file='e0Mproj80l.txt', comment.char='#', check.names=FALSE)
/scratch/gouwar.j/cran-all/cranData/wpp2019/data/e0Mproj80l.R
e0Mproj80u <- read.delim(file='e0Mproj80u.txt', comment.char='#', check.names=FALSE)
/scratch/gouwar.j/cran-all/cranData/wpp2019/data/e0Mproj80u.R
e0Mproj95l <- read.delim(file='e0Mproj95l.txt', comment.char='#', check.names=FALSE)
/scratch/gouwar.j/cran-all/cranData/wpp2019/data/e0Mproj95l.R
e0Mproj95u <- read.delim(file='e0Mproj95u.txt', comment.char='#', check.names=FALSE)
/scratch/gouwar.j/cran-all/cranData/wpp2019/data/e0Mproj95u.R
migration <- read.delim(file='migration.txt', comment.char='#', check.names=FALSE)
/scratch/gouwar.j/cran-all/cranData/wpp2019/data/migration.R
mxF <- read.delim(file='mxF.txt', comment.char='#', check.names=FALSE)
/scratch/gouwar.j/cran-all/cranData/wpp2019/data/mxF.R
mxM <- read.delim(file='mxM.txt', comment.char='#', check.names=FALSE)
/scratch/gouwar.j/cran-all/cranData/wpp2019/data/mxM.R
percentASFR <- read.delim(file='percentASFR.txt', comment.char='#', check.names=FALSE)
/scratch/gouwar.j/cran-all/cranData/wpp2019/data/percentASFR.R
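percentASFR holds, per country, the percentage of total fertility contributed by each 5-year age group of mothers; wppExplorer's fertage() later multiplies it by the TFR to recover the age pattern of fertility. A toy check with hypothetical numbers:
tfr <- 2.1
pasfr <- c(`15-19` = 5, `20-24` = 25, `25-29` = 30, `30-34` = 22,
           `35-39` = 13, `40-44` = 4, `45-49` = 1)   # percentages, sum to 100
asfr <- tfr * pasfr / 100                            # fertility contributed by each age group
sum(asfr)                                            # recovers the TFR, 2.1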
# Total population (observed)
# This dataset is created on the fly as a sum of the sex-specific population estimates popMT and popFT
pop <- local({
    source('popMT.R')
    source('popFT.R')
    # The male and female dataset should be in the same format,
    # i.e. the countries and years should be in the same order, but just to be sure
    # match columns and rows. It will fail if there are different sets of countries
    # in the two datasets.
    cols.to.sumM <- colnames(popMT)[-match(c('country_code', "name"), colnames(popMT))]
    cols.to.sumF <- colnames(popFT)[-match(c('country_code', "name"), colnames(popFT))]
    cols.to.sumF.idx <- match(cols.to.sumF, cols.to.sumM)
    rowsF.idx <- match(popFT$country_code, popMT$country_code)
    cbind(country_code=popMT$country_code, name=popMT[,"name"],
          popMT[,cols.to.sumM] + popFT[rowsF.idx, cols.to.sumF[cols.to.sumF.idx]])
})
/scratch/gouwar.j/cran-all/cranData/wpp2019/data/pop.R
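The align-and-add step above (matching popFT rows and columns to popMT before summing) is easiest to see on toy data; the sketch below uses two hypothetical two-country tables rather than the real datasets:
male   <- data.frame(country_code = c(4, 8), name = c("Afghanistan", "Albania"),
                     `1950` = c(4.0, 0.6), `1955` = c(4.5, 0.7), check.names = FALSE)
female <- data.frame(country_code = c(8, 4), name = c("Albania", "Afghanistan"),
                     `1950` = c(0.6, 3.9), `1955` = c(0.7, 4.4), check.names = FALSE)
cols <- setdiff(colnames(male), c("country_code", "name"))   # the year columns
rows <- match(male$country_code, female$country_code)        # align female rows to the male row order
cbind(male[, c("country_code", "name")], male[, cols] + female[rows, cols])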
popF <- read.delim(file='popF.txt', comment.char='#', check.names=FALSE)
/scratch/gouwar.j/cran-all/cranData/wpp2019/data/popF.R
# Total female population (observed)
# This dataset is created on the fly as a sum of the age-specific population estimates popF
popFT <- local({
    source('popF.R')
    sum.by.country <- function(dataset) {
        year.cols <- grep('^[0-9]{4}', colnames(dataset), value = TRUE)
        name.col <- grep('^name$|^country$', colnames(dataset), value=TRUE)
        data.table::setnames(dataset, name.col, "name") # rename if necessary
        dataset[, c("country_code", "name", year.cols), with = FALSE][, lapply(.SD, sum, na.rm = TRUE), by = c("country_code", "name")]
    }
    as.data.frame(sum.by.country(data.table::as.data.table(popF)))
})
/scratch/gouwar.j/cran-all/cranData/wpp2019/data/popFT.R
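The sum.by.country() helper above collapses the age-specific rows into one total per country with a data.table grouped sum; a self-contained sketch of the same idiom on hypothetical data:
library(data.table)
popF_toy <- data.table(country_code = c(4, 4, 8, 8),
                       name = c("Afghanistan", "Afghanistan", "Albania", "Albania"),
                       age = c("0-4", "5-9", "0-4", "5-9"),
                       `1950` = c(1.0, 0.9, 0.2, 0.2), `1955` = c(1.1, 1.0, 0.2, 0.2))
year.cols <- grep('^[0-9]{4}', colnames(popF_toy), value = TRUE)
popF_toy[, c("country_code", "name", year.cols), with = FALSE][
    , lapply(.SD, sum, na.rm = TRUE), by = c("country_code", "name")]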
# Total female population (projection median)
# This dataset is created on the fly as a sum of the age-specific population median projections popFprojMed
popFTproj <- local({
    source('popFprojMed.R')
    sum.by.country <- function(dataset) {
        year.cols <- grep('^[0-9]{4}', colnames(dataset), value = TRUE)
        name.col <- grep('^name$|^country$', colnames(dataset), value=TRUE)
        data.table::setnames(dataset, name.col, "name") # rename if necessary
        dataset[, c("country_code", "name", year.cols), with = FALSE][, lapply(.SD, sum, na.rm = TRUE), by = c("country_code", "name")]
    }
    as.data.frame(sum.by.country(data.table::as.data.table(popFprojMed)))
})
/scratch/gouwar.j/cran-all/cranData/wpp2019/data/popFTproj.R
popFprojHigh <- read.delim(file='popFprojHigh.txt', comment.char='#', check.names=FALSE)
/scratch/gouwar.j/cran-all/cranData/wpp2019/data/popFprojHigh.R
popFprojLow <- read.delim(file='popFprojLow.txt', comment.char='#', check.names=FALSE)
/scratch/gouwar.j/cran-all/cranData/wpp2019/data/popFprojLow.R
popFprojMed <- read.delim(file='popFprojMed.txt', comment.char='#', check.names=FALSE)
/scratch/gouwar.j/cran-all/cranData/wpp2019/data/popFprojMed.R
popM <- read.delim(file='popM.txt', comment.char='#', check.names=FALSE)
/scratch/gouwar.j/cran-all/cranData/wpp2019/data/popM.R
# Total male population (observed)
# This dataset is created on the fly as a sum of the age-specific population estimates popM
popMT <- local({
    source('popM.R')
    #suppressPackageStartupMessages(library(data.table))
    sum.by.country <- function(dataset) {
        year.cols <- grep('^[0-9]{4}', colnames(dataset), value = TRUE)
        name.col <- grep('^name$|^country$', colnames(dataset), value=TRUE)
        data.table::setnames(dataset, name.col, "name") # rename if necessary
        dataset[, c("country_code", "name", year.cols), with = FALSE][, lapply(.SD, sum, na.rm = TRUE), by = c("country_code", "name")]
    }
    as.data.frame(sum.by.country(data.table::as.data.table(popM)))
})
/scratch/gouwar.j/cran-all/cranData/wpp2019/data/popMT.R
# Total male population (projection median)
# This dataset is created on the fly as a sum of the age-specific population median projections popMprojMed
popMTproj <- local({
    source('popMprojMed.R')
    sum.by.country <- function(dataset) {
        year.cols <- grep('^[0-9]{4}', colnames(dataset), value = TRUE)
        name.col <- grep('^name$|^country$', colnames(dataset), value=TRUE)
        data.table::setnames(dataset, name.col, "name") # rename if necessary
        dataset[, c("country_code", "name", year.cols), with = FALSE][, lapply(.SD, sum, na.rm = TRUE), by = c("country_code", "name")]
    }
    as.data.frame(sum.by.country(data.table::as.data.table(popMprojMed)))
})
/scratch/gouwar.j/cran-all/cranData/wpp2019/data/popMTproj.R
popMprojHigh <- read.delim(file='popMprojHigh.txt', comment.char='#', check.names=FALSE)
/scratch/gouwar.j/cran-all/cranData/wpp2019/data/popMprojHigh.R
popMprojLow <- read.delim(file='popMprojLow.txt', comment.char='#', check.names=FALSE)
/scratch/gouwar.j/cran-all/cranData/wpp2019/data/popMprojLow.R
popMprojMed <- read.delim(file='popMprojMed.txt', comment.char='#', check.names=FALSE)
/scratch/gouwar.j/cran-all/cranData/wpp2019/data/popMprojMed.R
# Total population (projection median)
# This dataset is created on the fly as a sum of the sex-specific population median projections popMTproj and popFTproj
popproj <- local({
    source('popMTproj.R')
    source('popFTproj.R')
    # The male and female dataset should be in the same format,
    # i.e. the countries and years should be in the same order, but just to be sure
    # match columns and rows. It will fail if there are different sets of countries
    # in the two datasets.
    tpopM <- popMTproj
    tpopF <- popFTproj
    cols.to.sumM <- colnames(tpopM)[-match(c('country_code', "name"), colnames(tpopM))]
    cols.to.sumF <- colnames(tpopF)[-match(c('country_code', "name"), colnames(tpopF))]
    cols.to.sumF.idx <- match(cols.to.sumF, cols.to.sumM)
    rowsF.idx <- match(tpopF$country_code, tpopM$country_code)
    cbind(country_code=tpopM$country_code, name=tpopM[,"name"],
          tpopM[,cols.to.sumM] + tpopF[rowsF.idx, cols.to.sumF[cols.to.sumF.idx]])
})
/scratch/gouwar.j/cran-all/cranData/wpp2019/data/popproj.R
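A usage sketch combining the observed totals (pop) with the median projection (popproj), assuming the wpp2019 package is installed and that the UN aggregate code 900 (World) is present in both tables, as in the underlying WPP files:
library(wpp2019)
data(pop, popproj)
world.obs  <- subset(pop,     country_code == 900)
world.proj <- subset(popproj, country_code == 900)
years.obs  <- as.integer(grep("^[0-9]{4}$", colnames(world.obs),  value = TRUE))
years.proj <- as.integer(grep("^[0-9]{4}$", colnames(world.proj), value = TRUE))
vals.obs  <- unlist(world.obs[, as.character(years.obs)])
vals.proj <- unlist(world.proj[, as.character(years.proj)])
plot(years.obs, vals.obs, type = "l", xlim = range(years.obs, years.proj),
     ylim = range(vals.obs, vals.proj), xlab = "Year", ylab = "Population (thousands)")
lines(years.proj, vals.proj, lty = 2)    # median projection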
popproj80l <- read.delim(file='popproj80l.txt', comment.char='#', check.names=FALSE)
/scratch/gouwar.j/cran-all/cranData/wpp2019/data/popproj80l.R
popproj80u <- read.delim(file='popproj80u.txt', comment.char='#', check.names=FALSE)
/scratch/gouwar.j/cran-all/cranData/wpp2019/data/popproj80u.R
popproj95l <- read.delim(file='popproj95l.txt', comment.char='#', check.names=FALSE)
/scratch/gouwar.j/cran-all/cranData/wpp2019/data/popproj95l.R
popproj95u <- read.delim(file='popproj95u.txt', comment.char='#', check.names=FALSE)
/scratch/gouwar.j/cran-all/cranData/wpp2019/data/popproj95u.R
popprojHigh <- read.delim(file='popprojHigh.txt', comment.char='#', check.names=FALSE)
/scratch/gouwar.j/cran-all/cranData/wpp2019/data/popprojHigh.R
popprojLow <- read.delim(file='popprojLow.txt', comment.char='#', check.names=FALSE)
/scratch/gouwar.j/cran-all/cranData/wpp2019/data/popprojLow.R
sexRatio <- read.delim(file='sexRatio.txt', comment.char='#', check.names=FALSE)
/scratch/gouwar.j/cran-all/cranData/wpp2019/data/sexRatio.R
tfr <- read.delim(file='tfr.txt', comment.char='#', check.names=FALSE)
/scratch/gouwar.j/cran-all/cranData/wpp2019/data/tfr.R
tfr_supplemental <- read.delim(file='tfr_supplemental.txt', comment.char='#', check.names=FALSE)
/scratch/gouwar.j/cran-all/cranData/wpp2019/data/tfr_supplemental.R
tfrproj80l <- read.delim(file='tfrproj80l.txt', comment.char='#', check.names=FALSE)
/scratch/gouwar.j/cran-all/cranData/wpp2019/data/tfrproj80l.R
tfrproj80u <- read.delim(file='tfrproj80u.txt', comment.char='#', check.names=FALSE)
/scratch/gouwar.j/cran-all/cranData/wpp2019/data/tfrproj80u.R
tfrproj95l <- read.delim(file='tfrproj95l.txt', comment.char='#', check.names=FALSE)
/scratch/gouwar.j/cran-all/cranData/wpp2019/data/tfrproj95l.R
tfrproj95u <- read.delim(file='tfrproj95u.txt', comment.char='#', check.names=FALSE)
/scratch/gouwar.j/cran-all/cranData/wpp2019/data/tfrproj95u.R
tfrprojHigh <- read.delim(file='tfrprojHigh.txt', comment.char='#', check.names=FALSE)
/scratch/gouwar.j/cran-all/cranData/wpp2019/data/tfrprojHigh.R
tfrprojLow <- read.delim(file='tfrprojLow.txt', comment.char='#', check.names=FALSE)
/scratch/gouwar.j/cran-all/cranData/wpp2019/data/tfrprojLow.R
tfrprojMed <- read.delim(file='tfrprojMed.txt', comment.char='#', check.names=FALSE)
/scratch/gouwar.j/cran-all/cranData/wpp2019/data/tfrprojMed.R
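Observed and projected TFR live in separate tables; merging them by country reproduces what wppExplorer's load.and.merge.datasets() does. A sketch, assuming wpp2019 is installed; the period column names are assumed from the .txt layout read above (566 is the UN code for Nigeria):
library(wpp2019)
data(tfr, tfrprojMed)
tfr.all <- merge(tfr, tfrprojMed, by = c("country_code", "name"))
tfr.all[tfr.all$country_code == 566, c("name", "2015-2020", "2020-2025", "2045-2050")]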
utils::globalVariables("wpp.data.env") get.indicator.choices <- function() { ind.names <- c('Total Fertility Rate', 'Female Life Expectancy', 'Male Life Expectancy', 'Total Population', 'Female Population', 'Male Population', 'Net Migration', 'Net Migration Rate', 'Sex Ratio at Birth', 'Median Age', 'Mean Age at Childbearing', 'Mean Age of Women in Childbearing Ages', 'Total Dependency Ratio', 'Child Dependency Ratio', 'Old-age Dependency Ratio','Potential Support Ratio', 'Mean Annual Population Growth', 'Population by sex and age', 'Mortality Rate by sex and age', 'Age-specific Fertility Rate', 'Percent Age-specific Fertility') ind.def <- c('', '', '', 'Total population in thousands', 'Female population in thousands', 'Male population in thousands', 'Net migration counts in thousands per 5 years', 'Annual net migration rate (per thousand population; denominator is approx. average population)', 'Ratio of male to female', '', 'Mean age of mothers at the birth of their children', 'Mean age of women that are in childbearing ages', 'Ratio of population age 0-14 and 65+ to population age 15-64', 'Ratio of population age 0-14 to population age 15-64', 'Ratio of population age 65+ to population age 15-64', 'Ratio of population age 15-64 to population age 65+', 'log(P_t/P_{t-1})/5', 'Population in thousands', #'Mortality for ages 85-100+ are not official UN data. They were derived from UN published life table quantities.', '', '', '') funcs <- c('fert', 'leF', 'leM', 'tpop', 'tpopF', 'tpopM', 'mig', 'migrate', 'sexratio', 'medage', 'meanagechbear', 'meanageinchbearage', 'tdratio', 'chdratio', 'oadratio', 'psratio', 'popgrowth', 'popagesex', 'mortagesex', 'fertage', 'pfertage') # if a new indicator is added, change also the condition in ui.R for displaying age-specific stuff l <- length(ind.names) ini <- rep(FALSE, l) ind.df <- data.frame(by.age=ini, no.age.sum=ini, sum.in.table=ini, low.high=ini, half.child=ini, prob.ci=ini, mid.years=ini, digits=rep(1, l), has.negatives=ini) rownames(ind.df) <- funcs ind.df[c('popagesex', 'mortagesex', 'fertage', 'pfertage'), 'by.age'] <- TRUE # display sex and age menu ind.df[c('mortagesex','fertage'), 'no.age.sum'] <- TRUE # don't allow multiple age- and sex-selection ind.df[c('tpop', 'tpopF', 'tpopM', 'mig','popagesex'), 'sum.in.table'] <- TRUE # show sum in the trend table ind.df[c('fert', 'leF', 'leM', 'tpop'), 'low.high'] <- TRUE # has uncertainty ind.df[c('fert', 'tpop', 'popagesex'), 'half.child'] <- TRUE # has half.child variant ind.df[c('fert', 'leF', 'leM', 'mig', 'sexratio', 'mortagesex', 'fertage', 'pfertage'), 'mid.years'] <- TRUE # use mid years in slider (not implemented) ind.df[c('tpop', 'tpopF', 'tpopM','popagesex', 'mig'), 'digits'] <- 0 # number of digits the histogram ind.df[c('sexratio', 'popgrowth', 'mortagesex'), 'digits'] <- 4 ind.df['fertage', 'digits'] <- 3 ind.df[c('tdratio', 'chdratio', 'oadratio'), 'digits'] <- 2 ind.df[c('mig', 'migrate', 'popgrowth'), 'has.negatives'] <- TRUE structure( as.character(1:length(ind.names)), names = ind.names, definition = ind.def, settings = ind.df ) } assign("wpp.data.env", new.env(), envir=parent.env(environment()) #envir = .GlobalEnv ) data('iso3166', envir=wpp.data.env) wpp.data.env$indicators <- get.indicator.choices() wpp.data.env$package <- "wpp2019" # Filter out non-used countries do.call('data', list("popM", package=wpp.data.env$package, envir=wpp.data.env)) wpp.data.env$iso3166 <- wpp.data.env$iso3166[is.element(wpp.data.env$iso3166$uncode, wpp.data.env$popM$country_code),]
/scratch/gouwar.j/cran-all/cranData/wppExplorer/R/global.R
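The vector returned by get.indicator.choices() doubles as the Shiny menu (its names) and a lookup table (its attributes); the accessors defined later in R/wpp.R (ind.settings, ind.fun, ind.digits) read it back roughly as sketched below, assuming global.R has been sourced:
ind <- get.indicator.choices()
names(ind)[1:3]                                             # menu labels shown in the 'Indicator:' selector
settings <- attr(ind, "settings")                           # per-indicator flags, keyed by function name
rownames(settings)[as.numeric(ind["Total Population"])]     # -> "tpop", the function to call for that indicator
settings["fert", c("low.high", "half.child", "digits")]     # TFR has uncertainty bounds and a half-child variant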
utils::globalVariables("wpp.data.env") wpp.explore <- function(wpp.year=NULL, host=NULL, ...) { if(!is.null(wpp.year)) set.wpp.year(wpp.year) if(missing(host)) host <- getOption("shiny.host", "0.0.0.0") shiny::runApp(system.file('explore', package='wppExplorer'), host = host, ...) } wpp.explore3d <- function(wpp.year=NULL) { if(!is.null(wpp.year)) set.wpp.year(wpp.year) shiny::runApp(system.file('bubbles', package='wppExplorer')) } get.available.wpps <- function() c(2008, 2010, 2012, 2015, 2017, 2019) check.wpp.revision <- function(wpp.year) { if (!wpp.year %in% get.available.wpps()) stop('wpp.year must be one of ', paste(get.available.wpps(), collapse=', ')) } wpp.year.from.package.name <- function(package) return(as.integer(substr(package, 4, nchar(package)))) wpp.indicator <- function(what, ...) { data <- do.call(what, list(...)) if(is.null(data)) return(NULL) merge.with.un.and.melt(data, what=what) } wpp.by.year <- function(data, year) { data <- data[data$Year == year,] data$Year <- NULL data } wpp.by.country <- function(data, country) { data <- data[data$charcode == country,] data$charcode <- NULL data } wpp.by.countries <- function(data, countries) { data <- data[data$charcode %in% countries,] data } set.wpp.year <- function(wpp.year) { check.wpp.revision(wpp.year) # cleanup the environment for (item in ls(wpp.data.env)) { if(!(item %in% c('indicators'))) rm(list=item, envir=wpp.data.env) } data('iso3166', envir=wpp.data.env, package="wppExplorer") wpp.data.env$package <- paste('wpp', wpp.year, sep='') # Filter out non-used countries do.call('data', list("popM", package=wpp.data.env$package, envir=wpp.data.env)) wpp.data.env$iso3166 <- wpp.data.env$iso3166[is.element(wpp.data.env$iso3166$uncode, wpp.data.env$popM$country_code),] cat('\nDefault WPP package set to', wpp.data.env$package,'.\n') } get.wpp.year <- function() as.integer(substr(wpp.data.env$package, 4,7)) tpop <- function(...) { # Create a dataset of total population if.not.exists.load('popM') if.not.exists.load('popF') tpop <- sumMFbycountry(wpp.data.env$popM, wpp.data.env$popF) if(wpp.year.from.package.name(wpp.data.env$package) > 2010) { #projection stored separately from observations if.not.exists.load('popMprojMed') if.not.exists.load('popFprojMed') tpopp <- sumMFbycountry(wpp.data.env$popMprojMed, wpp.data.env$popFprojMed) tpop <- merge(tpop, tpopp, by='country_code') } tpop } tpopF <- function(...) return(tpop.sex('F')) tpopM <- function(...) return(tpop.sex('M')) tpop.sex <- function(sex) { # Create a dataset of total population dataset <- paste('pop', sex, sep='') pop <- load.dataset.and.sum.by.country(dataset) if(wpp.year.from.package.name(wpp.data.env$package) > 2010) { #projection stored separately from observations dataset <- paste('pop', sex, 'projMed', sep='') popp <- load.dataset.and.sum.by.country(dataset) pop <- merge(pop, popp, by='country_code') } pop } mig <- function(...) { # Create a dataset of net migration if(wpp.year.from.package.name(wpp.data.env$package) <2015) { # sex- and age-specific migration available if.not.exists.load('migrationM') if.not.exists.load('migrationF') return(sumMFbycountry(wpp.data.env$migrationM, wpp.data.env$migrationF)) } load.and.merge.datasets('migration', NULL) # total migration available } migrate <- function(...) 
{ migcounts <- mig() pop <- tpop() mergepop <- merge(migcounts[,'country_code', drop=FALSE], pop, sort=FALSE) ncols <- ncol(mergepop) cbind(country_code=mergepop$country_code, (migcounts[,2:ncol(migcounts)]*200.)/((mergepop[,3:ncols]+mergepop[,2:(ncols-1)])/2.)) } popagesex <- function(sexm, agem, ...){ age <- agem sex <- sexm if(is.null(age)) age <- '0-4' if(is.null(sex)) sex <- 'F' if(length(sex)==0 || length(age)==0) return(NULL) tpop <- tpopp <- NULL for(s in sex) { dataset.name <- paste('pop',s, sep='') if.not.exists.load(dataset.name) pop <- sum.by.country.subset.age(wpp.data.env[[dataset.name]], age) if(!is.null(tpop)){ tpop <- cbind(country_code=tpop[,'country_code'], tpop[,2:ncol(tpop)] + pop[,2:ncol(pop)]) } else tpop<-pop if(wpp.year.from.package.name(wpp.data.env$package) > 2010) { #projection stored separately from observations dataset.name <- paste('pop', s, 'projMed', sep='') if.not.exists.load(dataset.name) popp <- sum.by.country.subset.age(wpp.data.env[[dataset.name]], age) if(!is.null(tpopp)){ tpopp <- cbind(country_code=tpopp[,'country_code'], tpopp[,2:ncol(tpopp)] + popp[,2:ncol(popp)]) } else tpopp<-popp } } if(!is.null(tpopp)) tpop <- merge(tpop, tpopp, by='country_code') tpop } mortagesex <- function(sex, age, ...){ if(is.null(age)) age <- '0' if(is.null(sex)) sex <- 'F' dataset.name <- paste('mx',sex, sep='') if.not.exists.load(dataset.name) sum.by.country.subset.age(wpp.data.env[[dataset.name]], age) } fertage <- function(age, ...){ if(is.null(age)) age <- '15-19' if.not.exists.load('percentASFR') tfert <- fert() tfert <- cbind(country_code=tfert$country_code, tfert[,.get.year.cols.idx(tfert)]) asfr <- sum.by.country.subset.age(wpp.data.env[['percentASFR']], age) tfert <- tfert[tfert$country_code %in% asfr$country_code,] tfert <- tfert[match(asfr$country_code, tfert$country_code), ] # put rows in the same order #browser() cbind(country_code=tfert[,'country_code'], tfert[,2:ncol(tfert)] * asfr[,2:ncol(asfr)] / 100.) } pfertage <- function(agem, ...){ age <- agem if(is.null(age)) age <- '15-19' if.not.exists.load('percentASFR') sum.by.country.subset.age(wpp.data.env[['percentASFR']], age) } fert <- function(...) { name.pred <- if(wpp.data.env$package=='wpp2008') NULL else 'tfrprojMed' return(load.and.merge.datasets('tfr', name.pred)) } leF <- function(...) { name.pred <- if(wpp.data.env$package=='wpp2008') NULL else 'e0Fproj' return(load.and.merge.datasets('e0F', name.pred)) } leM <- function(...) { name.pred <- if(wpp.data.env$package=='wpp2008') NULL else 'e0Mproj' return(load.and.merge.datasets('e0M', name.pred)) } sexratio <- function(...) { return(load.and.merge.datasets('sexRatio', NULL)) } meanagechbear <- function(...) { # mean age of child bearing data <- load.and.merge.datasets('percentASFR', NULL) ddply(data[,-which(colnames(data) == "age")], "country_code", .fun=colwise(function(x) sum(seq(17.5, by=5, length=7)*x)/100.)) } .sum.popFM.keep.age <- function() { name.preds <- if(wpp.year.from.package.name(wpp.data.env$package) <= 2010) c(NULL, NULL) else c('popFprojMed', 'popMprojMed') pF <- load.and.merge.datasets('popF', name.preds[1], by=c('country_code', 'age'), remove.cols=c('country', 'name')) pM <- load.and.merge.datasets('popM', name.preds[2], by=c('country_code', 'age'), remove.cols=c('country', 'name')) cbind(country_code=pF[,1], pF[,-c(1,2)] + pM[,-c(1,2)]) } medage <- function(...) { ddply(.sum.popFM.keep.age(), "country_code", .fun=colwise(gmedian)) } meanageinchbearage <- function(...) 
{ ddply(.sum.popFM.keep.age(), "country_code", .fun=colwise(gmean.child.bearing)) } tdratio <- function(...) { ddply(.sum.popFM.keep.age(), "country_code", .fun=colwise(dependency.ratio, which='total')) } psratio <- function(...) { ddply(.sum.popFM.keep.age(), "country_code", .fun=colwise(function(x) 1/dependency.ratio(x, which='old'))) } chdratio <- function(...) { ddply(.sum.popFM.keep.age(), "country_code", .fun=colwise(dependency.ratio, which='child')) } oadratio <- function(...) { ddply(.sum.popFM.keep.age(), "country_code", .fun=colwise(dependency.ratio, which='old')) } popgrowth <- function(...) { pop <- tpop() ncols <- ncol(pop) #browser() cbind(country_code=pop$country_code, log(pop[,3:ncols]/pop[,2:(ncols-1)])/5) } .pi.suffix <- function(x) c(low='l', high='u')[x] fert.ci <- function(which.pi, bound, ...) { # which.pi is for '80', '95' or 'half.child' # bound is 'low' or 'high' if(wpp.data.env$package=='wpp2008') return(NULL) if(wpp.data.env$package=='wpp2010' && which.pi != 'half.child') return(NULL) dataset.name <- if(which.pi == 'half.child') paste0('tfrproj', capitalize(bound)) else paste0('tfrproj', which.pi, .pi.suffix(bound)) load.and.merge.datasets(dataset.name, NULL) } leF.ci <- function(which.pi, bound, ...) { e0.ci('F', which.pi, bound) } leM.ci <- function(which.pi, bound, ...) { e0.ci('M', which.pi, bound) } e0.ci <- function(sex, which.pi, bound) { if(wpp.year.from.package.name(wpp.data.env$package) <= 2010 || which.pi == 'half.child') return(NULL) load.and.merge.datasets(paste0('e0', sex, 'proj', which.pi, .pi.suffix(bound)), NULL) } tpop.ci <- function(which.pi, bound, ...) { # which.pi is for '80', '95' or 'half.child' # bound is 'low' or 'high' if(wpp.year.from.package.name(wpp.data.env$package) <= 2010) return(NULL) dataset.name <- if(which.pi == 'half.child') paste0('popproj', capitalize(bound)) else paste0('popproj', which.pi, .pi.suffix(bound)) load.and.merge.datasets(dataset.name, NULL) } popagesex.ci <- function(which.pi, bound, sexm, agem, ...) 
{ # bound is 'low' or 'high' if((wpp.year.from.package.name(wpp.data.env$package) <= 2010) || (length(sexm) > 1) || (length(agem) > 1) || (which.pi != 'half.child')) return(NULL) dataset.name <- paste('pop', sexm, 'proj', capitalize(bound), sep='') if.not.exists.load(dataset.name) sum.by.country.subset.age(wpp.data.env[[dataset.name]], agem) } load.dataset.and.sum.by.country<-function(dataset){ if.not.exists.load(dataset) pop <- sum.by.country(wpp.data.env[[dataset]]) } trim.spaces <- function (x) gsub("^\\s+|\\s+$", "", x) if.not.exists.load <- function(name) { if(exists(name, where=wpp.data.env, inherits=FALSE)) return() do.call('data', list(name, package=wpp.data.env$package, envir=wpp.data.env)) # special handling of the age column (mostly because of inconsistent labels in the various datasets) # trim spaces in age column if needed if('age' %in% colnames(wpp.data.env[[name]]) && is.factor(wpp.data.env[[name]]$age)) { levels(wpp.data.env[[name]]$age) <- trim.spaces(levels(wpp.data.env[[name]]$age)) # 'age' in the mx dataset should be numeric but includes 100+, so it's factor # replace by 100 and make it numeric levs <- levels(wpp.data.env[[name]]$age) if("5" %in% levs && "100+" %in% levs) { levels(wpp.data.env[[name]]$age)[levs == "100+"] <- "100" wpp.data.env[[name]]$age <- as.integer(as.character(wpp.data.env[[name]]$age)) } } } load.and.merge.datasets <- function(name.obs, name.pred=NULL, by='country_code', remove.cols=c('country', 'name')){ if.not.exists.load(name.obs) data <- wpp.data.env[[name.obs]] if(length(remove.cols) > 0) data <- data[,-which(colnames(data)%in%remove.cols)] if(!is.null(name.pred)){ # load predictions if.not.exists.load(name.pred) data.pred <- wpp.data.env[[name.pred]] if(length(remove.cols) > 0) data.pred <- data.pred[,-which(colnames(data.pred)%in%remove.cols)] data <- merge(data, data.pred, by=by, sort=FALSE) } data } lookupByIndicator <- function(indicator, sex.mult=c(), sex=c(), age.mult=c(), age=c()) { indicator <- as.numeric(indicator) fun <- ind.fun(indicator) # load observed data #if(fun == 'mortagesex') browser() if(!is.null(wpp.data.env[[fun]])) return(wpp.data.env[[fun]]) data <- wpp.indicator(fun, sexm=sex.mult, sex=sex, agem=age.mult, age=age) if(!ind.is.by.age(indicator)) wpp.data.env[[fun]] <- data data } lookupByIndicatorInclArea <- function(indicator, ...) { if (as.numeric(indicator) == 0) { env <- new.env() data('UNlocations', envir=env, package=wpp.data.env$package) iso <- wpp.data.env$iso3166 df <- merge(iso[iso$is.country,c('charcode', 'uncode')], env$UNlocations[,c('country_code','area_name')], by.x='uncode', by.y='country_code')[,-1] colnames(df)[2] <- .indicator.title.incl.area(0) return(df) } lookupByIndicator(indicator, ...) } lookupByIndicator.mchart <- function(indicator, ...) { exdf <- wpp.data.env$mchart.data name <- .indicator.title.incl.area(indicator[1], ...) iso <- wpp.data.env$iso3166 iso <- iso[iso$is.country,] if(!is.null(exdf) && name %in% colnames(exdf)) { exdf <- merge(iso[,c('charcode', 'name')], exdf) return(exdf[,-1]) } df <- lookupByIndicatorInclArea(indicator[1], ...) colnames(df)[which(colnames(df)=='value')] <- name if(!is.null(exdf)) df <- merge(exdf, df) if(length(indicator) > 1) { for (ind in 2:length(indicator)) { name <- .indicator.title.incl.area(indicator[ind], ...) 
if(name %in% colnames(df)) next if (as.numeric(indicator[ind]) == 0) { env <- new.env() data('UNlocations', envir=env, package=wpp.data.env$package) locs <- merge(iso[,c('charcode', 'uncode')], env$UNlocations[,c('country_code','area_name')], by.x='uncode', by.y='country_code')[,-1] #browser() df <- merge(df, locs, by='charcode') colnames(df)[which(colnames(df)=='area_name')] <- name } else { df <- merge(df, lookupByIndicator(indicator[ind], ...)) colnames(df)[which(colnames(df)=='value')] <- name } } } wpp.data.env$mchart.data <- df df <- merge(iso[,c('charcode', 'name')], df) return(df[,-1]) } .indicator.title.incl.area <- function(indicator, ...) { indicator <- as.numeric(indicator) if(indicator == 0) return('UN Areas') return(get.indicator.title(indicator, ...)) } .get.pi.name <- function(x) c('80', '95', 'half.child')[x] .get.pi.name.for.label <- function(x) c('80%', '95%', '1/2child')[x] getUncertainty <- function(indicator, which.pi, bound='low', sex.mult=c(), sex=c(), age.mult=c(), age=c()) { indicator <- as.numeric(indicator) if(!ind.is.low.high(indicator) && !ind.is.half.child(indicator)) return(NULL) if(length(which.pi) == 0) return(NULL) fun <- paste(ind.fun(indicator), 'ci', sep='.') all.data <- NULL for(i in 1:length(which.pi)) { pi.idx <- as.integer(which.pi[i]) pi.name <-.get.pi.name(pi.idx) lookup.name <- paste(fun, pi.name, bound, sep='.') if(!is.null(wpp.data.env[[lookup.name]])) data <- wpp.data.env[[lookup.name]] else { data <- wpp.indicator(fun, pi.name, bound=bound, sexm=sex.mult, sex=sex, agem=age.mult, age=age) if(is.null(data)) next if(!ind.is.by.age(indicator)) wpp.data.env[[lookup.name]] <- data } colnames(data) <- sub('value', paste0('value.', pi.idx), colnames(data)) all.data <- if(is.null(all.data)) data else merge(all.data, data, by=c('charcode', 'Year')) } all.data } .get.year.col.names <- function(col.names) { col.names <- gsub('.y', '', col.names, fixed=TRUE) l <- nchar(col.names) substr(col.names, l-3, l) } .get.year.cols.idx <- function(data, remove.duplicate.columns=TRUE) { year.cols.idx <- grep('[0-9]{4}$|[0-9]{4}.y$', colnames(data)) # if(remove.duplicate.columns) { # dupl.year <- duplicated(.get.year.col.names(colnames(data)[year.cols.idx]), fromLast=TRUE) # if(any(dupl.year)) year.cols.idx <- year.cols.idx[-which(dupl.year)] # } year.cols.idx } merge.with.un.and.melt <- function(data, id.vars='charcode', what=NULL) { year.cols.idx <- .get.year.cols.idx(data) year.cols <- colnames(data)[year.cols.idx] data <- merge(wpp.data.env$iso3166[,c('uncode', 'name', 'charcode')], data, by.x='uncode', by.y='country_code', sort=FALSE) data <- data[,-which(colnames(data)=='uncode')] data <- melt(data, id.vars = id.vars, measure.vars = year.cols, variable.name = 'Year', na.rm=TRUE) data$Year <- as.numeric(.get.year.col.names(as.character(data$Year))) #if(!is.null(what) && ind.mid.years(what)) # data$Year <- data$Year - 2 #browser() data } sum.by.country <- function(dataset) { year.cols.idx <- grep('^[0-9]{4}', colnames(dataset)) ddply(dataset[,c(which(colnames(dataset)=='country_code'), year.cols.idx)], "country_code", .fun=colwise(sum, na.rm=TRUE)) } sumMFbycountry <- function(datasetM, datasetF) { tpopM <- sum.by.country(datasetM) tpopF <- sum.by.country(datasetF) cbind(country_code=tpopM[,'country_code'], tpopM[,2:ncol(tpopM)] + tpopF[,2:ncol(tpopF)]) } sum.by.country.subset.age <- function(dataset, ages) { if('100+' %in% ages) ages <- c(ages, "100") sum.by.country(with(dataset, dataset[gsub("^\\s+|\\s+$", "", age) %in% ages,])) } preserveStructure <- 
function(dataFrame) { structure( lapply(names(dataFrame), function(name) {I(dataFrame[[name]])}), names=names(dataFrame) ) } ind.settings <- function() attr(wpp.data.env$indicators, 'settings') ind.fun <- function(indicator) rownames(ind.settings())[indicator] ind.is.by.age <- function(indicator) ind.settings()[indicator, 'by.age'] ind.is.low.high <- function(indicator) ind.settings()[indicator, 'low.high'] ind.is.half.child <- function(indicator) ind.settings()[indicator, 'half.child'] ind.no.age.sum <- function(indicator) ind.settings()[indicator, 'no.age.sum'] ind.sum.in.table <- function(indicator) ind.settings()[indicator, 'sum.in.table'] ind.mid.years <- function(indicator) ind.settings()[indicator, 'mid.years'] ind.digits <- function(indicator) ind.settings()[indicator, 'digits'] ind.has.negatives <- function(indicator) ind.settings()[indicator, 'has.negatives'] ind.definition <- function(indicator) attr(wpp.data.env$indicators, 'definition')[indicator] set.data.env <- function(name, value) wpp.data.env[[name]] <- value gmedian <- function(f, cats=NULL) { # group median if(is.null(cats)) cats <- seq(0, by=5, length=length(f)+1) nhalf <- sum(f, na.rm = TRUE)/2. cumsumf <- cumsum(f[!is.na(f)]) medcat <- findInterval(nhalf, cumsumf) + 1 med <- cats[medcat] + ((nhalf-cumsumf[medcat-1])/f[medcat])*(cats[medcat+1]-cats[medcat]) return(med) } gmean <- function (f, cats = NULL) { if (all(is.na(f))) return(NA) if (is.null(cats)) cats <- seq(0, by = 5, length = length(f) + 1) l <- min(length(cats), length(f) + 1) mid.points <- cats[1:(l - 1)] + (cats[2:l] - cats[1:(l - 1)])/2 counts <- f * mid.points return(sum(counts)/sum(f)) } gmean.child.bearing <- function(f) { # group mean of child bearing age return(gmean(f[4:10], cats=seq(15, by=5, length=8))) } dependency.ratio <- function(counts, which='total'){ nom <- 0 if(which %in% c('total', 'child')) nom <- nom + sum(counts[1:3]) if(which %in% c('total', 'old')) nom <- nom + sum(counts[14:21]) nom/sum(counts[4:13]) } get.pyramid.data <- function(year, countries, which.pi=NULL, bound=NULL, indicators=c(F='popF', M='popM'), load.pred=TRUE) { name.preds <- name.obs <- c(NULL, NULL) if(is.null(which.pi)) { name.obs <- indicators if(wpp.year.from.package.name(wpp.data.env$package) > 2010 && load.pred) name.preds <- paste(indicators, 'projMed', sep='') } else { #PIs # only +-half.child available if(wpp.year.from.package.name(wpp.data.env$package) > 2010 && 'half.child' %in% .get.pi.name(as.integer(which.pi))) name.obs <- paste(indicators, 'proj', capitalize(bound), sep='') } if(all(is.null(c(name.preds, name.obs)))) return(NULL) dataB <- list() for(i in 1:min(2,length(indicators))) { p <- load.and.merge.datasets(name.obs[i], name.preds[i], by=c('country_code', 'age'), remove.cols=c('country', 'name')) dataB[[i]] <- merge.with.un.and.melt(cbind(p, age.num=.get.age.num(p$age)), id.vars=c('charcode', 'age', 'age.num'), what=indicators[i]) dataB[[i]] <- cbind(dataB[[i]], sex=names(indicators)[i]) } data <- wpp.by.year(if(length(indicators) > 1) rbind(dataB[[1]], dataB[[2]]) else dataB[[1]], year) wpp.by.countries(data, countries) } .get.pASFR <- function(year, countries) { if.not.exists.load('percentASFR') asfr <- wpp.data.env[['percentASFR']] asfr <- asfr[,-which(is.element(colnames(asfr), c('country', 'name')))] asfrm <- wpp.by.countries(wpp.by.year( merge.with.un.and.melt(cbind(asfr, age.num=.get.age.num(asfr$age)), id.vars=c('charcode', 'age', 'age.num')), year), countries) asfrm } get.age.profile.fert <- function(year, countries){ asfrm <- 
.get.pASFR(year, countries) #browser() tfert <- fert() tfert <- cbind(country_code=tfert$country_code, tfert[,.get.year.cols.idx(tfert)]) tfertm <- wpp.by.countries(wpp.by.year( merge.with.un.and.melt(tfert, id.vars='charcode'), year), countries) colnames(tfertm)[2] <- 'tfr' data <- merge(asfrm, tfertm, by='charcode') data <- ddply(data, 'charcode', mutate, value = get("value")/100. * get("tfr")) data$tfr <- NULL data } get.age.profile.pfert <- function(year, countries){ .get.pASFR(year, countries) } get.indicator.title <- function(indicator, sex.mult=c(), sex=c(), age.mult=c(), age=c()) { indicator <- as.numeric(indicator) title <- names(wpp.data.env$indicators)[indicator] if (!ind.is.by.age(indicator)) return(title) if(ind.no.age.sum(indicator)){ sex.string <- paste('sex: ', sex, sep='') age.string <- paste('age: ', age, sep='') } else { # multiple sex and age groups possible sex.string <- paste('sex: ', if(length(sex.mult)>1) 'Both' else sex.mult, sep='') age.string <- paste('age: ', paste(age.mult, collapse=', '), sep='') } return(paste(title, sex.string, age.string, sep='; ')) } .get.age.num <- function(age) { # Return numeric version of the age, either its index or its numeric value aorder <- .get.age.order() #browser() if(any(!(age %in% names(aorder)))) return(age) aorder[as.character(age)] } .get.age.order <- function() { age <- c(paste(seq(0, by=5, length=20), seq(4, by=5, length=20), sep='-'), '100+') age.array <- 1:21 names(age.array) <- age age.array }
/scratch/gouwar.j/cran-all/cranData/wppExplorer/R/wpp.R
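One step in wpp.R worth spelling out: migrate() converts 5-year net-migration counts into an annual rate per thousand, i.e. rate = 1000 * (counts / 5) / mean(P_t, P_{t-1}), which is the counts * 200 / average-population expression in the code. A toy check with hypothetical numbers:
counts <- 50                                  # net migrants in thousands over a 5-year period
pop0 <- 9900; pop1 <- 10100                   # population (thousands) at the period's endpoints
(counts * 200) / ((pop0 + pop1) / 2)          # 1.0 per thousand per year, as computed in migrate()
(counts / 5) * 1000 / ((pop0 + pop1) / 2)     # the same rate written out step by step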
library(wppExplorer) library(reshape2) library(googleVis) library(plyr) library(ggplot2) shinyServer(function(input, output, session) { ind.has.uncertainty <- function(ind) ((wppExplorer:::ind.is.low.high(ind) || wppExplorer:::ind.is.half.child(ind)) && wppExplorer:::get.wpp.year()>2010) observe({ # disable log scale button if migration indicator (because of negatives) shinyjs::toggleState("trend.logscale", !has.negatives.indicator()) }) observe({ ind.num <- as.integer.ind() has.uncertainty <- ind.has.uncertainty(ind.num) shinyjs::toggle(id = "uncertainty", anim = TRUE, condition=has.uncertainty) if(has.uncertainty) { selected.choices <- input$uncertainty available.choices <- structure(as.character(1:3), names=c('80%', '95%', '+-1/2child')) uncertainty.choices <- c() if(wppExplorer:::ind.is.low.high(ind.num)) uncertainty.choices <- available.choices[1:2] if(wppExplorer:::ind.is.half.child(ind.num)) uncertainty.choices <- c(uncertainty.choices, available.choices[3]) selected <- uncertainty.choices[uncertainty.choices %in% selected.choices] updateSelectInput(session, 'uncertainty', choices=uncertainty.choices, selected=if(length(selected) > 0) selected else uncertainty.choices[1]) } }) observe({ # switch log scale button to FALSE if migration or growth indicator (because of negatives) if(input$trend.logscale && has.negatives.indicator()) updateCheckboxInput(session, "trend.logscale", value = FALSE) }, priority=10) as.integer.ind <- reactive({as.integer(input$indicator)}) indicatorData <- reactive({ wppExplorer:::lookupByIndicator(input$indicator, input$indsexmult, input$indsex, input$selagesmult, input$selages) }) indicator.fun <- reactive({ wppExplorer:::ind.fun(as.integer.ind()) }) has.negatives.indicator <- reactive({ wppExplorer:::ind.has.negatives(as.integer.ind()) }) indicatorDataLow <- reactive({ wppExplorer:::getUncertainty(input$indicator, input$uncertainty, 'low', input$indsexmult, input$indsex, input$selagesmult, input$selages) }) indicatorDataHigh <- reactive({ wppExplorer:::getUncertainty(input$indicator, input$uncertainty, 'high', input$indsexmult, input$indsex, input$selagesmult, input$selages) }) data <- reactive({ wpp.by.year(indicatorData(), input$year) }) rangeForAllYears <- reactive({ range(indicatorData()$value, na.rm=TRUE) }) pyramid.data <- reactive({ wppExplorer:::get.pyramid.data(input$year, input$seltcountries) }) pyramid.data.low <- reactive({ wppExplorer:::get.pyramid.data(input$year, input$seltcountries, input$uncertainty, bound='low') }) pyramid.data.high <- reactive({ wppExplorer:::get.pyramid.data(input$year, input$seltcountries, input$uncertainty, bound='high') }) age.profile.mortM <- reactive({ wppExplorer:::get.pyramid.data(input$year, input$seltcountries, indicators=c(M='mxM'), load.pred=FALSE) }) age.profile.mortF <- reactive({ wppExplorer:::get.pyramid.data(input$year, input$seltcountries, indicators=c(F='mxF'), load.pred=FALSE) }) age.profile.popM <- reactive({ wppExplorer:::get.pyramid.data(input$year, input$seltcountries, indicators=c(M='popM')) }) age.profile.popF <- reactive({ wppExplorer:::get.pyramid.data(input$year, input$seltcountries, indicators=c(F='popF')) }) age.profile.fert <- reactive({ wppExplorer:::get.age.profile.fert(input$year, input$seltcountries) }) age.profile.pfert <- reactive({ wppExplorer:::get.age.profile.pfert(input$year, input$seltcountries) }) data.env <- function() wppExplorer:::wpp.data.env year.range <- reactiveValues(min=1955, max=2100) output$yearUI <- renderUI({ sliderInput('year', h5('Year:'), sep="", 
animate=animationOptions(interval = 1000), min=isolate(year.range$min), max=isolate(year.range$max), value = 2020, step=5) }) observe({ # update the year slider if the indicator changes and its data have a different time range if(is.null(input$year)) return(NULL) data <- indicatorData() if(nrow(data)==0) return(NULL) yearRange <- range(data$Year) if(year.range$min == yearRange[1]) return(NULL) isolate(year.range$min <- yearRange[1]) value <- max(yearRange[1], isolate(input$year)) updateSliderInput(session, "year", min=yearRange[1], value=value) }) output$indicatorDesc <- renderText({ paste0("<small>", wppExplorer:::ind.definition(as.integer.ind()), "</small>") }) output$uncertaintyNote <- renderText({ if(ind.has.uncertainty(as.integer.ind())) return("") "No uncertainty available for this indicator." }) year.output <- reactive(paste('Year:', input$year)) output$mapyear <- renderText(year.output()) #output$year1 <- renderText(year.output()) output$year2 <- renderText(year.output()) output$year3 <- renderText(year.output()) get.country.charcodes <- function() return(data.env()$iso3166[data.env()$iso3166$is.country, 'charcode']) output$map <- reactive({ if (is.null(input$year)) return(NULL) df <- data() if (nrow(df) == 0) return(NULL) #df <- cbind(df, hover=rep('xxx', nrow(df))) country.codes <- get.country.charcodes() df <- df[df$charcode %in% country.codes,] has.negatives <- has.negatives.indicator() normalize <- input$normalizeMapAndCountryPlot options <- NULL # available projections that can go into options # projection="kavrayskiy-vii" #mercator, albers, lambert and kavrayskiy-vii if (normalize || has.negatives) { # fixed color scale options <- list(colorAxis = list()) if (has.negatives) options$colorAxis$colors=c("red", "lightgrey", "green") if(normalize) { inddata <- indicatorData() vranges <- range(inddata[inddata$charcode %in% country.codes, 'value']) } else vranges <- range(df$value) if (has.negatives) { maxranges <- max(abs(vranges)) vranges <- c(-maxranges, maxranges) } options$colorAxis$minValue <- vranges[1] options$colorAxis$maxValue <- vranges[2] } list(data = df, options=options) }) output$mapgvis <- renderGvis({ if (is.null(input$year)) return(NULL) df <- data() if (nrow(df) == 0) return(NULL) #browser() col <- c('0x0000CC', '0x00CCFF', '0x33FF66', '0xFFFF66', '0xFF9900', '0xFF3300') gvisGeoChart(df, locationvar="charcode", #numvar="value", #hovervar="value", colorvar="value", chartid="map", options=list(height=500, width=900, dataMode='regions'#, #colors=paste('[', paste(col, collapse=', '), ']' )) }) output$countryPlot <- renderPlot({ if (is.null(input$map_selection)) return(NULL) data <- indicatorData() data.l <- indicatorDataLow() data.h <- indicatorDataHigh() df <- wpp.by.country(data, input$map_selection) low <- wpp.by.country(data.l, input$map_selection) high <- wpp.by.country(data.h, input$map_selection) idx.col.val <- grep('\\.', colnames(low)) ylim <- range(c(df$value, low[,idx.col.val], high[,idx.col.val]), na.rm=TRUE) if (input$normalizeMapAndCountryPlot) { idx.col.val.all <- grep('\\.', colnames(data.l)) ylim <- range(c(data$value, data.l[,idx.col.val.all], data.h[,idx.col.val.all]), na.rm=TRUE) } plot(df, type='n', ylim=ylim) title(main = paste(data.env()$iso3166$name[data.env()$iso3166$charcode==input$map_selection])) lines(df$Year, df$value, type='l') if(!is.null(low)) { ipres <- which.max(df$Year[df$Year<min(low$Year)]) for(i in 3:1) { idx <- grep(paste0('\\.',i), colnames(low)) if(length(idx)==0) next lines(c(df$Year[ipres], low$Year), c(df[ipres, 'value'], 
low[,idx]), lty=i+1) lines(c(df$Year[ipres], high$Year),c(df[ipres, 'value'], high[,idx]), lty=i+1) } } abline(v=input$year, col=3, lty=3) }) # output$table <- renderTable({ # iso <- data.env()$iso3166 # if(!input$includeAggr1) iso <- iso[iso$is.country,] # data <- merge(data(), iso[,c('charcode', 'name')], by='charcode')# # data[,c('charcode', 'name', 'value')] # }, include.rownames = FALSE, include.colnames = FALSE) get.data.for.tabletab <- reactive({ year.data <- data() if(nrow(year.data)==0) invalidateLater(1000, session) iso <- data.env()$iso3166 if(!input$includeAggr2) iso <- iso[iso$is.country,] year.data <- merge(year.data, iso[,c('charcode', 'uncode', 'name')], by='charcode') low <- indicatorDataLow() data <- cbind(year.data[,c('charcode', 'uncode', 'name', 'value')], rank=rank(year.data$value)) # add rank column if(!is.null(low)) { # add intervals data.l <- wpp.by.year(low, input$year) if(nrow(data.l) > 0) { data.h <- wpp.by.year(indicatorDataHigh(), input$year) for(i in 1:3) { colnames(data.l) <- sub(paste0('value.',i), paste('low', wppExplorer:::.get.pi.name.for.label(i)), colnames(data.l)) colnames(data.h) <- sub(paste0('value.',i), paste('high', wppExplorer:::.get.pi.name.for.label(i)), colnames(data.h)) } ncoldata <- ncol(data) data <- merge(data, data.l, by='charcode') data <- merge(data, data.h, by='charcode') # rearrange, so that columns corresponding to (low, high) pairs is always beside one another if (ncol(data.l) > 2) { l <- ncol(data.l) - 1 col.idx <- matrix(1:(2*l), nrow=l) col.idx <- as.vector(t(col.idx)) data <- data[,c(1:ncoldata, col.idx+ncoldata)] } } } colnames(data)[1] <- 'id' colnames(data)[2] <- 'UN' data }) output$stable <- DT::renderDataTable({ get.data.for.tabletab() }) output$download <- downloadHandler( filename <- function() { paste0(indicator.fun(), "_", input$year, ".csv") }, content <- function(file) { tbl <- get.data.for.tabletab() write.csv(tbl, file, row.names = FALSE) } ) output$ghist <- renderGvis({ data <- data() if(is.null(data) || nrow(data)<=0) return(NULL) # filter out aggregations data <- merge(data[,c('charcode', 'value')], data.env()$iso3166[data.env()$iso3166$is.country, c('charcode', 'name'), drop=FALSE], by='charcode') data <- data[,c('name', 'value')] xlim <- if(input$fiXscaleHist) rangeForAllYears() else range(data$value, na.rm=TRUE) #browser() options <- list(title=paste(input$year), legend="{ position: 'none' }", colors="['green']", height="500px", histogram=paste0("{bucketSize: ", diff(xlim)/30, "}")) #width="900px", digits <- wppExplorer:::ind.digits(as.integer.ind()) options$hAxis <- paste0("{maxAlternation: 1, minValue:", xlim[1], ", maxValue:", xlim[2], ", ticks: [", paste(unique(round(seq(xlim[1], xlim[2], length=30), digits)), collapse=', '), "]}") gvisHistogram(data, options=options) }) output$hist <- renderPlot({ data <- data() if(is.null(data) || nrow(data)<=0) return(NULL) # filter out aggregations data <- merge(data[,c('charcode', 'value')], data.env()$iso3166[data.env()$iso3166$is.country, 'charcode', drop=FALSE], by='charcode')$value xlim <- if(input$fiXscaleHist) rangeForAllYears() else range(data, na.rm=TRUE) binw <- diff(xlim)/20 qplot(data()$value, binwidth=binw, xlim=c(xlim[1]-binw/2, xlim[2]+binw/2), xlab='value') }) output$ageselection <- renderUI({ if(indicator.fun() %in% c('fertage', 'pfertage')){ ages <- paste(seq(15, by=5, length=7), seq(19, by=5, length=7), sep='-') } else { if(indicator.fun()=='mortagesex') ages <- c(0,1,seq(5, by=5, length=19), "100+") else ages <- c(paste(seq(0, by=5, length=20), 
seq(4, by=5, length=20), sep='-'), "100+") } if (wppExplorer:::ind.no.age.sum(as.integer.ind())) { # no multiple choices allowed multiple <- FALSE name <- 'selages' selected<-NULL } else { multiple <- TRUE name <- 'selagesmult' selected <- ages[1] } selectInput(name, 'Age:', ages, multiple=multiple, selected=selected, selectize = FALSE) }) output$sexselection <- renderUI({ choices<-if(indicator.fun() %in% c('fertage', 'pfertage')) c(Female="F") else c(Female="F", Male="M") if(wppExplorer:::ind.no.age.sum(as.integer.ind())){ multiple <- FALSE selected <- NULL name <- 'indsex' } else { multiple <- TRUE selected <- choices name <- 'indsexmult' } selectInput(name, 'Sex:', choices=choices, selected=selected, selectize = FALSE, multiple=multiple) }) output$cselection <- renderUI({ o <- order(data.env()$iso3166[,'name']) codes <- as.character(data.env()$iso3166[o,'charcode']) names <- paste(codes, data.env()$iso3166[o,'name']) countries <- structure( codes, names = names ) do.call('selectInput', list('seltcountries', 'Select countries/areas:', countries, multiple=TRUE, selectize = FALSE, selected=countries[1] #names[1] )) }) cast.profile.data <- function(data) { vrange <- range(data$value, na.rm=TRUE) hrange <- if(is.element('15-19', data$age)) c(0, length(unique(data$age))) else range(data$age) #browser() data <- dcast(data, age.num + age ~ charcode, mean) data$age.num <- NULL list(casted=data, hrange=hrange, vrange=vrange) } filter.trend.data <- function(data, countries, cast=TRUE){ data <- wpp.by.countries(data, countries) if(is.null(data) || nrow(data) <= 0) return(NULL) hrange <- range(data$Year, na.rm=TRUE) vrange <- range(data[,grep('value', colnames(data))], na.rm=TRUE) if(cast) data <- dcast(data, Year ~ charcode, mean) list(casted=data, hrange=hrange, vrange=vrange) } get.trends <- reactive({ if(is.null(input$seltcountries)) return(NULL) filter.trend.data(indicatorData(), input$seltcountries) }) get.age.profiles <- reactive({ if(is.null(input$seltcountries)) return(NULL) filter.age.profiles(indicatorData(), input$seltcountries, input$year) }) get.trends.nocast <- reactive({ if(is.null(input$seltcountries)) return(NULL) filter.trend.data(indicatorData(), input$seltcountries, cast=FALSE) }) get.trends.low <- reactive({ if(is.null(input$seltcountries)) return(NULL) filter.trend.data(indicatorDataLow(), input$seltcountries, cast=FALSE) }) get.trends.high <- reactive({ if(is.null(input$seltcountries)) return(NULL) filter.trend.data(indicatorDataHigh(), input$seltcountries, cast=FALSE) }) output$trends <- reactive({ data <- get.trends() if(is.null(data)) return(data) list(data = wppExplorer:::preserveStructure(data$casted), options = list( hAxis = list(viewWindowMode = 'explicit', viewWindow = list( min = data$hrange[1], max = data$hrange[2] ), format="####"), vAxis = list(viewWindowMode = 'explicit', viewWindow = list( min = data$vrange[1], max = data$vrange[2] ), logScale=input$median.logscale) ) ) }) show.age.profile <- function(sex, fun, logscale=FALSE, year) { if(fun=='mortagesex') data <- do.call(paste0('age.profile.mort', sex), list()) else { if(fun %in% c('tpop', 'tpopF', 'tpopM', 'popagesex')) data <- do.call(paste0('age.profile.pop', sex), list()) else { if(fun %in% c('fert', 'fertage') && sex=='F') { data <- age.profile.fert() } else { if(fun == 'pfertage' && sex=='F') { data <- age.profile.pfert() } else { return(list(data=data.frame(age=c(0,0), v=c(0,0)), #data.frame(age=seq(0,100, by=5), value=rep(0, 21)), options=list(title=paste(c(F='Female', M='Male')[sex], ': No age profiles 
for this indicator.'), legend= list(position="none"), hAxis = list(viewWindow = list(min=-1, max=1)), vAxis = list(viewWindow = list(min=-1, max=1)) ))) } } } } if(is.null(data)) return(NULL) data <- cast.profile.data(data) list(data = wppExplorer:::preserveStructure(data$casted), options = list( hAxis = list(slantedText=fun!='mortagesex', viewWindowMode = 'explicit', viewWindow = list( min = data$hrange[1], max = data$hrange[2])), #), format="####"), vAxis = list(viewWindowMode = 'explicit', viewWindow = list( min = data$vrange[1], max = data$vrange[2] ), logScale=logscale), legend = list(position="right"), title = paste(year, c(F='Female', M='Male')[sex]) ) ) } output$age.profileM <- reactive({ show.age.profile('M', indicator.fun(), input$aprofile.logscale, input$year) }) output$age.profileF <- reactive({ show.age.profile('F', indicator.fun(), input$aprofile.logscale, input$year) }) reset.trend.data <- function() ggplot.data$trends <- NULL reset.pyramid.data <- function() ggplot.data$pyramid <- NULL ggplot.data <- reactiveValues(trends=NULL, pyramid=NULL) trends.ranges <- reactiveValues(year=NULL, value=NULL) output$probtrends <- renderPlot({ reset.trend.data() data <- get.trends.nocast() if(is.null(data)) return(data) data <- data$casted low <- get.trends.low() if(!is.null(low)) { high <- get.trends.high() colnames(low$casted) <- sub('value', 'low', colnames(low$casted)) colnames(high$casted) <- sub('value', 'high', colnames(high$casted)) low.high <- merge(low$casted, high$casted, by=c('charcode', 'Year')) #browser() #colnames(low.high)[3:4] <- c('low', 'high') min.year <- min(low.high$Year, na.rm=TRUE) data <- merge(data, low.high, by=c('charcode', 'Year'), all=TRUE) idx <- which(data$Year == min.year-5) for (col in grep('low|high', colnames(data))) data[idx,col] <- data$value[idx] } if(!is.null(trends.ranges$year)) { data.zoom <- subset(data, Year >= trends.ranges$year[1] & Year <= trends.ranges$year[2] & value >= trends.ranges$value[1] & value <= trends.ranges$value[2]) if(nrow(data.zoom) == 0) { trends.ranges$year <- trends.ranges$value <- NULL } else data <- data.zoom } g <- ggplot(data, aes(x=Year,y=value,colour=charcode, fill=charcode)) + geom_line() + theme(legend.title=element_blank()) if (input$trend.logscale && !has.negatives.indicator()) g <- g + coord_trans(y="log2") isolate(ggplot.data$trends <- data) if(!is.null(low)) { line.data <- NULL for(i in 3:1) { idx <- grep(paste0('\\.',i), colnames(data)) if(length(idx)==0) next g <- g + geom_ribbon(aes_string(ymin=colnames(data)[idx][1], ymax=colnames(data)[idx][2], fill="charcode", colour="charcode", linetype=NA), alpha=c(0.3, 0.2, 0.1)[i]) if(!is.null(line.data)) colnames(line.data) <- c('charcode', 'Year', 'low', 'high', 'variant') line.data <- rbind(line.data, setNames(cbind(data[,c(1,2,idx)], wppExplorer:::.get.pi.name.for.label(i)), colnames(line.data))) tmp1 <- tmp2 <- data tmp1[,'value'] <- data[,colnames(data)[idx][1]] tmp2[,'value'] <- data[,colnames(data)[idx][2]] isolate(ggplot.data$trends <- rbind(ggplot.data$trends, tmp1, tmp2)) } colnames(line.data) <- c('charcode', 'Year', 'low', 'high', 'variant') g <- g + geom_line(data=line.data, aes(y=low, linetype=variant, colour=charcode)) g <- g + geom_line(data=line.data, aes(y=high, linetype=variant, colour=charcode)) g <- g + scale_linetype_manual(values=c("80%"=2, '1/2child'=4, "95%"=3), na.value=0) } isolate(ggplot.data$trends <- merge(ggplot.data$trends, data.env()$iso3166[,c('charcode', 'name')], by='charcode', sort=FALSE)) g }) observeEvent(input$probtrends_zoom, { 
trends.ranges$year <- c(round(input$probtrends_zoom$xmin,0), round(input$probtrends_zoom$xmax,0)) trends.ranges$value <- c(input$probtrends_zoom$ymin, input$probtrends_zoom$ymax) }) observeEvent(input$probtrends_zoom_reset, { trends.ranges$year <- NULL trends.ranges$value <- NULL }) output$probtrends_selected <- renderText({ if(is.null(input$probtrends_values)) return(" ") selected <- nearPoints(ggplot.data$trends, input$probtrends_values, maxpoints = 1, threshold=10) if(nrow(selected) == 0) return(" ") paste0("Year = ", selected$Year, ', Value = ', round(selected$value,3), ", Country: ", selected$name) }) .is.pyramid.indicator <- function() return(indicator.fun() %in% c('tpop', 'tpopF', 'tpopM', 'popagesex')) .plot.no.pyramid <- function() { if(!.is.pyramid.indicator()) { df <- data.frame(x=0, y=0, lab='No pyramid data for this indicator.') g <- ggplot(df, aes(x=x, y=y, label=lab)) + geom_text() + scale_y_continuous(name='') + scale_x_continuous(name='') print(g) return(TRUE) } FALSE } .get.prop.data <- function(data, tpop) { tpop <- wppExplorer::wpp.by.countries(wppExplorer::wpp.by.year(tpop, input$year), input$seltcountries) colnames(tpop)[2] <- 'tpop' data <- merge(data, tpop, by='charcode') data <- ddply(data, 'charcode', mutate, value = value/tpop) data$tpop <- NULL data } .get.pyramid.data <- reactive({ #.get.pyramid.data <- function(proportion=FALSE) { proportion <- input$proppyramids data <- pyramid.data() if(proportion) { tpop <- wppExplorer::wpp.indicator('tpop') data <- .get.prop.data(data, tpop) } low <- pyramid.data.low() if(!is.null(low) && nrow(low)>0) { high <- pyramid.data.high() if(proportion) { #browser() which.pi <- wppExplorer:::.get.pi.name(as.integer(input$uncertainty)) which.pi <- if('half.child' %in% which.pi) 'half.child' else NULL # currently only half child for pyramid available if(!is.null(which.pi)) { tpop <- wppExplorer::wpp.indicator('tpop.ci', which.pi=which.pi, bound='low') low <- .get.prop.data(low, tpop) lowval <- low$value tpop <- wppExplorer::wpp.indicator('tpop.ci', which.pi=which.pi, bound='high') high <- .get.prop.data(high, tpop) low$value <- pmin(low$value, high$value, na.rm=TRUE) high$value <- pmax(high$value, lowval, na.rm=TRUE) } else low <- NULL } if(!is.null(low)) { low.high <- merge(low, high, by=c('charcode', 'age', 'age.num', 'sex'), sort=FALSE) colnames(low.high)[5:6] <- c('low', 'high') data <- merge(data, low.high, all=TRUE, sort=FALSE) } } data <- data[order(data$age.num),] data }) .print.pyramid <- function(data) { data.range <- range(abs(data$value), na.rm=TRUE) g <- ggplot(data, aes(y=value, x=reorder(age, age.num), group=charcode, colour=charcode)) + geom_line(data=subset(data, sex=='F')) + geom_line(data=subset(data, sex=='M')) + scale_x_discrete(name="") + scale_y_continuous(labels=function(x)abs(x)) + coord_flip() + ggtitle(input$year) + theme(legend.title=element_blank()) g <- g + geom_text(data=NULL, y=-data.range[2]/2, x=20, label="Male", colour='black') g <- g + geom_text(data=NULL, y=data.range[2]/2, x=20, label="Female", colour='black') g <- g + geom_hline(yintercept = 0) if(is.element('low', colnames(data))) { g <- g + geom_ribbon(data=subset(data, sex=='F'), aes(ymin=low, ymax=high, linetype=NA), alpha=0.3) g <- g + geom_ribbon(data=subset(data, sex=='M'), aes(ymin=high, ymax=low, linetype=NA), alpha=0.3) line.data <- cbind(data, variant=wppExplorer:::.get.pi.name.for.label(3)) # only half-child variant available g <- g + geom_line(data=subset(line.data, sex=='F'), aes(y=low, linetype=variant, colour=charcode, 
group=charcode)) # female low g <- g + geom_line(data=subset(line.data, sex=='F'), aes(y=high, linetype=variant, colour=charcode, group=charcode)) # female high g <- g + geom_line(data=subset(line.data, sex=='M'), aes(y=low, linetype=variant, colour=charcode, group=charcode)) # male low g <- g + geom_line(data=subset(line.data, sex=='M'), aes(y=high, linetype=variant, colour=charcode, group=charcode)) # male high g <- g + scale_linetype_manual(values=c("80%"=2, '1/2child'=4, "95%"=3), na.value=0) tmp1 <- tmp2 <- data tmp1[,'value'] <- data[,'low'] tmp2[,'value'] <- data[,'high'] isolate(ggplot.data$pyramid <- rbind(ggplot.data$pyramid, tmp1, tmp2)) } g } pyramid.ranges <- reactiveValues(age=NULL, value=NULL) output$pyramids <- renderPlot({ if(.plot.no.pyramid()) return() reset.pyramid.data() #data <- .get.pyramid.data(proportion=input$proppyramids) data <- .get.pyramid.data() male.idx <- which(data$sex=='M') for(col in c('value', 'low', 'high')) if(!is.null(data[[col]])) data[[col]][male.idx] <- -data[[col]][male.idx] if(!is.null(pyramid.ranges$age)) { #print(input$pyramid_zoom) data.zoom <- subset(data, age.num >= pyramid.ranges$age[1] & age.num <= pyramid.ranges$age[2] & value >= pyramid.ranges$value[1] & value <= pyramid.ranges$value[2]) if(nrow(data.zoom) == 0) { pyramid.ranges$age <- pyramid.ranges$value <- NULL } else data <- data.zoom } isolate(ggplot.data$pyramid <- data) g <- .print.pyramid(data) isolate(ggplot.data$pyramid <- merge(ggplot.data$pyramid, data.env()$iso3166[,c('charcode', 'name')], by='charcode', sort=FALSE)) g }) observeEvent(input$pyramid_zoom, { pyramid.ranges$age <- c(round(input$pyramid_zoom$ymin,0), round(input$pyramid_zoom$ymax,0)) pyramid.ranges$value <- c(input$pyramid_zoom$xmin, input$pyramid_zoom$xmax) }) observeEvent(input$pyramid_zoom_reset, { pyramid.ranges$age <- NULL pyramid.ranges$value <- NULL }) output$pyramid_selected <- renderText({ if(!.is.pyramid.indicator()) return(" ") if(length(input$pyramid_values)==0) return(" ") selected <- nearPoints(ggplot.data$pyramid, input$pyramid_values, xvar='value', yvar='age.num', maxpoints = 1, threshold=10) if(nrow(selected) == 0) return(" ") paste0(if(selected$value < 0) "Male " else "Female ", selected$age, ', Value = ', round(abs(selected$value),3), ", Country: ", selected$name) }) # .get.digits <- reactive({ # print(wppExplorer:::ind.digits(as.integer.ind())) # wppExplorer:::ind.digits(as.integer.ind()) # }) # format_num <- function(col, digits) { # format <- paste0("%.", digits, 'f') # if (is.numeric(col)) sprintf(format, col) # else col # } output$trendstable <- renderTable({ data <- get.trends() if(is.null(data)) return(data) df <- as.data.frame(data$casted[,-1]) if(ncol(df) > 1) { if(wppExplorer:::ind.sum.in.table(as.integer.ind())) { df <- cbind(df, rowSums(df)) colnames(df)[ncol(df)] <- 'Sum' } } else colnames(df) <- input$seltcountries # one country selected df <- t(df) #df <- t(as.data.frame(lapply(df, format_num, digits=wppExplorer:::ind.digits(as.integer.ind())))) # df <- t(data$casted[,-1]) # remove year column #browser() colnames(df) <- as.integer(data$casted[,'Year']) df }, include.rownames = TRUE) output$trendstabletitle <- renderText({ wppExplorer:::get.indicator.title(input$indicator, input$indsexmult, input$indsex, input$selagesmult, input$selages) }) all.data <- reactive({ if(is.null(wppExplorer:::wpp.data.env$mchart.data)) { inds <- unique(c(input$indicator, 1,2,0,4))[1:4] } else inds <- input$indicator wppExplorer:::lookupByIndicator.mchart(inds, input$indsexmult, input$indsex, 
input$selagesmult, input$selages) }) output$graphgvis <- renderGvis({ # Take a dependency on input$AddIndicator button input$AddIndicator df <- isolate(all.data()) #browser() gvisMotionChart(df, idvar="name", timevar="Year", #xvar="xvalue", yvar="yvalue", colorvar="UN Areas", #sizevar="zvalue", options=list(width=700, height=600)) }) output$AddIndicatorText <- renderText({"\nAdd indicator from the left panel\nto chart axes:*"}) })
/scratch/gouwar.j/cran-all/cranData/wppExplorer/inst/explore/server.R
library(shinythemes) geochartPrereqs <- tagList( tags$head( tags$script(src="https://www.google.com/jsapi"), tags$script(src="geochart.js") ) ) geochart <- function(id, options=list()) { tags$div(id=id, class="shiny-geochart-output", `data-options`=RJSONIO::toJSON(options)) } googleLineChart <- function(id, options=list()) { tags$div(id=id, class="google-linechart-output", `data-options`=RJSONIO::toJSON(options)) } googleHistogram <- function(id, options=list()) { tags$div(id=id, class="google-histogram-output", `data-options`=RJSONIO::toJSON(options)) } shinyUI( fluidPage(theme = shinytheme("yeti"), titlePanel(paste("WPP", wppExplorer:::get.wpp.year(), "Explorer"), title = HTML(paste("<h2>WPP", wppExplorer:::get.wpp.year(), "Explorer</h2><h5>Exploratory interface to the UN's World Population Projections</h5>") ) ), sidebarLayout( sidebarPanel( shinyjs::useShinyjs(), geochartPrereqs, tags$head( #tags$style(type="text/css", ".jslider { max-width: 50px; }"), #tags$style(type='text/css', ".well { padding: 0px; margin-bottom: 5px; max-width: 100px; }"), tags$style(type='text/css', ".span4 { max-width: 270px; }") ), uiOutput('yearUI'), hr(), selectInput('indicator', h5('Indicator:'), wppExplorer:::wpp.data.env$indicators), conditionalPanel(condition=paste("input.indicator >", sum(attr(wppExplorer:::wpp.data.env$indicators, "settings")$by.age == FALSE)), tags$head(tags$style(type="text/css", "#selagesmult { height: 150px; width: 85px}"), tags$style(type="text/css", "#selages { width: 85px}"), tags$style(type="text/css", "#indsexmult { height: 55px; width: 95px}"), tags$style(type="text/css", "#indsex { width: 95px}") ), fluidRow( column(4, offset=2, uiOutput('sexselection')), column(3, offset=1, uiOutput('ageselection')) ) ), htmlOutput('indicatorDesc'), hr(), selectInput('uncertainty', h5('Uncertainty:'), structure(as.character(1:3), names=c('80%', '95%', '+-1/2child')), multiple=TRUE, selected=1), textOutput('uncertaintyNote'), #shinythemes::themeSelector(), hr(), HTML("<p><small><b>Data Source:</b> United Nations, Department of Economic and Social Affairs, Population Division: <a href='http://population.un.org/wpp' target='_blank'>World Population Prospects 2019</a>. 
Made available under a <a href='http://creativecommons.org/licenses/by/3.0/igo'>Creative Commons license CC BY 3.0 IGO</a>.</small></p>"), HTML("<p><small><b>Methodology:</b> <a href='https://www.un.org/development/desa' target='_blank'>UN DESA</a> and <a href='http://bayespop.csss.washington.edu' target='_blank'>BayesPop research group</a> at University of Washington supported by <a href='https://www.nichd.nih.gov' target='_blank'>NICHD</a> (<a href='https://www.stat.washington.edu/raftery' target='_blank'>Adrian Raftery</a>, PI).</small></p>"), HTML("<p><small><b>User Interface:</b> Hana &#352;ev&#269;&#237;kov&#225;, <a href='http://bayespop.csss.washington.edu' target='_blank'>BayesPop research group</a>, <a href='https://www.csss.washington.edu' target='_blank'>CSSS</a>, University of Washington.</small></p>"), width=3 ), mainPanel( shinyjs::useShinyjs(), tabsetPanel( tabPanel('Map', fluidRow( column(6, checkboxInput('normalizeMapAndCountryPlot', 'Fixed scale over time', TRUE)) ), fluidRow( column(6, offset=5, textOutput('mapyear')) ), hr(), geochart('map'), #htmlOutput('mapgvis'), hr(), conditionalPanel(condition='input.map_selection', plotOutput('countryPlot', height='300px')) ), tabPanel('Sortable Data', fluidRow( column(6, checkboxInput('includeAggr2', 'Include Aggregations', FALSE)) ), fluidRow( column(3, offset=5, textOutput('year2')), column(1, offset=2, downloadLink("download", "Download", class = "fa fa-download alignright")) ), hr(), DT::dataTableOutput('stable') ), tabPanel('Trends & Pyramids', tags$head( tags$style(type="text/css", "#seltcountries { height: 450px}"), tags$style(type="text/css", "#trendstable { overflow-x: scroll}") ), #tags$div( fluidPage( #class = "container", fluidRow(HTML("<br>")), fluidRow( column(3, uiOutput('cselection')), column(9, tabsetPanel( tabPanel('Trends', plotOutput('probtrends', #height="400px", width="650px", click = "probtrends_values", hover = "probtrends_values", dblclick = "probtrends_zoom_reset", brush = brushOpts(id = "probtrends_zoom", resetOnNew = TRUE)), flowLayout( checkboxInput('trend.logscale', 'Log scale', FALSE), textOutput("probtrends_selected") ) ), tabPanel('Age Profile', googleLineChart('age.profileM', options=list(height=200)), googleLineChart('age.profileF', options=list(height=200)), checkboxInput('aprofile.logscale', 'Log scale', FALSE)), tabPanel('Pyramids', plotOutput('pyramids', click = "pyramid_values", hover = "pyramid_values", dblclick = "pyramid_zoom_reset", brush = brushOpts(id = "pyramid_zoom", resetOnNew = TRUE) ), flowLayout( checkboxInput('proppyramids', 'Pyramid of proportions', FALSE), textOutput("pyramid_selected") ) ), type="pills" ) ) ), fluidRow( column(12, textOutput('trendstabletitle'), tableOutput('trendstable') ) ) ) ), # end "Trends & Pyramids tab tabPanel('Histogram', flowLayout( checkboxInput('fiXscaleHist', 'Fixed x-axis over time', FALSE) ), hr(), htmlOutput('ghist') ), tabPanel('Rosling Chart', htmlOutput('graphgvis'), HTML("<br/>"), fluidRow(column(1, "")), fluidRow( column(3, offset=1, textOutput('AddIndicatorText')), column(1, actionButton("AddIndicator", "Add indicator")) ), HTML("<br/><i><small>*If you don't see a graph above, make sure Adobe Flash Player is installed and enabled in your browser.</small></i>") ), tabPanel("Help", includeHTML("README.html") ) ) #end tabsetPanel ) #end mainPanel )))
/scratch/gouwar.j/cran-all/cranData/wppExplorer/inst/explore/ui.R
setClassUnion("DateTime", c("Date", "POSIXct"))
/scratch/gouwar.j/cran-all/cranData/wql/R/DateTime-class.R
#' R2pss
#'
#' @param R conductivity ratio, dimensionless
#' @param t temperature, Celsius
#' @param p gauge pressure, decibar
#' @author
#' Alan Jassby, James Cloern
#'
#' @export
R2pss <- function (R, t, p = 0) {
  ec2pss(42.914 * R, t = t, p = p)
}
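
# Illustrative usage (a minimal sketch; the check values are those quoted in
# the ec2pss() documentation, after Fofonoff and Millard 1983):
R2pss(c(1, 1.2, 0.65), t = c(15, 20, 5), p = c(0, 2000, 1500))
# approximately 35.000 37.246 27.995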
/scratch/gouwar.j/cran-all/cranData/wql/R/R2pss.R
#' Class WqData
#' @name WqData-class
#' @rdname WqData-class
#' @exportClass WqData
setClass(
  Class = 'WqData',
  contains = 'data.frame',
  validity = function(object) {
    if (!identical(object@names[1:5],
                   c("time", "site", "depth", "variable", "value")))
      stop("columns are not all named correctly")
    if (!all(
      is(object$time, "DateTime"),
      is(object$site, "factor"),
      is(object$depth, "numeric"),
      is(object$variable, "factor"),
      is(object$value, "numeric")))
      stop("columns are not all of correct class")
  }
)

#' @importFrom methods getSlots slot "slot<-"
setMethod(
  f = `[`,
  signature = "WqData",
  definition = function(x, i, j="MISSING", drop="MISSING") {
    if (missing(i)) r <- TRUE else {
      if (is(i, "numeric")) r <- i else {
        e <- substitute(i)
        r <- eval(e, x, parent.frame())
        if (!is.logical(r)) stop("'i' must be logical or numeric")
        r <- r & !is.na(r)
      }
    }
    x.stored <- x
    df <- `[.data.frame`(x, r, j=1:5, drop=FALSE)
    for (slot.name in names(getSlots("data.frame"))) {
      slot(x.stored, slot.name) <- slot(df, slot.name)
    }
    x.stored
  }
)

setMethod(
  f = "summary",
  signature = "WqData",
  definition = function(object, ...) {
    trange <- range(as.Date(format(object$time)), na.rm = TRUE)
    cat("date range: ", paste(trange[1], "to", trange[2]), "\n\n")
    nums <- table(object$site, object$variable)
    quarts <- tapply(object$value, object$variable, summary)
    quarts1 <- matrix(unlist(quarts), byrow = TRUE, ncol = 6)
    colnames(quarts1) <- names(quarts[[1]])
    rownames(quarts1) <- names(quarts)[seq_len(nrow(quarts1))]
    sumry <- list(observations = nums, quartiles = quarts1)
    sumry
  }
)

#' @importFrom ggplot2 ggplot aes_string facet_wrap geom_boxplot
#' @export
setMethod(
  f = "plot",
  signature = "WqData",
  definition = function(x, y = "missing", vars, num.col = NULL) {
    if (missing(vars))
      vars <- unique(x$variable)
    # cap the display at 10 variables
    num.plots <- min(10, length(vars))
    vars <- vars[1:num.plots]
    d <- data.frame(x)
    d <- d[d$variable %in% vars, ]
    ggplot(d, aes_string(x = "site", y = "value", z = "variable")) +
      geom_boxplot(outlier.colour = 'blue', outlier.shape = 1) +
      facet_wrap(~ variable, scales = "free_y", ncol = num.col)
  }
)
/scratch/gouwar.j/cran-all/cranData/wql/R/WqData-class.R
#' date2decyear
#' @param w date
#' @author
#' Alan Jassby, James Cloern
#' @export
date2decyear <- function(w) {
  old_ops <- options(digits = 8)
  on.exit(options(old_ops))
  posx <- as.POSIXlt(w)
  yr <- 1900 + posx$year
  dy <- posx$yday + 0.5
  yr + dy/ifelse(leapYear(yr), 366, 365)
}
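
# Illustrative usage (a minimal sketch): mid-year dates map to roughly
# year + 0.5, with leap years handled via leapYear()
date2decyear(as.Date("2000-07-01"))
# about 2000.499 (day 182.5 of a 366-day year)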
/scratch/gouwar.j/cran-all/cranData/wql/R/date2decyear.R
#' Decompose a time series #' #' The function decomposes a time series into a long-term mean, annual, #' seasonal and "events" component. The decomposition can be multiplicative or #' additive, and based on median or mean centering. #' #' The rationale for this simple approach to decomposing a time series, with #' examples of its application, is given by Cloern and Jassby (2010). It is #' motivated by the observation that many important events for estuaries (e.g., #' persistent dry periods, species invasions) start or stop suddenly. Smoothing #' to extract the annualized term, which can disguise the timing of these #' events and make analysis of them unnecessarily difficult, is not used. #' #' A multiplicative decomposition will typically be useful for a biological #' community- or population-related variable (e.g., chlorophyll-a) that #' experiences exponential changes in time and is approximately lognormal, #' whereas an additive decomposition is more suitable for a normal variable. #' The default centering method is the median, especially appropriate for #' series that have large, infrequent events. #' #' If \code{event = TRUE}, the seasonal component represents a recurring #' monthly pattern and the events component a residual series. Otherwise, the #' seasonal component becomes the residual series. The latter is appropriate #' when seasonal patterns change systematically over time. You can use #' \code{\link{plotSeason}} and \code{\link{seasonTrend}} to investigate the #' way seasonality changes. #' #' @param x a monthly time series vector #' @param event whether or not an "events" component should be determined #' @param type the type of decomposition, either multiplicative ("mult") or #' additive ("add") #' @param center the method of centering, either median or mean #' @return A monthly time series matrix with the following individual time #' series: \item{original }{original time series} \item{annual }{annual mean #' series} \item{seasonal }{repeating seasonal component} \item{events #' }{optionally, the residual or "events" series} #' @seealso \code{\link{plotSeason}}, \code{\link{seasonTrend}} #' @references Cloern, J.E. and Jassby, A.D. (2010) Patterns and scales of #' phytoplankton variability in estuarine-coastal ecosystems. \emph{Estuaries #' and Coasts} \bold{33,} 230--241. 
#' @keywords manip ts #' @author #' Alan Jassby, James Cloern #' @importFrom stats ts ts.union aggregate median start end #' @export #' @examples #' #' # Apply the function to a single series (Station 27) and plot it: #' y <- decompTs(sfbayChla[, 's27']) #' y #' plot(y, nc=1, main="") #' decompTs <- function(x, event = TRUE, type = c("mult", "add"), center = c("median", "mean")) { # Validate input if (!is.ts(x) || !identical(frequency(x), 12)) { stop("x must be a monthly 'ts' vector") } type <- match.arg(type) center <- match.arg(center) # Set the time window startyr <- start(x)[1] endyr <- end(x)[1] x <- window(x, start = c(startyr, 1), end = c(endyr, 12), extend=TRUE) # Choose the arithmetic typeations, depending on type if (type == "mult") { `%/-%` <- function(x, y) x / y `%*+%` <- function(x, y) x * y } else { `%/-%` <- function(x, y) x - y `%*+%` <- function(x, y) x + y } # Choose the centering method, depending on center if (center == "median") { center <- function(x, na.rm=FALSE) median(x, na.rm=na.rm) } else { center <- function(x, na.rm=FALSE) mean(x, na.rm=na.rm) } # Long-term center grand <- center(x, na.rm=TRUE) # Annual component x1 <- x %/-% grand annual0 <- aggregate(x1, 1, center, na.rm=TRUE) annual1 <- as.vector(t(matrix(rep(annual0, 12), ncol=12))) annual <- ts(annual1, start=startyr, frequency=12) # Remaining components x2 <- x1 %/-% annual if (event) { # Seasonal component seasonal0 <- matrix(x2, nrow=12) seasonal1 <- apply(seasonal0, 1, center, na.rm=TRUE) seasonal <- ts(rep(seasonal1, endyr - startyr + 1), start=startyr, frequency=12) # Events component x3 <- x2 %/-% seasonal # result ts.union(original=x, annual, seasonal, events=x3) } else { ts.union(original=x, annual, seasonal=x2) } }
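
# A further sketch of the non-default options: an additive, mean-centered
# decomposition without the events component (arguments as documented above)
y2 <- decompTs(sfbayChla[, 's27'], event = FALSE, type = "add", center = "mean")
plot(y2, nc = 1, main = "")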
/scratch/gouwar.j/cran-all/cranData/wql/R/decompTs.R
#' decyear2date
#'
#' @param x decimal year
#' @author
#' Alan Jassby, James Cloern
#' @export
decyear2date <- function(x) {
  yr <- floor(x)
  len <- ifelse(leapYear(yr), 366, 365)
  julday <- floor((x - yr) * len)
  as.Date(julday, origin = as.Date(paste(yr, 1, 1, sep = '-')))
}
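
# Illustrative usage (a minimal sketch): the approximate inverse of
# date2decyear(), resolved to the nearest day
decyear2date(2000.5)
# "2000-07-02" (early July of the leap year 2000)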
/scratch/gouwar.j/cran-all/cranData/wql/R/decyear2date.R
#' Convert conductivity to salinity #' #' Electrical conductivity data are converted to salinity using the Practical #' Salinity Scale and an extension for salinities below 2. #' #' \code{ec2pss} converts electrical conductivity data to salinity using the #' Practical Salinity Scale 1978 in the range of 2-42 (Fofonoff and Millard #' 1983). Salinities below 2 are calculated using the extension of the #' Practical Salinity Scale (Hill et al. 1986). #' #' \code{R2pss} is the same function, except that conductivity ratios rather #' than conductivities are used as input. #' #' @param ec conductivity, mS/cm #' @param t temperature, Celsius #' @param p gauge pressure, decibar #' @return \code{ec2pss} and \code{R2pss} both return salinity values on the #' Practical Salinity Scale. #' @note Input pressures are not absolute pressures but rather gauge pressures. #' Gauge pressures are measured relative to 1 standard atmosphere, so the gauge #' pressure at the surface is 0. #' @author #' Alan Jassby, James Cloern #' @references Fofonoff N.P. and Millard Jr R.C. (1983) \emph{Algorithms for #' Computation of Fundamental Properties of Seawater.} UNESCO Technical Papers #' in Marine Science 44. UNESCO, Paris, 53 p. #' #' Hill K.D., Dauphinee T.M. and Woods D.J. (1986) The extension of the #' Practical Salinity Scale 1978 to low salinities. \emph{IEEE Journal of #' Oceanic Engineering} \bold{11,} 109-112. #' @keywords manip utilities #' @examples #' #' # Check values from Fofonoff and Millard (1983): #' R = c(1, 1.2, 0.65) #' t = c(15, 20, 5) #' p = c(0, 2000, 1500) #' R2pss(R, t, p) # 35.000 37.246 27.995 #' # Repeat calculation with equivalent conductivity values by setting #' # ec <- R * C(35, 15, 0): #' ec = c(1, 1.2, 0.65) * 42.9140 #' ec2pss(ec, t, p) # same results #' #' @export ec2pss ec2pss <- function (ec, t, p = 0) { # Define conductivity ratio R <- ec/42.914 # Estimate temperature correction (valid for -2 < t < 35) c <- c(0.6766097, 0.0200564, 0.0001104259, -6.9698e-07, 1.0031e-09) rt <- c[1] + c[2] * t + c[3] * t^2 + c[4] * t^3 + c[5] * t^4 # Estimate pressure correction (validity range varies with t and S) d <- c(0.03426, 0.0004464, 0.4215, -0.003107) e <- c(2.07e-05, -6.37e-10, 3.989e-15) Rp <- 1 + p * (e[1] + e[2] * p + e[3] * p^2)/(1 + d[1] * t + d[2] * t^2 + (d[3] + d[4] * t) * R) # Estimate salinity (valid for 2 < S < 42 and -2 < t < 35). Rt <- R/(Rp * rt) a <- c(0.008, -0.1692, 25.3851, 14.0941, -7.0261, 2.7081) b <- c(5e-04, -0.0056, -0.0066, -0.0375, 0.0636, -0.0144) ft <- (t - 15)/(1 + 0.0162 * (t - 15)) S <- a[1] + a[2] * Rt^0.5 + a[3] * Rt + a[4] * Rt^1.5 + a[5] * Rt^2 + a[6] * Rt^2.5 + ft * (b[1] + b[2] * Rt^0.5 + b[3] * Rt + b[4] * Rt^1.5 + b[5] * Rt^2 + b[6] * Rt^2.5) # Estimate salinity correction for S < 2 x <- 400 * Rt y <- 100 * Rt ifelse(S >= 2, S, S - a[1]/(1 + 1.5 * x + x^2) - b[1] * ft/(1 + y^0.5 + y + y^1.5)) }
/scratch/gouwar.j/cran-all/cranData/wql/R/ec2pss.R
#' Empirical orthogonal function analysis #' #' Finds and rotates empirical orthogonal functions (EOFs). #' #' EOF analysis is used to study patterns of variability (\dQuote{modes}) in a #' matrix time series and how these patterns change with time #' (\dQuote{amplitude time series}). Hannachi et al. (2007) give a detailed #' discussion of this exploratory approach with emphasis on meteorological #' data. In oceanography and climatology, the time series represent #' observations at different spatial locations (columns) over time (rows). But #' columns can also be seasons of the year (Jassby et al. 1999) or even a #' combination of seasons and depth layers (Jassby et al. 1990). EOF analysis #' uses the same techniques as principal component analysis, but the time #' series are observations of the same variable in the same units. Scaling the #' data is optional, but it is the default here. #' #' Eigenvectors (unscaled EOFs) and corresponding eigenvalues (amount of #' explained variance) are found by singular value decomposition of the #' centered and (optionally) scaled data matrix using \code{\link{prcomp}}. In #' order to facilitate a physical interpretation of the variability modes, a #' subset consisting of the \code{n} most important EOFs is rotated (Richman #' 1986). \code{\link{eofNum}} can be used to help choose \code{n}. Hannachi et #' al. (2007) recommend orthogonal rotation of EOFs scaled by the square root #' of the corresponding eigenvalues to avoid possible computation problems and #' reduce sensitivity to the choice of \code{n}. We follow this recommendation #' here, using the \code{\link{varimax}} method for the orthogonal rotation. #' #' Note that the signs of the EOFs are arbitrary. #' #' @param x a data frame or matrix, with no missing values #' @param n number of EOFs to retain for rotation #' @param scale. logical indicating whether the (centered) variables should be #' scaled to have unit variance #' @importFrom stats is.mts time prcomp varimax #' @export #' @author #' Alan Jassby, James Cloern #' @return A list with the following members: \item{REOF}{a matrix with rotated #' EOFs} \item{amplitude}{a matrix with amplitude time series of #' \acronym{REOF}s} \item{eigen.pct}{all eigenvalues of correlation matrix as #' percent of total variance} \item{variance}{variance explained by retained #' EOFs} #' @seealso \code{\link{eofNum}}, \code{\link{eofPlot}}, #' \code{\link{monthCor}}, \code{\link{ts2df}} #' @references Hannachi, A., Jolliffe, I.T., and Stephenson, D.B. (2007) #' Empirical orthogonal functions and related techniques in atmospheric #' science: A review. \emph{International Journal of Climatology} \bold{27,} #' 1119--1152. #' #' Jassby, A.D., Powell, T.M., and Goldman, C.R. (1990) Interannual #' fluctuations in primary production: Direct physical effects and the trophic #' cascade at Castle Lake, California (USA). \emph{Limnology and Oceanography} #' \bold{35,} 1021--1038. #' #' Jassby, A.D., Goldman, C.R., Reuter, J.E., and Richards, R.C. (1999) Origins #' and scale dependence of temporal variability in the transparency of Lake #' Tahoe, California-Nevada. \emph{Limnology and Oceanography} \bold{44,} #' 282--294. #' #' Richman, M. (1986) Rotation of principal components. \emph{Journal of #' Climatology} \bold{6,} 293--335. 
#' @keywords ts #' @examples #' #' # Create an annual matrix time series #' chla1 <- aggregate(sfbayChla, 1, mean, na.rm = TRUE) #' chla1 <- chla1[, 1:12] # remove stations with missing years #' # eofNum (see examples) suggests n = 1 #' eof(chla1, 1) #' eof <- function (x, n, scale. = TRUE) { # Validate args if (!is.matrix(x) && !is.data.frame(x)) stop("x must be a 'matrix' or 'data.frame'") if (identical(colnames(x), NULL)) colnames(x) <- paste("v", seq_len(ncol(x)), sep="") if (anyDuplicated(colnames(x)) > 0) stop("x must have distinct column names") if (is.mts(x)) rownames(x) <- time(x) if (identical(rownames(x), NULL)) rownames(x) <- seq_len(nrow(x)) if (anyDuplicated(rownames(x)) > 0) stop("x must have distinct row names") # get EOFs (as scaled eigenvectors) pr1 <- prcomp(x, scale.=TRUE) eigenval1 <- pr1[["sdev"]][1:n]^2 eigenvec1 <- pr1[["rotation"]][, 1:n] eof1 <- eigenvec1 %*% diag(sqrt(eigenval1), n, n) scores1 <- pr1[["x"]][, 1:n] amp1 <- scale(scores1) attributes(amp1)$`scaled:center` <- attributes(amp1)$`scaled:scale` <- NULL # get REOFs by orthogonally rotating EOFs if (identical(n, 1)) { reof <- as.matrix(eof1) amp <- amp1 } else { pr2 <- varimax(eof1) reof <- unclass(pr2[["loadings"]]) rotater <- pr2[["rotmat"]] amp <- amp1 %*% rotater } # percent and cumulative percent of total variance eigs <- pr1[["sdev"]]^2 eigen.pct <- round(100 * eigs/sum(eigs), 1) totvar.pct <- round(100 * cumsum(eigs/sum(eigs)), 1) # return results colnames(reof) <- colnames(amp) <- paste('EOF', 1:n, sep='') rownames(reof) <- colnames(x) list(REOF=reof, amplitude=amp, eigen.pct=eigen.pct, variance=totvar.pct) }
/scratch/gouwar.j/cran-all/cranData/wql/R/eof.R
#' Plot EOF percent variance #' #' Plots the variances associated with empirical orthogonal functions (EOF). #' Useful for deciding how many EOFs to retain for rotation. #' #' Calculates the eigenvalues from an EOF analysis, as described in #' \code{\link{eof}}. The eigenvalues are plotted against eigenvalue number #' (sometimes called a \dQuote{scree plot}), and the cumulative variance as \% #' of total is plotted over each eigenvalue. The approximate 0.95 confidence #' limits are depicted for each eigenvalue using North et al.'s (1982) #' rule-of-thumb, which ignores any autocorrelation in the data. If the #' autocorrelation structure is assessed separately and can be expressed in #' terms of effective sample size (e.g., Thiebaux and Zwiers 1984), then #' \code{n} can be set equal to this number. #' #' There is no universal rule for deciding how many of the EOFs should be #' retained for rotation (Hannachi et al. 2007). In practice, the number is #' chosen by requiring a minimum cumulative variance, looking for a sharp break #' in the spectrum, requiring that confidence limits not overlap, various Monte #' Carlo methods, or many other techniques. The plot produced here enables the #' first three methods. #' #' @param x a data frame or matrix, with no missing values #' @param n effective sample size #' @param scale. logical indicating whether the (centered) variables should be #' scaled to have unit variance #' @importFrom ggplot2 geom_errorbar geom_text #' @export #' @author #' Alan Jassby, James Cloern #' @return A plot of the eigenvectors. #' @seealso \code{\link{eof}}, \code{\link{interpTs}}, \code{\link{monthCor}}, #' \code{\link{eofPlot}} #' @references Hannachi, A., Jolliffe, I.T., and Stephenson, D.B. (2007) #' Empirical orthogonal functions and related techniques in atmospheric #' science: A review. \emph{International Journal of Climatology} \bold{27,} #' 1119--1152. #' #' North, G., Bell, T., Cahalan, R., and Moeng, F. (1982) Sampling errors in #' the estimation of empirical orthogonal functions. \emph{Monthly Weather #' Review} \bold{110,} 699--706. #' #' Thiebaux H.J. and Zwiers F.W. (1984) The interpretation and estimation of #' effective sample sizes. \emph{Journal of Climate and Applied Meteorology} #' \bold{23,} 800--811. #' @keywords Graphics ts #' @examples #' #' # Create an annual time series data matrix from sfbay chlorophyll data #' # Average over each year #' chla1 <- aggregate(sfbayChla, 1, mean, na.rm = TRUE) #' chla1 <- chla1[, 1:12] # remove stations with missing years #' eofNum(chla1) #' # These stations appear to act as one with respect to chlorophyll #' # variability on the annual scale because there's one dominant EOF. #' eofNum <- function(x, n = nrow(x), scale. = TRUE) { # eigenvectors eigs <- prcomp(x, scale.=scale.)[["sdev"]]^2 eigs.pct <- 100 * eigs/sum(eigs) # 0.95 confidence limits eigs.lo <- eigs * (1 - sqrt(2/n)) eigs.hi <- eigs * (1 + sqrt(2/n)) # cum. variance cumvar <- round(cumsum(eigs.pct), 1) # plot p <- ncol(x) d <- data.frame(rank = factor(1:p), eigs, eigs.lo, eigs.hi, cumvar) d <- within(d, cumvar.line <- eigs.hi + 0.02 * max(eigs.hi)) d <- d[1:min(p, 10), ] ggplot(data = d, aes(x = rank, y = eigs)) + geom_errorbar(aes(x = rank, ymin = eigs.lo, ymax = eigs.hi), width = 0.3) + geom_point(size = 3) + geom_text(aes(x = rank, y = cumvar.line, label = cumvar), size = 3, vjust = 0) + labs(list(x = "Rank", y = "Eigenvalue")) + theme(panel.grid.minor = element_blank()) }
/scratch/gouwar.j/cran-all/cranData/wql/R/eofNum.R
#' Plot EOF analysis results #' #' Plots the rotated empirical orthogonal functions or amplitude time series #' resulting from \code{\link{eof}}. #' #' When the columns of the original data have a natural order, such as stations #' along a transect or months of the year, there may be no need to reorder the #' EOF coefficients. But if there is no natural order, such as when columns #' represents disparate sites around the world, the plot can be more #' informative if coefficients are ordered by size (\code{ord = TRUE}). #' #' Coefficients and amplitudes for a given EOF may be more easily interpreted #' if \code{rev = TRUE}, because the sign of the first coefficient is #' arbitrarily determined and all the other signs follow from that choice. #' @author #' Alan Jassby, James Cloern #' @param x result of the function \code{\link{eof}} #' @param type whether the EOF coefficients or amplitudes should be plotted #' @param rev logical indicating whether coefficients and amplitudes should be #' multiplied by \code{-1} #' @param ord logical indicating whether coefficients should be ordered by size #' @importFrom ggplot2 geom_hline geom_vline #' @export #' @return A plot of the EOF coefficients or amplitudes. #' @seealso \code{\link{eof}} #' @keywords Graphics #' @examples #' #' # Create an annual matrix time series #' chla1 <- aggregate(sfbayChla, 1, mean, na.rm = TRUE) #' chla1 <- chla1[, 1:12] # remove stations with missing years #' #' # eofNum (see examples) suggests n = 1 #' e1 <- eof(chla1, n = 1) #' eofPlot(e1, type = 'coef') #' eofPlot(e1, type = 'amp') #' eofPlot <- function(x, type = c("coef", "amp"), rev = FALSE, ord = FALSE) { # Validate args type <- match.arg(type) num <- ncol(x$REOF) # Plot if (type == "coef") { d1 <- x$REOF if (ord) d1 <- d1[order(d1[, 1]), ] if (rev) d1 <- -d1 m1 <- melt(d1, varnames = c("variable", "eof")) ggplot(m1, aes_string(x = "value", y = "variable")) + geom_vline(xintercept = 0, colour = "red", size = 0.2) + geom_point(colour = "blue") + facet_wrap(~ eof, ncol = num) + labs(y = "", x = "Coefficient") } else { d1 <- x$amplitude if (rev) d1 <- -d1 m1 <- melt(d1, varnames = c("obs", "eof")) g1 <- ggplot(m1, aes_string(x = "obs", y = "value")) + geom_hline(aes(yintercept = 0), colour = "red", size = 0.2) + geom_point(colour = "blue") + facet_wrap(~ eof, nrow = num) + labs(x = "", y = "Amplitude") if (is.factor(m1$obs)) return(g1) g1 + geom_line(colour = "blue") } }
/scratch/gouwar.j/cran-all/cranData/wql/R/eofPlot.R
#' Interpolate or substitute missing time series values #' #' Imterpolates or substitutes missing data in a time series for gaps up to a #' specified size. #' #' When \code{type = "linear"}, the function performs linear interpolation of #' any \code{NA} runs of length smaller than or equal to \code{gap}. When #' \code{gap = NULL}, gaps of any size will be replaced. Does not change #' leading or trailing \code{NA} runs. This interpolation approach is best for #' periods of low biological activity when sampling is routinely suspended. #' #' When \code{type = "series.median"} or \code{"series.mean"}, missing values #' are replaced by the overall median or mean, respectively. This may be #' desirable when missing values are not allowed but one wants, for example, to #' avoid spurious enhancement of trends. #' #' When \code{type = "cycle.median"} or \code{type = "cycle.mean"}, missing #' values are replaced by the median or mean, respectively, for the same cycle #' position (i.e., same month, quarter, etc., depending on the frequency). This #' may give more realistic series than using the overall mean or median. #' #' Intended for time series but first three types will work with any vector or #' matrix. Matrices will be interpolated by column. #' @author #' Alan Jassby, James Cloern #' @param x object of class \code{"ts"} or \code{"mts"} #' @param type method of interpolation or substitution #' @param gap maximum gap to be replaced #' @export #' @importFrom zoo na.approx #' @importFrom stats tsp #' @return The time series with some or all missing values replaced. #' @seealso \code{\link{decompTs}} #' @keywords utilities manip #' @examples #' #' ### Interpolate a vector time series and highlight the imputed data #' chl27 <- sfbayChla[, 's27'] #' x1 <- interpTs(chl27, gap = 3) #' plot(x1, col = 'red') #' lines(chl27, col = 'blue') #' x2 <- interpTs(chl27, type = "series.median", gap = 3) #' plot(x2, col = 'red') #' lines(chl27, col = 'blue') #' #' ### Interpolate a matrix time series and plot results #' x3 <- interpTs(sfbayChla, type = "cycle.mean", gap = 1) #' plot(x3[, 1:10], main = "SF Bay Chl-a\n(gaps of 1 month replaced)") #' interpTs <- function(x, type = c("linear", "series.median", "series.mean", "cycle.median", "cycle.mean"), gap = NULL) { # Validate arguments gap.max <- nrow(as.matrix(x)) - 2 if (is.null(gap)) gap <- gap.max if (is.na(as.numeric(gap)) || gap < 1 || gap > gap.max) stop("gap must be a number between 1 and the length - 2") type <- match.arg(type) if (!is.ts(x) && type %in% c("cycle.median", "cycle.mean")) stop("x must be a time series for these types") # Define function for replacement by cycle tspx <- tsp(x) replaceNA <- function (x, stat) { x <- ts(x, start = tspx[1], frequency = tspx[3]) x1 <- window(x, start = start(x)[1], end = c(end(x)[1], 12), extend = TRUE) x2 <- matrix(x1, byrow = TRUE, ncol = 12) stats <- apply(x2, 2, stat, na.rm = TRUE) indx <- (seq_len(length(x1)))[is.na(x1)] x3 <- replace(x1, indx, stats[cycle(x1)[indx]]) window(x3, start = tspx[1], end = tspx[2]) } # Define function for vectors f1 <- function(x, gap, type) { if (sum(!is.na(x)) < 2) { x1 <- x } else { x1 <- switch(type, linear = na.approx(x, na.rm = FALSE), series.median = ifelse(is.na(x), median(x, na.rm = TRUE), x), series.mean = ifelse(is.na(x), mean(x, na.rm = TRUE), x), cycle.median = replaceNA(x, median), cycle.mean = replaceNA(x, mean) ) for (i in seq_len(length(x) - gap)) { seq1 <- i:(i + gap) if (all(is.na(x[seq1]))) x1[seq1] <- x[seq1] } } x1 } # Do the interpolation if 
(is.matrix(x)) { ans <- apply(x, 2, f1, gap, type) } else { ans <- f1(x, gap, type) } if (is.ts(x)) { ans <- ts(ans, start = start(x), frequency = frequency(x)) } ans }
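
# A further sketch (assuming the sfbayChla dataset supplied with the package):
# fill gaps of up to 2 months with the long-term median for that month
x4 <- interpTs(sfbayChla[, 's27'], type = "cycle.median", gap = 2)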
/scratch/gouwar.j/cran-all/cranData/wql/R/interpTs.R
#' layerMean
#' @importFrom stats na.omit
#' @description Acts on a matrix or data frame with depth in the first
#' column and observations for different variables (or different sites, or
#' different times) in each of the remaining columns. The trapezoidal mean over
#' the given depths is calculated for each of the variables. Replicate depths
#' are averaged, and missing values or data with only one unique depth are
#' handled. Data are not extrapolated to cover missing values at the top or
#' bottom of the layer. The result can differ markedly from the simple mean
#' even for equal spacing of depths, because the top and bottom values are
#' weighted by 0.5 in a trapezoidal mean.
#' @param d data.frame
#' @author
#' Alan Jassby, James Cloern
#' @export
layerMean <- function(d) {
  # Trapezoidal mean of scalar x versus z
  trapMean <- function(z, x) {
    # Handle NAs
    w <- na.omit(cbind(z, x))
    n <- nrow(w)
    if (identical(n, 0L)) return(NA)
    z <- w[, 1]
    x <- w[, -1]
    z1 <- diff(z)
    x1 <- 0.5 * (x[-1] + x[-n])
    sum(z1 * x1)/(z[n] - z[1])
  }
  # Trapezoidal mean of vector d[, -1] vs d[, 1]
  # Handle single observations
  n <- nrow(d)
  if (is.null(n)) return(d[-1])
  if (identical(n, 1L)) return(as.numeric(d[, -1]))
  # Handle duplicates
  d <- aggregate(d[, -1], by = list(z = d[, 1]), mean, na.rm = TRUE)
  apply(d[, -1, drop = FALSE], 2, function(x) trapMean(z = d[, 1], x))
}
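
# Illustrative usage (a minimal sketch with made-up numbers): depth in the
# first column, two variables in the remaining columns
prof <- data.frame(depth = c(0, 2, 5, 10),
                   chl = c(8, 6, 4, 2),
                   temp = c(20, 19, 17, 15))
layerMean(prof)
# trapezoidal means: chl 4.4, temp 17.3 (simple means would be 5 and 17.75)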
/scratch/gouwar.j/cran-all/cranData/wql/R/layerMean.R
#' leapYear
#' @description \code{TRUE} if \code{x} is a leap year, \code{FALSE}
#' otherwise.
#' @param x integer year
#' @author
#' Alan Jassby, James Cloern
#' @export
leapYear <- function(x) {
  if (!is.numeric(x)) stop('x must be numeric')
  x <- floor(x)
  x%%4 == 0 & (x%%100 != 0 | x%%400 == 0)
}
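
# Illustrative usage (a minimal sketch): century years are leap years only
# when divisible by 400
leapYear(c(1900, 2000, 2012, 2013))
# FALSE TRUE TRUE FALSE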
/scratch/gouwar.j/cran-all/cranData/wql/R/leapYear.R
#' Mann-Kendall trend test and the Sen slope #' #' Applies Kendall's tau test for the significance of a monotonic time series #' trend (Mann 1945). Also calculates the Sen slope as an estimate of this #' trend. #' #' The Sen slope (alternately, Theil or Theil-Sen slope)---the median slope #' joining all pairs of observations---is expressed by quantity per unit time. #' The fraction of missing slopes involving the first and last fifths of the #' data are provided so that the appropriateness of the slope estimate can be #' assessed and results flagged. Schertz et al. [1991] discuss this and related #' decisions about missing data. Other results are used for further analysis by #' other functions. Serial correlation is ignored, so the interval between #' points should be long enough to avoid strong serial correlation. #' #' For the relative slope, the slope joining each pair of observations is #' divided by the first of the pair before the overall median is taken. The #' relative slope makes sense only as long as the measurement scale is #' non-negative (not, e.g., temperature on the Celsius scale). Comparing #' relative slopes is useful when the variables in \code{x} have different #' units. #' #' If \code{plot = TRUE}, then either the Sen slope (\code{type = "slope"}) or #' the relative Sen slope (\code{type = "relative"}) are plotted. The plot #' symbols indicate, respectively, that the trend is significant or not #' significant. The plot can be customized by passing any arguments used by #' \code{\link{dotchart}} such as \code{xlab} or \code{xlim}, as well as #' graphical parameters described in \code{\link{par}}. #' @author #' Alan Jassby, James Cloern #' @param x A numeric vector, matrix or data frame #' @param plot Should the trends be plotted when x is a matrix or data frame? #' @param type Type of trend to be plotted, actual or relative #' @param order Should the plotted trends be ordered by size? #' @param pval p-value for significance #' @param pchs Plot symbols for significant and not significant trend #' estimates, respectively #' @param ... Other arguments to pass to plotting function #' @return A list of the following if \code{x} is a vector: #' \item{sen.slope}{Sen slope} \item{sen.slope.rel}{Relative Sen slope} #' \item{p.value}{Significance of slope} \item{S}{Kendall's S} #' \item{varS}{Variance of S} \item{miss}{Fraction of missing slopes connecting #' first and last fifths of \code{x}} or a matrix with corresponding columns if #' \code{x} is a matrix or data frame. #' @note Approximate p-values with corrections for ties and continuity are used #' if \eqn{n > 10} or if there are any ties. Otherwise, exact p-values based on #' Table B8 of Helsel and Hirsch (2002) are used. In the latter case, \eqn{p = #' 0.0001} should be interpreted as \eqn{p < 0.0002}. #' @seealso \code{\link{seaKen}}, \code{\link{seasonTrend}}, #' \code{\link{tsSub}} #' @references Mann, H.B. (1945) Nonparametric tests against trend. #' \emph{Econometrica} \bold{13,} 245--259. #' #' Helsel, D.R. and Hirsch, R.M. (2002) \emph{Statistical methods in water #' resources.} Techniques of Water Resources Investigations, Book 4, chapter #' A3. U.S. Geological Survey. 522 pages. #' \url{http://pubs.usgs.gov/twri/twri4a3/} #' #' Schertz, T.L., Alexander, R.B., and Ohe, D.J. (1991) \emph{The computer #' program EStimate TREND (ESTREND), a system for the detection of trends in #' water-quality data.} Water-Resources Investigations Report 91-4040, U.S. #' Geological Survey. 
#' @keywords ts #' @importFrom stats pnorm #' @export #' @examples #' #' tsp(Nile) # an annual time series #' mannKen(Nile) #' #' y <- sfbayChla #' y1 <- interpTs(y, gap=1) # interpolate single-month gaps only #' y2 <- aggregate(y1, 1, mean, na.rm=FALSE) #' mannKen(y2) #' mannKen(y2, plot=TRUE) # missing data means missing trend estimates #' mannKen(y2, plot=TRUE, xlim = c(0.1, 0.25)) #' mannKen(y2, plot=TRUE, type='relative', order = TRUE, pval = .001, #' xlab = "Relative trend") #' legend("topleft", legend = "p < 0.001", pch = 19, bty="n") #' mannKen <- function(x, plot = FALSE, type = c("slope", "relative"), order = FALSE, pval = .05, pchs = c(19, 21), ...) { # validate args if (!is.numeric(x) && !is.matrix(x) && !is.data.frame(x)) stop("'x' must be a vector, matrix, or data.frame") if (!is.null(ncol(x)) && is.null(colnames(x))) colnames(x) <- paste("series_", seq_len(ncol(x)), sep="") type <- match.arg(type) kendalls_S_2sided_pvalues <- structure(list(n = c(3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10), S = c(1, 3, 0, 2, 4, 6, 0, 2, 4, 6, 8, 10, 1, 3, 5, 7, 9, 11, 13, 15, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31, 33, 35, 37, 39, 41, 43, 45 ), pvalue = c(1, 0.334, 0.625, 0.75, 0.334, 0.084, 0.592, 0.816, 0.484, 0.234, 0.084, 0.0166, 1, 0.72, 0.47, 0.272, 0.136, 0.056, 0.0166, 0.0028, 1, 0.772, 0.562, 0.382, 0.238, 0.136, 0.07, 0.03, 0.0108, 0.0028, 4e-04, 0.548, 0.904, 0.72, 0.548, 0.398, 0.276, 0.178, 0.108, 0.062, 0.0312, 0.0142, 0.0056, 0.0018, 4e-04, 1e-04, 0.54, 0.92, 0.762, 0.612, 0.476, 0.358, 0.26, 0.18, 0.12, 0.076, 0.044, 0.0248, 0.0126, 0.0058, 0.0024, 8e-04, 2e-04, 1e-04, 1e-04, 1, 0.862, 0.728, 0.6, 0.484, 0.38, 0.292, 0.216, 0.156, 0.108, 0.072, 0.046, 0.0286, 0.0166, 0.0092, 0.0046, 0.0022, 0.001, 4e-04, 1e-04, 1e-04, 1e-04, 1e-04)), .Names = c("n", "S", "pvalue" ), row.names = c(NA, 88L), class = "data.frame") # function for single vector mk <- function(x, ks = kendalls_S_2sided_pvalues) { # extent of NAs in first and last fifths of data len <- length(x) fifth <- ceiling(len/5) xbeg <- x[1:fifth] xend <- x[(len - fifth + 1):len] miss <- (fifth^2 - sum(!is.na(xbeg)) * sum(!is.na(xend)))/fifth^2 # get rid of NAs and check data length y <- x[!is.na(x)] t <- time(x)[!is.na(x)] # Sen slope outr <- outer(y, y, "-")/outer(t, t, "-") outr.rel <- sweep(outr, 2, y, '/') sen.slope <- median(outr[lower.tri(outr)]) sen.slope.rel <- median(outr.rel[lower.tri(outr.rel)]) # Kendall S outr <- sign(outer(y, y, "-")/outer(t, t, "-")) S <- sum(outr[lower.tri(outr)]) # variance of S ties <- rle(sort(y))$lengths n <- length(y) t1 <- n * (n - 1) * (2 * n + 5) t2 <- sum(ties * (ties - 1) * (2 * ties + 5)) varS <- (t1 - t2)/18 # p-value if (n > 10 || any(ties > 1) ) { # using approximate distribution Z <- (S - sign(S))/sqrt(varS) p.value <- 2 * pnorm(-abs(Z)) } else { if (n < 3) { p.value <- NA } else { # using exact values p.value <- ks[ks$S == abs(S) & ks$n == n, 3] } } c(sen.slope = sen.slope, sen.slope.rel = sen.slope.rel, p.value = p.value, S = S, varS = varS, miss = round(miss, 3)) } # apply mk for each vector if (is.null(dim(x))) return(as.list(mk(x))) if (ncol(x) == 1) 
return(as.list(mk(x[, 1]))) ans <- t(sapply(seq_len(ncol(x)), function(i) mk(x[, i]))) rownames(ans) <- colnames(x) # plot if TRUE if (!plot) { ans } else { v1 <- switch(type, slope = "sen.slope", relative = "sen.slope.rel" ) if (order) ans <- ans[order(ans[, v1]), ] pch <- ifelse(ans[, "miss"] >= .5, NA, ifelse(ans[, "p.value"] < pval, pchs[1], pchs[2])) dotchart(ans[, v1], pch = pch, ...) } }
/scratch/gouwar.j/cran-all/cranData/wql/R/mannKen.R
#' meanSub
#' @author
#' Alan Jassby, James Cloern
#' @param x numeric vector
#' @param sub integer index
#' @param na.rm logical
#'
#' @export
meanSub <- function(x, sub, na.rm = FALSE) {
  mean(x[sub], na.rm = na.rm)
}
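
# Illustrative usage (a minimal sketch): mean of elements 6-8 only,
# e.g. a summer subset of a vector of monthly values
meanSub(1:12, sub = 6:8)
# 7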
/scratch/gouwar.j/cran-all/cranData/wql/R/meanSub.R
#' monthCor
#'
#' @param x ts
#'
#' @export
#' @importFrom stats cor
#' @author
#' Alan Jassby, James Cloern
monthCor <- function(x) {
  # Validate args
  if (!is.ts(x) || !identical(frequency(x), 12))
    stop("x must be a monthly 'ts'")
  # Calculate cors
  x <- window(x, s = start(x)[1], end = c(end(x)[1], 12), extend = TRUE)
  d1 <- matrix(x, byrow = TRUE, ncol = 12)
  # column 13 holds the following year's January, so each row pairs
  # December of year i with January of year i + 1
  d2 <- cbind(d1, c(d1[-1, 1], NA))
  d3 <- cor(d2, use = 'pairwise.complete.obs')
  cors <- d3[cbind(1:12, 2:13)]
  # Return
  names(cors) <- paste(month.abb, c(month.abb[-1], month.abb[1]), sep = '-')
  round(cors, 3)
}
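
# Illustrative usage (a minimal sketch, assuming the sfbayChla dataset
# supplied with the package): adjacent-month correlations, e.g. to choose a
# start month before reshaping with ts2df() or an EOF analysis
monthCor(sfbayChla[, 's27'])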
/scratch/gouwar.j/cran-all/cranData/wql/R/monthCor.R
#' monthNum
#' @description Converts dates to the corresponding numeric month.
#' @param y date
#' @author
#' Alan Jassby, James Cloern
#' @export
monthNum <- function(y) {
  match(months(y, abbreviate = TRUE), month.abb)
}
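
# Illustrative usage (a minimal sketch): note that months() is
# locale-dependent, so the match against month.abb assumes an English locale
monthNum(as.Date(c("2011-01-15", "2011-06-30", "2011-12-01")))
# 1 6 12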
/scratch/gouwar.j/cran-all/cranData/wql/R/monthNum.R
#' Converts matrix to vector time series for various analyses
#'
#' First aggregates multivariate matrix time series by year. Then converts to a
#' vector time series in which \dQuote{seasons} correspond to these annualized
#' values for the original variables.
#'
#' The \code{seas} parameter enables focusing the subsequent analysis on
#' seasons of special interest, or to ignore seasons where there are too many
#' missing data. The function can be used in conjunction with \code{seaKen} to
#' conduct a Regional Kendall trend analysis. Sometimes just plotting the
#' resulting series can be useful for exploring a spatial transect over time.
#' @author
#' Alan Jassby, James Cloern
#' @param x An object of class "mts"
#' @param seas Numeric vector of seasons to aggregate in original time series.
#' @param na.rm Should missing data be ignored when aggregating?
#' @export
#' @return A vector time series
#' @seealso \code{\link{seaKen}}
#' @keywords ts manip
#' @examples
#'
#' ## Quick plot a spatial transect of chlorophyll a during the
#' ## spring bloom period (Feb-Apr) for each year.
#' y <- mts2ts(sfbayChla, seas = 2:4)
#' plot(y, type = 'n')
#' abline(v = 1978:2010, col = 'lightgrey')
#' lines(y, type = 'h')
#'
mts2ts <- function(x, seas = 1:frequency(x), na.rm = FALSE) {
  if (!is.mts(x))
    stop("x must be of class 'mts'")
  st <- start(x)[1]
  x1 <- window(x, start = st, end = c(end(x)[1], frequency(x)), extend = TRUE)
  x1 <- aggregate(x1, 1, meanSub, sub = seas, na.rm = na.rm)
  x1 <- as.numeric(t(x1))
  ts(x1, start = st, frequency = ncol(x))
}
/scratch/gouwar.j/cran-all/cranData/wql/R/mts2ts.R
#' Dissolved oxygen at saturation #' #' Finds dissolved oxygen concentration in equilibrium with water-saturated #' air. #' #' Calculations are based on the approach of Benson and Krause (1984), using #' Green and Carritt's (1967) equation for dependence of water vapor partial #' pressure on \code{t} and \code{S}. Equations are valid for temperature in #' the range 0-40 C and salinity in the range 0-40. #' #' @param t temperature, degrees C #' @param S salinity, on the Practical Salinity Scale #' @param P pressure, atm #' @return Dissolved oxygen concentration in mg/L at 100\% saturation. If #' \code{P = NULL}, saturation values at 1 atm are calculated. #' @references Benson, B.B. and Krause, D. (1984) The concentration and #' isotopic fractionation of oxygen dissolved in fresh-water and seawater in #' equilibrium with the atmosphere. \emph{Limnology and Oceanography} #' \bold{29,} 620-632. #' #' Green, E.J. and Carritt, D.E. (1967) New tables for oxygen saturation of #' seawater. \emph{Journal of Marine Research} \bold{25,} 140-147. #' @keywords manip #' @author #' Alan Jassby, James Cloern #' @export #' @examples #' #' # Convert DO into % saturation for 1-m depth at Station 32. #' # Use convention of expressing saturation at 1 atm. #' sfb1 <- subset(sfbay, depth == 1 & stn == 32) #' dox.pct <- with(sfb1, 100 * dox/oxySol(temp, sal)) #' summary(dox.pct) #' oxySol <- function (t, S, P = NULL) { temp <- t + 273.15 # deg K lnCstar <- -139.34411 + 157570.1/temp - 66423080/temp^2 + 1.2438e+10/temp^3 - 862194900000/temp^4 - S * (0.017674 - 10.754/temp + 2140.7/temp^2) Cstar1 <- exp(lnCstar) if (is.null(P)) { # equilibrium DO Cstar at P = 1 atm Cstar1 } else { # transform for nonstandard pressure Pwv <- (1 - 0.000537 * S) * exp(18.1973 * (1 - 373.16/temp) + 3.1813e-07 * (1 - exp(26.1205 * (1 - temp/373.16))) - 0.018726 * (1 - exp(8.03945 * (1 - 373.16/temp))) + 5.02802 * log(373.16/temp)) theta <- 0.000975 - 1.426e-05 * t + 6.436e-08 * t^2 Cstar1 * P * (1 - Pwv/P) * (1 - theta * P)/((1 - Pwv) * (1 - theta)) } }
/scratch/gouwar.j/cran-all/cranData/wql/R/oxySol.R
#' Nonparametric Change-Point Detection #' #' Locates a single change-point in an annual series based on the Pettitt test. #' #' Pettitt's (1979) method is a rank-based nonparametric test for abrupt #' changes in a time series. It uses the Mann-Whitney statistic for testing #' that two samples (before and after the change-point) come from the same #' distribution, choosing the change-point that maximizes the statistic. The #' \emph{p}-value is approximate but accurate to 0.01 for \eqn{p \le} 0.5. #' Serial correlation is ignored, so the interval between points should be long #' enough to avoid strong serial correlation. The size of the change is #' estimated as the median difference between all pairs of observations in #' which the first one is after the change-point and the second is up to the #' change-point. #' #' Missing values are allowed at the beginning or end of each variable but #' interior missing values will produce an NA. Otherwise the change-point might #' not be meaningful. #' #' If \code{plot = TRUE}, a dot plot of \code{change.times} is shown. If #' \code{sort = TRUE}, the dots are sorted by \code{change.time}. The plot #' symbols indicate, respectively, that the trend is significant or not #' significant. The plot can be customized by passing any arguments used by #' \code{\link{dotchart}} such as \code{xlab}, as well as graphical parameters #' described in \code{\link{par}}. #' #' @param x a numeric vector, matrix or data frame with no missing interior #' values #' @param plot Should the trends be plotted when x is a matrix? #' @param order Should the plotted trends be ordered by size? #' @param pval p-value for significance #' @param pchs Plot symbols for significant and not significant trend #' estimates, respectively #' @param ... Other arguments to pass to plotting function #' @return A list of the following if \code{x} is a vector: #' \item{pettitt.K}{Pettitt's statistic} \item{p.value}{significance #' probability for statistic} \item{change.point}{last position preceding #' change to new level} \item{change.time}{if available, time of change.point #' position} \item{change.size}{median of all differences between points after #' and up to change.point} or a matrix with corresponding columns if \code{x} #' is a matrix or data frame. #' @note The \code{change.point} returned by these functions is the last #' position before the series actually changes, for consistency with the #' original Pettitt test. But for reporting purposes, the following position #' might be more appropriate to call the \dQuote{change-point}. #' #' The Pettitt test produces a supposed change-point, even when the trend is #' smooth, or when the abrupt change is smaller than the long-term smooth #' change. Remove any smooth, long-term trend before applying this test. #' @references Pettitt, A. N. (1979) A non-parametric approach to the #' change-point problem. \emph{Journal of the Royal Statistical Society. Series #' C (Applied Statistics)} \bold{28(2),} 126--135. 
#' @keywords ts nonparametric #' @importFrom zoo is.zoo #' @importFrom graphics dotchart #' @importFrom stats is.ts setNames frequency #' @export #' @author #' Alan Jassby, James Cloern #' @examples #' #' # data from Pettitt (1979, Table 1): #' y <- c(-1.05, 0.96, 1.22, 0.58, -0.98, -0.03, -1.54, -0.71, -0.35, 0.66, #' 0.44, 0.91, -0.02, -1.42, 1.26, -1.02, -0.81, 1.66, 1.05, 0.97, 2.14, 1.22, #' -0.24, 1.60, 0.72, -0.12, 0.44, 0.03, 0.66, 0.56, 1.37, 1.66, 0.10, 0.80, #' 1.29, 0.49, -0.07, 1.18, 3.29, 1.84) #' pett(y) # K=232, p=0.0146, change-point=17, the same results as Pettitt #' # identify the year of a change-point in an annual time series: #' pett(Nile) #' # apply to a matrix time series: #' y <- ts.intersect(Nile, LakeHuron) #' pett(y) #' pett(y, plot = TRUE, xlab = "Change-point") #' legend("topleft", legend = "p < 0.05", pch = 19, bty="n") #' # note how a smooth trend can disguise a change-point: #' # smooth trend with change-point at 75 #' y <- 1:100 + c(rep(0, 75), rep(10, 25)) #' pett(y) # gives 50, erroneously #' pett(residuals(lm(y~I(1:100)))) # removing trend gives 75, correctly #' pett <- function(x, plot = FALSE, order = FALSE, pval = .05, pchs = c(19, 21), ...) { # validate args if (!is.numeric(x) && !is.matrix(x) && !is.data.frame(x)) { stop("'x' must be a vector, matrix, or data.frame") } # function for single vector pet <- function(x) { # missing data check trimna <- cumsum(!is.na(x)) > 0 & rev(cumsum(rev(!is.na(x)))) > 0 if (is.ts(x)) { tx <- time(x)[trimna] x1 <- window(x, start = tx[1], end = tx[length(tx)]) } else { x1 <- x[trimna] } if (anyNA(x1)) return(setNames(rep(NA, 4), c("pettitt.k", "p.value", "change.point", "change.time"))) # Pettitt change-point statistic n <- length(x1) outr <- outer(x1, x1, "-") d <- sign(outr) u <- sapply(1:(n - 1), function(i) sum(d[1:i, (i + 1):n])) pettitt.K <- max(abs(u)) # approximate probability value for Pettitt statistic p.value <- 2 * exp(-6 * pettitt.K ^ 2 / (n ^ 3 + n ^ 2)) p.value <- signif(p.value, 3) # change position change.point <- which.max(abs(u)) if (is.ts(x1) || is.zoo(x1)) { change.time <- time(x1)[change.point] } else { change.time <- change.point } # change size change.size <- median(outr[(change.point+1):n, 1:change.point]) c( pettitt.K = pettitt.K, p.value = p.value, change.point = change.point, change.time = change.time, change.size = change.size ) } # apply pet for each vector if (is.null(dim(x))) return(as.list(pet(x))) if (identical(ncol(x), 1)) return(as.list(pet(x[, 1]))) ans <- t(sapply(seq_len(ncol(x)), function(i) pet(x[, i]))) rownames(ans) <- colnames(x) # plot if TRUE if (!plot) { ans } else { if (order) ans <- ans[order(ans[, "change.time"]), ] pch <- ifelse(ans[, "p.value"] < pval, pchs[1], pchs[2]) dotchart(ans[, "change.time"], pch = pch, ...) } }
/scratch/gouwar.j/cran-all/cranData/wql/R/pett.R
setOldClass("zoo") setGeneric( name = 'phenoAmp', def = function(x, ...) standardGeneric("phenoAmp") ) #' @export setMethod( f = "phenoAmp", signature = "ts", definition = function(x, season.range = c(1, 12)) { # get series subset seasons <- season.range[1]:season.range[2] x1 <- tsSub(x, seas = seasons) # get statistics for each year range1 <- aggregate(x1, 1, max) - aggregate(x1, 1, min) var1 <- aggregate(x1, 1, var) mad1 <- aggregate(x1, 1, mad) mean1 <- aggregate(x1, 1, mean) median1 <- aggregate(x1, 1, median) # result cbind(range = range1, var = var1, mad = mad1, mean = mean1, median = median1) } ) #' @importFrom stats mad var setMethod( f = "phenoAmp", signature = "zoo", definition = function(x, month.range = c(1, 12)) { # validate args if (match(class(index(x)), c("Date", "POSIXct"), nomatch=0) == 0) stop('time index must be a Date or POSIXct object') # get series subset months1 <- month.range[1]:month.range[2] x1 <- x[as.numeric(format(index(x), "%m")) %in% months1] # get statistics for each year range1 <- aggregate(x1, years, max) - aggregate(x1, years, min) var1 <- aggregate(x1, years, var) mad1 <- aggregate(x1, years, mad) mean1 <- aggregate(x1, years, mean) median1 <- aggregate(x1, years, median) n <- aggregate(x1, years, length) # cbind(range = range1, var = var1, mad = mad1, mean = mean1, median = median1, n) } )
/scratch/gouwar.j/cran-all/cranData/wql/R/phenoAmp.R
setOldClass("zoo") setGeneric( name = 'phenoPhase', def = function(x, ...) standardGeneric("phenoPhase") ) #' @importFrom stats approxfun integrate optimize weighted.mean setMethod( f = "phenoPhase", signature = "ts", definition = function(x, season.range = c(1, 12), ...) { d1 <- data.frame(yr = floor(time(x)), season = cycle(x), val = as.numeric(x)) seasons <- season.range[1]:season.range[2] d2 <- d1[d1$season %in% seasons, ] yrs <- unique(d2$yr) yrs.ok <- table(d2$yr, is.na(d2$val))[, 1] == length(seasons) # max season a1 <- aggregate(d2$val, list(d2$yr), which.max) max.time <- ifelse(yrs.ok, a1$x, NA) max.time <- unlist(max.time) + seasons[1] - 1 # fulcrum fulc <- function(d) { if (sum(!is.na(d[,2])) < 2) { return(NA) } else { x <- d[, 1] y <- d[, 2] low <- min(x) up <- max(x) fun1 <- approxfun(x, y, rule = 2) fopt <- function(z) abs(integrate(fun1, low, z, ...)$value - 0.5*integrate(fun1, low, up, ...)$value) optimize(fopt, lower = low, upper = up)$minimum } } b1 <- by(d2[, c('season', 'val')], as.factor(d2$yr), fulc) fulcrum <- round(ifelse(yrs.ok, as.numeric(b1), NA), 2) # weighted mean season weighted.mean.df <- function(d) { d <- na.omit(d) if (nrow(d) == 0) { return(NA) } else { weighted.mean(d[,1], d[,2]) } } b2 <- by(d2[, c('season', 'val')], as.factor(d2$yr), weighted.mean.df) mean.wt <- round(ifelse(yrs.ok, as.numeric(b2), NA), 2) as.data.frame(cbind(year = yrs, max.time, fulcrum, mean.wt), row.names = seq_len(length(yrs))) } ) #' @importFrom zoo index #' @export setMethod( f = "phenoPhase", signature = "zoo", definition = function(x, season.range = c(1, 12), out = c('date', 'doy', 'julian'), ...) { # validate args if (!is(zoo::index(x), "DateTime")) stop('time index must be a DateTime object') indexx <- as.Date(index(x)) out <- match.arg(out) d1 <- data.frame(julday = julian(indexx, origin = as.Date("1970-01-01")), yr = years(indexx), season = monthNum(indexx), val = as.numeric(x)) seasons <- season.range[1]:season.range[2] d2 <- d1[d1$season %in% seasons, ] yrs <- unique(d2$yr) n <- table(d2$yr, is.na(d2$val))[, 1] # max day maxDay <- function(d) d[, 1][which.max(d[, 2])] b1 <- by(d2[, c('julday', 'val')], as.factor(d2$yr), maxDay) max.time <- as.numeric(b1) # fulcrum fulc <- function(d, m1 = season.range[1], m2 = season.range[2]) { if (sum(!is.na(d[,2])) < 2) { return(NA) } else { x <- d[, 1] y <- d[, 2] yr <- d[1, 3] season.length <- c(31, ifelse(leapYear(yr), 29, 28), 31, 30, 31, 30, 31, 31, 30, 31, 30, 31) lo <- julian(as.Date(paste(yr, m1, 1, sep = '-')), origin = as.Date("1970-01-01")) up <- julian(as.Date(paste(yr, m2, season.length[m2], sep = '-')), origin = as.Date("1970-01-01")) fun1 <- approxfun(x, y, rule = 2) fopt <- function(z) abs(integrate(fun1, lo, z, ...)$value - 0.5*integrate(fun1, lo, up, ...)$value) optimize(fopt, lower = lo, upper = up)$minimum } } b2 <- by(d2[, c('julday', 'val', 'yr')], as.factor(d2$yr), fulc) fulcrum <- ceiling(as.numeric(b2)) # weighted mean day weighted.mean.df <- function(d) { d <- na.omit(d) if (nrow(d) == 0) { return(NA) } else { weighted.mean(d[,1], d[,2]) } } b3 <- by(d2[, c('julday', 'val')], as.factor(d2$yr), weighted.mean.df) mean.wt <- ceiling(as.numeric(b3)) switch(match(out, c('date', 'doy', 'julian')), data.frame(year = yrs, max.time = as.Date(max.time), fulcrum = as.Date(fulcrum), mean.wt = as.Date(mean.wt), n, row.names = NULL), data.frame(year = yrs, max.time = as.POSIXlt(as.Date(max.time))$yday + 1, fulcrum = as.POSIXlt(as.Date(fulcrum))$yday + 1, mean.wt = as.POSIXlt(as.Date(mean.wt))$yday + 1, n, row.names = 
NULL), as.data.frame(cbind(year = yrs, max.time, fulcrum, mean.wt, n), row.names = seq_len(length(yrs))) ) } )
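
# Illustrative usage of the 'ts' method (a minimal sketch, assuming the
# sfbayChla dataset supplied with the package): yearly timing statistics
# (month of the maximum, fulcrum, and weighted-mean month)
phenoPhase(sfbayChla[, 's27'])
phenoPhase(sfbayChla[, 's27'], season.range = c(2, 4))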
/scratch/gouwar.j/cran-all/cranData/wql/R/phenoPhase.R
#' Plots seasonal patterns for a time series #' #' Divides the time range for a monthly time series into different eras and #' plots composites of seasonal pattern. Can also plot each month separately #' for the entire record. #' #' If \code{num.era} is an integer, the time range is divided into that many #' equal eras; otherwise, the time range is divided into eras determined by the #' \code{num.era} vector of years. When plotted \code{"by.era"} and #' \code{same.plot = FALSE}, the composite patterns are plotted in a horizontal #' row for easier comparison, which limits the number of periods that can be #' examined. Boxes based on fewer than half of the maximum possible years #' available are outlined in red. If \code{same.plot = TRUE}, a single plot is #' produced with era boxplots arranged by month. When plotted #' \code{"by.month"}, values for each month are first converted to standardized #' anomalies, i.e., by subtraction of long-term mean and division by standard #' deviation. As always, and especially with these plots, experiment with the #' device aspect ratio and size to get the clearest information. #' @author #' Alan Jassby, James Cloern #' @param x Monthly time series #' @param type Plot seasonal pattern by era, or each month for the entire #' record #' @param num.era Integer number of eras, or vector of era year breaks #' @param same.plot Should eras be plotted by month? #' @param ylab Optional character string label for y-axis #' @param num.col Number of columns when plotted \code{"by.month"} #' @return A plot (and the corresponding object of class \code{"ggplot"}). #' @seealso \code{\link{decompTs}}, \code{\link{seasonTrend}} #' @keywords Graphics ts #' @export #' @importFrom stats cycle #' @importFrom reshape2 melt #' @importFrom ggplot2 scale_x_discrete scale_y_continuous #' scale_colour_manual geom_linerange #' @examples #' #' chl27 <- sfbayChla[, "s27"] #' plotSeason(chl27, num.era = c(1978, 1988, 1998, 2008), ylab = "Stn 27 Chl-a") #' \dontrun{ #' plotSeason(chl27, num.era = 3, same.plot = FALSE, ylab = "Stn 27 Chl-a") #' plotSeason(chl27, "by.month", ylab = "Stn 27 Chl-a") #' } #' plotSeason <- function(x, type = c("by.era", "by.month"), num.era = 4, same.plot = TRUE, ylab = NULL, num.col = 3) { # Validate args if (!is(x, "ts") || is(x, "mts")) stop("x must be a single 'ts'") type <- match.arg(type) # Turn time series into data.frame sx <- start(x)[1] ex <- end(x)[1] x <- window(x, start = sx, end = c(ex, 12), extend = TRUE) d <- data.frame(x = as.numeric(x), mon = ordered(month.abb[cycle(x)], levels = month.abb), yr = as.numeric(floor(time(x)))) # Take care of case where num.era is a scalar if (length(num.era) == 1) { if (num.era < 1 || round(num.era) != num.era) { stop("num.era must be a whole number > 0") } else { num.era <- round((0:num.era) * (ex - sx) / num.era + sx, 0) } } if (type == "by.era") { # Break data into eras d$era <- cut(d$yr, breaks = num.era, include.lowest = TRUE, dig.lab = 4, ordered_result = TRUE) colnames(d)[1] <- "value" d <- na.omit(d) # Find missing fraction by month and era t0 <- table(d$mon, d$era) t1 <- sweep(t0, 2, diff(num.era), "/") t2 <- t1 < 0.5 t3 <- melt(t2) colnames(t3) <- c("mon", "era", "too.few") t4 <- within(t3, { mon <- ordered(mon, levels = levels(d$mon)) if (length(unique(era)) > 1) era <- ordered(era, levels = levels(d$era)) } ) d1 <- merge(d, t4) if (same.plot) { # Nest eras within months ggplot(d1, aes_string(x = "mon", y = "value", fill = "era")) + geom_boxplot(size = .2, position = "dodge") + labs(x = "", y = ylab, 
fill = "Era") } else { # Nest months within eras cols <- c(`TRUE` = "red", `FALSE` = "blue") p1 <- ggplot(d1, aes_string(x = "mon", y = "value", colour = "too.few")) + geom_boxplot(size = .2) + scale_x_discrete("", breaks = month.abb, labels = c("Jan", " ", " ", "Apr", " ", " ", "Jul", " ", " ", "Oct", " ", " ")) + scale_y_continuous(ylab) + scale_colour_manual("", values = cols, guide = "none") + theme(panel.grid.minor = element_blank(), axis.text.x = element_text(angle = 45, colour = "grey50")) if (length(num.era) > 2) p1 <- p1 + facet_wrap(~era, nrow = 1) p1 } } else { # Plot standardized anomalies for each month x1 <- ts2df(x) x2 <- ts(x1, start = start(x)) plotTsAnom(x2, ylab = ylab, scales = "free_y") } }
/scratch/gouwar.j/cran-all/cranData/wql/R/plotSeason.R
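The era-splitting step in plotSeason() is easy to check on its own. A minimal sketch, assuming an illustrative 1978-2009 record, of how a scalar num.era is expanded into break years and how cut() then assigns each year to an era:

# Sketch of the era-break logic used in plotSeason() (illustrative years).
sx <- 1978; ex <- 2009                      # first and last year of the record
num.era <- 4                                # scalar request: four equal eras
breaks <- round((0:num.era) * (ex - sx) / num.era + sx, 0)
breaks                                      # 1978 1986 1994 2001 2009
yr <- sx:ex
era <- cut(yr, breaks = breaks, include.lowest = TRUE, dig.lab = 4,
           ordered_result = TRUE)
table(era)                                  # number of years falling in each era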
#' Time series plot #' #' Creates line plot of vector or matrix time series, including any data #' surrounded by NAs as additional points. #' #' The basic time series line plot ignores data points that are adjacent to #' missing data, i.e., not directly connected to other observations. This can #' lead to an uninformative plot when there are many missing data. If one #' includes both a point and line plot, the resulting graph can be cluttered #' and difficult to decipher. \code{plotTs} plots only isolated points as well #' as lines joining adjacent observations. #' #' Options are passed to the underlying \code{facet_wrap} function in #' \pkg{ggplot2}. The main ones of interest are \code{ncol} for setting the #' number of plotting columns and \code{scales = "free_y"} for allowing the y #' scales of the different plots to be independent. #' @author #' Alan Jassby, James Cloern #' @param x matrix or vector time series #' @param dot.size size of dots representing isolated data points #' @param xlab optional x-axis label #' @param ylab optional y-axis label #' @param strip.labels labels for individual time series plots #' @param ... additional options #' @importFrom zoo as.Date #' @importFrom ggplot2 theme element_text geom_line labs geom_point aes #' @export #' @return A plot or plots and corresponding object of class \dQuote{ggplot}. #' @seealso \code{\link{plotTsAnom}} #' @keywords Graphics ts #' @examples #' #' # Chlorophyll at 4 stations in SF Bay #' chl <- sfbayChla[, 1:4] #' plotTs(chl, dot.size = 1.5, ylab = 'Chl-a', strip.labels = paste('Station', #' substring(colnames(chl), 2, 3)), ncol = 1, scales = "free_y") #' plotTs <- function(x, dot.size = 1, xlab = NULL, ylab = NULL, strip.labels = colnames(x), ...) { # Validate arguments if (!is.ts(x)) stop("x must be of class 'ts'") if (missing(xlab)) xlab <- "" if (missing(ylab)) ylab <- "" if (is.matrix(x)) { # a matrix time series # identify isolated points x.forward <- rbind(rep(NA, ncol(x)), x[1:(nrow(x)-1), ]) x.back <- rbind(x[2:nrow(x), ], rep(NA, ncol(x))) iso.pts <- is.na(x.forward) & is.na(x.back) & !is.na(x) iso <- data.frame(time = zoo::as.Date(x), ifelse(iso.pts & !is.na(x), x, NA)) iso1 <- melt(iso, id = 'time') # Create data frame d1 <- data.frame(time = as.Date(x), x) d2 <- melt(d1, id = 'time') d2 <- within(d2, variable <- factor(variable, levels = levels(variable), labels = strip.labels)) d2 <- cbind(d2, iso = iso1[, 'value']) # Plot g1 <- ggplot(d2) + geom_line(aes_string(x = "time", y = "value")) + facet_wrap(~ variable, ...) + labs(x = xlab, y = ylab) + theme(axis.text.x = element_text(angle=45, colour="grey50")) if (sum(!is.na(d2$iso)) == 0) { g1 } else { g1 + geom_point(aes(x = time, y = iso), size = dot.size, na.rm = TRUE) } } else { # a vector time series # identify isolated points x.forward <- c(NA, x[1:(length(x)-1)]) x.back <- c(x[2:length(x)], NA) iso.pts <- is.na(x.forward) & is.na(x.back) & !is.na(x) iso <- ifelse(iso.pts, x, NA) # Create data frame d1 <- data.frame(time = as.Date(x), value = as.numeric(x)) d2 <- cbind(d1, iso) # Plot g1 <- ggplot(d2) + geom_line(aes_string(x = "time", y = "value")) + labs(x = xlab, y = ylab) + theme(panel.grid.minor = element_blank()) if (sum(!is.na(d2$iso)) == 0) { g1 } else { g1 + geom_point(aes(x = time, y = iso), size = dot.size, na.rm = TRUE) } } }
/scratch/gouwar.j/cran-all/cranData/wql/R/plotTs.R
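The isolated-point test that drives the extra geom_point layer can be seen on a toy vector: a value is isolated when both neighbours are missing, so a line plot alone would drop it. A minimal sketch with made-up data:

# Minimal sketch of the isolated-point test used in plotTs() (made-up data).
x <- c(NA, 2.1, NA, NA, 3.4, 3.8, NA, 5.0, NA)
x.forward <- c(NA, x[1:(length(x) - 1)])    # value one step earlier
x.back    <- c(x[2:length(x)], NA)          # value one step later
iso.pts <- is.na(x.forward) & is.na(x.back) & !is.na(x)
which(iso.pts)                              # positions 2 and 8 get their own plot symbols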
#' Anomaly plot of time series #' #' Series are illustrated by vertical lines extending from individual data #' values to the long-term mean. The axes are not scaled in any way. Anomaly #' plots are useful for visualizing shifts in time series levels. #' #' Options are passed to the underlying \code{facet_wrap} function in #' \pkg{ggplot2}. The main ones of interest are \code{ncol} for setting the #' number of plotting columns and \code{scales = "free_y"} for allowing the y #' scales of the different plots to be independent. #' @author #' Alan Jassby, James Cloern #' @param x matrix or vector time series #' @param xlab optional x-axis label #' @param ylab optional y-axis label #' @param strip.labels labels for individual time series plots #' @param ... additional options #' @return A plot and corresponding object of class \dQuote{ggplot}. #' @seealso \code{\link{plotTs}} #' @keywords Graphics ts #' @export #' @examples #' #' # Spring bloom size for 6 stations in SF Bay #' bloom <- aggregate(sfbayChla[, 1:6], 1, meanSub, sub=3:5) #' plotTsAnom(bloom, ylab = 'Chl-a', strip.labels = paste('Station', #' substring(colnames(bloom), 2, 3)), ncol = 2, scales = "free_y") #' plotTsAnom <- function(x, xlab = NULL, ylab = NULL, strip.labels = colnames(x), ...) { # Validate arguments if (!is.ts(x)) stop("x must be of class 'ts'") if (missing(xlab)) xlab <- "" if (missing(ylab)) ylab <- "" if (is.matrix(x)) { # a matrix time series # fill possible spaces in column names so melt+merge will work strip.labels <- strip.labels colnames(x) <- gsub(' ', '.', colnames(x)) # Create data frame x.mean <- apply(x, 2, mean, na.rm = TRUE) x.mean.df <- data.frame(variable = factor(names(x.mean)), x.mean) d <- data.frame(time=as.Date(time(x)), x) d1 <- melt(d, id = 'time') d2 <- merge(d1, x.mean.df) d3 <- within(d2, variable <- factor(variable, levels = levels(variable), labels = strip.labels)) d3 <- na.omit(d3) d3$ymin. <- with(d3, ifelse(value >= x.mean, x.mean, value)) d3$ymax. <- with(d3, ifelse(value >= x.mean, value, x.mean)) d3$colour. <- with(d3, value >= x.mean) # Plot ggplot(d3, aes_string(x="time", y="value", ymin="ymin.", ymax="ymax.", colour="colour.")) + geom_linerange() + geom_hline(aes(yintercept = x.mean), size = 0.25) + labs(x = xlab, y = ylab) + facet_wrap(~ variable, ...) + theme(legend.position='none', panel.grid.minor = element_blank(), axis.text.x = element_text(angle=45, colour="grey50")) } else { # a vector time series # Create data frame x.mean <- mean(x, na.rm = TRUE) d1 <- data.frame(time = as.Date(time(x)), x = as.numeric(x), x.mean) d1 <- na.omit(d1) d1$ymin. <- with(d1, ifelse(x >= x.mean, x.mean, x)) d1$ymax. <- with(d1, ifelse(x >= x.mean, x, x.mean)) d1$colour. <- with(d1, x >= x.mean) # Plot ggplot(d1, aes_string(x="time", y="x", ymin="ymin.", ymax="ymax.", colour="colour.")) + geom_linerange() + geom_hline(aes(yintercept = x.mean), size = 0.25) + labs(x = xlab, y = ylab) + theme(legend.position='none', panel.grid.minor = element_blank(), axis.text.x = element_text(angle=45, colour="grey50")) } }
/scratch/gouwar.j/cran-all/cranData/wql/R/plotTsAnom.R
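The segments drawn by plotTsAnom() come from simple arithmetic against the long-term mean; this sketch with arbitrary numbers shows the ymin/ymax pairs handed to geom_linerange():

# Sketch of the anomaly segments computed inside plotTsAnom() (arbitrary values).
value  <- c(3.2, 5.8, 4.1, 7.6, 2.0)
x.mean <- mean(value)                            # long-term mean (4.54 here)
ymin   <- ifelse(value >= x.mean, x.mean, value) # lower end of each segment
ymax   <- ifelse(value >= x.mean, value, x.mean) # upper end of each segment
above  <- value >= x.mean                        # colours positive vs negative anomalies
data.frame(value, ymin, ymax, above)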
#' Image plot of monthly time series #' #' Monthly values are transformed into deciles or other bins, and corresponding #' colors are plotted in a month by year matrix. #' #' If \code{four = TRUE}, then \code{x} is first divided into a positive and #' negative bin. Each bin is then further divided into two bins by its mean, #' yielding a total of four bins. If \code{four=FALSE}, then \code{x} is simply #' divided into deciles. In either case, each bin has its own assigned color, #' with colors ranging from dark blue (smallest numbers) through light blue and #' pink to red. #' #' Although \code{four = TRUE} can be useful for any data in which 0 represents #' a value with special significance, it is especially so for data converted #' into log-anomalies, i.e., \code{log10(x/xbar)} where \code{xbar = mean(x, #' na.rm=TRUE)}. The mean month then has value 0, and a value of -1, for #' example, indicates original data equal to one-tenth the mean. Log-anomaly #' transforms can be particularly appropriate for biological populations, in #' which variability is often approximately proportional to the mean. #' #' When \code{loganom = TRUE}, the anomalies are calculated with respect to the #' overall mean month. This differs from, for example, the log-anomaly #' zooplankton plot of O'Brien et al. (2008), in which a monthly anomaly is #' calculated with respect to the mean value of the same month. To get the #' latter behavior, set \code{overall = FALSE}. A further option is to set #' \code{stat = "median"} rather than the default \code{stat = "mean"}, in #' which case \code{xbar = median(x, na.rm = TRUE)}, and the positive and #' negative bins are each divided into two bins by their median instead of #' mean. Using combinations of these different options can reveal complementary #' information. #' #' You may want to set \code{square = FALSE} and then adjust the plot window #' manually if you plan to use the plot in a subsequent layout or if there is #' too much white space. #' @author #' Alan Jassby, James Cloern #' @param x monthly time series. #' @param plot.title plot title. #' @param legend.title legend title. #' @param four logical indicating if data should be binned into 4 special #' groups or into deciles. #' @param loganom logical indicating if data should be transformed into #' log-anomalies. #' @param square logical indicating if tiles should be square. #' @param legend logical indicating if a legend should be included. #' @param trim logical indicating if leading and trailing NA values should be #' removed. #' @param overall determines whether anomalies are calculated with respect to #' overall mean or to long-term mean for the same month. #' @param stat determines whether anomalies are calculated and binned using #' mean or median. #' @return An image plot of monthly values classified into either deciles or #' into four bins as described above (and corresponding object of class #' \dQuote{ggplot}). #' @importFrom zoo as.zoo na.trim #' @importFrom ggplot2 geom_tile scale_x_continuous scale_y_discrete #' theme_bw coord_equal #' @importFrom grDevices colorRampPalette #' @importFrom stats quantile #' @export #' @references O'Brien T., Lopez-Urrutia A., Wiebe P.H., Hay S. (editors) #' (2008) \emph{ICES Zooplankton Status Report 2006/2007.} ICES Cooperative #' Research Report 292, International Council for the Exploration of the Sea, #' Copenhagen, 168 p. 
#' @keywords hplot ts #' @examples #' #' # plot log-anomalies in four bins #' chl27 = sfbayChla[, 's27'] #' plotTsTile(chl27, legend.title = 'Chl log-anomaly') #' #' # plot deciles #' plotTsTile(chl27, plot.title = 'SF Bay station 27', legend.title = #' 'chlorophyll', four = FALSE, loganom = FALSE, square = FALSE) #' plotTsTile <- function(x, plot.title = NULL, legend.title = NULL, four = TRUE, loganom = TRUE, square = TRUE, legend = TRUE, trim = TRUE, overall = TRUE, stat = c("median", "mean")) { # Validate args if (!is(x, "ts") || is(x, "mts") || !identical(frequency(x), 12)) stop("x must be a vector of class 'ts' with frequency = 12") stat <- match.arg(stat) # Define center function center <- function(x, type=stat) { switch(type, mean = mean(x, na.rm=TRUE), median = median(x, na.rm=TRUE) ) } # trim leading and trailing NAS if (trim) { x <- as.zoo(x) x <- na.trim(x) x <- as.ts(x) } # Complete partial years by padding with NAs sx <- start(x)[1] ex <- end(x)[1] x1 <- window(x, start = c(sx, 1), end = c(ex, 12), extend = TRUE) # Transform to log-anomalies if (loganom) { if (any(x1 <= 0, na.rm = TRUE)) { stop("All values must be positive if loganom=TRUE") } else { if (overall) { x1 <- x1/center(x1) } else { x1 <- as.matrix(ts2df(x1)) x1 <- sweep(x1, 2, apply(x1, 2, center), "/") x1 <- ts(as.vector(t(x1)), start = c(sx, 1), frequency=12) } } x1 <- log10(x1) } # Break data. if (four) { mmin <- min(x1, na.rm = TRUE) mlo <- center(x1[x1 < 0]) mhi <- center(x1[x1 > 0]) mmax <- max(x1, na.rm = TRUE) the.breaks <- c(mmin, mlo, 0, mhi, mmax) } else { the.breaks <- quantile(x1, probs = seq(0, 1, 0.1), na.rm = TRUE) } len <- length(the.breaks) if (length(unique(the.breaks)) < len) stop("Breaks between groups are\nnot unique: insufficient unique data.") x2 <- cut(x1, breaks = the.breaks, include.lowest = TRUE, dig.lab = 2) x3 <- data.frame(yr = floor(time(x1)), mon = ordered(month.abb[cycle(x1)], levels = month.abb), value = x2) # Plot it. mypalette <- colorRampPalette(c("darkblue", "lightblue", "pink", "red")) cols <- mypalette(len - 1) p1 <- ggplot(x3, aes_string(x="yr", y="mon", fill="value")) + geom_tile(colour = "white", size = 0.25) + scale_x_continuous(name = "", expand = c(0, 0)) + scale_y_discrete(name = "", expand = c(0, 0)) + scale_fill_manual(name = legend.title, values = cols, breaks = levels(x3$value), labels = levels(x3$value)) + theme_bw() + theme(panel.grid.minor = element_blank(), panel.grid.major = element_blank()) + labs(title = plot.title) if (!legend) p1 <- p1 + theme(legend.position = "none") if (square) p1 + coord_equal() else p1 }
/scratch/gouwar.j/cran-all/cranData/wql/R/plotTsTile.R
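The four-bin classification can be reproduced outside the function: log-anomalies are log10(x/centre), the negative and positive values are each split at their own centre, and cut() does the binning. A sketch using the sfbayChla data shipped with the package and the median as the centre (with the signature stat = c("median", "mean"), match.arg() selects "median" when stat is not supplied):

# Sketch of the four-bin log-anomaly classification in plotTsTile(),
# using station 27 chlorophyll and the median as the centre.
library(wql)
chl27 <- sfbayChla[, "s27"]
la <- log10(chl27 / median(chl27, na.rm = TRUE))   # log-anomalies; centre month = 0
breaks <- c(min(la, na.rm = TRUE),
            median(la[la < 0], na.rm = TRUE),      # centre of the negative values
            0,
            median(la[la > 0], na.rm = TRUE),      # centre of the positive values
            max(la, na.rm = TRUE))
table(cut(la, breaks = breaks, include.lowest = TRUE, dig.lab = 2))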
#' Seasonal and Regional Kendall trend test #' #' Calculates the Seasonal or Regional Kendall test of trend significance, #' including an estimate of the Sen slope. #' #' The Seasonal Kendall test (Hirsch et al. 1982) is based on the Mann-Kendall #' tests for the individual seasons (see \code{\link{mannKen}} for additional #' details). \emph{p}-values provided here are not corrected for serial #' correlation among seasons. #' #' If \code{plot = TRUE}, then either the Sen slope in units per year #' (\code{type = "slope"}) or the relative slope in fraction per year #' (\code{type = "relative"}) is plotted. The relative slope is defined #' identically to the Sen slope except that each slope is divided by the first #' of the two values that describe the slope. Plotting the relative slope is #' useful when the variables in \code{x} are always positive and have different #' units. #' #' The plot symbols indicate, respectively, that the trend is statistically #' significant or not. The plot can be customized by passing any arguments used #' by \code{\link{dotchart}} such as \code{xlab}, as well as graphical #' parameters described in \code{\link{par}}. #' #' If \code{mval} or more of the seasonal slope estimates are missing, then #' that trend is considered to be missing. The seasonal slope estimate #' (\code{\link{mannKen}}), in turn, is missing if half or more of the possible #' comparisons between the first and last 20\% of the years are missing. #' #' The function can be used in conjunction with \code{mts2ts} to calculate a #' Regional Kendall test of significance for annualized data, along with a #' regional estimate of trend (Helsel and Frans 2006). See the examples below. #' #' @param x A numeric vector, matrix or data frame made up of seasonal time #' series #' @param plot Should the trends be plotted when x is a matrix or data frame? #' @param type Type of trend to be plotted, actual or relative to series median #' @param order Should the plotted trends be ordered by size? #' @param pval p-value for significance #' @param mval Minimum fraction of seasons needed with non-missing slope #' estimates #' @param pchs Plot symbols for significant and not significant trend #' estimates, respectively #' @param ... Other arguments to pass to plotting function #' @author #' Alan Jassby, James Cloern #' @return A list of the following if \code{x} is a vector: \code{seaKen} #' returns a list with the following members: \item{sen.slope }{Sen slope} #' \item{sen.slope.pct}{Sen slope as percent of mean} #' \item{p.value}{significance of slope} \item{miss}{for each season, the #' fraction missing of slopes connecting first and last 20\% of the years} or a #' matrix with corresponding columns if \code{x} is a matrix or data frame. #' @seealso \code{\link{mannKen}}, \code{\link{mts2ts}}, #' \code{\link{trendHomog}} #' @references Helsel, D.R. and Frans, L. (2006) Regional Kendall test for #' trend. \emph{Environmental Science and Technology} \bold{40(13),} 4066-4073. #' #' Hirsch, R.M., Slack, J.R., and Smith, R.A. (1982) Techniques of trend #' analysis for monthly water quality data. \emph{Water Resources Research} #' \bold{18,} 107-121. 
#' @keywords ts #' @export #' @examples #' #' # Seasonal Kendall test: #' chl <- sfbayChla # monthly chlorophyll at 16 stations in San Francisco Bay #' seaKen(sfbayChla[, 's27']) # results for a single series at station 27 #' seaKen(sfbayChla) # results for all stations #' seaKen(sfbayChla, plot=TRUE, type="relative", order=TRUE) #' #' # Regional Kendall test: #' # Use mts2ts to change 16 series into a single series with 16 "seasons" #' seaKen(mts2ts(chl)) # too many missing data #' # better when just Feb-Apr, spring bloom period, #' # but last 4 stations still missing too much. #' seaKen(mts2ts(chl, seas = 2:4)) #' seaKen(mts2ts(chl[, 1:12], 2:4)) # more reliable result #' seaKen <- function(x, plot = FALSE, type = c("slope", "relative"), order = FALSE, pval = .05, mval = .5, pchs = c(19, 21), ...) { # validate args if (!is.numeric(x) && !is.matrix(x) && !is.data.frame(x)) stop("'x' must be a vector, matrix, or data.frame") if (!is.null(ncol(x)) && is.null(colnames(x))) colnames(x) <- paste("series_", seq_len(ncol(x)), sep="") type <- match.arg(type) # test for single series sk <- function(x) { # validate args if (!is.ts(x)) stop("'x' must be of class 'ts'") if (identical(frequency(x), 1)) stop("'x' must be a seasonal time series with frequency > 1") # extend series to full years fr <- frequency(x) xmod <- length(x) %% fr if (!identical(xmod, 0)) x <- ts(c(x, rep(NA, fr - xmod)), start = start(x), frequency = fr) # apply mannKen to matrix of months x1 <- matrix(x, ncol = fr, byrow = TRUE) mk1 <- mannKen(x1) # calculate sen slope slopes <- slopes.rel <- NULL for (m in 1:fr) { ## select data for current season xm <- x[cycle(x) == m] tm <- time(x)[cycle(x) == m] ## calculate slopes for current season outr <- outer(xm, xm, '-')/outer(tm, tm, '-') slopes.m <- outr[lower.tri(outr)] slopes <- c(slopes, slopes.m) outr.rel <- sweep(outr, 2, xm, '/') slopes.rel.m <- outr.rel[lower.tri(outr.rel)] slopes.rel <- c(slopes.rel, slopes.rel.m) } sen.slope <- median(slopes, na.rm = TRUE) sen.slope.rel <- median(slopes.rel, na.rm = TRUE) # calculate sen slope significance S <- sum(mk1[, "S"]) varS <- sum(mk1[, "varS"]) Z <- (S - sign(S)) / sqrt(varS) p.value <- 2 * pnorm(-abs(Z)) miss <- round(sum(mk1[, "miss"] >= .5) / fr, 3) c(sen.slope = sen.slope, sen.slope.rel = sen.slope.rel, p.value = p.value, miss = miss) } # apply sk for each series if (is.null(dim(x))) return(as.list(sk(x))) if (ncol(x) == 1) return(as.list(sk(x[, 1]))) ans <- t(sapply(seq_len(ncol(x)), function(i) sk(x[, i]))) rownames(ans) <- colnames(x) # plot if TRUE if (!plot) { ans } else { v1 <- switch(type, slope = "sen.slope", relative = "sen.slope.rel") if (order) ans <- ans[order(ans[, v1]), ] pch <- ifelse(ans[, "miss"] >= mval, NA, ifelse(ans[, "p.value"] < pval, pchs[1], pchs[2])) dotchart(ans[, v1], pch = pch, ...) } }
/scratch/gouwar.j/cran-all/cranData/wql/R/seaKen.R
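The Sen slope reported by seaKen() is the median of all pairwise within-season slopes, pooled across seasons, and the p-value combines the seasonal Mann-Kendall S and varS statistics. A sketch of the within-season slope calculation for January values only, using the package's sfbayChla data:

# Pairwise slopes for a single season (January), as pooled inside seaKen().
library(wql)
chl27 <- sfbayChla[, "s27"]
xm <- chl27[cycle(chl27) == 1]          # January values
tm <- time(chl27)[cycle(chl27) == 1]    # their decimal years
slopes <- outer(xm, xm, "-") / outer(tm, tm, "-")
slopes <- slopes[lower.tri(slopes)]     # each pair of years counted once
median(slopes, na.rm = TRUE)            # January alone; seaKen() pools all 12 months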
#' Rolling Seasonal Kendall trend test #' #' Calculates the Seasonal Kendall test of significance, including an estimate #' of the Sen slope, for rolling windows over a time series. #' #' The function \code{seaRoll} applies \code{seaKen} to rolling time windows of #' width \code{w}. A minimum \code{w} of five years is required. For any #' window, a season is considered missing if half or more of the possible #' comparisons between the first and last 20\% of the years is missing. If #' \code{mval} or more of the seasons are missing, then that windowed trend is #' considered to be missing. #' #' If \code{plot = TRUE}, a point plot will be drawn with the Sen slope plotted #' at the leading year of the trend window. The plot symbols indicate, #' respectively, that the trend is significant or not significant. The plot can #' be customized by passing any arguments used by \code{\link{plot.default}}, #' as well as graphical parameters described in \code{\link{par}}. #' #' @param x A seasonal time series vector. #' @param w The window width for \dQuote{rolling} estimates of slope. #' @param plot Indicates if a plot should be drawn #' @param pval p-value for significance #' @param mval Minimum fraction of seasons needed with non-missing slope #' estimates #' @param pchs Plot symbols for significant and not significant trend #' estimates, respectively #' @param xlab Optional label for x-axis #' @param ylab Optional label for y-axis #' @param ... Other arguments to pass to plotting function #' @author #' Alan Jassby, James Cloern #' @return \code{seaRoll} returns a matrix with one row per time window #' containing the Sen slope, the relative Sen slope, and the \emph{p-}value. #' Rows are labelled with the leading year of the window. #' @seealso \code{\link{seaKen}} #' @keywords ts #' @export #' @examples #' #' chl27 <- sfbayChla[, 's27'] #' seaRoll(chl27) #' seaRoll(chl27, plot = TRUE) #' seaRoll <- function(x, w = 10, plot = FALSE, pval = .05, mval = .5, pchs = c(19, 21), xlab = NULL, ylab = NULL, ...) { # validate args if (!is.ts(x) || is.matrix(x)) stop("'x' must be a vector of class 'ts'") if (w < 5) stop("window must be at least 5 years") # result for each window sr <- function(y, x1=x, w1=w, mval1=mval) { # set current window and get slope fr <- frequency(x1) x2 <- window(x1, start = y, end = c(y + w1 - 1, fr), extend = TRUE) sk <- seaKen(x2) # make sure enough data are present miss.ok <- sum(sk$miss >= 0.5) / fr < mval1 N <- sum(!is.na(x2)) if (N < 3 * fr || N < 10 || !miss.ok) { c(NA, NA, NA) } else { c(sk$sen.slope, sk$sen.slope.rel, sk$p.value) } } # combine windows sx <- start(x)[1] ex <- end(x)[1] ans <- t(sapply(sx:(ex - w + 1), function(i) sr(i))) rownames(ans) <- sx:(ex - w + 1) colnames(ans) <- c("sen.slope", "sen.slope.rel", "p.value") # plot if TRUE if (!plot) { ans } else { xlab <- if (is.null(xlab)) "" else xlab ylab <- if (is.null(ylab)) "" else ylab pch <- ifelse(ans[, "p.value"] < pval, pchs[1], pchs[2]) plot(ans[, "sen.slope"] ~ rownames(ans), pch = pch, xlab = xlab, ylab = ylab, ...) } }
/scratch/gouwar.j/cran-all/cranData/wql/R/seaRoll.R
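seaRoll() is, in essence, seaKen() applied to successive windows; the equivalent by hand (omitting seaRoll's missing-data screen), assuming the sfbayChla data and a 10-year window:

# Hand-rolled version of seaRoll(): Seasonal Kendall over successive 10-year windows.
library(wql)
chl27 <- sfbayChla[, "s27"]
w <- 10
yrs <- start(chl27)[1]:(end(chl27)[1] - w + 1)
out <- t(sapply(yrs, function(y) {
  xw <- window(chl27, start = y, end = c(y + w - 1, 12), extend = TRUE)
  unlist(seaKen(xw)[c("sen.slope", "p.value")])
}))
rownames(out) <- yrs
head(out)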
#' Determine seasonal trends #' #' Finds the trend for each season and each variable in a time series. #' #' The Mann-Kendall test is applied for each season and series (in the case of #' a matrix). The actual and relative Sen slope (actual divided by median for #' that specific season and series); the p-value for the trend; and the #' fraction of missing slopes involving the first and last fifths of the data #' are calculated (see \code{\link{mannKen}}). #' #' If \code{plot = TRUE}, each season for each series is represented by a bar #' showing the trend. The fill colour indicates whether \eqn{p < 0.05} or not. #' If the fraction of missing slopes is 0.5 or more, the corresponding trends #' are omitted. #' #' Parameters can be passed to the plotting function, in particular, to #' \code{facet_wrap} in \pkg{ggplot2}. The most useful parameters here are #' \code{ncol} (or \code{nrow}), which determines the number of columns (or #' rows) of plots, and \code{scales}, which can be set to \code{"free_y"} to #' allow the y-axis to change for each time series. Like all \pkg{ggplot2} #' objects, the plot output can also be customized extensively by modifying and #' adding layers. #' #' @param x Time series vector, or time series matrix with column names #' @param plot Should the results be plotted? #' @param type Type of trend to be plotted, actual or relative to series median #' @param pval p-value for significance #' @param ... Further options to pass to plotting function #' @importFrom ggplot2 geom_bar scale_fill_manual element_blank #' @importFrom stats window #' @author #' Alan Jassby, James Cloern #' @return A data frame with the following fields: \item{series}{series names} #' \item{season}{season number} \item{sen.slope}{Sen slope in original units #' per year} \item{sen.slope.rel}{Sen slope divided by median for that specific #' season and series} \item{p}{p-value for the trend according to the #' Mann-Kendall test.} \item{missing}{Proportion of slopes joining first and #' last fifths of the data that are missing} #' @seealso \code{\link{mannKen}}, \code{\link{plotSeason}}, #' \code{\link[ggplot2]{facet_wrap}} #' @keywords Graphics ts #' @export #' @examples #' #' x <- sfbayChla #' seasonTrend(x) #' seasonTrend(x, plot = TRUE, ncol = 4) #' seasonTrend <- function(x, plot = FALSE, type = c("slope", "relative"), pval = .05, ...) 
{ # validate args if (!is(x, "ts")) stop("x must be a 'ts'") type <- match.arg(type) # extend to full years first <- start(x)[1] last <- end(x)[1] fr <- frequency(x) x <- window(x, start = first, end = c(last, fr), extend = TRUE) # function for single vector st <- function(x) { x1 <- matrix(x, ncol = fr, byrow = TRUE) mannKen(x1)[, c(1:3, 6)] } # construct a data frame of all trends if (!is.matrix(x)) { ans <- data.frame(season = as.factor(1:fr), st(x), row.names = 1:fr) } else { nc <- ncol(x) colx <- colnames(x) series <- factor(rep(colx, each = fr), levels = colx, ordered = TRUE) season <- as.factor(rep(1:fr, times = nc)) ans0 <- do.call(rbind, lapply(1:nc, function(i) st(x[, i]))) ans <- data.frame(series, season, ans0, row.names = seq_len(nrow(ans0))) } if (!plot) return(ans) ans[ans$miss >= 0.5, c("sen.slope", "sen.slope.rel")] <- NA ans$sig <- ifelse(ans$p.value < pval, TRUE, FALSE) v1 <- switch(type, slope = "sen.slope", relative = "sen.slope.rel") ylb <- switch(type, slope = expression(paste("Trend, units ", yr^{-1})), relative = expression(paste("Relative trend, ", yr^{-1}))) names(ans)[match(v1, names(ans))] <- "trend" plt <- ggplot(ans, aes_string(x="season", y="trend", fill="sig")) + geom_bar(stat = "identity") + scale_fill_manual(name = "", values = c(`FALSE` = "grey65", `TRUE` = "dodgerblue"), labels = c(bquote(italic(p)>=.(pval)),bquote(italic(p)<.(pval)))) + labs(x = "Season", y = ylb) + theme(panel.grid.minor = element_blank()) if (!is.matrix(x)) return(plt) plt + facet_wrap(~ series, ...) }
/scratch/gouwar.j/cran-all/cranData/wql/R/seasonTrend.R
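Internally each season is a column of a year-by-season matrix handed to mannKen(); that reshaping step can be reproduced directly (sfbayChla assumed):

# Reshape a monthly series into a year x month matrix, one column per season,
# which is what seasonTrend() passes to mannKen().
library(wql)
chl27 <- sfbayChla[, "s27"]
fr <- frequency(chl27)
x <- window(chl27, start = start(chl27)[1], end = c(end(chl27)[1], fr), extend = TRUE)
m <- matrix(x, ncol = fr, byrow = TRUE)
colnames(m) <- month.abb
round(mannKen(m)[, c("sen.slope", "p.value")], 3)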
#' Trend homogeneity test #' #' Tests for homogeneity of seasonal trends using method proposed by van Belle #' and Hughes (1984). Seasons with insufficient data as defined in #' \code{\link{mannKen}} are ignored. #' #' #' @param x A vector time series with frequency > 1 #' @return \item{chisq.trend}{"Trend" chi-square.} #' \item{chisq.homog}{"Homogeneous" chi-square.} \item{p.value}{For null #' hypothesis that trends are homogeneous.} \item{n}{Number of seasons used.} #' @seealso \code{\link{seaKen}} #' @references van Belle, G. and Hughes, J.P. (1984) Nonparametric tests for #' trend in water quality. \emph{Water Resources Research} \bold{20,} 127-136. #' @keywords ts #' @importFrom stats pchisq #' @author #' Alan Jassby, James Cloern #' @export #' @examples #' #' ## Apply to a monthly vector time series to test homogeneity #' ## of seasonal trends. #' x <- sfbayChla[, 's27'] #' trendHomog(x) #' trendHomog <- function(x) { # validate args if (!is.ts(x) || is.mts(x)) stop("'x' must be a vector time series") if (frequency(x) < 2) stop("'x' must be a seasonal time series") # Use only seasons with enough data w <- seasonTrend(x) x0 <- tsSub(x, seas = as.numeric(w$season[w$miss<.5])) # functions to extract S and varS kens <- function(y) mannKen(y)$S kenvars <- function(y) mannKen(y)$varS # find S and varS for each season fr <- frequency(x0) x1 <-window(x0, s = start(x0)[1], end = c(end(x0)[1], fr), extend = TRUE) x1 <- matrix(x1, byrow = TRUE, ncol = fr) S <- apply(x1, 2, kens) varS <- apply(x1, 2, kenvars) Z <- S / varS ^ .5 chi2.tot <- sum(Z ^ 2) Zbar <- mean(Z) chi2.trend <- fr * Zbar ^ 2 chi2.homog <- chi2.tot - chi2.trend p.value <- pchisq(chi2.homog, fr - 1, 0, FALSE) list(chi2.trend = chi2.trend, chi2.homog = chi2.homog, p.value = p.value, n = fr) }
/scratch/gouwar.j/cran-all/cranData/wql/R/trendHomog.R
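The van Belle-Hughes decomposition is a few lines of arithmetic on the seasonal Mann-Kendall Z statistics: with m seasons, the "trend" chi-square m * Zbar^2 has 1 df and the "homogeneous" chi-square sum(Z^2) - m * Zbar^2 has m - 1 df. A numeric sketch with made-up Z values:

# Numeric sketch of the van Belle-Hughes decomposition (made-up seasonal Z values).
Z <- c(2.1, 1.7, 2.4, 0.9, 1.5, 2.0)   # one Mann-Kendall Z per season
m <- length(Z)
chi2.tot   <- sum(Z^2)
chi2.trend <- m * mean(Z)^2            # 1 degree of freedom
chi2.homog <- chi2.tot - chi2.trend    # m - 1 degrees of freedom
p.homog    <- pchisq(chi2.homog, df = m - 1, lower.tail = FALSE)
c(chi2.trend = chi2.trend, chi2.homog = chi2.homog, p.value = p.homog)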
#' Convert time series to data frame #' #' Convert monthly time series vector to a year \code{x} month data frame for #' several possible subsequent analyses. Leading and trailing empty rows are #' removed. #' #' Our main use of \code{ts2df} is to convert a single monthly time series into #' a year \code{x} month data frame for EOF analysis of interannual #' variability. #' #' \code{monthCor} finds the month-to-month correlations in a monthly time #' series \code{x}. It is useful for deciding where to start the 12-month #' period for an \code{EOF} analysis (\code{mon1} in \code{ts2df}), namely, at #' a time of low serial correlation in \code{x}. #' #' @param x monthly time series vector #' @param mon1 starting month number, i.e., first column of the data frame #' @param addYr rows are normally labelled with the year of the starting month, #' but \code{addYr = TRUE} will add 1 to this year number #' @param omit if \code{TRUE}, then rows with any \code{NA} will be removed. #' @author #' Alan Jassby, James Cloern #' @return An \code{n x 12} data frame, where \code{n} is the number of years. #' @seealso \code{\link{eof}} #' @references Craddock, J. (1965) A meteorological application of principal #' component analysis. \emph{Statistician} \bold{15,} 143--156. #' @keywords ts manip #' @importFrom methods is #' @export #' @examples #' #' # San Francisco Bay station 27 chlorophyll has the lowest serial #' # correlation in Oct-Nov, with Sep-Oct a close second #' chl27 <- sfbayChla[, 's27'] #' monthCor(chl27) #' #' # Convert to a data frame with October, the first month of the #' # local "water year", in the first column #' tsp(chl27) #' chl27 <- round(chl27, 1) #' ts2df(chl27, mon1 = 10, addYr = TRUE) #' ts2df(chl27, mon1 = 10, addYr = TRUE, omit = TRUE) #' ts2df <- function(x, mon1 = 1, addYr = FALSE, omit = FALSE) { # validate args if (!is(x, 'ts') || is(x, 'mts') || !identical(frequency(x), 12)) stop("x must be a monthly 'ts' vector") if (!mon1 %in% 1:12) stop("mon1 must be between 1 and 12") # convert to data.frame x1 <- window(x, start = c(start(x)[1] - 1, mon1), end = c(end(x)[1] + 1, ifelse(mon1 == 1, 12, mon1 - 1)), extend = TRUE) d1 <- as.data.frame(matrix(x1, byrow = TRUE, ncol = 12)) colnames(d1) <- if (mon1 == 1) month.abb else month.abb[c(mon1:12, 1:(mon1 - 1))] rownames(d1) <- (start(x1)[1] + addYr):(start(x1)[1] + nrow(d1) - 1 + addYr) # trim leading and trailing NA rows, and optionally, rows with any NAs d1 <- d1[apply(d1, 1, function(x) !all(is.na(x))),] if (omit) d1 <- na.omit(d1) d1 }
/scratch/gouwar.j/cran-all/cranData/wql/R/ts2df.R
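The core of ts2df() is a byrow matrix fill after padding to whole years; the same reshape in base R, without the NA-row trimming (sfbayChla assumed):

# Base-R equivalent of the reshape inside ts2df(): one row per year, one column per month.
library(wql)
chl27 <- sfbayChla[, "s27"]
x <- window(chl27, start = c(start(chl27)[1], 1),
            end = c(end(chl27)[1], 12), extend = TRUE)
d <- as.data.frame(matrix(x, byrow = TRUE, ncol = 12))
names(d) <- month.abb
rownames(d) <- start(x)[1]:end(x)[1]
d[1:3, ]                                # ts2df() would also drop leading all-NA rows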
setGeneric( name = "tsMake", def = function(object, ...) standardGeneric("tsMake") ) setMethod( f = "tsMake", signature = "WqData", definition = function(object, focus, layer, type = c("ts.mon", "zoo"), qprob = NULL) { # Validate args d <- data.frame(object) if ( missing(focus) || length(focus) > 1 ) stop("'focus' must be the name of a single site or variable.") if (match(focus, d$site, nomatch = 0) > 0) { d <- d[d$site == focus, ] if (nrow(d) == 0) stop("No data for this site.") } else { if (match(focus, d$variable, nomatch = 0) > 0) { d <- d[d$variable == focus, ] if (nrow(d) == 0) stop("No data for this variable.") } else { stop("'focus' does not match any sites or variables") } } type <- match.arg(type) # Assemble all depths depths <- NULL if (missing(layer)) layer <- list(c(-Inf, Inf)) if (identical(layer, 'max.depths')) { ans <- aggregate(depth ~ time + site + variable, data = d, max, na.rm = TRUE) d <- merge(d, ans) d$depth <- depths <- max(d$depth, na.rm=TRUE) + 1 } else { if (!is(layer, "list")) layer <- list(layer) for (el in layer) { if ( !is(el, "numeric") || length(el) > 2 ) stop("layer is not specified correctly") if (length(el) > 1) { depths1 <- unique(d[d$depth >= el[1] & d$depth <= el[2], "depth"]) depths <- c(depths, depths1) } else { depths <- c(depths, el) } } } d <- d[d$depth %in% depths, ] if (nrow(d) == 0) stop("No data for this layer.") # Define aggregation function if (is.null(qprob)) { f <- mean } else { f <- function(x, ...) quantile(x, probs = qprob, ...) } # Reshape data if (match(focus, d$site, nomatch = 0) > 0) { c1 <- dcast(d, time ~ variable, fun.aggregate = f, na.rm = TRUE) } else { c1 <- dcast(d, time ~ site, fun.aggregate = f, na.rm = TRUE) } # Create zoo or ts object z1 <- zoo(c1[, -1], c1[, 1]) if (type == 'ts.mon') { z1 <- aggregate(z1, as.yearmon, f, na.rm = TRUE) z1names <- colnames(z1) z1 <- as.ts(z1) colnames(z1) <- z1names } z1 } )
/scratch/gouwar.j/cran-all/cranData/wql/R/tsMake.R
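The zoo-to-monthly step at the end of tsMake() is worth knowing on its own: irregular observation dates are collapsed to months with as.yearmon and then converted to a regular ts. A standalone sketch with made-up dates and values:

# Standalone sketch of the daily-to-monthly aggregation used in tsMake().
library(zoo)
dates <- as.Date(c("2004-01-05", "2004-01-19", "2004-02-02", "2004-02-23", "2004-04-12"))
z <- zoo(c(2.1, 3.5, 4.0, 2.8, 6.1), dates)
zm <- aggregate(z, as.yearmon, mean, na.rm = TRUE)  # one value per sampled month
as.ts(zm)                                           # regular monthly ts; March is NA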
#' Subset a time series by season
#'
#' Drops seasons from a matrix or vector time series, keeping only the
#' seasons given in \code{seas}.
#'
#' @param x1 A matrix or vector time series.
#' @param seas An integer vector of the season numbers to retain.
#' @return A time series containing only the retained seasons, with
#'   \code{frequency = length(seas)}.
#' @author
#' Alan Jassby, James Cloern
#'
#' @export
tsSub <- function(x1, seas = 1:frequency(x1)) {
  if (!is(x1, "ts")) stop("x1 must be of class 'ts'")
  stx <- start(x1)
  frx <- frequency(x1)
  if (!is(x1, "mts")) dim(x1) <- c(length(x1), 1)
  x2 <- window(x1, start = stx[1], end = c(end(x1)[1], frx), extend = TRUE)
  x3 <- x2[cycle(x2) %in% seas, ]
  ts(x3, start = stx[1], frequency = length(seas))
}
/scratch/gouwar.j/cran-all/cranData/wql/R/tsSub.R
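A quick check of what tsSub() returns: keeping three seasons of a monthly series yields a frequency-3 series whose annual cycle runs over just those seasons (sfbayChla assumed).

# Quick demonstration of tsSub(): keep only Feb-Apr from a monthly series.
library(wql)
chl27 <- sfbayChla[, "s27"]
spring <- tsSub(chl27, seas = 2:4)
frequency(spring)                               # 3: one "season" per retained month
window(spring, start = 1996, end = c(1996, 3))  # Feb, Mar, Apr of 1996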
#' Construct an object of class "WqData" #' #' \code{wqData} is a constructor for the \code{"WqData"} class that is often #' more convenient to use than \code{new}. It converts a data.frame containing #' water quality data in \dQuote{long} or \dQuote{wide} format to a #' \code{"WqData"} object. In \dQuote{long} format, observations are all in one #' column and a second column is used to designate the variable being observed. #' In \dQuote{wide} format, observations for each variable are in a separate #' column. #' #' If the data are already in long format, the function has little to do but #' rename the data fields. If in wide format, the \pkg{reshape2} package is #' called to \code{melt} the data. The function also removes \code{NA} #' observations, converts \code{site} to (possibly ordered) factors with valid #' variable names, and converts \code{time} to class \code{"Date"} or #' \code{"POSIXct"} and ISO 8601 format, depending on \code{time.format}. #' #' @param data Data frame containing water quality data. #' @param locus Character or numeric vector designating column names or #' numbers, respectively, in \code{data} that correspond to \code{time}, #' \code{site} and \code{depth}. #' @param wqdata In the case of \dQuote{long} data, character or numeric vector #' designating column names or numbers, respectively, in \code{data} that #' correspond to \code{variable} and \code{value}. In the case of \dQuote{wide} #' data, character or numeric vector designating column names or numbers, #' respectively, in \code{data} that denote water quality variable data. #' @param site.order If \code{TRUE}, \code{site} factor levels will be ordered #' in alphanumeric order. #' @param time.format Conversion specification for \code{time} defined by #' \acronym{ISO C/POSIX} standard (see \code{\link{strptime}}). #' @param type Either \dQuote{long} or \dQuote{wide} \code{data}. #' @return An object of class \code{"WqData"}. #' @seealso \code{\link{as.Date}}, \code{\link{strptime}}, #' \code{\link{WqData-class}} #' @references International Organization for Standardization (2004) ISO 8601. #' Data elements and interchange formats - Information interchange - #' Representation of dates and times. #' @keywords classes data #' @author #' Alan Jassby, James Cloern #' @importFrom methods new #' @export #' @examples \dontrun{ #' #' # Create new WqData object from sfbay data. First combine date and time #' # into a single string after making sure that all times have 4 digits. 
#' sfb <- within(sfbay, time <- substring(10000 + time, 2, 5)) #' sfb <- within(sfb, time <- paste(date, time, sep = ' ')) #' sfb <- wqData(sfb, 2:4, 5:12, site.order = TRUE, type = "wide", #' time.format = "%m/%d/%Y %H%M") #' #' head(sfb) #' tail(sfb) #' #' # If time of day were not required, then the following would suffice: #' sfb <- wqData(sfbay, c(1,3,4), 5:12, site.order = TRUE, type = "wide", #' time.format = "%m/%d/%Y") #' } #' wqData <- function(data, locus, wqdata, site.order, time.format = "%Y-%m-%d", type = c("long", "wide")) { # Validate args if (length(locus) != 3) stop("locus must be of length 3") cnames <- colnames(data) if (is(wqdata, "character")) wqdata <- match(wqdata, cnames, nomatch=0) if (any(identical(wqdata, 0)) || max(wqdata) > ncol(data)) stop("wqdata not in data") type <- match.arg(type) # Reshape data if (identical(type, "long")) { data <- data.frame(data[, locus], data[, wqdata]) names(data) <- c("time", "site", "depth", "variable", "value") } else { if (identical(length(wqdata), 1L)) { data <- data.frame(data[, locus], variable = rep(cnames[wqdata], nrow(data)), value = data[, wqdata]) names(data)[1:3] <- c("time", "site", "depth") } else { # Avoid possible duplicate names wqd <- data[, wqdata] ind <- match(c("time", "site", "depth"), names(wqd), nomatch=0) names(wqd)[ind] <- paste(names(wqd)[ind], 1, sep="") # Assemble and reshape data data <- data.frame(data[, locus], wqd) names(data)[1:3] <- c("time", "site", "depth") data <- melt(data, id.vars = 1:3) } } # Change time to correct format and class if needed if (grepl('H', time.format)) { data <- within(data, time <- as.POSIXct(time, format = time.format)) } else { data <- within(data, time <- as.Date(time, format = time.format)) } # Remove NAs data <- data[!is.na(data$value), ] rownames(data) <- seq_len(nrow(data)) # Remove unneeded factor levels data <- within(data, site <- factor(site, ordered = site.order)) levels(data$site) <- gsub('X','s', make.names(levels(data$site), unique = TRUE)) # Make sure variable is a factor data <- within(data, variable <- as.factor(variable)) # new(Class="WqData", data) }
/scratch/gouwar.j/cran-all/cranData/wql/R/wqData.R
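The wide-to-long step is reshape2::melt() keyed on the three locus columns; a tiny standalone sketch with invented data shows the long shape that wqData() builds before constructing the "WqData" object:

# Tiny wide-to-long sketch of the reshaping step inside wqData() (invented data).
library(reshape2)
wide <- data.frame(time  = c("01/15/2004", "02/12/2004"),
                   site  = c(21, 21),
                   depth = c(2, 2),
                   chl   = c(3.1, 5.2),
                   sal   = c(28.4, 27.9))
melt(wide, id.vars = 1:3)   # one row per time/site/depth/variable combination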
#' Class "DateTime" #' #' A class union of \code{"Date"} and \code{"POSIXct"} classes. #' #' #' @name DateTime-class #' @docType class #' @section Objects from the Class: A virtual Class: No objects may be created #' from it. #' @seealso \code{\link{WqData-class}} #' @keywords classes #' @examples #' #' showClass("DateTime") #' NULL #' Methods for Function phenoAmp #' #' Finds various measures of the amplitude of the annual cycle. #' #' #' @name phenoAmp-methods #' @docType methods #' @section Methods: \describe{ \item{list("signature(x = \"ts\")")}{See #' \code{\link{phenoAmp,ts-method}}} \item{list("signature(x = \"zoo\")")}{See #' \code{\link{phenoAmp,zoo-method}}} } #' @keywords methods NULL #' Phenological amplitude #' #' Finds various measures of the amplitude of the annual cycle, or of some #' specified season range. #' #' \code{phenoAmp} gives three measures of the amplitude of a seasonal cycle: #' the range, the variance, and the median absolute deviation, along with the #' mean and median to allow calculation of other statistics as well. #' #' These measures can be restricted to a subset of the year by giving the #' desired range of season numbers. This can be useful for isolating measures #' of, say, the spring and autumn phytoplankton blooms in temperate waters. In #' the case of a monthly time series, for example, a non-missing value is #' required for every month or the result will be \code{NA}, so using a period #' shorter than one year can also help avoid any months that are typically not #' covered by the sampling program. Similarly, in the case of dated #' observations, a shorter period can help avoid times of sparse data. The #' method for time series allows for other than monthly frequencies, but #' \code{season.range} is always interpreted as months for \code{zoo} objects. #' #' Note that the amplitude is sensitive to the number of samples for small #' numbers. This could be a problem for \code{zoo} objects if the sample number #' is changing greatly from year to year, depending on the amplitude measure #' and the underlying data distribution. So use \code{ts} objects or make sure #' that the sample number stays more or less the same over time. #' #' \code{\link{tsMake}} can be used to produce \code{ts} and \code{zoo} objects #' suitable as arguments to this function. #' #' @name phenoAmp #' @aliases phenoAmp,ts-method phenoAmp,zoo-method #' @param x A seasonal time series, or a class \code{zoo} object. #' @param season.range A vector of two numbers specifying the season range to #' be considered. #' @return A matrix of class \code{ts} or \code{zoo} with individual series for #' the range, variance, median absolute deviation, mean, median and -- in the #' case of \code{zoo} objects -- number of samples. #' @seealso \code{\link{phenoPhase}}, \code{\link{tsMake}} #' @references Cloern, J.E. and Jassby, A.D. (2008) Complex seasonal patterns #' of primary producers at the land-sea interface. \emph{Ecology Letters} #' \bold{11,} 1294--1303. #' @keywords manip ts #' @examples #' #' y <- sfbayChla[, "s27"] #' phenoAmp(y) # entire year #' # i.e., Jan-Jun only, which yields results for more years #' phenoAmp(y, c(1, 6)) #' NULL #' Methods for Function phenoPhase #' #' Finds various measures of the phase of the annual cycle. 
#' #' #' @name phenoPhase-methods #' @docType methods #' @section Methods: \describe{ \item{list("signature(x = \"ts\")")}{See #' \code{\link{phenoPhase,ts-method}}} \item{list("signature(x = #' \"zoo\")")}{See \code{\link{phenoPhase,zoo-method}}} } #' @keywords methods NULL #' Phenological phase #' #' Finds various measures of the phase of the annual cycle, or of some #' specified month range. #' #' \code{phenoPhase} gives three measures of the phasing of a seasonal cycle: #' the time of the maximum (Cloern and Jassby 2008), the \emph{fulcrum} or #' center of gravity, and the weighted mean season (Colebrook 1979). The latter #' has sometimes been referred to in the literature as \dQuote{centre of #' gravity}, but it is not actually the same. These measures differ in their #' sensitivity to changes in the seasonal pattern, and therefore also in their #' susceptibility to sampling variability. The time of maximum is the most #' sensitive, the weighted mean the least. #' #' These measures can be restricted to a subset of the year by giving the #' desired range of seasons. This can be useful for isolating measures of, say, #' the spring and autumn phytoplankton blooms in temperate waters. In the case #' of a seasonal time series, a non-missing value is required for every season #' or the result will be \code{NA}, so using a period shorter than one year can #' also help avoid any seasons that are typically not covered by the sampling #' program. Similarly, in the case of dated observations, a shorter period can #' help avoid times of sparse data. The method for time series allows for other #' than monthly frequencies, but \code{season.range} is always interpreted as #' months for \code{zoo} objects. The method for time series requires data for #' all seasons in \code{season.range}. The method for \code{zoo} objects will #' provide a result regardless of number of sampling days, so make sure that #' data are sufficient for a meaningful result. #' #' The measures are annum-centric, i.e., they reflect the use of calendar year #' as the annum, which may not be appropriate for cases in which important #' features occur in winter and span two calendar years. Such cases can be #' handled by lagging the time series by an appropriate number of months, or by #' subtracting an appropriate number of days from the individual dates. #' #' \code{\link{tsMake}} can be used to produce \code{ts} and \code{zoo} objects #' suitable as arguments to this function. #' #' The default parameters used for the \code{integrate} function in #' \code{phenoPhase} may fail for certain datasets. Try increasing the number #' of subdivisions above its default of 100 by adding, for example, #' \code{subdivisions = 1000} to the arguments of \code{phenoPhase}. #' #' @name phenoPhase #' @aliases phenoPhase,ts-method phenoPhase,zoo-method #' @param x A seasonal time series, or a class \code{zoo} object. #' @param season.range A vector of two numbers specifying the season range to #' be considered. #' @param out The form of the output. #' @param ... Additional arguments to be passed for changing integration #' defaults. #' @return A data frame with columns year, time of the maximum, fulcrum, #' weighted mean time and -- in the case of \code{zoo} objects -- number of #' observations. In the case of seasonal time series, the results are all given #' as decimal seasons of the year. 
In the case of dated observations, the #' results can be dates, day of the year, or julian day with an origin of #' 1970-01-01, depending on the option \code{out}. #' @seealso \code{\link{phenoAmp}}, \code{\link{tsMake}} #' @references Cloern, J.E. and Jassby, A.D. (2008) Complex seasonal patterns #' of primary producers at the land-sea interface. \emph{Ecology Letters} #' \bold{11,} 1294--1303. #' #' Colebrook, J.M. (1979) Continuous plankton records - seasonal cycles of #' phytoplankton and copepods in the North Atlantic ocean and the North Sea. #' \emph{Marine Biology} \bold{51,} 23--32. #' @keywords manip ts #' @examples #' #' # ts example #' y <- sfbayChla[, "s27"] #' p1 <- phenoPhase(y) #' p1 #' apply(p1, 2, sd, na.rm = TRUE) # max.time > fulcrum > mean.wt #' phenoPhase(y, c(3, 10)) #' #' # zoo example #' sfb <- wqData(sfbay, c(1, 3, 4), 5:12, site.order = TRUE, type = "wide", #' time.format = "%m/%d/%Y") #' y <- tsMake(sfb, focus = "chl", layer = c(0, 5), type = "zoo") #' phenoPhase(y[, "s27"]) #' NULL #' San Francisco Bay water quality data #' #' Selected observations and variables from U.S. Geological Survey water #' quality stations in south San Francisco Bay. Data include \acronym{CTD} and #' nutrient measurements. #' #' The original downloaded dataset was modified by taking a subset of six #' well-sampled stations and the period 1985--2004. Variable names were also #' simplified. The data frames \code{sfbayStns} and \code{sfbayVars} describe #' the stations and water quality variables in more detail; they were created #' from information at the same web site. Note that the station numbers in #' \code{sfbayStns} have been prefixed with \code{s} to make station codes into #' legal variable names. \code{sfbayChla} was constructed from the entire #' downloaded sfbay dataset and encompasses the period 1969--2009. 
#' #' @name sfbay #' @aliases sfbay sfbayStns sfbayVars sfbayChla #' @docType data #' @format \code{sfbay} is a data frame with 23207 observations (rows) of 12 #' variables (columns): #' #' \tabular{rll}{ \code{[, 1]} \tab \code{date} \tab date\cr \code{[, 2]} \tab #' \code{time} \tab time\cr \code{[, 3]} \tab \code{stn} \tab station code\cr #' \code{[, 4]} \tab \code{depth} \tab measurement depth\cr \code{[, 5]} \tab #' \code{chl} \tab chlorophyll \emph{a}\cr \code{[, 6]} \tab \code{dox.pct} #' \tab dissolved oxygen\cr \code{[, 7]} \tab \code{spm} \tab suspended #' particulate matter\cr \code{[, 8]} \tab \code{ext} \tab extinction #' coefficient\cr \code{[, 9]} \tab \code{sal} \tab salinity\cr \code{[, 10]} #' \tab \code{temp} \tab water temperature\cr \code{[, 11]} \tab \code{nox} #' \tab nitrate + nitrite\cr \code{[, 12]} \tab \code{nhx} \tab ammonium\cr } #' #' \code{sfbayStns} is a data frame with 16 observations of 6 variables: #' #' \tabular{rll}{ \code{[, 1]} \tab \code{site} \tab station code\cr \code{[, #' 2]} \tab \code{description} \tab station description\cr \code{[, 3]} \tab #' \code{lat} \tab latitude\cr \code{[, 4]} \tab \code{long} \tab longitude\cr #' \code{[, 5]} \tab \code{depthMax} \tab maximum depth, in m\cr \code{[, 6]} #' \tab \code{distFrom36} \tab distance from station 36, in km\cr } #' #' \code{sfbayVars} is a data frame with 7 observations of 3 variables: #' #' \tabular{rll}{ \code{[, 1]} \tab \code{variable} \tab water quality variable #' code\cr \code{[, 2]} \tab \code{description} \tab description\cr \code{[, #' 3]} \tab \code{units} \tab measurement units\cr } #' #' \code{sfbayChla} is a time series matrix (380 months \code{x} 16 stations) #' of average 0-5 m chlorophyll \emph{a} concentrations calculated from the #' data in \code{sfbay}. #' @source Downloaded from \url{https://sfbay.wr.usgs.gov/water-quality-database/} on #' 2009-11-17. #' @keywords datasets #' @examples #' #' data(sfbay) #' str(sfbay) #' str(sfbayStns) #' str(sfbayVars) #' plot(sfbayChla[, 1:10], main = "SF Bay Chl-a") #' NULL #' Methods for Function tsMake #' #' Creates a matrix of observations indexed by time. #' #' #' @name tsMake-methods #' @docType methods #' @section Methods: \describe{ \item{list("signature(x = \"WqData\")")}{See #' \code{\link{tsMake,WqData-method}}} } #' @keywords methods NULL #' Create time series from water quality data #' #' Creates a matrix time series object from an object of class \code{"WqData"}, #' either all variables for a single site or all sites for a single variable. #' #' When \code{qprob = NULL}, the function averages all included depths for each #' day, the implicit assumption being that the layer is well-mixed and/or the #' samples are evenly distributed with depth in the layer. If \code{layer = #' "max.depths"}, then only the value at the maximum depth for each time, site #' and variable combination will be used. If no layer is specified, all depths #' will be used. #' #' The function produces a matrix time series of all variables for the #' specified site or all sites for the specified variable. If \code{type = #' "ts.mon"}, available daily data are averaged to produce a monthly time #' series, from which a quarterly or annual series can be created if needed. If #' you want values for the actual dates of observation, then set \code{type = #' "zoo"}. #' #' When \code{qprob} is a number from 0 to 1, it is interpreted as a #' probability and the corresponding quantile is used to aggregate observations #' within the specified layer. 
So to get the maximum, for example, use qprob = #' 1. If \code{type = "ts.mon"}, the same quantile is used to aggregate all the #' available daily values. #' @name tsMake #' @aliases tsMake,WqData-method #' @export #' @importFrom reshape2 dcast #' @importFrom zoo zoo as.yearmon #' @importFrom stats as.ts #' @param object Object of class \code{"WqData"}. #' @param focus Name of a site or water quality variable. #' @param layer Number specifying a single depth; a numeric vector of length 2 #' specifying top and bottom depths of layer; a list specifying multiple depths #' and/or layers; or just the string \code{"max.depths"}. #' @param type \code{ts.mon} to get a monthly time series, \code{zoo} to get an #' object of class \code{"zoo"} with individual observation dates. #' @param qprob quantile probability, a number between 0 and 1. #' @return A matrix of class \code{"mts"} or \code{"zoo"}. #' @note The layer list is allowed to include negative numbers, which may have #' been used in the \code{WqData} object to denote variables that apply to the #' water column as a whole, such as, say, -1 for light attenuation coefficient. #' This enables \code{focus = 's27'} and \code{layer = list(-1, c(0, 5))} to #' produce a time series matrix for station 27 that includes both attenuation #' coefficient and chlorophyll averaged over the top 5 m. Negative numbers may #' also have been used in the \code{WqData} object to identify qualitative #' depths such as \dQuote{near bottom}, which is not uncommon in historical #' data sets. So data from such depths can be aggregated easily with other data #' to make these time series. #' @seealso \code{\link{WqData-class}} #' @keywords ts #' @examples #' #' # Create new WqData object #' sfb <- wqData(sfbay, c(1, 3:4), 5:12, site.order = TRUE, #' time.format = "%m/%d/%Y", type = "wide") #' #' # Find means in the 0-10 m layer #' y <- tsMake(sfb, focus = "s27", layer = c(0, 10)) #' plot(y, main = "Station 27") #' # Or select medians in the same layer #' y1 <- tsMake(sfb, focus = "s27", layer = c(0, 10), qprob = 0.5) #' plot(y1, main = "Station 27") #' # Compare means:medians #' apply(y / y1, 2, mean, na.rm = TRUE) #' #' # Combine a layer with a single additional depth #' y <- tsMake(sfb, focus = "chl", layer = list(c(0, 2), 5)) #' plot(y, main = "Chlorophyll a, ug/L") #' #' # Use values from the deepest samples #' y <- tsMake(sfb, focus = "dox", layer = "max.depths", type = "zoo") #' head(y) #' plot(y, type = "h", main = "'Bottom' DO, mg/L") #' NULL #' Miscellaneous utility functions #' #' A variety of small utilities used in other functions. #' #' \code{date2decyear}: Converts object of class \code{"Date"} to decimal year #' assuming time of day is noon. #' #' \code{decyear2date}: Converts decimal year to object of class \code{"Date"}. #' #' \code{layerMean}: Acts on a matrix or data frame with depth in the first #' column and observations for different variables (or different sites, or #' different times) in each of the remaining columns. The trapezoidal mean over #' the given depths is calculated for each of the variables. Replicate depths #' are averaged, and missing values or data with only one unique depth are #' handled. Data are not extrapolated to cover missing values at the top or #' bottom of the layer. The result can differ markedly from the simple mean #' even for equal spacing of depths, because the top and bottom values are #' weighted by 0.5 in a trapezoidal mean. #' #' \code{leapYear}: \code{TRUE} if \code{x} is a leap year, \code{FALSE} #' otherwise. 
#' #' \code{meanSub}: Mean of a subset of a vector. #' #' \code{monthNum}: Converts dates to the corresponding numeric month. #' #' \code{tsSub}: Drops seasons from a matrix or vector time series. #' #' \code{years}: Converts dates to the corresponding numeric years. #' #' @name years #' @param d A numeric matrix or data frame with depth in the first column and #' observations for some variable in each of the remaining columns. #' @param na.rm Should missing data be removed? #' @param seas An integer vector of seasons to be retained. #' @param sub An integer vector. #' @param w A vector of class \code{"Date"}. #' @param x A numeric vector. #' @param x1 A matrix or vector time series. #' @param y A vector of class \code{"Date"} or \code{"POSIX"} date-time. #' @keywords manip #' @export #' @examples #' #' dates <- as.Date(c("1996-01-01", "1999-12-31", "2004-02-29", "2005-03-01")) #' date2decyear(dates) #' #' decyear2date(c(1996.0014, 1999.9986, 2004.1626, 2005.1630)) #' #' z <- c(1, 2, 3, 5, 10) # 5 depths #' x <- matrix(rnorm(30), nrow = 5) # 6 variables at 5 depths #' layerMean(cbind(z, x)) #' #' leapYear(seq(1500, 2000, 100)) #' leapYear(c(1996.9, 1997)) #' #' ## Aggregate monthly time series over Feb-Apr only. #' aggregate(sfbayChla, 1, meanSub, sub = 2:4) #' #' monthNum(as.Date(c("2007-03-17", "2003-06-01"))) #' #' ## Ignore certain seasons in a Seasonal Kendall test. #' c27 <- sfbayChla[, "s27"] #' seaKen(tsSub(c27)) # Aug and Dec missing the most key data #' seaKen(tsSub(c27, seas = c(1:7, 9:11))) #' #' y <- Sys.time() #' years(y) #' NULL #' Class "WqData" #' #' A simple extension or subclass of the \code{"data.frame"} class for typical #' \dQuote{discrete} water quality monitoring programs that examine phenomena #' on a time scale of days or longer. It requires water quality data to be in a #' specific \dQuote{long} format, although a generating function #' \code{\link{wqData}} can be used for different forms of data. #' #' #' @name WqData-class #' @aliases WqData-class [,WqData-method summary,WqData-method #' plot,WqData-method #' @docType class #' @section Objects from the Class: Objects can be created by calls of the form #' \code{new("WqData", d)}, where \code{d} is a data.frame. \code{d} should #' have columns named \code{time, site, depth, variable, value} of class #' \code{"DateTime", "factor", "numeric", "factor", "numeric"}, respectively. #' @seealso \code{\link{DateTime-class}}, \code{\link{tsMake,WqData-method}}, #' \code{\link{wqData}} #' @keywords classes #' @examples #' #' showClass("WqData") #' # Construct the WqData object sfb as shown in the wqData examples. #' sfb <- wqData(sfbay, c(1, 3, 4), 5:12, site.order = TRUE, type = "wide", #' time.format = "%m/%d/%Y") #' # Summarize the data #' summary(sfb) #' # Create boxplot summary of data #' plot(sfb, vars = c("chl", "dox", "spm"), num.col = 2) #' # Extract some of the data as a WqData object #' sfb[1:10, ] # first 10 observations #' sfb[sfb$depth == 20, ] # all observations at 20 m #' NULL #' c("\\Sexpr[results=rd,stage=build]{tools:::Rd_package_title(\"#1\")}", #' "wql")\Sexpr{tools:::Rd_package_title("wql")} #' #' c("\\Sexpr[results=rd,stage=build]{tools:::Rd_package_description(\"#1\")}", #' "wql")\Sexpr{tools:::Rd_package_description("wql")} #' #' The main purpose of \pkg{wql} is to explore seasonal time series through #' plots and nonparametric trend tests. 
It was created originally to examine #' water quality data sets (hence, \dQuote{wql}) but is suitable as a more #' general purpose set of tools for looking at annual or seasonal time series. #' #' One of the more tedious tasks in exploring environmental data sets is #' creating usable time series from the original complex data sets, especially #' when you want many series at will that group data in different ways. So #' \pkg{wql} also provides a way of transforming data sets to a common format #' that then allows a diversity of time series to be created quickly. A few #' functions are specific to the fields of limnology and oceanography. #' #' The plots are designed for easy use, not for publication-quality graphs. #' Nonetheless, extensive customization is possible by passing options through #' \code{\ldots{}}, adding annotations in the case of base graphics, and adding #' layers in the case of \pkg{ggplot2} objects. #' #' Two functions are used mainly for preparing the times series: #' #' \itemize{ \item a function that transforms incoming data to a common data #' structure in the form of the \code{WqData} class \item a function that #' easily prepares time series objects from this class } #' #' The \code{WqData} class can be easily adapted to non-aquatic data. #' Obviously, the \code{depth} field can be used for elevation in atmospheric #' studies. But more generally, the \code{site} and \code{depth} fields can be #' used for many two-way classifications and don't need to refer to spatial #' location. #' #' Some of the time series functions include: #' #' \itemize{ \item a variety of plots to examine changes in seasonal patterns #' \item nonparametric trend tests \item time series interpolation and related #' manipulations \item a simple decomposition of a series into different time #' scales \item phenological analyses \item the use of empirical orthogonal #' functions to detect multiple independent mechanisms underlying temporal #' change } #' #' A few functions are specialized for the aquatic sciences: #' #' \itemize{ \item converting between oxygen concentrations and percent #' saturation \item converting between salinity and conductivity } #' #' The capabilities of \pkg{wql} are more fully explained in the accompanying #' vignette: \dQuote{wql: Exploring environmental monitoring data}. #' #' @name wql-package #' @aliases wql-package wql #' @docType package #' @author #' c("\\Sexpr[results=rd,stage=build]{tools:::Rd_package_author(\"#1\")}", #' "wql")\Sexpr{tools:::Rd_package_author("wql")} #' #' Maintainer: #' c("\\Sexpr[results=rd,stage=build]{tools:::Rd_package_maintainer(\"#1\")}", #' "wql")\Sexpr{tools:::Rd_package_maintainer("wql")} #' @keywords package NULL #' Class "zoo" #' #' Registration of S3 class \code{"zoo"} as a formally defined class. Used here #' to allow the \code{"zoo"} class to appear in method signatures. #' #' #' @name zoo-class #' @docType class #' @section Objects from the Class: A virtual Class: No objects may be created #' from it. #' @seealso \code{\link{phenoAmp}}, \code{\link{phenoPhase}} #' @keywords classes #' @examples #' #' showClass("zoo") #' NULL
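## Illustrative sketch, not part of the package source: a by-hand version of the
## trapezoidal layer mean that `layerMean` (documented above) computes. The manual
## value is the trapezoid-rule integral of the profile divided by the depth range;
## the depths and values below are made up purely for illustration.
z <- c(0, 2, 5, 10)                    # depths (m)
v <- c(8, 6, 5, 3)                     # e.g., chlorophyll at those depths
manual <- sum(diff(z) * (head(v, -1) + tail(v, -1)) / 2) / diff(range(z))
manual                                 # trapezoidal mean over the 0-10 m layer
layerMean(cbind(z, v))                 # should agree, per the documentation above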
/scratch/gouwar.j/cran-all/cranData/wql/R/wql-package.R
years <- function(y) as.numeric(format(y, "%Y"))
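# Illustrative usage (dates made up): extracts the year portion of a date as a number.
# years(as.Date("2004-02-29"))  # 2004
# years(Sys.time())             # current year; POSIXct also works with format()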
/scratch/gouwar.j/cran-all/cranData/wql/R/years.R
## ----setup, echo=FALSE-------------------------------------------------------- knitr::opts_chunk$set(fig.align="center", warning=FALSE) ## ---- echo=FALSE, fig.width=6, fig.height=3.7--------------------------------- knitr::include_graphics("wqflow.png", auto_pdf = TRUE) ## ----------------------------------------------------------------------------- library(wql) ## ----eval=FALSE--------------------------------------------------------------- # sfbay <- read.csv("sfbay.csv", header = FALSE, as.is = TRUE, # skip = 2) # names(sfbay) <- c('date', 'time', 'stn', 'depth', 'chl', 'dox', # 'spm', 'ext', 'sal', 'temp', 'nox', 'nhx') # sfbay <- subset(sfbay, stn %in% c(21, 24, 27, 30, 32, 36) & # substring(date, 7, 10) %in% 1985:2004) ## ----------------------------------------------------------------------------- head(sfbay) ## ----------------------------------------------------------------------------- x <- sample(seq_len(nrow(sfbay)), 10) sfbay[x, "dox"] sfbay1 <- transform(sfbay, dox = round(100 * dox/oxySol(sal, temp), 1)) sfbay1[x, "dox"] ## ----------------------------------------------------------------------------- sfb <- wqData(sfbay, c(1, 3:4), 5:12, site.order = TRUE, type = "wide", time.format = "%m/%d/%Y") head(sfb) ## ----------------------------------------------------------------------------- summary(sfb) ## ---- fig.width=5, fig.height=3.1--------------------------------------------- plot(sfb, vars = c('dox', 'temp'), num.col = 2) ## ----------------------------------------------------------------------------- y <- tsMake(sfb, focus = "chl", layer = c(0, 5)) y[1:4, ] tsp(y) ## ---- width=5, height=8------------------------------------------------------- plotTs(y[, 1:4], dot.size = 1.3, ylab = "Chlorophyll in San Francisco Bay", strip.labels = paste("Station", 21:24), ncol = 1, scales = "free_y") ## ----------------------------------------------------------------------------- head(tsMake(sfb, focus = "chl", layer = c(0, 5), type = 'zoo')) ## ----------------------------------------------------------------------------- chl27 <- sfbayChla[, 's27'] tsp(chl27) chl27 <- round(chl27, 1) head(ts2df(chl27)) ## ----------------------------------------------------------------------------- y <- window(sfbayChla, start = 2005, end = c(2009, 12)) # 5 years, 16 sites round(mts2ts(y, seas = 2:4), 1) # focus on Feb-Apr spring bloom ## ----------------------------------------------------------------------------- chl27 <- sfbayChla[, "s27"] chl27a <- interpTs(chl27, gap = 3) ## ----fig.height=3.7, fig.width=6---------------------------------------------- plot(chl27a, col = "red", lwd = .5, xlab = "") lines(chl27, col = "blue", lwd = 1.5) ## ----------------------------------------------------------------------------- mannKen(Nile) ## ---- fig.height=3.7, fig.width=6--------------------------------------------- plot(Nile, ylab = "Flow", xlab = "") abline(v=1898, col='blue') pett(Nile) ## ----------------------------------------------------------------------------- y <- ts.intersect(Nile, LakeHuron) pett(y) ## ----------------------------------------------------------------------------- y <- sfbayChla y1 <- tsSub(y, seas = 2:4) # focus on Feb-Apr spring bloom y2 <- aggregate(y1, 1, mean, na.rm = FALSE) signif(mannKen(y2), 3) ## ----------------------------------------------------------------------------- chl27 <- sfbayChla[, "s27"] seaKen(chl27) ## ----------------------------------------------------------------------------- seaRoll(chl27, w = 10) ## ---- fig.height=8, 
fig.width=7----------------------------------------------- x <- sfbayChla seasonTrend(x, plot = TRUE, ncol = 2, scales = 'free_y') ## ----------------------------------------------------------------------------- x <- sfbayChla[, 's27'] trendHomog(x) ## ----------------------------------------------------------------------------- chl <- sfbayChla[, 1:12] # first 12 stns have good data coverage seaKen(mts2ts(chl, 2:4)) # regional trend in spring bloom ## ---- fig.height=3.7, fig.width=6--------------------------------------------- chla1 <- aggregate(sfbayChla, 1, mean, na.rm = TRUE) chla1 <- chla1[, 1:12] eofNum(chla1) ## ----------------------------------------------------------------------------- e1 <- eof(chla1, n = 1) e1 ## ---- fig.height=3.1, fig.width=5--------------------------------------------- eofPlot(e1, type = "amp") ## ----------------------------------------------------------------------------- chl27b <- interpTs(sfbayChla[, "s27"], gap = 3) chl27b <- ts2df(chl27b, mon1 = 10, addYr = TRUE, omit = TRUE) head(round(chl27b, 1)) ## ---- fig.height=3.1, fig.width=5--------------------------------------------- e2 <- eof(chl27b, n = 2, scale. = TRUE) eofPlot(e2, type = "coef") ## ---- fig.height=6, fig.width=6----------------------------------------------- chl27 <- sfbayChla[, "s27"] d1 <- decompTs(chl27) plot(d1, nc = 1, main = "Station 27 Chl-a decomposition") ## ---- fig.height=4.3, fig.width=7--------------------------------------------- plotSeason(chl27, num.era = 3, same.plot = FALSE, ylab = 'Stn 27 Chl-a') ## ---- fig.height=4.3, fig.width=7--------------------------------------------- plotSeason(chl27, num.era = 3, same.plot = TRUE, ylab = 'Stn 27 Chl-a') ## ---- fig.height=4.3, fig.width=7--------------------------------------------- plotSeason(chl27, "by.month", ylab = 'Stn 27 Chl-a') ## ----------------------------------------------------------------------------- chl27 <- sfbayChla[, 's27'] p1 <- phenoPhase(chl27) head(p1) p2 <- phenoPhase(chl27, c(1, 6)) head(p2) p3 <- phenoAmp(chl27, c(1, 6)) head(p3) ## ----------------------------------------------------------------------------- zchl <- tsMake(sfb, focus = "chl", layer = c(0, 5), type = 'zoo') head(zchl) zchl27 <- zchl[, 3] head(phenoPhase(zchl27)) head(phenoPhase(zchl27, c(1, 6), out = 'doy')) head(phenoPhase(zchl27, c(1, 6), out = 'julian')) ## ---- fig.height=3.7, fig.width=6--------------------------------------------- chl <- aggregate(sfbayChla[, 1:6], 1, meanSub, 2:4, na.rm = TRUE) plotTsAnom(chl, ylab = 'Chlorophyll-a', strip.labels = paste('Station', substring(colnames(chl), 2, 3))) ## ---- fig.height=4.3, fig.width=7--------------------------------------------- chl27 <- sfbayChla[, "s27"] plotTsTile(chl27)
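## Illustrative sketch, not generated from the vignette: a by-hand version of the
## multiplicative decomposition described in the "Time series decomposition" section,
## using means rather than the medians that decompTs uses by default. The 2 x 12
## reshaping simply treats consecutive blocks of 12 observations as "years".
cc <- matrix(sfbayChla[1:24, "s27"], nrow = 2, byrow = TRUE)  # 2 "years" x 12 months
C <- mean(cc, na.rm = TRUE)                  # long-term mean
y <- rowMeans(cc, na.rm = TRUE) / C          # annual effects y_i
m <- colMeans(cc / (C * y), na.rm = TRUE)    # mean monthly effects m_j
e <- cc / (C * outer(y, m))                  # events component e_ij
round(cc - C * outer(y, m) * e, 6)           # ~0: components multiply back to the data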
/scratch/gouwar.j/cran-all/cranData/wql/inst/doc/wql-package.R
--- title: "wql: Exploring water quality monitoring data" author: Alan D. Jassby and James E. Cloern date: "`r format(as.Date('2016-05-18 00:00:01 EDT'), '%d %B, %Y')`" output: html_document: highlight: textmate keep_md: no number_sections: yes theme: united toc: yes toc_float: yes bibliography: wq.bib vignette: > \usepackage[utf8]{inputenc} %\VignetteEngine{knitr::rmarkdown} %\VignetteIndexEntry{wql: Exploring water quality monitoring data} --- ```{r setup, echo=FALSE} knitr::opts_chunk$set(fig.align="center", warning=FALSE) ``` *Edited by Jemma Stachelek: `r format(Sys.time(), '%d %B, %Y')`* # Introduction This package contains functions to assist in the processing and exploration of data from monitoring programs for aquatic ecosystems. The name *wq* stands for *w*ater *q*uality. Although our own interest is in aquatic ecology, almost all of the functions should be useful for time series analysis regardless of the subject matter. The package is intended for programs that sample approximately monthly at discrete stations, a feature of many legacy data sets. ```{r, echo=FALSE, fig.width=6, fig.height=3.7 } knitr::include_graphics("wqflow.png", auto_pdf = TRUE) ``` The functions are summarized in the diagram above, which illustrates a typical sequence of analysis that could be facilitated by the package when the data are in a data frame or time series. The functions associated with each step of the sequence are listed below their corresponding step. First, we might want to **derive** additional variables of interest. A few functions are provided here for variables common to water monitoring data. Next, we **generate** time series from the data in a two-stage process: the `data.frame` is first converted into a standardized form (with `wqData`) and then another function (`tsMake`) is applied to this new data object to generate the series. This two-stage process is not necessary, and -- as implied in the diagram -- you can skip it by using time series that already exist or that you construct in another way. But it has advantages when you're constructing many different kinds of series from a data set, especially one that is unbalanced with respect to place and time. There are also a few special methods available to **summarize** this new data object. Next, we may need to **reshape** the time series in various ways for further analysis, perhaps also imputing missing values. Finally, we **analyze** the data to extract patterns using special plots, trend tests, and other approaches. We illustrate some of the steps in the diagram using the accompanying data set `sfbay`. ```{r} library(wql) ``` # Preparing data from an external file Our starting point is a comma-delimited file downloaded on 2009-11-17 from the U.S. Geological Survey's water quality data set for [San Francisco Bay](https://sfbay.wr.usgs.gov/water-quality-database/ "USGS: Water Quality of San Francisco Bay"). The downloaded file, `sfbay.csv`, starts with a row of variable names followed by a row of units, so the first two lines are skipped during import and simpler variable names are substituted for the originals. 
Also, only a subset of stations and years is used in order to keep `sfbay.csv` small: ```{r eval=FALSE} sfbay <- read.csv("sfbay.csv", header = FALSE, as.is = TRUE, skip = 2) names(sfbay) <- c('date', 'time', 'stn', 'depth', 'chl', 'dox', 'spm', 'ext', 'sal', 'temp', 'nox', 'nhx') sfbay <- subset(sfbay, stn %in% c(21, 24, 27, 30, 32, 36) & substring(date, 7, 10) %in% 1985:2004) ``` The resulting data frame `sfbay` is provided as part of the package, and its contents are explained in the accompanying help file. ```{r} head(sfbay) ``` The next step is to add any necessary derived variables to the data frame. An initial data set will sometimes contain conductivity rather than salinity data, and we might want to use `ec2pss` to derive the latter. That's not the case here, but let's assume that we want dissolved oxygen as percent saturation rather than in concentration units. Using `oxySol` and the convention of expressing percent saturation with respect to surface pressure: ```{r} x <- sample(seq_len(nrow(sfbay)), 10) sfbay[x, "dox"] sfbay1 <- transform(sfbay, dox = round(100 * dox/oxySol(sal, temp), 1)) sfbay1[x, "dox"] ``` # The `WqData` class We define a standardized format for water quality data by creating a formal (`S4`) class, the `WqData` class, that enforces the standards, and an accompanying generating function `wqData` (note lower-case w). This generating function constructs a `WqData` object from a data frame. The `WqData` object is just a restricted version of a `data.frame` that requires specific column names and classes. We decided to accommodate two types of sampling time, namely, the date either with or without the time of day. The former are converted to the `POSIXct` class and the latter to the `Date` class. A special class `DateTime` is created, which is the union of these two time classes. Classes that combine date and time of day require an additional level of care with respect to time zone [@grothendieck2004]. Surface location is specified by a `site` code, as the intention is to handle discrete monitoring programs as opposed to continuous transects. Latitude-longitude and distances from a fixed point are implicit in the `site` code and can be recorded in a separate table (see `sfbayVars`). The `depth` is specified separately as a number. Other information that may not be depth-specific, such as the mean vertical extinction coefficient in the near-surface layer, can be coded by a negative depth number. The last two fields in the data portion of a `WqData` object are the `variable` code and the `value`. The variables are given as character strings and the values as numbers. As in the case of the sampling site, additional information related to the variable code can be maintained in a separate table (see `sfbayVars`). Like all `S4` classes, `WqData` has a generating function called `new` automatically created along with the class. This function, however, requires that its data frame argument already have a fairly restricted form of structure. In order to decrease the manipulation required of the imported data, a separate, less restrictive generating function called `wqData` is available. This function is more forgiving of field names and classes and does a few other cleanup tasks with the data before calling `new`. Perhaps most useful, it converts data from a *wide* format with one field per variable into the *long* format used by the `WqData` class. 
For example, `sfbay` can be converted to a `WqData` object with a single command: ```{r} sfb <- wqData(sfbay, c(1, 3:4), 5:12, site.order = TRUE, type = "wide", time.format = "%m/%d/%Y") head(sfb) ``` There is a `summary` method for this class that tabulates the number of observations by site and variable, as well as the mean and quartiles for individual variables: ```{r} summary(sfb) ``` Plotting a `WqData` object produces a plot for each variable specified, each plot containing a boxplot of the values for each site. If no variables are specified, then the first 10 will be plotted. ```{r, fig.width=5, fig.height=3.1} plot(sfb, vars = c('dox', 'temp'), num.col = 2) ``` Apart from `summary` and `plot`, one can subset a `WqData` object with the `[` operator. All other existing methods for data frames will produce an object of class `data.frame` rather than one of class `WqData`. # Generating time series Historical water quality data are often suitable for analyzing as monthly time series, which permits the use of many existing time series functions. `tsMake` is a function for `WqData` objects that creates monthly time series for all variables at a single site or for a single variable at all sites, when the option `type = "ts.mon"`. If the quantile probability `qprob = NULL`, all replicates are first averaged and then the mean is found for the depth layers of interest. Otherwise the respective quantile will be used both to aggregate depths for each day and to aggregate days for each month. If no layers are specified, all depths will be used. If `layer = "max.depths"`, the time series will be values of the deepest sample for each time, site and variable. The `layer` argument allows for flexibility in specifying depths, including a list of layers and negative depths used as codes for, say, *near bottom* or *entire water column*. ```{r} y <- tsMake(sfb, focus = "chl", layer = c(0, 5)) y[1:4, ] tsp(y) ``` The function `plotTs` is convenient for a quick look at the series. Lines join only adjacent data; otherwise, data are isolated dots. ```{r, width=5, height=8} plotTs(y[, 1:4], dot.size = 1.3, ylab = "Chlorophyll in San Francisco Bay", strip.labels = paste("Station", 21:24), ncol = 1, scales = "free_y") ``` If the option `type = "zoo"`, then `tsMake` produces an object of class `zoo` containing values by date of observation, rather than a monthly time series. ```{r} head(tsMake(sfb, focus = "chl", layer = c(0, 5), type = 'zoo')) ``` # Reshaping {#reshape} There are several functions for further reshaping of time series, preparing them for use in specific analyses. `ts2df` converts a monthly time series vector to a year $\times$ month data frame. Leading and trailing empty rows are removed, additional rows with missing data are optionally removed, and the data frame can be reconfigured to represent a local *water year*: ```{r} chl27 <- sfbayChla[, 's27'] tsp(chl27) chl27 <- round(chl27, 1) head(ts2df(chl27)) ``` Another example of its use is shown in [Empirical Orthogonal Functions](#eof) below. A similar reshaping function is `mts2ts`, which converts a matrix time series to a vector time series for various analyses. It first aggregates the multivariate matrix time series by year, then converts it to a vector time series in which the *seasons* correspond to these annualized values for the original variables. The `seas` parameter enables focusing the subsequent analysis on seasons of special interest, or to ignore seasons where there are too many missing data.
The function can be used in conjunction with `seaKen` to conduct a Regional Kendall trend analysis, as described in [Trends](#trends) below: ```{r} y <- window(sfbayChla, start = 2005, end = c(2009, 12)) # 5 years, 16 sites round(mts2ts(y, seas = 2:4), 1) # focus on Feb-Apr spring bloom ``` Some functions (e.g., `eof`) do not permit `NA`s and some kind of data imputation or omission will usually be required. The function `interpTs` is handy for interpolating small data gaps. It can also be used for filling in larger gaps with long-term or seasonal means or medians. Here, we use it to bridge gaps of up to three months. ```{r} chl27 <- sfbayChla[, "s27"] chl27a <- interpTs(chl27, gap = 3) ``` The interpolated series is then plotted in red and the original series overplotted below. ```{r fig.height=3.7, fig.width=6} plot(chl27a, col = "red", lwd = .5, xlab = "") lines(chl27, col = "blue", lwd = 1.5) ``` # Analyzing ## Trends {#trends} The function `mannKen` does a Mann-Kendall test of trend on a time series and provides the corresponding nonparametric slope estimate. Because of serial correlation for most monthly time series, the significance of such a trend is often overstated and `mannKen` is better suited for annual series, such as this one for Nile River flow: ```{r} mannKen(Nile) ``` The negative trend in Nile River flow identified by `mannKen` is due largely to a shift in the late 19th century. The Pettitt test, which has a similar basis to the Mann-Kendall test [@pettitt1979], provides a nonparametric estimate of the change-point. The shift happened in 1898--99 and coincides with the beginning of construction of the Lower Aswan Dam, as shown in the plot of Nile River flow at Aswan, 1871--1970, below. ```{r, fig.height=3.7, fig.width=6} plot(Nile, ylab = "Flow", xlab = "") abline(v=1898, col='blue') pett(Nile) ``` `pett` can also be used with a matrix: ```{r} y <- ts.intersect(Nile, LakeHuron) pett(y) ``` Both `mannKen` and `pett` can also handle matrices or data frames, with options for plotting trends in the original units per year or divided by the median for the series. The first option is suitable when time series are all in the same units, such as chlorophyll-*a* measurements from different stations. The second makes sense with variables of different units but is not suitable for variables that can span zero (e.g., sea level, or temperature in $^\circ$C) or that have a zero median. Plotted variables can be ordered by the size of their trends, statistical significance is mapped to point shape, and trends based on excessive missing data are omitted. When aggregating monthly series to produce an annual series for trend testing, there is a utility function `tsSub` that allows subsetting the months beforehand (`meanSub` is actually more efficient when aggregation is the goal). It can be useful for avoiding months with many missing data, or to focus attention on a particular time of year: ```{r} y <- sfbayChla y1 <- tsSub(y, seas = 2:4) # focus on Feb-Apr spring bloom y2 <- aggregate(y1, 1, mean, na.rm = FALSE) signif(mannKen(y2), 3) ``` A main role for `mannKen` in this package is as a support function for the Seasonal Kendall test of trend [@hirsch1982]. The Seasonal Kendall test combines information about trends for individual months (or some other subdivision of the year such as quarters) and produces an overall test of trend for a series.
`mannKen` collects certain information on the pattern of missing data that is then used to determine if a Seasonal Kendall test is warranted. In particular, there is an option to report a result only if more than half the seasons are each missing less than half the possible comparisons between the first and last 20\% of the years [@schertz1991]: ```{r} chl27 <- sfbayChla[, "s27"] seaKen(chl27) ``` An important role, in turn, for `seaKen` in this package is as a support function for `seaRoll`, which applies the Seasonal Kendall test to a rolling window of years, such as a decadal window. There is an option to plot the results of `seaRoll`. `seaKen` is subject to distortion by correlation among months, but the relatively small number of years per window in typical use does not allow for an accurate correction: ```{r} seaRoll(chl27, w = 10) ``` The Seasonal Kendall test is not informative when trends for different months differ in sign. The function `seasonTrend` enables visualization of individual monthly trends and can be helpful for, among other things, deciding on the appropriateness of the Seasonal Kendall test. The Sen slopes are shown along with an indication, using bar colour, of the Mann-Kendall test of significance. The bar is omitted if the proportion of missing values in the first and last fifths of the data is less than 0.5. ```{r, fig.height=8, fig.width=7} x <- sfbayChla seasonTrend(x, plot = TRUE, ncol = 2, scales = 'free_y') ``` The function `trendHomog` can also be used to test directly for the homogeneity of seasonal trends [@belle1984]: ```{r} x <- sfbayChla[, 's27'] trendHomog(x) ``` A Regional Kendall test is similar to a Seasonal Kendall test, with annual data for multiple sites instead of annual data for multiple seasons [@helsel2006a]. The function `mts2ts` ([Reshaping](#reshape)) facilitates transforming an annual matrix time series into the required vector time series for `seaKen`, with stations playing the role of seasons. As with seasons, correlation among sites can inflate the apparent statistical significance, so the test is best used with stations from different subregions that are not too closely related, unlike the following example: ```{r} chl <- sfbayChla[, 1:12] # first 12 stns have good data coverage seaKen(mts2ts(chl, 2:4)) # regional trend in spring bloom ``` ## Empirical Orthogonal Functions {#eof} Empirical Orthogonal Function (EOF) analysis is a term used primarily in the earth sciences for principal component analysis applied to simultaneous time series at different spatial locations. @hannachi2007 provide a comprehensive summary. The function `eof` in this package, based on `prcomp` and `varimax` in the `stats` package, optionally scales the time series and applies a rotation to the EOFs. `eof` requires an estimate of the number of EOFs to retain for rotation. `eofNum` provides a guide to this number by plotting the eigenvalues and their confidence intervals in a *scree* plot. Here, we apply `eofNum` to annualized San Francisco Bay chlorophyll data and retain the stations with no missing data, namely, the first 12 stations. ```{r, fig.height=3.7, fig.width=6} chla1 <- aggregate(sfbayChla, 1, mean, na.rm = TRUE) chla1 <- chla1[, 1:12] eofNum(chla1) ``` These stations have similar coefficients for the first EOF and appear to act as one with respect to chlorophyll variability on the annual scale. It suggests that further exploration of the interannual variability of these stations can be simplified by using a single time series, namely, the first EOF. 
```{r} e1 <- eof(chla1, n = 1) e1 ``` The function `eofPlot` produces a graph of either the EOFs or their accompanying time series. In this case, with `n = 1`, there is only one plot for each such graph. ```{r, fig.height=3.1, fig.width=5} eofPlot(e1, type = "amp") ``` Principal component analysis can also be useful in studying the way different seasonal *modes* of variability contribute to overall year-to-year variability of a single time series [@jassby1999a]. The basic approach is to consider each month as determining a separate annual time series and then to calculate the eigenvalues for the resulting $12 \times n$ years time series matrix. The function `ts2df` is useful for expressing a monthly time series in the form needed by `eof`. For example, the following code converts the monthly chlorophyll time series for Station 27 in San Francisco Bay to the appropriate data frame with October, the first month of the local *water year*, in the first column, and years with missing data omitted: ```{r} chl27b <- interpTs(sfbayChla[, "s27"], gap = 3) chl27b <- ts2df(chl27b, mon1 = 10, addYr = TRUE, omit = TRUE) head(round(chl27b, 1)) ``` The following example plots the EOFs from an analysis of this month $\times$ year data frame for Station 27 chlorophyll after scaling the data. `eofNum` (not shown) suggested retaining up to two EOFs. The resulting rotated EOFs imply two separate modes of variability for further exploration, the first operating during May-Sep and the other during Nov-Jan: ```{r, fig.height=3.1, fig.width=5} e2 <- eof(chl27b, n = 2, scale. = TRUE) eofPlot(e2, type = "coef") ``` ## Time series decomposition An analysis of chlorophyll-*a* time series from many coastal and estuarine sites around the world demonstrates that the standard deviation of chlorophyll is approximately proportional to the mean, both among and within sites, as well as at different time scales [@cloern2010]. One consequence is that these monthly time series are well described by a multiplicative seasonal model: $c_{ij} = C y_i m_j \epsilon_{ij}$, where $c_{ij}$ is chlorophyll concentration in year $i$ and month $j$; $C$ is the long-term mean; $y_i$ is the annual effect; $m_j$ is the mean seasonal (in this case monthly) effect; and $\epsilon_{ij}$ is the residual series, which we sometimes refer to as the *events* component. The annual effect is simply the annual mean divided by the long-term mean: $y_{i} = Y_{i}/C$, where $Y_{i} = (1/12) \sum_{j=1}^{12}c_{ij}$. The mean monthly effect is given by $m_{j}=(1/N) \sum_{i=1}^{N} M_{ij}/(C y_{i})$, where $M_{ij}$ is the value for month $j$ in year $i$, and $N$ is the total number of years. The events component is then obtained by $\epsilon_{ij}=c_{ij}/(C y_{i} m_{j})$. This simple approach is motivated partly by the observation that many important events for estuaries (e.g., persistent dry periods, species invasions) start or stop suddenly. Smoothing to extract the annualized term, which can disguise the timing of these events and make analysis of them unnecessarily difficult, is not used. The `decompTs` function described here accomplishes this multiplicative decomposition (an option allows additive decomposition as an alternative). The median rather than the mean can be used in the operations described above, and the median is, in fact, the default for the function. Large, isolated events are common in environmental time series, especially from the ocean or ocean-influenced habitats such as certain types of estuary.
The median leads to a more informative decomposition in these cases. `decompTs` requires input of a time series matrix in which the columns are monthly time series. It allows missing data, but it is up to the user to decide how many data are sufficient and if the pattern of missing data will lead to bias in the results. If so, it would be advisable to eliminate problem years beforehand by setting all month values to `NA` for those years. There are two cases of interest here: one in which the seasonal effect is held constant from year to year, and another in which it is allowed to vary by not distinguishing a separate events component. The choice is made by setting `event = TRUE` or `event = FALSE`, respectively, in the input. The output of this function is a matrix time series containing the original time series and its multiplicative model components, except for the long-term median or mean. The average seasonal pattern may not resemble observed seasonality in a given year. Patterns that are highly variable from year to year will result in an average seasonal pattern of relatively low amplitude (i.e., low range of monthly values) compared to the amplitudes in individual years. An average seasonal pattern with high amplitude therefore indicates both high amplitude and a recurring pattern for individual years. The default time series `plot` again provides a quick illustration of the result. ```{r, fig.height=6, fig.width=6} chl27 <- sfbayChla[, "s27"] d1 <- decompTs(chl27) plot(d1, nc = 1, main = "Station 27 Chl-a decomposition") ``` The average seasonal pattern does not provide any information about potential secular trends in the pattern. A solution is to apply the decomposition to a moving time window. The window should be big enough to yield a meaningful average of interannual variability but short enough to allow a trend to manifest. This may be different for different systems, but a decadal window can be used as a starting point. A more convenient way to examine changing seasonality is with the dedicated function `plotSeason`. It divides the time period into intervals and plots a composite of the seasonal pattern in each interval. The intervals can be specified by a single number -- the number of equal-length intervals -- or by a vector listing the breaks between intervals. The function also warns of months that may not be represented by enough data by colouring them red. `plotSeason` is an easy way to decide on the value for the `event` option in `decompTs`. ```{r, fig.height=4.3, fig.width=7} plotSeason(chl27, num.era = 3, same.plot = FALSE, ylab = 'Stn 27 Chl-a') ``` The same boxplots can also be combined in one plot, with boxplots for the same month grouped together. ```{r, fig.height=4.3, fig.width=7} plotSeason(chl27, num.era = 3, same.plot = TRUE, ylab = 'Stn 27 Chl-a') ``` `plotSeason` also has an option to plot all individual months separately as standardized anomalies for the entire record. ```{r, fig.height=4.3, fig.width=7} plotSeason(chl27, "by.month", ylab = 'Stn 27 Chl-a') ``` With all types of seasonal plots, it is often helpful to adjust the device aspect ratio and size manually to get the clearest information. ## Phenological parameters `phenoPhase` and `phenoAmp` act on monthly time series or dated observations (`zoo` objects) and produce measures of the phase and amplitude, respectively, for each year. `phenoPhase` finds the month containing the maximum value, the *fulcrum* or center of gravity, and the weighted mean month.
`phenoAmp` finds the range, the range divided by mean, and the coefficient of variation. Both functions can be confined to only part of the year, for example, the months containing the spring phytoplankton bloom. This feature can also be used to avoid months with chronic missing-data problems. Illustrating once again with chlorophyll observations from Station 27 in San Francisco Bay: ```{r} chl27 <- sfbayChla[, 's27'] p1 <- phenoPhase(chl27) head(p1) p2 <- phenoPhase(chl27, c(1, 6)) head(p2) p3 <- phenoAmp(chl27, c(1, 6)) head(p3) ``` Using the actual dated observations: ```{r} zchl <- tsMake(sfb, focus = "chl", layer = c(0, 5), type = 'zoo') head(zchl) zchl27 <- zchl[, 3] head(phenoPhase(zchl27)) head(phenoPhase(zchl27, c(1, 6), out = 'doy')) head(phenoPhase(zchl27, c(1, 6), out = 'julian')) ``` ## Miscellaneous plotting functions `plotTsAnom` plots (unstandardized) departures of vector or matrix time series from their long-term mean and can be a useful way of examining trends in annualized data. ```{r, fig.height=3.7, fig.width=6} chl <- aggregate(sfbayChla[, 1:6], 1, meanSub, 2:4, na.rm = TRUE) plotTsAnom(chl, ylab = 'Chlorophyll-a', strip.labels = paste('Station', substring(colnames(chl), 2, 3))) ``` `plotTsTile` plots a monthly time series as a month $\times$ year grid of tiles, with color representing magnitude. The data can be binned in either of two ways. The first is simply by deciles. The second, which is intended for log-anomaly data, is by four categories: Positive numbers higher or lower than the mean positive value, and negative numbers higher or lower than the mean negative value. In this version of `plotTsTile`, the anomalies are calculated with respect to the overall mean month. ```{r, fig.height=4.3, fig.width=7} chl27 <- sfbayChla[, "s27"] plotTsTile(chl27) ``` This plot shows clearly the change in chlorophyll magnitude after 1999. # References
/scratch/gouwar.j/cran-all/cranData/wql/inst/doc/wql-package.Rmd
/scratch/gouwar.j/cran-all/cranData/wql/vignettes/wql-package.Rmd
wqs.est <- function(y.train, x.train, z.train = NULL, y.valid = y.train, x.valid = x.train, z.valid = z.train, n.quantiles = 4, B = 100, b1.pos = TRUE){ # Check training data check_train <- check_xyz(x.train, y.train, z.train) if(check_train[1])stop("x.train must be a matrix") if(check_train[2])stop("check dimensions of training data") if(check_train[3])stop("check dimensions of training data") # Check validation data check_valid <- check_xyz(x.valid, y.valid, z.valid) if(check_valid[1])stop("x.valid must be a matrix") if(check_valid[2])stop("check dimensions of validation data") if(check_valid[3])stop("check dimensions of validation data") # Check other inputs if(B < 2)stop("value of B must be at least 2") if(class(b1.pos)!= "logical")stop("b1.pos must be logical value of TRUE or FALSE") if(n.quantiles < 2 | n.quantiles > 10)stop("n.quantiles must be at least 2 and no greater than 10") c <- dim(x.train)[2] # number of components # calculate quantiles q.train <- quantile.fn(x.train, n.quantiles) q.valid <- quantile.fn(x.valid, n.quantiles) # specify lower and upper bounds for b1 and the weights bounds <- specify.bounds(b1.pos, c) ineqLB <- bounds$ineqLB ineqUB <- bounds$ineqUB # specify initial values init <- specify.init(z.train, y.train, b1.pos, c) # Estimate weights across bootstrap samples for WQS Regression result <- wqs_b.est(y.train, q.train, z.train, B, pars = init, fun = objfn.cont, eqfun = lincon, eqB = 1, ineqfun = ineq, ineqLB, ineqUB, LB = NULL, UB = NULL) wts.matrix <- result$wts.matrix # Calculate final weights for each subset using relative test statistic weights <- teststat.fn(wts.matrix, result$test_stat) # Apply weights to validation data Set final <- wqs.fit(q.valid, z.valid, y.valid, weights) out <- list(q.train, q.valid, wts.matrix, weights, final$WQS, final$fit) names(out) <- c("q.train", "q.valid", "wts.matrix", "weights", "WQS", "fit") return(out) }
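#----------------------------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original package source).
# It assumes the helper functions in wqs_helpers.R have been sourced and that the
# Rsolnp and glm2 packages used internally are installed; the objects created
# below (x, y) are made up purely for the illustration.
#----------------------------------------------------------------------------------------------
if (FALSE) {
  library(Rsolnp)
  library(glm2)
  set.seed(1)
  n <- 200
  x <- matrix(rnorm(n * 5), nrow = n,
              dimnames = list(NULL, paste0("x", 1:5)))  # 5 mixture components
  y <- 0.5 * x[, 1] + 0.3 * x[, 2] + rnorm(n)           # outcome driven by x1 and x2
  res <- wqs.est(y.train = y, x.train = x,
                 n.quantiles = 4, B = 10, b1.pos = TRUE)
  res$weights       # estimated component weights (sum to 1)
  summary(res$fit)  # regression of y on the weighted quantile sum (validation data)
}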
/scratch/gouwar.j/cran-all/cranData/wqs/R/wqs.est.R
# Helper functions for WQS #---------------------------------------------------------------------------------------------- # Function to perform preliminary check of the data #---------------------------------------------------------------------------------------------- check_xyz <- function(x,y,z){ class_x <- ifelse(class(x)== "matrix" | class(x)== "data.frame", 0, 1) n <- length(y) n.x <- dim(x)[1] dim_x <- ifelse(n != n.x, 1, 0) if(is.null(z)){dim_z <- 0} else{ if(class(z) == "matrix") n.z <- dim(z)[1] if(class(z) == "vector") n.z <- length(z)[1] dim_z <- ifelse(n != n.z, 1, 0) } return(c(class_x, dim_x, dim_z)) } #---------------------------------------------------------------------------------------------- # Create ranked data (returns matrix of quantiles) #---------------------------------------------------------------------------------------------- quantile.fn <- function(data, n.quantiles){ q <- matrix(0, dim(data)[1], dim(data)[2]) I <- dim(data)[2] for(i in 1:I){ q[,i]<- cut(data[,i], breaks = quantile(data[,i], probs = c(0:n.quantiles/n.quantiles)), include.lowest=TRUE)} q <- q-1 #colnames(q) <- paste0("q", seq(1:I)) return(q) } #---------------------------------------------------------------------------------------------- # objective function for continuous response (returns least squares) #---------------------------------------------------------------------------------------------- # The objective functions will take the following arguments # q: matrix of quantiles (ranked data) # z: matrix of covariates # y: response vector # param: paramter vector (b0,b1,w1,...wc) # Note: The objective function and the constraint functions must take the same arguments # regardless of whether or not they are used by the particular function objfn.cont <- function(param, q, z, y){ c <- dim(q)[2] z.null <- ifelse(is.null(z),1,0) # 1 if NULL b0 <- param[1] # intercept b1 <- param[2] # coefficient for WQS term w <- param[3:(2+c)] # vector of weights (length c) ls <- numeric() # initialize space if(z.null == 0){ p <- dim(z)[2] # of covariates theta <- param[(3+c):(2+c+p)] # parameters for covariates (length p) mu <- b0 + b1*q%*%w + z%*%theta }else{ mu <- b0 + b1*q%*%w} ls <- (y-mu)**2 leastsq <- sum(ls) return(leastsq) # minimizes objective fn and we want to minimize least squares } #---------------------------------------------------------------------------------------------- # Linear constraint (allows us to contstrain weights to sum to 1) #---------------------------------------------------------------------------------------------- # Note: The objective function and the constraint functions must take the same arguments # regardless of whether or not they are used by the particular function lincon <- function(param, q, z, y){ c <- dim(q)[2] weights <- param[3:(2+c)] sum <- sum(weights) return(sum) } #---------------------------------------------------------------------------------------------- # Inequality constraints (allows us to put constraints on b1 and the weights) #---------------------------------------------------------------------------------------------- ineq <- function(param, q, z, y){ c <- dim(q)[2] b1 <- param[2] weights <- param[3:(2+c)] return(c(b1, weights)) } #-------------------------------------------------------------------------------------------------- # Calculate weights based on relative test statistic #-------------------------------------------------------------------------------------------------- teststat.fn <- function(wts, test_stat){ Sb = abs(test_stat) # take 
absolute value so we can calculate relative strength of test statistic signal <- Sb/sum(Sb) w <- colSums(signal*wts) return(w) } #-------------------------------------------------------------------------------------------------- # Function to apply weights to validation data set #-------------------------------------------------------------------------------------------------- wqs.fit <- function(q, z, y, w){ WQS <- as.vector(q%*%w) z.null <- ifelse(is.null(z),1,0) #1 if NULL if(z.null == 0){ temp <- data.frame(cbind(y, z, WQS)) } else{temp <- data.frame(cbind(y, WQS))} fit <- glm2(y ~ ., data = temp, family = "gaussian"(link = identity)) out <- list(WQS, fit) names(out) <- c("WQS", "fit") return(out) } #-------------------------------------------------------------------------------------------------- # Function to specify lower and upper bounds #-------------------------------------------------------------------------------------------------- specify.bounds <- function(b1.pos, c){ # Specify lower and upper bounds for b1 based on direction of constraint if(b1.pos == TRUE){ b1.LB <- 0 b1.UB <- Inf } else{ b1.LB <- -Inf b1.UB = 0 } # first term represents bound for b1, next c terms represent bounds for the weights ineqLB <- c(b1.LB, rep(0,c)) ineqUB <- c(b1.UB, rep(1,c)) out <- list(ineqLB, ineqUB) names(out) <- c("ineqLB", "ineqUB") return(out) } #-------------------------------------------------------------------------------------------------- # Function to specify initial values #-------------------------------------------------------------------------------------------------- specify.init <- function(z, y, b1.pos, c){ z.null <- ifelse(is.null(z),1,0) #1 if NULL b0.0 <- 0 w.0 <- rep(1/c, c) names(w.0) <- paste0("w", 1:c) b1.0 <- ifelse(b1.pos == TRUE, 0.1, -0.1) # Initial values for covariates if(z.null == 0){ fit.init <- glm2(y ~ z, family = "gaussian"(link = identity)) init.z <- coef(fit.init)[-1] p <- dim(z)[2] names(init.z) <- c(paste0("z",1:p)) init <- c(b0 = b0.0, b1 = b1.0, w.0, init.z) } else{init <- c(b0 = b0.0, b1 = b1.0, w.0)} return(init) } #-------------------------------------------------------------------------------------------------- # Function to estimate weights across bootstrap samples for WQS Regression #-------------------------------------------------------------------------------------------------- wqs_b.est <- function(y, q, z, B, pars, fun, eqfun, eqB, ineqfun, ineqLB, ineqUB, LB, UB){ z.null <- ifelse(is.null(z),1,0) #1 if NULL c <- dim(q)[2] if(z.null == 0){p <- dim(z)[2]} else{p <-0} # initialize matrix for parameter estimates (from estimation step) result <- matrix(0, nrow = B, ncol = length(pars)) colnames(result) <- names(pars) convergence <- rep(0, B) #0 indicates convergence; 1 or 2 indicates non-convergence beta_1 <- rep(0, B) pval <- rep(0, B) test_stat <- rep(0, B) # test statistic (z-value) #---------------------------- BOOTSTRAP ROUTINE ----------------------------------- for (b in 1:B) { # draw random sample (of same size as training data set) with replacement samp <- sample(1:length(y), replace = TRUE) y.b <- as.vector(y[samp]) q.b <- q[samp,] if(z.null == 0){z.b <- as.matrix(z[samp,])}else{z.b <- NULL} rownames(z.b) <- NULL result.b <- solnp(pars, fun, eqfun, eqB, ineqfun, ineqLB, ineqUB, LB, UB, q.b, z.b, y.b, control = list(tol = 1e-10,delta = 1e-10, trace = 0)) result[b,] <- result.b$pars convergence[b] <- result.b$convergence w <- result.b$pars[3:(2+c)] fit <- wqs.fit(q.b, z.b, y.b, w)$fit beta_1[b] <- fit$coefficients[row.names = "WQS"] 
test_stat[b] <- summary(fit)$coefficients['WQS', 't value'] # extract z-value for WQS parameter pval[b] <- summary(fit)$coefficients['WQS', 'Pr(>|t|)'] # extract p-value } wts.matrix <- result[,3:(2+c)]; colnames(wts.matrix) <- paste0('w', 1:c) out <- list(wts.matrix, convergence, beta_1, test_stat, pval) names(out) <- c("wts.matrix", "convergence", "beta_1", "test_stat", "pval") return(out) }
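#----------------------------------------------------------------------------------------------
# Illustrative checks of two helpers above (not part of the original source).
# The numbers are made up; they only show what quantile.fn and teststat.fn compute.
#----------------------------------------------------------------------------------------------
if (FALSE) {
  set.seed(2)
  xx <- matrix(rnorm(100 * 3), ncol = 3)
  qx <- quantile.fn(xx, n.quantiles = 4)
  table(qx[, 1])  # roughly 25 observations in each of the scores 0, 1, 2, 3

  # teststat.fn averages the bootstrap weight vectors, weighting each bootstrap
  # draw by the relative magnitude of its test statistic
  wts <- rbind(c(0.7, 0.2, 0.1),
               c(0.2, 0.5, 0.3))
  tst <- c(4, 1)          # first draw carries 4/5 of the influence
  teststat.fn(wts, tst)   # returns c(0.60, 0.26, 0.14)
}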
/scratch/gouwar.j/cran-all/cranData/wqs/R/wqs_helpers.R
#' Full wrapper WQS permutation test #' #' \code{wqs_full_perm} is a full wrapper function that is a full implementation #' of the Weighted Quantile Sum (WQS) regression method followed by the #' permutation test to determine the significance of the WQS coefficient. #' #' @param formula An object of class formula. The wqs term must be included in #' the formula (e.g., y ~ wqs + ...). #' @param data The \code{data.frame} to be used in the WQS regression run. #' @param mix_name A vector with the mixture column names. #' @param q An integer to indicate the number of quantiles to split the mixture #' variables. #' @param b_main The number of bootstraps for the main WQS regression run. #' @param b_perm The number of bootstraps for the iterated permutation test #' WQS regression runs and the reference WQS regression run (only for linear #' WQS regression and only when b_mean != b_perm). #' @param b1_pos A logical value that indicates whether beta values should be #' positive or negative. #' @param rs A logical value indicating whether random subset implementation #' should be performed. #' @param niter Number of permutation test iterations. #' @param seed An integer to fix the seed. This will only impact the the initial #' WQS regression run and not the permutation test iterations. The default #' setting is NULL, which means no seed is used for the initial WQS regression. #' The seed will be saved in the "gwqs_main" object as "gwqs_main$seed". #' @param plan_strategy Evaluation strategy for the plan function. You can choose #' among "sequential", "transparent", "multisession", "multicore", "multiprocess", #' "cluster" and "remote." See future::plan documentation for full details. #' @param b1_constr Logical value that determines whether to apply positive or #' negative constraints in the optimization function for the weight optimization. #' @param family A description of the error distribution and link function to be #' used in the model. This can be a character string naming a family function #' (e.g., "binomial") or a family object (e.g., binomial(link="logit")). #' Currently validated families include gaussian(link="identity") for linear #' regression, binomial() with any accepted link function (e.g., "logit" or #' "probit"), poisson(link = "log"), quasipoisson(link = "log"), or "negbin" for #' negative binomial. The "multinomial" family is not yet supported. #' @param stop_if_nonsig if TRUE, the function will not proceed with the #' permutation test if the main WQS regression run produces nonsignificant #' p-value. #' @param stop_thresh numeric p-value threshold required in order to proceed #' with the permutation test, if `stop_if_nonsig = TRUE`. #' @param ... Other parameters to put into the gwqs function call. #' #' @return \code{wqs_full_perm} returns an object of class `wqs_perm`, which #' contains three sublists: #' #' \item{perm_test}{List containing: (1) `pval`: permutation test p-value, (2) (linear #' regression only) `testbeta1`: reference WQS regression coefficient beta1 value, (3) #' (linear regression only) `betas`: Vector of beta values from each #' permutation test run, (4) (logistic regression only) `testpval`: test reference #' p-value, (5) (logistic regression only) `permpvals`: p-values from the null #' models.} #' \item{gwqs_main}{Main gWQS object (same as model input). 
This will now include an #' additional object "seed" that returns the seed used for this main WQS regression.} #' \item{gwqs_perm}{Permutation test reference gWQS object (NULL if model #' `family != "gaussian"` or if same number of bootstraps are used in permutation #' test WQS regression runs as in the main run).} #' @import gWQS #' @export wqs_full_perm #' #' @examples #' library(gWQS) #' #' # mixture names #' PCBs <- names(wqs_data)[1:17] #half of the original 34 for quick computation #' #' perm_test_res <- wqs_full_perm(formula = yLBX ~ wqs, data = wqs_data, #' mix_name = PCBs, q = 10, b_main = 5, #' b_perm = 5, b1_pos = TRUE, b1_constr = FALSE, #' niter = 3, seed = 16, plan_strategy = "multicore", #' stop_if_nonsig = FALSE) #' #' # Note: The default values of b_main = 1000, b_perm = 200, and niter = 200 #' # are the recommended parameter values. This example has a lower b_main, #' # b_perm, and niter in order to serve as a shorter test run. #' wqs_full_perm <- function(formula, data, mix_name, q = 10, b_main = 1000, b_perm = 200, b1_pos = TRUE, b1_constr = FALSE, rs = FALSE, niter = 200, seed = NULL, family = "gaussian", plan_strategy = "multicore", stop_if_nonsig = FALSE, stop_thresh = 0.05, ...){ if (is.character(family)) { if (family=="multinomial"){ stop("This simulation function doesn't yet accomodate multinomial WQS regression.") } } # run main WQS regression gwqs_res_main <- gWQS::gwqs(formula = formula, data = data, mix_name = mix_name, q = q, b = b_main, b1_pos = b1_pos, b1_constr = b1_constr, rs = rs, seed = seed, validation = 0, family = family, plan_strategy = plan_strategy, ...) gwqs_res_main$seed<-seed naive_p <- summary(gwqs_res_main)$coefficients["wqs", 4] if (stop_if_nonsig == TRUE & naive_p > stop_thresh){ message(sprintf("The main WQS regression run did not give a significant result (p = %s)", naive_p)) results <- list(gwqs_main = gwqs_res_main, family = gwqs_res_main$family$family, gwqs_perm = NULL, perm_test = NULL) } else { # run permutation test (using wqs_perm function) results <- wqs_pt(gwqs_res_main, niter = niter, boots = b_perm, b1_pos = b1_pos, b1_constr = b1_constr, rs = rs, plan_strategy = plan_strategy, seed = seed) } class(results) <- "wqs_pt" results }
/scratch/gouwar.j/cran-all/cranData/wqspt/R/wqs_full_perm.R
#' WQS permutation test #' #' \code{wqs_pt} takes a `gwqs` object as an input and runs the permutation #' test (Day et al. 2022) to obtain an estimate for the p-value significance for #' the WQS coefficient. #' #' To use `wqs_pt`, we first need to run an initial WQS regression run while #' setting `validation = 0`. We will use this `gwqs` object as the model argument #' for the `wqs_pt` function. Note that permutation test has so far only been #' validated for linear WQS regression (i.e., `family = "gaussian"`) or logistic #' WQS regression (i.e., `family = binomial(link = "logit")`), though the #' permutation test algorithm should also work for all WQS GLMs. Therefore, #' this function accepts `gwqs` objects made with the following families: #' "gaussian" or gaussian(link = "identity"), "binomial" or binomial() with #' any accepted link function (e.g., "logit" or "probit"), "poisson" or #' poisson(link="log"), "negbin" for negative binomial, and "quasipoisson" or #' quasipoisson(link="log"). This function cannot currently accommodate `gwqs` #' objects made with the "multinomial" family, and it is not currently able to #' accommodate stratified weights or WQS interaction terms (e.g., `y ~ wqs * sex`). #' #' The argument `boots` is the number of bootstraps for the WQS regression run #' in each permutation test iteration. Note that we may elect a bootstrap count #' `boots` lower than that specified in the model object for the sake of #' efficiency. If `boots` is not specified, then we will use the same bootstrap #' count in the permutation test WQS regression runs as that specified in the #' model argument. #' #' The arguments `b1_pos` and `rs` should be consistent with the inputs chosen #' in the model object. The seed should ideally be consistent with the seed set #' in the model object for consistency, though this is not required. #' #' @param model A \code{gwqs} object as generated from the \code{gWQS} package. #' @param niter Number of permutation test iterations. #' @param boots Number of bootstrap samples for each permutation test WQS #' regression iteration. If `boots` is not specified, then we will use the same #' bootstrap count for each permutation test WQS regression iteration as that #' specified in the main WQS regression run. #' @param b1_pos A logical value that indicates whether beta values should be #' positive or negative. #' @param b1_constr Logical value that determines whether to apply positive or #' negative constraints in the optimization function for the weight optimization. #' @param rs A logical value indicating whether random subset implementation #' should be performed. #' @param plan_strategy Evaluation strategy for the plan function. You can choose #' among "sequential", "transparent", "multisession", "multicore", #' "multiprocess", "cluster" and "remote." See future::plan documentation for full #' details. #' @param seed (optional) Random seed for the permutation test WQS reference run. #' This should be the same random seed as used for the main WQS regression run. #' This seed will be saved in the "gwqs_perm" object as "gwqs_perm$seed". 
#' #' @return \code{wqs_pt} returns an object of class `wqs_pt`, which contains: #' #' \item{perm_test}{List containing: (1) `pval`: permutation test p-value, #' (2) (linear WQS regression only) `testbeta1`: reference WQS coefficient beta1 value, #' (3) (linear WQS regression only) `betas`: Vector of beta values from #' each permutation test run, (4) (WQS GLM only) `testpval`: test reference #' p-value, (5) (WQS GLM only) `permpvals`: p-values from the null models.} #' \item{gwqs_main}{Main gWQS object (same as model input).} #' \item{gwqs_perm}{Permutation test reference gWQS object (NULL if model #' `family != "gaussian"` or if same number of bootstraps are used in permutation #' test WQS regression runs as in the main run).} #' @import gWQS ggplot2 viridis cowplot stats methods #' @export wqs_pt #' #' @examples #' library(gWQS) #' #' # mixture names #' PCBs <- names(wqs_data)[1:17] #half of the original 34 for quick computation #' #' # create reference wqs object with 5 bootstraps #' wqs_main <- gwqs(yLBX ~ wqs, mix_name = PCBs, data = wqs_data, q = 10, #' validation = 0, b = 5, b1_pos = TRUE, b1_constr = FALSE, #' plan_strategy = "multicore", family = "gaussian", seed = 16) #' # Note: We recommend niter = 1000 for the main WQS regression. This example #' # has a lower number of bootstraps to serve as a shorter test run. #' #' # run permutation test #' #' perm_test_res <- wqs_pt(wqs_main, niter = 3, b1_pos = TRUE) #' #' #' # Note: The default value of niter = 200 is the recommended parameter value. #' # This example has a lower niter in order to serve as a shorter test run. #' #' @references #' #' Day, D. B., Sathyanarayana, S., LeWinn, K. Z., Karr, C. J., Mason, W. A., & #' Szpiro, A. A. (2022). A permutation test-based approach to strengthening #' inference on the effects of environmental mixtures: comparison between single #' index analytic methods. Environmental Health Perspectives, 130(8). #' #' Day, D. B., Collett, B. R., Barrett, E. S., Bush, N. R., Swan, S. H., Nguyen, #' R. H., ... & Sathyanarayana, S. (2021). Phthalate mixtures in pregnancy, #' autistic traits, and adverse childhood behavioral outcomes. Environment #' International, 147, 106330. #' #' Loftus, C. T., Bush, N. R., Day, D. B., Ni, Y., Tylavsky, F. A., Karr, C. J., #' ... & LeWinn, K. Z. (2021). Exposure to prenatal phthalate mixtures and #' neurodevelopment in the Conditions Affecting Neurocognitive Development and #' Learning in Early childhood (CANDLE) study. Environment International, 150, #' 106409. 
#' wqs_pt <- function(model, niter = 200, boots = NULL, b1_pos = TRUE, b1_constr = FALSE, rs = FALSE, plan_strategy = "multicore", seed = NULL) { pbapply::pboptions(type="timer") if (is(model, "gwqs")) { if (model$family$family == "multinomial"){ stop("The permutation test is not currently set up to accomodate the multinomial WQS regressions.") } } else stop("'model' must be of class 'gwqs' (see gWQS package).") mm <- model$fit formchar <- as.character(formula(mm)) if (length(formchar) == 1) { tempchar <- rep(NA, 3) tempchar[1] <- "~" tempchar[2] <- gsub("\\ ~.*", "", formchar) tempchar[3] <- gsub(".*\\~ ", "", formchar) formchar <- tempchar rm(tempchar) } if (!is.null(model$stratified) | grepl("wqs:", formchar[3], fixed = TRUE)) { stop("This permutation test is not yet set up to accomodate stratified weights or WQS interaction terms.") } cl = match.call() yname <- as.character(formula(mm))[2] mix_name <- names(model$bres)[names(model$bres) %in% model$final_weights$mix_name] if (!is.null(model$qi)) { nq <- max(sapply(model$qi, length)) - 1 } else { # this is for cases when there is no quantile transformation or it's already been # done in the data frame nq <- NULL } if (is.null(boots)){ boots <- length(model$bindex) } if (model$family$family == "gaussian"){ Data <- model$data[, -which(names(model$data) %in% c("wqs", "wghts"))] # reference WQS regression run if (boots == length(model$bindex)){ perm_ref_wqs <- model ref_beta1 <- mm$coef[2] } else{ perm_ref_wqs <- gwqs(formula = formula(mm), data = Data, mix_name = mix_name, q = nq, b = boots, rs = rs, validation = 0, plan_strategy = plan_strategy, b1_pos = b1_pos, b1_constr = b1_constr, seed = seed) ref_beta1 <- perm_ref_wqs$fit$coef[2] } if (length(mm$coef) > 2) { # This is the permutation test algorithm when there are multiple independent # variables in the model lm_form <- formula(paste0(formchar[2], formchar[1], gsub("wqs + ", "", formchar[3], fixed = TRUE))) fit.partial <- lm(lm_form, data = Data) partial.yhat <- predict(fit.partial) partial.resid <- resid(fit.partial) reorgmat <- matrix(NA, dim(Data)[1], niter) reorgmat <- apply(reorgmat, 2, function(x) partial.yhat + sample(partial.resid, replace = F)) } else { # This is the permutation test algorithm when there is only one independent # variable in the model reorgmat <- matrix(NA, dim(Data)[1], niter) reorgmat <- apply(reorgmat, 2, function(x) sample(Data[, yname])) } getbetas <- function(x) { newDat <- Data newDat[, yname] <- x names(newDat) <- c(names(Data)) if (length(mm$coef) > 2) { form1 <- formula(paste0(formchar[2], formchar[1], formchar[3])) } else { form1 <- formula(paste0(formchar[2], formchar[1], "wqs")) } gwqs1 <- tryCatch({ suppressWarnings(gwqs(formula = form1, data = newDat, mix_name = mix_name, q = nq, b = boots, rs = rs, validation = 0, plan_strategy = plan_strategy, b1_pos = b1_pos, b1_constr = b1_constr)) }, error = function(e) NULL) if (is.null(gwqs1)) lm1 <- NULL else lm1 <- gwqs1$fit if (is.null(lm1)) { retvec <- NA } else { retvec <- lm1$coef[2] } return(retvec) } betas <- pbapply::pbapply(reorgmat, 2, getbetas) calculate_pval <- function(x, true, posb1 = b1_pos) { if (posb1) { length(which(x > true))/length(betas) } else { length(which(x < true))/length(betas) } } pval <- calculate_pval(betas, ref_beta1, b1_pos) perm_retlist <- list(pval = pval, testbeta1 = ref_beta1, betas = betas, call = cl) model$b1_pos <- b1_pos perm_ref_wqs$b1_pos <- b1_pos perm_ref_wqs$seed <- seed if (boots == length(model$bindex)){ ret_ref_wqs <- NULL } else{ ret_ref_wqs <- 
perm_ref_wqs } } else { Data <- model$data[, -which(names(model$data) %in% c("wqs", "wghts"))] initialfit <- function(m) { if(length(mm$coef) > 2){ newform <- formula(paste0(m, "~", gsub("wqs + ", "", formchar[3], fixed = TRUE))) } else { newform <- formula(paste0(m, "~1")) } fit.x1 <- lm(newform, data = Data) return(resid(fit.x1)) } residmat <- sapply(model$mix_name, initialfit) Data[, model$mix_name] <- residmat lwqs1 <- tryCatch({ suppressWarnings(gwqs(formula = formula(mm), data = Data, mix_name = model$mix_name, q = nq, b = boots, rs = rs, validation = 0, plan_strategy = plan_strategy, b1_pos = b1_pos, family = model$family, seed = seed, b1_constr = b1_constr)) }, error = function(e) NULL) fit1 <- lwqs1$fit if(length(mm$coef) > 2){ fit2form <- formula(paste0(yname, "~", gsub("wqs + ", "", formchar[3], fixed = TRUE))) } else { fit2form <- formula(paste0(yname, "~1")) } fit2 <- glm(fit2form, data = Data, family = model$family$family) p.value.obs <- 1 - pchisq(abs(fit1$deviance - fit2$deviance), 1) reorgmatlist <- lapply(1:niter, function(x) residmat[sample(1:nrow(residmat), replace = F), ]) getperms <- function(x) { newDat <- Data newDat[, model$mix_name] <- x formchar <- as.character(formula(mm)) if (length(mm$coef) > 2) { form1 <- formula(paste0(formchar[2], formchar[1], formchar[3])) } else { form1 <- formula(paste0(formchar[2], formchar[1], "wqs")) } gwqs1 <- tryCatch({ suppressWarnings(gwqs(formula = form1, data = newDat, mix_name = mix_name, q = model$q, b = boots, rs = rs, validation = 0, plan_strategy = plan_strategy, b1_pos = b1_pos, family = model$family$family, b1_constr = b1_constr) )}, error = function(e) NULL) if (is.null(gwqs1)) lm1 <- NULL else lm1 <- gwqs1$fit if (is.null(lm1)) { retvec <- NA } else { devi <- lm1$deviance pperm <- 1 - pchisq(abs(devi - fit2$deviance), 1) retvec <- pperm } return(retvec) } permstats <- pbapply::pbsapply(reorgmatlist, getperms) p0 <- length(permstats[which(permstats <= p.value.obs)]) / niter perm_retlist <- list(pval = p0, testpval = p.value.obs, permpvals = permstats) model$b1_pos <- b1_pos ret_ref_wqs <- NULL } results <- list(gwqs_main = model, family = model$family$family, gwqs_perm = ret_ref_wqs, perm_test = perm_retlist) class(results) <- "wqs_pt" results } #' @rawNamespace S3method(print, wqs_pt) print.wqs_pt <- function(x, ...){ cat("Permutation test WQS coefficient p-value: \n", x$perm_test$pval, "\n") main_sum <- summary(x$gwqs_main) print(main_sum) } #' @rawNamespace S3method(summary, wqs_pt) summary.wqs_pt <- function(object, ...){ message("Permutation test WQS coefficient p-value: \n", object$perm_test$pval, "\n") main_sum <- summary(object$gwqs_main) main_sum }
/scratch/gouwar.j/cran-all/cranData/wqspt/R/wqs_pt.R
#' WQS simulated dataset generator #' #' \code{wqs_sim} generates a simulated dataset of mixture components, covariates, #' and outcomes based on an initial set of specifications. #' #' @param nmix Number of mixture components in simulated dataset. #' @param ncovrt Number of covariates in simulated dataset. #' @param nobs Number of observations in simulated dataset. #' @param ntruewts Number of mixture components that have a non-zero association #' with the outcome (i.e., are not noise). #' @param ntruecovrt Number of covariates that have a non-zero association with #' the outcome (i.e., are not noise). #' @param vcov This parameter relates to the variance-covariance matrix of the #' simulated independent variables (i.e., the m exposure mixture components and #' z covariates). This is either a variance-covariance matrix of dimensions #' (m + z) x (m + z) or a single value. If this is a single value, the variance- #' covariance matrix will have ones on the diagonal and that single value will be #' all the off-diagonal values. For example, if this input were 0.4 and there were #' two mixture components and no covariates, the variance-covariance matrix would #' be matrix(c(1, 0.4, 0.4, 1), nrow = 2, ncol = 2). The default value is 0, #' giving a variance-covariance matrix with variances of 1 and covariances of 0. #' @param eps Dispersion parameter. If the family is "gaussian", this corresponds #' to the residual standard deviation. If the family is "binomial" or "poisson", #' this parameter is ignored. If the family is "negbin", this represents the "size" #' parameter of the negative binomial distribution (see the documentation for the #' rnbinom function for more details). #' @param truewqsbeta Simulated WQS beta_1 value. If NULL, then this value will #' be randomly sampled depending on the parameter rnd_wqsbeta_dir. #' @param truebeta0 Simulated beta_0 value. If NULL, then this value will be #' randomly sampled from a standard normal distribution. #' @param truewts Simulated vector of mixture weights. If NULL, then this value #' will be randomly sampled from a Dirichlet distribution with a vector of alpha #' values all equal to 1 (see the documentation for the extraDistr::rdirichlet #' function documentation for more details). #' @param truegamma Simulated gamma vector. If NULL, then this value will be #' randomly sampled from a standard normal distribution. #' @param rnd_wqsbeta_dir Direction of randomly sampled truewqsbeta (if #' truewqsbeta = NULL). The options are "positive", "negative", or NULL. If #' "positive" or "negative", the truewqsbeta will be sampled from a standard #' half normal distribution in either of those respective directions. If NULL, #' then truewqsbeta will be sampled from a standard normal distribution. #' @param seed Random seed. #' @param q Number of quantiles. #' @param family Family for the generative model creating the outcome vector. #' Options include "gaussian" or gaussian(link = "identity") for a continuous #' outcome, "binomial" or binomial() with any accepted link function for a binary #' outcome, and finally for count outcomes this can be "poisson" or #' poisson(link="log") for the Poisson distributed outcome values, or "negbin" #' for negative binomial distributed outcome values. 
#' #' @return \code{wqs_perm} returns a list of: #' \item{weights}{Simulated weights.} #' \item{coef}{Simulated beta coefficients.} #' \item{Data}{Simulated dataset.} #' \item{etahat}{predicted linear predictor (eta) values from the data generating model.} #' \item{wqs}{Weighted quantile sum vector (quantile-transformed mixture #' components multiplied by weights).} #' \item{modmat}{Model matrix.} #' \item{Xq}{Quantile-transformed mixture components.} #' #' @import mvtnorm extraDistr #' @export wqs_sim #' #' @examples #' #' # For these examples, we only run a GLM using the simulated dataset #' # including the simulated WQS vector just to show that the user-specified #' # coefficients for beta1 and beta0 are returned. An example of running #' # the full permutation test WQS regression for the simulated dataset #' # (for which the WQS vector would be determined by the model) #' # with the "gaussian" family is shown as well. #' #' wqsform<-formula(paste0("y~wqs+",paste(paste0("C",1:10),collapse="+"))) #' #' testsim_gaussian<- #' wqs_sim(truewqsbeta=0.2,truebeta0=-2, #' truewts=c(rep(0.15,5),rep(0.05,5)),family="gaussian") #' Dat<-testsim_gaussian$Data #' Dat$wqs<-testsim_gaussian$wqs #' summary(glm(wqsform,data=Dat,family="gaussian"))$coef[1:2,] #' \donttest{ #' perm_test_res <- wqs_full_perm(formula = wqsform, data = testsim_gaussian$Data, #' mix_name = paste0("T",1:10), q = 10, b_main = 5, #' b_perm = 5, b1_pos = TRUE, b1_constr = FALSE, #' niter = 4, seed = 16, plan_strategy = "multicore", #' stop_if_nonsig = FALSE) #' } #' # Note: The default values of b_main = 1000, b_perm = 200, and niter = 200 #' # are the recommended parameter values. This example has a lower b_main, #' # b_perm, and niter in order to serve as a shorter example run. #' #' \donttest{ #' testsim_logit<- #' wqs_sim(truewqsbeta=0.2,truebeta0=-2, #' truewts=c(rep(0.15,5),rep(0.05,5)),family="binomial") #' Dat<-testsim_logit$Data #' Dat$wqs<-testsim_logit$wqs #' summary(glm(wqsform,data=Dat,family="binomial"))$coef[1:2,] #' } #' wqs_sim <- function(nmix = 10, ncovrt = 10, nobs = 500, ntruewts = 10, ntruecovrt = 5, vcov = 0, eps = 1, truewqsbeta = NULL, truebeta0 = NULL, truewts = NULL, truegamma = NULL, rnd_wqsbeta_dir = "none", seed = 101, q = 10, family = "gaussian") { if (is.character(family)) { if (family=="multinomial"){ stop("This simulation function doesn't yet accomodate multinomial WQS regression.") } if (family %in% c("negbin")) family <- list(family = family) else family <- get(family, mode = "function", envir = parent.frame()) } if (is.function(family)) family <- family() if (is.null(family$family)) { stop(paste0("'family' ",family," not recognized\n")) } if (length(vcov) == 1) { Rho <- diag(nmix + ncovrt) Rho[upper.tri(Rho)] <- Rho[lower.tri(Rho)] <- vcov } else { if(nrow(vcov) != ncol(vcov)|nrow(vcov) != (nmix + ncovrt)){ stop("'vcov' must be a square matrix with the number of rows/columns being equal to the number of mixture components + the number of covariates.") } Rho <- vcov } weights <- rep(0, nmix) if (is.null(truewts)) { set.seed(seed) truewts <- extraDistr::rdirichlet(1, rep(1, ntruewts)) weights[1:ntruewts] <- truewts } else { if (length(truewts) == nmix & sum(abs(truewts)) != 1) { truewts <- truewts / sum(truewts) } if (length(truewts) < nmix) { weights[1:length(truewts)] <- truewts weights[(length(truewts) + 1):nmix] <- (1 - sum(truewts)) / (nmix - length(truewts)) } else { weights[1:length(truewts)] <- truewts } } if (round(sum(weights), 3) != 1.0) { warning(paste0("weights add up to ", 
sum(weights))) } set.seed(seed) Xmat <- mvtnorm::rmvnorm(nobs, mean = rep(0, nmix + ncovrt), sigma = Rho) if (is.null(q)) { Xmatquant <- Xmat } else { Xmatquant <- Xmat Xmatquant[, 1:nmix] <- apply( Xmatquant[, 1:nmix], 2, FUN = function(x) { as.numeric(as.character(cut( x, breaks = quantile(x, probs = seq(0, 1, by = (1 / q))), include.lowest = T, labels = 0:(q - 1) ))) } ) } if (ncovrt < ntruecovrt) { ntruecovrt <- ncovrt } if (!is.null(truegamma)) { if (length(truegamma) == 1) { covrtbetas <- rep(truegamma, ncovrt) } else { covrtbetas <- truegamma } } else { set.seed(seed + 2) covrtbetas <- c(rnorm(ntruecovrt), rep(0, length = ncovrt - ntruecovrt)) } set.seed(seed + 1) if (!is.null(truebeta0)) { beta0 <- truebeta0 } else { beta0 <- rnorm(1) } if (!is.null(truewqsbeta)) { wqsbeta <- truewqsbeta } else { set.seed(seed) if (rnd_wqsbeta_dir == "positive") { wqsbeta <- extraDistr::rhnorm(1) } else if (rnd_wqsbeta_dir == "negative") { wqsbeta <- extraDistr::rhnorm(1) * -1 } else { wqsbeta <- rnorm(1) } } wqs <- Xmatquant[, 1:nmix] %*% weights if (ncovrt > 0) { modmat <- cbind(1, wqs, Xmat[, c((nmix + 1):(nmix + ncovrt))]) dimnames(modmat)[[2]] <- c("Intercept", "wqs", paste0("C", 1:ncovrt)) betas <- c(beta0, wqsbeta, covrtbetas) names(betas) <- c("beta0", "beta1", paste0("gamma", 1:ncovrt)) } else { modmat <- cbind(1, wqs) dimnames(modmat)[[2]] <- dimnames(modmatq)[[2]] <- c("Intercept", "wqs") betas <- c(beta0, wqsbeta) names(betas) <- c("beta0", "beta1") } etahat <- modmat %*% betas if(family$family=="binomial"){ probs <- family$linkinv(etahat) set.seed(seed) y <- rbinom(nobs, size = 1, prob = probs) } else if(family$family=="gaussian"){ if(family$link!="identity") { stop("The gaussian() family is only supported for link='identity' for WQS regression") } set.seed(seed) epsilon <- rnorm(nobs, sd = eps) y <- etahat + epsilon } else if(family$family=="poisson"){ if(family$link!="log") { stop("The poisson() family is only supported for link='log' for WQS regression") } y<-rpois(nobs,lambda = exp(etahat)) } else if(family$family=="negbin"){ y <- rnbinom(n = nobs, mu = exp(etahat), size = eps) } else { stop(paste0("The family ",family$family, " (link=",family$link,") is not supported.")) } Data <- data.frame(cbind(y, Xmat)) if (ncovrt > 0) { names(Data) <- c("y", paste0("T", 1:nmix), paste0("C", 1:ncovrt)) colnames(Xmatquant) <- c(paste0("T", 1:nmix), paste0("C", 1:ncovrt)) colnames(Xmat) <- c(paste0("T", 1:nmix), paste0("C", 1:ncovrt)) } else { names(Data) <- c("y", paste0("T", 1:nmix)) colnames(Xmatquant) <- c(paste0("T", 1:nmix)) colnames(Xmat) <- c(paste0("T", 1:nmix)) } wtmat <- data.frame(mix_name = paste0("T", 1:nmix), true_weight = weights) retlist <- list( weights = wtmat, coef = betas, Data = Data, etahat = etahat, wqs = wqs, modmat = modmat, Xq = Xmatquant ) return(retlist) }
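# Illustrative sketch (not part of the original source): the `vcov` shortcut.
# Passing a single value gives all simulated mixture components and covariates a
# pairwise correlation of roughly that value; the settings below are arbitrary.
if (FALSE) {
  sim <- wqs_sim(nmix = 5, ncovrt = 2, nobs = 500, ntruewts = 5,
                 ntruecovrt = 2, vcov = 0.4, truewqsbeta = 0.3,
                 truebeta0 = 0, seed = 7)
  round(cor(sim$Data[, paste0("T", 1:5)]), 2)  # off-diagonals near 0.4
  sim$weights                                  # simulated "true" mixture weights
}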
/scratch/gouwar.j/cran-all/cranData/wqspt/R/wqs_sim.R
#' Plotting method for wqspt object #' #' Generates plots to help visualize and summarize WQS permutation test results. #' #' @param wqsptresults An object of class `wqs_pt`. #' @param FixedPalette If TRUE, the heatmap color key for the mixture weights has #' categorical cutoffs with the following categories: <0.1, 0.1 - <0.2, 0.2 - <0.3, #' and >= 0.3. If false, the heatmap color key is continuous and dependent on the #' weight values. #' @param InclKey If TRUE, a horizontal heatmap legend is included at the bottom #' of the full plot. #' @param AltMixName Defaults to NULL. If not NULL, these are alternative names #' for the mixture components to be displayed on the heatmap y axis. #' @param AltOutcomeName Defaults to NULL. If not NULL, this is an alternative #' name for the outcome to be displayed on the heatmap x axis. #' @param ViridisPalette Color palette to be used for the viridisLite #' package-based coloring of the heatmap, with possible values from 'A' to 'E'. #' Defaults to 'D'. #' @param StripTextSize Text size for the plot strip labels. Defaults to 14. #' @param AxisTextSize.Y Text size for the y axis text. Defaults to 12. #' @param AxisTextSize.X Text size for the x axis text. Defaults to 12. #' @param LegendTextSize Text text size for the legend text. Defaults to 14. #' @param PvalLabelSize The geom_text size for the permutation test p-value #' label. Defaults to 5. #' @param HeatMapTextSize The geom_text size for the mixture weight heatmap #' labels. Defaults to 5. #' #' @return Returns a list with 4 objects. #' #' \item{FullPlot}{Two plots stacked vertically: (1) Forest plot of the beta WQS #' coefficient with the naive confidence intervals as well as the permutation #' test p-value (2) A heatmap of the WQS weights for each mixture component.} #' \item{CoefPlot}{Forest plot of the beta WQS #' coefficient with the naive confidence intervals as well as the permutation #' test p-value.} #' \item{WtPlot}{A heatmap of the WQS weights for each mixture component.} #' \item{WtLegend}{A legend for the weights in the WtPlot heatmap.} #' #' @importFrom rlang .data #' #' @export #' wqspt_plot <- function(wqsptresults, FixedPalette = FALSE, InclKey = FALSE, AltMixName = NULL, AltOutcomeName = NULL, ViridisPalette = "D", StripTextSize = 14, AxisTextSize.Y = 12, AxisTextSize.X = 12, LegendTextSize = 14, PvalLabelSize = 5, HeatMapTextSize = 5) { wqs_fam <- wqsptresults$family if(!is.character(wqs_fam)) wqs_fam <- wqs_fam$family thisfit <- wqsptresults$gwqs_main$fit b1pos <- wqsptresults$gwqs_main$b1_pos if (b1pos) thisdir <- "Positive" else thisdir <- "Negative" if (!is.null(AltOutcomeName)) outname <- AltOutcomeName else outname <- as.character(attr(thisfit$terms, "variables")[[2]]) if (wqs_fam == "gaussian"){ pval <- summary(thisfit)$coef["wqs", "Pr(>|t|)"] } else { pval <- summary(thisfit)$coef["wqs", "Pr(>|z|)"] } WQSResults <- data.frame( Outcome = outname, Direction = thisdir, Beta = thisfit$coef['wqs'], LCI = suppressMessages(confint(thisfit)[2, 1]), UCI = suppressMessages(confint(thisfit)[2, 2]), pval = pval, PTp = wqsptresults$perm_test$pval ) WQSResults$PTlabel <- paste0("PTp=", signif(WQSResults$PTp, 3)) WQSResults$FacetLabel <- "Coefficient" cirange <- WQSResults$UCI - WQSResults$LCI widercirange <- c(WQSResults$LCI - (WQSResults$LCI / 10), WQSResults$UCI + (WQSResults$UCI / 10)) if (widercirange[1] < 0 & widercirange[2] > 0) { gg1 <- ggplot(WQSResults, aes(x = .data$Outcome, y = .data$Beta)) + geom_point(size = 3) + theme_bw() + geom_errorbar(aes(ymin = .data$LCI, ymax = 
.data$UCI), size = 1, width = 0.75) + geom_hline(yintercept = 0) + geom_text(aes(label = .data$PTlabel, y = .data$UCI + cirange / 10), size = PvalLabelSize) + facet_grid(FacetLabel ~ Direction) + theme( strip.text = element_text(size = StripTextSize), axis.text.y = element_text(size = AxisTextSize.Y), axis.text.x = element_blank(), axis.title = element_blank(), axis.ticks.x = element_blank() ) } else { gg1 <- ggplot(WQSResults, aes(x = .data$Outcome, y = .data$Beta)) + geom_point(size = 3) + theme_bw() + geom_errorbar(aes(ymin = .data$LCI, ymax = .data$UCI), size = 1, width = 0.75) + geom_text(aes(label = .data$PTlabel, y = .data$UCI + cirange / 10), size = PvalLabelSize) + facet_grid(FacetLabel ~ Direction) + theme( strip.text = element_text(size = StripTextSize), axis.text.y = element_text(size = AxisTextSize.Y), axis.text.x = element_blank(), axis.title = element_blank(), axis.ticks.x = element_blank() ) } WQSwts <- wqsptresults$gwqs_main$final_weights[wqsptresults$gwqs_main$mix_name, ] WQSwts$FacetLabel <- "Weights" WQSwts$Outcome <- WQSResults$Outcome WQSwts$Direction <- WQSResults$Direction WQSwts$mix_name <- factor(as.character(WQSwts$mix_name), levels = wqsptresults$gwqs_main$mix_name) if (!is.null(AltMixName)) levels(WQSwts$mix_name) <- AltMixName WQSwts$mix_name <- factor(WQSwts$mix_name, levels = rev(levels(WQSwts$mix_name))) names(WQSwts)[1:2] <- c("Exposure", "Weight") if (FixedPalette) { mypal <- viridis::viridis_pal(option = ViridisPalette)(4) WQSwts$Wt <- WQSwts$Weight WQSwts$Weight <- factor( ifelse( WQSwts$Wt < 0.1, "<0.1", ifelse( WQSwts$Wt >= 0.1 & WQSwts$Wt < 0.2, "0.1-<0.2", ifelse( WQSwts$Wt >= 0.2 & WQSwts$Wt < 0.3, "0.2-<0.3", paste0("\u2265", "0.3") ) ) ), levels = c("<0.1", "0.1-<0.2", "0.2-<0.3", paste0("\u2265", "0.3")) ) Virclr <- ifelse( WQSwts$Weight == "<0.1", mypal[1], ifelse( WQSwts$Weight == "0.1-0.2", mypal[2], ifelse( WQSwts$Weight == "0.2-0.3", mypal[3], ifelse(is.na(WQSwts$Weight) == T, "grey50", mypal[4]) ) ) ) names(Virclr) <- as.character(WQSwts$Weight) legplot <- ggplot(data.frame(Weight = factor( levels(WQSwts$Weight), levels = levels(WQSwts$Weight) )), aes(x = 1, y = .data$Weight)) + geom_tile(aes(fill = .data$Weight)) + scale_fill_manual(values = mypal) + theme( legend.position = "bottom", legend.title = element_text(size = 14, face = "bold"), legend.text = element_text(size = 14) ) l1 <- cowplot::get_legend(legplot) gg2 <- ggplot(WQSwts, aes(x = .data$Outcome, y = .data$Exposure)) + theme_classic() + geom_tile(aes(fill = .data$Weight), alpha = 0.7) + geom_text(aes(label =round(.data$Wt, 2)), size = HeatMapTextSize) + scale_fill_manual(values = Virclr) + facet_grid(FacetLabel ~ Direction) + theme( strip.text.x = element_blank(), strip.text.y = element_text(size = StripTextSize), axis.text.x = element_text(size = AxisTextSize.X), axis.text.y = element_text(size = AxisTextSize.Y), axis.title = element_blank(), strip.background.y = element_rect(fill = "grey85", colour = "grey20"), legend.position = "bottom", legend.title = element_text(size = LegendTextSize, face = "bold"), legend.text = element_text(size = LegendTextSize) ) } else { gg2 <- ggplot(WQSwts, aes(x = .data$Outcome, y = .data$Exposure)) + theme_classic() + geom_tile(aes(fill = .data$Weight), alpha = 0.7) + geom_text(aes(label = round(.data$Weight, 2)), size = HeatMapTextSize) + scale_fill_viridis_c(option = ViridisPalette) + facet_grid(FacetLabel ~ Direction) + theme( strip.text.x = element_blank(), strip.text.y = element_text(size = StripTextSize), axis.text.x = element_text(size = 
AxisTextSize.X), axis.text.y = element_text(size = AxisTextSize.Y), axis.title = element_blank(), strip.background.y = element_rect(fill = "grey85", colour = "grey20"), legend.position = "bottom", legend.title = element_text(size = LegendTextSize, face = "bold"), legend.text = element_text(size = LegendTextSize), legend.key.size = unit(0.4, units = 'in') ) l1 <- cowplot::get_legend(gg2) } if (InclKey) { gg2 <- gg2 + theme(legend.position = "none") fullplot <- cowplot::plot_grid( cowplot::plot_grid( gg1, gg2, ncol = 1, align = "v", rel_heights = c(0.4, 0.6) ), l1, ncol = 1, rel_heights = c(1, 0.1) ) } else { gg2 <- gg2 + theme(legend.position = "none") fullplot <- cowplot::plot_grid( gg1, gg2, ncol = 1, rel_heights = c(0.4, 0.6), align = "v" ) } return(list( FullPlot = fullplot, CoefPlot = gg1, WtPlot = gg2, WtLegend = l1 )) }
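# Illustrative usage sketch (not part of the original source). It mirrors the
# quick, low-bootstrap example in the wqs_full_perm() documentation and then
# passes the result to the plotting helper above.
if (FALSE) {
  library(gWQS)
  PCBs <- names(wqs_data)[1:17]
  res <- wqs_full_perm(formula = yLBX ~ wqs, data = wqs_data, mix_name = PCBs,
                       q = 10, b_main = 5, b_perm = 5, b1_pos = TRUE,
                       niter = 3, seed = 16, plan_strategy = "multicore",
                       stop_if_nonsig = FALSE)
  plots <- wqspt_plot(res)
  plots$FullPlot  # coefficient forest plot stacked on the mixture-weight heatmap
}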
/scratch/gouwar.j/cran-all/cranData/wqspt/R/wqspt_plot.R
## ---- include = FALSE--------------------------------------------------------- knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ## ---- warning=F, message=F---------------------------------------------------- library(gWQS) library(wqspt) ## ---- warning = F, message = F, eval=F---------------------------------------- # # simulated dataset # sim_res1 <- wqs_sim(nmix = 10, # ncovrt = 10, # nobs = 1000, # ntruewts = 10, # ntruecovrt = 5, # truewqsbeta = 0.2, # truebeta0 = 2, # truewts = c(0.15, 0.15, 0.15, 0.15, 0.15, # 0.05, 0.05, 0.05, 0.05, 0.05), # q = 10, # seed = 16) # # sim_data1 <- sim_res1$Data # # wqs_form <- formula(paste0("y ~ wqs + ", paste(paste0("C",1:10), collapse="+"))) ## ---- warning = F, eval=F----------------------------------------------------- # # mixture names # mix_names1 <- colnames(sim_data1)[2:11] # # # create reference wqs object # wqs_main1 <- gwqs(wqs_form, mix_name = mix_names1, data = sim_data1, q = 10, validation = 0, # b = 20, b1_pos = T, plan_strategy = "multicore", family = "gaussian", # seed = 16) ## ---- eval=F------------------------------------------------------------------ # # run permutation test # perm_test_res1 <- wqs_pt(wqs_main1, niter = 50, boots = 5, b1_pos = T, seed = 16) ## ---- echo=F------------------------------------------------------------------ load("data/introduction-vignette.RData") ## ---- eval = F---------------------------------------------------------------- # main_sum1 <- summary(perm_test_res1$gwqs_main) ## ----------------------------------------------------------------------------- main_sum1$coefficients ## ----------------------------------------------------------------------------- perm_test_res1$perm_test$pval ## ---- fig.height = 6---------------------------------------------------------- wqspt_plot(perm_test_res1)$FullPlot ## ---- eval = F---------------------------------------------------------------- # sim_res2 <- wqs_sim(nmix = 10, # ncovrt = 10, # nobs = 1000, # ntruewts = 10, # ntruecovrt = 5, # truewqsbeta = 0, # truebeta0 = 0.1, # truewts = c(0.15, 0.15, 0.15, 0.15, 0.15, # 0.05, 0.05, 0.05, 0.05, 0.05), # q = 10, # seed = 16) # # sim_data2 <- sim_res2$Data ## ---- eval=F------------------------------------------------------------------ # # mixture names # mix_names2 <- colnames(sim_data2)[2:11] # # # create reference wqs object # wqs_main2 <- gwqs(wqs_form, mix_name = mix_names2, data = sim_data2, q = 10, validation = 0, # b = 20, b1_pos = T, plan_strategy = "multicore", family = "gaussian", # seed = 16) # # # run permutation test # perm_test_res2 <- wqs_pt(wqs_main2, niter = 50, boots = 5, b1_pos = T, seed = 16) ## ---- eval = F---------------------------------------------------------------- # main_sum2 <- summary(perm_test_res2$gwqs_main) ## ----------------------------------------------------------------------------- main_sum2$coefficients ## ----------------------------------------------------------------------------- perm_test_res2$perm_test$pval ## ---- fig.height = 6---------------------------------------------------------- wqspt_plot(perm_test_res2)$FullPlot ## ---- eval = F---------------------------------------------------------------- # perm_test_res3 <- wqs_full_perm(wqs_form, # data = sim_data1, # mix_name = mix_names1, # q = 10, # b_main = 20, # b_perm = 5, # b1_pos = T, # niter = 50, # seed = 16, # plan_strategy = "multicore") ## ---- fig.height = 6---------------------------------------------------------- wqspt_plot(perm_test_res3)$FullPlot ## ---- 
eval=F------------------------------------------------------------------ # sim_res3 <- wqs_sim(nmix = 10, # ncovrt = 10, # nobs = 1000, # ntruewts = 10, # ntruecovrt = 5, # truewqsbeta = 0.4, # truebeta0 = -2.5, # truewts = c(0.15, 0.15, 0.15, 0.15, 0.15, # 0.05, 0.05, 0.05, 0.05, 0.05), # q = 10, # family = "binomial", # seed = 16) # # sim_data3 <- sim_res3$Data # # perm_test_res4 <- wqs_full_perm(wqs_form, # data = sim_data3, # mix_name = mix_names1, # q = 10, # b_main = 20, # b_perm = 5, # b1_pos = T, # niter = 50, # seed = 16, # plan_strategy = "multicore", # family = "binomial") ## ---- fig.height=6------------------------------------------------------------ wqspt_plot(perm_test_res4)$FullPlot
/scratch/gouwar.j/cran-all/cranData/wqspt/inst/doc/introduction.R
--- title: "How to use the wqspt package" author: "Drew Day, James Peng" date: "5/26/2022" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{How to use the wqspt package} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ``` # Introduction Weighted quantile sum (WQS) regression is a statistical technique to evaluate the effect of complex exposure mixtures on an outcome ([Carrico 2015](https://link.springer.com/article/10.1007/s13253-014-0180-3)). It is a single-index method which estimates a combined mixture sum effect as well as weights determining each individual mixture component's contributions to the sum effect. However, the model features a statistical power and Type I error (i.e., false positive) rate tradeoff, as there is a machine learning step to determine the weights that optimize the linear model fit. If the full data is used to estimate both the mixture component weights and the regression coefficients, there is high power but also a high false positive rate since coefficient p-values are calculated for a weighted mixture independent variable with weights that have already been optimized to find a large effect. We recently proposed alternative methods based on a permutation test that should reliably allow for both high power and low false positive rate when utilizing WQS regression. The permutation test is a method of obtaining a p-value by simulating the null distribution through permutations of the data. The permutation test algorithm is described more in detail and validated in [Day et al. 2022](https://ehp.niehs.nih.gov/doi/10.1289/EHP10570). The version of this permutation test used for a continuous outcome variable has been applied in [Loftus et al. 2021](https://www.sciencedirect.com/science/article/pii/S0160412021000337), [Day et al. 2021](https://www.ncbi.nlm.nih.gov/pmc/articles/PMC9291724/), [Wallace et al. 2022](https://www.sciencedirect.com/science/article/pii/S0160412021006644), [Barrett et al. 2022](https://www.sciencedirect.com/science/article/pii/S0160412022000034), and [Freije et al. 2022](https://www.sciencedirect.com/science/article/pii/S0160412022001726). Another version of the permutation test adapted for WQS logistic regression with a binary outcome variable is applied in [Loftus et al. 2022](https://www.sciencedirect.com/science/article/pii/S0160412022004214). ## About WQS The goal of WQS regression is to determine whether an exposure mixture is associated with an outcome in a prespecified direction. It fits the following model: $Y = \beta_0 + \beta_1(\sum_{i=1}^{m} w_i {X_q}_i) + Z'\gamma$ Where $Y$ is the outcome variable, $\beta_0$ is the intercept, $\beta_1$ is the coefficient for the weighted quantile sum, $\sum_{i=1}^{c} w_i {X_q}_i$ is the weighted index for the set of quantiled mixture exposures, $Z$ is the set of covariates, and $\gamma$ is the regression coefficients for the covariates. A full description of the WQS methodology is described in [Carrico 2015](https://link.springer.com/article/10.1007/s13253-014-0180-3). ## Permutation Test The WQS regression comprises two steps, for which we typically split the data into a training and validation set. Doing this reduces statistical power since we are training our model on only part of the data. On the other hand, if we skip this training/test split, we can get a skewed representation of uncertainty for the WQS coefficient. 
A permutation test method gives us a p-value for the uncertainty while also allowing us to use the full dataset for training and validation. This p-value is based on comparing a test value (e.g., coefficient or naive p-values) to iterated values, and so the minimum non-zero p-value that can be detected by the permutation test would be 1 divided by the number of permutation test iterations. For example, if we run 200 iterations, we’d be able to define a p-value as low as 1/200 = 0.005, and any lower p-value would appear as zero and be interpreted as <0.005.

### Continuous outcome algorithm (linear regression)

1. Run WQS regression without splitting the data, obtaining a WQS coefficient estimate.
2. Regress the outcome on all covariates but not the WQS variable. Then obtain the predicted outcome values and their residuals from this regression.
3. Randomly permute the residual values and add them to the predicted outcome values to get a new outcome variable $y*$.
4. Run a WQS regression without splitting the data in which $y*$ replaces the vector of observed outcome variables, obtaining an estimate for the WQS coefficient $\beta_1^*$.
5. Repeat steps 3 and 4 for the desired number of iterations.
6. Calculate the p-value by taking the proportion of $\beta_1^*$ values greater than the WQS coefficient estimate obtained in Step 1 (a brief R sketch of this procedure is shown below).

### Binary or Count outcome algorithms (Generalized linear models (GLMs))

1. Regress each of the $m$ mixture components on all covariates $Z$ and obtain an $n$ observations $\times$ $m$ matrix with columns being the residuals from each of the $m$ models ($R_{m|Z}$).
2. Obtain the initial fit ($fit1$) by running a “non-split” WQS logistic regression (or other WQS GLM) in which the binary (or count) outcome variable $Y$ is regressed on the WQS vector and the covariates, and the mixture matrix used to calculate the WQS vector is the matrix of residuals from Step 1, $R_{m|Z}$.
3. Obtain the reduced fit ($fit2$) by running a logistic regression (or other GLM) regressing $Y$ on $Z$.
4. Calculate the test p-value ($p_t$) as $1-pchisq(d(fit1)-d(fit2),1)$, where $d$ is the deviance for a given model and $pchisq(x,1)$ is the cumulative distribution function of the chi-square distribution with 1 degree of freedom, evaluated at the input $x$, the difference between the deviances of $fit1$ and $fit2$.
5. Permute the rows of the $R_{m|Z}$ residual matrix from Step 1 and repeat Step 2 to get a series of null fit1 models ($fit1^*$) for K iterations. Obtain a distribution of permuted p-values ($p^*$) using the following formula: $p^*=1-pchisq(d(fit1^*)-d(fit2),1)$.
6. Obtain the number of permuted $p^*$ less than or equal to the test $p_t$ from Step 4 and divide that by the number of iterations K to calculate the permutation test p-value.

Note that the above algorithm has been validated in WQS logistic regressions but not yet for other forms of WQS GLMs (e.g., WQS Poisson regression). However, since deviances can also be derived from those models, the algorithm should work for those other WQS GLMs as well.

# How to use the `wqspt` package

The `wqspt` package builds from the `gWQS` package. The two main functions of the `wqspt` package are `wqs_pt` and `wqs_full_perm`.

## `wqs_pt`

### Arguments

`wqs_pt` uses a `gwqs` object (from the `gWQS` [package](https://CRAN.R-project.org/package=gWQS)) as an input. To use `wqs_pt`, we first need to run an initial *permutation test reference WQS regression* run while setting `validation = 0`.
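Before walking through the `wqs_pt` arguments in detail, here is the short sketch promised above of the continuous-outcome resampling scheme. This is an illustration only: an ordinary `lm()` fit stands in for each WQS regression, and the objects `dat`, `y`, `x`, and `z` are hypothetical placeholders for a data frame with an outcome, a single exposure index, and one covariate.

```{r, eval=F}
# Step 1: "observed" coefficient from the full model (lm() standing in for WQS)
beta_obs <- coef(lm(y ~ x + z, data = dat))["x"]

# Step 2: covariate-only model; keep its fitted values and residuals
fit_cov <- lm(y ~ z, data = dat)
yhat <- fitted(fit_cov)
res <- resid(fit_cov)

# Steps 3-5: permute the residuals, rebuild the outcome, and refit many times
beta_star <- replicate(200, {
  ystar <- yhat + sample(res)
  coef(lm(ystar ~ x + z, data = dat))["x"]
})

# Step 6: permutation p-value for a positive (b1_pos = TRUE) coefficient
mean(beta_star > beta_obs)
```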
Note that the permutation test can currently take in `gwqs` inputs with the following families: `family = gaussian(link = "identity")`, `family = binomial()` with any accepted link function (e.g., "logit" or "probit"), `family = poisson(link = "log")`, `family = quasipoisson(link = "log")`, and `family = "negbin"` for negative binomial. It is not currently able to accommodate multinomial WQS regression, stratified weights, or WQS interaction terms.

We will use this `gwqs` object as the `model` argument for the `wqs_pt` function and set the following additional parameters:

* `boots`: Number of bootstraps for the WQS regression run in each permutation test iteration. Note that we may elect a bootstrap count `boots` lower than that specified in the `model` object for the sake of efficiency. If we do, `wqs_pt` will run the iterated WQS regressions for the permutation test with the number of bootstraps defined in `boots`. If `boots` is not specified, then the function will use the same bootstrap count in the permutation test iterated WQS regressions as that specified in the main WQS regression.
* `niter`: Number of permutation test iterations.
* `b1_pos`: A logical value that indicates whether beta values should be positive or negative.
* `rs`: A logical value indicating whether the random subset implementation for WQS should be performed ([Curtin 2019](https://www.tandfonline.com/doi/abs/10.1080/03610918.2019.1577971?journalCode=lssp20)).
* `plan_strategy`: Evaluation strategy for the plan function ("sequential", "transparent", "multisession", "multicore", "multiprocess", "cluster", or "remote"). See the documentation for the `future::plan` function for full details.
* `seed`: Random seed for the permutation test WQS reference run.

The arguments `b1_pos` and `rs` should be consistent with the inputs chosen in the `model` object. The `seed` should ideally be consistent with the seed set in the `model` object, though this is not required.

### Outputs

The permutation test returns an object of class `wqs_pt`, which contains three sublists:

* **perm_test**
    * **pval**: permutation test p-value
    * *Linear WQS regression only*
        * **testbeta**: reference WQS coefficient $\beta_1$ value
        * **betas**: a vector of $\beta_1$ values from each iteration of the permutation test
    * *WQS GLM only*
        * **testpval**: test reference p-value
        * **permpvals**: p-values from each iteration of the permutation test
* **gwqs_main**: main gWQS object (same as the `model` input)
* **gwqs_perm**: permutation test reference gWQS object (NULL if the model `family != "gaussian"` or if the same number of bootstraps is used in the permutation test WQS regression runs as in the main run)

### Plotting method

The `wqs_pt` class has a `wqspt_plot` method to help visualize and summarize WQS permutation test results. Plots include (1) a forest plot of the WQS beta coefficient with the naive confidence intervals as well as the permutation test p-value and (2) a heatmap of the WQS weights for each mixture component.

## `wqs_full_perm`

The second function, `wqs_full_perm`, is a full wrapper which implements the initial WQS regression run using `gWQS::gwqs` and the permutation test in one function call. To use `wqs_full_perm`, you must specify the same required arguments as needed in the `gwqs` call.
This function can run WQS regressions and the permutation test for the following families: `family = gaussian(link = "identity")`, `family = binomial()` with any accepted link function (e.g., "logit" or "probit"), `family = poisson(link = "log")`, `family = quasipoisson(link = "log")`, and `family = "negbin"` for negative binomial. `wqs_full_perm` is not currently able to accommodate multinomial WQS regression, stratified weights, or WQS interaction terms.

For the bootstrap count `b` argument, you must specify `b_main`, the number of bootstraps for the *main WQS regression* run, as well as `b_perm`, the number of bootstraps for the *permutation test reference WQS regression* run (linear WQS regression only) and each WQS regression iteration of the permutation test. As before, you can choose to set `b_main` $>$ `b_perm` for the sake of efficiency. Finally, you should indicate the desired number of permutation test iterations, `niter`.

Since the WQS permutation test can be computationally intensive, you can specify `stop_if_nonsig = TRUE` if you do not wish for the permutation test to proceed when the naive main WQS regression run produces a nonsignificant result (i.e., when the p-value is above the `stop_thresh` argument, for which the default is 0.05). See the *Recommendations for Use* section below.

The `wqs_full_perm` function returns an object of class `wqs_pt`, with outputs as described above.

## Recommendations for Use

Larger bootstrap counts and numbers of iterations lead to better estimates, though it is unclear how many iterations or bootstraps are needed for a stable estimate. We generally recommend using 1000 bootstraps on the main WQS regression and then performing 200 iterations of 200-bootstrap WQS regressions for the permutation test. However, this takes a substantial amount of computational time, and one could also get relatively stable p-values with, for instance, 100 iterations of 100-bootstrap WQS regressions for the permutation test.

We recommend that users only apply the permutation test in cases where the naive WQS test result is significant or near-significant. If the naive test produces a non-significant result, then there likely is no reason to run the permutation test, as it will produce a result which is more conservative than the naive method (i.e., it will have a larger p-value). This is the strategy that we have applied in our published papers ([Loftus et al. 2021](https://www.sciencedirect.com/science/article/pii/S0160412021000337) and [Day et al. 2021](https://www.sciencedirect.com/science/article/pii/S0160412020322856)).

# Examples

## Example 1 (using `wqs_pt`)

This is an example where the WQS permutation test confirms a significant naive result.

We first load both the `wqspt` and `gWQS` packages.

```{r, warning=F, message=F}
library(gWQS)
library(wqspt)
```

Then we produce a simulated dataset with the following parameters:

* WQS coefficient $\beta_1$: 0.2
* Mixture weights: 0.15 for the first 5 components, 0.05 for the remaining 5 components

```{r, warning = F, message = F, eval=F}
# simulated dataset
sim_res1 <- wqs_sim(nmix = 10, ncovrt = 10, nobs = 1000, ntruewts = 10,
                    ntruecovrt = 5, truewqsbeta = 0.2, truebeta0 = 2,
                    truewts = c(0.15, 0.15, 0.15, 0.15, 0.15,
                                0.05, 0.05, 0.05, 0.05, 0.05),
                    q = 10, seed = 16)

sim_data1 <- sim_res1$Data

wqs_form <- formula(paste0("y ~ wqs + ", paste(paste0("C",1:10), collapse="+")))
```

Now we run WQS regression on the simulated data.
```{r, warning = F, eval=F}
# mixture names
mix_names1 <- colnames(sim_data1)[2:11]

# create reference wqs object
wqs_main1 <- gwqs(wqs_form, mix_name = mix_names1, data = sim_data1, q = 10,
                  validation = 0, b = 20, b1_pos = T, plan_strategy = "multicore",
                  family = "gaussian", seed = 16)
```

Finally, we can perform a permutation test on the WQS object.

```{r, eval=F}
# run permutation test
perm_test_res1 <- wqs_pt(wqs_main1, niter = 50, boots = 5, b1_pos = T, seed = 16)
```

Note that the naive WQS regression produces a significant result for the WQS coefficient (p-value < 0.001).

```{r, echo=F}
load("data/introduction-vignette.RData")
```

```{r, eval = F}
main_sum1 <- summary(perm_test_res1$gwqs_main)
```

```{r}
main_sum1$coefficients
```

The permutation test confirms the significance of this result.

```{r}
perm_test_res1$perm_test$pval
```

Here are the summary plots:

```{r, fig.height = 6}
wqspt_plot(perm_test_res1)$FullPlot
```

## Example 2 (using `wqs_pt`)

This is an example where the WQS permutation test goes against a (false positive) significant naive result.

We produce a simulated dataset with the following parameters:

* WQS coefficient $\beta_1$: 0
* Mixture weights: 0.15 for the first 5 components, 0.05 for the remaining 5 components

```{r, eval = F}
sim_res2 <- wqs_sim(nmix = 10, ncovrt = 10, nobs = 1000, ntruewts = 10,
                    ntruecovrt = 5, truewqsbeta = 0, truebeta0 = 0.1,
                    truewts = c(0.15, 0.15, 0.15, 0.15, 0.15,
                                0.05, 0.05, 0.05, 0.05, 0.05),
                    q = 10, seed = 16)

sim_data2 <- sim_res2$Data
```

Now we run WQS regression as well as the permutation test on the simulated data.

```{r, eval=F}
# mixture names
mix_names2 <- colnames(sim_data2)[2:11]

# create reference wqs object
wqs_main2 <- gwqs(wqs_form, mix_name = mix_names2, data = sim_data2, q = 10,
                  validation = 0, b = 20, b1_pos = T, plan_strategy = "multicore",
                  family = "gaussian", seed = 16)

# run permutation test
perm_test_res2 <- wqs_pt(wqs_main2, niter = 50, boots = 5, b1_pos = T, seed = 16)
```

Note that the naive WQS regression produces a significant result for the WQS coefficient (p-value = `r round(main_sum2$coefficients[2,4],3)`).

```{r, eval = F}
main_sum2 <- summary(perm_test_res2$gwqs_main)
```

```{r}
main_sum2$coefficients
```

The permutation test, however, repudiates the significance of this result (p = `r round(perm_test_res2$perm_test$pval, 2)`).

```{r}
perm_test_res2$perm_test$pval
```

Here are the summary plots:

```{r, fig.height = 6}
wqspt_plot(perm_test_res2)$FullPlot
```

## Example 3 (using `wqs_full_perm`)

Using the same data as in Example 1, we run the WQS regression with the permutation test using the full wrapper `wqs_full_perm` call.

```{r, eval = F}
perm_test_res3 <- wqs_full_perm(wqs_form, data = sim_data1, mix_name = mix_names1,
                                q = 10, b_main = 20, b_perm = 5, b1_pos = T,
                                niter = 50, seed = 16, plan_strategy = "multicore")
```

```{r, fig.height = 6}
wqspt_plot(perm_test_res3)$FullPlot
```

## Example 4 (using `wqs_full_perm` on a binary outcome)

This is an example in which we apply the logistic regression version of the WQS permutation test.
We produce a simulated dataset with the following parameters: * WQS coefficient $\beta_1$: 0.4 * Mixture weights: 0.15 for first 5 components, 0.05 for remaining 5 components ```{r, eval=F} sim_res3 <- wqs_sim(nmix = 10, ncovrt = 10, nobs = 1000, ntruewts = 10, ntruecovrt = 5, truewqsbeta = 0.4, truebeta0 = -2.5, truewts = c(0.15, 0.15, 0.15, 0.15, 0.15, 0.05, 0.05, 0.05, 0.05, 0.05), q = 10, family = "binomial", seed = 16) sim_data3 <- sim_res3$Data perm_test_res4 <- wqs_full_perm(wqs_form, data = sim_data3, mix_name = mix_names1, q = 10, b_main = 20, b_perm = 5, b1_pos = T, niter = 50, seed = 16, plan_strategy = "multicore", family = "binomial") ``` ```{r, fig.height=6} wqspt_plot(perm_test_res4)$FullPlot ``` # References * Barrett, E. S., Corsetti, M., Day, D., Thurston, S. W., Loftus, C. T., Karr, C. J., ... & Sathyanarayana, S. (2022). Prenatal phthalate exposure in relation to placental corticotropin releasing hormone (pCRH) in the CANDLE cohort. Environment International, 160, 107078. * Carrico, C., Gennings, C., Wheeler, D. C., & Factor-Litvak, P. (2015). Characterization of weighted quantile sum regression for highly correlated data in a risk analysis setting. Journal of Agricultural, Biological, and Environmental Statistics, 20(1), 100-120. * Curtin, P., Kellogg, J., Cech, N., & Gennings, C. (2019). A random subset implementation of weighted quantile sum (WQSRS) regression for analysis of high-dimensional mixtures. Communications in Statistics-Simulation and Computation, 50(4), 1119-1134. * Day, D. B., Collett, B. R., Barrett, E. S., Bush, N. R., Swan, S. H., Nguyen, R. H., ... & Sathyanarayana, S. (2021). Phthalate mixtures in pregnancy, autistic traits, and adverse childhood behavioral outcomes. Environment International, 147, 106330. * Day, D. B., Sathyanarayana, S., LeWinn, K. Z., Karr, C. J., Mason, W. A., & Szpiro, A. A. (2022). A permutation test-based approach to strengthening inference on the effects of environmental mixtures: comparison between single index analytic methods. Environmental Health Perspectives, 130(8). * Freije, S. L., Enquobahrie, D. A., Day, D. B., Loftus, C., Szpiro, A. A., Karr, C. J., ... & Sathyanarayana, S. (2022). Prenatal exposure to polycyclic aromatic hydrocarbons and gestational age at birth. Environment International, 164, 107246. * Loftus, C. T., Bush, N. R., Day, D. B., Ni, Y., Tylavsky, F. A., Karr, C. J., ... & LeWinn, K. Z. (2021). Exposure to prenatal phthalate mixtures and neurodevelopment in the Conditions Affecting Neurocognitive Development and Learning in Early childhood (CANDLE) study. Environment International, 150, 106409. * Loftus, C., Szpiro, A. A., Workman, T., Wallace, E. R., Hazlehurst, M. F., Day, D. B., ... & Karr, C. J. (2022). Maternal exposure to urinary polycyclic aromatic hydrocarbons (PAH) in pregnancy and childhood asthma in a pooled multi-cohort study. Environment International, 170, p.107494. * Renzetti, S., Curtin, P., Just, A. C., Bello, G., & Gennings, C. (2021). ‘gWQS: Generalized Weighted Quantile Sum Regression’. https://CRAN.R-project.org/package=gWQS. * Wallace, E. R., Ni, Y., Loftus, C. T., Sullivan, A., Masterson, E., Szpiro, A., ... & Sathyanarayana, S. (2022). Prenatal urinary metabolites of polycyclic aromatic hydrocarbons and toddler cognition, language, and behavior. Environment International, 159, 107039.
/scratch/gouwar.j/cran-all/cranData/wqspt/inst/doc/introduction.Rmd
--- title: "How to use the wqspt package" author: "Drew Day, James Peng" date: "5/26/2022" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{How to use the wqspt package} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ``` # Introduction Weighted quantile sum (WQS) regression is a statistical technique to evaluate the effect of complex exposure mixtures on an outcome ([Carrico 2015](https://link.springer.com/article/10.1007/s13253-014-0180-3)). It is a single-index method which estimates a combined mixture sum effect as well as weights determining each individual mixture component's contributions to the sum effect. However, the model features a statistical power and Type I error (i.e., false positive) rate tradeoff, as there is a machine learning step to determine the weights that optimize the linear model fit. If the full data is used to estimate both the mixture component weights and the regression coefficients, there is high power but also a high false positive rate since coefficient p-values are calculated for a weighted mixture independent variable with weights that have already been optimized to find a large effect. We recently proposed alternative methods based on a permutation test that should reliably allow for both high power and low false positive rate when utilizing WQS regression. The permutation test is a method of obtaining a p-value by simulating the null distribution through permutations of the data. The permutation test algorithm is described more in detail and validated in [Day et al. 2022](https://ehp.niehs.nih.gov/doi/10.1289/EHP10570). The version of this permutation test used for a continuous outcome variable has been applied in [Loftus et al. 2021](https://www.sciencedirect.com/science/article/pii/S0160412021000337), [Day et al. 2021](https://www.ncbi.nlm.nih.gov/pmc/articles/PMC9291724/), [Wallace et al. 2022](https://www.sciencedirect.com/science/article/pii/S0160412021006644), [Barrett et al. 2022](https://www.sciencedirect.com/science/article/pii/S0160412022000034), and [Freije et al. 2022](https://www.sciencedirect.com/science/article/pii/S0160412022001726). Another version of the permutation test adapted for WQS logistic regression with a binary outcome variable is applied in [Loftus et al. 2022](https://www.sciencedirect.com/science/article/pii/S0160412022004214). ## About WQS The goal of WQS regression is to determine whether an exposure mixture is associated with an outcome in a prespecified direction. It fits the following model: $Y = \beta_0 + \beta_1(\sum_{i=1}^{m} w_i {X_q}_i) + Z'\gamma$ Where $Y$ is the outcome variable, $\beta_0$ is the intercept, $\beta_1$ is the coefficient for the weighted quantile sum, $\sum_{i=1}^{c} w_i {X_q}_i$ is the weighted index for the set of quantiled mixture exposures, $Z$ is the set of covariates, and $\gamma$ is the regression coefficients for the covariates. A full description of the WQS methodology is described in [Carrico 2015](https://link.springer.com/article/10.1007/s13253-014-0180-3). ## Permutation Test The WQS regression comprises two steps, for which we typically split the data into a training and validation set. Doing this reduces statistical power since we are training our model on only part of the data. On the other hand, if we skip this training/test split, we can get a skewed representation of uncertainty for the WQS coefficient. 
A permutation test method gives us a p-value for the uncertainty while also allowing us to use the full dataset for training and validation. This p-value is based on comparing a test value (e.g., coefficient or naive p-values) to iterated values, and so the minimum non-zero p-value that can be detected by the permutation test would be 1 divided by the number of permutation test iterations. For example, if we run 200 iterations, we’d be able to define a p-value of as low as 1/200 = 0.005, and any lower p-value would appear as zero and be interpreted as <0.005. ### Continuous outcome algorithm (linear regression) 1. Run WQS regression without splitting the data, obtaining a WQS coefficient estimate. 2. Regress the outcome on all covariates but not the WQS variable. Then obtain the predicted outcome values and their residuals from this regression. 3. Randomly permute the residual values and add them to the predicted outcome values to get a new outcome variable $y*$. 4. Run a WQS regression without splitting the data in which $y*$ replaces the vector of observed outcome variables, obtaining an estimate for the WQS coefficient $\beta_1^*$. 5. Repeat steps 3 and 4. 6. Calculate the p-value by taking the proportion of $\beta_1^*$ values greater than the WQS coefficient estimate obtained in Step 1. ### Binary or Count outcome algorithms (Generalized linear models (GLMs)) 1. Regress each of the $m$ mixture components on all covariates $Z$ and obtain a $n$ observations x $m$ matrix with columns being the residuals from each of the $m$ models ($R_{m|Z}$). 2. Obtain the initial fit ($fit1$) by running a “non-split” WQS logistic regression (or other WQS GLM) in which the binary (or count) outcome variable $Y$ is regressed on the WQS vector and the covariates, and the mixture matrix used to calculate the WQS vector is the matrix of residuals from Step 1, $R_{m|Z}$. 3. Obtain the reduced fit ($fit2$) by running a logistic regression (or other GLM) regressing $Y$ on $Z$. 4. Calculate the test p-value ($p_t$) as $1-pchisq(d(fit1)-d(fit2),1)$ where d is the deviance for a given model and $pchisq(x,1)$ is the probability density function of the chi-square distribution in which the input $x$ is the difference between the deviances of $fit1$ and $fit2$ and there is 1 degree of freedom. 5. Permute the rows of the $R_{m|Z}$ residual matrix from Step 1 and repeat Step 2 to get a series of null fit1 models ($fit1^*$) for K iterations. Obtain a distribution of permuted p-values ($p^*$) using the following formula: $p^*=1-pchisq(fit1^*)-d(fit2),1$). 6. Obtain the number of permuted $p^*$ less than or equal to the test $p_t$ from Step 4 and divide that by the number of iterations K to calculate the permutation test p-value. Note that the above algorithm has been validated in WQS logistic regressions but not yet for other forms of WQS GLMs (e.g., WQS Poisson regression). However, since deviances can also be derived from those models, the algorithm should work for those other WQS GLMs as well. # How to use the `wqspt` package The `wqspt` package builds from the `gWQS` package. The two main functions of the `wqspt` package are `wqs_pt` and `wqs_full_perm`. ## `wqs_pt` ### Arguments `wqs_pt` uses a `gwqs` object (from the `gWQS` [package](https://CRAN.R-project.org/package=gWQS)) as an input. To use `wqs_pt`, we first need to run an initial *permutation test reference WQS regression* run while setting `validation = 0`. 
Note that permutation test can currently take in `gwqs` inputs with the following families: `family = gaussian(link = "identity")`, `family = binomial()` with any accepted link function (e.g., "logit" or "probit"), `family = poisson(link = "log")`, `family = quasipoisson(link = "log")`, and `family = "negbin"` for negative binomial. It is not currently able to accommodate multinomial WQS regression, stratified weights, or WQS interaction terms. We will use this `gwqs` object as the `model` argument for the `wqs_pt` function and set the following additional parameters: * `boots`: Number of bootstraps for the WQS regression run in each permutation test iteration. Note that we may elect a bootstrap count `boots` lower than that specified in the `model` object for the sake of efficiency. If we do, `wqs_pt` will run the iterated WQS regressions for the permutation test with the number of bootstraps defined in `boots`. If `boots` is not specified, then the function will use the same bootstrap count in the permutation test iterated WQS regressions as that specified in the main WQS regression. * `niter`: Number of permutation test iterations. * `b1_pos`: A logical value that indicates whether beta values should be positive or negative. * `rs`: A logical value indicating whether the random subset implementation for WQS should be performed ([Curtin 2019](https://www.tandfonline.com/doi/abs/10.1080/03610918.2019.1577971?journalCode=lssp20)) * `plan_strategy`: Evaluation strategy for the plan function ("sequential", "transparent", "multisession", "multicore", "multiprocess", "cluster", or "remote"). See the documentation for the future::plan function for full details. * `seed`: Random seed for the permutation test WQS reference run The arguments `b1_pos` and `rs` should be consistent with the inputs chosen in the `model` object. The `seed` should ideally be consistent with the seed set in the `model` object, though this is not required. ### Outputs The permutation test returns an object of class `wqs_pt`, which contains three sublists: * **perm_test** * **pval**: permutation test p-value * *Linear WQS regression only* * **testbeta**: reference WQS coefficient $\beta_1$ value * **betas**: a vector of $\beta_1$ values from each iteration of the permutation test * *WQS GLM only* * **testpval**: test reference p-value * **permpvals**: p-values from each iteration of the permutation test * **gwqs_main**: main gWQS object (same as `model` input) * **gwqs_perm**: permutation test reference gWQS object (NULL if model `family != "gaussian"` or if same number of bootstraps are used in permutation test WQS regression runs as in the main run.) ### Plotting method The `wqs_pt` class has a `wqspt_plot` method to help visualize and summarize WQS permutation test results. Plots include (1) a forest plot of the beta WQS coefficient with the naive confidence intervals as well as the permutation test p-value and (2) a heatmap of the WQS weights for each mixture component. ## `wqs_full_perm` The second function `wqs_full_perm` is a full wrapper which implements the initial WQS regression run using gWQS::gwqs and the permutation test in one function call. To use `wqs_full_perm`, you must specify the same required arguments as needed in the `gwqs` call. 
This function can run WQS regressions and the permutation test for the following families: `family = gaussian(link = "identity")`, `family = binomial()` with any accepted link function (e.g., "logit" or "probit"), `family = poisson(link = "log")`, `family = quasipoisson(link = "log")`, and `family = "negbin"` for negative binomial. `wqs_full_perm `is not currently able to accommodate multinomial WQS regression, stratified weights, or WQS interaction terms. For the bootstrap count `b` argument, you must specify `b_main`,the number of bootstraps for the *main WQS regression* run, as well as `b_perm`, the number of bootstraps for the *permutation test reference WQS regression* run (linear WQS regression only) and each WQS regression iteration of the permutation test. As with before, you can choose to set `b_main` $>$ `b_perm` for the sake of efficiency. Finally, you should indicate the number of desired permutation test runs `niter`. Since the WQS permutation test can be computationally intensive, you can specify `stop_if_nonsig = TRUE` if you do not wish for the permutation test to proceed if the naive main WQS regression run produces an nonsignificant result (if the p-value is below the `stop_thresh` argument, for which the default is 0.05). See *Recommendations for Use* section below. The `wqs_full_perm` returns an object of class `wqs_pt`, with outputs described above. ## Recommendations for Use Larger bootstrap counts and numbers of iterations lead to better estimates, though it is unclear how many iterations or bootstraps are needed for a stable estimate. We generally recommend using 1000 bootstraps on the main WQS regression and then performing 200 iterations of 200-boostrap WQS regressions for the permutation test. However, this takes a substantial amount of computational time, and one could also get relatively stable p-values for instance for 100 iterations of 100-boostrap WQS regressions for the permutation test. We recommend that users only apply the permutation test in cases where the naive WQS test approaches significance or near-significance. If the naive test produces a non-significant result, then there likely is no reason to run the permutation test, as it will produce a result which is more conservative than the naive method (i.e., it will have a larger p-value). This is the strategy that we have applied in our published papers ([Loftus et al. 2021](https://www.sciencedirect.com/science/article/pii/S0160412021000337) and [Day et al. 2021](https://www.sciencedirect.com/science/article/pii/S0160412020322856)). # Examples ## Example 1 (using `wqs_pt`) This is an example where the WQS permutation test confirms a significant naive result. We first load both the wqspt and gWQS packages. ```{r, warning=F, message=F} library(gWQS) library(wqspt) ``` Then we produce a simulated dataset with the following parameters: * WQS coefficient $\beta_1$: 0.2 * Mixture weights: 0.15 for first 5 components, 0.05 for remaining 5 components ```{r, warning = F, message = F, eval=F} # simulated dataset sim_res1 <- wqs_sim(nmix = 10, ncovrt = 10, nobs = 1000, ntruewts = 10, ntruecovrt = 5, truewqsbeta = 0.2, truebeta0 = 2, truewts = c(0.15, 0.15, 0.15, 0.15, 0.15, 0.05, 0.05, 0.05, 0.05, 0.05), q = 10, seed = 16) sim_data1 <- sim_res1$Data wqs_form <- formula(paste0("y ~ wqs + ", paste(paste0("C",1:10), collapse="+"))) ``` Now we run WQS regression on the simulated data. 
```{r, warning = F, eval=F} # mixture names mix_names1 <- colnames(sim_data1)[2:11] # create reference wqs object wqs_main1 <- gwqs(wqs_form, mix_name = mix_names1, data = sim_data1, q = 10, validation = 0, b = 20, b1_pos = T, plan_strategy = "multicore", family = "gaussian", seed = 16) ``` Finally, we can perform a permutation test on the WQS object. ```{r, eval=F} # run permutation test perm_test_res1 <- wqs_pt(wqs_main1, niter = 50, boots = 5, b1_pos = T, seed = 16) ``` Note that the naive WQS regression produces a significant result for the WQS coefficient (p-value < 0.001). ```{r, echo=F} load("data/introduction-vignette.RData") ``` ```{r, eval = F} main_sum1 <- summary(perm_test_res1$gwqs_main) ``` ```{r} main_sum1$coefficients ``` The permutation test confirms the significance of this result. ```{r} perm_test_res1$perm_test$pval ``` Here are the summary plots: ```{r, fig.height = 6} wqspt_plot(perm_test_res1)$FullPlot ``` ## Example 2 (using `wqs_pt`) This is an example where the WQS permutation test goes against a (false positive) significant naive result. We produce a simulated dataset with the following parameters: * WQS coefficient $\beta_1$: 0 * Mixture weights: 0.15 for first 5 components, 0.05 for remaining 5 components ```{r, eval = F} sim_res2 <- wqs_sim(nmix = 10, ncovrt = 10, nobs = 1000, ntruewts = 10, ntruecovrt = 5, truewqsbeta = 0, truebeta0 = 0.1, truewts = c(0.15, 0.15, 0.15, 0.15, 0.15, 0.05, 0.05, 0.05, 0.05, 0.05), q = 10, seed = 16) sim_data2 <- sim_res2$Data ``` Now we run WQS regression as well as the permutation test on the simulated data. ```{r, eval=F} # mixture names mix_names2 <- colnames(sim_data2)[2:11] # create reference wqs object wqs_main2 <- gwqs(wqs_form, mix_name = mix_names2, data = sim_data2, q = 10, validation = 0, b = 20, b1_pos = T, plan_strategy = "multicore", family = "gaussian", seed = 16) # run permutation test perm_test_res2 <- wqs_pt(wqs_main2, niter = 50, boots = 5, b1_pos = T, seed = 16) ``` Note that the naive WQS regression produces a significant result for the WQS coefficient (p-value = `r round(main_sum2$coefficients[2,4],3)`). ```{r, eval = F} main_sum2 <- summary(perm_test_res2$gwqs_main) ``` ```{r} main_sum2$coefficients ``` The permutation test, however, repudiates the significance of these plots (p = `r round(perm_test_res2$perm_test$pval, 2)`). ```{r} perm_test_res2$perm_test$pval ``` Here are the summary plots: ```{r, fig.height = 6} wqspt_plot(perm_test_res2)$FullPlot ``` ## Example 3 (using `wqs_full_perm`) Using the same data as in Example 1, we run the WQS regression with permutation test using the full wrapper `wqs_full_perm` call. ```{r, eval = F} perm_test_res3 <- wqs_full_perm(wqs_form, data = sim_data1, mix_name = mix_names1, q = 10, b_main = 20, b_perm = 5, b1_pos = T, niter = 50, seed = 16, plan_strategy = "multicore") ``` ```{r, fig.height = 6} wqspt_plot(perm_test_res3)$FullPlot ``` ## Example 4 (using `wqs_full_perm` on binary outcome example) This is an example in which we apply the logistic regression version of the WQS permutation test. 
We produce a simulated dataset with the following parameters: * WQS coefficient $\beta_1$: 0.4 * Mixture weights: 0.15 for first 5 components, 0.05 for remaining 5 components ```{r, eval=F} sim_res3 <- wqs_sim(nmix = 10, ncovrt = 10, nobs = 1000, ntruewts = 10, ntruecovrt = 5, truewqsbeta = 0.4, truebeta0 = -2.5, truewts = c(0.15, 0.15, 0.15, 0.15, 0.15, 0.05, 0.05, 0.05, 0.05, 0.05), q = 10, family = "binomial", seed = 16) sim_data3 <- sim_res3$Data perm_test_res4 <- wqs_full_perm(wqs_form, data = sim_data3, mix_name = mix_names1, q = 10, b_main = 20, b_perm = 5, b1_pos = T, niter = 50, seed = 16, plan_strategy = "multicore", family = "binomial") ``` ```{r, fig.height=6} wqspt_plot(perm_test_res4)$FullPlot ``` # References * Barrett, E. S., Corsetti, M., Day, D., Thurston, S. W., Loftus, C. T., Karr, C. J., ... & Sathyanarayana, S. (2022). Prenatal phthalate exposure in relation to placental corticotropin releasing hormone (pCRH) in the CANDLE cohort. Environment International, 160, 107078. * Carrico, C., Gennings, C., Wheeler, D. C., & Factor-Litvak, P. (2015). Characterization of weighted quantile sum regression for highly correlated data in a risk analysis setting. Journal of Agricultural, Biological, and Environmental Statistics, 20(1), 100-120. * Curtin, P., Kellogg, J., Cech, N., & Gennings, C. (2019). A random subset implementation of weighted quantile sum (WQSRS) regression for analysis of high-dimensional mixtures. Communications in Statistics-Simulation and Computation, 50(4), 1119-1134. * Day, D. B., Collett, B. R., Barrett, E. S., Bush, N. R., Swan, S. H., Nguyen, R. H., ... & Sathyanarayana, S. (2021). Phthalate mixtures in pregnancy, autistic traits, and adverse childhood behavioral outcomes. Environment International, 147, 106330. * Day, D. B., Sathyanarayana, S., LeWinn, K. Z., Karr, C. J., Mason, W. A., & Szpiro, A. A. (2022). A permutation test-based approach to strengthening inference on the effects of environmental mixtures: comparison between single index analytic methods. Environmental Health Perspectives, 130(8). * Freije, S. L., Enquobahrie, D. A., Day, D. B., Loftus, C., Szpiro, A. A., Karr, C. J., ... & Sathyanarayana, S. (2022). Prenatal exposure to polycyclic aromatic hydrocarbons and gestational age at birth. Environment International, 164, 107246. * Loftus, C. T., Bush, N. R., Day, D. B., Ni, Y., Tylavsky, F. A., Karr, C. J., ... & LeWinn, K. Z. (2021). Exposure to prenatal phthalate mixtures and neurodevelopment in the Conditions Affecting Neurocognitive Development and Learning in Early childhood (CANDLE) study. Environment International, 150, 106409. * Loftus, C., Szpiro, A. A., Workman, T., Wallace, E. R., Hazlehurst, M. F., Day, D. B., ... & Karr, C. J. (2022). Maternal exposure to urinary polycyclic aromatic hydrocarbons (PAH) in pregnancy and childhood asthma in a pooled multi-cohort study. Environment International, 170, p.107494. * Renzetti, S., Curtin, P., Just, A. C., Bello, G., & Gennings, C. (2021). ‘gWQS: Generalized Weighted Quantile Sum Regression’. https://CRAN.R-project.org/package=gWQS. * Wallace, E. R., Ni, Y., Loftus, C. T., Sullivan, A., Masterson, E., Szpiro, A., ... & Sathyanarayana, S. (2022). Prenatal urinary metabolites of polycyclic aromatic hydrocarbons and toddler cognition, language, and behavior. Environment International, 159, 107039.
/scratch/gouwar.j/cran-all/cranData/wqspt/vignettes/introduction.Rmd
#' Extract period (seasonal) averages from fitted GAM #' #' Extract period (seasonal) averages from fitted GAM #' #' @param mod input model object as returned by \code{\link{anlz_gam}} #' @param doystr numeric indicating start Julian day for extracting averages #' @param doyend numeric indicating ending Julian day for extracting averages #' #' @return A data frame of period averages #' @export #' #' @concept analyze #' #' @examples #' library(dplyr) #' #' # data to model #' tomod <- rawdat %>% #' filter(station %in% 34) %>% #' filter(param %in% 'chl') %>% #' filter(yr > 2015) #' #' mod <- anlz_gam(tomod, trans = 'log10') #' anlz_avgseason(mod, doystr = 90, doyend = 180) anlz_avgseason <- function(mod, doystr = 1, doyend = 364) { # transformation trans <- mod$trans # number of days in seasonal window numDays <- doyend - doystr + 1 # prediction matrix fillData <- anlz_prdmatrix(mod, doystr = doystr, doyend = doyend, avemat = TRUE) # yr vector yr <- fillData %>% dplyr::pull(yr) %>% unique ## See Examples section of help(predict.gam) Xp <- predict(mod, newdata = fillData, type = "lpmatrix") coefs <- coef(mod) A <- kronecker(diag(length(yr)), matrix(rep(1/numDays, numDays), nrow = 1)) Xs <- A %*% Xp means <- as.numeric(Xs %*% coefs) ses <- sqrt(diag(Xs %*% mod$Vp %*% t(Xs))) avgs <- data.frame(met = means, se = ses, yr = yr) # backtransform, add lwr/upr confidence intervals dispersion <- summary(mod)$dispersion if(trans == 'log10'){ avgs$bt_lwr <- 10^((avgs$met - 1.96 * avgs$se) + log(10) * dispersion / 2) avgs$bt_upr <- 10^((avgs$met + 1.96 * avgs$se) + log(10) * dispersion / 2) avgs$bt_met <- 10^(avgs$met + log(10) * dispersion / 2) } if(trans == 'ident'){ avgs$bt_met <- avgs$met avgs$bt_lwr <- avgs$met - 1.96 * avgs$se avgs$bt_upr <- avgs$met + 1.96 * avgs$se } out <- avgs %>% tibble::tibble() %>% dplyr::mutate(dispersion = dispersion) %>% dplyr::select(yr, met, se, bt_lwr, bt_upr, bt_met, dispersion) return(out) }
/scratch/gouwar.j/cran-all/cranData/wqtrends/R/anlz_avgseason.R
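As an editorial aside (not part of the package source), the linear predictor matrix approach used in `anlz_avgseason()` above can be illustrated with a toy `mgcv` model: a window average of the fitted curve is a linear combination of basis functions, so its standard error follows directly from the coefficient covariance matrix `Vp`. A minimal sketch, assuming only base R and `mgcv`:

```r
library(mgcv)

# toy data with a single smooth, mirroring the s(cont_year) structure used above
set.seed(1)
d <- data.frame(x = seq(0, 10, length.out = 200))
d$y <- sin(d$x) + rnorm(200, sd = 0.3)
m <- gam(y ~ s(x), data = d)

# average the fitted curve over a window of x (analogous to a seasonal window)
newd <- data.frame(x = seq(2, 4, length.out = 50))
Xp <- predict(m, newdata = newd, type = "lpmatrix")      # basis functions at the new points
A  <- matrix(rep(1 / nrow(newd), nrow(newd)), nrow = 1)  # row vector that averages the predictions
Xs <- A %*% Xp                                           # averaged basis functions
est <- as.numeric(Xs %*% coef(m))                        # window mean of the fitted curve
se  <- sqrt(diag(Xs %*% m$Vp %*% t(Xs)))                 # its standard error
c(mean = est, se = se)
```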
#' Back-transform response variable #' #' Back-transform response variable after fitting GAM #' #' @param dat input data with \code{trans} argument #' #' @return \code{dat} with the \code{value} column back-transformed using info from the \code{trans} column #' @export #' #' @details \code{dat} can be output from \code{\link{anlz_trans}} or \code{\link{anlz_prd}} #' #' @concept analyze #' #' @examples #' library(dplyr) #' #' tomod <- rawdat %>% #' filter(station %in% 34) %>% #' filter(param %in% 'chl') #' dat <- anlz_trans(tomod, trans = 'log10') #' backtrans <- anlz_backtrans(dat) #' head(backtrans) #' #' mod <- anlz_gam(tomod, trans = 'log10') #' dat <- anlz_prd(mod) #' backtrans <- anlz_backtrans(dat) #' head(backtrans) anlz_backtrans <- function(dat){ if(!'trans' %in% names(dat)) stop('trans info not found in dat') trans <- unique(dat$trans) # log if(trans == 'log10') dat <- dat %>% dplyr::mutate_if(grepl('value', names(.)), ~10 ^ .) return(dat) }
/scratch/gouwar.j/cran-all/cranData/wqtrends/R/anlz_backtrans.R
#' Return summary statistics for GAM fits #' #' Return summary statistics for GAM fits #' #' @param mod input model object as returned by \code{\link{anlz_gam}} #' #' @return A \code{data.frame} with summary statistics for GAM fits #' @export #' #' @details Results show the overall summary of the model as Akaike Information Criterion (\code{AIC}), the generalized cross-validation score (\code{GCV}), and the \code{R2} values. Lower values for \code{AIC} and \code{GCV} and higher values for \code{R2} indicate improved model fit. #' #' @concept analyze #' #' @examples #' library(dplyr) #' #' # data to model #' tomod <- rawdat %>% #' filter(station %in% 34) %>% #' filter(param %in% 'chl') #' #' mod <- anlz_gam(tomod, trans = 'log10') #' anlz_fit(mod) anlz_fit <- function(mod) { out <- data.frame( AIC = AIC(mod), GCV = mod$gcv.ubre, R2 = summary(mod)$r.sq ) return(out) }
/scratch/gouwar.j/cran-all/cranData/wqtrends/R/anlz_fit.R
#' Fit a generalized additive model to a water quality time series
#'
#' Fit a generalized additive model to a water quality time series
#'
#' @param moddat input raw data, one station and parameter
#' @param kts optional numeric vector for the upper limit for the number of knots in the term \code{s(cont_year)}, see details
#' @param ... additional arguments passed to other methods, i.e., \code{trans = 'log10'} (default) or \code{trans = 'ident'} passed to \code{\link{anlz_trans}}
#'
#' @details
#'
#' The model structure is as follows:
#'
#'\describe{
#'  \item{model S:}{chl ~ s(cont_year, k = large)}
#'}
#' The \code{cont_year} vector is measured as a continuous numeric variable for the annual effect (e.g., January 1st, 2000 is 2000.0, July 1st, 2000 is 2000.5, etc.) and \code{doy} is the day of year as a numeric value from 1 to 366. The function \code{\link[mgcv]{s}} models \code{cont_year} as a smoothed, non-linear variable. The optimal amount of smoothing on \code{cont_year} is determined by cross-validation as implemented in the mgcv package, and the theoretical upper limit on the number of knots \code{k} should be large enough to allow sufficient flexibility in the smoothing term. The upper limit of \code{k} was chosen as 12 times the number of years for the input data. If insufficient data are available to fit a model with the specified \code{k}, the number of knots is decreased until the data can be modelled, e.g., 11 times the number of years, 10 times the number of years, etc.
#'
#' @return a \code{\link[mgcv]{gam}} model object
#' @import mgcv
#' @export
#'
#' @concept analyze
#'
#' @examples
#' library(dplyr)
#' tomod <- rawdat %>%
#'   filter(station %in% 34) %>%
#'   filter(param %in% 'chl')
#' anlz_gam(tomod, trans = 'log10')
anlz_gam <- function(moddat, kts = NULL, ...){

  if(length(unique(moddat$param)) > 1)
    stop('More than one parameter found in input data')

  if(length(unique(moddat$station)) > 1)
    stop('More than one station found in input data')

  # get transformation
  moddat <- anlz_trans(moddat, ...)

  frm <- "value ~ s(cont_year)"

  fct <- 12

  # get upper bounds of knots
  if(is.null(kts))
    kts <- fct * length(unique(moddat$yr))

  p1 <- gsub('(^.*)s\\(cont\\_year\\).*$', '\\1', frm)
  p2 <- paste0('s(cont_year, k = ', kts, ')')
  frmin <- paste0(p1, p2)

  out <- try(gam(as.formula(frmin),
    data = moddat, na.action = na.exclude, select = F
    ), silent = T)

  # decrease the upper limit on knots until the model fits
  while(inherits(out, 'try-error')){

    cat('reducing knots for cont_year spline from', kts, '\n')

    fct <- fct - 1

    # get upper bounds of knots
    kts <- fct * length(unique(moddat$yr))

    p1 <- gsub('(^.*)s\\(cont\\_year\\).*$', '\\1', frm)
    p2 <- paste0('s(cont_year, k = ', kts, ')')
    frmin <- paste0(p1, p2)

    out <- try(gam(as.formula(frmin),
      data = moddat, na.action = na.exclude, select = F
      ), silent = T)

  }

  out$trans <- unique(moddat$trans)

  return(out)

}
/scratch/gouwar.j/cran-all/cranData/wqtrends/R/anlz_gam.R
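One way to sanity-check the automatically chosen upper limit on the `s(cont_year)` basis dimension is `mgcv::gam.check()`. An editorial sketch reusing the example data from the roxygen block above, assuming the wqtrends package is installed and loaded:

```r
library(dplyr)
library(wqtrends)

# same example data as in the roxygen block above
tomod <- rawdat %>%
  filter(station %in% 34) %>%
  filter(param %in% 'chl')

mod <- anlz_gam(tomod, trans = 'log10')

# k-index diagnostics: a k-index well below 1 with a small p-value suggests
# the basis dimension may still be too restrictive
mgcv::gam.check(mod)
```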
#' Extract period (seasonal) metrics from fitted GAM
#'
#' Extract period (seasonal) metrics from fitted GAM
#'
#' @param mod input model object as returned by \code{\link{anlz_gam}}
#' @param metfun function input for metric to calculate, e.g., \code{mean}, \code{var}, \code{max}, etc.
#' @param doystr numeric indicating start Julian day for extracting averages
#' @param doyend numeric indicating ending Julian day for extracting averages
#' @param nsim numeric indicating number of random draws for simulating uncertainty
#' @param ... additional arguments passed to \code{metfun}, e.g., \code{na.rm = TRUE}
#'
#' @return A data frame of period metrics
#' @export
#'
#' @details This function estimates a metric of interest for a given seasonal period each year using results from a fitted GAM (i.e., from \code{\link{anlz_gam}}). The estimates are based on the predicted values for each seasonal period, with uncertainty of the metric based on repeated sampling of the predictions following uncertainty in the model coefficients.
#'
#' @concept analyze
#'
#' @examples
#' library(dplyr)
#'
#' # data to model
#' tomod <- rawdat %>%
#'   filter(station %in% 34) %>%
#'   filter(param %in% 'chl') %>%
#'   filter(yr > 2015)
#'
#' mod <- anlz_gam(tomod, trans = 'log10')
#' anlz_metseason(mod, mean, doystr = 90, doyend = 180, nsim = 100)
anlz_metseason <- function(mod, metfun = mean, doystr = 1, doyend = 364, nsim = 1e4, ...) {

  # transformation
  trans <- mod$trans

  # number of days in seasonal window
  numDays <- doyend - doystr + 1

  # prediction matrix
  fillData <- anlz_prdmatrix(mod, doystr = doystr, doyend = doyend)

  # basis function coefficients and var/cov matrix (uncertainty of coefs)
  gamcoefs <- coef(mod)
  vargamcoefs <- mod$Vp

  # random sim of coefs based on multivariate normal
  coefrnd <- rmvn(nsim, gamcoefs, vargamcoefs)

  mets <- fillData %>%
    dplyr::group_by(yr) %>%
    tidyr::nest() %>%
    dplyr::mutate(
      met = purrr::map(data, function(x){

        Xp <- predict(mod, x, type = 'lpmatrix') # all basis functions
        pred <- Xp %*% gamcoefs
        met <- metfun(pred, ...)

        sims <- Xp %*% t(coefrnd)
        unc <- apply(sims, 2, metfun, ...)
        ses <- sd(unc)

        out <- data.frame(met = met, se = ses)

        return(out)

      })
    ) %>%
    dplyr::select(-data) %>%
    dplyr::ungroup() %>%
    tidyr::unnest('met')

  # backtransform, add lwr/upr confidence intervals
  dispersion <- summary(mod)$dispersion
  if(trans == 'log10'){
    mets$bt_lwr <- 10^((mets$met - 1.96 * mets$se) + log(10) * dispersion / 2)
    mets$bt_upr <- 10^((mets$met + 1.96 * mets$se) + log(10) * dispersion / 2)
    mets$bt_met <- 10^(mets$met + log(10) * dispersion / 2)
  }
  if(trans == 'ident'){
    mets$bt_met <- mets$met
    mets$bt_lwr <- mets$met - 1.96 * mets$se
    mets$bt_upr <- mets$met + 1.96 * mets$se
  }

  # add dispersion to output
  mets$dispersion <- dispersion

  out <- mets

  return(out)

}
/scratch/gouwar.j/cran-all/cranData/wqtrends/R/anlz_metseason.R
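Because `anlz_metseason()` accepts any summary function through `metfun` and forwards extra arguments through `...`, the same machinery can return, for example, seasonal maxima. A brief editorial sketch continuing the roxygen example above (the fitted `mod` object is assumed):

```r
# seasonal maxima (instead of means) for the same day-90 to day-180 window,
# with NA handling passed through to the metric function
anlz_metseason(mod, metfun = max, doystr = 90, doyend = 180, nsim = 100, na.rm = TRUE)
```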
#' Fit a mixed meta-analysis regression model of trends
#'
#' Fit a mixed meta-analysis regression model of trends
#'
#' @param metseason output from \code{\link{anlz_metseason}}
#' @param yrstr numeric for starting year
#' @param yrend numeric for ending year
#' @param yromit optional numeric vector of years to omit from the analysis, inherited from \code{\link{show_metseason}}
#' @details Parameters are not back-transformed if the original GAM used a transformation of the response variable
#'
#' @concept analyze
#'
#' @return A \code{\link[mixmeta]{mixmeta}} fitted model object, or \code{NA} if years in the period from \code{yrstr} to \code{yrend} are missing from \code{metseason} (and \code{yromit} is not supplied)
#' @export
#'
#' @examples
#' library(dplyr)
#'
#' # data to model
#' tomod <- rawdat %>%
#'   filter(station %in% 34) %>%
#'   filter(param %in% 'chl') %>%
#'   filter(yr > 2015)
#'
#' mod <- anlz_gam(tomod, trans = 'log10')
#' metseason <- anlz_metseason(mod, doystr = 90, doyend = 180)
#' anlz_mixmeta(metseason, yrstr = 2016, yrend = 2019)
anlz_mixmeta <- function(metseason, yrstr = 2000, yrend = 2019, yromit = NULL){

  # input
  totrnd <- metseason %>%
    dplyr::mutate(S = se^2) %>%
    dplyr::filter(yr %in% seq(yrstr, yrend))

  if(nrow(totrnd) != length(seq(yrstr, yrend)) & is.null(yromit))
    return(NA)

  out <- mixmeta::mixmeta(met ~ yr, S = S, random = ~1|yr, data = totrnd, method = 'reml')

  return(out)

}
/scratch/gouwar.j/cran-all/cranData/wqtrends/R/anlz_mixmeta.R
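The trend across the selected years is the slope on `yr` in the returned fit. A short editorial sketch continuing the roxygen example above; it assumes the usual `coef()` and `summary()` methods apply to `mixmeta` objects, as they do for most regression classes:

```r
mixfit <- anlz_mixmeta(metseason, yrstr = 2016, yrend = 2019)

coef(mixfit)    # intercept and per-year slope of the seasonal metric
summary(mixfit) # standard errors and significance of the slope
```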
#' Estimate percent change trends from GAM results for selected time periods #' #' Estimate percent change trends from GAM results for selected time periods #' #' @param mod input model object as returned by \code{\link{anlz_gam}} #' @param baseyr numeric vector of starting years #' @param testyr numeric vector of ending years #' #' @export #' @return A data frame of summary results for change between the years. #' #' @details Working components of this function were taken from the gamDiff function in the baytrends package. #' #' @concept analyze #' #' @examples #' library(dplyr) #' #' # data to model #' tomod <- rawdat %>% #' filter(station %in% 34) %>% #' filter(param %in% 'chl') #' #' mod <- anlz_gam(tomod, trans = 'log10') #' anlz_perchg(mod, baseyr = 1990, testyr = 2016) anlz_perchg <- function(mod, baseyr, testyr){ # input data used to create model gamdat <- mod$model # transformation used trans <- mod$trans # estimated values are on the fifteenth of every month for each year of comparisons doyset <- c(15, 46, 75, 106, 136, 167, 197, 228, 259, 289, 320, 350) # set up base and test years nbaseyr <- length(baseyr) ntestyr <- length(testyr) yrset <- c(baseyr, testyr) # calculate total predictions in base period, test period nbase <- length(doyset) * nbaseyr ntest <- length(doyset) * ntestyr # create a data frame with Nrow rows where Nrow= (nbase*Ndoy) + (ntest*Ndoy)... # nbase yrs of Ndoy baseline dates and ntest-yrs # of Ndoy current dates. Include: doy, year, logical field (bl) indicating baseline # and current, and centered decimal year (cont_year) using same centering value # computed from data set (centerYear) pdat <- expand.grid(doyset, yrset) # make df with all comb. of doyset and yrset names(pdat) <- c('doy','year') # rename variables pdat$bl <- pdat$year <= baseyr[nbaseyr] # create logical field indicating baseline pdat$cont_year <- (pdat$year + (pdat$doy-1)/366) # compute cont_year # JBH(24Nov2017): extension of above xa and avg.per.mat # keeping weight the same--just extending number of values by "*nrow(pdatWgt)" xa <- c(rep(1/nbase,nbase), rep(0,ntest), rep(0,nbase), rep(1/ntest,ntest)) # construct a matrix to average baseline and current periods avg.per.mat <- matrix(xa,nrow=2,ncol=(nbase+ntest), byrow=TRUE) # construct matrix to get difference of current minus baseline diff.mat <- c(-1,1) # Extract coefficients (number of terms depends on complexity of GAM formula) beta <- mod$coefficients # coefficients vector VCmat <- mod$Vp # variance-covariance matrix of coefficents # Begin calculations to compute differences #### # extract matrix of linear predicters Xpc <- predict(mod, newdata = pdat,type="lpmatrix") # Compute predictions based on linear predictors (Nrow x 1 matrix) pdep <- Xpc%*%beta # equivalent to "predict(gamRslt,newdata=pdatLong)" # Calc. average baseline and average current; stores as a 2x1 matrix period.avg <- avg.per.mat %*% pdep # Calc. average current - average baseline; stores as a 1x1 matrix diff.avg <- diff.mat %*% period.avg # pre-multiply by differencing matrix to check results # Calc standard errors and confidence intervals on difference predictions xpd <- diff.mat%*%avg.per.mat%*%Xpc # premultiply linear predictors by averaging and differencing matrices. diff.est <- xpd%*%beta # compute estimate of difference diff.se <- sqrt(xpd%*%VCmat%*%t(xpd)) # compute Std. Err. 
  # by usual rules
  diff.t <- diff.est / diff.se
  pval <- 2*pt(abs(diff.t), mod$df.null, 0, lower.tail = FALSE)
  pval <- as.numeric(pval)

  # compute CI for difference
  alpha <- 0.05
  halpha <- alpha/2
  diff.ci <- c(diff.est - qnorm(1-halpha) * diff.se, diff.est + qnorm(1-halpha) * diff.se)

  # observed units, backtransform
  per.mn.obs <- period.avg
  dispersion <- summary(mod)$dispersion
  if(trans == 'log10')
    per.mn.obs <- 10^(per.mn.obs + log(10) * dispersion / 2)

  # calculate percent change
  perchg <- 100*((per.mn.obs[2] - per.mn.obs[1])/per.mn.obs[1])

  # results
  out <- tibble::tibble(
    baseval = per.mn.obs[1, 1], # average value in observed units, base period
    testval = per.mn.obs[2, 1], # average value in observed units, test period
    perchg = perchg, # percent change, back-transformed
    pval = pval # p value
  )

  return(out)

}
/scratch/gouwar.j/cran-all/cranData/wqtrends/R/anlz_perchg.R
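Because `baseyr` and `testyr` accept numeric vectors, the comparison can also be made between multi-year periods rather than single years. A brief editorial sketch continuing the roxygen example above (the specific years are arbitrary):

```r
# percent change between a three-year baseline and a three-year test period
anlz_perchg(mod, baseyr = 1990:1992, testyr = 2014:2016)
```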
#' Get predicted data from fitted GAMs across period of observation #' #' Get predicted data from fitted GAMs across period of observation #' #' @param mod input model object as returned by \code{\link{anlz_gam}} #' @param annual logical indicating if predictions only for the \code{cont_year} smoother are returned #' #' @return a \code{data.frame} with predictions #' @export #' #' @concept analyze #' #' @examples #' library(dplyr) #' #' # data to model #' tomod <- rawdat %>% #' filter(station %in% 34) %>% #' filter(param %in% 'chl') %>% #' filter(yr > 2015) #' #' mod <- anlz_gam(tomod, trans = 'log10') #' anlz_prd(mod) anlz_prd <- function(mod, annual = FALSE) { prddat <- mod$model trans <- mod$trans prddat <- data.frame( cont_year = seq(min(prddat$cont_year), max(prddat$cont_year), length = 1000) ) %>% dplyr::mutate( date = lubridate::date_decimal(cont_year), date = as.Date(date), mo = lubridate::month(date, label = TRUE), doy = lubridate::yday(date), yr = lubridate::year(date) ) # get predictions from terms matrix, value is sum of all plus intercept, annvalue is same minus anything with doy prd <- predict(mod, newdata = prddat, type = 'terms') int <- attr(prd, 'constant') value <- rowSums(prd) + int # get annual trend out <- prddat %>% dplyr::mutate( value = value, trans = mod$trans ) if(annual){ annvalue <- rowSums(prd[, !grepl('doy', colnames(prd)), drop = FALSE]) + int out <- out %>% dplyr::mutate( annvalue = annvalue ) } out <- out %>% anlz_backtrans() return(out) }
/scratch/gouwar.j/cran-all/cranData/wqtrends/R/anlz_prd.R
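The prediction grid returned by `anlz_prd()` lends itself to a quick visual check of the fitted trend. An editorial sketch using base graphics and the `mod` object from the roxygen example above:

```r
prd <- anlz_prd(mod)

# back-transformed predictions across the period of record
plot(value ~ date, data = prd, type = "l", xlab = "", ylab = "chl")
```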