#' @section Git/GitHub Authentication:
#' Many usethis functions, including those documented here, potentially interact
#' with GitHub in two different ways:
#' * Via the GitHub REST API. Examples: create a repo, a fork, or a pull
#' request.
#' * As a conventional Git remote. Examples: clone, fetch, or push.
#'
#' Therefore two types of auth can happen and your credentials must be
#' discoverable. Which credentials do we mean?
#'
#' * A GitHub personal access token (PAT) must be discoverable by the gh
#' package, which is used for GitHub operations via the REST API. See
#' [gh_token_help()] for more about getting and configuring a PAT.
#' * If you use the HTTPS protocol for Git remotes, your PAT is also used for
#' Git operations, such as `git push`. Usethis uses the gert package for this,
#' so the PAT must be discoverable by gert. Generally gert and gh will
#' discover and use the same PAT. This ability to "kill two birds with one
#' stone" is why HTTPS + PAT is our recommended auth strategy for those new
#' to Git and GitHub and PRs.
#' * If you use SSH remotes, your SSH keys must also be discoverable, in
#' addition to your PAT. The public key must be added to your GitHub account.
#'
#' Git/GitHub credential management is covered in a dedicated article:
#' [Managing Git(Hub) Credentials](https://usethis.r-lib.org/articles/articles/git-credentials.html)
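#'
#' A quick way to check what will be discovered (a sketch, not required;
#' both functions are exported by usethis and gh, respectively):
#'
#' ```r
#' usethis::git_sitrep() # summarizes your Git/GitHub config, including the PAT
#' gh::gh_whoami()       # reports the GitHub account tied to the discovered PAT
#' ```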
|
/scratch/gouwar.j/cran-all/cranData/usethis/man/roxygen/templates/double-auth.R
|
#' us_fertilizer_county
#'
#' This dataset is adapted from county-level estimates of fertilizer nitrogen and phosphorus
#' based on commercial sales from 1945 to 2012. See the
#' [source data release](https://www.sciencebase.gov/catalog/item/5851b2d1e4b0f99207c4f238)
#' for more details.
#'
#' @format A data frame with 582,012 rows and 11 variables:
#' \describe{
#' \item{FIPS}{FIPS is a combination of state and county codes, in character format.}
#' \item{State}{The two-letter state abbreviation (U.S.)}
#' \item{County}{The county name (U.S.)}
#' \item{ALAND}{The land area of the county, in square kilometers}
#' \item{AWATER}{The water area of the county, in square kilometers}
#' \item{INTPTLAT}{The latitude of the county centroid, e.g. 32.53638}
#' \item{INTPTLONG}{The longitude of the county centroid, e.g. -86.64449}
#' \item{Quantity}{The quantity of nutrient applied, in kg N or kg P}
#' \item{Year}{The year of the estimate, e.g. 1994}
#' \item{Nutrient}{The nutrient type, N or P}
#' \item{Farm.Type}{The land use type of the application: farm or nonfarm}
#' \item{Input.Type}{The input type of the nutrient: Fertilizer or Manure}
#' ...
#' }
#' @examples
#' require(usfertilizer)
#' data(us_fertilizer_county)
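#'
#' # A minimal sketch (assumes the dplyr package is available):
#' # total nitrogen applied as commercial fertilizer, by year
#' if (requireNamespace("dplyr", quietly = TRUE)) {
#'   library(dplyr)
#'   us_fertilizer_county %>%
#'     filter(Nutrient == "N", Input.Type == "Fertilizer") %>%
#'     group_by(Year) %>%
#'     summarise(total_N_kg = sum(Quantity, na.rm = TRUE))
#' }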
"us_fertilizer_county"
|
/scratch/gouwar.j/cran-all/cranData/usfertilizer/R/data.R
|
#' usfertilizer.
#'
#' @name usfertilizer
#' @docType package
NULL
|
/scratch/gouwar.j/cran-all/cranData/usfertilizer/R/usfertilizer-package.r
|
## ----setup, include=FALSE------------------------------------------------
knitr::opts_chunk$set(echo = TRUE, message = FALSE, warning = FALSE, eval = FALSE)
## ------------------------------------------------------------------------
# require(tidyverse)
# # county level data of fertilizer application.
# #Source: https://www.sciencebase.gov/catalog/item/5851b2d1e4b0f99207c4f238
# raw_data = read_csv("../data-raw/CNTY_FERT_1987-2012.csv")
# #summary(raw_data)
#
# # County summary from US census bureau.
# # Source: https://www.census.gov/geo/maps-data/data/gazetteer2010.html
# county_raw = read.table("../data-raw/Gaz_counties_national.txt", sep = "\t", header=TRUE)
#
# # read in data, extracted from coverage in ArcGIS.
# n45_64 <- read.table("../data-raw/cty_fert0.n45-64.txt", sep = ",", header = T)
# n65_85 <- read.table("../data-raw/cty_fert0.n65-85.txt", sep = ",", header = T)
# p45_64 <- read.table("../data-raw/cty_fert0.p45-64.txt", sep = ",", header = T)
# p65_85 <- read.table("../data-raw/cty_fert0.p65-85.txt", sep = ",", header = T)
# # merge nitrogen and P data together.
# n45_85 = inner_join(n45_64, n65_85, by = c("FIPS","STATE","Rowid_"))
# p45_85 = inner_join(p45_64, p65_85, by = c("FIPS","STATE","Rowid_"))
#
## ------------------------------------------------------------------------
# # clean nitrogen and phosphorus data.
# nitrogen_1985 = n45_85 %>%
# select(-Rowid_) %>% # remove irrelevant info.
# # add leading zeros for FIPS to make it 5 digits.
# mutate(FIPS = str_pad(FIPS, 5, pad = "0")) %>%
# gather(Year_temp, Quantity, Y45:Y85) %>%
# mutate(Fertilizer = rep("N", length(.$Quantity)),
# Farm.Type = rep("farm", length(.$Quantity)),
# Year = paste("19",str_sub(Year_temp, start = 2),sep = "")
# ) %>%
# select(-Year_temp)
#
# phosphorus_1985 = p45_85 %>%
# select(-Rowid_) %>% # remove irrelevant info.
# mutate(FIPS = str_pad(FIPS, 5, pad = "0")) %>%
# gather(Year_temp, Quantity, Y45:Y85) %>%
# mutate(Fertilizer = rep("P", length(.$Quantity)),
# Farm.Type = rep("farm", length(.$Quantity)),
# Year = paste("19",str_sub(Year_temp, start = 2),sep = "")
# ) %>%
# select(-Year_temp)
# # clean dataset for data before 1985
# clean_data_1985 = rbind(phosphorus_1985, nitrogen_1985)
## ------------------------------------------------------------------------
# # remove duplicates in county data.
# county_data = county_raw %>%
# distinct(GEOID, .keep_all = TRUE) %>%
# # select certain columns.
# select(GEOID, ALAND, AWATER,INTPTLAT, INTPTLONG) %>%
# mutate(FIPSno = GEOID) %>%
# select(-GEOID)
#
# # combine county data with county level fertilizer data.
# county_summary = left_join(raw_data,county_data, by = "FIPSno")
#
# clean_data = county_summary %>%
# # remove some columns with FIPS numbers.
# select(-c(FIPS_st, FIPS_co,FIPSno)) %>%
# # wide to long dataset.
# gather(Fert.Type, Quantity, farmN1987:nonfP2012) %>%
# # separate the fert.type into three columns: farm type, fertilizer, year.
# mutate(Year = str_sub(Fert.Type, start = -4),
# Fertilizer = str_sub(Fert.Type, start = -5, end = -5),
# Farm.Type = str_sub(Fert.Type, start = 1, end = 4)
# ) %>%
# # replace nonf with nonfarm
# mutate(Farm.Type = ifelse(Farm.Type == "nonf", "nonfarm", "farm")) %>%
# # remove Fert.Type
# select(-Fert.Type)
#
# # extract county summaries info from clean data.
# cnty_summary_1985 = county_summary %>%
# select(FIPS,State, County, ALAND, AWATER, INTPTLAT, INTPTLONG) %>%
# right_join(clean_data_1985, by = "FIPS")
#
# # add data from 1945.
# clean_data = rbind(clean_data, cnty_summary_1985) %>%
# rename(Nutrient = Fertilizer) %>% # rename Fertilizer to Nutrient.
# mutate(Input.Type = rep("Fertilizer")) # add a column labeling the input as Fertilizer (vs. Manure).
## ------------------------------------------------------------------------
# # read in manure data from 1982 to 1997.
# cnty_manure_97 = read_csv("../data-raw/cnty_manure_82-97.csv")
# cnty_manure_summary = cnty_manure_97 %>%
# select(-c(State, County)) %>%
# gather(dummy, Quantity, N_1982:P_1997) %>% # dummy is a temporary column.
# mutate(Farm.Type = rep("farm", length(.$FIPS)),
# Input.Type = rep("Manure", length(.$FIPS))) %>%
# separate(dummy, c("Nutrient", "Year"), sep = "_")
## ------------------------------------------------------------------------
# # read in manure data.
# cnty_manure_02 = read_csv("../data-raw/cnty_manure_2002.csv")
# cnty_manure_07 = read_csv("../data-raw/cnty_manure_2007.csv")
# cnty_manure_12 = read_csv("../data-raw/cnty_manure_2012.csv")
#
# cnty_manure_02_12 = rbind(cnty_manure_02, cnty_manure_07, cnty_manure_12) %>%
# select(-c(State, County)) %>%
# gather(Nutrient, Quantity, N:P) %>%
# mutate(Farm.Type = rep("farm", length(.$FIPS)),
# Input.Type = rep("Manure", length(.$FIPS)))
## ----sava_data, eval=FALSE-----------------------------------------------
#
# # connect manure data.
# cnty_manure_summary = rbind(cnty_manure_summary,cnty_manure_02_12)
#
# cnty_manure_all = county_summary %>%
# select(FIPS,State, County, ALAND, AWATER, INTPTLAT, INTPTLONG) %>%
# right_join(cnty_manure_summary, by = "FIPS")
#
# clean_data = rbind(clean_data, cnty_manure_all)
#
# # NOT RUN
# # save cleaned data into .rda format.
# save(clean_data, file = "../data/usfertilizer_county.rda")
|
/scratch/gouwar.j/cran-all/cranData/usfertilizer/inst/doc/Data_sources_and_cleaning.R
|
---
title: "Data sources and processing procedures"
author: "Wenlong"
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{Data sources and processing procedures}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r setup, include=FALSE}
knitr::opts_chunk$set(echo = TRUE, message = FALSE, warning = FALSE, eval = FALSE)
```
## Introduction of data sources and availability
The data used in this package were originally compiled and processed by the United States Geological Survey (USGS). The fertilizer data include applications on both farms and non-farms for 1945 through 2012. USGS researchers used state- and county-level commercial fertilizer sales data from the Association of American Plant Food Control Officials (AAPFCO). State estimates were then allocated to the county level using fertilizer expenditure from the Census of Agriculture as county weights for farm fertilizer, and effective population density as county weights for nonfarm fertilizer. The data sources and further information are available in Table 1.
|Dataset name | Temporal coverage| Source| Website | Comments |
|-------------------|:-------------:|:---------:| ---------|:--------------------|
|Fertilizer data before 1985 | 1945 - 1985 | USGS | [Link](https://pubs.er.usgs.gov/publication/ofr90130) |Only has farm data.|
|Fertilizer data after 1986 | 1986 - 2012 | USGS | [Link](https://www.sciencebase.gov/catalog/item/5851b2d1e4b0f99207c4f238) |Published in 2017.|
|County background data | 2010 | US Census| [Link](https://www.census.gov/geo/maps-data/data/gazetteer2010.html) |Assume descriptors of counties do not change.|
|Manure data before 1997 | 1982 - 1997 | USGS | [link](https://pubs.usgs.gov/sir/2006/5012/) | Manure data for farms, reported every five years |
|Manure data in 2002 | 2002 | USGS | [link](https://pubs.usgs.gov/of/2013/1065/) | Published in 2013 |
|Manure data in 2007 and 2012| 2007 & 2012 | USGS | [link](https://www.sciencebase.gov/catalog/item/581ced4ee4b08da350d52303) | Published in 2017 |
## Data cleaning and processing
As the county-level fertilizer data were processed at different times and by different researchers, the format of the data is somewhat inconsistent. To spare users the time and effort of working with a complicated dataset, the author cleaned the data into __tidy data__ following these rules from [Hadley Wickham](http://r4ds.had.co.nz/tidy-data.html):
1. Each variable must have its own column.
2. Each observation must have its own row.
3. Each value must have its own cell.
Fig. 1 shows the rules visually.

Fig. 1 Following three rules makes a dataset tidy: variables are in columns, observations are in rows, and values are in cells.
(The description of tidy data was adapted from [_R for data science_](http://r4ds.had.co.nz/))
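As a minimal sketch of the reshaping used below (toy data only, not the package data), `tidyr::gather()` converts a wide table of yearly quantities into the long format:
```{r}
# Illustrative only: two counties with yearly quantities stored in wide format.
toy_wide <- data.frame(FIPS = c("01001", "01003"), Y45 = c(100, 200), Y50 = c(150, 250))
# gather() stacks the Y45:Y50 columns into key/value pairs (Year_temp, Quantity).
tidyr::gather(toy_wide, Year_temp, Quantity, Y45:Y50)
```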
### Import libraries and data
```{r}
require(tidyverse)
# county level data of fertilizer application.
#Source: https://www.sciencebase.gov/catalog/item/5851b2d1e4b0f99207c4f238
raw_data = read_csv("../data-raw/CNTY_FERT_1987-2012.csv")
#summary(raw_data)
# County summary from US census bureau.
# Source: https://www.census.gov/geo/maps-data/data/gazetteer2010.html
county_raw = read.table("../data-raw/Gaz_counties_national.txt", sep = "\t", header=TRUE)
# read in data, extracted from coverage in ArcGIS.
n45_64 <- read.table("../data-raw/cty_fert0.n45-64.txt", sep = ",", header = T)
n65_85 <- read.table("../data-raw/cty_fert0.n65-85.txt", sep = ",", header = T)
p45_64 <- read.table("../data-raw/cty_fert0.p45-64.txt", sep = ",", header = T)
p65_85 <- read.table("../data-raw/cty_fert0.p65-85.txt", sep = ",", header = T)
# merge nitrogen and P data together.
n45_85 = inner_join(n45_64, n65_85, by = c("FIPS","STATE","Rowid_"))
p45_85 = inner_join(p45_64, p65_85, by = c("FIPS","STATE","Rowid_"))
```
## Data cleaning
### Clean data before 1985
```{r}
# clean nitrogen and phosphorus data.
nitrogen_1985 = n45_85 %>%
select(-Rowid_) %>% # remove irrelevant info.
# add leading zeros for FIPS to make it 5 digits.
mutate(FIPS = str_pad(FIPS, 5, pad = "0")) %>%
gather(Year_temp, Quantity, Y45:Y85) %>%
mutate(Fertilizer = rep("N", length(.$Quantity)),
Farm.Type = rep("farm", length(.$Quantity)),
Year = paste("19",str_sub(Year_temp, start = 2),sep = "")
) %>%
select(-Year_temp)
phosphorus_1985 = p45_85 %>%
select(-Rowid_) %>% # remove irrelevant info.
mutate(FIPS = str_pad(FIPS, 5, pad = "0")) %>%
gather(Year_temp, Quantity, Y45:Y85) %>%
mutate(Fertilizer = rep("P", length(.$Quantity)),
Farm.Type = rep("farm", length(.$Quantity)),
Year = paste("19",str_sub(Year_temp, start = 2),sep = "")
) %>%
select(-Year_temp)
# clean dataset for data before 1985
clean_data_1985 = rbind(phosphorus_1985, nitrogen_1985)
```
### Clean data after 1987
```{r}
# remove duplicates in county data.
county_data = county_raw %>%
distinct(GEOID, .keep_all = TRUE) %>%
# select certain columns.
select(GEOID, ALAND, AWATER,INTPTLAT, INTPTLONG) %>%
mutate(FIPSno = GEOID) %>%
select(-GEOID)
# combine county data with county level fertilizer data.
county_summary = left_join(raw_data,county_data, by = "FIPSno")
clean_data = county_summary %>%
# remove some columns with FIPS numbers.
select(-c(FIPS_st, FIPS_co,FIPSno)) %>%
# wide to long dataset.
gather(Fert.Type, Quantity, farmN1987:nonfP2012) %>%
# separate the fert.type into three columns: farm type, fertilizer, year.
mutate(Year = str_sub(Fert.Type, start = -4),
Fertilizer = str_sub(Fert.Type, start = -5, end = -5),
Farm.Type = str_sub(Fert.Type, start = 1, end = 4)
) %>%
# replace nonf with nonfarm
mutate(Farm.Type = ifelse(Farm.Type == "nonf", "nonfarm", "farm")) %>%
# remove Fert.Type
select(-Fert.Type)
# extract county summaries info from clean data.
cnty_summary_1985 = county_summary %>%
select(FIPS,State, County, ALAND, AWATER, INTPTLAT, INTPTLONG) %>%
right_join(clean_data_1985, by = "FIPS")
# add data from 1945.
clean_data = rbind(clean_data, cnty_summary_1985) %>%
rename(Nutrient = Fertilizer) %>% # rename Fertilizer to Nutrient.
mutate(Input.Type = rep("Fertilizer")) # add a column labeling the input as Fertilizer (vs. Manure).
```
### Clean manure data before 1997
```{r}
# read in manure data from 1982 to 1997.
cnty_manure_97 = read_csv("../data-raw/cnty_manure_82-97.csv")
cnty_manure_summary = cnty_manure_97 %>%
select(-c(State, County)) %>%
gather(dummy, Quantity, N_1982:P_1997) %>% # dummy is a temporary column.
mutate(Farm.Type = rep("farm", length(.$FIPS)),
Input.Type = rep("Manure", length(.$FIPS))) %>%
separate(dummy, c("Nutrient", "Year"), sep = "_")
```
### Clean manure data after 1997
```{r}
# read in manure data.
cnty_manure_02 = read_csv("../data-raw/cnty_manure_2002.csv")
cnty_manure_07 = read_csv("../data-raw/cnty_manure_2007.csv")
cnty_manure_12 = read_csv("../data-raw/cnty_manure_2012.csv")
cnty_manure_02_12 = rbind(cnty_manure_02, cnty_manure_07, cnty_manure_12) %>%
select(-c(State, County)) %>%
gather(Nutrient, Quantity, N:P) %>%
mutate(Farm.Type = rep("farm", length(.$FIPS)),
Input.Type = rep("Manure", length(.$FIPS)))
```
### Save data as .rda with compression
```{r sava_data, eval=FALSE}
# connect manure data.
cnty_manure_summary = rbind(cnty_manure_summary,cnty_manure_02_12)
cnty_manure_all = county_summary %>%
select(FIPS,State, County, ALAND, AWATER, INTPTLAT, INTPTLONG) %>%
right_join(cnty_manure_summary, by = "FIPS")
clean_data = rbind(clean_data, cnty_manure_all)
# NOT RUN
# save cleaned data into .rda format.
save(clean_data, file = "../data/usfertilizer_county.rda")
```
## Future development plan
Planned future features include:
* Add missing data in the year of 1986.
* Develop a package to retrieve, analyze and visualize the fertilizer data in watersheds.
|
/scratch/gouwar.j/cran-all/cranData/usfertilizer/inst/doc/Data_sources_and_cleaning.Rmd
|
## ----setup, include = FALSE----------------------------------------------
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
## ---- eval= FALSE--------------------------------------------------------
# install.packages("usfertilizer")
## ---- message=FALSE, warning=FALSE---------------------------------------
require(usfertilizer)
require(tidyverse)
data("us_fertilizer_county")
## ------------------------------------------------------------------------
glimpse(us_fertilizer_county)
## ------------------------------------------------------------------------
# plot the top 10 counties for nitrogen application in year 2008.
# Reorder to make the plot cleaner.
year_plot = 2008
us_fertilizer_county %>%
filter(Nutrient == "N" & Year == year_plot & Input.Type == "Fertilizer" ) %>%
top_n(10, Quantity) %>%
ggplot(aes(x=reorder(paste(County,State, sep = ","), Quantity), Quantity, fill = Quantity))+
scale_fill_gradient(low = "blue", high = "darkblue")+
geom_col()+
ggtitle(paste("Top 10 counties with most fertilizer application in the year of", year_plot)) +
scale_y_continuous(name = "Nitrogen from commecial fertilization (kg)")+
scale_x_discrete(name = "Counties")+
coord_flip()+
theme_bw()
## ------------------------------------------------------------------------
# plot the top 10 states with P application in year 1980.
# Reorder to make the plot cleaner.
year_plot = 1980
us_fertilizer_county %>%
filter(Nutrient == "P" & Year == 1980 & Input.Type == "Fertilizer") %>%
group_by(State) %>%
summarise(p_application = sum(Quantity)) %>%
as.data.frame() %>%
top_n(10, p_application) %>%
ggplot(aes(x=reorder(State, p_application), p_application))+
scale_fill_gradient(low = "blue", high = "darkblue")+
geom_col()+
ggtitle(paste("Top 10 States with most Phosphrus application in the year of", year_plot)) +
scale_y_continuous(name = "Phosphrus from commecial fertilizer (kg)")+
scale_x_discrete(name = "States")+
theme_bw()+
coord_flip()
## ---- message=F, warning=F-----------------------------------------------
year_plot = seq(1945, 2010, 1)
states = c("NC","SC")
us_fertilizer_county %>%
filter(State %in% states & Year %in% year_plot &
Farm.Type == "farm" & Input.Type == "Fertilizer") %>%
group_by(State, Year, Nutrient) %>%
summarise(Quantity = sum(Quantity, na.rm = T)) %>%
ggplot(aes(x = as.numeric(Year), y = Quantity, color=State)) +
geom_point() +
geom_line()+
scale_x_continuous(name = "Year")+
scale_y_continuous(name = "Nutrient input quantity (kg)")+
facet_wrap(~Nutrient, scales = "free", ncol = 2)+
ggtitle("Estimated nutrient inputs into arable lands by commercial fertilizer\nfrom 1945 to 2010 in Carolinas")+
theme_bw()
## ------------------------------------------------------------------------
us_fertilizer_county %>%
filter(State %in% states & Year %in% year_plot &
Farm.Type == "farm" & Nutrient == "N") %>%
group_by(State, Year, Input.Type) %>%
summarise(Quantity = sum(Quantity, na.rm = T)) %>%
ggplot(aes(x = as.numeric(Year), y = Quantity, color=Input.Type)) +
geom_point() +
geom_line()+
scale_x_continuous(name = "Year")+
scale_y_continuous(name = "Nutrient input quantity (kg)")+
facet_wrap(~State, scales = "free", ncol = 2)+
ggtitle("Estimated nitrogen inputs into arable lands by commercial fertilizer and manure\nfrom 1945 to 2012 in Carolinas")+
theme_bw()
|
/scratch/gouwar.j/cran-all/cranData/usfertilizer/inst/doc/Introduction.R
|
---
title: "Introduction of usfertilizer, an R package"
author: "Wenlong Liu"
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{Introduction of usfertilizer, an R package}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r setup, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
```
## Preface
Nutrients from commercial fertilizer are an important input to soil and water systems, especially in agricultural areas. To evaluate water quality in a given watershed, it is critical to estimate, at least roughly, the quantity of fertilizer applied within it. Since 1990, researchers at the United States Geological Survey (USGS) have invested considerable time, manpower and effort in estimating fertilizer application at the county scale in the United States. Based on commercial fertilizer sales data, USGS researchers allocated the sold fertilizer to each county according to agricultural production, arable land, growing seasons, etc. Further details of the data sources and specific caveats are available via <https://wenlong-liu.github.io/usfertilizer/articles/Data_sources_and_cleaning.html>.
Although there is no perfect way to estimate nutrient application in a watershed, the USGS datasets have been carefully reviewed and can serve as an indication of nutrient inputs from commercial fertilizer and animal manure. Please employ these datasets at watershed or regional scales. Note that USGS does not endorse this package, and that data for 1986 are currently unavailable.
## Installation
You can install it from CRAN:
```{r, eval= FALSE}
install.packages("usfertilizer")
```
# Get started
## Import data and related libraries
```{r, message=FALSE, warning=FALSE}
require(usfertilizer)
require(tidyverse)
data("us_fertilizer_county")
```
## Summary of the dataset
The dataset, named `us_fertilizer_county`, contains `r length(us_fertilizer_county$FIPS)` observations and 11 variables. Details are available via `?us_fertilizer_county`.
```{r}
glimpse(us_fertilizer_county)
```
## Examples
### Example 1: Find the top 10 counties with the most nitrogen application in 2008
```{r}
# plot the top 10 counties for nitrogen application in year 2008.
# Reorder to make the plot cleaner.
year_plot = 2008
us_fertilizer_county %>%
filter(Nutrient == "N" & Year == year_plot & Input.Type == "Fertilizer" ) %>%
top_n(10, Quantity) %>%
ggplot(aes(x=reorder(paste(County,State, sep = ","), Quantity), Quantity, fill = Quantity))+
scale_fill_gradient(low = "blue", high = "darkblue")+
geom_col()+
ggtitle(paste("Top 10 counties with most fertilizer application in the year of", year_plot)) +
scale_y_continuous(name = "Nitrogen from commecial fertilization (kg)")+
scale_x_discrete(name = "Counties")+
coord_flip()+
theme_bw()
```
### Example 2: Find the top 10 states with the most phosphorus application in 1980
```{r}
# plot the top 10 states with P application in year 1980.
# Reorder to make the plot cleaner.
year_plot = 1980
us_fertilizer_county %>%
filter(Nutrient == "P" & Year == 1980 & Input.Type == "Fertilizer") %>%
group_by(State) %>%
summarise(p_application = sum(Quantity)) %>%
as.data.frame() %>%
top_n(10, p_application) %>%
ggplot(aes(x=reorder(State, p_application), p_application))+
scale_fill_gradient(low = "blue", high = "darkblue")+
geom_col()+
ggtitle(paste("Top 10 States with most Phosphrus application in the year of", year_plot)) +
scale_y_continuous(name = "Phosphrus from commecial fertilizer (kg)")+
scale_x_discrete(name = "States")+
theme_bw()+
coord_flip()
```
### Example 3: Plot the N and P input into farms for NC and SC from 1945 to 2010
```{r, message=F, warning=F}
year_plot = seq(1945, 2010, 1)
states = c("NC","SC")
us_fertilizer_county %>%
filter(State %in% states & Year %in% year_plot &
Farm.Type == "farm" & Input.Type == "Fertilizer") %>%
group_by(State, Year, Nutrient) %>%
summarise(Quantity = sum(Quantity, na.rm = T)) %>%
ggplot(aes(x = as.numeric(Year), y = Quantity, color=State)) +
geom_point() +
geom_line()+
scale_x_continuous(name = "Year")+
scale_y_continuous(name = "Nutrient input quantity (kg)")+
facet_wrap(~Nutrient, scales = "free", ncol = 2)+
ggtitle("Estimated nutrient inputs into arable lands by commercial fertilizer\nfrom 1945 to 2010 in Carolinas")+
theme_bw()
```
### Example 4: Plot the N input into farms from fertilizer and manure for NC and SC from 1945 to 2012
```{r}
us_fertilizer_county %>%
filter(State %in% states & Year %in% year_plot &
Farm.Type == "farm" & Nutrient == "N") %>%
group_by(State, Year, Input.Type) %>%
summarise(Quantity = sum(Quantity, na.rm = T)) %>%
ggplot(aes(x = as.numeric(Year), y = Quantity, color=Input.Type)) +
geom_point() +
geom_line()+
scale_x_continuous(name = "Year")+
scale_y_continuous(name = "Nutrient input quantity (kg)")+
facet_wrap(~State, scales = "free", ncol = 2)+
ggtitle("Estimated nitrogen inputs into arable lands by commercial fertilizer and manure\nfrom 1945 to 2012 in Carolinas")+
theme_bw()
```
## Comments and Questions.
If you have any problems or questions, feel free to open an issue [here](https://github.com/wenlong-liu/usfertilizer/issues).
## License
[GPL](https://github.com/wenlong-liu/usfertilizer/blob/master/lisence.txt)
## Code of conduct
Please note that this project is released with a [Contributor Code of Conduct](https://github.com/wenlong-liu/usfertilizer/blob/master/CONDUCT.md). By participating in this project you agree to abide by its terms.
|
/scratch/gouwar.j/cran-all/cranData/usfertilizer/inst/doc/Introduction.Rmd
|
---
title: "Data sources and processing procedures"
author: "Wenlong"
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{Data sources and processing procedures}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r setup, include=FALSE}
knitr::opts_chunk$set(echo = TRUE, message = FALSE, warning = FALSE, eval = FALSE)
```
## Introduction of data sources and availability
The data used in this package were originally compiled and processed by the United States Geological Survey (USGS). The fertilizer data include applications on both farms and non-farms for 1945 through 2012. USGS researchers used state- and county-level commercial fertilizer sales data from the Association of American Plant Food Control Officials (AAPFCO). State estimates were then allocated to the county level using fertilizer expenditure from the Census of Agriculture as county weights for farm fertilizer, and effective population density as county weights for nonfarm fertilizer. The data sources and further information are available in Table 1.
|Dataset name | Temporal coverage| Source| Website | Comments |
|-------------------|:-------------:|:---------:| ---------|:--------------------|
|Fertilizer data before 1985 | 1945 - 1985 | USGS | [Link](https://pubs.er.usgs.gov/publication/ofr90130) |Only has farm data.|
|Fertilizer data after 1986 | 1986 - 2012 | USGS | [Link](https://www.sciencebase.gov/catalog/item/5851b2d1e4b0f99207c4f238) |Published in 2017.|
|County background data | 2010 | US Census| [Link](https://www.census.gov/geo/maps-data/data/gazetteer2010.html) |Assume descriptors of counties do not change.|
|Manure data before 1997 | 1982 - 1997 | USGS | [link](https://pubs.usgs.gov/sir/2006/5012/) | Manure data for farms, reported every five years |
|Manure data in 2002 | 2002 | USGS | [link](https://pubs.usgs.gov/of/2013/1065/) | Published in 2013 |
|Manure data in 2007 and 2012| 2007 & 2012 | USGS | [link](https://www.sciencebase.gov/catalog/item/581ced4ee4b08da350d52303) | Published in 2017 |
## Data cleaning and processing
As the county-level fertilizer data were processed at different times and by different researchers, the format of the data is somewhat inconsistent. To spare users the time and effort of working with a complicated dataset, the author cleaned the data into __tidy data__ following these rules from [Hadley Wickham](http://r4ds.had.co.nz/tidy-data.html):
1. Each variable must have its own column.
2. Each observation must have its own row.
3. Each value must have its own cell.
Fig. 1 shows the rules visually.

Fig. 1 Following three rules makes a dataset tidy: variables are in columns, observations are in rows, and values are in cells.
(The description of tidy data was adapted from [_R for data science_](http://r4ds.had.co.nz/))
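As a minimal sketch of the reshaping used below (toy data only, not the package data), `tidyr::gather()` converts a wide table of yearly quantities into the long format:
```{r}
# Illustrative only: two counties with yearly quantities stored in wide format.
toy_wide <- data.frame(FIPS = c("01001", "01003"), Y45 = c(100, 200), Y50 = c(150, 250))
# gather() stacks the Y45:Y50 columns into key/value pairs (Year_temp, Quantity).
tidyr::gather(toy_wide, Year_temp, Quantity, Y45:Y50)
```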
### Import libraries and data
```{r}
require(tidyverse)
# county level data of fertilizer application.
#Source: https://www.sciencebase.gov/catalog/item/5851b2d1e4b0f99207c4f238
raw_data = read_csv("../data-raw/CNTY_FERT_1987-2012.csv")
#summary(raw_data)
# County summary from US census bureau.
# Source: https://www.census.gov/geo/maps-data/data/gazetteer2010.html
county_raw = read.table("../data-raw/Gaz_counties_national.txt", sep = "\t", header=TRUE)
# read in data, extracted from coverage in ArcGIS.
n45_64 <- read.table("../data-raw/cty_fert0.n45-64.txt", sep = ",", header = T)
n65_85 <- read.table("../data-raw/cty_fert0.n65-85.txt", sep = ",", header = T)
p45_64 <- read.table("../data-raw/cty_fert0.p45-64.txt", sep = ",", header = T)
p65_85 <- read.table("../data-raw/cty_fert0.p65-85.txt", sep = ",", header = T)
# merge nitrogen and P data together.
n45_85 = inner_join(n45_64, n65_85, by = c("FIPS","STATE","Rowid_"))
p45_85 = inner_join(p45_64, p65_85, by = c("FIPS","STATE","Rowid_"))
```
## Data cleaning
### Clean data before 1985
```{r}
# clean nitrogen and phosphorus data.
nitrogen_1985 = n45_85 %>%
select(-Rowid_) %>% # remove irrelevant info.
# add leading zeros for FIPS to make it 5 digits.
mutate(FIPS = str_pad(FIPS, 5, pad = "0")) %>%
gather(Year_temp, Quantity, Y45:Y85) %>%
mutate(Fertilizer = rep("N", length(.$Quantity)),
Farm.Type = rep("farm", length(.$Quantity)),
Year = paste("19",str_sub(Year_temp, start = 2),sep = "")
) %>%
select(-Year_temp)
phosphorus_1985 = p45_85 %>%
select(-Rowid_) %>% # remove irrelevant info.
mutate(FIPS = str_pad(FIPS, 5, pad = "0")) %>%
gather(Year_temp, Quantity, Y45:Y85) %>%
mutate(Fertilizer = rep("P", length(.$Quantity)),
Farm.Type = rep("farm", length(.$Quantity)),
Year = paste("19",str_sub(Year_temp, start = 2),sep = "")
) %>%
select(-Year_temp)
# clean dataset for data before 1985
clean_data_1985 = rbind(phosphorus_1985, nitrogen_1985)
```
### Clean data after 1987
```{r}
# remove duplicates in county data.
county_data = county_raw %>%
distinct(GEOID, .keep_all = TRUE) %>%
# select certain columns.
select(GEOID, ALAND, AWATER,INTPTLAT, INTPTLONG) %>%
mutate(FIPSno = GEOID) %>%
select(-GEOID)
# combine county data with county level fertilizer data.
county_summary = left_join(raw_data,county_data, by = "FIPSno")
clean_data = county_summary %>%
# remove some columns with FIPS numbers.
select(-c(FIPS_st, FIPS_co,FIPSno)) %>%
# wide to long dataset.
gather(Fert.Type, Quantity, farmN1987:nonfP2012) %>%
# separate the fert.type into three columns: farm type, fertilizer, year.
mutate(Year = str_sub(Fert.Type, start = -4),
Fertilizer = str_sub(Fert.Type, start = -5, end = -5),
Farm.Type = str_sub(Fert.Type, start = 1, end = 4)
) %>%
# replace nonf with nonfarm
mutate(Farm.Type = ifelse(Farm.Type == "nonf", "nonfarm", "farm")) %>%
# remove Fert.Type
select(-Fert.Type)
# extract county summaries info from clean data.
cnty_summary_1985 = county_summary %>%
select(FIPS,State, County, ALAND, AWATER, INTPTLAT, INTPTLONG) %>%
right_join(clean_data_1985, by = "FIPS")
# add data from 1945.
clean_data = rbind(clean_data, cnty_summary_1985) %>%
rename(Nutrient = Fertilizer) %>% # rename Fertilizer to Nutrient.
mutate(Input.Type = rep("Fertilizer")) # add a column labeling the input as Fertilizer (vs. Manure).
```
### Clean manure data before 1997
```{r}
# read in manure data from 1982 to 1997.
cnty_manure_97 = read_csv("../data-raw/cnty_manure_82-97.csv")
cnty_manure_summary = cnty_manure_97 %>%
select(-c(State, County)) %>%
gather(dummy, Quantity, N_1982:P_1997) %>% # dummy is a temporary column.
mutate(Farm.Type = rep("farm", length(.$FIPS)),
Input.Type = rep("Manure", length(.$FIPS))) %>%
separate(dummy, c("Nutrient", "Year"), sep = "_")
```
### Clean manure data after 1997
```{r}
# read in manure data.
cnty_manure_02 = read_csv("../data-raw/cnty_manure_2002.csv")
cnty_manure_07 = read_csv("../data-raw/cnty_manure_2007.csv")
cnty_manure_12 = read_csv("../data-raw/cnty_manure_2012.csv")
cnty_manure_02_12 = rbind(cnty_manure_02, cnty_manure_07, cnty_manure_12) %>%
select(-c(State, County)) %>%
gather(Nutrient, Quantity, N:P) %>%
mutate(Farm.Type = rep("farm", length(.$FIPS)),
Input.Type = rep("Manure", length(.$FIPS)))
```
### Save data as .rda with compression
```{r sava_data, eval=FALSE}
# connect manure data.
cnty_manure_summary = rbind(cnty_manure_summary,cnty_manure_02_12)
cnty_manure_all = county_summary %>%
select(FIPS,State, County, ALAND, AWATER, INTPTLAT, INTPTLONG) %>%
right_join(cnty_manure_summary, by = "FIPS")
clean_data = rbind(clean_data, cnty_manure_all)
# NOT RUN
# save cleaned data into .rda format.
save(clean_data, file = "../data/usfertilizer_county.rda")
```
## Future development plan
Planned future features include:
* Add missing data in the year of 1986.
* Develop a package to retrieve, analyze and visualize the fertilizer data in watersheds.
|
/scratch/gouwar.j/cran-all/cranData/usfertilizer/vignettes/Data_sources_and_cleaning.Rmd
|
---
title: "Introduction of usfertilizer, an R package"
author: "Wenlong Liu"
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{Introduction of usfertilizer, an R package}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r setup, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
```
## Preface
Nutrients from commercial fertilizer are an important input to soil and water systems, especially in agricultural areas. To evaluate water quality in a given watershed, it is critical to estimate, at least roughly, the quantity of fertilizer applied within it. Since 1990, researchers at the United States Geological Survey (USGS) have invested considerable time, manpower and effort in estimating fertilizer application at the county scale in the United States. Based on commercial fertilizer sales data, USGS researchers allocated the sold fertilizer to each county according to agricultural production, arable land, growing seasons, etc. Further details of the data sources and specific caveats are available via <https://wenlong-liu.github.io/usfertilizer/articles/Data_sources_and_cleaning.html>.
Although there is no perfect way to estimate nutrient application in a watershed, the USGS datasets have been carefully reviewed and can serve as an indication of nutrient inputs from commercial fertilizer and animal manure. Please employ these datasets at watershed or regional scales. Note that USGS does not endorse this package, and that data for 1986 are currently unavailable.
## Installation
You can install it from CRAN:
```{r, eval= FALSE}
install.packages("usfertilizer")
```
# Get started
## Import data and related libraries
```{r, message=FALSE, warning=FALSE}
require(usfertilizer)
require(tidyverse)
data("us_fertilizer_county")
```
## Summary of the dataset
The dataset, named `us_fertilizer_county`, contains `r length(us_fertilizer_county$FIPS)` observations and 11 variables. Details are available via `?us_fertilizer_county`.
```{r}
glimpse(us_fertilizer_county)
```
## Examples
### Example 1: Find the top 10 counties with the most nitrogen application in 2008
```{r}
# plot the top 10 counties for nitrogen application in year 2008.
# Reorder to make the plot cleaner.
year_plot = 2008
us_fertilizer_county %>%
filter(Nutrient == "N" & Year == year_plot & Input.Type == "Fertilizer" ) %>%
top_n(10, Quantity) %>%
ggplot(aes(x=reorder(paste(County,State, sep = ","), Quantity), Quantity, fill = Quantity))+
scale_fill_gradient(low = "blue", high = "darkblue")+
geom_col()+
ggtitle(paste("Top 10 counties with most fertilizer application in the year of", year_plot)) +
scale_y_continuous(name = "Nitrogen from commecial fertilization (kg)")+
scale_x_discrete(name = "Counties")+
coord_flip()+
theme_bw()
```
### Example 2: Find the top 10 states with the most phosphorus application in 1980
```{r}
# plot the top 10 states with P application in year 1980.
# Reorder to make the plot cleaner.
year_plot = 1980
us_fertilizer_county %>%
filter(Nutrient == "P" & Year == 1980 & Input.Type == "Fertilizer") %>%
group_by(State) %>%
summarise(p_application = sum(Quantity)) %>%
as.data.frame() %>%
top_n(10, p_application) %>%
ggplot(aes(x=reorder(State, p_application), p_application))+
scale_fill_gradient(low = "blue", high = "darkblue")+
geom_col()+
ggtitle(paste("Top 10 States with most Phosphrus application in the year of", year_plot)) +
scale_y_continuous(name = "Phosphrus from commecial fertilizer (kg)")+
scale_x_discrete(name = "States")+
theme_bw()+
coord_flip()
```
### Example 3: Plot the N and P input into farms for NC and SC from 1945 to 2010
```{r, message=F, warning=F}
year_plot = seq(1945, 2010, 1)
states = c("NC","SC")
us_fertilizer_county %>%
filter(State %in% states & Year %in% year_plot &
Farm.Type == "farm" & Input.Type == "Fertilizer") %>%
group_by(State, Year, Nutrient) %>%
summarise(Quantity = sum(Quantity, na.rm = T)) %>%
ggplot(aes(x = as.numeric(Year), y = Quantity, color=State)) +
geom_point() +
geom_line()+
scale_x_continuous(name = "Year")+
scale_y_continuous(name = "Nutrient input quantity (kg)")+
facet_wrap(~Nutrient, scales = "free", ncol = 2)+
ggtitle("Estimated nutrient inputs into arable lands by commercial fertilizer\nfrom 1945 to 2010 in Carolinas")+
theme_bw()
```
### Example 4: Plot the N input into farms from fertilizer and manure for NC and SC from 1945 to 2012
```{r}
us_fertilizer_county %>%
filter(State %in% states & Year %in% year_plot &
Farm.Type == "farm" & Nutrient == "N") %>%
group_by(State, Year, Input.Type) %>%
summarise(Quantity = sum(Quantity, na.rm = T)) %>%
ggplot(aes(x = as.numeric(Year), y = Quantity, color=Input.Type)) +
geom_point() +
geom_line()+
scale_x_continuous(name = "Year")+
scale_y_continuous(name = "Nutrient input quantity (kg)")+
facet_wrap(~State, scales = "free", ncol = 2)+
ggtitle("Estimated nitrogen inputs into arable lands by commercial fertilizer and manure\nfrom 1945 to 2012 in Carolinas")+
theme_bw()
```
## Comments and Questions.
If you have any problems or questions, feel free to open an issue [here](https://github.com/wenlong-liu/usfertilizer/issues).
## License
[GPL](https://github.com/wenlong-liu/usfertilizer/blob/master/lisence.txt)
## Code of conduct
Please note that this project is released with a [Contributor Code of Conduct](https://github.com/wenlong-liu/usfertilizer/blob/master/CONDUCT.md). By participating in this project you agree to abide by its terms.
|
/scratch/gouwar.j/cran-all/cranData/usfertilizer/vignettes/Introduction.Rmd
|
#' Data from ACTG315 trial of HIV viral load in adults undergoing ART
#'
#' Data from the ACTG315 clinical trial of HIV-infected adults undergoing ART.
#' Data are included for 46 individuals, with HIV viral load measurements observed
#' on specific days up to 28 weeks after treatment initiation,
#' and converted to log10 RNA copies/ml. The RNA assay detection threshold was 100 copies/ml.
#' Additional columns include patient identifiers and CD4 T cell counts.
#'
#' @docType data
#'
#' @usage data(actg315raw)
#'
#' @format A data frame with 361 rows and 5 columns:
#' \describe{
#' \item{Obs.No}{Row number}
#' \item{Patid}{Numerical patient identifier}
#' \item{Day}{Time of each observation, in days since treatment initiation}
#' \item{log10.RNA.}{HIV viral load measurements, in log10 RNA copies/ml}
#' \item{CD4}{CD4 T cell counts, in cells/mm^3}
#' }
#'
#' @keywords datasets
#'
#' @references Lederman et al (1998) JID 178(1), 70–79; Connick et al (2000) JID 181(1), 358–363;
#' Wu and Ding (1999) Biometrics 55(2), 410–418.
#'
#' @source \href{https://sph.uth.edu/divisions/biostatistics/wu/datasets/ACTG315LongitudinalDataViralLoad.htm}{Hulin Wu, Data Sets}
#'
#' @examples
#' library(dplyr)
#' data(actg315raw)
#'
#' actg315 <- actg315raw %>%
#' mutate(vl = 10^log10.RNA.) %>%
#' select(id = Patid, time = Day, vl)
#'
#' print(head(actg315))
#'
#' \donttest{plot_data(actg315, detection_threshold = 100)}
"actg315raw"
|
/scratch/gouwar.j/cran-all/cranData/ushr/R/actg315_data.R
|
#' Evaluate error metric between data and model prediction
#'
#' For a given parameter set, this function computes the predicted viral load curve and evaluates the error metric between the prediction and observed data (to be passed to optim).
#'
#' @param params named vector of the parameters from which the model prediction should be generated.
#' @param param_names names of parameter vector.
#' @param free_param_index logical TRUE/FALSE vector indicating whether the parameters A, delta, B, gamma are to be recovered. This should be c(TRUE, TRUE, TRUE, TRUE) for the biphasic model and c(FALSE, FALSE, TRUE, TRUE) for the single phase model.
#' @param data dataframe with columns for the subject's viral load measurements ('vl'), and timing of sampling ('time')
#' @param model_list character indicating which model is being fit. Can be either 'four' for the biphasic model, or 'two' for the single phase model.
#' @param inv_param_transform_fn list of transformation functions to be used when back-transforming the transformed parameters. Should be the inverse of the forward transformation functions. Defaults to exponential.
#'
get_error <- function(params, param_names, free_param_index, data, model_list,
inv_param_transform_fn){
# Free and fixed params are log-transformed so their values are unconstrained during optimization
# For model evaluations they must first be back-transformed
untransformed_params <- get_transformed_params(params = params, param_transform_fn = inv_param_transform_fn[free_param_index])
names(untransformed_params) <- param_names[free_param_index]
# Simulate VL with current parameters --------------------
timevec <- data$time
VLdata <- data$vl
# NB: the type of model fit depends on the number of fitted params (2 = single phase, 4 = biphasic)
if (model_list == 'two'){
VLoutput <- get_singlephase(params = untransformed_params, timevec)
} else if (model_list == 'four'){
VLoutput <- get_biphasic(params = untransformed_params, timevec)
}
## Calculate SSRs and associated errors --------------------
# 1. transform VL data and output to log10 scale
transformed_data <- transformVL(VLdata)
transformed_output <- transformVL(VLoutput)
# 2. calculate residuals and SSRs
resids <- transformed_data - transformed_output
SSRs <- sum(resids^2)
# 3. calculate error to be minimised: negloglik from Hogan et al (2015)
# (correct up to a constant for each time-series)
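# i.e. assuming i.i.d. Gaussian residuals with the variance profiled out at its MLE,
# the negative log-likelihood reduces to 0.5 * n * log(SSR) plus terms that do not
# depend on the model parameters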
negloglik <- 0.5 * length(VLdata) * log(SSRs)
return(negloglik)
}
#' Fit model to data using optim
#'
#' This function uses optim to fit either the biphasic or single phase model to data from a given subject
#' @param param_names names of parameter vector.
#' @param initial_params named vector of the initial parameter guess.
#' @param free_param_index logical vector indicating whether the parameters A, delta, B, gamma are to be recovered. This should be c(TRUE, TRUE, TRUE, TRUE) for the biphasic model and c(FALSE, FALSE, TRUE, TRUE) for the single phase model.
#' @param data dataframe with columns for the subject's viral load measurements ('vl'), and timing of sampling ('time')
#' @param model_list character indicating which model is being fit. Can be either 'four' for the biphasic model, or 'two' for the single phase model. Defaults to 'four'.
#' @param forward_param_transform_fn list of transformation functions to be used when fitting the model in optim. Defaults to log transformations for all parameters (to allow unconstrained optimization).
#' @param inv_param_transform_fn list of transformation functions to be used when back-transforming the transformed parameters. Should be the inverse of the forward transformation functions. Defaults to exponential.
#' @param searchmethod optimization algorithm to be used in optim. Defaults to Nelder-Mead.
#'
get_optim_fit <- function(initial_params, param_names, free_param_index, data,
model_list = "four",
forward_param_transform_fn = forward_param_transform_fn,
inv_param_transform_fn = inv_param_transform_fn,
searchmethod){
tmp_params <- initial_params[free_param_index]
transformed_params <- get_transformed_params(params = tmp_params, param_transform_fn = forward_param_transform_fn[free_param_index])
fit <- stats::optim(par = transformed_params, # fitting free parameters only
fn = get_error,
method = searchmethod,
param_names = param_names,
free_param_index = free_param_index,
data = data,
inv_param_transform_fn = inv_param_transform_fn,
model_list = model_list,
hessian = TRUE)
return(fit)
}
#' Fit model and obtain parameter estimates
#'
#' This function fits either the biphasic or single phase model to the processed data and extracts the best-fit parameters.
#'
#' @param data dataframe with columns for each subject's identifier ('id'), viral load measurements ('vl'), and timing of sampling ('time')
#' @param id_vector vector of identifiers corresponding to the subjects to be fitted.
#' @param param_names names of parameter vector.
#' @param initial_params named vector of the initial parameter guess.
#' @param free_param_index logical vector indicating whether the parameters A, delta, B, gamma are to be recovered. This should be c(TRUE, TRUE, TRUE, TRUE) for the biphasic model and c(FALSE, FALSE, TRUE, TRUE) for the single phase model.
#' @param n_min_biphasic the minimum number of data points required to fit the biphasic model. Defaults to 6. It is highly advised not to go below this threshold.
#' @param model_list character indicating which model is to be fit. Can be either 'four' for the biphasic model, or 'two' for the single phase model. Defaults to 'four'.
#' @param whichcurve indicates which model prediction function to use. Should be get_biphasic for the biphasic model or get_singlephase for the singlephase model. Defaults to get_biphasic.
#' @param forward_param_transform_fn list of transformation functions to be used when fitting the model in optim. Defaults to log transformations for all parameters (to allow unconstrained optimization).
#' @param inv_param_transform_fn list of transformation functions to be used when back-transforming the transformed parameters. Should be the inverse of the forward transformation functions. Defaults to exponential.
#' @param searchmethod optimization algorithm to be used in optim. Defaults to Nelder-Mead.
#'
fit_model <- function(data, id_vector, param_names,
initial_params, free_param_index,
n_min_biphasic,
model_list, whichcurve = get_biphasic,
forward_param_transform_fn, inv_param_transform_fn,
searchmethod){
fitlist <- vector("list", length = length(id_vector))
CIlist <- vector("list", length = length(id_vector))
model_fitlist <- vector("list", length = length(id_vector))
nofitlist <- vector("list", length = length(id_vector))
noCIlist <- vector("list", length = length(id_vector))
for (i in 1:length(id_vector)){
datasubset = data %>% filter(id == id_vector[i])
# For the biphasic model: if there are fewer than the user-defined 'n_min_biphasic' data points, store as a failed fit and move on to the next subject
if( (nrow(datasubset) < n_min_biphasic) && (model_list == "four")){
nofitlist[[i]] <- data.frame(index = i, id = id_vector[i], reason = "not enough data")
next
}
fitlist[[i]] <- data.frame(index = i, id = id_vector[i])
fit <- get_optim_fit(initial_params, param_names, free_param_index, data = datasubset,
forward_param_transform_fn = forward_param_transform_fn,
inv_param_transform_fn = inv_param_transform_fn,
model_list = model_list,
searchmethod = searchmethod)
best_param <- get_params(fit, initial_params, free_param_index, param_names, inv_param_transform_fn, index = i)
# To get CIs, the Hessian has to be invertible (determinant nonzero).
# record id of patients for which CIs cannot be obtained
# for those which do have CIs (i.e. have reliable fits) - get fitted model prediction
if (all(!is.na(fit$hessian)) && det(fit$hessian) != 0) {
trySolve <- try(solve(fit$hessian))
if (inherits(trySolve, "try-error")) {
tmpCI <- NA
} else {
tmpCI <- get_CI(fit)
}
if( any(is.na(tmpCI) ) ){
noCIlist[[i]] <- data.frame(index = i, id = id_vector[i], reason = "could not obtain CIs")
} else{
CIlist[[i]] <- tmpCI %>% mutate(id = id_vector[i])
model_fitlist[[i]] <- get_curve(data = datasubset, best_param, param_names = param_names[free_param_index], whichcurve = whichcurve)
}
} else {
noCIlist[[i]] <- data.frame(index = i, id = id_vector[i], reason = "could not obtain CIs")
}
}
notfitted <- bind_rows(nofitlist) %>% rbind( bind_rows(noCIlist))
if (nrow(notfitted)) {
notfitted <- notfitted %>% filter(!is.na(index)) %>% arrange(index)
}
if(length(notfitted$id) > 0){
fitted <- bind_rows(fitlist) %>% filter(!id %in% notfitted$id)
} else{
fitted <- bind_rows(fitlist)
}
return(list(model_fitlist = model_fitlist, CIlist = CIlist, fitted = fitted, notfitted = notfitted))
}
|
/scratch/gouwar.j/cran-all/cranData/ushr/R/fitting_fns.R
|
#' Compute the triphasic model curve
#'
#' This function calculates the triphasic model, V(t), for a vector of input times, t
#' @param params named numeric vector of all parameters needed to compute the triphasic model, V(t)
#' @param timevec numeric vector of the times, t, at which V(t) should be calculated
#' @return numeric vector of viral load predictions, V(t), for each time point in 'timevec'
#' @export
#' @examples
#'
#' get_triphasic(params = c(A = 10000, delta = 1, A_b = 1000, delta_b = 0.5, B = 100, gamma = 0.03),
#' timevec = seq(1, 100, length.out = 100))
#'
get_triphasic <- function(params, timevec){
if(length(params) < 6){
stop("The triphasic model needs 6 parameters: A, delta, A_b, delta_b, B, gamma")
}
params["A"] * exp(- params["delta"] * timevec) + params["A_b"] * exp(- params["delta_b"] * timevec + params["B"] * exp(- params["gamma"] * timevec))
}
#' Evaluate error metric between data and model prediction
#'
#' For a given parameter set, this function computes the predicted viral load curve and evaluates the error metric between the prediction and observed data (to be passed to optim).
#'
#' @param params named vector of the parameters from which the model prediction should be generated.
#' @param param_names names of parameter vector.
#' @param free_param_index logical TRUE/FALSE vector indicating whether the parameters A, delta, A_b, delta_b, B, gamma are to be recovered. This should be c(TRUE, TRUE, TRUE, TRUE, TRUE, TRUE) for the triphasic model.
#' @param data dataframe with columns for the subject's viral load measurements ('vl'), and timing of sampling ('time').
#' @param inv_param_transform_fn list of transformation functions to be used when back-transforming the transformed parameters. Should be the inverse of the forward transformation functions. Defaults to exponential.
#'
get_error_triphasic <- function(params, param_names, free_param_index, data,
inv_param_transform_fn){
# Free and fixed params are transformed so their values are unconstrained during optimization
# For model evaluations they must first be back-transformed
untransformed_params <- get_transformed_params(params = params,
param_transform_fn = inv_param_transform_fn[free_param_index])
names(untransformed_params) <- param_names[free_param_index]
# Simulate VL with current parameters --------------------
timevec <- data$time
VLdata <- data$vl
VLoutput <- get_triphasic(params = untransformed_params, timevec)
## Calculate SSRs and associated errors --------------------
# 1. transform VL data and output to log10 scale
transformed_data <- transformVL(VLdata)
transformed_output <- transformVL(VLoutput)
# 2. calculate residuals and SSRs
resids <- transformed_data - transformed_output
SSRs <- sum(resids^2)
# 3. calculate error to be minimised: negloglik from Hogan et al (2015)
# (correct up to a constant for each time-series)
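# i.e. assuming i.i.d. Gaussian residuals with the variance profiled out at its MLE,
# the negative log-likelihood reduces to 0.5 * n * log(SSR) plus terms that do not
# depend on the model parameters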
negloglik <- 0.5 * length(VLdata) * log(SSRs)
return(negloglik)
}
#' Fit model and obtain parameter estimates
#'
#' This function fits the triphasic model to the processed data and extracts the best-fit parameters.
#'
#' @param data dataframe with columns for each subject's identifier ('id'), viral load measurements ('vl'), and timing of sampling ('time')
#' @param id_vector vector of identifiers corresponding to the subjects to be fitted.
#' @param param_names names of parameter vector.
#' @param initial_params named vector of the initial parameter guess.
#' @param free_param_index logical vector indicating whether the parameters A, delta, A_b, delta_b, B, gamma are to be recovered. This should be c(TRUE, TRUE, TRUE, TRUE, TRUE, TRUE) for the triphasic model.
#' @param n_min_triphasic the minimum number of data points required to fit the triphasic model.
#' @param forward_param_transform_fn list of transformation functions to be used when fitting the model in optim. Defaults to log transformations for all parameters (to allow unconstrained optimization).
#' @param inv_param_transform_fn list of transformation functions to be used when back-transforming the transformed parameters. Should be the inverse of the forward transformation functions. Defaults to exponential.
#' @param searchmethod optimization algorithm to be used in optim. Defaults to Nelder-Mead.
#'
fit_model_triphasic <- function(data, id_vector, param_names,
initial_params, free_param_index,
n_min_triphasic,
forward_param_transform_fn, inv_param_transform_fn,
searchmethod){
fitlist <- vector("list", length = length(id_vector))
CIlist <- vector("list", length = length(id_vector))
model_fitlist <- vector("list", length = length(id_vector))
nofitlist <- vector("list", length = length(id_vector))
noCIlist <- vector("list", length = length(id_vector))
for (i in 1:length(id_vector)){
datasubset = data %>% filter(id == id_vector[i])
# If there are fewer than the user-defined 'n_min_triphasic' data points,
# store as a failed fit and move on to the next subject
if (nrow(datasubset) < n_min_triphasic) {
nofitlist[[i]] <- data.frame(index = i, id = id_vector[i], reason = "not enough data")
next
}
fitlist[[i]] <- data.frame(index = i, id = id_vector[i])
fit <- get_optim_fit_triphasic(initial_params, param_names, free_param_index, data = datasubset,
forward_param_transform_fn = forward_param_transform_fn,
inv_param_transform_fn = inv_param_transform_fn,
searchmethod = searchmethod)
best_param <- get_params(fit, initial_params, free_param_index, param_names, inv_param_transform_fn, index = i)
# To get CIs, the Hessian has to be invertible (determinant nonzero).
# record id of patients for which CIs cannot be obtained
# for those which do have CIs (i.e. have reliable fits) - get fitted model prediction
if (all(!is.na(fit$hessian)) && det(fit$hessian) != 0) {
trySolve <- try(solve(fit$hessian))
if (inherits(trySolve, "try-error")) {
tmpCI <- NA
} else {
tmpCI <- get_CI(fit)
}
if( any(is.na(tmpCI) ) ){
noCIlist[[i]] <- data.frame(index = i, id = id_vector[i], reason = "could not obtain CIs")
} else {
CIlist[[i]] <- tmpCI %>% mutate(id = id_vector[i])
model_fitlist[[i]] <- get_curve(data = datasubset, best_param,
param_names = param_names[free_param_index], whichcurve = get_triphasic)
}
} else {
noCIlist[[i]] <- data.frame(index = i, id = id_vector[i], reason = "could not obtain CIs")
}
}
notfitted <- bind_rows(nofitlist) %>% rbind( bind_rows(noCIlist))
if (nrow(notfitted)) {
notfitted <- notfitted %>% filter(!is.na(index)) %>% arrange(index)
}
if(length(notfitted$id) > 0){
fitted <- bind_rows(fitlist) %>% filter(!id %in% notfitted$id)
} else{
fitted <- bind_rows(fitlist)
}
return(list(model_fitlist = model_fitlist, CIlist = CIlist, fitted = fitted, notfitted = notfitted))
}
#' Fit triphasic model to data using optim
#'
#' This function uses optim to fit the triphasic model to data from a given subject
#' @param param_names names of parameter vector.
#' @param initial_params named vector of the initial parameter guess.
#' @param free_param_index logical vector indicating whether the parameters A, delta, A_b, delta_b, B, gamma are to be recovered. This should be c(TRUE, TRUE, TRUE, TRUE, TRUE, TRUE) for the triphasic model.
#' @param data dataframe with columns for the subject's viral load measurements ('vl'), and timing of sampling ('time')
#' @param forward_param_transform_fn list of transformation functions to be used when fitting the model in optim. Defaults to log transformations for all parameters (to allow unconstrained optimization).
#' @param inv_param_transform_fn list of transformation functions to be used when back-transforming the transformed parameters. Should be the inverse of the forward transformation functions. Defaults to exponential.
#' @param searchmethod optimization algorithm to be used in optim. Defaults to Nelder-Mead.
#'
get_optim_fit_triphasic <- function(initial_params, param_names, free_param_index, data,
forward_param_transform_fn = forward_param_transform_fn,
inv_param_transform_fn = inv_param_transform_fn,
searchmethod){
tmp_params <- initial_params[free_param_index]
transformed_params <- get_transformed_params(params = tmp_params,
param_transform_fn = forward_param_transform_fn[free_param_index])
fit <- stats::optim(par = transformed_params,
fn = get_error_triphasic,
method = searchmethod,
param_names = param_names,
free_param_index = free_param_index,
data = data,
inv_param_transform_fn = inv_param_transform_fn,
hessian = TRUE)
return(fit)
}
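# Illustrative sketch (kept as comments; the subject data frame and values below
# are synthetic placeholders, not package data): fitting the triphasic model to
# a single subject with the defaults used by ushr_triphasic().
# subject_data <- data.frame(id = "S1",
#                            time = c(0, 3, 7, 14, 21, 28, 60, 90, 120),
#                            vl = c(1e5, 4e4, 1e4, 3e3, 1e3, 4e2, 1e2, 50, 30))
# init <- c(A = 10000, delta = 1, A_b = 1000, delta_b = 0.15, B = 10, gamma = 0.05)
# fit <- get_optim_fit_triphasic(initial_params = init, param_names = names(init),
#                                free_param_index = rep(TRUE, 6),
#                                data = subject_data,
#                                forward_param_transform_fn = list(log, log, log, log, log, log),
#                                inv_param_transform_fn = list(exp, exp, exp, exp, exp, exp),
#                                searchmethod = "Nelder-Mead")
# fit$par then holds the fitted parameters on the transformed (log) scale.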
#' Switch names of rate parameters
#'
#' This function switches the names of delta and gamma estimates if gamma > delta.
#' @param triphasicCI data frame of parameter estimates and confidence intervals for the triphasic model.
#'
tri_switch_params <- function(triphasicCI){
replace_cols <- c("estimate", "lowerCI", "upperCI")
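# Three pairwise comparisons (gamma vs delta_b, delta_b vs delta, then gamma vs
# delta_b again) act as a small bubble sort, so that after relabelling
# delta >= delta_b >= gamma, i.e. 'delta' always denotes the fastest decay rate.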
if (triphasicCI$estimate[triphasicCI$param == "gamma"] > triphasicCI$estimate[triphasicCI$param == "delta_b"]) {
tmpRate <- triphasicCI[triphasicCI$param == "gamma", replace_cols]
tmpConst <- triphasicCI[triphasicCI$param == "B", replace_cols]
triphasicCI[triphasicCI$param == "gamma", replace_cols] <- triphasicCI[triphasicCI$param == "delta_b", replace_cols]
triphasicCI[triphasicCI$param == "delta_b", replace_cols] <- tmpRate
triphasicCI[triphasicCI$param == "B", replace_cols] <- triphasicCI[triphasicCI$param == "A_b", replace_cols]
triphasicCI[triphasicCI$param == "A_b", replace_cols] <- tmpConst
}
if (triphasicCI$estimate[triphasicCI$param == "delta_b"] > triphasicCI$estimate[triphasicCI$param == "delta"]) {
tmpRate <- triphasicCI[triphasicCI$param == "delta_b", replace_cols]
tmpConst <- triphasicCI[triphasicCI$param == "A_b", replace_cols]
triphasicCI[triphasicCI$param == "delta_b", replace_cols] <- triphasicCI[triphasicCI$param == "delta", replace_cols]
triphasicCI[triphasicCI$param == "delta", replace_cols] <- tmpRate
triphasicCI[triphasicCI$param == "A_b", replace_cols] <- triphasicCI[triphasicCI$param == "A", replace_cols]
triphasicCI[triphasicCI$param == "A", replace_cols] <- tmpConst
}
if (triphasicCI$estimate[triphasicCI$param == "gamma"] > triphasicCI$estimate[triphasicCI$param == "delta_b"]) {
tmpRate <- triphasicCI[triphasicCI$param == "gamma", replace_cols]
tmpConst <- triphasicCI[triphasicCI$param == "B", replace_cols]
triphasicCI[triphasicCI$param == "gamma", replace_cols] <- triphasicCI[triphasicCI$param == "delta_b", replace_cols]
triphasicCI[triphasicCI$param == "delta_b", replace_cols] <- tmpRate
triphasicCI[triphasicCI$param == "B", replace_cols] <- triphasicCI[triphasicCI$param == "A_b", replace_cols]
triphasicCI[triphasicCI$param == "A_b", replace_cols] <- tmpConst
}
return(triphasicCI)
}
# ---- source file: /scratch/gouwar.j/cran-all/cranData/ushr/R/fitting_fns_triphasic.R ----
#' Prepare input data for non-parametric TTS calculations.
#'
#' This function prepares the raw input data for TTS interpolation. Individuals whose data do not meet specific inclusion criteria are removed (see Vignette for more details).
#'
#' Steps include:
#' 1. Setting values below the suppression threshold to the censored value, 'censor_value' (by default, half the suppression threshold).
#' 2. Filtering out subjects who do not suppress viral load below the suppression threshold by a certain time.
#' 3. Filtering out subjects who do not have a decreasing sequence of viral load (within some buffer range).
#' @param data raw data set. Must be a data frame with the following columns: 'id' - stating the unique identifier for each subject; 'vl' - numeric vector stating the viral load measurements for each subject; 'time' - numeric vector stating the time at which each measurement was taken.
#' @param suppression_threshold numeric value indicating the suppression threshold: measurements below this value will be assumed to represent viral suppression. Typically this would be the detection threshold of the assay. Default value is 20.
#' @param uppertime the maximum time point to include in the analysis. Subjects who do not suppress viral load below the suppression threshold within this time will be discarded from model fitting. Units are assumed to be the same as the 'time' column. Default value is 365.
#' @param censor_value positive numeric value indicating the value assigned to viral load measurements below the suppression threshold. Must be less than or equal to the suppression threshold. Default value is 10.
#' @param decline_buffer the maximum allowable deviation of values away from a strictly decreasing sequence in viral load. This allows for e.g. measurement noise and small fluctuations in viral load. Default value is 500.
#' @param initial_buffer numeric (integer) value indicating the maximum number of initial observations from which the beginning of each trajectory will be chosen. Default value is 3.
#' @export
#' @examples
#'
#' set.seed(1234567)
#'
#' simulated_data <- simulate_data(nsubjects = 20)
#'
#' filter_dataTTS(data = simulated_data)
#'
filter_dataTTS <- function(data, suppression_threshold = 20,
uppertime = 365, censor_value = 10,
decline_buffer = 500, initial_buffer = 3){
# Check that data frame includes columns for 'id', 'time', 'vl'
if (!(all(c("vl", "time", "id") %in% names(data)))) {
stop("Data frame must have named columns for 'id', 'time', and 'vl'")
}
if (censor_value > suppression_threshold) {
warning("censor_value must be less than or equal to the suppression threshold. Defaulting to half the suppression threshold.")
censor_value <- 0.5 * suppression_threshold
}
if (censor_value < 0) {
warning("censor_value must be positive. Defaulting to half the suppression threshold.")
censor_value <- 0.5 * suppression_threshold
}
# 1. Change everything <= suppression_threshold to censor_value
data_filtered <- data %>% mutate(vl = case_when(vl <= suppression_threshold ~ censor_value,
vl >= suppression_threshold ~ vl) ) %>%
# 2. Look at only those who reach control within user defined uppertime
filter(time <= uppertime) %>% group_by(id) %>%
filter(any(vl <= suppression_threshold)) %>% ungroup() %>%
# 3a. Isolate data from the highest VL measurement (from points 1 - 3) to the first point below detection
filter(!is.na(vl)) %>% group_by(id) %>%
slice(which.max(vl[1:initial_buffer]):Position(function(x) x <= suppression_threshold, vl)) %>%
ungroup() %>%
# 3b. Only keep VL sequences that are decreasing with user defined buffer...
group_by(id) %>% filter(all(vl <= cummin(vl) + decline_buffer))
return(data_filtered)
}
#' Biphasic root function
#'
#' This function defines the root equation for the biphasic model, i.e. V(t) - suppression_threshold = 0.
#'
#' @param timevec numeric vector of the times, t, at which V(t) should be calculated
#' @param params named vector of all parameters needed to compute the biphasic model, V(t)
#' @param suppression_threshold suppression threshold: measurements below this value will be assumed to represent viral suppression. Typically this would be the detection threshold of the assay. Default value is 20.
#' @export
#'
biphasic_root <- function(timevec, params, suppression_threshold){
value <- params["A"] * exp (- timevec * params["delta"]) + params["B"] * exp( - timevec * params["gamma"]) - suppression_threshold
as.numeric(value)
}
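# Illustrative use (synthetic parameter values): the time at which a biphasic
# decay first crosses the suppression threshold can be found by passing
# biphasic_root() to stats::uniroot(); get_parametricTTS() below does exactly this.
# pars <- c(A = 10000, delta = 0.68, B = 1000, gamma = 0.03)
# stats::uniroot(biphasic_root, lower = 1, upper = 365,
#                params = pars, suppression_threshold = 20)$root  # roughly 130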
#' Single phase root function
#'
#' This function defines the root equation for the single phase model, i.e. V(t) - suppression_threshold = 0.
#'
#' @param timevec numeric vector of the times, t, at which V(t) should be calculated
#' @param params named vector of all parameters needed to compute the single phase model, V(t)
#' @param suppression_threshold suppression threshold: measurements below this value will be assumed to represent viral suppression. Typically this would be the detection threshold of the assay. Default value is 20.
#' @export
#'
single_root <- function(timevec, params, suppression_threshold){
if (all(c("B", "gamma") %in% params)) {
value <- params["B"] * exp( - timevec * params["gamma"]) - suppression_threshold
} else{
value <- params["Bhat"] * exp( - timevec * params["gammahat"]) - suppression_threshold
}
as.numeric(value)
}
#' Triphasic root function
#'
#' This function defines the root equation for the triphasic model, i.e. V(t) - suppression_threshold = 0.
#'
#' @param timevec numeric vector of the times, t, at which V(t) should be calculated
#' @param params named vector of all parameters needed to compute the triphasic model, V(t)
#' @param suppression_threshold suppression threshold: measurements below this value will be assumed to represent viral suppression. Typically this would be the detection threshold of the assay. Default value is 20.
#' @export
#'
triphasic_root <- function(timevec, params, suppression_threshold){
value <- params["A"] * exp (- timevec * params["delta"]) + params["A_b"] * exp (- timevec * params["delta_b"]) + params["B"] * exp( - timevec * params["gamma"]) - suppression_threshold
as.numeric(value)
}
#' Parametric TTS function
#'
#' This function computes the parametric form of the time to suppression
#'
#' @param params named vector of all parameters needed to compute the suppression model, V(t)
#' @param rootfunction specifies which function should be used to calculate the root: biphasic or single phase.
#' @param suppression_threshold suppression threshold: measurements below this value will be assumed to represent viral suppression. Typically this would be the detection threshold of the assay. Default value is 20.
#' @param uppertime numeric value indicating the maximum time that will be considered. Default value is 365.
#' @export
#'
get_parametricTTS <- function(params, rootfunction, suppression_threshold, uppertime){
TTS <- rep(NA, nrow(params))
for (i in 1:nrow(params)){
TTS[i] = stats::uniroot(rootfunction, lower = 1, upper = uppertime,
params = params[i,], suppression_threshold = suppression_threshold)$root
}
return(TTS)
}
#' Non-parametric TTS function
#'
#' This function computes the non-parametric form of the time to suppression
#'
#' @param vl numeric vector of viral load measurements.
#' @param suppression_threshold numeric value for the suppression threshold: measurements below this value will be assumed to represent viral suppression. Typically this would be the detection threshold of the assay. Default value is 20.
#' @param time numeric vector indicating the time when vl measurements were taken.
#' @param npoints numeric value indicating the number of interpolation points to be considered.
#' @export
#'
get_nonparametricTTS <- function(vl, suppression_threshold, time, npoints){
TTS <- time[which(vl == suppression_threshold)[1]]
firstbelow <- which(vl < suppression_threshold)[1]
if(is.na(TTS) | (!is.na(time[firstbelow]) & (time[firstbelow] < TTS)) ){
lastabove <- time[firstbelow - 1]
yax <- c(vl[firstbelow - 1], vl[firstbelow])
xax <- c(lastabove, time[firstbelow])
interpolation <- stats::approx(xax, yax, n = npoints)
TTS <- interpolation$x[interpolation$y <= suppression_threshold][1]
}
return(TTS)
}
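# Illustrative use (synthetic values): linear interpolation between the last
# measurement above and the first measurement below the suppression threshold.
# vl <- c(5000, 800, 150, 10)
# time <- c(0, 14, 30, 60)
# get_nonparametricTTS(vl = vl, suppression_threshold = 20, time = time, npoints = 1000)
# # returns the interpolated crossing time between day 30 and day 60 (roughly 58 here)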
#' Time to suppression (TTS) function
#'
#' This function calculates the time to suppress HIV below a specified threshold.
#'
#' Options include: parametric (i.e. using the fitted model) or non-parametric (i.e. interpolating the processed data).
#' @param model_output output from fitting model. Only required if parametric = TRUE.
#' @param data raw data set. Must be a data frame with the following columns: 'id' - stating the unique identifier for each subject; 'vl'- numeric vector stating the viral load measurements for each subject; 'time' - numeric vector stating the time at which each measurement was taken. Only required if parametric = FALSE.
#' @param suppression_threshold suppression threshold: measurements below this value will be assumed to represent viral suppression. Typically this would be the detection threshold of the assay. Default value is 20.
#' @param uppertime the maximum time interval to search for the time to suppression. Default value is 365.
#' @param censor_value positive numeric value indicating the value assigned to viral load measurements below the suppression threshold (used for the non-parametric calculation). Must be less than or equal to the suppression threshold. Default value is 10.
#' @param decline_buffer the maximum allowable deviation of values away from a strictly decreasing sequence in viral load. This allows for e.g. measurement noise and small fluctuations in viral load. Default value is 500.
#' @param initial_buffer numeric (integer) value indicating the maximum number of initial observations from which the beginning of each trajectory will be chosen. Default value is 3.
#' @param parametric logical TRUE/FALSE indicating whether time to suppression should be calculated using the parametric (TRUE) or non-parametric (FALSE) method. If TRUE, a fitted model object is required. If FALSE, the raw data frame is required. Defaults to TRUE.
#' @param ARTstart logical TRUE/FALSE indicating whether the time to suppression should be represented as time since ART initiation. Default = FALSE. If TRUE, ART initiation times must be included as a data column named 'ART'.
#' @param npoints numeric value of the number of interpolation points to be considered. Default is 1000.
#' @return a data frame containing all individuals who meet the inclusion criteria, along with their TTS estimates, and a column indicating whether the parametric or nonparametric approach was used.
#' @export
#' @examples
#'
#' set.seed(1234567)
#'
#' simulated_data <- simulate_data(nsubjects = 20)
#'
#' get_TTS(data = simulated_data, parametric = FALSE)
#'
get_TTS <- function(model_output = NULL, data = NULL,
suppression_threshold = 20, uppertime = 365, censor_value = 10,
decline_buffer = 500, initial_buffer = 3,
parametric = TRUE, ARTstart = FALSE, npoints = 1000){
# 1. Parametric TTS ----------------------------------------------------------------
if(parametric == TRUE){
if(is.null(model_output)){
stop("Model output not found. You must supply the fitted model to calculate parametric TTS values. Try ?get_model_fits.")
}
if (length(model_output$triphasicCI) > 0) {
# Triphasic
triphasic_params <- model_output$triphasicCI %>%
select(-lowerCI, -upperCI) %>% spread(param, estimate) %>%
mutate(TTS = get_parametricTTS(params = ., rootfunction = triphasic_root, suppression_threshold, uppertime),
model = "triphasic", calculation = "parametric")
# All
TTS_output <- triphasic_params %>% select(id, TTS, model, calculation)
} else if (length(model_output$biphasicCI) > 0 & length(model_output$singleCI) > 0) {
# Biphasic
biphasic_params <- model_output$biphasicCI %>%
select(-lowerCI, -upperCI) %>% spread(param, estimate) %>%
mutate(TTS = get_parametricTTS(params = ., rootfunction = biphasic_root, suppression_threshold, uppertime),
model = "biphasic", calculation = "parametric")
# Single phase
single_params <- model_output$singleCI %>%
select(-lowerCI, -upperCI) %>% spread(param, estimate) %>%
mutate(TTS = get_parametricTTS(params = ., rootfunction = single_root, suppression_threshold, uppertime),
model = "single phase", calculation = "parametric")
# All
TTS_output <- biphasic_params %>% full_join(single_params) %>%
select(id, TTS, model, calculation)
} else if (length(model_output$biphasicCI) > 0 & length(model_output$singleCI) == 0) {
# Biphasic
biphasic_params <- model_output$biphasicCI %>%
select(-lowerCI, -upperCI) %>% spread(param, estimate) %>%
mutate(TTS = get_parametricTTS(params = ., rootfunction = biphasic_root, suppression_threshold, uppertime),
model = "biphasic", calculation = "parametric")
# All
TTS_output <- biphasic_params %>% select(id, TTS, model, calculation)
} else if (length(model_output$biphasicCI) == 0 & length(model_output$singleCI) > 0) {
# Single phase
single_params <- model_output$singleCI %>%
select(-lowerCI, -upperCI) %>% spread(param, estimate) %>%
mutate(TTS = get_parametricTTS(params = ., rootfunction = single_root, suppression_threshold, uppertime),
model = "single phase", calculation = "parametric")
# All
TTS_output <- single_params %>% select(id, TTS, model, calculation)
}
}
# 2. Non-parametric TTS ----------------------------------------------------------------
if(parametric == FALSE){
if(is.null(data)){
stop("Data not found. You must supply the data to calculate non-parametric TTS values")
}
# Check dataframe includes columns for 'id', 'time', 'vl'
if(!(all(c("vl", "time", "id") %in% names(data)))){
stop("Data frame must have named columns for 'id', 'time', and 'vl'")
}
# Filter out subjects to focus on those who reach suppression below the specified threshold.
data_filtered <- filter_dataTTS(data, suppression_threshold, uppertime, censor_value, decline_buffer, initial_buffer)
if( nrow(data_filtered) == 0){
stop("No individual trajectories remained after filtering. Do you need to set the arguments used by filter_dataTTS? (see ?filter_dataTTS)")
}
TTS_output <- data_filtered %>%
mutate(TTS = get_nonparametricTTS(vl, suppression_threshold, time, npoints)) %>%
ungroup() %>% distinct(id, TTS) %>% mutate(calculation = "non-parametric")
}
if(ARTstart == TRUE){
print("Calculating TTS as time since ART initiation...")
if(is.null(data$ART)){
print("Data frame is missing ART column. Returning original TTS values.")
} else {
ARTdata <- data %>% distinct(id, ART)
TTS_output <- TTS_output %>% left_join(ARTdata) %>% mutate(TTS = TTS - ART)
}
}
return(TTS_output)
}
# ---- source file: /scratch/gouwar.j/cran-all/cranData/ushr/R/get_TTS.R ----
#' Extract fitted parameters
#'
#' This function extracts all untransformed parameters from the output of optim (i.e. the fitted model).
#' @param fit the output of optim i.e. the fitted model for a particular subject
#' @param initial_params named vector of the initial parameter guess
#' @param free_param_index logical TRUE/FALSE vector indicating whether the parameters A, delta, B, gamma are to be recovered. This should be c(TRUE, TRUE, TRUE, TRUE) for the biphasic model and c(FALSE, FALSE, TRUE, TRUE) for the single phase model.
#' @param param_names character vector of the parameter names. This should be c("A", "delta", "B", "gamma") for the biphasic model or c("B", "gamma") for the single phase model.
#' @param inv_param_transform_fn list of transformation functions to be used when back-transforming the transformed parameters. Should be the inverse of the forward transformation functions.
#' @param index indicator value used inside the master function to indicate the subject number.
#'
get_params = function(fit, initial_params, free_param_index, param_names, inv_param_transform_fn, index = NULL){
# get initial guesses
final_params <- initial_params
# get the transformed, fitted parameters
fitted_params <- fit$par
# now untransform these values
untransformed_fitted_params <- get_transformed_params(params = fitted_params, param_transform_fn = inv_param_transform_fn)
# replace the initial guesses of the free parameters with the fitted values
final_params[free_param_index] <- untransformed_fitted_params
return(c(final_params[free_param_index], index = index) )
}
#' Calculate parameter confidence intervals
#'
#' This function calculates parameter 95% confidence intervals from the fitted model (using the hessian supplied from optim).
#' @param fit the output of optim i.e. the fitted model for a particular subject
#'
get_CI <- function(fit){
if( det(fit$hessian) == 0){
stop("The determinant of the Hessian is zero: cannot calculate confidence intervals")
}
fisher_info <- solve(fit$hessian)
prop_sigma <- sqrt(diag(fisher_info))
upper <- fit$par + 1.96 * prop_sigma
lower <- fit$par - 1.96 * prop_sigma
interval <- data.frame(estimate = fit$par, upperCI = upper, lowerCI = lower)
return(exp(interval))
}
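# Illustrative sketch (toy objective, not package data): get_CI() expects an
# optim() result fitted with hessian = TRUE, and assumes parameters were fitted
# on the log scale (hence the exp() back-transformation of the Wald intervals).
# toy_fn <- function(logp) (exp(logp) - 5)^2 + 1
# toy_fit <- stats::optim(par = log(2), fn = toy_fn, method = "Brent",
#                         lower = -10, upper = 10, hessian = TRUE)
# get_CI(toy_fit)  # estimate close to 5, with 95% confidence limits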
#' Make parameter summary table
#'
#' This function collates confidence intervals and parameter estimates from all subjects (fitted with the same model) into a single summary table.
#' @param CIlist a list of confidence intervals and parameter estimates obtained from fitting either the single or biphasic model to each eligible subject.
#' @param param_names character vector of the parameter names. This should be c("A", "delta", "B", "gamma") for the biphasic model or c("B", "gamma") for the single phase model.
#' @param free_param_index logical vector indicating whether the parameters A, delta, B, gamma are to be included. This should be c(TRUE, TRUE, TRUE, TRUE) for the biphasic model and c(FALSE, FALSE, TRUE, TRUE) for the single phase model.
#' @param fitted data frame with an 'id' column of the unique identifiers for each subject represented in CIlist. Identifiers should be ordered according to their appearance in CIlist.
#'
get_CItable <- function(CIlist, param_names, free_param_index, fitted){
CItable <- bind_rows(CIlist) %>%
mutate(param = rep(param_names[free_param_index], times = nrow(fitted)),
id = rep(fitted$id, each = length(param_names[free_param_index]))) %>%
mutate(CIrange = upperCI - lowerCI, relativerange = CIrange/lowerCI)
return(CItable)
}
#' Switch names of rate parameters
#'
#' This function switches the names of delta and gamma estimates if gamma > delta.
#' @param biphasicCI data frame of parameter estimates and confidence intervals for the biphasic model.
#'
switch_params <- function(biphasicCI){
replace_cols <- c("estimate", "lowerCI", "upperCI")
if (biphasicCI$estimate[biphasicCI$param == "gamma"] > biphasicCI$estimate[biphasicCI$param == "delta"]) {
tmpRate <- biphasicCI[biphasicCI$param == "gamma", replace_cols]
tmpConst <- biphasicCI[biphasicCI$param == "B", replace_cols]
biphasicCI[biphasicCI$param == "gamma", replace_cols] <- biphasicCI[biphasicCI$param == "delta", replace_cols]
biphasicCI[biphasicCI$param == "delta", replace_cols] <- tmpRate
biphasicCI[biphasicCI$param == "B", replace_cols] <- biphasicCI[biphasicCI$param == "A", replace_cols]
biphasicCI[biphasicCI$param == "A", replace_cols] <- tmpConst
}
return(biphasicCI)
}
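# Illustrative use (toy numbers): if the fitted 'gamma' exceeds 'delta', the
# phase labels are exchanged so that 'delta' always denotes the faster decay.
# toyCI <- data.frame(param = c("A", "delta", "B", "gamma"),
#                     estimate = c(200, 0.05, 9000, 0.7),
#                     lowerCI = c(100, 0.01, 7000, 0.5),
#                     upperCI = c(400, 0.10, 12000, 0.9))
# switch_params(toyCI)  # A/B and delta/gamma values are swapped between labels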
# ---- source file: /scratch/gouwar.j/cran-all/cranData/ushr/R/get_model_output.R ----
utils::globalVariables(c("vl", "time", "id",
"vl_diff", "index", "tag", "n",
"firstbelow",
"calculation", "model", "Model",
"Included",
"Param", "param", "estimate",
"A", "B", "delta", "gamma", "Bhat", "gammahat", "A_b", "delta_b",
"ShortLifespan", "LongLifespan", "SingleLifespan",
"LongLifespanNonProductive", "ShortLifespanProductive", "ShortLifespanNonProductive",
"Median", "SD",
"TTS", "ART", "fit",
"lowerCI", "upperCI", "CIrange", "relativerange",
"."))
# ---- source file: /scratch/gouwar.j/cran-all/cranData/ushr/R/globals.R ----
#' Prepare input data
#'
#' This function prepares the raw input data for model fitting.
#'
#' Steps include:
#' 1. Setting values below the detection threshold to the censored value, 'censor_value' (by default, half the detection threshold).
#' 2. Filtering out subjects who do not suppress viral load below the detection threshold by a certain time.
#' 3. Filtering out subjects who do not have a decreasing sequence of viral load (within some buffer range).
#' 4. Filtering out subjects who do not have enough data for model fitting.
#' 5. Removing the last data point of subjects with the last two points very close to the detection threshold. This prevents skewing of the model fit.
#' Further details can be found in the Vignette.
#' @param data raw data set. Must be a data frame with the following columns: 'id' - stating the unique identifier for each subject; 'vl' - numeric vector with the viral load measurements for each subject; 'time' - numeric vector of the times at which each measurement was taken.
#' @param detection_threshold numeric value indicating the detection threshold of the assay used to measure viral load. Measurements below this value will be assumed to represent undetectable viral load levels. Default value is 20.
#' @param censortime numeric value indicating the maximum time point to include in the analysis. Subjects who do not suppress viral load below the detection threshold within this time will be discarded. Units are assumed to be the same as the 'time' column. Default value is 365.
#' @param censor_value positive numeric value indicating the value assigned to viral load measurements below the detection threshold. Must be less than or equal to the detection threshold. Default value is 10.
#' @param decline_buffer numeric value indicating the maximum allowable deviation of values away from a strictly decreasing sequence in viral load. This allows for e.g. measurement noise and small fluctuations in viral load. Default value is 500.
#' @param initial_buffer numeric (integer) value indicating the maximum number of initial observations from which the beginning of each trajectory will be chosen. Default value is 3.
#' @param n_min_single numeric value indicating the minimum number of data points required to be included in the analysis. Defaults to 3. It is highly advised not to go below this threshold.
#' @param threshold_buffer numerical value indicating the range above the detection threshold which represents potential skewing of model fits. Subjects with their last two data points within this range will have the last point removed. Default value is 10.
#' @param nsuppression numerical value (1 or 2) indicating whether suppression is defined as having one observation below the detection threshold, or two sustained observations. Default value is 1.
#' @import dplyr
#' @return data frame of individuals whose viral load trajectories meet the criteria for model fitting. Includes columns for 'id', 'vl', and 'time'.
#' @export
#' @examples
#'
#' set.seed(1234567)
#'
#' simulated_data <- simulate_data(nsubjects = 20)
#'
#' filter_data(simulated_data)
#'
filter_data <- function(data, detection_threshold = 20,
censortime = 365, censor_value = 10,
decline_buffer = 500, initial_buffer = 3,
n_min_single = 3, threshold_buffer = 10, nsuppression = 1){
# Check that data frame includes columns for 'id', 'time', 'vl'
if (!(all(c("vl", "time", "id") %in% names(data)))) {
stop("'data' must be a data frame with named columns for 'id', 'time', and 'vl'")
}
if (!is.numeric(data$time)) {
stop("Column for the time of observations ('time') must be numeric")
}
if (is.factor(data$id)) {
data$id <- as.character(data$id)
}
if (any(is.na(data$id))) {
warning("Some subjects have missing IDs; removing these from the data")
data <- data %>% filter(!is.na(id))
}
if (censor_value > detection_threshold) {
warning("censor_value must be less than or equal to the detection threshold. Defaulting to half the detection threshold.")
censor_value <- 0.5 * detection_threshold
}
if (censor_value < 0) {
warning("censor_value must be positive. Defaulting to half the detection threshold.")
censor_value <- 0.5 * detection_threshold
}
if (!(nsuppression %in% c(1,2))) {
warning("nsuppression must take the numeric value 1 or 2 to define the criteria for reaching suppression; reverting to default nsuppression = 1")
nsuppression <- 1
}
if (nsuppression == 1) {
# 1. Change everything <= detection_threshold to censor_value
data_filtered <- data %>% mutate(vl = case_when(vl <= detection_threshold ~ censor_value,
vl >= detection_threshold ~ vl) ) %>%
# 2. Look at only those who reach control within user defined censortime
filter(time <= censortime) %>% group_by(id) %>%
filter(any(vl <= detection_threshold)) %>% ungroup() %>%
filter(!is.na(vl))
} else if (nsuppression == 2) {
data_filtered <- data %>% mutate(vl = case_when(vl <= detection_threshold ~ censor_value,
vl >= detection_threshold ~ vl) ) %>%
filter(!is.na(vl)) %>%
filter(time <= censortime) %>% group_by(id) %>%
# NOW: must have 2 consecutive measurements below threshold
mutate(firstbelow = intersect(which(vl <= detection_threshold),
which(vl <= detection_threshold) + 1)[1] - 1 ) %>%
mutate(firstbelow = time[firstbelow]) %>%
filter(time <= firstbelow) %>% ungroup()
}
# 3a. Isolate data from the highest VL measurement (from points 1 - 3) to the first point below detection
if (nrow(data_filtered) > 0) {
data_filtered <- data_filtered %>% group_by(id) %>%
slice(which.max(vl[1:initial_buffer]):Position(function(x) x <= detection_threshold, vl)) %>%
ungroup() %>%
# 3b. Only keep VL sequences that are decreasing with user defined buffer...
group_by(id) %>% filter(all(vl <= cummin(vl) + decline_buffer)) %>%
# 4. ...AND have min # dps above the detection threshold
filter(length(vl[vl > detection_threshold]) >= n_min_single)
}
# 5. Remove last data point of subjects with last two points very close to the threshold to prevent skewing model fit
if (nrow(data_filtered) > 0) {
data_filtered <- data_filtered %>% group_by(id) %>%
mutate(n = n(), index = 1:n(),
tag = ifelse(vl[n-1] - vl[n] < threshold_buffer, TRUE, FALSE) ) %>%
filter(!(tag == TRUE & index == n)) %>%
ungroup() %>% select(- index, -n, -tag)
}
return(data_filtered)
}
# ---- source file: /scratch/gouwar.j/cran-all/cranData/ushr/R/input_processing.R ----
#' Master function
#'
#' This function performs the entire analysis, from data filtering to fitting the biphasic/single phase models. The biphasic/single phase models should be used when ART comprises RTIs and/or PIs.
#'
#' Steps include:
#' 1. Processing the raw data.
#' 2. Fitting the biphasic model to subjects with eligible data e.g. those with enough data points and reliable confidence interval estimates.
#' 3. Fitting the single phase model to the remaining subjects.
#' @param filter Logical TRUE/FALSE indicating whether the data should be processed (highly recommended) prior to model fitting. Default is TRUE.
#' @param data raw data set. Must be a data frame with the following columns: 'id' - stating the unique identifier for each subject; 'vl' - numeric vector stating the viral load measurements for each subject; 'time'- numeric vector stating the time at which each measurement was taken.
#' @param detection_threshold numeric value indicating the detection threshold of the assay used to measure viral load. Measurements below this value will be assumed to represent undetectable viral levels. Default value is 20.
#' @param censortime numeric value indicating the maximum time point to include in the analysis. Subjects who do not suppress viral load below the detection threshold within this time will be discarded from model fitting. Units are assumed to be the same as the 'time' measurements. Default value is 365.
#' @param censor_value positive numeric value indicating the value assigned to viral load measurements below the detection threshold. Must be less than or equal to the detection threshold. Default value is 10.
#' @param decline_buffer numeric value indicating the maximum allowable deviation of values away from a strictly decreasing sequence in viral load. This allows for e.g. measurement noise and small fluctuations in viral load. Default value is 500.
#' @param initial_buffer integer value indicating the maximum number of initial observations from which the beginning of each trajectory will be chosen. Default value is 3.
#' @param threshold_buffer numeric value indicating the range above the detection threshold which represents potential skewing of model fits. Subjects with their last two data points within this range will have the last point removed. Default value is 10.
#' @param VL_max_decline numeric value indicating the maximum allowable difference between first and second viral load measurements. Default is 10,000.
#' @param CI_max_diff numeric value indicating the maximum allowable relative difference between lower and upper 95\% confidence intervals i.e. (upper CI - lower CI)/lower CI. Default is 1000.
#' @param n_min_single numeric value indicating the minimum number of data points required to be included in the analysis. Defaults to 3. It is highly advised not to go below this threshold.
#' @param n_min_biphasic numeric value indicating the minimum number of data points required to fit the biphasic model. Defaults to 6. It is highly advised not to go below this threshold.
#' @param nsuppression numerical value (1 or 2) indicating whether suppression is defined as having one observation below the detection threshold, or two sustained observations. Default value is 1.
#' @param forward_param_transform_fn list of transformation functions to be used when fitting the model in optim. Defaults to log transformations for all parameters (to allow unconstrained optimization).
#' @param inv_param_transform_fn list of transformation functions to be used when back-transforming the transformed parameters. Should be the inverse of the forward transformation functions. Defaults to exponential.
#' @param initial_params named numeric vector of initial parameter guesses. Defaults to c(A = 10000, delta = 0.68, B = 1000, gamma = 0.03).
#' @param searchmethod optimization algorithm to be passed to 'optim()'. Defaults to 'Nelder-Mead'.
#' @return a list containing the filtered data ('data_filtered'); parameter estimates for the biphasic and single phase models ('biphasicCI' and 'singleCI'); and predictions from the biphasic and single phase models ('biphasic_fits' and 'single_fits').
#' @export
#' @examples
#'
#' set.seed(1234567)
#'
#' simulated_data <- simulate_data(nsubjects = 20)
#'
#' model_output <- ushr(data = simulated_data)
#'
ushr <- function(data,
## User-defined variables
filter = TRUE,
detection_threshold = 20,
censortime = 365,
censor_value = 10,
decline_buffer = 500,
initial_buffer = 3,
threshold_buffer = 10,
VL_max_decline = 1e4,
CI_max_diff = 1e3,
n_min_single = 3,
n_min_biphasic = 6,
nsuppression = 1,
# Parameter transformations for optimizer
forward_param_transform_fn = list(log, log, log, log),
inv_param_transform_fn = list(exp, exp, exp, exp),
## User defined fitting variables:
initial_params = c(A = 10000, delta = 0.68, B = 1000, gamma = 0.03),
searchmethod = "Nelder-Mead"){
if (!is.data.frame(data)) {
stop("Input 'data' must be a data frame")
}
if (!is.numeric(c(detection_threshold, censortime, decline_buffer, initial_buffer, n_min_single, n_min_biphasic,
threshold_buffer, VL_max_decline, CI_max_diff, nsuppression) )) {
stop("The following arguments must be numeric: detection_threshold, censortime, decline_buffer, initial_buffer,
n_min_single, n_min_biphasic, threshold_buffer, VL_max_decline, CI_max_diff, nsuppression")
}
if (floor(initial_buffer) != initial_buffer) {
initial_buffer <- floor(initial_buffer)
warning(paste0("initial_buffer must be a whole number: rounding down to ", floor(initial_buffer)))
}
## 1. Data processing ----------------------------------------------------------------
if (filter) {
data_filtered <- filter_data(data, detection_threshold, censortime, censor_value,
decline_buffer, initial_buffer, n_min_single, threshold_buffer, nsuppression)
} else {
data_filtered <- data
}
# Number of subjects after filtering
id_filtered = unique(data_filtered$id)
if (length(id_filtered) == 0) {
stop("No subjects were suitable for model fitting after the data was filtered.")
}
# Get transformed parameters -----------------------------------------
transformed_params <- get_transformed_params(params = initial_params,
param_transform_fn = forward_param_transform_fn)
param_names <- names(initial_params)
# 2. Fit biphasic model ------------------------------------------
free_param_index <- c(TRUE, TRUE, TRUE, TRUE)
biphasicmodel <- fit_model(data = data_filtered, id_vector = id_filtered, param_names = param_names,
initial_params = initial_params, free_param_index = free_param_index,
n_min_biphasic = n_min_biphasic,
model_list = "four", whichcurve = get_biphasic,
forward_param_transform_fn = forward_param_transform_fn,
inv_param_transform_fn = inv_param_transform_fn,
searchmethod = searchmethod)
if (nrow(biphasicmodel$fitted) > 0) {
biphasicCI <- get_CItable(biphasicmodel$CIlist, param_names, free_param_index, fitted = biphasicmodel$fitted)
# Flag subjects with unreliable CIs i.e. if at least one of the relative CI ranges is > CI_max_diff
badCI <- biphasicCI %>% filter(relativerange > CI_max_diff) %>% distinct(id)
biphasicCI <- biphasicCI %>% filter(!(id %in% badCI$id)) %>% select(id, param, estimate, lowerCI, upperCI)
if (nrow(biphasicCI) > 0) {
#biphasicCI <- switch_params(biphasicCI)
biphasicCI <- biphasicCI %>% group_by(id) %>% do(switch_params(.)) %>% ungroup()
}
biphasic_fits <- bind_rows(biphasicmodel$model_fitlist) %>% filter(!(id %in% badCI$id))
} else {
biphasicCI <- data.frame()
badCI <- c()
biphasic_fits <- data.frame()
}
# 3. Fit single phase for the remaining ids ------------------------------------------
# Choose subjects (include biphasic fits with unreliable CIs)
single_ids <- sort(union(biphasicmodel$notfitted$id, badCI$id))
# Take out first datapoint for ids with sharp initial decline i.e. with VL diff > VL_max_decline
# (suggests fast first phase with too few data points to model)
cutdat <- data_filtered %>% select(id, time, vl) %>%
arrange(id, time) %>% filter(id %in% single_ids) %>%
group_by(id) %>% slice(1:2) %>% mutate(vl_diff = max(vl) - min(vl)) %>%
filter(vl_diff > VL_max_decline) %>% distinct(id, vl_diff)
data_cut <- data_filtered %>% group_by(id) %>% do(remove_vl0(.$id, which_ids = cutdat$id, .)) %>% ungroup()
# Fit model on remaining data
free_param_index <- c(FALSE, FALSE, TRUE, TRUE)
if (length(single_ids) > 0) {
singlemodel <- fit_model(data = data_cut, id_vector = single_ids, param_names = param_names,
initial_params = initial_params, free_param_index = free_param_index,
n_min_biphasic = n_min_biphasic,
model_list = "two", whichcurve = get_singlephase,
forward_param_transform_fn = forward_param_transform_fn,
inv_param_transform_fn = inv_param_transform_fn,
searchmethod = searchmethod)
single_fits <- bind_rows(singlemodel$model_fitlist)
singleCI <- get_CItable(singlemodel$CIlist, param_names, free_param_index, fitted = singlemodel$fitted) %>%
select(id, param, estimate, lowerCI, upperCI) %>%
mutate(param = ifelse(param == "B", "Bhat", param),
param = ifelse(param == "gamma", "gammahat", param))
} else {
single_fits <- data.frame()
singleCI <- data.frame()
}
output <- list(data_filtered = data_cut,
biphasicCI = biphasicCI, singleCI = singleCI,
biphasic_fits = biphasic_fits, single_fits = single_fits)
return(output)
}
# ---- source file: /scratch/gouwar.j/cran-all/cranData/ushr/R/master_fn.R ----
#' Master function for the triphasic model
#'
#' This function performs the entire analysis, from data filtering to triphasic model fitting. The triphasic model should be used when ART includes an integrase inhibitor.
#'
#' Steps include:
#' 1. Processing the raw data.
#' 2. Fitting the triphasic model to subjects with eligible data e.g. those with enough data points and reliable confidence interval estimates.
#' @param filter Logical TRUE/FALSE indicating whether the data should be processed (highly recommended) prior to model fitting. Default is TRUE.
#' @param data raw data set. Must be a data frame with the following columns: 'id' - stating the unique identifier for each subject; 'vl' - numeric vector stating the viral load measurements for each subject; 'time'- numeric vector stating the time at which each measurement was taken.
#' @param detection_threshold numeric value indicating the detection threshold of the assay used to measure viral load. Measurements below this value will be assumed to represent undetectable viral levels. Default value is 20.
#' @param censortime numeric value indicating the maximum time point to include in the analysis. Subjects who do not suppress viral load below the detection threshold within this time will be discarded from model fitting. Units are assumed to be the same as the 'time' measurements. Default value is 365.
#' @param censor_value positive numeric value indicating the value assigned to viral load measurements below the detection threshold. Must be less than or equal to the detection threshold. Default value is 10.
#' @param decline_buffer numeric value indicating the maximum allowable deviation of values away from a strictly decreasing sequence in viral load. This allows for e.g. measurement noise and small fluctuations in viral load. Default value is 500.
#' @param initial_buffer integer value indicating the maximum number of initial observations from which the beginning of each trajectory will be chosen. Default value is 3.
#' @param threshold_buffer numeric value indicating the range above the detection threshold which represents potential skewing of model fits. Subjects with their last two data points within this range will have the last point removed. Default value is 10.
#' @param VL_max_decline numeric value indicating the maximum allowable difference between first and second viral load measurements. Default is 10,000.
#' @param CI_max_diff numeric value indicating the maximum allowable relative difference between lower and upper 95\% confidence intervals i.e. (upper CI - lower CI)/lower CI. Default is 1000.
#' @param n_min_triphasic numeric value indicating the minimum number of data points required to be included in the analysis. Defaults to 9. It is highly advised not to go below this threshold.
#' @param nsuppression numerical value (1 or 2) indicating whether suppression is defined as having one observation below the detection threshold, or two sustained observations. Default value is 1.
#' @param forward_param_transform_fn list of transformation functions to be used when fitting the model in optim. Defaults to log transformations for all parameters (to allow unconstrained optimization).
#' @param inv_param_transform_fn list of transformation functions to be used when back-transforming the transformed parameters. Should be the inverse of the forward transformation functions. Defaults to exponential.
#' @param initial_params named numeric vector of initial parameter guesses. Defaults to c(A = 10000, delta = 1, A_b = 1000, delta_b = 0.15, B = 10, gamma = 0.05).
#' @param searchmethod optimization algorithm to be passed to 'optim()'. Defaults to 'Nelder-Mead'.
#' @return a list containing the filtered data ('data_filtered'); parameter estimates for the triphasic model ('triphasicCI'); and predictions from the triphasic model ('triphasic_fits').
#' @export
ushr_triphasic <- function(data,
## User-defined variables
filter = TRUE,
detection_threshold = 20,
censortime = 365,
censor_value = 10,
decline_buffer = 500,
initial_buffer = 3,
threshold_buffer = 10,
VL_max_decline = 1e4,
CI_max_diff = 1e3,
n_min_triphasic = 9,
nsuppression = 1,
# Parameter transformations for optimizer
forward_param_transform_fn = list(log, log, log, log, log, log),
inv_param_transform_fn = list(exp, exp, exp, exp, exp, exp),
## User defined fitting variables:
initial_params = c(A = 10000, delta = 1, A_b = 1000, delta_b = 0.15, B = 10, gamma = 0.05),
searchmethod = "Nelder-Mead"){
if (!is.data.frame(data)) {
stop("Input 'data' must be a data frame")
}
if (!is.numeric(c(detection_threshold, censortime, decline_buffer, initial_buffer, n_min_triphasic,
threshold_buffer, VL_max_decline, CI_max_diff, nsuppression) )) {
stop("The following arguments must be numeric: detection_threshold, censortime, decline_buffer, initial_buffer,
n_min_triphasic, threshold_buffer, VL_max_decline, CI_max_diff, nsuppression")
}
if (floor(initial_buffer) != initial_buffer) {
initial_buffer <- floor(initial_buffer)
warning(paste0("initial_buffer must be a whole number: rounding down to ", floor(initial_buffer)))
}
## 1. Data processing ----------------------------------------------------------------
if (filter) {
data_filtered <- filter_data(data, detection_threshold, censortime, censor_value,
decline_buffer, initial_buffer, n_min_single = n_min_triphasic,
threshold_buffer, nsuppression)
} else {
data_filtered <- data
}
# Number of subjects after filtering
id_filtered = unique(data_filtered$id)
if (length(id_filtered) == 0) {
stop("No subjects were suitable for model fitting after the data was filtered.")
}
# Get transformed parameters -----------------------------------------
transformed_params <- get_transformed_params(params = initial_params,
param_transform_fn = forward_param_transform_fn)
param_names <- names(initial_params)
# 2. Fit triphasic model ------------------------------------------
free_param_index <- c(TRUE, TRUE, TRUE, TRUE, TRUE, TRUE)
triphasicmodel <- fit_model_triphasic(data = data_filtered, id_vector = id_filtered, param_names = param_names,
initial_params = initial_params, free_param_index = free_param_index,
n_min_triphasic = n_min_triphasic,
forward_param_transform_fn = forward_param_transform_fn,
inv_param_transform_fn = inv_param_transform_fn,
searchmethod = searchmethod)
if (nrow(triphasicmodel$fitted) > 0) {
triphasicCI <- get_CItable(triphasicmodel$CIlist, param_names, free_param_index, fitted = triphasicmodel$fitted)
# Flag subjects with unreliable CIs i.e. if at least one of the relative CI ranges is > CI_max_diff
badCI <- triphasicCI %>% filter(relativerange > CI_max_diff) %>% distinct(id)
triphasicCI <- triphasicCI %>% filter(!(id %in% badCI$id)) %>% select(id, param, estimate, lowerCI, upperCI)
if (nrow(triphasicCI) > 0) {
triphasicCI <- triphasicCI %>% group_by(id) %>% do(tri_switch_params(.)) %>% ungroup()
}
triphasic_fits <- bind_rows(triphasicmodel$model_fitlist) %>% filter(!(id %in% badCI$id))
} else {
triphasicCI <- data.frame()
badCI <- c()
triphasic_fits <- data.frame()
}
output <- list(data_filtered = data_filtered,
triphasicCI = triphasicCI,
triphasic_fits = triphasic_fits)
return(output)
}
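# Illustrative usage sketch (the input data frame here is a placeholder for your
# own 'id'/'time'/'vl' data): ushr_triphasic() is called like ushr(), but requires
# densely sampled trajectories (at least 'n_min_triphasic' points per subject).
# triphasic_output <- ushr_triphasic(data = my_viral_load_data)
# plot_model(triphasic_output, type = "triphasic")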
# ---- source file: /scratch/gouwar.j/cran-all/cranData/ushr/R/master_fn_triphasic.R ----
#' Compute the biphasic model curve
#'
#' This function calculates the biphasic model, V(t), for a vector of input times, t
#' @param params named numeric vector of all parameters needed to compute the biphasic model, V(t)
#' @param timevec numeric vector of the times, t, at which V(t) should be calculated
#' @return numeric vector of viral load predictions, V(t), for each time point in 'timevec'
#' @export
#' @examples
#'
#' get_biphasic(params = c(A = 10000, delta = 0.68, B = 1000, gamma = 0.03),
#' timevec = seq(1, 100, length.out = 100))
#'
get_biphasic <- function(params, timevec){
if(length(params) < 4){
stop("The biphasic model needs 4 parameters: A, delta, B, gamma")
}
params["A"] * exp(- params["delta"] * timevec) + params["B"] * exp(- params["gamma"] * timevec)
}
#' Compute the single phase model curve
#'
#' This function calculates the single phase model, V(t), for a vector of input times, t
#' @param params named numeric vector of all parameters needed to compute the single phase model, V(t)
#' @param timevec numeric vector of the times, t, at which V(t) should be calculated
#' @return numeric vector of viral load predictions, V(t), for each time point in 'timevec'
#' @export
#' @examples
#'
#' get_singlephase(params = c(B = 1000, gamma = 0.68), timevec = seq(1, 100, length.out = 100))
#'
get_singlephase <- function(params, timevec){
if(length(params) < 2){
stop("The single phase model needs 2 parameters: B, gamma")
}
params["B"] * exp(- params["gamma"] * timevec)
}
#' Compute the model for a given subject's data and best-fit parameters
#'
#' This function calculates the biphasic or single phase model given a subject's data and best-fit parameters
#' @param data data frame with columns for the subject's identifier ('id') and timing of sampling ('time')
#' @param best_param named numeric vector of best fit parameters obtained from fitting the biphasic or single phase model to the subjects data
#' @param param_names character vector containing the names of the parameters in 'best_param'
#' @param whichcurve character indicating which model function should be used. Use 'get_biphasic' for the biphasic model, or 'get_singlephase' for the single phase model. Defaults to 'get_biphasic'.
#' @return data frame with columns for the sampling times ('time'), fitted viral load predictions ('fit'), and the corresponding subject identifier ('id')
#' @export
#' @examples
#'
#' nobs <- 7
#' example_param <- c(A = 10000, delta = 0.68, B = 1000, gamma = 0.03)
#'
#' vldata <- get_biphasic(params = example_param, timevec = seq(5, 100, length.out = nobs))
#'
#' subjectdata <- data.frame(id = 123, time = seq(5, 100, length.out = nobs),
#' vl = 10^ (log10(vldata) + rnorm(nobs, 0, 0.2)))
#'
#' get_curve(data = subjectdata, best_param = example_param, param_names = names(example_param))
get_curve <- function(data, best_param, param_names, whichcurve = get_biphasic){
mint <- min(data$time) - 1
maxt <- max(data$time) + 1
tscale <- seq(mint, maxt, 0.5)
tscale <- tscale[tscale >= 0]
modelfit <- data.frame('time'= tscale,'fit'= whichcurve(params = best_param[param_names], tscale),
'id' = rep(data$id[1], times = length(tscale)))
return(modelfit)
}
# ---- source file: /scratch/gouwar.j/cran-all/cranData/ushr/R/model_fns.R ----
#' Get plotting theme
#'
#' This function sets the plotting theme for ggplot.
#'
#' @param textsize numeric value for base text size. Default is 9.
#'
get_plottheme <- function(textsize){
mytheme <- theme_bw() + theme(axis.text = element_text(size = textsize),
axis.title = element_text(size = textsize + 2),
legend.text = element_text(size = textsize),
legend.title = element_text(size = textsize + 2),
strip.text.x = element_text(size = textsize),
strip.text.y = element_text(size = textsize))
return(mytheme)
}
#' Plot data
#'
#' This function plots raw, filtered, or simulated data.
#'
#' @param data data frame of raw, filtered, or simulated data. Must include the following columns: 'id' - stating the unique identifier for each subject; 'vl' - numeric vector stating the viral load measurements for each subject; 'time'- numeric vector stating the time at which each measurement was taken.
#' @param textsize numeric value for base text size in ggplot. Default is 9.
#' @param pointsize numeric value for point size in ggplot. Default is 1.
#' @param linesize numeric value for line width in ggplot. Default is 0.5.
#' @param facet_col numeric value for number of columns to use when faceting subject panels. Defaults to NULL (i.e. ggplot default).
#' @param detection_threshold numeric value indicating the detection threshold of the assay used to measure viral load. Default value is 20.
#' @import ggplot2
#' @export
#' @examples
#'
#' set.seed(1234567)
#'
#' simulated_data <- simulate_data(nsubjects = 20)
#'
#' plot_data(simulated_data)
#'
plot_data <- function(data, textsize = 9, pointsize = 1, linesize = 0.5,
facet_col = NULL, detection_threshold = 20){
if (!requireNamespace("ggplot2", quietly = TRUE)) {
stop("Package \"ggplot2\" is required for automated plotting.
Either install it, or plot manually.")
}
mytheme <- get_plottheme(textsize)
data %>% ggplot(aes(x = time, y = vl)) + geom_point(size = pointsize) +
geom_hline(aes(yintercept = detection_threshold), size = linesize, linetype = "dashed") +
facet_wrap(~ id, ncol = facet_col) + mytheme +
scale_y_log10("HIV viral load") + scale_x_continuous("Time")
}
#' Plot model fits
#'
#' This function plots the output from model fitting.
#'
#' @param model_output output from model fitting using ushr().
#' @param type character string indicating which model fits should be plotted. Must be either "biphasic", "single", or "triphasic". Defaults to "biphasic".
#' @param detection_threshold numeric value indicating the detection threshold of the assay used to measure viral load. Default value is 20.
#' @param textsize numeric value for base text size in ggplot. Default is 9.
#' @param pointsize numeric value for point size in ggplot. Default is 1.
#' @param linesize numeric value for line width in ggplot. Default is 0.5.
#' @param facet_col numeric value for number of columns to use when faceting subject panels. Defaults to NULL (i.e. ggplot default).
#' @export
#' @examples
#'
#' set.seed(1234567)
#'
#' simulated_data <- simulate_data(nsubjects = 20)
#'
#' model_output <- ushr(data = simulated_data)
#'
#' plot_model(model_output, type = "biphasic")
#'
plot_model <- function(model_output, type = "biphasic", detection_threshold = 20,
textsize = 9, pointsize = 1, linesize = 0.5,
facet_col = NULL){
if (!requireNamespace("ggplot2", quietly = TRUE)) {
stop("Package \"ggplot2\" is required for automated plotting.
Either install it, or plot manually.")
}
if (is.null(model_output)) {
stop("Please include model output from 'ushr()' for plotting. If you want to plot data, use 'plot_data()'.")
}
# get desired fits for plotting
if (type == "biphasic") {
fits <- model_output$biphasic_fits
} else if (type == "single") {
fits <- model_output$single_fits
} else if (type == "triphasic") {
fits <- model_output$triphasic_fits
} else {
stop("Invalid 'type' argumement. Must be one of 'single', 'biphasic', or 'triphasic'.")
}
if (is.null(fits)) {
stop("There are no fits of the type you have chosen. Try specifying a different type of fit with type = 'biphasic', 'single', or 'triphasic'.")
}
if (nrow(fits) == 0) {
stop("There are no fits of the type you have chosen. Try specifying a different type of fit with type = 'biphasic', 'single', or 'triphasic'.")
}
filtered_data <- model_output$data_filtered %>% filter(id %in% unique(fits$id))
mytheme <- get_plottheme(textsize)
filtered_data %>% ggplot() + geom_point(aes(x = time, y = vl, group = id), size = pointsize) +
geom_line(data = fits, aes(x = time, y = fit, group = id), lty = 1, col = "black", size = linesize) +
facet_wrap(~id, ncol = facet_col) + mytheme +
xlab("Time") + scale_y_log10("HIV viral load") +
geom_hline(aes(yintercept = detection_threshold), linetype = "dashed")
}
#' Plot pairwise parameter distributions
#'
#' This function creates pairwise scatterplots of the estimated parameters. The default plotting method requires GGally; if this package is not available, base R is used instead.
#'
#' @param model_output output from model fitting using ushr().
#' @param type character string indicating which model fits should be plotted. Must be either "biphasic", "single", or "triphasic". Defaults to "biphasic".
#' @param textsize numeric value for base text size. Default is 9.
#' @param pointsize numeric value for point size. Default is 1.
#' @param linesize numeric value for line width; only used for GGally plots. Default is 0.5.
#' @importFrom graphics pairs
#' @export
#' @examples
#'
#' set.seed(1234567)
#'
#' simulated_data <- simulate_data(nsubjects = 20)
#'
#' model_output <- ushr(data = simulated_data)
#'
#' plot_pairs(model_output)
plot_pairs <- function(model_output, type = "biphasic", textsize = 9, pointsize = 1, linesize = 0.5) {
if (requireNamespace("GGally", quietly = TRUE)) {
mytheme <- get_plottheme(textsize)
if (type == "biphasic") {
if(is.null(model_output$biphasicCI)){
stop("There are no fits of the type you have chosen. Try specifying a different type of fit with type = 'biphasic', 'single', or 'triphasic'.")
}
model_output$biphasicCI %>% select(-lowerCI, -upperCI) %>%
spread(param, estimate) %>% #mutate(A = log10(A), B = log10(B)) %>%
select(-id) %>%
GGally::ggpairs(., lower = "blank",
diag = list(continuous = GGally::wrap("densityDiag", size = linesize)),
upper = list(continuous = GGally::wrap("points", size = pointsize)),
axisLabels = "none",
labeller = label_parsed, progress = FALSE) +
mytheme
} else if (type == "single") {
if(is.null(model_output$singleCI)){
stop("There are no fits of the type you have chosen. Try specifying a different type of fit with type = 'biphasic', 'single', or 'triphasic'.")
}
model_output$singleCI %>% select(-lowerCI, -upperCI) %>%
spread(param, estimate) %>%
select(-id) %>%
GGally::ggpairs(., lower = "blank",
upper = list(continuous = GGally::wrap("points", size = pointsize)),
axisLabels = "none",
columnLabels = c("hat(B)", "hat(gamma)"),
labeller = label_parsed, progress = FALSE) +
mytheme
} else if (type == "triphasic") {
if(is.null(model_output$triphasicCI)){
stop("There are no fits of the type you have chosen. Try specifying a different type of fit with type = 'biphasic', 'single', or 'triphasic'.")
}
model_output$triphasicCI %>% select(-lowerCI, -upperCI) %>%
spread(param, estimate) %>%
select(-id) %>%
GGally::ggpairs(., lower = "blank",
upper = list(continuous = GGally::wrap("points", size = pointsize)),
axisLabels = "none",
columnLabels = c("A","A[b]", "B", "delta", "delta[b]", "gamma"),
labeller = label_parsed, progress = FALSE) +
mytheme
} else {
stop("Invalid 'type' argumement. Must be one of 'single', 'biphasic', or 'triphasic'.")
}
} else {
print("Could not find package GGally; plotting with base R instead")
# convert textsize to ~O(1) for base R
axistext <- textsize/9
if (type == "biphasic") {
if(is.null(model_output$biphasicCI)){
stop("There are no fits of the type you have chosen. Try specifying a different type of fit with type = 'biphasic', 'single', or 'triphasic'.")
}
model_output$biphasicCI %>% select(-lowerCI, -upperCI) %>%
spread(param, estimate) %>%
select(-id) %>%
pairs(., pch = 19, cex = pointsize, cex.axis = axistext,
labels = c("A", "B", expression(delta), expression(gamma)))
} else if (type == "single") {
if(is.null(model_output$singleCI)){
stop("There are no fits of the type you have chosen. Try specifying a different type of fit with type = 'biphasic', 'single', or 'triphasic'.")
}
model_output$singleCI %>% select(-lowerCI, -upperCI) %>%
spread(param, estimate) %>%
select(-id) %>%
pairs(., pch = 19, cex = pointsize, cex.axis = axistext,
labels = c(expression(hat(B)), expression(hat(gamma))))
} else if (type == "triphasic") {
if(is.null(model_output$triphasicCI)){
stop("There are no fits of the type you have chosen. Try specifying a different type of fit with type = 'biphasic', 'single', or 'triphasic'.")
}
model_output$triphasicCI %>% select(-lowerCI, -upperCI) %>%
spread(param, estimate) %>%
select(-id) %>%
pairs(., pch = 19, cex = pointsize, cex.axis = axistext,
labels = c("A", expression(A[b]), "B", expression(delta), expression(delta[b]), expression(gamma)))
} else {
stop("Invalid 'type' argumement. Must be one of 'single', 'biphasic', or 'triphasic'.")
}
}
}
#' Plot time to suppression distribution
#'
#' This function plots a histogram of the time to suppression estimates.
#'
#' @param TTS_output output from estimating time to suppression (TTS) values using get_TTS().
#' @param textsize numeric value for base text size on ggplot. Default is 9.
#' @param bins numeric value indicating the number of bins for the histogram. Default is 20.
#' @export
#' @examples
#'
#' set.seed(1234567)
#'
#' simulated_data <- simulate_data(nsubjects = 20)
#'
#' TTSestimates <- get_TTS(data = simulated_data, parametric = FALSE)
#'
#' plot_TTS(TTSestimates, bins = 5)
#'
plot_TTS <- function(TTS_output, textsize = 9, bins = 20){
if (!requireNamespace("ggplot2", quietly = TRUE)) {
stop("Package \"ggplot2\" is required for automated plotting.
Either install it, or plot manually.")
}
mytheme <- get_plottheme(textsize)
ggplot(data = TTS_output, aes(x = TTS)) +
geom_histogram(bins = bins, fill = "grey", colour = "black") +
mytheme + ylab("Frequency") + xlab("Time to suppression")
}
#' Summarize model output
#'
#' This function summarizes the output of model fitting.
#'
#' @param model_output output from model fitting using ushr().
#' @param data dataframe of original data used for model fitting. Must include named 'id' column with subject identifiers.
#' @param stats logical TRUE/FALSE: should the median and sd lifespans also be returned? Default is FALSE.
#' @import dplyr
#' @import tidyr
#' @importFrom stats median sd
#' @return a list containing (i) a summary of which subjects were successfully fit using the biphasic, single phase, or triphasic models, with their corresponding infected cell lifespan estimates ('summary'); (ii) if stats = TRUE: summary statistics for the estimated parameters from the biphasic model ('biphasicstats'); (iii) if stats = TRUE: summary statistics for the estimated parameters from the single phase model ('singlestats'); and (iv) if stats = TRUE and the triphasic model was fit: summary statistics for the estimated parameters from the triphasic model ('triphasicstats').
#' @export
#' @examples
#'
#' set.seed(1234567)
#'
#' simulated_data <- simulate_data(nsubjects = 20)
#'
#' model_output <- ushr(data = simulated_data)
#'
#' summarize_model(model_output, data = simulated_data)
#'
summarize_model <- function(model_output, data, stats = FALSE){
# get triphasic parameter estimates and summary statistics
if (length(model_output$triphasicCI) > 0){
triphasicfits <- model_output$triphasicCI %>%
select(- lowerCI, - upperCI) %>% spread(param, estimate) %>%
mutate(ShortLifespanProductive = 1/delta,
ShortLifespanNonProductive = 1/delta_b,
LongLifespanNonProductive = 1/gamma, Model = "Triphasic")
triphasicstats <- model_output$triphasicCI %>%
select(- lowerCI, - upperCI) %>% spread(param, estimate) %>%
mutate(ShortLifespanProductive = 1/delta,
ShortLifespanNonProductive = 1/delta_b,
LongLifespanNonProductive = 1/gamma) %>%
gather(Param, estimate, A:LongLifespanNonProductive) %>%
group_by(Param) %>%
summarize(Median = median(estimate), SD = sd(estimate)) %>%
mutate(Median = signif(Median, 3), SD = signif(SD, 3), Model = "Triphasic")
} else {
# get biphasic parameter estimates and summary statistics
if (length(model_output$biphasicCI) > 0){
biphasicfits <- model_output$biphasicCI %>%
select(- lowerCI, - upperCI) %>% spread(param, estimate) %>%
mutate(ShortLifespan = 1/delta, LongLifespan = 1/gamma, Model = "Biphasic")
biphasicstats <- model_output$biphasicCI %>%
select(- lowerCI, - upperCI) %>% spread(param, estimate) %>%
mutate(ShortLifespan = 1/delta, LongLifespan = 1/gamma) %>%
gather(Param, estimate, A:LongLifespan) %>%
group_by(Param) %>%
summarize(Median = median(estimate), SD = sd(estimate)) %>%
mutate(Median = signif(Median, 3), SD = signif(SD, 3), Model = "Biphasic")
}
# get single phase parameter estimates and summary statistics
if (length(model_output$singleCI) > 0){
singlephasefits <- model_output$singleCI %>%
select(- lowerCI, - upperCI) %>% spread(param, estimate) %>%
mutate(SingleLifespan = 1/gammahat, Model = "Single phase")
singlestats <- model_output$singleCI %>%
select(- lowerCI, - upperCI) %>% spread(param, estimate) %>%
mutate(SingleLifespan = 1/gammahat) %>%
gather(Param, estimate, Bhat:SingleLifespan) %>%
group_by(Param) %>%
summarize(Median = median(estimate), SD = sd(estimate)) %>%
ungroup() %>%
mutate(Median = signif(Median, 3), SD = signif(SD, 3), Model = "Single phase")
}
}
# join output
if (length(model_output$triphasicCI) > 0){
allfits <- triphasicfits
allinfo <- data %>% distinct(id) %>%
mutate(Included = ifelse(id %in% allfits$id, "Yes", "No")) %>%
left_join(allfits) %>%
select(id, Included,
Model, ShortLifespanProductive, ShortLifespanNonProductive,
LongLifespanNonProductive) %>%
mutate_if(is.numeric, round, 2) %>% mutate_all(~replace(., is.na(.), ""))
} else if (length(model_output$singleCI) > 0 & length(model_output$biphasicCI) > 0){
allfits <- biphasicfits %>% full_join(singlephasefits)
allinfo <- data %>% distinct(id) %>%
mutate(Included = ifelse(id %in% allfits$id, "Yes", "No")) %>%
left_join(allfits) %>%
select(id, Included,
Model, ShortLifespan, LongLifespan, SingleLifespan) %>%
mutate_if(is.numeric, round, 2) %>% mutate_all(~replace(., is.na(.), ""))
} else if (length(model_output$singleCI) == 0 & length(model_output$biphasicCI) > 0){
allfits <- biphasicfits
allinfo <- data %>% distinct(id) %>%
mutate(Included = ifelse(id %in% allfits$id, "Yes", "No")) %>%
left_join(allfits) %>%
select(id, Included,
Model, ShortLifespan, LongLifespan) %>%
mutate_if(is.numeric, round, 2) %>% mutate_all(~replace(., is.na(.), ""))
} else if (length(model_output$singleCI) > 0 & length(model_output$biphasicCI) == 0){
allfits <- singlephasefits
allinfo <- data %>% distinct(id) %>%
mutate(Included = ifelse(id %in% allfits$id, "Yes", "No")) %>%
left_join(allfits) %>%
select(id, Included,
Model, SingleLifespan) %>%
mutate_if(is.numeric, round, 2) %>% mutate_all(~replace(., is.na(.), ""))
}
# return information
if (stats & length(model_output$singleCI) > 0 & length(model_output$biphasicCI) > 0) {
return(list(summary = allinfo, biphasicstats = biphasicstats, singlestats = singlestats))
} else if (stats & length(model_output$singleCI) == 0 & length(model_output$biphasicCI) > 0) {
return(list(summary = allinfo, biphasicstats = biphasicstats))
} else if (stats & length(model_output$singleCI) > 0 & length(model_output$biphasicCI) == 0) {
return(list(summary = allinfo, singlestats = singlestats))
} else if (stats & length(model_output$triphasicCI) > 0) {
return(list(summary = allinfo, triphasicstats = triphasicstats))
} else {
return(allinfo)
}
}
|
/scratch/gouwar.j/cran-all/cranData/ushr/R/output_processing.R
|
#' Create data function
#'
#' This function simulates example data that can be used to explore model fitting and plotting within the package. Subjects are assumed to be observed at regular intervals until either the end of the study or they are lost to follow up.
#'
#' @param nsubjects numeric value indicating the number of subjects you want to simulate data for. Default is 10.
#' @param detection_threshold numeric value indicating the detection threshold of the assay used to measure viral load. Measurements below this value will be assumed to represent undetectable viral load levels. Default value is 20.
#' @param censortime numeric value indicating the maximum time point to include in the analysis. Default value is 365.
#' @param max_datapoints numeric value indicating the maximum number of data points collected from any subject. Defaults to 24.
#' @param min_datapoints numeric value indicating the minimum number of data points collected from any subject. Defaults to 6.
#' @param sd_noise numeric value indicating the standard deviation level to be used when adding noise to the simulated data (on the log10 scale). Default value is 0.1
#' @param param_noise numeric vector indicating the standard deviation to be used when selecting parameter values (on the log scale). Order of entries should be: A, delta, B, gamma. Default value is c(1.5, 0.1, 1.5, 0.1).
#' @param mean_params named numeric vector indicating the mean parameter values for the subject decay curves. Default is c(A = 10000, delta = 0.3, B = 10000, gamma = 0.03).
#' @export
#' @examples
#'
#' set.seed(1234567)
#'
#' simulated_data <- simulate_data(nsubjects = 20)
#'
simulate_data <- function(nsubjects = 10, detection_threshold = 20, censortime = 365,
max_datapoints = 24, min_datapoints = 6,
sd_noise = 0.1, param_noise = c(1.5, 0.1, 1.5, 0.1),
mean_params = c(A = 10000, delta = 0.3, B = 10000, gamma = 0.03)){
if (!is.numeric(c(nsubjects, detection_threshold, censortime, max_datapoints, min_datapoints, sd_noise) )) {
stop("The following arguments must be numeric: nsubjects, detection_threshold, censortime, max_datapoints, min_datapoints, sd_noise")
}
if (!is.numeric(mean_params)) {
stop("The 'mean_params' argument must have numeric values for A, delta, B, and gamma.")
}
if (!all(c("A", "delta", "B", "gamma") %in% names(mean_params))) {
stop("The 'mean_params' argument must be a named vector with values for A, delta, B, and gamma.")
}
# 1. Give IDs to all subjects
ids <- paste0("S", 1:nsubjects)
# 2. Simulate parameters for each subject
params <- exp(stats::rnorm(n = nsubjects * length(mean_params),
mean = log(mean_params),
sd = param_noise)) %>%
matrix(byrow = TRUE, ncol = length(mean_params))
colnames(params) <- names(mean_params)
params <- t(apply(params, 1, switch_simulated_params))
paramdat <- params %>% as_tibble() %>% mutate(id = ids)
# 3. Choose number of observations for all subjects
npoints <- sample(min_datapoints:max_datapoints, replace = TRUE, size = nsubjects)
# 4. Choose timing of observations for all subjects (simulate_time),
# 5. then get viral load at those timepoints using the biphasic model and simulated parameters (simulate_vl)
data <- data.frame(index = 1:nsubjects, npoints = npoints, id = ids) %>% group_by(id) %>%
do(simulate_time(.$npoints, censortime, .$id, .$index, max_datapoints)) %>%
do(simulate_vl(timevec = .$time, params = params[.$index[1],], id = .$id)) %>% ungroup()
# 6. Add noise to viral load measurements
simulated_data <- data %>% mutate(vl = add_noise(vl, sd_noise),
vl = ifelse(vl < detection_threshold, detection_threshold/2, vl),
id = as.character(id)) %>% left_join(paramdat)
return(simulated_data)
}
# #' Simulate timepoints for subjects at random.
# #'
# #' This function simulates observed timepoints randomly for each subject.
# #'
# #' @param npoints numeric value indicating the number of observations to be sampled.
# #' @param censortime numeric value indicating the maximum time point to include in the analysis.
# #' @param id subject id. Can be numeric or a character.
# #' @param index numeric identifier for each subject/model combination.
# #' @param max_datapoints numeric value indicating the maximum number of data points collected from any subject.
# #'
# simulate_time_random <- function(npoints, censortime, id, index, max_datapoints){
#
# initial_phase <- floor(censortime/2)
#
# if (npoints > max_datapoints/2) {
# npoints_initial <- floor(npoints/2)
#
# timepoints_initial <- sort(sample(1:initial_phase, size = npoints_initial, replace = FALSE))
# timepoints_late <- sort(sample((initial_phase + 1):censortime, size = npoints - npoints_initial, replace = FALSE))
#
# timepoints <- c(timepoints_initial, timepoints_late)
# } else {
# timepoints <- sort(sample(1:initial_phase, size = npoints, replace = FALSE))
# }
#
# return(data.frame(time = timepoints, id = id, index = index))
# }
#' Simulate timepoints for subjects according to fixed design.
#'
#' This function simulates observed timepoints for each subject according to a fixed sampling design.
#'
#' @param npoints numeric value indicating the number of observations to be sampled.
#' @param censortime numeric value indicating the maximum time point to include in the analysis.
#' @param id subject id. Can be numeric or a character.
#' @param index numeric identifier for each subject/model combination.
#' @param max_datapoints numeric value indicating the maximum number of data points collected from any subject.
#'
simulate_time_fixed <- function(npoints, censortime, id, index, max_datapoints){
sample_sequence <- seq(1, censortime, length.out = max_datapoints)
timepoints <- sample_sequence[1:npoints]
return(data.frame(time = timepoints, id = id, index = index))
}
#' Simulate timepoints for subjects
#'
#' This function chooses the correct function for sampling observation times.
#'
#' @param npoints numeric value indicating the number of observations to be sampled.
#' @param censortime numeric value indicating the maximum time point to include in the analysis.
#' @param id subject id. Can be numeric or a character.
#' @param index numeric identifier for each subject/model combination.
#' @param max_datapoints numeric value indicating the maximum number of data points collected from any subject.
#'
simulate_time <- function(npoints, censortime, id, index, max_datapoints){
output <- simulate_time_fixed(npoints, censortime, id, index, max_datapoints)
return(output)
}
#' Simulate vl for subjects
#'
#' This function simulates observed vl for each subject.
#'
#' @param params named numeric vector of parameter values to simulate the biphasic model.
#' @param timevec numeric vector of observed timepoints.
#' @param id subject id. Can be numeric or a character.
#'
simulate_vl <- function(params, timevec, id){
vl <- get_biphasic(params, timevec)
return(data.frame(time = timevec, vl = vl, id = id))
}
#' Add noise to viral load observations
#'
#' This function adds noise to vl measurements for each subject.
#'
#' @param vl numeric vector of viral load measurements.
#' @param sd_noise numeric value indicating the standard deviation level to be used when adding noise to the simulated data (on the log10 scale).
#'
add_noise <- function(vl, sd_noise){
logvl <- log10(vl) + stats::rnorm(n = length(vl), mean = 0, sd = sd_noise)
return(10^(logvl))
}
#' Switch simulated parameter values
#'
#' This function swaps the simulated delta and gamma values if gamma > delta, and the A and B values if B > A, so that the first phase is always the faster, larger one.
#' @param params matrix of parameter estimates
#'
switch_simulated_params <- function(params){
if (params["gamma"] > params["delta"]) {
tmpRate <- params["gamma"]
params["gamma"] <- params["delta"]
params["delta"] <- tmpRate
}
if (params["B"] > params["A"]) {
tmpConst <- params["B"]
params["B"] <- params["A"]
params["A"] <- tmpConst
}
return(params)
}
|
/scratch/gouwar.j/cran-all/cranData/ushr/R/simulate_data.R
|
#' Prune viral load data
#'
#' This function removes the first viral load data point for specific subjects
#'
#' @param id vector of subject ids
#' @param which_ids vector of ids that should have the first point removed
#' @param subset data frame to which the function should be applied
#'
remove_vl0 <- function(id, which_ids, subset){
if(id[1] %in% which_ids){ return(subset[-1,]) }
else {return(subset)}
}
#' Transform viral load data
#'
#' This function takes the log10 transform of viral load data & checks for NAs
#'
#' @param VL vector of viral load data
#'
transformVL <- function(VL){
VL <- log10(VL)
if (any(is.na(VL))) {
print("Log10 transform produced NAs")
}
return(VL)
}
#' Transform parameters
#'
#' This function transforms parameter estimates according to user defined functions
#'
#' @param params vector of parameters
#' @param param_transform_fn vector of functions for parameter transformation
#'
get_transformed_params <- function(params, param_transform_fn){
transformed_params <- rep(NA, length(params))
for (i in seq_along(params)) {
transformed_params[i] <- param_transform_fn[[i]](params[i])
}
return(transformed_params)
}
|
/scratch/gouwar.j/cran-all/cranData/ushr/R/transformation_fns.R
|
## ----setup, include=FALSE-----------------------------------------------------
knitr::opts_chunk$set(echo = TRUE, fig.height = 6, fig.width = 8, message = FALSE, warning = FALSE)
## ----load---------------------------------------------------------------------
library(ushr)
print(head(actg315raw))
## ----edit---------------------------------------------------------------------
actg315 <- actg315raw %>%
mutate(vl = 10^log10.RNA.) %>%
select(id = Patid, time = Day, vl)
print(head(actg315))
## ----plotdata, fig.height = 8, fig.width = 8----------------------------------
plot_data(actg315, detection_threshold = 100)
## ----fits---------------------------------------------------------------------
model_output <- ushr(data = actg315, detection_threshold = 100, censor_value = 50)
## ----bpfits, fig.width = 6, fig.height = 4------------------------------------
plot_model(model_output, type = "biphasic", detection_threshold = 100)
## ----spfits, fig.width = 3.5, fig.height = 2.5--------------------------------
plot_model(model_output, type = "single", detection_threshold = 100)
## ----summariz-----------------------------------------------------------------
actg315_summary <- summarize_model(model_output, data = actg315, stats = TRUE)
head(actg315_summary$summary)
actg315_summary$biphasicstats
actg315_summary$singlestats
## ----CIs----------------------------------------------------------------------
head(model_output$biphasicCI)
head(model_output$singleCI)
## ----TTSparametric------------------------------------------------------------
TTSparametric <- get_TTS(model_output = model_output, parametric = TRUE,
suppression_threshold = 100)
head(TTSparametric)
TTSparametric %>% summarize(median = median(TTS), SD = sd(TTS), N = n())
## ----TTSnonparametric---------------------------------------------------------
TTSnonparametric <- get_TTS(data = actg315, parametric = FALSE,
suppression_threshold = 100, censor_value = 50)
head(TTSnonparametric)
TTSnonparametric %>% summarize(median = median(TTS), SD = sd(TTS), N = n())
## ----TTSplot, fig.width = 2, fig.height = 2-----------------------------------
plot_TTS(TTSparametric, bins = 6, textsize = 7)
plot_TTS(TTSnonparametric, bins = 6, textsize = 7)
|
/scratch/gouwar.j/cran-all/cranData/ushr/inst/doc/Vignette.R
|
---
title: "ushr: understanding suppression of HIV in R"
author: "Sinead E. Morris"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{ushr}
%\VignetteEngine{knitr::rmarkdown}
\usepackage[utf8]{inputenc}
bibliography: HIV.bib
---
<script type="text/x-mathjax-config">
MathJax.Hub.Config({
TeX: { equationNumbers: { autoNumber: "AMS" } }
});
</script>
```{r setup, include=FALSE}
knitr::opts_chunk$set(echo = TRUE, fig.height = 6, fig.width = 8, message = FALSE, warning = FALSE)
```
## Introduction
In 2017, HIV/AIDS was responsible for the deaths of one million people globally, including 50,000 children less than one year old [@GBD2017paper; @GBD2017web]. Although mathematical modeling has provided important insights into the dynamics of HIV infection during anti-retroviral treatment (ART), there is still a lack of accessible tools for researchers unfamiliar with modeling techniques to apply them to their own datasets.
Here we present `ushr`, an open-source R package that models the decline of HIV during ART using a popular mathematical framework. `ushr` can be applied to longitudinal data of viral load measurements, and automates all stages of the model fitting process. By mathematically fitting the data, important biological parameters can be estimated, including the lifespans of short and long-lived infected cells, and the time to reach viral suppression below a defined detection threshold. The package also provides visualization and summary tools for fast assessment of model results.
More generally, `ushr` enables researchers without a strong mathematical or computational background to model the dynamics of HIV using longitudinal clinical data. Increasing accessibility to such methods may facilitate quantitative analysis across a wide range of independent studies, so that greater insights on HIV infection and treatment dynamics may be gained.
## Citing this package
Citation information can be found using `citation("ushr")`; the package paper is open access and available at [BMC Bioinformatics](https://rdcu.be/b1yZU).
## Getting further information
If you encounter any bugs related to this package please contact the package author directly. Additional descriptions of the model and analyses performed by this package are available in [Morris et al. (2020) BMC Bioinformatics](https://rdcu.be/b1yZU). Further details on the mathematical theory can also be found in the references cited below. <!--Package documentation is also available from CRAN-->
## Background
### Guide to the mathematical model
HIV decline in a patient on ART is typically described using ordinary differential equations (ODEs) that characterize the production and spread of virus by infected target cells, such as CD4 T cells [@perelson1997a; @wu1999biometrics; @Shet2016; @perelson1996hiv; @nowak2000book]. Assuming ART completely blocks viral replication, and that viral dynamics occur on a faster timescale than those of infected cells, one can obtain the following expression for the timecourse of viral load, $V$, during treatment
\begin{equation}
V(t) = A\exp(-\delta t) + B\exp(- \gamma t).\label{biphasic}
\end{equation}
Here $\delta$ and $\gamma$ are the death rates of short and long-lived infected target cells, respectively [@Shet2016]. The parameters $A$ and $B$ are composite constants without direct interpretation; however, $A + B$ represents the initial viral load (i.e. $V(t = 0)$), and $A/(A+B)$ can be understood as the proportion of infected cells at ART initiation that are short-lived.
Eqn. $\ref{biphasic}$ is referred to as the biphasic model. According to this, viral load initially decays rapidly, reflecting the loss of short-lived infected cells (at rate $\delta$), and then enters a second, slower decline phase reflecting the loss of longer-lived infected cells (at rate $\gamma$). For patient data exhibiting only one decline phase (for example, due to sparse or delayed viral load measurements), one can use a single phase version of Eqn. $\ref{biphasic}$ given by
\begin{equation}
V(t) = \hat{B}\exp(- \hat{\gamma} t),\label{singlephase}
\end{equation}
where there are no assumptions on whether decay reflects the fast or slow phase of virus suppression.
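To make the shapes of these curves concrete, the short sketch below evaluates Eqn. $\ref{biphasic}$ and Eqn. $\ref{singlephase}$ for one illustrative parameter set. The parameter values are assumptions chosen only for plotting; they are not estimates from any data set or defaults used by the package.
```{r modelsketch, fig.width = 4, fig.height = 3}
# Illustrative parameter values (assumptions, not estimates from data)
A <- 1e4; delta <- 0.3 # fast phase: initial contribution and decay rate
B <- 1e3; gamma <- 0.03 # slow phase: initial contribution and decay rate
times <- seq(0, 100, by = 1)
biphasic <- A * exp(-delta * times) + B * exp(-gamma * times)
single <- B * exp(-gamma * times)
# V(0) = A + B; proportion of short-lived infected cells at ART initiation = A / (A + B)
c(V0 = A + B, proportion_short = A / (A + B))
plot(times, log10(biphasic), type = "l",
xlab = "Days on ART", ylab = "log10 viral load")
lines(times, log10(single), lty = 2)
```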
### Time to suppression
For each individual, the time to reach virologic suppression below a defined threshold ('time to suppression', TTS) can be estimated using both parametric and non-parametric methods. For the parametric approach, TTS is calculated as the first time at which $V(t) = x$, where $x$ is the suppression threshold, and $V(t)$ is given by Eqn. $\ref{biphasic}$ for the biphasic model and Eqn. $\ref{singlephase}$ for the single phase model. For the non-parametric approach, we first apply linear interpolation between the first measurement below the detection threshold and the preceding measurement. TTS is then defined as the time at which the interpolation line crosses the suppression threshold.
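As a rough illustration of the non-parametric approach, the sketch below interpolates between the last measurement above a suppression threshold and the first measurement below it. All values (time points, viral loads, and the threshold) are hypothetical and chosen only for illustration, and the interpolation here is on the raw viral load scale, which may differ from the package's internal choice; the `get_TTS()` function handles this calculation on real data.
```{r ttssketch}
suppression_threshold <- 100 # hypothetical suppression threshold (copies/ml)
# Hypothetical measurements bracketing suppression
time_before <- 28; vl_before <- 350 # last measurement above the threshold
time_after <- 56; vl_after <- 50 # first measurement below the threshold
# Linear interpolation between the two bracketing measurements:
# the time at which the interpolation line crosses the threshold
TTS <- approx(x = c(vl_before, vl_after),
y = c(time_before, time_after),
xout = suppression_threshold)$y
TTS
```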
## Implementation
### Data preparation
Raw clinical data is often noisy and sparse, making it unsuitable for mathematical analysis of viral decline, and eventual suppression, during ART. Therefore, prior to any analysis, data must be processed to exclude individual trajectories that cannot be appropriately modeled. In `ushr`, we only consider individuals who reach suppression below a pre-defined threshold, within a particular timeframe (both specified by the user). By default, suppression is defined as having at least one viral load measurement below the detection threshold of the measurement assay, $d$. Alternatively, the user may define suppression as sustaining at least two consecutive measurements below $d$. Following previous work, all measurements below the detection threshold are set to $d/2$ [@wu1999characterization]. To isolate the kinetics leading to initial suppression, viral load trajectories are truncated after the first measurement below $d$.
To distinguish 'true' decay dynamics from instances of viral rebound (due to factors such as drug resistance or poor treatment adherence), we only consider viral load data that maintain a consistent decreasing trend towards suppression, such that each measurement is within a pre-defined range of the previous measurement. This buffer range ensures that transient increases in viral load (arising from noise and measurement error) do not exclude subjects from the analysis. We also allow initial increases in viral load (for example, arising from pharmacological delays in drug action) by defining the beginning of each individual's decreasing sequence as the maximum value from a pre-defined range of initial measurements.
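The sketch below illustrates the truncation and censoring rules on a single hypothetical trajectory. It is a simplified stand-in for the package's internal processing (which also applies the inclusion and buffer criteria described above), and all values are made up for illustration.
```{r processsketch}
d <- 100 # hypothetical assay detection threshold
one_subject <- data.frame(time = c(0, 7, 14, 28, 56, 84),
vl = c(40000, 9000, 800, 50, 60, 45))
below <- one_subject$vl < d
keep <- cumsum(below) <= 1 # truncate after the first measurement below d
processed <- one_subject[keep, ]
processed$vl[processed$vl < d] <- d / 2 # censored measurements set to d/2
processed
```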
### Model fitting
Parameter estimates with 95\% confidence intervals are obtained for each subject by fitting either the biphasic or single phase model to the corresponding viral load data using maximum likelihood optimization (as described previously [@hogan2015temporal]). Data are log$_{10}$-transformed prior to fitting and optimization is performed using `optim()`. After fitting, we use the resulting parameter estimates to calculate the lifespans of HIV-infected cells: $1/\delta$ and $1/\gamma$ for short and long-lived infected cells from the biphasic model, respectively, and $1/\hat{\gamma}$ for the single phase model.
To improve parameter identifiability, only subjects with a minimum number of measurements above the detection threshold are fit using the biphasic or single phase models. These can be specified by the user, but we recommend at least six observations for the biphasic model and three for the single phase model. Individuals with fewer measurements are not included in the model fitting procedure, although they are still included in non-parametric TTS calculations.
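To give a sense of what the optimization step involves, the sketch below fits the single phase model to simulated noisy measurements by minimizing a normal negative log-likelihood on the log$_{10}$ scale with `optim()`. This is a simplified stand-in for the package's fitting routine (it ignores censoring, confidence intervals, and the automated choice between models), and all values are simulated for illustration.
```{r optimsketch}
set.seed(1)
# Simulate noisy single phase decay, V(t) = B * exp(-gamma * t), observed on the log10 scale
B_true <- 5e4; gamma_true <- 0.05
t_obs <- seq(0, 80, by = 10)
log10_vl <- log10(B_true * exp(-gamma_true * t_obs)) + rnorm(length(t_obs), sd = 0.1)
# Negative log-likelihood assuming normal errors on the log10 scale;
# parameters are log-transformed so the optimizer keeps them positive
negloglik <- function(pars, t, y) {
pred <- log10(exp(pars[1]) * exp(-exp(pars[2]) * t))
-sum(dnorm(y, mean = pred, sd = exp(pars[3]), log = TRUE))
}
fit <- optim(par = c(log(1e4), log(0.1), log(0.5)),
fn = negloglik, t = t_obs, y = log10_vl)
c(B = exp(fit$par[1]), gamma = exp(fit$par[2])) # estimates on the natural scale
```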
<!--Finally, as is common in sparse clinical data, some cases may have large differences in viral load between the first and second measurements, suggesting an unobserved transition from the fast to the slow decay phase. To prevent such occurrences biasing the estimated slope of decay when fitting the single phase model, we remove the first measurement if the difference in viral load is greater than a specified threshold. -->
## Quick Start Example
To illustrate basic usage of the package and allow users to explore functionality, we include a publicly available data set from the ACTG315 clinical trial. Briefly, the raw data consist of longitudinal HIV viral load measurements from 46 chronically-infected adults up to 28 weeks following ART initiation. The detection threshold was 100 copies/ml and observations are recorded as $\log_{10}$ RNA copies/ml. These data are available at https://sph.uth.edu/divisions/biostatistics/wu/datasets/ACTG315LongitudinalDataViralLoad.htm (date originally accessed: 15 September 2019), and have been described previously [@Lederman1998; @wu1999biometrics; @Connick2000].
### Data exploration
To begin, we load the package and print the first six rows to identify our columns of interest; these are the viral load observations ('log10.RNA.'), the timing of these observations ('Day'), and the identifier for each subject ('Patid').
```{r load}
library(ushr)
print(head(actg315raw))
```
Since `ushr` requires absolute viral load (VL) measurements, and specific column names ('vl', 'time', 'id'), we first back-transform the $\log_{10}$ viral load measurements into absolute values, and rename the column headings.
```{r edit}
actg315 <- actg315raw %>%
mutate(vl = 10^log10.RNA.) %>%
select(id = Patid, time = Day, vl)
print(head(actg315))
```
We can then visualize these data using the `plot_data()` function. The `detection_threshold` argument defines the detection threshold of the measurement assay.
```{r plotdata, fig.height = 8, fig.width = 8}
plot_data(actg315, detection_threshold = 100)
```
Each panel represents a different individual, the points are the viral load measurements, and the dashed horizontal line is the assay detection threshold. From this we can see that the data is indeed noisy, individuals have different numbers of available observations, and only a subset suppress viral load below the detection threshold.
### Model fitting and output visualization
To fit the model to these data using just one line of code we call the `ushr()` function. This processes the data to filter out any individuals who do not meet the inclusion criteria defined above, and then fits either the single or biphasic model to each remaining trajectory, depending on the number of available observations (see the Background for more details). Note that the data processing step can be omitted using the `filter = FALSE` argument (default is TRUE); however, this is not recommended unless rigorous processing efforts have already been made. Note also that the `censor_value` argument specifies how measurements below the detection threshold should be treated (here we set them to half of the detection threshold in line with previous work [@wu1999characterization]).
```{r fits}
model_output <- ushr(data = actg315, detection_threshold = 100, censor_value = 50)
```
With the fitted model output, we can then plot both the biphasic and single phase fits as follows
```{r bpfits, fig.width = 6, fig.height = 4}
plot_model(model_output, type = "biphasic", detection_threshold = 100)
```
```{r spfits, fig.width = 3.5, fig.height = 2.5}
plot_model(model_output, type = "single", detection_threshold = 100)
```
Again, each panel represents a different individual, points are the original data, and solid lines are the corresponding best-fit model. We can see that twelve subjects were successfully fit with the biphasic model, and four with the single phase model. Although some single phase subjects had sufficient data to fit the biphasic model (i.e. at least six observations), the resulting 95\% parameter confidence intervals were either unattainable or sufficiently wide to indicate an unreliable fit. This can occur, for example, when one of the decay phases is poorly documented (i.e. has few data points). As a result, the subjects were re-fit with the single phase model. This re-fitting step is automated in the package; however, the user can control the size of confidence interval above which a biphasic fit is deemed unreliable using the argument `CI_max_diff` in `ushr()`.
We can also visualize a summary of the fitting procedure and parameter estimates using `summarize_model()`. This creates a list with the following elements: (i) a summary of which subjects were successfully fit using the biphasic or single phase models, with their corresponding infected cell lifespan estimates (`summary`); (ii) summary statistics for the estimated parameters from the biphasic model (`biphasicstats`); and (iii) summary statistics for the estimated parameters from the single phase model (`singlestats`).
```{r summariz}
actg315_summary <- summarize_model(model_output, data = actg315, stats = TRUE)
head(actg315_summary$summary)
actg315_summary$biphasicstats
actg315_summary$singlestats
```
For a better understanding of parameter identifiability, one can also print the parameter estimates for each individual and model, along with their corresponding 95\% confidence intervals.
```{r CIs}
head(model_output$biphasicCI)
head(model_output$singleCI)
```
## Time to suppression
In addition to fitting the biphasic and single phase models, we can calculate the time to viral suppression (TTS) using both the parametric and non-parametric methods (see the Background for more details). Here we set the suppression threshold to be the same as the detection threshold (i.e. we want to know when viral load drops below the detection threshold of the assay). First, to get parametric estimates from the fitted model output, we use `get_TTS()` with the argument `parametric = TRUE`. We can subsequently obtain median and SD statistics, and the total number of subjects included in the analysis, using the `summarize()` function from `dplyr`.
```{r TTSparametric}
TTSparametric <- get_TTS(model_output = model_output, parametric = TRUE,
suppression_threshold = 100)
head(TTSparametric)
TTSparametric %>% summarize(median = median(TTS), SD = sd(TTS), N = n())
```
Alternatively, to calculate non-parametric TTS estimates, we set the argument `parametric = FALSE`, and supply the original data using `data = actg315`, rather than the fitted model output. The estimates are similar to those for the parametric method but, given the less stringent conditions for inclusion in the non-parametric analysis (there is no minimum requirement on the number of observations), we are able to estimate TTS for more subjects.
```{r TTSnonparametric}
TTSnonparametric <- get_TTS(data = actg315, parametric = FALSE,
suppression_threshold = 100, censor_value = 50)
head(TTSnonparametric)
TTSnonparametric %>% summarize(median = median(TTS), SD = sd(TTS), N = n())
```
We can also plot the histograms for both methods using `plot_TTS()`.
```{r TTSplot, fig.width = 2, fig.height = 2}
plot_TTS(TTSparametric, bins = 6, textsize = 7)
plot_TTS(TTSnonparametric, bins = 6, textsize = 7)
```
## Additional functionality
`ushr` provides additional functionality beyond the examples documented here. Notable examples are:
* For ART that includes an integrase inhibitor, a triphasic exponential model can be fit using `ushr_triphasic()` (see `?ushr_triphasic()`); this may be more appropriate than the biphasic model [@Cardozo2017]. Results can be visualized using the same plotting/summary functions as above.
* Noisy clinical data can be simulated from an underlying biphasic model using the `simulate_data()` function, as sketched briefly below.
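For example, a small simulated data set can be generated and inspected as follows; the seed and number of subjects are arbitrary choices.
```{r simsketch}
set.seed(1234567)
simulated <- simulate_data(nsubjects = 5)
head(simulated)
```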
Further details of all functions and user-specific customizations can be found in the documentation.
## References
|
/scratch/gouwar.j/cran-all/cranData/ushr/inst/doc/Vignette.Rmd
|
#' Convert a data frame to the TAXSIM 35 output.
#'
#' This function takes a data set that is in the format required for \code{\link{taxsim_calculate_taxes}},
#' checks that it is in the proper format for TAXSIM 35, and then cleans it so it can be sent to TAXSIM 35.
#' This function is useful for troubleshooting; it is not needed to calculate taxes. If you continue
#' receiving unreasonable errors from \code{\link{taxsim_calculate_taxes}}, you can run this function on your
#' data set, save the resulting data frame as a csv file, and then upload the file to
#' \href{http://taxsim.nber.org/taxsim35/}{TAXSIM 35}.
#'
#' \code{\link{create_dataset_for_taxsim}} takes the same column names as \href{http://taxsim.nber.org/taxsim35/}{TAXSIM 35}.
#'
#' @param .data Data frame containing the information that will be used to calculate taxes.
#' This data set will be sent to TAXSIM. Data frame must have specified column names and data types.
#'
#' @return A data frame that that can be manually uploaded to \href{http://taxsim.nber.org/taxsim35/}{TAXSIM 35}.
#'
#' @examples
#'
#' family_income <- data.frame(
#' taxsimid = c(1, 2),
#' state = c('North Carolina', 'NY'),
#' year = c(2015, 2015),
#' mstat = c('single', 'married, jointly'),
#' pwages = c(10000, 100000),
#' page = c(26, 36)
#' )
#'
#' family_taxes <- create_dataset_for_taxsim(family_income)
#'
#'
#' @export
create_dataset_for_taxsim <- function(.data) {
state_colname <- 'state'
filing_status_colname <- 'mstat'
cols <- colnames(.data)
# only keep TAXSIM columns
cols_in_taxsim_and_df <- intersect(cols, taxsim_cols())
.data <- .data[cols_in_taxsim_and_df]
# return an error if any required columns have missing values (except for state)
for (col in c('taxsimid', 'year', 'mstat')) {
if (any(is.na(.data[[col]]))) stop(paste0("No ", col, " values can be NA."), call. = FALSE)
}
# convert all NA values to 0 for non-required items
cols_to_convert <- taxsim_cols()[5:length(taxsim_cols())]
.data <- convert_na(.data, cols_to_convert)
# make sure all the data is of the proper type
# function will either stop the running of a function with text of the error
# or print that everything is OK
check_data(.data, cols, state_colname)
# make sure all column that should be numeric are in fact numeric
# if so, also convert them to integer
.data <- check_numeric(.data, cols)
# if state is character, convert to SOI codes
# if state is numeric, ensure all values are SOI codes
if (state_colname %in% cols) {
if (is.character(.data[[state_colname]])) {
.data[[state_colname]] <- get_state_soi(.data[[state_colname]])
} else if (is.numeric(.data[[state_colname]])) {
# identify SOI codes in the data that are not actual SOI codes
not_soi_codes <- setdiff(unique(.data[[state_colname]]), soi_and_states_crosswalk)
# stop function if we find SOI codes in the data that are not actual SOI codes
if (length(not_soi_codes) > 0) {
stop(paste('The following SOI codes are in your data, but are not actual SOI codes: ', paste0(not_soi_codes, collapse = " "), collapse = " "))
}
}
# convert missing state values to 0
.data[[state_colname]][is.na(.data[[state_colname]])] <- 0
}
# make sure all filing_status values are proper
# and if character descriptions are used for filing status, convert to number
if (filing_status_colname %in% cols) {
.data[[filing_status_colname]] <- check_filing_status(.data[[filing_status_colname]])
}
return(.data)
}
#' @title
#' Calculate state and federal taxes using TAXSIM 35.
#'
#' @description
#' This function calculates state and federal income taxes using the TAXSIM 35 tax simulator.
#' See \url{http://taxsim.nber.org/taxsim35/} for more information on TAXSIM 35. The function uses
#' a compiled WebAssembly (wasm) version of the TAXSIM app that is part of the package to calculate taxes.
#' Details about generating the wasm file can be found here: \url{https://github.com/tmm1/taxsim.js}
#'
#' @param .data Data frame containing the information that will be used to calculate taxes.
#' This data set will be sent to TAXSIM. Data frame must have specified column names and data types.
#' @param marginal_tax_rates Variable to use when calculating marginal tax rates. One of 'Wages', 'Long Term Capital Gains',
#' 'Primary Wage Earner', or 'Secondary Wage Earner'. Default is 'Wages'.
#' @param return_all_information Boolean (TRUE or FALSE). Whether to return all information from TAXSIM (TRUE),
#' or only key information (FALSE). Returning all information returns 42 columns of output, while only
#' returning key information returns 9 columns. It is faster to download results with only key information.
#'
#' @section Formatting your data:
#'
#' In the input data set, \code{.data}, each column is a tax characteristic (year, filing status, income, etc.)
#' and each row is a tax filing unit.
#'
#' Columns should take the same names, and fulfill the same requirements, as those needed for \href{http://taxsim.nber.org/taxsim35/}{TAXSIM 35}.
#' Potential columns, with their names and descriptions, can be found at: \href{http://taxsim.nber.org/taxsim35/}{http://taxsim.nber.org/taxsim35/}.
#'
#' The following columns are required: \code{taxsimid}, \code{year}, \code{mstat}, and \code{state}.
#'
#' There are two points where \code{\link{taxsim_calculate_taxes}} departs from \href{http://taxsim.nber.org/taxsim35/}{TAXSIM 35}.
#'
#' 1. For filing status, \code{mstat}, users can either enter the number allowed by \href{http://taxsim.nber.org/taxsim35/}{TAXSIM 35}
#' or one of the following descriptions:
#'
#' - "single"
#' - "married, jointly"
#' - "married, separately"
#' - "dependent child"
#' - "head of household"
#'
#' 2. For \code{state}, users can either enter the SOI code, as required by \href{http://taxsim.nber.org/taxsim35/}{TAXSIM 35},
#' the two-letter state abbreviation, or the full name of the state.
#'
#' It is OK if the input data set, \code{.data}, contains columns in addition to the ones that are used by \href{http://taxsim.nber.org/taxsim35/}{TAXSIM 35}.
#'
#' @return
#'
#' The output data set contains all the information returned by \href{http://taxsim.nber.org/taxsim35/}{TAXSIM 35},
#' using the same column names. Descriptions of these columns can be found at the bottom of the page
#' containing \href{http://taxsim.nber.org/taxsim35/}{TAXSIM 35's documentation}.
#'
#' @examples
#'
#' family_income <- data.frame(
#' taxsimid = c(1, 2),
#' state = c('North Carolina', 'NY'),
#' year = c(2015, 2015),
#' mstat = c('single', 'married, jointly'),
#' pwages = c(10000, 100000),
#' page = c(26, 36)
#' )
#'
#'
#' family_taxes <- taxsim_calculate_taxes(family_income)
#'
#' merge(family_income, family_taxes, by = 'taxsimid')
#'
#' @section Giving credit where it is due:
#'
#' The NBER's \href{http://taxsim.nber.org/taxsim35/}{TAXSIM 35} tax simulator does all tax
#' calculations. This package simply lets users interact with the tax simulator through R. Therefore,
#' users should cite the TAXSIM 35 tax simulator when they use this package in their work:
#'
#' Feenberg, Daniel Richard, and Elizabeth Coutts, An Introduction to the TAXSIM Model,
#' Journal of Policy Analysis and Management vol 12 no 1, Winter 1993, pages 189-194.
#'
#' @export
taxsim_calculate_taxes <- function(.data, marginal_tax_rates = 'Wages', return_all_information = FALSE) {
# check parameter options
# must change this function if parameters are added
check_parameters(.data, marginal_tax_rates, return_all_information)
# save input ID numbers as object, so we can make sure the output ID numbers are the same
input_s <- .data$taxsimid
# create data set to send to taxsim
.data <- create_dataset_for_taxsim(.data)
# add 2 to column if we need all columns, otherwise add 0 for only the default columns
idtl <- if (return_all_information) 2 else 0
.data[['idtl']] <- idtl
# add marginal tax rate calculation
.data[['mtr']] <- convert_marginal_tax_rates(marginal_tax_rates)
stop_error_message <- paste0(
"There was a problem in calculating the taxes. Please check the format of your data.\n",
"If the problem persists, you can try manually uploading the data to TAXSIM as an avenue of troubleshooting.\n",
"See the following address for more information: https://www.shaneorr.io/r/usincometaxes/articles/send-data-to-taxsim.html"
)
# calculate taxes using wasm
from_taxsim <- tryCatch(
error = function(cnd) stop(stop_error_message, call. = FALSE),
calculate_taxes_wasm(.data)
)
# add column names to the TAXSIM columns that do not have names
from_taxsim <- clean_from_taxsim(from_taxsim)
# check that input and output data sets have the same unique ID numbers
output_s <- from_taxsim$taxsimid
if (!setequal(input_s, output_s)) {
stop(paste0(
"The input and output data sets should have the exact same numbers for `taxsimid` and they do not.",
"\nThis could mean that your input data was not in the proper format, producing problems in the output.",
"\nPlease check your input data.",
"\nSee the following link for formatting information: https://www.shaneorr.io/r/usincometaxes/articles/taxsim-input.html"
)
)
}
return(from_taxsim)
}
|
/scratch/gouwar.j/cran-all/cranData/usincometaxes/R/calculate_taxes.R
|
#' Financial and household characteristics of 1,000 taxpayer units.
#'
#' A data set containing financial and household characteristics of 1,000 taxpayer units.
#' The data set was randomly generated and does not reflect real data.
#' It is formatted and ready for use in the \code{usincometaxes} package.
#'
#' @format A data frame with 1,000 rows and 16 variables. Variable definitions can be
#' found in the following article: \url{https://www.shaneorr.io/r/usincometaxes/articles/taxsim-input.html}
#'
#' @source Created through random data generation.
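#'
#' @examples
#' # a quick look at the bundled example data
#' data(taxpayer_finances)
#' head(taxpayer_finances)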
'taxpayer_finances'
|
/scratch/gouwar.j/cran-all/cranData/usincometaxes/R/data.R
|
#' usincometaxes: A package for calculating state and federal income taxes in the United States.
#'
#' The \code{usincometaxes} package is a single-function package that calculates state and federal income
#' taxes in the United States. It relies on the NBER's TAXSIM 35 tax calculator for the calculations.
#' See \url{https://taxsim.nber.org/taxsim35/} for more information on TAXSIM 35.
'_PACKAGE'
|
/scratch/gouwar.j/cran-all/cranData/usincometaxes/R/usincometaxes.R
|
#' Ensure input data set has required fields and data types are correct
#'
#' Check to ensure all the required columns are present and data types are correct. This function binds all the checks through helper functions.
#'
#' @param .data A data frame containing the input parameters for the TAXSIM 35 program. The columns can be in any order.
#' @param cols The column names, as a string, in the data set `.data`
#' @param state_column_name The column name of the state column.
#'
#' @return The function does not return a value. It either produces a stop message describing the error or returns invisibly when all checks pass.
#' @keywords internal
check_data <- function(.data, cols, state_column_name) {
# make sure all the required columns are present
check_required_cols(cols)
# ensure the taxsimid column is an integer and contains unique values
check_taxsimid(.data[['taxsimid']])
# some numeric columns must have all values greater than zero
check_greater_zero(.data, cols)
# make sure state names are either two letter abbreviations or full name of state
# only if state is a character
if (is.character(.data[['state']])) {
check_state(.data, cols, state_column_name)
}
# make sure that no single tax filers have spouse ages or income
check_spouse(.data, cols)
# tax year must be between the following two values
# tax year is required, so we don't need to check whether it exists
if (!all(.data$year >= 1960 & .data$year <= 2023)) {
stop("`year` must be a numeric value between 1960 and 2023.", call. = FALSE)
}
return(invisible(NULL))
}
#' Check state column
#'
#' State should be either a two letter abbreviation or full state name. Check to make sure this is true.
#'
#' @param .data A data frame containing the input parameters for the TAXSIM 35 program. The columns can be in any order.
#' @param cols The column names, as a string, in the data set `.data`.
#' @param state_column_name The column name of the state column.
#'
#' @keywords internal
check_state <- function(.data, cols, state_column_name) {
# state should either be the two letter abbreviation or full name
# if state is a character
if (is.character(.data[[state_column_name]])) {
proper_states <- c(datasets::state.abb, datasets::state.name, "DC", "District of Columbia", "No State")
# make the state list and entered data lower case so a state is not rejected simply because of capitalization
proper_states <- tolower(proper_states)
entered_states <- tolower(.data[[state_column_name]])
if (!all(entered_states %in% proper_states)) {
stop("One of your state names is unrecognizable. Names should either be the full name, two letter abbreviation, or SOI code.", call. = FALSE)
}
} else if (is.numeric(.data[[state_column_name]])) {
# check input SOIs against crosswalk
wrong_soi <- setdiff(.data[[state_column_name]], soi_and_states_crosswalk)
# produce an error if there are any wrong SOIs
if (length(wrong_soi) > 0) {
soi_string <- paste0(wrong_soi, collapse = ", ")
stop(paste0("The following state SOI code is nto a valid SOI: ", soi_string), call. = FALSE)
}
}
return(invisible(NULL))
}
#' Ensure the required columns are present
#'
#' @param cols The column names, as a string, in the data set `.data`
#'
#' @keywords internal
check_required_cols <- function(cols) {
required_columns <- taxsim_cols()[1:3]
required_cols_present <- sort(intersect(required_columns, cols))
all_required_present <- isTRUE(all.equal(sort(required_columns), sort(required_cols_present)))
if (!all_required_present) {
missing_column <- setdiff(required_columns, required_cols_present)
stop(paste0("The required column `", missing_column, "`is not present in `.data`."), call. = FALSE)
} else {
return(NULL)
}
}
#' Ensure values for filing status 'mstat' are proper.
#'
#' @param filing_status_vector Column, as a vector, containing filing status
#'
#' @keywords internal
check_filing_status <- function(filing_status_vector) {
# mapping of strings to integers
# if this changes, need to change test in test-calculate_taxes, where we copy and paste this
filing_status_values <- c(
'single' = 1,
'married, jointly' = 2,
'married, separately' = 6,
'dependent child' = 8,
'head of household' = 1
)
# return an error if any of marital status are NA
if (any(is.na(filing_status_vector))) stop("No mstat values can be NA.")
if (is.numeric(filing_status_vector)) {
# make sure that all values are one of the valid options
diff_names <- setdiff(unique(filing_status_vector), filing_status_values)
if (length(diff_names) > 0) {
stop(paste('The following filing status (mstat) are in your data, but are not legitimate values: ', paste0(diff_names, collapse = " "), collapse = " "))
}
} else if (is.character(filing_status_vector)) {
# make sure that all values are one of the valid options
diff_names <- setdiff(unique(tolower(filing_status_vector)), names(filing_status_values))
if (length(diff_names) > 0) {
stop(paste('The following filing status (mstat) are in your data, but are not legitimate values: ', paste0(diff_names, collapse = " "), collapse = " "))
}
filing_status_vector <- tolower(filing_status_vector)
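# map the descriptive filing statuses to TAXSIM's numeric codes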
filing_status_vector[filing_status_vector %in% c('single', 'head of household')] <- 1
filing_status_vector[filing_status_vector == 'married, jointly'] <- 2
filing_status_vector[filing_status_vector == 'married, separately'] <- 6
filing_status_vector[filing_status_vector == 'dependent child'] <- 8
filing_status_vector[filing_status_vector == 'head of household'] <- 1
}
return(filing_status_vector)
}
#' Check numeric columns
#'
#' Checks that each column which should be numeric or integer is numeric or integer.
#'
#' @param .data A data frame containing the input parameters for the TAXSIM 35 program. The columns can be in any order.
#' @param cols The column names, as a string, in the data set `.data`.
#'
#' @keywords internal
check_numeric <- function(.data, cols) {
# all numeric columns should be 'double' or integer
numeric_cols <- names(taxsim_cols())[-non_numeric_col()]
numeric_data_types <- c('numeric', 'integer')
numeric_cols_in_data <- intersect(numeric_cols, cols)
# create boolean vector of each column that should be numeric and whether it is numeric in the data set
column_datatypes <- sapply(.data[numeric_cols_in_data], class)
column_datatypes_are_numeric <- column_datatypes %in% numeric_data_types
# if all the should-be numeric columns are not numeric, create stop message that contains the columns
# not of the proper data type
if (!all(column_datatypes_are_numeric)) {
col_wrong_datatype <- paste0(names(column_datatypes[!column_datatypes_are_numeric]), collapse = '; ')
stop(paste0("The following columns should be numeric: ", col_wrong_datatype), call. = FALSE)
} else {
# convert all numeric values to integer and return dataframe
.data[numeric_cols_in_data] <- as.data.frame(lapply(.data[numeric_cols_in_data], as.integer))
return(.data)
}
}
#' Check that columns are greater than zero
#'
#' Some columns must have all values greater than or equal to zero. Check to make sure this is true.
#'
#' @param .data A data frame containing the input parameters for the TAXSIM 35 program. The columns can be in any order.
#' @param cols The column names, as a string, in the data set `.data`.
#'
#' @keywords internal
check_greater_zero <- function(.data, cols) {
cols_greater_zero <- names(taxsim_cols())[greater_zero_cols()]
greater_zero_cols_in_data <- intersect(cols_greater_zero, cols)
test_greater_zero <- function(test_data) all(test_data >= 0 | is.na(test_data))
are_cols_greater_zero <- sapply(.data[greater_zero_cols_in_data], test_greater_zero)
# if all values are not greater than zero, stop and provide message
if (!all(are_cols_greater_zero)) {
col_above_zero <- paste0(greater_zero_cols_in_data[!are_cols_greater_zero], collapse = '; ')
stop(paste0(
"The following columns have values less than zero: ",
col_above_zero,
"\nAll values in these columns should be greater than zero."
),
call. = FALSE)
} else {
return(invisible(NULL))
}
}
#' Check that the `taxsimid` column is an integer and every value is unique.
#'
#' The `taxsimid` column must contain unique whole numbers. Check to make sure this is true.
#'
#' @param taxsimid_col Vector that is the `taxsimid` column. This will always be the column `taxsimid` in the input data frame.
#'
#' @keywords internal
check_taxsimid <- function(taxsimid_col) {
# make sure taxsimid is an integer
id_remainders <- c(taxsimid_col) %% 1
if (!all(id_remainders == 0)) {
stop("taxsimid must be whole numbers.", call. = FALSE)
}
# make sure every value is unique
number_unique_values <- length(unique(taxsimid_col))
total_values <- length(taxsimid_col)
if (number_unique_values != total_values) {
stop("taxsimid must contain unique values.", call. = FALSE)
} else {
return(invisible(NULL))
}
}
#' Check input parameters
#'
#' Check that the input parameters to `taxsim_calculate_taxes` are of the proper type
#' The parameters to this function should be the same as those to `taxsim_calculate_taxes`
#'
#' @keywords internal
check_parameters <- function(.data, marginal_tax_rates, return_all_information) {
marginal_rates_options <- c('Wages', 'Long Term Capital Gains', 'Primary Wage Earner', 'Secondary Wage Earner')
marginal_rates_stop_message <- paste0("`marginal_tax_rates` parameter must be one of: '", paste0(marginal_rates_options, collapse = "', '"), "'.")
if (!is.data.frame(.data)) stop("`.data` parameter must be a data frame.", call. = FALSE)
if (!(return_all_information %in% c(T, F))) stop('`return_all_information` parameter must be either TRUE or FALSE.', call. = FALSE)
if (!(marginal_tax_rates %in% marginal_rates_options)) stop(marginal_rates_stop_message, call. = FALSE)
return(invisible(NULL))
}
#' Ensure single taxpayers do not have spouse ages or income
#'
#' @param .data A data frame containing the input parameters for the TAXSIM 35 program. The columns can be in any order.
#' @param cols The column names, as a string, in the data set `.data`.
#'
#' @keywords internal
check_spouse <- function(.data, cols) {
if ('sage' %in% cols) {
if (!('page' %in% cols)) stop("You have `sage` column, but not `page`. You need to add `page`.", call. = FALSE)
if (any(.data[['mstat']] == 1 & .data[['sage']] > 0)) {
stop("You have a 'single' filer with a `sage` greater than 0. All single filers must have spouse ages of 0", call. = FALSE)
}
}
if ('swages' %in% cols) {
if (!('pwages' %in% cols)) stop("You have `swages` column, but not `pwages`. You need to add `pwages`.", call. = FALSE)
}
return(invisible(NULL))
}
|
/scratch/gouwar.j/cran-all/cranData/usincometaxes/R/utils_check_data.R
|
#' Get state SOI from state name.
#'
#' Converts state names or state abbreviations to numeric SOI codes, which are required for TAXSIM.
#'
#' @param state_column Vectors containing the states to calculate taxes for. Generally, this is the
#' state column from the data set that will be sent to TAXSIM.
#'
#' @return Named integer vector with each number between 1 and 51 representing the state's SOI.
#' Names are the state's two letter abbreviation.
get_state_soi <- function(state_column) {
# the SOI crosswalk uses two letter abbreviations
# if full names were entered, we need to change them to the two letter abbreviations
# add DC to list of states, since there is an SOI code for it
# lower-case everything to make it easier to match with the user-entered states
state_abb <- tolower(c(datasets::state.abb, "DC", "No State"))
state_name <- tolower(c(datasets::state.name, "District of Columbia", "No State"))
states_listed <- tolower(state_column)
# states in the original input dataframe, as two letter abbreviation and lower case
input_state_abb <- ifelse(nchar(states_listed) > 2, state_abb[match(states_listed,state_name)], states_listed)
# make state abbreviations upper case to match cross walk
input_state_abb <- toupper(input_state_abb)
# find SOI from two-letter abbreviation, using cross-walk
state_soi <- soi_and_states_crosswalk[input_state_abb]
return(state_soi)
}
#' Clean final TAXSIM data set.
#'
#' Clean the data set received from TAXSIM by renaming columns and removing columns not needed in
#' the final output.
#'
#' @param from_taxsim The data set received from TAXSIM.
#'
#' @return Data frame containing the row's `taxsimid` and tax calculations. This data frame can be
#' merged with the original input data frame by `taxsimid`.
#'
#' @keywords internal
clean_from_taxsim <- function(from_taxsim) {
# change column names from the TAXSIM names to more descriptive names
for (col in colnames(from_taxsim)) {
new_colname_output <- from_taxsim_cols()[[col]]
names(from_taxsim)[names(from_taxsim) == col] <- new_colname_output
}
# remove state and year because they are also in the input data
# since they are in the input data, when you join input and output by taxsimid, they will appear twice
from_taxsim[c('state', 'year')] <- NULL
return(from_taxsim)
}
#' Map input column names.
#'
#' Map the input column names required in this package to the input column names required by TAXSIM.
#'
#' @keywords internal
taxsim_cols <- function() {
# NOTE: You need to change check_required_cols() if you change this function.
c(
'taxsimid', 'year', 'mstat', # required
'state', 'page', 'sage',
'depx', 'age1', 'age2', 'age3', # dependents
'dep13', 'dep17', 'dep18', # dependents old way
'pwages', 'swages', 'psemp', 'ssemp', 'dividends', 'intrec', 'stcg', 'ltcg', 'otherprop', 'nonprop',
'pensions', 'gssi', 'pui', 'sui', 'transfers', 'rentpaid', 'proptax', 'otheritem',
'childcare', 'mortgage', 'scorp', 'pbusinc', 'pprofinc', 'sbusinc', 'sprofinc',
'mtr', 'idtl'
)
}
#' Map output column names.
#'
#' Map the output column names required in this package to the input column names required by TAXSIM.
#'
#' @keywords internal
from_taxsim_cols <- function() {
# named vector to rename the columns of the data set received from TAXSIM
c(
# primary output
'taxsimid' = 'taxsimid', 'year' = 'year', 'state' = 'state', 'fiitax' = 'fiitax',
'siitax' = 'siitax', 'fica' = 'fica', 'frate' = 'frate',
'srate' = 'srate', 'ficar' = 'ficar', 'tfica' = 'tfica', 'credits' = 'credits',
# extended output
'v10' = 'v10_federal_agi', 'v11' = 'v11_ui_agi', 'v12' = 'v12_soc_sec_agi', 'v13' = 'v13_zero_bracket_amount',
'v14' = 'v14_personal_exemptions', 'v15' = 'v15_exemption_phaseout', 'v16' = 'v16_deduction_phaseout',
'v17' = 'v17_itemized_deductions', 'v18' = 'v18_federal_taxable_income', 'v19' = 'v19_tax_on_taxable_income',
'v20' = 'v20_exemption_surtax', 'v21' = 'v21_general_tax_credit', 'v22' = 'v22_child_tax_credit_adjusted',
'v23' = 'v23_child_tax_credit_refundable', 'v24' = 'v24_child_care_credit', 'v25' = 'v25_eitc',
'v26' = 'v26_amt_income', 'v27' = 'v27_amt_liability', 'v28' = 'v28_fed_income_tax_before_credit', 'v29' = 'v29_fica',
# columns are zero if no state is specified
'v30' = 'v30_state_household_income', 'v31' = 'v31_state_rent_expense',
'v32' = 'v32_state_agi', 'v33' = 'v33_state_exemption_amount', 'v34' = 'v34_state_std_deduction_amount',
'v35' = 'v35_state_itemized_deduction', 'v36' = 'v36_state_taxable_income', 'v37' = 'v37_state_property_tax_credit',
'v38' = 'v38_state_child_care_credit', 'v39' = 'v39_state_eitc', 'v40' = 'v40_state_total_credits',
'v41' = 'v41_state_bracket_rate',
# not sure what this is, as it is new
'staxbc' = 'staxbc',
# extra federal columns
'v42' = 'v42_self_emp_income', 'v43' = 'v43_medicare_tax_unearned_income',
'v44' = 'v44_medicare_tax_earned_income', 'v45' = 'v45_cares_recovery_rebate'
)
}
#' @keywords internal
non_numeric_col <- function() {
# mstat and state can be non-numeric columns
# integer numbers represent the number in taxsim_cols
c(3, 4)
}
#' @keywords internal
greater_zero_cols <- function() {
# columns that must have all values greater than zero
# integer numbers represent the number in taxsim_cols
c(1, 2, 5, 6, 7, 8, 9, 10, 23, 24)
}
#' Recode marginal tax rates.
#'
#' Marginal tax rates are specified with the \code{marginal_tax_rates} parameter. The possible values are
#' descriptive strings. But TAXSIM requires integers. Convert descriptive strings to integers.
#'
#' @param marginal_tax_rate String representing the \code{marginal_tax_rate} parameter in \code{taxsim_calculate_taxes}
#'
#' @keywords internal
convert_marginal_tax_rates <- function(marginal_tax_rate) {
possible_values <- c('Wages', 'Long Term Capital Gains', 'Primary Wage Earner', 'Secondary Wage Earner')
if (!marginal_tax_rate %in% possible_values) {
stop(paste0("`marginal_tax_rate` must be one of: ", "'", paste0(possible_values, collapse = "', '"), "'"))
}
switch(marginal_tax_rate,
'Wages' = 11,
'Long Term Capital Gains' = 70,
'Primary Wage Earner' = 85,
'Secondary Wage Earner' = 86
)
}
#' Convert NA values to either 0 or the proper state value
#'
#' @keywords internal
convert_na <- function(.data, cols_to_convert) {
cols_to_convert <- intersect(colnames(.data), cols_to_convert)
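# a missing state means state taxes are not wanted: use the 'No State' label or SOI code 0, depending on the column's type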
if (is.character(.data[['state']])) {
.data[['state']][is.na(.data[['state']])] <- 'No State'
} else if (is.numeric(.data[['state']])) {
.data[['state']][is.na(.data[['state']])] <- 0
}
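# all remaining missing values in the other columns default to 0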
.data[cols_to_convert][is.na(.data[cols_to_convert])] <- 0
return(.data)
}
|
/scratch/gouwar.j/cran-all/cranData/usincometaxes/R/utils_clean_data.R
|
#' Use WASM to calculate taxes locally
#'
#' @param .data Dataset that can be sent to WASM.
#'
#' @keywords internal
calculate_taxes_wasm <- function(.data) {
# connect to js and wasm files
wasm_path <- system.file("taxsim/taxsim.wasm", package = "usincometaxes")
js_path <- system.file("taxsim/taxsim.js", package = "usincometaxes")
wasm_binary <- readBin(wasm_path, raw(), file.info(wasm_path)$size)
# convert input data to string
data_string <- vroom::vroom_format(.data, delim = ",", eol = "\\n")
# load the V8 context
ctx <- V8::v8()
ctx$assign("wasmBinary", wasm_binary)
ctx$source(js_path)
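# call the taxsim JavaScript wrapper with the CSV string; it runs the wasm binary and returns the results as CSV text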
response_text <- ctx$call("taxsim",
V8::JS(paste0("'", data_string, "'")),
V8::JS("{wasmBinary}"),
await = TRUE)
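# parse the CSV text returned by TAXSIM into a data frame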
from_taxsim <- tibble::tibble(
utils::read.table(text = response_text,
header = T,
sep = ","))
return(from_taxsim)
}
|
/scratch/gouwar.j/cran-all/cranData/usincometaxes/R/utils_connect_server.R
|
## ----include = FALSE----------------------------------------------------------
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
## ----setup--------------------------------------------------------------------
library(usincometaxes)
## -----------------------------------------------------------------------------
data(taxpayer_finances)
taxsim_dataset <- create_dataset_for_taxsim(taxpayer_finances)
knitr::kable(head(taxsim_dataset))
## ----eval = FALSE-------------------------------------------------------------
# taxsim_filename <- 'taxsim_dataset.csv'
#
# vroom::vroom_write(taxsim_dataset, taxsim_filename)
|
/scratch/gouwar.j/cran-all/cranData/usincometaxes/inst/doc/send-data-to-taxsim.R
|
---
title: "Manually Upload Data to TAXSIM 35"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{Manually Upload Data to TAXSIM 35}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
```
```{r setup}
library(usincometaxes)
```
`usincometaxes` sends data to the National Bureau of Economic Research's (NBER) [TAXSIM 35](http://taxsim.nber.org/taxsim35/) server. The data must meet TAXSIM 35's requirements for data types and column names. `usincometaxes` performs checks to ensure the data will be accepted by TAXSIM 35. It's possible, however, that issues will slip through. This can produce cryptic errors that might be hard to locate. If you receive errors that you cannot correct, it could be helpful to manually upload your data to TAXSIM 35's servers.
`usincometaxes` provides a function, `create_dataset_for_taxsim()`, to format your data for manual uploading. To use the function, prepare your dataset as you normally would to use `taxsim_calculate_taxes()`. For example, follow the formatting required for [input columns](https://www.shaneorr.io/r/usincometaxes/articles/taxsim-input.html). Then, supply the dataset to `create_dataset_for_taxsim()`.
```{r}
data(taxpayer_finances)
taxsim_dataset <- create_dataset_for_taxsim(taxpayer_finances)
knitr::kable(head(taxsim_dataset))
```
Then, save this dataset as a csv file to your local computer. It is recommended to use `vroom::vroom_write()` (as in the chunk below) to write out the dataset, since `write.csv()` tends to pose issues. Also note that you can name the `.csv` file anything you wish.
```{r eval = FALSE}
taxsim_filename <- 'taxsim_dataset.csv'
vroom::vroom_write(taxsim_dataset, taxsim_filename)
```
Now, manually upload the file `taxsim_dataset.csv` to TAXSIM 35's server by going to [https://taxsim.nber.org/taxsim35/](https://taxsim.nber.org/taxsim35/), navigating to the section titled 'OR...Upload a (not too large) file with TAXSIM data:', and uploading the `.csv` file.
Errors from the manual upload could guide you in solving any data formatting issues.
|
/scratch/gouwar.j/cran-all/cranData/usincometaxes/inst/doc/send-data-to-taxsim.Rmd
|
## ----include = FALSE----------------------------------------------------------
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
|
/scratch/gouwar.j/cran-all/cranData/usincometaxes/inst/doc/taxsim-input.R
|
---
title: "Description of Input Columns"
author: "Shane Orr"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{Description of Input Columns}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
```
`usincometaxes` calculates taxes through the function `taxsim_calculate_taxes(.data)`. The key parameter in this function is `.data`, which is a data frame containing the information used to calculate income taxes. Each column in `.data` contains financial or household information and maps to the variables in [TAXSIM 35](http://taxsim.nber.org/taxsim35/). All variables in TAXSIM 35 can be used in `usincometaxes`.
The column names in `usincometaxes` are the same as TAXSIM 35's variable names. The data types for inputs are also the same as what is noted in the [TAXSIM 35 documentation](http://taxsim.nber.org/taxsim35/), with two exceptions.
1. For filing status, `mstat`, users can either enter a description of the filing status (shown below) or the number required by TAXSIM.
2. For state, `state`, users can enter the two letter state abbreviation or the SOI code, as required by TAXSIM.
`usincometaxes` will convert descriptions of filing statuses or state abbreviations to the numbers required by TAXSIM.
`.data` can contain columns beyond those listed below. The additional columns will be ignored.
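As a brief sketch (not evaluated here, and with made-up values), the hypothetical data frame below uses both of these conventions: `mstat` takes descriptions, while `state` mixes a full name with a two letter abbreviation.

```{r eval = FALSE}
library(usincometaxes)

family_income <- data.frame(
  taxsimid = c(1, 2),
  year = c(2015, 2015),
  mstat = c('single', 'married, jointly'),
  state = c('North Carolina', 'NY'),
  pwages = c(10000, 100000),
  page = c(26, 36)
)

family_taxes <- taxsim_calculate_taxes(family_income)
```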
## Required columns
* **taxsimid**: An arbitrary *whole number* greater than zero. This number links the results from TAXSIM
35 to the original input data frame specified with `.data`.
<br>
* **year**: Tax year ending Dec 31 (4 digits between 1960 and 2023). State must be zero if
year is before 1977 or after 2023.
<br>
* **mstat**: Filing status of tax unit. One of the following:
* "single" or 1 for single;
* "married, jointly" or 2 for married, filing jointly;
* "married, separately" or 6 for married, filing separately;
* "dependent child" or 8 for dependent, usually a child with income; or
* "head of household" or 1 for head of household filing status.
<br>
* **state**: State two letter abbreviation ('NC'), full state name ('North Carolina') or [state SOI code](https://taxsim.nber.org/statesoi.html) (32).
If state income taxes are not needed, either label as "No State" or remove this variable. State income tax information is only available from 1977 to 2023.
## Optional columns
* **page**: Age of primary taxpayer as of December 31st of tax year. Taxpayer age variables
determine eligibility for additional standard deductions, personal exemption, EITC and AMT exclusion.
<br>
* **sage**: Age of spouse as of December 31st of tax year (or 0 / NA if no spouse).
<br>
* **depx**: Total number of dependents (part of personal exemption calculation).
<br>
* **age1**: Age of youngest dependent. Used for EITC, CTC and CCC. For 1991+ code students between 20 and 23 as 19 to get the EITC calculation correct. Code infants as "1". If age1 is not present, depx is used for the number of children eligible for the EIC, CTC and CDCC.
<br>
* **age2**: Age of 2nd youngest dependent.
<br>
* **age3**: Age of 3rd youngest dependent.
Ages of any additional dependents are not relevant for the tax calculation, but all dependents should be included in `depx`.
**Incomes**
* **pwages**: Wage and salary income of Primary Taxpayer (exclude QBI).
<br>
* **swages**: Wage and salary income of Spouse (include self-employment but no QBI). Must
be zero or the column should not exist for non-joint returns.
<br>
* **psemp**: Self-employment income of Primary Taxpayer (exclude QBI).
<br>
* **ssemp**: Self-employment income of Spouse.
<br>
* **dividends**: Dividend income (qualified dividends only for 2003 on).
<br>
* **intrec**: Interest income received (+/-).
<br>
* **stcg**: Short Term Capital Gains or losses (+/-).
<br>
* **ltcg**: Long Term Capital Gains or losses (+/-).
<br>
* **otherprop**: Other property income subject to NIIT, including:
* Unearned or limited partnership and passive S-Corp profits;
* Rent not eligible for QBI deduction;
* Non-qualified dividends;
* Other income or loss not otherwise enumerated here.
<br>
* **nonprop**: Other non-property income not subject to Medicare NIIT such as:
* Alimony;
* Nonwage fellowships;
* State income tax refunds (itemizers only);
* Alimony paid;
* Keogh and IRA contributions;
* Foreign income exclusion; and
* NOLs.
<br>
* **pensions**: Taxable Pensions and IRA distributions.
<br>
* **gssi**: Gross Social Security Benefits.
<br>
* **pui**: Unemployment Compensation received - primary taxpayer.
<br>
* **sui**: Unemployment compensation received - secondary taxpayer. The split is relevant only 2020-2021.
<br>
* **transfers**: Other non-taxable transfer income such as:
* Welfare;
* Workers comp;
* Veterans benefits; and
* Child support that would affect eligibility for state property tax rebates but would not be taxable at the federal level.
<br>
* **rentpaid**: Rent paid (used only for calculating state property tax rebates).
<br>
* **proptax**: Real Estate taxes paid. This is a preference for the AMT and is also
used to calculate state property tax rebates.
<br>
* **otheritem**: Other Itemized deductions that are a preference for the Alternative Minimum Tax. These would include:
* Other state and local taxes (line 8 of Schedule A) plus local income tax;
* Preference share of medical expenses; and
* Miscellaneous (line 27).
<br>
* **childcare**: Child care expenses.
<br>
**The following are for the TCJA Business Tax Deduction.**
* **scorp**: Active S-Corp income (is SSTB).
<br>
* **pbusinc**: Primary Taxpayer's Qualified Business Income (QBI) subject to a
preferential rate without phaseout and assuming sufficient wages paid or capital to be eligible
for the full deduction. Subject to SECA and Medicare additional Earnings Tax.
<br>
* **pprofinc**: Primary Taxpayer's Specialized Service Trade or Business service
(SSTB) with a preferential rate subject to claw-back. Subject to SECA and Medicare Additional Earnings Tax.
<br>
* **sbusinc**: Spouse's QBI. Must be zero for non-joint returns, or the
column should not exist.
<br>
* **sprofinc**: Spouse's SSTB. Must be zero for non-joint returns, or the
column should not exist.
|
/scratch/gouwar.j/cran-all/cranData/usincometaxes/inst/doc/taxsim-input.Rmd
|
## ----include = FALSE----------------------------------------------------------
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
|
/scratch/gouwar.j/cran-all/cranData/usincometaxes/inst/doc/taxsim-output.R
|
---
title: "Description of Output Columns"
author: "Shane Orr"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{Description of Output Columns}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
```
`usincometaxes` returns tax results as a data frame, with each column representing tax information such as
federal and state income taxes owed or other line items. Each row corresponds to a row in the input data frame.
Users can specify the amount of output with the `return_all_information` parameter in `taxsim_calculate_taxes()`.
Setting `return_all_information` to `TRUE` returns a data frame with 42 columns of detailed tax information.
`FALSE` returns 9 columns of key information. `FALSE` leads to quicker calculations and downloads from the NBER's servers.
`usincometaxes` provides the same output as [TAXSIM 35](http://taxsim.nber.org/taxsim35/).
Setting `return_all_information` to `FALSE` equates to setting `idtl` to 0 in TAXSIM, while `TRUE` corresponds to 2.
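As a minimal sketch (not evaluated here), requesting the detailed output for the package's bundled `taxpayer_finances` data set would look like this:

```{r eval = FALSE}
library(usincometaxes)

data(taxpayer_finances)

detailed_taxes <- taxsim_calculate_taxes(
  .data = taxpayer_finances,
  return_all_information = TRUE
)
```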
## Standard columns
Returned columns when `return_all_information` = `FALSE`.
* **taxsimid**: ID number from the input data set, so users can match the tax information
with the input data set.
* **fiitax**: Federal income tax liability including capital gains rates, surtaxes, AMT and
refundable and non-refundable credits.
* **siitax**: State income tax liability.
* **fica**: Total FICA taxes, including the employers and employees share.
* **frate**: Marginal federal tax rate.
* **srate**: Marginal state tax rate, if a state was identified.
* **ficar**: FICA rate.
* **tfica**: Taxpayer liability for FICA.
Marginal rates are with respect to wage income unless another rate is requested. If detailed intermediate results are requested, the following columns of data are added.
## Detailed columns
Returned columns when `return_all_information` = `TRUE`.
All standard columns shown above, plus:
* **credits**: Total refundable and non-refundable federal credits
* **v10_federal_agi**: Federal AGI
* **v11_ui_agi**: UI in AGI
* **v12_soc_sec_agi**: Social Security in AGI
* **v13_zero_bracket_amount**: Zero Bracket Amount
* **v14_personal_exemptions**: Personal Exemptions
* **v15_exemption_phaseout**: Exemption Phaseout
* **v16_deduction_phaseout**: Deduction Phaseout
* **v17_itemized_deductions**: Itemized Deductions Allowed (Zero for non-itemizers)
* **v18_federal_taxable_income**: Federal Taxable Income
* **v19_tax_on_taxable_income**: Tax on Taxable Income (no special capital gains rates)
* **v20_exemption_surtax**: Exemption Surtax
* **v21_general_tax_credit**: General Tax Credit
* **v22_child_tax_credit_adjusted**: Child Tax Credit (as adjusted)
* **v23_child_tax_credit_refundable**: Additional Child Tax Credit (refundable)
* **v24_child_care_credit**: Child Care Credit
* **v25_eitc**: Earned Income Credit (total federal)
* **v26_amt_income**: Income for the Alternative Minimum Tax
* **v27_amt_liability**: AMT Liability after credit for regular tax and other allowed credits.
* **v28_fed_income_tax_before_credit**: Federal Income Tax Before Credits (includes special treatment of Capital gains,
exemption surtax (1988-1996) and 15% rate phaseout (1988-1990) but not AMT)
* **v29_fica**: FICA
The following columns are zero if no state is specified:
* **v30_state_household_income**: State Household Income (imputation for property tax credit)
* **v31_state_rent_expense**: State Rent Expense (imputation for property tax credit)
* **v32_state_agi**: State AGI
* **v33_state_exemption_amount**: State Exemption amount
* **v34_state_std_deduction_amount**: State Standard Deduction
* **v35_state_itemized_deduction**: State Itemized Deductions
* **v36_state_taxable_income**: State Taxable Income
* **v37_state_property_tax_credit**: State Property Tax Credit
* **v38_state_child_care_credit**: State Child Care Credit
* **v39_state_eitc**: State EIC
* **v40_state_total_credits**: State Total Credits
* **v41_state_bracket_rate**: State Bracket Rate
* **staxbc**: State tax liability before credits
Additional federal results:
* **v42_self_emp_income**: Earned Self-Employment Income for FICA
* **v43_medicare_tax_unearned_income**: Medicare Tax on Unearned Income
* **v44_medicare_tax_earned_income**: Medicare Tax on Earned Income
* **v45_cares_recovery_rebate**: CARES act Recovery Rebates
|
/scratch/gouwar.j/cran-all/cranData/usincometaxes/inst/doc/taxsim-output.Rmd
|
## ----include = FALSE----------------------------------------------------------
knitr::opts_chunk$set(
message = FALSE,
collapse = TRUE,
comment = "#>",
fig.align = 'center',
fig.path = 'webimg/',
fig.width = 8,
fig.height = 5,
dpi = 72,
dev = 'png'
)
## ----setup, include = FALSE---------------------------------------------------
library(usincometaxes)
library(dplyr)
library(tidyr)
library(knitr)
library(ggplot2)
## ----import_data--------------------------------------------------------------
data(taxpayer_finances)
taxpayer_finances %>%
head() %>%
kable()
## ----calcualte_survey_taxes---------------------------------------------------
family_taxes <- taxsim_calculate_taxes(
.data = taxpayer_finances,
return_all_information = FALSE
)
family_taxes %>%
head() %>%
kable()
## ----join_tax_data------------------------------------------------------------
income_and_taxes <- taxpayer_finances %>%
left_join(family_taxes, by = 'taxsimid')
income_and_taxes %>%
head() %>%
kable()
## ----plot_family_taxes, fig.height = 7, fig.width = 9-------------------------
# custom theme for all plots in the vignette
plt_theme <- function() {
theme_minimal() +
theme(
legend.text = element_text(size = 11),
axis.text = element_text(size = 10),
axis.title=element_text(size=11,face="bold"),
strip.text = element_text(size = 11),
panel.grid.minor = element_blank(),
plot.title = element_text(face = "bold"),
plot.subtitle = element_text(size = 12),
legend.position = 'bottom'
)
}
# color palettes for number of children
dep_color_palette <- rev(c('#4B0055','#353E7C','#007094','#009B95','#00BE7D','#96D84B'))
income_and_taxes %>%
mutate(
tax_unit_income = pwages + swages,
num_dependents_eitc = factor(depx, levels = as.character(0:5)),
filing_status = tools::toTitleCase(mstat)
) %>%
ggplot(aes(tax_unit_income, fiitax, color = num_dependents_eitc)) +
geom_point(alpha = .5) +
scale_x_continuous(labels = scales::label_dollar(scale = .001, suffix = "K"), limits = c(0, 200000)) +
scale_y_continuous(labels = scales::label_dollar(scale = .001, suffix = "K"), limits = c(-10000, 50000)) +
scale_color_discrete(type = dep_color_palette) +
facet_grid(rows = vars(mstat), cols = vars(year)) +
labs(
title = "Federal Income Taxes by Filing Status, Year, and Number of Children",
x = "\nHousehold Wages",
y = "Federal Income Taxes"
) +
plt_theme() +
guides(color = guide_legend(title = "Number of Childern 18 or Younger", title.position = "top", byrow = TRUE))
## -----------------------------------------------------------------------------
# calculate taxes from 0 to 200,000 in wages
wage_linespace <- seq(0, 200000, 100)
n_kids <- 4
base_family_income <- data.frame(
year = 2020,
mstat = 'married, jointly',
state = 'NC',
page = 40,
sage = 40,
depx = n_kids,
age1 = n_kids,
age2 = n_kids,
age3 = n_kids,
pwages = wage_linespace,
swages = 0
)
# create an additional data set with no dependents and add it to the original
family_income <- base_family_income %>%
bind_rows(
# make all number of dependent columns 0
base_family_income %>%
mutate(across(c(depx, age1, age2, age3), ~0))
) %>%
# add unique ID to each row
mutate(taxsimid = row_number()) %>%
select(taxsimid, everything())
family_income %>%
head() %>%
kable()
## -----------------------------------------------------------------------------
family_income_taxes <- taxsim_calculate_taxes(
.data = family_income,
return_all_information = TRUE
)
family_income_taxes %>%
head() %>%
kable()
## -----------------------------------------------------------------------------
family_income <- family_income %>%
left_join(family_income_taxes, by = 'taxsimid')
## -----------------------------------------------------------------------------
family_income_long <- family_income %>%
select(pwages, depx, fiitax, siitax) %>%
pivot_longer(cols = c('fiitax', 'siitax'),
names_to = 'jurisdiction', values_to = 'taxes_paid') %>%
mutate(
jurisdiction = recode(jurisdiction, 'fiitax' = 'Federal Income Taxes', 'siitax' = 'NC State Income Taxes'),
num_dependents_eitc = factor(depx, levels = as.character(0:5)),
post_tax_wages = pwages - taxes_paid
)
# primary_wages, taxes_paid, color = as.character(num_dependents_eitc)
taxes_line_plot <- function(.data, x_var, y_var, color_var) {
ggplot(.data, aes({{x_var}}, {{y_var}}, color = {{color_var}})) +
geom_line(size = 1, alpha = .8) +
geom_hline(yintercept = 0) +
scale_x_continuous(labels = scales::label_dollar(scale = .001, suffix = "K")) +
scale_y_continuous(labels = scales::label_dollar(scale = .001, suffix = "K")) +
scale_color_brewer(type = 'seq', palette = 'Set2') +
plt_theme()
}
taxes_line_plot(family_income_long, pwages, taxes_paid, num_dependents_eitc) +
facet_wrap(vars(jurisdiction)) +
labs(
title = "Relationship Between Wages and Income Taxes Paid",
subtitle = "Taxpayer is married, filing jointly, in 2020",
x = "\nPre-Tax Household Wages",
y = "Federal Income Taxes",
color = 'Number of Children 18 or Younger:'
)
## -----------------------------------------------------------------------------
taxes_line_plot(family_income_long, pwages, post_tax_wages, num_dependents_eitc) +
facet_wrap(vars(jurisdiction)) +
labs(
title = "Relationship Between Pre and Post-Tax Wages",
subtitle = "Taxpayer is married, filing jointly, in 2020",
x = "\nPre-Tax Household Wages",
y = "Post-Tax Hosuehold Wages",
color = 'Number of Children 18 or Younger:'
)
## -----------------------------------------------------------------------------
tax_items_mapping <- c(
v25_eitc = 'Earned Income Tax Credit',
child_tax_credit = 'Child Tax Credit'
)
family_income %>%
filter(depx == 4) %>%
mutate(child_tax_credit = v22_child_tax_credit_adjusted + v23_child_tax_credit_refundable) %>%
select(pwages, fiitax, v25_eitc, child_tax_credit) %>%
pivot_longer(cols = names(tax_items_mapping), names_to = 'tax_item', values_to = 'amount') %>%
mutate(tax_item = recode(tax_item, !!!tax_items_mapping)) %>%
taxes_line_plot(pwages, amount, tax_item) +
labs(
title = "Relationship Between Wages and Credits",
subtitle = "Taxpayer is married, filing jointly, in 2020 and has four children under 19",
x = "\nPre-Tax Wages",
y = "Credit Amount",
color = NULL
)
|
/scratch/gouwar.j/cran-all/cranData/usincometaxes/inst/doc/using-usincometaxes.R
|
---
title: "Calculating Federal and State Income Taxes"
output: rmarkdown::html_vignette
author: "Shane Orr"
vignette: >
%\VignetteIndexEntry{Calculating Federal and State Income Taxes}
%\VignetteDepends{ggplot2}
%\VignetteDepends{dplyr}
%\VignetteDepends{scales}
%\VignetteDepends{tidyr}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r, include = FALSE}
knitr::opts_chunk$set(
message = FALSE,
collapse = TRUE,
comment = "#>",
fig.align = 'center',
fig.path = 'webimg/',
fig.width = 8,
fig.height = 5,
dpi = 72,
dev = 'png'
)
```
```{r setup, include = FALSE}
library(usincometaxes)
library(dplyr)
library(tidyr)
library(knitr)
library(ggplot2)
```
This article presents two use cases for `usincometaxes`. The first shows users how to estimate income taxes from a data frame containing financial information and other characteristics of taxpayer units. This information could come from surveys such as the [Consumer Expenditure survey](https://www.bls.gov/cex/) or the [Panel Study of Income Dynamics survey](https://psidonline.isr.umich.edu/). The second use case focuses on running simulations.
## Calculating income taxes from survey data
For the first example we will use an internal data set called `taxpayer_finances`. The data is randomly generated and formatted for use with `usincometaxes`. Guidance on formatting data can be found in the [Description of Input Columns](taxsim-input.html) article.
The data set contains financial and other household characteristics that help estimate income taxes.
```{r import_data}
data(taxpayer_finances)
taxpayer_finances %>%
head() %>%
kable()
```
Each row in the data set is a tax paying unit. Thus, each row files one tax return. Columns represent items reported on tax returns that impact taxes. Of course, the information in the data set does not represent everything people report on tax returns. For this reason, the income tax calculations are simply estimates.
We call `taxsim_calculate_taxes()` to estimate federal and state income taxes for each tax paying unit. We are only interested in federal and state tax liabilities, not line-item credits and deductions, so we are using `return_all_information = FALSE`.
```{r calcualte_survey_taxes}
family_taxes <- taxsim_calculate_taxes(
.data = taxpayer_finances,
return_all_information = FALSE
)
family_taxes %>%
head() %>%
kable()
```
The `taxsimid` column is required for any input data frame used in `taxsim_calculate_taxes`. This column is also returned in the output data frame containing tax calculations, allowing us to link the input and output data frames.
```{r join_tax_data}
income_and_taxes <- taxpayer_finances %>%
left_join(family_taxes, by = 'taxsimid')
income_and_taxes %>%
head() %>%
kable()
```
Now we have a single data frame containing both wages and income tax liabilities. Let's take a look at the relationship between wages and estimated federal income taxes. The colors represent the number of children 18 or younger.
```{r plot_family_taxes, fig.height = 7, fig.width = 9}
# custom theme for all plots in the vignette
plt_theme <- function() {
theme_minimal() +
theme(
legend.text = element_text(size = 11),
axis.text = element_text(size = 10),
axis.title=element_text(size=11,face="bold"),
strip.text = element_text(size = 11),
panel.grid.minor = element_blank(),
plot.title = element_text(face = "bold"),
plot.subtitle = element_text(size = 12),
legend.position = 'bottom'
)
}
# color palettes for number of children
dep_color_palette <- rev(c('#4B0055','#353E7C','#007094','#009B95','#00BE7D','#96D84B'))
income_and_taxes %>%
mutate(
tax_unit_income = pwages + swages,
num_dependents_eitc = factor(depx, levels = as.character(0:5)),
filing_status = tools::toTitleCase(mstat)
) %>%
ggplot(aes(tax_unit_income, fiitax, color = num_dependents_eitc)) +
geom_point(alpha = .5) +
scale_x_continuous(labels = scales::label_dollar(scale = .001, suffix = "K"), limits = c(0, 200000)) +
scale_y_continuous(labels = scales::label_dollar(scale = .001, suffix = "K"), limits = c(-10000, 50000)) +
scale_color_discrete(type = dep_color_palette) +
facet_grid(rows = vars(mstat), cols = vars(year)) +
labs(
title = "Federal Income Taxes by Filing Status, Year, and Number of Children",
x = "\nHousehold Wages",
y = "Federal Income Taxes"
) +
plt_theme() +
guides(color = guide_legend(title = "Number of Childern 18 or Younger", title.position = "top", byrow = TRUE))
```
The plot shows what we would expect: higher-income families pay more in taxes, and households pay less the more children they have. We also see the reduction in federal marginal tax rates from 2000 to 2020, as shown by the decrease in income tax liabilities when comparing the two years.
## Income tax simulations
### Association between income taxes paid and household wages
An additional use of `usincometaxes` is to run simulations. This could be as simple as plotting the relationship between wages and income taxes paid. To do this, we first need to create a data set that holds everything constant except for wages. The code block below does this, except it also creates different data sets for households with zero and four children 18 or younger, so we can compare differences on this characteristic as well.
```{r}
# calculate taxes from 0 to 200,000 in wages
wage_linespace <- seq(0, 200000, 100)
n_kids <- 4
base_family_income <- data.frame(
year = 2020,
mstat = 'married, jointly',
state = 'NC',
page = 40,
sage = 40,
depx = n_kids,
age1 = n_kids,
age2 = n_kids,
age3 = n_kids,
pwages = wage_linespace,
swages = 0
)
# create an additional data set with no dependents and add it to the original
family_income <- base_family_income %>%
bind_rows(
# make all number of dependent columns 0
base_family_income %>%
mutate(across(c(depx, age1, age2, age3), ~0))
) %>%
# add unique ID to each row
mutate(taxsimid = row_number()) %>%
select(taxsimid, everything())
family_income %>%
head() %>%
kable()
```
Now, we will calculate federal and state income taxes for our simulated data set. Note that `return_all_information = TRUE`. This allows us to examine credit amounts like the Child Tax Credit and Earned Income Tax Credit (EITC).
```{r}
family_income_taxes <- taxsim_calculate_taxes(
.data = family_income,
return_all_information = TRUE
)
family_income_taxes %>%
head() %>%
kable()
```
As before, let's merge our tax data with the original input data set.
```{r}
family_income <- family_income %>%
left_join(family_income_taxes, by = 'taxsimid')
```
Now, let's look at the relationship between household wages and estimated income tax liabilities.
```{r}
family_income_long <- family_income %>%
select(pwages, depx, fiitax, siitax) %>%
pivot_longer(cols = c('fiitax', 'siitax'),
names_to = 'jurisdiction', values_to = 'taxes_paid') %>%
mutate(
jurisdiction = recode(jurisdiction, 'fiitax' = 'Federal Income Taxes', 'siitax' = 'NC State Income Taxes'),
num_dependents_eitc = factor(depx, levels = as.character(0:5)),
post_tax_wages = pwages - taxes_paid
)
# primary_wages, taxes_paid, color = as.character(num_dependents_eitc)
taxes_line_plot <- function(.data, x_var, y_var, color_var) {
ggplot(.data, aes({{x_var}}, {{y_var}}, color = {{color_var}})) +
geom_line(size = 1, alpha = .8) +
geom_hline(yintercept = 0) +
scale_x_continuous(labels = scales::label_dollar(scale = .001, suffix = "K")) +
scale_y_continuous(labels = scales::label_dollar(scale = .001, suffix = "K")) +
scale_color_brewer(type = 'seq', palette = 'Set2') +
plt_theme()
}
taxes_line_plot(family_income_long, pwages, taxes_paid, num_dependents_eitc) +
facet_wrap(vars(jurisdiction)) +
labs(
title = "Relationship Between Wages and Income Taxes Paid",
subtitle = "Taxpayer is married, filing jointly, in 2020",
x = "\nPre-Tax Household Wages",
y = "Federal Income Taxes",
color = 'Number of Children 18 or Younger:'
)
```
Note that North Carolina had a flat tax of 5.25% in 2020. That's why state income taxes increase linearly.
### Relationship Between Pre and Post-Tax Wages
We'll create an additional plot comparing pre-tax and post-tax household wages.
```{r}
taxes_line_plot(family_income_long, pwages, post_tax_wages, num_dependents_eitc) +
facet_wrap(vars(jurisdiction)) +
labs(
title = "Relationship Between Pre and Post-Tax Wages",
subtitle = "Taxpayer is married, filing jointly, in 2020",
x = "\nPre-Tax Household Wages",
y = "Post-Tax Hosuehold Wages",
color = 'Number of Children 18 or Younger:'
)
```
### Child Tax Credit and Earned Income Tax Credit (EITC)
As noted previously, setting `return_all_information = TRUE` lets us retrieve additional output. Included in this additional output are amounts for the Child Tax Credit and EITC. Let's look at the amounts for both credits, while varying household wages. The values reflect a household with four children 18 or younger.
```{r}
tax_items_mapping <- c(
v25_eitc = 'Earned Income Tax Credit',
child_tax_credit = 'Child Tax Credit'
)
family_income %>%
filter(depx == 4) %>%
mutate(child_tax_credit = v22_child_tax_credit_adjusted + v23_child_tax_credit_refundable) %>%
select(pwages, fiitax, v25_eitc, child_tax_credit) %>%
pivot_longer(cols = names(tax_items_mapping), names_to = 'tax_item', values_to = 'amount') %>%
mutate(tax_item = recode(tax_item, !!!tax_items_mapping)) %>%
taxes_line_plot(pwages, amount, tax_item) +
labs(
title = "Relationship Between Wages and Credits",
subtitle = "Taxpayer is married, filing jointly, in 2020 and has four children under 19",
x = "\nPre-Tax Wages",
y = "Credit Amount",
color = NULL
)
```
|
/scratch/gouwar.j/cran-all/cranData/usincometaxes/inst/doc/using-usincometaxes.Rmd
|
## ----include = FALSE----------------------------------------------------------
knitr::opts_chunk$set(
message = FALSE,
collapse = TRUE,
comment = "#>",
fig.align = 'center',
fig.path = 'webimg/',
fig.width = 8,
fig.height = 5,
dpi = 72,
dev = 'png'
)
|
/scratch/gouwar.j/cran-all/cranData/usincometaxes/inst/doc/wasm.R
|
---
title: "About the Web Assembly Interface"
output: rmarkdown::html_vignette
author: "Shane Orr"
vignette: >
%\VignetteIndexEntry{About the Web Assembly Interface}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r, include = FALSE}
knitr::opts_chunk$set(
message = FALSE,
collapse = TRUE,
comment = "#>",
fig.align = 'center',
fig.path = 'webimg/',
fig.width = 8,
fig.height = 5,
dpi = 72,
dev = 'png'
)
```
`usincometaxes` uses a JavaScript / WebAssembly version of TAXSIM 35 to calculate taxes. The JavaScript / WebAssembly files are part of the package. Therefore, you can calculate taxes without sending data to the TAXSIM server. The output *should* be the same as the output from the TAXSIM 35 server. But, keep reading for why this might not always be the case.
The JavaScript / WebAssembly tooling comes from Aman Gupta Karmani's great work that you can find in this GitHub repo: https://github.com/tmm1/taxsim.js. And while you're checking out his work, don't overlook his tax calculator web app at [taxsim.app](https://taxsim.app).
The JavaScript / WebAssembly files are updated soon after the NBER updates the Fortran code that runs TAXSIM 35. We will then incorporate these updated JavaScript / WebAssembly files into a new version of `usincometaxes`. A benefit of this approach is that a given version of `usincometaxes` will always produce the same results when using 'wasm'. This holds because a given version of `usincometaxes` will always use the exact same JavaScript / WebAssembly files to calculate taxes. You could see a slight discrepancy between `usincometaxes` and TAXSIM 35 if there is a lag from when TAXSIM 35 is updated to when the JavaScript / WebAssembly files are updated.
`usincometaxes`'s [changelog](https://www.shaneorr.io/r/usincometaxes/news/index.html) will note when a version changes due to an update to the JavaScript / WebAssembly files. That way, you can check if you are using the version of `usincometaxes` with the most recent JavaScript / WebAssembly files and reinstall `usincometaxes` if this is not the case. As a reminder, the following code lets you check package versions: `packageVersion("usincometaxes")`.
|
/scratch/gouwar.j/cran-all/cranData/usincometaxes/inst/doc/wasm.Rmd
|
---
title: "Manually Upload Data to TAXSIM 35"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{Manually Upload Data to TAXSIM 35}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
```
```{r setup}
library(usincometaxes)
```
`usincometaxes` sends data to the National Bureau of Economic Research's (NBER) [TAXSIM 35](http://taxsim.nber.org/taxsim35/) server. The data must meet TAXSIM 35's requirements for data types and column names. `usincometaxes` performs checks to ensure the data will be accepted by TAXSIM 35. It's possible, however, that issues will slip through. This can produce cryptic errors that might be hard to locate. If you receive errors that you cannot correct, it could be helpful to manually upload your data to TAXSIM 35's servers.
`usincometaxes` provides a function, `create_dataset_for_taxsim()`, to format your data for manual uploading. To use the function, prepare your dataset as you normally would to use `taxsim_calculate_taxes()`. For example, follow the formatting required for [input columns](https://www.shaneorr.io/r/usincometaxes/articles/taxsim-input.html). Then, supply the dataset to `create_dataset_for_taxsim()`.
```{r}
data(taxpayer_finances)
taxsim_dataset <- create_dataset_for_taxsim(taxpayer_finances)
knitr::kable(head(taxsim_dataset))
```
Then, save this dataset as a csv file to your local computer. It is recommended to use `vroom::vroom_write()` (as in the chunk below) to write out the dataset, since `write.csv()` tends to pose issues. Also note that you can name the `.csv` file anything you wish.
```{r eval = FALSE}
taxsim_filename <- 'taxsim_dataset.csv'
vroom::vroom_write(taxsim_dataset, taxsim_filename)
```
Now, manually upload the file `taxsim_dataset.csv` to TAXSIM 35's server by going to [https://taxsim.nber.org/taxsim35/](https://taxsim.nber.org/taxsim35/), navigating to the section titled 'OR...Upload a (not too large) file with TAXSIM data:', and uploading the `.csv` file.
Errors from the manual upload could guide you in solving any data formatting issues.
|
/scratch/gouwar.j/cran-all/cranData/usincometaxes/vignettes/send-data-to-taxsim.Rmd
|
---
title: "Description of Input Columns"
author: "Shane Orr"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{Description of Input Columns}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
```
`usincometaxes` calculates taxes through the function `taxsim_calculate_taxes(.data)`. The key parameter in this function is `.data`, which is a data frame containing the information used to calculate income taxes. Each column in `.data` contains financial or household information and maps to the variables in [TAXSIM 35](http://taxsim.nber.org/taxsim35/). All variables in TAXSIM 35 can be used in `usincometaxes`.
The column names in `usincometaxes` are the same as TAXSIM 35's variable names. The data types for inputs are also the same as what is noted in the [TAXSIM 35 documentation](http://taxsim.nber.org/taxsim35/), with two exceptions.
1. For filing status, `mstat`, users can either enter a description of the filing status (shown below) or the number required by TAXSIM.
2. For state, `state`, users can enter the two letter state abbreviation or the SOI code, as required by TAXSIM.
`usincometaxes` will convert descriptions of filing statuses or state abbreviations to the numbers required by TAXSIM.
`.data` can contain columns beyond those listed below. The additional columns will be ignored.
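For illustration, here is a minimal sketch of an input data frame (the values are made up and the chunk is not evaluated) that uses the text forms of `mstat` and `state`:
```{r eval = FALSE}
sample_input <- data.frame(
  taxsimid = 1:2,
  year = c(2020, 2020),
  mstat = c("single", "married, jointly"),
  state = c("NC", "CA"),
  pwages = c(50000, 100000),
  swages = c(0, 30000)
)
taxsim_calculate_taxes(sample_input)
```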
## Required columns
* **taxsimid**: An arbitrary *whole number* greater than zero. This number links the results from TAXSIM 35 to the original input data frame specified with `.data`.
<br>
* **year**: Tax year ending Dec 31 (4 digits between 1960 and 2023). State must be zero if
year is before 1977 or after 2023.
<br>
* **mstat**: Filing status of tax unit. One of the following:
* "single" or 1 for single;
* "married, jointly" or 2 for married, filing jointly;
* "married, separately" or 6 for married, filing separately;
* "dependent child" or 8 for dependent, usually a child with income; or
* "head of household" or 1 for head of household filing status.
<br>
* **state**: State two letter abbreviation ('NC'), full state name ('North Carolina') or [state SOI code](https://taxsim.nber.org/statesoi.html) (32).
If state income taxes are not needed, either label as "No State" or remove this variable. State income tax information is only available from 1977 to 2023.
## Optional columns
* **page**: Age of primary taxpayer as of December 31st of tax year. Taxpayer age variables
determine eligibility for additional standard deductions, personal exemption, EITC and AMT exclusion.
<br>
* **sage**: Age of spouse as of December 31st of tax year (or 0 / NA if no spouse).
<br>
* **depx**: Total number of dependents (part of personal exemption calculation).
<br>
* **age1**: Age of youngest dependent. Used for EITC, CTC and CCC. For 1991+ code students between 20 and 23 as 19 to get the EITC calculation correct. Code infants as "1". If age1 is not present, depx is used as the number of children eligible for the EIC, CTC and CDCC.
<br>
* **age2**: Age of 2nd youngest dependent.
<br>
* **age3**: Age of 3rd youngest dependent.
Ages of any additional dependents are not relevant for the tax calculation, but all dependents should be included in `depx`.
**Incomes**
* **pwages**: Wage and salary income of Primary Taxpayer (exclude QBI).
<br>
* **swages**: Wage and salary income of Spouse (include self-employment but no QBI). Must
be zero or the column should not exist for non-joint returns.
<br>
* **psemp**: Self-employment income of Primary Taxpayer (exclude QBI).
<br>
* **ssemp**: Self-employment income of Spouse.
<br>
* **dividends**: Dividend income (qualified dividends only for 2003 on).
<br>
* **intrec**: Interest income received (+/-).
<br>
* **stcg**: Short Term Capital Gains or losses (+/-).
<br>
* **ltcg**: Long Term Capital Gains or losses (+/-).
<br>
* **otherprop**: Other property income subject to NIIT, including:
* Unearned or limited partnership and passive S-Corp profits;
* Rent not eligible for QBI deduction;
* Non-qualified dividends;
* Other income or loss not otherwise enumerated here.
<br>
* **nonprop**: Other non-property income not subject to Medicare NIIT such as:
* Alimony;
* Nonwage fellowships;
* State income tax refunds (itemizers only);
* Alimony paid;
* Keogh and IRA contributions;
* Foreign income exclusion; and
* NOLs.
<br>
* **pensions**: Taxable Pensions and IRA distributions.
<br>
* **gssi**: Gross Social Security Benefits.
<br>
* **pui**: Unemployment Compensation received - primary taxpayer.
<br>
* **sui**: Unemployment compensation received - secondary taxpayer. The split is relevant only 2020-2021.
<br>
* **transfers**: Other non-taxable transfer income such as:
* Welfare;
* Workers comp;
* Veterans benefits; and
* Child support that would affect eligibility for state property tax rebates but would not be taxable at the federal level.
<br>
* **rentpaid**: Rent paid (used only for calculating state property tax rebates).
<br>
* **proptax**: Real Estate taxes paid. This is a preference for the AMT and is also
used to calculate state property tax rebates.
<br>
* **otheritem**: Other Itemized deductions that are a preference for the Alternative Minimum Tax. These would include:
* Other state and local taxes (line 8 of Schedule A) plus local income tax;
* Preference share of medical expenses; and
* Miscellaneous (line 27).
<br>
* **childcare**: Child care expenses.
<br>
**The following are for the TCJA Business Tax Deduction.**
* **scorp**: Active S-Corp income (is SSTB).
<br>
* **pbusinc**: Primary Taxpayer's Qualified Business Income (QBI) subject to a
preferential rate without phaseout and assuming sufficient wages paid or capital to be eligible
for the full deduction. Subject to SECA and Medicare additional Earnings Tax.
<br>
* **pprofinc**: Primary Taxpayer's Specialized Service Trade or Business service
(SSTB) with a preferential rate subject to claw-back. Subject to SECA and Medicare Additional Earnings Tax.
<br>
* **sbusinc**: Spouse's QBI. Must be zero for non-joint returns, or the
column should not exist.
<br>
* **sprofinc**: Spouse's SSTB. Must be zero for non-joint returns, or the
column should not exist.
|
/scratch/gouwar.j/cran-all/cranData/usincometaxes/vignettes/taxsim-input.Rmd
|
---
title: "Description of Output Columns"
author: "Shane Orr"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{Description of Output Columns}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
```
`usincometaxes` returns tax results as a data frame, with each column representing tax information such as
federal and state income taxes owed or other line items. Each row corresponds to a row in the input data frame.
Users can specify the amount of output with the `return_all_information` parameter in `taxsim_calculate_taxes()`.
Setting `return_all_information` to `TRUE` returns a data frame with 42 columns of detailed tax information.
`FALSE` returns 9 columns of key information. `FALSE` leads to quicker calculations and downloads from the NBER's servers.
`usincometaxes` provides the same output as [TAXSIM 35](http://taxsim.nber.org/taxsim35/).
Setting `return_all_information` to `FALSE` equates to setting `idtl` to 0 in TAXSIM, while `TRUE` corresponds to 2.
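For example, a minimal sketch of requesting the detailed output (not evaluated here) with the package's built-in `taxpayer_finances` data set looks like this:
```{r eval = FALSE}
library(usincometaxes)
data(taxpayer_finances)
detailed_taxes <- taxsim_calculate_taxes(
  .data = taxpayer_finances,
  return_all_information = TRUE
)
ncol(detailed_taxes)
```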
## Standard columns
Returned columns when `return_all_information` = `FALSE`.
* **taxsimid**: ID number from the input data set, so users can match the tax information
with the input data set.
* **fiitax**: Federal income tax liability including capital gains rates, surtaxes, AMT and
refundable and non-refundable credits.
* **siitax**: State income tax liability.
* **fica**: Total FICA taxes, including the employers and employees share.
* **frate**: Marginal federal tax rate.
* **srate**: Marginal state tax rate, if a state was identified.
* **ficar**: FICA rate.
* **tfica**: Taxpayer liability for FICA.
Marginal rates are with respect to wage income unless another rate is requested. If detailed intermediate results are requested, the following 35 columns of data are added.
## Detailed columns
Returned columns when `return_all_information` = `TRUE`.
All standard columns shown above, plus:
* **credits**: Total refundable and non-refundable federal credits
* **v10_federal_agi**: Federal AGI
* **v11_ui_agi**: UI in AGI
* **v12_soc_sec_agi**: Social Security in AGI
* **v13_zero_bracket_amount**: Zero Bracket Amount
* **v14_personal_exemptions**: Personal Exemptions
* **v15_exemption_phaseout**: Exemption Phaseout
* **v16_deduction_phaseout**: Deduction Phaseout
* **v17_itemized_deductions**: Itemized Deductions Allowed (Zero for non-itemizers)
* **v18_federal_taxable_income**: Federal Taxable Income
* **v19_tax_on_taxable_income**: Tax on Taxable Income (no special capital gains rates)
* **v20_exemption_surtax**: Exemption Surtax
* **v21_general_tax_credit**: General Tax Credit
* **v22_child_tax_credit_adjusted**: Child Tax Credit (as adjusted)
* **v23_child_tax_credit_refundable**: Additional Child Tax Credit (refundable)
* **v24_child_care_credit**: Child Care Credit
* **v25_eitc**: Earned Income Credit (total federal)
* **v26_amt_income**: Income for the Alternative Minimum Tax
* **v27_amt_liability**: AMT Liability after credit for regular tax and other allowed credits.
* **v28_fed_income_tax_before_credit**: Federal Income Tax Before Credits (includes special treatment of Capital gains,
exemption surtax (1988-1996) and 15% rate phaseout (1988-1990) but not AMT)
* **v29_fica**: FICA
The following columns are zero if no state is specified:
* **v30_state_household_income**: State Household Income (imputation for property tax credit)
* **v31_state_rent_expense**: State Rent Expense (imputation for property tax credit)
* **v32_state_agi**: State AGI
* **v33_state_exemption_amount**: State Exemption amount
* **v34_state_std_deduction_amount**: State Standard Deduction
* **v35_state_itemized_deduction**: State Itemized Deductions
* **v36_state_taxable_income**: State Taxable Income
* **v37_state_property_tax_credit**: State Property Tax Credit
* **v38_state_child_care_credit**: State Child Care Credit
* **v39_state_eitc**: State EIC
* **v40_state_total_credits**: State Total Credits
* **v41_state_bracket_rate**: State Bracket Rate
* **staxbc**: State tax liability before credits
Additional federal results:
* **v42_self_emp_income**: Earned Self-Employment Income for FICA
* **v43_medicare_tax_unearned_income**: Medicare Tax on Unearned Income
* **v44_medicare_tax_earned_income**: Medicare Tax on Earned Income
* **v45_cares_recovery_rebate**: CARES act Recovery Rebates
|
/scratch/gouwar.j/cran-all/cranData/usincometaxes/vignettes/taxsim-output.Rmd
|
---
title: "Calculating Federal and State Income Taxes"
output: rmarkdown::html_vignette
author: "Shane Orr"
vignette: >
%\VignetteIndexEntry{Calculating Federal and State Income Taxes}
%\VignetteDepends{ggplot2}
%\VignetteDepends{dplyr}
%\VignetteDepends{scales}
%\VignetteDepends{tidyr}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r, include = FALSE}
knitr::opts_chunk$set(
message = FALSE,
collapse = TRUE,
comment = "#>",
fig.align = 'center',
fig.path = 'webimg/',
fig.width = 8,
fig.height = 5,
dpi = 72,
dev = 'png'
)
```
```{r setup, include = FALSE}
library(usincometaxes)
library(dplyr)
library(tidyr)
library(knitr)
library(ggplot2)
```
This article presents two use cases for `usincometaxes`. The first shows users how to estimate income taxes from a data frame containing financial information and other characteristics of taxpayer units. This data could come from surveys such as the [Consumer Expenditure survey](https://www.bls.gov/cex/) or the [Panel Study of Income Dynamics survey](https://psidonline.isr.umich.edu/). The second use case focuses on running simulations.
## Calculating income taxes from survey data
For the first example we will use an internal data set called `taxpayer_finances`. The data is randomly generated and formatted for use with `usincometaxes`. Guidance on formatting data can be found in the [Description of Input Columns](taxsim-input.html) article.
The data set contains financial and other household characteristics that help estimate income taxes.
```{r import_data}
data(taxpayer_finances)
taxpayer_finances %>%
head() %>%
kable()
```
Each row in the data set is a tax paying unit. Thus, each row files one tax return. Columns represent items reported on tax returns that impact taxes. Of course, the information in the data set does not represent everything people report on tax returns. For this reason, the income tax calculations are simply estimates.
We call `taxsim_calculate_taxes()` to estimate federal and state income taxes for each tax paying unit. We are only interested in federal and state tax liabilities, not line item credits and deductions, so we set `return_all_information = FALSE`.
```{r calculate_survey_taxes}
family_taxes <- taxsim_calculate_taxes(
.data = taxpayer_finances,
return_all_information = FALSE
)
family_taxes %>%
head() %>%
kable()
```
The `taxsimid` column is required for any input data frame used in `taxsim_calculate_taxes`. This column is also returned in the output data frame containing tax calculations, allowing us to link the input and output data frames.
```{r join_tax_data}
income_and_taxes <- taxpayer_finances %>%
left_join(family_taxes, by = 'taxsimid')
income_and_taxes %>%
head() %>%
kable()
```
Now we have a single data frame containing both wages and income tax liabilities. Let's take a look at the relationship between wages and estimated federal income taxes. The colors represent the number of children 18 or younger.
```{r plot_family_taxes, fig.height = 7, fig.width = 9}
# custom theme for all plots in the vignette
plt_theme <- function() {
theme_minimal() +
theme(
legend.text = element_text(size = 11),
axis.text = element_text(size = 10),
axis.title=element_text(size=11,face="bold"),
strip.text = element_text(size = 11),
panel.grid.minor = element_blank(),
plot.title = element_text(face = "bold"),
plot.subtitle = element_text(size = 12),
legend.position = 'bottom'
)
}
# color palettes for number of children
dep_color_palette <- rev(c('#4B0055','#353E7C','#007094','#009B95','#00BE7D','#96D84B'))
income_and_taxes %>%
mutate(
tax_unit_income = pwages + swages,
num_dependents_eitc = factor(depx, levels = as.character(0:5)),
filing_status = tools::toTitleCase(mstat)
) %>%
ggplot(aes(tax_unit_income, fiitax, color = num_dependents_eitc)) +
geom_point(alpha = .5) +
scale_x_continuous(labels = scales::label_dollar(scale = .001, suffix = "K"), limits = c(0, 200000)) +
scale_y_continuous(labels = scales::label_dollar(scale = .001, suffix = "K"), limits = c(-10000, 50000)) +
scale_color_discrete(type = dep_color_palette) +
facet_grid(rows = vars(mstat), cols = vars(year)) +
labs(
title = "Federal Income Taxes by Filing Status, Year, and Number of Children",
x = "\nHousehold Wages",
y = "Federal Income Taxes"
) +
plt_theme() +
guides(color = guide_legend(title = "Number of Children 18 or Younger", title.position = "top", byrow = TRUE))
```
The plot shows what we would expect: higher-income families pay more in taxes, and households with more children pay less. We also see the reduction in federal marginal tax rates from 2000 to 2020, as shown by the decrease in income tax liabilities when comparing the two years.
## Income tax simulations
### Association between income taxes paid and household wages
An additional use of `usincometaxes` is to run simulations. This could be as simple as plotting the relationship between wages and income taxes paid. To do this, we first need to create a data set that holds everything constant except for wages. The code block below does this, except it also creates different data sets for households with zero and four children 18 or younger, so we can compare differences on this characteristic as well.
```{r}
# calculate taxes from 0 to 200,000 in wages
wage_linespace <- seq(0, 200000, 100)
n_kids <- 4
base_family_income <- data.frame(
year = 2020,
mstat = 'married, jointly',
state = 'NC',
page = 40,
sage = 40,
depx = n_kids,
age1 = n_kids,
age2 = n_kids,
age3 = n_kids,
pwages = wage_linespace,
swages = 0
)
# create an additional data set with no dependents and add it to the original
family_income <- base_family_income %>%
bind_rows(
# make all number-of-dependents columns 0
base_family_income %>%
mutate(across(c(depx, age1, age2, age3), ~0))
) %>%
# add unique ID to each row
mutate(taxsimid = row_number()) %>%
select(taxsimid, everything())
family_income %>%
head() %>%
kable()
```
Now, we will calculate federal and state income taxes for our simulated data set. Note that `return_all_information = TRUE`. This allows us to examine credit amounts like the Child Tax Credit and Earned Income Tax Credit (EITC).
```{r}
family_income_taxes <- taxsim_calculate_taxes(
.data = family_income,
return_all_information = TRUE
)
family_income_taxes %>%
head() %>%
kable()
```
As before, let's merge our tax data with the original input data set.
```{r}
family_income <- family_income %>%
left_join(family_income_taxes, by = 'taxsimid')
```
Now, let's look at the relationship between household wages and estimated income tax liabilities.
```{r}
family_income_long <- family_income %>%
select(pwages, depx, fiitax, siitax) %>%
pivot_longer(cols = c('fiitax', 'siitax'),
names_to = 'jurisdiction', values_to = 'taxes_paid') %>%
mutate(
jurisdiction = recode(jurisdiction, 'fiitax' = 'Federal Income Taxes', 'siitax' = 'NC State Income Taxes'),
num_dependents_eitc = factor(depx, levels = as.character(0:5)),
post_tax_wages = pwages - taxes_paid
)
# primary_wages, taxes_paid, color = as.character(num_dependents_eitc)
taxes_line_plot <- function(.data, x_var, y_var, color_var) {
ggplot(.data, aes({{x_var}}, {{y_var}}, color = {{color_var}})) +
geom_line(size = 1, alpha = .8) +
geom_hline(yintercept = 0) +
scale_x_continuous(labels = scales::label_dollar(scale = .001, suffix = "K")) +
scale_y_continuous(labels = scales::label_dollar(scale = .001, suffix = "K")) +
scale_color_brewer(type = 'seq', palette = 'Set2') +
plt_theme()
}
taxes_line_plot(family_income_long, pwages, taxes_paid, num_dependents_eitc) +
facet_wrap(vars(jurisdiction)) +
labs(
title = "Relationship Between Wages and Income Taxes Paid",
subtitle = "Taxpayer is married, filing jointly, in 2020",
x = "\nPre-Tax Household Wages",
y = "Income Taxes Paid",
color = 'Number of Children 18 or Younger:'
)
```
Note that North Carolina had a flat income tax of 5.25% in 2020, which is why the state income taxes increase linearly.
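As a quick check, we can recover the implied marginal state rate directly from the simulated results; at higher wages it should sit close to the 5.25% flat rate:
```{r}
family_income %>%
  filter(depx == 0, pwages >= 100000) %>%
  arrange(pwages) %>%
  summarize(implied_marginal_state_rate = mean(diff(siitax) / diff(pwages)))
```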
### Relationship Between Pre and Post-Tax Wages
We'll create an additional plot comparing pre-tax and post-tax household wages.
```{r}
taxes_line_plot(family_income_long, pwages, post_tax_wages, num_dependents_eitc) +
facet_wrap(vars(jurisdiction)) +
labs(
title = "Relationship Between Pre and Post-Tax Wages",
subtitle = "Taxpayer is married, filing jointly, in 2020",
x = "\nPre-Tax Household Wages",
y = "Post-Tax Household Wages",
color = 'Number of Children 18 or Younger:'
)
```
### Child Tax Credit and Earned Income Tax Credit (EITC)
As noted previously, setting `return_all_information = TRUE` lets us retrieve additional output. Included in this additional output are amounts for the Child Tax Credit and EITC. Let's look at the amounts for both credits, while varying household wages. The values reflect a household with four children 18 or younger.
```{r}
tax_items_mapping <- c(
v25_eitc = 'Earned Income Tax Credit',
child_tax_credit = 'Child Tax Credit'
)
family_income %>%
filter(depx == 4) %>%
mutate(child_tax_credit = v22_child_tax_credit_adjusted + v23_child_tax_credit_refundable) %>%
select(pwages, fiitax, v25_eitc, child_tax_credit) %>%
pivot_longer(cols = names(tax_items_mapping), names_to = 'tax_item', values_to = 'amount') %>%
mutate(tax_item = recode(tax_item, !!!tax_items_mapping)) %>%
taxes_line_plot(pwages, amount, tax_item) +
labs(
title = "Relationship Between Wages and Credits",
subtitle = "Taxpayer is married, filing jointly, in 2020 and has four children under 19",
x = "\nPre-Tax Wages",
y = "Credit Amount",
color = NULL
)
```
|
/scratch/gouwar.j/cran-all/cranData/usincometaxes/vignettes/using-usincometaxes.Rmd
|
---
title: "About the Web Assembly Interface"
output: rmarkdown::html_vignette
author: "Shane Orr"
vignette: >
%\VignetteIndexEntry{About the Web Assembly Interface}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r, include = FALSE}
knitr::opts_chunk$set(
message = FALSE,
collapse = TRUE,
comment = "#>",
fig.align = 'center',
fig.path = 'webimg/',
fig.width = 8,
fig.height = 5,
dpi = 72,
dev = 'png'
)
```
`usincometaxes` uses a JavaScript / WebAssembly version of TAXSIM 35 to calculate taxes. The JavaScript / WebAssembly files are part of the package. Therefore, you can calculate taxes without sending data to the TAXSIM server. The output *should* be the same as the output from the TAXSIM 35 server. But, keep reading for why this might not always be the case.
The JavaScript / WebAssembly tooling comes from Aman Gupta Karmani's great work that you can find in this GitHub repo: https://github.com/tmm1/taxsim.js. And while you're checking out his work, don't overlook his tax calculator web app at [taxsim.app](https://taxsim.app).
The JavaScript / WebAssembly files are updated soon after the NBER updates the Fortran code that runs TAXSIM 35. We will then incorporate these updated JavaScript / WebAssembly files into a new version of `usincometaxes`. A benefit of this approach is that a given version of `usincometaxes` will always produce the same results when using 'wasm'. This holds because a given version of `usincometaxes` will always use the exact same JavaScript / WebAssembly files to calculate taxes. You could see a slight discrepancy between `usincometaxes` and TAXSIM 35 if there is a lag from when TAXSIM 35 is updated to when the JavaScript / WebAssembly files are updated.
`usincometaxes`'s [changelog](https://www.shaneorr.io/r/usincometaxes/news/index.html) will note when a version changes due to an update to the JavaScript / WebAssembly files. That way, you can check if you are using the version of `usincometaxes` with the most recent JavaScript / WebAssembly files and reinstall `usincometaxes` if this is not the case. As a reminder, the following code lets you check package versions: `packageVersion("usincometaxes")`.
|
/scratch/gouwar.j/cran-all/cranData/usincometaxes/vignettes/wasm.Rmd
|
rm(list=ls())
library(tmvtnorm)
library(mvtnorm)
library(MCMCpack)
library(MASS)
library(stats)
sum.wt <- function(x=NULL, z=NULL ) { sum(x*z) }
Z.ig <- function(x=NULL, gpar=NULL, v=NULL) {
n = nrow(x)
g = length(gpar$pi);
if (g > 1) {
zlog = glogskewtden(x=x, gpar=gpar) #I think that v is for annealing
w = t(apply( zlog, 1, function(z,wt,v=1) {
x= exp( v*(z + log(wt)) )
x=x/sum(x);
return(x) }, wt=gpar$pi,v=v ))
} else w = matrix(1,nrow=n, ncol=g)
return(w)
}
z.rand <- function(x=NULL, G=NULL) {
priors=rep(1,G)
z=rdirichlet(nrow(x), priors)
return(z)
}
gpar.kmeans <- function(x=NULL, G=NULL){
class.vec <- kmeans(x,G)$cluster
z <- matrix(0, nrow=nrow(x), ncol=G)
for(g in 1:G) z[,g] = as.numeric(class.vec==g)
return(z)
}
gpar.rand <- function(x=NULL, G=NULL){
priors=rep(1,G)
z=rdirichlet(nrow(x), priors)
return(z)
}
rpar <- function(x=NULL, p=NULL, q=NULL, dlam =FALSE, z=NULL) {
val = list()
temp = cov.wt(x, wt=z)
val$xi = temp$center
if ( dlam ) val$lam = diag(rnorm(p, 0, sd=.01))
else val$lam = matrix(rnorm(p, 0, sd=.01),nrow=p, ncol=p)
val$sig = temp$cov
val$invSig = solve(val$sig)
p = nrow(val$lam)
val$omega = val$sig + t(val$lam) %*% val$lam
val$invOmg = solve( val$omega )
val$delta = diag(p) - val$lam %*% val$invOmg %*% t(val$lam)
esigma = eigen(val$sig)
if(q==1) val$Lambda = t(sweep(t(esigma$vectors[,1:q]), 2, sqrt(esigma$values[1:q]), FUN="*"))
if(q!=1) val$Lambda = sweep(esigma$vectors[,1:q], 2, sqrt(esigma$values[1:q]), FUN="*")
val$psi = diag(c(diag(val$sig - val$Lambda %*% t(val$Lambda))))
if(p>1) P=solve(val$psi)
if(p==1) P=1/val$psi
val$invSig = P-P%*%val$Lambda%*%(diag(q)+t(val$Lambda)%*%val$psi%*%val$Lambda)%*%t(val$Lambda)%*%P
val$nu = 10
return(val)
}
rgpar <- function(x=NULL, g=NULL, p=NULL, dlam=FALSE, q=NULL, z=NULL) {
val = list()
for (i in 1:g) val[[i]] = rpar(x=x, p=p, q=q, dlam = dlam, z=z[,i])
val$pi = apply(z,2,mean)
return(val)
}
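# Aitken acceleration-based stopping rule: given the log-likelihood history l
# up to iteration it, return 1 to keep iterating and 0 once the estimated
# asymptotic log-likelihood is within 0.1 of the current value.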
aitken<-function(l, it){
flag=1
if(l[it-1] == l[it-2]){
flag<-0
}
else{
ak<-(l[it]-l[it-1])/(l[it-1]-l[it-2])
l.inf<-l[it-1]+((l[it]-l[it-1])/(1-ak))
if( abs(l.inf-l[it])< 0.1 ) flag<-0
else flag<-1
}
return(flag)
}
bic <- function(l=NULL, par=NULL, x=NULL, q=NULL){
it=length(l)
l.max=l[it]
N=nrow(x)
G=length(par)
p=ncol(x)
m=G*(3*p+p*q+1)
val = 2*l.max - m*log(N)
return(val)
}
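# uskewFA: fit a mixture of unrestricted skew-t factor analyzers by an EM
# algorithm. x is a numeric data matrix, G the number of components, q the
# number of latent factors, init the initialization (1 = k-means, 2 = random)
# and max.it the maximum number of EM iterations. The result contains the MAP
# classification (map), the BIC (bic), the posterior probabilities (zhat) and
# the log-likelihood trace (likelihood).
#
# Example usage (a sketch kept as a comment so it is not executed when the
# file is sourced; any numeric matrix can stand in for the data):
# x <- as.matrix(iris[, 1:4])
# fit <- uskewFA(x = x, G = 2, q = 1)
# table(fit$map)  # cluster sizes from the MAP classification
# fit$bic         # BIC of the fitted model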
uskewFA <- function(x=NULL, G=NULL, q=NULL, init=1, max.it=100) {
x=as.matrix(x)
val = list()
val$z = matrix(0,nrow=nrow(x),ncol=G)
if(init==1) val$z=gpar.kmeans(x=x, G=G)
if(init==2) val$z=gpar.rand(x=x, G=G)
val$gpar = rgpar(x=x, g=G, p=ncol(x), dlam=TRUE, q=q, z=val$z)
n=nrow(x)
it=1
not.converged=1
while(not.converged && it<=max.it) {
val$z = Z.ig(x=x, gpar=val$gpar, v=1)
for (g in 1:G) {
estepg = e.step(x=x, par=val$gpar[[g]], wt=val$z[,g])
val$gpar[[g]] = m.step(x= x, gpar=val$gpar[[g]], estep=estepg, z=val$z[,g], it=it)
}
val$gpar$pi = apply(val$z,2,mean)
#print(val$gpar$pi)
val$loglik[it] = loglik(x=x, gpar=val$gpar)
if(it>3) not.converged = aitken(l=val$loglik, it=it)
#print(it)
#plot(val$loglik)
it=it+1
}
val$z = Z.ig(x=x, gpar=val$gpar, v=1)
val$map = apply(val$z,1, function(z){ (1:length(z))[z==max(z)] })
output=list()
output$map=val$map
output$bic=bic(l=val$loglik,par=val$gpar,x=x,q=q)
output$zhat=val$z
output$likelihood=val$loglik
return(output)
}
loglik <- function(x=NULL, gpar=NULL) {
# output is a G x nrow(data) matrix
zlog = glogskewtden(x= x, gpar= gpar)
w = apply( exp(zlog),1,function(z,wt) { sum(z*wt) } , wt=gpar$pi)
val = sum(log(w))
if( is.nan(val) ) val =NA
return(val)
}
#Returns log of density to be used to calculate log likelihood
glogskewtden <- function(x=NULL, gpar=NULL) {
n = nrow(x); g = length(gpar$pi);
zlog = matrix(0, nrow=n, ncol=g)
for (k in 1:g) zlog[,k] = skewtden(x=x, par= gpar[[k]], logd= TRUE)
return(zlog)
}
update.nu <- function(nuv=NULL) {
nu0 = c(2,200)
v0 = log(nu0/2) + 1 - digamma(nu0/2) - nuv
sv0 = sign(v0)
if ( all(sv0<0) ) {
val = 2
} else if ( all(sv0>0) ) {
val= 200
} else {
temp = uniroot(f=function(nu=NULL, nuv=NULL) {
val = log(nu/2) + 1 - digamma(nu/2) - nuv
return(val) }, interval=c(2,200), nuv=nuv)
val = temp$root
}
return(val)
}
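# E-step for a single component: returns the conditional scale weights tau and
# logtau, the truncated-moment means eta and the weighted scatter psi.wt used
# by the M-step.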
e.step <- function(x=NULL, par=NULL,wt=NULL) {
n = nrow(x); p =ncol(x);
if (is.null(wt)) wt= rep(1,n)
eta = matrix(0, nrow=n, ncol=p)
psi = matrix(0, nrow=p, ncol=p)
tau = numeric(n)
logtau = numeric(n)
u = mahalanobis(x, center=par$xi, cov= par$invOmg, inverted=TRUE)
qq = as.matrix(sweep(x, 2, par$xi, "-")) %*% par$invOmg %*% t(par$lam)
lx= rep(-Inf, p)
for (i in 1:nrow(x)) {
cc = (par$nu + p + c(0,2))/(par$nu + u[i])
pp = c( pmvt2(lower=lx, upper=qq[i,]*sqrt(cc[1]), delta=rep(0,p), sigma=round(par$delta,3), df=par$nu+p),
pmvt2(lower=lx, upper=qq[i,]*sqrt(cc[2]), delta=rep(0,p), sigma=round(par$delta,3), df=par$nu+p+2) )
tau[i] = cc[1] * pp[2]/pp[1]
# if (tau[i] <0) {
# print( c(tau[i], cc,pp) )
# print('tau <0')
# print(lx)
# print(qq[i,])
# print(par$delta)
# print(par$nu+p+2)
# #print(par)
# stop('here')
# }
logtau[i] = tau[i] - log( (par$nu+u[i])/2) - cc[1] + digamma( (par$nu+p)/2 )
temp = try(truncated.tmom(mu=qq[i,], sigma=par$delta/cc[2], a=rep(0,p), nu=par$nu + p + 2 ), silent=TRUE)
# fall back to a rounded delta if the unrounded call fails numerically
if(!is.list(temp)) temp = try(truncated.tmom(mu=qq[i,], sigma=round(par$delta/cc[2],3), a=rep(0,p), nu=par$nu + p + 2 ), silent=TRUE)
if(!is.list(temp)) temp = try(truncated.tmom(mu=qq[i,], sigma=round(par$delta/cc[2],2), a=rep(0,p), nu=par$nu + p + 2 ), silent=TRUE)
eta[i,] = temp$tmean
psi = psi + wt[i]*tau[i]*( temp$tvar + outer(temp$tmean,temp$tmean) )
}
val = list(eta=eta, psi.wt=psi, tau=tau, logtau=logtau)
return(val)
}
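# M-step for a single component: updates the location xi, skewness matrix lam,
# scale sig and degrees of freedom nu, together with the factor-analytic
# decomposition (Lambda, psi) implied by the E-step quantities and weights z.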
m.step <- function(x= NULL, gpar=NULL, estep=NULL, z=NULL, it=NULL) {
n = nrow(x); p=ncol(x); q=ncol(gpar$Lambda); #
wtt = z*estep$tau
ngt = sum(wtt)
par1=gpar
par1$xi = as.numeric(apply(x - estep$eta %*% par1$lam,2, weighted.mean, w=wtt))
r = sweep(x,2, par1$xi, FUN="-")
r = as.matrix(r,nrow=n,ncol=p,dimnames=NULL)
# diagonal lam
temp = matrix(0,nrow=p,ncol=p)
for (j in 1:n) temp = temp+ wtt[j]*outer(as.vector(r[j,]), as.vector(estep$eta[j,]))
# # if(!is.null(par1$Delta)) par1$lam = diag( as.numeric(solve(par1$invSig * estep$psi.wt) %*% (par1$invSig * t(temp)) %*% matrix(1,nrow=p,ncol=1)), p, p)
# else par1$Delta = solve(estep$psi.wt) %*% t(temp)
par1$lam = diag( as.numeric(solve(par1$invSig * estep$psi.wt) %*% (par1$invSig * t(temp)) %*% matrix(1,nrow=p,ncol=1)), p, p)
rr = cov.wt(r, wt=wtt, center=rep(0,p), method="ML")$cov* ngt
sig = rr + t(par1$lam) %*% (estep$psi.wt) %*% par1$lam - ( (temp) %*% (par1$lam) + t(par1$lam) %*% t(temp) )
par1$sig = sig/sum(wtt)
par1$nu = update.nu(nuv=weighted.mean( estep$tau - estep$logtau , w=z) )
par1$Beta = t(par1$Lambda)%*%par1$invSig
par1$omega = par1$sig + t(par1$lam) %*% par1$lam;
par1$invOmg = solve( par1$omega );
par1$psi = diag(diag(par1$sig-par1$Lambda%*%par1$Beta%*%par1$sig))
#par1$inv.psi = solve(par1$psi)
old.Theta=par1$Theta
par1$Theta = diag(q)-par1$Beta%*%par1$Lambda+par1$Beta%*%par1$sig%*%t(par1$Beta)
#if(any(par1$Theta>10) && it>=4) par1$Theta=old.Theta
#print(par1$Theta)
par1$Lambda = par1$sig%*%t(par1$Beta)%*%ginv(par1$Theta)
#P=par1$inv.psi
par1$invSig = par1$invOmg #P-P%*%par1$Lambda%*%(diag(q)+t(par1$Lambda)%*%par1$psi%*%par1$Lambda)%*%t(par1$Lambda)%*%P
#par1$delta = diag(p) - (par1$lam) %*% par1$invOmg %*% t(par1$lam);
return(par1)
}
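# truncated.tmom: moments (mean and covariance) of a truncated multivariate t
# distribution, dispatching to the univariate, bivariate or general k-variate
# implementation depending on the length of mu.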
truncated.tmom <- function(mu=NULL, sigma=NULL, a=NULL, nu=NULL) {
if (length(mu) == 1 ) val = truncated.tmom1(mu= as.numeric(mu), sigma= as.numeric(sigma), lower=0, upper=Inf, nu=nu)
else if (length(mu) == 2) val = truncated.tmom2(mu= mu, sigma= sigma, a=a, nu=nu)
else val = truncated.tmomk(mu= mu, sigma= sigma, a=a, nu=nu)
return(val)
}
truncated.tmomk <- function(mu=NULL, sigma=NULL, a=NULL, nu=NULL) {
mu = -1*mu
p = length(mu)
if (p < 3 ) stop("truncated.tmomk is for k greater 2")
xi = numeric(p)
H = matrix(0,p,p)
for (k in 1:p) {
lv1 = 1/2*( log(nu/2) - log(2*pi*sigma[k,k]) )
lv2 = (nu-1)/2*( log(nu)-log(nu+(mu[k]-a[k])^2/sigma[k,k]) )
lv3 = lgamma((nu-1)/2) - lgamma(nu/2)
a.star = (a[-k]-mu[-k]) - ((a[k]-mu[k])/sigma[k,k]) * sigma[k,-k]
s.star = (nu + (mu[k]-a[k])^2/sigma[k,k])/(nu-1) *( sigma[-k,-k] - outer(sigma[k,-k],sigma[k,-k])/sigma[k,k] )
v4 = pmvt2(lower=rep(-Inf,p-1), upper=a.star, delta=rep(0,p-1), sigma=s.star, df=nu-1)
xi[k] = exp(lv1+lv2+lv3) *v4
for (l in 1:p) {
if (k != l ) {
akl = a[c(k,l)]; a.kl = a[-c(k,l)];
mukl = mu[c(k,l)]; mu.kl = mu[-c(k,l)];
slk = sigma[c(k,l),c(k,l)]; s.lk = sigma[-c(k,l),-c(k,l)];
s.lklk = as.matrix(sigma[c(k,l), -c(k,l)])
invslk = solve(slk)
nu.star = nu + as.numeric( (akl - mukl) %*% invslk %*% (akl - mukl) )
a.ss = as.numeric( (a.kl - mu.kl) - t(s.lklk) %*% invslk %*% (akl - mukl) )
s.ss = nu.star/(nu-2)*( s.lk - t(s.lklk) %*% invslk %*% (s.lklk) )
lv1 = log(nu/(nu-2))-log(2*pi)-log( sigma[k,k]*sigma[l,l] - sigma[k,l]*sigma[l,k] )/2
lv2 = (nu/2-1)*log(nu/nu.star)
v3 = pmvt2(lower=rep(-Inf,p-2), upper=a.ss, delta=rep(0,p-2), sigma=s.ss, df=nu-2)
H[k,l] = -exp(lv1+lv2)*v3
}}
}
for (k in 1:p) H[k,k] = ( xi[k]*(a[k]-mu[k]) - sum(sigma[-k,k]*H[-k,k]))/sigma[k,k]
c1 = pmvt2(lower=rep(-Inf,p), upper=a-mu, delta=rep(0,p), sigma=sigma, df=nu)
c2 = pmvt2(lower=rep(-Inf,p), upper=a-mu, delta=rep(0,p), sigma=sigma*(nu/(nu-2)), df=nu-2)
mu.star = as.numeric( (sigma %*% xi)/c1 )
tmu = mu - mu.star
s2 = ( sigma %*% H %*% sigma )/c1
s3 = ( c2/c1*nu/(nu-2) )* sigma
tsig = s3 - s2
tsig = tsig - outer(mu-tmu, mu-tmu)
val = list(tmean=-1*tmu, tvar=tsig)
return(val)
}
truncated.tmom1 <- function(mu=NULL, sigma=NULL, lower=NULL, upper=NULL, nu=NULL) {
s = sqrt(sigma)
ab = (c(lower,upper) - mu)/s
v = nu
d1 = -dt( ab*sqrt((v-2)/v), df=nu-2)*sqrt(v/(v-2))
p0 = pt( ab, df=nu)
rdp = diff(d1)/diff(p0)
mus = s*rdp
tmean = mu + mus
p2 = pt( ab/sqrt( nu/(nu-2) ), df=nu-2)
tvar = ((nu-1)*(nu/(nu-2))*diff(p2)/diff(p0) - nu )*sigma
#tvar = tvar - (mu^2 - tmean*mu - tmean* mu ) - tmean^2
tvar = tvar - (mu- tmean)^2
val = list(tmean=as.numeric(tmean), tvar=as.numeric(tvar) )
return(val)
}
truncated.tmom2 <- function(mu=NULL, sigma=NULL, a=NULL, nu=NULL) {
mu = -1*mu
if (length(mu) != 2 ) stop("mu does not have length 2")
p = length(mu)
xi = numeric(2)
H = matrix(0,2,2)
ds = diag(sigma)
cs = sigma[1,2]
lv1 = 1/2*( log(nu/2) - log(2*pi*ds) )
lv2 = (nu-1)/2*( log(nu)-log(nu+(mu-a)^2/ds) )
lv3 = lgamma( (nu-1)/2) - lgamma(nu/2)
a.star = rev(a-mu) - ((a-mu)/ds)*cs
s.star = ( rev(ds) - cs^2/ds )*(nu + (mu-a)^2/ds )/(nu-1)
v4 = pt(a.star/sqrt(s.star), df=nu-1)
xi = exp(lv1+lv2+lv3) *v4
nu.star = nu + as.numeric( (a - mu) %*% solve(sigma) %*% (a - mu) )
lv1 = log(nu/(nu-2))-log(2*pi)-log( prod(ds) - cs^2 )/2
lv2 = (nu/2-1)*log(nu/nu.star)
H[1,2] = -exp(lv1+lv2)
H[2,1] = H[1,2]
diag(H) = (xi*(a-mu) - cs*H[1,2])/ds
##########
c1 = pmvt2(lower=rep(-Inf,2), upper=a-mu, delta=rep(0,2), sigma=sigma, df=nu)
c2 = pmvt2(lower=rep(-Inf,2), upper=a-mu, delta=rep(0,2), sigma=sigma*(nu/(nu-2)), df=nu-2)
mu.star = as.numeric( (sigma %*% xi)/c1 )
tmu = mu - mu.star
# s1 = outer(mu-mu.star, mu-mu.star) - outer(mu.star,mu.star)
# s1 = outer(mu,mu)-outer(mu,mu.star)-outer(mu.star,mu)
s2 = ( sigma %*% H %*% sigma )/c1
s3 = ( c2/c1*nu/(nu-2) )* sigma
tsig = s3 - s2
tsig = tsig - outer(mu-tmu, mu-tmu)
val = list(tmean=-1*tmu, tvar=tsig)
return(val)
}
skewtden <- function(x=NULL, par=NULL, logd=TRUE) {
## x is the data
n = nrow(x); p = ncol(x);
r = sweep(x, 2, par$xi, "-")
qq = as.matrix(r) %*% par$invOmg %*% t(par$lam)
u = mahalanobis(r, center=rep(0,p), cov= par$invOmg, inverted=TRUE)
u.star = sqrt( (p + par$nu)/(u + par$nu) )
q.star = sweep(qq, 1, u.star, FUN="*")
v1 = p*log(2)
v2 = try(dmvt(r, delta = rep(0,p), sigma = par$omega, df = par$nu, log=TRUE ), silent=TRUE)
# retry with a progressively rounded omega if dmvt fails for numerical reasons
for (dg in c(5,4,3,2,1,0)) {
if (is.numeric(v2)) break
v2 = try(dmvt(r, delta = rep(0,p), sigma = round(par$omega,dg), df = par$nu, log=TRUE ), silent=TRUE)
}
v3 = numeric(n)
for (k in 1:n) v3[k] = log(pmvt2(lower= rep(-Inf,p), upper=q.star[k,], delta = rep(0,p), sigma = round(par$delta,3), df=p+par$nu ))
lval = v1 + v2 + v3
if (!logd) val = exp(lval)
else val = lval
return(val)
}
pmvt2 <- function(lower=NULL, upper= NULL, delta= NULL, sigma= NULL, df=NULL) {
nu = df
if (length(lower) == 1) val = pt( (upper-delta)/sqrt(sigma), df=round(nu))
else val = pmvt(lower=lower, upper=upper, delta=delta, sigma=sigma, df=round(nu))
return(val)
}
# library(alr3)
# data(ais)
# x=ais[,c("Wt","Bfat","BMI","SSF","Ht")]
# temp=EM(x,2,1)
# Rprof("boot.out")
# temp=EM(x,2,1)
# Rprof(NULL)
# vec1=mvrnorm(100,mu=c(0,0,0,0,0,0,0),Sigma=diag(7))
# vec2=mvrnorm(100,mu=c(20,20,20,20,20,20,20),Sigma=diag(7))
# x=rbind(vec1,vec2)
# test=EM(x,2,1)
# vec1=mvrnorm(100,mu=c(0,0,0,0,0,0,0),Sigma=diag(7))
# vec2=mvrnorm(100,mu=c(20,20,20,20,20,20,20),Sigma=diag(7))
# x=rbind(vec1,vec2)
# test=EM(x,2,1)
|
/scratch/gouwar.j/cran-all/cranData/uskewFactors/R/uSkewFactors.R
|
# Copyright (c) 2013-2020 Stefan Moeding
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
##############################################################################
#' Class "\code{USL}" for Universal Scalability Law models
#'
#' This class encapsulates the Universal Scalability Law. Use the function
#' \code{\link{usl}} to create new objects from this class.
#'
#' @slot frame The model frame.
#' @slot call The call used to create the model.
#' @slot regr The name of the regressor variable.
#' @slot resp The name of the response variable.
#' @slot coefficients The coefficients alpha, beta and gamma of the model.
#' @slot coef.std.err The standard errors for the coefficients alpha and beta.
#' @slot coef.names A vector with the names of the coefficients.
#' @slot fitted The fitted values of the model. This is a vector.
#' @slot residuals The residuals of the model. This is a vector.
#' @slot df.residual The degrees of freedom of the model.
#' @slot sigma The residual standard deviation of the model.
#' @slot limit The scalability limit as per Amdahl.
#' @slot peak A vector with the predictor and response values of the peak.
#' @slot optimal A vector with the optimal predictor and response values.
#' @slot efficiency The efficiency, e.g. speedup per processor.
#' @slot na.action The \code{na.action} used by the model.
#'
#' @seealso \code{\link{usl}}
#'
#' @name USL-class
#' @exportClass USL
setClass("USL",
representation(frame = "data.frame",
call = "call",
regr = "character",
resp = "character",
coefficients = "vector",
coef.std.err = "vector",
coef.names = "vector",
fitted = "vector",
residuals = "vector",
df.residual = "integer",
sigma = "numeric",
limit = "numeric",
peak = "vector",
optimal = "vector",
efficiency = "vector",
na.action = "character"),
prototype(coef.names = c("alpha", "beta", "gamma"),
df.residual = 0L,
na.action = "na.omit"),
validity = function(object) {
err <- character()
if (length(object@regr) == 0) {
msg <- "name of regressor variable cannot be empty"
err <- c(err, msg)
}
if (length(object@resp) == 0) {
msg <- "name of response variable cannot be empty"
err <- c(err, msg)
}
if (length(err) == 0) return(TRUE) else return(err)
})
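## Example of accessing the documented slots (a sketch; assumes the package is
## attached and a model has been fit with usl()):
##
## fit <- usl(throughput ~ load, specsdm91)
## fit@coefficients  # named vector with alpha, beta and gamma
## fit@peak          # predictor and response values at the peak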
|
/scratch/gouwar.j/cran-all/cranData/usl/R/AllClasses.R
|
# Copyright (c) 2013-2020 Stefan Moeding
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
setGeneric("plot", function(x, y, ...) standardGeneric("plot"))
setGeneric("print", function(x, ...) standardGeneric("print"))
setGeneric("predict", function(object, ...) standardGeneric("predict"))
setGeneric("summary", function(object, ...) standardGeneric("summary"))
setGeneric("confint", function(object, parm, level) standardGeneric("confint"))
setGeneric("sigma", function(object, ...) standardGeneric("sigma"))
#
# USL specific methods
#
setGeneric("scalability",
function(object, alpha, beta, gamma) standardGeneric("scalability"))
setGeneric("optimal.scalability",
function(object, alpha, beta, gamma) standardGeneric("optimal.scalability"))
setGeneric("peak.scalability",
function(object, alpha, beta, gamma) standardGeneric("peak.scalability"))
setGeneric("limit.scalability",
function(object, alpha, beta, gamma) standardGeneric("limit.scalability"))
setGeneric("efficiency",
function(object) standardGeneric("efficiency"))
setGeneric("overhead",
function(object, newdata) standardGeneric("overhead"))
|
/scratch/gouwar.j/cran-all/cranData/usl/R/AllGenerics.R
|
# Copyright (c) 2013-2020 Stefan Moeding
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
##############################################################################
#' Confidence Intervals for USL model parameters
#'
#' Estimate confidence intervals for one or more parameters in a USL model.
#' The intervals are calculated from the parameter standard error using the
#' Student t distribution at the given level.
#'
#' Bootstrapping is no longer used to estimate confidence intervals.
#'
#' @param object A USL object.
#' @param parm A specification of which parameters are to be given confidence
#' intervals, either a vector of numbers or a vector of names. If missing,
#' all parameters are considered.
#' @param level The confidence level required.
#'
#' @return A matrix (or vector) with columns giving lower and upper confidence
#' limits for each parameter. These will be labelled as (1-level)/2 and
#' 1 - (1-level)/2 in \% (by default 2.5\% and 97.5\%).
#'
#' @seealso \code{\link{usl}}
#'
#' @examples
#' require(usl)
#'
#' data(specsdm91)
#'
#' ## Create USL model
#' usl.model <- usl(throughput ~ load, specsdm91)
#'
#' ## Print confidence intervals
#' confint(usl.model)
#'
#' @export
#'
setMethod(
f = "confint",
signature = "USL",
definition = function(object, parm, level = 0.95) {
ci.value <- NULL # vector with confidence interval values
# Degree of freedom for Student t distribution
df <- length(object@residuals) - 1L
# Vectors to collect column and row names of result matrix
col.name <- paste(formatC(100 * c((1-level)/2, 1-(1-level)/2)), "%")
row.name <- NULL
# Return confidence intervals for both parameters if 'parm' is unset
if (missing(parm)) parm <- object@coef.names
# Replace numeric parameters with named parameters
if (mode(parm) == "numeric") {
parm <- as.character(parm)
parm <- gsub("1", "alpha", parm, ignore.case = TRUE)
parm <- gsub("2", "beta", parm, ignore.case = TRUE)
parm <- gsub("3", "gamma", parm, ignore.case = TRUE)
}
# Calculate confidence intervals for the given level
for (i in [email protected]) {
if (i %in% parm) {
pa <- object@coefficients[i]
se <- [email protected][i] * qt(level, df)
ci.value <- c(ci.value, pa - se, pa + se)
row.name <- c(row.name, i)
}
}
# Build dummy matrix if no sensible parameters were requested
if (length(row.name) < 1) {
row.name <- NA
ci.value <- c(NA, NA)
}
# Return confidence intervals as matrix
matrix(ci.value, nrow = length(row.name), ncol = 2,
byrow = TRUE, dimnames = list(row.name, col.name))
})
|
/scratch/gouwar.j/cran-all/cranData/usl/R/confint.R
|
##############################################################################
#' Performance of a Sun SPARCcenter 2000 in the SPEC SDM91 benchmark
#'
#' A dataset containing performance data for a Sun SPARCcenter 2000 (16 CPUs)
#'
#' A Sun SPARCcenter 2000 with 16 CPUs was used for the SPEC SDM91 benchmark
#' in October 1994. The benchmark simulates a number of users working on the
#' UNIX server and measures the number of script executions per hour.
#'
#' The data frame contains the following variables:
#' \itemize{
#' \item \code{load} The number of simulated users (1--216).
#' \item \code{throughput} The achieved throughput in scripts per hour.
#' }
#'
#' @name specsdm91
#' @docType data
#' @keywords datasets
#' @format A data frame with 7 rows on 2 variables
#' @source Neil J. Gunther. Guerrilla Capacity Planning: A Tactical
#' Approach to Planning for Highly Scalable Applications and Services.
#' Springer, Heidelberg, Germany, 1st edition, 2007.
#' Original dataset from
#' \url{http://www.spec.org/osg/sdm91/results/results.html}
NULL
##############################################################################
#' Performance of a ray-tracing software on different hardware configurations
#'
#' A dataset containing performance data for a ray-tracing benchmark.
#'
#' The benchmark measured the number of ray-geometry intersections per second.
#' The data was gathered on an SGI Origin 2000 with 64 R12000 processors
#' running at 300 MHz.
#'
#' The data frame contains the following variables:
#' \itemize{
#' \item \code{processors} The number of CPUs used for the benchmark (1--64).
#' \item \code{throughput} The number of operations per second.
#' }
#'
#' @name raytracer
#' @docType data
#' @keywords datasets
#' @format A data frame with 11 rows on 2 variables
#' @source Neil J. Gunther. Guerrilla Capacity Planning: A Tactical
#' Approach to Planning for Highly Scalable Applications and Services.
#' Springer, Heidelberg, Germany, 1st edition, 2007.
#' Original dataset from \url{https://sourceforge.net/projects/brlcad/}
NULL
##############################################################################
#' Performance of an Oracle database used for online transaction processing
#'
#' A dataset containing performance data for an Oracle OLTP database measured
#' between 8:00am and 8:00pm on January, 19th 2012. The measurements were
#' recorded for two minute intervals during this time and a timestamp indicates
#' the end of the measurement interval. The performance metrics were taken from
#' the \code{v$sysmetric} family of system performance views.
#'
#' The Oracle database was running on a 4-way server.
#'
#' The data frame contains different types of measurements:
#' \itemize{
#' \item Variables of the "time" type are expressed in seconds per second.
#' \item Variables of the "rate" type are expressed in events per second.
#' \item Variables of the "util" type are expressed as a percentage.
#' }
#'
#' The data frame contains the following variables:
#' \itemize{
#' \item \code{timestamp} The end of the two minute interval for which the
#' remaining variables contain the measurements.
#' \item \code{db_time} The time spent inside the database either working on
#' a CPU or waiting (I/O, locks, buffer waits ...). This time is expressed
#' as seconds per second, so two sessions working for exactly one second
#' each will contribute a total of two seconds per second of \code{db_time}.
#' In Oracle this value is also known as \emph{Average Active Sessions}
#' (AAS).
#' \item \code{cpu_time} The CPU time used during the interval. This is also
#' expressed as seconds per second. A 4-way machine has a theoretical
#' capacity of four CPU seconds per second.
#' \item \code{call_rate} The number of user calls (logins, parses, or
#' execute calls) per second.
#' \item \code{exec_rate} The number of statement executions per second.
#' \item \code{lio_rate} The number of logical I/Os per second. A logical
#' I/O is the Oracle term for a cache hit in the database buffer cache.
#' This metric does not indicate if an additional physical I/O was
#' necessary to load the buffer from disk.
#' \item \code{txn_rate} The number of database transactions per second.
#' \item \code{cpu_util} The CPU utilization of the database server in
#' percent. This was also measured from within the database.
#' }
#'
#' @name oracledb
#' @docType data
#' @keywords datasets
#' @format A data frame with 360 rows on 8 variables
#'
NULL
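## Example of exploring the data set (a sketch; the choice of variables is
## only illustrative):
##
## data(oracledb)
## head(oracledb)
## plot(txn_rate ~ db_time, data = oracledb)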
|
/scratch/gouwar.j/cran-all/cranData/usl/R/datasets.R
|
# Copyright (c) 2013-2020 Stefan Moeding
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
##############################################################################
#' Efficiency of the system
#'
#' The efficiency of a system expressed in terms of the deviation from
#' linear scalability.
#'
#' The function returns a vector which contains the deviation from linearity
#' for every measurement of the model input. A value of \code{1} indicates
#' linear scalability while values less than \code{1} correspond to the
#' fraction of the measurement compared to linear scalability.
#'
#' @param object A USL object.
#'
#' @return A vector of numeric values.
#'
#' @seealso \code{\link{usl}}
#'
#' @references Neil J. Gunther. Guerrilla Capacity Planning: A Tactical
#' Approach to Planning for Highly Scalable Applications and Services.
#' Springer, Heidelberg, Germany, 1st edition, 2007.
#'
#' @examples
#' require(usl)
#'
#' data(raytracer)
#'
#' ## Show the efficiency
#' efficiency(usl(throughput ~ processors, raytracer))
#'
#' @aliases efficiency
#' @export
#'
setMethod(
f = "efficiency",
signature = "USL",
definition = function(object) {
return(object@efficiency)
}
)
|
/scratch/gouwar.j/cran-all/cranData/usl/R/efficiency.R
|
# Copyright (c) 2013-2020 Stefan Moeding
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
##############################################################################
#' Extract parts of a "\code{USL}" object
#'
#' The operator extracts a part of a \code{\link{USL-class}} object.
#'
#' This is a generic method for the class used in the usl package.
#'
#' The operator is used internally by functions like \code{\link{coef}}, so
#' it is necessary to have a working implementation of the \code{coef}
#' function.
#'
#' @param x Object from which to extract elements.
#' @param name A literal character string or a \link{name} (possibly quoted).
#'
#' @seealso \code{\link{USL-class}}, \code{\link{Extract}}
#'
#' @examples
#' \dontrun{
#' ## get coefficients from a usl model
#' usl.model$coefficients
#' }
#'
#' @keywords internal
#'
setMethod(
f = "$",
signature = "USL",
definition = function(x, name) { slot(x, name) }
)
|
/scratch/gouwar.j/cran-all/cranData/usl/R/extract.R
|
# Copyright (c) 2014-2020 Stefan Moeding
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
##############################################################################
#' Calculate gradient for the universal scalability function
#'
#' The implementation of this function has been adopted from the generated
#' output of the \code{\link{deriv}} function.
#'
#' @param x The USL object.
#'
#' @return The gradient matrix.
#'
#' @seealso \code{\link{usl}}
#'
#' @keywords internal
#'
gradient.usl <- function(x) {
alpha = x@coefficients['alpha']
beta = x@coefficients['beta']
gamma = x@coefficients['gamma']
n = x@frame[, x@regr, drop = TRUE]
# Based on the output of:
# deriv(~ (gamma * n) / (1 + (alpha * (n-1)) + (beta * n * (n-1))), # rhs
# c('alpha', 'beta', 'gamma'), # params
# function(alpha, beta, gamma, n){}) # args
expr1 <- gamma * n
expr2 <- n - 1
expr3 <- 1 + (alpha * expr2) + (beta * n * expr2)
expr4 <- expr3 ^ 2
grad.alpha <- -(expr1 * expr2 / expr4)
grad.beta <- -(expr1 * (n * expr2) / expr4)
grad.gamma <- n / expr3
matrix(c(grad.alpha, grad.beta, grad.gamma),
nrow = length(n),
         dimnames = list(1:length(n), x@coef.names))
}
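# Illustrative check, not part of the package: the analytic gradient above
# should agree with the derivative generated by deriv() when evaluated at the
# fitted coefficients. The model "m" below is a hypothetical fit of the
# shipped raytracer demo data; from user code the internal helper would be
# reached as usl:::gradient.usl().
#
# m <- usl(throughput ~ processors, raytracer)
# g <- deriv(~ (gamma * n) / (1 + (alpha * (n-1)) + (beta * n * (n-1))),
#            c("alpha", "beta", "gamma"),
#            function(alpha, beta, gamma, n) {})
# num <- attr(g(coef(m)[["alpha"]], coef(m)[["beta"]], coef(m)[["gamma"]],
#               raytracer$processors), "gradient")
# all.equal(unname(num), unname(gradient.usl(m)))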
|
/scratch/gouwar.j/cran-all/cranData/usl/R/gradient.R
|
# Copyright (c) 2013-2020 Stefan Moeding
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
##############################################################################
#' Generate an object from the "\code{USL}" class
#'
#' Initialize the object.
#'
#' @param .Object The object to initialize.
#' @param call The formula used to create the USL model.
#' @param frame The model frame containing the variables in the model.
#' @param regr The name of the regressor variable in the model.
#' @param resp The name of the response variable in the model.
#' @param alpha The contention parameter of the model.
#' @param beta The coherency delay parameter of the model.
#' @param gamma The slope of the ideal parallel scaling of the three parameter
#'   model. This parameter corresponds to the scale.factor parameter of the
#' two parameter model.
#'
#' @return An object of the specific type.
#'
#' @keywords internal
#'
setMethod(
f = "initialize",
signature = "USL",
definition = function(.Object, call, frame, regr, resp, alpha, beta, gamma) {
.Object@call <- call
    .Object@coefficients <- structure(c(alpha, beta, gamma),
                                      names = .Object@coef.names)
.Object@frame <- frame
.Object@regr <- regr
.Object@resp <- resp
.Object@efficiency <- structure(frame[[resp]] / gamma / frame[[regr]],
names = frame[, regr])
    [email protected] <- length(frame[[resp]]) - length(.Object@coef.names)
# Call inspector
validObject(.Object)
return(.Object)
}
)
|
/scratch/gouwar.j/cran-all/cranData/usl/R/initialize.R
|
# Copyright (c) 2014-2020 Stefan Moeding
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
##############################################################################
#' Overhead method for Universal Scalability Law models
#'
#' \code{overhead} calculates the overhead in processing time for a system
#' modeled with the Universal Scalability Law.
#' It evaluates the regression function in the frame \code{newdata} (which
#' defaults to \code{model.frame(object)}). The result contains the ideal
#' processing time and the additional overhead caused by contention and
#' coherency delays.
#'
#' The calculated processing times are given as percentages of a
#' non-parallelized workload. So for a non-parallelized workload the ideal
#' processing time will always be given as \emph{100\%} while the overhead
#' for contention and coherency will always be zero.
#'
#' Doubling the capacity will cut the ideal processing time in half but
#' increase the overhead percentages. The increase of the overhead depends on
#' the values of the parameters \code{alpha} and \code{beta} estimated by
#' \code{\link{usl}}.
#'
#' The calculation is based on \emph{A General Theory of Computational
#' Scalability Based on Rational Functions}, equation 26.
#'
#' @param object A USL model object for which the overhead will be calculated.
#' @param newdata An optional data frame in which to look for variables
#' with which to calculate the overhead.
#' If omitted, the fitted values are used.
#'
#' @return \code{overhead} produces a matrix of overhead percentages based on
#' a non-parallelized workload. The column \code{ideal} contains the ideal
#' percentage of execution time. The columns \code{contention} and
#' \code{coherency} give the additional overhead percentage caused by
#' the respective effects.
#'
#' @seealso \code{\link{usl}}, \code{\link{USL-class}}
#'
#' @references Neil J. Gunther. Guerrilla Capacity Planning: A Tactical
#' Approach to Planning for Highly Scalable Applications and Services.
#' Springer, Heidelberg, Germany, 1st edition, 2007.
#'
#' @references Neil J. Gunther. A General Theory of Computational Scalability
#' Based on Rational Functions. Computing Research Repository, 2008.
#' \code{http://arxiv.org/abs/0808.1431}
#'
#' @examples
#' require(usl)
#'
#' data(specsdm91)
#'
#' ## Print overhead in processing time for demo dataset
#' overhead(usl(throughput ~ load, specsdm91))
#'
#' @aliases overhead
#' @export
#'
setMethod(
f = "overhead",
signature = "USL",
definition = function(object, newdata) {
# Calculate overhead for the initial data used to create
# the model if no data frame 'newdata' is given as parameter
if (missing(newdata)) newdata <- object@frame
# Extract regressor variable from data frame
x <- newdata[, object@regr, drop=TRUE]
y.ideal <- 1 / x
y.contention <- coef(object)[['alpha']] * (x - 1) / x
y.coherency <- coef(object)[['beta']] * (1/2) * (x - 1)
col.names <- c("ideal", "contention", "coherency")
# Return the matrix
matrix(c(y.ideal, y.contention, y.coherency),
nrow = length(x), dimnames = list(seq(x), col.names))
}
)
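# Illustrative sketch, assuming a model fitted from the shipped specsdm91
# demo data: per equation 26 the three columns returned above add up to the
# total processing time relative to a serial execution of the workload.
#
# m <- usl(throughput ~ load, specsdm91)
# o <- overhead(m, newdata = data.frame(load = c(1, 2, 4, 8)))
# rowSums(o)   # total relative processing time for each load level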
|
/scratch/gouwar.j/cran-all/cranData/usl/R/overhead.R
|
# Copyright (c) 2013-2020 Stefan Moeding
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
##############################################################################
#' Plot the scalability function from a USL model
#'
#' Create a line plot for the scalability function of a Universal
#' Scalability Law model.
#'
#' \code{plot} creates a plot of the scalability function for the model
#' represented by the argument \code{x}.
#'
#' If \code{from} is not specified then the range starts at the minimum value
#' given to define the model. An unspecified value for \code{to} will lead
#' to plot ending at the maximum value from the model. For \code{add = TRUE}
#' the defaults are taken from the limits of the previous plot.
#'
#' \code{xlab} and \code{ylab} can be used to set the axis titles. The defaults
#' are the names of the regressor and response variables used in the model.
#'
#' If the parameter \code{bounds} is set to \code{TRUE} then the plot also
#' shows dotted lines for the theoretical bounds of scalability. These are
#' the linear scalability for small loads and the Amdahl asymptote for the
#' limit of scalability as load approaches infinity.
#'
#' The parameters \code{alpha} or \code{beta} are useful to do a what-if
#' analysis. Setting these parameters overrides the model parameters and shows
#' how the system would behave with a different contention or coherency delay
#' parameter.
#'
#' @param x The USL object to plot.
#' @param from The start of the range over which the scalability function
#' will be plotted.
#' @param to The end of the range over which the scalability function
#' will be plotted.
#' @param xlab A title for the x axis: see \code{\link{title}}.
#' @param ylab A title for the y axis: see \code{\link{title}}.
#' @param bounds Add the bounds of scalability to the plot. This always
#' includes the linear scalability bound for low loads. If the contention
#' coefficient \code{alpha} is a positive number, then the Amdahl asymptote
#' for high loads will also be plotted. If the coherency coefficient
#' \code{beta} is also a positive number, then the point of peak scalability
#'   will also be indicated. All bounds are shown using dotted lines. Some
#' bounds might not be shown using the default plot area. In this case the
#' parameter \code{ylim} can be used to increase the visible plot area and
#' include all bounds in the output.
#' @param alpha Optional parameter to be used for evaluation instead of the
#' parameter computed for the model.
#' @param beta Optional parameter to be used for evaluation instead of the
#' parameter computed for the model.
#' @param ... Other graphical parameters passed to plot
#' (see \code{\link{par}}, \code{\link{plot.function}}).
#'
#' @seealso \code{\link{usl}}, \code{\link{plot.function}}
#'
#' @examples
#' require(usl)
#'
#' data(specsdm91)
#'
#' ## Plot result from USL model for demo dataset
#' plot(usl(throughput ~ load, specsdm91), bounds = TRUE, ylim = c(0, 3500))
#'
#' @export
#'
setMethod(
f = "plot",
signature = "USL",
definition = function(x, from = NULL, to = NULL, xlab = NULL, ylab = NULL,
bounds = FALSE, alpha, beta, ...) {
# Take range from the model if not specified
if (missing(from)) from <- min(x@frame[, x@regr])
if (missing(to)) to <- max(x@frame[, x@regr])
# Set titles for axis
if (missing(xlab)) xlab <- x@regr
if (missing(ylab)) ylab <- x@resp
# Use explicitly specified coefficients
if (missing(alpha)) alpha <- coef(x)[['alpha']]
if (missing(beta)) beta <- coef(x)[['beta']]
# Use gamma from the model
gamma <- coef(x)[['gamma']]
# Get the function to calculate scalability for the model
.func <- scalability(x, alpha, beta)
# Plot the scalability function
plot(x = .func, from = from, to = to, xlab = xlab, ylab = ylab, ...)
# Add theoretical bounds of scalability to the plot
if (bounds) {
# Bound 1: linear scalability
abline(a = 0, b = gamma, lty = "dotted")
if (alpha > 0) {
# Bound 2: Amdahl asymptote
abline(h = abs(1/alpha) * gamma, lty = "dotted")
if (beta > 0) {
# Point of peak scalability
Nmax <- sqrt((1 - alpha) / beta)
Xmax <- gamma * Nmax / (1 + alpha * (Nmax-1) + beta * Nmax * (Nmax-1))
abline(v = Nmax, lty = "dotted")
abline(h = Xmax, lty = "dotted")
}
# Point of optimal scalability
abline(v = abs(1/alpha), lty = "dotted")
}
}
}
)
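# Illustrative what-if sketch, assuming the shipped specsdm91 demo data: the
# alpha argument overrides the fitted contention coefficient, so the dashed
# curve below shows a purely hypothetical system with half the serialization.
#
# m <- usl(throughput ~ load, specsdm91)
# plot(m, bounds = TRUE, ylim = c(0, 3500))
# plot(m, alpha = coef(m)[["alpha"]] / 2, add = TRUE, lty = "dashed")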
|
/scratch/gouwar.j/cran-all/cranData/usl/R/plot.R
|
# Copyright (c) 2013-2020 Stefan Moeding
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
##############################################################################
#' Predict method for Universal Scalability Law models
#'
#' \code{predict} is a function for predictions of the scalability of a system
#' modeled with the Universal Scalability Law. It evaluates the regression
#' function in the frame \code{newdata} (which defaults to
#' \code{model.frame(object)}). Setting \code{interval} to "\code{confidence}"
#' requests the computation of confidence intervals at the specified
#' \code{level}.
#'
#' The parameters \code{alpha} or \code{beta} are useful to do a what-if
#' analysis. Setting these parameters override the model parameters and show
#' how the system would behave with a different contention or coherency delay
#' parameter.
#'
#' \code{predict} internally uses the function returned by
#' \code{\link{scalability,USL-method}} to calculate the result.
#'
#' @param object A USL model object for which prediction is desired.
#' @param newdata An optional data frame in which to look for variables
#' with which to predict. If omitted, the fitted values are used.
#' @param alpha Optional parameter to be used for evaluation instead of the
#' parameter computed for the model.
#' @param beta Optional parameter to be used for evaluation instead of the
#' parameter computed for the model.
#' @param interval Type of interval calculation. Default is to calculate no
#' confidence interval.
#' @param level Confidence level. Default is 0.95.
#'
#' @return \code{predict} produces a vector of predictions or a matrix of
#' predictions and bounds with column names \code{fit}, \code{lwr}, and
#' \code{upr} if \code{interval} is set to "\code{confidence}".
#'
#' @seealso \code{\link{usl}}, \code{\link{scalability,USL-method}},
#' \code{\link{USL-class}}
#'
#' @references Neil J. Gunther. Guerrilla Capacity Planning: A Tactical
#' Approach to Planning for Highly Scalable Applications and Services.
#' Springer, Heidelberg, Germany, 1st edition, 2007.
#'
#' @examples
#' require(usl)
#'
#' data(raytracer)
#'
#' ## Print predicted result from USL model for demo dataset
#' predict(usl(throughput ~ processors, raytracer))
#'
#' ## The same prediction with confidence intervals at the 99% level
#' predict(usl(throughput ~ processors, raytracer),
#' interval = "confidence", level = 0.99)
#'
#' @export
#'
setMethod(
f = "predict",
signature = "USL",
definition = function(object, newdata, alpha, beta,
interval = c("none", "confidence"),
level = 0.95) {
# Predict for the initial data used to create the model
# if no data frame 'newdata' is given as parameter
if (missing(newdata)) newdata <- object@frame
if (missing(alpha)) alpha <- coef(object)[['alpha']]
if (missing(beta)) beta <- coef(object)[['beta']]
if (missing(interval)) interval <- "none"
# Extract regressor variable from data frame
x <- newdata[, object@regr, drop=TRUE]
# Calculate values (ignore NA)
y <- scalability(object, alpha, beta)(x)
fit <- structure(y, names=row.names(newdata))
# Return just the vector if the confidence interval is not required
if (interval != "confidence") return(fit)
# The following calculation is taken from
# http://perfdynamics.blogspot.de/2010/09/confidence-bands-for-universal.html
dof <- length(object@frame[[object@resp]]) - 1L
y.se <- sqrt(sum(object@residuals ^ 2) / dof)
y.ci <- y.se * qt(level, dof)
# Create matrix with fitted value and lower/upper confidence interval
mat <- matrix(c(fit, fit - y.ci, fit + y.ci),
nrow = length(fit),
dimnames = list(seq(fit), c("fit", "lwr", "upr")))
return(mat)
}
)
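# Illustrative sketch, assuming the shipped raytracer demo data: the band
# computed above has constant width, fit +/- qt(level, n - 1) * s, where s is
# derived from the residual sum of squares with n - 1 degrees of freedom (see
# the blog post referenced in the code).
#
# m <- usl(throughput ~ processors, raytracer)
# p <- predict(m, interval = "confidence", level = 0.95)
# unique(round(p[, "upr"] - p[, "fit"], 6))   # one width for all observations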
|
/scratch/gouwar.j/cran-all/cranData/usl/R/predict.R
|
# Copyright (c) 2013-2020 Stefan Moeding
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
##############################################################################
#' Print objects of class "\code{USL}"
#'
#' \code{print} prints its argument and returns it invisibly (via
#' \code{\link{invisible}(x)}).
#'
#' @param x An object from class \code{USL}.
#' @param digits Minimal number of \emph{significant} digits, see
#' \link{print.default}.
#' @param ... Other arguments passed to other methods.
#'
#' @return \code{print} returns the object \code{x} invisibly.
#'
#' @seealso \code{\link{usl}}, \code{\link{USL-class}}
#'
#' @examples
#' require(usl)
#'
#' data(raytracer)
#'
#' ## Print result from USL model for demo dataset
#' print(usl(throughput ~ processors, raytracer))
#'
#' @export
#'
setMethod(
f = "print",
signature = "USL",
definition = function(x, digits = max(3L, getOption("digits") - 3L), ...) {
qnames <- c("Min", "1Q", "Median", "3Q", "Max")
cat("\nCall:\n",
paste(deparse(x@call), sep = "\n", collapse = "\n"), "\n", sep = "")
cat("\nEfficiency:\n")
zz <- zapsmall(quantile(x@efficiency), digits + 1)
print(structure(zz, names = qnames), digits = digits, ...)
cat("\nResiduals:\n")
zz <- zapsmall(quantile(x@residuals), digits + 1)
print(structure(zz, names = qnames), digits = digits, ...)
cat("\nCoefficients:\n")
    tval <- x@coefficients / [email protected]
    pval <- 2 * pt(abs(tval), [email protected], lower.tail = FALSE)
    para <- c(x@coefficients, [email protected], tval, pval)
    rows <- attributes(x@coefficients)$names
    cols <- c("Estimate", "Std. Error", "t value", "Pr(>|t|)")
    para.mat <- matrix(para, nrow = length(x@coef.names),
                       dimnames = list(rows, cols))
    printCoefmat(para.mat, digits = digits, print.gap = 2)
    cat("\nResidual standard error:", format(signif(x@sigma, digits)),
        "on", [email protected], "degrees of freedom\n")
cat("\nScalability bounds:\n")
cat("limit: ")
cat(x@resp, signif(x@limit, digits), "(Amdahl asymptote)\n")
cat("peak: ")
if (x@coefficients[['beta']] > 0) {
cat(x@resp, signif(x@peak[2], digits), "at ")
cat(x@regr, signif(x@peak[1], digits), "\n")
}
else {
cat("none (beta=0)\n")
}
cat("opt: ")
cat(x@resp, signif(x@optimal[2], digits), "at ")
cat(x@regr, signif(x@optimal[1], digits), "\n")
cat("\n")
invisible(x)
}
)
|
/scratch/gouwar.j/cran-all/cranData/usl/R/print.R
|
# Copyright (c) 2013-2020 Stefan Moeding
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
##############################################################################
#' Scalability function of a USL model
#'
#' \code{scalability} is a higher order function and returns a function to
#' calculate the scalability for the specific USL model.
#'
#' The returned function can be used to calculate specific values once the
#' model for a system has been created.
#'
#' The parameters \code{alpha} and \code{beta} are useful to do a what-if
#' analysis. Setting these parameters overrides the model parameters and shows
#' how the system would behave with a different contention or coherency delay
#' parameter.
#'
#' @param object A USL object.
#' @param alpha Optional parameter to be used for evaluation instead of the
#' parameter computed for the model.
#' @param beta Optional parameter to be used for evaluation instead of the
#' parameter computed for the model.
#' @param gamma Optional parameter to be used for evaluation instead of the
#' parameter computed for the model.
#'
#' @return A function with parameter \code{x} that calculates the
#' scalability value of the specific model.
#'
#' @seealso \code{\link{usl}},
#' \code{\link{peak.scalability,USL-method}}
#' \code{\link{optimal.scalability,USL-method}}
#' \code{\link{limit.scalability,USL-method}}
#'
#' @references Neil J. Gunther. Guerrilla Capacity Planning: A Tactical
#' Approach to Planning for Highly Scalable Applications and Services.
#' Springer, Heidelberg, Germany, 1st edition, 2007.
#'
#' @examples
#' require(usl)
#'
#' data(raytracer)
#'
#' ## Compute the scalability function
#' scf <- scalability(usl(throughput ~ processors, raytracer))
#'
#' ## Print scalability for 32 CPUs for the demo dataset
#' print(scf(32))
#'
#' ## Plot scalability for the range from 1 to 64 CPUs
#' plot(scf, from=1, to=64)
#'
#' @aliases scalability
#' @export
#'
setMethod(
f = "scalability",
signature = "USL",
definition = function(object, alpha, beta, gamma) {
if (missing(alpha)) alpha <- coef(object)[['alpha']]
if (missing(beta)) beta <- coef(object)[['beta']]
if (missing(gamma)) gamma <- coef(object)[['gamma']]
.func <- function(x) {
# Formula (4.31) on page 57 of GCaP:
cap <- x / (1 + (alpha * (x-1)) + (beta * x * (x-1)))
# Scale it to the measurements
return(gamma * cap)
}
# Return the usl function (lexically scoped)
return(.func)
}
)
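# Illustrative sketch, assuming the shipped specsdm91 demo data: the returned
# closure captures alpha, beta and gamma lexically, so what-if values are
# frozen at the time the function is created.
#
# m  <- usl(throughput ~ load, specsdm91)
# f0 <- scalability(m)              # fitted coefficients
# f1 <- scalability(m, beta = 0)    # hypothetical system without coherency delay
# f0(64); f1(64)                    # predicted throughput at load 64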
##############################################################################
#' Point of optimal scalability of a USL model
#'
#' Calculate the point of optimal scalability for a specific model.
#'
#' The point of optimal scalability is defined as:
#'
#' \deqn{Nopt = \frac{1}{\alpha}}{Nopt = 1 / \alpha}
#'
#' Below this point the existing capacity is underutilized. Beyond that point
#' the effects of diminishing returns become increasingly visible.
#'
#' The value can be constructed graphically by projecting the intersection of
#' the linear scalability bound and the Amdahl asymptote onto the x-axis.
#'
#' The parameters \code{alpha}, \code{beta} and \code{gamma} are useful to do a
#' what-if analysis. Setting these parameters overrides the model parameters and
#' shows how the system would behave with a different contention or coherency
#' delay parameter.
#'
#' The point of optimal scalability is undefined if \code{alpha} is zero.
#'
#' This function accepts arguments for \code{beta} and \code{gamma} although
#' their values are not required to perform the calculation. This is intentional
#' in order to provide a coherent interface.
#'
#' @param object A USL object.
#' @param alpha Optional parameter to be used for evaluation instead of the
#' parameter computed for the model.
#' @param beta Optional parameter to be used for evaluation instead of the
#' parameter computed for the model.
#' @param gamma Optional parameter to be used for evaluation instead of the
#' parameter computed for the model.
#'
#' @return A numeric value for the load where optimal scalability will be
#' reached.
#'
#' @seealso \code{\link{usl}},
#' \code{\link{peak.scalability,USL-method}}
#' \code{\link{limit.scalability,USL-method}}
#'
#' @examples
#' require(usl)
#'
#' data(specsdm91)
#'
#' optimal.scalability(usl(throughput ~ load, specsdm91))
#' ## Optimal scalability will be reached at about 36 virtual users
#'
#' @aliases optimal.scalability
#' @export
#'
setMethod(
f = "optimal.scalability",
signature = "USL",
definition = function(object, alpha, beta, gamma) {
if (missing(alpha)) alpha <- coef(object)[['alpha']]
if (missing(beta)) beta <- coef(object)[['beta']]
if (missing(gamma)) gamma <- coef(object)[['gamma']]
return(1 / alpha)
}
)
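# Illustrative sketch, assuming the shipped specsdm91 demo data: because
# Nopt = 1 / alpha, the linear scalability bound evaluated at the optimal
# load equals the Amdahl asymptote, i.e. gamma * Nopt == gamma / alpha.
#
# m <- usl(throughput ~ load, specsdm91)
# coef(m)[["gamma"]] * optimal.scalability(m)
# limit.scalability(m)   # same value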
##############################################################################
#' Scalability limit of a USL model
#'
#' Calculate the scalability limit for a specific model.
#'
#' The scalability limit is defined as:
#'
#'\deqn{Xroof = \frac{\gamma}{\alpha}}{Xroof = \gamma / \alpha}
#'
#' This is the upper bound (Amdahl asymptote) of system capacity.
#'
#' The parameters \code{alpha}, \code{beta} and \code{gamma} are useful to do a
#' what-if analysis. Setting these parameters overrides the model parameters and
#' shows how the system would behave with a different contention or coherency
#' delay parameter.
#'
#' The scalability limit is undefined if \code{alpha} is zero.
#'
#' This function accepts an argument for \code{beta} although the value is not
#' required to perform the calculation. This is on purpose to provide a
#' coherent interface.
#'
#' @param object A USL object.
#' @param alpha Optional parameter to be used for evaluation instead of the
#' parameter computed for the model.
#' @param beta Optional parameter to be used for evaluation instead of the
#' parameter computed for the model.
#' @param gamma Optional parameter to be used for evaluation instead of the
#' parameter computed for the model.
#'
#' @return A numeric value for the system capacity limit (e.g. throughput).
#'
#' @seealso \code{\link{usl}},
#' \code{\link{peak.scalability,USL-method}}
#' \code{\link{optimal.scalability,USL-method}}
#'
#' @examples
#' require(usl)
#'
#' data(specsdm91)
#'
#' limit.scalability(usl(throughput ~ load, specsdm91))
#' ## The throughput limit is about 3245
#'
#' @aliases limit.scalability
#' @export
#'
setMethod(
f = "limit.scalability",
signature = "USL",
definition = function(object, alpha, beta, gamma) {
if (missing(alpha)) alpha <- coef(object)[['alpha']]
if (missing(beta)) beta <- coef(object)[['beta']]
if (missing(gamma)) gamma <- coef(object)[['gamma']]
return(gamma / alpha)
}
)
##############################################################################
#' Point of peak scalability of a USL model
#'
#' Calculate the point of peak scalability for a specific model.
#'
#' The peak scalability is the point where the throughput of the system starts
#' to go retrograde, i.e., starts to decrease with increasing load.
#'
#' The parameters \code{alpha}, \code{beta} and \code{gamma} are useful to do a
#' what-if analysis. Setting these parameters overrides the model parameters and
#' shows how the system would behave with a different contention or coherency
#' delay parameter.
#'
#' See formula (4.33) in \emph{Guerrilla Capacity Planning}.
#'
#' This function accepts an argument for \code{gamma} although the value is
#' not required to perform the calculation. This is on purpose to provide a
#' coherent interface.
#'
#' @param object A USL object.
#' @param alpha Optional parameter to be used for evaluation instead of the
#' parameter computed for the model.
#' @param beta Optional parameter to be used for evaluation instead of the
#' parameter computed for the model.
#' @param gamma Optional parameter to be used for evaluation instead of the
#' parameter computed for the model.
#'
#' @return A numeric value for the point where peak scalability will be
#' reached.
#'
#' @seealso \code{\link{usl}},
#' \code{\link{optimal.scalability,USL-method}}
#' \code{\link{limit.scalability,USL-method}}
#'
#' @references Neil J. Gunther. Guerrilla Capacity Planning: A Tactical
#' Approach to Planning for Highly Scalable Applications and Services.
#' Springer, Heidelberg, Germany, 1st edition, 2007.
#'
#' @examples
#' require(usl)
#'
#' data(specsdm91)
#'
#' peak.scalability(usl(throughput ~ load, specsdm91))
#' ## Peak scalability will be reached at about 96 virtual users
#'
#' @aliases peak.scalability
#' @export
#'
setMethod(
f = "peak.scalability",
signature = "USL",
definition = function(object, alpha, beta, gamma) {
if (missing(alpha)) alpha <- coef(object)[['alpha']]
if (missing(beta)) beta <- coef(object)[['beta']]
if (missing(gamma)) gamma <- coef(object)[['gamma']]
return(sqrt((1 - alpha) / beta))
}
)
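# Illustrative sketch, assuming the shipped specsdm91 demo data: evaluating
# the scalability function at the peak load gives the maximum predicted
# throughput, which is also stored in the 'peak' slot and shown by print().
#
# m <- usl(throughput ~ load, specsdm91)
# Nmax <- peak.scalability(m)
# scalability(m)(Nmax)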
|
/scratch/gouwar.j/cran-all/cranData/usl/R/scalability.R
|
# Copyright (c) 2013-2020 Stefan Moeding
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
##############################################################################
#' Show objects of class "\code{USL}"
#'
#' Display the object by printing it.
#'
#' @param object The object to be printed.
#'
#' @return \code{show} returns an invisible \code{NULL}.
#'
#' @seealso \code{\link{usl}}, \code{\link{USL-class}}
#'
#' @examples
#' require(usl)
#'
#' data(raytracer)
#'
#' ## Show USL model
#' show(usl(throughput ~ processors, raytracer))
#'
#' @export
setMethod(
f = "show",
signature = "USL",
definition = function(object) {
print(object)
invisible(NULL)
}
)
|
/scratch/gouwar.j/cran-all/cranData/usl/R/show.R
|
# Copyright (c) 2020 Stefan Moeding
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
##############################################################################
#' Extract Residual Standard Deviation 'Sigma'
#'
#' \code{sigma} extracts the residual standard deviation ('sigma') from a
#' USL model object.
#'
#' @param object An object from class \code{USL}.
#' @param ... Other arguments passed to other methods.
#'
#' @return A single number.
#'
#' @seealso \code{\link{usl}}, \code{\link{USL-class}}
#'
#' @examples
#' require(usl)
#'
#' data(raytracer)
#'
#' ## Print result from USL model for demo dataset
#' print(sigma(usl(throughput ~ processors, raytracer)))
#'
#' @export
#'
setMethod(
f = "sigma",
signature = "USL",
definition = function(object, ...) {
return(object@sigma)
}
)
|
/scratch/gouwar.j/cran-all/cranData/usl/R/sigma.R
|
# Copyright (c) 2013-2020 Stefan Moeding
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
##############################################################################
#' USL Object Summary
#'
#' \code{summary} method for class "\code{USL}".
#'
#' @param object A USL object.
#' @param ... Other arguments passed to other methods.
#'
#' @seealso \code{\link{usl}}, \code{\link{USL-class}}
#'
#' @examples
#' require(usl)
#'
#' data(raytracer)
#'
#' ## Show summary for demo dataset
#' summary(usl(throughput ~ processors, raytracer))
#'
#' ## Extract model coefficients
#' summary(usl(throughput ~ processors, raytracer))$coefficients
#'
#' @export
#'
setMethod(
f = "summary",
signature = "USL",
definition = function(object, ...) {
return(object)
}
)
|
/scratch/gouwar.j/cran-all/cranData/usl/R/summary.R
|
# Copyright (c) 2013-2020 Stefan Moeding
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
##############################################################################
#' Analyze system scalability with the Universal Scalability Law
#'
#' The Universal Scalability Law is a model to predict hardware and software
#' scalability. It uses system capacity as a function of load to forecast the
#' scalability for the system.
#'
#' Use the function \code{\link{usl}} to create a model from a formula and
#' a data frame.
#'
#' The USL model produces two coefficients as result: \code{alpha} models the
#' contention and \code{beta} the coherency delay of the system.
#'
#' The Universal Scalability Law has been created by Dr. Neil J. Gunther.
#'
#' @seealso \code{\link{usl}}
#'
#' @references Neil J. Gunther. Guerrilla Capacity Planning: A Tactical
#' Approach to Planning for Highly Scalable Applications and Services.
#' Springer, Heidelberg, Germany, 1st edition, 2007.
#'
#' @name usl-package
#' @docType package
#' @import methods
#' @import graphics
#' @import stats
#'
NULL
|
/scratch/gouwar.j/cran-all/cranData/usl/R/usl-package.R
|
# Copyright (c) 2013-2020 Stefan Moeding
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
##############################################################################
#' Solve a USL model using non linear regression
#'
#' This function solves a USL model using non linear regression with least
#' squares. It uses the function \code{\link{nls}} with the "\code{port}"
#' algorithm to perform the calculation. All restrictions of the algorithm
#' apply.
#'
#' @param model A data frame with two columns containing the values of the
#' predictor variable in the first column and the values of the response
#' variable in the second column.
#'
#' @return A list containing three elements: the model coefficients alpha,
#' beta and gamma.
#'
#' @seealso \code{\link{usl}}
#' @keywords internal
#'
usl.solve.nls <- function(model) {
names(model) <- c("x", "y")
gamma.start <- max(model$y / model$x)
model.fit <- nls(y ~ (gamma * x)/(1 + alpha * (x-1) + beta * x * (x-1)),
data = model,
start = c(gamma = gamma.start, alpha = 0.01, beta = 0.0001),
algorithm = "port",
lower = c(gamma = 0, alpha = 0, beta = 0),
upper = c(gamma = Inf, alpha = 1, beta = 1))
alpha = coef(model.fit)[['alpha']]
beta = coef(model.fit)[['beta']]
gamma = coef(model.fit)[['gamma']]
return(list(alpha = alpha, beta = beta, gamma = gamma))
}
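# Illustrative sketch for the internal helper, assuming the shipped specsdm91
# demo data: the solver expects the predictor in the first column and the
# response in the second and returns the coefficients as a plain list. From
# user code it would be reached as usl:::usl.solve.nls().
#
# d <- specsdm91[, c("load", "throughput")]
# usl.solve.nls(d)   # list(alpha = ..., beta = ..., gamma = ...)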
##############################################################################
#' Solve a USL model using non linear regression
#'
#' This function solves a USL model using non linear regression with least
#' squares. It uses the function \code{\link{nlxb}} from the \pkg{nlsr}
#' package to perform the calculation.
#'
#' @param model A data frame with two columns containing the values of the
#' predictor variable in the first column and the values of the response
#' variable in the second column.
#'
#' @return A list containing three elements: the model coefficients alpha,
#' beta and gamma.
#'
#' @seealso \code{\link{usl}}
#'
#' @references John C. Nash. nlsr: Functions for nonlinear least squares
#' solutions, 2017. R package version 2017.6.18.
#'
#' @importFrom nlsr nlxb
#' @importFrom utils capture.output
#' @keywords internal
#'
usl.solve.nlxb <- function(model) {
names(model) <- c("x", "y")
gamma.start <- max(model$y / model$x)
log <- capture.output({
model.fit <- nlxb(y ~ (gamma * x)/(1 + alpha * (x-1) + beta * x * (x-1)),
data = model,
start = c(gamma = gamma.start, alpha = 0.01, beta = 0.0001),
lower = c(gamma = 0, alpha = 0, beta = 0),
upper = c(gamma = Inf, alpha = 1, beta = 1))
})
alpha = model.fit$coefficients[['alpha']]
beta = model.fit$coefficients[['beta']]
gamma = model.fit$coefficients[['gamma']]
return(list(alpha = alpha, beta = beta, gamma = gamma))
}
##############################################################################
#' Create a model for the Universal Scalability Law
#'
#' \code{usl} is used to create a model for the Universal Scalability Law.
#'
#' The Universal Scalability Law is used to forecast the scalability of
#' either a hardware or a software system.
#'
#' The USL model works with one independent variable (e.g. virtual users,
#' processes, threads, ...) and one dependent variable (e.g. throughput, ...).
#' Therefore the model formula must be in the simple
#' "\code{response ~ predictor}" format.
#'
#' The model produces two main coefficients as result: \code{alpha} models the
#' contention and \code{beta} the coherency delay of the system. The third
#' coefficient \code{gamma} estimates the value of the dependent variable
#' (e.g. throughput) for the single user/process/thread case. It therefore
#' corresponds to the scale factor calculated in previous versions of the
#' \code{usl} package.
#'
#' The function \code{\link{coef}} extracts the coefficients from the model
#' object.
#'
#' The argument \code{method} selects which solver is used to solve the
#' model:
#'
#' \itemize{
#' \item "\code{nls}" for a nonlinear regression model. This method
#' estimates all coefficients \code{alpha}, \code{beta} and \code{gamma}.
#' The R base function \code{\link{nls}} with the "\code{port}" algorithm
#' is used internally to solve the model. So all restrictions of the
#' "\code{port}" algorithm apply.
#'   \item "\code{nlxb}" for a nonlinear regression model using the function
#' \code{\link{nlxb}} from the \code{\link{nlsr}} package. This method
#' also estimates all three coefficients. It is expected to be more robust
#' than the \code{nls} method.
#'   \item "\code{default}": the former default method, which transformed the
#'     model into a 2nd degree polynomial, has been removed with the
#'     introduction of the three-coefficient model in version 2.0.0 of the
#'     \pkg{usl} package. Calling the "\code{default}" method will internally
#'     dispatch to the "\code{nlxb}" solver instead.
#' }
#'
#' The Universal Scalability Law can be expressed with following formula.
#' \code{C(N)} predicts the relative capacity of the system for a given
#' load \code{N}:
#'
#' \deqn{C(N) = \frac{\gamma N}{1 + \alpha (N - 1) + \beta N (N - 1)}}{C(N) = (\gamma N) / (1 + \alpha * (N - 1) + \beta * N * (N - 1))}
#'
#' @param formula An object of class "\code{\link{formula}}" (or one that
#' can be coerced to that class): a symbolic description of the model to be
#' analyzed. The details of model specification are given under 'Details'.
#' @param data A data frame, list or environment (or object coercible by
#' as.data.frame to a data frame) containing the variables in the model.
#' If not found in data, the variables are taken from
#' \code{environment(formula)}, typically the environment from which
#' \code{usl} is called.
#' @param method Character value specifying the method to use. The possible
#' values are described under 'Details'.
#'
#' @return An object of class USL.
#'
#' @seealso \code{\link{efficiency,USL-method}},
#' \code{\link{scalability,USL-method}},
#' \code{\link{peak.scalability,USL-method}},
#' \code{\link{optimal.scalability,USL-method}},
#' \code{\link{limit.scalability,USL-method}},
#' \code{\link{summary,USL-method}},
#' \code{\link{sigma,USL-method}}
#' \code{\link{predict,USL-method}},
#' \code{\link{overhead,USL-method}},
#' \code{\link{confint,USL-method}},
#' \code{\link{coef}},
#' \code{\link{fitted}},
#' \code{\link{residuals}},
#' \code{\link{df.residual}}
#'
#' @references Neil J. Gunther. Guerrilla Capacity Planning: A Tactical
#' Approach to Planning for Highly Scalable Applications and Services.
#' Springer, Heidelberg, Germany, 1st edition, 2007.
#'
#' @references John C. Nash. nlsr: Functions for nonlinear least squares
#' solutions, 2017. R package version 2017.6.18.
#'
#' @examples
#' require(usl)
#'
#' data(raytracer)
#'
#' ## Create USL model for "throughput" by "processors"
#' usl.model <- usl(throughput ~ processors, raytracer)
#'
#' ## Show summary of model parameters
#' summary(usl.model)
#'
#' ## Show complete list of efficiency parameters
#' efficiency(usl.model)
#'
#' ## Extract coefficients for model
#' coef(usl.model)
#'
#' ## Calculate point of peak scalability
#' peak.scalability(usl.model)
#'
#' ## Plot original data and scalability function
#' plot(raytracer)
#' plot(usl.model, add=TRUE)
#'
#' @export
#'
usl <- function(formula, data, method = "default") {
## canonicalize the arguments
formula <- as.formula(formula)
if (length(formula) < 3L) {
stop("'formula' must be a 3-part formula")
}
if(!is.data.frame(data) && !is.environment(data)) {
stop("'data' must be a data frame or an environment")
}
# Check parameter and variable names from formula
var.names <- all.vars(formula)
if (length(var.names) != 2L) {
stop("'formula' must contain exactly 2 variables")
}
# Create model frame
call <- match.call()
frame <- match.call(expand.dots = FALSE)
m <- match(c("formula", "data"), names(frame), 0)
frame <- frame[c(1, m)]
frame$na.action <- "na.omit"
frame$drop.unused.levels <- TRUE
frame[[1]] <- as.name("model.frame")
frame <- eval(frame, parent.frame())
# Verify there are enough values to do the calculation
if (nrow(frame) < 6) {
warning("'data' has only a few values; the result might not be accurate")
}
# Extract terms from the formula and get the names of the
# predictor and response variables given by the user
mt <- attr(frame, "terms")
regr <- var.names[-attr(mt, "response")] # predictor
resp <- var.names[attr(mt, "response")] # response
model.input <- data.frame(frame[regr], frame[resp])
# Choose solver function
sel <- switch(method, nls=2, 1)
usl.solve <- switch(sel, usl.solve.nlxb, usl.solve.nls)
# Solve the model for the model frame
model.result <- usl.solve(model.input)
# Create object for class USL
.Object <- new(Class = "USL", call, frame, regr, resp,
model.result[['alpha']],
model.result[['beta']],
model.result[['gamma']])
# Finish building the USL object
nam <- row.names(frame)
y.obs <- frame[, resp, drop = TRUE]
y.fit <- predict(.Object)
y.res <- y.obs - y.fit
.Object@fitted <- structure(y.fit, names = nam)
.Object@residuals <- structure(y.res, names = nam)
# Calculate the point where the curve has its peak
Nmax <- sqrt((1 - model.result[['alpha']]) / model.result[['beta']])
Xmax <- model.result[['gamma']] * Nmax / (1 + model.result[['alpha']] * (Nmax - 1) + model.result[['beta']] * Nmax * (Nmax - 1))
.Object@peak <- structure(c(Nmax, Xmax), names = c(regr, resp))
# Calculate the optimal load
Nopt <- abs(1 / model.result[['alpha']])
Xopt <- model.result[['gamma']] * Nopt / (1 + model.result[['alpha']] * (Nopt - 1) + model.result[['beta']] * Nopt * (Nopt - 1))
.Object@optimal <- structure(c(Nopt, Xopt), names = c(regr, resp))
# Calculate the scalability limit (Amdahl asymptote)
Xlim <- model.result[['gamma']] * Nopt
.Object@limit <- structure(c(Xlim), names = c(resp))
# The following estimation of the standard errors is based on the
# source code of the nls() function in R base.
# See also: Nonlinear Regression and Nonlinear Least Squares,
# Appendix to An R and S-PLUS Companion to Applied Regression, John
# Fox, January 2002
# residual variance
df <- df.residual(.Object)
rv <- ifelse(df <= 0, NaN, sum(y.res ^ 2) / df)
# residual standard deviation
.Object@sigma <- sqrt(rv)
# gradient matrix
grad <- gradient.usl(.Object)
XtXinv <- solve(t(grad) %*% grad)
# standard error of coefficients
  [email protected] <- sqrt(diag(XtXinv) * rv)
return(.Object)
}
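# Illustrative sketch, assuming the shipped specsdm91 demo data: the slots
# filled in above can be read back with the "$" operator and agree with the
# corresponding accessor methods.
#
# m <- usl(throughput ~ load, specsdm91)
# m$peak                  # c(load = Nmax, throughput = Xmax)
# peak.scalability(m)     # same Nmax
# m$limit                 # Amdahl asymptote, cf. limit.scalability(m)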
|
/scratch/gouwar.j/cran-all/cranData/usl/R/usl.R
|
## ----echo=FALSE-------------------------------------------------------------
library(knitr)
opts_knit$set(progress=FALSE, verbose=FALSE)
knit_hooks$set(small.mar=function(before, options, envir) {
if (before && options$fig.show != 'none')
par(mar=c(5.1, 4.1, 1.1, 1.1), family='Helvetica', ps=11)
})
opts_chunk$set(prompt=TRUE, comment=NA, tidy=FALSE)
opts_chunk$set(out.width='\\textwidth', small.mar=TRUE)
opts_chunk$set(fig.width=7, fig.height=3.6)
opts_chunk$set(fig.align='center', fig.pos='htbp', fig.path='usl-')
options(prompt='R> ', scipen=4, digits=4, width=78)
options(digits.secs=3, show.signif.stars=TRUE)
options(str=strOptions(strict.width='cut'))
## ---------------------------------------------------------------------------
library(usl)
data(raytracer)
raytracer
## ----'rtplot1', fig.show='hide'---------------------------------------------
plot(throughput ~ processors, data = raytracer)
## ----'rtplot2', echo=FALSE, fig.cap='Measured throughput of a ray tracing software in relation to the number of available processors'----
plot(throughput ~ processors, data = raytracer)
## ---------------------------------------------------------------------------
usl.model <- usl(throughput ~ processors, data = raytracer)
## ---------------------------------------------------------------------------
summary(usl.model)
## ---------------------------------------------------------------------------
efficiency(usl.model)
## ----'rtbarplot', fig.cap='Rate of efficiency per processor for different numbers of processors running the ray tracing software'----
barplot(efficiency(usl.model), ylab = "efficiency / processor", xlab = "processors")
## ---------------------------------------------------------------------------
coef(usl.model)
## ----'rtplot3', fig.cap='Throughput of a ray tracing software using different numbers of processors'----
plot(throughput ~ processors, data = raytracer, pch = 16, ylim = c(0, 400))
plot(usl.model, add = TRUE, bounds = TRUE)
## ----'bounds', echo=FALSE---------------------------------------------------
Xroof <- usl.model$limit
Nopt <- usl.model$optimal[1]
Xopt <- usl.model$optimal[2]
## ---------------------------------------------------------------------------
confint(usl.model, level = 0.95)
## ---------------------------------------------------------------------------
predict(usl.model, data.frame(processors = c(96, 128)))
## ---------------------------------------------------------------------------
library(usl)
data(specsdm91)
specsdm91
## ---------------------------------------------------------------------------
usl.model <- usl(throughput ~ load, specsdm91, method = "nls")
## ---------------------------------------------------------------------------
summary(usl.model)
## ---------------------------------------------------------------------------
peak.scalability(usl.model)
peak.scalability(usl.model, beta = 0.00005)
## ----'spplot1', fig.show='hide'---------------------------------------------
plot(specsdm91, pch = 16, ylim = c(0,2500))
plot(usl.model, add = TRUE)
# Create function cache.scale to perform calculations with the model
cache.scale <- scalability(usl.model, beta = 0.00005)
curve(cache.scale, lty = 2, add = TRUE)
## ----'spplot2', echo=FALSE, fig.cap='The result of the SPEC SDM91 benchmark for a SPARCcenter 2000 (dots) together with the calculated scalability function (solid line) and a hypothetical scalability function (dashed line)'----
plot(specsdm91, pch = 16, ylim = c(0,2500))
plot(usl.model, add = TRUE)
# Create function cache.scale to perform calculations with the model
cache.scale <- scalability(usl.model, beta = 0.00005)
curve(cache.scale, lty = 2, add = TRUE)
## ---------------------------------------------------------------------------
scalability(usl.model)(peak.scalability(usl.model))
# Use cache.scale function defined before
cache.scale(peak.scalability(usl.model, beta = 0.00005))
## ---------------------------------------------------------------------------
load <- with(specsdm91, expand.grid(load = seq(min(load), max(load))))
## ---------------------------------------------------------------------------
fit <- predict(usl.model, newdata = load, interval = "confidence", level = 0.95)
## ---------------------------------------------------------------------------
usl.polygon <- matrix(c(load[, 1], rev(load[, 1]), fit[, 'lwr'], rev(fit[, 'upr'])),
nrow = 2 * nrow(load))
## ----'ciplot1', fig.cap='The result of the SPEC SDM91 benchmark with confidence bands for the scalability function at the 95\\% level'----
# Create empty plot (define canvas size, axis, ...)
plot(specsdm91, xlab = names(specsdm91)[1], ylab = names(specsdm91)[2],
ylim = c(0, 2000), type = "n")
# Plot gray polygon indicating the confidence interval
polygon(usl.polygon, border = NA, col = "gray")
# Plot the measured throughput
points(specsdm91, pch = 16)
# Plot the fit
lines(load[, 1], fit[, 'fit'])
## ---------------------------------------------------------------------------
load <- data.frame(load = c(10, 20, 100, 200))
ovhd <- overhead(usl.model, newdata = load)
ovhd
## ----'ovplot1', fig.cap='Decomposition of the execution time for parallelized workloads of the SPECSDM91 benchmark. The time is measured as a fraction of the time needed for serial execution of the workload.'----
barplot(height = t(ovhd), names.arg = load[, 1],
xlab = names(load), legend.text = TRUE)
## ---------------------------------------------------------------------------
data(oracledb)
head(subset(oracledb, select = c(timestamp, db_time, txn_rate)))
## ----'oraplot1', echo=FALSE, fig.cap='Transaction rates of an Oracle database system during the day of January 19th, 2012'----
plot(txn_rate ~ timestamp, oracledb, pch = 20, xlab = "Time of day", ylab = "Txn / sec")
## ----'orausl1', fig.cap='Relationship between the transaction rate and the number of average active sessions in an Oracle database system'----
plot(txn_rate ~ db_time, oracledb,
xlab = "Average active sessions", ylab = "Txn / sec")
usl.oracle <- usl(txn_rate ~ db_time, oracledb)
plot(usl.oracle, add = TRUE)
## ---------------------------------------------------------------------------
coef(usl.oracle)
## ---------------------------------------------------------------------------
peak.scalability(usl.oracle)
## ---------------------------------------------------------------------------
confint(usl.oracle)
|
/scratch/gouwar.j/cran-all/cranData/usl/inst/doc/usl.R
|
#' Population estimates (2022), county level
#'
#' @description US census population estimates by county for 2022. \cr\cr
#' The data is formatted for easy merging with output from [usmap::us_map()].
#'
#' @usage data(countypop)
#'
#' @details
#' \itemize{
#' \item \code{fips} The 5-digit FIPS code corresponding to the county.
#' \item \code{abbr} The 2-letter state abbreviation.
#' \item \code{county} The full county name.
#' \item \code{pop_2022} The 2022 population estimate (in number of people)
#' for the corresponding county.
#' }
#'
#' @name countypop
#' @format A data frame with 3222 rows and 4 variables.
#' @docType data
#' @references
#' \itemize{
#' \item \url{https://www.census.gov/programs-surveys/popest.html}
#' \item \url{https://www.ers.usda.gov/data-products/county-level-data-sets/}
#' }
#' @keywords data
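#'
#' @examples
#' # Illustrative sketch added by the editor (not part of the original docs):
#' # `countypop` contains a `fips` column, so it can be passed straight to
#' # `plot_usmap()` to produce a county-level choropleth.
#' \dontrun{
#' plot_usmap(data = countypop, values = "pop_2022")
#' }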
"countypop"
#' Population estimates (2022), state level
#'
#' @description US census population estimates by state for 2022. \cr\cr
#' The data is formatted for easy merging with output from [usmap::us_map()].
#'
#' @usage data(statepop)
#'
#' @details
#' \itemize{
#' \item \code{fips} The 2-digit FIPS code corresponding to the state.
#' \item \code{abbr} The 2-letter state abbreviation.
#' \item \code{full} The full state name.
#' \item \code{pop_2022} The 2022 population estimate (in number of people)
#' for the corresponding state.
#' }
#'
#' @name statepop
#' @format A data frame with 52 rows and 4 variables.
#' @docType data
#' @references
#' \itemize{
#' \item \url{https://www.census.gov/programs-surveys/popest.html}
#' \item \url{https://www.ers.usda.gov/data-products/county-level-data-sets/}
#' }
#' @keywords data
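#'
#' @examples
#' # Illustrative sketch added by the editor (not part of the original docs);
#' # the same usage appears in the package vignettes.
#' \dontrun{
#' plot_usmap(data = statepop, values = "pop_2022")
#' }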
"statepop"
#' Poverty percentage estimates (2021), county level
#'
#' @description US census poverty percentage estimates by county for 2021. \cr\cr
#' The data is formatted for easy merging with output from [usmap::us_map()].
#'
#' @usage data(countypov)
#'
#' @details
#' \itemize{
#' \item \code{fips} The 5-digit FIPS code corresponding to the county.
#' \item \code{abbr} The 2-letter state abbreviation.
#' \item \code{county} The full county name.
#' \item \code{pct_pov_2021} The 2021 poverty estimate (in percent of county population)
#' for the corresponding county.
#' }
#'
#' @name countypov
#' @format A data frame with 3194 rows and 4 variables.
#' @docType data
#' @references
#' \itemize{
#' \item \url{https://www.census.gov/topics/income-poverty/poverty.html}
#' \item \url{https://www.ers.usda.gov/data-products/county-level-data-sets/}
#' }
#' @keywords data
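#'
#' @examples
#' # Illustrative sketch added by the editor (not part of the original docs):
#' # the `fips` column allows a direct join with the county map.
#' \dontrun{
#' plot_usmap(data = countypov, values = "pct_pov_2021")
#' }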
"countypov"
#' Poverty percentage estimates (2021), state level
#'
#' @description US census poverty percentage estimates by state for 2021. \cr\cr
#' The data is formatted for easy merging with output from [usmap::us_map()].
#'
#' @usage data(statepov)
#'
#' @details
#' \itemize{
#' \item \code{fips} The 2-digit FIPS code corresponding to the state.
#' \item \code{abbr} The 2-letter state abbreviation.
#' \item \code{full} The full state name.
#' \item \code{pct_pov_2021} The 2021 poverty estimate (in percent of state population)
#' for the corresponding state.
#' }
#'
#' @name statepov
#' @format A data frame with 51 rows and 4 variables.
#' @docType data
#' @references
#' \itemize{
#' \item \url{https://www.census.gov/topics/income-poverty/poverty.html}
#' \item \url{https://www.ers.usda.gov/data-products/county-level-data-sets/}
#' }
#' @keywords data
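#'
#' @examples
#' # Illustrative sketch added by the editor (not part of the original docs).
#' \dontrun{
#' plot_usmap(data = statepov, values = "pct_pov_2021")
#' }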
"statepov"
#' Most populous city in each state (2010)
#'
#' @description The most populous city in each US state, as of the 2010 US Census.\cr\cr
#' The data is formatted for transforming with [usmap::usmap_transform()].
#' Once the longitude and latitude is transformed, it can be added to
#' [usmap::plot_usmap()] using [ggplot2::ggplot()] layers.
#'
#' @usage data(citypop)
#'
#' @details
#' \itemize{
#' \item \code{lon} The longitude of the most populous city.
#' \item \code{lat} The latitude of the most populous city.
#' \item \code{state} The name of the state containing the city.
#' \item \code{abbr} The abbreviation of the state containing the city.
#' \item \code{most_populous_city} The name of the city.
#' \item \code{city_pop} The population of the city.
#' }
#'
#' @name citypop
#' @format A data frame with 51 rows and 6 variables.
#' @docType data
#' @references
#' \itemize{
#' \item \url{https://www.census.gov/programs-surveys/decennial-census/decade.2010.html}
#' }
#' @keywords data
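#'
#' @examples
#' # Illustrative sketch added by the editor (not part of the original docs):
#' # transform the city coordinates to the usmap projection before plotting.
#' citypop_t <- usmap_transform(citypop)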
"citypop"
#' Earthquakes (2019)
#'
#' @description US earthquakes with a magnitude of 2.5 or greater, occurring in the
#' first half of 2019, from January 1 to June 30, from USGS.\cr\cr
#' The data is formatted for transforming with [usmap::usmap_transform()].
#' Once the longitude and latitude is transformed, it can be added to
#' [usmap::plot_usmap()] using [ggplot2::ggplot()] layers.
#'
#' @usage data(earthquakes)
#'
#' @details
#' \itemize{
#' \item \code{lon} The longitude of the earthquake's location.
#' \item \code{lat} The latitude of the earthquake's location.
#' \item \code{mag} The magnitude of the earthquake.
#' }
#'
#' @name earthquakes
#' @format A data frame with 2254 rows and 3 variables.
#' @docType data
#' @references
#' \itemize{
#' \item \url{https://www.usgs.gov/programs/earthquake-hazards/earthquakes}
#' \item \url{https://earthquake.usgs.gov/earthquakes/search/}
#' }
#' @keywords data
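#'
#' @examples
#' # Illustrative sketch added by the editor (not part of the original docs):
#' # transform the longitude/latitude columns to the usmap projection.
#' eq_transformed <- usmap_transform(earthquakes)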
"earthquakes"
#' US Major Rivers (2010)
#'
#' @description Major rivers in the United States.\cr\cr
#' The data can be transformed with [usmap::usmap_transform()].
#' Once the `Shape` geometries are transformed, they can be added to
#' [plot_usmap()] using a [ggplot2::geom_sf()] layer.
#'
#' @usage data(usrivers)
#'
#' @details
#' \itemize{
#' \item \code{NAME} The name of the river.
#' \item \code{SYSTEM} The system the river belongs to.
#' \item \code{MILES} The length of the river in miles.
#' \item \code{Shape_Length} The length of the river in the coordinate system.
#' \item \code{Shape} The MULTILINESTRING features depicting the river, for plotting.
#' }
#'
#' @name usrivers
#' @format A simple features (sf) data frame with 55 rows and 5 variables.
#' @docType data
#' @references
#' \itemize{
#' \item \url{https://www.arcgis.com/home/item.html?id=290e4ab8a07f4d2c8392848d011add32#!}
#' \item Sources: Esri; Rand McNally; Bartholemew and Times Books;
#' Digital Chart of the World (DCW), U.S. National Geospatial-Intelligence Agency (NGA); i-cubed
#' }
#' @keywords data
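#'
#' @examples
#' # Illustrative sketch added by the editor (not part of the original docs):
#' # `usrivers` is already an sf object, so it can be transformed directly
#' # and then layered onto a usmap plot with ggplot2::geom_sf().
#' rivers_t <- usmap_transform(usrivers)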
"usrivers"
|
/scratch/gouwar.j/cran-all/cranData/usmap/R/data.R
|
#' Retrieve FIPS code for either a US state or county
#'
#' @description Each US state and county has a unique FIPS
#' (Federal Information Processing Standards) code. Use
#' this function to obtain the FIPS code for a state or
#' county.
#'
#' @param state The state(s) for which to obtain a FIPS code(s).
#' Can be entered as either a state abbreviation or full name (case-insensitive).
#'
#' `state` can be entered as either a single state or a vector of states.
#' If `state` is a vector, `county` must be omitted.
#'
#' @param county The county for which to obtain a FIPS code.
#' Can be entered with or without "county" (case-insensitive).
#'
#' @note A \code{state} must be included when searching for \code{county},
#' otherwise multiple results may be returned for duplicate county names.
#'
#' @details State and county FIPS (Federal Information Processing Standards) are
#' two and five digit codes, respectively. They uniquely identify all states and
#' counties within the United States. The first two digits of the five digit county
#' codes correspond to the state that the county belongs to. FIPS codes also exist
#' for US territories and minor outlying islands, though this package only provides
#' information for the 50 US states (and their associated counties and
#' census designated areas).
#'
#' @return The FIPS code(s) of given \code{state} or \code{county}.
#'
#' If only states are entered, a vector of length equal to the number of states
#' is returned. If any states are not found or are invalid, `NA` is returned in their place.
#'
#' If a state and county are entered, a single value with the FIPS code
#' for the given county is returned. If the county is invalid for the given state,
#' an error is thrown.
#'
#' If both `state` and `county` are omitted, the entire list of available FIPS
#' codes is returned, sorted by the state's abbreviation (e.g. Alaska (AK) comes
#' before Alabama (AL)).
#'
#' @seealso [fips_info()]
#'
#' @examples
#' fips()
#'
#' fips("NJ")
#' fips("California")
#'
#' fips(c("AK", "CA", "UT"))
#'
#' fips("CA", county = "orange")
#' fips(state = "AL", county = "autauga")
#' fips(state = "Alabama", county = "Autauga County")
#' @export
fips <- function(state, county = c()) {
if (missing(state) && missing(county)) {
return(usmapdata::fips_data()$fips)
}
state_ <- tolower(state)
county_ <- tolower(county)
if (length(county_) == 0) {
df <- usmapdata::fips_data()
abbr <- tolower(df$abbr)
full <- tolower(df$full)
fips2 <- c(df$fips, df$fips)
result <- fips2[match(state_, c(abbr, full))]
result[result == "NA"] <- NA
result
} else {
if (length(state_) > 1) {
stop("`county` parameter cannot be used with multiple states.")
}
df <- usmapdata::fips_data("counties")
name <- tolower(df$county)
state_abbr <- tolower(df$abbr)
state_full <- tolower(df$full)
result <- c()
for (county_i in county_) {
result <- c(
result,
df$fips[which(
(name %in% county_i | name %in% paste(county_i, "county")) &
(state_abbr %in% state_ | state_full %in% state_)
)]
)
}
if (length(result) == 0) {
if (length(county) == 1) {
stop(paste0(county, " is not a valid county in ", state, ".\n"))
} else {
stop(paste0(county, " are not valid counties in ", state, ".\n"))
}
} else {
result
}
}
}
#' Retrieve states or counties using FIPS codes
#'
#' @param fips A one to five digit, either \code{numeric}
#' or \code{character}, vector of FIPS codes for which to look up states or counties.
#' States have a two digit FIPS code and counties have a five digit FIPS
#' code (where the first 2 numbers pertain to the state).
#'
#' @param sortAndRemoveDuplicates Whether or not to sort the output and remove
#' duplicates. By default, the output will be returned in the order of
#' the values provided to the \code{fips} parameter. Set this parameter to \code{TRUE}
#' to return the output sorted by FIPS with a single instance of each FIPS.
#'
#' @return A data frame with the states or counties and the associated
#' FIPS codes.
#'
#' If `fips` is omitted, the data frame containing all available states is
#' returned.
#'
#' @seealso [fips()]
#'
#' @examples
#' fips_info(2)
#' fips_info("2")
#' fips_info(c("02", "03", "04"))
#'
#' fips_info(2016)
#' fips_info(c("02016", "02017"), sortAndRemoveDuplicates = TRUE)
#'
#' @rdname fips_info
#' @export
fips_info <- function(fips, sortAndRemoveDuplicates = FALSE) {
if (missing(fips)) {
fips_info.character(usmap::fips())
} else {
UseMethod("fips_info", fips)
}
}
#' @rdname fips_info
#' @export
fips_info.numeric <- function(fips, sortAndRemoveDuplicates = FALSE) {
if (all(fips >= 1001 & fips <= 56043)) {
fips_ <- sprintf("%05d", fips)
} else if (all(fips >= 1 & fips <= 56)) {
fips_ <- sprintf("%02d", fips)
} else {
stop("Invalid FIPS code(s), must be either 2 digit (states) or 5 digit (counties), but not both.")
}
get_fips_info(fips_, sortAndRemoveDuplicates)
}
#' @rdname fips_info
#' @export
fips_info.character <- function(fips, sortAndRemoveDuplicates = FALSE) {
if (all(nchar(fips) %in% 4:5)) {
fips_ <- sprintf("%05s", fips)
} else if (all(nchar(fips) %in% 1:2)) {
fips_ <- sprintf("%02s", fips)
} else {
stop("Invalid FIPS code, must be either 2 digit (states) or 5 digit (counties), but not both.")
}
get_fips_info(fips_, sortAndRemoveDuplicates)
}
#' Gets FIPS info for either states or counties depending on input.
#' Helper function for S3 method [fips_info()].
#' @keywords internal
get_fips_info <- function(fips, sortAndRemoveDuplicates) {
if (all(nchar(fips) == 2)) {
df <- usmapdata::fips_data()
columns <- c("abbr", "fips", "full")
} else if (all(nchar(fips) == 5)) {
df <- usmapdata::fips_data("counties")
columns <- c("full", "abbr", "county", "fips")
}
if (sortAndRemoveDuplicates) {
result <- df[df$fips %in% fips, ]
} else {
result <- static_merge(data.frame(fips = fips), df)
}
if (nrow(result) == 0) {
# Present warning if no results found.
warning(paste("FIPS code(s)", toString(fips), "not found, returned 0 results."))
} else if (!all(fips %in% result$fips)) {
# Present warning if any FIPS codes included are not found.
excluded_fips <- fips[which(!fips %in% result$fips)]
warning(paste("FIPS code(s)", toString(excluded_fips), "not found"))
}
rownames(result) <- NULL
result[, columns]
}
#' Merge while maintaining original sort order
#'
#' Internal function used by [fips_info()].
#'
#' @seealso \url{https://stackoverflow.com/a/61560405/7264964}
#' @keywords internal
static_merge <- function(x, y, ...) {
x$join_id_ <- seq_len(nrow(x))
joined <- merge(x = x, y = y, sort = FALSE, ...)
joined[order(joined$join_id_), colnames(joined) != "join_id_"]
}
|
/scratch/gouwar.j/cran-all/cranData/usmap/R/fips.R
|
#' Join county or state level data to US map data
#'
#' @inheritParams us_map
#' @param data The data that should be joined to a US map. This
#' parameter should be a data frame consisting of two columns,
#' a fips code (2 characters for state, 5 characters for county)
#' and the value that should be associated with that region. The
#' columns of \code{data} \emph{must} be \code{fips} or \code{state} and
#' the value of the `values` parameter. If both \code{fips} and \code{state}
#' are provided, this function uses the \code{fips}.
#' @param values The name of the column that contains the values to be associated
#' with a given region. The default is \code{"values"}.
#' @param na The value to be inserted for states or counties that don't have
#' a value in \code{data}. This value must be of the same type as the \code{value}
#' column of \code{data}.
#'
#' @return A data frame composed of the map data frame (from [us_map()]) except
#' an extra column containing the values in \code{data} is included.
#'
#' The result can be plotted using [ggplot2::ggplot()] or [plot_usmap()].
#'
#' @seealso [plot_usmap()]
#'
#' @examples
#' state_data <- data.frame(fips = c("01", "02", "04"), values = c(1, 5, 8))
#' df <- map_with_data(state_data, na = 0)
#'
#' state_data <- data.frame(state = c("AK", "CA", "Utah"), values = c(6, 9, 3))
#' df <- map_with_data(state_data, na = 0)
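#'
#' # Editor's sketch (assumes ggplot2 is available): the joined result can be
#' # drawn directly with a geom_sf layer, which is what plot_usmap() does internally.
#' \dontrun{
#' library(ggplot2)
#' ggplot(df) + geom_sf(aes(fill = values))
#' }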
#'
#' @export
map_with_data <- function(data,
values = "values",
include = c(),
exclude = c(),
na = NA) {
if (!is.data.frame(data)) {
stop("`data` must be a data frame")
}
if (nrow(data) == 0) {
if (length(include) == 0) {
region_type <- "state"
} else {
region_type <- ifelse(nchar(include[1]) == 2, "state", "county")
}
warning(paste("`data` is empty, returning basic", region_type, "US map data frame"))
return(usmap::us_map(regions = region_type, include = include, exclude = exclude))
}
if (!(values %in% names(data))) {
stop(paste0("\"", values, "\" column not found in `data`."))
}
if ("fips" %in% names(data)) {
# do nothing
} else if ("state" %in% names(data)) {
# convert to fips
data$fips <- usmap::fips(data$state)
} else {
# error
stop("`data` must be a data.frame containing either a `state` or `fips` column.")
}
data$fips <- as.character(data$fips)
region_type <- ifelse(nchar(data$fips[1]) <= 2, "state", "county")
map_df <- usmap::us_map(regions = region_type, include = include, exclude = exclude)
# Remove columns in data that are already in map_df
data$abbr <- NULL
data$full <- NULL
data$county <- NULL
data$geom <- NULL
#
padding <- ifelse(region_type == "state", 2, 5)
data$fips <- sprintf(paste0("%0", padding, "d"), as.numeric(data$fips))
result <- merge(map_df, data, by = "fips", all.x = TRUE, sort = FALSE)
result[is.na(result[, values]), values] <- na
result <- result[, c(setdiff(names(result), names(data)), names(data))]
if (region_type == "state") {
result <- result[order(result$full), ]
} else {
result <- result[order(result$full, result$county), ]
}
result
}
|
/scratch/gouwar.j/cran-all/cranData/usmap/R/join-data.R
|
#' Conveniently plot basic US map
#'
#' @inheritParams us_map
#' @param data A data frame containing values to plot on the map. This
#' parameter should be a data frame consisting of two columns,
#' a FIPS code (2 characters for state, 5 characters for county)
#' and the value that should be associated with that region. The
#' columns of \code{data} \emph{must} be \code{fips} or \code{state} and
#' the value of the `values` parameter.
#' @param values The name of the column that contains the values to be associated
#' with a given region. The default is \code{"value"}.
#' @param theme The theme that should be used for plotting the map. The default
#' is \code{theme_map} from \href{https://github.com/jrnold/ggthemes}{ggthemes}.
#' @param labels Whether or not to display labels on the map. Labels are not displayed
#' by default.
#' @param label_color The color of the labels to display. Corresponds to the \code{color}
#' option in the [ggplot2::aes()] mapping. The default is \code{"black"}.
#' \href{https://usmap.dev/docs/Rcolor.pdf}{Click here}
#' for more color options.
#' @param ... Other arguments to pass to [ggplot2::aes()]. These are
#' often aesthetics, used to set an aesthetic to a fixed value, like \code{color = "red"}
#' or \code{linewidth = 3}. They affect the appearance of the polygons used to render
#' the map (for example fill color, line color, line thickness, etc.). If any of
#' \code{color}/\code{colour}, \code{fill}, or \code{linewidth} are not specified they
#' are set to their default values of \code{color="black"}, \code{fill="white"},
#' and \code{linewidth=0.4}.
#'
#' @return A [ggplot2::ggplot] object that contains a basic
#' US map with the described parameters. Since the result is a \code{ggplot}
#' object, it can be extended with more [ggplot2::Geom] layers, scales, labels,
#' themes, etc.
#'
#' @seealso [usmap], [ggplot2::theme()]
#'
#' @examples
#' plot_usmap()
#' plot_usmap(regions = "states")
#' plot_usmap(regions = "counties")
#' plot_usmap(regions = "state")
#' plot_usmap(regions = "county")
#'
#' # Output is ggplot object so it can be extended
#' # with any number of ggplot layers
#' library(ggplot2)
#' plot_usmap(include = c("CA", "NV", "ID", "OR", "WA")) +
#' labs(title = "Western States")
#'
#' # Color maps with data
#' plot_usmap(data = statepop, values = "pop_2022")
#'
#' # Include labels on map (e.g. state abbreviations)
#' plot_usmap(data = statepop, values = "pop_2022", labels = TRUE)
#' # Choose color for labels
#' plot_usmap(data = statepop, values = "pop_2022", labels = TRUE, label_color = "white")
#'
#' @importFrom rlang .data
#' @export
plot_usmap <- function(regions = c("states", "state", "counties", "county"),
include = c(),
exclude = c(),
data = data.frame(),
values = "values",
theme = theme_map(),
labels = FALSE,
label_color = "black",
...) {
# check for ggplot2
if (!requireNamespace("ggplot2", quietly = TRUE)) {
stop("`ggplot2` must be installed to use `plot_usmap`.
Use: install.packages(\"ggplot2\") and try again.")
}
.data <- ggplot2::.data
# parse parameters
regions <- match.arg(regions)
geom_args <- list(...)
# set geom_polygon defaults
if (is.null(geom_args[["colour"]]) && is.null(geom_args[["color"]])) {
geom_args[["color"]] <- "black"
}
if (is.null(geom_args[["linewidth"]])) {
geom_args[["linewidth"]] <- 0.4
}
# set default "fill" if data is not included
if (is.null(geom_args[["fill"]]) && nrow(data) == 0) {
geom_args[["fill"]] <- "white"
}
# create polygon layer
if (nrow(data) == 0) {
map_df <- usmap::us_map(regions = regions, include = include, exclude = exclude)
geom_args[["mapping"]] <- ggplot2::aes()
} else {
map_df <- usmap::map_with_data(data, values = values, include = include, exclude = exclude)
if (!is.null(map_df$county)) regions <- "counties"
geom_args[["mapping"]] <- ggplot2::aes(fill = .data[[values]])
}
polygon_layer <- do.call(ggplot2::geom_sf, geom_args)
# create label layer
if (labels) {
if (regions == "state") regions <- "states"
else if (regions == "county") regions <- "counties"
centroid_labels <- usmapdata::centroid_labels(regions)
if (length(include) > 0) {
centroid_labels <- centroid_labels[
centroid_labels$full %in% include |
centroid_labels$abbr %in% include |
centroid_labels$fips %in% include,
]
}
if (length(exclude) > 0) {
centroid_labels <- centroid_labels[!(
centroid_labels$full %in% exclude |
centroid_labels$abbr %in% exclude |
centroid_labels$fips %in% exclude |
substr(centroid_labels$fips, 1, 2) %in% exclude
), ]
}
if (regions == "county" || regions == "counties") {
label_layer <- ggplot2::geom_sf_text(
data = centroid_labels,
ggplot2::aes(label = sub(" County", "", .data$county)),
color = label_color
)
} else {
label_layer <- ggplot2::geom_sf_text(
data = centroid_labels,
ggplot2::aes(label = .data$abbr), color = label_color
)
}
} else {
label_layer <- ggplot2::geom_blank()
}
# construct final plot
ggplot2::ggplot(data = map_df) + polygon_layer + label_layer + theme
}
#' Convenient theme map
#'
#' @description
#' This creates a nice map theme for use in [plot_usmap()].
#' It originated from the `ggthemes` package located at this repository:
#' \url{https://github.com/jrnold/ggthemes}.
#'
#' This function was manually rewritten here to avoid the need for
#' another package import.
#'
#' @keywords internal
theme_map <- function(base_size = 9, base_family = "") {
element_blank <- ggplot2::element_blank()
`%+replace%` <- ggplot2::`%+replace%` # nolint: object_name_linter
unit <- ggplot2::unit
ggplot2::theme_bw(base_size = base_size, base_family = base_family) %+replace%
ggplot2::theme(axis.line = element_blank,
axis.text = element_blank,
axis.ticks = element_blank,
axis.title = element_blank,
panel.background = element_blank,
panel.border = element_blank,
panel.grid = element_blank,
panel.spacing = unit(0, "lines"),
plot.background = element_blank,
legend.position = "inside",
legend.justification.inside = c(0, 0))
}
|
/scratch/gouwar.j/cran-all/cranData/usmap/R/plot-map.R
|
#' New England census division
#'
#' @description
#' US Census Bureau regional division containing Connecticut, Maine,
#' Massachusetts, New Hampshire, Rhode Island, and Vermont.
#'
#' @details
#' See
#' \url{https://www2.census.gov/geo/pdfs/maps-data/maps/reference/us_regdiv.pdf}
#'
#' @examples
#' plot_usmap(include = .new_england, labels = TRUE)
#' @export
.new_england <- c("CT", "MA", "ME", "NH", "RI", "VT")
#' Mid-Atlantic census division
#'
#' @description
#' US Census Bureau regional division containing New Jersey, New York,
#' and Pennsylvania.
#'
#' @details
#' See
#' \url{https://www2.census.gov/geo/pdfs/maps-data/maps/reference/us_regdiv.pdf}
#'
#' @examples
#' plot_usmap(include = .mid_atlantic, labels = TRUE)
#' @export
.mid_atlantic <- c("NJ", "NY", "PA")
#' East North Central census division
#'
#' @description
#' US Census Bureau regional division containing Illinois, Indiana, Michigan,
#' Ohio, and Wisconsin.
#'
#' @details
#' See
#' \url{https://www2.census.gov/geo/pdfs/maps-data/maps/reference/us_regdiv.pdf}
#'
#' @examples
#' plot_usmap(include = .east_north_central, labels = TRUE)
#' @export
.east_north_central <- c("IL", "IN", "MI", "OH", "WI")
#' West North Central census division
#'
#' @description
#' US Census Bureau regional division containing Iowa, Kansas, Minnesota,
#' Missouri, Nebraska, North Dakota, and South Dakota.
#'
#' @details
#' See
#' \url{https://www2.census.gov/geo/pdfs/maps-data/maps/reference/us_regdiv.pdf}
#'
#' @examples
#' plot_usmap(include = .west_north_central, labels = TRUE)
#' @export
.west_north_central <- c("IA", "KS", "MN", "MO", "NE", "ND", "SD")
#' South Atlantic census division
#'
#' @description
#' US Census Bureau regional division containing Delaware, Florida, Georgia,
#' Maryland, North Carolina, South Carolina, Virginia,
#' District of Columbia, and West Virginia.
#'
#' @details
#' See
#' \url{https://www2.census.gov/geo/pdfs/maps-data/maps/reference/us_regdiv.pdf}
#'
#' @examples
#' plot_usmap(include = .south_atlantic, labels = TRUE)
#' @export
.south_atlantic <- c("DC", "DE", "FL", "GA", "MD", "NC", "SC", "VA", "WV")
#' East South Central census division
#'
#' @description
#' US Census Bureau regional division containing Alabama, Kentucky,
#' Mississippi, and Tennessee.
#'
#' @details
#' See
#' \url{https://www2.census.gov/geo/pdfs/maps-data/maps/reference/us_regdiv.pdf}
#'
#' @examples
#' plot_usmap(include = .east_south_central, labels = TRUE)
#' @export
.east_south_central <- c("AL", "KY", "MS", "TN")
#' West South Central census division
#'
#' @description
#' US Census Bureau regional division containing Arkansas, Louisiana, Oklahoma,
#' and Texas.
#'
#' @details
#' See
#' \url{https://www2.census.gov/geo/pdfs/maps-data/maps/reference/us_regdiv.pdf}
#'
#' @examples
#' plot_usmap(include = .west_south_central, labels = TRUE)
#' @export
.west_south_central <- c("AR", "LA", "OK", "TX")
#' Mountain census division
#'
#' @description
#' US Census Bureau regional division containing Arizona, Colorado, Idaho,
#' Montana, Nevada, New Mexico, Utah, and Wyoming.
#'
#' @details
#' See
#' \url{https://www2.census.gov/geo/pdfs/maps-data/maps/reference/us_regdiv.pdf}
#'
#' @examples
#' plot_usmap(include = .mountain, labels = TRUE)
#' @export
.mountain <- c("AZ", "CO", "ID", "MT", "NV", "NM", "UT", "WY")
#' Pacific census division
#'
#' @description
#' US Census Bureau regional division containing Alaska, California, Hawaii,
#' Oregon, and Washington.
#'
#' @details
#' See
#' \url{https://www2.census.gov/geo/pdfs/maps-data/maps/reference/us_regdiv.pdf}
#'
#' @examples
#' plot_usmap(include = .pacific, labels = TRUE)
#' @export
.pacific <- c("AK", "CA", "HI", "OR", "WA")
#' Northeast census region
#'
#' @description
#' US Census Bureau region containing the New England and Mid-Atlantic
#' divisions.
#'
#' @details
#' See
#' \url{https://www2.census.gov/geo/pdfs/maps-data/maps/reference/us_regdiv.pdf}
#'
#' @examples
#' plot_usmap(include = .northeast_region, labels = TRUE)
#' @export
.northeast_region <- c(.new_england, .mid_atlantic)
#' North-Central census region
#'
#' @description
#' Former US Census Bureau region containing the East North Central and West
#' North Central divisions. This region has been designated as "Midwest"
#' since June 1984.
#'
#' @details
#' See
#' \url{https://www2.census.gov/geo/pdfs/maps-data/maps/reference/us_regdiv.pdf}
#'
#' @examples
#' plot_usmap(include = .north_central_region, labels = TRUE)
#' @export
.north_central_region <- c(.east_north_central, .west_north_central)
#' Midwest census region
#'
#' @description
#' US Census Bureau region containing the East North Central and West
#' North Central divisions. This region was designated as "North Central Region"
#' prior to June 1984.
#'
#' @details
#' See
#' \url{https://www2.census.gov/geo/pdfs/maps-data/maps/reference/us_regdiv.pdf}
#'
#' @examples
#' plot_usmap(include = .midwest_region, labels = TRUE)
#' @export
.midwest_region <- .north_central_region
#' South census region
#'
#' @description
#' US Census Bureau region containing the South Atlantic, East South Central,
#' and West South Central divisions.
#'
#' @details
#' See
#' \url{https://www2.census.gov/geo/pdfs/maps-data/maps/reference/us_regdiv.pdf}
#'
#' @examples
#' plot_usmap(include = .south_region, labels = TRUE)
#' @export
.south_region <- c(.south_atlantic, .east_south_central, .west_south_central)
#' West census region
#'
#' @description
#' US Census Bureau region containing the Mountain and Pacific divisions.
#'
#' @details
#' See
#' \url{https://www2.census.gov/geo/pdfs/maps-data/maps/reference/us_regdiv.pdf}
#'
#' @examples
#' plot_usmap(include = .west_region, labels = TRUE)
#' @export
.west_region <- c(.mountain, .pacific)
|
/scratch/gouwar.j/cran-all/cranData/usmap/R/regions.R
|
#' Convert spatial data to usmap projection
#'
#' @description Converting a spatial object of map coordinates will
#' allow those points to line up with the regular usmap plot by applying
#' the same US National Atlas Equal Area projection (including Alaska and
#' Hawaii of course) to those points as well.
#'
#' The input `data` is assumed to contain longitude and latitude coordinates
#' by default. If this is not the case, provide an [sf::st_crs] object
#' to the `crs` parameter with the appropriate coordinate reference system.
#'
#' @param data A data frame containing coordinates in a two column format
#' where the first column represents longitude and the second column
#' represents latitude. The names of the data frame columns do not matter,
#' just that the order of the columns is kept intact.
#'
#' @param ... Additional parameters passed onto [sf::st_as_sf].
#' By default, `crs = sf::st_crs(4326)` is used, implying longitude and latitude
#' coordinates.
#'
#' @param input_names A character vector of length two which specifies the
#' longitude and latitude columns of the input data (the ones that should be
#' transformed), respectively. Only required if the input data is
#' a `data.frame` object. Defaults to `c("lon", "lat")`.
#'
#' @param output_names Defunct, this parameter is no longer used. The output
#' of this function will have a column named `"geometry"` with the transformed
#' coordinates. This parameter may be removed in a future version.
#'
#' @return An `sf` object containing the transformed coordinates from the
#' input data frame with the US National Atlas Equal Area projection applied.
#' The transformed columns will be appended to the data frame so that all
#' original columns should remain intact.
#' @examples
#' data <- data.frame(
#' lon = c(-74.01, -95.36, -118.24, -87.65, -134.42, -157.86),
#' lat = c(40.71, 29.76, 34.05, 41.85, 58.30, 21.31),
#' pop = c(8398748, 2325502, 3990456, 2705994, 32113, 347397)
#' )
#'
#' # Transform data
#' transformed_data <- usmap_transform(data)
#'
#' # Plot transformed data on map
#' library(ggplot2)
#'
#' plot_usmap() + geom_sf(
#' data = transformed_data,
#' aes(size = pop),
#' color = "red", alpha = 0.5
#' )
#'
#' @rdname usmap_transform
#' @export
usmap_transform <- function(data, ...) {
UseMethod("usmap_transform")
}
#' @rdname usmap_transform
#' @export
usmap_transform.sf <- function(data, ...) {
perform_transform(data, ...)
}
#' @rdname usmap_transform
#' @export
usmap_transform.data.frame <- function(data,
...,
input_names = c("lon", "lat"),
output_names = NULL) {
# ensure input is data.frame
data <- as.data.frame(data)
# validation
if (length(input_names) != 2 && !any(is.na(as.character(input_names)))) {
stop("`input_names` must be a character vector of length 2.")
} else {
input_names <- as.character(input_names)
}
if (!all(input_names %in% colnames(data))) {
stop("All `input_names` must exist as column names in `data`.")
}
if (ncol(data) < 2 ||
!is.numeric(data[, input_names[1]]) ||
!is.numeric(data[, input_names[2]])) {
stop("`data` must contain at least two numeric columns.")
}
if (!is.null(output_names)) {
warning("`output_names` is no longer used. This parameter will be removed in a future version of `usmap`.")
}
# convert to sf and perform transformation
data <- sf::st_as_sf(data, coords = input_names)
perform_transform(data, ...)
}
#' Transform `sf` coordinates to `usmap` transform
#'
#' Internal function with common functionality for transforming coordinates.
#' Using this function directly is not recommended.
#'
#' @keywords internal
perform_transform <- function(data, ...) {
data_sf <- sf::st_as_sf(data, ...)
if (is.na(sf::st_crs(data_sf))) {
crs <- list(...)[["crs"]]
if (is.null(crs)) crs <- sf::st_crs(4326)
sf::st_crs(data_sf) <- crs
}
# Transform to canonical projection
transformed <- sf::st_transform(data_sf, usmap_crs())
sf::st_agr(transformed) <- "constant"
# Transform Alaska points
ak_bbox <- usmapdata:::alaska_bbox()
alaska <- sf::st_intersection(transformed, ak_bbox)
alaska <- usmapdata:::transform_alaska(alaska)
# Transform Hawaii points
hi_bbox <- usmapdata:::hawaii_bbox()
hawaii <- sf::st_intersection(transformed, hi_bbox)
hawaii <- usmapdata:::transform_hawaii(hawaii)
# Re-combine all points
transformed_excl_ak <- sf::st_difference(transformed, ak_bbox)
sf::st_agr(transformed_excl_ak) <- "constant"
transformed_excl_ak_hi <- sf::st_difference(transformed_excl_ak, hi_bbox)
sf::st_agr(transformed_excl_ak_hi) <- "constant"
rbind(transformed_excl_ak_hi, alaska, hawaii)
}
#' usmap coordinate reference system
#'
#' @description This coordinate reference system (CRS) represents
#' the canonical projection used by the \code{usmap} package. It can
#' be used to transform shape files, spatial points, spatial data
#' frames, etc. to the same coordinate representation that is used
#' by the \code{plot_usmap} function.
#'
#' @export
usmap_crs <- function() {
usmapdata:::ea_crs()
}
|
/scratch/gouwar.j/cran-all/cranData/usmap/R/transform.R
|
#' Retrieve US map data
#'
#' @param regions The region breakdown for the map, can be one of
#' (\code{"states"}, \code{"state"}, \code{"counties"}, \code{"county"}).
#' The default is \code{"states"}.
#' @param include The regions to include in the resulting map. If \code{regions} is
#' \code{"states"}/\code{"state"}, the value can be either a state name, abbreviation or FIPS code.
#' For counties, the FIPS must be provided as there can be multiple counties with the
#' same name. If states are provided in the county map, only counties in the included states
#' will be returned.
#' @param exclude The regions to exclude in the resulting map. If \code{regions} is
#' \code{"states"}/\code{"state"}, the value can be either a state name, abbreviation or FIPS code.
#' For counties, the FIPS must be provided as there can be multiple counties with the
#' same name. The regions listed in the \code{include} parameter are applied first and the
#' \code{exclude} regions are then removed from the resulting map. Any excluded regions
#' not present in the included regions will be ignored.
#'
#' @seealso [usmapdata::us_map()] of which this function is a wrapper for.
#'
#' @return A data frame of US map coordinates divided by the desired \code{regions}.
#'
#' @examples
#' str(us_map())
#'
#' df <- us_map(regions = "counties")
#' west_coast <- us_map(include = c("CA", "OR", "WA"))
#'
#' south_atl_excl_FL <- us_map(include = .south_atlantic, exclude = "FL")
#' @export
us_map <- function(regions = c("states", "state", "counties", "county"),
include = c(),
exclude = c()) {
usmapdata::us_map(regions = regions, include = include, exclude = exclude)
}
|
/scratch/gouwar.j/cran-all/cranData/usmap/R/us-map.R
|
#' usmap: US maps including Alaska and Hawaii
#'
#' @description
#' It is usually difficult or inconvenient to create US maps that
#' include both Alaska and Hawaii in a convenient spot. All map
#' data presented in this package uses the US National Atlas Equal Area
#' projection.
#'
#' @section Map data:
#' Alaska and Hawaii have been manually moved to a new location so that
#' their new coordinates place them to the bottom-left corner of
#' the map. These maps can be accessed by using the [us_map()] function.
#'
#' The function provides the ability to retrieve maps with either
#' state borders or county borders using the \code{regions} parameter
#' for convenience.
#'
#' States (or counties) can be included and excluded using the provided
#' \code{include} and \code{exclude} parameters. These parameters can be used
#' together with any combination of names, abbreviations, or FIPS code to
#' create more complex maps.
#'
#' @section FIPS lookup tools:
#' Several functions have been included to look up the FIPS code
#' pertaining to a US state or county (see [fips()]).
#'
#' Likewise a reverse lookup can be done with [fips_info()], where a FIPS code
#' can be used to retrieve the associated states or counties. This can be
#' useful when preparing data to be merged with the map data frame.
#'
#' @section Plot US map data:
#' A convenience function [plot_usmap()] has been included which
#' takes similar parameters to [us_map()] and returns a [ggplot2::ggplot2]
#' object. Since the output is a \code{ggplot} object, other layers can be
#' added such as scales, themes, and labels. Including data in the function call
#' will color the map according to the values in the data, creating a choropleth.
#'
#' @section Transforming data:
#' It is also possible to add spatial data to the map, in the form of either
#' data frames or simple features ([sf::sf]) objects. If necessary, the
#' data can be transformed to be in the same coordinate reference system as
#' [usmap] by using [usmap_transform()] and then plotted using [ggplot2::geom_sf()].
#'
#' @author Paolo Di Lorenzo \cr
#' \itemize{
#' \item Email: \email{dilorenzo@@hey}
#' \item GitHub: \url{https://github.com/pdil/}
#' }
#'
#' @seealso
#' Helpful links:
#' \itemize{
#' \item FIPS code information \cr
#' \url{https://en.wikipedia.org/wiki/FIPS_county_code}
#' \url{https://en.wikipedia.org/wiki/FIPS_state_code}
#' \item US Census Shapefiles \cr
#' \url{https://www.census.gov/geographies/mapping-files/time-series/geo/cartographic-boundary.html}
#' \item Map Features \cr
#' \url{https://en.wikipedia.org/wiki/Map_projection} \cr
#' \url{https://en.wikipedia.org/wiki/Equal-area_projection} \cr
#' \url{https://en.wikipedia.org/wiki/Choropleth} \cr
#' \url{https://epsg.io/9311} (US National Atlas Equal Area)
#' }
#'
#' @references
#' Rudis, Bob. "Moving The Earth (well, Alaska & Hawaii) With R."
#' Blog post. Rud.is., 16 Nov. 2014. Web. 10 Aug. 2015.
#' \url{https://rud.is/b/2014/11/16/moving-the-earth-well-alaska-hawaii-with-r/}.
#'
#' @docType package
#' @name usmap
"_PACKAGE"
## usethis namespace: start
## usethis namespace: end
NULL
|
/scratch/gouwar.j/cran-all/cranData/usmap/R/usmap-package.R
|
## ----setup, include = FALSE---------------------------------------------------
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
## ----fig.align='center', fig.width=7------------------------------------------
usmap::plot_usmap()
## ----fig.align='center', fig.width=7------------------------------------------
usmap::plot_usmap(regions = "counties")
## ----eval = FALSE-------------------------------------------------------------
# states_df <- usmap::us_map()
# counties_df <- usmap::us_map(regions = "counties")
## -----------------------------------------------------------------------------
# Get FIPS code for a state
usmap::fips(state = "MA")
usmap::fips(state = "Massachusetts")
# Get FIPS code for a county
usmap::fips(state = "NJ", county = "Bergen")
usmap::fips(state = "CA", county = "Orange County")
# The parameters are NOT case sensitive!
usmap::fips(state = "ca", county = "oRanGe cOUNty")
## -----------------------------------------------------------------------------
usmap::fips_info(c("30", "33", "34"))
## -----------------------------------------------------------------------------
usmap::fips_info(c("01001", "01003", "01005", "01007"))
|
/scratch/gouwar.j/cran-all/cranData/usmap/inst/doc/usmap1.R
|
---
title: "1. Introduction"
author: "Paolo Di Lorenzo"
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{1. Introduction}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r setup, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
```
## Plotting
Plots of US maps in R usually lack Alaska and Hawaii. The reason is that plotting takes the literal longitude and latitude coordinates and maps them onto a Cartesian x-y coordinate graph. Alaska and Hawaii are very far from the contiguous US in this representation, so it can be unwieldy to include them. The `usmap` package solves this issue by providing data frames in which Alaska and Hawaii have been moved to a convenient spot just to the bottom left of the contiguous United States.
#### Blank US state map
```{r, fig.align='center', fig.width=7}
usmap::plot_usmap()
```
#### Blank US county map
```{r, fig.align='center', fig.width=7}
usmap::plot_usmap(regions = "counties")
```
## Raw map data
The raw US map data for counties or states can be obtained for further manipulation (and joining with data). The default `regions` is `"states"`.
```{r, eval = FALSE}
states_df <- usmap::us_map()
counties_df <- usmap::us_map(regions = "counties")
```
## FIPS codes
FIPS codes are defined in the Federal Information Processing Standards by the US government. One usage is uniquely identifying US states and counties (among other things such as identifying countries for the CIA World Factbook). Downloading datasets from the [US Census](https://www.census.gov/data.html) will often include FIPS codes as identifiers so it can be helpful to know what a FIPS code represents. The functions in `usmap` are built around the FIPS code identification system and so convenience methods for accessing them and performing reverse-lookups have been included.
#### State/County FIPS lookup
```{r}
# Get FIPS code for a state
usmap::fips(state = "MA")
usmap::fips(state = "Massachusetts")
# Get FIPS code for a county
usmap::fips(state = "NJ", county = "Bergen")
usmap::fips(state = "CA", county = "Orange County")
# The parameters are NOT case sensitive!
usmap::fips(state = "ca", county = "oRanGe cOUNty")
```
#### FIPS reverse lookup
If the FIPS code is known and you want to see which state or county it corresponds to, use the reverse lookup function `fips_info`.
```{r}
usmap::fips_info(c("30", "33", "34"))
```
```{r}
usmap::fips_info(c("01001", "01003", "01005", "01007"))
```
#### Further reading
More information about FIPS can be read [here](https://en.wikipedia.org/wiki/Federal_Information_Processing_Standards).
---
> “A map is the greatest of all epic poems. Its lines and colors show the realization of great dreams.”
> - _Gilbert H. Grosvenor, Editor of National Geographic (1903 - 1954)_
|
/scratch/gouwar.j/cran-all/cranData/usmap/inst/doc/usmap1.Rmd
|
## ----setup, include = FALSE---------------------------------------------------
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
## ----fig.align='center', fig.width=7, message=FALSE, warning=FALSE------------
library(usmap)
library(ggplot2)
plot_usmap(regions = "counties") +
labs(title = "US Counties",
subtitle = "This is a blank map of the counties of the United States.") +
theme(panel.background = element_rect(color = "black", fill = "lightblue"))
## ----fig.align='center', fig.width=7, message=FALSE, warning=FALSE------------
library(usmap)
library(ggplot2)
plot_usmap(include = c("CA", "ID", "NV", "OR", "WA")) +
labs(title = "Western US States",
subtitle = "These are the states in the Pacific Timezone.")
## ----fig.align='center', fig.width=7, message=FALSE, warning=FALSE------------
library(usmap)
library(ggplot2)
plot_usmap(data = statepop, values = "pop_2022", color = "red") +
scale_fill_continuous(name = "Population (2022)", label = scales::comma) +
theme(legend.position = "right")
## ----fig.align='center', fig.width=7, message=FALSE, warning=FALSE------------
library(usmap)
library(ggplot2)
plot_usmap(data = statepop, values = "pop_2022", color = "red") +
scale_fill_continuous(
low = "white", high = "red", name = "Population (2022)", label = scales::comma
) + theme(legend.position = "right")
## ----fig.align='center', fig.width=7, message=FALSE, warning=FALSE------------
library(usmap)
library(ggplot2)
plot_usmap(
data = statepop, values = "pop_2022", include = c("CA", "ID", "NV", "OR", "WA"), color = "red"
) +
scale_fill_continuous(
low = "white", high = "red", name = "Population (2022)", label = scales::comma
) +
labs(title = "Western US States", subtitle = "These are the states in the Pacific Timezone.") +
theme(legend.position = "right")
## ----fig.show='hide', message=FALSE, warning=FALSE----------------------------
df <- data.frame(
fips = c("02", "01", "05", "04"),
values = c(14, 18, 19, 8)
)
plot_usmap(data = df)
## ----fig.show='hide', message=FALSE, warning=FALSE----------------------------
df <- data.frame(
fips = c("02", "01", "05", "04"),
population = c(14, 18, 19, 8)
)
plot_usmap(data = df, values = "population")
## ----fig.show='hide', message=FALSE, warning=FALSE----------------------------
df <- data.frame(
state = c("AL", "Alaska", "AR", "AZ"),
values = c(14, 18, 19, 8)
)
plot_usmap(data = df)
## ----fig.show='hide', message=FALSE, warning=FALSE----------------------------
df <- data.frame(
fips = c("10001", "10003", "10005"),
values = c(93, 98, 41)
)
plot_usmap(data = df)
## ----fig.align='center', fig.width=7, message=FALSE, warning=FALSE------------
usmap::plot_usmap(include = .south_region)
## ----fig.align='center', fig.width=7, message=FALSE, warning=FALSE------------
usmap::plot_usmap(include = .east_south_central)
## ----fig.align='center', fig.width=7, message=FALSE, warning=FALSE------------
usmap::plot_usmap(include = .south_region, exclude = .east_south_central)
## ----fig.align='center', fig.width=7, message=FALSE, warning=FALSE------------
usmap::plot_usmap("counties",
include = c(.south_region, "IA"),
exclude = c(.east_south_central, "12")) # 12 = FL
## ----fig.align='center', fig.width=7, message=FALSE, warning=FALSE------------
usmap::plot_usmap("counties", fill = "yellow", alpha = 0.25,
# 06065 = Riverside County, CA
include = c(.south_region, "IA", "06065"),
# 12 = FL, 48141 = El Paso County, TX
exclude = c(.east_south_central, "12", "48141"))
## -----------------------------------------------------------------------------
.new_england
.mid_atlantic
.east_north_central
.west_north_central
.south_atlantic
.east_south_central
.west_south_central
.mountain
.pacific
## -----------------------------------------------------------------------------
.northeast_region # c(.new_england, .mid_atlantic)
.north_central_region # c(.east_north_central, .west_north_central)
.midwest_region # .north_central_region (renamed in June 1984)
.south_region # c(.south_atlantic, .east_south_central, .west_south_central)
.west_region # c(.mountain, .pacific)
## -----------------------------------------------------------------------------
str(usmap::us_map())
## -----------------------------------------------------------------------------
str(usmap::us_map(regions = "counties"))
|
/scratch/gouwar.j/cran-all/cranData/usmap/inst/doc/usmap2.R
|
---
title: "2. Mapping the US"
author: "Paolo Di Lorenzo"
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{2. Mapping the US}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r setup, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
```
## Extending `plot_usmap` with `ggplot2`
The nice thing about `usmap::plot_usmap` is that it returns a [ggplot](https://ggplot2.tidyverse.org/index.html) object, which means we can add `ggplot` layers to the plot right out of the box.
```{r, fig.align='center', fig.width=7, message=FALSE, warning=FALSE}
library(usmap)
library(ggplot2)
plot_usmap(regions = "counties") +
labs(title = "US Counties",
subtitle = "This is a blank map of the counties of the United States.") +
theme(panel.background = element_rect(color = "black", fill = "lightblue"))
```
#### Plot only certain states
```{r, fig.align='center', fig.width=7, message=FALSE, warning=FALSE}
library(usmap)
library(ggplot2)
plot_usmap(include = c("CA", "ID", "NV", "OR", "WA")) +
labs(title = "Western US States",
subtitle = "These are the states in the Pacific Timezone.")
```
#### Add some data to the map
```{r, fig.align='center', fig.width=7, message=FALSE, warning=FALSE}
library(usmap)
library(ggplot2)
plot_usmap(data = statepop, values = "pop_2022", color = "red") +
scale_fill_continuous(name = "Population (2022)", label = scales::comma) +
theme(legend.position = "right")
```
Notice how extensively the map can be customized using `ggplot2` layers. For example, we might want to use a different color scheme.
#### Change fill color scale
```{r, fig.align='center', fig.width=7, message=FALSE, warning=FALSE}
library(usmap)
library(ggplot2)
plot_usmap(data = statepop, values = "pop_2022", color = "red") +
scale_fill_continuous(
low = "white", high = "red", name = "Population (2022)", label = scales::comma
) + theme(legend.position = "right")
```
The data-filled map can also be filtered to show certain regions only, like the western states shown above.
#### Show data in certain states
```{r, fig.align='center', fig.width=7, message=FALSE, warning=FALSE}
library(usmap)
library(ggplot2)
plot_usmap(
data = statepop, values = "pop_2022", include = c("CA", "ID", "NV", "OR", "WA"), color = "red"
) +
scale_fill_continuous(
low = "white", high = "red", name = "Population (2022)", label = scales::comma
) +
labs(title = "Western US States", subtitle = "These are the states in the Pacific Timezone.") +
theme(legend.position = "right")
```
### Required Data Format
The data passed to the `data` parameter in `plot_usmap()` must be a data frame
with at least two columns. One of the columns must be named `"fips"` or `"state"` and contain
either the FIPS code, the state abbreviation, or the state name (for county maps
only the FIPS code is supported). The second column must be the values to be plotted
for each region. The default name of the values column is `"values"`. If a different
name is used in the data frame, the name can be specified in the `values` parameter
of `plot_usmap`. Any extra columns in the data frame will be ignored.
#### FIPS column with default `values` column
```{r, fig.show='hide', message=FALSE, warning=FALSE}
df <- data.frame(
fips = c("02", "01", "05", "04"),
values = c(14, 18, 19, 8)
)
plot_usmap(data = df)
```
#### FIPS column with custom `values` column
Name of values column must be specified in `values` parameter if it is not `"values"`.
```{r, fig.show='hide', message=FALSE, warning=FALSE}
df <- data.frame(
fips = c("02", "01", "05", "04"),
population = c(14, 18, 19, 8)
)
plot_usmap(data = df, values = "population")
```
#### States
Abbreviations and full names can be mixed if desired.
```{r, fig.show='hide', message=FALSE, warning=FALSE}
df <- data.frame(
state = c("AL", "Alaska", "AR", "AZ"),
values = c(14, 18, 19, 8)
)
plot_usmap(data = df)
```
#### Counties
County names are not supported in `plot_usmap` data frames. Use `fips` instead.
```{r, fig.show='hide', message=FALSE, warning=FALSE}
df <- data.frame(
fips = c("10001", "10003", "10005"),
values = c(93, 98, 41)
)
plot_usmap(data = df)
```
## Built-in Regions
`usmap` provides some built-in regions based on the [US Census Bureau Regions and Divisions](https://www2.census.gov/geo/pdfs/maps-data/maps/reference/us_regdiv.pdf). These can be used in place of the `include`/`exclude` parameters when using `us_map` or `plot_usmap` and start with a `.` (dot):
```{r, fig.align='center', fig.width=7, message=FALSE, warning=FALSE}
usmap::plot_usmap(include = .south_region)
```
```{r, fig.align='center', fig.width=7, message=FALSE, warning=FALSE}
usmap::plot_usmap(include = .east_south_central)
```
```{r, fig.align='center', fig.width=7, message=FALSE, warning=FALSE}
usmap::plot_usmap(include = .south_region, exclude = .east_south_central)
```
This also works with county maps. The regions can also be combined with actual state or FIPS values within the `include`/`exclude` parameters:
```{r, fig.align='center', fig.width=7, message=FALSE, warning=FALSE}
usmap::plot_usmap("counties",
include = c(.south_region, "IA"),
exclude = c(.east_south_central, "12")) # 12 = FL
```
You can even include or exclude individual counties (county-level inclusions/exclusions can only be done via their FIPS codes due to duplicate county names across states; for example eight different states have an "Orange County"):
```{r, fig.align='center', fig.width=7, message=FALSE, warning=FALSE}
usmap::plot_usmap("counties", fill = "yellow", alpha = 0.25,
# 06065 = Riverside County, CA
include = c(.south_region, "IA", "06065"),
# 12 = FL, 48141 = El Paso County, TX
exclude = c(.east_south_central, "12", "48141"))
```
These parameters therefore allow for the possibility of some complex compositions of states and counties, to create the exact map that is desired.
#### Supported US Census Regions and Divisions
The following divisions are supported:
```{r}
.new_england
.mid_atlantic
.east_north_central
.west_north_central
.south_atlantic
.east_south_central
.west_south_central
.mountain
.pacific
```
Regions are composed of multiple divisions, and the following are supported:
```{r}
.northeast_region # c(.new_england, .mid_atlantic)
.north_central_region # c(.east_north_central, .west_north_central)
.midwest_region # .north_central_region (renamed in June 1984)
.south_region # c(.south_atlantic, .east_south_central, .west_south_central)
.west_region # c(.mountain, .pacific)
```
## Raw map data
The raw US map data for counties or states can be obtained for further manipulation (and joining with data).
```{r}
str(usmap::us_map())
```
```{r}
str(usmap::us_map(regions = "counties"))
```
You can also include only certain states and counties just like in `plot_usmap`. In fact, the `regions` and `include` parameters of `plot_usmap` are derived directly from their usage in `us_map`.
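For example, here is a minimal sketch (added for illustration, reusing the `include` parameter shown above) that retrieves only the county map data for three West Coast states:
```{r}
str(usmap::us_map(regions = "counties", include = c("CA", "OR", "WA")))
```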
|
/scratch/gouwar.j/cran-all/cranData/usmap/inst/doc/usmap2.Rmd
|
## ----setup, include = FALSE---------------------------------------------------
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
## ----fig.align='center', fig.width=7, fig.height=5, message=FALSE, warning=FALSE----
usmap::plot_usmap("states", labels = TRUE)
## ----fig.align='center', fig.width=7, fig.height=5, message=FALSE, warning=FALSE----
usmap::plot_usmap("counties", include = c("MA", "CT", "RI"), labels = TRUE)
## ----fig.align='center', fig.width=7, fig.height=5, message=FALSE, warning=FALSE----
usmap::plot_usmap("counties",
include = c("MA", "CT", "RI"),
labels = TRUE, label_color = "blue")
## ----fig.align='center', fig.width=7, fig.height=5, message=FALSE, warning=FALSE----
usmap::plot_usmap("counties",
include = c("MA", "CT", "RI"),
labels = TRUE, label_color = "blue",
fill = "yellow", alpha = 0.25, color = "orange", linewidth = 2)
## ----warning=FALSE------------------------------------------------------------
usmap::usmap_crs()
## ----fig.align='center', fig.width=8, fig.height=5, message=FALSE, warning=FALSE----
library(usmap)
library(ggplot2)
eq_transformed <- usmap_transform(earthquakes)
plot_usmap() +
geom_sf(data = eq_transformed, aes(size = mag),
color = "red", alpha = 0.25) +
labs(title = "US Earthquakes",
subtitle = "Source: USGS, Jan 1 to Jun 30 2019",
size = "Magnitude") +
theme(legend.position = "right")
## ----fig.align='center', fig.height=5, fig.width=8, message=FALSE, warning=FALSE----
library(usmap)
library(ggplot2)
cities_t <- usmap_transform(citypop)
plot_usmap(fill = "yellow", alpha = 0.25) +
geom_sf(data = cities_t,
aes(size = city_pop),
color = "purple", alpha = 0.5) +
ggrepel::geom_label_repel(data = cities_t,
aes(label = most_populous_city, geometry = geometry),
size = 3, alpha = 0.8,
label.r = unit(0.5, "lines"), label.size = 0.5,
segment.color = "red", segment.size = 1,
stat = "sf_coordinates", seed = 1002,
max.overlaps = 20) +
scale_size_continuous(range = c(1, 16),
label = scales::comma) +
labs(title = "Most Populous City in Each US State",
subtitle = "Source: US Census 2010",
size = "City Population") +
theme(legend.position = "right")
## ----fig.align='center', fig.height=5, fig.width=8, message=FALSE, warning=FALSE----
library(usmap)
library(ggplot2)
rivers_t <- usmap_transform(usrivers)
plot_usmap("counties", color = "gray80") +
geom_sf(data = rivers_t, aes(linewidth = Shape_Length, color = SYSTEM, fill = SYSTEM)) +
scale_linewidth_continuous(range = c(0.3, 1.8), guide = "none") +
scale_color_discrete(guide = "none") +
labs(title = "Major Rivers in the United States",
subtitle = "Source: ESRI 2010",
fill = "River System") +
theme(legend.position = "right")
|
/scratch/gouwar.j/cran-all/cranData/usmap/inst/doc/usmap3.R
|
---
title: "3. Advanced Mapping"
author: "Paolo Di Lorenzo"
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{3. Advanced Mapping}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r setup, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
```
This vignette will explore some of the more advanced mapping features of `usmap`. Before continuing, be sure to check out [Mapping the US](usmap2.html) as that will cover more of the basics of plotting US maps and styling them with `ggplot2`.
## Labels
As of `usmap 0.4.0`, maps with state labels can be created:
```{r, fig.align='center', fig.width=7, fig.height=5, message=FALSE, warning=FALSE}
usmap::plot_usmap("states", labels = TRUE)
```
`usmap 0.5.0` adds the ability to add county labels:
```{r, fig.align='center', fig.width=7, fig.height=5, message=FALSE, warning=FALSE}
usmap::plot_usmap("counties", include = c("MA", "CT", "RI"), labels = TRUE)
```
Labels can be colored using the `label_color` parameter:
```{r, fig.align='center', fig.width=7, fig.height=5, message=FALSE, warning=FALSE}
usmap::plot_usmap("counties",
include = c("MA", "CT", "RI"),
labels = TRUE, label_color = "blue")
```
## `ggplot2` aesthetic mapping parameters
Parameters used by the map's aesthetic mapping (`ggplot2::aes`) can be passed directly via `plot_usmap` by adding the parameters anywhere at the call site:
```{r, fig.align='center', fig.width=7, fig.height=5, message=FALSE, warning=FALSE}
usmap::plot_usmap("counties",
include = c("MA", "CT", "RI"),
labels = TRUE, label_color = "blue",
fill = "yellow", alpha = 0.25, color = "orange", linewidth = 2)
```
Notice in this case we set the `fill` and `alpha` parameters to fill in the counties with a semi-transparent yellow color.
The following parameters are supported:
- `fill`: fill color of the state/county polygons
- `alpha`: transparency of the state/county polygon fill colors
- `color`/`colour`: line color of the state/county polygons
- `linewidth`: thickness of the state/county polygon lines
## Transform data frames to match `usmap` projection
Data sets with longitude and latitude coordinates can be transformed to match the projection used in `usmap` (Albers Equal Area projection). This is convenient for plotting location-specific data and values using `ggplot2` layers such as `geom_point` and `geom_label`.
#### Projection
The projection used by `usmap` can also be accessed by using `usmap_crs()`:
```{r, warning=FALSE}
usmap::usmap_crs()
```
A convenience method called `usmap_transform` is provided that transforms a `data.frame` containing longitude/latitude columns to use this projection. (Currently, only `data.frame`s are supported. Other structures may be supported in the future.)
#### Example: earthquakes
Here is an example using the provided `earthquakes` dataset:
```{r, fig.align='center', fig.width=8, fig.height=5, message=FALSE, warning=FALSE}
library(usmap)
library(ggplot2)
eq_transformed <- usmap_transform(earthquakes)
plot_usmap() +
geom_sf(data = eq_transformed, aes(size = mag),
color = "red", alpha = 0.25) +
labs(title = "US Earthquakes",
subtitle = "Source: USGS, Jan 1 to Jun 30 2019",
size = "Magnitude") +
theme(legend.position = "right")
```
#### Example: most populous city in each state
And a more comprehensive example using the provided `citypop` dataset:
```{r fig.align='center', fig.height=5, fig.width=8, message=FALSE, warning=FALSE}
library(usmap)
library(ggplot2)
cities_t <- usmap_transform(citypop)
plot_usmap(fill = "yellow", alpha = 0.25) +
geom_sf(data = cities_t,
aes(size = city_pop),
color = "purple", alpha = 0.5) +
ggrepel::geom_label_repel(data = cities_t,
aes(label = most_populous_city, geometry = geometry),
size = 3, alpha = 0.8,
label.r = unit(0.5, "lines"), label.size = 0.5,
segment.color = "red", segment.size = 1,
stat = "sf_coordinates", seed = 1002,
max.overlaps = 20) +
scale_size_continuous(range = c(1, 16),
label = scales::comma) +
labs(title = "Most Populous City in Each US State",
subtitle = "Source: US Census 2010",
size = "City Population") +
theme(legend.position = "right")
```
#### Example: major rivers in the United States
Here is an example of transforming and plotting another `sf` object on the map,
using the provided `usrivers` dataset. In this example the width of the river
corresponds to its length, and the color indicates the river system it belongs
to.
```{r fig.align='center', fig.height=5, fig.width=8, message=FALSE, warning=FALSE}
library(usmap)
library(ggplot2)
rivers_t <- usmap_transform(usrivers)
plot_usmap("counties", color = "gray80") +
geom_sf(data = rivers_t, aes(linewidth = Shape_Length, color = SYSTEM, fill = SYSTEM)) +
scale_linewidth_continuous(range = c(0.3, 1.8), guide = "none") +
scale_color_discrete(guide = "none") +
labs(title = "Major Rivers in the United States",
subtitle = "Source: ESRI 2010",
fill = "River System") +
theme(legend.position = "right")
```
<hr>
The `usmap_transform` function, combined with the power of `ggplot2` layers, allows for some unique and complex data visualizations on the US map. It also handles transforming points in the Alaska/Hawaii area so that they are displayed appropriately on their respective states.
<br><br><br><br><br><br><br><br><br><br>
|
/scratch/gouwar.j/cran-all/cranData/usmap/inst/doc/usmap3.Rmd
|
---
title: "1. Introduction"
author: "Paolo Di Lorenzo"
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{1. Introduction}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r setup, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
```
## Plotting
Plots of US maps in R usually lack Alaska and Hawaii. The reason is that plotting takes the literal longitude and latitude coordinates and maps them onto a Cartesian x-y plane. Alaska and Hawaii lie far from the mainland US in those coordinates, so it can be unwieldy to include them. The `usmap` package solves this issue by providing data frames which have Alaska and Hawaii moved to a convenient spot just to the bottom left of the contiguous United States.
#### Blank US state map
```{r, fig.align='center', fig.width=7}
usmap::plot_usmap()
```
#### Blank US county map
```{r, fig.align='center', fig.width=7}
usmap::plot_usmap(regions = "counties")
```
## Raw map data
The raw US map data for counties or states can be obtained for further manipulation (and joining with data). The default `regions` is `"states"`.
```{r, eval = FALSE}
states_df <- usmap::us_map()
counties_df <- usmap::us_map(regions = "counties")
```
## FIPS codes
FIPS codes are defined in the Federal Information Processing Standards by the US government. One usage is uniquely identifying US states and counties (among other things such as identifying countries for the CIA World Factbook). Downloading datasets from the [US Census](https://www.census.gov/data.html) will often include FIPS codes as identifiers so it can be helpful to know what a FIPS code represents. The functions in `usmap` are built around the FIPS code identification system and so convenience methods for accessing them and performing reverse-lookups have been included.
#### State/County FIPS lookup
```{r}
# Get FIPS code for a state
usmap::fips(state = "MA")
usmap::fips(state = "Massachusetts")
# Get FIPS code for a county
usmap::fips(state = "NJ", county = "Bergen")
usmap::fips(state = "CA", county = "Orange County")
# The parameters are NOT case sensitive!
usmap::fips(state = "ca", county = "oRanGe cOUNty")
```
#### FIPS reverse lookup
If the FIPS code is known and you want to see what state/county it corresponds to, use the reverse lookup function `fips_info`.
```{r}
usmap::fips_info(c("30", "33", "34"))
```
```{r}
usmap::fips_info(c("01001", "01003", "01005", "01007"))
```
#### Further reading
More information about FIPS can be read [here](https://en.wikipedia.org/wiki/Federal_Information_Processing_Standards).
---
> “A map is the greatest of all epic poems. Its lines and colors show the realization of great dreams.”
> - _Gilbert H. Grosvenor, Editor of National Geographic (1903 - 1954)_
|
/scratch/gouwar.j/cran-all/cranData/usmap/vignettes/usmap1.Rmd
|
---
title: "2. Mapping the US"
author: "Paolo Di Lorenzo"
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{2. Mapping the US}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r setup, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
```
## Extending `plot_usmap` with `ggplot2`
The nice thing about `usmap::plot_usmap` is that it returns a [ggplot object](https://ggplot2.tidyverse.org/index.html), which means we can add `ggplot` layers to the plot right out of the box.
```{r, fig.align='center', fig.width=7, message=FALSE, warning=FALSE}
library(usmap)
library(ggplot2)
plot_usmap(regions = "counties") +
labs(title = "US Counties",
subtitle = "This is a blank map of the counties of the United States.") +
theme(panel.background = element_rect(color = "black", fill = "lightblue"))
```
#### Plot only certain states
```{r, fig.align='center', fig.width=7, message=FALSE, warning=FALSE}
library(usmap)
library(ggplot2)
plot_usmap(include = c("CA", "ID", "NV", "OR", "WA")) +
labs(title = "Western US States",
subtitle = "These are the states in the Pacific Timezone.")
```
#### Add some data to the map
```{r, fig.align='center', fig.width=7, message=FALSE, warning=FALSE}
library(usmap)
library(ggplot2)
plot_usmap(data = statepop, values = "pop_2022", color = "red") +
scale_fill_continuous(name = "Population (2022)", label = scales::comma) +
theme(legend.position = "right")
```
Notice how extensively the map can be customized using `ggplot2` layers. For example, we might want to use a different color scheme.
#### Change fill color scale
```{r, fig.align='center', fig.width=7, message=FALSE, warning=FALSE}
library(usmap)
library(ggplot2)
plot_usmap(data = statepop, values = "pop_2022", color = "red") +
scale_fill_continuous(
low = "white", high = "red", name = "Population (2022)", label = scales::comma
) + theme(legend.position = "right")
```
The data-filled map can also be filtered to show certain regions only, like the western states shown above.
#### Show data in certain states
```{r, fig.align='center', fig.width=7, message=FALSE, warning=FALSE}
library(usmap)
library(ggplot2)
plot_usmap(
data = statepop, values = "pop_2022", include = c("CA", "ID", "NV", "OR", "WA"), color = "red"
) +
scale_fill_continuous(
low = "white", high = "red", name = "Population (2022)", label = scales::comma
) +
labs(title = "Western US States", subtitle = "These are the states in the Pacific Timezone.") +
theme(legend.position = "right")
```
### Required Data Format
The data passed to the `data` parameter in `plot_usmap()` must be a data frame
with at least two columns. One of the columns must be named `"fips"` or `"state"` and contain
either the FIPS code, the state abbreviation, or the state name (for county maps
only the FIPS code is supported). The second column must be the values to be plotted
for each region. The default name of the values column is `"values"`. If a different
name is used in the data frame, the name can be specified in the `values` parameter
of `plot_usmap`. Any extra columns in the data frame will be ignored.
#### FIPS column with default `values` column
```{r, fig.show='hide', message=FALSE, warning=FALSE}
df <- data.frame(
fips = c("02", "01", "05", "04"),
values = c(14, 18, 19, 8)
)
plot_usmap(data = df)
```
#### FIPS column with custom `values` column
Name of values column must be specified in `values` parameter if it is not `"values"`.
```{r, fig.show='hide', message=FALSE, warning=FALSE}
df <- data.frame(
fips = c("02", "01", "05", "04"),
population = c(14, 18, 19, 8)
)
plot_usmap(data = df, values = "population")
```
#### States
Abbreviations and full names can be mixed if desired.
```{r, fig.show='hide', message=FALSE, warning=FALSE}
df <- data.frame(
state = c("AL", "Alaska", "AR", "AZ"),
values = c(14, 18, 19, 8)
)
plot_usmap(data = df)
```
#### Counties
County names are not supported in `plot_usmap` data frames. Use `fips` instead.
```{r, fig.show='hide', message=FALSE, warning=FALSE}
df <- data.frame(
fips = c("10001", "10003", "10005"),
values = c(93, 98, 41)
)
plot_usmap(data = df)
```
## Built-in Regions
`usmap` provides some built-in regions based on the [US Census Bureau Regions and Divisions](https://www2.census.gov/geo/pdfs/maps-data/maps/reference/us_regdiv.pdf). These can be used in place of the `include`/`exclude` parameters when using `us_map` or `plot_usmap` and start with a `.` (dot):
```{r, fig.align='center', fig.width=7, message=FALSE, warning=FALSE}
usmap::plot_usmap(include = .south_region)
```
```{r, fig.align='center', fig.width=7, message=FALSE, warning=FALSE}
usmap::plot_usmap(include = .east_south_central)
```
```{r, fig.align='center', fig.width=7, message=FALSE, warning=FALSE}
usmap::plot_usmap(include = .south_region, exclude = .east_south_central)
```
This also works with county maps. The regions can also be combined with actual state or FIPS values within the `include`/`exclude` parameters:
```{r, fig.align='center', fig.width=7, message=FALSE, warning=FALSE}
usmap::plot_usmap("counties",
include = c(.south_region, "IA"),
exclude = c(.east_south_central, "12")) # 12 = FL
```
You can even include or exclude individual counties (county-level inclusions/exclusions can only be done via their FIPS codes due to duplicate county names across states; for example eight different states have an "Orange County"):
```{r, fig.align='center', fig.width=7, message=FALSE, warning=FALSE}
usmap::plot_usmap("counties", fill = "yellow", alpha = 0.25,
# 06065 = Riverside County, CA
include = c(.south_region, "IA", "06065"),
# 12 = FL, 48141 = El Paso County, TX
exclude = c(.east_south_central, "12", "48141"))
```
These parameters therefore allow for the possibility of some complex compositions of states and counties, to create the exact map that is desired.
#### Supported US Census Regions and Divisions
The following divisions are supported:
```{r}
.new_england
.mid_atlantic
.east_north_central
.west_north_central
.south_atlantic
.east_south_central
.west_south_central
.mountain
.pacific
```
Regions are composed of multiple divisions, and the following are supported:
```{r}
.northeast_region # c(.new_england, .mid_atlantic)
.north_central_region # c(.east_north_central, .west_north_central)
.midwest_region # .north_central_region (renamed in June 1984)
.south_region # c(.south_atlantic, .east_south_central, .west_south_central)
.west_region # c(.mountain, .pacific)
```
## Raw map data
The raw US map data for counties or states can be obtained for further manipulation (and joining with data).
```{r}
str(usmap::us_map())
```
```{r}
str(usmap::us_map(regions = "counties"))
```
You can also include only certain states and counties just like in `plot_usmap`. In fact, the `regions` and `include` parameters of `plot_usmap` are derived directly from their usage in `us_map`.
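For example, the raw county map data can be limited to a few states (a minimal sketch; the same state abbreviations, names, or FIPS codes accepted by `plot_usmap` work here as well):
```{r}
str(usmap::us_map(regions = "counties", include = c("MA", "CT", "RI")))
```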
|
/scratch/gouwar.j/cran-all/cranData/usmap/vignettes/usmap2.Rmd
|
---
title: "3. Advanced Mapping"
author: "Paolo Di Lorenzo"
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{3. Advanced Mapping}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r setup, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
```
This vignette will explore some of the more advanced mapping features of `usmap`. Before continuing, be sure to check out [Mapping the US](usmap2.html) as that will cover more of the basics of plotting US maps and styling them with `ggplot2`.
## Labels
As of `usmap 0.4.0`, maps with state labels can be created:
```{r, fig.align='center', fig.width=7, fig.height=5, message=FALSE, warning=FALSE}
usmap::plot_usmap("states", labels = TRUE)
```
`usmap 0.5.0` adds the ability to add county labels:
```{r, fig.align='center', fig.width=7, fig.height=5, message=FALSE, warning=FALSE}
usmap::plot_usmap("counties", include = c("MA", "CT", "RI"), labels = TRUE)
```
Labels can be colored using the `label_color` parameter:
```{r, fig.align='center', fig.width=7, fig.height=5, message=FALSE, warning=FALSE}
usmap::plot_usmap("counties",
include = c("MA", "CT", "RI"),
labels = TRUE, label_color = "blue")
```
## `ggplot2` aesthetic mapping parameters
Parameters used by the map's aesthetic mapping (`ggplot2::aes`) can be passed directly via `plot_usmap` by adding the parameters anywhere at the call site:
```{r, fig.align='center', fig.width=7, fig.height=5, message=FALSE, warning=FALSE}
usmap::plot_usmap("counties",
include = c("MA", "CT", "RI"),
labels = TRUE, label_color = "blue",
fill = "yellow", alpha = 0.25, color = "orange", linewidth = 2)
```
Notice in this case we set the `fill` and `alpha` parameters to fill in the counties with a semi-transparent yellow color.
The following parameters are supported:
- `fill`: fill color of the state/county polygons
- `alpha`: transparency of the state/county polygon fill colors
- `color`/`colour`: line color of the state/county polygons
- `linewidth`: thickness of the state/county polygon lines
## Transform data frames to match `usmap` projection
Data sets with longitude and latitude coordinates can be transformed to match the projection used in `usmap` (Albers Equal Area projection). This is convenient for plotting location-specific data and values using `ggplot2` layers such as `geom_point` and `geom_label`.
#### Projection
The projection used by `usmap` can also be accessed by using `usmap_crs()`:
```{r, warning=FALSE}
usmap::usmap_crs()
```
A convenience method called `usmap_transform` is provided that transforms a `data.frame` containing longitude/latitude columns to use this projection. (Currently, only `data.frame`s are supported. Other structures may be supported in the future.)
#### Example: earthquakes
Here is an example using the provided `earthquakes` dataset:
```{r, fig.align='center', fig.width=8, fig.height=5, message=FALSE, warning=FALSE}
library(usmap)
library(ggplot2)
eq_transformed <- usmap_transform(earthquakes)
plot_usmap() +
geom_sf(data = eq_transformed, aes(size = mag),
color = "red", alpha = 0.25) +
labs(title = "US Earthquakes",
subtitle = "Source: USGS, Jan 1 to Jun 30 2019",
size = "Magnitude") +
theme(legend.position = "right")
```
#### Example: most populous city in each state
And a more comprehensive example using the provided `citypop` dataset:
```{r fig.align='center', fig.height=5, fig.width=8, message=FALSE, warning=FALSE}
library(usmap)
library(ggplot2)
cities_t <- usmap_transform(citypop)
plot_usmap(fill = "yellow", alpha = 0.25) +
geom_sf(data = cities_t,
aes(size = city_pop),
color = "purple", alpha = 0.5) +
ggrepel::geom_label_repel(data = cities_t,
aes(label = most_populous_city, geometry = geometry),
size = 3, alpha = 0.8,
label.r = unit(0.5, "lines"), label.size = 0.5,
segment.color = "red", segment.size = 1,
stat = "sf_coordinates", seed = 1002,
max.overlaps = 20) +
scale_size_continuous(range = c(1, 16),
label = scales::comma) +
labs(title = "Most Populous City in Each US State",
subtitle = "Source: US Census 2010",
size = "City Population") +
theme(legend.position = "right")
```
#### Example: major rivers in the United States
Here is an example of transforming and plotting another `sf` object on the map,
using the provided `usrivers` dataset. In this example the width of the river
corresponds to its length, and the color indicates the river system it belongs
to.
```{r fig.align='center', fig.height=5, fig.width=8, message=FALSE, warning=FALSE}
library(usmap)
library(ggplot2)
rivers_t <- usmap_transform(usrivers)
plot_usmap("counties", color = "gray80") +
geom_sf(data = rivers_t, aes(linewidth = Shape_Length, color = SYSTEM, fill = SYSTEM)) +
scale_linewidth_continuous(range = c(0.3, 1.8), guide = "none") +
scale_color_discrete(guide = "none") +
labs(title = "Major Rivers in the United States",
subtitle = "Source: ESRI 2010",
fill = "River System") +
theme(legend.position = "right")
```
<hr>
The `usmap_transform` function, combined with the power of `ggplot2` layers, allows for some unique and complex data visualizations on the US map. It also handles transforming points in the Alaska/Hawaii area so that they are displayed appropriately on their respective states.
<br><br><br><br><br><br><br><br><br><br>
|
/scratch/gouwar.j/cran-all/cranData/usmap/vignettes/usmap3.Rmd
|
#' Internal map creation tools
#'
#' @description
#' `create_us_map()` creates the modified shapefiles used by the
#' \link[usmap]{usmap} package.
#'
#' `ea_crs()` returns the US National Atlas Equal Area coordinate reference system
#' (CRS) used by this package and `usmap`.
#'
#' `transform2D()` computes a two dimensional affine transformation matrix
#' for the provided rotation angle and scale factor.
#'
#' `transform_alaska()` applies the appropriate transform for the Alaska polygons.
#'
#' `transform_hawaii()` applies the appropriate transform for the Hawaii polygons.
#'
#' `compute_centroids()` computes the modified centroids for each state or
#' county polygon using a center-of-mass technique on the largest polygon in
#' the region.
#'
#' `alaska_bbox()` returns the bounding box of Alaska pre-transformation.
#'
#' `hawaii_bbox()` returns the bounding box of Hawaii pre-transformation.
#'
#' @note
#' Using these functions externally is not recommended since they make certain
#' undocumented assumptions that may not work with all inputs.
#'
#' It is strongly recommended that the \link[usmap]{usmap} package is used
#' directly.
#'
#' In some cases where the raw data is required, the \link{us_map} and
#' \link{centroid_labels} functions located in this package can be used instead.
#'
#' @references {
#' Gert (2017). “How to calculate
#' polygon centroids in R (for
#' non-contiguous shapes).”
#' <https://gis.stackexchange.com/a/265475>.
#'
#' Rudis B (2014). “Moving The Earth
#' (well, Alaska & Hawaii) With R.”
#' <https://rud.is/b/2014/11/16/moving-the-earth-well-alaska-hawaii-with-r/>.
#' }
#'
#' @keywords internal
create_us_map <- function(
type = c("states", "counties"),
input_file,
output_file
) {
# check for dplyr
if (!requireNamespace("dplyr", quietly = TRUE)) {
stop("`dplyr` must be installed to use `create_us_map()`.
Use: install.packages(\"dplyr\") and try again.")
}
type <- match.arg(type)
# import map file
us <- sf::read_sf(input_file)
# ea: US National Atlas Equal Area
us_ea <- sf::st_transform(us, ea_crs())
# FIPS code for Alaska = 02
alaska <- transform_alaska(us_ea[us_ea$STATEFP == "02", ])
# FIPS code for Hawaii = 15
hawaii <- transform_hawaii(us_ea[us_ea$STATEFP == "15", ])
# keep only US states (i.e. remove territories, minor outlying islands, etc.)
# also remove Alaska (02) and Hawaii (15) so that we can add in the shifted versions
us_ea <- us_ea[!us_ea$STATEFP %in% c(as.character(57:80), "02", "15"), ]
us_ea <- rbind(us_ea, alaska, hawaii)
# delete unused columns
cols <- c()
if (type == "states") {
cols <- c("GEOID", "STUSPS", "NAME")
} else if (type == "counties") {
cols <- c("GEOID", "STUSPS", "STATE_NAME", "NAMELSAD")
}
us_ea <- dplyr::select(us_ea, dplyr::all_of(cols))
# rename remaining columns
new_cols <- c()
if (type == "states") {
new_cols <- c(fips = "GEOID", abbr = "STUSPS", full = "NAME")
} else if (type == "counties") {
new_cols <- c(fips = "GEOID", abbr = "STUSPS", full = "STATE_NAME", county = "NAMELSAD")
}
us_ea <- dplyr::rename(us_ea, dplyr::all_of(new_cols))
# sort output
if (type == "states") {
us_ea <- dplyr::arrange(us_ea, .data$abbr)
} else if (type == "counties") {
us_ea <- dplyr::arrange(us_ea, .data$abbr, .data$county)
}
# export modified shape file
sf::st_write(us_ea, output_file, quiet = TRUE, append = FALSE)
# compute centroids
centroids <- compute_centroids(us_ea)
# determine centroids file path
centroids_output_file <- file.path(
dirname(output_file),
paste0(
tools::file_path_sans_ext(basename(output_file)),
"_centroids.",
tools::file_ext(output_file)
)
)
# export centroids
sf::st_write(centroids, centroids_output_file, quiet = TRUE, append = FALSE)
}
#' @rdname create_us_map
#' @keywords internal
ea_crs <- function() {
sf::st_crs(9311) # US National Atlas Equal Area coordinate reference system
}
#' @rdname create_us_map
#' @keywords internal
transform2D <- function(angle = 0, scale = 1) {
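# convert the angle from degrees to radians, then build a 2x2 rotation matrix scaled by 'scale' (column-major order)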
r <- angle * pi / 180
matrix(c(scale * cos(r), scale * sin(r),
-scale * sin(r), scale * cos(r)), 2, 2)
}
#' @rdname create_us_map
#' @keywords internal
transform_alaska <- function(alaska) {
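# rotate Alaska by -50 degrees at half scale, then translate it toward the bottom-left of the contiguous US; the affine operations drop the CRS, so it is reassigned afterwards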
sf::st_geometry(alaska) <- sf::st_geometry(alaska) * transform2D(-50, 1 / 2)
sf::st_geometry(alaska) <- sf::st_geometry(alaska) + c(3e5, -2e6)
sf::st_crs(alaska) <- ea_crs()
alaska
}
#' @rdname create_us_map
#' @keywords internal
transform_hawaii <- function(hawaii) {
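# rotate Hawaii by -35 degrees (no scaling), then translate it below the contiguous US; the CRS is reassigned after the affine operation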
sf::st_geometry(hawaii) <- sf::st_geometry(hawaii) * transform2D(-35)
sf::st_geometry(hawaii) <- sf::st_geometry(hawaii) + c(3.6e6, 1.8e6)
sf::st_crs(hawaii) <- ea_crs()
hawaii
}
#' @rdname create_us_map
#' @keywords internal
compute_centroids <- function(polygons, iterations = 3, initial_width_step = 10) {
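# Shrink each polygon with repeated negative buffers so its centroid falls within the dominant landmass, then take the centroid of the largest remaining piece.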
if (iterations < 1) {
stop("`iterations` must be greater than or equal to 1.")
}
if (initial_width_step < 1) {
stop("`initial_width_step` must be greater than or equal to 1.")
}
new_polygons <- sf::st_as_sf(polygons)
# Iterate through each provided polygon
for (i in seq_len(nrow(polygons))) {
width <- -initial_width_step
area <- as.numeric(sf::st_area(polygons[i, ]))
current_polygon <- polygons[i, ]
isEmpty <- FALSE
for (j in 1:iterations) {
# Stop if buffer polygon becomes empty
if (!isEmpty) {
buffer <- sf::st_buffer(current_polygon, dist = width)
# Repeatedly increase buffer size until non-empty if needed
subtract_width <- width / 20
while (sf::st_is_empty(buffer)) {
width <- width - subtract_width
buffer <- sf::st_buffer(current_polygon, dist = width)
isEmpty <- TRUE
}
new_area <- as.numeric(sf::st_area(buffer))
# Determine width needed to reduce area to 1/4 of current
# for next iteration
slope <- (new_area - area) / width
width <- (area / 4 - area) / slope
# Set values for next iteration
area <- new_area
current_polygon <- buffer
}
}
# Determine biggest polygon in case of multiple polygons
d <- sf::st_geometry(current_polygon)
if (length(d) > 1) {
biggest_area <- sf::st_area(d[1, ])
which_polygon <- 1
for (k in 2:length(d)) {
if (sf::st_area(d[k, ]) > biggest_area) {
biggest_area <- sf::st_area(d[k, ])
which_polygon <- k
}
}
current_polygon <- d[which_polygon, ]
}
# Replace existing polygon with new polygon
new_polygons[i, ] <- current_polygon
}
# Return centroids of newly computed polygons
sf::st_agr(new_polygons) <- "constant"
sf::st_centroid(new_polygons)
}
#' @rdname create_us_map
#' @keywords internal
alaska_bbox <- function() {
sf::st_as_sfc(
sf::st_bbox(
c(
xmin = -4377000,
xmax = -1519000,
ymin = 1466000,
ymax = 3914000
),
crs = ea_crs()
)
)
}
#' @rdname create_us_map
#' @keywords internal
hawaii_bbox <- function() {
sf::st_as_sfc(
sf::st_bbox(
c(
xmin = -5750000,
xmax = -5450000,
ymin = -1050000,
ymax = -441000
),
crs = ea_crs()
)
)
}
|
/scratch/gouwar.j/cran-all/cranData/usmapdata/R/create-us-map.R
|
#' Retrieve state and county FIPS codes
#'
#' @param regions The region breakdown for the map, can be one of
#' (\code{"states"}, \code{"state"}, \code{"counties"}, \code{"county"}).
#' The default is \code{"states"}.
#' @param as_sf Defunct, this parameter no longer has any effect and will be removed in
#' the future.
#'
#' @return A data frame of FIPS codes of the desired \code{regions}.
#'
#' @examples
#' str(fips_data())
#'
#' state_fips <- fips_data()
#' county_fips <- fips_data(regions = "counties")
#'
#' @export
fips_data <- function(
regions = c("states", "state", "counties", "county"),
as_sf = TRUE
) {
regions <- match.arg(regions)
map_data <- usmapdata::us_map(regions)
sf::st_geometry(map_data) <- NULL
map_data
}
|
/scratch/gouwar.j/cran-all/cranData/usmapdata/R/fips-data.R
|
#' Retrieve US map data
#'
#' @param regions The region breakdown for the map, can be one of
#' (\code{"states"}, \code{"state"}, \code{"counties"}, \code{"county"}).
#' The default is \code{"states"}.
#' @param include The regions to include in the resulting map. If \code{regions} is
#' \code{"states"}/\code{"state"}, the value can be either a state name, abbreviation or FIPS code.
#' For counties, the FIPS must be provided as there can be multiple counties with the
#' same name. If states are provided in the county map, only counties in the included states
#' will be returned.
#' @param exclude The regions to exclude in the resulting map. If \code{regions} is
#' \code{"states"}/\code{"state"}, the value can be either a state name, abbreviation or FIPS code.
#' For counties, the FIPS must be provided as there can be multiple counties with the
#' same name. The regions listed in the \code{include} parameter are applied first and the
#' \code{exclude} regions are then removed from the resulting map. Any excluded regions
#' not present in the included regions will be ignored.
#' @param as_sf Defunct, this parameter no longer has any effect and will be removed in
#' the future.
#'
#' @return An `sf` data frame of US map coordinates divided by the desired \code{regions}.
#'
#' @examples
#' str(us_map())
#'
#' df <- us_map(regions = "counties")
#' west_coast <- us_map(include = c("CA", "OR", "WA"))
#'
#' excl_west_coast <- us_map(exclude = c("CA", "OR", "WA"))
#'
#' @export
us_map <- function(
regions = c("states", "state", "counties", "county"),
include = c(),
exclude = c(),
as_sf = TRUE
) {
regions <- match.arg(regions)
if (regions == "state") regions <- "states"
else if (regions == "county") regions <- "counties"
df <- sf::read_sf(
system.file("extdata", paste0("us_", regions, ".gpkg"),
package = "usmapdata")
)
if (length(include) > 0) {
df <- df[df$full %in% include |
df$abbr %in% include |
df$fips %in% include |
substr(df$fips, 1, 2) %in% include, ]
}
if (length(exclude) > 0) {
df <- df[!(df$full %in% exclude |
df$abbr %in% exclude |
df$fips %in% exclude |
substr(df$fips, 1, 2) %in% exclude), ]
}
df[order(df$abbr), ]
}
#' Retrieve centroid labels
#'
#' @param regions The region breakdown for the map, can be one of
#' (\code{"states"}, \code{"counties"}, as specified by the internal file names.
#' The default is \code{"states"}.
#' @param as_sf Defunct, this parameter no longer has any effect and will be removed in
#' the future.
#'
#' @return An `sf` data frame of state or county centroid labels and positions
#' relative to the coordinates returned by the \code{us_map} function.
#'
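#' @examples
#' # A small illustrative call: read the bundled centroid label positions for
#' # states (the default) or for counties.
#' state_centroids <- centroid_labels()
#' county_centroids <- centroid_labels(regions = "counties")
#'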
#' @export
centroid_labels <- function(
regions = c("states", "counties"),
as_sf = TRUE
) {
regions <- match.arg(regions)
sf::read_sf(
system.file("extdata", paste0("us_", regions, "_centroids.gpkg"),
package = "usmapdata")
)
}
|
/scratch/gouwar.j/cran-all/cranData/usmapdata/R/us-map.R
|
#' usmapdata: Mapping Data for usmap Package
#'
#' @description
#' It is usually difficult to create US maps that place both
#' Alaska and Hawaii in a convenient spot. All map
#' data frames produced by this package use the Albers Equal Area
#' projection.
#'
#' @section Map data frames:
#' Alaska and Hawaii have been manually moved to a new location so that
#' their new coordinates place them to the bottom-left corner of
#' the map. These maps can be accessed by using the \code{\link{us_map}} function.
#'
#' The function provides the ability to retrieve maps with either
#' state borders or county borders using the \code{regions} parameter
#' for convenience.
#'
#' States (or counties) can be included such that all other states (or counties)
#' are excluded using the \code{include} parameter.
#'
#' @author Paolo Di Lorenzo \cr
#' \itemize{
#' \item Email: \email{dilorenzo@@hey.com}
#' \item GitHub: \url{https://github.com/pdil/}
#' }
#'
#' @seealso
#' Helpful links:
#' \itemize{
#' \item US Census Shapefiles \cr
#' \url{https://www.census.gov/geographies/mapping-files/time-series/geo/tiger-line-file.html}
#' \item Map Features \cr
#' \url{https://en.wikipedia.org/wiki/Map_projection}
#' \url{https://en.wikipedia.org/wiki/Equal-area_projection}
#' \url{https://epsg.io/9311}
#' }
#'
#' @references {
#' Rudis B (2014). “Moving The Earth
#' (well, Alaska & Hawaii) With R.”
#' <https://rud.is/b/2014/11/16/moving-the-earth-well-alaska-hawaii-with-r/>.
#' }
#'
#' @docType package
#' @name usmapdata
"_PACKAGE"
## usethis namespace: start
#' @importFrom rlang .data
## usethis namespace: end
NULL
|
/scratch/gouwar.j/cran-all/cranData/usmapdata/R/usmapdata-package.R
|
/scratch/gouwar.j/cran-all/cranData/ussherR/R/ussh.R
|
|
#' Cleaned and tidied data drawn from Archbishop James Ussher's chronology of ancient history known popularly as The Annals of the World (1658).
#'
#' "ussher" file - Chronological textual historic events are classified by index, year, epoch (or one of the 7 ancient "Ages of the World", Biblical source book if referenced (rarely), as well as alternate dating mechanisms, such as "Anno Mundi" (age of the world) or Julian Period. Additional file "usshfull" includes variables that may be of further interest to historians, such as Southern Kingdom and Northern Kingdom discrepant dates, and the original amalgamated Dating mechanic used by Ussher in the original text. The raw data can also be called using "usshraw"
#'
#' @format A tibble with 5011 rows and 8 variables:
#' \describe{
#' \item{Index}{dbl The indexed number of Ussher's paragraphs, in ascending chronological order, added by later editors.}
#' \item{EventTxt}{chr Ussher's original historical paragraph, cleaned for text processing}
#' \item{YearBCAD}{dbl Harmonized year BC or AD proposed by Ussher. BC is a negative number. AD is positive.}
#' \item{Epoch}{chr denoting which of the 7 ancient "Ages of the World" the event falls into}
#' \item{BibBk1}{chr a Biblical Book reference, if one is included in the paragraph}
#' \item{AnnoMund}{dbl denoting "Anno Mundi", or age of the world from Creation (4004 BC)}
#' \item{Season}{chr denoting which quarter of the year - Autumn, Winter, Spring or Summer that event was calculated to occur}
#' \item{JulPer}{dbl denoting Julian Period date. Julian Period was backdated to creation by Ussher, following its adoption by Caesar in 43 BC}
#' }
#' @source \url{https://archive.org/stream/AnnalsOfTheWorld/Annals_djvu.txt}
#' @examples
#' summary(ussher)
"ussher"
|
/scratch/gouwar.j/cran-all/cranData/ussherR/R/ussher.R
|
#' Expanded data drawn from "ussher" Archbishop James Ussher's chronology of ancient history known popularly as The Annals of the World (1658).
#'
#' "usshfull" file - Expanded historical chronological textual historic events are classified by index, year, epoch (or one of the 7 ancient "Ages of the World", Biblical source book if referenced (rarely), as well as alternate dating mechanisms, such as "Anno Mundi" (age of the world) or Julian Period. Additional file "ussh.full" includes variables that may be of further interest to historians, such as Southern Kingdom and Northern Kingdom discrepant dates, and the original amalgamated Dating mechanic used by Ussher in the original text. The raw data can also be called using "ussh.raw"
#'
#' @format A tibble with 5011 rows and 11 variables:
#' \describe{
#' \item{Index}{dbl The indexed number of Ussher's paragraphs, in ascending chronological order, added by later editors.}
#' \item{EventTxt}{chr Ussher's original historical paragraph, cleaned for text processing}
#' \item{YearBCAD}{dbl Harmonized year BC or AD proposed by Ussher. BC is a negative number. AD is positive.}
#' \item{Epoch}{chr denoting which of the 7 ancient "Ages of the World" the event falls into}
#' \item{BibBk1}{chr a Biblical Book reference, if one is included in the paragraph}
#' \item{AnnoMund}{dbl denoting "Anno Mundi", or age of the world from Creation (4004 BC)}
#' \item{Season}{chr denoting which quarter of the year - Autumn, Winter, Spring or Summer that event was calculated to occur}
#' \item{SKing}{chr denoting the year of the respective Southern Kingdom reign of the sitting king. This is a chr file because it is easily confused for a chronological date. Convert to numeric and associate with sitting king if necessary for more advanced date comparisons and algorithms}
#' \item{NKing}{chr denoting the year of the respective Northern Kingdom reign of the sitting king. This is a chr file because it is easily confused for a chronological date. Convert to numeric and associate with sitting king if necessary for more advanced date comparisons and algorithms}
#' \item{JulPer}{dbl denoting Julian Period date. Julian Period was backdated to creation by Ussher, following its adoption by Caesar in 43 BC}
#' \item{Dating}{chr the original full-text string originally created by Ussher to identify, compare and harmonize the various dating methods in expressing the algorithms an historical analysis. All numeric date variables are drawn from this original date grouping}
#' }
#' @source \url{https://archive.org/stream/AnnalsOfTheWorld/Annals_djvu.txt}
#' @examples
#' summary(usshfull)
"usshfull"
|
/scratch/gouwar.j/cran-all/cranData/ussherR/R/usshfull.R
|
#' Raw original data directly drawn with fundamental cleaning from "Annals_djvu.txt" Archbishop James Ussher's chronology of ancient history known popularly as The Annals of the World (1658).
#'
#' "usshraw" file - Truncated listing of amalgamated data drawn from original chronological textual historic events are classified by index, year, epoch (or one of the 7 ancient "Ages of the World", Biblical source book if referenced (rarely), as well as alternate dating mechanisms, such as "Anno Mundi" (age of the world) or Julian Period. Additional file "ussh.full" includes variables that may be of further interest to historians, such as Southern Kingdom and Northern Kingdom discrepant dates, and the original amalgamated Dating mechanic used by Ussher in the original text. The raw data can also be called using "ussh.raw"
#'
#' @format A tibble with 5011 rows and 3 variables:
#' \describe{
#' \item{Event}{chr Ussher's original historical paragraph, cleaned for text processing}
#' \item{Epoch}{chr denoting which of the 7 ancient "Ages of the World" the event falls into}
#' \item{Dating}{chr the original full-text string originally created by Ussher to identify, compare and harmonize the various dating methods in expressing the algorithms an historical analysis. All numeric date variables are drawn from this original date grouping}
#' }
#' @source \url{https://archive.org/stream/AnnalsOfTheWorld/Annals_djvu.txt}
#' @examples
#' summary(usshraw)
"usshraw"
|
/scratch/gouwar.j/cran-all/cranData/ussherR/R/usshraw.R
|
#' Retrieve multiple pages of Fiscal Data API in a single call
#'
#' @description
#'
#' `ustfd_all_pages()` is similar to `ustfd_simple()` with the difference that,
#' for requests that generate multiple pages of results, it will request all
#' pages and merge them into a single result.
#'
#' While care has been taken to optimize `ustfd_all_pages()`, for requests
#' spanning more than 10 pages you should consider breaking up the call further
#' if memory use is a concern, especially if you are writing the results to disk
#' or a database with atomic transactions.
#'
#' @inheritParams ustfd_query
#' @inheritParams ustfd_request
#' @param slowly pause between http requests when set to `TRUE`
#' @param pause length, in seconds, to pause
#' @param quiet when set to `FALSE` updates will be output via a message
#'
#' @return a list containing the following items
#' * `meta` - the metadata returned by the API
#' * `data` - the payload returned by the API in table form.
#' See [`ustfd_response_payload()`]
#'
#' @export
#'
#' @family ustfd_user
#'
#' @examples
#' \dontrun{
#' library(ustfd)
#'
#' exchange_rates <- ustfd_all_pages(
#' 'v1/accounting/od/rates_of_exchange',
#' fields = c(
#' 'country_currency_desc', 'exchange_rate','record_date','effective_date'
#' ),
#' filter = list(
#' record_date = c('>=' = '2020-01-01'),
#' country_currency_desc = list('in' = c('Canada-Dollar','Mexico-Peso'))
#' )
#' )
#' }
ustfd_all_pages <- function(
endpoint, filter=NULL, fields=NULL, sort=NULL, page_size=10000L,
slowly = FALSE, pause = 0.25, quiet = TRUE,
user_agent='http://github.com/groditi/ustfd'
){
paged_request <- function(page_number){
if( !quiet )
rlang::inform(glue::glue("Requesting {endpoint} page {page_number}"))
ustfd_request(
ustfd_query(
endpoint = endpoint,
filter = filter,
fields = fields,
sort = sort,
page_size = page_size,
page_number = page_number
),
user_agent
)
}
if( slowly ){
rate <- purrr::rate_delay(pause)
paged_request <- purrr::slowly(paged_request, rate = rate, quiet = quiet)
}
keep <- c('labels', 'dataTypes', 'dataFormats','total-count','total-pages')
page <- paged_request(1)
meta <- page$meta[keep]
idx_start <- 1
idx_end <- length(page$data)
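# pre-allocate one list slot per record reported by the API so each page's rows can be filled in place without growing the list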
all_records <- vector(mode='list', length = meta$`total-count`)
all_records[idx_start:idx_end] <- page$data
rm(page)
if((page_count <- meta$`total-pages`) > 1){
for(page_num in 2:page_count){
page_data <- paged_request(page_num)$data
idx_start <- idx_end + 1
idx_end <- idx_end + length(page_data)
all_records[idx_start:idx_end] <- page_data
}
}
parsed <- parsed_payload(all_records, meta$dataTypes)
return(
list(
meta = meta,
data = parsed
)
)
}
|
/scratch/gouwar.j/cran-all/cranData/ustfd/R/paged_request.R
|
#' Retrieve Fiscal Data API in a single call
#'
#' @description
#'
#' `ustfd_simple()` aggregates the workflow for retrieving data from the API
#' into a single call.
#'
#' @inheritParams ustfd_query
#' @param user_agent optional string
#'
#' @return a list containing the following items
#' * `meta` - the metadata returned by the API
#' * `data` - the payload returned by the API in table form.
#' See [`ustfd_response_payload()`]
#'
#' @export
#'
#' @family ustfd_user
#'
#' @examples
#' \dontrun{
#' library(ustfd)
#'
#' exchange_rates <- ustfd_simple(
#' 'v1/accounting/od/rates_of_exchange',
#' fields = c(
#' 'country_currency_desc', 'exchange_rate','record_date','effective_date'
#' ),
#' filter = list(
#' record_date = c('>=' = '2020-01-01'),
#' country_currency_desc = list('in' = c('Canada-Dollar','Mexico-Peso'))
#' )
#' )
#' }
ustfd_simple <- function(
endpoint, filter=NULL, fields=NULL, sort=NULL, page_size=NULL, page_number=NULL,
user_agent='http://github.com/groditi/ustfd'
){
query <- ustfd_query(endpoint, filter, fields, sort, page_size, page_number)
response <- ustfd_request(query, user_agent)
return(
list(
meta = ustfd_response_meta_object(response),
data = ustfd_response_payload(response)
)
)
}
#' Retrieve Data From the U.S. Bureau Of the Fiscal Service API
#'
#' @description
#'
#' `ustfd_request()` will execute queries against the Fiscal Data API. Queries
#' can generated using [ustfd_query()].
#'
#' @param query list generated by one of the query generating functions
#' @param user_agent string, optional
#' @param process_response function, optional. processes the `httr` response
#' object. Defaults to [`ustfd_json_response()`] which will return the JSON
#' payload parsed into a list
#' @param ... further arguments will be passed to `process_response` when called
#'
#' @return a httr response object
#'
#' @export
#'
#' @family ustfd_low_level
#'
#' @examples
#' \dontrun{
#' library(ustfd)
#' query <- ustfd_query('v1/accounting/dts/dts_table_2', sort =c('-record_date'))
#' response <- ustfd_request(query)
#' payload_table <- ustfd_response_payload(response)
#' payload_meta <- ustfd_response_meta_object(response)
#' }
ustfd_request <- function(
query,
user_agent='http://github.com/groditi/ustfd',
process_response = ustfd_json_response,
...
){
url <- utils::URLdecode(ustfd_url(query))
response <- httr::GET(url, httr::user_agent(user_agent))
#httr::stop_for_status(response)
if(response$status_code > 200){
msg_text <- sprintf('Status code "%s" for URL %s', response$status_code, url)
rlang::warn(msg_text)
rlang::abort(httr::http_status(response)$message)
}
return(process_response(response, ...))
}
#' Process JSON Response of a Successful API Query
#'
#' @description
#'
#' `ustfd_json_response()` will process the response to a successful request
#' from Fiscal Data API and translate a JSON object into a R data structure.
#'
#' @param response an httr response returned by [ustfd_request()]
#' @param ... additional arguments passed to `httr::content`
#'
#' @return a list
#'
#' @export
#'
#' @family ustfd_low_level
#'
#' @examples
#' \dontrun{
#' library(ustfd)
#' query <- ustfd_query('v1/accounting/dts/dts_table_2', sort =c('-record_date'))
#' response <- ustfd_request(query)
#' payload_table <- ustfd_response_payload(response)
#' payload_meta <- ustfd_response_meta_object(response)
#' }
ustfd_json_response <- function(response, ...){
if(httr::headers(response)[['content-type']] != 'application/json')
rlang::abort(paste(httr::headers(response)[['content-type']], 'is not JSON'))
parsed <- httr::content(response, as = 'parsed', simplifyVector = FALSE, ...)
if('error' %in% names(parsed))
rlang::abort(parsed$message)
return(parsed)
}
#' Extract Metadata From Parsed API Response
#'
#' @description
#'
#' `ustfd_response_meta_object()` will return the meta object included in a
#' successful API response. The meta object is a list with the following items:
#'
#' * `count` - the number of records in the response
#' * `labels` - a named list of labels for each field
#' * `dataTypes` - a named list describing the data type for each field
#' * `dataFormats` - a named list describing the data format for each field
#' * `total-count` - the total number of records matching the query
#' * `total-pages` - the total number of pages of records matching the query
#'
#'
#' @param response a parsed response returned by [ustfd_json_response()]
#'
#' @return a list
#'
#' @export
#'
#' @family ustfd_low_level
#'
#' @examples
#' \dontrun{
#' library(ustfd)
#' query <- ustfd_query('v1/accounting/dts/dts_table_2', sort =c('-record_date'))
#' response <- ustfd_request(query)
#' payload_table <- ustfd_response_payload(response)
#' payload_meta <- ustfd_response_meta_object(response)
#' }
ustfd_response_meta_object <- function(response){
response$meta
}
#' Extract Payload as Table From Parsed API Response
#'
#' @description
#'
#' `ustfd_response_payload()` will return the results of the query in tabular
#' format in the form of a tibble with one column for each field returned and
#' one row for every record returned in the same order they were returned.
#'
#' @param response a parsed response returned by [ustfd_json_response()]
#'
#' @return a tibble
#'
#' @export
#'
#' @family ustfd_low_level
#'
#' @examples
#' \dontrun{
#' library(ustfd)
#' query <- ustfd_query('v1/accounting/dts/dts_table_2', sort =c('-record_date'))
#' response <- ustfd_request(query)
#' payload_table <- ustfd_response_payload(response)
#' payload_meta <- ustfd_response_meta_object(response)
#' }
#'
ustfd_response_payload <- function(response){
meta <- ustfd_response_meta_object(response)
empty_prototype <- empty_table_prototype(meta$dataTypes)
if(meta$count == 0) return(empty_prototype)
parsed_payload(response$data, meta$dataTypes)
}
parsed_payload <- function(data, data_types){
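# look up a parser for each column from its declared Fiscal Data type, convert literal "null" strings to NA, then parse column by column into a tibble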
col_parsers <- col_processor_map(data_types)
tbl <- lapply(
payload_transpose(data, template = names(data_types)),
function(x){ x[x == 'null'] <- NA; x}
)
rm(data)
#lapply( # against all expectations this actually profiles worse than imap
# rlang::set_names(names(tbl), names(tbl)),
# function(nm) col_parsers[[nm]](tbl[[nm]])
#)
parsed <- purrr::imap(tbl, function(.x, nm) col_parsers[[nm]](.x))
tibble::as_tibble(parsed)
}
#purrr::list_transpose is slow and dplyr::bind_rows uses a lot of memory.
payload_transpose <- function(x, template){
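# pull each field across all records into a character vector; every value in the JSON payload arrives as a string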
len <- length(x)
wid <- length(template)
y <- rlang::rep_named(template, list())
for(col in template){
y[[col]] <- vapply(x, .subset2, character(1), col, USE.NAMES = FALSE)
}
y
}
col_processor_map <- function(types){
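# map Fiscal Data API type labels to parser functions; unrecognized types fall back to the '***' (character) parser with a warning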
type_processor_map <- list(
'DATE' = lubridate::ymd,
'PERCENTAGE' = readr::parse_number,
'CURRENCY' = readr::parse_number,
'CURRENCY0' = readr::parse_number,
'NUMBER' = as.numeric,
'INTEGER' = as.numeric,
'YEAR' = as.integer,
'MONTH' = as.integer,
'DAY' = as.integer,
'QUARTER' = as.integer,
'STRING' = as.character,
'***' = as.character
)
types_c <- as.character(types)
unknown_types <- which(!types_c %in% names(type_processor_map))
if(length(unknown_types) > 0){
rlang::warn(sprintf("Unknown mapping for type '%s'.", types_c[unknown_types]))
types_c[unknown_types] <- '***'
}
purrr::set_names(
type_processor_map[types_c],
names(types)
)
}
record_processor_factory <- function(types){
record_processors <- col_processor_map(types)
processor <- function(record){
purrr::imap(
purrr::modify_if(record, ~.x == 'null', ~NA_character_),
~ record_processors[[.y]](.x)
)
}
return(processor)
}
empty_table_prototype <- function(types){
prototypes <- list(
'DATE' = lubridate::Date(0),
'PERCENTAGE' = double(0),
'CURRENCY' = double(0),
'NUMBER' = double(0),
'YEAR' = integer(0),
'MONTH' = integer(0),
'DAY' = integer(0),
'QUARTER' = integer(0),
'STRING' = character(0)
)
tbl_prototype <- purrr::set_names(
prototypes[unlist(types)],
names(types)
)
return(tibble::tibble(!!!tbl_prototype))
}
|
/scratch/gouwar.j/cran-all/cranData/ustfd/R/ustfd.R
|
root_api_url <- 'https://fiscaldata.treasury.gov/page-data'
fd_url <- function(path){
url <- httr::parse_url(root_api_url)
httr::modify_url(url, path = c(url$path, path))
}
get_ustfd_datasets <- function(){
url <- fd_url('sq/d/707360527.json')
res <- httr::GET(url)
datasets <- purrr::pluck(httr::content(res), 'data','allDatasets','datasets')
slugs <- purrr::map_chr(datasets, 'slug')
dataset_names <- stringr::str_extract(slugs, '[\\w-]+')
purrr::map(
purrr::set_names(datasets, dataset_names),
~purrr::list_modify(.x, dataset = .x$slug, slug = purrr::zap()),
)
}
get_ustfd_dataset_definition <- function(dataset){
path <- c('datasets', dataset, 'page-data.json')
#return(fd_url(path))
definition <- httr::content(httr::GET(fd_url(path)))
return(definition$result$pageContext$config)
}
# extract_ustfd_endpoints_table <- function(datasets){
# apis <- purrr::map(datasets, 'apis')
# endpoints <- purrr::reduce(purrr::imap(apis, ~lapply(.x, c, dataset =.y)), c)
# dplyr::select(
# purrr::list_rbind(endpoints),
# dataset, endpoint, name = 'tableName', description = 'tableDescription'
# )
# }
extract_ustfd_datatset_definitions <- function(dataset_definition){
page_context <- dataset_definition[c(
'dataStartYear', 'name', 'summaryText',
'notesAndKnownLimitations', 'techSpecs'
)]
page_context[['dataset']] <- dataset_definition$slug
api_list <- purrr::map(
dataset_definition$apis,`[`,
c('dateField', 'earliestDate', 'endpoint', 'fields', 'lastUpdated',
'latestDate', 'pathName', 'rowDefinition', 'tableDescription',
'tableName','updateFrequency')
)
names(api_list) <- purrr::map_chr(api_list, 'endpoint')
page_context[['apis']] <- api_list
return(page_context)
}
extract_ustfd_dictionaries <- function(datasets){
dataset_names <- purrr::set_names(as.list(names(datasets)), names(datasets))
raw_definitions <- lapply(dataset_names, get_ustfd_dataset_definition)
definitions <- lapply(raw_definitions, extract_ustfd_datatset_definitions)
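# assign NULL to the non-standard-evaluation column names used below to silence R CMD check "no visible binding" notes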
dataStartYear <- isRequired <- earliestDate <- dataset <- dateField <- updateFrequency <- NULL
definitions <- purrr::map(
definitions,
~purrr::list_modify(
.x,
!!!.x$techSpecs[c('earliestDate','updateFrequency')],
techSpecs = purrr::zap()
)
)
definitions_df <- dplyr::mutate(
dplyr::bind_rows(
purrr::map(definitions, purrr::list_modify, apis = purrr::zap())
),
dataStartYear = as.integer(dataStartYear),
earliestDate = lubridate::mdy(earliestDate),
dataset = stringr::str_extract(dataset, '[\\w-]+'),
updateFrequency = stringr::str_match(
updateFrequency, '(?:Updated )([\\w-\\(\\) ]+)'
)[,2]
)
apis <- purrr::map(
purrr::map(definitions, 'apis'),
~purrr::set_names(.x, purrr::map_chr(.x, 'endpoint'))
)
reduced_apis <- purrr::reduce(
purrr::imap(apis, ~purrr::map(.x, purrr::list_modify, dataset = .y) ),
c
)
apis_df <- dplyr::mutate(
dplyr::bind_rows(
purrr::map(
reduced_apis,
purrr::list_modify,
fields = purrr::zap(), lastUpdated = purrr::zap(), latestDate = purrr::zap()
)
),
earliestDate = lubridate::ymd(earliestDate),
date_column = dateField
)
fields <- purrr::imap(
purrr::map(reduced_apis, 'fields'),
~purrr::map(.x, purrr::list_modify, endpoint = .y)
)
fields_df <- dplyr::mutate(
dplyr::bind_rows(purrr::reduce(fields, c)),
isRequired = as.logical(isRequired)
)
dictionary_orders <- list(
datasets = c(
'dataset', 'name', 'summary_text', 'earliest_date', 'data_start_year',
'update_frequency', 'notes_and_known_limitations'
),
endpoints = c(
'dataset', 'endpoint', 'table_name', 'table_description', 'row_definition',
'path_name', 'date_column', 'earliest_date', 'update_frequency'
),
fields = c(
'endpoint', 'column_name', 'data_type', 'pretty_name', 'definition',
'is_required'
)
)
dictionaries <- purrr::imap(
purrr::map(
list(datasets = definitions_df, endpoints = apis_df, fields = fields_df),
dplyr::rename_with,
snakecase::to_snake_case
),
~dplyr::select(.x, dplyr::all_of(dictionary_orders[[.y]]))
)
return(dictionaries)
}
|
/scratch/gouwar.j/cran-all/cranData/ustfd/R/ustfd_apis.R
|
#' Return a table of supported and known datasets
#'
#' @description
#'
#' `ustfd_datasets` provides details about 34 known datasets for Fiscal Data.
#' It returns a data frame with 34 rows and the following 7 columns:
#'
#' * `dataset` - ID of the source dataset (natural key)
#' * `name` - name of the source dataset
#' * `summary_text` - description of the data set and the data it covers
#' * `earliest_date` - the date of the earliest record available for this table
#' * `data_start_year` - first year in the data set
#' * `update_frequency` - "Daily", "Monthly", "Quarterly", "Semi-Annually",
#' "Annually", "As Needed", "Daily (Discontinued)", "Monthly (Discontinued)"
#' * `notes_and_known_limitations` - notes about the dataset and its known limitations
#'
#' @return tibble
#'
#' @export
#'
#' @family ustfd_user
#'
#' @source \url{https://fiscaldata.treasury.gov/api-documentation/#list-of-endpoints}
#'
#' @examples
#' library(ustfd)
#' ustfd_datasets()
#'
#'
#'
ustfd_datasets <- function(){
return(.dictionaries$datasets)
}
#' Return a table of supported and known tables including the API endpoints
#' for the specified dataset(s). See [`ustfd_datasets()`] for known datasets.
#'
#' @description
#'
#' `ustfd_tables` provides details about 85 known endpoints for Fiscal Data.
#' It returns a data frame with 85 rows and the following 9 columns:
#'
#' * `dataset` - ID of the source dataset
#' * `endpoint` - the table's API endpoint (natural key)
#' * `table_name` - Name of the table within the data set
#' * `table_description` - a description for the data in the endpoint
#' * `row_definition` - a description of what each row in the table describes
#' * `path_name` - API path name
#' * `date_column` - the name of the table column that holds the record's date
#' * `earliest_date` - the date of the earliest record available for this table
#' * `update_frequency` - "Daily", "Monthly", "Quarterly", "Semi-Annually",
#' "Annually", "As Needed", "Daily (Discontinued)", "Monthly (Discontinued)"
#'
#' @param datasets one or more strings representing a valid dataset ID. If
#' present, only endpoints belonging to matching datasets will be returned
#'
#' @return tibble
#'
#' @export
#'
#' @family ustfd_user
#'
#' @source \url{https://fiscaldata.treasury.gov/api-documentation/#list-of-endpoints}
#'
#' @examples
#' library(ustfd)
#' ustfd_tables(ustfd_datasets()$dataset[2])$endpoint
#'
#'
#'
ustfd_tables <- function(datasets = NULL){
if(is.null(datasets) ) return(.dictionaries$endpoints)
dataset <- NULL #make warnings quiet
dplyr::filter(.dictionaries$endpoints, dataset %in% datasets)
}
#' Return a table of known fields for known endpoints
#'
#' @description
#'
#' `ustfd_table_columns` returns the column dictionaries for the specified endpoint(s).
#' See [`ustfd_tables()`] for known endpoints.
#'
#' @param endpoints one or more strings representing a valid endpoint
#'
#' @details The format of a dictionary is a tibble with one row for every
#' table column and the following columns:
#'
#' * `endpoint` - the ID of the table this column belongs to
#' * `column_name` - the field name recognizable to the API interface
#' * `data_type` - one of: "DATE", "STRING", "CURRENCY", "NUMBER",
#' "PERCENTAGE", "YEAR", "QUARTER", "MONTH", "DAY"
#' * `pretty_name` - a descriptive label
#' * `definition` - definition of the column's value
#' * `is_required` - logical value
#'
#' @return tibble
#'
#' @export
#'
#' @family ustfd_user
#'
#' @source \url{https://fiscaldata.treasury.gov/api-documentation/#fields-by-endpoint}
#'
#' @examples
#' library(ustfd)
#' ustfd_table_columns(ustfd_tables(ustfd_datasets()$dataset[2])$endpoint)
#'
#'
#'
ustfd_table_columns <- function(endpoints=NULL){
if(is.null(endpoints) ) return(.dictionaries$fields)
endpoint <- NULL #make warnings quiet
dplyr::filter(.dictionaries$fields, endpoint %in% leading_slash(endpoints))
}
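# Illustrative sketch: look up the column dictionary for a single endpoint (the
# endpoint string is the one used in the endpoint_exists() example below):
#   ustfd_table_columns("v2/accounting/od/debt_to_penny")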
#' Tests if an endpoint is known
#'
#' @description
#'
#' See [`ustfd_tables()`] for known endpoints.
#'
#' @param endpoint character vector
#'
#' @return logical matching input size
#'
#' @export
#'
#' @family ustfd_user
#'
#' @examples
#' library(ustfd)
#' endpoint_exists('v2/accounting/od/debt_to_penny')
#'
endpoint_exists <- function(endpoint){
leading_slash(endpoint) %in% ustfd_tables()$endpoint
}
|
/scratch/gouwar.j/cran-all/cranData/ustfd/R/ustfd_endpoints.R
|
#' Request filtered API results
#'
#' @description
#' Fiscal Data API allows for the filtering of results on the server side,
#' leading to a smaller payload. The combinations of fields and operators
#' supported are not currently defined, so it is suggested you test the desired
#' combinations before relying on them.
#'
#' @section Syntax:
#' A filter should be a named list of key-value pairs where the name corresponds
#' to the field that should be filtered and the value is a character vector or a
#' list where the name of an item corresponds to the operator and the value
#' corresponds to the operand. One field may have more than one filter.
#'
#' @section Operators:
#' - `>`, `<` Greater-than and lesser-than
#' - `>=`, `<=` Greater-/lesser-than or equal-to
#' - `=` Equal to
#' - `in` Subset-of
#'
#' @examples
#' \dontrun{
#' #records with a record_date no older than 10 days ago
#' list(record_date = c('>=' = lubridate::today()-10))
#'
#' #records with a record_date between two dates
#' list(
#' record_date = c('>=' = '2022-01-01'),
#' record_date = c('<=' = '2022-12-31')
#' )
#'
#' #records with a specific record_date
#' list(record_date = c('=' = lubridate::today()-2))
#'
#' #records where record_date is any of a set of specific dates
#' list(
#'   record_date = list('in' = c('2022-06-13','2022-06-15','2022-06-17'))
#' )
#' }
#'
#' @name filter-syntax
NULL
#' Form a Query
#'
#' @description
#'
#' `ustfd_query()` will verify the endpoint is valid and return a list suitable
#' for passing to [ustfd_url()] and [ustfd_request()].
#'
#' @param endpoint required string representing an API endpoint
#' @param filter optional list used to subset the data. See [filter-syntax] for
#' more information.
#' @param fields optional character vector of the fields to be retrieved
#' @param sort optional string or character vector. Ordering defaults to
#' ascending, to specify descending order precede the field name with '-'
#' @param page_size optional integer for pagination
#' @param page_number optional integer for pagination
#'
#' @return a list
#'
#' @export
#'
#' @family ustfd_user
#'
#' @examples
#'
#' library(ustfd)
#' ustfd_query(
#' 'v2/accounting/od/utf_qtr_yields',
#' filter = list(record_date = c('>=' = lubridate::today()-10))
#' )
#' ustfd_query(
#' 'v2/accounting/od/utf_qtr_yields',
#' filter = list(record_date = list('in' = c('2020-03-15','2020-03-16','2020-03-17')))
#' )
#' ustfd_query(
#' 'v2/accounting/od/utf_qtr_yields',
#' filter = list(record_date = c('=' = '2020-03-15'))
#' )
#'
#'
#'
ustfd_query <- function(endpoint, filter=NULL, fields=NULL, sort=NULL, page_size=NULL, page_number=NULL){
endpoint <- leading_slash(endpoint)
if(! endpoint_exists(endpoint))
    rlang::warn(paste0('Endpoint "', endpoint,'" not known. See ustfd_tables() for known endpoints.'))
query <- list(
format = 'json',
filter = filter,
fields = fields,
sort = sort,
endpoint = endpoint,
page_size = page_size,
page_number = page_number
)
return(purrr::compact(query[!is.na(query)]))
}
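# Sketch of the returned structure: NULL components are dropped by
# purrr::compact(), so a minimal query reduces to its format, fields and
# endpoint:
#   ustfd_query("v2/accounting/od/debt_to_penny", fields = "record_date")
#   #> list(format = "json", fields = "record_date",
#   #>      endpoint = "v2/accounting/od/debt_to_penny")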
leading_slash <- function(endpoints){
idx <- which(substr(endpoints,1,1) == '/')
endpoints[idx] <- substr(endpoints[idx], 2, nchar(endpoints[idx]))
return(endpoints)
}
|
/scratch/gouwar.j/cran-all/cranData/ustfd/R/ustfd_query.R
|
.base_url <- function(){
httr::parse_url('https://api.fiscaldata.treasury.gov/services/api/fiscal_service')
}
# lt= Less than
# lte= Less than or equal to
# gt= Greater than
# gte= Greater than or equal to
# eq= Equal to
# in= Contained in a given set
# ?filter=reporting_fiscal_year:in:(2007,2008,2009,2010)
# ?filter=funding_type_id:eq:202
.known_filter_operators <- function(){
list(
'<' = 'lt',
'<=' = 'lte',
'>' = 'gt',
'>=' = 'gte',
'=' = 'eq',
'in' = 'in'
)
}
.serialize_filter_operator <- function(operator, value){
known_operators <- .known_filter_operators()
if( !(operator %in% names(known_operators)) ){
known_operators <- paste(names(known_operators), collapse=',')
message <- paste('Unknown', operator, 'Operator not in:', known_operators)
rlang::abort(message)
}
serialized_operator <- paste0(':', known_operators[[operator]], ':')
if(operator == 'in'){
serialized_value <- paste0('(',paste(value, collapse=','),')')
} else {
serialized_value <- value
}
return(paste0(serialized_operator, serialized_value))
}
.serialize_filter <- function(filter){
paste(
purrr::imap(filter, ~paste0(.y, .serialize_filter_operator(names(.x)[1], .x[[1]]))),
collapse = ','
)
}
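# Worked example of the serialization above (dates and years are illustrative):
#   list(record_date = c(">=" = "2022-01-01"), record_date = c("<=" = "2022-12-31"))
# serializes to "record_date:gte:2022-01-01,record_date:lte:2022-12-31", and
#   list(reporting_fiscal_year = list("in" = c(2007, 2008)))
# serializes to "reporting_fiscal_year:in:(2007,2008)".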
.serialize_fields <- function(fields){
paste(fields, collapse=',')
}
.serialize_sort <- function(sort){
paste(sort, collapse=',')
}
# .serialize_format <- function(format){
# if(!format %in% c('xml','json','csv'))
# warning(paste('Format "',format,'" is not supported',sep=''))
# paste('sort', format, sep='=')
# }
#' Generate URL To Access US Treasury Fiscal Data API
#'
#' @description
#'
#' `ustfd_url()` will generate a URL suitable for querying the Fiscal Data API.
#'
#' @param query required list
#'
#' @return a httr url object
#'
#' @export
#'
#' @family ustfd_low_level
#'
#' @examples
#'
#' library(ustfd)
#' ustfd_url(ustfd_query('/v1/accounting/dts/dts_table_4'))
#'
#'
ustfd_url <- function(query){
query_params <- list()
if(('filter' %in% names(query)) & is.list(query$filter))
query_params[['filter']] <- .serialize_filter(query$filter)
if(('fields' %in% names(query)) & length(query$fields) >= 1)
query_params[['fields']] <- .serialize_fields(query$fields)
if(('sort' %in% names(query)) & length(query$sort) >= 1)
query_params[['sort']] <- .serialize_sort(query$sort)
if('format' %in% names(query))
query_params[['format']] <- query$format
if('page_size' %in% names(query) & is.numeric(query$page_size))
query_params[['page[size]']] <- as.integer(query$page_size)
if('page_number' %in% names(query) & is.numeric(query$page_number))
query_params[['page[number]']] <- as.integer(query$page_number)
query_url <- httr::modify_url(
url = .base_url(),
path = c(.base_url()$path, query$endpoint),
query = query_params
)
return(query_url)
}
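# Sketch of the assembled URL for the roxygen example above; with no filter,
# fields, sort or paging, only the mandatory format parameter is appended:
#   ustfd_url(ustfd_query("/v1/accounting/dts/dts_table_4"))
#   #> "https://api.fiscaldata.treasury.gov/services/api/fiscal_service/v1/accounting/dts/dts_table_4?format=json"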
|
/scratch/gouwar.j/cran-all/cranData/ustfd/R/ustfd_url.R
|
#' @export
summary.ustyc <- function(object,...) {
results <- c(nrow(object$df),
ifelse(is.null(object$month),"All",object$month),
ifelse(is.null(object$year),"All",object$year),
object$updated)
names(results) <- c("rows","month","year","updated")
results
}
|
/scratch/gouwar.j/cran-all/cranData/ustyc/R/summary.ustyc.R
|
#' US Treasury yield curve.
#' @description Fetches US Treasury yield curve data and transforms it into a data frame.
#' @details Downloads the US Treasury yield curve data in its original XML format,
#' then translates that data into a data frame with dates as row names and maturities
#' as column names.
#' @author Matt Barry \email{mrb@@softisms.com}
#' @references US Treasury resource web site \url{http://www.treasury.gov/resource-center/data-chart-center/interest-rates/Pages/TextView.aspx?data=yield}
#' @keywords treasury yield curve
#' @name ustyc
#' @docType package
NULL
|
/scratch/gouwar.j/cran-all/cranData/ustyc/R/ustyc-package.r
|
#' @title Download US Treasury yield curve data.
#' @description Downloads US Treasury yield curve data from the US Treasury web site.
#' @details Forms a query to submit for US Treasury yield curve data, posting this query to the US Treasury web site's data feed service. By default the download includes yield data for 12 products from January 1, 1990, some of which are NA during this span. The caller can pass parameters to limit the query to a certain year or year and month, but the full download is not especially large. The data downloaded from the service is in XML format. This function transforms that data into a numeric data frame with treasury product items (constant maturity yields for 12 kinds of bills, notes, and bonds) as columns and dates as row names. The function returns a list which includes an item for this data frame as well as query-related values for reference and the update date from the service. The data frame can be used as-is or converted easily to a time series format such as \code{xts}.
#' @param year the desired year number or NULL for all years (default)
#' @param month the desired month number or NULL for all months (default)
#' @param base the base URL for the data service, defaulting to \url{http://data.treasury.gov/feed.svc/DailyTreasuryYieldCurveRateData}. If the month or year arguments are not NULL, then the function modifies this URL to parameterize the download request.
#' @param allowParallel whether to allow \code{ldply} to use a registered parallel cluster. FALSE by default.
#' @return Class type \code{ustyc} containing update date \code{updated}, dataframe \code{df}, \code{month}, \code{year}, and \code{query} elements. The \code{query} element value is the string used to call the data service for download.
#' @export
#' @importFrom XML xmlParse xmlToList
#' @importFrom plyr ldply
#' @references \url{http://data.treasury.gov/feed.svc/DailyTreasuryYieldCurveRateData}
#' @seealso \url{http://cran.r-project.org/web/packages/FRBData/} for different interest rates and source.
#' @examples
#' \dontrun{
#' xlist = getYieldCurve()
#' summary(xlist)
#' }
getYieldCurve <- function(year=NULL,
month=NULL,
base="http://data.treasury.gov/feed.svc/DailyTreasuryYieldCurveRateData",
allowParallel=FALSE
) {
#require(XML)
#require(plyr)
location <- base
yloc <- mloc <- doc <- NULL
yloc <- if(is.null(year)==FALSE) paste("year(NEW_DATE)%20eq%20",year,sep='')
mloc <- if(is.null(month)==FALSE) paste("month(NEW_DATE)%20eq%20",month,sep='')
# determine whether caller wants subset of data
parameters <- ""
if (is.null(yloc)==FALSE && is.null(mloc)==FALSE) {
parameters = paste("?$filter=",mloc,"%20and%20",yloc,sep='')
} else {
if (is.null(yloc)==FALSE)
parameters = paste("?$filter=",yloc,sep='')
if (is.null(mloc)==FALSE)
parameters = paste("?$filter=",mloc,sep='')
}
doc <- xmlParse(paste(location,parameters,sep=''))
if (is.null(doc)) {
warning(paste("Could not parse the location",location))
return(NULL)
}
message("Download and parse complete. Converting to list...")
x <- xmlToList(doc)
message("List conversion complete. Converting to frame...")
# save the updated time
updated = x[[3]]
# truncate first four elements and the last element
x[1:4] <- NULL
x[[length(x)]] <- NULL
# field extraction function
cy <- function(t,p) {
if ("text" %in% names(p[[t]]))
p[[t]]$text
else
NA
}
# list manipulator to produce a data frame
y <- ldply(x, function(e) {
p <- e$content$properties
q = sapply(names(p),cy,p)
},
.id="NEW_DATE",
.parallel=allowParallel)
# strip hours and sort by date
y$NEW_DATE <- substring(y$NEW_DATE,1,10)
y <- y[with(y,order(NEW_DATE)),]
dates <- y$NEW_DATE
# trim the columns, convert remainder to double, assign row names
y <- data.frame(apply(y[,3:14],2,function(x) as.double(x)))
rownames(y) <- dates
message("Frame conversion complete.")
# return a list with data frame and some other useful tags from fetch
rv <- list(updated=updated,df=y,month=month,year=year,query=location)
class(rv) <- "ustyc"
rv
}
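# Minimal sketch (assuming the xts package is installed) of converting the
# returned data frame to a time series, as suggested in the details above:
#   yc <- getYieldCurve(year = 2013)
#   yc_xts <- xts::xts(yc$df, order.by = as.Date(rownames(yc$df)))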
|
/scratch/gouwar.j/cran-all/cranData/ustyc/R/yieldcurve.R
|