#' BRVM company info - Get information about a ticker (Beta, RSI, closing price, valorisation, etc.)
#'
#' @author Koffi Frederic SESSIE
#'
#' @description Takes the ticker of a company or index listed on the BRVM stock exchange,
#' converts the input to upper case with `toupper()` and returns information about the company's RSI, Beta, closing price, etc.
#'
#' @seealso \url{https://www.sikafinance.com}
#'
#' @param ticker The ticker of a company
#'
#' @return A tibble
#' @export
#'
#' @importFrom rvest html_elements read_html
#'
#' @examples
#' BRVM_company_info("BOAS")
#' BRVM_company_info("BoaM")
#' BRVM_company_info("BRVMAG")
#'
#'
BRVM_company_info<- function(ticker){
ticker<-toupper(ticker)
all_tickers <- c( "ABJC", "BICC", "BNBC", "BOAB", "BOABF", "BOAC",
"BOAM", "BOAN", "BOAS", "CABC", "CBIBF", "CFAC",
"CIEC", "ECOC", "ETIT", "FTSC", "NEIC", "NSBC",
"NTLC", "ONTBF", "ORGT", "ORAC", "PALC", "PRSC", "SAFC",
"SCRC", "SDCC", "SDSC", "SEMC", "SGBC", "SHEC",
"SIBC", "SICC", "SIVC", "SLBC", "SMBC", "SNTS",
"SOGC", "SPHC", "STAC", "STBC", "TTLC",
"TTLS", "UNLC", "UNXC"
#, "TTRC", "SVOC"
)
Countries<-list(BENIN =c("BOAB"), "BURKINA FASO" = c("BOABF", "CBIBF", "ONTBF"),
"IVORY COAST" = c("ABJC", "BICC", "BNBC","BOAC", "CABC", "CFAC", "CIEC", "ECOC", "FTSC", "NEIC",
"NSBC","NTLC", "ORAC", "PALC", "PRSC", "SAFC", "SCRC", "SDCC", "SDSC", "SEMC","SGBC",
"SHEC", "SIBC", "SICC", "SIVC", "SLBC", "SMBC", "SOGC","SPHC", "STAC", "STBC",
"SVOC", "TTLC", "TTRC", "UNLC","UNXC"),
MALI = c("BOAM"), NIGER = c("BOAN"),
SENEGAL = c("BOAS", "SNTS", "TTLS"),
TOGO = c("ETIT", "ORGT"))
# all_indexes <- c("BRVM10", "BRVMAG", "BRVMC", "BRVMAS", "BRVMDI",
# "BRVMFI", "BRVMIN", "BRVMSP", "BRVMTR", "BRVMPR",
# "BRVMPA", "BRVM30", "CAPIBRVM")
.indexes<-list("BRVM 10" = c("BRVM10"),
AGRICULTURE = c("BRVMAG"),
"BRVM COMPOSITE" =c("BRVMC"),
"OTHER SECTOR" = c("BRVMAS"),
DISTRIBUTION = c("BRVMDI"),
FINANCE = c("BRVMFI"),
INDUSTRY = c("BRVMIN"),
"PUBLIC SERVICES" = c("BRVMSP"),
TRANSPORT = c("BRVMTR"),
"BRVM PRESTIGE" = c("BRVMPR"),
"BRVM PRINCIPAL" = c("BRVMPA"),
"BRVM 30" = c("BRVM30"),
CAPITALISATION = c("CAPIBRVM"))
if (ticker %in% .indexes) {
adn_ticker <- ticker
url <-paste0("https://www.sikafinance.com/marches/cotation_", adn_ticker)
} else if (ticker %in% all_tickers){
# url<-paste0("https://www.sikafinance.com/marches/cotation_", ticker)
if (company_country(ticker) %in% names(Countries)){
if (company_country(ticker) == "BENIN") {
adn<- ".bj"
} else if (company_country(ticker) == "BURKINA FASO") {
adn<- ".bf"
} else if (company_country(ticker) == "IVORY COAST") {
adn<- ".ci"
} else if (company_country(ticker) == "MALI") {
adn<- ".ml"
} else if (company_country(ticker) == "NIGER") {
adn<- ".ne"
} else if (company_country(ticker) == "SENEGAL") {
adn<- ".sn"
} else if (company_country(ticker) == "TOGO") {
adn<- ".tg"
}
# adn_ticker <- paste0(ticker, adn)
# url <-paste0("https://www.sikafinance.com/marches/cotation_", adn_ticker)
url <-paste0("https://www.sikafinance.com/marches/cotation_", ticker, adn)
# message(url)
} else {
message(paste0("Be sure that ", ticker, " belong's to BRVM stock market"))
}
} else {
message(paste0("Be sure that ", ticker, " belong's to BRVM stock market"))
}
##Create empty dataframe
ticker_info<-as.data.frame(matrix(NA, ncol = 2, nrow = 0))
tryCatch({
val<- read_html(url) %>% html_elements('table') %>% html_table()
# for (i in 2:4){
# # val<- (read_html(url) %>% html_elements('table') %>% html_table())[[i]]
#
#
# ticker_info<-rbind(ticker_info, val[[i]])
# }
ticker_info <- rbind(val[[2]], val[[3]], val[[4]])
# colnames(ticker_info) <- NULL
colnames(ticker_info) <- c("Informations", "Values")
return(ticker_info)
},
error = function(e) {
message("Make sure you have an active internet connection")
},
warning = function(w) {
message("Make sure you have an active internet connection")
})
}
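## Illustrative sketch (not part of the package API): the country-to-suffix
## mapping implemented with if/else above is equivalent to a named lookup
## vector; `company_country()` is the package helper used above.
# .suffixes <- c("BENIN" = ".bj", "BURKINA FASO" = ".bf", "IVORY COAST" = ".ci",
#                "MALI" = ".ml", "NIGER" = ".ne", "SENEGAL" = ".sn", "TOGO" = ".tg")
# url <- paste0("https://www.sikafinance.com/marches/cotation_",
#               ticker, unname(.suffixes[company_country(ticker)]))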
|
/scratch/gouwar.j/cran-all/cranData/BRVM/R/brvm_company_info.R
|
#' Company's sector - To know the sector of a given company
#'
#' @family Data Retrieval
#' @family BRVM
#'
#' @author Koffi Frederic SESSIE
#'
#' @description Takes the ticker of a company listed on the BRVM stock exchange,
#' converts the input to upper case with `toupper()` and returns the company's sector.
#'
#' @param company The name of company listed on the BRVM stock exchange
#'
#' @return "character"
#'
#' @export
#'
#' @examples
#' company_sector("BICC")
#' company_sector("SNTS")
#'
#'
company_sector <- function(company){
company<-toupper(company)
.sectors =list(Agriculture = c("PALC","SCRC","SICC","SOGC","SPHC"),
Distribution = c("ABJC","BNBC","CFAC","PRSC","SHEC","TTLC","TTLS"),
Industry = c("CABC","FTSC","NEIC","NTLC","SEMC","SIVC","SLBC","SMBC","STBC","TTRC","UNLC","UNXC"),
Finance = c("BOAB","BOABF","BOAC","BOAM","BOAN","BOAS","BICC","CBIBF","ECOC","ETIT","NSBC","ORGT","SAFC","SGBC","SIBC"),
Transport = c("SDSC","SVOC"),
"Public service" = c("CIEC","ONTBF","SDCC","SNTS", "ORAC"),
Other = c("STAC"))
for (elem in seq_along(.sectors)) {
if (company %in% .sectors[[elem]]) {
return(names(.sectors)[[elem]])
}
}
}
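## Usage sketch (assumes the package is loaded): company_sector() can be
## vectorised over several tickers, e.g. with vapply(). Expected sectors
## follow the .sectors list above (BICC -> Finance, PALC -> Agriculture,
## SNTS -> Public service).
# vapply(c("BICC", "PALC", "SNTS"), company_sector, character(1))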
|
/scratch/gouwar.j/cran-all/cranData/BRVM/R/brvm_company_sector.R
|
#' BRVM Get - Get BRVM stock exchange Ticker Data
#'
#' @description This function will get BRVM ticker data from the sikafinance.com site.
#'
#' @family Data Retrieval
#' @family Sikafinance
#'
#' @author Koffi Frederic SESSIE
#'
#' @seealso \url{https://www.sikafinance.com/}
#' @seealso `BRVM_ticker_desc()`, `BRVM_tickers()`, `BRVM_get()`, `BRVM_index_stock()`
#'
#' @details This function will get data for the companies listed on the BRVM exchange through the sikafinance site.
#' It takes a vector of tickers via the `ticker` parameter and auto-formats
#' the tickers you input into upper case by using `toupper()`.
#'
#' @param ticker A vector of tickers, e.g. c("BICC","XOM","SlbC", "BRvm10")
#' @param Period Numeric value indicating the time period. Valid entries are 0, 1, 5, 30, 91 and 365, representing respectively daily data, one year of daily data, weekly, monthly, quarterly and yearly data.
#' @param from A quoted start date, ie. "2020-01-01" or "2020/01/01". The date
#' must be in ymd format "YYYY-MM-DD" or "YYYY/MM/DD".
#' @param to A quoted end date, ie. "2022-01-31" or "2022/01/31". The date must
#' be in ymd format "YYYY-MM-DD" or "YYYY/MM/DD"
#'
#'@importFrom httr2 req_body_json req_perform request resp_body_json
#'@importFrom dplyr group_by summarise as_tibble
#'@importFrom lubridate parse_date_time
#'@importFrom rlang abort
#'@importFrom stringr str_sub
#'
#' @examples \donttest{
#' library(lubridate)
#' library(rlang)
#' library(httr2)
#' library(dplyr)
#' library(stringr)
#'
#' symbols <- c("BiCc","XOM","SlbC")
#' data_tbl <- BRVM_get1(ticker = symbols)
#' data_tbl
#'
#' #From three year ago to the present
#'
#' BRVM_get1("ALL INDEXES", from = Sys.Date() - 252*3, to = Sys.Date())
#'
#' BRVM_get1(ticker = "BRVMAG", from = "2010-01-04", to = "2022-01-04")
#'
#' BRVM_get1("ALL", Period = 0, from = "2010-01-04", to = "2022-01-04" ) #To get daily data
#'
#' BRVM_get1("BrvmAS", Period = 1 ) # To get daily data for a whole year
#'
#' BRVM_get1(c("BRVMPR", "BRVMAG"), Period = 5) # To get weekly data
#'
#' BRVM_get1("BRVMAG", Period = 30 ) # To get monthly data
#'
#' BRVM_get1("BRVMPR", Period = 91 ) # To get quaterly data
#'
#' BRVM_get1(c("brvmtr", "BiCc", "BOAS"), Period = 365 ) # To get yearly data
#'}
#'
#' @return
#' A tibble
#'
#' @export
#'
BRVM_get1 <- function(ticker ='BICC',
Period = 0,
from = Sys.Date() - 89,
to = Sys.Date() ) {
first_date <- lubridate::parse_date_time(from, orders = "ymd")
end_date <- lubridate::parse_date_time(to, orders = "ymd")
if (first_date >= end_date){
rlang::abort(
"The '.from' parameter (start_date) must be less than '.to' (end_date)"
)
}
else if (first_date >= as.POSIXct(Sys.Date() - 2)){
rlang::abort(
"The '.from' parameter (start_date) must be less than today's date"
)
}
ticker <- unique(toupper(ticker))
all_tickers <- c( "ABJC", "BICC", "BNBC", "BOAB", "BOABF", "BOAC",
"BOAM", "BOAN", "BOAS", "CABC", "CBIBF", "CFAC",
"CIEC", "ECOC", "ETIT", "FTSC", "NEIC", "NSBC",
"NTLC", "ONTBF", "ORGT", "ORAC", "PALC", "PRSC", "SAFC",
"SCRC", "SDCC", "SDSC", "SEMC", "SGBC", "SHEC",
"SIBC", "SICC", "SIVC", "SLBC", "SMBC", "SNTS",
"SOGC", "SPHC", "STAC", "STBC", "TTLC",
"TTLS", "UNLC", "UNXC"
#, "TTRC", "SVOC"
)
# idx <- c("BRVM10", "BRVMAG", "BRVMC", "BRVMAS", "BRVMDI",
# "BRVMFI", "BRVMIN", "BRVMSP", "BRVMTR", "BRVMPR",
# "BRVMPA", "BRVM30", "CAPIBRVM")
all_indexes <- c("BRVM10", "BRVMAG", "BRVMC", "BRVMAS", "BRVMDI",
"BRVMFI", "BRVMIN", "BRVMSP", "BRVMTR", "BRVMPR",
"BRVMPA", "BRVM30", "CAPIBRVM")
ifelse(ticker =="ALL",
ticker <- all_tickers,
ticker)
ifelse(ticker =="ALL INDEXES",
ticker <- all_indexes,
ticker)
.indexes<-list("BRVM 10" = c("BRVM10"),
AGRICULTURE = c("BRVMAG"),
"BRVM COMPOSITE" =c("BRVMC"),
"OTHER SECTOR" = c("BRVMAS"),
DISTRIBUTION = c("BRVMDI"),
FINANCE = c("BRVMFI"),
INDUSTRY = c("BRVMIN"),
"PUBLIC SERVICES" = c("BRVMSP"),
TRANSPORT = c("BRVMTR"),
"BRVM PRESTIGE" = c("BRVMPR"),
"BRVM PRINCIPAL" = c("BRVMPA"),
"BRVM 30" = c("BRVM30"),
CAPITALISATION = c("CAPIBRVM"))
tick_vec <- NULL
## Filter ticker in .indexes or all_ticker list
for (tick in ticker) {
if (tick %in% .indexes) {
tick_vec <- c(tick_vec, tick)
} else if (tick %in% all_tickers){
if (company_country(tick) == "BENIN") {
adn <- paste0(tick,".bj")
} else if (company_country(tick) == "BURKINA FASO") {
adn <- paste0(tick,".bf")
} else if (company_country(tick) == "IVORY COAST") {
adn <- paste0(tick,".ci")
} else if (company_country(tick) == "MALI") {
adn <- paste0(tick,".ml")
} else if (company_country(tick) == "NIGER") {
adn <- paste0(tick,".ne")
} else if (company_country(tick) == "SENEGAL") {
adn <- paste0(tick,".sn")
} else if (company_country(tick) == "TOGO") {
adn <- paste0(tick,".tg")
}
tick_vec <- c(tick_vec, adn)
}
}
# Check input parameters after filtering ----
if (length(tick_vec) < 1){
rlang::abort(
"The 'ticker' parameter cannot be blank. Please enter at least one ticker.
If entering multiple please use .symbol = c(Tick_1, Tick_2, ...)"
)
} else {
ticker <- tick_vec
}
index_stock <- as.data.frame(matrix(NA, ncol = 6, nrow = 0))
names(index_stock) <- c("Date", "Open", "High", "Low", "Close", "Ticker")
tryCatch(
{
if (as.numeric(Period) %in% c(1, 30, 91, 365) ){
for (Tick in ticker) {
if (nchar(Tick) == 7) {
Tick1 <- str_sub(Tick, 1,4)
} else if (nchar(Tick) == 8) {
Tick1 <- str_sub(Tick, 1,5)
} else {
Tick1 <- Tick
}
# ifelse(nchar(Tick) == 7,
# Tick1 <- str_sub(Tick, 1,4),
# Tick1 <- Tick)
my_data <- request("https://www.sikafinance.com/api/general/GetHistos") %>%
req_body_json(list('ticker'= Tick,
'xperiod'= paste0(Period,''))) %>%
req_perform() %>%
resp_body_json(simplifyVector = T)
my_data <- dplyr::as_tibble(my_data$lst)
my_data$Date<-as.Date.character(my_data$Date, format = "%d/%m/%Y")
my_data <- my_data[,-6]
# assign(Tick1, my_data, envir = globalenv())
# if (nchar(Tick) == 7) {
# my_data$Ticker <- str_sub(Tick, 1,4)
# } else {
# my_data$Ticker <- Tick
# }
my_data$Ticker <- Tick1
index_stock <- rbind(index_stock, my_data)
}
if (length(unique(index_stock$Ticker)) > 1) {
return(index_stock)
} else {
return(index_stock[, -6])
}
}
else if (as.numeric(Period) %in% c(0, 5) ){
for (Tick in ticker) {
if (nchar(Tick) == 7) {
Tick1 <- str_sub(Tick, 1,4)
} else if (nchar(Tick) == 8) {
Tick1 <- str_sub(Tick, 1,5)
} else {
Tick1 <- Tick
}
# ifelse(nchar(Tick) == 7,
# Tick1 <- str_sub(Tick, 1,4),
# Tick1 <- Tick)
stock.data <- as.data.frame(matrix(NA, ncol = 6, nrow = 0))
names(stock.data) <- c("Date", "Open", "High", "Low", "Close", "Ticker")
for(.date in seq(end_date, first_date, "-3 months")){
to_date = as.Date.POSIXct(.date)
from_date = to_date - 89
my_data <- request("https://www.sikafinance.com/api/general/GetHistos") %>%
req_body_json(list('ticker'= Tick,
'datedeb'= from_date,
'datefin'= to_date,
'xperiod'= paste0(Period,''))) %>%
req_perform() %>%
resp_body_json(simplifyVector = T)
if (length(my_data$lst)==6) {
my_data <- dplyr::as_tibble(my_data$lst)
stock.data <- rbind(stock.data, my_data)
}
}
if (length(stock.data)==6 && nrow(stock.data)!=0) {
stock.data$Date<-as.Date.character(stock.data$Date, format = "%d/%m/%Y")
ifelse (any(duplicated(stock.data$Date)),
stock.data<-stock.data%>%
dplyr::group_by(Date)%>%
summarise(Open=mean(Open),
High= mean(High),
Low= mean(Low),
Close= mean(Close)),
stock.data)
message(paste0("We obtained ",Tick1, " data from ",
min(stock.data$Date),
" to ",
max(stock.data$Date)))
# stock.data <- stock.data[, -6]
# assign(Tick1, stock.data, envir = globalenv())
stock.data$Ticker <- Tick1
index_stock <- rbind(index_stock, stock.data )
# ifelse(length(unique(index_stock$Ticker)) > 1,
# return(index_stock),
# return(index_stock[, -6]))
} else {
message(paste0(Tick1," data aren't available between ",
first_date,
" and ",
end_date))
}
}
if (length(unique(index_stock$Ticker)) > 1) {
return(index_stock)
} else if (length(unique(index_stock$Ticker)) == 1){
return(index_stock[, -6])
}
}
else {
message("Choose the best period between 0, 1, 5, 30, 91 and 365")
}
},
error = function(e) {
message("Make sure you have an active internet connection")
},
warning = function(w) {
message("Make sure you have an active internet connection")
}
)
}
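## Illustration (assumption: the sikafinance endpoint behaves as it is used
## above): for Period values 1, 30, 91 and 365, BRVM_get1() essentially
## issues one request like this per ticker (shown for BICC, Ivory Coast
## suffix ".ci"); %>% is the magrittr pipe re-exported by this package.
# raw <- request("https://www.sikafinance.com/api/general/GetHistos") %>%
#   req_body_json(list(ticker = "BICC.ci", xperiod = "30")) %>%
#   req_perform() %>%
#   resp_body_json(simplifyVector = TRUE)
# head(raw$lst)  # Date/Open/High/Low/Close columns returned by the API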
|
/scratch/gouwar.j/cran-all/cranData/BRVM/R/brvm_get1.R
|
#' BRVM PLOT
#'
#' @description This function will get Ticker(s) data and then plot it.
#'
#' @family Data Retrieval
#' @family Plot
#' @family BRVM
#' @author Koffi Frederic SESSIE
#'
#' @param .company is the Ticker(s) name(s)
#' @param from A quoted start date, ie. "2020-01-01" or "2020/01/01". The date
#' must be in ymd format "YYYY-MM-DD" or "YYYY/MM/DD".
#' @param to A quoted end date, ie. "2022-01-31" or "2022/01/31". The date must
#' be in ymd format "YYYY-MM-DD" or "YYYY/MM/DD"
#' @param up.col is the up color
#' @param down.col is down color
#'
#' @seealso `BRVM_ticker_desc()`
#' @seealso `BRVM_tickers()`
#'
#' @return
#' An interactive chart
#'
#' @export
#'
#' @importFrom xts as.xts
#' @importFrom highcharter highchart hc_title hc_add_series hc_add_yAxis hc_yAxis_multiples hc_colors hc_exporting hc_xAxis hcaes
#'
#' @examples
#'\donttest{
#' library(highcharter)
#' library(lubridate)
#' library(rlang)
#' library(httr2)
#' library(dplyr)
#' library(stringr)
#' library(xts)
#'
#' BRVM_plot("BICC")
#'
#' # You can change the up and down colors as follow
#' BRVM_plot("BICC", up.col = "blue", down.col = "pink")
#'
#' # Plot the closing price of a group of 3 tickers
#' BRVM_plot(c("BICC","ETIT", "SNTS"))
#'}
BRVM_plot<- function(.company,
from = Sys.Date() - 365,
to = Sys.Date() - 1,
up.col = "darkgreen",
down.col = "red") {
# message('It possible to plot each sector chart line. You can use as argument .sectors$Agriculture to plot. Example BRVM_plot(.sector$Agriculture)')
date1<- from
date2 = to
# Evaluate input parameters ----
.company <- unique(toupper(.company))
# companies <- c( "ABJC", "BICC", "BNBC", "BOAB", "BOABF", "BOAC", "BOAM", "BOAN", "BOAS", "CABC", "CBIBF", "CFAC", "CIEC", "ECOC", "ETIT", "FTSC", "NEIC", "NSBC", "NTLC", "ONTBF", "ORGT", "PALC", "PRSC", "SAFC", "SCRC", "SDCC", "SDSC", "SEMC", "SGBC", "SHEC", "SIBC", "SICC", "SIVC", "SLBC", "SMBC", "SNTS", "SOGC", "SPHC", "STAC", "STBC", "SVOC", "TTLC", "TTLS", "UNLC", "UNXC"
# #, "TTRC"
# )
# ifelse(.company == "ALL",
# .company<- companies,
# .company)
Global.returns<- BRVM_get(.symbol = .company, .from = date1, .to = date2 )
if (length(Global.returns)== 6){
ticker.name <- .company
Global.returns1 <- Global.returns
Global.returns <-as.xts(Global.returns[,-c(1)],
order.by=Global.returns$Date)
Global.returns1$direction<-NA
for (i in 2:nrow(Global.returns1)) {
i1<- i-1
ifelse (Global.returns1[i,6] >= Global.returns1[i1,6],
Global.returns1[i, "direction"] <- "up",
Global.returns1[i, "direction"] <- "down")
}
brvm.plot<- highchart (type="stock") %>%
hc_title(text = paste0(ticker.name," chart : from ", date1, " to ", date2),
style = list(fontWeight = "bold", fontSize = "25px"),
align = "center") %>%
hc_add_series (name = "Prices",
Global.returns,
yAxis = 0,
showInLegend = FALSE,
upColor= up.col,
color = down.col) %>%
hc_add_yAxis (nid = 1L,
relative = 1)%>%
hc_add_series (name = "Volume",
data = Global.returns1[, c(1,6,7)],
yAxis = 1,
showInLegend= FALSE,
type="column",
hcaes(x = Date,
y = Volume,
group = direction ))%>%
hc_add_yAxis (nid = 2L,
relative = 1) %>%
hc_yAxis_multiples(
list(title = list(
style=list(color='#333333',
fontSize = "20px",
fontFamily= "Erica One"),
text = "Price"), top = "-10%", height = "90%", opposite = FALSE),
list(title = list(
style=list(color='gray',
fontSize = "20px",
fontFamily= "Erica One"),
text = "Volume"), top = "80%", height = "20%")
)%>%
hc_colors(colors = c(down.col, up.col))%>%
hc_exporting(
enabled = TRUE, # always enabled,
filename = paste0(ticker.name," chart : from ", date1, " to ", date2))
}
else if (length(Global.returns) > 6) {
.company = paste0(.company, collapse = ", ")
brvm.plot<- highchart(type = "stock") %>%
hc_add_series(data = Global.returns,
type = "line",
hcaes(x =Date, y= Close, group= Ticker))%>%
hc_xAxis(title = list(text = ""))%>%
hc_title(text = paste0("Tickers (", .company, ") line chart from ", date1, " to ", date2)) %>%
hc_exporting(
enabled = TRUE, # always enabled,
filename = paste0("Tickers line chart from ", date1, " to ", date2)
)
}
return(brvm.plot)
}
|
/scratch/gouwar.j/cran-all/cranData/BRVM/R/brvm_plot.R
|
#' Pipe operator
#'
#' See \code{magrittr::\link[magrittr:pipe]{\%>\%}} for details.
#'
#' @name %>%
#' @rdname pipe
#' @keywords internal
#' @export
#' @importFrom magrittr %>%
#' @usage lhs \%>\% rhs
#' @param lhs A value or the magrittr placeholder.
#' @param rhs A function call using the magrittr semantics.
#' @return The result of calling `rhs(lhs)`.
NULL
|
/scratch/gouwar.j/cran-all/cranData/BRVM/R/utils-pipe.R
|
#' Tidy eval helpers
#'
#' @description
#' This page lists the tidy eval tools reexported in this package from
#' rlang. To learn about using tidy eval in scripts and packages at a
#' high level, see the [dplyr programming
#' vignette](https://dplyr.tidyverse.org/articles/programming.html)
#' and the [ggplot2 in packages
#' vignette](https://ggplot2.tidyverse.org/articles/ggplot2-in-packages.html).
#' The [Metaprogramming
#' section](https://adv-r.hadley.nz/metaprogramming.html) of [Advanced
#' R](https://adv-r.hadley.nz) may also be useful for a deeper dive.
#'
#' * The tidy eval operators `{{`, `!!`, and `!!!` are syntactic
#' constructs which are specially interpreted by tidy eval functions.
#' You will mostly need `{{`, as `!!` and `!!!` are more advanced
#' operators which you should not have to use in simple cases.
#'
#' The curly-curly operator `{{` allows you to tunnel data-variables
#' passed from function arguments inside other tidy eval functions.
#' `{{` is designed for individual arguments. To pass multiple
#' arguments contained in dots, use `...` in the normal way.
#'
#' ```
#' my_function <- function(data, var, ...) {
#' data %>%
#' group_by(...) %>%
#' summarise(mean = mean({{ var }}))
#' }
#' ```
#'
#' * [enquo()] and [enquos()] delay the execution of one or several
#' function arguments. The former returns a single expression, the
#' latter returns a list of expressions. Once defused, expressions
#' will no longer evaluate on their own. They must be injected back
#' into an evaluation context with `!!` (for a single expression) and
#' `!!!` (for a list of expressions).
#'
#' ```
#' my_function <- function(data, var, ...) {
#' # Defuse
#' var <- enquo(var)
#' dots <- enquos(...)
#'
#' # Inject
#' data %>%
#' group_by(!!!dots) %>%
#' summarise(mean = mean(!!var))
#' }
#' ```
#'
#' In this simple case, the code is equivalent to the usage of `{{`
#' and `...` above. Defusing with `enquo()` or `enquos()` is only
#' needed in more complex cases, for instance if you need to inspect
#' or modify the expressions in some way.
#'
#' * The `.data` pronoun is an object that represents the current
#' slice of data. If you have a variable name in a string, use the
#' `.data` pronoun to subset that variable with `[[`.
#'
#' ```
#' my_var <- "disp"
#' mtcars %>% summarise(mean = mean(.data[[my_var]]))
#' ```
#'
#' * Another tidy eval operator is `:=`. It makes it possible to use
#' glue and curly-curly syntax on the LHS of `=`. For technical
#' reasons, the R language doesn't support complex expressions on
#' the left of `=`, so we use `:=` as a workaround.
#'
#' ```
#' my_function <- function(data, var, suffix = "foo") {
#' # Use `{{` to tunnel function arguments and the usual glue
#' # operator `{` to interpolate plain strings.
#' data %>%
#' summarise("{{ var }}_mean_{suffix}" := mean({{ var }}))
#' }
#' ```
#'
#' * Many tidy eval functions like `dplyr::mutate()` or
#' `dplyr::summarise()` give an automatic name to unnamed inputs. If
#' you need to create the same sort of automatic names by yourself,
#' use `as_label()`. For instance, the glue-tunnelling syntax above
#' can be reproduced manually with:
#'
#' ```
#' my_function <- function(data, var, suffix = "foo") {
#' var <- enquo(var)
#' prefix <- as_label(var)
#' data %>%
#' summarise("{prefix}_mean_{suffix}" := mean(!!var))
#' }
#' ```
#'
#' Expressions defused with `enquo()` (or tunnelled with `{{`) need
#' not be simple column names, they can be arbitrarily complex.
#' `as_label()` handles those cases gracefully. If your code assumes
#' a simple column name, use `as_name()` instead. This is safer
#' because it throws an error if the input is not a name as expected.
#'
#' @md
#' @name tidyeval
#' @keywords internal
#' @importFrom rlang enquo enquos .data := as_name as_label
#' @aliases enquo enquos .data := as_name as_label
#' @export enquo enquos .data := as_name as_label
#' @return "tibble"
NULL
|
/scratch/gouwar.j/cran-all/cranData/BRVM/R/utils-tidy-eval.R
|
# On library attachment, print message to user.
.onAttach <- function(libname, pkgname) {
msg <- paste0(
"\n",
"== Welcome to BRVM ================================================================",
"\nIf you find this package useful, please leave a star: ",
"\n https://github.com/Koffi-Fredysessie/BRVM",
"\n",
"\nIf you encounter a bug or want to request an enhancement please file an issue at:",
"\n https://github.com/Koffi-Fredysessie/BRVM/issues",
"\n",
"\nThank you for using BRVM!",
"\n"
)
packageStartupMessage(msg)
}
|
/scratch/gouwar.j/cran-all/cranData/BRVM/R/zzz.R
|
## ---- include = FALSE---------------------------------------------------------
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>",
fig.width = 8,
fig.height = 4.5,
fig.align = 'center',
out.width = '95%',
dpi = 100,
message = FALSE,
warning = FALSE
)
## ----setup--------------------------------------------------------------------
library(BRVM)
## ----brvm_rank----------------------------------------------------------------
BRVM_rank("Top", 10)
|
/scratch/gouwar.j/cran-all/cranData/BRVM/inst/doc/getting-started.R
|
---
title: "Getting Started with BRVM"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{Getting Started with BRVM}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>",
fig.width = 8,
fig.height = 4.5,
fig.align = 'center',
out.width = '95%',
dpi = 100,
message = FALSE,
warning = FALSE
)
```
```{r setup}
library(BRVM)
```
Let's take a look at a simple function that retrieves the top or bottom n records,
depending on whether 'Top' or 'Bottom' is requested.
```{r brvm_rank}
BRVM_rank("Top", 10)
```
|
/scratch/gouwar.j/cran-all/cranData/BRVM/inst/doc/getting-started.Rmd
|
---
title: "Getting Started with BRVM"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{Getting Started with BRVM}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>",
fig.width = 8,
fig.height = 4.5,
fig.align = 'center',
out.width = '95%',
dpi = 100,
message = FALSE,
warning = FALSE
)
```
```{r setup}
library(BRVM)
```
Let's take a look at a simple function that retrieves the top or bottom n records,
depending on whether 'Top' or 'Bottom' is requested.
```{r brvm_rank}
BRVM_rank("Top", 10)
```
|
/scratch/gouwar.j/cran-all/cranData/BRVM/vignettes/getting-started.Rmd
|
is.R <- function() TRUE
|
/scratch/gouwar.j/cran-all/cranData/BRugs/R/00.R
|
BRugsFit <-
function(modelFile, data, inits, numChains = 3, parametersToSave,
nBurnin = 1000, nIter = 1000, nThin = 1, coda = FALSE,
DIC = TRUE, working.directory = NULL, digits = 5, seed=NULL,
BRugsVerbose = getOption("BRugsVerbose")){
if(is.null(BRugsVerbose))
BRugsVerbose <- TRUE
op <- options("BRugsVerbose" = BRugsVerbose)
on.exit(options(op))
if(!is.null(working.directory)){
working.directory <- path.expand(working.directory)
savedWD <- getwd()
setwd(working.directory)
on.exit(setwd(savedWD), add = TRUE)
}
if(is.function(modelFile)){
writeModel(modelFile, con = (modelFile <- tempfile("model")), digits = digits)
if(!is.R()) on.exit(file.remove(modelFile), add = TRUE)
}
if(!file.exists(modelFile)) stop(modelFile, " does not exist")
if(file.info(modelFile)$isdir) stop(modelFile, " is a directory, but a file is required")
modelCheck(modelFile)
if(!(is.vector(data) && is.character(data) && all(file.exists(data))))
data <- bugsData(data, digits = digits)
modelData(data)
modelCompile(numChains)
if(!is.null(seed)) modelSetRN(seed)
if(!missing(inits)){
if(is.list(inits) || is.function(inits))
inits <- bugsInits(inits = inits, numChains = numChains, digits = digits)
if (is.character(inits) && any(file.exists(inits))){
if(BRugsVerbose) print(inits)
modelInits(inits)
}
}
modelGenInits()
samplesSetThin(nThin)
modelUpdate(nBurnin)
if(DIC){
dicSet()
on.exit(dicClear(), add = TRUE)
}
samplesSet(parametersToSave)
modelUpdate(nIter)
if(coda)
return(buildMCMC("*"))
else
return(list(Stats = samplesStats("*"), DIC = if(DIC) dicStats()))
}
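## Usage sketch (requires a working OpenBUGS installation; the normal-mean
## model below is a hypothetical example, not shipped with the package).
## BRugsFit() accepts a model given as an R function, which is written out
## via writeModel() as done above.
# model <- function(){
#   for (i in 1:N) { y[i] ~ dnorm(mu, tau) }
#   mu  ~ dnorm(0, 1.0E-6)
#   tau ~ dgamma(0.001, 0.001)
# }
# y <- rnorm(20, mean = 5, sd = 2)
# fit <- BRugsFit(model, data = list(y = y, N = length(y)),
#                 inits = function() list(mu = 0, tau = 1),
#                 numChains = 2, parametersToSave = c("mu", "tau"),
#                 nBurnin = 1000, nIter = 5000)
# fit$Stats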
|
/scratch/gouwar.j/cran-all/cranData/BRugs/R/BRugsFit.R
|
"bgrGrid" <-
function(node, bins = 50)
# Calculate grid of points at which to evaluate bgr statistic
{
sampleSize <- samplesSize(node)
beg <- samplesGetBeg()
end <- min(c(samplesGetEnd(), modelIteration()))
numChains <- samplesGetLastChain() - samplesGetFirstChain() + 1
sampleSize <- sampleSize %/% numChains
beg <- end - (sampleSize * samplesGetThin() - 1)
delta <- sampleSize %/% bins
grid <- ((1 : (bins - 1)) * delta) + beg
grid <- c(grid, end)
grid
}
|
/scratch/gouwar.j/cran-all/cranData/BRugs/R/bgr.grid.R
|
"bgrPoint" <-
function(sample)
# Calculate the bgr statistic given a sample concatenated over chains
{
numChains <- getNumChains()
sampleSize <- length(sample)
lenChain <- sampleSize %/% numChains
if (is.R())
dq <- quantile(sample, c(0.1, 0.9), names = FALSE)
else
dq <- quantile(sample, c(0.1, 0.9))
d.delta <- dq[2] - dq[1]
n.delta <- 0
for (i in 1:numChains) {
if (is.R())
nq <- quantile(sample[((i - 1) * lenChain + 1) : (i * lenChain)], c(0.1, 0.9), names = FALSE)
else
nq <- quantile(sample[((i - 1) * lenChain + 1) : (i * lenChain)], c(0.1, 0.9))
n.delta <- n.delta + nq[2] - nq[1]
}
n.delta <- n.delta / numChains
bgr.stat <- d.delta / n.delta
return(c(n.delta, d.delta, bgr.stat))
}
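## Illustration (hypothetical, standalone): the interval-based BGR ratio
## computed above is the width of the pooled 80% interval divided by the
## average within-chain 80% interval width; values near 1 indicate agreement.
# set.seed(1)
# sample <- c(rnorm(1000), rnorm(1000))            # two chains, concatenated
# pooled <- diff(quantile(sample, c(0.1, 0.9)))
# within <- mean(c(diff(quantile(sample[1:1000],    c(0.1, 0.9))),
#                  diff(quantile(sample[1001:2000], c(0.1, 0.9)))))
# unname(pooled / within)                          # ~1 when the chains agree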
|
/scratch/gouwar.j/cran-all/cranData/BRugs/R/bgr.point.R
|
buffer <- function(){
buffer <- file.path(tempdir(), "buffer.txt")
message(readLines(buffer))
}
|
/scratch/gouwar.j/cran-all/cranData/BRugs/R/buffer.R
|
"bugsData" <-
function(data, fileName = file.path(tempdir(), "data.txt"), format="E", digits = 5){
if (is.character(unlist(data))) {
if(is.R()) {
data.list <- lapply(as.list(data), get, pos = parent.frame(2))
names(data.list) <- as.list(data)
write.datafile(lapply(data.list, formatC, digits = digits, format = format), fileName)
}
else {
data.list <- lapply(as.list(data), get, where = parent.frame(2))
names(data.list) <- unlist(data)
writeDatafileS4(data.list, towhere = "data.txt")
}
}
else if(is.list(data)) {
data <- lapply(data, function(x){x <- if(is.character(x)||is.factor(x)) match(x, unique(x)) else x})
if(is.R()) {
write.datafile(lapply(data, formatC, digits = digits, format = format), fileName)
}
else {
writeDatafileS4(data, towhere = "data.txt")
}
}
else stop("Expected a list of data, a list or vector of variable names")
invisible(fileName)
}
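## Usage sketch: bugsData() writes an R list to a BUGS-format data file
## (in tempdir() unless 'fileName' is given) and invisibly returns the path.
# f <- bugsData(list(N = 3, y = c(1.5, 2.5, 3.5)))
# readLines(f)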
if(is.R()){
## need some fake functions for codetools
toSingleS4 <- function(...)
stop("This function is not intended to be called in R!")
"writeDatafileS4" <- toSingleS4
} else {
### The rest of this file is for S-PLUS only...
"writeDatafileS4" <-
#
# Writes to file "towhere" text defining a list containing "DATA" in a form compatable with WinBUGS.
# Required arguments:
# DATA - either a data frame or else a list consisting of any combination of scalars, vectors, arrays or data frames (but not lists).
# If a list, all list elements that are not data.frames must be named. Names of data.frames in DATA are ignored.
# Optional arguments:
# towhere - file to receive output. Is clipboard by default, which is convenient for pasting into a WinBUGS ODC file.
# fill - If numeric, number of columns for output. If FALSE, output will be on one line. If TRUE (default), number of
# columns is given by .Options$width.
# Value:
# Text defining a list is output to file "towhere".
# Details:
# The function performs considerable checking of DATA argument. Since WinBUGS requires numeric input, no factors or character vectors
# are allowed. All data must be named, either as named elements of DATA (if it is a list) or else using the names given in data frames.
# Data frames may contain matrices.
# Arrays of any dimension are rearranged to be in row-major order, as required by WinBUGS. Scientific notation is also handled properly.
# In particular, the number will consist of a mantissa _containing a decimal point_ followed by "E", then either "+" or "-", and finally
# a _two-digit_ number. S-Plus does not always provide a decimal point in the mantissa, uses "e" instead of "E", followed by
# either a "+" or "-" and then _three_ digits.
# Written by Terry Elrod. Disclaimer: This function is used at the user's own risk.
# Please send comments to [email protected].
# Revision history: 2002-11-19. Fixed to handle missing values properly.
function(DATA, towhere = "clipboard", fill = TRUE)
{
formatDataS4 =
#
# Prepares DATA for input to WinBUGS.
function(DATA)
{
if(!is.list(DATA))
stop("DATA must be a named list or data frame.")
dlnames <- names(DATA)
if(is.data.frame(DATA))
DATA <- as.list(DATA)
#
# Checking for lists in DATA....
lind <- sapply(DATA, is.list)
# Checking for data frames in DATA....
dfind <- sapply(DATA, is.data.frame)
# Any lists that are not data frames?...
if(any(lind & !dfind)) stop("DATA may not contain lists.")
# Checking for unnamed elements of list that are not data frames....
if(any(dlnames[!dfind] == "")) stop(
"When DATA is a list, all its elements that are not data frames must be named."
)
# Checking for duplicate names....
dupnames <- unique(dlnames[duplicated(dlnames)])
if(length(dupnames) > 0)
stop(paste(
"The following names are used more than once in DATA:",
paste(dupnames, collapse = ", ")))
if(any(dfind)) {
dataold <- DATA
DATA <- vector("list", 0)
for(i in seq(along = dataold)) {
if(dfind[i])
DATA <- c(DATA, as.list(dataold[[i]]))
else DATA <- c(DATA, dataold[i])
}
dataold <- NULL
}
dlnames <- names(DATA)
dupnames <- unique(dlnames[duplicated(dlnames)])
# Checking for duplicated names again (now that columns of data frames are included)....
if(length(dupnames) > 0) stop(paste(
"The following names are used more than once in DATA (at least once within a data frame):",
paste(dupnames, collapse = ", ")))
# Checking for factors....
factorind <- sapply(DATA, is.factor)
if(any(factorind))
stop(paste(
"DATA may not include factors. One or more factor variables were detected:",
paste(dlnames[factorind], collapse = ", ")))
# Checking for character vectors....
charind <- sapply(DATA, is.character)
if(any(charind))
stop(paste(
"WinBUGS does not handle character data. One or more character variables were detected:",
paste(dlnames[charind], collapse = ", ")))
# Checking for complex vectors....
complexind <- sapply(DATA, is.complex)
if(any(complexind))
stop(paste(
"WinBUGS does not handle complex data. One or more complex variables were detected:",
paste(dlnames[complexind], collapse = ", ")))
# Checking for values farther from zero than 1E+38 (which is limit of single precision)....
toobigind <- sapply(DATA, function(x)
{
y <- abs(x[!is.na(x)])
any(y[y > 0] > 9.9999999999999998e+37)
}
)
if(any(toobigind))
stop(paste(
"WinBUGS works in single precision. The following variables contain data outside the range +/-1.0E+38: ",
paste(dlnames[toobigind], collapse = ", "),
".\n", sep = ""))
# Checking for values in range +/-1.0E-38 (which is limit of single precision)....
toosmallind <- sapply(DATA, function(x)
{
y <- abs(x[!is.na(x)])
any(y[y > 0] < 9.9999999999999996e-39)
}
)
n <- length(dlnames)
data.string <- as.list(rep(NA, n))
for(i in 1:n) {
if(length(DATA[[i]]) == 1) {
ac <- toSingleS4(DATA[[i]])
data.string[[i]] <- paste(names(DATA)[i], "=",
ac, sep = "")
next
}
if(is.vector(DATA[[i]]) & length(DATA[[i]]) > 1) {
ac <- toSingleS4(DATA[[i]])
data.string[[i]] <- paste(names(DATA)[i], "=c(",
paste(ac, collapse = ", "), ")", sep =
"")
next
}
if(is.array(DATA[[i]])) {
ac <- toSingleS4(aperm(DATA[[i]]))
data.string[[i]] <- paste(names(DATA)[i],
"= structure(.Data= c(", paste(ac,
collapse = ", "), "), \n .Dim=c(",
paste(as.character(dim(DATA[[i]])),
collapse = ", "), "))", sep = "")
}
}
data.tofile <- paste("list(", paste(unlist(data.string),
collapse = ", "), ")", sep = "")
if(any(toosmallind))
warning(paste(
"WinBUGS works in single precision. The following variables contained nonzero data",
"\ninside the range +/-1.0E-38 that were set to zero: ",
paste(dlnames[toosmallind], collapse = ", "),
".\n", sep = ""))
return(data.tofile)
}
rslt <- formatDataS4(DATA)
cat(rslt, file = towhere, fill = fill)
invisible(0)
}
toSingleS4 <-
#
# Takes numeric vector and removes digit of exponent in scientific notation (if any)
#
# Written by Terry Elrod. Disclaimer: This function is used at the user's own risk.
# Please send comments to [email protected].
# Revision history: 2002-11-19. Fixed to handle missing values properly.
function(x)
{
xdim <- dim(x)
x <- as.character(as.single(x))
# First to look for positives:
pplus <- regMatchPos(x, "e\\+0")
pplusind <- apply(pplus, 1, function(y)
(!any(is.na(y))))
if(any(pplusind)) {
# Making sure that periods are in mantissa...
init <- substring(x[pplusind], 1, pplus[
pplusind, 1] - 1)
#...preceeding exponent
pper <- regMatchPos(init, "\\.")
pperind <- apply(pper, 1, function(y)
(all(is.na(y))))
if(any(pperind))
init[pperind] <- paste(init[pperind],
".0", sep = "")
# Changing the format of the exponent...
x[pplusind] <- paste(init, "E+", substring(
x[pplusind], pplus[pplusind, 2] + 1),
sep = "")
}
# Then to look for negatives:
pminus <- regMatchPos(x, "e\\-0")
pminusind <- apply(pminus, 1, function(y)
(!any(is.na(y))))
if(any(pminusind)) {
# Making sure that periods are in mantissa...
init <- substring(x[pminusind], 1, pminus[
pminusind, 1] - 1)
#...preceeding exponent
pper <- regMatchPos(init, "\\.")
pperind <- apply(pper, 1, function(y)
(all(is.na(y))))
if(any(pperind))
init[pperind] <- paste(init[pperind],
".0", sep = "")
# Changing the format of the exponent...
x[pminusind] <- paste(init, "E-", substring(
x[pminusind], pminus[pminusind, 2] +
1), sep = "")
}
x
}
}
|
/scratch/gouwar.j/cran-all/cranData/BRugs/R/bugs.data.R
|
"bugsInits" <-
function (inits, numChains = 1, fileName, format="E", digits = 5){
if(missing(fileName))
fileName <- file.path(tempdir(), paste("inits", 1:numChains, ".txt", sep = ""))
if(length(fileName) != numChains)
stop("numChains = ", numChains, " filenames must be specified")
if(!is.null(inits)){
for (i in 1:numChains){
if (is.function(inits))
if (is.R())
write.datafile(lapply(inits(), formatC, digits = digits, format = format), fileName[i])
else
writeDatafileS4(inits(), towhere = fileName[i])
else
if (is.R())
write.datafile(lapply(inits[[i]], formatC, digits = digits, format = format), fileName[i])
else
writeDatafileS4(inits[[i]], towhere = fileName[i])
}
}
invisible(fileName)
}
|
/scratch/gouwar.j/cran-all/cranData/BRugs/R/bugs.inits.R
|
buildMCMC <- function(node, beg = samplesGetBeg(), end = samplesGetEnd(),
firstChain = samplesGetFirstChain(), lastChain = samplesGetLastChain(),
thin = samplesGetThin()){
oldBeg <- samplesGetBeg()
oldEnd <- samplesGetEnd()
oldFirstChain <- samplesGetFirstChain()
oldLastChain <- samplesGetLastChain()
oldThin <- samplesGetThin()
on.exit({
samplesSetBeg(oldBeg)
samplesSetEnd(oldEnd)
samplesSetFirstChain(oldFirstChain)
samplesSetLastChain(oldLastChain)
samplesSetThin(oldThin)
})
samplesSetBeg(beg)
samplesSetEnd(end)
samplesSetFirstChain(firstChain)
samplesSetLastChain(lastChain)
thin <- max(c(thin, 1))
samplesSetThin(thin)
mons <- samplesMonitors(node)
subBuildMCMC <- function(node){
sM <- samplesMonitors(node)
if(length(sM) > 1 || sM != node)
stop("node must be a scalar variable from the model, for arrays use samplesAutoC")
sample <- samplesSample(node)
numChains <- samplesGetLastChain() - samplesGetFirstChain() + 1
matrix(sample, ncol = numChains)
}
sampleSize <- samplesSize(mons[1])
end <- min(c(modelIteration(), samplesGetEnd()))
thin <- samplesGetThin()
numChains <- samplesGetLastChain() - samplesGetFirstChain() + 1
sampleSize <- sampleSize %/% numChains
beg <- end - sampleSize * thin + 1
if (sampleSize==0) {
mcmcobj <- NA
}
else {
samples <- lapply(mons, subBuildMCMC)
samplesChain <- vector(mode="list", length=numChains)
for(i in 1:numChains){
if (is.R())
temp <- sapply(samples, function(x) x[,i])
else
temp <- sapply(samples, function(x,j) { x[,j]}, j=i)
##### If we want to special-case 1D-mcmc objects:
# if(ncol(temp) == 1){
# dim(temp) <- NULL
# samplesChain[[i]] <- temp
# }
# else{
samplesChain[[i]] <- temp
colnames(samplesChain[[i]]) <- mons
# }
}
mcmcobj <- lapply(samplesChain, mcmc, start = beg, end = end, thin = thin)
}
if(is.R())
class(mcmcobj) <- "mcmc.list"
else
oldClass(mcmcobj) <- "mcmc.list"
mcmcobj
}
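## Usage sketch (assumes a compiled and updated model with monitors set):
## the returned coda 'mcmc.list' plugs directly into coda's diagnostics.
# samples <- buildMCMC("*")
# summary(samples)
# coda::gelman.diag(samples)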
|
/scratch/gouwar.j/cran-all/cranData/BRugs/R/buildMCMC.R
|
"dicClear" <-
function()
# Clear monitor for dic
{
command <- "DevianceEmbed.StatsGuard;DevianceEmbed.Clear"
invisible(.CmdInterpreter(command))
}
|
/scratch/gouwar.j/cran-all/cranData/BRugs/R/dic.clear.R
|
"dicSet" <-
function()
# Set a monitor for dic
{
command <- "DevianceEmbed.SetVariable('*');DevianceEmbed.SetGuard;DevianceEmbed.Set"
.CmdInterpreter(command)
if(getOption("BRugsVerbose"))
buffer()
}
|
/scratch/gouwar.j/cran-all/cranData/BRugs/R/dic.set.R
|
"dicStats" <-
function()
# Calculate dic statistics
{
command <- "DevianceEmbed.SetVariable('*');DevianceEmbed.StatsGuard;DevianceEmbed.Stats"
.CmdInterpreter(command)
buffer <- file.path(tempdir(), "buffer.txt")
rlb <- readLines(buffer)
len <- length(rlb)
if (len > 1) {
writeLines(rlb, buffer)
read.table(buffer)
} else {
message(rlb)
}
}
|
/scratch/gouwar.j/cran-all/cranData/BRugs/R/dic.stats.R
|
"dimensions" <-
function(node)
# Get dimension information for quantity in OpenBUGS model
{
nodeLabel <- as.character(node)
if(!(nodeLabel %in% modelNames()))
stop("node must be a variable name from the model")
dimensions <- .OpenBUGS(c("BugsRobjects.SetVariable", "BugsRobjects.GetNumDimensions"),
c("CharArray", "Integer"),
list(nodeLabel, NA))[[2]]
dimensions
}
|
/scratch/gouwar.j/cran-all/cranData/BRugs/R/dimensions.R
|
"formatdata" <-
function (datalist){
if (!is.list(datalist) || is.data.frame(datalist))
stop("argument to formatdata() ", "must be a list")
n <- length(datalist)
datalist.string <- vector(n, mode = "list")
datanames <- names(datalist)
for (i in 1:n) {
if (is.factor(datalist[[i]]))
datalist[[i]] <- as.integer(datalist[[i]])
datalist.string[[i]] <-
if (length(datalist[[i]]) == 1)
paste(names(datalist)[i],
"=", as.character(datalist[[i]]), sep = "")
else if (is.vector(datalist[[i]]) && length(datalist[[i]]) > 1)
paste(names(datalist)[i],
"=c(", paste(as.character(datalist[[i]]), collapse = ", "),
")", sep = "")
else
paste(names(datalist)[i],
"= structure(.Data= c(",
paste(as.character(as.vector(aperm(datalist[[i]]))), collapse = ", "),
"), .Dim=c(",
paste(as.character(dim(datalist[[i]])), collapse = ", "),
"))", sep = "")
}
datalist.tofile <- paste("list(",
paste(unlist(datalist.string), collapse = ", "),
")", sep = "")
return(datalist.tofile)
}
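## Usage sketch: formatdata() flattens an R list into the list(...) text
## expected by WinBUGS/OpenBUGS; arrays are written in row-major order
## via aperm().
# formatdata(list(N = 3, y = c(1.2, 3.4, 5.6), m = matrix(1:4, nrow = 2)))
# #> "list(N=3, y=c(1.2, 3.4, 5.6), m= structure(.Data= c(1, 3, 2, 4), .Dim=c(2, 2)))"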
|
/scratch/gouwar.j/cran-all/cranData/BRugs/R/formatdata.R
|
"getChain" <-
function()
# Get chain field
{
getOption("BRugsNextChain")
}
|
/scratch/gouwar.j/cran-all/cranData/BRugs/R/get.chain.R
|
"getNumChains" <-
function()
# Get numChains field
{
command<- "BugsEmbed.numChains"
.Integer(command)
}
|
/scratch/gouwar.j/cran-all/cranData/BRugs/R/get.num.chains.R
|
"infoMemory" <-
function(){
command <- "BugsEmbed.AllocatedMemory"
res <- .CmdInterpreter(command)
buffer <- file.path(tempdir(), "buffer.txt")
res <- readLines(buffer)
mem <- as.numeric(gsub("^([0-9]+).+", "\\1", res))
mem
}
|
/scratch/gouwar.j/cran-all/cranData/BRugs/R/info.memory.R
|
"infoModules" <-
function()
# List loaded OpenBUGS components
{
command <- "BugsEmbed.Modules"
.CmdInterpreter(command)
buffer <- file.path(tempdir(), "buffer.txt")
result <- read.table(buffer, skip = 1, as.is=TRUE, sep="\t")[,-1]
for(i in c(1,4,5,6))
result[,i] <- gsub(" ", "", result[,i])
names(result) <- c("Module", "Clients", "Version", "Maintainer", "Compiled", "Loaded")
return(result)
}
|
/scratch/gouwar.j/cran-all/cranData/BRugs/R/info.modules.R
|
"infoNodeValues" <-
function(nodeLabel)
# Get current value of node
{
nodeLabel <- as.character(nodeLabel)
out <- .OpenBUGS(c("BugsRobjects.SetVariable", "BugsRobjects.GetSize"),
c("CharArray","Integer"),
list(nodeLabel, NA))
nodeSize <- out[[2]]
if(nodeSize == -1)
stop(nodeLabel, " is not a node in BUGS model")
numChains <- getNumChains()
out <- .OpenBUGS(c("BugsRobjects.SetVariable", "BugsRobjects.GetValues"),
c("CharArray","RealArray"),
list(nodeLabel, double(nodeSize*numChains)))
values <- matrix(out[[2]], nrow=nodeSize, ncol=numChains)
values
}
infoNodeMethods <- function(nodeLabel)
{
nodeName <- sQuote(nodeLabel)
command <- paste("BugsEmbed.SetNode(",nodeName,"); BugsEmbed.Methods");
.CmdInterpreter(command)
buffer <- file.path(tempdir(), "buffer.txt")
result <- read.table(buffer, sep="\t", skip = 1, as.is=TRUE, col.names=c("empty", "Node", "Type", "Size", "Depth"))[,-1]
for (i in 1:2)
result[,i] <- gsub(" ", "", result[,i])
result
}
infoNodeTypes <- function(nodeLabel)
{
nodeName <- sQuote(nodeLabel)
command <- paste("BugsEmbed.SetNode(",nodeName,"); BugsEmbed.Types");
.CmdInterpreter(command)
buffer <- file.path(tempdir(), "buffer.txt")
result <- read.table(buffer, sep="\t", skip = 1, as.is=TRUE, col.names=c("empty", "Node", "Type"))[,-1]
for (i in 1:2)
result[,i] <- gsub(" ", "", result[,i])
result
}
|
/scratch/gouwar.j/cran-all/cranData/BRugs/R/info.node.R
|
## display updaters sorted by node name
infoUpdatersbyName <- function()
{
command <- "BugsEmbed.NotCompiledGuard; BugsEmbed.UpdatersByName"
.CmdInterpreter(command)
buffer <- file.path(tempdir(), "buffer.txt")
if (readLines(buffer)[1]=="BugsCmds:NotCompiled")
stop("Model not compiled")
buffer <- file.path(tempdir(), "Updater types.txt")
result <- read.table(buffer, sep="\t", skip=1, as.is=TRUE,
row.names=2, col.names=c("empty", "Node", "Type", "Size", "Depth"))[,-1]
## strip leading and trailing spaces
for (i in 1:2) {
result[,i] <- gsub("^ +", "\\1", result[,i])
result[,i] <- gsub(" +$", "\\1", result[,i])
}
rownames(result) <- gsub("^ +", "", rownames(result))
rownames(result) <- gsub(" +$", "", rownames(result))
unlink(buffer)
result
}
## display updaters sorted by node depth in graph
infoUpdatersbyDepth <- function()
{
command <- "BugsEmbed.NotCompiledGuard; BugsEmbed.UpdatersByDepth"
.CmdInterpreter(command)
buffer <- file.path(tempdir(), "buffer.txt")
if (readLines(buffer)[1]=="BugsCmds:NotCompiled")
stop("Model not compiled")
buffer <- file.path(tempdir(), "Updater types.txt")
result <- read.table(buffer, sep="\t", skip=1, as.is=TRUE,
row.names=2, col.names=c("empty", "Node", "Type", "Size", "Depth"))[,-1]
## strip leading and trailing spaces
for (i in 1:2) {
result[,i] <- gsub("^ +", "\\1", result[,i])
result[,i] <- gsub(" +$", "\\1", result[,i])
}
rownames(result) <- gsub("^ +", "", rownames(result))
rownames(result) <- gsub(" +$", "", rownames(result))
unlink(buffer)
result
}
|
/scratch/gouwar.j/cran-all/cranData/BRugs/R/info.updaters.R
|
### Functions to run a single OpenBUGS API command string
.BugsCmd <- function(command)
{
unlist(.OpenBUGS(command, "BugsCmd"))
}
.CmdInterpreter <- function(command)
{
unlist(.OpenBUGS(command, "CmdInterpreter"))
}
.Integer <- function(command)
{
unlist(.OpenBUGS(command, "Integer"))
}
.CharArray <- function(command, arg)
{
unlist(.OpenBUGS(command, "CharArray", arg))
}
.RealArray <- function(command, arg)
{
unlist(.OpenBUGS(command, "RealArray", arg))
}
.OpenBUGS.cmdtypes <- c("CmdInterpreter","Integer","CharArray","RealArray","BugsCmd")
.OpenBUGS <- function(cmds, cmdtypes=NULL, args=NULL) {
ncmds <- length(cmds)
if (is.null(cmdtypes)) cmdtypes <- rep("CmdInterpreter", ncmds)
if (is.null(args)) args <- as.list(rep(NA, ncmds))
stopifnot(ncmds==length(cmdtypes))
stopifnot(ncmds==length(args))
.OpenBUGS.platform(cmds, cmdtypes, args)
}
dquote <- function(x){
paste("\"", x, "\"", sep="")
}
.OpenBUGS.helper <- function(cmds, cmdtypes, args) {
ncmds <- length(cmds)
if (ncmds > 99999) stop("Maximum number of OpenBUGS API commands exceeded")
tempDir <- getOption("BRugsTmpdir")
## Don't want internalize/externalize to overwrite the command
## output buffer, so redirect its output to a separate trash can.
trashDir <- file.path(tempDir, "trash", fsep="/")
extFile <- getOption("BRugsExtFile")
cmdFile <- paste(tempDir, "cmds.txt", sep="/")
bugsPath <- system.file("exec", paste("BugsHelper", if(.Platform$OS.type == "windows") ".exe", sep=""), package="BRugs")
shcmd <- paste(dquote(bugsPath), dquote(tempDir), dquote(trashDir), dquote(extFile), dquote(cmdFile), dquote(ncmds))
for (i in 1:ncmds) {
if (cmdtypes[i] %in% c("CharArray","RealArray"))
cat(args[[i]], file=paste(tempDir, "/input",i,".txt", sep=""))
}
cmd.id <- match(cmdtypes, .OpenBUGS.cmdtypes) - 1
write(rbind(cmds, cmd.id), cmdFile)
res <- system(shcmd)
handleRes(res)
out <- vector(ncmds, mode="list")
for (i in seq_along(cmds)){
if (cmdtypes[i] %in% c("Integer","CharArray","RealArray"))
out[[i]] <- scan(paste(tempDir,"/output",i,".txt",sep=""),
switch(cmdtypes[i],
"Integer" = integer(),
"CharArray" = character(),
"RealArray" = double()),
quiet=TRUE)
}
out
}
handleRes <- function(res)
{
maintainer <- maintainer("BRugs")
errors <- c("Internal \"trap\" error in OpenBUGS, or non-existent module or procedure called.",
"An OpenBUGS procedure was called with the wrong type of argument.",
"An OpenBUGS procedure was called with the wrong signature.")
## If a library call ends in a trap, then error code 1 will be returned from BugsHelper on Linux
## On Windows it shouldn't even get this far after a trap. TODO see if the trap message is shown.
if (res > 0) {
buf <- readLines(file.path(tempdir(), "buffer.txt"))
trap <- grep("Sorry something went wrong", buf, value=TRUE)
if(length(trap) > 0) message(trap[1])
stop(errors[res])#, "\nPlease report this bug to ", maintainer)
}
}
.SamplesGlobalsCmd <- function(node){
options.old <- options()
options(scipen=20) # don't pass numbers in scientific notation to OpenBUGS
commands <- c(paste("SamplesEmbed.beg :=", getOption("BRugsSamplesBeg")),
paste("SamplesEmbed.end :=", getOption("BRugsSamplesEnd")),
paste("SamplesEmbed.firstChain :=", getOption("BRugsSamplesFirstChain")),
paste("SamplesEmbed.lastChain :=", getOption("BRugsSamplesLastChain")),
paste("SamplesEmbed.thin :=", getOption("BRugsSamplesThin")),
paste("SamplesEmbed.SetVariable(", sQuote(node), ")", sep=""),
paste("BugsMappers.SetPrec(", getOption("BRugsPrec"), ")", sep="")
)
options(options.old)
paste(commands, collapse="; ")
}
|
/scratch/gouwar.j/cran-all/cranData/BRugs/R/internal.R
|
"modelAdaptivePhase" <-
function()
# Get endOfAdapting field
{
command <- "BugsInterface.endOfAdapting"
.Integer(command) - 1
}
|
/scratch/gouwar.j/cran-all/cranData/BRugs/R/model.adaptivePhase.R
|
"modelCheck" <-
function(fileName)
# Check that OpenBUGS model is syntactically correct
{
path <- dirname(fileName)
path <- if(path == ".") getwd() else path
fileName <- file.path(path, basename(fileName))
if(!file.exists(fileName))
stop("File ", fileName, " does not exist")
if(file.info(fileName)$isdir)
stop(fileName, " is a directory, but a file is required")
command <- paste("BugsEmbed.SetFilePath(", sQuote(fileName),
");BugsEmbed.ParseGuard;BugsEmbed.Parse", sep = "")
if (!is.R()) {
command <- gsub ("\\\\", "/", command)
command <- gsub ("//", "/", command)
}
.CmdInterpreter(command)
.initGlobals()
if(getOption("BRugsVerbose"))
buffer()
}
|
/scratch/gouwar.j/cran-all/cranData/BRugs/R/model.check.R
|
"modelCompile" <-
function(numChains = 1)
# Compile OpenBUGS model
{
if(!is.numeric(numChains))
stop("numChains ", "must be numeric")
numChains <- as.integer(numChains)
command <- paste("BugsEmbed.CompileGuard",
";BugsEmbed.numChains :=", as.character(numChains), "; BugsEmbed.Compile", sep = "")
.CmdInterpreter(command)
samplesSetFirstChain(1)
samplesSetLastChain(numChains)
options("BRugsNextChain" = 1)
if(getOption("BRugsVerbose"))
buffer()
}
|
/scratch/gouwar.j/cran-all/cranData/BRugs/R/model.compile.R
|
"modelData" <-
function(fileName = "data.txt")
{
# Load data for OpenBUGS model
for(i in fileName){
path <- dirname(i)
path <- if(path == ".") getwd() else path
fileNm <- file.path(path, basename(i))
if(!file.exists(fileNm))
stop("File ", fileNm, " does not exist")
if(file.info(fileNm)$isdir)
stop(fileNm, " is a directory, but a file is required")
command <- paste("BugsEmbed.SetFilePath(", sQuote(fileNm),
");BugsEmbed.LoadDataGuard;BugsEmbed.LoadData", sep = "")
if (!is.R()){
command <- gsub ("\\\\", "/", command)
command <- gsub ("//", "/", command)
}
.CmdInterpreter(command)
if(getOption("BRugsVerbose"))
buffer()
}
}
|
/scratch/gouwar.j/cran-all/cranData/BRugs/R/model.data.R
|
modelDisable <- function(factory){
command <- paste("UpdaterMethods.SetFactory('", factory,"');UpdaterMethods.Disable", sep = "")
invisible(.CmdInterpreter(command))
}
modelEnable <- function(factory){
command <- paste("UpdaterMethods.SetFactory('", factory,"');UpdaterMethods.Enable", sep = "")
invisible(.CmdInterpreter(command))
}
|
/scratch/gouwar.j/cran-all/cranData/BRugs/R/model.factory.R
|
"modelGenInits" <-
function()
# Generate initial values for OpenBUGS model
{
command <- paste("BugsEmbed.GenerateInitsGuard;", "BugsEmbed.GenerateInits")
.CmdInterpreter(command)
if(getOption("BRugsVerbose"))
buffer()
}
|
/scratch/gouwar.j/cran-all/cranData/BRugs/R/model.gen.inits.R
|
"modelGetRN" <-
function()
# Get the seed of random number generator
{
command <- "BugsEmbed.preSet"
.Integer(command)
}
|
/scratch/gouwar.j/cran-all/cranData/BRugs/R/model.getRN.R
|
"modelInits" <-
function(fileName, chainNum = NULL)
# Load initial values for OpenBUGS model
{
if(is.null(chainNum))
chainNum <- getChain() + seq(along = fileName) - 1
if(!is.numeric(chainNum))
stop("chainNum ", "must be numeric")
if(length(fileName) != length(chainNum))
stop("length(chainNum) ", "must be equal to the number of filenames given")
chainNum <- as.integer(chainNum)
path <- dirname(fileName)
path <- ifelse(path == ".", getwd(), path)
fileName <- file.path(path, basename(fileName))
fileExist <- !file.exists(fileName)
if(any(fileExist))
stop("File(s) ", fileName[fileExist], " do(es) not exist.")
for(i in seq(along = fileName)){
if(file.info(fileName[i])$isdir)
stop(fileName[i], " is a directory, but a file is required.")
filename <- sQuote(fileName[i])
command <- paste("BugsEmbed.SetFilePath(", filename,
"); BugsEmbed.LoadInitsGuard; BugsEmbed.chain := ",
as.character(chainNum[i]), "; BugsEmbed.LoadInits")
if (!is.R()){
command <- gsub ("\\\\", "/", command)
command <- gsub ("//", "/", command)
}
.CmdInterpreter(command)
if(getOption("BRugsVerbose")){
message("Initializing chain ", chainNum[i], ": ", sep="")
buffer()
}
options("BRugsNextChain" = chainNum[i] + 1)
}
invisible()
}
|
/scratch/gouwar.j/cran-all/cranData/BRugs/R/model.inits.R
|
"modelIteration" <-
function()
# Get iteration field
{
command <- "BugsEmbed.iteration"
.Integer(command)
}
|
/scratch/gouwar.j/cran-all/cranData/BRugs/R/model.iteration.R
|
"modelNames" <-
function()
{
# gets names in OpenBUGS model
command <- "BugsRobjects.GetNumberNames"
number <- .Integer(command)
name <- character(number)
if(length(number)){
cmds <- character(0)
cmdtype <- character()
for(i in 1:number){
cmds <- c(cmds, paste("BugsRobjects.SetIndex(", i-1, ")", sep=""),
"BugsRobjects.GetStringLength")
cmdtype <- c(cmdtype, c("CmdInterpreter","Integer"))
}
res <- .OpenBUGS(cmds, cmdtype)
numchar <- unlist(res[seq(2, 2*number, by=2)])
cmds <- character(0)
cmdtype <- character()
args <- list()
for(i in 1:number){
char <- paste(rep(" ", numchar[i]), collapse="")
cmds <- c(cmds,
paste("BugsRobjects.SetIndex(", i-1, ")", sep=""),
"BugsRobjects.GetVariable")
cmdtype <- c(cmdtype, c("CmdInterpreter","CharArray"))
args <- c(args, list(NA, char))
}
res <- .OpenBUGS(cmds, cmdtype, args)
name <- unlist(res[seq(2, 2*number, by=2)])
}
return(name)
}
|
/scratch/gouwar.j/cran-all/cranData/BRugs/R/model.names.R
|
"modelPrecision" <-
function(prec)
# Set the precision to which results are displayed
{
if(!is.numeric(prec))
stop("prec ", "must be numeric")
prec <- as.integer(prec)
options(BRugsPrec=prec)
# command <- paste("BugsMappers.SetPrec(", prec, ")")
# invisible(.CmdInterpreter(command))
}
|
/scratch/gouwar.j/cran-all/cranData/BRugs/R/model.precision.R
|
"modelSaveState" <- function(stem)
{
## Saves the state of each chain in the OpenBUGS model
if(!is.character(stem) || length(stem)!=1)
stop("'stem' must be character of length 1")
if(dirname(stem) == ".")
stem <- file.path(getwd(), basename(stem))
command <- paste("BugsEmbed.UpdateGuard", ";BugsEmbed.WriteChains(", sQuote(stem), ")")
.CmdInterpreter(command)
if(getOption("BRugsVerbose"))
buffer()
}
|
/scratch/gouwar.j/cran-all/cranData/BRugs/R/model.save.state.R
|
"modelSetAP" <-
function(factoryName, adaptivePhase)
# Set the length of adaptive phase
{
name <- sQuote(factoryName)
command <- paste("UpdaterMethods.SetFactory(", name,
") ;UpdaterMethods.AdaptivePhaseGuard;",
"UpdaterMethods.SetAdaptivePhase(",
adaptivePhase,
")", sep = "")
.CmdInterpreter(command)
}
|
/scratch/gouwar.j/cran-all/cranData/BRugs/R/model.setAP.R
|
"modelSetIts" <-
function(factoryName, iterations)
# Set maximum number of iterations in iterative algorithms
{
name <- sQuote(factoryName)
command <- paste("UpdaterMethods.SetFactory(", name,
") ;UpdaterMethods.IterationsGuard;",
"UpdaterMethods.SetIterations(",
iterations,
")", sep = "")
.CmdInterpreter(command)
}
|
/scratch/gouwar.j/cran-all/cranData/BRugs/R/model.setIts.R
|
"modelSetOR" <-
function(factoryName, overRelaxation)
# Set over-relaxed updating
{
name <- sQuote(factoryName)
command <- paste("UpdaterMethods.SetFactory(", name,
") ;UpdaterMethods.OverRelaxationGuard;",
"UpdaterMethods.SetOverRelaxation(",
overRelaxation,
")", sep = "")
.CmdInterpreter(command)
}
|
/scratch/gouwar.j/cran-all/cranData/BRugs/R/model.setOR.R
|
"modelSetRN" <-
function(state)
# Set the seed of random number generator
{
if(!state %in% 1:14)
stop("state must be an integer from 1 to 14")
state <- as.integer(state)
command <- paste("BugsEmbed.SetRNGuard; BugsEmbed.SetRNState(", state, ")" )
invisible(.CmdInterpreter(command))
}
|
/scratch/gouwar.j/cran-all/cranData/BRugs/R/model.setRN.R
|
## Simple alias to mimic the OpenBUGS script command
"modelSetWD" <- function(dir) setwd(dir)
|
/scratch/gouwar.j/cran-all/cranData/BRugs/R/model.setWD.R
|
"modelUpdate" <-
function(numUpdates, thin = 1, overRelax = FALSE)
# Update each chain in the OpenBUGS model numUpdates * thin times
{
if(!is.numeric(numUpdates))
stop("numUpdates ", "must be numeric")
numUpdates <- as.integer(numUpdates)
if(!is.numeric(thin))
stop("thin ", "must be numeric")
thin <- as.integer(thin)
if(!is.logical(overRelax))
stop("overRelax ", "must be logical")
command <- paste("BugsEmbed.UpdateGuard",
";BugsEmbed.thin := ", thin,
";BugsEmbed.overRelax := ", as.integer(overRelax),
";BugsEmbed.updates := ", numUpdates,
";BugsEmbed.Update")
.CmdInterpreter(command)
if(getOption("BRugsVerbose"))
buffer()
}
|
/scratch/gouwar.j/cran-all/cranData/BRugs/R/model.update.R
|
"plotAutoC" <-
function(node, plot = TRUE, colour = c("red", "blue", "green", "yellow", "black"),
lwd = 5, main = NULL, ...)
# Plot autocorrelation function for a single component of an OpenBUGS variable
{
sM <- samplesMonitors(node)
if(length(sM) > 1 || sM != node)
stop("node must be a scalar variable from the model, for arrays use samplesAutoC")
nodeName <- sQuote(node)
sample <- samplesSample(node)
chain <- samplesGetFirstChain()
if (sd(sample) > 1.0E-10)
acfresult <- acf(sample, col = colour[chain], main = if(is.null(main)) nodeName else main,
lwd = lwd, demean = TRUE, plot = plot, ...)
else stop("ACF cannot be computed/plotted: standard deviation <= 1.0E-10")
acfresult$series <- node
if(plot) invisible(acfresult)
else return(acfresult)
}
|
/scratch/gouwar.j/cran-all/cranData/BRugs/R/plot.autoC.R
|
### Plot bgr diagnostic for single component of OpenBUGS name
"plotBgr" <-
function(node, plot = TRUE, main = NULL, xlab = "iteration", ylab = "bgr",
col = c("red", "blue", "green"), bins = 50, ...)
{
sM <- samplesMonitors(node)
if(length(sM) > 1 || sM != node)
stop("node must be a scalar variable from the model, for arrays use samplesBgr")
if (any(grep("^inference can not be made", sM))) { stop(sM) }
grid <- bgrGrid(node, bins = bins)
## Use a single API call instead of looping API calls over
## iterations - more efficient with the Linux helper.
## find size of available sample at each grid point
res <- .OpenBUGS(cmds = c(.SamplesGlobalsCmd(node),
as.vector(rbind(paste("SamplesEmbed.end := ", grid, ";"), "SamplesEmbed.SampleSize;"))),
cmdtypes = c("CmdInterpreter", rep(c("CmdInterpreter","Integer"), bins)),
args=as.list(c(NA, rep(c(NA, NA), bins)))
)
args <- list(NA)
for (i in seq(length=bins)){
args[[2*i]] <- NA
args[[2*i + 1]] <- double(res[[2*i + 1]])
}
## get available sample at each grid point
res <- .OpenBUGS(cmds =
c(.SamplesGlobalsCmd(node),
as.vector(rbind(paste("SamplesEmbed.end := ", grid, ";"), "SamplesEmbed.SampleValues;"))),
cmdtypes = c("CmdInterpreter", rep(c("CmdInterpreter","RealArray"), bins)),
args=args)
## remove junk elements of list, leaving a list of samples for each grid point
res[c(1, 2*seq(length=bins))] <- NULL
## calculate between, within and ratio statistics for each grid point
bgr <- rbind(grid, sapply(res, bgrPoint))
yRange <- range(bgr[4,])
yRange <- c(0, max(c(1.2, yRange[2])))
nRange <- range(bgr[2,])
nRange <- c(min(c(0, nRange[1])), nRange[2])
nDelta <- nRange[2] - nRange[1]
dRange <- range(bgr[3,])
dRange <- c(min(c(0, dRange[1])), dRange[2])
dDelta <- dRange[2] - dRange[1]
max <- 2 * max(c(nDelta, dDelta))
bgr[2,] <- bgr[2,] / max
bgr[3,] <- bgr[3,] / max
if(plot){
plot(grid, bgr[4,], ylim = yRange, type = "l",
main = if(is.null(main)) node else main, xlab = xlab, ylab = ylab, col = col[1], ...)
lines(grid, bgr[2,], col = col[2], ...)
lines(grid, bgr[3,], col = col[3], ...)
}
bgr <- data.frame(t(bgr))
names(bgr) <- c("Iteration", "pooledChain80pct", "withinChain80pct", "bgrRatio")
bgr$Iteration <- as.integer(bgr$Iteration)
if(plot) invisible(bgr)
else return(bgr)
}
|
/scratch/gouwar.j/cran-all/cranData/BRugs/R/plot.bgr.R
|
"plotDensity" <-
function(node, plot=TRUE, main = NULL, xlab = "" , ylab = "", col = "red", ...)
# Plot posterior density for single component of OpenBUGS name
{
sM <- samplesMonitors(node)
if(length(sM) > 1 || sM != node)
stop("node must be a scalar variable from the model, for arrays use samplesDensity")
nodeName <- sQuote(node)
sampleSize <- samplesSize(node)
sample <- samplesSample(node)
absSample <- abs(sample)
intSample <- as.integer(absSample + 1.0E-10)
zero <- absSample - intSample
intSample <- as.integer(sample)
if (sum(zero) > 0){
if (is.R())
d <- density(sample, adjust = 1.25)
else
d <- density(sample)
if (plot)
plot(d$x, d$y, type = "l", main = if(is.null(main)) nodeName else main,
xlab = xlab , ylab = ylab, col = col, ...)
res <- d
}
else{
histogram <- table(intSample) / sampleSize
xRange <- range(intSample)
xLim <- c(xRange[1] - 0.5, xRange[2] + 0.5)
if (plot)
plot(histogram, type = "h", xlim = xLim, ylim = c(0, 1),
main = if(is.null(main)) nodeName else main,
xlab = xlab , ylab = ylab, col = col, ...)
res <- histogram
}
if (plot) invisible(res) else return(res)
}
|
/scratch/gouwar.j/cran-all/cranData/BRugs/R/plot.density.R
|
"plotHistory" <-
function(node, plot = TRUE, colour = c("red", "blue", "green", "yellow", "black"),
main = NULL, xlab = "iteration", ylab = "", ...)
# Plot history for single component of OpenBUGS name
{
sM <- samplesMonitors(node)
if(length(sM) > 1 || sM != node)
stop("node must be a scalar variable from the model, for arrays use samplesHistory")
nodeName <- sQuote(node)
sampleSize <- samplesSize(node)
sample <- samplesSample(node)
end <- min(c(modelIteration(), samplesGetEnd()))
thin <- samplesGetThin()
numChains <- samplesGetLastChain() - samplesGetFirstChain() + 1
sampleSize <- sampleSize %/% numChains
beg <- end - (sampleSize - 1) * thin
beg <- beg %/% thin
end <- end %/% thin
x <- (beg:end) * thin
y <- matrix(sample, ncol = numChains)
if(plot){
plot(x, y[,1], ylim = range(sample), type = "n",
main = if(is.null(main)) nodeName else main,
xlab = xlab , ylab = ylab, ...)
for(chain in 1:numChains){
lines(x, y[,chain], col = colour[chain], ...)
}
invisible(y)
}
else return(y)
}
|
/scratch/gouwar.j/cran-all/cranData/BRugs/R/plot.history.R
|
"ranksClear" <-
function(node)
# Clears a ranks monitor for vector quantity in OpenBUGS model
{
nodeName <- sQuote(node)
command <- paste("RanksEmbed.SetVariable(", nodeName, "); RanksEmbed.StatsGuard;",
"RanksEmbed.Clear")
.CmdInterpreter(command)
if(getOption("BRugsVerbose"))
buffer()
}
|
/scratch/gouwar.j/cran-all/cranData/BRugs/R/ranks.clear.R
|
"ranksSet" <-
function(node)
# Set a ranks monitor for vector quantity node in OpenBUGS model
{
nodeName <- sQuote(node)
command <- paste("RanksEmbed.SetVariable(", nodeName, "); RanksEmbed.SetGuard;",
"RanksEmbed.Set")
.CmdInterpreter(command)
if(getOption("BRugsVerbose"))
buffer()
}
|
/scratch/gouwar.j/cran-all/cranData/BRugs/R/ranks.set.R
|
"ranksStats" <-
function(node)
# Calculates ranks statistics for vector valued node in OpenBUGS model
{
if(length(node) > 1 || node == "*")
stop("node cannot be a vector, nor '*'")
nodeName <- sQuote(node)
command <- paste("RanksEmbed.SetVariable(", nodeName, "); RanksEmbed.StatsGuard;",
"RanksEmbed.Stats")
.CmdInterpreter(command)
buffer <- file.path(tempdir(), "buffer.txt")
rlb <- readLines(buffer)
len <- length(rlb)
if (len > 1)
read.table(buffer)
else
message(rlb)
}
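## Illustrative monitor/update/summarise sketch, kept in comments; 'theta' is a
## made-up vector-valued node in an already compiled and initialised model:
# ranksSet("theta")       # start the ranks monitor
# modelUpdate(5000)
# ranksStats("theta")     # rank statistics for the components of theta
# ranksClear("theta")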
|
/scratch/gouwar.j/cran-all/cranData/BRugs/R/ranks.stats.R
|
"samplesAutoC" <-
function(node, chain, beg = samplesGetBeg(), end = samplesGetEnd(),
thin = samplesGetThin(), plot = TRUE, mfrow = c(3, 2), ask = NULL, ann = TRUE, ...)
# Plot auto correlation function
{
if(plot && is.null(ask)) {
if (is.R())
ask <- !((dev.cur() > 1) && !dev.interactive())
else
ask <- !((dev.cur() > 1) && !interactive())
}
oldBeg <- samplesGetBeg()
oldEnd <- samplesGetEnd()
oldFirstChain <- samplesGetFirstChain()
oldLastChain <- samplesGetLastChain()
oldThin <- samplesGetThin()
on.exit({
samplesSetBeg(oldBeg)
samplesSetEnd(oldEnd)
samplesSetFirstChain(oldFirstChain)
samplesSetLastChain(oldLastChain)
samplesSetThin(oldThin)
})
beg <- max(beg, modelAdaptivePhase())
samplesSetBeg(beg)
samplesSetEnd(end)
chain <- max(c(1, chain))
chain <- min(c(getNumChains(), chain))
samplesSetFirstChain(chain)
samplesSetLastChain(chain)
thin <- max(c(thin, 1))
samplesSetThin(thin)
mons <- samplesMonitors(node)
if(plot){
if (is.R())
par(mfrow = mfrow, ask = ask, ann = ann)
else
par(mfrow = mfrow, ask = ask)
}
result <- lapply(mons, plotAutoC, plot = plot, ...)
names(result) <- mons
if(plot) invisible(result)
else return(result)
}
|
/scratch/gouwar.j/cran-all/cranData/BRugs/R/samples.autoC.R
|
"samplesBgr" <-
function(node, beg = samplesGetBeg(), end = samplesGetEnd(),
firstChain = samplesGetFirstChain(), lastChain = samplesGetLastChain(),
thin = samplesGetThin(), bins = 50, plot = TRUE, mfrow = c(3, 2),
ask = NULL, ann = TRUE, ...)
# Plot bgr statistic
{
mons <- samplesMonitors(node)
if (any(grep("^inference can not be made", mons))) { stop(mons) }
if(plot && is.null(ask)) {
if (is.R())
ask <- !((dev.cur() > 1) && !dev.interactive())
else
ask <- !((dev.cur() > 1) && !interactive())
}
oldBeg <- samplesGetBeg()
oldEnd <- samplesGetEnd()
oldFirstChain <- samplesGetFirstChain()
oldLastChain <- samplesGetLastChain()
oldThin <- samplesGetThin()
on.exit({
samplesSetBeg(oldBeg)
samplesSetEnd(oldEnd)
samplesSetFirstChain(oldFirstChain)
samplesSetLastChain(oldLastChain)
samplesSetThin(oldThin)
})
beg <- max(beg, modelAdaptivePhase())
samplesSetBeg(beg)
samplesSetEnd(end)
samplesSetFirstChain(firstChain)
samplesSetLastChain(lastChain)
thin <- max(c(thin, 1))
samplesSetThin(thin)
mons <- samplesMonitors(node)
if(plot){
if (is.R())
par(mfrow = mfrow, ask = ask, ann = ann)
else
par(mfrow = mfrow, ask = ask)
}
result <- lapply(mons, plotBgr, bins = bins, plot = plot, ...)
names(result) <- mons
if(plot) invisible(result)
else return(result)
}
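## Illustrative usage, kept in comments; 'theta' is a made-up monitored node
## and at least two chains are assumed, since the diagnostic compares chains:
# samplesBgr("theta", firstChain = 1, lastChain = 2, bins = 50)
# bgr <- samplesBgr("theta", plot = FALSE)   # returns the statistics instead of plotting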
|
/scratch/gouwar.j/cran-all/cranData/BRugs/R/samples.bgr.R
|
"samplesClear" <-
function(node)
# Clear a sample monitor
{
nodeName <- sQuote(node)
command <- paste("SamplesEmbed.SetVariable(", nodeName,
");SamplesEmbed.HistoryGuard;SamplesEmbed.Clear")
.CmdInterpreter(command)
if(getOption("BRugsVerbose"))
buffer()
}
|
/scratch/gouwar.j/cran-all/cranData/BRugs/R/samples.clear.R
|
"samplesCoda" <- function(node, stem, beg = samplesGetBeg(),
end = samplesGetEnd(), firstChain = samplesGetFirstChain(),
lastChain = samplesGetLastChain(), thin = samplesGetThin())
{
# Write out CODA files
if(!is.character(node) || length(node)!=1)
stop("'node' must be character of length 1")
if(!is.character(stem) || length(stem)!=1)
stop("'stem' must be character of length 1")
if(dirname(stem) == ".")
stem <- file.path(getwd(), basename(stem))
oldBeg <- samplesGetBeg()
oldEnd <- samplesGetEnd()
oldFirstChain <- samplesGetFirstChain()
oldLastChain <- samplesGetLastChain()
oldThin <- samplesGetThin()
on.exit({
samplesSetBeg(oldBeg)
samplesSetEnd(oldEnd)
samplesSetFirstChain(oldFirstChain)
samplesSetLastChain(oldLastChain)
samplesSetThin(oldThin)
})
beg <- max(beg, modelAdaptivePhase())
samplesSetBeg(beg)
samplesSetEnd(end)
samplesSetFirstChain(firstChain)
samplesSetLastChain(lastChain)
thin <- max(c(thin, 1))
samplesSetThin(thin)
command <- paste(.SamplesGlobalsCmd(node), ";SamplesEmbed.StatsGuard;",
"SamplesEmbed.CODA(", sQuote(stem), ")")
.CmdInterpreter(command)
buffer()
}
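## Illustrative usage, kept in comments; node and stem names are made up.
## The call writes CODA-format index/output files starting with 'stem':
# samplesSet("theta")
# modelUpdate(1000)
# samplesCoda("theta", stem = file.path(tempdir(), "theta_"))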
|
/scratch/gouwar.j/cran-all/cranData/BRugs/R/samples.coda.R
|
"samplesCorrel" <-
function(node0, node1, beg = samplesGetBeg(), end = samplesGetEnd(),
firstChain = samplesGetFirstChain(), lastChain = samplesGetLastChain(),
thin = samplesGetThin())
# Correlation matrix of two quantities in OpenBUGS model
{
oldBeg <- samplesGetBeg()
oldEnd <- samplesGetEnd()
oldFirstChain <- samplesGetFirstChain()
oldLastChain <- samplesGetLastChain()
oldThin <- samplesGetThin()
on.exit({
samplesSetBeg(oldBeg)
samplesSetEnd(oldEnd)
samplesSetFirstChain(oldFirstChain)
samplesSetLastChain(oldLastChain)
samplesSetThin(oldThin)
})
samplesSetBeg(beg)
samplesSetEnd(end)
samplesSetFirstChain(firstChain)
samplesSetLastChain(lastChain)
thin <- max(c(thin, 1))
samplesSetThin(thin)
command <- paste("CorrelEmbed.beg :=", getOption("BRugsSamplesBeg"),
"; CorrelEmbed.end :=", getOption("BRugsSamplesEnd"),
"; CorrelEmbed.firstChain :=", getOption("BRugsSamplesFirstChain"),
"; CorrelEmbed.lastChain :=", getOption("BRugsSamplesLastChain"),
"; CorrelEmbed.thin :=", getOption("BRugsSamplesThin"),
"; CorrelEmbed.SetVariable0(", sQuote(node0),
");CorrelEmbed.SetVariable1(", sQuote(node1),
");CorrelEmbed.Guard", ";CorrelEmbed.PrintMatrix"
)
.CmdInterpreter(command)
buffer <- file.path(tempdir(), "buffer.txt")
rlb <- readLines(buffer)
len <- length(rlb)
if (len > 1)
as.matrix(read.table(buffer))
else
message(rlb)
}
|
/scratch/gouwar.j/cran-all/cranData/BRugs/R/samples.correl.R
|
"samplesDensity" <-
function(node, beg = samplesGetBeg(), end = samplesGetEnd(),
firstChain = samplesGetFirstChain(), lastChain = samplesGetLastChain(),
thin = samplesGetThin(), plot = TRUE, mfrow = c(3, 2), ask = NULL,
ann = TRUE, ...)
# Plot posterior density
{
if(is.null(ask)) {
if (is.R())
ask <- !((dev.cur() > 1) && !dev.interactive())
else
ask <- !((dev.cur() > 1) && !interactive())
}
oldBeg <- samplesGetBeg()
oldEnd <- samplesGetEnd()
oldFirstChain <- samplesGetFirstChain()
oldLastChain <- samplesGetLastChain()
oldThin <- samplesGetThin()
on.exit({
samplesSetBeg(oldBeg)
samplesSetEnd(oldEnd)
samplesSetFirstChain(oldFirstChain)
samplesSetLastChain(oldLastChain)
samplesSetThin(oldThin)
})
beg <- max(beg, modelAdaptivePhase())
samplesSetBeg(beg)
samplesSetEnd(end)
samplesSetFirstChain(firstChain)
samplesSetLastChain(lastChain)
thin <- max(c(thin, 1))
samplesSetThin(thin)
mons <- samplesMonitors(node)
if (plot) {
if (is.R())
par(mfrow = mfrow, ask = ask, ann = ann)
else
par(mfrow = mfrow, ask = ask)
}
result <- sapply(mons, plotDensity, plot=plot, ...)
if (!is.R())
invisible()
else {
if(plot) invisible(result)
else return(result)
}
}
|
/scratch/gouwar.j/cran-all/cranData/BRugs/R/samples.density.R
|
"samplesGetBeg" <-
function()
# Beginning iteration from which to compute sample statistics
{
getOption("BRugsSamplesBeg")
}
|
/scratch/gouwar.j/cran-all/cranData/BRugs/R/samples.get.beg.R
|
"samplesGetEnd" <-
function()
# End iteration from which to compute sample statistics
{
getOption("BRugsSamplesEnd")
}
|
/scratch/gouwar.j/cran-all/cranData/BRugs/R/samples.get.end.R
|
"samplesGetFirstChain" <-
function()
# First chain from which to compute sample statistics
{
getOption("BRugsSamplesFirstChain")
}
|
/scratch/gouwar.j/cran-all/cranData/BRugs/R/samples.get.firstChain.R
|
"samplesGetLastChain" <-
function()
# Last chain from which to compute sample statistics
{
getOption("BRugsSamplesLastChain")
}
|
/scratch/gouwar.j/cran-all/cranData/BRugs/R/samples.get.lastChain.R
|
"samplesGetThin" <-
function()
# Thinning interval to apply to sample statistics
{
getOption("BRugsSamplesThin")
}
|
/scratch/gouwar.j/cran-all/cranData/BRugs/R/samples.get.thin.R
|
"samplesHistory" <-
function(node, beg = samplesGetBeg(), end = samplesGetEnd(),
firstChain = samplesGetFirstChain(), lastChain = samplesGetLastChain(),
thin = samplesGetThin(), plot = TRUE, mfrow = c(3, 1), ask = NULL, ann = TRUE, ...)
# Plot history
{
sM <- samplesMonitors(node)[1]
if(sM == "model must be initialized before monitors used")
stop("model must be initialized / updated / monitored before samplesSample is used")
if(length(grep("^no monitor set for variable", sM)))
stop(sM)
if (samplesSize(sM[1])==0)
stop("No monitored samples available")
if(plot && is.null(ask)) {
if (is.R())
ask <- !((dev.cur() > 1) && !dev.interactive())
else
ask <- !((dev.cur() > 1) && !interactive())
}
oldBeg <- samplesGetBeg()
oldEnd <- samplesGetEnd()
oldFirstChain <- samplesGetFirstChain()
oldLastChain <- samplesGetLastChain()
oldThin <- samplesGetThin()
on.exit({
samplesSetBeg(oldBeg)
samplesSetEnd(oldEnd)
samplesSetFirstChain(oldFirstChain)
samplesSetLastChain(oldLastChain)
samplesSetThin(oldThin)
})
samplesSetBeg(beg)
samplesSetEnd(end)
samplesSetFirstChain(firstChain)
samplesSetLastChain(lastChain)
thin <- max(c(thin, 1))
samplesSetThin(thin)
mons <- samplesMonitors(node)
if(plot){
if (is.R())
par(mfrow = mfrow, ask = ask, ann = ann)
else
par(mfrow = mfrow, ask = ask)
}
result <- lapply(mons, plotHistory, plot = plot, ...)
names(result) <- mons
if(plot) invisible(result)
else return(result)
}
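## Illustrative usage, kept in comments; 'beta' is a made-up monitored node
## (possibly vector-valued, giving one trace plot per component):
# samplesHistory("beta", mfrow = c(3, 1))
# h <- samplesHistory("beta", plot = FALSE)  # matrices of iterations x chains, one per component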
|
/scratch/gouwar.j/cran-all/cranData/BRugs/R/samples.history.R
|
"samplesMonitors" <-
function(node)
# List all sample monitors corresponding to node
{
if (is.R()){
command <- paste("SamplesEmbed.SetVariable(", sQuote(node),
");SamplesEmbed.StatsGuard;SamplesEmbed.Labels",sep="")
.CmdInterpreter(command)
buffer <- file.path(tempdir(), "buffer.txt")
rlb <- readLines(buffer)
len <- length(rlb)
if (len == 1 && rlb == "command is not allowed (greyed out)")
message(rlb)
else{
if(len == 0){
message("model has probably not yet been updated")
invisible("model has probably not yet been updated")
}
else {
scan(buffer, what = "character", quiet = TRUE, sep="\n")
}
}
} else {
sampsMonsSingle <- function(node){
command <- paste("SamplesEmbed.SetVariable(", sQuote(node),
");SamplesEmbed.StatsGuard;SamplesEmbed.Labels",sep="")
.CmdInterpreter(command)
buffer <- file.path(tempdir(), "buffer.txt")
rlb <- readLines(buffer)
len <- length(rlb)
if (len == 1 && rlb == "command is not allowed (greyed out)")
message(rlb)
else{
if(len == 0){
message("model has probably not yet been updated")
invisible("model has probably not yet been updated")
}
else {
scan(buffer, what = "character", sep="\n")
}
}
}
        mons <- lapply(node, sampsMonsSingle)
mons <- unlist(mons)
return(mons)
}
}
|
/scratch/gouwar.j/cran-all/cranData/BRugs/R/samples.monitors.R
|
"samplesSample" <-
function(node)
# Get stored sample for single component of OpenBUGS name
{
if(samplesGetFirstChain() > samplesGetLastChain())
stop("Number of first chain is larger than last chain!")
if(length(node) != 1)
stop("Exactly one scalar node must be given.")
sM <- samplesMonitors(node)[1]
if(sM == "model must be initialized before monitors used")
stop("model must be initialized / updated / monitored before samplesSample is used")
if(length(grep("^no monitor set for variable", sM)))
stop(sM)
nodeSize <- .OpenBUGS(c("BugsRobjects.SetVariable", "BugsRobjects.GetSize"),
c("CharArray","Integer"),
list(node,NA))[[2]]
if(nodeSize > 1)
stop("Only scalar nodes such as ", node, "[1] are allowed.")
sampleSize <- samplesSize(node)
sample <- .OpenBUGS(c(.SamplesGlobalsCmd(node), "SamplesEmbed.SampleValues"),
c("CmdInterpreter","RealArray"),
list(node,double(sampleSize)))[[2]]
sample
}
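## Illustrative usage, kept in comments; 'theta' is a made-up scalar monitored
## node. The raw draws for the currently selected chain range can then be used
## for custom summaries:
# draws <- samplesSample("theta")
# mean(draws); quantile(draws, c(0.025, 0.975))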
|
/scratch/gouwar.j/cran-all/cranData/BRugs/R/samples.sample.R
|
"samplesSet" <-
function(node)
# Set a sample monitor
{
nodeName <- sQuote(node)
for(i in seq(along=nodeName)){
sM <- paste(suppressMessages(samplesMonitors(node[i])), collapse = " ")
if(sM == "model must be initialized before monitors used")
stop("model must be initialized before monitors used")
if(sM %in% c("inference can not be made when sampler is in adaptive phase",
"model has probably not yet been updated"))
alreadySet <- FALSE
else
alreadySet <- !length(grep("no monitor set", sM))
command <- paste("SamplesEmbed.SetVariable(", nodeName[i],
");SamplesEmbed.SetGuard;SamplesEmbed.Set")
.CmdInterpreter(command)
buffer <- file.path(tempdir(), "buffer.txt")
rlb <- readLines(buffer)
if(rlb == "")
message("either model has not been updated or variable ", nodeName[i], " already set")
else{
if(getOption("BRugsVerbose")){
if(alreadySet)
message("monitor for variable ", nodeName[i], " already set")
else message(rlb, " for variable ", nodeName[i])
}
}
}
invisible()
}
|
/scratch/gouwar.j/cran-all/cranData/BRugs/R/samples.set.R
|
"samplesSetBeg" <-
function(begIt)
# Set the beg field
{
if(!is.numeric(begIt))
stop("begIt ", "must be numeric")
begIt <- as.integer(begIt)
options("BRugsSamplesBeg" = begIt)
}
|
/scratch/gouwar.j/cran-all/cranData/BRugs/R/samples.set.beg.R
|
"samplesSetEnd" <-
function(endIt)
# Set the end field
{
if(!is.numeric(endIt))
stop("endIt ", "must be numeric")
endIt <- as.integer(endIt)
options("BRugsSamplesEnd" = endIt)
}
|
/scratch/gouwar.j/cran-all/cranData/BRugs/R/samples.set.end.R
|
"samplesSetFirstChain" <-
function(first)
# Set the firstChain field
{
if(!is.numeric(first))
stop("first ", "must be numeric")
first <- as.integer(first)
if(!(first %in% 1:getNumChains()))
stop("it is required to have 1 <= first <= nchains")
options("BRugsSamplesFirstChain" = first)
}
|
/scratch/gouwar.j/cran-all/cranData/BRugs/R/samples.set.firstChain.R
|
"samplesSetLastChain" <-
function(last)
# Set the lastChain field
{
if(!is.numeric(last))
stop("last ", "must be numeric")
last <- as.integer(last)
if(!(last %in% 1:getNumChains()))
stop("it is required to have 1 <= last <= nchains")
options("BRugsSamplesLastChain" = last)
}
|
/scratch/gouwar.j/cran-all/cranData/BRugs/R/samples.set.lastChain.R
|
"samplesSetThin" <-
function(thin)
# Set the thin field
{
if(!is.numeric(thin))
stop("thin ", "must be numeric")
options("BRugsSamplesThin" = thin)
}
|
/scratch/gouwar.j/cran-all/cranData/BRugs/R/samples.set.thin.R
|
"samplesSize" <-
function(node)
# Size of stored sample of single component of OpenBUGS name
{
sM <- samplesMonitors(node)
# Doesn't distinguish between nodes not in the model and nodes not monitored
# so returns 0 for non-existent nodes
if (any(grep("^no monitor set", sM))) return(0)
if (any(grep("^model has probably not yet been updated", sM))) return(0)
if (any(grep("^inference can not be made", sM))) { warning(sM); return(0) }
if(length(sM) > 1 || sM != node)
stop("node must be a scalar variable from the model")
size <- .OpenBUGS(c(.SamplesGlobalsCmd(node), "SamplesEmbed.SampleSize"),
c("CmdInterpreter","Integer"))[[2]]
size
}
|
/scratch/gouwar.j/cran-all/cranData/BRugs/R/samples.size.R
|
"samplesStats" <-
function(node, beg = samplesGetBeg(), end = samplesGetEnd(),
firstChain = samplesGetFirstChain(), lastChain = samplesGetLastChain(),
thin = samplesGetThin())
# Calculate statistics for monitored node
{
oldBeg <- samplesGetBeg()
oldEnd <- samplesGetEnd()
oldFirstChain <- samplesGetFirstChain()
oldLastChain <- samplesGetLastChain()
oldThin <- samplesGetThin()
on.exit({
samplesSetBeg(oldBeg)
samplesSetEnd(oldEnd)
samplesSetFirstChain(oldFirstChain)
samplesSetLastChain(oldLastChain)
samplesSetThin(oldThin)
})
samplesSetBeg(beg)
samplesSetEnd(end)
samplesSetFirstChain(firstChain)
samplesSetLastChain(lastChain)
thin <- max(c(thin, 1))
samplesSetThin(thin)
if (is.R()){
result <- data.frame(mean=NULL, sd=NULL, MC_error = NULL, val2.5pc=NULL,
median=NULL, val97.5pc=NULL, start = NULL, sample=NULL)
} else {
result <- data.frame(mean=numeric(), sd=numeric(), MC.error = numeric(),
val2.5pc=numeric(), median=numeric(), val97.5pc=numeric(),
start = numeric(), sample=numeric())
}
for(i in seq(along=node)){
command <- paste(.SamplesGlobalsCmd(node[i]), "SamplesEmbed.StatsGuard;SamplesEmbed.Stats")
.CmdInterpreter(command)
buffer <- file.path(tempdir(), "buffer.txt")
rlb <- readLines(buffer)
len <- length(rlb)
if (len > 1)
result <- rbind(result, read.table(buffer))
else{
if(length(grep("val97.5pc", rlb)))
message("Variable ", node[i], " has probably not been updated")
else
message("Variable ", node[i], ": ", rlb)
}
}
return(result)
}
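## Illustrative monitor/update/summarise sketch, kept in comments; node names
## are made up and the model is assumed to be compiled and initialised:
# samplesSet(c("alpha", "beta"))     # start monitoring
# modelUpdate(5000)
# samplesStats(c("alpha", "beta"))   # mean, sd, MC error and quantiles per component
# samplesStats("alpha", beg = 2501)  # discard the first 2500 kept iterations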
|
/scratch/gouwar.j/cran-all/cranData/BRugs/R/samples.stats.R
|
"setValues" <- function(nodeLabel, values)
# set value of node
{
nodeLabel <- as.character(nodeLabel)
# NA handling, now internal in OpenBUGS?
# cv <- currentValues(nodeLabel)
# DoNotSetNA <- is.na(values) & !is.na(cv)
# if(any(DoNotSetNA))
# warning("Some NA values formerly had a non-NA value -- left unchanged")
# values[DoNotSetNA] <- cv[DoNotSetNA]
nodeSize <- .OpenBUGS(c("BugsRobjects.SetVariable", "BugsRobjects.GetSize"),
c("CharArray","Integer"),
c(nodeLabel,NA))[[2]]
if(nodeSize == -1)
stop(nodeLabel, " is not a node in BUGS model")
numChains <- getNumChains()
if(length(values) != nodeSize*numChains)
stop("length(values) does not correspond to the node size and number of chains")
.OpenBUGS(c("BugsRobjects.SetVariable", "BugsRobjects.SetValues"),
c("CharArray","RealArray"),
list(nodeLabel,as.double(values)))[[2]]
invisible()
}
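## Illustrative usage, kept in comments; 'x' is a made-up node. With a node of
## size 3 and getNumChains() == 2, length(values) must be 3 * 2 = 6:
# setValues("x", rep(0.5, 6))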
|
/scratch/gouwar.j/cran-all/cranData/BRugs/R/set.values.R
|
"summaryClear" <-
function(node)
# Clear summary monitor for node in OpenBUGS model
{
nodeName <- sQuote(node)
for(i in seq(along=nodeName)){
command <- paste("SummaryEmbed.SetVariable(", nodeName[i], "); SummaryEmbed.StatsGuard;",
"SummaryEmbed.Clear")
.CmdInterpreter(command)
buffer <- file.path(tempdir(), "buffer.txt")
rlb <- readLines(buffer)
if(getOption("BRugsVerbose"))
message("Variable ", nodeName[i], ": ", rlb)
}
invisible()
}
|
/scratch/gouwar.j/cran-all/cranData/BRugs/R/summary.clear.R
|
"summarySet" <-
function(node)
# Set summary monitor for node in OpenBUGS model
{
nodeName <- sQuote(node)
for(i in seq(along=nodeName)){
command <- paste("SummaryEmbed.SetVariable(", nodeName[i], "); SummaryEmbed.SetGuard;",
"SummaryEmbed.Set")
.CmdInterpreter(command)
buffer <- file.path(tempdir(), "buffer.txt")
rlb <- readLines(buffer)
if(getOption("BRugsVerbose"))
message("Variable ", nodeName[i], ": ", rlb)
}
invisible()
}
|
/scratch/gouwar.j/cran-all/cranData/BRugs/R/summary.set.R
|
"summaryStats" <-
function(node)
# Calculates statistics for summary monitor associated with node in OpenBUGS model
{
nodeName <- sQuote(node)
if (is.R())
result <- data.frame(mean=NULL, sd=NULL, val2.5pc=NULL,
median=NULL, val97.5pc=NULL, sample=NULL)
else
result <- data.frame(mean=numeric(), sd=numeric(), val2.5pc=numeric(),
median=numeric(), val97.5pc=numeric(), sample=numeric())
for(i in seq(along=nodeName)){
command <- paste("SummaryEmbed.SetVariable(", nodeName[i], "); SummaryEmbed.StatsGuard;",
"SummaryEmbed.Stats")
.CmdInterpreter(command)
buffer <- file.path(tempdir(), "buffer.txt")
rlb <- readLines(buffer)
len <- length(rlb)
if (len > 1)
result <- rbind(result, read.table(buffer))
else{
if(length(grep("val97.5pc", rlb)))
message("Variable ", nodeName[i], " has probably not been updated")
else if(getOption("BRugsVerbose"))
message("Variable ", nodeName[i], ": ", rlb)
}
}
return(result)
}
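## Illustrative sketch, kept in comments; 'mu' is a made-up node. Summary
## monitors accumulate running statistics rather than storing every draw, so
## they are typically cheaper than full samples monitors:
# summarySet("mu")
# modelUpdate(10000)
# summaryStats("mu")
# summaryClear("mu")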
|
/scratch/gouwar.j/cran-all/cranData/BRugs/R/summary.stats.R
|
help.BRugs <- function(browser = getOption("browser"))
{
## stolen from help.start()
# if(is.null(browser))
# stop("Invalid browser name, check options(\"browser\").")
# writeLines(strwrap(paste("If", browser, "is already running,",
# "it is *not* restarted, and you must",
# "switch to its window."),
# exdent = 4))
# writeLines("Otherwise, be patient ...")
# browseURL(system.file("OpenBUGS", "docu", "BRugs Manual.html", package="BRugs"))
# invisible("")
## Andrew now omits the BRugs introduction, hence just pointing to help.WinBUGS these days:
help.WinBUGS(browser = browser)
}
help.WinBUGS <- function(browser = getOption("browser"))
{
# stolen from help.start()
if(is.null(browser))
stop("Invalid browser name, check options(\"browser\").")
writeLines(strwrap(paste("If", browser, "is already running,",
"it is *not* restarted, and you must",
"switch to its window."),
exdent = 4))
writeLines("Otherwise, be patient ...")
browseURL(file.path(options()$OpenBUGSdoc, "Manuals", "Contents.html"))
invisible("")
}
|
/scratch/gouwar.j/cran-all/cranData/BRugs/R/unix/help.R
|
.OpenBUGS.platform <- function(cmds, cmdtypes, args)
{
.OpenBUGS.helper(cmds, cmdtypes, args)
}
|
/scratch/gouwar.j/cran-all/cranData/BRugs/R/unix/internal.R
|
if (is.R()){
".onLoad" <- function(lib, pkg){
## TODO any need for these to be user specifiable?
options("BRugsTmpdir" = gsub("\\\\", "/", tempdir()))
options("BRugsExtFile" = paste(basename(tempfile()), ".bug", sep=""))
options(OpenBUGS = "/usr/local/lib/OpenBUGS/lib")
options(OpenBUGSdoc = "/usr/local/lib/OpenBUGS/doc")
options(OpenBUGSExamples = paste(options()$OpenBUGSdoc, "Examples", sep="/"))
if(is.null(getOption("BRugsVerbose")))
options("BRugsVerbose" = TRUE)
.initGlobals()
ver <- system("echo \"modelQuit()\" | /usr/local/lib/OpenBUGS/lib/../bin/OpenBUGS", intern=TRUE)
ver <- sub("OpenBUGS version (([0-9]\\.)+[0-9]).+","\\1",ver[1])
packageStartupMessage("Welcome to BRugs connected to OpenBUGS version ", ver)
}
".onUnload" <- function(libpath){
}
## Windows-only
loadOpenBUGS <- function(dir) {
}
}
|
/scratch/gouwar.j/cran-all/cranData/BRugs/R/unix/zzz.R
|
findOpenBUGS <- function()
{
dir <- Sys.getenv("OpenBUGS_PATH")
if(!nchar(dir)){
deps <- packageDescription("BRugs", fields="SystemRequirements")
version.req <- gsub(".*OpenBUGS ?\\(>= ?(.+)\\).*", "\\1", deps)
ob.reg <- try(readRegistry("Software\\OpenBUGS", "HLM", view = "32-bit"), silent = TRUE)
if (inherits(ob.reg, "try-error")) {
warning("OpenBUGS ", version.req, " or greater must be installed\n(if so, this indicates missing registry keys of OpenBUGS).\nSetting the environment variable 'OpenBUGS_PATH' in advance of loading 'BRugs' overwrites the path.\nSee ?loadOpenBUGS in order to load OpenBUGS manually.")
return()
}
rnames <- names(ob.reg)
version.full <- gsub("OpenBUGS ", "", rnames)
## remove suffixes from development versions, converts e.g. 3.2.1alpha to 3.2.1
version.inst <- gsub("(.+[0-9]+)[a-zA-Z]+$","\\1", version.full)
    if(length(version.inst) > 1){
id <- which(apply(outer(version.inst, version.inst, Vectorize(compareVersion, c("a", "b"))), 1, function(x) all(x >= 0)))
id <- max(id) # if more than one release with same number, arbitrarily choose last one in registry
version.inst <- version.inst[id]
version.full <- version.full[id]
rnames <- rnames[id]
}
if (compareVersion(version.inst, version.req) < 0) {
warning("Found OpenBUGS version ", version.inst, ".\n Requires ", version.req, " or greater.\nSetting the environment variable 'OpenBUGS_PATH' in advance of loading 'BRugs' overwrites the path.\nSee ?loadOpenBUGS in order to load OpenBUGS manually.")
return()
}
## OpenBUGS installation location
dir <- readRegistry(paste("Software","OpenBUGS",rnames,sep="\\"), "HLM", view = "32-bit")[["InstallPath"]]
} else {
if(!file.exists(file.path(dir, "libOpenBUGS.dll"))){
warning("Environment variable OpenBUGS_PATH found but cannot access ", file.path(dir, "libOpenBUGS.dll"))
return()
}
version.inst <- version.full <- NA
}
list(dir=dir, version=version.full)
}
|
/scratch/gouwar.j/cran-all/cranData/BRugs/R/windows/findOpenBUGS.R
|
help.BRugs <- function(browser = getOption("browser"))
{
## stolen from help.start()
# a <- system.file("OpenBUGS", "Manuals", "WinBUGS Manual.html", package="BRugs")
# if (!file.exists(a))
# stop("I can't find the html help")
# a <- chartr("/", "\\", a)
# message("If nothing happens, you should open `", a, "' yourself")
# browseURL(a, browser = browser)
# invisible("")
## Andrew now omits the BRugs introduction, hence just pointing to help.WinBUGS these days:
help.WinBUGS(browser = browser)
}
help.WinBUGS <- function(browser = getOption("browser"))
{
# stolen from help.start()
a <- file.path(options()$OpenBUGS, "Manuals", "Contents.html")
if (!file.exists(a))
stop("HTML help not found in file ", a)
if (is.R())
a <- chartr("/", "\\", a)
else
a <- gsub ("/", "\\\\", a)
message("If nothing happens, you should open `", a, "' yourself")
browseURL(a, browser = browser)
invisible("")
}
|
/scratch/gouwar.j/cran-all/cranData/BRugs/R/windows/help.R
|
### Run a list of OpenBUGS API command strings
.OpenBUGS.platform <- function(cmds, cmdtypes, args)
{
if (.Platform$r_arch == "x64"){
out <- .OpenBUGS.helper(cmds, cmdtypes, args)
}
else if (.Platform$r_arch == "i386") {
ncmds <- length(cmds)
out <- vector(ncmds, mode="list")
for (i in 1:ncmds) {
out[[i]] <- switch(cmdtypes[i],
"CmdInterpreter" = {
res <- .C("CmdInterpreter", cmds[i], nchar(cmds[i]), integer(1), PACKAGE="libOpenBUGS")
handleRes(res[[3]])
res
},
"Integer" = {
values <- .C("Integer", cmds[i], nchar(cmds[i]), integer(1), integer(1), PACKAGE="libOpenBUGS")
handleRes(values[[4]])
as.integer(values[[3]])
},
"CharArray" = {
values <- .C("CharArray", cmds[i], nchar(cmds[i]), args[[i]], nchar(args[[i]]), integer(1), PACKAGE="libOpenBUGS")
handleRes(values[[5]])
values[[3]]
},
"RealArray" = {
values <- .C("RealArray", cmds[i], nchar(cmds[i]), args[[i]], length(args[[i]]), integer(1), PACKAGE="libOpenBUGS")
handleRes(values[[5]])
values[[3]]
})
}
}
else {
stop("Unknown architecture ", .Platform$r_arch, " , should be i386 or x64")
}
out
}
|
/scratch/gouwar.j/cran-all/cranData/BRugs/R/windows/internal.R
|
if (is.R()){
".onLoad" <- function(lib, pkg){
if (.Platform$r_arch == "i386") {
.onLoad.i386(lib, pkg)
}
else if (.Platform$r_arch == "x64"){
.onLoad.x64(lib, pkg)
}
else {
stop("Unknown architecture ", .Platform$r_arch, " , should be i386 or x64")
}
}
".onUnload" <- function(libpath){
if (.Platform$r_arch == "i386") {
.onUnload.i386(libpath)
}
else if (.Platform$r_arch == "x64"){
.onUnload.x64(libpath)
}
else {
stop("Unknown architecture ", .Platform$r_arch, " , should be i386 or x64")
}
}
}
|
/scratch/gouwar.j/cran-all/cranData/BRugs/R/windows/zzz.R
|
".onLoad.i386" <- function(lib, pkg){
ob <- findOpenBUGS()
loadOpenBUGS(ob$dir)
msg <- paste("Welcome to BRugs connected to OpenBUGS")
if (!is.na(ob$version))
msg <- paste(msg, "version", ob$version)
else msg <- paste(msg, "in directory", ob$dir)
packageStartupMessage(msg)
}
".onUnload.i386" <- function(libpath){
if(is.loaded("CmdInterpreter")) {
libname <- paste(options()$OpenBUGS, "libOpenBUGS.dll", sep="/")
dyn.unload(libname)
}
}
## Load OpenBUGS from specified location
loadOpenBUGS <- function(dir) {
libname <- paste(dir, "libOpenBUGS.dll", sep="/")
if (!file.exists(libname)) {
warning("Shared library \"libOpenBUGS.dll\" not found in ", dir)
return(FALSE)
}
options(OpenBUGS = dir)
dyn.load(libname)
len <- nchar(dir)
.C("SetWorkingDir", as.character(dir), len, PACKAGE="libOpenBUGS")
## Set temporary dir for "buffer.txt" output
tempDir <- gsub("\\\\", "/", tempdir())
.C("SetTempDir", as.character(tempDir), nchar(tempDir), PACKAGE="libOpenBUGS")
command <- "BugsMappers.SetDest(2)"
.CmdInterpreter(command)
if(is.null(getOption("BRugsVerbose")))
options("BRugsVerbose" = TRUE)
.initGlobals()
options(OpenBUGSExamples = paste(dir, "Examples", sep="/"))
invisible()
}
|
/scratch/gouwar.j/cran-all/cranData/BRugs/R/windows/zzz.i386.R
|
if (is.R()){
".onLoad.x64" <- function(lib, pkg){
## TODO any need for these to be user specifiable?
options("BRugsTmpdir" = gsub("\\\\", "/", tempdir()))
options("BRugsExtFile" = paste(basename(tempfile()), ".bug", sep=""))
ob <- findOpenBUGS()
options(OpenBUGS = ob$dir)
options(OpenBUGSdoc = ob$dir)
options(OpenBUGSExamples = paste(ob$dir, "Examples", sep="/"))
if(is.null(getOption("BRugsVerbose")))
options("BRugsVerbose" = TRUE)
.initGlobals()
msg <- paste("Welcome to BRugs connected to OpenBUGS")
if (!is.na(ob$version))
msg <- paste(msg, "version", ob$version)
else msg <- paste(msg, "in directory", ob$dir)
packageStartupMessage(msg)
    pathtoBUGS <- chartr("/", "\\", ob$dir)  ## chartr (not gsub) so the backslash is inserted literally
oldpath <- Sys.getenv("PATH")
if(!length(grep(pathtoBUGS, oldpath, fixed=TRUE)))
Sys.setenv(PATH=paste(oldpath, pathtoBUGS, sep=";"))
}
".onUnload.x64" <- function(libpath){
}
}
|
/scratch/gouwar.j/cran-all/cranData/BRugs/R/windows/zzz.x64.R
|
"write.datafile" <-
function (datalist, towhere, fill = TRUE){
if (!is.list(datalist) || is.data.frame(datalist))
stop("First argument to write.datafile must be a list.")
cat(formatdata(datalist), file = towhere, fill = fill)
}
|
/scratch/gouwar.j/cran-all/cranData/BRugs/R/write.datafile.R
|
writeModel <- function(model, con = "model.txt", digits = 5)
{
if (is.R()){
model.text <- c("model", replaceScientificNotationR(body(model), digits = digits))
# "[\+\-]?\d*\.?[Ee]?[\+\-]?\d*"
} else {
## In S-PLUS the source code of a function can be obtained with
## as.character(function_name). This omits the "function_name <- function()" piece
model.text <- paste("model", as.character(model))
}
model.text <- gsub("%_%", "", model.text)
if (!is.R()){
## In S-PLUS, scientific notation is different than it is in WinBUGS.
## Change the format of any numbers in scientific notation.
model.text <- replaceScientificNotationS(model.text)
## remove the "invisible()" line.
model.text <- gsub("invisible[ ]*\\([ ]*\\)", "", model.text)
}
writeLines(model.text, con = con)
}
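## Illustrative usage, kept in comments: a BUGS model written as an R function
## ('linemodel' and its data names are made-up examples) is deparsed to a file
## that OpenBUGS can read:
# linemodel <- function() {
#     for (i in 1:N) {
#         y[i] ~ dnorm(mu[i], tau)
#         mu[i] <- alpha + beta * x[i]
#     }
#     alpha ~ dnorm(0, 1.0E-6)
#     beta ~ dnorm(0, 1.0E-6)
#     tau ~ dgamma(0.001, 0.001)
# }
# writeModel(linemodel, con = file.path(tempdir(), "linemodel.txt"))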
replaceScientificNotationR <- function(bmodel, digits = 5){
env <- new.env()
assign("rSNRidCounter", 0, envir=env)
replaceID <- function(bmodel, env, digits = 5){
for(i in seq_along(bmodel)){
if(length(bmodel[[i]]) == 1){
if(as.character(bmodel[[i]]) %in% c(":", "[", "[[")) return(bmodel)
if((typeof(bmodel[[i]]) %in% c("double", "integer")) && ((abs(bmodel[[i]]) < 1e-3) || (abs(bmodel[[i]]) > 1e+4))){
counter <- get("rSNRidCounter", envir=env) + 1
assign("rSNRidCounter", counter, envir=env)
id <- paste("rSNRid", counter, sep="")
assign(id, formatC(bmodel[[i]], digits=digits, format="E"), envir=env)
bmodel[[i]] <- id
}
} else {
bmodel[[i]] <- replaceID(bmodel[[i]], env, digits = digits)
}
}
bmodel
}
bmodel <- deparse(replaceID(bmodel, env, digits = digits), control = NULL)
for(i in ls(env)){
bmodel <- gsub(paste('"', i, '"', sep=''), get(i, envir=env), bmodel, fixed=TRUE)
}
bmodel
}
replaceScientificNotationS <- function(text){
## Change the format of any numbers in "text" that are in S-PLUS
## scientific notation to WinBUGS scientific notation
## First, handle the positive exponents
## Find the first instance
## Note that the number may or may not have a decimal point.
sciNoteLoc <- regexpr("[0-9]*\\.{0,1}[0-9]*e\\+0[0-9]{2}", text)
## For every instance, replace the number
while(sciNoteLoc > -1){
sciNoteEnd <- sciNoteLoc + attr(sciNoteLoc, "match.length")-1
sciNote <- substring(text, sciNoteLoc, sciNoteEnd)
text <- gsub(sciNote, toSingleS4(sciNote), text)
sciNoteLoc <- regexpr("[0-9]*\\.{0,1}[0-9]*e\\+0[0-9]{2}", text)
}
## Then, handle the negative exponents
## Find the first instance
sciNoteLoc <- regexpr("[0-9]*\\.{0,1}[0-9]*e\\-0[0-9]{2}", text)
## For every instance, replace the number
while(sciNoteLoc > -1){
sciNoteEnd <- sciNoteLoc + attr(sciNoteLoc, "match.length")-1
sciNote <- substring(text, sciNoteLoc, sciNoteEnd)
text <- gsub(sciNote, toSingleS4(sciNote), text)
sciNoteLoc <- regexpr("[0-9]*\\.{0,1}[0-9]*e\\-0[0-9]{2}", text)
}
text
}
|
/scratch/gouwar.j/cran-all/cranData/BRugs/R/write.model.R
|
## See unix/zzz.R, windows/zzz.R for platform specific .onLoad functions
if (is.R()){
.initGlobals <- function(){
options("BRugsSamplesBeg" = 1)
options("BRugsSamplesEnd" = 10000000)
options("BRugsSamplesFirstChain" = 1)
options("BRugsSamplesLastChain" = 1)
options("BRugsSamplesThin" = 1)
options("BRugsSamplesVariable" = "*")
options("BRugsNextChain" = 1) # index of chain which needs to be initialized next
options("BRugsPrec" = 4)
}
## Override R's sQuote() (which since R 2.6.0 may produce "fancy" quotes meant
## for human-readable text) within the BRugs namespace:
## OpenBUGS command strings require plain single quotes.
sQuote <- function(x) paste("'", x, "'", sep="")
} else { # ends if (is.R())
".First.lib" <- function(lib.loc, section)
{
dyn.open(system.file("OpenBUGS", "brugs.dll", package="BRugs"))
## sets path / file variables and initializes subsystems
root <- file.path(system.file("OpenBUGS", package="BRugs"))
len <- nchar(root)
tempDir <- gsub("\\\\", "/", tempdir())
.C("SetRoot", as.character(root), len)
.C("SetTempDir", as.character(tempDir), nchar(tempDir))
command <- "BugsMappers.SetDest(2)"
.C("CmdInterpreter", as.character(command), nchar(command), integer(1))
if(is.null(getOption("BRugsVerbose")))
options("BRugsVerbose" = TRUE)
invisible()
}
.tempDir <- getwd()
tempdir <- function(){ .tempDir }
} # ends else
|
/scratch/gouwar.j/cran-all/cranData/BRugs/R/zzz.R
|
#############################################################################
#' @import lattice
#' @importFrom graphics abline axis box boxplot dotchart hist legend lines mtext par plot plot.design points polygon segments text title
#' @importFrom stats dbinom density dnorm fitted fivenum median pnorm pt qchisq qnorm qqline qqnorm qt quantile rbinom rnorm rstandard sd shapiro.test var
#' @importFrom utils combn
#' @importFrom e1071 skewness kurtosis
#'
NULL
###############################################################################
#
#' Daily price returns (in pence) of Abbey National shares between 7/31/91 and
#' 10/8/91
#'
#' Data used in problem 6.39
#'
#'
#' @name Abbey
#' @docType data
#' @format A data frame/tibble with 50 observations on one variable
#' \describe{
#' \item{price}{daily price returns (in pence) of Abbey National shares}
#' }
#'
#' @source Buckle, D. (1995), Bayesian Inference for Stable Distributions,
#' \emph{Journal of the American Statistical Association}, 90, 605-613.
#'
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' qqnorm(Abbey$price)
#' qqline(Abbey$price)
#' t.test(Abbey$price, mu = 300)
#' hist(Abbey$price, main = "Exercise 6.39",
#' xlab = "daily price returns (in pence)",
#' col = "blue")
#'
"Abbey"
#' Three samples to illustrate analysis of variance
#'
#' Data used in Exercise 10.1
#'
#'
#' @name Abc
#' @docType data
#' @format A data frame/tibble with 54 observations on two variables
#' \describe{
#' \item{response}{a numeric vector}
#' \item{group}{a character vector \code{A}, \code{B}, and \code{C}}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' boxplot(response ~ group, col=c("red", "blue", "green"), data = Abc )
#' anova(lm(response ~ group, data = Abc))
#'
"Abc"
#' Crimes reported in Abilene, Texas
#'
#' Data used in Exercise 1.23 and 2.79
#'
#'
#' @name Abilene
#' @docType data
#' @format A data frame/tibble with 16 observations on three variables
#' \describe{
#' \item{crimetype}{a character variable with values \code{Aggravated
#' assault}, \code{Arson}, \code{Burglary}, \code{Forcible rape}, \code{Larceny
#' theft}, \code{Murder}, \code{Robbery}, and \code{Vehicle theft}.}
#' \item{year}{a factor with levels \code{1992} and \code{1999}}
#' \item{number}{number of reported crimes}
#' }
#'
#' @source \emph{Uniform Crime Reports}, US Dept. of Justice.
#'
#'
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' par(mfrow = c(2, 1))
#' barplot(Abilene$number[Abilene$year=="1992"],
#' names.arg = Abilene$crimetype[Abilene$year == "1992"],
#' main = "1992 Crime Stats", col = "red")
#' barplot(Abilene$number[Abilene$year=="1999"],
#' names.arg = Abilene$crimetype[Abilene$year == "1999"],
#' main = "1999 Crime Stats", col = "blue")
#' par(mfrow = c(1, 1))
#'
#' \dontrun{
#' library(ggplot2)
#' ggplot2::ggplot(data = Abilene, aes(x = crimetype, y = number, fill = year)) +
#' geom_bar(stat = "identity", position = "dodge") +
#' theme_bw() +
#' theme(axis.text.x = element_text(angle = 30, hjust = 1))
#' }
#'
"Abilene"
#' Perceived math ability for 13-year olds by gender
#'
#' Data used in Exercise 8.57
#'
#'
#' @name Ability
#' @docType data
#' @format A data frame/tibble with 400 observations on two variables
#' \describe{
#' \item{gender}{a factor with levels \code{girls} and \code{boys}}
#' \item{ability}{a factor with levels \code{hopeless}, \code{belowavg}, \code{average}, \code{aboveavg}, and \code{superior}}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' CT <- xtabs(~gender + ability, data = Ability)
#' CT
#' chisq.test(CT)
#'
"Ability"
#' Abortion rate by region of country
#'
#' Data used in Exercise 8.51
#'
#'
#' @name Abortion
#' @docType data
#' @format A data frame/tibble with 51 observations on the following 10 variables:
#' \describe{
#' \item{state}{a character variable with values \code{alabama},
#' \code{alaska}, \code{arizona}, \code{arkansas}, \code{california},
#' \code{colorado}, \code{connecticut}, \code{delaware}, \code{dist of columbia},
#' \code{florida,} \code{georgia}, \code{hawaii}, \code{idaho}, \code{illinois},
#' \code{indiana}, \code{iowa}, \code{kansas}, \code{kentucky}, \code{louisiana},
#' \code{maine}, \code{maryland}, \code{massachusetts}, \code{michigan},
#' \code{minnesota}, \code{mississippi}, \code{missouri}, \code{montana},
#' \code{nebraska}, \code{nevada}, \code{new hampshire}, \code{new jersey},
#' \code{new mexico}, \code{new york}, \code{north carolina}, \code{north dakota},
#' \code{ohio}, \code{oklahoma}, \code{oregon}, \code{pennsylvania}, \code{rhode
#' island}, \code{south carolina}, \code{south dakota}, \code{tennessee},
#' \code{texas}, \code{utah}, \code{vermont}, \code{virginia}, \code{washington},
#' \code{west virginia}, \code{wisconsin}, and \code{wyoming}}
#' \item{region}{a character variable with values \code{midwest}, \code{northeast},
#' \code{south}, and \code{west}}
#' \item{regcode}{a numeric vector}
#' \item{rate1988}{a numeric vector}
#' \item{rate1992}{a numeric vector}
#' \item{rate1996}{a numeric vector}
#' \item{provide1988}{a numeric vector}
#' \item{provide1992}{a numeric vector}
#' \item{lowhigh}{a numeric vector}
#' \item{rate}{a factor with levels \code{Low} and \code{High}}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' T1 <- xtabs(~region + rate, data = Abortion)
#' T1
#' chisq.test(T1)
#'
"Abortion"
#' Number of absent days for 20 employees
#'
#' Data used in Exercise 1.28
#'
#'
#' @name Absent
#' @docType data
#' @format A data frame/tibble with 20 observations on one variable
#' \describe{
#' \item{days}{days absent}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' CT <- xtabs(~ days, data = Absent)
#' CT
#' barplot(CT, col = "pink", main = "Exercise 1.28")
#' plot(ecdf(Absent$days), main = "ECDF")
#'
"Absent"
#' Math achievement test scores by gender for 25 high school students
#'
#' Data used in Example 7.14 and Exercise 10.7
#'
#'
#' @name Achieve
#' @docType data
#' @format A data frame/tibble with 25 observations on two variables
#' \describe{
#' \item{score}{mathematics achievement score}
#' \item{gender}{a factor with 2 levels \code{boys} and \code{girls}}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' anova(lm(score ~ gender, data = Achieve))
#' t.test(score ~ gender, var.equal = TRUE, data = Achieve)
#'
"Achieve"
#' Number of ads versus number of sales for a retailer of satellite dishes
#'
#' Data used in Exercise 9.15
#'
#'
#' @name Adsales
#' @docType data
#' @format A data frame/tibble with six observations on three variables
#' \describe{
#' \item{month}{a character vector listing month}
#' \item{ads}{a numeric vector containing number of ads}
#' \item{sales}{a numeric vector containing number of sales}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' plot(sales ~ ads, data = Adsales, main = "Exercise 9.15")
#' mod <- lm(sales ~ ads, data = Adsales)
#' abline(mod, col = "red")
#' summary(mod)
#' predict(mod, newdata = data.frame(ads = 6), interval = "conf", level = 0.99)
#'
"Adsales"
#' Aggressive tendency scores for a group of teenage members of a street gang
#'
#' Data used in Exercises 1.66 and 1.81
#'
#'
#' @name Aggress
#' @docType data
#' @format A data frame/tibble with 28 observations on one variable
#' \describe{
#' \item{aggres}{measure of aggressive tendency, ranging from 10-50}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' with(data = Aggress,
#' EDA(aggres))
#' # OR
#' IQR(Aggress$aggres)
#' diff(range(Aggress$aggres))
#'
"Aggress"
#' Monthly payments per person for families in the AFDC federal program
#'
#' Data used in Exercises 1.91 and 3.68
#'
#'
#' @name Aid
#' @docType data
#' @format A data frame/tibble with 51 observations on two variables
#' \describe{
#' \item{state}{a factor with levels \code{Alabama},
#' \code{Alaska}, \code{Arizona}, \code{Arkansas}, \code{California},
#' \code{Colorado}, \code{Connecticut}, \code{Delaware}, \code{District of
#' Columbia}, \code{Florida}, \code{Georgia}, \code{Hawaii}, \code{Idaho},
#' \code{Illinois}, \code{Indiana}, \code{Iowa}, \code{Kansas}, \code{Kentucky},
#' \code{Louisiana}, \code{Maine}, \code{Maryland}, \code{Massachusetts},
#' \code{Michigan}, \code{Minnesota}, \code{Mississippi}, \code{Missouri},
#' \code{Montana}, \code{Nebraska}, \code{Nevada}, \code{New Hampshire}, \code{New
#' Jersey}, \code{New Mexico}, \code{New York}, \code{North Carolina}, \code{North
#' Dakota}, \code{Ohio}, \code{Oklahoma}, \code{Oregon}, \code{Pennsylvania},
#' \code{Rhode Island}, \code{South Carolina}, \code{South Dakota},
#' \code{Tennessee}, \code{Texas}, \code{Utah}, \code{Vermont}, \code{Virginia},
#' \code{Washington}, \code{West Virginia}, \code{Wisconsin}, and \code{Wyoming}}
#' \item{payment}{average monthly payment per person in a family}
#' }
#'
#' @source US Department of Health and Human Services, 1993.
#'
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' hist(Aid$payment, xlab = "payment", main =
#' "Average monthly payment per person in a family",
#' col = "lightblue")
#' boxplot(Aid$payment, col = "lightblue")
#' dotplot(state ~ payment, data = Aid)
#'
"Aid"
#' Incubation times for 295 patients thought to be infected with HIV by a blood
#' transfusion
#'
#' Data used in Exercise 6.60
#'
#'
#' @name Aids
#' @docType data
#' @format A data frame/tibble with 295 observations on three variables
#' \describe{
#' \item{duration}{time (in months) from HIV infection to the clinical manifestation of full-blown AIDS}
#' \item{age}{age (in years) of patient}
#' \item{group}{a numeric vector}
#' }
#'
#' @source Kalbfleisch, J. and Lawless, J. (1989), An analysis of the data on transfusion
#' related AIDS, \emph{Journal of the American Statistical Association, 84}, 360-372.
#'
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' with(data = Aids,
#' EDA(duration)
#' )
#' with(data = Aids,
#' t.test(duration, mu = 30, alternative = "greater")
#' )
#' with(data = Aids,
#' SIGN.test(duration, md = 24, alternative = "greater")
#' )
#'
"Aids"
#' Aircraft disasters in five different decades
#'
#' Data used in Exercise 1.12
#'
#'
#' @name Airdisasters
#' @docType data
#' @format A data frame/tibble with 141 observations on the following variables
#' \describe{
#' \item{year}{a numeric vector indicating the year of an aircraft accident}
#' \item{deaths}{a numeric vector indicating the number of deaths of an aircraft accident}
#' \item{decade}{a character vector indicating the decade of an aircraft accident}
#' }
#'
#' @source 2000 \emph{World Almanac and Book of Facts}.
#'
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' par(las = 1)
#' stripchart(deaths ~ decade, data = Airdisasters,
#' subset = decade != "1930s" & decade != "1940s",
#' method = "stack", pch = 19, cex = 0.5, col = "red",
#' main = "Aircraft Disasters 1950 - 1990",
#' xlab = "Number of fatalities")
#' par(las = 0)
#'
"Airdisasters"
#' Percentage of on-time arrivals and number of complaints for 11 airlines
#'
#' Data for Example 2.9
#'
#'
#' @name Airline
#' @docType data
#' @format A data frame/tibble with 11 observations on three variables
#' \describe{
#' \item{airline}{a character variable with values \code{Alaska},
#' \code{Amer West}, \code{American}, \code{Continental}, \code{Delta},
#' \code{Northwest}, \code{Pan Am}, \code{Southwest}, \code{TWA},
#' \code{United}, and \code{USAir}}
#' \item{ontime}{a numeric vector}
#' \item{complaints}{complaints per 1000 passengers}
#' }
#'
#' @source Transportation Department.
#'
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' with(data = Airline,
#' barplot(complaints, names.arg = airline, col = "lightblue",
#' las = 2)
#' )
#' plot(complaints ~ ontime, data = Airline, pch = 19, col = "red",
#' xlab = "On time", ylab = "Complaints")
#'
"Airline"
#' Ages at which 14 female alcoholics began drinking
#'
#' Data used in Exercise 5.79
#'
#'
#' @name Alcohol
#' @docType data
#' @format A data frame/tibble with 14 observations on one variable
#' \describe{
#' \item{age}{age when individual started drinking}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' qqnorm(Alcohol$age)
#' qqline(Alcohol$age)
#' SIGN.test(Alcohol$age, md = 20, conf.level = 0.99)
#'
"Alcohol"
#' Allergy medicines by adverse events
#'
#' Data used in Exercise 8.22
#'
#'
#' @name Allergy
#' @docType data
#' @format A data frame/tibble with 406 observations on two variables
#' \describe{
#' \item{event}{a factor with levels \code{insomnia},
#' \code{headache}, and \code{drowsiness}}
#' \item{medication}{a factor with levels \code{seldane-d},
#' \code{pseudoephedrine}, and \code{placebo}}
#' }
#'
#' @source Marion Merrel Dow, Inc. Kansas City, Mo. 64114.
#'
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' T1 <- xtabs(~event + medication, data = Allergy)
#' T1
#' chisq.test(T1)
#'
"Allergy"
#' Recovery times for anesthetized patients
#'
#' Data used in Exercise 5.58
#'
#'
#' @name Anesthet
#' @docType data
#' @format A data frame/tibble with 10 observations on one variable
#' \describe{
#' \item{recover}{recovery time (in hours)}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' qqnorm(Anesthet$recover)
#' qqline(Anesthet$recover)
#' with(data = Anesthet,
#' t.test(recover, conf.level = 0.90)$conf
#' )
#'
"Anesthet"
#' Math test scores versus anxiety scores before the test
#'
#' Data used in Exercise 2.96
#'
#'
#' @name Anxiety
#' @docType data
#' @format A data frame/tibble with 20 observations on two variables
#' \describe{
#' \item{anxiety}{anxiety score before a major math test}
#' \item{math}{math test score}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' plot(math ~ anxiety, data = Anxiety, ylab = "score",
#' main = "Exercise 2.96")
#' with(data = Anxiety,
#' cor(math, anxiety)
#' )
#' linmod <- lm(math ~ anxiety, data = Anxiety)
#' abline(linmod, col = "purple")
#' summary(linmod)
#'
"Anxiety"
#' Level of apolipoprotein B and number of cups of coffee consumed per day for
#' 15 adult males
#'
#' Data used in Examples 9.2 and 9.9
#'
#'
#' @name Apolipop
#' @docType data
#' @format A data frame/tibble with 15 observations on two variables
#' \describe{
#' \item{coffee}{number of cups of coffee per day}
#' \item{apolipB}{level of apolipoprotein B}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' plot(apolipB ~ coffee, data = Apolipop)
#' linmod <- lm(apolipB ~ coffee, data = Apolipop)
#' summary(linmod)
#' summary(linmod)$sigma
#' anova(linmod)
#' anova(linmod)[2, 3]^.5
#' par(mfrow = c(2, 2))
#' plot(linmod)
#' par(mfrow = c(1, 1))
#'
"Apolipop"
#' Median costs of an appendectomy at 20 hospitals in North Carolina
#'
#' Data for Exercise 1.119
#'
#'
#' @name Append
#' @docType data
#' @format A data frame/tibble with 20 observations on one variable
#' \describe{
#' \item{fee}{fees for an appendectomy for a random sample of 20 hospitals in North Carolina}
#' }
#'
#' @source North Carolina Medical Database Commission, August 1994.
#'
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' fee <- Append$fee
#' ll <- mean(fee) - 2*sd(fee)
#' ul <- mean(fee) + 2*sd(fee)
#' limits <-c(ll, ul)
#' limits
#' fee[fee < ll | fee > ul]
#'
"Append"
#' Median costs of appendectomies at three different types of North Carolina
#' hospitals
#'
#' Data for Exercise 10.60
#'
#'
#' @name Appendec
#' @docType data
#' @format A data frame/tibble with 59 observations on two variables
#' \describe{
#' \item{cost}{median costs of appendectomies at hospitals across the state of North Carolina in 1992}
#' \item{region}{a vector classifying each hospital as rural, regional, or metropolitan}
#' }
#'
#' @source \emph{Consumer's Guide to Hospitalization Charges in North Carolina Hospitals}
#' (August 1994), North Carolina Medical Database Commission, Department of Insurance.
#'
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' boxplot(cost ~ region, data = Appendec, col = c("red", "blue", "cyan"))
#' anova(lm(cost ~ region, data = Appendec))
#'
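#' # A further illustrative step (not from the exercise): Tukey pairwise
#' # comparisons of the three regions as a follow-up to the ANOVA above.
#' TukeyHSD(aov(cost ~ region, data = Appendec))
#'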
"Appendec"
#' Aptitude test scores versus productivity in a factory
#'
#' Data for Exercises 2.1, 2.26, 2.35 and 2.51
#'
#'
#' @name Aptitude
#' @docType data
#' @format A data frame/tibble with 8 observations on two variables
#' \describe{
#' \item{aptitude}{aptitude test scores}
#' \item{product}{productivity scores}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' plot(product ~ aptitude, data = Aptitude, main = "Exercise 2.1")
#' model1 <- lm(product ~ aptitude, data = Aptitude)
#' model1
#' abline(model1, col = "red", lwd=3)
#' resid(model1)
#' fitted(model1)
#' cor(Aptitude$product, Aptitude$aptitude)
#'
"Aptitude"
#' Radiocarbon ages of observations taken from an archaeological site
#'
#' Data for Exercises 5.120, 10.20 and Example 1.16
#'
#'
#' @name Archaeo
#' @docType data
#' @format A data frame/tibble with 60 observations on two variables
#' \describe{
#' \item{age}{number of years before 1983 - the year the data were obtained}
#' \item{phase}{Ceramic Phase numbers}
#' }
#'
#' @source Cunliffe, B. (1984) and Naylor and Smith (1988).
#'
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' boxplot(age ~ phase, data = Archaeo, col = "yellow",
#' main = "Example 1.16", xlab = "Ceramic Phase", ylab = "Age")
#' anova(lm(age ~ as.factor(phase), data= Archaeo))
#'
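#' # A further illustrative check (not from the text): a Kruskal-Wallis test
#' # as a rank-based alternative to the one-way ANOVA above.
#' kruskal.test(age ~ as.factor(phase), data = Archaeo)
#'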
"Archaeo"
#' Time of relief for three treatments of arthritis
#'
#' Data for Exercise 10.58
#'
#'
#' @name Arthriti
#' @docType data
#' @format A data frame/tibble with 51 observations on two variables
#' \describe{
#' \item{time}{time (measured in days) until an arthritis sufferer experienced relief}
#' \item{treatment}{a factor with levels \code{A}, \code{B}, and \code{C}}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' boxplot(time ~ treatment, data = Arthriti,
#' col = c("lightblue", "lightgreen", "yellow"),
#' ylab = "days")
#' anova(lm(time ~ treatment, data = Arthriti))
#'
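#' # A further illustrative check (not from the exercise): a Kruskal-Wallis
#' # test as a rank-based alternative to the one-way ANOVA above.
#' kruskal.test(time ~ treatment, data = Arthriti)
#'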
"Arthriti"
#' Durations of operation for 15 artificial heart transplants
#'
#' Data for Exercise 1.107
#'
#'
#' @name Artifici
#' @docType data
#' @format A data frame/tibble with 15 observations on one variable
#' \describe{
#' \item{duration}{duration (in hours) for transplant}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' stem(Artifici$duration, 2)
#' summary(Artifici$duration)
#' values <- Artifici$duration[Artifici$duration < 6.5]
#' values
#' summary(values)
#'
"Artifici"
#' Dissolving time versus level of impurities in aspirin tablets
#'
#' Data for Exercise 10.51
#'
#'
#' @name Asprin
#' @docType data
#' @format A data frame/tibble with 15 observations on two variables
#' \describe{
#' \item{time}{time (in seconds) for aspirin to dissolve}
#' \item{impurity}{impurity of an ingredient with levels \code{1\%},
#' \code{5\%}, and \code{10\%}}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' boxplot(time ~ impurity, data = Asprin,
#' col = c("red", "blue", "green"))
#'
"Asprin"
#' Asthmatic relief index on nine subjects given a drug and a placebo
#'
#' Data for Exercise 7.52
#'
#'
#' @name Asthmati
#' @docType data
#' @format A data frame/tibble with nine observations on three variables
#' \describe{
#' \item{drug}{asthmatic relief index for patients given a drug}
#' \item{placebo}{asthmatic relief index for patients given a placebo}
#' \item{difference}{difference between the \code{placebo} and \code{drug}}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' qqnorm(Asthmati$difference)
#' qqline(Asthmati$difference)
#' shapiro.test(Asthmati$difference)
#' with(data = Asthmati,
#' t.test(placebo, drug, paired = TRUE, mu = 0, alternative = "greater")
#' )
#'
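#' # A further illustrative check (not from the exercise): a Wilcoxon
#' # signed-rank test as a nonparametric counterpart to the paired t-test.
#' with(data = Asthmati,
#'      wilcox.test(placebo, drug, paired = TRUE, alternative = "greater")
#' )
#'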
"Asthmati"
#' Number of convictions reported by U.S. attorneys' offices
#'
#' Data for Example 2.2 and Exercises 2.43 and 2.57
#'
#'
#' @name Attorney
#' @docType data
#' @format A data frame/tibble with 88 observations on three variables
#' \describe{
#' \item{staff}{U.S. attorneys' office staff per 1 million population}
#' \item{convict}{U.S. attorneys' office convictions per 1 million population}
#' \item{district}{a factor with levels
#' \code{Albuquerque}, \code{Alexandria, Va}, \code{Anchorage}, \code{Asheville,
#' NC}, \code{Atlanta}, \code{Baltimore}, \code{Baton Rouge}, \code{Billings, Mt},
#' \code{Birmingham, Al}, \code{Boise, Id}, \code{Boston}, \code{Buffalo},
#' \code{Burlington, Vt}, \code{Cedar Rapids}, \code{Charleston, WVA},
#' \code{Cheyenne, Wy}, \code{Chicago}, \code{Cincinnati}, \code{Cleveland},
#' \code{Columbia, SC}, \code{Concord, NH}, \code{Denver}, \code{Des Moines},
#' \code{Detroit}, \code{East St. Louis}, \code{Fargo, ND}, \code{Fort Smith, Ark},
#' \code{Fort Worth}, \code{Grand Rapids, Mi}, \code{Greensboro, NC},
#' \code{Honolulu}, \code{Houston}, \code{Indianapolis}, \code{Jackson, Miss},
#' \code{Kansas City}, \code{Knoxville, Tn}, \code{Las Vegas}, \code{Lexington,
#' Ky}, \code{Little Rock}, \code{Los Angeles}, \code{Louisville}, \code{Memphis},
#' \code{Miami}, \code{Milwaukee}, \code{Minneapolis}, \code{Mobile, Ala},
#' \code{Montgomery, Ala}, \code{Muskogee, Ok}, \code{Nashville}, \code{New Haven,
#' Conn}, \code{New Orleans}, \code{New York (Brooklyn)}, \code{New York
#' (Manhattan)}, \code{Newark, NJ}, \code{Oklahoma City}, \code{Omaha},
#' \code{Oxford, Miss}, \code{Pensacola, Fl}, \code{Philadelphia}, \code{Phoenix},
#' \code{Pittsburgh}, \code{Portland, Maine}, \code{Portland, Ore},
#' \code{Providence, RI}, \code{Raleigh, NC}, \code{Roanoke, Va},
#' \code{Sacramento}, \code{Salt Lake City}, \code{San Antonio}, \code{San Diego},
#' \code{San Francisco}, \code{Savannah, Ga}, \code{Scranton, Pa}, \code{Seattle},
#' \code{Shreveport, La}, \code{Sioux Falls, SD}, \code{South Bend, Ind},
#' \code{Spokane, Wash} ,\code{Springfield, Ill}, \code{St. Louis},
#' \code{Syracuse, NY}, \code{Tampa}, \code{Topeka, Kan}, \code{Tulsa},
#' \code{Tyler, Tex}, \code{Washington}, \code{Wheeling, WVa}, and \code{Wilmington,
#' Del}}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' par(mfrow=c(1, 2))
#' plot(convict ~ staff, data = Attorney, main = "With Washington, D.C.")
#' plot(convict[-86] ~ staff[-86], data = Attorney,
#' main = "Without Washington, D.C.")
#' par(mfrow=c(1, 1))
#'
"Attorney"
#' Number of defective auto gears produced by two manufacturers
#'
#' Data for Exercise 7.46
#'
#'
#' @name Autogear
#' @docType data
#' @format A data frame/tibble with 20 observations on two variables
#' \describe{
#' \item{defectives}{number of defective gears in the production of 100 gears per day}
#' \item{manufacturer}{a factor with levels \code{A} and \code{B}}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' t.test(defectives ~ manufacturer, data = Autogear)
#' wilcox.test(defectives ~ manufacturer, data = Autogear)
#' t.test(defectives ~ manufacturer, var.equal = TRUE, data = Autogear)
#'
"Autogear"
#' Illustrates inferences based on pooled t-test versus Wilcoxon rank sum test
#'
#' Data for Exercise 7.40
#'
#'
#' @name Backtoback
#' @docType data
#' @format A data frame/tibble with 24 observations on two variables
#' \describe{
#' \item{score}{a numeric vector}
#' \item{group}{a numeric vector}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' wilcox.test(score ~ group, data = Backtoback)
#' t.test(score ~ group, data = Backtoback)
#'
"Backtoback"
#' Baseball salaries for members of five major league teams
#'
#' Data for Exercise 1.11
#'
#'
#' @name Bbsalaries
#' @docType data
#' @format A data frame/tibble with 142 observations on two variables
#' \describe{
#' \item{salary}{1999 salary for baseball player}
#' \item{team}{a factor with levels \code{Angels}, \code{Indians},
#' \code{Orioles}, \code{Redsoxs}, and \code{Whitesoxs}}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' stripchart(salary ~ team, data = Bbsalaries, method = "stack",
#' pch = 19, col = "blue", cex = 0.75)
#' title(main = "Major League Salaries")
#'
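#' # A further illustrative view (not from the exercise): side-by-side
#' # boxplots of the salaries by team.
#' boxplot(salary ~ team, data = Bbsalaries, col = "lightblue",
#'         ylab = "salary")
#'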
"Bbsalaries"
#' Graduation rates for student athletes and nonathletes in the Big Ten Conf.
#'
#' Data for Exercises 1.124 and 2.94
#'
#'
#' @name Bigten
#' @docType data
#' @format A data frame/tibble with 44 observations on the following four variables
#' \describe{
#' \item{school}{a factor with levels \code{Illinois},
#' \code{Indiana}, \code{Iowa}, \code{Michigan}, \code{Michigan State},
#' \code{Minnesota}, \code{Northwestern}, \code{Ohio State}, \code{Penn State},
#' \code{Purdue}, and \code{Wisconsin}}
#' \item{rate}{graduation rate}
#' \item{year}{factor with two levels \code{1984-1985} and \code{1993-1994}}
#' \item{status}{factor with two levels \code{athlete} and \code{student}}
#' }
#'
#' @source NCAA Graduation Rates Report, 2000.
#'
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' boxplot(rate ~ status, data = subset(Bigten, year == "1993-1994"),
#' horizontal = TRUE, main = "Graduation Rates 1993-1994")
#' with(data = Bigten,
#' tapply(rate, list(year, status), mean)
#' )
#'
"Bigten"
#' Test scores on first exam in biology class
#'
#' Data for Exercise 1.49
#'
#'
#' @name Biology
#' @docType data
#' @format A data frame/tibble with 30 observations on one variable
#' \describe{
#' \item{score}{test scores on the first test in a beginning biology class}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' hist(Biology$score, breaks = "scott", col = "brown", freq = FALSE,
#' main = "Problem 1.49", xlab = "Test Score")
#' lines(density(Biology$score), lwd=3)
#'
"Biology"
#' Live birth rates in 1990 and 1998 for all states
#'
#' Data for Example 1.10
#'
#'
#' @name Birth
#' @docType data
#' @format A data frame/tibble with 51 observations on three variables
#' \describe{
#' \item{state}{a character variable with values \code{Alabama},
#' \code{Alaska}, \code{Arizona}, \code{Arkansas}, \code{California},
#' \code{Colorado}, \code{Connecticut}, \code{Delaware}, \code{District of
#' Columbia}, \code{Florida}, \code{Georgia}, \code{Hawaii}, \code{Idaho},
#' \code{Illinois}, \code{Indiana}, \code{Iowa}, \code{Kansas}, \code{Kentucky},
#' \code{Louisiana}, \code{Maine}, \code{Maryland}, \code{Massachusetts},
#' \code{Michigan}, \code{Minnesota}, \code{Mississippi}, \code{Missouri},
#' \code{Montana}, \code{Nebraska}, \code{Nevada}, \code{New Hampshire}, \code{New
#' Jersey}, \code{New Mexico}, \code{New York}, \code{North Carolina}, \code{North
#' Dakota}, \code{Ohio}, \code{Oklahoma}, \code{Oregon}, \code{Pennsylvania},
#' \code{Rhode Island}, \code{South Carolina}, \code{South Dakota},
#' \code{Tennessee}, \code{Texas}, \code{Utah}, \code{Vermont}, \code{Virginia},
#' \code{Washington}, \code{West Virginia}, \code{Wisconsin}, and \code{Wyoming}}
#' \item{rate}{live birth rates per 1000 population}
#' \item{year}{a factor with levels \code{1990} and \code{1998}}
#' }
#'
#' @source \emph{National Vital Statistics Report, 48}, March 28, 2000, National
#' Center for Health Statistics.
#'
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' rate1998 <- subset(Birth, year == "1998", select = rate)
#' stem(x = rate1998$rate, scale = 2)
#' hist(rate1998$rate, breaks = seq(10.9, 21.9, 1.0), xlab = "1998 Birth Rate",
#' main = "Figure 1.14 in BSDA", col = "pink")
#' hist(rate1998$rate, breaks = seq(10.9, 21.9, 1.0), xlab = "1998 Birth Rate",
#' main = "Figure 1.16 in BSDA", col = "pink", freq = FALSE)
#' lines(density(rate1998$rate), lwd = 3)
#' rm(rate1998)
#'
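#' # A further illustrative view (not from the example): comparative boxplots
#' # of the 1990 and 1998 live birth rates.
#' boxplot(rate ~ year, data = Birth, col = c("lightblue", "pink"),
#'         ylab = "live birth rate per 1000 population")
#'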
"Birth"
#' Education level of blacks by gender
#'
#' Data for Exercise 8.55
#'
#'
#' @name Blackedu
#' @docType data
#' @format A data frame/tibble with 3800 observations on two variables
#' \describe{
#' \item{gender}{a factor with levels \code{Female} and \code{Male}}
#' \item{education}{a factor with levels \code{High school dropout},
#' \code{High school graduate}, \code{Some college}, \code{Bachelor's degree}, and
#' \code{Graduate degree}}
#' }
#'
#' @source Bureau of Census data.
#'
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' T1 <- xtabs(~gender + education, data = Blackedu)
#' T1
#' chisq.test(T1)
#'
"Blackedu"
#' Blood pressure of 15 adult males taken by machine and by an expert
#'
#' Data for Exercise 7.84
#'
#'
#' @name Blood
#' @docType data
#' @format A data frame/tibble with 15 observations on the following two variables
#' \describe{
#' \item{machine}{blood pressure recorded from an automated blood pressure machine}
#' \item{expert}{blood pressure recorded by an expert using an at-home device}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' DIFF <- Blood$machine - Blood$expert
#' shapiro.test(DIFF)
#' qqnorm(DIFF)
#' qqline(DIFF)
#' rm(DIFF)
#' t.test(Blood$machine, Blood$expert, paired = TRUE)
#'
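#' # A further illustrative check (not from the exercise): a Wilcoxon
#' # signed-rank test as a nonparametric counterpart to the paired t-test.
#' with(data = Blood,
#'      wilcox.test(machine, expert, paired = TRUE)
#' )
#'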
"Blood"
#' Incomes of board members from three different universities
#'
#' Data for Exercise 10.14
#'
#'
#' @name Board
#' @docType data
#' @format A data frame/tibble with 7 observations on two variables
#' \describe{
#' \item{salary}{1999 salary (in $1000) for board directors}
#' \item{university}{a factor with levels \code{A}, \code{B}, and \code{C}}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' boxplot(salary ~ university, data = Board, col = c("red", "blue", "green"),
#' ylab = "Income")
#' tapply(Board$salary, Board$university, summary)
#' anova(lm(salary ~ university, data = Board))
#' \dontrun{
#' library(dplyr)
#' dplyr::group_by(Board, university) %>%
#' summarize(Average = mean(salary))
#' }
"Board"
#' Bone density measurements of 35 physically active and 35 non-active women
#'
#' Data for Example 7.22
#'
#'
#' @name Bones
#' @docType data
#' @format A data frame/tibble with 70 observations on two variables
#' \describe{
#' \item{density}{bone density measurements}
#' \item{group}{a factor with levels \code{active} and \code{nonactive}}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' t.test(density ~ group, data = Bones, alternative = "greater")
#' t.test(rank(density) ~ group, data = Bones, alternative = "greater")
#' wilcox.test(density ~ group, data = Bones, alternative = "greater")
#'
#'
"Bones"
#' Number of books read and final spelling scores for 17 third graders
#'
#' Data for Exercise 9.53
#'
#'
#' @name Books
#' @docType data
#' @format A data frame/tibble with 17 observations on two variables
#' \describe{
#' \item{book}{number of books read}
#' \item{spelling}{spelling score}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' plot(spelling ~ book, data = Books)
#' mod <- lm(spelling ~ book, data = Books)
#' summary(mod)
#' abline(mod, col = "blue", lwd = 2)
#'
"Books"
#' Prices paid for used books at three different bookstores
#'
#' Data for Exercises 10.30 and 10.31
#'
#'
#' @name Bookstor
#' @docType data
#' @format A data frame/tibble with 72 observations on two variables
#' \describe{
#' \item{dollars}{money obtained for selling textbooks}
#' \item{store}{a factor with levels \code{A}, \code{B}, and \code{C}}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' boxplot(dollars ~ store, data = Bookstor,
#' col = c("purple", "lightblue", "cyan"))
#' kruskal.test(dollars ~ store, data = Bookstor)
#'
"Bookstor"
#' Brain weight versus body weight of 28 animals
#'
#' Data for Exercises 2.15, 2.44, 2.58 and Examples 2.3 and 2.20
#'
#'
#' @name Brain
#' @docType data
#' @format A data frame/tibble with 28 observations on three variables
#' \describe{
#' \item{species}{a factor with levels \code{African
#' elephant}, \code{Asian Elephant}, \code{Brachiosaurus}, \code{Cat},
#' \code{Chimpanzee}, \code{Cow}, \code{Diplodocus}, \code{Donkey}, \code{Giraffe},
#' \code{Goat}, \code{Gorilla}, \code{Gray wolf}, \code{Guinea Pig}, \code{Hamster},
#' \code{Horse}, \code{Human}, \code{Jaguar}, \code{Kangaroo}, \code{Mole},
#' \code{Mouse}, \code{Mt Beaver}, \code{Pig}, \code{Potar monkey}, \code{Rabbit},
#' \code{Rat}, \code{Rhesus monkey}, \code{Sheep}, and \code{Triceratops}}
#' \item{bodyweight}{body weight (in kg)}
#' \item{brainweight}{brain weight (in g)}
#' }
#'
#' @source P. Rousseeuw and A. Leroy, \emph{Robust Regression and Outlier Detection}
#' (New York: Wiley, 1987).
#'
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' plot(log(brainweight) ~ log(bodyweight), data = Brain,
#' pch = 19, col = "blue", main = "Example 2.3")
#' mod <- lm(log(brainweight) ~ log(bodyweight), data = Brain)
#' abline(mod, lty = "dashed", col = "blue")
#'
#'
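#' # A further illustrative step (not from the examples): the correlation on
#' # the log scale and a summary of the line fit above.
#' with(data = Brain,
#'      cor(log(brainweight), log(bodyweight))
#' )
#' summary(mod)
#'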
"Brain"
#' Repair costs of vehicles crashed into a barrier at 5 miles per hour
#'
#' Data for Exercise 1.73
#'
#'
#' @name Bumpers
#' @docType data
#' @format A data frame/tibble with 23 observations on two variables
#' \describe{
#' \item{car}{a factor with levels \code{Buick Century},
#' \code{Buick Skylark}, \code{Chevrolet Cavalier}, \code{Chevrolet Corsica},
#' \code{Chevrolet Lumina}, \code{Dodge Dynasty}, \code{Dodge Monaco}, \code{Ford
#' Taurus}, \code{Ford Tempo}, \code{Honda Accord}, \code{Hyundai Sonata},
#' \code{Mazda 626}, \code{Mitsubishi Galant}, \code{Nissan Stanza},
#' \code{Oldsmobile Calais}, \code{Oldsmobile Ciere}, \code{Plymouth Acclaim},
#' \code{Pontiac 6000}, \code{Pontiac Grand Am}, \code{Pontiac Sunbird},
#' \code{Saturn SL2}, \code{Subaru Legacy}, and \code{Toyota Camry}}
#' \item{repair}{total repair cost (in dollars) after crashing a car into a
#' barrier four times while the car was traveling at 5 miles per hour}
#' }
#'
#' @source Insurance Institute of Highway Safety.
#'
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' EDA(Bumpers$repair)
#' stripchart(Bumpers$repair, method = "stack", pch = 19, col = "blue")
#' library(lattice)
#' dotplot(car ~ repair, data = Bumpers)
#'
"Bumpers"
#' Attendance of bus drivers versus shift
#'
#' Data for Exercise 8.25
#'
#'
#' @name Bus
#' @docType data
#' @format A data frame/tibble with 29363 observations on two variables
#' \describe{
#' \item{attendance}{a factor with levels \code{absent} and
#' \code{present}}
#' \item{shift}{a factor with levels \code{am}, \code{noon}, \code{pm},
#' \code{swing}, and \code{split}}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' T1 <- xtabs(~attendance + shift, data = Bus)
#' T1
#' chisq.test(T1)
#'
"Bus"
#' Median charges for coronary bypass at 17 hospitals in North Carolina
#'
#' Data for Exercises 5.104 and 6.43
#'
#'
#' @name Bypass
#' @docType data
#' @format A data frame/tibble with 17 observations on two variables
#' \describe{
#' \item{hospital}{a factor with levels \code{Carolinas Med
#' Ct}, \code{Duke Med Ct}, \code{Durham Regional}, \code{Forsyth Memorial},
#' \code{Frye Regional}, \code{High Point Regional}, \code{Memorial Mission},
#' \code{Mercy}, \code{Moore Regional}, \code{Moses Cone Memorial}, \code{NC
#' Baptist}, \code{New Hanover Regional}, \code{Pitt Co. Memorial},
#' \code{Presbyterian}, \code{Rex}, \code{Univ of North Carolina}, and \code{Wake
#' County}}
#' \item{charge}{median charge for coronary bypass}
#' }
#'
#' @source \emph{Consumer's Guide to Hospitalization Charges in North Carolina Hospitals}
#' (August 1994), North Carolina Medical Database Commission, Department of Insurance.
#'
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' EDA(Bypass$charge)
#' t.test(Bypass$charge, conf.level=.90)$conf
#' t.test(Bypass$charge, mu = 35000)
#'
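#' # A further illustrative check (not from the exercises): a sign test of the
#' # median charge against the same 35000 benchmark, assuming the SIGN.test()
#' # function from this package is available.
#' SIGN.test(Bypass$charge, md = 35000)
#'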
"Bypass"
#' Estimates of costs of kitchen cabinets by two suppliers on 20 prospective
#' homes
#'
#' Data for Exercise 7.83
#'
#'
#' @name Cabinets
#' @docType data
#' @format A data frame/tibble with 20 observations on three variables
#' \describe{
#' \item{home}{a numeric vector}
#' \item{supplA}{estimate for kitchen cabinets from supplier A (in dollars)}
#' \item{supplB}{estimate for kitchen cabinets from supplier B (in dollars)}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' DIF <- Cabinets$supplA - Cabinets$supplB
#' qqnorm(DIF)
#' qqline(DIF)
#' shapiro.test(DIF)
#' with(data = Cabinets,
#' t.test(supplA, supplB, paired = TRUE)
#' )
#' with(data = Cabinets,
#' wilcox.test(supplA, supplB, paired = TRUE)
#' )
#' rm(DIF)
#'
"Cabinets"
#' Survival times of terminal cancer patients treated with vitamin C
#'
#' Data for Exercises 6.55 and 6.64
#'
#'
#' @name Cancer
#' @docType data
#' @format A data frame/tibble with 64 observations on two variables
#' \describe{
#' \item{survival}{survival time (in days) of terminal patients
#' treated with vitamin C}
#' \item{type}{a factor indicating type of cancer with levels
#' \code{breast}, \code{bronchus}, \code{colon}, \code{ovary}, and
#' \code{stomach}}
#' }
#' @source Cameron, E and Pauling, L. 1978. \dQuote{Supplemental Ascorbate in the
#' Supportive Treatment of Cancer.} \emph{Proceedings of the National Academy of
#' Science}, 75, 4538-4542.
#'
#'
#'
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' boxplot(survival ~ type, Cancer, col = "blue")
#' stomach <- Cancer$survival[Cancer$type == "stomach"]
#' bronchus <- Cancer$survival[Cancer$type == "bronchus"]
#' boxplot(stomach, ylab = "Days")
#' SIGN.test(stomach, md = 100, alternative = "greater")
#' SIGN.test(bronchus, md = 100, alternative = "greater")
#' rm(bronchus, stomach)
#'
#'
"Cancer"
#' Carbon monoxide level measured at three industrial sites
#'
#' Data for Exercises 10.28 and 10.29
#'
#'
#' @name Carbon
#' @docType data
#' @format A data frame/tibble with 24 observations on two variables
#' \describe{
#' \item{CO}{carbon monoxide measured (in parts per million)}
#' \item{site}{a factor with levels \code{SiteA}, \code{SiteB}, and \code{SiteC}}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' boxplot(CO ~ site, data = Carbon, col = "lightgreen")
#' kruskal.test(CO ~ site, data = Carbon)
#'
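#' # A further illustrative check (not from the exercise): a one-way ANOVA as
#' # a parametric counterpart to the Kruskal-Wallis test above.
#' anova(lm(CO ~ site, data = Carbon))
#'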
"Carbon"
#' Reading scores on the California achievement test for a group of 3rd graders
#'
#' Data for Exercise 1.116
#'
#'
#' @name Cat
#' @docType data
#' @format A data frame/tibble with 17 observations on one variable
#' \describe{
#' \item{score}{reading score on the California Achievement Test}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' stem(Cat$score)
#' fivenum(Cat$score)
#' boxplot(Cat$score, main = "Problem 1.116", col = "green")
#'
"Cat"
#' Entry age and survival time of patients with small cell lung cancer under
#' two different treatments
#'
#' Data for Exercises 7.34 and 7.48
#'
#'
#' @name Censored
#' @docType data
#' @format A data frame/tibble with 121 observations on three variables
#' \describe{
#' \item{survival}{survival time (in days) of patients with small cell lung cancer}
#' \item{treatment}{a factor with levels \code{armA} and \code{armB} indicating the
#' treatment a patient received}
#' \item{age}{the age of the patient}
#' }
#'
#' @source Ying, Z., Jung, S., Wei, L. 1995. \dQuote{Survival Analysis with Median Regression Models.}
#' \emph{Journal of the American Statistical Association}, 90, 178-184.
#'
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' boxplot(survival ~ treatment, data = Censored, col = "yellow")
#' wilcox.test(survival ~ treatment, data = Censored, alternative = "greater")
#'
"Censored"
#' Temperatures and O-ring failures for the launches of the space shuttle
#' Challenger
#'
#' Data for Examples 1.11, 1.12, 1.13, 2.11 and 5.1
#'
#'
#' @name Challeng
#' @docType data
#' @format A data frame/tibble with 25 observations on four variables
#' \describe{
#' \item{flight}{a character variable indicating the flight}
#' \item{date}{date of the flight}
#' \item{temp}{temperature (in fahrenheit)}
#' \item{failures}{number of failures}
#' }
#'
#' @source Dalal, S. R., Fowlkes, E. B., Hoadley, B. 1989. \dQuote{Risk Analysis of the Space Shuttle: Pre-Challenger
#' Prediction of Failure.}
#' \emph{Journal of the American Statistical Association}, 84, No. 408, 945-957.
#'
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' stem(Challeng$temp)
#' summary(Challeng$temp)
#' IQR(Challeng$temp)
#' quantile(Challeng$temp)
#' fivenum(Challeng$temp)
#' stem(sort(Challeng$temp)[-1])
#' summary(sort(Challeng$temp)[-1])
#' IQR(sort(Challeng$temp)[-1])
#' quantile(sort(Challeng$temp)[-1])
#' fivenum(sort(Challeng$temp)[-1])
#' par(mfrow=c(1, 2))
#' qqnorm(Challeng$temp)
#' qqline(Challeng$temp)
#' qqnorm(sort(Challeng$temp)[-1])
#' qqline(sort(Challeng$temp)[-1])
#' par(mfrow=c(1, 1))
#'
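#' # A further illustrative view (not from the examples): O-ring failures
#' # plotted against launch temperature.
#' plot(failures ~ temp, data = Challeng, pch = 19, col = "blue",
#'      xlab = "temperature (F)", ylab = "number of failures")
#'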
"Challeng"
#' Starting salaries of 50 chemistry majors
#'
#' Data for Example 5.3
#'
#'
#' @name Chemist
#' @docType data
#' @format A data frame/tibble with 50 observations on one variable
#' \describe{
#' \item{salary}{starting salary (in dollars) for chemistry major}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' EDA(Chemist$salary)
#'
"Chemist"
#' Surface salinity measurements taken offshore from Annapolis, Maryland in
#' 1927
#'
#' Data for Exercise 6.41
#'
#'
#' @name Chesapea
#' @docType data
#' @format A data frame/tibble with 16 observations on one variable
#' \describe{
#' \item{salinity}{surface salinity measurements (in parts per 1000) for station 11,
#' offshore from Annapolis, Maryland, on July 3-4, 1927.}
#' }
#'
#' @source Davis, J. (1986) \emph{Statistics and Data Analysis in Geology, Second Edition}.
#' John Wiley and Sons, New York.
#'
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' qqnorm(Chesapea$salinity)
#' qqline(Chesapea$salinity)
#' shapiro.test(Chesapea$salinity)
#' t.test(Chesapea$salinity, mu = 7)
#'
"Chesapea"
#' Insurance injury ratings of Chevrolet vehicles for 1990 and 1993 models
#'
#' Data for Exercise 8.35
#'
#'
#' @name Chevy
#' @docType data
#' @format A data frame/tibble with 67 observations on two variables
#' \describe{
#' \item{year}{a factor with levels \code{1988-90} and
#' \code{1991-93}}
#' \item{frequency}{a factor with levels \code{much better than average}, \code{above average},
#' \code{average}, \code{below average}, and \code{much worse than average}}
#' }
#'
#' @source Insurance Institute for Highway Safety and the Highway Loss Data Institute, 1995.
#'
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' T1 <- xtabs(~year + frequency, data = Chevy)
#' T1
#' chisq.test(T1)
#' rm(T1)
#'
"Chevy"
#' Weight gain of chickens fed three different rations
#'
#' Data for Exercise 10.15
#'
#'
#' @name Chicken
#' @docType data
#' @format A data frame/tibble with 13 observations on two variables
#' \describe{
#' \item{gain}{weight gain over a specified period}
#' \item{feed}{a factor with levels \code{ration1}, \code{ration2},
#' and \code{ration3}}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' boxplot(gain ~ feed, col = c("red","blue","green"), data = Chicken)
#' anova(lm(gain ~ feed, data = Chicken))
#'
"Chicken"
#' Measurements of the thickness of the oxide layer of manufactured integrated
#' circuits
#'
#' Data for Exercises 6.49 and 7.47
#'
#'
#' @name Chipavg
#' @docType data
#' @format A data frame/tibble with 30 observations on three variables
#' \describe{
#' \item{wafer1}{thickness of the oxide layer for \code{wafer1}}
#' \item{wafer2}{thickness of the oxide layer for \code{wafer2}}
#' \item{thickness}{average thickness of the oxide layer of the eight measurements
#' obtained from each set of two wafers}
#' }
#'
#' @source Yashchin, E. 1995. \dQuote{Likelihood Ratio Methods
#' for Monitoring Parameters of a Nested Random Effect Model.}
#' \emph{Journal of the American Statistical Association}, 90, 729-738.
#'
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' EDA(Chipavg$thickness)
#' t.test(Chipavg$thickness, mu = 1000)
#' boxplot(Chipavg$wafer1, Chipavg$wafer2, names = c("Wafer 1", "Wafer 2"))
#' shapiro.test(Chipavg$wafer1)
#' shapiro.test(Chipavg$wafer2)
#' t.test(Chipavg$wafer1, Chipavg$wafer2, var.equal = TRUE)
#'
"Chipavg"
#' Four measurements on a first wafer and four measurements on a second wafer
#' selected from 30 lots
#'
#' Data for Exercise 10.9
#'
#'
#' @name Chips
#' @docType data
#' @format A data frame/tibble with 30 observations on eight variables
#' \describe{
#' \item{wafer11}{first measurement of thickness of the oxide layer for \code{wafer1}}
#' \item{wafer12}{second measurement of thickness of the oxide layer for \code{wafer1}}
#' \item{wafer13}{third measurement of thickness of the oxide layer for \code{wafer1}}
#' \item{wafer14}{fourth measurement of thickness of the oxide layer for \code{wafer1}}
#' \item{wafer21}{first measurement of thickness of the oxide layer for \code{wafer2}}
#' \item{wafer22}{second measurement of thickness of the oxide layer for \code{wafer2}}
#' \item{wafer23}{third measurement of thickness of the oxide layer for \code{wafer2}}
#' \item{wafer24}{fourth measurement of thickness of the oxide layer for \code{wafer2}}
#' }
#'
#' @source Yashchin, E. 1995. \dQuote{Likelihood Ratio Methods
#' for Monitoring Parameters of a Nested Random Effect Model.}
#' \emph{Journal of the American Statistical Association}, 90, 729-738.
#'
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' with(data = Chips,
#' boxplot(wafer11, wafer12, wafer13, wafer14, wafer21,
#' wafer22, wafer23, wafer24, col = "pink")
#' )
#'
"Chips"
#' Effect of mother's smoking on birth weight of newborn
#'
#' Data for Exercise 2.27
#'
#'
#' @name Cigarett
#' @docType data
#' @format A data frame/tibble with 16 observations on two variables
#' \describe{
#' \item{cigarettes}{mothers' estimated average number of cigarettes smoked per day}
#' \item{weight}{children's birth weights (in pounds)}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' plot(weight ~ cigarettes, data = Cigarett)
#' model <- lm(weight ~ cigarettes, data = Cigarett)
#' abline(model, col = "red")
#' with(data = Cigarett,
#' cor(weight, cigarettes)
#' )
#' rm(model)
#'
"Cigarett"
#' Milligrams of tar in 25 cigarettes selected randomly from 4 different brands
#'
#' Data for Example 10.4
#'
#'
#' @name Cigar
#' @docType data
#' @format A data frame/tibble with 100 observations on two variables
#' \describe{
#' \item{tar}{amount of tar (measured in milligrams)}
#' \item{brand}{a factor indicating cigarette brand with levels \code{brandA}, \code{brandB},
#' \code{brandC}, and \code{brandD}}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' boxplot(tar ~ brand, data = Cigar, col = "cyan", ylab = "mg tar")
#' anova(lm(tar ~ brand, data = Cigar))
#'
"Cigar"
#' Percent of peak bone density of different aged children
#'
#' Data for Exercise 9.7
#'
#'
#' @name Citrus
#' @docType data
#' @format A data frame/tibble with nine observations on two variables
#' \describe{
#' \item{age}{age of children}
#' \item{percent}{percent peak bone density}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' model <- lm(percent ~ age, data = Citrus)
#' summary(model)
#' anova(model)
#' rm(model)
#'
"Citrus"
#' Residual contaminant following the use of three different cleansing agents
#'
#' Data for Exercise 10.16
#'
#'
#' @name Clean
#' @docType data
#' @format A data frame/tibble with 45 observations on two variables
#' \describe{
#' \item{clean}{residual contaminants}
#' \item{agent}{a factor with levels \code{A}, \code{B}, and \code{C}}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' boxplot(clean ~ agent, col = c("red", "blue", "green"), data = Clean)
#' anova(lm(clean ~ agent, data = Clean))
#'
"Clean"
#' Signal loss from three types of coaxial cable
#'
#' Data for Exercises 10.24 and 10.25
#'
#'
#' @name Coaxial
#' @docType data
#' @format A data frame/tibble with 45 observations on two variables
#' \describe{
#' \item{signal}{signal loss per 1000 feet}
#' \item{cable}{factor with three levels of coaxial cable \code{typeA},
#' \code{typeB}, and \code{typeC}}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' boxplot(signal ~ cable, data = Coaxial, col = c("red", "green", "yellow"))
#' kruskal.test(signal ~ cable, data = Coaxial)
#'
"Coaxial"
#' Productivity of workers with and without a coffee break
#'
#' Data for Exercise 7.55
#'
#'
#' @name Coffee
#' @docType data
#' @format A data frame/tibble with nine observations on three variables
#' \describe{
#' \item{without}{workers' productivity scores without a coffee break}
#' \item{with}{workers' productivity scores with a coffee break}
#' \item{differences}{\code{with} minus \code{without}}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' qqnorm(Coffee$differences)
#' qqline(Coffee$differences)
#' shapiro.test(Coffee$differences)
#' t.test(Coffee$with, Coffee$without, paired = TRUE, alternative = "greater")
#' wilcox.test(Coffee$with, Coffee$without, paired = TRUE,
#'             alternative = "greater")
#'
"Coffee"
#' Yearly returns on 12 investments
#'
#' Data for Exercise 5.68
#'
#'
#' @name Coins
#' @docType data
#' @format A data frame/tibble with 12 observations on one variable
#' \describe{
#' \item{return}{yearly returns on each of 12 possible investments}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' qqnorm(Coins$return)
#' qqline(Coins$return)
#'
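#' # A further illustrative check (not from the exercise): a Shapiro-Wilk test
#' # to accompany the normal probability plot above.
#' shapiro.test(Coins$return)
#'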
"Coins"
#' Commuting times for selected cities in 1980 and 1990
#'
#' Data for Exercises 1.13 and 7.85
#'
#'
#' @name Commute
#' @docType data
#' @format A data frame/tibble with 39 observations on three variables
#' \describe{
#' \item{city}{a factor with levels \code{Atlanta},
#' \code{Baltimore}, \code{Boston}, \code{Buffalo}, \code{Charlotte},
#' \code{Chicago}, \code{Cincinnati}, \code{Cleveland}, \code{Columbus},
#' \code{Dallas}, \code{Denver}, \code{Detroit}, \code{Hartford}, \code{Houston},
#' \code{Indianapolis}, \code{Kansas City}, \code{Los Angeles}, \code{Miami},
#' \code{Milwaukee}, \code{Minneapolis}, \code{New Orleans}, \code{New York},
#' \code{Norfolk}, \code{Orlando}, \code{Philadelphia}, \code{Phoenix},
#' \code{Pittsburgh}, \code{Portland}, \code{Providence}, \code{Rochester},
#' \code{Sacramento}, \code{Salt Lake City}, \code{San Antonio}, \code{San Diego},
#' \code{San Francisco}, \code{Seattle}, \code{St. Louis}, \code{Tampa}, and
#' \code{Washington}}
#' \item{year}{year}
#' \item{time}{commute times}
#' }
#'
#' @source Federal Highway Administration.
#'
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' library(lattice)
#' stripplot(year ~ time, data = Commute, jitter = TRUE)
#' dotplot(year ~ time, data = Commute)
#' bwplot(year ~ time, data = Commute)
#' stripchart(time ~ year, data = Commute, method = "stack", pch = 1,
#' cex = 2, col = c("red", "blue"),
#' group.names = c("1980", "1990"),
#' main = "", xlab = "minutes")
#' title(main = "Commute Time")
#' boxplot(time ~ year, data = Commute, names=c("1980", "1990"),
#' horizontal = TRUE, las = 1)
#'
#'
"Commute"
#' Tennessee self concept scale scores for a group of teenage boys
#'
#' Data for Exercises 1.68 and 1.82
#'
#'
#' @name Concept
#' @docType data
#' @format A data frame/tibble with 28 observations on one variable
#' \describe{
#' \item{self}{Tennessee self concept scores}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' summary(Concept$self)
#' sd(Concept$self)
#' diff(range(Concept$self))
#' IQR(Concept$self)
#' summary(Concept$self/10)
#' IQR(Concept$self/10)
#' sd(Concept$self/10)
#' diff(range(Concept$self/10))
#'
"Concept"
#' Compressive strength of concrete blocks made by two different methods
#'
#' Data for Example 7.17
#'
#'
#' @name Concrete
#' @docType data
#' @format A data frame/tibble with 20 observations on two variables
#' \describe{
#' \item{strength}{compressive strength (in pounds per square inch)}
#' \item{method}{factor with levels \code{new} and \code{old} indicating the
#' method used to construct a concrete block}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' wilcox.test(strength ~ method, data = Concrete, alternative = "greater")
#'
"Concrete"
#' Comparison of the yields of a new variety and a standard variety of corn
#' planted on 12 plots of land
#'
#' Data for Exercise 7.77
#'
#'
#' @name Corn
#' @docType data
#' @format A data frame/tibble with 12 observations on three variables
#' \describe{
#' \item{new}{corn yield with new method}
#' \item{standard}{corn yield with standard method}
#' \item{differences}{\code{new} minus \code{standard}}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' boxplot(Corn$differences)
#' qqnorm(Corn$differences)
#' qqline(Corn$differences)
#' shapiro.test(Corn$differences)
#' t.test(Corn$differences, alternative = "greater")
#'
"Corn"
#' Exercise to illustrate correlation
#'
#' Data for Exercise 2.23
#'
#'
#' @name Correlat
#' @docType data
#' @format A data frame/tibble with 13 observations on two variables
#' \describe{
#' \item{x}{a numeric vector}
#' \item{y}{a numeric vector}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' plot(y ~ x, data = Correlat)
#' model <- lm(y ~ x, data = Correlat)
#' abline(model)
#' rm(model)
#'
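#' # A further illustrative step (not from the exercise): the correlation
#' # coefficient itself.
#' with(data = Correlat,
#'      cor(x, y)
#' )
#'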
"Correlat"
#' Scores of 18 volunteers who participated in a counseling process
#'
#' Data for Exercise 6.96
#'
#'
#' @name Counsel
#' @docType data
#' @format A data frame/tibble with 18 observations on one variable
#' \describe{
#' \item{score}{standardized psychology scores after a counseling process}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' EDA(Counsel$score)
#' t.test(Counsel$score, mu = 70)
#'
"Counsel"
#' Consumer price index from 1979 to 1998
#'
#' Data for Exercise 1.34
#'
#'
#' @name Cpi
#' @docType data
#' @format A data frame/tibble with 20 observations on two variables
#' \describe{
#' \item{year}{year}
#' \item{cpi}{consumer price index}
#' }
#'
#' @source Bureau of Labor Statistics.
#'
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' plot(cpi ~ year, data = Cpi, type = "l", lty = 2, lwd = 2, col = "red")
#' barplot(Cpi$cpi, col = "pink", las = 2, main = "Problem 1.34")
#'
"Cpi"
#' Violent crime rates for the states in 1983 and 1993
#'
#' Data for Exercises 1.90, 2.32, 3.64, and 5.113
#'
#'
#' @name Crime
#' @docType data
#' @format A data frame/tibble with 102 observations on three variables
#' \describe{
#' \item{state}{a factor with levels \code{Alabama},
#' \code{Alaska}, \code{Arizona}, \code{Arkansas}, \code{California},
#' \code{Colorado}, \code{Connecticut}, \code{DC}, \code{Delaware}, \code{Florida},
#' \code{Georgia}, \code{Hawaii}, \code{Idaho}, \code{Illinois}, \code{Indiana},
#' \code{Iowa}, \code{Kansas}, \code{Kentucky}, \code{Louisiana}, \code{Maine},
#' \code{Maryland}, \code{Massachusetts}, \code{Michigan}, \code{Minnesota},
#' \code{Mississippi}, \code{Missouri}, \code{Montana}, \code{Nebraska},
#' \code{Nevada}, \code{New Hampshire}, \code{New Jersey}, \code{New Mexico},
#' \code{New York}, \code{North Carolina}, \code{North Dakota}, \code{Ohio},
#' \code{Oklahoma}, \code{Oregon}, \code{Pennsylvania}, \code{Rhode Island},
#' \code{South Carolina}, \code{South Dakota}, \code{Tennessee}, \code{Texas},
#' \code{Utah}, \code{Vermont}, \code{Virginia}, \code{Washington}, \code{West
#' Virginia}, \code{Wisconsin}, and \code{Wyoming}}
#' \item{year}{a factor with levels \code{1983} and \code{1993}}
#' \item{rate}{crime rate per 100,000 inhabitants}
#' }
#'
#' @source U.S. Department of Justice, Bureau of Justice Statistics, \emph{Sourcebook of
#' Criminal Justice Statistics}, 1993.
#'
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' boxplot(rate ~ year, data = Crime, col = "red")
#'
"Crime"
#' Charles Darwin's study of cross-fertilized and self-fertilized plants
#'
#' Data for Exercise 7.62
#'
#'
#' @name Darwin
#' @docType data
#' @format A data frame/tibble with 15 observations on three variables
#' \describe{
#' \item{pot}{number of pot}
#' \item{cross}{height of plant (in inches) after a fixed period of time when cross-fertilized}
#' \item{self}{height of plant (in inches) after a fixed period of time when self-fertilized}
#' }
#'
#' @source Darwin, C. (1876) \emph{The Effect of Cross- and Self-Fertilization in the
#' Vegetable Kingdom}, 2nd edition, London.
#'
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' differ <- Darwin$cross - Darwin$self
#' qqnorm(differ)
#' qqline(differ)
#' shapiro.test(differ)
#' wilcox.test(Darwin$cross, Darwin$self, paired = TRUE)
#' rm(differ)
#'
"Darwin"
#' Automobile dealers classified according to type dealership and service
#' rendered to customers
#'
#' Data for Example 2.22
#'
#'
#' @name Dealers
#' @docType data
#' @format A data frame/tibble with 122 observations on two variables
#' \describe{
#' \item{type}{a factor with levels \code{Honda}, \code{Toyota}, \code{Mazda},
#' \code{Ford}, \code{Dodge}, and \code{Saturn}}
#' \item{service}{a factor with levels \code{Replaces unnecessarily} and \code{Follows manufacturer guidelines}}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' xtabs(~type + service, data = Dealers)
#' T1 <- xtabs(~type + service, data = Dealers)
#' T1
#' addmargins(T1)
#' pt <- prop.table(T1, margin = 1)
#' pt
#' barplot(t(pt), col = c("red", "skyblue"), legend = colnames(T1))
#' rm(T1, pt)
#'
"Dealers"
#' Number of defective items produced by 20 employees
#'
#' Data for Exercise 1.27
#'
#'
#' @name Defectiv
#' @docType data
#' @format A data frame/tibble with 20 observations on one variable
#' \describe{
#' \item{number}{number of defective items produced by the employees in a small business firm}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' T1 <- xtabs(~ number, data = Defectiv)
#' T1
#' barplot(T1, col = "pink", ylab = "Frequency",
#' xlab = "Defective Items Produced by Employees", main = "Problem 1.27")
#' rm(T1)
#'
"Defectiv"
#' Percent of bachelor's degrees awarded women in 1970 versus 1990
#'
#' Data for Exercise 2.75
#'
#'
#' @name Degree
#' @docType data
#' @format A data frame/tibble with 1064 observations on two variables
#' \describe{
#' \item{field}{a factor with levels \code{Health},
#' \code{Education}, \code{Foreign Language}, \code{Psychology}, \code{Fine Arts},
#' \code{Life Sciences}, \code{Business}, \code{Social Science}, \code{Physical Sciences},
#' \code{Engineering}, and \code{All Fields}}
#' \item{awarded}{a factor with levels \code{1970} and \code{1990}}
#' }
#'
#' @source U.S. Department of Health and Human Services, National
#' Center for Education Statistics.
#'
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' T1 <- xtabs(~field + awarded, data = Degree)
#' T1
#' barplot(t(T1), beside = TRUE, col = c("red", "skyblue"), legend = colnames(T1))
#' rm(T1)
#'
"Degree"
#' Delay times on 20 flights from four major air carriers
#'
#' Data for Exercise 10.55
#'
#'
#' @name Delay
#' @docType data
#' @format A data frame/tibble with 80 observations on two variables
#' \describe{
#' \item{delay}{the delay time (in minutes) for 80 randomly selected flights}
#' \item{carrier}{a factor with levels \code{A}, \code{B}, \code{C}, and \code{D}}
#' }
#'
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' boxplot(delay ~ carrier, data = Delay,
#' main = "Exercise 10.55", ylab = "minutes",
#' col = "pink")
#' kruskal.test(delay ~ carrier, data = Delay)
#'
"Delay"
#' Number of dependent children for 50 families
#'
#' Data for Exercise 1.26
#'
#'
#' @name Depend
#' @docType data
#' @format A data frame/tibble with 50 observations on one variable
#' \describe{
#' \item{number}{number of dependent children in a family}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' T1 <- xtabs(~ number, data = Depend)
#' T1
#' barplot(T1, col = "lightblue", main = "Problem 1.26",
#' xlab = "Number of Dependent Children", ylab = "Frequency")
#' rm(T1)
#'
"Depend"
#' Educational levels of a sample of 40 auto workers in Detroit
#'
#' Data for Exercise 5.21
#'
#'
#' @name Detroit
#' @docType data
#' @format A data frame/tibble with 40 observations on one variable
#' \describe{
#' \item{educ}{the educational level (in years) of a sample of 40 auto workers in a plant in Detroit}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' EDA(Detroit$educ)
#'
"Detroit"
#' Demographic characteristics of developmental students at 2-year colleges and
#' 4-year colleges
#'
#' Data used for Exercise 8.50
#'
#'
#' @name Develop
#' @docType data
#' @format A data frame/tibble with 5656 observations on two variables
#' \describe{
#' \item{race}{a factor with levels \code{African American}, \code{American Indian},
#' \code{Asian}, \code{Latino}, and \code{White}}
#' \item{college}{a factor with levels \code{Two-year} and \code{Four-year}}
#' }
#'
#' @source \emph{Research in Development Education} (1994), V. 11, 2.
#'
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' T1 <- xtabs(~race + college, data = Develop)
#' T1
#' chisq.test(T1)
#' rm(T1)
#'
"Develop"
#' Test scores for students who failed developmental mathematics in the fall
#' semester 1995
#'
#' Data for Exercise 6.47
#'
#'
#' @name Devmath
#' @docType data
#' @format A data frame/tibble with 40 observations on one variable
#' \describe{
#' \item{score}{first exam score}
#' }
#'
#' @source Data provided by Dr. Anita Kitchens.
#'
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' EDA(Devmath$score)
#' t.test(Devmath$score, mu = 80, alternative = "less")
#'
"Devmath"
#' Outcomes and probabilities of the roll of a pair of fair dice
#'
#' Data for Exercise 3.109
#'
#'
#' @name Dice
#' @docType data
#' @format A data frame/tibble with 11 observations on two variables
#' \describe{
#' \item{x}{possible outcomes for the sum of two dice}
#' \item{px}{probability for outcome \code{x}}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' roll1 <- sample(1:6, 20000, replace = TRUE)
#' roll2 <- sample(1:6, 20000, replace = TRUE)
#' outcome <- roll1 + roll2
#' T1 <- table(outcome)/length(outcome)
#' remove(roll1, roll2, outcome)
#' T1
#' round(t(Dice), 5)
#' rm(T1)
#'
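#' # A further illustrative computation (not from the exercise): the mean and
#' # variance of the sum of two fair dice from the tabled distribution,
#' # using E(X) = sum(x * p(x)) and Var(X) = sum((x - E(X))^2 * p(x)).
#' mu <- with(data = Dice, sum(x * px))
#' mu
#' with(data = Dice, sum((x - mu)^2 * px))
#' rm(mu)
#'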
"Dice"
#' Diesel fuel prices in 1999-2000 in nine regions of the country
#'
#' Data for Exercise 2.8
#'
#'
#' @name Diesel
#' @docType data
#' @format A data frame/tibble with 650 observations on three variables
#' \describe{
#' \item{date}{date when price was recorded}
#' \item{pricepergallon}{price per gallon (in dollars)}
#' \item{location}{a factor with levels \code{California}, \code{CentralAtlantic},
#' \code{Coast}, \code{EastCoast}, \code{Gulf}, \code{LowerAtlantic}, \code{NatAvg},
#' \code{NorthEast}, \code{Rocky}, and \code{WesternMountain}}
#' }
#'
#' @source Energy Information Administration, National Energy Information Center:
#' 1000 Independence Ave., SW, Washington, D.C., 20585.
#'
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' par(las = 2)
#' boxplot(pricepergallon ~ location, data = Diesel)
#' boxplot(pricepergallon ~ location,
#' data = droplevels(Diesel[Diesel$location == "EastCoast" |
#' Diesel$location == "Gulf" | Diesel$location == "NatAvg" |
#' Diesel$location == "Rocky" | Diesel$location == "California", ]),
#' col = "pink", main = "Exercise 2.8")
#' par(las = 1)
#' \dontrun{
#' library(ggplot2)
#' ggplot2::ggplot(data = Diesel, aes(x = date, y = pricepergallon,
#' color = location)) +
#' geom_point() +
#' geom_smooth(se = FALSE) +
#' theme_bw() +
#' labs(y = "Price per Gallon (in dollars)")
#' }
"Diesel"
#' Parking tickets issued to diplomats
#'
#' Data for Exercises 1.14 and 1.37
#'
#'
#' @name Diplomat
#' @docType data
#' @format A data frame/tibble with 10 observations on three variables
#' \describe{
#' \item{country}{a factor with levels \code{Brazil},
#' \code{Bulgaria}, \code{Egypt}, \code{Indonesia}, \code{Israel}, \code{Nigeria},
#' \code{Russia}, \code{S. Korea}, \code{Ukraine}, and \code{Venezuela}}
#' \item{number}{total number of tickets}
#' \item{rate}{number of tickets per vehicle per month}
#' }
#'
#' @source \emph{Time}, November 8, 1993. Figures are from January to June 1993.
#'
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' par(las = 2, mfrow = c(2, 2))
#' stripchart(number ~ country, data = Diplomat, pch = 19,
#' col= "red", vertical = TRUE)
#' stripchart(rate ~ country, data = Diplomat, pch = 19,
#' col= "blue", vertical = TRUE)
#' with(data = Diplomat,
#' barplot(number, names.arg = country, col = "red"))
#' with(data = Diplomat,
#' barplot(rate, names.arg = country, col = "blue"))
#' par(las = 1, mfrow = c(1, 1))
#' \dontrun{
#' library(ggplot2)
#' ggplot2::ggplot(data = Diplomat, aes(x = reorder(country, number),
#' y = number)) +
#' geom_bar(stat = "identity", fill = "pink", color = "black") +
#' theme_bw() + labs(x = "", y = "Total Number of Tickets")
#' ggplot2::ggplot(data = Diplomat, aes(x = reorder(country, rate),
#' y = rate)) +
#' geom_bar(stat = "identity", fill = "pink", color = "black") +
#' theme_bw() + labs(x = "", y = "Tickets per vehicle per month")
#' }
"Diplomat"
#' Toxic intensity for manufacturing plants producing herbicidal preparations
#'
#' Data for Exercise 1.127
#'
#'
#' @name Disposal
#' @docType data
#' @format A data frame/tibble with 29 observations on one variable
#' \describe{
#' \item{pounds}{pounds of toxic waste per $1000 of shipments of its products}
#' }
#'
#' @source Bureau of the Census, \emph{Reducing Toxins}, Statistical Brief SB/95-3,
#' February 1995.
#'
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' stem(Disposal$pounds)
#' fivenum(Disposal$pounds)
#' EDA(Disposal$pounds)
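#' # An additional illustration (not part of Exercise 1.127): the same variable
#' # shown as a ggplot2 histogram, assuming the ggplot2 package is installed.
#' \dontrun{
#' library(ggplot2)
#' ggplot2::ggplot(data = Disposal, aes(x = pounds)) +
#'    geom_histogram(bins = 10, fill = "gray", color = "black") +
#'    theme_bw() +
#'    labs(x = "Pounds of toxic waste per $1000 of shipments")
#' }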
#'
"Disposal"
#' Rankings of the favorite breeds of dogs
#'
#' Data for Exercise 2.88
#'
#'
#' @name Dogs
#' @docType data
#' @format A data frame/tibble with 20 observations on three variables
#' \describe{
#' \item{breed}{a factor with levels \code{Beagle},
#' \code{Boxer}, \code{Chihuahua}, \code{Chow}, \code{Dachshund},
#' \code{Dalmatian}, \code{Doberman}, \code{Huskie}, \code{Labrador},
#' \code{Pomeranian}, \code{Poodle}, \code{Retriever}, \code{Rotweiler},
#' \code{Schnauzer}, \code{Shepherd}, \code{Shetland}, \code{ShihTzu},
#' \code{Spaniel}, \code{Springer}, and \code{Yorkshire}}
#' \item{ranking}{numeric ranking}
#' \item{year}{a factor with levels \code{1992}, \code{1993}, \code{1997},
#' and \code{1998}}
#' }
#'
#' @source \emph{The World Almanac and Book of Facts}, 2000.
#'
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' cor(Dogs$ranking[Dogs$year == "1992"], Dogs$ranking[Dogs$year == "1993"])
#' cor(Dogs$ranking[Dogs$year == "1997"], Dogs$ranking[Dogs$year == "1998"])
#' \dontrun{
#' library(ggplot2)
#' ggplot2::ggplot(data = Dogs, aes(x = reorder(breed, ranking), y = ranking)) +
#' geom_bar(stat = "identity") +
#' facet_grid(year ~. ) +
#' theme(axis.text.x = element_text(angle = 85, vjust = 0.5))
#' }
"Dogs"
#' Rates of domestic violence per 1,000 women by age groups
#'
#' Data for Exercise 1.20
#'
#'
#' @name Domestic
#' @docType data
#' @format A data frame/tibble with five observations on two variables
#' \describe{
#' \item{age}{a factor with levels \code{12-19}, \code{20-24},
#' \code{25-34}, \code{35-49}, and \code{50-64}}
#' \item{rate}{rate of domestic violence per 1000 women}
#' }
#'
#' @source U.S. Department of Justice.
#'
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' barplot(Domestic$rate, names.arg = Domestic$age)
#' \dontrun{
#' library(ggplot2)
#' ggplot2::ggplot(data = Domestic, aes(x = age, y = rate)) +
#' geom_bar(stat = "identity", fill = "purple", color = "black") +
#' labs(x = "", y = "Domestic violence per 1000 women") +
#' theme_bw()
#' }
"Domestic"
#' Dopamine b-hydroxylase activity of schizophrenic patients treated with an
#' antipsychotic drug
#'
#' Data for Exercises 5.14 and 7.49
#'
#'
#' @name Dopamine
#' @docType data
#' @format A data frame/tibble with 25 observations on two variables
#' \describe{
#' \item{dbh}{dopamine b-hydroxylase activity (units are nmol/(ml)(h)/(mg) of protein)}
#' \item{group}{a factor with levels \code{nonpsychotic} and \code{psychotic}}
#' }
#'
#' @source D.E. Sternberg, D.P. Van Kammen, and W.E. Bunney, "Schizophrenia: Dopamine
#' b-Hydroxylase Activity and Treatment Response," \emph{Science, 216} (1982), 1423 - 1425.
#'
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' boxplot(dbh ~ group, data = Dopamine, col = "orange")
#' t.test(dbh ~ group, data = Dopamine, var.equal = TRUE)
#'
"Dopamine"
#' Closing year-end Dow Jones Industrial Average from 1896 through 2000
#'
#' Data for Exercise 1.35
#'
#'
#' @name Dowjones
#' @docType data
#' @format A data frame/tibble with 105 observations on three variables
#' \describe{
#' \item{year}{date}
#' \item{close}{Dow Jones closing price}
#' \item{change}{percent change from previous year}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' plot(close ~ year, data = Dowjones, type = "l", main = "Exercise 1.35")
#' \dontrun{
#' library(ggplot2)
#' ggplot2::ggplot(data = Dowjones, aes(x = year, y = close)) +
#' geom_point(size = 0.5) +
#' geom_line(color = "red") +
#' theme_bw() +
#' labs(y = "Dow Jones Closing Price")
#' }
"Dowjones"
#' Opinion on referendum by view on moral issue of selling alcoholic beverages
#'
#' Data for Exercise 8.53
#'
#'
#' @name Drink
#' @docType data
#' @format A data frame/tibble with 472 observations on two variables
#' \describe{
#' \item{drinking}{a factor with levels \code{ok},
#' \code{tolerated}, and \code{immoral}}
#' \item{referendum}{a factor with levels \code{for}, \code{against}, and \code{undecided}}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' T1 <- xtabs(~drinking + referendum, data = Drink)
#' T1
#' chisq.test(T1)
#' rm(T1)
#'
"Drink"
#' Number of trials to master a task for a group of 28 subjects assigned to a
#' control and an experimental group
#'
#' Data for Example 7.15
#'
#'
#' @name Drug
#' @docType data
#' @format A data frame/tibble with 28 observations on two variables
#' \describe{
#' \item{trials}{number of trials to master a task}
#' \item{group}{a factor with levels \code{control} and \code{experimental}}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' boxplot(trials ~ group, data = Drug,
#' main = "Example 7.15", col = c("yellow", "red"))
#' wilcox.test(trials ~ group, data = Drug)
#' t.test(rank(trials) ~ group, data = Drug, var.equal = TRUE)
#'
"Drug"
#' Data on a group of college students diagnosed with dyslexia
#'
#' Data for Exercise 2.90
#'
#'
#' @name Dyslexia
#' @docType data
#' @format A data frame/tibble with eight observations on seven variables
#' \describe{
#' \item{words}{number of words read per minute}
#' \item{age}{age of participant}
#' \item{gender}{a factor with levels \code{female} and
#' \code{male}}
#' \item{handed}{a factor with levels \code{left} and \code{right}}
#' \item{weight}{weight of participant (in pounds)}
#' \item{height}{height of participant (in inches)}
#' \item{children}{number of children in family}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' plot(height ~ weight, data = Dyslexia)
#' plot(words ~ factor(handed), data = Dyslexia,
#' xlab = "hand", col = "lightblue")
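#' # An additional illustration (not part of Exercise 2.90): the height-weight
#' # scatterplot with points colored by gender, assuming ggplot2 is installed.
#' \dontrun{
#' library(ggplot2)
#' ggplot2::ggplot(data = Dyslexia, aes(x = weight, y = height, color = gender)) +
#'    geom_point() +
#'    theme_bw() +
#'    labs(x = "Weight (in pounds)", y = "Height (in inches)")
#' }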
#'
"Dyslexia"
#' One hundred year record of worldwide seismic activity (1770-1869)
#'
#' Data for Exercise 6.97
#'
#'
#' @name Earthqk
#' @docType data
#' @format A data frame/tibble with 100 observations on two variables
#' \describe{
#' \item{year}{year the seismic activity was recorded}
#' \item{severity}{annual incidence of severe earthquakes}
#' }
#'
#' @source Quenoille, M.H. (1952), \emph{Associated Measurements}, Butterworth, London.
#' p 279.
#'
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' EDA(Earthqk$severity)
#' t.test(Earthqk$severity, mu = 100, alternative = "greater")
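#' # An additional illustration (not part of Exercise 6.97): a distribution-free
#' # check of the same hypothesis using the sign test for the median.
#' SIGN.test(Earthqk$severity, md = 100, alternative = "greater")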
#'
"Earthqk"
#' Crime rates versus the percent of the population without a high school
#' degree
#'
#' Data for Exercise 2.41
#'
#'
#' @name Educat
#' @docType data
#' @format A data frame/tibble with 51 observations on three variables
#' \describe{
#' \item{state}{a factor with levels \code{Alabama},
#' \code{Alaska}, \code{Arizona}, \code{Arkansas}, \code{California},
#' \code{Colorado}, \code{Connecticut}, \code{DC}, \code{Delaware}, \code{Florida},
#' \code{Georgia}, \code{Hawaii}, \code{Idaho}, \code{Illinois}, \code{Indiana},
#' \code{Iowa}, \code{Kansas}, \code{Kentucky}, \code{Louisiana}, \code{Maine},
#' \code{Maryland}, \code{Massachusetts}, \code{Michigan}, \code{Minnesota},
#' \code{Mississippi}, \code{Missouri}, \code{Montana}, \code{Nebraska},
#' \code{Nevada}, \code{New Hampshire}, \code{New Jersey}, \code{New Mexico},
#' \code{New York}, \code{North Carolina}, \code{North Dakota}, \code{Ohio},
#' \code{Oklahoma}, \code{Oregon}, \code{Pennsylvania}, \code{Rhode Island},
#' \code{South Carolina}, \code{South Dakota}, \code{Tennessee}, \code{Texas},
#' \code{Utah}, \code{Vermont}, \code{Virginia}, \code{Washington}, \code{West
#' Virginia}, \code{Wisconsin}, and \code{Wyoming}}
#' \item{nodegree}{percent of the population without a high school degree}
#' \item{crime}{violent crimes per 100,000 population}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' plot(crime ~ nodegree, data = Educat,
#' xlab = "Percent of population without high school degree",
#' ylab = "Violent Crime Rate per 100,000")
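#' # An additional illustration (not part of Exercise 2.41): add a least-squares
#' # line to the scatterplot and compute the correlation.
#' abline(lm(crime ~ nodegree, data = Educat), col = "red")
#' cor(Educat$nodegree, Educat$crime)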
#'
"Educat"
#' Number of eggs versus amounts of feed supplement
#'
#' Data for Exercise 9.22
#'
#'
#' @name Eggs
#' @docType data
#' @format A data frame/tibble with 12 observations on two variables
#' \describe{
#' \item{feed}{amount of feed supplement}
#' \item{eggs}{number of eggs per day for 100 chickens}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' plot(eggs ~ feed, data = Eggs)
#' model <- lm(eggs ~ feed, data = Eggs)
#' abline(model, col = "red")
#' summary(model)
#' rm(model)
#'
"Eggs"
#' Percent of the population over the age of 65
#'
#' Data for Exercises 1.92 and 2.61
#'
#'
#' @name Elderly
#' @docType data
#' @format A data frame/tibble with 51 observations on three variables
#' \describe{
#' \item{state}{a factor with levels \code{Alabama},
#' \code{Alaska}, \code{Arizona}, \code{Arkansas}, \code{California},
#' \code{Colorado}, \code{Connecticut}, \code{Delaware}, \code{District of
#' Columbia}, \code{Florida}, \code{Georgia}, \code{Hawaii}, \code{Idaho},
#' \code{Illinois}, \code{Indiana}, \code{Iowa}, \code{Kansas}, \code{Kentucky},
#' \code{Louisiana}, \code{Maine}, \code{Maryland}, \code{Massachusetts},
#' \code{Michigan}, \code{Minnesota}, \code{Mississippi}, \code{Missouri},
#' \code{Montana}, \code{Nebraska}, \code{Nevada}, \code{New Hampshire}, \code{New
#' Jersey}, \code{New Mexico}, \code{New York}, \code{North Carolina}, \code{North
#' Dakota}, \code{Ohio}, \code{Oklahoma}, \code{Oregon}, \code{Pennsylvania},
#' \code{Rhode Island}, \code{South Carolina}, \code{South Dakota},
#' \code{Tennessee}, \code{Texas}, \code{Utah}, \code{Vermont}, \code{Virginia},
#' \code{Washington}, \code{West Virginia}, \code{Wisconsin}, and \code{Wyoming}}
#' \item{percent1985}{percent of the population over the age of 65 in 1985}
#' \item{percent1998}{percent of the population over the age of 65 in 1998}
#' }
#'
#' @source U.S. Census Bureau Internet site, February 2000.
#'
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' with(data = Elderly,
#' stripchart(x = list(percent1998, percent1985), method = "stack", pch = 19,
#' col = c("red","blue"), group.names = c("1998", "1985"))
#' )
#' with(data = Elderly, cor(percent1998, percent1985))
#' \dontrun{
#' library(ggplot2)
#' ggplot2::ggplot(data = Elderly, aes(x = percent1985, y = percent1998)) +
#' geom_point() +
#' theme_bw()
#' }
"Elderly"
#' Amount of energy consumed by homes versus their sizes
#'
#' Data for Exercises 2.5, 2.24, and 2.55
#'
#'
#' @name Energy
#' @docType data
#' @format A data frame/tibble with 12 observations on two variables
#' \describe{
#' \item{size}{size of home (in square feet)}
#' \item{kilowatt}{kilowatt-hours per month}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' plot(kilowatt ~ size, data = Energy)
#' with(data = Energy, cor(size, kilowatt))
#' model <- lm(kilowatt ~ size, data = Energy)
#' plot(Energy$size, resid(model), xlab = "size")
#'
"Energy"
#' Salaries after 10 years for graduates of three different universities
#'
#' Data for Example 10.7
#'
#'
#' @name Engineer
#' @docType data
#' @format A data frame/tibble with 51 observations on two variables
#' \describe{
#' \item{salary}{salary (in $1000) 10 years after graduation}
#' \item{university}{a factor with levels \code{A}, \code{B}, and \code{C}}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' boxplot(salary ~ university, data = Engineer,
#' main = "Example 10.7", col = "yellow")
#' kruskal.test(salary ~ university, data = Engineer)
#' anova(lm(salary ~ university, data = Engineer))
#' anova(lm(rank(salary) ~ university, data = Engineer))
#'
"Engineer"
#' College entrance exam scores for 24 high school seniors
#'
#' Data for Example 1.8
#'
#'
#' @name Entrance
#' @docType data
#' @format A data frame/tibble with 24 observations on one variable
#' \describe{
#' \item{score}{college entrance exam score}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' stem(Entrance$score)
#' stem(Entrance$score, scale = 2)
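#' # An additional illustration (not part of Example 1.8): a five-number summary
#' # and a simple histogram of the scores.
#' fivenum(Entrance$score)
#' hist(Entrance$score, main = "", xlab = "College entrance exam score", col = "gray")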
#'
"Entrance"
#' Fuel efficiency ratings for compact vehicles in 2001
#'
#' Data for Exercise 1.65
#'
#'
#' @name Epaminicompact
#' @docType data
#' @format A data frame/tibble with 22 observations on ten variables
#' \describe{
#' \item{class}{a character variable with value \code{MINICOMPACT CARS}}
#' \item{manufacturer}{a character variable with values \code{AUDI},
#' \code{BMW}, \code{JAGUAR}, \code{MERCEDES-BENZ}, \code{MITSUBISHI}, and
#' \code{PORSCHE}}
#' \item{carline}{a character variable with values \code{325CI
#' CONVERTIBLE}, \code{330CI CONVERTIBLE}, \code{911 CARRERA 2/4}, \code{911
#' TURBO}, \code{CLK320 (CABRIOLET)}, \code{CLK430 (CABRIOLET)}, \code{ECLIPSE
#' SPYDER}, \code{JAGUAR XK8 CONVERTIBLE}, \code{JAGUAR XKR CONVERTIBLE}, \code{M3
#' CONVERTIBLE}, \code{TT COUPE}, and \code{TT COUPE QUATTRO}}
#' \item{displ}{engine displacement (in liters)}
#' \item{cyl}{number of cylinders}
#' \item{trans}{a factor with levels \code{Auto(L5)}, \code{Auto(S4)}, \code{Auto(S5)},
#' \code{Manual(M5)}, and \code{Manual(M6)}}
#' \item{drv}{a factor with levels \code{4}(four wheel drive), \code{F}(front wheel drive),
#' and \code{R}(rear wheel drive)}
#' \item{cty}{city mpg}
#' \item{hwy}{highway mpg}
#' \item{cmb}{combined city and highway mpg}
#' }
#'
#' @source EPA data.
#'
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' summary(Epaminicompact$cty)
#' plot(hwy ~ cty, data = Epaminicompact)
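#' # An additional illustration (not part of Exercise 1.65): the same city/highway
#' # relationship drawn with ggplot2, assuming the package is installed.
#' \dontrun{
#' library(ggplot2)
#' ggplot2::ggplot(data = Epaminicompact, aes(x = cty, y = hwy)) +
#'    geom_point() +
#'    theme_bw() +
#'    labs(x = "City mpg", y = "Highway mpg")
#' }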
#'
"Epaminicompact"
#' Fuel efficiency ratings for two-seater vehicles in 2001
#'
#' Data for Exercise 5.8
#'
#'
#' @name Epatwoseater
#' @docType data
#' @format A data frame/tibble with 36 observations on ten variables
#' \describe{
#' \item{class}{a character variable with value \code{TWO SEATERS}}
#' \item{manufacturer}{a character variable with values \code{ACURA}, \code{AUDI},
#' \code{BMW}, \code{CHEVROLET}, \code{DODGE}, \code{FERRARI}, \code{HONDA},
#' \code{LAMBORGHINI}, \code{MAZDA}, \code{MERCEDES-BENZ}, \code{PLYMOUTH},
#' \code{PORSCHE}, and \code{TOYOTA}}
#' \item{carline}{a character variable with values
#' \code{BOXSTER}, \code{BOXSTER S}, \code{CORVETTE}, \code{DB132/144
#' DIABLO}, \code{FERRARI 360 MODENA/SPIDER}, \code{FERRARI 550
#' MARANELLO/BARCHETTA}, \code{INSIGHT}, \code{MR2} ,\code{MX-5 MIATA}, \code{NSX},
#' \code{PROWLER}, \code{S2000}, \code{SL500}, \code{SL600}, \code{SLK230
#' KOMPRESSOR}, \code{SLK320}, \code{TT ROADSTER}, \code{TT ROADSTER QUATTRO},
#' \code{VIPER CONVERTIBLE}, \code{VIPER COUPE}, \code{Z3 COUPE}, \code{Z3
#' ROADSTER}, and \code{Z8}}
#' \item{displ}{engine displacement (in liters)}
#' \item{cyl}{number of cylinders}
#' \item{trans}{a factor with levels \code{Auto(L4)}, \code{Auto(L5)}, \code{Auto(S4)},
#' \code{Auto(S5)}, \code{Auto(S6)}, \code{Manual(M5)}, and \code{Manual(M6)}}
#' \item{drv}{a factor with levels \code{4}(four wheel drive), \code{F}(front wheel drive), and \code{R}(rear wheel drive)}
#' \item{cty}{city mpg}
#' \item{hwy}{highway mpg}
#' \item{cmb}{combined city and highway mpg}
#' }
#'
#' @source Environmental Protection Agency.
#'
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' summary(Epatwoseater$cty)
#' plot(hwy ~ cty, data = Epatwoseater)
#' boxplot(cty ~ drv, data = Epatwoseater, col = "lightgreen")
#'
"Epatwoseater"
#' Ages of 25 executives
#'
#' Data for Exercise 1.104
#'
#'
#' @name Executiv
#' @docType data
#' @format A data frame/tibble with 25 observations on one variable
#' \describe{
#' \item{age}{a numeric vector}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' hist(Executiv$age, xlab = "Age of banking executives",
#' breaks = 5, main = "", col = "gray")
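#' # An additional illustration (not part of Exercise 1.104): the same distribution
#' # drawn with ggplot2, assuming the package is installed.
#' \dontrun{
#' library(ggplot2)
#' ggplot2::ggplot(data = Executiv, aes(x = age)) +
#'    geom_histogram(bins = 5, fill = "gray", color = "black") +
#'    theme_bw() +
#'    labs(x = "Age of banking executives")
#' }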
#'
"Executiv"
#' Weight loss for 30 members of an exercise program
#'
#' Data for Exercise 1.44
#'
#'
#' @name Exercise
#' @docType data
#' @format A data frame/tibble with 30 observations on one variable
#' \describe{
#' \item{loss}{a numeric vector}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' stem(Exercise$loss)
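#' # An additional illustration (not part of Exercise 1.44): summary statistics to
#' # accompany the stem-and-leaf display.
#' fivenum(Exercise$loss)
#' hist(Exercise$loss, main = "", xlab = "Weight loss", col = "gray")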
#'
"Exercise"
#' Measures of softness of ten different clothing garments washed with and
#' without a softener
#'
#' Data for Example 7.21
#'
#'
#' @name Fabric
#' @docType data
#' @format A data frame/tibble with 20 observations on three variables
#' \describe{
#' \item{garment}{a numeric vector}
#' \item{softner}{a character variable with values \code{with} and \code{without}}
#' \item{softness}{a numeric vector}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' \dontrun{
#' library(tidyr)
#' library(dplyr)
#' tidyr::spread(Fabric, softner, softness) -> FabricWide
#' wilcox.test(Pair(with, without)~1, alternative = "greater", data = FabricWide)
#' T7 <- tidyr::spread(Fabric, softner, softness) %>%
#' mutate(di = with - without, adi = abs(di), rk = rank(adi),
#' srk = sign(di)*rk)
#' T7
#' t.test(T7$srk, alternative = "greater")
#' }
"Fabric"
#' Waiting times between successive eruptions of the Old Faithful geyser
#'
#' Data for Exercises 5.12 and 5.111
#'
#'
#' @name Faithful
#' @docType data
#' @format A data frame/tibble with 299 observations on two variables
#' \describe{
#' \item{time}{a numeric vector}
#' \item{eruption}{a factor with levels \code{1} and \code{2}}
#' }
#'
#' @source A. Azzalini and A. Bowman, "A Look at Some Data on the Old Faithful Geyser,"
#' \emph{Journal of the Royal Statistical Society}, Series C, \emph{39} (1990), 357-366.
#'
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' t.test(time ~ eruption, data = Faithful)
#' hist(Faithful$time, xlab = "wait time", main = "", freq = FALSE)
#' lines(density(Faithful$time))
#'
#' \dontrun{
#' library(ggplot2)
#' ggplot2::ggplot(data = Faithful, aes(x = time, y = ..density..)) +
#' geom_histogram(binwidth = 5, fill = "pink", col = "black") +
#' geom_density() +
#' theme_bw() +
#' labs(x = "wait time")
#' }
"Faithful"
#' Size of family versus cost per person per week for groceries
#'
#' Data for Exercise 2.89
#'
#'
#' @name Family
#' @docType data
#' @format A data frame/tibble with 20 observations on two variables
#' \describe{
#' \item{number}{number in family}
#' \item{cost}{cost per person (in dollars)}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' plot(cost ~ number, data = Family)
#' abline(lm(cost ~ number, data = Family), col = "red")
#' cor(Family$cost, Family$number)
#'
#' \dontrun{
#' library(ggplot2)
#' ggplot2::ggplot(data = Family, aes(x = number, y = cost)) +
#' geom_point() +
#' geom_smooth(method = "lm") +
#' theme_bw()
#' }
#'
"Family"
#' Choice of presidential ticket in 1984 by gender
#'
#' Data for Exercise 8.23
#'
#'
#' @name Ferraro1
#' @docType data
#' @format A data frame/tibble with 1000 observations on two variables
#' \describe{
#' \item{gender}{a factor with levels \code{Men} and
#' \code{Women}}
#' \item{candidate}{a character vector of 1984 presidential and vice-presidential candidates}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' T1 <- xtabs(~gender + candidate, data = Ferraro1)
#' T1
#' chisq.test(T1)
#' rm(T1)
#'
"Ferraro1"
#' Choice of vice presidential candidate in 1984 by gender
#'
#' Data for Exercise 8.23
#'
#'
#' @name Ferraro2
#' @docType data
#' @format A data frame/tibble with 1000 observations on two variables
#' \describe{
#' \item{gender}{a factor with levels \code{Men} and
#' \code{Women}}
#' \item{candidate}{a character vector of 1984 presidential and vice-presidential candidates}
#' }
#'
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' T1 <- xtabs(~gender + candidate, data = Ferraro2)
#' T1
#' chisq.test(T1)
#' rm(T1)
#'
"Ferraro2"
#' Fertility rates of all 50 states and DC
#'
#' Data for Exercise 1.125
#'
#'
#' @name Fertility
#' @docType data
#' @format A data frame/tibble with 51 observations on two variables
#' \describe{
#' \item{state}{a character variable with values \code{Alabama},
#' \code{Alaska}, \code{Arizona}, \code{Arkansas}, \code{California},
#' \code{Colorado}, \code{Connecticut}, \code{Delaware}, \code{District of
#' Columbia}, \code{Florida}, \code{Georgia}, \code{Hawaii}, \code{Idaho},
#' \code{Illinois}, \code{Indiana}, \code{Iowa}, \code{Kansas}, \code{Kentucky},
#' \code{Louisiana}, \code{Maine}, \code{Maryland},\code{Massachusetts},
#' \code{Michigan}, \code{Minnesota}, \code{Mississippi}, \code{Missouri},
#' \code{Montana}, \code{Nebraska}, \code{Nevada}, \code{New Hampshire}, \code{New
#' Jersey}, \code{New Mexico}, \code{New York}, \code{North Carolina}, \code{North
#' Dakota}, \code{Ohio}, \code{Oklahoma}, \code{Oregon}, \code{Pennsylvania},
#' \code{Rhode Island}, \code{South Carolina}, \code{South Dakota},
#' \code{Tennessee}, \code{Texas}, \code{Utah}, \code{Vermont}, \code{Virginia},
#' \code{Washington}, \code{West Virginia}, \code{Wisconsin}, and \code{Wyoming}}
#' \item{rate}{fertility rate (expected number of births during childbearing years)}
#' }
#'
#' @source Population Reference Bureau.
#'
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' stem(Fertility$rate)
#' fivenum(Fertility$rate)
#' EDA(Fertility$rate)
#'
"Fertility"
#' Ages of women at the birth of their first child
#'
#' Data for Exercise 5.11
#'
#'
#' @name Firstchi
#' @docType data
#' @format A data frame/tibble with 87 observations on one variable
#' \describe{
#' \item{age}{age of woman at birth of her first child}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' EDA(Firstchi$age)
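#' # An additional illustration (not part of Exercise 5.11): a histogram with a
#' # density overlay for the ages.
#' hist(Firstchi$age, freq = FALSE, main = "", xlab = "Age at birth of first child")
#' lines(density(Firstchi$age))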
#'
"Firstchi"
#' Length and number of fish caught with small and large mesh codend
#'
#' Data for Exercises 5.83, 5.119, and 7.29
#'
#'
#' @name Fish
#' @docType data
#' @format A data frame/tibble with 1534 observations on two variables
#' \describe{
#' \item{codend}{a character variable with values \code{smallmesh} and \code{largemesh} }
#' \item{length}{length of the fish measured in centimeters}
#' }
#'
#' @source R. Millar, \dQuote{Estimating the Size-Selectivity of Fishing Gear by Conditioning
#' on the Total Catch,} \emph{Journal of the American Statistical Association, 87} (1992), 962 - 968.
#'
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' tapply(Fish$length, Fish$codend, median, na.rm = TRUE)
#' SIGN.test(Fish$length[Fish$codend == "smallmesh"], conf.level = 0.99)
#' \dontrun{
#' library(dplyr)
#' dplyr::group_by(Fish, codend) %>%
#' summarize(MEDIAN = median(length, na.rm = TRUE))
#' }
#'
"Fish"
#' Number of sit-ups before and after a physical fitness course
#'
#' Data for Exercise 7.71
#'
#'
#' @name Fitness
#' @docType data
#' @format A data frame/tibble with 18 observations on the three variables
#' \describe{
#' \item{subject}{a character variable indicating subject number}
#' \item{test}{a character variable with values \code{After} and \code{Before}}
#' \item{number}{a numeric vector recording the number of sit-ups performed in one minute}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' \dontrun{
#' library(dplyr)
#' tidyr::spread(Fitness, test, number) -> FitnessWide
#' t.test(Pair(After, Before)~1, alternative = "greater", data = FitnessWide)
#'
#' Wide <- tidyr::spread(Fitness, test, number) %>%
#' mutate(diff = After - Before)
#' Wide
#' qqnorm(Wide$diff)
#' qqline(Wide$diff)
#' t.test(Wide$diff, alternative = "greater")
#' }
#'
"Fitness"
#' Florida voter results in the 2000 presidential election
#'
#' Data for Statistical Insight Chapter 2
#'
#'
#' @name Florida2000
#' @docType data
#' @format A data frame/tibble with 67 observations on 12 variables
#' \describe{
#' \item{county}{a character variable with values \code{ALACHUA},
#' \code{BAKER}, \code{BAY}, \code{BRADFORD}, \code{BREVARD}, \code{BROWARD},
#' \code{CALHOUN}, \code{CHARLOTTE}, \code{CITRUS}, \code{CLAY}, \code{COLLIER},
#' \code{COLUMBIA}, \code{DADE}, \code{DE SOTO}, \code{DIXIE}, \code{DUVAL},
#' \code{ESCAMBIA}, \code{FLAGLER}, \code{FRANKLIN}, \code{GADSDEN},
#' \code{GILCHRIST}, \code{GLADES}, \code{GULF}, \code{HAMILTON}, \code{HARDEE},
#' \code{HENDRY}, \code{HERNANDO}, \code{HIGHLANDS}, \code{HILLSBOROUGH},
#' \code{HOLMES}, \code{INDIAN RIVER}, \code{JACKSON}, \code{JEFFERSON},
#' \code{LAFAYETTE}, \code{LAKE}, \code{LEE}, \code{LEON}, \code{LEVY},
#' \code{LIBERTY}, \code{MADISON}, \code{MANATEE}, \code{MARION}, \code{MARTIN},
#' \code{MONROE}, \code{NASSAU}, \code{OKALOOSA}, \code{OKEECHOBEE}, \code{ORANGE},
#' \code{OSCEOLA}, \code{PALM BEACH}, \code{PASCO}, \code{PINELLAS}, \code{POLK},
#' \code{PUTNAM}, \code{SANTA ROSA}, \code{SARASOTA}, \code{SEMINOLE},
#' \code{ST. JOHNS}, \code{ST. LUCIE}, \code{SUMTER}, \code{SUWANNEE}, \code{TAYLOR},
#' \code{UNION}, \code{VOLUSIA}, \code{WAKULLA}, \code{WALTON}, and \code{WASHINGTON}
#' }
#' \item{gore}{number of votes}
#' \item{bush}{number of votes}
#' \item{buchanan}{number of votes}
#' \item{nader}{number of votes}
#' \item{browne}{number of votes}
#' \item{hagelin}{number of votes}
#' \item{harris}{number of votes}
#' \item{mcreynolds}{number of votes}
#' \item{moorehead}{number of votes}
#' \item{phillips}{number of votes}
#' \item{total}{number of votes}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' plot(buchanan ~ total, data = Florida2000,
#' xlab = "Total votes cast (in thousands)",
#' ylab = "Votes for Buchanan")
#'
"Florida2000"
#' Breakdown times of an insulating fluid under various levels of voltage
#' stress
#'
#' Data for Exercise 5.76
#'
#'
#' @name Fluid
#' @docType data
#' @format A data frame/tibble with 76 observations on two variables
#' \describe{
#' \item{kilovolts}{a character variable giving the voltage stress level (in kilovolts)}
#' \item{time}{breakdown time (in minutes)}
#' }
#'
#' @source E. Soofi, N. Ebrahimi, and M. Habibullah, 1995.
#'
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' DF1 <- Fluid[Fluid$kilovolts == "34kV", ]
#' DF1
#' # OR
#' DF2 <- subset(Fluid, subset = kilovolts == "34kV")
#' DF2
#' stem(DF2$time)
#' SIGN.test(DF2$time)
#' \dontrun{
#' library(dplyr)
#' DF3 <- dplyr::filter(Fluid, kilovolts == "34kV")
#' DF3
#' }
#'
"Fluid"
#' Annual food expenditures for 40 single households in Ohio
#'
#' Data for Exercise 5.106
#'
#'
#' @name Food
#' @docType data
#' @format A data frame/tibble with 40 observations on one variable
#' \describe{
#' \item{expenditure}{a numeric vector recording annual food expenditure (in dollars) in the state of Ohio.}
#' }
#'
#' @source Bureau of Labor Statistics.
#'
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' EDA(Food$expenditure)
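#' # An additional illustration (not part of Exercise 5.106): a 95 percent t
#' # confidence interval for the mean annual food expenditure.
#' t.test(Food$expenditure)$conf.int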
#'
"Food"
#' Cholesterol values of 62 subjects in the Framingham Heart Study
#'
#' Data for Exercises 1.56, 1.75, 3.69, and 5.60
#'
#'
#' @name Framingh
#' @docType data
#' @format A data frame/tibble with 62 observations on one variable
#' \describe{
#' \item{cholest}{a numeric vector with cholesterol values}
#' }
#'
#' @source R. D'Agostino, et al., (1990) "A Suggestion for Using Powerful and Informative
#' Tests for Normality," \emph{The American Statistician, 44} 316-321.
#'
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' stem(Framingh$cholest)
#' boxplot(Framingh$cholest, horizontal = TRUE)
#' hist(Framingh$cholest, freq = FALSE)
#' lines(density(Framingh$cholest))
#' mean(Framingh$cholest > 200 & Framingh$cholest < 240)
#'
#' \dontrun{
#' library(ggplot2)
#' ggplot2::ggplot(data = Framingh, aes(x = factor(1), y = cholest)) +
#' geom_boxplot() + # boxplot
#' labs(x = "") + # no x label
#' theme_bw() + # black and white theme
#' geom_jitter(width = 0.2) + # jitter points
#' coord_flip() # Create horizontal plot
#' ggplot2::ggplot(data = Framingh, aes(x = cholest, y = ..density..)) +
#' geom_histogram(fill = "pink", binwidth = 15, color = "black") +
#' geom_density() +
#' theme_bw()
#' }
#'
"Framingh"
#' Ages of a random sample of 30 college freshmen
#'
#' Data for Exercise 6.53
#'
#'
#' @name Freshman
#' @docType data
#' @format A data frame/tibble with 30 observations on one variable
#' \describe{
#' \item{age}{a numeric vector of ages}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' SIGN.test(Freshman$age, md = 19)
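#' # An additional illustration (not part of Exercise 6.53): a parametric
#' # counterpart testing the same null value with a one-sample t-test.
#' t.test(Freshman$age, mu = 19)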
#'
"Freshman"
#' Cost of funeral by region of country
#'
#' Data for Exercise 8.54
#'
#'
#' @name Funeral
#' @docType data
#' @format A data frame/tibble with 400 observations on two variables
#' \describe{
#' \item{region}{a factor with levels \code{Central},
#' \code{East,} \code{South}, and \code{West}}
#' \item{cost}{a factor with levels \code{less than expected}, \code{about what expected},
#' and \code{more than expected}}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' T1 <- xtabs(~region + cost, data = Funeral)
#' T1
#' chisq.test(T1)
#' rm(T1)
#'
"Funeral"
#' Velocities of 82 galaxies in the Corona Borealis region
#'
#' Data for Example 5.2
#'
#'
#' @name Galaxie
#' @docType data
#' @format A data frame/tibble with 82 observations on one variable
#' \describe{
#' \item{velocity}{velocity measured in kilometers per second}
#' }
#'
#' @source K. Roeder, "Density Estimation with Confidence Sets Explained by Superclusters
#' and Voids in the Galaxies," \emph{Journal of the American Statistical Association}, 85
#' (1990), 617-624.
#'
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' EDA(Galaxie$velocity)
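#' # An additional illustration (not part of Example 5.2): a histogram with a
#' # density overlay for the velocities.
#' hist(Galaxie$velocity, freq = FALSE, main = "", xlab = "Velocity (km/sec)")
#' lines(density(Galaxie$velocity))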
#'
"Galaxie"
#' Results of a Gallup poll on possession of marijuana as a criminal offense
#' conducted in 1980
#'
#' Data for Exercise 2.76
#'
#'
#' @name Gallup
#' @docType data
#' @format A data frame/tibble with 1,200 observations on two variables
#' \describe{
#' \item{demographics}{a factor with levels \code{National}, \code{Gender: Male},
#' \code{Gender: Female}, \code{Education: College}, \code{Education: High School},
#' \code{Education: Grade School}, \code{Age: 18-24}, \code{Age: 25-29}, \code{Age: 30-49},
#' \code{Age: 50-older}, \code{Religion: Protestant}, and \code{Religion: Catholic}}
#' \item{opinion}{a factor with levels \code{Criminal}, \code{Not Criminal}, and \code{No Opinion}}
#' }
#'
#' @source George H. Gallup \emph{The Gallup Opinion Index Report No. 179} (Princeton, NJ:
#' The Gallup Poll, July 1980), p. 15.
#'
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' T1 <- xtabs(~demographics + opinion, data = Gallup)
#' T1
#' t(T1[c(2, 3), ])
#' barplot(t(T1[c(2, 3), ]))
#' barplot(t(T1[c(2, 3), ]), beside = TRUE)
#'
#' \dontrun{
#' library(dplyr)
#' library(ggplot2)
#' dplyr::filter(Gallup, demographics == "Gender: Male" | demographics == "Gender: Female") %>%
#' ggplot2::ggplot(aes(x = demographics, fill = opinion)) +
#' geom_bar() +
#' theme_bw() +
#' labs(y = "Fraction")
#' }
#'
"Gallup"
#' Price of regular unleaded gasoline obtained from 25 service stations
#'
#' Data for Exercise 1.45
#'
#'
#' @name Gasoline
#' @docType data
#' @format A data frame/tibble with 25 observations on one variable
#' \describe{
#' \item{price}{price for one gallon of gasoline}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' stem(Gasoline$price)
#'
#' \dontrun{
#' library(ggplot2)
#' ggplot2::ggplot(data = Gasoline, aes(x = factor(1), y = price)) +
#' geom_violin() +
#' geom_jitter() +
#' theme_bw()
#' }
#'
"Gasoline"
#' Number of errors in copying a German passage before and after an
#' experimental course in German
#'
#' Data for Exercise 7.60
#'
#'
#' @name German
#' @docType data
#' @format A data frame/tibble with ten observations on three variables
#' \describe{
#' \item{student}{a character variable indicating student number}
#' \item{when}{a character variable with values \code{Before} and \code{After}
#' to indicate when the student received experimental instruction in German}
#' \item{errors}{the number of errors in copying a German passage}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' \dontrun{
#' library(dplyr)
#' tidyr::spread(German, when, errors) -> GermanWide
#' t.test(Pair(After, Before) ~ 1, data = GermanWide)
#' wilcox.test(Pair(After, Before) ~ 1, data = GermanWide)
#' T8 <- tidyr::spread(German, when, errors) %>%
#' mutate(di = After - Before, adi = abs(di), rk = rank(adi), srk = sign(di)*rk)
#' T8
#' qqnorm(T8$di)
#' qqline(T8$di)
#' t.test(T8$srk)
#' }
#'
"German"
#' Distances a golf ball can be driven by 20 professional golfers
#'
#' Data for Exercise 5.24
#'
#'
#' @name Golf
#' @docType data
#' @format A data frame/tibble with 20 observations on one variable
#' \describe{
#' \item{yards}{distance a golf ball is driven in yards}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' stem(Golf$yards)
#' qqnorm(Golf$yards)
#' qqline(Golf$yards)
#'
#' \dontrun{
#' library(ggplot2)
#' ggplot2::ggplot(data = Golf, aes(sample = yards)) +
#' geom_qq() +
#' theme_bw()
#' }
#'
"Golf"
#' Annual salaries for state governors in 1994 and 1999
#'
#' Data for Exercise 5.112
#'
#'
#' @name Governor
#' @docType data
#' @format A data frame/tibble with 50 observations on three variables
#' \describe{
#' \item{state}{a character variable with values \code{Alabama},
#' \code{Alaska}, \code{Arizona}, \code{Arkansas}, \code{California},
#' \code{Colorado}, \code{Connecticut}, \code{Delaware}, \code{Florida},
#' \code{Georgia}, \code{Hawaii}, \code{Idaho}, \code{Illinois}, \code{Indiana},
#' \code{Iowa}, \code{Kansas}, \code{Kentucky}, \code{Louisiana}, \code{Maine},
#' \code{Maryland}, \code{Massachusetts}, \code{Michigan}, \code{Minnesota},
#' \code{Mississippi}, \code{Missouri}, \code{Montana}, \code{Nebraska},
#' \code{Nevada}, \code{New Hampshire}, \code{New Jersey}, \code{New Mexico},
#' \code{New York}, \code{North Carolina}, \code{North Dakota}, \code{Ohio},
#' \code{Oklahoma}, \code{Oregon}, \code{Pennsylvania}, \code{Rhode Island},
#' \code{South Carolina}, \code{South Dakota}, \code{Tennessee}, \code{Texas},
#' \code{Utah}, \code{Vermont}, \code{Virginia}, \code{Washington}, \code{West
#' Virginia}, \code{Wisconsin}, and \code{Wyoming}}
#' \item{year}{a factor indicating year}
#' \item{salary}{a numeric vector with the governor's salary (in dollars)}
#' }
#'
#' @source \emph{The 2000 World Almanac and Book of Facts}.
#'
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' boxplot(salary ~ year, data = Governor)
#'
#' \dontrun{
#' library(ggplot2)
#' ggplot2::ggplot(data = Governor, aes(x = salary)) +
#' geom_density(fill = "pink") +
#' facet_grid(year ~ .) +
#' theme_bw()
#' }
#'
"Governor"
#' High school GPA versus college GPA
#'
#' Data for Example 2.13
#'
#'
#' @name Gpa
#' @docType data
#' @format A data frame/tibble with 10 observations on two variables
#' \describe{
#' \item{hsgpa}{high school gpa}
#' \item{collgpa}{college gpa}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' plot(collgpa ~ hsgpa, data = Gpa)
#' mod <- lm(collgpa ~ hsgpa, data = Gpa)
#' abline(mod) # add line
#' yhat <- predict(mod) # fitted values
#' e <- resid(mod) # residuals
#' cbind(Gpa, yhat, e) # Table 2.1
#' cor(Gpa$hsgpa, Gpa$collgpa)
#'
#' \dontrun{
#' library(ggplot2)
#' ggplot2::ggplot(data = Gpa, aes(x = hsgpa, y = collgpa)) +
#' geom_point() +
#' geom_smooth(method = "lm") +
#' theme_bw()
#' }
#'
#'
"Gpa"
#' Test grades in a beginning statistics class
#'
#' Data for Exercise 1.120
#'
#'
#' @name Grades
#' @docType data
#' @format A data frame with 29 observations on one variable
#' \describe{
#' \item{grades}{a numeric vector containing test grades}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' hist(Grades$grades, main = "", xlab = "Test grades", right = FALSE)
#'
#' \dontrun{
#' library(ggplot2)
#' ggplot2::ggplot(data = Grades, aes(x = grades, y = ..density..)) +
#' geom_histogram(fill = "pink", binwidth = 5, color = "black") +
#' geom_density(lwd = 2, color = "red") +
#' theme_bw()
#' }
#'
"Grades"
#' Graduation rates for student athletes in the Southeastern Conference
#'
#' Data for Exercise 1.118
#'
#'
#' @name Graduate
#' @docType data
#' @format A data frame/tibble with 12 observations on three variables
#' \describe{
#' \item{school}{a character variable with values \code{Alabama},
#' \code{Arkansas}, \code{Auburn}, \code{Florida}, \code{Georgia}, \code{Kentucky},
#' \code{Louisiana St}, \code{Mississippi}, \code{Mississippi St}, \code{South
#' Carolina,} \code{Tennessee}, and \code{Vanderbilt}}
#' \item{code}{a character variable with values \code{Al}, \code{Ar}, \code{Au}
#' \code{Fl}, \code{Ge}, \code{Ke}, \code{LSt}, \code{Mi}, \code{MSt}, \code{SC},
#' \code{Te}, and \code{Va}}
#' \item{percent}{graduation rate}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' barplot(Graduate$percent, names.arg = Graduate$school,
#' las = 2, cex.names = 0.7, col = "tomato")
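#' # An additional illustration (not part of Exercise 1.118): the same rates drawn
#' # with ggplot2 and ordered by graduation rate, assuming the package is installed.
#' \dontrun{
#' library(ggplot2)
#' ggplot2::ggplot(data = Graduate, aes(x = reorder(school, percent), y = percent)) +
#'    geom_bar(stat = "identity", fill = "tomato", color = "black") +
#'    theme_bw() +
#'    theme(axis.text.x = element_text(angle = 85, vjust = 0.5)) +
#'    labs(x = "", y = "Graduation rate")
#' }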
#'
"Graduate"
#' Varve thickness from a sequence through an Eocene lake deposit in the Rocky
#' Mountains
#'
#' Data for Exercise 6.57
#'
#'
#' @name Greenriv
#' @docType data
#' @format A data frame/tibble with 37 observations on one variable
#' \describe{
#' \item{thick}{varve thickness in millimeters}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' stem(Greenriv$thick)
#' SIGN.test(Greenriv$thick, md = 7.3, alternative = "greater")
#'
"Greenriv"
#' Thickness of a varved section of the Green river oil shale deposit near a
#' major lake in the Rocky Mountains
#'
#' Data for Exercises 6.45 and 6.98
#'
#'
#' @name Grnriv2
#' @docType data
#' @format A data frame/tibble with 101 observations on one variable
#' \describe{
#' \item{thick}{varve thickness (in millimeters)}
#' }
#'
#' @source J. Davis, \emph{Statistics and Data Analysis in Geology}, 2nd Ed., John Wiley and Sons, New York.
#'
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' stem(Grnriv2$thick)
#' t.test(Grnriv2$thick, mu = 8, alternative = "less")
#'
"Grnriv2"
#' Group data to illustrate analysis of variance
#'
#' Data for Exercise 10.42
#'
#'
#' @name Groupabc
#' @docType data
#' @format A data frame/tibble with 45 observations on two variables
#' \describe{
#' \item{group}{a factor with levels \code{A}, \code{B}, and \code{C}}
#' \item{response}{a numeric vector}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' boxplot(response ~ group, data = Groupabc,
#' col = c("red", "blue", "green"))
#' anova(lm(response ~ group, data = Groupabc))
#'
"Groupabc"
#' An illustration of analysis of variance
#'
#' Data for Exercise 10.4
#'
#'
#' @name Groups
#' @docType data
#' @format A data frame/tibble with 78 observations on two variables
#' \describe{
#' \item{group}{a factor with levels \code{A}, \code{B}, and \code{C}}
#' \item{response}{a numeric vector}
#' }
#'
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' boxplot(response ~ group, data = Groups, col = c("red", "blue", "green"))
#' anova(lm(response ~ group, data = Groups))
#'
#'
"Groups"
#' Children's age versus number of completed gymnastic activities
#'
#' Data for Exercises 2.21 and 9.14
#'
#'
#' @name Gym
#' @docType data
#' @format A data frame/tibble with eight observations on two variables
#' \describe{
#' \item{age}{age of child}
#' \item{number}{number of gymnastic activities successfully completed}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' plot(number ~ age, data = Gym)
#' model <- lm(number ~ age, data = Gym)
#' abline(model, col = "red")
#' summary(model)
#'
"Gym"
#' Study habits of students in two matched school districts
#'
#' Data for Exercise 7.57
#'
#'
#' @name Habits
#' @docType data
#' @format A data frame/tibble with 11 observations on four variables
#' \describe{
#' \item{A}{study habit score}
#' \item{B}{study habit score}
#' \item{differ}{\code{B} minus \code{A}}
#' \item{signrks}{the signed ranks of the differences}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' shapiro.test(Habits$differ)
#' qqnorm(Habits$differ)
#' qqline(Habits$differ)
#' wilcox.test(Pair(B, A) ~ 1, data = Habits, alternative = "less")
#' t.test(Habits$signrks, alternative = "less")
#'
#' \dontrun{
#' library(ggplot2)
#' ggplot2::ggplot(data = Habits, aes(x = differ)) +
#' geom_dotplot(fill = "blue") +
#' theme_bw()
#' }
#'
"Habits"
#' Haptoglobin concentration in blood serum of 8 healthy adults
#'
#' Data for Example 6.9
#'
#'
#' @name Haptoglo
#' @docType data
#' @format A data frame/tibble with eight observations on one variable
#' \describe{
#' \item{concent}{haptoglobin concentration (in grams per liter)}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' shapiro.test(Haptoglo$concent)
#' t.test(Haptoglo$concent, mu = 2, alternative = "less")
#'
#'
"Haptoglo"
#' Daily receipts for a small hardware store for 31 working days
#'
#'
#'
#' @name Hardware
#' @docType data
#' @format A data frame with 31 observations on one variable
#' \describe{
#' \item{receipt}{a numeric vector of daily receipts (in dollars)}
#' }
#'
#' @source J.C. Miller and J.N. Miller, (1988), \emph{Statistics for Analytical Chemistry}, 2nd Ed.
#' (New York: Halsted Press).
#'
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' stem(Hardware$receipt)
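#' # An additional illustration: a five-number summary and a horizontal boxplot of
#' # the daily receipts.
#' fivenum(Hardware$receipt)
#' boxplot(Hardware$receipt, horizontal = TRUE, col = "gray")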
#'
"Hardware"
#' Tensile strength of Kraft paper for different percentages of hardwood in the
#' batches of pulp
#'
#' Data for Example 2.18 and Exercise 9.34
#'
#'
#' @name Hardwood
#' @docType data
#' @format A data frame/tibble with 19 observations on two variables
#' \describe{
#' \item{tensile}{tensile strength of kraft paper (in pounds per square inch)}
#' \item{hardwood}{percent of hardwood in the batch of pulp that was used to produce the paper}
#' }
#'
#' @source G. Joglekar, et al., "Lack-of-Fit Testing When Replicates Are Not Available,"
#' \emph{The American Statistician}, 43(3), (1989), 135-143.
#'
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' plot(tensile ~ hardwood, data = Hardwood)
#' model <- lm(tensile ~ hardwood, data = Hardwood)
#' abline(model, col = "red")
#' plot(model, which = 1)
#'
#'
"Hardwood"
#' Primary heating sources of homes on Indian reservations versus all
#' households
#'
#' Data for Exercise 1.29
#'
#'
#' @name Heat
#' @docType data
#' @format A data frame/tibble with 301 observations on two variables
#' \describe{
#' \item{fuel}{a factor with levels \code{Utility gas},
#' \code{LP bottled gas}, \code{Electricity}, \code{Fuel oil}, \code{Wood}, and
#' \code{Other}}
#' \item{location}{a factor with levels \code{American Indians on reservation},
#' \code{All U.S. households}, and \code{American Indians not on reservations}}
#' }
#'
#' @source Bureau of the Census, \emph{Housing of the American Indians on Reservations},
#' Statistical Brief 95-11, April 1995.
#'
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' T1 <- xtabs(~ fuel + location, data = Heat)
#' T1
#' barplot(t(T1), beside = TRUE, legend = TRUE)
#'
#' \dontrun{
#' library(ggplot2)
#' ggplot2::ggplot(data = Heat, aes(x = fuel, fill = location)) +
#' geom_bar(position = "dodge") +
#' labs(y = "percent") +
#' theme_bw() +
#' theme(axis.text.x = element_text(angle = 30, hjust = 1))
#' }
#'
"Heat"
#' Fuel efficiency ratings for three types of oil heaters
#'
#' Data for Exercise 10.32
#'
#'
#' @name Heating
#' @docType data
#' @format A data frame/tibble with 90 observations on the two variables
#' \describe{
#' \item{type}{a factor with levels \code{A}, \code{B}, and \code{C} denoting
#' the type of oil heater}
#' \item{efficiency}{heater efficiency rating}
#' }
#'
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' boxplot(efficiency ~ type, data = Heating,
#' col = c("red", "blue", "green"))
#' kruskal.test(efficiency ~ type, data = Heating)
#'
"Heating"
#' Results of treatments for Hodgkin's disease
#'
#' Data for Exercise 2.77
#'
#'
#' @name Hodgkin
#' @docType data
#' @format A data frame/tibble with 538 observations on two variables
#' \describe{
#' \item{type}{a factor with levels \code{LD},
#' \code{LP}, \code{MC}, and \code{NS}}
#' \item{response}{a factor with levels \code{Positive}, \code{Partial}, and \code{None}}
#' }
#'
#' @source I. Dunsmore, F. Daly, \emph{Statistical Methods, Unit 9, Categorical Data},
#' Milton Keynes, The Open University, 18.
#'
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' T1 <- xtabs(~type + response, data = Hodgkin)
#' T1
#' barplot(t(T1), legend = TRUE, beside = TRUE)
#'
#' \dontrun{
#' library(ggplot2)
#' ggplot2::ggplot(data = Hodgkin, aes(x = type, fill = response)) +
#' geom_bar(position = "dodge") +
#' theme_bw()
#' }
#'
"Hodgkin"
#' Median prices of single-family homes in 65 metropolitan statistical areas
#'
#' Data for Statistical Insight Chapter 5
#'
#'
#' @name Homes
#' @docType data
#' @format A data frame/tibble with 65 observations on the four variables
#' \describe{
#' \item{city}{a character variable with values \code{Akron OH},
#' \code{Albuquerque NM}, \code{Anaheim CA}, \code{Atlanta GA}, \code{Baltimore
#' MD}, \code{Baton Rouge LA}, \code{Birmingham AL}, \code{Boston MA},
#' \code{Bradenton FL}, \code{Buffalo NY}, \code{Charleston SC}, \code{Chicago
#' IL}, \code{Cincinnati OH}, \code{Cleveland OH}, \code{Columbia SC},
#' \code{Columbus OH}, \code{Corpus Christi TX}, \code{Dallas TX},
#' \code{Daytona Beach FL}, \code{Denver CO}, \code{Des Moines IA},
#' \code{Detroit MI}, \code{El Paso TX}, \code{Grand Rapids MI},
#' \code{Hartford CT}, \code{Honolulu HI}, \code{Houston TX},
#' \code{Indianapolis IN}, \code{Jacksonville FL}, \code{Kansas City MO},
#' \code{Knoxville TN}, \code{Las Vegas NV}, \code{Los Angeles CA},
#' \code{Louisville KY}, \code{Madison WI}, \code{Memphis TN}, \code{Miami FL},
#' \code{Milwaukee WI}, \code{Minneapolis MN}, \code{Mobile AL},
#' \code{Nashville TN}, \code{New Haven CT}, \code{New Orleans LA}, \code{New
#' York NY}, \code{Oklahoma City OK}, \code{Omaha NE}, \code{Orlando FL},
#' \code{Philadelphia PA}, \code{Phoenix AZ}, \code{Pittsburgh PA},
#' \code{Portland OR}, \code{Providence RI}, \code{Sacramento CA}, \code{Salt
#' Lake City UT}, \code{San Antonio TX}, \code{San Diego CA}, \code{San
#' Francisco CA}, \code{Seattle WA}, \code{Spokane WA}, \code{St Louis MO},
#' \code{Syracuse NY}, \code{Tampa FL}, \code{Toledo OH}, \code{Tulsa OK}, and
#' \code{Washington DC}}
#' \item{region}{a character variable with values \code{Midwest}, \code{Northeast},
#' \code{South}, and \code{West}}
#' \item{year}{a factor with levels \code{1994} and \code{2000}}
#' \item{price}{median house price (in dollars)}
#' }
#'
#' @source National Association of Realtors.
#'
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' tapply(Homes$price, Homes$year, mean)
#' tapply(Homes$price, Homes$region, mean)
#' p2000 <- subset(Homes, year == "2000")
#' p1994 <- subset(Homes, year == "1994")
#' \dontrun{
#' library(dplyr)
#' library(ggplot2)
#' dplyr::group_by(Homes, year, region) %>%
#' summarize(AvgPrice = mean(price))
#' ggplot2::ggplot(data = Homes, aes(x = region, y = price)) +
#' geom_boxplot() +
#' theme_bw() +
#' facet_grid(year ~ .)
#' }
#'
#'
"Homes"
#' Number of hours per week spent on homework for private and public high
#' school students
#'
#' Data for Exercise 7.78
#'
#'
#' @name Homework
#' @docType data
#' @format A data frame with 30 observations on two variables
#' \describe{
#' \item{school}{type of school either \code{private} or \code{public}}
#' \item{time}{number of hours per week spent on homework}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' boxplot(time ~ school, data = Homework,
#' ylab = "Hours per week spent on homework")
#' #
#' t.test(time ~ school, data = Homework)
#'
"Homework"
#' Miles per gallon for a Honda Civic on 35 different occasions
#'
#' Data for Statistical Insight Chapter 6
#'
#'
#' @name Honda
#' @docType data
#' @format A data frame/tibble with 35 observations on one variable
#' \describe{
#' \item{mileage}{miles per gallon for a Honda Civic}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#'
#' @examples
#'
#' t.test(Honda$mileage, mu = 40, alternative = "less")
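#' # An additional illustration (not part of the Statistical Insight): a quick look
#' # at the distribution behind the test.
#' EDA(Honda$mileage)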
#'
"Honda"
#' Hostility levels of high school students from rural, suburban, and urban
#' areas
#'
#' Data for Example 10.6
#'
#'
#' @name Hostile
#' @docType data
#' @format A data frame/tibble with 135 observations on two variables
#' \describe{
#' \item{location}{a factor with the location of the high school student
#' (\code{Rural}, \code{Suburban}, or \code{Urban})}
#' \item{hostility}{the score from the Hostility Level Test}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' boxplot(hostility ~ location, data = Hostile,
#' col = c("red", "blue", "green"))
#' kruskal.test(hostility ~ location, data = Hostile)
#'
"Hostile"
#' Median home prices for 1984 and 1993 in 37 markets across the U.S.
#'
#' Data for Exercise 5.82
#'
#'
#' @name Housing
#' @docType data
#' @format A data frame/tibble with 74 observations on three variables
#' \describe{
#' \item{city}{a character variable with values \code{Albany},
#' \code{Anaheim}, \code{Atlanta}, \code{Baltimore}, \code{Birmingham},
#' \code{Boston}, \code{Chicago}, \code{Cincinnati}, \code{Cleveland},
#' \code{Columbus}, \code{Dallas}, \code{Denver}, \code{Detroit}, \code{Ft
#' Lauderdale}, \code{Houston}, \code{Indianapolis}, \code{Kansas City}, \code{Los
#' Angeles}, \code{Louisville}, \code{Memphis}, \code{Miami}, \code{Milwaukee},
#' \code{Minneapolis}, \code{Nashville}, \code{New York}, \code{Oklahoma City},
#' \code{Philadelphia}, \code{Providence}, \code{Rochester}, \code{Salt Lake City},
#' \code{San Antonio}, \code{San Diego}, \code{San Francisco}, \code{San Jose},
#' \code{St Louis}, \code{Tampa}, and \code{Washington}}
#' \item{year}{a factor with levels \code{1984} and \code{1993}}
#' \item{price}{median house price (in dollars)}
#' }
#'
#' @source National Association of Realtors.
#'
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' stripchart(price ~ year, data = Housing, method = "stack",
#' pch = 1, col = c("red", "blue"))
#' \dontrun{
#' library(ggplot2)
#' ggplot2::ggplot(data = Housing, aes(x = price, fill = year)) +
#' geom_dotplot() +
#' facet_grid(year ~ .) +
#' theme_bw()
#' }
#'
"Housing"
#' Number of storms, hurricanes and El Nino effects from 1950 through 1995
#'
#' Data for Exercises 1.38, 10.19, and Example 1.6
#'
#'
#' @name Hurrican
#' @docType data
#' @format A data frame/tibble with 46 observations on four variables
#' \describe{
#' \item{year}{a numeric vector indicating year}
#' \item{storms}{a numeric vector recording number of storms}
#' \item{hurrican}{a numeric vector recording number of hurricanes}
#' \item{elnino}{a factor with levels \code{cold}, \code{neutral}, and
#' \code{warm}}
#' }
#'
#' @source National Hurricane Center.
#'
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' T1 <- xtabs(~hurrican, data = Hurrican)
#' T1
#' barplot(T1, col = "blue", main = "Problem 1.38",
#' xlab = "Number of hurricanes",
#' ylab = "Number of seasons")
#' boxplot(storms ~ elnino, data = Hurrican,
#' col = c("blue", "yellow", "red"))
#' anova(lm(storms ~ elnino, data = Hurrican))
#' rm(T1)
#'
"Hurrican"
#' Number of icebergs sighted each month south of Newfoundland and south of the
#' Grand Banks in 1920
#'
#' Data for Exercises 2.46 and 2.60
#'
#'
#' @name Iceberg
#' @docType data
#' @format A data frame with 12 observations on three variables
#' \describe{
#' \item{month}{a character variable with abbreviated months of the year}
#' \item{Newfoundland}{number of icebergs sighted south of Newfoundland}
#' \item{Grand Banks}{number of icebergs sighted south of Grand Banks}
#' }
#'
#' @source N. Shaw, \emph{Manual of Meteorology}, Vol. 2 (London: Cambridge University Press 1942),
#' 7; and F. Mosteller and J. Tukey, \emph{Data Analysis and Regression} (Reading, MA: Addison - Wesley, 1977).
#'
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' plot(Newfoundland ~ `Grand Banks`, data = Iceberg)
#' abline(lm(Newfoundland ~ `Grand Banks`, data = Iceberg), col = "blue")
#'
"Iceberg"
#' Percent change in personal income from 1st to 2nd quarter in 2000
#'
#' Data for Exercise 1.33
#'
#'
#' @name Income
#' @docType data
#' @format A data frame/tibble with 51 observations on two variables
#' \describe{
#' \item{state}{a character variable with values \code{Alabama},
#' \code{Alaska}, \code{Arizona}, \code{Arkansas}, \code{California},
#' \code{Colorado}, \code{Connecticut}, \code{Delaware}, \code{District of
#' Columbia}, \code{Florida}, \code{Georgia}, \code{Hawaii}, \code{Idaho},
#' \code{Illinois}, \code{Indiana}, \code{Iowa}, \code{Kansas}, \code{Kentucky},
#' \code{Louisiana}, \code{Maine}, \code{Maryland}, \code{Massachusetts},
#' \code{Michigan}, \code{Minnesota}, \code{Mississippi}, \code{Missouri},
#' \code{Montana}, \code{Nebraska}, \code{Nevada}, \code{New Hampshire}, \code{New
#' Jersey}, \code{New Mexico}, \code{New York}, \code{North Carolina}, \code{North
#' Dakota}, \code{Ohio}, \code{Oklahoma}, \code{Oregon}, \code{Pennsylvania},
#' \code{Rhode Island}, \code{South Carolina}, \code{South Dakota},
#' \code{Tennessee}, \code{Texas}, \code{Utah}, \code{Vermont}, \code{Virginia},
#' \code{Washington}, \code{West Virginia}, \code{Wisconsin}, and \code{Wyoming}}
#' \item{percent_change}{percent change in income from first quarter to the second quarter of 2000}
#' }
#'
#' @source US Department of Commerce.
#'
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' Income$class <- cut(Income$percent_change,
#' breaks = c(-Inf, 0.5, 1.0, 1.5, 2.0, Inf))
#' T1 <- xtabs(~class, data = Income)
#' T1
#' barplot(T1, col = "pink")
#' \dontrun{
#' library(ggplot2)
#' DF <- as.data.frame(T1)
#' DF
#' ggplot2::ggplot(data = DF, aes(x = class, y = Freq)) +
#' geom_bar(stat = "identity", fill = "purple") +
#' theme_bw()
#' }
#'
"Income"
#' Illustrates a comparison problem for long-tailed distributions
#'
#' Data for Exercise 7.41
#'
#'
#' @name Independent
#' @docType data
#' @format A data frame/tibble with 46 observations on two variables
#' \describe{
#' \item{score}{a numeric vector}
#' \item{group}{a factor with levels \code{A} and \code{B}}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' qqnorm(Independent$score[Independent$group=="A"])
#' qqline(Independent$score[Independent$group=="A"])
#' qqnorm(Independent$score[Independent$group=="B"])
#' qqline(Independent$score[Independent$group=="B"])
#' boxplot(score ~ group, data = Independent, col = "blue")
#' wilcox.test(score ~ group, data = Independent)
#'
"Independent"
#' Educational attainment versus per capita income and poverty rate for
#' American Indians living on reservations
#'
#' Data for Exercise 2.95
#'
#'
#' @name Indian
#' @docType data
#' @format A data frame/tibble with ten observations on four variables
#' \describe{
#' \item{reservation}{a character variable with values \code{Blackfeet},
#' \code{Fort Apache}, \code{Gila River}, \code{Hopi}, \code{Navajo}, \code{Papago},
#' \code{Pine Ridge}, \code{Rosebud}, \code{San Carlos}, and \code{Zuni Pueblo}}
#' \item{percent high school}{percent who have graduated from high school}
#' \item{per capita income}{per capita income (in dollars)}
#' \item{poverty rate}{percent poverty}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' par(mfrow = c(1, 2))
#' plot(`per capita income` ~ `percent high school`, data = Indian,
#'      xlab = "Percent high school graduates", ylab = "Per capita income")
#' plot(`poverty rate` ~ `percent high school`, data = Indian,
#'      xlab = "Percent high school graduates", ylab = "Percent poverty")
#' par(mfrow = c(1, 1))
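#' # A supplementary sketch (not part of the original exercise): correlations
#' # between the documented numeric columns; backticks are needed because the
#' # column names contain spaces.
#' cor(Indian$`per capita income`, Indian$`percent high school`)
#' cor(Indian$`poverty rate`, Indian$`percent high school`)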
#'
"Indian"
#' Average miles per hour for the winners of the Indianapolis 500 race
#'
#' Data for Exercise 1.128
#'
#'
#' @name Indiapol
#' @docType data
#' @format A data frame/tibble with 39 observations on two variables
#' \describe{
#' \item{year}{the year of the race}
#' \item{speed}{the winner's average speed (in mph)}
#' }
#'
#' @source The World Almanac and Book of Facts, 2000, p. 1004.
#'
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' plot(speed ~ year, data = Indiapol, type = "b")
#'
"Indiapol"
#' Qualifying miles per hour and number of previous starts for drivers in the
#' 79th Indianapolis 500 race
#'
#' Data for Exercises 7.11 and 7.36
#'
#'
#' @name Indy500
#' @docType data
#' @format A data frame/tibble with 33 observations on four variables
#' \describe{
#' \item{driver}{a character variable with values \code{andretti},
#' \code{bachelart}, \code{boesel}, \code{brayton}, \code{c.guerrero},
#' \code{cheever}, \code{fabi}, \code{fernandez}, \code{ferran}, \code{fittipaldi},
#' \code{fox}, \code{goodyear}, \code{gordon}, \code{gugelmin}, \code{herta},
#' \code{james}, \code{johansson}, \code{jones}, \code{lazier}, \code{luyendyk},
#' \code{matsuda}, \code{matsushita}, \code{pruett}, \code{r.guerrero},
#' \code{rahal}, \code{ribeiro}, \code{salazar}, \code{sharp}, \code{sullivan},
#' \code{tracy}, \code{vasser}, \code{villeneuve}, and \code{zampedri}}
#' \item{qualif}{qualifying speed (in mph)}
#' \item{starts}{number of Indianapolis 500 starts}
#' \item{group}{a numeric vector where 1 indicates a driver with 4 or fewer
#' Indianapolis 500 starts and 2 indicates a driver with 5 or more starts}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' stripchart(qualif ~ group, data = Indy500, method = "stack",
#' pch = 19, col = c("red", "blue"))
#' boxplot(qualif ~ group, data = Indy500)
#' t.test(qualif ~ group, data = Indy500)
#' \dontrun{
#' library(ggplot2)
#' ggplot2::ggplot(data = Indy500, aes(sample = qualif)) +
#' geom_qq() +
#' facet_grid(group ~ .) +
#' theme_bw()
#' }
#'
"Indy500"
#' Private pay increase of salaried employees versus inflation rate
#'
#' Data for Exercises 2.12 and 2.29
#'
#'
#' @name Inflatio
#' @docType data
#' @format A data frame/tibble with 24 observations on four variables
#' \describe{
#' \item{year}{a numeric vector of years}
#' \item{pay}{average hourly wage for salaried employees (in dollars)}
#' \item{increase}{percent increase in hourly wage over previous year}
#' \item{inflation}{percent inflation rate}
#' }
#'
#' @source Bureau of Labor Statistics.
#'
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' plot(increase ~ inflation, data = Inflatio)
#' cor(Inflatio$increase, Inflatio$inflation, use = "complete.obs")
#'
"Inflatio"
#' Inlet oil temperature through a valve
#'
#' Data for Exercises 5.91 and 6.48
#'
#'
#' @name Inletoil
#' @docType data
#' @format A data frame/tibble with 12 observations on one variable
#' \describe{
#' \item{temp}{inlet oil temperature (Fahrenheit)}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' hist(Inletoil$temp, breaks = 3)
#' qqnorm(Inletoil$temp)
#' qqline(Inletoil$temp)
#' t.test(Inletoil$temp)
#' t.test(Inletoil$temp, mu = 98, alternative = "less")
#'
"Inletoil"
#' Type of drug offense by race
#'
#' Data for Statistical Insight Chapter 8
#'
#'
#' @name Inmate
#' @docType data
#' @format A data frame/tibble with 28,047 observations on two variables
#' \describe{
#' \item{race}{a factor with levels \code{white},
#' \code{black}, and \code{hispanic}}
#' \item{drug}{a factor with levels \code{heroin}, \code{crack}, \code{cocaine},
#' and \code{marijuana}}
#' }
#'
#' @source C. Wolf Harlow (1994), \emph{Comparing Federal and State Prison Inmates},
#' NCJ-145864, U.S. Department of Justice, Bureau of Justice Statistics.
#'
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' T1 <- xtabs(~race + drug, data = Inmate)
#' T1
#' chisq.test(T1)
#' rm(T1)
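#' # A supplementary sketch, assuming ggplot2 is available, of the drug counts
#' # by race; wrapped in \dontrun{} like the other optional examples.
#' \dontrun{
#' library(ggplot2)
#' ggplot2::ggplot(data = Inmate, aes(x = drug, fill = race)) +
#'   geom_bar(position = "dodge") +
#'   theme_bw()
#' }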
#'
"Inmate"
#' Percent of vehicles passing inspection by type of inspection station
#'
#' Data for Exercise 8.59
#'
#'
#' @name Inspect
#' @docType data
#' @format A data frame/tibble with 174 observations on two variables
#' \describe{
#' \item{station}{a factor with levels \code{auto inspection},
#' \code{auto repair}, \code{car care center}, \code{gas station}, \code{new car
#' dealer}, and \code{tire store}}
#' \item{passed}{a factor with levels \code{less than 70\%}, \code{between 70\% and 84\%}, and \code{more than 85\%}}
#' }
#'
#' @source \emph{The Charlotte Observer}, December 13, 1992.
#'
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' T1 <- xtabs(~ station + passed, data = Inspect)
#' T1
#' barplot(T1, beside = TRUE, legend = TRUE)
#' chisq.test(T1)
#' rm(T1)
#'
#' \dontrun{
#' library(ggplot2)
#' ggplot2::ggplot(data = Inspect, aes(x = passed, fill = station)) +
#' geom_bar(position = "dodge") +
#' theme_bw()
#' }
#'
"Inspect"
#' Heat loss through a new insulating medium
#'
#' Data for Exercise 9.50
#'
#'
#' @name Insulate
#' @docType data
#' @format A data frame/tibble with ten observations on two variables
#' \describe{
#' \item{temp}{outside temperature (in degrees Celsius)}
#' \item{loss}{heat loss (in BTUs)}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' plot(loss ~ temp, data = Insulate)
#' model <- lm(loss ~ temp, data = Insulate)
#' abline(model, col = "blue")
#' summary(model)
#'
#' \dontrun{
#' library(ggplot2)
#' ggplot2::ggplot(data = Insulate, aes(x = temp, y = loss)) +
#' geom_point() +
#' geom_smooth(method = "lm", se = FALSE) +
#' theme_bw()
#' }
#'
"Insulate"
#' GPA versus IQ for 12 individuals
#'
#' Data for Exercises 9.51 and 9.52
#'
#'
#' @name Iqgpa
#' @docType data
#' @format A data frame/tibble with 12 observations on two variables
#' \describe{
#' \item{iq}{IQ scores}
#' \item{gpa}{Grade point average}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' plot(gpa ~ iq, data = Iqgpa, col = "blue", pch = 19)
#' model <- lm(gpa ~ iq, data = Iqgpa)
#' summary(model)
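#' # A small illustrative addition: overlay the fitted regression line on the
#' # scatterplot drawn above (uses model before it is removed below).
#' abline(model, col = "red")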
#' rm(model)
#'
"Iqgpa"
#' R. A. Fisher's famous data on irises
#'
#' Data for Examples 1.15 and 5.19
#'
#'
#' @name Irises
#' @docType data
#' @format A data frame/tibble with 150 observations on five variables
#' \describe{
#' \item{sepal_length}{sepal length (in cm)}
#' \item{sepal_width}{sepal width (in cm)}
#' \item{petal_length}{petal length (in cm)}
#' \item{petal_width}{petal width (in cm)}
#' \item{species}{a factor with levels \code{setosa}, \code{versicolor}, and \code{virginica}}
#' }
#' @source Fisher, R. A. (1936) The use of multiple measurements in taxonomic problems.
#' \emph{Annals of Eugenics}, \strong{7}, Part II, 179-188.
#'
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' tapply(Irises$sepal_length, Irises$species, mean)
#' t.test(Irises$sepal_length[Irises$species == "setosa"], conf.level = 0.99)
#' hist(Irises$sepal_length[Irises$species == "setosa"],
#' main = "Sepal length for\n Iris Setosa",
#' xlab = "Length (in cm)")
#' boxplot(sepal_length ~ species, data = Irises)
#'
"Irises"
#' Number of problems reported per 100 cars in 1994 versus 1995
#'
#' Data for Exercises 2.14, 2.17, 2.31, 2.33, and 2.40
#'
#'
#' @name Jdpower
#' @docType data
#' @format A data frame/tibble with 29 observations on three variables
#' \describe{
#' \item{car}{a factor with levels \code{Acura}, \code{BMW},
#' \code{Buick}, \code{Cadillac}, \code{Chevrolet}, \code{Dodge}, \code{Eagle},
#' \code{Ford}, \code{Geo}, \code{Honda}, \code{Hyundai}, \code{Infiniti},
#' \code{Jaguar}, \code{Lexus}, \code{Lincoln}, \code{Mazda}, \code{Mercedes-Benz},
#' \code{Mercury}, \code{Mitsubishi}, \code{Nissan}, \code{Oldsmobile},
#' \code{Plymouth}, \code{Pontiac}, \code{Saab}, \code{Saturn}, \code{Subaru},
#' \code{Toyota}, \code{Volkswagen}, and \code{Volvo}}
#' \item{1994}{number of problems per 100 cars in 1994}
#' \item{1995}{number of problems per 100 cars in 1995}
#' }
#'
#' @source \emph{USA Today}, May 25, 1995.
#'
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' model <- lm(`1995` ~ `1994`, data = Jdpower)
#' summary(model)
#' plot(`1995` ~ `1994`, data = Jdpower)
#' abline(model, col = "red")
#' rm(model)
#'
"Jdpower"
#' Job satisfaction and stress level for 9 school teachers
#'
#' Data for Exercise 9.60
#'
#'
#' @name Jobsat
#' @docType data
#' @format A data frame/tibble with nine observations on two variables
#' \describe{
#' \item{wspt}{Wilson Stress Profile score for teachers}
#' \item{satisfaction}{job satisfaction score}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' plot(satisfaction ~ wspt, data = Jobsat)
#' model <- lm(satisfaction ~ wspt, data = Jobsat)
#' abline(model, col = "blue")
#' summary(model)
#' rm(model)
#'
"Jobsat"
#' Smoking habits of boys and girls ages 12 to 18
#'
#' Data for Exercise 4.85
#'
#'
#' @name Kidsmoke
#' @docType data
#' @format A data frame/tibble with 1000 observations on two variables
#' \describe{
#' \item{gender}{a character vector with values \code{female} and \code{male}}
#' \item{smoke}{a character vector with values \code{no} and \code{yes}}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' T1 <- xtabs(~smoke + gender, data = Kidsmoke)
#' T1
#' prop.table(T1)
#' prop.table(T1, 1)
#' prop.table(T1, 2)
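#' # A supplementary sketch: display the counts in T1 as a side-by-side bar chart.
#' barplot(T1, beside = TRUE, legend = TRUE, col = c("skyblue", "tomato"))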
#'
"Kidsmoke"
#' Rates per kilowatt-hour for each of the 50 states and DC
#'
#' Data for Example 5.9
#'
#'
#' @name Kilowatt
#' @docType data
#' @format A data frame/tibble with 51 observations on two variables
#' \describe{
#' \item{state}{a factor with levels \code{Alabama},
#' \code{Alaska}, \code{Arizona}, \code{Arkansas}, \code{California},
#' \code{Colorado}, \code{Connecticut}, \code{Delaware}, \code{District of
#' Columbia}, \code{Florida}, \code{Georgia}, \code{Hawaii}, \code{Idaho},
#' \code{Illinois}, \code{Indiana}, \code{Iowa}, \code{Kansas}, \code{Kentucky},
#' \code{Louisiana}, \code{Maine}, \code{Maryland}, \code{Massachusetts},
#' \code{Michigan}, \code{Minnesota}, \code{Mississippi}, \code{Missouri},
#' \code{Montana}, \code{Nebraska}, \code{Nevada}, \code{New Hampshire}, \code{New
#' Jersey}, \code{New Mexico}, \code{New York}, \code{North Carolina}, \code{North
#' Dakota}, \code{Ohio}, \code{Oklahoma}, \code{Oregon}, \code{Pennsylvania},
#' \code{Rhode Island}, \code{South Carolina}, \code{South Dakota},
#' \code{Tennessee}, \code{Texas}, \code{Utah}, \code{Vermont}, \code{Virginia},
#' \code{Washington}, \code{West Virginia}, \code{Wisconsin}, and \code{Wyoming}}
#' \item{rate}{a numeric vector giving the rate per kilowatt-hour}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' EDA(Kilowatt$rate)
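#' # A supplementary sketch using base graphics only: the distribution of the
#' # electricity rates.
#' hist(Kilowatt$rate, main = "Rates per kilowatt-hour", xlab = "Rate")
#' boxplot(Kilowatt$rate, col = "lightblue", horizontal = TRUE)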
#'
"Kilowatt"
#' Reading scores for first grade children who attended kindergarten versus
#' those who did not
#'
#' Data for Exercise 7.68
#'
#'
#' @name Kinder
#' @docType data
#' @format A data frame/tibble with eight observations on three variables
#' \describe{
#' \item{pair}{a numeric indicator of pair}
#' \item{kinder}{reading score of kids who went to kindergarten}
#' \item{nokinder}{reading score of kids who did not go to kindergarten}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' boxplot(Kinder$kinder, Kinder$nokinder)
#' diff <- Kinder$kinder - Kinder$nokinder
#' qqnorm(diff)
#' qqline(diff)
#' shapiro.test(diff)
#' t.test(diff)
#' rm(diff)
#'
"Kinder"
#' Median costs of laminectomies at hospitals across North Carolina in 1992
#'
#' Data for Exercise 10.18
#'
#'
#' @name Laminect
#' @docType data
#' @format A data frame/tibble with 138 observations on two variables
#' \describe{
#' \item{area}{a character vector indicating the area of the hospital, with
#' values \code{Rural}, \code{Regional}, and \code{Metropol}}
#' \item{cost}{a numeric vector indicating cost of a laminectomy}
#' }
#'
#' @source \emph{Consumer's Guide to Hospitalization Charges in North Carolina Hospitals} (August 1994),
#' North Carolina Medical Database Commission, Department of Insurance.
#'
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' boxplot(cost ~ area, data = Laminect, col = topo.colors(3))
#' anova(lm(cost ~ area, data = Laminect))
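#' # A supplementary sketch, assuming ggplot2 is available; it mirrors the base
#' # boxplot above and is wrapped in \dontrun{} like the other optional examples.
#' \dontrun{
#' library(ggplot2)
#' ggplot2::ggplot(data = Laminect, aes(x = area, y = cost)) +
#'   geom_boxplot() +
#'   theme_bw()
#' }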
#'
"Laminect"
#' Lead levels in the blood of children whose parents worked in a battery factory
#'
#' Data for Example 1.17
#'
#'
#' @name Lead
#' @docType data
#' @format A data frame/tibble with 66 observations on two variables
#' \describe{
#' \item{group}{a character vector with values \code{exposed} and \code{control}}
#' \item{lead}{a numeric vector indicating the level of lead in children's blood (in micrograms/dl)}
#' }
#'
#' @source Morton, D. et al. (1982), "Lead Absorption in Children of Employees in a Lead-Related
#' Industry," \emph{American Journal of Epidemiology, 155,} 549-555.
#'
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' boxplot(lead ~ group, data = Lead, col = topo.colors(2))
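#' # A supplementary sketch (an illustrative analysis, not one prescribed by the
#' # text): compare lead levels in the exposed and control groups.
#' t.test(lead ~ group, data = Lead)
#' wilcox.test(lead ~ group, data = Lead)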
#'
"Lead"
#' Leadership exam scores by age for employees at an industrial plant
#'
#' Data for Exercise 7.31
#'
#'
#' @name Leader
#' @docType data
#' @format A data frame/tibble with 34 observations on two variables
#' \describe{
#' \item{age}{a character vector indicating age with values \code{under35} and \code{over35}}
#' \item{score}{score on a leadership exam}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' boxplot(score ~ age, data = Leader, col = c("gray", "green"))
#' t.test(score ~ age, data = Leader)
#'
"Leader"
#' Survival time of mice injected with an experimental lethal drug
#'
#' Data for Example 6.12
#'
#'
#' @name Lethal
#' @docType data
#' @format A data frame/tibble with 30 observations on one variable
#' \describe{
#' \item{survival}{a numeric vector indicating time survived
#' after injection (in seconds)}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' SIGN.test(Lethal$survival, md = 45, alternative = "less")
#'
#'
"Lethal"
#' Life expectancy of men and women in the U.S.
#'
#' Data for Exercise 1.31
#'
#'
#' @name Life
#' @docType data
#' @format A data frame/tibble with eight observations on three variables
#' \describe{
#' \item{year}{a numeric vector indicating year}
#' \item{men}{life expectancy for men (in years)}
#' \item{women}{life expectancy for women (in years)}
#' }
#'
#' @source National Center for Health Statistics.
#'
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' plot(men ~ year, type = "l", col = "blue",
#'      ylim = c(min(Life$men, Life$women), max(Life$men, Life$women)),
#'      main = "Life Expectancy vs Year", ylab = "Age",
#'      xlab = "Year", data = Life)
#' lines(women ~ year, col = "red", data = Life)
#' text(1955, 65, "Men", col = "blue")
#' text(1955, 70, "Women", col = "red")
#'
"Life"
#' Life span of electronic components used in a spacecraft versus heat
#'
#' Data for Exercises 2.4, 2.37, and 2.49
#'
#'
#' @name Lifespan
#' @docType data
#' @format A data frame/tibble with six observations on two variables
#' \describe{
#' \item{heat}{temperature (in degrees Celsius)}
#' \item{life}{lifespan of component (in hours)}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' plot(life ~ heat, data = Lifespan)
#' model <- lm(life ~ heat, data = Lifespan)
#' abline(model, col = "red")
#' resid(model)
#' sum((resid(model))^2)
#' anova(model)
#' rm(model)
#'
"Lifespan"
#' Relationship between damage reports and deaths caused by lightning
#'
#' Data for Exercise 2.6
#'
#'
#' @name Ligntmonth
#' @docType data
#' @format A data frame/tibble with 12 observations on four variables
#' \describe{
#' \item{month}{a factor with levels \code{1/01/2000},
#' \code{10/01/2000}, \code{11/01/2000}, \code{12/01/2000}, \code{2/01/2000},
#' \code{3/01/2000}, \code{4/01/2000}, \code{5/01/2000}, \code{6/01/2000},
#' \code{7/01/2000}, \code{8/01/2000}, and \code{9/01/2000}}
#' \item{deaths}{number of deaths due to lightning strikes}
#' \item{injuries}{number of injuries due to lightning strikes}
#' \item{damage}{damage due to lightning strikes (in dollars)}
#' }
#'
#' @source \emph{Lightning Fatalities, Injuries and Damage Reports in the United States},
#' 1959-1994, NOAA Technical Memorandum NWS SR-193, Dept. of Commerce.
#'
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' plot(deaths ~ damage, data = Ligntmonth)
#' model <- lm(deaths ~ damage, data = Ligntmonth)
#' abline(model, col = "red")
#' rm(model)
#'
"Ligntmonth"
#' Measured traffic at three prospective locations for a motor lodge
#'
#' Data for Exercise 10.33
#'
#'
#' @name Lodge
#' @docType data
#' @format A data frame/tibble with 45 observations on three variables
#' \describe{
#' \item{traffic}{a numeric vector indicating the number of vehicles that passed a site in one hour}
#' \item{site}{a numeric vector with values \code{1}, \code{2}, and \code{3}}
#' \item{ranks}{ranks for variable \code{traffic}}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' boxplot(traffic ~ site, data = Lodge, col = cm.colors(3))
#' anova(lm(traffic ~ factor(site), data = Lodge))
#'
"Lodge"
#' Long-tailed distributions to illustrate the Kruskal-Wallis test
#'
#' Data for Exercise 10.45
#'
#'
#' @name Longtail
#' @docType data
#' @format A data frame/tibble with 60 observations on three variables
#' \describe{
#' \item{score}{a numeric vector}
#' \item{group}{a numeric vector with values \code{1}, \code{2}, and \code{3}}
#' \item{ranks}{ranks for variable \code{score}}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' boxplot(score ~ group, data = Longtail, col = heat.colors(3))
#' kruskal.test(score ~ factor(group), data = Longtail)
#' anova(lm(score ~ factor(group), data = Longtail))
#'
"Longtail"
#' Reading skills of 24 matched low-ability students
#'
#' Data for Example 7.18
#'
#'
#' @name Lowabil
#' @docType data
#' @format A data frame/tibble with 12 observations on three variables
#' \describe{
#' \item{pair}{a numeric indicator of pair}
#' \item{experiment}{score of the child with the experimental method}
#' \item{control}{score of the child with the standard method}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' diff <- Lowabil$experiment - Lowabil$control
#' qqnorm(diff)
#' qqline(diff)
#' shapiro.test(diff)
#' t.test(diff)
#' rm(diff)
#'
"Lowabil"
#' Magnesium concentration and distances between samples
#'
#' Data for Exercise 9.9
#'
#'
#' @name Magnesiu
#' @docType data
#' @format A data frame/tibble with 20 observations on two variables
#' \describe{
#' \item{distance}{distance between samples}
#' \item{magnesium}{concentration of magnesium}
#' }
#'
#' @source Davis, J. (1986), \emph{Statistics and Data Analysis in Geology}, 2d. Ed.,
#' John Wiley and Sons, New York, p. 146.
#'
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' plot(magnesium ~ distance, data = Magnesiu)
#' model <- lm(magnesium ~ distance, data = Magnesiu)
#' abline(model, col = "red")
#' summary(model)
#' rm(model)
#'
"Magnesiu"
#' Amounts awarded in 17 malpractice cases
#'
#' Data for Exercise 5.73
#'
#'
#' @name Malpract
#' @docType data
#' @format A data frame/tibble with 17 observations on one variable
#' \describe{
#' \item{award}{malpractice award (in $1000)}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' SIGN.test(Malpract$award, conf.level = 0.90)
#'
"Malpract"
#' Advertised salaries offered to general managers of major corporations in 1995
#'
#' Data for Exercise 5.81
#'
#'
#' @name Manager
#' @docType data
#' @format A data frame/tibble with 26 observations on one variable
#' \describe{
#' \item{salary}{random sample of advertised annual salaries of top executives (in dollars)}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' stem(Manager$salary)
#' SIGN.test(Manager$salary)
#'
"Manager"
#' Percent of marked cars in 65 police departments in Florida
#'
#' Data for Exercise 6.100
#'
#'
#' @name Marked
#' @docType data
#' @format A data frame/tibble with 65 observations on one variable
#' \describe{
#' \item{percent}{percentage of marked cars in 65 Florida police departments}
#' }
#'
#' @source \emph{Law Enforcement Management and Administrative Statistics, 1993}, Bureau of
#' Justice Statistics, NCJ-148825, September 1995, p. 147-148.
#'
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' EDA(Marked$percent)
#' SIGN.test(Marked$percent, md = 60, alternative = "greater")
#' t.test(Marked$percent, mu = 60, alternative = "greater")
#'
"Marked"
#' Standardized math test scores for 30 students
#'
#' Data for Exercise 1.69
#'
#'
#' @name Math
#' @docType data
#' @format A data frame/tibble with 30 observations on one variable
#' \describe{
#' \item{score}{scores on a standardized test for 30 tenth graders}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' stem(Math$score)
#' hist(Math$score, main = "Math Scores", xlab = "score", freq = FALSE)
#' lines(density(Math$score), col = "red")
#' CharlieZ <- (62 - mean(Math$score))/sd(Math$score)
#' CharlieZ
#' scale(Math$score)[which(Math$score == 62)]
#'
"Math"
#' Standardized math competency for a group of entering freshmen at a small
#' community college
#'
#' Data for Exercise 5.26
#'
#'
#' @name Mathcomp
#' @docType data
#' @format A data frame/tibble with 31 observations on one variable
#' \describe{
#' \item{score}{scores of 31 entering freshmen at a community college
#' on a national standardized test}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' stem(Mathcomp$score)
#' EDA(Mathcomp$score)
#'
"Mathcomp"
#' Math proficiency and SAT scores by state
#'
#' Data for Exercise 9.24, Example 9.1, and Example 9.6
#'
#'
#' @name Mathpro
#' @docType data
#' @format A data frame/tibble with 51 observations on four variables
#' \describe{
#' \item{state}{a factor with levels \code{Conn},
#' \code{D.C.}, \code{Del}, \code{Ga}, \code{Hawaii}, \code{Ind}, \code{Maine},
#' \code{Mass}, \code{Md}, \code{N.C.}, \code{N.H.}, \code{N.J.}, \code{N.Y.},
#' \code{Ore}, \code{Pa}, \code{R.I.}, \code{S.C.}, \code{Va}, and \code{Vt}}
#' \item{sat_math}{SAT math scores for high school seniors}
#' \item{profic}{math proficiency scores for eighth graders}
#' \item{group}{a numeric vector}
#' }
#'
#' @source National Assessment of Educational Progress and The College Board.
#'
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' model <- lm(sat_math ~ profic, data = Mathpro)
#' plot(sat_math ~ profic, data = Mathpro, ylab = "SAT", xlab = "proficiency")
#' abline(model, col = "red")
#' summary(model)
#' rm(model)
#'
"Mathpro"
#' Error scores for four groups of experimental animals running a maze
#'
#' Data for Exercise 10.13
#'
#'
#' @name Maze
#' @docType data
#' @format A data frame/tibble with 32 observations on two variables
#' \describe{
#' \item{score}{error scores for animals running through a maze under different conditions}
#' \item{condition}{a factor with levels \code{CondA},
#' \code{CondB}, \code{CondC}, and \code{CondD}}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' boxplot(score ~ condition, data = Maze, col = rainbow(4))
#' anova(lm(score ~ condition, data = Maze))
#'
"Maze"
#' Illustrates a test of equality of medians with the Kruskal-Wallis test
#'
#' Data for Exercise 10.52
#'
#'
#' @name Median
#' @docType data
#' @format A data frame/tibble with 45 observations on two variables
#' \describe{
#' \item{sample}{a vector with values \code{Sample1}, \code{Sample 2}, and \code{Sample 3}}
#' \item{value}{a numeric vector}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' boxplot(value ~ sample, data = Median, col = rainbow(3))
#' anova(lm(value ~ sample, data = Median))
#' kruskal.test(value ~ factor(sample), data = Median)
#'
"Median"
#' Median mental ages of 16 girls
#'
#' Data for Exercise 6.52
#'
#'
#' @name Mental
#' @docType data
#' @format A data frame/tibble with 16 observations on one variable
#' \describe{
#' \item{age}{mental age of 16 girls}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' SIGN.test(Mental$age, md = 100)
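#' # A supplementary sketch of the distribution of the mental ages using base
#' # graphics.
#' stem(Mental$age)
#' boxplot(Mental$age, col = "lightgreen", horizontal = TRUE)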
#'
"Mental"
#' Concentration of mercury in 25 lake trout
#'
#' Data for Example 1.9
#'
#'
#' @name Mercury
#' @docType data
#' @format A data frame/tibble with 25 observations on one variable
#' \describe{
#' \item{mercury}{a numeric vector measuring mercury (in parts per million)}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' stem(Mercury$mercury)
#'
"Mercury"
#' Monthly rental costs in metro areas with 1 million or more persons
#'
#' Data for Exercise 5.117
#'
#'
#' @name Metrent
#' @docType data
#' @format A data frame/tibble with 46 observations on one variable
#' \describe{
#' \item{rent}{monthly rent in dollars}
#' }
#'
#' @source U.S. Bureau of the Census, \emph{Housing in the Metropolitan Areas,
#' Statistical Brief} SB/94/19, September 1994.
#'
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' boxplot(Metrent$rent, col = "magenta")
#' t.test(Metrent$rent, conf.level = 0.99)$conf
#'
"Metrent"
#' Miller personality test scores for a group of college students applying for
#' graduate school
#'
#' Data for Example 5.7
#'
#'
#' @name Miller
#' @docType data
#' @format A data frame/tibble with 25 observations on one variable
#' \describe{
#' \item{miller}{scores on the Miller Personality test}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' stem(Miller$miller)
#' fivenum(Miller$miller)
#' boxplot(Miller$miller)
#' qqnorm(Miller$miller, col = "blue")
#' qqline(Miller$miller, col = "red")
#'
"Miller"
#' Twenty scores on the Miller personality test
#'
#' Data for Exercise 1.41
#'
#'
#' @name Miller1
#' @docType data
#' @format A data frame/tibble with 20 observations on one variable
#' \describe{
#' \item{miller}{scores on the Miller personality test}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' stem(Miller1$miller)
#' stem(Miller1$miller, scale = 2)
#'
"Miller1"
#' Moisture content and depth of core sample for marine muds in eastern
#' Louisiana
#'
#' Data for Exercise 9.32
#'
#'
#' @name Moisture
#' @docType data
#' @format A data frame/tibble with 16 observations on four variables
#' \describe{
#' \item{depth}{a numeric vector}
#' \item{moisture}{g of water per 100 g of dried sediment}
#' \item{lnmoist}{a numeric vector}
#' \item{depthsq}{a numeric vector}
#' }
#'
#' @source Davis, J. C. (1986), \emph{Statistics and Data Analysis in Geology}, 2d. ed.,
#' John Wiley and Sons, New York, pp. 177, 185.
#'
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' plot(moisture ~ depth, data = Moisture)
#' model <- lm(moisture ~ depth, data = Moisture)
#' abline(model, col = "red")
#' plot(resid(model) ~ depth, data = Moisture)
#' rm(model)
#'
"Moisture"
#' Carbon monoxide emitted by smoke stacks of a manufacturer and a competitor
#'
#' Data for Exercise 7.45
#'
#'
#' @name Monoxide
#' @docType data
#' @format A data frame/tibble with ten observations on two variables
#' \describe{
#' \item{company}{a vector with values \code{manufacturer} and \code{competitor}}
#' \item{emission}{carbon monoxide emitted}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' boxplot(emission ~ company, data = Monoxide, col = topo.colors(2))
#' t.test(emission ~ company, data = Monoxide)
#' wilcox.test(emission ~ company, data = Monoxide)
#' \dontrun{
#' library(ggplot2)
#' ggplot2::ggplot(data = Monoxide, aes(x = company, y = emission)) +
#' geom_boxplot() +
#' theme_bw()
#' }
#'
"Monoxide"
#' Moral attitude scale on 15 subjects before and after viewing a movie
#'
#' Data for Exercise 7.53
#'
#'
#' @name Movie
#' @docType data
#' @format A data frame/tibble with 12 observations on three variables
#' \describe{
#' \item{before}{moral aptitude before viewing the movie}
#' \item{after}{moral aptitude after viewing the movie}
#' \item{differ}{a numeric vector}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' qqnorm(Movie$differ)
#' qqline(Movie$differ)
#' shapiro.test(Movie$differ)
#' t.test(Movie$differ, conf.level = 0.99)
#' wilcox.test(Movie$differ)
#'
"Movie"
#' Improvement scores for identical twins taught music recognition by two
#' techniques
#'
#' Data for Exercise 7.59
#'
#'
#' @name Music
#' @docType data
#' @format A data frame/tibble with 12 observations on three variables
#' \describe{
#' \item{method1}{a numeric vector measuring the improvement scores on a music recognition test}
#' \item{method2}{a numeric vector measuring the improvement scores on a music recognition test}
#' \item{differ}{\code{method1} - \code{method2}}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' qqnorm(Music$differ)
#' qqline(Music$differ)
#' shapiro.test(Music$differ)
#' t.test(Music$differ)
#' \dontrun{
#' library(ggplot2)
#' ggplot2::ggplot(data = Music, aes(x = differ)) +
#' geom_dotplot() +
#' theme_bw()
#' }
#'
"Music"
#' Estimated value of a brand-name product and the company's revenue
#'
#' Data for Exercises 2.28, 9.19, and Example 2.8
#'
#'
#' @name Name
#' @docType data
#' @format A data frame/tibble with 42 observations on three variables
#' \describe{
#' \item{brand}{a factor with levels \code{Band-Aid},
#' \code{Barbie}, \code{Birds Eye}, \code{Budweiser}, \code{Camel}, \code{Campbell},
#' \code{Carlsberg}, \code{Coca-Cola}, \code{Colgate}, \code{Del Monte},
#' \code{Fisher-Price}, \verb{Gordon's}, \code{Green Giant}, \code{Guinness},
#' \code{Haagen-Dazs}, \code{Heineken}, \code{Heinz}, \code{Hennessy},
#' \code{Hermes}, \code{Hershey}, \code{Ivory}, \code{Jell-o}, \code{Johnnie
#' Walker}, \code{Kellogg}, \code{Kleenex}, \code{Kraft}, \code{Louis Vuitton},
#' \code{Marlboro}, \code{Nescafe}, \code{Nestle}, \code{Nivea}, \code{Oil of Olay},
#' \code{Pampers}, \code{Pepsi-Cola}, \code{Planters}, \code{Quaker}, \code{Sara
#' Lee}, \code{Schweppes}, \code{Smirnoff}, \code{Tampax}, \code{Winston}, and
#' \verb{Wrigley's}}
#' \item{value}{value in billions of dollars}
#' \item{revenue}{revenue in billions of dollars}
#' }
#'
#' @source Financial World.
#'
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' plot(value ~ revenue, data = Name)
#' model <- lm(value ~ revenue, data = Name)
#' abline(model, col = "red")
#' cor(Name$value, Name$revenue)
#' summary(model)
#' rm(model)
#'
"Name"
#' Efficiency of pit crews for three major NASCAR teams
#'
#' Data for Exercise 10.53
#'
#'
#' @name Nascar
#' @docType data
#' @format A data frame/tibble with 36 observations on three variables
#' \describe{
#' \item{time}{duration of pit stop (in seconds)}
#' \item{team}{a numeric vector representing team 1, 2, or 3}
#' \item{ranks}{a numeric vector ranking each pit stop in order of speed}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' boxplot(time ~ team, data = Nascar, col = rainbow(3))
#' model <- lm(time ~ factor(team), data = Nascar)
#' summary(model)
#' anova(model)
#' rm(model)
#'
"Nascar"
#' Reaction effects of 4 drugs on 25 subjects with a nervous disorder
#'
#' Data for Example 10.3
#'
#'
#' @name Nervous
#' @docType data
#' @format A data frame/tibble with 25 observations on two variables
#' \describe{
#' \item{react}{a numeric vector representing reaction time}
#' \item{drug}{a numeric vector indicating which of the 4 drugs was administered}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' boxplot(react ~ drug, data = Nervous, col = rainbow(4))
#' model <- aov(react ~ factor(drug), data = Nervous)
#' summary(model)
#' TukeyHSD(model)
#' plot(TukeyHSD(model), las = 1)
#'
"Nervous"
#' Daily profits for 20 newsstands
#'
#' Data for Exercise 1.43
#'
#'
#' @name Newsstand
#' @docType data
#' @format A data frame/tibble with 20 observations on one variable
#' \describe{
#' \item{profit}{profit of each newsstand (in dollars)}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' stem(Newsstand$profit)
#' stem(Newsstand$profit, scale = 3)
#'
"Newsstand"
#' Rating, time in 40-yard dash, and weight of top defensive linemen in the
#' 1994 NFL draft
#'
#' Data for Exercise 9.63
#'
#'
#' @name Nfldraf2
#' @docType data
#' @format A data frame/tibble with 47 observations on three variables
#' \describe{
#' \item{rating}{rating of each player on a scale out of 10}
#' \item{forty}{forty yard dash time (in seconds)}
#' \item{weight}{weight of each player (in pounds)}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' plot(rating ~ forty, data = Nfldraf2)
#' summary(lm(rating ~ forty, data = Nfldraf2))
#'
"Nfldraf2"
#' Rating, time in 40-yard dash, and weight of top offensive linemen in the
#' 1994 NFL draft
#'
#' Data for Exercises 9.10 and 9.16
#'
#'
#' @name Nfldraft
#' @docType data
#' @format A data frame/tibble with 29 observations on three variables
#' \describe{
#' \item{rating}{rating of each player on a scale out of 10}
#' \item{forty}{forty yard dash time (in seconds)}
#' \item{weight}{weight of each player (in pounds)}
#' }
#'
#' @source \emph{USA Today}, April 20, 1994.
#'
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' plot(rating ~ forty, data = Nfldraft)
#' cor(Nfldraft$rating, Nfldraft$forty)
#' summary(lm(rating ~ forty, data = Nfldraft))
#'
"Nfldraft"
#' Nicotine content versus sales for eight major brands of cigarettes
#'
#' Data for Exercise 9.21
#'
#'
#' @name Nicotine
#' @docType data
#' @format A data frame/tibble with eight observations on two variables
#' \describe{
#' \item{nicotine}{nicotine content (in milligrams)}
#' \item{sales}{sales figures (in $100,000)}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' model <- lm(sales ~ nicotine, data = Nicotine)
#' plot(sales ~ nicotine, data = Nicotine)
#' abline(model, col = "red")
#' summary(model)
#' predict(model, newdata = data.frame(nicotine = 1),
#' interval = "confidence", level = 0.99)
#'
"Nicotine"
#' Price of oranges versus size of the harvest
#'
#' Data for Exercise 9.61
#'
#'
#' @name Orange
#' @docType data
#' @format A data frame/tibble with six observations on two variables
#' \describe{
#' \item{harvest}{harvest in millions of boxes}
#' \item{price}{average price charged by California growers
#' for a 75-pound box of navel oranges}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' plot(price ~ harvest, data = Orange)
#' model <- lm(price ~ harvest, data = Orange)
#' abline(model, col = "red")
#' summary(model)
#' rm(model)
#'
"Orange"
#' Salaries of members of the Baltimore Orioles baseball team
#'
#' Data for Example 1.3
#'
#'
#' @name Orioles
#' @docType data
#' @format A data frame/tibble with 27 observations on three variables
#' \describe{
#' \item{first name}{a factor with levels \code{Albert},
#' \code{Arthur}, \code{B.J.}, \code{Brady}, \code{Cal}, \code{Charles},
#' \code{dl-Delino}, \code{dl-Scott}, \code{Doug}, \code{Harold}, \code{Heathcliff},
#' \code{Jeff}, \code{Jesse}, \code{Juan}, \code{Lenny}, \code{Mike}, \code{Rich},
#' \code{Ricky}, \code{Scott}, \code{Sidney}, \code{Will}, and \code{Willis}}
#' \item{last name}{a factor with levels \code{Amaral}, \code{Anderson},
#' \code{Baines}, \code{Belle}, \code{Bones}, \code{Bordick}, \code{Clark},
#' \code{Conine}, \code{Deshields}, \code{Erickson}, \code{Fetters}, \code{Garcia},
#' \code{Guzman}, \code{Johns}, \code{Johnson}, \code{Kamieniecki}, \code{Mussina},
#' \code{Orosco}, \code{Otanez}, \code{Ponson}, \code{Reboulet}, \code{Rhodes},
#' \code{Ripken Jr.}, \code{Slocumb}, \code{Surhoff},\code{Timlin}, and
#' \code{Webster}}
#' \item{1999salary}{a numeric vector containing each player's salary (in dollars)}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' stripchart(Orioles$`1999salary`, method = "stack", pch = 19)
#' \dontrun{
#' library(ggplot2)
#' ggplot2::ggplot(data = Orioles, aes(x = `1999salary`)) +
#' geom_dotplot(dotsize = 0.5) +
#' labs(x = "1999 Salary") +
#' theme_bw()
#' }
#'
"Orioles"
#' Arterial blood pressure of 11 subjects before and after receiving oxytocin
#'
#' Data for Exercise 7.86
#'
#'
#' @name Oxytocin
#' @docType data
#' @format A data frame/tibble with 11 observations on three variables
#' \describe{
#' \item{subject}{a numeric vector indicating each subject}
#' \item{before}{mean arterial blood pressure of subject before receiving oxytocin}
#' \item{after}{mean arterial blood pressure of subject after receiving oxytocin}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' diff <- Oxytocin$after - Oxytocin$before
#' qqnorm(diff)
#' qqline(diff)
#' shapiro.test(diff)
#' t.test(diff)
#' rm(diff)
#'
"Oxytocin"
#' Education backgrounds of parents of entering freshmen at a state university
#'
#' Data for Exercise 1.32
#'
#'
#' @name Parented
#' @docType data
#' @format A data frame/tibble with 200 observations on two variables
#' \describe{
#' \item{education}{a factor with levels \code{4yr college
#' degree}, \code{Doctoral degree}, \code{Grad degree}, \code{H.S grad or less},
#' \code{Some college}, and \code{Some grad school}}
#' \item{parent}{a factor with levels \code{mother} and \code{father}}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' T1 <- xtabs(~education + parent, data = Parented)
#' T1
#' barplot(t(T1), beside = TRUE, legend = TRUE, col = c("blue", "red"))
#' rm(T1)
#' \dontrun{
#' library(ggplot2)
#' ggplot2::ggplot(data = Parented, aes(x = education, fill = parent)) +
#' geom_bar(position = "dodge") +
#' theme_bw() +
#' theme(axis.text.x = element_text(angle = 85, vjust = 0.5)) +
#' scale_fill_manual(values = c("pink", "blue")) +
#' labs(x = "", y = "")
#' }
#'
"Parented"
#' Years of experience and number of tickets given by patrolpersons in New York
#' City
#'
#' Data for Example 9.3
#'
#'
#' @name Patrol
#' @docType data
#' @format A data frame/tibble with ten observations on three variables
#' \describe{
#' \item{tickets}{number of tickets written per week}
#' \item{years}{patrolperson's experience (in years)}
#' \item{log_tickets}{natural log of \code{tickets}}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' model <- lm(tickets ~ years, data = Patrol)
#' summary(model)
#' confint(model, level = 0.98)
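#' # An illustrative addition (not part of the original example): plot the data
#' # and overlay the fitted line from the model created above.
#' plot(tickets ~ years, data = Patrol)
#' abline(model, col = "red")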
#'
"Patrol"
#' Karl Pearson's data on heights of brothers and sisters
#'
#' Data for Exercise 2.20
#'
#'
#' @name Pearson
#' @docType data
#' @format A data frame/tibble with 11 observations on three variables
#' \describe{
#' \item{family}{number indicating family of brother and sister pair}
#' \item{brother}{height of brother (in inches)}
#' \item{sister}{height of sister (in inches)}
#' }
#'
#' @source Pearson, K. and Lee, A. (1902-3), On the Laws of Inheritance in Man,
#' \emph{Biometrika, 2}, 357.
#'
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' plot(brother ~ sister, data = Pearson, col = "lightblue")
#' cor(Pearson$brother, Pearson$sister)
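#' # A supplementary sketch: add a least-squares line to the scatterplot above.
#' model <- lm(brother ~ sister, data = Pearson)
#' abline(model, col = "red")
#' rm(model)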
#'
"Pearson"
#' Length of long-distance phone calls for a small business firm
#'
#' Data for Exercise 6.95
#'
#'
#' @name Phone
#' @docType data
#' @format A data frame/tibble with 20 observations on one variable
#' \describe{
#' \item{time}{duration of long distance phone call (in minutes)}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' qqnorm(Phone$time)
#' qqline(Phone$time)
#' shapiro.test(Phone$time)
#' SIGN.test(Phone$time, md = 5, alternative = "greater")
#'
"Phone"
#' Number of poisonings reported to 16 poison control centers
#'
#' Data for Exercise 1.113
#'
#'
#' @name Poison
#' @docType data
#' @format A data frame/tibble with 226,361 observations on one variable
#' \describe{
#' \item{type}{a factor with levels \code{Alcohol},
#' \code{Cleaning agent}, \code{Cosmetics}, \code{Drugs}, \code{Insecticides}, and
#' \code{Plants}}
#' }
#'
#' @source Centers for Disease Control, Atlanta, Georgia.
#'
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' T1 <- xtabs(~type, data = Poison)
#' T1
#' par(mar = c(5.1 + 2, 4.1, 4.1, 2.1))
#' barplot(sort(T1, decreasing = TRUE), las = 2, col = rainbow(6))
#' par(mar = c(5.1, 4.1, 4.1, 2.1))
#' rm(T1)
#' \dontrun{
#' library(ggplot2)
#' ggplot2::ggplot(data = Poison, aes(x = type, fill = type)) +
#' geom_bar() +
#' theme_bw() +
#' theme(axis.text.x = element_text(angle = 85, vjust = 0.5)) +
#' guides(fill = FALSE)
#' }
#'
"Poison"
#' Political party and gender in a voting district
#'
#' Data for Example 8.3
#'
#'
#' @name Politic
#' @docType data
#' @format A data frame/tibble with 250 observations on two variables
#' \describe{
#' \item{party}{a factor with levels \code{republican}, \code{democrat}, and \code{other}}
#' \item{gender}{a factor with levels \code{female} and \code{male}}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' T1 <- xtabs(~party + gender, data = Politic)
#' T1
#' chisq.test(T1)
#' rm(T1)
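#' # A supplementary sketch, assuming ggplot2 is available, of party affiliation
#' # by gender; wrapped in \dontrun{} like the other optional examples.
#' \dontrun{
#' library(ggplot2)
#' ggplot2::ggplot(data = Politic, aes(x = party, fill = gender)) +
#'   geom_bar(position = "dodge") +
#'   theme_bw()
#' }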
#'
"Politic"
#' Air pollution index for 15 randomly selected days for a major western city
#'
#' Data for Exercise 5.59
#'
#'
#' @name Pollutio
#' @docType data
#' @format A data frame/tibble with 15 observations on one variable
#' \describe{
#' \item{inde}{air pollution index}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' stem(Pollutio$inde)
#' t.test(Pollutio$inde, conf.level = 0.98)$conf
#'
"Pollutio"
#' Porosity measurements on 20 samples of Tensleep Sandstone (Pennsylvanian)
#' from the Bighorn Basin in Wyoming
#'
#' Data for Exercise 5.86
#'
#'
#' @name Porosity
#' @docType data
#' @format A data frame/tibble with 20 observations on one variable
#' \describe{
#' \item{porosity}{porosity measurement (percent)}
#' }
#'
#' @source Davis, J. C. (1986), \emph{Statistics and Data Analysis in Geology}, 2nd edition,
#' pages 63-65.
#'
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' stem(Porosity$porosity)
#' fivenum(Porosity$porosity)
#' boxplot(Porosity$porosity, col = "lightgreen")
#'
"Porosity"
#' Percent poverty and crime rate for selected cities
#'
#' Data for Exercises 9.11 and 9.17
#'
#'
#' @name Poverty
#' @docType data
#' @format A data frame/tibble with 20 observations on four variables
#' \describe{
#' \item{city}{a factor with levels \code{Atlanta},
#' \code{Buffalo}, \code{Cincinnati}, \code{Cleveland}, \code{Dayton, O},
#' \code{Detroit}, \code{Flint, Mich}, \code{Fresno, C}, \code{Gary, Ind},
#' \code{Hartford, C}, \code{Laredo}, \code{Macon, Ga}, \code{Miami},
#' \code{Milwaukee}, \code{New Orleans}, \code{Newark, NJ}, \code{Rochester,NY},
#' \code{Shreveport}, \code{St. Louis}, and \code{Waco, Tx}}
#' \item{poverty}{percent of children living in poverty}
#' \item{crime}{crime rate (per 1000 people)}
#' \item{population}{population of city}
#' }
#'
#' @source Children's Defense Fund and the Bureau of Justice Statistics.
#'
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' plot(poverty ~ crime, data = Poverty)
#' model <- lm(poverty ~ crime, data = Poverty)
#' abline(model, col = "red")
#' summary(model)
#' rm(model)
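#' # Illustrative sketch, not from the text: a weighted fit that gives
#' # larger cities more influence via the population variable.
#' wmodel <- lm(poverty ~ crime, data = Poverty, weights = population)
#' summary(wmodel)
#' rm(wmodel)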
#'
"Poverty"
#' Robbery rates versus percent low income in eight precincts
#'
#' Data for Exercises 2.2 and 2.38
#'
#'
#' @name Precinct
#' @docType data
#' @format A data frame/tibble with eight observations on two variables
#' \describe{
#' \item{rate}{robbery rate (per 1000 people)}
#' \item{income}{percent with low income}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' plot(rate ~ income, data = Precinct)
#' model <- (lm(rate ~ income, data = Precinct))
#' abline(model, col = "red")
#' rm(model)
#'
"Precinct"
#' Racial prejudice measured on a sample of 25 high school students
#'
#' Data for Exercises 5.10 and 5.22
#'
#'
#' @name Prejudic
#' @docType data
#' @format A data frame with 25 observations on one variable
#' \describe{
#' \item{prejud}{racial prejudice score}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' stem(Prejudic$prejud)
#' EDA(Prejudic$prejud)
#'
"Prejudic"
#' Ages at inauguration and death of U.S. presidents
#'
#' Data for Exercise 1.126
#'
#'
#' @name Presiden
#' @docType data
#' @format A data frame/tibble with 43 observations on five variables
#' \describe{
#' \item{first_initial}{a factor with levels \code{A.}, \code{B.},
#' \code{C.}, \code{D.}, \code{F.}, \code{G.}, \code{G. W.}, \code{H.}, \code{J.},
#' \code{L.}, \code{M.}, \code{R.}, \code{T.}, \code{U.}, \code{W.}, and \code{Z.}}
#' \item{last_name}{a factor with levels \code{Adams}, \code{Arthur},
#' \code{Buchanan}, \code{Bush}, \code{Carter}, \code{Cleveland}, \code{Clinton},
#' \code{Coolidge}, \code{Eisenhower}, \code{Fillmore}, \code{Ford},
#' \code{Garfield}, \code{Grant}, \code{Harding}, \code{Harrison}, \code{Hayes},
#' \code{Hoover}, \code{Jackson}, \code{Jefferson}, \code{Johnson}, \code{Kennedy},
#' \code{Lincoln}, \code{Madison}, \code{McKinley}, \code{Monroe}, \code{Nixon},
#' \code{Pierce}, \code{Polk}, \code{Reagan}, \code{Roosevelt}, \code{Taft},
#' \code{Taylor}, \code{Truman}, \code{Tyler}, \code{VanBuren}, \code{Washington}, and
#' \code{Wilson}}
#' \item{birth_state}{a factor with levels \code{ARK},
#' \code{CAL}, \code{CONN}, \code{GA}, \code{IA}, \code{ILL}, \code{KY}, \code{MASS},
#' \code{MO}, \code{NC}, \code{NEB}, \code{NH}, \code{NJ}, \code{NY}, \code{OH},
#' \code{PA}, \code{SC}, \code{TEX}, \code{VA}, and \code{VT}}
#' \item{inaugural_age}{President's age at inauguration}
#' \item{death_age}{President's age at death}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' pie(xtabs(~birth_state, data = Presiden))
#' stem(Presiden$inaugural_age)
#' stem(Presiden$death_age)
#' par(mar = c(5.1, 4.1 + 3, 4.1, 2.1))
#' stripchart(x=list(Presiden$inaugural_age, Presiden$death_age),
#' method = "stack", col = c("green","brown"), pch = 19, las = 1)
#' par(mar = c(5.1, 4.1, 4.1, 2.1))
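#' # Illustrative, not part of Exercise 1.126: the two age distributions
#' # side by side.
#' boxplot(Presiden$inaugural_age, Presiden$death_age,
#'         names = c("inauguration", "death"), col = c("green", "brown"))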
#'
"Presiden"
#' Degree of confidence in the press versus education level for 20 randomly
#' selected persons
#'
#' Data for Exercise 9.55
#'
#'
#' @name Press
#' @docType data
#' @format A data frame/tibble with 20 observations on two variables
#' \describe{
#' \item{education_yrs}{years of education}
#' \item{confidence}{degree of confidence in the press (the higher the score, the more confidence)}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' plot(confidence ~ education_yrs, data = Press)
#' model <- lm(confidence ~ education_yrs, data = Press)
#' abline(model, col = "purple")
#' summary(model)
#' rm(model)
#'
"Press"
#' Klopfer's prognostic rating scale for subjects receiving behavior
#' modification therapy
#'
#' Data for Exercise 6.61
#'
#'
#' @name Prognost
#' @docType data
#' @format A data frame/tibble with 15 observations on one variable
#' \describe{
#' \item{kprs_score}{Klopfer's Prognostic Rating Scale score}
#' }
#'
#' @source Newmark, C., et al. (1973), Predictive Validity of the Rorschach Prognostic Rating Scale
#' with Behavior Modification Techniques, \emph{Journal of Clinical Psychology, 29}, 246-248.
#'
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' EDA(Prognost$kprs_score)
#' t.test(Prognost$kprs_score, mu = 9)
#'
"Prognost"
#' Effects of four different methods of programmed learning for statistics
#' students
#'
#' Data for Exercise 10.17
#'
#'
#' @name Program
#' @docType data
#' @format A data frame/tibble with 44 observations on two variables
#' \describe{
#' \item{method}{a character variable with values \code{method1}, \code{method2},
#' \code{method3}, and \code{method4}}
#' \item{score}{standardized test score}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' boxplot(score ~ method, col = c("red", "blue", "green", "yellow"), data = Program)
#' anova(lm(score ~ method, data = Program))
#' TukeyHSD(aov(score ~ method, data = Program))
#' par(mar = c(5.1, 4.1 + 4, 4.1, 2.1))
#' plot(TukeyHSD(aov(score ~ method, data = Program)), las = 1)
#' par(mar = c(5.1, 4.1, 4.1, 2.1))
#'
"Program"
#' PSAT scores versus SAT scores
#'
#' Data for Exercise 2.50
#'
#'
#' @name Psat
#' @docType data
#' @format A data frame/tibble with seven observations on two variables
#' \describe{
#' \item{psat}{PSAT score}
#' \item{sat}{SAT score}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' model <- lm(sat ~ psat, data = Psat)
#' par(mfrow = c(1, 2))
#' plot(Psat$psat, resid(model))
#' plot(model, which = 1)
#' rm(model)
#' par(mfrow = c(1, 1))
#'
"Psat"
#' Correct responses for 24 students in a psychology experiment
#'
#' Data for Exercise 1.42
#'
#'
#' @name Psych
#' @docType data
#' @format A data frame/tibble with 23 observations on one variable
#' \describe{
#' \item{score}{number of correct responses in a psychology experiment}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' stem(Psych$score)
#' EDA(Psych$score)
#'
"Psych"
#' Weekly incomes of a random sample of 50 Puerto Rican families in Miami
#'
#' Data for Exercises 5.22 and 5.65
#'
#'
#' @name Puerto
#' @docType data
#' @format A data frame/tibble with 50 observations on one variable
#' \describe{
#' \item{income}{weekly family income (in dollars)}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' stem(Puerto$income)
#' boxplot(Puerto$income, col = "purple")
#' t.test(Puerto$income,conf.level = .90)$conf
#'
"Puerto"
#' Plasma LDL levels in two groups of quail
#'
#' Data for Exercises 1.53, 1.77, 1.88, 5.66, and 7.50
#'
#'
#' @name Quail
#' @docType data
#' @format A data frame/tibble with 40 observations on two variables
#' \describe{
#' \item{group}{a character variable with values \code{placebo} and \code{treatment}}
#' \item{level}{low-density lipoprotein (LDL) cholesterol level}
#' }
#'
#' @source J. McKean, and T. Vidmar (1994), "A Comparison of Two Rank-Based Methods for the
#' Analysis of Linear Models," \emph{The American Statistician, 48}, 220-229.
#'
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' boxplot(level ~ group, data = Quail, horizontal = TRUE, xlab = "LDL Level",
#' col = c("yellow", "lightblue"))
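#' # Illustrative two-sample comparison; the text's exercises may call for
#' # a different procedure.
#' t.test(level ~ group, data = Quail)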
#'
"Quail"
#' Quality control test scores on two manufacturing processes
#'
#' Data for Exercise 7.81
#'
#'
#' @name Quality
#' @docType data
#' @format A data frame/tibble with 15 observations on two variables
#' \describe{
#' \item{process}{a character variable with values \code{Process1} and \code{Process2}}
#' \item{score}{results of a quality control test}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' boxplot(score ~ process, data = Quality, col = "lightgreen")
#' t.test(score ~ process, data = Quality)
#'
"Quality"
#' Rainfall in an area of west central Kansas and four surrounding counties
#'
#' Data for Exercise 9.8
#'
#'
#' @name Rainks
#' @docType data
#' @format A data frame/tibble with 35 observations on five variables
#' \describe{
#' \item{rain}{rainfall (in inches) in the west central Kansas area}
#' \item{x1}{rainfall (in inches) in the first of the four surrounding counties}
#' \item{x2}{rainfall (in inches) in the second of the four surrounding counties}
#' \item{x3}{rainfall (in inches) in the third of the four surrounding counties}
#' \item{x4}{rainfall (in inches) in the fourth of the four surrounding counties}
#' }
#'
#' @source R. Picard and K. Berk (1990), Data Splitting, \emph{The American Statistician, 44}(2),
#' 140-147.
#'
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' cor(Rainks)
#' model <- lm(rain ~ x2, data = Rainks)
#' summary(model)
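#' # Illustrative, not part of Exercise 9.8: regress on all four
#' # surrounding counties at once.
#' model_full <- lm(rain ~ x1 + x2 + x3 + x4, data = Rainks)
#' summary(model_full)
#' rm(model, model_full)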
#'
"Rainks"
#' Research and development expenditures and sales of a large company
#'
#' Data for Exercise 9.36 and Example 9.8
#'
#'
#' @name Randd
#' @docType data
#' @format A data frame/tibble with 12 observations on two variables
#' \describe{
#' \item{rd}{research and development expenditures (in million dollars)}
#' \item{sales}{sales (in million dollars)}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' plot(sales ~ rd, data = Randd)
#' model <- lm(sales ~ rd, data = Randd)
#' abline(model, col = "purple")
#' summary(model)
#' plot(model, which = 1)
#' rm(model)
#'
"Randd"
#' Survival times of 20 rats exposed to high levels of radiation
#'
#' Data for Exercises 1.52, 1.76, 5.62, and 6.44
#'
#'
#' @name Rat
#' @docType data
#' @format A data frame/tibble with 20 observations on one variable
#' \describe{
#' \item{survival_time}{survival time in weeks for rats exposed to a high level of radiation}
#' }
#'
#' @source J. Lawless, \emph{Statistical Models and Methods for Lifetime Data} (New York: Wiley, 1982).
#'
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' hist(Rat$survival_time)
#' qqnorm(Rat$survival_time)
#' qqline(Rat$survival_time)
#' summary(Rat$survival_time)
#' t.test(Rat$survival_time)
#' t.test(Rat$survival_time, mu = 100, alternative = "greater")
#'
"Rat"
#' Grade point averages versus teacher's ratings
#'
#' Data for Example 2.6
#'
#'
#' @name Ratings
#' @docType data
#' @format A data frame/tibble with 250 observations on two variables
#' \describe{
#' \item{rating}{character variable with students' ratings of instructor (A-F)}
#' \item{gpa}{students' grade point average}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' boxplot(gpa ~ rating, data = Ratings, xlab = "Student rating of instructor",
#' ylab = "Student GPA")
#' \dontrun{
#' library(ggplot2)
#' ggplot2::ggplot(data = Ratings, aes(x = rating, y = gpa, fill = rating)) +
#' geom_boxplot() +
#' theme_bw() +
#' theme(legend.position = "none") +
#' labs(x = "Student rating of instructor", y = "Student GPA")
#' }
#'
"Ratings"
#' Threshold reaction time for persons subjected to emotional stress
#'
#' Data for Example 6.11
#'
#'
#' @name Reaction
#' @docType data
#' @format A data frame/tibble with 12 observations on one variable
#' \describe{
#' \item{time}{threshold reaction time (in seconds) for persons subjected to emotional stress}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' stem(Reaction$time)
#' SIGN.test(Reaction$time, md = 15, alternative = "less")
#'
"Reaction"
#' Standardized reading scores for 30 fifth graders
#'
#' Data for Exercises 1.72 and 2.10
#'
#'
#' @name Reading
#' @docType data
#' @format A data frame/tibble with 30 observations on four variables
#' \describe{
#' \item{score}{standardized reading test score}
#' \item{sorted}{sorted values of \code{score}}
#' \item{trimmed}{trimmed values of \code{sorted}}
#' \item{winsoriz}{winsorized values of \code{score}}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' hist(Reading$score, main = "Exercise 1.72",
#' col = "lightgreen", xlab = "Standardized reading score")
#' summary(Reading$score)
#' sd(Reading$score)
#'
"Reading"
#' Reading scores versus IQ scores
#'
#' Data for Exercises 2.10 and 2.53
#'
#'
#' @name Readiq
#' @docType data
#' @format A data frame/tibble with 14 observations on two variables
#' \describe{
#' \item{reading}{reading achievement score}
#' \item{iq}{IQ score}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' plot(reading ~ iq, data = Readiq)
#' model <- lm(reading ~ iq, data = Readiq)
#' abline(model, col = "purple")
#' predict(model, newdata = data.frame(iq = c(100, 120)))
#' residuals(model)[c(6, 7)]
#' rm(model)
#'
"Readiq"
#' Opinion on referendum by view on freedom of the press
#'
#' Data for Exercise 8.20
#'
#'
#' @name Referend
#' @docType data
#' @format A data frame with 237 observations on two variables
#' \describe{
#' \item{choice}{a factor with levels \code{A}, \code{B}, and \code{C}}
#' \item{response}{a factor with levels \code{for}, \code{against}, and \code{undecided}}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' T1 <- xtabs(~choice + response, data = Referend)
#' T1
#' chisq.test(T1)
#' chisq.test(T1)$expected
#'
"Referend"
#' Pollution index taken in three regions of the country
#'
#' Data for Exercise 10.26
#'
#'
#' @name Region
#' @docType data
#' @format A data frame/tibble with 48 observations on three variables
#' \describe{
#' \item{pollution}{pollution index}
#' \item{region}{region of the country (\code{west}, \code{central}, and \code{east})}
#' \item{ranks}{ranked values of \code{pollution}}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' boxplot(pollution ~ region, data = Region, col = "gray")
#' anova(lm(pollution ~ region, data = Region))
#'
"Region"
#' Maintenance cost versus age of cash registers in a department store
#'
#' Data for Exercises 2.3, 2.39, and 2.54
#'
#'
#' @name Register
#' @docType data
#' @format A data frame/tibble with nine observations on two variables
#' \describe{
#' \item{age}{age of cash register (in years)}
#' \item{cost}{maintenance cost of cash register (in dollars)}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' plot(cost ~ age, data = Register)
#' model <- lm(cost ~ age, data = Register)
#' abline(model, col = "red")
#' predict(model, newdata = data.frame(age = c(5, 10)))
#' plot(model, which = 1)
#' rm(model)
#'
"Register"
#' Rehabilitative potential of 20 prison inmates as judged by two psychiatrists
#'
#' Data for Exercise 7.61
#'
#'
#' @name Rehab
#' @docType data
#' @format A data frame/tibble with 20 observations on four variables
#' \describe{
#' \item{inmate}{inmate identification number}
#' \item{psych1}{rating from the first psychiatrist on the inmate's rehabilitative potential}
#' \item{psych2}{rating from the second psychiatrist on the inmate's rehabilitative potential}
#' \item{differ}{\code{psych1} - \code{psych2}}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' boxplot(Rehab$differ)
#' qqnorm(Rehab$differ)
#' qqline(Rehab$differ)
#' t.test(Rehab$differ)
#'
"Rehab"
#' Math placement test score for 35 freshmen females and 42 freshmen males
#'
#' Data for Exercise 7.43
#'
#'
#' @name Remedial
#' @docType data
#' @format A data frame/tibble with 84 observations on two variables
#' \describe{
#' \item{gender}{a character variable with values \code{female} and \code{male}}
#' \item{score}{math placement score}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' boxplot(score ~ gender, data = Remedial,
#' col = c("purple", "blue"))
#' t.test(score ~ gender, data = Remedial, conf.level = 0.98)
#' t.test(score ~ gender, data = Remedial, conf.level = 0.98)$conf
#' wilcox.test(score ~ gender, data = Remedial,
#' conf.int = TRUE, conf.level = 0.98)
#'
"Remedial"
#' Weekly rentals for 45 apartments
#'
#' Data for Exercise 1.122
#'
#'
#' @name Rentals
#' @docType data
#' @format A data frame/tibble with 45 observations on one variable
#' \describe{
#' \item{rent}{weekly apartment rental price (in dollars)}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' stem(Rentals$rent)
#' sum(Rentals$rent < mean(Rentals$rent) - 3*sd(Rentals$rent) |
#' Rentals$rent > mean(Rentals$rent) + 3*sd(Rentals$rent))
#'
"Rentals"
#' Recorded times for repairing 22 automobiles involved in wrecks
#'
#' Data for Exercise 5.77
#'
#'
#' @name Repair
#' @docType data
#' @format A data frame/tibble with 22 observations on one variable
#' \describe{
#' \item{time}{time to repair a wrecked car (in hours)}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' stem(Repair$time)
#' SIGN.test(Repair$time, conf.level = 0.98)
#'
"Repair"
#' Length of employment versus gross sales for 10 employees of a large retail
#' store
#'
#' Data for Exercise 9.59
#'
#'
#' @name Retail
#' @docType data
#' @format A data frame/tibble with 10 observations on two variables
#' \describe{
#' \item{months}{length of employment (in months)}
#' \item{sales}{employee gross sales (in dollars)}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' plot(sales ~ months, data = Retail)
#' model <- lm(sales ~ months, data = Retail)
#' abline(model, col = "blue")
#' summary(model)
#'
"Retail"
#' Oceanography data obtained at site 1 by scientists aboard the ship Ron Brown
#'
#' Data for Exercise 2.9
#'
#'
#' @name Ronbrown1
#' @docType data
#' @format A data frame/tibble with 75 observations on two variables
#' \describe{
#' \item{depth}{ocean depth (in meters)}
#' \item{temperature}{ocean temperature (in Celsius)}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' plot(temperature ~ depth, data = Ronbrown1, ylab = "Temperature")
#'
"Ronbrown1"
#' Oceanography data obtained at site 2 by scientists aboard the ship Ron Brown
#'
#' Data for Exercise 2.56 and Example 2.4
#'
#'
#' @name Ronbrown2
#' @docType data
#' @format A data frame/tibble with 150 observations on three variables
#' \describe{
#' \item{depth}{ocean depth (in meters)}
#' \item{temperature}{ocean temperature (in Celsius)}
#' \item{salinity}{ocean salinity level}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' plot(salinity ~ depth, data = Ronbrown2)
#' model <- lm(salinity ~ depth, data = Ronbrown2)
#' summary(model)
#' plot(model, which = 1)
#' rm(model)
#'
"Ronbrown2"
#' Social adjustment scores for a rural group and a city group of children
#'
#' Data for Example 7.16
#'
#'
#' @name Rural
#' @docType data
#' @format A data frame/tibble with 33 observations on two variables
#' \describe{
#' \item{score}{child's social adjustment score}
#' \item{area}{character variable with values \code{city} and \code{rural}}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' boxplot(score ~ area, data = Rural)
#' wilcox.test(score ~ area, data = Rural)
#' \dontrun{
#' library(dplyr)
#' Rural <- dplyr::mutate(Rural, r = rank(score))
#' Rural
#' t.test(r ~ area, data = Rural)
#' }
#'
"Rural"
#' Starting salaries for 25 new PhD psychologists
#'
#' Data for Exercise 3.66
#'
#'
#' @name Salary
#' @docType data
#' @format A data frame/tibble with 25 observations on one variable
#' \describe{
#' \item{salary}{starting salary for Ph.D. psychologists (in dollars)}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' qqnorm(Salary$salary, pch = 19, col = "purple")
#' qqline(Salary$salary, col = "blue")
#'
"Salary"
#' Surface-water salinity measurements from Whitewater Bay, Florida
#'
#' Data for Exercises 5.27 and 5.64
#'
#'
#' @name Salinity
#' @docType data
#' @format A data frame/tibble with 48 observations on one variable
#' \describe{
#' \item{salinity}{surface-water salinity value}
#' }
#'
#' @source J. Davis, \emph{Statistics and Data Analysis in Geology}, 2nd ed. (New York: John Wiley, 1986).
#'
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' stem(Salinity$salinity)
#' qqnorm(Salinity$salinity, pch = 19, col = "purple")
#' qqline(Salinity$salinity, col = "blue")
#' t.test(Salinity$salinity, conf.level = 0.99)
#' t.test(Salinity$salinity, conf.level = 0.99)$conf
#'
"Salinity"
#' SAT scores, percent taking exam and state funding per student by state for
#' 1994, 1995 and 1999
#'
#' Data for Statistical Insight Chapter 9
#'
#'
#' @name Sat
#' @docType data
#' @format A data frame/tibble with 102 observations on seven variables
#' \describe{
#' \item{state}{U.S. state}
#' \item{verbal}{verbal SAT score}
#' \item{math}{math SAT score}
#' \item{total}{combined verbal and math SAT score}
#' \item{percent}{percent of high school seniors taking the SAT}
#' \item{expend}{state expenditure per student (in dollars)}
#' \item{year}{year}
#' }
#'
#' @source \emph{The 2000 World Almanac and Book of Facts}, Funk and Wagnalls Corporation, New Jersey.
#'
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' Sat94 <- Sat[Sat$year == 1994, ]
#' Sat94
#' Sat99 <- subset(Sat, year == 1999)
#' Sat99
#' stem(Sat99$total)
#' plot(total ~ percent, data = Sat99)
#' model <- lm(total ~ percent, data = Sat99)
#' abline(model, col = "blue")
#' summary(model)
#' rm(model)
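#' # Illustrative, not from the text: distribution of total scores by year.
#' boxplot(total ~ year, data = Sat, col = "lightblue")
#' rm(Sat94, Sat99)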
#'
"Sat"
#' Problem asset ratio for savings and loan companies in California, New York,
#' and Texas
#'
#' Data for Exercises 10.34 and 10.49
#'
#'
#' @name Saving
#' @docType data
#' @format A data frame/tibble with 65 observations on two variables
#' \describe{
#' \item{par}{problem-asset-ratio for Savings & Loans that were listed as being financially troubled in 1992}
#' \item{state}{U.S. state}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' boxplot(par ~ state, data = Saving, col = "red")
#' boxplot(par ~ state, data = Saving, log = "y", col = "red")
#' model <- aov(par ~ state, data = Saving)
#' summary(model)
#' plot(TukeyHSD(model))
#' kruskal.test(par ~ factor(state), data = Saving)
#'
"Saving"
#' Readings obtained from a 100 pound weight placed on four brands of bathroom
#' scales
#'
#' Data for Exercise 1.89
#'
#'
#' @name Scales
#' @docType data
#' @format A data frame/tibble with 20 observations on two variables
#' \describe{
#' \item{brand}{variable indicating brand of bathroom scale (\code{A}, \code{B}, \code{C}, or \code{D})}
#' \item{reading}{recorded value (in pounds) of a 100 pound weight}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' boxplot(reading ~ brand, data = Scales, col = rainbow(4),
#' ylab = "Weight (lbs)")
#' \dontrun{
#' library(ggplot2)
#' ggplot2::ggplot(data = Scales, aes(x = brand, y = reading, fill = brand)) +
#' geom_boxplot() +
#' labs(y = "weight (lbs)") +
#' theme_bw() +
#' theme(legend.position = "none")
#' }
#'
"Scales"
#' Exam scores for 17 patients to assess the learning ability of schizophrenics
#' after taking a specified dose of a tranquilizer
#'
#' Data for Exercise 6.99
#'
#'
#' @name Schizop2
#' @docType data
#' @format A data frame/tibble with 17 observations on one variable
#' \describe{
#' \item{score}{schizophrenic's score on a second standardized exam}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' hist(Schizop2$score, xlab = "score on standardized test after a tranquilizer",
#' main = "Exercise 6.99", breaks = 10, col = "orange")
#' EDA(Schizop2$score)
#' SIGN.test(Schizop2$score, md = 22, alternative = "greater")
#'
"Schizop2"
#' Standardized exam scores for 13 patients to investigate the learning ability
#' of schizophrenics after a specified dose of a tranquilizer
#'
#' Data for Example 6.10
#'
#'
#' @name Schizoph
#' @docType data
#' @format A data frame/tibble with 13 observations on one variable
#' \describe{
#' \item{score}{schizophrenic's score on a standardized exam one
#' hour after receiving a specified dose of a tranquilizer.}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' hist(Schizoph$score, xlab = "score on standardized test",
#' main = "Example 6.10", breaks = 10, col = "orange")
#' EDA(Schizoph$score)
#' t.test(Schizoph$score, mu = 20)
#'
"Schizoph"
#' Injury level versus seatbelt usage
#'
#' Data for Exercise 8.24
#'
#'
#' @name Seatbelt
#' @docType data
#' @format A data frame/tibble with 86,759 observations on two variables
#' \describe{
#' \item{seatbelt}{a factor with levels \code{No} and \code{Yes}}
#' \item{injuries}{a factor with levels \code{None}, \code{Minimal},
#' \code{Minor}, and \code{Major} indicating the extent of the driver's injuries}
#' }
#'
#' @source Jobson, J. (1982), \emph{Applied Multivariate Data Analysis}, Springer-Verlag,
#' New York, p. 18.
#'
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' T1 <- xtabs(~seatbelt + injuries, data = Seatbelt)
#' T1
#' chisq.test(T1)
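#' # Illustrative extra step, not part of Exercise 8.24: injury profile
#' # within each seatbelt group.
#' prop.table(T1, margin = 1)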
#' rm(T1)
#'
"Seatbelt"
#' Self-confidence scores for 9 women before and after instructions on
#' self-defense
#'
#' Data for Example 7.19
#'
#'
#' @name Selfdefe
#' @docType data
#' @format A data frame/tibble with nine observations on three variables
#' \describe{
#' \item{woman}{number identifying the woman}
#' \item{before}{before the course self-confidence score}
#' \item{after}{after the course self-confidence score}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' Selfdefe$differ <- Selfdefe$after - Selfdefe$before
#' Selfdefe
#' t.test(Selfdefe$differ, alternative = "greater")
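#' # Illustrative nonparametric check, not part of Example 7.19.
#' SIGN.test(Selfdefe$differ, md = 0, alternative = "greater")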
#'
"Selfdefe"
#' Reaction times of 30 senior citizens applying for drivers license renewals
#'
#' Data for Exercises 1.83 and 3.67
#'
#'
#' @name Senior
#' @docType data
#' @format A data frame/tibble with 31 observations on one variable
#' \describe{
#' \item{reaction}{reaction time for senior citizens applying for a driver's license renewal}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' stem(Senior$reaction)
#' fivenum(Senior$reaction)
#' boxplot(Senior$reaction, main = "Problem 1.83, part d",
#' horizontal = TRUE, col = "purple")
#'
"Senior"
#' Sentences of 41 prisoners convicted of a homicide offense
#'
#' Data for Exercise 1.123
#'
#'
#' @name Sentence
#' @docType data
#' @format A data frame/tibble with 41 observations on one variable
#' \describe{
#' \item{months}{sentence length (in months) for prisoners convicted of homicide}
#' }
#'
#' @source U.S. Department of Justice, Bureau of Justice Statistics, \emph{Prison Sentences
#' and Time Served for Violence}, NCJ-153858, April 1995.
#'
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' stem(Sentence$months)
#' ll <- mean(Sentence$months)-2*sd(Sentence$months)
#' ul <- mean(Sentence$months)+2*sd(Sentence$months)
#' limits <- c(ll, ul)
#' limits
#' rm(ul, ll, limits)
#'
"Sentence"
#' Effects of a drug and electroshock therapy on the ability to solve simple
#' tasks
#'
#' Data for Exercises 10.11 and 10.12
#'
#'
#' @name Shkdrug
#' @docType data
#' @format A data frame/tibble with 64 observations on two variables
#' \describe{
#' \item{treatment}{type of treatment: \code{Drug/NoS}, \code{Drug/Shk},
#' \code{NoDg/NoS}, or \code{NoDrug/S}}
#' \item{response}{number of tasks completed in a 10-minute period}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' boxplot(response ~ treatment, data = Shkdrug, col = "gray")
#' model <- lm(response ~ treatment, data = Shkdrug)
#' anova(model)
#' rm(model)
#'
"Shkdrug"
#' Effect of experimental shock on time to complete difficult task
#'
#' Data for Exercise 10.50
#'
#'
#' @name Shock
#' @docType data
#' @format A data frame/tibble with 27 observations on two variables
#' \describe{
#' \item{group}{grouping variable with values of \code{Group1} (no shock),
#' \code{Group2} (medium shock), and \code{Group3} (severe shock)}
#' \item{attempts}{number of attempts to complete a task}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' boxplot(attempts ~ group, data = Shock, col = "violet")
#' model <- lm(attempts ~ group, data = Shock)
#' anova(model)
#' rm(model)
#'
#'
"Shock"
#' Sales receipts versus shoplifting losses for a department store
#'
#' Data for Exercise 9.58
#'
#'
#' @name Shoplift
#' @docType data
#' @format A data frame/tibble with eight observations on two variables
#' \describe{
#' \item{sales}{sales (in 1000 dollars)}
#' \item{loss}{loss (in 100 dollars)}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' plot(loss ~ sales, data = Shoplift)
#' model <- lm(loss ~ sales, data = Shoplift)
#' summary(model)
#' rm(model)
#'
"Shoplift"
#' James Short's measurements of the parallax of the sun
#'
#' Data for Exercise 6.65
#'
#'
#' @name Short
#' @docType data
#' @format A data frame/tibble with 158 observations on two variables
#' \describe{
#' \item{sample}{sample number}
#' \item{parallax}{parallax measurements (seconds of a degree)}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' hist(Short$parallax, main = "Problem 6.65",
#' xlab = "", col = "orange")
#' SIGN.test(Short$parallax, md = 8.798)
#' t.test(Short$parallax, mu = 8.798)
#'
"Short"
#' Number of people riding shuttle versus number of automobiles in the downtown
#' area
#'
#' Data for Exercise 9.20
#'
#'
#' @name Shuttle
#' @docType data
#' @format A data frame/tibble with 15 observations on two variables
#' \describe{
#' \item{users}{number of shuttle riders}
#' \item{autos}{number of automobiles in the downtown area}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' plot(autos ~ users, data = Shuttle)
#' model <- lm(autos ~ users, data = Shuttle)
#' summary(model)
#' rm(model)
#'
"Shuttle"
#' Grade point averages of men and women participating in various sports: an
#' illustration of Simpson's paradox
#'
#' Data for Example 1.18
#'
#'
#' @name Simpson
#' @docType data
#' @format A data frame/tibble with 100 observations on three variables
#' \describe{
#' \item{gpa}{grade point average}
#' \item{sport}{sport played (basketball, soccer, or track)}
#' \item{gender}{athlete sex (male, female)}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' boxplot(gpa ~ gender, data = Simpson, col = "violet")
#' boxplot(gpa ~ sport, data = Simpson, col = "lightgreen")
#' \dontrun{
#' library(ggplot2)
#' ggplot2::ggplot(data = Simpson, aes(x = gender, y = gpa, fill = gender)) +
#' geom_boxplot() +
#' facet_grid(.~sport) +
#' theme_bw()
#' }
"Simpson"
#' Maximum number of situps by participants in an exercise class
#'
#' Data for Exercise 1.47
#'
#'
#' @name Situp
#' @docType data
#' @format A data frame/tibble with 20 observations on one variable
#' \describe{
#' \item{number}{maximum number of situps completed in an exercise class
#' after 1 month in the program}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' stem(Situp$number)
#' hist(Situp$number, breaks = seq(0, 70, 10), right = FALSE)
#' hist(Situp$number, breaks = seq(0, 70, 10), right = FALSE,
#' freq = FALSE, col = "pink", main = "Problem 1.47",
#' xlab = "Maximum number of situps")
#' lines(density(Situp$number), col = "red")
#'
"Situp"
#' Illustrates the Wilcoxon Rank Sum test
#'
#' Data for Exercise 7.65
#'
#'
#' @name Skewed
#' @docType data
#' @format A data frame/tibble with 21 observations on two variables
#' \describe{
#' \item{C1}{values from a sample of size 16 from a particular population}
#' \item{C2}{values from a sample of size 14 from a particular population}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' boxplot(Skewed$C1, Skewed$C2, col = c("pink", "lightblue"))
#' wilcox.test(Skewed$C1, Skewed$C2)
#'
"Skewed"
#' Survival times of closely and poorly matched skin grafts on burn patients
#'
#' Data for Exercise 5.20
#'
#'
#' @name Skin
#' @docType data
#' @format A data frame/tibble with 11 observations on four variables
#' \describe{
#' \item{patient}{patient identification number}
#' \item{close}{graft survival time in days for a closely matched skin graft on the same burn patient}
#' \item{poor}{graft survival time in days for a poorly matched skin graft on the same burn patient}
#' \item{differ}{difference between close and poor (in days)}
#' }
#'
#' @source R. F. Woolson and P. A. Lachenbruch, "Rank Tests for Censored Matched Pairs,"
#' \emph{Biometrika}, 67(1980), 597-606.
#'
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' stem(Skin$differ)
#' boxplot(Skin$differ, col = "pink")
#' summary(Skin$differ)
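#' # Illustrative paired analysis of the close - poor differences; the
#' # text's exercise may use a different interval or test.
#' t.test(Skin$differ)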
#'
"Skin"
#' Sodium-lithium countertransport activity on 190 individuals from six large
#' English kindreds
#'
#' Data for Exercise 5.116
#'
#'
#' @name Slc
#' @docType data
#' @format A data frame/tibble with 190 observations on one variable
#' \describe{
#' \item{slc}{Red blood cell sodium-lithium countertransport}
#' }
#'
#' @source Roeder, K., (1994), "A Graphical Technique for Determining the Number of Components
#' in a Mixture of Normals," \emph{Journal of the American Statistical Association, 89}, 487-495.
#'
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' EDA(Slc$slc)
#' hist(Slc$slc, freq = FALSE, xlab = "sodium lithium countertransport",
#' main = "", col = "lightblue")
#' lines(density(Slc$slc), col = "purple")
#'
"Slc"
#' Water pH levels of 75 water samples taken in the Great Smoky Mountains
#'
#' Data for Exercises 6.40, 6.59, 7.10, and 7.35
#'
#'
#' @name Smokyph
#' @docType data
#' @format A data frame/tibble with 75 observations on three variables
#' \describe{
#' \item{waterph}{water sample pH level}
#' \item{code}{character variable with values \code{low} (elevation below 0.6 miles),
#' and \code{high} (elevation above 0.6 miles)}
#' \item{elev}{elevation in miles}
#' }
#'
#' @source Schmoyer, R. L. (1994), Permutation Tests for Correlation in Regression Errors,
#' \emph{Journal of the American Statistical Association, 89}, 1507-1516.
#'
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' summary(Smokyph$waterph)
#' tapply(Smokyph$waterph, Smokyph$code, mean)
#' stripchart(waterph ~ code, data = Smokyph, method = "stack",
#' pch = 19, col = c("red", "blue"))
#' t.test(Smokyph$waterph, mu = 7)
#' SIGN.test(Smokyph$waterph, md = 7)
#' t.test(waterph ~ code, data = Smokyph, alternative = "less")
#' t.test(waterph ~ code, data = Smokyph, conf.level = 0.90)
#' \dontrun{
#' library(ggplot2)
#' ggplot2::ggplot(data = Smokyph, aes(x = waterph, fill = code)) +
#' geom_dotplot() +
#' facet_grid(code ~ .) +
#'        guides(fill = "none")
#' }
#'
"Smokyph"
#' Snoring versus heart disease
#'
#' Data for Exercise 8.21
#'
#'
#' @name Snore
#' @docType data
#' @format A data frame/tibble with 2,484 observations on two variables
#' \describe{
#' \item{snore}{factor with levels \code{nonsnorer}, \code{ocassional snorer},
#' \code{nearly every night}, and \code{snores every night}}
#' \item{heartdisease}{factor indicating whether the individual has heart disease
#' (\code{no} or \code{yes})}
#' }
#'
#' @source Norton, P. and Dunn, E. (1985), Snoring as a Risk Factor for Disease,
#' \emph{British Medical Journal, 291},
#' 630-632.
#'
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' T1 <- xtabs(~ heartdisease + snore, data = Snore)
#' T1
#' chisq.test(T1)
#' rm(T1)
#'
"Snore"
#' Concentration of microparticles in snowfields of Greenland and Antarctica
#'
#' Data for Exercise 7.87
#'
#'
#' @name Snow
#' @docType data
#' @format A data frame/tibble with 34 observations on two variables
#' \describe{
#' \item{concent}{concentration of microparticles from melted snow (in parts per billion)}
#' \item{site}{location of snow sample (\code{Antarctica} or \code{Greenland})}
#' }
#'
#' @source Davis, J., \emph{Statistics and Data Analysis in Geology}, John Wiley, New York.
#'
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' boxplot(concent ~ site, data = Snow, col = c("lightblue", "lightgreen"))
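#' # Illustrative two-sample comparison of the two sites; Exercise 7.87
#' # may call for a different procedure.
#' t.test(concent ~ site, data = Snow)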
#'
"Snow"
#' Weights of 25 soccer players
#'
#' Data for Exercise 1.46
#'
#'
#' @name Soccer
#' @docType data
#' @format A data frame/tibble with 25 observations on one variable
#' \describe{
#' \item{weight}{soccer player's weight (in pounds)}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' stem(Soccer$weight, scale = 2)
#' hist(Soccer$weight, breaks = seq(110, 210, 10), col = "orange",
#' main = "Problem 1.46 \n Weights of Soccer Players",
#' xlab = "weight (lbs)", right = FALSE)
#'
"Soccer"
#' Median income level for 25 social workers from North Carolina
#'
#' Data for Exercise 6.63
#'
#'
#' @name Social
#' @docType data
#' @format A data frame/tibble with 25 observations on one variable
#' \describe{
#' \item{income}{annual income (in dollars) of North Carolina social workers
#' with less than five years of experience.}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' SIGN.test(Social$income, md = 27500, alternative = "less")
#'
"Social"
#' Grade point averages, SAT scores and final grade in college algebra for 20
#' sophomores
#'
#' Data for Exercise 2.42
#'
#'
#' @name Sophomor
#' @docType data
#' @format A data frame/tibble with 20 observations on four variables
#' \describe{
#' \item{student}{identification number}
#' \item{gpa}{grade point average}
#' \item{sat}{SAT math score}
#' \item{exam}{final exam grade in college algebra}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' cor(Sophomor)
#' plot(exam ~ gpa, data = Sophomor)
#' \dontrun{
#' library(ggplot2)
#' ggplot2::ggplot(data = Sophomor, aes(x = gpa, y = exam)) +
#' geom_point()
#' ggplot2::ggplot(data = Sophomor, aes(x = sat, y = exam)) +
#' geom_point()
#' }
#'
"Sophomor"
#' Murder rates for 30 cities in the South
#'
#' Data for Exercise 1.84
#'
#'
#' @name South
#' @docType data
#' @format A data frame/tibble with 31 observations on one variable
#' \describe{
#' \item{rate}{murder rate per 100,000 people}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' boxplot(South$rate, col = "gray", ylab = "Murder rate per 100,000 people")
#'
"South"
#' Speed reading scores before and after a course on speed reading
#'
#' Data for Exercise 7.58
#'
#'
#' @name Speed
#' @docType data
#' @format A data frame/tibble with 15 observations on four variables
#' \describe{
#' \item{before}{reading comprehension score before taking a speed-reading course}
#' \item{after}{reading comprehension score after taking a speed-reading course}
#' \item{differ}{after - before (reading comprehension scores)}
#' \item{signranks}{signed ranked differences}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' t.test(Speed$differ, alternative = "greater")
#' t.test(Speed$signranks, alternative = "greater")
#' wilcox.test(Pair(Speed$after, Speed$before) ~ 1, data = Speed, alternative = "greater")
#'
"Speed"
#' Standardized spelling test scores for two fourth grade classes
#'
#' Data for Exercise 7.82
#'
#'
#' @name Spellers
#' @docType data
#' @format A data frame/tibble with ten observations on two variables
#' \describe{
#' \item{teacher}{character variable with values \code{Fourth} and \code{Colleague}}
#' \item{score}{score on a standardized spelling test}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' boxplot(score ~ teacher, data = Spellers, col = "pink")
#' t.test(score ~ teacher, data = Spellers)
#'
"Spellers"
#' Spelling scores for 9 eighth graders before and after a 2-week course of
#' instruction
#'
#' Data for Exercise 7.56
#'
#'
#' @name Spelling
#' @docType data
#' @format A data frame/tibble with nine observations on three variables
#' \describe{
#' \item{before}{spelling score before a 2-week course of instruction}
#' \item{after}{spelling score after a 2-week course of instruction}
#' \item{differ}{after - before (spelling score)}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' qqnorm(Spelling$differ)
#' qqline(Spelling$differ)
#' shapiro.test(Spelling$differ)
#' t.test(Spelling$differ)
#'
"Spelling"
#' Favorite sport by gender
#'
#' Data for Exercise 8.32
#'
#'
#' @name Sports
#' @docType data
#' @format A data frame/tibble with 200 observations on two variables
#' \describe{
#' \item{gender}{a factor with levels \code{male} and \code{female}}
#' \item{sport}{a factor with levels \code{football}, \code{basketball},
#' \code{baseball}, and \code{tennis}}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' T1 <- xtabs(~gender + sport, data = Sports)
#' T1
#' chisq.test(T1)
#' rm(T1)
#'
"Sports"
#' Convictions in spouse murder cases by gender
#'
#' Data for Exercise 8.33
#'
#'
#' @name Spouse
#' @docType data
#' @format A data frame/tibble with 540 observations on two variables
#' \describe{
#' \item{result}{a factor with levels \code{not prosecuted}, \code{pleaded guilty},
#' \code{convicted}, and \code{acquited}}
#' \item{spouse}{a factor with levels \code{husband} and \code{wife}}
#' }
#'
#' @source Bureau of Justice Statistics (September 1995), \emph{Spouse Murder Defendants in Large
#' Urban Counties}, Executive Summary, NCJ-156831.
#'
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' T1 <- xtabs(~result + spouse, data = Spouse)
#' T1
#' chisq.test(T1)
#' rm(T1)
#'
"Spouse"
#' Times of a 2-year-old stallion on a one-mile run
#'
#' Data for Exercise 6.93
#'
#'
#' @name Stable
#' @docType data
#' @format A data frame/tibble with nine observations on one variable
#' \describe{
#' \item{time}{time (in seconds) for the horse to run 1 mile}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' SIGN.test(Stable$time, md = 98.5, alternative = "greater")
#'
"Stable"
#' Thicknesses of 1872 Hidalgo stamps issued in Mexico
#'
#' Data for Statistical Insight Chapter 1 and Exercise 5.110
#'
#'
#' @name Stamp
#' @docType data
#' @format A data frame/tibble with 485 observations on one variable
#' \describe{
#' \item{thickness}{stamp thickness (in mm)}
#' }
#'
#' @source Izenman, A., Sommer, C. (1988), Philatelic Mixtures and Multimodal Densities,
#' \emph{Journal of the American Statistical Association}, 83, 941-953.
#'
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' hist(Stamp$thickness, freq = FALSE, col = "lightblue",
#' main = "", xlab = "stamp thickness (mm)")
#' lines(density(Stamp$thickness), col = "blue")
#' t.test(Stamp$thickness, conf.level = 0.99)
#'
"Stamp"
#' Grades for two introductory statistics classes
#'
#' Data for Exercise 7.30
#'
#'
#' @name Statclas
#' @docType data
#' @format A data frame/tibble with 72 observations on two variables
#' \describe{
#' \item{class}{class meeting time (9am or 2pm)}
#' \item{score}{grade for an introductory statistics class}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' str(Statclas)
#' boxplot(score ~ class, data = Statclas, col = "red")
#' t.test(score ~ class, data = Statclas)
#'
"Statclas"
#' Operating expenditures per resident for each of the state law enforcement
#' agencies
#'
#' Data for Exercise 6.62
#'
#'
#' @name Statelaw
#' @docType data
#' @format A data frame/tibble with 50 observations on two variables
#' \describe{
#' \item{state}{U.S. state}
#' \item{cost}{dollars spent per resident on law enforcement}
#' }
#'
#' @source Bureau of Justice Statistics, \emph{Law Enforcement Management and
#' Administrative Statistics, 1993}, NCJ-148825, September 1995, page 84.
#'
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' EDA(Statelaw$cost)
#' SIGN.test(Statelaw$cost, md = 8, alternative = "less")
#'
"Statelaw"
#' Test scores for two beginning statistics classes
#'
#' Data for Exercises 1.70 and 1.87
#'
#'
#' @name Statisti
#' @docType data
#' @format A data frame/tibble with 62 observations on two variables
#' \describe{
#' \item{class}{character variable with values \code{Class1} and \code{Class2}}
#' \item{score}{test score for an introductory statistics test}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' boxplot(score ~ class, data = Statisti, col = "violet")
#' tapply(Statisti$score, Statisti$class, summary, na.rm = TRUE)
#' \dontrun{
#' library(dplyr)
#' dplyr::group_by(Statisti, class) %>%
#' summarize(Mean = mean(score, na.rm = TRUE),
#' Median = median(score, na.rm = TRUE),
#' SD = sd(score, na.rm = TRUE),
#' RS = IQR(score, na.rm = TRUE))
#' }
#'
"Statisti"
#' STEP science test scores for a class of ability-grouped students
#'
#' Data for Exercise 6.79
#'
#'
#' @name Step
#' @docType data
#' @format A data frame/tibble with 12 observations on one variable
#' \describe{
#' \item{score}{State test of educational progress (STEP) science test score}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' EDA(Step$score)
#' t.test(Step$score, mu = 80, alternative = "less")
#' wilcox.test(Step$score, mu = 80, alternative = "less")
#'
"Step"
#' Short-term memory test scores on 12 subjects before and after a stressful
#' situation
#'
#' Data for Example 7.20
#'
#'
#' @name Stress
#' @docType data
#' @format A data frame/tibble with 12 observations on two variables
#' \describe{
#' \item{prestress}{short term memory score before being exposed to a stressful situation}
#' \item{poststress}{short term memory score after being exposed to a stressful situation}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' diff <- Stress$prestress - Stress$poststress
#' qqnorm(diff)
#' qqline(diff)
#' t.test(diff)
#' \dontrun{
#' wilcox.test(Pair(Stress$prestress, Stress$poststress)~1, data = Stress)
#' }
#'
"Stress"
#' Number of hours studied per week by a sample of 50 freshmen
#'
#' Data for Exercise 5.25
#'
#'
#' @name Study
#' @docType data
#' @format A data frame/tibble with 50 observations on one variable
#' \describe{
#' \item{hours}{number of hours a week freshmen reported studying for their courses}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' stem(Study$hours)
#' hist(Study$hours, col = "violet")
#' summary(Study$hours)
#'
"Study"
#' Number of German submarines sunk by the U.S. Navy in World War II
#'
#' Data for Exercises 2.16, 2.45, and 2.59
#'
#'
#' @name Submarin
#' @docType data
#' @format A data frame/tibble with 16 observations on three variables
#' \describe{
#' \item{month}{month}
#' \item{reported}{number of submarines reported sunk by the U.S. Navy}
#' \item{actual}{number of submarines actually sunk by the U.S. Navy}
#' }
#'
#' @source F. Mosteller, S. Fienberg, and R. Rourke, \emph{Beginning Statistics with Data Analysis}
#' (Reading, MA: Addison-Wesley, 1983).
#'
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' model <- lm(actual ~ reported, data = Submarin)
#' summary(model)
#' plot(actual ~ reported, data = Submarin)
#' abline(model, col = "red")
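#' # Illustrative, not part of the exercises: correlation between reported
#' # and actual sinkings.
#' cor(Submarin$reported, Submarin$actual)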
#' rm(model)
#'
"Submarin"
#' Time it takes a subway to travel from the airport to downtown
#'
#' Data for Exercise 5.19
#'
#'
#' @name Subway
#' @docType data
#' @format A data frame/tibble with 30 observations on one variable
#' \describe{
#' \item{time}{time (in minutes) it takes a subway to travel from the airport to downtown}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' hist(Subway$time, main = "Exercise 5.19",
#' xlab = "Time (in minutes)", col = "purple")
#' summary(Subway$time)
#'
"Subway"
#' Wolfer sunspot numbers from 1700 through 2000
#'
#' Data for Example 1.7
#'
#'
#' @name Sunspot
#' @docType data
#' @format A data frame/tibble with 301 observations on two variables
#' \describe{
#' \item{year}{year}
#' \item{sunspots}{average number of sunspots for the year}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' plot(sunspots ~ year, data = Sunspot, type = "l")
#' \dontrun{
#' library(ggplot2)
#' lattice::xyplot(sunspots ~ year, data = Sunspot,
#' main = "Yearly sunspots", type = "l")
#' lattice::xyplot(sunspots ~ year, data = Sunspot, type = "l",
#' main = "Yearly sunspots", aspect = "xy")
#' ggplot2::ggplot(data = Sunspot, aes(x = year, y = sunspots)) +
#' geom_line() +
#' theme_bw()
#' }
#'
"Sunspot"
#' Margin of victory in Superbowls I to XXXV
#'
#' Data for Exercise 1.54
#'
#'
#' @name Superbowl
#' @docType data
#' @format A data frame/tibble with 35 observations on five variables
#' \describe{
#' \item{winning_team}{name of Superbowl winning team}
#' \item{winner_score}{winning score for the Superbowl}
#' \item{losing_team}{name of Superbowl losing team}
#' \item{loser_score}{score of losing team}
#' \item{victory_margin}{winner_score - loser_score}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' stem(Superbowl$victory_margin)
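#' # Illustrative summary of the winning margins, not part of Exercise 1.54.
#' summary(Superbowl$victory_margin)
#' boxplot(Superbowl$victory_margin, horizontal = TRUE, col = "lightblue",
#'         xlab = "Margin of victory (points)")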
#'
"Superbowl"
#' Top speeds attained by five makes of supercars
#'
#' Data for Statistical Insight Chapter 10
#'
#'
#' @name Supercar
#' @docType data
#' @format A data frame/tibble with 30 observations on two variables
#' \describe{
#' \item{speed}{top speed (in miles per hour) of car without redlining}
#' \item{car}{name of sports car}
#' }
#'
#' @source \emph{Car and Driver} (July 1995).
#'
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' boxplot(speed ~ car, data = Supercar, col = rainbow(6),
#' ylab = "Speed (mph)")
#' summary(aov(speed ~ car, data = Supercar))
#' anova(lm(speed ~ car, data = Supercar))
#'
"Supercar"
#' Ozone concentrations at Mt. Mitchell, North Carolina
#'
#' Data for Exercise 5.63
#'
#'
#' @name Tablrock
#' @docType data
#' @format A data frame/tibble with 719 observations on the following 17 variables.
#' \describe{
#' \item{day}{date}
#' \item{hour}{time of day}
#' \item{ozone}{ozone concentration}
#' \item{tmp}{temperature (in Celsius)}
#' \item{vdc}{a numeric vector}
#' \item{wd}{a numeric vector}
#' \item{ws}{a numeric vector}
#' \item{amb}{a numeric vector}
#' \item{dew}{a numeric vector}
#' \item{so2}{a numeric vector}
#' \item{no}{a numeric vector}
#' \item{no2}{a numeric vector}
#' \item{nox}{a numeric vector}
#' \item{co}{a numeric vector}
#' \item{co2}{a numeric vector}
#' \item{gas}{a numeric vector}
#' \item{air}{a numeric vector}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' summary(Tablrock$ozone)
#' boxplot(Tablrock$ozone)
#' qqnorm(Tablrock$ozone)
#' qqline(Tablrock$ozone)
#' par(mar = c(5.1 - 1, 4.1 + 2, 4.1 - 2, 2.1))
#' boxplot(ozone ~ day, data = Tablrock,
#' horizontal = TRUE, las = 1, cex.axis = 0.7)
#' par(mar = c(5.1, 4.1, 4.1, 2.1))
#' \dontrun{
#' library(ggplot2)
#' ggplot2::ggplot(data = Tablrock, aes(sample = ozone)) +
#' geom_qq() +
#' theme_bw()
#' ggplot2::ggplot(data = Tablrock, aes(x = as.factor(day), y = ozone)) +
#' geom_boxplot(fill = "pink") +
#' coord_flip() +
#' labs(x = "") +
#' theme_bw()
#' }
#'
"Tablrock"
#' Average teachers' salaries across the states in the 70s, 80s, and 90s
#'
#' Data for Exercise 5.114
#'
#'
#' @name Teacher
#' @docType data
#' @format A data frame/tibble with 51 observations on three variables
#' \describe{
#' \item{state}{U.S. state}
#' \item{year}{academic year}
#' \item{salary}{average salary (in dollars)}
#' }
#'
#' @source National Education Association.
#'
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' par(mfrow = c(3, 1))
#' hist(Teacher$salary[Teacher$year == "1973-74"],
#' main = "Teacher salary 1973-74", xlab = "salary",
#' xlim = range(Teacher$salary, na.rm = TRUE))
#' hist(Teacher$salary[Teacher$year == "1983-84"],
#' main = "Teacher salary 1983-84", xlab = "salary",
#' xlim = range(Teacher$salary, na.rm = TRUE))
#' hist(Teacher$salary[Teacher$year == "1993-94"],
#' main = "Teacher salary 1993-94", xlab = "salary",
#' xlim = range(Teacher$salary, na.rm = TRUE))
#' par(mfrow = c(1, 1))
#' \dontrun{
#' library(ggplot2)
#' ggplot2::ggplot(data = Teacher, aes(x = salary)) +
#' geom_histogram(fill = "purple", color = "black") +
#' facet_grid(year ~ .) +
#' theme_bw()
#' }
#'
"Teacher"
#' Tennessee self concept scores for 20 gifted high school students
#'
#' Data for Exercise 6.56
#'
#'
#' @name Tenness
#' @docType data
#' @format A data frame/tibble with 20 observations on one variable
#' \describe{
#' \item{score}{Tennessee Self-Concept Scale score}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' hist(Tenness$score, freq= FALSE, main = "", col = "green",
#' xlab = "Tennessee Self-Concept Scale score")
#' lines(density(Tenness$score))
#' \dontrun{
#' library(ggplot2)
#' ggplot2::ggplot(data = Tenness, aes(x = score, y = ..density..)) +
#' geom_histogram(binwidth = 2, fill = "purple", color = "black") +
#' geom_density(color = "red", fill = "pink", alpha = 0.3) +
#' theme_bw()
#' }
#'
"Tenness"
#' Tensile strength of plastic bags from two production runs
#'
#' Data for Example 7.11
#'
#'
#' @name Tensile
#' @docType data
#' @format A data frame/tibble with 72 observations on two variables
#' \describe{
#' \item{tensile}{plastic bag tensile strength (pounds per square inch)}
#' \item{run}{factor with run number (1 or 2)}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' boxplot(tensile ~ run, data = Tensile,
#' col = c("purple", "cyan"))
#' t.test(tensile ~ run, data = Tensile)
#'
"Tensile"
#' Grades on the first test in a statistics class
#'
#' Data for Exercise 5.80
#'
#'
#' @name Test1
#' @docType data
#' @format A data frame/tibble with 25 observations on one variable
#' \describe{
#' \item{score}{score on first statistics exam}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' stem(Test1$score)
#' boxplot(Test1$score, col = "purple")
#'
"Test1"
#' Heat loss of thermal pane windows versus outside temperature
#'
#' Data for Example 9.5
#'
#'
#' @name Thermal
#' @docType data
#' @format A data frame/tibble with 12 observations on the two variables
#' \describe{
#' \item{temp}{temperature (degrees Celsius)}
#' \item{loss}{heat loss (BTUs)}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' model <- lm(loss ~ temp, data = Thermal)
#' summary(model)
#' plot(loss ~ temp, data = Thermal)
#' abline(model, col = "red")
#' rm(model)
#'
"Thermal"
#' 1999-2000 closing prices for TIAA-CREF stocks
#'
#' Data for your enjoyment
#'
#'
#' @name Tiaa
#' @docType data
#' @format A data frame/tibble with 365 observations on four variables
#' \describe{
#' \item{crefstk}{closing price (in dollars)}
#' \item{crefgwt}{closing price (in dollars)}
#' \item{tiaa}{closing price (in dollars)}
#' \item{date}{day of the year}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' data(Tiaa)
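#' # A minimal plotting sketch (assumes the columns are numeric, as documented):
#' plot(crefstk ~ date, data = Tiaa, type = "l",
#'      ylab = "CREF stock closing price (dollars)")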
#'
"Tiaa"
#' Time to complete an airline ticket reservation
#'
#' Data for Exercise 5.18
#'
#'
#' @name Ticket
#' @docType data
#' @format A data frame/tibble with 20 observations on one variable
#' \describe{
#' \item{time}{time (in seconds) to check out a reservation}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' EDA(Ticket$time)
#'
"Ticket"
#' Consumer Reports (October 1994) ratings of toaster ovens versus their cost
#'
#' Data for Exercise 9.36
#'
#'
#' @name Toaster
#' @docType data
#' @format A data frame/tibble with 17 observations on three variables
#' \describe{
#' \item{toaster}{name of toaster}
#' \item{score}{Consumer Reports score}
#' \item{cost}{price of toaster (in dollars)}
#' }
#'
#' @source \emph{Consumer Reports} (October 1994).
#'
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' plot(cost ~ score, data = Toaster)
#' model <- lm(cost ~ score, data = Toaster)
#' summary(model)
#' names(summary(model))
#' summary(model)$r.squared
#' plot(model, which = 1)
#'
"Toaster"
#' Size of tonsils collected from 1,398 children
#'
#' Data for Exercise 2.78
#'
#'
#' @name Tonsils
#' @docType data
#' @format A data frame/tibble with 1,398 observations on two variables
#' \describe{
#' \item{size}{a factor with levels \code{Normal}, \code{Large}, and \code{Very Large}}
#' \item{status}{a factor with levels \code{Carrier} and \code{Non-carrier}}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' T1 <- xtabs(~size + status, data = Tonsils)
#' T1
#' prop.table(T1, 1)
#' prop.table(T1, 1)[2, 1]
#' barplot(t(T1), legend = TRUE, beside = TRUE, col = c("red", "green"))
#' \dontrun{
#' library(dplyr)
#' library(ggplot2)
#' NDF <- dplyr::count(Tonsils, size, status)
#' ggplot2::ggplot(data = NDF, aes(x = size, y = n, fill = status)) +
#' geom_bar(stat = "identity", position = "dodge") +
#' scale_fill_manual(values = c("red", "green")) +
#' theme_bw()
#' }
#'
"Tonsils"
#' The number of torts, average number of months to process a tort, and county
#' population from the court files of the nation's largest counties
#'
#' Data for Exercise 5.13
#'
#'
#' @name Tort
#' @docType data
#' @format A data frame/tibble with 45 observations on five variables
#' \describe{
#' \item{county}{U.S. county}
#' \item{months}{average number of months to process a tort}
#' \item{population}{population of the county}
#' \item{torts}{number of torts}
#' \item{rate}{rate per 10,000 residents}
#' }
#'
#' @source U.S. Department of Justice, \emph{Tort Cases in Large Counties}, Bureau of Justice
#' Statistics Special Report, April 1995.
#'
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' EDA(Tort$months)
#'
"Tort"
#' Hazardous waste sites near minority communities
#'
#' Data for Exercises 1.55, 5.08, 5.109, 8.58, and 10.35
#'
#'
#' @name Toxic
#' @docType data
#' @format A data frame/tibble with 51 observations on five variables
#' \describe{
#' \item{state}{U.S. state}
#' \item{region}{U.S. region}
#' \item{sites}{number of commercial hazardous waste sites}
#' \item{minority}{percent of minorities living in communities with commercial hazardous waste sites}
#' \item{percent}{a numeric vector}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' hist(Toxic$sites, col = "red")
#' hist(Toxic$minority, col = "blue")
#' qqnorm(Toxic$minority)
#' qqline(Toxic$minority)
#' boxplot(sites ~ region, data = Toxic, col = "lightgreen")
#' tapply(Toxic$sites, Toxic$region, median)
#' kruskal.test(sites ~ factor(region), data = Toxic)
#'
"Toxic"
#' National Olympic records for women in several races
#'
#' Data for Exercises 2.97, 5.115, and 9.62
#'
#'
#' @name Track
#' @docType data
#' @format A data frame with 55 observations on eight variables
#' \describe{
#' \item{country}{athlete's country}
#' \item{100m}{time in seconds for 100 m}
#' \item{200m}{time in seconds for 200 m}
#' \item{400m}{time in seconds for 400 m}
#' \item{800m}{time in minutes for 800 m}
#' \item{1500m}{time in minutes for 1500 m}
#' \item{3000m}{time in minutes for 3000 m}
#' \item{marathon}{time in minutes for marathon}
#' }
#'
#' @source Dawkins, B. (1989), "Multivariate Analysis of National Track Records," \emph{The American Statistician, 43}(2), 110-115.
#'
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' plot(`200m` ~ `100m`, data = Track)
#' plot(`400m` ~ `100m`, data = Track)
#' plot(`400m` ~ `200m`, data = Track)
#' cor(Track[, 2:8])
#'
"Track"
#' Olympic winning times for the men's 1500-meter run
#'
#' Data for Exercise 1.36
#'
#'
#' @name Track15
#' @docType data
#' @format A data frame/tibble with 26 observations on two variables
#' \describe{
#' \item{year}{Olympic year}
#' \item{time}{Olympic winning time (in seconds) for the 1500-meter run}
#' }
#'
#' @source \emph{The World Almanac and Book of Facts}, 2000.
#'
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' plot(time~ year, data = Track15, type = "b", pch = 19,
#' ylab = "1500m time in seconds", col = "green")
#'
"Track15"
#' Illustrates analysis of variance for three treatment groups
#'
#' Data for Exercise 10.44
#'
#'
#' @name Treatments
#' @docType data
#' @format A data frame/tibble with 24 observations on two variables
#' \describe{
#' \item{score}{score from an experiment}
#' \item{group}{factor with levels 1, 2, and 3}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' boxplot(score ~ group, data = Treatments, col = "violet")
#' summary(aov(score ~ group, data = Treatments))
#' summary(lm(score ~ group, data = Treatments))
#' anova(lm(score ~ group, data = Treatments))
#'
"Treatments"
#' Number of trees in 20 grids
#'
#' Data for Exercise 1.50
#'
#'
#' @name Trees
#' @docType data
#' @format A data frame/tibble with 20 observations on one variable
#' \describe{
#' \item{number}{number of trees in a grid}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' stem(Trees$number)
#' hist(Trees$number, main = "Exercise 1.50", xlab = "number",
#' col = "brown")
#'
"Trees"
#' Miles per gallon for standard 4-wheel drive trucks manufactured by
#' Chevrolet, Dodge and Ford
#'
#' Data for Example 10.2
#'
#'
#' @name Trucks
#' @docType data
#' @format A data frame/tibble with 15 observations on two variables
#' \describe{
#' \item{mpg}{miles per gallon}
#' \item{truck}{a factor with levels \code{chevy}, \code{dodge}, and \code{ford}}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' boxplot(mpg ~ truck, data = Trucks, horizontal = TRUE, las = 1)
#' summary(aov(mpg ~ truck, data = Trucks))
#'
"Trucks"
#' Percent of students that watch more than 6 hours of TV per day versus
#' national math test scores
#'
#' Data for Examples 2.1 and 2.7
#'
#'
#' @name Tv
#' @docType data
#' @format A data frame/tibble with 53 observations on three variables
#' \describe{
#' \item{state}{U.S. state}
#' \item{percent}{percent of students who watch more than six hours of TV a day}
#' \item{test}{state average on national math test}
#' }
#'
#' @source Educational Testing Services.
#'
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' plot(test ~ percent, data = Tv, col = "blue")
#' cor(Tv$test, Tv$percent)
#'
"Tv"
#' Intelligence test scores for identical twins in which one twin is given a
#' drug
#'
#' Data for Exercise 7.54
#'
#'
#' @name Twin
#' @docType data
#' @format A data frame/tibble with nine observations on three variables
#' \describe{
#' \item{twinA}{score on intelligence test without drug}
#' \item{twinB}{score on intelligence test after taking drug}
#' \item{differ}{\code{twinA} - \code{twinB}}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' qqnorm(Twin$differ)
#' qqline(Twin$differ)
#' shapiro.test(Twin$differ)
#' t.test(Twin$differ)
#'
"Twin"
#' Data set describing a sample of undergraduate students
#'
#' Data for Exercise 1.15
#'
#'
#' @name Undergrad
#' @docType data
#' @format A data frame/tibble with 100 observations on six variables
#' \describe{
#' \item{gender}{character variable with values \code{Female} and \code{Male}}
#' \item{major}{college major}
#' \item{class}{college year group classification}
#' \item{gpa}{grade point average}
#' \item{sat}{Scholastic Assessment Test score}
#' \item{drops}{number of courses dropped}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' stripchart(gpa ~ class, data = Undergrad, method = "stack",
#' col = c("blue","red","green","lightblue"),
#' pch = 19, main = "GPA versus Class")
#' stripchart(gpa ~ gender, data = Undergrad, method = "stack",
#' col = c("red", "blue"), pch = 19,
#' main = "GPA versus Gender")
#' stripchart(sat ~ drops, data = Undergrad, method = "stack",
#' col = c("blue", "red", "green", "lightblue"),
#' pch = 19, main = "SAT versus Drops")
#' stripchart(drops ~ gender, data = Undergrad, method = "stack",
#' col = c("red", "blue"), pch = 19, main = "Drops versus Gender")
#' \dontrun{
#' library(ggplot2)
#' ggplot2::ggplot(data = Undergrad, aes(x = sat, y = drops, fill = factor(drops))) +
#' facet_grid(drops ~.) +
#' geom_dotplot() +
#' guides(fill = FALSE)
#' }
#'
"Undergrad"
#' Number of days of paid holidays and vacation leave for sample of 35 textile
#' workers
#'
#' Data for Exercises 6.46 and 6.98
#'
#'
#' @name Vacation
#' @docType data
#' @format A data frame/tibble with 35 observations on one variable
#' \describe{
#' \item{number}{number of days of paid holidays and vacation leave taken}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' boxplot(Vacation$number, col = "violet")
#' hist(Vacation$number, main = "Exercise 6.46", col = "blue",
#' xlab = "number of days of paid holidays and vacation leave taken")
#' t.test(Vacation$number, mu = 24)
#'
"Vacation"
#' Reported serious reactions due to vaccines in 11 southern states
#'
#' Data for Exercise 1.111
#'
#'
#' @name Vaccine
#' @docType data
#' @format A data frame/tibble with 11 observations on two variables
#' \describe{
#' \item{state}{U.S. state}
#' \item{number}{number of reported serious reactions per million doses of a vaccine}
#' }
#'
#' @source Center for Disease Control, Atlanta, Georgia.
#'
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' stem(Vaccine$number, scale = 2)
#' fn <- fivenum(Vaccine$number)
#' fn
#' iqr <- IQR(Vaccine$number)
#' iqr
#'
"Vaccine"
#' Fatality ratings for foreign and domestic vehicles
#'
#' Data for Exercise 8.34
#'
#'
#' @name Vehicle
#' @docType data
#' @format A data frame/tibble with 151 observations on two variables
#' \describe{
#' \item{make}{a factor with levels \code{domestic} and \code{foreign}}
#' \item{rating}{a factor with levels \code{Much better than average},
#' \code{Above average}, \code{Average}, \code{Below average}, and \code{Much worse than average}}
#' }
#'
#' @source Insurance Institute for Highway Safety and the Highway Loss Data Institute, 1995.
#'
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' T1 <- xtabs(~make + rating, data = Vehicle)
#' T1
#' chisq.test(T1)
#'
"Vehicle"
#' Verbal test scores and number of library books checked out for 15 eighth
#' graders
#'
#' Data for Exercise 9.30
#'
#'
#' @name Verbal
#' @docType data
#' @format A data frame/tibble with 15 observations on two variables
#' \describe{
#' \item{number}{number of library books checked out}
#' \item{verbal}{verbal test score}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' plot(verbal ~ number, data = Verbal)
#' abline(lm(verbal ~ number, data = Verbal), col = "red")
#' summary(lm(verbal ~ number, data = Verbal))
#'
"Verbal"
#' Number of sunspots versus mean annual level of Lake Victoria Nyanza from
#' 1902 to 1921
#'
#' Data for Exercise 2.98
#'
#'
#' @name Victoria
#' @docType data
#' @format A data frame/tibble with 20 observations on three variables
#' \describe{
#' \item{year}{year}
#' \item{level}{mean annual level of Lake Victoria Nyanza}
#' \item{sunspot}{number of sunspots}
#' }
#'
#' @source N. Shaw, \emph{Manual of Meteorology}, Vol. 1 (London: Cambridge University Press, 1942),
#' p. 284; and F. Mosteller and J. W. Tukey, \emph{Data Analysis and Regression} (Reading, MA: Addison-Wesley, 1977).
#'
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' plot(level ~ sunspot, data = Victoria)
#' model <- lm(level ~ sunspot, data = Victoria)
#' summary(model)
#' rm(model)
#'
"Victoria"
#' Viscosity measurements of a substance on two different days
#'
#' Data for Exercise 7.44
#'
#'
#' @name Viscosit
#' @docType data
#' @format A data frame/tibble with 11 observations on two variables
#' \describe{
#' \item{first}{viscosity measurement for a certain substance on day one}
#' \item{second}{viscosity measurement for a certain substance on day two}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' boxplot(Viscosit$first, Viscosit$second, col = "blue")
#' t.test(Viscosit$first, Viscosit$second, var.equal = TRUE)
#'
"Viscosit"
#' Visual acuity of a group of subjects tested under a specified dose of a drug
#'
#' Data for Exercise 5.6
#'
#'
#' @name Visual
#' @docType data
#' @format A data frame/tibble with 18 observations on one variable
#' \describe{
#' \item{visual}{visual acuity measurement}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' stem(Visual$visual)
#' boxplot(Visual$visual, col = "purple")
#'
"Visual"
#' Reading scores before and after vocabulary training for 14 employees who did
#' not complete high school
#'
#' Data for Exercise 7.80
#'
#'
#' @name Vocab
#' @docType data
#' @format A data frame/tibble with 14 observations on two variables
#' \describe{
#' \item{first}{reading test score before formal vocabulary training}
#' \item{second}{reading test score after formal vocabulary training}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' t.test(Pair(Vocab$first, Vocab$second) ~ 1)
#'
"Vocab"
#' Volume of injected waste water from Rocky Mountain Arsenal and number of
#' earthquakes near Denver
#'
#' Data for Exercise 9.18
#'
#'
#' @name Wastewat
#' @docType data
#' @format A data frame/tibble with 44 observations on two variables
#' \describe{
#' \item{gallons}{injected water (in million gallons)}
#' \item{number}{number of earthquakes detected in Denver}
#' }
#'
#' @source Davis, J. C. (1986), \emph{Statistics and Data Analysis in Geology}, 2 ed., John Wiley and Sons,
#' New York, p. 228, and Bardwell, G. E. (1970), Some Statistical Features of the Relationship between
#' Rocky Mountain Arsenal Waste Disposal and Frequency of Earthquakes, \emph{Geological Society of America, Engineering
#' Geology Case Histories, 8}, 33-337.
#'
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' plot(number ~ gallons, data = Wastewat)
#' model <- lm(number ~ gallons, data = Wastewat)
#' summary(model)
#' anova(model)
#' plot(model, which = 2)
#'
"Wastewat"
#' Weather casualties in 1994
#'
#' Data for Exercise 1.30
#'
#'
#' @name Weather94
#' @docType data
#' @format A data frame/tibble with 388 observations on one variable
#' \describe{
#' \item{type}{factor with levels \code{Extreme Temp}, \code{Flash Flood},
#' \code{Fog}, \code{High Wind}, \code{Hurricane}, \code{Lighting}, \code{Other},
#' \code{River Flood}, \code{Thunderstorm}, \code{Tornado}, and \code{Winter Weather}}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' T1 <- xtabs(~type, data = Weather94)
#' T1
#' par(mar = c(5.1 + 2, 4.1 - 1, 4.1 - 2, 2.1))
#' barplot(sort(T1, decreasing = TRUE), las = 2, col = rainbow(11))
#' par(mar = c(5.1, 4.1, 4.1, 2.1))
#' \dontrun{
#' library(ggplot2)
#' T2 <- as.data.frame(T1)
#' T2
#' ggplot2::ggplot(data =T2, aes(x = reorder(type, Freq), y = Freq)) +
#' geom_bar(stat = "identity", fill = "purple") +
#' theme_bw() +
#' theme(axis.text.x = element_text(angle = 55, vjust = 0.5)) +
#' labs(x = "", y = "count")
#' }
#'
"Weather94"
#' Price of a bushel of wheat versus the national weekly earnings of production
#' workers
#'
#' Data for Exercise 2.11
#'
#'
#' @name Wheat
#' @docType data
#' @format A data frame/tibble with 19 observations on three variables
#' \describe{
#' \item{year}{year}
#' \item{earnings}{national weekly earnings (in dollars) for production workers}
#' \item{price}{price for a bushel of wheat (in dollars)}
#' }
#'
#' @source \emph{The World Almanac and Book of Facts}, 2000.
#'
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' par(mfrow = c(1, 2))
#' plot(earnings ~ year, data = Wheat)
#' plot(price ~ year, data = Wheat)
#' par(mfrow = c(1, 1))
#'
"Wheat"
#' Direct current produced by different wind velocities
#'
#' Data for Exercise 9.34
#'
#'
#' @name Windmill
#' @docType data
#' @format A data frame/tibble with 25 observations on two variables
#' \describe{
#' \item{velocity}{wind velocity (miles per hour)}
#' \item{output}{power generated (DC volts)}
#' }
#'
#' @source Joglekar, et al. (1989), Lack of Fit Testing when Replicates Are Not Available,
#' \emph{The American Statistician, 43},(3), 135-143.
#'
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' summary(lm(output ~ velocity, data = Windmill))
#' anova(lm(output ~ velocity, data = Windmill))
#'
"Windmill"
#' Wind leakage for storm windows exposed to a 50 mph wind
#'
#' Data for Exercise 6.54
#'
#'
#' @name Window
#' @docType data
#' @format A data frame/tibble with nine observations on two variables
#' \describe{
#' \item{window}{window number}
#' \item{leakage}{percent leakage from a 50 mph wind}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' SIGN.test(Window$leakage, md = 0.125, alternative = "greater")
#'
"Window"
#' Baseball team wins versus seven independent variables for National League teams
#' in 1990
#'
#' Data for Exercise 9.23
#'
#'
#' @name Wins
#' @docType data
#' @format A data frame with 12 observations on nine variables
#' \describe{
#' \item{team}{name of team}
#' \item{wins}{number of wins}
#' \item{batavg}{batting average}
#' \item{rbi}{runs batted in}
#' \item{stole}{bases stolen}
#' \item{strkout}{number of strikeouts}
#' \item{caught}{number of times caught stealing}
#' \item{errors}{number of errors}
#' \item{era}{earned run average}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' plot(wins ~ era, data = Wins)
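#' # A simple least-squares fit of wins on earned run average (illustrative sketch):
#' summary(lm(wins ~ era, data = Wins))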
#' \dontrun{
#' library(ggplot2)
#' ggplot2::ggplot(data = Wins, aes(x = era, y = wins)) +
#' geom_point() +
#' geom_smooth(method = "lm", se = FALSE) +
#' theme_bw()
#' }
#'
"Wins"
#' Strength tests of two types of wool fabric
#'
#' Data for Exercise 7.42
#'
#'
#' @name Wool
#' @docType data
#' @format A data frame/tibble with 20 observations on two variables
#' \describe{
#' \item{type}{type of wool (\code{Type I}, \code{Type 2})}
#' \item{strength}{strength of wool}
#' }
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' boxplot(strength ~ type, data = Wool, col = c("blue", "purple"))
#' t.test(strength ~ type, data = Wool, var.equal = TRUE)
#'
"Wool"
#' Monthly sunspot activity from 1974 to 2000
#'
#' Data for Exercise 2.7
#'
#'
#' @name Yearsunspot
#' @docType data
#' @format A data frame/tibble with 252 observations on two variables
#' \describe{
#' \item{number}{average number of sunspots}
#' \item{year}{date}
#' }
#'
#' @source NASA/Marshall Space Flight Center, Huntsville, AL 35812.
#'
#' @references Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
#' Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
#' @keywords datasets
#' @examples
#'
#' plot(number ~ year, data = Yearsunspot)
#'
"Yearsunspot"
#'
/scratch/gouwar.j/cran-all/cranData/BSDA/R/BSDA-package.R
#' Confidence Interval Simulation Program
#'
#' This program simulates random samples from which it constructs confidence
#' intervals for one of the parameters mean (Mu), variance (Sigma), or
#' proportion of successes (Pi).
#'
#' Default is to construct confidence intervals for the population mean.
#' Simulated confidence intervals for the population variance or population
#' proportion of successes are possible by selecting the appropriate value in
#' the type argument.
#'
#' @param samples the number of samples desired.
#' @param n the size of each sample.
#' @param mu if constructing confidence intervals for the population mean or
#' the population variance, mu is the population mean (i.e., type is either
#' \code{"Mean"} or \code{"Var"}). If constructing confidence intervals
#' for the population proportion of successes, the value entered for mu
#' represents the population proportion of successes \code{(Pi)}, and as such,
#' must be a number between 0 and 1.
#' @param sigma the population standard deviation. \code{sigma} is not required
#' if confidence intervals are of type \code{"Pi"}.
#' @param conf.level confidence level for the graphed confidence intervals,
#' restricted to lie between zero and one.
#' @param type character string, one of \code{"Mean"}, \code{"Var"} or
#' \code{"Pi"}, or just the initial letter of each, indicating the type of
#' confidence interval simulation to perform.
#' @return Graph depicts simulated confidence intervals. The number of
#' confidence intervals that do not contain the parameter of interest is
#' counted and reported in the console.
#' @author Alan T. Arnholt
#' @keywords distribution
#' @examples
#'
#' CIsim(100, 30, 100, 10)
#' # Simulates 100 samples of size 30 from
#' # a normal distribution with mean 100
#' # and standard deviation 10. From the
#' # 100 simulated samples, 95% confidence
#' # intervals for the Mean are constructed
#' # and depicted in the graph.
#'
#' CIsim(100, 30, 100, 10, type="Var")
#' # Simulates 100 samples of size 30 from
#' # a normal distribution with mean 100
#' # and standard deviation 10. From the
#' # 100 simulated samples, 95% confidence
#' # intervals for the variance are constructed
#' # and depicted in the graph.
#'
#' CIsim(100, 50, .5, type="Pi", conf.level=.90)
#' # Simulates 100 samples of size 50 from
#' # a binomial distribution where the population
#' # proportion of successes is 0.5. From the
#' # 100 simulated samples, 90% confidence
#' # intervals for Pi are constructed
#' # and depicted in the graph.
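#'
#' # A further sketch showing a non-default confidence level:
#' CIsim(100, 30, 100, 10, conf.level = 0.99)
#' # Simulates 100 samples of size 30 from a normal distribution
#' # with mean 100 and standard deviation 10, and graphs 99%
#' # confidence intervals for the mean.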
#'
#' @export CIsim
CIsim <-
function(samples=100, n=30, mu=0, sigma=1, conf.level = 0.95, type ="Mean")
{
Adkblue <- "#0080FF"
Aorange <- "#FF4C0C"
alpha <-1-conf.level
CL<-conf.level*100
N <-samples
choices <- c("Mean", "Var", "Pi")
alt <- pmatch(type, choices)
type <- choices[alt]
if (length(type) > 1 || is.na(type))
stop("alternative must be one \"Mean\", \"Var\", \"Pi\"")
if (type == "Pi" && (mu <=0 |mu >= 1))
stop("Value for Pi (mu) must be between 0 and 1.")
if (N <= 0 || n <= 0)
stop("Number of random CIs (samples) and sample size (n) must both be at least 1")
if (!missing(conf.level) && (length(conf.level) != 1 || !is.finite(conf.level) || conf.level <= 0 || conf.level >= 1))
stop("'conf.level' must be a single number between 0 and 1")
if (sigma <= 0 && (type=="Var" || type=="Mean") )
stop("Variance must be a positive value")
if (type == "Mean")
{
junk <- rnorm(N*n, mu, sigma)
jmat <- matrix(junk, N, n)
xbar <- apply(jmat, 1, mean)
ll <- xbar - qnorm(1 - alpha/2)*sigma/sqrt(n)
ul <- xbar + qnorm(1 - alpha/2)*sigma/sqrt(n)
notin <- sum((ll > mu) + (ul < mu))
percentage <- round((notin/N) * 100,2)
plot(ll, type = "n", ylim = c(min(ll), max(ul)), xlab = " ", ylab = " ")
title(sub=bquote(paste("Note: ",.(percentage),"% of the random confidence intervals do not contain ", mu ,"=", .(mu))))
title(main=bquote(paste(.(N), " random ", .(CL), "% confidence intervals where ", mu, " = ", .(mu) )))
for(i in 1:N)
{
low<-ll[i];
high<-ul[i];
if(low < mu & high > mu)
{
segments(i,low,i,high)
}
else if(low > mu & high > mu )
{
segments(i,low,i,high, col=Aorange, lwd=5)
}
else
{
segments(i,low,i,high, col=Adkblue, lwd=5)
}
}
abline(h = mu)
cat(percentage,"% of the random confidence intervals do not contain Mu =", mu,".", "\n")
}
else if (type == "Var")
{
junk <- rnorm(N*n, mu, sigma)
jmat <- matrix(junk, N, n)
s2 <- apply(jmat, 1, var)
ll <- ((n - 1)*s2)/qchisq(1 - alpha/2, (n - 1))
ul <- ((n - 1)*s2)/qchisq(alpha/2, (n -1))
variance <- sigma^2
notin <- sum((ll > variance) + (ul < variance))
percentage <- round((notin/samples) * 100,2)
plot(ll, type = "n", ylim = c(min(ll), max(ul)), xlab = " ", ylab = " " )
title(sub=bquote(paste("Note: ",.(percentage),"% of the random confidence intervals do not contain ", sigma^2 ,"=", .(variance))))
title(main=bquote(paste(.(N), " random ", .(CL), "% confidence intervals where ", sigma^2, " = ", .(variance) )))
for(i in 1:N)
{
low<-ll[i]
high<-ul[i]
if(low < variance & high > variance)
{
segments(i,low,i,high)
}
else if( low > variance & high > variance )
{
segments(i,low,i,high, col=Aorange, lwd=5)
}
else
{
segments(i,low,i,high, col=Adkblue, lwd=5)
}
}
abline(h = variance)
cat(percentage,"% of the random confidence intervals do not contain Var =", sigma^2,".", "\n")
}
else if (type == "Pi")
{
X <- rbinom(samples, n, mu)
p <- X/n
ll <- p - qnorm(1 - alpha/2)*sqrt((p * (1 - p))/n)
ul <- p + qnorm(1 - alpha/2)*sqrt((p * (1 - p))/n)
notin <- sum((ll > mu) + (ul < mu) )
percentage <- round((notin/samples)*100,2)
plot(ll, type = "n", ylim = c(min(ll), max(ul)), xlab = " ", ylab = " " )
title(sub=bquote(paste("Note: ",.(percentage),"% of the random confidence intervals do not contain ",pi,"=",.(mu))))
title(main=bquote(paste(.(N), " random ", .(CL), "% confidence intervals where ", pi, "=", .(mu) )))
for(i in 1:N)
{
low<-ll[i]
high<-ul[i]
if( low < mu & high > mu)
{
segments(i,low,i,high)
}
else if( low > mu & high > mu )
{
segments(i,low,i,high, col=Aorange, lwd=5)
}
else
{
segments(i,low,i,high, col=Adkblue, lwd=5)
}
}
abline(h = mu)
cat(percentage,"% of the random confidence intervals do not contain Pi =", mu,".", "\n")
}
}
/scratch/gouwar.j/cran-all/cranData/BSDA/R/CIsim.R
#' Combinations
#'
#' Computes all possible combinations of \code{n} objects taken \code{k} at a
#' time.
#'
#'
#' @param n a number.
#' @param k a number less than or equal to \code{n}.
#' @return Returns a matrix containing the possible combinations of \code{n}
#' objects taken \code{k} at a time.
#' @seealso \code{\link{SRS}}
#' @keywords distribution
#' @examples
#'
#' Combinations(5,2)
#' # The columns in the matrix list the values of the 10 possible
#' # combinations of 5 things taken 2 at a time.
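#'
#' # A quick sanity check (sketch): the number of columns equals choose(n, k).
#' ncol(Combinations(5, 2)) == choose(5, 2)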
#'
#' @export Combinations
Combinations <-
function(n, k){
# Compute all n choose k combinations of size k from 1:n
# Return matrix with k rows and choose(n,k) columns.
# Avoids recursion. Code provided by Tim Hesterberg
if(!is.numeric(n) || length(n) != 1 || n%%1) stop("'n' must be an integer")
if(!is.numeric(k) || length(k) != 1 || k%%1) stop("'k' must be an integer")
if(k > n || k <= 0) return(numeric(0))
rowMatrix <- function(n) structure(1:n, dim=c(1,n))
colMatrix <- function(n) structure(1:n, dim=c(n,1))
if(k == n) return(colMatrix(n))
if(k == 1) return(rowMatrix(n))
L <- vector("list", k)
# L[[j]] will contain combinations(N, j) for N = 2:n
L[[1]] <- rowMatrix(2)
L[[2]] <- colMatrix(2)
Diff <- n-k
for(N in seq(3, n, by=1)){
# loop over j in reverse order, to avoid overwriting
for(j in seq(min(k, N-1), max(2, N-Diff), by= -1))
L[[j]] <- cbind(L[[j]], rbind(L[[j-1]], N, deparse.level=1))
if(N <= Diff+1) L[[1]] <- rowMatrix(N)
else L[[N-(Diff+1)]] <- numeric(0)
if(N <= k) L[[N]] <- colMatrix(N)
}
L[[k]]
}
/scratch/gouwar.j/cran-all/cranData/BSDA/R/Combinations.R
#' Exploratory Data Analysis
#'
#' Function that produces a histogram, density plot, boxplot, and Q-Q plot.
#'
#' Will not return summary statistics in the console for data sets containing
#' more than 5000 observations. It will, however, still produce graphical
#' output for such data sets.
#'
#' @param x numeric vector. \code{NA}s and \code{Inf}s are allowed but will be
#' removed.
#' @param trim fraction (between 0 and 0.5, inclusive) of values to be trimmed
#' from each end of the ordered data. If \code{trim = 0.5}, the result is the
#' median.
#' @return Function returns a named vector of summary statistics (measures of
#' center, spread, and shape). The values returned for the quartiles are based
#' on the definitions provided in \cite{BSDA}. The boxplot is based on the
#' quartiles returned in the console.
#' @note Requires package \pkg{e1071}.
#' @author Alan T. Arnholt
#' @keywords univar
#' @examples
#'
#' EDA(rnorm(100))
#' # Produces four graphs for the 100 randomly
#' # generated standard normal variates.
#'
#' @export EDA
EDA <-
function(x, trim = 0.05)
{
# require(e1071)
#rgb(0, 128/255, 1, names="Adkblue") #Alan's dark blue
#rgb(169/255, 226/255, 1, names="Altblue") #Alan's light blue
Altblue <- "#A9E2FF"
Adkblue <- "#0080FF"
Ared <- "#C51111"
varname <- deparse(substitute(x))
N <- length(x)
UM <- sum(is.na(x))
n <- N - UM
x <- x[!(is.na(x) > 0)]
LQ1 <- (n + 1)/4
LQ3 <- (3 * (n + 1))/4
Sort <- sort(x)
V1 <- floor(LQ1)
V2 <- floor(LQ3)
V3 <- V1 + 1
V4 <- V2 + 1
Q1 <- round(Sort[V1] + (LQ1 - V1) * (Sort[V3] - Sort[V1]), 3)
Q3 <- round(Sort[V2] + (LQ3 - V2) * (Sort[V4] - Sort[V2]), 3)
IQR <- round(Q3 - Q1, 3)
Min <- round(min(x), 3)
Max <- round(max(x), 3)
Stdev <- round(sd(x, na.rm = TRUE), 3)
Mean <- round(mean(x, na.rm = TRUE), 3)
Median <- round(median(x, na.rm = TRUE), 3)
TrMean <- round(mean(x, trim = trim), 3)
Var <- round(var(x, na.rm = TRUE), 3)
SE <- round(Stdev/sqrt(n), 3)
Range <- round(Max - Min, 3)
par(omi=c(0,1,.5,1))
par(mfrow = c(2, 2))
par(mar = c(1, 0, 2, 0))
par(pty = "s")
print(varname)
hist(x, probability = TRUE, col=Adkblue, xlab = "", ylab = "", axes = FALSE,
main = paste("Histogram of", varname) )
box()
iqd <- summary(x)[5] - summary(x)[2]
plot(density(x, width = 2 * iqd, na.rm = TRUE), xlab = "",
ylab = "", axes = FALSE, type = "n", main = paste("Density of",
varname))
lines(density(x, width = 2 * iqd, na.rm = TRUE), col=Ared)
box()
l.out <- x[x < (Q1 - 1.5 * IQR)]
r.out <- x[x > (Q3 + 1.5 * IQR)]
outliers <- c(l.out, r.out)
rest <- x[x > (Q1 - 1.5 * IQR) & x < (Q3 + 1.5 * IQR)]
Minrest <- min(rest)
Maxrest <- max(rest)
plot(x, x, main = paste("Boxplot of", varname), xlab =
"", ylab = "", axes = FALSE, type = "n", xlim = c(min(x), max(
x)), ylim = c(0, 1))
box()
polygon(c(Q1, Q1, Q3, Q3), c(0.3, 0.7, 0.7, 0.3), density = -1, col=Altblue)
points(outliers, c(rep(0.5, length(outliers))), col = Ared)
lines(c(min(rest), Q1), c(0.5, 0.5), lty = 1)
lines(c(Q3, max(rest)), c(0.5, 0.5), lty = 1)
lines(c(min(rest), min(rest)), c(0.4, 0.6))
lines(c(max(rest), max(rest)), c(0.4, 0.6))
lines(c(Q1, Q1), c(0.3, 0.7))
lines(c(Q3, Q3), c(0.3, 0.7))
lines(c(Median, Median), c(0.3, 0.7))
lines(c(Q1, Q3), c(0.3, 0.3))
lines(c(Q1, Q3), c(0.7, 0.7))
points(Mean, 0.5, pch = 16, col = "black")
qqnorm(x, col = "black", main = paste("Q-Q Plot of", varname), xlab = "", ylab = "", axes = FALSE)
qqline(x, col = Ared)
box()
mtext("EXPLORATORY DATA ANALYSIS", side = 3, outer = TRUE, cex = 1.5,
col = Adkblue, line = 1)
par(oma = c(0, 0, 0, 0))
par(mfrow = c(1, 1))
par(mar = c(5.1, 4.1, 4.1, 2.1))
par(omi=c(0,0,0,0))
par(pty = "m")
SW <- shapiro.test(x)
K <- round(kurtosis(x), 3)
S <- round(skewness(x), 3)
SWpval <- round(SW$p.value, 3)
TOT <- c(n, UM, Min, Q1, Mean, Median, TrMean, Q3, Max, Stdev, Var,
SE, IQR, Range, K, S, SWpval)
names(TOT) <- c("Size (n)", "Missing", "Minimum", " 1st Qu", " Mean",
" Median", "TrMean", " 3rd Qu", " Max.", " Stdev.",
" Var.", "SE Mean", " I.Q.R.", " Range", "Kurtosis",
"Skewness", "SW p-val")
return(TOT)
}
/scratch/gouwar.j/cran-all/cranData/BSDA/R/EDA.R
#' Sign Test
#'
#' This function tests a hypothesis based on the sign test and reports
#' linearly interpolated confidence intervals for one-sample problems.
#'
#' Computes a \dQuote{Dependent-samples Sign-Test} if both \code{x} and
#' \code{y} are provided. If only \code{x} is provided, computes the
#' \dQuote{Sign-Test}.
#'
#' @param x numeric vector; \code{NA}s and \code{Inf}s are allowed but will be
#' removed.
#' @param y optional numeric vector; \code{NA}s and \code{Inf}s are allowed but
#' will be removed.
#' @param md a single number representing the value of the population median
#' specified by the null hypothesis
#' @param alternative is a character string, one of \code{"greater"},
#' \code{"less"}, or \code{"two.sided"}, or the initial letter of each,
#' indicating the specification of the alternative hypothesis. For one-sample
#' tests, \code{alternative} refers to the true median of the parent population
#' in relation to the hypothesized value of the median.
#' @param conf.level confidence level for the returned confidence interval,
#' restricted to lie between zero and one
#' @param ... further arguments to be passed to or from methods
#' @return A list of class \code{htest_S}, containing the following components:
#' \item{statistic}{the S-statistic (the number of positive differences between
#' the data and the hypothesized median), with names attribute \dQuote{S}.}
#' \item{p.value}{the p-value for the test}
#' \item{conf.int}{is a confidence interval (vector of length 2) for the true
#' median based on linear interpolation. The confidence level is recorded in the attribute
#' \code{conf.level}. When the alternative is not \code{"two.sided"}, the
#' confidence interval will be half-infinite, to reflect the interpretation of
#' a confidence interval as the set of all values \code{k} for which one would
#' not reject the null hypothesis that the true mean or difference in means is
#' \code{k}. Here infinity will be represented by \code{Inf}.}
#' \item{estimate}{is a vector of length 1, giving the sample median; this
#' estimates the corresponding population parameter. Component \code{estimate}
#' has a names attribute describing its elements.}
#' \item{null.value}{is the value of the median specified by the null hypothesis.
#' This equals the input argument \code{md}. Component \code{null.value} has a
#' names attribute describing its elements.}
#' \item{alternative}{records the value of the input argument alternative:
#' \code{"greater"}, \code{"less"}, or \code{"two.sided"}}
#' \item{data.name}{a character string (vector of length 1)
#' containing the actual name of the input vector \code{x}}
#' \item{Confidence.Intervals}{a 3 by 3 matrix containing the lower achieved
#' confidence interval, the interpolated confidence interval, and the upper
#' achived confidence interval}
#'
#' @note The reported confidence interval is based on linear interpolation. The
#' lower and upper confidence levels are exact.
#'
#' @section Null Hypothesis: For the one-sample sign-test, the null hypothesis
#' is that the median of the population from which \code{x} is drawn is
#' \code{md}. For the two-sample dependent case, the null hypothesis is that
#' the median for the differences of the populations from which \code{x} and
#' \code{y} are drawn is \code{md}. The alternative hypothesis indicates the
#' direction of divergence of the population median for \code{x} from \code{md}
#' (i.e., \code{"greater"}, \code{"less"}, \code{"two.sided"}.)
#' @author Alan T. Arnholt
#' @seealso \code{\link{z.test}}, \code{\link{zsum.test}},
#' \code{\link{tsum.test}}
#' @references Gibbons, J.D. and Chakraborti, S. (1992). \emph{Nonparametric
#' Statistical Inference}. Marcel Dekker Inc., New York.
#'
#' Kitchens, L.J.(2003). \emph{Basic Statistics and Data Analysis}. Duxbury.
#'
#' Conover, W. J. (1980). \emph{Practical Nonparametric Statistics, 2nd ed}.
#' Wiley, New York.
#'
#' Lehmann, E. L. (1975). \emph{Nonparametrics: Statistical Methods Based on
#' Ranks}. Holden and Day, San Francisco.
#'
#' @export
#'
#' @examples
#'
#' x <- c(7.8, 6.6, 6.5, 7.4, 7.3, 7., 6.4, 7.1, 6.7, 7.6, 6.8)
#' SIGN.test(x, md = 6.5)
#' # Computes two-sided sign-test for the null hypothesis
#' # that the population median for 'x' is 6.5. The alternative
#' # hypothesis is that the median is not 6.5. An interpolated 95%
#' # confidence interval for the population median will be computed.
#'
#' reaction <- c(14.3, 13.7, 15.4, 14.7, 12.4, 13.1, 9.2, 14.2,
#' 14.4, 15.8, 11.3, 15.0)
#' SIGN.test(reaction, md = 15, alternative = "less")
#' # Data from Example 6.11 page 330 of Kitchens BSDA.
#' # Computes one-sided sign-test for the null hypothesis
#' # that the population median is 15. The alternative
#' # hypothesis is that the median is less than 15.
#' # An interpolated 95% upper bound for the population
#' # median will be computed.
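#'
#' # A sketch of the dependent-samples form; the paired data below are
#' # made up purely for illustration.
#' before <- c(12, 15, 9, 14, 11, 13, 10, 12, 14, 15)
#' after <- c(14, 16, 10, 17, 12, 13, 11, 15, 14, 17)
#' SIGN.test(before, after)
#' # The achieved and interpolated confidence intervals can be extracted
#' # from the returned list:
#' SIGN.test(before, after)$Confidence.Intervals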
#'
#'
SIGN.test <- function(x, y = NULL, md = 0, alternative = "two.sided", conf.level = 0.95, ...){
if(is.null(class(x))){
class(x) <- data.class(x)
}
UseMethod("SIGN.test")
}
#' @export
SIGN.test.default <-
function(x, y = NULL, md = 0, alternative = "two.sided", conf.level = 0.95, ...)
{
choices <- c("two.sided", "greater", "less")
alt <- pmatch(alternative, choices)
alternative <- choices[alt]
if(length(alternative) > 1 || is.na(alternative))
stop("alternative must be one \"greater\", \"less\", \"two.sided\"")
if(!missing(md))
if(length(md) != 1 || is.na(md))
stop("median must be a single number")
if(!missing(conf.level))
if(length(conf.level) != 1 || is.na(conf.level) || conf.level < 0 || conf.level > 1)
stop("conf.level must be a number between 0 and 1")
if( is.null(y) )
{
# One-Sample Sign-Test Exact Test
dname <- paste(deparse(substitute(x)))
x <- sort(x)
diff <- (x - md)
n <- length(x)
nt <- length(x) - sum(diff == 0)
s <- sum(diff > 0)
estimate <- median(x)
method <- c("One-sample Sign-Test")
names(estimate) <- c("median of x")
names(md) <- "median"
names(s) <- "s"
CIS <- "Conf Intervals"
if(alternative == "less")
{
# zobs <- (s-0.5*n)/sqrt(n*0.25)
pval <- sum(dbinom(0:s, nt, 0.5))
# Note: Code uses linear interpolation to arrive at the confidence intervals.
loc <- c(0:n)
prov <- (dbinom(loc, n, 0.5))
k <- loc[cumsum(prov) > (1 - conf.level)][1]
if(k < 1)
{
conf.level <- (1 - (sum(dbinom(k, n, 0.5))))
xl <- -Inf
xu <- x[n]
ici <- c(xl, xu)
}
else
{
ci1 <- c(-Inf, x[n - k + 1])
acl1 <- (1 - (sum(dbinom(0:k - 1, n, 0.5))))
ci2 <- c(-Inf, x[n - k])
acl2 <- (1 - (sum(dbinom(0:k, n, 0.5))))
xl <- -Inf
xu <- (((x[n - k + 1] - x[n - k]) * (conf.level - acl2))/(acl1 - acl2)) + x[n - k]
ici <- c(xl, xu)
}
}
else if(alternative == "greater")
{
pval <- (1 - sum(dbinom(0:s - 1, nt, 0.5)))
loc <- c(0:n)
prov <- (dbinom(loc, n, 0.5))
k <- loc[cumsum(prov) > (1 - conf.level)][1]
if(k < 1)
{
conf.level <- (1 - (sum(dbinom(k, n, 0.5))))
xl <- x[1]
xu <- Inf
ici <- c(xl, xu)
}
else
{
ci1 <- c(x[k], Inf)
acl1 <- (1 - (sum(dbinom(0:k - 1, n, 0.5))))
ci2 <- c(x[k + 1], Inf)
acl2 <- (1 - (sum(dbinom(0:k, n, 0.5))))
xl <- (((x[k] - x[k + 1]) * (conf.level - acl2))/(acl1 - acl2)) + x[k + 1]
xu <- Inf
ici <- c(xl, xu)
}
}
else
{
p1 <- sum(dbinom(0:s, nt, 0.5))
p2 <- (1 - sum(dbinom(0:s - 1, nt, 0.5)))
pval <- min(2 * p1, 2 * p2, 1)
loc <- c(0:n)
prov <- (dbinom(loc, n, 0.5))
k <- loc[cumsum(prov) > (1 - conf.level)/2][1]
if(k < 1)
{
conf.level <- (1 - 2 * (sum(dbinom(k, n, 0.5))))
xl <- x[1]
xu <- x[n]
ici <- c(xl, xu)
}
else
{
ci1 <- c(x[k], x[n - k + 1])
acl1 <- (1 - 2 * (sum(dbinom(0:k - 1, n, 0.5))))
ci2 <- c(x[k + 1], x[n - k])
acl2 <- (1 - 2 * (sum(dbinom(0:k, n, 0.5))))
xl <- (((x[k] - x[k + 1]) * (conf.level - acl2))/(acl1 - acl2)) + x[k + 1]
xu <- (((x[n - k + 1] - x[n - k]) * (conf.level - acl2))/(acl1 - acl2)) + x[n - k]
ici <- c(xl, xu)
}
}
}
else
{
# Paired-Samples Sign Test
if(length(x)!=length(y))
stop("Length of x must equal length of y")
xy <- sort(x-y)
diff <- (xy - md)
n <- length(xy)
nt <- length(xy) - sum(diff == 0)
s <- sum(diff > 0)
dname <- paste(deparse(substitute(x)), " and ", deparse(substitute(y)), sep = "")
estimate <- median(xy)
method <- c("Dependent-samples Sign-Test")
names(estimate) <- c("median of x-y")
names(md) <- "median difference"
names(s) <- "S"
CIS <- "Conf Intervals"
if(alternative == "less")
{
pval <- sum(dbinom(0:s, nt, 0.5))
# Note: Code uses linear interpolation to arrive at the confidence intervals.
loc <- c(0:n)
prov <- (dbinom(loc, n, 0.5))
k <- loc[cumsum(prov) > (1 - conf.level)][1]
if(k < 1)
{
conf.level <- (1 - (sum(dbinom(k, n, 0.5))))
xl <- -Inf
xu <- xy[n]
ici <- c(xl, xu)
}
else
{
ci1 <- c(-Inf, xy[n - k + 1])
acl1 <- (1 - (sum(dbinom(0:k - 1, n, 0.5))))
ci2 <- c(-Inf, xy[n - k])
acl2 <- (1 - (sum(dbinom(0:k, n, 0.5))))
xl <- -Inf
xu <- (((xy[n - k + 1] - xy[n - k]) * (conf.level - acl2))/(acl1 - acl2)) + xy[n - k]
ici <- c(xl, xu)
}
}
else if(alternative == "greater")
{
pval <- (1 - sum(dbinom(0:s - 1, nt, 0.5)))
loc <- c(0:n)
prov <- (dbinom(loc, n, 0.5))
k <- loc[cumsum(prov) > (1 - conf.level)][1]
if(k < 1)
{
conf.level <- (1 - (sum(dbinom(k, n, 0.5))))
xl <- xy[1]
xu <- Inf
ici <- c(xl, xu)
}
else
{
ci1 <- c(xy[k], Inf)
acl1 <- (1 - (sum(dbinom(0:k - 1, n, 0.5))))
ci2 <- c(xy[k + 1], Inf)
acl2 <- (1 - (sum(dbinom(0:k, n, 0.5))))
xl <- (((xy[k] - xy[k + 1]) * (conf.level - acl2))/(acl1 - acl2)) + xy[k + 1]
xu <- Inf
ici <- c(xl, xu)
}
}
else
{
p1 <- sum(dbinom(0:s, nt, 0.5))
p2 <- (1 - sum(dbinom(0:s - 1, nt, 0.5)))
pval <- min(2 * p1, 2 * p2, 1)
loc <- c(0:n)
prov <- (dbinom(loc, n, 0.5))
k <- loc[cumsum(prov) > (1 - conf.level)/2][1]
if(k < 1)
{
conf.level <- (1 - 2 * (sum(dbinom(k, n, 0.5))))
xl <- xy[1]
xu <- xy[n]
ici <- c(xl, xu)
}
else
{
ci1 <- c(xy[k], xy[n - k + 1])
acl1 <- (1 - 2 * (sum(dbinom(0:k - 1, n, 0.5))))
ci2 <- c(xy[k + 1], xy[n - k])
acl2 <- (1 - 2 * (sum(dbinom(0:k, n, 0.5))))
xl <- (((xy[k] - xy[k + 1]) * (conf.level - acl2))/(acl1 - acl2)) + xy[k + 1]
xu <- (((xy[n - k + 1] - xy[n - k]) * (conf.level - acl2))/(acl1 - acl2)) + xy[n - k]
ici <- c(xl, xu)
}
}
}
if(k < 1)
{
cint <- ici
attr(cint, "conf.level") <- conf.level
rval <- structure(list(statistic = s, parameter = NULL, p.value = pval,
conf.int = cint, estimate = estimate, null.value = md,
alternative = alternative, method = method, data.name = dname,
conf.int=cint, Confidence.Intervals = NULL ))
class(rval) <- "htest_S"
rval
}
else
{
result1 <- c(acl2, ci2)
result2 <- c(conf.level, ici)
result3 <- c(acl1, ci1)
Confidence.Intervals <- round(as.matrix(rbind(result1, result2, result3)), 4)
cnames <- c("Conf.Level", "L.E.pt", "U.E.pt")
rnames <- c("Lower Achieved CI", "Interpolated CI", "Upper Achieved CI")
dimnames(Confidence.Intervals) <- list(rnames, cnames)
cint <- ici
attr(cint, "conf.level") <- conf.level
rval <- structure(list(statistic = s, parameter = NULL, p.value = pval,
conf.int = cint, estimate = estimate, null.value = md,
alternative = alternative, method = method, data.name = dname,
Confidence.Intervals = Confidence.Intervals))
class(rval) <- "htest_S"
rval
}
}
#' @export
print.htest_S <- function (x, digits = getOption("digits"), prefix = "\t", ...)
{
cat("\n")
cat(strwrap(x$method, prefix = prefix), sep = "\n")
cat("\n")
cat("data: ", x$data.name, "\n", sep = "")
out <- character()
if (!is.null(x$statistic))
out <- c(out, paste(names(x$statistic), "=", format(signif(x$statistic,
max(1L, digits - 2L)))))
if (!is.null(x$parameter))
out <- c(out, paste(names(x$parameter), "=", format(signif(x$parameter,
max(1L, digits - 2L)))))
if (!is.null(x$p.value)) {
fp <- format.pval(x$p.value, digits = max(1L, digits -
3L))
out <- c(out, paste("p-value", if (substr(fp, 1L, 1L) ==
"<") fp else paste("=", fp)))
}
cat(strwrap(paste(out, collapse = ", ")), sep = "\n")
if (!is.null(x$alternative)) {
cat("alternative hypothesis: ")
if (!is.null(x$null.value)) {
if (length(x$null.value) == 1L) {
alt.char <- switch(x$alternative, two.sided = "not equal to",
less = "less than", greater = "greater than")
cat("true ", names(x$null.value), " is ", alt.char,
" ", x$null.value, "\n", sep = "")
}
else {
cat(x$alternative, "\nnull values:\n", sep = "")
print(x$null.value, digits = digits, ...)
}
}
else cat(x$alternative, "\n", sep = "")
}
if (!is.null(x$conf.int)) {
cat(format(100 * attr(x$conf.int, "conf.level")), " percent confidence interval:\n",
" ", paste(format(c(x$conf.int[1L], x$conf.int[2L])),
collapse = " "), "\n", sep = "")
}
if (!is.null(x$estimate)) {
cat("sample estimates:\n")
print(x$estimate, digits = digits, ...)
}
if(!is.null(x$Confidence.Intervals)){
cat("\n")
cat("Achieved and Interpolated Confidence Intervals: \n\n")
print(x$Confidence.Intervals)
cat("\n")
}
invisible(x)
}
/scratch/gouwar.j/cran-all/cranData/BSDA/R/SIGN.test.R
#' Simple Random Sampling
#'
#' Computes all possible samples from a given population using simple random
#' sampling.
#'
#'
#' @param POPvalues vector containing the population values.
#' @param n the sample size.
#' @return Returns a matrix containing the possible simple random samples of
#' size \code{n} taken from a population \code{POPvalues}.
#' @author Alan T. Arnholt
#' @seealso \code{\link{Combinations}}
#' @keywords distribution
#' @examples
#'
#' SRS(c(5,8,3),2)
#' # The rows in the matrix list the values for the 3 possible
#' # simple random samples of size 2 from the population of 5,8, and 3.
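#'
#' # Sanity check (sketch): the number of possible samples equals choose(N, n).
#' nrow(SRS(c(5, 8, 3), 2)) == choose(3, 2)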
#'
#' @export SRS
SRS <-
function(POPvalues,n)
{
# SRS generates all possible SRS's of size n
# from the population in vector POPvalues
# by calling the function Combinations.
N <- length(POPvalues)
store <- t(Combinations(N,n))
matrix(POPvalues[t(store)],nrow=nrow(store),byrow=TRUE)
}
/scratch/gouwar.j/cran-all/cranData/BSDA/R/SRS.R
#' Normal Area
#'
#' Function that computes and draws the area between two user-specified values
#' under a normal distribution with a user-specified mean and standard
#' deviation.
#'
#'
#' @param lower the lower value
#' @param upper the upper value
#' @param m the mean for the population
#' @param sig the standard deviation of the population
#' @author Alan T. Arnholt
#' @keywords distribution
#' @examples
#'
#' normarea(70, 130, 100, 15)
#' # Finds P(70 < X < 130) given X is N(100, 15).
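#'
#' # The reported area agrees with a direct calculation (sketch):
#' pnorm(130, 100, 15) - pnorm(70, 100, 15)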
#'
#' @export normarea
normarea <-
function (lower = -Inf, upper = Inf, m, sig)
{
Altblue <- "#CDCDED"
Fontcol <-"#3333B3"
par(mar=c(4,1,4,1))
area <- pnorm(upper, m, sig) - pnorm(lower, m, sig)
ra <- round(area,4)
x <- seq(m - 4 * sig, m + 4 * sig, length = 1000)
y <- dnorm(x, m, sig)
par(pty = "m")
plot(x, y, type = "n", xaxt = "n", yaxt = "n", xlab = "",
ylab = "",main="")
mtext(substitute(paste("The area between ",lower," and ",upper," is ",ra)),
side=3,line=1,font=2,cex=1.15)
mtext(substitute(paste("X~Normal (" ,mu==m,", ",sigma==sig,")" ),
list(m=m,sig=sig)),side=1,line=3,col=Fontcol)
if (lower == -Inf || lower < m - 4 * sig) {
lower <- m - 4 * sig
}
if (upper == Inf || upper > m + 4 * sig) {
upper <- m + 4 * sig
}
axis(1, at = c(m, lower, upper), labels = c(m, lower, upper))
xaxis1 <- seq(lower, upper, length = 200)
yaxis1 <- dnorm(xaxis1, m, sig)
xaxis1 <- c(lower, xaxis1, upper)
yaxis1 <- c(0, yaxis1, 0)
polygon(xaxis1, yaxis1, density = -1, col = Altblue)
lines(x, y, lwd = 2)
lines(c(m - 4 * sig, m + 4 * sig), c(0, 0), lwd = 2)
lines(c(lower, lower), c(0, dnorm(lower, m, sig)), lwd = 2)
lines(c(upper, upper), c(0, dnorm(upper, m, sig)), lwd = 2)
par(mar=c(5.1,4.1,4.1,2.1))
}
/scratch/gouwar.j/cran-all/cranData/BSDA/R/normarea.R