#' Auxiliary Function for Sidra Downloads
#'
#' Internal helper used by \code{sidraGet}. When a request exceeds the Sidra API record limit, it splits the query into smaller chunks and binds the partial results together.
#'
#' @param x A \code{character}. The Sidra table code, used to read the table description page.
#' @param len An \code{integer} vector. A sequence along the table codes, used as the loop index.
#' @param nova_req An \code{integer}. The number of chunks into which the request is split.
#' @param from A \code{character} or \code{numeric}. The starting period of the query.
#' @param to A \code{character} or \code{numeric}. The ending period of the query.
#' @param inputs A \code{character} vector. The Sidra table code(s).
#' @param territory A \code{character}. The territorial level, already formatted for the API (e.g. "n1/all").
#' @param variable A \code{numeric}. The code(s) of the variable(s) to be retrieved.
#' @param header A \code{character}. Either "y" or "n", indicating whether the API should return a header row.
#' @param sections A \code{list} or \code{character}. The classification/section strings appended to the API call.
#'
#'
#' @import xml2 rvest stringr
#' @importFrom lubridate month
#' @importFrom httr GET content
sidra.aux <- function(x, len, nova_req, from, to, inputs, territory, variable, header, sections) {
tabela <- xml2::read_html(paste0("http://api.sidra.ibge.gov.br/desctabapi.aspx?c=", x))
tabela <- rvest::html_text(tabela)
d = strsplit(tabela, split = "/P/")
d = strsplit(d[[1]][2], split = ":")
d = trimws(d[[1]][1])
if (stringr::str_count(d, "Ano") == 1){
minus = to - from
minus = floor(minus/ 3)
for(i in len){
tabela = data.frame()
header2 = NULL
for(j in seq(from,to, by=minus)){
tabela1=httr::GET(paste0("http://api.sidra.ibge.gov.br/values/",
"t/", inputs[i], "/", territory, "/", "p/",
from, "-", to,
"/v/", variable[i], "/f/", "u", "/h/", header,
sections[[i]]))
tabela1 = base::rawToChar(httr::content(tabela1,'raw'))
t1 = paste("tabela", x, sep="_")
tabela1 = rjson::fromJSON(tabela1)
tabela1 = as.data.frame(do.call("rbind", tabela1))
if(is.null(header2)){header2 = tabela1[1,]}
tabela = as.data.frame(do.call("rbind", list(tabela, tabela1[2:nrow(tabela1),])))
}
}
colnames(tabela) <- unlist(header2)
} else if (stringr::str_count(d, "M\u00EAs") == 1){
# month(to2) <- month(to2) - month(from2)
for(i in len){
tabela = data.frame()
header2 = NULL
from2 = paste0(substr(from,1,4), "-", substr(from,5,6), "-01")
to2 = paste0(substr(to,1,4), "-", substr(to,5,6), "-01")
from2 = as.Date(from2)
to2 = as.Date(to2)
dif_mes = as.numeric(floor((to2 - from2)/365*12/nova_req))
lubridate::month(from2) = dif_mes + lubridate::month(from2)
init = paste0(substr(from,1,4),substr(from,5,6))
fin = paste0(substr(from2,1,4),substr(from2,6,7))
for(j in 1:nova_req){
tabela1=httr::GET(paste0("http://api.sidra.ibge.gov.br/values/",
"t/", inputs[i], "/", territory, "/", "p/",
from, "-", to,
"/v/", variable[i], "/f/", "u", "/h/", header,
sections[[i]]))
tabela1 = base::rawToChar(httr::content(tabela1,'raw'))
init = paste0(sum(as.numeric(substr(from2,1,4)),1),substr(from2,6,7))
lubridate::month(from2) = dif_mes + lubridate::month(from2)
fin = paste0(substr(from2,1,4),substr(from2,6,7))
t1 = paste("tabela", x, sep="_")
tabela1 = rjson::fromJSON(tabela1)
tabela1 = as.data.frame(do.call("rbind", tabela1))
if(is.null(header2)){header2 = tabela1[1,]}
tabela = as.data.frame(do.call("rbind", list(tabela, tabela1[2:nrow(tabela1),])))
}
}
colnames(tabela) <- unlist(header2)
} else if(stringr::str_count(d, "Trimestre") == 1){
for(i in len){
tabela = data.frame()
header2 = NULL
from2 = paste0(substr(from,1,4), "-", substr(from,5,6), "-01")
to2 = paste0(substr(to,1,4), "-", substr(to,5,6), "-01")
from2 = as.yearqtr(from2)
to2 = as.yearqtr(to2)
dif_mes = as.numeric(floor((to2 - from2)/nova_req))
from2 = dif_mes + from2
init = paste0(substr(from,1,4),substr(from,5,6))
fin = paste0(substr(from2,1,4),"0",substr(from2,7,7))
for(j in 1:nova_req){
tabela1=httr::GET(paste0("http://api.sidra.ibge.gov.br/values/",
"t/", inputs[i], "/", territory, "/", "p/",
from, "-", to,
"/v/", variable[i], "/f/", "u", "/h/", header,
sections[[i]]))
tabela1 = base::rawToChar(httr::content(tabela1,'raw'))
init = paste0(sum(as.numeric(substr(from2,1,4)),1),"0",substr(from2,7,7))
from2 = dif_mes + from2
fin = paste0(substr(from2,1,4),"0",substr(from2,7,7))
t1 = paste("tabela", x, sep="_")
tabela1 = rjson::fromJSON(tabela1)
tabela1 = as.data.frame(do.call("rbind", tabela1))
if(is.null(header2)){header2 = tabela1[1,]}
tabela = as.data.frame(do.call("rbind", list(tabela, tabela1[2:nrow(tabela1),])))
}
}
colnames(tabela) <- unlist(header2)
}
rm(tabela1)
# id2 = which(colnames(tabela)== "D4N")
id = which(colnames(tabela)=="V" | colnames(tabela)=="Valor")
id3 = which(colnames(tabela) == "M\u00EAs" | colnames(tabela) == "Ano" |
colnames(tabela) == "Trimestre")
if ( colnames(tabela[id3]) == "M\u00EAs" & length(tabela[[id3]]) > 1){
tabela$mes <- sapply(tabela["M\u00EAs"],
FUN = function(x){substr(x,1,(nchar(x)-5))})
tabela$ano <- sapply(tabela["M\u00EAs"],
FUN = function(x){substr(x,(nchar(x)-3), nchar(x))})
tabela$mes[tabela$mes == "janeiro"] <- "01"
tabela$mes[tabela$mes == "fevereiro"] <- "02"
tabela$mes[tabela$mes == "mar\u00E7o"] <- "03"
tabela$mes[tabela$mes == "abril"] <- "04"
tabela$mes[tabela$mes == "maio"] <- "05"
tabela$mes[tabela$mes == "junho"] <- "06"
tabela$mes[tabela$mes == "julho"] <- "07"
tabela$mes[tabela$mes == "agosto"] <- "08"
tabela$mes[tabela$mes == "setembro"] <- "09"
tabela$mes[tabela$mes == "outubro"] <- "10"
tabela$mes[tabela$mes == "novembro"] <- "11"
tabela$mes[tabela$mes == "dezembro"] <- "12"
tabela$mes_ano <- base::paste0(tabela$ano, "-",tabela$mes, "-01")
tabela$mes_ano <- base::as.Date(tabela$mes_ano)
tabela["M\u00EAs"] <- tabela$mes_ano
tabela <- tabela[,1:(length(tabela)-3)]
colnames(tabela)[id3] <- "Data"
}
if(colnames(tabela[id3]) == "Ano" & length(tabela[[id3]]) > 1){
tabela$Ano <- base::paste0(tabela$Ano, "-01-01")
tabela$Ano <- base::as.Date(tabela$Ano)
colnames(tabela)[id3] <- "Data"
}
if(colnames(tabela[id3]) == "Trimestre" & length(tabela[[id3]]) > 1){
tabela$trimestre <- sapply(tabela["Trimestre"],
FUN = function(x){substr(x,1,1)})
tabela$ano <- sapply(tabela["Trimestre"],
FUN = function(x){substr(x,(nchar(x)-3), nchar(x))})
tabela$tri_ano <- base::paste0(tabela$ano, "-0",tabela$trimestre)
tabela$tri_ano <- zoo::as.yearqtr(tabela$tri_ano)
tabela["Trimestre"] <- tabela$tri_ano
tabela <- tabela[,1:(length(tabela)-3)]
colnames(tabela)[id3] <- "Data"
}
# Converting column V into numeric values
valor = NULL
id = which(colnames(tabela)=="V" | colnames(tabela)=="Valor")
tabela[,id] = suppressWarnings(ifelse(unlist(tabela[,id])!="..",
as.numeric(unlist(tabela[,id])),NA))
return(tabela)
}
/scratch/gouwar.j/cran-all/cranData/BETS/R/sidra.aux.R
#' A function to extract Sidra series using their API
#'
#' The different parameters define the table and its dimensions (periods, variables, territorial units and classification) to be consulted. The parameters that define the sections may vary from table to table. Hence, the sidraGet function takes between 5 and 7 mandatory arguments. You can only choose one variable per request, but multiple sections within that variable.
#' @param x Sidra series number.
#' @param from A string or character vector specifying the period where the series shall start.
#' @param to A string or character vector specifying the period where the series shall end.
#' @param territory Specifies the desired territorial level. One of "brazil", "region", "state",
#' "city", "mesoregion", "microregion", "citizenship", "semiarid" or "semiaridUF".
#' @param variable An integer. The code of the variable to be returned (only one per request).
#' @param cl A vector containing the classification codes.
#' @param sections A vector, or a list of vectors if there are two or more classification
#' codes, containing the desired sections of each classification.
#' @keywords sidra
#' @export
#' @import rjson zoo
#' @importFrom httr GET content
#' @examples
#' \dontrun{sidra = sidraGet(x = c(1612), from = 1990, to = 2015, territory = "brazil", variable =109)
#' sidra = sidraGet(x = c(3653), from = c("200201"),
#' to = c("201703"), territory = "brazil",
#' variable = 3135, sections = c(129316,129330), cl = 544)
#' sidra = sidraGet(x = c(3653), from = c("200201"),
#' to = c("201512"), territory = "brazil", variable = 3135,
#' sections = "all", cl = 544)
#' sidra = sidraGet(x = c(1618), from = c("201703"), to = c("201703"),
#' territory = "brazil",
#' variable = 109, sections=list(c(39427), c(39437,39441)), cl = c(49, 48))
#' trim - x = 1620; from = 199001; to = 201701; territory = "brazil";
#' sections = list(c(90687)); cl =c(11255); variable = 583
#' sidra = sidraGet(x = 1620, from = 199001, to = 201701,
#' territory = "brazil",
#' sections=list(c(90687)), cl =c(11255), variable = 583)}
# x = c(5932); from = 199601; to = 201701; territory = "brazil"; variable =6564; cl = c(11255); sections = c("all")
# x = c(1612); from = 1990; to = 2015; territory = "n6/all"; variable =109; cl = NULL; sections = NULL
sidraGet <- function(x, from, to, territory = c(n1 = "brazil", n2 = "region", n3 = "state",
n6 = "city", n8 = "mesoregion", n9 = "microregion",
n129 = "citizenship", n132 = "semiarid",
n133 = "semiaridUF"),
variable, cl = NULL, sections = NULL){
# browser()
if (missing(x)){
stop("Need to specify one serie.")
} else if (length(x) > 1){stop("Argument x must have length one.")}
x = as.character(x)
if (missing(from)){
stop("Need to specify 'from' parameter.")}
if (missing(to)){
stop("Need to specify 'to' parameter.")}
if (missing(variable)){
stop("Need to specify one variable.")
} else if (length(x) > 1){stop("Argument x must have length one.")}
# Territory
territory <- base::match.arg(territory)
territory <- base::switch(territory,
brazil = "n1/all",
region = "n2/all",
state = "n3/all",
city = "n6/all",
mesoregion = "n8/all",
microregion = "n9/all",
citizenship = "n129/all",
semiarid = "n132/all",
semiaridUF = "n133/all")
header = "y"
if (length(cl) > 1){
t1=list()
for (i in 1:length(cl)){
t1[i] = paste0("/c", paste0(cl[i], collapse = ","))
}
t2 = NULL
for (i in 1: length(sections)){
t2[i] = paste0(t1[i], "/", paste0(sections[[i]], collapse = ","))
}
sections = paste0(t2, collapse = "")
}
if (! is.null(sections) & length(cl) == 1){
sections = unlist(sections)
sections = c(cl,sections)
sections = list(sections)
for (i in seq_along(sections)){
sections[i] = paste0("/c", sections[[i]][1], "/",
paste0(sections[[i]][2:length(sections[[i]])],
collapse = ","))
}
}
sections = c(sections, rep('', (length(x)+1)-length(sections)))
inputs = as.character(x)
len = seq_along(inputs)
serie = mapply(paste0, "serie_", inputs, USE.NAMES = FALSE)
for (i in len){
tabela=httr::GET(paste0("http://api.sidra.ibge.gov.br/values/",
"t/", inputs[i], "/", territory, "/", "p/",
from, "-", to,
"/v/", variable[i], "/f/", "u", "/h/", header,
sections[[i]]))
tabela = base::rawToChar(httr::content(tabela,'raw'))
if (strsplit(tabela, " ")[[1]][1] == "Par\uE2metro") {
stop("The parameters 'from', 'to' or both are misspecified")
} else if (strsplit(tabela, " ")[[1]][1] == "Tabela" &
strsplit(tabela, " ")[[1]][3] == "Tabela"){
param = strsplit(tabela, " ")[[1]][2]
param = substr(param, 1, nchar(param)-1)
warning(sprintf("The table %s does not contain public data", param))
} else if (strsplit(tabela, " ")[[1]][1] == "Quantidade"){
nova_req <- ceiling(as.numeric(strsplit(tabela, " ")[[1]][5]) /
as.numeric(strsplit(tabela, " ")[[1]][9])) + 1
tabela <- sidra.aux(x, len, nova_req, from, to, inputs, territory, variable, header, sections)
} else{
t1 = paste("tabela", x, sep="_")
tabela = rjson::fromJSON(tabela)
tabela = as.data.frame(do.call("rbind", tabela))
tabela2 = tabela
colnames(tabela) = unlist(tabela[1,])
tabela = tabela[-1,]
id = which(colnames(tabela)=="V" | colnames(tabela)=="Valor")
id2 = which(colnames(tabela2)== "D4N")
id3 = which(colnames(tabela) == "M\u00EAs" | colnames(tabela) == "Ano" |
colnames(tabela) == "Trimestre")
if ( colnames(tabela[id3]) == "M\u00EAs" & length(tabela[[id3]]) > 1){
tabela$mes <- sapply(tabela["M\u00EAs"],
FUN = function(x){substr(x,1,(nchar(x)-5))})
tabela$ano <- sapply(tabela["M\u00EAs"],
FUN = function(x){substr(x,(nchar(x)-3), nchar(x))})
tabela$mes[tabela$mes == "janeiro"] <- "01"
tabela$mes[tabela$mes == "fevereiro"] <- "02"
tabela$mes[tabela$mes == "mar\u00E7o"] <- "03"
tabela$mes[tabela$mes == "abril"] <- "04"
tabela$mes[tabela$mes == "maio"] <- "05"
tabela$mes[tabela$mes == "junho"] <- "06"
tabela$mes[tabela$mes == "julho"] <- "07"
tabela$mes[tabela$mes == "agosto"] <- "08"
tabela$mes[tabela$mes == "setembro"] <- "09"
tabela$mes[tabela$mes == "outubro"] <- "10"
tabela$mes[tabela$mes == "novembro"] <- "11"
tabela$mes[tabela$mes == "dezembro"] <- "12"
tabela$mes_ano <- base::paste0(tabela$ano, "-",tabela$mes, "-01")
tabela$mes_ano <- base::as.Date(tabela$mes_ano)
tabela["M\u00EAs"] <- tabela$mes_ano
tabela <- tabela[,1:(length(tabela)-3)]
colnames(tabela)[id3] <- "Data"
}
if(colnames(tabela[id3]) == "Ano" & length(tabela[[id3]]) > 1){
tabela$Ano <- base::paste0(tabela$Ano, "-01-01")
tabela$Ano <- base::as.Date(tabela$Ano)
colnames(tabela)[id3] <- "Data"
}
if(colnames(tabela[id3]) == "Trimestre" & length(tabela[[id3]]) > 1){
tabela$trimestre <- sapply(tabela["Trimestre"],
FUN = function(x){substr(x,1,1)})
tabela$ano <- sapply(tabela["Trimestre"],
FUN = function(x){substr(x,(nchar(x)-3), nchar(x))})
tabela$tri_ano <- base::paste0(tabela$ano, "-0",tabela$trimestre)
tabela$tri_ano <- zoo::as.yearqtr(tabela$tri_ano)
tabela["Trimestre"] <- tabela$tri_ano
tabela <- tabela[,1:(length(tabela)-3)]
colnames(tabela)[id3] <- "Data"
}
# Converting column V into numeric values
valor = NULL
id = which(colnames(tabela)=="V" | colnames(tabela)=="Valor")
tabela[,id] = suppressWarnings(ifelse(unlist(tabela[,id])!="..",
as.numeric(unlist(tabela[,id])),NA))
rm(tabela2)
}
assign(serie[i],tabela)
rm(tabela)
}
lista = list()
ls_df = ls()[grepl('data.frame', sapply(ls(), function(x) class(get(x))))]
for ( obj in ls_df ) { lista[obj]=list(get(obj)) }
return(lista)
}
/scratch/gouwar.j/cran-all/cranData/BETS/R/sidraGet.R
#' Search for Sidra Series
#'
#' Searches the Sidra databases for a series by its description, or returns the descriptions of a given table.
#'
#' @param description A \code{character} argument. The function searches the Sidra metadata and prints the results in a window.
#' @param code A \code{numeric}. The Sidra table code. The descriptions of the given table are returned.
#' @param view A \code{boolean}. The default is \code{TRUE}. If set to \code{FALSE}, the results are returned instead of being displayed.
#' @param browse A \code{boolean}. If browse is set to \code{TRUE}, the description table opens in your browser for better visualization.
#' @examples
#' \dontrun{
#' sidraSearch(description = "pib")
#' sidraSearch(code = 1248)
#' }
#' @keywords sidra IBGE
#' @importFrom utils View
#' @importFrom htmltools html_print
#' @importFrom stringr str_split
#' @import xml2 rvest stringr RMySQL DBI
#' @export
#'
#'
#'
sidraSearch <- function(description = NULL, code, view = TRUE, browse = FALSE) {
conn = connection()
tb = "metadata_pt"
# description = description
# browser()
if(is.null(description) & missing(code)){
invisible(dbDisconnect(conn))
return(msg("No search parameters. Please set the values of one or more parameters."))
}
if(!is.null(description) & !missing(code)){
invisible(dbDisconnect(conn))
return(msg("You must input a description OR a code, not both."))
}
if (!is.null(description)){
if (is.numeric(description)){
code = description; rm(description)
}
}
# browser()
if (!is.null(description) && missing(code)) {
if(description == "*" && missing(code)){
query <- paste0("select * from ", tb, " where source = 'Sidra'")
}
params = vector(mode = "character")
## Break description parameters
and_params = vector(mode = "character")
or_params = vector(mode = "character")
# Workaround
description = paste0(description, " ")
# Do not match whole expressions
exprs = regmatches(description,gregexpr("~ ?'(.*?)'",description))[[1]]
if(length(exprs) != 0){
for(i in 1:length(exprs)){
description = gsub(exprs[i], "", description)
exprs[i] = gsub("~", "", exprs[i])
exprs[i] = gsub("'", "", exprs[i])
exprs[i] = trimws(exprs[i])
and_params = c(and_params, paste0("description not like " ,"\'%", exprs[i] ,"%\'"))
}
}
# Match whole expressions
exprs = regmatches(description,gregexpr("'(.*?)'",description))[[1]]
if(length(exprs) != 0){
for(i in 1:length(exprs)){
description = gsub(exprs[i], "", description)
exprs[i] = gsub("'", "", exprs[i])
exprs[i] = trimws(exprs[i])
or_params = c(or_params, paste0("description like " ,"\'%", exprs[i] ,"%\'"))
}
}
# Do not match words
words = regmatches(description,gregexpr("~ ?(.*?) ",description))[[1]]
if(length(words) != 0){
for(i in 1:length(words)){
description = gsub(words[i], "", description)
words[i] = gsub("~", "", words[i])
words[i] = trimws(words[i])
and_params = c(and_params, paste0("description not like " ,"\'%", words[i] ,"%\'"))
}
}
# Match words
words = str_split(description, " ")[[1]]
words = words[words != ""]
if(length(words) != 0){
for(i in 1:length(words)){
or_params = c(or_params, paste0("description like " ,"\'%", words[i] ,"%\'"))
}
}
if(length(and_params) > length(or_params)){
desc = and_params[1]
and_params = and_params[-1]
} else {
desc = or_params[1]
or_params = or_params[-1]
}
if(length(or_params) != 0){
for(i in 1:length(or_params)){
desc = paste(desc, "and", or_params[i])
}
}
if(length(and_params) != 0){
for(i in 1:length(and_params)){
desc = paste(desc, "and", and_params[i])
}
}
params = c(params, desc)
query = paste0("select * from ", tb, " where source = 'Sidra' and")
query = paste(query, params[1])
if(length(params) != 1) {
for(i in 2:length(params)){
query = paste(query, "and", params[i])
}
}
results = dbGetQuery(conn, query)
results$description = iconv(results$description, from = "UTF-8")
results$unit = iconv(results$unit, from = "UTF-8")
results$code = str_replace(results$code, "Sidra_", "")
count = dbGetQuery(conn,paste0("select count(source) from ", tb, " where source = 'Sidra'"))
invisible(dbDisconnect(conn))
if(nrow(results) > 0){
msg(paste("Found", nrow(results),"out of", count,"series.",sep=" "))
msg("If you have found the series you want, you can input its number in this function to get the metadata.")
if(view==T){
return(View(results,"Metadata"))
}
else{
return(results)
}
}
else{
description <- stringr::str_replace_all(description, " ", "%20")
tabela <- xml2::read_html(paste0("https://sidra.ibge.gov.br/Busca?q=", description))
tabela <- rvest::html_nodes(tabela,".busca-link-tabela")
tabela <- rvest::html_text(tabela)
generic = function(x){
aux2 = data.frame(
tabela = character(),
info = character(),
description = character()
)
for(i in 1:length(x)){
aux = str_split(x[i], pattern = "-")
if(length(aux[[1]])!=3){
aux[[1]][3] = NA
x[i] = paste(aux[[1]],collapse ="-")
}
aux2 = rbind(aux2,aux)
}
data = data.frame(do.call('rbind', strsplit(as.character(x),'-',fixed=FALSE)))
names(data) = c("tabela","info", "description")
options(warn=-1)
return(data)
}
data = generic(x = tabela)
if(nrow(data) == 0){
msg("No series found. Try using another combination of search terms.")
}else{
return(data)
}
}
}
if(!missing(code)){
tabela <- xml2::read_html(paste0("http://api.sidra.ibge.gov.br/desctabapi.aspx?c=", code))
tabela <- rvest::html_text(tabela)
d = strsplit(tabela, split = "\r\n")
d = trimws(d[[1]])
d2 = c()
for ( i in seq_along(d)){
if(d[i] != ""){
d2 = c(d2,d[i])
}
}
d3 = paste(d2[10:length(d2)], collapse = "\n")
if(browse != FALSE){
utils::browseURL(paste0("http://api.sidra.ibge.gov.br/desctabapi.aspx?c=", code))
} else{
# utils::View(d3)
return(writeLines(d3))
}
}
}
# if(is.character(description) & missing(code)){
#
# description <- stringr::str_replace_all(description, " ", "%20")
#
# tabela <- xml2::read_html(paste0("https://sidra.ibge.gov.br/Busca?q=", description))
#
# tabela <- rvest::html_nodes(tabela,".busca-link-tabela")
# tabela <- rvest::html_text(tabela)
#
#
# tabela <- stringr::str_replace(tabela, "Tabela ", "")
# tabela <- stringr::str_split(tabela, "-", n = 2)
# tabela <- matrix(trimws(unlist(tabela)), ncol = 2, byrow = TRUE)
#
# colnames(tabela) <- c("code", "description")
# msg(paste("Found", nrow(tabela), "results."))
# utils::View(tabela)
#
#
# # return(writeLines(tabela))
# } else if (is.numeric(code)){
#
#
# tabela <- xml2::read_html(paste0("http://api.sidra.ibge.gov.br/desctabapi.aspx?c=", code))
# tabela <- rvest::html_text(tabela)
#
#
#
#
#
# d = strsplit(tabela, split = "\r\n")
# d = trimws(d[[1]])
# d2 = c()
#
# for ( i in seq_along(d)){
#
# if(d[i] != ""){
#
# d2 = c(d2,d[i])
#
# }
#
# }
#
# d3 = paste(d2[10:length(d2)], collapse = "\n")
#
#
#
#
#
# if(browse != FALSE){
#
# shell.exec(paste0("http://api.sidra.ibge.gov.br/desctabapi.aspx?c=", code))
# } else{
#
# # utils::View(d3)
# return(writeLines(d3))
#
# }
#
#
# } else{ stop("Either 'description' or 'code' must be provided as input.") }
# }
# library(htmltools); View(html_print(pre(paste0(capture.output(print(mtcars)), collapse="\n"))))
/scratch/gouwar.j/cran-all/cranData/BETS/R/sidraSearch.R
#' @title Plot standardized residuals
#'
#' @description Uses a model object to create a plot of standardized residuals. This model can be an \link[forecast]{Arima} or an \link[stats]{arima}. In the near future, this function will also accept objects returned by \link[BETS]{grnn.train}.
#'
#' @param model An \link[forecast]{Arima} or an \link[stats]{arima} object. The model.
#' @param alpha A \code{numeric} between 0 and 1. The significance level.
#'
#' @return Besides showing the plot, this function returns a \code{numeric vector} containing the standardized residuals.
#'
#' @importFrom stats sd qnorm
#' @importFrom graphics abline
#' @author Talitha Speranza \email{[email protected]}
#'
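#' @examples
#' # A minimal usage sketch (not run): fit a SARIMA model with forecast::Arima,
#' # as in the t_test example elsewhere in this package, and plot its
#' # standardized residuals at the 5% significance level.
#' \dontrun{
#' require(forecast)
#' data("AirPassengers")
#' fit.air <- Arima(AirPassengers, order = c(1,1,1), seasonal = c(1,1,1),
#'                  method = "ML", lambda = 0)
#' resid.std <- std_resid(fit.air, alpha = 0.05)
#' }
#'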
#' @export
std_resid = function(model, alpha = 0.05){
resid <- resid(model)
rp <- (resid - mean(resid))/sd(resid)
plot(rp, col = "royalblue", ylim = c(-0.5 + min(rp),0.5 + max(rp)), ylab = "Standard Residuals")
abline(h = c(-qnorm(1 - alpha/2),qnorm(1- alpha/2)), col = "gray", lty = 2)
return(rp)
}
/scratch/gouwar.j/cran-all/cranData/BETS/R/std_resid.R
#' @title Test the significance of the parameters of an ARIMA model
#'
#' @description Performs the t test on every parameter of an ARIMA model. This model can be an \link[forecast]{Arima} or an \link[stats]{arima}.
#'
#' @param model An \link[forecast]{Arima} or an \link[stats]{arima} object. The model for which the parameters must be tested.
#' @param nx An \code{integer}. The number of exogenous variables
#' @param alpha A \code{numeric} value between 0 and 1. The significance level.
#'
#'
#' @examples
#' require(forecast)
#' data("AirPassengers")
#' fit.air<- Arima(AirPassengers,order = c(1,1,1), seasonal = c(1,1,1), method ="ML",lambda=0)
#' summary(fit.air)
#'
#' # Significance test for the model SARIMA(1,1,1)(1,1,1)[12]
#' t_test(model = fit.air)
#'
#'
#' @return A \code{data.frame} containing the standard errors, the t-statistics, the critical values and whether the null hypothesis should be rejected or not, for each model parameter.
#' @importFrom stats qt
#' @author Talitha Speranza \email{[email protected]}, Daiane Marcolino \email{[email protected]}
#'
#' @export
t_test <- function(model, nx = 0, alpha = 0.05){
coef <- model$coef
se <- sqrt(diag(model$var.coef))
t <- abs(coef/se)
crit = qt(1 - alpha/2, length(model$x) - sum(model$arma[c(1,2,3,4,6,7)]) - nx)
ok <- t > crit
resul <- data.frame(Coeffs = coef, Std.Errors = se, t = t, Crit.Values = crit, Rej.H0 = ok )
return(resul)
}
/scratch/gouwar.j/cran-all/cranData/BETS/R/t_test.R
#' @title Perform unit root tests
#'
#' @description This function uses the package 'urca' to perform unit root tests on a pre-defined time series. Unlike urca functions, it returns a meaningful table summarizing the results.
#'
#' @param ... Arguments passed on to urca functions
#' @param mode A \code{character}. The type of the test. Set it to 'ADF' for Augmented Dickey-Fuller, 'KPSS' for KPSS or 'PP' for Phillips-Perron.
#' @param level A \code{character}. The confidence level. Can be either '1pct' (not for KPSS), '2.5pct', '5pct' or '10pct'
#'
#' @return A \code{list} object. The first element is a \code{data.frame} with the test statistics, the critical values and the test results. The second, the model residuals.
#'
#' @author Talitha Speranza \email{[email protected]}
#'
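#' @examples
#' # A minimal usage sketch (not run). The dots are passed on to the
#' # corresponding urca function (here, ur.df), so 'type', 'lags' and
#' # 'selectlags' below are ur.df arguments; 'mode' and 'level' belong to
#' # ur_test itself.
#' \dontrun{
#' data("AirPassengers")
#' adf <- ur_test(y = AirPassengers, type = "drift", lags = 6,
#'                selectlags = "BIC", mode = "ADF", level = "5pct")
#' adf$results
#' }
#'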
#' @export
#' @import urca
ur_test = function(..., mode = "ADF", level = "5pct"){
if(mode == "ADF"){
df <- ur.df(...)
} else if(mode == "KPSS"){
df <- ur.kpss(...)
} else if(mode == "PP"){
df <- ur.pp(...)
} else {
return(invisible(msg(paste("mode = ",mode, " - ",.MSG_PARAMETER_NOT_VALID))))
}
cval = as.matrix(df@cval[,level])
stat = t(df@teststat)
res = vector(mode = "logical")
for(i in 1:length(stat)){
if(mode == "KPSS"){
if(stat[i] > cval[i]){
res = c(res, "yes")
}
else {
res = c(res, "no")
}
} else {
# If the test statistic is smaller than the critical value (this test is
# non-symmetrical, so we do not take the absolute value), the null
# hypothesis of tau2 = 0 is rejected and no unit root is present.
if(stat[i] > cval[i]){
res = c(res, "no")
}
else {
res = c(res, "yes")
}
}
}
res = as.matrix(res)
results = data.frame(statistic = stat,crit.val = cval, rej.H0 = res)
return(list(results = results, residuals = df@res))
}
/scratch/gouwar.j/cran-all/cranData/BETS/R/ur_test.R
.onLoad <- function(lib, pkg)
{
#txt <- c("\n",
# paste(sQuote("BETS")),
# "\n",
# paste(sQuote("BETS"),
# "Brazilian Economic Time Series"),
# "\n",
# paste("See",
# sQuote("library(help=\"BETS\")"),
# "for details"),
#"\n",
#paste("Bug reports: https://github.com/pedrocostaferreira/BETS/issues"),
#"\n",
#paste("Maintainer: Jonatha Costa <[email protected]>"),
#"\n"
#)
#if(interactive() || getOption("verbose"))
# base::writeLines(strwrap(txt, indent = 4, exdent = 4))
}
/scratch/gouwar.j/cran-all/cranData/BETS/R/zzz.R
---
title: "Fitted GRNN Model"
author: "BETS Package"
date: "`r Sys.Date()`"
output: html_document
params:
ts: 13522
auto.reg: TRUE
present.regs: FALSE
lag.max: 2
regs: 4382
start.train: !r c(1999,1)
end.train: !r c(2016,1)
start.test: !r c(2016,2)
end.test: !r c(2016,11)
sigma.interval: !r c(0.8,0.9)
sigma.step: 0.1
var.names: !r NA
series.file: !r NA
---
```{r setup, echo = FALSE}
knitr::opts_chunk$set(warning=FALSE, message=FALSE)
```
```{r echo = FALSE}
library(BETS)
regs = as.list(params$regs)
regs = append(regs,params$ts,0)
gen.name = FALSE
if(is.na(params$var.names)){
var.names = vector(mode = "character")
gen.name = TRUE
} else {
var.names = params$var.names
}
info = data.frame()
j = 1
for(i in 1:length(regs)){
reg = regs[[i]]
if(class(reg) == "numeric" || class(reg) == "character" || class(reg) == "integer"){
res = BETSsearch(code = reg, view = F)[1,]
res = data.frame(cbind(res," "),stringsAsFactors = F)
info = rbind(info,res)
regs[[i]] = BETSget(reg)
}
else {
res = data.frame(t(c("-", paste("Custom series",j),
"-",paste(start(reg),collapse = "-"),
paste(end(reg),collapse = "-"),
rep("-",2)," ")),
stringsAsFactors = F)
names(res) = names(info)
info = rbind(info, res)
j = j + 1
}
if(gen.name){
var.names = c(var.names,paste0("series",i))
}
}
info[,8] = var.names
names(info) <- c("Code","Description","Unit","Periodicity","Start","End","Source","Name")
info[,"Description"] <- trimws(info[,"Description"])
```
## User-Defined Parameters
```{r echo = FALSE}
r = info[-1,1]
inx = (r == "-")
custom = ""
if(any(inx)){
custom = "and custom"
}
r = paste(paste(r[!inx],collapse = ", "),custom)
pars = c("ts","regs","auto.reg","present.reg","lag.max","start.train","end.train","start.test","end.test","sigma.inteval","sigma.step","var.names")
desc = c("Dependant variable","Regressors","Is the dependant variable auto-regressive?","Include non-lagged series among regressors?","Regressors' maximum lag","Training set starting period","Training set ending period","Testing set starting period","Testing set ending period","Sigma inteval","Sigma step", "Variable names")
vals = c(info[1,1],r,params$auto.reg,params$present.regs,params$lag.max, paste(params$start.train,collapse = "."), paste(params$end.train,collapse = "."), paste(params$start.test,collapse = "."), paste(params$end.test,collapse = "."), paste(params$sigma.interval,collapse = " to "), params$sigma.step, paste(var.names, collapse = ", "))
knitr::kable(data.frame(Parameter = pars,Description = desc, Value = vals))
```
```{r echo = F}
auto.reg = params$auto.reg
present.regs = params$present.regs
lag.max = params$lag.max
start.train = params$start.train
end.train = params$end.train
start.test = params$start.test
end.test = params$end.test
sigma.interval = params$sigma.interval
sigma.step = params$sigma.step
ts = params$ts
series.file = params$series.file
```
### Series Information
The table shown below was saved into a variable called `info`. It is going to be used later.
```{r echo = FALSE}
knitr::kable(info, format = "markdown")
```
Note: If the series is in BETS database, you can get information about it using the function `BETSsearch`.
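For example, a metadata lookup for the default dependent variable of this template could look like the sketch below (not evaluated; `13522` is simply the template's default `ts` parameter):
```{r eval = FALSE}
# Look up BETS metadata for a series code (display-only sketch)
BETSsearch(code = 13522, view = FALSE)
```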
## Graphs
All series were stored in a list called `regs`, the first element being the dependent variable. We are now going to subset these series according to starting and ending periods.
```{r}
for(i in 1:length(regs)){
regs[[i]] = window(regs[[i]], start = start.train, end = end.test)
}
```
```{r echo = F}
mult = FALSE
if(length(regs) > 2){
mult = TRUE
}
```
### Dependent Variable
```{r}
# Load mFilter, a package with several filters
library(mFilter)
# Calculate the trend of the dependent variable using an HP filter
trend = fitted(hpfilter(regs[[1]]))
# Load dygraphs and make a plot
library(dygraphs)
dygraph(cbind(Series = regs[[1]], Trend = trend), main = info[1,"Description",]) %>%
dySeries("Series",color = "royalblue",drawPoints = TRUE) %>%
dySeries("Trend", strokeWidth = 1, strokePattern = "dashed", color = "red") %>%
dyRangeSelector(strokeColor = "gray", fillColor = "gray") %>%
dyAxis("y", label = info[1,"Unit"])
```
```{asis eval = mult}
### Regressors
```
```{r eval = mult, echo = mult}
# Load lattice, a charting library
library(lattice)
# Load zoo, a library to manipulate time series and dates
library(zoo)
# Get the dates of each observation of the dependent variable
dates = as.Date.ts(regs[[1]])
# Create a data.frame in which each column contains a regressor
df = data.frame("date" = dates)
for(i in 2:length(regs)){
df = cbind(df, as.vector(regs[[i]]))
}
# Name columns after variable names
names(df)[2:length(regs)] = var.names[2:length(regs)]
# Convert the data.frame into a zoo object
df <- read.zoo(df)
# Plot it with lattice
xyplot(df)
```
```{asis eval = !mult, echo = !mult}
### Regressor
```
```{r eval = !mult, echo = !mult}
trend = fitted(hpfilter(regs[[2]]))
library(dygraphs)
dygraph(cbind(Series = regs[[2]], Trend = trend), main = info[2,"Description"]) %>%
dySeries("Series",color = "royalblue",drawPoints = TRUE) %>%
dySeries("Trend", strokeWidth = 1, strokePattern = "dashed", color = "red") %>%
dyRangeSelector(strokeColor = "gray", fillColor = "gray") %>%
dyAxis("y", label = info[2,"Unit"])
```
## Normalization
Normalizing the input values is a very important step when working with neural networks. Normalizing means standardizing the values of a series in order to smooth its variability and enhance the accuracy of numerical computations, since redundancies are removed. The effect of normalization is, therefore, to improve network performance, helping to avoid simulation faults and making training more efficient.
We are going to normalize every series by applying two operations on each of its elements: subtract the series mean and divide by the series standard deviation. `BETS` has a function that performs these operations, `normalize` with parameter `mode` set to `scale`:
```{r}
regs.norm = list()
for(i in 1:length(regs)){
regs.norm[[i]] = normalize(regs[[i]], mode = "scale")
}
```
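To make the operation explicit, the two steps described above (subtract the mean, divide by the standard deviation) can be written out manually; the sketch below (not evaluated) is assumed to reproduce what `normalize` does with `mode = "scale"`:
```{r eval = FALSE}
# Assumed equivalent of normalize(x, mode = "scale"): z-score the series
z <- (regs[[1]] - mean(regs[[1]])) / sd(regs[[1]])
```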
The ranges changed and became very similar after normalization:
```{r echo = F}
library(lattice)
library(zoo)
dates = as.Date.ts(regs.norm[[1]])
df = data.frame("date" = dates)
for(i in 1:length(regs)){
df = cbind(df, as.vector(regs.norm[[i]]))
}
names(df)[-1] = var.names
df <- read.zoo(df)
xyplot(df)
```
This way, no series will dominate and thus distort the training process.
## Definition of Training and Testing Sets
Before training the neural network, we need to initialize the inputs accepted by `grnn.train`, in particular the argument `train.set`. It is a list of objects of type `ts` (time series), where the first must be the dependent variable (in this case, `r var.names[1]`) and the others, regressors. Each lag must be provided as an additional regressor. We will name each lagged regressor with an underscore plus its lag (for instance, `r paste0(var.names[2],"_1")` will be the first lag of variable `r var.names[2]`).
To build the training set, we first need to add `r lag.max` periods (the number of lags) to `start.train`, since all series must start in the same period:
```{r echo = F}
freq = frequency(regs.norm[[1]])
f1 = (freq == 1)
f2 = (freq == 4 || freq == 12)
```
```{r echo = f1, eval = f1}
# Load lubridate, a package to manipulate dates
library(lubridate)
# Transform start.train in a ymd object
start.train = ymd(paste0(start.train,"-01-01"))
# Sum lag.max*12 to the ymd object using the special operator %m+%
start.train = start.train %m+% months(lag.max*12)
# Extract the resulting year
start.train = as.numeric(format(start.train,"%Y"))
```
```{r echo = f2, eval = f2}
# Load lubridate, a package to manipulate dates
library(lubridate)
# Transform start.train in a ymd object
y = start.train[1]
m = start.train[2]
start.train = ymd(paste0(y,"-",m,"-","01"))
# Sum lag.max*12/freq to the ymd object using the special operator %m+%
start.train = start.train %m+% months(lag.max*12/frequency(regs.norm[[1]]))
# Extract the resulting period
start.train = as.numeric(c(format(start.train,"%Y"),format(start.train,"%m")))
```
Now, `start.train` is equal to `r paste(start.train,collapse = ".")` and we can proceed to divide our data into training and testing sets.
```{r}
complete = list()
training = list()
testing = list()
# The list 'complete' will contain all series, i.e, original and lagged series
complete[[1]] = regs.norm[[1]]
nms = var.names[1]
# If the dependant variable is auto-regressive, add its lags to the list
if(auto.reg){
for(j in 1:lag.max){
complete[[1 + j]] = lag(regs.norm[[1]],-j)
nms = c(nms, paste0(var.names[1],"_",j))
}
}
# Add regressors lags to the series list and their names to the names vector
nregs = length(regs.norm)
s = length(complete)
for(i in 2:nregs){
if(present.regs){
complete[[s + 1]] = regs.norm[[i]]
nms = c(nms, var.names[i])
s = s + 1
}
for(j in 1:lag.max){
complete[[s + 1]] = lag(regs.norm[[i]],-j)
nms = c(nms,paste0(var.names[i],"_",j))
s = s + 1
}
}
# Divide series in training and testing sets
for(i in 1:length(complete)){
training[[i]] = window(complete[[i]], start = start.train, end = end.train)
testing[[i]] = window(complete[[i]], start = start.test, end = end.test)
}
names(training) = nms
names(testing) = nms
```
## Network Training
Finally, the GRNN can be trained:
```{r}
results = grnn.train(training, sigma = sigma.interval, step = sigma.step)
```
From the list outputted by `grnn.train` we see that the best network in terms of fitting used `r paste(nms[results[[1]]$regressors],collapse = ", ")` as regressors and a sigma of `r results[[1]]$sigma`, obtaining a MAPE of `r round(results[[1]]$mape,2)`.
## Network Testing
The next step is, naturally, to test the best networks and choose one of them. This is done by the function `grnn.test`.
```{r}
best.net = grnn.test(results,testing)
# 'mape' field of object best.net (the MAPE over the testing set)
best.net[['mape']]
# Regressors of best net in terms of one-step-ahead forecasts
best.net[['regressors']]
nms[best.net[['regressors']]]
```
## Forecasts
Using the `results` object and the testing set, we can easily obtain forecasts through `predict`:
```{r}
preds = predict(results, testing, actual = testing[[1]],
unnorm = c(mean(regs[[1]]), sd(regs[[1]])), xlim = c(2013, 2016 + 11/12),
ylab = info[1,"Unit"], style = "normal")
preds[['accuracy']]
```
```{r echo = F, eval = !is.na(series.file)}
data = c(regs[[1]],preds$mean)
if(grepl("\\.spss$", series.file)){
saveSpss(file.name = gsub("\\.spss$", "", series.file), data = data)
} else if(grepl("\\.dta$", series.file)){
saveStata(file.name = gsub("\\.dta$", "", series.file), data = data)
} else if(grepl("\\.sas$", series.file)){
saveSas(file.name = gsub("\\.sas$", "", series.file), data = data)
}else if(grepl("\\.csv$", series.file)) {
write.csv(data, file = series.file, row.names = F)
} else if(grepl("\\.csv2$", series.file)) {
series.file = gsub("\\.csv2$", ".csv", series.file)
write.csv2(data, file = series.file, row.names = F)
}
```
<br>
`r if(!is.na(series.file)) 'The whole dependent variable series and model predictions are available at [THIS LINK]('``r if(!is.na(series.file)) series.file``r if(!is.na(series.file)) ')'`
/scratch/gouwar.j/cran-all/cranData/BETS/inst/analysis_GRNN.Rmd
---
title: "Fitted Exponential Smoothing Model"
author: "BETS Package"
date: "`r Sys.Date()`"
output: html_document
params:
ts: 21864
alpha: !r NA
beta: !r FALSE
gamma: !r FALSE
additive: !r TRUE
l.start: !r NULL
b.start: !r NULL
s.start: !r NULL
n.ahead: 10
series.file: !r NA
---
```{r setup, echo = FALSE}
knitr::opts_chunk$set(warning=FALSE, message=FALSE)
custom.ts = TRUE
ts = params$ts
if(class(ts) != 'ts'){
custom.ts = FALSE
code = as.integer(ts)
} else{
code = "None"
}
alpha = params$alpha
beta = params$beta
gamma = params$gamma
additive = params$additive
n.ahead = params$n.ahead
series.file = params$series.file
l.start = params$l.start
b.start = params$b.start
s.start = params$s.start
str = s.start
if(!is.null(s.start)){
str = paste(s.start, collapse = ", ")
}
```
## User-Defined Parameters
Parameter | Value | Variable
---------------------------------- | -------------------| ----------
Series code | `r code` | `ts`
Alpha | `r alpha` | `alpha`
Beta | `r beta` | `beta`
Gamma | `r gamma` | `gamma`
Additive | `r additive` | `additive`
Level initial value | `r l.start` | `l.start`
Trend initial value | `r b.start` | `b.start`
Seasonal components initial values | `r str` | `s.start`
Steps ahead | `r n.ahead` | `n.ahead`
## Series Information
```{r echo = !custom.ts, eval = !custom.ts}
library(BETS)
info <- BETSsearch(code = ts, view = F)
```
```{r echo = FALSE, eval = custom.ts}
info <- data.frame(matrix(nrow = 1, ncol = 6))
names(info) <- c("Code","Description","Periodicity","Start","Source","Unit")
info[1,] <- c(code," ",frequency(ts),paste0(start(ts),collapse = "."),"Custom"," ")
```
```{r echo = FALSE, eval = !custom.ts}
names(info) <- c("Code","Description","Periodicity","Start","Source","Unit")
info[,"Start"] <- paste(start(ts),collapse=".")
info[,"Description"] <- trimws(info[,"Description"])
```
```{r echo = FALSE}
knitr::kable(info, format = "markdown")
```
## Graph
```{r eval = !custom.ts, echo = !custom.ts}
ts = BETSget(code = ts)
```
```{r}
library(mFilter)
trend = fitted(hpfilter(ts))
library(dygraphs)
dygraph(cbind(Series = ts, Trend = trend), main = info[1,"Description",]) %>%
dySeries("Series",color = "royalblue",drawPoints = TRUE) %>%
dySeries("Trend", strokeWidth = 1, strokePattern = "dashed", color = "red") %>%
dyRangeSelector(strokeColor = "gray", fillColor = "gray") %>%
dyAxis("y", label = info[1,"Unit"])
```
```{r echo = F}
# Additive and alpha NOT given
type1 = (additive && is.na(alpha))
# Additive and alpha given
type2 = (additive && !is.na(alpha))
# Multiplicative and alpha given
type3 = (!additive && !is.na(alpha))
# Multiplicative and alpha NOT given
type4 = (!additive && is.na(alpha))
```
## Smoothing
```{asis echo = type1}
You believe the series can be decomposed in an additive fashion (possibly because it seems homoscedastic) and you want the Holt-Winters R algorithm to choose the best alpha value.
```
```{asis echo = type2}
You believe the series can be decomposed in an additive fashion (possibly because it seems homoscedastic) and you do not want the Holt-Winters R algorithm to choose the best alpha value.
```
```{asis echo = type3}
You believe the series can be decomposed in a multiplicative fashion (possibly because it seems heteroscedastic) and you want the Holt-Winters R algorithm to choose the best alpha value.
```
```{asis echo = type4}
You believe the series can be decomposed in a multiplicative fashion (possibly because it seems heteroscedastic) and you do not want the Holt-Winters R algorithm to choose the best alpha value.
```
In addition, you `r if(!beta) 'do not'` want to model trend and `r if(!gamma) 'do not'` think the series has seasonal patterns.
Therefore, this is how we are going to create the model:
```{r echo = type1, eval = type1}
model = HoltWinters(ts, beta = beta, gamma = gamma,
l.start = l.start, b.start = b.start, s.start = s.start)
```
```{r echo = type2, eval = type2}
model = HoltWinters(ts, alpha = alpha, beta = beta, gamma = gamma,
l.start = l.start, b.start = b.start, s.start = s.start)
```
```{r echo = type3, eval = type3}
model = HoltWinters(ts, beta = beta, gamma = gamma, seasonal = "multiplicative",
l.start = l.start, b.start = b.start, s.start = s.start)
```
```{r echo = type4, eval = type4}
model = HoltWinters(ts, alpha = alpha,
beta = beta, gamma = gamma, seasonal = "multiplicative",
l.start = l.start, b.start = b.start, s.start = s.start)
```
These are the final parameters:
```{r}
model
```
## Forecasts
```{r eval = custom.ts, echo = custom.ts}
library(BETS)
```
```{r}
preds = predict(model,h=n.ahead, main = info[,"Description"], ylab = info[,"Unit"], style = "normal")
```
## Model Evaluation
If the model is well-specified, its standardized residuals should not surpass the boundaries of the confidence interval. In addition, they should look like white noise. Here, we plot the standardized residuals with a 95% confidence interval:
```{r}
std.resid = std_resid(model, alpha = 0.05)
```
We can use a Ljung-Box test to check for autocorrelation in the residuals (the forecasting errors):
```{r echo = -1}
bt = Box.test(preds$residuals, lag=20, type="Ljung-Box")
Box.test(preds$residuals, lag=20, type="Ljung-Box")
```
```{asis echo = bt$p.value < 0.05}
Note that the `p.value` is low, which is evidence of non-zero autocorrelations in the forecasting errors at lags 1 to 20. This suggests you should change the model specification.
```
```{asis echo = bt$p.value > 0.05}
Note that the `p.value` is high, so there is little evidence of non-zero autocorrelations in the forecasting errors at lags 1 to 20.
```
To confirm these results, we can take a look at the residuals ACF:
```{r}
corrgram(preds$residuals, lag.max = 20, mode = "bartlett", knit = T)
```
```{r echo = F, eval = !is.na(series.file)}
data = c(ts,preds$mean)
if(grepl("\\.spss$", series.file)){
saveSpss(file.name = gsub("\\.spss$", "", series.file), data = data)
} else if(grepl("\\.dta$", series.file)){
saveStata(file.name = gsub("\\.dta$", "", series.file), data = data)
} else if(grepl("\\.sas$", series.file)){
saveSas(file.name = gsub("\\.sas$", "", series.file), data = data)
}else if(grepl("\\.csv$", series.file)) {
write.csv(data, file = series.file, row.names = F)
} else if(grepl("\\.csv2$", series.file)) {
series.file = gsub("\\.csv2$", ".csv", series.file)
write.csv2(data, file = series.file, row.names = F)
}
```
<br>
`r if(!is.na(series.file)) 'The whole series and the model predictions are available at [THIS LINK]('``r if(!is.na(series.file)) series.file``r if(!is.na(series.file)) ')'`
/scratch/gouwar.j/cran-all/cranData/BETS/inst/analysis_HOLT-WINTERS.Rmd
---
title: "Fitted SARIMA Model"
author: "BETS Package"
date: "`r Sys.Date()`"
output: html_document
params:
ts: 21864
series.file: !r NA
cf.lags: 48
n.ahead: 12
inf.crit: "BIC"
dummy: !r NA
ur.test: !r list(mode = "ADF", type = "drift", lags = 6, selectlags = "BIC", level = "5pct")
arch.test: !r list(lags = 12, demean = FALSE, alpha = 0.05)
box.test: !r list(lag = 1, type = "Ljung-Box", fitdf = 0)
---
```{r setup, echo = FALSE}
knitr::opts_chunk$set(warning=FALSE, message=FALSE)
custom.ts = TRUE
ts = params$ts
if(class(ts) != 'ts'){
custom.ts = FALSE
code = as.integer(ts)
} else{
code = "None"
}
has.dum = T
if(is.na(params$dummy[1])){
has.dum = F
}
series.file = params$series.file
```
## User-Defined Parameters
Parameter | Value | Variable
------------------ | -------------------- | ----------
Series Code | `r code` | `ts`
Maximum Lag | `r params$cf.lags` | `cf.lags`
Prevision Horizon | `r params$n.ahead` | `n.ahead`
Unit Root Test | `r params$ur.test` | `ur.test`
ARCH Test | `r params$arch.test` | `arch.test`
Box Test | `r params$box.test` | `box.test`
Dummy | `r has.dum` | `dummy`
```{r echo = FALSE}
cf.lags = params$cf.lags
n.ahead = params$n.ahead
ur.test = params$ur.test
arch.test = params$arch.test
inf.crit = params$inf.crit
box.test = params$box.test
dummy = NULL
m.dummy = NULL
f.dummy = NULL
if(has.dum){ dummy = params$dummy }
```
```{asis echo = !custom.ts}
## Getting the Time Series from the BETS database
```
```{r eval = !custom.ts, echo = !custom.ts}
library(BETS)
data = BETSget(code)
```
### Information About the Series
```{r eval = !custom.ts, echo = !custom.ts}
info <- BETSsearch(code = ts, view = F)
```
```{r eval = custom.ts, echo = custom.ts}
data <- ts
```
```{r echo = FALSE, eval = custom.ts}
info <- data.frame(matrix(nrow = 1, ncol = 6))
names(info) <- c("Code","Description","Periodicity","Start","Source","Unit")
info[1,] <- c(code," ",frequency(data),paste0(start(data),collapse = "."),"Custom"," ")
```
```{r echo = FALSE, eval = !custom.ts}
names(info) <- c("Code","Description","Periodicity","Start","Source","Unit")
info[,"Start"] <- paste(start(data),collapse=".")
info[,"Description"] <- trimws(info[,"Description"])
```
```{r echo = FALSE}
knitr::kable(info, format = "markdown")
```
## Graph
```{r echo = F}
tryCatch({require(mFilter)
}, warning = function(w){
x <- readline("You need to install package mFilter to obtain a SARIMA report. Install mFilter now? [Y/n]")
if(x %in% c("y","Y","yes","Yes","YES")){
install.packages("mFilter")
require(mFilter)
}
})
```
```{r}
trend = fitted(hpfilter(data))
library(dygraphs)
dygraph(cbind(Series = data, Trend = trend), main = info[,"Description"]) %>%
dyRangeSelector(strokeColor = "gray", fillColor = "gray") %>%
dyAxis("y", label = info[,"Unit"])
```
## Unit Root Tests
```{asis eval = (ur.test == "ADF")}
### Augmented Dickey-Fuller
```
```{asis eval = (ur.test == "KPSS")}
### KPSS
```
```{asis eval = (ur.test == "PP")}
### Phillips-Perron
```
```{r}
test.params = append(list(y = data), ur.test)
df = do.call(ur_test,test.params)
df$results
```
```{r echo = F}
lvl = ur.test$level
if(length(lvl) == 0){
lvl <- "5pct"
}
if(lvl == "1pc"){
ic = "99%"
alpha = 0.01
} else if(lvl == "2.5%"){
ic = "97.5%"
alpha = 0.025
} else if(lvl == "5%"){
ic = "95%"
alpha = 0.05
} else {
ic = "90%"
alpha = 0.1
}
```
```{r eval = (ur.test$mode != "KPSS"), echo = F}
uroot = FALSE
uroot = (df$results[1,"statistic"] > df$results[1,"crit.val"])
```
```{asis eval = (ur.test$mode != "KPSS") && uroot}
For the chosen confidence interval, the test statistic is greater than the critical value. We therefore conclude that there must be a unit root.
```
```{r eval = (ur.test$mode == "KPSS"), echo = F}
uroot = FALSE
uroot = (df$results[1,"statistic"] < df$results[1,"crit.val"])
```
```{asis eval = (ur.test$mode == "KPSS") && uroot}
For the chosen confidence interval, the test statistic is smaller than the critical value. We therefore conclude that there must be a unit root.
```
```{asis eval = uroot}
Now, we are going to repeatedly apply `diff` to the series and check if the diferenced series has a unit root.
```
```{asis eval = !uroot}
For the chosen confidence interval, the test statistic is smaller than the critical value. We therefore conclude that there is no non-seasonal unit root.
```
```{r eval = !uroot, echo = !uroot}
ns_roots = 0
d_ts = data
```
```{r eval = (ur.test$mode != "KPSS") && uroot, echo = (ur.test$mode != "KPSS") && uroot}
ns_roots = 0
d_ts = data
while(df$results[1,"statistic"] > df$results[1,"crit.val"]){
ns_roots = ns_roots + 1
d_ts = diff(d_ts)
test.params = append(list(y = d_ts), ur.test)
df = do.call(ur_test,test.params)
print(df$results)
}
```
```{r eval = (ur.test$mode == "KPSS") && uroot, echo = (ur.test$mode == "KPSS") && uroot}
ns_roots = 0
d_ts = data
while(df$results[1,"statistic"] < df$results[1,"crit.val"]){
ns_roots = ns_roots + 1
d_ts = diff(d_ts)
test.params = append(list(y = d_ts), ur.test)
df = do.call(ur_test,test.params)
print(df$results)
}
```
`r if(uroot) 'These tests found that there must be a total of '` `r if(uroot) ns_roots` `r if(uroot) ' unit root(s)'`
### Osborn-Chui-Smith-Birchenhall
This test will be performed for lag `r frequency(data)`, that is, the frequency of the series.
```{r echo = c(1,2,3)}
library(forecast)
s_roots = nsdiffs(data, test = "ocsb")
print(s_roots)
sroot = FALSE
if(s_roots != 0) sroot = TRUE
roots = (uroot || sroot)
```
```{asis eval = !sroot}
According to the OCSB test, there is no seasonal unit root, at least at a 5% significance level.
```
`r if(sroot) 'This result holds for a 5% significance level and means that, according to the OCSB test, there must be a total of '` `r if(sroot) s_roots` `r if(sroot) ' seasonal unit root(s)'`
## Auto-Correlation Functions
```{r echo = FALSE}
library(plotly)
```
```{asis eval = !roots, echo = !roots }
### ACF and PACF - Original Series
```
```{asis eval = roots, echo = roots}
### ACF and PACF - After Differencing
```
`r if(roots) 'As we saw earlier, this series probably has'` `r if(uroot) ns_roots` `r if(uroot) ' non-seasonal unit root(s)'` `r if(sroot && uroot) ' and '` `r if(sroot) s_roots` `r if(sroot) ' seasonal unit root(s)'` `r if(roots) '. It means we have to look into the correlograms of the differenced series.'`
```{r eval = sroot, echo = sroot}
d_ts <- diff(d_ts, lag = frequency(data), differences = s_roots)
```
```{r fig.height=3, fig.width=6.0, fig.align="center"}
corrgram(d_ts, lag.max = cf.lags, mode = "bartlett", knit = T)
corrgram(d_ts, lag.max = cf.lags, mode = "simple", type = "partial", knit = T)
```
## Model Identification and Estimation
The correlograms from the last section give us enough information to try to identify the underlying SARIMA model parameters. We can confirm our guess by running the `auto.arima` function from the package `forecast`. By default, this function uses the AICc (Akaike Information Criterion with Finite Sample Correction) for model selection. `r if(inf.crit == "AICC") 'This is the criterion we are going to use here.'` `r if(inf.crit == "BIC") 'Here, we are going to use BIC (Bayesian Information Criterion), in which the penalty term for the number of parameters in the model is larger than in AIC.'` `r if(inf.crit == "AIC") 'Here, we are going to use AIC (Akaike Information Criterion).'`
```{asis eval = has.dum}
Since a dummy has to be included, we are going to separate it in two samples, one to build the model and other to make the forecasts.
```
```{r eval = has.dum, echo = has.dum}
m.dummy = window(dummy, end = end(data))
f.dummy = tail(dummy, n.ahead)
```
```{r echo=F}
# was.adf = FALSE
# # workround until forecast package is fixed
# if(tolower(ur.test$mode) == "adf"){
# ur.test$mode <- "KPSS"
# was.adf = TRUE
# }
```
```{r echo = has.dum, eval = has.dum}
model <- auto.arima(data, ic = tolower(inf.crit), test = tolower(ur.test$mode),
max.d = ns_roots, max.D = s_roots, xreg = m.dummy)
summary(model)
```
```{r echo = !has.dum, eval = !has.dum}
model <- auto.arima(data, ic = tolower(inf.crit), test = tolower(ur.test$mode),
max.d = ns_roots, max.D = s_roots)
summary(model)
```
```{r echo = F}
desc = capture.output(model)[2]
diffs = as.numeric(gsub("\\,", "", regmatches(desc,gregexpr(",.,",desc))[[1]]))
p = model$arma[1]
d = diffs[1]
q = model$arma[2]
P = model$arma[3]
D = diffs[2]
Q = model$arma[4]
freq = model$arma[5]
desc = paste0("SARIMA(",p,",",d,",",q,")(",P,",",D,",",Q,")[",freq,"]")
```
We see that, according to `r inf.crit`, the best model is a `r desc`. Nevertheless, this is not the end. We still have to test for heteroskedasticity in the residuals. We can use an ARCH test for this purpose.
```{r echo = c(1,2,3)}
arch.params <- append(list(x = resid(model)), arch.test)
at <- do.call(arch_test, arch.params)
at
htk = at[1,"htk"]
```
The p.value of `r round(at[1,"p.value"],2)` is `r if(htk) 'smaller' else 'larger'` than the significance level of `r arch.test$alpha`. We therefore conclude that the residuals are `r if(!htk) 'not'` heteroskedastic. `r if(htk) 'This means we should have built the SARIMA model for the log of the original series.'`
```{r echo = F}
zeroes = F
if ( any(data < 0) ) {
htk = F
zeroes = T
}
htk2 = F
```
```{asis echo = htk && zeroes}
Unfortunately, the series contains numbers that are less than zero, so we cannot apply the log function.
```
```{asis echo = htk && !zeroes}
The next step, then, is to rebuild the model using log(data).
```
```{asis echo = htk}
### Unit root tests (non-seasonal)
```
```{r echo=F}
# if(was.adf){
# ur.test$mode <- "ADF"
# }
```
```{r eval = htk, echo = htk}
ldata <- log(data)
ns_roots = 0
d_ts = ldata
test.params = append(list(y = d_ts), ur.test)
df = do.call(ur_test,test.params)
```
```{r eval = (ur.test$mode != "KPSS") && htk, echo = (ur.test$mode != "KPSS") && htk}
while(df$results[1,"statistic"] > df$results[1,"crit.val"]){
ns_roots = ns_roots + 1
d_ts = diff(d_ts)
test.params = append(list(y = d_ts), ur.test)
df = do.call(ur_test,test.params)
}
ns_roots
```
```{r eval = (ur.test$mode == "KPSS") && htk, echo = (ur.test$mode == "KPSS") && htk}
while(df$results[1,"statistic"] < df$results[1,"crit.val"]){
ns_roots = ns_roots + 1
d_ts = diff(d_ts)
test.params = append(list(y = d_ts), ur.test)
df = do.call(ur_test,test.params)
}
ns_roots
```
```{asis echo = htk}
### Seasonal unit root test
```
```{r echo = htk, eval = htk}
s_roots = nsdiffs(ldata, test = "ocsb")
s_roots
if(s_roots != 0) {
d_ts <- diff(d_ts, lag = frequency(ldata), differences = s_roots)
}
```
```{asis echo = htk}
### ACF and PACF
```
```{r fig.height=3, fig.width=6.0, eval = htk, echo = htk}
corrgram(d_ts, lag.max = cf.lags, mode = "bartlett", knit = T)
corrgram(d_ts, lag.max = cf.lags, mode = "simple", type = "partial", knit = T)
```
```{asis echo = htk}
### Estimation
```
```{r echo=F}
# if(was.adf){
# ur.test$mode <- "KPSS"
# }
```
```{r echo = htk && has.dum, eval = htk && has.dum}
model = auto.arima(data, ic = tolower(inf.crit), test = tolower(ur.test$mode),
max.d = ns_roots, max.D = s_roots, xreg = m.dummy, lambda = 0)
summary(model)
```
```{r echo = htk && !has.dum, eval = htk && !has.dum}
model = auto.arima(data, ic = tolower(inf.crit), test = tolower(ur.test$mode),
max.d = ns_roots, max.D = s_roots, lambda = 0)
summary(model)
```
```{r eval = htk, echo = F}
desc = capture.output(model)[2]
diffs = as.numeric(gsub("\\,", "", regmatches(desc,gregexpr(",.,",desc))[[1]]))
p = model$arma[1]
d = diffs[1]
q = model$arma[2]
P = model$arma[3]
D = diffs[2]
Q = model$arma[4]
freq = model$arma[5]
desc = paste0("SARIMA(",p,",",d,",",q,")(",P,",",D,",",Q,")[",freq,"]")
```
`r if(htk) paste('The best model seems to be a',desc)`
```{asis echo = htk}
### ARCH Test
```
```{r echo = htk, eval = htk}
arch.params <- append(list(x = resid(model)), arch.test)
at <- do.call(arch_test, arch.params)
at
```
```{r echo = F, eval = htk}
htk2 = at[1,"htk"]
```
```{asis echo = htk2}
We are led to conclude that the residuals of the second model are also heteroskedastic. It seems that the class of SARIMA models is not suited to forecasting the original series.
```
```{asis echo = !htk2 && htk}
We are led to conclude that the residuals of the second model are not heteroskedastic, which means this model is a better choice compared to the first.
```
The next function outputs the model's standardized residuals. If they are all inside the confidence interval, it means the behaviour of the series was well captured by the model. If only a few residuals are outside the confidence interval, using a dummy to handle structural breaks should be considered. But if most residuals are outside the interval, then SARIMA might not be the appropriate choice.
```{r fig.height = 4}
rsd <- std_resid(model, alpha = 0.01)
```
As a final step in model evaluation, we are going to apply the `r box.test$type` test to check for autocorrelation in the residuals.
```{r echo = c(1,2,3)}
test.params <- append(list(x = resid(model)), box.test)
bt = do.call(Box.test,test.params)
bt$p.value
ac = TRUE
if(bt$p.value > 0.05){
ac = FALSE
}
```
```{asis echo = ac && !has.dum}
The p-value is smaller than 0.05, so we can reject the null hypothesis of no autocorrelation in the residuals. Maybe a dummy can solve this problem. Try running this report with a dummy as a parameter next time.
```
```{asis echo = ac && has.dum}
The p-value is smaller than 0.05, so we can reject the null hypothesis of no autocorrelation in the residuals. Since not even a dummy could solve this problem, another class of models could be applied. Try running other types of reports (e.g. GRNN or HOLT-WINTERS) to model this series.
```
```{asis echo = !ac}
The p-value is greater than 0.05, which means there is not enough statistical evidence to reject the null hypothesis of no autocorrelation in the residuals. This is a desirable result.
```
## Forecasts
```{r echo = has.dum, eval = has.dum}
predict(model, h=n.ahead, xreg = f.dummy,
main = info[,"Description"], ylab = info[,"Unit"], knit = T)
```
```{r echo = !has.dum, eval = !has.dum}
predict(model, h=n.ahead,
main = info[,"Description"], ylab = info[,"Unit"], knit = T)
```
```{r echo = F, eval = !is.na(series.file)}
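# Recompute the forecasts so they can be appended to the observed series and exported to the requested file format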
if(has.dum){
preds = predict(model, h=n.ahead, xreg = f.dummy,
main = info[,"Description"], ylab = info[,"Unit"], knit = T)
} else {
preds = predict(model, h=n.ahead,
main = info[,"Description"], ylab = info[,"Unit"], knit = T)
}
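# Append the point forecasts to the observed series before writing the output file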
data = c(data,preds$mean)
if(grepl("\\.spss$", series.file)){
saveSpss(file.name = gsub("\\.spss$", "", series.file), data = data)
} else if(grepl("\\.dta$", series.file)){
saveStata(file.name = gsub("\\.dta$", "", series.file), data = data)
} else if(grepl("\\.sas$", series.file)){
saveSas(file.name = gsub("\\.sas$", "", series.file), data = data)
}else if(grepl("\\.csv$", series.file)) {
write.csv(data, file = series.file, row.names = F)
} else if(grepl("\\.csv2$", series.file)) {
series.file = gsub("\\.csv2$", ".csv", series.file)
write.csv2(data, file = series.file, row.names = F)
}
```
<br>
`r if(!is.na(series.file)) 'The whole series and the model predictions are available at [THIS LINK]('``r if(!is.na(series.file)) series.file``r if(!is.na(series.file)) ')'`
|
/scratch/gouwar.j/cran-all/cranData/BETS/inst/analysis_SARIMA.Rmd
|
---
classoption: landscape
sansfont: Calibri Light
author: "Talitha Speranza"
output:
rmarkdown::pdf_document:
includes:
in_header: fix_figs.tex
---
\fontfamily{lmss}\selectfont
```{r echo = FALSE}
library(BETS)
```
```{r echo = FALSE, results = "hide"}
knitr::opts_chunk$set(warning=FALSE, message=FALSE)
```
\fancypagestyle{plain}{
\fancyhf{}%
\setlength{\headheight}{40pt}
\fancyhead[L]{
\includegraphics[width=\linewidth,height=35pt]{logo_business} }
\fancyfoot[L]{
{\fontfamily{lmss}\selectfont
{\small Charts generated by the \textbf{BETS R Package} on `r Sys.Date()` $\diamond$ \textbf{\url{https://github.com/pedrocostaferreira/BETS}}}}
}
\fancyfoot[R]{
\thepage\ / \pageref{LastPage} }
}
\pagestyle{plain}
```{r echo = FALSE, results = "hide"}
dir.create("graphs")
chart(ts = "sent_ind", open = F, file = "sent_ind")
chart(ts = "conf_lvl", open = F, file = "conf_lvl")
chart(ts = "iie_br", open = F, file = "iie_br")
chart(ts = "transf_ind", open = F, file = "transf_ind")
```
\begin{figure}[h]
\begin{subfigure}{.5\textwidth}
\centering
\includegraphics[width=11.5cm,keepaspectratio]{graphs/sent_ind}
\label{ipca}
\end{subfigure}
~
\begin{subfigure}{.5\textwidth}
\centering
\includegraphics[width=11.5cm,keepaspectratio]{graphs/conf_lvl}
\label{cut}
\end{subfigure}
~
\begin{subfigure}{.5\textwidth}
\centering
\includegraphics[width=11.5cm,keepaspectratio]{graphs/iie_br}
\label{cut}
\end{subfigure}
~
\begin{subfigure}{.5\textwidth}
\centering
\includegraphics[width=11.5cm,keepaspectratio]{graphs/transf_ind}
\label{cut}
\end{subfigure}
\end{figure}
```{r echo = FALSE, results = "hide"}
chart(ts = "servc", open = F, file = "servc")
chart(ts = "retail", open = F, file = "retail")
chart(ts = "constr", open = F, file = "constr")
chart(ts = "consm", open = F, file = "consm")
```
\begin{figure}[h]
\begin{subfigure}{.5\textwidth}
\centering
\includegraphics[width=11.5cm,keepaspectratio]{graphs/servc}
\label{ipca}
\end{subfigure}
~
\begin{subfigure}{.5\textwidth}
\centering
\includegraphics[width=11.5cm,keepaspectratio]{graphs/retail}
\label{cut}
\end{subfigure}
~
\begin{subfigure}{.5\textwidth}
\centering
\includegraphics[width=11.5cm,keepaspectratio]{graphs/constr}
\label{cut}
\end{subfigure}
~
\begin{subfigure}{.5\textwidth}
\centering
\includegraphics[width=11.5cm,keepaspectratio]{graphs/consm}
\label{cut}
\end{subfigure}
\end{figure}
```{r echo = FALSE, results = "hide"}
chart(ts = "gdp_vars", open = F, file = "gdp_vars")
chart(ts = "gdp_comps", open = F, file = "gdp_comps")
chart(ts = "cap_utl", open = F, file = "cap_utl")
chart(ts = "gdp_mon", open = F, file = "gdp_mon")
```
\begin{figure}[h]
\begin{subfigure}{.5\textwidth}
\centering
\includegraphics[width=11.5cm,keepaspectratio]{graphs/gdp_vars}
\label{ipca}
\end{subfigure}
~
\begin{subfigure}{.5\textwidth}
\centering
\includegraphics[width=11.5cm,keepaspectratio]{graphs/gdp_comps}
\label{cut}
\end{subfigure}
~
\begin{subfigure}{.5\textwidth}
\centering
\includegraphics[width=11.5cm,keepaspectratio]{graphs/cap_utl}
\label{cut}
\end{subfigure}
~
\begin{subfigure}{.5\textwidth}
\centering
\includegraphics[width=11.5cm,keepaspectratio]{graphs/gdp_mon}
\label{cut}
\end{subfigure}
\end{figure}
```{r echo = FALSE, results = "hide"}
chart(ts = "gdp_unemp", open = F, file = "gdp_unemp")
chart(ts = "misery_index", open = F, file = "misery_index")
chart(ts = "lab_coin", open = F, file = "lab_lead")
chart(ts = "lab_lead", open = F, file = "lab_coin")
```
\begin{figure}[h]
\begin{subfigure}{.5\textwidth}
\centering
\includegraphics[width=11.5cm,keepaspectratio]{graphs/gdp_unemp}
\label{ipca}
\end{subfigure}
~
\begin{subfigure}{.5\textwidth}
\centering
\includegraphics[width=11.5cm,keepaspectratio]{graphs/lab_lead}
\label{cut}
\end{subfigure}
~
\begin{subfigure}{.5\textwidth}
\centering
\includegraphics[width=11.5cm,keepaspectratio]{graphs/misery_index}
\label{cut}
\end{subfigure}
~
\begin{subfigure}{.5\textwidth}
\centering
\includegraphics[width=11.5cm,keepaspectratio]{graphs/lab_coin}
\label{cut}
\end{subfigure}
\end{figure}
```{r echo = FALSE, results = "hide"}
chart(ts = "lei", open = F, file = "lei")
chart(ts = "cei", open = F, file = "cei")
chart(ts = "ei_vars", open = F, file = "ei_vars")
chart(ts = "ei_comps", open = F, file = "ei_comps")
```
\begin{figure}[h]
\begin{subfigure}{.5\textwidth}
\centering
\includegraphics[width=11.5cm,keepaspectratio]{graphs/lei}
\label{ipca}
\end{subfigure}
~
\begin{subfigure}{.5\textwidth}
\centering
\includegraphics[width=11.5cm,keepaspectratio]{graphs/cei}
\label{cut}
\end{subfigure}
~
\begin{subfigure}{.5\textwidth}
\centering
\includegraphics[width=11.5cm,keepaspectratio]{graphs/ei_vars}
\label{cut}
\end{subfigure}
~
\begin{subfigure}{.5\textwidth}
\centering
\includegraphics[width=11.5cm,keepaspectratio]{graphs/ei_comps}
\label{cut}
\end{subfigure}
\end{figure}
## Notes:
Shaded areas represent recessions, as dated by CODACE (FGV/IBRE).
### Economic Sentiment
**Definition**: Average between industry, services, construction, retail and consumer confidence indexes.
**Source**: FGV/IBRE
### Misery Index
**Definition**: Inflation rate (IPCA) plus Unemployment Rate (from PNAD-C)
**Source**: IBGE
### Enterprises Confidence Index
**Definition**: Average between industry, services, construction and retail confidence indexes.
**Source**: FGV/IBRE
|
/scratch/gouwar.j/cran-all/cranData/BETS/inst/business_cycle_dashboard.Rmd
|
---
classoption: landscape
sansfont: Calibri Light
author: "Talitha Speranza"
output:
rmarkdown::pdf_document:
includes:
in_header: fix_figs.tex
params:
text: !r NA
author: !r NA
email: !r NA
url: !r NA
logo: !r NA
style: !r NA
charts: !r NA
charts.opts: !r NA
---
\fontfamily{lmss}\selectfont
```{r echo = FALSE, results = "hide"}
knitr::opts_chunk$set(warning=FALSE, message=FALSE)
text = params$text
author = params$author
email = params$email
logo = params$logo
url = params$url
```
```{r echo = FALSE, results = "hide"}
library(BETS)
list.charts = params$charts
num.charts <- length(list.charts)
charts.opts <- params$charts.opts
style <- params$style
mask = 1:num.charts
# Flag which of the 16 chart slots are filled (1 for the first num.charts slots, 0 otherwise)
has.chart = as.integer(1:16 %in% mask)
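# Chart file extension: PNG when the plotly style is requested, PDF for other styles, none when no style is given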
if(!is.na(params$style)){
if(params$style == "plotly"){
ext <- ".png"
} else {
ext <- ".pdf"
}
} else {
ext = ""
}
dir.create("graphs")
chart.FUN <- function(num){
if(has.chart[num] == 1){
chart(ts = list.charts[[num]],
file = paste0("chart", num, ext), params = charts.opts[[num]],
style = style, open = FALSE)
}
}
for(i in 1:16){
chart.FUN(i)
}
has.text = !is.na(text)
```
\def\haslogo{`r as.integer(!is.na(logo))`}
\def\haswebsite{`r as.integer(!is.na(url))`}
\def\hastext{`r as.integer(has.text)`}
\def\first{`r has.chart[1]`}
\def\second{`r has.chart[2]`}
\def\third{`r has.chart[3]`}
\def\fourth{`r has.chart[4]`}
\def\fifth{`r has.chart[5]`}
\def\sixth{`r has.chart[6]`}
\def\seventh{`r has.chart[7]`}
\def\eighth{`r has.chart[8]`}
\def\ninth{`r has.chart[9]`}
\def\tenth{`r has.chart[10]`}
\def\eleventh{`r has.chart[11]`}
\def\twelfth{`r has.chart[12]`}
\def\thirteenth{`r has.chart[13]`}
\def\fourteenth{`r has.chart[14]`}
\def\fifteenth{`r has.chart[15]`}
\def\sixteenth{`r has.chart[16]`}
\fancypagestyle{plain}{
\fancyhf{}%
\setlength{\headheight}{45pt}
\fancyhead[L]{
\includegraphics[width=\linewidth,height=40pt]{logo_custom} }
\ifnum\hastext = 1
\fancyhead[R]{
{\fontfamily{lmss}\selectfont
{\footnotesize Analysis by} \\
\ifnum\haslogo = 1
\includegraphics[width=\linewidth,height=35pt]{`r logo`} \\
\else
`r author` \\
\fi
\ifnum\haswebsite = 1
{\small \url{`r url`}}
\fi
}
}
\fi
\fancyfoot[L]{
{\fontfamily{lmss}\selectfont
{\small Charts generated by the \textbf{BETS R Package} on `r Sys.Date()` $\diamond$ \textbf{\url{https://github.com/pedrocostaferreira/BETS}}}}
}
\fancyfoot[R]{
\thepage\ / \pageref{LastPage} }
}
\pagestyle{plain}
\begin{figure}[h]
\ifnum\first = 1
\begin{subfigure}{.49\textwidth}
\centering
\includegraphics[width=11.0cm,keepaspectratio]{graphs/`r paste0("chart1",ext)`}
\label{chart1}
\end{subfigure}
\fi
~
\ifnum\second = 1
\begin{subfigure}{.49\textwidth}
\centering
\includegraphics[width=11.0cm,keepaspectratio]{graphs/`r paste0("chart2",ext)`}
\label{chart2}
\end{subfigure}
\fi
\end{figure}
```{r, echo = FALSE, results='asis'}
text.len = 0
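# Read the user-supplied text file and split it into sections delimited by "##"; one section is printed per dashboard page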
if(!is.na(text)){
text <- paste(readLines(text), collapse=" ")
text = gsub(pattern = "\\\\n", replacement = " \n", x = text)
text <- strsplit(text, "##")[[1]]
cat(text[1])
text.len = length(text) - 1
}
```
\ifnum\hastext = 1
\newpage
\pagestyle{plain}
\fi
\begin{figure}[h]
\ifnum\third = 1
\begin{subfigure}{.49\textwidth}
\centering
\includegraphics[width=11.0cm,keepaspectratio]{graphs/`r paste0("chart3",ext)`}
\label{chart3}
\end{subfigure}
\fi
~
\ifnum\fourth = 1
\begin{subfigure}{.49\textwidth}
\centering
\includegraphics[width=11.0cm,keepaspectratio]{graphs/`r paste0("chart4",ext)`}
\label{chart4}
\end{subfigure}
\fi
\end{figure}
```{r, echo = FALSE, results='asis'}
if(!is.na(text) && text.len != 0){
cat(text[2])
text.len <- text.len - 1
}
```
\def\hastext{`r as.integer(text.len != 0)`}
\ifnum\fifth = 1
\newpage
\pagestyle{plain}
\begin{figure}[h]
\ifnum\fifth = 1
\begin{subfigure}{.49\textwidth}
\centering
\includegraphics[width=11.0cm,keepaspectratio]{graphs/`r paste0("chart5",ext)`}
\label{chart5}
\end{subfigure}
\fi
~
\ifnum\sixth = 1
\begin{subfigure}{.49\textwidth}
\centering
\includegraphics[width=11.0cm,keepaspectratio]{graphs/`r paste0("chart6",ext)`}
\label{chart6}
\end{subfigure}
\fi
\end{figure}
```{r, echo = FALSE, results='asis'}
if(!is.na(text) && text.len != 0){
cat(text[3])
text.len <- text.len - 1
}
```
\def\hastext{`r as.integer(text.len != 0)`}
\ifnum\hastext = 1
\newpage
\pagestyle{plain}
\fi
\begin{figure}[h]
\ifnum\seventh = 1
\begin{subfigure}{.49\textwidth}
\centering
\includegraphics[width=11.0cm,keepaspectratio]{graphs/`r paste0("chart7",ext)`}
\label{chart7}
\end{subfigure}
\fi
~
\ifnum\eighth = 1
\begin{subfigure}{.49\textwidth}
\centering
\includegraphics[width=11.0cm,keepaspectratio]{graphs/`r paste0("chart8",ext)`}
\label{chart8}
\end{subfigure}
\fi
\end{figure}
```{r, echo = FALSE, results='asis'}
if(!is.na(text) && text.len != 0){
cat(text[4])
text.len <- text.len - 1
}
```
\def\hastext{`r as.integer(text.len != 0)`}
\ifnum\ninth = 1
\newpage
\pagestyle{plain}
\begin{figure}[h]
\ifnum\ninth = 1
\begin{subfigure}{.49\textwidth}
\centering
\includegraphics[width=11.0cm,keepaspectratio]{graphs/`r paste0("chart9",ext)`}
\label{chart9}
\end{subfigure}
\fi
~
\ifnum\tenth = 1
\begin{subfigure}{.49\textwidth}
\centering
\includegraphics[width=11.0cm,keepaspectratio]{graphs/`r paste0("chart10",ext)`}
\label{chart10}
\end{subfigure}
\fi
\end{figure}
```{r, echo = FALSE, results='asis'}
if(!is.na(text) && text.len != 0){
cat(text[5])
text.len <- text.len - 1
}
```
\def\hastext{`r as.integer(text.len != 0)`}
\ifnum\hastext = 1
\newpage
\pagestyle{plain}
\fi
\begin{figure}[h]
\ifnum\eleventh = 1
\begin{subfigure}{.49\textwidth}
\centering
\includegraphics[width=11.0cm,keepaspectratio]{graphs/`r paste0("chart11",ext)`}
\label{chart11}
\end{subfigure}
\fi
~
\ifnum\twelfth = 1
\begin{subfigure}{.49\textwidth}
\centering
\includegraphics[width=11.0cm,keepaspectratio]{graphs/`r paste0("chart12",ext)`}
\label{chart12}
\end{subfigure}
\fi
\end{figure}
```{r, echo = FALSE, results='asis'}
if(!is.na(text) && text.len != 0){
cat(text[6])
text.len <- text.len - 1
}
```
\def\hastext{`r as.integer(text.len != 0)`}
\ifnum\thirteenth = 1
\newpage
\pagestyle{plain}
\begin{figure}[h]
\ifnum\thirteenth = 1
\begin{subfigure}{.49\textwidth}
\centering
\includegraphics[width=11.0cm,keepaspectratio]{graphs/`r paste0("chart13",ext)`}
\label{chart13}
\end{subfigure}
\fi
~
\ifnum\fourteenth = 1
\begin{subfigure}{.49\textwidth}
\centering
\includegraphics[width=11.0cm,keepaspectratio]{graphs/`r paste0("chart14",ext)`}
\label{chart14}
\end{subfigure}
\fi
\end{figure}
```{r, echo = FALSE, results='asis'}
if(!is.na(text) && text.len != 0){
cat(text[7])
text.len <- text.len - 1
}
```
\def\hastext{`r as.integer(text.len != 0)`}
\ifnum\hastext = 1
\newpage
\pagestyle{plain}
\fi
\begin{figure}[h]
\ifnum\fifteenth = 1
\begin{subfigure}{.49\textwidth}
\centering
\includegraphics[width=11.0cm,keepaspectratio]{graphs/`r paste0("chart15",ext)`}
\label{chart15}
\end{subfigure}
\fi
~
\ifnum\sixteenth = 1
\begin{subfigure}{.49\textwidth}
\centering
\includegraphics[width=11.0cm,keepaspectratio]{graphs/`r paste0("chart16",ext)`}
\label{chart16}
\end{subfigure}
\fi
\end{figure}
```{r, echo = FALSE, results='asis'}
if(!is.na(text) && text.len != 0){
cat(text[8])
}
```
\fi
\fi
\fi
|
/scratch/gouwar.j/cran-all/cranData/BETS/inst/custom_dashboard.Rmd
|
## ----echo = F,message = F,warning=FALSE----------------------------------
require(BETS)
## ----eval = F------------------------------------------------------------
# BETSsearch(description, src, periodicity, unit, code, view = TRUE, lang = "en")
## ----echo = F------------------------------------------------------------
library(BETS)
## ----eval = F------------------------------------------------------------
# # Some examples
# BETSsearch(description = "sales ~ retail",view = F)
# BETSsearch(description = "'sales volume index' ~ vehicles",view = F)
# BETSsearch(description = "'distrito federal'", periodicity = 'A', src = 'IBGE',view = F)
## ----eval = F------------------------------------------------------------
# # Search for accumulated GDP series
# BETSsearch(description = "gdp accumulated", unit = "US$", view = F)
## ----echo = F, results='hide'--------------------------------------------
#results <- BETSsearch(description = "gdp accumulated", unit = "US$", view = F)
## ----echo = F------------------------------------------------------------
#results
## ----echo = F, results='hide'--------------------------------------------
#results <- BETSsearch(description = "consumption ~ 'seasonally adjusted' ~ private", view = F)
## ----echo = F------------------------------------------------------------
#head(results)
## ----eval = F------------------------------------------------------------
# BETSget(code, data.frame = FALSE)
## ----eval = F------------------------------------------------------------
# # Get the 12-month cumulative GDP series in dollars
# gdp_accum <- BETSget(4192)
# window(gdp_accum, start = c(2014,1))
## ----eval = F------------------------------------------------------------
# #Get the series for the GDP of the Federal District at market prices
# gdp_df <- BETSget(23992, data.frame = T)
# head(gdp_df)
## ----eval = F------------------------------------------------------------
# saveSas(code, data = NULL, file.name = "series")
# saveSpss(code, data = NULL, file.name = "series")
# saveStata(code, data = NULL, file.name = "series")
## ----eval = F------------------------------------------------------------
# # Save the series for the net public debt in Stata format
# saveStata(code = 2078, file.name = "series_stata.dta")
#
# # Save any series in SPSS format
# my.series <- BETSget(4447)
# saveSpss(data = my.series, file.name = "series_spss")
## ----eval = F------------------------------------------------------------
# chart(ts, file = NULL, open = TRUE, lang = "en", params = NULL)
## ----eval = F------------------------------------------------------------
# # Uncertainty Index chart
# chart(ts = 'iie_br', file = "iie_br", open = TRUE)
#
# # Leading and Coincident Labor Indicators charts
# chart(ts = "lab_mrkt", file = "lab_mrkt.png", open = TRUE)
## ----eval = F------------------------------------------------------------
# dashboard(type = "business_cycle", charts = "all", saveas = NA, parameters = NULL)
## ----eval = F------------------------------------------------------------
# dashboard(type = "business_cycle", saveas = "survey.pdf")
## ----eval = F------------------------------------------------------------
# parameters = list(author = "FGV/IBRE",
# url = "http://portalibre.fgv.br/",
# text = "text.txt",
# logo = "logo_ibre.png")
#
# dashboard(type = "macro_situation", parameters = parameters)
|
/scratch/gouwar.j/cran-all/cranData/BETS/inst/doc/BETS_basic_usage.R
|
---
title: "BETS - Brazilian Economic Time Series: Basic Usage"
bibliography:
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{"BETS - Brazilian Economic Time Series: Basic Usage""}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r,echo = F,message = F,warning=FALSE}
require(BETS)
```
## Introduction
The BETS package (an abbreviation for Brazilian Economic Time Series) for R [@R] allows easy access to the most important Brazilian economic time series and a range of tools for analyzing them. It provides a much-needed single point of access to the many Brazilian series and a simple, flexible and robust interface.
The series in the BETS database are produced by three important and respected institutions: the Central Bank of Brazil (BACEN), the Brazilian Institute of Geography and Statistics (IBGE) and the Brazilian Institute of Economics at the Fundação Getúlio Vargas (FGV/IBRE and FGV Data). BETS was originally conceived to bring together as many series from these centers as possible in a single place because of the difficulties researchers faced obtaining this information. This objective was achieved and the package now contains more than 18,000 Brazilian economic series.
Because of the extremely large size of the databases, BETS was expanded to provide mechanisms that would help analysts to search for, extract and export series. The development of the package then took a new direction and BETS was to become an integrated analysis and learning environment. A range of functionality missing from the R universe was included to make modeling and interpretation of the series available in BETS even easier. In addition, some functions already include the option of generating explanatory outputs that encourage and help users to expand their knowledge.
This article seeks to describe the structure and some of the more important functionality of BETS. Typical examples of how the package can be used are described step by step. In the next section we discuss the database in detail. Then in Section 3 we describe the structure of the package and the different elements it is currently composed of. Section 4 introduces the basic ways in which BETS can be used and shows how users can search for, read and store series. The more advanced functions are also discussed in this section.
## Database \label{banco}
Traditionally, the analyst could obtain the IBRE series in the FGV Data system[^1], which contains all the FGV statistical output and a set of selected indicators from other centers. With BETS it is no longer necessary, since we built a database to store this data and provide access to it. The figure below shows a schematic representation of the BETS database in its current form.
*(Figure: schematic representation of the BETS database.)*
[^1]: FGV Data can be accessed from the IBRE portal at _web_: [Portal IBRE](http://portalibre.fgv.br/).
An important discussion during the conceptual design phase for BETS concerned the location of the database. The option of distributing the data together with the package was discarded as CRAN does not accept very large data files (`.rdas`). This left two alternatives: to collect the data dynamically using the external APIs in the Central Bank of Brazil, IBGE and IBRE systems or to maintain a database with a database management system (DBMS). In its current form, the package implements a hybrid solution. The IBGE and BACEN series are acquired on the fly using the respective APIs. This procedure is not necessary for the FGV/IBRE series as the data are provided directly by the institution, which is supporting the project. These series are kept in a database built using MySQL.
This division is invisible to the user, who does not need to know which database he should look in and will always have access to the most up-to-date data.[^2] This architecture facilitates implementation and maintenance of the structures and increases the efficiency of the package. The data are updated by a team of maintenance staff with the aid of servers, avoiding the need for the user to obtain new data manually. However, this requires that the user be connected to the Internet.
[^2]: Apart from the paid IBRE series, which are necessarily 24 months out-of-date.
Implementation of fast, easy access would not have been possible without a table of the metadata for the available series. This table, which is maintained in the MySQL database, contains information such as description, periodicity, the unit in which the data are represented and the start and end dates for all the series that can be accessed using the package. Each series has a unique code, which is treated as an index by the DBMS. The auxiliary package RMySQL [@RMySQL] acts at the interface, allowing R to connect to the MySQL database.
## Structure of the Package
In the previous section we looked at the different stages involved in data retrieval and organization. We now show how the package is structured and then explain how the end user accesses the databases.
*(Figure: structure of the BETS package.)*
BETS functionality can be divided into four groups:
* **Data Management**: tools for retrieving the series and information about them. This covers not only the private API for extracting data directly from the sources, but also the public API for recovering the data from the database in the package.
* **Dynamic reports**: documents giving details of the analysis and forecasts for a chosen series according to some well-established method. These are generated automatically. All the user has to do is to provide the code for the series in the BETS databases and some additional parameters. The analyses currently available are based on three approaches: Box \& Jenkins, using SARIMA models, general regression neural networks (GRNN) and Holt-Winters exponential smoothing techniques. The documents always contain explanatory comments. However, the next version of BETS will have the option of generating purely technical reports.
* **Dashboards**: scenario analysis documents containing a selection of stylized graphs of the series most frequently used to monitor confidence, uncertainty, growth and prices in the various sectors that make up the Brazilian economy.
* **Additional functions**: these complete the scope of the package and include methods that help analysts and generally make it easier for the analyst to use the information contained in the series.
The user interface with the database is quite intuitive. There is one function for searching, one for extracting the data and a class of functions for external storage (see table below), so that the data can be processed with popular software such as _SAS_, _Stata_ or _SPSS_. In the next section we will look at how these functions should be used.
| Name | Description |
|--------------:|-------------------------------------------------------------------------------:|
| BETSsearch | Searches for series using different characteristics. |
| BETSget | Completely extracts the series from the database and loads it in the R environment |
| saveSpss | Exports the time series in a file with an .spss extension |
| saveSas | Exports the time series in a file with an .sas extension |
| saveStata | Exports the time series in a file with a .dta extension |
With the information provided so far, the promise that BETS holds becomes much more apparent. By going beyond the mere supply of data and providing a wide range of tools for studying time series, this pioneering package allows an inexperienced programmer anywhere in the world to analyze the Brazilian economic scenario. All that it takes to unleash the power of BETS is a simple `install.packages("BETS")`, the only command needed to install the package.
## Using BETS
In this section we discuss some of the basic ways in which the package
can be used.
### Interface with the Database
**BETSsearch**
Because the database is large, it was necessary to develop a way
of searching for series using the metadata, i.e., a search tool that
used some of the information about the series as keywords.
The `BETSsearch` function performs searches in each field of the
metadata table described in Section *Database*. It naturally allows
combinations of these characteristics, making searches more flexible.
Note that access to the BETS database is by means of the `sqldf`
package, which makes the processing of searches sufficiently fast and
ensures that the package performs well in any environment.
The `BETSsearch` prototype has the form:
```{r eval = F}
BETSsearch(description, src, periodicity, unit, code, view = TRUE, lang = "en")
```
where the arguments are, respectively
* `description` - A `character`. A search _string_ to look for matching series descriptions.
* `src` - A `character`. The source of the data.
* `periodicity` - `character`. The frequency with which the series is observed.
* `unit` - A `character`. The unit in which the data were measured.
* `code` - An `integer`. The unique code for the series in the BETS database.
* `view` - A `boolean`. By default, `TRUE`. If `FALSE`, the results will be shown directly on the R console.
* `lang` - A `character`. Search language. By default, _"en"_, for English. A search can also be performed in Portuguese by changing the value to _"pt"_.
To refine the search, there are syntax rules for the parameter `description`:
1. To search for multiple words (all of which must appear), separate them with blank spaces. Example: `description = 'core ipca'` means that the description of the series should contain _"core"_ **and** _"ipca"_.
2. To search for complete expressions, put them inside ' '. Example: `description = 'index 'core ipca''` means that the description of the series should contain _"core ipca"_ **and** _"index"_.
3. To exclude words from the search, insert a **~** before each word. Example: `description = 'ipca ~ core'` means that the description of the series should contain _"ipca"_ and should **not** contain _"core"_.
4. To exclude all the expressions from a search, as in the previous item, place them inside ' ' and insert a **~** before each of them. Example: `description = '~ index 'core ipca''` means that the description of the series should contain _"index"_ and should **not** contain _"core ipca"_.
5. It is possible to search for or exclude certain words as long as these rules are obeyed.
6. A blank space is not required after the exclusion sign (**~**), but is required after each expression or word.
Some examples of how this function is used are given below. We have not
shown the results in some cases as the output can be a very long table.
However, we guarantee that all the calls work and invite the reader to
test them.
```{r echo = F}
library(BETS)
```
```{r eval = F}
# Some examples
BETSsearch(description = "sales ~ retail",view = F)
BETSsearch(description = "'sales volume index' ~ vehicles",view = F)
BETSsearch(description = "'distrito federal'", periodicity = 'A', src = 'IBGE',view = F)
```
```{r eval = F}
# Search for accumulated GDP series
BETSsearch(description = "gdp accumulated", unit = "US$", view = F)
```
```{r echo = F, results='hide'}
#results <- BETSsearch(description = "gdp accumulated", unit = "US$", view = F)
```
```{r echo = F}
#results
```
```{r echo = F, results='hide'}
#results <- BETSsearch(description = "consumption ~ 'seasonally adjusted' ~ private", view = F)
```
```{r echo = F}
#head(results)
```
For further information on `BETSsearch`, including valid values
for each field, consult the reference manual by typing
`?BETSsearch` in the R console.
**BETSget**
`BETSget` only works with the reference code for the series, which
is obtained using `BETSsearch`. The command for this function
takes the form:
```{r eval = F}
BETSget(code, data.frame = FALSE)
```
The parameter `code` is obviously mandatory. The optional argument
`data.frame` represents the type of object that will be returned.
Its default value is `FALSE`, indicating that the object returned
by the function will be a `ts` ( _time series_ ). If
`data.frame = TRUE`, the series will be stored in an object of type
`data.frame`.
We will extract two series that we looked up previously.
```{r eval = F}
# Get the 12-month cumulative GDP series in dollars
gdp_accum <- BETSget(4192)
window(gdp_accum, start = c(2014,1))
```
```{r eval = F}
#Get the series for the GDP of the Federal District at market prices
gdp_df <- BETSget(23992, data.frame = T)
head(gdp_df)
```
**save**
To allow greater flexibility in the way BETS series are stored, files
containing the series can be created in proprietary formats, i.e.,
formats associated with proprietary software. Basically,
`save` extracts the time series from the database in the
package in the form of a `data.frame` and creates a file in the
specified format. There is a table in the file in which the first column
contains the dates and the second, the data.
There are three variations of this function, whose prototypes are shown below:
```{r eval = F}
saveSas(code, data = NULL, file.name = "series")
saveSpss(code, data = NULL, file.name = "series")
saveStata(code, data = NULL, file.name = "series")
```
Again, the parameter `code` receives the code for the series. The
user can provide his own series with the argument `data`, which can
be a `data.frame` or a `ts`. There is no need to add the
extension to the file name in the parameter `file.name`.
Some examples of how this function is used are:
```{r eval = F}
# Save the series for the net public debt in Stata format
saveStata(code = 2078, file.name = "series_stata.dta")
# Save any series in SPSS format
my.series <- BETSget(4447)
saveSpss(data = my.series, file.name = "series_spss")
```
### Some Additional Functions
Here we are going to discuss some of the most distinctive BETS functions.
**chart**
`chart` was originally designed to be a private function for
use with `dashboard`. However, we felt it would be very
helpful for users to have a way to obtain the dashboard graphs
separately so that they could include them in their own work.
The `chart` prototype is:
```{r eval = F}
chart(ts, file = NULL, open = TRUE, lang = "en", params = NULL)
```
The parameter `ts` takes one of several predefined graph options or a user defined series.
There is also the option of saving the output in the working directory
by defining the name of the file with the parameter `file`. If the
file should be opened after it has been created, `open` should be
set to `TRUE`. The parameter `params` is reserved for graphs
of the user's own series, i.e., series that are not predefined. It is a
list that can contain the field `codace`, which receives a Boolean
and indicates whether shaded areas corresponding to recessions
identified by CODACE (FGV/IBRE) should be drawn, and the field
`start`, which specifies what the start date for the series should
be. As the graph concerns the economic scenario, the end date cannot be
changed and is always the last item of data available.
Let us look at two examples of how `chart` is used. The outputs are displayed in the figures below.
```{r eval = F}
# Uncertainty Index chart
chart(ts = 'iie_br', file = "iie_br", open = TRUE)
# Leading and Coincident Labor Indicators charts
chart(ts = "lab_mrkt", file = "lab_mrkt.png", open = TRUE)
```
*(Figures: the Uncertainty Index (iie_br) and the labor market indicators (lab_mrkt) charts.)*
For a complete list of the available graphs, consult the
`chart` reference manual.
**dashboard**
In a previous section, we said that BETS includes a powerful
tool for scenario analysis, the dashboards. Currently, we provide two
options of dashboards: a business cycle and a macroeconomic situation
dashboard. We also plan to extend the dashboards to cover other items in
addition to those that are implemented.
```{r eval = F}
dashboard(type = "business_cycle", charts = "all", saveas = NA, parameters = NULL)
```
To create a dashboard, we call `dashboard`, setting the
`type` parameter either to `business_cycle` or
*"macro_situation"*. It generates a `.pdf` and, if
`type` is set to *"business_cycle"*, one of its five pages is going
to look similar to that in the next figure. In the example, the
user chooses to save the file with the name *survey.pdf*. The
graphs shown can also be chosen by the user with the `charts`
parameter, which is *"all"* by default. The reference manual contains
a complete list of the available graphs.
```{r eval = F}
dashboard(type = "business_cycle", saveas = "survey.pdf")
```
*(Figure: sample page of the business cycle dashboard.)*
The macroeconomic situation dashboard allows the user to insert a custom explanatory text,
as well as a personal logo, email and website. For instance, the call below generates a
four page dashboard whose first page is shown below.
```{r eval = F}
parameters = list(author = "FGV/IBRE",
url = "http://portalibre.fgv.br/",
text = "text.txt",
logo = "logo_ibre.png")
dashboard(type = "macro_situation", parameters = parameters)
```
The main advantage of such dashboards is that they are a convenient instrument to
quickly understand the subject they are about. Their values are always the most
recent ones, they are organized in well-defined sections, and their design is highly informative.
*(Figure: first page of the macroeconomic situation dashboard.)*
## References
Ooms, J., D. James, S. DebRoy, H. Wickham, and J. Horner. 2016. RMySQL: Database Interface and Mysql Driver for R. https://cran.r-project.org/package=RMySQL.
R Core Team. 2012. R: A Language and Environment for Statistical Computing. Vienna, Austria: R Foundation for Statistical Computing. https://www.R-project.org/.
|
/scratch/gouwar.j/cran-all/cranData/BETS/inst/doc/BETS_basic_usage.Rmd
|
---
classoption: landscape
sansfont: Calibri Light
author: "Talitha Speranza"
output:
rmarkdown::pdf_document:
includes:
in_header: fix_figs.tex
params:
text: !r NA
author: !r NA
email: !r NA
url: !r NA
logo: !r NA
---
\fontfamily{lmss}\selectfont
```{r echo = FALSE, results = "hide"}
knitr::opts_chunk$set(warning=FALSE, message=FALSE)
text = params$text
author = params$author
email = params$email
logo = params$logo
url = params$url
```
```{r echo = FALSE}
library(BETS)
```
\def\haslogo{`r as.integer(!is.na(logo))`}
\def\haswebsite{`r as.integer(!is.na(url))`}
\def\hastext{`r as.integer(!is.na(text))`}
\fancypagestyle{plain}{
\fancyhf{}%
\setlength{\headheight}{45pt}
\fancyhead[L]{
\includegraphics[width=\linewidth,height=40pt]{logo_situation} }
\ifnum\hastext = 1
\fancyhead[R]{
{\fontfamily{lmss}\selectfont
{\footnotesize Analysis by} \\
\ifnum\haslogo = 1
\includegraphics[width=\linewidth,height=35pt]{`r logo`} \\
\else
`r author` \\
\fi
\ifnum\haswebsite = 1
{\small \url{`r url`}}
\fi
}
}
\fi
\fancyfoot[L]{
{\fontfamily{lmss}\selectfont
{\small Charts generated by the \textbf{BETS R Package} on `r Sys.Date()` $\diamond$ \textbf{\url{https://github.com/pedrocostaferreira/BETS}}}}
}
\fancyfoot[R]{
\thepage\ / \pageref{LastPage} }
}
\pagestyle{plain}
```{r echo = FALSE, results = "hide"}
dir.create("graphs")
chart(ts = "ipca_with_core", file = "ipca.pdf", open = FALSE)
```
```{r echo = FALSE, results = "hide"}
chart(ts = "ulc", file = "ulc.pdf", open = FALSE)
```
\begin{figure}[h]
\begin{subfigure}{.49\textwidth}
\centering
\includegraphics[width=1\linewidth]{graphs/ipca}
\label{ipca}
\end{subfigure}
~
\begin{subfigure}{.49\textwidth}
\centering
\includegraphics[width=1\linewidth]{graphs/ulc}
\label{cut}
\end{subfigure}
\end{figure}
```{r, echo = FALSE, results='asis'}
if(!is.na(text)){
text <- paste(readLines(text), collapse=" ")
text = gsub(pattern = "\\\\n", replacement = " \n", x = text)
text <- strsplit(text, "##")[[1]]
cat(text[1])
}
```
\ifnum\hastext = 1
\newpage
\pagestyle{plain}
\fi
```{r echo = FALSE, results = "hide"}
chart(ts = "eap", file = "eap.pdf", open = FALSE)
```
```{r echo = FALSE, results = "hide"}
chart(ts = "unemp", file = "unemp.pdf", open = FALSE)
```
\begin{figure}[h]
\begin{subfigure}{.49\textwidth}
\centering
\includegraphics[width=1\linewidth]{graphs/eap}
\label{ipca}
\end{subfigure}
~
\begin{subfigure}{.49\textwidth}
\centering
\includegraphics[width=1\linewidth]{graphs/unemp}
\label{cut}
\end{subfigure}
\end{figure}
```{r, echo = FALSE, results='asis'}
if(!is.na(text)){
cat(text[2])
}
```
\newpage
\pagestyle{plain}
```{r echo = FALSE, results = "hide"}
chart(ts = "vargdp", file = "vargdp.pdf", open = FALSE)
```
```{r echo = FALSE, results = "hide"}
chart(ts = "indprod", file = "indprod.pdf", open = FALSE)
```
\begin{figure}[h]
\begin{subfigure}{.49\textwidth}
\centering
\includegraphics[width=1\linewidth]{graphs/vargdp}
\label{ipca}
\end{subfigure}
~
\begin{subfigure}{.49\textwidth}
\centering
\includegraphics[width=1\linewidth]{graphs/indprod}
\label{cut}
\end{subfigure}
\end{figure}
```{r, echo = FALSE, results='asis'}
if(!is.na(text)){
cat(text[3])
}
```
\ifnum\hastext = 1
\newpage
\pagestyle{plain}
\fi
```{r echo = FALSE, results = "hide"}
chart(ts = "selic", file = "selic.pdf", open = FALSE)
```
```{r echo = FALSE, results = "hide"}
chart(ts = "cdb", file = "cdb.pdf", open = FALSE)
```
\begin{figure}[h]
\begin{subfigure}{.49\textwidth}
\centering
\includegraphics[width=1\linewidth]{graphs/selic}
\label{ipca}
\end{subfigure}
~
\begin{subfigure}{.49\textwidth}
\centering
\includegraphics[width=1\linewidth]{graphs/cdb}
\label{cut}
\end{subfigure}
\end{figure}
```{r, echo = FALSE, results='asis'}
if(!is.na(text)){
cat(text[4])
}
```
|
/scratch/gouwar.j/cran-all/cranData/BETS/inst/macro_situation_dashboard.Rmd
|
---
title: "BETS - Brazilian Economic Time Series: Basic Usage"
bibliography:
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{"BETS - Brazilian Economic Time Series: Basic Usage""}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r,echo = F,message = F,warning=FALSE}
require(BETS)
```
## Introduction
The BETS package (an abbreviation for Brazilian Economic Time Series) for R [@R] allows easy access to the most important Brazilian economic time series and a range of tools for analyzing them. It provides a much-needed single point of access to the many Brazilian series and a simple, flexible and robust interface.
The series in the BETS database are produced by three important and respected institutions: the Central Bank of Brazil (BACEN), the Brazilian Institute of Geography and Statistics (IBGE) and the Brazilian Institute of Economics at the Fundação Getúlio Vargas (FGV/IBRE and FGV Data). BETS was originally conceived to bring together as many series from these centers as possible in a single place because of the difficulties researchers faced obtaining this information. This objective was achieved and the package now contains more than 18,000 Brazilian economic series.
Because of the extremely large size of the databases, BETS was expanded to provide mechanisms that would help analysts to search for, extract and export series. The development of the package then took a new direction and BETS was to become an integrated analysis and learning environment. A range of functionality missing from the R universe was included to make modeling and interpretation of the series available in BETS even easier. In addition, some functions already include the option of generating explanatory outputs that encourage and help users to expand their knowledge.
This article seeks to describe the structure and some of the more important functionality of BETS. Typical examples of how the package can be used are described step by step. In the next section we discuss the database in detail. Then in Section 3 we describe the structure of the package and the different elements it is currently composed of. Section 4 introduces the basic ways in which BETS can be used and shows how users can search for, read and store series. The more advanced functions are also discussed in this section.
## Database \label{banco}
Traditionally, the analyst could obtain the IBRE series in the FGV Data system[^1], which contains all the FGV statistical output and a set of selected indicators from other centers. With BETS it is no longer necessary, since we built a database to store this data and provide access to it. The figure below shows a schematic representation of the BETS database in its current form.
*(Figure: schematic representation of the BETS database.)*
[^1]: FGV Data can be accessed from the IBRE portal at _web_: [Portal IBRE](http://portalibre.fgv.br/).
An important discussion during the conceptual design phase for BETS concerned the location of the database. The option of distributing the data together with the package was discarded as CRAN does not accept very large data files (`.rdas`). This left two alternatives: to collect the data dynamically using the external APIs in the Central Bank of Brazil, IBGE and IBRE systems or to maintain a database with a database management system (DBMS). In its current form, the package implements a hybrid solution. The IBGE and BACEN series are acquired on the fly using the respective APIs. This procedure is not necessary for the FGV/IBRE series as the data are provided directly by the institution, which is supporting the project. These series are kept in a database built using MySQL.
This division is invisible to the user, who does not need to know which database he should look in and will always have access to the most up-to-date data.[^2] This architecture facilitates implementation and maintenance of the structures and increases the efficiency of the package. The data are updated by a team of maintenance staff with the aid of servers, avoiding the need for the user to obtain new data manually. However, this requires that the user be connected to the Internet.
[^2]: Apart from the paid IBRE series, which are necessarily 24 months out-of-date.
Implementation of fast, easy access would not have been possible without a table of the metadata for the available series. This table, which is maintained in the MySQL database, contains information such as description, periodicity, the unit in which the data are represented and the start and end dates for all the series that can be accessed using the package. Each series has a unique code, which is treated as an index by the DBMS. The auxiliary package RMySQL [@RMySQL] acts at the interface, allowing R to connect to the MySQL database.
## Structure of the Package
In the previous section we looked at the different stages involved in data retrieval and organization. We now show how the package is structured and then explain how the end user accesses the databases.
*(Figure: structure of the BETS package.)*
BETS functionality can be divided into four groups:
* **Data Management**: tools for retrieving the series and information about them. This covers not only the private API for extracting data directly from the sources, but also the public API for recovering the data from the database in the package.
* **Dynamic reports**: documents giving details of the analysis and forecasts for a chosen series according to some well-established method. These are generated automatically. All the user has to do is to provide the code for the series in the BETS databases and some additional parameters. The analyses currently available are based on three approaches: Box \& Jenkins, using SARIMA models, general regression neural networks (GRNN) and Holt-Winters exponential smoothing techniques. The documents always contain explanatory comments. However, the next version of BETS will have the option of generating purely technical reports.
* **Dashboards**: scenario analysis documents containing a selection of stylized graphs of the series most frequently used to monitor confidence, uncertainty, growth and prices in the various sectors that make up the Brazilian economy.
* **Additional functions**: these complete the scope of the package and include methods that help analysts and generally make it easier for the analyst to use the information contained in the series.
The user interface with the database is quite intuitive. There is one function for searching, one for extracting the data and a class of functions for external storage (see table below), so that the data can be processed with popular software such as _SAS_, _Stata_ or _SPSS_. In the next section we will look at how these functions should be used.
| Name | Description |
|--------------:|-------------------------------------------------------------------------------:|
| BETSsearch | Searches for series using different characteristics. |
| BETSget | Completely extracts the series from the database and loads it in the R environment |
| saveSpss | Exports the time series in a file with an .spss extension |
| saveSas | Exports the time series in a file with an .sas extension |
| saveStata | Exports the time series in a file with a .dta extension |
With the information provided so far, the promise that BETS holds becomes much more apparent. By going beyond the mere supply of data and providing a wide range of tools for studying time series, this pioneering package allows an inexperienced programmer anywhere in the world to analyze the Brazilian economic scenario. All that it takes to unleash the power of BETS is a simple `install.packages("BETS")`, the only command needed to install the package.
## Using BETS
In this section we discuss some of the basic ways in which the package
can be used.
### Interface with the Database
**BETSsearch**
Because the database is large, it was necessary to develop a way
of searching for series using the metadata, i.e., a search tool that
used some of the information about the series as keywords.
The `BETSsearch` function performs searches in each field of the
metadata table described in Section *Database*. It naturally allows
combinations of these characteristics, making searches more flexible.
Note that access to the BETS database is by means of the `sqldf`
package, which makes the processing of searches sufficiently fast and
ensures that the package performs well in any environment.
The `BETSsearch` prototype has the form:
```{r eval = F}
BETSsearch(description, src, periodicity, unit, code, view = TRUE, lang = "en")
```
where the arguments are, respectively
* `description` - A `character`. A search _string_ to look for matching series descriptions.
* `src` - A `character`. The source of the data.
* `periodicity` - `character`. The frequency with which the series is observed.
* `unit` - A `character`. The unit in which the data were measured.
* `code` - An `integer`. The unique code for the series in the BETS database.
* `view` - A `boolean`. By default, `TRUE`. If `FALSE`, the results will be shown directly on the R console.
* `lang` - A `character`. Search language. By default, _"en"_, for English. A search can also be performed in Portuguese by changing the value to _"pt"_.
To refine the search, there are syntax rules for the parameter `description`:
1. To search for multiple words (all of which must appear), separate them with blank spaces. Example: `description = 'core ipca'` means that the description of the series should contain _"core"_ **and** _"ipca"_.
2. To search for complete expressions, put them inside ' '. Example: `description = 'index 'core ipca''` means that the description of the series should contain _"core ipca"_ **and** _"index"_.
3. To exclude words from the search, insert a **~** before each word. Example: `description = 'ipca ~ core'` means that the description of the series should contain _"ipca"_ and should **not** contain _"core"_.
4. To exclude all the expressions from a search, as in the previous item, place them inside ' ' and insert a **~** before each of them. Example: `description = '~ index 'core ipca''` means that the description of the series should contain _"index"_ and should **not** contain _"core ipca"_.
5. It is possible to search for or exclude certain words as long as these rules are obeyed.
6. A blank space is not required after the exclusion sign (**~**), but is required after each expression or word.
Some examples of how this function is used are given below. We have not
shown the results in some cases as the output can be a very long table.
However, we guarantee that all the calls work and invite the reader to
test them.
```{r echo = F}
library(BETS)
```
```{r eval = F}
# Some examples
BETSsearch(description = "sales ~ retail",view = F)
BETSsearch(description = "'sales volume index' ~ vehicles",view = F)
BETSsearch(description = "'distrito federal'", periodicity = 'A', src = 'IBGE',view = F)
```
```{r eval = F}
# Search for accumulated GDP series
BETSsearch(description = "gdp accumulated", unit = "US$", view = F)
```
```{r echo = F, results='hide'}
#results <- BETSsearch(description = "gdp accumulated", unit = "US$", view = F)
```
```{r echo = F}
#results
```
```{r echo = F, results='hide'}
#results <- BETSsearch(description = "consumption ~ 'seasonally adjusted' ~ private", view = F)
```
```{r echo = F}
#head(results)
```
For further information on `BETSsearch`, including valid values
for each field, consult the reference manual by typing
`?BETSsearch` in the R console.
**BETSget**
`BETSget` only works with the reference code for the series, which
is obtained using `BETSsearch`. The command for this function
takes the form:
```{r eval = F}
BETSget(code, data.frame = FALSE)
```
The parameter `code` is obviously mandatory. The optional argument
`data.frame` represents the type of object that will be returned.
Its default value is `FALSE`, indicating that the object returned
by the function will be a `ts` ( _time series_ ). If
`data.frame = TRUE`, the series will be stored in an object of type
`data.frame`.
We will extract two series that we looked up previously.
```{r eval = F}
# Get the 12-month cumulative GDP series in dollars
gdp_accum <- BETSget(4192)
window(gdp_accum, start = c(2014,1))
```
```{r eval = F}
#Get the series for the GDP of the Federal District at market prices
gdp_df <- BETSget(23992, data.frame = T)
head(gdp_df)
```
**save**
To allow greater flexibility in the way BETS series are stored, files
containing the series can be created in proprietary formats, i.e.,
formats associated with proprietary software. Basically,
`save` extracts the time series from the database in the
package in the form of a `data.frame` and creates a file in the
specified format. There is a table in the file in which the first column
contains the dates and the second, the data.
There are three variations of this function, whose prototypes are shown below:
```{r eval = F}
saveSas(code, data = NULL, file.name = "series")
saveSpss(code, data = NULL, file.name = "series")
saveStata(code, data = NULL, file.name = "series")
```
Again, the parameter `code` receives the code for the series. The
user can provide his own series with the argument `data`, which can
be a `data.frame` or a `ts`. There is no need to add the
extension to the file name in the parameter `file.name`.
Some examples of how this function is used are:
```{r eval = F}
# Save the series for the net public debt in Stata format
saveStata(code = 2078, file.name = "series_stata.dta")
# Save any series in SPSS format
my.series <- BETSget(4447)
saveSpss(data = my.series, file.name = "series_spss")
```
### Some Additional Functions
Here we are going to discuss some of the most distinctive BETS functions.
**chart**
`chart` was originally designed to be a private function for
use with `dashboard`. However, we felt it would be very
helpful for users to have a way to obtain the dashboard graphs
separately so that they could include them in their own work.
The `chart` prototype is:
```{r eval = F}
chart(ts, file = NULL, open = TRUE, lang = "en", params = NULL)
```
The parameter `ts` takes one of several predefined graph options or a user defined series.
There is also the option of saving the output in the working directory
by defining the name of the file with the parameter `file`. If the
file should be opened after it has been created, `open` should be
set to `TRUE`. The parameter `params` is reserved for graphs
of the user's own series, i.e., series that are not predefined. It is a
list that can contain the field `codace`, which receives a Boolean
and indicates whether shaded areas corresponding to recessions
identified by CODACE (FGV/IBRE) should be drawn, and the field
`start`, which specifies what the start date for the series should
be. As the graph concerns the economic scenario, the end date cannot be
changed and is always the last item of data available.
Let us look at two examples of how `chart` is used. The outputs are displayed in the figures below.
```{r eval = F}
# Uncertainty Index chart
chart(ts = 'iie_br', file = "iie_br", open = TRUE)
# Leading and Coincident Labor Indicators charts
chart(ts = "lab_mrkt", file = "lab_mrkt.png", open = TRUE)
```
*(Figures: the Uncertainty Index (iie_br) and the labor market indicators (lab_mrkt) charts.)*
For a complete list of the available graphs, consult the
`chart` reference manual.
**dashboard**
In a previous section, we said that BETS includes a powerful
tool for scenario analysis, the dashboards. Currently, we provide two
options of dashboards: a business cycle and a macroeconomic situation
dashboard. We also plan to extend the dashboards to cover other items in
addition to those that are implemented.
```{r eval = F}
dashboard(type = "business_cycle", charts = "all", saveas = NA, parameters = NULL)
```
To create a dashboard, we call `dashboard`, setting the
`type` parameter either to `business_cycle` or
*"macro_situation"*. It generates a `.pdf` and, if
`type` is set to *"business_cycle"*, one of its five pages is going
to look similar to that in the next figure. In the example, the
user chooses to save the file with the name *survey.pdf*. The
graphs shown can also be chosen by the user with the `charts`
parameter, which is *"all"* by default. The reference manual contains
a complete list of the available graphs.
```{r eval = F}
dashboard(type = "business_cycle", saveas = "survey.pdf")
```
*(Figure: sample page of the business cycle dashboard.)*
The macroeconomic situation dashboard allows the user to insert a custom explanatory text,
as well as a personal logo, email and website. For instance, the call below generates a
four page dashboard whose first page is shown below.
```{r eval = F}
parameters = list(author = "FGV/IBRE",
url = "http://portalibre.fgv.br/",
text = "text.txt",
logo = "logo_ibre.png")
dashboard(type = "macro_situation", parameters = parameters)
```
The main advantage of such dashboards is that they are a convenient instrument to
quickly understand the subject they are about. Their values are always the most
recent ones, they are organized in well-defined sections, and their design is highly informative.
*(Figure: first page of the macroeconomic situation dashboard.)*
## References
Ooms, J., D. James, S. DebRoy, H. Wickham, and J. Horner. 2016. RMySQL: Database Interface and Mysql Driver for R. https://cran.r-project.org/package=RMySQL.
R Core Team. 2012. R: A Language and Environment for Statistical Computing. Vienna, Austria: R Foundation for Statistical Computing. https://www.R-project.org/.
|
/scratch/gouwar.j/cran-all/cranData/BETS/vignettes/BETS_basic_usage.Rmd
|
#' Likelihood method for analyzing bioequivalence (BE) trial data
#'
#' This package will calculate and plot the profile likelihoods for the mean difference and standard deviation ratios of a test drug
#' to a reference drug for AUC or Cmax from various crossover designs commonly used in BE studies, such as a fully replicated crossover
#' design (e.g., 2x4 two-sequence, four-period, RTRT/TRTR), a partially replicated crossover design (e.g., 2x3, two-sequence, three-period, RTR/TRT), and a two-sequence, two-period crossover design (2x2, RT/TR), where "R" stands for a reference drug and "T" stands for a test drug.
#'
#' @docType package
#'
#' @importFrom mvtnorm dmvnorm
#' @importFrom stats as.formula na.exclude nlm
#' @import ggplot2
"_PACKAGE"
|
/scratch/gouwar.j/cran-all/cranData/BElikelihood/R/BElikelihood-package.R
|
#' Data example for bioequivalence (BE) study
#'
#' The dataset is a bioequivalence dataset from a fully replicated 2x4 crossover design with RTRT and TRTR as sequences.
#' It is a subset of Example 4.4 in Chapter 4 of Patterson and Jones's book.
#'
#' @format
#' A data frame with 176 observations (from 44 subjects) on 6 variables:
#' \describe{
#' \item{subject}{subject ID}
#' \item{sequence}{RTRT or TRTR, where T and R stand for test and reference drugs, respectively}
#' \item{period}{1 to 4 for crossover period}
#' \item{formula}{T or R, standing for the test and reference drugs, respectively}
#' \item{AUC}{a pharmacokinetic parameter - the area under the blood/plasma concentration-time curve}
#' \item{CMAX}{a pharmacokinetic parameter - the peak concentration}
#' }
#'
#' @usage data(dat, package = 'BElikelihood')
#'
#' @keywords dataset
#'
#' @source Patterson S and Jones B (2023). Bioequivalence and Statistics in Clinical Pharmacology. Chapman Hall/CRC Press.
#'
#' @examples
#' data(dat)
"dat"
|
/scratch/gouwar.j/cran-all/cranData/BElikelihood/R/dat.R
|
#' Print method for proLikelihood object
#'
#' Print \sQuote{poi} (mean difference, total standard deviation ratio or within-subject standard deviation ratio) and \sQuote{maxLik} (corresponding profile likelihood) elements from a proLikelihood object.
#'
#' @param x proLikelihood object
#' @param \dots unused
#'
#' @return Output the mean difference, total standard deviation ratio or within-subject standard deviation ratio values (depending on the \sQuote{method}) with
#' the calculated corresponding profile likelihood values.
#'
#' @examples
#' \donttest{
#' data(dat)
#' cols <- list(subject = 'subject', formula = 'formula', y = 'AUC')
#' l <- averageBE(dat, colSpec = cols, xlength = 300)
#' l
#' }
#'
#' @export
print.proLikelihood <- function(x, ...) {
print(cbind(x$poi, x$maxLik))
invisible(x)
}
#' Plot method for proLikelihood object
#'
#' This function generates a plot of a standardized profile likelihood after running the proLikelihood() function.
#'
#' The function generates a plot of the standardized profile likelihood (the profile likelihood relative to the maximum) with
#' the maximum likelihood estimate and 1/8 and 1/32 likelihood intervals for the parameter of interest (mean difference,
#' total standard deviation ratio or within-subject standard deviation ratio depending on the \sQuote{method}) printed inside the plot.
#'
#' @param x proLikelihood object
#' @param textx numeric value, position (x-axis) of the label for the maximum likelihood estimate and the 1/8 and 1/32 likelihood intervals.
#' @param texty numeric value, position (y-axis) of the label for the maximum likelihood estimate and the 1/8 and 1/32 likelihood intervals.
#' @param textsize numeric value, text size of the label.
#' @param \dots unused
#'
#' @return ggplot2 object, a plot of the standardized profile likelihood with the maximum likelihood estimate and 1/8 and 1/32
#' likelihood intervals printed inside the plot.
#'
#' @examples
#' \donttest{
#' data(dat)
#' cols <- list(subject = 'subject', formula = 'formula', y = 'AUC')
#' p4a <- averageBE(dat, colSpec = cols, xlength = 50)
#' p4t <- totalVarianceBE(dat, colSpec = cols, xlength = 50)
#' p4w <- withinVarianceBE(dat, colSpec = cols, xlength = 50)
#' plot(p4a)
#' plot(p4t)
#' plot(p4w)
#' # three period case
#' dd3 <- dat[dat$period < 4,]
#' p3a <- averageBE(dd3, colSpec = cols, xlength = 50)
#' plot(p3a)
#' # two period case
#' dd2 <- dat[dat$period < 3,]
#' p2a <- averageBE(dd2, colSpec = cols, xlength = 50)
#' plot(p2a)
#' }
#'
#' @export
plot.proLikelihood <- function(x, textx, texty = 0.9, textsize = 3, ...) {
a6 <- round(x$MAX, 3)
b6 <- round(x$LI['1/8 LI','lower'], 3)
c6 <- round(x$LI['1/8 LI','upper'], 3)
d6 <- round(x$LI['1/32 LI','lower'], 3)
e6 <- round(x$LI['1/32 LI','upper'], 3)
## make a data frame##
lik.norm <- x$maxLik / max(x$maxLik, na.rm = TRUE)
profile <- data.frame(poi = x$poi, lik.norm)
if(x$method == 'average') {
vlinelow <- -0.223
vlineup <- 0.223
xlabel <- expression(mu[T]-mu[R])
deftx <- 0
} else if(x$method == 'total') {
vlinelow <- 0.4
vlineup <- 2.5
xlabel <- bquote(sigma[TT]/sigma[TR])
deftx <- 2
} else if(x$method == 'within') {
vlinelow <- 0.4
vlineup <- 2.5
xlabel <- bquote(sigma[WT]/sigma[WR])
deftx <- 2
}
poi <- NA # avoid CHK warning
if(missing(textx)) {
textx <- deftx
}
l1 <- sprintf('Max at %s', a6)
l2 <- sprintf('1/8 LI (%s,%s)', b6, c6)
l3 <- sprintf('1/32 LI (%s,%s)', d6, e6)
maxlabel <- paste(l1, l2, l3, sep = '\n')
p6 <- ggplot(data = profile, aes(poi, lik.norm), colour="black")
p6 <- p6 + geom_line(size = 0.2) +
geom_text(x = textx, y = texty, label = maxlabel, size = textsize) +
geom_segment(aes(x = b6, y = 1/8, xend = c6, yend = 1/8), size = 0.2) +
geom_segment(aes(x = d6, y = 1/32, xend = e6, yend = 1/32), size = 0.2) +
geom_vline(xintercept = c(vlinelow, vlineup), linetype = 2, size = 0.2) +
ylab("Standardized profile likelihood") +
xlab(xlabel)
print(p6)
}
|
/scratch/gouwar.j/cran-all/cranData/BElikelihood/R/printplot.R
|
#' Calculate profile likelihood for bioequivalence data
#'
#' This is a general function to calculate the profile likelihoods for the mean difference, total standard deviation ratio,
#' and within-subject standard deviation ratio of the test drug to the reference drug from bioequivalence (BE) study data.
#' Standardized profile likelihood plots with the 1/8 and 1/32 likelihood intervals can be generated using the plot method.
#' The within-subject standard deviation ratio can be obtained only for a fully replicated 2x4 or a partially replicated 2x3 design.
#'
#' @details This function implements a likelihood method for evaluating BE for pharmacokinetic parameters (AUC and Cmax) (see reference below). It accepts a dataframe collected with various crossover designs commonly used in BE studies such as
#' a fully replicated crossover design (e.g., 2x4 two-sequence, four-period, RTRT/TRTR), a partially replicated crossover design
#' (e.g., 2x3, two-sequence, three-period, RTR/TRT), and a two-sequence, two-period crossover design (2x2, RT/TR),
#' where "R" stands for a reference drug and "T" stands for a test drug.
#' It allows missing data (for example, a subject may miss the period 2 data) and utilizes all available data. It will
#' calculate the profile likelihoods for the mean difference, total standard deviation ratio, and within-subject standard deviation ratio.
#' Plots of standardized profile likelihood can be generated and provide evidence for various quantities of interest for evaluating
#' BE in a unified framework.
#'
#' @param dat data frame contains BE data (AUC and Cmax) with missing data allowed.
#' @param colSpec a named list that should specify columns in \sQuote{dat}; \sQuote{subject} (subject ID),
#' \sQuote{formula} (must be coded as T or R, where T for test drug and R for reference drug), and \sQuote{y} (either AUC or Cmax) are
#' required. \sQuote{period} and \sQuote{seq} may also be provided.
#' The \sQuote{formula} column should identify a test or a reference drug with R and T.
#' @param theta An optional numeric vector contains initial values of the parameters for use in optimization.
#' For example, in a 2x4 fully replicated design, the vector is [mu, p2, p3, p4, S, phi, log(sbt2), log(sbr2), log(swt2), log(swr2), rho], where
#' \sQuote{mu} is the population mean for the reference drug when there are no period or sequence effects; \sQuote{p2} to \sQuote{p4} are fixed
#' period effects with period 1 as the reference period; \sQuote{S} the fixed sequence effect with seq 1 as the reference sequence; \sQuote{phi}
#' is the mean difference between the two drugs; \sQuote{sbt2} and \sQuote{sbr2} are between-subject variances for the test and reference drugs,
#' respectively; \sQuote{swt2} and \sQuote{swr2} are within-subject variances for the test and reference drugs, respectively; \sQuote{rho} is
#' the correlation coefficient within a subject. When \sQuote{theta} (default is null) is not provided, the function
#' will choose the starting values automatically based on a linear mixed-effects model. If users want to provide these values, for method
#' \sQuote{average} (mean difference), user may put any value for \sQuote{phi}. Similarly, for method \sQuote{total}, user can put any value
#' for \sQuote{log(sbt2)}, and for method \sQuote{within}, user can put any value for \sQuote{log(swt2)}.
#' @param xlow numeric value, the lower limit of x-axis for the profile likelihood plot, at which the profile likelihood is calculated. It is
#' optional and can be automatically generated using the maximum likelihood estimate (MLE) depending on the \sQuote{method}. We strongly
#' recommend that users try a value that better fits the purpose at hand.
#' @param xup numeric value, the upper limit of x-axis for the profile likelihood plot, at which the profile likelihood is calculated. It is
#' optional and can be automatically generated using the MLE depending on the \sQuote{method}. We strongly recommend that users try
#' a value that better fits the purpose at hand.
#' @param xlength numeric value. Defaults to 100. It is the number of grid points between the lower and upper limits, which controls the smoothness of
#' the curve. It will take longer to run with a larger number of grid points, but we strongly recommend using a larger number than the default
#' value.
#' @param method character value. Should be one of \sQuote{average}, \sQuote{total}, or \sQuote{within}.
#' \sQuote{average} will provide the profile likelihood for the mean difference between test and reference drugs.
#' \sQuote{total} will provide the profile likelihood for the total standard deviation ratio of test to reference drug. \sQuote{within}
#' will provide the profile likelihood for the within-subject standard deviation ratio of test to reference drug when appropriate.
#'
#' @return A \sQuote{proLikelihood} object, with elements \sQuote{poi}, \sQuote{maxLik}, \sQuote{MAX}, \sQuote{LI}, and \sQuote{method}.
#' \sQuote{poi} and \sQuote{maxLik} are the interested parameter (mean difference, total standard deviation ratio
#' or within-subject standard deviation ratio) values and the corresponding profile likelihood values, respectively. \sQuote{MAX} is the MLE
#' estimate for that parameter. \sQuote{LI} is the likelihood intervals with the 1/4.5, 1/8 and 1/32 intervals.
#' \sQuote{method} is one of \sQuote{average},\sQuote{total}, and \sQuote{within}.
#'
#' @references Liping Du and Leena Choi, Likelihood approach for evaluating bioequivalence of highly variable drugs, Pharmaceutical Statistics, 14(2): 82-94, 2015
#'
#' @examples
#' \donttest{
#' data(dat)
#' cols <- list(subject = 'subject', formula = 'formula', y = 'AUC')
#' p4a <- proLikelihood(dat, colSpec = cols, xlength = 300, method = 'average')
#' p4t <- proLikelihood(dat, colSpec = cols, xlength = 300, method = 'total')
#' p4w <- proLikelihood(dat, colSpec = cols, xlength = 300, method = 'within')
#' # three period case
#' dd3 <- dat[dat$period < 4,]
#' p3a <- averageBE(dd3, colSpec = cols, xlength = 300)
#' p3t <- totalVarianceBE(dd3, colSpec = cols, xlength = 300)
#' p3w <- withinVarianceBE(dd3, colSpec = cols, xlength = 300)
#' # two period case
#' dd2 <- dat[dat$period < 3,]
#' p2a <- averageBE(dd2, colSpec = cols, xlength = 300)
#' p2t <- totalVarianceBE(dd2, colSpec = cols, xlength = 300)
#' }
#'
#' @export
proLikelihood <- function(dat, colSpec = list(), theta = NULL, xlow, xup, xlength = 100, method) {
m <- match.arg(method, c('average','total','within'))
e <- setup_env(dat, colSpec)
TRname <- e$TRname
TRnum <- e$TRnum
nSeq <- max(e$seq)
nPeriods <- max(e$period)
if(nSeq != 2) stop('data must contain two sequences')
if(m == 'within' && (nPeriods < 3 || nPeriods > 4)) stop('data must contain 3-4 periods')
if(nPeriods < 2 || nPeriods > 4) stop('data must contain 2-4 periods')
subject1 <- unique(e$subject[e$seq == 1]) ## unique subjects in seq 1
subject2 <- unique(e$subject[e$seq == 2]) ## unique subjects in seq 2
n1 <- length(subject1) ## number of subjects in seq 1
n2 <- length(subject2) ## number of subjects in seq 2
###get the starting value for theta if not provided###
if(is.null(theta)) theta <- select_theta(e, nPeriods)
expThetaSize <- nPeriods + 4 + 3 # fixed effects (mu, period effects, S, phi) = nPeriods + 2, plus 4 log-variances and rho
if(length(theta) < expThetaSize) {
stop('the specified theta has too few values')
}
if(missing(xlow) && missing(xup)) {
if(m == 'average') {
spot <- theta[nPeriods + 2]
xlow <- min(-0.225, spot - 0.223)
xup <- max(0.225, spot + 0.223)
} else {
seq2 <- seq(nPeriods+3, length.out = 4)
sigma2 <- exp(theta[seq2])
bt <- sigma2[1]
br <- sigma2[2]
wt <- sigma2[3]
wr <- sigma2[4]
tt <- bt + wt
tr <- br + wr
if(m == 'total') {
spot <- tt / tr
} else if(m == 'within') {
spot <- wt / wr
}
xlow <- min(0.7, spot * 0.7)
xup <- max(1.3, spot * 1.3)
}
}
x <- seq(xlow, xup, length.out = xlength)##fixed phi values
## design matrix for TRRT and RTTR design (mu, p2, p3, p4, S, phi)
X <- lapply(seq_along(TRnum), function(i) {
design_matrix(TRnum[[i]], i)
})
names(X) <- TRname
s1xy <- lapply(seq(n1), function(i) {
yi <- e$Y[e$subject==subject1[i]]
miss.pos <- which(is.na(yi)) # missing position
if(length(miss.pos) > 0){
yi <- yi[-miss.pos]
X.m <- X[[1]][-miss.pos,]
} else{
X.m <- X[[1]]
}
list(yi, miss.pos, X.m)
})
s2xy <- lapply(seq(n2), function(i) {
yi <- e$Y[e$subject==subject2[i]]
miss.pos <- which(is.na(yi)) # missing position
if(length(miss.pos) > 0){
yi <- yi[-miss.pos]
X.m <- X[[2]][-miss.pos,]
} else{
X.m <- X[[2]]
}
list(yi, miss.pos, X.m)
})
# use TRname to generalize var-cov matrix
varcov_blueprint <- lapply(TRname, varcov_matrix)
names(varcov_blueprint) <- TRname
##negative log likelihood ###
mnormNLL <- function(theta, val) {
p <- sigma_vals(theta, method, nPeriods, val)
beta <- p[[1]]
s_vals <- p[[2]]
##var/cov matrix####
vmat <- lapply(varcov_blueprint, function(i) {
matrix(s_vals[i], nPeriods, nPeriods)
})
l <- 0 ##variable for the sum of negative log likelihood##
for (i in seq(n1)){
s_i <- s1xy[[i]]
yi <- s_i[[1]]
miss.pos <- s_i[[2]]
Xi1.m <- s_i[[3]]
if(length(miss.pos) > 0) {
vi1.m <- vmat[[1]][-miss.pos, -miss.pos, drop = FALSE]
} else {
vi1.m <- vmat[[1]]
}
y.pred1 <- Xi1.m %*% beta
l <- l - mvtnorm::dmvnorm(yi, mean=y.pred1, sigma=vi1.m, log=TRUE, checkSymmetry = FALSE)
} ## sum of negative log likelihood for subjects in seq 1
for(i in seq(n2)){
s_i <- s2xy[[i]]
yi <- s_i[[1]]
miss.pos <- s_i[[2]]
Xi2.m <- s_i[[3]]
if(length(miss.pos) > 0) {
vi2.m <- vmat[[2]][-miss.pos, -miss.pos, drop = FALSE]
} else {
vi2.m <- vmat[[2]]
}
y.pred2 <- Xi2.m %*% beta
l <- l - mvtnorm::dmvnorm(yi, mean=y.pred2, sigma=vi2.m, log=TRUE, checkSymmetry = FALSE)
} ## sum of log likelihood for subjects in seq 2
return(l)
}
#################get the profilelikelihood################
###get the profile likelihood for fixed phi##
maxLik <- rep(NA, xlength)
for(i in seq(xlength)) {
## get minimized negative log likelihood using nlm###
op <- stats::nlm(mnormNLL, theta, x[i])
# would `optim` work here?
## convert to maximum likelihood##
if (op$code <= 2) {
maxLik[i]<- exp(-op$minimum)
}
}
lik.norm <- maxLik / max(maxLik, na.rm = TRUE)
xmax <- x[which.max(lik.norm)]
xli4_5 <- range(x[lik.norm >=1/4.5], na.rm = TRUE)
xli8 <- range(x[lik.norm >=1/8], na.rm = TRUE)
xli32 <- range(x[lik.norm >=1/32], na.rm = TRUE)
li <- rbind(xli4_5, xli8, xli32)
rownames(li) <- c('1/4.5 LI', '1/8 LI', '1/32 LI')
colnames(li) <- c('lower', 'upper')
obj <- list(poi = x, maxLik = maxLik, MAX = xmax, LI = li, method = m)
class(obj) <- 'proLikelihood'
obj
}
#' @rdname proLikelihood
#' @export
averageBE <- function(dat, colSpec = list(), theta = NULL, xlow, xup, xlength) {
proLikelihood(dat, colSpec, theta, xlow, xup, xlength, 'average')
}
#' @rdname proLikelihood
#' @export
totalVarianceBE <- function(dat, colSpec = list(), theta = NULL, xlow, xup, xlength) {
proLikelihood(dat, colSpec, theta, xlow, xup, xlength, 'total')
}
#' @rdname proLikelihood
#' @export
withinVarianceBE <- function(dat, colSpec = list(), theta = NULL, xlow, xup, xlength) {
proLikelihood(dat, colSpec, theta, xlow, xup, xlength, 'within')
}
|
/scratch/gouwar.j/cran-all/cranData/BElikelihood/R/proLikelihood.R
|
#' Internal functions
#'
#' Internal functions are not intended to be called by user.
#'
#' \code{design_matrix}: design matrix for given sequence
#'
#' \code{select_theta}: select good starting values for theta
#'
#' \code{setup_env}: create environment with required variables
#'
#' \code{sigma_vals}: calculate sigma values used in varcov matrix
#'
#' \code{validateColumns}: ensure accurate column specification
#'
#' \code{varcov_matrix}: design varcov matrix
#'
#' @name beInternal
#' @aliases design_matrix select_theta setup_env
#' sigma_vals validateColumns varcov_matrix
#' @keywords internal
NULL
# taken from EHR::validateColumns
validateColumns <- function(df, columnSpecs, defaultSpecs = list()) {
# KEY = colname(s)
# if default is NULL, not required
# if default is NA, required
n <- names(df)
u <- names(columnSpecs)
if(length(defaultSpecs) == 0) {
defaultSpecs <- as.list(rep(NA, length(columnSpecs)))
names(defaultSpecs) <- u
}
x <- names(defaultSpecs)
cst <- function(t, v) {
sprintf('%s"%s"', t, paste(v, collapse = '", "'))
}
errors <- character(4)
# user should provide named list, or list of given length
if(is.null(u)) {
if(length(columnSpecs) == length(x)) {
u <- x
names(columnSpecs) <- x
} else {
errors[1] <- cst('column specification is incorrect; please identify all columns: ', x)
}
}
# user should not provide unexpected columns
bad_col <- setdiff(u, x)
if(length(bad_col)) {
errors[2] <- cst('column specification is incorrect; the following column(s) should not be present: ', bad_col)
}
# provide defaults, including NULL/NA
add_col <- setdiff(x, u)
columnSpecs[add_col] <- defaultSpecs[add_col]
# safely remove NULL
columnSpecs <- columnSpecs[lengths(columnSpecs, FALSE) > 0L]
u <- names(columnSpecs)
# require NA
req_col <- u[is.na(sapply(columnSpecs, `[`, 1))]
if(length(req_col)) {
errors[3] <- cst('column specification is incorrect; please identify the following columns: ', req_col)
}
# check for missing columns
mycols <- unlist(columnSpecs)
mycols <- mycols[!is.na(mycols)]
miss_col <- setdiff(mycols, c(n, seq_along(n)))
if(length(miss_col)) {
errors[4] <- cst('data set is missing expected columns; the following column(s) are missing: ', miss_col)
}
err <- paste(errors[errors != ''], collapse = '\n ')
if(err != '') stop(err)
# convert any numeric columns into names
for(i in seq_along(columnSpecs)) {
csix <- match(columnSpecs[[i]], seq_along(n))
columnSpecs[[i]][!is.na(csix)] <- n[csix[!is.na(csix)]]
}
columnSpecs
}
setup_env <- function(dat, colSpec = list()) {
dat.req <- list(subject = NA, formula = NA, y = NA, period = NULL, seq = NULL)
dat.col <- validateColumns(dat, colSpec, dat.req)
if('period' %in% names(colSpec)) {
# need to reorder by subject|period
dat <- dat[order(dat[,colSpec$subject], dat[,colSpec$period]),]
e <- list2env(list(subject = dat[,colSpec$subject], formula = dat[,colSpec$formula]))
id_rows <- tapply(seq(nrow(dat)), e$subject, I)
e$period <- dat[,colSpec$period]
} else {
e <- list2env(list(subject = dat[,colSpec$subject], formula = dat[,colSpec$formula]))
id_rows <- tapply(seq(nrow(dat)), e$subject, I)
e$period <- unsplit(lapply(id_rows, seq_along), e$subject)
}
e$Y <- log(dat[,colSpec$y]) ##take log of outcome
ind <- match(e$formula, c('R','T'))
e$Rind <- c(1,0)[ind]
e$Tind <- ind-1
TRseq <- vapply(id_rows, function(i) paste(e$Tind[i], collapse = ''), character(1))
uTRseq <- unname(unique(TRseq))
genseq <- unsplit(match(TRseq, uTRseq), e$subject)
if('seq' %in% names(colSpec)) {
## this needs a solution
provseq <- dat[,colSpec$seq]
if(any(provseq != genseq)) {
uTRseq <- uTRseq[c(2,1)]
genseq <- unsplit(match(TRseq, uTRseq), e$subject)
}
if(any(provseq != genseq)) {
stop('sequence provided does not match generated')
}
e$seq <- provseq
} else {
e$seq <- genseq
}
e$TRname <- chartr('01', 'RT', uTRseq)
e$TRnum <- lapply(strsplit(uTRseq, ''), as.numeric)
e
}
select_theta <- function(e, nPeriods) {
if(!requireNamespace("nlme", quietly = TRUE)) {
stop('please install "nlme" package -- install.packages(\'nlme\')')
}
###########
f <- stats::as.formula(Y ~ factor(seq)+factor(period)+formula)
m <- nlme::lme(f, data = e, method="REML", random=list(~0+Rind+Tind|subject), weights= nlme::varIdent(form= ~1|formula), na.action = stats::na.exclude)
##########
##extract ratio of swt/swr
devratio <- 1/unique(nlme::varWeights(m$modelStruct))[2]
if(is.na(devratio)) devratio <- 1
##extract sbr and sbt and correlation coefficient etc.##
vcorr <- nlme::VarCorr(m)
sbr2 <- as.numeric(vcorr[1,1])
sbt2 <- as.numeric(vcorr[2,1])
rho <- as.numeric(vcorr[2,3])
swr2 <- as.numeric(vcorr[3,1])
swr <- as.numeric(vcorr[3,2])
swt2 <- (swr*devratio)^2
##extract the fixed effects##
sumTT <- summary(m)$tTable
sumTT1 <- sumTT[seq(nPeriods + 1), 1]
# theta1 (mu, p2, p3, p4, S, phi)
theta1 <- unname(c(sumTT1[-2], sumTT1[2], sumTT[nPeriods+2]))
c(theta1, log(sbt2), log(sbr2), log(swt2), log(swr2), rho)
}
sigma_vals <- function(theta, method = c('average','total','within'), nPeriods, val) {
meth <- match.arg(method)
# theta
## seq1: mu, p2, p3, p4, S, phi
## seq2: logsigmaBT2, logsigmaBR2, logsigmaWT2, logsigmaWR2
## seq3: rho
seq1 <- seq(nPeriods+2)
seq2 <- seq(nPeriods+3, length.out = 4)
seq3 <- nPeriods + 7
beta <- theta[seq1]
rho <- theta[seq3]
sigma2 <- exp(theta[seq2])
bt <- sigma2[1]
br <- sigma2[2]
wt <- sigma2[3]
wr <- sigma2[4]
tt <- bt + wt
tr <- br + wr
s_vals <- c(
BT = bt,
BR = br,
TT = tt,
TR = tr,
BRT = NA
)
if(method == 'average') {
beta[nPeriods+2] <- val
} else if(method == 'total') {
s_vals['TT'] <- val^2 * tr
s_vals['BT'] <- s_vals['TT'] - wt
# if "px" is small, this could be negative (and a problem)
# if(s_vals['BT'] < 0) warning('possible "xlow" issue')
} else if(method == 'within') {
s_vals['TT'] <- bt + val^2 * wr
}
s_vals['BRT'] <- rho * prod(sqrt(s_vals[c('BT','BR')]))
list(beta, s_vals)
}
design_matrix <- function(x, seqno = 1) {
n <- length(x)
m <- cbind(diag(n), seqno-1, x)
m[,1] <- 1
colnames(m) <- c('mu', paste0('p', seq(2, n)), 'S', 'phi')
m
}
varcov_matrix <- function(x) {
RTname <- strsplit(x, '')[[1]]
n <- length(RTname)
mmm <- matrix('', n, n)
diag(mmm) <- paste0('T', RTname)
j1 <- matrix(RTname[which(upper.tri(mmm), arr.ind = TRUE)], ncol = 2)
j2 <- matrix(c('BR', 'BRT', 'BRT', 'BT'), 2, 2, dimnames = list(c('R','T'), c('R','T')))
j3 <- j2[j1]
mmm[upper.tri(mmm)] <- j3
mmm[lower.tri(mmm)] <- j3
mmm
}
|
/scratch/gouwar.j/cran-all/cranData/BElikelihood/R/utils.R
|
---
output:
rmarkdown::pdf_document:
extra_dependencies: ["pdfpages"]
vignette: >
%\VignetteIndexEntry{BElikelihood}
%\VignettePackage{BElikelihood}
%\VignetteEngine{knitr::rmarkdown}
---
\includepdf[pages=-, fitpaper=true]{precompiled-BElikelihood.pdf}
|
/scratch/gouwar.j/cran-all/cranData/BElikelihood/inst/doc/BElikelihood.Rmd
|
---
title: "Likelihood Approach and an R Package BElikelihood"
author: Liping Du and Leena Choi
output: rmarkdown::pdf_document
vignette: >
%\VignetteIndexEntry{BElikelihood}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
```
The major utility of the R package `BElikelihood` is to provide the profile likelihoods for presenting evidence to evaluate bioequivalence (BE). It can handle various crossover designs commonly used in BE studies, such as a fully replicated crossover design (e.g., 2x4 two-sequence, four-period, RTRT/TRTR), a partially replicated crossover design (e.g., 2x3, two-sequence, three-period, RTR/TRT), and the conventional two-sequence, two-period crossover design (2x2, RT/TR), where "R" stands for a reference drug and "T" stands for a test drug. As long as a two-sequence design is used, the functions work with any order of "R" and "T". Additional functions for analyzing BE data with frequentist methods will be added in the near future.
# Introduction
Bioequivalence is required for approval of a generic drug. The two one-sided test (TOST) procedure is recommended by the US Food and Drug Administration (FDA) when evaluating average BE (ABE) on pharmacokinetic parameters such as the area under the blood concentration-time curve (AUC) and the peak concentration (Cmax). Due to the low power of TOST for highly variable drugs (HVDs), defined by the magnitude of the sample within-subject standard deviation of the reference drug, $s_{WR}$ [e.g., $s_{WR} \geq 0.294$, or, equivalently, coefficient of variation (CV) $\geq$ 30%], both the FDA and the European Medicines Agency (EMA) recommend similar but not identical reference scaled average bioequivalence (RSABE) approaches for HVDs. Although the power is improved, the new approaches may not guarantee a high level of confidence for the true difference between two drugs at the ABE boundaries. It is also difficult for these approaches to address the differences in the variance between the test and reference drugs. We advocate the use of a likelihood approach for presenting and interpreting BE data as evidence, as discussed in Royall (1997)$^{1}$ and Choi *et al.*$^{2}$. In fact, the `BElikelihood` package can be used to generate the profile likelihoods for the mean difference and standard deviation ratios of the test and reference drugs for any type of drug, including HVDs and drugs with a narrow therapeutic index (NTI), thus providing evidence for equivalence in both mean and variance$^{3}$ across a wide range of drugs. Note that the pharmacokinetic parameters (AUC and Cmax) are assumed to be logarithmically transformed in the analysis, as recommended by the FDA.
# Profile likelihood
The `proLikelihood()` function calculates profile likelihoods for the mean difference, total standard deviation ratio and within-subject standard deviation ratio of two drugs. The likelihood function is based on a linear mixed-effects model with fixed effects of period, sequence, and formula. The parameter values in the range defined by `xlow` and `xup` (the mean difference for method `average`, the total standard deviation ratio for method `total`, and the within-subject standard deviation ratio for method `within`) and the corresponding profile likelihood values are the major output of this function. The `xlength` argument is the number of evenly spaced parameter values in the range, and thus determines the smoothness of the curve when the profile likelihood is plotted by the `plot` function. The `plot` function presents statistical evidence graphically using the profile likelihood standardized by its maximum, along with the maximum likelihood estimate (MLE) and likelihood intervals (1/8 and 1/32 by default). The output also provides the 1/4.5*th* likelihood interval (i.e., $k$ = 4.5), which approximately corresponds to the 90% confidence interval (and is operationally equivalent to the TOST). For a general approach to obtaining the profile likelihood from various statistical models, see the R package `ProfileLikelihood`$^{4}$, which can also be used to evaluate ABE.
## Example data
The example dataset was collected from a fully replicated 2x4 crossover design with two sequences, RTRT and TRTR, where R and T denote the reference and test drugs, respectively. The data are a subset of Example 4.4 in Chapter 4$^{5}$. These data can also be used as an example dataset for a 2x3 (RTR/TRT) design by specifying `period < 4`, or for a 2x2 design by specifying `period < 3`, as shown below.
```{r, echo=TRUE, results='asis'}
## install package 'BElikelihood' using command below if not installed yet
# install.packages("BElikelihood")
library(BElikelihood)
data(dat)
```
```{r, echo=FALSE, results='asis'}
knitr::kable(head(dat, 10))
```
## Profile likelihood for the mean difference
```{r, echo=TRUE, warning=FALSE, out.width="70%"}
cols <- list(subject = 'subject', formula = 'formula', y = 'AUC')
####### for 2x4 design
p4a <- proLikelihood(dat, colSpec = cols, xlength = 200, method = 'average')
plot(p4a)
## this is equivalent to
p4a <- averageBE(dat, colSpec = cols, xlength = 200)
plot(p4a)
## get values for likelihood intervals: k = 4.5, 8, 32
p4a$LI
###### for 2x3 design
dd3 <- dat[dat$period < 4,]
p3a <- proLikelihood(dd3, colSpec = cols, xlength = 200, method = 'average')
plot(p3a)
###### for 2x2 design
dd2 <- dat[dat$period < 3,]
p2a <- proLikelihood(dd2, colSpec = cols, xlength = 200, method = 'average')
plot(p2a)
```
Note that you may get warnings about the optimization when using the function. If the optimization fails for a critical range of parameters, consider providing better initial values using `theta` (an optional argument), which is a vector of initial parameter values for use in the optimization. For example, for a 2x4 design, they are `mu`, `p2`, `p3`, `p4`, `S`, `phi`, `log(sbt2)`, `log(sbr2)`, `log(swt2)`, `log(swr2)`, and `rho`, where `mu` is the population mean for the reference drug when there are no period or sequence effects; `p2` to `p4` are fixed period effects with period 1 as the reference period; `S` is the fixed sequence effect with sequence 1 as the reference sequence; `phi` is the mean difference between the two drugs; `sbt2` and `sbr2` are between-subject variances for the test and reference drugs, respectively; `swt2` and `swr2` are within-subject variances for the test and reference drugs, respectively; and `rho` is the correlation coefficient within a subject. When `theta = NULL` (the default), the function chooses the starting values automatically based on a linear mixed-effects model. If the user wants to provide these values, for method `average` the user may put any value for `phi`; similarly, for method `total` the user can put any value for `log(sbt2)`, and for method `within` any value for `log(swt2)`.
`proLikelihood()` with method `average` is equivalent to `averageBE()`, which takes no `method` argument. These functions allow missing data and will use all the available data.
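For illustration, the sketch below shows how `theta` could be supplied for the 2x4 design; the numeric values are arbitrary placeholders (in the order listed above) and should be replaced by sensible starting values for your data.
```{r, echo=TRUE, eval=FALSE}
## order: mu, p2, p3, p4, S, phi, log(sbt2), log(sbr2), log(swt2), log(swr2), rho
init <- c(4.5, 0, 0, 0, 0, 0, log(0.04), log(0.04), log(0.02), log(0.02), 0.5)
p4a_init <- proLikelihood(dat, colSpec = cols, theta = init,
                          xlength = 200, method = 'average')
```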
## Profile likelihood for the total standard deviation ratio
```{r, echo=TRUE, warning=FALSE, out.width="70%"}
cols <- list(subject = 'subject', formula = 'formula', y = 'AUC')
####### for 2x4 design
p4t <- proLikelihood(dat, colSpec = cols, xlength = 200, method = 'total')
plot(p4t)
## this is equivalent to:
p4t <- totalVarianceBE(dat,xlength=50, colSpec = cols)
plot(p4t)
####### for 2x2 design; adjust the range of x-axis
p2t <- proLikelihood(dd2, colSpec = cols, xlow=0.5, xup=1.5, xlength = 200, method = 'total')
plot(p2t)
```
## Profile likelihoods for the within-subject standard deviation ratio
```{r, echo=TRUE, warning=FALSE, out.width="70%"}
cols <- list(subject = 'subject', formula = 'formula', y = 'AUC')
####### for 2x4 design
p4w <- proLikelihood(dat, colSpec = cols, xlength = 200, method = 'within')
# adjust the position of text inside plot
plot(p4w, textx=1.3, texty=0.7)
## this is equivalent to:
p4w <- withinVarianceBE(dat,xlength=50, colSpec = cols)
plot(p4w)
####### for 2x3 design; adjust the range of x-axis
p3w <- proLikelihood(dd3, colSpec = cols, xlow=0.35, xup=1.8, xlength = 200, method = 'within')
plot(p3w)
```
The x-axis range in these plots is controlled by `xlow` and `xup`, which are generated automatically if not supplied. However, we strongly recommend choosing a range that better fits the purpose at hand. The dashed vertical lines represent the ABE limits (for the mean difference), or some recommended limits (e.g., 2.5 is recommended by the FDA for within-subject standard deviation ratios). Users can also make their own plot from the output of `proLikelihood`, which provides the profile likelihood values along with the parameter values, as sketched below.
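For instance, a minimal base-R sketch of such a custom plot, using the `p4w` object created above (the axis labels are our own choices):
```{r, echo=TRUE, eval=FALSE}
## standardize the profile likelihood and plot it against the parameter values
lik <- p4w$maxLik / max(p4w$maxLik, na.rm = TRUE)
plot(p4w$poi, lik, type = "l",
     xlab = "Within-subject standard deviation ratio",
     ylab = "Standardized profile likelihood")
abline(h = c(1/8, 1/32), lty = 2) # 1/8 and 1/32 likelihood interval cut-offs
```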
The standardized profile likelihood plots present the evidence provided by the data for the parameter of interest. For the BE data analysis, this likelihood approach can provide evidence for the mean and variance in a unified framework.
# References
1. Royall RM (1997). Statistical Evidence: A Likelihood Paradigm. Chapman & Hall/CRC.
2. Choi L, Caffo B, Rohde C. A survey of the likelihood approach to bioequivalence trials. Statistics in Medicine 2008; 27(24): 4874–4894.
3. Du L and Choi L. Likelihood approach for evaluating bioequivalence of highly variable drugs, Pharmaceutical Statistics 2015; 14(2): 82-94.
4. Choi L (2023). `ProfileLikelihood`: Profile Likelihood for a Parameter in Commonly Used Statistical
Models_. R package version 1.3, <https://CRAN.R-project.org/package=ProfileLikelihood>.
5. Patterson S and Jones B (2023). Bioequivalence and Statistics in Clinical Pharmacology. Chapman Hall/CRC Press.
|
/scratch/gouwar.j/cran-all/cranData/BElikelihood/inst/vignette-source/BElikelihood.Rmd
|
---
output:
rmarkdown::pdf_document:
extra_dependencies: ["pdfpages"]
vignette: >
%\VignetteIndexEntry{BElikelihood}
%\VignettePackage{BElikelihood}
%\VignetteEngine{knitr::rmarkdown}
---
\includepdf[pages=-, fitpaper=true]{precompiled-BElikelihood.pdf}
|
/scratch/gouwar.j/cran-all/cranData/BElikelihood/vignettes/BElikelihood.Rmd
|
#' @keywords internal
"_PACKAGE"
# Imports: start ----
# Imports: end ----
NULL
|
/scratch/gouwar.j/cran-all/cranData/BFF/R/BFF-package.R
|
# source("~/Desktop/Research/BFF/R/FINAL_SUPPORT_hypergeometric.R")
# source("~/Desktop/Research/BFF/R/FINAL_FUNCTIONS_tau2.R")
# source("~/Desktop/Research/BFF/R/FINAL_FUNCTIONS_plotting.R")
# source("~/Desktop/Research/BFF/R/FINAL_support_functions.R")
# library(gsl)
################# chi2 functions if r is an integer and equal to 1
G_val_r1 = function(tau2, chi2_stat, df)
{
BFF = (tau2 + 1) ^ (-df / 2 - 1) * (1 + tau2 * chi2_stat / (df * (tau2 + 1))) * exp(tau2 *
chi2_stat / (2 * (tau2 + 1)))
to_return = log(BFF)
return(to_return)
}
################# chi2 functions if r is an integer and greater than 1
# ################# Gamma functions / chi^2
# prod_val_g = function(k, r, n)
# {
# one = k/2 + r - 1 - n
# return(one)
# }
#
# prod_val_function_g = function(k, m, r)
# {
# val = 1
# if (m > 0)
# {
# for (nn in 0:(m-1))
# {
# val = val * prod_val_g(k=k, r=r, n=nn)
# }
# }
# return(val)
# }
#
# sum_val_g = function(k, r, m, tau, x)
# {
# one = choose(r, m)
# two = prod_val_function_g(k = k, m=m, r=r)
# three = (tau*x/(2*(1+tau)))^(r-m)
# four = 1/((1+tau)^(k/2+r))
#
# to_return = one*two*three*four
# return(to_return)
# }
#
# sum_val_function_g = function(k, r, tau,x)
# {
# val = 0
# for (mm in 0:r)
# {
# val = val + sum_val_g(k=k, r = r, m =mm, tau=tau, x=x)
# }
#
# return(val)
# }
#
# log_G = function(tau, h, k, r)
# {
# num1 = sterling_gamma(k/2)
# den1 = sterling_gamma(k/2 + r)
# first_term = num1/den1
#
# second_term = exp(tau * h / (2*(1+tau)))
#
# third_term = sum_val_function_g(k=k, r=r, tau=tau, x=h)
#
# to_return = first_term * second_term * third_term
#
# # log version
# to_return = log(first_term) + log(second_term) + log(third_term)
# return(to_return)
# }
################# chi2 functions if r is a fraction
log_G_frac = function(tau2, h, k, r)
{
tp1 = 1 + tau2
one = 1 / (tp1 ^ (k / 2 + r))
a = k / 2 + r
b = k / 2
c = tau2 * h / (2 * (1 + tau2))
two = hypergeom1F1(a, b, c)$f
to_return = log(one) + log(two)
return(to_return)
}
####################### backend implementation
backend_chi2 = function(r,
chi2_stat,
n = NULL,
df = NULL,
pearsons = TRUE,
r1 = FALSE,
tau2 = NULL)
{
# same effect sizes for all tests
effect_size = seq(0.01, 1, by = 0.01)
user_supplied_tau2 = TRUE
if (is.null(tau2))
user_supplied_tau2 = FALSE
r1 = r1
frac_r = !r1
log_vals = rep(0, length(effect_size))
if (r1) {
if (pearsons) {
if (!user_supplied_tau2)
tau2 = get_count_tau2(n = n, k = df, w = effect_size)
} else {
if (!user_supplied_tau2)
tau2 = get_LRT_tau2(n = n, k = df, w = effect_size)
}
log_vals = unlist(lapply(tau2, G_val_r1, chi2_stat = chi2_stat, df = df))
}
if (frac_r) {
if (pearsons)
{
if (!user_supplied_tau2)
tau2 = get_count_tau2(n = n,
w = effect_size,
k = df,
r = r)
} else {
if (!user_supplied_tau2)
tau2 = get_LRT_tau2(n = n,
w = effect_size,
k = df,
r = r)
}
log_vals = unlist(lapply(
tau2,
log_G_frac,
h = chi2_stat,
r = r,
k = df
))
}
# stuff to return
BFF = log_vals
# check the results are finite
if (!all(is.finite(BFF)))
{
stop(
"Values entered produced non-finite numbers.
The most likely scenario is the evidence was so strongly in favor of the
alternative that there was numeric overflow. Please contact the maintainer for more information."
)
}
return(BFF)
}
################# chi2 function user interaction
#' chi2_test_BFF
#'
#' chi2_test_BFF constructs BFFs based on the chi-squared test. BFFs depend on hyperparameters r and tau^2 which determine the shape and scale of the prior distributions which define the alternative hypotheses.
#' By setting r > 1, we use higher-order moments for replicated studies. Fractional moments are set with r > 1 and r not an integer.
#' All results are on the log scale.
#' Plot saved to working directory unless a full path is specified in the 'savename' variable of the function.
#'
#' @param chi2_stat chi^2 statistic
#' @param df degrees of freedom
#' @param n sample size
#' @param pearsons Is this a test of Pearson’s chi^2 test for goodness-of-fit? Default is TRUE. FALSE assumes a likelihood ratio test
#' @param savename optional, filename for saving the pdf of the final plot
#' @param maximize Should the value of r be maximized? Default is FALSE. Only set to TRUE if analyzing multiple studies
#' @param r r value
#' @param tau2 tau2 values (can be a single entry or a vector of values)
#' @param save should a copy of the plot be saved?
#' @param xlab optional, x label for plot
#' @param ylab optional, y label for plot
#' @param main optional, main label for plot
#'
#' @return Returns Bayes factor function results
#' \tabular{ll}{
#' \code{BFF} \tab The log of the Bayes Factor Function values \cr
#' \tab \cr
#' \code{effect_size} \tab Effect sizes tested (seq(0.01, 1, by = 0.01)) \cr
#' \tab \cr
#' \code{BFF_max_RMSE} \tab Maximum BFF value \cr
#' \tab \cr
#' \code{max_RMSE} \tab Effect size that maximizes BFF\cr
#' \tab \cr
#' \code{tau2} \tab tau^2 values tested\cr
#' }
#' @export
#'
#' @examples
#' chi2BFF = chi2_test_BFF(chi2_stat = 2.5, n = 50, df = 49, save = FALSE)
#' chi2BFF = chi2_test_BFF(chi2_stat = 2.5, n = 50, df = 49, save = FALSE, tau2 = 0.5)
#' chi2BFF = chi2_test_BFF(chi2_stat = 2.5, n = 50, df = 49, save = FALSE, tau2 = c(0.5, 0.8))
#' chi2_test_BFF(chi2_stat = 2.5, n = 50, df = 49, pearsons = FALSE, save = FALSE)
#' chi2_test_BFF(chi2_stat = 2.5, n = 50, df = 49, r = 2, save = FALSE)
#' chi2_test_BFF(chi2_stat = 2.5, n = 50, df = 49, r = 2, pearsons = FALSE, save = FALSE)
#' chi2_test_BFF(chi2_stat = 2.5, n = 50, df = 49, r = 2.5, save = FALSE)
#' chi2_test_BFF(chi2_stat = 2.5, n = 50, df = 49, r = 2.5, pearsons = FALSE, save = FALSE)
#' chi2_test_BFF(chi2_stat=2.5, n = 50, df = 49, maximize = TRUE)
#' chi2_test_BFF(chi2_stat=2.5, n = 50, df = 49, maximize = TRUE, tau2 = 0.5)
#' chi2_test_BFF(chi2_stat=2.5, n = 50, df = 49, maximize = TRUE, tau2 = c(0.5, 0.8))
#' chi2BFF$BFF_max_RMSE # maximum BFF value
#' chi2BFF$max_RMSE # effect size which maximizes the BFF
chi2_test_BFF = function(chi2_stat,
n = NULL,
df = NULL,
pearsons = TRUE,
savename = NULL,
maximize = FALSE,
r = 1,
tau2 = NULL,
save = TRUE,
xlab = NULL,
ylab = NULL,
main = NULL)
{
if (is.null(df))
stop("df is required")
##### same effect sizes for all tests
effect_size = seq(0.01, 1, by = 0.01)
##### is tau2 supplied as an argument?
user_supplied_tau2 = TRUE
if (is.null(tau2))
{
user_supplied_tau2 = FALSE
}
##### call results
r1 = FALSE
if (r == 1)
r1 = TRUE
results = backend_chi2(
chi2_stat = chi2_stat,
n = n,
pearsons = pearsons,
r = r,
tau2 = tau2,
r1 = r1,
df = df
)
##### plotting if tau2 is not specified
if (!user_supplied_tau2 && !maximize) {
bff_plot = c()
bff_plot[[1]] = results
plot_BFF(
effect_size = effect_size,
BFF = bff_plot,
save = save,
savename = savename,
xlab = xlab,
ylab = ylab,
main = main,
r = r
)
}
##### optimization logic
if (maximize)
{
if (is.null(tau2))
tau2 = seq(0, 1, 0.1)
optimal_r = vector(length = length(tau2))
count = 1
for (i in tau2)
{
optimal_r[count] = optimize(
backend_chi2,
c(1, 20),
tol = 0.001,
chi2_stat = chi2_stat,
n = n,
df = df,
pearsons = pearsons,
r1 = FALSE,
tau2 = i,
maximum = TRUE
)$maximum
count = count + 1
}
maximized_values = as.data.frame(cbind(tau2, optimal_r))
}
###### return logic
BFF = results
effect_size = effect_size
idx_max = which.max(BFF)
BFF_max_RMSE = BFF[idx_max]
max_RMSE = effect_size[idx_max]
if (maximize) {
print(
"The maximum r value for each specified tau2 is given. Re-run the test with the desired r to generate plots and get the BFF value."
)
to_return = maximized_values
} else if (user_supplied_tau2) {
to_return = list(BFF = BFF,
tau2 = tau2)
} else {
to_return = list(
log_BFF = BFF,
effect_size = effect_size,
log_BFF_max_RMSE = BFF_max_RMSE,
max_RMSE = max_RMSE
)
}
return(to_return)
}
|
/scratch/gouwar.j/cran-all/cranData/BFF/R/FINAL_FUNCTIONS_chi2_test.R
|
# source("~/Desktop/Research/BFF/R/FINAL_SUPPORT_hypergeometric.R")
# source("~/Desktop/Research/BFF/R/FINAL_FUNCTIONS_tau2.R")
# source("~/Desktop/Research/BFF/R/FINAL_FUNCTIONS_plotting.R")
# source("~/Desktop/Research/BFF/R/FINAL_support_functions.R")
# library(gsl)
################# F functions if r is an integer and equal to 1
f_val_r1 = function(tau2, f_stat, df1, df2)
{
v = df2 * (tau2 + 1)
term_one = (tau2 + 1) ^ (-df1 / 2 - 1)
term_two = (1 + df1 * f_stat / df2) / (1 + df1 * f_stat / v)
term_three = 1 + (df1 + df2) * tau2 * f_stat / (v * (1 + df1 * f_stat /
v))
BFF = term_one * term_two * term_three
to_return = log(BFF)
return(to_return)
}
# ################# F functions if r is an integer and greater than 1
# prod_val_f = function(k, r, n)
# {
# term_one = k / 2 + r - 1 - n
#
# return(term_one)
# }
#
# prod_val_function_f = function(i, k, r)
# {
# val = 1
# if (i > 0)
# {
# for (nn in 0:(i - 1))
# {
# val = val * prod_val_f(k = k, r = r, n = nn)
# }
# }
# return(val)
# }
#
# sum_val_f = function(i, k, f, m, r, tau)
# {
# w = k * f / m
# c = 1 + tau
# a = tau / c
#
# one = choose(r, i)
# two = prod_val_function_f(i = i, k = k, r = r)
# three = (a * w / (1 + w / c)) ^ (r - i)
# four = gamma((k + m) / 2 + r - i)
#
# to_return = one * two * three * four
# return(to_return)
# }
#
#
# sum_val_function_f = function(k, f, m, r, tau)
# {
# val = 0
# for (ii in 0:r)
# {
# val = val + sum_val_f(
# i = ii,
# k = k,
# f = f,
# m = m,
# r = r,
# tau = tau
# )
# }
#
# return(val)
# }
#
# log_F = function(tau, f, k, m, r)
# {
# num1 = sterling_gamma(k / 2)
# den1 = sterling_gamma(k / 2 + r)
# den2 = sterling_gamma(k / 2 + m / 2)
# first_term = num1 / (den1 * den2)
#
# second_term = (1 + tau) ^ (-k / 2 - r)
#
# w = k * f / m
# num3 = 1 + w
# den3 = 1 + w / (1 + tau)
# third_term = (num3 / den3) ^ ((k + m) / 2)
#
# fourth_term = sum_val_function_f(
# k = k,
# f = f,
# m = m,
# r = r,
# tau = tau
# )
#
# to_return = first_term * second_term * third_term * fourth_term
#
# # log version
# to_return = log(first_term) + log(second_term) + log(third_term) + log(fourth_term)
# return(to_return)
# }
################# F functions if r is a fraction
log_F_frac = function(tau2, f, k, m, r)
{
one = 1 / ((1 + tau2) ^ (k / 2 + r))
a = k / 2 + r
b = (k + m) / 2
c = k / 2
d = k * f * tau2 / ((1 + tau2) * (m + k * f))
two = Gauss2F1(a, b, c, d)
to_return = log(one) + log(two)
return(to_return)
}
####################### backend implementation
backend_f = function(r,
f_stat,
n,
df1,
df2,
r1,
tau2 = NULL)
{
# same effect sizes for all tests
effect_size = seq(0.01, 1, by = 0.01)
user_supplied_tau2 = TRUE
if (is.null(tau2))
user_supplied_tau2 = FALSE
r1 = r1
frac_r = !r1
log_vals = rep(0, length(effect_size))
if (r1) {
if (!user_supplied_tau2)
tau2 = get_linear_tau2(n = n, k = df1, w = effect_size)
log_vals = unlist(lapply(
tau2,
f_val_r1,
f_stat = f_stat,
df1 = df1,
df2 = df2
))
}
if (frac_r) {
if (!user_supplied_tau2)
tau2 = get_linear_tau2(n = n,
k = df1,
w = effect_size,
r = r)
log_vals = unlist(lapply(
tau2,
log_F_frac,
f = f_stat,
k = df1,
m = df2,
r = r
))
}
# stuff to return
BFF = log_vals
# check the results are finite
if (!all(is.finite(BFF)))
{
stop(
"Values entered produced non-finite numbers.
The most likely scenario is the evidence was so strongly in favor of the
alternative that there was numeric overflow. Please contact the maintainer for more information."
)
}
return(BFF)
}
################# F function user interaction
#' f_test_BFF
#'
#' f_test_BFF constructs BFFs based on the F test. BFFs depend on hyperparameters r and tau^2 which determine the shape and scale of the prior distributions which define the alternative hypotheses.
#' By setting r > 1, we use higher-order moments for replicated studies. Fractional moments are set with r > 1 and r not an integer.
#' All results are on the log scale.
#' Plot saved to working directory unless a full path is specified in the 'savename' variable of the function.
#'
#' @param f_stat F statistic
#' @param df1 first degree of freedom
#' @param df2 second degree of freedom
#' @param n sample size
#' @param savename optional, filename for saving the pdf of the final plot
#' @param maximize should the function be maximized over all possible r values? Default is FALSE. Only set to TRUE if analyzing multiple studies
#' @param r r value
#' @param tau2 tau2 values (can be a single entry or a vector of values)
#' @param save should a copy of the plot be saved?
#' @param xlab optional, x label for plot
#' @param ylab optional, y label for plot
#' @param main optional, main label for plot
#'
#' @return Returns Bayes factor function results
#' \tabular{ll}{
#' \code{BFF} \tab The log of the Bayes Factor Function values \cr
#' \tab \cr
#' \code{effect_size} \tab Effect sizes tested (seq(0.01, 1, by = 0.01)) \cr
#' \tab \cr
#' \code{BFF_max_RMSE} \tab Maximum BFF value \cr
#' \tab \cr
#' \code{max_RMSE} \tab Effect size that maximizes BFF\cr
#' \tab \cr
#' \code{tau2} \tab tau^2 values tested\cr
#' }
#' @export
#'
#' @examples
#' fBFF = f_test_BFF(f_stat = 2.5, n = 50, df1 = 20, df2 = 48, save = FALSE)
#' f_test_BFF(f_stat = 2.5, n = 50, df1 = 20, df2 = 48, save = FALSE, tau2 = 0.5)
#' f_test_BFF(f_stat = 2.5, n = 50, df1 = 20, df2 = 48, save = FALSE, tau2 = c(0.5, 0.8))
#' f_test_BFF(f_stat = 2.5, n = 50, df1 = 20, df2 = 48, r = 2, save = FALSE)
#' f_test_BFF(f_stat = 2.5, n = 50, df1 = 20, df2 = 48, r = 2.5, save = FALSE)
#' f_test_BFF(f_stat=2.5, n = 50, df1 = 20, df2 = 48, maximize = TRUE)
#' f_test_BFF(f_stat=2.5, n = 50, df1 = 20, df2 = 48, maximize = TRUE, tau2 = 0.5)
#' f_test_BFF(f_stat=2.5, n = 50, df1 = 20, df2 = 48, maximize = TRUE, tau2 = c(0.5, 0.8))
#' fBFF$BFF_max_RMSE # maximum BFF value
#' fBFF$max_RMSE # effect size which maximizes the BFF value
#'
f_test_BFF = function(f_stat,
n,
df1,
df2,
savename = NULL,
maximize = FALSE,
r = 1,
tau2 = NULL,
save = TRUE,
xlab = NULL,
ylab = NULL,
main = NULL)
{
##### same effect sizes for all tests
effect_size = seq(0.01, 1, by = 0.01)
##### is tau2 supplied as an argument?
user_supplied_tau2 = TRUE
if (is.null(tau2))
{
user_supplied_tau2 = FALSE
}
##### call results
r1 = FALSE
if (r == 1)
r1 = TRUE
results = backend_f(
f_stat = f_stat,
n = n,
df1 = df1,
df2 = df2,
r = r,
tau2 = tau2,
r1 = r1
)
##### plotting if tau2 is not specified
if (!user_supplied_tau2 && !maximize) {
bff_plot = c()
bff_plot[[1]] = results
plot_BFF(
effect_size = effect_size,
BFF = bff_plot,
save = save,
savename = savename,
xlab = xlab,
ylab = ylab,
main = main,
r = r
)
}
##### optimization logic
if (maximize)
{
if (is.null(tau2))
tau2 = seq(0, 1, 0.1)
optimal_r = vector(length = length(tau2))
count = 1
for (i in tau2)
{
optimal_r[count] = optimize(
backend_f,
c(1, 20),
tol = 0.001,
f_stat = f_stat,
n = n,
df1 = df1,
df2 = df2,
r1 = FALSE,
tau2 = i,
maximum = TRUE
)$maximum
count = count + 1
}
maximized_values = as.data.frame(cbind(tau2, optimal_r))
}
###### return logic
BFF = results
effect_size = effect_size
idx_max = which.max(BFF)
BFF_max_RMSE = BFF[idx_max]
max_RMSE = effect_size[idx_max]
if (maximize) {
print(
"The maximum r value for each specified tau2 is given. Re-run the test with the desired r to generate plots and get the BFF value."
)
to_return = maximized_values
} else if (user_supplied_tau2) {
to_return = list(BFF = BFF,
tau2 = tau2)
} else {
to_return = list(
log_BFF = BFF,
effect_size = effect_size,
log_BFF_max_RMSE = BFF_max_RMSE,
max_RMSE = max_RMSE
)
}
return(to_return)
}
|
/scratch/gouwar.j/cran-all/cranData/BFF/R/FINAL_FUNCTIONS_f_test.R
|
require(ggplot2)
substrRight <- function(x, n) {
substr(x, nchar(x) - n + 1, nchar(x))
}
plot_BFF = function(effect_size,
BFF,
save = FALSE,
savename = NULL,
xlab = NULL,
ylab = NULL,
main = NULL,
r = NULL) {
num_lines = length(BFF)
plot_legend_names = vector()
count = 1
for (i in c(1:length(r))) {
str_temp = paste("r=", as.character(r[i]), sep = "")
plot_legend_names[count] = str_temp
count = count + 1
}
# custom plotting
if (is.null(xlab))
xlab = expression(paste("RMSE ", tilde(omega)))
if (is.null(ylab))
ylab = "Bayes Factor Against Null Hypothesis"
if (is.null(main))
main = "BFF"
if (is.null(savename) && save) {
print("No savename argument given, plot saving as BFF_plot.pdf")
savename = "BFF_plot.pdf"
}
if (!is.null(savename) && !save) {
## TODO string check for .pdf extension
check_pdf = substrRight(savename, 4)
if (check_pdf != ".pdf")
savename = paste(savename, ".pdf", sep = "")
save = TRUE
print(savename)
}
#Making the plot
data <- data.frame(effect_size = effect_size, BFF = BFF[[1]])
p <- ggplot(data, aes(x = effect_size, y = BFF)) +
geom_line() +
theme_bw()
p <- p + labs(x = xlab, y = ylab) +
ggtitle(main) +
theme(plot.title = element_text(hjust = 0.5))
maxval = 1e50
if (num_lines > 1)
{
count = 2
for (k in c(2:num_lines))
{
data2 <- data.frame(effect_size = effect_size, BFF = BFF[[k]])
# add each additional curve as a layer on the existing plot instead of
# rebuilding it, so that all BFF curves appear together
p <- p + geom_line(data = data2, aes(x = effect_size, y = BFF),
linetype = count, colour = k)
count = count+1
}
}
min_lim_x = min(effect_size)
max_lim_x = max(effect_size)
#Adding rectangles
p <-
p + annotate(
"rect",
xmin = -1,
xmax = 0.1,
ymin = -Inf,
ymax = Inf,
fill = adjustcolor("red", 0.1)
)
p <-
p + annotate(
"rect",
xmin = 0.1,
xmax = 0.35,
ymin = -Inf,
ymax = Inf,
fill = adjustcolor("orange", 0.1)
)
p <-
p + annotate(
"rect",
xmin = 0.35,
xmax = 0.65,
ymin = -Inf,
ymax = Inf,
fill = adjustcolor("blue", 0.1)
)
p <-
p + annotate(
"rect",
xmin = 0.65,
xmax = 2,
ymin = -Inf,
ymax = Inf,
fill = adjustcolor("green", 0.1)
)
p <-
p + geom_vline(xintercept = c(-1, 0.1, 0.35, 0.65, 2), lwd = 0.2)
p <- p + coord_cartesian(xlim = c(min_lim_x, max_lim_x))
# Adding y-axis labels
positive = c(50000,
10000,
5000,
2000,
1000,
500,
200,
150,
100,
75,
50,
40,
30,
20,
10,
5,
2)
negative = -1*positive
positive_labels = vector()
negative_labels = vector()
for (k in 1:length(positive)) {
positive_labels[k] = paste(as.character(format(positive[k],
scientific = FALSE)),
":1", sep = "")
negative_labels[k] = paste("1:", as.character(format(positive[k],
scientific = FALSE)),sep = "")
}
axis_position = c(log(positive), log(1), -log(positive)) # positions on the log scale, matching the plotted log BFF values
axis_labels = c(positive_labels, "1:1", negative_labels)
p <-
p + scale_y_continuous(breaks = axis_position, labels = axis_labels, guide = guide_axis(check.overlap = TRUE))+
geom_hline(yintercept = log(1), lwd = 0.2)
p <- p + theme(panel.grid = element_blank())
if (save) {
ggsave(savename, plot = p, device = "pdf")
}
print(p)
}
|
/scratch/gouwar.j/cran-all/cranData/BFF/R/FINAL_FUNCTIONS_plotting.R
|
# source("~/Desktop/Research/BFF/R/FINAL_SUPPORT_hypergeometric.R")
# source("~/Desktop/Research/BFF/R/FINAL_FUNCTIONS_tau2.R")
# source("~/Desktop/Research/BFF/R/FINAL_FUNCTIONS_plotting.R")
# source("~/Desktop/Research/BFF/R/FINAL_support_functions.R")
# library(gsl)
################# T functions if r is an integer and equal to 1
t_val_r1 = function(tau2, t_stat, df)
{
r = 1 + t_stat ^ 2 / df
s = 1 + t_stat ^ 2 / (df * (1 + tau2))
q = tau2 * (df + 1) / (df * (1 + tau2))
BF = (tau2 + 1) ^ (-3 / 2) * (r / s) ^ ((df + 1) / 2) * (1 + q * t_stat ^
2 / s)
to_return = log(BF)
return(to_return)
}
################# T functions if r is an integer and greater than 1
#
# get_w = function(tau, v, t = t)
# {
# num = 1 + t ^ 2 / (v * (1 + tau))
# den = 1 + t ^ 2 / v
#
# to_return = num / den
# return(to_return)
# }
#
# sum_val_t = function(r, m, tau, t, v, w)
# {
# one = choose(2 * r, 2 * m)
# two = (2 * tau * t ^ 2 / ((t ^ 2 + v) * (tau + 1) * w)) ^ m
# three = sterling_gamma((v + 2 * m + 1) / 2) * double_factorial(2 * r - 2 *
# m - 1)
#
# to_return = one * two * three
# return(to_return)
# }
#
# sum_val_function_t = function(r, tau, t, v, w)
# {
# val = 0
# for (mm in 0:r)
# {
# val = val + sum_val_t(
# r = r,
# m = mm,
# tau = tau,
# t = t,
# v = v,
# w = w
# )
# }
# return(val)
# }
#
# log_T = function(t, r, tau, v)
# {
# w = get_w(tau = tau, v = v, t = t)
# num1 = 1
# den1 = double_factorial(2 * r - 1) * (1 + tau) ^ (r + 1 / 2) * sterling_gamma((v +
# 1) / 2) * w ^ ((v + 1) / 2)
# first_term = num1 / den1
#
# second_term = sum_val_function_t(
# r = r,
# tau = tau,
# t = t,
# v = v,
# w = w
# )
#
# to_return = first_term * second_term
#
# # log_version
# to_return = log(first_term) + log(second_term)
#
# return(to_return)
# }
################# T functions if r is a fraction
log_T_frac = function(tau2, t, v, r)
{
tp1 = 1 + tau2 # one plus tau^2
tau = sqrt(tau2)
c = 1 / (tp1 ^ (r + 1 / 2)) # c
y = tau * t / sqrt((t ^ 2 + v) * tp1)
a1 = (v + 1) / 2
b1 = r + 1 / 2
c1 = 1 / 2
first_hypergeo = Gauss2F1(a1, b1, c1, y ^ 2)
four = sterling_gamma(v / 2 + 1) * sterling_gamma(r + 1)
five = sterling_gamma((v + 1) / 2) * sterling_gamma(r + 1 / 2)
gamma_term = four / five
aa = v / 2 + 1
bb = r + 1
cc = 3 / 2
second_hypergeo = Gauss2F1(aa, bb, cc, y ^ 2)
to_return = c * (first_hypergeo + y * gamma_term * second_hypergeo)
to_return = log(to_return)
return(to_return)
}
log_T_frac_onesided = function(tau2, t, v, r)
{
tp1 = 1 + tau2
tau = sqrt(tau2)
c = 1 / (tp1 ^ (r + 1 / 2)) # c
y = tau * t / sqrt((t ^ 2 + v) * tp1)
a1 = (v + 1) / 2
b1 = r + 1 / 2
c1 = 1 / 2
first_hypergeo = Gauss2F1(a1, b1, c1, y ^ 2)
four = sterling_gamma(v / 2 + 1) * sterling_gamma(r + 1)
five = sterling_gamma((v + 1) / 2) * sterling_gamma(r + 1 / 2)
gamma_term = four / five
aa = v / 2 + 1
bb = r + 1
cc = 3 / 2
second_hypergeo = Gauss2F1(aa, bb, cc, y ^ 2)
to_return = c * (first_hypergeo + 2 * y * gamma_term * second_hypergeo)
to_return = log(to_return)
return(to_return)
}
####################### backend implementation
backend_t = function(r,
t_stat,
n = NULL,
df = NULL,
one_sample = TRUE,
n1 = NULL,
n2 = NULL,
savename = NULL,
r1 = FALSE,
tau2 = NULL)
{
# same effect sizes for all tests
effect_size = seq(0.01, 1, by = 0.01)
user_supplied_tau2 = TRUE
if (is.null(tau2))
user_supplied_tau2 = FALSE
r1 = r1
frac_r = !r1
log_vals = rep(0, length(effect_size))
if (r1) {
if (one_sample)
{
if (!user_supplied_tau2)
tau2 = get_one_sample_tau2(n = n, w = effect_size)
} else {
if (!user_supplied_tau2)
tau2 = get_two_sample_tau2(n1 = n1, n2 = n2, w = effect_size)
}
log_vals = unlist(lapply(tau2, t_val_r1, t_stat = t_stat, df = df))
}
if (frac_r) {
if (one_sample)
{
if (!user_supplied_tau2)
tau2 = get_one_sample_tau2(n = n, w = effect_size, r = r)
log_vals = unlist(lapply(
tau2,
log_T_frac,
r = r,
v = df,
t = t_stat
))
} else {
if (!user_supplied_tau2)
tau2 = get_two_sample_tau2(
n1 = n1,
n2 = n2,
w = effect_size,
r = r
)
log_vals = unlist(lapply(
tau2,
log_T_frac_onesided,
r = r,
v = df,
t = t_stat
))
}
}
# stuff to return
BFF = log_vals
# check the results are finite
if (!all(is.finite(BFF)))
{
warning(
"Values entered produced non-finite numbers for some effect sizes.
The most likely scenario is the evidence was so strongly in favor of the alternative that there was numeric overflow.
Only effect sizes with non-NaN values are kept in the plots.
Please contact the maintainer for more information."
)
}
return(BFF)
}
################# T function user interaction
#' t_test_BFF
#'
#' t_test_BFF constructs BFFs based on the t test. BFFs depend on hyperparameters r and tau^2 which determine the shape and scale of the prior distributions which define the alternative hypotheses.
#' By setting r > 1, we use higher-order moments for replicated studies. Fractional moments are set with r > 1 and r not an integer.
#' All results are on the log scale.
#' Plot saved to working directory unless a full path is specified in the 'savename' variable of the function.
#'
#' @param t_stat T statistic
#' @param df degrees of freedom
#' @param n sample size (if one sample test)
#' @param one_sample is this a one-sample test? Default is TRUE
#' @param n1 sample size of group one for two sample test
#' @param n2 sample size of group two for two sample test
#' @param savename optional, filename for saving the pdf of the final plot
#' @param maximize should the Bayes factor be maximized over all possible r values? Default is FALSE. Only set to TRUE if analyzing multiple studies
#' @param r r value
#' @param tau2 tau2 values (can be a single entry or a vector of values)
#' @param save should a copy of the plot be saved?
#' @param xlab optional, x label for plot
#' @param ylab optional, y label for plot
#' @param main optional, main label for plot
#'
#' @return Returns Bayes factor function results
#' \tabular{ll}{
#' \code{log_BFF} \tab The log of the Bayes Factor Function values (returned as \code{BFF} when tau2 is user-supplied) \cr
#' \tab \cr
#' \code{effect_size} \tab Effect sizes tested (seq(0.01, 1, by = 0.01)) \cr
#' \tab \cr
#' \code{log_BFF_max_RMSE} \tab Maximum log BFF value \cr
#' \tab \cr
#' \code{max_RMSE} \tab Effect size that maximizes the BFF \cr
#' \tab \cr
#' \code{tau2} \tab tau^2 values tested \cr
#' }
#' @export
#'
#' @examples
#' tBFF = t_test_BFF(t_stat = 2.5, n = 50, df = 49, save = FALSE)
#' t_test_BFF(t_stat = 2.5, n = 50, df = 49, save = FALSE, tau2 = 0.5)
#' t_test_BFF(t_stat = 2.5, n = 50, df = 49, save = FALSE, tau2 = c(0.5, 0.2))
#' t_test_BFF(t_stat = 2.5, n1 = 50, n2 = 40, df = 88, save = FALSE, one_sample = FALSE)
#' t_test_BFF(t_stat = 2.5, n = 50, r = 2, df = 49, save = FALSE)
#' t_test_BFF(t_stat = 2.5, r = 2, n1 = 50, n2 = 30, df = 78, one_sample = FALSE, save = FALSE)
#' t_test_BFF(t_stat = 2.5, n = 50, r = 2.5, df = 49, save = FALSE)
#' t_test_BFF(t_stat=2.5, r = 2.5, n1 = 50, n2 = 30, df = 78, one_sample = FALSE, save=FALSE)
#' t_test_BFF(t_stat = 2.5, n = 50, df = 49, save = FALSE, maximize = TRUE)
#' t_test_BFF(t_stat = 2.5, n = 50, df = 49, save = FALSE, maximize = TRUE, tau2 = 0.5)
#' t_test_BFF(t_stat = 2.5, n = 50, df = 49, save = FALSE, maximize = TRUE, tau2 = c(0.5, 0.8))
#' tBFF$log_BFF_max_RMSE # maximum BFF value (on the log scale)
#' tBFF$max_RMSE # effect size which maximizes the BFF value
#'
t_test_BFF = function(t_stat,
n = NULL,
df = NULL,
one_sample = TRUE,
n1 = NULL,
n2 = NULL,
savename = NULL,
maximize = FALSE,
r = 1,
tau2 = NULL,
save = TRUE,
xlab = NULL,
ylab = NULL,
main = NULL)
{
if (is.null(n) &
(is.null(n1) &
is.null(n2)))
stop("Either n or n1 and n2 is required")
if (is.null(df))
stop("df is required")
##### same effect sizes for all tests
effect_size = seq(0.01, 1, by = 0.01)
##### is tau2 supplied as an argument?
user_supplied_tau2 = TRUE
if (is.null(tau2))
{
user_supplied_tau2 = FALSE
}
##### call results
r1 = FALSE
if (r == 1)
r1 = TRUE
results = backend_t(
t_stat = t_stat,
n = n,
df = df,
r = r,
n1 = n1,
n2 = n2,
tau2 = tau2,
r1 = r1,
one_sample = one_sample
)
##### plotting if tau2 is not specified
if (!user_supplied_tau2 && !maximize) {
bff_plot = c()
bff_plot[[1]] = results
plot_BFF(
effect_size = effect_size,
BFF = bff_plot,
save = save,
savename = savename,
xlab = xlab,
ylab = ylab,
main = main,
r = r
)
}
  ##### optimization logic
if (maximize)
{
if (is.null(tau2))
tau2 = seq(0, 1, 0.1)
optimal_r = vector(length = length(tau2))
count = 1
for (i in tau2)
{
optimal_r[count] = optimize(
backend_t,
c(1, 20),
tol = 0.001,
t_stat = t_stat,
n = n,
n1 = n1,
n2 = n2,
df = df,
one_sample = one_sample,
r1 = FALSE,
tau2 = i,
maximum = TRUE
)$maximum
count = count + 1
}
maximized_values = as.data.frame(cbind(tau2, optimal_r))
}
###### return logic
BFF = results
effect_size = effect_size
idx_max = which.max(BFF)
BFF_max_RMSE = BFF[idx_max]
max_RMSE = effect_size[idx_max]
if (maximize) {
print(
"The maximum r value for each specified tau2 is given. Re-run the test with the desired r to generate plots and get the BFF value."
)
to_return = maximized_values
} else if (user_supplied_tau2) {
to_return = list(BFF = BFF,
tau2 = tau2)
} else {
to_return = list(
log_BFF = BFF,
effect_size = effect_size,
log_BFF_max_RMSE = BFF_max_RMSE,
max_RMSE = max_RMSE
)
}
return(to_return)
}
|
/scratch/gouwar.j/cran-all/cranData/BFF/R/FINAL_FUNCTIONS_t_test.R
|
# source("~/Desktop/Research/BFF/R/FINAL_SUPPORT_hypergeometric.R")
# source("~/Desktop/Research/BFF/R/FINAL_FUNCTIONS_tau2.R")
# source("~/Desktop/Research/BFF/R/FINAL_FUNCTIONS_plotting.R")
###########################################################################################################
###########functions to set tau2 - user does not interact with these ######################################
###########################################################################################################
get_one_sample_tau2 = function(n, w, r = 1)
{
to_return = n * w ^ 2 / (2 * r)
return(to_return)
}
get_two_sample_tau2 = function(n1, n2, w, r = 1)
{
to_return = n1 * n2 * w ^ 2 / (2 * r * (n1 + n2))
return(to_return)
}
get_count_tau2 = function(n, w, k, r = 1)
{
top = n * w ^ 2 * k
bottom = 2 * (k / 2 + r - 1)
to_return = top / bottom
return(to_return)
}
get_LRT_tau2 = function(n, w, k, r = 1)
{
top = n * k * w ^ 2
bottom = 2 * (k / 2 + r - 1)
to_return = top / bottom
return(to_return)
}
get_linear_tau2 = function(n, w, k, r = 1)
{
  # default tau2 for the linear-model (F) test: n * k * w^2 / (4 * (k / 2 + r - 1))
  top = n * k * w ^ 2
  bottom = 4 * (k / 2 + r - 1)
  to_return = top / bottom
  return(to_return)
}
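# Quick illustration of the default effect-size -> tau2 mappings (hypothetical
# n, w, k and r values; kept as comments so nothing executes at build time):
#   get_one_sample_tau2(n = 100, w = 0.5, r = 1)           # 100 * 0.25 / 2 = 12.5
#   get_two_sample_tau2(n1 = 50, n2 = 50, w = 0.5, r = 1)  # 2500 * 0.25 / 200 = 3.125
#   get_count_tau2(n = 100, w = 0.5, k = 4, r = 1)         # 100 * 0.25 * 4 / 4 = 25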
################## for fractional cases
# get_wbar = function(w, k)
# {
# # a = 1/k
# # b = sum(w^2)
# # to_return = sqrt(a * b)
#
# to_return = w^2
#
# return(to_return)
# }
# get_tau_z_t_one_sample_frac = function(n, w, r)
# {
# to_return = n * w^2 / (2*r)
# return(to_return)
# }
# get_tau_z_t_two_sample_frac = function(n1, n2, w, r)
# {
# N = n1 + n2
# to_return = n1 * n2 * w^2 / (2*r*N)
# return(to_return)
# }
#
# get_tau_poisson_frac = function(n, k, w, r)
# {
#
# w_bar = get_wbar(w, k)
# num = n * k * w_bar^2
# denom = 2 * (k/2 + r - 1)
#
# to_return = num/denom
# return(to_return)
# }
#
# get_tau_linear_frac = function(n, k, w, r)
# {
#
# w_bar = get_wbar(w, k)
# num = n * k * w_bar^2
# denom = 4 * (k/2 + r - 1)
#
# to_return = num/denom
# return(to_return)
# }
#
# get_tau_likelihood_frac = function(n, k, w, r)
# {
# w_bar = get_wbar(w, k)
# num = n * k * w_bar^2
# denom = 2 * (k/2 + r - 1)
#
# to_return = num/denom
# return(to_return)
# }
|
/scratch/gouwar.j/cran-all/cranData/BFF/R/FINAL_FUNCTIONS_tau2.R
|
# rm(list = ls())
# source("~/Desktop/Research/BFF/R/FINAL_SUPPORT_hypergeometric.R")
# source("~/Desktop/Research/BFF/R/FINAL_FUNCTIONS_tau2.R")
# source("~/Desktop/Research/BFF/R/FINAL_FUNCTIONS_plotting.R")
# source("~/Desktop/Research/BFF/R/FINAL_support_functions.R")
################# Z functions if r is an integer and equal to 1
z_val_r1 = function(tau2, z_stat)
{
term_one = (tau2 + 1) ^ (-3 / 2)
term_two = 1 + tau2 * z_stat ^ 2 / (tau2 + 1)
term_three = exp(tau2 * z_stat ^ 2 / (2 * (tau2 + 1)))
to_return = term_one * term_two * term_three
to_return = log(to_return)
return(to_return)
}
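# Worked check with hypothetical inputs (comment only):
#   z_val_r1(tau2 = 1, z_stat = 2)
#   = log( 2^(-3/2) * (1 + 4/2) * exp(4/4) ) = log(2.883...) ~ 1.06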
################# Z functions if r is an integer and greater than 1
# sum_val_z = function(r, k, z, tau)
# {
# # constructing third term of z statistic
# one = choose(2 * r, 2 * k)
# two = (tau * z / (1 + tau)) ^ (2 * k)
# three = (tau / (1 + tau)) ^ (r - k)
# four = double_factorial(2 * r - 2 * k - 1)
#
# final_val = one * two * three * four
# return(final_val)
# }
#
# sum_val_function_z = function(r, z, tau)
# {
# val = 0
# for (kk in 0:r)
# {
# val = val + sum_val_z(r = r,
# k = kk,
# z = z,
# tau = tau)
# }
# return(val)
# }
#
# log_Z = function(z, r, tau)
# {
# num1 = 1
# den1 = sqrt(1 + tau) * tau ^ (r) * double_factorial(2 * r - 1)
# first_term = num1 / den1
#
# num2 = tau * z ^ 2
# den2 = 2 * (1 + tau)
# second_term = exp(num2 / den2)
#
# third_term = sum_val_function_z(r = r, z = z, tau = tau)
#
# to_return = first_term * second_term * third_term
#
# # log version
# to_return = log(first_term) + log(second_term) + log(third_term)
# return(to_return)
# }
################# Z functions if r is a fraction
log_Z_frac = function(tau2, z, r)
{
c = 1 / ((1 + tau2) ^ (r + 1 / 2))
third_term = tau2 * z ^ 2 / (2 * (1 + tau2))
  hyper_term = hypergeom1F1(r + 1 / 2, 1 / 2, third_term)$f # hypergeom1F1() returns 1F1 itself (not its log); the log is taken below
to_return = c * hyper_term
to_return = log(to_return)
return(to_return)
}
log_Z_frac_onesided = function(tau2, z, r)
{
tau = sqrt(tau2)
c = 1 / ((1 + tau2) ^ (r + 1 / 2))
y = tau * z / sqrt(2 * (1 + tau2))
first_hyper = hypergeom1F1(r + 1 / 2, 1 / 2, y ^ 2)$f
gamma_term = sterling_gamma(r + 1) / sterling_gamma(r + 1 / 2)
  second_hyper = hypergeom1F1(r + 1, 3 / 2, y ^ 2)$f
  to_return = c * (first_hyper + 2 * y * gamma_term * second_hyper)
to_return = log(to_return)
return(to_return)
}
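# Illustrative usage of the fractional-r Z functions (hypothetical inputs, kept
# as comments):
#   log_Z_frac(tau2 = 0.5, z = 2, r = 1.5)           # 1F1 term only
#   log_Z_frac_onesided(tau2 = 0.5, z = 2, r = 1.5)  # adds the odd term 2 * y * gamma_term * 1F1(r + 1, 3/2, y^2)
# Both return the log Bayes factor at a single tau2; backend_z() calls them with
# the full vector of tau2 values at once.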
####################### backend implementation
backend_z = function(r,
z_stat,
n,
one_sample = TRUE,
n1 = NULL,
n2 = NULL,
r1 = FALSE,
tau2 = NULL)
{
# same effect sizes for all tests
effect_size = seq(0.01, 1, by = 0.01)
user_supplied_tau2 = TRUE
if (is.null(tau2))
user_supplied_tau2 = FALSE
r1 = r1
frac_r = !r1
log_vals = rep(0, length(effect_size))
if (r1) {
if (one_sample)
{
if (!user_supplied_tau2)
tau2 = get_one_sample_tau2(n = n, w = effect_size)
} else {
if (!user_supplied_tau2)
tau2 = get_two_sample_tau2(n1 = n1, n2 = n2, w = effect_size)
}
log_vals = z_val_r1(tau2 = tau2, z_stat = z_stat)
}
if (frac_r) {
if (one_sample)
{
if (!user_supplied_tau2)
tau2 = get_one_sample_tau2(n = n, w = effect_size, r = r)
log_vals = log_Z_frac_onesided(z = z_stat,
r = r,
tau2 = tau2)
} else {
if (!user_supplied_tau2)
tau2 = get_two_sample_tau2(
n1 = n1,
n2 = n2,
w = effect_size,
r = r
)
log_vals = log_Z_frac(z = z_stat,
r = r,
tau2 = tau2)
}
}
# stuff to return
BFF = log_vals
# check the results are finite
if (!all(is.finite(BFF)))
{
stop(
"Values entered produced non-finite numbers.
The most likely scenario is the evidence was so strongly in favor of the
alternative that there was numeric overflow. Please contact the maintainer for more information."
)
}
return(BFF)
}
################# Z function user interaction
#' z_test_BFF
#'
#' z_test_BFF constructs BFFs based on the z test. BFFs depend on hyperparameters r and tau^2 which determine the shape and scale of the prior distributions which define the alternative hypotheses.
#' By setting r > 1, we use higher-order moments for replicated studies. Fractional moments are set with r > 1 and r not an integer.
#' All results are on the log scale.
#' The plot is saved to the working directory unless a full path is specified via the 'savename' argument.
#'
#' @param z_stat z statistic
#' @param n sample size (if one sample test)
#' @param one_sample is this a one-sample test? Default is TRUE
#' @param n1 sample size of group one for two sample test
#' @param n2 sample size of group two for two sample test
#' @param savename optional, filename for saving the pdf of the final plot
#' @param maximize Should the value of r be maximized? Default is FALSE. Only set to TRUE if analyzing multiple studies
#' @param r r value
#' @param tau2 tau2 values (can be a single entry or a vector of values)
#' @param save should a copy of the plot be saved?
#' @param xlab optional, x label for plot
#' @param ylab optional, y label for plot
#' @param main optional, main label for plot
#'
#' @return Returns Bayes factor function results
#' \tabular{ll}{
#' \code{log_BFF} \tab The log of the Bayes Factor Function values (returned as \code{BFF} when tau2 is user-supplied) \cr
#' \tab \cr
#' \code{effect_size} \tab Effect sizes tested (seq(0.01, 1, by = 0.01)) \cr
#' \tab \cr
#' \code{log_BFF_max_RMSE} \tab Maximum log BFF value \cr
#' \tab \cr
#' \code{max_RMSE} \tab Effect size that maximizes the BFF \cr
#' \tab \cr
#' \code{tau2} \tab tau^2 values tested \cr
#' }
#' @export
#'
#' @examples
#' zBFF = z_test_BFF(z_stat = 2.5, n = 50, save = FALSE)
#' z_test_BFF(z_stat = 2.5, n = 50, save = FALSE, tau2 = 0.5)
#' z_test_BFF(z_stat = 2.5, n = 50, save = FALSE, tau2 = c(0.5, 0.8))
#' z_test_BFF(z_stat = 2.5, n1 = 50, n2 = 35, one_sample = FALSE, save = FALSE) ##
#' z_test_BFF(z_stat = 2.5, n = 50, r = 2, save = FALSE)
#' z_test_BFF(z_stat = 2.5, r = 2, n1 = 50, n2 = 30, one_sample = FALSE, save = FALSE) ##
#' z_test_BFF(z_stat = 2.5, n = 50, r = 2.5, save = FALSE)
#' z_test_BFF(z_stat = 2.5, r = 2.5, n1 = 50, n2 = 30, one_sample = FALSE, save = FALSE) ##
#' z_test_BFF(z_stat=2.5, n = 50, maximize = TRUE)
#' z_test_BFF(z_stat=2.5, n = 50, maximize = TRUE, tau2 = 0.5)
#' z_test_BFF(z_stat=2.5, n = 50, maximize = TRUE, tau2 = c(0.5, 0.8))
#' zBFF$log_BFF_max_RMSE # maximum BFF value (on the log scale)
#' zBFF$max_RMSE # effect size which maximizes the BFF value
#'
z_test_BFF = function(z_stat,
n = NULL,
one_sample = TRUE,
n1 = NULL,
n2 = NULL,
savename = NULL,
maximize = FALSE,
r = 1,
tau2 = NULL,
save = TRUE,
xlab = NULL,
ylab = NULL,
main = NULL)
{
if (is.null(n) &
(is.null(n1) &
is.null(n2)))
stop("Either n or n1 and n2 is required")
##### same effect sizes for all tests
effect_size = seq(0.01, 1, by = 0.01)
##### is tau2 supplied as an argument?
user_supplied_tau2 = TRUE
if (is.null(tau2))
{
user_supplied_tau2 = FALSE
}
##### call results
r1 = FALSE
if (r == 1)
r1 = TRUE
results = backend_z(
z_stat = z_stat,
n = n,
one_sample = one_sample,
r = r,
tau2 = tau2,
r1 = r1,
n1 = n1,
n2 = n2
)
##### plotting if tau2 is not specified
if (!user_supplied_tau2 && !maximize) {
bff_plot = c()
bff_plot[[1]] = results
plot_BFF(
effect_size = effect_size,
BFF = bff_plot,
save = save,
savename = savename,
xlab = xlab,
ylab = ylab,
main = main,
r = r
)
}
  ##### optimization logic
if (maximize)
{
if (is.null(tau2))
tau2 = seq(0, 1, 0.1)
optimal_r = vector(length = length(tau2))
count = 1
for (i in tau2)
{
optimal_r[count] = optimize(
backend_z,
c(1, 20),
tol = 0.001,
z_stat = z_stat,
n = n,
n1 = n1,
n2 = n2,
one_sample = one_sample,
r1 = FALSE,
tau2 = i,
maximum = TRUE
)$maximum
count = count + 1
}
maximized_values = as.data.frame(cbind(tau2, optimal_r))
}
###### return logic
BFF = results
effect_size = effect_size
idx_max = which.max(BFF)
BFF_max_RMSE = BFF[idx_max]
max_RMSE = effect_size[idx_max]
if (maximize) {
print(
"The maximum r value for each specified tau2 is given. Re-run the test with the desired r to generate plots and get the BFF value."
)
to_return = maximized_values
} else if (user_supplied_tau2) {
to_return = list(BFF = BFF,
tau2 = tau2)
} else {
to_return = list(
log_BFF = BFF,
effect_size = effect_size,
log_BFF_max_RMSE = BFF_max_RMSE,
max_RMSE = max_RMSE
)
}
return(to_return)
}
|
/scratch/gouwar.j/cran-all/cranData/BFF/R/FINAL_FUNCTIONS_z_test.R
|
# library(gsl)
Gauss2F1 <- function(a,b,c,x){} # placeholder; the full definition is at the bottom of this file
# hypergeom1F1: confluent hypergeometric function 1F1(a, b, z)
# Adapted from gajdosandrej/CharFunToolR (numerical computation of the CDF and PDF from a characteristic function)
hypergeom1F1 <- function(a, b, z, n) {
## CHECK THE INPUT PARAMETERS
if(missing(n)) {
n <- numeric()
}
if(length(n) == 0) {
n <- 64
}
done <- FALSE
transf <- FALSE
szz <- dim(z)
z <- c(z)
sz <- length(z)
f <- rep(NaN, sz)
method <- -rep(1, sz)
loops <- rep(0, sz)
# 1F1(a,b,z) for special cases of the parameters a and b and any argument z
if(b == 0 | b == -as.integer(abs(b))) {
f <- Inf
done <- TRUE
} else if(a == b) {
f <- exp(z)
done <- TRUE
} else if(a - b == 1) {
f <- (1 + z / b) * exp(z)
done <- TRUE
} else if(a == 0) {
f <- 1
done <- TRUE
} else if(a == -1) {
f <- 1 - z / b
} else if(a == 1 && b == 2) {
f <- (exp(z) - 1) / z
} else if(a == as.integer(a) && a < 0) {
m <- -a
cr <- 1
f <- 1
for(k in 1:m) {
cr <- cr * (a + k - 1) / k / (b + k - 1) * z
f <- f + cr
}
done <- TRUE
}
# 1F1(a,b,z) for other cases of the parameters a ,b and the argument z
if(!done) {
# If b < a set 1F1(a,b,z) = exp(z)*1F1(b-a,b,-z)
if(b < a){
transf <- TRUE
a <- b - a
z <- -z
}
im_z <- Im(z)
re_z <- Re(z)
ind0 <- z == 0
ind1 <- ((abs(z) < 10 & abs(im_z) < abs(re_z)) | abs(z) < 20 + abs(b) | a < 0) & !ind0
ind2 <- (abs(z) >= 10) & (abs(im_z) >= abs(re_z)) & (a > 0) & (b > a)
ind3 <- (!ind0 & !ind1 & !ind2)
# z == 0 set 1F1(a,b,0) = 1
if(any(ind0 != 0)) {
f[ind0] <- 1
method[ind0] <- 0
}
# 1F1(a,b,z) for small abs(z) or negative a:
# abs(z) < 10 & abs(im_z) < abs(re_z) & ~ind0, OR
# abs(z) < 20 + abs(b) & ~ind0, OR
# a < 0
if(any(ind1 != 0)) {
chg <- 1
crg <- 1
chw <- 0
zz <- z[ind1]
for(j in 1:500) {
crg <- crg * (a + j - 1) / (j * (b + j -1)) * zz
chg <- chg + crg
if(all(abs((chg - chw) / chg) < 1e-15)) {
break
}
chw <- chg
}
method[ind1] <- 1
loops[ind1] <- j
f[ind1] <- chg
}
# 1F1(a,b,z) for large abs(z) such that abs(imag(z)) >= abs(re_z)
# and a > b > 0 by using the steepest descent integration:
# abs(z) >= 10 & abs(im_z) >= abs(re_z) & a > 0 & b > a
if(any(ind2 != 0)) {
x_w <- GaussLaguerre(n)
x <- x_w$x
w <- x_w$w
gba <- log(gamma(b)) - (log(gamma(a)) + log(gamma(b - a)))
rez <- re_z[ind2]
imz <- im_z[ind2]
ewa <- 1 / imz
ewb <- exp(imz * 1i) / imz
a1 <- a - 1
ba1 <- b - a - 1
r1 <- 0
r2 <- 0
for(j in 1:n) {
x_i <- x[j] / imz * 1i
aux1 <- rez * x_i + a1 * log(x_i) + ba1 * log(1 - x_i)
aux2 <- rez * (1 + x_i) + a1 * log(1+x_i) + ba1 * log(-x_i)
r1 <- r1 + w[j] * exp(gba + aux1)
r2 <- r2 + w[j] * exp(gba + aux2)
}
method[ind2] <- 2
loops[ind2] <- j
f[ind2] <- (ewa * r1 - ewb * r2) * 1i
}
# 1F1(a,b,z) for (otherwise) large z by using asymptotic expansion
if(any(ind3 != 0)) {
zz <- z[ind3]
g1 <- gamma(a)
g2 <- gamma(b)
ba <- b - a
g3 <- gamma(ba)
cs1 <- 1
cs2 <- 1
cr1 <- 1
cr2 <- 1
for(j in 1:500) {
cr1 <- -cr1 * (a + j - 1) * (a - b + j) / (zz * j)
cr2 <- cr2 * (b - a + j - 1) * (j - a) / (zz * j)
cs1 <- cs1 + cr1
cs2 <- cs2 + cr2
if(all(abs(cr1+cr2) < 1e-15)) {
break
}
}
x <- Re(zz)
y <- Im(zz)
phi <- atan(y / x)
phi[x == 0 & y > 0] <- 0.5 * pi
phi[x == 0 & y <= 0] <- -0.5 * pi
ns <- rep(1,length(x))
ns[phi > -1.5*pi & phi <= -0.5*pi] <- -1
cfac <- exp(1i * pi * a * ns)
cfac[y == 0] <- cos(pi * a)
chg1 <- (g2 / g3) * zz^(-a) * cfac * cs1
chg2 <- (g2 / g1) * exp(zz) * zz^(a-b) * cs2
chg <- chg1 + chg2
method[ind3] <- 3
loops[ind3] <- j
f[ind3] <- chg
}
}
if(transf) {
f <- exp(-z) * f
}
dim(f) <- szz
dim(method) <- szz
return(list("f" = f, "method" = method, "loops" = loops))
}
GaussLaguerre <- function(n, alpha) {
# GaussLaguerre evaluates the Gauss-Laguerre Nodes and Weights on the interval (alpha,Inf).
if(missing(alpha)) {
alpha <- numeric()
}
if(length(alpha) == 0) {
alpha <- 0
}
if(n == 64) {
x <- c(2.241587414670593e-02, 1.181225120967662e-01, 2.903657440180303e-01,
5.392862212279714e-01, 8.650370046481124e-01, 1.267814040775241e+00,
1.747859626059435e+00, 2.305463739307505e+00, 2.940965156725248e+00,
3.654752650207287e+00, 4.447266343313093e+00, 5.318999254496396e+00,
6.270499046923656e+00, 7.302370002587399e+00, 8.415275239483027e+00,
9.609939192796107e+00, 1.088715038388638e+01, 1.224776450424431e+01,
1.369270784554751e+01, 1.522298111152473e+01, 1.683966365264873e+01,
1.854391817085919e+01, 2.033699594873023e+01, 2.222024266595088e+01,
2.419510487593325e+01, 2.626313722711848e+01, 2.842601052750102e+01,
3.068552076752596e+01, 3.304359923643782e+01, 3.550232389114120e+01,
3.806393216564646e+01, 4.073083544445863e+01, 4.350563546642153e+01,
4.639114297861618e+01, 4.939039902562468e+01, 5.250669934134629e+01,
5.574362241327837e+01, 5.910506191901708e+01, 6.259526440015138e+01,
6.621887325124754e+01, 6.998098037714681e+01, 7.388718723248294e+01,
7.794367743446311e+01, 8.215730377831930e+01, 8.653569334945649e+01,
9.108737561313303e+01, 9.582194001552071e+01, 1.007502319695140e+02,
1.058845994687999e+02, 1.112392075244396e+02, 1.168304450513065e+02,
1.226774602685386e+02, 1.288028787692377e+02, 1.352337879495258e+02,
1.420031214899315e+02, 1.491516659000494e+02, 1.567310751326712e+02,
1.648086026551505e+02, 1.734749468364243e+02, 1.828582046914315e+02,
1.931511360370729e+02, 2.046720284850595e+02, 2.180318519353285e+02,
2.348095791713262e+02)
w = c(5.625284233902887e-02, 1.190239873124205e-01, 1.574964038621475e-01,
1.675470504157746e-01, 1.533528557792381e-01, 1.242210536093313e-01,
9.034230098648389e-02, 5.947775576835545e-02, 3.562751890403607e-02,
1.948041043116659e-02, 9.743594899382018e-03, 4.464310364166234e-03,
1.875359581323119e-03, 7.226469815750032e-04, 2.554875328334960e-04,
8.287143534397105e-05, 2.465686396788568e-05, 6.726713878829501e-06,
1.681785369964073e-06, 3.850812981546759e-07, 8.068728040991898e-08,
1.545723706757564e-08, 2.704480147613762e-09, 4.316775475431567e-10,
6.277752541794292e-11, 8.306317376250609e-12, 9.984031787119531e-13,
1.088353887008957e-13, 1.074017402970290e-14, 9.575737246084761e-16,
7.697028063946171e-17, 5.564881054436309e-18, 3.609756216814263e-19,
2.095095239662055e-20, 1.084792493734732e-21, 4.994712583627291e-23,
2.037932077329677e-24, 7.340603648778086e-26, 2.324586950075985e-27,
6.464528714804253e-29, 1.582906573670680e-30, 2.881154588676925e-32,
5.412535994048359e-34, 2.241048506440640e-34, 5.283742844838896e-36,
1.338202299148180e-34, 1.586340564468588e-35, 1.215192241351559e-34,
1.217775335792122e-34, 1.673365556974291e-35, 2.735714461640009e-34,
2.185380020634853e-34, 5.648495554594729e-35, 9.997398610925997e-36,
1.500478177990158e-36, 1.416076744376295e-37, 5.444799396304293e-39,
1.153008451969226e-40, 2.474260963687568e-42, 9.293338889710336e-45,
2.974573897074668e-47, 1.941796748832940e-50, 5.776547415033449e-54,
1.794991571658772e-58)
return(list("x" = x, "w" = w))
} else {
idx <- 1:n
a <- (2 * idx - 1) + alpha
b <- sqrt(idx[1:(n-1)] * ((1:(n-1)) + alpha)) # bandSparse() Matrix package; sdiag() mgcv package
CM <- diag(a) + as.matrix(matrix(0, n, n) + Matrix::bandSparse(n, n, c(1, -1), list(b, b)))
eig <- eigen(CM)
V <- eig$vectors
L <- diag(eig$values)
ind <- idxInOrigSeq(diag(L))
x <- sort(diag(L))
V <- t(Conj(V[,ind]))
w <- gamma(alpha + 1) * V[,1] ^ 2
return(list("x" = x, "w" = w))
}
}
idxInOrigSeq <- function(orig_seq) {
sort_seq <- sort(orig_seq)
indices <- vector()
used_indices <- vector()
for(i in 1:length(orig_seq)) {
idx <- 0
for(j in 1:length(orig_seq)) {
if(sort_seq[i] == orig_seq[j] && !j %in% used_indices) {
idx <- j
used_indices <- c(used_indices, j)
break
}
}
indices <- c(indices, idx)
}
return(indices)
}
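# Note: for numeric input without missing values this is equivalent to base::order(), e.g.
#   idxInOrigSeq(c(3, 1, 2))  # 2 3 1, same as order(c(3, 1, 2));
# the inner loop simply resolves ties by taking the first unused match.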
# library(gsl)
Gauss2F1 <- function(a, b, c, x){
  if(x >= 0 & x < 1){
    to_return = gsl::hyperg_2F1(a, b, c, x)
  }else{
    to_return = gsl::hyperg_2F1(c - a, b, c, 1 - 1 / (1 - x)) / (1 - x) ^ b
  }
  return(to_return)
}
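# Note on the branch above: for x < 0 the Pfaff transformation
#   2F1(a, b; c; x) = (1 - x)^(-b) * 2F1(c - a, b; c; x / (x - 1))
# is applied (x / (x - 1) is written as 1 - 1 / (1 - x)), which maps a negative
# argument into (0, 1) before calling gsl::hyperg_2F1(). Quick check (comment only):
#   Gauss2F1(1, 1, 2, 0.5)  # equals -log(1 - 0.5) / 0.5 = 1.386...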
|
/scratch/gouwar.j/cran-all/cranData/BFF/R/FINAL_SUPPORT_hypergeometric.R
|
double_factorial_even = function(n) {
first = 2^(n/2)
second = factorial(n/2)
to_return = first * second
return(to_return)
}
double_factorial_odd = function(n) {
n1 = n+1
numerator = factorial(n1)
first = 2^(n1/2)
second = factorial(n1/2)
  denominator = first * second
  to_return = numerator / denominator
return(to_return)
}
# double factorial expression
double_factorial = function(n) {
if (n %% 2 == 1) {
to_return = double_factorial_odd(n)
} else {
to_return = double_factorial_even(n)
}
return(to_return)
}
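# Quick checks (comments only):
#   double_factorial(5)  # 5 * 3 * 1 = 15
#   double_factorial(6)  # 6 * 4 * 2 = 48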
# gamma function evaluated via lgamma() for numerical stability with large arguments
sterling_gamma = function(n)
{
# if (n<7)
# {
# const = gamma(n)
# } else {
# const = n*log(n) - n
# }
return(exp(lgamma(n)))
# return(const)
}
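# Example (comment only): sterling_gamma(5) equals gamma(5) = 24; computing through
# lgamma() keeps the result stable for large arguments.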
|
/scratch/gouwar.j/cran-all/cranData/BFF/R/FINAL_support_functions.R
|
# # .onLoad <- function(libname, pkgname)
# # {
# # library.dynam("BFF", pkgname, libname)
# # }
#
BFFStartupMessage <- function()
{
# Startup message obtained as
# > figlet -f slant BFF
msg <- c(paste("version", utils::packageVersion("BFF")), "\nType 'citation(\"BFF\")' for citing this R package in publications")
return(msg)
}
.onAttach <- function(lib, pkg)
{
# unlock .BFF variable allowing its modification
# unlockBinding(".BFF", asNamespace("BFF"))
# startup message
msg <- BFFStartupMessage()
if (!interactive())
msg[1] <- paste("Package 'BFF' version", utils::packageVersion("BFF"), "for Bayesian hypothesis testing.")
packageStartupMessage(msg)
invisible()
}
# .onLoad = function(libname, pkgname)
# {
# msg = BFFStartupMessage()
# packageStartupMessage(msg)
# invisible()
# }
|
/scratch/gouwar.j/cran-all/cranData/BFF/R/zzz.R
|
## ---- include = FALSE---------------------------------------------------------
# knitr::opts_chunk$set(
# collapse = TRUE
# )
## ----setup, echo = FALSE------------------------------------------------------
library(BFF)
library(BSDA)
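## ----woe-interpretation--------------------------------------------------------
# Illustration only: hypothetical log Bayes factor (WoE) values classified with
# the rule-of-thumb thresholds described in the vignette text.
woe = c(-0.4, 2.1, 4.2, 6.3)
cut(woe, breaks = c(-Inf, -5, -3, -1, 1, 3, 5, Inf),
    labels = c("very strong H0", "strong H0", "positive H0", "no strong evidence",
               "positive H1", "strong H1", "very strong H1"))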
## ----z-statistics-------------------------------------------------------------
# generating some data
n = 100
data_one = rnorm(n = n, mean = 0.2, sd = 1)
data_two = rnorm(n = n, mean = 0.1, sd = 1)
# calculating test statistics using z.test
# one-sample z-test
z_score_one = z.test(x = data_one, sigma.x = 1)$statistic
# two-sample z-test
z_score_two = z.test(x = data_one, y = data_two, sigma.x = 1, sigma.y = 1)$statistic
## ----calculating BFF for z test-----------------------------------------------
# default r and tau2
z_BFF_one = z_test_BFF(z_stat = z_score_one, n = 100, save = FALSE) #one sample z-test
z_BFF_two = z_test_BFF(z_stat = z_score_two, one_sample = FALSE, n1 = 100, n2 = 100, save = FALSE) #two sample z-test
# default r and user specified tau2
# single tau2
z_test_BFF(z_stat = z_score_one, n = 100, save = FALSE, tau2 = 0.5) #one sample z-test
z_test_BFF(z_stat = z_score_two, one_sample = FALSE, n1 = 100, n2 = 100, save = FALSE, tau2 = 0.5) #two sample z-test
# vector of tau2 values
z_test_BFF(z_stat = z_score_one, n = 100, save = FALSE, tau2 = c(0.5, 0.8)) #one sample z-test
z_test_BFF(z_stat = z_score_two, one_sample = FALSE, n1 = 100, n2 = 100, save = FALSE, tau2 = c(0.5, 0.8)) #two sample z-test
# user specified r and default tau2
z_test_BFF(z_stat = z_score_one, n = 100, save = FALSE, r = 2) #one sample z-test, integer r >1 (higher order moments)
z_test_BFF(z_stat = z_score_two, one_sample = FALSE, n1 = 100, n2 = 100, save = FALSE, r = 2) #two sample z-test, integer r >1 (higher order moments)
z_test_BFF(z_stat = z_score_one, n = 100, save = FALSE, r = 2.5) #one sample z-test, continuous r (fractional moments)
z_test_BFF(z_stat = z_score_two, one_sample = FALSE, n1 = 100, n2 = 100, save = FALSE, r = 2.5) #two sample z-test, continuous r (fractional moments)
## ----t-statistics-------------------------------------------------------------
# generating some data
n = 100
data_one = rnorm(n = n, mean = -0.1)
data_two = rnorm(n = n, mean = 0.1)
# calculating test statistics using t.test
t_one = t.test(x = data_one)
t_two = t.test(x = data_one, y = data_two)
t_score_one = t_one$statistic
t_score_two = t_two$statistic
t_df_one = n - 1
t_df_two = 197.9
## ----calculating BFF for t test-----------------------------------------------
# default r and tau2
t_BFF_one = t_test_BFF(t_stat = t_score_one, df = t_df_one, n = 100, save = FALSE) #one sample t-test
t_BFF_two = t_test_BFF(t_stat = t_score_two, df = t_df_two, one_sample = FALSE, n1 = 100, n2 = 100, save = FALSE) #two sample t-test
# default r and user specified tau2
# single tau2
t_test_BFF(t_stat = t_score_one, df = t_df_one, n = 100, save = FALSE, tau2 = 0.5) #one sample t-test
t_test_BFF(t_stat = t_score_two, df = t_df_two, one_sample = FALSE, n1 = 100, n2 = 100, save = FALSE, tau2 = 0.5) #two sample t-test
# vector of tau2 values
t_test_BFF(t_stat = t_score_one, df = t_df_one, n = 100, save = FALSE, tau2 = c(0.5, 0.8)) #one sample t-test
t_test_BFF(t_stat = t_score_two, df = t_df_two, one_sample = FALSE, n1 = 100, n2 = 100, save = FALSE, tau2 = c(0.5, 0.8)) #two sample t-test
# user specified r and default tau2
t_test_BFF(t_stat = t_score_one, df = t_df_one, n = 100, save = FALSE, r = 2) #one sample t-test, integer r >1 (higher order moments)
t_test_BFF(t_stat = t_score_two, df = t_df_two, one_sample = FALSE, n1 = 100, n2 = 100, save = FALSE, r = 2) #two sample t-test, integer r >1 (higher order moments)
t_test_BFF(t_stat = t_score_one, df = t_df_one, n = 100, save = FALSE, r = 2.5) #one sample t-test, continuous r (fractional moments)
t_test_BFF(t_stat = t_score_two, df = t_df_two, one_sample = FALSE, n1 = 100, n2 = 100, save = FALSE, r = 2.5) #two sample t-test, continuous r (fractional moments)
## ----chi2-test----------------------------------------------------------------
# generate some data
x <- matrix(c(12, 5, 7, 7), ncol = 2)
# calculating chi2 test statistic from chisq.test
chi2_stat = chisq.test(x)$statistic
## ----calculating BFF for chi2 test--------------------------------------------
# default r and tau2
chi2_BFF_pear = chi2_test_BFF(chi2_stat = chi2_stat, df = 1, n = 4, save = FALSE) #Pearson's chi2 test
chi2_BFF_lrt = chi2_test_BFF(chi2_stat = chi2_stat, df = 1, n = 4, save = FALSE, pearsons = FALSE) #Likelihood ratio chi2 test
# default r and user specified tau2
# single tau2
chi2_test_BFF(chi2_stat = chi2_stat, df = 1, n = 4, save = FALSE, tau2 = 0.5) #Pearson's chi2 test
chi2_test_BFF(chi2_stat = chi2_stat, df = 1, n = 4, save = FALSE, pearsons = FALSE, tau2 = 0.5) #Likelihood ratio chi2 test
# vector of tau2 values
chi2_test_BFF(chi2_stat = chi2_stat, df = 1, n = 4, save = FALSE, tau2 = c(0.5, 0.8)) #Pearson's chi2 test
chi2_test_BFF(chi2_stat = chi2_stat, df = 1, n = 4, save = FALSE, pearsons = FALSE, tau2 = c(0.5, 0.8)) #Likelihood ratio chi2 test
# user specified r and default tau2
chi2_test_BFF(chi2_stat = chi2_stat, df = 1, n = 4, save = FALSE, r = 2) #Pearson's chi2 test, integer r >1 (higher order moments)
chi2_test_BFF(chi2_stat = chi2_stat, df = 1, n = 4, save = FALSE, pearsons = FALSE, r = 2) #Likelihood ratio chi2 test, integer r >1 (higher order moments)
chi2_test_BFF(chi2_stat = chi2_stat, df = 1, n = 4, save = FALSE, r = 2.5) #Pearson's chi2 test, continuous r (fractional moments)
chi2_test_BFF(chi2_stat = chi2_stat, df = 1, n = 4, save = FALSE, pearsons = FALSE, r = 2.5) #Likelihood ratio chi2 test, continuous r (fractional moments)
## ----f statistics-------------------------------------------------------------
# generate some data
n = 100
p = 3
X = matrix(rnorm(n*p), nrow = n)
beta = c(1,1,0)
y = X %*% beta + rnorm(n)
model1 = lm(y ~ X)
anova_model = anova(model1)
F_stat = anova_model$`F value`[1]
## ----calculating BFF for f test-----------------------------------------------
# default r and tau2
F_BFF_one = f_test_BFF(f_stat = F_stat, df1 = anova_model$Df[1], df2 = anova_model$Df[2], n = n, save = FALSE)
# default r and user specified tau2
# single tau2
f_test_BFF(f_stat = F_stat, df1 = anova_model$Df[1], df2 = anova_model$Df[2], n = n, tau2 = 0.5, save = FALSE)
# vector of tau2 values
f_test_BFF(f_stat = F_stat, df1 = anova_model$Df[1], df2 = anova_model$Df[2], n = n, tau2 = c(0.5, 0.8), save = FALSE)
# user specified r and default tau2
f_test_BFF(f_stat = F_stat, df1 = anova_model$Df[1], df2 = anova_model$Df[2], n = n, r = 2, save = FALSE) #integer r >1 (higher order moments)
f_test_BFF(f_stat = F_stat, df1 = anova_model$Df[1], df2 = anova_model$Df[2], n = n, r = 2.5, save = FALSE) #continuous r (fractional moments)
## ----maximing r for z test----------------------------------------------------
# default tau2
z_test_BFF(z_stat = z_score_one, n = 100, save = FALSE, maximize = TRUE) #one sample z-test
z_test_BFF(z_stat = z_score_two, one_sample = FALSE, n1 = 100, n2 = 100, save = FALSE, maximize = TRUE) #two sample z-test
# user specified tau2
#single tau2
z_test_BFF(z_stat = z_score_one, n = 100, save = FALSE, tau2 = 0.5, maximize = TRUE) #one sample z-test
z_test_BFF(z_stat = z_score_two, one_sample = FALSE, n1 = 100, n2 = 100, save = FALSE, tau2 = 0.5, maximize = TRUE) #two sample z- test
# vector of tau2 values
z_test_BFF(z_stat = z_score_one, n = 100, save = FALSE, tau2 = c(0.5, 0.8), maximize = TRUE) #one sample z-test
z_test_BFF(z_stat = z_score_two, one_sample = FALSE, n1 = 100, n2 = 100, save = FALSE, tau2 = c(0.5, 0.8), maximize = TRUE) #two sample z-test
## ----plotting for z test------------------------------------------------------
# saving the plot as a pdf with default name (BFF_plot.pdf). Stored in working directory.
z_test_BFF(z_stat = z_score_one, n = 100, save = FALSE) #one sample z-test
z_test_BFF(z_stat = z_score_two, one_sample = FALSE, n1 = 100, n2 = 100, save = TRUE) #two sample z-test
# saving the plot as a pdf with user specified name.
z_test_BFF(z_stat = z_score_one, n = 100, save = FALSE, savename = "z-BFF-one.pdf") #one sample z-test
z_test_BFF(z_stat = z_score_two, one_sample = FALSE, n1 = 100, n2 = 100, save = TRUE, savename = "z-BFF-two.pdf") #two sample z-test
# customizing x-axis labels, y-axis labels and main title
z_test_BFF(z_stat = z_score_one, n = 100, save = FALSE, xlab = "RMSE", ylab = "Logarithm of Bayes Factor", main = "BFF curves") #one sample z-test
z_BFF_two = z_test_BFF(z_stat = z_score_two, one_sample = FALSE, n1 = 100, n2 = 100, save = TRUE, xlab = expression(tilde(omega)), ylab = expression(log(BF[10])), main = "BFF curves") #two sample z-test
|
/scratch/gouwar.j/cran-all/cranData/BFF/inst/doc/BFF_vignette.R
|
---
title: "BFF"
output: pdf_document
extra_dependencies: ["multirow"]
vignette: >
%\VignetteIndexEntry{BFF_vignette}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r, include = FALSE}
# knitr::opts_chunk$set(
# collapse = TRUE
# )
```
```{r setup, echo = FALSE}
library(BFF)
library(BSDA)
```
Bayes factors are an alternative to p-values for evaluating hypothesis tests. Unlike p-values, Bayes factors can provide evidence for a null hypothesis. Bayes factors also have a clear interpretation: a larger Bayes factor shows more evidence for a hypothesis, whereas p-values do not (can anyone tell the difference between 0.05 and 0.06?). Bayes factors have in the past had limited acceptance due to computational issues and the difficulty of selecting a prior. Recent work (see 'Bayes factor functions for reporting outcomes of hypothesis tests,' 2023, and 'On the use of non-local prior densities in Bayesian hypothesis tests,' 2010) introduced the idea of using non-local priors to calculate Bayes factors. This package implements Bayes Factor Functions (BFFs). In contrast to a single Bayes factor, BFFs express Bayes factors as a function of the prior densities used to
define the alternative hypotheses.
Interpreting Bayes factors is usually done on the log scale (also called the weight of evidence, or WoE). On this scale, a positive value represents evidence for the alternative hypothesis and a negative value represents evidence for the null hypothesis. As a rule of thumb, the following table can be used to interpret a Bayes factor. These are only guidelines, and some fields may require higher or lower thresholds of evidence.
```{=latex}
\begin{table}[!ht]
\centering
\begin{tabular}{c|c} \hline\hline
WoE & Interpretation \\ \hline
(-1, 1) & No strong evidence for either $H_0$ or $H_1$ \\
(1, 3) & Positive evidence for $H_1$ \\
(-1, -3) & Positive evidence for $H_0$ \\
(3, 5) & Strong evidence for $H_1$ \\
(-3, -5) & Strong evidence for $H_0$ \\
(5, $\infty$) & Very strong evidence for $H_1$ \\
(-5, -$\infty$) & Very strong evidence for $H_0$ \\ \hline
\end{tabular}
\caption{Common interpretations of the Weight of Evidence}
\label{thresholds}
\end{table}
```
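As a quick illustration of reading this table, the thresholds can be applied to a vector of log Bayes factors with `cut()`; the WoE values below are made up for demonstration, not computed from data.
```{r woe-interpretation}
# hypothetical log Bayes factor (WoE) values, classified with the thresholds above
woe = c(-0.4, 2.1, 4.2, 6.3)
cut(woe, breaks = c(-Inf, -5, -3, -1, 1, 3, 5, Inf),
    labels = c("very strong H0", "strong H0", "positive H0", "no strong evidence",
               "positive H1", "strong H1", "very strong H1"))
```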
This package provides Bayes factor values for standardized effect sizes from 0 to 1. A small effect size is usually considered to be 0.2 to 0.5, a medium effect size 0.5 to 0.8, and a large effect size greater than 0.8.
Using this package is very similar to using the familiar t, z, chi^2, and F tests in R. You will need the same information: the test statistic, degrees of freedom, and sample size. A graph is produced that shows the BFF curve over the different effect sizes.
For evaluating evidence from multiple studies (see 'Bayes factor functions,' 2023, arXiv), the parameter 'r' can also be set. The default value of r is 1, but an r that maximizes the Bayes factor at each tau^2 can be suggested by setting the 'maximize' argument in each test to TRUE.
# The following examples will show how the BFF package calculates Bayes factors based on test statistics
## z - test
```{r z-statistics}
# generating some data
n = 100
data_one = rnorm(n = n, mean = 0.2, sd = 1)
data_two = rnorm(n = n, mean = 0.1, sd = 1)
# calculating test statistics using z.test
# one-sample z-test
z_score_one = z.test(x = data_one, sigma.x = 1)$statistic
# two-sample z-test
z_score_two = z.test(x = data_one, y = data_two, sigma.x = 1, sigma.y = 1)$statistic
```
Calculating BFF using z_test_BFF
```{r calculating BFF for z test}
# default r and tau2
z_BFF_one = z_test_BFF(z_stat = z_score_one, n = 100, save = FALSE) #one sample z-test
z_BFF_two = z_test_BFF(z_stat = z_score_two, one_sample = FALSE, n1 = 100, n2 = 100, save = FALSE) #two sample z-test
# default r and user specified tau2
# single tau2
z_test_BFF(z_stat = z_score_one, n = 100, save = FALSE, tau2 = 0.5) #one sample z-test
z_test_BFF(z_stat = z_score_two, one_sample = FALSE, n1 = 100, n2 = 100, save = FALSE, tau2 = 0.5) #two sample z-test
# vector of tau2 values
z_test_BFF(z_stat = z_score_one, n = 100, save = FALSE, tau2 = c(0.5, 0.8)) #one sample z-test
z_test_BFF(z_stat = z_score_two, one_sample = FALSE, n1 = 100, n2 = 100, save = FALSE, tau2 = c(0.5, 0.8)) #two sample z-test
# user specified r and default tau2
z_test_BFF(z_stat = z_score_one, n = 100, save = FALSE, r = 2) #one sample z-test, integer r >1 (higher order moments)
z_test_BFF(z_stat = z_score_two, one_sample = FALSE, n1 = 100, n2 = 100, save = FALSE, r = 2) #two sample z-test, integer r >1 (higher order moments)
z_test_BFF(z_stat = z_score_one, n = 100, save = FALSE, r = 2.5) #one sample z-test, continuous r (fractional moments)
z_test_BFF(z_stat = z_score_two, one_sample = FALSE, n1 = 100, n2 = 100, save = FALSE, r = 2.5) #two sample z-test, continuous r (fractional moments)
```
## t - test
```{r t-statistics}
# generating some data
n = 100
data_one = rnorm(n = n, mean = -0.1)
data_two = rnorm(n = n, mean = 0.1)
# calculating test statistics using t.test
t_one = t.test(x = data_one)
t_two = t.test(x = data_one, y = data_two)
t_score_one = t_one$statistic
t_score_two = t_two$statistic
t_df_one = n - 1
t_df_two = 197.9
```
Calculating BFF using t_test_BFF
```{r calculating BFF for t test}
# default r and tau2
t_BFF_one = t_test_BFF(t_stat = t_score_one, df = t_df_one, n = 100, save = FALSE) #one sample t-test
t_BFF_two = t_test_BFF(t_stat = t_score_two, df = t_df_two, one_sample = FALSE, n1 = 100, n2 = 100, save = FALSE) #two sample t-test
# default r and user specified tau2
# single tau2
t_test_BFF(t_stat = t_score_one, df = t_df_one, n = 100, save = FALSE, tau2 = 0.5) #one sample t-test
t_test_BFF(t_stat = t_score_two, df = t_df_two, one_sample = FALSE, n1 = 100, n2 = 100, save = FALSE, tau2 = 0.5) #two sample t-test
# vector of tau2 values
t_test_BFF(t_stat = t_score_one, df = t_df_one, n = 100, save = FALSE, tau2 = c(0.5, 0.8)) #one sample t-test
t_test_BFF(t_stat = t_score_two, df = t_df_two, one_sample = FALSE, n1 = 100, n2 = 100, save = FALSE, tau2 = c(0.5, 0.8)) #two sample t-test
# user specified r and default tau2
t_test_BFF(t_stat = t_score_one, df = t_df_one, n = 100, save = FALSE, r = 2) #one sample t-test, integer r >1 (higher order moments)
t_test_BFF(t_stat = t_score_two, df = t_df_two, one_sample = FALSE, n1 = 100, n2 = 100, save = FALSE, r = 2) #two sample t-test, integer r >1 (higher order moments)
t_test_BFF(t_stat = t_score_one, df = t_df_one, n = 100, save = FALSE, r = 2.5) #one sample t-test, continuous r (fractional moments)
t_test_BFF(t_stat = t_score_two, df = t_df_two, one_sample = FALSE, n1 = 100, n2 = 100, save = FALSE, r = 2.5) #two sample t-test, continuous r (fractional moments)
```
## chi^2 - test
```{r chi2-test}
# generate some data
x <- matrix(c(12, 5, 7, 7), ncol = 2)
# calculating chi2 test statistic from chisq.test
chi2_stat = chisq.test(x)$statistic
```
Calculating BFF using chi2_test_BFF
```{r calculating BFF for chi2 test}
# default r and tau2
chi2_BFF_pear = chi2_test_BFF(chi2_stat = chi2_stat, df = 1, n = 4, save = FALSE) #Pearson's chi2 test
chi2_BFF_lrt = chi2_test_BFF(chi2_stat = chi2_stat, df = 1, n = 4, save = FALSE, pearsons = FALSE) #Likelihood ratio chi2 test
# default r and user specified tau2
# single tau2
chi2_test_BFF(chi2_stat = chi2_stat, df = 1, n = 4, save = FALSE, tau2 = 0.5) #Pearson's chi2 test
chi2_test_BFF(chi2_stat = chi2_stat, df = 1, n = 4, save = FALSE, pearsons = FALSE, tau2 = 0.5) #Likelihood ratio chi2 test
# vector of tau2 values
chi2_test_BFF(chi2_stat = chi2_stat, df = 1, n = 4, save = FALSE, tau2 = c(0.5, 0.8)) #Pearson's chi2 test
chi2_test_BFF(chi2_stat = chi2_stat, df = 1, n = 4, save = FALSE, pearsons = FALSE, tau2 = c(0.5, 0.8)) #Likelihood ratio chi2 test
# user specified r and default tau2
chi2_test_BFF(chi2_stat = chi2_stat, df = 1, n = 4, save = FALSE, r = 2) #Pearson's chi2 test, integer r >1 (higher order moments)
chi2_test_BFF(chi2_stat = chi2_stat, df = 1, n = 4, save = FALSE, pearsons = FALSE, r = 2) #Likelihood ratio chi2 test, integer r >1 (higher order moments)
chi2_test_BFF(chi2_stat = chi2_stat, df = 1, n = 4, save = FALSE, r = 2.5) #Pearson's chi2 test, continuous r (fractional moments)
chi2_test_BFF(chi2_stat = chi2_stat, df = 1, n = 4, save = FALSE, pearsons = FALSE, r = 2.5) #Likelihood ratio chi2 test, continuous r (fractional moments)
```
## F - test
```{r f statistics}
# generate some data
n = 100
p = 3
X = matrix(rnorm(n*p), nrow = n)
beta = c(1,1,0)
y = X %*% beta + rnorm(n)
model1 = lm(y ~ X)
anova_model = anova(model1)
F_stat = anova_model$`F value`[1]
```
Calculating BFF using f_test_BFF
```{r calculating BFF for f test}
# default r and tau2
F_BFF_one = f_test_BFF(f_stat = F_stat, df1 = anova_model$Df[1], df2 = anova_model$Df[2], n = n, save = FALSE)
# default r and user specified tau2
# single tau2
f_test_BFF(f_stat = F_stat, df1 = anova_model$Df[1], df2 = anova_model$Df[2], n = n, tau2 = 0.5, save = FALSE)
# vector of tau2 values
f_test_BFF(f_stat = F_stat, df1 = anova_model$Df[1], df2 = anova_model$Df[2], n = n, tau2 = c(0.5, 0.8), save = FALSE)
# user specified r and default tau2
f_test_BFF(f_stat = F_stat, df1 = anova_model$Df[1], df2 = anova_model$Df[2], n = n, r = 2, save = FALSE) #integer r >1 (higher order moments)
f_test_BFF(f_stat = F_stat, df1 = anova_model$Df[1], df2 = anova_model$Df[2], n = n, r = 2.5, save = FALSE) #continuous r (fractional moments)
```
## Maximizing r for each specified tau2 (the same maximization parameter applies to all tests, examples are using the z test)
```{r maximing r for z test}
# default tau2
z_test_BFF(z_stat = z_score_one, n = 100, save = FALSE, maximize = TRUE) #one sample z-test
z_test_BFF(z_stat = z_score_two, one_sample = FALSE, n1 = 100, n2 = 100, save = FALSE, maximize = TRUE) #two sample z-test
# user specified tau2
#single tau2
z_test_BFF(z_stat = z_score_one, n = 100, save = FALSE, tau2 = 0.5, maximize = TRUE) #one sample z-test
z_test_BFF(z_stat = z_score_two, one_sample = FALSE, n1 = 100, n2 = 100, save = FALSE, tau2 = 0.5, maximize = TRUE) #two sample z- test
# vector of tau2 values
z_test_BFF(z_stat = z_score_one, n = 100, save = FALSE, tau2 = c(0.5, 0.8), maximize = TRUE) #one sample z-test
z_test_BFF(z_stat = z_score_two, one_sample = FALSE, n1 = 100, n2 = 100, save = FALSE, tau2 = c(0.5, 0.8), maximize = TRUE) #two sample z-test
```
Plotting (the same plotting arguments apply to all tests; the examples use the z test)
Plots can be saved by setting "save = TRUE". If plots are saved, they are saved in the working directory (or to the path given in 'savename').
```{r plotting for z test}
# saving the plot as a pdf with default name (BFF_plot.pdf). Stored in working directory.
z_test_BFF(z_stat = z_score_one, n = 100, save = FALSE) #one sample z-test
z_test_BFF(z_stat = z_score_two, one_sample = FALSE, n1 = 100, n2 = 100, save = TRUE) #two sample z-test
# saving the plot as a pdf with user specified name.
z_test_BFF(z_stat = z_score_one, n = 100, save = FALSE, savename = "z-BFF-one.pdf") #one sample z-test
z_test_BFF(z_stat = z_score_two, one_sample = FALSE, n1 = 100, n2 = 100, save = TRUE, savename = "z-BFF-two.pdf") #two sample z-test
# customizing x-axis labels, y-axis labels and main title
z_test_BFF(z_stat = z_score_one, n = 100, save = FALSE, xlab = "RMSE", ylab = "Logarithm of Bayes Factor", main = "BFF curves") #one sample z-test
z_BFF_two = z_test_BFF(z_stat = z_score_two, one_sample = FALSE, n1 = 100, n2 = 100, save = TRUE, xlab = expression(tilde(omega)), ylab = expression(log(BF[10])), main = "BFF curves") #two sample z-test
```
## Default choices of tau2 for common statistical tests
<!-- \begin{table}[h] -->
<!-- \caption{Default choices of $\tau_{\omega,r}$} -->
<!-- \label{parset} -->
<!-- % -->
<!-- \begin{tabular}{lccc} \hline \hline -->
<!-- {\bf Test }& Statistic & Standardized Effect ($\omega$) & $\tau^2_{\omega,r}$ \\ \hline \hline -->
<!-- {1-sample z} & {$\frac{\sqrt{n}\bar{x}}{\sigma}$} & $\frac{\mu}{\sigma}$ & $ \frac{n\omega^2}{2r}$ -->
<!-- \\ -->
<!-- {1-sample t} & {$\frac{\sqrt{n}\bar{x}}{s}$} & $\frac{\mu}{\sigma}$ & $ \frac{n\omega^2}{2r}$ -->
<!-- \\ -->
<!-- {2-sample z} -->
<!-- & $\frac{\sqrt{n_1 n_2}(\bar{x}_1-\bar{x}_2)}{\sigma\sqrt{n_1+n_2}} $ & $\frac{\mu_1-\mu_2}{\sigma}$ -->
<!-- & $\frac{n_1 n_2\omega^2}{2r(n_1+n_2)}$ \\ -->
<!-- {2-sample t} -->
<!-- & $\frac{\sqrt{n_1 n_2}(\bar{x}_1-\bar{x}_2)}{s\sqrt{n_1+n_2}} $ & $\frac{\mu_1-\mu_2}{\sigma}$ -->
<!-- & $\frac{n_1 n_2\omega^2}{2r(n_1+n_2)}$ \\ -->
<!-- Multinomial/Poisson & -->
<!-- \multirow{2}{*}{$\chi^2_{\nu} = \sum\limits_{i=1}^k \frac{(n_i-nf_i(\hat{\theta}))^2}{nf_i(\hat{\theta})}$} & -->
<!-- \multirow{2}{*}{{ $ \left( \frac{p_{i}-f_i(\theta)}{\sqrt{f_i(\theta)}} \right)_{k\times 1} $} }& -->
<!-- \multirow{2}{*}{$ \frac{n \omega'\omega}{2(\frac{k}{2}+r-1)} $ = $\frac{nk\Tilde{\omega}^2}{2(\frac{k}{2}+r-1)}$} \\ -->
<!-- & & & \\ -->
<!-- Linear model& \multirow{2}{*}{$F_{k,n-p} = \frac{(RSS_0-RSS_1)/k}{[(RSS_1)/(n-p)]}$} & -->
<!-- \multirow{2}{*}{$\frac{\mathbf{L}^{-1}(\mathbf{A}\boldsymbol{\beta}-\mathbf{a})}{\sigma}$} & -->
<!-- \multirow{2}{*}{$ \frac{n \omega'\omega}{2(\frac{k}{2}+r-1)} $ = $\frac{nk\Tilde{\omega}^2}{4(\frac{k}{2}+r-1)}$} -->
<!-- \\ -->
<!-- \\ -->
<!-- Likelihood Ratio & -->
<!-- \multirow{2}{*}{$ \chi^2_{k} = -2\log\left[\frac{l(\theta_{r0},\hat{\theta_{s}})}{l(\hat{\theta})} \right]$} & -->
<!-- \multirow{2}{*}{${\bf L}^{-1}(\theta_{r}-\theta_{r0}) $ } & -->
<!-- \multirow{2}{*}{$ \frac{n \omega'\omega}{2(\frac{k}{2}+r-1)} $ = $\frac{nk\Tilde{\omega}^2}{2(\frac{k}{2}+r-1)}$} \\ -->
<!-- & \\ -->
<!-- \hline -->
<!-- \end{tabular} -->
<!-- % -->
<!-- \end{table} -->
|
/scratch/gouwar.j/cran-all/cranData/BFF/inst/doc/BFF_vignette.Rmd
|
---
title: "BFF"
output: pdf_document
extra_dependencies: ["multirow"]
vignette: >
%\VignetteIndexEntry{BFF_vignette}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r, include = FALSE}
# knitr::opts_chunk$set(
# collapse = TRUE
# )
```
```{r setup, echo = FALSE}
library(BFF)
library(BSDA)
```
Bayes factors are an alternative to p-values for evaluating hypothesis tests. Unlike p-values, Bayes factors can provide evidence for a null hypothesis. Bayes factors also have a clear interpretation: a larger Bayes factor shows more evidence for a hypothesis, whereas p-values do not (can anyone tell the difference between 0.05 and 0.06?). Bayes factors have in the past had limited acceptance due to computational issues and the difficulty of selecting a prior. Recent work (see 'Bayes factor functions for reporting outcomes of hypothesis tests,' 2023, and 'On the use of non-local prior densities in Bayesian hypothesis tests,' 2010) introduced the idea of using non-local priors to calculate Bayes factors. This package implements Bayes Factor Functions (BFFs). In contrast to a single Bayes factor, BFFs express Bayes factors as a function of the prior densities used to
define the alternative hypotheses.
Interpreting Bayes factors is usually done on the log scale (also called the weight of evidence, or WoE). On this scale, a positive value represents evidence for the alternative hypothesis and a negative value represents evidence for the null hypothesis. As a rule of thumb, the following table can be used to interpret a Bayes factor. These are only guidelines, and some fields may require higher or lower thresholds of evidence.
```{=latex}
\begin{table}[!ht]
\centering
\begin{tabular}{c|c} \hline\hline
WoE & Interpretation \\ \hline
(-1, 1) & No strong evidence for either $H_0$ or $H_1$ \\
(1, 3) & Positive evidence for $H_1$ \\
(-1, -3) & Positive evidence for $H_0$ \\
(3, 5) & Strong evidence for $H_1$ \\
(-3, -5) & Strong evidence for $H_0$ \\
(5, $\infty$) & Very strong evidence for $H_1$ \\
(-5, -$\infty$) & Very strong evidence for $H_0$ \\ \hline
\end{tabular}
\caption{Common interpretations of the Weight of Evidence}
\label{thresholds}
\end{table}
```
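As a quick illustration of reading this table, the thresholds can be applied to a vector of log Bayes factors with `cut()`; the WoE values below are made up for demonstration, not computed from data.
```{r woe-interpretation}
# hypothetical log Bayes factor (WoE) values, classified with the thresholds above
woe = c(-0.4, 2.1, 4.2, 6.3)
cut(woe, breaks = c(-Inf, -5, -3, -1, 1, 3, 5, Inf),
    labels = c("very strong H0", "strong H0", "positive H0", "no strong evidence",
               "positive H1", "strong H1", "very strong H1"))
```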
This package provides Bayes factor values for standardized effect sizes from 0 to 1. A small effect size is usually considered to be 0.2 to 0.5, a medium effect size 0.5 to 0.8, and a large effect size greater than 0.8.
Using this package is very similar to using the familiar t, z, chi^2, and F tests in R. You will need the same information: the test statistic, degrees of freedom, and sample size. A graph is produced that shows the BFF curve over the different effect sizes.
For evaluating evidence from multiple studies (see 'Bayes factor functions,' 2023, arXiv), the parameter 'r' can also be set. The default value of r is 1, but an r that maximizes the Bayes factor at each tau^2 can be suggested by setting the 'maximize' argument in each test to TRUE.
# The following examples will show how the BFF package calculates Bayes factors based on test statistics
## z - test
```{r z-statistics}
# generating some data
n = 100
data_one = rnorm(n = n, mean = 0.2, sd = 1)
data_two = rnorm(n = n, mean = 0.1, sd = 1)
# calculating test statistics using z.test
# one-sample z-test
z_score_one = z.test(x = data_one, sigma.x = 1)$statistic
# two-sample z-test
z_score_two = z.test(x = data_one, y = data_two, sigma.x = 1, sigma.y = 1)$statistic
```
Calculating BFF using z_test_BFF
```{r calculating BFF for z test}
# default r and tau2
z_BFF_one = z_test_BFF(z_stat = z_score_one, n = 100, save = FALSE) #one sample z-test
z_BFF_two = z_test_BFF(z_stat = z_score_two, one_sample = FALSE, n1 = 100, n2 = 100, save = FALSE) #two sample z-test
# default r and user specified tau2
# single tau2
z_test_BFF(z_stat = z_score_one, n = 100, save = FALSE, tau2 = 0.5) #one sample z-test
z_test_BFF(z_stat = z_score_two, one_sample = FALSE, n1 = 100, n2 = 100, save = FALSE, tau2 = 0.5) #two sample z-test
# vector of tau2 values
z_test_BFF(z_stat = z_score_one, n = 100, save = FALSE, tau2 = c(0.5, 0.8)) #one sample z-test
z_test_BFF(z_stat = z_score_two, one_sample = FALSE, n1 = 100, n2 = 100, save = FALSE, tau2 = c(0.5, 0.8)) #two sample z-test
# user specified r and default tau2
z_test_BFF(z_stat = z_score_one, n = 100, save = FALSE, r = 2) #one sample z-test, integer r >1 (higher order moments)
z_test_BFF(z_stat = z_score_two, one_sample = FALSE, n1 = 100, n2 = 100, save = FALSE, r = 2) #two sample z-test, integer r >1 (higher order moments)
z_test_BFF(z_stat = z_score_one, n = 100, save = FALSE, r = 2.5) #one sample z-test, continuous r (fractional moments)
z_test_BFF(z_stat = z_score_two, one_sample = FALSE, n1 = 100, n2 = 100, save = FALSE, r = 2.5) #two sample z-test, continuous r (fractional moments)
```
## t - test
```{r t-statistics}
# generating some data
n = 100
data_one = rnorm(n = n, mean = -0.1)
data_two = rnorm(n = n, mean = 0.1)
# calculating test statistics using t.test
t_one = t.test(x = data_one)
t_two = t.test(x = data_one, y = data_two)
t_score_one = t_one$statistic
t_score_two = t_two$statistic
t_df_one = n - 1
t_df_two = 197.9
```
Calculating BFF using t_test_BFF
```{r calculating BFF for t test}
# default r and tau2
t_BFF_one = t_test_BFF(t_stat = t_score_one, df = t_df_one, n = 100, save = FALSE) #one sample t-test
t_BFF_two = t_test_BFF(t_stat = t_score_two, df = t_df_two, one_sample = FALSE, n1 = 100, n2 = 100, save = FALSE) #two sample t-test
# default r and user specified tau2
# single tau2
t_test_BFF(t_stat = t_score_one, df = t_df_one, n = 100, save = FALSE, tau2 = 0.5) #one sample t-test
t_test_BFF(t_stat = t_score_two, df = t_df_two, one_sample = FALSE, n1 = 100, n2 = 100, save = FALSE, tau2 = 0.5) #two sample t-test
# vector of tau2 values
t_test_BFF(t_stat = t_score_one, df = t_df_one, n = 100, save = FALSE, tau2 = c(0.5, 0.8)) #one sample t-test
t_test_BFF(t_stat = t_score_two, df = t_df_two, one_sample = FALSE, n1 = 100, n2 = 100, save = FALSE, tau2 = c(0.5, 0.8)) #two sample t-test
# user specified r and default tau2
t_test_BFF(t_stat = t_score_one, df = t_df_one, n = 100, save = FALSE, r = 2) #one sample t-test, integer r >1 (higher order moments)
t_test_BFF(t_stat = t_score_two, df = t_df_two, one_sample = FALSE, n1 = 100, n2 = 100, save = FALSE, r = 2) #two sample t-test, integer r >1 (higher order moments)
t_test_BFF(t_stat = t_score_one, df = t_df_one, n = 100, save = FALSE, r = 2.5) #one sample t-test, continuous r (fractional moments)
t_test_BFF(t_stat = t_score_two, df = t_df_two, one_sample = FALSE, n1 = 100, n2 = 100, save = FALSE, r = 2.5) #two sample t-test, continuous r (fractional moments)
```
## chi^2 - test
```{r chi2-test}
# generate some data
x <- matrix(c(12, 5, 7, 7), ncol = 2)
# calculating chi2 test statistic from chisq.test
chi2_stat = chisq.test(x)$statistic
```
Calculating BFF using chi2_test_BFF
```{r calculating BFF for chi2 test}
# default r and tau2
chi2_BFF_pear = chi2_test_BFF(chi2_stat = chi2_stat, df = 1, n = 4, save = FALSE) #Pearson's chi2 test
chi2_BFF_lrt = chi2_test_BFF(chi2_stat = chi2_stat, df = 1, n = 4, save = FALSE, pearsons = FALSE) #Likelihood ratio chi2 test
# default r and user specified tau2
# single tau2
chi2_test_BFF(chi2_stat = chi2_stat, df = 1, n = 4, save = FALSE, tau2 = 0.5) #Pearson's chi2 test
chi2_test_BFF(chi2_stat = chi2_stat, df = 1, n = 4, save = FALSE, pearsons = FALSE, tau2 = 0.5) #Likelihood ratio chi2 test
# vector of tau2 values
chi2_test_BFF(chi2_stat = chi2_stat, df = 1, n = 4, save = FALSE, tau2 = c(0.5, 0.8)) #Pearson's chi2 test
chi2_test_BFF(chi2_stat = chi2_stat, df = 1, n = 4, save = FALSE, pearsons = FALSE, tau2 = c(0.5, 0.8)) #Likelihood ratio chi2 test
# user specified r and default tau2
chi2_test_BFF(chi2_stat = chi2_stat, df = 1, n = 4, save = FALSE, r = 2) #Pearson's chi2 test, integer r >1 (higher order moments)
chi2_test_BFF(chi2_stat = chi2_stat, df = 1, n = 4, save = FALSE, pearsons = FALSE, r = 2) #Likelihood ratio chi2 test, integer r >1 (higher order moments)
chi2_test_BFF(chi2_stat = chi2_stat, df = 1, n = 4, save = FALSE, r = 2.5) #Pearson's chi2 test, continuous r (fractional moments)
chi2_test_BFF(chi2_stat = chi2_stat, df = 1, n = 4, save = FALSE, pearsons = FALSE, r = 2.5) #Likelihood ratio chi2 test, continuous r (fractional moments)
```
## F - test
```{r f statistics}
# generate some data
n = 100
p = 3
X = matrix(rnorm(n*p), nrow = n)
beta = c(1,1,0)
y = X %*% beta + rnorm(n)
model1 = lm(y ~ X)
anova_model = anova(model1)
F_stat = anova_model$`F value`[1]
```
Calculating BFF using f_test_BFF
```{r calculating BFF for f test}
# default r and tau2
F_BFF_one = f_test_BFF(f_stat = F_stat, df1 = anova_model$Df[1], df2 = anova_model$Df[2], n = n, save = FALSE)
# default r and user specified tau2
# single tau2
f_test_BFF(f_stat = F_stat, df1 = anova_model$Df[1], df2 = anova_model$Df[2], n = n, tau2 = 0.5, save = FALSE)
# vector of tau2 values
f_test_BFF(f_stat = F_stat, df1 = anova_model$Df[1], df2 = anova_model$Df[2], n = n, tau2 = c(0.5, 0.8), save = FALSE)
# user specified r and default tau2
f_test_BFF(f_stat = F_stat, df1 = anova_model$Df[1], df2 = anova_model$Df[2], n = n, r = 2, save = FALSE) #integer r >1 (higher order moments)
f_test_BFF(f_stat = F_stat, df1 = anova_model$Df[1], df2 = anova_model$Df[2], n = n, r = 2.5, save = FALSE) #continuous r (fractional moments)
```
## Maximizing r for each specified tau2 (the same maximization parameter applies to all tests, examples are using the z test)
```{r maximing r for z test}
# default tau2
z_test_BFF(z_stat = z_score_one, n = 100, save = FALSE, maximize = TRUE) #one sample z-test
z_test_BFF(z_stat = z_score_two, one_sample = FALSE, n1 = 100, n2 = 100, save = FALSE, maximize = TRUE) #two sample z-test
# user specified tau2
#single tau2
z_test_BFF(z_stat = z_score_one, n = 100, save = FALSE, tau2 = 0.5, maximize = TRUE) #one sample z-test
z_test_BFF(z_stat = z_score_two, one_sample = FALSE, n1 = 100, n2 = 100, save = FALSE, tau2 = 0.5, maximize = TRUE) #two sample z- test
# vector of tau2 values
z_test_BFF(z_stat = z_score_one, n = 100, save = FALSE, tau2 = c(0.5, 0.8), maximize = TRUE) #one sample z-test
z_test_BFF(z_stat = z_score_two, one_sample = FALSE, n1 = 100, n2 = 100, save = FALSE, tau2 = c(0.5, 0.8), maximize = TRUE) #two sample z-test
```
Plotting (the same plotting arguments apply to all tests; the examples use the z test)
Plots can be saved by setting "save = TRUE". If plots are saved, they are saved in the working directory (or to the path given in 'savename').
```{r plotting for z test}
# saving the plot as a pdf with the default name (BFF_plot.pdf), stored in the working directory
z_test_BFF(z_stat = z_score_one, n = 100, save = TRUE) #one sample z-test
z_test_BFF(z_stat = z_score_two, one_sample = FALSE, n1 = 100, n2 = 100, save = TRUE) #two sample z-test
# saving the plot as a pdf with a user specified name
z_test_BFF(z_stat = z_score_one, n = 100, save = TRUE, savename = "z-BFF-one.pdf") #one sample z-test
z_test_BFF(z_stat = z_score_two, one_sample = FALSE, n1 = 100, n2 = 100, save = TRUE, savename = "z-BFF-two.pdf") #two sample z-test
# customizing x-axis labels, y-axis labels and main title
z_test_BFF(z_stat = z_score_one, n = 100, save = FALSE, xlab = "RMSE", ylab = "Logarithm of Bayes Factor", main = "BFF curves") #one sample z-test
z_BFF_two = z_test_BFF(z_stat = z_score_two, one_sample = FALSE, n1 = 100, n2 = 100, save = TRUE, xlab = expression(tilde(omega)), ylab = expression(log(BF[10])), main = "BFF curves") #two sample z-test
```
## Default choices of tau2 for common statistical tests
The table below summarizes the default choices of $\tau^2_{\omega,r}$ for each test:

| Test | Statistic | Standardized effect ($\omega$) | $\tau^2_{\omega,r}$ |
|------|-----------|--------------------------------|---------------------|
| 1-sample z | $\frac{\sqrt{n}\bar{x}}{\sigma}$ | $\frac{\mu}{\sigma}$ | $\frac{n\omega^2}{2r}$ |
| 1-sample t | $\frac{\sqrt{n}\bar{x}}{s}$ | $\frac{\mu}{\sigma}$ | $\frac{n\omega^2}{2r}$ |
| 2-sample z | $\frac{\sqrt{n_1 n_2}(\bar{x}_1-\bar{x}_2)}{\sigma\sqrt{n_1+n_2}}$ | $\frac{\mu_1-\mu_2}{\sigma}$ | $\frac{n_1 n_2\omega^2}{2r(n_1+n_2)}$ |
| 2-sample t | $\frac{\sqrt{n_1 n_2}(\bar{x}_1-\bar{x}_2)}{s\sqrt{n_1+n_2}}$ | $\frac{\mu_1-\mu_2}{\sigma}$ | $\frac{n_1 n_2\omega^2}{2r(n_1+n_2)}$ |
| Multinomial/Poisson | $\chi^2_{\nu} = \sum_{i=1}^k \frac{(n_i-nf_i(\hat{\theta}))^2}{nf_i(\hat{\theta})}$ | $\left(\frac{p_{i}-f_i(\theta)}{\sqrt{f_i(\theta)}}\right)_{k\times 1}$ | $\frac{n\omega'\omega}{2(\frac{k}{2}+r-1)} = \frac{nk\tilde{\omega}^2}{2(\frac{k}{2}+r-1)}$ |
| Linear model | $F_{k,n-p} = \frac{(RSS_0-RSS_1)/k}{RSS_1/(n-p)}$ | $\frac{\mathbf{L}^{-1}(\mathbf{A}\boldsymbol{\beta}-\mathbf{a})}{\sigma}$ | $\frac{n\omega'\omega}{2(\frac{k}{2}+r-1)} = \frac{nk\tilde{\omega}^2}{4(\frac{k}{2}+r-1)}$ |
| Likelihood ratio | $\chi^2_{k} = -2\log\left[\frac{l(\theta_{r0},\hat{\theta}_{s})}{l(\hat{\theta})}\right]$ | $\mathbf{L}^{-1}(\theta_{r}-\theta_{r0})$ | $\frac{n\omega'\omega}{2(\frac{k}{2}+r-1)} = \frac{nk\tilde{\omega}^2}{2(\frac{k}{2}+r-1)}$ |
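As a minimal worked sketch of the first row (assuming a standardized effect of $\omega = 0.5$ and $r = 1$, values chosen only for illustration), the one-sample default $\tau^2_{\omega,r} = n\omega^2/(2r)$ can be computed by hand and passed to `z_test_BFF` through the `tau2` argument:

```{r default tau2 example}
# one-sample default: tau2 = n * omega^2 / (2 * r)
n_obs = 100
omega = 0.5   # assumed standardized effect size (illustration only)
r_val = 1
tau2_default = n_obs * omega^2 / (2 * r_val)
tau2_default
# use the hand-computed value as a user-specified tau2 in the z test
z_test_BFF(z_stat = z_score_one, n = n_obs, save = FALSE, tau2 = tau2_default)
```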
|
/scratch/gouwar.j/cran-all/cranData/BFF/vignettes/BFF_vignette.Rmd
|
#' Download a BFS asset file
#'
#' This function uses the DAM API
#' \url{https://dam-api.bfs.admin.ch/hub/swagger-ui/index.html}
#' to download a BFS file by asset number or BFS number. The file is downloaded
#' using `curl::curl_download()` under the hood.
#'
#' @param number_bfs The BFS number of a dataset.
#' @param number_asset The asset number of a dataset
#' @param destfile A character string with the name where the downloaded file is saved. Tilde-expansion is performed.
#' @param quiet If TRUE, suppress status messages (if any), and the progress bar.
#' @param mode A character string specifying the mode with which to write the file. Useful values are "w", "wb" (binary), "a" (append) and "ab".
#' @param handle a curl handle object
#'
#' @importFrom curl curl_download
#'
#' @return Returns the file path where the file has been downloaded. Returns NULL if no connection.
#'
#' @export
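#' @examples
#' \dontrun{
#' # sketch: download the ThemaKart base maps archive (asset number 24025646,
#' # the default used by bfs_get_base_maps()) to a temporary file
#' bfs_download_asset(
#'   number_asset = "24025646",
#'   destfile = tempfile(fileext = ".zip")
#' )
#' }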
bfs_download_asset <- function(number_asset = NULL, number_bfs = NULL, destfile, quiet = TRUE, mode = "wb", handle = curl::new_handle()) {
# fail gracefully if no internet connection
if (!curl::has_internet()) {
message("No internet connection")
return(NULL)
}
if (is.null(number_asset) && is.null(number_bfs)) {
stop("Please specify number_bfs or number_asset")
} else if (!is.null(number_asset) && !is.null(number_bfs)) {
stop("Please only specify number_bfs or number_asset")
} else if (!is.null(number_asset)) {
id <- number_asset
} else if (!is.null(number_bfs)) {
id <- paste0("orderNr:", number_bfs)
}
file_path <- curl::curl_download(
url = paste0("https://dam-api.bfs.admin.ch/hub/api/dam/assets/", id, "/master"),
destfile = destfile,
quiet = quiet,
mode = mode,
handle = handle
)
file_path
}
|
/scratch/gouwar.j/cran-all/cranData/BFS/R/bfs_download_asset.R
|
#' Download a geographic file from the Swiss geo-portal
#'
#' Download assets from the STAC API on the geodata catalog
#' of the Swiss Confederation (\url{https://data.geo.admin.ch/api/stac/v0.9/}).
#'
#' @details The use of the data and services is free but
#' subject to the provisions on fair use (see \url{https://www.geo.admin.ch/terms-of-use}).
#'
#' @param collection_id collection_id
#' @param output_dir output_dir
#' @param overwrite overwrite
#' @param create_json create_json
#' @param bbox bbox
#' @param asset_names asset_names
#' @param datetime datetime
#' @param feature_id feature_id
#'
#' @importFrom magrittr %>%
#'
#' @return Returns the file path where the file has been downloaded. Returns NULL if no connection.
#'
#' @export
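#' @examples
#' \dontrun{
#' # sketch: download the assets of one collection to a temporary directory;
#' # the collection id below is only the one referenced elsewhere in this package
#' bfs_download_geodata(
#'   collection_id = "ch.bafu.wasserbau-vermessungsstrecken",
#'   output_dir = tempdir()
#' )
#' }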
bfs_download_geodata <- function(
collection_id = collection_id,
output_dir = tempdir(),
overwrite = FALSE,
create_json = FALSE,
bbox = NULL,
asset_names = NULL,
datetime = NULL,
feature_id = NULL) {
# fail gracefully if no internet connection
if (!curl::has_internet()) {
message("No internet connection")
return(NULL)
}
if (!dir.exists(output_dir)) dir.create(output_dir, showWarnings = FALSE)
items <- rstac::stac("https://data.geo.admin.ch/api/stac/v0.9/") %>%
rstac::collections(collection_id = collection_id) %>%
rstac::items(bbox = bbox, datetime = datetime, feature_id = feature_id) %>%
rstac::get_request() %>%
rstac::assets_download(asset_names = asset_names, output_dir = output_dir, overwrite = overwrite, create_json = create_json)
list_assets <- rstac::items_assets(items)
files <- list.files(path = output_dir, pattern = paste0(list_assets, collapse = "|"), recursive = TRUE, full.names = TRUE)
return(files)
}
|
/scratch/gouwar.j/cran-all/cranData/BFS/R/bfs_download_geodata.R
|
#' Get asset metadata in a given language
#'
#' This function uses the DAM API \url{https://dam-api.bfs.admin.ch/hub/swagger-ui/index.html}
#' to get the metadata of a BFS file by asset number or BFS number in a given language.
#'
#' @seealso [bfs_download_asset()]
#'
#' @param number_bfs The BFS number of a dataset.
#' @param number_asset The asset number of a dataset
#' @param language character The language of a BFS catalog, i.e. "de", "fr", "it" or "en".
#'
#' @importFrom httr2 request req_headers req_url_path_append req_perform resp_body_json
#' @importFrom magrittr %>%
#'
#' @return list Returns a list containing asset metadata information. Returns NULL if no connection.
#'
#' @export
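#' @examples
#' \donttest{
#' # sketch: English metadata of the ThemaKart base maps asset (24025646)
#' bfs_get_asset_metadata(number_asset = "24025646", language = "en")
#' }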
bfs_get_asset_metadata <- function(number_asset = NULL, number_bfs = NULL, language = c("de", "fr", "it", "en")) {
language <- match.arg(language)
# fail gracefully if no internet connection
if (!curl::has_internet()) {
message("No internet connection")
return(NULL)
}
if (is.null(number_asset) && is.null(number_bfs)) {
stop("Please specify number_asset or number_bfs")
} else if (!is.null(number_asset) && !is.null(number_bfs)) {
stop("Please only specify number_asset or number_bfs")
} else if (!is.null(number_asset)) {
id <- number_asset
} else if (!is.null(number_bfs)) {
id <- paste0("orderNr:", number_bfs)
}
asset_metadata <- httr2::request(base_url = "https://dam-api.bfs.admin.ch/hub/api/dam/assets/") %>%
httr2::req_headers("accept" = "application/json") %>%
httr2::req_headers("accept-language" = language) %>%
httr2::req_url_path_append(id) %>%
httr2::req_perform() %>%
httr2::resp_body_json(simplifyVector = TRUE)
return(asset_metadata)
}
|
/scratch/gouwar.j/cran-all/cranData/BFS/R/bfs_get_asset_metadata.R
|
#' Get Switzerland base maps data
#'
#' This function helps to get base map data from the ThemaKart project
#' as an sf object. The geom names and the general structure of the files
#' can be found in the official BFS documentation,
#' see \url{https://www.bfs.admin.ch/asset/en/24025645}. When using this data, please read the conditions of use
#' and the copyright notices.
#'
#' If you want to get ThemaKart data from previous years, you can change
#' the `asset_number` for the related zip file. For example, for the map
#' set of year 2020, the asset number is "11927607".
#'
#' This function caches the base map data using
#' `tools::R_user_dir(package = "BFS")`.
#'
#' @param geom Geometry such as "suis", "kant", "bezk", "polg", "voge", etc.
#' @param category Category such as 'total_area' ("gf" for "Gesamtflaeche") or
#' 'vegetation_area' ("vf" for "Vegetationsflaeche").
#' @param type The type of data, i.e. "Poly" or "Pnts".
#' @param date Date (yyyymmdd) of reference / validity. If not specified, the
#' `most_recent` argument is used.
#' @param most_recent If TRUE, select the most recent file by sorting file names in
#' decreasing order; if FALSE, read the first file available.
#' @param format Format of the file, by default SHP format.
#' @param asset_number Asset number of the base maps zip file.
#'
#' @importFrom sf read_sf
#' @importFrom tools R_user_dir
#' @importFrom fs dir_create dir_ls
#' @importFrom zip unzip
#'
#' @return sf object with geometries. Returns NULL if no connection.
#'
#' @export
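#' @examples
#' \dontrun{
#' # sketch: read the national boundary ("suis") and the communes ("polg") as sf objects
#' suis <- bfs_get_base_maps(geom = "suis")
#' communes <- bfs_get_base_maps(geom = "polg")
#' }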
bfs_get_base_maps <- function(geom = NULL, category = "gf", type = "Poly", date = NULL, most_recent = TRUE, format = "shp", asset_number = "24025646") {
if (is.null(geom)) {
stop("Please choose a geom, such as 'suis', 'kant' or 'polg'.\nGeometry names are listed here: \nhttps://www.bfs.admin.ch/asset/en/24025645", call. = FALSE)
}
# fail gracefully if no internet connection
if (!curl::has_internet()) {
message("No internet connection")
return(NULL)
}
# get base map files if not present in cache folder
dir <- tools::R_user_dir(package = "BFS")
path_base_map <- paste0(dir, "/base_maps_", asset_number)
if (!fs::dir_exists(path_base_map)) {
fs::dir_create(path_base_map)
BFS::bfs_download_asset(
number_asset = asset_number,
destfile = paste0(path_base_map, ".zip")
)
# unzip all files in same directory because of encoding issues with subfolders
zip::unzip(zipfile = paste0(path_base_map, ".zip"), junkpaths = TRUE, exdir = path_base_map)
}
# list all files
files_all <- fs::dir_ls(path_base_map, recurse = TRUE, type = "file")
if (identical(files_all, character(0))) {
stop("Error in listing available base map files", call. = FALSE)
}
files_format <- grep(pattern = paste0(".", format, "$"), x = files_all, value = TRUE)
# category, i.e. search file with "gf_ch" or "vf_ch"
if (category == "total_area" || category == "gf") {
category_selected <- "gf_ch"
} else if (category == "vegetation_area" || category == "vf") {
category_selected <- "vf_ch"
} else {
category_selected <- category # other options, for example for 'k4seenyyyymmdd11_ch2007Poly'
}
files_cat <- grep(pattern = category_selected, x = files_format, value = TRUE)
# type, i.e. "Poly" or "Pnts"
files_poly <- grep(pattern = paste0(type, ".", format, "$"), x = files_cat, value = TRUE)
# by geom
files_geom <- grep(pattern = geom, x = files_poly, value = TRUE)
# by date
if (!is.null(date)) {
file_selected <- grep(pattern = date, x = files_geom, value = TRUE)
} else if (isTRUE(most_recent)) { # get most recent file by sorting in decreasing order
files_geom_sorted <- sort(files_geom, decreasing = TRUE)
# get first file
file_selected <- files_geom_sorted[1]
} else {
file_selected <- files_geom[1]
}
if (length(file_selected) > 1) {
file_selected <- file_selected[1]
warning(paste0("Multiple file selected.\nUsing the first file\n", file_selected), call. = FALSE)
}
if (identical(file_selected, character(0))) {
stop("No related file found. Please use other argument values.", call. = FALSE)
}
sf::read_sf(file_selected)
}
|
/scratch/gouwar.j/cran-all/cranData/BFS/R/bfs_get_base_maps.R
|
#' Get the BFS data or table catalog
#'
#' This function scrapes a given RSS feed of the Swiss Federal Statistical Office.
#'
#' @param language character The language of a BFS catalog, i.e. "de", "fr", "it" or "en".
#' @param type character The type of BFS catalog, either "data" or "tables".
#'
#' @return A data frame. Returns NULL if no connection.
#'
#' @importFrom tidyRSS tidyfeed
#' @importFrom janitor clean_names
#' @importFrom lifecycle deprecate_warn
#'
#' @seealso \code{\link{bfs_get_data}}
#'
#' @export
bfs_get_catalog <- function(language = "de", type = "data") {
lifecycle::deprecate_warn("0.5.6", "bfs_get_catalog()", details = "Please use `bfs_get_catalog_data()` or 'bfs_get_catalog_tables()' instead")
language <- match.arg(arg = language, choices = c("de", "fr", "it", "en"))
type <- match.arg(arg = type, choices = c("data", "tables"))
# fail gracefully if no internet connection
if (!curl::has_internet()) {
message("No internet connection")
return(NULL)
}
if (type == "data") {
catalog <- BFS::bfs_get_catalog_data(language = language)
}
if (type == "tables") {
catalog <- BFS::bfs_get_catalog_tables(language = language)
}
return(catalog)
}
|
/scratch/gouwar.j/cran-all/cranData/BFS/R/bfs_get_catalog.R
|
#' Get the BFS data catalog
#'
#' Get the list of the data available in the official \href{https://www.bfs.admin.ch/bfs/en/home/statistiken/kataloge-datenbanken/daten/_jcr_content/par/ws_catalog.rss.xml}{RSS Feed} of the Swiss Federal Statistical Office data catalog.
#'
#' @param language character The language of a BFS catalog, i.e. "de", "fr", "it" or "en".
#' @param title character String to search in title, subtitle and supertitle
#' @param spatial_division BFS datasets by spatial division, choose between "Switzerland", "Cantons", "Districts", "Communes", "Other spatial divisions" or "International"
#' @param prodima numeric Get only specific BFS themes using one or multiple prodima numbers
#' @param inquiry character BFS datasets for an inquiry
#' @param institution character BFS datasets for an institution
#' @param publishing_year_start character BFS datasets for a publishing year start
#' @param publishing_year_end character BFS datasets for a publishing year end
#' @param order_nr character Filter by BFS Number (FSO number)
#' @param skip_limit boolean skip limit, TRUE or FALSE
#'
#' @return A data frame. Returns NULL if no connection.
#'
#' @importFrom tidyRSS tidyfeed
#' @importFrom janitor clean_names
#' @importFrom purrr pmap_dfr possibly
#' @importFrom tibble tibble
#'
#' @seealso \code{\link{bfs_get_data}}
#'
#' \describe{
#' \item{title}{A character column with the title of the BFS dataset}
#' \item{language}{A character column with the language of the BFS dataset}
#' \item{publication_date}{The published date of the BFS dataset in the data catalog}
#' \item{number_asset}{The BFS asset number}
#' \item{url_bfs}{A character column with the URL of the related BFS
#' webpage}
#' \item{url_px}{A character column with the URL of the PX file}
#' \item{catalog_date}{The released date of the current BFS data catalog}
#' }
#'
#' @examples
#' \donttest{
#' bfs_get_catalog_data(language = "en", title = "students", prodima = c(900212))
#' }
#'
#' @return A tbl_df (a type of data frame; see tibble or
#' dplyr packages). Returns NULL if no connection.
#'
#' @export
bfs_get_catalog_data <- function(language = "de", title = NULL, spatial_division = NULL, prodima = NULL, inquiry = NULL, institution = NULL, publishing_year_start = NULL, publishing_year_end = NULL, order_nr = NULL, skip_limit = TRUE) {
# if (missing(language)) stop("must choose a language, either 'de', 'fr', 'it' or 'en'", call. = FALSE)
language <- match.arg(arg = language, choices = c("de", "fr", "it", "en"))
# fail gracefully if no internet connection
if (!curl::has_internet()) {
message("No internet connection")
return(NULL)
}
# Construct geography query based on spatial division names
geography_names <- c("Switzerland", "Cantons", "Districts", "Communes", "Other spatial divisions", "International")
geography_numbers <- c(900091, 900092, 900093, 900004, 900008, 900068)
names(geography_numbers) <- geography_names
if (is.null(spatial_division)) {
geography <- ""
} else {
spatial_division <- match.arg(arg = spatial_division, choices = geography_names)
geography <- geography_numbers[names(geography_numbers) == spatial_division]
}
if (length(institution) != 1 && !is.null(institution)) stop("`institution` should be unique")
if (is.null(institution)) institution <- ""
if (length(inquiry) != 1 && !is.null(inquiry)) stop("`inquiry` should be unique")
if (is.null(inquiry)) inquiry <- ""
if (length(publishing_year_start) != 1 && !is.null(publishing_year_start)) stop("`publishing_year_start` should be unique")
if (is.null(publishing_year_start)) publishing_year_start <- ""
if (length(publishing_year_end) != 1 && !is.null(publishing_year_end)) stop("`publishing_year_end` should be unique")
if (is.null(publishing_year_end)) publishing_year_end <- ""
if (length(title) != 1 && !is.null(title)) stop("`title` should be unique")
if (is.null(title)) title <- ""
if (length(order_nr) != 1 && !is.null(order_nr)) stop("`order_nr` should be unique")
if (is.null(order_nr)) order_nr <- ""
# Construct prodima query
# themes_names <- c("Statistical basis and overviews 00", "Population 01", "Territory and environment 02", "Work and income 03", "National economy 04", "Prices 05", "Industry and services 06", "Agriculture and forestry 07", "Energy 08", "Construction and housing 09", "Tourism 10", "Mobility and transport 11", "Money, banks and insurance 12", "Social security 13", "Health 14", "Education and science 15", "Culture, media, information society, sports 16", "Politics 17", "General Government and finance 18", "Crime and criminal justice 19", "Economic and social situation of the population 20", "Sustainable development, regional and international disparities 21")
themes_prodima <- c(900001, 900010, 900035, 900051, 900075, 900084, 900092, 900104, 900127, 900140, 900160, 900169, 900191, 900198, 900210, 900212, 900214, 900226, 900239, 900257, 900269, 900276)
# query by prodima (theme) because RSS feed limitation to 350 entities, see issue #5
if (is.null(prodima)) {
prodima <- themes_prodima
} else {
prodima <- prodima
}
# # TODO: allow multiple elements queries for each argument
#
# queries <- list(
# prodima = prodima,
# language = language,
# skipLimit = skip_limit,
# institution = institution,
# geography = geography,
# inquiry = inquiry,
# publishingyearstart = publishing_year_start,
# publishingyearend = publishing_year_end,
# title = title,
# orderNr = order_nr
# )
#
# # test if multiple elements in arguments
# lengths_rss_queries <- lengths(queries)
# final list for querying with a loop ---------------------------------------
# get length prodima to create list to loop on
length_prodima <- length(prodima)
rss_queries <- list(
prodima = prodima,
language = rep(language, length_prodima),
skipLimit = rep(skip_limit, length_prodima),
institution = rep(institution, length_prodima),
geography = rep(geography, length_prodima),
inquiry = rep(inquiry, length_prodima),
publishingyearstart = rep(publishing_year_start, length_prodima),
publishingyearend = rep(publishing_year_end, length_prodima),
title = rep(title, length_prodima),
orderNr = rep(order_nr, length_prodima)
)
get_rss_feed_data <- function(language, skipLimit, prodima, institution, geography, inquiry, publishingyearstart, publishingyearend, title, orderNr) {
feed <- paste0("https://www.bfs.admin.ch/bfs/", language, "/home/statistiken/kataloge-datenbanken/daten/_jcr_content/par/ws_catalog.rss.xml?skipLimit=", skipLimit, "&prodima=", prodima, "&institution=", institution, "&geography=", geography, "&inquiry=", inquiry, "&publishingyearstart=", publishingyearstart, "&publishingyearend=", publishingyearend, "&title=", title, "&orderNr=", orderNr)
df_feed <- suppressMessages(tidyRSS::tidyfeed(feed = feed))
colnames(df_feed) <- gsub("feed_", "", colnames(df_feed)) # cleaning
colnames(df_feed) <- gsub("item_", "", colnames(df_feed)) # cleaning
df_feed <- janitor::clean_names(df_feed, "small_camel") # cleaning
base_url_bfs <- paste0("https://www.bfs.admin.ch/content/bfs/", language, "/home/statistiken/kataloge-datenbanken/daten.assetdetail.")
base_url_px <- "https://www.bfs.admin.ch/bfsstatic/dam/assets/"
if (any("title_2" == names(df_feed))) df_feed$title <- df_feed$title_2
if (any("link_2" == names(df_feed))) df_feed$url_bfs <- df_feed$link_2
if (any("pubDate_2" == names(df_feed))) df_feed$publication_date <- df_feed$pubDate_2
if (any("pubDate" == names(df_feed))) df_feed$catalog_date <- df_feed$pubDate
df_feed$url_px <- gsub(base_url_bfs, base_url_px, df_feed$url_bfs)
df_feed$url_px <- gsub(".html$", "/master", df_feed$url_px)
# get `number_asset`
df_feed$number_asset <- gsub(pattern = "\\D", replacement = "", x = df_feed$guid)
df_feed$number_asset <- as.numeric(df_feed$number_asset)
# select variables
vars <- c("title", "language", "publication_date", "number_asset", "url_bfs", "url_px", "catalog_date")
df_feed[vars]
}
df <- purrr::pmap_dfr(rss_queries, purrr::possibly(get_rss_feed_data, otherwise = tibble::tibble()), .progress = TRUE)
df2 <- df[!duplicated(df), ] # no duplication
return(df2)
}
|
/scratch/gouwar.j/cran-all/cranData/BFS/R/bfs_get_catalog_data.R
|
#' Get the geodata catalog of the Swiss Confederation
#'
#' Display geo-information catalog of the Swiss Confederation (\url{https://data.geo.admin.ch/}),
#' including some geographic datasets provided by the Swiss Federal Statistical Office.
#' Note that this geodata catalog is not hosted by the Swiss Federal Statistical Office.
#'
#' @param include_metadata boolean If TRUE, loop over each collection to add its metadata.
#'
#' @details
#' For now, only STAC API datasets are accessible.
#'
#' @return A tbl_df (a type of data frame; see tibble or
#' dplyr packages). Returns NULL if no connection.
#'
#' @export
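#' @examples
#' \donttest{
#' # sketch: list the geodata collections without the extra metadata requests
#' bfs_get_catalog_geodata(include_metadata = FALSE)
#' }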
bfs_get_catalog_geodata <- function(include_metadata = TRUE) {
# fail gracefully if no internet connection
if (!curl::has_internet()) {
message("No internet connection")
return(NULL)
}
elements_html <- xml2::read_html("https://data.geo.admin.ch/") %>%
rvest::html_element("#data") %>%
rvest::html_elements("a")
collection_ids <- tibble::tibble(
type = rvest::html_text2(elements_html),
href = rvest::html_attr(elements_html, "href")
) %>%
dplyr::filter(type %in% c("API", "download")) %>%
dplyr::mutate(
collection_id = gsub(".*collections/", "", href),
collection_id = gsub("collections/", "", collection_id)
) %>%
dplyr::group_by(collection_id) %>%
dplyr::arrange(collection_id, type) %>%
dplyr::filter(dplyr::row_number() == 1) %>%
dplyr::ungroup() %>%
dplyr::select(collection_id, type, href) %>%
# fix "h.bafu.wasserbau-vermessungsstrecken"
dplyr::mutate(collection_id = ifelse(collection_id == "h.bafu.wasserbau-vermessungsstrecken", "ch.bafu.wasserbau-vermessungsstrecken", collection_id)) %>%
# ONLY STAC API FOR NOW
dplyr::filter(type == "API")
# loop on each href to get metadata
if (include_metadata) {
geo_get_metadata <- function(collection_id) {
# if too many requests HTTP 429
json <- httr2::request("https://data.geo.admin.ch/api/stac/v0.9/collections/") %>%
httr2::req_url_path_append(collection_id) %>%
httr2::req_perform() %>%
httr2::resp_body_json()
tibble::tibble(
collection_id = json$id,
title = json$title,
description = json$description,
created = json$created,
updated = json$updated,
crs = json$crs[[1]],
license = json$license,
provider_name = json$providers[[1]]$name,
bbox = json$extent$spatial$bbox,
interval = json$extent$temporal$interval
)
}
df <- purrr::map_dfr(collection_ids$collection_id, purrr::possibly(geo_get_metadata, otherwise = tibble::tibble()), .progress = TRUE)
collection_ids <- collection_ids %>%
dplyr::left_join(df, by = "collection_id")
}
return(collection_ids)
}
|
/scratch/gouwar.j/cran-all/cranData/BFS/R/bfs_get_catalog_geodata.R
|
#' Get the BFS tables catalog
#'
#' Get the list of the tables available in the official \href{https://www.bfs.admin.ch/bfs/en/home/statistiken/kataloge-datenbanken/daten/_jcr_content/par/ws_catalog.rss.xml}{RSS Feed} of the Swiss Federal Statistical Office tables catalog.
#'
#' @param language character The language of a BFS catalog, i.e. "de", "fr", "it" or "en".
#' @param title character String to search in title, subtitle and supertitle
#' @param spatial_division BFS datasets by spatial division, choose between "Switzerland", "Cantons", "Districts", "Communes", "Other spatial divisions" or "International"
#' @param prodima numeric Get only specific BFS themes using one or multiple prodima numbers
#' @param inquiry character BFS datasets for an inquiry
#' @param institution character BFS datasets for an institution
#' @param publishing_year_start character BFS datasets for a publishing year start
#' @param publishing_year_end character BFS datasets for a publishing year end
#' @param order_nr character Filter by BFS Number (FSO number)
#' @param skip_limit boolean skip limit, TRUE or FALSE
#'
#' @return A data frame. Returns NULL if no connection.
#'
#' @importFrom tidyRSS tidyfeed
#' @importFrom janitor clean_names
#' @importFrom purrr pmap_dfr possibly
#' @importFrom tibble tibble
#'
#' @seealso \code{\link{bfs_get_data}}
#'
#' @return A tbl_df (a type of data frame; see tibble or
#' dplyr packages). Returns NULL if no connection.
#'
#' \describe{
#' \item{title}{A character column with the title of the BFS dataset}
#' \item{language}{A character column with the language of the BFS dataset}
#' \item{publication_date}{The published date of the BFS dataset in the tables catalog}
#' \item{number_asset}{The BFS asset number}
#' \item{url_bfs}{A character column with the URL of the related BFS
#' webpage}
#' \item{url_table}{A character column with the URL of the PX file}
#' \item{catalog_date}{The released date of the current BFS tables catalog}
#' }
#'
#' @examples
#' \donttest{
#' bfs_get_catalog_tables(language = "en", title = "students", prodima = c(900212))
#' }
#'
#' @export
bfs_get_catalog_tables <- function(language = "de", title = NULL, spatial_division = NULL, prodima = NULL, inquiry = NULL, institution = NULL, publishing_year_start = NULL, publishing_year_end = NULL, order_nr = NULL, skip_limit = TRUE) {
# if (missing(language)) stop("must choose a language, either 'de', 'fr', 'it' or 'en'", call. = FALSE)
language <- match.arg(arg = language, choices = c("de", "fr", "it", "en"))
# fail gracefully if no internet connection
if (!curl::has_internet()) {
message("No internet connection")
return(NULL)
}
# Construct geography query based on spatial division names
geography_names <- c("Switzerland", "Cantons", "Districts", "Communes", "Other spatial divisions", "International")
geography_numbers <- c(900091, 900092, 900093, 900004, 900008, 900068)
names(geography_numbers) <- geography_names
if (is.null(spatial_division)) {
geography <- ""
} else {
spatial_division <- match.arg(arg = spatial_division, choices = geography_names)
geography <- geography_numbers[names(geography_numbers) == spatial_division]
}
if (length(institution) != 1 && !is.null(institution)) stop("`institution` should be unique")
if (is.null(institution)) institution <- ""
if (length(inquiry) != 1 && !is.null(inquiry)) stop("`inquiry` should be unique")
if (is.null(inquiry)) inquiry <- ""
if (length(publishing_year_start) != 1 && !is.null(publishing_year_start)) stop("`publishing_year_start` should be unique")
if (is.null(publishing_year_start)) publishing_year_start <- ""
if (length(publishing_year_end) != 1 && !is.null(publishing_year_end)) stop("`publishing_year_end` should be unique")
if (is.null(publishing_year_end)) publishing_year_end <- ""
if (length(title) != 1 && !is.null(title)) stop("`title` should be unique")
if (is.null(title)) title <- ""
if (length(order_nr) != 1 && !is.null(order_nr)) stop("`order_nr` should be unique")
if (is.null(order_nr)) order_nr <- ""
# Construct prodima query
# themes_names <- c("Statistical basis and overviews 00", "Population 01", "Territory and environment 02", "Work and income 03", "National economy 04", "Prices 05", "Industry and services 06", "Agriculture and forestry 07", "Energy 08", "Construction and housing 09", "Tourism 10", "Mobility and transport 11", "Money, banks and insurance 12", "Social security 13", "Health 14", "Education and science 15", "Culture, media, information society, sports 16", "Politics 17", "General Government and finance 18", "Crime and criminal justice 19", "Economic and social situation of the population 20", "Sustainable development, regional and international disparities 21")
themes_prodima <- c(900001, 900010, 900035, 900051, 900075, 900084, 900092, 900104, 900127, 900140, 900160, 900169, 900191, 900198, 900210, 900212, 900214, 900226, 900239, 900257, 900269, 900276)
# query by prodima (theme) because RSS feed limitation to 350 entities, see issue #5
if (is.null(prodima)) {
prodima <- themes_prodima
} else {
prodima <- prodima
}
# # TODO: allow multiple elements queries for each argument
#
# queries <- list(
# prodima = prodima,
# language = language,
# skipLimit = skip_limit,
# institution = institution,
# geography = geography,
# inquiry = inquiry,
# publishingyearstart = publishing_year_start,
# publishingyearend = publishing_year_end,
# title = title,
# orderNr = order_nr
# )
#
# # test if multiple elements in arguments
# lengths_rss_queries <- lengths(queries)
# final list for querying with a loop ---------------------------------------
# get length prodima to create list to loop on
length_prodima <- length(prodima)
rss_queries <- list(
prodima = prodima,
language = rep(language, length_prodima),
skipLimit = rep(skip_limit, length_prodima),
institution = rep(institution, length_prodima),
geography = rep(geography, length_prodima),
inquiry = rep(inquiry, length_prodima),
publishingyearstart = rep(publishing_year_start, length_prodima),
publishingyearend = rep(publishing_year_end, length_prodima),
title = rep(title, length_prodima),
orderNr = rep(order_nr, length_prodima)
)
get_rss_feed_data <- function(language, skipLimit, prodima, institution, geography, inquiry, publishingyearstart, publishingyearend, title, orderNr) {
feed <- paste0("https://www.bfs.admin.ch/bfs/", language, "/home/statistiken/kataloge-datenbanken/tabellen/_jcr_content/par/ws_catalog.rss.xml?skipLimit=", skipLimit, "&prodima=", prodima, "&institution=", institution, "&geography=", geography, "&inquiry=", inquiry, "&publishingyearstart=", publishingyearstart, "&publishingyearend=", publishingyearend, "&title=", title, "&orderNr=", orderNr)
df_feed <- suppressMessages(tidyRSS::tidyfeed(feed = feed))
colnames(df_feed) <- gsub("feed_", "", colnames(df_feed)) # cleaning
colnames(df_feed) <- gsub("item_", "", colnames(df_feed)) # cleaning
df_feed <- janitor::clean_names(df_feed, "small_camel") # cleaning
base_url_bfs <- paste0("https://www.bfs.admin.ch/content/bfs/", language, "/home/statistiken/kataloge-datenbanken/tabellen.assetdetail.")
base_url_table <- "https://www.bfs.admin.ch/bfsstatic/dam/assets/"
if (any("title_2" == names(df_feed))) df_feed$title <- df_feed$title_2
if (any("link_2" == names(df_feed))) df_feed$url_bfs <- df_feed$link_2
if (any("pubDate_2" == names(df_feed))) df_feed$publication_date <- df_feed$pubDate_2
if (any("pubDate" == names(df_feed))) df_feed$catalog_date <- df_feed$pubDate
df_feed$url_table <- gsub(base_url_bfs, base_url_table, df_feed$url_bfs)
df_feed$url_table <- gsub(".html$", "/master", df_feed$url_table)
# get `number_asset`
df_feed$number_asset <- gsub(pattern = "\\D", replacement = "", x = df_feed$guid)
df_feed$number_asset <- as.numeric(df_feed$number_asset)
# select variables
vars <- c("title", "language", "publication_date", "number_asset", "url_bfs", "url_table", "catalog_date")
df_feed[vars]
}
df <- purrr::pmap_dfr(rss_queries, purrr::possibly(get_rss_feed_data, otherwise = tibble::tibble()), .progress = TRUE)
df2 <- df[!duplicated(df), ] # no duplication
return(df2)
}
|
/scratch/gouwar.j/cran-all/cranData/BFS/R/bfs_get_catalog_tables.R
|
#' Get BFS data in a given language
#'
#' Get a dataset using the PXWEB API v1.
#' You should choose either the BFS number (FSO number) or the official BFS URL
#' of a given dataset. You can query particular variables using the `query` argument.
#'
#' @param number_bfs The BFS number (FSO number) of a dataset.
#' @param language Language of the dataset (the translated version is returned if it exists), i.e. "de", "fr", "it" or "en".
#' @param query A list with named values, a json query file or json query string using \code{pxweb::pxweb_query()}.
#' @param column_name_type Column name type as "text" or as "code".
#' @param variable_value_type Variable value type as "text" or as "code".
#' @param clean_names Clean column names using \code{janitor::clean_names()}.
#' @param delay Integer Number of seconds to wait before query using \code{Sys.sleep()}.
#'
#' @importFrom magrittr %>%
#'
#' @seealso \code{\link{bfs_get_data_comments}}
#'
#' @return A tbl_df (a type of data frame; see tibble or
#' dplyr packages). Returns NULL if no connection.
#'
#' @export
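#' @examples
#' \dontrun{
#' # "px-x-0102020000_201" is a placeholder FSO number used only for illustration;
#' # replace it with the FSO number of the dataset you need
#' df <- bfs_get_data(number_bfs = "px-x-0102020000_201", language = "en")
#'
#' # restrict the download with a named-list query; the variable name "Jahr"
#' # is hypothetical and must match the codes returned by bfs_get_metadata()
#' df_sub <- bfs_get_data(
#'   number_bfs = "px-x-0102020000_201",
#'   language = "en",
#'   query = list("Jahr" = c("2020"))
#' )
#' }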
bfs_get_data <- function(number_bfs, language = "de", query = NULL, column_name_type = "text", variable_value_type = "text", clean_names = FALSE, delay = NULL) {
language <- match.arg(arg = language, choices = c("de", "fr", "it", "en"))
# fail gracefully if no internet connection
if (!curl::has_internet()) {
message("No internet connection")
return(NULL)
}
pxweb_api_url <- paste0("https://www.pxweb.bfs.admin.ch/api/v1/", language, "/", number_bfs, "/", number_bfs, ".px")
# if too many requests HTTP 429
df_json <- httr2::request("https://www.pxweb.bfs.admin.ch/api/v1") %>%
httr2::req_url_path_append(paste0(language, "/", number_bfs, "/", number_bfs, ".px")) %>%
httr2::req_retry(max_tries = 2, max_seconds = 10) %>%
httr2::req_perform() %>%
httr2::resp_body_json(simplifyVector = TRUE)
if(!is.null(delay)) {
Sys.sleep(delay) # waiting time in seconds before query
}
if (is.null(query)) {
variables <- df_json$variables$code
values <- df_json$variables$values
df <- rbind(rep("*", length(values)))
names(df) <- variables
dims <- as.list(df)
pxq <- pxweb::pxweb_query(dims)
} else {
if (!is.list(query)) {
variables <- paste(df_json$variables$code, collapse = ", ")
stop(paste0("`query` should be a list using the variables: ", variables, "."), call. = FALSE)
}
dims <- query
pxq <- pxweb::pxweb_query(dims)
}
df_pxweb <- pxweb::pxweb_get_data(url = pxweb_api_url, query = pxq, column.name.type = column_name_type, variable.value.type = variable_value_type)
tbl <- tibble::as_tibble(df_pxweb, .name_repair = "minimal")
if (clean_names) {
tbl <- janitor::clean_names(tbl)
}
return(tbl)
}
|
/scratch/gouwar.j/cran-all/cranData/BFS/R/bfs_get_data.R
|
#' Get the comments/footnotes of a BFS dataset in a given language
#'
#' Get the comments/footnotes of a BFS dataset using PXWEB BFS API v1.
#'
#' @param number_bfs The BFS number of a dataset.
#' @param language Language of the dataset (the translated version is returned if it exists), i.e. "de", "fr", "it" or "en".
#' @param query a list with named values, a json query file or json query string using \code{pxweb::pxweb_query()}.
#' @param clean_names Clean column names using \code{janitor::clean_names()}
#' @param delay Integer Number of seconds to wait before query using \code{Sys.sleep()}.
#'
#' @seealso \code{\link{bfs_get_data}}
#'
#' @return A tbl_df (a type of data frame; see tibble or
#' dplyr packages). Returns NULL if no connection.
#'
#' @importFrom magrittr %>%
#'
#' @export
bfs_get_data_comments <- function(number_bfs, language = "de", query = NULL, clean_names = FALSE, delay = NULL) {
language <- match.arg(arg = language, choices = c("de", "fr", "it", "en"))
# fail gracefully if no internet connection
if (!curl::has_internet()) {
message("No internet connection")
return(NULL)
}
pxweb_api_url <- paste0("https://www.pxweb.bfs.admin.ch/api/v1/", language, "/", number_bfs, "/", number_bfs, ".px")
# check if too many requests HTTP 429
df_json <- httr2::request("https://www.pxweb.bfs.admin.ch/api/v1") %>%
httr2::req_url_path_append(paste0(language, "/", number_bfs, "/", number_bfs, ".px")) %>%
httr2::req_retry(max_tries = 2, max_seconds = 10) %>%
httr2::req_perform() %>%
httr2::resp_body_json(simplifyVector = TRUE)
if(!is.null(delay)) {
Sys.sleep(delay) # waiting time in seconds before query
}
if (is.null(query)) {
variables <- df_json$variables$code
values <- df_json$variables$values
df <- rbind(rep("*", length(values)))
names(df) <- variables
dims <- as.list(df)
pxq <- pxweb::pxweb_query(dims)
} else {
if (!is.list(query)) {
variables <- paste(df_json$variables$code, collapse = ", ")
stop(paste0("`query` should be a list using the variables: ", variables, "."), call. = FALSE)
}
dims <- query
pxq <- pxweb::pxweb_query(dims)
}
df_pxweb <- pxweb::pxweb_get(url = pxweb_api_url, query = pxq)
comments <- pxweb::pxweb_data_comments(df_pxweb)
df_comments <- as.data.frame(comments)
tbl <- tibble::as_tibble(df_comments)
if (clean_names) {
tbl <- janitor::clean_names(tbl)
}
return(tbl)
}
|
/scratch/gouwar.j/cran-all/cranData/BFS/R/bfs_get_data_comments.R
|
#' Get metadata of a BFS data in a given language
#'
#' Get the metadata of a BFS dataset using the PXWEB API v1.
#' You should choose either the BFS number (FSO number) or the official BFS URL of a given dataset.
#'
#' @param number_bfs The BFS number of a dataset.
#' @param language Language of the dataset (the translated version is returned if it exists), i.e. "de", "fr", "it" or "en".
#'
#' @return A tbl_df (a type of data frame; see tibble or dplyr packages). Returns NULL if no connection.
#'
#' @importFrom magrittr %>%
#' @importFrom tibble as_tibble
#'
#' @export
bfs_get_metadata <- function(number_bfs, language = "de") {
language <- match.arg(arg = language, choices = c("de", "fr", "it", "en"))
# fail gracefully if no internet connection
if (!curl::has_internet()) {
message("No internet connection")
return(NULL)
}
# if too many requests HTTP 429
df <- httr2::request("https://www.pxweb.bfs.admin.ch/api/v1") %>%
httr2::req_url_path_append(paste0(language, "/", number_bfs, "/", number_bfs, ".px")) %>%
httr2::req_retry(max_tries = 2, max_seconds = 10) %>%
httr2::req_perform() %>%
httr2::resp_body_json(simplifyVector = TRUE)
df2 <- df$variables
df2$title <- df$title
tibble::as_tibble(df2)
}
|
/scratch/gouwar.j/cran-all/cranData/BFS/R/bfs_get_metadata.R
|
#' Swiss official commune register GDE
#'
#' The official commune register is structured according to cantons and
#' districts or comparable administrative entities. Various federal, cantonal
#' and communal governments and private businesses use this register when
#' identifying and referring to communes.
#'
#' The Federal Statistical Office assigns a number to each commune and creates,
#' administers and publishes the Swiss official commune register.
#'
#' @format ## `register_gde`
#' A data frame with 2,136 rows and 8 columns:
#' \describe{
#' \item{GDEKT}{Canton abbreviation}
#' \item{GDEBZNR}{District number}
#' \item{GDENR}{BFS commune number}
#' \item{GDENAME}{Official commune name}
#' \item{GDENAMK}{Commune name, short form}
#' \item{GDEBZNA}{District name}
#' \item{GDEKTNA}{Canton name}
#' \item{GDEMUTDAT}{Date of last change}
#' }
#' @source <https://www.bfs.admin.ch/bfs/fr/home/bases-statistiques/repertoire-officiel-communes-suisse.html>
"register_gde"
#' Swiss official commune register GDE Other
#'
#' The official commune register is structured according to cantons and
#' districts or comparable administrative entities. Various federal, cantonal
#' and communal governments and private businesses use this register when
#' identifying and referring to communes.
#'
#' The Federal Statistical Office assigns a number to each commune and creates,
#' administers and publishes the Swiss official commune register.
#'
#' @format ## `register_gde_other`
#' A data frame with 3 rows and 4 columns:
#' \describe{
#' \item{GDENR}{BFS commune number}
#' \item{GDENAME}{Official commune name}
#' \item{KTNR}{Canton number}
#' \item{GDEBZNR}{District number}
#' }
#' @source <https://www.bfs.admin.ch/bfs/fr/home/bases-statistiques/repertoire-officiel-communes-suisse.html>
"register_gde_other"
#' Swiss official commune register BZN
#'
#' The official commune register is structured according to cantons and
#' districts or comparable administrative entities. Various federal, cantonal
#' and communal governments and private businesses use this register when
#' identifying and referring to communes.
#'
#' The Federal Statistical Office assigns a number to each commune and creates,
#' administers and publishes the Swiss official commune register.
#'
#' @format ## `register_bzn`
#' A data frame with 143 rows and 3 columns:
#' \describe{
#' \item{GDEKT}{Canton abbreviation}
#' \item{GDEBZNR}{District number}
#' \item{GDEBZNA}{District name}
#' }
#' @source <https://www.bfs.admin.ch/bfs/fr/home/bases-statistiques/repertoire-officiel-communes-suisse.html>
"register_bzn"
#' Swiss official commune register KT
#'
#' The official commune register is structured according to cantons and
#' districts or comparable administrative entities. Various federal, cantonal
#' and communal governments and private businesses use this register when
#' identifying and referring to communes.
#'
#' The Federal Statistical Office assigns a number to each commune and creates,
#' administers and publishes the Swiss official commune register.
#'
#' @format ## `register_kt`
#' A data frame with 26 rows and 3 columns:
#' \describe{
#' \item{KTNR}{Canton number}
#' \item{GDEKT}{Canton abbreviation}
#' \item{GDEKTNA}{Canton name}
#' }
#' @source <https://www.bfs.admin.ch/bfs/fr/home/bases-statistiques/repertoire-officiel-communes-suisse.html>
"register_kt"
#' Swiss official commune register KT Seeanteile
#'
#' The official commune register is structured according to cantons and
#' districts or comparable administrative entities. Various federal, cantonal
#' and communal governments and private businesses use this register when
#' identifying and referring to communes.
#'
#' The Federal Statistical Office assigns a number to each commune and creates,
#' administers and publishes the Swiss official commune register.
#'
#' @format ## `register_kt_seeanteile`
#' A data frame with 9 rows and 5 columns:
#' \describe{
#' \item{GDENR}{BFS commune number}
#' \item{GDENAME}{Official commune name}
#' \item{KTNR}{Canton number}
#' }
#' @source <https://www.bfs.admin.ch/bfs/fr/home/bases-statistiques/repertoire-officiel-communes-suisse.html>
"register_kt_seeanteile"
#' Swiss official commune register Dictionary
#'
#' The official commune register is structured according to cantons and
#' districts or comparable administrative entities. Various federal, cantonal
#' and communal governments and private businesses use this register when
#' identifying and referring to communes.
#'
#' The Federal Statistical Office assigns a number to each commune and creates,
#' administers and publishes the Swiss official commune register.
#'
#' @format ## `register_dic`
#' A data frame with 27 rows and 5 columns:
#' \describe{
#' \item{language}{Language}
#' \item{abbreviation}{Abbreviation}
#' \item{title}{Title}
#' \item{spec}{Specifications}
#' \item{notes}{General notes}
#' }
#' @source <https://www.bfs.admin.ch/bfs/fr/home/bases-statistiques/repertoire-officiel-communes-suisse.html>
"register_dic"
|
/scratch/gouwar.j/cran-all/cranData/BFS/R/data.R
|
utils::globalVariables(
c(
"url_px",
"published",
"title",
"url_bfs",
"type",
"href",
"collection_id"
)
)
|
/scratch/gouwar.j/cran-all/cranData/BFS/R/globals.R
|
#' Pipe operator
#'
#' See \code{magrittr::\link[magrittr:pipe]{\%>\%}} for details.
#'
#' @name %>%
#' @rdname pipe
#' @keywords internal
#' @export
#' @importFrom magrittr %>%
#' @usage lhs \%>\% rhs
#' @param lhs A value or the magrittr placeholder.
#' @param rhs A function call using the magrittr semantics.
#' @return The result of calling `rhs(lhs)`.
NULL
|
/scratch/gouwar.j/cran-all/cranData/BFS/R/utils-pipe.R
|
#BF method for coeftest class objects
#' @method BF coeftest
#' @export
BF.coeftest <- function(x,
hypothesis = NULL,
prior.hyp = NULL,
complement = TRUE,
...){
Sigma <- diag(x[, 2L]^2)
n <- attr(x, "nobs")
if(is.null(n)) stop("'BF.coeftest' only works if 'nobs.coeftest' gives the number of observations.")
if(!is.null(hypothesis)) warning("constrained hypothesis testing is not supported for objects of class 'coeftest'")
if(!is.null(prior.hyp)) warning("prior specification via 'prior.hyp' is not supported for objects of class 'coeftest'")
#if(!exploratory) stop("only exploratory hypothesis testing is supported for objects of class 'coeftest'")
out <- BF.default(x[, 1L], Sigma = Sigma, n = n, ...)
out$model <- x
out$call <- match.call()
out
}
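# Hedged usage sketch (comments only, not executed): a 'coeftest' object from
# the lmtest package can be passed directly, provided it carries the number of
# observations (see the check above), e.g.
#   fit <- lm(mpg ~ wt + hp, data = mtcars)
#   ct <- lmtest::coeftest(fit, vcov. = sandwich::vcovHC(fit))
#   BF(ct)
# Only exploratory tests are supported for this class (no 'hypothesis' argument).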
|
/scratch/gouwar.j/cran-all/cranData/BFpack/R/BF.coeftest.R
|
#BF method for coxph class objects
#' @method BF coxph
#' @export
BF.coxph <- function(x,
hypothesis = NULL,
prior.hyp = NULL,
complement = TRUE,
...){
#Extract summary statistics
Args <- as.list(match.call()[-1])
get_est <- get_estimates(x)
Args$x <- get_est$estimate
Args$Sigma <- get_est$Sigma[[1]]
Args$n <- x$nevent
Args$hypothesis <- hypothesis
Args$prior.hyp <- prior.hyp
Args$complement <- complement
out <- do.call(BF, Args)
out$model <- x
out$call <- match.call()
out
}
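# Hedged usage sketch (comments only, not executed): a coxph fit from the
# survival package can be passed directly, e.g.
#   fit <- survival::coxph(survival::Surv(time, status) ~ age + sex,
#                          data = survival::lung)
#   BF(fit, hypothesis = "age > 0")
# The number of events (fit$nevent) is used as the effective sample size.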
|
/scratch/gouwar.j/cran-all/cranData/BFpack/R/BF.coxph.R
|
#BF method for ergm class
#' @importFrom sandwich sandwich
#' @importFrom ergm ergmMPLE
#' @importFrom stats as.formula
#' @method BF ergm
#' @export
BF.ergm <- function(x,
hypothesis = NULL,
prior.hyp = NULL,
complement = TRUE,
...){
# extract coefficients
estimate <- coef(x)
K1 <- length(estimate)
# get design matrix of pseudo likelihood to construct prior covariance matrix
nw <- x$network
form.char <- paste(format(x$formula), collapse = '')
location_tilde <- regexpr("~",form.char)[1]
form.new <- as.formula(paste0("nw ~",substr(form.char,start=location_tilde+1,stop=nchar(form.char))))
x_MPLE <- ergmMPLE(form.new,output="dyadlist")
design.X <- x_MPLE$predictor[,2+1:K1]
which.edges <- which(colnames(design.X)=="edges")
if(length(which.edges)==0){ #no intercept 'edges'
Xdelta <- as.matrix(design.X)
priorcov <- solve(t(Xdelta)%*%Xdelta) * nrow(Xdelta)
}else{
Xdelta <- as.matrix(design.X[,-which.edges])
priorcov.Xdelta <- solve(t(Xdelta)%*%Xdelta) * nrow(Xdelta)
priorcov <- matrix(0,ncol=K1,nrow=K1)
priorcov[which.edges,which.edges] <- 100000 #flat prior for the 'edges' parameter
if(which.edges==1){
priorcov[2:K1,2:K1] <- priorcov.Xdelta
}else{
if(which.edges==K1){
priorcov[1:(K1-1),1:(K1-1)] <- priorcov.Xdelta
}else{
priorcov[1:(which.edges-1),1:(which.edges-1)] <- priorcov.Xdelta[1:(which.edges-1),1:(which.edges-1)]
priorcov[(which.edges+1):K1,(which.edges+1):K1] <- priorcov.Xdelta[which.edges:(K1-1),which.edges:(K1-1)]
priorcov[1:(which.edges-1),(which.edges+1):K1] <- priorcov.Xdelta[1:(which.edges-1),which.edges:(K1-1)]
priorcov[(which.edges+1):K1,1:(which.edges-1)] <- t(priorcov[1:(which.edges-1),(which.edges+1):K1])
}
}
}
Bergm.out <- Bergm::bergm(form.new,prior.mean=rep(0,K1),prior.sigma=priorcov,...)
#get robust estimates for the Gaussian mean and covariance matrix
post.mean <- apply(Bergm.out$Theta,2,median)
names(post.mean) <- names(estimate)
#get robust estimate of posterior covariance matrix
mlm1 <- lm(Bergm.out$Theta ~ 1)
post.Sigma <- sandwich(mlm1) * nrow(Bergm.out$Theta)
# use Savage-Dickey approximation of the BF
if(length(which.edges)==0){
prior.mean = rep(0,K1)
names(prior.mean) <- names(estimate)
row.names(priorcov) <- colnames(priorcov) <- names(estimate)
BFergm_out <- Savage.Dickey.Gaussian(prior.mean = prior.mean,
prior.sigma = priorcov,
post.mean = post.mean,
post.sigma = post.Sigma,
hypothesis = hypothesis,
prior.hyp = prior.hyp,
complement = complement)
}else{
prior.mean = rep(0,K1)
names(prior.mean) <- names(estimate)
row.names(priorcov) <- colnames(priorcov) <- names(estimate)
BFergm_out <- Savage.Dickey.Gaussian(prior.mean = prior.mean[-which.edges],
prior.sigma = priorcov[-which.edges,-which.edges],
post.mean = post.mean[-which.edges],
post.sigma = post.Sigma[-which.edges,-which.edges],
hypothesis = hypothesis,
prior.hyp = prior.hyp,
complement = complement)
}
postestimates <- cbind(apply(Bergm.out$Theta,2,mean),
apply(Bergm.out$Theta,2,median),
apply(Bergm.out$Theta,2,quantile,.025),
apply(Bergm.out$Theta,2,quantile,.975))
rownames(postestimates) <- names(estimate)
colnames(postestimates) <- c("mean","median","2.5%","97.5%")
BFergm_out$estimates <- postestimates
BFergm_out$model <- x
BFergm_out$call <- match.call()
BFergm_out$bayesfactor <- "Bayes factors based on unit-information priors and Gaussian approximations"
BFergm_out$parameter <- "ERGM coefficients"
BFergm_out$model_update <- Bergm.out
BFergm_out$prior.parameters <- list(prior.mean=prior.mean,prior.cov=priorcov)
return(BFergm_out)
}
#' @method get_estimates ergm
#' @export
get_estimates.ergm <- function(x, ...){
nw <- x$network
form.char <- paste(format(x$formula), collapse = '')
location_tilde <- regexpr("~",form.char)[1]
form.new <- as.formula(paste0("nw ~",substr(form.char,start=location_tilde+1,stop=nchar(form.char))))
estimate <- coef(x)
K1 <- length(estimate)
x_MPLE <- ergmMPLE(form.new,output="dyadlist")
design.X <- x_MPLE$predictor[,2+1:K1]
which.edges <- which(colnames(design.X)=="edges")
out <- list()
if(length(which.edges)==0){ #no intercept 'edges'
out$estimate <- coef(x)
out$Sigma <- list(vcov(x))
}else{
out$estimate <- coef(x)[-which.edges]
out$Sigma <- list(as.matrix(vcov(x)[-which.edges,-which.edges]))
}
class(out) <- "model_estimates"
attr(out, "analysisType") <- "ergm"
out
}
#' @method BF bergm
#' @export
BF.bergm <- function(x,
hypothesis = NULL,
prior.hyp = NULL,
complement = TRUE,
...){
form.char <- paste(format(x$formula), collapse = '')
location_tilde <- regexpr("~",form.char)[1]
name.nw <- substr(form.char,start=1,stop=location_tilde-2)
if(!exists(name.nw)){
stop(paste0("For an object of class 'bergm', the function 'BF()' only runs if the network data object '",name.nw,
"' is also present in the environment."))
}
# first check if effect names in hypothesis argument correspond with names in x
coef_names_hyp <- names(get_estimates(x)$estimate)
if(!is.null(hypothesis)){
test0 <- parse_hypothesis(coef_names_hyp,hypothesis)
}
# extract coefficients
estimate <- apply(x$Theta,2,median)
K1 <- length(estimate)
coef_names <- paste0("theta",1:K1)
# get design matrix of pseudo likelihood to construct prior covariance matrix
x_MPLE <- ergmMPLE(formula=x$formula,output="dyadlist")
design.X <- x_MPLE$predictor[,2+1:K1]
which.edges <- which(colnames(design.X)=="edges")
if(length(which.edges)==0){ #no intercept 'edges'
Xdelta <- as.matrix(design.X)
priorcov <- solve(t(Xdelta)%*%Xdelta) * nrow(Xdelta)
}else{
Xdelta <- as.matrix(design.X[,-which.edges])
priorcov.Xdelta <- solve(t(Xdelta)%*%Xdelta) * nrow(Xdelta)
priorcov <- matrix(0,ncol=K1,nrow=K1)
priorcov[which.edges,which.edges] <- 100000 #flat prior for the 'edges' parameter
if(which.edges==1){
priorcov[2:K1,2:K1] <- priorcov.Xdelta
}else{
if(which.edges==K1){
priorcov[1:(K1-1),1:(K1-1)] <- priorcov.Xdelta
}else{
priorcov[1:(which.edges-1),1:(which.edges-1)] <- priorcov.Xdelta[1:(which.edges-1),1:(which.edges-1)]
priorcov[(which.edges+1):K1,(which.edges+1):K1] <- priorcov.Xdelta[which.edges:(K1-1),which.edges:(K1-1)]
priorcov[1:(which.edges-1),(which.edges+1):K1] <- priorcov.Xdelta[1:(which.edges-1),which.edges:(K1-1)]
priorcov[(which.edges+1):K1,1:(which.edges-1)] <- t(priorcov[1:(which.edges-1),(which.edges+1):K1])
}
}
}
Bergm.out <- Bergm::bergm(x$formula,prior.mean=rep(0,K1),prior.sigma=priorcov,...)
#get robust estimates for the Gaussian mean and covariance matrix
post.mean <- apply(Bergm.out$Theta,2,median)
names(post.mean) <- paste0("theta",1:K1)
#get robust estimate of posterior covariance matrix
mlm1 <- lm(Bergm.out$Theta ~ 1)
post.Sigma <- sandwich(mlm1) * nrow(Bergm.out$Theta)
# use Savage-Dickey approximation of the BF
if(length(which.edges)==0){
prior.mean = rep(0,K1)
names(prior.mean) <- coef_names
row.names(priorcov) <- colnames(priorcov) <- coef_names
BFergm_out <- Savage.Dickey.Gaussian(prior.mean = prior.mean,
prior.sigma = priorcov,
post.mean = post.mean,
post.sigma = post.Sigma,
hypothesis = hypothesis,
prior.hyp = prior.hyp,
complement = complement)
}else{
prior.mean = rep(0,K1)
names(prior.mean) <- coef_names
row.names(priorcov) <- colnames(priorcov) <- coef_names
BFergm_out <- Savage.Dickey.Gaussian(prior.mean = prior.mean[-which.edges],
prior.sigma = priorcov[-which.edges,-which.edges],
post.mean = post.mean[-which.edges],
post.sigma = post.Sigma[-which.edges,-which.edges],
hypothesis = hypothesis,
prior.hyp = prior.hyp,
complement = complement)
}
postestimates <- cbind(apply(Bergm.out$Theta,2,mean),
apply(Bergm.out$Theta,2,median),
apply(Bergm.out$Theta,2,quantile,.025),
apply(Bergm.out$Theta,2,quantile,.975))
rownames(postestimates) <- names(estimate)
colnames(postestimates) <- c("mean","median","2.5%","97.5%")
BFergm_out$estimates <- postestimates
BFergm_out$model <- x
BFergm_out$call <- match.call()
BFergm_out$bayesfactor <- "Bayes factors based on unit-information priors and Gaussian approximations"
BFergm_out$parameter <- "ERGM coefficients"
BFergm_out$model_update <- Bergm.out
BFergm_out$prior.parameters <- list(prior.mean=prior.mean,prior.cov=priorcov)
return(BFergm_out)
}
#' @method get_estimates bergm
#' @export
get_estimates.bergm <- function(x, ...){
form.char <- paste(format(x$formula), collapse = '')
location_tilde <- regexpr("~",form.char)[1]
name.nw <- substr(form.char,start=1,stop=location_tilde-2)
if(!exists(name.nw)){
stop(paste0("For an object of class 'bergm', the function 'BF()' only runs if the network data object '",name.nw,
"' is also present in the environment."))
}
K1 <- length(apply(x$Theta,2,median))
names.bergm.coef <- paste0("theta",1:K1)
x_MPLE <- ergmMPLE(formula=x$formula,output="dyadlist")
design.X <- x_MPLE$predictor[,2+1:K1]
which.edges <- which(colnames(design.X)=="edges")
out <- list()
if(length(which.edges)==0){ #no intercept 'edges'
out$estimate <- apply(x$Theta,2,median)
names(out$estimate) <- names.bergm.coef
mlm1 <- lm(x$Theta ~ 1)
out$Sigma <- list(sandwich(mlm1) * nrow(x$Theta))
colnames(out$Sigma[[1]]) <- row.names(out$Sigma[[1]]) <- names(out$estimate)
}else{
out$estimate <- apply(x$Theta,2,median)
names(out$estimate) <- names.bergm.coef
out$estimate <- out$estimate[-which.edges]
mlm1 <- lm(x$Theta ~ 1)
out$Sigma <- list(sandwich(mlm1)[-which.edges,-which.edges] * nrow(x$Theta))
colnames(out$Sigma[[1]]) <- row.names(out$Sigma[[1]]) <- names(out$estimate)
}
class(out) <- "model_estimates"
attr(out, "analysisType") <- "ergm"
out
}
|
/scratch/gouwar.j/cran-all/cranData/BFpack/R/BF.ergm.R
|
#
#' @importFrom stats qnorm dnorm pnorm
#' @describeIn BF S3 method for a named vector 'x'
#' @method BF default
#' @export
BF.default <- function(x,
hypothesis = NULL,
prior.hyp = NULL,
complement = TRUE,
Sigma,
n,
...){
#Input is a named mean vector x, covariance matrix and number of observations
#These are extracted by the relevant method functions from a model object and
#passed together with the hypothesis and prior to the Gaussian_estimator
# use Savage-Dickey approximation of the BF
BF_out <- Savage.Dickey.Gaussian(prior.mean = rep(0, length(x)),
prior.sigma = Sigma * n,
post.mean = x,
post.sigma = Sigma,
hypothesis = hypothesis,
prior.hyp = prior.hyp,
complement = complement)
BF_out$model <- x
BF_out$call <- match.call()
BF_out$bayesfactor <- "adjusted fractional Bayes factors using Gaussian approximations"
BF_out$parameter <- "general parameters"
BF_out
}
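# Hedged usage sketch (comments only, not executed): BF.default expects a named
# estimate vector together with its error covariance matrix and the sample size, e.g.
#   fit <- lm(mpg ~ wt + hp, data = mtcars)
#   BF(coef(fit), Sigma = vcov(fit), n = nrow(mtcars))
# A constrained 'hypothesis' string on the coefficient names can be added as well.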
# extended Savage-Dickey density ratio for multivariate normal prior and posterior
Savage.Dickey.Gaussian <- function(prior.mean,
prior.sigma,
post.mean,
post.sigma,
hypothesis,
prior.hyp,
complement){
#prior.mean is a normal prior mean of key parameters
#prior.sigma is a normal prior covariance matrix of key parameters
#post.mean is a normal posterior mean of key parameters
#post.sigma is a normal posterior covariance matrix of key parameters
#These are passed together with the hypothesis and prior to the Gaussian_estimator
#the prior of the nuisance parameters under the constrained models is a conditional version of the full prior
meanN <- post.mean #the posterior mean is approximated with the estimate
covmN <- post.sigma #the posterior covariance matrix is approximated with the error covariance matrix
names_coef <- names(meanN)
covm0 <- prior.sigma
mean0 <- prior.mean # for constrained testing prior mean is relocated to 'boundary of constrained space'
# compute exploratory BFs for each parameter
relfit <- matrix(c(dnorm(0,mean=meanN,sd=sqrt(diag(covmN))),
pnorm(0,mean=meanN,sd=sqrt(diag(covmN))),
1-pnorm(0,mean=meanN,sd=sqrt(diag(covmN)))),ncol=3)
relcomp <- matrix(c(dnorm(0,mean=mean0,sd=sqrt(diag(covm0))),
pnorm(0,mean=mean0,sd=sqrt(diag(covm0))),
1-pnorm(0,mean=mean0,sd=sqrt(diag(covm0)))),ncol=3)
BFtu_exploratory <- relfit / relcomp
PHP_exploratory <- round(BFtu_exploratory / apply(BFtu_exploratory,1,sum),3)
colnames(PHP_exploratory) <- c("Pr(=0)","Pr(<0)","Pr(>0)")
row.names(PHP_exploratory) <- names_coef
# compute posterior estimates
postestimates <- cbind(meanN,meanN,
t(matrix(unlist(lapply(1:length(meanN),function(coef){
ub <- qnorm(p=.975)*sqrt(covmN[coef,coef])+meanN[coef]
lb <- qnorm(p=.025)*sqrt(covmN[coef,coef])+meanN[coef]
return(c(lb,ub))
})),nrow=2))
)
row.names(postestimates) <- names_coef
colnames(postestimates) <- c("mean","median","2.5%","97.5%")
if(is.null(hypothesis)){
BFtu_confirmatory <- PHP_confirmatory <- BFmatrix_confirmatory <- relfit <-
relcomp <- BFtable <- hypotheses <- priorprobs <- NULL
}else{
# confirmatory tests based on input constraints
parse_hyp <- parse_hypothesis(names_coef,hypothesis)
parse_hyp$hyp_mat <- do.call(rbind, parse_hyp$hyp_mat)
#create coefficient with equality and order constraints
RrList <- make_RrList2(parse_hyp)
RrE <- RrList[[1]]
RrO <- RrList[[2]]
# RrStack is used to check conflicting constraints, and for the default prior location
if(length(RrE)==1){
RrStack <- rbind(do.call(rbind,RrE),do.call(rbind,RrO))
RrStack <- interval_RrStack(RrStack)
}else{
RrStack_list <- lapply(1:length(RrE),function(h){
interval_RrStack(rbind(RrE[[h]],RrO[[h]]))
})
RrStack <- do.call(rbind,RrStack_list)
}
K <- length(meanN)
if(nrow(RrStack)>1){
RStack <- RrStack[,-(K+1)]
rStack <- RrStack[,(K+1)]
}else{
RStack <- matrix(RrStack[,-(K+1)],nrow=1)
rStack <- RrStack[,(K+1)]
}
# check if a common boundary exists for prior location under all constrained hypotheses
if(nrow(RrStack) > 1){
rref_ei <- rref(RrStack)
nonzero <- rref_ei[,K+1]!=0
if(max(nonzero)>0){
row1 <- max(which(nonzero))
if(sum(abs(rref_ei[row1,1:K]))==0){
stop("No common boundary point for prior location. Conflicting constraints.")
}
}
#determine fraction via number of independent rows (constraints)
if(is.matrix(rref_ei[,-(K+1)])){
numindep <- sum(apply(abs(rref_ei[,-(K+1)]),1,sum)!=0)
}else{
numindep <- sum(apply(abs(as.matrix(rref_ei[,-(K+1)])),1,sum)!=0)
}
} else {
numindep <- 1
}
#get relative fit and complexity of hypotheses
numhyp <- length(RrE)
relcomp <- t(matrix(unlist(lapply(1:numhyp,function(h){
Gaussian_measures(mean1 = mean0, Sigma1 = covm0, RrE1 = RrE[[h]], RrO1 = RrO[[h]],
names1=names_coef,constraints1=parse_hyp$original_hypothesis[h])
})),nrow=2))
relfit <- t(matrix(unlist(lapply(1:numhyp,function(h){
Gaussian_measures(mean1 = meanN, Sigma1 = covmN, RrE1 = RrE[[h]], RrO1 = RrO[[h]],
names1=names_coef,constraints1=parse_hyp$original_hypothesis[h])
})),nrow=2))
row.names(relfit) <- row.names(relcomp) <- parse_hyp$original_hypothesis
if(complement == TRUE){
# get relative fit and complexity of complement hypothesis
relcomp <- Gaussian_prob_Hc(mean1 = mean0, Sigma1 = covm0, relmeas = relcomp, RrO = RrO) #Note that input is a bit strange here, Gaussian_prob_Hc needs fixing
relfit <- Gaussian_prob_Hc(mean1 = meanN, Sigma1 = covmN, relmeas = relfit, RrO = RrO)
}
hypothesisshort <- unlist(lapply(1:nrow(relfit),function(h) paste0("H",as.character(h))))
row.names(relfit) <- row.names(relcomp) <- hypothesisshort
colnames(relcomp) <- c("c_E", "c_0")
colnames(relfit) <- c("f_E", "f_0")
# the BF for the complement hypothesis vs Hu needs to be computed.
BFtu_confirmatory <- c(apply(relfit / relcomp, 1, prod))
# Check input of prior probabilities
if(is.null(prior.hyp)){
priorprobs <- rep(1/length(BFtu_confirmatory),length(BFtu_confirmatory))
}else{
if(!is.numeric(prior.hyp) || length(prior.hyp)!=length(BFtu_confirmatory)){
warning(paste0("Argument 'prior.hyp' should be numeric and of length ",as.character(length(BFtu_confirmatory)),". Equal prior probabilities are used."))
priorprobs <- rep(1/length(BFtu_confirmatory),length(BFtu_confirmatory))
}else{
priorprobs <- prior.hyp
}
}
names(priorprobs) <- hypothesisshort
PHP_confirmatory <- round(BFtu_confirmatory*priorprobs / sum(BFtu_confirmatory*priorprobs),3)
BFtable <- cbind(relcomp,relfit,relfit[,1]/relcomp[,1],relfit[,2]/relcomp[,2],
apply(relfit,1,prod)/apply(relcomp,1,prod),PHP_confirmatory)
row.names(BFtable) <- names(PHP_confirmatory)
colnames(BFtable) <- c("complex=","complex>","fit=","fit>","BF=","BF>","BF","PHP")
BFmatrix_confirmatory <- matrix(rep(BFtu_confirmatory,length(BFtu_confirmatory)),ncol=length(BFtu_confirmatory))/
t(matrix(rep(BFtu_confirmatory,length(BFtu_confirmatory)),ncol=length(BFtu_confirmatory)))
diag(BFmatrix_confirmatory) <- 1
# row.names(BFmatrix_confirmatory) <- Hnames
# colnames(BFmatrix_confirmatory) <- Hnames
if(nrow(relfit)==length(parse_hyp$original_hypothesis)){
hypotheses <- parse_hyp$original_hypothesis
}else{
hypotheses <- c(parse_hyp$original_hypothesis,"complement")
}
}
BF_out <- list(
BFtu_exploratory=BFtu_exploratory,
PHP_exploratory=PHP_exploratory,
BFtu_confirmatory=BFtu_confirmatory,
PHP_confirmatory=PHP_confirmatory,
BFmatrix_confirmatory=BFmatrix_confirmatory,
BFtable_confirmatory=BFtable,
prior.hyp=priorprobs,
hypotheses=hypotheses,
estimates=postestimates,
model=NULL,
bayesfactor="Bayes factors using Gaussian approximations",
parameter="general parameters",
call=NULL)
class(BF_out) <- "BF"
BF_out
}
# compute relative measures (fit or complexity) under a multivariate Gaussian distribution
#' @importFrom mvtnorm dmvnorm pmvnorm
Gaussian_measures <- function(mean1,Sigma1,n1=0,RrE1,RrO1,names1=NULL,constraints1=NULL){
K <- length(mean1)
relE <- relO <- 1
if(!is.null(RrE1) && is.null(RrO1)){ #only equality constraints
RE1 <- RrE1[,-(K+1)]
if(!is.matrix(RE1)){
RE1 <- matrix(RE1,ncol=K)
}
rE1 <- RrE1[,(K+1)]
qE1 <- nrow(RE1)
meanE <- RE1%*%mean1
SigmaE <- RE1%*%Sigma1%*%t(RE1)
relE <- dmvnorm(rE1,mean=c(meanE),sigma=SigmaE,log=FALSE)
}
if(is.null(RrE1) && !is.null(RrO1)){ #only order constraints
RO1 <- RrO1[,-(K+1)]
if(!is.matrix(RO1)){
RO1 <- matrix(RO1,ncol=K)
}
qO1 <- nrow(RO1)
rO1 <- RrO1[,(K+1)]
if(Rank(RO1)==nrow(RO1)){ #RO1 is of full row rank. So use transformation.
meanO <- c(RO1%*%mean1)
SigmaO <- RO1%*%Sigma1%*%t(RO1)
check_vcov(SigmaO)
relO <- pmvnorm(lower=rO1,upper=Inf,mean=meanO,sigma=SigmaO)[1]
}else{ #no linear transformation can be used so pmvnorm cannot be applied directly; use bain with a multivariate normal approximation
names(mean1) <- names1
if(n1>0){ # we need prior measures
mean1vec <- c(mean1)
names(mean1vec) <- names1
bain_res <- bain(x=mean1vec,hypothesis=constraints1,Sigma=Sigma1,n=n1)
relO <- bain_res$fit[1,4]
}else { # we need posterior measures (there is very little information)
mean1vec <- c(mean1)
names(mean1vec) <- names1
bain_res <- bain(x=mean1vec,hypothesis=constraints1,Sigma=Sigma1,n=999) #n not used in computation
relO <- bain_res$fit[1,3]
}
}
}
if(!is.null(RrE1) && !is.null(RrO1)){ #hypothesis with equality and order constraints
RE1 <- RrE1[,-(K+1)]
if(!is.matrix(RE1)){
RE1 <- matrix(RE1,ncol=K)
}
rE1 <- RrE1[,(K+1)]
qE1 <- nrow(RE1)
RO1 <- RrO1[,-(K+1)]
if(!is.matrix(RO1)){
RO1 <- matrix(RO1,ncol=K)
}
qO1 <- nrow(RO1)
rO1 <- RrO1[,(K+1)]
Rr1 <- rbind(RrE1,RrO1)
if(Rank(Rr1) == nrow(Rr1)){
R1 <- rbind(RE1,RO1)
#b)
Tmean1 <- R1 %*% mean1
TSigma1 <- R1 %*% Sigma1 %*% t(R1)
# relative measure for equalities
relE <- dmvnorm(x=rE1,mean=Tmean1[1:qE1],sigma=matrix(TSigma1[1:qE1,1:qE1],ncol=qE1),log=FALSE)
# Partitioning equality part and order part
Tmean1E <- Tmean1[1:qE1]
Tmean1O <- Tmean1[qE1+1:qO1]
TSigma1EE <- TSigma1[1:qE1,1:qE1]
TSigma1OE <- matrix(c(TSigma1[qE1+1:qO1,1:qE1]),nrow=qO1)
TSigma1OO <- TSigma1[qE1+1:qO1,qE1+1:qO1]
#conditional location and covariance matrix
Tmean1OgE <- Tmean1O + TSigma1OE %*% solve(TSigma1EE) %*% (rE1-Tmean1E)
TSigma1OgE <- TSigma1OO - TSigma1OE %*% solve(TSigma1EE) %*% t(TSigma1OE)
relO <- pmvnorm(lower=rO1,upper=Inf,mean=c(Tmean1OgE),sigma=TSigma1OgE)[1]
}else{ #use bain for the computation of the probability
names(mean1) <- names1
if(n1>0){ # we need prior measures
bain_res <- bain(x=c(mean1),hypothesis=constraints1,Sigma=Sigma1,n=n1)
relO <- bain_res$fit[1,4]
relE <- bain_res$fit[1,2]
}else { # we need posterior measures (there is very little information)
bain_res <- bain(x=c(mean1),hypothesis=constraints1,Sigma=Sigma1,n=999) #n not used in computation
relO <- bain_res$fit[1,3]
relE <- bain_res$fit[1,1]
}
}
}
return(c(relE,relO))
}
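# For example (sketch), for the single equality constraint "theta1 = 0" the function
# returns relE = dnorm(0, mean = mean1[1], sd = sqrt(Sigma1[1,1])) and relO = 1,
# i.e. the Savage-Dickey density at the restriction point and a trivial
# order-constraint probability.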
# The function computes the probability of an unconstrained draw falling in the complement subspace under a multivariate Gaussian distribution.
#' @importFrom mvtnorm rmvnorm
Gaussian_prob_Hc <- function(mean1,Sigma1,relmeas,RrO){
numpara <- length(mean1)
numhyp <- nrow(relmeas)
which_eq <- relmeas[,1] != 1
if(sum(which_eq)==numhyp){ # Then the complement is equivalent to the unconstrained hypothesis.
relmeas <- rbind(relmeas,rep(1,2))
rownames(relmeas)[numhyp+1] <- "complement"
}else{ # So there is at least one hypothesis with only order constraints
welk <- which(!which_eq)
if(length(welk)==1){ # There is one hypothesis with only order constraints. Hc is complement of this hypothesis.
relmeas <- rbind(relmeas,rep(1,2))
relmeas[numhyp+1,2] <- 1 - relmeas[welk,2]
rownames(relmeas)[numhyp+1] <- "complement"
}else{ # So more than one hypothesis with only order constraints
# First we check whether there is an overlap between the order constrained spaces.
draws2 <- 1e4
randomDraws <- rmvnorm(draws2,mean=rep(0,numpara),sigma=diag(numpara))
#get draws that satisfy the constraints of the separate order constrained hypotheses
checksOC <- lapply(welk,function(h){
Rorder <- as.matrix(RrO[[h]][,-(1+numpara)])
if(ncol(Rorder)==1){
Rorder <- t(Rorder)
}
rorder <- as.matrix(RrO[[h]][,1+numpara])
apply(randomDraws%*%t(Rorder) > rep(1,draws2)%*%t(rorder),1,prod)
})
checkOCplus <- Reduce("+",checksOC)
if(sum(checkOCplus > 0) < draws2){ #then the joint order constrained hypotheses do not completely cover the parameter space.
if(sum(checkOCplus>1)==0){ # then order constrained spaces are nonoverlapping
relmeas <- rbind(relmeas,rep(1,2))
relmeas[numhyp+1,2] <- 1 - sum(relmeas[welk,2])
rownames(relmeas)[numhyp+1] <- "complement"
}else{ #the order constrained subspaces at least partly overlap
# the function below gives a rough estimate of the posterior probability under Hc;
# a bain type of algorithm would be better, but for now this is adequate.
randomDraws <- rmvnorm(draws2,mean=mean1,sigma=Sigma1)
checksOCpost <- lapply(welk,function(h){
Rorder <- as.matrix(RrO[[h]][,-(1+numpara)])
if(ncol(Rorder)==1){
Rorder <- t(Rorder)
}
rorder <- as.matrix(RrO[[h]][,1+numpara])
apply(randomDraws%*%t(Rorder) > rep(1,draws2)%*%t(rorder),1,prod)
})
relmeas <- rbind(relmeas,rep(1,2))
relmeas[numhyp+1,2] <- sum(Reduce("+",checksOCpost) == 0) / draws2
rownames(relmeas)[numhyp+1] <- "complement"
}
}
}
}
return(relmeas)
}
|
/scratch/gouwar.j/cran-all/cranData/BFpack/R/BF.gaussian.R
|
#BF method for glm classes
#' @method BF glm
#' @export
BF.glm <- function(x,
hypothesis = NULL,
prior.hyp = NULL,
complement = TRUE,
...){
Args <- as.list(match.call()[-1])
get_est <- get_estimates(x)
Args$x <- get_est$estimate
Args$Sigma <- get_est$Sigma[[1]]
Args$n <- nobs(x)
Args$hypothesis <- hypothesis
Args$prior.hyp <- prior.hyp
Args$complement <- complement
out <- do.call(BF, Args)
out$model <- x
out$call <- match.call()
out
}
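# Illustrative sketch (simulated data; the variable names and the hypothesis string
# are assumptions): the glm method forwards the coefficient estimates, their error
# covariance, and the number of observations to BF.default.
if (FALSE) {
  set.seed(1)
  dat <- data.frame(y = rbinom(100, 1, 0.5), x1 = rnorm(100), x2 = rnorm(100))
  fit <- glm(y ~ x1 + x2, family = binomial, data = dat)
  BF(fit, hypothesis = "x1 > x2; x1 = x2")
}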
|
/scratch/gouwar.j/cran-all/cranData/BFpack/R/BF.glm.R
|
#' @importFrom pracma rref Rank
#' @importFrom mvtnorm dmvnorm pmvnorm rmvnorm dmvt pmvt rmvt
#' @importFrom stats rWishart qt
#' @importFrom MASS ginv
#' @describeIn BF S3 method for an object of class 'lm'
#' @method BF lm
#' @export
BF.lm <- function(x,
hypothesis = NULL,
prior.hyp = NULL,
complement = TRUE,
BF.type = 2,
...){
if(is.null(BF.type)){
stop("The argument 'BF.type' must be the integer 1 (for the fractional BF) or 2 (for the adjusted fractional BF).")
}
if(!is.null(BF.type)){
if(is.na(BF.type) | (BF.type!=1 & BF.type!=2))
stop("The argument 'BF.type' must be the integer 1 (for the fractional BF) or 2 (for the adjusted fractional BF).")
}
if(BF.type==2){
bayesfactor <- "generalized adjusted fractional Bayes factors"
}else{
bayesfactor <- "generalized fractional Bayes factors"
}
testedparameter <- "regression coefficients"
# default BF on location parameters in a univariate normal linear model
# Note that it is recommended that the fitted model is based on standardized covariates.
x$coefficients <- as.matrix(x$coefficients)
x$residuals <- as.matrix(x$residuals)
P <- ncol(x$residuals)
N <- nrow(x$residuals)
K <- length(x$coefficients)/P # dimension of predictors per dependent variable
dummyX <- rep(F,K)
names(dummyX) <- row.names(x$coefficients)
Xmat <- model.matrix(x)
Ymat <- model.matrix(x)%*%x$coefficients + x$residuals
# Exploratory testing of regression coefficients
if(length(x$xlevels)==0){ #no grouping covariates: 1 group
J <- 1
dummyX <- rep(F,K)
Nj <- nrow(Xmat)
dvec <- rep(1,Nj)
#set minimal fractions for the group
bj <- ((P+K)/J)/Nj
#no dummy covariates for factors
dummy01TRUE <- FALSE
}else{
#check if the dummy group variables have 0 / 1 coding
dummy01TRUE <- prod(unlist(lapply(1:length(x$contrasts),function(fac){
x$contrasts[[fac]] == "contr.treatment"
}))) == 1
if(dummy01TRUE){
numlevels <- unlist(lapply(x$xlevels,length))
mains <- unlist(lapply(1:length(x$xlevels),function(fac){
unlist(lapply(1:length(x$xlevels[[fac]]),function(lev){
paste0(names(x$xlevels)[fac],x$xlevels[[fac]][lev])
}))
}))
intercept <- attr(x$terms,"intercept")==1
names_coef <- row.names(x$coefficients)
# dummyX1 checks which columns of the design matrix X are dummy's for a
# main effect or interaction effect
dummyX1 <- apply(matrix(unlist(lapply(1:length(mains),function(faclev){
unlist(lapply(1:length(names_coef),function(cf){
grepl(mains[faclev],names_coef[cf],fixed=TRUE)
}))
})),nrow=length(names_coef)),1,max)==1
}else{
dummyX1 <- rep(TRUE,K)
}
# dummyX2 checks which columns of the design matrix have two possible outcomes,
# which indicates a dummy variable
dummyX2 <- unlist(lapply(1:K,function(k){
length(table(Xmat[,k])) == 2
}))
# dummyX indicate which columns contain dummy group covariates
dummyX <- dummyX2 * dummyX1 == 1
#number of groups based on the unique combinations of dummy covariates
groupcode <- as.matrix(unique(Xmat[,dummyX]))
rownames(groupcode) <- unlist(lapply(1:nrow(groupcode),function(r){
paste0("groupcode",r)
}))
J <- nrow(groupcode)
if(J==nrow(Xmat)){
stop("Not enough observations for every group. Try fitting the model without factors.")
}
# group membership of each observation
dvec <- unlist(lapply(1:N,function(i){
which(rowSums(abs(t(matrix(rep(Xmat[i,dummyX],J),ncol=J)) - groupcode))==0)
}))
Nj <- c(table(dvec))
#set minimal fractions for each group
bj <- ((P+K)/J)/Nj
if(max(bj)>1){#too few observations in certain groups, so use a single minimal fraction
bj <- rep((P+K)/sum(Nj),length=J)
if(bj[1]>1){
stop("Not enough observations to compute a fractional Bayes factor.")
}
}
}
#Compute sufficient statistics for all groups
tXXj <- lapply(1:J,function(j){
if(Nj[j]==1){
Xmat[dvec==j,]%*%t(Xmat[dvec==j,])
}else t(Xmat[dvec==j,])%*%Xmat[dvec==j,]
})
tXXj_b <- lapply(1:J,function(j){
tXXj[[j]]*bj[j]
})
tXYj <- lapply(1:J,function(j){
if(Nj[j]==1){
as.matrix(Xmat[dvec==j,])%*%t(Ymat[dvec==j,])
} else {t(Xmat[dvec==j,])%*%Ymat[dvec==j,]}
})
tXYj_b <- lapply(1:J,function(j){
tXYj[[j]]*bj[j]
})
tYYj <- lapply(1:J,function(j){
if(Nj[j]==1){
Ymat[dvec==j,]%*%t(Ymat[dvec==j,])
}else t(Ymat[dvec==j,])%*%Ymat[dvec==j,]
})
tYYj_b <- lapply(1:J,function(j){
tYYj[[j]]*bj[j]
})
tXX <- Reduce("+",tXXj)
if(min(eigen(tXX)$values)<0){
stop("Model matrix does not seem to be of full row rank.")
}
tXXi <- solve(tXX)
tXY <- Reduce("+",tXYj)
tYY <- Reduce("+",tYYj)
tXX_b <- Reduce("+",tXXj_b)
tXXi_b <- solve(tXX_b)
tXY_b <- Reduce("+",tXYj_b)
tYY_b <- Reduce("+",tYYj_b)
BetaHat <- solve(tXX)%*%tXY # same as x$coefficients
S <- tYY - t(tXY)%*%solve(tXX)%*%tXY # same as sum((x$residuals)**2)
# sufficient statistics based on fraction of the data
BetaHat_b <- solve(tXX_b)%*%tXY_b
S_b <- tYY_b - t(tXY_b)%*%solve(tXX_b)%*%tXY_b
# BF computation for exploratory analysis of separate parameters
if(P==1){
names_coef <- row.names(x$coefficients)
}else{
names_coef1 <- row.names(x$coefficients)
names_coef2 <- colnames(x$coefficients)
names_coef <- unlist(lapply(1:P,function(p){
lapply(1:K,function(k){
paste0(names_coef1[k],"_on_",names_coef2[p])
})
}))
}
# prior hyperparameters
df0 <- 1 # should be the same as sum(rep(bj,times=Nj))-K-P+1
Scale0 <- kronecker(S_b,tXXi_b)
if(BF.type==2){
mean0 <- as.matrix(rep(0,K*P))
}else{
mean0 <- as.matrix(c(BetaHat))
}
# posterior hyperparameters
dfN <- N-K-P+1
ScaleN <- kronecker(S,tXXi)/(N-K-P+1) # off-diagonal elements have no meaning
meanN <- as.matrix(c(BetaHat))
row.names(meanN) <- row.names(mean0) <- names_coef
# Hypotheses for exploratory test
# H0: beta = 0
# H1: beta < 0
# H2: beta > 0
relfit <- t(matrix(unlist(lapply(1:(K*P),function(k){
c(dt((0-meanN[k,1])/sqrt(ScaleN[k,k]),df=dfN)/sqrt(ScaleN[k,k]),
pt((0-meanN[k,1])/sqrt(ScaleN[k,k]),df=dfN,lower.tail = TRUE),
pt((0-meanN[k,1])/sqrt(ScaleN[k,k]),df=dfN,lower.tail = FALSE))
})),nrow=3))
relcomp <- t(matrix(unlist(lapply(1:(K*P),function(k){
c(dt((0-mean0[k,1])/sqrt(Scale0[k,k]),df=df0)/sqrt(Scale0[k,k]),
pt((0-mean0[k,1])/sqrt(Scale0[k,k]),df=df0,lower.tail = TRUE),
pt((0-mean0[k,1])/sqrt(Scale0[k,k]),df=df0,lower.tail = FALSE))
})),nrow=3))
colnames(relfit) <- colnames(relcomp) <- c("p(=0)","Pr(<0)","Pr(>0)")
row.names(relcomp) <- row.names(relfit) <- names_coef
BFtu_exploratory <- relfit / relcomp
colnames(BFtu_exploratory) <- c("Pr(=0)","Pr(<0)","Pr(>0)")
PHP_exploratory <- BFtu_exploratory /
apply(BFtu_exploratory,1,sum)
#compute estimates
postestimates <- cbind(meanN,meanN,
t(matrix(unlist(lapply(1:length(meanN),function(coef){
ub <- qt(p=.975,df=dfN)*sqrt(ScaleN[coef,coef])+meanN[coef,1]
lb <- qt(p=.025,df=dfN)*sqrt(ScaleN[coef,coef])+meanN[coef,1]
return(c(lb,ub))
})),nrow=2))
)
row.names(postestimates) <- names_coef
colnames(postestimates) <- c("mean","median","2.5%","97.5%")
# Additional exploratory tests of main effects and interaction effects
# in the case of an aov type object
if(sum(class(x)=="aov")==1 & J > 1 & dummy01TRUE){
testedparameter <- "group means"
# check main effects
BFmain <- unlist(lapply(1:length(numlevels),function(fac){
name1 <- names(numlevels[fac])
mains1 <- mains[sum(numlevels[1:fac])-numlevels[fac]+1:numlevels[fac]]
which0 <- unlist(lapply(1:length(colnames(Xmat)),function(col){
sum(colnames(Xmat)[col]==mains1)==1
}))
if(sum(which0) > 0){
if(P > 1){
RrE_f <- matrix(0,nrow=sum(which0),ncol=length(colnames(Xmat)))
for(r1 in 1:sum(which0)){RrE_f[r1,which(which0)[r1]]<-1}
RrE_f <- cbind(kronecker(diag(P),RrE_f),rep(0,sum(which0)*P))
relcomp_f <- MatrixStudent_measures(Mean1=matrix(mean0,ncol=P),Scale1=S_b,tXXi1=tXXi_b,
df1=df0,RrE1=RrE_f,RrO1=NULL,Names1=NULL,constraints1=NULL,
MCdraws=1e4)
relfit_f <- MatrixStudent_measures(Mean1=BetaHat,Scale1=S,tXXi1=tXXi,df1=dfN,RrE1=RrE_f,
RrO1=NULL,Names1=NULL,constraints1=NULL,MCdraws=1e4)
}else{
RrE_f <- matrix(0,nrow=sum(which0),ncol=length(colnames(Xmat))+1)
for(r1 in 1:sum(which0)){RrE_f[r1,which(which0)[r1]]<-1}
relcomp_f <- Student_measures(mean1=mean0,Scale1=Scale0,df1=df0,RrE1=RrE_f,RrO1=NULL)
relfit_f <- Student_measures(mean1=meanN,Scale1=ScaleN,df1=dfN,RrE1=RrE_f,RrO1=NULL)
}
BFtu <- relfit_f[1]/relcomp_f[1]
names(BFtu) <- name1
return(c(BFtu,relfit_f[1],relcomp_f[1]))
}
}))
#compute Bayes factors for testing main effects if present
if(length(BFmain)>0){ # then there are main effects
names_main <- names(BFmain[(0:(length(BFmain)/3-1))*3+1])
BFtu_main <- matrix(c(BFmain[(0:(length(BFmain)/3-1))*3+1],rep(1,length(BFmain)/3)),
nrow=length(BFmain)/3)
row.names(BFtu_main) <- names_main
colnames(BFtu_main) <- c("BFtu","BFuu")
PHP_main <- BFtu_main / apply(BFtu_main,1,sum)
colnames(PHP_main) <- c("Pr(no effect)","Pr(complement)")
}else{ PHP_main <- BFtu_main <- NULL}
#check whether interaction effects are present
prednames <- names(attr(x$term,"dataClasses"))
matcov <- cbind(matrix(unlist(lapply(1:K,function(col){
colx <- colnames(Xmat)[col]
unlist(lapply(1:length(prednames),function(pred){
grepl(prednames[pred],colx)
}))
})),nrow=length(prednames)),rep(F,length(prednames)))
row.names(matcov) <- prednames
colnames(matcov) <- c(colnames(Xmat),"dummy")
BFtu_interaction0 <- list()
count_interaction <- 0
for(c1 in 1:ncol(matcov)){
if(c1 < ncol(matcov)){
numeffects_c <- sum(matcov[,c1])
if(numeffects_c>1){
count_interaction <- count_interaction + 1
interactionset <- names(which(matcov[,c1]))
whichx <- apply(matcov[which(matcov[,c1]),],2,sum)==length(interactionset)
if(P > 1){
RrE_ia <- matrix(0,nrow=sum(whichx),ncol=K)
for(r1 in 1:sum(whichx)){RrE_ia[r1,which(whichx)[r1]]<-1}
RrE_ia <- cbind(kronecker(diag(P),RrE_ia),rep(0,sum(whichx)*P))
relcomp_ia <- MatrixStudent_measures(Mean1=matrix(mean0,ncol=P),Scale1=S_b,tXXi1=tXXi_b,
df1=df0,RrE1=RrE_ia,RrO1=NULL,Names1=NULL,
constraints1=NULL,MCdraws=1e4)
relfit_ia <- MatrixStudent_measures(Mean1=BetaHat,Scale1=S,tXXi1=tXXi,df1=dfN,RrE1=RrE_ia,
RrO1=NULL,Names1=NULL,constraints1=NULL,MCdraws=1e4)
}else{
RrE_ia <- matrix(0,nrow=sum(whichx),ncol=K+1)
for(r1 in 1:sum(whichx)){RrE_ia[r1,which(whichx)[r1]]<-1}
relcomp_ia <- Student_measures(mean1=mean0,Scale1=Scale0,df1=df0,RrE1=RrE_ia,RrO1=NULL)
relfit_ia <- Student_measures(mean1=meanN,Scale1=ScaleN,df1=dfN,RrE1=RrE_ia,RrO1=NULL)
}
names(relcomp_ia) <- c("c=","c>")
names(relfit_ia) <- c("f=","f>")
BFtu_ia <- relfit_ia[1]/relcomp_ia[1]
names(BFtu_ia) <- paste(interactionset,collapse=":")
BFtu_interaction0[[count_interaction]] <- c(BFtu_ia,relcomp_ia[1],relfit_ia[1])
#exclude other columns from Xmat that have been used to avoid double results
matcov <- matcov[,-(which(whichx[-c1])+1)]
}
}
}
#compute Bayes factors for testing interaction effects if present
if(count_interaction>0){ # then there are interaction effects
BFtu_interaction0 <- unlist(BFtu_interaction0)
names_interaction <- names(BFtu_interaction0[(0:(length(BFtu_interaction0)/3-1))*3+1])
BFtu_interaction <- matrix(c(BFtu_interaction0[(0:(length(BFtu_interaction0)/3-1))*3+1],
rep(1,length(BFtu_interaction0)/3)),nrow=length(BFtu_interaction0)/3)
row.names(BFtu_interaction) <- names_interaction
colnames(BFtu_interaction) <- c("BFtu","BFuu")
PHP_interaction <- BFtu_interaction / apply(BFtu_interaction,1,sum)
colnames(PHP_interaction) <- c("Pr(no effect)","Pr(complement)")
}else{ PHP_interaction <- BFtu_interaction <- NULL}
BFtu_exploratory <- rbind(BFtu_main,BFtu_interaction)
PHP_exploratory <- rbind(PHP_main,PHP_interaction)
}
# confirmatory BF test
if(!is.null(hypothesis)){
#then constraints on regression coefficients
matrixnames <- matrix(names_coef,nrow=K)
# translate named constraints to matrices with coefficients for constraints
parse_hyp <- parse_hypothesis(names_coef,hypothesis)
parse_hyp$hyp_mat <- do.call(rbind, parse_hyp$hyp_mat)
RrList <- make_RrList2(parse_hyp)
RrE <- RrList[[1]]
RrO <- RrList[[2]]
if(length(RrE)==1){
RrStack <- rbind(do.call(rbind,RrE),do.call(rbind,RrO))
RrStack <- interval_RrStack(RrStack)
}else{
RrStack_list <- lapply(1:length(RrE),function(h){
interval_RrStack(rbind(RrE[[h]],RrO[[h]]))
})
RrStack <- do.call(rbind,RrStack_list)
}
if(nrow(RrStack)>1){
RStack <- RrStack[,-(K*P+1)]
rStack <- RrStack[,(K*P+1)]
}else{
RStack <- matrix(RrStack[,-(K*P+1)],nrow=1)
rStack <- RrStack[,(K*P+1)]
}
# check if a common boundary exists for prior location under all constrained hypotheses
if(nrow(RrStack) > 1){
rref_ei <- rref(RrStack)
nonzero <- rref_ei[,P*K+1]!=0
if(max(nonzero)>0){
row1 <- max(which(nonzero))
if(sum(abs(rref_ei[row1,1:(P*K)]))==0){
stop("No common boundary point for prior location. Conflicting constraints.")
}
}
}
#number of hypotheses that are specified
numhyp <- length(RrO)
if(P > 1){
#default prior location
if(BF.type==2){
Mean0 <- matrix(c(ginv(RStack)%*%rStack),nrow=K,ncol=P)
}else{
Mean0 <- BetaHat #mean0
}
#Mean0 <- matrix(c(ginv(RStack)%*%rStack),nrow=K,ncol=P)
relmeasunlist <- unlist(lapply(1:numhyp,function(h){
# Check whether the constraints are on a single row or column; if so,
# use the analytic expression, otherwise use a Monte Carlo estimate.
RrStack <- rbind(RrE[[h]],RrO[[h]])
Rcheck <- Reduce("+",lapply(1:nrow(RrStack),function(row1){
abs(matrix(RrStack[row1,-(K*P+1)],nrow=K))
}))
RcheckRow <- apply(Rcheck,1,sum)
RcheckCol <- apply(Rcheck,2,sum)
if(sum(RcheckRow!=0)==1){ # use multivariate Student distributions
K1 <- which(RcheckRow!=0)
# posterior hyperparameters
dfN <- N-K-P+1
ScaleN <- S*tXXi[K1,K1]/(N-K-P+1) # off-diagonal elements have no meaning
meanN <- as.matrix(c(BetaHat[K1,]))
# exclude inactive rows
if(is.null(RrE[[h]])){RrE_h=NULL
}else{
if(nrow(RrE[[h]])==1){
RrE_h <- t(as.matrix(RrE[[h]][,c((0:(P-1))*K+K1,P*K+1)]))
}else{
RrE_h <- RrE[[h]][,c((0:(P-1))*K+K1,P*K+1)]
}
}
if(is.null(RrO[[h]])){RrO_h=NULL
}else{
if(nrow(RrO[[h]])==1){
RrO_h <- t(as.matrix(RrO[[h]][,c((0:(P-1))*K+K1,P*K+1)]))
}else{
RrO_h <- RrO[[h]][,c((0:(P-1))*K+K1,P*K+1)]
}
}
# prior hyperparameters
df0 <- 1 # should be the same as sum(rep(bj,times=Nj))-K-P+1
Scale0 <- S_b*tXXi_b[K1,K1]
mean0 <- Mean0[K1,]
# compute relative measures of fit and complexity
relcomp_h <- Student_measures(mean1=mean0,Scale1=Scale0,df1=df0,RrE1=RrE_h,
RrO1=RrO_h,names1=matrixnames[K1,],
constraints1=parse_hyp$original_hypothesis[h])
relfit_h <- Student_measures(mean1=meanN,Scale1=ScaleN,df1=dfN,RrE1=RrE_h,
RrO1=RrO_h,names1=matrixnames[K1,],
constraints1=parse_hyp$original_hypothesis[h])
}else if(sum(RcheckCol!=0)==1){ # use multivariate Student distributions
P1 <- which(RcheckCol!=0)
# prior hyperparameters
df0 <- 1 # should be the same as sum(rep(bj,times=Nj))-K-P+1
Scale0 <- S_b[P1,P1]*tXXi_b
mean0 <- Mean0[,P1]
# posterior hyperparameters
dfN <- N-K-P+1
ScaleN <- S[P1,P1]*tXXi/(N-K-P+1) # off-diagonal elements have no meaning
meanN <- as.matrix(c(BetaHat[,P1]))
# exclude inactive rows
if(is.null(RrE[[h]])){RrE_h=NULL
}else{
if(nrow(RrE[[h]])==1){
RrE_h <- t(as.matrix(RrE[[h]][,c((P1-1)*K+1:K,P*K+1)]))
}else{
RrE_h <- RrE[[h]][,c((P1-1)*K+1:K,P*K+1)]
}
}
if(is.null(RrO[[h]])){RrO_h=NULL
}else{
if(nrow(RrO[[h]])==1){
RrO_h <- t(as.matrix(RrO[[h]][,c((P1-1)*K+1:K,P*K+1)]))
}else{
RrO_h <- RrO[[h]][,c((P1-1)*K+1:K,P*K+1)]
}
}
# compute relative measures of fit and complexity
relcomp_h <- Student_measures(mean0,Scale0,df0,RrE_h,RrO_h,names1=matrixnames[,P1],
constraints1=parse_hyp$original_hypothesis[h])
relfit_h <- Student_measures(meanN,ScaleN,dfN,RrE_h,RrO_h,names1=matrixnames[,P1],
constraints1=parse_hyp$original_hypothesis[h])
}else{ #use Matrix-Student distributions with Monte Carlo estimate
df0 <- 1
dfN <- N-K-P+1
relfit_h <- MatrixStudent_measures(Mean1=BetaHat,Scale1=S,tXXi1=tXXi,df1=dfN,RrE1=RrE[[h]],RrO1=RrO[[h]],
Names1=matrix(names_coef,ncol=P),constraints1=parse_hyp$original_hypothesis[h],
MCdraws=1e4)
relcomp_h <- MatrixStudent_measures(Mean1=Mean0,Scale1=S_b,tXXi1=tXXi_b,df1=df0,RrE1=RrE[[h]],RrO1=RrO[[h]],
Names1=matrix(names_coef,ncol=P),constraints1=parse_hyp$original_hypothesis[h],
MCdraws=1e4)
}
return(list(relfit_h,relcomp_h))
}))
relfit <- t(matrix(unlist(relmeasunlist)[rep((0:(numhyp-1))*4,each=2)+rep(1:2,numhyp)],nrow=2))
row.names(relfit) <- parse_hyp$original_hypothesis
colnames(relfit) <- c("f_E","f_O")
relcomp <- t(matrix(unlist(relmeasunlist)[rep((0:(numhyp-1))*4,each=2)+rep(3:4,numhyp)],nrow=2))
row.names(relcomp) <- parse_hyp$original_hypothesis
colnames(relcomp) <- c("c_E","c_O")
# Compute relative fit/complexity for the complement hypothesis
if(complement==TRUE){
relfit <- MatrixStudent_prob_Hc(BetaHat,S,tXXi,N-K-P+1,as.matrix(relfit),RrO)
relcomp <- MatrixStudent_prob_Hc(Mean0,S_b,tXXi_b,1,as.matrix(relcomp),RrO)
hypothesisshort <- unlist(lapply(1:nrow(relfit),function(h) paste0("H",as.character(h))))
row.names(relfit) <- row.names(relcomp) <- hypothesisshort
}
# the BF for the complement hypothesis vs Hu needs to be computed.
BFtu_confirmatory <- c(apply(relfit / relcomp, 1, prod))
# Check input of prior probabilities
if(is.null(prior.hyp)){
priorprobs <- rep(1/length(BFtu_confirmatory),length(BFtu_confirmatory))
}else{
if(!is.numeric(prior.hyp) || length(prior.hyp)!=length(BFtu_confirmatory)){
warning(paste0("Argument 'prior.hyp' should be numeric and of length ",as.character(length(BFtu_confirmatory)),". Equal prior probabilities are used."))
priorprobs <- rep(1/length(BFtu_confirmatory),length(BFtu_confirmatory))
}else{
priorprobs <- prior.hyp
}
}
names(priorprobs) <- names(BFtu_confirmatory)
PHP_confirmatory <- BFtu_confirmatory*priorprobs / sum(BFtu_confirmatory*priorprobs)
BFtable <- cbind(relcomp,relfit,relfit[,1]/relcomp[,1],relfit[,2]/relcomp[,2],
apply(relfit,1,prod)/apply(relcomp,1,prod),PHP_confirmatory)
row.names(BFtable) <- names(PHP_confirmatory)
colnames(BFtable) <- c("comp_E","comp_O","fit_E","fit_O","BF_E","BF_O","BF","PHP")
BFmatrix_confirmatory <- matrix(rep(BFtu_confirmatory,length(BFtu_confirmatory)),ncol=length(BFtu_confirmatory))/
t(matrix(rep(BFtu_confirmatory,length(BFtu_confirmatory)),ncol=length(BFtu_confirmatory)))
diag(BFmatrix_confirmatory) <- 1
row.names(BFmatrix_confirmatory) <- colnames(BFmatrix_confirmatory) <- names(BFtu_confirmatory)
if(nrow(relfit)==length(parse_hyp$original_hypothesis)){
hypotheses <- parse_hyp$original_hypothesis
}else{
hypotheses <- c(parse_hyp$original_hypothesis,"complement")
}
}else{
# one dependent variable and posterior/prior have Student t distributions
# prior hyperparameters
df0 <- 1 # should be the same as sum(rep(bj,times=Nj))-K-P+1
Scale0 <- kronecker(S_b,tXXi_b)
#default prior location
if(BF.type==2){
mean0 <- ginv(RStack)%*%rStack
}else{
mean0 <- BetaHat #mean0
}
# posterior hyperparameters
dfN <- N-K-P+1
ScaleN <- kronecker(S,tXXi)/(N-K-P+1) # off-diagonal elements have no meaning
meanN <- as.matrix(c(BetaHat))
relcomp <- t(matrix(unlist(lapply(1:numhyp,function(h){
Student_measures(mean1=mean0,Scale1=Scale0,df1=df0,RrE1=RrE[[h]],RrO1=RrO[[h]],
names1=names_coef,constraints1=parse_hyp$original_hypothesis[h])
})),nrow=2))
relfit <- t(matrix(unlist(lapply(1:numhyp,function(h){
Student_measures(meanN,ScaleN,dfN,RrE[[h]],RrO[[h]],
names1=names_coef,constraints1=parse_hyp$original_hypothesis[h])
})),nrow=2))
colnames(relcomp) <- c("c_E","c_O")
colnames(relfit) <- c("f_E","f_O")
row.names(relcomp)[1:numhyp] <- parse_hyp$original_hypothesis
row.names(relfit)[1:numhyp] <- parse_hyp$original_hypothesis
if(complement == TRUE){
# Compute relative fit/complexity for the complement hypothesis
relfit <- Student_prob_Hc(meanN,ScaleN,dfN,relfit,hypothesis,RrO)
relcomp <- Student_prob_Hc(mean0,Scale0,df0,relcomp,hypothesis,RrO)
row.names(relcomp)[1:numhyp] <- parse_hyp$original_hypothesis
row.names(relfit)[1:numhyp] <- parse_hyp$original_hypothesis
}
# the BF for the complement hypothesis vs Hu needs to be computed.
BFtu_confirmatory <- c(apply(relfit / relcomp, 1, prod))
# Check input of prior probabilities
if(is.null(prior.hyp)){
priorprobs <- rep(1/length(BFtu_confirmatory),length(BFtu_confirmatory))
}else{
if(!is.numeric(prior.hyp) || length(prior.hyp)!=length(BFtu_confirmatory)){
warning(paste0("Argument 'prior.hyp' should be numeric and of length ",as.character(length(BFtu_confirmatory)),". Equal prior probabilities are used."))
priorprobs <- rep(1/length(BFtu_confirmatory),length(BFtu_confirmatory))
}else{
priorprobs <- prior.hyp
}
}
names(priorprobs) <- names(BFtu_confirmatory)
PHP_confirmatory <- BFtu_confirmatory*priorprobs / sum(BFtu_confirmatory*priorprobs)
BFtable <- cbind(relcomp,relfit,relfit[,1]/relcomp[,1],relfit[,2]/relcomp[,2],
BFtu_confirmatory,PHP_confirmatory)
row.names(BFtable) <- names(BFtu_confirmatory)
colnames(BFtable) <- c("complex=","complex>","fit=","fit>","BF=","BF>","BF","PHP")
BFmatrix_confirmatory <- matrix(rep(BFtu_confirmatory,length(BFtu_confirmatory)),ncol=length(BFtu_confirmatory))/
t(matrix(rep(BFtu_confirmatory,length(BFtu_confirmatory)),ncol=length(BFtu_confirmatory)))
diag(BFmatrix_confirmatory) <- 1
row.names(BFmatrix_confirmatory) <- colnames(BFmatrix_confirmatory) <- names(BFtu_confirmatory)
#tested hypotheses
hypotheses <- row.names(relfit)
}
}else{
BFtu_confirmatory <- PHP_confirmatory <- BFmatrix_confirmatory <- relfit <-
relcomp <- hypotheses <- BFtable <- priorprobs <- NULL
}
BFlm_out <- list(
BFtu_exploratory=BFtu_exploratory,
PHP_exploratory=PHP_exploratory,
BFtu_confirmatory=BFtu_confirmatory,
PHP_confirmatory=PHP_confirmatory,
BFmatrix_confirmatory=BFmatrix_confirmatory,
BFtable_confirmatory=BFtable,
prior=priorprobs,
hypotheses=hypotheses,
estimates=postestimates,
model=x,
bayesfactor=bayesfactor,
parameter=testedparameter,
call=match.call())
class(BFlm_out) <- "BF"
return(BFlm_out)
}
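# Illustrative sketch (simulated data; names and the hypothesis string are assumptions):
# exploratory and confirmatory tests on the coefficients of a fitted lm object using
# the generalized adjusted fractional Bayes factor (BF.type = 2).
if (FALSE) {
  set.seed(1)
  dat <- data.frame(y = rnorm(60), x1 = rnorm(60), x2 = rnorm(60))
  fit <- lm(y ~ x1 + x2, data = dat)
  BF(fit, hypothesis = "x1 > x2; x1 = x2", BF.type = 2)
}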
# compute relative measures (fit or complexity) under a Matrix-Student t distribution
MatrixStudent_measures <- function(Mean1,Scale1,tXXi1,df1,RrE1,RrO1,Names1=NULL,
constraints1=NULL,MCdraws=1e4){
# constraints1 = parse_hyp$original_hypothesis
# RrE1 <- matrix(0,nrow=1,ncol=ncol(RrO1))
# RrE1[1,1:2] <- c(-1,1)
K <- nrow(Mean1)
P <- ncol(Mean1)
# vectorize the mean
mean1 <- c(Mean1)
relE <- relO <- 1
if(!is.null(RrE1) && is.null(RrO1)){ #only equality constraints
RE1 <- RrE1[,-(K*P+1)]
if(!is.matrix(RE1)){
RE1 <- matrix(RE1,ncol=K*P)
}
rE1 <- RrE1[,(K*P+1)]
qE1 <- nrow(RE1)
temp1 <- rWishart(MCdraws,df1+P-1,solve(Scale1))
temp2 <- lapply(seq(dim(temp1)[3]), function(x) temp1[,,x])
SigmaList <- lapply(temp2,solve)
covm1_E <- lapply(SigmaList,function(temp) RE1%*%(kronecker(temp,tXXi1))%*%t(RE1) )
mean1_E <- c(RE1 %*% mean1)
relE <- mean(unlist(lapply(covm1_E,function(temp) dmvnorm(rE1,mean=mean1_E,sigma=temp))))
}else{
if(is.null(RrE1) && !is.null(RrO1)){ #only order constraints
RO1 <- RrO1[,-(K*P+1)]
if(!is.matrix(RO1)){
RO1 <- matrix(RO1,ncol=K*P)
}
qO1 <- nrow(RO1)
rO1 <- RrO1[,(K*P+1)]
if(Rank(RO1)==nrow(RO1)){ #RO1 is of full row rank. So use transformation.
Scale1inv <- solve(Scale1)
relO <- unlist(lapply(1:1e3,function(s){
Sigma1 <- solve(rWishart(1,df=df1+P-1,Sigma=Scale1inv)[,,1])
meanO <- c(RO1%*%mean1)
covmO <- RO1%*%kronecker(Sigma1,tXXi1)%*%t(RO1)
pmvnorm(lower=rO1,upper=Inf,mean=meanO,sigma=covmO)[1]
}))
relO <- mean(relO[relO!="NaN"])
}else{ #no linear transformation can be used; pmvt cannot be used. Use bain with a multivariate normal approximation
#compute covariance matrix for multivariate normal distribution
mean1 <- c(Mean1)
names(mean1) <- c(Names1)
if(df1>2){ #posterior measures
covm1 <- kronecker(Scale1,tXXi1)/(df1-2)
bain_res <- bain(x=mean1,hypothesis=constraints1,Sigma=covm1,n=999) #n not used in computation
relO <- bain_res$fit[1,3]
}else if(df1==2){ #posterior measures
covm1 <- kronecker(Scale1,tXXi1)/(df1-1)
bain_res <- bain(x=mean1,hypothesis=constraints1,Sigma=covm1,n=999) #n not used in computation
relO <- bain_res$fit[1,3]
}else{
covm1 <- kronecker(Scale1,tXXi1) #for prior with df1==1, probability independent of common factor of scale1
bain_res <- bain(x=mean1,hypothesis=constraints1,Sigma=covm1,n=df1) #n not used in computation
relO <- bain_res$fit[1,4]
}
# bain1 <- bain::bain(mean1,Sigma1=covm1,RrE1,RrO1,n=10) # choice of n does not matter
# extract posterior probability (Fit_eq) from the bain object
# warning("Check if this works now")
}
}else{ #hypothesis with equality and order constraints
RE1 <- RrE1[,-(K*P+1)]
if(!is.matrix(RE1)){
RE1 <- matrix(RE1,ncol=K*P)
}
rE1 <- RrE1[,(K*P+1)]
qE1 <- nrow(RE1)
RO1 <- RrO1[,-(K*P+1)]
if(!is.matrix(RO1)){
RO1 <- matrix(RO1,ncol=K*P)
}
qO1 <- nrow(RO1)
rO1 <- RrO1[,(K*P+1)]
Rr1 <- rbind(RrE1,RrO1)
R1 <- rbind(RE1,RO1)
r1 <- c(rE1,rO1)
qC1 <- length(r1)
temp1 <- rWishart(MCdraws,df1+P-1,solve(Scale1))
temp2 <- lapply(seq(dim(temp1)[3]), function(x) temp1[,,x])
SigmaList <- lapply(temp2,solve)
covm1_E <- lapply(SigmaList,function(temp) RE1%*%(kronecker(temp,tXXi1))%*%t(RE1) )
mean1_E <- RE1 %*% mean1
relE <- mean(unlist(lapply(covm1_E,function(temp) dmvnorm(rE1,mean=mean1_E,sigma=temp))))
if(Rank(Rr1) == nrow(Rr1)){
covm1_O <- lapply(SigmaList,function(temp) R1%*%(kronecker(temp,tXXi1))%*%t(R1) )
mean1_O <- c(R1%*%mean1)
mean1_OE <- lapply(covm1_O,function(temp){
as.vector(mean1_O[(qE1+1):qC1] +
c(matrix(temp[(qE1+1):qC1,1:qE1],ncol=qE1)%*%solve(temp[1:qE1,1:qE1])%*%(rE1-mean1_E)))
})
covm1_OE <- lapply(covm1_O,function(temp) temp[(qE1+1):qC1,(qE1+1):qC1] -
matrix(temp[(qE1+1):qC1,1:qE1],ncol=qE1)%*%solve(temp[1:qE1,1:qE1])%*%
matrix(temp[1:qE1,(qE1+1):qC1],nrow=qE1))
#check covariance because some can be nonsymmetric due to a generation error
welk1 <- which(unlist(lapply(covm1_OE,function(temp) isSymmetric(temp,
tol = sqrt(.Machine$double.eps),check.attributes = FALSE) &&
min(eigen(temp)$values)>sqrt(.Machine$double.eps) )))
covm1_OE <- covm1_OE[welk1]
mean1_OE <- mean1_OE[welk1]
relO <- mean(mapply(function(mu_temp,Sigma_temp) pmvnorm(lower=rO1,
upper=rep(Inf,qO1),mean=mu_temp,sigma=Sigma_temp)[1],mean1_OE,covm1_OE))
}else{ #use bain for the computation of the probability
mean1 <- c(Mean1)
names(mean1) <- c(Names1)
if(df1>2){ #posterior measures
covm1 <- kronecker(Scale1,tXXi1)/(df1-2)
bain_res <- bain(x=mean1,hypothesis=constraints1,Sigma=covm1,n=999) #n not used in computation
relO <- bain_res$fit[1,3]
}else if(df1==2){ #posterior measures
covm1 <- kronecker(Scale1,tXXi1)/(df1-1)
bain_res <- bain(x=mean1,hypothesis=constraints1,Sigma=covm1,n=999) #n not used in computation
relO <- bain_res$fit[1,3]
}else{
covm1 <- kronecker(Scale1,tXXi1) #for prior with df1==1, probability independent of common factor of scale1
bain_res <- bain(x=mean1,hypothesis=constraints1,Sigma=covm1,n=df1) #n not used in computation
relO <- bain_res$fit[1,4]
}
}
}
}
return(c(relE,relO))
}
# compute relative measures (fit or complexity) under a multivariate Student t distribution
Student_measures <- function(mean1,Scale1,df1,RrE1,RrO1,names1=NULL,constraints1=NULL){ # I think N should also be passed here
K <- length(mean1)
relE <- relO <- 1
if(!is.null(RrE1) && is.null(RrO1)){ #only equality constraints
RE1 <- RrE1[,-(K+1)]
if(!is.matrix(RE1)){
RE1 <- matrix(RE1,ncol=K)
}
rE1 <- RrE1[,(K+1)]
qE1 <- nrow(RE1)
meanE <- RE1%*%mean1
scaleE <- RE1%*%Scale1%*%t(RE1)
relE <- dmvt(rE1,delta=c(meanE),sigma=scaleE,df=df1,log=FALSE)
}
if(is.null(RrE1) && !is.null(RrO1)){ #only order constraints
RO1 <- RrO1[,-(K+1)]
if(!is.matrix(RO1)){
RO1 <- matrix(RO1,ncol=K)
}
qO1 <- nrow(RO1)
rO1 <- RrO1[,(K+1)]
if(Rank(RO1)==nrow(RO1)){ #RO1 is of full row rank. So use transformation.
meanO <- c(RO1%*%mean1)
scaleO <- RO1%*%Scale1%*%t(RO1)
relO <- ifelse(nrow(scaleO)==1,
pt((rO1-meanO)/sqrt(scaleO[1,1]),df=df1,lower.tail=FALSE), #univariate
pmvt(lower=rO1,upper=Inf,delta=meanO,sigma=scaleO,df=df1,type="shifted")) #multivariate
}else{ #no linear transformation can be used; pmvt cannot be used. Use bain with a multivariate normal approximation
#compute covariance matrix for multivariate normal distribution
row.names(mean1) <- names1
if(df1>2){ # we need posterior measures
covm1 <- Scale1/(df1-2)
mean1vec <- c(mean1)
names(mean1vec) <- row.names(mean1)
bain_res <- bain(x=mean1vec,hypothesis=constraints1,Sigma=covm1,n=999) #n not used in computation
relO <- bain_res$fit[1,3]
}else if(df1==2){ # we need posterior measures (there is very little information)
covm1 <- Scale1/(df1-1)
mean1vec <- c(mean1)
names(mean1vec) <- row.names(mean1)
bain_res <- bain(x=mean1vec,hypothesis=constraints1,Sigma=covm1,n=999) #n not used in computation
relO <- bain_res$fit[1,3]
}else{ #then df=1, so we need prior measures
covm1 <- Scale1 #for prior with df1==1, probability independent of common factor of scale1
mean1vec <- c(mean1)
names(mean1vec) <- row.names(mean1)
bain_res <- bain(x=mean1vec,hypothesis=constraints1,Sigma=covm1,n=df1) #n not used in computation
relO <- bain_res$fit[1,4]
}
}
}
if(!is.null(RrE1) && !is.null(RrO1)){ #hypothesis with equality and order constraints
RE1 <- RrE1[,-(K+1)]
if(!is.matrix(RE1)){
RE1 <- matrix(RE1,ncol=K)
}
rE1 <- RrE1[,(K+1)]
qE1 <- nrow(RE1)
RO1 <- RrO1[,-(K+1)]
if(!is.matrix(RO1)){
RO1 <- matrix(RO1,ncol=K)
}
qO1 <- nrow(RO1)
rO1 <- RrO1[,(K+1)]
#a)Transformation matrix
D <- diag(K) - t(RE1) %*% solve(RE1 %*% t(RE1)) %*% RE1
D2 <- unique(round(D, 5))
if(length(as.logical(rowSums(D2 != 0)))==1){
D2 <- matrix(D2[as.logical(rowSums(D2 != 0)),],nrow=1)
}else{
D2 <- D2[as.logical(rowSums(D2 != 0)),]
}
if(!is.matrix(D2)){
D2 <- t(D2)
}
Tm <- rbind(RE1, D2)
#b)
Tmean1 <- Tm %*% mean1
Tscale1 <- Tm %*% Scale1 %*% t(Tm)
# relative measure for equalities
relE <- dmvt(x = t(rE1), delta = Tmean1[1:qE1], sigma = matrix(Tscale1[1:qE1, 1:qE1], ncol = qE1), df = df1, log = FALSE)
# transform order constraints
RO1tilde <- RO1 %*% ginv(D2)
rO1tilde <- rO1 - RO1 %*% ginv(RE1) %*% rE1
# Partitioning equality part and order part
Tmean1E <- Tmean1[1:qE1]
Tmean1O <- Tmean1[(qE1 + 1):K]
Tscale1EE <- as.matrix(Tscale1[1:qE1, 1:qE1],nrow=qE1)
Tscale1OE <- matrix(Tscale1[(qE1 + 1):K, 1:qE1],ncol=qE1)
Tscale1OO <- as.matrix(Tscale1[(qE1 + 1):K, (qE1 + 1):K],ncol=K-qE1)
#conditional location and scale matrix
Tmean1OgE <- Tmean1O + Tscale1OE %*% solve(Tscale1EE) %*% matrix(rE1 - Tmean1E)
Tscale1OgE <- as.vector((df1 + (t(matrix(rE1 - Tmean1E)) %*% solve(Tscale1EE) %*% matrix(rE1 - Tmean1E))) /
(df1 + qE1)) * (Tscale1OO - Tscale1OE %*% solve(Tscale1EE) %*% t(Tscale1OE))
if(Rank(RO1tilde) == nrow(RO1tilde)){
rO1tilde <- as.vector(rO1tilde)
delta_trans <- as.vector(RO1tilde %*% Tmean1OgE)
scale1_trans <- RO1tilde %*% Tscale1OgE %*% t(RO1tilde)
if(nrow(scale1_trans) == 1){ # univariate
relO <- pt((rO1tilde - delta_trans) / sqrt(scale1_trans), df = df1+qE1, lower.tail = FALSE)[1]
} else { # multivariate
relO <- pmvt(lower = rO1tilde, upper = Inf, delta = delta_trans, sigma = scale1_trans, df = df1+qE1, type = "shifted")[1]
}
}else{ #use bain for the computation of the probability
#compute covariance matrix for multivariate normal distribution
row.names(mean1) <- names1
if(df1>2){ # we need posterior measures
covm1 <- Scale1/(df1-2)
mean1vec <- c(mean1)
names(mean1vec) <- row.names(mean1)
bain_res <- bain(x=mean1vec,hypothesis=constraints1,Sigma=covm1,n=999) #n not used in computation
relO <- bain_res$fit[1,3]
}else if(df1==2){ # we need posterior measures (there is very little information)
covm1 <- Scale1/(df1-1)
mean1vec <- c(mean1)
names(mean1vec) <- row.names(mean1)
bain_res <- bain(x=mean1vec,hypothesis=constraints1,Sigma=covm1,n=999) #n not used in computation
relO <- bain_res$fit[1,3]
}else{ #then df=1, so we need prior measures
covm1 <- Scale1 #for prior with df1==1, probability independent of common factor of scale1
mean1vec <- c(mean1)
names(mean1vec) <- row.names(mean1)
bain_res <- bain(x=mean1vec,hypothesis=constraints1,Sigma=covm1,n=df1) #n not used in computation
relO <- bain_res$fit[1,4]
}
}
}
return(c(relE,relO))
}
# The function computes the probability of an unconstrained draw falling in the complement subspace.
MatrixStudent_prob_Hc <- function(Mean1,Scale1,tXXi1,df1,relmeas,RrO1){
P <- ncol(Mean1)
K <- nrow(Mean1)
numpara <- P*K
numhyp <- nrow(relmeas)
# relmeas <- relmeas[1:numhyp,]
which_eq <- relmeas[,1] != 1
if(sum(which_eq)==numhyp){ # Then the complement is equivalent to the unconstrained hypothesis.
relmeas <- rbind(relmeas,rep(1,2))
rownames(relmeas)[numhyp+1] <- "complement"
}else{ # So there is at least one hypothesis with only order constraints
welk <- which(!which_eq)
if(length(welk)==1){ # There is one hypothesis with only order constraints. Hc is complement of this hypothesis.
relmeas <- rbind(relmeas,rep(1,2))
relmeas[numhyp+1,2] <- 1 - relmeas[welk,2]
rownames(relmeas)[numhyp+1] <- "complement"
}else{ # So more than one hypothesis with only order constraints
# First we check whether there is an overlap between the order constrained spaces.
# Caspar, here we need the RE and RO which are lists of
# matrices for equality and order constraints under the hypotheses. We can probably do this
# using the R-code you wrote and a vector of names of the correlations but I don't know
# how exactly. When running your function I also get an error message saying that it
# cannot find the function "rename_function".
draws2 <- 1e4
randomDraws <- rmvnorm(draws2,mean=rep(0,numpara),sigma=diag(numpara))
#get draws that satisfy the constraints of the separate order constrained hypotheses
checksOC <- lapply(welk,function(h){
Rorder <- as.matrix(RrO1[[h]][,-(1+numpara)])
if(ncol(Rorder)==1){
Rorder <- t(Rorder)
}
rorder <- as.matrix(RrO1[[h]][,1+numpara])
apply(randomDraws%*%t(Rorder) > rep(1,draws2)%*%t(rorder),1,prod)
})
checkOCplus <- Reduce("+",checksOC)
if(sum(checkOCplus > 0) < draws2){ #then the joint order constrained hypotheses do not completely cover the parameter space.
if(sum(checkOCplus>1)==0){ # then order constrained spaces are nonoverlapping
relmeas <- rbind(relmeas,rep(1,2))
relmeas[numhyp+1,2] <- 1 - sum(relmeas[welk,2])
rownames(relmeas)[numhyp+1] <- "complement"
}else{ #the order constrained subspaces at least partly overlap
# the function below gives a rough estimate of the posterior probability under Hc;
# a bain type of algorithm would be better, but for now this is adequate.
temp1 <- rWishart(draws2,df1+P-1,solve(Scale1))
temp2 <- lapply(seq(dim(temp1)[3]), function(x) temp1[,,x])
SigmaList <- lapply(temp2,solve)
randomDraws <- matrix(unlist(lapply(SigmaList,function(temp){
rmvnorm(1,mean=c(Mean1),sigma=kronecker(temp,tXXi1))
})),nrow=numpara)
checksOC <- lapply(welk,function(h){
Rorder <- as.matrix(RrO1[[h]][,-(1+numpara)])
if(ncol(Rorder)==1){
Rorder <- t(Rorder)
}
rorder <- as.matrix(RrO1[[h]][,1+numpara])
apply(Rorder%*%randomDraws > rorder%*%t(rep(1,draws2)),2,prod)
})
relmeas <- rbind(relmeas,rep(1,2))
relmeas[numhyp+1,] <- c(1,sum(Reduce("+",checksOC)==0)/draws2)
rownames(relmeas)[numhyp+1] <- "complement"
}
}
}
}
return(relmeas)
}
# The function computes the probability of an unconstrained draw falling in the complement subspace.
Student_prob_Hc <- function(mean1,scale1,df1,relmeas1,constraints,RrO1=NULL){
numpara <- length(mean1)
numhyp <- nrow(relmeas1)
if(numhyp==1){
relmeas <- t(relmeas1[1:numhyp,])
}else{ relmeas <- relmeas1[1:numhyp,]}
which_eq <- relmeas[,1] != 1
if(sum(which_eq)==numhyp){ # Then the complement is equivalent to the unconstrained hypothesis.
relmeas <- rbind(relmeas,rep(1,2))
rownames(relmeas)[numhyp+1] <- "complement"
}else{ # So there is at least one hypothesis with only order constraints
welk <- which(!which_eq)
if(length(welk)==1){ # There is one hypothesis with only order constraints. Hc is complement of this hypothesis.
relmeas <- rbind(relmeas,rep(1,2))
relmeas[numhyp+1,2] <- 1 - relmeas[welk,2]
rownames(relmeas)[numhyp+1] <- "complement"
}else{ # So more than one hypothesis with only order constraints
# First we check whether there is an overlap between the order constrained spaces.
draws2 <- 1e4
randomDraws <- rmvnorm(draws2,mean=rep(0,numpara),sigma=diag(numpara))
#get draws that satisfy the constraints of the separate order constrained hypotheses
checksOC <- lapply(welk,function(h){
Rorder <- as.matrix(RrO1[[h]][,-(1+numpara)])
if(ncol(Rorder)==1){
Rorder <- t(Rorder)
}
rorder <- as.matrix(RrO1[[h]][,1+numpara])
apply(randomDraws%*%t(Rorder) > rep(1,draws2)%*%t(rorder),1,prod)
})
checkOCplus <- Reduce("+",checksOC)
if(sum(checkOCplus > 0) < draws2){ #then the joint order constrained hypotheses do not completely cover the parameter space.
if(sum(checkOCplus>1)==0){ # then order constrained spaces are nonoverlapping
relmeas <- rbind(relmeas,rep(1,2))
relmeas[numhyp+1,2] <- 1 - sum(relmeas[welk,2])
rownames(relmeas)[numhyp+1] <- "complement"
}else{ #the order constrained subspaces at least partly overlap
# the function below gives a rough estimate of the posterior probability under Hc
# a bain type of algorithm would be better of course.
randomDraws <- rmvt(draws2,delta=mean1,sigma=scale1,df=df1)
checksOC <- lapply(welk,function(h){
Rorder <- as.matrix(RrO1[[h]][,-(1+numpara)])
if(ncol(Rorder)==1){
Rorder <- t(Rorder)
}
rorder <- as.matrix(RrO1[[h]][,1+numpara])
apply(randomDraws%*%t(Rorder) > rep(1,draws2)%*%t(rorder),1,prod)
})
relmeas <- rbind(relmeas,rep(1,2))
relmeas[numhyp+1,2] <- sum(Reduce("+",checksOC) == 0) / draws2
rownames(relmeas)[numhyp+1] <- "complement"
}
}
}
}
return(relmeas)
}
# from the output of the constraints in 'parse_hypothesis' create lists for the equality and order matrices
make_RrList <- function(parse_hyp){
numhyp <- length(parse_hyp$hyp_mat)
RrE <- lapply(1:numhyp,function(h){
qE <- parse_hyp$n_constraints[h*2-1]
if(qE==1){
RrE_h <- t(as.matrix(parse_hyp$hyp_mat[[h]][1:qE,]))
}else if(qE>1){
RrE_h <- parse_hyp$hyp_mat[[h]][1:qE,]
}else {RrE_h=NULL}
RrE_h
})
RrO <- lapply(1:numhyp,function(h){
qE <- parse_hyp$n_constraints[h*2-1]
qO <- parse_hyp$n_constraints[h*2]
if(qO==1){
RrO_h <- t(as.matrix(parse_hyp$hyp_mat[[h]][qE+1:qO,]))
}else if(qO>1){
RrO_h <- parse_hyp$hyp_mat[[h]][qE+1:qO,]
}else {RrO_h=NULL}
RrO_h
})
return(list(RrE,RrO))
}
# from the output of the constraints in 'parse_hypothesis' create lists for the equality and order matrices
# different format parse_hyp object
make_RrList2 <- function(parse_hyp2){
numhyp <- length(parse_hyp2$original_hypothesis)
qE <- parse_hyp2$n_constraints[(0:(numhyp-1))*2+1]
qO <- parse_hyp2$n_constraints[(1:numhyp)*2]
RrE <- lapply(1:numhyp,function(h){
startcon <- sum(qE[1:h]+qO[1:h])-qE[h]-qO[h]
if(qE[h]==1){
RrE_h <- t(as.matrix(parse_hyp2$hyp_mat[startcon+1:qE[h],]))
}else if(qE[h]>1){
RrE_h <- parse_hyp2$hyp_mat[startcon+1:qE[h],]
}else {RrE_h=NULL}
RrE_h
})
RrO <- lapply(1:numhyp,function(h){
startcon <- sum(qE[1:h]+qO[1:h])-qE[h]-qO[h]
if(qO[h]==1){
RrO_h <- t(as.matrix(parse_hyp2$hyp_mat[startcon+qE[h]+1:qO[h],]))
}else if(qO[h]>1){
RrO_h <- parse_hyp2$hyp_mat[startcon+qE[h]+1:qO[h],]
}else {RrO_h=NULL}
RrO_h
})
return(list(RrE,RrO))
}
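# For example (sketch), for the hypothesis "x1 > x2" on coefficients (x1, x2, x3),
# the order-constraint part RrO for that hypothesis would contain the single
# augmented row (1, -1, 0, 0), encoding x1 - x2 > 0, while the equality part RrE
# would be NULL.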
# to check whether constraints are conflicting, interval constraints are replaced by equality constraints
interval_RrStack <- function(RrStack){
q1 <- nrow(RrStack)
q2 <- ncol(RrStack)
RrStack_out <- RrStack
if(q1 > 1){
row1 <- 1
while(row1 < q1){
for(row2 in (row1+1):q1){
# print(row2)
if(sum(abs(RrStack_out[row1,-q2] + RrStack_out[row2,-q2]))==0){ # && RrStack_out[row1,q2]!=RrStack_out[row2,q2] ){
#together row1 and row2 imply an interval constraint
whichcol <- abs(RrStack_out[row1,-q2])!=0
whichcol1 <- which(whichcol)
if(sum(whichcol)==1){
welkpos <- ifelse(RrStack_out[row1,c(whichcol,F)]>0,row1,row2)
welkneg <- ifelse(RrStack_out[row1,c(whichcol,F)]<0,row1,row2)
lb <- RrStack_out[welkpos,q2]
ub <- -RrStack_out[welkneg,q2]
RrStack_out[row1,] <- RrStack_out[welkpos,]
RrStack_out[row1,q2] <- (ub+lb)/2
RrStack_out <- RrStack_out[-row2,]
q1 <- q1 - 1
}else{
RrStack_out[row1,q2] <- 0
RrStack_out <- RrStack_out[-row2,]
q1 <- q1 - 1
}
break
}
}
row1 <- row1 + 1
}
}
if(is.matrix(RrStack_out)==F){
RrStack_out <- t(RrStack_out)
}
return(RrStack_out)
}
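# For example (sketch), the pair of order constraints 0 < x1 and x1 < 2, coded as the
# augmented rows (1, 0) and (-1, -2) of [R | r], is recognized as an interval and
# replaced by a single row that places the boundary point at the midpoint x1 = 1,
# so that a common prior location can still be determined.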
params_in_hyp <- function(hyp){
params_in_hyp <- trimws(unique(strsplit(hyp, split = "[ =<>,\\(\\);&\\*+-]+", perl = TRUE)[[1]]))
params_in_hyp <- params_in_hyp[!sapply(params_in_hyp, grepl, pattern = "^[0-9]*\\.?[0-9]+$")]
params_in_hyp[grepl("^[a-zA-Z]", params_in_hyp)]
}
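# For example (sketch), params_in_hyp("x1 > x2 & x2 = 0") returns c("x1", "x2"):
# the string is split on the constraint symbols, numeric literals are dropped, and
# only the tokens that start with a letter are kept.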
|
/scratch/gouwar.j/cran-all/cranData/BFpack/R/BF.lm.R
|
#BF method for polr classes
#' @method BF polr
#' @export
BF.polr <- function(x,
hypothesis = NULL,
prior.hyp = NULL,
complement = TRUE,
...){
#Extract summary statistics
Args <- as.list(match.call()[-1])
get_est <- get_estimates(x)
Args$x <- get_est$estimate
Args$Sigma <- get_est$Sigma[[1]]
Args$n <- nrow(x$fitted.values)
Args$hypothesis <- hypothesis
Args$prior.hyp <- prior.hyp
Args$complement <- complement
out <- do.call(BF, Args)
out$model <- x
out$call <- match.call()
out
}
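# Illustrative sketch (assumes the MASS package and its 'housing' data are available;
# Hess = TRUE stores the Hessian so the error covariance of the estimates is readily
# available):
if (FALSE) {
  fit <- MASS::polr(Sat ~ Infl + Type + Cont, weights = Freq,
                    data = MASS::housing, Hess = TRUE)
  BF(fit)
}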
|
/scratch/gouwar.j/cran-all/cranData/BFpack/R/BF.polr.R
|
#' @method print BF
#' @export
print.BF <- function(x,
digits = 3,
na.print = "", ...){
cat("Call:")
cat("\n")
print(x$call)
cat("\n")
if(is.null(x$BFtu_confirmatory)){
cat("Bayesian hypothesis test","\n", sep = "")
cat("Type: exploratory","\n", sep = "")
cat("Object: ",class(x$model)[1],"\n", sep = "")
cat("Parameter: ",x$parameter,"\n", sep = "")
cat("Method: ",x$bayesfactor,"\n\n", sep = "")
cat("Posterior probabilities:","\n", sep = "")
print(round(x$PHP_exploratory,digits))
cat("\n")
}else{
cat("Bayesian hypothesis test","\n", sep = "")
cat("Type: confirmatory","\n", sep = "")
cat("Object: ",class(x$model)[1],"\n", sep = "")
cat("Parameter: ",x$parameter,"\n", sep = "")
cat("Method: ",x$bayesfactor,"\n\n", sep = "")
cat("Posterior probabilities:")
cat("\n")
PHPmatrix <- as.matrix(round(x$PHP_confirmatory,digits))
colnames(PHPmatrix) <- "Pr(hypothesis|data)"
hypnumbers <- unlist(lapply(1:nrow(PHPmatrix),function(r){
paste0("H",as.character(r))
}))
row.names(PHPmatrix) <- hypnumbers
print(PHPmatrix)
cat("\n")
cat("Evidence matrix (Bayes factors):")
cat("\n")
BFmat <- round(x$BFmatrix_confirmatory,digits)
row.names(BFmat) <- colnames(BFmat) <- hypnumbers
print(BFmat)
cat("\n")
cat("Hypotheses:")
cat("\n")
for(h in 1:length(x$hypotheses)){
cat(paste0(hypnumbers[h],": ",x$hypotheses[h]))
cat("\n")
}
}
}
|
/scratch/gouwar.j/cran-all/cranData/BFpack/R/BF.print.R
|
#' @method summary BF
#' @export
summary.BF <- function(object, ...){
cat("Call:")
cat("\n")
print(object$call)
cat("\n")
digits <- 3
cat("Bayesian hypothesis test","\n", sep = "")
cat("Type: exploratory","\n", sep = "")
cat("Object: ",class(object$model)[1],"\n", sep = "")
cat("Parameter: ",object$parameter,"\n", sep = "")
cat("Method: ",object$bayesfactor,"\n\n", sep = "")
cat("Posterior probabilities:","\n", sep = "")
print(round(object$PHP_exploratory,digits))
cat("\n")
if(!is.null(object$BFtu_confirmatory)){
cat("Bayesian hypothesis test","\n", sep = "")
cat("Type: confirmatory","\n", sep = "")
cat("Object: ",class(object$model)[1],"\n", sep = "")
cat("Parameter: ",object$parameter,"\n", sep = "")
cat("Method: ",object$bayesfactor,"\n\n", sep = "")
cat("Posterior probabilities:")
cat("\n")
PHPmatrix <- as.matrix(round(object$PHP_confirmatory,digits))
colnames(PHPmatrix) <- "Pr(hypothesis|data)"
hypnumbers <- unlist(lapply(1:nrow(PHPmatrix),function(r){
paste0("H",as.character(r))
}))
row.names(PHPmatrix) <- hypnumbers
print(PHPmatrix)
cat("\n")
cat("Evidence matrix (Bayes factors):")
cat("\n")
BFmat <- round(object$BFmatrix_confirmatory,digits)
row.names(BFmat) <- colnames(BFmat) <- hypnumbers
print(BFmat)
cat("\n")
cat("Specification table:")
cat("\n")
BFtable <- round(object$BFtable_confirmatory,digits)
row.names(BFtable) <- hypnumbers
print(BFtable)
cat("\n")
cat("Hypotheses:")
cat("\n")
for(h in 1:length(object$hypotheses)){
cat(paste0(hypnumbers[h],": ",object$hypotheses[h]))
cat("\n")
}
}
}
|
/scratch/gouwar.j/cran-all/cranData/BFpack/R/BF.summary.R
|
#BF method for survreg classes
#' @method BF survreg
#' @export
BF.survreg <- function(x,
hypothesis = NULL,
prior.hyp = NULL,
complement = TRUE,
...){
#Extract summary statistics
Args <- as.list(match.call()[-1])
get_est <- get_estimates(x)
Args$x <- get_est$estimate
Args$Sigma <- get_est$Sigma[[1]]
Args$n <- length(x$y)
Args$hypothesis <- hypothesis
Args$prior.hyp <- prior.hyp
Args$complement <- complement
out <- do.call(BF, Args)
out$model <- x
out$call <- match.call()
out
}
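# Illustrative sketch (assumes the survival package and its 'lung' data are available):
if (FALSE) {
  library(survival)
  fit <- survreg(Surv(time, status) ~ age + sex, data = lung)
  BF(fit)
}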
|
/scratch/gouwar.j/cran-all/cranData/BFpack/R/BF.survreg.R
|
#BF method for zeroinfl classes
#' @method BF zeroinfl
#' @export
BF.zeroinfl <- function(x,
hypothesis = NULL,
prior.hyp = NULL,
complement = TRUE,
...){
#Extract summary statistics
Args <- as.list(match.call()[-1])
get_est <- get_estimates(x)
Args$x <- get_est$estimate
Args$Sigma <- get_est$Sigma[[1]]
Args$n <- length(x$residuals)
Args$hypothesis <- hypothesis
Args$prior.hyp <- prior.hyp
Args$complement <- complement
out <- do.call(BF, Args)
out$model <- x
out$call <- match.call()
out
}
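# Illustrative sketch (assumes the pscl package and its 'bioChemists' data are
# available; the count model uses fem and kid5, the zero-inflation model only an
# intercept):
if (FALSE) {
  fit <- pscl::zeroinfl(art ~ fem + kid5 | 1, data = pscl::bioChemists)
  BF(fit)
}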
|
/scratch/gouwar.j/cran-all/cranData/BFpack/R/BF.zeroinfl.R
|
#' @title Bayesian correlation analysis
#'
#' @name cor_test
#'
#' @description Estimate the unconstrained posterior for the correlations using a joint uniform prior.
#'
#' @param ... matrices (or data frames) of dimensions \emph{n} (observations) by \emph{p} (variables)
#' for different groups (in case of multiple matrices or data frames).
#'
#' @param formula an object of class \code{\link[stats]{formula}}. This allows for including
#' control variables in the model (e.g., \code{~ education}).
#'
#' @param iter number of iterations from posterior (default is 5000).
#'
#' @param burnin number of iterations for burnin (default is 3000).
#'
#' @return list of class \code{cor_test}:
#' \itemize{
#' \item \code{meanF} posterior means of Fisher transform correlations
#' \item \code{covmF} posterior covariance matrix of Fisher transformed correlations
#' \item \code{correstimates} posterior estimates of correlation coefficients
#' \item \code{corrdraws} list of posterior draws of correlation matrices per group
#' \item \code{corrnames} names of all correlations
#' }
#'
#' @examples
#' \donttest{
#' # Bayesian correlation analysis of the first three variables of the 'memory' data
#' fit <- cor_test(BFpack::memory[,1:3])
#'
#' # Bayesian correlation of variables in memory object in BFpack while controlling
#' # for the Cat variable
#' fit <- cor_test(BFpack::memory[,c(1:4)],formula = ~ Cat)
#'
#' # Example of Bayesian estimation of polyserial correlations
#' memory_example <- memory[,c("Im","Rat")]
#' memory_example$Rat <- as.ordered(memory_example$Rat)
#' fit <- cor_test(memory_example)
#'
#' # Bayesian correlation analysis of first three variables in memory data
#' # for two different groups
#' HC <- subset(BFpack::memory[,c(1:3,7)], Group == "HC")[,-4]
#' SZ <- subset(BFpack::memory[,c(1:3,7)], Group == "SZ")[,-4]
#' fit <- cor_test(HC,SZ)
#'
#' }
#' @rdname cor_test
#' @export
cor_test <- function(..., formula = NULL, iter = 5e3, burnin = 3e3){
Y_groups <- list(...)
numG <- length(Y_groups)
if(is.null(formula)){
formula <- ~ 1
}
Xnames <- attr(terms(formula), "term.labels")
whichDV <- lapply(Y_groups,function(y){
unlist(lapply(colnames(y),function(x){sum(x==Xnames)==0}))
})
if(numG>1){ #check that the same number of DVs are present in each group (that's how dimensions are coded)
numDV <- rep(NA,numG)
for(gg in 1:numG){
numDV[gg] <- sum(whichDV[[gg]])
}
if(sum(abs(diff(numDV)))!=0){
stop("Each group should contain same number of dependent variables.")
}
}
#check measurement level of dependent variables, and convert to numericals
P <- sum(whichDV[[1]])
ordi <- numcats <- matrix(0,nrow=numG,ncol=P)
teller <- 1
for(gg in 1:numG){
for(pp in which(whichDV[[gg]])){
if(class(Y_groups[[gg]][,pp])[1] == "numeric" | class(Y_groups[[gg]][,pp])[1] == "integer"){
teller <- teller + 1
}else{
if(class(Y_groups[[gg]][,pp])[1] == "ordered"){
levels(Y_groups[[gg]][,pp]) <- 1:length(levels(Y_groups[[gg]][,pp]))
Y_groups[[gg]][,pp] <- as.numeric(Y_groups[[gg]][,pp])
ordi[gg,teller] <- 1
numcats[gg,teller] <- max(Y_groups[[gg]][,pp])
teller <- teller + 1
if(max(Y_groups[[gg]][,pp])>11){
stop("Ordinal variables are not allowed to have more than 11 categories")
}
}else{
if(class(Y_groups[[gg]][,pp])[1] == "factor"){
if(length(levels(Y_groups[[gg]][,pp]))==2){
levels(Y_groups[[gg]][,pp]) <- 1:length(levels(Y_groups[[gg]][,pp]))
Y_groups[[gg]][,pp] <- as.numeric(Y_groups[[gg]][,pp])
ordi[gg,teller] <- 1
numcats[gg,teller] <- 2
teller <- teller + 1
}else{
stop("Outcome variables should be either of class 'numeric', 'ordered', or a 2-level 'factor'.")
}
}else{
stop("Outcome variables should be either of class 'numeric', 'ordered', or a 2-level 'factor'.")
}
}
}
}
}
#because ordinal variables are not yet supported we set these indicators to '0'
ordi <- numcats <- matrix(0,nrow=numG,ncol=P)
model_matrices <- lapply(seq_len(numG) , function(x) {
model.matrix(formula, Y_groups[[x]])
})
correlate <- remove_predictors_helper(Y_groups = Y_groups, formula)
YXlist <- lapply(1:length(model_matrices),function(g){
list(as.matrix(correlate[[g]]),as.matrix(model_matrices[[g]]))
})
K <- ncol(YXlist[[1]][[2]])
numcorr <- numG*P*(P-1)/2
ngroups <- unlist(lapply(1:numG,function(g){nrow(YXlist[[g]][[1]])}))
Ntot <- max(ngroups)
Ygroups <- array(0,dim=c(numG,Ntot,P))
Xgroups <- array(0,dim=c(numG,Ntot,K))
XtXi <- array(0,dim=c(numG,K,K))
BHat <- array(0,dim=c(numG,K,P))
sdHat <- matrix(0,nrow=numG,ncol=P)
CHat <- array(0,dim=c(numG,P,P))
SumSq <- array(0,dim=c(numG,P,P))
SumSqInv <- array(0,dim=c(numG,P,P))
sdsd <- matrix(0,nrow=numG,ncol=P)
for(g in 1:numG){
Y_g <- YXlist[[g]][[1]]
for(p in 1:P){
if(ordi[g,p]==0){
Y_g[,p] <- c(scale(Y_g[,p]))
}
}
X_g <- YXlist[[g]][[2]]
Ygroups[g,1:ngroups[g],] <- Y_g
#standardize data to get a more stable sampler for the correlations.
tableX <- apply(X_g,2,table)
catX <- unlist(lapply(1:length(tableX),function(xcol){
length(tableX[[xcol]])
}))
if(sum(catX>1)){
X_g[1:ngroups[g],which(catX>1)] <- apply(as.matrix(X_g[1:ngroups[g],which(catX>1)]),2,scale)
}
Xgroups[g,1:ngroups[g],] <- X_g
XtXi[g,,] <- solve(t(X_g)%*%X_g)
BHat[g,,] <- XtXi[g,,]%*%t(X_g)%*%Y_g
SumSq[g,,] <- t(Y_g - X_g%*%BHat[g,,])%*%(Y_g - X_g%*%BHat[g,,])
SumSqInv[g,,] <- solve(SumSq[g,,])
Sigma_g <- SumSq[g,,]/ngroups[g]
sdHat[g,] <- sqrt(diag(Sigma_g))
CHat[g,,] <- diag(1/sdHat[g,])%*%Sigma_g%*%diag(1/sdHat[g,])
#get rough estimate of posterior sd of the standard deviations (used for random walk sd)
drawsSigma_g <- rWishart(1e2,df=ngroups[g],Sigma=SumSqInv[g,,])
sdsd[g,] <- unlist(lapply(1:P,function(p){
sd(sqrt(drawsSigma_g[p,p,]))
}))
}
samsize0 <- iter
gLiuSab <- array(0,dim=c(samsize0,numG,P))
# call Fortran subroutine for Gibbs sampling using noninformative improper priors
# for regression coefficients, Jeffreys priors for standard deviations, and a proper
# joint uniform prior for the correlation matrices.
res <- .Fortran("estimate_postmeancov_fisherz",
postZmean=matrix(0,numcorr,1),
postZcov=matrix(0,numcorr,numcorr),
P=as.integer(P),
numcorr=as.integer(numcorr),
K=as.integer(K),
numG=as.integer(numG),
BHat=round(BHat,3),
sdHat=rbind(sdHat,sdsd),
CHat=round(CHat,3),
XtXi=XtXi,
samsize0=as.integer(samsize0),
Njs=as.integer(ngroups),
Ygroups=Ygroups,
Xgroups=Xgroups,
Ntot=as.integer(Ntot),
C_quantiles=array(0,dim=c(numG,P,P,3)),
sigma_quantiles=array(0,dim=c(numG,P,3)),
B_quantiles=array(0,dim=c(numG,K,P,3)),
BDrawsStore=array(0,dim=c(samsize0,numG,K,P)),
sigmaDrawsStore=array(0,dim=c(samsize0,numG,P)),
CDrawsStore=array(0,dim=c(samsize0,numG,P,P)),
seed=as.integer( sample.int(1e6,1) ))
varnames <- lapply(1:numG,function(g){
names(correlate[[g]])
})
corrnames <- lapply(1:numG,function(g){
matrix(unlist(lapply(1:P,function(p2){
unlist(lapply(1:P,function(p1){
if(numG==1){
paste0(varnames[[g]][p1],"_with_",varnames[[g]][p2])
}else{
paste0(varnames[[g]][p1],"_with_",varnames[[g]][p2],"_in_g",as.character(g))
}
}))
})),nrow=P)
})
FmeansCovCorr <- lapply(1:numG,function(g){
Fdraws_g <- FisherZ(t(matrix(unlist(lapply(1:samsize0,function(s){
res$CDrawsStore[s,g,,][lower.tri(diag(P))]
})),ncol=samsize0)))
mean_g <- apply(Fdraws_g,2,mean)
names(mean_g) <- corrnames[[g]][lower.tri(diag(P))]
covm_g <- cov(Fdraws_g)
return(list(mean_g,covm_g))
})
meansCovCorr <- lapply(1:numG,function(g){
matcor_g <- unlist(lapply(1:(P-1),function(p2){
unlist(lapply((p2+1):P,function(p1){
mean(res$CDrawsStore[,g,p1,p2])
}))
}))
names(matcor_g) <- corrnames[[g]][lower.tri(diag(P))]
return(matcor_g)
})
meanN <- unlist(lapply(1:numG,function(g){
FmeansCovCorr[[g]][[1]]
}))
covmN <- matrix(0,nrow=numcorr,ncol=numcorr)
numcorrg <- numcorr/numG
corrdraws <- lapply(1:numG,function(g){
array_g <- res$CDrawsStore[,g,,]
dimnames(array_g) <- list(NULL,varnames[[g]],varnames[[g]])
return(array_g)
})
for(g in 1:numG){
covmN[(g-1)*numcorrg+1:numcorrg,(g-1)*numcorrg+1:numcorrg] <- FmeansCovCorr[[g]][[2]]
}
# posterior estimates
postestimates_correlations <- Reduce(rbind,
lapply(1:numG,function(g){
means <- meansCovCorr[[g]]
medians <- res$C_quantiles[g,,,2][lower.tri(diag(P))]
lb <- res$C_quantiles[g,,,1][lower.tri(diag(P))]
ub <- res$C_quantiles[g,,,3][lower.tri(diag(P))]
return(cbind(means,medians,lb,ub))
}))
colnames(postestimates_correlations) <- c("mean","median","2.5%","97.5%")
cor_out <- list(meanF=meanN,covmF=covmN,correstimates=postestimates_correlations,
corrdraws=corrdraws,corrnames=corrnames,variables=varnames)
class(cor_out) <- "cor_test"
return(cor_out)
}
#' @importFrom stats terms
remove_predictors_helper <- function(Y_groups, formula){
# number of groups
groups <- length(Y_groups)
# model matrix terms
mm_terms <- attr(terms(formula), "term.labels")
if(length(mm_terms) == 0){
Y_groups
} else {
lapply(seq_len(groups), function(x){
# check for factors
factor_pred <- which(paste0("as.factor(", colnames(Y_groups[[x]]), ")") %in% mm_terms)
# check for non factors
cont_pred <- which(colnames(Y_groups[[x]]) %in% mm_terms)
# remove predictors
Y_groups[[x]][,-c(factor_pred, cont_pred)]
})
}
}
FisherZ <- function(r){.5*log((1+r)/(1-r))}
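# Example (a minimal sketch): FisherZ(0.5) equals 0.5*log(3), about 0.549;
# the inverse transformation is tanh(), so tanh(FisherZ(0.5)) returns 0.5.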
globalVariables(c("Fcor"))
#' @importFrom mvtnorm dmvnorm pmvnorm rmvnorm
#' @importFrom utils globalVariables
#' @importFrom stats dnorm pnorm
#' @importFrom QRM fit.st
#' @method BF cor_test
#' @export
BF.cor_test <- function(x,
hypothesis = NULL,
prior.hyp = NULL,
complement = TRUE,
...){
bayesfactor <- "Bayes factors based on joint uniform priors"
testedparameter <- "correlation coefficients"
P <- dim(x$corrdraws[[1]])[2]
numG <- length(x$corrdraws)
numcorrgroup <- P*(P-1)/2
get_est <- get_estimates(x)
corrmeanN <- get_est$estimate
corrcovmN <- get_est$Sigma[[1]]
# Exploratory testing of correlation coefficients
#get height of prior density at 0 of Fisher transformed correlation
if(sum(P==Fcor$P)==0){
#number of draws to get 1e7 draws for the marginal of 1 Fisher transformation correlation
numdraws <- round(1e7/(P*(P-1)/2))
drawsJU <- draw_ju_r(P,samsize=numdraws,Fisher=1)
approx_studt <- QRM::fit.st(c(drawsJU))$par.ests[c(1,3)]
}else{
approx_studt <- unlist(c(Fcor[which(P==Fcor$P),1:2]))
}
relcomp0 <- dt(0,df=approx_studt[1])/approx_studt[2] # all marginal priors are the same
# compute exploratory BFs
corr_names <- rownames(x$correstimates)
numcorr <- length(corrmeanN)
relfit <- matrix(c(dnorm(0,mean=corrmeanN,sd=sqrt(diag(corrcovmN))),
pnorm(0,mean=corrmeanN,sd=sqrt(diag(corrcovmN))),
1-pnorm(0,mean=corrmeanN,sd=sqrt(diag(corrcovmN)))),ncol=3)
relcomp <- matrix(c(rep(relcomp0,numcorr),rep(.5,numcorr*2)),ncol=3)
colnames(relcomp) <- colnames(relfit) <- c("p(=0)","Pr(<0)","Pr(>0)")
BFtu_exploratory <- relfit / relcomp
row.names(BFtu_exploratory) <- rownames(x$correstimates)
colnames(BFtu_exploratory) <- c("Pr(=0)","Pr(<0)","Pr(>0)")
PHP_exploratory <- round(BFtu_exploratory /
apply(BFtu_exploratory,1,sum),3)
# posterior estimates
postestimates <- x$correstimates
# confirmatory testing if hypothesis argument is used
if(!is.null(hypothesis)){
#check if constraints are formulated on correlations in different populations
#if so, then the correlation names contains the string "_in_g" at the end
params_in_hyp1 <- params_in_hyp(hypothesis)
corr_names <- unlist(lapply(1:length(x$corrnames),function(g){
c(x$corrnames[[g]][lower.tri(x$corrnames[[g]])],
t(x$corrnames[[g]])[lower.tri(x$corrnames[[g]])])
})) #which includes Y1_with_Y2 and Y2_with_Y1
parse_hyp <- parse_hypothesis(corr_names,hypothesis)
parse_hyp$hyp_mat <- do.call(rbind, parse_hyp$hyp_mat)
if(nrow(parse_hyp$hyp_mat)==1){
select1 <- rep(1:numcorrgroup,numG) + rep((0:(numG-1))*2*numcorrgroup,each=numcorrgroup)
select2 <- rep(numcorrgroup+1:numcorrgroup,numG) + rep((0:(numG-1))*2*numcorrgroup,each=numcorrgroup)
parse_hyp$hyp_mat <-
t(as.matrix(c(parse_hyp$hyp_mat[,select1] + parse_hyp$hyp_mat[,select2],parse_hyp$hyp_mat[,numcorrgroup*2*numG+1])))
}else{
#combine equivalent correlations, e.g., cor(Y1,Y2)=corr(Y2,Y1).
select1 <- rep(1:numcorrgroup,numG) + rep((0:(numG-1))*2*numcorrgroup,each=numcorrgroup)
select2 <- rep(numcorrgroup+1:numcorrgroup,numG) + rep((0:(numG-1))*2*numcorrgroup,each=numcorrgroup)
parse_hyp$hyp_mat <-
cbind(parse_hyp$hyp_mat[,select1] + parse_hyp$hyp_mat[,select2],parse_hyp$hyp_mat[,numcorrgroup*2*numG+1])
}
#create coefficient with equality and order constraints
RrList <- make_RrList2(parse_hyp)
RrE <- RrList[[1]]
RrO <- RrList[[2]]
numhyp <- length(RrE)
relfit <- t(matrix(unlist(lapply(1:numhyp,function(h){
Gaussian_measures(corrmeanN,corrcovmN,RrE1=RrE[[h]],RrO1=RrO[[h]])
})),nrow=2))
#names1 and constraints1 ... to fix ...
# approximate unconstrained Fisher transformed correlations with a multivariate Student t
if(numcorrgroup==1){
if(numcorr==1){
Scale0 <- as.matrix(approx_studt[2]**2)
}else{
Scale0 <- diag(rep(approx_studt[2]**2,numG))
}
mean0 <- rep(0,numG)
df0 <- round(approx_studt[1])
}else{
mean0 <- rep(0,numcorrgroup*numG)
Scale0 <- diag(rep(approx_studt[2]**2,numcorrgroup*numG))
df0 <- round(approx_studt[1])
}
relcomp <- t(matrix(unlist(lapply(1:numhyp,function(h){
relcomp_h <- Student_measures(mean1=mean0,
Scale1=Scale0,
df1=df0,
RrE1=RrE[[h]],
RrO1=RrO[[h]])
return(relcomp_h)
})),nrow=2))
row.names(relfit) <- row.names(relcomp) <- parse_hyp$original_hypothesis
if(complement == TRUE){
relfit <- Gaussian_prob_Hc(corrmeanN,corrcovmN,relfit,RrO)
relcomp <- Student_prob_Hc(mean1=mean0,scale1=Scale0,df1=df0,relmeas1=relcomp,constraints=NULL,RrO1=RrO)
}
hypothesisshort <- unlist(lapply(1:nrow(relfit),function(h) paste0("H",as.character(h))))
row.names(relfit) <- hypothesisshort
# the BF for the complement hypothesis vs Hu needs to be computed.
BFtu_confirmatory <- c(apply(relfit / relcomp, 1, prod))
# Check input of prior probabilities
if(is.null(prior.hyp)){
priorprobs <- rep(1/length(BFtu_confirmatory),length(BFtu_confirmatory))
}else{
if(!is.numeric(prior.hyp) || length(prior.hyp)!=length(BFtu_confirmatory)){
warning(paste0("Argument 'prior.hyp' should be numeric and of length ",as.character(length(BFtu_confirmatory)),". Equal prior probabilities are used."))
priorprobs <- rep(1/length(BFtu_confirmatory),length(BFtu_confirmatory))
}else{
priorprobs <- prior.hyp
}
}
names(priorprobs) <- names(BFtu_confirmatory)
PHP_confirmatory <- BFtu_confirmatory*priorprobs / sum(BFtu_confirmatory*priorprobs)
BFtable <- cbind(relcomp,relfit,relfit[,1]/relcomp[,1],relfit[,2]/relcomp[,2],
apply(relfit,1,prod)/apply(relcomp,1,prod),PHP_confirmatory)
row.names(BFtable) <- names(BFtu_confirmatory)
colnames(BFtable) <- c("complex=","complex>","fit=","fit>","BF=","BF>","BF","PHP")
BFmatrix_confirmatory <- matrix(rep(BFtu_confirmatory,length(BFtu_confirmatory)),ncol=length(BFtu_confirmatory))/
t(matrix(rep(BFtu_confirmatory,length(BFtu_confirmatory)),ncol=length(BFtu_confirmatory)))
diag(BFmatrix_confirmatory) <- 1
row.names(BFmatrix_confirmatory) <- colnames(BFmatrix_confirmatory) <- names(BFtu_confirmatory)
if(nrow(relfit)==length(parse_hyp$original_hypothesis)){
hypotheses <- parse_hyp$original_hypothesis
}else{
hypotheses <- c(parse_hyp$original_hypothesis,"complement")
}
}else{
BFtu_confirmatory <- PHP_confirmatory <- BFmatrix_confirmatory <- relfit <-
relcomp <- hypotheses <- BFtable <- priorprobs <- NULL
}
BFcorr_out <- list(
BFtu_exploratory=BFtu_exploratory,
PHP_exploratory=PHP_exploratory,
BFtu_confirmatory=BFtu_confirmatory,
PHP_confirmatory=PHP_confirmatory,
BFmatrix_confirmatory=BFmatrix_confirmatory,
BFtable_confirmatory=BFtable,
prior.hyp=priorprobs,
hypotheses=hypotheses,
estimates=postestimates,
model=x,
bayesfactor=bayesfactor,
parameter=testedparameter,
call=match.call())
class(BFcorr_out) <- "BF"
return(BFcorr_out)
}
#get draws from joint uniform prior in Fisher transformed space
#Call Fortran subroutine in from bct_prior.f90
draw_ju_r <- function(P, samsize=50000, Fisher=1){
testm <- matrix(0,ncol=.5*P*(P-1),nrow=samsize)
# random1 <- rnorm(1)
# random1 <- (random1 - floor(random1))*1e6
res <-.Fortran("draw_ju",P = as.integer(P),
drawscorr=testm,
samsize=as.integer(samsize),
numcorrgroup=as.integer(.5*P*(P-1)),
Fisher=as.integer(Fisher),
seed=as.integer( sample.int(1e6,1) ),PACKAGE="BFpack")
return(res$drawscorr)
}
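# Example (a minimal sketch): for P = 3 variables there are 3 free correlations, so
# draw_ju_r(P = 3, samsize = 1000, Fisher = 1)
# would return a 1000 x 3 matrix of Fisher-transformed prior draws (this requires the
# compiled Fortran routine shipped with BFpack).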
|
/scratch/gouwar.j/cran-all/cranData/BFpack/R/BF_cortest.R
|
#' @title Bayes factors for Bayesian exploratory and confirmatory hypothesis
#' testing
#' @description The \code{BF} function can be used for hypothesis testing and
#' model
#' selection using the Bayes factor. By default exploratory hypothesis tests are
#' performed of whether each model parameter equals zero, is negative, or is
#' positive.
#' Confirmatory hypothesis tests can be executed by specifying hypotheses with
#' equality and/or order constraints on the parameters of interest.
#'
#' @param x An R object containing the outcome of a statistical analysis. Currently, the
#' following objects can be processed: t_test(), bartlett_test(), lm(), aov(),
#' manova(), cor_test(), lmer() (only for testing random intercept variances),
#' glm(), coxph(), survreg(), polr(), zeroinfl(), rma(), ergm(), or named vector objects.
#' In the case \code{x} is a named vector, the arguments \code{Sigma} and \code{n}
#' are also needed. See vignettes for elaborations.
#' @param hypothesis A character string containing the constrained (informative) hypotheses to
#' evaluate in a confirmatory test. The default is NULL, which will result in standard exploratory testing
#' under the model \code{x}.
#' @param prior.hyp A vector specifying the prior probabilities of the hypotheses.
#' The default is NULL which will specify equal prior probabilities.
#' @param complement a logical specifying whether the complement should be added
#' to the tested hypothesis under \code{hypothesis}.
#' @param BF.type An integer that specifies the type of Bayes factor (or prior) that is used for the test.
#' Currently, this argument is only used for models of class 'lm' and 't_test',
#' where \code{BF.type=2} implies an adjusted fractional Bayes factor with a 'fractional prior mean' at the null value (Mulder, 2014),
#' and \code{BF.type=1} implies a regular fractional Bayes factor (based on O'Hagan (1995)) with a 'fractional prior mean' at the MLE.
#' @param Sigma An approximate posterior covariance matrix (e.g., error covariance
#' matrix) of the parameters of interest. This argument is only required when \code{x}
#' is a named vector.
#' @param n The (effective) sample size that was used to acquire the estimates in the named vector
#' \code{x} and the error covariance matrix \code{Sigma}. This argument is only required when \code{x}
#' is a named vector.
#' @param ... Parameters passed to and from other functions.
#' @usage NULL
#' @return The output is an object of class \code{BF}. The object has elements:
#' \itemize{
#' \item BFtu_exploratory: The Bayes factors of the constrained hypotheses against
#' the unconstrained hypothesis in the exploratory test.
#' \item PHP_exploratory: The posterior probabilities of the constrained hypotheses
#' in the exploratory test.
#' \item BFtu_confirmatory: The Bayes factors of the constrained hypotheses against
#' the unconstrained hypothesis in the confirmatory test using the \code{hypothesis}
#' argument.
#' \item PHP_confirmatory: The posterior probabilities of the constrained hypotheses
#' in the confirmatory test using the \code{hypothesis} argument.
#' \item BFmatrix_confirmatory: The evidence matrix which contains the Bayes factors
#' between all possible pairs of hypotheses in the confirmatory test.
#' \item BFtable_confirmatory: The \code{Specification table} (output when printing the
#' \code{summary} of a \code{BF} for a confirmatory test) which contains the different
#' elements of the extended Savage Dickey density ratio where
#' \itemize{
#' \item The first column `\code{complex=}' quantifies the relative complexity of the
#' equality constraints of a hypothesis (the prior density at the equality constraints in the
#' extended Savage Dickey density ratio).
#' \item The second column `\code{complex>}' quantifies the relative complexity of the
#' order constraints of a hypothesis (the prior probability of the order constraints in the extended
#' Savage Dickey density ratio).
#' \item The third column `\code{fit=}' quantifies the relative fit of the equality
#' constraints of a hypothesis (the posterior density at the equality constraints in the extended
#' Savage Dickey density ratio).
#' \item The fourth column `\code{fit>}' quantifies the relative fit of the order
#' constraints of a hypothesis (the posterior probability of the order constraints in the extended
#' Savage Dickey density ratio)
#' \item The fifth column `\code{BF=}' contains the Bayes factor of the equality constraints
#' against the unconstrained hypothesis.
#' \item The sixth column `\code{BF>}' contains the Bayes factor of the order constraints
#' against the unconstrained hypothesis.
#' \item The seventh column `\code{BF}' contains the Bayes factor of the constrained hypothesis
#' against the unconstrained hypothesis.
#' \item The eighth column `\code{PHP}' contains the posterior probabilities of the
#' constrained hypotheses.
#' }
#' \item prior.hyp: The prior probabilities of the constrained hypotheses in a confirmatory test.
#' \item hypotheses: The tested constrained hypotheses in a confirmatory test.
#' \item estimates: The unconstrained estimates.
#' \item model: The input model \code{x}.
#' \item call: The call of the \code{BF} function.
#' }
#' @details The function requires a fitted modeling object. Current analyses
#' that are supported: \code{\link[bain]{t_test}},
#' \code{\link[BFpack]{bartlett_test}},
#' \code{\link[stats]{aov}}, \code{\link[stats]{manova}},
#' \code{\link[stats]{lm}}, \code{mlm},
#' \code{\link[stats]{glm}}, \code{\link[polycor]{hetcor}},
#' \code{\link[lme4]{lmer}}, \code{\link[survival]{coxph}},
#' \code{\link[survival]{survreg}}, \code{\link[ergm]{ergm}},
#' \code{\link[Bergm]{bergm}},
#' \code{\link[pscl]{zeroinfl}}, \code{\link[metafor]{rma}} and \code{\link[MASS]{polr}}.
#'
#' For testing parameters from the results of t_test(), lm(), aov(),
#' manova(), and bartlett_test(), hypothesis testing is done using
#' adjusted fractional Bayes factors (computed using minimal fractions).
#' For testing measures of association (e.g., correlations) via \code{cor_test()},
#' Bayes factors are computed using joint uniform priors on the correlation
#' matrices. For testing intraclass correlations (random intercept variances) via
#' \code{lmer()}, Bayes factors are computed using uniform priors for the intraclass
#' correlations. For all other tests, approximate adjusted fractional Bayes factors
#' (with minimal fractions) are computed using Gaussian approximations, similar as
#' a classical Wald test.
#'
#' @references Mulder, J., D.R. Williams, Gu, X., A. Tomarken,
#' F. Böing-Messing, J.A.O.C. Olsson-Collentine, Marlyne Meyerink, J. Menke,
#' J.-P. Fox, Y. Rosseel, E.J. Wagenmakers, H. Hoijtink., and van Lissa, C.
#' (2021). BFpack: Flexible Bayes Factor Testing of Scientific Theories
#' in R. Journal of Statistical Software. <DOI:10.18637/jss.v100.i18>
#' @examples
#' # EXAMPLE 1. One-sample t test
#' ttest1 <- t_test(therapeutic, mu = 5)
#' print(ttest1)
#' # confirmatory Bayesian one sample t test
#' BF1 <- BF(ttest1, hypothesis = "mu = 5")
#' summary(BF1)
#' # exploratory Bayesian one sample t test
#' BF(ttest1)
#'
#' # EXAMPLE 2. ANOVA
#' aov1 <- aov(price ~ anchor * motivation,data = tvprices)
#' BF1 <- BF(aov1, hypothesis = "anchorrounded = motivationlow;
#' anchorrounded < motivationlow")
#' summary(BF1)
#'
#' # EXAMPLE 3. linear regression
#' lm1 <- lm(mpg ~ cyl + hp + wt, data = mtcars)
#' BF(lm1, hypothesis = "wt < cyl < hp = 0")
#'
#' # EXAMPLE 4. Logistic regression
#' fit <- glm(sent ~ ztrust + zfWHR + zAfro + glasses + attract + maturity +
#' tattoos, family = binomial(), data = wilson)
#' BF1 <- BF(fit, hypothesis = "ztrust > zfWHR > 0;
#' ztrust > 0 & zfWHR = 0")
#' summary(BF1)
#'
#' # EXAMPLE 5. Correlation analysis
#' set.seed(123)
#' cor1 <- cor_test(memory[1:20,1:3])
#' BF1 <- BF(cor1)
#' summary(BF1)
#' BF2 <- BF(cor1, hypothesis = "Wmn_with_Im > Wmn_with_Del > 0;
#' Wmn_with_Im = Wmn_with_Del = 0")
#' summary(BF2)
#'
#' # EXAMPLE 6. Bayes factor testing on a named vector
#' # A Poisson regression model is used to illustrate the computation
#' # of Bayes factors with a named vector as input
#' poisson1 <- glm(formula = breaks ~ wool + tension,
#' data = datasets::warpbreaks, family = poisson)
#' # extract estimates, error covariance matrix, and sample size:
#' estimates <- poisson1$coefficients
#' covmatrix <- vcov(poisson1)
#' samplesize <- nobs(poisson1)
#' # compute Bayes factors on equal/order constrained hypotheses on coefficients
#' BF1 <- BF(estimates, Sigma = covmatrix, n = samplesize, hypothesis =
#' "woolB > tensionM > tensionH; woolB = tensionM = tensionH")
#' summary(BF1)
#' @rdname BF
#' @export
#' @useDynLib BFpack, .registration = TRUE
#'
BF <- function(x, hypothesis, prior.hyp, complement, ...) {
UseMethod("BF", x)
}
|
/scratch/gouwar.j/cran-all/cranData/BFpack/R/BF_methods.R
|
#' @method BF hetcor
#' @export
BF.hetcor <- function(x,
hypothesis = NULL,
prior.hyp = NULL,
complement = TRUE,
...){
get_est <- get_estimates(x)
P <- nrow(x$std.errors)
numcorr <- P*(P-1)/2
estimates <- get_est$estimate
errcov <- get_est$Sigma[[1]]
# use the Fisher transformation for both exploratory and confirmatory tests to get consistent results;
# skewness in the likelihood is ignored.
est.var.F <- do.call(cbind,lapply(1:numcorr,function(c){
draws.norm <- rnorm(1e5,mean=estimates[c],sd=sqrt(errcov[c,c]))
draws.norm.F <- FisherZ(draws.norm[draws.norm < 1 & draws.norm > -1])
return(c(median(draws.norm.F),var(draws.norm.F)))
}))
estimates.F <- est.var.F[1,]
if(numcorr > 1){
errcov.F <- diag(est.var.F[2,])
}else{
errcov.F <- as.matrix(est.var.F[2,])
}
corr_names <- names(get_est$estimate)
matrix_names <- matrix(corr_names,nrow=P)
names(estimates.F) <- colnames(errcov.F) <- row.names(errcov.F) <- corr_names
#exploratory BF testing
relfit <- matrix(c(dnorm(0,mean=estimates.F,sd=sqrt(diag(errcov.F))),
pnorm(0,mean=estimates.F,sd=sqrt(diag(errcov.F))),
1-pnorm(0,mean=estimates.F,sd=sqrt(diag(errcov.F)))),ncol=3)
# get draws from joint uniform prior to compute relative measures
if(sum(P==Fcor$P)==0){
numdraws <- round(1e7/(P*(P-1)/2))
drawsJU <- draw_ju_r(P,samsize=numdraws,Fisher=1)
approx_studt <- QRM::fit.st(c(drawsJU))$par.ests[c(1,3)]
}else{
approx_studt <- unlist(c(Fcor[which(P==Fcor$P),1:2]))
}
relcomp0 <- dt(0,df=approx_studt[1])/approx_studt[2] # all marginal priors are the same
relcomp <- matrix(c(rep(relcomp0,numcorr),rep(.5,numcorr*2)),ncol=3)
row.names(relfit) <- row.names(relcomp) <- names(estimates.F)
BFtu_exploratory <- relfit / relcomp
colnames(BFtu_exploratory) <- c("Pr(=0)","Pr(<0)","Pr(>0)")
PHP_exploratory <- BFtu_exploratory / apply(BFtu_exploratory,1,sum)
#confirmatory BF testing
if(!is.null(hypothesis)){
numG <- 1
numcorrgroup <- numcorr
varnames <- list(row.names(x$correlations))
# get all names combinations for correlations (similar as BF.cor_test)
corrnames <- lapply(1:numG,function(g){
matrix(unlist(lapply(1:P,function(p2){
unlist(lapply(1:P,function(p1){
if(numG==1){
paste0(varnames[[g]][p1],"_with_",varnames[[g]][p2])
}else{
paste0(varnames[[g]][p1],"_with_",varnames[[g]][p2],"_in_g",as.character(g))
}
}))
})),nrow=P)
})
x$corrnames <- corrnames
params_in_hyp1 <- params_in_hyp(hypothesis)
corr_names <- unlist(lapply(1:length(x$corrnames),function(g){
c(x$corrnames[[g]][lower.tri(x$corrnames[[g]])],
t(x$corrnames[[g]])[lower.tri(x$corrnames[[g]])])
})) #which includes Y1_with_Y2 and Y2_with_Y1
parse_hyp <- parse_hypothesis(corr_names,hypothesis)
parse_hyp$hyp_mat <- do.call(rbind, parse_hyp$hyp_mat)
if(nrow(parse_hyp$hyp_mat)==1){
select1 <- rep(1:numcorrgroup,numG) + rep((0:(numG-1))*2*numcorrgroup,each=numcorrgroup)
select2 <- rep(numcorrgroup+1:numcorrgroup,numG) + rep((0:(numG-1))*2*numcorrgroup,each=numcorrgroup)
parse_hyp$hyp_mat <-
t(as.matrix(c(parse_hyp$hyp_mat[,select1] + parse_hyp$hyp_mat[,select2],parse_hyp$hyp_mat[,numcorrgroup*2*numG+1])))
}else{
#combine equivalent correlations, e.g., cor(Y1,Y2)=corr(Y2,Y1).
select1 <- rep(1:numcorrgroup,numG) + rep((0:(numG-1))*2*numcorrgroup,each=numcorrgroup)
select2 <- rep(numcorrgroup+1:numcorrgroup,numG) + rep((0:(numG-1))*2*numcorrgroup,each=numcorrgroup)
parse_hyp$hyp_mat <-
cbind(parse_hyp$hyp_mat[,select1] + parse_hyp$hyp_mat[,select2],parse_hyp$hyp_mat[,numcorrgroup*2*numG+1])
}
#create coefficient with equality and order constraints
RrList <- make_RrList2(parse_hyp)
RrE <- RrList[[1]]
RrO <- RrList[[2]]
numhyp <- length(RrE)
relfit <- t(matrix(unlist(lapply(1:numhyp,function(h){
Gaussian_measures(estimates,errcov,RrE1=RrE[[h]],RrO1=RrO[[h]],names1=names(estimates),
constraints1=parse_hyp$original_hypothesis[h])
})),nrow=2))
# approximate unconstrained Fisher transformed correlations with a multivariate Student t
mean0 <- rep(0,numcorr)
if(numcorr==1){
Scale0 <- as.matrix(approx_studt[2]**2)
df0 <- round(approx_studt[1])
}else{
Scale0 <- diag(rep(approx_studt[2]**2,numcorr))
df0 <- round(approx_studt[1])
}
relcomp <- t(matrix(unlist(lapply(1:numhyp,function(h){
relcomp_h <- Student_measures(mean1=mean0,
Scale1=Scale0,
df1=df0,
RrE1=RrE[[h]],
RrO1=RrO[[h]])
return(relcomp_h)
})),nrow=2))
row.names(relcomp) <- parse_hyp$original_hypothesis
row.names(relfit) <- parse_hyp$original_hypothesis
# evaluation of complement hypothesis
if(complement == TRUE){
relfit <- Gaussian_prob_Hc(estimates.F,errcov.F,relfit,RrO)
relcomp <- Student_prob_Hc(mean1=mean0,scale1=Scale0,df1=df0,relmeas1=relcomp,constraints=NULL,RrO1=RrO)
}
hypothesisshort <- unlist(lapply(1:nrow(relfit),function(h) paste0("H",as.character(h))))
row.names(relfit) <- hypothesisshort
colnames(relcomp) <- c("c_E","c_O")
colnames(relfit) <- c("f_E","f_O")
# computation of exploratory BFs and PHPs
# the BF for the complement hypothesis vs Hu needs to be computed.
BFtu_confirmatory <- c(apply(relfit / relcomp, 1, prod))
# Check input of prior probabilities
if(is.null(prior.hyp)){
priorprobs <- rep(1/length(BFtu_confirmatory),length(BFtu_confirmatory))
}else{
if(!is.numeric(prior.hyp) || length(prior.hyp)!=length(BFtu_confirmatory)){
warning(paste0("Argument 'prior.hyp' should be numeric and of length ",as.character(length(BFtu_confirmatory)),". Equal prior probabilities are used."))
priorprobs <- rep(1/length(BFtu_confirmatory),length(BFtu_confirmatory))
}else{
priorprobs <- prior.hyp
}
}
names(priorprobs) <- names(BFtu_confirmatory)
PHP_confirmatory <- BFtu_confirmatory*priorprobs / sum(BFtu_confirmatory*priorprobs)
BFtable <- cbind(relcomp,relfit,relfit[,1]/relcomp[,1],relfit[,2]/relcomp[,2],
apply(relfit,1,prod)/apply(relcomp,1,prod),PHP_confirmatory)
row.names(BFtable) <- names(BFtu_confirmatory)
colnames(BFtable) <- c("complex=","complex>","fit=","fit>","BF=","BF>","BF","PHP")
BFmatrix_confirmatory <- matrix(rep(BFtu_confirmatory,length(BFtu_confirmatory)),ncol=length(BFtu_confirmatory))/
t(matrix(rep(BFtu_confirmatory,length(BFtu_confirmatory)),ncol=length(BFtu_confirmatory)))
diag(BFmatrix_confirmatory) <- 1
row.names(BFmatrix_confirmatory) <- colnames(BFmatrix_confirmatory) <- names(BFtu_confirmatory)
hypotheses <- row.names(relcomp)
}else{
BFtu_confirmatory <- PHP_confirmatory <- BFmatrix_confirmatory <- relfit <-
relcomp <- hypotheses <- BFtable <- priorprobs <- NULL
}
# Store in output
BF_out <- list(
BFtu_exploratory=BFtu_exploratory,
PHP_exploratory=PHP_exploratory,
BFtu_confirmatory=BFtu_confirmatory,
PHP_confirmatory=PHP_confirmatory,
BFmatrix_confirmatory=BFmatrix_confirmatory,
BFtable_confirmatory=BFtable,
prior.hyp=priorprobs,
hypotheses=hypotheses,
estimates=estimates,
model=x,
bayesfactor="Bayes factors based on joint uniform priors",
parameter="Correlations",
call=match.call())
class(BF_out) <- "BF"
return(BF_out)
}
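# Example usage (a minimal sketch, assuming the 'polycor' package is installed;
# 'mydata' is a hypothetical data frame with numeric and/or ordered factor columns):
# fit <- polycor::hetcor(mydata)
# BF1 <- BF(fit)   # exploratory tests of the (polychoric/polyserial) correlations
# summary(BF1)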
|
/scratch/gouwar.j/cran-all/cranData/BFpack/R/BFhetcor.R
|
### Joris Mulder 2019. Bayes factor testing of multiple random intercept models
### via multiple lmer-objects based on Mulder & Fox (2013, 2019).
### with extension to unbalanced data
#' @importFrom stats rgamma rnorm rbeta dbeta runif
#' @importFrom lme4 getME VarCorr
#' @method BF lmerMod
#' @export
BF.lmerMod <- function(x,
hypothesis = NULL,
prior.hyp = NULL,
complement = TRUE,
...){
numcat <- length(x@cnms)
namescat <- unlist(lapply(1:numcat,function(ca){
x@cnms[[ca]]
}))
if(numcat>1){
iccnames <- unlist(lapply(1:numcat,function(nc){namescat[nc]}))
}else{ iccnames <- "icc" }
#check if names of effects in hypothesis argument match
if(!is.null(hypothesis)){ # confirmatory test with constrained hypotheses on icc's.
test1 <- parse_hypothesis(iccnames,hypothesis)
}
# check if the lmer-model only has a random intercept or category specific random intercepts
Zstack <- Reduce(cbind,getME(x,"mmList"))
if(numcat>1){ #check if the random effects are category specific random intercepts
for(ca in 1:numcat){
freq_ca <- table(Zstack[,ca])
if(sum(abs(sort(as.integer(names(freq_ca))) - c(0,1))) !=0){
stop("only models with a single random intercept or category specific random intercepts are currently supported when testing icc's.")
}
}
}else{
freq_ca <- table(getME(x,"mmList")[[1]])
if(length(names(freq_ca))!=1 || as.integer(names(freq_ca))!=1){
stop("only models with a single random intercept or category specific random intercepts are currently supported when testing icc's.")
}
}
# sort data per cluster
clusterindex <- x@flist[[1]]
if(length(table(table(clusterindex)))>1){
balanced <- FALSE
pvec <- table(clusterindex)
}else{
balanced <- TRUE
pvec <- rep(table(clusterindex)[1],length(table(clusterindex)))
}
levels(clusterindex) <- 1:length(levels(clusterindex))
reorder1 <- order(as.integer(clusterindex))
nclusters <- length(levels(clusterindex)) #total number of groups/clusters
ystack <- getME(x,"y")[reorder1]
Xstack <- as.matrix(getME(x,"X")[reorder1,])
Zstack <- Zstack[reorder1,]
# next sort data per category
if(numcat>1){
# sort rows per category
firstofcluster <- cumsum(c(0,pvec[1:(nclusters-1)])) + 1
catassign <- unlist(lapply(firstofcluster,function(cluster){
ifelse(sum(Zstack[cluster,1:numcat])==1,
which(Zstack[cluster,1:numcat]==1),
0)
}))
if(sum(names(table(catassign))=="0")==0){ #all groups belong to a category
ngroups <- table(catassign)
numgroups <- sum(ngroups)
names(ngroups) <- NULL
#reorder data matrices according to categories
reorder2 <- unlist(lapply(1:numcat,function(ca){
welk_ca <- which(catassign==ca)
names(welk_ca) <- NULL
unlist(lapply(welk_ca,function(cluster){
firstofcluster[cluster]+0:(pvec[cluster]-1)
}))
}))
#update order of group sizes
pvec <- pvec[unlist(lapply(1:numcat,function(ca){
welk_ca <- which(catassign==ca)
}))]
Xstack <- Xstack[reorder2,]
if(is.null(dim(Xstack))){ #then create column matrix from vector
Xstack <- t(t(Xstack))
}
ystack <- ystack[reorder2]
Zstack <- Zstack[reorder2,]
}else{ #only include groups that belong to a category
stop("Some groups don't belong to a group category. Exclude these groups from the data.")
}
}else{
ngroups <- numgroups <- nclusters
}
#transform data matrices with Helmert matrix
firstofcluster0 <- c(0,cumsum(pvec[1:(numgroups-1)]))
zWstack <- do.call(rbind,
lapply(1:length(pvec),function(j){
H_j <- Helmert(pvec[j])
H_j%*%cbind(ystack[firstofcluster0[j]+1:pvec[j]],
as.matrix(Xstack[firstofcluster0[j]+1:pvec[j],]) )
})
)
#extract ML estimates for rho
tau2ML <- unlist(VarCorr(x))
sigma2ML <- attr(VarCorr(x),"sc")**2
rhoML <- tau2ML/(tau2ML+sigma2ML)
shape0 <- c(1,1) # set uniform priors for icc's
cat("First, unconstrained icc analysis...")
cat("\n")
cat("\n")
numuncdraws <- 5e4
marglike_Hu <- MargLikeICC_Hq(rhoML,zW=zWstack,ngroups,pvec,samsize1=numuncdraws,
samsize2=4e4,unique1=1:numcat)
postestimates <- marglike_Hu[[4]]
colnames(postestimates) <- iccnames
cat("Second, exploratory testing of icc's...")
cat("\n")
BFtu_exploratory_icc <- t(matrix(unlist(lapply(1:numcat,function(nc){
cat(paste0(iccnames[nc],"; "))
marglike_explo <- rep(0,3)
if(numcat>1){
unique_c <- rep(1,numcat)
unique_c[nc] <- 0
unique_c[-nc] <- 1:(numcat-1)
}else {
unique_c <- 0
}
# zero icc
marglike_explo[1] <- MargLikeICC_Hq(rhoML,zWstack,ngroups,pvec,unique1=unique_c)[[1]]
# positive icc
marglike_positive <- marglike_Hu[[1]] + log(marglike_Hu$postprobpositive[nc]) -
log(marglike_Hu$priorprobpositive[nc])
marglike_explo[3] <- marglike_positive
# negative icc
marglike_negative <- marglike_Hu[[1]] + log(1-marglike_Hu$postprobpositive[nc]) -
log(1-marglike_Hu$priorprobpositive[nc])
marglike_explo[2] <- marglike_negative
return(exp(marglike_explo - marglike_Hu[[1]]))
})),nrow=3))
colnames(BFtu_exploratory_icc) <- c("icc=0","icc<0","icc>0")
row.names(BFtu_exploratory_icc) <- iccnames
PHP_exploratory_icc <- round(BFtu_exploratory_icc / apply(BFtu_exploratory_icc,1,sum),3)
priorprobs <- rep(1,3)/3 #prior probs for exploratory tests
cat("\n")
cat("\n")
if(!is.null(hypothesis)){ # confirmatory test with constrained hypotheses on icc's.
cat("Third, confirmatory testing of icc's...")
cat("\n")
parse_hyp <- parse_hypothesis(iccnames,hypothesis)
parse_hyp$hyp_mat <- do.call(rbind, parse_hyp$hyp_mat)
RrList <- make_RrList2(parse_hyp)
RrE <- RrList[[1]]
RrO <- RrList[[2]]
# check if icc's are only tested against each other or against zero
numhyp <- length(RrE)
for(h in 1:numhyp){
if(!is.null(RrE[[h]])){
for(r in 1:nrow(RrE[[h]])){
row1 <- RrE[[h]][r,]
if( !(sum(abs(row1))==1 || sum(row1)==0) ){
stop("icc's can only be compared with each other or to zero.")
}
}
}
if(!is.null(RrO[[h]])){
for(r in 1:nrow(RrO[[h]])){
freq1 <- table(sort(RrO[[h]][r,]))
row1 <- RrO[[h]][r,]
if( !(sum(abs(row1))==1 || sum(row1)==0) ){
stop("icc's can only be compared with each other or to zero.")
}
}
}
}
output_marglike_icc <- t(matrix(unlist(lapply(1:numhyp, function(h){
cat(paste0(parse_hyp$original_hypothesis[h],"; "))
cat("\n")
# get unconstrained prior draws, if needed for computing prior probabilities
# in case of only order constraints
pcat <- rep(1:length(ngroups),times=ngroups)
unique1 <- 1:numcat
LB <- unlist(unlist(lapply(1:numcat,function(c){
-1/(max(pvec[pcat==c])-1)
})))
priordraws <- matrix(unlist(lapply(1:numcat,function(c){
rbeta(numuncdraws,1,1) * (1 - LB[c]) + LB[c]
})),ncol=numcat)
# code equal icc's with same integer for marglike2_Hq function
unique_h <- 1:numcat
if(!is.null(RrE[[h]])){
unique_h <- rep(NA,numcat)
zeroMat <- RrE[[h]][which(apply(RrE[[h]],1,sum)==1),]
if(!is.matrix(zeroMat)){
zeroMat <- matrix(zeroMat,nrow=1)
}
unique_h[which(apply(zeroMat,2,sum)==1)] <- 0
teller <- 0
for(row1 in which(apply(RrE[[h]],1,sum)==0)){
welk1 <- which(RrE[[h]][row1,]!=0)
isna_h <- is.na(unique_h[welk1])
if(sum(isna_h)==2){
teller <- teller + 1
unique_h[welk1] <- teller
}else{ #one is already assigned a unique code
unique_h[welk1] <- unique_h[welk1[!isna_h]]
#
}
}
if(sum(is.na(unique_h))>0){ #unconstrained icc's receive unique code
unique_h[is.na(unique_h)] <- teller + 1:sum(is.na(unique_h))
teller <- teller + sum(is.na(unique_h))
}
}
if(!is.null(RrO[[h]])){
unicum <- unique(unique_h[unique_h!=0])
inequalities_h <- matrix(0,nrow=nrow(RrO[[h]]),ncol=max(unicum)+1)
for(u in sort(unicum)){
welk <- which(unique_h == u)
if(length(welk) > 1){
if(nrow(RrO[[h]]) > 1){
inequalities_h[,u] <- apply(RrO[[h]][,which(unique_h == u)],1,sum)
}else{
inequalities_h[,u] <- apply(t(RrO[[h]][,which(unique_h == u)]),1,sum)
}
}else{ #length is 1
inequalities_h[,u] <- RrO[[h]][,which(unique_h == u)]
}
}
} else inequalities_h = 0
if(is.null(RrE[[h]])){ #only order constraints; use output from unconstrained analysis
priorprob_h <- mean(apply(cbind(priordraws,rep(-1,length(numuncdraws)))%*%t(inequalities_h)>0,1,prod))
postprob_h <- mean(apply(cbind(marglike_Hu$postdraws,rep(-1,length(numuncdraws)))%*%
t(inequalities_h)>0,1,prod))
marglike2_h <- list(marglike_Hu[[1]],postprob_h,priorprob_h)
}else{
marglike2_h <- MargLikeICC_Hq(rhoML,zW=zWstack,ngroups,pvec,unique1=unique_h,
inequalities=inequalities_h)[1:3]
}
return(c(unlist(marglike2_h),ifelse(is.null(RrE[[h]]),1,0)))
})),nrow=4))
if(complement == TRUE){
#compute BF for complement hypothesis
if(sum(output_marglike_icc[,4])==0){ #the complement is equivalent to the unconstrained model
output_marglike_icc <- rbind(output_marglike_icc,c(unlist(marglike_Hu)[1:3],1))
} else { #the complement is the complement of the joint of the order hypotheses
which_order <- which(output_marglike_icc[,4]==1)
if(length(which_order)==1){
probs <- output_marglike_icc[which_order,2:3]
marglike_Hc <- marglike_Hu[[1]] #+ log(1-probs[1]) - log(1-probs[2])
output_marglike_icc <- rbind(output_marglike_icc,c(marglike_Hc,1-probs[1],1-probs[2],1))
}else {
probs <- apply(output_marglike_icc[which_order,2:3],2,sum)
marglike_Hc <- marglike_Hu[[1]] #+ log(1-probs[1]) - log(1-probs[2])
output_marglike_icc <- rbind(output_marglike_icc,c(marglike_Hc,1-probs[1],1-probs[2],1))
}
}
row.names(output_marglike_icc) <- c(parse_hyp$original_hypothesis,"complement")
}else{
row.names(output_marglike_icc) <- c(parse_hyp$original_hypothesis)
}
relcomp <- matrix(c(rep(NA,nrow(output_marglike_icc)),output_marglike_icc[,3]),ncol=2)
relfit <- matrix(c(rep(NA,nrow(output_marglike_icc)),output_marglike_icc[,2]),ncol=2)
#compute log marginal likelihood for H* without order constraints
BF_E <- exp(output_marglike_icc[,1] - marglike_Hu[[1]])
output_marglike_icc <- cbind(output_marglike_icc,output_marglike_icc[,1] + log(output_marglike_icc[,2]) -
log(output_marglike_icc[,3]))
BFtu_confirmatory_icc <- exp(output_marglike_icc[,5] - marglike_Hu[[1]])
#compute BFmatrix and PHPs
logBFmatrix <- matrix(rep(output_marglike_icc[,5],numhyp+complement),nrow=numhyp+complement) -
matrix(rep(output_marglike_icc[,5],each=numhyp+complement),nrow=numhyp+complement)
diag(logBFmatrix) <- 0
if(complement == TRUE){
row.names(logBFmatrix) <- colnames(logBFmatrix) <- c(parse_hyp$original_hypothesis,"complement")
}else{
row.names(logBFmatrix) <- colnames(logBFmatrix) <- c(parse_hyp$original_hypothesis)
}
BFmatrix_confirmatory_icc <- round(exp(logBFmatrix),3)
diag(BFmatrix_confirmatory_icc) <- 1
BFta_confirmatory_icc <- exp(output_marglike_icc[,5] - max(output_marglike_icc[,5]))
# Change prior probs in case of default setting
if(is.null(prior.hyp)){
priorprobs <- rep(1/length(BFtu_confirmatory_icc),length(BFtu_confirmatory_icc))
}else{
if(!is.numeric(prior.hyp) || length(prior.hyp)!=length(BFtu_confirmatory_icc)){
warning(paste0("Argument 'prior.hyp' should be numeric and of length ",as.character(length(BFtu_confirmatory_icc)),". Equal prior probabilities are used."))
priorprobs <- rep(1/length(BFtu_confirmatory_icc),length(BFtu_confirmatory_icc))
}else{
priorprobs <- prior.hyp
}
}
PHP_confirmatory_icc <- priorprobs*BFta_confirmatory_icc / sum(priorprobs*BFta_confirmatory_icc)
BFtable <- cbind(relcomp,relfit,BF_E,relfit[,2]/relcomp[,2],
BF_E*relfit[,2]/relcomp[,2],PHP_confirmatory_icc)
row.names(BFtable) <- names(PHP_confirmatory_icc)
colnames(BFtable) <- c("complex=","complex>","fit=","fit>","BF=","BF>","BF","PHP")
hypotheses <- names(BFta_confirmatory_icc)
}else{
BFmatrix_confirmatory_icc <- PHP_confirmatory_icc <- BFtu_confirmatory_icc <- relfit <-
relcomp <- hypotheses <- BFtable <- priorprobs <- NULL
}
BFlm_out <- list(
BFtu_exploratory=BFtu_exploratory_icc,
PHP_exploratory=PHP_exploratory_icc,
BFtu_confirmatory=BFtu_confirmatory_icc,
PHP_confirmatory=PHP_confirmatory_icc,
BFmatrix_confirmatory=BFmatrix_confirmatory_icc,
BFtable_confirmatory=BFtable,
prior.hyp=priorprobs,
hypotheses=hypotheses,
estimates=postestimates,
model=x,
bayesfactor="Bayes factors based on uniform priors",
parameter="intraclass correlations",
call=match.call())
class(BFlm_out) <- "BF"
return(BFlm_out)
}
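# Example usage (a minimal sketch, assuming the 'lme4' package is installed;
# 'sleepstudy' ships with lme4):
# library(lme4)
# fit <- lmer(Reaction ~ 1 + (1 | Subject), data = sleepstudy)
# BF1 <- BF(fit)                          # exploratory test of the intraclass correlation
# BF2 <- BF(fit, hypothesis = "icc > 0")  # the single random intercept is labeled 'icc'
# summary(BF2)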
int_lhood <- function(rhoS,ngroups,pvec,N,K,Wmat1,zvec1,tWW2,tWz2,tzz2){
#the log integrated likelihood
diagDi <- c( (1-rep(rhoS,times=ngroups))/(1+(pvec-1)*rep(rhoS,times=ngroups)) )
tWDiW1 <- t(Wmat1)%*%(diagDi*Wmat1)
tWDiz1 <- t(Wmat1)%*%(diagDi*zvec1)
tzDiz1 <- sum(zvec1**2*diagDi)
if(K > 0){
s2 <- tzDiz1 + tzz2 - c(t(tWDiz1+tWz2)%*%solve(tWDiW1+tWW2)%*%(tWDiz1+tWz2) )
return(
.5*sum(log(diagDi)) - .5*log(det(tWDiW1+tWW2)) - (N-K)/2*log(s2)
)
}else{
s2 <- tzDiz1 + tzz2
return(
.5*sum(log(diagDi)) - (N-K)/2*log(s2)
)
}
}
#' @importFrom stats rnorm rbeta dbeta
MargLikeICC_Hq <- function(rhoML,zW,ngroups,pvec,samsize1=5e4,samsize2=5e4,
unique1,inequalities=0,complement=FALSE){
#E.g., for categories=5, unique1=c(0,2,0,1,1),inequalities=[1 -1 0],
#the hypothesis equals, Hq:rho1=rho3=0, rho4=rho5>rho2
#samsize1 sets the sample size for the number of draws from the proposal distribution for the importance
#sample estimate of the marginal likelihood excluding the inequality constraints.
#samsize2 sets the sample size for computing the probability that the inequality constraints hold,
#and for sampling from the proposal distribution. to get the IS estimate.
numcat <- length(ngroups)
N <- sum(pvec)
zvec <- zW[,1]
Wmat <- as.matrix(zW[,-1])
K <- ncol(Wmat)
numgroups <- sum(ngroups)
firstofcluster <- c(0,cumsum(pvec[1:(numgroups-1)]))+1
restcluster <- (1:N)[-firstofcluster]
zvec1 <- zvec[firstofcluster]
Wmat1 <- as.matrix(Wmat[firstofcluster,])
zvec2 <- zvec[restcluster]
Wmat2 <- as.matrix(Wmat[restcluster,])
tWW2 <- t(Wmat2)%*%Wmat2
tWz2 <- t(Wmat2)%*%zvec2
tzz2 <- sum(zvec2**2)
if(sum(unique1)==0){ #null model with no random effects
marglikeHstar <- int_lhood(rhoS=rep(0,length=numcat),ngroups,pvec,N,K,Wmat1,zvec1,tWW2,tWz2,tzz2) +
lgamma((N-K)/2) - (N-K)/2*log(pi)
postprobpositive <- priorprobpositive <- postestimates <- postdraws <- drawsMat <- NULL
priorprob <- postprob <- 1
}else{
numHrho <- max(unique1)
transMatrix <- matrix(0,nrow=numcat,ncol=numHrho)
for(cc in 1:numHrho){
transMatrix[which(unique1==cc),cc] <- 1
}
#minimal value for rho under H (without order constraints)
pcat <- rep(1:length(ngroups),times=ngroups)
LB <- unlist(unlist(lapply(1:numHrho,function(c){
welk <- which(unique1==c)
-1/(max(pvec[unlist(lapply(1:length(welk),function(j){
which(pcat==welk[j])
}))])-1)
})))
#initial value rho
rhoH0ML <- unlist(lapply(1:numHrho,function(c){
mean(rhoML[which(unique1==c)])
}))
rhoH <- rhoH0ML
rhoS <- c(transMatrix%*%rhoH)
RWsd <- rep(.1,numHrho)
#initial unstandardized posterior (with uniform prior on rhoH)
unpost <- int_lhood(rhoS,ngroups,pvec,N,K,Wmat1,zvec1,tWW2,tWz2,tzz2)
#start sampling rho
check1 <- 300 #check every 'check1' draws whether random walk sd needs to be increased/decreased
acceptMat <- drawsMat <- matrix(0,nrow=samsize1,ncol=numHrho)
#wer <- Sys.time()
for(s in 1:samsize1){
#sampling unique rho's under H
for(j in 1:numHrho){
#random walks
rhoH_can <- rhoH
rhoH_can[j] <- rnorm(1,mean=rhoH[j],sd=RWsd[j])
if(rhoH_can[j]>LB[j] && rhoH_can[j]<1){
#MH acceptance
welk <- which(unique1==j)
rhoS_can <- c(transMatrix%*%rhoH_can)
unpost_can <- int_lhood(rhoS_can,ngroups,pvec,N,K,Wmat1,zvec1,tWW2,tWz2,tzz2)
MHprob <- exp(unpost_can - unpost)
if(runif(1) < MHprob){
rhoH <- rhoH_can
rhoS <- rhoS_can
unpost <- unpost_can
acceptMat[s,j] <- 1
}
}
}
drawsMat[s,] <- rhoH
if(ceiling(s/check1)==s/check1){ #adjust sd of random walk based on acceptance proportions of last 'check1' draws
probs <- apply(as.matrix(acceptMat[(s-check1+1):s,]),2,mean)
upper1 <- .5
lower1 <- .15
RWsd[probs>upper1] <- RWsd[probs>upper1] * ( (probs[probs>upper1]-upper1)/(1-upper1) + 1)
RWsd[probs<lower1] <- RWsd[probs<lower1] * 1/( 2 - (probs[probs<lower1])/lower1 )
}
}
#Sys.time() - wer
discard <- .2
drawsMat <- as.matrix(drawsMat[(discard*samsize1+1):samsize1,]) #discard the first 20% as burn-in
samsize1 <- nrow(drawsMat)
# get a tailored importance sampling estimate
meanICC <- apply(drawsMat,2,mean)
varICC <- apply(drawsMat,2,var)
shape1IM <- - ((LB-meanICC)*((meanICC-1)*meanICC-meanICC*LB+LB+varICC)/((LB-1)*varICC))
shape2IM <- - shape1IM*(meanICC-1)/(meanICC-LB)
postestimates <- rbind(t(apply(drawsMat,2,mean)),
t(apply(drawsMat,2,median)),
apply(drawsMat,2,quantile,probs=c(.025,.975)))
row.names(postestimates)[c(1,2)] <- c("mean","median")
iccnames <- unlist(lapply(1:ncol(drawsMat),function(nc){paste0("icc",as.character(nc))}))
colnames(postestimates) <- iccnames
#compute posterior probs of positive icc of the free parameters
postprobpositive <- apply(drawsMat>0,2,mean)
priorprobpositive <- 1/(1 - LB)
if(is.matrix(inequalities)){
#compute prior probability of order constraints
priordraws <- matrix(unlist(lapply(1:numHrho,function(cc){
rbeta(samsize1,shape1=1,shape2=1)*(1-LB[cc])+LB[cc]
})),ncol=numHrho)
priorprob <- mean(apply(cbind(priordraws,rep(-1,samsize1))%*%t(inequalities) > 0,1,prod) )
priorprob <- priorprob * (1-complement) + (1 - priorprob) * complement
remove(priordraws)
#compute posterior probability of order constraints
postprob <- mean(apply(cbind(drawsMat,rep(-1,samsize1))%*%t(inequalities) > 0,1,prod) )
postprob <- postprob * (1-complement) + (1 - postprob) * complement
}else{
priorprob <- postprob <- 1
}
#wer <- Sys.time()
factor1 <- .6
shape1IM <- shape1IM * factor1
shape2IM <- shape2IM * factor1
ISdraws <- matrix(unlist(lapply(1:numHrho,function(c){
(rbeta(samsize2,shape1IM[c],shape2IM[c]) * (1 - LB[c]) + LB[c] ) * .99999
})),ncol=numHrho)
logintegrands <- unlist(lapply(1:samsize2,function(s){
int_lhood(rhoS=c(transMatrix%*%ISdraws[s,]),ngroups,pvec,N,K,Wmat1,zvec1,tWW2,tWz2,tzz2) +
sum(log(1/(1-LB))) -
sum(dbeta((ISdraws[s,]-LB)/(1-LB),shape1=shape1IM,shape2=shape2IM,log=TRUE) +
log(1/(1-LB)) )
}))
#marginal likelihood for H*, which excludes the order constraints probabilities
marglikeHstar <- log(mean(exp(logintegrands - max(logintegrands)))) +
max(logintegrands) + lgamma((N-K)/2) - (N-K)/2*log(pi)
}
return(list(marglike=marglikeHstar,postprob=postprob,priorprob=priorprob,
postestimates=postestimates,postprobpositive=postprobpositive,
priorprobpositive=priorprobpositive,postdraws=drawsMat))
}
Helmert = function(p){
Helm <- diag(p)
Helm[1,] <- 1/sqrt(p)
for(pp in 2:p){
Helm[pp,1:(pp-1)] <- 1/sqrt(pp*(pp-1))
Helm[pp,pp] <- -(pp-1)/sqrt(pp*(pp-1))
}
return(Helm)
}
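# Example (a minimal sketch): Helmert(3) returns an orthonormal 3 x 3 matrix whose
# first row is the scaled mean contrast:
# round(Helmert(3), 3)
# #        [,1]   [,2]   [,3]
# # [1,]  0.577  0.577  0.577
# # [2,]  0.707 -0.707  0.000
# # [3,]  0.408  0.408 -0.816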
|
/scratch/gouwar.j/cran-all/cranData/BFpack/R/BFicc.R
|
##############################################################################
##### BF testing for meta-analysis based on Van Aert & Mulder (in prep.) #####
##### via the metafor package #####
##############################################################################
#' @importFrom extraDistr rtnorm
#' @method BF rma.uni
#' @export
BF.rma.uni <- function(x,
hypothesis = NULL,
prior.hyp = NULL,
complement = TRUE,
...){
# x should be of class rma.uni
if(class(x)[1]!="rma.uni"){
stop("Only objects of class 'rma.uni' are currently supported.")
}
if(!is.null(x$formula.mods)){
stop("Only an object without moderators is currently supported.")
}
if(!is.null(hypothesis)){
message("Note that confirmatory testing via the 'hypothesis' argument is currently not supported for object of class 'rma.uni'.")
}
### Extract effect sizes and sampling variances from the metafor object
yi <- x$yi
vi <- x$vi
wi <- 1/vi # Weights equal-effects model
typ_vi <- sum(wi*(length(wi)-1))/(sum(wi)^2 - sum(wi^2)) # Typical within-study sampling variance
### Robbie: If statement for methods that are not "EE" or "FE"
if (x$method != "EE" & x$method != "FE")
{
### Minimal rho (add 0.001 to make sure that marginal likelihood and likelihood
# can be evaluated)
rho_min <- (-min(vi)+0.001)/(-min(vi)+0.001+typ_vi)
### Compute likelihood of model with unconstrained delta and rho = 0
logmu0 <- marg_lik(yi = yi, vi = vi, rho = 0, rho_min = rho_min,
typ_vi = typ_vi)
### Compute likelihood of model with delta = 0 and rho unconstrained
logm0u <- get_condpost_rho(yi = yi, vi = vi, rho_min = rho_min,
typ_vi = typ_vi, start_rho = x$I2/100)$logm0u
### Compute marginal likelihood of model with unconstrained delta and unconstrained rho
### Compute posterior probability of rho > 0 and rho < 0, delta unconstrained
post_rho <- get_post_rho(yi = yi, vi = vi, rho_min = rho_min, typ_vi = typ_vi,
start_rho = x$I2/100)
logmuu <- post_rho$logmuu
prior_rho <- 1/(abs(rho_min) + 1)
logmul <- logmuu + log(post_rho$post_rho_l/prior_rho) # Likelihood of model delta unconstrained and rho > 0
logmus <- logmuu + log(post_rho$post_rho_s/(1-prior_rho)) # Likelihood of model delta unconstrained and rho < 0
#get unconstrained estimates
rhodraws <- post_rho$rhodraws
rhostats <- c(mean(rhodraws),median(rhodraws),quantile(rhodraws,.025),quantile(rhodraws,.975))
tau2draws <- rhodraws/(1-rhodraws)*typ_vi # Compute tau2 based on generated I^2-statistic
mean_prior_delta <- 0
sd_prior_delta <- length(yi)/sum(1/(vi+mean(tau2draws)))
mean_delta <- unlist(lapply(1:length(tau2draws), function(i){
(mean_prior_delta/sd_prior_delta^2+sum(yi/(vi+tau2draws[i])))/
(1/sd_prior_delta^2+sum(1/(vi+tau2draws[i])))
}))
sd_delta <- unlist(lapply(1:length(tau2draws), function(i){
1/sqrt(1/sd_prior_delta^2+sum(1/(vi+tau2draws[i])))
}))
deltadraws <- rnorm(length(rhodraws),mean=mean_delta,sd=sd_delta)
deltastats <- c(mean(deltadraws),median(deltadraws),quantile(deltadraws,.025),quantile(deltadraws,.975))
uncestimates <- t(matrix(c(rhostats,deltastats),ncol=2))
row.names(uncestimates) <- c("I^2","mu")
colnames(uncestimates) <- c("mean","median","2.5%","97.5%")
### Compute posterior probability of mu > 0 and mu < 0
postdeltapositive <- mean(deltadraws>0)
logmlu <- logmuu + log(postdeltapositive/.5)
logmsu <- logmuu + log((1-postdeltapositive)/.5)
### Compute Bayes factors model vs. unconstrained mu and I^2
BF0rhoUnc <- exp(logmu0 - logmuu)
BF1rhoUnc <- exp(logmus - logmuu)
BF2rhoUnc <- exp(logmul - logmuu)
BF0deltaUnc <- exp(logm0u - logmuu)
BF1deltaUnc <- exp(logmsu - logmuu)
BF2deltaUnc <- exp(logmlu - logmuu)
BFtu_exploratory <- matrix(c(BF0rhoUnc,BF0deltaUnc,BF1rhoUnc,BF1deltaUnc,BF2rhoUnc,BF2deltaUnc),nrow=2)
PHP_exploratory <- BFtu_exploratory / apply(BFtu_exploratory,1,sum)
colnames(BFtu_exploratory) <- colnames(PHP_exploratory) <- c("Pr(=0)","Pr(<0)","Pr(>0)")
row.names(BFtu_exploratory) <- row.names(PHP_exploratory) <- c("I^2","mu")
BFtu_confirmatory <- PHP_confirmatory <- BFmatrix_confirmatory <- BFtable <-
priorprobs <- hypotheses <- estimates <- NULL
# BF_delta <- diag(rep(1, 3))
# colnames(BF_delta) <- rownames(BF_delta) <- c("H1", "H2", "H3")
# BF_delta[1,2] <- exp(logm0u - logmsu)
# BF_delta[2,1] <- exp(logmsu - logm0u)
# BF_delta[1,3] <- exp(logm0u - logmlu)
# BF_delta[3,1] <- exp(logmlu - logm0u)
# BF_delta[2,3] <- exp(logmsu - logmlu)
# BF_delta[3,2] <- exp(logmlu - logmsu)
# BF_rho <- diag(rep(1, 2))
# colnames(BF_rho) <- rownames(BF_rho) <- c("H1", "H2")
# BF_rho[1,2] <- exp(logmus - logmul)
# BF_rho[2,1] <- exp(logmul - logmus)
# sum_BF_rho <- BF1rhoUnc+BF2rhoUnc
# PP_rho <- matrix(c(BF1rhoUnc/sum_BF_rho, BF2rhoUnc/sum_BF_rho), nrow = 1)
# colnames(PP_rho) <- c("Pr(<0)", "Pr(>0)")
##############################################################################
### Robbie: equal-effect and fixed-effect model
} else if (x$method == "EE" | x$method == "FE")
{
### Compute likelihood of model with delta = 0 and rho = 0
logmu <- lik(yi = yi, vi = vi, delta = 0, rho = 0, rho_min = 0, typ_vi = typ_vi)
### Compute likelihood of model with unconstrained delta and rho = 0
logmu0 <- marg_lik(yi = yi, vi = vi, rho = 0, rho_min = 0,
typ_vi = typ_vi)
mean_prior_delta <- 0
sd_prior_delta <- length(yi)/sum(1/vi)
mean_delta <- (mean_prior_delta/sd_prior_delta^2+sum(yi/vi))/
(1/sd_prior_delta^2+sum(1/vi))
sd_delta <- 1/sqrt(1/sd_prior_delta^2+sum(1/vi))
deltadraws <- rnorm(20000,mean=mean_delta,sd=sd_delta)
deltastats <- c(mean(deltadraws),median(deltadraws),quantile(deltadraws,.025),
quantile(deltadraws,.975))
uncestimates <- t(matrix(deltastats,ncol=1))
row.names(uncestimates) <- "mu"
colnames(uncestimates) <- c("mean","median","2.5%","97.5%")
### Compute posterior probability of mu > 0 and mu < 0
postdeltapositive <- mean(deltadraws>0)
logml <- logmu0 + log(postdeltapositive/.5)
logms <- logmu0 + log((1-postdeltapositive)/.5)
### Compute Bayes factors model vs. unconstrained mu
BF0delta <- exp(logmu - logmu0)
BF1delta <- exp(logms - logmu0)
BF2delta <- exp(logml - logmu0)
BFtu_exploratory <- matrix(c(BF0delta,BF1delta,BF2delta),nrow=1)
PHP_exploratory <- BFtu_exploratory / apply(BFtu_exploratory,1,sum)
colnames(BFtu_exploratory) <- colnames(PHP_exploratory) <- c("Pr(=0)","Pr(<0)","Pr(>0)")
row.names(BFtu_exploratory) <- row.names(PHP_exploratory) <- "mu"
BFtu_confirmatory <- PHP_confirmatory <- BFmatrix_confirmatory <- BFtable <-
priorprobs <- hypotheses <- estimates <- NULL
}
############################
BF_out <- list(
BFtu_exploratory=BFtu_exploratory,
PHP_exploratory=PHP_exploratory,
BFtu_confirmatory=BFtu_confirmatory,
PHP_confirmatory=PHP_confirmatory,
BFmatrix_confirmatory=BFmatrix_confirmatory,
BFtable_confirmatory=BFtable,
prior.hyp=priorprobs,
hypotheses=hypotheses,
estimates=uncestimates,
model=x,
bayesfactor="Bayes factor using uniform prior for icc & unit information prior for effect",
parameter="between-study heterogeneity & effect size",
call=match.call()
#rhodraws = rhodraws,
#deltadraws = deltadraws,
#BF_delta = BF_delta,
#BF_rho = BF_rho,
#PP_rho = PP_rho
)
class(BF_out) <- "BF"
BF_out
}
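# Example usage (a minimal sketch, assuming the 'metafor' package is installed;
# 'dat.bcg' ships with metafor):
# library(metafor)
# dat <- escalc(measure = "RR", ai = tpos, bi = tneg, ci = cpos, di = cneg, data = dat.bcg)
# fit <- rma(yi, vi, data = dat)
# BF1 <- BF(fit)   # exploratory tests of the effect size and between-study heterogeneity
# summary(BF1)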
# rm(list = ls())
#################
### FUNCTIONS ###
#################
### Likelihood where delta is integrated out. JORIS: I made some changes in this to make it more efficient.
marg_lik <- function(yi, vi, rho, rho_min, typ_vi)
{
tau2 <- rho/(1-rho)*typ_vi # Compute tau2 based on rho
wi_star <- 1/(vi+tau2) # Weights random-effects model
delta_hat <- sum(wi_star*yi)/sum(wi_star) # Estimate of delta
k <- length(yi) # Number of studies in meta-analysis
diagSigma <- vi+tau2
diagSigmaInv <- 1/diagSigma
out <- -k/2*log(2*pi) +
-0.5*sum(log(vi+tau2)) +
-0.5*sum((yi-delta_hat)^2 * diagSigmaInv) +
-0.5*log(k/sum(diagSigmaInv)) +
-log(1-rho_min) +
-0.5*sum(diagSigmaInv)*(delta_hat^2-delta_hat^2/(1+1/k)) +
-0.5*log(sum(diagSigmaInv)) +
-0.5*log(1+1/k)
return(out)
}
### Likelihood
lik <- function(yi, vi, delta, rho, rho_min, typ_vi)
{
k <- length(yi) # Number of studies in meta-analysis
tau2 <- rho/(1-rho)*typ_vi # Compute tau2 based on rho
out <- -(k+1)/2*log(2*pi) +
-0.5*sum(log(prod(vi+tau2))) +
-0.5*sum((yi-delta)^2/(vi+tau2)) +
-log(1-rho_min) +
-0.5*log(k/sum((vi+tau2)^(-1))) +
-0.5*delta^2/(k/sum((vi+tau2)^(-1)))
return(out)
}
### General method-of-moments estimate (Eq. 6 in DerSimonian and Kacker, 2007)
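# Two common special cases of this estimator: with weights ai = 1/vi it reduces to the
# DerSimonian-Laird estimator of tau2, and with ai = 1 it gives the Cochran/Hedges
# (variance-component) estimator (see DerSimonian and Kacker, 2007).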
MM <- function(yi, vi, ai)
{
yw <- sum(ai*yi)/sum(ai)
tau2_hat <- (sum(ai*(yi-yw)^2)-(sum(ai*vi)-sum(ai^2*vi)/sum(ai)))/(sum(ai)-sum(ai^2)/sum(ai))
return(tau2_hat)
}
get_post_rho <- function(yi, vi, rho_min, typ_vi, start_rho, iters = 20000)
{
rho_s <- numeric(iters) # Empty object for storing results
check1 <- 100
#burn in
itersBI <- 5000 # length of burn-in
rhoBI <- start_rho # some starting value for rho...
sdstep <- .1
acceptMat <- rep(0, length = iters + itersBI)
rhoBIseq <- rep(0, length = itersBI)
sdstepseq <- rep(0, length = (iters + itersBI) / check1)
sdsteptel <- 1
upper1 <- .5 # define region where the sdstep does not have to be changed
lower1 <- .15
#start burn-in
for(i in 1:itersBI){
#draw candidate from truncated normal
rho_star <- rtnorm(1, mean = rhoBI, sd = sdstep, a = rho_min, b = 1)
#evaluate Metropolis-Hastings acceptance probability
R_MH <- exp( marg_lik(yi = yi, vi = vi, rho = rho_star,
rho_min = rho_min, typ_vi = typ_vi) -
marg_lik(yi = yi, vi = vi, rho = rhoBI,
rho_min = rho_min, typ_vi = typ_vi) ) *
(pnorm(1,mean=rhoBI,sd=sdstep) - pnorm(rho_min,mean=rhoBI,sd=sdstep)) /
(pnorm(1,mean=rho_star,sd=sdstep) - pnorm(rho_min,mean=rho_star,sd=sdstep))
rhoBI <- ifelse(runif(1) < R_MH, rho_star, rhoBI)
acceptMat[i] <- rho_star == rhoBI
rhoBIseq[i] <- rhoBI
#if needed update random walk sd depending on acceptance rate
if(ceiling(i/check1)==i/check1){
probs <- mean(acceptMat[(i-check1+1):i])
if(probs>upper1){
sdstep <- sdstep * ( (probs-upper1)/(1-upper1) + 1)
}else if(probs < lower1){
sdstep <- sdstep * 1 / ( 2 - probs/lower1 )
}
sdstep <- ifelse(sdstep>1,1,sdstep)
sdstepseq[sdsteptel] <- sdstep
sdsteptel <- sdsteptel + 1
}
}
#now actual drawing
rho_s[1] <- rhoBI
for(i in 2:iters){
#draw candidate from truncated normal
rho_star <- rtnorm(1, mean = rho_s[i-1], sd = sdstep, a = rho_min, b = 1)
#evaluate Metropolis-Hastings acceptance probability
R_MH <- exp( marg_lik(yi = yi, vi = vi, rho = rho_star,
rho_min = rho_min, typ_vi = typ_vi) -
marg_lik(yi = yi, vi = vi, rho = rho_s[i-1],
rho_min = rho_min, typ_vi = typ_vi) ) *
(pnorm(1,mean=rho_s[i-1],sd=sdstep) - pnorm(rho_min,mean=rho_s[i-1],sd=sdstep)) /
(pnorm(1,mean=rho_star,sd=sdstep) - pnorm(rho_min,mean=rho_star,sd=sdstep))
rho_s[i] <- ifelse(runif(1)<R_MH,rho_star,rho_s[i-1])
acceptMat[i] <- rho_star==rho_s[i]
#if needed update random walk sd depending on acceptance rate
if(ceiling(i/check1)==i/check1){
probs <- mean(acceptMat[(i-check1+1):i])
if(probs>upper1){
sdstep <- sdstep * ( (probs-upper1)/(1-upper1) + 1)
}else if(probs < lower1){
sdstep <- sdstep * 1/( 2 - probs/lower1 )
}
#given the bounds on rho, sdstep should not have to be larger than 1
#this also avoids unlimited growth of sdstep in case of relatively diffuse distribution
sdstep <- ifelse(sdstep > 1,1,sdstep)
sdstepseq[sdsteptel] <- sdstep
sdsteptel <- sdsteptel + 1
}
}
### Compute posterior model probabilities
post_rho_s <- mean(rho_s < 0)
post_rho_l <- 1 - post_rho_s
### Compute marginal likelihood of rho and delta unconstrained
meanICC <- mean(rho_s)
varICC <- var(rho_s)
shape1IM <- - ((rho_min-meanICC)*((meanICC-1)*meanICC-meanICC*rho_min+rho_min+varICC)/
((rho_min-1)*varICC))
shape2IM <- - shape1IM*(meanICC-1)/(meanICC-rho_min)
factor1 <- .6
shape1IM <- shape1IM * factor1
shape2IM <- shape2IM * factor1
ISnum <- 1e4
#importance sampler from stretched beta distribution
ISdraws <- (rbeta(ISnum,shape1IM,shape2IM) * (1 - rho_min) + rho_min) * .99999
logintegrands <- unlist(lapply(1:ISnum,function(s){
marg_lik(yi = yi, vi = vi, rho = ISdraws[s], rho_min = rho_min, typ_vi = typ_vi) -
dbeta((ISdraws[s]-rho_min)/(1-rho_min),shape1=shape1IM,shape2=shape2IM,log=TRUE) +
log(1/(1-rho_min))
}))
#Sys.time() - wer
logmuu <- log(mean(exp(logintegrands-max(logintegrands)))) + max(logintegrands)
return(list(post_rho_s = post_rho_s, post_rho_l = post_rho_l, rhodraws = rho_s, logmuu = logmuu))
}
get_condpost_rho <- function(yi, vi, rho_min, typ_vi, start_rho, iters = 20000)
{
#condition on delta = 0
rho_s <- numeric(iters) # Empty object for storing results
check1 <- 100
#burn in
itersBI <- 5000 # length of burn-in
rhoBI <- start_rho # some starting value for rho...
sdstep <- .1
acceptMat <- rep(0, length = iters + itersBI)
rhoBIseq <- rep(0, length = itersBI)
sdstepseq <- rep(0, length = (iters + itersBI) / check1)
sdsteptel <- 1
upper1 <- .5 # define region where the sdstep does not have to be changed
lower1 <- .15
#start burn-in
for(i in 1:itersBI){
#draw candidate from truncated normal
rho_star <- rtnorm(1, mean = rhoBI, sd = sdstep, a = rho_min, b = 1)
#evaluate Metropolis-Hastings acceptance probability
R_MH <- exp(lik(yi = yi, vi = vi, delta = 0, rho = rho_star,
rho_min = rho_min, typ_vi = typ_vi) -
lik(yi = yi, vi = vi, delta = 0, rho = rhoBI,
rho_min = rho_min, typ_vi = typ_vi) ) *
(pnorm(1,mean=rhoBI,sd=sdstep) - pnorm(rho_min,mean=rhoBI,sd=sdstep)) /
(pnorm(1,mean=rho_star,sd=sdstep) - pnorm(rho_min,mean=rho_star,sd=sdstep))
rhoBI <- ifelse(runif(1) < R_MH, rho_star, rhoBI)
acceptMat[i] <- rho_star == rhoBI
rhoBIseq[i] <- rhoBI
#if needed update random walk sd depending on acceptance rate
if(ceiling(i/check1)==i/check1){
probs <- mean(acceptMat[(i-check1+1):i])
if(probs>upper1){
sdstep <- sdstep * ( (probs-upper1)/(1-upper1) + 1)
}else if(probs < lower1){
sdstep <- sdstep * 1 / ( 2 - probs/lower1 )
}
sdstep <- ifelse(sdstep>1,1,sdstep)
sdstepseq[sdsteptel] <- sdstep
sdsteptel <- sdsteptel + 1
}
}
#now actual drawing
rho_s[1] <- rhoBI
for(i in 2:iters){
#draw candidate from truncated normal
rho_star <- rtnorm(1, mean = rho_s[i-1], sd = sdstep, a = rho_min, b = 1)
#evaluate Metropolis-Hastings acceptance probability
R_MH <- exp( lik(yi = yi, vi = vi, delta = 0, rho = rho_star,
rho_min = rho_min, typ_vi = typ_vi) -
lik(yi = yi, vi = vi, delta = 0, rho = rho_s[i-1],
rho_min = rho_min, typ_vi = typ_vi) ) *
(pnorm(1,mean=rho_s[i-1],sd=sdstep) - pnorm(rho_min,mean=rho_s[i-1],sd=sdstep)) /
(pnorm(1,mean=rho_star,sd=sdstep) - pnorm(rho_min,mean=rho_star,sd=sdstep))
rho_s[i] <- ifelse(runif(1) < R_MH, rho_star, rho_s[i-1])
acceptMat[i] <- rho_star==rho_s[i]
#if needed update random walk sd depending on acceptance rate
if(ceiling(i/check1)==i/check1){
probs <- mean(acceptMat[(i-check1+1):i])
if(probs>upper1){
sdstep <- sdstep * ( (probs-upper1)/(1-upper1) + 1)
}else if(probs < lower1){
sdstep <- sdstep * 1/( 2 - probs/lower1 )
}
#given the bounds on rho, sdstep should not have to be larger than 1
#this also avoids unlimited growth of sdstep in case of relatively diffuse distribution
sdstep <- ifelse(sdstep > 1,1,sdstep)
sdstepseq[sdsteptel] <- sdstep
sdsteptel <- sdsteptel + 1
}
}
### Compute marginal likelihood of rho and delta unconstrained
meanICC <- mean(rho_s)
varICC <- var(rho_s)
shape1IM <- - ((rho_min-meanICC)*((meanICC-1)*meanICC-meanICC*rho_min+rho_min+varICC)/
((rho_min-1)*varICC))
shape2IM <- - shape1IM*(meanICC-1)/(meanICC-rho_min)
factor1 <- .6
shape1IM <- shape1IM * factor1
shape2IM <- shape2IM * factor1
ISnum <- 1e4
#importance sampler from stretched beta distribution
ISdraws <- (rbeta(ISnum,shape1IM,shape2IM) * (1 - rho_min) + rho_min) * .99999
logintegrands <- unlist(lapply(1:ISnum,function(s){
lik(yi = yi, vi = vi, delta = 0, rho = ISdraws[s], rho_min = rho_min, typ_vi = typ_vi) -
dbeta((ISdraws[s]-rho_min)/(1-rho_min),shape1=shape1IM,shape2=shape2IM,log=TRUE) +
log(1/(1-rho_min))
}))
#Sys.time() - wer
logm0u <- log(mean(exp(logintegrands-max(logintegrands)))) + max(logintegrands)
return(list(rhodraws = rho_s, logm0u = logm0u))
}
|
/scratch/gouwar.j/cran-all/cranData/BFpack/R/BFmeta.R
|
#' BFpack: Flexible Bayes factor testing of scientific expectations
#'
#'
#' @description The \code{R} package \strong{BFpack} provides tools for exploratory and
#' confirmatory Bayesian hypothesis testing using Bayes factors and posterior probabilities
#' under common statistical models. The main function `BF` needs a fitted model `x` as input
#' argument. Depending on the class of the fitted model, a standard hypothesis test is
#' executed by default. For example, if `x` is a
#' fitted regression model of class `lm` then posterior probabilities are computed of whether
#' each separate coefficient is zero, negative, or positive (assuming equal prior probabilities).
#' If one has specific hypotheses with
#' equality and/or order constraints on the parameters under the fitted model `x` then these
#' can be formulated using the `hypothesis` argument (a character string), possibly together
#' with prior probabilities for the hypotheses via the `prior.hyp` argument (by default all
#' hypotheses are equally likely a priori), and the `complement` argument, a logical stating
#' whether the complement hypothesis should also be included in the test (`TRUE` by default).
#'
#' The package relies on compiled Fortran routines for some of the computations.
#'
#' @references
#' Mulder, J., D.R. Williams, Gu, X., A. Tomarken,
#' F. Böing-Messing, J.A.O.C. Olsson-Collentine, Marlyne Meyerink, J. Menke,
#' J.-P. Fox, Y. Rosseel, E.J. Wagenmakers, H. Hoijtink., and van Lissa, C.
#' (submitted). BFpack: Flexible Bayes Factor Testing of Scientific Theories
#' in R. \url{https://arxiv.org/abs/1911.07728}
#'
#' Mulder, J., van Lissa, C., Gu, X., Olsson-Collentine, A., Boeing-Messing, F., Williams,
#' D. R., Fox, J.-P., Menke, J., et al. (2019). BFpack: Flexible Bayes Factor Testing of
#' Scientific Expectations. (Version 0.2.1) \url{https://CRAN.R-project.org/package=BFpack}
#'
#'
#' @examples
#' \dontrun{
#' # EXAMPLE 1. One-sample t test
#' ttest1 <- t_test(therapeutic, mu = 5)
#' print(ttest1)
#' # confirmatory Bayesian one sample t test
#' BF1 <- BF(ttest1, hypothesis = "mu = 5")
#' summary(BF1)
#' # exploratory Bayesian one sample t test
#' BF(ttest1)
#'
#' # EXAMPLE 2. ANOVA
#' aov1 <- aov(price ~ anchor * motivation,data = tvprices)
#' BF1 <- BF(aov1, hypothesis = "anchorrounded = motivationlow;
#' anchorrounded < motivationlow")
#' summary(BF1)
#'
#' # EXAMPLE 3. Logistic regression
#' fit <- glm(sent ~ ztrust + zfWHR + zAfro + glasses + attract + maturity +
#' tattoos, family = binomial(), data = wilson)
#' BF1 <- BF(fit, hypothesis = "ztrust > zfWHR > 0;
#' ztrust > 0 & zfWHR = 0")
#' summary(BF1)
#'
#' # EXAMPLE 4. Correlation analysis
#' set.seed(123)
#' cor1 <- cor_test(memory[1:20,1:3])
#' BF1 <- BF(cor1)
#' summary(BF1)
#' BF2 <- BF(cor1, hypothesis = "Wmn_with_Im > Wmn_with_Del > 0;
#' Wmn_with_Im = Wmn_with_Del = 0")
#' summary(BF2)
#' }
#'
#' @docType package
#'
#'
#' @name BFpack-package
#'
NULL
|
/scratch/gouwar.j/cran-all/cranData/BFpack/R/BFpack-package.R
|
### Joris Mulder 2019. Bayes factor for a one sample Student t test
### via adjusted FBFs (Mulder, 2014) using (m)lm-objects using a t.test object.
#' @method BF htest
#' @export
BF.htest <-
function(x,
hypothesis = NULL,
prior.hyp = NULL,
...) {
stop("Please use the function t_test() from the 'bain' package for a t-test or bartlett_test() from the 'BFpack' package for a test on group variances.")
}
#' @importFrom stats approxfun
#' @describeIn BF BF S3 method for an object of class 't_test'
#' @method BF t_test
#' @export
BF.t_test <- function(x,
hypothesis = NULL,
prior.hyp = NULL,
complement = TRUE,
BF.type = 2,
...){
numpop <- length(x$estimate)
if(is.null(BF.type)){
stop("The argument 'BF.type' must be the integer 1 (for the fractional BF) or 2 (for the adjusted fractional BF).")
}
if(!is.null(BF.type)){
if(is.na(BF.type) | (BF.type!=1 & BF.type!=2))
stop("The argument 'BF.type' must be the integer 1 (for the fractional BF) or 2 (for the adjusted fractional BF).")
}
if(BF.type==2){
bayesfactor <- "generalized adjusted fractional Bayes factors"
}else{
bayesfactor <- "generalized fractional Bayes factors"
}
if(numpop==1){ #one sample t test
tvalue <- x$statistic
mu0 <- x$null.value
xbar <- x$estimate
df <- x$parameter
n <- df + 1
stderr <- (xbar - mu0) / tvalue #standard error
sigmaML <- stderr*sqrt(n-1)
#evaluation of posterior
relfit0 <- dt((xbar-mu0)/stderr,df=df,log=TRUE) - log(stderr)
relfit1 <- log(1-pt((xbar-mu0)/stderr,df=df))
relfit2 <- log(pt((xbar-mu0)/stderr,df=df))
#evaluation of prior
if(BF.type==2){
relcomp0 <- dt(0,df=1,log=T) - log(sigmaML)
relcomp1 <- relcomp2 <- log(.5)
}else{
relcomp0 <- dt((xbar-mu0)/sigmaML,df=1,log=TRUE) - log(sigmaML)
relcomp1 <- log(1-pt((xbar-mu0)/sigmaML,df=1))
relcomp2 <- log(pt((xbar-mu0)/sigmaML,df=1))
}
#exploratory BFs
if(x$method=="Paired t-test"){
hypotheses_exploratory <- c(paste0("difference=",as.character(mu0)),paste0("difference<",as.character(mu0)),
paste0("difference>",as.character(mu0)))
}else{
hypotheses_exploratory <- c(paste0("mu=",as.character(mu0)),paste0("mu<",as.character(mu0)),paste0("mu>",as.character(mu0)))
}
logBFtu <- c(relfit0-relcomp0,relfit1-relcomp1,relfit2-relcomp2)
names(logBFtu) <- hypotheses_exploratory
BFtu_exploratory <- matrix(exp(logBFtu),nrow=1)
colnames(BFtu_exploratory) <- hypotheses_exploratory
row.names(BFtu_exploratory) <- "BFtu"
PHP_exploratory <- BFtu_exploratory/sum(BFtu_exploratory)
colnames(PHP_exploratory) <- c(paste0("Pr(=",as.character(mu0),")"),paste0("Pr(<",as.character(mu0),")"),
paste0("Pr(>",as.character(mu0),")"))
if(x$method=="Paired t-test"){
row.names(PHP_exploratory) <- "difference"
}else{
row.names(PHP_exploratory) <- "mu"
}
relfit_exploratory <- matrix(c(exp(relfit0),rep(1,3),exp(relfit1),exp(relfit2)),ncol=2)
relcomp_exploratory <- matrix(c(exp(relcomp0),rep(1,3),rep(.5,2)),ncol=2)
row.names(relfit_exploratory) <- row.names(relcomp_exploratory) <- hypotheses_exploratory
colnames(relfit_exploratory) <- c("f=","f>")
colnames(relcomp_exploratory) <- c("c=","c>")
if(!is.null(hypothesis)){ #perform confirmatory tests
#execute via BF.lm
sd1 <- sqrt(n) * (xbar - mu0) / tvalue
y1 <- rnorm(n)
y1 <- y1 - mean(y1)
y1 <- sd1*y1/sd(y1) + xbar
lm1 <- lm(y1 ~ 1)
if(x$method=="Paired t-test"){
names(lm1$coefficients) <- "difference"
}else{
names(lm1$coefficients) <- "mu"
}
BFlm1 <- BF(lm1,hypothesis,prior.hyp=prior.hyp,complement=complement,BF.type=BF.type)
BFtu_confirmatory <- BFlm1$BFtu_confirmatory
PHP_confirmatory <- BFlm1$PHP_confirmatory
BFmatrix_confirmatory <- BFlm1$BFmatrix_confirmatory
BFtable <- BFlm1$BFtable_confirmatory
hypotheses <- row.names(BFtable)
priorprobs <- BFlm1$prior
}
if(x$method=="Paired t-test"){
parameter <- "difference in means"
}else{
parameter <- "mean"
}
}else{ # two samples t test
if(!grepl("Welch",x$method)){ # equal variances assumed
# compute via a lm-analysis
x1 <- c(rep(1,x$n[1]),rep(0,x$n[2]))
y1 <- c(rep(0,x$n[1]),rep(1,x$n[2]))
matx1y1 <- cbind(x1,y1)
draw1 <- rnorm(x$n[1])
out1 <- (draw1 - mean(draw1))/sd(draw1)*sqrt(x$v[1])+x$estimate[1]
draw2 <- rnorm(x$n[2])
out2 <- (draw2 - mean(draw2))/sd(draw2)*sqrt(x$v[2])+x$estimate[2]
out <- c(out2,out1)
# perform the test via a lm object using a factor for the group indicator
# such that the name of the key variable (the difference between the means)
# is called 'difference'
df1 <- data.frame(out=out,differenc=factor(c(rep("a",x$n[2]),rep("e",x$n[1]))))
lm1 <- lm(out ~ differenc,df1)
BFlm1 <- BF(lm1,hypothesis=hypothesis,prior.hyp=prior.hyp,complement=complement,BF.type=BF.type)
BFtu_exploratory <- t(as.matrix(BFlm1$BFtu_exploratory[2,]))
PHP_exploratory <- t(as.matrix(BFlm1$PHP_exploratory[2,]))
row.names(BFtu_exploratory) <- row.names(PHP_exploratory) <- "difference"
#
if(!is.null(hypothesis)){
BFtu_confirmatory <- BFlm1$BFtu_confirmatory
PHP_confirmatory <- BFlm1$PHP_confirmatory
BFmatrix_confirmatory <- BFlm1$BFmatrix_confirmatory
BFtable <- BFlm1$BFtable_confirmatory
hypotheses <- row.names(BFtable)
priorprobs <- BFlm1$prior
}
}else{ #equal variances not assumed. BF.lm cannot be used
meanN <- x$estimate
scaleN <- (x$v)/(x$n)
dfN <- x$n-1
scale0 <- (x$v)*(x$n-1)/(x$n)
nulldiff <- x$null.value
df0 <- rep(1,2)
samsize <- 1e7
drawsN <- rt(samsize,df=dfN[1])*sqrt(scaleN[1]) + meanN[1] - rt(samsize,df=dfN[2])*sqrt(scaleN[2]) - meanN[2]
densN <- approxfun(density(drawsN),yleft=0,yright=0)
relfit0 <- log(densN(nulldiff))
relfit1 <- log(mean(drawsN<nulldiff))
relfit2 <- log(mean(drawsN>nulldiff))
if(BF.type == 2){
draws0 <- rt(samsize,df=df0[1])*sqrt(scale0[1]) - rt(samsize,df=df0[2])*sqrt(scale0[2])
relcomp0 <- log(mean((draws0<1)*(draws0> -1))/2)
relcomp1 <- log(mean(draws0<0))
relcomp2 <- log(mean(draws0>0))
}else{
draws0 <- rt(samsize,df=df0[1])*sqrt(scale0[1]) + meanN[1] - rt(samsize,df=df0[2])*sqrt(scale0[2]) - meanN[2]
relcomp0 <- log(mean((draws0 < nulldiff + 1)*(draws0 > nulldiff - 1))/2)
relcomp1 <- log(mean(draws0<nulldiff))
relcomp2 <- log(mean(draws0>nulldiff))
}
#exploratory Bayes factor test
hypotheses_exploratory <- c("difference=0","difference<0","difference>0")
logBFtu_exploratory <- c(relfit0-relcomp0,relfit1-relcomp1,relfit2-relcomp2)
names(logBFtu_exploratory) <- hypotheses_exploratory
BFtu_exploratory <- exp(logBFtu_exploratory)
PHP_exploratory <- matrix(BFtu_exploratory/sum(BFtu_exploratory),nrow=1)
colnames(PHP_exploratory) <- c("Pr(=0)","Pr(<0)","Pr(>0)")
row.names(PHP_exploratory) <- "difference"
relfit <- matrix(c(exp(relfit0),rep(1,3),exp(relfit1),exp(relfit2)),ncol=2)
relcomp <- matrix(c(exp(relcomp0),rep(1,3),rep(.5,2)),ncol=2)
row.names(relfit) <- row.names(relcomp) <- hypotheses_exploratory
colnames(relfit) <- c("f=","f>")
colnames(relcomp) <- c("c=","c>")
if(!is.null(hypothesis)){
name1 <- "difference"
parse_hyp <- parse_hypothesis(name1,hypothesis)
parse_hyp$hyp_mat <- do.call(rbind, parse_hyp$hyp_mat)
RrList <- make_RrList2(parse_hyp)
RrE <- RrList[[1]]
RrO <- RrList[[2]]
# if(ncol(do.call(rbind,RrE))>2 || ncol(do.call(rbind,RrO))>2){
# stop("hypothesis should be formulated on the only parameter 'difference'.")
# }
relfit <- t(matrix(unlist(lapply(1:length(RrE),function(h){
if(!is.null(RrE[[h]]) & is.null(RrO[[h]])){ #only an equality constraint
nullvalue <- RrE[[h]][1,2]/RrE[[h]][1,1]
relfit_h <- c(log(densN(nullvalue)),0)
}else if(is.null(RrE[[h]]) & !is.null(RrO[[h]])){
relfit_h <- log(c(1,mean(apply(as.matrix(RrO[[h]][,1])%*%t(drawsN) - as.matrix(RrO[[h]][,2])%*%t(rep(1,samsize)) > 0,2,prod)==1)))
}else stop("hypothesis should either contain one equality constraint or inequality constraints on 'difference'.")
return(relfit_h)
})),nrow=2))
#relfit <- exp(relfit)
relcomp <- t(matrix(unlist(lapply(1:length(RrE),function(h){
if(!is.null(RrE[[h]]) & is.null(RrO[[h]])){ #only an equality constraint
nullvalue <- RrE[[h]][1,2]/RrE[[h]][1,1]
relcomp_h <- log(c((sum((draws0<1+nullvalue)*(draws0> -1+nullvalue))/samsize)/2,1))
}else if(is.null(RrE[[h]]) & !is.null(RrO[[h]])){ #order constraint(s)
relcomp_h <- log(c(1,mean(apply(as.matrix(RrO[[h]][,1])%*%t(draws0) - as.matrix(RrO[[h]][,2])%*%t(rep(1,samsize)) > 0,2,prod)==1)))
}else stop("hypothesis should either contain one equality constraint or inequality constraints on 'difference'.")
return(relcomp_h)
})),nrow=2))
#relcomp <- exp(relcomp)
row.names(relfit) <- row.names(relcomp) <- parse_hyp$original_hypothesis
colnames(relfit) <- c("f=","f>")
colnames(relcomp) <- c("c=","c>")
if(complement == TRUE){
#add complement to analysis
welk <- (relcomp==1)[,2]==F
if(sum((relcomp==1)[,2])>0){ #then there are only order hypotheses
relcomp_c <- 1-sum(exp(relcomp[welk,2]))
if(relcomp_c!=0){ # then add complement
relcomp <- rbind(relcomp,c(0,log(relcomp_c)))
relfit_c <- 1-sum(exp(relfit[welk,2]))
relfit <- rbind(relfit,c(0,log(relfit_c)))
row.names(relfit) <- row.names(relcomp) <- c(parse_hyp$original_hypothesis,"complement")
}
}else{ #no order constraints
relcomp <- rbind(relcomp,c(0,0))
relfit <- rbind(relfit,c(0,0))
row.names(relcomp) <- row.names(relfit) <- c(parse_hyp$original_hypothesis,"complement")
}
}
relfit <- exp(relfit)
relcomp <- exp(relcomp)
# Check input of prior probabilies
if(is.null(prior.hyp)){
priorprobs <- rep(1/nrow(relcomp),nrow(relcomp))
}else{
if(!is.numeric(prior.hyp) || length(prior.hyp)!=nrow(relcomp)){
warning(paste0("Argument 'prior.hyp' should be numeric and of length ",as.character(nrow(relcomp)),". Equal prior probabilities are used."))
priorprobs <- rep(1/nrow(relcomp),nrow(relcomp))
}else{
priorprobs <- prior.hyp
}
}
rm(drawsN)
rm(draws0)
#compute Bayes factors and posterior probabilities for confirmatory test
BFtu_confirmatory <- c(apply(relfit / relcomp, 1, prod))
PHP_confirmatory <- BFtu_confirmatory*priorprobs / sum(BFtu_confirmatory*priorprobs)
BFmatrix_confirmatory <- matrix(rep(BFtu_confirmatory,length(BFtu_confirmatory)),ncol=length(BFtu_confirmatory))/
t(matrix(rep(BFtu_confirmatory,length(BFtu_confirmatory)),ncol=length(BFtu_confirmatory)))
diag(BFmatrix_confirmatory) <- 1
row.names(BFmatrix_confirmatory) <- colnames(BFmatrix_confirmatory) <- names(BFtu_confirmatory)
relative_fit <- relfit
relative_complexity <- relcomp
BFtable <- cbind(relative_complexity,relative_fit,relative_fit[,1]/relative_complexity[,1],
relative_fit[,2]/relative_complexity[,2],apply(relative_fit,1,prod)/
apply(relative_complexity,1,prod),PHP_confirmatory)
row.names(BFtable) <- names(BFtu_confirmatory)
colnames(BFtable) <- c("complex=","complex>","fit=","fit>","BF=","BF>","BF","PHP")
hypotheses <- row.names(relative_complexity)
}
}
parameter <- "difference in means"
}
if(is.null(hypothesis)){
BFtu_confirmatory <- PHP_confirmatory <- BFmatrix_confirmatory <- relative_fit <-
relative_complexity <- BFtable <- hypotheses <- priorprobs <- NULL
}
BFlm_out <- list(
BFtu_exploratory=BFtu_exploratory,
PHP_exploratory=PHP_exploratory,
BFtu_confirmatory=BFtu_confirmatory,
PHP_confirmatory=PHP_confirmatory,
BFmatrix_confirmatory=BFmatrix_confirmatory,
BFtable_confirmatory=BFtable,
prior.hyp=priorprobs,
hypotheses=hypotheses,
estimates=x$coefficients,
model=x,
bayesfactor=bayesfactor,
parameter=parameter,
call=match.call())
class(BFlm_out) <- "BF"
return(BFlm_out)
}
|
/scratch/gouwar.j/cran-all/cranData/BFpack/R/BFttest.R
|
#' @title Bartlett Test of Homogeneity of Variances
#' @description Performs Bartlett's test of the null that the
#' variances in each of the groups (samples) are the same.
#'
#'@details \code{x} must be a numeric data vector, and \code{g}
#'must be a vector or factor object of the same length as \code{x}
#'giving the group for the corresponding elements of \code{x}.
#'
#'@section Bartlett test:
#'In order to allow users to enjoy the functionality of BFpack with the familiar
#'stats-function \code{bartlett.test}, we have had to make minor changes to the
#'function \code{bartlett.test.default}. All rights to, and credit for, the
#'function \code{bartlett.test.default}
#'belong to the R Core Team, as indicated in the original license below.
#'We make no claims to copyright and incur no liability with regard to the
#'changes implemented in \code{bartlett_test}.
#'
#'This is the original copyright notice by the R Core Team:
#'File src/library/stats/R/bartlett_test.R
#'Part of the R package, https://www.R-project.org
#'
#'Copyright (C) 1995-2015 The R Core Team
#'
#' This program is free software; you can redistribute it and/or modify
#' it under the terms of the GNU General Public License as published by
#' the Free Software Foundation; either version 2 of the License, or
#' (at your option) any later version.
#'
#' This program is distributed in the hope that it will be useful,
#' but WITHOUT ANY WARRANTY; without even the implied warranty of
#' MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#' GNU General Public License for more details.
#'
#' A copy of the GNU General Public License is available at
#' https://www.R-project.org/Licenses/
#'
#'@aliases bartlett_test bartlett_test.default
#'@param x a numeric vector of data values, or a list of
#'numeric data vectors representing the respective samples,
#'or fitted linear model objects (inheriting from class "lm").
#'@param g a vector or factor object giving the group for
#'the corresponding elements of x. Ignored if x is a list.
#'@param ... further arguments to be passed to or from methods.
#'
#'@return A list with class \code{"bartlett_htest"} containing the following
#'components: \item{statistic}{Bartlett's K-squared test statistic.}
#'\item{parameter}{the degrees of freedom of the approximate chi-squared
#'distribution of the test statistic.}
#'\item{p.value}{the p-value of the test.}
#'\item{method}{the character string "Bartlett test of homogeneity of variances".}
#'\item{data.name}{a character string giving the names of the data.}
#'\item{vars}{the sample variances across groups (samples).}
#'\item{n}{the number of observations per group (sample)}
#'
#'@references Bartlett, M. S. (1937). Properties of sufficiency
#'and statistical tests. Proceedings of the Royal Society of
#'London Series A 160, 268–282. DOI: 10.1098/rspa.1937.0109.
#'
#'@examples
#'require(graphics)
#'
#'plot(count ~ spray, data = InsectSprays)
#'bartlett_test(InsectSprays$count, InsectSprays$spray)
#'
#' @rdname bartlett_test
#' @export
bartlett_test <- function(x, g, ...) UseMethod("bartlett_test", x)
#' @importFrom stats bartlett.test
#' @method bartlett_test default
#' @rdname bartlett_test
#' @export
bartlett_test.default <- function(x, g, ...){
cl <- match.call()
cl[[1]] <- as.name("bartlett.test")
bart <- eval.parent(cl)
vars <- tapply(x, g, var, na.rm = TRUE)
n <- table(g)
names(vars) <- names(n)
class(bart) <- c("bartlett_htest", class(bart))
bart$vars <- vars
bart$n <- n
bart
}
# require(stats)
# exists("bartlett.test.default") # false
# getS3method("bartlett.test", "default")
#' @importFrom stats rgamma
#' @importFrom stats rchisq
#' @method BF bartlett_htest
#' @export
BF.bartlett_htest <- function(x,
hypothesis = NULL,
prior.hyp = NULL,
complement = TRUE,
...) {
get_est <- get_estimates(x)
nsim <- 1e5
s2 <- get_est$estimate
n <- c(x$n)
b <- 2/n
J <- length(n)
names_coef <- names(get_est$estimate)
# exploratory BF for equality of variances:
logmx0 <- - 1 / 2 * sum((1 - b) * n) * log(pi) + 1 / 2 * log(prod(b)) +
lgamma((sum(n) - J) / 2) - lgamma((sum(b * n) - J) / 2) -
1 / 2 * (sum(n) - J) * log(sum((n - 1) * s2)) +
1 / 2 * (sum(b * n) - J) * log(sum(b * (n - 1) * s2))
logmxu <- - 1 / 2 * sum((1 - b) * n) * log(pi) + 1 / 2 * log(prod(b)) +
sum(lgamma((n - 1) / 2) - lgamma((b * n - 1) / 2) -
1 / 2 * (n - 1) * log((n - 1) * s2) +
1 / 2 * (b * n - 1) * log(b * (n - 1) * s2))
BF0u <- exp(logmx0 - logmxu)
BFtu_exploratory <- c(BF0u,1)
names(BFtu_exploratory) <- c("homogeneity of variances","no homogeneity of variances")
PHP_exploratory <- BFtu_exploratory / sum(BFtu_exploratory)
if (!is.null(hypothesis)){
parse_hyp <- parse_hypothesis(names_coef, hypothesis)
parse_hyp$hyp_mat <- do.call(rbind, parse_hyp$hyp_mat)
RrList <- make_RrList2(parse_hyp)
RrE <- RrList[[1]]
RrO <- RrList[[2]]
}
if (is.null(hypothesis)) {
BFmatrix_confirmatory <- PHP_confirmatory <- BFtu_confirmatory <- relfit <-
relcomp <- hypotheses <- BFtable <- priorprobs <- NULL
} else if (all(unlist(lapply(append(RrE, RrO), is.null)))) {
BFmatrix_confirmatory <- PHP_confirmatory <- BFtu_confirmatory <- relfit <-
relcomp <- hypotheses <- BFtable <- priorprobs <- NULL
} else { # execute confirmatory Bayes factor test based on hypothesis input
# check if hypotheses are admissible:
RrCheck <- do.call(rbind, append(RrE, RrO))
RrCheck_count <- t(apply(RrCheck[, -ncol(RrCheck), drop = FALSE], 1,
function(x) {sapply(list(-1, 1), function(y) {sum(y == x)})}))
if (any(RrCheck_count != 1) || any(RrCheck[, ncol(RrCheck)] != 0)) {
stop(paste0("The hypotheses contain inadmissible constraints."))
}
Th <- length(RrE)
logmx <- relfit <- relcomp <- logmxE <- rep(NA, times = Th)
names(logmx) <- names(relfit) <- names(relcomp) <- names(logmxE) <-
parse_hyp$original_hypothesis
for (h in 1:Th) {
if (is.null(RrE[[h]])) {
unique_vars <- as.list(1:J)
} else {
RrEh <- RrE[[h]][, -ncol(RrE[[h]])]
if (!is.matrix(RrEh)) {
RrEh <- t(as.matrix(RrEh))
}
RrEh_pos <- t(apply(RrEh, 1, function(x) which(!(x == 0))))
unique_vars <- list()
rows <- 1:nrow(RrEh_pos)
while (length(rows) > 0) {
equal_vars <- RrEh_pos[min(rows), ]
row_check <- min(rows)
for (i in setdiff(rows, row_check)) {
if (any(equal_vars %in% RrEh_pos[i, ])) {
equal_vars <- unique(c(equal_vars, RrEh_pos[i, ]))
row_check <- c(row_check, i)
}
}
unique_vars <- c(unique_vars, list(equal_vars))
rows <- setdiff(rows, row_check)
}
unique_vars <- c(unique_vars, setdiff(1:J, unlist(unique_vars)))
}
K <- length(unique_vars)
Jk <- sapply(unique_vars, length)
s2list <- lapply(unique_vars, function(x) s2[x])
nlist <- lapply(unique_vars, function(x) n[x])
blist <- lapply(unique_vars, function(x) b[x])
df <- dfb <- SS <- SSb <- rep(NA, times = K)
for (i in 1:K) {
df[i] <- sum(nlist[[i]]) - Jk[i]
dfb[i] <- sum(blist[[i]] * nlist[[i]]) - Jk[i]
SS[i] <- sum((nlist[[i]] - 1) * s2list[[i]])
SSb[i] <- sum(blist[[i]] * (nlist[[i]] - 1) * s2list[[i]])
}
logmxE[h] <- - 1 / 2 * sum((1 - unlist(blist)) * unlist(nlist)) * log(pi) +
1 / 2 * log(prod(unlist(blist))) + sum(lgamma(df / 2) - lgamma(dfb / 2) -
1 / 2 * df * log(SS) + 1 / 2 * dfb * log(SSb))
if (is.null(RrO[[h]])) {
logmx[h] <- logmxE[h]
} else {
RrOh <- RrO[[h]][, -ncol(RrO[[h]])]
if (!is.matrix(RrOh)) {
RrOh <- t(as.matrix(RrOh))
}
RrOh_pos <- t(apply(RrOh, 1, function(x) c(which(x == -1), which(x == 1))))
unique_vars_order <- cbind(
apply(as.matrix(RrOh_pos[, 1]), 1, function(x) {
which(unlist(lapply(unique_vars, function(y) {x %in% y})))}),
apply(as.matrix(RrOh_pos[, 2]), 1, function(x) {
which(unlist(lapply(unique_vars, function(y) {x %in% y})))})
)
post_samp <- prior_samp <- matrix(NA, nrow = nsim, ncol = K)
indi_post <- indi_prior <- rep(1, times = nsim)
for (i in unique(c(unique_vars_order))) {
post_samp[, i] <- SS[i] / rchisq(nsim, df = df[i])
prior_samp[, i] <- dfb[i] / rchisq(nsim, df = dfb[i])
}
for (i in 1:nrow(unique_vars_order)) {
indi_post <- indi_post * (post_samp[, unique_vars_order[i, 1]] <
post_samp[, unique_vars_order[i, 2]])
indi_prior <- indi_prior * (prior_samp[, unique_vars_order[i, 1]] <
prior_samp[, unique_vars_order[i, 2]])
}
relfit[h] <- sum(indi_post) / nsim
relcomp[h] <- sum(indi_prior) / nsim
logmx[h] <- log(relfit[h] / relcomp[h]) + logmxE[h]
}
}
if(complement==TRUE){
#compute marginal likelihood for complement hypothesis
relfit <- inversegamma_prob_Hc(shape1=(n-1)/2,scale1=s2*(n-1)/(2*n),relmeas=relfit,RrE1=RrE,RrO1=RrO)
relcomp <- inversegamma_prob_Hc(shape1=rep(.5,length(n)),scale1=rep(.5,length(n)),relmeas=relcomp,RrE1=RrE,RrO1=RrO)
if(length(relfit)>Th){
logmxE <- c(logmxE,logmxu)
logmx <- c(logmx,logmxu + log(relfit[Th+1]/relcomp[Th+1]))
names(logmx)[Th+1] <- "complement"
}
}
hypotheses <- names(logmx)
BFtu_confirmatory <- exp(logmx - logmxu)
BFmatrix_confirmatory <- BFtu_confirmatory %*% t(1 / BFtu_confirmatory)
diag(BFmatrix_confirmatory) <- 1
names(BFtu_confirmatory) <- row.names(BFmatrix_confirmatory) <-
colnames(BFmatrix_confirmatory) <- hypotheses
diag(BFmatrix_confirmatory) <- 1
if(is.null(prior.hyp)){
priorprobs <- rep(1/length(BFtu_confirmatory),length(BFtu_confirmatory))
}else{
if(!is.numeric(prior.hyp) || length(prior.hyp)!=length(BFtu_confirmatory)){
warning(paste0("Argument 'prior.hyp' should be numeric and of length ",as.character(length(BFtu_confirmatory)),". Equal prior probabilities are used."))
priorprobs <- rep(1/length(BFtu_confirmatory),length(BFtu_confirmatory))
}else{
priorprobs <- prior.hyp
}
}
PHP_confirmatory <- BFtu_confirmatory * priorprobs / sum(BFtu_confirmatory * priorprobs)
relcomp[which(is.na(relcomp))] <- 1
relfit[which(is.na(relfit))] <- 1
BF_E <- exp(logmxE - logmxu)
BFtable <- cbind(rep(NA,length(relfit)),relcomp,rep(NA,length(relfit)),relfit,BF_E,
relfit/relcomp,BF_E*relfit/relcomp,PHP_confirmatory)
row.names(BFtable) <- names(PHP_confirmatory)
colnames(BFtable) <- c("complex=","complex>","fit=","fit>","BF=","BF>","BF","PHP")
}
BFlm_out <- list(
BFtu_exploratory=BFtu_exploratory,
PHP_exploratory=PHP_exploratory,
BFtu_confirmatory=BFtu_confirmatory,
PHP_confirmatory=PHP_confirmatory,
BFmatrix_confirmatory=BFmatrix_confirmatory,
BFtable_confirmatory=BFtable,
prior.hyp=priorprobs,
hypotheses=hypotheses,
estimates=s2,
model=x,
bayesfactor="generalized adjusted fractional Bayes factors",
parameter="group variances",
call=match.call())
class(BFlm_out) <- "BF"
return(BFlm_out)
}
# The function computes the probability of the complement hypothesis
inversegamma_prob_Hc <- function(shape1,scale1,relmeas,RrE1,RrO1,samsize1=1e5){
numhyp <- length(RrE1)
whichO <- unlist(lapply(1:numhyp,function(h){is.null(RrE1[[h]])}))
numO <- sum(whichO)
numpara <- length(shape1)
if(numO==length(RrE1)){ # Then the complement is equivalent to the unconstrained hypothesis.
relmeas <- c(relmeas,1)
names(relmeas)[numhyp+1] <- "complement"
}else{ # So there is at least one hypothesis with only order constraints
if(numO==1){ # There is one hypothesis with only order constraints. Hc is complement of this hypothesis.
relmeas <- c(relmeas,1-relmeas[whichO])
names(relmeas)[numhyp+1] <- "complement"
}else{ # So more than one hypothesis with only order constraints
randomDraws <- rmvnorm(samsize1,mean=rep(0,numpara),sigma=diag(numpara))
#get draws that satisfy the constraints of the separate order constrained hypotheses
checksOC <- lapply(which(whichO),function(h){
Rorder <- as.matrix(RrO1[[h]][,-(1+numpara)])
if(ncol(Rorder)==1){
Rorder <- t(Rorder)
}
rorder <- as.matrix(RrO1[[h]][,1+numpara])
apply(randomDraws%*%t(Rorder) > rep(1,samsize1)%*%t(rorder),1,prod)
})
checkOCplus <- Reduce("+",checksOC)
if(sum(checkOCplus > 0) < samsize1){ #then the joint order constrained hypotheses do not completely cover the parameter space.
if(sum(checkOCplus>1)==0){ # then order constrained spaces are nonoverlapping
relmeas <- c(relmeas,1-sum(relmeas[whichO]))
names(relmeas)[numhyp+1] <- "complement"
}else{ #the order constrained subspaces at least partly overlap
randomDraws <- matrix(unlist(lapply(1:numpara,function(par){
1/rgamma(1e5,shape=shape1[par]/2,rate=scale1[par])
#rinvgamma(1e5,shape=shape1[par]/2,scale=scale1[par])
})),ncol=numpara)
checksOCpost <- lapply(which(whichO),function(h){
Rorder <- as.matrix(RrO1[[h]][,-(1+numpara)])
if(ncol(Rorder)==1){
Rorder <- t(Rorder)
}
rorder <- as.matrix(RrO1[[h]][,1+numpara])
apply(randomDraws%*%t(Rorder) > rep(1,samsize1)%*%t(rorder),1,prod)
})
relmeas <- c(relmeas,sum(Reduce("+",checksOCpost) == 0) / samsize1)
names(relmeas)[numhyp+1] <- "complement"
}
}
}
}
return(relmeas)
}
|
/scratch/gouwar.j/cran-all/cranData/BFpack/R/BFvar.R
|
#' Student t approximations of Fisher transformed correlations
#'
#' Approximated degrees of freedom and approximated scale of the Fisher transformed
#' correlations depending on the dimension of the vector of dependent variables P
#' based on a joint uniform prior.
#'
#' \tabular{lll}{
#' \strong{nu} \tab \code{numeric} \tab Approximated degrees of freedom\cr
#' \strong{sigma} \tab \code{numeric} \tab Approximated scale\cr
#' \strong{P} \tab \code{integer} \tab Dimension of vector of dependent variables\cr
#' }
#' @docType data
#' @keywords datasets
#' @name Fcor
#' @usage data(Fcor)
#' @format A data.frame with 3 columns.
NULL
|
/scratch/gouwar.j/cran-all/cranData/BFpack/R/Fcor.R
|
#' Actors from a small hypothetical network
#'
#' The related data files 'events', 'same_location', 'same_culture' contain
#' information on the event sequence and the two event statistics respectively.
#'
#'
#' @name actors
#' @docType data
#' @usage data(actors)
#' @keywords datasets
#' @format dataframe (25 rows, 4 columns)
#'
#' \tabular{lll}{
#' \strong{actors$id} \tab \code{integer} \tab ID of the employee, corresponding to
#' the sender and receiver IDs in the events dataframe \cr
#' \strong{actors$location} \tab \code{numeric} \tab Location of the actor,
#' ranging from 1-4 \cr
#' \strong{actors$culture} \tab \code{character} \tab Categorical variable, indicating the
#' culture of the employee \cr
#' }
#'
#' @keywords datasets
NULL
|
/scratch/gouwar.j/cran-all/cranData/BFpack/R/actors.R
|
#' Multiple Sources of Attentional Dysfunction in Adults With Tourette's Syndrome
#'
#' Data from a psychological study comparing attentional performances of
#' Tourette's syndrome (TS) patients, ADHD patients, and controls.
#' These data were simulated using the sufficient statistics from Silverstein,
#' Como, Palumbo, West, and Osborn (1995).
#'
#' \tabular{lll}{
#' \strong{accuracy} \tab \code{numeric} \tab Participant's accuracy in the attentional task\cr
#' \strong{group} \tab \code{factor} \tab Participant's group membership (TS patient, ADHD patient, or control)\cr
#' }
#' @docType data
#' @keywords datasets
#' @name attention
#' @usage data(attention)
#' @references Silverstein, S. M., Como, P. G., Palumbo, D. R., West, L. L., & Osborn, L. M. (1995). Multiple sources of attentional dysfunction in adults with Tourette's syndrome: Comparison with attention deficit-hyperactivity disorder. Neuropsychology, 9(2), 157-164. doi:10.1037/0894-4105.9.2.157
#' @format A data.frame with 51 rows and 2 columns.
NULL
|
/scratch/gouwar.j/cran-all/cranData/BFpack/R/attention.R
|
#' @method print cor_test
#' @export
print.cor_test <- function(x,
digits = 3,
na.print = "", ...){
estimates <- x$correstimates
names <- x$corrnames
groups <- length(names)
P <- nrow(names[[1]])
numcorr <- P*(P-1)/2
countg = 0
corrlist <- lapply(1:groups,function(g){
lapply(1:3,function(b){
matje <- matrix(NA,P,P)
row.names(matje) <- colnames(matje) <- x$variables[[1]]
matje[lower.tri(diag(P))] <- estimates[numcorr*(g-1)+1:numcorr,1+b]
matje
})
})
cat("\n")
cat("Unconstrained Bayesian estimates","\n", sep = "")
cat("\n")
if(groups > 1){
for(g in 1:groups){
cat(paste0("Group g",as.character(g),":"),"\n", sep = "")
cat("\n")
cat("Posterior 2.5% lower bounds:","\n", sep = "")
print(round(corrlist[[g]][[2]],digits), na.print = "")
cat("\n")
cat("Posterior median:","\n", sep = "")
print(round(corrlist[[g]][[1]],digits), na.print = "")
cat("\n")
cat("Posterior 97.5% upper bounds:","\n", sep = "")
print(round(corrlist[[g]][[3]],digits), na.print = "")
cat("\n")
}
}else{
cat("Posterior 2.5% lower bounds:","\n", sep = "")
print(round(corrlist[[1]][[2]],digits), na.print = "")
cat("\n")
cat("Posterior median:","\n", sep = "")
print(round(corrlist[[1]][[1]],digits), na.print = "")
cat("\n")
cat("Posterior 97.5% upper bounds:","\n", sep = "")
print(round(corrlist[[1]][[3]],digits), na.print = "")
cat("\n")
}
}
|
/scratch/gouwar.j/cran-all/cranData/BFpack/R/cor_test.print.R
|
#' fMRI data
#'
#' fMRI data assessing relation between individual differences in the ability to recognize
#' faces and cars and thickness of the superficial, middle, and deep layers of the
#' fusiform face area, as assessed by high-resolution fMRI (Williams et al., 2019, under review)
#'
#' \tabular{lll}{
#' \strong{Subject}\tab\code{numeric}\tab Participant ID number\cr
#' \strong{Face} \tab \code{numeric} \tab Standardized score on face recognition battery\cr
#' \strong{Vehicle} \tab \code{numeric} \tab Standardized score on vehicle recognition battery\cr
#' \strong{Superficial} \tab \code{numeric} \tab Depth in mm of superficial layer of FFA\cr
#' \strong{Middle} \tab \code{numeric} \tab Depth in mm of middle layer of FFA\cr
#' \strong{Deep} \tab \code{numeric} \tab Depth in mm of deep layer of FFA\cr
#' }
#' @docType data
#' @keywords datasets
#' @name fmri
#' @usage data(fmri)
#' @references McGuigin, R.W., Newton, A.T., Tamber-Rosenau, B., Tomarken, A.J, & Gauthier, I. (under review). Thickness of deep layers in the fusiform face area predicts face recognition.
#' @format A data.frame with 13 rows and 6 columns.
NULL
|
/scratch/gouwar.j/cran-all/cranData/BFpack/R/fmri.R
|
#' @method get_estimates hetcor
#' @import bain
#' @export
get_estimates.hetcor <- function(x, ...){
cl <- match.call()
cl[[1]] <- as.name("get_estimates")
cl[["x"]] <- x$correlations
P <- nrow(x$std.errors)
out <- eval.parent(cl)
retain <- matrix(1:length(out$estimate), nrow = nrow(x$std.errors))
out$estimate <- out$estimate[retain[lower.tri(retain)]]
errcov <- x$std.errors**2
errcov <- errcov[lower.tri(retain)]
if(length(errcov) == 1){
out$Sigma <- list(matrix(errcov))
} else {
out$Sigma <- list(diag(errcov))
}
class(out) <- "model_estimates"
attr(out, "analysisType") <- "hetcor"
out
}
#' @method get_estimates coxph
#' @export
get_estimates.coxph <- function(x, ...){
out <- list()
out$estimate <- coef(x)
out$Sigma <- list(vcov(x))
class(out) <- "model_estimates"
attr(out, "analysisType") <- "cophx"
out
}
#' @method get_estimates glm
#' @export
get_estimates.glm <- function(x, ...){
out <- list()
out$estimate <- coef(x)
out$Sigma <- list(vcov(x))
class(out) <- "model_estimates"
attr(out, "analysisType") <- "glm"
out
}
#' @method get_estimates polr
#' @export
get_estimates.polr <- function(x, ...){
out <- list()
out$estimate <- c(coef(x),x$zeta)
out$Sigma <- list(vcov(x))
class(out) <- "model_estimates"
attr(out, "analysisType") <- "polr"
out
}
#' @method get_estimates bartlett_htest
#' @export
get_estimates.bartlett_htest <- function(x, ...){
out <- list()
out$estimate <- x$vars
out$Sigma <- NULL
class(out) <- "model_estimates"
attr(out, "analysisType") <- "bartlett_htest"
out
}
#' @method get_estimates survreg
#' @export
get_estimates.survreg <- function(x, ...){
out <- list()
out$estimate <- x$coefficients
out$Sigma <- list(x$var)
class(out) <- "model_estimates"
attr(out, "analysisType") <- "survreg"
out
}
#' @method get_estimates zeroinfl
#' @export
get_estimates.zeroinfl <- function(x, ...){
out <- list()
out$estimate <- c(coef(x),x$zeta)
out$Sigma <- list(vcov(x))
class(out) <- "model_estimates"
attr(out, "analysisType") <- "zeroinfl"
out
}
#' @method get_estimates lm
#' @export
get_estimates.lm <- function(x, ...){
out <- list()
P <- ncol(x$coefficients)
K <- nrow(x$coefficients)
N <- nrow(x$residuals)
if(!is.matrix(x$coefficients)){
out$estimate <- coef(x)
out$Sigma <- list(vcov(x))
class(out) <- "model_estimates"
attr(out, "analysisType") <- "lm"
out
}else{
names_coef1 <- row.names(x$coefficients)
names_coef2 <- colnames(x$coefficients)
names_coef <- unlist(lapply(1:P,function(p){
lapply(1:K,function(k){
paste0(names_coef1[k],"_on_",names_coef2[p])
})
}))
# estimates of regression coefficients
estimatesBeta <- c(x$coefficients)
names(estimatesBeta) <- names_coef
Xmat <- model.matrix(x)
Ymat <- model.matrix(x)%*%x$coefficients + x$residuals
SigmaEst <- t(x$residuals)%*%x$residuals/N
covmBeta <- kronecker(SigmaEst,solve(t(Xmat)%*%Xmat))
row.names(covmBeta) <- colnames(covmBeta) <- names_coef
out$estimate <- estimatesBeta
out$Sigma <- list(covmBeta)
class(out) <- "model_estimates"
attr(out, "analysisType") <- "mlm"
out
}
}
#' @method get_estimates cor_test
#' @export
get_estimates.cor_test <- function(x, ...){
out <- list()
out$estimate <- x$meanF
out$Sigma <- list(x$covmF)
class(out) <- "model_estimates"
attr(out, "analysisType") <- "corr_htest"
out
}
#' @method get_estimates t_test
#' @export
get_estimates.t_test <- function(x, ...){
out <- list()
if(length(x$estimate)>1){
difference <- x$estimate[1] - x$estimate[2]
names(difference) <- "difference"
out$estimate <- difference
out$Sigma <- list((x$stderr)**2)
}else if(names(x$estimate) == "mean of the differences"){
difference <- x$estimate
names(difference) <- "difference"
out$estimate <- difference
out$Sigma <- list((x$stderr)**2)
}else{
populationmean <- x$estimate
names(populationmean) <- "mu"
out$estimate <- populationmean
out$Sigma <- list((x$stderr)**2)
}
class(out) <- "model_estimates"
attr(out, "analysisType") <- "t_test"
out
}
|
/scratch/gouwar.j/cran-all/cranData/BFpack/R/get_estimates_unique_to_BFpack.R
|
check_vcov <- function(x){
if (!isTRUE(all.equal(x, t(x))) || any(diag(x) < 0)){
stop(sQuote("sigma"), " is not a covariance matrix")
}
}
|
/scratch/gouwar.j/cran-all/cranData/BFpack/R/helper_functions.R
|
#' @importFrom utils getFromNamespace
#' @importFrom stats approxfun coef complete.cases cov dbeta density dnorm dt lm median model.matrix
#' @importFrom stats nobs pchisq pnorm pt quantile rWishart rbeta rgamma rnorm rt sd setNames var vcov
parse_hypothesis <- getFromNamespace("parse_hypothesis", "bain")
constraint_to_equation <- getFromNamespace("constraint_to_equation", "bain")
constraint_to_row <- getFromNamespace("constraint_to_row", "bain")
expand_compound_constraints <- getFromNamespace("expand_compound_constraints", "bain")
expand_parentheses <- getFromNamespace("expand_parentheses", "bain")
flip_inequality <- getFromNamespace("flip_inequality", "bain")
order_terms <- getFromNamespace("order_terms", "bain")
params_in_hyp <- getFromNamespace("params_in_hyp", "bain")
|
/scratch/gouwar.j/cran-all/cranData/BFpack/R/import_parser.R
|
#' Memory data on health and schizophrenic patients
#'
#' Data set from study assessing differences between schizophrenic patients and
#' healthy control participants in patterns of correlations among 6 verbal memory
#' tasks (Ichinose et al., 2019).
#' \tabular{lll}{
#' \strong{Im} \tab \code{numeric} \tab Percent correct on immediate recall of 3 word lists\cr
#' \strong{Del} \tab \code{numeric} \tab Percent correct on delayed recall of 3 word lists\cr
#' \strong{Wmn} \tab \code{numeric} \tab Number correct on letter-number span test of auditory working memory\cr
#' \strong{Cat} \tab \code{numeric} \tab Number correct on category fluency task\cr
#' \strong{Fas} \tab \code{numeric} \tab Number correct on letter fluency task\cr
#' \strong{Rat} \tab \code{numeric} \tab Number correct on remote associates task\cr
#' \strong{Group} \tab \code{factor} \tab Participant Group (HC = Healthy Control; SZ = Schizophrenia) \cr
#' }
#' @docType data
#' @keywords datasets
#' @name memory
#' @usage data(memory)
#' @references Ichinose, M.C., Han, G., Polyn, S., Park, S., & Tomarken, A.J. (2019). Verbal memory performance discordance in schizophrenia: A reflection of cognitive dysconnectivity.
#' Unpublished manuscript.
#' @format A data.frame with 40 rows and 8 columns.
NULL
|
/scratch/gouwar.j/cran-all/cranData/BFpack/R/memory.R
|
#' A sequence of innovation-related e-mail messages
#'
#' A time-ordered sequence of 247 communication messages between 25 actors.
#'
#' The related data files 'actors', 'same_location', 'same_culture' contain information
#' on the actors and the two event statistics respectively.
#'
#'
#' @name relevents
#' @docType data
#' @usage data(relevents)
#' @format dataframe (247 rows, 3 columns)
#'
#' \tabular{lll}{
#' \strong{relevents$time} \tab \code{numeric} \tab Time of the e-mail message,
#' in seconds since onset of the observation \cr
#' \strong{relevents$sender} \tab \code{integer} \tab ID of the sender, corresponding to
#' the employee IDs in the actors dataframe \cr
#' \strong{relevents$receiver} \tab \code{integer} \tab ID of the receiver \cr
#' }
#'
#' @keywords datasets
NULL
|
/scratch/gouwar.j/cran-all/cranData/BFpack/R/relevents.R
|
#' Same culture event statistic
#'
#' A matrix coding whether senders of events (in the rows) and receivers of events
#' (in the columns) have the same cultural background. Related to the 'events' data object,
#' that contains a relational event sequence, and the 'actors' object, that contains
#' information on the 25 actors involved in the relational event sequence.
#'
#'
#' @name same_culture
#' @docType data
#' @usage data(same_culture)
#' @format dataframe (25 rows, 4 columns)
#'
#' \tabular{lll}{
#' \strong{same_culture} \tab \code{integer} \tab Event statistic. Matrix with senders in the
#' rows and receivers in the columns. The event statistic is 1 if sender and receiver have
#' the same culture and 0 otherwise. \cr
#' }
#'
#' @keywords datasets
NULL
|
/scratch/gouwar.j/cran-all/cranData/BFpack/R/same_culture.R
|
#' Same location event statistic
#'
#' A matrix coding whether senders of events (in the rows) and receivers of events
#' (in the columns) have the same location. Related to the 'events' data object,
#' that contains a relational event sequence, and the 'actors' object, that contains
#' information on the 25 actors involved in the relational event sequence.
#'
#'
#' @name same_location
#' @docType data
#' @usage data(same_location)
#' @format dataframe (25 rows, 4 columns)
#'
#' \tabular{lll}{
#' \strong{same_location} \tab \code{integer} \tab Event statistic. Matrix with senders in the
#' rows and receivers in the columns. The event statistic is 1 if sender and receiver have
#' the same location and 0 otherwise. \cr
#' }
#'
#' @keywords datasets
NULL
|
/scratch/gouwar.j/cran-all/cranData/BFpack/R/same_location.R
|
#' Wason task performance and morality
#'
#' Data from an experimental study, using the Wason selection task (Wason 1968)
#' to examine whether humans have cognitive adaptations for detecting violations
#' of rules in multiple moral domains. Moral domains are operationalized in
#' terms of the five domains of the Moral Foundations Questionnaire
#' (Graham et al. 2011).
#' These data were simulated using the
#' R-package \code{synthpop}, based on the characteristics of the original data.
#'
#' \tabular{lll}{
#' \strong{sex} \tab \code{factor} \tab Participant sex\cr
#' \strong{age} \tab \code{integer} \tab Participant age\cr
#' \strong{nationality} \tab \code{factor} \tab Participant nationality\cr
#' \strong{politics} \tab \code{integer} \tab How would you define your political opinions? Likert type scale, from 1 (Liberal) to 6 (Conservative)\cr
#' \strong{WasonOrder} \tab \code{factor} \tab Was the Wason task presented before, or after the MFQ? \cr
#' \strong{Harm} \tab \code{numeric} \tab MFQ harm domain.\cr
#' \strong{Fairness} \tab \code{numeric} \tab MFQ fairness domain.\cr
#' \strong{Loyalty} \tab \code{numeric} \tab MFQ loyalty domain.\cr
#' \strong{Purity} \tab \code{numeric} \tab MFQ purity domain.\cr
#' \strong{Tasktype} \tab \code{ordered} \tab How was the Wason task framed?\cr
#' \strong{GotRight} \tab \code{factor} \tab Did the participant give the correct answer to the Wason task?
#' }
#' @docType data
#' @keywords datasets
#' @name sivan
#' @usage data(sivan)
#' @references Sivan, J., Curry, O. S., & Van Lissa, C. J. (2018). Excavating the Foundations: Cognitive Adaptations for Multiple Moral Domains. Evolutionary Psychological Science, 4(4), 408–419. doi:10.1007/s40806-018-0154-8
#' @format A data.frame with 887 rows and 12 columns.
NULL
|
/scratch/gouwar.j/cran-all/cranData/BFpack/R/sivan.R
|
#'
#' Data come from an experimental study (Rosa, Rosa, Sarner, and Barrett, 1998)
#' that were also used in Howell (2012, p.196).
#' An experiment was conducted to investigate if Therapeutic Touch practitioners
#' who were blindfolded can effectively identify which of their hands is below the experimenter's.
#' Twenty-eight practitioners were involved and tested 10 times in the experiment.
#' Researchers expected an average of 5 correct answers from each practitioner,
#' as this is the number expected by chance if the practitioners perform no better than guessing.
#'
#' \tabular{lll}{
#' \strong{correct} \tab \code{integer} \tab How many correct answers are from each practitioner)\cr
#' }
#' @docType data
#' @keywords datasets
#' @name therapeutic
#' @usage data(therapeutic)
#' @references Howell, D. (2012). Statistical methods for psychology (8th ed.). Belmont, CA: Cengage Learning.
#' @format A data.frame with 22 rows and 1 column.
NULL
|
/scratch/gouwar.j/cran-all/cranData/BFpack/R/therapeutic.R
|
#' Trends in International Mathematics and Science Study (TIMSS) 2011-2015
#'
#' A stratified sample was drawn by country and school to obtain a balanced
#' sample of p = 15 grade-4 students
#' per school for each of four countries (The Netherlands (NL), Croatia (HR),
#' Germany
#' (DE), and Denmark (DK)) and two measurement occasions (2011, 2015).
#' Achievement scores
#' (first plausible value) of overall mathematics were considered. Performances
#' of fourth
#' and eight graders from more than 50 participating countries around the world
#' can be found at (https://www.iea.nl/timss)
#' The TIMSS achievement scale is centered at 500 and the standard deviation is
#' equal to 100 scale score points.
#' The TIMSS data set has a three-level structure, where students are nested
#' within classrooms/schools, and
#' the classrooms/schools are nested within countries. Only one classroom was
#' sampled per school.
#' Changes in the mathematics achievement can be investigated by examining the
#' grouping of
#' students in schools across countries. Changes in country-specific intraclass
#' correlation coefficient
#' from 2011 to 2015, representing heterogeneity in mathematics achievement
#' within and between schools across years, can be tested. A decrease in average
#' performance together with an increase in the intraclass correlation indicates
#' that a subset of schools performed worse, whereas a constant intraclass
#' correlation across years indicates that the drop in performance applied to the
#' entire population of schools. For different countries, changes in the intraclass
#' correlation across years can be tested concurrently to also examine differences
#' across countries.
#'
#' \tabular{lll}{
#' \strong{math} \tab \code{numeric} \tab math score child\cr
#' \strong{groupNL11} \tab \code{numeric} \tab
#' Indicator for child from NL in 2011\cr
#' \strong{groupNL15} \tab \code{numeric} \tab
#' Indicator for child from NL in 2015\cr
#' \strong{groupHR11} \tab \code{numeric} \tab
#' Indicator for child from HR in 2011\cr
#' \strong{groupHR15} \tab \code{numeric} \tab
#' Indicator for child from HR in 2015\cr
#' \strong{groupDE11} \tab \code{numeric} \tab
#' Indicator for child from DE in 2011\cr
#' \strong{groupDE15} \tab \code{numeric} \tab
#' Indicator for child from DE in 2015\cr
#' \strong{groupDR11} \tab \code{numeric} \tab
#' Indicator for child from DK in 2011\cr
#' \strong{groupDR15} \tab \code{numeric} \tab
#' Indicator for child from DK in 2015\cr
#' \strong{gender} \tab \code{numeric} \tab Female=0,Male=1 \cr
#' \strong{weight} \tab \code{numeric} \tab Child sampling weight \cr
#' \strong{yeargender} \tab \code{numeric} \tab
#' Interaction for occasion and gender \cr
#' \strong{lln} \tab \code{numeric} \tab
#' total number of children in school-class \cr
#' \strong{groupschool} \tab \code{factor} \tab
#' Nested indicator for school in country\cr
#' \strong{schoolID} \tab \code{factor} \tab
#' Unique indicator for school
#' }
#' @docType data
#' @keywords datasets
#' @name timssICC
#' @usage data(timssICC)
#' @references Mulder, J. & Fox, J.-P. (2019). Bayes factor testing of multiple
#' intraclass correlations. Bayesian Analysis. 14, 2, p. 521-552.
#' @format A data.frame with 16770 rows and 15 columns.
NULL
|
/scratch/gouwar.j/cran-all/cranData/BFpack/R/timssICC.r
|
#' Precision of the Anchor Influences the Amount of Adjustment
#'
#' Data from an experimental study where participants have to guess the price
#' of a plasma tv. There were two experimental conditions.
#' These data were simulated using the sufficient statistics from Janiszewski &
#' Uy (2008).
#'
#' \tabular{lll}{
#' \strong{price} \tab \code{numeric} \tab Participant z-scores of price\cr
#' \strong{anchor} \tab \code{factor} \tab Participant anchor\cr
#' \strong{motivation} \tab \code{factor} \tab motivation to change\cr
#' }
#' @docType data
#' @keywords datasets
#' @name tvprices
#' @usage data(tvprices)
#' @references Janiszewski, C., & Uy, D. (2008). Precision of the anchor influences the amount of adjustment. Psychological Science, 19(2), 121–127. doi:10.1111/j.1467-9280.2008.02057.x
#' @format A data.frame with 59 rows and 3 columns.
NULL
|
/scratch/gouwar.j/cran-all/cranData/BFpack/R/tvprices.R
|
#' Facial trustworthiness and criminal sentencing
#'
#' Data from a correlational study in which the correlation between ratings of
#' facial trustworthiness of inmates was correlated with whether they had
#' received the death penalty or not (Wilson and Rule, 2015). These data were
#' simulated using the R-package \code{synthpop}, based on the characteristics
#' of the original data.
#'
#'
#'
#' \tabular{lll}{
#' \strong{stim} \tab \code{integer} \tab Stimulus Number\cr
#' \strong{sent} \tab \code{integer} \tab Sentence: 1 = Death, 0 = Life\cr
#' \strong{race} \tab \code{integer} \tab Race: 1 = White, -1 = Black\cr
#' \strong{glasses} \tab \code{integer} \tab Glasses: 1 = Yes, 0 = No\cr
#' \strong{tattoos} \tab \code{integer} \tab Tattoos: 1 = Yes, 0 = No \cr
#' \strong{ztrust} \tab \code{numeric} \tab Trustworthiness \cr
#' \strong{trust_2nd} \tab \code{numeric} \tab Trustworthiness ratings with 2nd control group; Death targets are same as in primary analysis, Life targets are different.\cr
#' \strong{afro} \tab \code{numeric} \tab raw Afrocentricity ratings.\cr
#' \strong{zAfro} \tab \code{numeric} \tab Afrocentricity ratings normalized within target race. Analyses in paper were done with this variable.\cr
#' \strong{attract} \tab \code{numeric} \tab Attractiveness\cr
#' \strong{fWHR} \tab \code{numeric} \tab Facial width-to-height ratio \cr
#' \strong{afWHR} \tab \code{numeric} \tab fWHR normalized within target race. Analyses in paper were done with this variable \cr
#' \strong{maturity} \tab \code{numeric} \tab Maturity
#' }
#' @docType data
#' @keywords datasets
#' @name wilson
#' @usage data(wilson)
#' @references Wilson, J. P., & Rule, N. O. (2015). Facial Trustworthiness
#' Predicts Extreme Criminal-Sentencing Outcomes. Psychological Science,
#' 26(8), 1325–1331. doi: 10.1177/0956797615590992
#' @format A data.frame with 742 rows and 13 columns.
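#' @examples
#' # Minimal usage sketch: load the dataset and inspect the first rows.
#' data(wilson)
#' head(wilson)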
NULL
|
/scratch/gouwar.j/cran-all/cranData/BFpack/R/wilson.r
|
## ----setup, include=FALSE-----------------------------------------------------
knitr::opts_chunk$set(echo = TRUE)
|
/scratch/gouwar.j/cran-all/cranData/BFpack/inst/doc/vignette_BFpack.R
|
---
title: "Introduction to BFpack"
author: "Mulder, J., Williams, D. R., Gu, X., Tomarken, A., Boeing-Messing, F., Olsson-Collentine, A., Meijerink, M., Menke, J., van Aert, R., Fox, J.-P., Hoijtink, H., Rosseel, Y., Wagenmakers, E.-J., and van Lissa, C."
date: "`r Sys.Date()`"
output:
rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{BFpack_introduction}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r setup, include=FALSE}
knitr::opts_chunk$set(echo = TRUE)
```
## Introduction
`BFpack` contains a collection of functions for Bayesian hypothesis testing using Bayes factors and posterior probabilities in R. The main function `BF` needs a fitted model `x` as input argument. Depending on the class of the fitted model, a standard hypothesis test is executed by default. For example, if `x` is a fitted regression model of class `lm`, then posterior probabilities are computed of whether each separate coefficient is zero, negative, or positive (assuming equal prior probabilities). If one has specific hypotheses with equality and/or order constraints on the parameters under the fitted model `x`, then these can be formulated using the `hypothesis` argument (a character string), possibly together with prior probabilities for the hypotheses via the `prior.hyp` argument (by default all hypotheses are equally likely a priori), and the `complement` argument, a logical stating whether the complement hypothesis should be included in the test (`TRUE` by default).
Alternatively, when the model of interest is not of a class that is currently supported, `x` can also be a named numeric vector containing the estimates of the model parameters of interest, together with the error covariance matrix in the argument `Sigma`, and the sample size used to obtain the estimates, to perform an approximate Bayes factor test using large sample theory.
## Reference
The key references for the package are
Mulder, J., Williams, D. R., Gu, X., Tomarken, A., Boeing-Messing, F., Olsson-Collentine, A., Meijerink, M., Menke, J., van Aert, R., Fox, J.-P., Hoijtink, H., Rosseel, Y., Wagenmakers, E.-J., and van Lissa, C. (2021). BFpack: Flexible Bayes Factor Testing of Scientific Theories in R. *Journal of Statistical Software*. <https://www.jstatsoft.org/article/view/v100i18>
Mulder, J., van Lissa, C., Gu, X., Olsson-Collentine, A., Boeing-Messing, F., Williams, D. R., Fox, J.-P., Menke, J., et al. (2021). BFpack: Flexible Bayes Factor Testing of Scientific Expectations. (Version 0.3.2) <https://CRAN.R-project.org/package=BFpack>
## Usage
`BF(x, hypothesis, prior.hyp = NULL, complement = TRUE, ...)`
## Arguments
* `x`, a fitted model object that is obtained using a R-function. The object can be obtained via the following R functions:
+ `t_test` for t testing,
+ `bartlett_test` for testing independent group variances,
+ `aov` for AN(C)OVA testing,
+ `manova` for MAN(C)OVA testing,
    + `lm` for linear regression analysis,
+ `cor_test` for correlation analysis,
+ `lmer` currently for testing intraclass correlations in random intercept models,
+ `glm` for generalized linear models,
+ `coxph` for survival analysis,
+ `survreg` for survival analysis,
+ `polr` for ordinal regression,
+ `zeroinfl` for zero-inflated regression,
+ `rma` for meta-analysis,
+ `ergm` or `bergm` for an exponential random graph,
+ `x` can also be a named vector with estimates of the key parameters.
* `hypothesis`, a character string specifying the hypotheses with equality and/or order constraints on the key parameters of interest.
+ By default `hypothesis = NULL` which executes exploratory hypothesis tests (examples below).
    + The parameter names are based on the names of the estimated key parameters. An overview of the key parameters is given using the function `get_estimates`, e.g., `get_estimates(model1)`, where `model1` is a fitted model object.
+ Separate constraints within a hypothesis are separated with an ampersand `&`. Hypotheses are separated using a semi-colon `;`. For example `hypothesis = "weight > height & height > 0; weight = height = 0"` implies that the first hypothesis assumes that the parameter `weight` is larger than the parameter `height` and that the parameter `height` is positive, and the second hypothesis assumes that the two parameters are equal to zero. Note that the first hypothesis could equivalently have been written as `weight > height > 0`.
* `prior.hyp`, a numeric vector specifying the prior probabilities of the hypotheses of the `hypothesis` argument. The default setting is `prior.hyp = NULL` which sets equal prior probabilities.
* `complement`, a logical value which specifies whether a complement hypothesis is included in the tested hypotheses specified under `hypothesis`. The default setting is `TRUE`. The complement hypothesis covers the remaining parameter space that is not covered by the constrained hypotheses. For example, if an equality hypothesis and an order hypothesis are formulated, say, `hypothesis = "weight = height = length; weight > height > length"`, the complement hypothesis covers the remaining subspace where neither `"weight = height = length"` holds, nor `"weight > height > length"` holds.
Alternatively, if one is interested in testing hypotheses under a model class that is currently not supported, an approximate Bayesian test can be executed with the following (additional) arguments (a minimal sketch is given after the list):
* `x`, a named numeric vector of the estimates (e.g., MLE) of the parameters of interest where the labels are equal to the names of the parameters which are used for the `hypothesis` argument.
* `Sigma`, the approximate posterior covariance matrix (e.g., error covariance matrix) of the parameters of interest.
* `n`, the sample size that was used to acquire the estimates and covariance matrix.
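As a minimal sketch of this approximate interface (the parameter names, estimates, covariance matrix, and sample size below are purely hypothetical; a worked example with a fitted Poisson model follows in the section 'Running `BF` on a named vector'):
``` r
library(BFpack)
# hypothetical estimates of two parameters with a hypothetical error covariance matrix
est <- c(beta1 = 0.4, beta2 = 0.1)
covmat <- diag(c(0.04, 0.05))
rownames(covmat) <- colnames(covmat) <- names(est)
# approximate test of an order hypothesis, an equality hypothesis, and their complement
BF(est, Sigma = covmat, n = 100, hypothesis = "beta1 > beta2; beta1 = beta2")
```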
## Output
The output is of class `BF`. By running the `print` function on the `BF` object, a short overview of the results is presented. By running the `summary` function on the `BF` object, a comprehensive overview of the results is presented.
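As a minimal illustration (using the one sample t test object that is also fitted in the first example below):
``` r
library(BFpack)
ttest1 <- bain::t_test(therapeutic, alternative = "greater", mu = 5)
BF1 <- BF(ttest1)
print(BF1)    # short overview: posterior probabilities of the hypotheses
summary(BF1)  # comprehensive overview: exploratory (and confirmatory) results
```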
## Example analyses
### Bayesian t testing
First a classical one sample t test is executed for the test value
\(\mu = 5\) on the therapeutic data
``` r
ttest1 <- bain::t_test(therapeutic, alternative = "greater", mu = 5)
```
The `t_test` function is part of the ***bain*** package. The function is
equivalent to the standard `t.test` function, with the addition that the
returned object contains more output than the standard `t.test`
function.
To see which parameters can be tested on this object run
``` r
get_estimates(ttest1)
```
which shows that the only parameter that can be tested is the population mean which has name `mu`.
To perform an exploratory Bayesian t test of whether the population mean is equal to, smaller than, or larger than the null value (which is `5` here, as specified when defining the `ttest1` object), one needs to run the `BF` function on the object.
``` r
library(BFpack)
BF1 <- BF(ttest1)
```
This executes an exploratory ('exhaustive') test around the null value: `H1: mu = 5`
versus `H2: mu < 5` versus `H3: mu > 5` assuming equal prior
probabilities for `H1`, `H2`, and `H3` of 1/3. The output presents the
posterior probabilities for the three hypotheses.
The same test would be executed when the same hypotheses are explicitly
specified using the `hypothesis` argument.
``` r
hypothesis <- "mu = 5; mu < 5; mu > 5"
BF(ttest1, hypothesis = hypothesis)
```
In the above test the complement hypothesis is excluded automatically, as the formulated hypotheses under the `hypothesis` argument cover the complete parameter space. Furthermore, when testing hypotheses via the `hypothesis` argument, the output also presents an `Evidence matrix` containing the Bayes factors between the hypotheses formulated in the `hypothesis` argument.
A standard two-sided test around the null value `mu` is executed by setting the hypothesis argument equal to the precise null hypothesis so that the complement hypothesis (which is included by default) corresponds to the hypothesis that assumes that the population mean is anything but the null value
``` r
hypothesis <- "mu = 5"
BF(ttest1, hypothesis = hypothesis)
```
The argument `prior.hyp` can be used to specify different prior probabilities
for the hypotheses. For example, when the left one-tailed hypothesis is not possible
based on prior considerations (e.g., see [Mulder et al. (2021, Section 4.1)](https://www.jstatsoft.org/article/view/v100i18)) while the precise (null) hypothesis and the right
one-tailed hypothesis are equally likely, the argument `prior.hyp` should be a vector
specifying the prior probabilities of the respective hypotheses
``` r
BF(ttest1, hypothesis = "mu = 5; mu < 5; mu > 5", prior.hyp = c(.5,0,.5))
```
For more information about the methodology, we refer the interested reader to [Mulder et al. (2021)](https://www.jstatsoft.org/article/view/v100i18) and [Mulder and Gu (2021)](https://doi.org/10.1080/00273171.2021.1904809).
### Analysis of variance
First an analysis of variance (ANOVA) model is fitted using the `aov`
function in `R`.
``` r
aov1 <- aov(price ~ anchor * motivation, data = tvprices)
```
Next a Bayesian test can be performed on the fitted object. By default
exploratory tests are executed of whether the individual main and interaction effects
are zero or not (corresponding to the full model) (see [Mulder et al. (2021, Section 4.2)](https://www.jstatsoft.org/article/view/v100i18))
``` r
BF(aov1)
```
One can also test specific equality/order hypotheses based on scientific expectations,
such as whether `anchorrounded` is positive, `motivationlow` is negative, and the interaction
effect `anchorrounded:motivationlow` is negative (see [Mulder et al. (2021, Section 4.2)](https://www.jstatsoft.org/article/view/v100i18)), versus the null hypothesis, versus the complement hypothesis
(which assumes that the constraints of neither of the two hypotheses hold). This test can be executed
as follows:
``` r
constraints2 <- "anchorrounded > 0 & motivationlow < 0 &
anchorrounded:motivationlow < 0; anchorrounded = 0 &
motivationlow = 0 & anchorrounded:motivationlow = 0"
set.seed(1234)
BF(aov1, hypothesis = constraints2)
```
For more information about the methodology, we refer the interested reader to [Mulder et al. (2021) ](https://www.jstatsoft.org/article/view/v100i18) and [Mulder and Gu (2021)](https://doi.org/10.1080/00273171.2021.1904809).
### Testing independent group variances
First a classical significance test is executed using the `bartlett_test` function, which is part of the ***BFpack*** package. The function is equivalent to the standard `bartlett.test` function with the addition that the returned object contains additional output needed for the test using the `BF` function.
``` r
bartlett1 <- bartlett_test(x = attention$accuracy, g = attention$group)
```
On an object of this class, by default `BF` executes an exploratory test of homogeneity (equality) of variances against an unconstrained (full) model
``` r
BF(bartlett1)
```
The group variances have names `ADHD`, `Controls`, and `TS`. This can be retrieved by running
``` r
get_estimates(bartlett1)
```
Let's say we want to test a hypothesis (H1) which assumes that the group variances of the `Controls` and `TS` groups are equal and smaller than the group variance of the `ADHD` group, against a hypothesis (H2) which assumes that the group variances of `ADHD` and `TS` are equal and larger than that of the `Controls` group, a hypothesis (H3) which assumes that all group variances are equal, and a complement hypothesis (H4). To do this we run the following:
``` r
hypothesis <- "Controls = TS < ADHD; Controls < TS = ADHD; Controls = TS = ADHD"
set.seed(358)
BF_var <- BF(bartlett1, hypothesis)
```
A comprehensive output of this analysis can be obtained by running:
``` r
summary(BF_var)
```
which presents the results of an exploratory analysis and the results of a confirmatory analysis (based on the hypotheses formulated under the `hypothesis` argument). The exploratory analysis tests a hypothesis which assumes that the variances are equal across groups (homogeneity of variances) versus an alternative unrestricted hypothesis. The output shows that the posterior probabilities of these two hypotheses are approximately 0.803 and 0.197 (assuming equal prior probabilities). Note that the p value in the classical Bartlett test for these data equals 0.1638, which implies that the hypothesis of homogeneity of variances cannot be rejected using common significance levels, such as 0.05 or 0.01. Note, however, that this p value cannot be used as a measure of the evidence in the data in favor of homogeneity of group variances. This can be done using the proposed Bayes factor test, which shows that the probability that the variances are equal is approximately 0.803. Also note that the exploratory test could equivalently be tested via the `hypothesis` argument by running `BF(bartlett1, "Controls = TS = ADHD")`.
The confirmatory test shows that H1 receives the strongest support from the data, but H2 and H3 are viable competitors. It appears that even the complement H4 cannot be ruled out entirely given a posterior probability of 0.058. To conclude, the results indicate that the TS population is as heterogeneous in its attentional performance as the healthy control population in this specific task, but further research would be required to obtain more conclusive evidence.
For more information about the methodology, we refer the interested reader to [Boeing-Messing et al. (2017)](https://www.researchgate.net/publication/317418250_Bayesian_evaluation_of_constrained_hypotheses_on_variances_of_multiple_independent_groups).
### Logistic regression
An example hypothesis test is considered under a logistic regression
model. First a logistic regression model is fitted using the `glm`
function
``` r
fit_glm <- glm(sent ~ ztrust + zfWHR + zAfro + glasses + attract + maturity +
tattoos, family = binomial(), data = wilson)
```
By default exploratory exhaustive tests are executed of whether the separate
regression coefficients are zero, negative, or positive:
``` r
BF(fit_glm)
```
The names of the regression coefficients on which constrained hypotheses
can be formulated can be extracted using the `get_estimates` function.
``` r
get_estimates(fit_glm)
```
Two different hypotheses are formulated with competing equality and/or
order constraints on the regression coefficients of interest (see [Mulder et al. (2021, Section 4.4)](https://www.jstatsoft.org/article/view/v100i18)):
``` r
BF_glm <- BF(fit_glm, hypothesis = "ztrust > (zfWHR, zAfro) > 0;
ztrust > zfWHR = zAfro = 0")
summary(BF_glm)
```
By calling the `summary` function on the output object of class `BF`,
the results of the exploratory tests are presented of whether each
separate parameter is zero, negative, or positive, and the results of
the confirmatory test of the hypotheses under the `hypothesis` argument
are presented. When the hypotheses do not cover the complete parameter
space, by default the complement hypothesis is added which covers the
remaining parameter space that is not covered by the constraints under
the hypotheses of interest. In the above example, the complement
hypothesis covers the parameter space where neither `"ztrust > (zfWHR,
zAfro) > 0"` holds, nor where `"ztrust > zfWHR = zAfro = 0"` holds.
For more information about the methodology, we refer the interested reader to [Gu et al. (2018)](https://bpspsychub.onlinelibrary.wiley.com/doi/full/10.1111/bmsp.12110?casa_token=tjWwoVqLI7QAAAAA%3Ac84DWKNw8pf23ybOgZTs1pzgyZMuCc-BeTaPFj8vnfnTytzHd0gr-D9ymVxYbKj_MTO1ITGFW8tGBZ1J) and [Mulder et al. (2021)](https://www.jstatsoft.org/article/view/v100i18)
### Correlation analysis
By default `BF` performs exhaustive tests of whether the separate
correlations are zero, negative, or positive.
``` r
set.seed(123)
cor1 <- cor_test(memory[,1:3])
BF1 <- BF(cor1)
print(BF1)
```
The names of the
correlations are constructed using the names of the variables separated
by `_with_`:
``` r
get_estimates(cor1)
```
Specific hypotheses based on prior/theoretical considerations can be tested using the
`hypothesis` argument. As an example we show here how to test whether all
correlations are equal and positive versus its complement.
``` r
BF2 <- BF(cor1, hypothesis = "Del_with_Im = Wmn_with_Im = Wmn_with_Del > 0")
print(BF2)
```
We can also test equality and order constraints on correlations across different groups. As the seventh column of the `memory` object is a group indicator, let us first create different objects for the two different groups, and perform Bayesian estimation on the correlation matrices of the two different groups
``` r
memoryHC <- subset(memory,Group=="HC")[,-(4:7)]
memorySZ <- subset(memory,Group=="SZ")[,-(4:7)]
set.seed(123)
cor1 <- cor_test(memoryHC,memorySZ)
```
In this case with multiple groups, by default exploratory tests are executed of whether the correlations
are zero, negative, or positive for each separate group (e.g., correlations in group `g1`
are denoted by `_in_g1` at the end of the name)
``` r
get_estimates(cor1)
```
Next we test the one-sided hypothesis that the respective correlations in the first group (`g1`) are larger than the correlations in the second group (`g2`) via
``` r
set.seed(123)
BF6_cor <- BF(cor1, hypothesis =
"Del_with_Im_in_g1 > Del_with_Im_in_g2 &
Del_with_Wmn_in_g1 > Del_with_Wmn_in_g2 &
Im_with_Wmn_in_g1 > Im_with_Wmn_in_g2")
```
By running `print(BF6_cor)`, the output shows that the one-sided hypothesis received a posterior probability of 0.991 and the alternative received a posterior probability of .009 (assuming equal prior probabilities).
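For reference, these calls produce the output described above (the `summary` variant additionally shows the exploratory results):
``` r
print(BF6_cor)
summary(BF6_cor)
```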
For more information about the methodology, we refer the interested reader to [Mulder (2016)](https://www.researchgate.net/publication/280256902_Bayes_factors_for_testing_order-constrained_hypotheses_on_correlations) and [Mulder and Gelissen (2019)](https://doi.org/10.1080/02664763.2021.1992360)
### Univariate/Multivariate multiple regression
For a univariate regression model, by default an exhaustive test is
executed of whether an effect is zero, negative, or positive.
``` r
lm1 <- lm(Superficial ~ Face + Vehicle, data = fmri)
BF1 <- BF(lm1)
print(BF1)
```
Hypotheses can be tested with equality and/or order constraints on the
effects of interest. If preferred, the complement hypothesis can be
omitted using the `complement`
argument
``` r
BF2 <- BF(lm1, hypothesis = "Vehicle > 0 & Face < 0; Vehicle = Face = 0",
complement = FALSE)
print(BF2)
```
In a multivariate regression model hypotheses can be tested on the
effects on the same dependent variable, and on effects across different
dependent variables. The name of an effect is constructed as the name of
the predictor variable and the dependent variable separated by `_on_`.
Testing hypotheses with both constraints within a dependent variable and
across dependent variables makes use of a Monte Carlo estimate which may
take a few seconds.
``` r
lm2 <- lm(cbind(Superficial, Middle, Deep) ~ Face + Vehicle,
data = fmri)
constraint2 <- "Face_on_Deep = Face_on_Superficial = Face_on_Middle < 0 <
Vehicle_on_Deep = Vehicle_on_Superficial = Vehicle_on_Middle;
Face_on_Deep < Face_on_Superficial = Face_on_Middle < 0 < Vehicle_on_Deep =
Vehicle_on_Superficial = Vehicle_on_Middle"
set.seed(123)
BF3 <- BF(lm2, hypothesis = constraint2)
summary(BF3)
```
For more information about the methodology, we refer the interested reader to [Mulder and Olsson-Collentine (2019)](https://link.springer.com/article/10.3758/s13428-018-01196-9) and [Mulder and Gu (2021)](https://doi.org/10.1080/00273171.2021.1904809)
### Testing the effect size and between-study heterogeneity in a meta-analysis
For illustrative purposes we generate a hypothetical simulated dataset
``` r
set.seed(123)
tau2 <- 0.05
vi <- runif(50, min = 0.01, max = 0.2)
yi <- rnorm(50, mean = 0, sd = sqrt(vi+tau2))
```
where `tau2` denotes the true between-study heterogeneity, `vi` is a vector containing the squared standard errors of 50 studies, and `yi` is a vector containing the estimated effect sizes in the 50 studies. To test the overall effect size and the between-study heterogeneity using `BFpack`, an initial meta-analysis needs to be executed using the `metafor` package. Subsequently, the output is plugged into the `BF` function:
``` r
res <- metafor::rma(yi = yi, vi = vi)
BFmeta <- BF(res)
```
The `summary` output gives the posterior probabilities for a zero, negative, and positive between-study heterogeneity `I^2` and overall effect size `mu` assuming equal prior probabilities:
``` r
summary(BFmeta)
```
The results indicate evidence for positive between-study heterogeneity (suggesting that a random effects meta-analysis model is appropriate) and for a zero overall effect size.
The unconstrained estimates (posterior mean and median) and the lower and upper bound of the 95% Bayesian credible intervals can be obtained by calling:
``` r
BFmeta$estimates
```
For more information about the methodology, we refer the interested reader to [Van Aert and Mulder (2021)](http://doi.org/10.3758/s13423-021-01918-9/)
### Running `BF` on a named vector
The input for the `BF` function can also be a named vector containing
the estimates of the parameters of interest. In this case the error
covariance matrix of the estimates is also needed via the `Sigma`
argument, as well as the sample size that was used for obtaining the
estimates via the `n` argument. Bayes factors are then computed using
Gaussian approximations of the likelihood (and posterior), similar to a
classical Wald test.
We illustrate this for a Poisson regression
model
``` r
poisson1 <- glm(formula = breaks ~ wool + tension, data = datasets::warpbreaks,
family = poisson)
```
The estimates, the error covariance matrix, and the sample size are
extracted from the fitted model
``` r
estimates <- poisson1$coefficients
covmatrix <- vcov(poisson1)
samplesize <- nobs(poisson1)
```
Constrained hypotheses on the parameters `names(estimates)` can then be
tested as follows
``` r
BF1 <- BF(estimates, Sigma = covmatrix, n = samplesize, hypothesis =
"woolB > tensionM > tensionH; woolB = tensionM = tensionH")
```
Note that the same hypothesis test would be executed when calling
``` r
BF2 <- BF(poisson1, hypothesis = "woolB > tensionM > tensionH;
woolB = tensionM = tensionH")
```
because the same Bayes factor is used when running `BF` on an object of
class `glm` (see `Method: Bayes factor using Gaussian approximations`
when calling `print(BF1)` and `print(BF2)`).
For more information about the methodology, we refer the interested reader to [Gu et al. (2018)](https://bpspsychub.onlinelibrary.wiley.com/doi/full/10.1111/bmsp.12110?casa_token=tjWwoVqLI7QAAAAA%3Ac84DWKNw8pf23ybOgZTs1pzgyZMuCc-BeTaPFj8vnfnTytzHd0gr-D9ymVxYbKj_MTO1ITGFW8tGBZ1J)
|
/scratch/gouwar.j/cran-all/cranData/BFpack/inst/doc/vignette_BFpack.Rmd
|
# Convert ff_matrix into an S4 class
setOldClass("ff_matrix")
setClassUnion("geno", c("LinkedMatrix", "BEDMatrix", "big.matrix", "ff_matrix", "matrix"))
setClass("BGData", slots = c(geno = "geno", pheno = "data.frame", map = "data.frame"))
BGData <- function(geno, pheno = NULL, map = NULL) {
if (!is(geno, "geno")) {
stop("Only LinkedMatrix, BEDMatrix, big.matrix, ff_matrix, or regular matrix objects are allowed for geno.")
}
if (is.null(pheno)) {
if (is.null(rownames(geno))) {
sampleIDs <- paste0("sample_", seq_len(nrow(geno)))
} else {
sampleIDs <- rownames(geno)
}
pheno <- data.frame(sample_id = sampleIDs, row.names = sampleIDs, stringsAsFactors = FALSE)
}
if (is.null(map)) {
if (is.null(colnames(geno))) {
variantIDs <- paste0("variant_", seq_len(ncol(geno)))
} else {
variantIDs <- colnames(geno)
}
map <- data.frame(variant_id = variantIDs, row.names = variantIDs, stringsAsFactors = FALSE)
}
obj <- new("BGData", geno = geno, pheno = pheno, map = map)
return(obj)
}
setValidity("BGData", function(object) {
if (nrow(slot(object, "geno")) != nrow(slot(object, "pheno"))) {
return("Number of rows of geno and number of rows of pheno do not match.")
}
# Do not assume that geno has row names, but if it does, it should match
# the row names of pheno
if (!is.null(rownames(slot(object, "geno"))) && any(rownames(slot(object, "geno")) != rownames(slot(object, "pheno")))) {
warning("Row names of geno and row names of pheno do not match.")
}
if (ncol(slot(object, "geno")) != nrow(slot(object, "map"))) {
return("Number of columns of geno and number of rows of map do not match.")
}
# Do not assume that geno has column names, but if it does, it should match
# the row names of map
if (!is.null(colnames(slot(object, "geno"))) && any(colnames(slot(object, "geno")) != rownames(slot(object, "map")))) {
warning("Column names of geno and row names of map do not match.")
}
return(TRUE)
})
setGeneric("geno", function(x) standardGeneric("geno"))
setMethod("geno", "BGData", function(x) slot(x, "geno"))
setGeneric("geno<-", function(x, value) standardGeneric("geno<-"))
setMethod("geno<-", "BGData", function(x, value) {
slot(x, "geno") <- value
validObject(x)
x
})
setGeneric("pheno", function(x) standardGeneric("pheno"))
setMethod("pheno", "BGData", function(x) slot(x, "pheno"))
setGeneric("pheno<-", function(x, value) standardGeneric("pheno<-"))
setMethod("pheno<-", "BGData", function(x, value) {
slot(x, "pheno") <- value
validObject(x)
x
})
setGeneric("map", function(x) standardGeneric("map"))
setMethod("map", "BGData", function(x) slot(x, "map"))
setGeneric("map<-", function(x, value) standardGeneric("map<-"))
setMethod("map<-", "BGData", function(x, value) {
slot(x, "map") <- value
validObject(x)
x
})
pedDims <- function(fileIn, header, n, p, sep = "", nColSkip = 6L) {
if (is.null(n)) {
n <- getLineCount(fileIn, header)
}
if (header) {
headerLine <- getFileHeader(fileIn, sep)
p <- length(headerLine) - nColSkip
} else {
if (is.null(p)) {
p <- getColumnCount(fileIn, sep) - nColSkip
}
}
return(list(n = n, p = p))
}
parseRAW <- function(BGData, fileIn, header, dataType, nColSkip = 6L, idCol = c(1L, 2L), sep = "", na.strings = "NA", verbose = FALSE) {
p <- ncol(geno(BGData))
pedFile <- file(fileIn, open = "r")
# Update colnames
if (header) {
headerLine <- scan(pedFile, nlines = 1L, what = character(), sep = sep, quiet = TRUE)
# Suppress warnings here to not get in trouble with validity method
suppressWarnings(colnames(pheno(BGData)) <- headerLine[seq_len(nColSkip)])
suppressWarnings(colnames(geno(BGData)) <- headerLine[-(seq_len(nColSkip))])
suppressWarnings(rownames(map(BGData)) <- colnames(geno(BGData)))
}
# Parse file
for (i in seq_len(nrow(geno(BGData)))) {
xSkip <- scan(pedFile, n = nColSkip, what = character(), sep = sep, quiet = TRUE)
x <- scan(pedFile, n = p, what = dataType, sep = sep, na.strings = na.strings, quiet = TRUE)
pheno(BGData)[i, ] <- xSkip
geno(BGData)[i, ] <- x
if (verbose) {
message("Subject ", i, " / ", nrow(geno(BGData)))
}
}
close(pedFile)
# Update rownames
IDs <- apply(pheno(BGData)[, idCol, drop = FALSE], 1L, paste, collapse = "_")
rownames(pheno(BGData)) <- IDs
rownames(geno(BGData)) <- IDs
# Convert types in pheno
pheno(BGData)[] <- lapply(pheno(BGData), type.convert, as.is = TRUE)
return(BGData)
}
readRAW <- function(fileIn, header = TRUE, dataType = integer(), n = NULL, p = NULL, sep = "", na.strings = "NA", nColSkip = 6L, idCol = c(1L, 2L), nNodes = NULL, linked.by = "rows", folderOut = paste0("BGData_", sub("\\.[[:alnum:]]+$", "", basename(fileIn))), outputType = "byte", dimorder = if (linked.by == "rows") 2L:1L else 1L:2L, verbose = FALSE) {
# Create output directory
if (file.exists(folderOut)) {
stop(paste("Output folder", folderOut, "already exists. Please move it or pick a different one."))
}
dir.create(folderOut)
dims <- pedDims(fileIn = fileIn, header = header, n = n, p = p, sep = sep, nColSkip = nColSkip)
# Determine number of nodes
if (is.null(nNodes)) {
if (linked.by == "columns") {
chunkSize <- min(dims[["p"]], floor(.Machine[["integer.max"]] / dims[["n"]] / 1.2))
nNodes <- ceiling(dims[["p"]] / chunkSize)
} else {
chunkSize <- min(dims[["n"]], floor(.Machine[["integer.max"]] / dims[["p"]] / 1.2))
nNodes <- ceiling(dims[["n"]] / chunkSize)
}
} else {
if (linked.by == "columns") {
chunkSize <- ceiling(dims[["p"]] / nNodes)
if (chunkSize * dims[["n"]] >= .Machine[["integer.max"]] / 1.2) {
stop("More nodes are needed")
}
} else {
chunkSize <- ceiling(dims[["n"]] / nNodes)
if (chunkSize * dims[["p"]] >= .Machine[["integer.max"]] / 1.2) {
stop("More nodes are needed")
}
}
}
dataType <- normalizeType(dataType)
if (!typeof(dataType) %in% c("integer", "double")) {
stop("dataType must be either integer() or double()")
}
if (!linked.by %in% c("columns", "rows")) {
stop("linked.by must be either columns or rows")
}
# Prepare geno
geno <- LinkedMatrix(nrow = dims[["n"]], ncol = dims[["p"]], nNodes = nNodes, linkedBy = linked.by, nodeInitializer = ffNodeInitializer, vmode = outputType, folderOut = folderOut, dimorder = dimorder)
# Prepare pheno
pheno <- as.data.frame(matrix(nrow = dims[["n"]], ncol = nColSkip), stringsAsFactors = FALSE)
# Construct BGData object
BGData <- BGData(geno = geno, pheno = pheno)
# Parse .raw file
BGData <- parseRAW(BGData = BGData, fileIn = fileIn, header = header, dataType = dataType, nColSkip = nColSkip, idCol = idCol, sep = sep, na.strings = na.strings, verbose = verbose)
# Save BGData object
attr(BGData, "origFile") <- list(path = fileIn, dataType = typeof(dataType))
attr(BGData, "dateCreated") <- date()
save(BGData, file = paste0(folderOut, "/BGData.RData"))
return(BGData)
}
readRAW_matrix <- function(fileIn, header = TRUE, dataType = integer(), n = NULL, p = NULL, sep = "", na.strings = "NA", nColSkip = 6L, idCol = c(1L, 2L), verbose = FALSE) {
dims <- pedDims(fileIn = fileIn, header = header, n = n, p = p, sep = sep, nColSkip = nColSkip)
dataType <- normalizeType(dataType)
# Prepare geno
geno <- matrix(nrow = dims[["n"]], ncol = dims[["p"]])
# Prepare pheno
pheno <- as.data.frame(matrix(nrow = dims[["n"]], ncol = nColSkip), stringsAsFactors = FALSE)
# Construct BGData object
BGData <- BGData(geno = geno, pheno = pheno)
# Parse .raw file
BGData <- parseRAW(BGData = BGData, fileIn = fileIn, header = header, dataType = dataType, nColSkip = nColSkip, idCol = idCol, sep = sep, na.strings = na.strings, verbose = verbose)
return(BGData)
}
readRAW_big.matrix <- function(fileIn, header = TRUE, dataType = integer(), n = NULL, p = NULL, sep = "", na.strings = "NA", nColSkip = 6L, idCol = c(1L, 2L), folderOut = paste0("BGData_", sub("\\.[[:alnum:]]+$", "", basename(fileIn))), outputType = "char", verbose = FALSE) {
if (file.exists(folderOut)) {
stop(paste("Output folder", folderOut, "already exists. Please move it or pick a different one."))
}
dataType <- normalizeType(dataType)
if (!typeof(dataType) %in% c("integer", "double")) {
stop("dataType must be either integer() or double()")
}
dims <- pedDims(fileIn = fileIn, header = header, n = n, p = p, sep = sep, nColSkip = nColSkip)
options(bigmemory.typecast.warning = FALSE)
options(bigmemory.allow.dimnames = TRUE)
# Create output directory
dir.create(folderOut)
# Prepare geno
geno <- filebacked.big.matrix(nrow = dims[["n"]], ncol = dims[["p"]], type = outputType, backingpath = folderOut, backingfile = "BGData.bin", descriptorfile = "BGData.desc")
# Prepare pheno
pheno <- as.data.frame(matrix(nrow = dims[["n"]], ncol = nColSkip), stringsAsFactors = FALSE)
# Construct BGData object
BGData <- BGData(geno = geno, pheno = pheno)
# Parse .raw file
BGData <- parseRAW(BGData = BGData, fileIn = fileIn, header = header, dataType = dataType, nColSkip = nColSkip, idCol = idCol, sep = sep, na.strings = na.strings, verbose = verbose)
# Save BGData object
attr(BGData, "origFile") <- list(path = fileIn, dataType = typeof(dataType))
attr(BGData, "dateCreated") <- date()
save(BGData, file = paste0(folderOut, "/BGData.RData"))
return(BGData)
}
loadFamFile <- function(path) {
if (!file.exists(path)) {
stop(path, " not found")
}
message("Extracting phenotypes from .fam file...")
# It was considered to read the PHENOTYPE column as double, but the PLINK
# documentation mentions non-numeric case/control values.
if (requireNamespace("data.table", quietly = TRUE)) {
pheno <- data.table::fread(path, col.names = c(
"FID",
"IID",
"PAT",
"MAT",
"SEX",
"PHENOTYPE"
), colClasses = "character", data.table = FALSE, showProgress = FALSE)
} else {
pheno <- read.table(path, col.names = c(
"FID",
"IID",
"PAT",
"MAT",
"SEX",
"PHENOTYPE"
), colClasses = "character", stringsAsFactors = FALSE)
}
return(pheno)
}
generatePheno <- function(x) {
# Extract path to .bed file
bedPath <- attr(x, "path")
# Try to load .fam file, generate pheno otherwise
ex <- try({
pheno <- loadFamFile(sub("\\.bed", "\\.fam", bedPath))
}, silent = TRUE)
if (inherits(ex, "try-error")) {
# x may not have rownames (e.g., when a BEDMatrix is created using the
# n parameter)
if (is.null(rownames(x))) {
pheno <- data.frame(FID = "0", IID = as.character(1:nrow(x)), stringsAsFactors = FALSE)
} else {
# Make no assumptions about the structure of the rownames of x
# here, i.e., do not try to extract FID and IID.
pheno <- data.frame(FID = "0", IID = rownames(x), stringsAsFactors = FALSE)
}
}
# Preserve rownames of x (if not NULL)
rownames(pheno) <- rownames(x)
return(pheno)
}
loadBimFile <- function(path) {
if (!file.exists(path)) {
stop(path, " not found")
}
message("Extracting map from .bim file...")
if (requireNamespace("data.table", quietly = TRUE)) {
map <- data.table::fread(path, col.names = c(
"chromosome",
"snp_id",
"genetic_distance",
"base_pair_position",
"allele_1",
"allele_2"
), colClasses = c(
"character",
"character",
"double",
"integer",
"character",
"character"
), data.table = FALSE, showProgress = FALSE)
} else {
map <- read.table(path, col.names = c(
"chromosome",
"snp_id",
"genetic_distance",
"base_pair_position",
"allele_1",
"allele_2"
), colClasses = c(
"character",
"character",
"double",
"integer",
"character",
"character"
), stringsAsFactors = FALSE)
}
return(map)
}
generateMap <- function(x) {
# Extract path to .bed file
bedPath <- attr(x, "path")
# Try to load .bim file, generate map otherwise
ex <- try({
map <- loadBimFile(sub("\\.bed", "\\.bim", bedPath))
}, silent = TRUE)
if (inherits(ex, "try-error")) {
# x may not have colnames (e.g., when a BEDMatrix is created using the
# p parameter)
if (is.null(colnames(x))) {
map <- data.frame(snp_id = as.character(1:ncol(x)), stringsAsFactors = FALSE)
} else {
# Make no assumptions about the structure of the colnames of x
# here, i.e., do not try to extract minor allele.
map <- data.frame(snp_id = colnames(x), stringsAsFactors = FALSE)
}
}
# Preserve colnames of x (if not NULL)
rownames(map) <- colnames(x)
return(map)
}
loadAlternatePhenotypeFile <- function(path, ...) {
if (!file.exists(path)) {
stop("Alternate phenotype file does not exist.")
} else {
message("Merging alternate phenotype file...")
if (requireNamespace("data.table", quietly = TRUE)) {
alternatePhenotypes <- data.table::fread(path, colClasses = list(
character = 1:2
), data.table = FALSE, showProgress = FALSE, ...)
} else {
# Check if the file has a header, i.e. if the first row starts with
# an FID and an IID entry
hasHeader = FALSE
if (grepl("FID\\s+IID", readLines(path, n = 1L))) {
hasHeader = TRUE
}
alternatePhenotypes <- read.table(path, header = hasHeader, stringsAsFactors = FALSE, ...)
alternatePhenotypes[[1]] <- as.character(alternatePhenotypes[[1]]) # FID
alternatePhenotypes[[2]] <- as.character(alternatePhenotypes[[2]]) # IID
}
}
return(alternatePhenotypes)
}
orderedMerge <- function(x, y, by = c(1L, 2L)) {
# Add artificial sort column to preserve order after merging
# (merge's `sort = FALSE` order is unspecified)
x[[".sortColumn"]] <- seq_len(nrow(x))
# Merge phenotypes and alternate phenotypes
merged <- merge(x, y, by = by, all.x = TRUE)
# Reorder phenotypes to match original order and delete artificial
# column
merged <- merged[order(merged[[".sortColumn"]]), ]
merged <- merged[, names(merged) != ".sortColumn"]
# Restore rownames (assuming order is retained and no rows disappear...)
rownames(merged) <- rownames(x)
return(merged)
}
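# Illustrative sketch (toy data, not from the package documentation): how
# orderedMerge() keeps the row order and rownames of `x` while merging in
# additional phenotype columns by the first two columns (FID and IID).
if (FALSE) {
    fam <- data.frame(FID = "0", IID = c("s3", "s1", "s2"),
        stringsAsFactors = FALSE)
    extra <- data.frame(FID = "0", IID = c("s1", "s2", "s3"),
        height = c(1.6, 1.7, 1.8), stringsAsFactors = FALSE)
    merged <- orderedMerge(fam, extra)
    stopifnot(identical(merged[["IID"]], fam[["IID"]]))
    merged[["height"]]   # 1.8, 1.6, 1.7 -- reordered to match fam
}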
as.BGData <- function(x, alternatePhenotypeFile = NULL, ...) {
UseMethod("as.BGData")
}
as.BGData.BEDMatrix <- function(x, alternatePhenotypeFile = NULL, ...) {
# Read in pheno file
fam <- generatePheno(x)
# Read in map file
map <- generateMap(x)
# Load and merge alternate phenotype file
if (!is.null(alternatePhenotypeFile)) {
alternatePhenotypes <- loadAlternatePhenotypeFile(alternatePhenotypeFile, ...)
fam <- orderedMerge(fam, alternatePhenotypes)
}
BGData(geno = x, pheno = fam, map = map)
}
as.BGData.ColumnLinkedMatrix <- function(x, alternatePhenotypeFile = NULL, ...) {
n <- nNodes(x)
# For now, all elements have to be of type BEDMatrix
if (!all(vapply(x, inherits, TRUE, "BEDMatrix"))) {
stop("Only BEDMatrix instances are supported as elements of the LinkedMatrix right now.")
}
# Read in the fam file of the first node
message("Extracting phenotypes from .fam file, assuming that the .fam file of the first BEDMatrix instance is representative of all the other nodes...")
fam <- suppressMessages(generatePheno(x[[1L]]))
# Read in map files
message("Extracting map from .bim files...")
map <- do.call(rbind, lapply(x, function(node) {
suppressMessages(generateMap(node))
}))
# Load and merge alternate phenotype file
if (!is.null(alternatePhenotypeFile)) {
alternatePhenotypes <- loadAlternatePhenotypeFile(alternatePhenotypeFile, ...)
fam <- orderedMerge(fam, alternatePhenotypes)
}
BGData(geno = x, pheno = fam, map = map)
}
as.BGData.RowLinkedMatrix <- function(x, alternatePhenotypeFile = NULL, ...) {
n <- nNodes(x)
# For now, all elements have to be of type BEDMatrix
if (!all(vapply(x, inherits, TRUE, "BEDMatrix"))) {
stop("Only BEDMatrix instances are supported as elements of the LinkedMatrix right now.")
}
# Read in the fam files
message("Extracting phenotypes from .fam files...")
fam <- do.call(rbind, lapply(x, function(node) {
suppressMessages(generatePheno(node))
}))
# Read in the map file of the first node
message("Extracting map from .bim file, assuming that the .bim file of the first BEDMatrix instance is representative of all the other nodes...")
map <- suppressMessages(generateMap(x[[1L]]))
# Load and merge alternate phenotype file
if (!is.null(alternatePhenotypeFile)) {
alternatePhenotypes <- loadAlternatePhenotypeFile(alternatePhenotypeFile, ...)
fam <- orderedMerge(fam, alternatePhenotypes)
}
BGData(geno = x, pheno = fam, map = map)
}
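# Illustrative usage sketch (file names are hypothetical; the geno(), pheno(),
# and map() accessors are assumed from their use elsewhere in this package):
# wrap existing PLINK files in a BEDMatrix and convert them into a BGData
# object, optionally merging an alternate phenotype file.
if (FALSE) {
    library(BEDMatrix)
    m <- BEDMatrix("mydata.bed")    # expects mydata.bed/.bim/.fam
    bg <- as.BGData(m, alternatePhenotypeFile = "pheno.txt")
    str(pheno(bg))
    str(map(bg))
}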
load.BGData <- function(file, envir = parent.frame()) {
# Load data into new environment
loadingEnv <- new.env()
load(file = file, envir = loadingEnv)
names <- ls(envir = loadingEnv)
for (name in names) {
object <- get(name, envir = loadingEnv)
# Initialize genotypes of BGData objects
if (inherits(object, "BGData")) {
geno(object) <- initializeGeno(geno(object), path = dirname(file))
}
# Assign object to envir
assign(name, object, envir = envir)
}
message("Loaded objects: ", paste0(names, collapse = ", "))
}
initializeGeno <- function(x, ...) {
UseMethod("initializeGeno")
}
initializeGeno.LinkedMatrix <- function(x, path, ...) {
for (i in seq_len(nNodes(x))) {
x[[i]] <- initializeGeno(x[[i]], path = path)
}
return(x)
}
# Absolute paths to ff files are not stored, so the ff objects have to be
# loaded from the same directory as the RData file.
initializeGeno.ff_matrix <- function(x, path, ...) {
# Store current working directory and set working directory to path
cwd <- getwd()
setwd(path)
# Open ff object
open(x)
# Restore the working directory
setwd(cwd)
return(x)
}
initializeGeno.big.matrix <- function(x, path, ...) {
return(attach.big.matrix(paste0(path, "/BGData.desc")))
}
initializeGeno.BEDMatrix <- function(x, ...) {
dnames <- attr(x, "dnames")
dims <- attr(x, "dims")
path <- attr(x, "path")
x <- BEDMatrix(path = path, n = dims[1L], p = dims[2L])
dimnames(x) <- dnames
return(x)
}
initializeGeno.default <- function(x, ...) {
return(x)
}
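# Illustrative sketch (the folder name is hypothetical): reopening a file-backed
# BGData object saved by one of the readRAW* functions in a fresh session.
# load.BGData() loads the RData file and re-initializes the geno slot via
# initializeGeno(), so ff, big.matrix, and BEDMatrix backends are re-attached
# relative to the directory that holds the RData file.
if (FALSE) {
    load.BGData("BGData_mydata/BGData.RData")
    dim(geno(BGData))    # the object saved by readRAW* is named BGData
}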
ffNodeInitializer <- function(nodeIndex, nrow, ncol, vmode, folderOut, ...) {
filename <- paste0("geno_", nodeIndex, ".bin")
node <- ff(dim = c(nrow, ncol), vmode = vmode, filename = paste0(folderOut, "/", filename), ...)
# Change ff path to a relative one
physical(node)[["filename"]] <- filename
return(node)
}
|
/scratch/gouwar.j/cran-all/cranData/BGData/R/BGData.R
|
FWD <- function(y, X, df = 20, tol = 1e-7, maxIter = 1000, centerImpute = TRUE, verbose = TRUE) {
y <- y - mean(y)
if (centerImpute) {
X <- BGData::preprocess(X, center = TRUE, impute = TRUE)
}
if (is.null(colnames(X))) {
colNames <- paste0("X", 1:ncol(X))
} else {
colNames <- colnames(X)
}
X <- cbind(1, X)
df <- df + 1
colNames <- c("Int", colNames)
C <- crossprod(X)
rhs <- crossprod(X, y)
n <- length(y)
p <- ncol(X)
active <- rep(FALSE, p)
names(active) <- colNames
B <- matrix(data = 0, nrow = p, ncol = df)
rownames(B) <- colNames
RSS <- rep(NA_real_, df)
DF <- rep(NA_real_, df)
VARE <- rep(NA_real_, df)
LogLik <- rep(NA_real_, df)
AIC <- rep(NA_real_, df)
BIC <- rep(NA_real_, df)
path <- rep(NA_character_, df)
active[1] <- TRUE
B[1, 1] <- mean(y)
RSS[1] <- sum((y - B[1, 1])^2)
DF[1] <- 1
VARE[1] <- RSS[1] / (n - DF[1])
LogLik[1] <- -(n / 2) * log(2 * pi * VARE[1]) - RSS[1] / (2 * VARE[1])
AIC[1] <- -2 * LogLik[1] + 2 * DF[1]
BIC[1] <- -2 * LogLik[1] + log(n) * (DF[1] + 1)
path[1] <- colNames[1]
tol <- tol * RSS[1]
for (i in 2:df) {
tmp <- addOne(
C = C,
rhs = rhs,
active = active,
b = B[, i - 1],
RSS = RSS[i - 1],
maxIter = maxIter,
tol = tol
)
B[, i] <- tmp[["b"]]
if (length(tmp[["newPred"]]) > 0) {
active[tmp[["newPred"]]] <- TRUE
path[i] <- colNames[tmp[["newPred"]]]
} else {
path[i] <- NA
}
RSS[i] <- tmp[["RSS"]]
DF[i] <- sum(active)
VARE[i] <- RSS[i] / (n - DF[i])
LogLik[i] <- -(n / 2) * log(2 * pi * VARE[i]) - RSS[i] / VARE[i] / 2
AIC[i] <- -2 * LogLik[i] + 2 * (DF[i] + 1)
BIC[i] <- -2 * LogLik[i] + log(n) * (DF[i] + 1)
if (verbose) {
message(" ", DF[i] - 1, " predictors, AIC=", round(AIC[i], 2))
}
}
OUT <- list(
B = B,
path = data.frame(
variable = path,
RSS = RSS,
LogLik = LogLik,
VARE = VARE,
DF = DF,
AIC = AIC,
BIC = BIC
)
)
return(OUT)
}
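# Illustrative sketch (simulated data, not from the package documentation): run
# the forward-selection path of FWD() on a small simulated design and inspect
# the information criteria along the path.
if (FALSE) {
    set.seed(1)
    X <- matrix(rnorm(100 * 50), nrow = 100)
    y <- X[, 1] - 0.5 * X[, 2] + rnorm(100)
    fm <- FWD(y = y, X = X, df = 5, verbose = FALSE)
    fm[["path"]]                   # variable added, RSS, logLik, AIC, BIC per step
    fm[["B"]][, ncol(fm[["B"]])]   # coefficients at the end of the path
}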
addOne <- function(C, rhs, active, b, RSS, maxIter, tol) {
activeSet <- which(active)
inactiveSet <- which(!active)
nActive <- length(activeSet)
nInactive <- length(inactiveSet)
# if model is not null
if (nActive > 1) {
RSSNew <- rep(NA_real_, nInactive)
for (i in 1:nInactive) {
fm <- fitSYS(
C = C,
rhs = rhs,
b = b,
active = c(inactiveSet[i], activeSet),
RSS = RSS,
maxIter = maxIter,
tol = tol
)
RSSNew[i] <- fm[["RSS"]]
}
k <- which.min(RSSNew)
fm <- fitSYS(
C = C,
rhs = rhs,
b = b,
active = c(inactiveSet[k], activeSet),
RSS = RSS,
maxIter = maxIter,
tol = tol
)
ans <- list(b = fm[["b"]], newPred = inactiveSet[k], RSS = fm[["RSS"]])
# if model is null
} else {
bOLS <- rhs / diag(C)
dRSS <- diag(C) * bOLS^2
k <- which.max(dRSS)
b[k] <- bOLS[k]
RSS <- RSS - bOLS[k]^2 * C[k, k]
ans <- list(b = b, newPred = k, RSS = RSS)
}
return(ans)
}
fitSYS <- function(C, rhs, b, active, RSS, maxIter, tol) {
active <- active - 1L # for the 0-based index
ans <- .Call(C_fitLSYS, C, rhs, b, active, RSS, maxIter, tol)
return(list(b = ans[[1]], RSS = ans[[2]]))
}
|
/scratch/gouwar.j/cran-all/cranData/BGData/R/FWD.R
|
GWAS <- function(formula, data, method = "lsfit", i = seq_len(nrow(geno(data))), j = seq_len(ncol(geno(data))), chunkSize = 5000L, nCores = getOption("mc.cores", 2L), verbose = FALSE, ...) {
if (!inherits(data, "BGData")) {
stop("data must BGData")
}
if (!method %in% c("rayOLS", "lsfit", "lm", "lm.fit", "glm", "lmer", "SKAT")) {
stop("Only rayOLS, lsfit, lm, lm.fit, glm, lmer, and SKAT have been implemented so far.")
}
i <- convertIndex(geno(data), i, "i")
j <- convertIndex(geno(data), j, "j")
if (method == "rayOLS") {
if (length(labels(terms(formula))) > 0L) {
stop("method rayOLS can only be used with y~1 formula, if you want to add covariates pre-adjust your phenotype.")
}
OUT <- GWAS.rayOLS(formula = formula, data = data, i = i, j = j, chunkSize = chunkSize, nCores = nCores, verbose = verbose, ...)
} else if (method == "lsfit") {
OUT <- GWAS.lsfit(formula = formula, data = data, i = i, j = j, chunkSize = chunkSize, nCores = nCores, verbose = verbose, ...)
} else if (method == "SKAT") {
if (!requireNamespace("SKAT", quietly = TRUE)) {
stop("SKAT needed for this function to work. Please install it.", call. = FALSE)
}
OUT <- GWAS.SKAT(formula = formula, data = data, i = i, j = j, verbose = verbose, ...)
} else {
if (method == "lmer") {
if (!requireNamespace("lme4", quietly = TRUE)) {
stop("lme4 needed for this function to work. Please install it.", call. = FALSE)
}
FUN <- lme4::lmer
} else {
FUN <- match.fun(method)
}
GWAS.model <- update(formula, ".~z+.")
OUT <- chunkedApply(X = geno(data), MARGIN = 2L, FUN = function(col, ...) {
df <- pheno(data)[i, , drop = FALSE]
df[["z"]] <- col
fm <- FUN(GWAS.model, data = df, ...)
getCoefficients(fm)
}, i = i, j = j, chunkSize = chunkSize, nCores = nCores, verbose = verbose, ...)
OUT <- t(OUT)
rownames(OUT) <- colnames(geno(data))[j]
}
return(OUT)
}
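# Illustrative usage sketch (`bg` is the hypothetical BGData object from the
# as.BGData() sketch above and `y` a hypothetical phenotype column): the fast
# rayOLS method regresses the phenotype on one marker at a time; covariates
# must be pre-adjusted, whereas lsfit/lm/glm/lmer accept them in the formula.
if (FALSE) {
    res <- GWAS(formula = y ~ 1, data = bg, method = "rayOLS", nCores = 1L)
    head(res)   # Estimate, Std.Err, t-value, Pr(>|t|), n, allele_freq per marker
}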
GWAS.rayOLS <- function(formula, data, i = seq_len(nrow(geno(data))), j = seq_len(ncol(geno(data))), chunkSize = 5000L, nCores = getOption("mc.cores", 2L), verbose = FALSE, ...) {
y <- pheno(data)[i, getResponse(formula)]
y <- as.numeric(y)
res <- chunkedMap(X = geno(data), FUN = rayOLS, i = i, j = j, chunkSize = chunkSize, nCores = nCores, verbose = verbose, y = y, ...)
res <- do.call(rbind, res)
colnames(res) <- c("Estimate", "Std.Err", "t-value", "Pr(>|t|)", "n", "allele_freq")
rownames(res) <- colnames(geno(data))[j]
return(res)
}
GWAS.lsfit <- function(formula, data, i = seq_len(nrow(geno(data))), j = seq_len(ncol(geno(data))), chunkSize = 5000L, nCores = getOption("mc.cores", 2L), verbose = FALSE, ...) {
# The subset argument of model.frame is evaluated in the environment of the
# formula, therefore subset after building the frame.
frame <- model.frame(formula = formula, data = pheno(data), na.action = na.pass)[i, , drop = FALSE]
model <- model.matrix(formula, frame)
y <- pheno(data)[i, getResponse(formula)]
res <- chunkedApply(X = geno(data), MARGIN = 2L, FUN = function(col, ...) {
fm <- lsfit(x = cbind(col, model), y = y, intercept = FALSE)
ls.print(fm, print.it = FALSE)[["coef.table"]][[1L]][1L, ]
}, i = i, j = j, chunkSize = chunkSize, nCores = nCores, verbose = verbose, ...)
res <- t(res)
rownames(res) <- colnames(geno(data))[j]
return(res)
}
# formula: the formula for the GWAS model without including the markers, e.g.
#   y~1 or y~factor(sex)+age; all the variables in the formula must be in the
#   phenotypes of data
# data: a BGData object containing the phenotypes and genotypes
# groups: a vector mapping markers into groups (can be integer, character or
#   factor)
GWAS.SKAT <- function(formula, data, groups, i = seq_len(nrow(geno(data))), j = seq_len(ncol(geno(data))), verbose = FALSE, ...) {
uniqueGroups <- unique(groups)
OUT <- matrix(data = double(), nrow = length(uniqueGroups), ncol = 2L)
colnames(OUT) <- c("nMrk", "p-value")
rownames(OUT) <- uniqueGroups
H0 <- SKAT::SKAT_Null_Model(formula, data = pheno(data)[i, , drop = FALSE], ...)
for (group in seq_along(uniqueGroups)) {
Z <- geno(data)[i, groups == uniqueGroups[group], drop = FALSE]
fm <- SKAT::SKAT(Z = Z, obj = H0, ...)
OUT[group, ] <- c(ncol(Z), fm[["p.value"]])
if (verbose) {
message("Group ", group, " of ", length(uniqueGroups), " ...")
}
}
return(OUT)
}
rayOLS <- function(x, y) {
.Call(C_rayOLS, x, y)
}
getCoefficients <- function(x) {
UseMethod("getCoefficients")
}
getCoefficients.lm <- function(x) {
coef(summary(x))[2L, ]
}
getCoefficients.glm <- function(x) {
coef(summary(x))[2L, ]
}
getCoefficients.lmerMod <- function(x) {
ans <- coef(summary(x))[2L, ]
ans <- c(ans, c(1L - pnorm(ans[3L])))
return(ans)
}
getResponse <- function(formula) {
# Extract component from parse tree (see https://cran.r-project.org/doc/manuals/r-release/R-lang.html#Language-objects)
sym <- formula[[2L]]
# Convert symbol to character
as.character(sym)
}
|
/scratch/gouwar.j/cran-all/cranData/BGData/R/GWAS.R
|
chunkedMap <- function(X, FUN, i = seq_len(nrow(X)), j = seq_len(ncol(X)), chunkBy = 2L, chunkSize = 5000L, nCores = getOption("mc.cores", 2L), verbose = FALSE, ...) {
if (length(dim(X)) != 2L) {
stop("X must be a matrix-like object")
}
i <- convertIndex(X, i, "i")
j <- convertIndex(X, j, "j")
dim <- c(length(i), length(j))
if (is.null(chunkSize)) {
chunkSize <- dim[chunkBy]
nChunks <- 1L
} else {
nChunks <- ceiling(dim[chunkBy] / chunkSize)
}
chunkApply <- function(curChunk, ...) {
if (verbose) {
if (nCores > 1) {
message("Process ", Sys.getpid(), ": Chunk ", curChunk, " of ", nChunks, " ...")
} else {
message("Chunk ", curChunk, " of ", nChunks, " ...")
}
}
range <- seq(
((curChunk - 1L) * chunkSize) + 1L,
min(curChunk * chunkSize, dim[chunkBy])
)
if (chunkBy == 2L) {
chunk <- X[i, j[range], drop = FALSE]
} else {
chunk <- X[i[range], j, drop = FALSE]
}
FUN(chunk, ...)
}
if (nCores == 1L) {
res <- lapply(X = seq_len(nChunks), FUN = chunkApply, ...)
} else {
# Suppress warnings because of custom error handling
res <- suppressWarnings(mclapply(X = seq_len(nChunks), FUN = chunkApply, ..., mc.cores = nCores))
errors <- which(vapply(res, inherits, TRUE, "try-error"))
if (length(errors) > 0L) {
# With mc.preschedule = TRUE (the default), if a job fails, the
# remaining jobs will fail as well with the same error message.
# Therefore, the number of errors does not tell how many errors
# actually occurred and only the first error message is forwarded.
errorMessage <- attr(res[[errors[1L]]], "condition")[["message"]]
stop("in chunk ", errors[1L], " (only first error is shown)", ": ", errorMessage, call. = FALSE)
}
}
return(res)
}
chunkedApply <- function(X, MARGIN, FUN, i = seq_len(nrow(X)), j = seq_len(ncol(X)), chunkSize = 5000L, nCores = getOption("mc.cores", 2L), verbose = FALSE, ...) {
res <- chunkedMap(X = X, FUN = function(chunk, ...) {
apply2(X = chunk, MARGIN = MARGIN, FUN = FUN, ...)
}, i = i, j = j, chunkBy = MARGIN, chunkSize = chunkSize, nCores = nCores, verbose = verbose, ...)
simplifyList(res)
}
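# Illustrative sketch (plain in-memory matrix): chunkedApply() behaves like
# apply() over rows or columns but streams X in chunks; a small chunkSize is
# used here only to force several chunks.
if (FALSE) {
    X <- matrix(rnorm(1000), nrow = 100, ncol = 10)
    cm <- chunkedApply(X, MARGIN = 2L, FUN = mean, chunkSize = 3L, nCores = 1L)
    stopifnot(all.equal(unname(cm), colMeans(X)))
}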
# A more memory-efficient version of apply.
#
# apply always makes a copy of the data.
apply2 <- function(X, MARGIN, FUN, ...) {
d <- dim(X)
if (MARGIN == 1L) {
subset <- X[1L, ]
} else {
subset <- X[, 1L]
}
sample <- FUN(subset, ...)
if (is.table(sample)) {
stop("tables are not supported.")
} else if (is.list(sample)) {
# List
OUT <- vector(mode = "list", length = d[MARGIN])
names(OUT) <- dimnames(X)[[MARGIN]]
OUT[[1L]] <- sample
if (d[MARGIN] > 1L) {
for (i in seq(2L, d[MARGIN])) {
if (MARGIN == 1L) {
subset <- X[i, ]
} else {
subset <- X[, i]
}
OUT[[i]] <- FUN(subset, ...)
}
}
} else {
if (length(sample) > 1L) {
# Matrix or atomic vector of length > 1
OUT <- matrix(data = normalizeType(typeof(sample)), nrow = length(sample), ncol = d[MARGIN])
if (!is.matrix(sample) && !is.null(names(sample))) {
if (MARGIN == 1L) {
dimnames(OUT) <- list(NULL, names(sample))
} else {
dimnames(OUT) <- list(names(sample), NULL)
}
}
OUT[, 1L] <- sample
if (d[MARGIN] > 1L) {
for (i in seq(2L, d[MARGIN])) {
if (MARGIN == 1L) {
subset <- X[i, ]
} else {
subset <- X[, i]
}
OUT[, i] <- FUN(subset, ...)
}
}
} else {
# Atomic vector of length 1
OUT <- vector(mode = typeof(sample), length = d[MARGIN])
names(OUT) <- dimnames(X)[[MARGIN]]
OUT[1L] <- sample
if (d[MARGIN] > 1L) {
for (i in seq(2L, d[MARGIN])) {
if (MARGIN == 1L) {
subset <- X[i, ]
} else {
subset <- X[, i]
}
OUT[i] <- FUN(subset, ...)
}
}
}
}
return(OUT)
}
simplifyList <- function(x) {
sample <- x[[1L]]
if (is.matrix(sample)) {
x <- matrix(data = unlist(x), nrow = nrow(sample), byrow = FALSE)
rownames(x) <- rownames(sample)
} else {
x <- unlist(x)
}
return(x)
}
|
/scratch/gouwar.j/cran-all/cranData/BGData/R/chunkedApply.R
|
findRelated <- function(x, ...) {
UseMethod("findRelated")
}
findRelated.matrix <- function(x, cutoff = 0.03, ...) {
x[lower.tri(x, diag = TRUE)] <- 0
pairs <- which(x > cutoff, arr.ind = TRUE, useNames = FALSE)
samples <- unique(pairs[, 1L])
rownames(x)[samples]
}
findRelated.symDMatrix <- function(x, cutoff = 0.03, verbose = FALSE, ...) {
n <- nBlocks(x)
pairs <- lapply(seq_len(n), function(i) {
lapply(seq(i, n), function(j) {
if (verbose) {
message("Working on block ", i, " ", j)
}
block <- x[[i]][[j]][]
# Remove lower triangle in blocks that contain the diagonal
if (i == j) {
block[lower.tri(block, diag = TRUE)] <- 0
}
pairs <- which(block > cutoff, arr.ind = TRUE, useNames = FALSE)
# Remap local indices to sample names
remap <- matrix(character(), nrow = nrow(pairs), ncol = ncol(pairs))
remap[, 1L] <- rownames(block)[pairs[, 1L]]
remap[, 2L] <- colnames(block)[pairs[, 2L]]
return(remap)
})
})
pairs <- do.call(rbind, lapply(pairs, function(x) do.call(rbind, x)))
unique(pairs[, 1L])
}
|
/scratch/gouwar.j/cran-all/cranData/BGData/R/findRelated.R
|
padDigits <- function(x, total) {
formatC(x, width = as.integer(log10(total) + 1L), format = "d", flag = "0")
}
getG <- function(X, center = TRUE, scale = TRUE, impute = TRUE, scaleG = TRUE, minVar = 1e-05, i = seq_len(nrow(X)), j = seq_len(ncol(X)), i2 = NULL, chunkSize = 5000L, nCores = getOption("mc.cores", 2L), verbose = FALSE) {
# compute XY' rather than XX'
hasY <- !is.null(i2)
if (hasY) {
if (is.logical(center) && center == TRUE) {
stop("centers need to be precomputed.")
}
if (is.logical(scale) && scale == TRUE) {
stop("scales need to be precomputed.")
}
}
i <- convertIndex(X, i, "i")
j <- convertIndex(X, j, "j")
if (hasY) {
i2 <- convertIndex(X, i2, "i")
}
nX <- nrow(X)
pX <- ncol(X)
if (min(i) < 1L || max(i) > nX) {
stop("Index out of bounds")
}
if (min(j) < 1L || max(j) > pX) {
stop("Index out of bounds")
}
if (hasY) {
if (min(i2) < 1L || max(i2) > nX) {
stop("Index out of bounds")
}
}
n <- length(i)
p <- length(j)
if (hasY) {
n2 <- length(i2)
}
if (is.null(chunkSize)) {
chunkSize <- p
nChunks <- 1L
} else {
nChunks <- ceiling(p / chunkSize)
}
if (hasY) {
G <- big.matrix(nrow = n, ncol = n2, type = "double", init = 0.0, dimnames = list(rownames(X)[i], rownames(X)[i2]))
} else {
G <- big.matrix(nrow = n, ncol = n, type = "double", init = 0.0, dimnames = list(rownames(X)[i], rownames(X)[i]))
}
mutex <- boost.mutex()
chunkApply <- function(curChunk) {
if (verbose) {
if (nCores > 1) {
message("Process ", Sys.getpid(), ": Chunk ", curChunk, " of ", nChunks, " ...")
} else {
message("Chunk ", curChunk, " of ", nChunks, " ...")
}
}
# subset
range <- seq(
((curChunk - 1L) * chunkSize) + 1L,
min(curChunk * chunkSize, p)
)
X1 <- X[i, j[range], drop = FALSE]
if (hasY) {
X2 <- X[i2, j[range], drop = FALSE]
}
# compute centers
if (is.logical(center) && center == TRUE) {
center.chunk <- colMeans(X1, na.rm = TRUE)
} else if (is.numeric(center)) {
center.chunk <- center[j[range]]
} else {
center.chunk = FALSE
}
# compute scales
if (is.logical(scale) && scale == TRUE) {
scale.chunk <- apply(X = X1, MARGIN = 2L, FUN = sd, na.rm = TRUE)
} else if (is.numeric(scale)) {
scale.chunk <- scale[j[range]]
} else {
scale.chunk <- FALSE
}
# remove constant columns
if (is.numeric(scale.chunk)) {
removeCols <- which(scale.chunk < minVar)
if (length(removeCols) > 0L) {
X1 <- X1[, -removeCols]
if (hasY) {
X2 <- X2[, -removeCols]
}
scale.chunk <- scale.chunk[-removeCols]
center.chunk <- center.chunk[-removeCols]
}
}
p <- ncol(X1)
# compute XX'
if (p > 0L) {
# center, scale and impute without duplications
# set nCores to 1 here because section is already parallelized
X1 <- preprocess(X1, center = center.chunk, scale = scale.chunk, impute = impute, nCores = 1)
if (hasY) {
X2 <- preprocess(X2, center = center.chunk, scale = scale.chunk, impute = impute, nCores = 1)
}
if (hasY) {
G_chunk <- tcrossprod(x = X1, y = X2)
} else {
G_chunk <- tcrossprod(X1)
}
lock(mutex)
G[] <- G[] + G_chunk
unlock(mutex)
}
return(p)
}
if (nCores == 1L) {
res <- lapply(X = seq_len(nChunks), FUN = chunkApply)
} else {
res <- mclapply(X = seq_len(nChunks), FUN = chunkApply, mc.cores = nCores)
}
# Convert big.matrix to matrix
G <- G[]
if (scaleG) {
if (hasY) {
K <- do.call(sum, res)
} else {
# Use seq instead of diag to avoid copy as it does not increase ref count
K <- mean(G[seq(from = 1L, to = n * n, by = n + 1L)])
}
G[] <- G / K
}
return(G)
}
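# Illustrative sketch (`bg` as in the as.BGData() sketch above): compute a
# genomic relationship matrix in chunks and flag one sample from each pair whose
# relationship exceeds a cutoff; the cutoff value is only an example.
if (FALSE) {
    G <- getG(geno(bg), nCores = 1L)
    rel <- findRelated(G, cutoff = 0.25)
}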
getG_symDMatrix <- function(X, center = TRUE, scale = TRUE, impute = TRUE, scaleG = TRUE, minVar = 1e-05, blockSize = 5000L, folderOut = paste0("symDMatrix_", randomString()), vmode = "double", i = seq_len(nrow(X)), j = seq_len(ncol(X)), chunkSize = 5000L, nCores = getOption("mc.cores", 2L), verbose = FALSE) {
i <- convertIndex(X, i, "i")
j <- convertIndex(X, j, "j")
nX <- nrow(X)
pX <- ncol(X)
if (min(i) < 1L || max(i) > nX) {
stop("Index out of bounds")
}
if (min(j) < 1L || max(j) > pX) {
stop("Index out of bounds")
}
n <- length(i)
p <- length(j)
if (is.null(chunkSize)) {
chunkSize <- p
nChunks <- 1L
} else {
nChunks <- ceiling(p / chunkSize)
}
if (is.logical(center) && center == TRUE) {
if (verbose) {
message("Computing centers ...")
}
center <- rep(0, pX)
names(center) <- colnames(X)
center[j] <- chunkedApply(X = X, MARGIN = 2L, FUN = mean, i = i, j = j, chunkSize = chunkSize, nCores = nCores, verbose = FALSE, na.rm = TRUE)
}
if (is.logical(scale) && scale == TRUE) {
if (verbose) {
message("Computing scales ...")
}
scale <- rep(1, pX)
names(scale) <- colnames(X)
scale[j] <- chunkedApply(X = X, MARGIN = 2L, FUN = sd, i = i, j = j, chunkSize = chunkSize, nCores = nCores, verbose = FALSE, na.rm = TRUE)
}
if (file.exists(folderOut)) {
stop(folderOut, " already exists")
}
dir.create(folderOut)
if (is.null(blockSize)) {
blockSize <- n
nBlocks <- 1L
} else {
nBlocks <- ceiling(n / blockSize)
}
blockIndices <- split(i, ceiling(seq_along(i) / blockSize))
args <- vector(mode = "list", length = nBlocks)
counter <- 1L
for (rowIndex in 1L:nBlocks) {
rowArgs <- vector(mode = "list", length = nBlocks)
for (colIndex in 1L:nBlocks) {
if (verbose) {
message("Block ", rowIndex, "-", colIndex, " ...")
}
if (colIndex >= rowIndex) {
blockName <- paste0("data_", padDigits(rowIndex, nBlocks), "_", padDigits(colIndex, nBlocks), ".bin")
block <- as.ff(getG(X, center = center, scale = scale, impute = impute, scaleG = FALSE, minVar = minVar, i = blockIndices[[rowIndex]], j = j, i2 = blockIndices[[colIndex]], chunkSize = chunkSize, nCores = nCores, verbose = FALSE), filename = paste0(folderOut, "/", blockName), vmode = vmode)
# Change ff path to a relative one
physical(block)[["filename"]] <- blockName
rowArgs[[colIndex]] <- block
counter <- counter + 1L
} else {
rowArgs[[colIndex]] <- vt(args[[colIndex]][[rowIndex]])
}
}
args[[rowIndex]] <- do.call(ColumnLinkedMatrix, rowArgs)
}
G <- do.call(symDMatrix, args)
if (scaleG) {
K <- mean(diag(G))
for (rowIndex in seq_len(nBlocks)) {
for (colIndex in seq(rowIndex, nBlocks)) {
G[[rowIndex]][[colIndex]][] <- G[[rowIndex]][[colIndex]][] / K
}
}
}
save(G, file = paste0(folderOut, "/G.RData"))
return(G)
}
|
/scratch/gouwar.j/cran-all/cranData/BGData/R/getG.R
|
preprocess <- function(X, center = FALSE, scale = FALSE, impute = FALSE, nCores = getOption("mc.cores", 2L)) {
if (!(is.numeric(X) && length(dim(X)) == 2)) {
stop("'X' needs to be a numeric matrix")
}
if (!(is.logical(center) && length(center) == 1L) && !(is.numeric(center) && length(center) == ncol(X))) {
stop("'center' needs to be either a logical vector of size 1 or a numeric vector of size 'ncol(X)'")
}
if (!(is.logical(scale) && length(scale) == 1L) && !(is.numeric(scale) && length(scale) == ncol(X))) {
stop("'scale' needs to be either a logical vector of size 1 or a numeric vector of size 'ncol(X)'")
}
if (!(is.logical(impute) && length(impute) == 1L)) {
stop("'impute' needs to be a logical vector of size 1")
}
if (!(is.numeric(nCores) && nCores > 0L)) {
stop("'nCores' needs to be a positive number")
}
.Call(C_preprocess, X, center, scale, impute, as.integer(nCores))
}
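# Illustrative sketch (toy matrix; the exact handling of missing values is an
# assumption based on the impute argument): center and scale the columns and
# mean-impute the missing entry.
if (FALSE) {
    X <- matrix(rnorm(20), nrow = 5)
    X[2, 3] <- NA
    W <- preprocess(X, center = TRUE, scale = TRUE, impute = TRUE, nCores = 1L)
    round(colMeans(W), 10)   # columns are approximately mean-centered
}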
|
/scratch/gouwar.j/cran-all/cranData/BGData/R/preprocess.R
|
segments <- function(statistic, chr, bp, threshold, gap, trim = FALSE, verbose = FALSE) {
if (length(unique(c(length(statistic), length(chr), length(bp)))) != 1) {
stop("statistic, chr, and bp need to match in length")
}
if (!is.numeric(statistic)) {
stop("'statistic' needs to be a numeric vector")
}
if (!(is.numeric(chr) || is.character(chr))) {
stop("'chr' needs to be a either a character or numeric vector")
}
if (!is.numeric(bp)) {
stop("'bp' needs to be a numeric vector")
}
if (!is.numeric(threshold)) {
stop("'threshold' needs to a number")
}
if (!is.numeric(gap)) {
stop("'gap' needs to a number")
}
uniqueChr <- unique(chr)
out <- vector(mode = "list", length = length(uniqueChr))
for (curChr in uniqueChr) {
if (verbose) {
message("Working on chromosome ", curChr)
}
# Extract chromosome data
chrFilter <- which(chr == curChr)
statisticChr <- statistic[chrFilter]
bpChr <- bp[chrFilter]
# Determine variants below threshold
discoverySet <- which(statisticChr <= threshold)
# Set discoveries and all variants within +/- gap to 1, leave rest as 0
signal <- rep(0, length(chrFilter))
for (discovery in discoverySet) {
signal[abs(bpChr - bpChr[discovery]) <= gap] <- 1
}
# Determine the runs in the 0/1 signal
runs <- rle(signal)
# Determine at what positions within the chromosome the runs start and
# end while removing 0-runs
runStart <- c(1, cumsum(runs[["lengths"]][-length(runs[["lengths"]])]) + 1)
withinSegment <- runs[["values"]] == 1
runStart <- runStart[withinSegment]
runEnd <- runStart + runs[["lengths"]][withinSegment] - 1
runLength <- runs[["lengths"]][withinSegment]
# Determine value and position of smallest variant within segment, and
# optionally trim segment (i.e., remove variants that are not internal
# to the segment containing GWAS-significant variants)
# Would be nice to vectorize this like the other operations ...
minValue <- vector(mode = "numeric", length = length(runStart))
minValuePos <- vector(mode = "integer", length = length(runStart))
for (curSeg in seq_along(runStart)) {
segFilter <- seq(runStart[curSeg], runEnd[curSeg])
statisticSeq <- statisticChr[segFilter]
minValuePosSeg <- which.min(statisticSeq)
minValue[curSeg] <- statisticSeq[minValuePosSeg]
minValuePos[curSeg] <- chrFilter[1] + segFilter[1] + minValuePosSeg - 2
if (trim) {
# Determine which variants in the segment passed the threshold
significantVariants <- which(statisticSeq <= threshold)
# Set start of run to first significant variant and end of run
# to last significant variant
runStart[curSeg] <- segFilter[significantVariants[1]]
runEnd[curSeg] <- segFilter[significantVariants[length(significantVariants)]]
runLength[curSeg] <- runEnd[curSeg] - runStart[curSeg] + 1
}
}
# Determine at what base-pair positions the runs start and end
bpStart <- bpChr[runStart]
bpEnd <- bpChr[runEnd]
bpLength <- bpEnd - bpStart + 1
# Determine at what positions within x the runs start and end (more
# useful information than chromosome by chromosome because it is easier
# to extract)
xStart <- chrFilter[runStart]
xEnd <- chrFilter[runEnd]
# Prepare chromosome summary (there might be no segments, so do not
# rely on recycling)
outChr <- data.frame(
chr = rep(curChr, times = length(runStart)),
start = xStart,
end = xEnd,
length = runLength,
bpStart = bpStart,
bpEnd = bpEnd,
bpLength = bpLength,
minValue = minValue,
minValuePos = minValuePos
)
out[[curChr]] <- outChr
}
# Combine chromosomes
out <- do.call(rbind, out)
rownames(out) <- NULL
return(out)
}
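# Illustrative sketch (simulated p-values): group significant variants that lie
# within `gap` base pairs of each other into segments, one row per segment, with
# the smallest p-value and its position reported for each segment.
if (FALSE) {
    set.seed(2)
    pValues <- runif(1000)
    pValues[c(100:105, 500)] <- 1e-8
    chr <- rep(1L, 1000)
    bp <- sort(sample(1e6, 1000))
    segments(pValues, chr = chr, bp = bp, threshold = 1e-6, gap = 1e4)
}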
|
/scratch/gouwar.j/cran-all/cranData/BGData/R/segments.R
|
summarize <- function(X, i = seq_len(nrow(X)), j = seq_len(ncol(X)), chunkSize = 5000L, nCores = getOption("mc.cores", 2L), verbose = FALSE) {
res <- chunkedMap(X = X, FUN = function(chunk) {
summaries <- .Call(C_summarize, chunk)
rownames(summaries) <- colnames(chunk)
colnames(summaries) <- c("freq_na", "allele_freq", "sd")
return(summaries)
}, i = i, j = j, chunkSize = chunkSize, nCores = nCores, verbose = verbose)
res <- do.call(rbind, res)
as.data.frame(res)
}
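# Illustrative sketch (`bg` as in the as.BGData() sketch above): per-marker
# missingness, allele frequency, and standard deviation computed chunk by chunk.
if (FALSE) {
    sm <- summarize(geno(bg), nCores = 1L)
    head(sm)   # freq_na, allele_freq, sd per marker
}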
|
/scratch/gouwar.j/cran-all/cranData/BGData/R/summarize.R
|
getLineCount <- function(path, header) {
file <- file(path, open = "r")
n <- 0L
while (length(readLines(file, n = 1L)) > 0L) {
n <- n + 1L
}
if (header) {
n <- n - 1L
}
close(file)
return(n)
}
getFileHeader <- function(path, sep = "") {
file <- file(path, open = "r")
header <- scan(file, nlines = 1L, what = character(), sep = sep, quiet = TRUE)
close(file)
return(header)
}
getColumnCount <- function(path, sep = "") {
header <- getFileHeader(path, sep)
p <- length(header)
return(p)
}
randomString <- function() {
paste(sample(c(0L:9L, letters, LETTERS), size = 5L, replace = TRUE), collapse = "")
}
normalizeType <- function(val) {
type <- typeof(val)
# detect strings
if (type == "character" && length(val) > 0L) {
# convert to type if type and value match
convert <- try(vector(mode = val), silent = TRUE)
if (inherits(convert, "try-error")) {
# return a character type if conversion failed
warning("could no convert type, using character instead")
character()
} else {
# return conversion result otherwise
convert
}
# value doesn't contain type information and can be handled by typeof
} else {
val
}
}
loadExample <- function() {
path <- system.file("extdata", package = "BGData")
message("Loading chromosomes as .bed files...")
m <- do.call(ColumnLinkedMatrix, lapply(c("chr1", "chr2", "chr3"), function(chr) {
suppressMessages(BEDMatrix(paste0(path, "/", chr)))
}))
as.BGData(m, alternatePhenotypeFile = paste0(path, "/pheno.txt"))
}
|
/scratch/gouwar.j/cran-all/cranData/BGData/R/utils.R
|