quick_check_evds <- function(key = "..") {
url <- create_evds_url("subject", key = key)
gelen <- request_httr2_helper(url, cache = FALSE)
if (!is_response(gelen)) {
return(FALSE)
}
TRUE
}
mock_data_evds <- function() {
dates <- seq(from = lubridate::ymd("2010-1-1"), to = lubridate::ymd("2025-1-1"), by = "month")
num <- 100
tibble::as_tibble(data.frame(list(
date = dates[1:num],
a = 1:num,
b = 2:(num + 1)
)))
}
|
/scratch/gouwar.j/cran-all/cranData/vyos/R/internal_evds.R
|
quick_check_fred <- function(key = "..") {
url <- "https://api.stlouisfed.org/fred/category/series?category_id=125&api_key=%s&file_type=json"
url <- sprintf(url, key)
gelen <- request_httr2_helper(url, cache = FALSE)
if (!is_response(gelen)) {
return(FALSE)
}
TRUE
}
|
/scratch/gouwar.j/cran-all/cranData/vyos/R/internal_fred.R
|
#' lag_df
#' @description
#' The `lag_df` function creates additional columns based on a list of column names
#' and lag sequences. This feature is beneficial for scenarios where you need
#' varying lag selections for certain columns, allowing flexibility in specifying
#' different lags for different columns or opting for no lag at all.
#' @param df A data.frame or tibble.
#' @param laglist A list of column names where each index corresponds to a column
#' name and the associated value is the lag sequence.
#' @return tibble
#' @export
#'
#' @examples
#' df <- data.frame(a = 1:15, b = 2:16)
#' tb <- lag_df(df, laglist = list(a = 1:5, b = 1:3))
lag_df <- function(df, laglist) {
.Call(`_vyos_lag_df2_c`, df, laglist)
}
as_tibblex <- function(df) {
.Call(`_vyos_as_tibblex`, df)
}
#' lag_df2
#' @description
#' The `lag_df2` function creates additional columns based on a list of column names
#' and lag sequences. This feature is beneficial for scenarios where you need
#' varying lag selections for certain columns, allowing flexibility in specifying
#' different lags for different columns or opting for no lag at all.
#' @param df A data.frame or tibble.
#' @param laglist A list of column names where each index corresponds to a column
#' name and the associated value is the lag sequence.
#' @return data.frame
#' @export
#'
#' @examples
#' df <- data.frame(a = 1:15, b = 2:16)
#' df2 <- lag_df2(df, laglist = list(a = 1:5, b = 1:3))
lag_df2 <- function(df, laglist) {
.Call(`_vyos_lag_df_c`, df, laglist)
}
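# Illustrative sketch only (the exported lag_df/lag_df2 are backed by compiled
# code via .Call above). A pure-R equivalent of the documented behaviour, assuming
# every requested lag is smaller than nrow(df); the "_lag<k>" column-name
# convention is an assumption for the sketch, not the package's naming scheme.
lag_df_sketch <- function(df, laglist) {
  for (col in names(laglist)) {
    for (k in laglist[[col]]) {
      # shift the column down by k positions, padding the top with NA
      df[[paste0(col, "_lag", k)]] <- c(rep(NA, k), df[[col]][seq_len(nrow(df) - k)])
    }
  }
  tibble::as_tibble(df)
}
# Example: lag_df_sketch(data.frame(a = 1:15, b = 2:16), laglist = list(a = 1:5, b = 1:3))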
|
/scratch/gouwar.j/cran-all/cranData/vyos/R/lag_df.R
|
check_result_and_message <- function(data_router, concept) {
if (is.null(data_router)) {
msg <- "
{concept} cannot be null
"
message(glue::glue(msg))
stop()
}
}
message_api_key <- function(source_name = "evds", .stop = F) {
g <- glue::glue
msg <- "
============================
API KEY NOT SET {source_name}
============================
you may save your api key such as below
{format_message_set_api_key(source_name)}
"
message(g(msg))
if (.stop) {
stop()
}
Sys.sleep(2)
}
|
/scratch/gouwar.j/cran-all/cranData/vyos/R/messages.R
|
limit_years_of_data <- function(.data, dots_params) {
if (is.null(.data)) {
return(NULL)
}
.data <- .data %>% dplyr::filter(date >= dots_params$start_date)
if (!is.null(dots_params$end_date)) {
.data <- .data %>% dplyr::filter(date < dots_params$end_date)
}
.data
}
|
/scratch/gouwar.j/cran-all/cranData/vyos/R/post_process_data.R
|
# **************** cache_name_format ****************************************cache_name_format**
cache_name_format_request <- function(prop_name) {
return(paste("api_req", prop_name, sep = "_"))
}
# **************** .check_cache ****************************************.check_cache**
.check_cache <- function(prop_name) {
check_cache(cache_name_format_request(prop_name))
}
# **************** .save_cache ****************************************.save_cache**
.save_cache <- function(prop_name, data) {
save_cache(cache_name_format_request(prop_name), data)
}
# **************** .load_cache ****************************************.load_cache**
.load_cache <- function(prop_name) {
load_cache(cache_name_format_request(prop_name))
}
|
/scratch/gouwar.j/cran-all/cranData/vyos/R/request_cache_funcs.R
|
die_if_not_api_key <- function(api_object, force = F) {
fnc <- api_object$check_api_key_fnc
api_key_name <- api_object$api_key_name
if (!fnc() || isTRUE(force)) {
die_if_not_api_key_helper(api_key_name)
}
}
die_if_not_api_key_helper <- function(api_key_name = "xx") {
msg <- glue::glue("{api_key_name} not found")
message(msg)
stop()
}
is_response <- function(gelen) {
"httr2_response" %in% class(gelen) ||
"httr_response" %in% class(gelen) ||
"response" %in% class(gelen)
}
die_if_bad_response <- function(response, currentObj) {
if (is_response(response) && response$status_code == 200) {
return(invisible(TRUE))
}
if (!is_response(response)) {
return(FALSE)
}
die_if_bad_response_helper(response$status_code)
}
die_if_bad_response_helper <- function(status_code = 500) {
problem <- error_means(status_code)
message(problem)
stop()
}
|
/scratch/gouwar.j/cran-all/cranData/vyos/R/request_checks.R
|
error_list <- function() {
nums <- c(400, 404, 423, 429, 500)
errors <- c(
"Bad Request",
"Not Found",
"Locked",
"Too Many Requests",
"Internal Server Error"
)
return(
base::data.frame(
nums = nums,
errors = errors
)
)
}
error_means <- function(error_code = 400) {
crayon::red(
error_list() %>%
dplyr::filter(nums == error_code) %>%
.$errors
)
}
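# Illustrative usage:
# error_means(404)  # "Not Found", coloured red via crayon
# error_means(429)  # "Too Many Requests"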
|
/scratch/gouwar.j/cran-all/cranData/vyos/R/request_errors.R
|
request_httr <- function(currentObj) {
# fred
prop_value <- get_prop_value_from_source_object(currentObj)
gelen <- httr::RETRY(
verb = "GET",
url = currentObj$url,
path = currentObj$observations_url,
query = currentObj$series_fnc(prop_value),
terminate_on = error_list()$nums,
times = 2
)
gelen
}
request_httr2 <- function(currentObj) {
# evds
url <- createUrlForSeries(currentObj)
request_httr2_helper(url, currentObj$cache)
}
seriesCollapse <- function(liste) {
names_ <- names(liste)
url_parts <- c()
for (name in names_) {
value <- liste[[name]]
yeni <- paste(name, value, sep = "=")
url_parts <- append(yeni, url_parts)
}
vector_to_template_in(url_parts, collapse = "&")
}
vector_to_template_in <- function(v, collapse = "") {
f <- function(item) {
glue::glue("{item}")
}
s <- sapply(v, f)
paste0(s, collapse = collapse)
}
get_prop_value_from_source_object <- function(currentObj) {
if (!is.null(currentObj$seriesID)) {
prop_value <- currentObj$seriesID
} else {
prop_value <- currentObj$datagroup
}
if (is.null(prop_value)) {
stop("request_httr function requires seriesID or datagroup")
}
prop_value
}
# FRED ........................... create_url_for_series_fred
create_url_for_series_fred <- function(currentObj) {
prop_value <- get_prop_value_from_source_object(currentObj)
urlParts <- currentObj$series_fnc(prop_value)
paste0(
currentObj$url, currentObj$observations_url, "?",
seriesCollapse(urlParts)
)
}
check_series_ID_for_dots <- function(currentObj, urlParts) {
.base <- toString(attr(urlParts$series, "base"))
.source <- toString(attr(urlParts$series, "source"))
if (!(is_(.base, "series") && is_(.source, "evds"))) {
return(urlParts)
}
.f <- function(a) {
gsub("_", ".", a, fixed = T)
}
urlParts$series <- .f(urlParts$series)
urlParts
}
get_freq_number_evds <- function(freq) {
liste <- list(
day = 1,
workday = 2,
week = 3,
bimonth = 4,
month = 5,
quarter = 6,
sixmonth = 7,
year = 8
)
toLower_local <- function(x) {
if (is.null(x)) {
return("null")
}
tolower(x)
}
freq_unify <- function(string) {
.liste <- list(
m = "month",
y = "year",
q = "quarter",
"null" = "day"
) # series will default to most freq possible so
# null should be the most frequent
u_freq <- .liste[[toLower_local(string)]]
if (is.null(u_freq)) {
u_freq <- toLower_local(string)
}
u_freq
}
..f2 <- function() { # inline self-check; never called
assert(is_(freq_unify("week"), "week"))
# freq_unify( toLower_local( currentObj$freq) )
liste[[freq_u]]
assert(is_(get_freq_number_evds("week"), 3))
}
freq_u <- freq_unify(toLower_local(freq))
freq_u <- liste[[freq_u]]
freq_u
}
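# Illustrative usage (based only on the mapping above):
# get_freq_number_evds("m")    # 5, "m" is unified to "month"
# get_freq_number_evds("week") # 3, unknown shorthands fall through unchanged
# get_freq_number_evds(NULL)   # 1, NULL defaults to "day", the highest frequency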
check_freq_only_evds_series <- function(currentObj, urlParts) {
assign_("d_currentObj", currentObj)
.base <- toString(attr(urlParts$series, "base"))
.source <- toString(attr(urlParts$series, "source"))
if (!(is_(.base, "series") && is_(.source, "evds"))) {
return(urlParts)
}
# only for evds series ( not table ones )
# convert freq option to number
# urlParts
urlParts$frequency <- get_freq_number_evds(currentObj$freq)
# start_date
.date <- currentObj$start_date
urlParts$startDate <- date_to_str_1(.date)
urlParts
}
createUrlForSeries <- function(currentObj) {
if (currentObj$name == "fred") {
return(create_url_for_series_fred(currentObj))
}
# https://evds2.tcmb.gov.tr/service/evds/datagroup=bie_pbtablo2&type=json&startDate=01-01-1960&endDate=01-02-2200&key=
# or the other one
prop_value <- get_prop_value_from_source_object(currentObj)
urlParts <- currentObj$series_fnc(prop_value)
# freq is not needed in table ones only series of evds
urlParts <- check_freq_only_evds_series(currentObj, urlParts) # side effect start year will be checked
urlParts <- check_series_ID_for_dots(currentObj, urlParts) # replace '_' , '.'
paste0(
currentObj$url,
currentObj$observations_url,
seriesCollapse(urlParts)
)
}
# ...................................................... request_httr2_helper
request_httr2_helper <- function(url, cache = TRUE) {
assign_("d_url", url)
cache_name <- cache_name_format("request_httr2_helper", url)
check <- check_cache_comp(cache_name, cache)
if (check) {
return(load_cache(cache_name))
}
# check if vector
check_url_for_request(url)
# ..................... 1
req <- try_or_default(
{
httr2::request(url)
},
.default = NULL
)
# ..................... 2
resp <- try_or_default(
{
httr2::req_perform(req)
},
.default = NULL
)
if (is_response(resp)) {
save_cache(cache_name, resp)
}
inv(resp)
}
# ...................................................... check_proxy_set_2
check_proxy_set_2 <- function() {
sonuc <- nchar(Sys.getenv("https_proxy")) > 2
invisible(sonuc)
}
# ...................................................... check_proxy_setting
check_proxy_setting <- function(requires_proxy = T, die_for_test = F) {
if (!requires_proxy) {
return(invisible(T))
}
sonuc <- check_proxy_set_2()
if (!sonuc || die_for_test) {
message("
====================================\n\r
Proxy setting should be checked!
====================================\n\r
")
stop()
}
return(invisible(T))
}
requestNow <- function(currentObj) {
check_proxy_setting(currentObj$requires_proxy)
# NOTE request_httr VS request_httr2
list_fncs <- list(
evds = request_httr2,
evds_datagroup = request_httr2,
fred = request_httr
)
request_fnc <- list_fncs[[currentObj$name]]
should_I_wait_for_request(currentObj$name)
result <- request_fnc(currentObj)
invisible(result)
}
check_url_for_request <- function(url) {
g <- glue::glue
if (is.null(url)) {
message(g(
"
...............................................
url for the request is NULL
...............................................
"
))
stop()
}
if (length(url) > 1) {
message(g(
"
...............................................
url for the request must be a single string, got {length(url)} values
...............................................
"
))
stop()
}
}
|
/scratch/gouwar.j/cran-all/cranData/vyos/R/request_funcs.R
|
get_params_fred_fnc <- function(seriesID) {
list(
series_id = seriesID,
api_key = get_api_key("fred"),
file_type = "json"
)
}
get_params_evds_fnc <- function(seriesID) {
list(
series = seriesID,
key = get_api_key("evds"),
startDate = date_to_str_1(default_start_date()),
endDate = date_to_str_1(default_end_date()),
aggregationTypes = "avg",
formulas = 0,
frequency = 1,
type = "json"
)
}
# EVDS series .................
# Level: 0
# Percentage change: 1
# Difference: 2
# Year-to-year Percent Change: 3
# Year-to-year Differences: 4
# Percentage Change Compared to End-of-Previous Year: 5
# Difference Compared to End-of-Previous Year : 6
# Moving Average: 7
# Moving Sum: 8
# EVDS freqs ....................
# Daily: 1
# Business: 2
# Weekly(Friday): 3
# Twicemonthly: 4
# Monthly: 5
# Quarterly: 6
# Semiannual: 7
# Annual: 8
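# Illustrative helper (hypothetical; not used elsewhere in the package): a named
# lookup for the "formulas" codes documented in the block above, mirroring what
# get_freq_number_evds does for the frequency codes.
evds_formula_codes_sketch <- function() {
  c(
    level = 0, percentage_change = 1, difference = 2,
    yoy_percent_change = 3, yoy_difference = 4,
    pct_change_vs_prev_year_end = 5, diff_vs_prev_year_end = 6,
    moving_average = 7, moving_sum = 8
  )
}
# Example: evds_formula_codes_sketch()[["moving_average"]]  # 7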
get_params_evds_datagroup_fnc <- function(datagroup = "bie_yssk") {
list(
datagroup = datagroup,
key = get_api_key("evds"),
startDate = date_to_str_1(default_start_date()),
endDate = date_to_str_1(default_end_date()),
type = "json"
)
}
|
/scratch/gouwar.j/cran-all/cranData/vyos/R/request_prepare.R
|
# ............................................... check_last_requested_from_source
check_last_requested_from_source <- function(kaynak = "evds") {
last_time <- get_safe("last_request_times",
default = list(
evds = get_now(),
fred = get_now()
)
)
value <- if (kaynak %in% names(last_time)) {
last_time[[kaynak]]
} else {
get_yesterday()
}
value
}
# ............................................... save_time_request_kaynak
save_time_request_kaynak <- function(kaynak = "evds") {
last_time <- check_last_requested_from_source(kaynak)
if (is.null(last_time)) {
last_time <- list(
evds = Sys.time(),
fred = Sys.time()
)
} else {
last_time[[kaynak]] <- Sys.time()
}
last_time
}
# ...................................................................... get_now
get_now <- function(num = 0) {
time <- Sys.time()
hrs <- lubridate::hours(num)
mod_time <- time - hrs
mod_time
}
# ...................................................................... get_yesterday
get_yesterday <- function() {
get_now(24)
}
# ...................................................................... time_is_ok
time_is_ok <- function(last_requested_time = get_yesterday(), seconds = 1) {
now_ <- get_now()
diff_seconds <- as.numeric(difftime(now_, last_requested_time, units = "secs"))
diff_seconds > seconds
}
# ...................................................................... should_I_wait_for_request
should_I_wait_for_request <- function(source_name = "evds", seconds = 1, .verbose = FALSE) {
last_request_time <- check_last_requested_from_source(source_name)
last_request_time <- as.POSIXct(last_request_time, origin = "1970-01-01")
if (time_is_ok(last_request_time, seconds)) {
save_time_request_kaynak(source_name)
return(inv(T))
}
msg <- "pausing before a new request."
if (.verbose) {
.blue("->[{source_name}]: {msg}")
}
# debug_message( "->[{source_name}]: {msg}")
Sys.sleep(seconds)
save_time_request_kaynak(source_name)
return(inv(T))
}
# ...................................................................... test_should_I_wait_for_request
test_should_I_wait_for_request <- function() {
should_I_wait_for_request("evds", 3) == T
should_I_wait_for_request("evds", 3) == T
should_I_wait_for_request("evds", 4) == F
should_I_wait_for_request("evds", 2) == T
should_I_wait_for_request("fred", 5) == F
assert(
should_I_wait_for_request("fred", 5) == T
)
.green("done")
}
|
/scratch/gouwar.j/cran-all/cranData/vyos/R/request_wait_times.R
|
getLineEvdsResponse <- function(item) {
names_ <- names(item)
if ("YEARWEEK" %in% names_) {
return(getLineEvdsResponse_patch_week(item))
}
tarih <- item$Tarih
value <- item[[names_[[2]]]]
unix_time <- item[["UNIXTIME"]]
unix_time <- unix_time$`$numberLong`
list(date = unix_time, value = value)
}
getLineEvdsResponse_patch_week <- function(item) {
assign_("d_item", item)
# $Tarih
# [1] "06-01-1950"
#
# $YEARWEEK
# [1] "1950-1"
#
# $`TP_AB_N02-1`
# [1] "ND"
#
# $UNIXTIME
# $UNIXTIME$`$numberLong`
# [1] "-630723600"
#
#
# $TP_AB_N02
# NULL
names_ <- names(item)
tarih <- item$Tarih
value <- item[[names_[[3]]]]
unix_time <- item[["UNIXTIME"]]
unix_time <- unix_time$`$numberLong`
list(date = unix_time, value = value)
}
getLineEvdsResponse2 <- function(item) {
names_ <- names(item)
new_list <- list()
for (name in names_) {
value <- item[[name]]
if (is.list(value) || is.null(value) || is.na(value)) {
new_list[[name]] <- NA
} else {
new_list[[name]] <- value
}
}
new_list
}
getLineEvdsResponse3 <- function(item) {
# item_name = "item"
# if( !dynamic_exists( item_name)){
# assign( "item" ,item, envir = .GlobalEnv )
# }
# snames<- names_[ names_[c("Tarih" , "UNIXTIME")] ]
names_ <- names(item)
yliste <- list()
for (name in names_) {
value <- item[[name]]
if (is.list(value) || is.null(value) || is.na(value)) {
# assign_( paste0("dbg_" , name ) , value )
yliste[[name]] <- NA
} else {
yliste[[name]] <- value
}
}
yliste
# as.data.frame( yliste)
# tarih = item$Tarih
#
# value <- item[[names_[[2]]]]
#
# value
#
# unix_time <- item[[ "UNIXTIME" ]]
#
# unix_time <- unix_time$`$numberLong`
#
#
#
# list( date = unix_time , value = value )
}
#
convert_list_df_evds <- function(items, strategy = getLineEvdsResponse) {
assign_("d_items", items)
lines_ <- NULL
make_df_local <- function(line) {
line$value <- line$value %||% NA
as.data.frame(line)
}
for (item in items) {
line <- strategy(item) # CHECK 2
if (is.null(lines_)) {
lines_ <- make_df_local(line)
} else {
try({
line <- make_df_local(line)
lines_ <- rbind(lines_, line)
})
}
}
tb <- tibble::as_tibble(lines_)
tb$date <- makeDate(tb$date)
tb$value <- as_numeric(tb$value)
serie_code <- names(items)[[2]]
tb <- structure(tb, serie_code = serie_code)
tb
}
convert_list_df_evds_patch_week <- function(items, strategy = getLineEvdsResponse) {
lines_ <- NULL
for (item in items) {
line <- strategy(item) # CHECK 2
if (is.null(lines_)) {
lines_ <- as.data.frame(line)
} else {
line <- as.data.frame(line)
try({
lines_ <- rbind(lines_, line)
})
}
}
tb <- tibble::as_tibble(lines_)
tb$date <- makeDate(tb$date)
tb$value <- as_numeric(tb$value)
serie_code <- names(items)[[2]]
tb <- structure(tb, serie_code = serie_code)
tb
}
convert_list_df_evds_OLD <- function(items) {
lines_ <- NULL
for (item in items) {
line <- getLineEvdsResponse(item) # CHECK 2
if (is.null(lines_)) {
lines_ <- as.data.frame(line)
} else {
line <- as.data.frame(line)
try({
lines_ <- rbind(lines_, line)
})
}
}
tb <- tibble::as_tibble(lines_)
tb$date <- makeDate(tb$date)
tb$value <- as_numeric(tb$value)
serie_code <- names(items)[[2]]
tb <- structure(tb, serie_code = serie_code)
tb
}
makeDate <- function(date_str_vector_unixtime) {
suppressWarnings({
dt <- as_numeric(date_str_vector_unixtime)
dt <- dt + 3 * 60 * 60
x <- lubridate::as_datetime(dt)
as.Date(x)
})
}
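# Illustrative usage: EVDS items carry a UNIXTIME string in seconds since
# 1970-01-01 UTC, so makeDate("1262304000") yields as.Date("2010-01-01").
# The +3h shift presumably compensates for the UTC+3 timestamps in the responses.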
|
/scratch/gouwar.j/cran-all/cranData/vyos/R/response_evds.R
|
convert_list_df_evds2 <- function(items) {
lines_ <- NULL
for (item in items) {
line <- getLineEvdsResponse2(item)
if (is.null(lines_)) {
try({
lines_ <- as.data.frame(line)
})
} else {
try({
lines_ <- rbind(lines_, line)
})
}
}
tb <- tibble::as_tibble(lines_)
tb
}
|
/scratch/gouwar.j/cran-all/cranData/vyos/R/response_evds2.R
|
response_fnc_fred <- function(gelen, currentObj) {
parsed <- jsonlite::fromJSON(httr::content(gelen, "text"))
df <- parsed$observations
df <- df %>% dplyr::select(c("date", "value"))
df$date <- lubridate::ymd(df$date)
df$value <- as_numeric(df$value)
result <- tibble::as_tibble(df)
structure(result, series_code = currentObj$seriesID)
}
response_fnc_evds <- function(gelen, currentObj) {
contentList <- gelen %>% httr2::resp_body_json()
convert_list_df_evds(contentList$items)
}
convert_list_df_general <- function(response_list) {
lines_ <- NULL
for (item in response_list) {
line <- item # getLineFromResponse( item )
if (is.null(lines_)) {
lines_ <- as.data.frame(line)
} else {
line <- as.data.frame(line)
try({
lines_ <- rbind_safe(lines_, line)
})
}
}
tibble::as_tibble(lines_)
}
rbind_safe <- function(df1, df2) {
cols_diff <- setdiff(colnames(df1), colnames(df2))
cols_diff2 <- setdiff(colnames(df2), colnames(df1))
cols_diff <- append(cols_diff, cols_diff2)
if (length(cols_diff) == 0) {
return(
rbind(df1, df2)
)
}
for (col in cols_diff) {
df1 <- fill_na_df(df1, col)
df2 <- fill_na_df(df2, col)
}
rbind(df1, df2)
}
cbind_safe <- function(df1, df2) {
if (nrow(df1) != nrow(df2)) {
if (nrow(df1) > nrow(df2)) {
fark <- nrow(df1) - nrow(df2)
df2 <- append(df2[[1]], rep(NA, fark))
} else {
fark <- nrow(df2) - nrow(df1)
df1 <- append(df1[[1]], rep(NA, fark))
}
}
cbind(df1, df2)
}
fill_na_df <- function(df, colname) {
if (colname %in% colnames(df)) {
return(df)
}
num <- nrow(df)
if (is.numeric(num)) {
df[[colname]] <- rep(NA, times = num)
}
df
}
convertResponseVector_evds <- function(tb) {
if (is_false_false(tb)) {
return(FALSE)
}
structure(tb$value, dates = tb$date, serie_code = attr(tb, "serie_code"))
}
convertResponseVector_fred <- function(tb) {
structure(tb$value,
dates = tb$date,
serie_code = attr(tb, "series_code")
)
}
convertResponseVector_general <- function(df, currentObject) {
liste <- list(
evds = convertResponseVector_evds,
fred = convertResponseVector_fred
)
fnc <- liste[[currentObject$name]]
if (!is.function(fnc)) {
return(NULL)
}
fnc(df)
}
|
/scratch/gouwar.j/cran-all/cranData/vyos/R/response_funcs.R
|
#' Remove a column or columns from a data.frame.
#'
#' @param df Data.frame or tibble.
#' @param column_names Column name or column names as a character vector.
#' @param verbose Boolean, provides extra information when removing a column.
#' @usage remove_columns(df, column_names, verbose = FALSE)
#' @return Data.frame.
#' @export
#'
#' @examples
#' df <- remove_columns(cars, "speed")
#'
remove_columns <- function(df, column_names, verbose = FALSE) {
for (column_name in column_names) {
if (column_name %inn% df) {
if (verbose) {
.blue(" removing ...{column_name}\n\r")
}
try({
df <- remove_column(df, column_name)
})
}
}
df
}
remove_column <- function(df, column_name) {
if (!column_name %in% colnames(df)) {
return(df)
}
valid_cols <- colnames(df)
valid_cols <- valid_cols[valid_cols != column_name]
df <- df[, valid_cols]
if (length(valid_cols) == 1) {
liste <- list()
liste[[valid_cols]] <- df
df <- as.data.frame(liste)
}
df
}
#' remove_na_safe
#' @description
#' This function trims rows from both ends of a data frame. Starting from the
#' first row, it drops rows until it reaches a row in which every column has a
#' non-NA value; it then does the same working backwards from the last row,
#' dropping rows that contain at least one NA value in any column.
#' Rows with NA values in the interior of the data frame are kept, and the
#' trimmed data frame is returned.
#'
#' @param df data.frame from which NA rows are removed at the beginning and at the end
#' @param verbose give detailed information while removing NA rows
#'
#' @usage remove_na_safe(df, verbose = FALSE)
#' @return data.frame after trimming leading and trailing rows that contain NA values
#' @export
#' @examples
#'
#' df <- data.frame(
#' a = c(NA, 2:7, NA),
#' b = c(NA, NA, 5, NA, 12, NA, 8, 9)
#' )
#' df2 <- remove_na_safe(df)
remove_na_safe <- function(df, verbose = FALSE) {
df <- df_check_remove(df, verbose = verbose)
invisible(df)
}
remove_any_na <- function(df) {
df <- dplyr::filter(df, rowSums(is.na(df)) == 0)
}
first_row_that_ok <- function(df_, reverse = FALSE, except = NULL) {
if (!is.null(except)) {
df_ <- df_ %>% dplyr::select(-c(except))
}
numbers <- seq(from = 1, to = nrow(df_))
if (reverse) {
numbers <- seq(from = nrow(df_), to = 1)
}
for (num in numbers) {
a <- df_[num:num, ]
cond <- all(!is.na(unlist(as.vector(a))))
if (cond) {
# print( as.vector(a) )
return(num)
}
}
return(NA)
}
looks_like_template <- function(x) {
is.character(x) && length(x) == 1 && grepl("\n", x[[1]])
}
last_row_that_ok <- function(df_) {
first_row_that_ok(df_, reverse = TRUE)
}
safe_remove_col <- function(df, colname) {
if (!colname %in% colnames(df)) {
return(df)
}
df %>% dplyr::select(-c(!!colname))
}
df_check_remove <- function(df, verbose = FALSE) {
first_row <- first_row_that_ok(df)
last_row <- last_row_that_ok(df)
if (any(is.na(c(first_row, last_row)))) {
return(df)
}
df2 <- df[first_row:last_row, ]
n_ <- nrow(df)
fark <- nrow(df) - nrow(df2)
if (verbose) {
.green(
"
first_row : {first_row }
last_row : {last_row }
{fark} rows removed ...
"
)
}
return(df2)
}
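# Worked example (mirrors the roxygen example for remove_na_safe above): for
#   df <- data.frame(a = c(NA, 2:7, NA), b = c(NA, NA, 5, NA, 12, NA, 8, 9))
# the first fully non-NA row is row 3 and the last is row 7, so remove_na_safe(df)
# keeps rows 3:7; the interior NA in row 4 (column b) is retained.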
# .................................................. convert_df_numeric_helper
convert_df_numeric_helper <- function(df) {
df <- remove_column(df, "Tarih")
df <- remove_column(df, "YEARWEEK")
if (!is.data.frame(df)) {
return(df)
}
return(
try_or_default(
{
df %>% dplyr::mutate_if(is.character, as.numeric)
},
.default = df
)
)
}
# ........................................................................ limit_ilk_yil
limit_start_date <- function(sonuc, start_date = default_start_date()) {
if (!is.data.frame(sonuc) || is.null(start_date)) {
return(sonuc)
}
if ("date" %in% colnames(sonuc)) {
sonuc <- sonuc |> dplyr::filter(date >= lubridate::ymd(start_date))
}
sonuc
}
|
/scratch/gouwar.j/cran-all/cranData/vyos/R/sp_df_utils.R
|
#' inn
#' @description
#' Checks if the second parameter includes the first one as a value, a column name, or a name.
#'
#' @param x Character to check if it exists in a vector or list.
#' @param table List, data frame, or any vector.
#'
#' @return Logical value TRUE if it exists, FALSE if it does not.
#' @export
#'
#' @examples
#' .check <- inn("a", list(a = 1:5))
inn <- function(x, table) {
if (is.data.frame(table)) {
return(x %in% colnames(table))
}
if (is.list(table)) {
return(x %in% names(table))
}
return(base::match(x, table, nomatch = 0L) > 0L)
}
#' %inn%
#' @description
#' Checks if the second parameter includes the first one as a value, a column name, or a name.
#'
#' @param x Character to check if it exists in a vector or list.
#' @param table List, data frame, or any vector.
#'
#' @return Logical value TRUE if it exists, FALSE if it does not.
#' @export
#'
#' @examples
#' .check <- "a" %inn% data.frame(a = 1:5)
"%inn%" <- function(x, table) {
inn(x, table)
}
|
/scratch/gouwar.j/cran-all/cranData/vyos/R/sp_op_utils.R
|
#' Turn Off Verbose Mode
#'
#' This function turns off verbose mode, suppressing additional informational
#' output. It is useful when you want to limit the amount of information
#' displayed during the execution of certain operations.
#'
#' @details
#' Verbose mode is often used to provide detailed information about the
#' progress of a function or operation. By calling \code{verbose_off}, you can
#' disable this additional output.
#'
#' The \code{options("VYOS_verbose" = FALSE)} line sets the verbose option to
#' \code{FALSE}, silencing additional messages.
#'
#' @return
#' The function has no return value.
#'
#' @export
#'
#' @examples
#' verbose_off()
#'
#' @seealso
#' \code{\link{verbose_on}}: Turn on verbose mode.
#'
verbose_off <- function() {
options("VYOS_verbose" = FALSE)
success_force("Verbose mode is now OFF. You may call `verbose_on()` anytime to enable it.")
}
#' Turn On Verbose Mode
#'
#' This function turns on verbose mode, enabling additional informational
#' output. It is useful when you want to receive detailed information about
#' the progress of certain operations.
#'
#' @details
#' Verbose mode is designed to provide detailed information during the execution
#' of a function or operation. By calling \code{verbose_on}, you can enable
#' this additional output.
#'
#' The \code{options("VYOS_verbose" = TRUE)} line sets the verbose option to
#' \code{TRUE}, allowing functions to produce more detailed messages.
#'
#' @return
#' The function has no explicit return value.
#'
#' @export
#'
#' @examples
#' verbose_on()
#'
#' @seealso
#' \code{\link{verbose_off}}: Turn off verbose mode.
#'
verbose_on <- function() {
options("VYOS_verbose" = TRUE)
success_force("Verbose mode is now ON.\n")
success_force("You will receive additional information during function execution.\n")
inv(NULL)
}
check_verbose_option <- function() {
.check <- getOption("VYOS_verbose")
if (is.null(.check)) {
options(VYOS_verbose = FALSE)
return(FALSE)
}
return(.check)
}
print_if_verbose <- function(msg) {
.check <- check_verbose_option()
if (!.check) {
return(inv(NULL))
}
.blue(msg)
return(inv(NULL))
}
|
/scratch/gouwar.j/cran-all/cranData/vyos/R/verbose.R
|
.testing_ff <- function() {
testthat::test_dir("tests/testthat/core")
}
|
/scratch/gouwar.j/cran-all/cranData/vyos/R/z_testing_f.R
|
on_development_read_api_keys_of_developers <- function() {
file_name <- file.path("..", "api_keys.txt")
if (!file.exists(file_name)) {
return("FileNotFound")
}
content <- read(file_name)
assign_apikey <- function(parts) {
if (length(parts) < 2) {
return(character())
}
source_name <- sp_trim(parts[1])
key <- sp_trim(parts[2])
if (source_name %in% c("evds", "fred")) {
set_api_key(key, source_name)
}
}
split <- stringr::str_split_1
lines <- split(content, "\n")
for (line in lines) {
parts <- split(line, "=")
assign_apikey(parts)
}
}
before_check <- function() {
on_development_read_api_keys_of_developers()
# system( "R CMD check --as-cran ." , stdout = T )
content <- system2("R",
c(
"CMD",
"check",
"--as-cran",
"."
),
stdout = T
)
content <- paste0(content, collapse = "\n")
cat(content,
file = file.path(
"logs",
sprintf(
"CMD_CHECK_RESULTS-%s.txt",
get_hash(1)
)
)
)
content
}
|
/scratch/gouwar.j/cran-all/cranData/vyos/R/zz_before_check.R
|
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
cont <- function(x, y, w) {
.Call(`_wCorr_cont`, x, y, w)
}
wrankFast <- function(x, w) {
.Call(`_wCorr_wrankFast`, x, w)
}
fixxFast <- function(x, w) {
.Call(`_wCorr_fixxFast`, x, w)
}
mapThetaFast <- function(v) {
.Call(`_wCorr_mapThetaFast`, v)
}
optFcFast <- function(par, x, w, M, temp3, theta0) {
.Call(`_wCorr_optFcFast`, par, x, w, M, temp3, theta0)
}
optFFast <- function(par, x, w, M, temp3) {
.Call(`_wCorr_optFFast`, par, x, w, M, temp3)
}
theta <- function(M) {
.Call(`_wCorr_theta`, M)
}
imapThetaFast2 <- function(theta0) {
.Call(`_wCorr_imapThetaFast2`, theta0)
}
mainF <- function(x, M, w, theta0) {
.Call(`_wCorr_mainF`, x, M, w, theta0)
}
imapThetaFast <- function(theta0) {
.Call(`_wCorr_imapThetaFast`, theta0)
}
fscale_cutsFast <- function(par) {
.Call(`_wCorr_fscale_cutsFast`, par)
}
tableFast <- function(x, y, w) {
.Call(`_wCorr_tableFast`, x, y, w)
}
discord <- function(xytab) {
.Call(`_wCorr_discord`, xytab)
}
lnlFast <- function(xytab, pm) {
.Call(`_wCorr_lnlFast`, xytab, pm)
}
rcpparma_hello_world <- function() {
.Call(`_wCorr_rcpparma_hello_world`)
}
rcpparma_outerproduct <- function(x) {
.Call(`_wCorr_rcpparma_outerproduct`, x)
}
rcpparma_innerproduct <- function(x) {
.Call(`_wCorr_rcpparma_innerproduct`, x)
}
rcpparma_bothproducts <- function(x) {
.Call(`_wCorr_rcpparma_bothproducts`, x)
}
|
/scratch/gouwar.j/cran-all/cranData/wCorr/R/RcppExports.R
|
contCorr <- function(x,y,w, method=c("Pearson", "Spearman")) {
if(!is.numeric(x)) {
x <- as.numeric(x)
}
if(!is.numeric(y)) {
y <- as.numeric(y)
}
if(!is.numeric(w)) {
w <- as.numeric(w)
}
pm <- pmatch(tolower(method[[1]]), tolower(c("Pearson", "Spearman")))
if(pm == 2) {
# Spearman
x <- wrank(x,w)
y <- wrank(y,w)
}
xb <- sum(w*x)/sum(w)
yb <- sum(w*y)/sum(w)
numerator <- sum(w*(x-xb)*(y-yb))
denom <- sqrt( sum(w*(x-xb)^2) * sum(w*(y-yb)^2))
numerator/denom
}
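# Sanity-check sketch (assumes wrank with equal weights reduces to ordinary ranks,
# so the weighted estimates should agree with stats::cor up to numerical tolerance):
# set.seed(1)
# x <- rnorm(50); y <- x / 2 + rnorm(50); w <- rep(1, 50)
# all.equal(contCorr(x, y, w, method = "Pearson"),  cor(x, y, method = "pearson"))
# all.equal(contCorr(x, y, w, method = "Spearman"), cor(x, y, method = "spearman"))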
|
/scratch/gouwar.j/cran-all/cranData/wCorr/R/contCorr.R
|
contCorrFast <- function(x,y,w, method=c("Pearson", "Spearman")) {
if(!is.numeric(x)) {
x <- as.numeric(x)
}
if(!is.numeric(y)) {
y <- as.numeric(y)
}
if(!is.numeric(w)) {
w <- as.numeric(w)
}
if(tolower(method[[1]])=="spearman") {
x <- as.vector(wrankFast(x,w))
y <- as.vector(wrankFast(y,w))
}
cont(x,y,w)
}
|
/scratch/gouwar.j/cran-all/cranData/wCorr/R/contCorrFast.R
|
# based loosely on Olsson, Ulf (1979), "Maximum Likelihood Estimation of the Polychoric Correlation Coefficient", Psychometrika, 44(4), 443-460.
#' @importFrom mnormt biv.nt.prob
#' @importFrom minqa bobyqa
#' @importFrom stats qnorm
#' @importFrom stats optimize
#' @importFrom stats cor
polycFast <- function(x,y,w,ML=FALSE) {
lnl <- function(xytab, cc, rc, corr) {
cc <- c(-Inf, cc, Inf)
rc <- c(-Inf, rc, Inf)
pm <- sapply(1:(length(cc)-1), function(c) {
sapply(1:(length(rc)-1), function(r) {
biv.nt.prob(df=Inf,
lower=c(cc[c], rc[r]),
upper=c(cc[c+1], rc[r+1]),
mean=c(0,0),
S=matrix(c(1,corr,corr,1), nrow=2, ncol=2, byrow=TRUE))
})
})
lnlFast(xytab, pm)
}
optf_all <- function(par, xytab) {
c1 <- ncol(xytab)-1
c2 <- c1 + nrow(xytab)-1
-1 * lnl(xytab, cc=fscale_cutsFast(par[1:c1]), rc=fscale_cutsFast(par[(c1+1):c2]), corr=fscale_corr(par[length(par)] ))
}
optf_corr <- function(par, xytab, theta1, theta2) {
c1 <- ncol(xytab)-1
c2 <- c1 + nrow(xytab)-1
-1 * lnl(xytab, cc=fscale_cutsFast(theta2), rc=fscale_cutsFast(theta1), corr=fscale_corr(par))
}
fscale_corr <- function(par) {
tanh(par)
}
xytab <- tableFast(x,y,w)
temp <- discord(xytab)
if(temp==-1 | temp == 1)
return(temp)
ux <- sort(unique(x))
cut1 <- imapThetaFast( sapply(ux[-length(ux)],function(z) qnorm(sum(w[x<=z])/sum(w)) ))
uy <- sort(unique(y))
cut2 <- imapThetaFast( sapply(uy[-length(uy)],function(z) qnorm(sum(w[y<=z])/sum(w)) ))
cor0 <- atanh(cor(as.numeric(x),as.numeric(y)))
#bob <- bobyqa(c(cut1,cut2,cor0), fn=optf_all, xytab=xytab)
if(ML) {
bob <- bobyqa(c(cut1,cut2,cor0), fn=optf_all, xytab=xytab)
return(fscale_corr(bob$par[length(bob$par)]))
} else {
opt <- optimize(optf_corr, interval=cor0+c(-3,3), xytab=xytab, theta1=cut1,theta2=cut2)
return( fscale_corr(opt$minimum))
}
# should return above
}
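# The objective maximised above is the (weighted) polychoric log-likelihood of
# Olsson (1979): with column cuts a_0 = -Inf < a_1 < ... < a_C = Inf, row cuts
# b_0 = -Inf < ... < b_R = Inf and correlation rho,
#   lnL(rho, a, b) = sum_{r,c} n_rc * log P(a_{c-1} < X <= a_c, b_{r-1} < Y <= b_r)
# where P(.) is a standard bivariate-normal rectangle probability (biv.nt.prob
# with df = Inf) and n_rc are the weighted cell counts in xytab.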
|
/scratch/gouwar.j/cran-all/cranData/wCorr/R/polycFast.R
|
# based loosely on Olsson, Ulf (1979), "Maximum Likelihood Estimation of the Polychoric Correlation Coefficient", Psychometrika, 44(4), 443-460.
#' @importFrom mnormt biv.nt.prob
#' @importFrom minqa bobyqa
#' @importFrom stats qnorm
#' @importFrom stats optimize
#' @importFrom stats cor
polycSlow <- function(x,y,w,ML=FALSE) {
lnl <- function(xytab, cc, rc, corr) {
cc <- c(-Inf, cc, Inf)
rc <- c(-Inf, rc, Inf)
pm <- sapply(1:(length(cc)-1), function(c) {
sapply(1:(length(rc)-1), function(r) {
biv.nt.prob(df=Inf,
lower=c(cc[c], rc[r]),
upper=c(cc[c+1], rc[r+1]),
mean=c(0,0),
S=matrix(c(1,corr,corr,1), nrow=2, ncol=2, byrow=TRUE))
})
})
suppressWarnings(lpm <- log(pm))
#lpm[is.nan(lpm)] <- 0
lpm[(is.nan(lpm)) | (!is.finite(lpm))] <- log(.Machine$double.xmin)
sum(xytab * lpm)
}
optf_all <- function(par, xytab) {
c1 <- ncol(xytab)-1
c2 <- c1 + nrow(xytab)-1
-1 * lnl(xytab, cc=fscale_cuts(par[1:c1]), rc=fscale_cuts(par[(c1+1):c2]), corr=fscale_corr(par[length(par)] ))
}
optf_corr <- function(par, xytab, theta1, theta2) {
c1 <- ncol(xytab)-1
c2 <- c1 + nrow(xytab)-1
-1 * lnl(xytab, cc=fscale_cuts(theta2), rc=fscale_cuts(theta1), corr=fscale_corr(par))
}
fscale_cuts <- function(par) {
cumsum(c(par[1],exp(par[-1])))
}
fscale_corr <- function(par) {
tanh(par)
}
weightedTable <- function(x,y,w=rep(1,length(x))) {
tab <- table(x,y)
for(i in 1:nrow(tab)) {
for(j in 1:ncol(tab)) {
tab[i,j] <- sum(w[ x==dimnames(tab)[[1]][i] & y == dimnames(tab)[[2]][j] ])
}
}
tab
}
imapTheta <- function(theta0) {
c(theta0[1], log(theta0[-1]-theta0[-length(theta0)]))
}
xytab <- weightedTable(x,y,w)
# first check for perfect correlations which throw the optimizer for a loop because of the infinite bounds of the mapped correlation
i <- 1
j <- 1
foundConcord <- FALSE
foundDiscord <- FALSE
while(j<ncol(xytab)) {
if(i<nrow(xytab) & j < ncol(xytab)) {
if(xytab[i,j]>0 & sum(xytab[(i+1):nrow(xytab), (j+1):ncol(xytab)]) > 0) {
foundConcord <- TRUE
break
}
}
if(i>1 & j > 1) {
if(xytab[i,j]>0 & sum(xytab[1:(i-1), 1:(j-1)]) > 0) {
foundConcord <- TRUE
break
}
}
# increment
i <- i + 1
if(i>nrow(xytab)) {
i <- 1
j <- j + 1
}
}
i <- 1
j <- 1
while(j<ncol(xytab)) {
if(i>1 & j < ncol(xytab)) {
if(xytab[i,j]>0 & sum(xytab[1:(i-1), (j+1):ncol(xytab)]) > 0) {
foundDiscord <- TRUE
break
}
}
if(i<nrow(xytab) & j > 1) {
if(xytab[i,j]>0 & sum(xytab[(i+1):nrow(xytab), 1:(j-1)]) > 0) {
foundDiscord <- TRUE
break
}
}
# increment
i <- i + 1
if(i>nrow(xytab)) {
i <- 1
j <- j + 1
}
}
if(!foundDiscord){
return(1)
}
if(!foundConcord) {
return(-1)
}
ux <- sort(unique(x))
cut1 <- imapTheta( sapply(ux[-length(ux)],function(z) qnorm(sum(w[x<=z])/sum(w)) ))
uy <- sort(unique(y))
cut2 <- imapTheta( sapply(uy[-length(uy)],function(z) qnorm(sum(w[y<=z])/sum(w)) ))
cor0 <- atanh(cor(as.numeric(x),as.numeric(y)))
if(ML) {
bob <- bobyqa(c(cut1,cut2,cor0), fn=optf_all, xytab=xytab)
return(fscale_corr(bob$par[length(bob$par)]))
} else {
opt <- optimize(optf_corr, interval = cor0 + c(-3, 3), xytab = xytab, theta1 = cut1,theta2 = cut2)
return(fscale_corr(opt$minimum))
}
# returns in above if/else
}
|
/scratch/gouwar.j/cran-all/cranData/wCorr/R/polycSlow.R
|
polys.mcmc <- function(x,y,w=rep(1,length(x)), verbose=FALSE, nmax=1e3) {
t1 <- list(xbar=Inf,xsd=Inf,wlpx=Inf)
lnl <- function(x_, y_, w_, y_c, x_bar, x_sd, corr_) {
# following notation in Olsson, Drasgow, and Dorans, 1982
y_c <- c(-Inf,y_c,Inf)
ti <- get("t1")
z <- (x_ - x_bar)/x_sd
if(ti$xbar==x_bar & ti$xsd==x_sd) {
# we're good
} else {
lpx <- dnorm(x_, mean=x_bar, sd=x_sd, log=TRUE)
wlpx <- sum(w_ * lpx)
ti$xbar <- x_bar
ti$xsd <- x_sd
ti$wlpx <- wlpx
t1 <<- ti
}
tj <- y_c[y_+1]
tjm1 <- y_c[y_]
tjstar <- (tj - corr_*z) / ((1-corr_^2)^0.5)
tjm1star <- (tjm1 - corr_*z) / ((1-corr_^2)^0.5)
lpy <- log( pnorm(tjstar) - pnorm(tjm1star))
lpy[lpy==-Inf] <- min(lpy[lpy>-Inf])
ti$wlpx + sum(w_*lpy)
}
yi <- as.integer(y)
yc <- 1:(length(unique(yi))-1)
yc <- yc - mean(yc)
mux <- mean(x)
sdx <- sd(x)
corr <- cor(yi,x)
lnl0 <- lnl(x, y, w, yc, mux, sdx, corr)
hist <- data.frame(mux=rep(NA,nmax),
sdx=rep(NA,nmax),
corr=rep(NA,nmax),
lnl=rep(NA,nmax))
factx <- 1
factsd <- 1
factcorr <- 1
facty <- rep(1,length(yc))
for(i in 1:nmax) {
muxp <- mux + rnorm(1,0,1) * sdx / sqrt(length(y)) * factx
lnli <- lnl(x, y, w, yc, muxp, sdx, corr)
a <- exp(lnli - lnl0)
if(runif(1) < a) {
mux <- muxp
lnl0 <- lnli
if(runif(1) < 0.2) {
factx <- factx * 2
}
} else {
if(runif(1) < 0.2) {
factx <- factx * 0.5
}
}
sdxp <- abs(sdx + rnorm(1,0,1/100) * factsd)
lnli <- lnl(x, y, w, yc, mux, sdxp, corr)
a <- exp(lnli - lnl0)
if(runif(1) < a) {
sdx <- sdxp
lnl0 <- lnli
if(runif(1) < 0.2) {
factsd <- factsd * 2
}
} else {
if(runif(1) < 0.2) {
factsd <- factsd * 0.5
}
}
corrp <- tanh( atanh(corr) + rnorm(1,0,0.01) * factcorr)
lnli <- lnl(x, y, w, yc, mux, sdx, corrp)
a <- exp(lnli - lnl0)
if(runif(1) < a) {
corr <- corrp
lnl0 <- lnli
if(runif(1) < 0.2) {
factcorr <- factcorr * 2
}
} else {
if(runif(1) < 0.2) {
factcorr <- factcorr * 0.5
}
}
for(yci in 1:length(yc)) {
ycp <- yc
ycp[yci] <- ycp[yci] + rnorm(1,0,sd=0.01) * facty[yci]
lnli <- lnl(x, y, w, ycp, mux, sdx, corr)
a <- exp(lnli - lnl0)
if(runif(1) < a) {
yc <- ycp
lnl0 <- lnli
if(runif(1) < 0.2) {
facty[yci] <- facty[yci] * 2
}
} else {
if(runif(1) < 0.2) {
facty[yci] <- facty[yci] * 0.5
}
}
}
hist$mux[i] <- mux
hist$sdx[i] <- sdx
hist$corr[i] <- corr
hist$lnl[i] <- lnl0
}
mean(hist$corr)
}
polys <- function(x,y,w=rep(1,length(x)), verbose=FALSE) {
t1 <- list(xbar=Inf,xsd=Inf,wlpx=Inf)
lnl <- function(x_, y_, w_, y_c, x_bar, x_sd, corr_) {
# following notation in Olsson, Drasgow, and Dorans, 1982
y_c <- c(-Inf,y_c,Inf)
ti <- get("t1")
z <- (x_ - x_bar)/x_sd
if(ti$xbar==x_bar & ti$xsd==x_sd) {
# we're good
} else {
lpx <- dnorm(x_, mean=x_bar, sd=x_sd, log=TRUE)
wlpx <- sum(w_ * lpx)
ti$xbar <- x_bar
ti$xsd <- x_sd
ti$wlpx <- wlpx
t1 <<- ti
}
tj <- y_c[y_+1]
tjm1 <- y_c[y_]
tjstar <- (tj - corr_*z) / ((1-corr_^2)^0.5)
tjm1star <- (tjm1 - corr_*z) / ((1-corr_^2)^0.5)
lpy <- log( pnorm(tjstar) - pnorm(tjm1star))
lpy[lpy==-Inf] <- min(lpy[lpy>-Inf])
ti$wlpx + sum(w_*lpy)
}
optf_all <- function(par, x, y, w) {
print(par)
c1 <- length(unique(y)) - 1
lnl(x_=x, y_=y, w_=w, y_c=fscale_cuts(par[1:c1]), x_bar=par[c1+1], x_sd=fscale_sd(par[c1+2]), corr_=fscale_corr(par[c1+3]))
}
optf_corr <- function(par, parm1, x, y, w) {
c1 <- length(unique(y)) - 1
lnl(x_=x, y_=y, w_=w, y_c=fscale_cuts(parm1[1:c1]), x_bar=parm1[c1+1], x_sd=fscale_sd(parm1[c1+2]), corr_=fscale_corr(par))
}
optf_cut <- function(par, parm1, x, y, w) {
lnl(x_=x, y_=y, w_=w, y_c=fscale_cuts(par), x_bar=parm1[1], x_sd=fscale_sd(parm1[2]), corr_=fscale_corr(parm1[3]))
}
optf_xpar <- function(par, parm1, x, y, w) {
c1 <- length(unique(y)) - 1
lnl(x_=x, y_=y, w_=w, y_c=fscale_cuts(parm1[1:c1]), x_bar=par[1], x_sd=fscale_sd(par[2]), corr_=fscale_corr(parm1[c1+1]))
}
fscale_cuts <- function(par) {
cumsum(c(par[1],exp(par[-1])))
}
fscale_corr <- function(par) {
tanh(par)
}
fscale_sd <- function(par) {
exp(par)
}
yi <- as.integer(y)
yc0 <- 1:(length(unique(yi))-1) - 2
dx <- Inf
mux <- mean(x)
sdx <- sd(x)
cor0 <- cor(yi,x)
cor0m1 <- -1*sign(cor0)*Inf # a long way from cor0
niter <- 1
maxit <- 20 * length(yc0)
while(dx > 1E-6 & niter <100) {
# optimize y cuts
if(length(yc0) > 1) {
op <- optim(par=yc0, parm1=c(mux, sdx, cor0), optf_cut, x=x, y=yi, w=w, control=list(fnscale=-1, maxit=maxit))
yc0 <- op$par
} else {
op <- optimize(optf_cut, interval=c(yc0-2, yc0+2), parm1=c(mux, sdx, cor0), x=x, y=yi, w=w, maximum=TRUE)
yc0 <- op$maximum
}
# optimize x mean and se
op <- optim(par=c(mux, sdx), parm1=c(yc0, cor0), optf_xpar, x=x, y=yi, w=w, control=list(fnscale=-1, maxit=maxit))
mux <- op$par[1]
sdx <- op$par[2]
# optimize correlation
op <- optimize(optf_corr, interval=c(cor0-2, cor0+2), parm1=c(yc0, mux, sdx), x=x, y=yi, w=w, maximum=TRUE)
dx2 <- abs(fscale_corr(cor0m1) - fscale_corr(op$maximum))
cor0m1 <- cor0
dx <- abs(fscale_corr(cor0) - fscale_corr(op$maximum))
if(abs(dx2) < abs(dx)) { # looks like a loop, skip some iterations
niter <- niter * 2
cor0 <- mean(c(cor0, op$maximum)) # split the difference
maxit <- 20*length(yc0)
} else {
niter <- niter + 1
maxit <- 10
}
cor0 <- op$maximum
if(verbose) {
cat("cor0=",cor0, " (",fscale_corr(cor0),") yc=",fscale_cuts(yc0)," mux=",mux,"sdx=",fscale_sd(sdx)," niter=",niter,"dx=",dx," dx2=",dx2,"\n")
}
}
fscale_corr(cor0)
}
if(FALSE) {
nr <- 2100
df <- data.frame(n=rep(0,nr),
cor=rep(NA,nr),
cuts1=rep(NA,nr),
cuts2=rep(NA,nr),
ps1c=rep(NA,nr),
psm=rep(NA,nr),
Pe=rep(NA,nr))
corv <- seq(-0.95,0.95,len=21)
for(i in 1:nr) {
cat("i=",i,"\n")
n <- 1e4
cuts <- c(-0.2,0.2)
if(runif(1) < 0.2) { cuts <- c(-1,1) }
if(runif(1) < 0.2) { cuts <- c(-1,0,1) }
if(runif(1) < 0.2) { cuts <- c(-0.5,0,0.5) }
cor <- sample(corv,1)
df$cuts1[i] <- cuts[1]
df$cuts2[i] <- cuts[2]
df$cor[i] <- cor
df$n[i] <- n
S <- matrix(c(1,cor,cor,1), nrow=2)
xy <- mvrnorm(n=n, mu=c(0,0), Sigma=S)
x <- xy[,1]
y <- length(cuts)+1
for(j in rev(1:length(cuts))) {
y <- ifelse(xy[,2] <= cuts[j], j, y)
}
df$ps1c[i] <- polys(x,y)
# df$psm[i]<- polys.mcmc(x,y)
df$Pe[i] <- cor(xy)[1,2]
print(df[i,])
dfi <- df[1:i,]
dfi$dps1c <- dfi$ps1c - dfi$cor
dfi$dPe <- dfi$Pe - dfi$cor
dfi$dpsm <- dfi$psm - dfi$cor
df2 <- sqldf("SELECT avg(dps1c) AS dps1c, avg(dPe) as dPe, avg(dpsm) as dpsm, cor FROM dfi GROUP BY cor")
df2 <- df2[order(df2$cor),]
plot(c(-1,1), range(c(df2$dps1c, df2$dPe, df2$dpsm)), , type="n", lwd=3, lty=1, col="black")
abline(h = 0)
lines(df2$cor, df2$dps1c , lwd=1, lty=2, col="blue")
lines(df2$cor, df2$dPe , lwd=1, lty=1, col="orange")
lines(df2$cor, df2$dpsm , lwd=1, lty=3, col="green")
}
require(lattice)
df <- df[order(df$cor),]
plot(df$cor, df$ps1c, type="l", lwd=1, lty=2, col="blue")
abline(0,1)
lines(df$cor, df$Pe, lwd=3, lty=1)
}
|
/scratch/gouwar.j/cran-all/cranData/wCorr/R/polys.mcmc.R
|
#' @importFrom minqa bobyqa
#' @importFrom stats dnorm
#' @importFrom stats optimize
#' @importFrom stats cor
#' @importFrom stats weighted.mean
polysFast <- function(x, M, w, ML=FALSE) {
M <- as.numeric(as.factor(M))
uM <- sort(unique(M))
mapTheta <- function(v) {
vv <- cumsum(c(v[1],exp(v[-1])))
c(NA,-Inf,vv,Inf)
}
theta0 <- sapply(uM[-length(uM)],function(z) qnorm(weighted.mean(M<=z, w)) )
if(ML) {
temp = w/sum(w)
temp2 = fixxFast(x, temp)
temp3 = sum(temp*dnorm(temp2,log=TRUE))
bob <- suppressWarnings(bobyqa(par=c(atanh(cor(x,M)),imapThetaFast2(theta0)),
fn=optFFast, x=temp2, w=temp, M=M, temp3 = temp3))
return(tanh(bob$par[1]))
} else {
temp = w/sum(w)
temp2 = fixxFast(x, temp)
temp3 = sum(temp*dnorm(temp2, log=TRUE))
values = mainF(x, M, w, theta0)
opt <- suppressWarnings(optimize(optFcFast, interval=unlist(values[1]),
x=temp2, w=temp, theta0=(imapThetaFast2(theta0)),
M=M, temp3= temp3))
return( tanh(opt$minimum) )
}
}
|
/scratch/gouwar.j/cran-all/cranData/wCorr/R/polysFast.R
|
#' @importFrom minqa bobyqa
#' @importFrom stats pnorm
#' @importFrom stats dnorm
#' @importFrom stats optimize
#' @importFrom stats cor
polysSlow <- function(x, M, w, ML=FALSE) {
# polyserial log likelihood function
polysLnL <- function(x,M,rho,theta,w) {
R <- (1-rho^2)^0.5
Qp2 <- (theta[M+2] - rho*x) / R
Qp1 <- (theta[M+1] - rho*x) / R
sum(w * dnorm(x, log=TRUE)) + sum(w * log(pnorm(Qp2) - pnorm(Qp1)))
}
# weighted normalization of x
fixx <- function(x,w) {
mux <- sum(x*w)
sdx <- sum(w*(x-mux)^2)
(x-mux)/sqrt(sdx)
}
# to allow the correlation to be fit over R instead of [-1,1], use this remapping
mapCor <- function(v) {
tanh(v)
}
# puts the theta into the correct format for the polysLnL function
mapTheta <- function(v) {
vv <- cumsum(c(v[1],exp(v[-1])))
c(NA, -Inf, vv, Inf)
}
optF <- function(x,M,w) {
w <- w/sum(w)
fx <- fixx(x,w)
function(par) {
res <- polysLnL(fx, M, mapCor(par[1]), mapTheta(par[-1]), w)
ifelse(res==-Inf, .Machine$double.xmax, -1*res)
}
}
optFc <- function(x,M,w,theta0) {
w <- w/sum(w)
fx <- fixx(x,w)
ftheta0 <- mapTheta(theta0)
function(par) {
res <- polysLnL(fx, M, mapCor(par[1]), ftheta0, w)
ifelse(res==-Inf, .Machine$double.xmax, -1*res)
}
}
imapTheta <- function(theta0) {
c(theta0[1], log(theta0[-1] - theta0[-length(theta0)]))
}
#
imapCor <- function(cor) {
atanh(cor)
}
# map cor so that the optimization can be over R
mapCor <- function(v) {
tanh(v)
}
M <- as.numeric(as.factor(M)) # make discrete values that are adjacent.
uM <- sort(unique(M))
theta0 <- sapply(uM[-length(uM)], function(z) qnorm( weighted.mean(M<=z, w) ) )
if(ML) {
bob <- bobyqa(par=c(imapCor(cor(x,M)), imapTheta(theta0)), fn=optF(x,M,w))
return(mapCor(bob$par[1]))
} else {
opt <- optimize(optFc(x, M, w, imapTheta(theta0)), imapCor(cor(x,M)) + c(-3,3))
return(mapCor(opt$minimum))
}
}
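# The objective maximised above is the weighted polyserial log-likelihood: with
# standardized continuous scores z_i, cuts tau_0 = -Inf < tau_1 < ... < tau_K = Inf
# for the ordinal variable M, and correlation rho,
#   lnL(rho, tau) = sum_i w_i [ log phi(z_i)
#                   + log( Phi((tau_{M_i} - rho*z_i)/sqrt(1-rho^2))
#                        - Phi((tau_{M_i - 1} - rho*z_i)/sqrt(1-rho^2)) ) ]
# which is what polysLnL computes (the dnorm term plus the pnorm differences).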
|
/scratch/gouwar.j/cran-all/cranData/wCorr/R/polysSlow.R
|
bias <- function(workingDir) {
setwd(workingDir)
iter <- NULL
n <- 0
grid <- expand.grid(ML=FALSE,
iter=1:2,
n = c(10,100,1000),
rho = c(-0.99,seq(-0.95,0.95,by=0.2), 0.99),
fast=TRUE)
grid$reset <- TRUE
grid <- subset(grid, ! ( (iter > 100) & (n == 1000) | ( (iter > 1000) & (n==100) ) ) )
bias <- wCorrSim(n=grid$n, rho=grid$rho, ML=grid$ML, fast=grid$fast, reset=grid$reset, usew=FALSE)
save(bias, file="bias.RData")
bias$rmse <- sqrt( (bias$est - bias$rho)^2 )
bias$bias <- bias$est - bias$rho
aggbiasA <- aggregate(bias ~ n + rho + type, data=bias, FUN=mean, na.rm=TRUE)
aggbiasB <- aggregate(rmse ~ n + rho + type, data=bias, FUN=mean, na.rm=TRUE)
aggbias <- merge(aggbiasA, aggbiasB, by=c("n", "rho", "type"), all=TRUE)
names(aggbias)[names(aggbias) == "bias"] <- "bias.mean"
names(aggbias)[names(aggbias) == "rmse"] <- "rmse.mean"
aggbias2 <- aggregate(rmse ~ n+type, data=bias, FUN=mean, na.rm=TRUE)
names(aggbias2)[names(aggbias2) == "rmse"] <- "rmse.mean"
save(aggbias, aggbias2, file="aggbias.RData")
}
fast <- function(workingDir){
setwd(workingDir)
grid <- expand.grid(fast=c(TRUE,FALSE),
iter=1:10,
n = c(10,100,1000),
rho = c(-0.99,seq(-0.95,0.95,by=0.05), 0.99),
ML=FALSE)
grid$reset <- grid$fast
fast <- wCorrSim(n=grid$n, rho=grid$rho, ML=grid$ML, fast=grid$fast, reset=grid$reset, usew=FALSE, outstr="fast")
save(fast, file="fast.RData")
if(FALSE) {
x <- rnorm(10)
y <- rnorm(10)+x/2
afast <- weightedCorr(x,y, method="Pearson", weights=rep(1,10), fast=TRUE, ML=FALSE)
aslow <- weightedCorr(x,y, method="Pearson", weights=rep(1,10), fast=FALSE, ML=FALSE)
c(afast-aslow,afast,aslow)
afast2 <- weightedCorr(x,y, method="Pearson", weights=rep(1,10), fast=TRUE, ML=FALSE)
c(afast-afast2,afast,afast2)
afast <- weightedCorr(x,y, method="Spearman", weights=rep(1,10), fast=TRUE, ML=FALSE)
aslow <- weightedCorr(x,y, method="Spearman", weights=rep(1,10), fast=FALSE, ML=FALSE)
c( (afast-aslow)*10^16,afast,aslow)
M <- 0 + (x > 0)
P <- 9 + (y > 0)
afast <- weightedCorr(x,y, method="polychoric", weights=rep(1,10), fast=TRUE, ML=FALSE)
aslow <- weightedCorr(x,y, method="polychoric", weights=rep(1,10), fast=FALSE, ML=FALSE)
c( (afast-aslow),afast,aslow)
}
fast$i <- rep(1:(nrow(fast)/2),each=2)
mfast <- merge(subset(fast,fast),
subset(fast,!fast, c("i", "est")),
by="i",
suffixes=c(".fast",".slow"))
mfast$fast <- NULL
mfast$absdrho <- pmax(abs(mfast$est.fast - mfast$est.slow), 1E-16)
aggfast <- aggregate(absdrho ~ n + rho + type, data=mfast, FUN=mean, na.rm=TRUE)
names(aggfast)[names(aggfast) == "absdrho"] <- "absdrho.mean"
save(aggfast, file="aggfast.RData")
}
ML <- function(workingDir) {
type <- NULL
grid <- expand.grid(ML=c(TRUE,FALSE),
iter=1:500,
n = c(10,100,1000),
rho = c(-0.99,seq(-0.95,0.95,by=0.05), 0.99),
fast=TRUE)
grid$reset <- grid$ML
ML <- wCorrSim(n=grid$n, rho=grid$rho, ML=grid$ML, fast=grid$fast, reset=grid$reset, usew=FALSE)
save(ML, file="ML.RData")
ml <- subset(ML, type %in% c("Polychoric", "Polyserial"))
ml$rmse <- (ml$est - ml$rho)^2
aggml <- aggregate(rmse ~ n + rho + type + ML, data=ml, FUN=mean, na.rm=TRUE)
names(aggml)[names(aggml) == "rmse"] <- "rmse.mean"
aggml$rmse.mean <- sqrt(aggml$rmse.mean)
aggml$ml <- ifelse(aggml$ML==TRUE, "ML=TRUE", "ML=FALSE")
aggml$nt <- factor(paste("n=",aggml$n))
ml$i <- rep(1:(nrow(ml)/2),each=2)
mml <- merge(subset(ml,ML),
subset(ml,!ML, c("i", "est")),
by="i",
suffixes=c(".ml",".nonml"))
mml$absd <- abs(mml$est.ml - mml$est.nonml)
aggt1_0 <- aggregate(absd ~ type + n + ML, data=subset(mml, type=="Polychoric"), FUN=mean, na.rm=TRUE)
names(aggt1_0)[names(aggt1_0) == "absd"] <- "absd.mean"
aggt1_0$ML <- NULL
aggt1 <- aggregate(rmse ~ type + n + ML, data=subset(ml, type=="Polychoric"), FUN=mean, na.rm=TRUE)
names(aggt1)[names(aggt1) == "rmse"] <- "rmse.mean"
aggt2_0 <- aggregate(absd ~ type + n + ML, data=subset(mml, type=="Polyserial"), FUN=mean, na.rm=TRUE)
names(aggt2_0)[names(aggt2_0) == "absd"] <- "absd.mean"
aggt2_0$ML <- NULL
aggt2 <- aggregate(rmse ~ type + n + ML, data=subset(ml, type=="Polyserial"), FUN=mean, na.rm=TRUE)
names(aggt2)[names(aggt2) == "rmse"] <- "rmse.mean"
aggt2$rmse.mean <- sqrt(aggt2$rmse.mean)
save(aggml, aggt1_0, aggt1, aggt2_0, aggt2, file="aggML.RData")
}
ntime <- function(workingDir) {
setwd(workingDir)
ntime1 <- ntime2 <- NULL
grid <- expand.grid(ML=FALSE,
iter=1:5,
n = round(10^seq(1,6,by=0.25)),
rho = c(-0.99,seq(-0.95,0.95,by=0.05), 0.99),
fast=TRUE)
ntime <- wCorrSim(n=grid$n, rho=grid$rho, ML=grid$ML, fast=grid$fast, reset=TRUE, usew=FALSE, outstr="ntime")
save(ntime, file="ntime.RData")
######
setwd(workingDir)
grid <- expand.grid(ML=FALSE,
iter=1:5,
n = round(10^seq(6.5,7,by=0.5)),
rho = c(-0.99,seq(-0.95,0.95,by=0.05), 0.99),
fast=TRUE)
ntime2 <- wCorrSim(n=grid$n, rho=grid$rho, ML=grid$ML, fast=grid$fast, reset=TRUE, usew=FALSE, outstr="ntime2")
save(ntime2, file="ntime2.RData")
######
setwd(workingDir)
load("ntime1.RData")
load("ntime2.RData")
ntime <- rbind(ntime1, ntime2)
save(ntime, file="ntime.RData")
aggTime <- aggregate(t ~ n + type, data=ntime, FUN=mean, na.rm=TRUE)
names(aggTime)[names(aggTime) == "t"] <- "t.mean"
aggTime$t.mean <- ifelse(aggTime$t.mean==0, 0.001,aggTime$t.mean)
save(aggTime, file="aggTime.RData")
}
speed <- function(workingDir) {
setwd(workingDir)
grid1 <- expand.grid(fast=c(TRUE,FALSE),
ML=c(TRUE,FALSE),
iter=80,
n = round(10^seq(1,4.75,by=0.25)),
rho = c(-0.99,seq(-0.95,0.95,by=0.05), 0.99))
grid2 <- expand.grid(fast=c(TRUE,FALSE),
ML=c(TRUE,FALSE),
iter=20,
n = round(10^seq(5,7,by=0.5)),
rho = c(-0.99,seq(-0.95,0.95,by=0.05), 0.99))
grid <- rbind(grid1, grid2)
grid$reset <- (grid$ML) & (grid$fast)
speed1 <- wCorrSim(n=grid$n, rho=grid$rho, ML=grid$ML, fast=grid$fast, reset=grid$reset, usew=FALSE,outstr="speed")
save(speed, file="speed.RData")
##############
setwd(workingDir)
grid1 <- expand.grid(fast=c(TRUE,FALSE),
ML=c(TRUE,FALSE),
iter=80,
n = round(10^seq(1,4.75,by=0.25)),
rho = c(-0.99,seq(-0.95,0.95,by=0.05), 0.99))
grid <- rbind(grid1)
grid$reset <- (grid$ML) & (grid$fast)
speed1 <- wCorrSim(n=grid$n, rho=grid$rho, ML=grid$ML, fast=grid$fast, reset=grid$reset, usew=FALSE,outstr="speed1")
save(speed1, file="speed1.RData")
###################
require(wCorr)
setwd(workingDir)
grid2 <- expand.grid(fast=c(TRUE,FALSE),
ML=c(TRUE,FALSE),
iter=20,
n = round(10^seq(5,6,by=0.5)),
rho = c(-0.99,seq(-0.95,0.95,by=0.05), 0.99))
grid <- rbind(grid2)
grid$reset <- (grid$ML) & (grid$fast)
speed2 <- wCorrSim(n=grid$n, rho=grid$rho, ML=grid$ML, fast=grid$fast, reset=grid$reset, usew=FALSE,outstr="speed2")
save(speed2, file="speed2.RData")
###################
require(wCorr)
setwd(workingDir)
grid2 <- expand.grid(fast=c(TRUE,FALSE),
ML=c(TRUE,FALSE),
iter=20,
n = round(10^seq(6.5,6.5,by=0.5)),
rho = c(-0.99,seq(-0.95,0.95,by=0.05), 0.99))
grid <- rbind(grid2)
grid$reset <- (grid$ML) & (grid$fast)
speed3 <- wCorrSim(n=grid$n, rho=grid$rho, ML=grid$ML, fast=grid$fast, reset=grid$reset, usew=FALSE,outstr="speed3")
save(speed3, file="speed3.RData")
###################
require(wCorr)
setwd(workingDir)
grid2 <- expand.grid(fast=c(TRUE,FALSE),
ML=c(TRUE,FALSE),
iter=10,
n = round(10^seq(7,7,by=0.5)),
rho = c(-0.99,seq(-0.95,0.95,by=0.05), 0.99))
grid <- rbind(grid2)
grid$reset <- (grid$ML) & (grid$fast)
speed4 <- wCorrSim(n=grid$n, rho=grid$rho, ML=grid$ML, fast=grid$fast, reset=grid$reset, usew=FALSE,outstr="speed4")
save(speed4, file="speed4.RData")
####
setwd(workingDir)
load("speed1.RData")
load("speed2.RData")
speed <- rbind(speed1, speed2)
load("speed3.RData")
speed <- rbind(speed, speed3)
load("speed4.RData")
speed <- rbind(speed, speed4)
save(speed, file="speed.RData")
speed$class <- ifelse(speed$ML, "ML=T,", "ML=F,")
speed$class <- paste0(speed$class, ifelse(speed$fast, "fast=T", "fast=F"))
speed$t <- pmax(speed$t, 0.001)
aggSpeed <- aggregate(t ~ n + type + class, data=speed, FUN=mean, na.rm=TRUE)
names(aggSpeed)[names(aggSpeed) == "t"] <- "t.mean"
save(aggSpeed, file="aggSpeed.RData")
}
spearmanSpeed <- function(workingDir) {
###################
require(wCorr)
setwd(workingDir)
grid <- expand.grid(usew=c(FALSE,TRUE),
iter=1:2,
n = c(10,100,1000, 10000),
rho = c(-0.99,seq(-0.95,0,by=0.05)))
grid$reset <- !grid$usew
spear <- spearmanSim(n=grid$n, rho=grid$rho, usew=grid$usew, outstr="spear")
save(spear, file="spear.RData")
load("../wgtvn.RData")
spear$rho <- spear$SpearmanOrig
spear$SpearmanOrig <- NULL
spear$N <- NULL
wgtvn <- wgtvn[wgtvn$type!= "Spearman",]
wgt <- rbind(wgtvn, spear)
wgt$mserho <- (wgt$est - wgt$rho)^2
aggWgtvn <- aggregate(mserho ~ n + usew + type, data=wgt, FUN=mean, na.rm=TRUE)
names(aggWgtvn)[names(aggWgtvn) == "mserho"] <- "mserho.mean"
aggWgtvn$rmserho <- sqrt(aggWgtvn$mserho)
aggWgtvn$weight <- ifelse(aggWgtvn$usew, "Weighted", "Unweighted")
save(aggWgtvn, file="aggWgtvn.RData")
}
wgtvrho <- function(workingDir) {
setwd(workingDir)
grid <- expand.grid(usew=c(FALSE,TRUE),
iter=1:100,
n = c(10,100,1000),
rho = c(-0.99,seq(-0.95,0.95,by=0.05), 0.99))
grid$reset <- !grid$usew
wgtvrho <- wCorrSim(n=grid$n, rho=grid$rho, ML=FALSE, fast=TRUE, reset=TRUE, usew=grid$usew)
save(wgtvrho, file="wgtvrho.RData")
wgt <- wgtvrho
wgt$absdrho <- abs(wgt$est - wgt$rho)
aggWgtvrho <- aggregate(absdrho ~ rho + usew + type, data=wgt, FUN=mean, na.rm=TRUE)
names(aggWgtvrho)[names(aggWgtvrho) == "absdrho"] <- "absdrho.mean"
aggWgtvrho$weight <- ifelse(aggWgtvrho$usew, "Weighted", "Unweighted")
save(aggWgtvrho, file="aggWgtvrho.RData")
}
wgtvn <- function(workingDir) {
setwd(workingDir)
grid <- expand.grid(usew=c(FALSE,TRUE),
iter=1:20,
n = c(10,100,1000,10000),
rho = c(-0.99,seq(-0.95,0,by=0.05)))
grid$reset <- !grid$usew
wgtvn <- wCorrSim(n=grid$n, rho=grid$rho, ML=FALSE, fast=TRUE, reset=TRUE, usew=grid$usew)
save(wgtvn, file="wgtvn.RData")
}
# createRDA <- function() {
# devtools::use_data(aggfast,aggml, aggt1, aggt1_0, aggt2, aggt2_0, aggSpeed, aggbias, aggbias2,
# aggTime, aggWgtvrho, aggWgtvn,speed, internal = TRUE, overwrite = TRUE)
# }
|
/scratch/gouwar.j/cran-all/cranData/wCorr/R/sim.R
|
#' Computes weighted or unweighted Pearson, Spearman, polyserial, and polychoric correlation coefficients.
#'
#' See \code{\link{weightedCorr}} for examples.
#'
#' @docType package
#' @name wCorr-package
NULL
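# Minimal usage sketch of the exported API (call signature as used elsewhere in
# these sources, e.g. sim.R; hedged, not a full reference):
# library(wCorr)
# x <- rnorm(100); y <- x / 2 + rnorm(100); w <- runif(100)
# weightedCorr(x, y, method = "Pearson",  weights = w)
# weightedCorr(x, y, method = "Spearman", weights = w)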
|
/scratch/gouwar.j/cran-all/cranData/wCorr/R/wCorr-package.R
|
wCorrSim <- function(n, rho, ML=FALSE, fast=TRUE, reset=TRUE, usew=FALSE, outstr="") {
len <- max(c(length(n), length(rho), length(ML), length(fast), length(reset), length(usew)))
vec <- c("n", "rho", "ML", "fast", "reset", "usew")
for(i in 1:length(vec)) {
var <- get(vec[i])
if(length(var) != len) {
if(length(var) != 1) {
stop("length of ", sQuote(vec[i]), " must be 1 or the same as the longest vector passed to sim")
} else {
var <- rep(var,len)
}
}
assign(vec[i],var)
}
everusew <- sum(usew)>0
ns <- n
cor0 <- rho
df <- data.frame(n=n,rho=rho, ML=ML, fast=fast, reset=reset, usew=usew)
df$spear <- df$speart <- NA
df$Q <- df$M <- NA
df$pear <- df$peart <- NA
df$pc <- df$pct <- NA
df$ps <- df$pst <- NA
ii <- 1
while(ii <= nrow(df)) {
cori <- df$rho[ii]
n <- df$n[ii]
ML <- df$ML[ii]
fast <- df$fast[ii]
reset <- df$reset[ii]
usew <- df$usew[ii]
if(interactive()) {
cat(outstr,"n=",n,"cori=",cori,"pct=",100*ii/nrow(df),"\n")
cat(" fast=",fast,"ml=",ML,"reset=",reset,"\n")
}
if(reset) {
n <- ifelse(everusew, 10*df$n[ii], df$n[ii])
cr <- cori
x <- y <- w <- M <- Q <- c()
while(length(w) < df$n[ii]) {
xp <- rnorm(n)
yp <- sqrt(1-cr^2)*rnorm(n) + cr*xp
if(everusew) {
wp <- (xp-yp)^2+1
pr <- 1/wp
pr <- df$n[ii] * pr/(sum(pr) * 100)
wp <- 1/pr
samp <- (1:n)[runif(n)<pr]
x <- c(x,xp[samp])
y <- c(y,yp[samp])
w <- c(w,wp[samp])
} else {
x <- xp
y <- yp
w <- rep(1/n, n)
}
}
M <- 1
Q <- 1
nm <- sample(2:5,1)
nq <- sample(2:5,1)
x <- x[1:df$n[ii]]
y <- y[1:df$n[ii]]
w <- w[1:df$n[ii]]
iter <- 1
while( ((length(unique(M)) < 2) | (length(unique(Q)) < 2)) & (iter < 100)) {
iter <- iter + 1
tm <- sort(rnorm(nm))
tq <- sort(rnorm(nq))
theta1 <- c(NA,-Inf,tq,Inf)
theta2 <- c(NA,-Inf,tm,Inf)
Q <- rep(NA,n)
for(i in 2:length(theta1)) {
Q <- ifelse(x>theta1[i], i, Q)
}
Q <- Q - 1
Q <- as.numeric(as.factor(Q))
M <- rep(NA,n)
for(i in 2:length(theta2)) {
M <- ifelse(y>theta2[i], i, M)
}
M <- M - 1
M <- as.numeric(as.factor(M))
}
if(iter >=99) {
cat("could not get multiple bins\n")
cat("x <- c(",paste(x,collapse=","),")\n")
cat("y <- c(",paste(y,collapse=","),")\n")
cat("M <- c(",paste(M,collapse=","),")\n")
cat("Q <- c(",paste(Q,collapse=","),")\n")
}
df$M[ii] <- length(unique(M))
df$Q[ii] <- length(unique(Q))
} else {
df$cor[ii] <- df$cor[ii-1]
df$M[ii] <- length(unique(M))
df$Q[ii] <- length(unique(Q))
}
if(usew) {
wu <- w
} else {
wu <- rep(1,length(x))
}
st0 <- system.time(fcorp <- weightedCorr(x,y, method="Pearson", weights=wu, fast=fast, ML=ML))
df$peart[ii] <- sum(st0[1:2])
df$pear[ii] <- fcorp
st0 <- system.time(fcorp <- weightedCorr(x,y, method="Spearman", weights=wu, fast=fast, ML=ML))
df$speart[ii] <- sum(st0[1:2])
df$spear[ii] <- fcorp
st0 <- system.time(fcorp <- weightedCorr(x,M, method="Polyserial", weights=wu, fast=fast, ML=ML))
df$pst[ii] <- sum(st0[1:2])
df$ps[ii] <- fcorp
st0 <- system.time(fcorp <- weightedCorr(M, Q, method="Polychoric", weights=wu, fast=fast, ML=ML))
df$pct[ii] <- sum(st0[1:2])
df$pc[ii] <- fcorp
ii <- ii + 1
}
dfout <- data.frame(n=rep(df$n,4),
rho=rep(df$rho,4),
ML=rep(df$ML,4),
usew=rep(df$usew,4),
fast=rep(df$fast,4),
est=c(df$pear, df$spear, df$ps, df$pc),
t=c(df$peart, df$speart, df$pst, df$pct),
type=rep(c("Pearson", "Spearman", "Polyserial", "Polychoric"),each=nrow(df)))
dfout
}
spearmanSim <- function(n, rho, ML=FALSE, fast=TRUE, reset=TRUE, usew=TRUE, outstr="") {
len <- max(c(length(n), length(rho), length(ML), length(fast), length(reset), length(usew)))
vec <- c("n", "rho", "ML", "fast", "reset", "usew", "Spearman", "N")
N <- n
Spearman <- rho
for(i in 1:length(vec)) {
var <- get(vec[i])
if(length(var) != len) {
if(length(var) != 1) {
stop("length of ", sQuote(vec[i]), " must be 1 or the same as the longest vector passed to sim")
} else {
var <- rep(var,len)
}
}
assign(vec[i],var)
}
everusew <- sum(usew)>0
ns <- n
cor0 <- rho
df <- data.frame(n=n,rho=rho, ML=ML, fast=fast, reset=reset, usew=usew, Spearman = rho)
df$spear <- df$speart <- NA
df$pear <- df$peart <- NA
ii <- 1
while(ii <= nrow(df)) {
cori <- df$rho[ii]
n <- df$n[ii]
ML <- df$ML[ii]
fast <- df$fast[ii]
reset <- df$reset[ii]
usew <- df$usew[ii]
if(interactive()) {
cat(outstr,"n=",n,"cori=",cori,"pct=",100*ii/nrow(df),"\n")
cat(" fast=",fast,"ml=",ML,"reset=",reset,"\n")
}
if(reset) {
n <- ifelse(everusew, 50*df$n[ii], df$n[ii])
cr <- cori
x <- y <- w <- M <- Q <- c()
while(length(w) < df$n[ii]) {
xp <- rnorm(n)
yp <- sqrt(1-cr^2)*rnorm(n) + cr*xp
df$Spearman[ii] <- cor(xp, yp, method="spearman")
if(everusew) {
wp <- (xp-yp)^2+1
pr <- 1/wp
pr <- pr*df$n[ii]/(sum(pr))
wp <- 1/pr
#samp <- sample(1:n, size=df$n[ii], replace=FALSE, prob=pr)
samp <- (1:n)[runif(n)<pr]
#print(samp)
df$N[ii] <- length(samp)
x <- c(x,xp[samp])
y <- c(y,yp[samp])
w <- c(w,wp[samp])
} else {
x <- xp
y <- yp
w <- rep(1/n, n)
}
}
x <- x[1:df$n[ii]]
y <- y[1:df$n[ii]]
w <- w[1:df$n[ii]]
} else {
df$cor[ii] <- df$cor[ii-1]
}
if(usew) {
wu <- w
} else {
wu <- rep(1,length(x))
}
st0 <- system.time(fcorp <- weightedCorr(x,y, method="Spearman", weights=wu, fast=fast, ML=ML))
df$speart[ii] <- sum(st0[1:2])
df$spear[ii] <- fcorp
ii <- ii + 1
}
dfout <- data.frame(n=rep(df$n,1),
rho=rep(df$rho,1),
ML=rep(df$ML,1),
usew=rep(df$usew,1),
fast=rep(df$fast,1),
est=c(df$spear),
t=c(df$speart),
SpearmanOrig = df$Spearman,
N = df$N,
type=rep(c("Spearman"),each=nrow(df)))
dfout
}
|
/scratch/gouwar.j/cran-all/cranData/wCorr/R/wCorrSim.R
|
#' @title Calculates bivariate Pearson, Spearman, polychoric, and polyserial correlation coefficients
#'
#' @description Calculates bivariate Pearson, Spearman, polychoric, and polyserial correlation
#' coefficients in weighted or unweighted form, on discrete or continuous variables. Also
#' calculates tetrachoric and biserial correlation coefficients as described below.
#'
#' @param x a numeric (or numeric factor in case of polychoric) vector or an object that can be
#' coerced to a numeric or factor vector.
#' @param y a numeric vector (or factor in case of polychoric and polyserial) or an object that
#' can be coerced to a numeric or factor vector.
#' @param method a character string indicating which correlation coefficient is
#' to be computed. These include "Pearson" (default), "Spearman", "Polychoric", or "Polyserial".
#' For tetrachoric use "Polychoric" and for biserial use "Polyserial".
#' @param weights a numeric vector of weights. By default, the unweighted correlation coefficient is calculated
#' by setting the weights to a vector of all 1s.
#' @param ML a Boolean value indicating if full Maximum Likelihood (ML) is to be used (polyserial and polychoric only,
#' has no effect on Pearson or Spearman results). This substantially increases the
#' compute time. See the 'wCorr Arguments' vignette for a description of the effect of this argument.
#' @param fast a Boolean value indicating if the Rcpp methods should be used. Setting this value to FALSE
#' uses the pure R implementation and is included primarily for comparing the implementations
#' to each other. See the 'wCorr Arguments' vignette for a description of the effect of this argument.
#'
#' @details
#' In case of polyserial, x must be the observed continuous variable, and y the observed ordinal variable. For
#' polychoric, both must be categorical. The correlation methods are calculated as described in the 'wCorr Formulas'
#' vignette.
#'
#' For Spearman the data is first ranked and then a Pearson type correlation coefficient is calculated on
#' the result. The ranking method gives averages for ties.
#'
#' The details of computation are given in the 'wCorr Formulas' vignette.
#'
#' @return
#' A scalar that is the estimated correlation.
#'
#' @references
#' Polyserial computation based on the likelihood function in Cox, N. R. (1974), "Estimation of the Correlation between a Continuous and a Discrete Variable." Biometrics, 30 (1), pp 171-178.
#'
#' Polychoric computation based on the likelihood function in Olsson, U. (1979) "Maximum Likelihood Estimation of the Polychoric Correlation Coefficient." Psychometrika, 44 (4), pp 443-460.
#'
#' The weighted Pearson formula appears in many places, including the "correlate" function in Stata Corp, Stata Statistical Software: Release 8. College Station, TX: Stata Corp LP, 2003.
#'
#'
#' @examples
#' # run a polyserial correlation
#' attach(mtcars)
#' weightedCorr(gear, x=cyl, method="polyserial")
#' # weight by MPG
#' weightedCorr(y=gear, x=cyl, method="polyserial", weights=mpg)
#' # unweight
#' weightedCorr(y=gear, x=cyl, method="polyserial")
#'
#' # run a polychoric correlation
#' weightedCorr(gear, x=cyl, method="polychoric")
#' # weight by MPG
#' weightedCorr(y=gear, x=cyl, method="polychoric", weights=mpg)
#' # unweighted
#' weightedCorr(y=gear, x=cyl, method="polychoric")
#' detach(mtcars)
#'
#' @seealso \ifelse{latex}{\code{cor}}{\code{\link[stats]{cor}}}
#'
#' @export
#' @import Rcpp
#' @import stats
#' @useDynLib wCorr, .registration = TRUE
weightedCorr <- function(x, y, method = c("Pearson", "Spearman", "Polyserial", "Polychoric"), weights=rep(1,length(x)), ML=FALSE, fast=TRUE) {
x <- as.numeric(x)
y <- as.numeric(y)
weights <- as.numeric(weights)
if(!is.vector(x)) stop(paste0("The argument ",sQuote("x"), " must be a vector."))
if(!is.vector(y)) stop(paste0("The argument ",sQuote("y"), " must be a vector."))
if(!is.vector(weights)) stop(paste0("The argument ",sQuote("weights"), " must be a vector."))
if(length(x) != length(y)) stop(paste0("The vectors ", sQuote("x"), ", ", sQuote("y"), ", must be the same length."))
if(length(x) != length(weights)) stop(paste0("The vectors ", sQuote("x"), ", ", sQuote("y"), ", and ", sQuote("weights") ," must all be of the same length."))
value <- 0
foundMethod <- FALSE
#if (method == "Polyserial") {
# value <- polys(x, y, weights)
#}
method0 <- method
method <- tolower(method)
if(method == "polyserial") {
if(length(unique(y)) == length(y) & length(unique(x)) < length(x)) {
stop(paste0("Check argument definitions for ", sQuote("y"), " and ", sQuote("x") ,". The number of levels in the discrete variable ",sQuote("y")," is equal to the number of observations while the number of levels in continuous variable ", sQuote("x")," is less than the number of observations. Try transposing these two arguments."))
}
if(length(unique(y)) > length(unique(x))) {
warning(paste0("Check argument definitions for ", sQuote("y"), " and ", sQuote("x") ,". The number of levels in the discrete variable ",sQuote("y")," is larger than the number of levels in continuous variable ", sQuote("x")," indicating a possible transposition of the arguments."))
}
if(is.factor(x)) {
stop(paste0("The argument ", sQuote("X"), " is a factor but must be continuous in the Polyserial correlation."))
}
if(fast){
value <- polysFast(x, y, weights, ML=ML)
}
else {
value <- polysSlow(x, y, weights, ML=ML)
}
foundMethod <- TRUE
}
if (method == "polychoric") {
if (fast) {
value <- polycFast(x, y, w=weights, ML=ML)
}
else {
value <- polycSlow(x, y, w=weights, ML=ML)
}
foundMethod <- TRUE
}
if (method == "pearson" | method == "spearman") {
if(fast){
value <- contCorrFast(x, y, w=weights, method=method)
}
else {
value <- contCorr(x, y, w=weights, method=method)
}
foundMethod <- TRUE
}
if(!foundMethod) {
stop(paste0("Could not find method ",sQuote(method0), " see help for available methods."))
}
value
}
|
/scratch/gouwar.j/cran-all/cranData/wCorr/R/weightedCorr.R
|
# weighted ranks
# @param x the vector of values to rank
# @param w the (survey) weights for x
# returns vector of ranks
# not exported
wrank <- function(x, w=rep(1,length(x))) {
# sort by x so we can just traverse once
ord <- order(x)
rord <- (1:length(x))[order(ord)] # reverse order
xp <- x[ord] # x, permuted
wp <- w[ord] # weights, permuted
rnk <- rep(NA, length(x)) # blank ranks vector
# set up the first iteration
t1 <- 0 # total weight of lower ranked elements
i <- 1 # index
t2 <- 0 # total weight of tied elements (including self)
n <- 0 # number of tied elements
while(i < length(x)) {
t2 <- t2 + wp[i] # tied weight increases by this unit
n <- n + 1
if(xp[i+1] != xp[i]) { # the next one is not a tie
# find the rank of all tied elements
rnki <- t1 + (1 + (t2-1)/2)
# push that rank to all tied units
for(ii in 1:n) {
rnk[i-ii+1] <- rnki
}
# reset for next iteration
t1 <- t1 + t2 # new total weight for lower values
t2 <- 0 # new tied weight starts at 0
n <- 0
}
i <- i + 1
}
# final row
t2 <- t2 + wp[i] # add final weight to tied weight
rnki <- t1 + (1 + (t2-1)/2) # final rank
# push that rank to all final tied units (the tie group includes this last unit)
for(ii in 1:(n+1)) {
rnk[i-ii+1] <- rnki
}
# order by incoming index, so put in the original order
rnk[rord]
}
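# Illustrative check (kept as comments so nothing runs when this file is
# sourced). With unit weights wrank() reproduces base R's average-tie ranks;
# the weighted call is a hypothetical example, not stored package output:
#   x <- c(3, 1, 4, 1, 5)
#   wrank(x)                          # 3.0 1.5 4.0 1.5 5.0, same as rank(x)
#   wrank(x, w = c(1, 2, 1, 2, 1))    # weighted ranks; tied values share a rank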
wrank_old <- function(x, w=rep(1,length(x))) {
sapply(1:length(x), function(i) {
t1 <- sum(w[x<x[i]]) # total weight of units strictly below this value
t2 <- w[x==x[i]] # weights of the tied units (including this one)
# Note: when selecting the range to average over you have to figure out which unit is first.
# this method assumes that all of the units are exchangeable and integrates over all
# units in the tie.
# mean(t2) brings the unit up to the smallest rank
# sum(t2) - mean(t2) /2 is then the middle of the ranks
t1 + mean(t2) + (sum(t2) -mean(t2))/2
})
}
|
/scratch/gouwar.j/cran-all/cranData/wCorr/R/wrank.R
|
.onAttach <- function(libname, pkgname) {
packageStartupMessage(paste0("wCorr v", utils::packageDescription("wCorr")$Version, "\n"))
}
|
/scratch/gouwar.j/cran-all/cranData/wCorr/R/zzz.R
|
## ----packages and data, echo=FALSE, results="hide", message=FALSE,warning=FALSE----
if(!requireNamespace("knitr")) {
stop("Cannot build vignette without knitr package")
}
if(!requireNamespace("lattice")) {
stop("Cannot build vignette without lattice package")
}
require(knitr)
require(wCorr)
require(lattice)
# set layout so a figure label appears to go with the figure
trellis.device()
trellis.par.set(list(layout.widths = list(left.padding = 3, right.padding = 3),
layout.heights = list(top.padding = -1, bottom.padding = 3)))
load("../R/sysdata.rda")
## ----setup fast, echo=FALSE, results="hide", message=FALSE, warning=FALSE-----
# replicate captioner functionality we used to use
cp <- function(prefix="Figure") {
pf <- prefix
cw <- data.frame(name="__XX__UNUSED", print="Table 99")
i <- 1
function(x, display=c("save", "cite", "cw")) {
if(display[1] %in% "cw") {
return(cw)
}
display <- match.arg(display)
if(is.null(x)) {
stop("must define argument x")
}
if(display %in% "cite" && !x %in% cw$name) {
display <- "save"
}
if(display %in% "cite") {
return(cw$print[cw$name == x])
}
if(display %in% "save") {
if(x %in% cw$name) {
stop("Label:",dQuote(x)," already in use.")
}
cw[i, "name"] <<- x
res <- paste(pf, i, ":")
cw[i, "print"] <<- res
i <<- i + 1
return(res)
}
}
}
# fast$i <- rep(1:(nrow(fast)/2),each=2)
# mfast <- merge(subset(fast,fast),
# subset(fast,!fast, c("i", "est")),
# by="i",
# suffixes=c(".fast",".slow"))
# mfast$fast <- NULL
# mfast$absdrho <- pmax(abs(mfast$est.fast - mfast$est.slow), 1E-16)
# aggfast <- summaryBy(absdrho ~ n + rho + type, data=mfast, FUN=mean, na.rm=TRUE)
fmax <- max(aggfast$absdrho.mean)
fmax10 <- ceiling(log10(fmax))
## ----tables and figures, echo=FALSE, results="hide", message=FALSE,warning=FALSE----
fig_nums <- cp()
table_nums <- cp(prefix = "Table")
MLRMSE <- fig_nums("MLRMSE")
Polychoric <- table_nums("Polychoric")
Polyserial <- table_nums("Polyserial")
fastMAD <- table_nums("fastMAD")
speedi <- table_nums("speedi")
## ----MLRMSEplot, echo=FALSE,fig.width=7, fig.height=5.5-----------------------
#ml <- subset(ML, type %in% c("Polychoric", "Polyserial"))
#ml$rmse <- (ml$est - ml$rho)^2
#aggml <- summaryBy(rmse ~ n + rho + type + ML, data=ml, FUN=mean, na.rm=TRUE)
#aggml$rmse.mean <- sqrt(aggml$rmse.mean)
#aggml$ml <- ifelse(aggml$ML==TRUE, "ML=TRUE", "ML=FALSE")
#aggml$nt <- factor(paste("n=",aggml$n))
xyplot(rmse.mean ~ rho|type + nt,
data=aggml,
groups=ml,
scales=list(y=list(log=10, cex=0.7), x = list(cex=0.7)),
type=c("l", "g"),
ylab="RMSE",
xlab=expression(rho),
auto.key=list(lines=TRUE, points=FALSE, space="right", cex=0.7),
par.settings=list(superpose.line=list(lwd=2), plot.line=list(lwd=2)))
## ----ML RMSE table polyc, echo=FALSE------------------------------------------
#ml$i <- rep(1:(nrow(ml)/2),each=2)
#mml <- merge(subset(ml,ML),
# subset(ml,!ML, c("i", "est")),
# by="i",
# suffixes=c(".ml",".nonml"))
#mml$absd <- abs(mml$est.ml - mml$est.nonml)
#aggt1_0 <- summaryBy(absd ~ type + n + ML, data=subset(mml, #type=="Polychoric"), FUN=mean, na.rm=TRUE)
#aggt1_0$ML <- NULL
#aggt1 <- summaryBy(rmse ~ type + n + ML, data=subset(ml, type=="Polychoric"), FUN=mean, na.rm=TRUE)
#aggt1$rmse.mean <- sqrt(aggt1$rmse.mean)
mg <- merge(subset(aggt1, ML==TRUE, c("type", "n", "rmse.mean")),
subset(aggt1, ML==FALSE, c("type", "n", "rmse.mean")),
by=c("type", "n"))
mg$rmse.mean.diff <- mg$rmse.mean.x - mg$rmse.mean.y
mg <- merge(mg, aggt1_0, by=c("type", "n"))
colnames(mg) <- c("Correlation type", "n", "RMSE ML=TRUE", "RMSE ML=FALSE", "RMSE difference", "RMAD")
mg[,3:6] <- round(mg[,3:6],4)
kable(mg)
mg1 <- mg
#knitr::asis_output("\\")
## ----ML RMSE table polys, echo=FALSE------------------------------------------
#aggt2_0 <- summaryBy(absd ~ type + n + ML, data=subset(mml, type=="Polyserial"), FUN=mean, na.rm=TRUE)
#aggt2_0$ML <- NULL
#aggt2 <- summaryBy(rmse ~ type + n + ML, data=subset(ml, type=="Polyserial"), FUN=mean, na.rm=TRUE)
#aggt2$rmse.mean <- sqrt(aggt2$rmse.mean)
mg <- merge(subset(aggt2, ML==TRUE, c("n", "type", "rmse.mean")),
subset(aggt2, ML==FALSE, c("type", "n", "rmse.mean")),
by=c("type", "n"))
mg$rmse.mean.diff <- mg$rmse.mean.x - mg$rmse.mean.y
mg <- merge(mg, aggt2_0, by=c("type", "n"))
colnames(mg) <- c("Correlation type", "n", "RMSE ML=TRUE", "RMSE ML=FALSE", "RMSE difference", "RMAD")
mg[,3:6] <- round(mg[,3:6],4)
kable(mg)
mg2 <- mg
## ----fast MAD plot, echo=FALSE,fig.width=7, fig.height=3.5--------------------
xyplot(absdrho.mean ~ rho|type,
data=aggfast,
groups=n,
type=c("l", "g"),
ylab="RMAD",
scales=list(y=list(log=10, cex=0.7), x=list(cex=0.7)),
xlab=expression(rho),
auto.key=list(lines=TRUE, points=FALSE, space="right", cex=0.7),
par.settings=list(superpose.line=list(lwd=2), plot.line=list(lwd=2))
)
## ----plot speed, echo=FALSE,fig.width=7, fig.height=3.5-----------------------
# speed$class <- ifelse(speed$ML, "ML=T,", "ML=F,")
# speed$class <- paste0(speed$class, ifelse(speed$fast, "fast=T", "fast=F"))
# speed$t <- pmax(speed$t, 0.001)
# agg <- summaryBy(t ~ n + type + class, data=speed, FUN=mean, na.rm=TRUE)
xyplot(t.mean ~ n|type,
data=subset(aggSpeed, type %in% c("Polyserial", "Polychoric")),
type=c("l", "g"),
ylab="Computing Time",
scales=list(y=list(log=10, cex=0.7), x=list(log=10, cex=0.7)),
xlab="n",
groups=class,
auto.key=list(lines=TRUE, points=FALSE, space="right", cex=0.7),
par.settings=list(superpose.line=list(lwd=2), plot.line=list(lwd=2))
)
|
/scratch/gouwar.j/cran-all/cranData/wCorr/inst/doc/wCorrArguments.R
|
---
title: "wCorr Arguments"
author: "Paul Bailey, Ahmad Emad, Ting Zhang, Qingshu Xie"
date: '`r Sys.Date()`'
output: pdf_document
vignette: >
%\VignetteEngine{knitr::rmarkdown}
%\VignetteIndexEntry{wCorr Arguments}
\usepackage[utf8]{inputenc}
---
```{r packages and data, echo=FALSE, results="hide", message=FALSE,warning=FALSE}
if(!requireNamespace("knitr")) {
stop("Cannot build vignette without knitr package")
}
if(!requireNamespace("lattice")) {
stop("Cannot build vignette without lattice package")
}
require(knitr)
require(wCorr)
require(lattice)
# set layout so a figure label appears to go with the figure
trellis.device()
trellis.par.set(list(layout.widths = list(left.padding = 3, right.padding = 3),
layout.heights = list(top.padding = -1, bottom.padding = 3)))
load("../R/sysdata.rda")
```
```{r setup fast, echo=FALSE, results="hide", message=FALSE, warning=FALSE}
# replicate captioner functionality we used to use
cp <- function(prefix="Figure") {
pf <- prefix
cw <- data.frame(name="__XX__UNUSED", print="Table 99")
i <- 1
function(x, display=c("save", "cite", "cw")) {
if(display[1] %in% "cw") {
return(cw)
}
display <- match.arg(display)
if(is.null(x)) {
stop("must define argument x")
}
if(display %in% "cite" && !x %in% cw$name) {
display <- "save"
}
if(display %in% "cite") {
return(cw$print[cw$name == x])
}
if(display %in% "save") {
if(x %in% cw$name) {
stop("Label:",dQuote(x)," already in use.")
}
cw[i, "name"] <<- x
res <- paste(pf, i, ":")
cw[i, "print"] <<- res
i <<- i + 1
return(res)
}
}
}
# fast$i <- rep(1:(nrow(fast)/2),each=2)
# mfast <- merge(subset(fast,fast),
# subset(fast,!fast, c("i", "est")),
# by="i",
# suffixes=c(".fast",".slow"))
# mfast$fast <- NULL
# mfast$absdrho <- pmax(abs(mfast$est.fast - mfast$est.slow), 1E-16)
# aggfast <- summaryBy(absdrho ~ n + rho + type, data=mfast, FUN=mean, na.rm=TRUE)
fmax <- max(aggfast$absdrho.mean)
fmax10 <- ceiling(log10(fmax))
```
```{r tables and figures, echo=FALSE, results="hide", message=FALSE,warning=FALSE}
fig_nums <- cp()
table_nums <- cp(prefix = "Table")
MLRMSE <- fig_nums("MLRMSE")
Polychoric <- table_nums("Polychoric")
Polyserial <- table_nums("Polyserial")
fastMAD <- table_nums("fastMAD")
speedi <- table_nums("speedi")
```
The wCorr package can be used to calculate Pearson, Spearman, polyserial, and polychoric correlations, in weighted or unweighted form.^[The estimation procedure used by the wCorr package for the polyserial is based on the likelihood function in Cox, N. R. (1974), "Estimation of the Correlation between a Continuous and a Discrete Variable." *Biometrics*, **30** (1), pp 171-178. The likelihood function for polychoric is from Olsson, U. (1979) "Maximum Likelihood Estimation of the Polychoric Correlation Coefficient." *Psychometrika*, **44** (4), pp 443-460. The likelihood used for Pearson and Spearman is written down in many places. One is the "correlate" function in Stata Corp, Stata Statistical Software: Release 8. College Station, TX: Stata Corp LP, 2003.] The package implements the tetrachoric correlation as a specific case of the polychoric correlation and biserial correlation as a specific case of the polyserial correlation. When weights are used, the correlation coefficients are calculated with so-called sample weights or inverse probability weights.^[Sample weights are comparable to `pweight` in Stata.]
This vignette describes two Boolean switches in the wCorr package, explains their implications, and uses simulation to show the impact of these switches on the resulting correlation estimates.
First, the Maximum Likelihood, or `ML`, switch uses the Maximum Likelihood Estimator (MLE) when `ML=TRUE` or a consistent but non-MLE estimator of the nuisance parameters when `ML=FALSE`. The simulations show that using `ML=FALSE` is preferable because it speeds computation and decreases the root mean square error (RMSE) of the estimator.
Second, the `fast` argument gives the option of a pure R implementation (`fast=FALSE`) or an implementation that relies on the `Rcpp` and `RcppArmadillo` packages (`fast=TRUE`). The simulations show that the two implementations agree to within $10^{`r fmax10`}$, and the `fast=TRUE` option is always at least as fast.
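As a quick orientation, both switches are simply arguments to `weightedCorr`; the call below is a sketch of the argument pattern only, with `x`, `y`, and `w` standing in for a user's data and weights (it is not evaluated here).
```{r switch arguments, eval=FALSE}
# illustrative call pattern only; x, y, and w are placeholders
weightedCorr(x, y, method = "Polychoric", weights = w, ML = FALSE, fast = TRUE)
```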
In addition to this vignette, the *wCorr Formulas* vignette describes the statistical properties of the correlation estimators in the package and has a more complete derivation of the likelihood functions.
# The `ML` switch
The wCorr package computes correlation coefficients between two vectors of random variables that are jointly bivariate normal. We call the two vectors ***X*** and ***Y***.
$$\begin{pmatrix} X \\ Y \end{pmatrix} \sim N \left[ \begin{pmatrix} \mu_x \\ \mu_y \end{pmatrix}, \boldsymbol{\Sigma} \right] $$
where $N(\mathbf{A},\boldsymbol{\Sigma})$ is the bivariate normal distribution with mean ***A*** and covariance $\boldsymbol{\Sigma}$.
## Computation of polyserial correlation
The likelihood function for an individual observation of the polyserial correlation is^[See the *wCorr Formulas* vignette for a more complete description of the polyserial correlations' likelihood function.]
$$\mathrm{Pr}\left( \rho=r, \boldsymbol{\Theta}=\boldsymbol{\theta} ; Z=z_i, M=m_i \right) = \phi(z_i) \left[ \Phi\left( \frac{\theta_{m_i+2} - r \cdot z_i}{\sqrt{1-r^2}} \right) - \Phi \left( \frac{\theta_{m_i+1} - r \cdot z_i}{\sqrt{1-r^2}} \right) \right]$$
where $\rho$ is the correlation between ***X*** and ***Y***, ***Z*** is the normalized version of ***X***, and ***M*** is a discretized version of ***Y***, using $\boldsymbol{\theta}$ as cut points as described in the *wCorr Formulas* vignette. Here an *i* is used to index the observed units.
The log-likelihood function ($\ell$) is then
$$\ell(\rho, \boldsymbol{\Theta}=\boldsymbol{\theta};\mathbf{Z}=\mathbf{z},\mathbf{M}=\mathbf{m}) = \sum_{i=1}^n w_i \ln\left[ \mathrm{Pr}\left( \rho=r, \boldsymbol{\Theta}=\boldsymbol{\theta} ; Z=z_i, M=m_i \right) \right]$$
The derivatives of $\ell$ can be written down but are not readily computed. When the `ML` argument is set to `FALSE` (the default), the values of $\boldsymbol{\theta}$ are computed using a consistent estimator^[The value of the nuisance parameter $\boldsymbol{\theta}$ is chosen to be $\Phi^{-1}(n/N)$ where $n$ is the number of values to the left of the cut point ($\theta_i$ value) and $N$ is the number of data points overall. For the weighted case $n$ is replaced by the sum of the weights to the left of the cut point and $N$ is replaced by the total weight of all units. See the **wCorr Formulas** vignette for a more complete description.] and a one-dimensional optimization of $\rho$ is calculated using the `optimize` function in the `stats` package. When the `ML` argument is set to `TRUE`, a multi-dimensional optimization is done for $\rho$ and $\boldsymbol{\theta}$ using the `bobyqa` function in the `minqa` package.
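As an illustration of the practical difference, the chunk below simulates a small polyserial dataset and fits it with both settings of `ML`; the data are simulated purely for illustration, and the two estimates are typically close but not identical.
```{r polyserial ML example}
set.seed(42)
x <- rnorm(500)
y <- 0.6 * x + sqrt(1 - 0.6^2) * rnorm(500)
m <- cut(y, breaks = c(-Inf, -1, 0, 1, Inf), labels = FALSE)  # discretized y
c(nonML = weightedCorr(x, m, method = "Polyserial", ML = FALSE),
  ML    = weightedCorr(x, m, method = "Polyserial", ML = TRUE))
```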
## Computation of polychoric correlation
For the polychoric correlation the observed data is expressed in ordinal form for both variables. Here the discretized version of ***X*** is ***P*** and the discretized version of ***Y*** remains ***M***.^[See the "wCorr Formulas" vignette for a more complete description of the polychoric correlations' likelihood function.] The likelihood function for the polychoric is
$$\mathrm{Pr}\left( \rho=r, \boldsymbol{\Theta}=\boldsymbol{\theta}, \boldsymbol{\Theta}'=\boldsymbol{\theta}' ; P=p_i, M=m_i \right) = \int_{\theta_{p_i+1}'}^{\theta_{p_i+2}'} \int_{\theta_{m_i+1}}^{\theta_{m_i+2}} \mkern-40mu f(x,y|\rho=r) dy dx$$
where $f(x,y|r)$ is the normalized bivariate normal distribution with correlation $\rho$, $\boldsymbol{\theta}$ are the cut points used to discretize ***Y*** into ***M***, and $\boldsymbol{\theta'}$ are the cut points used to discretize ***X*** into ***P***.
The log-likelihood is then
$$\ell(\rho, \boldsymbol{\Theta}=\boldsymbol{\theta}, \boldsymbol{\Theta}'=\boldsymbol{\theta}' ;\mathbf{P}=\mathbf{p}, \mathbf{M}=\mathbf{m}) = \sum_{i=1}^n w_i \ln\left[\mathrm{Pr}\left( \rho=r, \boldsymbol{\Theta}=\boldsymbol{\theta},\boldsymbol{\Theta}'= \boldsymbol{\theta}' ; P=p_i, M=m_i \right) \right] $$
The derivatives of $\ell$ can be written down but are not readily computed. When the `ML` argument is set to `FALSE` (the default), the values of $\boldsymbol{\theta}$ and $\boldsymbol{\theta}'$ are computed using a consistent estimator and a one dimensional optimization of $\rho$ is calculated using the `optimize` function in the `stats` package. When the `ML` argument is set to `TRUE`, a multi-dimensional optimization is done for $\rho$, $\boldsymbol{\theta}$, $\boldsymbol{\theta}'$ using the `bobyqa` function in the `minqa` package.
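The analogous comparison for the polychoric case discretizes both variables; again the data are simulated only to illustrate the call.
```{r polychoric ML example}
set.seed(43)
x <- rnorm(500)
y <- 0.6 * x + sqrt(1 - 0.6^2) * rnorm(500)
p <- cut(x, breaks = c(-Inf, -0.5, 0.7, Inf), labels = FALSE)  # discretized x
m <- cut(y, breaks = c(-Inf, -1, 0, 1, Inf), labels = FALSE)   # discretized y
c(nonML = weightedCorr(p, m, method = "Polychoric", ML = FALSE),
  ML    = weightedCorr(p, m, method = "Polychoric", ML = TRUE))
```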
# Simulation study
To demonstrate the effect of the `ML` and `fast` switches a few simulation studies are performed to compare the similarity of the results when the switch is set to `TRUE` to the result when the switch is set to `FALSE`. This is done first for the `ML` switch and then for the `fast` switch.
Finally, simulations show the implications of these switches on the speed of the computation.
# General procedures of the simulation study of unweighted correlations
A simulation is run several times.^[The exact number is noted for each specific simulation.] For each iteration, the following procedure is used:^[When the exact method of selecting a parameter (such as $n$) is not noted above, it is described as part of each simulation.]
* select a true correlation coefficient $\rho$;
* select the number of observations $n$;
* generate ***X*** and ***Y*** to be bivariate normally distributed using a pseudo-Random Number Generator (RNG);
* using a pseudo-RNG, select the number of bins for ***M*** and ***P*** ($t$ and $t'$) independently from the set \{2, 3, 4, 5\};
* select the bin boundaries for ***M*** and ***P*** ($\boldsymbol{\theta}$ and $\boldsymbol{\theta}'$) by sorting the results of $(t-1)$ and $(t'-1)$ draws, respectively, from a normal distribution using a pseudo-RNG;
* confirm that at least 2 levels of each of ***M*** and ***P*** are occupied (if not, return to the previous step); and
* calculate and record the correlation coefficients.
One of a few possible statistics is then calculated. To compare two levels of a switch the Relative Mean Absolute Deviation is used
$$RMAD= \frac{1}{m} \sum_{j=1}^m | r_{j, \mathtt{TRUE}} - r_{j, \mathtt{FALSE}} | $$
where $m$ is the number of simulations run, and $r_{j, \mathtt{TRUE}}$ and $r_{j, \mathtt{FALSE}}$ are the estimated correlation coefficients for the $j$th simulated dataset when the switch is set to `TRUE` and `FALSE`, respectively. This statistic is called "relative" because the comparison is with the other method of computing the statistic, not with the true value.
To compare either level to the true correlation coefficient the Root Mean Square Error is used
$$RMSE= \sqrt{ \frac{1}{m} \sum_{j=1}^m (r_j - \rho_j)^2 } $$
where, for the $j$th simulated dataset, $r_j$ is an estimated correlation coefficient and $\rho_j$ is the value used to generate the data (***X***, ***Y***, ***M***, and ***P***).
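Both summary statistics are straightforward to compute; the sketch below is illustrative only, with `rTRUE`, `rFALSE`, and `rho` standing in for the vectors of estimates and generating values produced by the simulation.
```{r rmad rmse helpers, eval=FALSE}
# RMAD: mean absolute difference between the estimates under the two settings
rmad <- function(rTRUE, rFALSE) mean(abs(rTRUE - rFALSE))
# RMSE: root mean square error of the estimates against the generating rho
rmse <- function(r, rho) sqrt(mean((r - rho)^2))
```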
# ML switch
A simulation was done using the Cartesian product (all possible combinations of) $\mathtt{ML} \in \{\mathtt{TRUE}, \mathtt{FALSE} \}$, $\rho \in \left( -0.99, -0.95, -0.90, -0.85, ..., 0.95, 0.99 \right)$, and $n \in \{10, 100, 1000\}$. Each iteration is run three times to increase the precision of the simulation. The same values of the variables are used in the computation for `ML=TRUE` as well as for `ML=FALSE`; and then the statistics are compared between the two sets of results (e.g. `ML=TRUE` and `ML=FALSE`).
\newpage
**`r fig_nums("MLRMSE", display="cite")`.** *Root Mean Square Error for `ML=TRUE` and `ML=FALSE`.*
```{r MLRMSEplot, echo=FALSE,fig.width=7, fig.height=5.5}
#ml <- subset(ML, type %in% c("Polychoric", "Polyserial"))
#ml$rmse <- (ml$est - ml$rho)^2
#aggml <- summaryBy(rmse ~ n + rho + type + ML, data=ml, FUN=mean, na.rm=TRUE)
#aggml$rmse.mean <- sqrt(aggml$rmse.mean)
#aggml$ml <- ifelse(aggml$ML==TRUE, "ML=TRUE", "ML=FALSE")
#aggml$nt <- factor(paste("n=",aggml$n))
xyplot(rmse.mean ~ rho|type + nt,
data=aggml,
groups=ml,
scales=list(y=list(log=10, cex=0.7), x = list(cex=0.7)),
type=c("l", "g"),
ylab="RMSE",
xlab=expression(rho),
auto.key=list(lines=TRUE, points=FALSE, space="right", cex=0.7),
par.settings=list(superpose.line=list(lwd=2), plot.line=list(lwd=2)))
```
The RMSE for these two options is so similar that the two lines cannot be distinguished for most of the plot. The exact differences are shown for the Polychoric in `r table_nums("Polychoric", display="cite")` and for the Polyserial in `r table_nums("Polyserial", display="cite")`. The column labeled "RMSE difference" shows how much larger the RMSE is for `ML=TRUE` than for `ML=FALSE`. Because this difference is always positive, the RMSE of the `ML=FALSE` option is always lower. For this reason, the `ML=TRUE` option is preferable only when there is an independent reason to prefer the MLE.
\
**`r table_nums("Polychoric", display="cite")`.** *Relative Mean Absolute Deviation between `ML=TRUE` and `ML=FALSE` for Polychoric.*
```{r ML RMSE table polyc, echo=FALSE}
#ml$i <- rep(1:(nrow(ml)/2),each=2)
#mml <- merge(subset(ml,ML),
# subset(ml,!ML, c("i", "est")),
# by="i",
# suffixes=c(".ml",".nonml"))
#mml$absd <- abs(mml$est.ml - mml$est.nonml)
#aggt1_0 <- summaryBy(absd ~ type + n + ML, data=subset(mml, #type=="Polychoric"), FUN=mean, na.rm=TRUE)
#aggt1_0$ML <- NULL
#aggt1 <- summaryBy(rmse ~ type + n + ML, data=subset(ml, type=="Polychoric"), FUN=mean, na.rm=TRUE)
#aggt1$rmse.mean <- sqrt(aggt1$rmse.mean)
mg <- merge(subset(aggt1, ML==TRUE, c("type", "n", "rmse.mean")),
subset(aggt1, ML==FALSE, c("type", "n", "rmse.mean")),
by=c("type", "n"))
mg$rmse.mean.diff <- mg$rmse.mean.x - mg$rmse.mean.y
mg <- merge(mg, aggt1_0, by=c("type", "n"))
colnames(mg) <- c("Correlation type", "n", "RMSE ML=TRUE", "RMSE ML=FALSE", "RMSE difference", "RMAD")
mg[,3:6] <- round(mg[,3:6],4)
kable(mg)
mg1 <- mg
#knitr::asis_output("\\")
```
\
**`r table_nums("Polyserial", display="cite")`.** *Relative Mean Absolute Deviation between `ML=TRUE` and `ML=FALSE` for Polyserial.*
```{r ML RMSE table polys, echo=FALSE}
#aggt2_0 <- summaryBy(absd ~ type + n + ML, data=subset(mml, type=="Polyserial"), FUN=mean, na.rm=TRUE)
#aggt2_0$ML <- NULL
#aggt2 <- summaryBy(rmse ~ type + n + ML, data=subset(ml, type=="Polyserial"), FUN=mean, na.rm=TRUE)
#aggt2$rmse.mean <- sqrt(aggt2$rmse.mean)
mg <- merge(subset(aggt2, ML==TRUE, c("n", "type", "rmse.mean")),
subset(aggt2, ML==FALSE, c("type", "n", "rmse.mean")),
by=c("type", "n"))
mg$rmse.mean.diff <- mg$rmse.mean.x - mg$rmse.mean.y
mg <- merge(mg, aggt2_0, by=c("type", "n"))
colnames(mg) <- c("Correlation type", "n", "RMSE ML=TRUE", "RMSE ML=FALSE", "RMSE difference", "RMAD")
mg[,3:6] <- round(mg[,3:6],4)
kable(mg)
mg2 <- mg
```
For the Polychoric, the difference in RMSE between these two methods is within `r round(mg1[1,5],3)` for $n$ of 10 and shrinks to less than `r formatC(round(mg1[2,5],4), format="f", digits=4)` for $n$ of 100 or more. Given the small magnitude of these differences, the faster method is preferable.
The final column in the above tables shows the RMAD, which measures how similar the `ML=TRUE` and `ML=FALSE` results are to each other. Because these values are larger than 0, the two sets of estimates do not agree completely. If a user considers the MLE to be the correct estimate, the RMAD shows the deviation of the `ML=FALSE` results from that estimate.
# fast switch
This section examines the agreement between the pure R implementation of the function that calculates the correlation and the `Rcpp` and `RcppArmadillo` implementation, which is expected to be faster. The code can compute with either option by setting `fast=FALSE` (pure R) or `fast=TRUE` (Rcpp).
A simulation was done at each level of the Cartesian product of $\mathtt{fast} \in \{\mathtt{TRUE}, \mathtt{FALSE} \}$, \newline $\rho \in \left( -0.99, -0.95, -0.90, -0.85, ..., 0.95, 0.99 \right)$, and $n \in \{10, 100, 1000\}$. Each iteration was run 100 times. The same values of the variables are used in the computation for `fast=TRUE` as well as for `fast=FALSE`; and then the statistics are compared between the two sets of results.
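As a single concrete check (one simulated dataset, shown only for illustration), the two code paths can be compared directly:
```{r fast check example}
set.seed(44)
x <- rnorm(300)
m <- cut(0.4 * x + sqrt(1 - 0.4^2) * rnorm(300),
         breaks = c(-Inf, 0, 1, Inf), labels = FALSE)
# absolute difference between the Rcpp and pure R code paths
abs(weightedCorr(x, m, method = "Polyserial", fast = TRUE) -
    weightedCorr(x, m, method = "Polyserial", fast = FALSE))
```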
The plot below shows all differences between the `fast=TRUE` and `fast=FALSE` runs for the four correlation types. Note that differences smaller than $10^{-16}$ are indistinguishable from 0 by the machine; because of this, all values were floored at $10^{-16}$ so that they could be shown on a log scale.
\
**`r fig_nums("fastMAD", display="cite")`.** *Relative Mean Absolute Differences between `fast=TRUE` and `fast=FALSE`.*
```{r fast MAD plot, echo=FALSE,fig.width=7, fig.height=3.5}
xyplot(absdrho.mean ~ rho|type,
data=aggfast,
groups=n,
type=c("l", "g"),
ylab="RMAD",
scales=list(y=list(log=10, cex=0.7), x=list(cex=0.7)),
xlab=expression(rho),
auto.key=list(lines=TRUE, points=FALSE, space="right", cex=0.7),
par.settings=list(superpose.line=list(lwd=2), plot.line=list(lwd=2))
)
```
The above shows that differences as a result of the `fast` argument are never expected to be larger than $10^{`r fmax10`}$ for any correlation type. The Spearman never shows a difference distinguishable from zero, and the Pearson shows differences only slightly larger than the smallest observable difference for double precision floating point values (about $1 \times 10^{-16}$). This indicates that the computational differences are completely irrelevant for these two types.
For the other two types it is unclear which implementation is more accurate; however, because the two never differ by more than $10^{`r fmax10`}$, any application that does not require precision beyond $10^{`r fmax10`}$ can use the `fast=TRUE` argument for faster computation.
# Implications for speed
To show the effect of the `ML` and `fast` switches on computation, a simulation was done at each level of the Cartesian product of $\mathtt{ML} \in \{\mathtt{TRUE}, \mathtt{FALSE} \}$, $\mathtt{fast} \in \{\mathtt{TRUE}, \mathtt{FALSE} \}$, $\rho \in \left( -0.99, -0.95, -0.90, -0.85, ..., 0.95, 0.99 \right)$, and $n \in \{10^1, 10^{1.25}, 10^{1.5}, ..., 10^7\}$. Each iteration is run 80 times when $n<10^5$ and 20 times when $n\geq 10^5$. The same values of the variables are used in the computations at all four combinations of `ML` and `fast`. A variety of correlations are chosen so that the results represent an average of possible values of $\rho$.
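Readers who want a rough sense of the timings on their own machine can call `system.time` directly; the sketch below is not evaluated here, and the results will depend on hardware.
```{r speed example, eval=FALSE}
set.seed(45)
n <- 1e5
x <- rnorm(n)
m <- cut(0.5 * x + sqrt(0.75) * rnorm(n),
         breaks = c(-Inf, -1, 0, 1, Inf), labels = FALSE)
system.time(weightedCorr(x, m, method = "Polyserial", fast = TRUE,  ML = FALSE))
system.time(weightedCorr(x, m, method = "Polyserial", fast = FALSE, ML = FALSE))
```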
The following plot shows the mean computing time (in seconds) versus $n$.
\
**`r fig_nums("speedi", display="cite")`.** *Computation time comparison.*
```{r plot speed, echo=FALSE,fig.width=7, fig.height=3.5}
# speed$class <- ifelse(speed$ML, "ML=T,", "ML=F,")
# speed$class <- paste0(speed$class, ifelse(speed$fast, "fast=T", "fast=F"))
# speed$t <- pmax(speed$t, 0.001)
# agg <- summaryBy(t ~ n + type + class, data=speed, FUN=mean, na.rm=TRUE)
xyplot(t.mean ~ n|type,
data=subset(aggSpeed, type %in% c("Polyserial", "Polychoric")),
type=c("l", "g"),
ylab="Computing Time",
scales=list(y=list(log=10, cex=0.7), x=list(log=10, cex=0.7)),
xlab="n",
groups=class,
auto.key=list(lines=TRUE, points=FALSE, space="right", cex=0.7),
par.settings=list(superpose.line=list(lwd=2), plot.line=list(lwd=2))
)
```
In all cases, setting the `ML` option to `FALSE` and the `fast` option to `TRUE` either speeds up computation or leaves it unchanged. Users wishing for the fastest computation will use `ML=FALSE` and `fast=TRUE`.
For the Polychoric, when $n$ is ten million observations ($n=10^7$), the computation takes `r round(with(subset(speed, fast==FALSE & n==1e7 & type=="Polychoric"), mean(t)))` seconds when `fast=FALSE` and `r round(with(subset(speed, fast==TRUE & n==1e7 & type=="Polychoric"), mean(t)))` seconds when `fast=TRUE`. When `fast=TRUE`, setting `ML=FALSE` speeds computation by `r round(with(subset(speed, fast==TRUE & ML==TRUE & n==1e7 & type=="Polychoric"), mean(t)) - with(subset(speed, fast==TRUE & ML==FALSE & n==1e7 & type=="Polychoric"), mean(t)))` seconds.
For the Polyserial, when $n$ is ten million observations, the computation takes `r round(with(subset(speed, ML==TRUE & n==1e7 & type=="Polyserial"), mean(t)))` seconds when `ML=TRUE` and `r round(with(subset(speed, ML==FALSE & n==1e7 & type=="Polyserial"), mean(t)))` seconds when `ML=FALSE`. When `ML=FALSE`, setting `fast=TRUE` speeds computation by `r round(with(subset(speed, fast==FALSE & ML==FALSE & n==1e7 & type=="Polyserial"), mean(t)) - with(subset(speed, fast==TRUE & ML==FALSE & n==1e7 & type=="Polyserial"), mean(t)))` seconds.
# Conclusion
Overall the simulations show that the `ML` option is not more accurate but does add computational burden.
The `fast=TRUE` and `fast=FALSE` options are an `Rcpp` version and a pure `R` version of the correlation code, respectively, and they agree with each other; the differences are not expected to be larger than $10^{`r fmax10`}$.
Thus users wishing for the fastest computation and accurate results can use `ML=FALSE` and `fast=TRUE`.
|
/scratch/gouwar.j/cran-all/cranData/wCorr/inst/doc/wCorrArguments.Rmd
|
## ----packages and data, echo=FALSE, results="hide", message=FALSE, warning=FALSE----
require(wCorr)
if(!requireNamespace("doBy")) {
stop("Cannot build vignette without knitr package")
}
if(!requireNamespace("lattice")) {
stop("Cannot build vignette without lattice package")
}
require(lattice)
require(doBy)
# set layout so a figure label appears to go with the figure
trellis.device()
trellis.par.set(list(layout.widths = list(left.padding = 3, right.padding = 3),
layout.heights = list(top.padding = -1, bottom.padding = 3)))
load("../R/sysdata.rda")
## ----tables and figures, echo=FALSE, results="hide", message=FALSE, warning=FALSE----
# replicate captioner functionality we used to use
cp <- function(prefix="Figure") {
pf <- prefix
cw <- data.frame(name="__XX__UNUSED", print="Table 99")
i <- 1
function(x, display=c("save", "cite", "cw")) {
if(display[1] %in% "cw") {
return(cw)
}
display <- match.arg(display)
if(is.null(x)) {
stop("must define argument x")
}
if(display %in% "cite" && !x %in% cw$name) {
display <- "save"
}
if(display %in% "cite") {
return(cw$print[cw$name == x])
}
if(display %in% "save") {
if(x %in% cw$name) {
stop("Label:",dQuote(x)," already in use.")
}
cw[i, "name"] <<- x
res <- paste(pf, i, ":")
cw[i, "print"] <<- res
i <<- i + 1
return(res)
}
}
}
fig_nums <- cp()
table_nums <- cp(prefix = "Table")
theta <- fig_nums("theta")
biasVsRho <- fig_nums("biasVsRho")
rmseVsRho <- table_nums("rmseVsRho")
rmseVsRho2 <- table_nums("rmseVsN")
speedi <- table_nums("speedi")
rmseVsRho3 <- table_nums("rmseVsRho2")
rmseVsN <- table_nums("rmseVsN2")
## ----theta,echo=FALSE,results="hide",fig.width=7, fig.height=3----------------
x <- seq(-3,3,by=0.01)
y <- dnorm(x)
par0 <- par(no.readonly=TRUE)
par(ann=FALSE)
par(mar=c(5,2,1,1)+0.1)
plot(x,y,type="l",xlab="y",ylab="Density", xaxt="n", yaxt="n")
axis(1,at=c(-2,-0.5,1.6), labels=expression(theta[3],theta[4],theta[5]))
text(x=c(-2.5,-1.25,0.55,2.3),y=c(0.05,0.05,0.05,0.08), labels=paste0("m=",1:4))
theta <- c(-2,-0.5,1.6)
for(i in 1:3) {
lines(rep(theta[i],2), c(-1,dnorm(theta[i])))
}
par(ann=TRUE)
par(mgp=c(0.5,0,0))
title(ylab="density")
par(mgp=c(3,1,0))
title(xlab="Y")
par(par0)
## ----biasVersusrho, echo=FALSE,fig.width=7, fig.height=5----------------------
#bias$rmse <- sqrt( (bias$est - bias$rho)^2 )
#bias$bias <- bias$est - bias$rho
#aggbias <- summaryBy(bias + rmse ~ n + rho + type, data=bias, FUN=mean, na.rm=TRUE)
xyplot(bias.mean ~ rho|type,
data=aggbias,
groups=n,
type=c("l","g"),
ylab="Bias",
xlab=expression(rho),
scales=list(x=list(cex=0.7), y=list(cex=0.7)),
auto.key=list(lines=TRUE, points=FALSE, space="right", cex=0.7),
par.settings=list(superpose.line=list(lwd=2), plot.line=list(lwd=2)))
## ----rmseVersusrho, echo=FALSE,fig.width=7, fig.height=3.5--------------------
xyplot(rmse.mean ~ rho|type,
data=aggbias,
groups=n,
scales=list(y=list(log=10, cex=0.7), x=list(cex=0.7)),
ylab="RMSE",
xlab=expression(rho),
type=c("l","g"),
auto.key=list(lines=TRUE, points=FALSE, space="right", cex=0.7),
par.settings=list(superpose.line=list(lwd=2), plot.line=list(lwd=2)))
## ----rmse Versus n, echo=FALSE,fig.width=7, fig.height=3.5--------------------
#aggbias2 <- summaryBy(rmse ~ n+type, data=bias, FUN=mean, na.rm=TRUE)
xyplot(rmse.mean ~ n,
groups=type,
data=aggbias2,
ylab="RMSE",
xlab="n",
scales=list(y=list(log=10, cex=0.7), x=list(log=10, cex=0.7)),
type=c("l","g"),
auto.key=list(lines=TRUE, points=FALSE, space="right", cex=0.7),
par.settings=list(superpose.line=list(lwd=2), plot.line=list(lwd=2)))
## ----time Versus n, echo=FALSE,fig.width=7, fig.height=4----------------------
# agg <- summaryBy(t ~ n + type, data=ntime, FUN=mean, na.rm=TRUE)
# agg$t.mean <- ifelse(agg$t.mean==0, 0.001,agg$t.mean)
xyplot(t.mean ~ n,
data=aggTime,
scales=list(y=list(log=10, cex=0.7), x=list(log=10, cex=0.7)),
groups=type,
type=c("l","g"),
ylab="Computing time (s)",
xlab="n",
auto.key=list(lines=TRUE, points=FALSE, space="right", cex=0.7),
par.settings=list(superpose.line=list(lwd=2), plot.line=list(lwd=2)))
## ----wgt Versus rho plot, echo=FALSE,fig.width=7, fig.height=5.5--------------
# wgt <- wgtvrho
# wgt$absdrho <- abs(wgt$est - wgt$rho)
#
# agg <- summaryBy(absdrho ~ rho + usew + type, data=wgt, FUN=mean, na.rm=TRUE)
# agg$weight <- ifelse(agg$usew, "Weighted", "Unweighted")
xyplot(absdrho.mean ~ rho|type,
data=aggWgtvrho,
groups=weight,
scales=list(y=list(log=10, cex=0.7), x=list(cex=0.7)),
type=c("l","g"),
ylab="MAD",
xlab=expression(rho),
auto.key=list(lines=TRUE, points=FALSE, space="right", cex=0.7),
par.settings=list(superpose.line=list(lwd=2), plot.line=list(lwd=2)))
## ----wgt v n plot, echo=FALSE,fig.width=7, fig.height=5.5---------------------
# wgtvn <- wgtvn[wgtvn$type!= "Spearman",]
#
# wgt <- rbind(wgtvn, spear)
# wgt$mserho <- (wgt$est - wgt$rho)^2
#
# agg <- summaryBy(mserho ~ n + usew + type, data=wgt, FUN=mean, na.rm=TRUE)
# agg$rmserho <- sqrt(agg$mserho)
# agg$weight <- ifelse(agg$usew, "Weighted", "Unweighted")
xyplot(rmserho ~ n|type,
data=aggWgtvn,
groups=weight,
scales=list(y=list(log=10, cex=0.7), x=list(log=10, cex=0.7)),
type=c("l","g"),
ylab="RMSE",
xlab="n",
auto.key=list(lines=TRUE, points=FALSE, space="right", cex=0.7),
par.settings=list(superpose.line=list(lwd=2), plot.line=list(lwd=2)))
|
/scratch/gouwar.j/cran-all/cranData/wCorr/inst/doc/wCorrFormulas.R
|
---
title: "wCorr Formulas"
author: "Paul Bailey, Ahmad Emad, Ting Zhang, Qingshu Xie"
date: '`r Sys.Date()`'
output:
pdf_document: default
vignette: |
%\VignetteEngine{knitr::rmarkdown}
%\VignetteIndexEntry{wCorr Formulas}
\usepackage[utf8]{inputenc}
\usepackage{amssymb}
---
```{r packages and data, echo=FALSE, results="hide", message=FALSE, warning=FALSE}
require(wCorr)
if(!requireNamespace("doBy")) {
stop("Cannot build vignette without knitr package")
}
if(!requireNamespace("lattice")) {
stop("Cannot build vignette without lattice package")
}
require(lattice)
require(doBy)
# set layout so a figure label appears to go with the figure
trellis.device()
trellis.par.set(list(layout.widths = list(left.padding = 3, right.padding = 3),
layout.heights = list(top.padding = -1, bottom.padding = 3)))
load("../R/sysdata.rda")
```
```{r tables and figures, echo=FALSE, results="hide", message=FALSE, warning=FALSE}
# replicate captioner functionality we used to use
cp <- function(prefix="Figure") {
pf <- prefix
cw <- data.frame(name="__XX__UNUSED", print="Table 99")
i <- 1
function(x, display=c("save", "cite", "cw")) {
if(display[1] %in% "cw") {
return(cw)
}
display <- match.arg(display)
if(is.null(x)) {
stop("must define argument x")
}
if(display %in% "cite" && !x %in% cw$name) {
display <- "save"
}
if(display %in% "cite") {
return(cw$print[cw$name == x])
}
if(display %in% "save") {
if(x %in% cw$name) {
stop("Label:",dQuote(x)," already in use.")
}
cw[i, "name"] <<- x
res <- paste(pf, i, ":")
cw[i, "print"] <<- res
i <<- i + 1
return(res)
}
}
}
fig_nums <- cp()
table_nums <- cp(prefix = "Table")
theta <- fig_nums("theta")
biasVsRho <- fig_nums("biasVsRho")
rmseVsRho <- table_nums("rmseVsRho")
rmseVsRho2 <- table_nums("rmseVsN")
speedi <- table_nums("speedi")
rmseVsRho3 <- table_nums("rmseVsRho2")
rmseVsN <- table_nums("rmseVsN2")
```
The wCorr package can be used to calculate Pearson, Spearman, polyserial, and polychoric correlations, in weighted or unweighted form.^[The estimation procedure used by the wCorr package for the polyserial is based on the likelihood function in Cox, N. R. (1974), "Estimation of the Correlation between a Continuous and a Discrete Variable." *Biometrics*, **30** (1), pp 171-178. The likelihood function for polychoric is from Olsson, U. (1979) "Maximum Likelihood Estimation of the Polychoric Correlation Coefficient." *Psychometrika*, **44** (4), pp 443-460. The likelihood used for Pearson and Spearman is written down in many places. One is the "correlate" function in Stata Corp, Stata Statistical Software: Release 8. College Station, TX: Stata Corp LP, 2003.] The package implements the tetrachoric correlation as a specific case of the polychoric correlation and biserial correlation as a specific case of the polyserial correlation. When weights are used, the correlation coefficients are calculated with so-called sample weights or inverse probability weights.^[Sample weights are comparable to `pweight` in Stata.]
This vignette introduces the methodology used in the wCorr package for computing the Pearson, Spearman, polyserial, and polychoric correlations, with and without weights applied. For the polyserial and polychoric correlations, the coefficient is estimated using a numerical likelihood maximization.
The weighted (and unweighted) likelihood functions are presented. Then simulation evidence is presented to show correctness of the methods, including an examination of the bias and consistency. This is done separately for unweighted and weighted correlations.
Numerical simulations are used to show:
* The bias of the methods as a function of the true correlation coefficient ($\rho$) and the number of observations ($n$) in the unweighted and weighted cases; and
* The accuracy [measured with root mean squared error (RMSE) and mean absolute deviation (MAD)] of the methods as a function of $\rho$ and $n$ in the unweighted and weighted cases.
Note that here *bias* is used for the mean difference between true correlation and estimated correlation.
The *wCorr Arguments* vignette describes the effects the `ML` and `fast` arguments have on computation and gives examples of calls to wCorr.
# Specification of estimation formulas
Here we focus on specification of the correlation coefficients between two vectors of random variables that are jointly bivariate normal. We call the two vectors ***X*** and ***Y***. The $i^{th}$ members of the vectors are then called $x_i$ and $y_i$.
## Formulas for Pearson correlations with and without weights
The weighted Pearson correlation is computed using the formula
$$r_{Pearson}=\frac{\sum_{i=1}^n \left[ w_i (x_i-\bar{x})(y_i-\bar{y}) \right]}{\sqrt{\sum_{i=1}^n \left( w_i (x_i-\bar{x})^2 \right)\sum_{i=1}^n \left( w_i (y_i-\bar{y})^2 \right) }} $$
where $w_i$ is the weight for the $i$th observation, $\bar{x}$ is the weighted mean of the ***X*** variable ($\bar{x}=\frac{1}{\sum_{i=1}^n w_i}\sum_{i=1}^n w_i x_i$), $\bar{y}$ is the weighted mean of the ***Y*** variable ($\bar{y}=\frac{1}{\sum_{i=1}^n w_i}\sum_{i=1}^n w_i y_i$), and $n$ is the number of elements in ***X*** and ***Y***.^[See the "correlate" function in Stata Corp, Stata Statistical Software: Release 8. College Station, TX: Stata Corp LP, 2003.]
The unweighted Pearson correlation is calculated by setting all of the weights to one.
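The formula translates directly into R. The chunk below (simulated data, shown purely for illustration) computes the weighted Pearson correlation from the formula and compares it with `weightedCorr`.
```{r weighted pearson by hand}
set.seed(12)
x <- rnorm(100)
y <- 0.5 * x + sqrt(0.75) * rnorm(100)
w <- runif(100, 1, 3)                      # arbitrary positive weights
xbar <- sum(w * x) / sum(w)
ybar <- sum(w * y) / sum(w)
rByHand <- sum(w * (x - xbar) * (y - ybar)) /
  sqrt(sum(w * (x - xbar)^2) * sum(w * (y - ybar)^2))
c(byHand = rByHand, wCorr = weightedCorr(x, y, method = "Pearson", weights = w))
```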
## Formulas for Spearman correlations with and without weights
For the Spearman correlation coefficient the unweighted coefficient is calculated by ranking the data and then using those ranks to calculate the Pearson correlation coefficient--so the ranks stand in for the ***X*** and ***Y*** data. Again, similar to the Pearson, for the unweighted case the weights are all set to one.
For the unweighted case the highest rank receives a value of 1 and the second highest 2, and so on down to the $n$th value. In addition, when data are ranked, ties must be handled in some way. The chosen method is to use the average of all tied ranks. For example, if the second and third rank units are tied then both units would receive a rank of 2.5 (the average of 2 and 3).
For the weighted case there is no commonly accepted weighted Spearman correlation coefficient. Stata does not estimate a weighted Spearman, and SAS does not document its methodology in either the corr or the freq procedure.
The weighted case presents two issues. First, the ranks must be calculated. Second, the correlation coefficient must be calculated.
Calculating the weighted rank for an individual level is done via two terms. For the $j$th element the rank is
$$rank_j = a_j + b_j$$
The first term $a_j$ is the sum of the weights ***W*** over all units whose value of the outcome being ranked is strictly less than $\xi_j$
$$a_j = \sum_{i=1}^n w_i \mathbf{1}\left( \xi_i < \xi_j \right)$$
where $\mathbf{1}(\cdot)$ is the indicator function that is one when the condition is true and 0 when the condition is false, $w_i$ is the $i$th weight and $\xi_i$ and $\xi_j$ are the $i$th and $j$th value of the vector being ranked, respectively.
The term $b_j$ then deals with ties. When there are ties each unit receives the mean rank for all of the tied units. When the weights are all one and there are $n$ tied units the vector of tied ranks would be $\mathbf{v}=\left(a_j+1, a_j+2, \dots, a_j+n \right)$. The mean of this vector (here called $rank^1$ to indicate it is a specific case of $rank$ when the weights are all one) is then
$$rank_j^1=\frac{1}{n} \sum_{i=1}^n \left(a_j + i \right)$$
$$=\frac{1}{n} \left( n a_j + \frac{n(n+1)}{2} \right)$$
$$=a_j + \frac{n+1}{2}$$
thus
$$b_j^1=\frac{n+1}{2}$$
where the superscript one is again used to indicate that this is only for the unweighted case where all weights are set to one.
For the weighted case this could be $\mathbf{v}=\left(a_j+w_1', a_j+w_1'+w_2', \dots, a_j+\sum_{k=1}^n w_k' \right)^T$ where ***W'*** is a vector containing the weights of the tied units. It is readily apparent that the mean of this vector will depend on the ordering of the weights. To avoid this, the overall mean of all possible permutations of the weights is calculated. The following formula does just that
$$b_j = \frac{n+1}{2}\bar{w}_j$$
where $\bar{w}_j$ is the mean weight of all of the tied units. It is easy to see that when the weights are all one $\bar{w}_j=1$ and $b_j = b_j^1$. The latter (more general) formula is used for all cases.
After the ***X*** and ***Y*** vectors are ranked they are plugged into the weighted Pearson correlation coefficient formula shown earlier.
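A direct, if inefficient, implementation of this ranking rule is sketched below; `wrankNaive` is a hypothetical helper written only to mirror the formulas for $a_j$ and $b_j$ above and is not the (faster) internal implementation used by the package.
```{r weighted rank sketch}
wrankNaive <- function(xi, w) {
  sapply(seq_along(xi), function(j) {
    a <- sum(w[xi < xi[j]])       # a_j: total weight of strictly smaller values
    tied <- w[xi == xi[j]]        # weights of the tied units (including unit j)
    a + (length(tied) + 1) / 2 * mean(tied)   # a_j + b_j, b_j = (n+1)/2 * wbar_j
  })
}
wrankNaive(c(3, 1, 4, 1, 5), w = c(1, 2, 1, 2, 1))
```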
## Formulas for polyserial correlation with and without weights
For the polyserial correlation, it is again assumed that there are two continuous variables ***X*** and ***Y*** that have a bivariate normal distribution.^[For a more complete treatment of the polyserial correlation, see Cox, N. R., "Estimation of the Correlation between a Continuous and a Discrete Variable" *Biometrics*, **50** (March), 171-187, 1974.]
$$\begin{pmatrix} X \\ Y \end{pmatrix} \sim N \left[ \begin{pmatrix} \mu_x \\ \mu_y \end{pmatrix}, \boldsymbol{\Sigma} \right]$$
where $N(\mathbf{A},\boldsymbol{\Sigma})$ is a bivariate normal distribution with mean vector ***A*** and covariance matrix $\boldsymbol{\Sigma}$. For the polyserial correlation, ***Y*** is discretized into the random variable ***M*** according to
$$m_i= \begin{cases} 1 \quad \mathrm{if}\ \theta_2 < y_i < \theta_3 \\ 2 \quad \mathrm{if}\ \theta_3 < y_i < \theta_4 \\ \vdots \\ t \quad \mathrm{if}\ \theta_{t+1} < y_i < \theta_{t+2} \end{cases}$$
where $\theta$ indicates the cut points used to discretize ***Y*** into ***M***, and $t$ is the number of bins. For notational convenience, $\theta_2 \equiv -\infty$ and $\theta_{t+2} \equiv \infty$.^[The indexing is somewhat odd to be consistent with Cox (1974). Nevertheless, this treatment does not use the Cox definition of $\theta_0$, $\theta_1$ or $\theta_2$ which are either not estimated (as is the case for $\theta_0$, and $\theta_1$) or are reappropriated (as is the case for $\theta_2$). Cox calls the correlation coefficient $\theta_2$ while this document uses $\rho$ and uses $\theta_2$ to store $-\infty$ as a convenience so that the vector $\boldsymbol{\theta}$ includes the (infinite) bounds as well as the interior points.]
To give a concrete example, the following figure shows the density of ***Y*** when the cuts points are, for this example, $\theta=\left(-\infty,-2,-0.5,1.6,\infty\right)$. In this example, any value of $-2 < y_i < -0.5$ would have $m_i=2$.
**`r fig_nums("theta", display="cite")`.** *Density of Y for cutpoints $\theta = (-\infty, -2, -0.5, 1.6, \infty$).*
```{r theta,echo=FALSE,results="hide",fig.width=7, fig.height=3}
x <- seq(-3,3,by=0.01)
y <- dnorm(x)
par0 <- par(no.readonly=TRUE)
par(ann=FALSE)
par(mar=c(5,2,1,1)+0.1)
plot(x,y,type="l",xlab="y",ylab="Density", xaxt="n", yaxt="n")
axis(1,at=c(-2,-0.5,1.6), labels=expression(theta[3],theta[4],theta[5]))
text(x=c(-2.5,-1.25,0.55,2.3),y=c(0.05,0.05,0.05,0.08), labels=paste0("m=",1:4))
theta <- c(-2,-0.5,1.6)
for(i in 1:3) {
lines(rep(theta[i],2), c(-1,dnorm(theta[i])))
}
par(ann=TRUE)
par(mgp=c(0.5,0,0))
title(ylab="density")
par(mgp=c(3,1,0))
title(xlab="Y")
par(par0)
```
Notice that $\mu_y$ is not identified (or is irrelevant) because, for any $a \in \mathbb{R}$, setting $\tilde{\mu}_y = \mu_y + a$ and $\tilde{\boldsymbol{\theta}}=\boldsymbol{\theta} + a$ lead to exactly the same values of $\mathbf{M}$ and so one of the two must be arbitrarily assigned. A convenient decision is to decide $\mu_y \equiv 0$. A similar argument holds for $\sigma_y$ so that $\sigma_y \equiv 1$.
For ***X***, Cox (1974) observes that the MLE mean and standard deviation of ***X*** are simply the average and (population) standard deviation of the data and do not depend on the other parameters.^[The population standard deviation is used because it is the MLE for the standard deviation. Notice that, while the sample variance is an unbiased estimator of the variance and the population variance is not an unbiased estimator of the variance, they are very similar and the variance is also a nuisance parameter, not a parameter of interest when finding the correlation.] This can be taken advantage of by defining $z$ to be the standardized score of $x$ so that $z \equiv \frac{x- \bar{x}}{ \hat\sigma_x}$.
Combining these simplifications, the probability of any given $x_i$, $m_i$ pair is
$$\mathrm{Pr}\left( \rho=r, \boldsymbol{\Theta}=\boldsymbol{\theta} ; Z=z_i, M=m_i \right) = \phi(z_i) \int_{\theta_{m_i+1}}^{\theta_{m_i+2}} \!\!\!\!\!\!\!\!\!\!\!\! f(y|Z=z_i,\rho=r)dy$$
where $\mathrm{Pr}\left( \rho=r, \boldsymbol{\Theta}=\boldsymbol{\theta}; Z=z_i, M=m_i \right)$ is the probability of the event $\rho=r$ and the cut points are $\boldsymbol{\theta}$, given the $i$th data point $z_i$ and $m_i$; $\phi(\cdot)$ is the standard normal density; and $f(Y|Z,\rho)$ is the distribution of ***Y*** conditional on ***Z*** and $\rho$. Because ***Y*** and ***Z*** are jointly normally distributed (by assumption)
$$f(Y|Z=z_i,\rho=r) =N\left(\mu_y + \frac{\sigma_y}{\sigma_z}r(z_i-\mu_z), (1-r^2){\sigma_y}^2 \right)$$
because both ***Z*** and ***Y*** are standard normals
$$f(y|Z=z_i,\rho=r) =N\left(r \cdot z_i, (1-r^2) \right)$$
Noticing that $\frac{y-r\cdot z}{\sqrt{1-r^2}}$ has a standard normal distribution
$$\mathrm{Pr}\left( \rho=r, \boldsymbol{\Theta}=\boldsymbol{\theta} ; Z=z_i, M=m_i \right) = \phi(z_i) \left[ \Phi\left( \frac{\theta_{m_i+2} - r \cdot z_i}{\sqrt{1-r^2}} \right) - \Phi \left( \frac{\theta_{m_i+1} - r \cdot z_i}{\sqrt{1-r^2}} \right) \right]$$
where $\Phi(\cdot)$ is the standard normal cumulative density function. Using the above probability function as an objective, the log-likelihood is then maximized.
$$\ell(\rho=r, \boldsymbol{\Theta}=\boldsymbol{\theta};\mathbf{Z}=\mathbf{z},\mathbf{M}=\mathbf{m}) = \sum_{i=1}^n w_i \ln\left[ \mathrm{Pr}\left( \rho=r, \boldsymbol{\Theta}=\boldsymbol{\theta} ; Z=z_i, M=m_i \right) \right]$$
where $w_i$ is the weight of the $i^{th}$ members of the vectors ***Z*** and ***Y***. For the unweighted case, all of the weights are set to one.
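To make this concrete, a minimal R transcription of the weighted polyserial log-likelihood is sketched below. It is illustrative only and is not the implementation used by the package; the function name and the cut-point indexing convention (a vector running from $-\infty$ to $\infty$ so that bin $j$ of ***M*** is bounded by elements $j$ and $j+1$) are chosen just for this sketch.

```{r, eval=FALSE}
# Illustrative weighted polyserial log-likelihood (not the wCorr internals).
# r:     candidate correlation
# theta: c(-Inf, interior cut points, Inf); bin j of m lies between
#        theta[j] and theta[j + 1] (the document's Cox-style index shifted)
# z:     standardized continuous variable;  m: discretized variable (1, ..., t)
polyserialLnl <- function(r, theta, z, m, w = rep(1, length(z))) {
  s  <- sqrt(1 - r^2)
  pr <- dnorm(z) * (pnorm((theta[m + 1] - r * z) / s) -
                    pnorm((theta[m]     - r * z) / s))
  sum(w * log(pr))
}
```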
The value of the nuisance parameter $\boldsymbol{\theta}$ is chosen to be
$$\hat{\theta}_{j+2} = \Phi^{-1}(n/N)$$
where $n$ is the number of values to the left of the $j$th cut point ($\theta_{j+2}$ value) and $N$ is the number of data points overall. Here two is added to $j$ to make the indexing of $\theta$ agree with Cox (1974) as noted before. For the weighted case, $n$ is replaced by the sum of the weights to the left of the $j$th cut point and $N$ is replaced by the total weight of all units
$$\hat{\theta}_{j+2} = \Phi^{-1}\left( \frac{\sum_{i=1}^N w_i \mathbf{1}(m_i \leq j) }{\sum_{i=1}^N w_i} \right)$$
where $\mathbf{1}$ is the indicator function that is 1 when the condition is true and 0 otherwise.
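A matching sketch of the weighted cut-point estimator, using the same shifted indexing and assuming at least two occupied bins, is below; again the helper name is chosen only for this illustration.

```{r, eval=FALSE}
# Illustrative weighted cut-point estimates: returns c(-Inf, interior, Inf).
thetaHat <- function(m, w = rep(1, length(m))) {
  nBins    <- max(m)
  interior <- sapply(seq_len(nBins - 1),
                     function(j) qnorm(sum(w[m <= j]) / sum(w)))
  c(-Inf, interior, Inf)
}
```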
### Computation of polyserial correlation
For the polyserial, derivatives of $\ell$ can be written down but are not readily computed. When the `ML` argument is set to `FALSE` (the default), a one dimensional optimization of $\rho$ is calculated using the `optimize` function in the `stats` package and the values of $\boldsymbol{\theta}$ from the previous paragraph. When the `ML` argument is set to `TRUE`, a multi-dimensional optimization is done for $\rho$ and $\boldsymbol{\theta}$ using the `bobyqa` function in the `minqa` package. See the *wCorr Arguments* vignette for a comparison of these two methods.
Because the numerical optimization is not perfect when the correlation is at a boundary ($\rho \in \{-1,1\}$), a check for perfect correlation is performed before the above optimization by simply examining whether the values of ***X*** and ***M*** are in agreeing order (or in exactly opposite order); when they are, the MLE correlation of 1 (or -1) is returned.
## Methodology for polychoric correlation with and without weights
Similar to the polyserial correlation, the polychoric correlation is a simple case of two continuous variables ***X*** and ***Y*** that have a bivariate normal distribution. In the case of the polyserial correlation the continuous (latent) variable ***Y*** was observed as a discretized variable ***M***. For the polychoric correlation, this is again true but now the continuous (latent) variable ***X*** is observed as a discrete variable ***P*** according to
$$p_i= \begin{cases} 1 \quad \mathrm{if}\ \theta'_2 < x_i < \theta'_3 \\ 2 \quad \mathrm{if}\ \theta'_3 < x_i < \theta'_4 \\ \vdots \\ t' \quad \mathrm{if}\ \theta'_{t'+1} < x_i < \theta'_{t'+2} \end{cases}$$
where $\boldsymbol{\theta}$ remains the cut points for the distribution defining the transformation of ***Y*** to ***M***, $\boldsymbol{\theta}'$ contains the cut points for the transformation from ***X*** to ***P***, and $t'$ is the number of bins for ***P***. Similar to $\boldsymbol{\theta}$, $\boldsymbol{\theta}'$ has $\theta'_2 \equiv -\infty$ and $\theta'_{t'+2} \equiv \infty$.
As in the polyserial correlation, $\mu_y$ is not identified (or is irrelevant) because, for any $a \in \mathbb{R}$, setting $\tilde{\mu}_y = \mu_y + a$ and $\tilde{\boldsymbol{\theta}}=\boldsymbol{\theta} + a$ leads to exactly the same values of $\mathbf{M}$, and so one of the two must be arbitrarily assigned. The same is true for $\mu_x$. A convenient choice is to set $\mu_y = \mu_x \equiv 0$. A similar argument holds for $\sigma_y$ and $\sigma_x$, so that $\sigma_y = \sigma_x \equiv 1$.
Then the probability of any given $m_i$, $p_i$ pair is
$$\mathrm{Pr}\left( \rho=r, \boldsymbol{\Theta}=\boldsymbol{\theta}, \boldsymbol{\Theta}'=\boldsymbol{\theta}' ; P=p_i, M=m_i \right) = \int_{\theta'_{p_i+1}}^{\theta'_{p_i+2}} \int_{\theta_{m_i+1}}^{\theta_{m_i+2}} \!\!\!\!\!\!\!\!\!\!\!\! f(x,y|\rho=r)dydx$$
where $\rho$ is the correlation coefficient.
Using this function as an objective, the log-likelihood is then maximized.
$$\ell(\rho=r, \boldsymbol{\Theta}=\boldsymbol{\theta}, \boldsymbol{\Theta}'=\boldsymbol{\theta}';\mathbf{P}=\mathbf{p},\mathbf{M}=\mathbf{m}) = \sum_{i=1}^n w_i \ln\left[\mathrm{Pr}\left( \rho=r, \boldsymbol{\Theta}=\boldsymbol{\theta}, \boldsymbol{\Theta}'=\boldsymbol{\theta}' ; P=p_i, M=m_i \right) \right] $$
This is the weighted log-likelihood function. For the unweighted case all of the weights are set to one.
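As an illustration, a minimal R sketch of this weighted polychoric log-likelihood can be written with the bivariate normal rectangle probabilities from the `mvtnorm` package; this is not the implementation used by wCorr, and the function name and cut-point indexing convention are chosen only for this example.

```{r, eval=FALSE}
# Illustrative weighted polychoric log-likelihood (not the wCorr internals).
# thetaP, thetaM: cut points of the form c(-Inf, ..., Inf) for P and M,
#                 so bin j lies between element j and element j + 1
library(mvtnorm)
polychoricLnl <- function(r, thetaP, thetaM, p, m, w = rep(1, length(p))) {
  sig <- matrix(c(1, r, r, 1), 2, 2)
  pr  <- mapply(function(pj, mj) {
    pmvnorm(lower = c(thetaP[pj], thetaM[mj]),
            upper = c(thetaP[pj + 1], thetaM[mj + 1]),
            corr  = sig)
  }, p, m)
  sum(w * log(pr))
}
```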
### Computation of polychoric correlation
This again mirrors the treatment of the polyserial. The derivatives of $\ell$ for the polychoric can be written down but are not readily computed. When the `ML` argument is set to `FALSE` (the default), a one dimensional optimization of $\rho$ is calculated using the `optimize` function from the `stats` package and values of $\boldsymbol{\theta}$ and $\boldsymbol{\theta}'$ are computed using the last equation in the section titled, "Formulas for polyserial correlation with and without weights". When the `ML` argument is set to `TRUE` a multi-dimensional optimization is done for $\rho$, $\boldsymbol{\theta}$, and $\boldsymbol{\theta}'$ using the `bobyqa` function in the `minqa` package. See the *wCorr Arguments* vignette for a comparison of these two methods.
Because the optimization is not perfect when the correlation is in a boundary condition ($\rho \in \{-1,1\}$), a check for perfect correlation is performed before the above optimization by simply examining if the values of ***P*** and ***M*** have a Goodman-Kruskal correlation coefficient of -1 or 1. When this is the case, the MLE of -1 or 1, respectively, is returned.
# Simulation evidence on the correctness of the estimating methods
It is easy to prove the consistency of $\boldsymbol{\theta}$ for the polyserial correlation and of $\boldsymbol{\theta}$ and $\boldsymbol{\theta}'$ for the polychoric correlation in the non-ML case. Similarly, for $\rho$, because it is an MLE that can be obtained by taking a derivative and setting it equal to zero, the estimator is asymptotically unbiased and attains the Cramer-Rao lower bound.
This does not speak to the small sample properties of these correlation coefficients. Previous work has described their properties by simulation; and that tradition is continued below.^[See, for example, the introduction to Rigdon, E. E. and Ferguson C. E., "The Performance of the Polychoric Correlation Coefficient and Selected Fitting Functions in Confirmatory Factor Analysis With Ordinal Data" *Journal of Marketing Research* **28** (4), pp. 491-497.]
## Simulation study of unweighted correlations
In what follows, when the exact method of selecting a parameter (such as $n$) is not noted in the above descriptions it is described as part of each simulation.
Across a number of iterations (the exact number of times will be stated for each simulation), the following procedure is used (a minimal R sketch of one iteration appears after this list):
* select a true Pearson correlation coefficient $\rho$;
* select the number of observations $n$;
* generate ***X*** and ***Y*** to be bivariate normally distributed using a pseudo-Random Number Generator (RNG);
* using a pseudo-RNG, select the number of bins for ***M*** and ***P*** ($t$ and $t'$) independently from the set \{2, 3, 4, 5\};^[This means that the simulation uses discrete ordinal variables (***M*** and ***P***) that have 2, 3, 4, or 5 discrete levels. Note that the number of levels in ***M*** and ***P*** are chosen independently so that one could be 2 while the other is 5 (or any other possible combination).]
* select the bin boundaries for ***M*** and ***P*** ($\boldsymbol{\theta}$ and $\boldsymbol{\theta}'$) by sorting the results of $(t-1)$ and $(t'-1)$ draws, respectively, from a normal distribution using a pseudo-RNG;
* confirm that at least 2 levels of each of ***M*** and ***P*** are occupied (if not, return to previous step); and
* calculate and record relevant statistics.
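The chunk below sketches one such iteration in R. It is illustrative only (the occupancy check is omitted for brevity and the parameter values are made up), and the argument order assumed for the polyserial call (continuous variable first, ordinal variable second) should be checked against `?weightedCorr`.

```{r, eval=FALSE}
# One illustrative iteration of the procedure above (not the exact code used
# to produce the figures that follow).
library(MASS)   # for mvrnorm
library(wCorr)
set.seed(1)
rho <- 0.5
n   <- 1000
xy  <- mvrnorm(n, mu = c(0, 0), Sigma = matrix(c(1, rho, rho, 1), 2, 2))
t1  <- sample(2:5, 1)   # number of bins for M
t2  <- sample(2:5, 1)   # number of bins for P
m   <- cut(xy[, 2], c(-Inf, sort(rnorm(t1 - 1)), Inf), labels = FALSE)
p   <- cut(xy[, 1], c(-Inf, sort(rnorm(t2 - 1)), Inf), labels = FALSE)
c(Pearson    = weightedCorr(xy[, 1], xy[, 2], method = "Pearson"),
  Spearman   = weightedCorr(xy[, 1], xy[, 2], method = "Spearman"),
  Polyserial = weightedCorr(xy[, 1], m,       method = "Polyserial"),
  Polychoric = weightedCorr(p,       m,       method = "Polychoric"))
```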
## Bias and RMSE of the unweighted correlations
This section shows the bias of the correlations as a function of the true correlation coefficient, $\rho$. To that end, a simulation was done at each level of the Cartesian product of $\rho \in \left( -0.99, -0.95, -0.90, -0.85, ..., 0.95, 0.99 \right)$ and $n \in \{10, 100, 1000\}$. For precision, each level of $\rho$ and $n$ was run fifty times. The bias is the mean difference between the estimated correlation coefficient ($r_i$) and the true correlation coefficient ($\rho_i$). The RMSE is the square root of the mean squared error.
$$\mathrm{RMSE}= \sqrt{ \frac{1}{n} \sum_{i=1}^n \left( r_i - \rho_i \right)^2 }$$
And the bias is given by
$$bias=\frac{1}{n}\sum_{i=1}^n \left(r_i - \rho_i \right)$$
`r fig_nums("biasVsRho", display="cite")` shows the bias as a function of the true correlation $\rho$. Only the polyserial shows no bias at any level of $n$, shown by no clear deviation from 0 at any level of $\rho$. For the Pearson correlation there is bias when $n=10$ that is not present when $n=100$ or 1,000. This is a well known property of the estimator.^[see, for example, Olkin I. and Pratt, J. W. (1958), Unbiased Estimation of Certain Correlation Coefficients. *Annals of Mathematical Statistics*, *29* (1), 201--211.] Similarly, the polychoric shows bias when $n=10$.
The Spearman correlation shows bias at all of the tested levels of $n$. The bias is zero when the true correlation is 1, 0, or -1; is positive when $\rho$ is below 0 (negative correlation); and is negative when $\rho$ is above 0 (positive correlation). In this section, the Spearman correlation coefficient is compared with the true Pearson correlation coefficient. When this is done, the bias is expected because the Spearman correlation is not intended to recover a Pearson type correlation coefficient; it is designed to measure a separate quantity.
**`r fig_nums("biasVsRho", display="cite")`.** *Bias Versus $\rho$ for Unweighted Correlations.*
```{r biasVersusrho, echo=FALSE,fig.width=7, fig.height=5}
#bias$rmse <- sqrt( (bias$est - bias$rho)^2 )
#bias$bias <- bias$est - bias$rho
#aggbias <- summaryBy(bias + rmse ~ n + rho + type, data=bias, FUN=mean, na.rm=TRUE)
xyplot(bias.mean ~ rho|type,
data=aggbias,
groups=n,
type=c("l","g"),
ylab="Bias",
xlab=expression(rho),
scales=list(x=list(cex=0.7), y=list(cex=0.7)),
auto.key=list(lines=TRUE, points=FALSE, space="right", cex=0.7),
par.settings=list(superpose.line=list(lwd=2), plot.line=list(lwd=2)))
```
\newpage
`r fig_nums("rmseVsRho", display="cite")` shows the RMSE as a function of $\rho$. All of the correlation coefficients have a uniform RMSE as a function of $\rho$ near $\rho=0$ that decreases near $|\rho|=1$. All plots also show a decrease in RMSE as $n$ increases. This plot shows that there is no appreciable RMSE differences as a functions of $\rho$. In addition, it show that our attention to the MLE correlation of -1 or 1 at edge cases did not make the RMSE much worse in the neighborhood of the edges ($|\rho| \sim 1$).
**`r fig_nums("rmseVsRho", display="cite")`.** *Root Mean Square Error Versus $\rho$ for Unweighted Correlations.*
```{r rmseVersusrho, echo=FALSE,fig.width=7, fig.height=3.5}
xyplot(rmse.mean ~ rho|type,
data=aggbias,
groups=n,
scales=list(y=list(log=10, cex=0.7), x=list(cex=0.7)),
ylab="RMSE",
xlab=expression(rho),
type=c("l","g"),
auto.key=list(lines=TRUE, points=FALSE, space="right", cex=0.7),
par.settings=list(superpose.line=list(lwd=2), plot.line=list(lwd=2)))
```
#### Consistency of the correlations
`r fig_nums("rmseVsN", display="cite")` shows the RMSE as a function of $n$. The purpose of this plot is not to show an individual value but to show that the estimator is consistent. The plot shows a slope of about $-\frac{1}{2}$ for the Pearson, polychoric, and polyserial correlations. This is consistent with the expected first order convergence for each correlation coefficient under the assumptions of this simulation. Results for the Spearman also show approximate first order convergence but the slope increases slightly as $n$ increases. Again, the Spearman is not estimating the same quantity as the Pearson and so is expected to diverge.
The plot also shows that the RMSE is less than 0.1 for all methods when $n>100$.
**`r fig_nums("rmseVsN", display="cite")`.** *Root Mean Square Error Versus sample size for Unweighted Correlations.*
```{r rmse Versus n, echo=FALSE,fig.width=7, fig.height=3.5}
#aggbias2 <- summaryBy(rmse ~ n+type, data=bias, FUN=mean, na.rm=TRUE)
xyplot(rmse.mean ~ n,
groups=type,
data=aggbias2,
ylab="RMSE",
xlab="n",
scales=list(y=list(log=10, cex=0.7), x=list(log=10, cex=0.7)),
type=c("l","g"),
auto.key=list(lines=TRUE, points=FALSE, space="right", cex=0.7),
par.settings=list(superpose.line=list(lwd=2), plot.line=list(lwd=2)))
```
#### Computing Time
`r fig_nums("speedi", display="cite")` shows the mean time (in seconds) to compute a single correlation coefficient as a function of $\rho$ by $n$ size. The plot shows linearly rising computation times with slopes of about one. This is consistent with a linear computation cost. Using Big O notation, the computation cost is, in the range shown, O($n$). The slope of the Spearman is slightly faster and the algorithm has a O($n \mathrm{lg}(n)$) sort involved, so this is, again, expected.
**`r fig_nums("speedi", display="cite")`.** *Computation time.*
```{r time Versus n, echo=FALSE,fig.width=7, fig.height=4}
# agg <- summaryBy(t ~ n + type, data=ntime, FUN=mean, na.rm=TRUE)
# agg$t.mean <- ifelse(agg$t.mean==0, 0.001,agg$t.mean)
xyplot(t.mean ~ n,
data=aggTime,
scales=list(y=list(log=10, cex=0.7), x=list(log=10, cex=0.7)),
groups=type,
type=c("l","g"),
ylab="Computing time (s)",
xlab="n",
auto.key=list(lines=TRUE, points=FALSE, space="right", cex=0.7),
par.settings=list(superpose.line=list(lwd=2), plot.line=list(lwd=2)))
```
## Simulation study of weighted correlations
When complex sampling (other than simple random sampling with replacement) is used, unweighted correlations may or may not be consistent. In this section the consistency of the weighted coefficients is examined.
When generating simulated data, decisions have to be made about the generating functions, and these decisions affect how the results are interpreted. For the weighted case, if these decisions make the higher weight cases systematically different from the lower weight cases, then the test is more informative about the role of the weights. Thus, while it is not reasonable to always assume that there is a difference between the high and low weight cases, the assumption (used in the simulations below) that there is an association between the weights and the correlation coefficients serves as a more robust test of the methods in this package.
## Results of weighted correlation simulations
Simulations are carried out in the same fashion as previously described but include a few extra steps to accommodate weights. The following changes were made:
* Weights are assigned according to $w_i = (x_i-y_i)^2 + 1$, and the probability of inclusion in the sample was then $Pr_i = \frac{1}{w_i}$.
* For each unit, a uniformly distributed random number was drawn. When that value was less than the probability of inclusion ($Pr_i$), the unit was included.
Units were generated until $n$ units were in the sample.
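The chunk below sketches this sampling scheme in R; it is illustrative only, the parameter values are made up, and it is not the code used to produce the results that follow.

```{r, eval=FALSE}
# Illustrative sketch of the weighted sampling scheme described above.
library(MASS)
library(wCorr)
set.seed(2)
rho <- 0.5
n   <- 100
x <- y <- w <- numeric(0)
while (length(x) < n) {
  xy <- mvrnorm(1, mu = c(0, 0), Sigma = matrix(c(1, rho, rho, 1), 2, 2))
  wi <- (xy[1] - xy[2])^2 + 1     # weight for this unit
  if (runif(1) < 1 / wi) {        # include with probability 1 / w
    x <- c(x, xy[1]); y <- c(y, xy[2]); w <- c(w, wi)
  }
}
c(unweighted = weightedCorr(x, y, method = "Pearson"),
  weighted   = weightedCorr(x, y, method = "Pearson", weights = w))
```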
Two simulations were run. The first shows the mean absolute deviation (MAD)
$$MAD=\frac{1}{n}\sum_{i=1}^n |r_i - \rho_i|$$
as a function of $\rho$ and was run for $n=100$ and $\rho \in \left( -0.99, -0.95, -0.90, -0.85, ..., 0.95, 0.99 \right)$, with 100 iterations run for each value of $\rho$.
The following plot shows the MAD for the weighted and unweighted results as a function of $\rho$ when $n=100$. It shows that, under our simulation assumptions, for values of $\rho$ near zero the weighted correlation performs better than (that is, has lower MAD than) the unweighted correlation for all but the Spearman correlation. Over the entire range, the difference between the two is never such that the unweighted correlation has a lower MAD. Thus, under the simulated conditions at least, the weighted correlation has lower or approximately equal MAD for every value of the true correlation coefficient ($\rho$).
**`r fig_nums("rmseVsRho2", display="cite")`.** *Mean Absolute Deviation Versus $\rho$ (Weighted).*
```{r wgt Versus rho plot, echo=FALSE,fig.width=7, fig.height=5.5}
# wgt <- wgtvrho
# wgt$absdrho <- abs(wgt$est - wgt$rho)
#
# agg <- summaryBy(absdrho ~ rho + usew + type, data=wgt, FUN=mean, na.rm=TRUE)
# agg$weight <- ifelse(agg$usew, "Weighted", "Unweighted")
xyplot(absdrho.mean ~ rho|type,
data=aggWgtvrho,
groups=weight,
scales=list(y=list(log=10, cex=0.7), x=list(cex=0.7)),
type=c("l","g"),
ylab="MAD",
xlab=expression(rho),
auto.key=list(lines=TRUE, points=FALSE, space="right", cex=0.7),
par.settings=list(superpose.line=list(lwd=2), plot.line=list(lwd=2)))
```
The second simulation (shown in `r fig_nums("rmseVsN2", display="cite")`) used the same values of $\rho$ and used $n \in \{10, 100, 1000, 10000 \}$ and shows how RMSE and sample size are related. In particular, it shows first-order convergence of the weighted Pearson, polyserial, and polychoric correlation coefficient.
For the previous plots the calculated Spearman correlation coefficient was compared to the generating Pearson correlation coefficient. For this plot only, the Spearman correlation coefficient is compared to the true Spearman correlation coefficient. This is because the Spearman coefficient is not attempting to estimate the Pearson correlation. To do this the simulation is modified slightly. A population of data is generated and the true Spearman correlation coefficient is then calculated as the population coefficient.^[The R `stats` package `cor` function is used to calculate the population Spearman correlation coefficient; this results in an unweighted coefficient, which is appropriate for the population parameter.] Then, a sample from the population with varying probability as described in the weighted simulation section is used to calculate the sample Spearman correlation coefficient. Then the root mean squared difference between the sample and population coefficients is calculated as with the Pearson--except that the population Spearman correlation coefficient is used in place of the Pearson correlation coefficient ($\rho$).
Thus, the results in `r fig_nums("rmseVsN2", display="cite")` show that, when compared to the true Spearman correlation coefficient, the weighted Spearman correlation coefficient is consistent.
In all cases the RMSE is lower for the weighted than for the unweighted correlation. Again, the fact that the simulations show that the unweighted correlation coefficient is not consistent does not imply that it will always be that way--only that it is possible for these coefficients to be inconsistent.
**`r fig_nums("rmseVsN2", display="cite")`.** *Root Mean Square Error Versus $\rho$ (Polyserial, Pearson, Polychoric panels) or Population Spearman correlation coefficient (Spearman panel) for Weighted Correlations*
```{r wgt v n plot, echo=FALSE,fig.width=7, fig.height=5.5}
# wgtvn <- wgtvn[wgtvn$type!= "Spearman",]
#
# wgt <- rbind(wgtvn, spear)
# wgt$mserho <- (wgt$est - wgt$rho)^2
#
# agg <- summaryBy(mserho ~ n + usew + type, data=wgt, FUN=mean, na.rm=TRUE)
# agg$rmserho <- sqrt(agg$mserho)
# agg$weight <- ifelse(agg$usew, "Weighted", "Unweighted")
xyplot(rmserho ~ n|type,
data=aggWgtvn,
groups=weight,
scales=list(y=list(log=10, cex=0.7), x=list(log=10, cex=0.7)),
type=c("l","g"),
ylab="RMSE",
xlab="n",
auto.key=list(lines=TRUE, points=FALSE, space="right", cex=0.7),
par.settings=list(superpose.line=list(lwd=2), plot.line=list(lwd=2)))
```
# Conclusion
Overall the simulations show first order convergence for each unweighted correlation coefficient with an approximately linear computation cost. Further, under our simulation assumptions, the weighted correlation performs better than (has lower MAD or RMSE than) the unweighted correlation for all correlation coefficients.
We show the first order convergence of the weighted Pearson, polyserial, and polychoric correlation coefficients. The Spearman is shown to not consistently estimate the population Pearson correlation coefficient but is shown to consistently estimate the population Spearman correlation coefficient--under the assumptions of our simulation.
# Appendix: Proof of consistency of the Horvitz-Thompson (HT) estimator of a mean
An HT estimator of a sum takes the form
$$\hat{Y} = \sum_{i=1}^n \frac{1}{\pi_i} y_i$$
where there are $n$ sampled units from a population of $N$ units, each unit has a value $y \in \mathbb{R}$, each unit is sampled with probability $\pi_i$, and $\hat{Y}$ is the estimate of the total of $Y$ in the population. Here there is no assumed covariance between the sampling of unit $i$ and unit $j$, and the inverse probability is also the unit's weight $w_i$, so that an alternative specification of the estimator is
$$\hat{Y} = \sum_{i=1}^n w_i y_i \ .$$
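For reference, a minimal R transcription of these estimators is below; the helper names are chosen only for this sketch, and the mean shown is the common ratio (Hajek-type) estimator that divides by the sum of the weights.

```{r, eval=FALSE}
# Horvitz-Thompson style estimators, with w = 1 / pi (illustrative only).
htTotal <- function(y, w) sum(w * y)            # estimate of the population total
htMean  <- function(y, w) sum(w * y) / sum(w)   # ratio (Hajek-type) estimate of the mean
```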
|
/scratch/gouwar.j/cran-all/cranData/wCorr/inst/doc/wCorrFormulas.Rmd
|
---
title: "wCorr Arguments"
author: "Paul Bailey, Ahmad Emad, Ting Zhang, Qingshu Xie"
date: '`r Sys.Date()`'
output: pdf_document
vignette: >
%\VignetteEngine{knitr::rmarkdown}
%\VignetteIndexEntry{wCorr Arguments}
\usepackage[utf8]{inputenc}
---
```{r packages and data, echo=FALSE, results="hide", message=FALSE,warning=FALSE}
if(!requireNamespace("knitr")) {
stop("Cannot build vignette without knitr package")
}
if(!requireNamespace("lattice")) {
stop("Cannot build vignette without lattice package")
}
require(knitr)
require(wCorr)
require(lattice)
# set layout so a figure label appears to go with the figure
trellis.device()
trellis.par.set(list(layout.widths = list(left.padding = 3, right.padding = 3),
layout.heights = list(top.padding = -1, bottom.padding = 3)))
load("../R/sysdata.rda")
```
```{r setup fast, echo=FALSE, results="hide", message=FALSE, warning=FALSE}
# replicate captioner functionality we used to use
cp <- function(prefix="Figure") {
pf <- prefix
cw <- data.frame(name="__XX__UNUSED", print="Table 99")
i <- 1
function(x, display=c("save", "cite", "cw")) {
if(display[1] %in% "cw") {
return(cw)
}
display <- match.arg(display)
if(is.null(x)) {
stop("must define argument x")
}
if(display %in% "cite" && !x %in% cw$name) {
display <- "save"
}
if(display %in% "cite") {
return(cw$print[cw$name == x])
}
if(display %in% "save") {
if(x %in% cw$name) {
stop("Label:",dQuote(x)," already in use.")
}
cw[i, "name"] <<- x
res <- paste(pf, i, ":")
cw[i, "print"] <<- res
i <<- i + 1
return(res)
}
}
}
# fast$i <- rep(1:(nrow(fast)/2),each=2)
# mfast <- merge(subset(fast,fast),
# subset(fast,!fast, c("i", "est")),
# by="i",
# suffixes=c(".fast",".slow"))
# mfast$fast <- NULL
# mfast$absdrho <- pmax(abs(mfast$est.fast - mfast$est.slow), 1E-16)
# aggfast <- summaryBy(absdrho ~ n + rho + type, data=mfast, FUN=mean, na.rm=TRUE)
fmax <- max(aggfast$absdrho.mean)
fmax10 <- ceiling(log10(fmax))
```
```{r tables and figures, echo=FALSE, results="hide", message=FALSE,warning=FALSE}
fig_nums <- cp()
table_nums <- cp(prefix = "Table")
MLRMSE <- fig_nums("MLRMSE")
Polychoric <- table_nums("Polychoric")
Polyserial <- table_nums("Polyserial")
fastMAD <- table_nums("fastMAD")
speedi <- table_nums("speedi")
```
The wCorr package can be used to calculate Pearson, Spearman, polyserial, and polychoric correlations, in weighted or unweighted form.^[The estimation procedure used by the wCorr package for the polyserial is based on the likelihood function in Cox, N. R. (1974), "Estimation of the Correlation between a Continuous and a Discrete Variable." *Biometrics*, **30** (1), pp 171-178. The likelihood function for polychoric is from Olsson, U. (1979) "Maximum Likelihood Estimation of the Polychoric Correlation Coefficient." *Psychometrika*, **44** (4), pp 443-460. The likelihood used for Pearson and Spearman is written down in many places. One is the "correlate" function in Stata Corp, Stata Statistical Software: Release 8. College Station, TX: Stata Corp LP, 2003.] The package implements the tetrachoric correlation as a specific case of the polychoric correlation and biserial correlation as a specific case of the polyserial correlation. When weights are used, the correlation coefficients are calculated with so called sample weights or inverse probability weights.^[Sample weights are comparable to `pweight` in Stata.]
This vignette describes the use of two Boolean switches in the wCorr package. It describes their implications and uses simulation to show the impact of these switches on the resulting correlation estimates.
First, the Maximum Likelihood, or `ML` switch uses the Maximum Likelihood Estimator (MLE) when `ML=TRUE` or uses a consistent but non-MLE estimator for the nuisance parameters when `ML=FALSE`. The simulations show that using `ML=FALSE` is preferable because it speeds computation and decreases the root mean square error (RMSE) of the estimator.
Second, the `fast` argument gives the option to use a pure R implementation (`fast=FALSE`) or an implementation that relies on the `Rcpp` and `RcppArmadillo` packages (`fast=TRUE`). The simulations show that the two implementations agree to within $10^{`r fmax10`}$. At the same time, the `fast=TRUE` option is always at least as fast.
In addition to this vignette, the *wCorr Formulas* vignette describes the statistical properties of the correlation estimators in the package and has a more complete derivation of the likelihood functions.
# The `ML` switch
The wCorr package computes correlation coefficients between two vectors of random variables that are jointly bivariate normal. We call the two vectors ***X*** and ***Y***.
$$\begin{pmatrix} X \\ Y \end{pmatrix} \sim N \left[ \begin{pmatrix} \mu_x \\ \mu_y \end{pmatrix}, \boldsymbol{\Sigma} \right] $$
where $N(\mathbf{A},\boldsymbol{\Sigma})$ is the bivariate normal distribution with mean ***A*** and covariance $\boldsymbol{\Sigma}$.
## Computation of polyserial correlation
The likelihood function for an individual observation of the polyserial correlation is^[See the *wCorr Formulas* vignette for a more complete description of the polyserial correlations' likelihood function.]
$$\mathrm{Pr}\left( \rho=r, \boldsymbol{\Theta}=\boldsymbol{\theta} ; Z=z_i, M=m_i \right) = \phi(z_i) \left[ \Phi\left( \frac{\theta_{m_i+2} - r \cdot z_i}{\sqrt{1-r^2}} \right) - \Phi \left( \frac{\theta_{m_i+1} - r \cdot z_i}{\sqrt{1-r^2}} \right) \right]$$
where $\rho$ is the correlation between ***X*** and ***Y***, ***Z*** is the normalized version of ***X***, and ***M*** is a discretized version of ***Y***, using $\boldsymbol{\theta}$ as cut points as described in the *wCorr Formulas* vignette. Here an *i* is used to index the observed units.
The log-likelihood function ($\ell$) is then
$$\ell(\rho, \boldsymbol{\Theta}=\boldsymbol{\theta};\mathbf{Z}=\mathbf{z},\mathbf{M}=\mathbf{m}) = \sum_{i=1}^n w_i \ln\left[ \mathrm{Pr}\left( \rho=r, \boldsymbol{\Theta}=\boldsymbol{\theta} ; Z=z_i, M=m_i \right) \right]$$
The derivatives of $\ell$ can be written down but are not readily computed. When the `ML` argument is set to `FALSE` (the default), the values of $\boldsymbol{\theta}$ are computed using a consistent estimator^[The value of the nuisance parameter $\boldsymbol{\theta}$ is chosen to be $\Phi^{-1}(n/N)$ where $n$ is the number of values to the left of the cut point ($\theta_i$ value) and $N$ is the number of data points overall. For the weighted case, $n$ is replaced by the sum of the weights to the left of the cut point and $N$ is replaced by the total weight of all units. See the **wCorr Formulas** vignette for a more complete description.] and a one dimensional optimization of $\rho$ is calculated using the `optimize` function in the `stats` package. When the `ML` argument is set to `TRUE`, a multi-dimensional optimization is done for $\rho$ and $\boldsymbol{\theta}$ using the `bobyqa` function in the `minqa` package.
## Computation of polychoric correlation
For the polychoric correlation the observed data is expressed in ordinal form for both variables. Here the discretized version of ***X*** is ***P*** and the discretized version of ***Y*** remains ***M***.^[See the "wCorr Formulas" vignette for a more complete description of the polychoric correlations' likelihood function.] The likelihood function for the polychoric is
$$\mathrm{Pr}\left( \rho=r, \boldsymbol{\Theta}=\boldsymbol{\theta}, \boldsymbol{\Theta}'=\boldsymbol{\theta}' ; P=p_i, M=m_i \right) = \int_{\theta_{p_i+1}'}^{\theta_{p_i+2}'} \int_{\theta_{m_i+1}}^{\theta_{m_i+2}} \mkern-40mu f(x,y|\rho=r) dy dx$$
where $f(x,y|r)$ is the normalized bivariate normal distribution with correlation $\rho$, $\boldsymbol{\theta}$ are the cut points used to discretize ***Y*** into ***M***, and $\boldsymbol{\theta'}$ are the cut points used to discretize ***X*** into ***P***.
The log-likelihood is then
$$\ell(\rho, \boldsymbol{\Theta}=\boldsymbol{\theta}, \boldsymbol{\Theta}'=\boldsymbol{\theta}' ;\mathbf{P}=\mathbf{p}, \mathbf{M}=\mathbf{m}) = \sum_{i=1}^n w_i \ln\left[\mathrm{Pr}\left( \rho=r, \boldsymbol{\Theta}=\boldsymbol{\theta},\boldsymbol{\Theta}'= \boldsymbol{\theta}' ; P=p_i, M=m_i \right) \right] $$
The derivatives of $\ell$ can be written down but are not readily computed. When the `ML` argument is set to `FALSE` (the default), the values of $\boldsymbol{\theta}$ and $\boldsymbol{\theta}'$ are computed using a consistent estimator and a one dimensional optimization of $\rho$ is calculated using the `optimize` function in the `stats` package. When the `ML` argument is set to `TRUE`, a multi-dimensional optimization is done for $\rho$, $\boldsymbol{\theta}$, $\boldsymbol{\theta}'$ using the `bobyqa` function in the `minqa` package.
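As a usage sketch, the chunk below calls `weightedCorr` with both settings of the `ML` switch on made-up ordinal data; the data and parameter values are chosen only for illustration.

```{r, eval=FALSE}
library(MASS)
library(wCorr)
set.seed(3)
xy <- mvrnorm(500, mu = c(0, 0), Sigma = matrix(c(1, 0.6, 0.6, 1), 2, 2))
p  <- cut(xy[, 1], c(-Inf, -0.5, 0.7, Inf), labels = FALSE)
m  <- cut(xy[, 2], c(-Inf, -1, 0, 1, Inf), labels = FALSE)
weightedCorr(p, m, method = "Polychoric", ML = FALSE)  # default: faster
weightedCorr(p, m, method = "Polychoric", ML = TRUE)   # joint MLE of rho, theta, theta'
```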
# Simulation study
To demonstrate the effect of the `ML` and `fast` switches a few simulation studies are performed to compare the similarity of the results when the switch is set to `TRUE` to the result when the switch is set to `FALSE`. This is done first for the `ML` switch and then for the `fast` switch.
Finally, simulations show the implications of these switches on the speed of the computation.
# General procedures of the simulation study of unweighted correlations
A simulation is run several times.^[The exact number is noted for each specific simulation.] For each iteration, the following procedure is used:^[When the exact method of selecting a parameter (such as $n$) is not noted above, it is described as part of each simulation.]
* select a true correlation coefficient $\rho$;
* select the number of observations $n$;
* generate ***X*** and ***Y*** to be bivariate normally distributed using a pseudo-Random Number Generator (RNG);
* using a pseudo-RNG, select the number of bins for ***M*** and ***P*** ($t$ and $t'$) independently from the set \{2, 3, 4, 5\};
* select the bin boundaries for ***M*** and ***P*** ($\boldsymbol{\theta}$ and $\boldsymbol{\theta}'$) by sorting the results of $(t-1)$ and $(t'-1)$ draws, respectively, from a normal distribution using a pseudo-RNG;
* confirm that at least 2 levels of each of ***M*** and ***P*** are occupied (if not, return to the previous step); and
* calculate and record the correlation coefficients.
One of a few possible statistics is then calculated. To compare two levels of a switch the Relative Mean Absolute Deviation is used
$$RMAD= \frac{1}{m} \sum_{j=1}^m | r_{j, \mathtt{TRUE}} - r_{j, \mathtt{FALSE}} | $$
where there are $m$ simulations run, and $r_{j, \mathtt{TRUE}}$ and $r_{j, \mathtt{FALSE}}$ are the estimated correlation coefficients for the $j$th simulated dataset when the switch is set to `TRUE` and `FALSE`, respectively. This statistic is called "relative" because each estimate is compared to the other method of computing the statistic, not to the true value.
To compare either level to the true correlation coefficient the Root Mean Square Error is used
$$RMSE= \sqrt{ \frac{1}{m} \sum_{j=1}^m (r_j - \rho_j)^2 } $$
where, for the $j$th simulated dataset, $r_j$ is an estimated correlation coefficient and $\rho_j$ is the value used to generate the data (***X***, ***Y***, ***M***, and ***P***).
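Expressed as simple R helpers (the names are chosen only for this sketch), these two statistics are:

```{r, eval=FALSE}
rmad <- function(rTrue, rFalse) mean(abs(rTrue - rFalse))  # compares the two switch settings
rmse <- function(r, rho)        sqrt(mean((r - rho)^2))    # compares estimates to the truth
```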
# ML switch
A simulation was done using the Cartesian product (all possible combinations of) $\mathtt{ML} \in \{\mathtt{TRUE}, \mathtt{FALSE} \}$, $\rho \in \left( -0.99, -0.95, -0.90, -0.85, ..., 0.95, 0.99 \right)$, and $n \in \{10, 100, 1000\}$. Each iteration is run three times to increase the precision of the simulation. The same values of the variables are used in the computation for `ML=TRUE` as well as for `ML=FALSE`; and then the statistics are compared between the two sets of results (e.g. `ML=TRUE` and `ML=FALSE`).
\newpage
**`r fig_nums("MLRMSE", display="cite")`.** *Root Mean Square Error for `ML=TRUE` and `ML=FALSE`.*
```{r MLRMSEplot, echo=FALSE,fig.width=7, fig.height=5.5}
#ml <- subset(ML, type %in% c("Polychoric", "Polyserial"))
#ml$rmse <- (ml$est - ml$rho)^2
#aggml <- summaryBy(rmse ~ n + rho + type + ML, data=ml, FUN=mean, na.rm=TRUE)
#aggml$rmse.mean <- sqrt(aggml$rmse.mean)
#aggml$ml <- ifelse(aggml$ML==TRUE, "ML=TRUE", "ML=FALSE")
#aggml$nt <- factor(paste("n=",aggml$n))
xyplot(rmse.mean ~ rho|type + nt,
data=aggml,
groups=ml,
scales=list(y=list(log=10, cex=0.7), x = list(cex=0.7)),
type=c("l", "g"),
ylab="RMSE",
xlab=expression(rho),
auto.key=list(lines=TRUE, points=FALSE, space="right", cex=0.7),
par.settings=list(superpose.line=list(lwd=2), plot.line=list(lwd=2)))
```
The RMSE for these two options is so similar that the two lines cannot be distinguished for most of the plot. The exact differences are shown for Polychoric in `r table_nums("Polychoric", display="cite")` and for Polyserial in `r table_nums("Polyserial", display="cite")`. The column labeled "RMSE difference" shows how much larger the RMSE is for `ML=TRUE` than for `ML=FALSE`. Because this difference is always positive, the RMSE of the `ML=FALSE` option is always lower. Because of this, `ML=TRUE` is preferable only in situations where there is some other reason to prefer the MLE.
\
**`r table_nums("Polychoric", display="cite")`.** *Relative Mean Absolute Deviation between `ML=TRUE` and `ML=FALSE` for Polychoric.*
```{r ML RMSE table polyc, echo=FALSE}
#ml$i <- rep(1:(nrow(ml)/2),each=2)
#mml <- merge(subset(ml,ML),
# subset(ml,!ML, c("i", "est")),
# by="i",
# suffixes=c(".ml",".nonml"))
#mml$absd <- abs(mml$est.ml - mml$est.nonml)
#aggt1_0 <- summaryBy(absd ~ type + n + ML, data=subset(mml, #type=="Polychoric"), FUN=mean, na.rm=TRUE)
#aggt1_0$ML <- NULL
#aggt1 <- summaryBy(rmse ~ type + n + ML, data=subset(ml, type=="Polychoric"), FUN=mean, na.rm=TRUE)
#aggt1$rmse.mean <- sqrt(aggt1$rmse.mean)
mg <- merge(subset(aggt1, ML==TRUE, c("type", "n", "rmse.mean")),
subset(aggt1, ML==FALSE, c("type", "n", "rmse.mean")),
by=c("type", "n"))
mg$rmse.mean.diff <- mg$rmse.mean.x - mg$rmse.mean.y
mg <- merge(mg, aggt1_0, by=c("type", "n"))
colnames(mg) <- c("Correlation type", "n", "RMSE ML=TRUE", "RMSE ML=FALSE", "RMSE difference", "RMAD")
mg[,3:6] <- round(mg[,3:6],4)
kable(mg)
mg1 <- mg
#knitr::asis_output("\\")
```
\
**`r table_nums("Polyserial", display="cite")`.** *Relative Mean Absolute Deviation between `ML=TRUE` and `ML=FALSE` for Polyserial.*
```{r ML RMSE table polys, echo=FALSE}
#aggt2_0 <- summaryBy(absd ~ type + n + ML, data=subset(mml, type=="Polyserial"), FUN=mean, na.rm=TRUE)
#aggt2_0$ML <- NULL
#aggt2 <- summaryBy(rmse ~ type + n + ML, data=subset(ml, type=="Polyserial"), FUN=mean, na.rm=TRUE)
#aggt2$rmse.mean <- sqrt(aggt2$rmse.mean)
mg <- merge(subset(aggt2, ML==TRUE, c("n", "type", "rmse.mean")),
subset(aggt2, ML==FALSE, c("type", "n", "rmse.mean")),
by=c("type", "n"))
mg$rmse.mean.diff <- mg$rmse.mean.x - mg$rmse.mean.y
mg <- merge(mg, aggt2_0, by=c("type", "n"))
colnames(mg) <- c("Correlation type", "n", "RMSE ML=TRUE", "RMSE ML=FALSE", "RMSE difference", "RMAD")
mg[,3:6] <- round(mg[,3:6],4)
kable(mg)
mg2 <- mg
```
For the Polychoric, the agreement between these two methods, in terms of RMSE, is within `r round(mg1[1,5],3)` for $n$ of 10 and decreases to less than `r formatC(round(mg1[2,5],4), format="f", digits=4)` for $n$ of 100 or more. Given the magnitude of these differences, the faster method will be preferable.
The final column in the above tables shows the RMAD which compares how similar the `ML=TRUE` and `ML=FALSE` results are to each other. Because these values are larger than 0, they indicate that there is not complete agreement between the two sets of estimates. If a user considers the MLE to be the correct estimate then they show the deviation of the `ML=FALSE` results from the correct results.
# fast switch
This section examines the agreement between the pure R implementation of the function that calculates the correlation and the `Rcpp` and `RcppArmadillo` implementation, which is expected to be faster. The code can compute with either option by setting `fast=FALSE` (pure R) or `fast=TRUE` (Rcpp).
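A usage sketch comparing the two settings on the same made-up data is below; the data are chosen only for illustration.

```{r, eval=FALSE}
library(MASS)
library(wCorr)
set.seed(4)
xy <- mvrnorm(500, mu = c(0, 0), Sigma = matrix(c(1, 0.6, 0.6, 1), 2, 2))
p  <- cut(xy[, 1], c(-Inf, -0.5, 0.7, Inf), labels = FALSE)
m  <- cut(xy[, 2], c(-Inf, -1, 0, 1, Inf), labels = FALSE)
rR   <- weightedCorr(p, m, method = "Polychoric", fast = FALSE)  # pure R
rCpp <- weightedCorr(p, m, method = "Polychoric", fast = TRUE)   # Rcpp/RcppArmadillo
abs(rR - rCpp)
```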
A simulation was done at each level of the Cartesian product of $\mathtt{fast} \in \{\mathtt{TRUE}, \mathtt{FALSE} \}$, \newline $\rho \in \left( -0.99, -0.95, -0.90, -0.85, ..., 0.95, 0.99 \right)$, and $n \in \{10, 100, 1000\}$. Each iteration was run 100 times. The same values of the variables are used in the computation for `fast=TRUE` as well as for `fast=FALSE`; and then the statistics are compared between the two sets of results.
The plot below shows all differences between the `fast=TRUE` and `fast=FALSE` runs for the four types of correlations. Note that differences smaller than $10^{-16}$ are indistinguishable from 0 by the machine. Because of this, all values were shown as being at least $10^{-16}$ so that they could all be shown on a log scale.
\
**`r fig_nums("fastMAD", display="cite")`.** *Relative Mean Absolute Differences between `fast=TRUE` and `fast=FALSE`.*
```{r fast MAD plot, echo=FALSE,fig.width=7, fig.height=3.5}
xyplot(absdrho.mean ~ rho|type,
data=aggfast,
groups=n,
type=c("l", "g"),
ylab="RMAD",
scales=list(y=list(log=10, cex=0.7), x=list(cex=0.7)),
xlab=expression(rho),
auto.key=list(lines=TRUE, points=FALSE, space="right", cex=0.7),
par.settings=list(superpose.line=list(lwd=2), plot.line=list(lwd=2))
)
```
The above shows that differences as a result of the `fast` argument are never expected to be larger than $10^{`r fmax10`}$ for any correlation type. The Spearman never shows any difference that is distinguishable from zero, and the Pearson shows differences just larger than the smallest observable difference when using double precision floating point values (about $1 \times 10^{-16}$). This indicates that the computation differences are completely irrelevant for these two types.
For the other two types, it is unclear which implementation is more accurate, but because the two never differ by more than $10^{`r fmax10`}$, any use that requires precision no finer than $10^{`r fmax10`}$ can use the `fast=TRUE` argument for faster computation.
# Implications for speed
To show the effect of the `ML` and `fast` switches on computation a simulation was done at each level of the Cartesian product of $\mathtt{ML} \in \{\mathtt{TRUE}, \mathtt{FALSE} \}$, $\mathtt{fast} \in \{\mathtt{TRUE}, \mathtt{FALSE} \}$, $\rho \in \left( -0.99, -0.95, -0.90, -0.85, ..., 0.95, 0.99 \right)$, and $n \in \{10^1, 10^{1.25}, 10^{1.5}, ..., 10^7\}$. Each iteration is run 80 times when $n<10^5$ and 20 times when $n\geq 10^5$. The same values of the variables are used in the computations at all four combinations of `ML` and `fast`. A variety of correlations are chosen so that the results represent an average of possible values of $\rho$.
The following plot shows the mean computing time (in seconds) versus $n$.
\
**`r fig_nums("speedi", display="cite")`.** *Computation time comparison.*
```{r plot speed, echo=FALSE,fig.width=7, fig.height=3.5}
# speed$class <- ifelse(speed$ML, "ML=T,", "ML=F,")
# speed$class <- paste0(speed$class, ifelse(speed$fast, "fast=T", "fast=F"))
# speed$t <- pmax(speed$t, 0.001)
# agg <- summaryBy(t ~ n + type + class, data=speed, FUN=mean, na.rm=TRUE)
xyplot(t.mean ~ n|type,
data=subset(aggSpeed, type %in% c("Polyserial", "Polychoric")),
type=c("l", "g"),
ylab="Computing Time",
scales=list(y=list(log=10, cex=0.7), x=list(log=10, cex=0.7)),
xlab="n",
groups=class,
auto.key=list(lines=TRUE, points=FALSE, space="right", cex=0.7),
par.settings=list(superpose.line=list(lwd=2), plot.line=list(lwd=2))
)
```
In all cases, setting the `ML` option to `FALSE` and the `fast` option to `TRUE` speeds up computation--or at least does not slow it down. Users wishing for the fastest computation speeds will use `ML=FALSE` and `fast=TRUE`.
For the Polychoric, when $n$ is ten million observations ($n=10^7$), the computation of a correlation takes `r round(with(subset(speed, fast==FALSE & n==1e7 & type=="Polychoric"), mean(t)))` seconds when `fast=FALSE` and `r round(with(subset(speed, fast==TRUE & n==1e7 & type=="Polychoric"), mean(t)))` seconds when `fast=TRUE`. When `fast=TRUE`, setting `ML=FALSE` speeds computation by `r round(with(subset(speed, fast==TRUE & ML==TRUE & n==1e7 & type=="Polychoric"), mean(t)) - with(subset(speed, fast==TRUE & ML==FALSE & n==1e7 & type=="Polychoric"), mean(t)))` seconds.
For the Polyserial, when $n$ is ten million observations, the computation takes `r round(with(subset(speed, ML==TRUE & n==1e7 & type=="Polyserial"), mean(t)))` seconds when `ML=TRUE` and `r round(with(subset(speed, ML==FALSE & n==1e7 & type=="Polyserial"), mean(t)))` seconds when `ML=FALSE`. When `ML=FALSE`, setting `fast=TRUE` speeds computation by `r round(with(subset(speed, fast==FALSE & ML==FALSE & n==1e7 & type=="Polyserial"), mean(t)) - with(subset(speed, fast==TRUE & ML==FALSE & n==1e7 & type=="Polyserial"), mean(t)))` seconds.
# Conclusion
Overall the simulations show that the `ML` option is not more accurate but does add computational burden.
The `fast=TRUE` and `fast=FALSE` options are an `Rcpp` version of the correlation code and a pure `R` version, respectively, and they agree with each other--the differences are not expected to be larger than $10^{`r fmax10`}$.
Thus users wishing for fastest computation speeds and accurate results can use `ML=FALSE` and `fast=TRUE`.
|
/scratch/gouwar.j/cran-all/cranData/wCorr/vignettes/wCorrArguments.Rmd
|
---
title: "wCorr Formulas"
author: "Paul Bailey, Ahmad Emad, Ting Zhang, Qingshu Xie"
date: '`r Sys.Date()`'
output:
pdf_document: default
vignette: |
%\VignetteEngine{knitr::rmarkdown}
%\VignetteIndexEntry{wCorr Formulas}
\usepackage[utf8]{inputenc}
\usepackage{amssymb}
---
```{r packages and data, echo=FALSE, results="hide", message=FALSE, warning=FALSE}
require(wCorr)
if(!requireNamespace("doBy")) {
stop("Cannot build vignette without knitr package")
}
if(!requireNamespace("lattice")) {
stop("Cannot build vignette without lattice package")
}
require(lattice)
require(doBy)
# set layout so a figure label appears to go with the figure
trellis.device()
trellis.par.set(list(layout.widths = list(left.padding = 3, right.padding = 3),
layout.heights = list(top.padding = -1, bottom.padding = 3)))
load("../R/sysdata.rda")
```
```{r tables and figures, echo=FALSE, results="hide", message=FALSE, warning=FALSE}
# replicate captioner functionality we used to use
cp <- function(prefix="Figure") {
pf <- prefix
cw <- data.frame(name="__XX__UNUSED", print="Table 99")
i <- 1
function(x, display=c("save", "cite", "cw")) {
if(display[1] %in% "cw") {
return(cw)
}
display <- match.arg(display)
if(is.null(x)) {
stop("must define argument x")
}
if(display %in% "cite" && !x %in% cw$name) {
display <- "save"
}
if(display %in% "cite") {
return(cw$print[cw$name == x])
}
if(display %in% "save") {
if(x %in% cw$name) {
stop("Label:",dQuote(x)," already in use.")
}
cw[i, "name"] <<- x
res <- paste(pf, i, ":")
cw[i, "print"] <<- res
i <<- i + 1
return(res)
}
}
}
fig_nums <- cp()
table_nums <- cp(prefix = "Table")
theta <- fig_nums("theta")
biasVsRho <- fig_nums("biasVsRho")
rmseVsRho <- table_nums("rmseVsRho")
rmseVsRho2 <- table_nums("rmseVsN")
speedi <- table_nums("speedi")
rmseVsRho3 <- table_nums("rmseVsRho2")
rmseVsN <- table_nums("rmseVsN2")
```
The wCorr package can be used to calculate Pearson, Spearman, polyserial, and polychoric correlations, in weighted or unweighted form.^[The estimation procedure used by the wCorr package for the polyserial is based on the likelihood function in Cox, N. R. (1974), "Estimation of the Correlation between a Continuous and a Discrete Variable." *Biometrics*, **30** (1), pp 171-178. The likelihood function for polychoric is from Olsson, U. (1979) "Maximum Likelihood Estimation of the Polychoric Correlation Coefficient." *Psychometrika*, **44** (4), pp 443-460. The likelihood used for Pearson and Spearman is written down in many places. One is the "correlate" function in Stata Corp, Stata Statistical Software: Release 8. College Station, TX: Stata Corp LP, 2003.] The package implements the tetrachoric correlation as a specific case of the polychoric correlation and biserial correlation as a specific case of the polyserial correlation. When weights are used, the correlation coefficients are calculated with so called sample weights or inverse probability weights.^[Sample weights are comparable to `pweight` in Stata.]
This vignette introduces the methodology used in the wCorr package for computing the Pearson, Spearman, polyserial, and polychoric correlations, with and without weights applied. For the polyserial and polychoric correlations, the coefficient is estimated using a numerical likelihood maximization.
The weighted (and unweighted) likelihood functions are presented. Then simulation evidence is presented to show correctness of the methods, including an examination of the bias and consistency. This is done separately for unweighted and weighted correlations.
Numerical simulations are used to show:
* The bias of the methods as a function of the true correlation coefficient ($\rho$) and the number of observations ($n$) in the unweighted and weighted cases; and
* The accuracy [measured with root mean squared error (RMSE) and mean absolute deviation (MAD)] of the methods as a function of $\rho$ and $n$ in the unweighted and weighted cases.
Note that here *bias* is used for the mean difference between true correlation and estimated correlation.
The *wCorr Arguments* vignette describes the effects the `ML` and `fast` arguments have on computation and gives examples of calls to wCorr.
# Specification of estimation formulas
Here we focus on specification of the correlation coefficients between two vectors of random variables that are jointly bivariate normal. We call the two vectors ***X*** and ***Y***. The $i^{th}$ members of the vectors are then called $x_i$ and $y_i$.
## Formulas for Pearson correlations with and without weights
The weighted Pearson correlation is computed using the formula
$$r_{Pearson}=\frac{\sum_{i=1}^n \left[ w_i (x_i-\bar{x})(y_i-\bar{y}) \right]}{\sqrt{\sum_{i=1}^n \left( w_i (x_i-\bar{x})^2 \right)\sum_{i=1}^n \left( w_i (y_i-\bar{y})^2 \right) }} $$
where $w_i$ is the weight of the $i$th unit, $\bar{x}$ is the weighted mean of the ***X*** variable ($\bar{x}=\frac{1}{\sum_{i=1}^n w_i}\sum_{i=1}^n w_i x_i$), $\bar{y}$ is the weighted mean of the ***Y*** variable ($\bar{y}=\frac{1}{\sum_{i=1}^n w_i}\sum_{i=1}^n w_i y_i$), and $n$ is the number of elements in ***X*** and ***Y***.^[See the "correlate" function in Stata Corp, Stata Statistical Software: Release 8. College Station, TX: Stata Corp LP, 2003.]
The unweighted Pearson correlation is calculated by setting all of the weights to one.
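A direct R transcription of this formula is sketched below; it is illustrative only (the package's own routine is `weightedCorr` with `method="Pearson"`), and the function name is chosen just for this example.

```{r, eval=FALSE}
# Illustrative weighted Pearson correlation, transcribed from the formula above.
wPearson <- function(x, y, w = rep(1, length(x))) {
  xbar <- sum(w * x) / sum(w)
  ybar <- sum(w * y) / sum(w)
  sum(w * (x - xbar) * (y - ybar)) /
    sqrt(sum(w * (x - xbar)^2) * sum(w * (y - ybar)^2))
}
```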
## Formulas for Spearman correlations with and without weights
For the Spearman correlation coefficient the unweighted coefficient is calculated by ranking the data and then using those ranks to calculate the Pearson correlation coefficient--so the ranks stand in for the ***X*** and ***Y*** data. Again, similar to the Pearson, for the unweighted case the weights are all set to one.
For the unweighted case the highest rank receives a value of 1 and the second highest 2, and so on down to the $n$th value. In addition, when data are ranked, ties must be handled in some way. The chosen method is to use the average of all tied ranks. For example, if the second and third rank units are tied then both units would receive a rank of 2.5 (the average of 2 and 3).
For the weighted case there is no commonly accepted weighted Spearman correlation coefficient. Stata does not estimate a weighted Spearman, and SAS does not document its methodology in either the corr or freq procedure.
The weighted case presents two issues. First, the ranks must be calculated. Second, the correlation coefficient must be calculated.
Calculating the weighted rank for an individual level is done via two terms. For the $j$th element the rank is
$$rank_j = a_j + b_j$$
The first term $a_j$ is the sum of all weights ***W*** for units whose value of the outcome being ranked is less than this value ($\xi_j$)
$$a_j = \sum_{i=1}^n w_i \mathbf{1}\left( \xi_i < \xi_j \right)$$
where $\mathbf{1}(\cdot)$ is the indicator function that is one when the condition is true and 0 when the condition is false, $w_i$ is the $i$th weight and $\xi_i$ and $\xi_j$ are the $i$th and $j$th value of the vector being ranked, respectively.
The term $b_j$ then deals with ties. When there are ties each unit receives the mean rank for all of the tied units. When the weights are all one and there are $n$ tied units the vector of tied ranks would be $\mathbf{v}=\left(a_j+1, a_j+2, \dots, a_j+n \right)$. The mean of this vector (here called $rank^1$ to indicate it is a specific case of $rank$ when the weights are all one) is then
$$rank_j^1=\frac{1}{n} \sum_{i=1}^n \left(a_j + i \right)$$
$$=\frac{1}{n} \left( n a_j + \frac{n(n+1)}{2} \right)$$
$$=a_j + \frac{n+1}{2}$$
thus
$$b_j^1=\frac{n+1}{2}$$
where the superscript one is again used to indicate that this is only for the unweighted case where all weights are set to one.
For the weighted case this could be $\mathbf{v}=\left(a_j+w_1', a_j+w_1'+w_2', \dots, a_j+\sum_{k=1}^n w_k' \right)^T$ where ***W'*** is a vector containing the weights of the tied units. It is readily apparent that the mean of this vector depends on the ordering of the weights. To avoid this, the mean over all possible permutations of the weights is calculated. The following formula does just that
$$b_j = \frac{n+1}{2}\bar{w}_j$$
where $\bar{w}_j$ is the mean weight of all of the tied units. It is easy to see that when the weights are all one $\bar{w}_j=1$ and $b_j = b_j^1$. The latter (more general) formula is used for all cases.
After the ***X*** and ***Y*** vectors are ranked they are plugged into the weighted Pearson correlation coefficient formula shown earlier.
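A minimal R transcription of this weighted ranking, following the formulas above, is sketched below; the function name is chosen only for this example.

```{r, eval=FALSE}
# Illustrative weighted ranks: rank_j = a_j + b_j as defined above.
wRank <- function(xi, w = rep(1, length(xi))) {
  sapply(seq_along(xi), function(j) {
    a    <- sum(w[xi < xi[j]])                   # a_j: total weight strictly below
    tied <- xi == xi[j]
    b    <- (sum(tied) + 1) / 2 * mean(w[tied])  # b_j: mean-rank tie adjustment
    a + b
  })
}
# A weighted Spearman correlation is then the weighted Pearson correlation
# (previous section) of wRank(x, w) and wRank(y, w), using the same weights.
```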
## Formulas for polyserial correlation with and without weights
For the polyserial correlation, it is again assumed that there are two continuous variables ***X*** and ***Y*** that have a bivariate normal distribution.^[For a more complete treatment of the polyserial correlation, see Cox, N. R., "Estimation of the Correlation between a Continuous and a Discrete Variable," *Biometrics*, **30** (1), pp 171-178, 1974.]
$$\begin{pmatrix} X \\ Y \end{pmatrix} \sim N \left[ \begin{pmatrix} \mu_x \\ \mu_y \end{pmatrix}, \boldsymbol{\Sigma} \right]$$
where $N(\mathbf{A},\boldsymbol{\Sigma})$ is a bivariate normal distribution with mean vector ***A*** and covariance matrix $\boldsymbol{\Sigma}$. For the polyserial correlation, ***Y*** is discretized into the random variable ***M*** according to
$$m_i= \begin{cases} 1 \quad \mathrm{if}\ \theta_2 < y_i < \theta_3 \\ 2 \quad \mathrm{if}\ \theta_3 < y_i < \theta_4 \\ \vdots \\ t \quad \mathrm{if}\ \theta_{t+1} < y_i < \theta_{t+2} \end{cases}$$
where $\theta$ indicates the cut points used to discretize ***Y*** into ***M***, and $t$ is the number of bins. For notational convenience, $\theta_2 \equiv -\infty$ and $\theta_{t+2} \equiv \infty$.^[The indexing is somewhat odd to be consistent with Cox (1974). Nevertheless, this treatment does not use the Cox definition of $\theta_0$, $\theta_1$ or $\theta_2$ which are either not estimated (as is the case for $\theta_0$, and $\theta_1$) or are reappropriated (as is the case for $\theta_2$). Cox calls the correlation coefficient $\theta_2$ while this document uses $\rho$ and uses $\theta_2$ to store $-\infty$ as a convenience so that the vector $\boldsymbol{\theta}$ includes the (infinite) bounds as well as the interior points.]
To give a concrete example, the following figure shows the density of ***Y*** when the cut points are, for this example, $\theta=\left(-\infty,-2,-0.5,1.6,\infty\right)$. In this example, any value of $-2 < y_i < -0.5$ would have $m_i=2$.
**`r fig_nums("theta", display="cite")`.** *Density of Y for cutpoints $\theta = (-\infty, -2, -0.5, 1.6, \infty$).*
```{r theta2,echo=FALSE,results="hide",fig.width=7, fig.height=3}
#hi
```
hi
**`r fig_nums("theta", display="cite")`.** *Density of Y for cutpoints $\theta = (-\infty, -2, -0.5, 1.6, \infty$).*
```{r theta,echo=FALSE,results="hide",fig.width=7, fig.height=3}
x <- seq(-3,3,by=0.01)
y <- dnorm(x)
par0 <- par(no.readonly=TRUE)
par(ann=FALSE)
par(mar=c(5,2,1,1)+0.1)
plot(x,y,type="l",xlab="y",ylab="Density", xaxt="n", yaxt="n")
axis(1,at=c(-2,-0.5,1.6), labels=expression(theta[3],theta[4],theta[5]))
text(x=c(-2.5,-1.25,0.55,2.3),y=c(0.05,0.05,0.05,0.08), labels=paste0("m=",1:4))
theta <- c(-2,-0.5,1.6)
for(i in 1:3) {
lines(rep(theta[i],2), c(-1,dnorm(theta[i])))
}
par(ann=TRUE)
par(mgp=c(0.5,0,0))
title(ylab="density")
par(mgp=c(3,1,0))
title(xlab="Y")
par(par0)
```
Notice that $\mu_y$ is not identified (or is irrelevant) because, for any $a \in \mathbb{R}$, setting $\tilde{\mu}_y = \mu_y + a$ and $\tilde{\boldsymbol{\theta}}=\boldsymbol{\theta} + a$ leads to exactly the same values of $\mathbf{M}$, and so one of the two must be arbitrarily assigned. A convenient choice is to set $\mu_y \equiv 0$. A similar argument holds for $\sigma_y$, so that $\sigma_y \equiv 1$.
For ***X***, Cox (1974) observes that the MLE mean and standard deviation of ***X*** are simply the average and (population) standard deviation of the data and do not depend on the other parameters.^[The population standard deviation is used because it is the MLE for the standard deviation. Notice that, while the sample variance is an unbiased estimator of the variance and the population variance is not an unbiased estimator of the variance, they are very similar and the variance is also a nuisance parameter, not a parameter of interest when finding the correlation.] This can be taken advantage of by defining $z$ to be the standardized score of $x$ so that $z \equiv \frac{x- \bar{x}}{ \hat\sigma_x}$.
Combining these simplifications, the probability of any given $x_i$, $m_i$ pair is
$$\mathrm{Pr}\left( \rho=r, \boldsymbol{\Theta}=\boldsymbol{\theta} ; Z=z_i, M=m_i \right) = \phi(z_i) \int_{\theta_{m_i+1}}^{\theta_{m_i+2}} \!\!\!\!\!\!\!\!\!\!\!\! f(y|Z=z_i,\rho=r)dy$$
where $\mathrm{Pr}\left( \rho=r, \boldsymbol{\Theta}=\boldsymbol{\theta}; Z=z_i, M=m_i \right)$ is the probability of the event $\rho=r$ and the cut points are $\boldsymbol{\theta}$, given the $i$th data point $z_i$ and $m_i$; $\phi(\cdot)$ is the standard normal density; and $f(Y|Z,\rho)$ is the distribution of ***Y*** conditional on ***Z*** and $\rho$. Because ***Y*** and ***Z*** are jointly normally distributed (by assumption)
$$f(Y|Z=z_i,\rho=r) =N\left(\mu_y + \frac{\sigma_y}{\sigma_z}r(z_i-\mu_z), (1-r^2){\sigma_y}^2 \right)$$
because both ***Z*** and ***Y*** are standard normals
$$f(y|Z=z_i,\rho=r) =N\left(r \cdot z_i, (1-r^2) \right)$$
Noticing that $\frac{y-r\cdot z}{\sqrt{1-r^2}}$ has a standard normal distribution
$$\mathrm{Pr}\left( \rho=r, \boldsymbol{\Theta}=\boldsymbol{\theta} ; Z=z_i, M=m_i \right) = \phi(z_i) \left[ \Phi\left( \frac{\theta_{m_i+2} - r \cdot z_i}{\sqrt{1-r^2}} \right) - \Phi \left( \frac{\theta_{m_i+1} - r \cdot z_i}{\sqrt{1-r^2}} \right) \right]$$
where $\Phi(\cdot)$ is the standard normal cumulative density function. Using the above probability function as an objective, the log-likelihood is then maximized.
$$\ell(\rho=r, \boldsymbol{\Theta}=\boldsymbol{\theta};\mathbf{Z}=\mathbf{z},\mathbf{M}=\mathbf{m}) = \sum_{i=1}^n w_i \ln\left[ \mathrm{Pr}\left( \rho=r, \boldsymbol{\Theta}=\boldsymbol{\theta} ; Z=z_i, M=m_i \right) \right]$$
where $w_i$ is the weight of the $i^{th}$ members of the vectors ***Z*** and ***M***. For the unweighted case, all of the weights are set to one.
The value of the nuisance parameter $\boldsymbol{\theta}$ is chosen to be
$$\hat{\theta}_{j+2} = \Phi^{-1}(n/N)$$
where $n$ is the number of values to the left of the $j$th cut point ($\theta_{j+2}$ value) and $N$ is the number of data points overall. Here two is added to $j$ to make the indexing of $\theta$ agree with Cox (1974) as noted before. For the weighted case, $n$ is replaced by the sum of the weights to the left of the $j$th cut point and $N$ is replaced by the total weight of all units
$$\hat{\theta}_{j+2} = \Phi^{-1}\left( \frac{\sum_{i=1}^N w_i \mathbf{1}(m_i \leq j) }{\sum_{i=1}^N w_i} \right)$$
where $\mathbf{1}$ is the indicator function that is 1 when the condition is true and 0 otherwise.
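To make the cut point estimator concrete, the following sketch computes the weighted interior cut points from the observed bins; it illustrates the formula above and is not the package's internal code (the function name `thetaHat` is chosen here for convenience).
```{r thetaHatSketch, eval=FALSE}
# Sketch: weighted estimates of the interior cut points theta_3, ..., theta_(t+1)
# m: observed bins (integers 1..t), w: weights
thetaHat <- function(m, w = rep(1, length(m))) {
  t <- max(m)
  sapply(1:(t - 1), function(j) {
    # weighted share of units in bins 1..j, i.e., to the left of the j-th interior cut point
    qnorm(sum(w * (m <= j)) / sum(w))
  })
}
```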
### Computation of polyserial correlation
For the polyserial, derivatives of $\ell$ can be written down but are not readily computed. When the `ML` argument is set to `FALSE` (the default), a one dimensional optimization of $\rho$ is calculated using the `optimize` function in the `stats` package and the values of $\boldsymbol{\theta}$ from the previous paragraph. When the `ML` argument is set to `TRUE`, a multi-dimensional optimization is done for $\rho$ and $\boldsymbol{\theta}$ using the `bobyqa` function in the ``minqa`` package. See the *wCorr Arguments* vignette for a comparison of these two methods.
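For illustration only, a minimal version of the non-ML computation can be written directly from the likelihood above. The sketch below (not the wCorr implementation) standardizes ***X***, fixes $\boldsymbol{\theta}$ with the `thetaHat` sketch from the previous section, and optimizes the weighted log-likelihood over $\rho$ with `optimize`.
```{r polyserialSketch, eval=FALSE}
# Sketch: weighted polyserial correlation via one-dimensional optimization over rho
polyserialSketch <- function(x, m, w = rep(1, length(x))) {
  mu <- weighted.mean(x, w)
  z  <- (x - mu) / sqrt(sum(w * (x - mu)^2) / sum(w))  # weighted standardization of x
  theta <- c(-Inf, thetaHat(m, w), Inf)                # cut points, including infinite bounds
  negLogLik <- function(r) {
    upper <- (theta[m + 1] - r * z) / sqrt(1 - r^2)
    lower <- (theta[m]     - r * z) / sqrt(1 - r^2)
    -sum(w * (dnorm(z, log = TRUE) + log(pnorm(upper) - pnorm(lower))))
  }
  optimize(negLogLik, interval = c(-0.999, 0.999))$minimum
}
```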
Because the numerical optimization is not perfect when the correlation is in a boundary condition ($\rho \in \{-1,1\}$), a check for perfect correlation is performed before the above optimization by simply examining if the values of ***X*** and ***M*** have agreeing order (or opposite but agreeing order) and then the MLE correlation of 1 (or -1) is returned.
## Methodology for polychoric correlation with and without weights
Similar to the polyserial correlation, the polychoric correlation is a simple case of two continuous variables ***X*** and ***Y*** that have a bivariate normal distribution. In the case of the polyserial correlation the continuous (latent) variable ***Y*** was observed as a discretized variable ***M***. For the polychoric correlation, this is again true but now the continuous (latent) variable ***X*** is observed as a discrete variable ***P*** according to
$$p_i= \begin{cases} 1 \quad \mathrm{if}\ \theta'_2 < x_i < \theta'_3 \\ 2 \quad \mathrm{if}\ \theta'_3 < x_i < \theta'_4 \\ \vdots \\ t' \quad \mathrm{if}\ \theta'_{t'+1} < x_i < \theta'_{t'+2} \end{cases}$$
where $\boldsymbol{\theta}$ remains the vector of cut points for the distribution defining the transformation of ***Y*** to ***M***, $\boldsymbol{\theta}'$ is the vector of cut points for the transformation from ***X*** to ***P***, and $t'$ is the number of bins for ***P***. Similar to $\boldsymbol{\theta}$, $\boldsymbol{\theta}'$ has $\theta'_2 \equiv -\infty$ and $\theta'_{t'+2} \equiv \infty$.
As in the polyserial correlation, $\mu_y$ is not identified (or is irrelevant) because, for any $a \in \mathbb{R}$, setting $\tilde{\mu}_y = \mu_y + a$ and $\tilde{\boldsymbol{\theta}}=\boldsymbol{\theta} + a$ leads to exactly the same values of $\mathbf{M}$, and so one of the two must be arbitrarily assigned. The same is true for $\mu_x$. A convenient choice is to set $\mu_y = \mu_x \equiv 0$. A similar argument holds for $\sigma_y$ and $\sigma_x$, so that $\sigma_y = \sigma_x \equiv 1$.
Then the probability of any given $m_i$, $p_i$ pair is
$$\mathrm{Pr}\left( \rho=r, \boldsymbol{\Theta}=\boldsymbol{\theta}, \boldsymbol{\Theta}'=\boldsymbol{\theta}' ; P=p_i, M=m_i \right) = \int_{\theta'_{p_i+1}}^{\theta'_{p_i+2}} \int_{\theta_{m_i+1}}^{\theta_{m_i+2}} \!\!\!\!\!\!\!\!\!\!\!\! f(x,y|\rho=r)dydx$$
where $\rho$ is the correlation coefficient.
Using this function as an objective, the log-likelihood is then maximized.
$$\ell(\rho=r, \boldsymbol{\Theta}=\boldsymbol{\theta}, \boldsymbol{\Theta}'=\boldsymbol{\theta}';\mathbf{P}=\mathbf{p},\mathbf{M}=\mathbf{m}) = \sum_{i=1}^n w_i \ln\left[\mathrm{Pr}\left( \rho=r, \boldsymbol{\Theta}=\boldsymbol{\theta}, \boldsymbol{\Theta}'=\boldsymbol{\theta}' ; P=p_i, M=m_i \right) \right] $$
This is the weighted log-likelihood function. For the unweighted case all of the weights are set to one.
### Computation of polychoric correlation
This again mirrors the treatment of the polyserial. The derivatives of $\ell$ for the polychoric can be written down but are not readily computed. When the `ML` argument is set to `FALSE` (the default), a one dimensional optimization of $\rho$ is calculated using the `optimize` function from the `stats` package and values of $\boldsymbol{\theta}$ and $\boldsymbol{\theta}'$ are computed using the last equation in the section titled, "Formulas for polyserial correlation with and without weights". When the `ML` argument is set to `TRUE` a multi-dimensional optimization is done for $\rho$, $\boldsymbol{\theta}$, and $\boldsymbol{\theta}'$ using the `bobyqa` function in the `minqa` package. See the *wCorr Arguments* vignette for a comparison of these two methods.
Because the optimization is not perfect when the correlation is in a boundary condition ($\rho \in \{-1,1\}$), a check for perfect correlation is performed before the above optimization by simply examining if the values of ***P*** and ***M*** have a Goodman-Kruskal correlation coefficient of -1 or 1. When this is the case, the MLE of -1 or 1, respectively, is returned.
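A similarly minimal sketch of the non-ML polychoric computation is shown below. It uses `mvtnorm::pmvnorm` to evaluate the bivariate normal rectangle probabilities purely for illustration; the package's own implementation need not use this routine.
```{r polychoricSketch, eval=FALSE}
# Sketch: weighted polychoric correlation via one-dimensional optimization over rho
polychoricSketch <- function(p, m, w = rep(1, length(p))) {
  thetaP <- c(-Inf, thetaHat(p, w), Inf)  # cut points for P (thetaHat sketch from above)
  thetaM <- c(-Inf, thetaHat(m, w), Inf)  # cut points for M
  negLogLik <- function(r) {
    S <- matrix(c(1, r, r, 1), 2, 2)      # correlation matrix of the latent (X, Y)
    pr <- mapply(function(pj, mj) {
      mvtnorm::pmvnorm(lower = c(thetaP[pj], thetaM[mj]),
                       upper = c(thetaP[pj + 1], thetaM[mj + 1]), sigma = S)
    }, p, m)
    -sum(w * log(pr))
  }
  optimize(negLogLik, interval = c(-0.999, 0.999))$minimum
}
```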
# Simulation evidence on the correctness of the estimating methods
It is easy to prove the consistency of $\boldsymbol{\theta}$ for the polyserial correlation and of $\boldsymbol{\theta}$ and $\boldsymbol{\theta}'$ for the polychoric correlation using the non-ML case. Similarly, for $\rho$, because it is an MLE that can be obtained by taking a derivative and setting it equal to zero, the results are asymptotically unbiased and attain the Cramer-Rao lower bound.
This does not speak to the small sample properties of these correlation coefficients. Previous work has described their properties by simulation; and that tradition is continued below.^[See, for example, the introduction to Rigdon, E. E. and Ferguson C. E., "The Performance of the Polychoric Correlation Coefficient and Selected Fitting Functions in Confirmatory Factor Analysis With Ordinal Data" *Journal of Marketing Research* **28** (4), pp. 491-497.]
## Simulation study of unweighted correlations
In what follows, when the exact method of selecting a parameter (such as $n$) is not noted in the above descriptions it is described as part of each simulation.
Across a number of iterations (the exact number of times will be stated for each simulation), the following procedure is used (a minimal sketch of one iteration is given after the list):
* select a true Pearson correlation coefficient $\rho$;
* select the number of observations $n$;
* generate ***X*** and ***Y*** to be bivariate normally distributed using a pseudo-Random Number Generator (RNG);
* using a pseudo-RNG, select the number of bins for ***M*** and ***P*** ($t$ and $t'$) independently from the set \{2, 3, 4, 5\};^[This means that the simulation uses discrete ordinal variables (***M*** and ***P***) that have 2, 3, 4, or 5 discrete levels. Note that the number of levels in ***M*** and ***P*** are chosen independently so that one could be 2 while the other is 5 (or any other possible combination).]
* select the bin boundaries for ***M*** and ***P*** ($\boldsymbol{\theta}$ and $\boldsymbol{\theta}'$) by sorting the results of $(t-1)$ and $(t'-1)$ draws, respectively, from a normal distribution using a pseudo-RNG;
* confirm that at least 2 levels of each of ***M*** and ***P*** are occupied (if not, return to previous step); and
* calculate and record relevant statistics.
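The sketch below shows the data generation for one such iteration; the function and variable names are illustrative and this is not the code used to produce the figures.
```{r simIterationSketch, eval=FALSE}
# Sketch: generate the data for one iteration of the unweighted simulation
oneIteration <- function(rho, n) {
  # bivariate normal X and Y with correlation rho
  x <- rnorm(n)
  y <- rho * x + sqrt(1 - rho^2) * rnorm(n)
  # numbers of bins for M and P, drawn independently from {2, ..., 5}
  t  <- sample(2:5, 1)
  tp <- sample(2:5, 1)
  # bin boundaries from sorted normal draws; cut() assigns the bins
  m <- as.integer(cut(y, breaks = c(-Inf, sort(rnorm(t  - 1)), Inf)))
  p <- as.integer(cut(x, breaks = c(-Inf, sort(rnorm(tp - 1)), Inf)))
  # require at least two occupied levels in each discretized variable
  # (for simplicity this sketch redraws the whole iteration)
  if (length(unique(m)) < 2 || length(unique(p)) < 2) return(oneIteration(rho, n))
  list(x = x, y = y, m = m, p = p)
}
```
The four correlation coefficients are then computed from `x`, `y`, `m`, and `p` (for example, with `weightedCorr`) and recorded.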
## Bias and RMSE of the unweighted correlations
This section shows the bias of the correlations as a function of the true correlation coefficient, $\rho$. To that end, a simulation was done at each level of the Cartesian product of $\rho \in \left( -0.99, -0.95, -0.90, -0.85, ..., 0.95, 0.99 \right)$ and $n \in \{10, 100, 1000\}$. For precision, each level of $\rho$ and $n$ was run fifty times. The bias is the mean difference between the estimated correlation coefficient ($r_i$) and the true correlation coefficient ($\rho_i$). The RMSE is the square root of the mean squared error.
$$\mathrm{RMSE}= \sqrt{ \frac{1}{n} \sum_{i=1}^n \left( r_i - \rho_i \right)^2 }$$
And the bias is given by
$$bias=\frac{1}{n}\sum_{i=1}^n \left(r_i - \rho_i \right)$$
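In code, given a vector `r` of estimated coefficients and a vector `rho` of the corresponding true values, these two summaries are simply:
```{r biasRmseSketch, eval=FALSE}
bias <- mean(r - rho)            # mean signed error
rmse <- sqrt(mean((r - rho)^2))  # root mean squared error
```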
`r fig_nums("biasVsRho", display="cite")` shows the bias as a function of the true correlation $\rho$. Only the polyserial shows no bias at any level of $n$, shown by no clear deviation from 0 at any level of $\rho$. For the Pearson correlation there is bias when $n=10$ that is not present when $n=100$ or 1,000. This is a well known property of the estimator.^[see, for example, Olkin I. and Pratt, J. W. (1958), Unbiased Estimation of Certain Correlation Coefficients. *Annals of Mathematical Statistics*, *29* (1), 201--211.] Similarly, the polychoric shows bias when $n=10$.
The Spearman correlation shows bias at all of the tested levels of $n$. The bias is zero when the true correlation is 1, 0, or -1; is positive when $\rho$ is below 0 (negative correlation); and is negative when $\rho$ is above 0 (positive correlation). In this section, the Spearman correlation coefficient is compared with the true Pearson correlation coefficient. When this is done, the bias is expected because the Spearman correlation is not intended to recover a Pearson type correlation coefficient; it is designed to measure a separate quantity.
**`r fig_nums("biasVsRho", display="cite")`.** *Bias Versus $\rho$ for Unweighted Correlations.*
```{r biasVersusrho, echo=FALSE,fig.width=7, fig.height=5}
#bias$rmse <- sqrt( (bias$est - bias$rho)^2 )
#bias$bias <- bias$est - bias$rho
#aggbias <- summaryBy(bias + rmse ~ n + rho + type, data=bias, FUN=mean, na.rm=TRUE)
xyplot(bias.mean ~ rho|type,
data=aggbias,
groups=n,
type=c("l","g"),
ylab="Bias",
xlab=expression(rho),
scales=list(x=list(cex=0.7), y=list(cex=0.7)),
auto.key=list(lines=TRUE, points=FALSE, space="right", cex=0.7),
par.settings=list(superpose.line=list(lwd=2), plot.line=list(lwd=2)))
```
\newpage
`r fig_nums("rmseVsRho", display="cite")` shows the RMSE as a function of $\rho$. All of the correlation coefficients have a uniform RMSE as a function of $\rho$ near $\rho=0$ that decreases near $|\rho|=1$. All plots also show a decrease in RMSE as $n$ increases. This plot shows that there are no appreciable RMSE differences as a function of $\rho$. In addition, it shows that our attention to the MLE correlation of -1 or 1 at edge cases did not make the RMSE much worse in the neighborhood of the edges ($|\rho| \sim 1$).
**`r fig_nums("rmseVsRho", display="cite")`.** *Root Mean Square Error Versus $\rho$ for Unweighted Correlations.*
```{r rmseVersusrho, echo=FALSE,fig.width=7, fig.height=3.5}
xyplot(rmse.mean ~ rho|type,
data=aggbias,
groups=n,
scales=list(y=list(log=10, cex=0.7), x=list(cex=0.7)),
ylab="RMSE",
xlab=expression(rho),
type=c("l","g"),
auto.key=list(lines=TRUE, points=FALSE, space="right", cex=0.7),
par.settings=list(superpose.line=list(lwd=2), plot.line=list(lwd=2)))
```
#### Consistency of the correlations
`r fig_nums("rmseVsN", display="cite")` shows the RMSE as a function of $n$. The purpose of this plot is not to show an individual value but to show that the estimator is consistent. The plot shows a slope of about $-\frac{1}{2}$ for the Pearson, polychoric, and polyserial correlations. This is consistent with the expected first order convergence for each correlation coefficient under the assumptions of this simulation. Results for the Spearman also show approximate first order convergence but the slope increases slightly as $n$ increases. Again, the Spearman is not estimating the same quantity as the Pearson and so is expected to diverge.
The plot also shows that the RMSE is less than 0.1 for all methods when $n>100$.
**`r fig_nums("rmseVsN", display="cite")`.** *Root Mean Square Error Versus sample size for Unweighted Correlations.*
```{r rmse Versus n, echo=FALSE,fig.width=7, fig.height=3.5}
#aggbias2 <- summaryBy(rmse ~ n+type, data=bias, FUN=mean, na.rm=TRUE)
xyplot(rmse.mean ~ n,
groups=type,
data=aggbias2,
ylab="RMSE",
xlab="n",
scales=list(y=list(log=10, cex=0.7), x=list(log=10, cex=0.7)),
type=c("l","g"),
auto.key=list(lines=TRUE, points=FALSE, space="right", cex=0.7),
par.settings=list(superpose.line=list(lwd=2), plot.line=list(lwd=2)))
```
#### Computing Time
`r fig_nums("speedi", display="cite")` shows the mean time (in seconds) to compute a single correlation coefficient as a function of $n$, by type of correlation coefficient. The plot shows linearly rising computation times with slopes of about one. This is consistent with a linear computation cost. Using Big O notation, the computation cost is, in the range shown, O($n$). The slope for the Spearman is slightly steeper, and the algorithm involves an O($n \mathrm{lg}(n)$) sort, so this is, again, expected.
**`r fig_nums("speedi", display="cite")`.** *Computation time.*
```{r time Versus n, echo=FALSE,fig.width=7, fig.height=4}
# agg <- summaryBy(t ~ n + type, data=ntime, FUN=mean, na.rm=TRUE)
# agg$t.mean <- ifelse(agg$t.mean==0, 0.001,agg$t.mean)
xyplot(t.mean ~ n,
data=aggTime,
scales=list(y=list(log=10, cex=0.7), x=list(log=10, cex=0.7)),
groups=type,
type=c("l","g"),
ylab="Computing time (s)",
xlab="n",
auto.key=list(lines=TRUE, points=FALSE, space="right", cex=0.7),
par.settings=list(superpose.line=list(lwd=2), plot.line=list(lwd=2)))
```
## Simulation study of weighted correlations
When complex sampling (other than simple random sampling with replacement) is used, unweighted correlations may or may not be consistent. In this section the consistency of the weighted coefficients is examined.
When generating simulated data, decisions about the generating functions have to be made, and these decisions affect how the results are interpreted. For the weighted case, if these decisions make the higher weight cases systematically different from the lower weight cases, then the test is more informative about the role of weights. Thus, while it is not reasonable to always assume that there is a difference between the high and low weight cases, the assumption (used in the simulations below) that there is an association between the weights and the correlation coefficients serves as a more robust test of the methods in this package.
## Results of weighted correlation simulations
Simulations are carried out in the same fashion as previously described but include a few extra steps to accommodate weights. The following changes were made (a sketch of the resulting sampling scheme appears after the list):
* Weights are assigned according to $w_i = (x_i-y_i)^2 + 1$, and the probability of inclusion in the sample was then $Pr_i = \frac{1}{w_i}$.
* For each unit, a uniformly distributed random number was drawn. When that value was less than the probability of inclusion ($Pr_i$), the unit was included.
Units were generated until $n$ units were in the sample.
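The sketch below illustrates this sampling scheme (names are illustrative; the discretization of the sampled units then proceeds as in the unweighted simulation).
```{r weightedSampleSketch, eval=FALSE}
# Sketch: draw one weighted sample of size n under the scheme described above
weightedSample <- function(rho, n) {
  x <- y <- w <- numeric(0)
  while (length(x) < n) {
    xi <- rnorm(1)
    yi <- rho * xi + sqrt(1 - rho^2) * rnorm(1)
    wi <- (xi - yi)^2 + 1         # weight; probability of inclusion is 1/wi
    if (runif(1) < 1 / wi) {      # keep the unit with probability 1/wi
      x <- c(x, xi); y <- c(y, yi); w <- c(w, wi)
    }
  }
  list(x = x, y = y, w = w)
}
```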
Two simulations were run. The first shows the mean absolute deviation (MAD)
$$MAD=\frac{1}{n}\sum_{i=1}^n |r_i - \rho_i|$$
as a function of $\rho$ and was run for $n=100$ and $\rho \in \left( -0.99, -0.95, -0.90, -0.85, ..., 0.95, 0.99 \right)$, with 100 iterations run for each value of $\rho$.
The following plot shows the MAD for the weighted and unweighted results as a function of $\rho$ when $n=100$. This shows that, for values of $\rho$ near zero and under our simulation assumptions, the weighted correlation performs better than (that is, has lower MAD than) the unweighted correlation for all but the Spearman correlation. Over the entire range, the difference between the two is never such that the unweighted has a lower MAD. Thus, under the simulated conditions at least, the weighted correlation has lower or approximately equal MAD for every value of the true correlation coefficient ($\rho$).
**`r fig_nums("rmseVsRho2", display="cite")`.** *Mean Absolute Deviation Versus $\rho$ (Weighted).*
```{r wgt Versus rho plot, echo=FALSE,fig.width=7, fig.height=5.5}
# wgt <- wgtvrho
# wgt$absdrho <- abs(wgt$est - wgt$rho)
#
# agg <- summaryBy(absdrho ~ rho + usew + type, data=wgt, FUN=mean, na.rm=TRUE)
# agg$weight <- ifelse(agg$usew, "Weighted", "Unweighted")
xyplot(absdrho.mean ~ rho|type,
data=aggWgtvrho,
groups=weight,
scales=list(y=list(log=10, cex=0.7), x=list(cex=0.7)),
type=c("l","g"),
ylab="MAD",
xlab=expression(rho),
auto.key=list(lines=TRUE, points=FALSE, space="right", cex=0.7),
par.settings=list(superpose.line=list(lwd=2), plot.line=list(lwd=2)))
```
The second simulation (shown in `r fig_nums("rmseVsN2", display="cite")`) used the same values of $\rho$ with $n \in \{10, 100, 1000, 10000 \}$ and shows how RMSE and sample size are related. In particular, it shows first order convergence of the weighted Pearson, polyserial, and polychoric correlation coefficients.
For the previous plots the calculated Spearman correlation coefficient was compared with the generating Pearson correlation coefficient. For this plot only, the Spearman correlation coefficient is compared with the true Spearman correlation coefficient. This is because the Spearman coefficient is not attempting to estimate the Pearson correlation. To do this, the simulation is modified slightly. A population of data is generated and the true Spearman correlation coefficient is calculated as the population coefficient.^[The R `stats` package `cor` function is used to calculate the population Spearman correlation coefficient; this results in an unweighted coefficient, which is appropriate for the population parameter.] Then, a sample from the population with varying probability, as described in the weighted simulation section, is used to calculate the sample Spearman correlation coefficient. Then the root mean squared difference between the sample and population coefficients is calculated as with the Pearson--except that the population Spearman correlation coefficient is used in place of the Pearson correlation coefficient ($\rho$).
Thus, the results in `r fig_nums("rmseVsN2", display="cite")` show that, when compared to the true Spearman correlation coefficient, the weighted Spearman correlation coefficient is consistent.
In all cases the RMSE is lower for the weighted than for the unweighted correlation. Again, the fact that the simulations show that the unweighted correlation coefficient is not consistent does not imply that it will always be that way--only that it is possible for these coefficients to be inconsistent.
**`r fig_nums("rmseVsN2", display="cite")`.** *Root Mean Square Error Versus $\rho$ (Polyserial, Pearson, Polychoric panels) or Population Spearman correlation coefficient (Spearman panel) for Weighted Correlations*
```{r wgt v n plot, echo=FALSE,fig.width=7, fig.height=5.5}
# wgtvn <- wgtvn[wgtvn$type!= "Spearman",]
#
# wgt <- rbind(wgtvn, spear)
# wgt$mserho <- (wgt$est - wgt$rho)^2
#
# agg <- summaryBy(mserho ~ n + usew + type, data=wgt, FUN=mean, na.rm=TRUE)
# agg$rmserho <- sqrt(agg$mserho)
# agg$weight <- ifelse(agg$usew, "Weighted", "Unweighted")
xyplot(rmserho ~ n|type,
data=aggWgtvn,
groups=weight,
scales=list(y=list(log=10, cex=0.7), x=list(log=10, cex=0.7)),
type=c("l","g"),
ylab="RMSE",
xlab="n",
auto.key=list(lines=TRUE, points=FALSE, space="right", cex=0.7),
par.settings=list(superpose.line=list(lwd=2), plot.line=list(lwd=2)))
```
# Conclusion
Overall the simulations show first order convergence for each unweighted correlation coefficient with an approximately linear computation cost. Further, under our simulation assumptions, the weighted correlation performs better than (has lower MAD or RMSE than) the unweighted correlation for all correlation coefficients.
We show the first order convergence of the weighted Pearson, polyserial, and polychoric correlation coefficients. The Spearman is shown to not consistently estimate the population Pearson correlation coefficient but is shown to consistently estimate the population Spearman correlation coefficient--under the assumptions of our simulation.
# Appendix: Proof of consistency of the Horvitz-Thompson (HT) estimator of a mean
An HT estimator of a sum takes the form
$$\hat{Y} = \sum_{i=1}^n \frac{1}{\pi_i} y_i$$
where there are $n$ sampled units from a population of $N$ units, each unit has a value $y_i \in \mathbb{R}$, each unit is sampled with probability $\pi_i$, and $\hat{Y}$ is the estimate of the population total $Y$. Here there is no assumed covariance between the sampling of unit $i$ and unit $j$, and the inverse probability is also the unit's weight $w_i$, so that an alternative specification of the estimator is
$$\hat{Y} = \sum_{i=1}^n w_i y_i \ .$$
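As a one-line illustration, given sampled values `y` and weights `w` (the inverse inclusion probabilities), the HT estimate of the total and the corresponding weighted (Hajek) estimate of the mean are:
```{r htSketch, eval=FALSE}
Yhat <- sum(w * y)           # Horvitz-Thompson estimate of the population total
ybar <- sum(w * y) / sum(w)  # weighted (Hajek) estimate of the population mean
```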
|
/scratch/gouwar.j/cran-all/cranData/wCorr/vignettes/wCorrFormulas.Rmd
|
#' @title Allison and Foster index
#'
#' @description Computes Allison and Foster inequality measure of a given variable taking into account weights.
#'
#' @param X is a data vector (numeric or ordered factor)
#' @param W is a vector of weights
#' @param norm (logical). If TRUE (default) then index is divided by a maximum possible value which is a difference between maximum and minimum of X
#'
#' @importFrom stats aggregate
#'
#' @return The value of Allison and Foster coefficient.
#'
#' @rdname Allison_and_Foster
#'
#' @details Let \eqn{c=(c_{1},...,c_{n})} be the vector of categories in increasing order, \eqn{m} be the median category and \eqn{p_i} be the share of the \eqn{i}-th category. The following index was proposed by Allison and Foster (2004):
#' \deqn{AF = \frac{\sum_{i=m}^n c_{i} p_{i} }{\sum_{i=m}^n p_{i}} - \frac{\sum_{i=1}^{m-1} c_{i} p_{i}}{\sum_{i=1}^{m-1} p_{i}}}
#' Note that the above formula is valid only for numerical values. Thus, in order to compute AF for an ordered factor, X is converted to a numeric variable.
#'
#' @references Allison R. A., Foster J E.: (2004) Measuring health inequality using qualitative data, Journal of Health Economics
#'
#' @examples
#' # Compare weighted and unweighted result
#' X=1:10
#' W=1:10
#' AF(X)
#' AF(X,W)
#'
#' data(Well_being)
#' # Allison and Foster index for health assessment with sample weights
#' X=Well_being$V11
#' W=Well_being$Weight
#' AF(X,W)
#'
#'
#' @export
AF=function(X,W=rep(1,length(X)),norm=TRUE)
{
ind=which(!is.na(W) & !is.na(X))
if(length(ind)==0)return('Input with NAs only')
W=W[ind];X=X[ind]
if(!is.numeric(X) & !is.ordered(X))return('X must be numeric or ordered factor')
if(length(unique(X))==1)return(0)
X=as.numeric(X)
tab=aggregate(W,by=list(X),FUN=sum)
Fx=(cumsum(tab$x)/sum(tab$x))
SW=cumsum(tab$x)
min=min(X)
max=max(X)
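  # Split the weight of the median category between the two halves of the distribution:
  # a - categories strictly below the (weighted) median, A - index of the last of them,
  # b - part of the median category's weight assigned to the lower half,
  # c - remaining part of the median category's weight assigned to the upper half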
a=which(Fx<0.5)
if(length(a)!=0)
{A=max(a)
b=floor(sum(tab$x)/2)-SW[max(a)]
c=tab$x[max(a)+1]-b }else{A=0
b=floor(sum(tab$x)/2)
c=tab$x[1]-b}
AF=((sum(tab$Group.1[-c(a,A+1)]*tab$x[-c(a,A+1)])+tab$Group.1[A+1]*c)/(sum(tab$x[-c(a,A+1)])+c))-((sum(tab$Group.1[a]*tab$x[a])+tab$Group.1[A+1]*b)/(sum(tab$x[a])+b))
if(norm){return(AF/(max-min))}else{return(AF)}
}
#' @title Atkinson index
#'
#' @description Computes Atkinson inequality measure of a given variable taking into account weights.
#'
#' @param X is a data vector
#' @param W is a vector of weights
#' @param e is a coefficient of aversion to inequality, by default 1
#'
#' @return The value of Atkinson coefficient.
#'
#' @rdname Atkinson
#'
#' @details Atkinson coefficient with respect to parameter \eqn{\epsilon} is given by
#' \deqn{1-\frac{1}{\mu}{(\frac{1}{n}\sum_{i=1}^{n} x_{i}^{1-\epsilon} )}^{\frac{1}{1-\epsilon}}}
#' for \eqn{\epsilon \neq 1} and
#' \deqn{1-\frac{1}{\mu}{(\prod_{i=1}^{n} x_i)}^{\frac{1}{n}}}
#' for \eqn{\epsilon=1}.
#'
#' @references Atkinson A. B.: (1970) On the measurement of inequality, Journal of Economic Theory
#'
#' @examples
#' # Compare weighted and unweighted result
#' X=1:10
#' W=1:10
#' Atkinson(X)
#' Atkinson(X,W)
#'
#' data(Tourism)
#' # Atkinson index for Total expenditure with sample weights
#' X=Tourism$`Total expenditure`
#' W=Tourism$`Sample weight`
#' Atkinson(X,W)
#'
#' @export
Atkinson=function(X,W=rep(1,length(X)),e=1)
{
ind=which(!is.na(W) & !is.na(X))
if(length(ind)==0)return('Input with NAs only')
W=W[ind];X=X[ind]
if(!is.numeric(X) | !is.numeric(W))return('X and W must be numeric')
if(length(unique(X))==1)return(0)
if(e==1){A=1-prod(X^(W/sum(W)))/ (sum(W*X)/sum(W))}else{
A=1-((1/sum(W)*sum(W*(X^(1-e))))^(1/(1-e)))/(sum(W*X)/sum(W))}
return(A)
}
#' @title Generalized entropy index
#'
#' @description Computes generalized entropy index of a given variable taking into account weights.
#'
#' @param X is a data vector
#' @param W is a vector of weights
#' @param parameter is a entropy parameter
#'
#'
#' @return The value of generalized entropy index
#'
#' @rdname Entropy
#'
#' @details Entropy coefficient with respect to parameter \eqn{\alpha} is equal to Theil_L(X,W) whenever \eqn{\alpha=0},
#' is equal to Theil_T(X,W) whenever \eqn{\alpha=1}, and for any other \eqn{\alpha} we have
#' \deqn{GE(\alpha) = \frac{1}{\alpha(\alpha-1)W}\sum_{i=1}^{n}w_{i}\left(\left(\frac{x_{i}}{\mu}\right)^\alpha-1\right)}
#' where \eqn{W} is a sum of weights and \eqn{\mu} is the arithmetic mean of \eqn{x_{1},...,x_{n}}.
#'
#' @references Shorrocks A. F.: (1980) The Class of Additively Decomposable Inequality Measures. Econometrica
#' @references Pielou E.C.: (1966) The measurement of diversity in different types of biological collections. Journal of Theoretical Biology
#'
#' @examples
#' # Compare weighted and unweighted result
#' X=1:10
#' W=1:10
#' Entropy(X)
#' Entropy(X,W)
#'
#' data(Tourism)
#' # Generalized entropy index for Total expenditure with sample weights
#' X=Tourism$`Total expenditure`
#' W=Tourism$`Sample weight`
#' Entropy(X,W)
#'
#'
#' @export
Entropy=function (X,W=rep(1,length(X)), parameter = 0.5)
{
ind=which(!is.na(W) & !is.na(X))
if(length(ind)==0)return('Input with NAs only')
W=W[ind];X=X[ind]
if(!is.numeric(X) | !is.numeric(W))return('X and W must be numeric')
if(length(unique(X))==1)return(0)
if (parameter == 0)
e <- Theil_L(X,W)
else if (parameter == 1)
e <- Theil_T(X,W)
else {
k <- parameter
e <- W*(X/(sum(W*X)/sum(W)))^k
e <- sum(e - 1)/(k * (k - 1))/sum(W)
}
e
}
#' @title Kolm index
#'
#' @description Computes Kolm inequality measure of a given variable taking into account weights.
#'
#' @param X is a data vector
#' @param W is a vector of weights
#' @param parameter is a Kolm parameter
#' @param scale method of data scaling (None, Normalization, Unitarization, Standardization)
#'
#' @importFrom stats na.omit
#'
#' @return The value of Kolm coefficient.
#'
#' @rdname Kolm
#'
#' @details Kolm index with parameter \eqn{\alpha} is defined as:
#' \deqn{K = \frac{1}{\alpha} \log\left( \frac{1}{n} \sum_{i=1}^n \exp(\alpha (\mu - x_{i})) \right)}
#'
#' Kolm index is scale-dependent. Basic normalization methods can be applied before final computation.
#'
#' @references Kolm S. C.: (1976) Unequal inequalities I and II
#' @references Kolm S. C.: (1996) Intermediate measures of inequality
#' @references Chakravarty S. R.: (2009) Inequality, Polarization and Poverty e-ISBN 978-0-387-79253-8
#'
#'
#'
#' @examples
#' # Compare weighted and unweighted result
#' X=1:10
#' W=1:10
#' Kolm(X)
#' Kolm(X,W)
#'
#' # Compare raw and standardized data.
#' Kolm(X,W)
#' Kolm(X,W, scale ="Standardization")
#'
#' # Changing units has an impact on the final result
#' Kolm(X)
#' Kolm(10*X)
#'
#' # Changing units has no impact on the final result with standardized data
#' Kolm(X,scale ="Standardization")
#' Kolm(10*X,scale ="Standardization")
#'
#' @export
Kolm=function (X,W=rep(1,length(X)), parameter = 1, scale = "None")
{
ind=which(!is.na(W) & !is.na(X))
if(length(ind)==0)return('Input with NAs only')
W=W[ind];X=X[ind]
if(!is.numeric(X) | !is.numeric(W))return('X and W must be numeric')
if(length(unique(X))==1)return(0)
if(scale=="Standardization"){X=(X-mean(X))/sd(X)}
if(scale=="Unitarization"){X=(X-min(X))/(max(X)-min(X))}
if(scale=="Normalization"){X=X/sqrt(sum(X^2))}
W <- W/sum(W)
KM <- parameter * (sum(W*X) - X)
KM <- sum(W*(exp(KM)))
KM <- (1/parameter) * log(KM)
return(KM)
}
#' @title Ricci and Schutz index
#'
#' @description Computes Ricci and Schutz inequality measure of a given variable taking into account weights.
#'
#' @param X is a data vector
#' @param W is a vector of weights
#'
#' @importFrom stats na.omit
#'
#' @return The value of Ricci and Schutz coefficient.
#'
#' @rdname RicciSchutz
#'
#' @details In the case of an empirical distribution with \eqn{n} elements, where \eqn{y_{i}} denotes the wealth of household \eqn{i} and \eqn{\overline{y}} the sample average, the Ricci and Schutz coefficient can be expressed as:
#' \deqn{RS = \frac{1}{2n} \sum_{i=1}^{n} \frac{\mid y_{i} - \overline{y} \mid}{\overline{y}}}
#'
#'
#' @references Coulter P. B.: (1989) Measuring Inequality ISBN 0-8133-7726-9
#' @references Eliazar I. I., Sokolov I. M.: (2010) Measuring statistical heterogeneity: The Pietra index
#' @references Costa R. N., Pérez-Duarte S.: (2019) Not all inequality measures were created equal, Statistics Paper Series, No 31
#' @examples
#' # Compare weighted and unweighted result
#' X=1:10
#' W=1:10
#' RicciSchutz(X)
#' RicciSchutz(X,W)
#'
#' data(Tourism)
#' #Ricci and Schutz index for Total expenditure with sample weights
#' X=Tourism$`Total expenditure`
#' W=Tourism$`Sample weight`
#' RicciSchutz(X,W)
#'
#'
#' @export
RicciSchutz=function (X,W=rep(1,length(X)))
{
ind=which(!is.na(W) & !is.na(X))
if(length(ind)==0)return('Input with NAs only')
W=W[ind];X=X[ind]
if(!is.numeric(X) | !is.numeric(W))return('X and W must be numeric')
if(length(unique(X))==1)return(0)
d <- abs(X - (sum(W*X)/sum(W)))
d <- (sum(W*d)/sum(W))/(2 * (sum(W*X)/sum(W)))
return(d)
}
#' @title Coefficient of Variation
#'
#' @description Computes Coefficient of Variation inequality measure of a given variable taking into account weights.
#'
#' @param X is a data vector
#' @param W is a vector of weights
#' @param square logical, argument of the function CoefVar, for details see below
#'
#' @importFrom stats na.omit
#'
#' @return The value of CoefVar coefficient.
#'
#' @rdname CoefVar
#'
#' @details Coefficient of variation is given by:
#' \deqn{CV= \frac{\sigma}{\mu}\times 100}
#' where \eqn{\sigma} is a standard deviation and \eqn{\mu} is arithmetic mean.
#'
#'
#' @references Sheret M.: (1984) Social Indicators Research, An International and Interdisciplinary Journal for Quality-of-Life Measurement, Vol. 15, No. 3, Oct. ISSN 03038300
#' @references Coulter P. B.: (1989) Measuring Inequality ISBN 0-8133-7726-9
#'
#' @examples
#' # Compare weighted and unweighted result
#' X=1:10
#' W=1:10
#' CoefVar(X)
#' CoefVar(X,W)
#'
#' data(Tourism)
#' #Coefficient of variation for Total expenditure with sample weights
#' X=Tourism$`Total expenditure`
#' W=Tourism$`Sample weight`
#' CoefVar(X,W)
#'
#'
#' @export
CoefVar=function (X,W=rep(1,length(X)), square = FALSE)
{
ind=which(!is.na(W) & !is.na(X))
if(length(ind)==0)return('Input with NAs only')
W=W[ind];X=X[ind]
if(!is.numeric(X) | !is.numeric(W))return('X and W must be numeric')
if(length(unique(X))==1)return(0)
w.m <- sum(W*X)/sum(W)
V <- sqrt(sum(W*(X-w.m)^2)/sum(W))/w.m
if(square){return(V^2)}else{return(V)}
}
#' @title Gini coefficient
#'
#' @description Computes Gini coefficient of a given variable taking into account weights.
#'
#' @param X is a data vector
#' @param W is a vector of weights
#'
#' @return The value of Gini coefficient.
#'
#' @rdname Gini
#'
#' @details Gini coefficient is given by:
#' \deqn{G = \frac{ \sum_{i=1}^n \sum_{j=1}^n \mid x_{i} - x_{j} \mid}{2n^{2} \overline{x}}}
#'
#' @references Dixon P. M., Weiner, J., Mitchell-Olds, T., and Woodley, R.: (1987) Bootstrapping the Gini Coefficient of Inequality. Ecology , Volume 68 (5)
#' @references Firebaugh G.: (1999) Empirics of World Income Inequality, American Journal of Sociology
#' @references Deininger K.; Squire L.: (1996) A New Data Set Measuring Income Inequality, The World Bank Economic Review, Vol. 10, No. 3
#' @examples
#' # Compare weighted and unweighted result
#' X=1:10
#' W=1:10
#' Gini(X)
#' Gini(X,W)
#'
#' data(Tourism)
#' #Gini coefficient for Total expenditure with sample weights
#' X=Tourism$`Total expenditure`
#' W=Tourism$`Sample weight`
#' Gini(X,W)
#'
#'
#' @export
Gini=function(X,W=rep(1,length(X)))
{
ind=which(!is.na(W) & !is.na(X))
if(length(ind)==0)return('Input with NAs only')
W=W[ind];X=X[ind]
if(!is.numeric(X) | !is.numeric(W))return('X and W must be numeric')
if(length(unique(X))==1)return(0)
  # weighted mean of X
  mu=sum(W*X)/sum(W)
  # weighted Gini: pairwise absolute differences |x_i - x_j| weighted by w_i*w_j,
  # normalised by 2*(sum of weights)^2 times the weighted mean
  G=sum(abs(outer(X,X,"-"))*outer(W,W))/(2*sum(W)^2*mu)
return(G)
}
#' @title Hoover index
#'
#' @description Computes Hoover inequality measure of a given variable taking into account weights.
#'
#' @param X is a data vector
#' @param W is a vector of weights
#'
#' @return The value of Hoover coefficient.
#'
#' @rdname Hoover
#'
#' @details Let \eqn{x_{i}} be the income of the \eqn{i}-th person and \eqn{\overline{x}} be the mean income. Then the Hoover index \eqn{H} is:
#' \deqn{H={\frac {1}{2}}{\frac {\sum_{i}|x_{i}-{\overline{x}}|}{\sum_{i}x_{i}}}}
#'
#'
#' @references Hoover E. M. Jr.: (1936) The Measurement of Industrial Localization, The Review of Economics and Statistics, 18
#' @references Hoover E. M. Jr.: (1984) An Introduction to Regional Economics, ISBN 0-07-554440-7
#'
#' @examples
#' # Compare weighted and unweighted result
#' X=1:10
#' W=1:10
#' Hoover(X)
#' Hoover(X,W)
#'
#' data(Tourism)
#' #Hoover index for Total expenditure with sample weights
#' X=Tourism$`Total expenditure`
#' W=Tourism$`Sample weight`
#' Hoover(X,W)
#'
#'
#' @export
Hoover=function(X,W=rep(1,length(X)))
{
ind=which(!is.na(W) & !is.na(X))
if(length(ind)==0)return('Input with NAs only')
W=W[ind];X=X[ind]
if(!is.numeric(X) | !is.numeric(W))return('X and W must be numeric')
if(length(unique(X))==1)return(0)
H=(1/2)*(sum(W*abs(X - (sum(W*X)/sum(W))))/sum(X*W))
return(H)
}
#' @title Leti index
#'
#' @description Computes Leti inequality measure of a given variable taking into account weights.
#'
#' @param X is a data vector (ordered factor or numeric)
#' @param W is a vector of weights
#' @param norm (logical). If TRUE (default) then Leti index is divided by a maximum possible value which is \eqn{(k-1)/2} where \eqn{k} in a number of categories.
#'
#' @importFrom stats aggregate
#'
#' @return The value of Leti coefficient.
#'
#' @rdname Leti
#'
#' @details Let \eqn{n_{i}} be the number of individuals in category \eqn{i} and let \eqn{N} be the total sample size.
#' Cumulative distribution is given by \eqn{F_{i} = \frac{\sum_{j=1}^{i} n_{j}}{N}}. Leti index is defined as:
#' \deqn{L =2 \sum_{i=1}^{k-1} F_{i}(1-F_{i})}
#'
#' @references Leti G.: (1983). Statistica descrittiva, il Mulino, Bologna. ISBN: 8-8150-0278-2
#'
#'
#' @examples
#' # Compare weighted and unweighted result
#' X=1:10
#' W=1:10
#' Leti(X)
#' Leti(X,W)
#'
#' data(Tourism)
#' #Leti index for Total expenditure with sample weights
#' X=Tourism$`Total expenditure`
#' W=Tourism$`Sample weight`
#' Leti(X,W)
#'
#'
#' @export
Leti=function(X,W=rep(1,length(X)),norm=T)
{
ind=which(!is.na(W) & !is.na(X))
if(length(ind)==0)return('Input with NAs only')
W=W[ind];X=X[ind]
if(!is.numeric(X) & !is.ordered(X))return('X must be numeric or ordered factor')
if(length(unique(X))==1)return(0)
tab=aggregate(W,by=list(X),FUN=sum)
Fx=(cumsum(tab$x)/sum(tab$x))
Leti=2*sum(Fx*(1-Fx))
if(norm){Leti=Leti*2/(length(Fx)-1)}
return(Leti)
}
#' @title Jenkins, Cowell and Flachaire
#'
#' @description Computes Jenkins as well as Cowell and Flachaire inequality measure of a given variable taking into account weights.
#'
#' @param X is a data vector
#' @param W is a vector of weights
#' @param alfa is the Jenkins coefficient parameter
#'
#' @importFrom stats aggregate
#'
#' @return The value of Jenkins, Cowell and Flachaire coefficient.
#'
#' @rdname Jenkins
#'
#' @details Jenkins coefficient is given by:
#' \deqn{J=1-\sum_{j=0}^{K-1} (p_{j+1}-p_{j})(GL_{j}+GL_{j+1})}
#'
#' @details where GL is Generalized Lorenz curve.
#'
#' @details Cowell and Flachaire coefficient with alpha parameter is given by:
#' \deqn{I(\alpha)=\frac{1}{\alpha(\alpha-1)}(\frac{1}{N}\sum_{i=1}^{N}s_{i}^{\alpha}-1)}
#'
#' @details for \eqn{\alpha \in (0,1)}, and
#' \deqn{I(0)=-\frac{1}{N}\sum_{i=1}^{N} log(s_{i})}
#'
#' @details for \eqn{\alpha = 0}.
#'
#'
#'
#' @references Jenkins S. P. and P. J. Lambert: (1997) Three ‘I’s of Poverty Curves, with an Analysis of U.K. Poverty Trends
#' @references Cowell F. A.: (2000) Measurement of Inequality, Handbook of Income Distribution
#' @references Cowell F. A., Flachaire E.: (2017) Inequality with Ordinal Data
#'
#' @examples
#' # Compare weighted and unweighted result
#' X=1:10
#' W=1:10
#' Jenkins(X)
#' Jenkins(X,W)
#'
#' data(Tourism)
#' #Jenkins, Cowell and Flachaire coefficients for Total expenditure with sample weights
#' X=Tourism$`Total expenditure`
#' W=Tourism$`Sample weight`
#' Jenkins(X,W)
#'
#'
#' @export
Jenkins=function(X,W=rep(1,length(X)), alfa=0.8)
{
ind=which(!is.na(W) & !is.na(X))
if(length(ind)==0)return('Input with NAs only')
W=W[ind];X=X[ind]
if(!is.numeric(X) | !is.numeric(W))return('X and W must be numeric')
if(length(unique(X))==1)return(0)
tab=aggregate(W,by=list(X),FUN=sum)
yy=as.data.frame(tab)
yy$fk=yy$x/sum(yy$x) #PDF
yy$Fk=cumsum(yy$x)/sum(yy$x) #CDF
df=as.data.frame(tab$Group.1)
df = merge(df,yy[,c("Group.1","fk","Fk")],by.y="Group.1",by.x = "tab$Group.1",all.x = TRUE)
if(alfa!=0)
{Cowell_and_Flachaire=1/(alfa*(alfa-1))*(1/nrow(df)*sum((df$Fk)^alfa)-1)
}else{
Cowell_and_Flachaire=-1/nrow(df)*sum(log(df$Fk))}
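  # Jenkins index: generalized Lorenz curve of the status variable s_i = F(x_i)
  # P_I - cumulative population shares, GL - generalized Lorenz ordinates (df is sorted by X)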
P_I=seq(1:nrow(df))/nrow(df)
GL=1/nrow(df)*cumsum(df$Fk)
df$P_I=P_I
df$GL=GL
GL_J=aggregate(df$GL,by=list(df$`tab$Group.1`),FUN=max)
P_I_J=aggregate(df$P_I,by=list(df$`tab$Group.1`),FUN=max)
JJ=rbind(c(0,0,0),cbind(GL_J,P_I_J$x))
colnames(JJ)=c("Group","GL_J","PI_J")
J=1-sum((JJ$PI_J[2:nrow(JJ)]-JJ$PI_J[1:(nrow(JJ)-1)])*(JJ$GL_J[1:(nrow(JJ)-1)]+JJ$GL_J[2:nrow(JJ)]))
wynik=matrix(c(J,Cowell_and_Flachaire),1,2)
colnames(wynik)=c("Jenkins","Cowell_and_Flachaire")
return(wynik)
}
#' @title Palma index
#'
#' @description Palma proportion - originally the ratio of the total income of the 10% richest people to the 40% poorest people.
#'
#' @param X is a data vector (numeric or ordered factor)
#' @param W is a vector of weights
#'
#' @importFrom stats aggregate
#'
#' @return The value of Palma coefficient.
#'
#' @rdname Palma
#'
#' @details Palma index is calculated by the following formula:
#' \deqn{Palma =\frac{H}{L}}
#' where \eqn{H} is share of 10% of the highest values,
#' \eqn{L} is share of 40% of the lowest values.
#'
#' @references Cobham A., Sumner A.: (2013) Putting the Gini Back in the Bottle? 'The Palma' as a Policy-Relevant Measure of Inequality
#' @references Palma J. G.: (2011) Homogeneous middles vs. heterogeneous tails, and the end of the ‘Inverted-U’: the share of the rich is what it’s all about
#' @examples
#' # Compare weighted and unweighted result
#' X=1:10
#' W=1:10
#' Palma(X)
#' Palma(X,W)
#'
#' data(Tourism)
#' #Palma index for Total expenditure with sample weights
#' X=Tourism$`Total expenditure`
#' W=Tourism$`Sample weight`
#' Palma(X,W)
#'
#'
#'
#' @export
Palma=function(X,W=rep(1,length(X)))
{
ind=which(!is.na(W) & !is.na(X))
if(length(ind)==0)return('Input with NAs only')
W=W[ind];X=X[ind]
if(!is.numeric(X) | !is.numeric(W))return('X and W must be numeric')
if(length(unique(X))==1)return(1)
W=W[order(X)];X=X[order(X)]
Fx=(cumsum(W)/sum(W))
nominator=sum(X[which(Fx>=0.9)])
denominator=ifelse(min(Fx)<=0.4,sum(X[which(Fx<=0.4)]),X[1])
# tab=aggregate(W,by=list(X),FUN=sum)
# Fx=(cumsum(tab$x)/sum(tab$x))
# SW=cumsum(tab$x)
# a=which(Fx<0.4)
# if(length(a)!=0){A=max(a)
# b=floor(sum(tab$x)*0.4)-SW[max(a)]}else{A=0
# b=floor(sum(tab$x)*0.4)}
# c=which(Fx<0.9)
# if(length(c)!=0){C=max(c)
# d=tab$x[max(c)+1]-(floor(sum(tab$x)*0.9)-SW[max(c)])}else{C=0
# d=tab$x[1]-(floor(sum(tab$x)*0.9))}
# Palma=(sum(tab$Group.1[-c(c,C+1)]*tab$x[-c(c,C+1)])+tab$Group.1[C+1]*d)/(sum(tab$Group.1[a]*tab$x[a])+tab$Group.1[A+1]*b)
Palma=nominator/denominator
return(Palma)
}
#' @title Proportion 20:20
#'
#' @description 20:20 ratio - originally the ratio of the total income of the 20% richest people to the 20% poorest people.
#'
#' @param X is a data vector (numeric or ordered factor)
#' @param W is a vector of weights
#'
#' @importFrom stats aggregate
#'
#' @return The value of 20:20 ratio coefficient.
#'
#' @rdname Prop20_20
#'
#' @details 20:20 ratio is calculated as follows:
#' \deqn{Prop =\frac{H}{L}}
#' where \eqn{H} is share of 20% of the highest values,
#' \eqn{L} is share of 20% of the lowest values.
#'
#'
#' @references Panel Data Econometrics: Theoretical Contributions And Empirical Applications edited by Badi Hani Baltag
#' @references Notes on Statistical Sources and Methods - The Equality Trust.
#'
#'
#'
#' @examples
#' # Compare weighted and unweighted result
#' X=1:10
#' W=1:10
#' Prop20_20(X)
#' Prop20_20(X,W)
#'
#' data(Tourism)
#' #Prop20_20 proportion for Total expenditure with sample weights
#' X=Tourism$`Total expenditure`
#' W=Tourism$`Sample weight`
#' Prop20_20(X,W)
#'
#'
#' @export
Prop20_20=function(X,W=rep(1,length(X)))
{
ind=which(!is.na(W) & !is.na(X))
if(length(ind)==0)return('Input with NAs only')
W=W[ind];X=X[ind]
if(!is.numeric(X) | !is.numeric(W))return('X and W must be numeric')
if(length(unique(X))==1)return(0)
W=W[order(X)];X=X[order(X)]
Fx=(cumsum(W)/sum(W))
nominator=sum(X[which(Fx>=0.8)])
denominator=ifelse(min(Fx)<=0.2,sum(X[which(Fx<=0.2)]),X[1])
# W=aggregate(W,by=list(X),FUN=sum)
# Fx=(cumsum(tab$x)/sum(tab$x))
# SW=cumsum(tab$x)
# a=which(Fx<0.2)
# a=which(Fx<0.2)
# if(length(a)!=0){A=max(a)
# b=floor(sum(tab$x)*0.2)-SW[max(a)]}else{A=0
# b=floor(sum(tab$x)*0.2)}
# c=which(Fx<0.8)
# if(length(c)!=0){C=max(c)
# d=tab$x[max(c)+1]-(floor(sum(tab$x)*0.8)-SW[max(c)])}else{C=0
# d=tab$x[1]-(floor(sum(tab$x)*0.8))}
# Prop20_20=(sum(tab$Group.1[-c(c,C+1)]*tab$x[-c(c,C+1)])+tab$Group.1[C+1]*d)/(sum(tab$Group.1[a]*tab$x[a])+tab$Group.1[A+1]*b)
Prop20_20=nominator/denominator
return(Prop20_20)
}
#' @title Theil L
#'
#' @description Computes Theil_L inequality measure of a given variable taking into account weights.
#'
#' @param X is a data vector
#' @param W is a vector of weights
#'
#' @return The value of Theil_L coefficient.
#'
#' @rdname Theil_L
#'
#' @details Theil L index is defined as:
#' \deqn{T_{L} = T_{\alpha=0} = \frac{1}{N} \sum_{i=1}^N ln \big(\frac{\mu }{x_{i}} \big)}
#' where \deqn{\mu = \frac{1}{N} \sum_{i=1}^N x_{i}}
#'
#' @references Serebrenik A., van den Brand M.: Theil index for aggregation of software metrics values. 26th IEEE International Conference on Software Maintenance. IEEE Computer Society.
#' @references Conceição P., Ferreira P.: (2000) The Young Person’s Guide to the Theil Index: Suggesting Intuitive Interpretations and Exploring Analytical Applications
#' @references OECD: (2020) Regions and Cities at a Glance 2020, Chapter: Indexes and estimation techniques
#'
#' @examples
#' # Compare weighted and unweighted result
#' X=1:10
#' W=1:10
#' Theil_L(X)
#' Theil_L(X,W)
#'
#' data(Tourism)
#' # Theil L coefficient for Total expenditure with sample weights
#' X=Tourism$`Total expenditure`
#' W=Tourism$`Sample weight`
#' Theil_L(X,W)
#'
#'
#'
#' @export
Theil_L=function(X,W=rep(1,length(X)))
{
ind=which(!is.na(W) & !is.na(X))
if(length(ind)==0)return('Input with NAs only')
W=W[ind];X=X[ind]
if(!is.numeric(X) | !is.numeric(W))return('X and W must be numeric')
return(1/sum(W)*sum(W*log((sum(W*X)/sum(W))/X)))
}
#' @title Theil T
#'
#' @description Computes `Theil_T` inequality measure of a given variable taking into account weights.
#'
#' @param X is a data vector
#' @param W is a vector of weights
#'
#' @return The value of `Theil_T` coefficient.
#'
#' @rdname Theil_T
#'
#' @details Theil T index is defined as:
#' \deqn{T_{T} = T_{\alpha=1} = \frac{1}{N} \sum_{i=1}^N \frac{ x_{i} }{\mu} ln \big( \frac{ x_{i} }{\mu} \big)}
#' where \deqn{\mu = \frac{1}{N} \sum_{i=1}^N x_{i}}
#'
#' @references Serebrenik A., van den Brand M.: Theil index for aggregation of software metrics values. 26th IEEE International Conference on Software Maintenance. IEEE Computer Society.
#' @references Conceição P., Ferreira P.: (2000) The Young Person’s Guide to the Theil Index: Suggesting Intuitive Interpretations and Exploring Analytical Applications
#' @references OECD: (2020) Regions and Cities at a Glance 2020, Chapter: Indexes and estimation techniques
#'
#' @examples
#' # Compare weighted and unweighted result
#' X=1:10
#' W=1:10
#' Theil_T(X)
#' Theil_T(X,W)
#'
#' data(Tourism)
#' # Theil T coefficient for Total expenditure with sample weights
#' X=Tourism$`Total expenditure`
#' W=Tourism$`Sample weight`
#' Theil_T(X,W)
#'
#'
#'
#' @export
Theil_T=function(X,W=rep(1,length(X)))
{
ind=which(!is.na(W) & !is.na(X))
if(length(ind)==0)return('Input with NAs only')
W=W[ind];X=X[ind]
if(!is.numeric(X) | !is.numeric(W))return('X and W must be numeric')
return(1/sum(W)*sum(W*(X/(sum(W*X)/sum(W))*log(X/(sum(W*X)/sum(W))))))
}
#' @title Abul Naga and Yalcin index
#'
#' @description Computes Abul Naga and Yalcin inequality measure of a given variable taking into account weights.
#'
#' @param X is a data vector (numeric or ordered factor)
#' @param W is a vector of weights
#' @param a is a positive parameter. See more in details
#' @param b is a positive parameter. See more in details
#'
#' @importFrom stats aggregate
#'
#' @return The value of Abul Naga and Yalcin coefficient.
#'
#' @rdname Abul_Naga_and_Yalcin
#'
#' @details Let \eqn{m} be the median category, \eqn{n} be the number of categories and \eqn{P_{i}} be the cumulative distribution of \eqn{i}-th category.
#' The following index with respect to the parameters a and b was proposed by Abul Naga and Yalcin (2008):
#' \deqn{I=\frac{a\sum_{i<m}^{n}P_{i}-b\sum_{i\geq m}^{n}P_{i}+b(n+1-m)}{0.5(a(m-1)+b(n-m))}}
#'
#' @references Ramses H. Abul Naga and Tarik Yalcin: (2008) Inequality Measurement for ordered response health data, Journal of Health Economics 27(6);
#'
#' @examples
#' # Compare weighted and unweighted result
#' X=1:10
#' W=1:10
#' AN_Y(X)
#' AN_Y(X,W)
#'
#' data(Well_being)
#' # Abul Naga and Yalcin index for health assessment with sample weights
#' X=Well_being$V1
#' W=Well_being$Weight
#' AN_Y(X,W)
#'
#'
#' @export
AN_Y=function(X,W=rep(1,length(X)),a=1,b=1)
{
ind=which(!is.na(W) & !is.na(X))
if(length(ind)==0)return('Input with NAs only')
W=W[ind];X=X[ind]
if(!is.numeric(X) & !is.ordered(X))return('X must be numeric or ordered factor')
if(length(unique(X))==1)return(AB_Y=0)
tab=aggregate(W,by=list(X),FUN=sum)
  #cumulative distribution function
  Fx=(cumsum(tab$x)/sum(tab$x))
  #median category
  m_b=max(c(1,which(Fx<0.5)),na.rm=T) #last category below the median
m=min(which(Fx>=0.5))
sum_bm=sum(Fx[1:m_b])
sum_am=sum(Fx)-sum(Fx[1:m_b])
n=nrow(tab)
Abul_Naga_Yelcin=(a*sum_bm-b*sum_am+b*(n+1-m))/(((a*(m-1))+b*(n-m))/2)
return(Abul_Naga_Yelcin)
}
#' @title Apouey index
#'
#' @description Computes Apouey inequality measure of a given variable taking into account weights.
#'
#' @param X is a data vector (numeric or ordered factor)
#' @param W is a vector of weights
#' @param a is a positive parameter. See more in details
#' @param b is a real parameter. See more in details
#'
#' @importFrom stats aggregate
#'
#' @return The value of Apouey coefficient.
#'
#' @rdname Apouey
#'
#' @details Let \eqn{m} be the median category, \eqn{n} be the number of categories and \eqn{P_i} be the cumulative distribution of the \eqn{i}-th category. The following index was proposed by Apouey (2007):
#' \deqn{I = \alpha(\sum_{i\geq m}^{n}P_{i}-\sum_{i<m}^{n}P_{i}+m-\frac{n}{2}-1)+\beta}
#' where \eqn{\alpha} and \eqn{\beta} are given parameters with default values \eqn{\alpha=\frac{2}{1-n}} and \eqn{\beta=\frac{n}{n-1}}.
#' @references Apouey B.: (2007) Measuring health polarization with self-assessed health data, Health Economics 16; 875-894.
#'
#' @examples
#' # Compare weighted and unweighted result
#' X=1:10
#' W=1:10
#' Apouey(X,a=2,b=2)
#' Apouey(X,W,a=2,b=2)
#'
#' data(Well_being)
#' # Apouey index for health assessment with sample weights
#' X=Well_being$V1
#' W=Well_being$Weight
#' Apouey(X,W,a=2,b=2)
#'
#'
#' @export
Apouey=function(X,W=rep(1,length(X)),a=2/(1-length(W[!is.na(W) & !is.na(X)])),b=length(W[!is.na(W) & !is.na(X)])/(length(W[!is.na(W) & !is.na(X)])-1))
{
ind=which(!is.na(W) & !is.na(X))
if(length(ind)==0)return('Input with NAs only')
W=W[ind];X=X[ind]
if(!is.numeric(X) & !is.ordered(X))return('X must be numeric or ordered factor')
if(length(unique(X))==1)return(Apouey=0)
tab=aggregate(W,by=list(X),FUN=sum)
Fx=(cumsum(tab$x)/sum(tab$x))
m_b=max(c(1,which(Fx<0.5)),na.rm=T)
m=min(which(Fx>=0.5))
sum_bm=sum(Fx[1:m_b])
sum_am=sum(Fx)-sum(Fx[1:m_b])
n=nrow(tab)
Apouey=a*(sum_am-sum_bm+m-(n/2)-1)+b
return(Apouey)
}
#' @title Blair and Lacy index
#'
#' @description Computes Blair and Lacy inequality measure of a given variable taking into account weights.
#'
#' @param X is a data vector (numeric or ordered factor)
#' @param W is a vector of weights
#' @param withsqrt if TRUE function returns index given by BL2, elsewhere by BL (default). See more in details.
#'
#' @importFrom stats aggregate
#'
#' @return The value of Blair and Lacy coefficient.
#'
#' @rdname Blair_Lacy
#'
#' @details Let \eqn{m} be the median category, \eqn{n} be the number of categories and \eqn{P_i} be the cumulative distribution of \eqn{i}-th category.
#' The indices of Blair and Lacy (2000) are the following:
#' \deqn{BL = 1-\frac{\sum_{i=1}^{n-1}(P_{i}-0.5)^2}{\frac{n-1}{4}}}
#' \deqn{BL2 = 1-\left(\frac{\sum_{i=1}^{n-1}(P_{i}-0.5)^2}{\frac{n-1}{4}}\right)^{\frac{1}{2}}}
#'
#' @references Blair J, Lacy M G. (2000): Statistics of ordinal variation, Sociological Methods and Research 28(251);251-280.
#'
#' @examples
#' # Compare weighted and unweighted result
#' X=1:10
#' W=1:10
#' BL(X)
#' BL(X,W)
#'
#' data(Well_being)
#' # Blair and Lacy index for health assessment with sample weights
#' X=Well_being$V1
#' W=Well_being$Weight
#' BL(X,W)
#'
#'
#' @export
#Blair and Lacy Index
BL=function(X,W=rep(1,length(X)),withsqrt=FALSE)
{
ind=which(!is.na(W) & !is.na(X))
if(length(ind)==0)return('Input with NAs only')
W=W[ind];X=X[ind]
if(!is.numeric(X) & !is.ordered(X))return('X must be numeric or ordered factor')
if(length(unique(X))==1)return(BL=0)
tab=aggregate(W,by=list(X),FUN=sum)
Fx=(cumsum(tab$x)/sum(tab$x))
n=nrow(tab)
  # normalized sum of squared deviations of the cumulative distribution from 0.5,
  # following the formula given in @details
  BL=1-sum((Fx[1:(n-1)]-0.5)^2)/((n-1)/4)
  BL2=1-sqrt(sum((Fx[1:(n-1)]-0.5)^2)/((n-1)/4))
if(withsqrt){return(BL2)}else{return(BL)}
}
#' @title Median of ordered factor or numeric
#'
#' @description Computes median of ordered factor or numeric variable taking into account weights.
#'
#' @param X is a data vector (numeric or ordered factor)
#' @param W is a vector of weights
#'
#' @importFrom stats aggregate
#'
#' @return The median category (number or label) of ordered factor.
#'
#' @rdname medianf
#'
#' @details Calculates median based on cumulative distribution. Tailored for ordered factors.
#'
#'
#' @examples
#' # Compare weighted and unweighted result
#' X=factor(c('H','H','M','M','L','L'),levels = c('L','M','H'),ordered = TRUE)
#' W=c(2,2,3,3,8,8)
#' medianf(X)
#' medianf(X,W)
#'
#'
#'
#' @export
#Median category
medianf=function(X,W=rep(1,length(X)))
{
ind=which(!is.na(W) & !is.na(X))
if(length(ind)==0)return('Input with NAs only')
W=W[ind];X=X[ind]
if(!is.numeric(X) & !is.ordered(X))return('X must be numeric or ordered factor')
tab=aggregate(W,by=list(X),FUN=sum)
Fx=(cumsum(tab$x)/sum(tab$x))
m=min(which(Fx>=0.5))
return(medianf=tab$Group.1[m])
}
#' @title Sample survey on trips
#' @description Data from sample survey on trips conducted in Polish households.
#' @docType data
#' @keywords datasets
#' @name Tourism
#' @usage data(Tourism)
#' @format A data frame with 384 observations of 14 variables
#'\itemize{
#' \item Year
#' \item Group of countries
#' \item Purpose of trip
#' \item Accommodation type
#' \item Total expenditure
#' \item Expenditure for organiser
#' \item Private expenditure
#' \item Expenditure on accommodation
#' \item Expenditure on restaurants & café
#' \item Expenditure on transport
#' \item Expenditure on commodities
#' \item Number of trip's participants
#' \item Nights spent
#' \item Sample weight
#'}
#' @details Answers were modified due to disclosure control. Data presents only part of full database.
NULL
#' @title Sample survey on quality of life
#' @description Data from sample survey on quality of life conducted on Polish-Ukrainian border in 2015 and 2019.
#' @docType data
#' @keywords datasets
#' @name Well_being
#' @usage data(Well_being)
#' @format A data frame with 1197 observations of 27 variables
#'\itemize{
#' \item Area. Rural and urban
#' \item Gender. Male and female
#' \item Year. Year of survey (2015 and 2019)
#' \item V1. I have good opportunities to use my talents and skills at work
#' \item V2. I am treated with respect by others at work
#' \item V3. I have adequate opportunities for vacations or leisure activities
#' \item V4. The quality of local services where (I) live is good
#' \item V5. There is very little pollution from cars or other sources where I spend most of my time
#' \item V6. There are parks and green areas near my residence
#' \item V7. I have the freedom to plan my life the way I want to
#' \item V8. I feel safe walking around my neighborhood during the day
#' \item V9. Overall, to what extent are you currently satisfied with your life
#' \item V10. Overall, to what extent do you feel that the things you do in life are worthwhile
#' \item V11. How do you rate your health
#' \item V12. How do you rate your work
#' \item V13. How do you rate your sleep
#' \item V14. How do you rate your leisure time
#' \item V15. How do you rate your family life
#' \item V16. How do you rate your community and public affairs life
#' \item V17. How do you rate your personal plans
#' \item V18. How do you rate your housing conditions
#' \item V19. How do you rate your personal income
#' \item V20. How do you rate your personal prospects
#' \item V21. Does being part of the local community make you feel good about yourself
#' \item V22. Do you have a say in what the local community is like
#' \item V23. Is your neighborhood a good place for you to live
#' \item Weight. Sample weight for each household
#'}
#' @details Questions are on a Likert scale: 1 - the worst assessment, 5 - the best assessment.
#' Only 23 questions were selected out of over 100 questions.
#' Answers were modified due to disclosure control.
NULL
|
/scratch/gouwar.j/cran-all/cranData/wINEQ/R/inequalityMeasure.R
|
#' @title Weighted inequality measures
#'
#' @description Calculates weighted mean and sum of X (or median of X), and a set of relevant inequality measures.
#'
#' @param X is a data vector
#' @param W is a vector of weights
#' @param AF.norm (logical). If TRUE (default) then index is divided by its maximum possible value
#' @param Atkinson.e is a parameter for Atkinson coefficient
#' @param Jenkins.alfa is a parameter for Jenkins coefficient
#' @param Entropy.e is a generalized entropy index parameter
#' @param Kolm.p is a parameter for Kolm index
#' @param Kolm.scale method of data standardization before computing
#' @param Leti.norm (logical). If TRUE (default) then Leti index is divided by a maximum possible value
#' @param AN_Y.a is a positive parameter for Abul Naga and Yalcin inequality measure
#' @param AN_Y.b is a parameter for Abul Naga and Yalcin inequality measure
#' @param Apouey.a is a parameter for Apouey inequality measure
#' @param Apouey.b is a parameter for Apouey inequality measure
#' @param BL.withsqrt if TRUE function returns index given by BL2, elsewhere by BL (default). See more in details of BL function.
#'
#' @importFrom dplyr %>%
#' @importFrom dplyr summarise
#'
#' @return The data frame with weighted mean and sum of X, and all inequality measures relevant for numeric data.
#' In the case of an ordered factor, the data frame with the median of X and all relevant inequality measures.
#'
#' @rdname ineq_weighted
#'
#' @details Function checks if X is a numeric or an ordered factor. Then it calculates all appropriate inequality measures.
#'
#' @examples
#' # Compare weighted and unweighted result.
#' X=1:10
#' W=1:10
#' ineq.weighted(X)
#' ineq.weighted(X,W)
#'
#'
#' data(Tourism)
#' # Results for Total expenditure with sample weights:
#' X=Tourism$`Total expenditure`
#' W=Tourism$`Sample weight`
#' ineq.weighted(X)
#' ineq.weighted(X,W)
#'
#' @export
ineq.weighted=function(
X,
W=rep(1,length(X)),
AF.norm=TRUE,
Atkinson.e=1,
Jenkins.alfa=0.8,
Entropy.e=0.5,
Kolm.p=1,
Kolm.scale='Standardization',
Leti.norm=T,
AN_Y.a=1,
AN_Y.b=1,
Apouey.a=2/(1-length(W[!is.na(W) & !is.na(X)])),
Apouey.b=length(W[!is.na(W) & !is.na(X)])/(length(W[!is.na(W) & !is.na(X)])-1),
BL.withsqrt=FALSE
)
{
ind=which(!is.na(W) & !is.na(X))
if(length(ind)==0)return('Input with NAs only')
W=W[ind];X=X[ind]
if(!is.numeric(X) & !is.ordered(X))return('X must be numeric or ordered factor')
n=length(X)
data=data.frame(X,W)
if(is.ordered(X))
{
result <- data %>%
summarise(
Median = medianf(X,W),
Allison_Foster=AF(X,W,norm = AF.norm),
Leti=Leti(X,W,norm = Leti.norm),
Abul_Naga_Yalcin=AN_Y(X,W,a = AN_Y.a,b = AN_Y.b),
Apouey=Apouey(X,W,a = Apouey.a,b = Apouey.b),
Blair_Lacy=BL(X,W,withsqrt = BL.withsqrt)
) %>% as.data.frame()
}
if(is.numeric(X))
{
if(min(X)==0){X=X+0.001*min(X[X>0])}
result <- data %>%
summarise(
Mean = sum(W*X)/sum(W),
Total = sum(W*X),
Theil_L= Theil_L(X,W),
Theil_T=Theil_T(X,W),
Hoover=Hoover(X,W),
Gini=Gini(X,W),
Atkinson=Atkinson(X,W,Atkinson.e),
Kolm=Kolm(X,W,scale = Kolm.scale,parameter = Kolm.p),
Entropy=Entropy(X,W,parameter = Entropy.e),
CoefVar=CoefVar(X,W),
RicciSchutz=RicciSchutz(X,W),
Prop20_20=Prop20_20(X,W),
Palma=Palma(X,W),
Jenkins=Jenkins(X,W,Jenkins.alfa)[,1],
Cowell_and_Flachaire=Jenkins(X,W,Jenkins.alfa)[,2]
) %>% as.data.frame()
}
return(result)
}
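# A minimal sketch of the ordered-factor branch: the weighted median plus the
# ordinal measures (Allison-Foster, Leti, Abul Naga-Yalcin, Apouey, Blair-Lacy)
# are returned instead of the numeric ones; the factor mirrors the ordered-factor
# example used for ineq.weighted.boot below.
X.ord <- factor(c('H','H','M','M','L','L'), levels = c('L','M','H'), ordered = TRUE)
W.ord <- c(2, 2, 3, 3, 8, 8)
ineq.weighted(X.ord, W.ord)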
#' @title Weighted inequality measures with bootstrap
#'
#' @description For weighted mean and weighted total of X (or median of X) as well as for each relevant inequality measure, returns outputs from ineq.weighted and bootstrap outcomes: expected value, bias (in %), standard deviation, coefficient of variation, lower and upper bound of confidence interval.
#'
#' @param X is a data vector
#' @param W is a vector of weights
#' @param B is a number of bootstrap samples.
#' @param AF.norm (logical). If TRUE (default) then index is divided by its maximum possible value
#' @param Atkinson.e is a parameter for Atkinson coefficient
#' @param Jenkins.alfa is a parameter for Jenkins coefficient
#' @param Entropy.e is a generalized entropy index parameter
#' @param Kolm.p is a parameter for Kolm index
#' @param Kolm.scale method of data standardization before computing
#' @param Leti.norm (logical). If TRUE (default) then Leti index is divided by a maximum possible value
#' @param AN_Y.a is a positive parameter for Abul Naga and Yalcin inequality measure
#' @param AN_Y.b is a parameter for Abul Naga and Yalcin inequality measure
#' @param Apouey.a is a parameter for Apouey inequality measure
#' @param Apouey.b is a parameter for Apouey inequality measure
#' @param BL.withsqrt if TRUE function returns index given by BL2, elsewhere by BL (default). See more in details of BL function.
#' @param keepSamples if TRUE, it returns bootstrap samples of data (Xb) and weights (Wb)
#' @param keepMeasures if TRUE, it returns values of all inequality measures for each bootstrap sample
#' @param conf.alpha significance level for confidence interval
#' @param calib.boot if FALSE, then naive bootstrap is performed, calibrated bootstrap elsewhere
#' @param Xs matrix of calibration variables. By default it is a vector of 1's, applied if calib.boot is TRUE
#' @param total vector of population totals. By default it is a sum of weights, applied if calib.boot is TRUE
#' @param calib.method weights' calibration method for function calib (sampling)
#' @param bounds vector of bounds for the g-weights used in the truncated and logit methods; 'low' is the smallest value and 'upp' is the largest value
#'
#' @importFrom stats sd
#' @importFrom stats quantile
#' @importFrom sampling calib
#'
#' @return This functions returns a data frame from ineq.weighted extended with bootstrap results: expected value, bias (in %), standard deviation, coefficient of variation, lower and upper bound of confidence interval.
#' If keepSamples=TRUE or keepMeasures==TRUE then the output becomes a list. If keepSamples=TRUE, the functions returns Xb and Wb, which are the samples of vector data and the samples of weights, respectively.
#' If keepMeasures==TRUE, the functions returns Mb, which is a set of inequality measures from bootstrapping.
#'
#' @rdname ineq_weighted_boot
#'
#' @details By default, naive bootstrap is performed, that is, no weight calibration is conducted.
#' You can choose calibrated bootstrap to calibrate weights with respect to provided variables (Xs) and totals (total).
#' The confidence interval is derived from the quantiles of order \eqn{\alpha/2} and \eqn{1-\alpha/2}, where \eqn{\alpha} is the significance level of the confidence interval.
#'
#' @examples
#' # Inequality measures with additional statistics for numeric variable
#' X=1:10
#' W=1:10
#' ineq.weighted.boot(X,W,B=10)
#'
#' # Inequality measures with additional statistics for ordered factor variable
#' X=factor(c('H','H','M','M','L','L'),levels = c('L','M','H'),ordered = TRUE)
#' W=c(2,2,3,3,8,8)
#' ineq.weighted.boot(X,W,B=10)
#'
#' @export
ineq.weighted.boot=function(X,
W=rep(1,length(X)),
B=100,
AF.norm=TRUE,
Atkinson.e=1,
Jenkins.alfa=0.8,
Entropy.e=0.5,
Kolm.p=1,
Kolm.scale='Standardization',
Leti.norm=T,
AN_Y.a=1,
AN_Y.b=1,
Apouey.a=2/(1-length(W[!is.na(W) & !is.na(X)])),
Apouey.b=length(W[!is.na(W) & !is.na(X)])/(length(W[!is.na(W) & !is.na(X)])-1),
BL.withsqrt=FALSE,
keepSamples=FALSE,
keepMeasures=FALSE,
conf.alpha=0.05,
calib.boot=FALSE,
Xs=rep(1,length(X)),
total=sum(W),
calib.method='truncated',
bounds=c(low=0,upp=10)
)
{
ind=which(!is.na(W) & !is.na(X))
if(length(ind)==0)return('Input with NAs only')
W=W[ind];X=X[ind]
if(!is.numeric(X) & !is.ordered(X))return('X must be numeric or ordered factor')
n=length(X)
Z=sample(1:length(X),size = B*length(X),replace = TRUE,prob = 1/W)
Xb=matrix(X[Z],nrow=length(X),ncol=B)
Wb=matrix(W[Z],nrow=length(W),ncol=B)
if(calib.boot)
{
for(i in 1:B)
{
Wb[,i]=Wb[,i]*calib(Xs = Xs,d = Wb[,i],total = total,method = calib.method,bounds=bounds)
}
}
cols=ifelse(is.ordered(X),6,15)
Mb=matrix(0,nrow=B,ncol=cols) %>% as.data.frame()
medians=vector('character',B)
for(i in 1:B)
{
if(is.ordered(X)){XX=factor(Xb[,i],levels = levels(X),ordered = T)}else{XX=Xb[,i]}
m=unlist(ineq.weighted(XX,
Wb[,i],
AF.norm,
Atkinson.e,
Jenkins.alfa,
Entropy.e,
Kolm.p,
Kolm.scale,
Leti.norm,
AN_Y.a,
AN_Y.b,
Apouey.a,
Apouey.b,
BL.withsqrt))
if(!is.ordered(X)){Mb[i,]=m}else{Mb[i,-1]=m[-1];medians[i]=as.character(medianf(XX,Wb[,i]))}
}
M=ineq.weighted(X,W,
AF.norm,
Atkinson.e,
Jenkins.alfa,
Entropy.e,
Kolm.p,
Kolm.scale,
Leti.norm,
AN_Y.a,
AN_Y.b,
Apouey.a,
Apouey.b,
BL.withsqrt)
if(is.ordered(X)){medianM=M$Median;M$Median=NA}
Mmean=colMeans(Mb)
Msd=apply(Mb,2,sd)
Mcv=Msd/Mmean*100
Mq1=apply(Mb,2,quantile,probs=conf.alpha/2,na.rm=TRUE)
Mq2=apply(Mb,2,quantile,probs=1-conf.alpha/2,na.rm=TRUE)
Statistics.names=c('Index (I)','E(I)','Bias(I) [%]','Sd(I)','CV(I) [%]','Lower Q(I)','Higher Q(I)')
Statistics.values=rbind(M,Mmean,(M/Mmean-1)*100,Msd,Mcv,Mq1,Mq2)
Results=cbind(Statistics.names,Statistics.values)
colnames(Mb)=colnames(M)
if(is.ordered(X))
{
Mb=as.data.frame(Mb)
Mb$Median=medians
Results$Median=factor(NA,levels = levels(X),ordered = TRUE)
Results$Median[1]=medianM
}
if(keepSamples==FALSE & keepMeasures==FALSE){return(Stats=Results)}
if(keepSamples & keepMeasures){return(list(Stats=Results,Measures=Mb,Variables=Xb,Weights=Wb))}
if(keepSamples){return(list(Stats=Results,Variables=Xb,Weights=Wb))}
if(keepMeasures){return(list(Stats=Results,Measures=Mb))}
}
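# A minimal sketch of the calibrated bootstrap: each replicate's weights are
# re-calibrated (via calib from the sampling package, imported above, with the
# truncated method) so that they reproduce the supplied population total; the
# Xs and total arguments below simply make the defaults explicit.
X.cal <- 1:10
W.cal <- rep(2, 10)
ineq.weighted.boot(X.cal, W.cal, B = 10, calib.boot = TRUE,
                   Xs = rep(1, length(X.cal)), total = sum(W.cal))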
|
/scratch/gouwar.j/cran-all/cranData/wINEQ/R/inequalityMeasureBoot.R
|
#' @title GenomeScan
#'
#' @description A Reference Class implementing a Genome Scan
#'
#' @field Options these are options
#' @field Data Things the user inputs.
#' They have interpretable meaning and define the GenomeScan.
#' Currently: y, X, G, K, weights (inverse variances), and variances.
#' @field Intermediates_per_scan Things the GenomeScan will compute once per scan.
#' They are mathematical tools that can't really be interpreted.
#' Currently: L, eigen_L, LL_null, and h2_null.
#' @field Intermediates_per_locus Things the GenomeScan will compute once per locus.
#' They are mathematical tools that can't really be interpreted.
#' Currently: XG
#' @field Intermediates_per_fit Things the GenomeScan will compute many times per locus (once per trial fit on that locus).
#' These are interpretable but rapidly changing and not guaranteed to be finalized or optimal.
#' Currently: M, LDV, and h2
#' @field Results The results of the GenomeScan.
#' Currently: The h2 that maximizes the LL at each locus and the LR as compared with the no-locus (null) model.
#'
#'
#' @export GenomeScan
#' @exportClass GenomeScan
#'
#' @importFrom methods new
#'
GenomeScan <- setRefClass(
Class = 'GenomeScan',
fields = c('Options',
'Data', # y, X, G, K, and weights --
'Intermediates_per_scan', # L, eigen_L, LL_null, h2_null -- uninterpretable computational tools that are constant for a GenomeScan
'Intermediates_per_locus', # M -- uninterpretable computational tools that are constant for a locus
'Intermediates_per_fit', # sigma_a and sigma_e -- interpretable, but changing very fast, don't look here for results
'Results')) # maximum likelihood LL, sigma_a, and sigma_e for each locus
#' @title initialize
#'
#' @name GenomeScan_initialize
#'
#' @description Initialize a GenomeScan
#'
#' @param y vector of length n - the phenotype of each of n genomes (individuals or strains)
#' @param X matrix of dimension n-by-c - the covariate value of each individual for c covariates
#' @param G a list where each element is of length n - the genotype of each individual at p loci
#' @param K matrix of dimension n-by-n - the covariance of the phenotype
#' @param w vector of length n - the inverse variance of the phenotype of each individual
#'
#' @return an object of class GenomeScan
#'
#' @examples
#' library(wISAM)
#'
#' wgs <- GenomeScan$new(y = phenotype,
#' X = covariate_mat,
#' G = locus_list,
#' K = kinship_mat,
#' w = 1/se_mean_per_strain)
#'
NULL
GenomeScan$methods(
initialize = function(y, X, G, K, w, tol = 1e-8) {
n <- length(y)
# browser()
#### UNACCEPTABLE MISSINGNESS ####
if (missing(y)) { stop('Must provide y (n-vector of phenotypes) to initialize a GenomeScan.') }
if (missing(K)) { stop('No covariance information provided. If none known, use lme4::lmer(). If known, input it as K.') }
#### ACCEPTABLE MISSINGNESS ####
if (missing(w)) { w <- rep(1, n) }
if (missing(X)) { X <- matrix(data = 1, nrow = n) }
if (missing(G)) { G <- list(matrix(data = 0, nrow = n)) }
#### CONDITIONS THAT CAUSE AN ERROR ####
if (!all(c(nrow(X), sapply(X = G, FUN = nrow), dim(K), length(w)) == n)) {
stop("Input dimensions don't match.")
}
# browser()
# deal with NA data and non-positive weights
to_carve <- is.na(y)
to_carve <- to_carve | rowSums(x = is.na(X))
# to_carve <- to_carve | rowSums(x = is.na(G))
to_carve <- to_carve | rowSums(x = is.na(K))
to_carve <- to_carve | is.na(w)
to_carve <- to_carve | w <= 0
if (any(to_carve)) {
message('Removing ', sum(to_carve), ' observations due to NA phenotype, covariate, K, or weight or non-positive weight.')
y <- y[!to_carve]
X <- X[!to_carve,]
G <- lapply(X = G, FUN = function(m) m[!to_carve,])
K <- K[!to_carve, !to_carve]
w <- w[!to_carve]
}
Options <<- list(tol = tol)
Data <<- list(num_obs = n,
num_loci = length(G),
y = y, X = X, G = G, K = K, w = w, v = 1/w)
Results <<- list(LR = NULL, h2 = NULL, beta = NULL)
}
)
#' @title prep_scan
#'
#' @name GenomeScan_prep_scan
#'
#' @description Prepare a GenomeScan for running. Does all the computations that need to be done exactly once per genome scan.
#'
#' @return an object of class GenomeScan
#'
#' @examples
#' library(wISAM)
#'
#' wgs <- GenomeScan$new(y = phenotype,
#' X = covariate_mat,
#' G = locus_list,
#' K = kinship_mat,
#' w = 1/se_mean_per_strain)
#'
#' result <- wgs$prep_scan()
#'
NULL
GenomeScan$methods(
prep_scan = function(silent = FALSE, noreturn = FALSE) {
if (!silent) { message('Preparing GenomeScan...') }
# compute L
# matrix math version
# L <- diag(sqrt(Data$w)) %*% Data$K %*% diag(sqrt(Data$w))
# vector-matrix math version (faster)
L <- t(t(sqrt(Data$w) * Data$K) * sqrt(Data$w))
eigen_L <- eigen(x = L, symmetric = TRUE)
eigen_L$values <- check_eigen_decomposition(eigen_L)
# fit the null model
Intermediates_per_scan <<- list(L = L, eigen_L = eigen_L)
null_fit <- .self$fit_locus(locus_idx = NULL)
Intermediates_per_scan <<- c(Intermediates_per_scan,
LL_null = null_fit$LL,
h2_null = null_fit$h2)
return(.self)
}
)
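# A quick check of the identity used above: the vector-matrix form
# t(t(sqrt(w) * K) * sqrt(w)) equals diag(sqrt(w)) %*% K %*% diag(sqrt(w))
# while avoiding construction of the diagonal matrices.
set.seed(1)
K.demo <- crossprod(matrix(rnorm(25), 5, 5))   # a symmetric PSD matrix
w.demo <- runif(5, 0.5, 2)
L.slow <- diag(sqrt(w.demo)) %*% K.demo %*% diag(sqrt(w.demo))
L.fast <- t(t(sqrt(w.demo) * K.demo) * sqrt(w.demo))
all.equal(L.slow, L.fast)                      # TRUE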
#' @title conduct_scan
#'
#' @name GenomeScan_conduct_scan
#'
#' @description Conducts the GenomeScan.
#'
#' @note TODO: allow user to specify subset of chromosomes or loci
#'
#' @return an object of class GenomeScan
#'
#' @examples
#' library(wISAM)
#'
#' wgs <- GenomeScan$new(y = phenotype,
#' X = covariate_mat,
#' G = locus_list,
#' K = kinship_mat,
#' w = 1/se_mean_per_strain)
#'
#' result <- wgs$prep_scan()$conduct_scan()
#'
NULL
GenomeScan$methods(
conduct_scan = function(silent = FALSE) {
if (!silent) { message('Conducting GenomeScan...') }
if ('uninitializedField' %in% class(Intermediates_per_scan)) {
.self$prep_scan()
}
for (locus_idx in 1:Data$num_loci) {
fit <- fit_locus(locus_idx = locus_idx)
Results <<- list(LR = replace(x = Results$LR, list = locus_idx, values = 2*(fit$LL - Intermediates_per_scan$LL_null)),
h2 = replace(x = Results$h2, list = locus_idx, values = fit$h2),
beta = replace(x = Results$beta, list = locus_idx, values = fit$beta),
n = replace(x = Results$n, list = locus_idx, values = fit$n))
}
return(.self)
}
)
#' @title fit_locus
#'
#' @name GenomeScan_fit_locus
#'
#' @description Fit one locus of a GenomeScan. Should not typically be called by a user.
#'
#' @return a list with the maximum-likelihood h2, the log-likelihood (LL), the locus effect estimate (beta), and the number of observations used (n)
#'
NULL
GenomeScan$methods(
fit_locus = function(locus_idx) {
# browser()
if (is.null(locus_idx)) {
G <- NULL
good_idxs <- 1:length(Data$y)
} else {
G <- Data$G[[locus_idx]]
      good_idxs <- !is.na(G)
}
Intermediates_per_locus <<- list(y = Data$y[good_idxs],
XG = cbind(Data$X, G)[good_idxs,],
eigen_L = subset_eigen(l = Intermediates_per_scan$eigen_L,
good_idxs = good_idxs),
v = Data$v[good_idxs],
w = Data$w[good_idxs])
opt <- optimize(f = .self$fit_locus_given_h2,
lower = 0,
upper = 1,
maximum = TRUE,
tol = Options$tol)
# browser()
fit <- lm.fit(x = Intermediates_per_fit$M %*% Intermediates_per_locus$XG,
y = Intermediates_per_fit$M %*% Intermediates_per_locus$y)
return(list(h2 = opt[[1]],
LL = opt[[2]],
beta = coef(fit)[length(coef(fit))],
n = length(Intermediates_per_locus$y)))
}
)
#' @title fit_locus_given_h2
#'
#' @name GenomeScan_fit_locus_given_h2
#'
#' @description Fit one locus at a specified value of h2. Should not typically be called by a user.
#'
#' @return the log-likelihood of the model at the given value of h2
#'
NULL
GenomeScan$methods(
fit_locus_given_h2 = function(h2) {
.self$calc_multiplier_eigen(h2 = h2)
fit <- lm.fit(x = Intermediates_per_fit$M %*% Intermediates_per_locus$XG,
y = Intermediates_per_fit$M %*% Intermediates_per_locus$y)
n <- Data$num_obs
sigma2_mle <- sum(fit$residuals^2)/n
loglik_uncorr <- (-0.5 * n * log(2*pi)) - (0.5 * n * log(sigma2_mle)) - (0.5 * n)
loglik <- loglik_uncorr - 0.5 * Intermediates_per_fit$LDV
return(loglik)
}
)
#' @title calc_multiplier_eigen
#'
#' @name GenomeScan_calc_multiplier_eigen
#'
#' @description Compute a multiplier (aka rotation) matrix. Details in h2lmm_math_RWC.Rmd.
#'
#' @return called for its side effect: stores M, LDV, and h2 in Intermediates_per_fit
#'
NULL
GenomeScan$methods(
calc_multiplier_eigen = function(h2) {
if (missing(h2)) {
stop('Must provide `h2` to `calc_multiplier_eigen`.')
}
eigen_L <- Intermediates_per_locus$eigen_L
v <- Intermediates_per_locus$v
w <- Intermediates_per_locus$w
d <- h2*eigen_L$values + (1 - h2)
M <- 1/sqrt(d) * t(sqrt(w) * eigen_L$vectors)
# browser()
# here is the slower way to calculate it, but it looks more like the math notation
# i have verified they are the same -- RWC
# M <- diag(1/sqrt(h2*eigen_L$values + (1 - h2))) %*% t(eigen_L$vectors) %*% diag(v)
LDV <- sum(log(v)) + sum(log(d))
# M <-
#diag(1/(h2*eigen_L$values + (1-h2))) %*% t(eigen_L$vectors) %*% diag(sqrt(w))
# LDV <- sum(log(1/w)) + sum(log(h2*eigen_L$values + (1-h2)))
Intermediates_per_fit <<- list(M = M,
LDV = LDV,
h2 = h2)
}
)
|
/scratch/gouwar.j/cran-all/cranData/wISAM/R/GenomeScan.R
|
#' Example covariate matrix
#'
#' @docType data
#' @format A matrix with 200 rows and 4 variables
#' @keywords data
'covariate_mat'
#' Example kinship matrix
#'
#' @docType data
#' @format A matrix with 200 rows and 200 columns
#' @keywords data
'kinship_mat'
#' Example locus list
#'
#' @docType data
#' @format A list of matrices, where each matrix has 200 rows and 1 column
#' @keywords data
'locus_list'
#' Example phenotype
#'
#' @docType data
#' @format A vector of length 200
#' @keywords data
'phenotype'
#' Example standard error of the mean per strain
#'
#' @docType data
#' @format A vector of length 200
#' @keywords data
'se_mean_per_strain'
|
/scratch/gouwar.j/cran-all/cranData/wISAM/R/data.R
|
#' @title check_eigen_decomposition
#'
#' @param e the eigen decomposition to check
#' @param tol the threshold below which a number is said to be effectively zero, defaults to 1e-6
#'
#' @description Grabbed from MASS. Useful to sparsify matrices when some eigenvalues are essentially zero.
#'
#' @return The eigenvalues, with any negative values within the tolerance set to zero.
#' @export
#'
check_eigen_decomposition <- function(e, tol = 1e-6) {
# after MASS function mvrnorm
if (!all(e$values >= -tol * abs(e$values[1L]))){
stop("K is not positive definite")
}
if (any(e$values < 0)) {
if (any(e$values < -tol)) {
message("Zeroing negative eigenvalues: smallest eigenvalue was ",
min(e$values), "\n")
}
e$values <- pmax(e$values, 0)
}
return(e$values)
}
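# A small sketch: a rank-deficient PSD matrix whose trailing eigenvalues may come
# out as tiny negatives from rounding; the checker clips such values to zero and
# errors only if the matrix is genuinely not positive semi-definite.
A.demo <- tcrossprod(c(1, 1, 1))          # rank-1, positive semi-definite 3 x 3 matrix
e.demo <- eigen(A.demo, symmetric = TRUE)
e.demo$values                             # one positive eigenvalue, the rest ~0
check_eigen_decomposition(e.demo)         # any tiny negative values are set to 0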
low_rank_eigen <- function(Z, K) {
e <- eigen(K%*%crossprod(Z,Z),symmetric=FALSE)
return(list(values=e$values, vectors=qr.Q(qr(Z%*%e$vectors))))
}
subset_eigen <- function(l, good_idxs) {
return(list(values = l$values[good_idxs],
vectors = l$vectors[good_idxs, good_idxs]))
}
|
/scratch/gouwar.j/cran-all/cranData/wISAM/R/eigen_utils.R
|
###############################################################################
## ##
## wNNSel - weighted nearest neighbor imputation using selected neighbors ##
## ##
## This R script contains the function to produce missing values in a given ##
## data set completely at random. ##
## ##
## Author: Shahla Faisal [email protected] ##
## ##
###############################################################################
#' Introduce MCAR Missing Values in a matrix
#'
#' This function artificially introduces missing values in a data matrix under the missing completely at random (MCAR) mechanism.
#'
#' @param x a matrix, in which missing values are to be created.
#' @param miss.prop proportion of missing values
#'
#' @return a matrix with missing values
#' @keywords NA
#' @export
#' @examples
#' set.seed(3)
#' x = matrix(rnorm(100),10,10)
#' ## create 10% missing values in x
#' artifNA(x, 0.10)
artifNA <- function(x, miss.prop=0.1 )
{
n <- nrow(x)
p <- ncol(x)
total <- n*p
miss.ind <- sample(1:total, floor(total*miss.prop ) )
x[miss.ind] <- NA
return(x)
}
|
/scratch/gouwar.j/cran-all/cranData/wNNSel/R/artifNA.R
|
###############################################################################
## ##
## wNNSel - weighted nearest neighbor imputation using selected neighbors ##
## ##
## This R script contains the function to produce missing values in a given ##
## data set completely at random for cross validation. ##
## ##
## Author: Shahla Faisal [email protected] ##
###############################################################################
#' Introduce MCAR Missing Values in a matrix for cross validation
#'
#' This function introduces additional missing values in a missing data matrix artificially.
#' The missing values are introduced under the missing completely at random (MCAR) mechanism.
#' @param x a matrix, in which missing values are to be created.
#' @param testNA.prop proportion of missing values
#' @return a list containing a matrix with artificial missing values, the removed indices, and the provided x matrix
#' @seealso \code{\link{cv.wNNSel}}
#' @keywords NA cross-validation
#' @export
#' @examples
#' set.seed(3)
#' x = matrix(rnorm(100),10,10)
#' ## create 10% missing values in x
#' x.miss<- artifNA(x, 0.10)
#' ## create another 10% missing values in x
#' x.miss.cv<- artifNA.cv(x, 0.10)
#' summary(x.miss)
#' summary(x.miss.cv)
artifNA.cv <- function(x, testNA.prop=0.1 )
{
n <- nrow(x)
p <- ncol(x)
total <- n*p
missing.matrix = is.na(x)
valid.data = which(!missing.matrix)
remove.indices = sample(valid.data, testNA.prop*length(valid.data))
x.train = x
x.train[remove.indices] = NA
return (list(remove.indices = remove.indices, x.train = x.train, x=x))
}
|
/scratch/gouwar.j/cran-all/cranData/wNNSel/R/artifNA.cv.R
|
###############################################################################
## ##
## wNNSel - weighted nearest neighbor imputation using selected neighbors ##
## ##
## This function converts the correlations to weights and thus performs ##
## selection of variables using a convex function. ##
## ##
## Author: Shahla Faisal [email protected] ##
###############################################################################
convex.func <- function( r, method="2", m=2, c=0.3)
{
if(method=="1" & missing(c) ) stop("c is required when method=1 ")
if(method=="2" & missing(m) ) stop("m is required when method=2 ")
if(method=="1"){
convex <- ifelse(abs(r)<=c, 0, (abs(r)-c)/(1-c))
}else{
if(method=="2") convex <- abs(r)^m
}
return(convex)
}
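# A small sketch of how correlations become selection weights: method = "2" applies
# the power function |r|^m, while method = "1" applies the linear function
# (|r| - c)/(1 - c) above the threshold c and returns 0 below it
# (the printed values for method = "1" are approximate).
r.demo <- c(-0.9, -0.25, 0.1, 0.6)
convex.func(r.demo, method = "2", m = 2)    # 0.8100 0.0625 0.0100 0.3600
convex.func(r.demo, method = "1", c = 0.3)  # ~0.857 0 0 ~0.429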
|
/scratch/gouwar.j/cran-all/cranData/wNNSel/R/convex.func.R
|
###############################################################################
## ##
## wNNSel - weighted nearest neighbor imputation using selected neighbors ##
## ##
##  This function aims to automatically search for optimal values of the    ##
## tuning parameters that yield smallest MSIE for a missing data matrix. ##
## ##
## Author: Shahla Faisal [email protected] ##
## ##
###############################################################################
#' Cross Validation for wNNSel Imputation
#'
#' This function aims to search for optimal values of the tuning parameters for the wNNSel imputation.
##### that yield smallest MSIE for a missing data matrix.
#'
#'
#' Some values are artificially deleted and wNNSel is run multiple times, varying \eqn{\lambda} and \eqn{m}.
#' For each pair of \eqn{\lambda} and \eqn{m}, compute MSIE on the subset of the data matrix x for which
#' the values were deleted artificially. (See References for more detail).
##### (For details, Tutz and Ramzan(2015), Faisal and Tutz (2017) ).
#'
#' @param x a \code{matrix} containing missing values
#' @param kernel kernel function to be used in nearest neighbors imputation. Default kernel function is "gaussian".
#' @param x.dist distance to compute, The default is \code{x.dist="euclidean"}
#' to compute Euclidean distance. Set \code{x.dist} to \code{NULL} to use Manhattan distance.
#' @param method convex function, performs selection of variables. If \code{method="1"},
#' a linear function is used; if \code{method="2"}, a power function is used.
#' @param c.values a \code{vector} between 0 and less than 1. It is required when method="1".
#' @param m.values a \code{vector} of integer values, required when method="2".
#' @param lambda.values a \code{vector}, for the tuning parameter \eqn{\lambda}
#' @param times.max maximum number of repetitions for the cross validation procedure.
#' @param testNA.prop proportion of values to be deleted artificially for
#' cross validation in the missing matrix \code{x}. Default method uses 5 percent.
#' @return a list containing
#' \item{lambda.opt}{optimal parameter selected by cross validation}
#' \item{m.opt}{optimal parameter selected by cross validation}
#' \item{MSIE.cv}{cross validation error}
#'
#'
#' @author Shahla Faisal <[email protected]>
#' @references Tutz, G. and Ramzan,S. (2015). Improved methods for the imputation of missing data
#' by nearest neighbor methods. \emph{Computational Statistics and Data Analysis}, Vol. 90, pp. 84-99.
#'
#' Faisal, S. and Tutz, G. (2017). Missing value imputation for gene expression data by tailored nearest neighbors.
#' \emph{Statistical Application in Genetics and Molecular Biology}. Vol. 16(2), pp. 95-106.
#' @seealso \code{\link{artifNA.cv}}, \code{\link{wNNSel}}
#' @keywords wNNSel NA weights cross-validation
#' @export
#' @examples
#' set.seed(3)
#' x.true = matrix(rnorm(100),10,10)
#' ## create 10% missing values in x
#' x.miss = artifNA(x.true, 0.10)
#' ## use cross validation to find optimal values
#' result = cv.wNNSel(x.miss)
#' ## optimal values are
#' result$lambda.opt
#' result$m.opt
#' ## Now use these values to get final imputation
#' x.impute = wNNSel.impute(x.miss, lambda=result$lambda.opt, m=result$m.opt)
#' ## and final MSIE
#' computeMSIE(x.miss, x.impute, x.true)
cv.wNNSel <- function( x, kernel="gaussian", x.dist="euclidean", method="2" ,
m.values = seq(2,8, by=2), c.values = seq(.1,.5, by=0.1) , lambda.values = seq(0,.6,by=.01)[-1] ,
times.max = 5, testNA.prop = 0.05 )
{
if(method=="1") tune.vec = c.values
if(method=="2") tune.vec = m.values
prelim.list = artif.NAs( x, testNA.prop , times.max )
mse.mat <-t( sapply(lambda.values, function(ii)
{
sapply(tune.vec, function (jj) {
#res <- cv.error.wNNSel( testNA.prop=testNA.prop, k=k, lambda=ii, method="2", m=jj)
if(method=="2") { res <- cv.error.wNNSel(x, times.max=times.max, lambda=ii, method="2", m=jj, kernel=kernel, prelim.list=prelim.list)
} else{ if(method=="1") res <- cv.error.wNNSel(x, times.max=times.max, lambda=ii, method="1", c=jj, kernel=kernel , prelim.list=prelim.list) }
})
}) )
lambda.tune.mse.mat <- result.mat(mse.mat)
lambda.opt <- lambda.tune.mse.mat[ which.min(lambda.tune.mse.mat[,3]) , 1 ]
tune.opt <- lambda.tune.mse.mat[ which.min(lambda.tune.mse.mat[,3]) , 2 ]
mse.opt <- lambda.tune.mse.mat[ which.min(lambda.tune.mse.mat[,3]) , 3 ]
if(method=="1") result <- list(lambda.opt=lambda.opt, c.opt=tune.opt, MSIE.cv=mse.opt)
if(method=="2") result <- list(lambda.opt=lambda.opt, m.opt=tune.opt, MSIE.cv=mse.opt)
return( result )
}
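# A minimal sketch of cross-validation with the linear selection function
# (method = "1"), where the second tuning parameter is c rather than m; the coarse
# grids are only meant to keep the run short.
set.seed(3)
x.true.demo <- matrix(rnorm(100), 10, 10)
x.miss.demo <- artifNA(x.true.demo, 0.10)
cv1 <- cv.wNNSel(x.miss.demo, method = "1",
                 c.values = seq(0.1, 0.5, by = 0.2),
                 lambda.values = seq(0.1, 0.5, by = 0.1),
                 times.max = 3)
cv1$lambda.opt
cv1$c.opt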
|
/scratch/gouwar.j/cran-all/cranData/wNNSel/R/cv.wNNSel.R
|
###############################################################################
## ##
## wNNSel - weighted nearest neighbor imputation using selected neighbors ##
## ##
##  This function computes specific distances for the imputation of a       ##
## missing value using wNNSel method. ##
## ##
## Author: Shahla Faisal [email protected] ##
## ##
###############################################################################
#### Distance function for wNNSel imputation
####
#### This function computes specific distances for the imputation of a missing value using \code{wNNSel} method.
####
#### The specific distances are computed using important covariates only.
#### If \code{method="1"}, the linear function in absolute value of \eqn{r} is used, defined by
#### \deqn{\frac{|r|}{1-c} - \frac{c}{1-c},}
#### for \eqn{|r|>c}, and 0 otherwise.
#### By default, the power function \eqn{|r|^m } is used when \code{method="2"}. For more detailed discussion, see references.
####
#### @param x a matrix containing missing values
#### @param x.initial an optional. A complete data matrix e.g. using mean imputation of \code{x}. If provided, it will be used for the computation of correlations.
#### @param x.dist distance to compute. The default is \code{x.dist="euclidean"}, that uses the Euclidean distance. Set \code{x.dist} to \code{NULL} for Manhattan distance.
#### @param convex logical. If \code{TRUE}, selected variables are used for the computation of distance. The default is \code{TRUE}.
#### @param m \code{scaler}, a tuning parameter required by the power function.
#### @param c \code{scaler}, a tuning parameter required by the linear function.
#### @param method convex function, performs selection of variables. If \code{method="1"}, linear function is used and the power function is used when \code{method="2"}.
#### @keywords distance weights
#### @references Tutz, G. and Ramzan,S. (2015). Improved methods for the imputation of missing data
#### by nearest neighbor methods. \emph{Computational Statistics and Data Analysis}, Vol. 90, pp. 84-99.
####
#### Faisal, S. and Tutz, G. (2017). Missing value imputation for gene expression data by tailored nearest neighbors.
#### \emph{Statistical Application in Genetics and Molecular Biology}. Vol. 16(2), pp. 95-106.
####
#### @examples
#### set.seed(3)
#### x = matrix(rnorm(100),10,10)
#### x.miss = x > 1
#### x[x.miss] = NA
#### dist.wNNSel(x)
#### @export
dist.wNNSel <- function( x, x.initial=NULL, x.dist="euclidean", convex=TRUE, method="2", m=2 , c=0.3 )
{
if(is.null(x.dist) )
{
if(convex==TRUE)
{
check(method, c, m)
if(is.null(x.initial)) {myx = x} else {myx=x.initial }
R <- convex.func(r=cor(myx, use="pairwise.complete.obs",method="pearson"), method, m, c)
dist.mat = compute.d1c(x, R)
} else{
dist.mat = compute.d1(x)
}
} else {
if(x.dist=="euclidean")
{
if (convex==TRUE)
{
check(method, c, m)
if(is.null(x.initial)) {myx = x} else {myx=x.initial }
R <- convex.func(r=cor(myx, use="pairwise.complete.obs",method="pearson"), method, m, c)
dist.mat = compute.d2c(x, R)
}
else {
dist.mat = compute.d2(x)
}
}
}
return(dist.mat)
}
|
/scratch/gouwar.j/cran-all/cranData/wNNSel/R/dist.wNNSel.R
|
###############################################################################
## ##
## wNNSel - weighted nearest neighbor imputation using selected neighbors ##
## ##
## Mean Squared Imputation Error ##
## Mean Absolute Imputation Error ##
##              Normalized Root Mean Squared Imputation Error               ##
## ##
## Author: Shahla Faisal [email protected] ##
## ##
###############################################################################
#' Mean Squared Imputation Error
#'
#' This function computes the mean squared imputation error for a given complete/true data matrix,
#' imputed data matrix and the data matrix with missing values.
#' @param x.miss a \code{matrix}, having missing values
#' @param x.impute an imputed data \code{matrix}. Note that it should not contain any missing values.
#' @param x.true complete/true data \code{matrix}. Note that it should not contain any missing values.
#' @return value of MSIE
#' @keywords error
#' @export
#' @examples
#' set.seed(3)
#' x.true = matrix(rnorm(100),10,10)
#' ## create 10% missing values in x
#' x.miss = artifNA(x.true, 0.10)
#' ## impute using wNNSel method
#' x.impute = wNNSel.impute(x.miss)
#' computeMSIE(x.miss, x.impute, x.true)
computeMSIE <- function( x.miss, x.impute, x.true )
{
x.true=as.matrix(x.true)
x.miss=as.matrix(x.miss)
x.impute=as.matrix(x.impute)
na.index = which(is.na(x.miss))
mse = mean( ( x.true[na.index] - x.impute[na.index] )^2)
return(mse)
}
#' Mean Absolute Imputation Error
#'
#' This function computes the mean absolute imputation error for a given complete/true data matrix,
#' imputed data matrix and the data matrix with missing values.
#' @param x.miss a \code{matrix}, having missing values
#' @param x.impute an imputed data \code{matrix}. Note that it should not contain any missing values.
#' @param x.true complete/true data \code{matrix}. Note that it should not contain any missing values.
#' @return value of MAIE
#' @keywords error
#' @export
#' @examples
#' set.seed(3)
#' x.true = matrix(rnorm(100),10,10)
#' ## create 10% missing values in x
#' x.miss = artifNA(x.true, 0.10)
#' ## impute using wNNSel method
#' x.impute = wNNSel.impute(x.miss)
#' computeMAIE(x.miss, x.impute, x.true)
##### x.impute = wNNSel.impute(x.miss, lambda=0.5, m=2)
computeMAIE <- function( x.miss, x.impute, x.true)
{
x.true=as.matrix(x.true)
x.miss=as.matrix(x.miss)
x.impute=as.matrix(x.impute)
na.index = which(is.na(x.miss))
maie = mean(abs( ( x.true[na.index] - x.impute[na.index] ) ))
return(maie)
}
#' Normalized Root Mean Squared Imputation Error
#'
#' This function computes the normalized root mean squared imputation error for a given complete/true data matrix,
#' imputed data matrix and the data matrix with missing values.
#' @param x.miss a \code{matrix}, having missing values
#' @param x.impute an imputed data \code{matrix}. Note that it should not contain any missing values.
#' @param x.true complete/true data \code{matrix}. Note that it should not contain any missing values.
#' @return value of NRMSE
#' @keywords error
#' @export
#' @examples
#' set.seed(3)
#' x.true = matrix(rnorm(100),10,10)
#' ## create 10% missing values in x
#' x.miss = artifNA(x.true, 0.10)
#' ## impute using wNNSel method
#' x.impute = wNNSel.impute(x.miss)
#' computeNRMSE(x.miss, x.impute, x.true)
##### x.impute = wNNSel.impute(x.miss, lambda=0.5, m=2)
computeNRMSE <- function( x.miss, x.impute, x.true )
{
x.true=as.matrix(x.true)
x.miss=as.matrix(x.miss)
x.impute=as.matrix(x.impute)
na.index = which(is.na(x.miss))
nrmse <- sqrt(mean((x.impute[na.index]-x.true[na.index])^{2})/var(x.true[na.index]))
return(nrmse)
}
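# A small sketch comparing the three error measures on one imputation, with a
# simple column-mean imputation added as a baseline for reference.
set.seed(3)
x.true.err <- matrix(rnorm(100), 10, 10)
x.miss.err <- artifNA(x.true.err, 0.10)
x.wnn.err  <- wNNSel.impute(x.miss.err)
x.mean.err <- apply(x.miss.err, 2, function(j) { j[is.na(j)] <- mean(j, na.rm = TRUE); j })
computeMSIE(x.miss.err, x.wnn.err, x.true.err)
computeMAIE(x.miss.err, x.wnn.err, x.true.err)
computeNRMSE(x.miss.err, x.wnn.err, x.true.err)
computeMSIE(x.miss.err, x.mean.err, x.true.err)   # column-mean imputation, for comparison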
|
/scratch/gouwar.j/cran-all/cranData/wNNSel/R/errors.R
|
###############################################################################
## ##
## wNNSel - weighted nearest neighbor imputation using selected neighbors ##
## ##
## Compute weights for weighted nearest neighbor Imputation ##
## ##
## ##
## Author: Shahla Faisal [email protected] ##
## ##
###############################################################################
kernel.weight <- function(distances, lambda=0.3, kernel="gaussian")
{
if(kernel=="triangular")
{
u <- distances/lambda # ; print(u1)
# u <- u1/max(u1) # ; print(u)
Kernel <- ifelse( abs(u)<=1, (1-abs(u)), 0 ) # ; print(Kernel)
} else{
if(kernel=="gaussian") {
u <- distances/lambda # ; print(u)
Kernel <- (1/sqrt(2*pi))*exp((-1/2)*u^2) } # ; print(Kernel)
}
weight <- Kernel/ sum(Kernel) # ; print(weight)
return(weight)
}
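# A small sketch: both kernels turn distances into weights that sum to one; the
# Gaussian kernel down-weights distant neighbours smoothly, while the triangular
# kernel gives zero weight to distances beyond lambda.
d.demo <- c(0.1, 0.4, 0.8)
kernel.weight(d.demo, lambda = 0.3, kernel = "gaussian")
kernel.weight(d.demo, lambda = 0.5, kernel = "triangular")   # last weight is 0 (0.8 > 0.5)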
|
/scratch/gouwar.j/cran-all/cranData/wNNSel/R/kernel.weight.R
|
###############################################################################
## ##
## wNNSel - weighted nearest neighbor imputation using selected neighbors ##
## ##
## Author: Shahla Faisal [email protected] ##
## ##
###############################################################################
##############
artif.NAs <- function( x, testNA.prop, times.max )
{
res <- replicate(times.max, artifNA.cv(x, testNA.prop) , simplify=F)
return(res)
}
###################
cv.error.wNNSel = function(x, times.max, prelim.list, testNA.prop, k, lambda=0.1, method="2", c, m, kernel="gaussian") # method=c("1","2")
{
if (!is.matrix(x)) stop("x should be a numeric data matrix")
check(method, c, m)
if(method=="1"){ tune=c }else{tune=m}
MSIE = sapply(1:times.max, function(i, lambda, tune ) {
x.train <- prelim.list[[i]]$x.train
remove.indices <- prelim.list[[i]]$remove.indices
x <- prelim.list[[i]]$x
if(method=="2"){
result <- wNNSel.impute(x.train, x.initial=x, lambda=lambda, method="2", m=tune, kernel=kernel, verbose=FALSE, verbose2=FALSE) # m=m,
}
if(method=="1"){
result <- wNNSel.impute(x.train, x.initial=x, lambda=lambda, method="1", c=tune, kernel=kernel, verbose=FALSE, verbose2=FALSE) # m=m,
}
error = (result[remove.indices] - x[remove.indices])
mean(error^2, na.rm=TRUE)
}, lambda=lambda, tune=tune)
MSE.final <- mean(MSIE)
if(method=="2"){
result <- unlist(list( lambda=lambda, m=tune, MSIE=MSE.final ) )
}
if(method=="1") {
result <- c( lambda=lambda, c=tune, MSIE=MSE.final )
}
return(result)
}
###################
####
impute.prelim = function(x, byrow = T, verbose=F) {
missing.matrix = is.na(x)
numMissing = sum(missing.matrix)
if(verbose) {
print(paste("imputing on", numMissing, "missing values with matrix size",
nrow(x)*ncol(x), sep=" "))
}
if(numMissing == 0) {
return ( list (missing.matrix = missing.matrix,
numMissing = numMissing,
missing.rows.indices = NULL,
missing.cols.indices = NULL,
x.missing = NULL) )
}
missing.rows.indices = which(apply(missing.matrix, 1, function(i) {
any(i)
}))
missing.cols.indices = which(apply(missing.matrix, 2, function(i) {
any(i)
}))
if (byrow) x.missing = cbind(1:nrow(x),x)[missing.rows.indices,,drop=F]
else x.missing = rbind(1:ncol(x),x)[,missing.cols.indices,drop=F]
return ( list (missing.matrix = missing.matrix,
numMissing = numMissing,
missing.rows.indices = missing.rows.indices,
missing.cols.indices = missing.cols.indices,
x.missing = x.missing) )
}
###################
# 1a function
cv.impute.prelim = function(x, test.fraction = 1/20) {
n = nrow(x) * ncol(x)
missing.matrix = is.na(x)
valid.data = which(!missing.matrix)
remove.indices = sample(valid.data, test.fraction*length(valid.data))
x.train = x; x.train[remove.indices] = NA
return (list(remove.indices = remove.indices,
x.train = x.train))
}
###########
result.mat <- function(input.mat)
{
res.mat <- input.mat[,1:3]
for( i in seq(1,ncol(input.mat),by=3)[-1] )
{ res.mat <- rbind(res.mat, input.mat[,i:(i+2)]) }
return(res.mat)
}
###########
impute.fn.wNNSel = function(values, distances, lambda, kernel, k, useAll=TRUE, verbosefn=FALSE)
{
if(useAll==TRUE) {
k <- length(distances)
}else{
if(!missing(k) & useAll==FALSE ) stopifnot( k <= length(distances) )
}
ranks = order(distances)
smallest.distances = distances[ranks][1:k]
smallest.distances = smallest.distances[!is.na(smallest.distances)]
if ( is.numeric(smallest.distances) )
if (verbosefn) { print(" First k ordered smallest.distances are" ); print( smallest.distances) }
knn.values = values[ranks][1:k]
knn.values = knn.values[!is.na(knn.values)]
if (verbosefn) { print("knn.values corresponding to smallest.distances are" ); print(knn.values) }
if(is.null(kernel) )
{ knn.weights <- c( rep( 1/k, length(knn.values) ) )
} else {
knn.weights <- kernel.weight(smallest.distances, lambda, kernel )
}
if (verbosefn) { print("corresponding knn.weights are" ) ; print (knn.weights) }
estimated.value <- sum(knn.values * knn.weights)
if (verbosefn) { print("The imputed value is" ); print(estimated.value) }
return(estimated.value)
} ## end of impute.fn
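# A minimal sketch of imputing a single missing cell from its neighbours' values
# and their selected-variable distances; with useAll = TRUE (the default) every
# neighbour contributes, weighted by the Gaussian kernel.
vals.demo  <- c(2.0, 3.5, 1.8, 4.2)      # neighbour values in the missing cell's column
dists.demo <- c(0.20, 0.90, 0.10, 1.50)  # corresponding wNNSel distances
impute.fn.wNNSel(vals.demo, dists.demo, lambda = 0.3, kernel = "gaussian")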
###################
compute.d1c <- function(x, R)
{
prelim = impute.prelim(x)
if (prelim$numMissing == 0) { stop("x matrix has no missing values"); return (x)}
dist.mat <- matrix(nrow=0,ncol=nrow(x)+2)
colnames(dist.mat) <- c("row.index","col.index",rep("",nrow(x)) )
for(i in 1:nrow(x))
{
if( sum( is.na(x[i,]) ) > 0 )
{
miss.index <- which(is.na(x[i,]))
for(j in 1:length(miss.index))
{
temp<-matrix(nrow=nrow(x), data= rep( x[i,], nrow(x)),byrow=T )
dist.vec <- matrix(data= rowMeans( abs(temp-x)* matrix( nrow=nrow(x),ncol=ncol(x),data=R[miss.index[j],],byrow=T) ,na.rm=T),byrow=T)
dist.mat <- rbind(dist.mat, c(i,miss.index[j],dist.vec) )
}
}
}
return(dist.mat)
}
###################
compute.d2c <- function(x, R) {
prelim = impute.prelim(x)
if (prelim$numMissing == 0) { stop("x matrix has no missing values"); return (x)}
dist.mat <- matrix(nrow=0,ncol=nrow(x)+2)
colnames(dist.mat) <- c("row.index","col.index",rep("",nrow(x)) )
for(i in 1:nrow(x))
{
if( sum( is.na(x[i,]) ) > 0 )
{
miss.index <- which(is.na(x[i,]))
for(j in 1:length(miss.index))
{
temp<-matrix(nrow=nrow(x), data= rep( x[i,], nrow(x)),byrow=T )
dist.vec <- matrix(data= sqrt(rowMeans( ( (temp-x)^2 )* matrix( nrow=nrow(x),ncol=ncol(x),data=R[miss.index[j],],byrow=T) ,na.rm=T) ),byrow=T)
dist.mat <- rbind(dist.mat, c(i,miss.index[j],dist.vec) )
}
}
}
return(dist.mat)
}
###################
compute.d1 <- function(x) {
prelim = impute.prelim(x)
if (prelim$numMissing == 0) { stop("x matrix has no missing values"); return (x)}
dist.mat <- matrix( nrow=nrow(x),ncol=nrow(x))
for(i in 1:nrow(x) )
{
temp<-matrix(nrow=nrow(x), data= rep( x[i,], nrow(x)),byrow=T )
dist.mat [i,]<- matrix(data= rowMeans(abs(temp-x),na.rm=T),byrow=T)
}
return(dist.mat)
}
###################
compute.d2 <- function(x) {
prelim = impute.prelim(x)
if (prelim$numMissing == 0) { stop("x matrix has no missing values"); return (x)}
dist.mat <- matrix( nrow=nrow(x),ncol=nrow(x))
for(i in 1:nrow(x) )
{
temp<-matrix(nrow=nrow(x), data= rep( x[i,], nrow(x)),byrow=T )
dist.mat [i,]<- matrix( data=sqrt( rowMeans( (temp-x)^2, na.rm=T ) ) ,byrow=T )
}
return(dist.mat)
}
###################
check <- function( method, c, m )
{
if(method=="1" & missing(c) ) stop("c is required when method=1 ")
if(method=="2" & missing(m) ) stop("m is required when method=2 ")
}
###################
|
/scratch/gouwar.j/cran-all/cranData/wNNSel/R/utils.R
|
###############################################################################
## ##
## wNNSel - weighted nearest neighbor imputation using selected neighbors ##
## ##
## Author: Shahla Faisal [email protected] ##
## ##
###############################################################################
#' Weighted Nearest Neighbor Imputation of Missing Values using Selected Variables
#'
#' This package introduces new non-parametric tools for the imputation of missing values in high-dimensional data.
#' It includes weighted nearest neighbor
#' imputation methods that use distances for selected covariates. The careful
#' selection of distances that carry information about the missing values yields an imputation
#' tool. It does not require pre-specified \eqn{k}, unlike other kNN methods.
#' It can be used to impute missing values in high-dimensional data when \eqn{n<p}.
#'
#' \tabular{ll}{ Package: \tab wNNSel\cr Version: \tab 0.1 \cr Date: \tab
#' 2017-11-08\cr Depends: \tab R (>= 2.10) \cr License: \tab GPL (>=
#' 2) }
#'
#' The main function of the package is \code{\link{wNNSel}} for implementing the nonparametric procedure of nearest neighbors imputation.
#' See \code{\link{wNNSel}} for more details.
#'
#'
#' @name wNNSel-package
#' @aliases wNNSel-package
#' @docType package
#' @author Shahla Faisal <[email protected]>
#'
##### Maintainer: Shahla Faisal
#' @references Tutz, G. and Ramzan,S*. (2015). Improved methods for the imputation of missing data
#' by nearest neighbor methods. \emph{Computational Statistics and Data Analysis}, Vol. 90, pp. 84-99.
#'
#' Faisal, S.* and Tutz, G. (2017). Missing value imputation for gene expression data by tailored nearest neighbors.
#' \emph{Statistical Application in Genetics and Molecular Biology}. Vol. 16(2), pp. 95-106.
#'
#' @note *Author's Last name changed to \emph{Faisal} from \emph{Ramzan} in 2016.
#' @keywords package NA weights wNNSel
#' @import stats
NULL
|
/scratch/gouwar.j/cran-all/cranData/wNNSel/R/wNNSel-Package.R
|
###############################################################################
## ##
## wNNSel - weighted nearest neighbor imputation using selected neighbors ##
## ##
## Author: Shahla Faisal [email protected] ##
## ##
###############################################################################
# Weighted nearest neighbor imputation of missing values using selected variables
#' Imputatin using wNNSel method.
#'
#' \code{'wNNSel'} is used to impute the missing values, particularly in high-dimensional data.
#' It uses a cross validation procedure for selecting the best values of the tuning parameters.
#' It also works when the number of samples is smaller than the number of covariates.
#'
#'
#' For each sample, identify missing features. For each missing feature
#' find the nearest neighbors which have that feature. Impute the missing
#' value using the imputation function on the \emph{selected} vector of values
#' found from the neighbors.
#' By default the \code{wNNSel} method automatically searches for optimal values for a given data matrix.
#'
#' The default method uses \code{x.dist="euclidean"} including selected covariates.
#' The specific distances are computed using important covariates only.
#' If \code{method="1"}, the linear function in absolute value of \eqn{r} is used, defined by
#' \deqn{\frac{|r|}{1-c} - \frac{c}{1-c},}
#' for \eqn{|r|>c}, and 0 otherwise.
#' By default, the power function \eqn{|r|^m } is used when \code{method="2"}. For more detailed discussion, see references.
#'
#'
#'
#'
#' @param x a numeric data \code{matrix} containing missing values
#' @param k optional; the number of nearest neighbors to use for imputation.
#' @param useAll \code{logical}. If \code{TRUE}, all \emph{available} neighbors are used for the imputation.
#' @param x.initial an optional. A complete data matrix e.g. using mean imputation of \code{x}. If provided, it will be used for the computation of correlations.
#' @param x.true a matrix of true or complete data. If provided, \code{MSIE} will be returned in the results list.
#' @param x.dist distance to compute. The default is \code{x.dist="euclidean"}, that uses the Euclidean distance. Set \code{x.dist} to \code{NULL} for Manhattan distance.
#' @param kernel kernel function to be used in nearest neighbors imputation. Default kernel function is "gaussian".
#' @param impute.fn the imputation function to run on the length k vector of values for a missing feature. Defaults to a weighted mean of the neighboring values, weighted by the specified \code{kernel}. If not specified then wNN imputation will be used by default.
####### @param convex \code{logical}. whether selection of variables should be performed in computation of distance. The default is \code{TRUE}.
#' @param convex logical. If \code{TRUE}, selected variables are used for the computation of distance. The default is \code{TRUE}.
#' @param c.values a \code{vector} between 0 and less than 1. It is required when method="1".
#' @param m.values a \code{vector} of integer values, required when method="2".
#' @param lambda.values a \code{vector}, for the tuning parameter \eqn{\lambda}
#' @param times.max maximum number of repetitions for the cross validation procedure.
#' @param testNA.prop proportion of values to be deleted artificially for
#' cross validation in the missing matrix \code{x}. Default method uses 5 percent.
#' @param method convex function, performs selection of variables. If \code{method="1"}, linear function is used and the power function is used when \code{method="2"}.
#' @param withinFolds \code{logical}. Use only if the neighbors/rows belong to particular folds/groups. Default is set to \code{FALSE}.
#' @param folds a \code{list} of vectors specifying folds/groups for neighbors. The length of the list is equal to the number of folds/groups. Each element/vector of the list indicates row indices belonging to that particular group/fold.
#' @param verbose logical. If \code{TRUE}, prints status updates
#' @keywords wNNSel NA cross-validation
#' @seealso \code{\link{cv.wNNSel}}, \code{\link{wNNSel.impute}}
#' @return a list containing imputed data matrix, and cross validation results
#' \item{x.impute}{imputed data matrix}
#' \item{MSIE}{True error. Note it is only available when x.true is provided.}
#' \item{lambda.opt}{optimal parameter selected by cross validation}
#' \item{m.opt}{optimal parameter selected by cross validation}
#' \item{MSIE.cv}{cross validation error}
#'
#' @references Tutz, G. and Ramzan,S. (2015). Improved methods for the imputation of missing data
#' by nearest neighbor methods. \emph{Computational Statistics and Data Analysis}, Vol. 90, pp. 84-99.
#'
#' Faisal, S. and Tutz, G. (2017). Missing value imputation for gene expression data by tailored nearest neighbors.
#' \emph{Statistical Application in Genetics and Molecular Biology}. Vol. 16(2), pp. 95-106.
#' @examples
#' set.seed(3)
#' x.true = matrix(rnorm(100),10,10)
#' ## create 10% missing values in x
#' x.miss = artifNA(x.true, 0.10)
#' ## imputed matrix
#' result <- wNNSel(x.miss)
#' result$x.impute
#' ## cross validation result can be accessed using
#' result$cross.val
# # lambda.opt m.opt MSIE.cv
# # 0.0900000 8.0000000 0.5598143
# ## another example when true x is known
# result2 <- wNNSel(x.miss, x.true=x.true , method="1")
# ## The true MSIE
# result2$MSIE
### [1] 1.034372
#' @export
######### start of wNNSel function ########
wNNSel <- function( x, x.initial=NULL, x.true=NULL, k, useAll=TRUE, x.dist="euclidean", kernel="gaussian", method="2" , impute.fn,
convex=TRUE, m.values = seq(2,8, by=2), c.values = seq(.1,.5, by=0.1) , lambda.values = seq(0,.6,by=.01)[-1] ,
times.max = 5, testNA.prop = 0.05 , withinFolds=FALSE, folds, verbose=TRUE )
{
if (!is.matrix(x) || !is.numeric(x) ) stop("x should be a numeric data matrix")
if( !missing(k) && k >= nrow(x)) stop("k must be less than the number of rows in x")
if( withinFolds==TRUE && missing(folds) ) stop("The argument folds is missing")
col.miss <- apply(x, 2, function(j) all(is.na(j)))
row.miss <- apply(x, 1, function(i) all(is.na(i)))
if (any(col.miss)) {
cat("column(s)", which(col.miss), "are entirely missing.")
stop("Please fix missing columns.")
}
if (any(row.miss)) {
cat("row(s)", which(row.miss), "are entirely missing.")
stop("Please fix missing rows.")
}
if (verbose) print("Cross validation in process...")
result = cv.wNNSel(x, kernel, x.dist, method, m.values, c.values, lambda.values, times.max, testNA.prop )
if (verbose) print("Cross validation complete")
if(method=="2"){
lambda.opt <- result$lambda.opt
m.opt <- result$m.opt
x.impute = wNNSel.impute(x, k, useAll=TRUE, x.initial=NULL, x.dist="euclidean", kernel="gaussian", lambda=lambda.opt, convex=TRUE, method="2", m=m.opt, verbose=FALSE, verbose2=FALSE)
cv.res=list( lambda.opt=lambda.opt, m.opt=m.opt, MSIE.cv=result$MSIE.cv )
} else{
if(method=="1"){
lambda.opt = result$lambda.opt
c.opt = result$c.opt
x.impute = wNNSel.impute(x, k, useAll=TRUE, x.initial=NULL, x.dist="euclidean", kernel="gaussian", lambda=lambda.opt, convex=TRUE, method="1", c=c.opt, verbose=FALSE, verbose2=FALSE)
cv.res=list( MSIE.cv=result$MSIE.cv, lambda.opt, c.opt=c.opt )
}
}
if(!missing(x.true)) { if(!is.null(x.true)) MSIE <- computeMSIE(x, x.impute, x.true)} else{ MSIE=NULL }
return( list( x.impute=x.impute, MSIE=MSIE, cross.val=unlist(cv.res) ) )
}
|
/scratch/gouwar.j/cran-all/cranData/wNNSel/R/wNNSel.R
|
###############################################################################
## ##
## wNNSel - weighted nearest neighbor imputation using selected neighbors ##
## ##
## Author: Shahla Faisal [email protected] ##
## ##
###############################################################################
#'
#' Weighted Nearest Neighbor Imputation of Missing Values using Selected Variables
#'
##### Imputation using the wNNSel method.
#'
#' This function imputes the missing values using user-specified values of the tuning parameters.
#' It also works when the number of samples is smaller than the number of covariates.
#'
#' For each sample, identify missing features. For each missing feature
#' find the nearest neighbors which have that feature. Impute the missing
#' value using the imputation function on the \emph{selected} vector of values
#' found from the neighbors.
#'
#'
#' @param x a \code{matrix} containing missing values
#' @param k optional; the number of nearest neighbors to use for imputation.
#' @param useAll \code{logical}. The default is \code{useALL=TRUE}, that is, all \emph{available} neighbors are used for the imputation.
#' @param x.initial an optional. A complete data matrix e.g. using mean imputation of \code{x}. If provided, it will be used for the computation of correlations.
#' @param x.dist distance to compute. The default is \code{x.dist="euclidean"}, that uses the Euclidean distance. Set \code{x.dist} to \code{NULL} for Manhattan distance.
#' @param kernel kernel function to be used in nearest neighbors imputation. Default kernel function is "gaussian".
#' @param lambda \code{scalar}, a tuning parameter
#' @param impute.fn the imputation function to run on the length k vector of values for a missing feature.
#' Defaults to a weighted mean of the neighboring values, weighted by the specified \code{kernel}. If not specified then wNN imputation will be used by default.
#' @param convex logical. If \code{TRUE}, selected variables are used for the computation of distance. The default is \code{TRUE}.
#' @param m \code{scalar}, a tuning parameter required by the power function.
#' @param c \code{scalar}, a tuning parameter required by the linear function.
#' @param method convex function, performs selection of variables. If \code{method="1"}, linear function is used and the power function is used when \code{method="2"}.
#' @param withinFolds \code{logical}. Use only if the neighbors/rows belong to particular folds/groups. Default is set to \code{FALSE}.
#' @param folds a \code{list} of vectors specifying folds/groups for neighbors. The length of the list is equal to the number of folds/groups.
#' Each element/vector of the list indicates row indices belonging to that particular group/fold.
#' @param verbose logical. If \code{TRUE}, prints status updates
#' @param verbose2 logical. If \code{TRUE}, prints status updates with more detail
#' @keywords wNNSel NA weights
#' @seealso \code{\link{cv.wNNSel}}, \code{\link{wNNSel}}
#' @return imputed data matrix
#' @examples
#' set.seed(3)
#' x = matrix(rnorm(100),10,10)
#' x.miss = x > 1
#' x[x.miss] = NA
#' wNNSel.impute(x)
#' wNNSel.impute(x, lambda=0.5, m=2)
#' @export
#'
######### start of wNNSel.impute function ########
wNNSel.impute <- function(x, k, useAll=TRUE, x.initial=NULL, x.dist="euclidean", kernel="gaussian", lambda=0.3, impute.fn, convex=TRUE, method="2", m=2,c=0.3,
withinFolds=FALSE, folds, verbose=TRUE, verbose2=FALSE) {
if (!is.matrix(x) || !is.numeric(x) ) stop("x should be a numeric data matrix")
if( !missing(k) && k >= nrow(x)) stop("k must be less than the number of rows in x") ## this is now unnecessary if count.NA is used
if( withinFolds==TRUE && missing(folds) ) stop("The argument folds is missing")
col.miss <- apply(x, 2, function(j) all(is.na(j)))
row.miss <- apply(x, 1, function(i) all(is.na(i)))
if (any(col.miss)) {
cat("column(s)", which(col.miss), "are entirely missing.")
stop("Please fix missing columns.")
}
if (any(row.miss)) {
cat("row(s)", which(row.miss), "are entirely missing.")
stop("Please fix missing rows.")
}
check(method, c, m)
prelim = impute.prelim(x)
if (prelim$numMissing == 0) { print("x matrix has no missing values"); return (x)}
missing.matrix = prelim$missing.matrix
x.missing = prelim$x.missing
missing.rows.indices = prelim$missing.rows.indices
if (missing(impute.fn)) {
impute.fn <- impute.fn.wNNSel
}
if (verbose) print("Computing distance matrix...")
dist.mat = dist.wNNSel( x=x, x.initial=x.initial, x.dist=x.dist, convex=convex, c=c, m=m, method=method )
if (verbose) print("Distance matrix complete")
x.missing.imputed = t(apply(x.missing, 1, function(i)
{
rowIndex = as.numeric(i[1])
i.original = unlist(i[-1])
if(verbose2) print(paste("Imputing row", rowIndex,sep=" "))
missing.cols = which(missing.matrix[rowIndex,])
if(length(missing.cols) == ncol(x))
warning( paste("Row",rowIndex,"is completely missing",sep=" ") )
imputed.values = sapply(missing.cols, function(j)
{
if(withinFolds)
{
all.neighbor.indices = which(!missing.matrix[,j])
valid.ind = as.vector(na.omit (match(folds, all.neighbor.indices )))
neighbor.indices <- all.neighbor.indices[-c(valid.ind)]
} else {
neighbor.indices = which(!missing.matrix[,j])
}
if (verbose2) { print(paste("Row",rowIndex,"and column", j, "is being imputed", sep=" ") ) }
if (verbose2) { print("All nearest neighbour indices having non-missing values in jth col are") ; print(neighbor.indices) }
if (!is.null(dist.mat))
{
if(convex==TRUE) {
knn.dist.all = dist.mat[dist.mat[,1] == rowIndex & dist.mat[,2] == j, 3:ncol(dist.mat)][neighbor.indices]
} else {
knn.dist.all = dist.mat [ rowIndex , neighbor.indices ]
}
knn.dist <- knn.dist.all [ !is.nan(knn.dist.all) ]
if (verbose2) { print(" All 'valid' nearest neighbour distances are " ) ; print(knn.dist) }
}
impute.fn(x[neighbor.indices,j], knn.dist, lambda, kernel, k )
} )
i.original[missing.cols] = imputed.values
i.original
} ) )
x[missing.rows.indices,] = x.missing.imputed
x.missing.imputed <- c(x.missing.imputed)
x.imputed = x
return(x)
} ##end of wNNSel.impute function
|
/scratch/gouwar.j/cran-all/cranData/wNNSel/R/wNNSel.impute.R
|
# To avoid NOTE in 'R CMD check' for an unknown variable, specifically,
# the note "no visible binding for global variable 'Freq'".
globalVariables("Freq")
perm.hom.test <-
function(x, type = c("cont", "flat", "raw"), variable = NULL, R = 9999)
{
# Note: unfct and unct are due to Marc Schwartz.
# unfct converts a "flat" contingency table to a raw data frame.
unfct <- function(x)
{
z <- sapply(1:nrow(x), function(i) x[rep(i, each = x$Freq[i]), ],simplify = FALSE)
z <- subset(do.call("rbind", z), select = -Freq)
for (i in 1:ncol(z)) {z[[i]] <- type.convert(as.character(z[[i]]))}
data.frame(z,row.names=NULL)
}
# unct converts a contingency table (of class table) to a raw data frame.
unct <- function(x)
{
y <- as.data.frame(x)
unfct(y)
}
if (identical(type,c("cont", "flat", "raw"))) type <- "cont"
if (type=="cont") x <- unct(as.table(as.matrix(x[-1])))
else
(if (type=="flat") {names(x)[3]="Freq"; x <- unfct(x)})
obs <- suppressWarnings(chisq.test(x[[1]],x[[2]],correct=FALSE)$statistic)
n <- length(x[[1]])
z <- vector(length=R)
for (i in 1:R)
{
u <- sample(x[[2]])
z[i] <- suppressWarnings(chisq.test(x[[1]],u,correct=FALSE)$statistic)
}
p <- signif((sum(z >= obs)+1)/(R+1),digits=3)
pv <- c((p>=0.001)&(p<=0.999),(p<0.001),(p>0.999))
pt <- c(p,"P < 0.001","P > 0.999")
p.value <- pt[pv]
# FOR THE RESULTS
stat.name <- "chi.square"
results <-
list(Perm.values=z,Header=c("RESULTS OF PERMUTATION HOMOGENEITY TEST\n",
paste("BASED ON",R,"REPLICATIONS")),Variable=variable,
Statistic=stat.name,Observed=obs,n=n,Null="homogeneous",
Alternative="nonhomogeneous",P.value=p.value,p.value=p)
class(results) <- "perm.cs.hom" # permutation, chi-square homogeneity.
results
}
|
/scratch/gouwar.j/cran-all/cranData/wPerm/R/perm.hom.test.R
|
perm.ind.loc <-
function(x, y, parameter, stacked = TRUE, variable = NULL,
alternative = c("two.sided","less","greater"),
R=9999)
{
statistic <- parameter;
if (stacked)
{
variable <- all.names(substitute(x))
if (length(variable)>1) variable <- variable[[3]]
y <- as.factor(y)
l <- levels(y)
pop.1 <- l[1]
pop.2 <- l[2]
u <- x[y==l[1]]
v <- x[y==l[2]]
x <- u
y <- v
}
else
{
pop.1 <- all.names(substitute(x))
if (length(pop.1)>1) pop.1 <- pop.1[[3]]
pop.2 <- all.names(substitute(y))
if (length(pop.2)>1) pop.2 <- pop.2[[3]]
}
x <- x
y <- y
m <- length(x)
n <- length(y)
obs <- statistic(x)-statistic(y)
u <- stack(list(x=x,y=y))
s <- u$values
t <- u$ind
z <- vector(length=R)
for (i in 1:R)
{
v <- sample(t);
z[i] <- statistic(s[v=="x"])-statistic(s[v=="y"])
}
if (identical(alternative,c("two.sided","less","greater")))
alternative <- "two.sided"
ltp <- (sum(z<=obs)+1)/(R+1)
rtp <- (sum(z>=obs)+1)/(R+1)
tc <- c("two.sided","less","greater")
pc <- c(2*min(ltp,rtp),ltp,rtp)
p <- signif(pc[tc==alternative],digits=3)
pv <- c((p>=0.001)&(p<=0.999),(p<0.001),(p>0.999))
pt <- c(p,"P < 0.001","P > 0.999")
p.value <- pt[pv]
ac <- c("shifted","shifted.left","shifted.right")
alt <- ac[tc==alternative]
# FOR THE RESULTS
stat.name <- paste("diff.",all.names(substitute(parameter)),sep="");
results <-
list(Stacked=stacked,Perm.values=z,
Header=c("RESULTS OF PERMUTATION INDEPENDENT TWO-SAMPLE LOCATION TEST\n",
paste("BASED ON",R,"REPLICATIONS")),
Variable=variable,Pop.1=pop.1,Pop.2=pop.2,n.1=m,n.2=n,Statistic=stat.name,
Observed=obs,Null="identical",Alternative=alt,P.value=p.value,p.value=p)
class(results) <- "perm.ts.ind" # permutation, two-sample, independent.
results
}
|
/scratch/gouwar.j/cran-all/cranData/wPerm/R/perm.ind.loc.R
|
perm.ind.spread <-
function(x, y, parameter, stacked = TRUE, variable = NULL,
alternative = c("two.sided","less","greater"),
R=9999)
{
statistic <- parameter
if (stacked)
{
variable <- all.names(substitute(x))
if (length(variable)>1) variable <- variable[[3]]
y <- as.factor(y)
l <- levels(y)
pop.1 <- l[1]
pop.2 <- l[2]
u <- x[y==l[1]]
v <- x[y==l[2]]
x <- u
y <- v
}
else
{
pop.1 <- all.names(substitute(x))
if (length(pop.1)>1) pop.1 <- pop.1[[3]]
pop.2 <- all.names(substitute(y))
if (length(pop.2)>1) pop.2 <- pop.2[[3]]
}
x <- x
y <- y
m <- length(x)
n <- length(y)
obs <- statistic(x)/statistic(y)
u <- stack(list(x=x,y=y))
s <- u$values
t <- u$ind
z <- vector(length=R)
for (i in 1:R)
{
v <- sample(t)
z[i] <- statistic(s[v=="x"])/statistic(s[v=="y"])
}
if (identical(alternative,c("two.sided","less","greater")))
alternative <- "two.sided"
ltp <- (sum(z<=obs)+1)/(R+1)
rtp <- (sum(z>=obs)+1)/(R+1)
tc <- c("two.sided","less","greater")
pc <- c(2*min(ltp,rtp),ltp,rtp)
p <- signif(pc[tc==alternative],digits=3)
pv <- c((p>=0.001)&(p<=0.999),(p<0.001),(p>0.999))
pt <- c(p,"P < 0.001","P > 0.999")
p.value <- pt[pv]
ac <- c("different.spread","smaller.spread","larger.spread")
alt <- ac[tc==alternative]
# FOR THE RESULTS
stat.name <- paste("ratio.",all.names(substitute(parameter)),sep="");
results <-
list(Stacked=stacked,Perm.values=z,
Header=c("RESULTS OF PERMUTATION INDEPENDENT TWO-SAMPLE SPREAD TEST\n",
paste("BASED ON",R,"REPLICATIONS")),
Variable=variable,Pop.1=pop.1,Pop.2=pop.2,n.1=m,n.2=n,Statistic=stat.name,
Observed=obs,Null="identical",Alternative=alt,P.value=p.value,p.value=p)
class(results) <- "perm.ts.ind" # permutation, two-sample, independent.
results
}
|
/scratch/gouwar.j/cran-all/cranData/wPerm/R/perm.ind.spread.R
|
# To avoid NOTE in 'R CMD check' for an unknown variable, specifically,
# the note "no visible binding for global variable 'Freq'".
globalVariables("Freq")
perm.ind.test <-
function(x, type = c("cont", "flat", "raw"), var.names = NULL, R = 9999)
{
# Note: unfct and unct are due to Marc Schwartz.
# unfct converts a "flat" contingency table to a raw data frame.
unfct <- function(x)
{
z <- sapply(1:nrow(x), function(i) x[rep(i, each = x$Freq[i]), ],simplify = FALSE)
z <- subset(do.call("rbind", z), select = -Freq)
for (i in 1:ncol(z)) {z[[i]] <- type.convert(as.character(z[[i]]))}
data.frame(z,row.names=NULL)
}
# unct converts a contingency table (of class table) to a raw data frame.
unct <- function(x)
{
y <- as.data.frame(x)
unfct(y)
}
if (identical(type,c("cont", "flat", "raw"))) type <- "cont"
if (type=="cont") x <- unct(as.table(as.matrix(x[-1])))
else
(if (type=="flat") {names(x)[3]="Freq"; x <- unfct(x)})
if (is.null(var.names)) var.names <- c("Var.1", "Var.2")
obs <- suppressWarnings(chisq.test(x[[1]],x[[2]],correct=FALSE)$statistic)
n <- length(x[[1]])
z <- vector(length=R)
for (i in 1:R)
{
u <- sample(x[[2]])
z[i] <- suppressWarnings(chisq.test(x[[1]],u,correct=FALSE)$statistic)
}
p <- signif((sum(z >= obs)+1)/(R+1),digits=3)
pv <- c((p>=0.001)&(p<=0.999),(p<0.001),(p>0.999))
pt <- c(p,"P < 0.001","P > 0.999")
p.value <- pt[pv]
# FOR THE RESULTS
stat.name <- "chi.square"
results <-
list(Perm.values=z,Header=c("RESULTS OF PERMUTATION INDEPENDENCE TEST\n",
paste("BASED ON",R,"REPLICATIONS")),Variable.1=var.names[1],
Variable.2=var.names[2],Statistic=stat.name,Observed=obs,n=n,
Null="nonassociated",Alternative="associated",P.value=p.value,
p.value=p)
class(results) <- "perm.two.var" # permutation, two variables.
results
}
|
/scratch/gouwar.j/cran-all/cranData/wPerm/R/perm.ind.test.R
|
perm.oneway.anova <-
function(x, y, trim = 0, ford = NULL, R = 9999)
{
response <- all.names(substitute(x))
if (length(response)>1) response <- response[[3]]
factor <- all.names(substitute(y))
if (length(factor)>1) factor <- factor[[3]]
y <- as.factor(y)
if (!is.null(ford)) y <- factor(y,levels(y)[ford])
Mean <- tapply(x,y,mean)
n <- tapply(x,y,length)
SD <- tapply(x,y,sd)
trim.vector <- function(x)
{
n <- length(x)
lo <- floor(n * trim) + 1
hi <- n + 1 - lo
x <- sort.int(x, partial = unique(c(lo, hi)))[lo:hi]
x
}
F.trim <- function(x,y)
{
l <- tapply(x,y,trim.vector)
d <- stack(l)
oneway.test(d$values~d$ind,var.equal=TRUE)$statistic
}
obs <- F.trim(x,y)
z <- vector(length=R)
for (i in 1:R)
{
u <- sample(y)
z[i] <- F.trim(x,u)
}
p <- signif((sum(z >= obs)+1)/(R+1),digits=3)
pv <- c((p>=0.001)&(p<=0.999),(p<0.001),(p>0.999))
pt <- c(p,"P < 0.001","P > 0.999")
p.value <- pt[pv]
# FOR THE RESULTS
stat.name <- "F.trim"
results <-
list(Perm.values=z,
Header=c(paste("RESULTS OF PERMUTATION ",100*trim,"% TRIMMED ONE-WAY ANOVA\n",sep=""),
paste("BASED ON",R,"REPLICATIONS ")),
Response=response,Factor=factor,Levels=levels(y),n=n,Mean=Mean,SD=SD,
Statistic=stat.name,Observed=obs,P.value=p.value,p.value=p,Trim=trim)
class(results) <- "perm.oneway.anova" # permutation, one-way ANOVA.
results
}
|
/scratch/gouwar.j/cran-all/cranData/wPerm/R/perm.oneway.anova.R
|
perm.paired.loc <-
function(x, y, parameter, variable = NULL,
alternative = c("two.sided","less","greater"),
R=9999)
{
statistic <- parameter
pop.1 <- all.names(substitute(x))
if (length(pop.1)>1) pop.1 <- pop.1[[3]]
pop.2 <- all.names(substitute(y))
if (length(pop.2)>1) pop.2 <- pop.2[[3]]
obs <- statistic(x)-statistic(y)
n <- length(x)
z <- vector(length=R)
for (i in 1:R)
{
b <- rbinom(n,1,0.5)
u <- b*x+(1-b)*y
v <- (1-b)*x+b*y
z[i] <- statistic(u)-statistic(v)
}
if (identical(alternative,c("two.sided","less","greater")))
alternative <- "two.sided"
ltp <- (sum(z<=obs)+1)/(R+1)
rtp <- (sum(z>=obs)+1)/(R+1)
tc <- c("two.sided","less","greater")
pc <- c(2*min(ltp,rtp),ltp,rtp)
p <- signif(pc[tc==alternative],digits=3)
pv <- c((p>=0.001)&(p<=0.999),(p<0.001),(p>0.999))
pt <- c(p,"P < 0.001","P > 0.999")
p.value <- pt[pv]
ac <- c("shifted","shifted.left","shifted.right")
alt <- ac[tc==alternative]
# FOR THE RESULTS
stat.name <- paste("diff.",all.names(substitute(parameter)),sep="")
results <-
list(Perm.values=z,
Header=c("RESULTS OF PERMUTATION PAIRED LOCATION TEST\n",
paste("BASED ON",R,"REPLICATIONS")),
Variable=variable,Pop.1=pop.1,Pop.2=pop.2,n=n,Statistic=stat.name,
Observed=obs,Null="identical",Alternative=alt,P.value=p.value,
p.value=p)
class(results) <- "perm.paired.loc" # permutation, paired, location.
results
}
|
/scratch/gouwar.j/cran-all/cranData/wPerm/R/perm.paired.loc.R
|
perm.relation <-
function(x, y, method=c("pearson", "kendall", "spearman"),
alternative = c("two.sided","less","greater"), R = 9999)
{
Cor <- function(x,y) cor(x,y,method=method)
var1.name <- all.names(substitute(x))
if (length(var1.name)>1) var1.name <- var1.name[[3]]
var2.name <- all.names(substitute(y))
if (length(var2.name)>1) var2.name <- var2.name[[3]]
obs <- Cor(x,y)
n <- length(x)
z <- vector(length=R)
for (i in 1:R)
{
w <- sample(y)
z[i] <- Cor(x,w)
}
ltp <- (sum(z<=obs)+1)/(R+1)
rtp <- (sum(z>=obs)+1)/(R+1)
if (identical(method,c("pearson", "kendall", "spearman"))) method <- "pearson"
if (identical(alternative,c("two.sided","less","greater"))) alternative <- "two.sided"
tc <- c("two.sided","less","greater")
pc <- c(2*min(ltp,rtp),ltp,rtp)
p <- signif(pc[tc==alternative],digits=3)
pv <- c((p>=0.001)&(p<=0.999),(p<0.001),(p>0.999))
pt <- c(p,"P < 0.001","P > 0.999")
p.value <- pt[pv]
ac <- c("relation","neg.relation","pos.relation")
alt <- ac[tc==alternative]
# FOR THE RESULTS
stat.name <- paste(method,".","cor",sep="")
results <-
list(Perm.values=z,Header=c("RESULTS OF PERMUTATION RELATIONSHIP TEST\n",
paste("BASED ON",R,"REPLICATIONS")),Variable.1=var1.name,
Variable.2=var2.name,n=n,Statistic=stat.name,Observed=obs,
Null="no.relation",Alternative=alt,P.value=p.value,p.value=p)
class(results) <- "perm.two.var"; # permutation, two variables.
results
}
|
/scratch/gouwar.j/cran-all/cranData/wPerm/R/perm.relation.R
|
print.perm.cs.hom <-
function(x, ...)
{
hist(x$Perm.values,breaks=20,xlab=paste("permutation",x$Statistic),
main=paste("Histogram of permutation ",x$Statistic,"s",sep=""))
abline(v=x$Observed,col="2")
leg.text <- expression(Observed)
legend("topright",leg.text,col=2,lwd=2,cex=.6)
cat("\n\n",x$Header,"\n\n")
if (is.null(x$Variable))
print(data.frame(SUMMARY="STATISTICS",n=x$n,Statistic=x$Statistic,
Observed=x$Observed),row.names=FALSE)
else
print(data.frame(SUMMARY="STATISTICS",Variable=x$Variable,n=x$n,
Statistic=x$Statistic,Observed=x$Observed),row.names=FALSE)
cat("\n")
print(data.frame(HYPOTHESIS="TEST",Null=x$Null,Alternative=x$Alternative,
P.value=x$P.value),row.names=FALSE)
cat("\n\n")
}
|
/scratch/gouwar.j/cran-all/cranData/wPerm/R/print.perm.cs.hom.R
|
print.perm.oneway.anova <-
function(x, ...)
{
hist(x$Perm.values,breaks=20,xlab="permutation F-value",
main="Histogram of permutation F-values")
abline(v=x$Observed,col="2")
leg.text <- expression(Observed)
legend("topright",leg.text,col=2,lwd=2,cex=.6)
cat("\n\n",x$Header,"\n\n")
cat("SUMMARY STATISTICS\n\n")
u <- data.frame(x$Levels,n=x$n,Mean=x$Mean,SD=x$SD)
names(u)[1]=x$Factor
print(u,row.names=FALSE)
cat("\n")
cat("HYPOTHESIS TEST\n\n")
print(data.frame(Response=x$Response,Factor=x$Factor,Trim=x$Trim,Statistic=x$Statistic,
Observed=x$Observed,P.value=x$P.value),row.names=FALSE)
cat("\n\n")
}
|
/scratch/gouwar.j/cran-all/cranData/wPerm/R/print.perm.oneway.anova.R
|
print.perm.paired.loc <-
function(x, ...)
{
hist(x$Perm.values,breaks=20,xlab=paste("permutation",x$Statistic),
main=paste("Histogram of permutation ",x$Statistic,"s",sep=""))
abline(v=x$Observed,col="2")
leg.text <- expression(Observed)
legend("topright",leg.text,col=2,lwd=2,cex=.6)
cat("\n\n",x$Header,"\n\n")
if (!is.null(x$Variable))
print(data.frame(SUMMARY="STATISTICS",Variable=x$Variable,
Pop.1=x$Pop.1,Pop.2=x$Pop.2,n=x$n,Statistic=x$Statistic,
Observed=x$Observed),row.names=FALSE)
else
print(data.frame(SUMMARY="STATISTICS",Pop.1=x$Pop.1,Pop.2=x$Pop.2,n=x$n,
Statistic=x$Statistic,Observed=x$Observed),row.names=FALSE)
cat("\n")
print(data.frame(HYPOTHESIS="TEST",Null=x$Null,Alternative=x$Alternative,
P.value=x$P.value),row.names=FALSE)
cat("\n\n")
}
|
/scratch/gouwar.j/cran-all/cranData/wPerm/R/print.perm.paired.loc.R
|
print.perm.ts.ind <-
function(x, ...)
{
hist(x$Perm.values,breaks=20,xlab=paste("permutation",x$Statistic),
main=paste("Histogram of permutation ",x$Statistic,"s",sep=""));
abline(v=x$Observed,col="2");
leg.text <- expression(Observed);
legend("topright",leg.text,col=2,lwd=2,cex=.6);
cat("\n\n",x$Header,"\n\n");
if (x$Stacked || !is.null(x$Variable))
print(data.frame(SUMMARY="STATISTICS",Variable=x$Variable,
Pop.1=x$Pop.1,Pop.2=x$Pop.2,n.1=x$n.1,n.2=x$n.2,
Statistic=x$Statistic,Observed=x$Observed),
row.names=FALSE)
else
print(data.frame(SUMMARY="STATISTICS",Pop.1=x$Pop.1,Pop.2=x$Pop.2,
n.1=x$n.1,n.2=x$n.2,Statistic=x$Statistic,Observed=x$Observed),
row.names=FALSE)
cat("\n")
print(data.frame(HYPOTHESIS="TEST",Null=x$Null,Alternative=x$Alternative,
P.value=x$P.value),row.names=FALSE)
cat("\n\n")
}
|
/scratch/gouwar.j/cran-all/cranData/wPerm/R/print.perm.ts.ind.R
|
print.perm.two.var <-
function(x, ...)
{
hist(x$Perm.values,breaks=20,xlab=paste("permutation",x$Statistic),
main=paste("Histogram of permutation ",x$Statistic,"s",sep=""))
abline(v=x$Observed,col="2")
leg.text <- expression(Observed)
legend("topright",leg.text,col=2,lwd=2,cex=.6)
cat("\n\n",x$Header,"\n\n")
print(data.frame(SUMMARY="STATISTICS",Variable.1=x$Variable.1,
Variable.2=x$Variable.2,n=x$n,Statistic=x$Statistic,
Observed=x$Observed),row.names=F)
cat("\n")
print(data.frame(HYPOTHESIS="TEST",Null=x$Null,Alternative=x$Alternative,
P.value=x$P.value),row.names=F)
cat("\n\n")
}
|
/scratch/gouwar.j/cran-all/cranData/wPerm/R/print.perm.two.var.R
|
#' @title CorrelationOverlap
#' @description This function computes the correlation between Nodes and the Overlapping Nodes of interest.
#' @param Data data.frame containing the expression data. Nodes on the Rows, Individuals on the Columns. Don't forget to give the names to the Nodes and to the Individuals. Nodes must have the row.names() with the Node Name.
#' @param Overlap A vector containing the names of the Nodes of interest.
#' @param method Spearman ("s", "spearman") or Pearson ("p", "pearson") correlation
#' @rdname CorrelationOverlap
#' @author Deisy Morselli Gysi <deisy at bioinf.uni-leipzig.de>
#' @export
#' @importFrom stats cor
#' @importFrom HiClimR fastCor
#' @importFrom Rfast transpose
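#' @examples
#' \dontrun{
#' # Minimal usage sketch (not from the original documentation): a small random
#' # expression matrix with 10 genes and 10 samples, treating the first three
#' # genes as the Overlap set. Spearman is used so only stats::cor is needed.
#' set.seed(1)
#' Data = as.data.frame(matrix(rnorm(100), nrow = 10,
#'                             dimnames = list(paste0("G", 1:10), paste0("S", 1:10))))
#' CorrelationOverlap(Data = Data, Overlap = c("G1", "G2", "G3"), method = "s")
#' }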
CorrelationOverlap = function(Data, Overlap, method ){
Overlap = as.character(Overlap)
if(method %in% c("pearson", "p")){
COR = suppressMessages(suppressWarnings(HiClimR::fastCor(t((as.matrix(Data))),
nSplit = 10,
upperTri = FALSE, verbose = F)
))
} else{
COR = suppressMessages(suppressWarnings(stats::cor(t(Data),
method = method,
use = "pairwise.complete.obs")))
}
diag(COR) <- 0
COR[is.na(COR)] = 0
# Final_Correlation = subset(COR, row.names(COR) %in% Overlap)
#
Final_Correlation = COR[row.names(COR) %in% Overlap, colnames(COR) %in% Overlap]
return(Final_Correlation)
}
|
/scratch/gouwar.j/cran-all/cranData/wTO/R/Correlation.Overlap.R
|
#' @title Cut.off
#' @description Computes the cutoff threshold using the descriptive values generated by the bootstrap; it also plots the empirical distribution and the sample wTO.
#' @param wTO_value is the table returned by the wTO with all the simulated values.
#' @param type title of the graph.
#' @param plot logical. If \code{TRUE}, the diagnostic plot is drawn.
#' @return a list with the empirical and the sample quantiles; if \code{plot = TRUE}, also plots the cutoff values on both distributions.
#' @keywords internal
#' @import graphics
#' @importFrom stats density quantile
#' @importFrom methods is
#'
#'
#' @author Deisy Morselli Gysi <deisy at bioinf.uni-leipzig.de>
Cut.off = function(wTO_value, type, plot){
`%ni%` <- Negate(`%in%`)
wTO_value = plyr::arrange(wTO_value, wTO_value$Var1)
wTO_value[is.na(wTO_value)]<- 0
wTO_value$relstar = wTO_value[,2]/sum(wTO_value[,2], na.rm = T)
wTO_value$relreal = wTO_value[,3]/sum(wTO_value[,3], na.rm = T)
quantile.from.freq <- function(vals,freq,quant) {
ord <- order(vals)
cs <- cumsum(freq[ord])
if(length(which(cs<quant)) > 0){
return(vals[max(which(cs<quant))+1])}
if(length(which(cs<quant)) == 0){
return(min(vals))
}
}
wTO_value$Var1 = as.numeric(as.matrix(wTO_value$Var1))
quantile_star = data.frame( quantile.from.freq(wTO_value$Var1, wTO_value$relstar, 0.001),
quantile.from.freq(wTO_value$Var1, wTO_value$relstar, 0.025),
quantile.from.freq(wTO_value$Var1, wTO_value$relstar, 0.1),
quantile.from.freq(wTO_value$Var1, wTO_value$relstar, 0.9),
quantile.from.freq(wTO_value$Var1, wTO_value$relstar, 0.975),
quantile.from.freq(wTO_value$Var1, wTO_value$relstar, 0.999))
quantile_real = data.frame(quantile.from.freq(wTO_value$Var1, wTO_value$relreal, 0.001),
quantile.from.freq(wTO_value$Var1, wTO_value$relreal, 0.025),
quantile.from.freq(wTO_value$Var1, wTO_value$relreal, 0.1),
quantile.from.freq(wTO_value$Var1, wTO_value$relreal, 0.9),
quantile.from.freq(wTO_value$Var1, wTO_value$relreal, 0.975),
quantile.from.freq(wTO_value$Var1, wTO_value$relreal, 0.999))
names(quantile_real)= names(quantile_star) = c("0.1%", "2.5%", "10%", "90%", "97.5%", "99.9%")
if(plot == TRUE){
PLOT <- function(wTO_value){
graphics::par(xpd=FALSE)
graphics::plot(wTO_value$relstar~ wTO_value$Var1, type = "l",
xlim = c(floor(min (wTO_value$Var1) ),1),
main = type,
ylim = c(0, max(wTO_value$relstar)), axes = F,
xlab = "wTO", ylab = "Density", col.main = "steelblue2", col.lab = "steelblue2")
graphics::lines(wTO_value$Var1, wTO_value$relreal, type = "l", col = "violet")
graphics::abline(h = 0, col = "gray", lty = 4)
graphics::abline(v = c(quantile_real), col = c("red", "orange", "yellow", "yellow", "orange", "red"
), lty = 2)
graphics::axis(1, las = 1, cex.axis = 0.6, col = "steelblue",
col.ticks = "steelblue3", col.axis = "steelblue")
graphics::axis(2, las = 1, cex.axis = 0.6, col = "steelblue",col.ticks = "steelblue3", col.axis = "steelblue")
graphics::par(xpd=T)
graphics::legend(c(0.9,max(wTO_value$relstar)), c("wTO - Data set",
"wTO - Reshuffle",
"99.9%",
"95%",
"80%"),
inset=c(-0.8,0),lwd = 2,
lty = 1, col = c("violet",
"black",
"yellow", "orange", "red"), bty = "n", cex = 0.5 )
}
res <- try(PLOT(wTO_value))
if(!methods::is(res, 'try-error')){
res
}
}
return(list(Empirical.Quantile = quantile_star, Quantile = quantile_real))
}
|
/scratch/gouwar.j/cran-all/cranData/wTO/R/Cut.off2.R
|
#' @title ExampleGRF
#' @aliases ExampleGRF
#' @description ExampleGRF data.frame containing data.frame containing names of GRFs.
#' @format data.frame 184 lines, 1 column.
#' @usage data(ExampleGRF)
#' @name ExampleGRF
#' @docType data
#' @keywords datasets
ExampleGRF = read.table("./data/ExampleGRF.txt", stringsAsFactors = F)
ExampleGRF = base::unique(ExampleGRF)
|
/scratch/gouwar.j/cran-all/cranData/wTO/R/ExampleGRF.R
|
#' @title Microarray_Expression1
#' @aliases Microarray_Expression1
#' @description Microarray_Expression1 data.frame containing expression data for 1000 genes and 18 individuals.
#' @format data.frame 1000 lines, 18 columns.
#' @usage Microarray_Expression1
#' @name Microarray_Expression1
# require(plyr)
# require(snow)
# require(base)
Microarray_Expression1 = utils::read.table("./data/Microarray_Expression1.txt")
# load_all()
|
/scratch/gouwar.j/cran-all/cranData/wTO/R/ExampledfExpression.R
|
#' @title Microarray_Expression2
#' @aliases Microarray_Expression2
#' @description Microarray_Expression2 data.frame containing expression data for 1000 genes and 18 individuals.
#' @format data.frame 1000 lines, 18 columns.
#' @usage Microarray_Expression2
#' @name Microarray_Expression2
# require(plyr)
# require(snow)
# require(base)
Microarray_Expression2 = utils::read.table("./data/Microarray_Expression2.txt")
# load_all()
|
/scratch/gouwar.j/cran-all/cranData/wTO/R/ExampledfExpression2.R
|
#' @title NetVis
#' @param Node.1 Names of the Nodes.1 that are connected to the Nodes.2. It's the output from wTO.Complete or Consensus.
#' @param Node.2 Names of the Nodes.2 that are connected to the Nodes.1. It's the output from wTO.Complete or Consensus.
#' @param wTO weight of the links, the wTO output from wTO.Complete or wTO.Consensus.
#' @param pval p-values for the wTO value. By default it is NULL.
#' @param padj Adjusted p-values for the wTO value. By default it is NULL.
#' @param cutoff A list containing the kind of cutoff to be used (pval, Threshold or pval.adj) and its value. Example: cutoff = list(kind = "Threshold", value = 0.5)
#' @param layout a layout from the igraph package.
#' @param smooth.edges If the edges should be smoothed or not.
#' @param path If the graph should be saved specify the name of the file.
#' @param Cluster TRUE or FALSE if the nodes should be clustered (double click to uncluster).
#' @param MakeGroups algorithm to find clusters. One of the following: walktrap, optimal, spinglass, edge.betweenness, fast_greedy, infomap, louvain, label_prop, leading_eigen. Defaults to FALSE.
#' @param legend TRUE or FALSE if the legend should appear.
#' @param manipulation TRUE or FALSE if the graph should be editable.
#' @param shape a list shape=list(shape = "triangle", names = NULL), with the shape and the IDs that should have a different shape, shape can be: diamond, star, triangle, triangleDown or square.
#' @description Given a set of nodes, the weights of the edges, and a cutoff for the edges, it draws the network. Returns a list with the node and edge attributes, and plots the network.
#' @importFrom visNetwork visNetwork visInteraction visEdges visOptions visClusteringByGroup visLegend visPhysics visIgraphLayout visOptions visSave visExport
#' @importFrom plyr arrange join
#' @importFrom igraph graph_from_data_frame degree E
#' @importFrom data.table as.data.table
#' @importFrom magrittr "%>%"
#' @author Deisy Morselli Gysi <deisy at bioinf.uni-leipzig.de>
#' @export
#'
#' @examples
#' \dontrun{
#' X = wTO.Complete( k =1, n = 5, Data = Microarray_Expression1,
#' Overlap = ExampleGRF$x[1:10], method = "p", plot = FALSE)
#' # Plot with the default arguments.
#' NetVis(Node.1 = X$wTO$Node.1, Node.2 = X$wTO$Node.2,
#' wTO = X$wTO$wTO_sign, cutoff = list(kind =
#' "Threshold", value = 0.50))
#'
#' # Plotting just the edges with p-value < 0.05, with straight edges, nodes clustered,
#' # no legend and manipulation of the graph enabled.
#' NetVis(Node.1 = X$wTO$Node.1, Node.2 = X$wTO$Node.2,
#' wTO = X$wTO$wTO_sign, pval = X$wTO$pval_sign,
#' padj = X$wTO$pval_sign,
#' cutoff= list(kind = "pval", value = 0.05),
#' smooth.edges = FALSE,
#' Cluster = TRUE, legend = FALSE, manipulation = TRUE)
#' # Plotting just the edges with wTO > 0.50, no legend and the nodes:
#' # "ZNF738", "ZNF677" with triagle shape,
#' # no legend and mapipulation of the graph enabled.
#' NetVis(Node.1 = X$wTO$Node.1, Node.2 = X$wTO$Node.2,
#' wTO = X$wTO$wTO_sign, pval = X$wTO$pval_sign,
#' padj = X$wTO$pval_sign, cutoff= list(kind = "Threshold", value = 0.5),legend = FALSE,
#' shape = list(shape = "triangle", names = c("ZNF738", "ZNF677")))
#'
#' }
NetVis =
function (Node.1, Node.2, wTO, pval= NULL, MakeGroups = FALSE,
padj= NULL, cutoff = list(kind = "Threshold", value = 0.5),
layout = NULL, smooth.edges = T, path = NULL, Cluster = F,
legend = T, shape=list(shape = "triangle", names= NULL), manipulation = F)
{
input_vis = data.frame (Node.1 = Node.1, Node.2 = Node.2, wTO = as.numeric(wTO))
if(!is.null(pval)){
input_vis$pval = pval
}
if(!is.null(padj) ){
input_vis$padj = padj
}
`%ni%` <- Negate(`%in%`)
`%>%` <- magrittr::`%>%`
if (cutoff$kind %ni% c("Threshold", "pval", "pval.adj")) {
stop("cutoff kind must be \"Threshold\", \"pval\" or \"pval.adj\".")
}
if (is.numeric(cutoff$value) == F) {
stop("cutoff value must be numeric.")
}
MakeGroups_pos = c('walktrap',
'optimal', 'spinglass', 'edge.betweenness',
'fast_greedy', 'infomap', 'louvain', 'label_prop',
'leading_eigen', FALSE)
if(MakeGroups %ni% MakeGroups_pos){
stop("MakeGroups should be FALSE or one of the following options: 'walktrap',
'optimal', 'spinglass', 'edge.betweenness',
'fast_greedy', 'infomap', 'louvain', 'label_prop',
'leading_eigen'.")
}
if (Cluster %ni% c(T, F)) {
stop("Cluster must be T / F.")
}
if (smooth.edges %ni% c(T, F)) {
stop("smooth.edges must be T / F.")
}
input_vis = subset(input_vis, abs(input_vis$wTO) > 0.01)
if (cutoff$kind == "Threshold") {
input_vis = subset(input_vis, abs(input_vis$wTO) >= cutoff$value)
}
else if (cutoff$kind == "pval") {
input_vis = subset(input_vis, input_vis$pval <= cutoff$value)
}
else if (cutoff$kind == "pval.adj") {
    input_vis = subset(input_vis, input_vis$padj <= cutoff$value)
}
if (nrow(input_vis) <= 2) {
stop("There is less than 2 nodes on your network. Choose a lower cutoff.")
}
if (smooth.edges == T) {
smooth.edges = "enabled"
}
input_vis = input_vis[!is.na(input_vis$wTO), ]
input_vis = plyr::arrange(input_vis, input_vis$Node.1, input_vis$Node.2)
nodes <- data.frame(id = sort(unique(c(as.character(input_vis$Node.1),
as.character(input_vis$Node.2)))))
g = igraph::graph_from_data_frame(input_vis, directed = F)
DEGREE = as.data.frame(igraph::degree(g))
igraph::E(g)$weight = abs(input_vis$wTO)
names(DEGREE) = "degree"
DEGREE$id = row.names(DEGREE)
nodes = suppressMessages(plyr::join(nodes, DEGREE))
nodes$shape = ifelse(nodes$id %in% shape$names, shape$shape, "dot")
nodes$value = (nodes$degree - min(nodes$degree))/(max(nodes$degree) -
min(nodes$degree))
nodes$value = nodes$value * 2 + 1
nodes$size = nodes$value
if(MakeGroups == FALSE){
group = 1
}
if (MakeGroups == 'infomap'){
group = igraph::cluster_infomap(g)$membership
}
else if (MakeGroups == 'walktrap'){
group = igraph::cluster_walktrap(g)$membership
}
else if (MakeGroups == 'leading_eigen'){
group = igraph::cluster_leading_eigen(g)$membership
}
else if (MakeGroups == 'louvain'){
group = igraph::cluster_louvain(g)$membership
}
else if (MakeGroups == 'label_prop'){
group = igraph::cluster_label_prop(g)$membership
}
else if (MakeGroups == 'fast_greedy'){
group = igraph::cluster_fast_greedy(g)$membership
}
else if (MakeGroups == 'optimal'){
group = igraph::cluster_optimal(g)$membership
}
else if (MakeGroups == 'spinglass'){
group = igraph::cluster_spinglass(g)$membership
}
else if (MakeGroups == 'edge.betweenness'){
group = igraph::edge.betweenness.community(g)$membership
}
nodes = plyr::join(nodes, data.frame(id = igraph::V(g)$name, group = group))
nodes$label = nodes$id
# print(nodes)
# nodes$shape = "circle"
#
# nodes$value = ifelse(nodes$id %in% shape$names, 3, 1)
nodes$title = paste0("<p> Node ID: ", nodes$id, "<br>Degree: ",
nodes$degree, "</p>")
edges <- data.frame(from = input_vis$Node.1, to = input_vis$Node.2)
wto = abs(input_vis$wTO)
edges$width = 0.5 + 5 * abs((wto - min(wto))/(max(wto) -
min(wto)))
edges$color = ifelse(input_vis$wTO > 0, "violetred", "springgreen")
edges$title = paste0("<p> wTO: ", round(input_vis$wTO, 2),
"</p>")
ledges <- data.frame(color = c("violetred", "springgreen"),
label = c("+ wTO", "- wTO"), arrows = c("", ""))
network <- visNetwork::visNetwork(nodes, edges) %>%
visNetwork::visInteraction(navigationButtons = TRUE) %>%
visNetwork::visEdges(smooth = smooth.edges) %>%
visNetwork::visOptions(highlightNearest = list(enabled = TRUE,
degree = 1, hover = T), nodesIdSelection = list(enabled = TRUE,
style = "width: 200px; height: 26px;\n background: #f8f8f8;\n color: darkblue;\n border:none;\n outline:none;"),
manipulation = F) %>%
visNetwork::visPhysics(enabled = F) %>%
visNetwork::visExport(type = "pdf",
name = "networkpdf",
float = "left",
label = "Save pdf",
background = "transparent",
style= "")%>%
visNetwork::visExport(type = "png",
name = "networkpng",
float = "right",
label = "Save png",
background = "transparent",
style= "")
if (Cluster == T) {
network <- network %>% visNetwork::visClusteringByGroup(groups = unique((nodes$group)))
}
if (legend == T) {
network <- network %>% visNetwork::visLegend(width = 0.3,
position = "right", main = "Group", addEdges = ledges,
ncol = 2)
}
if (!is.null(layout)) {
network <- network %>% visNetwork::visIgraphLayout(layout = layout)
}
if(manipulation == T){
network <- network %>% visNetwork::visOptions(manipulation = TRUE)
}
if (is.null(path)) {
network
} else if (!is.null(path)) {
visNetwork::visSave(network, file = path)
message(path)
}
nodesout =data.frame(id = nodes$id, group = nodes$group, degree = nodes$degree)
return(list(Nodes = nodesout, network = network))
}
|
/scratch/gouwar.j/cran-all/cranData/wTO/R/NetVis.R
|
#' @title metagenomics_abundance
#' @aliases metagenomics_abundance
#' @description metagenomics_abundance
#' @format data.frame from The USC Microbial Observatory. The data is public available at <https://www.ebi.ac.uk/metagenomics/projects/ERP013549>
#' @usage data('metagenomics_abundance')
#' @name metagenomics_abundance
metagenomics_abundance = utils::read.table("./data/metagenomics_abundance.txt", sep = '\t')
|
/scratch/gouwar.j/cran-all/cranData/wTO/R/metagenomics.R
|
#' @title rmcor
#' @description Computes the repeated measures correlation, based on Bland and Altman (1995). Implementation follows <https://doi.org/10.3389/fpsyg.2017.00456>.
#' @keywords internal
rmcor <- function (ID,
Measure1,
Measure2)
{
newdat <- stats::na.omit(data.frame(ID, Measure1,
Measure2))
Participant <- newdat$ID
Measure1 <- newdat$Measure1
Measure2 <- newdat$Measure2
lmmodel <- stats::lm(Measure2 ~ Participant + Measure1)
lmslope <- stats::coef(lmmodel)["Measure1"]
errordf <- lmmodel$df.residual
corrsign <- sign(lmslope)
type3rmcorr <- stats::drop1(lmmodel, ~., test = "F")
SSFactor <- type3rmcorr$"Sum of Sq"[3]
SSresidual <- type3rmcorr$RSS[1]
rmcorrvalue <- as.numeric(corrsign * sqrt(SSFactor/(SSFactor +
SSresidual)))
return(rmcorrvalue)
}
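# A minimal illustration of rmcor (hypothetical data, not from the package;
# wrapped in `if (FALSE)` so it is never executed when the file is sourced):
# each of five subjects contributes three paired measurements, and Measure2
# tracks Measure1 within subjects, so the returned value should be close to +1.
if (FALSE) {
  ID = factor(rep(1:5, each = 3))
  Measure1 = rnorm(15)
  Measure2 = Measure1 + rnorm(15, sd = 0.2)
  rmcor(ID, Measure1, Measure2)
}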
|
/scratch/gouwar.j/cran-all/cranData/wTO/R/rmcor.R
|
#' @title sample_ind
#' @description Reshuffles the expression values for each individual (column).
#' @param x Column to be resampled
#' @param dfExpression data.frame object containing the genes expression on the rows and the individuals (Individuals in the Columns)
#' @keywords internal
#' @author Deisy Morselli Gysi <deisy at bioinf.uni-leipzig.de>
sample_ind = function(x, dfExpression){
z = base::sample(dfExpression[,x], replace = F)
return(z)
}
|
/scratch/gouwar.j/cran-all/cranData/wTO/R/sample_ind.R
|
#' @title wTO.Complete
#' @author Deisy Morselli Gysi <deisy at bioinf.uni-leipzig.de>
#' @param k Number of threads to be used for computing the weight Topological Overlap. Default is set to 1.
#' @param n Number of resamplings, used to compute the empirical distributions of the links. Default is set to 100.
#' @param Data data.frame containing the count / expression data for the correlation.
#' @param Overlap Set of nodes of interest, where the Overlapping weights will be computed.
#' @param method Type of the correlation that should be used. "s" / "spearman" will compute the rank spearman correlation, "p" / "pearson" will compute the linear correlation. If no value is given, the default is to use "p".
#' @param method_resampling method of the resampling. Bootstrap, BlockBootstrap or Reshuffle. Bootstrap null hypothesis is that the wTO is random, and Reshuffle tests if the wTO is equal to zero.
#' @param pvalmethod method to compute the multiple test correction for the pvalue. for more information check the function \code{\link[stats]{p.adjust}}.
#' @param savecor T/F if need to save the correlation.
#' @param expected.diff Difference expected between the real wTO and the resampled wTO. By default, it is set to 0.2.
#' @param lag Time dependency (lag), if you are using the BlockBootstrap.
#' @param ID ID of the samples for the blocked bootstrap (for repeated measures).
#' @param normalize T/F Should the data be normalized?
#' @param plot T/F Should the diagnosis plot be plotted?
#'
#' @description Compute the wTO and also the bootstraps. Proposed at: arXiv:1711.04702
#' @return a list with results.
#' \itemize{
#' \item wTO is a data.frame containing the Nodes, the wTO computed using the signed correlations, the pvalue and the adj.pvalue.
#' \item abs.wTO is a data.frame containing the Nodes, the wTO computed using the absolute correlations, the pvalue and the adj.pvalue.
#' \item Correlation is a data.frame containing the correlation between all the nodes.
#' \item Empirical.Quantile quantile values for the empirical distribution.
#' \item Quantile quantile values for the sample distribution.
#' }
#' @importFrom parallel makeCluster clusterExport clusterApplyLB stopCluster
#' @importFrom data.table rbindlist dcast
#' @importFrom som normalize
#' @importFrom stats cor p.adjust reshape
#' @importFrom graphics plot axis par abline legend
#'
#'
#' @examples
#' \dontrun{
#' # Using spearman rank correlation and bonferroni correction for the pvalues.
#' wTO.Complete( k =8, n = 1000, Data = Microarray_Expression1,
#' Overlap = ExampleGRF$x, method = "s", pvalmethod = "bonferroni")
#' # Changing the resampling method to Reshuffle.
#' wTO.Complete( k =1, n = 1000, Data = Microarray_Expression1,
#' Overlap = ExampleGRF$x, method_resampling = "Reshuffle")
#' # Changing the resampling method to BlockBootstrap, with a lag of 2.
#' row.names(metagenomics_abundance) = metagenomics_abundance$OTU
#' metagenomics_abundance = metagenomics_abundance[,-1]
#' wTO.Complete( k =1, n = 1000, Data = metagenomics_abundance, method = "s",
#' Overlap = row.names(metagenomics_abundance), method_resampling = "BlockBootstrap", lag = 2)
#' wTO.Complete( k =2, n = 1000, Data = Microarray_Expression1, method = "s",
#' Overlap = ExampleGRF$x, method_resampling = "BlockBootstrap", ID = rep(1:9,each = 2))
#' X = wTO.Complete( k =1, n = 1000, Data = Microarray_Expression1,
#' Overlap = ExampleGRF$x, method = "p", plot = FALSE)
#' }
#' @export
wTO.Complete = function(k = 1 ,n = 100, Data , Overlap = row.names(Data),
method = "p", method_resampling = "Bootstrap",
pvalmethod = "BH", savecor = F,
expected.diff = 0.20, lag = NULL, ID = NULL,
normalize = F, plot = T){
N = k
Overlap = unique(as.character(Overlap))
`%ni%` <- Negate(`%in%`)
##### Messages
if(is.numeric(k) == F){
stop("k must be numeric.")
}
if(k <= 0){
stop("k must be greater than 0.")
}
if(is.numeric(n) == F){
stop("n must be numeric.")
}
if(n <= 0){
stop("n must be greater than 0.")
}
if(is.data.frame(Data) == F){
stop("Data must be a data.frame.")
}
if(method %ni% c("s", "spearman", "p", "pearson")){
stop('Method must be: "s", "spearman", "p" or "pearson".')
}
if(method_resampling %ni% c("Bootstrap", "Reshuffle", "BlockBootstrap")){
stop('Method must be: "Bootstrap", "BlockBootstrap" or "Reshuffle".')
}
if(method_resampling %in% "BlockBootstrap"){
if (is.null(lag)&is.null(ID)){
stop('If you want to use the "BlockBootstrap" please give a lag or the indivuals ID.')
}
if(!is.null(lag)&!is.null(ID)){
stop('If you want to use the "BlockBootstrap" please give a lag OR the indivuals ID.')
}
}
if(pvalmethod %ni% c ('holm', 'hochberg', 'hommel', 'bonferroni', 'BH', 'BY', 'fdr', 'none')){
stop("pvalmethod must be: 'holm', 'hochberg', 'hommel', 'bonferroni', 'BH', 'BY', 'fdr' or 'none'")
}
if(normalize %ni% c (T, F)){
stop("normalize must be: TRUE or FALSE.")
}
if(normalize == T){
Data.n = as.data.frame(som::normalize(Data))
row.names(Data.n)= row.names(Data)
Data = Data.n
}
DIM_Overlap = nrow(subset(Data, row.names(Data) %in% Overlap))
if(DIM_Overlap == 0){
stop('There is no overlapping nodes. Please check your input "Overlap"')
}
if(!is.null(DIM_Overlap)){
message(paste('There are',DIM_Overlap, "overlapping nodes,",dim(Data)[1],
"total nodes and" , dim(Data)[2],"individuals." ))
}
message("This function might take a long time to run. Don't turn off the computer.")
PAR = par()
## For the original data
# real_Genes = Data
Saving = CorrelationOverlap(Data = Data, Overlap = Overlap, method = method)
WTO_abs = wTO(A_TF = Saving, sign = "abs")
WTO_sign = wTO(A_TF = Saving, sign = "sign")
Cor_real = wTO.in.line(WTO_sign)
Cor_real_abs = wTO.in.line(WTO_abs)
names(Cor_real) = names(Cor_real_abs) <-c("Node.1", "Node.2", "wTO_0")
idcol = c("Node.1", "Node.2")
rm("WTO_abs")
rm("WTO_sign")
data.table::setkeyv(Cor_real, c("Node.1", "Node.2"))
data.table::setkeyv(Cor_real_abs, c("Node.1", "Node.2"))
Orig = cbind(Rep = 0, Cor_real[Cor_real_abs])
names(Orig)= c("Rep","Node.1", "Node.2", "wTO_sign", "wTO_abs")
reps_rest = n
### If only one node
if ( k == 1){
a = 0
while ( reps_rest > 0){
# message(a)
K = 1:min(N, reps_rest)
# K = 1:n
OUTPUT = lapply(K, wTO.aux.each, Data= Data,
Overlap = Overlap, method = method, ID, lag = lag, method_resampling= method_resampling)
ALL = data.table::rbindlist(OUTPUT, idcol = idcol)
names(ALL) = names(Orig) = c("Rep", "Node.1", "Node.2", "wTO_sign" ,"wTO_abs")
ALL_DT_sig = data.table::dcast(ALL, Node.1 + Node.2 ~ Rep, value.var = "wTO_sign")
ALL_DT_abs = data.table::dcast(ALL, Node.1 + Node.2 ~ Rep, value.var = "wTO_abs")
if ( a == 0){
Ps1 = rowSums(ALL_DT_sig[,-c(1:2)] < Orig$wTO_sign - expected.diff)
Ps2 = rowSums(ALL_DT_sig[,-c(1:2)] > Orig$wTO_sign + expected.diff)
Ps = Ps1 + Ps2
Pa1 = rowSums(ALL_DT_abs[,-c(1:2)] < Orig$wTO_abs - expected.diff)
Pa2 = rowSums(ALL_DT_abs[,-c(1:2)] > Orig$wTO_abs + expected.diff)
Pa = Pa1 + Pa2
TAB_SIGN = as.data.frame(table(unlist(round(ALL_DT_sig[,-c(1:2)], 2))))
TAB_ABS = as.data.frame(table(unlist(round(ALL_DT_abs[,-c(1:2)], 2))))
}
if ( a > 0){
Ps1 = rowSums(ALL_DT_sig[,-c(1:2)] < Orig$wTO_sign - expected.diff)
Ps2 = rowSums(ALL_DT_sig[,-c(1:2)] > Orig$wTO_sign + expected.diff)
Ps = Ps + Ps1 + Ps2
Pa1 = rowSums(ALL_DT_abs[,-c(1:2)] < Orig$wTO_abs - expected.diff)
Pa2 = rowSums(ALL_DT_abs[,-c(1:2)] > Orig$wTO_abs + expected.diff)
Pa = Pa + Pa1 + Pa2
TAB_SIGN_aux = as.data.frame(table(unlist(round(ALL_DT_sig[,-c(1:2)], 2))))
TAB_ABS_aux = as.data.frame(table(unlist(round(ALL_DT_abs[,-c(1:2)], 2))))
TAB_SIGN = plyr::join(TAB_SIGN, TAB_SIGN_aux, by = "Var1")
TAB_SIGN = data.frame(Var1 = TAB_SIGN$Var1,
Sum = rowSums(TAB_SIGN[,-1]))
TAB_ABS = plyr::join(TAB_ABS, TAB_ABS_aux, by = "Var1")
TAB_ABS = data.frame(Var1 = TAB_ABS$Var1,
Sum = rowSums(TAB_ABS[,-1]))
}
rm("ALL_DT_sig", "ALL_DT_abs", "ALL", "OUTPUT")
reps_rest = (reps_rest - N)
a = a +1
}
}
else if ( k > 1){
WTO = new.env()
assign("Data", Data, envir = WTO)
assign("Overlap", Overlap, envir = WTO)
assign("method", method, envir = WTO)
assign("CorrelationOverlap", CorrelationOverlap, envir = WTO)
assign("wTO", wTO, envir = WTO)
assign("wTO.in.line", wTO, envir = WTO)
assign("wTO.aux.each", wTO.aux.each, envir = WTO)
assign("method_resampling", method_resampling, envir = WTO)
assign("sample_ind", sample_ind, envir = WTO)
assign("lag", lag, envir = WTO)
assign("ID", ID, envir = WTO)
cl = parallel::makeCluster(k)
parallel::clusterExport(cl, "Data", envir = WTO)
parallel::clusterExport(cl, "wTO.in.line", envir = WTO)
parallel::clusterExport(cl, "lag", envir = WTO)
parallel::clusterExport(cl, "Overlap", envir = WTO)
parallel::clusterExport(cl, "method", envir = WTO)
parallel::clusterExport(cl, "CorrelationOverlap", envir = WTO )
parallel::clusterExport(cl, "wTO", envir = WTO)
parallel::clusterExport(cl, 'wTO.aux.each', envir = WTO)
parallel::clusterExport(cl, 'method_resampling', envir = WTO)
parallel::clusterExport(cl, 'sample_ind', envir = WTO)
# message("cluster")
# K = 1:n
a = 0
while ( reps_rest > 0){
# message(a)
K = 1:min(N, reps_rest)
OUTPUT = parallel::clusterApply(cl, K, wTO.aux.each , Data= Data,
Overlap = Overlap, ID, lag = lag, method = method, method_resampling= method_resampling)
ALL = data.table::rbindlist(OUTPUT, idcol = idcol)
names(ALL) = names(Orig) = c("Rep", "Node.1", "Node.2", "wTO_sign" ,"wTO_abs")
ALL_DT_sig = data.table::dcast(ALL, Node.1 + Node.2 ~ Rep, value.var = "wTO_sign")
ALL_DT_abs = data.table::dcast(ALL, Node.1 + Node.2 ~ Rep, value.var = "wTO_abs")
if ( a == 0){
Ps = rowSums(ALL_DT_sig[,-c(1:2)] < Orig$wTO_sign - expected.diff) +
rowSums(ALL_DT_sig[,-c(1:2)] > Orig$wTO_sign + expected.diff)
Pa = rowSums(ALL_DT_abs[,-c(1:2)] < Orig$wTO_abs - expected.diff) +
rowSums(ALL_DT_abs[,-c(1:2)] > Orig$wTO_abs + expected.diff)
TAB_SIGN = as.data.frame(table(unlist(round(ALL_DT_sig[,-c(1:2)], 2))))
TAB_ABS = as.data.frame(table(unlist(round(ALL_DT_abs[,-c(1:2)], 2))))
}
if ( a > 0){
Ps = Ps + rowSums(ALL_DT_sig[,-c(1:2)] < Orig$wTO_sign - expected.diff) +
rowSums(ALL_DT_sig[,-c(1:2)] > Orig$wTO_sign + expected.diff)
Pa = Pa + rowSums(ALL_DT_abs[,-c(1:2)] < Orig$wTO_abs - expected.diff) +
rowSums(ALL_DT_abs[,-c(1:2)] > Orig$wTO_abs + expected.diff)
# message(Pa)
# message(Ps)
TAB_SIGN_aux = as.data.frame(table(unlist(round(ALL_DT_sig[,-c(1:2)], 2))))
TAB_ABS_aux = as.data.frame(table(unlist(round(ALL_DT_abs[,-c(1:2)], 2))))
TAB_SIGN = plyr::join(TAB_SIGN, TAB_SIGN_aux, by = "Var1")
TAB_SIGN = data.frame(Var1 = TAB_SIGN$Var1,
Sum = rowSums(TAB_SIGN[,-1]))
TAB_ABS = plyr::join(TAB_ABS, TAB_ABS_aux, by = "Var1")
TAB_ABS = data.frame(Var1 = TAB_ABS$Var1,
Sum = rowSums(TAB_ABS[,-1]))
}
rm("ALL_DT_sig", "ALL_DT_abs", "ALL", "OUTPUT")
reps_rest = (reps_rest - N)
a = a +1
}
parallel::stopCluster(cl)
}
message("Simulations are done.")
message("Computing p-values")
Orig$pval_sig = Ps / n
Orig$pval_abs = Pa / n
if(method_resampling == "Reshuffle"){
Orig$pval_sig = 1- Orig$pval_sig
Orig$pval_abs = 1- Orig$pval_abs
}
Orig$Padj_sig = (stats::p.adjust(Orig$pval_sig, method = pvalmethod))
Orig$Padj_abs = (stats::p.adjust(Orig$pval_abs, method = pvalmethod))
## Running the correlation
if( savecor == T){
Total_Correlation = as.data.frame(stats::cor(t(Data), method = method))
Total_Correlation = wTO.in.line(Total_Correlation)
names(Total_Correlation) = c("Node.1", "Node.2", "Cor")
}
if( savecor == F){
Total_Correlation = NULL
}
TAB_SIGN_aux = as.data.frame(table(round(Orig$wTO_sign,2)))
TAB_ABS_aux = as.data.frame(table(round(Orig$wTO_abs,2)))
TAB_SIGN = plyr::join(TAB_SIGN, TAB_SIGN_aux, by = "Var1")
TAB_ABS = plyr::join(TAB_ABS, TAB_ABS_aux, by = "Var1")
message("Computing cutoffs")
if(plot == TRUE){
graphics::par(mar=c(5.1, 4.1, 4.1, 8.1), xpd=TRUE, mfrow = c(3,2))
}
Cutoffs = Cut.off(TAB_SIGN, "wTO - Resampling", plot = plot)
Cutoffs_abs = Cut.off(TAB_ABS, "|wTO| - Resampling", plot = plot)
Orig = Orig[, -"Rep"]
Orig$wTO_abs = as.numeric(Orig$wTO_abs)
Orig$wTO_sign = as.numeric(Orig$wTO_sign)
Orig$pval_abs = as.numeric(Orig$pval_abs)
Orig$pval_sig = as.numeric(Orig$pval_sig)
Orig$Padj_abs = as.numeric(Orig$Padj_abs)
Orig$Padj_sig = as.numeric(Orig$Padj_sig)
Quantiles = rbind(
Cutoffs$Empirical.Quantile,
Cutoffs$Quantile ,
Cutoffs_abs$Empirical.Quantile,
Cutoffs_abs$Quantile)
row.names(Quantiles) = c( 'Empirical.Quantile',
'Quantile',
'Empirical.Quantile.abs',
'Quantile.abs')
tQ = as.data.frame(t(Quantiles))
output = list(wTO = Orig,
Correlation = Total_Correlation,
Quantiles = Quantiles
)
col = ifelse(Orig$pval_sig < 0.05 & Orig$pval_abs < 0.05, "red",
ifelse(Orig$pval_sig < 0.05, "orange",
ifelse (Orig$pval_abs < 0.05, "yellow", "black")))
if(plot == T){
# par(mar=c(5.1, 4.1, 4.1, 8.1), xpd=TRUE, mfrow = c(3,1))
graphics::plot(Orig$wTO_sign, Orig$wTO_abs, axes = F,
xlab = "|wTO|", ylab = "wTO",
main = "|wTO| vs wTO", pch = ".", xlim = c(-1,1), ylim = c(0,1),
col.main = "steelblue2", col.lab = "steelblue2", col = col)
graphics::axis(1, las = 1, cex.axis = 0.6, col = "steelblue",
col.ticks = "steelblue3", col.axis = "steelblue")
graphics::axis(2, las = 1, cex.axis = 0.6, col = "steelblue",col.ticks = "steelblue3", col.axis = "steelblue")
graphics::legend(c(0.9,0), c ("p-value < 0.05", 'wTO sign & |wTO|',
'wTO sign','|wTO|'),
col = c("transparent","red", "orange", "yellow"), pch = 16, bty = "n",
inset=c(-0.8,0), cex = 0.5 )
graphics::par(xpd=FALSE)
graphics::abline( h = 0, lty = 2, col = "gray50")
graphics::abline(v = 0, lty = 2, col = "gray50")
graphics::plot(Orig$wTO_sign, Orig$pval_sig, axes = F,
xlab = "wTO", ylab = "p-value", ylim = c(0,1), xlim = c(-1,1), col.main = "steelblue2", col.lab = "steelblue2",
main = "wTO vs p-value",
pch = 16)
graphics::axis(1, las = 1, cex.axis = 0.6, col = "steelblue",
col.ticks = "steelblue3", col.axis = "steelblue")
graphics::axis(2, las = 1, cex.axis = 0.6, col = "steelblue",col.ticks = "steelblue3", col.axis = "steelblue")
graphics::par(xpd=FALSE)
graphics::abline( v = tQ$Empirical.Quantile, lty = 2, col = c("red", "orange", "yellow", "yellow", "orange", "red"))
graphics::par(xpd=T)
graphics::legend(c(0.9,0), c ("Empirical Quantiles", '0.1%','2.5%','10%','90%','97.5%','99.9%'),
col = c("white", "red", "orange", "yellow", "yellow", "orange", "red"), lwd = 2, bty = "n",
inset=c(-0.8,0), cex = 0.5 )
graphics::par(xpd=FALSE)
graphics::plot(Orig$wTO_abs, Orig$pval_abs, axes = F,
xlab = "|wTO|", ylab = "p-value", ylim = c(0,1), xlim = c(0,1),
main = "|wTO| vs p-value",
pch = 16, col.main = "steelblue2", col.lab = "steelblue2")
graphics::axis(1, las = 1, cex.axis = 0.6, col = "steelblue",
col.ticks = "steelblue3", col.axis = "steelblue")
graphics::axis(2, las = 1, cex.axis = 0.6, col = "steelblue",col.ticks = "steelblue3", col.axis = "steelblue")
graphics::abline( v = tQ$Empirical.Quantile.abs, lty = 2, col = c("red", "orange", "yellow", "yellow", "orange", "red"))
graphics::par(xpd=T)
graphics::legend(c(0.9,0), c ("Empirical Quantiles", '0.1%','2.5%','10%','90%','97.5%','99.9%'),
col = c("white", "red", "orange", "yellow", "yellow", "orange", "red"), lwd = 2, bty = "n",
inset=c(-0.8,0), cex = 0.5 )
}
class(output)<- append('wTO', class(output))
message("Done!")
return(output)
}
|
/scratch/gouwar.j/cran-all/cranData/wTO/R/wTO.Complete2.R
|
#' @title wTO.Consensus
#' @aliases wTO.Consensus
#' @description Consensus requires a list of data.frames containing the pairs of nodes and the wTO values for all networks that need to be joined. Reference: arXiv:1711.04702
#' @param data list of data.frame containing the "Node.1", "Node.2" and "wTO".
#' @author Deisy Morselli Gysi <deisy at bioinf.uni-leipzig.de>
#' @export
#' @importFrom plyr join_all
#' @importFrom stats pchisq
#' @examples
#' \dontrun{
#'EXAMPLE = wTO.Complete( k =1, n = 200, Data = Microarray_Expression1,
#' Overlap = ExampleGRF$x, method = "p")
#'
#' # Constructing the consensus network
#' data = list(data.frame(Node.1 = EXAMPLE$wTO$Node.1,
#' Node.2 = EXAMPLE$wTO$Node.2,
#' wto_sig = EXAMPLE$wTO$wTO_sign,
#' pvalsig = EXAMPLE$wTO$pval_sig),
#' data.frame(Node.1 = EXAMPLE$wTO$Node.1,
#' Node.2 = EXAMPLE$wTO$Node.2,
#' wtoabs = EXAMPLE$wTO$wTO_abs,
#' pvalabs = EXAMPLE$wTO$pval_abs) )
#' CONS = wTO.Consensus(data)
#'
#' }
wTO.Consensus = function(data){
if (!is.list(data)){
stop("data must be a list of data.frames.")
}
### Weight
weight = pval = nodes = list()
for ( i in 1:length(data)){
weight[[i]] = data[[i]][,1:3]
pval[[i]] = data[[i]][,c(1:2,4)]
ID = unique(c(levels(data[[i]]$Node.1), levels(data[[i]]$Node.2)))
nodes[[i]] = data.frame(ID = ID)
names(weight[[i]])[3] = paste0(names(weight[[i]][3]), i)
names(pval[[i]])[3] = paste0(names(pval[[i]][3]), i)
}
weight = plyr::join_all(weight, type = 'full')
pval = plyr::join_all(pval, type = 'full')
nodes = plyr::join_all(nodes, type = 'inner')
message(paste('Total common nodes:', nrow(nodes)))
weight = subset(weight, weight$Node.1 %in% nodes$ID & weight$Node.2 %in% nodes$ID)
pval = subset(pval, pval$Node.1 %in% nodes$ID & pval$Node.2 %in% nodes$ID)
pval[is.na(pval)] <- 1
weight[is.na(weight)] <- 0.01
wTOCN = CN_aux(weight[, -c(1:2)])
pvalue_fisher = fishermethod(pval[, -c(1:2)])
Out = data.frame(Node.1 = pval[,1], Node.2 = pval[,2],
CN = wTOCN, pval.fisher = pvalue_fisher)
return(Out)
}
fishermethod = function(data_x){
chi = rowSums(log(data_x))*-2
pval = sapply(chi, function(x) stats::pchisq(x, 2*ncol(data_x), lower.tail = FALSE))
return(pval)
}
CN_aux = function(data_x){
abs_x = apply(data_x, 2, abs)
sum_abs_x = apply(abs_x, 1, sum)
div = (abs_x/sum_abs_x) * data_x
wTO_cons = apply(div, 1, sum)
return(wTO_cons)
}
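# A small sketch of the two helpers above (hypothetical numbers, not from the
# package; wrapped in `if (FALSE)` so it is not run on sourcing). CN_aux
# weights every network's wTO by its share of the total absolute wTO for that
# link, and fishermethod combines the per-network p-values with Fisher's method.
if (FALSE) {
  wtos = data.frame(net1 = c(0.8, -0.1), net2 = c(0.6, 0.7))
  CN_aux(wtos)          # consensus wTO for the two links
  pvals = data.frame(net1 = c(0.01, 0.30), net2 = c(0.02, 0.40))
  fishermethod(pvals)   # combined p-values
}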
|
/scratch/gouwar.j/cran-all/cranData/wTO/R/wTO.Consensus.R
|
#' @title wTO
#' @description Calculates the weighted topological overlap (wTO)
#' between a set of Nodes and the Overlapping nodes. This function implements the method from Nowick (2009).
#' @param A_TF The weighted adjacency matrix (correlation matrix).
#' @param sign ("abs", "sign") if the user wants to use the absolute correlation or the signed correlation.
#' @return A matrix containing the wTO values.
#' @export
#' @references Katja Nowick, Tim Gernat, Eivind Almaas and Lisa Stubbs (2009) <doi:10.1073/pnas.0911376106>
#' @author Deisy Morselli Gysi <deisy at bioinf.uni-leipzig.de>
#' @importFrom Rfast Crossprod transpose
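#' @examples
#' \dontrun{
#' # Minimal sketch (not from the original documentation): a small symmetric
#' # correlation matrix among four nodes, with the diagonal set to zero as
#' # CorrelationOverlap() would return it.
#' set.seed(2)
#' A = cor(matrix(rnorm(40), ncol = 4))
#' diag(A) = 0
#' colnames(A) = rownames(A) = paste0("N", 1:4)
#' wTO(A_TF = A, sign = "sign")
#' wTO(A_TF = A, sign = "abs")
#' }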
wTO = function(A_TF, sign = c("abs", "sign")){
A_TF = as.matrix(A_TF)
if(sign %in% c("abs", "absolute")){
A_TF = abs(A_TF)
}
# A_TF = as.data.frame(subset(A, select = row.names(A)))
C = Rfast::Crossprod(A_TF, Rfast::transpose(A_TF))
W = C + A_TF ###
K = matrix(NA, nrow(A_TF), ncol(A_TF))
KI = rowSums(abs(A_TF), na.rm = T)
for( ii in 1: nrow(A_TF)){
for( jj in 1: ncol(A_TF)){
K[ii,jj] = min(KI[ii], KI[jj])
}
}
WTO = round(W / (K + 1 - abs(A_TF)),3)
return(WTO)
}
|
/scratch/gouwar.j/cran-all/cranData/wTO/R/wTO.R
|
#' @title wTO.aux.each
#' @description wTO.aux.each calculate the wTO for each one of the resamplings.
#' @keywords internal
#' @importFrom stats na.exclude
#' @importFrom data.table setkeyv
#' @param n Number of bootstraps / reshuffles to be run for the estimates of the "Threshold" or "pval".
#' @param Data data.frame containing the count / expression data for the correlation.
#' @param Overlap Nodes of interest, where the Overlapping weights will be computed.
#' @param method Type of the correlation that should be used. "s" / "spearman" will compute the rank spearman correlation, "p" / "pearson" will compute the linear correlation. If no value is given, the default is to use "s".
#' @param method_resampling method of the resampling. Bootstrap or Reshuffle. Bootstrap null hypothesis is that the wTO is random, and Reshuffle tests if the wTO is equal to zero.
wTO.aux.each = function (n, Data, Overlap, method, method_resampling, lag, ID){
if(method_resampling == "Bootstrap"){
real_Genes = sample(Data, replace = T)
}
if(method_resampling == "BlockBootstrap"){
if(!is.null(lag)){
nsampl = ifelse (ncol(Data) %% lag == 0, ncol(Data) %/% lag, ncol(Data) %/% lag +1)
Y = sample(1:nsampl, size = nsampl, replace = T)
Vect = Y*lag
i = lag - 1
while( i > 0){
Vect = cbind(Vect , Y*lag - i)
i = i - 1
}
SAMPLES = c(Vect)
SAMPLES[SAMPLES > ncol(Data)] <- NA
SAMPLE = stats::na.exclude(SAMPLES)
real_Genes = Data[,SAMPLE]
row.names(real_Genes)=row.names(Data)
}
if(!is.null(ID)){
ID %<>% as.factor
bootID = sample(levels(ID), replace = TRUE)
Data_boot = subset(Data, select = ID %in% bootID[1])
for (k in 2:length(bootID)){
real_Genes = cbind(Data_boot,
subset(Data, select = ID %in% bootID[k]))
}
}
}
else if(method_resampling == "Reshuffle" ){
real_Genes = as.data.frame(lapply(1:ncol(Data), FUN = sample_ind, dfExpression = Data))
names(real_Genes)=names(Data)
row.names(real_Genes)=row.names(Data)
}
Saving = CorrelationOverlap(Data = real_Genes, Overlap = Overlap, method = method)
WTO_abs = wTO(A_TF = Saving, sign = "abs")
WTO_sig = wTO(A_TF = Saving, sign = "sign")
# message(".", appendLF = F)
Cor_star = wTO.in.line(WTO_sig)
Cor_star_abs = wTO.in.line(WTO_abs)
names(Cor_star) = c ("Node.1", "Node.2", "wTo_sign")
names(Cor_star_abs) = c ("Node.1", "Node.2", "wTo_abs")
data.table::setkeyv(Cor_star, c("Node.1", "Node.2"))
data.table::setkeyv(Cor_star_abs, c("Node.1", "Node.2"))
return(Cor_star[Cor_star_abs])
}
|
/scratch/gouwar.j/cran-all/cranData/wTO/R/wTO.aux.each.R
|
#' @title wTO.export
#' @aliases wTO.export
#' @description Exports the significant interactions, the wTO weights and p-values into a .txt file, tab separated. This file can be imported into other visualization tools (Cytoscape, for example).
#' @param DATA Output from the function wTO.Complete or wTO.Consensus.
#' @param path Path and file name where the .txt file should be saved.
#' @param sign Should the network contain the results for the signed network or unsigned? Only for data coming from wTO.Complete.
#' @param pvalue cutoff p-value for the network. Only for data coming from wTO.Complete.
#' @param padj cutoff adjusted p-value for the network. Only for data coming from wTO.Complete.
#' @param prop.NA cutoff proportion of NAs for the network. Only for data coming from wTO.Consensus.
#' @importFrom utils write.table
#'
#' @export
#'
#' @examples
#' \dontrun{
#' EXAMPLE = wTO.Complete( k =1, n = 200, Data = Microarray_Expression2,
#' Overlap = ExampleGRF$x, method = "p")
#' wTO.export(EXAMPLE , './EXAMPLE.txt')
#'
#' # Selection of only the significant ones for the Consensus
#' Ex_k1_cor_p_boot_p005_sig = subset(EXAMPLE$wTO,
#' EXAMPLE$wTO$pval_sig < 0.05,
#' select = c("Node.1", "Node.2", "wTO_sign"))
#' Ex_k1_cor_p_boot_p005_abs = subset(EXAMPLE$wTO,
#' EXAMPLE$wTO$pval_abs < 0.05,
#' select = c("Node.1", "Node.2", "wTO_abs"))
#' # Constructing the consensus network
#' CN = wTO.Consensus(data = list(Ex_k1_cor_p_boot_p005_sig,
#' Ex_k1_cor_p_boot_p005_abs))
#' wTO.export(CN, './CN.txt')
#' ### You can store the result on the workspace.
#' y = wTO.export(CN, './CN.txt')
#' head(y)
#' }
#'
#'
#'
wTO.export = function(DATA, path, sign = TRUE, pvalue = 0.05, padj = 0.05, prop.NA = 0.5){
if(any(class(DATA) %in% 'wTO')){
if(sign == TRUE){
save = subset(DATA$wTO, DATA$wTO$pval_sig < pvalue & DATA$wTO$Padj_sig < padj,
select = c("Node.1" , "Node.2" , "wTO_sign" , "pval_sig" , "Padj_sig"))
}
if(sign == FALSE){
    save = subset(DATA$wTO, DATA$wTO$pval_abs < pvalue & DATA$wTO$Padj_abs < padj,
select = c("Node.1" , "Node.2" , "wTO_abs" , "pval_abs" , "Padj_abs"))
}
names(save) = c(c("Node.1" , "Node.2" , "wTO" , "pval" , "Padj"))
}
if(any(class(DATA) %in% 'wTOCN')){
allowedNA = prop.NA
save = subset(DATA, DATA$prop.NA < allowedNA,
select = c("Node.1" , "Node.2" , "wTO_Cons" , "prop.NA" ))
names(save) = c("Node.1" , "Node.2" , "wTO" , "prop.NA")
}
write.table(save, path, quote = F, sep = '\t', row.names = FALSE)
return(invisible(save))
}
|
/scratch/gouwar.j/cran-all/cranData/wTO/R/wTO.export.R
|
#' @title wTO.in.line
#'
#' @param d correlation matrix to be converted into the line format.
#' @description Transforms a correlation matrix into the line format.
#' @return the wTO matrix into a data.frame: Node1, Node2 and wTO.
#' @importFrom data.table as.data.table
#' @importFrom stats na.omit
#' @importFrom reshape2 melt
#' @author Deisy Morselli Gysi <deisy at bioinf.uni-leipzig.de>
#' @export
#'
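#' @examples
#' \dontrun{
#' # Minimal sketch (not from the original documentation): convert a small
#' # symmetric wTO/correlation matrix into the Node.1 / Node.2 / wTO format.
#' m = matrix(c(1, 0.5, 0.2,
#'              0.5, 1, 0.3,
#'              0.2, 0.3, 1), ncol = 3,
#'            dimnames = list(c("A", "B", "C"), c("A", "B", "C")))
#' wTO.in.line(m)
#' }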
wTO.in.line <-function(d){
# names2= matrix(names(d), nrow = nrow(d), ncol = ncol(d), byrow = T)
# names3= matrix(row.names(d), nrow = nrow(d), ncol = ncol(d))
#
# Genes.1 <- names2[upper.tri(names2)]
# Genes.2 <- names3[upper.tri(names3)]
# M.Genes.1 <- apply(cbind(Genes.1, Genes.2), 1, min)
# M.Genes.2<- apply(cbind(Genes.1, Genes.2), 1, max)
# # M.nomes = paste(M.Genes.1, M.Genes.2, sep = "~")
#
#
# M.sup <- d[upper.tri(d)]
# corre=data.table::as.data.table(cbind(M.Genes.1 ,M.Genes.2, M.sup))
# names(corre)<-c("Node.1", "Node.2", "wTO")
# # row.names(corre)= paste(M.Genes.1, M.Genes.2, sep = "<->")
# # corre$wTO = as.numeric(as.matrix(corre$wTO))
#
# system.time(correlations<-cor(mydata,use="pairwise.complete.obs"))#get correlation matrix
upperTriangle<-upper.tri(d, diag=F) #turn into a upper triangle
d.upperTriangle<-d #take a copy of the original cor-mat
d.upperTriangle[!upperTriangle]<-NA#set everything not in upper triangle to NA
d_melted<-data.table::as.data.table(stats::na.omit(reshape2::melt(as.matrix(d.upperTriangle), value.name ="correlationCoef"))) #use melt to reshape the matrix into triplets, na.omit to get rid of the NA rows
names(d_melted)<-c("Node.1", "Node.2", "wTO")
return(d_melted)
}
|
/scratch/gouwar.j/cran-all/cranData/wTO/R/wTO.in.line.R
|
#' @title wTO.rep_measure
#' @author Deisy Morselli Gysi <deisy at bioinf.uni-leipzig.de>
#' @param n Number of resamplings, used to compute the empirical distributions of the links. Default is set to 10.
#' @param Data data.frame containing the count / expression data for the correlation.
#' @param Overlap Set of nodes of interest, where the Overlapping weights will be computed.
#' @param sign Should the wTO be signed?
#' @param delta expected difference between the real wTO and the bootstrapped one.
#' @param ID a vector with the individuals' identification
#' @description Computes the wTO for a repeated-measures experiment and also the bootstraps. Proposed at arXiv:1711.04702. This is a quicker version of wTO.Complete. It doesn't contain diagnostic plots nor a parallel version.
#' @importFrom parallel makeCluster clusterExport clusterApplyLB stopCluster
#' @importFrom data.table rbindlist dcast
#' @importFrom som normalize
#' @importFrom stats cor p.adjust reshape pchisq
#' @importFrom graphics plot axis par abline legend
#' @import magrittr
#' @export
#' @examples
#'
#' #wTO.rep_measure(Data = Microarray_Expression1, ID = rep(c(1:9),2),
#' #Overlap = ExampleGRF$x)
wTO.rep_measure = function(Data, Overlap = row.names(Data), ID,
sign = 'sign',
delta = 0.2, n = 10){
Overlap = unique(as.character(Overlap))
`%ni%` <- Negate(`%in%`)
ID = as.factor(ID)
##### Messages
if(is.numeric(n) == F){
stop("n must be numeric.")
}
if(n <= 0){
stop("n must be greater than 0.")
}
if(is.data.frame(Data) == F){
stop("Data must be a data.frame.")
}
DIM_Overlap = nrow(subset(Data, row.names(Data) %in% Overlap))
if(DIM_Overlap == 0){
stop('There is no overlapping nodes. Please check your input "Overlap"')
}
if(!is.null(DIM_Overlap)){
message(paste('There are',DIM_Overlap, "overlapping nodes,",dim(Data)[1],
"total nodes and" , dim(Data)[2],"individuals." ))
}
message("This function might take a long time to run. Don't turn off the computer.")
Datat = t(Data)
Cor = matrix(0, nrow = ncol(Datat), ncol = ncol(Datat)) %>% data.frame()
  names(Cor) = row.names(Cor) = colnames(Datat)
message('Starting correlations.')
for( i in 1:ncol(Datat)){
for(j in i:(ncol(Datat) )){
if( i == j){
Cor[i,j] = 0
}
else{
Cor[i,j] = Cor[j,i] = suppressWarnings(rmcor(ID,Datat[,i],Datat[,j]))
}
}
}
Cor[is.na(Cor)] = 0
names(Cor) = row.names(Cor)= colnames(Datat)
wtomelt0 = subset(Cor, row.names(Cor) %in% Overlap) %>% wTO::wTO(., sign)
`%>%` <- magrittr::`%>%`
. <- NULL
for ( B in 1:n){
message('.', appendLF = FALSE)
bootID = sample(levels(ID), replace = TRUE)
Data_boot = subset(Datat, ID == bootID[1])
for (k in 2:length(bootID)){
Data_boot = rbind(Data_boot,
subset(Datat, ID == bootID[k]))
}
Cor = matrix(0, nrow = ncol(Data_boot), ncol = ncol(Data_boot)) %>% data.frame()
names(Cor) = row.names(Cor)= colnames(Data_boot)
for( i in 1:ncol(Data_boot)){
for(j in 1:(ncol(Data_boot) )){
Cor[i,j] = Cor[j,i] = suppressWarnings( rmcor(ID,Data_boot[,i],Data_boot[,j]))
}
}
Cor[is.na(Cor)] = 0
res = subset(Cor, row.names(Cor) %in% Overlap) %>% wTO::wTO(., sign)
U = (res < wtomelt0 - delta) + (res > wtomelt0 + delta)
if ( B == 1){
out = U}
if (B != 1){
out = out + U
}
}
wtomelt0 = wTO.in.line(wtomelt0)
cor = wTO.in.line(out)
pval = data.table::data.table(wtomelt0, pval = cor$wTO/n)
message('Done!')
return(pval)
}
|
/scratch/gouwar.j/cran-all/cranData/wTO/R/wTO.rep_measure.R
|
#' @title wTO.fast
#' @author Deisy Morselli Gysi <deisy at bioinf.uni-leipzig.de>
#' @param n Number of resamplings, used to compute the empirical distributions of the links. Default is set to 100.
#' @param Data data.frame containing the count / expression data for the correlation.
#' @param Overlap Set of nodes of interest, where the Overlapping weights will be computed.
#' @param method Type of correlation to be used. "s" / "spearman" computes the Spearman rank correlation; "p" / "pearson" computes the Pearson (linear) correlation. If no value is given, the default is "p".
#' @param sign Should the wTO be signed?
#' @param delta expected difference between the real wTO and the bootstrapped one.
#' @param method_resampling method of the resampling. Bootstrap or BlockBootstrap. If the second is used, please give the lag (time dependency among the data).
#' @param lag Time dependency for the blocked bootstrap (for time series).
#' @param ID ID of the samples for the blocked bootstrap (for repeated measures).
#'
#' @description Compute the wTO and also the bootstraps. Proposed at arXiv:1711.04702. This is a quicker version of wTO.Complete. It doesn't contain diagnostic plots nor a parallel version.
#' @importFrom parallel makeCluster clusterExport clusterApplyLB stopCluster
#' @importFrom data.table rbindlist dcast
#' @importFrom som normalize
#' @importFrom stats cor p.adjust reshape pchisq
#' @importFrom graphics plot axis par abline legend
#' @import magrittr
#' @export
#' @examples
#' # wTO.fast(Data = Microarray_Expression1,
#' # Overlap = ExampleGRF$x,
#' # method = "p")
#'
#' # For a time series with lag = 4
#' # wTO.fast(Data = Microarray_Expression1,
#' # Overlap = ExampleGRF$x,
#' # method = "p",
#' # method_resampling = 'BlockBootstrap',
#' # lag = 4)
#'
#' # For a study where the individuals were measured multiple times.
#' # wTO.fast(Data = Microarray_Expression1,
#' # Overlap = ExampleGRF$x,
#' # method = "p",
#' # method_resampling = 'BlockBootstrap',
#' # ID = rep(1:9, each= 2))
wTO.fast = function(Data,
Overlap = row.names(Data),
method = 'p',
sign = 'sign',
delta = 0.2,
n = 10,
method_resampling = 'Bootstrap', lag = NULL, ID = NULL){
Overlap = unique(as.character(Overlap))
`%ni%` <- Negate(`%in%`)
##### Messages
if(is.numeric(n) == F){
stop("n must be numeric.")
}
if(n <= 0){
stop("n must be greater than 0.")
}
if(is.data.frame(Data) == F){
stop("Data must be a data.frame.")
}
if(method %ni% c("s", "spearman", "p", "pearson")){
stop('Method must be: "s", "spearman", "p" or "pearson".')
}
if(method_resampling %ni% c("Bootstrap", "BlockBootstrap")){
stop('Method must be: "Bootstrap" or "BlockBootstrap".')
}
if(method_resampling %in% "BlockBootstrap"){
if (is.null(lag)&is.null(ID)){
      stop('If you want to use the "BlockBootstrap" please give a lag or the individuals ID.')
}
if(!is.null(lag)&!is.null(ID)){
      stop('If you want to use the "BlockBootstrap" please give a lag OR the individuals ID.')
}
}
DIM_Overlap = nrow(subset(Data, row.names(Data) %in% Overlap))
if(DIM_Overlap == 0){
stop('There is no overlapping nodes. Please check your input "Overlap"')
}
if(!is.null(DIM_Overlap)){
message(paste('There are',DIM_Overlap, "overlapping nodes,",dim(Data)[1],
"total nodes and" , dim(Data)[2],"individuals." ))
}
message("This function might take a long time to run. Don't turn off the computer.")
wtomelt0 = wTO::CorrelationOverlap(Data = Data,
Overlap = Overlap,
method = method) %>%
wTO::wTO(., sign)
`%>%` <- magrittr::`%>%`
. <- NULL
for ( i in 1:n){
message(' ',i,' ', appendLF = FALSE)
if(method_resampling == 'BlockBootstrap'){
if (!is.null(lag)){
nsampl = ifelse (ncol(Data) %% lag == 0, ncol(Data) %/% lag, ncol(Data) %/% lag +1)
Y = sample(1:nsampl, size = nsampl, replace = T)
Vect = Y*lag
j = lag - 1
while( j > 0){
Vect = cbind(Vect , Y*lag - j)
j = j - 1
}
SAMPLES = c(Vect)
SAMPLES[SAMPLES > ncol(Data)] <- NA
SAMPLE = stats::na.exclude(SAMPLES)
Data_boot = Data[,SAMPLE]
}
if(!is.null(ID)){
ID %<>% as.factor
bootID = sample(levels(ID), replace = TRUE)
Data_boot = subset(Data, select = ID %in% bootID[1])
for (k in 2:length(bootID)){
Data_boot = cbind(Data_boot,
subset(Data, select = ID %in% bootID[k]))
}
}
res = wTO::CorrelationOverlap(Data = Data_boot, Overlap = Overlap, method = method) %>%
wTO::wTO(., sign)
}
else if (method_resampling != 'BlockBootstrap'){
res = wTO::CorrelationOverlap(Data = Data[,sample(1:ncol(Data), replace = TRUE)], Overlap = Overlap, method = method) %>%
wTO::wTO(., sign)
}
U = (res < wtomelt0 - delta) + (res > wtomelt0 + delta)
if ( i == 1){
out = U}
if (i != 1){
out = out + U
}
rm(res)
rm (U)
}
wtomelt0 = wTO.in.line(wtomelt0)
cor = wTO.in.line(out)
adj.pval = p.adjust(cor$wTO/n, method = 'BH')
pval = data.table::data.table(wtomelt0, pval = cor$wTO/n, pval.adj = adj.pval)
message('Done!')
return(pval)
}
|
/scratch/gouwar.j/cran-all/cranData/wTO/R/wTOfast.R
|
#' Output a character vector containing code for a palette
#'
#' Call this function to get the code for a character vector containing a
#' palette. If using RStudio, the code will be loaded at the console prompt;
#' otherwise, it will be printed at the terminal.
#'
#' @inheritParams wa_pal
#'
#' @return The generated code, invisibly, as a character vector.
#'
#' @examples
#' pal_vector("rainier", 4)
#'
#' @export
pal_vector = function(palette, n, which=NULL,
type=c("discrete", "continuous"), reverse=FALSE) {
type = match.arg(type)
pal = wa_pal(palette, n, which, type, reverse)
varname = paste0("PAL_", toupper(attr(pal, "name")))
code = paste0(varname, " = c(", paste0('"', pal, '"', collapse=", "), ")")
code = paste0(strwrap(code, 76, indent=0, exdent=nchar(varname) + 5),
collapse="\n")
code_output(code)
invisible(code)
}
#' Output a character vector containing code for a `ggplot2` scale
#'
#' Call this function to get the code for the `scale_*` functions for a palette.
#' If using RStudio, the code will be loaded at the console prompt;
#' otherwise, it will be printed at the terminal. Assumes that `ggplot2` has
#' been loaded into the namespace, or will be by the time the scales are used.
#'
#' @param palette a `[wacolors]` palette or palette name.
#' @param which if not `NULL`, the indices or names of a subset of colors to use.
#' @param type Either `continuous`, `discrete`, or `binned`. Use `continuous` if
#' you want to automatically interpolate between colors. Custom scale midpoints
#' are not supported (see [scale_fill_wa_c()]).
#' @param reverse `TRUE` if the colors should be reversed.
#'
#' @return The generated code, invisibly, as a character vector.
#'
#' @examples
#' pal_functions("rainier")
#'
#' @export
pal_functions = function(palette, which=NULL, type=c("discrete", "continuous"),
reverse=FALSE) {
pal = match_pal(palette)
if (!is.null(which)) pal$pal = pal$pal[which]
if (reverse) pal$pal = rev(pal$pal)
names(pal$pal) = NULL
make_discr = function(aesthetic) {
pal_col_code = paste0(" pal_cols = c(", paste0('"', pal$pal, '"', collapse=", "), ")")
pal_col_code = paste0(strwrap(pal_col_code, 76, indent=2, exdent=15), collapse="\n")
pal_col_code = paste0(pal_col_code, "\n", " n_col = length(pal_cols)\n")
pal_fun_code = " ramp = grDevices::colorRampPalette(pal_cols)\n"
if (!(pal$name %in% cont_pal)) {
pal_fun_code = paste0(pal_fun_code, " pal_fun = function(n) ",
"if (n <= n_col) pal_cols[1:n] else ramp(n)\n")
} else {
pal_fun_code = paste0(pal_fun_code, " pal_fun = ramp\n")
}
pal_gen_code = paste0(' discrete_scale("', aesthetic, '", "',
pal$name, '", palette=pal_fun, ...)\n')
fname = paste0("scale_", aesthetic, "_", pal$name, "_d")
paste0(fname, " = function(...) {\n", pal_col_code,
pal_fun_code, pal_gen_code, "}\n")
}
make_cont = function(aesthetic) {
pal_cols = paste0("c(", paste0('"', pal$pal, '"', collapse=", "), ")")
pal_gen_code = paste0("scale_", aesthetic, "_gradientn(..., colours=",
pal_cols, ")")
pal_gen_code = paste0(strwrap(pal_gen_code, 76, indent=2,
exdent=23 + (aesthetic=="color")), collapse="\n")
fname = paste0("scale_", aesthetic, "_", pal$name, "_c")
paste0(fname, " = function(...) {\n", pal_gen_code, "\n}\n")
}
make_binned = function(aesthetic) {
pal_col_code = paste0(" pal_cols = c(", paste0('"', pal$pal, '"', collapse=", "), ")")
pal_col_code = paste0(strwrap(pal_col_code, 76, indent=2, exdent=15), collapse="\n")
pal_col_code = paste0(pal_col_code, "\n", " n_col = length(pal_cols)\n")
pal_fun_code = " ramp = grDevices::colorRampPalette(pal_cols)\n"
if (!(pal$name %in% cont_pal)) {
pal_fun_code = paste0(pal_fun_code, " pal_fun = function(n) ",
"if (n <= n_col) pal_cols[1:n] else ramp(n)\n")
} else {
pal_fun_code = paste0(pal_fun_code, " pal_fun = ramp\n")
}
pal_gen_code = paste0(' binned_scale("', aesthetic, '", "',
pal$name, '", palette=pal_fun, ...)\n')
pal_gen_code = paste0(strwrap(pal_gen_code, 76, indent=2,
exdent=23 + (aesthetic=="color")), collapse="\n")
fname = paste0("scale_", aesthetic, "_", pal$name, "_b")
paste0(fname, " = function(...) {\n", pal_col_code,
pal_fun_code, pal_gen_code, "\n}\n")
}
code = ""
if ("discrete" %in% type) {
code = paste0(code, make_discr("color"), make_discr("fill"), "\n")
}
if ("continuous" %in% type) {
code = paste0(code, make_cont("color"), make_cont("fill"), "\n")
}
if ("binned" %in% type) {
code = paste0(code, make_binned("color"), make_binned("fill"), "\n")
}
code_output(code)
invisible(code)
}
|
/scratch/gouwar.j/cran-all/cranData/wacolors/R/codegen.R
|
#' Washington State Color Palettes
#'
#' A collection of colorblind-friendly color palettes for various settings in
#' the state of Washington. Colors were extracted from a set of photographs, and
#' then combined to form a set of continuous and discrete palettes. Continuous
#' palettes were designed to be perceptually uniform, while discrete palettes
#' were chosen to maximize contrast at several different levels of overall
#' brightness and saturation. Each palette has been evaluated to ensure colors
#' are distinguishable by colorblind people.
#'
#' Discrete palettes contain at most seven colors. Don't create graphics that
#' use more than seven discrete colors. You can color a map with four.
#' Anything more risks confusion. Consider differentiating through faceting or
#' labels, instead.
#'
#' **Available continuous palettes**:
#'
#' `r paste0('![](', tail(names(wacolors), length(cont_pal)), '.png)\n', collapse="\n")`
#'
#' **Available discrete palettes**:
#'
#' `r paste0('![](', head(names(wacolors), length(wacolors) - length(cont_pal)), '.png)\n', collapse="\n")`
#'
#' @format A list of character vectors containing the color palettes. Discrete
#' palette vectors contain names for each color.
#'
#' @examples
#' wacolors$rainier
#' wacolors$palouse[1:4]
#'
#' @export
wacolors = list(
# DISCRETE
rainier = c(lake="#465177", ragwort="#E4C22B", lodge="#965127",
trees="#29483A", ground="#759C44", winter_sky="#9FB6DA",
paintbrush="#DF3383"),
washington_pass = c(trees="#31543B", stone="#48628A", tips="#94AA3D",
road="#7F9CB1", sunbreak="#D9D1BE", stump="#3E3C3A"),
palouse = c(snake="#2D3F4A", wheat="#C0A43D", fallow="#8A6172",
hills="#748A52", canyon="#CCBA98", sky="#69A2E4"),
forest = c(trees="#254029", stream="#1E212F", fern="#516F25",
bark="#3A270A", mountains="#40666F"),
larch = c(larch="#D2A554", shrub="#626B5D", rock="#8C8F9E",
moss="#858753", sky="#A4BADF", dirt="#D3BEAF"),
coast = c(surf="#7BAEA0", sea="#386276", rocks="#3A4332", sand="#7A7D6F",
sunset="#D9B96E", sky="#BED4F0"),
san_juan = c(trees="#21281D", grass="#CA884C", sea="#3A5775",
driftwood="#BAAF9F", clouds="#C9DCE2"),
uw = c(purple="#483778", gold="#D2C28B", brick="#7E4837", cherry="#D48792",
stone="#6B7471"),
fort_worden = c(sea="#4D5370", shrub="#263C19", lighthouse="#A6B7C6",
rust="#8F6D3F"),
skagit = c(red="#B51829", yellow="#EFC519", violet="#831285",
orange="#CC7810", purple="#C886E8", mountains="#3E6E94"),
flag = c(green="#247F5B", yellow="#E2CD70", blue="#5DB3DB", tan="#FFDCCD"),
# CONTINUOUS
sound_sunset = c("#001E36", "#0F2649", "#352C5A", "#533369", "#6E3D71",
"#814C74", "#925C78", "#A26D7C", "#BC7A7D", "#D08B79",
"#DE9F71", "#E7B565", "#EBCC5C", "#E7E55C", "#DCFF6C"),
ferries = c("#241E33", "#1E2C54", "#003F69", "#005373", "#006372",
"#007273", "#008174", "#349075", "#579E78", "#77AB7E",
"#97B788", "#B3C493", "#CFD28F", "#F2DE83", "#FFE66C"),
forest_fire = c("#2C0915", "#40111D", "#551A23", "#6A2626", "#7E3225",
"#92401D", "#A64F00", "#B96000", "#CB7100", "#DB8300",
"#E69800", "#EDAD00", "#F3C307", "#FAD753", "#FFEC7A"),
sea = c("#1C1B23", "#222541", "#1F325A", "#094270", "#005282", "#0B6184",
"#2F6F89", "#457D90", "#598C99", "#6B9AA3", "#7CA9AE", "#8DB9B9",
"#9DC8C5", "#ADD8D2", "#BEE8DF"),
sea_star = c("#4B0049", "#5D014F", "#700853", "#821554", "#932252",
"#963D4E", "#985350", "#95675B", "#B16D51", "#C3774D",
"#D48349", "#E48F43", "#F29C3B", "#FFAA3B", "#FFBA54"),
volcano = c("#29272C", "#323335", "#3E3F3F", "#4A4B4B", "#555857",
"#626561", "#71726B", "#837E77", "#8C8D87", "#989B98",
"#A7A8A8", "#B5B7B7", "#C2C5C8", "#D3D3D9", "#E7E0E8"),
baker = c("#627F9A", "#7684A7", "#8B88B1", "#A08CB9", "#B490BF", "#C795C3",
"#D69CC3", "#E1A5C0", "#E9AFBE", "#EEBABD", "#F2C6BE", "#F4D2C2",
"#F4DEC9", "#F5E9D3", "#F6F4E5"),
diablo = c("#172512", "#0E320C", "#003F0F", "#004C1A", "#005929",
"#00663C", "#007051", "#007B63", "#008575", "#279085",
"#3E9B96", "#51A6A6", "#63B2B7", "#75BDC7", "#87C8D8"),
puget = c("#1D3024", "#123C2E", "#00483E", "#005352", "#005C67", "#01657D",
"#386B91", "#5F6FA3", "#8272B1", "#A175B8", "#B87DB8", "#CB86B6",
"#DA92B3", "#E69FAF", "#EFAEAB"),
mountains = c("#002733", "#003141", "#003B50", "#004661", "#005172",
"#005C84", "#006797", "#0071AA", "#3A7BB5", "#5E85BE",
"#7990C7", "#8F9CCF", "#A4A8D8", "#B7B4E1", "#C8C2EA"),
gorge = c("#00352A", "#00402F", "#004C33", "#005736", "#076338", "#236E38",
"#387937", "#4D8434", "#658E3C", "#7C984D", "#91A15E", "#A5AB70",
"#B7B583", "#C8C095", "#D9CBA7"),
foothills = c("#001C28", "#00262D", "#003233", "#003E39", "#004B3E",
"#005841", "#006542", "#00723F", "#007F38", "#18893C",
"#529249", "#739C5B", "#8FA572", "#A6AF8C", "#B9BAAF"),
footbridge = c("#32221C", "#42291C", "#50311D", "#5E391D", "#6B431C",
"#774C1E", "#7D5933", "#846545", "#8B7155", "#927C64",
"#9A8873", "#A39481", "#ACA08E", "#B6AC9B", "#C1B8A7"),
olympic = c("#2A2A40", "#3E3E63", "#535285", "#6B6B98", "#8484A4",
"#9E9EB4", "#B9B9C5", "#D4D4DA", "#B3BDB0", "#93A58E",
"#738F6A", "#517942", "#396128", "#2D4823", "#232F1F"),
lopez = c("#A3730F", "#B2833A", "#C19456", "#D0A570", "#DEB689", "#EBC8A3",
"#F6DBBF", "#F1F1F1", "#DBDCFF", "#C9CAFA", "#B7B8F0", "#A5A7E5",
"#9496DA", "#8385CE", "#7275C2"),
vantage = c("#473065", "#5B497F", "#70629B", "#857EAB", "#949EB5",
"#A4BDC6", "#C0DAD6", "#EFF2E5", "#DCD797", "#C5BB61",
"#AD9F3C", "#9A8224", "#89641E", "#774718", "#632B14"),
stuart = c("#4F3173", "#6F457F", "#8D5C8C", "#A87499", "#C18FA8",
"#D8AAB8", "#EDC7CB", "#FFE2DE", "#F6C7AE", "#D8B07A",
"#AF9C57", "#85884B", "#617344", "#425C3D", "#2C4534")
)
cont_pal = c("sound_sunset", "ferries", "forest_fire", "sea_star", "sea",
"volcano", "baker", "diablo", "puget", "mountains", "gorge",
"foothills", "footbridge", "olympic", "lopez", "vantage", "stuart")
#' Washington State Color Palette Generator
#'
#' Generate `palette` objects from the `wacolors` list
#'
#' @param palette The name of the palette (partial matching supported), or an
#' actual palette from `[wacolors]`.
#' @param which if not `NULL`, the indices or names of a subset of colors to use.
#' @param n The number of colors in the palette. If this exceeds the actual
#' number and `type` is not provided, it will be set to `continuous`.
#' @param type Either `continuous` or `discrete`. Use `continuous` if you want
#' to automatically interpolate between colors.
#' @param reverse `TRUE` if palette should be reversed.
#'
#' @return A vector of colors of type `palette`. Use the `plot()` function to
#' plot the palette. If the `cli` package is installed, printing the palette
#' to the console will also show its colors.
#'
#' @examples
#' wa_pal("rainier")
#' wa_pal(wacolors$rainier)
#' wa_pal("sound_sunset", 20, "continuous")
#' wa_pal("washington_pass", reverse=TRUE)
#'
#' @importFrom grDevices colorRampPalette
#' @export
wa_pal = function(palette, n, which=NULL,
type=c("discrete", "continuous"), reverse=FALSE) {
obj = match_pal(palette)
pal = obj$pal
name = obj$name
if (!is.null(which)) pal = pal[which]
if (is.null(pal))
stop("Palette `", name, "` not found.")
if (missing(n))
n = length(pal)
if (name %in% cont_pal || n > length(pal))
type="continuous"
type = match.arg(type)
if (type == "discrete")
out = pal[1:n]
else
out = colorRampPalette(pal, space="Lab", interpolate="spline")(n)
if (reverse) out = rev(out)
structure(out, class="palette", name=name)
}
|
/scratch/gouwar.j/cran-all/cranData/wacolors/R/colors.R
|
#' Helper function-palette for discrete scales
#'
#' @param pal the palette colors
#' @param reverse whether to reverse
#'
#' @importFrom grDevices colorRampPalette
#'
#' @keywords internal
#' @noRd
discr_pal = function(pal, reverse=FALSE) {
n_col = length(pal)
names(pal) = NULL
ramp = colorRampPalette(pal, space="Lab", interpolate="spline")
function(n) {
if (n <= n_col)
pal[1:n]
else
ramp(n)
}
}
#' Color palettes for `ggplot2`
#'
#' @rdname scale_wa
#'
#' @param palette a [`wacolors`] palette or palette name.
#' @param which if not `NULL`, the indices or names of a subset of colors to use.
#' @param midpoint if not `NULL` and at least one limit is not provided, the
#' value to center the scale at. Useful for diverging scales.
#' @param ... Other arguments passed on to [ggplot2::discrete_scale()],
#' [ggplot2::continuous_scale()], or [ggplot2::binned_scale()] to control
#' name, limits, breaks, labels and so forth.
#' @param reverse `TRUE` if the colors should be reversed.
#'
#' @return A [ggplot2::Scale] object.
#'
#' @examples
#' library(ggplot2)
#'
#' ggplot(mtcars, aes(mpg, wt)) +
#' geom_point(aes(color = factor(cyl), size=hp)) +
#' scale_color_wa_d()
#'
#' ggplot(mtcars, aes(mpg, wt)) +
#' geom_point(aes(color = hp)) +
#' scale_color_wa_c("palouse", which=c("snake", "wheat"))
#'
#' ggplot(diamonds) +
#' geom_bar(aes(x = cut, fill = clarity)) +
#' scale_fill_wa_d(wacolors$sound_sunset, reverse=TRUE)
#'
#' @importFrom ggplot2 discrete_scale binned_scale scale_color_gradientn scale_fill_gradientn
#' @export
scale_color_wa_d = function(palette="rainier", which=NULL, ..., reverse=FALSE) {
pal = match_pal(palette)
if (!is.null(which)) pal$pal = pal$pal[which]
if (reverse) pal$pal = rev(pal$pal)
if (pal$name %in% cont_pal)
pal_fun = colorRampPalette(pal$pal, space="Lab", interpolate="spline")
else
pal_fun = discr_pal(pal$pal)
discrete_scale("colour", pal$name, palette=pal_fun, ...)
}
#' @rdname scale_wa
#' @export
scale_fill_wa_d = function(palette="rainier", which=NULL, ..., reverse=FALSE) {
pal = match_pal(palette)
if (!is.null(which)) pal$pal = pal$pal[which]
if (reverse) pal$pal = rev(pal$pal)
if (pal$name %in% cont_pal)
pal_fun = colorRampPalette(pal$pal, space="Lab", interpolate="spline")
else
pal_fun = discr_pal(pal$pal)
discrete_scale("fill", pal$name, palette=pal_fun, ...)
}
#' @rdname scale_wa
#' @export
scale_color_wa_c = function(palette="sound_sunset", which=NULL, midpoint=NULL,
..., reverse=FALSE) {
pal = match_pal(palette)
if (!is.null(which)) pal$pal = pal$pal[which]
if (reverse) pal$pal = rev(pal$pal)
if (is.null(midpoint)) {
rescaler = scales::rescale
} else {
rescaler = function(x, to=c(0, 1), from=range(x, na.rm=TRUE)) {
scales::rescale_mid(x, to, from, midpoint)
}
}
scale_color_gradientn(..., colours=pal$pal, rescaler=rescaler)
}
#' @rdname scale_wa
#' @export
scale_fill_wa_c = function(palette="sound_sunset", which=NULL, midpoint=NULL,
..., reverse=FALSE) {
pal = match_pal(palette)
if (!is.null(which)) pal$pal = pal$pal[which]
if (reverse) pal$pal = rev(pal$pal)
if (is.null(midpoint)) {
rescaler = scales::rescale
} else {
rescaler = function(x, to=c(0, 1), from=range(x, na.rm=TRUE)) {
scales::rescale_mid(x, to, from, midpoint)
}
}
scale_fill_gradientn(..., colours=pal$pal, rescaler=rescaler)
}
#' @rdname scale_wa
#' @export
scale_color_wa_b = function(palette="sound_sunset", which=NULL, ..., reverse=FALSE) {
pal = match_pal(palette)
if (!is.null(which)) pal$pal = pal$pal[which]
if (reverse) pal$pal = rev(pal$pal)
pal_fun = scales::colour_ramp(pal$pal)
binned_scale("color", pal$name, palette=pal_fun, ...)
}
#' @rdname scale_wa
#' @export
scale_fill_wa_b = function(palette="sound_sunset", which=NULL, ..., reverse=FALSE) {
pal = match_pal(palette)
if (!is.null(which)) pal$pal = pal$pal[which]
if (reverse) pal$pal = rev(pal$pal)
pal_fun = scales::colour_ramp(pal$pal)
binned_scale("fill", pal$name, palette=pal_fun, ...)
}
#' @rdname scale_wa
#' @export
scale_colour_wa_d = scale_color_wa_d
#' @rdname scale_wa
#' @export
scale_colour_wa_c = scale_color_wa_c
#' @rdname scale_wa
#' @export
scale_colour_wa_b = scale_color_wa_b
|
/scratch/gouwar.j/cran-all/cranData/wacolors/R/scales.R
|
#' Takes name of palette or palette itself and returns palette + name
#' @param name the name of the palette or the palette itself
#'
#' @keywords internal
#' @noRd
match_pal = function(name) {
found_name = pmatch(name, names(wacolors))
if (length(found_name) > 0 && !any(is.na(found_name))) {
pal = wacolors[[found_name]]
} else if (all(is.na(found_name)) && is.character(name) &&
all(startsWith(name, "#"))) {
pal = name
try_match = vapply(wacolors, FUN=function(x) isTRUE(all.equal(x, pal)),
FUN.VALUE=logical(1))
name = names(wacolors)[try_match]
} else {
stop("Palette `", name, "` not found.")
}
list(pal=pal, name=name)
}
#' @export
#' @importFrom graphics rect par image text
print.palette = function(x, ...) {
x_print = unclass(x)
if (!is.null(pal_name <- attr(x, "name", exact=TRUE))) {
cat("`", pal_name, "`\n", sep="")
attr(x_print, "name") = NULL
}
print(x_print)
if (requireNamespace("cli", quietly=TRUE)) {
n_colors = cli::num_ansi_colors()
if (n_colors >= 256) {
if (n_colors < 256^3/2) cat("approximate palette:\n")
block = "\u00A0\u00A0"
cat(" ")
for (color in x) {
cat(cli::make_ansi_style(color, bg=TRUE)(block))
cat(" ")
}
}
}
cat("\n")
invisible(x)
}
# `plot.palette` modified from that in `wesanderson` (c) 2016 Karthik Ram
#' @export
#' @importFrom graphics rect par image text
plot.palette = function(x, ...) {
n <- length(x)
old <- par(mar=c(0.5, 0.5, 0.5, 0.5))
on.exit(par(old))
image(1:n, 1, as.matrix(1:n), col=x,
ylab="", xaxt="n", yaxt="n", bty="n")
rect(0, 0.9, n + 1, 1.1, col=grDevices::rgb(1, 1, 1, 0.8), border=NA)
text((n + 1) / 2, 1, labels=attr(x, "name", exact=TRUE), col="black", cex=1, font=2)
if (!is.null(names(x)))
text(1:n, 1.25, labels=names(x), col="black", cex=1)
}
#' Paste-to-hex
#' @keywords internal
#' @noRd
pth = function(x) {
rgb = as.list(as.integer(strsplit(x, "\\s")[[1]]) / 256)
col = do.call(colorspace::sRGB, rgb)
str = paste0('"', colorspace::hex(col), '"')
code_output(str)
}
# Helper for interactive code output
code_output = function(x) {
if (interactive() && requireNamespace("rstudioapi", quietly=TRUE)) {
rstudioapi::sendToConsole(x, execute=F)
} else {
cat(x, "\n")
}
}
|
/scratch/gouwar.j/cran-all/cranData/wacolors/R/util.R
|
utils::globalVariables(c("w", "term", "term_count"))
|
/scratch/gouwar.j/cran-all/cranData/wactor/R/globals.R
|
#' Split into test and train data sets
#'
#' Randomly partition input into a list of \code{train} and \code{test} data sets
#'
#' @param .data Input data. If atomic (numeric, integer, character, etc.), the
#' input is first converted to a data frame with a column name of "x."
#' @param .p Proportion of data that should be used for the \code{train} data set
#' output. The default value is 0.80, meaning the \code{train} output will include
#' roughly 80 pct. of the input cases while the \code{test} output will include roughly
#' 20 oct..
#'   20 pct.
#' (quotes are not necessary). This is only relevant if the identified
#' variable is categorical–i.e., character, factor, logical–in which case it
#' is used to ensure a uniform distribution for the \code{train} output data set.
#' If a value is supplied, uniformity in response level observations is
#' prioritized over the \code{.p} (train proportion) value.
#' @return A list with \code{train} and \code{test} tibbles (data.frames)
#'
#' @examples
#'
#' ## example data frame
#' d <- data.frame(
#' x = rnorm(100),
#' y = rnorm(100),
#' z = c(rep("a", 80), rep("b", 20))
#' )
#'
#' ## split using defaults
#' split_test_train(d)
#'
#' ## split 0.60/0.40
#' split_test_train(d, 0.60)
#'
#' ## split with equal response level obs
#' split_test_train(d, 0.80, label = z)
#'
#' ## apply to atomic data
#' split_test_train(letters)
#'
#' @export
split_test_train <- function(.data, .p = 0.80, ...) {
UseMethod("split_test_train")
}
#' @export
split_test_train.data.frame <- function(.data, .p = 0.80, ...) {
split_test_train(tibble::as_tibble(.data), .p, ...)
}
#' @export
split_test_train.tbl_df <- function(.data, .p = 0.80, ...) {
dots <- capture_dots(...)
  if (length(dots) > 1) {
stop("split_test_train can only accept one response variable", call. = FALSE)
}
n <- round(nrow(.data) * .p, 0)
r <- seq_len(nrow(.data))
if (length(dots) > 0 && !is.numeric(y <- eval(dots[[1]], envir = .data))) {
ty <- table(y)
ny <- length(ty)
lo <- min(as.integer(ty))
if ((n / ny) > lo) {
n <- lo * ny
}
r <- split(r, y)
}
r <- sampleit(r, n)
list(
train = .data[r, ],
test = .data[-r, ]
)
}
#' @export
split_test_train.default <- function(.data, .p = 0.80, ...) {
if (!is.recursive(.data)) {
.data <- list(x = .data)
}
split_test_train(tibble::as_tibble(.data), .p, ...)
}
|
/scratch/gouwar.j/cran-all/cranData/wactor/R/split_test_train.R
|
## embedding through training
create_embedding_matrix <- function(text,
word_vectors_size = 50,
term_count_min = 1L,
skip_grams_window = 5L) {
text <- tolower(text)
tokens <- text2vec::space_tokenizer(text)
it <- text2vec::itoken(tokens, progressbar = FALSE)
vocab <- text2vec::create_vocabulary(it)
vocab <- text2vec::prune_vocabulary(vocab, term_count_min)
vectorizer <- text2vec::vocab_vectorizer(vocab)
tcm <- text2vec::create_tcm(it, vectorizer, skip_grams_window)
glove <- text2vec::GlobalVectors$new(word_vectors_size, vocabulary = vocab, x_max = 10)
wv_main <- glove$fit_transform(tcm, n_iter = 10, convergence_tol = 0.01)
wv_context <- glove$components
word_vectors <- wv_main + t(wv_context)
return(word_vectors)
}
## embedding through pretrained vectors
download_vector_file <- function(type = "twitter") {
if(type == "twitter") {
vector_file = "~/glove.twitter.27B"
if (!file.exists(vector_file)) {
utils::download.file("http://nlp.stanford.edu/data/glove.twitter.27B.zip",
"~/glove.twitter.27B.zip")
utils::unzip("~/glove.twitter.27B.zip", files = "glove.twitter.27B",
exdir = "~/")
}
}
}
create_embedding_matrix2 <- function(text,
vector_type = "twitter",
vector_file = NULL) {
text <- tolower(text)
tokens <- text2vec::space_tokenizer(text)
it <- text2vec::itoken(tokens, progressbar = FALSE)
vocab <- text2vec::create_vocabulary(it)
word_index <- data.frame(word = vocab[, 1])
word_index$idx <- seq.int(nrow(word_index))
  ## read pretrained vectors; fall back to the bundled default file when
  ## no vector_file is supplied
  if (is.null(vector_file)) {
    if (vector_type == "twitter") {
      vector_file <- "glove.6B/glove.6B.50d.txt"
    }
  }
  lines <- readLines(file.path(vector_file))
  lines_split <- strsplit(lines, split = " ")
  ## create vector matrix (rows = words, columns = vector components)
  word_vectors <- do.call("rbind", lines_split)
  word_list <- word_vectors[, 1]
  word_vectors <- word_vectors[, 2:ncol(word_vectors), drop = FALSE]
  ## values read from the text file are character; coerce them to numeric
  storage.mode(word_vectors) <- "numeric"
  rownames(word_vectors) <- word_list
## select words vectors from vector matrix
embedding_matrix <- matrix(0L, nrow = nrow(word_index), ncol = ncol(word_vectors))
matchs <- match(word_index$word, rownames(word_vectors))
i <- 0
for (idx in matchs) {
i <- i+1
if(!is.na(idx)) {
embedding_matrix[i, ] <- word_vectors[idx, ]
}
}
return(embedding_matrix)
}
|
/scratch/gouwar.j/cran-all/cranData/wactor/R/try_glove.R
|
std_dev <- function(x, na.rm = TRUE, ...) {
stats::sd(x, na.rm = na.rm, ...)
}
variance <- function(x, na.rm = TRUE, ...) {
stats::var(x, na.rm = na.rm, ...)
}
the_mean <- function(x, na.rm = TRUE, ...) {
mean(x, na.rm = na.rm, ...)
}
prepend_class <- function(x, ...) {
`class<-`(x, unique(c(..., class(x))))
}
append_class <- function(x, ...) {
`class<-`(x, unique(c(class(x), ...)))
}
`%||%` <- function(x, y) {
if (is_null(x))
y
else x
}
is_null <- function(x) is.null(x)
caller_env <- function(n = 1) {
parent.frame(n + 1)
}
scale_with_params <- function(x, m, std) {
if (!is.matrix(x)) {
x <- as.matrix(x)
}
ncol <- length(m)
nrow <- nrow(x)
divide <- function(e1, e2) {
e <- `/`(e1, e2)
e[e2 == 0 & e1 == 0] <- 0
e
}
divide((x - matrix(m,
nrow = nrow,
ncol = ncol,
byrow = TRUE)), matrix(std,
nrow = nrow,
ncol = ncol,
byrow = TRUE))
}
sampleit <- function(x, n) {
if (!is.list(x)) {
sort(sample(x, n))
} else {
sort(unlist(lapply(x, sample, round(n / length(x)), 0), use.names = FALSE))
}
}
capture_dots <- function(...) {
eval(substitute(alist(...)), envir = parent.frame())
}
|
/scratch/gouwar.j/cran-all/cranData/wactor/R/utils.R
|
as_fun <- function(x) {
if (is.function(x)) {
return(x)
}
## convert string to function
if (is.character(x)) {
x <- get(x, envir = caller_env())
} else {
x <- eval(x)
}
## check and return function call
stopifnot(is.function(x))
x
}
validate_tokenizer_ <- function(x) {
## if function then return
if (tryCatch(is.function(x),
error = function(e) FALSE)) {
return(prepend_class(x, "tokenizer"))
}
## if null, return default tokenizer otherwise validate function
if (is.null(x)) {
x <- function(x) tokenizers::tokenize_words(x, lowercase = TRUE)
} else {
x <- as_fun(x)
}
## return as tokenizer function
prepend_class(x, "tokenizer")
}
##
validate_tokenizer <- function(tokenizer = NULL) {
  ## validate and fix common tokenizer spec problems
tokenizer <- validate_tokenizer_(tokenizer)
## return
tokenizer
}
#' A wactor object
#'
#' A factor-like class for word vectors
#' @param max_words Maximum number of words in vocabulary
#' @param doc_prop_max Maximum proportion of docs for terms in the dictionary.
#' @param doc_prop_min Minimum proportion of docs for terms in dictionary.
#' @param .text Input text
#' @param .vectorizer Object used to vectorize text into numeric
#' @param .vocab Predefined vocabulary
#' @param .tokenizer Function used to tokenize text
#' @param .tfidf Term frequency inverse document frequency object
#' @param .tfidf_m Mean used to scale matrix
#' @param .tfidf_sd Standard deviation used to scale matrix
#'
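#' @examples
#' ## A minimal sketch of using the R6 class directly; wactor() / as_wactor()
#' ## are the usual entry points and pass their arguments on to Wactr$new().
#' w <- Wactr$new(c("this is a document", "this is another document"))
#' w$dtm("a brand new document")
#' w$tfidf("a brand new document")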
#' @export
Wactr <- R6::R6Class("wactor", list(
.text = NULL,
.vectorizer = NULL,
.vocab = NULL,
.tokenizer = NULL,
.tfidf = NULL,
.tfidf_m = NULL,
.tfidf_sd = NULL,
dtm = NULL,
tfidf = NULL,
initialize = function(text = character(),
tokenizer = NULL,
max_words = 1000,
doc_prop_max = 1.000,
doc_prop_min = 0.000) {
# if (!is.null(train_rows <- get_train_rows(text))) {
# text <- text[train_rows]
# }
self$.text <- text
## create/config tokenizer
self$.tokenizer <- validate_tokenizer(substitute(tokenizer))
## tokenize training strings
i <- text2vec::itoken(
iterable = self$.text,
progressbar = FALSE,
ids = seq_along(self$.text),
tokenizer = self$.tokenizer)
## create and prune vocab
self$.vocab <- text2vec::prune_vocabulary(
vocabulary = text2vec::create_vocabulary(i),
doc_proportion_max = doc_prop_max,
doc_proportion_min = doc_prop_min,
vocab_term_max = max_words)
## vectorizer
self$.vectorizer <- text2vec::vocab_vectorizer(self$.vocab)
## document-term matrix
self$dtm <- function(x) {
x <- text2vec::itoken(
iterable = x,
progressbar = FALSE,
ids = seq_along(x),
tokenizer = self$.tokenizer)
suppressWarnings(text2vec::create_dtm(x, self$.vectorizer))
}
## create tfidf method
self$.tfidf <- text2vec::TfIdf$new()
## fit on data
msd <- self$.tfidf$fit_transform(self$dtm(self$.text))
self$.tfidf_m <- apply(msd, 2, the_mean)
self$.tfidf_sd <- apply(msd, 2, std_dev)
## export function for creating tfidfs
self$tfidf <- function(x, normalize = TRUE) {
tf <- self$.tfidf$transform(self$dtm(x))
if (normalize) {
tf <- scale_with_params(tf, self$.tfidf_m, self$.tfidf_sd)
}
tf
}
## return self
self
}
)
)
|
/scratch/gouwar.j/cran-all/cranData/wactor/R/wactor-r6.R
|
#' As wactor
#'
#' Convert data into object of type 'wactor'
#'
#' @param .x Input text vector
#' @param ... Other args passed to Wactr$new(...)
#' @return An object of type wactor
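#' @examples
#' ## A minimal sketch: coerce a character vector into a wactor
#' w <- as_wactor(c("a", "a", "b", "c"))
#' dtm(w)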
#' @export
as_wactor <- function(.x, ...) {
UseMethod("as_wactor")
}
#' @export
as_wactor.default <- function(.x, ...) {
Wactr$new(.x, ...)
}
#' Create wactor
#'
#' Create an object of type 'wactor'
#'
#' @param .x Input text vector
#' @param ... Other args passed to Wactr$new(...)
#' @return An object of type wactor
#' @examples
#'
#' ## create
#' w <- wactor(c("a", "a", "a", "b", "b", "c"))
#'
#' ## summarize
#' summary(w)
#'
#' ## plot
#' plot(w)
#'
#' ## predict
#' predict(w)
#'
#' ## use on NEW data
#' dtm(w, letters[1:5])
#'
#' ## dtm() is the same as predict()
#' predict(w, letters[1:5])
#'
#' ## works if you specify 'newdata' too
#' predict(w, newdata = letters[1:5])
#'
#' @export
wactor <- function(.x, ...) {
UseMethod("wactor")
}
#' @export
wactor.default <- function(.x, ...) {
Wactr$new(.x, ...)
}
#' Term frequency inverse document frequency
#'
#' Converts character vector into a term frequency inverse document frequency
#' (TFIDF) matrix
#'
#' @param object Input object containing dictionary (column), e.g., wactor
#' @param .x Text from which the tfidf matrix will be created
#' @return A c-style matrix
#' @examples
#'
#' ## create wactor
#' w <- wactor(letters)
#'
#' ## use wactor to create tfidf of same vector
#' tfidf(w, letters)
#'
#' ## using the initial data is the default; so you don't actually have to
#' ## respecify it
#' tfidf(w)
#'
#' ## use wactor to create tfidf on new vector
#' tfidf(w, c("a", "e", "i", "o", "u"))
#'
#' ## apply directly to character vector
#' tfidf(letters)
#'
#' @export
tfidf <- function(object, .x = NULL) UseMethod("tfidf")
#' @export
tfidf.wactor <- function(object, .x = NULL) {
object$tfidf(.x %||% object$.text)
}
#' @export
tfidf.character <- function(object, .x = NULL) {
object <- wactor(object)
object$tfidf(.x %||% object$.text)
}
#' Document term frequency
#'
#' Converts character vector into document term matrix (dtm)
#'
#' @param object Input object containing dictionary (column), e.g., wactor
#' @param .x Text from which the document term matrix will be created
#' @return A c-style matrix
#' @examples
#'
#' ## create wactor
#' w <- wactor(letters)
#'
#' ## use wactor to create dtm of same vector
#' dtm(w, letters)
#'
#' ## using the initial data is the default; so you don't actually have to
#' ## respecify it
#' dtm(w)
#'
#' ## use wactor to create dtm on new vector
#' dtm(w, c("a", "e", "i", "o", "u"))
#'
#' ## apply directly to character vector
#' dtm(letters)
#'
#' @export
dtm <- function(object, .x = NULL) UseMethod("dtm")
#' @export
dtm.wactor <- function(object, .x = NULL) {
object$dtm(.x %||% object$.text)
}
#' @export
dtm.character <- function(object, .x = NULL) {
object <- wactor(object)
object$dtm(.x %||% object$.text)
}
#' @export
predict.wactor <- function(object, ...) {
dots <- list(object = object, ...)
names(dots)[names(dots) == "newdata"] <- ".x"
do.call("dtm", dots)
}
#' @export
summary.wactor <- function(object, ...) {
len <- length(object$.vocab$term)
x <- as.data.frame(object)
attr(x, "len") <- len
x
}
#' @export
plot.wactor <- function(x, n = 20, ...) {
x <- utils::head(as.data.frame(x), n)
x$term <- factor(x$term, levels = rev(unique(x$term)))
ggplot2::ggplot(x, ggplot2::aes(x = term, y = term_count)) +
ggplot2::geom_col() +
ggplot2::coord_flip()
}
#' @export
as.data.frame.wactor <- function(x, ...) {
tibble::as_tibble(x$.vocab)
}
#' @export
levels.wactor <- function(x) x$.vocab
#' @export
print.wactor <- function(x, ...) {
len <- length(x$.text)
x <- as.data.frame(x)
attr(x, "len") <- len
print(x, ...)
}
|
/scratch/gouwar.j/cran-all/cranData/wactor/R/wactor-s3.R
|
#' xgb matrix
#'
#' Simple wrapper for creating a xgboost matrix
#'
#' @param x Input data
#' @param ... Other data to cbind
#' @param y Label vector
#' @param split Optional number between 0-1 indicating the desired split between
#' train and test
#' @return A xgb.Dmatrix
#' @examples
#'
#' xgb_mat(data.frame(x = rnorm(20), y = rnorm(20)))
#'
#' @export
xgb_mat <- function(x, ..., y = NULL, split = NULL) {
UseMethod("xgb_mat")
}
#' @export
xgb_mat.data.frame <- function(x, ..., y = NULL, split = NULL) {
x <- as.matrix.data.frame(x, rownames.force = FALSE)
xgb_mat(x, ..., y = y, split = split)
}
#' @export
xgb_mat.default <- function(x, ..., y = NULL, split = NULL) {
x <- as.matrix(x)
xgb_mat(x, ..., y = y, split = split)
}
#' @export
xgb_mat.matrix <- function(x, ..., y = NULL, split = NULL) {
x <- cbind(x, ...)
if (is.null(split)) {
if (is.null(y)) {
return(xgboost::xgb.DMatrix(x))
}
return(xgboost::xgb.DMatrix(x, label = y))
}
train_rows <- sample(seq_len(nrow(x)), nrow(x) * split)
if (is.null(y)) {
return(list(
train = xgboost::xgb.DMatrix(x[train_rows, , drop = FALSE]),
test = xgboost::xgb.DMatrix(x[-train_rows, , drop = FALSE])
))
}
list(
train = xgboost::xgb.DMatrix(x[train_rows, , drop = FALSE],
label = y[train_rows]),
test = xgboost::xgb.DMatrix(x[-train_rows, , drop = FALSE],
label = y[-train_rows])
)
}
#' @export
xgb_mat.dgCMatrix <- function(x, ..., y = NULL, split = NULL) {
x <- cbind(x, ...)
if (is.null(split)) {
if (is.null(y)) {
return(xgboost::xgb.DMatrix(x))
}
return(xgboost::xgb.DMatrix(x, label = y))
}
train_rows <- sample(seq_len(nrow(x)), nrow(x) * split)
if (is.null(y)) {
return(list(
train = xgboost::xgb.DMatrix(x[train_rows, , drop = FALSE]),
test = xgboost::xgb.DMatrix(x[-train_rows, , drop = FALSE])
))
}
list(
train = xgboost::xgb.DMatrix(x[train_rows, , drop = FALSE],
label = y[train_rows]),
test = xgboost::xgb.DMatrix(x[-train_rows, , drop = FALSE],
label = y[-train_rows])
)
}
|
/scratch/gouwar.j/cran-all/cranData/wactor/R/xgb_mat.R
|
geom_rrect <- function(mapping = NULL, data = NULL, # nocov start
stat = "identity", position = "identity",
radius = grid::unit(6, "pt"),
...,
na.rm = FALSE,
show.legend = NA,
inherit.aes = TRUE) {
layer(
data = data,
mapping = mapping,
stat = stat,
geom = GeomRrect,
position = position,
show.legend = show.legend,
inherit.aes = inherit.aes,
params = list(
radius = radius,
na.rm = na.rm,
...
)
)
}
GeomRrect <- ggplot2::ggproto(
"GeomRrect", ggplot2::Geom,
default_aes = ggplot2::aes(
fill = "grey35", size = 0.5, linetype = 1, alpha = NA#, colour = NA
),
required_aes = c("xmin", "xmax", "ymin", "ymax"),
draw_panel = function(self, data, panel_params, coord,
radius = grid::unit(6, "pt")) {
coords <- coord$transform(data, panel_params)
lapply(1:length(coords$xmin), function(i) {
grid::roundrectGrob(
coords$xmin[i], coords$ymax[i],
width = (coords$xmax[i] - coords$xmin[i]),
        height = (coords$ymax[i] - coords$ymin[i]),
r = radius,
default.units = "native",
just = c("left", "top"),
gp = grid::gpar(
col = coords$colour[i],
fill = alpha(coords$fill[i], coords$alpha[i]),
lwd = coords$size[i] * .pt,
lty = coords$linetype[i],
lineend = "butt"
)
)
}) -> gl
grobs <- do.call(grid::gList, gl)
ggname("geom_rrect", grid::grobTree(children = grobs))
},
draw_key = ggplot2::draw_key_polygon
) # nocov end
|
/scratch/gouwar.j/cran-all/cranData/waffle/R/a-geom-rect.R
|
utils::globalVariables(c("x", "y", "value"))
.dbg <- TRUE
msg <- function(...) {
if (.dbg) message(...)
}
|
/scratch/gouwar.j/cran-all/cranData/waffle/R/aaa.r
|
# Waffle's mappings from CSS names to Unicode chars were out of date.
# This variation updates them from the latest CSS from GitHub.
.fa_unicode_init <- function() {
xdf <- readRDS(system.file("extdat/fadf.rds", package = "waffle"))
xdf[xdf[["type"]] != "regular", ]
}
.fa_unicode <- .fa_unicode_init()
.display_fa <- function(fdf) {
vb <- stringr::str_match(fdf[["glyph"]], '(viewBox="[^"]+")')[,2]
stringr::str_replace(
fdf[["glyph"]],
vb,
sprintf('%s width="24" height="24"', vb)
) -> fdf[["glyph"]]
DT::datatable(fdf[,c("name", "type", "glyph")], escape = FALSE)
}
#' Search Font Awesome glyph names for a pattern
#'
#' @param pattern pattern to search for in the names of Font Awesome fonts
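#' @examples
#' ## A quick sketch (opens an HTML widget listing matching glyphs,
#' ## so it is not run here):
#' # fa_grep("truck")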
#' @export
fa_grep <- function(pattern) {
res <- which(grepl(pattern, .fa_unicode[["name"]]))
if (length(res)) {
.display_fa(.fa_unicode[res, ])
} else {
message("No Font Awesome font found with that name pattern.")
}
}
#' List all Font Awesome glyphs
#'
#' @export
fa_list <- function() {
.display_fa(.fa_unicode)
}
#' Install Font Awesome 5 Fonts
#'
#' @export
install_fa_fonts <- function() {
message(
"The TTF font files for Font Awesome 5 fonts are in:\n\n",
system.file("fonts", package = "waffle"),
"\n\nPlease navigate to that directory and install them on ",
"your system."
)
}
#' Font Awesome 5 Solid
#'
#' @description `fa5_solid` is shorthand for "`FontAwesome5Free-Solid`"
#' @docType data
#' @export
fa5_solid <- "FontAwesome5Free-Solid"
#' Font Awesome 5 Brand
#'
#' @description `fa5_brand` is shorthand for "`FontAwesome5Brands-Regular`"
#' @docType data
#' @export
fa5_brand <- "FontAwesome5Brands-Regular"
|
/scratch/gouwar.j/cran-all/cranData/waffle/R/fontawesome.R
|
picto_scale <- function(aesthetic, values = NULL, ...) {
values <- if (is_missing(values)) "circle" else force(values)
pal <- function(n) {
vapply(
if (n > length(values)) rep(values[[1]], n) else values,
function(.x) .fa_unicode[.fa_unicode[["name"]] == .x, "unicode"],
character(1),
USE.NAMES = FALSE
)
}
discrete_scale(aesthetic, "manual", pal, ...)
}
#' Used with geom_pictogram() to map Font Awesome fonts to labels
#'
#' @param ... other arguments passed on to [ggplot2::discrete_scale()]
#' @param values a character vector of Font Awesome glyph names to map the labels to
#' @param aesthetics the aesthetic(s) to scale; defaults to "label"
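#' @examples
#' ## A hedged sketch: map two label levels to Font Awesome glyph names
#' ## ("car" and "truck" are assumed glyph names); meant to be used together
#' ## with geom_pictogram(), so it is not run on its own here.
#' # scale_label_pictogram(name = NULL, values = c("car", "truck"))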
#' @export
scale_label_pictogram <- function(..., values, aesthetics = "label") {
picto_scale(aesthetics, values, ...)
}
#' Legend builder for pictograms
#'
#' @param data,params,size legend key things
#' @keywords internal
#' @export
draw_key_pictogram <- function(data, params, size) {
# msg("==> draw_key_pictogram()")
#
# print(str(data, 1))
# print(str(params, 1))
if (is.null(data$label)) data$label <- "a"
textGrob(
label = data$label,
x = 0.5, y = 0.5,
rot = data$angle %||% 0,
hjust = data$hjust %||% 0,
vjust = data$vjust %||% 0.5,
gp = gpar(
col = alpha(data$colour %||% data$fill %||% "black", data$alpha),
fontfamily = data$family %||% "",
fontface = data$fontface %||% 1,
fontsize = (data$size %||% 3.88) * .pt,
lineheight = 1.5
)
)
}
#' Pictogram Geom
#'
#' There are two special/critical `aes()` mappings:
#' - `label` (so the geom knows which column to map the glyphs to)
#' - `values` (which column you're mapping the filling for the squares with)
#'
#' @md
#' @param mapping Set of aesthetic mappings created by `aes()` or
#' `aes_()`. If specified and `inherit.aes = TRUE` (the
#' default), it is combined with the default mapping at the top level of the
#' plot. You must supply `mapping` if there is no plot mapping.
#' @param n_rows how many rows should there be in the waffle chart? default is 10
#' @param flip If `TRUE`, flip x and y coords. n_rows then becomes n_cols.
#' Useful to achieve waffle column chart effect. Defaults is `FALSE`.
#' @param make_proportional compute proportions from the raw values? (i.e. each
#' value `n` will be replaced with `n`/`sum(n)`); default is `FALSE`.
#' @param data The data to be displayed in this layer. There are three
#' options:
#'
#' If `NULL`, the default, the data is inherited from the plot
#' data as specified in the call to `ggplot()`.
#'
#' A `data.frame`, or other object, will override the plot
#' data. All objects will be fortified to produce a data frame. See
#' `fortify()` for which variables will be created.
#'
#' A `function` will be called with a single argument,
#' the plot data. The return value must be a `data.frame.`, and
#' will be used as the layer data.
#' @param na.rm If `FALSE`, the default, missing values are removed with
#' a warning. If `TRUE`, missing values are silently removed.
#' @param show.legend logical. Should this layer be included in the legends?
#' `NA`, the default, includes if any aesthetics are mapped.
#' `FALSE` never includes, and `TRUE` always includes.
#' It can also be a named logical vector to finely select the aesthetics to
#' display.
#' @param inherit.aes If `FALSE`, overrides the default aesthetics,
#' rather than combining with them. This is most useful for helper functions
#' that define both data and aesthetics and shouldn't inherit behaviour from
#' the default plot specification, e.g. `borders()`.
#' @param ... other arguments passed on to `layer()`. These are
#' often aesthetics, used to set an aesthetic to a fixed value, like
#' `color = "red"` or `size = 3`. They may also be parameters
#' to the paired geom/stat.
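#' @examples
#' ## A minimal sketch, assuming the bundled Font Awesome 5 fonts have been
#' ## installed (see install_fa_fonts()); the glyph names "car" and "truck"
#' ## are assumptions, not part of the original docs.
#' # library(ggplot2)
#' # xdf <- data.frame(parts = c("cars", "trucks"), vals = c(8, 4))
#' # ggplot(xdf, aes(label = parts, values = vals)) +
#' #   geom_pictogram(n_rows = 4, aes(colour = parts), family = fa5_solid, flip = TRUE) +
#' #   scale_label_pictogram(name = NULL, values = c("car", "truck")) +
#' #   coord_equal()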
#' @export
geom_pictogram <- function(mapping = NULL, data = NULL,
n_rows = 10, make_proportional = FALSE, flip = FALSE,
..., na.rm = FALSE, show.legend = NA, inherit.aes = TRUE) {
layer(
data = data,
mapping = mapping,
stat = "waffle",
geom = "pictogram",
position = "identity",
show.legend = show.legend,
inherit.aes = inherit.aes,
params = list(
na.rm = na.rm,
n_rows = n_rows,
make_proportional = make_proportional,
flip = flip,
...
)
)
}
#' @rdname geom_pictogram
#' @export
GeomPictogram <- ggplot2::ggproto(
`_class` = "GeomPictogram",
`_inherit` = GeomText,
# required_aes = c("x", "y", "label", "colour"),
default_aes = aes(
fill = NA, alpha = NA, colour = "black",
size = 9, angle = 0, hjust = 0.5, vjust = 0.5,
family = "FontAwesome5Free-Solid", fontface = 1, lineheight = 1
),
draw_group = function(self, data, panel_params, coord,
n_rows = 10, make_proportional = FALSE, flip = FALSE,
radius = grid::unit(0, "npc")) {
# msg("Called => GeomPictogram::draw_group()")
coord <- ggplot2::coord_equal()
grobs <- GeomText$draw_panel(data, panel_params, coord, parse = FALSE, check_overlap = FALSE)
# msg("Done With => GeomPictogram::draw_group()")
ggname("geom_pictogram", grid::grobTree(children = grobs))
},
draw_panel = function(self, data, panel_params, coord,
n_rows = 10, make_proportional = FALSE, flip = FALSE, ...) {
# msg("Called => GeomPictogram::draw_panel()")
# print(str(data, 1))
coord <- ggplot2::coord_equal()
grobs <- GeomText$draw_panel(data, panel_params, coord, parse = FALSE, check_overlap = FALSE)
# msg("Done With => GeomPictogram::draw_panel()")
ggname("geom_pictogram", grid::grobTree(children = grobs))
},
draw_key = draw_key_pictogram
)
|
/scratch/gouwar.j/cran-all/cranData/waffle/R/geom-pictogram.R
|
geom_rtile <- function(mapping = NULL, data = NULL,
stat = "identity", position = "identity",
radius = grid::unit(6, "pt"),
...,
na.rm = FALSE,
show.legend = NA,
inherit.aes = TRUE) {
ggplot2::layer(
data = data,
mapping = mapping,
stat = stat,
geom = GeomRtile,
position = position,
show.legend = show.legend,
inherit.aes = inherit.aes,
params = list(
radius = radius,
na.rm = na.rm,
...
)
)
}
GeomRtile <- ggplot2::ggproto("GeomRtile", GeomRrect,
extra_params = c("na.rm", "width", "height"),
setup_data = function(data, params) {
data$width <- data$width %||% params$width %||% ggplot2::resolution(data$x, FALSE)
data$height <- data$height %||% params$height %||% ggplot2::resolution(data$y, FALSE)
transform(data,
xmin = x - width / 2, xmax = x + width / 2, width = NULL,
ymin = y - height / 2, ymax = y + height / 2, height = NULL
)
},
default_aes = ggplot2::aes(
fill = "grey20", colour = NA, size = 0.1, linetype = 1, alpha = NA
),
required_aes = c("x", "y"),
draw_key = ggplot2::draw_key_polygon
)
|
/scratch/gouwar.j/cran-all/cranData/waffle/R/geom-rtile.R
|