#' @keywords internal
#' @noRd
Wind_averaging <- function(ws, wd) {
  # Calculate the u and v wind components
  u_wind <- -ws * sin(2 * pi * wd / 360)
  v_wind <- -ws * cos(2 * pi * wd / 360)
  # Calculate the average wind vectors
  mean_u <- mean(u_wind, na.rm = TRUE)
  mean_v <- mean(v_wind, na.rm = TRUE)
  # Calculate the resultant vector average wind direction with atan2
  wd_average <- atan2(mean_u, mean_v) * 360 / 2 / pi
  wd_average <- ifelse(wd_average > 180, wd_average - 180, wd_average + 180)
  # Calculate the vector average wind speed
  ws_average <- (mean_u^2 + mean_v^2)^0.5
  ### Output
  return(list(Wind_speed = ws_average, Wind_direction = wd_average))
}
/scratch/gouwar.j/cran-all/cranData/ARPALData/R/Wind_averaging.R
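A minimal usage sketch (toy data; the helper is internal and not exported, so it is only callable from within the package or after sourcing the file above). It shows why the vector average is used instead of a naive arithmetic mean of the angles:

## Two observations blowing from just either side of north
ws <- c(2, 3)    # wind speeds (m/s)
wd <- c(350, 10) # wind directions (degrees)
Wind_averaging(ws, wd)
## Returns Wind_speed of about 2.46 m/s and Wind_direction of about 2 degrees
## (near north): the naive mean of the angles, (350 + 10) / 2 = 180, would
## wrongly point south.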
#' @keywords internal
#' @noRd
as_difftime <- function(timeseries, format = "%X", units = "auto") {
  if (units %in% c("months", "years")) {
    if (!is.numeric(timeseries)) {
      # Note: stop() pastes its ... arguments with no separator (it has no 'sep' argument)
      stop("'timeseries' must be numeric for units = '", units, "'")
    } else {
      structure(timeseries, units = units, class = "difftime")
    }
  } else {
    base::as.difftime(timeseries, format, units)
  }
}
/scratch/gouwar.j/cran-all/cranData/ARPALData/R/as_difftime.R
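A short illustration (assuming the helper above is in scope) of why this wrapper exists: base::as.difftime() rejects 'months' and 'years' as units, so the helper constructs the difftime object directly for those cases:

as_difftime(3, units = "months")
## Time difference of 3 months
try(base::as.difftime(3, units = "months"))
## Error: invalid units specified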
#' Download air quality data from ARPA Lombardia website
#'
#' @description 'get_ARPA_Lombardia_AQ_data' returns observed air quality measurements collected by
#' ARPA Lombardia ground detection system for Lombardy region in Northern Italy.
#' Available airborne pollutant concentrations are: NO2, NOx, PM10, PM2.5, Ozone, Arsenic, Benzene,
#' Benzo-a-pyrene, Ammonia, Sulfur Dioxide, Black Carbon, CO, Nickel, Cadmium and Lead.
#' Data are available from 1968 and are updated up to the current date (2023).
#' For more information about the data visit the section 'Monitoraggio aria' at the webpage:
#' https://www.dati.lombardia.it/stories/s/auv9-c2sj
#'
#' @param ID_station Numeric value. ID of the station to consider. Using ID_station = NULL, all the available
#' stations are selected. Default is ID_station = NULL.
#' @param Date_begin Character vector of the first date-time to download. Format can be either "YYYY-MM-DD" or "YYYY-MM-DD hh:mm:ss". Default is Date_begin = "2022-01-01".
#' @param Date_end Character vector of the last date-time to download. Format can be either "YYYY-MM-DD" or "YYYY-MM-DD hh:mm:ss". Default is Date_end = "2022-12-31".
#' @param Frequency Temporal aggregation frequency. It can be "hourly", "daily", "weekly", "monthly" or "yearly".
#' Default is Frequency = "hourly".
#' @param Var_vec Character vector of variables to aggregate. If NULL (default) all the variables are averaged.
#' @param Fns_vec Character vector of aggregation functions to apply to the selected variables.
#' Available functions are mean, median, min, max, sum, qPP (PP-th percentile), sd, var,
#' vc (variability coefficient), skew (skewness) and kurt (kurtosis).
#' @param by_sensor Logical value (TRUE or FALSE). If 'by_sensor = TRUE', the function returns the observed concentrations
#' by sensor code, while if 'by_sensor = FALSE' (default) it returns the observed concentrations by station.
#' @param verbose Logical value (TRUE or FALSE). Toggle warnings and messages. If 'verbose = TRUE' (default) the function
#' prints on the screen some messages describing the progress of the tasks. If 'verbose = FALSE' any message about
#' the progression is suppressed.
#' @param parallel Logical value (TRUE or FALSE). If 'parallel = FALSE' (default), data downloading is performed using a sequential/serial approach and the additional parameters 'parworkers' and 'parfuturetype' are ignored.
#' When 'parallel = TRUE', data downloading is performed using parallel computing through the Futureverse setting.
#' More detailed information about parallel computing in the Futureverse can be found at the following webpages:
#' https://future.futureverse.org/ and https://cran.r-project.org/web/packages/future.apply/vignettes/future.apply-1-overview.html
#' @param parworkers Integer value. If 'parallel = TRUE' (parallel mode active), the user can declare the number of parallel workers to be activated using 'parworkers = integer number'. By default ('parworkers = NULL'), the number of active workers is half of the available local cores.
#' @param parfuturetype Character vector. If 'parallel = TRUE' (parallel mode active), the user can declare the parallel strategy to be used according to the Futureverse syntax through 'parfuturetype'. By default, 'multisession' (background R sessions on the local machine) is used. Alternatively, the 'multicore' (forked R processes on the local machine; not supported by Windows and RStudio) setting can be used.
#'
#' @return A data frame of class 'data.frame' and 'ARPALdf'. The object is fully compatible with Tidyverse.
#'
#' @examples
#' \donttest{
#' ## Download hourly air quality data for 2022 at station 501.
#' if (require("RSocrata")) {
#'   get_ARPA_Lombardia_AQ_data(ID_station = 501, Date_begin = "2022-01-01",
#'                              Date_end = "2022-12-31", Frequency = "hourly", parallel = TRUE)
#' }
#' ## Download (parallel) monthly data for NOx and NO2 observed between May and
#' ## August 2022 for all the stations active on the network. The 25th percentile
#' ## is computed for NOx, while the maximum observed concentration is computed for NO2.
#' if (require("RSocrata")) {
#'   get_ARPA_Lombardia_AQ_data(ID_station = NULL, Date_begin = "2022-05-01",
#'                              Date_end = "2022-08-31", Frequency = "monthly",
#'                              Var_vec = c("NOx","NO2"), Fns_vec = c("q25","max"), parallel = TRUE)
#' }
#' ## Download hourly air quality data by sensor for January 2023 at station 501.
#' if (require("RSocrata")) {
#'   get_ARPA_Lombardia_AQ_data(ID_station = 501, Date_begin = "2023-01-01 00:00:00",
#'                              Date_end = "2023-01-31 23:00:00", by_sensor = TRUE)
#' }
#' }
#'
#' @export
get_ARPA_Lombardia_AQ_data <- function(ID_station = NULL, Date_begin = "2022-01-01",
                                       Date_end = "2022-12-31", Frequency = "hourly",
                                       Var_vec = NULL, Fns_vec = NULL, by_sensor = FALSE,
                                       verbose = TRUE, parallel = FALSE, parworkers = NULL,
                                       parfuturetype = "multisession") {
  ### Welcome message
  if (verbose == TRUE) {
    cat("Retrieving desired ARPA Lombardia dataset: started at", as.character(Sys.time()), "\n")
  }

  ############################
  ########## Checks ##########
  ############################
  ##### Check for internet connection
  if (!curl::has_internet()) {
    message("Internet connection not available at the moment.\nPlease check your internet connection. If the problem persists, please contact the package maintainer.")
    return(invisible(NULL))
  }
  ##### Check if package 'RSocrata' is installed
  # See: https://r-pkgs.org/dependencies-in-practice.html#sec-dependencies-in-suggests-r-code
  rlang::check_installed("RSocrata",
                         reason = "Package \"RSocrata\" must be installed to download data from ARPA Lombardia Open Database.")
  ##### Check if Futureverse is installed
  rlang::check_installed("future",
                         reason = "Package \"future\" must be installed to download (parallel) data from ARPA Lombardia Open Database.")
  rlang::check_installed("future.apply",
                         reason = "Package \"future.apply\" must be installed to download (parallel) data from ARPA Lombardia Open Database.")
  ##### Define %notin%
  '%notin%' <- Negate('%in%')
  ##### Check if by_sensor is set up properly
  if (by_sensor %notin% c(0, 1, FALSE, TRUE)) {
    stop("Wrong setup for 'by_sensor'. Use 1 or 0 or TRUE or FALSE.", call. = FALSE)
  }
  ##### Check if parallel is set up properly
  if (parallel %notin% c(FALSE, TRUE)) {
    stop("Wrong setup for 'parallel'. Use TRUE or FALSE.", call. = FALSE)
  }
  if (parfuturetype %notin% c("multisession", "multicore")) {
    stop("Wrong setup for 'parfuturetype'. Use 'multisession' or 'multicore'.", call. = FALSE)
  }

  ######################################
  ########## Downloading data ##########
  ######################################
  ### Registry/Metadata
  Metadata <- AQ_metadata_reshape()
  Metadata <- Metadata %>%
    dplyr::select(-c(.data$Altitude, .data$Province, .data$City,
                     .data$DateStart, .data$DateStop, .data$Latitude, .data$Longitude,
                     .data$ARPA_zone, .data$ARPA_stat_type))
  ### Check if ID_station is valid (in the list of active stations)
  if (!is.null(ID_station) & all(ID_station %notin% Metadata$IDStation)) {
    stop("ID_station NOT in the list of active stations. Change ID_station or use ID_station = NULL",
         call. = FALSE)
  }
  if (!is.null(ID_station)) {
    Metadata <- Metadata %>%
      dplyr::filter(.data$IDStation %in% ID_station)
  }
  if (!is.null(Var_vec)) {
    Metadata <- Metadata %>%
      dplyr::filter(.data$Pollutant %in% Var_vec)
  }
  if (verbose == TRUE) {
    cat("Downloading data from ARPA Lombardia: started at", as.character(Sys.time()), "\n")
  }

  ##### Splitting strategy for improving download speed
  n_blocks <- 12
  # Check dates format
  if (is.null(lubridate::guess_formats(x = Date_begin, orders = c("ymd HMS", "ymd")))) {
    stop("Wrong format for 'Date_begin'. Use 'YYYY-MM-DD' or 'YYYY-MM-DD hh:mm:ss'", call. = FALSE)
  }
  if (is.null(lubridate::guess_formats(x = Date_end, orders = c("ymd HMS", "ymd")))) {
    stop("Wrong format for 'Date_end'. Use 'YYYY-MM-DD' or 'YYYY-MM-DD hh:mm:ss'", call. = FALSE)
  }
  if (is.na(lubridate::ymd_hms(Date_begin, quiet = TRUE))) {
    Date_begin <- lubridate::ymd_hms(paste0(Date_begin, " 00:00:00"), tz = "CET")
  }
  if (is.na(lubridate::ymd_hms(Date_end, quiet = TRUE))) {
    Date_end <- lubridate::ymd_hms(paste0(Date_end, " 23:00:00"), tz = "CET")
  }
  ### Check for the presence of breaking dates (URLs change across years)
  if (lubridate::year(Date_begin) == lubridate::year(Date_end)) {
    break_years <- lubridate::year(Date_begin)
  } else {
    break_years <- seq(from = lubridate::year(Date_begin), to = lubridate::year(Date_end), by = 1)
  }

  ##### Check online availability of the resources for the specified years
  if (verbose == TRUE) {
    cat("Checking availability of online resources: started at", as.character(Sys.time()), "\n")
  }
  URLs <- res_check <- numeric(length = length(break_years))
  for (yr in 1:length(break_years)) {
    URLs[yr] <- url <- url_dataset_year(Stat_type = "AQ", Year = break_years[yr])
    temp <- tempfile()
    res <- suppressWarnings(try(curl::curl_fetch_disk(url, temp), silent = TRUE))
    status <- if (inherits(res, "try-error")) NA else res$status_code
    if (is.na(status) || status != 200) {
      message(paste0("The internet resource for year ", break_years[yr],
                     " is not available at the moment. Status code: ", status,
                     ".\nPlease, try later. If the problem persists, please contact the package maintainer."))
    } else {
      res_check[yr] <- 1
    }
  }
  # Abort (regardless of 'verbose') when one or more resources are unavailable
  if (sum(res_check) == length(break_years)) {
    if (verbose == TRUE) {
      message("All the online resources are available.\n")
    }
  } else if (sum(res_check) > 0) {
    message("Part of the required online resources are not available. Please, try with a new request.\n")
    return(invisible(NULL))
  } else {
    message("None of the required online resources are available. Please, try with a new request.\n")
    return(invisible(NULL))
  }

  ### Downloading data
  if (verbose == TRUE) {
    cat("Downloading data: started at", as.character(Sys.time()), "\n")
  }
  ### Building URLs/links in Socrata format using sequences of dates
  if (length(break_years) == 1) {
    break_dates <- paste0(break_years, "-12-31 23:00:00")
  } else {
    break_dates <- paste0(break_years[-length(break_years)], "-12-31 23:00:00")
  }
  dates_seq_end <- c(Date_begin, lubridate::ymd_hms(break_dates), Date_end)
  dates_seq_end <- unique(dates_seq_end)
  dates_seq_end <- dates_seq_end[-1]
  dates_seq_end <- dates_seq_end[dates_seq_end <= Date_end & dates_seq_end >= Date_begin]
  dates_seq_begin <- c(Date_begin, lubridate::ymd_hms(break_dates) + lubridate::hours(1))
  dates_seq_begin <- unique(dates_seq_begin)
  dates_seq_begin <- dates_seq_begin[dates_seq_begin <= Date_end & dates_seq_begin >= Date_begin]
  URL_blocks <- vector(mode = "list", length = length(dates_seq_begin))
  for (b in 1:length(dates_seq_begin)) {
    seq_temp <- seq(lubridate::as_datetime(dates_seq_begin[b]),
                    lubridate::as_datetime(dates_seq_end[b]),
                    length.out = n_blocks + 1)
    seq_begin_temp <- seq_temp[-length(seq_temp)]
    seq_begin_temp <- lubridate::round_date(seq_begin_temp, unit = "hour")
    seq_begin_temp <- stringr::str_replace(string = seq_begin_temp, pattern = " ", replacement = "T")
    seq_end_temp <- c(seq_temp[-c(1, length(seq_temp))] - lubridate::hours(1), seq_temp[length(seq_temp)])
    seq_end_temp <- lubridate::round_date(seq_end_temp, unit = "hour")
    seq_end_temp <- stringr::str_replace(string = seq_end_temp, pattern = " ", replacement = "T")
    if (is.null(ID_station)) {
      str_sensor <- NULL
    } else {
      str_sensor <- paste0("AND idsensore in(",
                           paste0(sapply(X = Metadata$IDSensor, function(x) paste0("'", x, "'")),
                                  collapse = ","), ")")
    }
    URL_blocks[[b]] <- data.frame(seq_begin_temp, seq_end_temp) %>%
      dplyr::mutate(link = paste0(URLs[b], "?$where=data between '", seq_begin_temp,
                                  "' and '", seq_end_temp, "'", str_sensor)) %>%
      dplyr::select(.data$link)
  }
  URL_blocks <- dplyr::bind_rows(URL_blocks)

  ### Preparing parallel computation: explicitly open multisession/multicore workers by switching plan
  if (parallel == TRUE) {
    if (is.null(parworkers)) {
      # By default, use half of the available local cores
      parworkers <- future::availableCores() / 2
    }
    future::plan(strategy = parfuturetype, workers = parworkers)
    if (verbose == TRUE) {
      message("Start parallel computing: number of parallel workers = ", future::nbrOfWorkers())
    }
  }
  ### Download using Socrata API
  Aria <- do.call(
    rbind,
    future.apply::future_apply(X = as.matrix(URL_blocks), MARGIN = 1, FUN = function(x) {
      RSocrata::read.socrata(url = x, app_token = "Fk8hvoitqvADHECh3wEB26XbO")
    })
  )
  ### Ending parallel computation: explicitly close multisession/multicore workers by switching plan
  if (parallel == TRUE) {
    future::plan(future::sequential)
    if (verbose == TRUE) {
      message("Stop parallel computing: number of parallel workers = ", future::nbrOfWorkers())
    }
  }

  #####################################
  ########## Processing data ##########
  #####################################
  if (verbose == TRUE) {
    cat("Processing data: started at", as.character(Sys.time()), "\n")
  }
  ### Change variable names
  Aria <- Aria %>%
    dplyr::select(IDSensor = .data$idsensore, Date = .data$data, Value = .data$valore) %>%
    dplyr::mutate(IDSensor = as.numeric(.data$IDSensor),
                  Value = as.numeric(.data$Value))
  # Clean RAM
  invisible(gc())
  ### Add metadata
  Aria <- dplyr::right_join(Aria, Metadata, by = "IDSensor")
  ### Cleaning
  if (by_sensor %in% c(1, TRUE)) {
    Aria <- Aria %>%
      dplyr::filter(!is.na(.data$Date)) %>%
      dplyr::mutate(dplyr::across(tidyselect::vars_select_helpers$where(is.numeric),
                                  ~ dplyr::na_if(., -9999))) %>%
      dplyr::mutate(dplyr::across(tidyselect::vars_select_helpers$where(is.numeric),
                                  ~ dplyr::na_if(., NaN))) %>%
      dplyr::select(.data$Date, .data$IDStation, .data$NameStation, .data$IDSensor,
                    .data$Pollutant, .data$Value)
  } else if (by_sensor %in% c(0, FALSE)) {
    Aria <- Aria %>%
      dplyr::filter(!is.na(.data$Date)) %>%
      dplyr::select(-c(.data$IDSensor)) %>%
      dplyr::mutate(dplyr::across(tidyselect::vars_select_helpers$where(is.numeric),
                                  ~ dplyr::na_if(., -9999))) %>%
      dplyr::mutate(dplyr::across(tidyselect::vars_select_helpers$where(is.numeric),
                                  ~ dplyr::na_if(., NaN))) %>%
      tidyr::pivot_wider(names_from = .data$Pollutant, values_from = .data$Value,
                         values_fn = function(x) mean(x, na.rm = TRUE)) # Mean (without NA) of an all-NA vector = NaN
  }
  Aria[is.na(Aria)] <- NA
  Aria[is.nan_df(Aria)] <- NA
  # Clean RAM
  invisible(gc())
  ### Add dataset attributes
  attr(Aria, "class") <- c("ARPALdf", "tbl_df", "tbl", "data.frame")
  ### Default variables and aggregation functions
  if (is.null(Var_vec) & is.null(Fns_vec)) {
    vv <- c("Ammonia", "Arsenic", "Benzene", "Benzo_a_pyrene", "BlackCarbon", "Cadmium",
            "CO", "Lead", "Nikel", "NO", "NO2", "NOx", "Ozone", "PM_tot", "PM10", "PM2.5",
            "Sulfur_dioxide")
    vv <- vv[vv %in% names(Aria)]
    fv <- rep("mean", length(vv))
  } else {
    vv <- Var_vec
    fv <- Fns_vec
  }
  # Check if all the pollutants are available for the selected stations
  if (all(dplyr::all_of(vv) %in% names(Aria)) == FALSE) {
    stop("One or more pollutants are not available for the selected stations! Change the values of 'Var_vec'",
         call. = FALSE)
  }

  if (by_sensor %in% c(0, FALSE)) {
    ### Aggregating dataset across time
    if (Frequency != "hourly") {
      if (verbose == TRUE) {
        cat("Aggregating ARPA Lombardia data: started at", as.character(Sys.time()), "\n")
      }
      Aria <- Aria %>%
        Time_aggregate(Frequency = Frequency, Var_vec = Var_vec, Fns_vec = Fns_vec,
                       verbose = verbose) %>%
        dplyr::arrange(.data$NameStation, .data$Date)
    } else {
      Aria <- Aria %>%
        dplyr::select(.data$Date, .data$IDStation, .data$NameStation, vv) %>%
        dplyr::arrange(.data$NameStation, .data$Date)
    }
    ### Regularizing dataset: same number of timestamps for each station and variable
    if (verbose == TRUE) {
      cat("Regularizing ARPA Lombardia data: started at", as.character(Sys.time()), "\n")
    }
    freq_unit <- dplyr::case_when(Frequency == "hourly" ~ "hours",
                                  Frequency == "daily" ~ "days",
                                  Frequency == "weekly" ~ "weeks",
                                  Frequency == "monthly" ~ "months",
                                  Frequency == "yearly" ~ "years")
    Aria <- Aria %>%
      dplyr::arrange(.data$Date) %>%
      dplyr::filter(.data$Date >= Date_begin, .data$Date <= Date_end) %>%
      tidyr::pivot_longer(cols = -c(.data$Date, .data$IDStation, .data$NameStation),
                          names_to = "Measure", values_to = "Value") %>%
      dplyr::mutate(Date = case_when(Frequency %in% c("hourly") ~ as.character(format(x = .data$Date, format = "%Y-%m-%d %H:%M:%S")),
                                     TRUE ~ as.character(format(x = .data$Date, format = "%Y-%m-%d")))) %>%
      tidyr::pivot_wider(names_from = .data$Date, values_from = .data$Value) %>%
      tidyr::pivot_longer(cols = -c(.data$Measure, .data$IDStation, .data$NameStation),
                          names_to = "Date", values_to = "Value") %>%
      tidyr::pivot_wider(names_from = .data$Measure, values_from = .data$Value)
    if (Frequency != "hourly") {
      Aria <- Aria %>%
        dplyr::mutate(Date = ymd(.data$Date)) %>%
        dplyr::arrange(.data$IDStation, .data$Date)
    } else {
      Aria <- Aria %>%
        dplyr::mutate(Date = ymd_hms(.data$Date)) %>%
        dplyr::arrange(.data$IDStation, .data$Date)
    }
    structure(list(Aria = Aria))
    attr(Aria, "class") <- c("ARPALdf", "ARPALdf_AQ", "tbl_df", "tbl", "data.frame")
    attr(Aria, "frequency") <- Frequency
    attr(Aria, "units") <- freq_unit
  } else if (by_sensor %in% c(1, TRUE)) {
    Aria <- Aria %>%
      dplyr::arrange(.data$Date) %>%
      dplyr::filter(.data$Date >= Date_begin, .data$Date <= Date_end)
    structure(list(Aria = Aria))
    attr(Aria, "class") <- c("ARPALdf", "ARPALdf_AQ", "tbl_df", "tbl", "data.frame")
    attr(Aria, "frequency") <- "hourly"
    attr(Aria, "units") <- "hours"
  }

  if (verbose == TRUE) {
    cat("Processing data: ended at", as.character(Sys.time()), "\n")
    cat("Retrieving desired ARPA Lombardia dataset: ended at", as.character(Sys.time()), "\n")
  }
  return(Aria)
}
/scratch/gouwar.j/cran-all/cranData/ARPALData/R/get_ARPA_Lombardia_AQ_data.R
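The downloader above splits each requested period into n_blocks = 12 sub-intervals and builds one SoQL 'between' query per block, which are then fetched (optionally in parallel) with future.apply. A standalone sketch of the same idea; the endpoint URL is a placeholder and the fetcher is a stub standing in for RSocrata::read.socrata():

library(lubridate)

base_url   <- "https://www.dati.lombardia.it/resource/<dataset-id>.csv"  # placeholder endpoint
Date_begin <- ymd_hms("2022-01-01 00:00:00")
Date_end   <- ymd_hms("2022-12-31 23:00:00")
n_blocks   <- 12

# Evenly spaced cut points, rounded to the hour (as in the function above)
cuts  <- round_date(seq(Date_begin, Date_end, length.out = n_blocks + 1), unit = "hour")
begin <- cuts[-length(cuts)]
end   <- c(cuts[2:n_blocks] - hours(1), cuts[n_blocks + 1])

# Socrata expects ISO timestamps with a 'T' separator
iso   <- function(x) sub(" ", "T", format(x, "%Y-%m-%d %H:%M:%S"))
links <- paste0(base_url, "?$where=data between '", iso(begin), "' and '", iso(end), "'")

# Fetch the blocks in parallel, then restore the sequential plan
future::plan(future::multisession, workers = 2)
blocks <- future.apply::future_lapply(links, function(l) l)  # stub: really RSocrata::read.socrata(l, app_token = ...)
future::plan(future::sequential)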
#' Download air quality data at municipal level from ARPA Lombardia website
#'
#' @description 'get_ARPA_Lombardia_AQ_municipal_data' returns the air quality levels at municipal level estimated by
#' ARPA Lombardia using a physico-chemical model which simulates air quality based on weather and geo-physical
#' variables. For each municipality of Lombardy, ARPA estimates the average (NO2_mean) and maximum daily (NO2_max_day)
#' level of NO2, the daily maximum (Ozone_max_day) and the 8-hours moving window maximum (Ozone_max_8h) of Ozone,
#' and the average levels of PM10 (PM10_mean) and PM2.5 (PM2.5_mean).
#' Data are available from 2011 and are updated up to the current date.
#' For more information about the municipal data visit the section 'Stime comunali dell'aria' at the webpage:
#' https://www.dati.lombardia.it/stories/s/auv9-c2sj
#'
#' @param ID_station Numeric value. ID of the station to consider. Using ID_station = NULL, all the available
#' stations are selected. Default is ID_station = NULL.
#' @param Date_begin Character vector of the first date-time to download. Format can be either "YYYY-MM-DD" or "YYYY-MM-DD hh:mm:ss". Default is Date_begin = "2021-01-01".
#' @param Date_end Character vector of the last date-time to download. Format can be either "YYYY-MM-DD" or "YYYY-MM-DD hh:mm:ss". Default is Date_end = "2022-12-31".
#' @param Frequency Temporal aggregation frequency. It can be "daily", "weekly", "monthly" or "yearly".
#' Default is Frequency = "daily".
#' @param Var_vec Character vector of variables to aggregate. If NULL (default) all the variables are averaged.
#' @param Fns_vec Character vector of aggregation functions to apply to the selected variables.
#' Available functions are mean, median, min, max, sum, qPP (PP-th percentile), sd, var,
#' vc (variability coefficient), skew (skewness) and kurt (kurtosis).
#' @param by_sensor Logical value (TRUE or FALSE). If 'by_sensor = TRUE', the function returns the observed concentrations by sensor code, while
#' if 'by_sensor = FALSE' (default) it returns the observed concentrations by station.
#' @param verbose Logical value (TRUE or FALSE). Toggle warnings and messages. If 'verbose = TRUE' (default) the function
#' prints on the screen some messages describing the progress of the tasks. If 'verbose = FALSE' any message about
#' the progression is suppressed.
#' @param parallel Logical value (TRUE or FALSE). If 'parallel = FALSE' (default), data downloading is performed using a sequential/serial approach and the additional parameters 'parworkers' and 'parfuturetype' are ignored.
#' When 'parallel = TRUE', data downloading is performed using parallel computing through the Futureverse setting.
#' More detailed information about parallel computing in the Futureverse can be found at the following webpages:
#' https://future.futureverse.org/ and https://cran.r-project.org/web/packages/future.apply/vignettes/future.apply-1-overview.html
#' @param parworkers Integer value. If 'parallel = TRUE' (parallel mode active), the user can declare the number of parallel workers to be activated using 'parworkers = integer number'. By default ('parworkers = NULL'), the number of active workers is half of the available local cores.
#' @param parfuturetype Character vector. If 'parallel = TRUE' (parallel mode active), the user can declare the parallel strategy to be used according to the Futureverse syntax through 'parfuturetype'. By default, 'multisession' (background R sessions on the local machine) is used. Alternatively, the 'multicore' (forked R processes on the local machine; not supported by Windows and RStudio) setting can be used.
#'
#' @return A data frame of class 'data.frame' and 'ARPALdf'. The object is fully compatible with Tidyverse.
#' The column 'NameStation' identifies the name of each municipality. The column 'IDStation' is an ID code
#' (assigned from ARPA) uniquely identifying each municipality.
#'
#' @examples
#' \donttest{
#' ## Download daily concentrations at municipal levels observed in 2020
#' ## for all the municipalities in Lombardy
#' if (require("RSocrata")) {
#'   get_ARPA_Lombardia_AQ_municipal_data(ID_station = NULL, Date_begin = "2020-01-01",
#'                                        Date_end = "2020-12-31", Frequency = "daily")
#' }
#' ## Download monthly concentrations of NO2 (average and maximum) observed in 2021
#' ## at city number 100451.
#' if (require("RSocrata")) {
#'   get_ARPA_Lombardia_AQ_municipal_data(ID_station = 100451, Date_begin = "2021-01-01",
#'                                        Date_end = "2021-12-31", Frequency = "monthly",
#'                                        Var_vec = c("NO2_mean","NO2_mean"), Fns_vec = c("mean","max"))
#' }
#' ## Download daily concentrations observed in March and April 2022 at city number 100451.
#' ## Data are reported by sensor.
#' if (require("RSocrata")) {
#'   get_ARPA_Lombardia_AQ_municipal_data(ID_station = 100451, Date_begin = "2022-03-01",
#'                                        Date_end = "2022-04-30", by_sensor = TRUE)
#' }
#' }
#'
#' @export
get_ARPA_Lombardia_AQ_municipal_data <- function(ID_station = NULL, Date_begin = "2021-01-01",
                                                 Date_end = "2022-12-31", Frequency = "daily",
                                                 Var_vec = NULL, Fns_vec = NULL, by_sensor = FALSE,
                                                 verbose = TRUE, parallel = FALSE, parworkers = NULL,
                                                 parfuturetype = "multisession") {
  ### Welcome message
  if (verbose == TRUE) {
    cat("Retrieving desired ARPA Lombardia dataset: started at", as.character(Sys.time()), "\n")
  }

  ############################
  ########## Checks ##########
  ############################
  ##### Check for internet connection
  if (!curl::has_internet()) {
    message("Internet connection not available at the moment.\nPlease check your internet connection. If the problem persists, please contact the package maintainer.")
    return(invisible(NULL))
  }
  ##### Check if package 'RSocrata' is installed
  # See: https://r-pkgs.org/dependencies-in-practice.html#sec-dependencies-in-suggests-r-code
  rlang::check_installed("RSocrata",
                         reason = "Package \"RSocrata\" must be installed to download data from ARPA Lombardia Open Database.")
  ##### Check if Futureverse is installed
  rlang::check_installed("future",
                         reason = "Package \"future\" must be installed to download (parallel) data from ARPA Lombardia Open Database.")
  rlang::check_installed("future.apply",
                         reason = "Package \"future.apply\" must be installed to download (parallel) data from ARPA Lombardia Open Database.")
  ##### Define %notin%
  '%notin%' <- Negate('%in%')
  ##### Check if by_sensor is set up properly
  if (by_sensor %notin% c(0,1,FALSE,TRUE)) {
    stop("Wrong setup for 'by_sensor'. Use 1 or 0 or TRUE or FALSE.", call. = FALSE)
  }
  ##### Check if parallel is set up properly
  if (parallel %notin% c(FALSE,TRUE)) {
    stop("Wrong setup for 'parallel'. Use TRUE or FALSE.", call. = FALSE)
  }
  if (parfuturetype %notin% c("multisession","multicore")) {
    stop("Wrong setup for 'parfuturetype'. Use 'multisession' or 'multicore'.", call. = FALSE)
  }
  ##### Check input frequency
  stopifnot("Frequency cannot be hourly as municipal estimates are provided on a daily basis" = Frequency != "hourly")

  ######################################
  ########## Downloading data ##########
  ######################################
  ### Registry/Metadata
  Metadata <- AQ_municipal_metadata_reshape()
  Metadata <- Metadata %>%
    dplyr::select(-c(.data$Province, .data$DateStart, .data$DateStop))
  ### Check if ID_station is valid (in the list of active stations)
  if (!is.null(ID_station) & all(ID_station %notin% Metadata$IDStation)) {
    stop("ID_station NOT in the list of active stations. Change ID_station or use ID_station = NULL",
         call. = FALSE)
  }
  if (!is.null(ID_station)) {
    Metadata <- Metadata %>%
      dplyr::filter(.data$IDStation %in% ID_station)
  }

  ##### Splitting strategy for improving download speed
  n_blocks <- 12
  # Check dates format
  if (is.null(lubridate::guess_formats(x = Date_begin, orders = c("ymd HMS","ymd")))) {
    stop("Wrong format for 'Date_begin'. Use 'YYYY-MM-DD' or 'YYYY-MM-DD hh:mm:ss'", call. = FALSE)
  }
  if (is.null(lubridate::guess_formats(x = Date_end, orders = c("ymd HMS","ymd")))) {
    stop("Wrong format for 'Date_end'. Use 'YYYY-MM-DD' or 'YYYY-MM-DD hh:mm:ss'", call. = FALSE)
  }
  if (is.na(lubridate::ymd_hms(Date_begin, quiet = TRUE))) {
    Date_begin <- lubridate::ymd_hms(paste0(Date_begin, " 00:00:00"))
  }
  if (is.na(lubridate::ymd_hms(Date_end, quiet = TRUE))) {
    Date_end <- lubridate::ymd_hms(paste0(Date_end, " 23:00:00"))
  }
  ### Check for the presence of breaking dates (URLs change across years)
  if (lubridate::year(Date_begin) == lubridate::year(Date_end)) {
    break_years <- lubridate::year(Date_begin)
  } else {
    break_years <- seq(from = lubridate::year(Date_begin), to = lubridate::year(Date_end), by = 1)
  }

  ##### Check online availability of the resources for the specified years
  if (verbose == TRUE) {
    cat("Checking availability of online resources: started at", as.character(Sys.time()), "\n")
  }
  URLs <- res_check <- numeric(length = length(break_years))
  for (yr in 1:length(break_years)) {
    # The availability check uses a dedicated URL; the download URL is stored afterwards
    url <- url_dataset_year(Stat_type = "AQ_municipal_check", Year = break_years[yr])
    temp <- tempfile()
    res <- suppressWarnings(try(curl::curl_fetch_disk(url, temp), silent = TRUE))
    URLs[yr] <- url_dataset_year(Stat_type = "AQ_municipal", Year = break_years[yr])
    status <- if (inherits(res, "try-error")) NA else res$status_code
    if (is.na(status) || status != 200) {
      message(paste0("The internet resource for year ", break_years[yr],
                     " is not available at the moment. Status code: ", status,
                     ".\nPlease, try later. If the problem persists, please contact the package maintainer."))
    } else {
      res_check[yr] <- 1
    }
  }
  # Abort (regardless of 'verbose') when one or more resources are unavailable
  if (sum(res_check) == length(break_years)) {
    if (verbose == TRUE) {
      message("All the online resources are available.\n")
    }
  } else if (sum(res_check) > 0) {
    message("Part of the required online resources are not available. Please, try with a new request.\n")
    return(invisible(NULL))
  } else {
    message("None of the required online resources are available. Please, try with a new request.\n")
    return(invisible(NULL))
  }

  ### Downloading data
  if (verbose == TRUE) {
    cat("Downloading data: started at", as.character(Sys.time()), "\n")
  }
  ##### Years up to 2021 #####
  if (any(break_years %notin% c(2022,2023,2024))) {
    ### File names (from ARPA database)
    file_name <- dplyr::case_when(break_years %in% 2017:2021 ~ paste0("dati_stime_comunali_", break_years, ".csv"),
                                  break_years %in% 2011:2016 ~ "dati_stime_comunali_2011-2016.csv")
    URL_blocks <- as.matrix(paste(URLs, file_name))
    ### Download
    Aria1 <- do.call(
      rbind,
      future.apply::future_apply(X = as.matrix(URL_blocks[break_years %notin% c(2022,2023,2024),]),
                                 MARGIN = 1, FUN = function(x) {
        ##### Download
        link_str <- stringr::str_split(string = x, pattern = " ")[[1]][1]
        file_str <- stringr::str_split(string = x, pattern = " ")[[1]][2]
        options(timeout = 10000)
        Aria_temp <- readr::read_csv(archive::archive_read(link_str, file = file_str),
                                     col_types = readr::cols())
        options(timeout = 100)
        ##### Processing data
        if (verbose == TRUE) {
          cat("Processing data: started at", as.character(Sys.time()), "\n")
        }
        ### Change variable names
        Aria_temp <- Aria_temp %>%
          dplyr::select(IDSensor = .data$IdSensore, Date = .data$Data,
                        Value = .data$Valore, Operator = .data$idOperatore) %>%
          dplyr::mutate(Date = lubridate::dmy_hms(.data$Date),
                        Operator = dplyr::case_when(.data$Operator == 1 ~ "mean",
                                                    .data$Operator == 3 ~ "max",
                                                    .data$Operator == 11 ~ "max_8h",
                                                    .data$Operator == 12 ~ "max_day"))
      })
    )
  } else {
    Aria1 <- NULL
  }
  # Clean RAM
  invisible(gc())

  ##### Data from 2022 to current (2024) are provided via Socrata API #####
  if (any(break_years %in% c(2022,2023,2024))) {
    ### Filter valid URLs and dates
    URLs_22on <- URLs[break_years %in% c(2022,2023,2024)]
    break_years_22on <- break_years[break_years %in% c(2022,2023,2024)]
    if (lubridate::ymd("2022-01-01") > Date_begin) {
      Date_begin_22on <- lubridate::ymd("2022-01-01")
    } else {
      Date_begin_22on <- Date_begin
    }
    ### Building URLs/links in Socrata format using sequences of dates
    if (length(break_years_22on) == 1) {
      break_dates <- paste0(break_years_22on, "-12-31 23:00:00")
    } else {
      break_dates <- paste0(break_years_22on[-length(break_years_22on)], "-12-31 23:00:00")
    }
    dates_seq_end <- c(Date_begin_22on, lubridate::ymd_hms(break_dates), Date_end)
    dates_seq_end <- unique(dates_seq_end)
    dates_seq_end <- dates_seq_end[-1]
    dates_seq_end <- dates_seq_end[dates_seq_end <= Date_end & dates_seq_end >= Date_begin_22on]
    dates_seq_begin <- c(Date_begin_22on, lubridate::ymd_hms(break_dates) + lubridate::hours(1))
    dates_seq_begin <- unique(dates_seq_begin)
    dates_seq_begin <- dates_seq_begin[dates_seq_begin <= Date_end & dates_seq_begin >= Date_begin_22on]
    URL_blocks <- vector(mode = "list", length = length(dates_seq_begin))
    for (b in 1:length(dates_seq_begin)) {
      seq_temp <- seq(dates_seq_begin[b], dates_seq_end[b], length.out = n_blocks + 1)
      seq_begin_temp <- seq_temp[-length(seq_temp)]
      seq_begin_temp <- lubridate::round_date(seq_begin_temp, unit = "hour")
      seq_begin_temp <- stringr::str_replace(string = seq_begin_temp, pattern = " ", replacement = "T")
      seq_end_temp <- c(seq_temp[-c(1, length(seq_temp))] - lubridate::hours(1), seq_temp[length(seq_temp)])
      seq_end_temp <- lubridate::round_date(seq_end_temp, unit = "hour")
      seq_end_temp <- stringr::str_replace(string = seq_end_temp, pattern = " ", replacement = "T")
      if (is.null(ID_station)) {
        str_sensor <- NULL
      } else {
        str_sensor <- paste0("AND idsensore in(",
                             paste0(sapply(X = Metadata$IDSensor, function(x) paste0("'", x, "'")),
                                    collapse = ","), ")")
      }
      URL_blocks[[b]] <- data.frame(seq_begin_temp, seq_end_temp) %>%
        dplyr::mutate(link = paste0(URLs_22on[b], "?$where=data between '", seq_begin_temp,
                                    "' and '", seq_end_temp, "'", str_sensor)) %>%
        dplyr::select(.data$link)
    }
    URL_blocks <- dplyr::bind_rows(URL_blocks)
    ### Preparing parallel computation: explicitly open multisession/multicore workers by switching plan
    if (parallel == TRUE) {
      if (is.null(parworkers)) {
        # By default, use half of the available local cores
        parworkers <- future::availableCores() / 2
      }
      future::plan(strategy = parfuturetype, workers = parworkers)
      if (verbose == TRUE) {
        message("Start parallel computing: number of parallel workers = ", future::nbrOfWorkers())
      }
    }
    ### Download using Socrata API
    Aria2 <- do.call(
      rbind,
      future.apply::future_apply(X = as.matrix(URL_blocks), MARGIN = 1, FUN = function(x) {
        RSocrata::read.socrata(url = x, app_token = "Fk8hvoitqvADHECh3wEB26XbO")
      })
    )
    ### Ending parallel computation: explicitly close multisession/multicore workers by switching plan
    if (parallel == TRUE) {
      future::plan(future::sequential)
      if (verbose == TRUE) {
        message("Stop parallel computing: number of parallel workers = ", future::nbrOfWorkers())
      }
    }
    ### Processing data
    Aria2 <- Aria2 %>%
      dplyr::select(IDSensor = .data$idsensore, Date = .data$data,
                    Value = .data$valore, Operator = .data$idoperatore) %>%
      dplyr::mutate(IDSensor = as.numeric(.data$IDSensor),
                    Value = as.numeric(.data$Value),
                    Date = lubridate::ymd(.data$Date),
                    Operator = dplyr::case_when(.data$Operator == 1 ~ "mean",
                                                .data$Operator == 3 ~ "max",
                                                .data$Operator == 11 ~ "max_8h",
                                                .data$Operator == 12 ~ "max_day"))
  } else {
    Aria2 <- NULL
  }

  #####################################
  ########## Processing data ##########
  #####################################
  ### Append datasets
  Aria <- dplyr::bind_rows(Aria1, Aria2)
  rm(Aria1, Aria2)
  # Clean RAM
  invisible(gc())
  ### Add metadata
  Aria <- dplyr::right_join(Aria, Metadata, by = "IDSensor")
  ### Cleaning
  if (by_sensor %in% c(1,TRUE)) {
    Aria <- Aria %>%
      dplyr::mutate(Pollutant = paste0(.data$Pollutant, "_", .data$Operator)) %>%
      dplyr::filter(!is.na(.data$Date)) %>%
      dplyr::select(-c(.data$Operator)) %>%
      dplyr::mutate(dplyr::across(tidyselect::vars_select_helpers$where(is.numeric),
                                  ~ dplyr::na_if(., -9999))) %>%
      dplyr::mutate(dplyr::across(tidyselect::vars_select_helpers$where(is.numeric),
                                  ~ dplyr::na_if(., NaN))) %>%
      dplyr::select(.data$Date, .data$IDStation, .data$NameStation, .data$IDSensor,
                    .data$Pollutant, .data$Value)
  } else if (by_sensor %in% c(0,FALSE)) {
    Aria <- Aria %>%
      dplyr::mutate(Pollutant = paste0(.data$Pollutant, "_", .data$Operator)) %>%
      dplyr::filter(!is.na(.data$Date)) %>%
      dplyr::select(-c(.data$IDSensor, .data$Operator)) %>%
      dplyr::mutate(dplyr::across(tidyselect::vars_select_helpers$where(is.numeric),
                                  ~ dplyr::na_if(., -9999))) %>%
      dplyr::mutate(dplyr::across(tidyselect::vars_select_helpers$where(is.numeric),
                                  ~ dplyr::na_if(., NaN))) %>%
      tidyr::pivot_wider(names_from = .data$Pollutant, values_from = .data$Value,
                         values_fn = function(x) mean(x, na.rm = TRUE)) # Mean (without NA) of an all-NA vector = NaN
  }
  Aria[is.na(Aria)] <- NA
  Aria[is.nan_df(Aria)] <- NA
  ### Add dataset attributes
  attr(Aria, "class") <- c("ARPALdf", "ARPALdf_AQ", "tbl_df", "tbl", "data.frame")
  ### Default variables and aggregation functions
  if (is.null(Var_vec) & is.null(Fns_vec)) {
    vv <- c("NO2_mean", "NO2_max_day", "Ozone_max_8h", "Ozone_max_day", "PM10_mean", "PM2.5_mean")
    vv <- vv[vv %in% names(Aria)]
    fv <- rep("mean", length(vv))
  } else {
    vv <- Var_vec
    fv <- Fns_vec
  }
  # Check if all the pollutants are available for the selected stations
  if (all(dplyr::all_of(vv) %in% names(Aria)) == FALSE) {
    stop("One or more pollutants are not available for the selected stations! Change the values of 'Var_vec'",
         call. = FALSE)
  }

  if (by_sensor %in% c(0,FALSE)) {
    ### Aggregating dataset
    if (Frequency != "daily") {
      if (verbose == TRUE) {
        cat("Aggregating ARPA Lombardia data: started at", as.character(Sys.time()), "\n")
      }
      Aria <- Aria %>%
        Time_aggregate(Frequency = Frequency, Var_vec = Var_vec, Fns_vec = Fns_vec,
                       verbose = verbose) %>%
        dplyr::arrange(.data$NameStation, .data$Date)
    } else {
      Aria <- Aria %>%
        dplyr::select(.data$Date, .data$IDStation, .data$NameStation, vv) %>%
        dplyr::arrange(.data$NameStation, .data$Date)
    }
    ### Regularizing dataset: same number of timestamps for each station and variable
    if (verbose == TRUE) {
      cat("Regularizing ARPA Lombardia data: started at", as.character(Sys.time()), "\n")
    }
    freq_unit <- dplyr::case_when(Frequency == "daily" ~ "days",
                                  Frequency == "weekly" ~ "weeks",
                                  Frequency == "monthly" ~ "months",
                                  Frequency == "yearly" ~ "years")
    Aria <- Aria %>%
      dplyr::arrange(.data$Date) %>%
      dplyr::filter(.data$Date >= Date_begin, .data$Date <= Date_end) %>%
      tidyr::pivot_longer(cols = -c(.data$Date, .data$IDStation, .data$NameStation),
                          names_to = "Measure", values_to = "Value") %>%
      tidyr::pivot_wider(names_from = .data$Date, values_from = .data$Value) %>%
      tidyr::pivot_longer(cols = -c(.data$Measure, .data$IDStation, .data$NameStation),
                          names_to = "Date", values_to = "Value") %>%
      tidyr::pivot_wider(names_from = .data$Measure, values_from = .data$Value) %>%
      dplyr::mutate(Date = ymd(.data$Date)) %>%
      dplyr::arrange(.data$IDStation, .data$Date)
    structure(list(Aria = Aria))
    attr(Aria, "class") <- c("ARPALdf", "ARPALdf_AQ_mun", "tbl_df", "tbl", "data.frame")
    attr(Aria, "frequency") <- Frequency
    attr(Aria, "units") <- freq_unit
  } else if (by_sensor %in% c(1,TRUE)) {
    Aria <- Aria %>%
      dplyr::arrange(.data$Date) %>%
      dplyr::filter(.data$Date >= Date_begin, .data$Date <= Date_end)
    structure(list(Aria = Aria))
    attr(Aria, "class") <- c("ARPALdf", "ARPALdf_AQ", "tbl_df", "tbl", "data.frame")
    attr(Aria, "frequency") <- "daily"
    attr(Aria, "units") <- "days"
  }

  if (verbose == TRUE) {
    cat("Processing data: ended at", as.character(Sys.time()), "\n")
    cat("Retrieving desired ARPA Lombardia dataset: ended at", as.character(Sys.time()), "\n")
  }
  return(Aria)
}
/scratch/gouwar.j/cran-all/cranData/ARPALData/R/get_ARPA_Lombardia_AQ_municipal_data.R
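The idOperatore field encodes which statistic each municipal estimate reports; the function above maps it to readable labels with dplyr::case_when. A small standalone sketch of that recoding (the code/label pairs are the ones used above; the toy tibble is invented for illustration):

library(dplyr)

est <- tibble(IDSensor = c(101, 102, 103, 104),
              Value    = c(24.1, 61.3, 88.0, 95.2),
              Operator = c(1, 3, 11, 12))  # toy ARPA-style operator codes

est %>%
  mutate(Operator = case_when(Operator == 1  ~ "mean",
                              Operator == 3  ~ "max",
                              Operator == 11 ~ "max_8h",
                              Operator == 12 ~ "max_day"))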
#' Download metadata (registry) on air quality monitoring stations at municipal level from ARPA Lombardia website
#'
#' @description 'get_ARPA_Lombardia_AQ_municipal_registry' returns the registry (list) of all the air quality sensors
#' owned by ARPA Lombardia for each municipality of Lombardy. The information reported are: ID of each sensor and
#' station, starting date and ending date. The column 'NameStation' identifies the name of each municipality.
#' The column 'IDStation' is an ID code (assigned from ARPA) uniquely identifying each municipality.
#' For more information about the municipal data visit the section 'Stime comunali sull'aria' at the webpage:
#' https://www.dati.lombardia.it/stories/s/auv9-c2sj
#'
#' @return A data frame of class 'data.frame' and 'ARPALdf'. The object is fully compatible with Tidyverse.
#'
#' @examples get_ARPA_Lombardia_AQ_municipal_registry()
#'
#' @export
get_ARPA_Lombardia_AQ_municipal_registry <- function() {
  Metadata <- AQ_municipal_metadata_reshape()
  structure(list(Metadata = Metadata))
  attr(Metadata, "class") <- c("ARPALdf", "ARPALdf_AQ_mun", "tbl_df", "tbl", "data.frame")
  return(Metadata)
}
/scratch/gouwar.j/cran-all/cranData/ARPALData/R/get_ARPA_Lombardia_AQ_municipal_registry.R
#' Download metadata (registry) on air quality monitoring stations from ARPA Lombardia website
#'
#' @description 'get_ARPA_Lombardia_AQ_registry' returns the registry (list) of all the air quality sensors
#' and stations belonging to the ARPA Lombardia network. The information reported are: ID of each sensor and
#' station, geo-location (coordinates in degrees), altitude (m), starting date and ending date.
#' The column 'NameStation' identifies the name of each station, while 'IDStation' is an ID code (assigned
#' from ARPA) uniquely identifying each station.
#' For more information about the data visit the section 'Monitoraggio aria' at the webpage:
#' https://www.dati.lombardia.it/stories/s/auv9-c2sj
#'
#' @return A data frame of class 'data.frame' and 'ARPALdf'. The object is fully compatible with Tidyverse.
#'
#' @examples get_ARPA_Lombardia_AQ_registry()
#'
#' @export
get_ARPA_Lombardia_AQ_registry <- function() {
  Metadata <- AQ_metadata_reshape()
  structure(list(Metadata = Metadata))
  attr(Metadata, "class") <- c("ARPALdf", "ARPALdf_AQ", "tbl_df", "tbl", "data.frame")
  return(Metadata)
}
/scratch/gouwar.j/cran-all/cranData/ARPALData/R/get_ARPA_Lombardia_AQ_registry.R
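Both registry getters return a tibble whose class vector is prepended with 'ARPALdf' (plus a subtype), so tidyverse methods keep dispatching on 'tbl_df' while package methods can dispatch on 'ARPALdf'. A minimal sketch of the pattern (toy data; the inspection calls are standard base R):

library(tibble)

Metadata <- tibble(IDStation = c(501, 502), NameStation = c("A", "B"))
attr(Metadata, "class") <- c("ARPALdf", "tbl_df", "tbl", "data.frame")

class(Metadata)                # "ARPALdf" "tbl_df" "tbl" "data.frame"
inherits(Metadata, "ARPALdf")  # TRUE: package methods can dispatch on this
dplyr::filter(Metadata, IDStation == 501)  # tidyverse verbs still work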
#' Download weather/meteorological data from ARPA Lombardia website #' #' More detailed description. #' #' @description 'get_ARPA_Lombardia_W_data' returns observed air weather measurements collected by #' ARPA Lombardia ground detection system for Lombardy region in Northern Italy. #' Available meteorological variables are: temperature (Celsius degrees), rainfall (mm), wind speed (m/s), #' wind direction (degrees), relative humidity (%), global solar radiation (W/m2), and snow height (cm). #' Data are available from 1989 and are updated up to the current date. #' For more information about the municipal data visit the section 'Monitoraggio aria' at the webpage: #' https://www.dati.lombardia.it/stories/s/auv9-c2sj #' #' @param ID_station Numeric value. ID of the station to consider. Using ID_station = NULL, all the available #' stations are selected. Default is ID_station = NULL. #' @param Date_begin Character vector of the first date-time to download. Format can be either "YYYY-MM-DD" or "YYYY-MM-DD hh:mm:ss". Default is Date_begin = "2022-01-01". #' @param Date_end Character vector of the last date-time to download. Format can be either "YYYY-MM-DD" or "YYYY-MM-DD hh:mm:ss". Default is Date_end = "2022-12-31". #' @param Frequency Temporal aggregation frequency. It can be "10mins", "hourly", "daily", "weekly", #' "monthly". Default is Frequency = "10mins" #' @param Var_vec Character vector of variables to aggregate. If NULL (default) all the variables are averaged, #' except for 'Temperature' and 'Snow_height', which are cumulated. #' @param Fns_vec Character vector of aggregation function to apply to the selected variables. #' Available functions are mean, median, min, max, sum, qPP (PP-th percentile), sd, var, #' vc (variability coefficient), skew (skewness) and kurt (kurtosis). Attention: for Wind Speed and #' Wind Speed Gust only mean, min and max are available; for Wind Direction and Wind Direction Gust #' only mean is available. #' @param by_sensor Logic value (TRUE or FALSE). If 'by_sensor = TRUE', the function returns the observed concentrations #' by sensor code, while if 'by_sensor = FALSE' (default) it returns the observed concentrations by station. #' @param verbose Logic value (TRUE or FALSE). Toggle warnings and messages. If 'verbose = TRUE' (default) the function #' prints on the screen some messages describing the progress of the tasks. If 'verbose = FALSE' any message about #' the progression is suppressed. #' @param parallel Logic value (TRUE or FALSE). If 'parallel = FALSE' (default), data downloading is performed using a sequential/serial approach and additional parameters 'parworkers' and 'parfuturetype' are ignored. #' When 'parallel = TRUE', data downloading is performed using parallel computing through the Futureverse setting. #' More detailed information about parallel computing in the Futureverse can be found at the following webpages: #' https://future.futureverse.org/ and https://cran.r-project.org/web/packages/future.apply/vignettes/future.apply-1-overview.html #' @param parworkers Numeric integer value. If 'parallel = TRUE' (parallel mode active), the user can declare the number of parallel workers to be activated using 'parworkers = integer number'. By default ('parworkers = NULL'), the number of active workers is half of the available local cores. #' @param parfuturetype Character vector. If 'parallel = TRUE' (parallel mode active), the user can declare the parallel strategy to be used according to the Futureverse syntax through 'parfuturetype'. 
By default, the 'multisession' (background R sessions on local machine) is used. In alternative, the 'multicore' (forked R processes on local machine. Not supported by Windows and RStudio) setting can be used. #' #' #' #' @return A data frame of class 'data.frame' and 'ARPALdf'. The object is fully compatible with Tidyverse. #' #' @examples #' \donttest{ #' ## Download all the (10 minutes frequency) weather measurements at station 100 #' ## between August 2021 and December 2022. #' if (require("RSocrata")) { #' get_ARPA_Lombardia_W_data(ID_station = 100, Date_begin = "2021-08-01", #' Date_end = "2022-12-31", Frequency = "10mins") #' } #' ## Download all the (daily frequency) weather measurements at station 1974 during 2022 #' if (require("RSocrata")) { #' get_ARPA_Lombardia_W_data(ID_station = 1974, Date_begin = "2022-01-01", #' Date_end = "2022-12-31", Frequency = "daily") #' } #' } #' #' @export get_ARPA_Lombardia_W_data <- function(ID_station = NULL, Date_begin = "2021-01-01", Date_end = "2022-12-31", Frequency = "10mins", Var_vec = NULL, Fns_vec = NULL, by_sensor = FALSE, verbose = TRUE, parallel = FALSE, parworkers = NULL, parfuturetype = "multisession") { ### Welcome message if (verbose == TRUE) { cat("Retrieving desired ARPA Lombardia dataset: started at", as.character(Sys.time()), "\n") } ############################ ########## Checks ########## ############################ ##### Check for internet connection if(!curl::has_internet()) { message("Internet connection not available at the moment.\nPlease check your internet connection. If the problem persists, please contact the package maintainer.") return(invisible(NULL)) } ##### Check if package 'RSocrata' is installed # See: https://r-pkgs.org/dependencies-in-practice.html#sec-dependencies-in-suggests-r-code rlang::check_installed("RSocrata", reason = "Package \"RSocrata\" must be installed to download data from ARPA Lombardia Open Database.") ##### Check if Futureverse is installed rlang::check_installed("future", reason = "Package \"future\" must be installed to download (parallel) data from ARPA Lombardia Open Database.") rlang::check_installed("future.apply", reason = "Package \"future\" must be installed to download (parallel) data from ARPA Lombardia Open Database.") ##### Define %notin% '%notin%' <- Negate('%in%') ##### Check if package 'RSocrata' is installed # See: https://r-pkgs.org/dependencies-in-practice.html#sec-dependencies-in-suggests-r-code rlang::check_installed("RSocrata", reason = "Package \"RSocrata\" must be installed to download data from ARPA Lombardia Open Database.") ##### Checks if by_sensor setup properly if (by_sensor %notin% c(0,1,FALSE,TRUE)) { stop("Wrong setup for 'by_sensor'. Use 1 or 0 or TRUE or FALSE.", call. = FALSE) } ##### Checks if parallel setup properly if (parallel %notin% c(FALSE,TRUE)) { stop("Wrong setup for 'parallel'. Use TRUE or FALSE.", call. = FALSE) } if (parfuturetype %notin% c("multisession","multicore")) { stop("Wrong setup for 'parallel'. Use TRUE or FALSE.", call. = FALSE) } ##### Control checks on Var_vec and Fns_vec # Wind_speed and Wind_direction must be selected together if (!is.na(match("Wind_speed",Var_vec)) & is.na(match("Wind_direction",Var_vec))) { stop("It's not possible to select only Wind_speed: also Wind_direction must be included in the list of selected variables! Change the values of 'Var_vec' and 'Var_funs'", call. 
= FALSE) } if (is.na(match("Wind_speed",Var_vec)) & !is.na(match("Wind_direction",Var_vec))) { stop("It's not possible to select only Wind_direction: also Wind_speed must be included in the list of selected variables! Change the values of 'Var_vec' and 'Var_funs'", call. = FALSE) } # Wind direction can only be averaged if(sum(Var_vec %in% c("Wind_direction","Wind_direction_gust") & Fns_vec != "mean") > 0) { stop("Error: on Wind_direction and Wind_direction_gust is possible to calculate only the average value. Use 'mean' in 'Fns_vec.'", call. = FALSE) } # Wind speed can only be averaged, maximized or minimized if(sum(Var_vec %in% c("Wind_speed","Wind_speed_gust") & Fns_vec %notin% c("mean","min","max")) > 0) { stop("Error: on Wind_speed and Wind_speed_gust is possible to calculate only mean, max or min values. Use 'mean' or 'max' or 'min' in 'Fns_vec.'", call. = FALSE) } ###################################### ########## Downloading data ########## ###################################### ### Registry Metadata <- W_metadata_reshape() Metadata <- Metadata %>% dplyr::select(-c(.data$Altitude,.data$Province, .data$DateStart,.data$DateStop, .data$Latitude,.data$Longitude)) ### Checks if ID_station is valid (in the list of active stations) if (!is.null(ID_station) & all(ID_station %notin% Metadata$IDStation)) { stop("ID_station NOT in the list of active stations. Change ID_station or use ID_station = NULL", call. = FALSE) } if (!is.null(ID_station)) { Metadata <- Metadata %>% dplyr::filter(.data$IDStation %in% ID_station) } if (!is.null(Var_vec)) { Metadata <- Metadata %>% dplyr::filter(.data$Measure %in% Var_vec) } ##### Splitting strategy for improving download speed n_blocks <- 12 # Check dates format if (is.null(lubridate::guess_formats(x = Date_begin, orders = c("ymd HMS","ymd")))) { stop("Wrong format for 'Date_begin'. Use 'YYYY-MM-DD' or 'YYYY-MM-DD hh:mm:ss'", call. = FALSE) } if (is.null(lubridate::guess_formats(x = Date_end, orders = c("ymd HMS","ymd")))) { stop("Wrong format for 'Date_end'. Use 'YYYY-MM-DD' or 'YYYY-MM-DD hh:mm:ss'", call. = FALSE) } if (is.na(lubridate::ymd_hms(Date_begin, quiet = TRUE))) { Date_begin <- lubridate::ymd_hms(paste0(Date_begin," 00:00:00")) } if (is.na(lubridate::ymd_hms(Date_end, quiet = TRUE))) { Date_end <- lubridate::ymd_hms(paste0(Date_end," 23:55:00")) } ### Check for the presence of breaking dates (URLs change in several years) if (lubridate::year(Date_begin) == lubridate::year(Date_end)) { break_years <- lubridate::year(Date_begin) } else { break_years <- seq(from = lubridate::year(Date_begin), to = lubridate::year(Date_end), by = 1) } ##### Check online availability of the resources for the specified years if (verbose == TRUE) { cat("Checking availability of online resources: started at", as.character(Sys.time()), "\n") } URLs <- res_check <- numeric(length = length(break_years)) for (yr in 1:length(break_years)) { URLs[yr] <- url <- url_dataset_year(Stat_type = "W_check", Year = break_years[yr]) temp <- tempfile() res <- suppressWarnings(try(curl::curl_fetch_disk(url, temp), silent = TRUE)) URLs[yr] <- url <- url_dataset_year(Stat_type = "W", Year = break_years[yr]) if(res$status_code != 200) { message(paste0("The internet resource for year ", break_years[yr]," is not available at the moment. Status code: ",res$status_code,".\nPlease, try later. 
If the problem persists, please contact the package maintainer.")) } else { res_check[yr] <- 1 } } if (verbose == TRUE) { if (sum(res_check) == length(break_years)) { message("All the online resources are available.\n") } if (sum(res_check) > 0 & sum(res_check) < length(break_years)) { message("Part of the required online resources are not available. Please, try with a new request.\n") return(invisible(NULL)) } if (sum(res_check) == 0) { message("None of the required online resources are available. Please, try with a new request.\n") return(invisible(NULL)) } } ### Downloading data if (verbose == TRUE) { cat("Downloading data: started at", as.character(Sys.time()), "\n") } ##### Years up to 2022 ##### if (any(break_years %notin% c(2023,2024))) { ### Files names (from ARPA database) file_name <- dplyr::case_when(break_years >= 2013 ~ paste0(break_years,".csv"), break_years %in% 2011:2012 ~ "2012.csv", break_years %in% 2009:2010 ~ "2010.csv", break_years %in% 2006:2008 ~ "2008.csv", break_years %in% 2001:2005 ~ "2005.csv", break_years %in% 1989:2000 ~ "2000.csv") URL_blocks <- as.matrix(paste(URLs,file_name)) ### Download Meteo1 <- do.call( rbind, future.apply::future_apply(X = as.matrix(URL_blocks[break_years %notin% c(2023,2024),]), MARGIN = 1, FUN = function(x) { ###################################### ########## Downloading data ########## ###################################### link_str <- stringr::str_split(string = x, pattern = " ")[[1]][1] file_str <- stringr::str_split(string = x, pattern = " ")[[1]][2] options(timeout = 10000) Meteo_temp <- readr::read_csv(archive::archive_read(link_str, file = file_str), col_types = readr::cols()) options(timeout = 100) ##################################### ########## Processing data ########## ##################################### if (verbose == TRUE) { cat("Processing data: started at", as.character(Sys.time()), "\n") } ### Change variable names Meteo_temp <- Meteo_temp %>% dplyr::select(IDSensor = .data$IdSensore, Date = .data$Data, Value = .data$Valore, Operator = .data$idOperatore) %>% dplyr::mutate(Date = lubridate::dmy_hms(.data$Date), IDSensor = as.numeric(.data$IDSensor)) ### Add metadata Meteo_temp <- dplyr::right_join(Meteo_temp,Metadata, by = "IDSensor") ### Cleaning if (by_sensor %in% c(1,TRUE)) { Meteo_temp <- Meteo_temp %>% dplyr::filter(!is.na(.data$Date)) %>% dplyr::mutate(Operator = dplyr::case_when(.data$Measure == "Relative_humidity" & .data$Operator == 3 ~ 1, .data$Measure == "Relative_humidity" & .data$Operator == 2 ~ 1, .data$Measure == "Temperature" & .data$Operator == 3 ~ 1, .data$Measure == "Temperature" & .data$Operator == 2 ~ 1, TRUE ~ as.numeric(.data$Operator)), Measure = dplyr::case_when(.data$Measure == "Wind_direction" & .data$Operator == 3 ~ "Wind_direction_gust", .data$Measure == "Wind_speed" & .data$Operator == 3 ~ "Wind_speed_gust", TRUE ~ as.character(.data$Measure))) %>% dplyr::select(-c(.data$Operator)) %>% dplyr::select(.data$Date,.data$IDStation,.data$NameStation,.data$IDSensor, .data$Measure,.data$Value) %>% dplyr::mutate(dplyr::across(tidyselect::vars_select_helpers$where(is.numeric), ~ dplyr::na_if(.,-9999))) %>% dplyr::mutate(dplyr::across(tidyselect::vars_select_helpers$where(is.numeric), ~ dplyr::na_if(.,NaN))) } else if (by_sensor %in% c(0,FALSE)) { Meteo_temp <- Meteo_temp %>% dplyr::filter(!is.na(.data$Date)) %>% dplyr::mutate(Operator = dplyr::case_when(.data$Measure == "Relative_humidity" & .data$Operator == 3 ~ 1, .data$Measure == "Relative_humidity" & .data$Operator == 2 ~ 1, .data$Measure == 
"Temperature" & .data$Operator == 3 ~ 1, .data$Measure == "Temperature" & .data$Operator == 2 ~ 1, TRUE ~ as.numeric(.data$Operator)), Measure = dplyr::case_when(.data$Measure == "Wind_direction" & .data$Operator == 3 ~ "Wind_direction_gust", .data$Measure == "Wind_speed" & .data$Operator == 3 ~ "Wind_speed_gust", TRUE ~ as.character(.data$Measure))) %>% dplyr::select(-c(.data$IDSensor, .data$Operator)) %>% tidyr::pivot_wider(names_from = .data$Measure, values_from = .data$Value, values_fn = function(x) mean(x,na.rm=T)) %>% dplyr::mutate(dplyr::across(dplyr::matches(c("Wind_direction","Wind_direction_max")), ~ round(.x,0))) %>% dplyr::mutate(dplyr::across(tidyselect::vars_select_helpers$where(is.numeric), ~ dplyr::na_if(.,-9999))) %>% dplyr::mutate(dplyr::across(tidyselect::vars_select_helpers$where(is.numeric), ~ dplyr::na_if(.,NaN))) } Meteo_temp[is.na(Meteo_temp)] <- NA Meteo_temp[is.nan_df(Meteo_temp)] <- NA Meteo_temp }) ) } else { Meteo1 <- NULL } # clean RAM invisible(gc()) ##### Data from 2023 to current (2024) are provided via Socrata API ##### if (any(break_years %in% c(2023,2024))) { ### Filter valid URLs and dates URLs_23on <- URLs[break_years %in% c(2023,2024)] break_years_23on <- break_years[break_years %in% c(2023,2024)] if (lubridate::ymd("2023-01-01") > Date_begin) { Date_begin_23on <- lubridate::ymd("2023-01-01") } else { Date_begin_23on <- Date_begin } ### Building URLs/links in Socrata format using sequences of dates if (length(break_years_23on) == 1) { break_dates <- paste0(break_years_23on,"-12-31 23:00:00") } else { break_dates <- paste0(break_years_23on[-length(break_years_23on)],"-12-31 23:00:00") } dates_seq_end <- c(Date_begin_23on,lubridate::ymd_hms(break_dates),Date_end) dates_seq_end <- unique(dates_seq_end) dates_seq_end <- dates_seq_end[-1] dates_seq_end <- dates_seq_end[dates_seq_end <= Date_end & dates_seq_end >= Date_begin_23on] dates_seq_begin <- c(Date_begin_23on,lubridate::ymd_hms(break_dates) + lubridate::hours(1)) dates_seq_begin <- unique(dates_seq_begin) dates_seq_begin <- dates_seq_begin[dates_seq_begin <= Date_end & dates_seq_begin >= Date_begin_23on] URL_blocks <- vector(mode = "list", length = length(dates_seq_begin)) for (b in 1:length(dates_seq_begin)) { seq_temp <- seq(dates_seq_begin[b], dates_seq_end[b], length.out = n_blocks + 1) seq_begin_temp <- seq_temp[-length(seq_temp)] seq_begin_temp <- lubridate::round_date(seq_begin_temp, unit = "hour") seq_begin_temp <- stringr::str_replace(string = seq_begin_temp, pattern = " ", replacement = "T") seq_end_temp <- c(seq_temp[-c(1,length(seq_temp))] - lubridate::hours(1),seq_temp[length(seq_temp)]) seq_end_temp <- lubridate::round_date(seq_end_temp, unit = "hour") seq_end_temp <- stringr::str_replace(string = seq_end_temp, pattern = " ", replacement = "T") if (is.null(ID_station)) { str_sensor <- NULL } else { str_sensor <- paste0("AND idsensore in(",paste0(sapply(X = Metadata$IDSensor, function(x) paste0("'",x,"'")),collapse = ","),")") } URL_blocks[[b]] <- data.frame(seq_begin_temp,seq_end_temp) %>% dplyr::mutate(link = paste0(URLs_23on[b],"?$where=data between '", seq_begin_temp, "' and '", seq_end_temp,"'", str_sensor)) %>% dplyr::select(.data$link) } URL_blocks <- dplyr::bind_rows(URL_blocks) ### Preparing parallel computation: explicitly open multisession/multicore workers by switching plan if (parallel == TRUE) { future::plan(future::multisession, workers = 12) if (is.null(parworkers)) { parworkers <- future::availableCores()/2 } eval(parse(text = 
paste0("future::plan(future::",parfuturetype,", workers = ",parworkers,")"))) if (verbose == TRUE) { message("Start parallel computing: number of parallel workers = ", future::nbrOfWorkers()) } } ### Download Meteo2 <- do.call( rbind, future.apply::future_apply(X = as.matrix(URL_blocks), MARGIN = 1, FUN = function(x) { RSocrata::read.socrata(url = x, app_token = "Fk8hvoitqvADHECh3wEB26XbO") }) ) ### Ending parallel computation: explicitly close multisession/multicore workers by switching plan if (parallel == TRUE) { future::plan(future::sequential) if (verbose == TRUE) { message("Stop parallel computing: number of parallel workers = ", future::nbrOfWorkers()) } } ### Processing data if (verbose == TRUE) { cat("Processing data: started at", as.character(Sys.time()), "\n") } ### Change variable names Meteo2 <- Meteo2 %>% dplyr::select(IDSensor = .data$idsensore, Date = .data$data, Value = .data$valore, Operator = .data$idoperatore) %>% dplyr::mutate(IDSensor = as.numeric(.data$IDSensor)) ### Add metadata Meteo2 <- dplyr::right_join(Meteo2,Metadata, by = "IDSensor") ### Cleaning if (by_sensor %in% c(1,TRUE)) { Meteo2 <- Meteo2 %>% dplyr::filter(!is.na(.data$Date)) %>% dplyr::mutate(Operator = dplyr::case_when(.data$Measure == "Relative_humidity" & .data$Operator == 3 ~ 1, .data$Measure == "Relative_humidity" & .data$Operator == 2 ~ 1, .data$Measure == "Temperature" & .data$Operator == 3 ~ 1, .data$Measure == "Temperature" & .data$Operator == 2 ~ 1, TRUE ~ as.numeric(.data$Operator)), Measure = dplyr::case_when(.data$Measure == "Wind_direction" & .data$Operator == 3 ~ "Wind_direction_gust", .data$Measure == "Wind_speed" & .data$Operator == 3 ~ "Wind_speed_gust", TRUE ~ as.character(.data$Measure))) %>% dplyr::select(-c(.data$Operator)) %>% dplyr::select(.data$Date,.data$IDStation,.data$NameStation,.data$IDSensor, .data$Measure,.data$Value) %>% dplyr::mutate(dplyr::across(tidyselect::vars_select_helpers$where(is.numeric), ~ dplyr::na_if(.,-9999))) %>% dplyr::mutate(dplyr::across(tidyselect::vars_select_helpers$where(is.numeric), ~ dplyr::na_if(.,NaN))) } else if (by_sensor %in% c(0,FALSE)) { Meteo2 <- Meteo2 %>% dplyr::filter(!is.na(.data$Date)) %>% dplyr::mutate(Value = as.numeric(.data$Value), Operator = dplyr::case_when(.data$Measure == "Relative_humidity" & .data$Operator == 3 ~ 1, .data$Measure == "Relative_humidity" & .data$Operator == 2 ~ 1, .data$Measure == "Temperature" & .data$Operator == 3 ~ 1, .data$Measure == "Temperature" & .data$Operator == 2 ~ 1, TRUE ~ as.numeric(.data$Operator)), Measure = dplyr::case_when(.data$Measure == "Wind_direction" & .data$Operator == 3 ~ "Wind_direction_gust", .data$Measure == "Wind_speed" & .data$Operator == 3 ~ "Wind_speed_gust", TRUE ~ as.character(.data$Measure))) %>% dplyr::select(-c(.data$IDSensor, .data$Operator)) %>% tidyr::pivot_wider(names_from = .data$Measure, values_from = .data$Value, values_fn = function(x) mean(x,na.rm=T)) %>% dplyr::mutate(dplyr::across(dplyr::matches(c("Wind_direction","Wind_direction_max")), ~ round(.x,0))) %>% dplyr::mutate(dplyr::across(tidyselect::vars_select_helpers$where(is.numeric), ~ dplyr::na_if(.,-9999))) %>% dplyr::mutate(dplyr::across(tidyselect::vars_select_helpers$where(is.numeric), ~ dplyr::na_if(.,NaN))) } Meteo2[is.na(Meteo2)] <- NA Meteo2[is.nan_df(Meteo2)] <- NA } else { Meteo2 <- NULL } ##################################### ########## Processing data ########## ##################################### ### Append datasets Meteo <- dplyr::bind_rows(Meteo1,Meteo2) rm(Meteo1,Meteo2) # clean RAM 
invisible(gc()) ### Add dataset attributes attr(Meteo, "class") <- c("ARPALdf","ARPALdf_W","tbl_df","tbl","data.frame") ### Cleaning if (is.null(Var_vec) & is.null(Fns_vec)) { vv <- c("Rainfall","Temperature","Relative_humidity","Global_radiation","Water_height", "Snow_height","Wind_speed","Wind_speed_max","Wind_direction","Wind_direction_max") vv <- vv[vv %in% names(Meteo)] fv <- ifelse(vv == "Rainfall" | vv == "Snow_height", "sum", "mean") } else { vv <- Var_vec fv <- Fns_vec } # Checks if all the variables are available for the selected stations if (all(dplyr::all_of(vv) %in% names(Meteo)) == F) { stop("One or more variables are not available for the selected stations! Change the values of 'Var_vec'", call. = FALSE) } if (by_sensor %in% c(0,FALSE)) { ### Aggregating dataset if (Frequency != "10mins") { if (verbose==T) { cat("Aggregating ARPA Lombardia data: started at", as.character(Sys.time()), "\n") } Meteo <- Meteo %>% Time_aggregate(Frequency = Frequency, Var_vec = Var_vec, Fns_vec = Fns_vec, verbose = verbose) %>% dplyr::arrange(.data$NameStation, .data$Date) } else { Meteo <- Meteo %>% dplyr::select(.data$Date,.data$IDStation,.data$NameStation,vv) %>% dplyr::arrange(.data$NameStation, .data$Date) } ### Regularizing dataset: same number of timestamps for each station and variable if (verbose == TRUE) { cat("Regularizing ARPA data: started at", as.character(Sys.time()), "\n") } Meteo <- Meteo %>% dplyr::arrange(.data$Date) %>% dplyr::filter(.data$Date >= Date_begin, .data$Date <= Date_end) %>% tidyr::pivot_longer(cols = -c(.data$Date,.data$IDStation,.data$NameStation), names_to = "Measure", values_to = "Value") %>% dplyr::mutate(Date = case_when(Frequency %in% c("10mins","hourly") ~ as.character(format(x = .data$Date, format = "%Y-%m-%d %H:%M:%S")), TRUE ~ as.character(format(x = .data$Date, format = "%Y-%m-%d")))) %>% tidyr::pivot_wider(names_from = .data$Date, values_from = .data$Value) %>% tidyr::pivot_longer(cols = -c(.data$Measure,.data$IDStation,.data$NameStation), names_to = "Date", values_to = "Value") %>% tidyr::pivot_wider(names_from = .data$Measure, values_from = .data$Value) if (Frequency %notin% c("10mins","hourly")) { Meteo <- Meteo %>% dplyr::mutate(Date = ymd(.data$Date)) %>% dplyr::arrange(.data$IDStation,.data$Date) } else { Meteo <- Meteo %>% dplyr::mutate(Date = ymd_hms(.data$Date)) %>% dplyr::arrange(.data$IDStation,.data$Date) } freq_unit <- dplyr::case_when(Frequency == "10mins" ~ "10 min", Frequency == "hourly" ~ "hours", Frequency == "daily" ~ "days", Frequency == "weekly" ~ "weeks", Frequency == "monthly" ~ "months") attr(Meteo, "class") <- c("ARPALdf","ARPALdf_W","tbl_df","tbl","data.frame") attr(Meteo, "frequency") <- Frequency attr(Meteo, "units") <- freq_unit } else if (by_sensor %in% c(1,TRUE)) { Meteo <- Meteo %>% dplyr::arrange(.data$Date) %>% dplyr::filter(.data$Date >= Date_begin, .data$Date <= Date_end) attr(Meteo, "class") <- c("ARPALdf","ARPALdf_W","tbl_df","tbl","data.frame") attr(Meteo, "frequency") <- "10mins" attr(Meteo, "units") <- "10 min" } if (verbose == TRUE) { cat("Processing data: ended at", as.character(Sys.time()), "\n") } if (verbose == TRUE) { cat("Retrieving desired ARPA Lombardia dataset: ended at", as.character(Sys.time()), "\n") } return(Meteo) }
/scratch/gouwar.j/cran-all/cranData/ARPALData/R/get_ARPA_Lombardia_W_data.R
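# A minimal usage sketch for get_ARPA_Lombardia_W_data() above (not run here:
# it requires an internet connection and downloads data from the ARPA open-data
# portal). All argument values are illustrative only.
#
# # Hourly data for all weather stations over January 2022
# W_hour <- get_ARPA_Lombardia_W_data(ID_station = NULL,
#                                     Date_begin = "2022-01-01",
#                                     Date_end = "2022-01-31",
#                                     Frequency = "hourly")
# # Daily aggregation: cumulated rainfall and average temperature
# W_day <- get_ARPA_Lombardia_W_data(ID_station = NULL,
#                                    Date_begin = "2022-01-01",
#                                    Date_end = "2022-01-31",
#                                    Frequency = "daily",
#                                    Var_vec = c("Rainfall","Temperature"),
#                                    Fns_vec = c("sum","mean"))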
#' Download metadata (registry) on weather monitoring stations from ARPA Lombardia website
#'
#' @description 'get_ARPA_Lombardia_W_registry' returns the registry (list) of all the weather sensors
#' and stations belonging to the ARPA Lombardia network. The reported information includes: ID of each sensor and
#' station, geo-location (coordinates in degrees), altitude (meters), starting date and ending date.
#' The column 'NameStation' identifies the name of each station, while 'IDStation' is an ID code (assigned
#' by ARPA) uniquely identifying each station.
#' For more information about the weather monitoring network visit the section 'Meteo' at the webpages:
#' https://www.dati.lombardia.it/stories/s/auv9-c2sj and
#' https://www.dati.lombardia.it/Ambiente/Stazioni-Meteorologiche/nf78-nj6b
#'
#' @return A data frame of class 'data.frame' and 'ARPALdf'. The object is fully compatible with Tidyverse.
#'
#' @examples
#' get_ARPA_Lombardia_W_registry()
#'
#' @export

get_ARPA_Lombardia_W_registry <- function() {
  Metadata <- W_metadata_reshape()
  attr(Metadata, "class") <- c("ARPALdf","ARPALdf_W","tbl_df","tbl","data.frame")
  return(Metadata)
}
/scratch/gouwar.j/cran-all/cranData/ARPALData/R/get_ARPA_Lombardia_W_registry.R
#' Download ARPA Lombardia zoning geometries
#'
#' @description 'get_ARPA_Lombardia_zoning' returns the geometries (polygonal shape file) and a map of
#' the ARPA zoning of Lombardy. The zoning reflects the main orographic characteristics of the territory.
#' The Lombardy region is classified into seven types of areas: the metropolitan areas of Milano, Bergamo
#' and Brescia, the urbanized plain, the rural plain, the mountain areas and the valley floor.
#' For more information about the zoning visit the section 'Zonizzazione ARPA Lombardia' at the webpages
#' https://www.arpalombardia.it/temi-ambientali/aria/rete-di-rilevamento/classificazione-zone/ and
#' https://www.arpalombardia.it/temi-ambientali/aria/mappa-della-zonizzazione/
#'
#' @param plot_map Logic value (FALSE or TRUE). If plot_map = TRUE, the ARPA Lombardia zoning is represented
#' on a map, if plot_map = FALSE only the geometry (polygon shapefile) is stored in the output.
#' Default is plot_map = TRUE.
#' @param title Title of the plot. Default is 'ARPA Lombardia zoning'
#' @param line_type Linetype for the zones' borders. Default is 1.
#' @param line_size Size of the line for the zones. Default is 1.
#' @param xlab x-axis label. Default is 'Longitude'.
#' @param ylab y-axis label. Default is 'Latitude'.
#'
#' @return The function returns an object of class 'sf' containing the polygon borders of the seven zones used by
#' ARPA Lombardia to classify the regional territory. If plot_map = TRUE, it also returns a map of the zoning.
#'
#' @examples
#' zones <- get_ARPA_Lombardia_zoning(plot_map = TRUE)
#'
#' @export

get_ARPA_Lombardia_zoning <- function(plot_map = TRUE, title = "ARPA Lombardia zoning",
                                      line_type = 1, line_size = 1,
                                      xlab = "Longitude", ylab = "Latitude") {

  ##### Define %notin%
  '%notin%' <- Negate('%in%')

  ##### Checks if plot_map is set up properly
  if (plot_map %notin% c(FALSE,TRUE)) {
    stop("Wrong setup for 'plot_map'. Use 1 or 0 or TRUE or FALSE.",
         call. = FALSE)
  }

  ##### Check for internet connection
  if(!curl::has_internet()) {
    message("Internet connection not available at the moment.\nPlease check your internet connection. If the problem persists, please contact the package maintainer.")
    return(invisible(NULL))
  }

  ##### Check online availability for zoning metadata from GitHub
  temp <- tempfile()
  res <- suppressWarnings(try(curl::curl_fetch_disk("https://github.com/PaoloMaranzano/ARPALData/raw/main/ARPA_zoning_shape.zip", temp), silent = TRUE))
  if(res$status_code != 200) {
    message(paste0("The internet resource for ARPA Lombardia zoning (from GitHub) is not available at the moment. Status code: ",res$status_code,".\nPlease, try later. If the problem persists, please contact the package maintainer."))
    return(invisible(NULL))
  }

  # Download shape file for the ARPA zoning
  temp1 <- tempfile()
  temp2 <- tempfile()
  download.file(url = "https://github.com/PaoloMaranzano/ARPALData/raw/main/ARPA_zoning_shape.zip",
                destfile = temp1)
  unzip(zipfile = temp1, exdir = temp2)
  your_SHP_file <- list.files(temp2, pattern = ".shp$", full.names = TRUE)

  # Read and reshape the shapefile
  Zoning <- sf::read_sf(your_SHP_file) %>%
    sf::st_as_sf(crs = 4326) %>%
    dplyr::mutate(Zone = case_when(.data$COD_ZONA == "A" ~ "Urbanized Plain",
                                   .data$COD_ZONA == "Agg_BG" ~ "Metropolitan area of Bergamo",
                                   .data$COD_ZONA == "Agg_BS" ~ "Metropolitan area of Brescia",
                                   .data$COD_ZONA == "Agg_MI" ~ "Metropolitan area of Milano",
                                   .data$COD_ZONA == "B" ~ "Rural Plain",
                                   .data$COD_ZONA == "C" ~ "Mountain",
                                   .data$COD_ZONA == "D" ~ "Valley floor")) %>%
    dplyr::select(Cod_Zone = .data$COD_ZONA, .data$Zone, .data$geometry)

  if (plot_map == TRUE) {
    geo_plot <- Zoning %>%
      ggplot2::ggplot() +
      ggplot2::geom_sf(aes(fill = .data$Zone), linetype = line_type, size = line_size) +
      ggplot2::labs(title = title, x = xlab, y = ylab) +
      ggplot2::theme_bw() +
      ggplot2::theme(legend.position = "bottom") +
      ggplot2::guides(fill = guide_legend(nrow = 2, byrow = TRUE)) +
      ggplot2::scale_x_continuous(labels = function(x) paste0(x, '\u00B0', "E")) +
      ggplot2::scale_y_continuous(labels = function(x) paste0(x, '\u00B0', "N"))
    print(geo_plot)
  }

  file.remove(your_SHP_file)

  attr(Zoning, "class") <- c("ARPALdf","tbl_df","tbl","data.frame","sf")

  return(Zoning)
}
/scratch/gouwar.j/cran-all/cranData/ARPALData/R/get_ARPA_Lombardia_zoning.R
#' Download geospatial data of Lombardy from Eurostat
#'
#' @description 'get_Lombardia_geospatial' returns the polygonal (shape file) object containing the geometries
#' of Lombardy. Shapefiles are available at different NUTS levels (https://ec.europa.eu/eurostat/web/nuts/background):
#' 'LAU' for the shapefile of municipalities of Lombardy, 'NUTS3' for the shapefile of provinces of Lombardy
#' and 'NUTS2' for the shapefile of Lombardy.
#'
#' @param NUTS_level NUTS level required: use "NUTS2" for regional geometries, "NUTS3" for provincial geometries,
#' or "LAU" for municipal geometries. Default NUTS_level = "LAU".
#'
#' @return A data frame of class 'data.frame', "sf" and 'ARPALdf'.
#'
#' @examples
#' \donttest{
#' shape <- get_Lombardia_geospatial(NUTS_level = "LAU")
#' }
#'
#' @export

get_Lombardia_geospatial <- function(NUTS_level = "LAU") {

  '%notin%' <- Negate('%in%')

  ##### Checks
  if (NUTS_level %notin% c("NUTS2","NUTS3","LAU")) {
    stop("Selected NUTS not available: use one of 'NUTS2', 'NUTS3' and 'LAU'", call. = FALSE)
  }

  ##### Check for internet connection
  if(!curl::has_internet()) {
    message("Internet connection not available at the moment.\nPlease check your internet connection. If the problem persists, please contact the package maintainer.")
    return(invisible(NULL))
  }

  ##### Check online availability for Lombardy's shapefile from GitHub
  temp <- tempfile()
  res <- suppressWarnings(try(curl::curl_fetch_disk("https://github.com/PaoloMaranzano/ARPALData/raw/main/Shape_Comuni_Lombardia.zip", temp), silent = TRUE))
  if(res$status_code != 200) {
    message(paste0("The internet resource for the shapefile of Lombardy (from GitHub) is not available at the moment. Status code: ",res$status_code,".\nPlease, try later. If the problem persists, please contact the package maintainer."))
    return(invisible(NULL))
  }

  # Download shape file for Lombardy municipalities
  temp1 <- tempfile()
  temp2 <- tempfile()
  download.file(url = "https://github.com/PaoloMaranzano/ARPALData/raw/main/Shape_Comuni_Lombardia.zip",
                destfile = temp1)
  unzip(zipfile = temp1, exdir = temp2)
  your_SHP_file <- list.files(temp2, pattern = ".shp$", full.names = TRUE)

  # Read the shapefile
  Lombardia <- sf::read_sf(your_SHP_file) %>%
    sf::st_as_sf(crs = 4326) %>%
    dplyr::rename(Prov_name = .data$NOME_PRO, City = .data$NOME_COM, City_code_ISTAT = .data$ISTAT) %>%
    dplyr::mutate(Prov_name = case_when(.data$Prov_name == "VARESE" ~ "Varese",
                                        .data$Prov_name == "COMO" ~ "Como",
                                        .data$Prov_name == "LECCO" ~ "Lecco",
                                        .data$Prov_name == "SONDRIO" ~ "Sondrio",
                                        .data$Prov_name == "BERGAMO" ~ "Bergamo",
                                        .data$Prov_name == "BRESCIA" ~ "Brescia",
                                        .data$Prov_name == "PAVIA" ~ "Pavia",
                                        .data$Prov_name == "LODI" ~ "Lodi",
                                        .data$Prov_name == "CREMONA" ~ "Cremona",
                                        .data$Prov_name == "MANTOVA" ~ "Mantova",
                                        .data$Prov_name == "MILANO" ~ "Milano",
                                        .data$Prov_name == "MONZA E DELLA BRIANZA" ~ "Monza e della Brianza")) %>%
    dplyr::select(.data$Prov_name, .data$City, .data$City_code_ISTAT, .data$geometry) %>%
    dplyr::mutate(dplyr::across(c(.data$City), toupper),
                  dplyr::across(c(.data$City), ~ gsub("\\-", " ", .x)),
                  dplyr::across(c(.data$City), ~ stringr::str_replace_all(.x, c("S\\."="San ","s\\."="San ",
                                                                                "V\\."="Via ","v\\."="Via ",
                                                                                " D\\`" = " D\\' ", " D\\` " = " D\\'",
                                                                                "D\\`" = " D\\'", "D\\'" = " D\\' "))),
                  dplyr::across(c(.data$City), tm::removePunctuation),
                  dplyr::across(c(.data$City), tm::removeNumbers),
                  dplyr::across(c(.data$City), tm::stripWhitespace)) %>%
    dplyr::mutate(City = dplyr::recode(.data$City,
                                       "CASASCO DINTELVI" = "CASASCO INTELVI",
                                       "CERANO DINTELVI" = "CERANO INTELVI",
                                       "SAN GIORGIO BIGARELLO" = "BIGARELLO",
                                       "PUEGNAGO DEL GARDA" = "PUEGNAGO SUL GARDA",
                                       "FELONICA" = "SERMIDE E FELONICA",
                                       "GERRE DE CAPRIOLI" = "GERRE DECAPRIOLI")) %>%
    dplyr::mutate(dplyr::across(c(.data$City), stringr::str_to_title),
                  dplyr::across(c(.data$City), ~ stringr::str_replace_all(.x, c(" D " = " D\\'",
                                                                                "Sermide E Felonica" = "Sermide e Felonica"))))

  # Associating NUTS codes (from Eurostat) to each observation
  Eurostat <- eurostat::get_eurostat_geospatial(output_class = "sf", resolution = 60,
                                                nuts_level = 3, year = 2016)
  Eurostat <- Eurostat %>%
    sf::st_as_sf(crs = 4326) %>%
    data.frame() %>%
    dplyr::filter(grepl("ITC4",.data$geo)) %>%
    dplyr::select(Prov_name = .data$NUTS_NAME, Prov_code_EUROSTAT = .data$NUTS_ID) %>%
    dplyr::mutate(Reg_name = "Lombardia", Reg_code_EUROSTAT = "ITC4")

  # Joining ISTAT metadata and Eurostat metadata
  Lombardia <- dplyr::left_join(Lombardia, Eurostat, by = c("Prov_name"))
  Lombardia <- Lombardia %>%
    dplyr::select(.data$Reg_name,.data$Reg_code_EUROSTAT,.data$Prov_name,.data$Prov_code_EUROSTAT,
                  .data$City,.data$City_code_ISTAT,.data$geometry)

  if (NUTS_level == "NUTS3") {
    Lombardia <- Lombardia %>%
      dplyr::group_by(.data$Reg_name,.data$Reg_code_EUROSTAT,.data$Prov_name,.data$Prov_code_EUROSTAT) %>%
      dplyr::summarise(.groups = "keep") %>%
      dplyr::ungroup()
  }
  if (NUTS_level == "NUTS2") {
    Lombardia <- Lombardia %>%
      dplyr::group_by(.data$Reg_name,.data$Reg_code_EUROSTAT) %>%
      dplyr::summarise(.groups = "keep") %>%
      dplyr::ungroup()
  }

  file.remove(your_SHP_file)

  attr(Lombardia, "class") <- c("ARPALdf","ARPALdf_AQ","tbl_df","tbl","data.frame","sf")

  return(Lombardia)
}
/scratch/gouwar.j/cran-all/cranData/ARPALData/R/get_Lombardia_geospatial.R
#' @keywords internal
#' @noRd
# Column-wise is.infinite(): returns a logical matrix flagging the infinite entries of a data frame
is.infinite_df <- function(x) {
  do.call(cbind, lapply(x, is.infinite))
}
/scratch/gouwar.j/cran-all/cranData/ARPALData/R/is.infinite_df.R
#' @keywords internal
#' @noRd
# Column-wise is.nan(): returns a logical matrix flagging the NaN entries of a data frame
is.nan_df <- function(x) {
  do.call(cbind, lapply(x, is.nan))
}
/scratch/gouwar.j/cran-all/cranData/ARPALData/R/is.nan_df.R
#' @keywords internal
#' @noRd
# Element-wise test for strictly negative values
is.negative <- function(x) {
  x < 0
}
/scratch/gouwar.j/cran-all/cranData/ARPALData/R/is.negative.R
#' @keywords internal
#' @noRd
# Column-wise is.negative(): returns a logical matrix flagging the negative entries of a data frame
is.negative_df <- function(x) {
  do.call(cbind, lapply(x, is.negative))
}
/scratch/gouwar.j/cran-all/cranData/ARPALData/R/is.negative_df.R
#' @keywords internal
#' @noRd
# Element-wise test for zero values
is.zero <- function(x) {
  x == 0
}
/scratch/gouwar.j/cran-all/cranData/ARPALData/R/is.zero.R
#' @keywords internal
#' @noRd
# Column-wise is.zero(): returns a logical matrix flagging the zero entries of a data frame
is.zero_df <- function(x) {
  do.call(cbind, lapply(x, is.zero))
}
/scratch/gouwar.j/cran-all/cranData/ARPALData/R/is.zero_df.R
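# Example of how the column-wise predicate helpers above are used (a sketch,
# mirroring the cleaning steps in the download functions, where NaN entries of
# the assembled data frames are recoded to NA):
df <- data.frame(a = c(1, NaN, -3), b = c(0, Inf, 2))
df[is.nan_df(df)] <- NA                # recode NaN entries to NA
df[is.infinite_df(df)] <- NA           # recode infinite entries to NA
sum(is.zero_df(df), na.rm = TRUE)      # counts zero entries (here, 1)
sum(is.negative_df(df), na.rm = TRUE)  # counts negative entries (here, 1)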
#' Check if a given object belongs to class 'ARPALdf' #' #' @description 'is_ARPALdf' checks if the input object belongs to the class 'ARPALdf' #' #' @param Data Object to check if the class of a dataframe is 'ARPALdf', i.e. ARPAL dataframe. #' #' @return The function returns 'True' if the object is of class 'ARPALdf' and it returns 'False' if the #' object isn't of class 'ARPALdf' #' #' @examples #' d <- get_ARPA_Lombardia_AQ_registry() #' is_ARPALdf(d) #' #' @export is_ARPALdf <- function(Data) { is.element("ARPALdf", attr(Data,"class")) }
/scratch/gouwar.j/cran-all/cranData/ARPALData/R/is_ARPALdf.R
#' Check if a given object belongs to class 'ARPALdf_AQ' #' #' @description 'is_ARPALdf_AQ' checks if the input object belongs to the class 'ARPALdf_AQ' #' #' @param Data Object to check if the class of a dataframe is 'ARPALdf_AQ', i.e. ARPAL #' dataframe for air quality data. #' #' @return The function returns 'True' if the object is of class 'ARPALdf_AQ' and it returns 'False' if the #' object isn't of class 'ARPALdf_AQ' #' #' @examples #' d <- get_ARPA_Lombardia_AQ_registry() #' is_ARPALdf_AQ(d) #' #' @export is_ARPALdf_AQ <- function(Data) { is.element("ARPALdf_AQ", attr(Data,"class")) }
/scratch/gouwar.j/cran-all/cranData/ARPALData/R/is_ARPALdf_AQ.R
#' Check if a given object belongs to class 'ARPALdf_AQ_mun' #' #' @description 'is_ARPALdf_AQ_mun' checks if the input object belongs to the class 'ARPALdf_AQ_mun' #' #' @param Data Object to check if the class of a dataframe is 'ARPALdf_AQ_mun', i.e. ARPAL #' dataframe for air quality data at municipal level (see the 'get_ARPA_Lombardia_AQ_municipal_data' #' command). #' #' @return The function returns 'True' if the object is of class 'ARPALdf_AQ_mun' and it returns #' 'False' if the object isn't of class 'ARPALdf_AQ_mun' #' #' @examples #' d <- get_ARPA_Lombardia_AQ_registry() #' is_ARPALdf_AQ_mun(d) #' #' @export is_ARPALdf_AQ_mun <- function(Data) { is.element("ARPALdf_AQ_mun", attr(Data,"class")) }
/scratch/gouwar.j/cran-all/cranData/ARPALData/R/is_ARPALdf_AQ_mun.R
#' Check if a given object belongs to class 'ARPALdf_W' #' #' @description 'is_ARPALdf_W' checks if the input object belongs to the class 'ARPALdf_W' #' #' @param Data Object to check if the class of a dataframe is 'ARPALdf_W', i.e. ARPAL #' dataframe for weather data. #' #' @return The function returns 'True' if the object is of class 'ARPALdf_W' and it returns 'False' if the #' object isn't of class 'ARPALdf_W' #' #' @examples #' d <- get_ARPA_Lombardia_W_registry() #' is_ARPALdf_W(d) #' #' @export is_ARPALdf_W <- function(Data) { is.element("ARPALdf_W", attr(Data,"class")) }
/scratch/gouwar.j/cran-all/cranData/ARPALData/R/is_ARPALdf_W.R
#' @keywords internal
#' @noRd
# Returns the k smallest values of a vector x
k_min_vals_vec <- function(x,k) {
  idx <- sort.int(x, index.return = TRUE)$ix[1:k]
  x[idx]
}
/scratch/gouwar.j/cran-all/cranData/ARPALData/R/k_min_vals_vec.R
#' @keywords internal
#' @noRd
# Returns the positions (indices) of the k smallest values of a vector x
k_min_vals_vec_idx <- function(x,k) {
  sort.int(x, index.return = TRUE)$ix[1:k]
}
/scratch/gouwar.j/cran-all/cranData/ARPALData/R/k_min_vals_vec_idx.R
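# Quick illustration of the two helpers above (they support registry_KNN_dist()
# further below):
x <- c(5.2, 1.1, 9.7, 3.4)
k_min_vals_vec(x, k = 2)      # 1.1 3.4 -- the two smallest values
k_min_vals_vec_idx(x, k = 2)  # 2 4    -- their positions within x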
#' Generate a map of the selected stations
#'
#' @description 'map_Lombardia_stations' represents on a map (geometries/polygon of Lombardy) the location of
#' the stations contained in a data frame of class 'ARPALdf'. Data can be either an ARPALdf of observed data
#' (from 'get_ARPA_Lombardia_xxx' commands) or an ARPALdf obtained as registry
#' (from 'get_ARPA_Lombardia_xxx_registry' command).
#'
#' @param data Dataset of class 'ARPALdf' containing the stations to plot on the map. It can be either an
#' ARPALdf of observed data (from 'get_ARPA_Lombardia_xxx' commands) or an ARPALdf obtained as registry
#' (from 'get_ARPA_Lombardia_xxx_registry' command).
#' @param title Title of the plot. Default is 'Map of ARPA stations in Lombardy'
#' @param prov_line_type Linetype for Lombardy provinces. Default is 1.
#' @param prov_line_size Size of the line for Lombardy provinces. Default is 1.
#' @param col_points Color of the points. Default is 'blue'.
#' @param xlab x-axis label. Default is 'Longitude'.
#' @param ylab y-axis label. Default is 'Latitude'.
#'
#' @return A map of selected stations across the Lombardy region
#'
#' @examples
#' \donttest{
#' ## Map network from a dataset of measurements
#' if (require("RSocrata")) {
#'   # Download daily concentrations observed at all the stations in 2020.
#'   d <- get_ARPA_Lombardia_AQ_data(ID_station = NULL, Date_begin = "2020-01-01",
#'         Date_end = "2020-12-31", Frequency = "daily")
#'   # Map the stations included in 'd'
#'   map_Lombardia_stations(data = d, title = "Air quality stations in Lombardy")
#' }
#' ## Map network from a registry dataset
#' if (require("RSocrata")) {
#'   # Download registry for all the AQ stations in 2020.
#'   r <- get_ARPA_Lombardia_AQ_registry()
#'   # Map the stations included in 'r'
#'   map_Lombardia_stations(data = r, title = "Air quality stations in Lombardy")
#' }
#' }
#'
#' @export

map_Lombardia_stations <- function(data, title = "Map of ARPA stations in Lombardy",
                                   prov_line_type = 1, prov_line_size = 1,
                                   col_points = "blue",
                                   xlab = "Longitude", ylab = "Latitude") {

  ##### Check for internet connection
  if(!curl::has_internet()) {
    message("Internet connection not available at the moment.\nPlease check your internet connection. If the problem persists, please contact the package maintainer.")
    return(invisible(NULL))
  }

  ##### Retrieve shapefile from Eurostat
  Lombardia <- get_Lombardia_geospatial(NUTS_level = "NUTS3")
  if (is.null(Lombardia)) {
    message("The map will not include the ground layer with Lombardy's shapefile. Only points/coordinates will be plotted.")
  }

  ##### Data manipulation
  if (is_ARPALdf_AQ(Data = data) == T) {
    Stats <- get_ARPA_Lombardia_AQ_registry()
  } else if (is_ARPALdf_W(Data = data) == T) {
    Stats <- get_ARPA_Lombardia_W_registry()
  }
  data <- data.frame(IDStation = unique(data$IDStation))
  d <- dplyr::left_join(data, Stats, by = "IDStation")
  d <- d %>%
    sf::st_as_sf(coords = c("Longitude", "Latitude"), crs = 4326)

  ##### Mapping
  geo_plot <- Lombardia %>%
    ggplot2::ggplot() +
    ggplot2::geom_sf(linetype = prov_line_type, size = prov_line_size) +
    ggplot2::geom_sf(data = d, col = col_points) +
    ggplot2::labs(title = title) +
    ggplot2::theme_bw() +
    ggplot2::scale_x_continuous(labels = function(x) paste0(x, '\u00B0', "E")) +
    ggplot2::scale_y_continuous(labels = function(x) paste0(x, '\u00B0', "N"))
  print(geo_plot)
}
/scratch/gouwar.j/cran-all/cranData/ARPALData/R/map_Lombardia_stations.R
#' @keywords internal
#' @noRd
# Moment-based (population) kurtosis: computed column by column for matrices
# and data frames, and directly for vectors. (The original version left n
# undefined in the matrix and data.frame branches; the shared local function
# below fixes that.)
mom_kurt <- function(x, na.rm = FALSE) {
  kurt <- function(x) {
    if (na.rm) {
      x <- x[!is.na(x)]
    }
    n <- length(x)
    (sum((x - mean(x))^4)/n)/(sum((x - mean(x))^2)/n)^(4/2)
  }
  if (is.matrix(x)) {
    apply(x, 2, FUN = kurt)
  } else if (is.vector(x)) {
    kurt(x)
  } else if (is.data.frame(x)) {
    sapply(X = x, FUN = kurt)
  }
}
/scratch/gouwar.j/cran-all/cranData/ARPALData/R/mom_kurt.R
#' @keywords internal
#' @noRd
# Moment-based (population) skewness: computed column by column for matrices
# and data frames, and directly for vectors. (The original version left n
# undefined in the matrix and data.frame branches; the shared local function
# below fixes that.)
mom_skew <- function(x, na.rm = FALSE) {
  skew <- function(x) {
    if (na.rm) {
      x <- x[!is.na(x)]
    }
    n <- length(x)
    (sum((x - mean(x))^3)/n)/(sum((x - mean(x))^2)/n)^(3/2)
  }
  if (is.matrix(x)) {
    apply(x, 2, FUN = skew)
  } else if (is.vector(x)) {
    skew(x)
  } else if (is.data.frame(x)) {
    sapply(X = x, FUN = skew)
  }
}
/scratch/gouwar.j/cran-all/cranData/ARPALData/R/mom_skew.R
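# Sanity check for the moment-based estimators above: for a large standard
# normal sample the skewness should be near 0 and the kurtosis near 3.
set.seed(1)
z <- rnorm(1e5)
mom_skew(z)  # approximately 0
mom_kurt(z)  # approximately 3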
#' @keywords internal
#' @noRd
# Builds named quantile functions from labels such as "q25" or "q95"; the
# numeric part of each label is the percentile. The first argument x is unused
# and kept for call compatibility.
quantilep <- function(x, perc_str) {
  p <- as.numeric(sub(".*q", "", perc_str))/100
  p_names <- purrr::map_chr(p, ~ paste0("q", .x*100))
  p_funs <- purrr::map(p, ~ purrr::partial(stats::quantile, probs = .x, na.rm = TRUE)) %>%
    purrr::set_names(nm = p_names)
  return(p_funs)
}
/scratch/gouwar.j/cran-all/cranData/ARPALData/R/quantilep.R
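# quantilep() is the mechanism behind the 'qPP' aggregation functions (e.g.
# Fns_vec = "q25" for the 25th percentile): it turns each label into a named
# quantile function. A small illustration:
p_funs <- quantilep(x = NULL, perc_str = c("q25", "q95"))
p_funs$q25(1:100)  # 25th percentile of 1:100, i.e. 25.75
p_funs$q95(1:100)  # 95th percentile of 1:100, i.e. 95.05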
#' Identifies the K-nearest-neighbors (stations) to all the monitoring sites included in a given ARPALdf
#' registry data.frame. The neighbors are identified by computing the distance between the sites'
#' coordinates (via sf::st_distance).
#'
#' @description For each element included in reg_X, it identifies the k-nearest neighboring locations
#' (among those included in reg_Y) according to the distance between the sites' coordinates. reg_X and reg_Y
#' must be two 'ARPALdf' objects obtained using 'get_ARPA_Lombardia_xxx_registry'.
#'
#' @param reg_X Dataset of class 'ARPALdf' containing the stations list obtained as registry
#' (from 'get_ARPA_Lombardia_xxx_registry' command). The object must contain the following
#' columns: 'IDStation','NameStation','Longitude' and 'Latitude'.
#' @param reg_Y Dataset of class 'ARPALdf' containing the stations list obtained as registry
#' (from 'get_ARPA_Lombardia_xxx_registry' command). The object must contain the following
#' columns: 'IDStation','NameStation','Longitude' and 'Latitude'.
#' @param k Integer value. Represents the number of neighbors the user wants to identify.
#'
#' @return A data.frame object with one row for each station in reg_X. Each row contains the name,
#' the IDStation code and the distance for the k-nearest neighbors.
#'
#' @examples
#' if (require("tidyverse")) {
#'   regAQ <- get_ARPA_Lombardia_AQ_registry()
#'   regAQ <- regAQ %>% filter(Pollutant %in% c("PM10","Ammonia"))
#'   regW <- get_ARPA_Lombardia_W_registry()
#'   registry_KNN_dist(regAQ,regW,k=2)
#' }
#'
#' @export

registry_KNN_dist <- function(reg_X, reg_Y, k = 1){
  # For each element included in reg_X, identifies the k-nearest neighbors
  # (among those included in reg_Y) according to the distance between the
  # stations' coordinates
  reg_X <- reg_X %>%
    dplyr::distinct(.data$IDStation,.data$NameStation,.data$Longitude,.data$Latitude) %>%
    sf::st_as_sf(coords = c("Longitude", "Latitude"), crs = 4326) %>%
    dplyr::arrange(.data$IDStation)
  reg_Y <- reg_Y %>%
    dplyr::distinct(.data$IDStation,.data$NameStation,.data$Longitude,.data$Latitude) %>%
    sf::st_as_sf(coords = c("Longitude", "Latitude"), crs = 4326) %>%
    dplyr::arrange(.data$IDStation)

  ### Computing distance
  dist_matrix <- sf::st_distance(reg_X, reg_Y)

  ### Identifying the k-NN IDStation of reg_Y
  min_dist_idx <- apply(dist_matrix, MARGIN = 1, FUN = k_min_vals_vec_idx, k = k)
  min_dist <- apply(dist_matrix, MARGIN = 1, FUN = k_min_vals_vec, k = k)
  if (k == 1) {
    min_dist_idx <- t(as.matrix(min_dist_idx))
    min_dist <- t(as.matrix(min_dist))
  }

  ### Extracting for each k the corresponding ID and NameStation of reg_Y
  knn_list <- vector(mode = "list", length = k)
  for (j in 1:k) {
    knn_prog <- data.frame(cbind(reg_Y[min_dist_idx[j,],]$NameStation,
                                 reg_Y[min_dist_idx[j,],]$IDStation,
                                 round(min_dist[j,],2)))
    colnames(knn_prog) <- c(paste0("reg_Y_nn",j,"_name"),
                            paste0("reg_Y_nn",j,"_ID"),
                            paste0("reg_Y_nn",j,"_dist"))
    knn_list[[j]] <- knn_prog
  }

  ### Selecting columns from reg_X
  reg_X_name <- reg_X %>%
    dplyr::select(reg_X_name = .data$NameStation, reg_X_ID = .data$IDStation)

  ### Combining dataframes
  output_tab <- dplyr::bind_cols(reg_X_name, knn_list)

  return(output_tab)
}
/scratch/gouwar.j/cran-all/cranData/ARPALData/R/registry_KNN_dist.R
#' @keywords internal #' @noRd url_dataset_year <- function(Stat_type, Year) { url <- switch(Stat_type, ##### Weather monitoring network W = dplyr::case_when(Year %in% 2023:2024 ~ "https://www.dati.lombardia.it/resource/647i-nhxk.json", Year == 2022 ~ "https://www.dati.lombardia.it/download/mvvc-nmzv/application%2Fzip", Year == 2021 ~ "https://www.dati.lombardia.it/download/49n9-866s/application%2Fzip", Year == 2020 ~ "https://www.dati.lombardia.it/download/erjn-istm/application%2Fzip", Year == 2019 ~ "https://www.dati.lombardia.it/download/wrhf-6ztd/application%2Fzip", Year == 2018 ~ "https://www.dati.lombardia.it/download/sfbe-yqe8/application%2Fzip", Year == 2017 ~ "https://www.dati.lombardia.it/download/vx6g-atiu/application%2Fzip", Year == 2016 ~ "https://www.dati.lombardia.it/download/kgxu-frcw/application%2Fzip", Year == 2015 ~ "https://www.dati.lombardia.it/download/knr4-9ujq/application%2Fzip", Year == 2014 ~ "https://www.dati.lombardia.it/download/fn7i-6whe/application%2Fzip", Year == 2013 ~ "https://www.dati.lombardia.it/download/76wm-spny/application%2Fzip", Year %in% 2011:2012 ~ "https://www.dati.lombardia.it/download/srpn-ykcs/application%2Fzip", Year %in% 2009:2010 ~ "https://www.dati.lombardia.it/download/9nu5-ed8s/application%2Fzip", Year %in% 2006:2008 ~ "https://www.dati.lombardia.it/download/6udq-c5ub/application%2Fzip", Year %in% 2001:2005 ~ "https://www.dati.lombardia.it/download/stys-ktts/application%2Fzip", Year %in% 1989:2000 ~ "https://www.dati.lombardia.it/download/tj2h-b7vd/application%2Fzip"), ##### Web pages of the weather data W_check = dplyr::case_when(Year %in% 2023:2024 ~ "https://www.dati.lombardia.it/Ambiente/Dati-sensori-meteo/647i-nhxk", Year == 2022 ~ "https://www.dati.lombardia.it/Ambiente/Dati-sensori-meteo-2022/mvvc-nmzv", Year == 2021 ~ "https://www.dati.lombardia.it/Ambiente/Dati-sensori-meteo-2021/49n9-866s", Year == 2020 ~ "https://www.dati.lombardia.it/Ambiente/Dati-sensori-meteo-2020/erjn-istm", Year == 2019 ~ "https://www.dati.lombardia.it/Ambiente/Dati-sensori-meteo-2019/wrhf-6ztd", Year == 2018 ~ "https://www.dati.lombardia.it/Ambiente/Dati-sensori-meteo-2018/sfbe-yqe8", Year == 2017 ~ "https://www.dati.lombardia.it/Ambiente/Dati-sensori-meteo-2017/vx6g-atiu", Year == 2016 ~ "https://www.dati.lombardia.it/Ambiente/Dati-sensori-meteo-2016/kgxu-frcw", Year == 2015 ~ "https://www.dati.lombardia.it/Ambiente/Dati-sensori-meteo-2015/knr4-9ujq", Year == 2014 ~ "https://www.dati.lombardia.it/Ambiente/Dati-sensori-meteo-2014/fn7i-6whe", Year == 2013 ~ "https://www.dati.lombardia.it/Ambiente/Dati-sensori-meteo-2013/76wm-spny", Year %in% 2011:2012 ~ "https://www.dati.lombardia.it/Ambiente/Dati-sensori-meteo-2011-2012/srpn-ykcs", Year %in% 2009:2010 ~ "https://www.dati.lombardia.it/Ambiente/Dati-sensori-meteo-2009-2010/9nu5-ed8s", Year %in% 2006:2008 ~ "https://www.dati.lombardia.it/Ambiente/Dati-sensori-meteo-2006-2008/6udq-c5ub", Year %in% 2001:2005 ~ "https://www.dati.lombardia.it/Ambiente/Dati-sensori-meteo-2001-2005/stys-ktts", Year %in% 1989:2000 ~ "https://www.dati.lombardia.it/Ambiente/Dati-sensori-meteo-1989-2000/tj2h-b7vd"), ##### Air quality from ground network AQ = dplyr::case_when(Year %in% 2023:2024 ~ "https://www.dati.lombardia.it/resource/nicp-bhqi.json", Year %in% 2018:2022 ~ "https://www.dati.lombardia.it/resource/g2hp-ar79.json", Year %in% 2010:2017 ~ "https://www.dati.lombardia.it/resource/nr8w-tj77.json", Year %in% 2000:2009 ~ "https://www.dati.lombardia.it/resource/cthp-zqrr.json", Year %in% 1968:1999 ~ 
"https://www.dati.lombardia.it/resource/evzn-32bs.json"), ##### Municipal data AQ_municipal = dplyr::case_when(Year %in% 2023:2024 ~ "https://www.dati.lombardia.it/resource/ysm5-jwrn.json", Year == 2022 ~ "https://www.dati.lombardia.it/resource/fqaz-7ste.json", Year == 2021 ~ "https://www.dati.lombardia.it/download/56c9-hxta/application%2Fzip", Year == 2020 ~ "https://www.dati.lombardia.it/download/ej5v-5krk/application%2Fzip", Year == 2019 ~ "https://www.dati.lombardia.it/download/dupr-g65c/application%2Fzip", Year == 2018 ~ "https://www.dati.lombardia.it/download/v75z-59qh/application%2Fzip", Year == 2017 ~ "https://www.dati.lombardia.it/download/a7tn-gnv9/application%2Fzip", Year %in% 2011:2016 ~ "https://www.dati.lombardia.it/download/yjvq-g3tp/application%2Fzip"), ##### Web pages of the municipal data AQ_municipal_check = dplyr::case_when(Year %in% 2023:2024 ~ "https://www.dati.lombardia.it/Ambiente/Dati-stime-comunali/ysm5-jwrn", Year == 2022 ~ "https://www.dati.lombardia.it/Ambiente/Dati-stime-comunali-2022/fqaz-7ste", Year == 2021 ~ "https://www.dati.lombardia.it/Ambiente/Dati-Stime-Comunali-2021/56c9-hxta", Year == 2020 ~ "https://www.dati.lombardia.it/Ambiente/Dati-Stime-Comunali-2020/ej5v-5krk", Year == 2019 ~ "https://www.dati.lombardia.it/Ambiente/Dati-Stime-Comunali-2019/dupr-g65c", Year == 2018 ~ "https://www.dati.lombardia.it/Ambiente/Dati-Stime-Comunali-2018/v75z-59qh", Year == 2017 ~ "https://www.dati.lombardia.it/Ambiente/Dati-Stime-Comunali-2017/a7tn-gnv9", Year %in% 2011:2016 ~ "https://www.dati.lombardia.it/Ambiente/Dati-Stime-Comunali-2011-2016/yjvq-g3tp") ) return(url) }
/scratch/gouwar.j/cran-all/cranData/ARPALData/R/url_dataset_year.R
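# Illustration of the URL dispatcher above: given a dataset type and a year it
# returns the matching ARPA open-data link, e.g.
url_dataset_year(Stat_type = "W", Year = 2020)
#> "https://www.dati.lombardia.it/download/erjn-istm/application%2Fzip"
url_dataset_year(Stat_type = "AQ", Year = 2015)
#> "https://www.dati.lombardia.it/resource/nr8w-tj77.json"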
#' ARPobservation #' #' Tools for simulating different methods of observing alternating renewal processes #' #' \pkg{ARPobservation} provides a set of tools for simulating data based on direct observation of behavior. #' It works by first simulating a behavior stream based on an alternating renewal process, using #' specified distributions of event durations and interim times. Different procedures for recording data #' can then be applied to the simulated behavior stream. #' #' The main function for simulating a behavior stream is \code{\link{r_behavior_stream}}. Currently, the event #' duration and interim time distributions must come from the class \code{\link{eq_dist}}. (See the documentation #' for this class for distributions that are currently implemented.) #' #' Several different observation recording procedures can then be applied as filters to a simulated behavior stream. #' The following procedures are currently implemented: #' \itemize{ #' \item \code{\link{continuous_duration_recording}} #' \item \code{\link{momentary_time_recording}} #' \item \code{\link{event_counting}} #' \item \code{\link{interval_recording}} #' } #' To apply multiple procedures to the same behavior stream, use \code{\link{reported_observations}}. Data can also #' be simulated using the convenience functions \code{\link{r_PIR}}, \code{\link{r_WIR}}, \code{\link{r_MTS}}, #' \code{\link{r_continuous_recording}}, and \code{\link{r_event_counting}}. These functions wrap the #' behavior-stream generation step and the observation recording step into a single function. They are more #' memory efficient, but slightly less computationally efficient, than executing each step in turn. #' #' #' #' @author James E. Pustejovsky <jepusto@@gmail.com> #' #' @name ARPobservation #' @docType package #' @aliases ARPobservation-package NULL
/scratch/gouwar.j/cran-all/cranData/ARPobservation/R/ARPobservation-package.r
## generate behavior streams ####

#' @title An interactive alternating renewal process simulator
#'
#' @description An interactive tool that simulates single-case designs with
#' outcomes measured by systematic direct observation. The behavioral outcomes
#' are generated from the alternating renewal process model. Both event
#' behaviors and state behaviors are supported. Event behaviors are generated
#' from a renewal process with gamma-distributed inter-event times. State
#' behaviors are generated from an alternating Poisson process, in which the
#' event durations and interim times are exponentially distributed. Currently,
#' multiple baseline and treatment reversal designs are supported.
#'
#' @param launch_browser Logical value indicating whether to run the tool using
#' the system's default web browser. Defaults to \code{TRUE}.
#' @export
#'

ARPsimulator <- function(launch_browser = TRUE) {

  pkgs <- c("shiny","markdown","dplyr","tidyr","ggplot2","viridis")
  loaded_pkgs <- sapply(pkgs, requireNamespace, quietly = TRUE)
  if (any(!loaded_pkgs)) {
    msg <- paste0("The simulator requires the following packages to work: ",
                  paste(pkgs[!loaded_pkgs], collapse = ", "),
                  ". Please install them and then try again.")
    stop(msg, call. = FALSE)
  }

  appDir <- system.file("shiny-examples", "ARPsimulator", package = "ARPobservation")
  if (appDir == "") {
    stop("Could not find the application directory. Try re-installing ARPobservation.", call. = FALSE)
  }

  shiny::runApp(appDir, launch.browser = launch_browser, display.mode = "normal")
}
/scratch/gouwar.j/cran-all/cranData/ARPobservation/R/ARPsimulator.R
#--------------------------- # transformation functions #--------------------------- # simple transform functions logit <- function(x) log(x) - log(1 - x) expit <- function(x) 1 / (1 + exp(-x)) # transform parameters from logit-phi, log-zeta to different scales param_trans <- function(param, transform) { names(param) <- NULL switch(transform, none = c(logit_phi = param[1], log_zeta = param[2]), exp = c(phi = expit(param[1]), zeta = exp(param[2])), means = c(mu = expit(param[1]) / exp(param[2]), lambda = (1 - expit(param[1])) / exp(param[2])), logmeans = c(log_mu = -log(1 + exp(-param[1])) - param[2], log_lambda = -log(exp(param[1]) + 1) - param[2])) } #--------------------------------------- # conditional probability functions #--------------------------------------- p_0 <- function(x,phi,zeta) phi * (1 - exp(-1 * x * zeta / (phi * (1- phi)))) p_1 <- function(x,phi,zeta) phi + (1 - phi) * exp(-1 * x * zeta / (phi * (1- phi))) #--------------------------------------------------- # penalty functions #--------------------------------------------------- # Note that priors are on (phi, 1 / zeta) when const = 1 # For inverse-gamma prior on zeta, use const = -1 # For priors on (mu, lambda), use const = 2 Beta_Gamma <- function(k_mu, k_lambda, theta_mu, theta_lambda, const = 1) { function(param, c, coding) { if (coding == "WIR") param[1] <- -param[1] -(k_mu - 1) * log(1 + exp(-param[1])) - (k_lambda - 1) * log(exp(param[1]) + 1) - (k_mu + k_lambda - const) * param[2] - (expit(param[1]) / theta_mu + (1 - expit(param[1])) / theta_lambda) * exp(-param[2]) / c } } # Normal-normal priors on log(mu), log(lambda) Norm_mu_lambda <- function(g_mu, g_lambda, sigma_mu, sigma_lambda) function(param, c, coding) { if (coding == "WIR") param[1] <- -param[1] ((-log(1 + exp(-param[1])) - param[2] - g_mu - log(c))^2 / sigma_mu^2 + (-log(exp(param[1]) + 1) - param[2] - g_lambda - log(c))^2 / sigma_lambda^2) / 2 } # Normal-normal priors on logit(phi), log(zeta) Norm_phi_zeta <- function(g_phi, sigma_phi, g_zeta, sigma_zeta) function(param, c, coding) { if (coding == "WIR") param[1] <- -param[1] ((param[1] - g_phi)^2 / sigma_phi^2 + (param[2] - g_zeta - log(c))^2 / sigma_zeta^2) / 2 } #--------------------------------------- # log-likelihood function for MTS #--------------------------------------- MTS_loglik <- function(param, Tmat, c) { phi <- expit(param[1]) zeta <- exp(param[2]) p0 <- p_0(c, phi, zeta) p1 <- p_1(c, phi, zeta) Tmat[1,1] * log(1 - p0) + Tmat[1,2] * log(p0) + Tmat[2,1] * log(1 - p1) + Tmat[2,2] * log(p1) } # penalized log-likelihood for MTS MTS_loglik_pen <- function(param, Tmat, c, penalty_func) MTS_loglik(param, Tmat, c) + penalty_func(param, c, coding = "MTS") #---------------------------------------------- # maximum likelihood estimation for MTS, # with optional penalty function #---------------------------------------------- # transition matrix Transitions <- function(X) { N <- length(X) crossprod(cbind(1 - X[1:(N-1)], X[1:(N-1)]), cbind(1 - X[2:N], X[2:N])) } # Brown, Solomon, & Stephens (1977) estimator BSSest <- function(Tmat, c) { if (any(rowSums(Tmat) == 0)) return(c(NA, NA)) P01 <- Tmat[1,2] / sum(Tmat[1,]) P10 <- Tmat[2,1] / sum(Tmat[2,]) S <- P01 + P10 if (S >= 1) { est <- c(logit(sum(Tmat[,2]) / sum(Tmat)), NA) } else { est <- c(logit(P01 / S), log(P01) + log(P10) + log(-log(1 - S)) - log(c) - 2 * log(S)) } return(est) } # penalized maximum likelihood estimates for a single response string MTSmle <- function(X, c, penalty_func = NULL, phi_start = pmin(pmax(mean(X), 1 / length(X)), 1 - 1 
/ length(X)), zeta_start = .10, transform = "none") { if (sum(is.na(X)) > 0) return(c(phi = NA, zeta = NA)) N <- length(X) # transition matrix Tmat <- crossprod(cbind(1 - X[1:(N-1)], X[1:(N-1)]), cbind(1 - X[2:N], X[2:N])) if (is.null(penalty_func)) { est <- BSSest(Tmat, c) } else { objective <- function(par) MTS_loglik_pen(par, Tmat = Tmat, c = c, penalty_func) results <- stats::optim(par = c(logit(phi_start), log(zeta_start)), fn = objective, control = list(fnscale = -1)) est <- results$par } param_trans(est, transform = transform) } #---------------------------------------------- # bootstrapping for MTS #---------------------------------------------- MTSbootstrap <- function(X, c, penalty_func = NULL, phi_start = pmin(pmax(mean(X), 1 / length(X)), 1 - 1 / length(X)), zeta_start = .10, transform = "none", iterations = 2000, p = .05, seed = NULL) { if (!is.null(seed)) set.seed(seed) est <- MTSmle(X = X, c = c, penalty_func = penalty_func, phi_start = phi_start, zeta_start = zeta_start, transform = "none") mu_lambda <- param_trans(est, transform = "means") rep_dat <- r_MTS(n = iterations, mu = mu_lambda[1], lambda = mu_lambda[2], stream_length = c * length(X), F_event = F_exp(), F_interim = F_exp(), interval_length = c) rep_ests <- t(apply(rep_dat, MARGIN = 1, FUN = MTSmle, c = c, penalty_func = penalty_func, phi_start = phi_start, zeta_start = zeta_start, transform = transform)) ests <- param_trans(est, transform = transform) std_devs <- apply(rep_ests, 2, stats::sd) CIs <- apply(rep_ests, 2, stats::quantile, probs = c(p/2, 1 - p/2)) data.frame(parm = names(ests), est = ests, sd = std_devs, CI_L = CIs[1,], CI_U = CIs[2,], row.names = NULL) } #------------------------------------------------------ # Expected information for MTS #------------------------------------------------------ MTS_info <- function(phi, zeta, c) { rho <- zeta / (phi * (1 - phi)) erc <- exp(-rho * c) p0 <- p_0(c, phi, zeta) p1 <- p_1(c, phi, zeta) eq_pi <- rep(c(1 - phi, phi), each = 2) / c(1 - p0, p0, 1 - p1, p1) dpi_dphi <- c(erc - 1, 1 - erc, erc - 1, 1 - erc) dpi_drho <- c(-phi * c * erc, phi * c * erc, (1 - phi) * c * erc, -(1 - phi) * c * erc) info <- rbind(dpi_dphi, dpi_drho) %*% (eq_pi * cbind(dpi_dphi, dpi_drho)) rownames(info) <- colnames(info) <- c("phi","rho") info } #--------------------------------------- # log-likelihood function for PIR #--------------------------------------- # calculate conditional probabilities PIRpsi <- function(phi, zeta, U, c, d) { K <- length(U) p1_cd <- p_1(c + d, phi, zeta) p0_cd <- p_0(c + d, phi, zeta) p0_d <- p_0(d, phi, zeta) e_neg_lambda <- exp(-1 * zeta * c / (1 - phi)) psi <- vector(mode = "numeric", length = K) psi[1] <- phi for(i in 1:(K - 1)) { psi_lag <- psi[i] u <- U[i] psi[i + 1] <- ((psi_lag * p1_cd + (1 - psi_lag) * (p0_cd - p0_d * e_neg_lambda)) / (1 - (1-psi_lag) * e_neg_lambda))^u * p0_d^(1-u) } return(psi) } #' @title Calculate log-likelihood #' #' @description Calculates the log-likelihood of within-session PIR data #' #' @param param vector of parameter values logit(prevalence), log(incidence) #' @param U a vector containing interval-level PIR data #' @param c the length of the active interval #' @param d the length of the recording interval #' #' The vector \code{U} should only contain values of 1 or 0. #' \code{c} must be a positive value. \code{d} must be some non-negative value. Setting #' \code{d} equal to zero represents a PIR observation where no time was set aside for recording. 
#' #' @return The value of the log-likelihood #' #' #' @author Daniel Swan <dswan@@utexas.edu> #' @export # log-likelihood for PIR PIR_loglik <- function(param, U, c, d) { phi <- expit(param[1]) zeta <- exp(param[2]) psi <- PIRpsi(phi, zeta, U, c, d) loglik <- sum(U * log(1 - (1 - psi) * exp(-1 * zeta * c / (1 - phi))) + (1 - U) * (log(1 - psi) - zeta * c / (1 - phi))) return(loglik) } # penalized log-likelihood for PIR PIR_loglik_pen <- function(param, U, c, d, penalty_func, coding) PIR_loglik(param, U, c, d) + penalty_func(param, c, coding) #---------------------------------------------- # maximum likelihood estimation for PIR, # with optional penalty function #---------------------------------------------- # maximum likelihood estimates for a single response string PIRmle <- function(U, c, d, coding = "PIR", penalty_func = NULL, phi_start = max(mean(U) / 2, expit(-10)), zeta_start = .10, transform = "none") { if(sum(is.na(U)) > 0) return(c(phi = NA, zeta = NA)) if(coding == "WIR") { U <- 1 - U phi_start <- 1 - phi_start } if (is.null(penalty_func)) { objective <- function(par) PIR_loglik(par, U = U, c = c, d = d) } else { objective <- function(par) { PIR_loglik_pen(par, U = U, c = c, d = d, penalty_func, coding = coding) } } results <- stats::optim(par = c(logit(phi_start), log(zeta_start)), fn = objective, control = list(fnscale = -1)) if (coding == "WIR") results$par[1] <- -results$par[1] param_trans(results$par, transform = transform) } #---------------------------------------------- # bootstrapping for PIR #---------------------------------------------- PIRbootstrap <- function(U, c, d, coding = "PIR", penalty_func = NULL, phi_start = max(mean(U) / 2, expit(-10)), zeta_start = .10, transform = "none", iterations = 2000, p = .05, seed = NULL) { if (!is.null(seed)) set.seed(seed) est <- PIRmle(U = U, c = c, d = d, coding = coding, penalty_func = penalty_func, phi_start = phi_start, zeta_start = zeta_start, transform = "none") mu_lambda <- param_trans(est, transform = "means") r_gen <- if (coding == "PIR") r_PIR else r_WIR rep_dat <- r_gen(n = iterations, mu = mu_lambda[1], lambda = mu_lambda[2], stream_length = (c + d) * length(U), F_event = F_exp(), F_interim = F_exp(), interval_length = c + d, rest_length = d) rep_ests <- t(apply(rep_dat, MARGIN = 1, FUN = PIRmle, c = c, d = d, coding = coding, penalty_func = penalty_func, phi_start = phi_start, zeta_start = zeta_start, transform = transform)) ests <- param_trans(est, transform = transform) std_devs <- apply(rep_ests, 2, stats::sd) CIs <- apply(rep_ests, 2, stats::quantile, probs = c(p/2, 1 - p/2)) data.frame(parm = names(ests), est = ests, sd = std_devs, CI_L = CIs[1,], CI_U = CIs[2,], row.names = NULL) } #--------------------------------------- # log-likelihood function for AIR #--------------------------------------- # transition probabilities for AIR data AIR_pi <- function(phi, zeta, c, d) { p0_d <- p_0(d, phi, zeta) p0_cd <- p_0(c + d, phi, zeta) exp_lambda <- exp(- zeta * c / (1 - phi)) p1_d <- p_1(d, phi, zeta) p1_cd <- p_1(c + d, phi, zeta) exp_mu <- exp(- zeta * c / phi) c(pi_0000 = (1 - p0_d) * exp_lambda, pi_1000 = NA, pi_0100 = p0_d * exp_lambda, pi_1100 = NA, pi_0010 = 1 - p0_cd - (1 - p0_d) * exp_lambda, pi_1010 = 1 - p1_cd - (1 - p1_d) * exp_mu, pi_0110 = p0_cd - p0_d * exp_lambda, pi_1110 = p1_cd - p1_d * exp_mu, pi_0001 = NA, pi_1001 = NA, pi_0101 = NA, pi_1101 = NA, pi_0011 = NA, pi_1011 = (1 - p1_d) * exp_mu, pi_0111 = NA, pi_1111 = p1_d * exp_mu) } # log-likelihood for AIR AIR_loglik <- function(param, Tmat, c, 
d) { phi <- expit(param[1]) zeta <- exp(param[2]) trans_prob <- AIR_pi(phi, zeta, c, d) loglik <- sum(log(trans_prob) * Tmat, na.rm = TRUE) return(loglik) } # penalized log-likelihood for AIR AIR_loglik_pen <- function(param, Tmat, c, d, penalty_func) AIR_loglik(param, Tmat, c, d) + penalty_func(param, c, coding = "AIR") #---------------------------------------------- # maximum likelihood estimation for AIR, # with optional penalty function #---------------------------------------------- # maximum likelihood estimates for a single response string AIR_Tmat <- function(XUW) { K <- nrow(XUW) - 1 MTS_lag <- factor(XUW[1:K, 1], levels = 0:1) table(MTS_lag, factor(XUW[-1,1], levels = 0:1), factor(XUW[-1,2], levels = 0:1), factor(XUW[-1,3], levels = 0:1)) } AIRmle <- function(XUW, c, d, penalty_func = NULL, phi_start = min(max(mean(XUW[,1]), expit(-10)), 1 - expit(-10)), zeta_start = .10, transform = "none") { if (sum(is.na(XUW[-1,])) + is.na(XUW[1,1]) > 0) return(c(phi = NA, zeta = NA)) Tmat <- AIR_Tmat(XUW) if (is.null(penalty_func)) { objective <- function(par) AIR_loglik(par, Tmat = Tmat, c = c, d = d) } else { objective <- function(par) { AIR_loglik_pen(par, Tmat = Tmat, c = c, d = d, penalty_func) } } results <- stats::optim(par = c(logit(phi_start), log(zeta_start)), fn = objective, control = list(fnscale = -1)) param_trans(results$par, transform = transform) } #------------------------------------------------------ # Expected information for AIR #------------------------------------------------------ AIR_dpi <- function(phi, zeta, c, d) { rho <- zeta / (phi * (1 - phi)) p0_d <- p_0(d, phi, zeta) p0_cd <- p_0(c + d, phi, zeta) exp_lambda <- exp(- rho * phi * c) p1_d <- p_1(d, phi, zeta) p1_cd <- p_1(c + d, phi, zeta) exp_mu <- exp(- rho * (1 - phi) * c) Tpi <- AIR_pi(phi, zeta, c, d) exp_rd <- exp(-rho * d) exp_rcd <- exp(-rho * (c + d)) dphi <- c(pi_0000 = -rho * c * Tpi[[1]] - exp_lambda * p0_d / phi, pi_1000 = NA, pi_0100 = -rho * c * Tpi[[3]] + exp_lambda * p0_d / phi, pi_1100 = NA, pi_0010 = -p0_cd / phi + rho * c * Tpi[[1]] + exp_lambda * p0_d / phi, pi_1010 = -p0_cd / phi - rho * c * Tpi[[14]] + exp_mu * p0_d / phi, pi_0110 = p0_cd / phi + rho * c * Tpi[[3]] - exp_lambda * p0_d / phi, pi_1110 = p0_cd / phi - rho * c * Tpi[[16]] - exp_mu * p0_d / phi, pi_0001 = NA, pi_1001 = NA, pi_0101 = NA, pi_1101 = NA, pi_0011 = NA, pi_1011 = rho * c * Tpi[[14]] - exp_mu * p0_d / phi, pi_0111 = NA, pi_1111 = rho * c * Tpi[[16]] + exp_mu * p0_d / phi) drho <- c(pi_0000 = -phi * (c * Tpi[[1]] + d * exp_rd * exp_lambda), pi_1000 = NA, pi_0100 = -phi * (c * Tpi[[3]] + d * exp_rd * exp_lambda), pi_1100 = NA, pi_0010 = phi * (-(c + d) * exp_rcd + c * Tpi[[1]] + d * exp_rd * exp_lambda), pi_1010 = (1 - phi) * ((c + d) * exp_rcd + c * Tpi[[14]] - d * exp_rd * exp_mu), pi_0110 = phi * ((c + d) * exp_rcd + c * Tpi[[3]] - d * exp_rd * exp_lambda), pi_1110 = (1 - phi) * (-(c + d) * exp_rcd + c * Tpi[[16]] + d *exp_rd * exp_mu), pi_0001 = NA, pi_1001 = NA, pi_0101 = NA, pi_1101 = NA, pi_0011 = NA, pi_1011 = -(1 - phi) * (c * Tpi[[14]] - d * exp_rd * exp_mu), pi_0111 = NA, pi_1111 = (1 - phi) * (c * Tpi[[16]] + d * exp_rd * exp_mu)) cbind(dphi, drho) } AIR_info <- function(phi, zeta, c, d) { Tpi <- AIR_pi(phi, zeta, c, d) eq_pi <- rep(c(1 - phi, phi), times = 8) / ifelse(Tpi == 0, Inf, Tpi) dpi <- AIR_dpi(phi, zeta, c, d) ind <- c(1,3,5,7,6,8,14,16) info <- t(dpi[ind,]) %*% (eq_pi[ind] * dpi[ind,]) rownames(info) <- colnames(info) <- c("phi","rho") info }
/scratch/gouwar.j/cran-all/cranData/ARPobservation/R/PIR-APP.R
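# A small end-to-end sketch of the MTS estimator above, using the simulation
# tools documented in the package overview (r_MTS() and F_exp()). Under the
# alternating Poisson process with mean event duration mu and mean interim time
# lambda, prevalence is phi = mu / (mu + lambda) and incidence is
# zeta = 1 / (mu + lambda), so the true values below are phi = 0.25 and
# zeta = 0.125. All numbers are illustrative.
set.seed(20)
X <- r_MTS(n = 1, mu = 2, lambda = 6, stream_length = 600,
           F_event = F_exp(), F_interim = F_exp(), interval_length = 1)
MTSmle(as.vector(X), c = 1, transform = "exp")  # estimates of (phi, zeta)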
# Generic error-checking function common to all of these functions phase_validation <- function(observations, phase, base_level) { #recasts phase as a factor if it is just a vector phase <- factor(phase) if(!any(levels(phase) == base_level)){ stop('Specified base level does not match a level in the data.') } #Checks to be sure there are only two levels if(length(levels(phase)) != 2){ stop('This function requires exactly two levels in the phase variable') } if(length(phase) != length(observations)){ stop('The length of \'observations\' and the length of \'phase\' need to be the same') } treat_level <- levels(phase)[(base_level != levels(phase))] return(c(base_level, treat_level)) } #' @title Calculate log-response ratio, variance, and confidence interval #' #' @description Estimates the log-response ratio (with or without bias correction), #' the variance of the log-response ratio, and the confidence interval for a given #' confidence level. #' #' @param observations Vector of observations #' @param phase Factor or vector indicating levels of the PIR measurements. #' @param base_level a character string or value indicating the name of the baseline level. #' @param conf_level Desired coverage rate of the calculated confidence interval. Default is \code{.95}. #' @param bias_correct Logical value indicating if the bias-corrected log-response ratio should be used. Default is \code{TRUE} #' @param exponentiate Logical value indicating if the log-response ratio should be exponentiated. #' #' @details The \code{observations} vector can be in any order corresponding to the factor or vector \code{phase}. #' The levels of \code{phase} can be any two levels, such as "A" and "B", "base" and "treat", or "0" and "1". #' If there are more than two levels in \code{phase} this function will not work. #' A value for \code{base_level} must be specified - if it is a character string it is case sensitive. #' If \code{exponentiate = TRUE}, the log-ratio and the confidence interval will be exponentiated, #' but the variance will be excluded from the output. #' #' @return If \code{exponentiate = FALSE}, a list with three named entries. #' The first entry, \code{lRR}, is the estimated log-response ratio. #' The second entry, \code{V_lRR}, is the estimated variance of the log-response ratio. #' The third entry, \code{CI}, is a vector containing the endpoints of a confidence #' interval of \code{conf_level} coverage rate. #' #' If \code{exponentiate = TRUE}, a list with two named entries. #' The first entry, \code{RR}, is the estimated response ratio. #' The second entry, \code{CI}, is a vector containing the endpoints of a confidence #' interval of \code{conf_level} coverage rate. 
#' #' @examples #' #' # Estimate the log response ratio and its variance for Carl from Moes dataset #' data(Moes) #' with(subset(Moes, Case == "Carl"), #' logRespRatio(observations = outcome, phase = Phase, base_level = "No Choice")) #' #' @author Daniel Swan <dswan@@utexas.edu> #' @export logRespRatio <- function(observations, phase, base_level, conf_level = .95, bias_correct = TRUE, exponentiate = FALSE) { level_labels <- phase_validation(observations = observations, phase = phase, base_level = base_level) # calculate summary statistics for both samples, sort so that base level is first nObs <- table(phase)[level_labels] means <- tapply(observations, phase, mean)[level_labels] variances <- tapply(observations, phase, stats::var)[level_labels] if (!all(means > 0)) stop('The mean of one or both phases is at the floor of 0.') if (bias_correct == TRUE) { BC <- log(means) - variances / (2 * nObs * means^2) lRR <- as.numeric(BC[2] - BC[1]) } else { lRR <- log(means[2]) - log(means[1]) } V_lRR <- sum(variances / (nObs * means^2)) CI <- lRR + c(-1, 1) * stats::qnorm(1-(1-conf_level)/2) * sqrt(V_lRR) if(exponentiate){ return(list(RR = exp(lRR), CI = exp(CI))) }else{ return(list(lRR = lRR, V_lRR = V_lRR, CI = CI)) } } #' @title Prevalence bounds and confidence interval #' #' @description Calculates a bound for the log of the prevalence ratio of two samples (referred to as baseline and treatment) #' based on partial interval recording (PIR) data, assuming that the behavior follows an Alternating Renewal Process. #' #' @param PIR vector of PIR measurements #' @param phase factor or vector indicating levels of the PIR measurements. #' @param base_level a character string or value indicating the name of the baseline level. #' @param mu_L the lower limit on the mean event duration #' @param active_length length of the active observation interval #' @param intervals the number of intervals in the sample of observations. Default is \code{NA}. #' @param conf_level Coverage rate of the confidence interval. Default is \code{.95}. #' @param exponentiate Logical value indicating if the log of the bounds and the confidence interval should be exponentiated. Default is \code{FALSE}. #' #' @details The prevalence ratio estimate is based on the assumptions that #' 1) the underlying behavior stream follows an Alternating Renewal Process and #' 2) the average event duration is greater than \code{mu_L}. #' #' The \code{PIR} vector can be in any order corresponding to the factor or vector \code{phase}. #' The levels of \code{phase} can be any two levels, such as "A" and "B", "base" and "treat", or "0" and "1". #' If there are more than two levels in \code{phase} this function will not work. #' A value for \code{base_level} must be specified - if it is a character string it is case sensitive. #' #' For all of the following variables, the function assumes that if a vector of values is provided they are constant across all observations and simply uses the first value in that vector. #' #' \code{mu_L} is the lower limit on the mean event durations. This is a single value assumed to hold for both samples of behavior #' #' \code{active_length} This is the total active observation length. If the intervals are 15 seconds long but 5 seconds of each interval is reserved for recording purposes, \code{active_length= 10}. Times are often in seconds, but can be in any time unit. #' #' \code{intervals} is the number of intervals in the observations. 
This is a single value and is assumed to be constant across both samples and all observations. This value is only relevant if the mean of one of the samples is at the floor or ceiling of 0 or 1. In that case it will be used to truncate the sample mean. If the sample mean is at the floor or ceiling and no value for \code{intervals} is provided, the function will stop. #' #' @return A list with three named entries. #' The first entry, \code{estimate_bounds}, contains the lower and upper bound for the estimate of the prevalence ratio. #' The second entry, \code{estimate_SE}, contains the standard error of the estimate. #' The third entry, \code{estimate_CI}, contains the lower and upper bounds for the confidence interval of the prevalence ratio. #' #' @examples #' # Estimate bounds on the prevalence ratio for Carl from Moes dataset #' data(Moes) #' with(subset(Moes, Case == "Carl"), #' prevalence_bounds(PIR = outcome, phase = Phase, base_level = "No Choice", #' mu_L = 10, active_length = active_length, intervals = intervals)) #' #' @author Daniel Swan <dswan@@utexas.edu> #' @export prevalence_bounds <- function(PIR, phase, base_level, mu_L, active_length, intervals = NA, conf_level = 0.95, exponentiate = FALSE) { if(length(which(PIR > 1 | PIR < 0)) > 0 | sum(is.na(PIR)) > 0) { stop('Values for PIR must be between 0 and 1 and cannot be NA') } level_labels <- phase_validation(observations = PIR, phase = phase, base_level = base_level) # calculate summary statistics for both samples, sort so that base level is first nObs <- table(phase)[level_labels] means <- tapply(PIR, phase, mean)[level_labels] variances <- tapply(PIR, phase, stats::var)[level_labels] #change vectors to single values mu_L <- mu_L[1] active_length <- active_length[1] intervals <- intervals[1] if ((!all(means > 0) | !all(means < 1)) & is.na(intervals)) stop('One of the means is at the floor or ceiling of 0 or 1 and no value for intervals has been provided to perform truncation') means <- ifelse(means == 0, 1/(nObs * intervals),ifelse(means == 1, 1 - (1/(nObs * intervals)),means)) #Natural log of the ratio of the two means R <- log(means[2]) - log(means[1]) h <- log(mu_L + active_length) - log(mu_L) #calculate lower and upper bounds lower_bound <- as.numeric(R - h) upper_bound <- as.numeric(R + h) #variance of the log ratio variance_R <- as.numeric((variances[1]/(nObs[1] * (means[1]^2))) + (variances[2]/(nObs[2] * (means[2]^2)))) #calculate the CI lower_CI <- as.numeric(R - (h + (stats::qnorm(1-(1-conf_level)/2)*sqrt(variance_R)))) upper_CI <- as.numeric(R + (h + (stats::qnorm(1-(1-conf_level)/2)*sqrt(variance_R)))) #exponentiates the values, if desired if(exponentiate == TRUE){ lower_bound <- exp(lower_bound) upper_bound <- exp(upper_bound) lower_CI <- exp(lower_CI) upper_CI <- exp(upper_CI) } return(list(estimate_bounds = c(lower_bound = lower_bound, upper_bound = upper_bound), estimate_SE = sqrt(variance_R), estimate_CI = c(lower_CI = lower_CI, upper_CI = upper_CI))) } #' Incidence bounds and confidence interval #' #' @description Calculates a bound for the log of the incidence ratio of two samples #' (referred to as baseline and treatment) based on partial interval recording (PIR) data, #' assuming that the behavior follows an Alternating Renewal Process. #' #' @param PIR vector of PIR measurements #' @param phase factor or vector indicating levels of the PIR measurements. #' @param base_level a character string or value indicating the name of the baseline level. 
#' @param mu_U the upper limit on the mean event duration #' @param p upper limit on the probability that the interim time between behavioral events is less than the active interval #' @param active_length length of the active observation interval #' @param intervals the number of intervals in the sample of observations. Default is \code{NA} #' @param conf_level Coverage rate of the confidence interval. Default is \code{.95}. #' @param exponentiate Logical value indicating if the log of the bounds and the confidence interval should be exponentiated. Default is \code{FALSE}. #' #' @details The incidence ratio estimate is based on the assumptions that #' 1) the underlying behavior stream follows an Alternating Renewal Process, #' 2) the average event duration is less than \code{mu_U}, and #' 3) the probability of observing an interim time less than the active interval length is less than \code{p}. #' #' The \code{PIR} vector can be in any order corresponding to the factor or vector \code{phase}. The levels of \code{phase} can be any two levels, such as "A" and "B", "base" and "treat", or "0" and "1". If there are more than two levels in \code{phase} this function will not work. A value for \code{base_level} must be specified - if it is a character string it is case sensitive. #' #' For all of the following variables, the function assumes that if a vector of values is provided they are constant across all observations and simply uses the first value in that vector. #' #' \code{mu_U} is the upper limit on the mean event durations. This is a single value assumed to hold for both samples of behavior #' #' \code{active_length} This is the total active observation length. If the intervals are 15 seconds long but 5 seconds of each interval is reserved for recording purposes, \code{active_length= 10}. Times are often in seconds, but can be in any time unit. #' #' \code{intervals} is the number of intervals in the observations. This is a single value and is assumed to be constant across both samples and all observations. This value is only relevant if the mean of one of the samples is at the floor or ceiling of 0 or 1. In that case it will be used to truncate the sample mean. If the sample mean is at the floor or ceiling and no value for \code{intervals} is provided, the function will stop. #' #' @return A list containing two named vectors and a single named number. #' The first entry, \code{estimate_bounds}, contains the lower and upper bound for the estimate of the incidence ratio. #' The second entry, \code{estimate_SE}, contains the standard error of the estimate. #' The third entry, \code{estimate_CI}, contains the lower and upper bounds for the confidence interval of the incidence ratio. 
#'
#' @examples
#'
#' # Estimate bounds on the incidence ratio for Ahmad from the Dunlap dataset
#' data(Dunlap)
#' with(subset(Dunlap, Case == "Ahmad"),
#' incidence_bounds(PIR = outcome, phase = Phase, base_level = "No Choice",
#' mu_U = 10, p = .15, active_length = active_length, intervals = intervals))
#'
#' @author Daniel Swan <dswan@@utexas.edu>
#' @export

incidence_bounds <- function(PIR, phase, base_level, mu_U, p, active_length,
                             intervals = NA, conf_level = 0.95,
                             exponentiate = FALSE) {

  if(length(which(PIR > 1 | PIR < 0)) > 0 | sum(is.na(PIR)) > 0) {
    stop('Values for PIR must be between 0 and 1 and cannot be NA')
  }

  level_labels <- phase_validation(observations = PIR, phase = phase, base_level = base_level)

  # calculate summary statistics for both samples, sort so that base level is first
  nObs <- table(phase)[level_labels]
  means <- tapply(PIR, phase, mean)[level_labels]
  variances <- tapply(PIR, phase, stats::var)[level_labels]

  # change vectors to single values
  mu_U <- mu_U[1]
  active_length <- active_length[1]
  intervals <- intervals[1]

  if ((!all(means > 0) | !all(means < 1)) & is.na(intervals)) stop('One of the means is at the floor or ceiling of 0 or 1 and no value for intervals has been provided to perform truncation')

  means <- ifelse(means == 0, 1/(nObs * intervals),
                  ifelse(means == 1, 1 - (1/(nObs * intervals)), means))

  # natural log of the ratio of the two means
  R <- log(means[2]) - log(means[1])

  h <- log(mu_U + active_length) - log(1-p) - log(active_length)

  # calculate lower and upper bounds
  lower_bound <- as.numeric(R - h)
  upper_bound <- as.numeric(R + h)

  # variance of the log ratio
  variance_R <- as.numeric((variances[1]/(nObs[1] * (means[1]^2))) +
                             (variances[2]/(nObs[2] * (means[2]^2))))

  # calculate the CI
  lower_CI <- as.numeric(R - (h + (stats::qnorm(1-(1-conf_level)/2) * sqrt(variance_R))))
  upper_CI <- as.numeric(R + (h + (stats::qnorm(1-(1-conf_level)/2) * sqrt(variance_R))))

  # exponentiates the values, if desired; the results must be assigned back,
  # not merely computed and discarded
  if(exponentiate == TRUE){
    lower_bound <- exp(lower_bound)
    upper_bound <- exp(upper_bound)
    lower_CI <- exp(lower_CI)
    upper_CI <- exp(upper_CI)
  }

  return(list(estimate_bounds = c(lower_bound = lower_bound, upper_bound = upper_bound),
              estimate_SE = sqrt(variance_R),
              estimate_CI = c(lower_CI = lower_CI, upper_CI = upper_CI)))
}

#' @title Interim bounds and confidence interval
#'
#' @description Calculates a bound for the log of the ratio of interim times of two samples
#' (referred to as baseline and treatment) based on partial interval recording (PIR) data,
#' assuming that the average event durations are equal across samples and that
#' interim times are exponentially distributed.
#'
#' @param PIR vector of PIR measurements
#' @param phase factor or vector indicating levels of the PIR measurements.
#' @param base_level a character string or value indicating the name of the baseline level.
#' @param conf_level Desired coverage rate of the calculated confidence interval. Default is \code{.95}.
#' @param intervals the number of intervals in the sample of observations. Default is \code{NA}.
#' @param exponentiate Logical value indicating if the log of the bounds and the confidence interval should be exponentiated. Default is \code{FALSE}.
#'
#' @details The interim ratio estimate is based on the assumptions that
#' 1) the underlying behavior stream follows an Alternating Renewal Process,
#' 2) the average event durations in each sample are equal, and
#' 3) interim times follow exponential distributions.
#'
#' The \code{PIR} vector can be in any order corresponding to the factor or vector \code{phase}.
#' The levels of \code{phase} can be any two levels, such as "A" and "B", "base" and "treat",
#' or "0" and "1". If there are more than two levels in \code{phase} this function will not work.
#' A value for \code{base_level} must be specified; if it is a character string it is case sensitive.
#'
#' \code{intervals} is the number of intervals in the observations.
#' This is a single value and is assumed to be constant across both samples and all observations.
#' If intervals is sent as a vector instead of a single value, the first value in the vector will
#' be used. This value is only relevant if the mean of one of the samples is at the floor or ceiling
#' of 0 or 1. In that case it will be used to truncate the sample mean. If the sample mean is at the
#' floor or ceiling and no value for \code{intervals} is provided, the function will stop.
#'
#' @return A list with three named entries.
#' The first entry, \code{estimate_bounds}, contains the lower and upper bound for the estimate of the interim time ratio.
#' The second entry, \code{estimate_SE}, contains the standard errors for the upper and lower bounds.
#' The third entry, \code{estimate_CI}, contains the lower and upper bounds for the confidence interval of the interim time ratio.
#'
#' @examples
#' # Estimate bounds on the interim time ratio for Carl from the Moes dataset
#' data(Moes)
#' with(subset(Moes, Case == "Carl"),
#' interim_bounds(PIR = outcome, phase = Phase, base_level = "No Choice"))
#'
#' @author Daniel Swan <dswan@@utexas.edu>
#' @export

interim_bounds <- function(PIR, phase, base_level,
                           conf_level = 0.95, intervals = NA, exponentiate = FALSE) {

  if(length(which(PIR > 1 | PIR < 0)) > 0 | sum(is.na(PIR)) > 0) {
    stop('Values for PIR must be between 0 and 1 and cannot be NA')
  }

  level_labels <- phase_validation(observations = PIR, phase = phase, base_level = base_level)

  # calculate summary statistics for both samples, sort so that base level is first
  nObs <- table(phase)[level_labels]
  means <- tapply(PIR, phase, mean)[level_labels]
  variances <- tapply(PIR, phase, stats::var)[level_labels]

  intervals <- intervals[1]

  if ((!all(means > 0) | !all(means < 1)) & is.na(intervals)) stop('One of the means is at the floor or ceiling of 0 or 1 and no value for intervals has been provided to perform truncation')

  means <- ifelse(means == 0, 1/(nObs * intervals),
                  ifelse(means == 1, 1 - (1/(nObs * intervals)), means))

  # the logit and complementary log-log functions
  logit <- function(x) {log(x) - log(1-x)}
  cll <- function(x) {log(-1 * log(1-x))}

  # This block checks which transformation provides the appropriate value for
  # the lower and upper bounds
  if(means[1] > means[2]){
    f_lower <- as.numeric(logit(means[2]) - logit(means[1]))
    f_upper <- as.numeric(cll(means[2]) - cll(means[1]))
  }else{
    f_lower <- as.numeric(cll(means[2]) - cll(means[1]))
    f_upper <- as.numeric(logit(means[2]) - logit(means[1]))
  }

  # This is the variance of the log odds ratio of the sample means
  var_LOR <- as.numeric((variances[1]/(nObs[1] * (means[1]^2) * (1- means[1])^2)) +
                          (variances[2]/(nObs[2] * (means[2]^2) * (1- means[2])^2)))

  # This is the variance of the complementary log-log ratio
  var_CLR <- as.numeric((variances[1]/(nObs[1] * (1- means[1])^2 * (log(1-means[1]))^2)) +
                          (variances[2]/(nObs[2] * (1- means[2])^2 * (log(1-means[2]))^2)))

  # The ratio and z_conf are used in determining which variance is appropriate
  cll_ratio <- cll(means[2]) - cll(means[1])
  z_conf <- stats::qnorm(1-(1-conf_level)/2)

  if(cll_ratio <= (z_conf * sqrt(var_LOR))){
    var_f_lower <- var_LOR
  } else{
    var_f_lower <- var_CLR
  }

  if(cll_ratio < -(z_conf * sqrt(var_LOR))){
    var_f_upper <- var_CLR
  }else{
    var_f_upper <- var_LOR
  }

  lower_CI <- f_lower - (z_conf * sqrt(var_f_lower))
  upper_CI <- f_upper + (z_conf *
sqrt(var_f_upper)) if(exponentiate == TRUE) { f_lower <- exp(f_lower) f_upper <- exp(f_upper) lower_CI <- exp(lower_CI) upper_CI <- exp(upper_CI) } return(list(estimate_bounds = c(lower_bound = f_lower,upper_bound = f_upper), estimate_SE = c(lower_SE = sqrt(var_f_lower), upper_SE = sqrt(var_f_upper)), estimate_CI = c(lower_CI = lower_CI, upper_CI = upper_CI))) } # Zeta as a function of phi and E(Y) PIR_Zeta <- function(phi, ExY, active) -1 * (1-phi) * log((ExY-1)/(phi-1)) / active # Var(Y) as a function of phi and E(Y) PIR_VarEx <- function(phi, ExY, active, L, K){ zeta = PIR_Zeta(phi, ExY, active) if (phi == 0) { vary = ExY * (1- ExY) / K } else { k = 1:(K-1) #vector for summation term vary = ((1/K) * ExY * (1-ExY)) * ( 1 + (2 * phi / K / ExY) * sum((K - k) * exp((zeta * active / phi) - zeta * k * L /phi / (1- phi) / K)) ) } return(vary) } # Finds the root for phi using the variance and mean PIR_Phi <- function(ExY, VarY, nObs, active, L, K) { fun <- function(phi) PIR_VarEx(phi, ExY, active, L, K) / VarY - 1 stats::uniroot(fun, interval = c(0, ExY), tol = .Machine$double.eps^0.5)$root } # DEPRECATED # generates multiple samples of PIR data using the r_behavior_stream and interval_recording # functions from ARPobservation. Useful for simulating or performing bootstraps generatePIRData <- function(nObs, phi, zeta, active, K, rest = 0, iterations = 1) { # necessary to recast as numbers because PIR_MOM sends them as lists with one element phi <- as.numeric(phi) zeta <- as.numeric(zeta) #calculating the means of the distributions of event durations and interim #times from estimates of prevalence and incidence mu <- phi/zeta lambda <- (1-phi)/zeta #the full interval length is the sum of the active and rest (recording) lengths intervalLength = active + rest #this block generates a matrix of PIR data with each column corresponding to a #a sample of n = nObs observations sampleObs <- replicate(iterations, { BS <- r_behavior_stream(n = nObs, mu = mu, lambda = lambda, F_event = F_exp(), F_interim = F_exp(), stream_length = intervalLength * K) interval_recording(BS = BS, interval_length = intervalLength, rest_length = rest) }) mean <- colMeans(sampleObs) variance <- apply(sampleObs, 2, stats::var) sampleData <- data.frame(mean = mean, variance = variance) return(sampleData) } # A packaging function that invokes the previous set of functions to neatly # provide an estimate of phi and zeta for a sample or vector of samples PIR_inv <- function(ExY, VarY, nObs, active, L, K, varTrunc = 1 / (nObs * K^2)) { ExY <- pmin(pmax(ExY, 1 / (nObs * K)), 1 - 1 / (nObs * K)) VarY <- pmin(pmax(VarY, ExY * (1 - ExY) / K + varTrunc), ExY * (1 - ExY) - varTrunc) phi <- mapply(PIR_Phi, ExY = ExY, VarY = VarY, nObs = nObs, active = active, L = L, K = K) zeta <- mapply(PIR_Zeta, phi = phi, ExY = ExY, active = active) ests <- data.frame(phi = phi, zeta = zeta) return (ests) } # bootstraps the confidence intervals for a pair of samples with estimates of phi and zeta PIRbootstrappair <- function(nObs, phi, zeta, active, rest, K, iterations, alpha, exponentiate, seed = NULL){ # set seed if one is supplied if (!is.null(seed)) set.seed(seed) #the total length of the observation is the total interval length times the number of intervals L = (active + rest) * K #calculating the means of the distributions of event durations and interim #times from estimates of prevalence and incidence mu <- phi/zeta lambda <- (1-phi)/zeta # Simulate first set of parms sampleData0 <- replicate(n = iterations, r_PIR(n = nObs[1], mu = mu[1], lambda = 
lambda[1], stream_length = L, F_event = F_exp(), F_interim = F_exp(), interval_length = active + rest, rest_length = rest, summarize = TRUE)) mean0 <- colMeans(sampleData0) variance0 <- apply(sampleData0, 2, stats::var) # Returns a dataframe of estimates of phi and zeta ests0 <- PIR_inv(ExY = mean0, VarY = variance0, nObs = nObs[1], active = active, K = K, L = L) # get the confidence interval bounds for the first sample pbounds0 <- stats::quantile(ests0$phi, probs = c(alpha/2, 1-alpha/2)) zbounds0 <- stats::quantile(ests0$zeta, probs = c(alpha/2, 1-alpha/2)) # simulate second set of parms sampleData1 <- replicate(n = iterations, r_PIR(n = nObs[2], mu = mu[2], lambda = lambda[2], stream_length = L, F_event = F_exp(), F_interim = F_exp(), interval_length = active + rest, rest_length = rest, summarize = TRUE)) mean1 <- colMeans(sampleData1) variance1 <- apply(sampleData1, 2, stats::var) ests1 <- PIR_inv(ExY = mean1, VarY = variance1, nObs = nObs[2], active = active, K = K, L = L) pbounds1 <- stats::quantile(ests1$phi, probs = c(alpha/2, 1-alpha/2)) zbounds1 <- stats::quantile(ests1$zeta, probs = c(alpha/2, 1-alpha/2)) # calculate log ratio value and confidence interval bounds plogratio <- log(ests1$phi/ests0$phi) plogbounds <- stats::quantile(plogratio, probs = c(alpha/2, 1-alpha/2)) zlogratio <- log(ests1$zeta/ests0$zeta) zlogbounds <- stats::quantile(zlogratio, probs = c(alpha/2, 1-alpha/2)) if(exponentiate){ pratbounds <- exp(plogbounds) zratbounds <- exp(zlogbounds) results <- cbind(phi = c(phi[1], phi[2], as.numeric(phi[2])/as.numeric(phi[1])), phi_lower_CI = c(pbounds0[1], pbounds1[1], pratbounds[1]), phi_upper_CI = c(pbounds0[2], pbounds1[2], pratbounds[2]), zeta = c(zeta[1], zeta[2], as.numeric(zeta[2])/as.numeric(zeta[1])), zeta_lower_CI = c(zbounds0[1], zbounds1[1], zratbounds[1]), zeta_upper_CI = c(zbounds0[2], zbounds1[2], zratbounds[2])) }else{ results <- cbind(phi = c(phi[1], phi[2], log(as.numeric(phi[2])/as.numeric(phi[1]))), phi_lower_CI = c(pbounds0[1], pbounds1[1], plogbounds[1]), phi_upper_CI = c(pbounds0[2], pbounds1[2], plogbounds[2]), zeta = c(zeta[1], zeta[2], log(as.numeric(zeta[2])/as.numeric(zeta[1]))), zeta_lower_CI = c(zbounds0[1], zbounds1[1], zlogbounds[1]), zeta_upper_CI = c(zbounds0[2], zbounds1[2], zlogbounds[2])) } return(results) } #deprecated version of PIRbootstrappair that didn't use our general purpose #data generating code PIRbootstrappair_old <- function(nObs, phi, zeta, active, rest, K, iterations, alpha, seed = NULL){ # set seed if one is supplied if (!is.null(seed)) set.seed(seed) #the total length of the observation is the total interval length times the number of intervals L = (active + rest) * K # Simulate first set of parms sampleData0 <- generatePIRData(nObs = nObs[1], phi = phi[1], zeta = zeta[1], active = active, K = K, rest = rest, iterations = iterations) # Returns a dataframe of estimates of phi and zeta ests0 <- with(sampleData0, PIR_inv(ExY = mean, VarY = variance, nObs = nObs[1], active = active, K = K, L = L)) # get the confidence interval bounds for the first sample pbounds0 <- stats::quantile(ests0$phi, probs = c(alpha/2, 1-alpha/2)) zbounds0 <- stats::quantile(ests0$zeta, probs = c(alpha/2, 1-alpha/2)) # simulate second set of parms sampleData1 <- generatePIRData(nObs = nObs[2], phi = phi[2], zeta = zeta[2], active = active, K = K, rest = rest, iterations = iterations) ests1 <- with(sampleData1, PIR_inv(ExY = mean, VarY = variance, nObs = nObs[2], active = active, K = K, L = L)) pbounds1 <- stats::quantile(ests1$phi, probs = 
c(alpha/2, 1-alpha/2))
  zbounds1 <- stats::quantile(ests1$zeta, probs = c(alpha/2, 1-alpha/2))

  # calculate log ratio value and confidence interval bounds
  plogratio <- log(ests1$phi/ests0$phi)
  plogbounds <- stats::quantile(plogratio, probs = c(alpha/2, 1-alpha/2))

  zlogratio <- log(ests1$zeta/ests0$zeta)
  zlogbounds <- stats::quantile(zlogratio, probs = c(alpha/2, 1-alpha/2))

  results <- cbind(phi = c(phi[1], phi[2], log(as.numeric(phi[2])/as.numeric(phi[1]))),
                   phi_lower_CI = c(pbounds0[1], pbounds1[1], plogbounds[1]),
                   phi_upper_CI = c(pbounds0[2], pbounds1[2], plogbounds[2]),
                   zeta = c(zeta[1], zeta[2], log(as.numeric(zeta[2])/as.numeric(zeta[1]))),
                   zeta_lower_CI = c(zbounds0[1], zbounds1[1], zlogbounds[1]),
                   zeta_upper_CI = c(zbounds0[2], zbounds1[2], zlogbounds[2]))

  return(results)
}

#' @title Moment estimator for prevalence and incidence, with bootstrap confidence intervals
#'
#' @description Estimates prevalence and incidence for two samples,
#' along with the ratios of each parameter, assuming that the behavior follows
#' an Alternating Poisson Process. Also provides bootstrap confidence intervals.
#'
#' @param PIR vector of PIR measurements
#' @param phase factor or vector indicating levels of the PIR measurements.
#' @param base_level a character string or value indicating the name of the baseline level.
#' @param intervals the number of intervals in the sample of observations
#' @param interval_length the total length of each interval
#' @param rest_length length of the portion of the interval devoted to recording. Default is \code{0}
#' @param Bootstraps desired number of bootstrap replicates. Default is \code{2000}
#' @param conf_level Desired coverage rate of the calculated confidence interval. Default is \code{.95}.
#' @param exponentiate a logical indicating whether the row corresponding to the ratio of treatment to baseline should be exponentiated, with the default as \code{FALSE}.
#' @param seed seed value set in order to make bootstrap results reproducible. Default is \code{NULL}
#'
#' @details The moment estimators are based on the assumption that the
#' underlying behavior stream follows an Alternating Poisson Process, in which both the
#' event durations and interim times are exponentially distributed.
#'
#' @details The \code{PIR} vector can be in any order corresponding to the factor or vector
#' \code{phase}. The levels of \code{phase} can be any two levels, such as "A" and "B",
#' "base" and "treat", or "0" and "1". If there are more than two levels in \code{phase}
#' this function will not work. A value for \code{base_level} must be specified - if it is a
#' character string it is case sensitive.
#'
#' \code{intervals}, \code{interval_length}, and \code{rest_length} are all single values that
#' are assumed to be held constant across both samples and all observation sessions.
#' If vectors of values are provided for these variables, it is assumed that the first value
#' in each vector is constant across all observations.
#'
#' \code{interval_length} This is the total length of each individual interval.
#' Sometimes a portion of the interval is set aside for recording purposes, in which case
#' \code{rest_length} should be set to the length of time devoted to recording.
#' The default assumption is that there is no recording time.
#' The length of time devoted to active observation is calculated as \code{interval_length - rest_length}.
#'
#' At the default setting of \code{Bootstraps = 2000}, PIR_MOM takes just under six seconds to run on an Intel Core i5-2410M processor.
#' #' @return A dataframe with six columns and three rows corresponding to baseline, treatment, #' and the log ratio or ratio (depending upon the value of \code{exponentiate}) of treatment to baseline #' #' @examples #' #' # Estimate prevalence and incidence ratios for Carl from the Moes dataset #' data(Moes) #' with(subset(Moes, Case == "Carl"), #' PIR_MOM(PIR = outcome, #' phase = Phase, #' intervals = intervals, #' interval_length = (active_length + rest_length), #' rest_length = rest_length, #' base_level = "No Choice", #' Bootstraps = 200, #' seed = 149568373)) #' #' @author Daniel Swan <dswan@@utexas.edu> #' @export PIR_MOM <- function(PIR, phase, base_level, intervals, interval_length, rest_length = 0, Bootstraps = 2000, conf_level = 0.95, exponentiate = FALSE, seed = NULL) { if(length(which(PIR > 1 | PIR < 0)) > 0 | sum(is.na(PIR)) > 0) { stop('Values for PIR must be between 0 and 1 and cannot be NA') } level_labels <- phase_validation(observations = PIR, phase = phase, base_level = base_level) # calculate summary statistics for both samples, sort so that base level is first nObs <- table(phase)[level_labels] means <- tapply(PIR, phase, mean)[level_labels] variances <- tapply(PIR, phase, stats::var)[level_labels] intervals <- intervals[1] interval_length <- interval_length[1] rest_length <- rest_length[1] if ((!all(means > 0) | !all(means < 1)) & is.na(intervals)) stop('One of the means is at the floor or ceiling of 0 or 1 and no value for intervals has been provided to perform truncation') #Get an estimate of phi and zeta using the moment estimator for baseline and treatment phases base_ests <- PIR_inv(ExY = means[1], VarY = variances[1], nObs = nObs[1], active = interval_length - rest_length, L = intervals * interval_length, K = intervals) treat_ests <- PIR_inv(ExY = means[2], VarY = variances[2], nObs = nObs[2], active = interval_length - rest_length, L = intervals * interval_length, K = intervals) #Bootstrap the confidence intervals results <- PIRbootstrappair(nObs = c(nObs[[1]], nObs[[2]]), phi = c(base_ests[[1]], treat_ests[[1]]), zeta = c(base_ests[[2]], treat_ests[[2]]), active = interval_length - rest_length, rest = rest_length, K = intervals, iterations = Bootstraps, alpha = 1-conf_level, exponentiate = exponentiate, seed = seed) #Set the row names appropriately based on the levels in the "phase" variable if(exponentiate){ row.names(results) <- c(base_level, levels(phase)[which(levels(phase) != base_level)], paste(levels(phase)[which(levels(phase) != base_level)], base_level, sep = "/")) }else{ row.names(results) <- c(base_level, levels(phase)[which(levels(phase) != base_level)], paste0("log(",paste(levels(phase)[which(levels(phase) != base_level)], base_level, sep = "/"),")")) } return(results) } #' Dunlap et al.(1994) data #' #' @description Single case design data measured with partial interval recording from a study of the effect of providing Choice between academic activities on the disruptive behavior of three elementary school students with emotional and behavioral disorders. For this data "No Choice" is the baseline phase. Data were extracted from the figures in the publication. #' #' @format A data frame with 58 observations on 7 variables #' #' \itemize{ #' \item \code{Case} The participant for whom the observation took place #' \item \code{Phase} The level of the observation ("Choice" vs. 
"No Choice") #' \item \code{Session} The observation session # for each participant #' \item \code{outcome} The summary PIR measurement for the observation session #' \item \code{active_length} The length of the active observation interval, in seconds #' \item \code{rest_length} The length of the recording interval, in seconds #' \item \code{intervals} The total number of intervals in the observation session #' } #' #' @docType data #' @keywords datasets #' @format A data frame with 53940 rows and 10 variables #' @name Dunlap #' @references #' Dunlap, G., DePerczel, M., Clarke, S., Wilson, D., Wright, S., White, R., & Gomez, A. (1994). Choice making to promote adaptive behavior for students with emotional and behavioral challenges. Journal of Applied Behavior Analysis, 27 (3), 505-518. NULL #' Moes(1998) data #' #' @description Single-case design data from a study that using partial interval recording (PIR) examining the impact of choice-making in a homework tutoring context on disruptive behavior. In this data "No Choice" is the baseline phase. Data were extracted from the figure in the publication. #' #' @format A data frame with 80 observations on 7 variables #' #' \itemize{ #' \item \code{Case} The participant for whom the observation took place #' \item \code{Phase} The level of the observation ("Choice" vs. "No Choice") #' \item \code{Session} The observation session # for each participant #' \item \code{outcome} The summary PIR measurement for the observation session #' \item \code{active_length} The length of the active observation interval, in seconds #' \item \code{rest_length} The length of the recording interval, in seconds #' \item \code{intervals} The total number of intervals in the observation session #' } #' #' @docType data #' @keywords datasets #' @format A data frame with 53940 rows and 10 variables #' @name Moes #' @references #' Moes, D. R. (1998). Integrating choice-making opportunities within teacher-assigned academic tasks to facilitate the performance of children with autism. Research and Practice for Persons with Severe Disabilities, 23 (4), 319-328. NULL
/scratch/gouwar.j/cran-all/cranData/ARPobservation/R/effect_size_estimators.R
#' @title Constructor for class \code{eq_dist} #' #' @description #' The \code{eq_dist} class consists of a pair of component functions for generating random variates from a #' specified distribution and the corresponding equilibrium distribution. #' #' @param r_gen function for generating random deviates. #' @param r_eq function for generating random deviates from the corresponding equilibrium distribution. #' #' @details Both functions must take arguments \code{n} and \code{mean}. Currently, the following distributions #' are implemented: #' \itemize{ #' \item \code{\link{F_exp}} - Exponential #' \item \code{\link{F_gam}} - Gamma #' \item \code{\link{F_gam_mix}} - Mixture of two gammas #' \item \code{\link{F_weib}} - Weibull #' \item \code{\link{F_unif}} - Uniform #' \item \code{\link{F_const}} - Constant #' } #' #' @return Object of class \code{eq_dist} with components \code{r_gen} and \code{r_eq}. #' #' @export eq_dist <- function(r_gen, r_eq) { eq_dist <- list(r_gen = r_gen, r_eq = r_eq) class(eq_dist) <- "eq_dist" return(eq_dist) } ## exponential distribution #### #' @title Exponential distribution and related equilibrium distribution #' #' @description #' Random number generation from exponential distributions, for use with \code{\link{r_behavior_stream}}. #' #' @return Object of class \code{\link{eq_dist}} with components \code{r_gen} and \code{r_eq}. #' #' The function \code{r_gen(n, mean)} generates random deviates from an exponential distribution with specified \code{mean}. #' #' The function \code{r_eq(n, mean)} generates random deviates from an exponential distribution with specified \code{mean}. #' #' @examples #' hist(F_exp()$r_gen(1000, 3)) #' hist(F_exp()$r_eq(1000, 3)) #' #' @export F_exp <- function() eq_dist(r_gen = function(n, mean) stats::rexp(n, rate = 1 / mean), r_eq = function(n, mean) stats::rexp(n, rate = 1 / mean)) ## gamma distribution #### pgamma_eq <- function(x, mean, shape) x / mean + stats::pgamma(x, shape = shape + 1, scale = mean / shape) - stats::pgamma(x, shape = shape, scale = mean / shape) * x / mean rgamma_eq <- function(n, mean, shape) mapply(function(p, m) stats::uniroot(function(y) p - pgamma_eq(y, mean = m, shape = shape), lower = 0, upper = m * 10^5)$root, p = stats::runif(n), m = mean) #' @title Gamma distribution and related equilibrium distribution #' #' @description #' Random number generation from a gamma distribution and the related equilibrium distribution, #' for use with \code{\link{r_behavior_stream}}. #' #' @param shape shape parameter #' #' @return Object of class \code{\link{eq_dist}} with components \code{r_gen} and \code{r_eq}. #' #' The function \code{r_gen(n, mean)} generates random deviates from a gamma distribution with specified #' \code{mean} and \code{shape} parameters. #' #' The function \code{r_eq(n, mean)} generates random deviates from the equilibrium distribution corresponding #' to the gamma distribution with specified \code{mean} and \code{shape} parameters. 
#' #' @examples #' hist(F_gam(2)$r_gen(1000, 3)) #' hist(F_gam(2)$r_eq(1000, 3)) #' #' @export F_gam <- function(shape) eq_dist(r_gen = function(n, mean) stats::rgamma(n, shape = shape, scale = mean / shape), r_eq = function(n, mean) rgamma_eq(n, mean, shape)) ## gamma mixture #### pgamma_mix_eq <- function(x, mean, shape1, shape2, scale_ratio, mix) { theta2 <- mean / (mix * shape1 * scale_ratio + (1 - mix) * shape2) theta1 <- scale_ratio * theta2 (mix * shape1 * theta1 * pgamma_eq(x, shape1 * theta1, shape1) + (1 - mix) * shape2 * theta2 * pgamma_eq(x, shape2 * theta2, shape2)) / mean } rgamma_mix_eq <- function(n, mean, shape1, shape2, scale_ratio, mix) mapply(function(p, m) stats::uniroot(function(y) p - pgamma_mix_eq(y, mean=m, shape1, shape2, scale_ratio, mix), lower = 0, upper = m * 10^5)$root, p = stats::runif(n), m = mean) #' @title Mixture of two gamma distributions and related equilibrium distribution #' #' @description #' Random number generation from a mixture of two gamma distributions and the related equilibrium distribution, #' for use with \code{\link{r_behavior_stream}}. #' #' @param shape1 shape parameter for first mixture component, \eqn{k_1} #' @param shape2 shape parameter for second mixture component, \eqn{k_2} #' @param scale_ratio ratio of first scale component to second scale component, \eqn{\theta_1 / \theta_2} #' @param mix mixing proportion of first component, \eqn{p} #' #' @return Object of class \code{\link{eq_dist}} with components \code{r_gen} and \code{r_eq}. #' #' The function \code{r_gen(n, mean)} generates random deviates from a mixture of two gamma distributions with specified #' \code{mean}, \code{shape1}, \code{shape2}, \code{scale_ratio}, and \code{mix}. The cumulative distribution function #' is given by \deqn{F(x) = p \Gamma(x; k_1, \theta_1) + (1 - p) \Gamma(x; k_2, \theta_2),} where \eqn{\Gamma(x; k, \theta)} #' is the cumulative distribution function of a Gamma random variable with shape \eqn{k} and scale \eqn{\theta}, and #' the scale parameters are determined by the specified \code{mean} and \code{scale_ratio}. #' #' The function \code{r_eq(n, mean)} generates random deviates from the equilibrium distribution corresponding #' to the mixture of gamma distributions. #' #' @examples #' hist(F_gam_mix(2, 2, 1 / 12, 3 / 5)$r_gen(1000, 20)) #' hist(F_gam_mix(2, 2, 1 / 12, 3 / 5)$r_eq(1000, 20)) #' #' @export F_gam_mix <- function(shape1, shape2, scale_ratio, mix) eq_dist(r_gen = function(n, mean) { m <- stats::rbinom(n, 1, mix) shape <- c(shape1, shape2)[2 - m] scale <- c(scale_ratio, 1)[2 - m] * mean / (mix * shape1 * scale_ratio + (1 - mix) * shape2) stats::rgamma(n, shape=shape, scale=scale) }, r_eq = function(n, mean) rgamma_mix_eq(n, mean, shape1, shape2, scale_ratio, mix)) ## Weibull distribution #### pweibull_eq <- function(x, mean, shape) { scale <- mean / gamma(1 + 1 / shape) stats::integrate(function(z) exp(-(z / scale)^shape), 0, x)$value / mean } rweibull_eq <- function(n, mean, shape) mapply(function(p, m) stats::uniroot(function(y) p - pweibull_eq(y, mean = m, shape = shape), lower = 0, upper = m * 10^3)$root, p = stats::runif(n), m = mean) #' @title Weibull distribution and related equilibrium distribution #' #' @description #' Random number generation from a Weibull distribution and the related equilibrium distribution, #' for use with \code{\link{r_behavior_stream}}. #' #' @param shape shape parameter #' #' @return Object of class \code{\link{eq_dist}} with components \code{r_gen} and \code{r_eq}. 
#'
#' The function \code{r_gen(n, mean)} generates random deviates from a Weibull distribution with specified
#' \code{mean} and \code{shape} parameters.
#'
#' The function \code{r_eq(n, mean)} generates random deviates from the equilibrium distribution corresponding
#' to the Weibull distribution with specified \code{mean} and \code{shape} parameters.
#'
#' @examples
#' hist(F_weib(2)$r_gen(1000, 3))
#' hist(F_weib(2)$r_eq(1000, 3))
#'
#' @export

F_weib <- function(shape)
  eq_dist(r_gen = function(n, mean) stats::rweibull(n, shape = shape, scale = mean / gamma(1 + 1 / shape)),
          r_eq = function(n, mean) rweibull_eq(n, mean, shape))

## uniform distribution on (0, 2 * mean) ####

#' @title Uniform distribution and related equilibrium distribution
#'
#' @description
#' Random number generation from a uniform distribution and the related equilibrium distribution,
#' for use with \code{\link{r_behavior_stream}}.
#'
#' @return Object of class \code{\link{eq_dist}} with components \code{r_gen} and \code{r_eq}.
#'
#' The function \code{r_gen(n, mean)} generates random deviates from a uniform distribution with specified
#' \code{mean} \eqn{\mu} on the interval \eqn{(0, 2 \mu)}. The cumulative distribution function
#' is given by \eqn{F(x) = x / 2 \mu}.
#'
#' The function \code{r_eq(n, mean)} generates random deviates from the equilibrium distribution corresponding
#' to a uniform distribution on the interval \eqn{(0, 2 \mu)}. The cumulative distribution function is given by
#' \deqn{F(x) = x (4 \mu - x) / (4 \mu^2).}
#'
#' @examples
#' hist(F_unif()$r_gen(1000, 2))
#' hist(F_unif()$r_eq(1000, 2))
#'
#' @export

F_unif <- function()
  eq_dist(r_gen = function(n, mean) stats::runif(n, min = 0, max = 2 * mean),
          r_eq = function(n, mean) 2 * mean * (1 - sqrt(1 - stats::runif(n))))

## constant (degenerate) distribution ####

#' @title Constant (degenerate) distribution and related equilibrium distribution
#'
#' @description
#' Generation from a degenerate distribution and random number generation from the related equilibrium distribution,
#' for use with \code{\link{r_behavior_stream}}.
#'
#' @return Object of class \code{\link{eq_dist}} with components \code{r_gen} and \code{r_eq}.
#'
#' The function \code{r_gen(n, mean)} simply returns a vector of length \code{n} with all values equal to \code{mean}.
#'
#' The function \code{r_eq(n, mean)} generates random deviates from a uniform distribution on the interval (0, mean).
#'
#' @examples
#' hist(F_const()$r_gen(1000, 2))
#' hist(F_const()$r_eq(1000, 2))
#'
#' @export

F_const <- function()
  eq_dist(r_gen = function(n, mean) rep(mean, length.out = n),
          r_eq = function(n, mean) stats::runif(n, min=0, max=mean))
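
# Quick Monte Carlo sanity check (illustrative, not part of the package): for
# a renewal process whose interval distribution has mean mu and second moment
# E[X^2], the equilibrium distribution has mean E[X^2] / (2 * mu). For a gamma
# distribution with shape k and mean mu, E[X^2] = mu^2 * (1 + 1/k), so the
# equilibrium mean is mu * (1 + 1/k) / 2; with k = 2 and mu = 3 this is 2.25.
#
# set.seed(1)
# mean(F_gam(2)$r_gen(1e4, 3))  # should be close to 3
# mean(F_gam(2)$r_eq(1e4, 3))   # should be close to 2.25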
/scratch/gouwar.j/cran-all/cranData/ARPobservation/R/equilibrium_distributions.R
## continuous duration recording ####

CDR_single <- function(b_stream, stream_length) {
  start_state <- b_stream$start_state
  switches <- length(b_stream$b_stream)
  if (switches > 1) {
    duration <- ((if (!start_state==(switches %% 2)) stream_length else 0)
                 + sum(b_stream$b_stream[seq(2 - start_state, switches, 2)])
                 - sum(b_stream$b_stream[seq(1 - start_state, switches, 2)])) / stream_length
  } else if (switches == 1) {
    duration <- ((2 * start_state - 1) * b_stream$b_stream
                 + (1 - start_state) * stream_length) / stream_length
  } else {
    duration <- start_state
  }
  return(duration)
}

#' @title Applies continuous duration recording to a behavior stream
#'
#' @description
#' Calculates the proportion of session time during which behavior occurs.
#'
#' @param BS object of class \code{behavior_stream}
#'
#' @export
#'
#' @return Vector of proportions.
#'
#' @examples
#' BS <- r_behavior_stream(n = 5, mu = 3, lambda = 10,
#'                         F_event = F_exp(), F_interim = F_exp(), stream_length = 100)
#' continuous_duration_recording(BS)

continuous_duration_recording <- function(BS)
  sapply(BS$b_streams, CDR_single, stream_length = BS$stream_length)

## momentary time recording ####

MTS_single <- function(b_stream, moments) {
  obs_pos <- findInterval(moments, vec=b_stream$b_stream)
  (obs_pos %% 2) != b_stream$start_state
}

#' @title Applies momentary time recording to a behavior stream
#'
#' @description Evaluates the presence or absence of the behavior at fixed moments in time.
#'
#' @param BS object of class behavior_stream
#' @param interval_length length of interval between moments.
#' @param summarize logical value indicating whether vector of moments should be summarized by taking their mean.
#'
#' @export
#'
#' @return If \code{summarize = FALSE}, a matrix with one row per moment (that is,
#' \code{floor(stream_length / interval_length) + 1} rows, including the moment at time zero)
#' and one column per behavior stream in \code{BS}. If \code{summarize = TRUE}, a vector of
#' proportions of length equal to the number of behavior streams in \code{BS}. Note that if
#' \code{summarize = TRUE}, the initial state of the behavior stream is excluded when
#' calculating the mean, so each proportion is based on
#' \code{floor(stream_length / interval_length)} values.
#'
#' @examples
#' BS <- r_behavior_stream(n = 5, mu = 3, lambda = 10,
#'                         F_event = F_exp(), F_interim = F_exp(), stream_length = 100)
#' momentary_time_recording(BS, interval_length = 20, FALSE)
#' momentary_time_recording(BS, interval_length = 20)
#' colMeans(momentary_time_recording(BS, 20, FALSE)[-1,])

momentary_time_recording <- function(BS, interval_length, summarize = TRUE) {
  moments <- seq(interval_length * summarize, BS$stream_length, interval_length)
  MTS <- sapply(BS$b_streams, MTS_single, moments = moments)
  if (summarize) colMeans(MTS) else MTS
}

## event counting ####

#' @title Applies event counting to a behavior stream
#'
#' @description
#' Calculates the number of behaviors that begin during the observation session.
#'
#' @param BS object of class \code{behavior_stream}
#'
#' @export
#'
#' @return Vector of non-negative integers.
#'
#' @examples
#' BS <- r_behavior_stream(n = 5, mu = 3, lambda = 10,
#'                         F_event = F_exp(), F_interim = F_exp(), stream_length = 100)
#' event_counting(BS)

event_counting <- function(BS)
  sapply(BS$b_streams, function(x) floor((length(x$b_stream) + 1 - x$start_state) / 2))

## interval recording ####

IntRec_single <- function(b_stream, start_time, end_time, partial = TRUE) {
  start_event <- findInterval(start_time, vec=b_stream$b_stream)
  end_event <- findInterval(end_time, vec=b_stream$b_stream)
  WIR <- (start_event == end_event) * ((start_event + 1 - partial) %% 2 == b_stream$start_state)
  if (partial) 1 - WIR else WIR
}

#' @title Applies interval recording to a behavior stream
#'
#' @description Divides the observation session into a specified number of intervals. For partial interval recording,
#' each interval is scored according to whether the behavior is present at any point during the interval. For whole
#' interval recording, each interval is scored according to whether the behavior is present for the duration.
#'
#' @param BS object of class behavior_stream
#' @param interval_length time length of each interval.
#' @param rest_length portion of each interval to exclude from observation. Default is zero. See details.
#' @param partial logical value indicating whether to use partial interval recording (\code{TRUE}) or
#' whole interval recording (\code{FALSE}).
#' @param summarize logical value indicating whether the vector of interval scores should be summarized by taking its mean.
#'
#' @details
#' Each behavior stream is divided into intervals of length \code{interval_length}.
#' The last \code{rest_length} of each interval is excluded from observation.
#' For example, for a stream length of 100, \code{interval_length = 20}, and
#' \code{rest_length = 5}, the first interval runs from [0,15), the second interval runs from [20,35), etc.
#'
#' @export
#'
#' @return If \code{summarize = FALSE}, a matrix with rows equal to the number of intervals per session and
#' columns equal to the number of behavior streams in \code{BS}.
#' If \code{summarize = TRUE}, a vector of proportions of length equal to the
#' number of behavior streams in \code{BS}.
#'
#' @examples
#' BS <- r_behavior_stream(n = 5, mu = 3, lambda = 10,
#'                         F_event = F_exp(), F_interim = F_exp(), stream_length = 100)
#' interval_recording(BS, interval_length = 20, partial = TRUE, summarize = FALSE)
#' interval_recording(BS, interval_length = 20, partial = TRUE, summarize = TRUE)
#' colMeans(interval_recording(BS, 20, partial = TRUE, summarize = FALSE))
#' interval_recording(BS, interval_length = 20, rest_length = 5, partial = FALSE)

interval_recording <- function(BS, interval_length, rest_length = 0, partial = TRUE, summarize = TRUE) {
  n_intervals <- floor(BS$stream_length / interval_length)
  start_time <- interval_length * (0:(n_intervals - 1))
  end_time <- start_time + interval_length - rest_length
  IR <- sapply(BS$b_streams, IntRec_single, start_time = start_time, end_time = end_time, partial = partial)
  if (summarize) colMeans(IR) else IR
}

## Process behavior stream using multiple procedures ####

#' @title Applies multiple recording procedures to a behavior stream
#'
#' @description This is a convenience function that allows multiple recording procedures to be applied
#' to a single behavior stream. Results are reported either per behavior stream or as summary statistics, averaged
#' over multiple behavior streams.
#'
#' @param BS object of class behavior_stream
#' @param data_types list of recording procedures to apply to the behavior stream. See details.
#' @param interval_length time length of each interval used to score momentary time recording and interval recording procedures.
#' @param rest_length portion of each interval to exclude from observation for interval recording.
#' See documentation for \code{\link{interval_recording}}.
#' @param n_aggregate number of observations over which to calculate summary statistics.
#'
#' @details
#' The following recording procedures are currently implemented
#' \itemize{
#' \item \code{C} - continuous duration recording
#' \item \code{M} - momentary time recording
#' \item \code{E} - event counting
#' \item \code{P} - partial interval recording
#' \item \code{W} - whole interval recording
#' }
#'
#' @export
#'
#' @return If \code{n_aggregate = 1}, a data frame with one row per behavior stream in \code{BS}
#' and one column per procedure listed in \code{data_types}. If \code{n_aggregate > 1}, a list
#' containing two data frames: one with sample means
#' and one with sample variances, both taken across \code{n_aggregate} behavior streams.
#'
#' @examples
#' BS <- r_behavior_stream(n = 50, mu = 3, lambda = 10,
#'                         F_event = F_exp(), F_interim = F_exp(), stream_length = 100)
#' reported_observations(BS, interval_length = 10)
#' reported_observations(BS, interval_length = 10, n_aggregate = 5)

reported_observations <- function(BS, data_types = c("C","M","E","P","W"),
                                  interval_length = 1, rest_length = 0, n_aggregate = 1) {

  n <- length(BS$b_streams)
  recorded <- data.frame(matrix(NA, n, length(data_types)))
  names(recorded) <- data_types

  if ("C" %in% data_types) recorded$C <- continuous_duration_recording(BS)
  if ("M" %in% data_types) recorded$M <- momentary_time_recording(BS, interval_length)
  if ("E" %in% data_types) recorded$E <- event_counting(BS)
  if ("P" %in% data_types) recorded$P <- interval_recording(BS, interval_length, rest_length, partial = TRUE)
  if ("W" %in% data_types) recorded$W <- interval_recording(BS, interval_length, rest_length, partial = FALSE)

  if (n_aggregate == 1) return(recorded) else {
    groups <- list(rep(1:(n / n_aggregate), each = n_aggregate))
    recorded_mean <- stats::aggregate(recorded, groups, mean)
    recorded_var <- stats::aggregate(recorded, groups, stats::var)
    return(list(mean = recorded_mean[,-1], var = recorded_var[,-1]))
  }
}

## augmented interval recording ####

augmented_recording_single <- function(b_stream, moments, start_time, end_time) {
  MTS <- MTS_single(b_stream, moments)
  PIR <- IntRec_single(b_stream, start_time, end_time, partial = TRUE)
  WIR <- IntRec_single(b_stream, start_time, end_time, partial = FALSE)
  cbind(MTS = MTS, PIR = c(NA, PIR), WIR = c(NA, WIR))
}

#' @title Applies augmented interval recording to a behavior stream
#'
#' @description Divides the observation session into intervals.
#' Each interval is scored using partial interval recording, whole interval recording, and
#' momentary time sampling (at the beginning of the following interval).
#'
#' @param BS object of class behavior_stream
#' @param interval_length time length of each interval.
#' @param rest_length portion of each interval to exclude from observation. Default is zero. See details.
#'
#' @details
#' Each behavior stream is divided into intervals of length \code{interval_length}.
#' The last \code{rest_length} of each interval is excluded from observation.
#' For example, for a stream length of 100, \code{interval_length = 20}, and
#' \code{rest_length = 5}, the first interval runs from [0,15), the second interval runs from [20,35), etc.
#'
#' @export
#'
#' @return A three-dimensional array. The first dimension indexes the intervals
#' (including an initial entry for the moment at time zero, where the interval
#' scores are \code{NA}), the second dimension indexes the recording procedures
#' (\code{MTS}, \code{PIR}, \code{WIR}), and the third dimension indexes the
#' behavior streams in \code{BS}.
#'
#' @examples
#' BS <- r_behavior_stream(n = 5, mu = 3, lambda = 10,
#'                         F_event = F_exp(), F_interim = F_exp(), stream_length = 100)
#' augmented_recording(BS, interval_length = 20)

augmented_recording <- function(BS, interval_length, rest_length = 0) {
  moments <- seq(0, BS$stream_length, interval_length)
  n_intervals <- floor(BS$stream_length / interval_length)
  start_time <- interval_length * (0:(n_intervals - 1))
  end_time <- start_time + interval_length - rest_length
  AIR_dat <- sapply(BS$b_streams, augmented_recording_single,
                    moments = moments, start_time = start_time, end_time = end_time)
  replicates <- length(BS$b_streams)
  array(AIR_dat, dim = c(n_intervals + 1, 3, replicates),
        dimnames = list(interval = 0:n_intervals, proc = c("MTS","PIR","WIR"), rep = 1:replicates))
}

# ## intermittent transition recording ####
#
# #' @title Applies intermittent transition recording to a behavior stream
# #'
# #' @description Divides the observation session into intervals.
# #'
# #' @param BS object of class behavior_stream
# #' @param interval_length time length of each interval.
# #' @param rest_length portion of each interval to exclude from observation. Default is zero. See details.
# #'
# #' @details
# #' Each behavior stream is divided into intervals of length \code{interval_length}.
# #' The last \code{rest_length} of each interval is excluded from observation.
# #' For example, for a stream length of 100, \code{interval_length = 20}, and
# #' \code{rest_length = 5}, the first interval runs from [0,15), the second interval runs from [20,35), etc.
# #'
# #' @export
# #'
# #' @return A matrix with length \code{n_intervals} and width equal to the number
# #' of behavior streams in \code{BS}.
# #'
# #' @examples
# #' BS <- r_behavior_stream(n = 5, mu = 3, lambda = 10,
# #'                         F_event = F_exp(), F_interim = F_exp(), stream_length = 100)
# #' augmented_recording(BS, 30)
#
# intermittent_transition_recording <- function(BS, n_intervals, rest_proportion = 0) {
#   MTS <- momentary_time_recording(BS, n_intervals, summarize = FALSE)
# }
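
# Illustrative comparison (not part of the package): with exponential event
# durations and interim times, the true prevalence is mu / (mu + lambda), here
# 3 / 13 = 0.231. Continuous duration recording and momentary time recording
# estimate prevalence without systematic bias, while partial interval
# recording overstates it and whole interval recording understates it.
#
# set.seed(2)
# BS <- r_behavior_stream(n = 1000, mu = 3, lambda = 10,
#                         F_event = F_exp(), F_interim = F_exp(),
#                         stream_length = 100)
# colMeans(reported_observations(BS, data_types = c("C", "M", "P", "W"),
#                                interval_length = 10))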
/scratch/gouwar.j/cran-all/cranData/ARPobservation/R/observation_method_filters.R
#-------------------------------------------- # Plot method for behavior_stream objects #-------------------------------------------- get_segments <- function(bs, stream_length) { transitions <- c(0, bs$b_stream, stream_length) s <- trunc((length(bs$b_stream) + bs$start_state + 1) / 2) start_time <- transitions[2 * (1:s) - bs$start_state] end_time <- transitions[2 * (1:s) - bs$start_state + 1] data.frame(stream = bs$index, start_time, end_time) } #' @title Plot method for \code{behavior_stream} objects #' #' @description Creates a graphical representation of a set of simulated #' behavior streams. #' #' @param x object of class \code{behavior_stream} #' @param session_color character string indicating the color of the lines that #' represent session time. Default is black. #' @param episode_color character string indicating the color of the bars that #' represent episode durations. Default is blue. #' @param episode_thickness numeric value indicating the thickness of the bars #' that represent episode durations. Default is 2. #' @param ... Further arguments, not used for this method. #' #' @details The plot is created using \code{ggplot} from the ggplot2 package, #' which must be installed. #' @export #' #' @return An object of class \code{ggplot}. #' #' @examples #' #' if (requireNamespace("ggplot2", quietly = TRUE)) { #' b_streams <- r_behavior_stream(n = 5, mu = 3, lambda = 10, #' F_event = F_exp(), F_interim = F_exp(), #' stream_length = 100) #' plot(b_streams) #' } #' plot.behavior_stream <- function(x, session_color = "black", episode_color = "blue", episode_thickness = 2, ...) { if (!requireNamespace("ggplot2", quietly = TRUE)) { stop("This function needs the ggplot2 package to work. Please install it.", call. = FALSE) } streams <- length(x$b_streams) stream_dat <- data.frame(stream = 1:streams, start_time = 0, end_time = x$stream_length) for (i in 1:streams) x$b_streams[[i]]$index <- i segment_dat <- lapply(x$b_streams, get_segments, stream_length = x$stream_length) segment_dat <- do.call(rbind, segment_dat) ggplot2::ggplot(stream_dat, ggplot2::aes_string(x = "start_time", xend = "end_time", y = "stream", yend = "stream")) + ggplot2::geom_segment(color = session_color) + ggplot2::scale_y_discrete(breaks = 1:streams) + ggplot2::coord_cartesian(xlim = c(0, x$stream_length)) + ggplot2::geom_segment(data = segment_dat, color = episode_color, size = episode_thickness) + ggplot2::labs(x = "session time", y = "stream") + ggplot2::theme_minimal() }
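
# Illustrative usage (assumes ggplot2 is installed): plot() returns a ggplot
# object, so the appearance arguments above can be combined with further
# ggplot2 layers.
#
# b_streams <- r_behavior_stream(n = 5, mu = 3, lambda = 10,
#                                F_event = F_exp(), F_interim = F_exp(),
#                                stream_length = 100)
# p <- plot(b_streams, episode_color = "darkred", episode_thickness = 3)
# p + ggplot2::ggtitle("Five simulated behavior streams")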
/scratch/gouwar.j/cran-all/cranData/ARPobservation/R/plot_methods.R
## generate single behavior stream ####

r_behavior_stream_single <- function(mu, lambda, F_event, F_interim, stream_length,
                                     equilibrium, p0, tuning) {

  # initial condition
  start_state <- stats::rbinom(1, 1, p0)

  # draw initial time
  if (equilibrium) {
    if (start_state) {
      b_stream <- F_event$r_eq(1, mu)
    } else {
      b_stream <- F_interim$r_eq(1, lambda)
    }
  } else {
    if (start_state) {
      b_stream <- F_event$r_gen(1, mu)
    } else {
      b_stream <- F_interim$r_gen(1, lambda)
    }
  }

  cum_length <- b_stream
  cum_size <- 1

  # add event durations and interim times until total length exceeds stream length
  while (cum_length < stream_length) {

    # generate random event durations and interim times
    extend_size <- ceiling(tuning * (stream_length - cum_length) / (mu + lambda))
    event_times <- F_event$r_gen(n=extend_size, mean = mu)
    interim_times <- F_interim$r_gen(n=extend_size, mean = lambda)

    # lengthen behavior stream vector
    b_stream <- append(b_stream, cum_length + cumsum(
      if (start_state) c(rbind(interim_times, event_times))
      else c(rbind(event_times, interim_times))))

    # update totals
    cum_size <- cum_size + 2 * extend_size
    cum_length <- b_stream[cum_size]
  }

  list(start_state=start_state, b_stream = b_stream[b_stream < stream_length])
}

## generate behavior streams ####

#' @title Generates random behavior streams
#'
#' @description
#' Random generation of behavior streams (based on an alternating
#' renewal process) of a specified length and with specified mean event
#' durations, mean interim times, event distribution, and interim distribution.
#'
#' @param n number of behavior streams to generate
#' @param mu vector of mean event durations
#' @param lambda vector of mean interim times
#' @param F_event distribution of event durations. Must be of class \code{\link{eq_dist}}.
#' @param F_interim distribution of interim times. Must be of class \code{\link{eq_dist}}.
#' @param stream_length length of behavior stream
#' @param equilibrium logical; if \code{TRUE}, then equilibrium initial conditions are used;
#' if \code{FALSE}, then \code{p0} is used to determine initial state and normal generating
#' distributions are used for event durations and interim times.
#' @param p0 vector of initial state probabilities. Only used if \code{equilibrium = FALSE}, in which case
#' default is zero (i.e., behavior stream always starts with an interim time).
#' @param tuning controls the size of the chunk of random event durations and interim times.
#' Adjusting this may be useful in order to speed up computation.
#'
#' @details Generates behavior streams by repeatedly drawing random event durations and
#' random interim times from the distributions as specified, until the sum of the durations and interim
#' times exceeds the requested stream length. The vectors \code{mu}, \code{lambda}, and \code{p0} are
#' recycled to length \code{n}.
#' @export
#'
#' @return An object of class \code{behavior_stream} containing two elements.
#'
#' @examples
#' # default equilibrium initial conditions
#' r_behavior_stream(n = 5, mu = 3, lambda = 10,
#'                   F_event = F_exp(), F_interim = F_exp(),
#'                   stream_length = 100)
#'
#' # non-equilibrium initial conditions
#' r_behavior_stream(n = 5, mu = 3, lambda = 10,
#'                   F_event = F_gam(3), F_interim = F_gam(3),
#'                   stream_length = 100,
#'                   equilibrium = FALSE, p0 = 0.5)

r_behavior_stream <- function(n, mu, lambda, F_event, F_interim, stream_length,
                              equilibrium = TRUE, p0 = 0, tuning = 2) {

  mu_vec <- rep(mu, length.out = n)
  lambda_vec <- rep(lambda, length.out = n)

  p0_vec <- if (equilibrium) mu_vec / (mu_vec + lambda_vec) else rep(p0, length.out = n)

  BS <- list(stream_length = stream_length,
             b_streams = mapply(r_behavior_stream_single,
                                mu = mu_vec, lambda = lambda_vec, p0 = p0_vec,
                                MoreArgs = list(F_event = F_event,
                                                F_interim = F_interim,
                                                stream_length = stream_length,
                                                equilibrium = equilibrium,
                                                tuning = tuning),
                                SIMPLIFY = FALSE))
  class(BS) <- "behavior_stream"
  return(BS)
}

#' @title Generates random partial interval recording behavior streams
#'
#' @description
#' Random generation of behavior streams (based on an alternating
#' renewal process) of a specified length and with specified mean event
#' durations, mean interim times, event distribution, and interim distribution,
#' which are then coded as partial interval recording data with given interval length
#' and rest length.
#'
#' @param n number of behavior streams to generate
#' @param mu mean event duration
#' @param lambda mean interim time
#' @param stream_length length of behavior stream
#' @param F_event distribution of event durations. Must be of class \code{\link{eq_dist}}.
#' @param F_interim distribution of interim times. Must be of class \code{\link{eq_dist}}.
#' @param interval_length total interval length
#' @param rest_length length of any recording time in each interval
#' @param summarize logical value indicating whether the behavior streams should be summarized by taking their mean
#' @param equilibrium logical; if \code{TRUE}, then equilibrium initial conditions are used;
#' if \code{FALSE}, then \code{p0} is used to determine initial state and normal generating
#' distributions are used for event durations and interim times.
#' @param p0 Initial state probability. Only used if \code{equilibrium = FALSE}, in which case
#' default is zero (i.e., behavior stream always starts with an interim time).
#' @param tuning controls the size of the chunk of random event durations and interim times.
#' Adjusting this may be useful in order to speed up computation.
#'
#' @details Generates behavior streams by repeatedly drawing random event durations and
#' random interim times from the distributions as specified, until the sum of the durations and interim
#' times exceeds the requested stream length. Then applies a partial interval recording filter to the generated behavior streams.
#'
#' @return If \code{summarize = FALSE}, a matrix with rows equal to \code{n} and a number of columns equal to the number of intervals per session. If \code{summarize = TRUE}, a vector of means of length \code{n}.
#' @export
#' 
#' @examples
#' 
#' # An unsummarized set of PIR observations
#' r_PIR(n = 5, mu = 2, lambda = 4, stream_length = 20, 
#'       F_event = F_exp(), F_interim = F_exp(), 
#'       interval_length = 1, rest_length = 0)
#' 
#' # A summarized set of PIR observations
#' r_PIR(n = 5, mu = 2, lambda = 4, stream_length = 20, 
#'       F_event = F_exp(), F_interim = F_exp(), 
#'       interval_length = 1, rest_length = 0,
#'       summarize = TRUE)
#' 
#' @author Daniel Swan <dswan@@utexas.edu>

r_PIR <- function(n, mu, lambda, stream_length, F_event, F_interim, 
                  interval_length, rest_length = 0, summarize = FALSE, 
                  equilibrium = TRUE, p0 = 0, tuning = 2) {
  
  if (equilibrium) p0 <- mu / (mu + lambda)
  
  n_intervals <- floor(stream_length / interval_length)
  start_time <- interval_length * (0:(n_intervals - 1))
  end_time <- start_time + interval_length - rest_length
  
  samples <- replicate(n, {
    BS <- r_behavior_stream_single(mu = mu, lambda = lambda, 
                                   F_event = F_event, F_interim = F_interim, 
                                   stream_length = stream_length, 
                                   equilibrium = equilibrium, p0 = p0, tuning = tuning)
    IntRec_single(b_stream = BS, start_time = start_time, end_time = end_time)
  })
  
  if (summarize) colMeans(samples) else t(samples)
}


#' @title Generates random whole interval recording behavior streams
#' 
#' @description
#' Random generation of behavior streams (based on an alternating
#' renewal process) of a specified length and with specified mean event 
#' durations, mean interim times, event distribution, and interim distribution, 
#' which are then coded as whole interval recording data with given interval length 
#' and rest length.
#' 
#' @param n number of behavior streams to generate
#' @param mu mean event duration
#' @param lambda mean interim time
#' @param stream_length length of behavior stream
#' @param F_event distribution of event durations. Must be of class \code{\link{eq_dist}}.
#' @param F_interim distribution of interim times. Must be of class \code{\link{eq_dist}}.
#' @param interval_length total interval length
#' @param rest_length length of any recording time in each interval
#' @param summarize logical value indicating whether the behavior streams should be summarized by taking their mean
#' @param equilibrium logical; if \code{TRUE}, then equilibrium initial conditions are used; 
#' if \code{FALSE}, then \code{p0} is used to determine the initial state and the regular generating 
#' distributions are used for the initial event duration and interim time.
#' @param p0 Initial state probability. Only used if \code{equilibrium = FALSE}, in which case 
#' the default is zero (i.e., the behavior stream always starts with an interim time).
#' @param tuning controls the size of the chunk of random event durations and interim times.
#' Adjusting this may be useful in order to speed computation time.
#' 
#' @details Generates behavior streams by repeatedly drawing random event durations and 
#' random interim times from the distributions as specified, until the sum of the durations and interim 
#' times exceeds the requested stream length. Then applies a whole interval recording filter to the generated behavior streams.
#' 
#' @return If \code{summarize = FALSE}, a matrix with rows equal to \code{n} and a number of columns equal to the number of intervals per session. If \code{summarize = TRUE}, a vector of means of length \code{n}.
#' @export
#' 
#' @examples
#' 
#' # An unsummarized set of WIR observations
#' r_WIR(n = 5, mu = 2, lambda = 4, stream_length = 20, 
#'       F_event = F_exp(), F_interim = F_exp(), 
#'       interval_length = 1, rest_length = 0)
#' 
#' # A summarized set of WIR observations
#' r_WIR(n = 5, mu = 2, lambda = 4, stream_length = 20, 
#'       F_event = F_exp(), F_interim = F_exp(), 
#'       interval_length = 1, rest_length = 0,
#'       summarize = TRUE)
#' 
#' @author Daniel Swan <dswan@@utexas.edu>

r_WIR <- function(n, mu, lambda, stream_length, F_event, F_interim, 
                  interval_length, rest_length = 0, summarize = FALSE, 
                  equilibrium = TRUE, p0 = 0, tuning = 2) {
  
  if (equilibrium) p0 <- mu / (mu + lambda)
  
  n_intervals <- floor(stream_length / interval_length)
  start_time <- interval_length * (0:(n_intervals - 1))
  end_time <- start_time + interval_length - rest_length
  
  samples <- replicate(n, {
    BS <- r_behavior_stream_single(mu = mu, lambda = lambda, 
                                   F_event = F_event, F_interim = F_interim, 
                                   stream_length = stream_length, 
                                   equilibrium = equilibrium, p0 = p0, tuning = tuning)
    IntRec_single(b_stream = BS, start_time = start_time, end_time = end_time, partial = FALSE)
  })
  
  if (summarize) colMeans(samples) else t(samples)
}


#' @title Generates random momentary time sampling behavior streams
#' 
#' @description
#' Random generation of behavior streams (based on an alternating
#' renewal process) of a specified length and with specified mean event 
#' durations, mean interim times, event distribution, and interim distribution, 
#' which are then coded as momentary time sampling data with given interval length 
#' between moments.
#' 
#' @param n number of behavior streams to generate
#' @param mu mean event duration
#' @param lambda mean interim time
#' @param stream_length length of behavior stream
#' @param F_event distribution of event durations. Must be of class \code{\link{eq_dist}}.
#' @param F_interim distribution of interim times. Must be of class \code{\link{eq_dist}}.
#' @param interval_length length of time between moments
#' @param summarize logical value indicating whether the vector of moments should be summarized by taking their mean, excluding the first moment in each row.
#' @param equilibrium logical; if \code{TRUE}, then equilibrium initial conditions are used; 
#' if \code{FALSE}, then \code{p0} is used to determine the initial state and the regular generating 
#' distributions are used for the initial event duration and interim time.
#' @param p0 Initial state probability. Only used if \code{equilibrium = FALSE}, in which case 
#' the default is zero (i.e., the behavior stream always starts with an interim time).
#' @param tuning controls the size of the chunk of random event durations and interim times.
#' Adjusting this may be useful in order to speed computation time.
#' 
#' @details Generates behavior streams by repeatedly drawing random event durations and 
#' random interim times from the distributions as specified, until the sum of the durations and interim 
#' times exceeds the requested stream length. Then applies a momentary time sampling filter to the generated behavior streams.
#' 
#' @return If \code{summarize = FALSE}, a matrix of logicals with rows equal to \code{n} and number of columns equal to \code{(stream_length/interval_length) + 1}. If \code{summarize = TRUE}, a vector of means of length \code{n}.
#' @export
#' 
#' @examples
#' 
#' # A set of unsummarized MTS observations
#' r_MTS(n = 5, mu = 2, lambda = 4, stream_length = 20, 
#'       F_event = F_exp(), F_interim = F_exp(), interval_length = 1)
#' 
#' # A set of summarized MTS observations
#' r_MTS(n = 5, mu = 2, lambda = 4, stream_length = 20, 
#'       F_event = F_exp(), F_interim = F_exp(), 
#'       interval_length = 1, summarize = TRUE)
#' 
#' @author Daniel Swan <dswan@@utexas.edu>

r_MTS <- function(n, mu, lambda, stream_length, F_event, F_interim, 
                  interval_length, summarize = FALSE, 
                  equilibrium = TRUE, p0 = 0, tuning = 2) {
  
  if (equilibrium) p0 <- mu / (mu + lambda)
  
  # when summarizing, start the sequence at interval_length so that the 
  # moment at time zero is excluded from the mean
  moments <- seq(interval_length * summarize, stream_length, interval_length)
  
  samples <- replicate(n, {
    BS <- r_behavior_stream_single(mu = mu, lambda = lambda, 
                                   F_event = F_event, F_interim = F_interim, 
                                   stream_length = stream_length, 
                                   equilibrium = equilibrium, p0 = p0, tuning = tuning)
    MTS_single(b_stream = BS, moments = moments)
  })
  
  if (summarize) colMeans(samples) else t(samples)
}


#' @title Generates random samples of continuously recorded behavior streams
#' 
#' @description
#' Random generation of behavior streams (based on an alternating
#' renewal process) of a specified length and with specified mean event 
#' durations, mean interim times, event distribution, and interim distribution, 
#' summarized as the total proportion of time the behavior of interest occurred.
#' 
#' @param n number of behavior streams to generate
#' @param mu mean event duration
#' @param lambda mean interim time
#' @param stream_length length of behavior stream
#' @param F_event distribution of event durations. Must be of class \code{\link{eq_dist}}.
#' @param F_interim distribution of interim times. Must be of class \code{\link{eq_dist}}.
#' @param equilibrium logical; if \code{TRUE}, then equilibrium initial conditions are used; 
#' if \code{FALSE}, then \code{p0} is used to determine the initial state and the regular generating 
#' distributions are used for the initial event duration and interim time.
#' @param p0 Initial state probability. Only used if \code{equilibrium = FALSE}, in which case 
#' the default is zero (i.e., the behavior stream always starts with an interim time).
#' @param tuning controls the size of the chunk of random event durations and interim times.
#' Adjusting this may be useful in order to speed computation time.
#' 
#' @details Generates behavior streams by repeatedly drawing random event durations and 
#' random interim times from the distributions as specified, until the sum of the durations and interim 
#' times exceeds the requested stream length. Then applies a continuous recording filter to the generated behavior streams.
#' 
#' @return A vector of proportions of length \code{n}.
#' @export
#' 
#' @examples
#' 
#' r_continuous_recording(n = 5, mu = 2, lambda = 4, stream_length = 20, 
#'                        F_event = F_exp(), F_interim = F_exp())
#' 
#' @author Daniel Swan <dswan@@utexas.edu>

r_continuous_recording <- function(n, mu, lambda, stream_length, F_event, F_interim, 
                                   equilibrium = TRUE, p0 = 0, tuning = 2) {
  
  if (equilibrium) p0 <- mu / (mu + lambda)
  
  samples <- replicate(n, {
    BS <- r_behavior_stream_single(mu = mu, lambda = lambda, 
                                   F_event = F_event, F_interim = F_interim, 
                                   stream_length = stream_length, 
                                   equilibrium = equilibrium, p0 = p0, tuning = tuning)
    CDR_single(b_stream = BS, stream_length = stream_length)
  })
  
  samples
}


#' @title Generates random samples of event counts
#' 
#' @description
#' Random generation of behavior streams (based on an alternating
#' renewal process) of a specified length and with specified mean event 
#' durations, mean interim times, event distribution, and interim distribution, 
#' summarized as the total number of behaviors that began during the recording 
#' session.
#' 
#' @param n number of behavior streams to generate
#' @param mu mean event duration
#' @param lambda mean interim time
#' @param stream_length length of behavior stream
#' @param F_event distribution of event durations. Must be of class \code{\link{eq_dist}}.
#' @param F_interim distribution of interim times. Must be of class \code{\link{eq_dist}}.
#' @param equilibrium logical; if \code{TRUE}, then equilibrium initial conditions are used; 
#' if \code{FALSE}, then \code{p0} is used to determine the initial state and the regular generating 
#' distributions are used for the initial event duration and interim time.
#' @param p0 Initial state probability. Only used if \code{equilibrium = FALSE}, in which case 
#' the default is zero (i.e., the behavior stream always starts with an interim time).
#' @param tuning controls the size of the chunk of random event durations and interim times.
#' Adjusting this may be useful in order to speed computation time.
#' 
#' @details Generates behavior streams by repeatedly drawing random event durations and 
#' random interim times from the distributions as specified, until the sum of the durations and interim 
#' times exceeds the requested stream length. Then applies an event counting filter to the generated behavior streams.
#' 
#' @return A vector of behavior counts of length \code{n}.
#' @export
#' 
#' @examples
#' 
#' r_event_counting(n = 5, mu = 2, lambda = 4, stream_length = 20, 
#'                  F_event = F_exp(), F_interim = F_exp())
#' 
#' @author Daniel Swan <dswan@@utexas.edu>

r_event_counting <- function(n, mu, lambda, stream_length, F_event, F_interim, 
                             equilibrium = TRUE, p0 = 0, tuning = 2) {
  
  if (equilibrium) p0 <- mu / (mu + lambda)
  
  samples <- replicate(n, {
    BS <- r_behavior_stream_single(mu = mu, lambda = lambda, 
                                   F_event = F_event, F_interim = F_interim, 
                                   stream_length = stream_length, 
                                   equilibrium = equilibrium, p0 = p0, tuning = tuning)
    # each element of b_stream marks a transition between states, so the count 
    # of events that begin during the session follows from the number of 
    # transitions and the initial state
    floor((length(BS$b_stream) + 1 - BS$start_state)/2)
  })
  
  samples
}


#' @title Generates random augmented interval recording behavior streams
#' 
#' @description
#' Random generation of behavior streams (based on an alternating
#' renewal process) of a specified length and with specified mean event 
#' durations, mean interim times, event distribution, and interim distribution, 
#' which are then coded as augmented interval recording data with given interval length 
#' and rest length.
#' 
#' @param n number of behavior streams to generate
#' @param mu mean event duration
#' @param lambda mean interim time
#' @param stream_length length of behavior stream
#' @param F_event distribution of event durations. Must be of class \code{\link{eq_dist}}.
#' @param F_interim distribution of interim times. Must be of class \code{\link{eq_dist}}.
#' @param interval_length total interval length
#' @param rest_length length of any recording time in each interval
#' @param equilibrium logical; if \code{TRUE}, then equilibrium initial conditions are used; 
#' if \code{FALSE}, then \code{p0} is used to determine the initial state and the regular generating 
#' distributions are used for the initial event duration and interim time.
#' @param p0 Initial state probability. Only used if \code{equilibrium = FALSE}, in which case 
#' the default is zero (i.e., the behavior stream always starts with an interim time).
#' @param tuning controls the size of the chunk of random event durations and interim times.
#' Adjusting this may be useful in order to speed computation time.
#' 
#' @details Generates behavior streams by repeatedly drawing random event durations and 
#' random interim times from the distributions as specified, until the sum of the durations and interim 
#' times exceeds the requested stream length. Then applies an augmented interval recording filter 
#' to the generated behavior streams.
#' 
#' @return An array with rows corresponding to the number of intervals per session and 
#' columns corresponding to MTS, PIR, and WIR records, replicated \code{n} times.
#' @export
#' 
#' @examples
#' 
#' r_AIR(n = 5, mu = 2, lambda = 4, stream_length = 20, 
#'       F_event = F_exp(), F_interim = F_exp(), 
#'       interval_length = 1, rest_length = 0)
#' 
#' @author James Pustejovsky <jepusto@@gmail.com>

r_AIR <- function(n, mu, lambda, stream_length, F_event, F_interim, 
                  interval_length, rest_length = 0, 
                  equilibrium = TRUE, p0 = 0, tuning = 2) {
  
  if (equilibrium) p0 <- mu / (mu + lambda)
  
  moments <- seq(0, stream_length, interval_length)
  n_intervals <- floor(stream_length / interval_length)
  start_time <- interval_length * (0:(n_intervals - 1))
  end_time <- start_time + interval_length - rest_length
  
  samples <- replicate(n, {
    BS <- r_behavior_stream_single(mu = mu, lambda = lambda, 
                                   F_event = F_event, F_interim = F_interim, 
                                   stream_length = stream_length, 
                                   equilibrium = equilibrium, p0 = p0, tuning = tuning)
    augmented_recording_single(b_stream = BS, moments = moments, 
                               start_time = start_time, end_time = end_time)
  })
  
  samples
}
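
## Illustrative sketch (not run; parameter values are arbitrary): the object 
## returned by r_behavior_stream() stores, for each stream, the initial state 
## and the cumulative times at which the behavior switches between states.
if (FALSE) {
  BS <- r_behavior_stream(n = 1, mu = 3, lambda = 10, 
                          F_event = F_exp(), F_interim = F_exp(), 
                          stream_length = 100)
  BS$b_streams[[1]]$start_state  # 1 if an event is occurring at time zero
  BS$b_streams[[1]]$b_stream     # increasing transition times, all < stream_length
}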
/scratch/gouwar.j/cran-all/cranData/ARPobservation/R/simulate_behavior_stream.R
################################## ## models for latent parameters ## ################################## # Generate AR(1) variates r_AR1 <- function(iterations, series_length, rho, sigma_sq) matrix(stats::rnorm(iterations * series_length), iterations, series_length) %*% chol(sigma_sq * rho^as.matrix(stats::dist(1:series_length))) # solve for conditional prevalence logit <- function(x) log(x) - log(1 - x) expit <- function(x) 1 / (1 + exp(-x)) E_logitnorm <- function(eta_star, sigma_sq) sapply(eta_star, function(eta_star) stats::integrate(function(v) expit(eta_star + v) * exp(-v^2 / (2 * sigma_sq)) / sqrt(2 * pi * sigma_sq), lower = -Inf, upper = Inf)$value) eta_star_phi <- function(marginal_mean, sigma_sq, interval = c(-1000,1000)) { n <- max(length(marginal_mean), length(sigma_sq)) marginal_mean <- rep(marginal_mean, length.out = n) sigma_sq <- rep(sigma_sq, length.out = n) mapply(function(marginal_mean, sigma_sq) stats::uniroot(function(x) E_logitnorm(x, sigma_sq) - marginal_mean, interval = interval)$root, marginal_mean = marginal_mean, sigma_sq = sigma_sq) } # generate random, dependent phi values r_phi_star <- function(iterations, series_length, phi_marg, rho, sigma_sq) { eta_cond <- rep(eta_star_phi(phi_marg, sigma_sq), length.out = series_length) nu <- t(r_AR1(iterations, series_length, rho, sigma_sq)) expit(eta_cond + nu) } # generate random, dependent zeta values r_zeta_star <- function(iterations, series_length, zeta_marg, rho, sigma_sq) { nu <- t(r_AR1(iterations, series_length, rho, sigma_sq)) - sigma_sq / 2 zeta_marg * exp(nu) } # smooth covariance matrix smooth_cov <- function(V) { n <- dim(V)[1] smooth_cov <- sapply(1:n, function(x) ifelse(x < n, mean(diag(V[x:n,1:(n-x+1)])), V[x,1])) matrix(smooth_cov[as.matrix(stats::dist(1:n, diag=TRUE)) + 1], n, n) / smooth_cov[1] }
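
## Illustrative consistency check (not run; values are hypothetical and assume 
## the functions above are loaded): eta_star_phi() numerically inverts 
## E_logitnorm(), so composing the two should recover the marginal means, and 
## r_phi_star() should produce prevalences whose per-period averages are close 
## to phi_marg.
if (FALSE) {
  sigma_sq <- 0.5
  phi_marg <- c(0.1, 0.3, 0.6)
  
  eta <- eta_star_phi(phi_marg, sigma_sq)
  E_logitnorm(eta, sigma_sq)   # approximately equal to phi_marg
  
  phi_draws <- r_phi_star(iterations = 1000, series_length = 3, 
                          phi_marg = phi_marg, rho = 0.5, sigma_sq = sigma_sq)
  rowMeans(phi_draws)          # also approximately equal to phi_marg
}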
/scratch/gouwar.j/cran-all/cranData/ARPobservation/R/simulate_latent_parameters.R
set.seed(10) # set parameters zeta <- 1 / 60 iterations <- 5000 phi <- seq(0.01, 0.99, 0.01) gen_dist <- c("Exponential-Exponential","Gamma(3)-Gamma(3)","Constant-Gamma(3)") dist_lookup <- list("Exponential-Exponential" = list(F_exp(),F_exp()), "Gamma(3)-Gamma(3)" = list(F_gam(3), F_gam(3)), "Constant-Gamma(3)" = list(F_const(), F_gam(3))) # create parameter combinations parms <- expand.grid(gen_dist = gen_dist, phi = phi) # function for simulating mean and variance of continuous recording data CDR_moments <- function(phi, zeta, gen_dist, iterations, stream_length) { BS <- r_behavior_stream(n = iterations, mu = phi / zeta, lambda = (1 - phi) / zeta, F_event = dist_lookup[[gen_dist]][[1]], F_interim = dist_lookup[[gen_dist]][[2]], stream_length = stream_length) CDR <- continuous_duration_recording(BS) c(mean=mean(CDR), var=var(CDR)) } # apply function to each combination of parameter values library(plyr) CDR_var <- mdply(parms, CDR_moments, zeta = zeta, iterations = iterations, stream_length = 600) # plot results library(ggplot2) qplot(x = phi, y = var, color = gen_dist, linetype = gen_dist, data = CDR_var, geom = "point", alpha = I(0.5), size = I(1.5), xlab = "Prevalence", ylab = expression(Var(Y^C))) + geom_smooth(method = "lm", formula = y ~ I(x^2 * (1 - x)^2) + 0, se = FALSE) + labs(linetype="Generating distributions", color="Generating distributions") + theme_bw()
/scratch/gouwar.j/cran-all/cranData/ARPobservation/demo/CDR_reliability.R
set.seed(10) # set parameters zeta <- 1 / 60 iterations <- 100 phi <- rep(seq(0.01, 0.99, 0.01), each = iterations) mu <- phi / zeta lambda <- (1 - phi) / zeta # generate behavior streams, apply continuous recording and MTS BS <- r_behavior_stream(n = length(phi), mu = mu, lambda = lambda, F_event = F_exp(), F_interim = F_exp(), stream_length = 600) obs <- reported_observations(BS, c("C","M"), interval_length = 15) # plot results library(ggplot2) qplot(x = phi, y = M, data = obs, geom = "point", alpha = I(0.1), xlab = "Prevalence", ylab = "Momentary time sampling") + geom_smooth(method = "loess", se = FALSE, color = "red") + theme_bw() qplot(x = C, y = M, data = obs, geom = "point", alpha = I(0.1), xlab = "Continuous recording", ylab = "Momentary time sampling") + geom_smooth(method = "loess", se = FALSE, color = "red") + theme_bw()
/scratch/gouwar.j/cran-all/cranData/ARPobservation/demo/MTS_measurands.R
set.seed(10) # set parameters stream_length <- 60 iterations <- 1000 phi <- 0.1 zeta <- seq(1/100, 1/2, length.out = 100) interim_dist <- c("Weibull(3)", "Gamma(3)", "Uniform", "Exponential", "Gamma(1/2)", "Weibull(1/2)") dist_lookup <- list("Weibull(3)" = F_weib(3), "Gamma(3)" = F_gam(3), "Uniform" = F_unif(), "Exponential" = F_exp(), "Gamma(1/2)" = F_gam(1/2), "Weibull(1/2)" = F_weib(1/2)) # create parameter combinations parms <- expand.grid(interim_dist = interim_dist, zeta = zeta) # function for simulating mean of PIR data PIR_mean <- function(phi, zeta, interim_dist, iterations, stream_length) { BS <- r_behavior_stream(n = iterations, mu = phi / zeta, lambda = (1 - phi) / zeta, F_event = F_const(), F_interim = dist_lookup[[interim_dist]], stream_length = stream_length) PIR <- interval_recording(BS, interval_length = 1) c(PIR_mean = mean(PIR)) } # apply function to each combination of parameter values library(plyr) PIR_sim <- mdply(parms, PIR_mean, phi=phi, iterations = iterations, stream_length = stream_length) # plot results library(ggplot2) qplot(x = zeta, y = PIR_mean, color = interim_dist, linetype = interim_dist, data = PIR_sim, geom = "smooth", method = "loess", se = FALSE, xlab = "Incidence (per interval)", ylab = expression(E(Y^P))) + labs(linetype="Interim time distribution", color="Interim time distribution") + theme_bw()
/scratch/gouwar.j/cran-all/cranData/ARPobservation/demo/PIR_bias.R
set.seed(123) # set parameters stream_length <- c(5,10) sessions <- 4 phi <- 0.25 zeta <- 1 / 60 prev_ratio <- 0.4 replicates <- 5 # create parameter combinations # function for simulating ABAB data ABAB <- function(phi, zeta, prev_ratio, stream_length, sessions, replicates) { Condition <- rep(rep(c("Baseline","NCR"), times = 2), each = sessions) Phase <- rep(c("A1","B1","A2","B2"), each = sessions) phi_vec <- rep(phi * rep(c(1, prev_ratio), times = 2), each = sessions) zeta_vec <- rep(zeta, 4 * sessions) BS <- r_behavior_stream(n = 4 * sessions * replicates, mu = phi_vec / zeta_vec, lambda = (1 - phi_vec) / zeta_vec, F_event = F_exp(), F_interim = F_exp(), stream_length = 60 * stream_length) CDR <- continuous_duration_recording(BS) MTS <- momentary_time_recording(BS, interval_length = 20) repnr <- rep(1:replicates, each = 4 * sessions) data.frame(repnr, session = 1:(4 * sessions), Phase, Condition, CDR, MTS) } # apply function to each combination of parameter values library(plyr) ABAB_sim <- mdply(data.frame(stream_length), ABAB, phi=phi, zeta = zeta, prev_ratio = prev_ratio, replicates = replicates, sessions = sessions) # plot results library(reshape2) ABAB_sim <- melt(ABAB_sim, measure.vars = c("CDR","MTS")) ABAB_sim <- within(ABAB_sim,{ session_length <- ordered(paste(stream_length, "m"), levels = c("5 m","10 m")) system <- factor(variable, labels = c("Continuous recording", "20 s MTS")) replication <- paste("Replication", repnr) }) library(ggplot2) qplot(x = session, y = value, color = Condition, linetype = Condition, group = Phase, data = ABAB_sim, facets = replication ~ session_length + system, geom = c("point","line"), xlab = "Session", ylab = "% Duration") + labs(color="Phase", linetype = "Phase") + theme_bw()
/scratch/gouwar.j/cran-all/cranData/ARPobservation/demo/study_planning.R
--- title: "Direct observation recording: Algorithms used in `ARPobservation`" author: "James E. Pustejovsky" date: "`r Sys.Date()`" output: rmarkdown::html_vignette: number_sections: false vignette: > %\VignetteIndexEntry{Algorithms for direct observation recording} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- This vignette describes the algorithms used in `ARPobservation` to simulate behavior streams and direct observation recording data based on an alternating renewal process. The ARP is a statistical model that can be used to describe the characteristics of simple behavior streams, in which a behavior of interest is either occurring or not occurring at a given point in time. We will refer to the length of individual episodes of behavior as _event durations_ and the lengths of time between episodes of behavior as _interim times_. In the ARP framework, variability is introduced into the behavior stream by treating each individual event duration and each interim time as a random quantity, drawn from some probability distribution. The sequence of events comprising the behavior stream can be described as follows. Let $L$ denote the length of the observation session. Let $A_1$ denote the duration of the first behavioral event observed, $A_2$ denote the duration of the second event, and $A_u$ the duration of event $u$ for $u = 3,4,5,...$. Let $B_0$ denote the length of time from the beginning of the observation session until the first behavioral event, with $B_0 = 0$ if event 1 is occurring at the beginning of the session. Let $B_u$ denote the $u^{th}$ interim time, meaning the length of time between the end of event $u$ and the beginning of event $u + 1$, for $u = 1,2,3,...$. The values $B_0,A_1,B_1,A_2,B_2,A_3,B_3,...$ provide a quantitative description of the behavior stream from an observation session. Note that these quantities are measured in time units, such as seconds. # Simulating the behavior stream ## Generating distributions The `ARPobservation` package generates behavior streams that follow an alternating renewal process with specified generating distributions. The package provides two approaches to generating the initial interim time and initial event duration, which we explain further below. Subsequent event durations $A_2,A_3,A_4,...$ are generated independently, following a specified probability distribution with mean $\mu$ and cumulative distribution function (cdf) $F(t; \mu)$. Subsequent interim times $B_1,B_2,B_3,...$ are generated independently, following a specified probability distribution with mean $\lambda$ and cdf $G(t; \lambda)$. The package currently includes functions for exponential distributions, gamma distributions, mixtures of two gamma distributions, Weibull distributions, uniform distributions, and constant values. Each distribution is implemented as an object of class `eq_dist`, which provides functions for generating random deviates from the specified distribution and the corresponding equilibrium distribution. For distributions involving more than a single parameter, all parameters except for the mean must be specified. ## Initial conditions `ARPobservation` provides two approaches to generating the initial interim time and initial event duration. The first approach involves the following steps: 1. Generate a random number $X$ from a Bernoulli distribution with user-specified probability $p_0$. 2. If $X = 0$, then generate $B_0$ from the same distribution as subsequent interim times, i.e., from $G(t; \lambda)$. 
If $X = 1$, then set $B_0 = 0$. 
3. Generate $A_1$ from the same distribution as subsequent event durations, i.e., from $F(t; \mu)$. 

If $p_0$ is not specified by the user, the default value of $p_0 = 0$ is used, so that behavior streams always begin with an interim time. This approach produces behavior streams that are initially out of equilibrium. 

The other approach uses initial conditions chosen so that the resulting process is in equilibrium. This involves the following steps:

1. Generate a random number $X$ from a Bernoulli distribution with probability $\mu / (\mu + \lambda)$. 
2. If $X = 0$, then generate $B_0$ from the distribution with cdf 
\[
\tilde{G}(t; \lambda) = \frac{1}{\lambda} \int_0^t \left[1 - G(x; \lambda)\right] dx.
\]
If $X = 1$, then set $B_0 = 0$. 
3. If $X = 0$, then generate $A_1$ from $F(t; \mu)$. If $X = 1$, then generate $A_1$ from the distribution with cdf 
\[
\tilde{F}(t; \mu) = \frac{1}{\mu} \int_0^t \left[1 - F(x; \mu) \right] dx.
\]

# Direct observation recording procedures

The package provides several algorithms that simulate commonly used direct observation recording procedures. Each algorithm takes as input a randomly generated behavior stream and produces as output a summary measurement (or measurements) from a direct observation procedure. 

## Event counting

Event counting produces a measurement $Y^E$ equal to the number of events that begin during the observation session. Let $J$ denote the number of the last behavioral event that begins during the observation session, which can be calculated by finding the integer that satisfies the inequalities
$$
\sum_{j=0}^{J-1} \left(A_j + B_j \right) \leq L < \sum_{j=0}^{J} \left(A_j + B_j \right),
$$
where we define $A_0 = 0$ for notational convenience. It follows that $Y^E = J$.

## Continuous recording

Continuous recording produces a measurement $Y^C$ equal to the proportion of the observation session during which the behavior occurs. In order to calculate this quantity from the behavior stream, we must account for the possibility that the last event beginning during the observation session may have a duration that extends beyond when the session ends. The measurement based on continuous recording can be calculated as 
$$
Y^C = \begin{cases}
\frac{1}{L} \sum_{j=1}^J A_j & \text{if}\quad \sum_{j=1}^{J} \left(B_{j-1} + A_j\right) \leq L \\
1 - \frac{1}{L} \sum_{j=0}^{J-1} B_j & \text{if}\quad \sum_{j=1}^{J} \left(B_{j-1} + A_j\right) > L
\end{cases}
$$

## Momentary time sampling

In momentary time sampling, an observer divides the observation session into $K$ intervals of equal length and notes whether the behavior is present or absent at the very end of each interval. The summary measurement $Y^M$ then corresponds to the proportion of moments during which the behavior is observed. Let $X_k = 1$ if the behavior is occurring at the end of interval $k$ for $k = 1,...,K$. The value of $X_k$ can be calculated from the behavior stream as follows. Let $I(X)$ denote the indicator function, equal to one if condition $X$ is true and zero otherwise. Let $m_k$ be the number of the last event that ends before the $k^{th}$ interval ends, defined formally as 
$$
m_k = \sum_{i=1}^J I\left[\sum_{j=1}^i \left(B_{j-1} + A_j\right) < \frac{k L}{K} \right]
$$
for $k = 1,...,K$. If interim time $B_{m_k}$ concludes before the end of interval $k$ (or equivalently, if event $A_{m_k+1}$ begins before the end of interval $k$), then $X_k = 1$; formally, 
$$
X_k = I\left[\sum_{j=0}^{m_k} \left(A_j + B_j\right) < \frac{k L}{K} \right]
$$
for $k = 1,...,K$. 
The summary measurement is then calculated as $\displaystyle{Y^M = \sum_{k=1}^K X_k / K}$.

## Partial interval recording

Like momentary time sampling, partial interval recording is also based on a set of $K$ intervals of equal length, but a different rule is used to score each interval. In partial interval recording, the observer counts a behavior as present if it occurs at any point during the first $c$ time units of the interval, where $c \leq L / K$; the remaining $L / K - c$ time units are used to record notes or rest. Let $U_k = 1$ if the behavior occurs at any point during the $k^{th}$ interval, $U_k = 0$ otherwise. The indicator $U_k$ will be equal to one if and only if interim time $m_{k-1}$ ends during the active part of the interval. Noting that interim time $m_{k-1}$ ends at time $\sum_{j=0}^{m_{k-1}} \left(A_j + B_j\right)$ and that the active part of the $k^{th}$ interval ends at time $(k-1)L/K + c$, it can be seen that 
$$
U_k = I \left[\sum_{j=0}^{m_{k-1}} \left(A_j + B_j\right) < \frac{(k-1)L}{K} + c \right],
$$
for $k=1,...,K$. The summary measurement $Y^P$ is then calculated as the proportion of intervals during which the behavior is observed at any point: $\displaystyle{Y^P = \sum_{k=1}^K U_k / K}$.

## Whole interval recording

Whole interval recording is similar to partial interval recording but uses yet a different rule to score each interval. Specifically, the observer counts a behavior as present only if it occurs for all $c$ time units at the beginning of the interval. Let $W_k = 1$ if the behavior occurs for the full $c$ time units at the beginning of the $k^{th}$ interval, with $W_k = 0$ otherwise. Let $n_k$ be the number of the last event that begins before the $k^{th}$ interval begins, defined formally as 
$$
n_k = \sum_{i=1}^J I\left[\sum_{j=0}^i \left(A_j + B_j\right) < \frac{(k - 1) L}{K} \right]
$$
for $k = 1,...,K$. It follows that $W_k$ will be equal to one if and only if event $n_k$ ends after the active part of interval $k$: 
$$
W_k = I \left[\sum_{j=1}^{n_k} \left(B_{j-1} + A_j\right) \geq \frac{(k - 1) L}{K} + c \right],
$$
for $k=1,...,K$. The summary measurement $Y^W$ is then calculated as the proportion of intervals during which the behavior is observed throughout the active part of the interval: $\displaystyle{Y^W = \sum_{k=1}^K W_k / K}$.
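
## Illustration in code

The recording procedures described above are implemented in the functions `event_counting()`, `continuous_duration_recording()`, `momentary_time_recording()`, and `interval_recording()`. As a brief illustration (a minimal sketch; the parameter values here are arbitrary), the following chunk generates five behavior streams in equilibrium and applies each recording procedure to them:

```{r}
library(ARPobservation)
set.seed(8)

# five equilibrium behavior streams: mean event duration 3, mean interim time 10
BS <- r_behavior_stream(n = 5, mu = 3, lambda = 10,
                        F_event = F_exp(), F_interim = F_exp(),
                        stream_length = 100)

event_counting(BS)                                   # Y^E, one count per stream
continuous_duration_recording(BS)                    # Y^C, proportion of session time
momentary_time_recording(BS, interval_length = 10)   # Y^M, based on K = 10 moments
interval_recording(BS, interval_length = 10, partial = TRUE)   # Y^P
interval_recording(BS, interval_length = 10, partial = FALSE)  # Y^W
```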
/scratch/gouwar.j/cran-all/cranData/ARPobservation/inst/doc/Observation-algorithms.Rmd
# this unit test is "optional" because the small simulation contained within it 
# would significantly lengthen the build time every time the package was checked

library(testthat)
library(ARPobservation)
library(plyr)

context("An optional unit test for examining the log-likelihood estimator.")

test_that("Monte Carlo estimates match the expected value of the likelihood function", {
  
  phi <- seq(.10, .50, length.out = 3)
  zeta <- seq(.1, .50, length.out = 3)
  c <- 1
  d <- c(0, 1/4, 1/3)
  iterations <- 100000
  nIntervals <- 5
  
  set.seed(847291)
  
  parms <- expand.grid(phi = phi, zeta = zeta, c = c, d = d, 
                       nIntervals = nIntervals, iterations = iterations)
  
  generateEsts <- function(phi, zeta, c, d, nIntervals, iterations) {
    mu <- phi/zeta
    lambda <- (1-phi)/zeta
    intervalLength <- c + d
    
    BS <- r_behavior_stream(n = iterations, mu = mu, lambda = lambda, 
                            F_event = F_exp(), F_interim = F_exp(), 
                            stream_length = intervalLength * nIntervals)
    
    samples <- interval_recording(BS = BS, interval_length = intervalLength, 
                                  rest_length = d, summarize = FALSE)
    
    samples <- as.data.frame(t(samples))
    samples$One <- 1
    
    mc_est <- ddply(samples, .variables = names(samples)[-(nIntervals + 1)], 
                    summarize, freq = sum(One))
    
    mc_est$p <- exp(apply(mc_est[,1:nIntervals], 1, PIR_loglik, 
                          phi = phi, zeta = zeta, c = c, d = d))
    
    return(mc_est)
  }
  
  mc_est <- mdply(.data = parms, .fun = generateEsts, .inform = TRUE)
  mc_est <- mc_est[,c(1,2,4,12,13)]
  
  chisq_wrapper <- function(subset) {
    chisq.test(x = subset$freq, p = subset$p)$p.value
  }
  
  test_values <- ddply(mc_est, .(phi, zeta, d), .fun = chisq_wrapper)
  
  expect_that(min(test_values$V1) > .01, is_true())
})
/scratch/gouwar.j/cran-all/cranData/ARPobservation/inst/mc_est_test.R
#------------------------------ # Design matrix #------------------------------ get_phase_changes <- function(design, sessions_TR, phase_pattern, MB_phase_changes, cases) { if (design=="Treatment Reversal") { phase_labels <- strsplit(phase_pattern, "")[[1]] phase_changes <- sessions_TR * (1:(length(phase_labels) - 1)) } else if (design=="Multiple Baseline") { lapply(MB_phase_changes, function(x) as.numeric(strsplit(x, split = ",")[[1]])) %>% sapply(function(x) rep(x, length.out = cases)) %>% as.data.frame() %>% mutate(case = paste("Case", LETTERS[1:cases])) %>% gather("phase",session, -case) -> phase_changes } else { phase_changes <- NULL } return(phase_changes) } phase_design <- function(design, n_trt, cases, phase_pattern, sessions_TR, sessions_MB, phase_changes, n_alternations, randomize_AT, samples) { case_names <- paste("Case", LETTERS[1:cases]) sample_nr <- 1:samples if (design=="Treatment Reversal") { phase_labels <- strsplit(phase_pattern, "")[[1]] session_nr <- 1:(length(phase_labels) * sessions_TR) phase <- rep(1:length(phase_labels), each = sessions_TR) trt <- rep(phase_labels, each = sessions_TR) } else if (design=="Multiple Baseline") { session_nr <- 1:sessions_MB phase <- unlist(with(phase_changes,tapply(session, case, function(t) rep(1:(n_trt + 1), times = diff(c(0,t,sessions_MB)))))) trt <- LETTERS[phase] } else { n <- n_trt + 1 session_nr <- 1:(n * n_alternations) phase <- rep(1:n_alternations, each = n) if (randomize_AT) { trt <- as.vector(replicate(n_alternations, sample(LETTERS[1:n]))) } else { trt <- rep(LETTERS[1:(n_trt + 1)], n_alternations) } } dat <- expand.grid(session = session_nr, case = case_names, sample = sample_nr) dat$phase <- factor(phase) dat$trt <- factor(trt) return(dat) } #---------------------------- # Generate outcome data #---------------------------- impact <- function(trt, omega) { n <- length(trt) impact <- matrix(c(as.numeric(trt[1]==(levels(trt)[-1])), rep(NA, (n - 1) * (nlevels(trt) - 1))), nlevels(trt) - 1, n) for (j in 2:n) impact[,j] <- omega * impact[,j - 1] + (1 - omega) * (trt[j]==(levels(trt)[-1])) data.frame(t(impact)) } simulate_measurements <- function(dat, behavior, freq, freq_dispersion, duration, interim_time, state_dispersion, trt_effect_params, system, interval_length, session_length) { N <- nrow(dat) omega <- 1 - trt_effect_params$immediacy / 100 group_by(dat, sample, case) %>% do(impact(.$trt, omega = omega)) %>% ungroup() %>% select(-sample, -case) %>% as.matrix() -> X_impact if (behavior == "Event behavior") { mu <- rep(0, N) lambda <- 1 / (freq * (X_impact %*% trt_effect_params$freq_change / 100 + 1)) F_event <- F_const() F_interim <- F_gam(shape = 1 / freq_dispersion) } else { mu <- duration * (X_impact %*% trt_effect_params$duration_change / 100 + 1) / 60 lambda <- interim_time * (X_impact %*% trt_effect_params$interim_change / 100 + 1) / 60 F_event <- F_gam(shape = 1 / state_dispersion) F_interim <- F_gam(shape = 1 / state_dispersion) } BS <- r_behavior_stream(n = N, mu = mu, lambda = lambda, F_event = F_event, F_interim = F_interim, stream_length = session_length) # compute true trend lines if (system == "Frequency counting") { dat$truth <- session_length / lambda } else { dat$truth <- 100 * mu / (mu + lambda) } # compute observed data dat$Y <- switch(system, "Frequency counting" = event_counting(BS), "Continuous recording" = 100 * continuous_duration_recording(BS), "Momentary time sampling" = 100 * momentary_time_recording(BS, interval_length / 60), "Partial interval recording" = 100 * interval_recording(BS, 
interval_length / 60, partial = TRUE), "Whole interval recording" = 100 * interval_recording(BS, interval_length / 60, partial = FALSE) ) return(dat) } #---------------------------- # Create SCD graph #---------------------------- graph_SCD <- function(dat, design, phase_changes, system, showtruth) { Y_lab <- switch(system, "Frequency counting" = "Events", "Continuous recording" = "% Session time", "Momentary time sampling" = "% Moments", "Partial interval recording" = "% Partial intervals", "Whole interval recording" = "% Whole intervals" ) Y_range <- c(0, ifelse(system=="Frequency counting", max(dat$Y) + 1, 100)) samples <- max(dat$sample) if (design == "Treatment Reversal") { SCD_graph <- ggplot(dat, aes(session, Y, color = trt, shape = trt, group = interaction(phase, sample))) + geom_point(alpha = samples^(-1/4)) + geom_line(alpha = samples^(-1/2)) + geom_vline(xintercept = phase_changes + 0.5, linetype = "dashed") + labs(color = "Condition", shape = "Condition", y = Y_lab) } else if (design == "Multiple Baseline") { SCD_graph <- ggplot(dat, aes(session, Y, color = trt, shape = trt, group = interaction(phase, sample))) + geom_point(alpha = samples^(-1/4)) + geom_line(alpha = samples^(-1/2)) + geom_vline(data = phase_changes, aes(xintercept = session + 0.5), linetype = "dashed") + labs(color = "Condition", shape = "Condition", y = Y_lab) } else { SCD_graph <- ggplot(dat, aes(session, Y, color = trt, shape = trt, group = interaction(trt,sample))) + geom_point(alpha = samples^(-1/4)) + labs(color = "Condition", shape = "Condition", y = Y_lab) } SCD_graph <- SCD_graph + scale_color_viridis(discrete = TRUE, begin = .2, end = .8, option = "C") + coord_cartesian(ylim = Y_range) + theme_minimal() + theme(legend.position = "bottom") if (nlevels(dat$case) > 1) { SCD_graph <- SCD_graph + facet_grid(case ~ .) 
} if (showtruth) { SCD_graph <- SCD_graph + geom_line(data = subset(dat, sample==1), aes(session, truth), size = 1.0) } SCD_graph } #------------------------------------------------ # Create effect size graph and summary table #------------------------------------------------ calculate_ES <- function(dat, phase_pre, phase_post, effect_size, improvement) { if (is.null(phase_pre)) phase_pre <- "A" if (is.null(phase_post)) phase_post <- "B" ES_function <- switch(effect_size, PND = PND, PEM = PEM, PAND = PAND, IRD = IRD, NAP = NAP, Tau = Tau, "Within-case SMD" = SMD) # calculate effect sizes group_by(dat, case, sample) %>% filter(trt %in% c(phase_pre, phase_post)) %>% summarize(ES = ES_function(data = Y, phase = trt, base_phase = phase_pre, increase = improvement==1)) %>% ungroup() } summarize_ES <- function(ES_dat) { group_by(ES_dat, case) %>% summarize(pct05 = quantile(ES, p = .05), pct10 = quantile(ES, p = .10), pct25 = quantile(ES, p = .25), median = median(ES), mean = mean(ES), pct75 = quantile(ES, p = .75), pct90 = quantile(ES, p = .90), pct95 = quantile(ES, p = .95)) -> ES_summary names(ES_summary)[c(2:4,7:9)] <- paste(c("5th","10th","25th","75th","90th","95th"), "%tile") ES_summary } graph_ES <- function(ES_dat, effect_size, showAvgES) { # graph effect sizes X_range <- switch(effect_size, PND = c(0,100), PEM = c(0,100), PAND = c(0,100), IRD = c(-1,1), NAP = c(0,100), Tau = c(-1,1), "Within-case SMD" = range(ES_dat$ES)) cases <- nlevels(ES_dat$case) legend_position <- if (cases > 1) "bottom" else "none" ES_graph <- ggplot(ES_dat, aes(ES, fill = case)) + geom_density(alpha = 1 / max(2,cases)) + coord_cartesian(xlim = X_range) + theme_minimal() + theme(legend.position = legend_position) + labs(x = effect_size, y = "distribution", fill = "") if (showAvgES) { ES_avg <- group_by(ES_dat, case) %>% summarize(ESavg = mean(ES)) ES_graph <- ES_graph + geom_vline(data = ES_avg, aes(xintercept = ESavg, color = case), size = 1, linetype = "dashed") } ES_graph }
/scratch/gouwar.j/cran-all/cranData/ARPobservation/inst/shiny-examples/ARPsimulator/ARPsimulator.R
library(ARPobservation) library(dplyr) library(tidyr) library(ggplot2) rm(list=ls()) source("inst/shiny-examples/ARPsimulator/effect_sizes.R") source("inst/shiny-examples/ARPsimulator/ARPsimulator.R") input <- list() # baseline behavior input$behavior <- "Event behavior" input$freq <- 3 input$freq_dispersion <- 1 input$duration <- 30 input$interim_time <- 90 input$state_dispersion <- 1 # behavior change input$n_trt <- 3 input$freq_change1 <- -50 input$duration_change1 <- NA input$interim_change1 <- NA input$immediacy1 <- 100 input$freq_change2 <- -80 input$duration_change2 <- NA input$interim_change2 <- NA input$immediacy2 <- 100 input$freq_change3 <- -90 input$duration_change3 <- NA input$interim_change3 <- NA input$immediacy3 <- 100 # Measurement system input$system <- "Frequency counting" input$interval_length <- 15 input$session_length <- 10 # Study design input$design <- "Treatment Reversal" input$cases <- 3 input$phase_pattern <- "ABCDABCD" input$sessions_TR <- 5 input$sessions_MB <- 30 input$phase_change_list1 <- "5,10,15" input$phase_change_list2 <- "12,17,22" input$phase_change_list3 <- "19,24,28" input$n_alternations <- 3 input$randomize_AT <- TRUE # Miscellaneous input$refresh <- NA input$samples <- 50 input$showtruth <- TRUE input$effect_size <- "NAP" input$improvement <- 2 input$showAvgES <- TRUE # Treatment effect parameters (reactive) trts <- 1:input$n_trt freq_change <- unlist(input[paste0("freq_change",trts)]) duration_change <- unlist(input[paste0("duration_change",trts)]) interim_change <- unlist(input[paste0("interim_change",trts)]) immediacy <- unlist(input[paste0("immediacy",trts)]) trt_effect_params <- list(freq_change = freq_change, duration_change = duration_change, interim_change = interim_change, immediacy = immediacy) # MB phase changes (reactive) MB_phase_changes <- unlist(input[paste0("phase_change_list",1:input$n_trt)]) phase_changes <- get_phase_changes(input$design, input$sessions_TR, input$phase_pattern, MB_phase_changes, input$cases) dat <- phase_design(input$design, input$n_trt, input$cases, input$phase_pattern, input$sessions_TR, input$sessions_MB, phase_changes, input$n_alternations, input$randomize_AT, input$samples) dat <- simulate_measurements(dat, input$behavior, input$freq, input$freq_dispersion, input$duration, input$interim_time, input$state_dispersion, trt_effect_params, input$system, input$interval_length, input$session_length) sim_dat <- list(dat = dat, design = input$design, phase_changes = phase_changes, system = input$system) with(sim_dat, graph_SCD(dat, design, phase_changes, system, input$showtruth)) design <- input$design system <- input$system phase_pre <- "A" phase_post <- "C" effect_size <- input$effect_size improvement <- input$improvement showAvgES <- input$showAvgES ES_dat <- calculate_ES(dat, phase_pre, phase_post, input$effect_size, input$improvement) with(sim_dat, graph_ES(ES_dat, input$effect_size, input$showAvgES)) summarize_ES(ES_dat)
/scratch/gouwar.j/cran-all/cranData/ARPobservation/inst/shiny-examples/ARPsimulator/ARPsimulator_check.R
PND <- function(data, phase, base_phase, increase = TRUE) {
  if (!increase) data <- -1 * data
  100 * mean(data[phase != base_phase] > max(data[phase == base_phase]))
}

PEM <- function(data, phase, base_phase, increase = TRUE) {
  if (!increase) data <- -1 * data
  med <- median(data[phase == base_phase])
  100 * mean((data[phase != base_phase] > med) + 0.5 * (data[phase != base_phase] == med))
}

PAND <- function(data, phase, base_phase, increase = TRUE) {
  if (!increase) data <- -1 * data
  m <- sum(phase == base_phase)
  n <- sum(phase != base_phase)
  X <- sort(data[phase == base_phase])
  Y <- sort(data[phase != base_phase])
  ij <- expand.grid(i = 1:m, j = 1:n)
  ij$no_overlap <- mapply(function(i, j) X[i] < Y[j], i = ij$i, j = ij$j)
  ij$overlap <- with(ij, i + n - j + 1)
  overlaps <- with(ij, max(overlap * no_overlap))
  100 * overlaps / length(data)
}

IRD <- function(data, phase, base_phase, increase = TRUE) {
  pand <- PAND(data = data, phase = phase, base_phase = base_phase, increase = increase) / 100
  m <- sum(phase == base_phase)
  n <- sum(phase != base_phase)
  ((m+n)^2 * pand - m^2 - n^2) / (2 * m * n)
}

NAP <- function(data, phase, base_phase, increase = TRUE) {
  if (!increase) data <- -1 * data
  XY <- expand.grid(x = data[phase==base_phase], y = data[phase!=base_phase])
  100 * mean(with(XY, (y > x) + 0.5 * (y == x)))
}

Tau <- function(data, phase, base_phase, increase = TRUE) {
  nap <- NAP(data = data, phase = phase, base_phase = base_phase, increase = increase)
  2 * nap / 100 - 1
}

SMD <- function(data, phase, base_phase, ...) {
  # drop unused factor levels so that only the comparison phase remains after 
  # the data have been filtered to two phases
  phase <- droplevels(phase)
  treat_phase <- levels(phase)[(base_phase != levels(phase))]
  y_bar <- tapply(data, phase, mean)[c(base_phase, treat_phase)]
  s_sq <- tapply(data, phase, var)[c(base_phase, treat_phase)]
  n <- table(phase)[c(base_phase, treat_phase)]
  s_pooled <- sqrt(sum((n - 1) * s_sq) / sum(n - 1))
  (y_bar[2] - y_bar[1]) / s_pooled
}
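
## Illustrative check (not run; toy data): with complete separation between 
## phases, the overlap indices attain their maximum values.
if (FALSE) {
  y <- c(1, 2, 3, 6, 7, 8)
  p <- factor(rep(c("A", "B"), each = 3))
  PND(y, p, base_phase = "A")  # 100
  NAP(y, p, base_phase = "A")  # 100
  Tau(y, p, base_phase = "A")  # 1
}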
/scratch/gouwar.j/cran-all/cranData/ARPobservation/inst/shiny-examples/ARPsimulator/effect_sizes.R
---
output: md_document
---

### Accessing the ARPsimulator

There are two ways to access the ARPsimulator: via a website or on your own computer, using the RStudio software. Since you are reading this now, you have presumably already figured out at least one of these. Read on to learn more about both options.

#### Access through the shinyapps.io website

The simplest way to access the simulator is via the web, at https://jepusto.shinyapps.io/ARPsimulator/. This version of the simulator is hosted by a service called [shinyapps.io](https://www.shinyapps.io/), which imposes limitations on the number of concurrent users of the site and total hours of active use of the site. Consequently, you might find that the site is not always available. If you intend to use the ARPsimulator extensively, I would encourage you to follow the steps below in order to install it on your own computer. Doing so has the further advantage that the simulator will tend to run faster on your own machine than it does over the web.

#### Access through RStudio

In order to run the ARPsimulator on your own computer, you will need to install two pieces of software (R and RStudio), both of which are open-source and freely available. You will then need to follow several further steps to configure that software. The installation is more involved, but has the benefit of letting you run the simulator as much as you want, at faster speeds than over the web. Feel free to contact me if you have trouble following the steps below.

1. Install R from http://cran.r-project.org/
2. For Windows users, install Rtools from http://cran.r-project.org/bin/windows/Rtools/
3. Install RStudio from http://www.rstudio.com/products/rstudio/download/
4. Once you have these programs installed, you will need to install several R packages that are required to run ARPsimulator. Do this by typing the following commands at the console prompt:

```{r, eval=FALSE}
install.packages("devtools")
install.packages("ggplot2")
install.packages("viridis")
install.packages("dplyr")
install.packages("shiny")
install.packages("markdown")
devtools::install_github("jepusto/ARPobservation")
```

5. After all of these packages are installed, type the following commands at the console prompt to start the simulator (the simulator should then open in a new window):

```{r, eval = FALSE}
library(ARPobservation)
ARPsimulator()
```

6. To exit the simulator, simply close the window in which it appears or click the red "Stop" icon in the upper right-hand corner of the RStudio console window.
/scratch/gouwar.j/cran-all/cranData/ARPobservation/inst/shiny-examples/ARPsimulator/markdown/Accessing_ARPsimulator.Rmd
library(shiny) library(markdown) library(ARPobservation) library(dplyr) library(tidyr) library(ggplot2) library(viridis) source("effect_sizes.R") source("ARPsimulator.R") trt_effect_UI <- function(k) { column(12, conditionalPanel( condition = "input.n_trt > 1", h4(paste("Treatment",LETTERS[k+1])) ), conditionalPanel( condition = "input.behavior=='Event behavior'", numericInput(paste0("freq_change",k), label = "Percentage change in frequency", value = 0, min = -100, step = 10) ), conditionalPanel( condition = "input.behavior=='State behavior'", numericInput(paste0("duration_change",k), label = "Percentage change in event duration", value = 0, min = -100, step = 10), numericInput(paste0("interim_change",k), label = "Percentage change in Interim time", value = 0, min = -100, step = 10) ), sliderInput(paste0("immediacy",k), label = "Immediacy of change (%)", min = 0, max = 100, value = 100, step = 5) ) } server <- function(input, output) { output$trt_effects_UI <- renderUI({ lapply(1:input$n_trt, trt_effect_UI) }) trt_effect_params <- reactive({ trts <- 1:input$n_trt freq_change <- rep(0, input$n_trt) duration_change <- rep(0, input$n_trt) interim_change <- rep(0, input$n_trt) immediacy <- rep(0, input$n_trt) if (any(grepl("_change",names(input)))) { for (t in trts) { freq_change[t] <- input[[paste0("freq_change",t)]] duration_change[t] <- input[[paste0("duration_change",t)]] interim_change[t] <- input[[paste0("interim_change",t)]] immediacy[t] <- input[[paste0("immediacy",t)]] } } list(freq_change = freq_change, duration_change = duration_change, interim_change = interim_change, immediacy = immediacy) }) choices <- c("Frequency counting","Continuous recording", "Momentary time sampling","Partial interval recording","Whole interval recording") output$systemUI <- renderUI( { choices_available <- switch(input$behavior, "Event behavior" = choices[c(1,4)], "State behavior" = choices[-1]) selectInput("system", label = "Measurement system", choices = choices_available) }) output$cases_UI <- renderUI({ cases <- 1L + 2 * (input$design == "Multiple Baseline") numericInput("cases", label = "Number of cases", value = cases, min = 1) }) output$TR_phase_pattern_UI <- renderUI({ phases <- paste0(LETTERS[rep(1:(input$n_trt + 1), 2)], collapse = "") textInput("phase_pattern", label = "Phase pattern", value = phases) }) output$MB_phase_change_UI <- renderUI({ cases <- if (is.null(input$cases)) 1L else input$cases n_trt <- if (is.null(input$n_trt)) 1L else input$n_trt phase_changes <- trunc(input$sessions_MB * (1:(cases * n_trt)) / (cases * n_trt + 1)) lapply(1:input$n_trt, function(k) { lab <- if (input$n_trt > 1) paste0("Phase change times (Trt ", LETTERS[k+1],")") else "Phase change times" textInput(paste0("phase_change_list",k), label = lab, value = paste(phase_changes[cases * (k - 1) + 1:cases], collapse = ", ")) }) }) MB_phase_changes <- reactive({ phase_change_list <- c() for (i in 1:(input$n_trt)) { phase_change_list[i] <- input[[paste0("phase_change_list",i)]] } phase_change_list }) sim_dat <- eventReactive(c(input$outputPanel, input$simulateGraph, input$simulateES), { cases <- if (is.null(input$cases)) 1L else input$cases system <- if (is.null(input$system)) { if (input$behavior=="Event behavior") choices[1] else choices[2] } else { input$system } phase_pattern <- if (is.null(input$phase_pattern)) "ABAB" else input$phase_pattern phase_changes <- get_phase_changes(input$design, input$sessions_TR, phase_pattern, MB_phase_changes(), input$cases) samples <- ifelse(input$outputPanel == "SCD Graph", 
input$samplesGraph, input$samplesES) dat <- phase_design(input$design, input$n_trt, cases, phase_pattern, input$sessions_TR, input$sessions_MB, phase_changes, input$n_alternations, input$randomize_AT, samples) dat <- simulate_measurements(dat, input$behavior, input$freq, input$freq_dispersion, input$duration, input$interim_time, input$state_dispersion, trt_effect_params(), system, input$interval_length, input$session_length) height <- max(300, 150 * cases) list(dat = dat, design = input$design, phase_changes = phase_changes, system = system, height_SCD = height) }) output$SCDplot <- renderPlot({ if (input$simulateGraph > 0 | input$simulateES > 0) { with(sim_dat(), graph_SCD(dat, design, phase_changes, system, input$showtruth)) } }, height = function() sim_dat()$height_SCD) output$phase_pre_UI <- renderUI({ selectInput("phase_pre", label = "Pre phase", choices = LETTERS[1:(input$n_trt + 1)]) }) output$phase_post_UI <- renderUI({ selectInput("phase_post", label = "Post phase", choices = setdiff(LETTERS[1:(input$n_trt + 1)], input$phase_pre)) }) ES_dat <- reactive({ calculate_ES(sim_dat()$dat, input$phase_pre, input$phase_post, input$effect_size, input$improvement) }) output$ESplot <- renderPlot({ graph_ES(ES_dat(), input$effect_size, input$showAvgES) }, height = 400) output$downloadGraph <- downloadHandler( filename = "ARPsimulator - fake graph.png", content = function(file) { p <- with(sim_dat(), graph_SCD(dat, design, phase_changes, system, input$showtruth)) ht <- sim_dat()$height_SCD / 100 ggsave(filename = file, plot = p, width = 8, height = ht) }, contentType = "image/png" ) output$downloadData <- downloadHandler( filename = "ARPsimulator - fake data.csv", content = function(file) { dat <- sim_dat()$dat dat <- subset(dat, select = c(sample, case, session, phase, trt, Y)) names(dat) <- c("Sample", "Case", "Session", "Phase", "Condition", "Outcome") write.csv(dat, file, row.names=FALSE) }, contentType = "text/csv" ) output$EStable <- renderTable({ summarize_ES(ES_dat()) }, include.rownames = FALSE) }
/scratch/gouwar.j/cran-all/cranData/ARPobservation/inst/shiny-examples/ARPsimulator/server.R
library(shiny) ui <- navbarPage(title = "Alternating Renewal Process Simulator", tabPanel("Simulator", fluidRow( # Baseline behavioral parameters column(3, h3("Baseline behavior"), selectInput("behavior", label = "Behavior class", choices = c("Event behavior", "State behavior")), conditionalPanel( condition = "input.behavior=='Event behavior'", numericInput("freq", label = "Frequency (per min)", value = 1, min = 0, step = 0.1), numericInput("freq_dispersion", label = "Variability", value = 1, min = 0.05, step = 0.05) ), conditionalPanel( condition = "input.behavior=='State behavior'", numericInput("duration", label = "Event duration (seconds)", value = 30, min = 0, step = 1), numericInput("interim_time", label = "Interim time (seconds)", value = 60, min = 0, step = 1), numericInput("state_dispersion", label = "Variability", value = 1, min = 0.05, step = 0.05) ) ), # Behavior change parameters column(3, h3("Behavior change"), numericInput("n_trt", label = "Number of treatments", value = 1, min = 1, max = 4), htmlOutput("trt_effects_UI") ), # Measurement procedures column(3, h3("Measurement procedures"), numericInput("session_length", label = "Session length (min)", value = 10, min = 1), htmlOutput("systemUI"), conditionalPanel( condition = "input.system=='Momentary time sampling'||input.system=='Partial interval recording'||input.system=='Whole interval recording'", numericInput("interval_length", label = "Interval length (seconds)", value = 15, min = 1) ) ), # Study design column(3, h3("Study design"), selectInput("design", label = "Study design", choices = c("Treatment Reversal","Multiple Baseline","Alternating Treatment")), htmlOutput("cases_UI"), conditionalPanel( condition = "input.design=='Treatment Reversal'", htmlOutput("TR_phase_pattern_UI"), numericInput("sessions_TR", label = "Sessions per phase", value = 5, min = 1) ), conditionalPanel( condition = "input.design=='Multiple Baseline'", numericInput("sessions_MB", label = "Total number of sessions", value = 20, min = 1), htmlOutput("MB_phase_change_UI") ), conditionalPanel( condition = "input.design=='Alternating Treatment'", numericInput("n_alternations", label = "Number of alternations", value = 5, min = 1), checkboxInput("randomize_AT", label = "Randomize treatment order", value = TRUE) ) ) ), fluidRow(column(12, hr(), h3("Results"))), tabsetPanel(id = "outputPanel", type = "tabs", tabPanel("SCD Graph", column(12, br()), sidebarLayout( sidebarPanel(width = 3, numericInput("samplesGraph", label = "Samples per case", value = 1, min = 1, max = 100), checkboxInput("showtruth", label = "Show true trend lines", value = FALSE), column(12, align = "center", actionButton("simulateGraph", label = "Simulate!")), br() ), mainPanel(width = 9, plotOutput('SCDplot', height = "auto"), conditionalPanel( condition = "input.simulateGraph > 0 | input.simulateES > 0", fluidRow( column(6, align = "center", downloadLink("downloadGraph", label = "Download graph")), column(6, align = "center", downloadLink("downloadData", label = "Download data")) ) ), br() ) ) ), tabPanel("Effect sizes", column(12, br()), sidebarLayout( sidebarPanel(width = 3, conditionalPanel( condition = "input.n_trt > 1", column(6, htmlOutput("phase_pre_UI")), column(6, htmlOutput("phase_post_UI")) ), selectInput("effect_size", label = "Effect size measure", choices = c("PND","PEM","PAND","IRD","NAP","Tau","Within-case SMD")), conditionalPanel( condition = "input.effect_size != 'Within-case SMD'", radioButtons("improvement", label = "Direction of improvement", choices = 
list("increase" = 1, "decrease" = 2), selected = 1) ), numericInput("samplesES", label = "Samples per case", value = 100, min = 1, max = 1000), checkboxInput("showAvgES", label = "Show average", value = FALSE), column(12, align = "center", actionButton("simulateES", label = "Simulate!")), br() ), mainPanel(width = 9, conditionalPanel( condition = "input.simulateGraph > 0 | input.simulateES > 0", plotOutput('ESplot', height = "auto"), br(), tableOutput('EStable') ) ) ) ) ) ), tabPanel("Help", navlistPanel(widths = c(3,9), tabPanel("Overview", includeMarkdown("markdown/Overview.md")), tabPanel("Baseline behavior", includeMarkdown("markdown/Behavioral_parameters.md")), tabPanel("Behavior change", includeMarkdown("markdown/Behavior_change.md")), tabPanel("Measurement procedures", includeMarkdown("markdown/Measurement_procedures.md")), tabPanel("Study design features", includeMarkdown("markdown/Study_design.md")), tabPanel("Single-case graph", includeMarkdown("markdown/SCD_graph.md")), tabPanel("Effect size graph", includeMarkdown("markdown/ES_graph.md")) ) ), tabPanel("About", navlistPanel(widths = c(3,9), tabPanel("ARPsimulator", includeMarkdown("markdown/ARPsimulator.md")), tabPanel("Accessing the simulator", includeMarkdown("markdown/Accessing_ARPsimulator.md")) ) ) )
/scratch/gouwar.j/cran-all/cranData/ARPobservation/inst/shiny-examples/ARPsimulator/ui.R
--- title: "Direct observation recording: Algorithms used in `ARPobservation`" author: "James E. Pustejovsky" date: "`r Sys.Date()`" output: rmarkdown::html_vignette: number_sections: false vignette: > %\VignetteIndexEntry{Algorithms for direct observation recording} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- This vignette describes the algorithms used in `ARPobservation` to simulate behavior streams and direct observation recording data based on an alternating renewal process. The ARP is a statistical model that can be used to describe the characteristics of simple behavior streams, in which a behavior of interest is either occurring or not occurring at a given point in time. We will refer to the length of individual episodes of behavior as _event durations_ and the lengths of time between episodes of behavior as _interim times_. In the ARP framework, variability is introduced into the behavior stream by treating each individual event duration and each interim time as a random quantity, drawn from some probability distribution. The sequence of events comprising the behavior stream can be described as follows. Let $L$ denote the length of the observation session. Let $A_1$ denote the duration of the first behavioral event observed, $A_2$ denote the duration of the second event, and $A_u$ the duration of event $u$ for $u = 3,4,5,...$. Let $B_0$ denote the length of time from the beginning of the observation session until the first behavioral event, with $B_0 = 0$ if event 1 is occurring at the beginning of the session. Let $B_u$ denote the $u^{th}$ interim time, meaning the length of time between the end of event $u$ and the beginning of event $u + 1$, for $u = 1,2,3,...$. The values $B_0,A_1,B_1,A_2,B_2,A_3,B_3,...$ provide a quantitative description of the behavior stream from an observation session. Note that these quantities are measured in time units, such as seconds. # Simulating the behavior stream ## Generating distributions The `ARPobservation` package generates behavior streams that follow an alternating renewal process with specified generating distributions. The package provides two approaches to generating the initial interim time and initial event duration, which we explain further below. Subsequent event durations $A_2,A_3,A_4,...$ are generated independently, following a specified probability distribution with mean $\mu$ and cumulative distribution function (cdf) $F(t; \mu)$. Subsequent interim times $B_1,B_2,B_3,...$ are generated independently, following a specified probability distribution with mean $\lambda$ and cdf $G(t; \lambda)$. The package currently includes functions for exponential distributions, gamma distributions, mixtures of two gamma distributions, Weibull distributions, uniform distributions, and constant values. Each distribution is implemented as an object of class `eq_dist`, which provides functions for generating random deviates from the specified distribution and the corresponding equilibrium distribution. For distributions involving more than a single parameter, all parameters except for the mean must be specified. ## Initial conditions `ARPobservation` provides two approaches to generating the initial interim time and initial event duration. The first approach involves the following steps: 1. Generate a random number $X$ from a Bernoulli distribution with user-specified probability $p_0$. 2. If $X = 0$, then generate $B_0$ from the same distribution as subsequent interim times, i.e., from $G(t; \lambda)$. 
If $X = 1$, then set $B_0 = 0$. 3. Generate $A_1$ from the same distribution as subsequent event durations, i.e., from $F(t; \mu)$. If $p_0$ is not specified by the user, the default value of $p_0 = 0$ is used, so that behavior streams always begin with an interim time. This approach produces behavior streams that are initially out of equilibrium. The other approach uses initial conditions chosen so that the resulting process is in equilibrium. This involves the following steps: 1. Generate a random number $X$ from a Bernoulli distribution with probability $\mu / (\mu + \lambda)$. 2. If $X = 0$, then generate $B_0$ from the distribution with cdf \[ \tilde{G}(t; \lambda) = \frac{1}{\lambda} \int_0^t \left[1 - G(x; \lambda)\right] dx. \] If $X = 1$, then set $B_0 = 0$. 3. If $X = 0$, then generate $A_1$ from $F(t; \mu)$. If $X = 1$, then generate $A_1$ from the distribution with cdf \[ \tilde{F}(t; \mu) = \frac{1}{\mu} \int_0^t \left[1 - F(x; \mu) \right] dx. \] For example, if the interim times are exponentially distributed, then by memorylessness $\tilde{G}(t; \lambda) = G(t; \lambda)$, so the equilibrium initial interim time follows the same exponential distribution as subsequent interim times. # Direct observation recording procedures The package provides several algorithms that simulate commonly used direct observation recording procedures. Each algorithm takes as input a randomly generated behavior stream and produces as output a summary measurement (or measurements) from a direct observation procedure. ## Event counting Event counting produces a measurement $Y^E$ equal to the number of events that begin during the observation session. Let $J$ denote the number of the last behavioral event that begins during the observation session, which can be calculated by finding the integer that satisfies the inequalities $$ \sum_{j=0}^{J-1} \left(A_j + B_j \right) \leq L < \sum_{j=0}^{J} \left(A_j + B_j \right), $$ where we define $A_0 = 0$ for notational convenience. It follows that $Y^E = J$. ## Continuous recording Continuous recording produces a measurement $Y^C$ equal to the proportion of the observation session during which the behavior occurs. In order to calculate this quantity from the behavior stream, we must account for the possibility that the last event beginning during the observation session may have a duration that extends beyond when the session ends. The measurement based on continuous recording can be calculated as $$ Y^C = \begin{cases} \frac{1}{L} \sum_{j=1}^J A_j & \text{if}\quad \sum_{j=1}^{J} \left(B_{j-1} + A_j\right) \leq L \\ 1 - \frac{1}{L} \sum_{j=0}^{J-1} B_j & \text{if}\quad \sum_{j=1}^{J} \left(B_{j-1} + A_j\right) > L \end{cases} $$ ## Momentary time sampling In momentary time sampling, an observer divides the observation session into $K$ intervals of equal length and notes whether the behavior is present or absent at the very end of each interval. The summary measurement $Y^M$ then corresponds to the proportion of moments during which the behavior is observed. Let $X_k = 1$ if the behavior is occurring at the end of interval $k$, and $X_k = 0$ otherwise, for $k = 1,...,K$. The value of $X_k$ can be calculated from the behavior stream as follows. Let $I(X)$ denote the indicator function, equal to one if condition $X$ is true and zero otherwise. Let $m_k$ be the number of the last event that ends before the $k^{th}$ interval ends, defined formally as $$ m_k = \sum_{i=1}^J I\left[\sum_{j=1}^i \left(B_{j-1} + A_j\right) < k L / K \right] $$ for $k = 1,...,K$. If interim time $B_{m_k}$ concludes before the end of interval $k$ (or equivalently, if event $A_{m_k+1}$ begins before the end of interval $k$), then $X_k = 1$; formally, $$ X_k = I\left[\sum_{j=0}^{m_k} \left(A_j + B_j\right) < k L / K \right] $$ for $k = 1,...,K$.
The summary measurement is then calculated as $\displaystyle{Y^M = \sum_{k=1}^K X_k / K}$. ## Partial interval recording Like momentary time sampling, partial interval recording is also based on a set of $K$ intervals of equal length, but a different rule is used to score each interval. In partial interval recording, the observer counts a behavior as present if it occurs at any point during the first $c$ time units of the interval, where $c \leq L / K$; the remaining $L / K - c$ time units are used to record notes or rest. Let $U_k = 1$ if the behavior occurs at any point during the active part of the $k^{th}$ interval, $U_k = 0$ otherwise. $U_k$ will be equal to one if and only if interim time $B_{m_{k-1}}$ ends before the active part of the $k^{th}$ interval ends. Noting that interim time $B_{m_{k-1}}$ ends at time $\sum_{j=0}^{m_{k-1}} \left(A_j + B_j\right)$ and that the active part of the $k^{th}$ interval ends at time $(k-1)L/K + c$, it can be seen that $$ U_k = I \left[\sum_{j=0}^{m_{k-1}} \left(A_j + B_j\right) < (k-1)L/K + c \right], $$ for $k=1,...,K$. The summary measurement $Y^P$ is then calculated as the proportion of intervals during which the behavior is observed at any point: $\displaystyle{Y^P = \sum_{k=1}^K U_k / K}$. ## Whole interval recording Whole interval recording is similar to partial interval recording but uses yet a different rule to score each interval. Specifically, the observer counts a behavior as present only if it occurs for all $c$ time units at the beginning of the interval. Let $W_k = 1$ if the behavior occurs throughout the first $c$ time units of the $k^{th}$ interval, with $W_k = 0$ otherwise. Let $n_k$ be the number of the last event that begins before the $k^{th}$ interval begins, defined formally as $$ n_k = \sum_{i=1}^J I\left[\sum_{j=0}^{i-1} \left(A_j + B_j\right) < (k - 1) L / K \right] $$ for $k = 1,...,K$. It follows that $W_k$ will be equal to one if and only if event $n_k$ ends after the active part of interval $k$ ends: $$ W_k = I \left[\sum_{j=1}^{n_k} \left(B_{j-1} + A_j\right) \geq (k - 1) L / K + c \right], $$ for $k=1,...,K$. The summary measurement $Y^W$ is then calculated as the proportion of intervals during which the behavior is observed throughout the active part: $\displaystyle{Y^W = \sum_{k=1}^K W_k / K}$.
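# Illustration in base R

To make the definitions above concrete, the following sketch simulates a single behavior stream and scores it with each recording procedure. This is an illustration only, not the `ARPobservation` implementation: it assumes exponential event durations and interim times, uses the default initial condition $p_0 = 0$ (the stream begins with an interim time), and all object names are chosen for exposition.

```r
set.seed(1)
L <- 600        # session length (seconds)
mu <- 30        # mean event duration
lambda <- 60    # mean interim time
K <- 40         # number of intervals
c_len <- L / K  # length of the active part of each interval (here, the full interval)

# Generate comfortably more events than can occur within the session.
n <- ceiling(3 * L / (mu + lambda)) + 20
B <- rexp(n, rate = 1 / lambda)    # interim times B_0, B_1, ...
A <- rexp(n, rate = 1 / mu)        # event durations A_1, A_2, ...
starts <- cumsum(B + c(0, A[-n]))  # event u begins at B_0 + sum_{j<u} (A_j + B_j)
ends <- starts + A                 # event u ends A_u time units later

# Event counting: Y^E = number of events beginning during the session
Y_E <- sum(starts < L)

# Continuous recording: Y^C = proportion of the session occupied by the behavior
Y_C <- sum(pmin(ends, L) - pmin(starts, L)) / L

# Momentary time sampling: is the behavior occurring at the end of each interval?
X <- vapply((1:K) * L / K, function(t) any(starts <= t & ends > t), logical(1))
Y_M <- mean(X)

# Partial interval recording: does the behavior occur at any point
# during the active part of each interval?
U <- vapply(1:K, function(k) {
  lo <- (k - 1) * L / K
  any(starts < lo + c_len & ends > lo)
}, logical(1))
Y_P <- mean(U)

# Whole interval recording: does the behavior occupy the entire active part?
W <- vapply(1:K, function(k) {
  lo <- (k - 1) * L / K
  any(starts <= lo & ends >= lo + c_len)
}, logical(1))
Y_W <- mean(W)

round(c(Y_E = Y_E, Y_C = Y_C, Y_M = Y_M, Y_P = Y_P, Y_W = Y_W), 3)
```

As the scoring rules suggest, $Y^P$ tends to exceed the true prevalence $\mu / (\mu + \lambda)$ while $Y^W$ tends to fall below it, with $Y^M$ in between.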
/scratch/gouwar.j/cran-all/cranData/ARPobservation/vignettes/Observation-algorithms.Rmd
# -------------------------------------------------------------------------------- ##' Aligned Rank Transform for Nonparametric Factorial Analysis ##' ##' The function computes a separate aligned response variable for each effect of a user-specified model, ##' transforms it into a ranking, and applies a separate ANOVA to every resulting ranked aligned response to ##' check the significance of the corresponding effect. ##' @title Aligned Rank Transform procedure ##' @param formula A formula indicating the model to be fitted. ##' @param data A data frame containing the input data. The names of the columns should match the names used in ##' the user-specified \code{formula} of the model that will be fitted. ##' @param perform.aov Optional: whether separate ANOVAs should be run on the ranked aligned responses or not. ##' If not, only the ranked aligned responses will be returned. Defaults to \code{TRUE}. ##' @param SS.type A string indicating the type of sums of squares to be used in the ANOVA on the aligned responses. ##' Must be one of "I", "II", "III". If \code{perform.aov} was set to \code{FALSE}, the value of \code{SS.type} will be ignored. ##' Please note SS types coincide when the design is balanced (equal number of observations per cell) but differ otherwise. ##' Refer to Shaw and Mitchell-Olds (1993) or Fox (1997) for further reading and recommendations on how to conduct ANOVA analyses with unbalanced designs. ##' @param ... Other arguments passed to \link{lm} when computing effect estimates via ordinary least squares for the alignment. ##' @return A tagged list with the following elements: ##' \itemize{ ##' \item \code{$aligned}: a data frame with the input data and additional columns to the right, containing the aligned ##' and the ranked aligned responses for each model effect. ##' \item \code{$significance}: (only when \code{perform.aov = TRUE}) the ANOVA table that collects every unique meaningful row of ##' each of the separate ANOVA tables obtained from the ranked aligned responses. ##' } ##' @author Pablo J. Villacorta Iglesias ##' @references Higgins, J. J., Blair, R. C. and Tashtoush, S. (1990). The aligned rank transform procedure. Proceedings of the Conference on Applied Statistics in Agriculture. ##' Manhattan, Kansas: Kansas State University, pp. 185-195. ##' @references Higgins, J. J. and Tashtoush, S. (1994). An aligned rank transform test for interaction. Nonlinear World 1 (2), pp. 201-211. ##' @references Mansouri, H. (1999). Aligned rank transform tests in linear models. Journal of Statistical Planning and Inference 79, pp. 141 - 155. ##' @references Wobbrock, J.O., Findlater, L., Gergle, D. and Higgins, J.J. (2011). The Aligned Rank Transform for nonparametric factorial analyses using only ANOVA procedures. ##' Proceedings of the ACM Conference on Human Factors in Computing Systems (CHI '11). New York: ACM Press, pp. 143-146. ##' @references Higgins, J.J. (2003). Introduction to Modern Nonparametric Statistics. Cengage Learning. ##' @references Shaw, R.G. and Mitchell-Olds, T. (1993). Anova for Unbalanced Data: An Overview. Ecology 74, 6, pp. 1638 - 1645. ##' @references Fox, J. (1997). Applied Regression Analysis, Linear Models, and Related Methods. SAGE Publications. ##' @references ARTool R package, for full models only. 
\url{http://cran.r-project.org/package=ARTool} ##' @seealso \link{lm} ##' @examples ##' # Input data contained in the Higgins1990-Table1.csv file distributed with ARTool ##' # The data were used in the 1990 paper cited in the References section ##' data(higgins1990, package = "ART"); ##' # Two-factor full factorial model that will be fitted to the data ##' art.results = aligned.rank.transform(Response ~ Row * Column, data = data.higgins1990); ##' print(art.results$aligned, digits = 4); ##' print(art.results$significance); aligned.rank.transform<-function(formula, data, perform.aov = TRUE, SS.type = c("III", "II", "I"), ...){ SS.type = match.arg(SS.type); ## ------- FORMULA PROCESSING ---------- termsObject = terms(formula, keep.order = FALSE, simplify = FALSE, allowDotAsName = FALSE); responseIndexInFormula = attr(termsObject, "response"); # index of the response within the vector "variables" factorMatrix = attr(termsObject, "factors"); variables = as.character(attr(termsObject, "variables"))[-1]; # first element is always the string "list" intercept = attr(termsObject, "intercept"); ## ------------------------------------- frameNames = names(data); ncols = length(frameNames); # including the response variable responseColumn = NULL; frameNames = gsub(":", "_", frameNames); # strip character ":" from the column names to avoid confusion with interaction names if(length(factorMatrix) == 0){ stop("ERROR: the formula does not contain any variable, but only an intercept constant"); } for(i in 1:length(variables)){ if(!(variables[[i]] %in% frameNames)){ stop("ERROR: variable ",variables[[i]]," not found in the frame data"); } if(i == responseIndexInFormula){ # index of the response variable in the formula responseColumn = match(variables[[i]], frameNames); } } ## Delete rows with NA in the response isdata = !is.na(data[[responseColumn]]); data = data[isdata,]; nrows = nrow(data); # -------------------------------------------------------------------------- # First step: compute estimated effects using ordinary least squares with function lm (linear model) # and strip those effects from the response variable # -------------------------------------------------------------------------- matrixEffectNames = colnames(factorMatrix); old.contrasts = getOption("contrasts"); # get contrasts settings ... options(contrasts=c("contr.sum", "contr.sum")); # ... modify them temporarily... # --------------- LEAST SQUARES FITTING --------------- mymodel = lm(formula = formula, x = TRUE, data = data, ...); # get the model matrix x coefficients = mymodel$coefficients; coefficients[is.na(coefficients)] = 0; # ----------------------------------------------------- model.matrix = mymodel$x; options(contrasts = old.contrasts); # ... 
and restore original contrast settings chopped.model.effects = strsplit(x = colnames(model.matrix), ":"); chopped.effectNames = strsplit(x = matrixEffectNames, ":"); aligned_col_names = sapply(X = matrixEffectNames, FUN = function(x) gsub(":","_", paste("Aligned", x, sep="_"))); ranked_col_names = sapply(X = matrixEffectNames, FUN = function(x) gsub(":","_", paste("Ranks", x, sep="_"))); aligned.matrix = data.frame(matrix(NA, nrow = nrows, ncol = ncol(factorMatrix))); ranked.aligned.matrix = data.frame(matrix(NA, nrow = nrows, ncol = ncol(factorMatrix))); colnames(aligned.matrix) = aligned_col_names; colnames(ranked.aligned.matrix) = ranked_col_names; for(i in 1:length(chopped.effectNames)){ # Alignment for effect matrixEffectNames[i] # appearances[j] = TRUE iff the i-th effect or interaction appears in the j-th column of the model matrix appearances = rep(FALSE, length(chopped.model.effects)); # column 1 of the model is the intercept subtraction.matrix = model.matrix; for(j in 1:length(chopped.model.effects)){ # check all column names of the model matrix if(length(chopped.effectNames[[i]]) == length(chopped.model.effects[[j]])){ # Both have the same number of effects. Check they are the same bool.mat = sapply(X = chopped.effectNames[[i]], FUN = grepl, x = chopped.model.effects[[j]]); if( ( is.matrix(bool.mat) && sum(colSums(bool.mat) == 0) == 0 ) || # all effects (e.g. "a", "b") appear at least once in the effect combination (e.g. "a1:b2") ( !is.matrix(bool.mat) && sum(bool.mat) == length(chopped.effectNames[[i]]) ) ){ # Delete the effect name that matches from the effect combinations vector and make sure the residuals are just numbers # that encode the levels (e.g. delete "Row" and "Column" from the combination "Row2:Column1") and check the residuals are "2" and "1". replacement.mat = sapply(X = chopped.effectNames[[i]], FUN = sub, x = chopped.model.effects[[j]], replacement = ""); replacement.mat[replacement.mat == ""] = 1; # correction (any number) for residual "" (returned by function sub when there is a perfect match) if(is.matrix(replacement.mat)){ valid = suppressWarnings( !is.na(matrix(as.integer(replacement.mat), nrow = nrow(replacement.mat), ncol=ncol(replacement.mat))) ); correct = sum(colSums(valid) == 0) == 0; } else{ all.numbers = suppressWarnings(as.integer(replacement.mat[bool.mat])); correct = sum(is.na(all.numbers)) == 0 # all the conversions were successful -> all residuals are numbers } if(correct){ appearances[j] = TRUE; # found: this column of the model matrix must NOT be stripped out for this alignment } } } } subtraction.matrix[,appearances] = 0; # Align the response variable with respect to effect matrixEffectNames[i] aligned.matrix[,i] = data[,responseColumn] - subtraction.matrix %*% coefficients; ranked.aligned.matrix[,i] = rank(aligned.matrix[,i], ties.method = "average"); } data = data.frame(data, aligned.matrix, ranked.aligned.matrix); # ------------------------------------------------------------------------------- # Final step: apply conventional ANOVA to the ranked aligned responses separately # ------------------------------------------------------------------------------- if(perform.aov){ final.rows = ncol(factorMatrix); final.table = data.frame(matrix(nrow=final.rows, ncol = 4)); # Columns of any usual ANOVA table: Sum Sq, Df, F value, Pr(>F) old.contrasts = getOption("contrasts"); options(contrasts=c("contr.sum", "contr.sum")); for(k in 1:length(ranked_col_names)){ expr = update.formula(formula, as.formula(paste(ranked_col_names[[k]], "~ . 
"))); aux.model = lm(expr, data); if(SS.type == "I" || sum(mymodel$residuals)==0){ # for a perfect fit, we turn to stats::anova() regardless the SS.type my.table = anova(aux.model)[,-3]; } # delete the third column (mean sums of squares) else{ my.table = Anova(aux.model, type=SS.type, singular.ok = TRUE); } # Anova() from car package final.table[k,] = my.table[matrixEffectNames[[k]],]; if(k == 1){ names(final.table) = names(my.table); } } row.names(final.table) = matrixEffectNames; options(contrasts = old.contrasts); return(list(aligned = data, significance = final.table)); } else{ # No ANOVA required after the ART return(list(aligned = data)); } }
/scratch/gouwar.j/cran-all/cranData/ART/R/aligned-rank-transform.R
#' @import shiny ARTofR_user_interface <- function() { ART_of_R_functions <- c("title1", "title2", "title3", "divider1", "divider2",'box','box1','list','list1') ui <- fluidPage( HTML("<br/> <strong> DESCRIPTION</strong>"), HTML("<br/> 1. Use this app to insert a title, divider, or box of comments into your script"), HTML("<br/> 2. Start by choosing a decoration style"), HTML("<br/> 3. Copy your text and click 'Use clipboard'"), HTML("<br/> 4. OR, you can just type it and click 'OK'"), HTML("<br/> 5. See results in your console! All results are auto-copied and ready to paste"), radioButtons("STYLE", "Select your decoration:", ART_of_R_functions,inline=TRUE), HTML("<br/>"), actionButton("clipboard", "Use clipboard"), HTML("<br/>"), textInput("userinput",""), actionButton("WORDS", "OK"), HTML("<br/> Tip: choose divider and click OK to get a simple line"), HTML("<br/> Tip 2: for more information, check: <a href=\"https://github.com/Hzhang-ouce/ARTofR\">ARTofR guide</a>") ) server <- function(input, output, session) { observe({ if (input$clipboard) { rstudioapi::callFun("sendToConsole",paste0('ARTofR::xxx_',input$STYLE,'()')) stopApp() } }) observe({ if (input$WORDS) { rstudioapi::callFun("sendToConsole",paste0('ARTofR::xxx_',input$STYLE,'(\"',input$userinput,'\")')) stopApp() } }) } viewer <- dialogViewer('ARTofR') runGadget(ui, server, viewer = viewer) }
/scratch/gouwar.j/cran-all/cranData/ARTofR/R/app.R
#' Core functions for comment making #' #' This is modified from \code{bannerCommenter::banner()}; the main modification is to extend the comment length (minHashes) to 80 #' #' @param x A string, first line of the comment. If #' \code{""}, the zero-length string, only the top lines of the banner are made. #' If missing, in an interactive session the user will be prompted for the input #' strings, one per line, in the console. #' @param ... Zero or more additional strings as extra lines. Strings may contain #' newline characters resulting in further line breaks. #' @param emph A logical value: Do you want this to be an emphasised comment? #' @param snug A logical value: Do you want the decoration to hug the strings closely? #' @param upper A logical value: Do you want the strings converted to upper case? #' @param centre A logical value: Do you want the text strings centred? #' (alternative: left justified) #' @param leftSideHashes A positive integer: How many hashes go on the left side? #' @param rightSideHashes A non-negative integer: How many hashes go on the right side? #' @param minHashes A non-negative integer: What is the minimum number of hashes #' in the boundary lines? #' @param numLines A non-negative integer: How many lines of hashes above #' and below do you want? #' @param bandChar A single character. Used instead of # for all characters in #' the bands around the text, apart from the first character of every line. #' @param center Alternative spelling of \code{centre}. #' @param fold Logical: should the text be folded to ensure lines are not too long? #' @param maxChar Integer: maximum length allowed in any line if \code{fold} is \code{TRUE}. #' @import bannerCommenter #' #' @return A character string vector returned invisibly, #' but automatically displayed in the console #' @export #' @keywords internal #' @examples #' my_own_banner("This should appear clearly and stand out.", #' "The lines are left justified by default.") #' ##This is NOT recommended!!! #' ##This is NOT recommended!!! #' ##This is NOT recommended!!! 
#' ##please use bannerCommenter::banner() my_own_banner<-function (x, ..., emph = FALSE, snug = FALSE, upper = emph, centre = !fold, leftSideHashes = 2 + emph, rightSideHashes = leftSideHashes, minHashes = (!snug) * (80 + 10 * emph), numLines = 1 + emph, bandChar = "#", center = centre, fold = FALSE, maxChar = 75) { if (missing(x)) { x <- if (interactive()) { paste(scan(what = "", sep = "\n", quiet = TRUE), collapse = "\n") } else "" } if (fold) { text <- gsub("\n", " ", paste(as.character(unlist(list(x, ...))), collapse = " ")) if (nchar(text) > maxChar) { txt <- character() repeat { if (nchar(text) <= maxChar) break pos <- gregexpr(" ", text)[[1]] if (any(pos < 0) || !any(pos <= maxChar)) break pos <- max(pos[pos <= maxChar]) txt <- c(txt, substring(text, 0, pos)) text <- substring(text, pos + 1, nchar(text)) } text <- paste(sub("^ +", "", sub(" +$", "", c(txt, text))), collapse = "\n") } } else { text <- paste(as.character(unlist(list(x, ...))), collapse = "\n") } text <- strsplit(text, "\n")[[1]] if (length(text) == 0) text <- "" nt <- length(text) kt <- seq_len(nt) n0 <- max(sapply(text, nchar)) hashes <- max(n0 + leftSideHashes + rightSideHashes + 2, minHashes) #hashes <- hashes + ((n0%%2) != (hashes%%2)) bandChar <- substring(paste0(as.character(bandChar), " "), 0, 1) line <- paste(c(rep("#", leftSideHashes), rep(bandChar, hashes - leftSideHashes), "\n"), collapse = "") leftHash <- paste(rep("#", leftSideHashes), collapse = "") rightHash <- paste(c(rep(bandChar, rightSideHashes), "\n"), collapse = "") if (nt == 1 && n0 == 0) return(structure(c("\n", rep(line, 1 + emph), "\n"), class = "banner")) if (missing(centre) & !missing(center)) centre <- center mid <- character(nt) for (k in kt) { blanks <- (hashes - leftSideHashes - rightSideHashes - nchar(text[[k]])) blanks1 <- ifelse(centre, round(blanks/2), 2) blanks2 <- blanks - blanks1 left <- paste(c(leftHash, rep(" ", blanks1)), collapse = "") right <- paste(c(rep(" ", blanks2), rightHash), collapse = "") mid[k] <- paste(left, ifelse(upper, toupper(text[[k]]), text[[k]]), right, sep = "", collapse = "") } blanks <- rep(" ", hashes - leftSideHashes - rightSideHashes) gap <- paste(c(leftHash, blanks, rightHash), collapse = "") comment <- c("\n", rep(line, numLines), rep(gap, max(0, numLines - 1)), mid, rep(gap, max(0, numLines - 1)), rep(line, numLines), "\n") structure(comment, class = "banner") #invisible(comment) } #' @describeIn my_own_banner sub-function to fold a sentence; don't use this in other circumstances #' @param x the string #' @param maxChar same as width in str_wrap #' @param exdent for xxx_list() #' @keywords internal #' @return The folded string, with line breaks and indentation inserted. fold_it<-function(x,maxChar,exdent = 0){ text <- gsub("\n", " ", paste(as.character(unlist(list(x))), collapse = " ")) if (nchar(text) > maxChar) { txt <- character() repeat { if (nchar(text) <= maxChar) break pos <- gregexpr(" ", text)[[1]] if (any(pos < 0) || !any(pos <= maxChar)) break pos <- max(pos[pos <= maxChar]) txt <- c(txt, substring(text, 0, pos)) text <- substring(text, pos + 1, nchar(text)) } exdent_blank=paste0(rep(" ",exdent),collapse = '') extend_part=paste0("\n",exdent_blank,collapse = '') text <- paste(sub("^ +", "", sub(" +$", "", c(txt, text))), collapse = extend_part ) } return(text) } # print.banner2 <- function(x, ...) { # y <- unlist(strsplit(x, "\n")) # #copy_to_clipboard(y, sep = "\n") # utils::writeClipboard(y,format=13) # cat(y, sep = "\n") # invisible(x) # } .onLoad <- function(libname, pkgname){ requireNamespace('bannerCommenter') }
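# Illustrative sketch of fold_it() (hypothetical input; wrapped in `if (FALSE)`
# so it is never executed when this file is sourced):
if (FALSE) {
  long_sentence <- paste(rep("lorem ipsum", 12), collapse = " ")
  # Breaks at spaces so no line exceeds 30 characters; continuation lines
  # are indented by 3 spaces, as xxx_list() uses.
  cat(fold_it(long_sentence, maxChar = 30, exdent = 3))
}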
/scratch/gouwar.j/cran-all/cranData/ARTofR/R/my_own_banner.R
#' Convert sentences into a box of comments #' #' For user guide, please refer to <https://github.com/Hzhang-ouce/ARTofR>\cr #' #' xxx_box is for paragraphs, xxx_list is for bullet points or numbered lists.\cr #' The number 1 in xxx_box1 and xxx_list1 implies style 1.\cr #' @param mystring A string, will be a line of comment. If missing, the function will try to grab text from your clipboard. #' @param center A logical value: Do you want the text strings centred? #' (alternative: left justified) #' #' @return A character string vector returned invisibly, #' but automatically displayed in the console, the output will be saved to the clipboard #' @export #' #' #' @examples #' # COMMON WORKFLOW OF THIS PACKAGE #' # 1. type your text in any text editor #' # 2. copy your text #' # 3. call xxx_box() etc without arguments to grab text from clipboard #' # 4. decorated text will be displayed and sent to clipboard, NO NEED to copy them #' # 5. paste into your R script #' # #' # #' #............................................... #' # # OTHER USAGE #' #' xxx_box('test sentence, test sentence, test sentence, test sentence',center = FALSE ) #' xxx_box1('test sentence, test sentence, test sentence, test sentence',center = FALSE ) #' xxx_box1('test sentence, test sentence, test sentence, test sentence',center = TRUE ) xxx_box<-function(mystring=NULL, center=FALSE){ if (is.null(mystring)) { mystring<-clipr::read_clip(allow_non_interactive = TRUE) } if (length(mystring)<1) { storage<-mystring storage2<-strwrap(storage,width=75) }else{ storage2='' storage = mystring[mystring != ""] # remove empty rows for (i in 1:length(storage)) { storage2[i]<-fold_it(storage[i],maxChar = 75) storage2[i]<-paste0('',storage2[i],'\n') } } my_own_banner(storage2, leftSideHashes = 1, rightSideHashes = 0, centre = center,numLines = 0) } #' @describeIn xxx_box Same as xxx_box but adds a border #' #' @export xxx_box1<-function(mystring=NULL, center=FALSE){ if (is.null(mystring)) { mystring<-clipr::read_clip(allow_non_interactive = TRUE) } if (length(mystring)<1) { storage<-mystring storage2<-strwrap(storage,width=75) }else{ storage2='' storage = mystring[mystring != ""] # remove empty rows for (i in 1:length(storage)) { storage2[i]<-fold_it(storage[i],maxChar=75) storage2[i]<-paste0('',storage2[i],'\n') } } storage3<-storage2 storage3[2:(length(storage2)+1)]<-storage2 storage3[1]<-' ' storage3[length(storage2)+2]<-''#storage 3 just to create empty space before and after paragraph my_own_banner(storage3, center=center, bandChar = ".",leftSideHashes=1,rightSideHashes=1) } #' @describeIn xxx_box Same as xxx_box but looks better for bullet-point or numbered lists #' #' @export xxx_list<-function(mystring=NULL, center=FALSE){ if (is.null(mystring)) { mystring<-clipr::read_clip(allow_non_interactive = TRUE) } if (length(mystring)<1) { storage<-mystring storage2<-strwrap(storage,width=74) }else{ storage2='' storage = mystring # storage2<-stringr::str_wrap(storage,indent = 0,width=74,exdent =3) has the same effect as below for (i in 1:length(storage)) { storage2[i]<-fold_it(storage[i],maxChar=74,exdent =3) } } storage3<-storage2 storage3[2:(length(storage2)+1)]<-storage2 storage3[1]<-' ' storage3[length(storage2)+2]<-' '#storage 3 just to create empty space before and after paragraph my_own_banner(storage2, leftSideHashes = 1, rightSideHashes = 0, centre = center, minHashes = 0, numLines = 0) } #' @describeIn xxx_box Same as xxx_list but adds a border #' #' @export xxx_list1<-function(mystring=NULL, center=FALSE){ if (is.null(mystring)) { 
mystring<-clipr::read_clip(allow_non_interactive = TRUE) } if (length(mystring)<1) { storage<-mystring storage2<-strwrap(storage,width=74) }else{ storage2='' storage = mystring # storage2<-stringr::str_wrap(storage,indent = 0,width=74,exdent =3) has the same effect as below for (i in 1:length(storage)) { storage2[i]<-fold_it(storage[i],maxChar=74,exdent =3) } } storage3<-storage2 storage3[2:(length(storage2)+1)]<-storage2 storage3[1]<-' ' storage3[length(storage2)+2]<-' '#storage 3 just to create empty space before and after paragraph my_own_banner(storage3, center=center, bandChar = ".",leftSideHashes=1, rightSideHashes = 1) } #............................................................................... # . # I used ARTofR everyday and it makes my R script so neat. I used ARTofR . # everyday and it makes my R script so neat. I used ARTofR everyday and it . # makes my R script so neat. . # . # I recommended this package to all my families. . # . # Yes, even my grandmother like it! . # . #............................................................................... #............................................................................... # . # 1. I used ARTofR everyday . # 2. I used ARTofR everyday and it makes my R script so neat. I used ARTofR . # everyday and it makes my R script so neat. . # 3. I recommended this package to all my families. I recommended this . # package to all my families. . # 4. Yes, even my grandmother like it! . # . #...............................................................................
/scratch/gouwar.j/cran-all/cranData/ARTofR/R/xxx_box.R
#' Insert a divider (line break or page break) #' #' For user guide, please refer to <https://github.com/Hzhang-ouce/ARTofR>\cr #' #' #' A divider is shorter than a title for hierarchy purposes: a title will become a section (for folding) in RStudio, but a divider will not\cr #' There are two styles of divider, xxx_divider1 and xxx_divider2\cr #' Please note that a super long divider string will result in an error\cr #' calling \code{xxx_divider1()} will grab text from the clipboard,\cr #' calling \code{xxx_divider1('')} will make a line without text, \cr #' calling \code{xxx_divider1('mystring')} will make a divider with 'mystring' #' #' @param mystring the content of your divider; must be one line only, not too long #' #' @return A character string vector returned invisibly, #' but automatically displayed in the console, the output will be saved to the clipboard #' @export #' #' @examples #' # COMMON WORKFLOW OF THIS PACKAGE #' # 1. type your text in any text editor #' # 2. copy your text #' # 3. call xxx_box() etc without arguments to grab text from clipboard #' # 4. decorated text will be displayed and sent to clipboard, NO NEED to copy them #' # 5. paste into your R script #' # #' # #' #............................................... #' # # OTHER USAGE #' #' xxx_divider1('mystring') #' xxx_divider1('') #' xxx_divider2('mystring') #' xxx_divider2('') xxx_divider1<-function(mystring=NULL){ if (is.null(mystring)) { mystring<-clipr::read_clip(allow_non_interactive = Sys.getenv("CLIPR_ALLOW", interactive())) mystring<-mystring[1] } text <- mystring if (length(text) == 0) { text <- ""} nt <- 1 kt <- 1 n0 <- nchar(text) leftHash='#' leftSideHashes=nchar(leftHash) rightHash='' rightSideHashes=nchar(rightHash) mid <- character(nt) hashes = 65 blanks <- (hashes - leftSideHashes - rightSideHashes - n0) if (blanks<0) { stop('you must provide a sentence of fewer than 65 characters') } blanks1 = round(blanks/2) blanks2 <- blanks - blanks1 left <- paste(c(' ',leftHash, rep(".", blanks1)), collapse = "") right <- paste(c(rep(".", blanks2), rightHash), collapse = "") mid <- paste(left, text, right, sep = "", collapse = "") structure(mid, class = "banner") #print.banner2(mid) } #' @describeIn xxx_divider1 Same as xxx_divider1 but different style #' #' @export xxx_divider2<-function(mystring=NULL){ if (is.null(mystring)) { mystring<-clipr::read_clip(allow_non_interactive = Sys.getenv("CLIPR_ALLOW", interactive())) mystring<-mystring[1] } if (nchar(mystring)==0) { comment<-my_own_banner("",leftSideHashes=1,bandChar = '~',minHashes=65) structure(paste0(' ',comment), class = "banner") }else{ a<-my_own_banner(mystring,bandChar = '~',numLines=0,rightSideHashes=3,leftSideHashes=1,minHashes=65) b<-my_own_banner("",leftSideHashes=1,bandChar = '~',minHashes=65) comment<-paste0(' ',a,' ',b) structure(comment, class = "banner") } } ##~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/scratch/gouwar.j/cran-all/cranData/ARTofR/R/xxx_divider.R
#' Insert a title #' #' For user guide, please refer to <https://github.com/Hzhang-ouce/ARTofR>\cr #' #' There are three levels of titles; inserting a title with these functions will also add a section in RStudio automatically.\cr #' Please note that a super long title string will result in an error\cr #' xxx_title0() was designed as the main title of the whole script\cr #' xxx_title1() is the largest header\cr #' xxx_title2() is the middle size header\cr #' xxx_title3() is the smallest header\cr #' #' @param mystring the content of your title, must be one line only, not too long #' #' @return A character string vector returned invisibly, #' but automatically displayed in the console, the output will be saved to the clipboard #' @export #' #' @examples #' # COMMON WORKFLOW OF THIS PACKAGE #' # 1. type your text in any text editor #' # 2. copy your text #' # 3. call xxx_box() etc without arguments to grab text from clipboard #' # 4. decorated text will be displayed and sent to clipboard, NO NEED to copy them #' # 5. paste into your R script #' # #' # #' #............................................... #' # # OTHER USAGE #' #' xxx_title0('my title') #' xxx_title1('my title') #' xxx_title2('my title') #' xxx_title3('my title') #' xxx_title1<-function(mystring=NULL){ if (is.null(mystring)) { mystring<-clipr::read_clip(allow_non_interactive = Sys.getenv("CLIPR_ALLOW", interactive())) } my_out_string<-my_own_banner(mystring, numLines = 2,upper=T, bandChar='~') my_out_string<-my_out_string[-3] my_out_string<-my_out_string[-7] my_out_string[4]<-gsub(" (?= )", '-', my_out_string[4],perl = TRUE) my_out_string[4]<-gsub('.{5}$', '----\n', my_out_string[4]) my_out_string[3]<-gsub('.{3}$', '--\n', my_out_string[3]) my_out_string[5]<-gsub('.{3}$', '--\n', my_out_string[5]) structure(my_out_string, class = "banner") #print.banner(my_out_string) } #' @describeIn xxx_title1 the main title of the whole script #' #' @export xxx_title0<-function(mystring=NULL){ if (is.null(mystring)) { mystring<-clipr::read_clip(allow_non_interactive = Sys.getenv("CLIPR_ALLOW", interactive())) } my_out_string<-my_own_banner(mystring, numLines = 2,upper=T, bandChar='~') my_out_string[5]<-gsub('.{5}$', '', my_out_string[5]) my_out_string[5]<-paste0(my_out_string[5],'----\n') structure(my_out_string, class = "banner") #print.banner(my_out_string) } #' @describeIn xxx_title1 the middle size title #' #' @export xxx_title2<-function(mystring=NULL){ if (is.null(mystring)) { mystring<-clipr::read_clip(allow_non_interactive = Sys.getenv("CLIPR_ALLOW", interactive())) } my_out_string<-my_own_banner(mystring,bandChar = '~', rightSideHashes = 2) my_out_string<-gsub('.{5}$', '', my_out_string) my_out_string[2]<-paste0(my_out_string[2],'~~~~\n') my_out_string[3]<-paste0(my_out_string[3],'----\n') my_out_string[4]<-paste0(my_out_string[4],'~~~~\n') structure(my_out_string, class = "banner") #print.banner2(my_out_string) } #' @describeIn xxx_title1 the smallest title #' #' @export xxx_title3<-function(mystring=NULL){ if (is.null(mystring)) { mystring<-clipr::read_clip(allow_non_interactive = Sys.getenv("CLIPR_ALLOW", interactive())) } mystring<-paste0('~ ',mystring,' ') my_out_string<-my_own_banner(mystring,bandChar = '~',snug=T,minHashes = 0, rightSideHashes = 0, centre = FALSE) my_out_string<-gsub('.{1}$', '', my_out_string) my_out_string[2]<-paste0(my_out_string[2],'~~~~\n') my_out_string[3]<-paste0(my_out_string[3],'----\n') my_out_string[4]<-paste0(my_out_string[4],'~~~~\n') structure(my_out_string, class = "banner") #print.banner2(my_out_string) } 
##~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ## -- ##...................................ARTOFR.................................---- ## -- ##~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ##~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ## XXX ---- ##~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ##~~~~~~~~~~~~ ## ~ XX ---- ##~~~~~~~~~~~~
/scratch/gouwar.j/cran-all/cranData/ARTofR/R/xxx_title.R
## ---- include = FALSE--------------------------------------------------------- knitr::opts_chunk$set( collapse = TRUE, comment = "#>" )
/scratch/gouwar.j/cran-all/cranData/ARTofR/inst/doc/User_guide.R
--- title: "Who ever cares about the (art of R) scripts?" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{User_guide} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ``` ## ARTofR ARTofR is designed to make comment decorations like this: ``` r #........................ARTofR line break....................... ``` It is a wrapper of bannerCommenter::banner(), written with thanks to the author, Bill Venables. ## Installation Install from CRAN: ``` r install.packages('ARTofR') ``` Install the development version of ARTofR: ``` r # install.packages("devtools") devtools::install_github("Hzhang-ouce/ARTofR") ``` ## User guide: **User guide at** [Hzhang-ouce-ARTofR](https://github.com/Hzhang-ouce/ARTofR)
/scratch/gouwar.j/cran-all/cranData/ARTofR/inst/doc/User_guide.Rmd
--- title: "Who ever cares about the (art of R) scripts?" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{User_guide} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ``` ## ARTofR ARTofR is designed to make comment decorations like this: ``` r #........................ARTofR line break....................... ``` It is a wrapper of bannerCommenter::banner(), written with thanks to the author, Bill Venables. ## Installation Install from CRAN: ``` r install.packages('ARTofR') ``` Install the development version of ARTofR: ``` r # install.packages("devtools") devtools::install_github("Hzhang-ouce/ARTofR") ``` ## User guide: **User guide at** [Hzhang-ouce-ARTofR](https://github.com/Hzhang-ouce/ARTofR)
/scratch/gouwar.j/cran-all/cranData/ARTofR/vignettes/User_guide.Rmd
# ANOVAs for art objects # # Author: mjskay ############################################################################### #' Aligned Rank Transform Analysis of Variance #' #' Conduct analyses of variance on aligned rank transformed data. #' #' This function runs several ANOVAs: one for each fixed effect term in the #' model \code{object}. In each ANOVA, the independent variables are the same, #' but the response is aligned by a different fixed effect term (if response is #' "aligned") or aligned and ranked by that fixed effect term (if response is #' "art"). These models are generated using \code{\link{artlm}}. #' #' From each model, only the relevant output rows are kept (unless #' \code{all.rows} is \code{TRUE}, in which case all rows are kept). #' #' When \code{response} is \code{"art"} (the default), only one row is kept #' from each ANOVA: the row corresponding to the fixed effect term the response was #' aligned and ranked by. These results represent nonparametric tests of #' significance for the effect of each term on the original response variable. #' #' When \code{response} is \code{"aligned"}, all rows \emph{except} the row #' corresponding to the fixed effect term the response was aligned by are kept. #' If the ART procedure is appropriate for this data, these tests should have #' all effects "stripped out", and have an F value of ~0. If that is not the #' case, another analysis should be considered. This diagnostic is tested by #' \code{\link{summary.art}} and a warning is generated if the F values are not #' all approximately 0. #' #' @name anova.art #' @rdname anova.art #' @aliases anova.art print.anova.art #' @param object An object of class \code{\link{art}}. #' @param response Which response to run the ANOVA on: the aligned responses #' (\code{"aligned"}) or the aligned and ranked responses (\code{"art"}). This #' argument is passed to \code{\link{artlm}}. See 'Details'. #' @param type Type of ANOVAs to conduct. If \code{type} is \code{1} or #' \code{"I"}, then conducts Type I ANOVAs using \code{\link{anova}}. #' Otherwise, conducts Type II or Type III ANOVAs using \code{\link{Anova}}. #' The default is Type III \emph{if} the underlying model supports it. Models #' fit with \code{Error} terms are fit using \code{\link{aov}}, which only #' supports Type I ANOVAs. #' @param factor.contrasts The name of the contrast-generating function to be #' applied by default to fixed effect factors. See the first element of #' \code{\link{options}("contrasts")}. The default is to use #' \code{"contr.sum"}, i.e. sum-to-zero contrasts, which is appropriate for #' Type III ANOVAs (also the default). This argument is passed to #' \code{\link{artlm}}. #' @param test Test statistic to use. Default \code{"F"}. Note that some models #' and ANOVA types may not support \code{"Chisq"}. #' @param all.rows Show all rows of the resulting ANOVA tables? By default #' (\code{FALSE}), shows only the rows that are relevant depending on the type #' of \code{response}. #' @param x An object of class \code{\link{art}}. #' @param verbose When \code{TRUE}, sums of squares and residual sum of squares #' in addition to degrees of freedom are printed in some ANOVA types (e.g. #' repeated measures ANOVAs). Default \code{FALSE}, for brevity. #' @param digits Digits of output in printed table; see \code{\link{print}}. #' @param \dots Additional arguments passed to \code{\link{Anova}} or #' \code{\link{anova}} by \code{anova.art} or to \code{\link{print}} by #' \code{print.anova.art}. 
#' @return An object of class \code{"anova"}, which usually is printed. #' @author Matthew Kay #' @seealso See \code{\link{art}} for an example. See also #' \code{\link{summary.art}}, \code{\link{artlm}}. #' @references Wobbrock, J. O., Findlater, L., Gergle, D., and Higgins, J. J. #' (2011). The aligned rank transform for nonparametric factorial analyses #' using only ANOVA procedures. \emph{Proceedings of the ACM Conference on #' Human Factors in Computing Systems (CHI '11)}. Vancouver, British Columbia #' (May 7--12, 2011). New York: ACM Press, pp. 143--146. \doi{10.1145/1978942.1978963} #' @keywords nonparametric #' #' @export anova.art = function(object, response=c("art", "aligned"), type=c("III", "II", "I", 3, 2, 1), factor.contrasts="contr.sum", test=c("F", "Chisq"), all.rows=FALSE, ... ) { #sensible names for generic parameters m = object #match enum arguments response = match.arg(response) type = as.character(type) type = match.arg(type) test = match.arg(test) #get transformed responses based on response type requested responses = switch(response, aligned=m$aligned, art=m$aligned.ranks) #determine anova type to use type = if (type %in% c(1,"I")) "I" else if (type %in% c(2,"II")) "II" else if (type %in% c(3,"III")) "III" #are we going to need to show the term we aligned by #for each row of the output? show.aligned.by.term = response == "aligned" || all.rows #run linear models and anovas df = m$data anovas = NULL table.description = "" for (j in 1:ncol(responses)) { #can't use ldply here because it appears to drop row names when binding rows aligned.by.term = colnames(responses)[[j]] #get linear model m.l = artlm(m, aligned.by.term, response=response, factor.contrasts=factor.contrasts) #run anova and extract desired results anova.j = flat.anova(m.l, type=type, test=test, ...) if (j == 1) table.description = attr(anova.j, "description") #extract desired result rows from anova #for art, this is the one row corresponding to the effect we aligned and ranked by #for aligned, this is every effect *except* the one we aligned and ranked by if (!all.rows) { include.row = anova.j$Term == aligned.by.term if (response == "aligned") include.row = !include.row anova.j = anova.j[include.row,] } #Add "Aligned By" column when needed to disambiguate if (nrow(anova.j) > 0) { #if only one fixed effect we can get no rows here, e.g. if response="aligned" #and all.rows=FALSE, so the above guard is necessary if (show.aligned.by.term) { anova.j = cbind(anova.j[,1,drop=FALSE], `Aligned By`=aligned.by.term, anova.j[,-1,drop=FALSE]) } } anovas = rbind(anovas, anova.j) } #fill in the rest of the anova table metadata and return class(anovas) = c("anova.art", "anova", "data.frame") attr(anovas, "model") = if (m$n.grouping.terms > 0) "lmer" else if (m$n.error.terms > 0) "aov" else "lm" attr(anovas, "table.description") = table.description attr(anovas, "response") = response attr(anovas, "response.term") = colnames(m$cell.means)[1] anovas } ### Generate p stars for a vector of p values #' @importFrom stats symnum p.stars = function(p.values) { unclass(symnum(p.values, corr = FALSE, na = FALSE, cutpoints = c(0, 0.001, 0.01, 0.05, 0.1, 1), symbols = c("***", "**", "*", ".", " "))) } #' @rdname anova.art #' @importFrom magrittr %<>% #' @export print.anova.art = function(x, verbose=FALSE, digits=5, ...) 
{ #print heading and metadata cat("Analysis of Variance of Aligned Rank Transformed Data\n\n") cat("Table Type:", attr(x, "table.description"), "\n") cat("Model:", switch(attr(x, "model"), lm = "No Repeated Measures (lm)\n", aov = "Repeated Measures (aov)\n", lmer = "Mixed Effects (lmer)\n", )) cat(sep="", "Response: ", attr(x, "response"), "(", attr(x, "response.term"), ")\n\n") #format p values p.col = last(which(grepl("^(P|Pr)\\(", names(x)))) stars.legend = if (!is.na(p.col)) { #add column for p stars stars = p.stars(x[[p.col]]) x = cbind(x, ` ` = stars) #reformat p values for printing x[[p.col]] %<>% format.pval() #return stars legend attr(stars, "legend") } else NULL #generate row names from Terms rownames(x) = paste(1:nrow(x), x$Term) x$Term = NULL #abbreviate columns if (!is.null(x$Error)) x$Error %<>% abbreviate(5) if (!is.null(x$`Aligned By`)) x$`Aligned By` %<>% abbreviate(10) #drop "Sum Sq" (etc) columns when not doing verbose output if (!verbose) x %<>% select(everything(), -contains("Sum Sq"), -contains("Mean Sq")) #print table print.data.frame(x, digits=digits, ...) #print legend if (!is.null(stars.legend)) { cat("---\nSignif. codes: ", stars.legend, "\n") } }
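# Illustrative usage sketch, mirroring the examples in ?art (wrapped in
# `if (FALSE)` so it is never executed when this file is sourced):
if (FALSE) {
  data(Higgins1990Table5, package = "ARTool")
  m <- art(DryMatter ~ Moisture*Fertilizer + (1|Tray), data = Higgins1990Table5)
  anova(m)                         # ANOVAs on the aligned-and-ranked responses
  anova(m, response = "aligned")   # diagnostic: F values here should all be ~0
}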
/scratch/gouwar.j/cran-all/cranData/ARTool/R/anova.art.R
# art function and some basic generic function implementations for art objects, # such as print.art and summary.art # # Author: mjskay ############################################################################### #' Aligned Rank Transform #' #' Apply the aligned rank transform to a factorial model (with optional #' grouping terms). Usually done in preparation for a nonparametric analysis of #' variance on models with numeric or ordinal responses, which can be done by #' following up with \code{anova.art}. #' #' The aligned rank transform allows a nonparametric analysis of variance to be #' conducted on factorial models with fixed and random effects (or repeated #' measures) and numeric or ordinal responses. This is done by first aligning #' and ranking the fixed effects using this function, then conducting an #' analysis of variance on linear models built from the transformed data using #' \code{\link{anova.art}} (see 'Examples'). The model specified using this #' function \emph{must} include all interactions of fixed effects. #' #' The \code{formula} should contain a single response variable (left-hand #' side) that can be numeric, an ordered factor, or logical. The right-hand #' side of the formula should contain one or more fixed effect factors, zero or #' more grouping terms, and zero or more error terms. Error terms and grouping #' terms cannot be used simultaneously. All possible interactions of the fixed #' effect terms must be included. For example, \code{y ~ x} and \code{y ~ #' a*b*c} and \code{y ~ a + b + b:c} are legal, but \code{y ~ a + b} is not, as #' it omits the interaction \code{a:b}. Grouping terms are specified as in #' \code{\link{lmer}}, e.g. \code{y ~ a*b*c + (1|d)} includes the random #' intercept term \code{(1|d)}. Error terms are specified as in #' \code{\link{aov}}, e.g. \code{y ~ a*b*c + Error(d)}. Grouping terms and #' error terms are not involved in the transformation, but are included in the #' model when ANOVAs are conducted, see \code{\link{anova.art}}. #' #' For details on the transformation itself, see Wobbrock \emph{et al.} (2011) #' or the ARTool website: \url{https://depts.washington.edu/acelab/proj/art/}. #' #' @param formula A factorial formula with optional grouping terms or error #' terms (but not both). Should be a formula with a single response variable #' (left-hand side) and one or more terms with all interactions on the #' right-hand side, e.g. \code{y ~ x} or \code{y ~ a*b*c} or \code{y ~ a + b + #' b:c}. If you want to run a mixed effects ANOVA on the transformed data using #' \code{\link{lmer}}, you can include grouping terms, as in \code{y ~ a*b*c + #' (1|d)}. If you want to run a repeated measures ANOVA using #' \code{\link{aov}}, you can include error terms, as in \code{y ~ a*b*c + #' Error(d)}. See 'Details'. #' @param data An optional data frame containing the variables in the model. #' @param rank.comparison.digits The number of digits to round aligned #' responses to before ranking (to ensure ties are computed consistently). See #' the \code{digits} argument of \code{\link{round}}. The default value is #' based on the default \code{tolerance} used for fuzzy comparison in #' \code{all.equal}. #' @param check.errors.are.factors Should we check to ensure \code{Error()} #' terms are all factors? A common mistake involves coding a categorical variable #' as numeric and passing it to \code{Error()}, yielding incorrect results #' from \code{\link{aov}}. 
Disabling this check is not recommended unless you #' know what you are doing; the most common uses of \code{Error()} (e.g. #' in repeated measures designs) involve categorical variables (factors). #' @return An object of class \code{"art"}: #' #' \item{call}{ The call used to generate the transformed data. } #' \item{formula}{ The formula used to generate the transformed data. } #' \item{cell.means}{ A data frame of cell means for each fixed term and #' interaction on the right-hand side of formula. } \item{estimated.effects}{ A #' data frame of estimated effects for each fixed term and interaction on the #' right-hand side of formula. } \item{residuals}{ A vector of residuals #' (response - cell mean of highest-order interaction). } \item{aligned}{ A #' data frame of aligned responses for each fixed term and interaction on the #' right-hand side of formula. } \item{aligned.ranks}{ A data frame of aligned #' and ranked responses for each fixed term and interaction on the right-hand #' side of formula. } \item{data}{ The input data frame } #' \item{n.grouping.terms}{ The number of grouping variables in the input #' formula. } #' #' For a complete description of cell means, estimated effects, aligned ranks, #' etc., in the above output, see Wobbrock \emph{et al.} (2011). #' @author Matthew Kay #' @seealso \code{\link{summary.art}}, \code{\link{anova.art}}, #' \code{\link{artlm}}, \code{\link{artlm.con}}, \code{\link{art.con}}. #' @references Wobbrock, J. O., Findlater, L., Gergle, D., and Higgins, J. J. #' \emph{ARTool}. \url{https://depts.washington.edu/acelab/proj/art/}. #' #' Wobbrock, J. O., Findlater, L., Gergle, D., and Higgins, J. J. #' (2011). The aligned rank transform for nonparametric factorial analyses #' using only ANOVA procedures. \emph{Proceedings of the ACM Conference on #' Human Factors in Computing Systems (CHI '11)}. Vancouver, British Columbia #' (May 7--12, 2011). New York: ACM Press, pp. 143--146. \doi{10.1145/1978942.1978963} #' @keywords nonparametric #' @examples #' \donttest{ #' data(Higgins1990Table5, package = "ARTool") #' #' ## perform aligned rank transform #' m <- art(DryMatter ~ Moisture*Fertilizer + (1|Tray), data=Higgins1990Table5) #' #' ## see summary data to ensure aligned rank transform is appropriate for this data #' summary(m) #' ## looks good (aligned effects sum to 0 and F values on aligned responses #' ## not of interest are all ~0) #' #' ## we can always look at the anova of aligned data if we want more detail #' ## to assess the appropriateness of ART. F values in this anova should all #' ## be approx 0. #' anova(m, response="aligned") #' #' ## then we can run an anova on the ART responses (equivalent to anova(m, response="art")) #' anova(m) #' #' #' ## if we want contrast tests, we can use art.con(): #' ## Ex 1: pairwise contrasts on Moisture: #' art.con(m, "Moisture") #' ## Ex 2: pairwise contrasts on Moisture:Fertilizer: #' art.con(m, "Moisture:Fertilizer") #' ## Ex 3: difference-of-difference tests on the Moisture:Fertilizer interaction: #' art.con(m, "Moisture:Fertilizer", interaction = TRUE) #' #' #' ## The above three examples with art.con() can be constructed manually as well. #' ## art.con() extracts the appropriate linear model and conducts contrasts #' ## using emmeans(). If we want to use a specific method for post-hoc tests #' ## other than emmeans(), artlm.con(m, term) returns the linear model for the #' ## specified term which we can then examine using our preferred method #' ## (emmeans, glht, etc). 
The equivalent calls for the above examples are:
#' library(emmeans)
#'
#' ## Ex 1: pairwise contrasts on Moisture:
#' contrast(emmeans(artlm.con(m, "Moisture"), pairwise ~ Moisture))
#'
#' ## Ex 2: pairwise contrasts on Moisture:Fertilizer:
#' ## See artlm.con() documentation for more details on the syntax, specifically
#' ## the formula passed to emmeans.
#' contrast(emmeans(artlm.con(m, "Moisture:Fertilizer"), pairwise ~ MoistureFertilizer))
#'
#' ## Ex 3: difference-of-difference tests on the Moisture:Fertilizer interaction:
#' ## Note the use of artlm() instead of artlm.con()
#' contrast(
#'   emmeans(artlm(m, "Moisture:Fertilizer"), ~ Moisture:Fertilizer),
#'   method = "pairwise", interaction = TRUE
#' )
#'
#'
#' ## For a more in-depth explanation and example of contrasts with art and
#' ## differences between interaction types, see vignette("art-contrasts")
#'
#' }
#'
#' @importFrom stats complete.cases model.frame terms
#' @importFrom plyr laply llply
#' @export
art = function(formula, data,
    #number of digits to round aligned responses to before ranking (to ensure ties are computed consistently)
    rank.comparison.digits = -floor(log10(.Machine$double.eps ^ 0.5)),
    check.errors.are.factors = TRUE
) {
    #parse and validate formula
    f = parse.art.formula(formula)

    #generate base model data frame (fixed effects only)
    if (missing(data)) {
        data = environment(formula)
    }
    #get data frame from input formula and data
    #first col will be response, followed by fixed effects
    df = model.frame(f$fixed.only, data, na.action=function(object) {
        #verify that all cases are complete (no NAs)
        #TODO: add na.rm or na.action support
        if (!all(complete.cases(object))) {
            stop("Aligned Rank Transform cannot be performed when fixed effects have missing data (NAs).")
        }
        object
    })

    #verify that the response is numeric, ordered factor, or logical
    if (!is.numeric(df[,1]) && !is.ordered(df[,1]) && !is.logical(df[,1])) {
        stop("Response term must be numeric, ordered factor, or logical (it was ", do.call(paste, as.list(class(df[,1]))), ")")
    }
    #coerce response to numeric for processing
    df[,1] = as.numeric(df[,1])

    #verify that all fixed effects are factors
    #TODO: can these be ordered factors?
    non.factor.terms = Filter(function (col) !is.factor(df[,col]) && !is.logical(df[,col]), 2:ncol(df))
    if (any(non.factor.terms)) {
        stop(
            "All fixed effect terms must be factors or logical (e.g. not numeric).\n",
            "  The following terms are not factors or logical:\n    ",
            paste0(names(df)[non.factor.terms], collapse = "\n    "),
            "\n  If these terms are intended to represent categorical data, you may\n  ",
            "want to convert them into factors using factor()."
        )
    }
    #coerce fixed effects to numeric for processing
    for (j in 2:ncol(df)) {
        df[,j] = as.numeric(df[,j])
    }

    #for error terms, issue error if any terms aren't factors
    if (check.errors.are.factors && f$n.error.terms > 0) {
        error.term.df = model.frame(f$error.terms, data)
        non.factor.error.terms = Filter(function (col) !is.factor(error.term.df[,col]), 1:ncol(error.term.df))
        if (any(non.factor.error.terms)) {
            stop(
                "The following Error terms are not factors:\n    ",
                paste0(names(error.term.df)[non.factor.error.terms], collapse = "\n    "),
                "\n  If these terms are intended to represent categorical data, such as subjects in a \n",
                "  repeated measures design, you should convert them into factors using factor().\n",
                "  \n",
                "  If you know what you are doing and still want Error terms that are not factors, use\n",
                "  check.errors.are.factors = FALSE."
            )
        }
    }

    #calculate cell means and estimated effects
    m = art.estimated.effects(terms(f$fixed.only), df)

    #calculate residuals (response - cell mean of highest-order interaction)
    m$residuals = df[,1] - m$cell.means[,ncol(m$cell.means)]

    #calculate aligned responses
    m$aligned = m$residuals + m$estimated.effects

    #compute aligned and ranked responses
    m$aligned.ranks = data.frame(llply(round(m$aligned, rank.comparison.digits), rank), check.names=FALSE)

    class(m) = "art"
    m$formula = formula
    m$call = match.call()
    m$data = data
    m$n.grouping.terms = f$n.grouping.terms
    m$n.error.terms = f$n.error.terms
    m
}


#' Aligned Rank Transform Summary
#'
#' Summary and diagnostics for aligned rank transformed data
#'
#' This function gives diagnostic output to help evaluate whether the ART
#' procedure is appropriate for an analysis. It tests that column sums of
#' aligned responses are ~0 and that F values of ANOVAs on aligned responses
#' not of interest are ~0. For more details on these diagnostics see Wobbrock
#' \emph{et al.} (2011).
#'
#' @param object An object of class \code{\link{art}}.
#' @param \dots Potentially further arguments passed from other methods.
#' @return An object of class \code{"summary.art"}, which usually is printed.
#' @author Matthew Kay
#' @seealso See \code{\link{art}} for an example. See also
#' \code{\link{anova.art}}.
#' @references Wobbrock, J. O., Findlater, L., Gergle, D., and Higgins, J. J.
#' (2011). The aligned rank transform for nonparametric factorial analyses
#' using only ANOVA procedures. \emph{Proceedings of the ACM Conference on
#' Human Factors in Computing Systems (CHI '11)}. Vancouver, British Columbia
#' (May 7--12, 2011). New York: ACM Press, pp. 143--146. \doi{10.1145/1978942.1978963}
#' @keywords nonparametric
#'
#' @importFrom stats anova
#' @export
summary.art = function(object, ...) {
    #sensible names for generic parameters
    m = object

    #verify that aligned responses sum to 0 (using fuzzy compare)
    m$aligned.col.sums = colSums(m$aligned)
    if (!isTRUE(all.equal(as.vector(m$aligned.col.sums), rep(0, length(m$aligned.col.sums))))) {
        stop("Aligned responses do not sum to ~0. ART may not be appropriate.")
    }

    #verify that F values of ANOVA are all ~0 (using fuzzy compare)
    m$aligned.anova = anova(m, response="aligned")
    if (!isTRUE(all.equal(m$aligned.anova$F, rep(0, length(m$aligned.anova$F))))) {
        warning("F values of ANOVAs on aligned responses not of interest are not all ~0. ART may not be appropriate.")
    }

    class(m) = c("summary.art", class(m))
    m
}

#' @export
print.art = function(x, ...) print(summary(x), ...)

#' @export
print.summary.art = function(x,
    #number of digits to display (based on tolerance used for fuzzy compare in all.equal)
    display.digits = -floor(log10(.Machine$double.eps ^ 0.5)),
    ...
) {
    #sensible names for generic parameters
    m = x

    cat("Aligned Rank Transform of Factorial Model\n\nCall:\n", paste(deparse(m$call), sep="\n", collapse="\n"), "\n\n", sep="")
    cat("Column sums of aligned responses (should all be ~0):\n")
    print(round(m$aligned.col.sums, display.digits), ...)
    cat("\nF values of ANOVAs on aligned responses not of interest (should all be ~0):\n")
    print(round(summary(m$aligned.anova$F), display.digits), ...)
}
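
# Illustrative sketch (not part of the original package source; kept as
# comments so it does not execute when this file is sourced). It exercises the
# diagnostics implemented by summary.art() above, using the Higgins1990Table5
# dataset that ships with ARTool:
#
#   data(Higgins1990Table5, package = "ARTool")
#   m = art(DryMatter ~ Moisture*Fertilizer + (1|Tray), data = Higgins1990Table5)
#   summary(m)
#   # column sums of aligned responses and F values of ANOVAs on aligned
#   # responses not of interest should both be ~0 if ART is appropriate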
/scratch/gouwar.j/cran-all/cranData/ARTool/R/art.R
# art.con function for both "normal" contrasts and interaction contrasts
#
# Author: lelkin
###############################################################################

#' Aligned Rank Transform Contrasts
#'
#' Conduct contrast tests following an Aligned Rank Transform (ART) ANOVA
#' (\code{\link{anova.art}}). Conducts contrasts on \code{\link{art}} models
#' using linear models built from data aligned-and-ranked with the ART (Wobbrock et al. 2011)
#' or ART-C (Elkin et al. 2021) alignment procedure, as appropriate to the requested contrast.
#'
#' An \code{\link{art}} model \code{m} stores the \code{formula} and \code{data}
#' that were passed to \code{\link{art}} when \code{m} was created. Depending on the
#' requested contrast type, this function either extracts the linear model from \code{m}
#' needed to perform that contrast or creates a new linear model on data
#' aligned-and-ranked using the ART-C
#' procedure, then conducts the contrasts specified in parameter \code{formula}.
#'
#' Internally, this function uses \code{\link{artlm.con}} (when \code{interaction = FALSE})
#' or \code{\link{artlm}} (when \code{interaction = TRUE}) to get the linear
#' model necessary for the requested contrast, computes estimated marginal
#' means on the linear model using \code{\link{emmeans}}, and conducts contrasts
#' using \code{\link{contrast}}.
#'
#' @param m An object of class \code{\link{art}}.
#' @param formula Either a character vector or a formula specifying the fixed
#' effects whose levels will be compared. See "Formula" section below.
#' @param response Which response to use: the aligned response
#' (\code{"aligned"}) or the aligned-and-ranked response
#' (\code{"art"}). Default is "art". This argument is passed to \code{\link{artlm.con}}
#' (when \code{interaction = FALSE}) or \code{\link{artlm}} (when \code{interaction = TRUE}).
#' @param factor.contrasts The name of the contrast-generating function to be
#' applied by default to fixed effect factors. Sets the first element of
#' \code{\link{options}("contrasts")} for the duration of this function. The
#' default is to use \code{"contr.sum"}, i.e. sum-to-zero contrasts, which is
#' appropriate for Type III ANOVAs (the default ANOVA type for
#' \code{\link{anova.art}}). This argument is passed to \code{\link{artlm.con}} /
#' \code{\link{artlm}}.
#' @param method Contrast method argument passed to \code{\link{contrast}}.
#' Note: the default is \code{"pairwise"} even though the default for the
#' \code{\link{contrast}} function is \code{"eff"}.
#' @param interaction Logical value. If \code{FALSE} (the default), conducts contrasts using
#' the ART-C procedure and \code{\link{artlm.con}}. If \code{TRUE}, conducts
#' difference-of-difference contrasts using a model returned by \code{\link{artlm}}.
#' See the "Interaction Contrasts" section in \code{\link{contrast}}.
#' @param adjust Character: adjustment method (e.g., "bonferroni") passed to
#' \code{\link{contrast}}. If not provided, \code{\link{contrast}} will use
#' its default ("tukey" at the time of publication). All available options are listed
#' in \code{\link{summary.emmGrid}} in the "P-value adjustments" section.
#' @param \dots Additional arguments passed to \code{\link{lm}} or
#' \code{\link{lmer}}.
#' @return An object of class \code{emmGrid}. See \code{\link{contrast}}
#' for details.
#' @author Lisa A. Elkin, Matthew Kay, Jacob O. Wobbrock
#'
#' @section Formula: Contrasts compare combinations of levels from multiple
#' factors. 
The \code{formula} parameter indicates which factors are involved. Two
#' formats are accepted: (1) a character vector as used in
#' \code{\link{artlm}} and \code{\link{artlm.con}}, with factors separated by \code{":"};
#' or (2) a formula as used in \code{\link{emmeans}}, with factors separated by \code{*}.
#' For example, contrasts comparing
#' combinations of levels of factors \emph{X1} and \emph{X2} can be expressed
#' as \code{"X1:X2"} (character vector) or as \code{~ X1*X2} (formula).
#'
#' @references Elkin, L. A., Kay, M, Higgins, J. J., and Wobbrock, J. O.
#' (2021). An aligned rank transform procedure for multifactor contrast tests.
#' \emph{Proceedings of the ACM Symposium on User Interface Software and
#' Technology (UIST '21)}. Virtual Event (October 10--14, 2021). New York:
#' ACM Press, pp. 754--768. \doi{10.1145/3472749.3474784}
#'
#' Wobbrock, J. O., Findlater, L., Gergle, D., and Higgins, J. J.
#' (2011). The aligned rank transform for nonparametric factorial analyses
#' using only ANOVA procedures. \emph{Proceedings of the ACM Conference on
#' Human Factors in Computing Systems (CHI '11)}. Vancouver, British Columbia
#' (May 7--12, 2011). New York: ACM Press, pp. 143--146. \doi{10.1145/1978942.1978963}
#'
#' @export
#'
#' @examples
#' \donttest{
#' data(Higgins1990Table5, package = "ARTool")
#'
#' library(dplyr)
#'
#' ## Perform aligned rank transform
#' m <- art(DryMatter ~ Moisture*Fertilizer + (1|Tray), data=Higgins1990Table5)
#'
#' ## In some workflows, contrast tests using ART-C would follow a
#' ## significant omnibus effect found by running an anova on the ART responses
#' ## (equivalent to anova(m, response="art")).
#' ## If conducting planned contrasts, this step can be skipped.
#' anova(m)
#'
#' ## We can conduct contrasts comparing levels of Moisture using the ART-C procedure.
#' ## If conducting contrasts as a post hoc test, this would follow a significant effect
#' ## of Moisture on DryMatter.
#'
#' ## Using a character vector
#' art.con(m, "Moisture")
#' ## Or using a formula
#' art.con(m, ~ Moisture)
#'
#' ## Note: Since the ART-C procedure is mathematically equivalent to the ART procedure
#' ## in the single-factor case, this is the same as
#' ## emmeans(artlm(m, "Moisture"), pairwise ~ Moisture)
#'
#' ## art.con() returns an emmGrid object, which does not print asterisks
#' ## beside "significant" tests (p < 0.05). If you wish to add stars beside
#' ## tests of a particular significance level, you can always do that to the
#' ## data frame returned by the summary() method of emmGrid. For example:
#' art.con(m, ~ Moisture) %>%
#'   summary() %>%
#'   mutate(sig = ifelse(p.value < 0.05, "*", ""))
#'
#' ## Or a more complex example:
#' art.con(m, ~ Moisture) %>%
#'   summary() %>%
#'   mutate(sig = symnum(p.value, corr = FALSE, na = FALSE,
#'     cutpoints = c(0, 0.001, 0.01, 0.05, 0.1, 1),
#'     symbols = c("***", "**", "*", ".", " ")
#'   ))
#'
#' ## We can conduct contrasts comparing combinations of levels
#' ## of Moisture and Fertilizer using the ART-C procedure.
#' ## If conducting contrasts as a post hoc test, this would follow
#' ## a significant Moisture:Fertilizer interaction effect on DryMatter. 
#' #' ## Using a character vector for formula #' art.con(m, "Moisture:Fertilizer") #' ## Using a formula #' art.con(m, ~ Moisture*Fertilizer) #' #' ## We can also conduct interaction contrasts (comparing differences of differences) #' art.con(m, "Moisture:Fertilizer", interaction = TRUE) #' #' ## For more examples, see vignette("art-contrasts") #' #' } art.con = function( m, formula, response = "art", factor.contrasts="contr.sum", method = "pairwise", interaction = FALSE, adjust, ... ) { f.parsed = parse.art.con.formula(formula) # syntax handled differently for interaction contrasts. if (interaction) { art.interaction.contrast = do.art.interaction.contrast(m, f.parsed, response, factor.contrasts, method, adjust, ...) art.interaction.contrast } else { artlm.con = artlm.con.internal(m, f.parsed, response, factor.contrasts, ...) art.contrast = do.art.contrast(f.parsed, artlm.con, method, adjust) art.contrast } }
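
# Illustrative sketch (not part of the original package source; kept as
# comments so it does not execute when this file is sourced). Since `adjust`
# is forwarded to emmeans::contrast(), a p-value adjustment such as
# "bonferroni" can be requested directly:
#
#   data(Higgins1990Table5, package = "ARTool")
#   m = art(DryMatter ~ Moisture*Fertilizer + (1|Tray), data = Higgins1990Table5)
#   art.con(m, "Moisture:Fertilizer", adjust = "bonferroni")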
/scratch/gouwar.j/cran-all/cranData/ARTool/R/art.con.R
### Gets variables from spec
### Makes sure op is the only operator in spec. Throws error if not.
### Returns vector containing all variables in spec
### spec is an expression (e.g., a:b:`var with spaces and : in it`)
### op is a string representation of the allowed operation (e.g., ":")
### Note: for our current purposes, we only have one validation operator for each situation
### we would use this function in. Obviously, it would have to be re-written to accommodate more.
get.variables = function(spec, op) {
    if (is.call(spec) && spec[[1]] == op) {
        # recursive case: get variables from children
        c(get.variables(spec[[2]], op), get.variables(spec[[3]], op))
    } else if (is.name(spec)) {
        # base case: this is a variable
        # if recursive case is never called, need to make sure we still return a vector
        # if recursive case is called, it's fine, c(c(1), c(2)) = c(1,2)
        c(spec)
    } else {
        stop("Contrast term can only contain variables and `", op, "`.")
    }
}

### Parses and validates contrast formula of form "a:b:c"
### Raises exception if formula does not validate.
### returns list with:
###   interaction.variables: list of fixed variables (of type name) in interaction (e.g., list(a, b, c)).
###   interaction.term.label: quoted interaction term label (e.g. "a:b:c")
###   concat.interaction.variable: concatenation (of type name) of all variables in interaction.variables (e.g., abc)
#' @importFrom stats terms
parse.art.con.string.formula = function(f.orig) {
    # make sure f.orig is a single string (as opposed to a vector of multiple strings)
    # don't think this is actually needed. seems to get caught earlier by
    # "Contrast must either be formula of form ~ X1*X2*X3 or
    # term of form \"X1:X2:X3\")" error below
    if (!is.character(f.orig) || length(f.orig) != 1) {
        stop("Contrast must be string term of form \"X1:X2\")")
    }

    # parse spec into an expression
    f.orig.expr = parse(text = f.orig, keep.source = FALSE)[[1]]
    variables = get.variables(f.orig.expr, ":")
    term.labels = f.orig

    # ensure we have exactly one interaction and no other terms
    if (length(term.labels) != 1) {
        stop(
            "Model must have exactly one interaction and no other terms ("
            , length(term.labels), " terms given)"
        )
    }

    # Setup return list
    # interaction.term.label: string name for interaction term label (e.g. "a:b:c")
    interaction.term.label = term.labels
    # interaction.variables: list of fixed variables (e.g., list(a, b, c))
    interaction.variables = variables
    # concat.interaction.variable: string formula with all
    # interaction variables concatenated (e.g., abc)
    concat.interaction.variable = as.name(paste(interaction.variables, collapse=""))

    list(
        interaction.term.label = interaction.term.label,
        interaction.variables = interaction.variables,
        concat.interaction.variable = concat.interaction.variable
    )
}

### parses f.orig which is formula or term (f) passed to art.con or artlm.con
### f.orig can be of form ~ a*b*c or "a:b:c"
### if f.orig is of "formula" form (i.e., ~ a*b*c),
### converts it to "string" form (i.e., "a:b:c")
### returns: result of parse.art.con.string.formula when passed "string" version of f.orig
### list with
###   interaction.variables: list of fixed variables (of type name) in interaction (e.g., list(a, b, c)).
###   interaction.term.label: quoted interaction term label (e.g. 
"a:b:c") ### concat.interaction.variable: concatenation (of type name) of all variables in interaction.variables (e.g., abc) #' @importFrom stats as.formula parse.art.con.formula = function(f.orig) { # looking for ~ a*b*c if (inherits(f.orig, "formula")) { # make sure only operator on rhs of original formula is "*" # e.g., f.orig = ~ a*b*c -> f.orig[[1]] = ~ and f.orig[[2]] = a*b*c if (f.orig[[1]] != "~") { stop("Left hand side of formula must be `~`.") } # if there is a dependent variable (i.e., variable on lhs of ~), then f.orig will have length 3 # otherwise, f.orig will have length 2 # e.g., f.orig = Y ~ a*b*c -> f.orig[[1]] = ~, f.orig[[2]] = Y, f.orig[[3]] = a*b*c # e.g., f.orig = ~ a*b*c -> f.orig[[1]] = ~, f.orig[[2]] = a*b*c if (length(f.orig) > 2) { stop( "Formula must not have any variables on LHS (got ", f.orig[[2]], "). ", "Did you mean ", gsub(pattern = toString(f.orig[[2]]), x = deparse(f.orig), replacement = ''), "?" ) } # get formula variables (i.e., individual variables on lhs), and ensure "*" is the only RHS operator. # e.g., f.orig = ~ a*b*c -> f.orig[[1]] = `~`, f.orig[[2]] = a*b*c f.orig.expr = f.orig[[2]] variables = get.variables(f.orig.expr, "*") # stitch variables back together with : operator between each variable. # e.g. variables = c(a,b,c) -> a:b:c stiched.variables = Reduce(function(x, y) call(":", x, y), variables) # add "~" to lhs and turn into formula # e.g., stiched.variables = a:b:c -> ~a:b:c f = as.formula(call("~", stiched.variables)) # extract terms from formula f.terms = terms(f) # char vector of names of rhs terms and their interactions # e.g. ~ a:b:c -> c("a:b:c")) term.labels = attr(f.terms, "term.labels") # don't think we need this. # I think it gets caught by # "Term or formula passed to art.con or artlm.con must contain a single # interaction term and no other terms, and the interaction term must be in art model formula." if (length(term.labels) != 1) { stop( "Model must have exactly one interaction and no other terms (" , length(term.labels), " terms given)" ) } return(parse.art.con.string.formula(term.labels[[1]])) } # end f is formula # looking for "a:b:c" else if (is.character(f.orig) & length(f.orig) == 1) { return(parse.art.con.string.formula(f.orig)) } # Error if f is not formula or string else { stop("Contrast must either be formula of form `~ X1*X2*X3` or term of form \"X1:X2:X3\")") } } ### Parses formula from supplied model ### Validates that interaction term from f is in model formula ### Does not validate model formula itself since model formula was validated when model was created ### Given a formula like response ~ a*b*c + (1|d) + Error(g) ### input: ### m.f is the formula for the original art model ### f.parsed is the already parsed contrast formula ### returns list with: ### response: response (type name) (e.g. response) ### fixed.variables: list of named fixed variables (of type name) (e.g. list(a, b, c)) ### grouping.variables: list of grouping variables (of type name) (e.g. list(1|d)) ### error.variables: list of error variables (of type name) (e.g. list(Error(g))) parse.art.model.formula = function(m.f, f.parsed) { # get model formula terms m.f.terms = terms(m.f) # char vector of names of rhs terms and their interactions # e.g. 
Response ~ a*b + (1|d) + Error(g) -> c("a", "b", "1 | d", "Error(g)", "a:b")) not necessarily in this order
    m.f.term.labels = attr(m.f.terms, "term.labels")

    # check if interaction term from f is in model formula
    f.interaction.term.label = f.parsed$interaction.term.label # f.parsed always has exactly one interaction
    is.interaction.term = grepl(f.interaction.term.label, m.f.term.labels)

    # if interaction term from f is not in model formula, error
    if (!any(is.interaction.term)) {
        stop(
            "Term or formula passed to art.con or artlm.con must contain a single interaction term\n",
            "and no other terms, and the interaction term must be in art model formula."
        )
    }

    # response ~ a*b*c*d + (1|d) + Error(g) -> list(response, a, b, c, d, 1|d, Error(g))
    m.f.variables.all = as.list(attr(m.f.terms, "variables"))[c(-1)]
    m.f.response = m.f.variables.all[[1]]
    m.f.variables = m.f.variables.all[c(-1)]

    #determine which variables on the rhs are grouping variables, error variables, or fixed variables
    is.grouping.variable = sapply(m.f.variables, function(term) as.list(term)[[1]] == quote(`|`))
    is.error.variable = sapply(m.f.variables, function(term) as.list(term)[[1]] == quote(`Error`))
    #all other variables that aren't grouping or error variables must be fixed variables
    is.fixed.variable = !(is.grouping.variable | is.error.variable)
    m.f.grouping = m.f.variables[is.grouping.variable]
    m.f.error = m.f.variables[is.error.variable]
    m.f.fixed = m.f.variables[is.fixed.variable]

    # remove response
    list(
        response = m.f.response,
        fixed.variables = m.f.fixed,
        grouping.variables = m.f.grouping,
        error.variables = m.f.error
    )
}

### creates new data frame which is a copy of df except
### adds a new column by concatenated columns of df whose names are the interaction variables in f
### removes columns whose names are the interaction variables in f
### m.formula is the original formula used to create the ART model
### df is the data frame used to create the ART model
### formula is the contrast formula
generate.art.concatenated.df = function(m.f.parsed, df, f.parsed) {
    # concatenate columns of data frame whose columns names are the variables in f.parsed.interaction.variables
    # e.g. abc
    f.concatenated.variable = f.parsed$concat.interaction.variable
    # e.g. list(a, b, c)
    f.interaction.variables = f.parsed$interaction.variables

    # indexing in art.con.df used below with f.concatenated.variable does not work on tibbles,
    # so convert to a data.frame in advance
    art.con.df = as.data.frame(df, optional = TRUE)

    # turn list of names to vector of strings. 
e.g., aa = as.name("a"), bb = as.name("b"), list(aa, bb) -> c("a","b")
    # unname throws error when only one interaction variable and we don't need to concatenate in that case
    # this is easier than debugging it
    if (length(f.interaction.variables) > 1) {
        f.interaction.variables.string.vec = sapply(f.interaction.variables,deparse)
        art.con.df[[f.concatenated.variable]] = do.call(paste, c(unname(art.con.df[,f.interaction.variables.string.vec]), sep = ","))
        art.con.df[,f.interaction.variables.string.vec] = NULL
    }

    # note: when m was created would have thrown error if a fixed var column in df was not a factor
    art.con.df[[f.concatenated.variable]] = factor(art.con.df[[f.concatenated.variable]])

    art.con.df
}

### removes variables from the model formula (m.f) that are in the contrast formula (f)
### replaces them with the new concatenated variable (the concatenation of all variables in the contrast formula)
### creates and returns art model on art.concatenated.df using the created formula
### note: this is not the same as using the full factorial model of all columns in df
###   there can be columns in df that are not used in the model.
generate.art.concatenated.model = function(m.f, m.f.parsed, art.concatenated.df, f.parsed) {
    # fixed variables in model formula
    m.f.fixed.variables = m.f.parsed$fixed.variables
    # interaction variables
    f.interaction.variables = f.parsed$interaction.variables
    # concatenated interaction variable
    f.concat.interaction.variable = f.parsed$concat.interaction.variable

    # indices of interaction variables in model formula fixed variable list
    # e.g. f.interaction.variables = (a, c) and m.f.fixed.variables = (a, b, c) -> c(1, 3)
    interaction.variable.index = match(f.interaction.variables, m.f.fixed.variables)

    # remove interaction variables from model formula
    # e.g. f.interaction.variables = (a, c) and m.f.fixed.variables = (a, b, c) -> list(b)
    m.f.fixed.variables.no.interaction.vars = m.f.fixed.variables[-interaction.variable.index]
    m.f.fixed.variables.with.concat = c(m.f.fixed.variables.no.interaction.vars, f.concat.interaction.variable)

    # create formula with response m.f.response, fixed vars m.f.fixed.variables.with.concat
    # grouping variables m.f.grouping.variables, error variables m.f.error.variables
    m.f.response = m.f.parsed$response
    m.f.grouping.variables = m.f.parsed$grouping.variables
    m.f.error.variables = m.f.parsed$error.variables

    # collapse fixed variable vector into string separated by *
    # e.g. list(ac,b) -> "ac*b"
    m.f.fixed.str = paste(m.f.fixed.variables.with.concat, collapse = "*")

    # add parentheses around each grouping variable and
    # collapse grouping variables vector into string separated by +
    # e.g. list(1|d, 1|e) -> "(1|d) + (1|e)"
    # note: empty list coerced to empty vector i.e. list() -> character(0)
    m.f.grouping.paren = if (length(m.f.grouping.variables) == 0) character() else paste("(", m.f.grouping.variables, ")", sep="")
    m.f.grouping.str = if (length(m.f.grouping.paren) == 0) character() else paste(m.f.grouping.paren, collapse = "+")

    # collapse error variables vector into string separated by +
    # e.g. list(Error(g), Error(h)) -> "Error(g) + Error(h)"
    # note: empty list coerced to empty vector i.e. list() -> character(0)
    m.f.error.str = if (length(m.f.error.variables) == 0) character() else paste(m.f.error.variables, collapse = "+")

    # assemble concatenated art formula
    # some terms may be missing (e.g., no grouping term). 
remove those from vector
    # because weird things happen when pasting multiple strings together and one is empty
    strings.to.join = c(m.f.fixed.str, m.f.grouping.str, m.f.error.str)
    nonempty.strings.to.join = strings.to.join[strings.to.join != ""]
    art.concatenated.formula.rhs.str = paste(nonempty.strings.to.join, collapse = "+")
    art.concatenated.formula = as.formula(paste(m.f.response, "~", art.concatenated.formula.rhs.str))

    # make art concatenated model
    m = art(art.concatenated.formula, data = art.concatenated.df)
    m
}

### called internally from artlm.con.
### aligns-and-ranks data in m with ART-C procedure
### creates linear model, linear mixed model, or aov model depending on grouping terms in m.f
### and returns resulting model
### m: art model passed into art.con
### f.parsed: parsed contrast formula
### response: "aligned" for compare aligned responses or "art" for compare aligned-and-ranked responses
### factor.contrasts: e.g. contr.sum passed to artlm.
### ...: extra parameter passed to artlm and subsequently lm or lmer
### returns: An object of class lm if m.f does not
###   contain grouping or error terms, an object of class merMod
###   (i.e. a model fit by lmer) if it contains grouping terms, or
###   an object of class aovlist (i.e. a model fit by aov) if
###   it contains error terms.
### Note: only allowed from artlm.con not art.con.
artlm.con.internal = function(m, f.parsed, response, factor.contrasts, ...) {
    # make sure m is an art model
    if (!inherits(m, "art")) {
        stop("Model must be an art model; got ", deparse0(class(m)), ".")
    }

    # get model formula
    m.f = m$formula

    # m$data holds data used to create m, even if data frame var name was assigned to something else.
    df = m$data

    # parse model formula
    m.f.parsed = parse.art.model.formula(m.f, f.parsed)

    # concatenate columns in df corresponding to interaction terms in f
    art.concatenated.df = generate.art.concatenated.df(m.f.parsed, df, f.parsed)

    # concatenate terms in m.f corresponding to interaction terms in f and create new art model
    art.concatenated.model = generate.art.concatenated.model(m.f, m.f.parsed, art.concatenated.df, f.parsed)

    # artlm with new art model
    artlm.con.internal = artlm(
        art.concatenated.model,
        toString(f.parsed$concat.interaction.variable),
        response = response,
        factor.contrasts = factor.contrasts,
        ...
    )
    artlm.con.internal
}

### called internally from art.con iff interaction = TRUE
### m: art model passed into art.con
### f.parsed: parsed contrast formula
### response: "aligned" for compare aligned responses or "art" for compare aligned-and-ranked responses
### factor.contrasts: e.g. contr.sum passed to artlm.
### method: e.g. pairwise. passed to contrast
### adjust: e.g. tukey. passed to contrast
### ...: extra parameter passed to artlm and subsequently lm or lmer
### returns: result of conducting interaction contrasts on terms specified in f.parsed
###   (object of class emmGrid)
do.art.interaction.contrast = function(m, f.parsed, response, factor.contrasts, method, adjust, ...) {
    # e.g. list("a", "b", "c")
    interaction.variables = f.parsed$interaction.variables

    # e.g. list("a", "b", "c") -> "a:b:c". will be passed to artlm
    interaction.string.term = paste(interaction.variables, collapse=":")

    # e.g. list("a", "b", "c") -> "~ a*b*c". 
will be passed to emmeans
    interaction.expression = Reduce(function(x, y) call("*", x, y), interaction.variables)
    interaction.formula = as.formula(call("~", interaction.expression))

    contrast(
        emmeans(
            artlm(m, interaction.string.term, response = response, factor.contrasts = factor.contrasts, ...),
            interaction.formula
        ),
        method = method, adjust = adjust, interaction = TRUE
    )
}

### conducts contrasts given model returned by artlm.con
### f.parsed: parsed contrast formula
### artlm.con: model returned by artlm.con given the original inputs to art.con
### method: contrast method propagated to contrast
### adjust: adjustment method propagated to contrast
### returns: result of conducting contrasts on artlm.con model (object of class emmGrid)
### syntax: m = art(Y ~ X1*X2, data = df)
###   art.con(m, "X1") or art.con(m, ~X1)
###   art.con(m, "X1:X2") or art.con(m, ~ X1*X2)
### Note: called internally from art.con iff interaction = FALSE
#' @importFrom stats p.adjust p.adjust.methods
#' @importFrom emmeans emmeans contrast
do.art.contrast = function(f.parsed, artlm.con, method, adjust) {
    # e.g. f.parsed$concat.interaction.variable = X1X2 -> ~ X1X2
    emmeans.formula = as.formula(call("~", f.parsed$concat.interaction.variable))
    art.con.emmeans = emmeans(artlm.con, emmeans.formula)
    art.con = contrast(art.con.emmeans, method, adjust=adjust)

    art.con
}
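
# Illustrative sketch (not part of the original package source): per the
# comments above, the formula and string contrast specifications parse to the
# same structure. Both calls below yield interaction.term.label "a:b",
# interaction.variables list(a, b), and concat.interaction.variable `ab`:
#
#   parse.art.con.formula(~ a*b)
#   parse.art.con.formula("a:b")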
/scratch/gouwar.j/cran-all/cranData/ARTool/R/art.con.internal.R
# art function and some basic generic function implementations for art objects
#
# Author: mjskay
###############################################################################


#' Per-Term Linear Model from Aligned Rank Transformed Data
#'
#' Build a linear model for ART data with response aligned or aligned and
#' ranked by the specified term from the model.
#'
#' This function is used primarily for post-hoc tests. To run an ANOVA, it does
#' not need to be called directly; instead, use \code{\link{anova.art}}, which
#' calls this function as needed.
#'
#' @param m An object of class \code{\link{art}}.
#' @param term A character vector indicating the effect term
#' in the transformed data in \code{m} to use as the aligned or art response.
#' @param response Which response to use: the aligned response
#' (\code{"aligned"}) or the aligned and ranked (\code{"art"}) response.
#' @param factor.contrasts The name of the contrast-generating function to be
#' applied by default to fixed effect factors. Sets the first element of
#' \code{\link{options}("contrasts")} for the duration of this function. The
#' default is to use \code{"contr.sum"}, i.e. sum-to-zero contrasts, which is
#' appropriate for Type III ANOVAs (the default ANOVA type for
#' \code{\link{anova.art}}).
#' @param \dots Additional arguments passed to \code{\link{lm}} or
#' \code{\link{lmer}}.
#' @return An object of class \code{\link{lm}} if \code{formula(m)} does not
#' contain grouping or error terms, an object of class \code{\link{merMod}}
#' (i.e. a model fit by \code{\link{lmer}}) if it contains grouping terms, or
#' an object of class \code{aovlist} (i.e. a model fit by \code{\link{aov}}) if
#' it contains error terms.
#' @author Matthew Kay
#' @seealso See \code{\link{art}} for an example. See also
#' \code{\link{anova.art}}, which makes use of this function.
#' @keywords nonparametric
#'
#' @importFrom stats lm update aov
#' @importFrom lme4 lmer
#' @export
artlm = function(m, term,
    response=c("art", "aligned"),
    factor.contrasts="contr.sum",
    ...
) {
    #match enum arguments
    response = match.arg(response)

    #for the duration of this function, switch to the supplied contrast types
    original.contrasts = getOption("contrasts")
    tryCatch({
        options(contrasts=c(factor.contrasts, original.contrasts[-1]))

        #place the transformed (aligned or aligned and ranked) version of y
        #into the data frame as the dummy response ".y"
        df = m$data
        df$.y = switch(response,
            aligned=m$aligned[[term]],
            art=m$aligned.ranks[[term]])

        #modify formula to use dummy response name ".y"
        f = update(m$formula, .y ~ .)

        #reassign the environment of the model formula to this frame so that the data can be correctly recreated
        #by other functions (e.g. emmeans:::recover.data) that use the function call and environment
        environment(f) = sys.frame(sys.nframe())

        #run linear model
        if (m$n.grouping.terms > 0) {       #grouping terms => REML
            m = lmer(f, data=df, ...)
        } else if (m$n.error.terms > 0) {   #error terms => repeated measures ANOVA
            m = aov(f, data=df, ...)
        } else {                            #no grouping or error terms => OLS
            m = lm(f, data=df, ...)
        }

        attr(m, "term") = term
        attr(m, "response") = response
        m
    }, finally = {
        options(contrasts=original.contrasts)
    })
}
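
# Illustrative sketch (not part of the original package source; kept as
# comments so it does not execute when this file is sourced). anova.art()
# calls artlm() once per fixed-effect term, but the per-term model can also
# be extracted directly for follow-up tests, e.g. with emmeans:
#
#   data(Higgins1990Table5, package = "ARTool")
#   m = art(DryMatter ~ Moisture*Fertilizer + (1|Tray), data = Higgins1990Table5)
#   emmeans::emmeans(artlm(m, "Moisture"), ~ Moisture)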
/scratch/gouwar.j/cran-all/cranData/ARTool/R/artlm.R
# artlm function to align-and-rank with ART-C and then create linear model
#
# Author: lelkin
###############################################################################

#' Per-Term Linear Model on Data Aligned-and-Ranked with ART-C
#'
#' Given an \code{\link{art}} model, build a linear model from data aligned or
#' aligned-and-ranked with the ART-C alignment procedure by the specified term in
#' the model.
#'
#' This function is used internally by \code{\link{art.con}} to construct
#' linear models for contrasts using the ART-C procedure (Elkin et al. 2021).
#' It is typically not necessary to use this function directly to conduct contrasts using
#' the ART-C procedure; you can use \code{\link{art.con}} instead, which will
#' ensure that the correct model and contrast test is run. However, should you
#' wish to use the ART-C procedure with a different contrast test
#' than provided by \code{\link{art.con}}, you may wish to use this function.
#'
#' @param m An object of class \code{\link{art}}.
#' @param term A character vector indicating the effect term in
#' the transformed data in \code{m} to use as the aligned or art response.
#' @param response Which response to use: the aligned (with ART-C) response
#' (\code{"aligned"}) or the aligned and ranked (with ART-C) response
#' (\code{"art"}).
#' @param factor.contrasts The name of the contrast-generating function to be
#' applied by default to fixed effect factors. Sets the first element of
#' \code{\link{options}("contrasts")} for the duration of this function. The
#' default is to use \code{"contr.sum"}, i.e. sum-to-zero contrasts, which is
#' appropriate for Type III ANOVAs (the default ANOVA type for
#' \code{\link{anova.art}}).
#' @param \dots Additional arguments passed to \code{\link{lm}} or
#' \code{\link{lmer}}.
#' @return An object of class \code{\link{lm}} if \code{formula(m)} does not
#' contain grouping or error terms, an object of class \code{\link{merMod}}
#' (i.e. a model fit by \code{\link{lmer}}) if it does contain grouping terms, or
#' an object of class \code{aovlist} (i.e. a model fit by \code{\link{aov}})
#' if it contains error terms.
#' @details Internally, the ART-C procedure concatenates the variables specified
#' in \code{term}, and then removes the originals. When specifying the effect
#' terms on which to conduct contrasts, use the concatenation of the effects
#' specified in \code{term} instead of the original variables. This is demonstrated
#' in the example below.
#' @seealso See also \code{\link{art.con}}, which makes use of this function.
#'
#' @author Lisa A. Elkin
#' @references Elkin, L. A., Kay, M, Higgins, J. J., and Wobbrock, J. O.
#' (2021). An aligned rank transform procedure for multifactor contrast tests.
#' \emph{Proceedings of the ACM Symposium on User Interface Software and
#' Technology (UIST '21)}. Virtual Event (October 10--14, 2021). New York:
#' ACM Press, pp. 754--768. \doi{10.1145/3472749.3474784}
#' @export
#'
#' @examples
#' \donttest{
#' data(Higgins1990Table5, package = "ARTool")
#'
#' ## create an art model
#' m <- art(DryMatter ~ Moisture*Fertilizer + (1|Tray), data=Higgins1990Table5)
#'
#' ## use emmeans to conduct pairwise contrasts on "Moisture"
#' library(emmeans)
#' contrast(emmeans(artlm.con(m, "Moisture"), pairwise ~ Moisture))
#'
#' ## use emmeans to conduct pairwise contrasts on "Moisture:Fertilizer"
#' ## N.B. internally, artlm.con concatenates the factors Moisture and Fertilizer
#' ## to create MoistureFertilizer. 
If you try to use any of Moisture, Fertilizer, #' ## Moisture:Fertilizer, or Moisture*Fertilizer in the RHS of the formula #' ## passed to emmeans, you will get an error because the factors Moisture and Fertilizer #' ## do not exist in the model returned by artlm.con. #' contrast(emmeans(artlm.con(m, "Moisture:Fertilizer"), pairwise ~ MoistureFertilizer)) #' #' ## Note: art.con uses emmeans internally, and the above examples are equivalent to #' ## the following calls to art.con, which is the recommended approach as it will #' ## ensure the model selected and the contrasts extracted from emmeans match. #' art.con(m, "Moisture") #' art.con(m, "Moisture:Fertilizer") #' #' } #' artlm.con = function(m, term, response = "art", factor.contrasts = "contr.sum", ...) { f.parsed = parse.art.con.string.formula(term) artlm.con = artlm.con.internal(m, f.parsed, response, factor.contrasts, ...) artlm.con }
/scratch/gouwar.j/cran-all/cranData/ARTool/R/artlm.con.R
# documentation of datasets # # Author: mjskay ############################################################################### #' Aligned Rank Transformed Version of Higgins1990Table1 #' #' The ART version of \code{\link{Higgins1990Table1}} as produced by the #' original ARTool, used to test the correctness of \code{\link{art}} output. #' #' #' @name Higgins1990Table1.art #' @docType data #' @format A data frame with 36 observations on the following 10 variables. #' \describe{ #' \item{Subject}{a factor with levels \code{"s1"} .. \code{"s36"}} #' \item{Row}{a factor with levels \code{"r1"} .. \code{"r3"}} #' \item{Column}{a factor with levels \code{"c1"} .. \code{"c3"}} #' \item{Response}{a numeric vector} #' \item{aligned.Response..for.Row}{a numeric vector} #' \item{aligned.Response..for.Column}{a numeric vector} #' \item{aligned.Response..for.Row.Column}{a numeric vector} #' \item{ART.Response..for.Row}{a numeric vector} #' \item{ART.Response..for.Column}{a numeric vector} #' \item{ART.Response..for.Row.Column}{a numeric vector} #' } #' @seealso \code{\link{Higgins1990Table1}}, \code{\link{art}}. #' @source Wobbrock, J. O., Findlater, L., Gergle, D., and Higgins, J. J. #' \emph{ARTool}. \url{https://depts.washington.edu/acelab/proj/art/}. #' @keywords datasets internal NULL #' Synthetic 3x3 Factorial Randomized Experiment #' #' Synthetic data from a balanced 3x3 factorial experiment with main effects, #' no interaction, and independent and identically distributed (i.i.d.) Normal #' errors. #' #' #' @name Higgins1990Table1 #' @docType data #' @format A data frame with 36 observations on the following 4 variables. #' \describe{ #' \item{Subject}{a factor with levels \code{"s1"} .. \code{"s36"}} #' \item{Row}{a factor with levels \code{"r1"} .. \code{"r3"}} #' \item{Column}{a factor with levels \code{"c1"} .. \code{"c3"}} #' \item{Response}{a numeric vector} #' } #' @seealso \code{\link{art}}, \code{\link{anova.art}}. #' @source Higgins, J. J., Blair, R. C. and Tashtoush, S. (1990). The aligned #' rank transform procedure. \emph{Proceedings of the Conference on Applied #' Statistics in Agriculture}. Manhattan, Kansas: Kansas State University, pp. #' 185-195. #' @keywords datasets #' @examples #' #' data(Higgins1990Table1, package = "ARTool") #' #' ## run aligned-rank transform and ANOVA on the data #' ## Note: because there is only one observation per Subject #' ## in this dataset, we do not need to include Subject as #' ## a grouping term in this formula. Indeed, if we did, #' ## lmer would complain when we attempt the ANOVA. #' m <- art(Response ~ Row*Column, data=Higgins1990Table1) #' anova(m) #' NULL #' Aligned Rank Transformed Version of Higgins1990Table5 #' #' The ART version of \code{\link{Higgins1990Table5}} as produced by the #' original ARTool, used to test the correctness of \code{\link{art}} output. #' #' #' @name Higgins1990Table5.art #' @docType data #' @format A data frame with 48 observations on the following 10 variables. #' \describe{ #' \item{Tray}{a factor with levels \code{"t1"} .. \code{"t12"}} #' \item{Moisture}{a factor with levels \code{"m1"} .. \code{"m4"}} #' \item{Fertilizer}{a factor with levels \code{"f1"} .. 
\code{"f4"}} #' \item{DryMatter}{a numeric vector} #' \item{aligned.DryMatter..for.Moisture}{a numeric vector} #' \item{aligned.DryMatter..for.Fertilizer}{a numeric vector} #' \item{aligned.DryMatter..for.Moisture.Fertilizer}{a numeric vector} #' \item{ART.DryMatter..for.Moisture}{a numeric vector} #' \item{ART.DryMatter..for.Fertilizer}{a numeric vector} #' \item{ART.DryMatter..for.Moisture.Fertilizer}{a numeric vector} #' } #' @seealso \code{\link{Higgins1990Table5}}, \code{\link{art}}. #' @source Wobbrock, J. O., Findlater, L., Gergle, D., and Higgins, J. J. #' \emph{ARTool}. \url{https://depts.washington.edu/acelab/proj/art/}. #' @keywords datasets internal NULL #' Split-plot Experiment Examining Effect of Moisture and Fertilizer on Dry #' Matter in Peat Pots #' #' This dataset comes from a split-plot experiment examining \code{Tray}s of 4 #' peat pots each. \code{Moisture} was varied between \code{Tray}s (i.e. it was #' the whole-plot treatment) and \code{Fertilizer} was varied within #' \code{Tray}s (i.e. it was the sub-plot treatment). The outcome measure was #' \code{DryMatter}. #' #' This dataset, originally from Milliken & Johnson (1984), is reproduced here #' from Higgins \emph{et al.} (1990). #' #' #' @name Higgins1990Table5 #' @docType data #' @format A data frame with 48 observations on the following 4 variables. #' \describe{ #' \item{Tray}{a factor with levels \code{"t1"} .. \code{"t12"}} #' \item{Moisture}{a factor with levels \code{"m1"} .. \code{"m4"}} #' \item{Fertilizer}{a factor with levels \code{"f1"} .. \code{"f4"}} #' \item{DryMatter}{a numeric vector} #' } #' @seealso See \code{\link{art}} for a more complete example. See also #' \code{\link{anova.art}}. #' @references Higgins, J. J., Blair, R. C. and Tashtoush, S. (1990). The #' aligned rank transform procedure. \emph{Proceedings of the Conference on #' Applied Statistics in Agriculture}. Manhattan, Kansas: Kansas State #' University, pp. 185-195. #' @source Milliken, G.A., Johnson, D.E. (1984). \emph{Analysis of Messy Data #' Vol I: Designed Experiments}. Van Nostrand Reinhold Company, New York. #' @keywords datasets #' @examples #' #' data(Higgins1990Table5, package = "ARTool") #' #' ## run aligned-rank transform and ANOVA on the data #' m <- art(DryMatter ~ Moisture*Fertilizer + (1|Tray), data=Higgins1990Table5) #' anova(m) #' NULL #' Aligned Rank Transformed Version of HigginsABC #' #' The ART version of \code{\link{HigginsABC}} as produced by the original #' ARTool, used to test the correctness of \code{\link{art}} output. #' #' #' @name HigginsABC.art #' @docType data #' @format A data frame with 16 observations on the following 19 variables. #' \describe{ #' \item{Subject}{a factor with levels \code{"s1"} .. 
\code{"s8"}} #' \item{A}{a factor with levels \code{"a1"} \code{"a2"}} #' \item{B}{a factor with levels \code{"b1"} \code{"b2"}} #' \item{C}{a factor with levels \code{"c1"} \code{"c2"}} #' \item{Y}{a numeric vector} #' \item{aligned.Y..for.A}{a numeric vector} #' \item{aligned.Y..for.B}{a numeric vector} #' \item{aligned.Y..for.A.B}{a numeric vector} #' \item{aligned.Y..for.C}{a numeric vector} #' \item{aligned.Y..for.A.C}{a numeric vector} #' \item{aligned.Y..for.B.C}{a numeric vector} #' \item{aligned.Y..for.A.B.C}{a numeric vector} #' \item{ART.Y..for.A}{a numeric vector} #' \item{ART.Y..for.B}{a numeric vector} #' \item{ART.Y..for.A.B}{a numeric vector} #' \item{ART.Y..for.C}{a numeric vector} #' \item{ART.Y..for.A.C}{a numeric vector} #' \item{ART.Y..for.B.C}{a numeric vector} #' \item{ART.Y..for.A.B.C}{a numeric vector} #' } #' @seealso \code{\link{HigginsABC}}, \code{\link{art}}. #' @source Wobbrock, J. O., Findlater, L., Gergle, D., and Higgins, J. J. #' \emph{ARTool}. \url{https://depts.washington.edu/acelab/proj/art/}. #' @keywords datasets internal NULL #' Synthetic 2x2x2 Mixed Design Experiment #' #' Synthetic data from an experiment with two between-\code{Subject}s factors #' (\code{A} and \code{B}) having two levels each and one #' within-\code{Subject}s factor (\code{C}) with two levels. #' #' #' @name HigginsABC #' @docType data #' @format A data frame with 16 observations on the following 5 variables. #' \describe{ #' \item{Subject}{a factor with levels \code{"s1"} .. \code{"s8"}} #' \item{A}{a factor with levels \code{"a1"} \code{"a2"}} #' \item{B}{a factor with levels \code{"b1"} \code{"b2"}} #' \item{C}{a factor with levels \code{"c1"} \code{"c2"}} #' \item{Y}{a numeric vector} #' } #' @seealso \code{\link{art}}, \code{\link{anova.art}}. #' @source Wobbrock, J. O., Findlater, L., Gergle, D., and Higgins, J. J. #' \emph{ARTool}. \url{https://depts.washington.edu/acelab/proj/art/}. #' @keywords datasets #' @examples #' \donttest{ #' data(HigginsABC, HigginsABC.art, package = "ARTool") #' #' ## run aligned-rank transform and ANOVA on the data #' m <- art(Y ~ A*B*C + Error(Subject), data = HigginsABC) #' anova(m) #' } NULL #' Synthetic 2x2 Within-Subjects Experiment #' #' Synthetic data from an experiment with two within-subjects factors #' (\code{A} and \code{B}) having two levels each. #' #' @name ElkinAB #' @docType data #' @format A data frame with 32 observations on the following 4 variables. #' \describe{ #' \item{S}{a factor representing subjects with levels \code{"s1"} .. \code{"s8"}} #' \item{A}{a factor with levels \code{"a1"} \code{"a2"}} #' \item{B}{a factor with levels \code{"b1"} \code{"b2"}} #' \item{Y}{a numeric vector} #' } #' @keywords datasets #' @source Elkin, L. A., Kay, M, Higgins, J. J., and Wobbrock, J. O. #' (2021). An aligned rank transform procedure for multifactor contrast tests. #' \emph{Proceedings of the ACM Symposium on User Interface Software and #' Technology (UIST '21)}. Virtual Event (October 10--14, 2021). New York: #' ACM Press, pp. 754--768. \doi{10.1145/3472749.3474784} #' @examples #' \donttest{ #' data(ElkinAB, package = "ARTool") #' #' ## run contrast using the ART-C procedure on the data. #' m <- art(Y ~ A*B + (1|S), data = ElkinAB) #' art.con(m, "A:B") #' } NULL #' Synthetic 2x2x2 Within-Subjects Experiment #' #' Synthetic data from an experiment with three within-subjects factors #' (\code{A}, \code{B}, and \code{C}) having two levels each. 
#' #' @name ElkinABC #' @docType data #' @format A data frame with 64 observations on the following 5 variables. #' \describe{ #' \item{S}{a factor representing subjects with levels \code{"s1"} .. \code{"s8"}} #' \item{A}{a factor with levels \code{"a1"} \code{"a2"}} #' \item{B}{a factor with levels \code{"b1"} \code{"b2"}} #' \item{C}{a factor with levels \code{"c1"} \code{"c2"}} #' \item{Y}{a numeric vector} #' } #' @keywords datasets #' @source Elkin, L. A., Kay, M, Higgins, J. J., and Wobbrock, J. O. #' (2021). An aligned rank transform procedure for multifactor contrast tests. #' \emph{Proceedings of the ACM Symposium on User Interface Software and #' Technology (UIST '21)}. Virtual Event (October 10--14, 2021). New York: #' ACM Press, pp. 754--768. \doi{10.1145/3472749.3474784} #' @examples #' \donttest{ #' data(ElkinABC, package = "ARTool") #' #' ## run contrast using the ART-C procedure on the data. #' m <- art(Y ~ A*B*C + (1|S), data = ElkinABC) #' art.con(m, "A:B:C") #' } NULL #' Synthetic Data Used in the Contrast Test Vignette #' #' See \code{vignette("art-contrasts")} for a description of this data. #' #' #' @name InteractionTestData #' @docType data #' @seealso \code{\link{art}}, \code{\link{anova.art}}. #' @keywords datasets #' @examples #' ## see vignette("art-contrasts") NULL
/scratch/gouwar.j/cran-all/cranData/ARTool/R/data.R
# Internal function: Standardized ANOVA tables for lm, lmer, and aov objects
# with descriptions of the methods used and a single table for all results
# (instead of multiple tables as returned by aovlist objects --- hence "flat")
#
# Author: mjskay
###############################################################################

# Names that should be suppressed from global variable check by codetools
# Names used broadly should be put in global.variables.R
globalVariables(c("Df", "Df.res", "Sum Sq", "Sum Sq.res", "Term", "Error"))


flat.anova = function(m, ...) {
    UseMethod("flat.anova", m)
}

#' @importFrom stats anova
#' @importFrom car Anova
#' @importFrom magrittr %<>%
flat.anova.default = function(m, type="III", test="F", ...) {
    #get ANOVA table
    a = switch(type,
        I = anova(m, test=test, ...),
        II = Anova(m, type="II", test=test, ...),
        III = Anova(m, type="III", test=test, ...)[-1,] #first row is intercept => ignore
    )

    #get the anova description from the heading
    description = strsplit(attr(a, "heading"),"\n")[[1]]
    if (type == "I") description %<>% paste("(Type I)") #Type I ANOVAs don't include the label themselves

    #add a column to keep track of the term name (rather than as row names because
    #in some instances this will not be unique, and row names must be unique)
    a = cbind(Term = rownames(a), a)

    attr(a, "description") = description
    a
}

#' @importFrom magrittr %<>%
flat.anova.lm = function(m, type="III", test="F", ...) {
    a = flat.anova.default(m, type, test, ...)
    description = attr(a, "description")

    #for lm (no grouping variables), the anova table will have an extra row with residual df;
    #add the residual df from that row as a column and drop the unneeded row
    a %<>% columnify.anova.residuals()

    attr(a, "description") = description
    a
}

#' @importFrom magrittr %<>%
#' @import dplyr
columnify.anova.residuals = function (a.table) {
    #given a flat anova table with "Term", "Df", and "Sum Sq" columns
    #and the last row containing residual Df and Sum Sq, move the
    #residual Df and Sum Sq into columns in the other rows
    k = nrow(a.table)
    df.res = a.table[k,"Df"]
    sumsq.res = a.table[k,"Sum Sq"]

    a.table %<>%
        .[-k,,drop=FALSE] %>%   #drop last row (residuals)
        mutate(
            Df.res = df.res,
            `Sum Sq.res` = sumsq.res
        )

    #reorder columns
    cbind(
        select(a.table, Term, Df, Df.res, `Sum Sq`, `Sum Sq.res`),
        select(a.table, -Term, -Df, -Df.res, -`Sum Sq`, -`Sum Sq.res`)
    )
}


### Flat version of an anova from an aov model
#' @importFrom plyr ldply
#' @import dplyr
flat.anova.aovlist = function(m,
    type="I", test="F", #type and test are ignored: they are always "I" and "F" for aov objects
    ...
) {
    #construct flat anova table
    a = ldply(seq_along(m), function(i) {
        error = names(m)[[i]]
        ldply(summary(m[[i]]), function(anova.j) {
            if (nrow(anova.j) > 1) {
                #last row just has residual df and sum of squares,
                #extract residual df to move into its own column
                anova.j %>%
                    mutate(
                        Term = gsub("\\s+$", "", rownames(.)),
                        Error = error
                    ) %>%
                    columnify.anova.residuals()
            }
        })
    })

    #reorder columns
    a = cbind(
        select(a, Term, Error),
        select(a, -Term, -Error)
    )

    attr(a, "description") = "Repeated Measures Analysis of Variance Table (Type I)"
    a
}
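
# Illustrative sketch (not part of the original package source) of the
# reshaping performed by columnify.anova.residuals() above, given a flat
# table whose last row holds the residual Df and Sum Sq (values invented
# for illustration):
#
#   Term       Df  Sum Sq          Term  Df  Df.res  Sum Sq  Sum Sq.res
#   A           1    10.0    ->    A      1      12    10.0         6.0
#   Residuals  12     6.0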
/scratch/gouwar.j/cran-all/cranData/ARTool/R/flat.anova.R
# Names that should be suppressed from global variable check by codetools # Names used broadly should be put here; Names used in specific files should # be put at the top of the corresponding file. # # Author: mjskay ############################################################################### # names used in dlpyr functions globalVariables(c(".", "everything", "contains"))
/scratch/gouwar.j/cran-all/cranData/ARTool/R/global.variables.R
# Internal helper functions for the art function
#
# Author: mjskay
###############################################################################


### Parses and validates model formula for art.
### Raises exception if formula does not validate (e.g. not factorial).
### Given a formula like y ~ a*b*c + (1|d) + Error(g * h) + Error(i),
### returns list with:
###   fixed.only: formula with only fixed components (y ~ a+b+c)
###   fixed.terms: additive formula with only variables from fixed terms and no response
###     (for use with ddply) (e.g., ~ a + b + c)
###   fixed.term.labels: character vector of term labels (e.g., c("a", "b", "c", "a:b", "a:c", "b:c", "a:b:c"))
###   n.grouping.terms: number of grouping terms like (1|d) (e.g. 1)
###   n.error.terms: number of error terms like Error(g) (e.g. 2)
###   error.terms: formula with error terms extracted from within Error() (e.g. ~ g * h + i)
#' @importFrom stats terms
#' @importFrom plyr laply
parse.art.formula = function(formula) {
    #extract terms from the formula
    f.terms = terms(formula)

    #ensure we have a dependent variable and an intercept
    if (attr(f.terms, "response") != 1) {
        stop("Model must have exactly one dependent variable (got ", attr(f.terms, "response"), ")")
    }
    if (attr(f.terms, "intercept") == 0) {
        stop("Model must have an intercept (got ", attr(f.terms, "intercept"), ")")
    }

    #unique variables in the rhs of the formula as list of quoted variables
    #e.g. y ~ a*b*c + (1|d) + Error(g) -> list(quote(a), quote(b), quote(c), quote((1|d)), quote(Error(g))))
    variables = as.list(attr(f.terms, "variables"))[c(-1,-2)]

    #char vector of names of rhs terms and their interactions
    #e.g. y ~ a*b*c + (1|d) + Error(g) -> c("a","b","c","1 | d","Error(g)","a:b","b:c","a:b:c"))
    term.labels = attr(f.terms, "term.labels")

    #vector of length(f.term.labels); value is the order of the interaction of the corresponding entry in term.labels
    #e.g. y ~ a*b*c + (1|d) + Error(g) -> c(1,1,1,1,1,2,2,3)
    term.order = attr(f.terms, "order")

    #determine which variables on the rhs are grouping variables, error variables, or fixed variables
    is.grouping.variable = laply(variables, function(term) as.list(term)[[1]] == quote(`|`))
    is.error.variable = laply(variables, function(term) is.call(term) & as.list(term)[[1]] == quote(`Error`))
    #all other variables that aren't grouping or error variables must be fixed variables
    is.fixed.variable = !(is.grouping.variable | is.error.variable)

    #ensure we have at least one fixed effect and are using either grouping terms or error terms but not both
    if (sum(is.fixed.variable) == 0) {
        stop("Model must have at least one fixed effect (0 given)")
    }
    if (any(is.grouping.variable) & any(is.error.variable)) {
        stop("Model cannot contain both grouping terms, like (1|d), and error terms, like Error(d). 
Use one or the other.") } #get table with rows == rhs variables and cols == term labels, each cell == 1 if variable in term variables.by.terms = attr(f.terms, "factors")[-1,,drop=FALSE] #prevent reducing to vector if only one cell #make a version of the formula terms with only fixed effects n.rhs.variables = length(variables) n.rhs.terms = length(term.labels) n.interaction.terms = n.rhs.terms - n.rhs.variables if (n.interaction.terms < 0) { #only happens when not factorial stop("Model must include all combinations of interactions of fixed effects.") } is.fixed.term = c(is.fixed.variable, rep(TRUE, n.interaction.terms)) fixed.variables.by.terms = variables.by.terms[is.fixed.variable, is.fixed.term, drop=FALSE] fixed.term.labels = term.labels[is.fixed.term] fixed.term.order = term.order[is.fixed.term] #ensure design of fixed effects portion of model has all interactions #first, pull out the response and the main (i.e. order-1) fixed effect terms response = formula[[2]] #build a factorial model of all fixed effects #e.g. y ~ a*b*c + (1|d) + Error(g) -> y ~ a*b*c factorial.formula = eval(bquote(.(response) ~ .(Reduce(function(x,y) bquote(.(x) * .(y)), variables[is.fixed.variable])))) environment(factorial.formula) = environment(formula) #verify the factorial model is the same as the fixed effects in the supplied model factorial.factors = attr(terms(factorial.formula), "factors")[-1,,drop=FALSE] if (!all(dim(factorial.factors) == dim(fixed.variables.by.terms)) || !all(factorial.factors == fixed.variables.by.terms)) { stop("Model must include all combinations of interactions of fixed effects.") } #build a formula with only fixed variables on the right-hand-side (added to each other) #e.g. y ~ a*b*c + (1|d) + Error(g) -> ~ a + b + c fixed.terms = eval(bquote(~ .(Reduce(function(x,y) bquote(.(x) + .(y)), variables[is.fixed.variable])))) environment(fixed.terms) = environment(formula) #build a formula with all Error terms extracted from Error() on the right-hand side (added to each other) #e.g. y ~ a*b*c + (1|d) + Error(g * h) + Error(i) -> ~ g * h + i error.terms = eval(bquote(~ .(Reduce(function(x,y) bquote(.(x) + .(y)), Map(function (v) v[[2]], variables[is.error.variable]))))) environment(error.terms) = environment(formula) #return validated formulas list( fixed.only = factorial.formula, fixed.terms = fixed.terms, fixed.term.labels = fixed.term.labels, n.grouping.terms = sum(is.grouping.variable), n.error.terms = sum(is.error.variable), error.terms = error.terms ) } ### Given a factorial, fixed-effects-only formula, ### calculate the cell means and estimated effects ### for all responses. Returns a list of three ### data frames all indexed in parallel: data ### (the original data as determined by ### model.frame(formula, data)), ### cell.means, and estimated.effects ### ### Given some formula f and an input data frame df, ### parameters to art.estimated effects are: ### formula.terms = terms(f) ### data = model.frame(f, df) #' @importFrom plyr ddply art.estimated.effects = function(formula.terms, data) { #N.B. 
    #N.B. in this method "interaction" refers to
    #all 0 - n order interactions (i.e., grand mean,
    #first-order/"main" effects, and 2+-order interactions)

    #matrix with interactions as columns
    #and the response + all first-order factors as rows,
    #with each cell indicating if the first-order factor (row)
    #contributes to the nth-order interaction (column)
    interaction.matrix = cbind(data.frame(.grand=FALSE), attr(formula.terms, "factors") == 1)
    interaction.names = colnames(interaction.matrix)
    term.names = row.names(interaction.matrix)

    #interaction order of each column in interaction.matrix
    #(order of grand mean (first column) is 0, main effects
    #are 1, n-way interactions are n)
    interaction.order = c(0, attr(formula.terms, "order"))

    #calculate cell means for each interaction
    cell.means = data.frame(y=rep(mean(data[,1]), nrow(data)))
    colnames(cell.means) = term.names[1]
    data$.row = 1:nrow(data)	#original row indices so we can keep rows in order when we split/combine
    for (j in 2:ncol(interaction.matrix)) {
        term.index = interaction.matrix[,j]

        #calculate cell means
        #must derive term.formula as below (instead of just passing term.names[term.index] to ddply)
        #because otherwise expressions like "factor(a)" would be converted to ~ factor(a) (instead of ~ `factor(a)`, which
        #is what we want here because the expression has already been evaluated previously)
        #A nicer way to do all this would be worth devising eventually
        term.formula = eval(bquote(~ .(Reduce(function(x,y) bquote(.(x) + .(y)), Map(as.name, term.names[term.index])))))
        cell.mean.df = ddply(data, term.formula, function (df) {
            df$.cell.mean = mean(df[,1])	#mean of response for this interaction
            df
        })
        #put results into cell.means in the order of the original rows (so that they match up)
        cell.means[[interaction.names[j]]] = cell.mean.df[order(cell.mean.df$.row), ".cell.mean"]
    }

    #calculate estimated effects for each interaction
    estimated.effects = data.frame(y=cell.means[,1])	#estimated effect for grand mean == grand mean
    colnames(estimated.effects) = term.names[1]
    for (j in 2:ncol(interaction.matrix)) {
        #index of which cell means (columns of cell.means)
        #contribute to the estimated effect of this interaction.
        #This will select any columns involving a subset of the same factors; e.g. for A:B:C it would select
        #the grand mean, A, B, C, A:B, A:C, B:C, A:B:C; but not (say) A:D (since D is not in A:B:C).
        cell.means.cols = colSums(interaction.matrix[interaction.matrix[,j],]) == interaction.order

        #cell means contribute positively if they have the same
        #order (mod 2) as this interaction and negatively otherwise
        cell.means.multiplier = ifelse((interaction.order - interaction.order[j]) %% 2, -1, 1)

        #calculate estimated effect
        estimated.effects[[interaction.names[j]]] = rowSums(t(t(cell.means[,cell.means.cols]) * cell.means.multiplier[cell.means.cols]))
    }
    estimated.effects[1] = NULL	#drop first column (grand mean), not needed

    list(
        cell.means=cell.means,
        estimated.effects=estimated.effects
    )
}


## deparse1 backport
deparse0 = function(expr, collapse = " ", width.cutoff = 500L, ...) {
    paste(deparse(expr, width.cutoff, ...), collapse = collapse)
}
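
# A minimal usage sketch (not part of the package API): assuming this file has
# been sourced and plyr is available, parse.art.formula() decomposes a
# mixed-design formula. The formula and variable names below are hypothetical.
local({
    f = parse.art.formula(y ~ a * b + (1 | subject))
    stopifnot(
        f$n.grouping.terms == 1,
        f$n.error.terms == 0,
        identical(f$fixed.term.labels, c("a", "b", "a:b"))
    )
})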
/scratch/gouwar.j/cran-all/cranData/ARTool/R/internal.R
# Release questions for devtools::release() # # Author: mjskay ############################################################################### release_questions <- function() { c( "Is README.md up to date?" ) }
/scratch/gouwar.j/cran-all/cranData/ARTool/R/release.questions.R
## ----setup, include=FALSE----------------------------------------------------- knitr::opts_chunk$set( #default code chunk options fig.width = 6, fig.height = 4 ) pander::panderOptions("table.split.table", Inf) #don't split wide tables in output pander::panderOptions("table.style", "rmarkdown") #table style that's supported by github ## ----message=FALSE, warning=FALSE--------------------------------------------- library(dplyr) #data_frame, %>%, filter, summarise, group_by library(emmeans) #emmeans, contrast library(phia) #testInteractions library(tidyr) #spread library(ARTool) #art, artlm library(ggplot2) #ggplot, stat_..., geom_..., etc ## ----------------------------------------------------------------------------- n_per_group = 150 df = tibble( X1 = factor(c(rep("A", n_per_group), rep("B", n_per_group))), X2 = factor(rep(c("C","D","E"), n_per_group * 2/3)), Y = rnorm( n_per_group * 2, (X1 == "B") + 2* (X2 == "D") + 2 * (X1 == "B" & X2 == "D") - 2 * (X1 == "A" & X2 == "D") + 2 * (X2 == "E") ) ) ## ----------------------------------------------------------------------------- data(InteractionTestData, package = "ARTool") df = InteractionTestData #save some typing ## ----interaction_plot, fig.cap=""--------------------------------------------- # variant of the Dark2 colorbrewer scale with specific name mappings (so # we can keep color -> name mapping consistent throughout this document) palette = c("#1b9e77", "#d95f02", "#7570b3") names(palette) = c("C", "D", "E") df %>% ggplot(aes(x = X1, y = Y, color = X2)) + geom_violin(trim = FALSE, adjust = 1.5) + geom_point(pch = "-", size = 4) + stat_summary(fun = mean, geom = "point", size = 4) + stat_summary(aes(group = X2), fun = mean, geom = "line", size = 1) + stat_summary(aes(x = 1.5, group = NA), fun = mean, geom = "point", size = 9, pch = "+") + scale_y_continuous(breaks = seq(-6, 10, by = 2), minor_breaks = -6:10) + scale_color_manual(guide = FALSE, values = palette) + coord_cartesian(ylim = c(-6, 10)) + facet_grid(. ~ X2) ## ----------------------------------------------------------------------------- m.linear = lm(Y ~ X1*X2, data = df) anova(m.linear) ## ----------------------------------------------------------------------------- m.art = art(Y ~ X1*X2, data = df) anova(m.art) ## ---- message=FALSE----------------------------------------------------------- contrast(emmeans(m.linear, ~ X1), method = "pairwise") contrast(emmeans(m.linear, ~ X2), method = "pairwise") ## ---- message=FALSE----------------------------------------------------------- # this works for single factors, though it is better (more general) to use # artlm.con() or art.con() (see below) contrast(emmeans(artlm(m.art, "X1"), ~ X1), method = "pairwise") contrast(emmeans(artlm(m.art, "X2"), ~ X2), method = "pairwise") ## ---- message=FALSE----------------------------------------------------------- contrast(emmeans(artlm.con(m.art, "X1"), ~ X1), method = "pairwise") contrast(emmeans(artlm.con(m.art, "X2"), ~ X2), method = "pairwise") ## ---- message=FALSE----------------------------------------------------------- art.con(m.art, "X1") art.con(m.art, "X2") ## ----------------------------------------------------------------------------- contrast(emmeans(m.linear, ~ X1:X2), method = "pairwise") ## ----------------------------------------------------------------------------- #DO NOT DO THIS! 
contrast(emmeans(artlm(m.art, "X1:X2"), ~ X1:X2), method = "pairwise") ## ----interaction_plot_AC_AD, fig.cap="", fig.width=3-------------------------- df %>% filter(X1 == "A", X2 %in% c("C", "D")) %>% ggplot(aes(x = X1:X2, y = Y, color = X2)) + geom_violin(trim = FALSE, adjust = 1.5) + geom_point(pch = "-", size = 4) + stat_summary(fun = mean, geom = "point", size = 4) + scale_y_continuous(breaks = seq(-6, 10, by = 2), minor_breaks = -6:10) + scale_color_manual(guide = FALSE, values = palette) + coord_cartesian(ylim=c(-6,10)) ## ----message = FALSE---------------------------------------------------------- art.con(m.art, "X1:X2") ## ----message = FALSE---------------------------------------------------------- art.con(m.art, ~ X1*X2) ## ----message = FALSE---------------------------------------------------------- contrast(emmeans(artlm.con(m.art, "X1:X2"), ~ X1X2), method = "pairwise") ## ---- interaction_plot_C_D, fig.cap=""---------------------------------------- plot_interaction_for_X2_levels = function(...) { x2_levels = c(...) df. = filter(df, X2 %in% x2_levels) X1_in_X2 = df. %>% group_by(X1, X2) %>% summarise(Y = mean(Y), .groups = "drop") %>% spread(X1, Y) print( ggplot(df., aes(x = X1, y = Y, color = X2)) + geom_violin(trim = FALSE, adjust = 1.5) + geom_point(pch = "-", size = 4) + stat_summary(fun = mean, geom = "point", size = 4) + stat_summary(aes(group = X2), fun = mean, geom = "line", size = 1, linetype = "dashed") + geom_errorbar( aes(x = 2.2, ymin = A, ymax = B, y = NULL), data = X1_in_X2, width = .19, size = 0.8, color = "black" ) + geom_text( aes(x = 2.35, y = (A + B)/2, label = paste("A - B |", X2)), data = X1_in_X2, hjust = 0, size = 5, color = "black" ) + scale_y_continuous(breaks = seq(-6, 10, by = 2), minor_breaks = -6:10) + scale_color_manual(guide = FALSE, values = palette[x2_levels]) + coord_cartesian(xlim = c(0, 3.5), ylim = c(-6,10)) + facet_grid(. ~ X2) ) } plot_interaction_for_X2_levels("C", "D") ## ----------------------------------------------------------------------------- contrast(emmeans(m.linear, ~ X1:X2), method = "pairwise", interaction = TRUE) ## ----interaction_plot_C_E, fig.cap=""----------------------------------------- plot_interaction_for_X2_levels("C", "E") ## ---- interaction_plot_D_E, fig.cap=""---------------------------------------- plot_interaction_for_X2_levels("D", "E") ## ----------------------------------------------------------------------------- art.con(m.art, "X1:X2", interaction = TRUE) ## ----------------------------------------------------------------------------- contrast(emmeans(artlm(m.art, "X1:X2"), ~ X1:X2), method = "pairwise", interaction = TRUE) ## ----------------------------------------------------------------------------- testInteractions(artlm(m.art, "X1:X2"), pairwise = c("X1","X2"))
/scratch/gouwar.j/cran-all/cranData/ARTool/inst/doc/art-contrasts.R
--- title: "Contrast tests with ART" author: "Matthew Kay, Lisa A. Elkin, Jacob O. Wobbrock" date: "`r Sys.Date()`" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{Interaction Contrasts with ART} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ## Introduction The aligned-rank transform (ART) allows for non-parametric analyses of variance ([Wobbrock et al. 2011](https://doi.org/10.1145/1978942.1978963)). But how should we do contrast tests with ART? Contrasts involving levels of single factors, combinations of factors, or differences of differences across two factors can be performed by conducting those contrasts on a linear model aligned-and-ranked on the factors involved in the contrasts. This linear model may be one of the models used in the ART procedure, or it may require concatenating factors and constructing a new model, a procedure called *ART contrasts* or *ART-C* ([Elkin et al. 2021](https://dx.doi.org/10.1145/3472749.3474784)). The `art.con()` function selects the appropriate model given a desired set of contrasts and then performs the requested contrasts. This page explains when and why a separate aligning-and-ranking procedure is needed to conduct contrasts and demonstrates how to conduct those contrasts using the `art.con()` function within the ART paradigm. If you are not sure when/how to select the appropriate aligned-and-ranked linear model for a given contrast (i.e. when to use ART versus ART-C), the `art.con()` function demonstrated in this vignette will select the appropriate method given a contrast specification. ## Contents 1. [Test Dataset](#test-dataset): Description of the test data we will use to compare a linear model against ART 2. [Contrast tests of main effects](#contrast-tests-of-main-effects): Demo of conducting contrasts within a single factor (no interaction) 3. [Tests of differences in pairwise combinations of levels between factors in interactions](#tests-of-differences-in-pairwise-combinations-of-levels-between-factors-in-interactions): This type of interaction contrast must be conducted with ART-C 4. [Tests of differences of differences in interactions](#tests-of-differences-of-differences-in-interactions): This type of interaction contrast can be conducted with ART ## Libraries needed for this vignette ```{r setup, include=FALSE} knitr::opts_chunk$set( #default code chunk options fig.width = 6, fig.height = 4 ) pander::panderOptions("table.split.table", Inf) #don't split wide tables in output pander::panderOptions("table.style", "rmarkdown") #table style that's supported by github ``` ```{r message=FALSE, warning=FALSE} library(dplyr) #data_frame, %>%, filter, summarise, group_by library(emmeans) #emmeans, contrast library(phia) #testInteractions library(tidyr) #spread library(ARTool) #art, artlm library(ggplot2) #ggplot, stat_..., geom_..., etc ``` ## Test dataset Let's generate some test data where we actually know what the effects are. Specifically, ```{r} n_per_group = 150 df = tibble( X1 = factor(c(rep("A", n_per_group), rep("B", n_per_group))), X2 = factor(rep(c("C","D","E"), n_per_group * 2/3)), Y = rnorm( n_per_group * 2, (X1 == "B") + 2* (X2 == "D") + 2 * (X1 == "B" & X2 == "D") - 2 * (X1 == "A" & X2 == "D") + 2 * (X2 == "E") ) ) ``` This is normally-distributed error with the same variance at all levels, so we can compare the results of ART and ART-C to a linear model, which will correctly estimate the effects. 
I pre-ran the above code and saved it as `InteractionTestData` so that the text here is consistent: ```{r} data(InteractionTestData, package = "ARTool") df = InteractionTestData #save some typing ``` The "true" means from the model look like this: | X1 | X2 | Mean | |:--:|:--------:|:----:| | A | C or D | 0 | | A | E | 2 | | B | C | 1 | | B | D | 5 | | B | E | 3 | Which we can see pretty well: ```{r interaction_plot, fig.cap=""} # variant of the Dark2 colorbrewer scale with specific name mappings (so # we can keep color -> name mapping consistent throughout this document) palette = c("#1b9e77", "#d95f02", "#7570b3") names(palette) = c("C", "D", "E") df %>% ggplot(aes(x = X1, y = Y, color = X2)) + geom_violin(trim = FALSE, adjust = 1.5) + geom_point(pch = "-", size = 4) + stat_summary(fun = mean, geom = "point", size = 4) + stat_summary(aes(group = X2), fun = mean, geom = "line", size = 1) + stat_summary(aes(x = 1.5, group = NA), fun = mean, geom = "point", size = 9, pch = "+") + scale_y_continuous(breaks = seq(-6, 10, by = 2), minor_breaks = -6:10) + scale_color_manual(guide = FALSE, values = palette) + coord_cartesian(ylim = c(-6, 10)) + facet_grid(. ~ X2) ``` And "true" means for each level (averaging over the levels of the other factor): | Level | Mean | |:--------:|:--------:| | X1 == A | 0.66666 | | X1 == B | 3 | | X2 == C | 0.5 | | X2 == D | 2.5 | | X2 == E | 2.5 | Let's fit a linear model: ```{r} m.linear = lm(Y ~ X1*X2, data = df) anova(m.linear) ``` Now with ART: ```{r} m.art = art(Y ~ X1*X2, data = df) anova(m.art) ``` Both have significance at all levels (expected given the number of samples and the "true" effects) and similar enough F values. The real question is whether/what kind of contrast tests make sense. ### Contrast tests of main effects For the main effects, let's look at contrast tests for the linear model: ```{r, message=FALSE} contrast(emmeans(m.linear, ~ X1), method = "pairwise") contrast(emmeans(m.linear, ~ X2), method = "pairwise") ``` These are about right: The "true" effect for `A - B` is `-2.3333`, for `C - D` and `C - E` is `-2`, and for `D - E` is `0` (see table above). For ART, `artlm()` will return the appropriate linear model for single-factor contrasts, which we can then use with a library that does contrasts (such as `emmeans()`): ```{r, message=FALSE} # this works for single factors, though it is better (more general) to use # artlm.con() or art.con() (see below) contrast(emmeans(artlm(m.art, "X1"), ~ X1), method = "pairwise") contrast(emmeans(artlm(m.art, "X2"), ~ X2), method = "pairwise") ``` This is about right (effects in the same direction, the estimates aren't the same because they are on the scale of ranks and not the data, but the t values are similar to the linear model, as we should hope). However, I recommend using `artlm.con()` instead of `artlm()`, as it will also return the correct model in this case but not in the general case, as we will see below. 
Using `artlm.con()`, we get the same result as before:

```{r, message=FALSE}
contrast(emmeans(artlm.con(m.art, "X1"), ~ X1), method = "pairwise")
contrast(emmeans(artlm.con(m.art, "X2"), ~ X2), method = "pairwise")
```

We can also use the shortcut function `art.con()`, which will perform the appropriate call to both `artlm.con()` and `emmeans()` for the desired contrast:

```{r, message=FALSE}
art.con(m.art, "X1")
art.con(m.art, "X2")
```

**Within a single factor**, ART (i.e., `artlm()`) and ART-C (`artlm.con()`) are mathematically equivalent, so the contrast tests for ART and ART-C have the same results.

### Tests of differences in pairwise combinations of levels between factors in interactions

Now let's look at tests of differences in combinations of levels between factors:

```{r}
contrast(emmeans(m.linear, ~ X1:X2), method = "pairwise")
```

If we naively apply the ART procedure (using `artlm()`), we will get incorrect results:

```{r}
#DO NOT DO THIS!
contrast(emmeans(artlm(m.art, "X1:X2"), ~ X1:X2), method = "pairwise")
```

Compare these to the linear model: very different results! The linear model tests are easy to interpret: they tell us the expected mean difference between combinations of levels. The ART results are more difficult to interpret. Take `A,C - A,D`, which looks like this:

```{r interaction_plot_AC_AD, fig.cap="", fig.width=3}
df %>%
  filter(X1 == "A", X2 %in% c("C", "D")) %>%
  ggplot(aes(x = X1:X2, y = Y, color = X2)) +
  geom_violin(trim = FALSE, adjust = 1.5) +
  geom_point(pch = "-", size = 4) +
  stat_summary(fun = mean, geom = "point", size = 4) +
  scale_y_continuous(breaks = seq(-6, 10, by = 2), minor_breaks = -6:10) +
  scale_color_manual(guide = FALSE, values = palette) +
  coord_cartesian(ylim=c(-6,10))
```

The linear model correctly estimates this difference as approximately `0`, which is both the true effect and what we should expect from a visual inspection of the data. Unlike the linear model, the ART model gives us a statistically significant difference between `A,C` and `A,D`, which, if we interpret in the same way as the linear model, is obviously incorrect. The key here is to understand that ART is reporting differences with the main effects subtracted out. That is, the `A,C - A,D` effect is something like the difference between this combination of levels if we first subtracted out the effect of `C - D`. We can see this: if we take the ART estimate for `C - D` in the `emmeans` output for `X2` above (`-123.13`) and add it to the ART estimate for `A,C - A,D` (`125.12`) here, we get an approximate estimate of the difference (`-123.13 + 125.12 == 1.99`) that is consistent with the expected 0 (given the SE here).

The ART-C procedure was developed to align and rank data specifically for contrasts involving levels from any number of factors, and is available through `art.con()`:

```{r message = FALSE}
art.con(m.art, "X1:X2")
```

Like the linear model, `art.con()` correctly estimates the difference between `A,C - A,D` as approximately `0`. In fact, its results agree with the linear model for all contrasts conducted. (Note that the `art.con()` and linear model results appear in a different order).

The syntax used above is consistent with the *term* syntax used by `artlm()`. `art.con()` also accepts the *formula* syntax accepted by `emmeans::emmeans()`.
We can conduct the same contrasts as above using the following syntax:

```{r message = FALSE}
art.con(m.art, ~ X1*X2)
```

We can also manually conduct the contrasts with `emmeans::emmeans()` (or another library for running contrasts) by first extracting the linear model with `artlm.con()`. Note that the contrasts must be performed on the variable constructed by `artlm.con()` with the names of the factors involved concatenated together (`X1X2`):

```{r message = FALSE}
contrast(emmeans(artlm.con(m.art, "X1:X2"), ~ X1X2), method = "pairwise")
```

### Tests of _differences of differences_ in interactions

You may also wish to test _differences of differences_; e.g., for the interaction `X1:X2`, we might ask: is the difference `A - B` different when `X2 = C` compared to when `X2 = D`? We can test this using the `interaction` argument to `art.con()`. When the `interaction` argument is supplied to `art.con()`, differences of differences are tested on data that has been aligned-and-ranked using the **original** ART method (i.e., the data is **not** aligned-and-ranked using the ART-C method, as it is not necessary for these contrasts).

Before we test, let's try to visualize what's going on in just this interaction:

```{r, interaction_plot_C_D, fig.cap=""}
plot_interaction_for_X2_levels = function(...) {
  x2_levels = c(...)
  df. = filter(df, X2 %in% x2_levels)

  X1_in_X2 = df. %>%
    group_by(X1, X2) %>%
    summarise(Y = mean(Y), .groups = "drop") %>%
    spread(X1, Y)

  print(
    ggplot(df., aes(x = X1, y = Y, color = X2)) +
      geom_violin(trim = FALSE, adjust = 1.5) +
      geom_point(pch = "-", size = 4) +
      stat_summary(fun = mean, geom = "point", size = 4) +
      stat_summary(aes(group = X2), fun = mean, geom = "line", size = 1, linetype = "dashed") +
      geom_errorbar(
        aes(x = 2.2, ymin = A, ymax = B, y = NULL),
        data = X1_in_X2, width = .19, size = 0.8, color = "black"
      ) +
      geom_text(
        aes(x = 2.35, y = (A + B)/2, label = paste("A - B |", X2)),
        data = X1_in_X2, hjust = 0, size = 5, color = "black"
      ) +
      scale_y_continuous(breaks = seq(-6, 10, by = 2), minor_breaks = -6:10) +
      scale_color_manual(guide = FALSE, values = palette[x2_levels]) +
      coord_cartesian(xlim = c(0, 3.5), ylim = c(-6,10)) +
      facet_grid(. ~ X2)
  )
}
plot_interaction_for_X2_levels("C", "D")
```

The true effect for `A - B | C` is -1, for `A - B | D` is -5, and for `(A - B | C) - (A - B | D)` is `(-1) - (-5) = 4`. Visually, we're asking if the two dashed lines in the above plot are parallel. Equivalently, we're asking if the vertical distance from the mean of A to the mean of B in the left panel (when X2 == C) is the same as the vertical distance between A and B in the right panel (when X2 == D). The true difference between these vertical distances (the "difference of a difference") is 4, which is also about what we would estimate it to be by looking at the above plot.

We can get the estimate of this "difference of a difference" from the linear model by adding `interaction = TRUE` to the same call to `contrast` we made previously:

```{r}
contrast(emmeans(m.linear, ~ X1:X2), method = "pairwise", interaction = TRUE)
```

Here we can interpret the row `A - B C - D` as the difference between (`A - B | C`) and (`A - B | D`), which is estimated as `3.82` (close to the true effect of 4, see the plot above).

We can look at a similar plot for the row `A - B C - E`:

```{r interaction_plot_C_E, fig.cap=""}
plot_interaction_for_X2_levels("C", "E")
```

Here the true effect for `A - B | C` is -1, `A - B | E` is also -1, and `(A - B | C) - (A - B | E)` is `0`.
Visually, this sample looks close to the true effects (the height of `A - B | C` is about the same as `A - B | E`). From the row `A - B C - E` above we can see that the estimate from the linear model is ~0, as we should hope.

A similar visual analysis finds the estimate for row `A - B D - E` (~ -4.2) also to be correct (true effect is -4):

```{r, interaction_plot_D_E, fig.cap=""}
plot_interaction_for_X2_levels("D", "E")
```

Now we look at these differences of differences in ART, using `art.con()`:

```{r}
art.con(m.art, "X1:X2", interaction = TRUE)
```

This is equivalent to:

```{r}
contrast(emmeans(artlm(m.art, "X1:X2"), ~ X1:X2), method = "pairwise", interaction = TRUE)
```

And we see *t* values consistent with the linear model, and consistent estimates (given the standard error). These types of comparisons work under ART because they do not involve coefficients of main effects (see the description of these tests in `vignette("phia")`), and thus are consistent even when ART has stripped out the main effects.

If you prefer the `phia` package, the code to run the equivalent tests using the `testInteractions` function in `phia` instead of using `emmeans` is:

```{r}
testInteractions(artlm(m.art, "X1:X2"), pairwise = c("X1","X2"))
```

While `emmeans()` uses _t_ tests in this case, `testInteractions()` gives the result of equivalent _F_ tests with one numerator degree of freedom (an _F_ test with $F(1,\nu) = f$ is equivalent to a two-sided _t_ test with $t(\nu) = \sqrt{f}$). I prefer the _t_ test in this case because the _t_ value preserves the direction of the effect (its sign) and is more amenable to calculating interpretable (ish) effect sizes like Cohen's _d_. For an example of the latter, see <code>[vignette("art-effect-size")](art-effect-size.html)</code>.
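
As a quick numerical check of this equivalence, here is a sketch (not part of the original analysis; it assumes the contrast summary exposes a `t.ratio` column, as `emmeans` contrast summaries do):

```{r}
# Squaring the t values from the ART interaction contrasts should recover
# the F values reported by testInteractions() above.
t_vals = summary(art.con(m.art, "X1:X2", interaction = TRUE))$t.ratio
t_vals^2
```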
/scratch/gouwar.j/cran-all/cranData/ARTool/inst/doc/art-contrasts.Rmd
## ----setup, include=FALSE----------------------------------------------------- knitr::opts_chunk$set( #default code chunk options fig.width = 6, fig.height = 4 ) pander::panderOptions("table.split.table", Inf) #don't split wide tables in output pander::panderOptions("table.style", "rmarkdown") #table style that's supported by github ## ----message=FALSE------------------------------------------------------------ library(dplyr) #%>% library(emmeans) #emmeans library(DescTools) #EtaSq library(car) #sigmaHat library(ARTool) #art, artlm library(ggplot2) #ggplot, stat_..., geom_..., etc ## ----------------------------------------------------------------------------- data(InteractionTestData, package = "ARTool") df = InteractionTestData #save some typing ## ----------------------------------------------------------------------------- #we'll be doing type 3 tests, so we want sum-to-zero contrasts options(contrasts = c("contr.sum", "contr.poly")) m.linear = lm(Y ~ X1*X2, data=df) ## ----------------------------------------------------------------------------- m.art = art(Y ~ X1*X2, data=df) ## ----------------------------------------------------------------------------- m.art.anova = anova(m.art) print(m.art.anova, verbose=TRUE) ## ----------------------------------------------------------------------------- m.art.anova$eta.sq.part = with(m.art.anova, `Sum Sq`/(`Sum Sq` + `Sum Sq.res`)) m.art.anova ## ----------------------------------------------------------------------------- EtaSq(m.linear, type=3) ## ----------------------------------------------------------------------------- x2.contrasts = summary(pairs(emmeans(m.linear, ~ X2))) ## ----------------------------------------------------------------------------- x2.contrasts$d = x2.contrasts$estimate / sigmaHat(m.linear) x2.contrasts ## ----------------------------------------------------------------------------- m.art.x2 = artlm(m.art, "X2") x2.contrasts.art = summary(pairs(emmeans(m.art.x2, ~ X2))) x2.contrasts.art$d = x2.contrasts.art$estimate / sigmaHat(m.art.x2) x2.contrasts.art ## ----------------------------------------------------------------------------- x2.contrasts.ci = confint(pairs(emmeans(m.linear, ~ X2))) %>% mutate(d = estimate / sigmaHat(m.linear)) %>% cbind(d = plyr::ldply(.$d, psych::d.ci, n1 = 100, n2 = 100)) x2.contrasts.ci ## ----------------------------------------------------------------------------- x2.contrasts.art.ci = confint(pairs(emmeans(m.art.x2, ~ X2))) %>% mutate(d = estimate / sigmaHat(m.art.x2)) %>% cbind(d = plyr::ldply(.$d, psych::d.ci, n1 = 100, n2 = 100)) x2.contrasts.art.ci ## ----cohens-d-comparison------------------------------------------------------ rbind( cbind(x2.contrasts.ci, model="linear"), cbind(x2.contrasts.art.ci, model="ART") ) %>% ggplot(aes(x=model, y=d, ymin=d.lower, ymax=d.upper)) + geom_pointrange() + geom_hline(aes(yintercept = true_effect), data = data.frame(true_effect = c(-2, -2, 0), contrast = c("C - D", "C - E", "D - E")), linetype = "dashed", color = "red") + facet_grid(contrast ~ .) + coord_flip()
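
## A quick sanity check (a sketch, not from the original vignette source): each
## partial eta-squared above is Sum Sq / (Sum Sq + Sum Sq.res), so it must lie
## in [0, 1].
stopifnot(all(m.art.anova$eta.sq.part >= 0 & m.art.anova$eta.sq.part <= 1))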
/scratch/gouwar.j/cran-all/cranData/ARTool/inst/doc/art-effect-size.R
--- title: "Effect Sizes with ART" author: "Matthew Kay" date: "`r Sys.Date()`" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{Effect Sizes with ART} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ## Introduction The aligned-rank transform (ART) allows for non-parametric analyses of variance. But how do we derive effect sizes from ART results? __NOTE:__ Before embarking down the path of calculating standardized effect sizes, it is always worth asking if that is what you really want. Carefully consider, for example, the arguments of [Cummings (2011)](https://dx.doi.org/10.1001/archpediatrics.2011.97) against the use of standardized effect sizes and in favor of simple (unstandardized) effect sizes. If you decide you would rather use simple effect sizes, you may need to consider a different procedure than ART, as the ranking procedure destroys the information necessary to calculate simple effect sizes. ## Contents 1. [Test Dataset](#test-dataset): The test data we will use to compare a linear model against ART 1. [Partial _eta_-squared](#partial-eta-squared): Calculation of partial _eta_-squared (effect size for _F_ tests) 1. [Cohen's _d_](#cohens-d): Calculation of standardized mean differences (Cohen's _d_; effect size for _t_ tests), including confidence intervals ## Libraries needed for this ```{r setup, include=FALSE} knitr::opts_chunk$set( #default code chunk options fig.width = 6, fig.height = 4 ) pander::panderOptions("table.split.table", Inf) #don't split wide tables in output pander::panderOptions("table.style", "rmarkdown") #table style that's supported by github ``` ```{r message=FALSE} library(dplyr) #%>% library(emmeans) #emmeans library(DescTools) #EtaSq library(car) #sigmaHat library(ARTool) #art, artlm library(ggplot2) #ggplot, stat_..., geom_..., etc ``` ## Test dataset Let's load the test dataset from <code>[vignette("art-contrasts")](art-contrasts.html)</code>: ```{r} data(InteractionTestData, package = "ARTool") df = InteractionTestData #save some typing ``` Let's fit a linear model: ```{r} #we'll be doing type 3 tests, so we want sum-to-zero contrasts options(contrasts = c("contr.sum", "contr.poly")) m.linear = lm(Y ~ X1*X2, data=df) ``` Now with ART: ```{r} m.art = art(Y ~ X1*X2, data=df) ``` ## Partial _eta_-squared Note that for Fixed-effects-only models and repeated measures models (those with `Error()` terms) ARTool also collects the sums of squares, but does not print them by default. We can pass `verbose = TRUE` to `print()` to print them: ```{r} m.art.anova = anova(m.art) print(m.art.anova, verbose=TRUE) ``` We can use the sums of squares to calculate partial _eta_-squared: ```{r} m.art.anova$eta.sq.part = with(m.art.anova, `Sum Sq`/(`Sum Sq` + `Sum Sq.res`)) m.art.anova ``` We can compare the above results to partial _eta_-squared calculated on the linear model (the second column below): ```{r} EtaSq(m.linear, type=3) ``` The results are comparable. ## Cohen's _d_ We can derive Cohen's _d_ (the standardized mean difference) by dividing estimated differences by the residual standard deviation of the model. Note that this relies somewhat on the assumption of constant variance across levels (aka homoscedasticity). 
### in the linear model (for comparison) As a comparison, let's first derive pairwise contrasts for all levels of X2 in the linear model: ```{r} x2.contrasts = summary(pairs(emmeans(m.linear, ~ X2))) ``` Then divide these estimates by the residual standard deviation to get an estimate of _d_: ```{r} x2.contrasts$d = x2.contrasts$estimate / sigmaHat(m.linear) x2.contrasts ``` Note that this is essentially the same as the unstandardized estimate for this model; that is because this test dataset was generated with a residual standard deviation of 1. ### in ART We can follow the same procedure on the ART model for factor X2: ```{r} m.art.x2 = artlm(m.art, "X2") x2.contrasts.art = summary(pairs(emmeans(m.art.x2, ~ X2))) x2.contrasts.art$d = x2.contrasts.art$estimate / sigmaHat(m.art.x2) x2.contrasts.art ``` Note how standardization is helping us now: The standardized mean differences (_d_) are quite similar to the estimates of _d_ from the linear model above. ## Confidence intervals We can also derive confidence intervals on these effect sizes. To do that, we'll use the `d.ci` function from the `psych` package, which also requires us to indicate how many observations were in each group for each contrast. That is easy in this case, as each group has 100 observations. Thus: ```{r} x2.contrasts.ci = confint(pairs(emmeans(m.linear, ~ X2))) %>% mutate(d = estimate / sigmaHat(m.linear)) %>% cbind(d = plyr::ldply(.$d, psych::d.ci, n1 = 100, n2 = 100)) x2.contrasts.ci ``` And from the ART model: ```{r} x2.contrasts.art.ci = confint(pairs(emmeans(m.art.x2, ~ X2))) %>% mutate(d = estimate / sigmaHat(m.art.x2)) %>% cbind(d = plyr::ldply(.$d, psych::d.ci, n1 = 100, n2 = 100)) x2.contrasts.art.ci ``` And plotting both, to compare (red dashed line is the true effect): ```{r cohens-d-comparison} rbind( cbind(x2.contrasts.ci, model="linear"), cbind(x2.contrasts.art.ci, model="ART") ) %>% ggplot(aes(x=model, y=d, ymin=d.lower, ymax=d.upper)) + geom_pointrange() + geom_hline(aes(yintercept = true_effect), data = data.frame(true_effect = c(-2, -2, 0), contrast = c("C - D", "C - E", "D - E")), linetype = "dashed", color = "red") + facet_grid(contrast ~ .) + coord_flip() ```
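
The `d.ci` calls above used `n1 = 100, n2 = 100`; as a quick check (a sketch, not part of the original analysis), each level of `X2` does indeed contain 100 observations:

```{r}
# Each X2 level should have 100 observations, matching n1 = n2 = 100 above.
table(df$X2)
```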
/scratch/gouwar.j/cran-all/cranData/ARTool/inst/doc/art-effect-size.Rmd
--- title: "Contrast tests with ART" author: "Matthew Kay, Lisa A. Elkin, Jacob O. Wobbrock" date: "`r Sys.Date()`" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{Interaction Contrasts with ART} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ## Introduction The aligned-rank transform (ART) allows for non-parametric analyses of variance ([Wobbrock et al. 2011](https://doi.org/10.1145/1978942.1978963)). But how should we do contrast tests with ART? Contrasts involving levels of single factors, combinations of factors, or differences of differences across two factors can be performed by conducting those contrasts on a linear model aligned-and-ranked on the factors involved in the contrasts. This linear model may be one of the models used in the ART procedure, or it may require concatenating factors and constructing a new model, a procedure called *ART contrasts* or *ART-C* ([Elkin et al. 2021](https://dx.doi.org/10.1145/3472749.3474784)). The `art.con()` function selects the appropriate model given a desired set of contrasts and then performs the requested contrasts. This page explains when and why a separate aligning-and-ranking procedure is needed to conduct contrasts and demonstrates how to conduct those contrasts using the `art.con()` function within the ART paradigm. If you are not sure when/how to select the appropriate aligned-and-ranked linear model for a given contrast (i.e. when to use ART versus ART-C), the `art.con()` function demonstrated in this vignette will select the appropriate method given a contrast specification. ## Contents 1. [Test Dataset](#test-dataset): Description of the test data we will use to compare a linear model against ART 2. [Contrast tests of main effects](#contrast-tests-of-main-effects): Demo of conducting contrasts within a single factor (no interaction) 3. [Tests of differences in pairwise combinations of levels between factors in interactions](#tests-of-differences-in-pairwise-combinations-of-levels-between-factors-in-interactions): This type of interaction contrast must be conducted with ART-C 4. [Tests of differences of differences in interactions](#tests-of-differences-of-differences-in-interactions): This type of interaction contrast can be conducted with ART ## Libraries needed for this vignette ```{r setup, include=FALSE} knitr::opts_chunk$set( #default code chunk options fig.width = 6, fig.height = 4 ) pander::panderOptions("table.split.table", Inf) #don't split wide tables in output pander::panderOptions("table.style", "rmarkdown") #table style that's supported by github ``` ```{r message=FALSE, warning=FALSE} library(dplyr) #data_frame, %>%, filter, summarise, group_by library(emmeans) #emmeans, contrast library(phia) #testInteractions library(tidyr) #spread library(ARTool) #art, artlm library(ggplot2) #ggplot, stat_..., geom_..., etc ``` ## Test dataset Let's generate some test data where we actually know what the effects are. Specifically, ```{r} n_per_group = 150 df = tibble( X1 = factor(c(rep("A", n_per_group), rep("B", n_per_group))), X2 = factor(rep(c("C","D","E"), n_per_group * 2/3)), Y = rnorm( n_per_group * 2, (X1 == "B") + 2* (X2 == "D") + 2 * (X1 == "B" & X2 == "D") - 2 * (X1 == "A" & X2 == "D") + 2 * (X2 == "E") ) ) ``` This is normally-distributed error with the same variance at all levels, so we can compare the results of ART and ART-C to a linear model, which will correctly estimate the effects. 
I pre-ran the above code and saved it as `InteractionTestData` so that the text here is consistent: ```{r} data(InteractionTestData, package = "ARTool") df = InteractionTestData #save some typing ``` The "true" means from the model look like this: | X1 | X2 | Mean | |:--:|:--------:|:----:| | A | C or D | 0 | | A | E | 2 | | B | C | 1 | | B | D | 5 | | B | E | 3 | Which we can see pretty well: ```{r interaction_plot, fig.cap=""} # variant of the Dark2 colorbrewer scale with specific name mappings (so # we can keep color -> name mapping consistent throughout this document) palette = c("#1b9e77", "#d95f02", "#7570b3") names(palette) = c("C", "D", "E") df %>% ggplot(aes(x = X1, y = Y, color = X2)) + geom_violin(trim = FALSE, adjust = 1.5) + geom_point(pch = "-", size = 4) + stat_summary(fun = mean, geom = "point", size = 4) + stat_summary(aes(group = X2), fun = mean, geom = "line", size = 1) + stat_summary(aes(x = 1.5, group = NA), fun = mean, geom = "point", size = 9, pch = "+") + scale_y_continuous(breaks = seq(-6, 10, by = 2), minor_breaks = -6:10) + scale_color_manual(guide = FALSE, values = palette) + coord_cartesian(ylim = c(-6, 10)) + facet_grid(. ~ X2) ``` And "true" means for each level (averaging over the levels of the other factor): | Level | Mean | |:--------:|:--------:| | X1 == A | 0.66666 | | X1 == B | 3 | | X2 == C | 0.5 | | X2 == D | 2.5 | | X2 == E | 2.5 | Let's fit a linear model: ```{r} m.linear = lm(Y ~ X1*X2, data = df) anova(m.linear) ``` Now with ART: ```{r} m.art = art(Y ~ X1*X2, data = df) anova(m.art) ``` Both have significance at all levels (expected given the number of samples and the "true" effects) and similar enough F values. The real question is whether/what kind of contrast tests make sense. ### Contrast tests of main effects For the main effects, let's look at contrast tests for the linear model: ```{r, message=FALSE} contrast(emmeans(m.linear, ~ X1), method = "pairwise") contrast(emmeans(m.linear, ~ X2), method = "pairwise") ``` These are about right: The "true" effect for `A - B` is `-2.3333`, for `C - D` and `C - E` is `-2`, and for `D - E` is `0` (see table above). For ART, `artlm()` will return the appropriate linear model for single-factor contrasts, which we can then use with a library that does contrasts (such as `emmeans()`): ```{r, message=FALSE} # this works for single factors, though it is better (more general) to use # artlm.con() or art.con() (see below) contrast(emmeans(artlm(m.art, "X1"), ~ X1), method = "pairwise") contrast(emmeans(artlm(m.art, "X2"), ~ X2), method = "pairwise") ``` This is about right (effects in the same direction, the estimates aren't the same because they are on the scale of ranks and not the data, but the t values are similar to the linear model, as we should hope). However, I recommend using `artlm.con()` instead of `artlm()`, as it will also return the correct model in this case but not in the general case, as we will see below. 
Using `artlm.con()`, we get the same result as before: ```{r, message=FALSE} contrast(emmeans(artlm.con(m.art, "X1"), ~ X1), method = "pairwise") contrast(emmeans(artlm.con(m.art, "X2"), ~ X2), method = "pairwise") ``` We can also use the shortcut function `art.con()`, which will perform the appropriate call to both `artlm.con()` and `emmeans()` for the desired contrast: ```{r, message=FALSE} art.con(m.art, "X1") art.con(m.art, "X2") ``` **Within a single factor** ART (i.e., `artlm()`) and ART-C (`artlm.con()`) are mathematically equivalent, so the contrast tests for ART and ART-C have the same results. ### Tests of differences in pairwise combinations of levels between factors in interactions Now let's look at tests of differences in combinations of levels between factors: ```{r} contrast(emmeans(m.linear, ~ X1:X2), method = "pairwise") ``` If we naively apply the ART procedure (using `artlm()`), we will get incorrect results: ```{r} #DO NOT DO THIS! contrast(emmeans(artlm(m.art, "X1:X2"), ~ X1:X2), method = "pairwise") ``` Compare these to the linear model: very different results! The linear model tests are easy to interpret: they tell us the expected mean difference between combinations of levels. The ART results are more difficult to interpret. Take `A,C - A,D`, which looks like this: ```{r interaction_plot_AC_AD, fig.cap="", fig.width=3} df %>% filter(X1 == "A", X2 %in% c("C", "D")) %>% ggplot(aes(x = X1:X2, y = Y, color = X2)) + geom_violin(trim = FALSE, adjust = 1.5) + geom_point(pch = "-", size = 4) + stat_summary(fun = mean, geom = "point", size = 4) + scale_y_continuous(breaks = seq(-6, 10, by = 2), minor_breaks = -6:10) + scale_color_manual(guide = FALSE, values = palette) + coord_cartesian(ylim=c(-6,10)) ``` The linear model correctly estimates this difference as approximately `0`, which is both the true effect and what we should expect from a visual inspection of the data. Unlike the linear model, the ART model gives us a statistically significant difference between `A,C` and `A,D`, which if we interpret in the same way as the linear model is obviously incorrect. The key here is to understand that ART is reporting differences with the main effects subtracted out. That is, the `A,C - A,D` effect is something like the difference between this combination of levels if we first subtracted out the effect of `C - D`. We can see this if we take the ART estimate for `C - D` in the `emmeans` output for `X2` above (`-123.13`) and the ART estimate for `A,C - A,D` (`125.12`) here, we can get approximate an estimate of the difference (`-123.13 + 125.12 == 1.99`) that is consistent with the expected 0 (given the SE here). The ART-C procedure was developed to align and rank data specifically for contrasts involving levels from any number of factors, and is available through `art.con()`: ```{r message = FALSE} art.con(m.art, "X1:X2") ``` Like the linear model, `art.con()` correctly estimates the difference between `A,C - A,D` as approximately `0`. In fact, its results agree with the linear model for all contrasts conducted. (Note that the `art.con()` and linear model results appear in a different order). The syntax used above is consistent with *term* syntax used by `artlm()`. `art.con()` also accepts the *formula* syntax accepted by `emmeans::emmeans()`. 
We can conduct the same contrasts as above using the following syntax: ```{r message = FALSE} art.con(m.art, ~ X1*X2) ``` We can also manually conduct the contrasts with `emmeans::emmeans()` (or another library for running contrasts) by first extracting the linear model with `artlm.con()`. Note that the contrasts must be performed on the variable constructed by `artlm.con()` with the names of the factors involved concatenated together (`X1X2`): ```{r message = FALSE} contrast(emmeans(artlm.con(m.art, "X1:X2"), ~ X1X2), method = "pairwise") ``` ### Tests of _differences of differences_ in interactions You may also with to test _differences of differences_; e.g., for the interaction `X1:X2`, we might ask, is the difference `A - B` different when `X2 = C` compared to when `X2 = D`?. We can test this using the `interaction` argument to `art.con()`. When the `interaction` argument is supplied to art.con, differences of differences are tested on data that has been aligned-and-ranked using the **original** ART method (i.e., the data is **not** aligned-and-ranked using the ART-C method, as it is not necessary for these contrasts). Before we test, let's try to visualize what's going on in just this interaction: ```{r, interaction_plot_C_D, fig.cap=""} plot_interaction_for_X2_levels = function(...) { x2_levels = c(...) df. = filter(df, X2 %in% x2_levels) X1_in_X2 = df. %>% group_by(X1, X2) %>% summarise(Y = mean(Y), .groups = "drop") %>% spread(X1, Y) print( ggplot(df., aes(x = X1, y = Y, color = X2)) + geom_violin(trim = FALSE, adjust = 1.5) + geom_point(pch = "-", size = 4) + stat_summary(fun = mean, geom = "point", size = 4) + stat_summary(aes(group = X2), fun = mean, geom = "line", size = 1, linetype = "dashed") + geom_errorbar( aes(x = 2.2, ymin = A, ymax = B, y = NULL), data = X1_in_X2, width = .19, size = 0.8, color = "black" ) + geom_text( aes(x = 2.35, y = (A + B)/2, label = paste("A - B |", X2)), data = X1_in_X2, hjust = 0, size = 5, color = "black" ) + scale_y_continuous(breaks = seq(-6, 10, by = 2), minor_breaks = -6:10) + scale_color_manual(guide = FALSE, values = palette[x2_levels]) + coord_cartesian(xlim = c(0, 3.5), ylim = c(-6,10)) + facet_grid(. ~ X2) ) } plot_interaction_for_X2_levels("C", "D") ``` The true effect for `A - B | C` is -1, for `A - B | D` is -5, and for `(A - B | C) - (A - B | D)` is `(-1) - (-5) = 4`. Visually, we're asking if the two dashed lines in the above plot are parallel. Equivalently, we're asking if the vertical distance from the mean of A to the mean of B in the left panel (when X2 == C) is the same as the vertical distance between A and B in the right panel (when X2 == D). The true difference between these vertical distances (the "difference of a difference") is 4, which is also about what we would estimate it to be by looking at the above plot. We can get the estimate of this "difference of a difference" from the linear model by adding `interaction = TRUE` to the same call to `contrast` we made previously: ```{r} contrast(emmeans(m.linear, ~ X1:X2), method = "pairwise", interaction = TRUE) ``` Here we can interpret the row `A - B C - D` as the difference between (`A - B | C`) and (`A - B | D`), which is estimated as `3.82` (close to the true effect of 4, see the plot above). We can look at a similar plot for the row `A - B C - E`: ```{r interaction_plot_C_E, fig.cap=""} plot_interaction_for_X2_levels("C", "E") ``` Here the true effect for `A - B | C` is -1, `A - B | E` is also -1, and `(A - B | C) - (A - B | E)` is `0`. 
Visually, this sample looks close to the true effects (the height of `A - B | C` is about the same as `A - B | E`). From the the row `A-B : C-E` above we can see that the estimate from the linear model is ~0, as we should hope. A similar visual analysis finds the estimate for row `A - B D - E` (~ -4.2) also to be correct (true effect is -4): ```{r, interaction_plot_D_E, fig.cap=""} plot_interaction_for_X2_levels("D", "E") ``` Now we look at these differences of differences in ART, using art.con(): ```{r} art.con(m.art, "X1:X2", interaction = TRUE) ``` This is equivalent to: ```{r} contrast(emmeans(artlm(m.art, "X1:X2"), ~ X1:X2), method = "pairwise", interaction = TRUE) ``` And we see *t* values consistent with the linear model, and consistent estimates (given the standard error). These types of comparisons work under ART because they do not involve coefficients of main effects (see the description of these tests in `vignette("phia")`), thus are consistent even when ART has stripped out the main effects. If you prefer the `phia` package, the code to run the equivalent tests using the `testInteractions` function in `phia` instead of using `emmeans` is: ```{r} testInteractions(artlm(m.art, "X1:X2"), pairwise = c("X1","X2")) ``` While `emmeans()` uses _t_ tests in this case, `testInteractions()` gives the result of equivalent _F_ tests with one numerator degree of freedom (an _F_ test with $F(1,\nu) = f$ is equivalent to a two-sided _t_ test with $t(\nu) = \sqrt{f}$). I prefer the _t_ test in this case because the _t_ value preserves the direction of the effect (its sign) and is more amenable to calculating interpretable (ish) effect sizes like Cohen's _d_. For an example of the latter, see <code>[vignette("art-effect-size")](art-effect-size.html)</code>.
/scratch/gouwar.j/cran-all/cranData/ARTool/vignettes/art-contrasts.Rmd
--- title: "Effect Sizes with ART" author: "Matthew Kay" date: "`r Sys.Date()`" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{Effect Sizes with ART} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ## Introduction The aligned-rank transform (ART) allows for non-parametric analyses of variance. But how do we derive effect sizes from ART results? __NOTE:__ Before embarking down the path of calculating standardized effect sizes, it is always worth asking if that is what you really want. Carefully consider, for example, the arguments of [Cummings (2011)](https://dx.doi.org/10.1001/archpediatrics.2011.97) against the use of standardized effect sizes and in favor of simple (unstandardized) effect sizes. If you decide you would rather use simple effect sizes, you may need to consider a different procedure than ART, as the ranking procedure destroys the information necessary to calculate simple effect sizes. ## Contents 1. [Test Dataset](#test-dataset): The test data we will use to compare a linear model against ART 1. [Partial _eta_-squared](#partial-eta-squared): Calculation of partial _eta_-squared (effect size for _F_ tests) 1. [Cohen's _d_](#cohens-d): Calculation of standardized mean differences (Cohen's _d_; effect size for _t_ tests), including confidence intervals ## Libraries needed for this ```{r setup, include=FALSE} knitr::opts_chunk$set( #default code chunk options fig.width = 6, fig.height = 4 ) pander::panderOptions("table.split.table", Inf) #don't split wide tables in output pander::panderOptions("table.style", "rmarkdown") #table style that's supported by github ``` ```{r message=FALSE} library(dplyr) #%>% library(emmeans) #emmeans library(DescTools) #EtaSq library(car) #sigmaHat library(ARTool) #art, artlm library(ggplot2) #ggplot, stat_..., geom_..., etc ``` ## Test dataset Let's load the test dataset from <code>[vignette("art-contrasts")](art-contrasts.html)</code>: ```{r} data(InteractionTestData, package = "ARTool") df = InteractionTestData #save some typing ``` Let's fit a linear model: ```{r} #we'll be doing type 3 tests, so we want sum-to-zero contrasts options(contrasts = c("contr.sum", "contr.poly")) m.linear = lm(Y ~ X1*X2, data=df) ``` Now with ART: ```{r} m.art = art(Y ~ X1*X2, data=df) ``` ## Partial _eta_-squared Note that for Fixed-effects-only models and repeated measures models (those with `Error()` terms) ARTool also collects the sums of squares, but does not print them by default. We can pass `verbose = TRUE` to `print()` to print them: ```{r} m.art.anova = anova(m.art) print(m.art.anova, verbose=TRUE) ``` We can use the sums of squares to calculate partial _eta_-squared: ```{r} m.art.anova$eta.sq.part = with(m.art.anova, `Sum Sq`/(`Sum Sq` + `Sum Sq.res`)) m.art.anova ``` We can compare the above results to partial _eta_-squared calculated on the linear model (the second column below): ```{r} EtaSq(m.linear, type=3) ``` The results are comparable. ## Cohen's _d_ We can derive Cohen's _d_ (the standardized mean difference) by dividing estimated differences by the residual standard deviation of the model. Note that this relies somewhat on the assumption of constant variance across levels (aka homoscedasticity). 
### in the linear model (for comparison) As a comparison, let's first derive pairwise contrasts for all levels of X2 in the linear model: ```{r} x2.contrasts = summary(pairs(emmeans(m.linear, ~ X2))) ``` Then divide these estimates by the residual standard deviation to get an estimate of _d_: ```{r} x2.contrasts$d = x2.contrasts$estimate / sigmaHat(m.linear) x2.contrasts ``` Note that this is essentially the same as the unstandardized estimate for this model; that is because this test dataset was generated with a residual standard deviation of 1. ### in ART We can follow the same procedure on the ART model for factor X2: ```{r} m.art.x2 = artlm(m.art, "X2") x2.contrasts.art = summary(pairs(emmeans(m.art.x2, ~ X2))) x2.contrasts.art$d = x2.contrasts.art$estimate / sigmaHat(m.art.x2) x2.contrasts.art ``` Note how standardization is helping us now: The standardized mean differences (_d_) are quite similar to the estimates of _d_ from the linear model above. ## Confidence intervals We can also derive confidence intervals on these effect sizes. To do that, we'll use the `d.ci` function from the `psych` package, which also requires us to indicate how many observations were in each group for each contrast. That is easy in this case, as each group has 100 observations. Thus: ```{r} x2.contrasts.ci = confint(pairs(emmeans(m.linear, ~ X2))) %>% mutate(d = estimate / sigmaHat(m.linear)) %>% cbind(d = plyr::ldply(.$d, psych::d.ci, n1 = 100, n2 = 100)) x2.contrasts.ci ``` And from the ART model: ```{r} x2.contrasts.art.ci = confint(pairs(emmeans(m.art.x2, ~ X2))) %>% mutate(d = estimate / sigmaHat(m.art.x2)) %>% cbind(d = plyr::ldply(.$d, psych::d.ci, n1 = 100, n2 = 100)) x2.contrasts.art.ci ``` And plotting both, to compare (red dashed line is the true effect): ```{r cohens-d-comparison} rbind( cbind(x2.contrasts.ci, model="linear"), cbind(x2.contrasts.art.ci, model="ART") ) %>% ggplot(aes(x=model, y=d, ymin=d.lower, ymax=d.upper)) + geom_pointrange() + geom_hline(aes(yintercept = true_effect), data = data.frame(true_effect = c(-2, -2, 0), contrast = c("C - D", "C - E", "D - E")), linetype = "dashed", color = "red") + facet_grid(contrast ~ .) + coord_flip() ```
/scratch/gouwar.j/cran-all/cranData/ARTool/vignettes/art-effect-size.Rmd
#' @title Censored Mixed-Effects Models with Autoregressive Correlation Structure and DEC for Normal and t-Student Errors
#' @import numDeriv
#' @import TruncatedNormal
#' @import LaplacesDemon
#' @import tcltk
#' @import MASS
#' @import stats
#' @import relliptical
#' @import expm
#' @description This function fits left-, right- or interval-censored mixed-effects linear models, with autoregressive errors of order \code{p}, using the EM algorithm. It returns estimates, standard errors and predictions of future observations.
#' @param y Vector \code{1 x n} of censored responses, where \code{n} is the sum of the number of observations of each individual.
#' @param x Design matrix of the fixed effects of order \code{n x s}, corresponding to the vector of fixed effects.
#' @param z Design matrix of the random effects of order \code{n x b}, corresponding to the vector of random effects.
#' @param cc Vector of censoring indicators of length \code{n}, where \code{n} is the total number of observations. For each observation: \code{0} if non-censored, \code{1} if censored.
#' @param nj Vector \code{1 x m} with the number of observations for each subject, where \code{m} is the total number of individuals.
#' @param tt Vector \code{1 x n} with the times at which the measurements were made, where \code{n} is the total number of measurements for all individuals. By default, regularly spaced times are considered.
#' @param struc \code{UNC}, \code{ARp}, \code{DEC}, \code{SYM} or \code{DEC(AR)} for the uncorrelated, autoregressive, DEC(phi1,phi2), compound-symmetry (DEC with phi2 = 0) and DEC(phi1,phi2 = 1) structures, respectively.
#' @param order Order of the autoregressive process. Must be a positive integer value.
#' @param initial List with the initial values, in the following order: betas, sigma2, alphas, phi and nu. If not indicated, it will be provided automatically. Default is \code{NULL}.
#' @param nu.fixed Logical. Should the parameter "nu" of the t-Student distribution be estimated? If \code{FALSE}, indicate its value in the list of initial values. Default is \code{TRUE}.
#' @param typeModel \code{Normal} for the Normal distribution and \code{Student} for the t-Student distribution. Default is \code{Normal}.
#' @param cens.type \code{left} for left censoring, \code{right} for right censoring and \code{interval} for interval censoring. Default is \code{left}.
#' @param LI Vector of lower censoring limits of length \code{n}. For each observation: \code{0} if non-censored, \code{-inf} if censored. It is only indicated when \code{cens.type} is \code{interval}. Default is \code{NULL}.
#' @param LS Vector of upper censoring limits of length \code{n}. For each observation: \code{0} if non-censored, \code{inf} if censored. It is only indicated when \code{cens.type} is \code{interval}. Default is \code{NULL}.
#' @param MaxIter The maximum number of iterations of the EM algorithm. Default is \code{200}.
#' @param error The maximum convergence error. Default is \code{0.0001}.
#' @param Prev Indicator of the prediction process. Available at the moment only for the \code{typeModel = Normal} case. Default is \code{FALSE}.
#' @param isubj Vector indicating the subjects included in the prediction process. Default is \code{NULL}.
#' @param step Number of steps for prediction. Default is \code{NULL}.
#' @param xpre Design matrix of the fixed effects to be predicted. Default is \code{NULL}.
#' @param zpre Design matrix of the random effects to be predicted. Default is \code{NULL}.
#' @return returns a list of class \dQuote{ARpMMEC}:
#' \item{FixEffect}{Data frame with: estimates, standard errors and confidence intervals of the fixed effects.}
#' \item{Sigma2}{Data frame with: estimate, standard error and confidence interval of the variance of the white noise process.}
#' \item{Phi}{Data frame with: estimates, standard errors and confidence intervals of the autoregressive parameters.}
#' \item{RandEffect}{Data frame with: estimates, standard errors and confidence intervals of the random effects.}
#' \item{nu}{The parameter "nu" of the t-Student distribution.}
#' \item{Est}{Vector of parameter estimates (fixed effects, sigma2, phi, random effects).}
#' \item{SE}{Vector of the standard errors of (fixed effects, sigma2, phi, random effects).}
#' \item{Residual}{Vector of the marginal residuals.}
#' \item{loglik}{Log-likelihood value.}
#' \item{AIC}{Akaike information criterion.}
#' \item{BIC}{Bayesian information criterion.}
#' \item{AICc}{Corrected Akaike information criterion.}
#' \item{iter}{Number of iterations until convergence.}
#' \item{Yfit}{Vector of fitted values of "y".}
#' \item{MI}{Information matrix.}
#' \item{Prev}{Predicted values (if xpre and zpre are not \code{NULL}).}
#' \item{time}{Processing time.}
#' \item{others}{The first and second moments of the random effects and of the response vector y.}
#' @references Olivari, R. C., Garay, A. M., Lachos, V. H., & Matos, L. A. (2021). Mixed-effects
#' models for censored data with autoregressive errors. Journal of Biopharmaceutical Statistics, 31(3), 273-294.
#' \doi{10.1080/10543406.2020.1852246}
#' @examples
#' \dontrun{
#'p.cens = 0.1
#'m = 10
#'D = matrix(c(0.049,0.001,0.001,0.002),2,2)
#'sigma2 = 0.30
#'phi = 0.6
#'beta = c(1,2,1)
#'nj=rep(4,10)
#'tt=rep(1:4,length(nj))
#'x<-matrix(runif(sum(nj)*length(beta),-1,1),sum(nj),length(beta))
#'z<-matrix(runif(sum(nj)*dim(D)[1],-1,1),sum(nj),dim(D)[1])
#'data=ARpMMEC.sim(m,x,z,tt,nj,beta,sigma2,D,phi,struc="ARp",typeModel="Normal",p.cens=p.cens)
#'
#'teste1=ARpMMEC.est(data$y_cc,x,z,tt,data$cc,nj,struc="ARp",order=1,typeModel="Normal",MaxIter = 2)
#'teste2=ARpMMEC.est(data$y_cc,x,z,tt,data$cc,nj,struc="ARp",order=1,typeModel="Student",MaxIter = 2)
#'
#'xx=matrix(runif(6*length(beta),-1,1),6,length(beta))
#'zz=matrix(runif(6*dim(D)[1],-1,1),6,dim(D)[1])
#'isubj=c(1,4,5)
#'teste3=ARpMMEC.est(data$y_cc,x,z,tt,data$cc,nj,struc="ARp",order=1,typeModel="Normal",
#'                   MaxIter = 2,Prev=TRUE,step=2,isubj=isubj,xpre=xx,zpre=zz)
#'teste3$Prev
#'
#' }
#'
#' @export
ARpMMEC.est <- function(y, x, z, tt, cc, nj, struc = "UNC", order = 1, initial = NULL,
                        nu.fixed = TRUE, typeModel = "Normal", cens.type = "left",
                        LI = NULL, LS = NULL, MaxIter = 200, error = 0.0001,
                        Prev = FALSE, step = NULL, isubj = NULL, xpre = NULL, zpre = NULL) {
  m <- length(y); N <- sum(nj); p <- dim(x)[2]; q1 <- dim(z)[2]; m1 <- m*p; m2 <- m*q1
  if (is.matrix(y))  y  <- y[as.vector(!is.na(as.vector(t(y))))]
  if (is.matrix(cc)) cc <- cc[as.vector(!is.na(as.vector(t(cc))))]
  if (!is.matrix(x)) x <- as.matrix(x)
  if (!is.matrix(z)) z <- as.matrix(z)
  if (is.matrix(nj)) nj <- nj[as.vector(!is.na(as.vector(t(nj))))]
  if (!is.numeric(y)) stop("y must be a numeric vector. Check documentation!")
  if (sum(is.na(y)) > 0) stop("Vector y does not support NA values.")
  if (!is.vector(y)) stop("y must be a vector. Check documentation!")
  if (length(y) != nrow(as.matrix(x))) stop("x does not have the same number of rows as the length of y.")
  if (length(y) != length(cc)) stop("cc does not have the same length as y.")
  if (length(y) != nrow(as.matrix(z))) stop("z does not have the same number of rows as the length of y.")
  if (length(y) != sum(nj)) stop("not compatible sizes between the response y and the repeated measures nj.")
  if (length(y) == 0) stop("The parameter y must be provided.")
  if (length(y) != length(tt)) stop("not compatible sizes between the response y and the time vector tt.")
  if (!is.numeric(x)) stop("x must be a numeric matrix. Check documentation!")
  if (sum(is.na(x)) > 0) stop("There are some NA values in x.")
  if (!is.matrix(x)) stop("x must be a matrix. Check documentation!")
  if (det(t(x) %*% x) == 0) stop("the columns of x must be linearly independent.")
  if (length(x) == 0) stop("The parameter x must be provided.")
  if (!is.numeric(z)) stop("z must be a numeric matrix. Check documentation!")
  if (!is.matrix(z)) stop("z must be a matrix. Check documentation!")
  if (sum(is.na(z)) > 0) stop("There are some NA values in z.")
  if (length(z) == 0) stop("The parameter z must be provided.")
  if (!is.numeric(cc)) stop("cc must be a numeric vector. Check documentation!")
  if (!is.vector(cc)) stop("cc must be a vector. Check documentation!")
  if (sum(is.na(cc)) > 0) stop("There are some NA values in cc.")
  if (sum(cc %in% c(0, 1)) < length(cc)) stop("The elements of the vector cc must be 0 or 1.")
  if (length(cc) == 0) stop("The parameter cc must be provided.")
  if (!is.numeric(nj)) stop("nj must be a numeric vector. Check documentation!")
  if (!is.vector(nj)) stop("nj must be a vector. Check documentation!")
  if (sum(is.na(nj)) > 0) stop("There are some NA values in nj.")
  if (length(nj) == 0) stop("The parameter nj must be provided.")
  if (struc != "DEC" & struc != "DEC(AR)" & struc != "SYM" & struc != "ARp" & struc != "UNC")
    stop("struc must be UNC, DEC, DEC(AR), SYM or ARp. Check documentation!")
  if (struc == "ARp") {
    if (!is.numeric(order)) stop("order must be a number. Check documentation!")
    if (length(order) != 1) stop("order must be a single value.")
    if (is.numeric(order)) {
      if (order != round(order) | order <= 0) stop("order must be a positive integer value.")
    }
  }
  if (!is.null(initial)) {
    if (!is.null(initial$betas)) {
      if (!is.numeric(initial$betas)) stop("betas must be a numeric vector. Check documentation!")
      if (!is.vector(initial$betas)) stop("betas must be a vector. Check documentation!")
      if (length(initial$betas) != ncol(x)) stop("not compatible sizes between the matrix x and the parameter betas.")
    }
    if (!is.null(initial$sigma2)) {
      if (!is.numeric(initial$sigma2)) stop("sigma2 must be a scalar.")
      if (length(initial$sigma2) > 1) stop("sigma2 must be a scalar.")
    }
    if (!is.null(initial$alphas)) {
      if (!is.matrix(initial$alphas)) stop("alphas must be a matrix.")
      if (!isSymmetric(initial$alphas)) stop("alphas must be a symmetric matrix.")
      if (dim(initial$alphas)[2] != ncol(z)) stop("not compatible sizes between the matrix z and the parameter alphas.")
    }
    if (struc == "ARp") {
      if (!is.null(initial$phi)) {
        if (!is.numeric(initial$phi)) stop("phi must be a numeric vector. Check documentation!")
        if (length(initial$phi) != order) stop("not compatible sizes between the AR(p) order and the parameter phi. Check documentation!")
      }
    }
  }
  if (typeModel != "Normal" & typeModel != "Student") stop("typeModel must be Normal or Student. Check documentation!")
  if (cens.type != "left" & cens.type != "right" & cens.type != "interval") stop("cens.type must be left, right or interval. Check documentation!")
  if (cens.type == "interval" & is.null(LI)) stop("The parameter LI must be provided. Check documentation!")
  if (cens.type == "interval" & is.null(LS)) stop("The parameter LS must be provided. Check documentation!")
  if (!is.null(LI) & !is.numeric(LI)) stop("LI must be a numeric vector. Check documentation!")
  if (!is.null(LS) & !is.numeric(LS)) stop("LS must be a numeric vector. Check documentation!")
  if (length(LS) != length(LI)) stop("not compatible sizes between the vectors LI and LS. Check documentation!")
  if (cens.type == "interval") {
    if (length(y) != length(LI)) stop("not compatible sizes between the vectors y and LI. Check documentation!")
    if (length(y) != length(LS)) stop("not compatible sizes between the vectors y and LS. Check documentation!")
  }
  if (!is.numeric(MaxIter)) stop("MaxIter must be a positive number. Check documentation!")
  if (length(MaxIter) > 1) stop("The MaxIter parameter must be a scalar.")
  if (MaxIter < 0) stop("The MaxIter parameter must be a positive number.")
  if (!is.numeric(error)) stop("error must be a positive number. Check documentation!")
  if (length(error) > 1) stop("The error parameter must be a scalar.")
  if (error < 0) stop("The error parameter must be a positive number.")
  if (Prev) {
    if (is.null(step) | is.null(xpre) | is.null(zpre) | is.null(isubj)) stop("step, isubj, xpre and zpre need to be provided. Check documentation!")
    if (!is.numeric(isubj)) stop("isubj must be a numeric vector. Check documentation!")
    if (!is.numeric(step)) stop("step must be a positive number. Check documentation!")
    if (step < 0) stop("The step parameter must be a positive number.")
    if (length(step) > 1) stop("The step parameter must be a scalar.")
    if (ncol(xpre) != ncol(as.matrix(x))) stop("xpre must have the same number of columns as x.")
    if (sum(is.na(xpre)) > 0) stop("There are some NA values in xpre.")
    if (!is.numeric(xpre)) stop("xpre must be a numeric matrix.")
    if (ncol(zpre) != ncol(as.matrix(z))) stop("zpre must have the same number of columns as z.")
    if (sum(is.na(zpre)) > 0) stop("There are some NA values in zpre.")
    if (!is.numeric(zpre)) stop("zpre must be a numeric matrix.")
    if (nrow(xpre) != length(isubj)*step) stop("not compatible sizes between xpre and isubj. Check documentation!")
    if (nrow(zpre) != length(isubj)*step) stop("not compatible sizes between zpre and isubj. Check documentation!")
  }
  if (typeModel == "Normal") {
    if (struc == "ARp") {
      out <- EMCensArpN(cc = cc, y = y, x = x, z = z, tt = tt, nj = nj, Arp = order, initial = initial,
                        cens.type = cens.type, LI = LI, LS = LS, MaxIter = MaxIter, ee = error,
                        Prev = Prev, step = step, isubj = isubj, xpre = xpre, zpre = zpre)
    }
    if (struc == "UNC") {
      out <- EMCensArpN(cc = cc, y = y, x = x, z = z, tt = tt, nj = nj, Arp = struc, initial = initial,
                        cens.type = cens.type, LI = LI, LS = LS, MaxIter = MaxIter, ee = error,
                        Prev = Prev, step = step, isubj = isubj, xpre = xpre, zpre = zpre)
    }
    if (struc == "DEC" | struc == "DEC(AR)" | struc == "SYM") {
      out <- EMCensDECN(cc = cc, y = y, x = x, z = z, tt = tt, nj = nj, struc = struc, initial = initial,
                        cens.type = cens.type, LI = LI, LS = LS, MaxIter = MaxIter, ee = error,
                        Prev = Prev, step = step, isubj = isubj, xpre = xpre, zpre = zpre)
    }
  }
  if (typeModel == "Student") {
    if (struc == "ARp") {
      out <- EMCensArpT(cc = cc, y = y, x = x, z = z, ttc = tt, nj = nj, Arp = order, initial = initial,
                        cens.type = cens.type, LL = LI, LU = LS, nu.fixed = nu.fixed,
                        iter.max = MaxIter, precision = error)
    }
    if (struc == "UNC") {
      out <- EMCensArpT(cc = cc, y = y, x = x, z = z, ttc = tt, nj = nj, Arp = struc, initial = initial,
                        cens.type = cens.type, LL = LI, LU = LS, nu.fixed = nu.fixed,
                        iter.max = MaxIter, precision = error)
    }
    if (struc == "DEC" | struc == "DEC(AR)" | struc == "SYM") {
      out <- EMCensDECT(cc = cc, y = y, x = x, z = z, ttc = tt, nj = nj, struc = struc, initial = initial,
                        cens.type = cens.type, LL = LI, LU = LS, nu.fixed = nu.fixed,
                        iter.max = MaxIter, precision = error)
    }
  }
  if (struc == "ARp") {
    cat('\n')
    cat('---------------------------------------------------\n')
    cat('Autoregressive censored mixed-effects models \n')
    cat('---------------------------------------------------\n')
    cat('\n')
    cat("Autoregressive order =", order)
    cat('\n')
    cat("Distribution =", typeModel)
    cat('\n')
    if (typeModel == "Student") cat("nu =", out$nu); cat('\n')
    cat("Subjects =", length(nj), ";", 'Observations =', sum(nj))
    cat('\n')
    cat('\n')
    cat('-----------\n')
    cat('Estimates\n')
    cat('-----------\n')
    cat('\n')
    cat('- Fixed effects \n')
    cat('\n')
    print(out$tableB)
    cat('\n')
    cat('\n')
    cat('- Sigma^2 \n')
    cat('\n')
    print(out$tableS)
    cat('\n')
    cat('\n')
    cat('- Autoregressive parameters\n')
    cat('\n')
    print(out$tableP)
    cat('\n')
    cat('\n')
    cat('- Random effects \n')
    cat('\n')
    print(out$tableA)
    cat('\n')
    cat('\n')
    cat('------------------------\n')
    cat('Model selection criteria\n')
    cat('------------------------\n')
    cat('\n')
    critFin <- c(out$loglik, out$AIC, out$BIC)
    critFin <- round(t(as.matrix(critFin)), digits = 3)
    dimnames(critFin) <- list(c("Value"), c("Loglik", "AIC", "BIC"))
    print(critFin)
    cat('\n')
    cat('-------\n')
    cat('Details\n')
    cat('-------\n')
    cat('\n')
    cat("Convergence reached? =", (out$iter < MaxIter))
    cat('\n')
    cat('Iterations =', out$iter, "/", MaxIter)
    cat('\n')
    cat("Processing time =", out$time, units(out$time))
    cat('\n')
  }
  if (struc != "ARp") {
    cat('\n')
    cat('---------------------------------------------------\n')
    cat('DEC censored mixed-effects models \n')
    cat('---------------------------------------------------\n')
    cat('\n')
    cat("Case =", struc)
    cat('\n')
    cat("Distribution =", typeModel)
    cat('\n')
    if (typeModel == "Student") cat("nu =", out$nu); cat('\n')
    cat("Subjects =", length(nj), ";", 'Observations =', sum(nj))
    cat('\n')
    cat('\n')
    cat('-----------\n')
    cat('Estimates\n')
    cat('-----------\n')
    cat('\n')
    cat('- Fixed effects \n')
    cat('\n')
    print(out$tableB)
    cat('\n')
    cat('\n')
    cat('- Sigma^2 \n')
    cat('\n')
    print(out$tableS)
    cat('\n')
    cat('\n')
    if (struc != "UNC") {
      cat('- Autoregressive parameters\n')
      cat('\n')
      print(out$tableP)
      cat('\n')
      cat('\n')
    }
    cat('- Random effects \n')
    cat('\n')
    print(out$tableA)
    cat('\n')
    cat('\n')
    cat('------------------------\n')
    cat('Model selection criteria\n')
    cat('------------------------\n')
    cat('\n')
    critFin <- c(out$loglik, out$AIC, out$BIC)
    critFin <- round(t(as.matrix(critFin)), digits = 3)
    dimnames(critFin) <- list(c("Value"), c("Loglik", "AIC", "BIC"))
    print(critFin)
    cat('\n')
    cat('-------\n')
    cat('Details\n')
    cat('-------\n')
    cat('\n')
    cat("Convergence reached? =", (out$iter < MaxIter))
    cat('\n')
    cat('Iterations =', out$iter, "/", MaxIter)
    cat('\n')
    cat("Processing time =", out$time, units(out$time))
    cat('\n')
  }
  if (typeModel == "Student") nu <- out$nu
  if (typeModel == "Normal") nu <- NULL
  obj.out <- list(FixEffect = out$tableB, Sigma2 = out$tableS, Phi = out$tableP, RandEffect = out$tableA,
                  nu = nu,
                  Est = c(out$beta1, sigma2 = out$sigmae, phi = out$phi, RnEffect = out$dd),
                  SE = out$SE, Residual = out$residual, loglik = out$loglik,
                  AIC = out$AIC, BIC = out$BIC, AICc = out$AICcorr, iter = out$iter,
                  Yfit = out$yfit, MI = out$MI, Prev = out$Prev, time = out$time,
                  others = list(ubi = out$ubi, ubbi = out$ubbi, uybi = out$uybi, uyi = out$uyi,
                                uyyi = out$uyyi, varbeta = out$varbeta, yorg = out$yorg))
  class(obj.out) <- "ARpMMEC"
  return(obj.out)
}
/scratch/gouwar.j/cran-all/cranData/ARpLMEC/R/ARpMMEC.est.R
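# Illustration (not part of the package API): the DEC-type structures selected via
# 'struc' in ARpMMEC.est() above imply a damped exponential correlation between two
# measurements taken at times ti and tj, r(ti, tj) = phi1^(|ti - tj|^phi2). Setting
# phi2 = 1 recovers a continuous-time AR(1) decay ("DEC(AR)"), while phi2 = 0 gives a
# constant, compound-symmetry-like correlation ("SYM"); this mapping is inferred from
# the fitting code. A minimal sketch of that correlation function:
dec_corr <- function(ti, tj, phi1, phi2) {
  # phi1 in (0, 1): correlation of measurements one time unit apart
  # phi2 >= 0: damping parameter controlling how fast the correlation decays
  phi1^(abs(ti - tj)^phi2)
}
# e.g. dec_corr(1, 3, phi1 = 0.6, phi2 = 1) is 0.6^2 = 0.36, the usual AR(1) decay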
#' @title Generating a Censored Autoregressive Dataset with Mixed Effects, for the Normal and t-Student Distributions
#' @import TruncatedNormal
#' @import LaplacesDemon
#' @import mnormt
#' @import numDeriv
#' @import tcltk
#' @import stats
#' @import relliptical
#' @description This function simulates a censored response variable with autoregressive errors of order \code{p}, with mixed effects and an established censoring rate. It returns the vector of censoring indicators and the censored response vector.
#' @param m Number of individuals.
#' @param x Design matrix of the fixed effects of order \code{n x s}, corresponding to the vector of fixed effects.
#' @param z Design matrix of the random effects of order \code{n x b}, corresponding to the vector of random effects.
#' @param tt Vector \code{1 x n} with the times at which the measurements were made, where \code{n} is the total number of measurements for all individuals.
#' @param nj Vector \code{1 x m} with the number of observations for each subject, where \code{m} is the total number of individuals.
#' @param beta Vector of values of the fixed effects.
#' @param sigmae Value of the variance of the white noise process (sigma2).
#' @param D Covariance matrix of the random effects.
#' @param phi Vector of length \code{order} with the values of the autoregressive parameters.
#' @param struc Correlation structure. This must be one of \code{UNC}, \code{ARp}, \code{DEC}, \code{SYM} or \code{DEC(AR)}.
#' @param order Order of the autoregressive process. Must be a positive integer value.
#' @param typeModel \code{Normal} for the Normal distribution and \code{Student} for the t-Student distribution. Default is \code{Normal}.
#' @param p.cens Censoring percentage for the process. Default is \code{NULL}.
#' @param n.cens Censoring level for the process. Default is \code{NULL}.
#' @param cens.type \code{left} for left censoring, \code{right} for right censoring and \code{interval} for interval censoring. Default is \code{left}.
#' @param nu Degrees of freedom of the t-Student distribution (nu > 0, possibly non-integer).
#' @return returns a list:
#' \item{cc}{Vector of censoring indicators.}
#' \item{y_cc}{Vector of censored responses.}
#' @examples
#' \dontrun{
#'p.cens = 0.1
#'m = 10
#'D = matrix(c(0.049,0.001,0.001,0.002),2,2)
#'sigma2 = 0.30
#'phi = 0.6
#'beta = c(1,2,1)
#'nj=rep(4,10)
#'tt=rep(1:4,length(nj))
#'x<-matrix(runif(sum(nj)*length(beta),-1,1),sum(nj),length(beta))
#'z<-matrix(runif(sum(nj)*dim(D)[1],-1,1),sum(nj),dim(D)[1])
#'data=ARpMMEC.sim(m,x,z,tt,nj,beta,sigma2,D,phi,struc="ARp",typeModel="Normal",p.cens=p.cens)
#' y<-data$y_cc
#' cc<-data$cc
#' }
#' @export
ARpMMEC.sim <- function(m, x = NULL, z = NULL, tt = NULL, nj, beta, sigmae, D, phi,
                        struc = "ARp", order = 1, typeModel = "Normal",
                        p.cens = NULL, n.cens = NULL, cens.type = "left", nu = NULL) {
  if (m != length(nj)) stop("not compatible sizes between m and nj.")
  if (!is.null(x)) {
    if (!is.numeric(x)) stop("x must be a numeric matrix. Check documentation!")
    if (sum(is.na(x)) > 0) stop("There are some NA values in x.")
    if (!is.matrix(x)) stop("x must be a matrix. Check documentation!")
    if (det(t(x) %*% x) == 0) stop("the columns of x must be linearly independent.")
    if (length(x) == 0) stop("The parameter x must be provided.")
    if (dim(x)[1] != dim(z)[1]) stop("not compatible sizes between x and z.")
    if (dim(x)[1] != sum(nj)) stop("not compatible sizes between x and nj.")
    if (dim(x)[2] != length(beta)) stop("not compatible sizes between x and beta.")
    if (dim(x)[1] != length(tt)) stop("not compatible sizes between x and tt.")
  }
  if (!is.null(z)) {
    if (!is.numeric(z)) stop("z must be a numeric matrix. Check documentation!")
    if (sum(is.na(z)) > 0) stop("There are some NA values in z.")
    if (!is.matrix(z)) stop("z must be a matrix. Check documentation!")
    if (length(z) == 0) stop("The parameter z must be provided.")
    if (dim(z)[1] != sum(nj)) stop("not compatible sizes between z and nj.")
    if (dim(z)[2] != dim(D)[2]) stop("not compatible sizes between z and D.")
  }
  if (!is.numeric(nj)) stop("nj must be a numeric vector. Check documentation!")
  if (!is.vector(nj)) stop("nj must be a vector. Check documentation!")
  if (sum(is.na(nj)) > 0) stop("There are some NA values in nj.")
  if (length(nj) == 0) stop("The parameter nj must be provided.")
  if (!is.numeric(beta)) stop("beta must be a numeric vector. Check documentation!")
  if (!is.vector(beta)) stop("beta must be a vector. Check documentation!")
  if (!is.numeric(sigmae)) stop("sigmae must be a scalar.")
  if (length(sigmae) > 1) stop("sigmae must be a scalar.")
  if (!is.matrix(D)) stop("D must be a matrix.")
  if (!isSymmetric(D)) stop("D must be a symmetric matrix.")
  if (!is.numeric(phi)) stop("phi must be a numeric vector. Check documentation!")
  if (cens.type != "left" & cens.type != "right" & cens.type != "interval") stop("cens.type must be left, right or interval. Check documentation!")
  if (typeModel != "Normal" & typeModel != "Student") stop("typeModel must be Normal or Student. Check documentation!")
  if (struc != "DEC" & struc != "DEC(AR)" & struc != "SYM" & struc != "ARp" & struc != "UNC") stop("struc must be UNC, DEC, DEC(AR), SYM or ARp. Check documentation!")
  if (!is.null(p.cens)) {
    if (p.cens > 1 | p.cens < 0) stop("p.cens must be between 0 and 1. Check documentation!")
    if (!is.numeric(p.cens)) stop("p.cens must be numeric. Check documentation!")
  }
  if (!is.null(n.cens)) {
    if (!is.null(p.cens)) stop("Only one of n.cens or p.cens is needed for censoring. Please choose either n.cens or p.cens. Check documentation!")
    if (!is.numeric(n.cens)) stop("n.cens must be numeric. Check documentation!")
  }
  if (!is.null(nu)) {
    if (!is.numeric(nu)) stop("nu must be numeric. Check documentation!")
  }
  if (struc == "ARp") {
    if (length(phi) != order) stop("not compatible information between phi and order. Check documentation!")
  }
  MMsimu(m = m, x = x, z = z, tt = tt, nj = nj, beta = beta, sigmae = sigmae, D = D, phi = phi,
         struc = struc, typeModel = typeModel, percCensu = p.cens, nivel.Censu = n.cens,
         cens.type = cens.type, nu = nu)
}
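# Illustration (not part of the package): one common way to impose a left-censoring
# proportion p.cens is to flag as censored every response below the empirical
# p.cens-quantile of the simulated data and record the detection limit there. The
# sketch below shows that mechanism in isolation; it is an assumption for
# illustration, not necessarily the exact rule implemented in MMsimu().
left_censor <- function(y, p.cens) {
  lim  <- quantile(y, probs = p.cens)  # detection limit at the p.cens-quantile
  cc   <- as.numeric(y < lim)          # 1 = censored, 0 = observed
  y_cc <- ifelse(cc == 1, lim, y)      # censored values replaced by the limit
  list(cc = cc, y_cc = y_cc)
}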
/scratch/gouwar.j/cran-all/cranData/ARpLMEC/R/ARpMMEC.sim.R
EMCensArpN<-function(cc,y,x,z,tt,nj,Arp,initial,cens.type,LI,LS,MaxIter,ee,Prev,step,isubj,xpre,zpre) { start.time <- Sys.time() pb = tkProgressBar(title = "AR(p)-N-LMEC by EM", min = 0,max = MaxIter, width = 300) setTkProgressBar(pb, 0, label=paste("Iter ",0,"/",MaxIter," - ",0,"% done",sep = "")) if(cens.type=="left"){ LI=rep(-Inf,length(cc)) LS=rep(Inf,length(cc)) LS[cc==1]=y[cc==1] LI=as.vector(LI) LS=as.vector(LS) } if(cens.type=="right"){ LI=rep(-Inf,length(cc)) LI[cc==1]=y[cc==1] LS=rep(Inf,length(cc)) LI=as.vector(LI) LS=as.vector(LS) } if(cens.type=="interval"){ LI=LI LS=LS LI=as.vector(LI) LS=as.vector(LS) } m<-length(nj)[1] N<-sum(nj) p<-dim(x)[2] q1<-dim(z)[2] m1<-m*p m2<-m*q1 if(!is.null(initial)){ beta1<-initial$betas sigmae<- initial$sigma2 D1<-initial$alphas iD1<- solve(D1) iD1 <- (iD1 + t(iD1))/2 if(!is.null(initial$phi)){ if(Arp>=1){ piis = as.numeric(pacf((y - x%*%beta1),lag.max=Arp,plot=F)$acf) phi = initial$phi teta <- c(beta1,sigmae,D1[upper.tri(D1, diag = T)],phi) teta1<- c(beta1,sigmae,D1[upper.tri(D1, diag = T)],phi) } if(Arp=="UNC"){ Arp=0 piis = 0 phi = 0 teta <- c(beta1,sigmae,D1[upper.tri(D1, diag = T)]) teta1<- c(beta1,sigmae,D1[upper.tri(D1, diag = T)])} } if(is.null(initial$phi)){ if(Arp>=1){ piis = as.numeric(pacf((y - x%*%beta1),lag.max=Arp,plot=F)$acf) phi = estphit(piis) teta <- c(beta1,sigmae,D1[upper.tri(D1, diag = T)],phi) teta1<- c(beta1,sigmae,D1[upper.tri(D1, diag = T)],phi)} if(Arp=="UNC"){ Arp=0 piis = 0 phi = 0 teta <- c(beta1,sigmae,D1[upper.tri(D1, diag = T)]) teta1<- c(beta1,sigmae,D1[upper.tri(D1, diag = T)])} } } if(is.null(initial)){ beta1=solve(t(x)%*%x)%*%t(x)%*%y sigmae= 0.5 D1=0.1*diag(dim(z)[2]) iD1<- solve(D1) iD1 <- (iD1 + t(iD1))/2 if(Arp>=1){ piis = as.numeric(pacf((y - x%*%beta1),lag.max=Arp,plot=F)$acf) phi = estphit(piis) teta <- c(beta1,sigmae,D1[upper.tri(D1, diag = T)],phi) teta1<- c(beta1,sigmae,D1[upper.tri(D1, diag = T)],phi)} if(Arp=="UNC"){ Arp=0 piis = 0 phi = 0 teta <- c(beta1,sigmae,D1[upper.tri(D1, diag = T)]) teta1<- c(beta1,sigmae,D1[upper.tri(D1, diag = T)])} } criterio<-1 count<-0 loglik <- logliknArplmec(y=y,x=x,z=z,cc=cc,ttc=tt,nj=nj,LL=LI,LU=LS,betas=beta1,sigmae=sigmae,D1=D1,pii=piis) loglikp <- loglik while(criterio > ee){ count <- count + 1 soma1<- matrix(0,q1,q1) soma2<-0 soma3<- matrix(0,p,p) soma4<- matrix(0,p,1) soma5<- matrix(0,p,p) MI <- matrix(0,p+1+length(D1[upper.tri(D1, diag = T)])+Arp, p+1+length(D1[upper.tri(D1, diag = T)])+Arp) ubi=matrix(0,m2,m) ubbi=matrix(0,m2,m2) uybi=matrix(0,N,m2) uyyi=matrix(0,N,N) uyi=matrix(0,N,m) yhi=matrix(0,N,1) xi=matrix(0,N,m1) zi=matrix(0,N,m2) ver<-matrix(0,m,1) for (j in 1:m ){ cc1=cc[(sum(nj[1:j-1])+1) : (sum(nj[1:j]))] tt1=tt[(sum(nj[1:j-1])+1) : (sum(nj[1:j]))] y1=y[(sum(nj[1:j-1])+1) : (sum(nj[1:j]))] x1=matrix(x[(sum(nj[1:j-1])+1) : (sum(nj[1:j])), ],ncol=p) z1=matrix(z[(sum(nj[1:j-1])+1) : (sum(nj[1:j])) , ],ncol=q1) LI1<- LI[(sum(nj[1:j-1])+1) : (sum(nj[1:j]))] LS1<- LS[(sum(nj[1:j-1])+1) : (sum(nj[1:j]))] gammai=x1%*%beta1 if(Arp==0){eGama=diag(1,nj[j]) Gama=eGama*sigmae} if(Arp!=0){ Gama<- MatArp(piis,tt1,sigmae) eGama<-Gama/sigmae } Psi<-(Gama+(z1)%*%D1%*%t(z1)) Psi<-(Psi+t(Psi))/2 delta<- solve(iD1+(t(z1)%*%solve(eGama)%*%(z1*(1/sigmae)))) if(sum(cc1)==0){ uy<- matrix(y1,nj[j],1) uyy<- y1%*%t(y1) ub<- delta%*%(t(z1)*(1/sigmae))%*%solve(eGama)%*%(uy-gammai) ubb<- delta+(delta%*%(t(z1)*((1/sigmae)^2))%*%solve(eGama)%*%(uyy-uy%*%t(gammai)-gammai%*%t(uy)+gammai%*%t(gammai))%*%solve(eGama)%*%z1%*%delta) uyb<- 
(uyy-uy%*%t(gammai))%*%solve(eGama)%*%(z1*(1/sigmae))%*%delta ver[j,]<- LaplacesDemon::dmvn(x=as.vector(y1),mu=as.vector(gammai),Sigma=Psi) } if(sum(cc1)>=1){ if(sum(cc1)==nj[j]){ muc=x1%*%beta1 Sc<-Psi aux<- relliptical::mvtelliptical(lower = as.vector(LI1),upper=as.vector(LS1),mu = as.vector(muc), Sigma =Sc,dist = "Normal") uy<- aux$EY uyy<- aux$EYY ub<- delta%*%(t(z1)*(1/sigmae))%*%solve(eGama)%*%(uy-gammai) ubb<- delta+(delta%*%(t(z1)*((1/sigmae)^2))%*%solve(eGama)%*%(uyy-uy%*%t(gammai)-gammai%*%t(uy)+gammai%*%t(gammai))%*%solve(eGama)%*%z1%*%delta) uyb<- (uyy-uy%*%t(gammai))%*%solve(eGama)%*%(z1*(1/sigmae))%*%delta ver[j,]<- TruncatedNormal::pmvnorm(lb=as.vector(LI1), ub=as.vector(LS1), mu=as.vector(muc),sigma=Sc) } else { muc=x1[cc1==1,]%*%beta1+Psi[cc1==1,cc1==0]%*%solve(Psi[cc1==0,cc1==0])%*%(y1[cc1==0]-x1[cc1==0,]%*%beta1) Sc <-Psi[cc1==1,cc1==1]-Psi[cc1==1,cc1==0]%*%solve(Psi[cc1==0,cc1==0])%*%Psi[cc1==0,cc1==1] Sc=(Sc+t(Sc))/2 aux<- relliptical::mvtelliptical(lower = as.vector(LI1[cc1==1]),upper=as.vector(LS1[cc1==1]),mu = as.vector(muc), Sigma =Sc) uy <-matrix(y1,nj[j],1) uy[cc1==1]<- aux$EY uyy<- uy%*%t(uy) uyy[cc1==1,cc1==1]<- aux$EYY ub<- delta%*%(t(z1)*(1/sigmae))%*%solve(eGama)%*%(uy-gammai) ubb<- delta+(delta%*%(t(z1)*((1/sigmae)^2))%*%solve(eGama)%*%(uyy-uy%*%t(gammai)-gammai%*%t(uy)+gammai%*%t(gammai))%*%solve(eGama)%*%z1%*%delta) uyb<- (uyy-uy%*%t(gammai))%*%solve(eGama)%*%(z1*(1/sigmae))%*%delta ver[j,]<- LaplacesDemon::dmvn(x=as.vector(y1[cc1==0]),mu=as.vector(gammai[cc1==0]),Sigma=as.matrix(Psi[cc1==0,cc1==0]))*( TruncatedNormal::pmvnorm(lb=as.vector(LI1[cc1==1]),ub=as.vector(LS1[cc1==1]),mu=as.vector(muc),sigma=Sc))[1] } } soma1<- soma1 + ubb soma2<- soma2 + (sum(diag(uyy%*%solve(eGama)))-t(uy)%*%solve(eGama)%*%gammai-t(gammai)%*%solve(eGama)%*%uy-sum(diag(solve(eGama)%*%((uyb)%*%t(z1))))-sum(diag(solve(eGama)%*%((uyb)%*%t(z1)))) +t(gammai)%*%solve(eGama)%*%z1%*%ub+t(ub)%*%t(z1)%*%solve(eGama)%*%gammai+t(gammai)%*%solve(eGama)%*%gammai+sum(diag(ubb%*%t(z1)%*%solve(eGama)%*%z1))) soma3<- soma3 + (t(x1)%*%solve(eGama)%*%x1) soma4<- soma4 + (t(x1)%*%solve(eGama)%*%(uy-z1%*%ub)) soma5<- soma5 + (t(x1)%*%solve(Psi)%*%x1-t(x1)%*%solve(Psi)%*%(uyy-uy%*%t(uy))%*%solve(Psi)%*%x1) ubi[(((j-1)*q1)+1) : (j*q1), j]<-ub ubbi[(((j-1)*q1)+1) : (j*q1), (((j-1)*q1)+1) : (j*q1)]<-ubb uybi[(sum(nj[1:j-1])+1) : (sum(nj[1:j])),(((j-1)*q1)+1) : (j*q1)]<-uyb uyyi[(sum(nj[1:j-1])+1) : (sum(nj[1:j])),(sum(nj[1:j-1])+1) : (sum(nj[1:j]))]<-uyy uyi[(sum(nj[1:j-1])+1) : (sum(nj[1:j])),j]<-uy yhi[(sum(nj[1:j-1])+1) : (sum(nj[1:j])),] <- uy zi[(sum(nj[1:j-1])+1) : (sum(nj[1:j])), (((j-1)*q1)+1) : (j*q1)]<-z1 xi[(sum(nj[1:j-1])+1) : (sum(nj[1:j])), (((j-1)*p)+1) : (j*p)]<-x1 tetaMI=c(beta1,sigmae,phi) si<-Jt(tetaMI,uy,x1,z1,tt1,ub,ubb,p,Arp,D1) MI <- MI + t(si)%*%si } yorg<-apply(yhi,1,sum) beta1<- solve(soma3)%*%soma4 sigmae<- (1/N)*(soma2) sigmae<-as.numeric(sigmae) D1<- (1/m)*(soma1) iD1<- solve(D1) teta1 <- c(beta1,sigmae,D1[upper.tri(D1, diag = T)]) if(Arp!=0){ piis <- optim(piis, method = "L-BFGS-B", FCiArp, lower =rep(-.999,Arp), upper =rep(.999,Arp), beta1=beta1,sigmae=sigmae, ubi=ubi,ubbi=ubbi,uybi=uybi,uyyi=uyyi,uyi=uyi,x=x,z=z,tt=tt,nj=nj,hessian=TRUE)$par phi=estphit(piis) teta1 <- c(beta1,sigmae,D1[upper.tri(D1, diag = T)], phi) } if(Arp==0){phi=0} varbeta<-solve(soma5) logver <- sum(log(ver)) loglik <- logliknArplmec(y=y,x=x,z=z,cc=cc,ttc=tt,nj=nj,LL=LI,LU=LS,betas=beta1,sigmae=sigmae,D1=D1,pii=piis) loglikp1 <- loglik if(count > 1){ criterio <- 
sqrt(((loglikp1/loglikp)-1)%*%((loglikp1/loglikp)-1)) setTkProgressBar(pb, count, label=paste("Iter ",count,"/",MaxIter," - ",floor((count)/(MaxIter)*100),"% done",sep = "")) } if(count==MaxIter){criterio <- 0.0000000000001} teta <- teta1 loglikp <- loglikp1 } dd<-D1[upper.tri(D1, diag = T)] npar<-length(c(teta1)) ni<-sum(nj) loglik<-loglikp AICc<- -2*loglik +2*npar AICcorr<- AICc + ((2*npar*(npar+1))/(ni-npar-1)) BICc <- -2*loglik +log(ni)*npar if(Prev){ contt=0 Predicao<- matrix(0,length(isubj),1+step) for (j in isubj ){ contt=contt+1 IndPred=c(rep(0,nj[j]),rep(1,step)) xobs=x[(sum(nj[1:j-1])+1) : (sum(nj[1:j])), ] xprei=xpre[(step*contt-(step-1)) : (step*contt), ] xobspre=rbind(xobs,xprei) zobs=z[(sum(nj[1:j-1])+1) : (sum(nj[1:j])) , ] zprei=zpre[(step*contt-(step-1)) : (step*contt), ] zobspre=rbind(zobs,zprei) gammai = xobs%*%beta1 yobs=uyi[(sum(nj[1:j-1])+1) : (sum(nj[1:j])),j] tt1=tt[(sum(nj[1:j-1])+1) : (sum(nj[1:j]))] if(Arp==0){ Gama=diag(1,nj[j]+step)*sigmae} if(Arp!=0){ tt1=c(tt1, tt1[nj[j]]+seq(1:step)) Gama<- MatArp(piis,tt1,sigmae)} PsiPred<-(Gama+(zobspre)%*%t(D1)%*%t(zobspre)) Aux1Pred <- xprei%*%beta1 Aux2Pred <- PsiPred[IndPred==1,IndPred==0]%*%solve(PsiPred[IndPred==0,IndPred==0]) Aux3Pred <- (yobs-gammai) Predicao[contt,1] <- j Predicao[contt,2:(step+1)] <- Aux1Pred + Aux2Pred%*%Aux3Pred } Predicao=as.data.frame(Predicao) colnames(Predicao) = c("subj",paste("step",1:step)) } if(!Prev){Predicao=NULL} SE=round(sqrt(diag(solve(MI))),3) intPar=round(1.96*SE,3) tableB = data.frame(round(beta1,3),SE[1:p],paste("<",round(beta1,3)-round(intPar[1:p],3),",",round(beta1,3)+round(intPar[1:p],3),">")) rownames(tableB) = paste("beta",1:p) colnames(tableB) = c("Est","SE","IConf(95%)") if((round(sigmae,3)-round(intPar[p+1],3))<0) tableS = data.frame(round(sigmae,3),SE[p+1],paste("<",0,",",round(sigmae,3)+round(intPar[p+1],3),">")) if((round(sigmae,3)-round(intPar[p+1],3))>=0) tableS = data.frame(round(sigmae,3),SE[p+1],paste("<",round(sigmae,3)-round(intPar[p+1],3),",",round(sigmae,3)+round(intPar[p+1],3),">")) rownames(tableS) = "Sigma^2" colnames(tableS) = c("Est","SE","IConf(95%)") if(Arp!=0){ tableP = data.frame(round(phi,3),SE[(p+2):(p+1+Arp)],paste("<",round(phi,3)-round(intPar[(p+2):(p+1+Arp)],3),",",round(phi,3)+round(intPar[(p+2):(p+1+Arp)],3),">")) rownames(tableP) = paste("Phi",1:Arp) colnames(tableP) = c("Est","SE","IConf(95%)") } if(Arp==0){ phi=NULL; tableP =NULL } nnp=0 for(al in 1:dim(D1)[1]) {noa=paste(1:al,al,sep = "") nnp=c(nnp,noa) } nnp=nnp[-1] ici=round(dd,3)-round(intPar[(p+2+Arp):(p+1+length(D1[upper.tri(D1, diag = T)])+Arp)],3) ics=round(dd,3)+round(intPar[(p+2+Arp):(p+1+length(D1[upper.tri(D1, diag = T)])+Arp)],3) ici[as.numeric(nnp)%%11==0&ici<0]=0 tableA = data.frame(round(dd,3),SE[(p+2+Arp):(p+1+length(D1[upper.tri(D1, diag = T)])+Arp)],paste("<",ici,",",ics,">")) rownames(tableA) = paste("Alpha",nnp) colnames(tableA) = c("Est","SE","IConf(95%)") res <-fitY<- vector(mode = "numeric", length = sum(nj)) Di <- matrix(0,dim(z)[2],dim(z)[2]) Di[upper.tri(Di, diag = T)] <- dd Di[lower.tri(Di, diag = T)] <- dd Di <- round(Di,6) for (k in 1:length(nj)) {tc<-tt[(sum(nj[1:k-1])+1) : (sum(nj[1:k]))] if(Arp=="UNC"){Mq<-diag(1,length(tc))} if(Arp!="UNC"){Mq<- MatArpJ(phi,tc,sigmae)} Sii<-solve(round(z[(sum(nj[1:k-1])+1):(sum(nj[1:k])),]%*%Di%*%t(z[(sum(nj[1:k-1])+1):(sum(nj[1:k])),])+sigmae*Mq,6)) yyii<-yorg[(sum(nj[1:k-1])+1) :(sum(nj[1:k]))]-x[(sum(nj[1:k-1])+1) : (sum(nj[1:k])),]%*%beta1 res[(sum(nj[1:k-1])+1) : (sum(nj[1:k]))]=sqrtm(Sii)%*%(yyii) 
fitY[(sum(nj[1:k-1])+1) : (sum(nj[1:k]))]= x[(sum(nj[1:k-1])+1) : (sum(nj[1:k])),]%*%beta1+ z[(sum(nj[1:k-1])+1):(sum(nj[1:k])),]%*%as.matrix(ubi[(((k-1)*q1)+1) : (k*q1),k]) } end.time <- Sys.time() time.taken <- end.time - start.time obj.out <- list(beta1 = beta1, sigmae= sigmae, phi=phi, dd = dd, loglik=loglik, AIC=AICc, BIC=BICc, AICcorr=AICcorr, iter = count, varbeta=varbeta, ubi = ubi, ubbi = ubbi, uybi = uybi, uyi = uyi, uyyi = uyyi , MI=MI, yorg=yorg,residuals=res,yfit=fitY, Prev= Predicao, time=time.taken, SE=SE,tableB=tableB,tableS=tableS,tableP=tableP, tableA=tableA) if (count == MaxIter) { setTkProgressBar(pb, MaxIter, label=paste("MaxIter reached ",count,"/",MaxIter," - 100 % done",sep = "")) close(pb) } else { setTkProgressBar(pb, MaxIter, label=paste("Convergence at Iter ",count,"/",MaxIter," - 100 % done",sep = "")) close(pb) } class(obj.out) <- "ARpNLMEC" return(obj.out) } EMCensDECN<-function(cc,y,x,z,tt,nj,struc,initial,cens.type,LI,LS,MaxIter,ee,Prev,step,isubj,xpre,zpre) { start.time <- Sys.time() pb = tkProgressBar(title = "DEC-N-LMEC by EM", min = 0,max = MaxIter, width = 300) setTkProgressBar(pb, 0, label=paste("Iter ",0,"/",MaxIter," - ",0,"% done",sep = "")) if(cens.type=="left"){ LI=rep(-Inf,length(cc)) LS=rep(Inf,length(cc)) LS[cc==1]=y[cc==1] LI=as.vector(LI) LS=as.vector(LS) } if(cens.type=="right"){ LI=rep(-Inf,length(cc)) LI[cc==1]=y[cc==1] LS=rep(Inf,length(cc)) LI=as.vector(LI) LS=as.vector(LS) } if(cens.type=="interval"){ LI=LI LS=LS LI=as.vector(LI) LS=as.vector(LS) } m<-length(nj) N<-sum(nj) criterio<-1 count<-0 if(struc=="DEC"){ p<-dim(x)[2] q1<-dim(z)[2] m1<-m*p m2<-m*q1 if(!is.null(initial)){ beta1<-initial$betas sigmae<- initial$sigma2 D1<-initial$alphas iD1<- solve(D1) iD1 <- (iD1 + t(iD1))/2 if(!is.null(initial$phi1)){ phi1 <- initial$phi1 phi2 <- initial$phi2} if(is.null(initial$phi1)){ phi1 <- 0.1 phi2 <- 1} } if(is.null(initial)){ beta1=solve(t(x)%*%x)%*%t(x)%*%y sigmae= 0.25 D1=0.1*diag(dim(z)[2]) iD1<- solve(D1) iD1 <- (iD1 + t(iD1))/2 phi1 <- 0.1 phi2 <- 1 } rho= phi1 gamma<-phi2 teta <- c(beta1,sigmae,D1[upper.tri(D1, diag = T)],rho,gamma) tetacrit <- c(beta1,sigmae) q1<-dim(D1)[2] p<-length(beta1) m1<-m*p m2<-m*q1 ubi=matrix(0,m2,m) loglik <- logliknslmec(y=y,x=x,z=z,cc=cc,ttc=tt,nj=nj,LL=LI,LU=LS,betas=beta1,sigmae=sigmae,D1=D1,phi1=phi1,phi2=phi2,struc=struc) loglikp <- loglik criterio<-1 count<-0 while(criterio > ee){ count <- count + 1 soma1<-matrix(0,q1,q1) soma2<-0 soma3<-matrix(0,p,p) soma4<-matrix(0,p,1) soma5<-matrix(0,p,p) MI <- matrix(0,p+1+length(D1[upper.tri(D1, diag = T)])+2, p+1+length(D1[upper.tri(D1, diag = T)])+2) res <- vector(mode = "numeric", length = N) ub1<-ubi ubi=matrix(0,m2,m) ubbi=matrix(0,m2,m2) uybi=matrix(0,N,m2) uyyi=matrix(0,N,N) uyi=matrix(0,N,m) yhi=matrix(0,N,1) xi=matrix(0,N,m1) zi=matrix(0,N,m2) ver<-matrix(0,m,1) for (j in 1:m ){ cc1=cc[(sum(nj[1:j-1])+1) : (sum(nj[1:j]))] tt1=tt[(sum(nj[1:j-1])+1) : (sum(nj[1:j]))] y1=y[(sum(nj[1:j-1])+1) : (sum(nj[1:j]))] x1=matrix(x[(sum(nj[1:j-1])+1) : (sum(nj[1:j])), ],ncol=p) z1=matrix(z[(sum(nj[1:j-1])+1) : (sum(nj[1:j])) , ],ncol=q1) LI1<- LI[(sum(nj[1:j-1])+1) : (sum(nj[1:j]))] LS1<- LS[(sum(nj[1:j-1])+1) : (sum(nj[1:j]))] gammai=x1%*%beta1 Gama<- MatAr1(tt1,rho,gamma,sigmae) eGama<-Gama/sigmae Psi<-(Gama+(z1)%*%D1%*%t(z1)) Psi<-(Psi+t(Psi))/2 delta<- solve(iD1+(t(z1)%*%solve(eGama)%*%(z1*(1/sigmae)))) if(sum(cc1)==0){ uy<- matrix(y1,nj[j],1) uyy<- y1%*%t(y1) ub<- delta%*%(t(z1)*(1/sigmae))%*%solve(eGama)%*%(uy-gammai) ubb<- 
delta+(delta%*%(t(z1)*((1/sigmae)^2))%*%solve(eGama)%*%(uyy-uy%*%t(gammai)-gammai%*%t(uy)+gammai%*%t(gammai))%*%solve(eGama)%*%z1%*%delta) uyb<- (uyy-uy%*%t(gammai))%*%solve(eGama)%*%(z1*(1/sigmae))%*%delta ver[j,]<- LaplacesDemon::dmvn(x=as.vector(y1),mu=as.vector(gammai),Sigma=Psi) } if(sum(cc1)>=1){ if(sum(cc1)==nj[j]){ muc=x1%*%beta1 Sc<-Psi aux<- relliptical::mvtelliptical(lower = as.vector(LI1),upper=as.vector(LS1),mu = as.vector(muc), Sigma =Sc) uy<- aux$EY uyy<- aux$EYY ub<- delta%*%(t(z1)*(1/sigmae))%*%solve(eGama)%*%(uy-gammai) ubb<- delta+(delta%*%(t(z1)*((1/sigmae)^2))%*%solve(eGama)%*%(uyy-uy%*%t(gammai)-gammai%*%t(uy)+gammai%*%t(gammai))%*%solve(eGama)%*%z1%*%delta) uyb<- (uyy-uy%*%t(gammai))%*%solve(eGama)%*%(z1*(1/sigmae))%*%delta ver[j,]<- TruncatedNormal::pmvnorm(lb=as.vector(LI1), ub=as.vector(LS1), mu=c(muc),sigma=Sc) } else { muc=x1[cc1==1,]%*%beta1+Psi[cc1==1,cc1==0]%*%solve(Psi[cc1==0,cc1==0])%*%(y1[cc1==0]-x1[cc1==0,]%*%beta1) Sc <-Psi[cc1==1,cc1==1]-Psi[cc1==1,cc1==0]%*%solve(Psi[cc1==0,cc1==0])%*%Psi[cc1==0,cc1==1] Sc=(Sc+t(Sc))/2 aux<- relliptical::mvtelliptical(lower = as.vector(LI1[cc1==1]),upper=as.vector(LS1[cc1==1]),mu = as.vector(muc), Sigma =Sc) uy <-matrix(y1,nj[j],1) uy[cc1==1]<- aux$EY uyy<- uy%*%t(uy) uyy[cc1==1,cc1==1]<- aux$EYY ub<- delta%*%(t(z1)*(1/sigmae))%*%solve(eGama)%*%(uy-gammai) ubb<- delta+(delta%*%(t(z1)*((1/sigmae)^2))%*%solve(eGama)%*%(uyy-uy%*%t(gammai)-gammai%*%t(uy)+gammai%*%t(gammai))%*%solve(eGama)%*%z1%*%delta) uyb<- (uyy-uy%*%t(gammai))%*%solve(eGama)%*%(z1*(1/sigmae))%*%delta ver[j,]<- LaplacesDemon::dmvn(x=as.vector(y1[cc1==0]),mu=as.vector(gammai[cc1==0]),Sigma=as.matrix(Psi[cc1==0,cc1==0]))*( TruncatedNormal::pmvnorm(lb=as.vector(LI1[cc1==1]),ub=as.vector(LS1[cc1==1]),mu=as.vector(muc),sigma=Sc))[1] } } soma1<- soma1 + ubb soma2<- soma2 + (sum(diag(uyy%*%solve(eGama)))-t(uy)%*%solve(eGama)%*%gammai-t(gammai)%*%solve(eGama)%*%uy-sum(diag(solve(eGama)%*%((uyb)%*%t(z1))))-sum(diag(solve(eGama)%*%((uyb)%*%t(z1)))) +t(gammai)%*%solve(eGama)%*%z1%*%ub+t(ub)%*%t(z1)%*%solve(eGama)%*%gammai+t(gammai)%*%solve(eGama)%*%gammai+sum(diag(ubb%*%t(z1)%*%solve(eGama)%*%z1))) soma3<- soma3 + (t(x1)%*%solve(eGama)%*%x1) soma4<- soma4 + (t(x1)%*%solve(eGama)%*%(uy-z1%*%ub)) soma5<- soma5 + (t(x1)%*%solve(Psi)%*%x1-t(x1)%*%solve(Psi)%*%(uyy-uy%*%t(uy))%*%solve(Psi)%*%x1) uyyi[(sum(nj[1:j-1])+1) : (sum(nj[1:j])), (sum(nj[1:j-1])+1) : (sum(nj[1:j]))]<-uyy uyi[(sum(nj[1:j-1])+1) : (sum(nj[1:j])), j]<-uy uybi[(sum(nj[1:j-1])+1) : (sum(nj[1:j])), (((j-1)*q1)+1) : (j*q1)]<-uyb ubi[(((j-1)*q1)+1) : (j*q1), j]<-ub ubbi[(((j-1)*q1)+1) : (j*q1), (((j-1)*q1)+1) : (j*q1)]<-ubb zi[(sum(nj[1:j-1])+1) : (sum(nj[1:j])), (((j-1)*q1)+1) : (j*q1)]<-z1 xi[(sum(nj[1:j-1])+1) : (sum(nj[1:j])), (((j-1)*p)+1) : (j*p)]<-x1 yhi[(sum(nj[1:j-1])+1) : (sum(nj[1:j])),] <- uy dbeta <- (1/sigmae)*((t(x1)%*%solve(eGama)%*%(uy-z1%*%ub)) - (t(x1)%*%solve(eGama)%*%x1)%*%beta1) dsigma <- -(1/2)*((nj[j]/sigmae)-(1/sigmae^2)*((sum(diag(uyy%*%solve(eGama)))-t(uy)%*%solve(eGama)%*%gammai-t(gammai)%*%solve(eGama)%*%uy-sum(diag(solve(eGama)%*%((uyb)%*%t(z1))))-sum(diag(solve(eGama)%*%((uyb)%*%t(z1)))) +t(gammai)%*%solve(eGama)%*%z1%*%ub+t(ub)%*%t(z1)%*%solve(eGama)%*%gammai+t(gammai)%*%solve(eGama)%*%gammai+sum(diag(ubb%*%t(z1)%*%solve(eGama)%*%z1))))) Dp <- DevEiAr1(tt1,rho,gamma,sigmae) Dpr <- Dp$devR_r Dpg <- Dp$devR_g dE1_r <- sum(diag(solve(eGama)%*%Dpr)) dE2_r <- -(solve(eGama)%*%Dpr%*%solve(eGama)) dE1_g <- sum(diag(solve(eGama)%*%Dpg)) dE2_g <- -(solve(eGama)%*%Dpg%*%solve(eGama)) 
drho <- - 0.5*dE1_r-(0.5/sigmae)*((sum(diag(uyy%*%dE2_r))-t(uy)%*%dE2_r%*%gammai-t(gammai)%*%dE2_r%*%uy-sum(diag(dE2_r%*%((uyb)%*%t(z1))))-sum(diag(dE2_r%*%((uyb)%*%t(z1)))) +t(gammai)%*%dE2_r%*%z1%*%ub+t(ub)%*%t(z1)%*%dE2_r%*%gammai+t(gammai)%*%dE2_r%*%gammai+sum(diag(ubb%*%t(z1)%*%dE2_r%*%z1)))) dgama <- - 0.5*dE1_g-(0.5/sigmae)*((sum(diag(uyy%*%dE2_g))-t(uy)%*%dE2_g%*%gammai-t(gammai)%*%dE2_g%*%uy-sum(diag(dE2_g%*%((uyb)%*%t(z1))))-sum(diag(dE2_g%*%((uyb)%*%t(z1)))) +t(gammai)%*%dE2_g%*%z1%*%ub+t(ub)%*%t(z1)%*%dE2_g%*%gammai+t(gammai)%*%dE2_g%*%gammai+sum(diag(ubb%*%t(z1)%*%dE2_g%*%z1)))) D_der <- Derivadas(D1) deralpha<-rep(0,length(D1[upper.tri(D1, diag = T)])) md2<-dim(D1)[1] kont <- 0 for(i1 in 1:md2){ for(i2 in 1:(md2+1-i1)){ kont <- kont+1 di <- D_der[[i1]][[i2]] deralpha[kont] <- (-0.5)*sum(diag(iD1%*%di-iD1%*%di%*%iD1*ubb)) } } si <- matrix(c(t(dbeta),t(dsigma),t(drho),t(dgama),t(deralpha)),p+1+2+length(D1[upper.tri(D1, diag = T)]),1) MI <- MI + si%*%t(si) } yorg<-apply(yhi,1,sum) beta1<- solve(soma3)%*%soma4 sigmae<- (1/N)*(soma2) sigmae<-as.numeric(sigmae) D1<- (1/m)*(soma1) iD1<- solve(D1) rhos <- optim(c(rho,gamma), method = "L-BFGS-B", FCi, lower =c(0.01,0.01), upper =c(0.9,30), beta1=beta1,sigmae=sigmae,tt=tt,ubi=ubi,ubbi=ubbi,uybi=uybi,uyyi=uyyi,uyi=uyi,xi=xi,zi=zi,nj=nj,hessian=TRUE)$par rho<-rhos[1] gamma<-rhos[2] teta1 <- c(beta1,sigmae,D1[upper.tri(D1, diag = T)],rho,gamma) teta1crit <- c(beta1,sigmae) varbeta<-solve(soma5) logver <- sum(log(ver)) loglik <- logliknslmec(y=y,x=x,z=z,cc=cc,ttc=tt,nj=nj,LL=LI,LU=LS,betas=beta1,sigmae=sigmae,D1=D1,phi1=rho,phi2=gamma,struc=struc) loglikp1 <- loglik if(count > 1){ criterio <- sqrt(((loglikp1/loglikp)-1)%*%((loglikp1/loglikp)-1)) setTkProgressBar(pb, count, label=paste("Iter ",count,"/",MaxIter," - ",floor((count)/(MaxIter)*100),"% done",sep = "")) } if(count==MaxIter){criterio <- 0.0000000000001} teta <- teta1 loglikp <- loglikp1 tetacrit <- teta1crit logver1<-logver } } if(struc=="DEC(AR)"){ p<-dim(x)[2] q1<-dim(z)[2] m1<-m*p m2<-m*q1 if(!is.null(initial)){ beta1<-initial$betas sigmae<- initial$sigma2 D1<-initial$alphas iD1<- solve(D1) iD1 <- (iD1 + t(iD1))/2 if(!is.null(initial$phi1)){ phi1 <- initial$phi1 phi2 <- 1} if(is.null(initial$phi1)){ phi1 <- 0.1 phi2 <- 1} } if(is.null(initial)){ beta1=solve(t(x)%*%x)%*%t(x)%*%y sigmae= 0.25 D1=0.1*diag(dim(z)[2]) iD1<- solve(D1) iD1 <- (iD1 + t(iD1))/2 phi1 <- 0.1 phi2 <- 1 } rho= phi1 gamma<-phi2 teta <- c(beta1,sigmae,D1[upper.tri(D1, diag = T)],rho) tetacrit <- c(beta1,sigmae) ubi=matrix(0,m2,m) loglik <- logliknslmec(y=y,x=x,z=z,cc=cc,ttc=tt,nj=nj,LL=LI,LU=LS,betas=beta1,sigmae=sigmae,D1=D1,phi1=phi1,phi2=phi2,struc=struc) loglikp <- loglik criterio<-1 count<-0 while(criterio > ee){ count <- count + 1 soma1<-matrix(0,q1,q1) soma2<-0 soma3<-matrix(0,p,p) soma4<-matrix(0,p,1) soma5<-matrix(0,p,p) MI <- matrix(0,p+1+length(D1[upper.tri(D1, diag = T)])+1, p+1+length(D1[upper.tri(D1, diag = T)])+1) ub1<-ubi res <- vector(mode = "numeric", length = N) ubi=matrix(0,m2,m) ubbi=matrix(0,m2,m2) uybi=matrix(0,N,m2) uyyi=matrix(0,N,N) uyi=matrix(0,N,m) yhi=matrix(0,N,1) xi=matrix(0,N,m1) zi=matrix(0,N,m2) ver<-matrix(0,m,1) for (j in 1:m ){ cc1=cc[(sum(nj[1:j-1])+1) : (sum(nj[1:j]))] tt1=tt[(sum(nj[1:j-1])+1) : (sum(nj[1:j]))] y1=y[(sum(nj[1:j-1])+1) : (sum(nj[1:j]))] x1=matrix(x[(sum(nj[1:j-1])+1) : (sum(nj[1:j])), ],ncol=p) z1=matrix(z[(sum(nj[1:j-1])+1) : (sum(nj[1:j])) , ],ncol=q1) LI1<- LI[(sum(nj[1:j-1])+1) : (sum(nj[1:j]))] LS1<- LS[(sum(nj[1:j-1])+1) : (sum(nj[1:j]))] 
gammai=x1%*%beta1 Gama<- MatAr1(tt1,rho,gamma,sigmae) eGama<-Gama/sigmae Psi<-(Gama+(z1)%*%D1%*%t(z1)) delta<- solve(iD1+(t(z1)%*%solve(eGama)%*%(z1*(1/sigmae)))) if(sum(cc1)==0){ uy<- matrix(y1,nj[j],1) uyy<- y1%*%t(y1) ub<- delta%*%(t(z1)*(1/sigmae))%*%solve(eGama)%*%(uy-gammai) ubb<- delta+(delta%*%(t(z1)*((1/sigmae)^2))%*%solve(eGama)%*%(uyy-uy%*%t(gammai)-gammai%*%t(uy)+gammai%*%t(gammai))%*%solve(eGama)%*%z1%*%delta) uyb<- (uyy-uy%*%t(gammai))%*%solve(eGama)%*%(z1*(1/sigmae))%*%delta ver[j,]<- LaplacesDemon::dmvn(x=as.vector(y1),mu=as.vector(gammai),Sigma=Psi) } if(sum(cc1)>=1){ if(sum(cc1)==nj[j]){ muc=x1%*%beta1 Sc<-Psi Sc=(Sc+t(Sc))/2 aux<- relliptical::mvtelliptical(lower = as.vector(LI1),upper=as.vector(LS1),mu = as.vector(muc), Sigma =Sc) uy<- aux$EY uyy<- aux$EYY ub<- delta%*%(t(z1)*(1/sigmae))%*%solve(eGama)%*%(uy-gammai) ubb<- delta+(delta%*%(t(z1)*((1/sigmae)^2))%*%solve(eGama)%*%(uyy-uy%*%t(gammai)-gammai%*%t(uy)+gammai%*%t(gammai))%*%solve(eGama)%*%z1%*%delta) uyb<- (uyy-uy%*%t(gammai))%*%solve(eGama)%*%(z1*(1/sigmae))%*%delta ver[j,]<- TruncatedNormal::pmvnorm(lb=as.vector(LI1), ub=as.vector(LS1), mu=as.vector(muc),sigma=Sc) } else { muc=x1[cc1==1,]%*%beta1+Psi[cc1==1,cc1==0]%*%solve(Psi[cc1==0,cc1==0])%*%(y1[cc1==0]-x1[cc1==0,]%*%beta1) Sc <-Psi[cc1==1,cc1==1]-Psi[cc1==1,cc1==0]%*%solve(Psi[cc1==0,cc1==0])%*%Psi[cc1==0,cc1==1] Sc=(Sc+t(Sc))/2 aux<- relliptical::mvtelliptical(lower = as.vector(LI1[cc1==1]),upper=as.vector(LS1[cc1==1]),mu = as.vector(muc), Sigma =Sc) uy <-matrix(y1,nj[j],1) uy[cc1==1]<- aux$EY uyy<- uy%*%t(uy) uyy[cc1==1,cc1==1]<- aux$EYY ub<- delta%*%(t(z1)*(1/sigmae))%*%solve(eGama)%*%(uy-gammai) ubb<- delta+(delta%*%(t(z1)*((1/sigmae)^2))%*%solve(eGama)%*%(uyy-uy%*%t(gammai)-gammai%*%t(uy)+gammai%*%t(gammai))%*%solve(eGama)%*%z1%*%delta) uyb<- (uyy-uy%*%t(gammai))%*%solve(eGama)%*%(z1*(1/sigmae))%*%delta ver[j,]<- LaplacesDemon::dmvn(x=as.vector(y1[cc1==0]),mu=as.vector(gammai[cc1==0]),Sigma=as.matrix(Psi[cc1==0,cc1==0]))*( TruncatedNormal::pmvnorm(lb=as.vector(LI1[cc1==1]),ub=as.vector(LS1[cc1==1]),mu=as.vector(muc),sigma=Sc))[1] } } soma1<- soma1 + ubb soma2<- soma2 + (sum(diag(uyy%*%solve(eGama)))-t(uy)%*%solve(eGama)%*%gammai-t(gammai)%*%solve(eGama)%*%uy-sum(diag(solve(eGama)%*%((uyb)%*%t(z1))))-sum(diag(solve(eGama)%*%((uyb)%*%t(z1)))) +t(gammai)%*%solve(eGama)%*%z1%*%ub+t(ub)%*%t(z1)%*%solve(eGama)%*%gammai+t(gammai)%*%solve(eGama)%*%gammai+sum(diag(ubb%*%t(z1)%*%solve(eGama)%*%z1))) soma3<- soma3 + (t(x1)%*%solve(eGama)%*%x1) soma4<- soma4 + (t(x1)%*%solve(eGama)%*%(uy-z1%*%ub)) soma5<- soma5 + (t(x1)%*%solve(Psi)%*%x1-t(x1)%*%solve(Psi)%*%(uyy-uy%*%t(uy))%*%solve(Psi)%*%x1) uyyi[(sum(nj[1:j-1])+1) : (sum(nj[1:j])), (sum(nj[1:j-1])+1) : (sum(nj[1:j]))]<-uyy uyi[(sum(nj[1:j-1])+1) : (sum(nj[1:j])), j]<-uy uybi[(sum(nj[1:j-1])+1) : (sum(nj[1:j])), (((j-1)*q1)+1) : (j*q1)]<-uyb ubi[(((j-1)*q1)+1) : (j*q1), j]<-ub ubbi[(((j-1)*q1)+1) : (j*q1), (((j-1)*q1)+1) : (j*q1)]<-ubb yhi[(sum(nj[1:j-1])+1) : (sum(nj[1:j])),] <- uy zi[(sum(nj[1:j-1])+1) : (sum(nj[1:j])), (((j-1)*q1)+1) : (j*q1)]<-z1 xi[(sum(nj[1:j-1])+1) : (sum(nj[1:j])), (((j-1)*p)+1) : (j*p)]<-x1 dbeta <- (1/sigmae)*((t(x1)%*%solve(eGama)%*%(uy-z1%*%ub)) - (t(x1)%*%solve(eGama)%*%x1)%*%beta1) dsigma <- -(1/2)*((nj[j]/sigmae)-(1/sigmae^2)*((sum(diag(uyy%*%solve(eGama)))-t(uy)%*%solve(eGama)%*%gammai-t(gammai)%*%solve(eGama)%*%uy-sum(diag(solve(eGama)%*%((uyb)%*%t(z1))))-sum(diag(solve(eGama)%*%((uyb)%*%t(z1)))) 
+t(gammai)%*%solve(eGama)%*%z1%*%ub+t(ub)%*%t(z1)%*%solve(eGama)%*%gammai+t(gammai)%*%solve(eGama)%*%gammai+sum(diag(ubb%*%t(z1)%*%solve(eGama)%*%z1))))) Dp <- DevEiAr1(tt1,rho,gamma,sigmae) Dpr <- Dp$devR_r dE1_r <- sum(diag(solve(eGama)%*%Dpr)) dE2_r <- -(solve(eGama)%*%Dpr%*%solve(eGama)) drho <- - 0.5*dE1_r-(0.5/sigmae)*((sum(diag(uyy%*%dE2_r))-t(uy)%*%dE2_r%*%gammai-t(gammai)%*%dE2_r%*%uy-sum(diag(dE2_r%*%((uyb)%*%t(z1))))-sum(diag(dE2_r%*%((uyb)%*%t(z1)))) +t(gammai)%*%dE2_r%*%z1%*%ub+t(ub)%*%t(z1)%*%dE2_r%*%gammai+t(gammai)%*%dE2_r%*%gammai+sum(diag(ubb%*%t(z1)%*%dE2_r%*%z1)))) D_der <- Derivadas(D1) deralpha<-rep(0,length(D1[upper.tri(D1, diag = T)])) md2<-dim(D1)[1] kont <- 0 for(i1 in 1:md2){ for(i2 in 1:(md2+1-i1)){ kont <- kont+1 di <- D_der[[i1]][[i2]] deralpha[kont] <- (-0.5)*sum(diag(iD1%*%di-iD1%*%di%*%iD1*ubb)) } } si <- matrix(c(t(dbeta),t(dsigma),t(drho),t(deralpha)),p+1+length(D1[upper.tri(D1, diag = T)])+1,1) MI <- MI + si%*%t(si) } yorg<-apply(yhi,1,sum) beta1<- solve(soma3)%*%soma4 sigmae<- (1/N)*(soma2) sigmae<-as.numeric(sigmae) D1<- (1/m)*(soma1) iD1<- solve(D1) rhos <- optim(rho, method = "L-BFGS-B", FCi_gamma, lower = 0.0001, upper =0.9, gamma=gamma,beta1=beta1,sigmae=sigmae,tt=tt,ubi=ubi,ubbi=ubbi,uybi=uybi,uyyi=uyyi,uyi=uyi,xi=xi,zi=zi,nj=nj,hessian=TRUE)$par rho<-rhos[1] gamma<-1 teta1 <- c(beta1,sigmae,D1[upper.tri(D1, diag = T)],rho) varbeta<-solve(soma5) logver <- sum(log(ver)) teta1crit <- c(beta1,sigmae) loglik <- logliknslmec(y=y,x=x,z=z,cc=cc,ttc=tt,nj=nj,LL=LI,LU=LS,betas=beta1,sigmae=sigmae,D1=D1,phi1=rho,phi2=gamma,struc=struc) loglikp1 <- loglik if(count > 1){ criterio <- sqrt(((loglikp1/loglikp)-1)%*%((loglikp1/loglikp)-1)) setTkProgressBar(pb, count, label=paste("Iter ",count,"/",MaxIter," - ",floor((count)/(MaxIter)*100),"% done",sep = "")) } if(count==MaxIter){criterio <- 0.0000000000001} teta <- teta1 loglikp <- loglikp1 tetacrit <- teta1crit logver1<-logver } } if(struc=="SYM"){ p<-dim(x)[2] q1<-dim(z)[2] m1<-m*p m2<-m*q1 if(!is.null(initial)){ beta1<-initial$betas sigmae<- initial$sigma2 D1<-initial$alphas iD1<- solve(D1) iD1 <- (iD1 + t(iD1))/2 if(!is.null(initial$phi1)){ phi1 <- initial$phi1 phi2 <- 0} if(is.null(initial$phi1)){ phi1 <- 0.1 phi2 <- 0} } if(is.null(initial)){ beta1=solve(t(x)%*%x)%*%t(x)%*%y sigmae= 0.25 D1=0.1*diag(dim(z)[2]) iD1<- solve(D1) iD1 <- (iD1 + t(iD1))/2 phi1 <- 0.1 phi2 <- 0 } rho=phi1 gamma<-phi2 teta <- c(beta1,sigmae,D1[upper.tri(D1, diag = T)],rho) tetacrit <- c(beta1,sigmae) q1<-dim(D1)[2] p<-length(beta1) m1<-m*p m2<-m*q1 ubi=matrix(0,m2,m) loglik <- logliknslmec(y=y,x=x,z=z,cc=cc,ttc=tt,nj=nj,LL=LI,LU=LS,betas=beta1,sigmae=sigmae,D1=D1,phi1=phi1,phi2=phi2,struc=struc) loglikp <- loglik criterio<-1 count<-0 while(criterio >ee){ count <- count + 1 soma1<-matrix(0,q1,q1) soma2<-0 soma3<-matrix(0,p,p) soma4<-matrix(0,p,1) soma5<-matrix(0,p,p) MI <- matrix(0,p+1+length(D1[upper.tri(D1, diag = T)])+1, p+1+length(D1[upper.tri(D1, diag = T)])+1) ub1<-ubi res <- vector(mode = "numeric", length = N) ubi=matrix(0,m2,m) ubbi=matrix(0,m2,m2) uybi=matrix(0,N,m2) uyyi=matrix(0,N,N) uyi=matrix(0,N,m) yhi=matrix(0,N,1) xi=matrix(0,N,m1) zi=matrix(0,N,m2) ver<-matrix(0,m,1) for (j in 1:m ){ cc1=cc[(sum(nj[1:j-1])+1) : (sum(nj[1:j]))] tt1=tt[(sum(nj[1:j-1])+1) : (sum(nj[1:j]))] y1=y[(sum(nj[1:j-1])+1) : (sum(nj[1:j]))] x1=matrix(x[(sum(nj[1:j-1])+1) : (sum(nj[1:j])), ],ncol=p) z1=matrix(z[(sum(nj[1:j-1])+1) : (sum(nj[1:j])) , ],ncol=q1) LI1<- LI[(sum(nj[1:j-1])+1) : (sum(nj[1:j]))] LS1<- LS[(sum(nj[1:j-1])+1) : 
(sum(nj[1:j]))] gammai=x1%*%beta1 Gama<- MatAr1(tt1,rho,gamma,sigmae) eGama<-Gama/sigmae Psi<-(Gama+(z1)%*%D1%*%t(z1)) Psi=(Psi+t(Psi))/2 delta<- solve(iD1+(t(z1)%*%solve(eGama)%*%(z1*(1/sigmae)))) if(sum(cc1)==0){ uy<- matrix(y1,nj[j],1) uyy<- y1%*%t(y1) ub<- delta%*%(t(z1)*(1/sigmae))%*%solve(eGama)%*%(uy-gammai) ubb<- delta+(delta%*%(t(z1)*((1/sigmae)^2))%*%solve(eGama)%*%(uyy-uy%*%t(gammai)-gammai%*%t(uy)+gammai%*%t(gammai))%*%solve(eGama)%*%z1%*%delta) uyb<- (uyy-uy%*%t(gammai))%*%solve(eGama)%*%(z1*(1/sigmae))%*%delta ver[j,]<- LaplacesDemon::dmvn(x=as.vector(y1),mu=as.vector(gammai),Sigma=Psi) } if(sum(cc1)>=1){ if(sum(cc1)==nj[j]){ muc=x1%*%beta1 Sc<-Psi Sc=(Sc+t(Sc))/2 aux<- relliptical::mvtelliptical(lower = as.vector(LI1),upper=as.vector(LS1),mu = as.vector(muc), Sigma =Sc) uy<- aux$EY uyy<- aux$EYY ub<- delta%*%(t(z1)*(1/sigmae))%*%solve(eGama)%*%(uy-gammai) ubb<- delta+(delta%*%(t(z1)*((1/sigmae)^2))%*%solve(eGama)%*%(uyy-uy%*%t(gammai)-gammai%*%t(uy)+gammai%*%t(gammai))%*%solve(eGama)%*%z1%*%delta) uyb<- (uyy-uy%*%t(gammai))%*%solve(eGama)%*%(z1*(1/sigmae))%*%delta ver[j,]<-TruncatedNormal::pmvnorm(lb=as.vector(LI1), ub=as.vector(LS1), mu=as.vector(muc),sigma=Sc) } else { muc=x1[cc1==1,]%*%beta1+Psi[cc1==1,cc1==0]%*%solve(Psi[cc1==0,cc1==0])%*%(y1[cc1==0]-x1[cc1==0,]%*%beta1) Sc <-Psi[cc1==1,cc1==1]-Psi[cc1==1,cc1==0]%*%solve(Psi[cc1==0,cc1==0])%*%Psi[cc1==0,cc1==1] Sc=(Sc+t(Sc))/2 aux<- relliptical::mvtelliptical(lower = as.vector(LI1[cc1==1]),upper=as.vector(LS1[cc1==1]),mu = as.vector(muc), Sigma =Sc) uy <-matrix(y1,nj[j],1) uy[cc1==1]<- aux$EY uyy<- uy%*%t(uy) uyy[cc1==1,cc1==1]<- aux$EYY ub<- delta%*%(t(z1)*(1/sigmae))%*%solve(eGama)%*%(uy-gammai) ubb<- delta+(delta%*%(t(z1)*((1/sigmae)^2))%*%solve(eGama)%*%(uyy-uy%*%t(gammai)-gammai%*%t(uy)+gammai%*%t(gammai))%*%solve(eGama)%*%z1%*%delta) uyb<- (uyy-uy%*%t(gammai))%*%solve(eGama)%*%(z1*(1/sigmae))%*%delta ver[j,]<- LaplacesDemon::dmvn(x=as.vector(y1[cc1==0]),mu=as.vector(gammai[cc1==0]),Sigma=as.matrix(Psi[cc1==0,cc1==0]))*( TruncatedNormal::pmvnorm(lb=as.vector(LI1[cc1==1]),ub=as.vector(LS1[cc1==1]),mu=as.vector(muc),sigma=Sc))[1] } } soma1<- soma1 + ubb soma2<- soma2 + (sum(diag(uyy%*%solve(eGama)))-t(uy)%*%solve(eGama)%*%gammai-t(gammai)%*%solve(eGama)%*%uy-sum(diag(solve(eGama)%*%((uyb)%*%t(z1))))-sum(diag(solve(eGama)%*%((uyb)%*%t(z1)))) +t(gammai)%*%solve(eGama)%*%z1%*%ub+t(ub)%*%t(z1)%*%solve(eGama)%*%gammai+t(gammai)%*%solve(eGama)%*%gammai+sum(diag(ubb%*%t(z1)%*%solve(eGama)%*%z1))) soma3<- soma3 + (t(x1)%*%solve(eGama)%*%x1) soma4<- soma4 + (t(x1)%*%solve(eGama)%*%(uy-z1%*%ub)) soma5<- soma5 + (t(x1)%*%solve(Psi)%*%x1-t(x1)%*%solve(Psi)%*%(uyy-uy%*%t(uy))%*%solve(Psi)%*%x1) uyyi[(sum(nj[1:j-1])+1) : (sum(nj[1:j])), (sum(nj[1:j-1])+1) : (sum(nj[1:j]))]<-uyy uyi[(sum(nj[1:j-1])+1) : (sum(nj[1:j])), j]<-uy uybi[(sum(nj[1:j-1])+1) : (sum(nj[1:j])), (((j-1)*q1)+1) : (j*q1)]<-uyb ubi[(((j-1)*q1)+1) : (j*q1), j]<-ub ubbi[(((j-1)*q1)+1) : (j*q1), (((j-1)*q1)+1) : (j*q1)]<-ubb yhi[(sum(nj[1:j-1])+1) : (sum(nj[1:j])),] <- uy zi[(sum(nj[1:j-1])+1) : (sum(nj[1:j])), (((j-1)*q1)+1) : (j*q1)]<-z1 xi[(sum(nj[1:j-1])+1) : (sum(nj[1:j])), (((j-1)*p)+1) : (j*p)]<-x1 dbeta <- (1/sigmae)*((t(x1)%*%solve(eGama)%*%(uy-z1%*%ub)) - (t(x1)%*%solve(eGama)%*%x1)%*%beta1) dsigma <- -(1/2)*((nj[j]/sigmae)-(1/sigmae^2)*((sum(diag(uyy%*%solve(eGama)))-t(uy)%*%solve(eGama)%*%gammai-t(gammai)%*%solve(eGama)%*%uy-sum(diag(solve(eGama)%*%((uyb)%*%t(z1))))-sum(diag(solve(eGama)%*%((uyb)%*%t(z1)))) 
+t(gammai)%*%solve(eGama)%*%z1%*%ub+t(ub)%*%t(z1)%*%solve(eGama)%*%gammai+t(gammai)%*%solve(eGama)%*%gammai+sum(diag(ubb%*%t(z1)%*%solve(eGama)%*%z1))))) Dp <- DevEiAr1(tt1,rho,gamma,sigmae) Dpr <- Dp$devR_r dE1_r <- sum(diag(solve(eGama)%*%Dpr)) dE2_r <- -(solve(eGama)%*%Dpr%*%solve(eGama)) drho <- - 0.5*dE1_r-(0.5/sigmae)*((sum(diag(uyy%*%dE2_r))-t(uy)%*%dE2_r%*%gammai-t(gammai)%*%dE2_r%*%uy-sum(diag(dE2_r%*%((uyb)%*%t(z1))))-sum(diag(dE2_r%*%((uyb)%*%t(z1)))) +t(gammai)%*%dE2_r%*%z1%*%ub+t(ub)%*%t(z1)%*%dE2_r%*%gammai+t(gammai)%*%dE2_r%*%gammai+sum(diag(ubb%*%t(z1)%*%dE2_r%*%z1)))) D_der <- Derivadas(D1) deralpha<-rep(0,length(D1[upper.tri(D1, diag = T)])) md2<-dim(D1)[1] kont <- 0 for(i1 in 1:md2){ for(i2 in 1:(md2+1-i1)){ kont <- kont+1 di <- D_der[[i1]][[i2]] deralpha[kont] <- (-0.5)*sum(diag(iD1%*%di-iD1%*%di%*%iD1*ubb)) } } si <- matrix(c(t(dbeta),t(dsigma),t(drho),t(deralpha)),p+1+length(D1[upper.tri(D1, diag = T)])+1,1) MI <- MI + si%*%t(si) } yorg<-apply(yhi,1,sum) beta1<- solve(soma3)%*%soma4 sigmae<- (1/N)*(soma2) sigmae<-as.numeric(sigmae) D1<- (1/m)*(soma1) iD1<- solve(D1) rhos <- optim(rho, method = "L-BFGS-B", FCi_gamma, lower = 0.0001, upper =0.9, gamma=gamma,beta1=beta1,sigmae=sigmae,tt=tt,ubi=ubi,ubbi=ubbi,uybi=uybi,uyyi=uyyi,uyi=uyi,xi=xi,zi=zi,nj=nj,hessian=TRUE)$par rho<-rhos[1] gamma<-0 teta1 <- c(beta1,sigmae,D1[upper.tri(D1, diag = T)],rho) teta1crit <- c(beta1,sigmae) varbeta<-solve(soma5) logver <- sum(log(ver)) loglik <- logliknslmec(y=y,x=x,z=z,cc=cc,ttc=tt,nj=nj,LL=LI,LU=LS,betas=beta1,sigmae=sigmae,D1=D1,phi1=rho,phi2=gamma,struc=struc) loglikp1 <- loglik if(count > 1){ criterio <- sqrt(((loglikp1/loglikp)-1)%*%((loglikp1/loglikp)-1)) setTkProgressBar(pb, count, label=paste("Iter ",count,"/",MaxIter," - ",floor((count)/(MaxIter)*100),"% done",sep = "")) } if(count==MaxIter){criterio <- 0.0000000000001} teta <- teta1 loglikp <- loglikp1 tetacrit <- teta1crit logver1<-logver } } if(struc=="UNC"){ p<-dim(x)[2] q1<-dim(z)[2] m1<-m*p m2<-m*q1 if(!is.null(initial)){ beta1<-initial$betas sigmae<- initial$sigma2 D1<-initial$alphas iD1<- solve(D1) iD1 <- (iD1 + t(iD1))/2 phi1 <- NULL phi2 <- NULL } if(is.null(initial)){ beta1=solve(t(x)%*%x)%*%t(x)%*%y sigmae= 0.25 D1=0.1*diag(dim(z)[2]) iD1<- solve(D1) iD1 <- (iD1 + t(iD1))/2 phi1 <- NULL phi2 <- NULL } teta <- c(beta1,sigmae,D1[upper.tri(D1, diag = T)]) tetacrit <- c(beta1,sigmae) q1<-dim(D1)[2] p<-length(beta1) m1<-m*p m2<-m*q1 ubi=matrix(0,m2,m) loglik <- logliknslmec(y=y,x=x,z=z,cc=cc,ttc=tt,nj=nj,LL=LI,LU=LS,betas=beta1,sigmae=sigmae,D1=D1,phi1=phi1,phi2=phi2,struc=struc) loglikp <- loglik criterio<-1 count<-0 while(criterio > ee){ count <- count + 1 soma1<-matrix(0,q1,q1) soma2<-0 soma3<-matrix(0,p,p) soma4<-matrix(0,p,1) soma5<-matrix(0,p,p) MI <- matrix(0,p+1+length(D1[upper.tri(D1, diag = T)]), p+1+length(D1[upper.tri(D1, diag = T)])) ub1<-ubi res <- vector(mode = "numeric", length = N) ubi=matrix(0,m2,m) ubbi=matrix(0,m2,m2) uybi=matrix(0,N,m2) uyyi=matrix(0,N,N) uyi=matrix(0,N,m) yhi=matrix(0,N,1) xi=matrix(0,N,m1) zi=matrix(0,N,m2) ver<-matrix(0,m,1) for (j in 1:m ){ cc1=cc[(sum(nj[1:j-1])+1) : (sum(nj[1:j]))] tt1=tt[(sum(nj[1:j-1])+1) : (sum(nj[1:j]))] y1=y[(sum(nj[1:j-1])+1) : (sum(nj[1:j]))] x1=matrix(x[(sum(nj[1:j-1])+1) : (sum(nj[1:j])), ],ncol=p) z1=matrix(z[(sum(nj[1:j-1])+1) : (sum(nj[1:j])) , ],ncol=q1) LI1<- LI[(sum(nj[1:j-1])+1) : (sum(nj[1:j]))] LS1<- LS[(sum(nj[1:j-1])+1) : (sum(nj[1:j]))] gammai=x1%*%beta1 if(sum(cc1)==0){ Psi<-(sigmae*diag(nj[j])+(z1)%*%D1%*%t(z1)) Psi<-(Psi+t(Psi))/2 
delta<- solve(iD1+(t(z1)%*%((z1*(1/sigmae))))) uy<- matrix(y1,nj[j],1) uyy<- y1%*%t(y1) ub<- delta%*%(t(z1)*(1/sigmae))%*%(uy-gammai) ubb<- delta+(delta%*%(t(z1)*((1/sigmae)^2))%*%(uyy-uy%*%t(gammai)-gammai%*%t(uy)+gammai%*%t(gammai))%*%z1%*%delta) uyb<- (uyy-uy%*%t(gammai))%*%(z1*(1/sigmae))%*%delta ver[j,]<- LaplacesDemon::dmvn(x=as.vector(y1),mu=as.vector(gammai),Sigma=Psi) } if(sum(cc1)>=1){ Psi<-(sigmae*diag(nj[j])+(z1)%*%D1%*%t(z1)) Psi<-(Psi+t(Psi))/2 if(sum(cc1)==nj[j]){ muc=x1%*%beta1 Sc<-Psi Sc=(Sc+t(Sc))/2 aux<- relliptical::mvtelliptical(lower = as.vector(LI1),upper=as.vector(LS1),mu = as.vector(muc), Sigma =Sc) uy<- aux$EY uyy<- aux$EYY ub<- delta%*%(t(z1)*(1/sigmae))%*%solve(eGama)%*%(uy-gammai) ubb<- delta+(delta%*%(t(z1)*((1/sigmae)^2))%*%solve(eGama)%*%(uyy-uy%*%t(gammai)-gammai%*%t(uy)+gammai%*%t(gammai))%*%solve(eGama)%*%z1%*%delta) uyb<- (uyy-uy%*%t(gammai))%*%solve(eGama)%*%(z1*(1/sigmae))%*%delta ver[j,]<- TruncatedNormal::pmvnorm(lb=as.vector(LI1),ub= as.vector(LS1), mu=c(muc),sigma=Sc) } else { muc=x1[cc1==1,]%*%beta1+Psi[cc1==1,cc1==0]%*%solve(Psi[cc1==0,cc1==0])%*%(y1[cc1==0]-x1[cc1==0,]%*%beta1) Sc <-Psi[cc1==1,cc1==1]-Psi[cc1==1,cc1==0]%*%solve(Psi[cc1==0,cc1==0])%*%Psi[cc1==0,cc1==1] Sc=(Sc+t(Sc))/2 aux<- relliptical::mvtelliptical(lower = as.vector(LI1[cc1==1]),upper=as.vector(LS1[cc1==1]),mu = as.vector(muc), Sigma =Sc) uy <-matrix(y1,nj[j],1) uy[cc1==1]<- aux$EY uyy<- uy%*%t(uy) uyy[cc1==1,cc1==1]<- aux$EYY ub<- delta%*%(t(z1)*(1/sigmae))%*%solve(eGama)%*%(uy-gammai) ubb<- delta+(delta%*%(t(z1)*((1/sigmae)^2))%*%solve(eGama)%*%(uyy-uy%*%t(gammai)-gammai%*%t(uy)+gammai%*%t(gammai))%*%solve(eGama)%*%z1%*%delta) uyb<- (uyy-uy%*%t(gammai))%*%solve(eGama)%*%(z1*(1/sigmae))%*%delta ver[j,]<- LaplacesDemon::dmvn(x=as.vector(y1[cc1==0]),mu=as.vector(gammai[cc1==0]),Sigma=as.matrix(Psi[cc1==0,cc1==0]))*( TruncatedNormal::pmvnorm(lb=as.vector(LI1[cc1==1]),up=as.vector(LS1[cc1==1]),mu=as.vector(muc),sigma=Sc))[1] uyy<- matrix(0,nj[j],nj[j]) } } soma1<- soma1 + ubb soma2<- soma2 + (sum(diag(uyy))-t(uy)%*%gammai-t(gammai)%*%uy-sum(diag(t(uyb)%*%z1))-sum(diag(uyb%*%t(z1))) +t(gammai)%*%z1%*%ub+t(ub)%*%t(z1)%*%gammai+t(gammai)%*%gammai+sum(diag(ubb%*%t(z1)%*%z1))) soma3<- soma3 + (t(x1)%*%x1) soma4<- soma4 + (t(x1)%*%(uy-z1%*%ub)) soma5<- soma5 + (t(x1)%*%solve(Psi)%*%x1-t(x1)%*%solve(Psi)%*%(uyy-uy%*%t(uy))%*%solve(Psi)%*%x1) uyyi[(sum(nj[1:j-1])+1) : (sum(nj[1:j])), (sum(nj[1:j-1])+1) : (sum(nj[1:j]))]<-uyy uyi[(sum(nj[1:j-1])+1) : (sum(nj[1:j])), j]<-uy uybi[(sum(nj[1:j-1])+1) : (sum(nj[1:j])), (((j-1)*q1)+1) : (j*q1)]<-uyb ubi[(((j-1)*q1)+1) : (j*q1), j]<-ub ubbi[(((j-1)*q1)+1) : (j*q1), (((j-1)*q1)+1) : (j*q1)]<-ubb yhi[(sum(nj[1:j-1])+1) : (sum(nj[1:j])),] <- uy zi[(sum(nj[1:j-1])+1) : (sum(nj[1:j])), (((j-1)*q1)+1) : (j*q1)]<-z1 xi[(sum(nj[1:j-1])+1) : (sum(nj[1:j])), (((j-1)*p)+1) : (j*p)]<-x1 dbeta <- (1/sigmae)*((t(x1)%*%solve(eGama)%*%(uy-z1%*%ub)) - (t(x1)%*%solve(eGama)%*%x1)%*%beta1) dsigma <- -(1/2)*((nj[j]/sigmae)-(1/sigmae^2)*((sum(diag(uyy%*%solve(eGama)))-t(uy)%*%solve(eGama)%*%gammai-t(gammai)%*%solve(eGama)%*%uy-sum(diag(solve(eGama)%*%((uyb)%*%t(z1))))-sum(diag(solve(eGama)%*%((uyb)%*%t(z1)))) +t(gammai)%*%solve(eGama)%*%z1%*%ub+t(ub)%*%t(z1)%*%solve(eGama)%*%gammai+t(gammai)%*%solve(eGama)%*%gammai+sum(diag(ubb%*%t(z1)%*%solve(eGama)%*%z1))))) D_der <- Derivadas(D1) deralpha<-rep(0,length(D1[upper.tri(D1, diag = T)])) md2<-dim(D1)[1] kont <- 0 for(i1 in 1:md2){ for(i2 in 1:(md2+1-i1)){ kont <- kont+1 di <- D_der[[i1]][[i2]] deralpha[kont] <- 
            (-0.5)*sum(diag(iD1%*%di - iD1%*%di%*%iD1*ubb))
        }
      }
      # score vector and empirical information matrix
      si <- matrix(c(t(dbeta), t(dsigma), t(deralpha)),
                   p + 1 + length(D1[upper.tri(D1, diag = T)]), 1)
      MI <- MI + si%*%t(si)
    }
    # M-step: closed-form updates of the parameter estimates
    yorg   <- apply(yhi, 1, sum)
    beta1  <- solve(soma3)%*%soma4
    sigmae <- (1/(N))*as.numeric(soma2)
    D1  <- (1/(m))*(soma1)
    iD1 <- solve(D1)
    teta1     <- c(beta1, sigmae, D1[upper.tri(D1, diag = T)])
    teta1crit <- c(beta1, sigmae)
    varbeta <- solve(soma5)
    logver  <- sum(log(ver))
    loglik  <- logliknslmec(y=y, x=x, z=z, cc=cc, ttc=tt, nj=nj, LL=LI, LU=LS,
                            betas=beta1, sigmae=sigmae, D1=D1,
                            phi1=phi1, phi2=phi2, struc=struc)
    loglikp1 <- loglik
    # stopping criterion: relative change in the log-likelihood
    if(count > 1){
      criterio <- sqrt(((loglikp1/loglikp)-1)%*%((loglikp1/loglikp)-1))
      setTkProgressBar(pb, count, label=paste("Iter ",count,"/",MaxIter," - ",
                                              floor((count)/(MaxIter)*100),"% done",sep = ""))
    }
    if(count==MaxIter){criterio <- 0.0000000000001}
    teta     <- teta1
    loglikp  <- loglikp1
    tetacrit <- teta1crit
    logver1  <- logver
    gamma <- 0
    rho   <- 0
  }
}
# information criteria and summary tables
dd     <- D1[upper.tri(D1, diag = T)]
npar   <- length(c(teta1))
ni     <- sum(nj)
loglik <- logver1
AICc    <- -2*loglik + 2*npar
AICcorr <- AICc + ((2*npar*(npar+1))/(ni-npar-1))
BICc    <- -2*loglik + log(ni)*npar
MI <- round((MI + t(MI))/2, 6)
# standard errors and 95% intervals from the empirical information matrix
SE     <- round(sqrt(diag(ginv(MI))), 3)
intPar <- round(1.96*SE, 3)
tableB <- data.frame(round(beta1,3), SE[1:p],
                     paste("<", round(beta1,3)-round(intPar[1:p],3), ",",
                           round(beta1,3)+round(intPar[1:p],3), ">"))
rownames(tableB) <- paste("beta", 1:p)
colnames(tableB) <- c("Est","SE","IConf(95%)")
# truncate the lower confidence limit of sigma^2 at zero
if((round(sigmae,3)-round(intPar[p+1],3)) < 0)
  tableS <- data.frame(round(sigmae,3), SE[p+1],
                       paste("<", 0, ",", round(sigmae,3)+round(intPar[p+1],3), ">"))
if((round(sigmae,3)-round(intPar[p+1],3)) >= 0)
  tableS <- data.frame(round(sigmae,3), SE[p+1],
                       paste("<", round(sigmae,3)-round(intPar[p+1],3), ",",
                             round(sigmae,3)+round(intPar[p+1],3), ">"))
rownames(tableS) <- "Sigma^2"
colnames(tableS) <- c("Est","SE","IConf(95%)")
if(struc=="DEC"){
  phi <- c(gamma, rho)
  tableP <- data.frame(round(phi,3), SE[(p+2):(p+1+2)],
                       paste("<", round(phi,3)-round(intPar[(p+2):(p+1+2)],3), ",",
                             round(phi,3)+round(intPar[(p+2):(p+1+2)],3), ">"))
  rownames(tableP) <- paste("Phi", 1:2)
  colnames(tableP) <- c("Est","SE","IConf(95%)")
}
if(struc=="DEC(AR)"){
  phi <- rho
  tableP <- data.frame(round(phi,3), SE[(p+2):(p+1+1)],
                       paste("<", round(phi,3)-round(intPar[(p+2):(p+1+1)],3), ",",
                             round(phi,3)+round(intPar[(p+2):(p+1+1)],3), ">"))
  rownames(tableP) <- paste("Phi", 1)
  colnames(tableP) <- c("Est","SE","IConf(95%)")
}
if(struc=="SYM"){
  phi <- rho
  tableP <- data.frame(round(phi,3), SE[(p+2):(p+1+1)],
                       paste("<", round(phi,3)-round(intPar[(p+2):(p+1+1)],3), ",",
                             round(phi,3)+round(intPar[(p+2):(p+1+1)],3), ">"))
  rownames(tableP) <- paste("Phi", 1)
  colnames(tableP) <- c("Est","SE","IConf(95%)")
}
if(struc=="UNC"){ phi <- NULL; tableP <- NULL }
# labels "11", "12", "22", ... for the elements of D
nnp <- 0
for(al in 1:dim(D1)[1]) {
  noa <- paste(1:al, al, sep = "")
  nnp <- c(nnp, noa)
}
nnp <- nnp[-1]
ici <- round(dd,3) - round(intPar[(p+2+length(phi)):(p+1+length(D1[upper.tri(D1, diag = T)])+length(phi))], 3)
ics <- round(dd,3) + round(intPar[(p+2+length(phi)):(p+1+length(D1[upper.tri(D1, diag = T)])+length(phi))], 3)
ici[as.numeric(nnp)%%11==0 & ici<0] <- 0  # diagonal (variance) elements cannot have negative lower limits
tableA <- data.frame(round(dd,3),
                     SE[(p+2+length(phi)):(p+1+length(D1[upper.tri(D1, diag = T)])+length(phi))],
                     paste("<", ici, ",", ics, ">"))
rownames(tableA) <- paste("Alpha", nnp)
colnames(tableA) <- c("Est","SE","IConf(95%)")
# standardized residuals and fitted values, subject by subject
res <- fitY <- vector(mode = "numeric", length = sum(nj))
Di <- matrix(0, dim(z)[2], dim(z)[2])
Di[upper.tri(Di, diag = T)] <- dd
Di[lower.tri(Di, diag = T)] <- dd
Di <- round(Di, 6)
for (k in 1:length(nj)) {
  tc <- tt[(sum(nj[1:k-1])+1) : (sum(nj[1:k]))]
  if(struc=="DEC")     Mq <- MatDec(tc, phi[1], phi[2], "DEC")
  if(struc=="DEC(AR)") Mq <- MatDec(tc, phi[1], 1, "DEC(AR)")
  if(struc=="SYM")     Mq <- MatDec(tc, phi[1], 0, "SYM")
  if(struc=="UNC")     Mq <- MatDec(tc, phi1=NULL, phi2=NULL, "UNC")
  Sii  <- ginv(round(z[(sum(nj[1:k-1])+1):(sum(nj[1:k])),]%*%Di%*%t(z[(sum(nj[1:k-1])+1):(sum(nj[1:k])),]) + sigmae*Mq, 6))
  yyii <- yorg[(sum(nj[1:k-1])+1):(sum(nj[1:k]))] - x[(sum(nj[1:k-1])+1):(sum(nj[1:k])),]%*%beta1
  res[(sum(nj[1:k-1])+1) : (sum(nj[1:k]))]  <- sqrtm(Sii)%*%(yyii)
  fitY[(sum(nj[1:k-1])+1) : (sum(nj[1:k]))] <- x[(sum(nj[1:k-1])+1):(sum(nj[1:k])),]%*%beta1 +
    z[(sum(nj[1:k-1])+1):(sum(nj[1:k])),]%*%as.matrix(ubi[(((k-1)*q1)+1):(k*q1), k])
}
end.time   <- Sys.time()
time.taken <- end.time - start.time
obj.out <- list(beta1 = beta1, sigmae = sigmae, phi = phi, dd = dd,
                loglik = loglik, AIC = AICc, BIC = BICc, AICcorr = AICcorr,
                iter = count, varbeta = varbeta, ubi = ubi, ubbi = ubbi,
                uybi = uybi, uyi = uyi, uyyi = uyyi, MI = MI, yorg = yorg,
                residuals = res, yfit = fitY, time = time.taken, SE = SE,
                tableB = tableB, tableS = tableS, tableP = tableP, tableA = tableA)
if (count == MaxIter) {
  setTkProgressBar(pb, MaxIter, label=paste("MaxIter reached ",count,"/",MaxIter," - 100 % done",sep = ""))
  close(pb)
} else {
  setTkProgressBar(pb, MaxIter, label=paste("Convergence at Iter ",count,"/",MaxIter," - 100 % done",sep = ""))
  close(pb)
}
class(obj.out) <- "DECNLMEC"
return(obj.out)
}
## ---- end of file: ARpLMEC/R/EMCensArpN.R ----
EMCensDECT<- function(cc,y,x,z,ttc,nj,struc,initial,cens.type,LL,LU,nu.fixed,iter.max,precision) { start.time <- Sys.time() pb = tkProgressBar(title = "DEC-T-tLMEC by EM", min = 0,max = iter.max, width = 300) setTkProgressBar(pb, 0, label=paste("Iter ",0,"/",iter.max," - ",0,"% done",sep = "")) if(cens.type=="left"){ LL=rep(-Inf,length(cc)) LU=rep(Inf,length(cc)) LU[cc==1]=y[cc==1] LL=as.vector(LL) LU=as.vector(LU) } if(cens.type=="right"){ LL=rep(-Inf,length(cc)) LL[cc==1]=y[cc==1] LU=rep(Inf,length(cc)) LL=as.vector(LL) LU=as.vector(LU) } if(cens.type=="interval"){ LL=LL LU=LU LL=as.vector(LL) LU=as.vector(LU) } m <- length(nj)[1] N <- sum(nj) q1 <- dim(z)[2] m2 <- m*q1 p <- dim(x)[2] if(!is.null(initial)){ beta1 <- matrix(initial$betas,p,1) sigmae <- initial$sigma2 D1 <- initial$alphas iD1 <- solve(D1) iD1 <- (iD1 + t(iD1))/2 nu <- initial$nu if(!is.null(initial$phi)){ phis<-initial$phi if(struc=="DEC") { phi1 <- initial$phi1 phi2 <- initial$phi2 teta <- c(beta1,sigmae,D1[upper.tri(D1, diag = T)],phi1,phi2,nu) } else if(struc=="DEC(AR)"){ phi1 <- initial$phi1 phi2 <- 1 teta <- c(beta1,sigmae,D1[upper.tri(D1, diag = T)],phi1,nu) } else if(struc=="SYM"){ phi1 <- initial$phi1 phi2 <- 0 teta <- c(beta1, sigmae,D1[upper.tri(D1, diag = T)],phi1,nu) } else { phi1 <- NULL phi2 <- NULL teta <- c(beta1, sigmae,D1[upper.tri(D1, diag = T)],nu) } } if(is.null(initial$phi)){ if(struc=="DEC") { phi1 <- 0.1 phi2 <- 1 teta <- c(beta1,sigmae,D1[upper.tri(D1, diag = T)],phi1,phi2,nu) } else if(struc=="DEC(AR)"){ phi1 <- 0.1 phi2 <- 1 teta <- c(beta1,sigmae,D1[upper.tri(D1, diag = T)],phi1,nu) } else if(struc=="SYM"){ phi1 <- 0.1 phi2 <- 0 teta <- c(beta1, sigmae,D1[upper.tri(D1, diag = T)],phi1,nu) } else { phi1 <- NULL phi2 <- NULL teta <- c(beta1, sigmae,D1[upper.tri(D1, diag = T)],nu) } } } if(is.null(initial)){ beta1=solve(t(x)%*%x)%*%t(x)%*%y sigmae= 0.25 D1=0.1*diag(dim(z)[2]) nu=3 iD1<- solve(D1) if(struc=="DEC") { phi1 <- 0.1 phi2 <- 1 teta <- c(beta1,sigmae,D1[upper.tri(D1, diag = T)],phi1,phi2,nu) } else if(struc=="DEC(AR)"){ phi1 <- 0.1 phi2 <- 1 teta <- c(beta1,sigmae,D1[upper.tri(D1, diag = T)],phi1,nu) } else if(struc=="SYM"){ phi1 <- 0.1 phi2 <- 0 teta <- c(beta1, sigmae,D1[upper.tri(D1, diag = T)],phi1,nu) } else { phi1 <- NULL phi2 <- NULL teta <- c(beta1, sigmae,D1[upper.tri(D1, diag = T)],nu) } } qr <- length(D1[lower.tri(D1, diag = T)]) W <- x gamma1 <- as.vector(c(beta1)) criterio <- 1 count <- 0 loglik <- logliktslmec(nu=nu,y=y,x=x,z=z,cc=cc,ttc=ttc,nj=nj,LL=LL,LU=LU,betas=beta1,sigmae=sigmae,D1=D1,phi1=phi1,phi2=phi2,struc=struc) loglikp <- loglik while(criterio > precision){ count <- count + 1 soma1 <- matrix(0,q1,q1) soma2 <- 0 soma3 <- matrix(0,p,p) soma4 <- matrix(0,p,1) soma7 <- matrix(0,p,p) Infbetas <- matrix(0,p,p) res <- vector(mode = "numeric", length = N) ui <- rep(0,m) uyi <- matrix(0,N,m) uyyi <- matrix(0,N,N) ubi <- matrix(0,m2,m) ubbi <- matrix(0,m2,m2) uybi <- matrix(0,N,m2) yest <- matrix(0,N,1) biest <- matrix(0,m2,m) yhi <- matrix(0,N,1) for (j in 1:m){ cc1 <- cc[(sum(nj[1:j-1])+1) : (sum(nj[1:j]))] y1 <- y[(sum(nj[1:j-1])+1) : (sum(nj[1:j]))] x1 <- matrix(x[(sum(nj[1:j-1])+1) : (sum(nj[1:j])), ],ncol=p) z1 <- matrix(z[(sum(nj[1:j-1])+1) : (sum(nj[1:j])) , ],ncol=q1) tt1 <- ttc[(sum(nj[1:j-1])+1) : (sum(nj[1:j]))] W1 <- x1 LL1 <- LL[(sum(nj[1:j-1])+1) : (sum(nj[1:j]))] LU1 <- LU[(sum(nj[1:j-1])+1) : (sum(nj[1:j]))] muii <- W1%*%gamma1 Gama <- MatDec(tt1,phi1,phi2,struc) invGama <- solve(Gama) SIGMA <- (sigmae*Gama + (z1)%*%D1%*%t(z1)) SIGMA <-(SIGMA+t(SIGMA))/2 
SIGMAinv <- solve(SIGMA) Lambda1 <- solve(iD1 + (t(z1)%*%invGama%*%z1)*(1/sigmae)) Lambda1 <- (Lambda1 + t(Lambda1))/2 dm <- as.numeric(t(y1 - muii)%*%SIGMAinv%*%(y1-muii)) cdm <- as.numeric((nu+nj[j])/(nu+dm)) if(sum(cc1)==0) { u <- cdm uy <- matrix(y1,nj[j],1)*cdm uyy <- (y1%*%t(y1))*cdm ub <- (Lambda1%*%(t(z1)*(1/sigmae))%*%invGama)%*%(uy - u*muii) ubb <- Lambda1 + (Lambda1%*%(t(z1)*(1/sigmae))%*%invGama)%*%(uyy - uy%*%t(muii) - muii%*%t(uy) + u*muii%*%t(muii))%*%t(Lambda1%*%(t(z1)*(1/sigmae))%*%invGama) uyb <- (uyy - uy%*%t(muii))%*%(invGama%*%(z1*(1/sigmae))%*%Lambda1) yh <- matrix(y1,nj[j],1) best <- (Lambda1%*%(t(z1)*(1/sigmae))%*%invGama)%*%(yh - muii) Eu2yy <- (cdm^2)*(y1%*%t(y1)) Eu2y <- (cdm^2)*(matrix(y1,nj[j],1)) Eu2 <- cdm^2 E2 <- Eu2yy - Eu2y%*%t(muii) - muii%*%t(Eu2y) + Eu2*(muii)%*%t(muii) E1 <- (uy - u*(muii))%*%t(uy - u*(muii)) } if(sum(cc1)>=1) { if(sum(cc1)==nj[j]) { aux1U <- TruncatedNormal::pmvt(lb = as.vector(LL1), ub = as.vector(LU1), mu = as.vector(muii), df=(nu+2), sigma = as.matrix((nu/(nu + 2))*SIGMA)) aux2U <- TruncatedNormal::pmvt(lb = as.vector(LL1), ub = as.vector(LU1), mu = as.vector(muii), df=(nu+2), sigma = as.matrix(SIGMA)) u <- as.numeric(aux1U/aux2U) auxy <-relliptical::mvtelliptical(lower = as.vector(LL1),upper=as.vector(LU1),mu = as.vector(muii), Sigma = as.matrix((nu/(nu + 2))*SIGMA),dist = "t",nu=(nu+2)) uy <- u*auxy$EY uyy <- u*auxy$EYY ub <- (Lambda1%*%(t(z1)*(1/sigmae))%*%invGama)%*%(uy - u*muii) ubb <- Lambda1 + (Lambda1%*%(t(z1)*(1/sigmae))%*%invGama)%*%(uyy - uy%*%t(muii) - muii%*%t(uy) + u*muii%*%t(muii))%*%t(Lambda1%*%(t(z1)*(1/sigmae))%*%invGama) uyb <- (uyy - uy%*%t(muii))%*%(invGama%*%(z1*(1/sigmae))%*%Lambda1) auxb <- relliptical::mvtelliptical(lower = as.vector(LL1),upper=as.vector(LU1),mu = as.vector(muii), Sigma = as.matrix(SIGMA),dist = "t",nu=(nu)) yh <- auxb$EY best <- (Lambda1%*%(t(z1)*(1/sigmae))%*%invGama)%*%(yh - muii) cp <- (((nu+nj[j])/nu)^2)*((gamma((nu+nj[j])/2)*gamma((nu+4)/2))/(gamma(nu/2)*gamma((nu+nj[j]+4)/2))) auxw <- relliptical::mvtelliptical(lower = as.vector(LL1),upper=as.vector(LU1),mu = as.vector(muii), Sigma = as.matrix((nu/(nu + 4))*SIGMA),dist = "t",nu=(nu+4)) auxEU <-TruncatedNormal::pmvt(lb = as.vector(LL1), ub = as.vector(LU1), mu = as.vector(muii), df=(nu+4), sigma = as.matrix((nu/(nu + 4))*SIGMA)) Eu2yy <- cp*(auxEU/aux2U)*auxw$EYY Eu2y <- cp*(auxEU/aux2U)*auxw$EY Eu2 <- cp*(auxEU/aux2U) E2 <- Eu2yy - Eu2y%*%t(muii) - muii%*%t(Eu2y) + Eu2*(muii)%*%t(muii) E1 <- (uy - u*(muii))%*%t(uy - u*(muii)) } else{ muiic <- W1[cc1==1,]%*%gamma1 + SIGMA[cc1==1,cc1==0]%*%solve(SIGMA[cc1==0,cc1==0])%*%(y1[cc1==0]-W1[cc1==0,]%*%gamma1) Si <- SIGMA[cc1==1,cc1==1]-SIGMA[cc1==1,cc1==0]%*%solve(SIGMA[cc1==0,cc1==0])%*%SIGMA[cc1==0,cc1==1] Si <- (Si+t(Si))/2 Qy0 <- as.numeric(t(y1[cc1==0]-W1[cc1==0,]%*%gamma1)%*%solve(SIGMA[cc1==0,cc1==0])%*%(y1[cc1==0]-W1[cc1==0,]%*%gamma1)) auxQy0 <- as.numeric((nu + Qy0)/(nu + length(cc1[cc1==0]))) auxQy02 <- as.numeric((nu + Qy0)/(nu + 2 + length(cc1[cc1==0]))) Sc0 <- auxQy0*Si Sc0til <- auxQy02*Si LL1c <- LL1[cc1==1] LU1c <- LU1[cc1==1] aux1U <- TruncatedNormal::pmvt(lb = as.vector(LL1c), ub = as.vector(LU1c), mu = as.vector(muiic), df=(nu + 2 + length(cc1[cc1==0])), sigma = as.matrix(Sc0til)) aux2U <- TruncatedNormal::pmvt(lb = as.vector(LL1c), ub = as.vector(LU1c), mu = as.vector(muiic), df=(nu + length(cc1[cc1==0])), sigma = as.matrix(Sc0)) u <- as.numeric(aux1U/aux2U)*(1/auxQy0) Sc0til=round((Sc0til+t(Sc0til))/2,3) auxy <- relliptical::mvtelliptical(lower = 
as.vector(LL1c),upper=as.vector(LU1c),mu = as.vector(muiic), Sigma = as.matrix(Sc0til),dist = "t",nu=(nu + 2 + length(cc1[cc1==0]))) w1aux <- auxy$EY w2aux <- auxy$EYY uy <- matrix(y1,nj[j],1)*u uy[cc1==1] <- w1aux*u uyy <- y1%*%t(y1)*u uyy[cc1==0,cc1==1] <- u*y1[cc1==0]%*%t(w1aux) uyy[cc1==1,cc1==0] <- u*w1aux%*%t(y1[cc1==0]) uyy[cc1==1,cc1==1] <- u*w2aux uyy <- (uyy + t(uyy))/2 ub <- (Lambda1%*%(t(z1)*(1/sigmae))%*%invGama)%*%(uy - u*muii) ubb <- Lambda1 + (Lambda1%*%(t(z1)*(1/sigmae))%*%invGama)%*%(uyy - uy%*%t(muii) - muii%*%t(uy) + u*muii%*%t(muii))%*%t(Lambda1%*%(t(z1)*(1/sigmae))%*%invGama) uyb <- (uyy - uy%*%t(muii))%*%(invGama%*%(z1*(1/sigmae))%*%Lambda1) auxb <- relliptical::mvtelliptical(lower = as.vector(LL1c),upper=as.vector(LU1c),mu = as.vector(muiic), Sigma = as.matrix(Sc0),dist = "t",nu=(nu + length(cc1[cc1==0]))) yh <- matrix(y1,nj[j],1) yh[cc1==1] <- auxb$EY best <- (Lambda1%*%(t(z1)*(1/sigmae))%*%invGama)%*%(yh - muii) dp <- ((nu+nj[j])^2)*((gamma((nu+nj[j])/2)*gamma((nu+4+length(cc1[cc1==0]))/2))/(gamma((nu+length(cc1[cc1==0]))/2)*gamma((nu+4+nj[j])/2))) Sirc=(nu + Qy0)/(nu + 4 + length(cc1[cc1==0]))*Si Sirc=round(((t(Sirc)+Sirc)/2),2) auxEU <-TruncatedNormal::pmvt(lb = as.vector(LL1c), ub = as.vector(LU1c), mu = as.vector(muiic), df=(nu + 4 + length(cc1[cc1==0])), sigma = as.matrix(Sirc)) auxEw <- relliptical::mvtelliptical(lower = as.vector(LL1c),upper=as.vector(LU1c),mu = as.vector(muiic), Sigma = as.matrix(Sirc),dist = "t",nu=(nu + 4 + length(cc1[cc1==0]))) Ew1aux <- auxEw$EY Ew2aux <- auxEw$EYY Eu2yy <- (dp/((nu + Qy0)^2))*(auxEU/aux2U)*y1%*%t(y1) Eu2yy[cc1==0,cc1==1] <- (dp/((nu + Qy0)^2))*(auxEU/aux2U)*y1[cc1==0]%*%t(Ew1aux) Eu2yy[cc1==1,cc1==0] <- (dp/((nu + Qy0)^2))*(auxEU/aux2U)* Ew1aux%*%t(y1[cc1==0]) Eu2yy[cc1==1,cc1==1] <- (dp/((nu + Qy0)^2))*(auxEU/aux2U)*Ew2aux Eu2y <- (dp/((nu + Qy0)^2))*(auxEU/aux2U)*matrix(y1,nj[j],1) Eu2y[cc1==1] <- (dp/((nu + Qy0)^2))*(auxEU/aux2U)*Ew1aux Eu2 <- (dp/((nu + Qy0)^2))*(auxEU/aux2U) E2 <- Eu2yy - Eu2y%*%t(muii) - muii%*%t(Eu2y) + Eu2*(muii)%*%t(muii) E1 <- (uy - u*(muii))%*%t(uy - u*(muii)) } } soma1 <- soma1 + ubb soma2 <- soma2 + (sum(diag(uyy%*%invGama)) - t(uy)%*%invGama%*%muii - t(muii)%*%invGama%*%uy - sum(diag(t(uyb)%*%invGama%*%z1)) - sum(diag(uyb%*%t(z1)%*%invGama)) + t(muii)%*%invGama%*%z1%*%ub + t(ub)%*%t(z1)%*%invGama%*%muii + u*t(muii)%*%invGama%*%muii + sum(diag(ubb%*%t(z1)%*%invGama%*%z1))) soma3 <- soma3 + (u*t(x1)%*%invGama%*%x1) soma4 <- soma4 + (t(x1)%*%invGama%*%(uy - z1%*%ub)) soma7 <- soma7 + (((nu+nj[j])/(nu+nj[j]+2))*t(x1)%*%SIGMAinv%*%x1 - ((nu+nj[j]+2)/(nu+nj[j]))*t(x1)%*%SIGMAinv%*%(E2)%*%SIGMAinv%*%x1 + (t(x1)%*%SIGMAinv%*%(E1)%*%SIGMAinv%*%x1)) ui[j] <- u uyi[(sum(nj[1:j-1])+1) : (sum(nj[1:j])),j] <- uy uyyi[(sum(nj[1:j-1])+1) : (sum(nj[1:j])),(sum(nj[1:j-1])+1) : (sum(nj[1:j]))]<- uyy ubi[(((j-1)*q1)+1) : (j*q1), j] <- ub ubbi[(((j-1)*q1)+1) : (j*q1), (((j-1)*q1)+1) : (j*q1)]<- ubb uybi[(sum(nj[1:j-1])+1) : (sum(nj[1:j])),(((j-1)*q1)+1) : (j*q1)]<- uyb yest[(sum(nj[1:j-1])+1) : (sum(nj[1:j]))] <- z1%*%best + muii biest[(((j-1)*q1)+1) : (j*q1), j] <- best yhi[(sum(nj[1:j-1])+1) : (sum(nj[1:j]))] <- yh } yhatorg <- apply(yhi,1,sum) yfit <- apply(yest,1,sum) yfit[cc==1] <- yhatorg[cc==1] Infbetas[1:p,1:p] <- soma7 Infbetas <- (Infbetas + t(Infbetas))/2 beta1 <- solve(soma3)%*%soma4 gamma1 <- as.vector(c(beta1)) sigmae <- (1/N)*(soma2) sigmae <- as.numeric(sigmae) D1 <- (1/m)*(soma1) iD1 <- solve(D1) if(nu.fixed==FALSE) { nu <- optimize(f = logliktslmec,interval = c(2.01,30),tol = 0.00001, 
maximum = TRUE,y=y,x=x,z=z,cc=cc,ttc=ttc,nj=nj, LL=LL,LU=LU,betas=beta1,sigmae=sigmae,D1=D1,phi1=phi1,phi2=phi2,struc=struc)$maximum } if(struc=="DEC") { phis <- optim(c(phi1,phi2), FCit,lower =c(0.01,0.01), upper=c(0.9,30),method = "L-BFGS-B", hessian=TRUE, beta1=beta1,sigmae=sigmae,ttc=ttc,ubi=ubi,ubbi=ubbi,uybi=uybi,uyyi=uyyi,uyi=uyi,ui=ui,x=x,z=z,nj=nj,struc=struc)$par phi1 <- phis[1] phi2 <- phis[2] teta1 <- c(beta1,sigmae,D1[upper.tri(D1, diag = T)],phi1,phi2,nu) } else if(struc=="DEC(AR)"){ phi2 <- 1 phi1 <- optimize(f=FCiphi1t, lower= 0.0001, upper=0.9, tol = 0.001, phi2=phi2, beta1=beta1,sigmae=sigmae, ttc=ttc,ubi=ubi,ubbi=ubbi,uybi=uybi,uyyi=uyyi,uyi=uyi,ui=ui,x=x,z=z,nj=nj,struc=struc)$minimum teta1 <- c(beta1, sigmae,D1[upper.tri(D1, diag = T)],phi1,nu) } else if(struc=="SYM"){ phi2 <- 0 phi1 <- optimize(f=FCiphi1t, lower= 0.0001, upper=0.9,tol = 0.001, phi2=phi2, beta1=beta1,sigmae=sigmae, ttc=ttc,ubi=ubi,ubbi=ubbi,uybi=uybi,uyyi=uyyi,uyi=uyi,ui=ui,x=x,z=z,nj=nj,struc=struc)$minimum teta1 <- c(beta1, sigmae,D1[upper.tri(D1, diag = T)],phi1,nu) } else{ teta1 <- c(beta1,sigmae,D1[upper.tri(D1, diag = T)],nu) } loglik <- logliktslmec(nu=nu,y=y,x=x,z=z,cc=cc,ttc=ttc,nj=nj,LL=LL,LU=LU,betas=beta1,sigmae=sigmae,D1=D1,phi1=phi1,phi2=phi2,struc=struc) loglikp1 <- loglik if(count > 1){ criterio <- sqrt(((loglikp1/loglikp)-1)%*%((loglikp1/loglikp)-1)) setTkProgressBar(pb, count, label=paste("Iter ",count,"/",iter.max," - ",floor((count)/(iter.max)*100),"% done",sep = "")) } if(count==iter.max){criterio <- precision*0.0001} teta <- teta1 loglikp <- loglikp1 } for (k in 1:length(nj)) {tc<-ttc[(sum(nj[1:k-1])+1) : (sum(nj[1:k]))] if(struc=="DEC(AR)"){Mq<- MatDec(tc,phi1,phi2,"DEC(AR)")} if(struc=="SYM"){Mq<- MatDec(tc,phi1,phi2,"SYM")} if(struc=="DEC"){Mq<- MatDec(tc,phi1,phi2,"DEC")} if(struc=="UNC"){Mq<- MatDec(tc,phi1,phi2,"UNC")} res[(sum(nj[1:k-1])+1) : (sum(nj[1:k]))]=(sqrtm(solve(round(z[(sum(nj[1:k-1])+1) : (sum(nj[1:k])),]%*%D1%*%t(z[(sum(nj[1:k-1])+1) : (sum(nj[1:k])),])+sigmae*Mq,6)))%*%(yhatorg[(sum(nj[1:k-1])+1) : (sum(nj[1:k]))] -x[(sum(nj[1:k-1])+1) : (sum(nj[1:k])),]%*%beta1)) } dd <- D1[upper.tri(D1, diag = T)] npar <- length(c(teta1)) AICc <- -2*loglikp + 2*npar BICc <- -2*loglikp + log(N)*npar SE=round(sqrt(diag(solve(Infbetas))),3) intPar=round(qt(0.975,nu)*SE,3) tableB = data.frame(round(beta1,3),SE,paste("<",round(beta1,3)-round(intPar,3),",",round(beta1,3)+round(intPar,3),">")) rownames(tableB) = paste("beta",1:p) colnames(tableB) = c("Est","SE","IConf(95%)") tableS = data.frame(round(sigmae,3)) rownames(tableS) = "Sigma^2" colnames(tableS) = c("Est") if(struc=="DEC"){ phi=c(phi1,phi2) tableP = data.frame(round(phi,3)) rownames(tableP) = paste("Phi",1:2) colnames(tableP) = c("Est") } if(struc=="DEC(AR)"){ phi=phi1 tableP = data.frame(round(phi,3)) rownames(tableP) = paste("Phi",1) colnames(tableP) = c("Est") } if(struc=="SYM"){ phi=phi1 tableP = data.frame(round(phi,3)) colnames(tableP) = c("Est") } if(struc=="UNC"){ phi=NULL; tableP =NULL } nnp=0 for(al in 1:dim(D1)[1]) {noa=paste(1:al,al,sep = "") nnp=c(nnp,noa) } nnp=nnp[-1] tableA = data.frame(round(dd,3)) rownames(tableA) = paste("Alpha",nnp) colnames(tableA) = c("Est") end.time <- Sys.time() time.taken <- end.time - start.time obj.out <- list(beta1 = beta1, sigmae= sigmae, phi=phi, dd = dd,nu=nu, loglik=loglik, AIC=AICc, BIC=BICc, iter = count, ubi = ubi, ubbi = ubbi, uybi = uybi, uyi = uyi, uyyi = uyyi,ui=ui, MI=Infbetas,residual=res, Prev= NULL, time=time.taken, 
SE=SE,tableB=tableB,tableS=tableS,tableP=tableP, tableA=tableA,yfit=yfit, yorg = yhatorg) if (count == iter.max) { setTkProgressBar(pb, iter.max, label=paste("MaxIter reached ",count,"/",iter.max," - 100 % done",sep = "")) close(pb) } else { setTkProgressBar(pb, iter.max, label=paste("Convergence at Iter ",count,"/",iter.max," - 100 % done",sep = "")) close(pb) } class(obj.out) <- "DECTLMEC" return(obj.out) } EMCensArpT<- function(cc,y,x,z,ttc,nj,Arp,initial,cens.type,LL,LU,nu.fixed,iter.max,precision) { start.time <- Sys.time() pb = tkProgressBar(title = "AR(p)-T-LMEC by EM", min = 0,max = iter.max, width = 300) setTkProgressBar(pb, 0, label=paste("Iter ",0,"/",iter.max," - ",0,"% done",sep = "")) if(cens.type=="left"){ LL=rep(-Inf,length(cc)) LU=rep(Inf,length(cc)) LU[cc==1]=y[cc==1] LL=as.vector(LL) LU=as.vector(LU) } if(cens.type=="right"){ LL=rep(-Inf,length(cc)) LL[cc==1]=y[cc==1] LU=rep(Inf,length(cc)) LL=as.vector(LL) LU=as.vector(LU) } if(cens.type=="interval"){ LL=LL LU=LU LL=as.vector(LL) LU=as.vector(LU) } m <- length(nj)[1] N <- sum(nj) q1 <- dim(z)[2] m2 <- m*q1 p <- dim(x)[2] if(!is.null(initial)){ beta1 <- matrix(initial$betas,p,1) sigmae <- initial$sigma2 D1 <- initial$alphas iD1 <- solve(D1) iD1 <- (iD1 + t(iD1))/2 nu <- initial$nu if(!is.null(initial$phi)){ pii = as.numeric(pacf((y - x%*%beta1),lag.max=Arp,plot=F)$acf) phis<-initial$phi if(Arp!="UNC"){ pii = as.numeric(pacf((y - x%*%beta1),lag.max=Arp,plot=F)$acf) phi = phis} if(Arp=="UNC"){ pii=0 Arp=0 phi = 0} } if(is.null(initial$phi)){ if(Arp!="UNC"){ pii = as.numeric(pacf((y - x%*%beta1),lag.max=Arp,plot=F)$acf) phi = estphit(pii)} if(Arp=="UNC"){ pii=0 Arp=0 phi = 0} } } if(is.null(initial)){ beta1=solve(t(x)%*%x)%*%t(x)%*%y sigmae= 0.25 D1=0.1*diag(dim(z)[2]) nu=3 pii = as.numeric(pacf((y - x%*%beta1),lag.max=Arp,plot=F)$acf) iD1<- solve(D1) if(Arp!="UNC"){ pii = as.numeric(pacf((y - x%*%beta1),lag.max=Arp,plot=F)$acf) phi = estphit(pii)} if(Arp=="UNC"){ Arp=0 pii=0 phi = 0} } qr <- length(D1[lower.tri(D1, diag = T)]) W <- x gamma1 <- as.vector(c(beta1)) teta <- c(beta1,sigmae,D1[upper.tri(D1, diag = T)],pii,nu) criterio <- 1 count <- 0 loglik <- logliktArplmec(nu=nu,y=y,x=x,z=z,cc=cc,ttc=ttc,nj=nj,LL=LL,LU=LU,betas=beta1,sigmae=sigmae,D1=D1,pii) loglikp <- loglik while(criterio > precision){ criterio count <- count + 1 soma1 <- matrix(0,q1,q1) soma2 <- 0 soma3 <- matrix(0,p,p) soma4 <- matrix(0,p,1) soma7 <- matrix(0,p,p) Infbetas <- matrix(0,p,p) ui <- rep(0,m) uyi <- matrix(0,N,m) uyyi <- matrix(0,N,N) ubi <- matrix(0,m2,m) ubbi <- matrix(0,m2,m2) uybi <- matrix(0,N,m2) yest <- matrix(0,N,1) biest <- matrix(0,m2,m) yhi <- matrix(0,N,1) res <- vector(mode = "numeric", length = N) for (j in 1:m){ cc1 <- cc[(sum(nj[1:j-1])+1) : (sum(nj[1:j]))] y1 <- y[(sum(nj[1:j-1])+1) : (sum(nj[1:j]))] x1 <- matrix(x[(sum(nj[1:j-1])+1) : (sum(nj[1:j])), ],ncol=p) z1 <- matrix(z[(sum(nj[1:j-1])+1) : (sum(nj[1:j])) , ],ncol=q1) tt1 <- ttc[(sum(nj[1:j-1])+1) : (sum(nj[1:j]))] W1 <- x1 LL1 <- LL[(sum(nj[1:j-1])+1) : (sum(nj[1:j]))] LU1 <- LU[(sum(nj[1:j-1])+1) : (sum(nj[1:j]))] muii <- W1%*%gamma1 if(Arp==0){Gama=diag(1,nj[j]) eGamma=Gama*sigmae} if(Arp!=0){ eGamma<-MatArp(pii,tt1,sigmae) Gama <-eGamma/sigmae } invGama <- solve(Gama) SIGMA <- (sigmae*Gama + (z1)%*%D1%*%t(z1)) SIGMA <-(SIGMA+t(SIGMA))/2 SIGMAinv <- solve(SIGMA) Lambda1 <- solve(iD1 + (t(z1)%*%invGama%*%z1)*(1/sigmae)) Lambda1 <- (Lambda1 + t(Lambda1))/2 dm <- as.numeric(t(y1 - muii)%*%SIGMAinv%*%(y1-muii)) cdm <- as.numeric((nu+nj[j])/(nu+dm)) if(sum(cc1)==0) { u <- 
cdm uy <- matrix(y1,nj[j],1)*cdm uyy <- (y1%*%t(y1))*cdm ub <- (Lambda1%*%(t(z1)*(1/sigmae))%*%invGama)%*%(uy - u*muii) ubb <- Lambda1 + (Lambda1%*%(t(z1)*(1/sigmae))%*%invGama)%*%(uyy - uy%*%t(muii) - muii%*%t(uy) + u*muii%*%t(muii))%*%t(Lambda1%*%(t(z1)*(1/sigmae))%*%invGama) uyb <- (uyy - uy%*%t(muii))%*%(invGama%*%(z1*(1/sigmae))%*%Lambda1) yh <- matrix(y1,nj[j],1) best <- (Lambda1%*%(t(z1)*(1/sigmae))%*%invGama)%*%(yh - muii) Eu2yy <- (cdm^2)*(y1%*%t(y1)) Eu2y <- (cdm^2)*(matrix(y1,nj[j],1)) Eu2 <- cdm^2 E2 <- Eu2yy - Eu2y%*%t(muii) - muii%*%t(Eu2y) + Eu2*(muii)%*%t(muii) E1 <- (uy - u*(muii))%*%t(uy - u*(muii)) } if(sum(cc1)>=1) { if(sum(cc1)==nj[j]) { aux1U <- TruncatedNormal::pmvt(lb = as.vector(LL1), ub = as.vector(LU1), mu = as.vector(muii), df=(nu + 2), sigma = as.matrix((nu/(nu + 2))*SIGMA)) aux2U <- TruncatedNormal::pmvt(lb = as.vector(LL1), ub = as.vector(LU1), mu = as.vector(muii), df=nu, sigma = as.matrix(SIGMA)) u <- as.numeric(aux1U/aux2U) auxy <- relliptical::mvtelliptical(lower = as.vector(LL1),upper=as.vector(LU1),mu = as.vector(muii), Sigma = as.matrix((nu/(nu + 2))*SIGMA),dist = "t",nu=(nu+2)) uy <- u*auxy$EY uyy <- u*auxy$EYY ub <- (Lambda1%*%(t(z1)*(1/sigmae))%*%invGama)%*%(uy - u*muii) ubb <- Lambda1 + (Lambda1%*%(t(z1)*(1/sigmae))%*%invGama)%*%(uyy - uy%*%t(muii) - muii%*%t(uy) + u*muii%*%t(muii))%*%t(Lambda1%*%(t(z1)*(1/sigmae))%*%invGama) uyb <- (uyy - uy%*%t(muii))%*%(invGama%*%(z1*(1/sigmae))%*%Lambda1) auxb <- relliptical::mvtelliptical(lower = as.vector(LL1),upper=as.vector(LU1),mu = as.vector(muii), Sigma = as.matrix(SIGMA),dist = "t",nu=(nu)) yh <- auxb$EY best <- (Lambda1%*%(t(z1)*(1/sigmae))%*%invGama)%*%(yh - muii) cp <- (((nu+nj[j])/nu)^2)*((gamma((nu+nj[j])/2)*gamma((nu+4)/2))/(gamma(nu/2)*gamma((nu+nj[j]+4)/2))) auxw <- relliptical::mvtelliptical(lower = as.vector(LL1),upper=as.vector(LU1),mu = as.vector(muii), Sigma = as.matrix((nu/(nu + 4))*SIGMA),dist = "t",nu=(nu+4)) auxEU <- TruncatedNormal::pmvt(lb = as.vector(LL1), ub = as.vector(LU1), mu = as.vector(muii), df=(nu +4), sigma = as.matrix((nu/(nu + 4))*SIGMA)) Eu2yy <- cp*(auxEU/aux2U)*auxw$EYY Eu2y <- cp*(auxEU/aux2U)*auxw$EY Eu2 <- cp*(auxEU/aux2U) E2 <- Eu2yy - Eu2y%*%t(muii) - muii%*%t(Eu2y) + Eu2*(muii)%*%t(muii) E1 <- (uy - u*(muii))%*%t(uy - u*(muii)) } else{ muiic <- W1[cc1==1,]%*%gamma1 + SIGMA[cc1==1,cc1==0]%*%solve(SIGMA[cc1==0,cc1==0])%*%(y1[cc1==0]-W1[cc1==0,]%*%gamma1) Si <- SIGMA[cc1==1,cc1==1]-SIGMA[cc1==1,cc1==0]%*%solve(SIGMA[cc1==0,cc1==0])%*%SIGMA[cc1==0,cc1==1] Si <- (Si+t(Si))/2 Qy0 <- as.numeric(t(y1[cc1==0]-W1[cc1==0,]%*%gamma1)%*%solve(SIGMA[cc1==0,cc1==0])%*%(y1[cc1==0]-W1[cc1==0,]%*%gamma1)) auxQy0 <- as.numeric((nu + Qy0)/(nu + length(cc1[cc1==0]))) auxQy02 <- as.numeric((nu + Qy0)/(nu + 2 + length(cc1[cc1==0]))) Sc0 <- auxQy0*Si Sc0til <- auxQy02*Si LL1c <- LL1[cc1==1] LU1c <- LU1[cc1==1] aux1U <- TruncatedNormal::pmvt(lb = as.vector(LL1c), ub = as.vector(LU1c), mu = as.vector(muiic), df=(nu + 2 + length(cc1[cc1==0])), sigma = as.matrix(Sc0til)) aux2U <- TruncatedNormal::pmvt(lb = as.vector(LL1c), ub = as.vector(LU1c), mu = as.vector(muiic), df=(nu + length(cc1[cc1==0])), sigma = as.matrix(Sc0)) u <- as.numeric(aux1U/aux2U)*(1/auxQy0) Sc0til=round((Sc0til+t(Sc0til))/2,3) auxy <- relliptical::mvtelliptical(lower = as.vector(LL1c),upper=as.vector(LU1c),mu = as.vector(muiic), Sigma = as.matrix(Sc0til),dist = "t",nu=(nu + 2 + length(cc1[cc1==0]))) w1aux <- auxy$EY w2aux <- auxy$EYY uy <- matrix(y1,nj[j],1)*u uy[cc1==1] <- w1aux*u uyy <- y1%*%t(y1)*u uyy[cc1==0,cc1==1] 
<- u*y1[cc1==0]%*%t(w1aux) uyy[cc1==1,cc1==0] <- u*w1aux%*%t(y1[cc1==0]) uyy[cc1==1,cc1==1] <- u*w2aux uyy <- (uyy + t(uyy))/2 ub <- (Lambda1%*%(t(z1)*(1/sigmae))%*%invGama)%*%(uy - u*muii) ubb <- Lambda1 + (Lambda1%*%(t(z1)*(1/sigmae))%*%invGama)%*%(uyy - uy%*%t(muii) - muii%*%t(uy) + u*muii%*%t(muii))%*%t(Lambda1%*%(t(z1)*(1/sigmae))%*%invGama) uyb <- (uyy - uy%*%t(muii))%*%(invGama%*%(z1*(1/sigmae))%*%Lambda1) auxb <- relliptical::mvtelliptical(lower = as.vector(LL1c),upper=as.vector(LU1c),mu = as.vector(muiic), Sigma = as.matrix(Sc0),dist = "t",nu=(nu + length(cc1[cc1==0]))) yh <- matrix(y1,nj[j],1) yh[cc1==1] <- auxb$EY best <- (Lambda1%*%(t(z1)*(1/sigmae))%*%invGama)%*%(yh - muii) dp <- ((nu+nj[j])^2)*((gamma((nu+nj[j])/2)*gamma((nu+4+length(cc1[cc1==0]))/2))/(gamma((nu+length(cc1[cc1==0]))/2)*gamma((nu+4+nj[j])/2))) Sirc=as.numeric((nu + Qy0)/(nu + 4 + length(cc1[cc1==0])))*Si Sirc=round(Sirc,3) auxEU <- TruncatedNormal::pmvt(lb = as.vector(LL1c), ub = as.vector(LU1c), mu = as.vector(muiic), df=(nu + 2 + length(cc1[cc1==0])), sigma = as.matrix(Sc0til)) auxEw <- relliptical::mvtelliptical(lower = as.vector(LL1c),upper=as.vector(LU1c),mu = as.vector(muiic), Sigma = as.matrix(Sirc),dist = "t",nu=(nu + 4 + length(cc1[cc1==0]))) Ew1aux <- auxEw$EY Ew2aux <- auxEw$EYY Eu2yy <- (dp/((nu + Qy0)^2))*(auxEU/aux2U)*y1%*%t(y1) Eu2yy[cc1==0,cc1==1] <- (dp/((nu + Qy0)^2))*(auxEU/aux2U)*y1[cc1==0]%*%t(Ew1aux) Eu2yy[cc1==1,cc1==0] <- (dp/((nu + Qy0)^2))*(auxEU/aux2U)* Ew1aux%*%t(y1[cc1==0]) Eu2yy[cc1==1,cc1==1] <- (dp/((nu + Qy0)^2))*(auxEU/aux2U)*Ew2aux Eu2y <- (dp/((nu + Qy0)^2))*(auxEU/aux2U)*matrix(y1,nj[j],1) Eu2y[cc1==1] <- (dp/((nu + Qy0)^2))*(auxEU/aux2U)*Ew1aux Eu2 <- (dp/((nu + Qy0)^2))*(auxEU/aux2U) E2 <- Eu2yy - Eu2y%*%t(muii) - muii%*%t(Eu2y) + Eu2*(muii)%*%t(muii) E1 <- (uy - u*(muii))%*%t(uy - u*(muii)) } } soma1 <- soma1 + ubb soma2 <- soma2 + (sum(diag(uyy%*%invGama)) - t(uy)%*%invGama%*%muii - t(muii)%*%invGama%*%uy - sum(diag(t(uyb)%*%invGama%*%z1)) - sum(diag(uyb%*%t(z1)%*%invGama)) + t(muii)%*%invGama%*%z1%*%ub + t(ub)%*%t(z1)%*%invGama%*%muii + u*t(muii)%*%invGama%*%muii + sum(diag(ubb%*%t(z1)%*%invGama%*%z1))) soma3 <- soma3 + (u*t(x1)%*%invGama%*%x1) soma4 <- soma4 + (t(x1)%*%invGama%*%(uy - z1%*%ub)) soma7 <- soma7 + (((nu+nj[j])/(nu+nj[j]+2))*t(x1)%*%SIGMAinv%*%x1 - ((nu+nj[j]+2)/(nu+nj[j]))*t(x1)%*%SIGMAinv%*%(E2)%*%SIGMAinv%*%x1 + (t(x1)%*%SIGMAinv%*%(E1)%*%SIGMAinv%*%x1)) ui[j] <- u uyi[(sum(nj[1:j-1])+1) : (sum(nj[1:j])),j] <- uy uyyi[(sum(nj[1:j-1])+1) : (sum(nj[1:j])),(sum(nj[1:j-1])+1) : (sum(nj[1:j]))]<- uyy ubi[(((j-1)*q1)+1) : (j*q1), j] <- ub ubbi[(((j-1)*q1)+1) : (j*q1), (((j-1)*q1)+1) : (j*q1)]<- ubb uybi[(sum(nj[1:j-1])+1) : (sum(nj[1:j])),(((j-1)*q1)+1) : (j*q1)]<- uyb yest[(sum(nj[1:j-1])+1) : (sum(nj[1:j]))] <- z1%*%best + muii biest[(((j-1)*q1)+1) : (j*q1), j] <- best yhi[(sum(nj[1:j-1])+1) : (sum(nj[1:j]))] <- yh } yhatorg <- apply(yhi,1,sum) yfit <- apply(yest,1,sum) yfit[cc==1] <- yhatorg[cc==1] Infbetas[1:p,1:p] <- soma7 Infbetas <- (Infbetas + t(Infbetas))/2 beta1 <- solve(soma3)%*%soma4 gamma1 <- as.vector(c(beta1)) sigmae <- (1/N)*(soma2) sigmae <- as.numeric(sigmae) D1 <- (1/m)*(soma1) iD1 <- solve(D1) if(nu.fixed==FALSE) { nu <- optimize(f = logliktArplmec, interval = c(2.01,30), tol = 0.00001, maximum = TRUE,y=y,x=x,z=z, cc=cc,ttc=ttc,nj=nj,LL=LL,LU=LU,betas=beta1, sigmae=sigmae,D1=D1,pii=pii)$maximum } if(Arp!=0){ pii <- optim(pii, method = "L-BFGS-B", FCiArpt, lower =rep(-.999,Arp), upper =rep(.999,Arp), 
beta1=beta1,sigmae=sigmae,ttc=ttc,ubi=ubi,ubbi=ubbi,uybi=uybi,uyyi=uyyi,uyi=uyi,ui=ui,x=x,z=z,nj=nj,hessian=TRUE)$par phi=estphit(pii) } if(Arp==0){phi=0} loglik <- logliktArplmec(nu=nu,y=y,x=x,z=z,cc=cc,ttc=ttc,nj=nj,LL=LL,LU=LU,betas=beta1,sigmae=sigmae,D1=D1,pii) loglikp1 <- loglik if(count > 1){ criterio <- sqrt(((loglikp1/loglikp)-1)%*%((loglikp1/loglikp)-1)) setTkProgressBar(pb, count, label=paste("Iter ",count,"/",iter.max," - ",floor((count)/(iter.max)*100),"% done",sep = "")) } if(count==iter.max){criterio <- precision*0.0001} loglikp <- loglikp1 } for (k in 1:length(nj)) {tc<-ttc[(sum(nj[1:k-1])+1) : (sum(nj[1:k]))] if(Arp=="UNC"){Mq<-diag(1,length(tc))} if(Arp!="UNC"){Mq<- MatArpJ(phi,tc,sigmae)} res[(sum(nj[1:k-1])+1) : (sum(nj[1:k]))]=(sqrtm(solve(round(z[(sum(nj[1:k-1])+1) : (sum(nj[1:k])),]%*%D1%*%t(z[(sum(nj[1:k-1])+1) : (sum(nj[1:k])),])+sigmae*Mq,6)))%*%(yhatorg[(sum(nj[1:k-1])+1) : (sum(nj[1:k]))] -x[(sum(nj[1:k-1])+1) : (sum(nj[1:k])),]%*%beta1)) } dd <- D1[upper.tri(D1, diag = T)] npar <- length(c(teta)) AICc <- -2*loglikp + 2*npar BICc <- -2*loglikp + log(N)*npar SE=round(sqrt(diag(solve(Infbetas))),3) intPar=round(qt(0.975,nu)*SE,3) tableB = data.frame(round(beta1,3),SE,paste("<",round(beta1,3)-round(intPar,3),",",round(beta1,3)+round(intPar,3),">")) rownames(tableB) = paste("beta",1:p) colnames(tableB) = c("Est","SE","IConf(95%)") tableS = data.frame(round(sigmae,3)) rownames(tableS) = "Sigma^2" colnames(tableS) = c("Est") if(Arp!=0){ tableP = data.frame(round(phi,3)) rownames(tableP) = paste("Phi",1:Arp) colnames(tableP) = c("Est")} if(Arp==0){ tableP = NULL;phi=NULL} nnp=0 for(al in 1:dim(D1)[1]) {noa=paste(1:al,al,sep = "") nnp=c(nnp,noa) } nnp=nnp[-1] tableA = data.frame(round(dd,3)) rownames(tableA) = paste("Alpha",nnp) colnames(tableA) = c("Est") end.time <- Sys.time() time.taken <- end.time - start.time obj.out <- list(beta1 = beta1, sigmae= sigmae, phi=phi, dd = dd,nu=nu, loglik=loglik, AIC=AICc, BIC=BICc, iter = count, ubi = ubi, ubbi = ubbi, uybi = uybi, uyi = uyi, uyyi = uyyi,ui=ui, MI=Infbetas,residual=res, Prev= NULL, time=time.taken, SE=SE,tableB=tableB,tableS=tableS,tableP=tableP, tableA=tableA,yfit=yfit, yorg = yhatorg) if (count == iter.max) { setTkProgressBar(pb, iter.max, label=paste("MaxIter reached ",count,"/",iter.max," - 100 % done",sep = "")) close(pb) } else { setTkProgressBar(pb, iter.max, label=paste("Convergence at Iter ",count,"/",iter.max," - 100 % done",sep = "")) close(pb) } class(obj.out) <- "ARpTLMEC" return(obj.out) }
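
## Illustrative sketch (invented data, not part of the package): one way to
## call EMCensArpT() defined above. The fit is computationally heavy and opens
## a tcltk progress bar, so the example is guarded by `if (FALSE)`.
if (FALSE) {
  set.seed(2)
  nj  <- rep(4, 8); N <- sum(nj)
  x   <- cbind(1, rnorm(N))                  # fixed-effects design
  z   <- matrix(1, N, 1)                     # random intercept
  y   <- rnorm(N); ttc <- rep(1:4, 8)
  cut <- quantile(y, 0.1)                    # roughly 10% left-censoring
  cc  <- as.numeric(y < cut); y[cc == 1] <- cut
  fit <- EMCensArpT(cc = cc, y = y, x = x, z = z, ttc = ttc, nj = nj,
                    Arp = 1, initial = NULL, cens.type = "left",
                    LL = NULL, LU = NULL, nu.fixed = TRUE,
                    iter.max = 30, precision = 1e-4)
  fit$tableB                                 # fixed-effect estimates with 95% intervals
}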
## ---- end of file: ARpLMEC/R/EMCensArpT.R ----
estphit = function(pit){ p = length(pit) Phi = matrix(0,ncol=p,nrow=p) if (p>1) { diag(Phi) = pit for (j in 2:p) { for (k in 1:(j-1)) { Phi[j,k] = Phi[j-1,k] - pit[j]*Phi[j-1,j-k] } } return(Phi[p,]) } else return(pit) } MatArp<- function(pii,tt,sigma2){ if(min(tt)==0){tt=tt+1}else{tt=tt} A <- .Mat(pii =pii,n=max(tt),sigma2=sigma2) B <- matrix(NA,nrow=length(tt),ncol=length(tt)) ii <- 0 for(i in tt) { ii <- ii+1 jj <- 0 for(j in tt) { jj <- jj+1 B[ii,jj]<-A[i,j] } } return(B) } .Mat<-function(pii,n,sigma2){ p = length(pii) phi=estphit(pii) if (n==1) Rn = 1 else Rn = toeplitz(ARMAacf(ar=phi, ma=0, lag.max = n-1)) rhos = ARMAacf(ar=phi, ma=0, lag.max = p)[(1:p)+1] Rnx<-sigma2*Rn/(1-sum(rhos*phi)) Rnx<-(Rnx+t(Rnx))/2 return(Rnx) } MatArpJ<- function(phi,tt,sigma2){ if(min(tt)==0){tt=tt+1}else{tt=tt} A <-.MatJ(phi =phi,n=max(tt),sigma2=sigma2) B <- matrix(NA,nrow=length(tt),ncol=length(tt)) ii <- 0 for(i in tt) { ii <- ii+1 jj <- 0 for(j in tt) { jj <- jj+1 B[ii,jj]<-A[i,j] } } return(B) } .MatJ<-function(phi,n,sigma2){ p = length(phi) if (n==1) Rn = 1 else Rn = toeplitz(ARMAacf(ar=phi, ma=0, lag.max = n-1)) rhos = ARMAacf(ar=phi, ma=0, lag.max = p)[(1:p)+1] Rnx<-sigma2*Rn/(1-sum(rhos*phi)) Rnx<-(Rnx+t(Rnx))/2 return(Rnx) } logliktArplmec <- function(nu,y,x,z,cc,ttc,nj,LL,LU,betas,sigmae,D1,pii){ p <- dim(x)[2] m <- length(nj)[1] q1 <- dim(z)[2] gamma1 <- as.vector(c(betas)) iD1 <- solve(D1) iD1 <- (iD1 + t(iD1))/2 ver <- matrix(0,m,1) for(j in 1:m) { cc1 <- cc[(sum(nj[1:j-1])+1) : (sum(nj[1:j]))] y1 <- y[(sum(nj[1:j-1])+1) : (sum(nj[1:j]))] x1 <- matrix(x[(sum(nj[1:j-1])+1) : (sum(nj[1:j])), ],ncol=p) z1 <- matrix(z[(sum(nj[1:j-1])+1) : (sum(nj[1:j])) , ],ncol=q1) tt1 <- ttc[(sum(nj[1:j-1])+1) : (sum(nj[1:j]))] W1 <- x1 LL1 <- LL[(sum(nj[1:j-1])+1) : (sum(nj[1:j]))] LU1 <- LU[(sum(nj[1:j-1])+1) : (sum(nj[1:j]))] muii <- W1%*%gamma1 if(length(pii)>1 ) { eGamma<-MatArp(pii,tt1,sigmae) Gama <- eGamma/sigmae} if(length(pii)==1 ) { if(pii!=0 ){ eGamma<-MatArp(pii,tt1,sigmae) Gama <- eGamma/sigmae} if(pii==0) { Gama=diag(1,nj[j]) eGamma=Gama*sigmae} } invGama <- solve(Gama) SIGMA <- (sigmae*Gama + (z1)%*%D1%*%t(z1)) SIGMA <-(SIGMA+t(SIGMA))/2 SIGMAinv <- solve(SIGMA) Lambda1 <- solve(iD1 + (t(z1)%*%invGama%*%z1)*(1/sigmae)) Lambda1 <- (Lambda1 + t(Lambda1))/2 if(sum(cc1)==0) { ver[j,] <- suppressWarnings(LaplacesDemon::dmvt(x = as.vector(y1), mu = as.vector(muii), S = as.matrix(SIGMA), df = nu )) } if(sum(cc1)>=1) { if(sum(cc1)==nj[j]) { ver[j,] <- suppressWarnings(TruncatedNormal::pmvt(lb = as.vector(LL1),ub=as.vector(LU1), mu = as.vector(muii),df= nu, sigma = as.matrix(SIGMA))) } else{ muiic <- W1[cc1==1,]%*%gamma1 + SIGMA[cc1==1,cc1==0]%*%solve(SIGMA[cc1==0,cc1==0])%*%(y1[cc1==0]-W1[cc1==0,]%*%gamma1) Si <- SIGMA[cc1==1,cc1==1]-SIGMA[cc1==1,cc1==0]%*%solve(SIGMA[cc1==0,cc1==0])%*%SIGMA[cc1==0,cc1==1] Si <- (Si+t(Si))/2 Qy0 <- as.numeric(t(y1[cc1==0]-W1[cc1==0,]%*%gamma1)%*%solve(SIGMA[cc1==0,cc1==0])%*%(y1[cc1==0]-W1[cc1==0,]%*%gamma1)) auxQy0 <- as.numeric((nu + Qy0)/(nu + length(cc1[cc1==0]))) Sc0 <- auxQy0*Si LL1c <- LL1[cc1==1] LU1c <- LU1[cc1==1] ver[j,] <- suppressWarnings(LaplacesDemon::dmvt(x = as.vector(y1[cc1==0]),mu =as.vector(muii[cc1==0]),S =as.matrix(SIGMA[cc1==0,cc1==0]),df= nu)*as.numeric(TruncatedNormal::pmvt(lb = as.vector(LL1c),ub=as.vector(LU1c), mu = as.vector(muiic),df=nu, sigma = as.matrix(Sc0)))) } } } logvero <- sum(log(ver)) return(logvero) } FCiArpt <- function(pii,beta1,sigmae,ttc,ubi,ubbi,uybi,uyyi,uyi,ui,x,z,nj){ m <- length(nj)[1] p <- dim(x)[2] q1 <- dim(z)[2] 
beta1 <- as.vector(c(beta1)) soma <- 0 for (j in 1:m ){ x1 <- matrix(x[(sum(nj[1:j-1])+1) : (sum(nj[1:j])), ],ncol=p) z1 <- matrix(z[(sum(nj[1:j-1])+1) : (sum(nj[1:j])) , ],ncol=q1) muii <- x1%*%beta1 tt1 <- ttc[(sum(nj[1:j-1])+1) : (sum(nj[1:j]))] ub <- matrix(ubi[(((j-1)*q1)+1) : (j*q1), j], nrow=q1, ncol=1) ubb <- as.matrix(ubbi[(((j-1)*q1)+1) : (j*q1), (((j-1)*q1)+1) : (j*q1)]) uyb <- matrix(uybi[(sum(nj[1:j-1])+1) : (sum(nj[1:j])),(((j-1)*q1)+1) : (j*q1)], ncol=q1) uyy <- as.matrix(uyyi[(sum(nj[1:j-1])+1) : (sum(nj[1:j])),(sum(nj[1:j-1])+1) : (sum(nj[1:j]))]) uy <- matrix(uyi[(sum(nj[1:j-1])+1) : (sum(nj[1:j])),j],ncol=1) u <- ui[j] Cii <- MatArp(pii,tt1,sigmae)/sigmae Cii <- (Cii + t(Cii))/2 if(det(Cii)<=0){A <- 1}else{A <- det(Cii)} invCii <- solve(Cii) Ai <- as.vector(sum(diag(uyy%*%invCii)) - sum(diag(invCii%*%((uyb)%*%t(z1)))) - sum(diag(invCii%*%(z1%*%t(uyb)))) + sum(diag(ubb%*%t(z1)%*%invCii%*%z1)) - t(uy)%*%invCii%*%muii - t(muii)%*%invCii%*%uy + t(muii)%*%invCii%*%z1%*%ub + t(ub)%*%t(z1)%*%invCii%*%muii + u*t(muii)%*%invCii%*%muii) soma <- soma - 0.5*log(A) - (0.5/sigmae)*Ai } return(-soma) } FCiArp<-function(piis,beta1,sigmae, ubi,ubbi,uybi,uyyi,uyi,x,z,tt,nj){ m<-length(nj)[1] N<-sum(nj) p<-dim(x)[2] q1<-dim(z)[2] m1<-m*p m2<-m*q1 soma=0 for (j in 1:m ){ ub<-ubi[(((j-1)*q1)+1) : (j*q1), j] ubb<-ubbi[(((j-1)*q1)+1) : (j*q1), (((j-1)*q1)+1) : (j*q1)] uyb<-uybi[(sum(nj[1:j-1])+1) : (sum(nj[1:j])),(((j-1)*q1)+1) : (j*q1)] uyy<-uyyi[(sum(nj[1:j-1])+1) : (sum(nj[1:j])),(sum(nj[1:j-1])+1) : (sum(nj[1:j]))] uy<-uyi[(sum(nj[1:j-1])+1) : (sum(nj[1:j])),j] x1=matrix(x[(sum(nj[1:j-1])+1) : (sum(nj[1:j])), ],ncol=p) z1=matrix(z[(sum(nj[1:j-1])+1) : (sum(nj[1:j])) , ],ncol=q1) tt1=tt[(sum(nj[1:j-1])+1) : (sum(nj[1:j]))] gammai=x1%*%beta1 Cii<- MatArp(piis,tt1,sigmae)/sigmae Cii <- (Cii + t(Cii))/2 if(det(Cii)<=0){A <- 1}else{A <- det(Cii)} invCii <- solve(Cii) Ai= (sum(diag(uyy%*%solve(Cii)))- t(uy)%*%solve(Cii)%*%gammai- t(gammai)%*%solve(Cii)%*%uy- sum(diag(solve(Cii)%*%((uyb)%*%t(z1))))- sum(diag(solve(Cii)%*%((uyb)%*%t(z1))))+ t(gammai)%*%solve(Cii)%*%z1%*%ub+ t(ub)%*%t(z1)%*%solve(Cii)%*%gammai+ t(gammai)%*%solve(Cii)%*%gammai+ sum(diag(ubb%*%t(z1)%*%solve(Cii)%*%z1))) soma <- soma - 0.5*log(A) - (0.5/sigmae)*Ai } return(-soma) } Dbeta = function(beta,y,x,z,b,p) { n = length(y) D = matrix(0,p+1,p+1) for (ii in 1:(p+1)) { for (jj in 1:(p+1)) { D[ii,jj] = sum((y-x%*%beta-z%*%b)[ii:(n+1-jj)]*(y-x%*%beta-z%*%b)[jj:(n+1-ii)]) D[is.na(D)]<-0 } } return(D) } D11 = function(beta,y,x,z,b,p) matrix(Dbeta(beta,y,x,z,b,p)[1,1]) Dphi = function(beta,y,x,z,b,p) matrix(Dbeta(beta,y,x,z,b,p)[2:(p+1),1]) Dphiphi = function(beta,y,x,z,b,p) Dbeta(beta,y,x,z,b,p)[2:(p+1),2:(p+1)] dD<-function(M){ m1<-dim(M)[1] m2<-dim(M)[2] d<-list() for(i in 1:m1){ d[[i]]<-list() for(j in 1:(m2+1-i)){ d[[i]][[j]]<-matrix(0,m1,m2) if(j==1){d[[i]][[j]][i,i]<-1} else{ d[[i]][[j]][i,i+(j-1)]<-d[[i]][[j]][i+(j-1),i]<-1} } } return(d=d) } Jt = function(theta,y,x,z,tt,b,bb,p,Arp,D1) { l = p n = length(y) beta = matrix(theta[1:l]) sig2 = theta[l+1] if(Arp==0){ Mn = diag(1,n)*sig2 invMn = solve(Mn/sig2) spi= (t(y-x%*%beta-z%*%b)%*%invMn%*%(y-x%*%beta-z%*%b)) } if(Arp!=0){ phi = theta[(l+2):length(theta)] p = length(phi) Mn = MatArpJ(phi,tt,sig2) invMn = solve(Mn/sig2) lambda = matrix(c(-1,phi)) spi = t(lambda)%*%Dbeta(beta,y,x,z,b,p)%*%lambda} dbeta = 1/sig2*(t(x)%*%invMn%*%(y-z%*%b)- t(x)%*%invMn%*%x%*%beta) dsig2 = -n/(2*sig2) +spi/(2*sig2^2) if(length(D1)==1){ dD_alp = 1 dalpha<-rep(0,1) md2<-1 } if(length(D1)>1){ dD_alp = 
dD(D1) dalpha<-rep(0,length(D1[upper.tri(D1, diag = T)])) md2<-dim(D1)[1] } kont <- 0 for(i1 in 1:md2){ for(i2 in 1:(md2+1-i1)){ kont <- kont+1 di <- dD_alp[[i1]][[i2]] dalpha[kont] <- (-0.5)*sum(diag(solve(D1)%*%di-solve(D1)%*%di%*%solve(D1)*bb)) } } derivadas=cbind(t(dbeta),t(dsig2),t(dalpha)) if(Arp!=0){ gp = function(phi,sigma=sig2)ifelse(length(phi)==1,log(MatArpJ(phi,tt,sigma)) ,log(det(MatArpJ(phi,tt,sigma)))) dgp = matrix(jacobian(gp,phi)) dphi = -1/sig2*(-Dphi(beta,y,x,z,b,p) + Dphiphi(beta,y,x,z,b,p)%*%phi)-1/2*dgp derivadas=cbind(t(dbeta),t(dsig2),t(dphi),t(dalpha)) } return(derivadas) } MatDec <- function(tt,phi1,phi2,struc){ r <- length(tt) if(struc=="DEC" || struc=="DEC(AR)"){ if(phi2<=0.0000001){ W <- matrix(phi1,nrow=r,ncol=r) for (i in 1:r){W[i,i]<- 1} V <- W } else{ H <- (abs(outer(tt, tt, "-")))^phi2 V <- (phi1^H) } } if(struc=="SYM"){ W <- matrix(phi1,nrow=r,ncol=r) diag(W)<-1 V <- W } if(struc=="MA"){ W <- matrix(0,nrow=r,ncol=r) for (i in 1:r){ W[i,i]<- 1 for(j in 1:r){ dif <- abs(tt[i]-tt[j]) if(dif==1){W[i,j]= phi1}}} V <- W } if(struc=="UNC"){ W <- diag(1,nrow=r,ncol=r) V <- W } return(V) } FCit <- function(phiG,beta1,sigmae,ttc,ubi,ubbi,uybi,uyyi,uyi,ui,x,z,nj,struc){ phi1 <- phiG[1] phi2 <- phiG[2] m <- length(nj)[1] p <- dim(x)[2] q1 <- dim(z)[2] beta1 <- as.vector(c(beta1)) soma <- 0 for (j in 1:m ){ x1 <- matrix(x[(sum(nj[1:j-1])+1) : (sum(nj[1:j])), ],ncol=p) z1 <- matrix(z[(sum(nj[1:j-1])+1) : (sum(nj[1:j])) , ],ncol=q1) muii <- x1%*%beta1 tt1 <- ttc[(sum(nj[1:j-1])+1) : (sum(nj[1:j]))] ub <- matrix(ubi[(((j-1)*q1)+1) : (j*q1), j], nrow=q1, ncol=1) ubb <- as.matrix(ubbi[(((j-1)*q1)+1) : (j*q1), (((j-1)*q1)+1) : (j*q1)]) uyb <- matrix(uybi[(sum(nj[1:j-1])+1) : (sum(nj[1:j])),(((j-1)*q1)+1) : (j*q1)], ncol=q1) uyy <- as.matrix(uyyi[(sum(nj[1:j-1])+1) : (sum(nj[1:j])),(sum(nj[1:j-1])+1) : (sum(nj[1:j]))]) uy <- matrix(uyi[(sum(nj[1:j-1])+1) : (sum(nj[1:j])),j],ncol=1) u <- ui[j] Cii <- MatDec(tt1,phi1,phi2,struc) Cii <- (Cii + t(Cii))/2 if(det(Cii)<=0){A <- 1}else{A <- det(Cii)} invCii <- solve(Cii) Ai <- as.vector(sum(diag(uyy%*%invCii)) - sum(diag(invCii%*%((uyb)%*%t(z1)))) - sum(diag(invCii%*%(z1%*%t(uyb)))) + sum(diag(ubb%*%t(z1)%*%invCii%*%z1)) - t(uy)%*%invCii%*%muii - t(muii)%*%invCii%*%uy + t(muii)%*%invCii%*%z1%*%ub + t(ub)%*%t(z1)%*%invCii%*%muii + u*t(muii)%*%invCii%*%muii) soma <- soma - 0.5*log(A) - (0.5/sigmae)*Ai } return(-soma) } FCiphi1t <- function(phi1,phi2,beta1,sigmae,ttc,ubi,ubbi,uybi,uyyi,uyi,ui,x,z,nj,struc){ m <- length(nj)[1] p <- dim(x)[2] q1 <- dim(z)[2] beta1 <- as.vector(c(beta1)) soma <- 0 for (j in 1:m ){ x1 <- matrix(x[(sum(nj[1:j-1])+1) : (sum(nj[1:j])), ],ncol=p) z1 <- matrix(z[(sum(nj[1:j-1])+1) : (sum(nj[1:j])) , ],ncol=q1) muii <- x1%*%beta1 tt1 <- ttc[(sum(nj[1:j-1])+1) : (sum(nj[1:j]))] ub <- matrix(ubi[(((j-1)*q1)+1) : (j*q1), j], nrow=q1, ncol=1) ubb <- as.matrix(ubbi[(((j-1)*q1)+1) : (j*q1), (((j-1)*q1)+1) : (j*q1)]) uyb <- matrix(uybi[(sum(nj[1:j-1])+1) : (sum(nj[1:j])),(((j-1)*q1)+1) : (j*q1)], ncol=q1) uyy <- as.matrix(uyyi[(sum(nj[1:j-1])+1) : (sum(nj[1:j])),(sum(nj[1:j-1])+1) : (sum(nj[1:j]))]) uy <- matrix(uyi[(sum(nj[1:j-1])+1) : (sum(nj[1:j])),j],ncol=1) u <- ui[j] Cii <- MatDec(tt1,phi1,phi2,struc) Cii <- (Cii + t(Cii))/2 if(det(Cii)<=0){A <- 1}else{A <- det(Cii)} invCii <- solve(Cii) Ai <- as.vector(sum(diag(uyy%*%invCii)) - sum(diag(invCii%*%((uyb)%*%t(z1)))) - sum(diag(invCii%*%(z1%*%t(uyb)))) + sum(diag(ubb%*%t(z1)%*%invCii%*%z1)) - t(uy)%*%invCii%*%muii - t(muii)%*%invCii%*%uy + t(muii)%*%invCii%*%z1%*%ub 
+ t(ub)%*%t(z1)%*%invCii%*%muii + u*t(muii)%*%invCii%*%muii) soma <- soma - 0.5*log(A) - (0.5/sigmae)*Ai } return(-soma) } logliktslmec <- function(nu,y,x,z,cc,ttc,nj,LL,LU,betas,sigmae,D1,phi1,phi2,struc){ p <- dim(x)[2] m <- length(nj)[1] q1 <- dim(z)[2] gamma1 <- as.vector(c(betas)) iD1 <- solve(D1) iD1 <- (iD1 + t(iD1))/2 ver <- matrix(0,m,1) for(j in 1:m) { cc1 <- cc[(sum(nj[1:j-1])+1) : (sum(nj[1:j]))] y1 <- y[(sum(nj[1:j-1])+1) : (sum(nj[1:j]))] x1 <- matrix(x[(sum(nj[1:j-1])+1) : (sum(nj[1:j])), ],ncol=p) z1 <- matrix(z[(sum(nj[1:j-1])+1) : (sum(nj[1:j])) , ],ncol=q1) tt1 <- ttc[(sum(nj[1:j-1])+1) : (sum(nj[1:j]))] W1 <- x1 LL1 <- LL[(sum(nj[1:j-1])+1) : (sum(nj[1:j]))] LU1 <- LU[(sum(nj[1:j-1])+1) : (sum(nj[1:j]))] muii <- W1%*%gamma1 Gama <- MatDec(tt1,phi1,phi2,struc) invGama <- solve(Gama) SIGMA <- (sigmae*Gama + (z1)%*%D1%*%t(z1)) SIGMA <-(SIGMA+t(SIGMA))/2 SIGMAinv <- solve(SIGMA) Lambda1 <- solve(iD1 + (t(z1)%*%invGama%*%z1)*(1/sigmae)) Lambda1 <- (Lambda1 + t(Lambda1))/2 if(sum(cc1)==0) { ver[j,] <- suppressWarnings(LaplacesDemon::dmvt(x = as.vector(y1),mu = as.vector(muii), S = as.matrix(SIGMA), df = nu )) } if(sum(cc1)>=1) { if(sum(cc1)==nj[j]) { ver[j,] <- suppressWarnings(TruncatedNormal::pmvt(lb = as.vector(LL1),ub=as.vector(LU1), mu= as.vector(muii),df=nu,sigma = as.matrix(SIGMA) )) } else{ muiic <- W1[cc1==1,]%*%gamma1 + SIGMA[cc1==1,cc1==0]%*%solve(SIGMA[cc1==0,cc1==0])%*%(y1[cc1==0]-W1[cc1==0,]%*%gamma1) Si <- SIGMA[cc1==1,cc1==1]-SIGMA[cc1==1,cc1==0]%*%solve(SIGMA[cc1==0,cc1==0])%*%SIGMA[cc1==0,cc1==1] Si <- (Si+t(Si))/2 Qy0 <- as.numeric(t(y1[cc1==0]-W1[cc1==0,]%*%gamma1)%*%solve(SIGMA[cc1==0,cc1==0])%*%(y1[cc1==0]-W1[cc1==0,]%*%gamma1)) auxQy0 <- as.numeric((nu + Qy0)/(nu + length(cc1[cc1==0]))) Sc0 <- auxQy0*Si LL1c <- LL1[cc1==1] LU1c <- LU1[cc1==1] ver[j,] <- suppressWarnings(LaplacesDemon::dmvt(x = as.vector(y1[cc1==0]),mu =as.vector(muii[cc1==0]),S =as.matrix(SIGMA[cc1==0,cc1==0]),df = nu)*as.numeric(TruncatedNormal::pmvt(lb = as.vector(LL1c),ub=as.vector(LU1c), mu = as.vector(muiic),df=nu,sigma = as.matrix(Sc0)))) } } } logvero <- sum(log(ver)) return(logvero) } MatAr1<-function(tt,rho,gamma,sigma2){ H <- (abs(outer(tt, tt, "-")))^gamma diag(H)<-0 V <- sigma2*(rho^H) return(V) } DevEiAr1<-function(tt,rho,gamma,sigma2){ if(gamma<=0.0000001) { r <- length(tt) devR_r <- matrix(1,nrow=r,ncol=r) diag(devR_r) <- 0 devR_g <- matrix(0,nrow=r,ncol=r) } else { func1 <- function(x,y){(abs(x-y))^gamma*rho^((abs(x-y))^gamma-1)} H1 <- (outer(tt, tt, func1)) diag(H1) <- 0 func2 <- function(x,y){(abs(x-y))^gamma*log(abs(x-y))*log(rho)*rho^((abs(x-y))^gamma)} H2 <- (outer(tt, tt, func2)) diag(H2) <- 0 devR_r <- H1 devR_g <- H2 } obj.out <- list(devR_r = devR_r, devR_g = devR_g) return(obj.out) } Derivadas<-function(M){ m1<-dim(M)[1] m2<-dim(M)[2] d<-list() for(i in 1:m1){ d[[i]]<-list() for(j in 1:(m2+1-i)){ d[[i]][[j]]<-matrix(0,m1,m2) if(j==1){d[[i]][[j]][i,i]<-1} else{ d[[i]][[j]][i,i+(j-1)]<-d[[i]][[j]][i+(j-1),i]<-1} } } return(d=d) } FCi<-function(rhoG,beta1,sigmae,tt,ubi,ubbi,uybi,uyyi,uyi,xi,zi,nj){ rho<-rhoG[1] gamma<-rhoG[2] m<-length(nj) N<-sum(nj) p<-length(beta1) q1<-dim(ubi)[1]/dim(ubi)[2] m1<-m*p m2<-m*q1 soma=0 for (j in 1:m ){ ub<-ubi[(((j-1)*q1)+1) : (j*q1), j] ubb<-ubbi[(((j-1)*q1)+1) : (j*q1), (((j-1)*q1)+1) : (j*q1)] uyb<-uybi[(sum(nj[1:j-1])+1) : (sum(nj[1:j])),(((j-1)*q1)+1) : (j*q1)] uyy<-uyyi[(sum(nj[1:j-1])+1) : (sum(nj[1:j])),(sum(nj[1:j-1])+1) : (sum(nj[1:j]))] uy<-uyi[(sum(nj[1:j-1])+1) : (sum(nj[1:j])),j] z1=zi[(sum(nj[1:j-1])+1) : 
(sum(nj[1:j])), (((j-1)*q1)+1) : (j*q1)] x1=xi[(sum(nj[1:j-1])+1) : (sum(nj[1:j])), (((j-1)*p)+1) : (j*p)] tt1=tt[(sum(nj[1:j-1])+1) : (sum(nj[1:j]))] gammai=x1%*%beta1 Cii<-MatAr1(tt1,rho,gamma,sigmae) if(nj[j]>1){ soma<- soma - 0.5*log(det(Cii))-0.5*(sum(diag(uyy%*%solve(Cii)))-t(uy)%*%solve(Cii)%*%gammai-t(gammai)%*%solve(Cii)%*%uy-sum(diag(solve(Cii)%*%((uyb)%*%t(z1))))-sum(diag(solve(Cii)%*%(z1%*%t(uyb)))) +t(gammai)%*%solve(Cii)%*%z1%*%ub+t(ub)%*%t(z1)%*%solve(Cii)%*%gammai+t(gammai)%*%solve(Cii)%*%gammai+sum(diag(ubb%*%t(z1)%*%solve(Cii)%*%z1))) } if(nj[j]==1){ soma<- soma - 0.5*log(det(Cii))-0.5*(sum(diag(uyy%*%solve(Cii)))-t(uy)%*%solve(Cii)%*%gammai-t(gammai)%*%solve(Cii)%*%uy-sum(diag(solve(Cii)%*%((uyb)%*%z1)))-sum(diag(solve(Cii)%*%(t(z1)%*%(uyb)))) +t(gammai)%*%solve(Cii)%*%z1%*%ub+t(ub)%*%z1%*%solve(Cii)%*%gammai+t(gammai)%*%solve(Cii)%*%gammai+sum(diag(ubb%*%z1%*%solve(Cii)%*%z1))) } } return(-soma) } FCi_gamma<-function(rhoG,gamma,beta1,sigmae,tt,ubi,ubbi,uybi,uyyi,uyi,xi,zi,nj){ rho<-rhoG m<-length(nj) N<-sum(nj) p<-length(beta1) q1<-dim(ubi)[1]/dim(ubi)[2] m1<-m*p m2<-m*q1 soma=0 for (j in 1:m ){ ub<-ubi[(((j-1)*q1)+1) : (j*q1), j] ubb<-ubbi[(((j-1)*q1)+1) : (j*q1), (((j-1)*q1)+1) : (j*q1)] uyb<-uybi[(sum(nj[1:j-1])+1) : (sum(nj[1:j])),(((j-1)*q1)+1) : (j*q1)] uyy<-uyyi[(sum(nj[1:j-1])+1) : (sum(nj[1:j])),(sum(nj[1:j-1])+1) : (sum(nj[1:j]))] uy<-uyi[(sum(nj[1:j-1])+1) : (sum(nj[1:j])),j] z1=zi[(sum(nj[1:j-1])+1) : (sum(nj[1:j])), (((j-1)*q1)+1) : (j*q1)] x1=xi[(sum(nj[1:j-1])+1) : (sum(nj[1:j])), (((j-1)*p)+1) : (j*p)] tt1=tt[(sum(nj[1:j-1])+1) : (sum(nj[1:j]))] gammai=x1%*%beta1 Cii<-MatAr1(tt1,rho,gamma,sigmae) soma<- soma - 0.5*log(det(Cii))-0.5*(sum(diag(uyy%*%solve(Cii)))-t(uy)%*%solve(Cii)%*%gammai-t(gammai)%*%solve(Cii)%*%uy-sum(diag(solve(Cii)%*%((uyb)%*%t(z1))))-sum(diag(solve(Cii)%*%(z1%*%t(uyb)))) +t(gammai)%*%solve(Cii)%*%z1%*%ub+t(ub)%*%t(z1)%*%solve(Cii)%*%gammai+t(gammai)%*%solve(Cii)%*%gammai+sum(diag(ubb%*%t(z1)%*%solve(Cii)%*%z1))) } return(-soma) } logliknArplmec <- function(y,x,z,cc,ttc,nj,LL,LU,betas,sigmae,D1,pii){ p <- dim(x)[2] m <- length(nj)[1] q1 <- dim(z)[2] gamma1 <- as.vector(c(betas)) iD1 <- solve(D1) iD1 <- (iD1 + t(iD1))/2 ver <- matrix(0,m,1) for(j in 1:m) { cc1 <- cc[(sum(nj[1:j-1])+1) : (sum(nj[1:j]))] y1 <- y[(sum(nj[1:j-1])+1) : (sum(nj[1:j]))] x1 <- matrix(x[(sum(nj[1:j-1])+1) : (sum(nj[1:j])), ],ncol=p) z1 <- matrix(z[(sum(nj[1:j-1])+1) : (sum(nj[1:j])) , ],ncol=q1) tt1 <- ttc[(sum(nj[1:j-1])+1) : (sum(nj[1:j]))] W1 <- x1 LL1 <- LL[(sum(nj[1:j-1])+1) : (sum(nj[1:j]))] LU1 <- LU[(sum(nj[1:j-1])+1) : (sum(nj[1:j]))] muii <- W1%*%gamma1 if(length(pii)>1 ) { eGamma<-MatArp(pii,tt1,sigmae) Gama <- eGamma/sigmae} if(length(pii)==1 ) { if(pii!=0 ){ eGamma<-MatArp(pii,tt1,sigmae) Gama <- eGamma/sigmae} if(pii==0) { Gama=diag(1,nj[j]) eGamma=Gama*sigmae} } invGama <- solve(Gama) SIGMA <- (sigmae*Gama + (z1)%*%D1%*%t(z1)) SIGMA <-(SIGMA+t(SIGMA))/2 SIGMAinv <- solve(SIGMA) Lambda1 <- solve(iD1 + (t(z1)%*%invGama%*%z1)*(1/sigmae)) Lambda1 <- (Lambda1 + t(Lambda1))/2 if(sum(cc1)==0) { ver[j,] <- suppressWarnings(LaplacesDemon::dmvn(x = as.vector(y1),mu = as.vector(muii),Sigma = as.matrix(SIGMA))) } if(sum(cc1)>=1) { if(sum(cc1)==nj[j]) { ver[j,] <- suppressWarnings(TruncatedNormal::pmvnorm(lb = as.vector(LL1),ub=as.vector(LU1), mu = as.vector(muii),sigma = as.matrix(SIGMA))) } else{ muiic <- W1[cc1==1,]%*%gamma1 + SIGMA[cc1==1,cc1==0]%*%solve(SIGMA[cc1==0,cc1==0])%*%(y1[cc1==0]-W1[cc1==0,]%*%gamma1) Si <- 
SIGMA[cc1==1,cc1==1]-SIGMA[cc1==1,cc1==0]%*%solve(SIGMA[cc1==0,cc1==0])%*%SIGMA[cc1==0,cc1==1] Si <- (Si+t(Si))/2 Sc0 <- Si LL1c <- LL1[cc1==1] LU1c <- LU1[cc1==1] ver[j,] <- suppressWarnings(LaplacesDemon::dmvn(x = as.vector(y1[cc1==0]),mu =as.vector(muii[cc1==0]), Sigma =as.matrix(SIGMA[cc1==0,cc1==0]))* as.numeric(TruncatedNormal::pmvnorm(lb= as.vector(LL1c),ub=as.vector(LU1c), mu = as.vector(muiic), sigma = as.matrix(Sc0)))) } } } logvero <- sum(log(ver)) return(logvero) } logliknslmec <- function(y,x,z,cc,ttc,nj,LL,LU,betas,sigmae,D1,phi1,phi2,struc){ p <- dim(x)[2] m <- length(nj)[1] q1 <- dim(z)[2] gamma1 <- as.vector(c(betas)) iD1 <- solve(D1) iD1 <- (iD1 + t(iD1))/2 ver <- matrix(0,m,1) for(j in 1:m) { cc1 <- cc[(sum(nj[1:j-1])+1) : (sum(nj[1:j]))] y1 <- y[(sum(nj[1:j-1])+1) : (sum(nj[1:j]))] x1 <- matrix(x[(sum(nj[1:j-1])+1) : (sum(nj[1:j])), ],ncol=p) z1 <- matrix(z[(sum(nj[1:j-1])+1) : (sum(nj[1:j])) , ],ncol=q1) tt1 <- ttc[(sum(nj[1:j-1])+1) : (sum(nj[1:j]))] W1 <- x1 LL1 <- LL[(sum(nj[1:j-1])+1) : (sum(nj[1:j]))] LU1 <- LU[(sum(nj[1:j-1])+1) : (sum(nj[1:j]))] muii <- W1%*%gamma1 Gama <- MatDec(tt1,phi1,phi2,struc) invGama <- solve(Gama) SIGMA <- (sigmae*Gama + (z1)%*%D1%*%t(z1)) SIGMA <-(SIGMA+t(SIGMA))/2 SIGMAinv <- solve(SIGMA) Lambda1 <- solve(iD1 + (t(z1)%*%invGama%*%z1)*(1/sigmae)) Lambda1 <- (Lambda1 + t(Lambda1))/2 if(sum(cc1)==0) { ver[j,] <- suppressWarnings(LaplacesDemon::dmvn(x = as.vector(y1),mu = as.vector(muii),Sigma = as.matrix(SIGMA))) } if(sum(cc1)>=1) { if(sum(cc1)==nj[j]) { ver[j,] <- suppressWarnings(TruncatedNormal::pmvnorm(lb = as.vector(LL1),ub=as.vector(LU1), mu = as.vector(muii),sigma = as.matrix(SIGMA))) } else{ muiic <- W1[cc1==1,]%*%gamma1 + SIGMA[cc1==1,cc1==0]%*%solve(SIGMA[cc1==0,cc1==0])%*%(y1[cc1==0]-W1[cc1==0,]%*%gamma1) Si <- SIGMA[cc1==1,cc1==1]-SIGMA[cc1==1,cc1==0]%*%solve(SIGMA[cc1==0,cc1==0])%*%SIGMA[cc1==0,cc1==1] Si <- (Si+t(Si))/2 Sc0 <-Si LL1c <- LL1[cc1==1] LU1c <- LU1[cc1==1] ver[j,] <- suppressWarnings(LaplacesDemon::dmvn(x = as.vector(y1[cc1==0]),mu =as.vector(muii[cc1==0]),Sigma =as.matrix(SIGMA[cc1==0,cc1==0]))* as.numeric(TruncatedNormal::pmvnorm(lb = as.vector(LL1c),ub=as.vector(LU1c), mu = as.vector(muiic),sigma = as.matrix(Sc0)))) } } } logvero <- sum(log(ver)) return(logvero) } logliktArplmec_o <- function(nu,y,x,z,cc,ttc,nj,LL,LU,betas,sigmae,D1,pii){ p <- dim(x)[2] m <- length(nj)[1] q1 <- dim(z)[2] gamma1 <- as.vector(c(betas)) iD1 <- solve(D1) iD1 <- (iD1 + t(iD1))/2 ver <- matrix(0,m,1) for(j in 1:m) { cc1 <- cc[(sum(nj[1:j-1])+1) : (sum(nj[1:j]))] y1 <- y[(sum(nj[1:j-1])+1) : (sum(nj[1:j]))] x1 <- matrix(x[(sum(nj[1:j-1])+1) : (sum(nj[1:j])), ],ncol=p) z1 <- matrix(z[(sum(nj[1:j-1])+1) : (sum(nj[1:j])) , ],ncol=q1) tt1 <- ttc[(sum(nj[1:j-1])+1) : (sum(nj[1:j]))] W1 <- x1 LL1 <- LL[(sum(nj[1:j-1])+1) : (sum(nj[1:j]))] LU1 <- LU[(sum(nj[1:j-1])+1) : (sum(nj[1:j]))] muii <- W1%*%gamma1 if(length(pii)>1 ) { eGamma<-MatArp(pii,tt1,sigmae) Gama <- eGamma/sigmae} if(length(pii)==1 ) { if(pii!=0 ){ eGamma<-MatArp(pii,tt1,sigmae) Gama <- eGamma/sigmae} if(pii==0) { Gama=diag(1,nj[j]) eGamma=Gama*sigmae} } invGama <- solve(Gama) SIGMA <- (sigmae*Gama + (z1)%*%D1%*%t(z1)) SIGMA <-(SIGMA+t(SIGMA))/2 SIGMAinv <- solve(SIGMA) Lambda1 <- solve(iD1 + (t(z1)%*%invGama%*%z1)*(1/sigmae)) Lambda1 <- (Lambda1 + t(Lambda1))/2 if(sum(cc1)==0) { ver[j,] <- suppressWarnings(LaplacesDemon::dmvt(x = as.vector(y1), mu = as.vector(muii), S = as.matrix(SIGMA), df = nu )) } if(sum(cc1)>=1) { if(sum(cc1)==nj[j]) { ver[j,] <- 
suppressWarnings(TruncatedNormal::pmvt(lb = as.vector(LL1),ub=as.vector(LU1), mu = as.vector(muii),df= nu, sigma = as.matrix(SIGMA))) } else{ muiic <- W1[cc1==1,]%*%gamma1 + SIGMA[cc1==1,cc1==0]%*%solve(SIGMA[cc1==0,cc1==0])%*%(y1[cc1==0]-W1[cc1==0,]%*%gamma1) Si <- SIGMA[cc1==1,cc1==1]-SIGMA[cc1==1,cc1==0]%*%solve(SIGMA[cc1==0,cc1==0])%*%SIGMA[cc1==0,cc1==1] Si <- (Si+t(Si))/2 Qy0 <- as.numeric(t(y1[cc1==0]-W1[cc1==0,]%*%gamma1)%*%solve(SIGMA[cc1==0,cc1==0])%*%(y1[cc1==0]-W1[cc1==0,]%*%gamma1)) auxQy0 <- as.numeric((nu + Qy0)/(nu + length(cc1[cc1==0]))) Sc0 <- auxQy0*Si LL1c <- LL1[cc1==1] LU1c <- LU1[cc1==1] ver[j,] <- suppressWarnings(LaplacesDemon::dmvt(x = as.vector(y1[cc1==0]),mu =as.vector(muii[cc1==0]),S =as.matrix(SIGMA[cc1==0,cc1==0]),df= nu)*as.numeric(TruncatedNormal::pmvt(lb = as.vector(LL1c),ub=as.vector(LU1c), mu = as.vector(muiic),df=nu, sigma = as.matrix(Sc0)))) } } } logvero <- sum(-log(ver)) return(logvero) } logliktslmec_o <- function(nu,y,x,z,cc,ttc,nj,LL,LU,betas,sigmae,D1,phi1,phi2,struc){ p <- dim(x)[2] m <- length(nj)[1] q1 <- dim(z)[2] gamma1 <- as.vector(c(betas)) iD1 <- solve(D1) iD1 <- (iD1 + t(iD1))/2 ver <- matrix(0,m,1) for(j in 1:m) { cc1 <- cc[(sum(nj[1:j-1])+1) : (sum(nj[1:j]))] y1 <- y[(sum(nj[1:j-1])+1) : (sum(nj[1:j]))] x1 <- matrix(x[(sum(nj[1:j-1])+1) : (sum(nj[1:j])), ],ncol=p) z1 <- matrix(z[(sum(nj[1:j-1])+1) : (sum(nj[1:j])) , ],ncol=q1) tt1 <- ttc[(sum(nj[1:j-1])+1) : (sum(nj[1:j]))] W1 <- x1 LL1 <- LL[(sum(nj[1:j-1])+1) : (sum(nj[1:j]))] LU1 <- LU[(sum(nj[1:j-1])+1) : (sum(nj[1:j]))] muii <- W1%*%gamma1 Gama <- MatDec(tt1,phi1,phi2,struc) invGama <- solve(Gama) SIGMA <- (sigmae*Gama + (z1)%*%D1%*%t(z1)) SIGMA <-(SIGMA+t(SIGMA))/2 SIGMAinv <- solve(SIGMA) Lambda1 <- solve(iD1 + (t(z1)%*%invGama%*%z1)*(1/sigmae)) Lambda1 <- (Lambda1 + t(Lambda1))/2 if(sum(cc1)==0) { ver[j,] <- suppressWarnings(LaplacesDemon::dmvt(x = as.vector(y1),mu = as.vector(muii), S = as.matrix(SIGMA), df = nu )) } if(sum(cc1)>=1) { if(sum(cc1)==nj[j]) { ver[j,] <- suppressWarnings(TruncatedNormal::pmvt(lb = as.vector(LL1),ub=as.vector(LU1), mu= as.vector(muii),df=nu,sigma = as.matrix(SIGMA) )) } else{ muiic <- W1[cc1==1,]%*%gamma1 + SIGMA[cc1==1,cc1==0]%*%solve(SIGMA[cc1==0,cc1==0])%*%(y1[cc1==0]-W1[cc1==0,]%*%gamma1) Si <- SIGMA[cc1==1,cc1==1]-SIGMA[cc1==1,cc1==0]%*%solve(SIGMA[cc1==0,cc1==0])%*%SIGMA[cc1==0,cc1==1] Si <- (Si+t(Si))/2 Qy0 <- as.numeric(t(y1[cc1==0]-W1[cc1==0,]%*%gamma1)%*%solve(SIGMA[cc1==0,cc1==0])%*%(y1[cc1==0]-W1[cc1==0,]%*%gamma1)) auxQy0 <- as.numeric((nu + Qy0)/(nu + length(cc1[cc1==0]))) Sc0 <- auxQy0*Si LL1c <- LL1[cc1==1] LU1c <- LU1[cc1==1] ver[j,] <- suppressWarnings(LaplacesDemon::dmvt(x = as.vector(y1[cc1==0]),mu =as.vector(muii[cc1==0]),S =as.matrix(SIGMA[cc1==0,cc1==0]),df = nu)*as.numeric(TruncatedNormal::pmvt(lb = as.vector(LL1c),ub=as.vector(LU1c), mu = as.vector(muiic),df=nu,sigma = as.matrix(Sc0)))) } } } logvero <- sum(-log(ver)) return(logvero) }
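
## Illustrative sketch (values invented): the PACF parameterisation used
## throughout this file. estphit() runs the Levinson-Durbin recursion mapping
## partial autocorrelations in (-1, 1) to stationary AR coefficients, and
## MatArp() builds the implied within-subject covariance matrix.
if (FALSE) {
  pii <- c(0.4, -0.2)                   # partial autocorrelations
  phi <- estphit(pii)                   # AR(2) coefficients
  all(abs(polyroot(c(1, -phi))) > 1)    # TRUE: the mapped AR(2) process is stationary
  MatArp(pii, tt = 1:5, sigma2 = 1)     # 5 x 5 covariance matrix at times 1..5
}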
## ---- end of file: ARpLMEC/R/FunGeral.R ----
MMsimu <- function(m, x, z, tt, nj, beta, sigmae, D, phi, struc, typeModel,
                   percCensu, nivel.Censu, cens.type, nu){
  Arp <- length(phi)
  # draw uniform design matrices when none are supplied
  if(is.null(x)){ x <- matrix(runif(sum(nj)*length(beta), -1, 1), sum(nj), length(beta)); rx <- "yes" }
  if(is.null(z)){ z <- matrix(runif(sum(nj)*dim(D)[1], -1, 1), sum(nj), dim(D)[1]); rz <- "yes" }
  y <- matrix(0, sum(nj), 1)
  if(typeModel=="Normal"){
    if(struc=="ARp"){
      for(i in 1:m){
        tt1 <- tt[(sum(nj[1:i-1])+1) : (sum(nj[1:i]))]
        n   <- length(tt1)
        ome2   <- MatArpJ(phi, tt1, sigmae)
        errorp <- as.vector(LaplacesDemon::rmvn(n=1, mu=rep(0,n), Sigma=ome2))
        b      <- LaplacesDemon::rmvn(n=1, mu=rep(0,dim(D)[1]), Sigma=D)
        y[(sum(nj[1:i-1])+1) : (sum(nj[1:i])), ] <-
          x[(sum(nj[1:i-1])+1) : (sum(nj[1:i])), ]%*%beta +
          z[(sum(nj[1:i-1])+1) : (sum(nj[1:i])), ]%*%t(b) + errorp
      }
    }
    if(struc!="ARp"){
      for(i in 1:m){
        tt1 <- tt[(sum(nj[1:i-1])+1) : (sum(nj[1:i]))]
        n   <- length(tt1)
        ome2   <- MatDec(tt1, phi[1], phi[2], struc)  # MatDec ignores phi when struc == "UNC"
        Omegai <- sigmae*ome2
        errorp <- as.vector(LaplacesDemon::rmvn(n=1, mu=rep(0,n), Sigma=Omegai))
        b      <- LaplacesDemon::rmvn(n=1, mu=rep(0,dim(D)[1]), Sigma=D)
        y[(sum(nj[1:i-1])+1) : (sum(nj[1:i])), ] <-
          x[(sum(nj[1:i-1])+1) : (sum(nj[1:i])), ]%*%beta +
          z[(sum(nj[1:i-1])+1) : (sum(nj[1:i])), ]%*%t(b) + errorp
      }
    }
  }
  if(typeModel=="Student"){
    if(struc=="ARp"){
      for(i in 1:m){
        tt1 <- tt[(sum(nj[1:i-1])+1) : (sum(nj[1:i]))]
        n   <- length(tt1)
        ome2   <- MatArpJ(phi, tt1, sigmae)
        errorp <- as.vector(LaplacesDemon::rmvt(n=1, mu=rep(0,n), S=ome2, df=nu))
        b      <- LaplacesDemon::rmvt(n=1, mu=rep(0,dim(D)[1]), S=D, df=nu)
        y[(sum(nj[1:i-1])+1) : (sum(nj[1:i])), ] <-
          x[(sum(nj[1:i-1])+1) : (sum(nj[1:i])), ]%*%beta +
          z[(sum(nj[1:i-1])+1) : (sum(nj[1:i])), ]%*%t(b) + errorp
      }
    }
    if(struc!="ARp"){
      for(i in 1:m){
        tt1 <- tt[(sum(nj[1:i-1])+1) : (sum(nj[1:i]))]
        n   <- length(tt1)
        ome2   <- MatDec(tt1, phi[1], phi[2], struc)
        Omegai <- sigmae*ome2
        b      <- LaplacesDemon::rmvt(n=1, mu=rep(0,dim(D)[1]), S=D, df=nu)
        errorp <- as.vector(LaplacesDemon::rmvt(n=1, mu=rep(0,nj[i]), S=Omegai, df=nu))
        y[(sum(nj[1:i-1])+1) : (sum(nj[1:i])), ] <-
          x[(sum(nj[1:i-1])+1) : (sum(nj[1:i])), ]%*%beta +
          z[(sum(nj[1:i-1])+1) : (sum(nj[1:i])), ]%*%t(b) + errorp
      }
    }
  }
  yy   <- y
  y_cc <- y
  cc   <- rep(0, length(y_cc))
  # censor a given proportion of the observations
  if(!is.null(percCensu)) {
    if(percCensu!=0) {
      if(cens.type=="left") {
        aa <- sort(y, decreasing = FALSE)
        bb <- aa[1:(percCensu*sum(nj))]
        cutof <- bb[percCensu*sum(nj)]
        cc <- matrix(1, sum(nj), 1)*(y < cutof)
        y[cc==1] <- cutof
        y_cc <- y
      }
      if(cens.type=="right") {
        aa <- sort(y, decreasing = TRUE)
        bb <- aa[1:(percCensu*sum(nj))]
        cutof <- bb[percCensu*sum(nj)]
        cc <- matrix(1, sum(nj), 1)*(y > cutof)
        y[cc==1] <- cutof
        y_cc <- y
      }
      if(cens.type=="interval") {
        aa  <- sort(y, decreasing = FALSE)
        bbi <- aa[1:(percCensu*sum(nj)*0.5)]
        aa  <- sort(y, decreasing = TRUE)
        bbs <- aa[1:(percCensu*sum(nj)*0.5)]
        cutofi <- bbi[percCensu*sum(nj)*0.5]
        cutofs <- bbs[percCensu*sum(nj)*0.5]
        cci <- matrix(1, sum(nj), 1)*(y < cutofi)
        y[cci==1] <- cutofi
        ccs <- matrix(1, sum(nj), 1)*(y > cutofs)
        y[ccs==1] <- cutofs
        y_cc <- y
        cc <- cci + ccs
      }
    }
  }
  # censor at fixed detection levels
  if(!is.null(nivel.Censu)) {
    if(length(nivel.Censu)==1) {
      if(cens.type=="left") {
        cutof <- nivel.Censu
        cc <- matrix(1, sum(nj), 1)*(y < cutof)
        y[cc==1] <- cutof
        y_cc <- y
      }
      if(cens.type=="right") {
        cutof <- nivel.Censu
        cc <- matrix(1, sum(nj), 1)*(y > cutof)
        y[cc==1] <- cutof
        y_cc <- y
      }
    }
    if(length(nivel.Censu)>1) {
      if(cens.type=="interval") {
        cutofi <- nivel.Censu[1]
        cutofs <- nivel.Censu[2]
        cci <- matrix(1, sum(nj), 1)*(y < cutofi)
        y[cci==1] <- cutofi
        ccs <- matrix(1, sum(nj), 1)*(y > cutofs)
        y[ccs==1] <- cutofs
        y_cc <- y
        cc <- cci + ccs
      }
    }
  }
  return(list(cc=cc, y_cc=y_cc))
}
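
## Illustrative sketch (all design values invented): simulating a small
## left-censored normal AR(1) dataset with MMsimu(). Passing x = NULL and
## z = NULL makes the function draw uniform design matrices itself.
if (FALSE) {
  set.seed(1)
  nj  <- rep(5, 10)                       # 10 subjects, 5 occasions each
  sim <- MMsimu(m = 10, x = NULL, z = NULL, tt = rep(1:5, 10), nj = nj,
                beta = c(1, -0.5), sigmae = 0.25, D = 0.5 * diag(2),
                phi = 0.6, struc = "ARp", typeModel = "Normal",
                percCensu = 0.1, nivel.Censu = NULL,
                cens.type = "left", nu = NULL)
  table(sim$cc)                           # censoring indicator (1 = censored)
  head(sim$y_cc)                          # response with censored values replaced
}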
/scratch/gouwar.j/cran-all/cranData/ARpLMEC/R/MMsimu.R
#' State shapefile "STE_2016_simple"
#' @description A simplified \code{SpatialPolygonsDataFrame} of the 2016 State and
#' Territory (STE) boundaries from the Australian Bureau of Statistics.
"STE_2016_simple"
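# Usage sketch (not part of the original source): the object is lazy-loaded with
# the package, so it can be inspected directly. `sp` is assumed available, as it
# already is for latlon2SA() below.
# str(ASGS.foyer::STE_2016_simple@data)
# sp::plot(ASGS.foyer::STE_2016_simple)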
/scratch/gouwar.j/cran-all/cranData/ASGS.foyer/R/data-STE_2016.R
#' Install a (nearly) complete package of the Australian Statistical Geography Standard #' @description The ASGS package provides a nearly comprehensive set of shapefiles, both unmodified and simplified from the Australian Bureau of Statistics. The ASGS package is over 700 MB, so cannot be hosted on CRAN. This function allows the package to be distributed almost as conveniently as through CRAN. #' #' Should you find ASGS lacks some shapefile that you require, please file an issue requesting it be added. #' #' Note that the package is quite large and provides no limits on access, so it is preferred that distribution occur as far as possible via other channels to ensure the method of access provided here is sustainable. #' @param temp.tar.gz A file to save the ASGS tarball after download. Since the package is quite large, #' it may be prudent to set this to a non-temporary file so that subsequent attempts to reinstall do not require additional downloads. #' @param overwrite (logical, default: \code{FALSE}). If \code{temp.tar.gz} already exists, should it be overwritten or should there be an error? #' @param lib,repos,type Passed to \code{\link[utils]{install.packages}} when installing ASGS's dependencies (if not already installed). #' @param ... Other arguments passed to \code{\link[utils]{install.packages}}. #' @param .reinstalls Number of times to attempt to install any (absent) dependencies of \code{ASGS} #' before aborting. Try restarting R rather than setting this number too high. #' @param url.tar.gz The URL of the tarball to be downloaded. Not normally #' needed by users, but may be in case the link becomes fallow, and #' a new one becomes available before the release of a new package entirely. #' #' If set to special value \code{"latest"}, an online file is consulted and #' set to the remote file there. #' #' @param verbose (logical, default: \code{FALSE}) Report logic paths? #' @return \code{temp.tar.gz}, invisibly. 
#' @export
install_ASGS <- function(temp.tar.gz = tempfile(fileext = ".tar.gz"),
                         overwrite = FALSE,
                         lib = .libPaths()[1],
                         repos = getOption("repos"),
                         type = getOption("pkgType", "source"),
                         ...,
                         .reinstalls = 4L,
                         url.tar.gz = NULL,
                         verbose = FALSE) {
  if (is.null(url.tar.gz)) {
    url.tar.gz <- "https://github.com/HughParsonage/ASGS/releases/download/v2021.1/ASGS_2021.1.tar.gz"
  }
  if (identical(url.tar.gz, "latest")) {
    # Previously assigned to an unused variable (`url.tar.gzs`), so the
    # "latest" option had no effect; assign to `url.tar.gz` instead.
    url.tar.gz <- readLines("https://raw.githubusercontent.com/HughParsonage/ASGS.foyer/master/data-raw/ASGS-release-tarballs")[1]
  }
  tempf <- temp.tar.gz
  if (file.exists(tempf)) {
    if (!identical(overwrite, FALSE) && !isTRUE(overwrite)) {
      stop("`overwrite = ", deparse(substitute(overwrite)), "` but must be TRUE or FALSE.")
    }
    if (!overwrite) {
      stop(temp.tar.gz, " exists, yet `overwrite = FALSE`.")
    }
    if (overwrite && !file.remove(tempf)) {
      stop("Unable to overwrite ", tempf)
    }
  }
  asgs_deps <- c("dplyr", "leaflet", "sp", "htmltools", "magrittr",
                 "data.table", "hutils", "spdep")
  absent_deps <- function(deps = asgs_deps) {
    deps[!vapply(deps, requireNamespace, lib.loc = lib, quietly = TRUE,
                 FUN.VALUE = logical(1L))]
  }
  reinstalls <- .reinstalls
  backoff <- 2
  while (reinstalls > 0L && length(absent_deps())) {
    reinstalls <- reinstalls - 1L
    backoff <- 2 * backoff
    message("Attempting to install the following uninstalled dependencies of ASGS: ",
            paste0(absent_deps(), collapse = " "), ".\n",
            reinstalls, " reinstalls remaining.")
    # Announce the wait before sleeping (previously the message came after).
    if (backoff > 10) {
      message("Waiting ", backoff, " seconds before attempting reinstallation. ",
              "Wait times double on each reattempt as a courtesy to repository maintainers.")
    }
    Sys.sleep(backoff)
    r <- repos
    if (identical(r["CRAN"], "@CRAN@")) {
      if (verbose) cat("AAAA\n")
      message("Setting CRAN repository to https://rstudio.cran.com")
      utils::install.packages(absent_deps(), lib = lib,
                              repos = "https://rstudio.cran.com",
                              type = "source",
                              contrib.url = "https://rstudio.cran.com/src/contrib",
                              ...)
    } else if ("@CRAN@" %in% repos) {
      if (verbose) cat("BBBB\n")
      options(repos = c(CRAN = "https://cran.ms.unimelb.edu.au/"))
      utils::install.packages(absent_deps(),
                              repos = c(CRAN = "https://cran.ms.unimelb.edu.au/"),
                              type = type, lib = lib, ...)
    } else {
      if (verbose) cat("CCCC\n")
      utils::install.packages(absent_deps(), repos = repos, lib = lib,
                              type = type, ...)
    }
  }
  if (length(absent_deps())) {
    stop("ASGS requires the following packages: ",
         paste0(absent_deps(), collapse = " "), ". ",
         "Attempts to install did not succeed. Aborting before (lengthy) download.")
  }
  message("Attempting install of ASGS (700 MB) from GitHub. ",
          "This should take some minutes to download.")
  options(timeout = 3600)
  utils::download.file(url = url.tar.gz, mode = "wb", destfile = tempf)
  utils::install.packages(tempf, lib = lib, type = "source", repos = NULL, ...)
  invisible(tempf)
}
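# Usage sketch (not part of the package source): a persistent download location
# lets a failed install be retried without re-downloading the 700 MB tarball.
# The path below is illustrative only.
# if (interactive()) {
#   install_ASGS(temp.tar.gz = "~/Downloads/ASGS.tar.gz", overwrite = TRUE)
# }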
/scratch/gouwar.j/cran-all/cranData/ASGS.foyer/R/install_ASGS.R
#' Determine whether coordinates lie in a given statistical area.
#' @param lat,lon Numeric vector representing coordinates in decimal degrees. Coordinates south of the equator have \code{lat < 0}.
#' @param to The statistical area to convert to.
#' @param yr The year of the statistical area.
#' @param return Whether to return an atomic vector \code{(v)} representing the shapefile for each point \code{lat, lon} or a spatial points object from package \code{sp}.
#' @param NAME (logical, default: \code{TRUE}) whether to use the name or number of the statistical area
#' @param .shapefile If specified, an arbitrary shapefile containing the statistical areas to locate.
#' @return The statistical area that contains each point.
#' @examples
#' latlon2SA(-35.3, 149.2, to = "STE", yr = "2016")
#' @importFrom methods is
#' @export
latlon2SA <- function(lat, lon,
                      to = c("STE", "SA2", "SA1", "SA3", "SA4"),
                      yr = c("2016", "2011"),
                      return = c("v", "sp"),
                      NAME = TRUE,
                      .shapefile = NULL) {
  # match.arg here so `return` is a scalar even when .shapefile is supplied
  # (previously it was only resolved inside the is.null(.shapefile) branch).
  return <- match.arg(return)
  if (is.null(.shapefile)) {
    # Could use NSE but can't be arsed:
    to <- match.arg(to)
    if (to != "STE" && !isNamespaceLoaded("ASGS")) {
      stop("The ASGS package is required for any statistical area besides STE.")
    }
    yr <- match.arg(yr)
    if (yr != "2016" && !isNamespaceLoaded("ASGS")) {
      stop("The ASGS package is required for `year = 2011`.")
    }
    stopifnot(length(to) == 1, length(yr) == 1, length(lat) == length(lon))
    # The stops above guarantee ASGS is loaded whenever anything other than
    # STE_2016 is requested, so retrieve from ASGS in that case. (The previous
    # condition `to != "STE" && !isNamespaceLoaded("ASGS")` could never hold here,
    # so the non-STE shapefile was never retrieved.)
    if (to != "STE" || yr != "2016") {
      shapefile <- get(paste0(to, "_", yr))
    } else {
      shapefile <- ASGS.foyer::STE_2016_simple
    }
  } else {
    shapefile <- .shapefile
    stopifnot("proj4string" %in% methods::slotNames(shapefile),
              "data" %in% methods::slotNames(shapefile))
  }
  if (!is(shapefile, "SpatialPolygonsDataFrame")) {
    stop("Attempted to retrieve `", paste0(to, "_", yr), "` internally ",
         "but that object is not a SpatialPolygonsDataFrame. Due to ",
         "limitations of this function, ensure that this object ",
         "does not exist except as the shapefile from the ASGS package.")
  }
  points <- sp::SpatialPoints(coords = sp::coordinates(data.frame(x = lon, y = lat)),
                              proj4string = shapefile@proj4string)
  out <- sp::over(points, shapefile)
  if (return == "v") {
    if (NAME && to != "SA1") {
      suffix <- paste0("NAME", substr(yr, 3, 4))
      v_name <- paste0(to, "_", suffix)
    } else {
      suffix <- names(out)[grepl(to, names(out)) & !grepl("NAME", names(out))]
      v_name <- suffix[1]
    }
    out <- out[[v_name]]
  }
  out
}
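# Usage sketch (not part of the package source; coordinates are illustrative and
# only the bundled STE_2016_simple shapefile is needed). Vectorised lookup:
# latlon2SA(lat = c(-35.3, -37.8), lon = c(149.2, 145.0), to = "STE", yr = "2016")
# Returning the full overlay data frame instead of an atomic vector:
# latlon2SA(-35.3, 149.2, to = "STE", yr = "2016", return = "sp")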
/scratch/gouwar.j/cran-all/cranData/ASGS.foyer/R/latlon2SA.R
mstmap <- function(object, ...) UseMethod("mstmap") mstmap.default <- function(object, ...) stop("Only \"data.frame\" and \"cross\" methods are available. See specific help files for more details.") mstmap.cross <- function(object, chr, id = "Genotype", bychr = TRUE, suffix = "numeric", anchor = FALSE, dist.fun = "kosambi", objective.fun = "COUNT", p.value=1e-6, noMap.dist = 15.0, noMap.size = 0, miss.thresh = 1.0, mvest.bc = FALSE, detectBadData = FALSE, return.imputed = FALSE, trace = FALSE, ...) { if(!(id %in% names(object$pheno))) stop("The unique identifier for the genotypes, ", deparse(substitute(id)), ", cannot be found in the object") if(!inherits(object, c("bc","dh","riself","bcsft"))) stop("Cross object must inherit from one of the classes \"bc\", \"dh\",\"riself\",\"bcsft\". See ?mstmap.cross for more details.") if(!(dist.fun %in% c("haldane","kosambi"))) stop("Distance function needs to be \"haldane\" or \"kosambi\" (see ?mstmap.cross).") if(!(objective.fun %in% c("COUNT","ML"))) stop("Objective function needs to be \"COUNT\" or \"ML\" (see ?mstmap.cross).") if((miss.thresh > 1) | miss.thresh < 0) stop("Missing value threshold is required to be between 0 and 1.") if(class(object)[1] %in% c("bc","dh","riself")){ pop.type <- "DH" allele <- c("A","B","") } if(class(object)[1] %in% "bcsft"){ scheme <- attr(object, "scheme") if(scheme[1] > 0) stop("\"bcsft\" type is restricted to selfed populations only.") err <- lapply(object$geno, function(el){ if(any(c(4,5) %in% el$data)) stop("Only selfed populations with fully informative markers allowed. See ?mstmap.cross for more details.")}) pop.type <- paste("RIL", scheme[2], sep = "") allele <- c("A","X","B") } if(trace) trace <- "MSToutput.txt" if (is.character(trace)) { ftrace <- file(trace, "w") sink(trace, type = "output", append = FALSE) on.exit(sink(type = "output")) on.exit(close(ftrace), add = TRUE) trace <- TRUE } oldObject <- NULL if(!missing(chr)) { oldObject <- subset(object, paste("-", chr, sep ="")) object <- subset(object, chr) } geno <- as.character(object$pheno[[id]]) lmat <- lapply(object$geno, function(el, geno, allele) { rownames(el$data) <- geno el$data[el$data == 1] <- allele[1] el$data[el$data == 2] <- allele[2] el$data[el$data == 3] <- allele[3] el$data[is.na(el$data)] <- "U" t(el$data) }, geno, allele) mn <- markernames(object) if(!bychr) { names(object$geno) <- ochr <- paste("ALL", 1:length(object$geno), sep = "") object$geno$"L"$data <- do.call("cbind", lapply(object$geno, function(el) el$data)) object$geno$"L"$map <- unlist(lapply(object$geno, function(el) el$map)) class(object$geno$"L") <- class(object$geno[[1]]) lmat <- list(do.call("rbind", lmat)) names(object$geno$"L"$map) <- rownames(lmat[[1]]) <- mn object <- subset(object, paste("-", ochr, sep = "")) } nm <- names(object$geno) param <- list("population_type"=pop.type, "distance_function"=dist.fun, "cut_off_p_value"=as.double(p.value), "no_map_dist"=as.double(noMap.dist), "no_map_size"=as.integer(noMap.size), "missing_threshold"=as.double(miss.thresh), "estimation_before_clustering"=as.integer(mvest.bc), "detect_bad_data"=as.integer(detectBadData), "objective_function"=objective.fun, trace=as.integer(trace)) omit.list <- list() for(i in 1:length(lmat)){ mst <- .Call("mst", param, as.data.frame(lmat[[i]], stringsAsFactors = FALSE)) if (class(object)[1] == "riself") { imf <- switch(dist.fun, haldane = imf.h, kosambi = imf.k) mf <- switch(dist.fun, haldane = mf.h, kosambi = mf.k) mst <- lapply(mst, function(el, imf, mf){ nm <- names(el$map) rf <- 
mf(diff(el$map)) rf <- (rf/2)/(1 - rf) el$map <- cumsum(c(0, imf(rf))) names(el$map) <- nm el }, imf, mf) } chrw <- nm[i] nmm <- names(object$geno[[chrw]]$map) if(anchor) mst <- lapply(mst, function(el, nmm){ nmms <- nmm[nmm %in% names(el$map)] lens <- 1:length(nmms) apos <- pmatch(nmms, names(el$map)) rpos <- pmatch(nmms, names(rev(el$map))) if(sum(abs(rpos - lens)) < sum(abs(apos - lens))) { el$map <- rev(el$map) el$map <- abs(el$map - el$map[1]) } el }, nmm) map <- lapply(mst, function(el) el$map) ordm <- pmatch(names(unlist(map)), nmm) if(any(is.na(ordm))) omit.list[[i]] <- object$geno[[chrw]]$data[,is.na(ordm), drop = FALSE] ordm <- ordm[!is.na(ordm)] object$geno[[chrw]]$data <- object$geno[[chrw]]$data[,ordm, drop = FALSE] object$geno[[chrw]]$map <- unlist(map) if(length(map) > 1){ spl <- sapply(map, function(el) names(el)[length(el)]) spl <- list(spl[1:(length(spl) - 1)]) names(spl) <- chrw cobject <- subset(object, chr = chrw) cobject <- breakCross(cobject, split = spl, suffix = suffix) object$geno <- object$geno[!(names(object$geno) %in% chrw)] chrw <- names(cobject$geno) object$geno <- c(object$geno, cobject$geno) } if(return.imputed){ imp <- lapply(mst, function(el){ names(el)[2] <- "data" el$data <- t(el$data) el$map <- el$map[names(el$map) %in% dimnames(el$data)[[2]]] el$data <- el$data[,pmatch(names(el$map), dimnames(el$data)[[2]]),drop = FALSE] el}) names(imp) <- chrw if(!is.null(object$imputed.geno)) object$imputed.geno <- object$imputed.geno[!(names(object$imputed.geno) %in% chrw)] object$imputed.geno <- c(object$imputed.geno, imp) object$imputed.geno <- object$imputed.geno[mixedorder(names(object$imputed.geno))] } } if(exists("omit.list")) object$omit <- do.call("cbind", omit.list[!sapply(omit.list, is.null)]) object$geno <- c(object$geno, oldObject$geno) object$geno <- object$geno[mixedorder(names(object$geno))] object } breakCross <- function(cross, split = NULL, suffix = "numeric", sep = "."){ if(is.null(split)) stop("Split cannot be null.") chr <- names(split) if(!all(chr %in% names(nmar(cross)))) stop("Some linkage group names do not exist in linkage map.") if(is.character(suffix)){ if(!all(suffix %in% c("numeric","alpha"))) stop("Character values for post names must be a vector of either \"numeric\" or \"alpha\".") if((length(suffix) == 1) & length(chr) > 1) suffix <- rep(suffix, length(chr)) suffix <- as.list(suffix) names(suffix) <- chr } else if(is.list(suffix)){ if(!all(names(suffix) %in% names(split))) stop("Some linkage group names in argument suffix do not match names in argument split.") if(any(is.na(pm <- pmatch(chr, names(suffix))))) warning("Some linkage group names missing from suffix argument .. 
using \"numeric\".") pnam <- split pnam[is.na(pm)] <- "numeric" pe <- pm[!is.na(pm)] pnam[!is.na(pm)] <- suffix[pe] suffix <- pnam } else stop("Post names are of the wrong type, see ?breakCross.") for(i in chr){ cmap <- names(cross$geno[[i]]$map) if(any(is.na(mmatch <- pmatch(split[[i]], cmap)))) stop("Some marker names do not exist in specified linkage groups.") pm <- c(mmatch, length(cmap)) if(any(c("numeric","alpha") %in% suffix[[i]])) p.nam <- paste(i, switch(suffix[[i]], numeric = 1:length(pm), alpha = LETTERS[1:length(pm)]), sep = sep) else p.nam <- suffix[[i]] if(length(p.nam) != length(pm)) stop("Length of linkage group post names does match number of splits.") slg <- rep(p.nam, times = c(pm[1], diff(pm))) lg <- lapply(p.nam, function(el, gen, slg) { res <- list() res$data <- gen$data[,slg %in% el, drop = FALSE] res$map <- gen$map[slg %in% el] res$map <- res$map - res$map[1] class(res) <- class(gen) res }, gen = cross$geno[[i]], slg) names(lg) <- p.nam cross$geno <- cross$geno[!(names(cross$geno) %in% i)] cross$geno <- c(cross$geno, lg) } cross$geno <- cross$geno[mixedorder(names(cross$geno))] cross } mergeCross <- function(cross, merge = NULL, gap = 5){ if(is.null(merge)) stop("Need list of linkage groups to merge.") if(any(sapply(merge, length) < 2)) stop("Number of linkages groups to merge must be 2 or greater") if(any(!(unlist(merge) %in% names(nmar(cross))))) stop("Some listed linkage groups do not appear in cross object") for(i in 1:length(merge)){ nm <- nmar(cross) if(is.null(lnam <- names(merge[i]))) lnam <- paste(merge[[i]], sep = "") whl <- pmatch(merge[[i]], names(nm)) sc <- cross$geno[whl] sc[[1]]$data <- do.call("cbind", lapply(sc, function(el) el$data)) mapd <- sc[[1]]$map for(j in 2:length(sc)) mapd <- c(mapd, sc[[j]]$map + max(mapd)[1] + gap) sc[[1]]$map <- mapd names(sc[[1]]$map) <- dimnames(sc[[1]]$data)[[2]] sc <- sc[1] names(sc)[1] <- lnam cross$geno <- cross$geno[-whl] cross$geno <- c(cross$geno, sc) } cross$geno <- cross$geno[mixedorder(names(cross$geno))] cross } subsetCross <- function(cross, chr, ind, ...){ if(!inherits(cross, c("bc","dh","riself","bcsft"))) stop("Cross object must inherit from one of the classes \"bc\", \"dh\",\"riself\",\"bcsft\". See ?subsetCross for more details.") if(!missing(chr)) cross <- subset(cross, chr = chr, ...) if(!missing(ind)){ n.ind <- nind(cross) if(!(is.numeric(ind) | is.logical(ind))) stop("Argument ind can only be logical or numeric.") cross <- subset(cross, ind = ind, ...) if(is.numeric(ind) & all(ind < 0)) ind <- (1:n.ind)[ind] type <- c("co.located","seg.distortion","missing") if(any(wh <- type %in% names(cross))){ type <- type[wh] for(i in type){ cross[[i]]$data <- cross[[i]]$data[ind,,drop = FALSE] tcross <- cross if(i %in% c("seg.distortion","missing")){ tcross$geno[[i]]$data <- cross[[i]]$data tcross$geno[[i]]$map <- 1:ncol(cross[[i]]$data) class(tcross$geno[[i]]) <- "A" tab <- geno.table(tcross, chr = i, scanone.output = TRUE) if(class(cross)[1] == "bcsft") tab <- tab[,1:(ncol(tab) - 2)] cross[[i]]$table[,4:ncol(cross[[i]]$table)] <- tab[,3:ncol(tab)] } } } } cross } mstmap.data.frame <- function(object, pop.type= "DH", dist.fun = "kosambi", objective.fun= "COUNT", p.value=1e-6, noMap.dist=15.0, noMap.size=0, miss.thresh=1.0, mvest.bc=FALSE, detectBadData=FALSE, as.cross = TRUE, return.imputed = FALSE, trace=FALSE, ...) 
{
  if(trace) trace <- "MSToutput.txt"
  if (is.character(trace)) {
    ftrace <- file(trace, "w")
    sink(trace, type = "output", append = FALSE)
    on.exit(sink(type = "output"))
    on.exit(close(ftrace), add = TRUE)
    trace <- TRUE
  }
  if(!all(sapply(object, is.character)) & !all(sapply(object, is.numeric)))
      stop("Columns of the marker data.frame must be all of type \"character\" or all of \"numeric\".")
  RILN <- paste("RIL", 1:20, sep = "")
  allow.pop <- c("BC","DH","ARIL",RILN)
  if(!(pop.type %in% allow.pop))
      stop("Population type needs to be \"BC\",\"DH\",\"ARIL\" or \"RILn\" (see ?mstmap.data.frame).")
  if(pop.type %in% RILN) rnum <- substring(pop.type, 4, nchar(pop.type))
  if(!(dist.fun %in% c("haldane","kosambi")))
      stop("Distance function needs to be \"haldane\" or \"kosambi\" (see ?mstmap.data.frame).")
  if(!(objective.fun %in% c("COUNT","ML")))
      stop("Objective function needs to be \"COUNT\" or \"ML\" (see ?mstmap.data.frame).")
  alleles <- unique(unlist(lapply(object, unique)))
  allow.list <- c("A","a","B","b","-","U")
  ptype <- pop.type
  if(pop.type %in% c("BC","DH","ARIL")){
    if(all(sapply(object, is.numeric))){
      if(any(apply(object, 2, function(el) length(el[is.na(el)]))))
          stop("Numeric input cannot contain missing values")
      if(any(apply(object, 2, function(el) el < 0 | el > 1)))
          stop("Only values between 0 and 1 are allowed if input is numeric.")
      if(as.cross) stop("Cross object cannot be returned for numeric input.")
    } else {
      if(!all(alleles %in% allow.list)) stop("Non-allowable allele encodings in DH marker set")
      cons <- c("1", "1", "2", "2", NA, NA, NA)
    }
    pop.type <- "DH"
  } else {
    # A data.frame is never is.numeric(); test its columns instead.
    if(all(sapply(object, is.numeric))) stop("Numeric input is not available for RILn populations.")
    allow.list <- c(allow.list, "X")
    if(!all(alleles %in% allow.list)) stop("Non-allowable allele encodings in RIL marker set")
    cons <- c("1", "1", "3", "3", NA, NA, "2")
  }
  # Rows of the input are markers and columns are genotypes; warn accordingly
  # and escape the quotes (previously unescaped, a syntax error).
  if(length(grep(" ", rownames(object)))){
    rownames(object) <- gsub(" ", "-", rownames(object))
    warning("Replacing spaces in marker names with a \"-\" separator\n")
  }
  if(length(grep(" ", names(object)))){
    names(object) <- gsub(" ", "-", names(object))
    warning("Replacing spaces in genotype names with a \"-\" separator\n")
  }
  dist.fun <- match.arg(dist.fun)
  objective.fun <- match.arg(objective.fun)
  param <- list("population_type"=pop.type, "distance_function"=dist.fun,
                "cut_off_p_value"=as.double(p.value), "no_map_dist"=as.double(noMap.dist),
                "no_map_size"=as.integer(noMap.size), "missing_threshold"=as.double(miss.thresh),
                "estimation_before_clustering"=as.integer(mvest.bc),
                "detect_bad_data"=as.integer(detectBadData),
                "objective_function"=objective.fun, trace=as.integer(trace))
  mst <- .Call("mst", param, object)
  map <- lapply(mst, function(el) el$map)
  marks <- unlist(lapply(map, names))
  ordm <- pmatch(marks, rownames(object))
  if(any(is.na(ordm))) omit.list <- t(object[is.na(ordm), drop = FALSE])
  ordm <- ordm[!is.na(ordm)]
  object <- object[ordm,]
  chn <- paste("L", 1:length(map), sep = "")
  if (ptype == "ARIL") {
    imf <- switch(dist.fun, haldane = imf.h, kosambi = imf.k)
    mf <- switch(dist.fun, haldane = mf.h, kosambi = mf.k)
    map <- lapply(map, function(el, imf, mf){
      nams <- names(el)
      rf <- mf(diff(el))
      rf <- (rf/2)/(1 - rf)
      el <- cumsum(c(0, imf(rf)))
      names(el) <- nams
      el
    }, imf, mf)
  }
  if(as.cross){
    co <- list()
    spl <- rep(1:length(map), times = sapply(map, length))
    object <- as.matrix(object)
    al <- allow.list %in% alleles
    cons <- cons[al]
    al <- allow.list[al]
    for(i in 1:length(al)) object[object == al[i]] <- cons[i]
    dn <- dimnames(object)[[1]]
    object <- apply(object, 2, function(el)
as.numeric(el)) dimnames(object)[[1]] <- dn datm <- lapply(split.data.frame(object, spl), t) mo <- lapply(1:length(map), function(el, datm, map){ temp <- list() temp$data <- datm[[el]] temp$map <- map[[el]] class(temp) <- "A" temp }, datm, map) co$geno <- mo names(co$geno) <- chn if(return.imputed){ co$imputed.geno <- lapply(mst, function(el){ names(el)[2] <- "data" el$data <- t(el$data) el$map <- el$map[names(el$map) %in% dimnames(el$data)[[2]]] el$data <- el$data[,pmatch(names(el$map), dimnames(el$data)[[2]]),drop = FALSE] el}) names(co$imputed.geno) <- chn } co$pheno <- data.frame(Genotype = factor(dimnames(object)[[2]])) wp <- (1:23)[allow.pop %in% ptype] class(co) <- c(c("bc","dh","riself",rep("f2",20))[wp],"cross") if(wp %in% 4:23) co <- convert2bcsft(co, F.gen = wp - 3, estimate.map = FALSE) object <- co } else { do <- list() chrv <- rep(chn, times = sapply(map, length)) dist <- unlist(map) do$geno <- cbind.data.frame(markers = marks, chr = chrv, dist = dist, object) rownames(do$geno) <- NULL if(return.imputed){ imp <- lapply(mst, function(el){ nm <- names(el$map)[names(el$map) %in% dimnames(el$imputed_values)[[1]]] pm <- pmatch(nm, dimnames(el$imputed_values)[[1]]) el$imputed_values <- el$imputed_values[pm, , drop = FALSE] el$imputed_values }) chri <- rep(chn, times = sapply(imp, function(el) dim(el)[1])) marki <- unlist(lapply(imp, rownames)) disti <- dist[pmatch(marki, names(dist))] do$imputed.geno <- cbind.data.frame(markers = marki, chr = chri, disti = disti, do.call("rbind", imp)) rownames(do$imputed.geno) <- NULL } object <- do } if(exists("omit.list")) object$omit <- omit.list object } heatMap <- function (x, chr, mark, what = c("both", "lod", "rf"), lmax = 12, rmin = 0, markDiagonal = FALSE, color = rev(colorRampPalette(brewer.pal(11,"Spectral"))(256)), ...) { opar <- par(no.readonly = TRUE) if (!inherits(x, "cross")) stop("Input should have class \"cross\".") what <- match.arg(what) if ("onlylod" %in% names(attributes(x$rf)) && attr(x$rf, "onlylod")) { onlylod <- TRUE what <- "lod" } else onlylod <- FALSE if (!missing(chr)) x <- subset(x, chr = chr) n.mar <- nmar(x) if(!missing(mark)){ if(!is.list(mark)) mark <- list(mark) if(length(mark) == 1){ if(length(n.mar) != 1) mark <- rep(mark, length(n.mar)) names(mark) <- names(n.mar) } if(!all(names(mark) %in% names(n.mar))) stop("Names of marker subsets do not match names of linkage groups.") for(i in names(mark)){ if(max(mark[[i]]) > length(x$geno[[i]]$map)) stop("Range of marker subset greater than the number of markers in linkage group") x$geno[[i]]$map <- x$geno[[i]]$map[mark[[i]]] x$geno[[i]]$data <- x$geno[[i]]$data[,mark[[i]]] } } if (!("rf" %in% names(x))) { warning("Running est.rf.") x <- est.rf(x) } g <- x$rf old.xpd <- par("xpd") old.las <- par("las") par(las = 1) dots <- list(...) 
cols <- color if(is.null(dots$bigplot)) dots$bigplot <- c(0.05, 0.85, 0.15, 0.9) if(is.null(dots$smallplot)) dots$smallplot <- c(0.92, 0.94, 0.15, 0.9) if (what == "lod" && !onlylod) { g[lower.tri(g)] <- t(g)[lower.tri(g)] diag(g) <- lmax g[!is.na(g) & g > lmax] <- lmax lseq <- seq(0, lmax, length.out = 256) lmin <- min(g, na.rm = TRUE) cols <- cols[lmin - lseq < 0] iargs <- list(x = 1:nrow(g), y = 1:ncol(g), z = t(g), ylab = "Markers", xlab = "Markers", col = cols, legend.args = list(side = 4, text = "LOD", las = 0, line = 2)) do.call("image.plot", c(iargs, dots)) } else if (what == "rf") { g[upper.tri(g)] <- t(g)[upper.tri(g)] diag(g) <- rmin g[!is.na(g) & g < rmin] <- rmin maxg <- max(g, na.rm = TRUE) rseq <- seq(rmin, 0.5, length.out = 256) cols <- rev(cols)[maxg - rseq > 0] clen <- 256*(maxg/0.5 - 1) clen <- ifelse(clen < 0, 0, clen) cols <- c(cols, colorRampPalette(c(cols[length(cols)], "white"))(clen)) iargs <- list(x = 1:nrow(g), y = 1:ncol(g), z = t(g), ylab = "Markers", xlab = "", col = cols, legend.args = list(side = 4, text = "Recombination Fractions", las = 0, line = 2, crt = 180)) do.call("image.plot", c(iargs, dots)) } else { dots$bigplot[1] <- 0.15 diag(g) <- lmax g[!is.na(g) & g > lmax] <- lmax glt <- g[lower.tri(g)] g[lower.tri(g)] <- NA lseq <- seq(0, lmax, length.out = 256) lmin <- min(g, na.rm = TRUE) coll <- cols[lmin - lseq < 0] iargs <- list(x = 1:nrow(g), y = 1:ncol(g), z = t(g), ylab = "", xlab = "", col = coll, legend.args = list(side = 4, text = "Linkage", las = 0, line = 2)) do.call("image.plot", c(iargs, dots)) g[lower.tri(g)] <- glt g[upper.tri(g)] <- NA diag(g) <- NA g[!is.na(g) & g < rmin] <- rmin dots$smallplot[1:2] <- c(0.05, 0.07) maxg <- max(g, na.rm = TRUE) rseq <- seq(rmin, 0.5, length.out = 256) colr <- rev(cols)[maxg - rseq > 0] clen <- 256*(maxg/0.5 - 1) clen <- ifelse(clen < 0, 0, clen) colr <- c(colr, colorRampPalette(c(colr[length(colr)], "white"))(clen)) iargs <- list(x = 1:nrow(g), y = 1:ncol(g), z = t(g), ylab = "", xlab = "Markers", col = colr, add = TRUE, legend.args = list(side = 2, text = "Recombination", las = 0, line = 1.5)) do.call("image.plot", c(iargs, dots)) } par(plt = dots$bigplot) if (markDiagonal) { for (i in 1:ncol(g)) segments(i + c(-0.5, -0.5, -0.5, +0.5), i + c(-0.5, +0.5, -0.5, -0.5), i + c(-0.5, +0.5, +0.5, +0.5), i + c(+0.5, +0.5, -0.5, +0.5)) } n.mar <- nmar(x) n.chr <- nchr(x) a <- c(0.5, cumsum(n.mar) + 0.5) abline(v = a, xpd = TRUE, col = "white", lwd = 0) abline(h = a, xpd = FALSE, col = "white", lwd = 0) a <- par("usr") wh <- cumsum(c(0.5, n.mar)) chrnam <- names(x$geno) chrpos <- (wh[-1] + wh[-length(wh)])/2 par(plt = dots$bigplot) c.args <- c(list(side = 3, at = chrpos, labels = chrnam, tick = FALSE, line = -0.6), dots$axis.args) do.call("axis", c.args) c.args$side <- 4 do.call("axis", c.args) par(opar) if ("main" %in% names(dots)) title(main = dots$main, line = 2.5) else { if (what == "lod") title(main = "Pairwise LOD scores", line = 2.5) else if (what == "rf") title(main = "Recombination fractions", line = 2.5) else title("Pairwise recombination fractions and LOD scores", line = 2.5) } invisible() } quickEst <- function(object, chr, map.function = "kosambi", ...){ if (!any(class(object) == "cross")) stop("Input should have class \"cross\".") if (missing(chr)) chr <- names(nmar(object)) imf <- switch(map.function, kosambi = imf.k, haldane = imf.h, morgan = imf.m, cf = imf.cf) nm <- nmar(object) for(i in chr){ temp <- subset(object, chr = i) if(nmar(temp) != 1){ est <- est.rf(temp)$rf nc <- dim(est)[1] er <- 
est[cbind(2:nc,1:(nc - 1))] temp$geno[[i]]$map <- c(0,cumsum(imf(er))) names(temp$geno[[i]]$map) <- dimnames(temp$geno[[i]]$data)[[2]] tempa <- argmax.geno(temp, step = 0, map.function = map.function, ...) tempa$geno[[i]]$data <- tempa$geno[[i]]$argmax tempa$geno[[i]] <- tempa$geno[[i]][-3] esta <- est.rf(tempa)$rf era <- esta[cbind(2:nc,1:(nc - 1))] if(class(object)[1] == "riself") era <- (era/2)/(1 - era) object$geno[[i]]$map <- c(0,cumsum(imf(era))) names(object$geno[[i]]$map) <- dimnames(object$geno[[i]]$data)[[2]] } } object } genClones <- function(object, chr, tol = 0.9, id = "Genotype"){ pairsfun <- function(mat1 = mat1, mat2 = mat2){ ng <- c(unique(mat1),unique(mat2)) ng2 <- ng[!is.na(ng)]^2 mat1[is.na(mat1)] <- mat2[is.na(mat2)] <- pi apply(mat1*mat2, 1, function(el, ng2) { is.wholenumber <- function(x, tol = .Machine$double.eps^0.5) abs(x - round(x)) < tol ma <- sum(rep(1, length(el))[el %in% ng2]) nm <- sum(rep(1, length(el))[!(el %in% ng2) & is.wholenumber(el)]) na <- sum(rep(1, length(el))[el == (pi^2)]) c(ma, nm, na) }, ng2 = ng2) } if(!inherits(object, "cross")) stop(deparse(substitute(object)), " must be of class \"cross\"") if(!missing(chr)) object <- subset(object, chr) if(!(id %in% names(object$pheno))) stop("The unique identifier for the genotypes, ", deparse(substitute(id)), ", cannot be found in the object") gmat <- pull.geno(object) gnam <- as.character(object$pheno[[id]]) nm <- nmar(object) cgm <- cg <- comparegeno(object) cg[upper.tri(cg, diag = TRUE)] <- NA wh <- which(cg > tol, arr.ind = TRUE) if(dim(wh)[1] == 0){ message("There are no genotype pairs with matching allele proportions greater than ", tol,".") return(list(cgm = cgm)) } ind1 <- wh[,1]; ind2 <- wh[,2] cgd <- cbind.data.frame(G1 = gnam[ind1], G2 = gnam[ind2]) cgd$coef <- round(cg[wh], 4) cgd <- cbind.data.frame(cgd, t(pairsfun(gmat[ind1,, drop = FALSE],gmat[ind2,, drop = FALSE]))) names(cgd)[4:6] <- c("match", "diff","na.both") cgd$na.one <- ncol(gmat) - apply(cgd[,4:6], 1, sum) group <- rep(1, nrow(cgd)) ## organize groups if(nrow(cgd) > 1){ for(el in 2:nrow(cgd)){ ind <- rep(1:(el - 1), 2) if(any(wg <- as.character(unlist(cgd[1:(el - 1),1:2])) %in% as.character(unlist(cgd[el,1:2])))){ wg <- ind[wg] if(length(ug <- unique(group[wg])) > 1){ mug <- min(ug) oth <- ug[ug != mug] group[group %in% oth] <- mug ug <- mug } group[el] <- ug } else group[el] <- max(group) + 1 } } cgd$group <- group cgd <- cgd[order(cgd$group),] rownames(cgd) <- NULL dimnames(cgm) <- list(gnam, gnam) list(cgm = cgm, cgd = cgd) } fixClones <- function(object, gc, id = "Genotype", consensus = TRUE){ if(!inherits(object, "cross")) stop(deparse(substitute(object)), " must be of class \"cross\"") if(missing(gc)) stop("Argument \"gc\" cannot be missing.") if(!is.data.frame(gc)) stop("Argument \"gc\" needs to be a data frame.") if(!all(c("G1","G2","group") %in% names(gc))) stop("Argument \"gc\" must have names \"G1\", \"G2\" and \"group\", see ?fixClones.") if(!(id %in% names(object$pheno))) stop("The unique identifier for the genotypes, ", deparse(substitute(id)), ", cannot be found in the object") ph <- as.character(object$pheno[[id]]) nm <- nmar(object) sl <- split(gc, gc$group) bm <- pull.geno(object) rownames(bm) <- ph nd <- unlist(pull.map(object)) nc <- rep(names(nm), times = nm) cl <- sapply(object$geno, function(el) class(el)) colnames(bm) <- paste(nc, markernames(object), nd, cl, sep = ";") gomit <- list() for(i in 1:length(sl)){ gn <- unique(as.character(unlist(sl[[i]][,c("G1","G2")]))) gn <- gn[mixedorder(gn)] if(!all(gn 
%in% ph)) stop("Some genotypes in clone set cannot be found in cross object.")
    wh <- (1:length(ph))[ph %in% gn]
    if(consensus){
      td <- apply(bm[wh, ], 2, function(mm){
        mm <- mm[!is.na(mm)]
        if(length(mmu <- unique(mm)) > 1 | !length(mmu)) NA else mmu
      })
      kp <- rownames(bm) %in% gn[1]
      bm[kp,] <- td
      gomit[[i]] <- gn[2:length(gn)]
    } else {
      len <- apply(bm[wh,], 1, function(el) length(el[!is.na(el)]))
      kp <- gn[len == max(len)][1]
      gomit[[i]] <- gn[!(gn %in% kp)]
      kp <- rownames(bm) %in% kp
    }
    rownames(bm)[kp] <- paste(gn, collapse = "_")
  }
  gomit <- unlist(gomit)
  object$pheno[[id]] <- factor(rownames(bm))
  object$geno <- lapply(split.data.frame(t(bm), nc), function(el){
    temp <- list()
    spl.c <- strsplit(rownames(el), ";")
    temp$data <- as.matrix(t(el))
    temp$map <- as.numeric(sapply(spl.c, "[", 3))
    names(temp$map) <- dimnames(temp$data)[[2]] <- sapply(spl.c, "[", 2)
    class(temp) <- unique(sapply(spl.c, "[", 4))
    temp
  })
  object <- subset(object, ind = !(object$pheno[[id]] %in% gomit))
  object$geno <- object$geno[mixedorder(names(object$geno))]
  object
}
pp.init <- function(seg.thresh = 0.05, seg.ratio = NULL, miss.thresh = 0.1, max.rf = 0.25, min.lod = 3){
  if(!is.null(seg.thresh)){
    if(!(is.numeric(seg.thresh) | is.character(seg.thresh)))
        stop("seg.thresh argument of wrong type, see ?pullCross or ?pushCross.")
    if(is.character(seg.thresh)){
      if(seg.thresh != "bonf")
          stop("seg.thresh argument can only be numeric or equal to \"bonf\", see ?pullCross or ?pushCross")
    }
    if(is.numeric(seg.thresh)){
      if(seg.thresh >= 1 | seg.thresh < 0 )
          stop("seg.thresh cannot exceed 1 and must be between 0 and 1")
    }
  }
  if(!is.null(seg.ratio)){
    seg.thresh <- NULL
    if(!is.character(seg.ratio)) stop("seg.ratio argument of wrong type, see ?pullCross or ?pushCross.")
    if(!length(grep(":", seg.ratio))) stop("Allele ratios must be split by \":\".")
  }
  if(!is.numeric(miss.thresh)) stop("miss.thresh argument of wrong type, see ?pullCross or ?pushCross.")
  if(miss.thresh > 1 | miss.thresh < 0) stop("miss.thresh cannot exceed 1 and must be between 0 and 1")
  if(max.rf > 0.5) stop("Maximum recombination fraction cannot exceed 0.5.")
  if(min.lod <= 0) stop("Minimum LOD score cannot be less than or equal to zero.")
  list(seg.thresh = seg.thresh, seg.ratio = seg.ratio, miss.thresh = miss.thresh,
       max.rf = max.rf, min.lod = min.lod)
}
pullCross <- function(object, chr, type = c("co.located","seg.distortion","missing"), pars = NULL, replace = FALSE, ...){
  if(!is.null(object[[type]])){
    if(dim(object$pheno)[1] != dim(object[[type]]$data)[1])
        stop("Number of genotypes in linkage map does not match external ", type, " data.")
  }
  if(is.null(pars)) pars <- pp.init()
  if(!is.list(pars))
      stop("argument pars must be a list object with one or more named elements matching the results of pp.init()")
  pars <- do.call("pp.init", pars)
  type <- match.arg(type)
  if(!(class(object)[1] %in% c("bc","dh","riself","bcsft")))
      stop("Pulling of markers is not supported for this population type, see ?pullCross.")
  oldObject <- NULL
  if (!missing(chr)) {
    oldObject <- subset(object, paste("-", chr, sep =""))
    object <- subset(object, chr = chr)
  }
  nm <- nmar(object)
  chr <- names(nm)
  chrn <- rep(chr, times = nm)
  geno <- genot <- do.call("cbind", lapply(object$geno, function(el) el$data))
  rownames(genot) <- as.character(object$pheno[[1]])
  markn <- colnames(geno)
  whm <- FALSE
  if(type == "co.located"){
    dm <- findDupMarkers(object, chr, exact.only = FALSE)
    if(!is.null(dm)){
      cm <- unlist(dm)
      whm <- markn %in% cm
      km <- names(dm)
      markl <- chrl <- list()
      for(i in 1:length(dm)){
        markl[[i]] <- c(km[i], dm[[i]])
chrl[[i]] <- chrn[markn %in% markl[[i]]] } lens <- sapply(dm, function(x) length(x) + 1) bins <- rep(1:length(lens), times = lens) tab <- cbind.data.frame(bins = bins, chr = unlist(chrl), mark = unlist(markl)) } } if(type %in% c("seg.distortion","missing")){ tab <- geno.table(object, scanone.output = TRUE) nc <- ncol(tab) if(class(object)[1] == "bcsft") nc <- nc - 2 if(type == "seg.distortion"){ if(!is.null(seg.thresh <- pars$seg.thresh)){ pval <- 10^(-tab$neglog10P) if(is.character(seg.thresh)){ if(!(seg.thresh %in% "bonf")) stop("Thresholding only exists for \"bonf\", see ?pull.cross") seg.thresh <- 0.05/totmar(object) } else if(seg.thresh > 1) stop("Numerical threshold cannot exceed a p.value of 1.") whm <- pval < seg.thresh } else if(!is.null(seg.ratio <- pars$seg.ratio)) { ctab <- tab[,5:nc] sr <- as.numeric(unlist(strsplit(seg.ratio, ":"))) if(length(sr) != ncol(ctab)) stop("Wrong number of ratio elements for cross type.") psr <- sr/sum(sr) props <- t(apply(ctab, 1, function(el) el/sum(el))) pmx <- apply(props, 1, max) pmn <- apply(props, 1, min) whm <- (pmx > max(psr)[1]) | (pmn < min(psr)[1]) } else stop("Need a threshold or a ratio (see ?pullCross).") } if(type == "missing") whm <- tab$missing > pars$miss.thresh tab <- cbind.data.frame(mark = markn[whm], tab[whm,1:nc]) } if(any(whm)){ rownames(tab) <- NULL object <- drop.markers(object, markn[whm]) if(is.null(object[[type]]) | replace) object[[type]] <- list() object[[type]]$table <- rbind(object[[type]]$table, tab) object[[type]]$data <- cbind(object[[type]]$data, genot[,whm, drop = FALSE]) } else message("No markers were found with these characteristics.") object$geno <- c(object$geno, oldObject$geno) object$geno <- object$geno[mixedorder(names(object$geno))] object } pushCross <- function(object, type = c("co.located","seg.distortion","missing","unlinked"), unlinked.chr = NULL, pars = NULL, ...){ fixObject <- function(x, type){ cc <- class(x) x <- unclass(x) x[[type]] <- NULL x <- x[!sapply(x, is.null)] class(x) <- cc x } if(type != "unlinked"){ if(dim(object$pheno)[1] != dim(object[[type]]$data)[1]) stop("Number of genotypes in linkage map does not match external ", type, " data.") } cs <- attr(object, "scheme") if(is.null(pars)) pars <- pp.init() if(!is.list(pars)) stop("Argument pars must be a list object with one or more named elements matching the results of pp.init()") pars <- do.call("pp.init", pars) type <- match.arg(type) if(!(class(object)[1] %in% c("bc","dh","riself","bcsft"))) stop("Pushing of markers is not supported for this population type, see ?pushCross.") if(is.null(object[[type]]) & !(type %in% "unlinked")) stop("There are no markers of this type to push back.") if(type == "co.located"){ odat <- object$co.located$data mtab <- object$co.located$table mnam <- markernames(object) bm <- split(mtab, mtab$bins) om <- sapply(bm, function(el) as.character(el$mark[1])) if(!all(mwh <- om %in% mnam)){ warning("Some co-located marker anchors do not exist in map .. 
using matching set only.") bm <- bm[mwh] om <- om[mwh] } chrn <- rep(names(nmar(object)), times = nmar(object)) nb <- sapply(bm, nrow) chrn <- chrn[pmatch(om, mnam)] uc <- unique(chrn) for(i in uc){ bmc <- bm[chrn %in% i] markc <- as.character(unlist(sapply(bmc, function(el) el$mark[2:length(el$mark)]))) omap <- object$geno[[i]]$map for (j in 1:length(bmc)){ whm <- (1:length(omap))[names(omap) == bmc[[j]]$mark[1]] nmap <- c(omap[1:whm], rep(omap[whm], length(bmc[[j]]$mark) - 1)) if (length(omap) != whm) nmap <- c(nmap, omap[(whm + 1):length(omap)]) names(nmap)[(whm + 1):(whm + length(bmc[[j]]$mark) - 1)] <- as.character(bmc[[j]]$mark)[-1] omap <- nmap } whc <- dimnames(odat)[[2]] %in% markc object$geno[[i]]$data <- cbind(object$geno[[i]]$data, odat[, whc, drop = FALSE]) object$geno[[i]]$data <- object$geno[[i]]$data[, names(nmap)] object$geno[[i]]$map <- omap } object <- fixObject(object, type) attr(object, "scheme") <- cs return(object) } if(type %in% c("seg.distortion","missing")){ push.object <- oe <- object push.object$geno <- list() whm <- rep(TRUE, ncol(object[[type]]$data)) if(type == "seg.distortion"){ tabs <- object$seg.distortion$table if(!is.null(seg.thresh <- pars$seg.thresh)){ pval <- 10^(-tabs$neglog10P) if(length(seg.thresh) == 1) whm <- pval > seg.thresh else if(length(seg.thresh) == 2) whm <- (pval > min(seg.thresh)) & (pval < max(seg.thresh)) else stop("A numerical seg.thresh should contain no more than two elements.") } else if(!is.null(seg.ratio <- pars$seg.ratio)) { sr <- as.numeric(unlist(strsplit(seg.ratio, ":"))) ptab <- tabs[,6:ncol(tabs)] if(length(sr) != ncol(ptab)) stop("Wrong number of ratio elements for cross type.") psr <- sr/sum(sr) ptab <- t(apply(ptab, 1, function(el) el/sum(el))) pmx <- apply(ptab, 1, max) pmn <- apply(ptab, 1, min) whm <- pmx < max(psr)[1] | pmn > min(psr)[1] } else stop("Need a threshold or ratio (see ?pushCross).") } if(type %in% "missing"){ tabs <- object$missing$table whm <- tabs$missing < pars$miss.thresh } if(any(whm)){ inds <- (1:ncol(object[[type]]$data))[whm] push.object$geno[[type]]$data <- object[[type]]$data[,whm, drop = FALSE] push.object$geno[[type]]$map <- 1:ncol(push.object$geno[[type]]$data) names(push.object$geno[[type]]$map) <- dimnames(push.object$geno[[type]]$data)[[2]] class(push.object$geno[[type]]) <- class(object$geno[[1]]) } else stop("There are no markers to push back with these characteristics") object[[type]]$data <- object[[type]]$data[,!whm, drop = FALSE] object[[type]]$table <- object[[type]]$table[!whm,] if(dim(object[[type]]$data)[2] == 0) object <- fixObject(object, type) object$geno <- c(object$geno, push.object$geno) } if(type %in% "unlinked"){ if(is.null(unlinked.chr)) stop("Argument unlinked.chr cannot be NULL.") if(!all(unlinked.chr %in% names(nmar(object)))) stop("Some names in unlinked.chr do not match linkage group names of object.") push.object <- subset(object, chr = unlinked.chr) oe <- subset(object, paste("-", unlinked.chr, sep ="")) } oe$geno <- lapply(oe$geno, function(el){ hm <- floor(el$map[length(el$map)]/10) if(hm != 0){ whp <- seq(el$map[1], el$map[length(el$map)], length.out = 2 + hm) whp <- whp[2:(length(whp) - 1)] pm <- c() for(i in 1:length(whp)){ dm <- abs(el$map - whp[i]) pm[i] <- (1:length(el$map))[dm == min(dm)][1] } } else pm <- NULL whp <- unique(c(1,pm,length(el$map))) el$data <- el$data[,whp, drop = FALSE] el$map <- el$map[whp] el }) nme <- totmar(oe) mn <- markernames(push.object) chrn <- rep(names(nmar(oe)), times = nmar(oe)) oe$geno <- c(oe$geno, push.object$geno) chru 
<- paste("UL", 1:length(mn), sep = "") chro <- c(chrn, chru) erf <- est.rf(oe)$rf lod <- erf lod[lower.tri(erf)] <- t(erf)[lower.tri(erf)] erf[upper.tri(erf)] <- t(erf)[upper.tri(erf)] diag(erf) <- 1; diag(lod) <- 0; tme <- totmar(oe) ind <- (nme + 1):tme for(i in (nme + 1):tme){ if(!(chro[i] %in% unique(chrn))){ wh <- (erf[,i] <= pars$max.rf) & (lod[,i] > pars$min.lod) link <- chro %in% chrn whl <- wh[link]; whr <- wh[!link] if(any(whl) | any(whr)){ if(any(whl)){ chrt <- chro[link][whl] erfs <- erf[,i][link][whl] } else if(any(whr)) { chrt <- chro[!link][whr] erfs <- erf[,i][!link][whr] } chi <- chrt[erfs == min(erfs)][1] chro[!link][whr] <- chro[chro == chro[i]] <- chi } } } for(i in 1:length(mn)) object <- movemarker(object, mn[i], chro[nme + i]) nm <- nmar(object) lenu <- grep("UL",names(nm)) names(object$geno)[lenu] <- paste("UL",1:length(lenu),sep= "") object$geno <- object$geno[mixedorder(names(object$geno))] attr(object, "scheme") <- cs object } combineMap <- function(..., id = "Genotype", keep.all = TRUE, merge.by = "genotype"){ mapl <- list(...) if(merge.by %in% "genotype"){ pkeep.all <- keep.all marku <- unlist(lapply(mapl, function(el) markernames(el))) if(any(table(marku) > 1)) stop("Non-unique markers between linkage maps.") } else { pkeep.all <- TRUE genu <- unlist(lapply(mapl, function(el) as.character(el$pheno[[id]]))) if(any(table(genu) > 1)) stop("Non-unique genotypes between linkage maps.") } if(length(unique(sapply(mapl, function(el) class(el)[1]))) > 1) stop("Classes of maps need to be identical.") scheme <- lapply(mapl, function(el) attr(el, "scheme")) if(any(!sapply(scheme, is.null))){ sc <- do.call("rbind", scheme) if((nrow(sc) != length(mapl)) | !all(duplicated(sc)[2:nrow(sc)])) stop("Mismatched cross schemes in linkage maps") } if(!all(sapply(mapl, function(el) id %in% names(el$pheno)))) stop("Some linkage maps do not contain column\"", id, "\".") mapl <- lapply(mapl, function(el){ names(el$geno) <- gsub("x","X", names(el$geno)) el}) maplb <- lapply(mapl, function(el, merge.by){ mapb <- do.call("cbind", lapply(el$geno, function(x) x$data)) mdist <- unlist(pull.map(el)) chrs <- rep(names(el$geno), times = nmar(el)) cl <- sapply(el$geno, function(el) class(el)) dimnames(mapb)[[2]] <- paste(chrs, markernames(el), as.character(mdist), cl, sep = ";") dimnames(mapb)[[1]] <- as.character(el$pheno[[id]]) if(merge.by %in% "marker"){ mapb <- as.data.frame(t(mapb)) mapb[[merge.by]] <- markernames(el) mapb[["dims"]] <- dimnames(mapb)[[1]] } else { mapb <- as.data.frame(mapb) mapb[[merge.by]] <- as.character(el$pheno[[id]]) } mapb }, merge.by) phelb <- lapply(mapl, function(el) el$pheno) mapm <- maplb[[1]] phem <- phelb[[1]] for(i in 1:(length(maplb) - 1)) { mapm <- merge(mapm, maplb[[i + 1]], by = merge.by, all = keep.all) phem <- merge(phem, phelb[[i + 1]], by = id, all = pkeep.all) } if(length(wh <- grep("dims", names(mapm)))){ dnam <- apply(mapm[,wh], 1, function(el){ el <- el[!is.na(el)] el[1] }) omit <- c(names(mapm)[wh], "marker") mapm <- mapm[,!(names(mapm) %in% omit)] rownames(mapm) <- dnam } else { rownames(mapm) <- mapm[["genotype"]] mapm <- t(mapm[,!(names(mapm) %in% "genotype")]) } nams <- dimnames(mapm)[[2]] mxo <- mixedorder(nams) mapm <- mapm[,mxo] phem <- phem[mixedorder(as.character(phem[[id]])),,drop = FALSE] spl.m <- strsplit(rownames(mapm), ";") ch <- sapply(spl.m, "[", 1) mapf <- list() mapf$geno <- lapply(split.data.frame(mapm, ch), function(el, nams){ temp <- list() spl.c <- strsplit(rownames(el), ";") temp$data <- as.matrix(t(el)) rownames(temp$data) 
<- nams temp$map <- as.numeric(sapply(spl.c, "[", 3)) names(temp$map) <- dimnames(temp$data)[[2]] <- sapply(spl.c, "[", 2) mo <- order(temp$map) temp$map <- temp$map[mo] temp$data <- temp$data[,mo, drop = FALSE] class(temp) <- unique(sapply(spl.c, "[", 4)) temp }, nams[mxo]) class(mapf) <- class(mapl[[1]]) attr(mapf, "scheme") <- scheme[[1]] mapf$pheno <- phem mapf$geno <- mapf$geno[mixedorder(names(mapf$geno))] mapf } statMark <- function(cross, chr, stat.type = c("marker","interval"), map.function = "kosambi"){ if (!any(class(cross) == "cross")) stop("Input should have class \"cross\".") if(!(class(cross)[1] %in% c("bc","dh","riself","bcsft"))) stop("This function is not suitable for this population type, see ?statMark.") if(any(!(stat.type %in% c("marker","interval")))) stop("Value for stat.type argument does not match allowable names, see ?statMak.") if (!missing(chr)) cross <- subset(cross, chr = chr) nm <- nmar(cross) mrk <- list() if("marker" %in% stat.type) { mrk$marker <- geno.table(cross, scanone.output = TRUE) mrk$marker$dxo <- unlist(lapply(cross$geno, function(el) { if(ncol(el$data) < 3) sxo <- rep(0, ncol(el$data)) else { sxo <- sapply(3:ncol(el$data), function(i, el) { out <- el$data[,i-2] == el$data[,i] left <- el$data[,i-2] != el$data[,i-1] sm <- out + left sm[sm != 2] <- 0 sm[sm == 2] <- 1 sum(sm, na.rm = TRUE) }, el) c(0, sxo, 0) }})) } if("interval" %in% stat.type){ ic <- sapply(cross$geno, function(el) length(el$map) > 1) icross <- subset(cross, chr = names(nmar(cross))[ic]) nmo <- nmar(icross) rf <- lod <- c() for(i in 1:length(nmo)){ sc <- subset(icross, chr = names(nmo)[i]) erf <- est.rf(sc)$rf snm <- nmo[i] inds <- cbind(1:(snm - 1), 2:snm) rf <- c(rf, erf[inds[,2:1, drop = FALSE]]) lod <- c(lod, erf[inds]) } mrk$interval <- cbind.data.frame(erf = rf, lod = lod) mrk$interval$dist <- unlist(lapply(icross$geno, function(el) diff(el$map))) mf <- switch(map.function, kosambi = mf.k, haldane = mf.h, morgan = mf.m, cf = mf.cf) mrk$interval$mrf <- mf(mrk$interval$dist) mrk$interval$recomb <- unlist(lapply(icross$geno, function(el){ sapply(2:ncol(el$data), function(i, el) sum(abs(el$data[,i] - el$data[,i - 1]) > 0, na.rm = TRUE), el) })) rownames(mrk$interval) <- unlist(lapply(icross$geno, function(el){ len <- length(el$map) paste("(",names(el$map)[1:(len - 1)],",", names(el$map)[2:len],")", sep = "") })) chr <- rep(names(nmo), times = nmo - 1) pos <- unlist(lapply(icross$geno, function(el) el$map[1:(length(el$map)-1)] + diff(el$map)/2), use.names = FALSE) mrk$interval <- cbind.data.frame(chr = chr, pos = pos, mrk$interval) mrk$interval$chr <- as.character(mrk$interval$chr) if(any(nm == 1)){ sc1 <- names(nm)[nm == 1] mark1 <- markernames(subset(cross, chr = sc1)) for(i in 1:length(sc1)) { mrk$interval[nrow(mrk$interval) + 1,1] <- sc1[i] mrk$interval$pos[nrow(mrk$interval)] <- 0 rownames(mrk$interval)[nrow(mrk$interval)] <- paste("(",mark1[i],")", sep = "") } mrk$interval <- mrk$interval[mixedorder(mrk$interval$chr),] } } mrk } profileMark <- function(cross, chr, stat.type = "marker", use.dist = TRUE, map.function = "kosambi", crit.val = NULL, display.markers = FALSE, mark.line = FALSE, ...){ if (!any(class(cross) == "cross")) stop("Input should have class \"cross\".") if(!(class(cross)[1] %in% c("bc","dh","riself","bcsft"))) stop("This function is not suitable for this population type, see ?profileMark.") if (!missing(chr)) cross <- subset(cross, chr) cross$geno <- cross$geno[mixedorder(names(nmar(cross)))] stypes <- 
c("seg.dist","miss","prop","dxo","erf","lod","dist","mrf","recomb") mtypes <- c("marker","interval") if(any(!(stat.type %in% c(mtypes, stypes)))) stop("Value for stat.type argument does not match allowable names, see ?profileMark.") rmtypes <- rep(mtypes, times = c(4,5)) if(!any(stypes %in% stat.type)){ call.type <- mtypes[mtypes %in% stat.type] stat.type <- stypes[rmtypes %in% call.type] } else call.type <- unique(rmtypes[stypes %in% stat.type]) if(!is.null(crit.val)){ if(crit.val != "bonf") stop("Argument crit.val can only be \"bonf\".") } stat.type <- stypes %in% stat.type stat <- stato <- statMark(cross, names(nmar(cross)), call.type, map.function) nm <- nmar(cross) if(use.dist) { dist <- lapply(cross$geno, function(el) { dist <- diff(el$map)/100 tel <- c(0.05, dist) names(tel)[1] <- names(el$map)[1] tel}) dist <- cumsum(unlist(dist)) } else dist <- 1:sum(nm) adist <- dist mn <- markernames(cross) chrn <- factor(rep(names(nm), times = nm), levels = names(nm)) plis <- list if("marker" %in% names(stat)) { nlp <- stat$marker$neglog10P mtype <- stat.type[1:4] nc <- ncol(stat$marker) mtype <- c(mtype[1:2],rep(mtype[3],nc - 5), mtype[4]) props <- paste("Prop. ",names(summary(cross)$typing.freq), sep = "") mnam <- c("Seg. Distortion","Missing",props,"Double Crossovers")[mtype] mnam <- paste("Marker: ", mnam, sep = "") stat$marker <- cbind.data.frame(val = unlist(stat$marker[,(3:nc)[mtype]])) stat$marker$dist <- rep(dist, length(mnam)) stat$marker$nams <- rep(mnam, each = length(dist)) stat$marker$lg <- rep(chrn, length(mnam)) if(!is.null(crit.val)){ crit.val <- 0.05/totmar(cross) critm <- mn logmc <- 10^(-nlp) > crit.val critm[logmc] <- "" stato$marker$crit.val <- logmc stat$marker$mark <- rep(critm, length(mnam)) } } if("interval" %in% names(stat)){ ni <- nm - 1 ni[ni == 0] <- 1 itype <- stat.type[5:9] inam <- c("Est. Recomb. Frac.", "LOD", "Map Dist.", "Map Recomb. Frac.","# Recombinations")[itype] inam <- paste("Interval: ", inam, sep = "") intn <- rownames(stat$interval) lod <- stat$interval$lod stat$interval <- cbind.data.frame(val = unlist(stat$interval[,(3:7)[itype]])) idist <- unlist(lapply(split(dist, chrn), function(el){ if(length(el) == 1) el else el[1:(length(el)-1)] + diff(el)/2 })) stat$interval$nams <- rep(inam, each = length(idist)) stat$interval$dist <- rep(idist, length(inam)) stat$interval$lg <- rep(rep(names(nm), times = ni), length(inam)) if(!is.null(crit.val)){ crit.val <- -log10(0.05/(sum(nm - 1))) criti <- intn logic <- lod > crit.val criti[logic] <- "" stato$interval$crit.val <- logic stat$interval$mark <- rep(criti, length(inam)) } } stat <- do.call("rbind.data.frame", stat) rownames(stat) <- NULL if(!display.markers){ labs <- rep("",length(adist)) wh <- c(1, cumsum(nm)[1:(length(nm) - 1)] + 1) labs[wh] <- paste(names(nm), "1", sep = ".") } else labs <- mn print(xyplot(val ~ dist | nams, data = stat, groups = stat$lg, panel = function(x, y, groups = groups, subscripts = subscripts, crit.mark, nams, adist, mark.line, ...) { if(mark.line) panel.abline(v = adist, lty = 2, col = gray(0.8)) if(length(grep("Prop.", unique(nams[subscripts])))) panel.abline(h = 0.5, lty = 2, col = gray(0.5)) if(!is.null(crit.mark)){ panel.text(x, y, labels = crit.mark[subscripts], ...) } panel.superpose(x, y, groups = groups, subscripts = subscripts, ...) 
}, ylab = "", scales = list(y = list(relation = "free"), x = list(labels = labs, rot = 45, at = adist, cex = 0.6)), xlab = "", mark.line = mark.line, crit.mark = stat$mark, nams = stat$nams, adist = adist, ...)) invisible(stato) } statGen <- function(cross, chr, bychr = TRUE, stat.type = c("xo","dxo","miss"), id = "Genotype"){ if (!any(class(cross) == "cross")) stop("Input should have class \"cross\".") if(!(class(cross)[1] %in% c("bc","dh","riself","bcsft"))) stop("This function not suitable for this population type, ?see statGen.") if(!(id %in% names(cross$pheno))) stop("The unique identifier for the genotypes, ", deparse(substitute(id)), ", cannot be found in the object") if(any(!(stat.type %in% c("xo","dxo","miss")))) stop("Value for stat.type argument does not match allowable names, see ?statGen.") if (!missing(chr)) cross <- subset(cross, chr = chr) nch <- nchr(cross) nm <- nmar(cross) gnam <- as.character(cross$pheno[[id]]) cnt <- list() if("xo" %in% stat.type) { cc <- sapply(cross$geno, function(el) length(el$map) > 1) ccross <- subset(cross, chr = names(nmar(cross))[cc]) cnt$xo <- do.call("cbind", lapply(ccross$geno, function(el) apply(el$data, 1, function(rows) { ad <- abs(diff(rows[!is.na(rows)])) length(ad[ad != 0]) }))) } if("dxo" %in% stat.type){ pc <- sapply(cross$geno, function(el) length(el$map) > 2) dcross <- subset(cross, chr = names(nmar(cross))[pc]) cnt$dxo <- do.call("cbind", lapply(dcross$geno, function(el) { sxo <- sapply(3:ncol(el$data), function(i, el) { out <- el$data[,i-2] == el$data[,i] left <- el$data[,i-2] != el$data[,i-1] sm <- out + left sm[sm != 2] <- 0 sm[sm == 2] <- 1 sm }, el) apply(sxo, 1, sum, na.rm = TRUE) })) } if("miss" %in% stat.type) cnt$miss <- do.call("cbind", lapply(cross$geno, function(el) apply(el$data, 1, function(rows) { rows[is.na(rows)] <- 0 length(rows[rows == 0]) }))) cnt <- lapply(cnt, function(el, gnam, bychr){ if(!bychr) { el <- apply(el, 1, sum) names(el) <- gnam } else rownames(el) <- gnam el }, gnam, bychr) cnt } profileGen <- function(cross, chr, bychr = TRUE, stat.type = c("xo","dxo","miss"), id = "Genotype", xo.lambda = NULL, ...){ if (!any(class(cross) == "cross")) stop("Input should have class \"cross\".") if(!(class(cross)[1] %in% c("bc","dh","riself","bcsft"))) stop("This function not suitable for this population type, see ?profileGen.") if(!(id %in% names(cross$pheno))) stop("The unique identifier for the genotypes, ", deparse(substitute(id)), ", cannot be found in the object") if (!missing(chr)) cross <- subset(cross, chr = chr) nm <- nmar(cross) cnt <- statGen(cross, names(nm), bychr, stat.type, id) pnam <- c("Crossovers","Double Crossovers","Missing") stat.type <- pnam[pmatch(names(cnt), c("xo","dxo","miss"))] ph <- as.character(cross$pheno[[id]]) if(("xo" %in% names(cnt)) & !is.null(xo.lambda)) { if(bychr) cxo <- apply(cnt$xo, 1, sum) else cxo <- cnt$xo xo.lambda <- ppois(cxo, xo.lambda, lower.tail = FALSE) < 0.05/length(ph) } nch <- nchr(cross) inter <- floor(seq(1, length(ph), length.out = 20)) labs <- ph[inter] dots <- list(...) 
if (!is.na(pmatch("cex", names(dots)))) p.cex <- dots$cex else p.cex <- par("cex") if(bychr){ no.splits <- nch*length(cnt) stat.type <- paste(rep(names(nm),length(cnt)), rep(stat.type, each = nch), sep = ": ") } else no.splits <- length(cnt) stat.type <- rep(stat.type, each = length(ph)) phv <- rep(ph, no.splits) noind <- rep(1:length(ph), no.splits) cntl <- unlist(cnt) form <- cntl ~ noind | stat.type grp <- rep(1, length(noind)) cntn <- cntl; noindn <- noind plot.xo <- FALSE if(!is.null(xo.lambda) & any(xo.lambda)){ inds <- (1:length(phv))[rep(xo.lambda,no.splits)] grp[inds] <- 2 cntn[-inds] <- noindn[-inds] <- phv[-inds] <- NA ylims <- lapply(split(cntl, stat.type), function(el) c(0, max(el)[1] + 0.15*max(el)[1])) plot.xo <- TRUE } else ylims <- lapply(split(cntl, stat.type), function(el) c(0, max(el)[1])) xo.panel <- function(x, y, subscripts = subscripts, cntn, noindn, phv, plot.xo, ...){ panel.xyplot(x, y, ...) panel.segments(x, 0, x, y, ...) if(plot.xo) panel.text(noindn[subscripts], cntn[subscripts], labels = phv[subscripts], adj = c(-0.2,-0.2), srt = 45, ...) } print(xyplot(form, groups = factor(grp), panel = panel.superpose, panel.groups = xo.panel, cntn = cntn, noindn = noindn, phv = phv, plot.xo = plot.xo, scales = list(x = list(labels = labs, rot = 45, at = inter, cex = p.cex), y = list(relation = "free")), xlab = "Genotypes", ylab = "",ylim = ylims, ...)) invisible(list(stat = cnt, xo.lambda = xo.lambda)) } alignCross <- function(object, chr, maps, ...){ if (class(object)[2] != "cross") stop("object should have class \"cross\".") if(missing(maps)) stop("map argument cannot be missing.") call <- match.call() mp <- deparse(call$maps) if(!grep("list", mp)) stop("maps argument must be a list of maps") mapn <- names(maps) if(any(mapn %in% "") | is.null(mapn)){ warning("Some maps have not been named .. 
using map object name.")
    mp <- substring(mp, 6, nchar(mp) - 1)
    mp <- gsub(" ", "", mp)
    mp <- strsplit(mp, ",")[[1]]
    if(length(mp) > 1){
      gp <- grep("=", mp)
      if(length(gp)) ind <- (1:length(mp))[-grep("=",mp)]
      else ind <- (1:length(mp))
    } else ind <- 1
    mapn[ind] <- mp[ind]
  }
  if(!missing(chr)) object <- subset(object, chr = chr)
  maps <- lapply(maps, function(el){
    if(class(el)[2] %in% "cross"){
      mn <- markernames(el)
      ref.chr <- rep(names(nmar(el)), times = nmar(el))
      ref.dist <- unlist(pull.map(el))
      cbind.data.frame(marker = mn, ref.chr = ref.chr, ref.dist = ref.dist)
    } else if(class(el)[1] %in% "data.frame"){
      nms <- c("marker", "ref.chr", "ref.dist")
      if(!all(nms %in% names(el)))
          stop("One of \"marker\", \"ref.chr\", \"ref.dist\" could not be found in data frame.")
      el[,nms]
    } else stop("All maps must inherit class \"cross\" or \"data.frame\".")
  })
  mo <- pull.map(object)
  nm <- nmar(object)
  ldat <- list()
  for(i in 1:length(mo)){
    mt <- mo[[i]]
    con <- lapply(maps, function(el, mt){
      pm <- match(names(mt), el$marker)
      md <- mt[!is.na(pm)]
      pm <- pm[!is.na(pm)]
      if(!length(pm)) NULL
      else {
        el <- el[pm,]
        el$map.dist <- md
        el
      }
    }, mt)
    names(con) <- paste("Ref: ", mapn, sep = "")
    con <- con[!sapply(con, is.null)]
    if(length(con)){
      nr <- sapply(con, nrow)
      ldat[[i]] <- do.call("rbind.data.frame", con)
      ldat[[i]]$map.chr <- names(nm)[i]
      ldat[[i]] <- cbind(map = rep(names(con), times = nr), ldat[[i]])
    }
  }
  fdat <- do.call("rbind.data.frame", ldat)
  if(dim(fdat)[1] == 0)
      stop("There are no matching markers between the inputted map and the reference maps.")
  rownames(fdat) <- NULL
  fdat$map.chr <- factor(fdat$map.chr, levels = names(object$geno))
  print(xyplot(map.dist ~ ref.dist | map.chr*map, groups = fdat$ref.chr, data = fdat,
               scales = list(relation = "free"), xlab = "Reference distance",
               ylab = "Map distance", panel = panel.superpose,
               panel.groups = function(x, y, subscripts = subscripts,
                                       labs = as.character(fdat$ref.chr), ...)
                   panel.text(x, y, labels = labs[subscripts], ...), ...))
  invisible(fdat)
}
pValue <- function(dist = seq(25,40, by = 5), pop.size = 100:500, map.function = "kosambi", LOD = FALSE){
  colour_hue <- function(n) {
    hues = seq(15, 375, length = n + 1)
    hcl(h = hues, l = 65, c = 100)[1:n]
  }
  if(max(dist) > 100) stop("Genetic distance should not exceed 100cM.")
  if(min(dist) < 0.001) stop("Minimum genetic distance allowed is 0.001cM.")
  mf <- switch(map.function, kosambi = mf.k, haldane = mf.h, morgan = mf.m, cf = mf.cf)
  rf <- mf(dist)
  val <- list()
  for(i in 1:length(rf)){
    rec <- rf[i]*pop.size
    if(LOD){
      val[[i]] <- (pop.size - rec)*log10(2*(1 - rf[i])) + rec*log10(2*rf[i])
      ylab <- list("LOD score of linkage", cex = 1.5)
    } else {
      val[[i]] <- -log10(exp(-2*((pop.size/2 - rec)^2)/pop.size))
      ylab <- list(expression(paste("-log10 ",epsilon, sep = "")), cex = 1.5)
    }
  }
  dat <- cbind.data.frame(val = unlist(val))
  dat$pop.size <- rep(pop.size, length(dist))
  dat$dist <- rep(dist, each = length(pop.size))
  cols <- colour_hue(length(dist))
  labs <- paste(dist, " cM", sep = "")
  print(xyplot(val ~ pop.size, type = "l", data = dat, groups = dat$dist, lwd = 2,
               col = cols, xlab = "Number of genotypes in population", ylab = ylab,
               key = list(x = 0.05, y = 0.9, text = list(labs, cex = 2),
                          lines = list(col = cols, lwd = 3))))
}
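# Usage sketch (not part of the package source; mapDH is the demonstration
# cross object shipped with ASMap and used in the vignette below):
# data(mapDH, package = "ASMap")
# pValue(dist = seq(25, 40, by = 5), pop.size = 100:500)   # linkage p-value curves
# mapDHq <- quickEst(mapDH)                                # fast map re-estimation
# chrlen(mapDHq)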
## End of file: ASMap/R/mstmap14.R
## ----setup, include=FALSE, cache=FALSE----------------------------------------
library(knitr)
library(formatR)
# set global chunk options
options(formatR.arrow = FALSE)
opts_chunk$set(fig.path = 'figure/Rplots-', fig.align = 'center', fig.show = 'hold',
    comment = NA, background = 'white', highlight = FALSE, tidy = TRUE,
    size = "small", continue = " ")
knit_hooks$set(source = function(x, options){
    prp <- c("R> ")
    if(!options$prompt) prp <- ""
    wd <- getOption("width")
    if(!is.null(width <- options$tidy.opts$width)) options(width = width)
    x <- strwrap(x, width = getOption("width"))
    lenx <- length(x)
    pl <- unlist(sapply(gregexpr("\\(", x), function(el){
        if((length(el) == 1)) if(unique(el) == -1) 0 else 1 else length(el)}))
    pr <- unlist(sapply(gregexpr("\\)", x), function(el){
        if((length(el) == 1)) if(unique(el) == -1) 0 else 1 else length(el)}))
    wp <- rep(prp, length(x))
    if(length(x) > 1){
        xns <- gsub(" ", "", x)
        op <- gregexpr("\\+|-|\\*|\\|=", x)
        ct <- sapply(1:(length(x) - 1), function(i, xns, op)
            (nchar(x[i]) %in% op[[i]]) | (1 %in% op[[i + 1]]), xns, op)
        for(i in 2:length(x)){
            if((sum(pl[1:(i-1)]) != sum(pr[1:(i-1)])) | ct[i - 1])
                wp[i] <- paste(options$continue, " ", sep = "")
        }
    }
    options(width = wd)
    paste(c("\\begin{Rinput}", paste(wp, x, sep = ""), "\\end{Rinput}", ""), collapse = "\n")
}, output = function(x, options){
    if(all(gregexpr("begin\\{tabu|begin\\{longtab", x)[[1]] > 0)) x
    else paste(c("\\begin{Routput}\n", x, "\\end{Routput}\n"), sep = "")
})

## ----setup-lib, include=FALSE, cache=FALSE------------------------------------
library(ASMap)

## ----data0, eval = TRUE, echo = TRUE, prompt = TRUE----------------------------
data(mapDHf, package = "ASMap")
data(mapDH, package = "ASMap")
data(mapBCu, package = "ASMap")

## ----mst-df,eval=FALSE,echo=TRUE,prompt=FALSE----------------------------------
# mstmap.data.frame(object, pop.type = "DH", dist.fun = "kosambi",
#     objective.fun = "COUNT", p.value = 1e-06, noMap.dist = 15,
#     noMap.size = 0, miss.thresh = 1, mvest.bc = FALSE, detectBadData = FALSE,
#     as.cross = TRUE, return.imputed = TRUE, trace = FALSE, ...)

## ----datadf,eval=TRUE,echo=TRUE,prompt=TRUE------------------------------------
testd <- mstmap(mapDHf, dist.fun = "kosambi", trace = TRUE, as.cross = TRUE)
nmar(testd)
chrlen(testd)

## ----mst-cr,eval = FALSE,echo=TRUE---------------------------------------------
# mstmap.cross(object, chr, id = "Genotype", bychr = TRUE,
#     suffix = "numeric", anchor = FALSE, dist.fun = "kosambi",
#     objective.fun = "COUNT", p.value = 1e-06, noMap.dist = 15,
#     noMap.size = 0, miss.thresh = 1, mvest.bc = FALSE, detectBadData = FALSE,
#     return.imputed = FALSE, trace = FALSE, ...)

## ----data, eval = TRUE, echo = TRUE, prompt = TRUE------------------------------
nmar(mapDH)
pull.map(mapDH)[[4]]

## ----mst1,eval=TRUE,echo=TRUE,prompt=TRUE--------------------------------------
mapDHa <- mstmap(mapDH, bychr = FALSE, dist.fun = "kosambi", trace = TRUE)
nmar(mapDHa)
pull.map(mapDHa)[[4]]

## ----mst2,eval=TRUE,echo=TRUE,prompt=TRUE--------------------------------------
mapDHb <- mstmap(mapDH, bychr = TRUE, dist.fun = "kosambi", anchor = TRUE, trace = TRUE)
nmar(mapDHb)

## ----mst3,eval=TRUE,echo=TRUE,prompt=TRUE--------------------------------------
mapDHc <- mstmap(mapDH, bychr = TRUE, dist.fun = "kosambi", anchor = TRUE,
    trace = TRUE, p.value = 1e-04)
nmar(mapDHc)

## ----mst4,eval=TRUE,echo=TRUE,prompt=TRUE--------------------------------------
mapDHd <- mstmap(mapDH, chr = names(mapDH$geno)[1:3], bychr = FALSE,
    dist.fun = "kosambi", trace = TRUE, p.value = 1e-04)
nmar(mapDHd)

## ----pp1,eval=FALSE,echo=TRUE--------------------------------------------------
# pullCross(object, chr, type = c("co.located","seg.distortion","missing"),
#     pars = NULL, replace = FALSE, ...)
# pushCross(object, chr, type = c("co.located","seg.distortion","missing","unlinked"),
#     unlinked.chr = NULL, pars = NULL, replace = FALSE, ...)
# pp.init(seg.thresh = 0.05, seg.ratio = NULL, miss.thresh = 0.1,
#     max.rf = 0.25, min.lod = 3)

## ----pp2,eval=TRUE,echo=TRUE,prompt=TRUE---------------------------------------
mapDHs <- pullCross(mapDH, type = "co.located")
mapDHs <- pullCross(mapDHs, type = "seg.distortion", pars = list(seg.thresh = 0.02))
mapDHs <- pullCross(mapDHs, type = "missing", pars = list(miss.thresh = 0.03))
names(mapDHs)
names(mapDHs$co.located)

## ----pp3,eval=TRUE,echo=TRUE,prompt=TRUE---------------------------------------
mapDHs$seg.distortion$table

## ----pp4,eval=TRUE,echo=TRUE,prompt=TRUE---------------------------------------
head(mapDHs$co.located$table)

## ----pp5,eval=TRUE,echo=TRUE,prompt=TRUE---------------------------------------
mapDHs <- mstmap(mapDHs, bychr = FALSE, dist.fun = "kosambi", trace = TRUE,
    anchor = TRUE)
nmar(mapDHs)

## ----pp6,eval=TRUE,echo=TRUE,prompt=TRUE---------------------------------------
mapDHs <- pushCross(mapDHs, type = "co.located")
mapDHs <- pushCross(mapDHs, type = "seg.distortion", pars = list(seg.thresh = 0.001))
mapDHs <- pushCross(mapDHs, type = "missing", pars = list(miss.thresh = 0.05))
names(mapDHs)

## ----pp7,eval=TRUE,echo=TRUE,prompt=TRUE---------------------------------------
pull.map(mapDHs)[[4]]
pull.map(mapDHs)[[21]]

## ----pp8,eval=TRUE,echo=TRUE,prompt=TRUE---------------------------------------
mapDHs <- mstmap(mapDHs, bychr = TRUE, dist.fun = "kosambi", trace = TRUE,
    anchor = TRUE, p.value = 2)

## ----heat1,eval=FALSE,echo=TRUE------------------------------------------------
# heatMap(x, chr, mark, what = c("both", "lod", "rf"), lmax = 12,
#     rmin = 0, markDiagonal = FALSE,
#     color = rev(rainbow(256, start = 0, end = 2/3)), ...)

## ----heat2,echo=TRUE,eval=FALSE,prompt=TRUE------------------------------------
# heatMap(mapDH, lmax = 50)

## ----prof1, eval = FALSE-------------------------------------------------------
# statGen(cross, chr, bychr = TRUE, stat.type = c("xo", "dxo", "miss"), id = "Genotype")
# profileGen(cross, chr, bychr = TRUE, stat.type = c("xo", "dxo", "miss"),
#     id = "Genotype", xo.lambda = NULL, ...)
# statMark(cross, chr, stat.type = c("marker", "interval"), map.function = "kosambi")
# profileMark(cross, chr, stat.type = "marker", use.dist = TRUE,
#     map.function = "kosambi", crit.val = NULL, display.markers = FALSE,
#     mark.line = FALSE, ...)

## ----prof2,fig.width = 15,fig.height = 8,fig.pos = "t",fig.env="figure",fig.scap="NA",fig.cap = "Genotype profiles of missing values, double recombinations and recombinations for \\texttt{mapDH}.",prompt=TRUE----
profileGen(mapDH, bychr = FALSE, stat.type = c("xo", "dxo", "miss"),
    id = "Genotype", xo.lambda = 25, layout = c(1, 3), lty = 2)

## ----prof3,fig.width = 15,fig.height = 8,fig.pos = "t",fig.env="figure",fig.scap="NA",fig.cap = "Marker and interval profiles of segregation distortion, double crossovers, estimated recombination fractions and LOD scores for \\texttt{mapDH}.",prompt=TRUE, warning = FALSE----
profileMark(mapDH, stat.type = c("seg.dist", "dxo", "erf", "lod"),
    id = "Genotype", layout = c(1, 4), type = "l")

## ----clones01,eval=FALSE,echo=TRUE,prompt=TRUE---------------------------------
# genClones(object, chr, tol = 0.9, id = "Genotype")
# fixClones(object, gc, id = "Genotype", consensus = TRUE)

## ----clones02,eval=TRUE,echo=TRUE,prompt=TRUE----------------------------------
gc <- genClones(mapDH, tol = 0.9)
gc$cgd

## ----clones03,eval=TRUE,echo=TRUE,prompt=TRUE----------------------------------
mapDHg <- fixClones(mapDH, gc$cgd, consensus = TRUE)
levels(mapDHg$pheno[[1]])[grep("_", levels(mapDHg$pheno[[1]]))]

## ----mb1, eval = FALSE---------------------------------------------------------
# breakCross(cross, split = NULL, suffix = "numeric", sep = ".")
# mergeCross(cross, merge = NULL, gap = 5)

## ----mb2, eval = TRUE,prompt=TRUE----------------------------------------------
mapDHb1 <- breakCross(mapDH, split = list("3B" = "3B.m.7", "6A" = "6A.m.15"))
nmar(mapDHb1)

## ----mb3, eval = TRUE,prompt=TRUE----------------------------------------------
mapDHb2 <- breakCross(mapDH, split = list("3B" = "3B.m.7"),
    suffix = list("3B" = c("3B1", "3B2")))
nmar(mapDHb2)

## ----mb4,eval=TRUE,prompt=TRUE-------------------------------------------------
mapDHm <- mergeCross(mapDHb1, merge = list("3B" = c("3B.1", "3B.2"),
    "6A" = c("6A.1", "6A.2")))
nmar(mapDHm)

## ----quick1,eval=FALSE---------------------------------------------------------
# quickEst(object, chr, map.function = "kosambi", ...)

## ----quick2,fig.width = 7,fig.height = 5,fig.pos = "t",fig.env="figure",fig.scap="NA",fig.cap = "Comparison of \\texttt{mapDH} using \\texttt{est.map} and \\texttt{quickEst}.",prompt=TRUE----
map1 <- est.map(mapDH, map.function = "kosambi")
map1 <- subset(map1, chr = names(nmar(map1))[6:15])
map2 <- quickEst(mapDH, map.function = "kosambi")
map2 <- subset(map2, chr = names(nmar(map2))[6:15])
plot.map(map1, map2)

## ----sc,eval=TRUE,prompt=TRUE--------------------------------------------------
mapDH.s <- pullCross(mapDH, type = "seg.distortion")
mapDH.s <- subsetCross(mapDH.s, ind = 3:218)
dim(mapDH.s$seg.distortion$data)[1]

## ----comb1, eval = FALSE-------------------------------------------------------
# combineMap(..., id = "Genotype", keep.all = TRUE)

## ----comb2,eval=TRUE,prompt=TRUE-----------------------------------------------
mapDH1 <- mapDH
names(mapDH1$geno)[5:14] <- paste("L", 1:10, sep = "")
mapDH1$geno <- lapply(mapDH1$geno, function(el){
    names(el$map) <- dimnames(el$data)[[2]] <- paste(names(el$map), "A", sep = "")
    el})
mapDHc <- combineMap(mapDH, mapDH1)
nmar(mapDHc)

## ----ex1, eval = FALSE---------------------------------------------------------
# data(mapBCu, package = "ASMap")

## ----ex3,eval=FALSE,echo=TRUE,prompt=TRUE--------------------------------------
# plot.missing(mapBCu)

## ----ex4,echo=TRUE,prompt=TRUE-------------------------------------------------
sg <- statGen(mapBCu, bychr = FALSE, stat.type = "miss")
mapBC1 <- subset(mapBCu, ind = sg$miss < 1600)

## ----ex5,eval=TRUE,echo=TRUE,prompt=TRUE---------------------------------------
gc <- genClones(mapBC1, tol = 0.95)
gc$cgd

## ----ex6,eval=TRUE,echo=TRUE,prompt=TRUE---------------------------------------
cgd <- gc$cgd[-c(1, 4, 5), ]
mapBC2 <- fixClones(mapBC1, cgd, consensus = TRUE)
levels(mapBC2$pheno[[1]])[grep("_", levels(mapBC2$pheno[[1]]))]

## ----ex7,eval=FALSE,echo=TRUE,prompt=TRUE--------------------------------------
# profileMark(mapBC2, stat.type = c("seg.dist", "prop", "miss"),
#     crit.val = "bonf", layout = c(1, 4), type = "l", cex = 0.5)

## ----ex8,echo=FALSE,fig.width=17,fig.height=10,warning=FALSE--------------------
profileMark(mapBC2, stat.type = c("seg.dist", "prop", "miss"),
    crit.val = "bonf", layout = c(1, 4), type = "l", cex = 0.5)

## ----ex9,eval=TRUE,echo=TRUE,prompt=TRUE---------------------------------------
mm <- statMark(mapBC2, stat.type = "marker")$marker$AB
mapBC3 <- drop.markers(mapBC2, c(markernames(mapBC2)[mm > 0.98],
    markernames(mapBC2)[mm < 0.2]))

## ----ex10,eval=TRUE,echo=TRUE,prompt=TRUE--------------------------------------
mapBC3 <- pullCross(mapBC3, type = "missing", pars = list(miss.thresh = 0.1))
mapBC3 <- pullCross(mapBC3, type = "seg.distortion", pars = list(seg.thresh = "bonf"))
mapBC3 <- pullCross(mapBC3, type = "co.located")
names(mapBC3)
sum(ncol(mapBC3$missing$data), ncol(mapBC3$seg.dist$data), ncol(mapBC3$co.located$data))

## ----ex11,eval=TRUE,echo=TRUE,prompt=TRUE,cache=TRUE---------------------------
mapBC4 <- mstmap(mapBC3, bychr = FALSE, trace = TRUE, dist.fun = "kosambi",
    p.value = 1e-12)
chrlen(mapBC4)

## ----ex12,eval=FALSE,echo=TRUE,prompt=TRUE-------------------------------------
# heatMap(mapBC4, lmax = 70)

## ----ex14,eval=FALSE,echo=TRUE,prompt=TRUE-------------------------------------
# pg <- profileGen(mapBC4, bychr = FALSE, stat.type = c("xo", "dxo", "miss"),
#     id = "Genotype", xo.lambda = 14, layout = c(1, 3), lty = 2, cex = 0.7)

## ----ex15,echo=FALSE,fig.width=17,fig.height=10,warning=FALSE-------------------
pg <- profileGen(mapBC4, bychr = FALSE, stat.type = c("xo", "dxo", "miss"),
    id = "Genotype", xo.lambda = 14, layout = c(1, 3), lty = 2, cex = 0.7)

## ----ex16,eval=TRUE,echo=TRUE,cache = TRUE,prompt=TRUE-------------------------
mapBC5 <- subsetCross(mapBC4, ind = !pg$xo.lambda)
mapBC6 <- mstmap(mapBC5, bychr = TRUE, dist.fun = "kosambi", trace = TRUE,
    p.value = 1e-12)
chrlen(mapBC6)

## ----ex17,eval=FALSE,echo=TRUE,prompt=TRUE-------------------------------------
# profileMark(mapBC6, stat.type = c("seg.dist", "prop", "dxo", "recomb"),
#     layout = c(1, 5), type = "l")

## ----ex18,echo=FALSE,fig.width=17,fig.height=12,warning=FALSE-------------------
profileMark(mapBC6, stat.type = c("seg.dist", "prop", "dxo", "recomb"),
    layout = c(1, 5), type = "l")

## ----ex19,eval=TRUE,echo=TRUE,prompt=TRUE--------------------------------------
mapBC6 <- pushCross(mapBC6, type = "missing",
    pars = list(miss.thresh = 0.22, max.rf = 0.3))

## ----ex20,eval=FALSE,echo=TRUE,prompt=TRUE-------------------------------------
# heatMap(mapBC6, chr = c("L.3", "L.5", "L.8", "L.9"), lmax = 70)

## ----ex21,echo=FALSE,fig.width=14,fig.height=8,warning=FALSE,cache=TRUE---------
heatMap(mapBC6, chr = c("L.3", "L.5", "L.8", "L.9"), lmax = 70)

## ----ex22,eval=TRUE,echo=TRUE,prompt=TRUE,cache=TRUE---------------------------
mapBC6 <- mergeCross(mapBC6, merge = list("L.3" = c("L.3", "L.5"),
    "L.8" = c("L.8", "L.9")))
names(mapBC6$geno) <- paste("L.", 1:7, sep = "")
mapBC7 <- mstmap(mapBC6, bychr = TRUE, trace = TRUE, dist.fun = "kosambi",
    p.value = 2)
chrlen(mapBC7)

## ----ex23,eval=FALSE,echo=TRUE,prompt=TRUE-------------------------------------
# pg1 <- profileGen(mapBC7, bychr = FALSE, stat.type = c("xo", "dxo", "miss"),
#     id = "Genotype", xo.lambda = 14, layout = c(1, 3), lty = 2, cex = 0.7)

## ----ex24,echo=FALSE,fig.width=17,fig.height=10,warning=FALSE-------------------
pg1 <- profileGen(mapBC7, bychr = FALSE, stat.type = c("xo", "dxo", "miss"),
    id = "Genotype", xo.lambda = 14, layout = c(1, 3), lty = 2, cex = 0.7)

## ----ex25,eval=TRUE,echo=TRUE,cache = TRUE,prompt=TRUE-------------------------
mapBC8 <- subsetCross(mapBC7, ind = !pg1$xo.lambda)
mapBC9 <- mstmap(mapBC8, bychr = TRUE, dist.fun = "kosambi", trace = TRUE,
    p.value = 2)
chrlen(mapBC9)

## ----ex26,eval=FALSE,echo=TRUE,prompt=TRUE-------------------------------------
# profileMark(mapBC9, stat.type = c("seg.dist", "prop", "dxo", "recomb"),
#     layout = c(1, 5), type = "l")

## ----ex27,echo=FALSE,fig.width=17,fig.height=10,warning=FALSE-------------------
profileMark(mapBC9, stat.type = c("seg.dist", "prop"), layout = c(1, 3), type = "l")

## ----ex28,eval=TRUE,echo=TRUE,prompt=TRUE--------------------------------------
dm <- markernames(mapBC9, "L.2")[statMark(mapBC9, chr = "L.2",
    stat.type = "marker")$marker$neglog10P > 6]
mapBC10 <- drop.markers(mapBC9, dm)
mapBC11 <- pushCross(mapBC10, type = "seg.distortion",
    pars = list(seg.ratio = "70:30"))
mapBC12 <- mstmap(mapBC11, bychr = TRUE, trace = TRUE, dist.fun = "kosambi",
    p.value = 2)
round(chrlen(mapBC12) - chrlen(mapBC9), 5)
nmar(mapBC12) - nmar(mapBC10)

## ----ex29,eval=TRUE,echo=TRUE,prompt=TRUE--------------------------------------
mapBC <- pushCross(mapBC12, type = "co.located")
names(mapBC)

## ----add1,eval=TRUE,echo=TRUE,prompt=TRUE--------------------------------------
set.seed(123)
add1 <- drop.markers(mapBC, markernames(mapBC)[sample(1:3019, 2700, replace = FALSE)])
mapBCs <- drop.markers(mapBC, markernames(add1))
add3 <- add2 <- add1
add2 <- subset(add2, chr = "L.1")
add3$geno[[1]]$data <- pull.geno(add1)
add3$geno[[1]]$map <- 1:ncol(add3$geno[[1]]$data)
names(add3$geno[[1]]$map) <- markernames(add1)
names(add3$geno)[1] <- "ALL"
add3 <- subset(add3, chr = "ALL")

## ----add2,eval=TRUE,echo=TRUE,prompt=TRUE,cache=TRUE---------------------------
add1 <- subset(add1, ind = 2:300)
full1 <- combineMap(mapBCs, add1, keep.all = TRUE)
full1 <- mstmap(full1, bychr = TRUE, trace = TRUE, anchor = TRUE, p.value = 2)

## ----add3,eval=TRUE,echo=TRUE,prompt=TRUE,cache=TRUE---------------------------
add2 <- subset(add2, ind = 2:300)
full2 <- combineMap(mapBCs, add2, keep.all = TRUE)
full2 <- mstmap(full2, chr = "L.1", bychr = TRUE, trace = TRUE, anchor = TRUE,
    p.value = 2)

## ----add4,eval=TRUE,echo=TRUE,prompt=TRUE,cache=TRUE---------------------------
add3 <- subset(add3, ind = 2:300)
full3 <- combineMap(mapBCs, add3, keep.all = TRUE)
full3 <- pushCross(full3, type = "unlinked", unlinked.chr = "ALL")
full3 <- mstmap(full3, bychr = TRUE, trace = TRUE, anchor = TRUE, p.value = 2)

## ----dist1,eval=FALSE,echo=TRUE,prompt=TRUE------------------------------------
# plot.missing(mapBC4)

## ----dist3,eval=TRUE,echo=TRUE,prompt=TRUE,cache=TRUE--------------------------
mapBC4i <- mstmap(mapBC3, bychr = FALSE, trace = TRUE, dist.fun = "kosambi",
    p.value = 1e-12, return.imputed = TRUE)
mapBC4i$geno[[1]]$map[1:14]
mapBC4i$imputed.geno[[1]]$map[1:5]

## ----dist4,echo = TRUE,prompt=TRUE----------------------------------------------
len <- apply(mapBC4$geno[[1]]$data[, c(1, 5)], 1, function(el)
    length(el[!is.na(el)]))
length(len[len > 1])
bca <- apply(mapBC4i$geno[[1]]$data[, c(1, 5)], 1, function(el){
    el <- el[!is.na(el)]
    sum(abs(diff(el)))})
bca[bca > 0]

## ----dist5,echo=TRUE,prompt=TRUE------------------------------------------------
mapBC4i$imputed.geno[[1]]$data[pg$xo.lambda, 1:5]

## ----dist6,echo=TRUE,prompt=TRUE------------------------------------------------
mapBC4e <- quickEst(mapBC4)
chrlen(mapBC4)
chrlen(mapBC4e)

## ----mvest1,eval=TRUE,echo=TRUE,prompt=TRUE,cache=TRUE--------------------------
mapBC4a <- mstmap(mapBC3, bychr = FALSE, trace = TRUE, dist.fun = "kosambi",
    p.value = 1e-12, mvest.bc = TRUE)
nmar(mapBC4)

## ----mvest2,eval=TRUE,echo=TRUE,prompt=TRUE-------------------------------------
sapply(mapBC4a$geno, function(el) length(unique(round(el$map, 4)))) -
    sapply(mapBC4$geno, function(el) length(unique(round(el$map, 4))))
chrlen(mapBC4a)

## ----mvest3,eval=TRUE,echo=TRUE,prompt=TRUE-------------------------------------
mapBC4b <- quickEst(mapBC4a)
chrlen(mapBC4b)

## ----dbd1,eval=TRUE,echo=TRUE,prompt=TRUE---------------------------------------
mapBCd <- mapBC
mapBCd$geno <- lapply(mapBCd$geno, function(el){
    ns <- sample(1:ncol(el$data), ncol(el$data)/2, replace = TRUE)
    ns <- cbind(sample(1:nrow(el$data), ncol(el$data)/2, replace = TRUE), ns)
    el$data[ns] <- abs(1 - el$data[ns])
    el$data[el$data == 0] <- 2
    el})
mapBCd <- quickEst(mapBCd)
chrlen(mapBCd)

## ----dbd2,eval=TRUE,echo=TRUE,prompt=TRUE,cache=TRUE----------------------------
mapBCda <- mstmap(mapBCd, bychr = TRUE, trace = TRUE, dist.fun = "kosambi",
    p.value = 1e-12, detectBadData = TRUE)
chrlen(mapBCda)
## End of file: ASMap/inst/doc/asmapvignette.R
#' @keywords internal
"_PACKAGE"

#' @aliases ASRgenomics
#'
## usethis namespace: start
#' @import data.table
#' @import factoextra
#' @import ggplot2
#' @import scattermore
#' @importFrom utils head
#' @importFrom methods new getFunction
#' @importFrom stats prcomp var cov2cor aggregate cor cov na.omit
#' @importFrom Matrix nearPD
#' @importFrom AGHmatrix Gmatrix
#' @importFrom crayon blue
#' @importFrom cowplot plot_grid
#' @importFrom ellipse ellipse
#' @importFrom superheat superheat
## usethis namespace: end
NULL

# Add global variables to avoid NOTES.
utils::globalVariables(
  c("value", "Value", "density", "PC1", "PC2", "Ind", "SNP", "MAF",
    "het", "Row", "Col", "n.states", "..map", "state2", "state1",
    "code0", "code1A", "code1B", "code2"))
## End of file: ASRgenomics/R/ASRgenomics-package.R
#' Obtains the inverse of the genomic relationship matrix G
#'
#' @description
#' Generates the inverse of a genomic relationship matrix \eqn{\boldsymbol{G}} that is provided.
#' This input matrix should be of the full form (\eqn{n \times n})
#' with individual names assigned to
#' \code{rownames} and \code{colnames}. Several checks for the stability of the matrix are
#' presented based on the reciprocal condition number.
#'
#' In case of an ill-conditioned matrix,
#' options of blending, bending or aligning before inverting are available.
#' These options will be deprecated (discontinued)
#' in future versions of \code{ASRgenomics}
#' as they can be better implemented in the function \code{G.tuneup()}.
#'
#' Based on procedures published by Nazarian and Gezan (2016).
#'
#' @param G Input of the symmetric genomic relationship matrix \eqn{\boldsymbol{G}} in
#' full form (\eqn{n \times n}), to obtain its inverse (default = \code{NULL}).
#' @param A Input of the pedigree relationship matrix \eqn{\boldsymbol{A}}
#' to perform blending or aligning in full form.
#' It should be of the same dimension as the \eqn{\boldsymbol{G}} matrix
#' (\eqn{n \times n}) (default = \code{NULL}) (to be deprecated).
#' @param rcn.thr A threshold for identifying the \eqn{\boldsymbol{G}} matrix as an ill-conditioned matrix.
#' Based on the reciprocal condition number (default = \code{1e-12}).
#' @param blend If \code{TRUE} a "blending" with identity matrix \eqn{\boldsymbol{I}} or pedigree relationship matrix
#' \eqn{\boldsymbol{A}} (if provided) is performed (default = \code{FALSE}) (to be deprecated).
#' @param pblend If blending is requested this is the proportion of the identity matrix \eqn{\boldsymbol{I}} or
#' pedigree relationship matrix \eqn{\boldsymbol{A}} to blend for (default = \code{0.02}) (to be deprecated).
#' @param bend If \code{TRUE} a "bending" is performed by making the matrix near positive definite (default = \code{FALSE}) (to be deprecated).
#' @param eig.tol Defines relative positiveness (\emph{i.e.}, non-zero) of eigenvalues compared to the largest one.
#' It determines which threshold of eigenvalues will be treated as zero (default = \code{NULL}) (to be deprecated).
#' @param align If \code{TRUE} the genomic relationship matrix \eqn{\boldsymbol{G}} is aligned to the
#' pedigree relationship matrix \eqn{\boldsymbol{A}} (default = \code{FALSE}) (to be deprecated).
#' @param digits Set up the number of digits used to round the output matrix (default = \code{8}).
#' @param sparseform If \code{TRUE} it generates an inverse matrix in sparse form to be used directly in \pkg{asreml} with
#' required attributes (default = \code{FALSE}).
#' @param message If \code{TRUE} diagnostic messages are printed on screen (default = \code{TRUE}).
#'
#' @return A list with three of the following elements:
#' \itemize{
#' \item{\code{Ginv}: the inverse of \eqn{\boldsymbol{G}} matrix in full form (only if \code{sparseform = FALSE})}.
#' \item{\code{Ginv.sparse}: the inverse of \eqn{\boldsymbol{G}} matrix in sparse form (only if \code{sparseform = TRUE})}.
#' \item{\code{status}: the status (\code{ill-conditioned} or \code{well-conditioned}) of
#' the inverse of \eqn{\boldsymbol{G}} matrix.}
#' \item{\code{rcn}: the reciprocal condition number of the inverse of \eqn{\boldsymbol{G}} matrix.}
#' }
#'
#' @references
#' Nazarian A., Gezan S.A. 2016. GenoMatrix: A software package for pedigree-based
#' and genomic prediction analyses on complex traits. Journal of Heredity 107:372-379.
#'
#' @export
#'
#' @examples
#' # Example: An ill-conditioned matrix.
#'
#' # Get G matrix.
#' G <- G.matrix(M = geno.apple, method = "VanRaden")$G
#' G[1:5, 1:5]
#'
#' # Get the inverse of G.
#' GINV <- G.inverse(G = G, bend = FALSE, blend = FALSE, align = FALSE)
#' GINV$Ginv[1:5, 1:5]
#' GINV$status
#'
G.inverse <- function(G = NULL, A = NULL, rcn.thr = 1e-12,
                      blend = FALSE, pblend = 0.02, bend = FALSE,
                      eig.tol = NULL, align = FALSE, digits = 8,
                      sparseform = FALSE, message = TRUE){

  # TODO check why eig.tol is not used in the function.

  # Deprecation traps ---------------------------------------------------------------------------
  if (!is.null(A) | blend | bend | align | !is.null(eig.tol)){
    warning("The arguments \'A', \'blend', \'pblend', \'bend', \'align', \'eig.tol' are still active",
            " but will be discontinued in future versions. Please use \'G.tuneup'",
            " to perform the respective actions.")
  }

  # Traps ---------------------------------------------------------------------------------------
  # Check if the class of G is matrix
  if (is.null(G) || !inherits(G, "matrix")) {
    stop("G should be a valid object of class matrix.")
  }
  # Check the rownames/colnames
  if (is.null(rownames(G))){
    stop("Individual names not assigned to rows of matrix G.")
  }
  if (is.null(colnames(G))){
    stop('Individual names not assigned to columns of matrix G.')
  }
  if ((identical(rownames(G), colnames(G))) == FALSE){
    stop("Rownames and colnames of matrix G do not match.")
  }

  # Checks for other input.
  if (pblend < 0 | pblend > 1) {
    stop("Specification of pblend must be between 0 and 1.")
  }
  if (rcn.thr <= 0) {
    stop("Value for rcn.thr must be positive.")
  }
  if(!is.null(eig.tol)){
    if (eig.tol <= 0) {
      stop("Value for eig.tol must be positive.")
    }
  }

  # Get dimensions.
  n <- dim(G)[1]
  p <- dim(G)[2]

  # Reciprocal Condition Number RCN
  rcn <- rcond(G)
  if (message){
    message('Reciprocal condition number for original matrix is: ', rcn)
  }

  # Check the RCN default value and inverting the matrix
  if (rcn > rcn.thr){
    # Try to get the inverse of G.
    ginverse <- tryCatch(
      expr = {chol2inv(chol(G))},
      error = function(holder){return(NULL)}
    )
    if (is.null(ginverse)){
      stop("Matrix G is not positive definite.")
    }
    rownames(ginverse) <- rownames(G) # Add
    colnames(ginverse) <- colnames(G) # Add
    ginverse <- round(ginverse, digits)
  }
  if (rcn < rcn.thr & message){
    message('Reciprocal condition number of G is: ', rcn,
            ', which is lower than the threshold of ', rcn.thr)
    message("G seems to be an ill-conditioned matrix.")
  }
  if (rcn < rcn.thr & !isTRUE(blend) & !isTRUE(bend) & !isTRUE(align)){
    stop('Consider bending, blending or aligning before inversion to make this matrix more stable.')
  }
  if (isTRUE(blend) || isTRUE(bend) || isTRUE(align)){
    Gb <- G.tuneup(G=G, A=A, blend=blend, pblend=pblend, bend=bend, align=align,
                   rcn=FALSE, sparseform=FALSE, determinant=FALSE, message=FALSE)$Gb
    # Try to get the inverse of Gb (tuned).
    ginverse <- tryCatch(
      expr = {chol2inv(chol(Gb))},
      error = function(holder){return(NULL)}
    )
    if (is.null(ginverse)){
      stop("Matrix G (tuned) is not positive definite.")
    }
    rownames(ginverse) <- rownames(Gb)
    colnames(ginverse) <- colnames(Gb)
  }

  ginverse <- round(ginverse, digits)

  # Reciprocal Condition Number RCN
  rcninv <- rcond(ginverse)
  if (message){
    message('Reciprocal condition number for inverted matrix is: ', rcninv)
  }

  # Evaluating Matrix Empirically
  # Note: these are simple comparisons to check stability of matrix
  n <- ncol(ginverse)
  CN.1 <- ginverse[1,2]/sqrt(ginverse[1,1]*ginverse[2,2])
  CN.N <- ginverse[(n-1),n]/sqrt(ginverse[(n-1),(n-1)]*ginverse[n,n])
  max_diag <- abs(max(diag(ginverse)))
  max_off_diag <- max(abs(ginverse - diag(diag(ginverse))))
  if(abs(CN.1) > 0.99 | abs(CN.N) > 0.99 | max_diag > 1000 | max_off_diag > 1000) {
    if (message){
      message("Inverse of matrix G appears to be ill-conditioned.")
    }
    status <- 'ill-conditioned'
  } else {
    if (message){
      message("Inverse of matrix G does not appear to be ill-conditioned.")
    }
    status <- 'well-conditioned'
  }

  # Obtaining Matrix in Sparse Form if requested (ready for ASReml-R v.4)
  if (isTRUE(sparseform)) {
    ginverse.sparse <- full2sparse(ginverse)
    attr(ginverse.sparse, "INVERSE") <- TRUE
    return(list(Ginv.sparse = ginverse.sparse, rcn=rcninv, status=status))
  } else{
    attr(ginverse, "rowNames") <- rownames(ginverse)
    attr(ginverse, "colNames") <- colnames(ginverse)
    attr(ginverse, "INVERSE") <- TRUE
    return(list(Ginv = ginverse, rcn=rcninv, status=status))
  }
}
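# A brief usage sketch of the sparse option above (the three-column layout is
# the assumed output structure of full2sparse(); see that function for the
# authoritative details):
#   GINVs <- G.inverse(G = G, sparseform = TRUE)$Ginv.sparse
#   head(GINVs)                   # (row, column, value) triplets
#   attr(GINVs, "INVERSE")        # TRUE, as set above for asreml's vm()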
## End of file: ASRgenomics/R/G_inverse.R
#' Obtains the genomic matrix from SNP data for additive or dominant relationships
#'
#' Generates the genomic numerator relationship matrix for
#' additive (VanRaden or Yang) or dominant (Su or Vitezica) relationships.
#' Matrix provided \eqn{\boldsymbol{M}} is of the form \eqn{n \times p}, with \eqn{n} individuals and \eqn{p} markers.
#' Individual and
#' marker names are assigned to \code{rownames} and \code{colnames}, respectively.
#' SNP data is coded as 0, 1, 2 (integers or decimal numbers).
#' Missing values, if present, need to be specified.
#'
#' @param M A matrix with SNP data of form \eqn{n \times p}, with \eqn{n} individuals and \eqn{p} markers.
#' Individual and marker names are assigned to \code{rownames} and \code{colnames}, respectively.
#' SNP data is coded as 0, 1, 2 (integers or decimal numbers) (default = \code{NULL}).
#' @param method The method considered for calculation of genomic matrix.
#' Options are: \code{"VanRaden"} and \code{"Yang"} for additive matrix,
#' and \code{"Su"} and \code{"Vitezica"} for dominant matrix (default = \code{"VanRaden"}).
#' @param na.string A character that is interpreted as missing values (default = \code{"NA"}).
#' @param sparseform If \code{TRUE} it generates a matrix in sparse form to be used directly in \pkg{asreml} with
#' required attributes (default = \code{FALSE}).
#' @param digits Set up the number of digits used to round the output matrix (default = 8).
#' @param message If \code{TRUE} diagnostic messages are printed on screen (default = \code{TRUE}).
#'
#' @details
#' Note: If data is provided with missing values, it will process calculations
#' of relationships on pairwise non-missing data.
#'
#' It uses function \code{Gmatrix} for calculations
#' from R package \pkg{AGHmatrix} (Amadeu \emph{et al.} 2016).
#'
#' @return A list with one of these two elements:
#' \itemize{
#' \item{\code{G}: the \eqn{\boldsymbol{G}} matrix in full form (only if \code{sparseform = FALSE}).}
#' \item{\code{G.sparse}: the \eqn{\boldsymbol{G}} matrix in sparse form (only if \code{sparseform = TRUE})}.
#' }
#'
#' @references
#' Amadeu, R.R., Cellon, C., Olmstead, J.W., Garcia, A.A.F, Resende, M.F.R. and P.R. Munoz. 2016.
#' AGHmatrix: R package to construct relationship matrices for autotetraploid and diploid species:
#' A blueberry example. The Plant Genome 9(3). doi: 10.3835/plantgenome2016.01.0009
#'
#' @export
#'
#' @examples
#' # Example: Requesting a full matrix by VanRaden.
#'
#' # Get G matrix.
#' G <- G.matrix(M = geno.apple, method = "VanRaden")$G
#' G[1:5, 1:5]
#'
#' \donttest{
#' # Example: Requesting a sparse form by VanRaden.
#'
#' # Get G matrix.
#' G <- G.matrix(M = geno.apple, method = "VanRaden", sparseform = TRUE)$G.sparse
#' head(G)
#' head(attr(G, "rowNames"))
#' }
#'
G.matrix <- function(M = NULL, method = "VanRaden", na.string = "NA",
                     sparseform = FALSE, digits = 8, message = TRUE){

  use.dll <- FALSE

  # Check if the class of M is matrix
  if (is.null(M) || !inherits(M, "matrix")) {
    stop("M should be a valid object of class matrix.")
  }

  M <- as.matrix(M)
  n <- dim(M)[1]
  p <- dim(M)[2]
  if (p < n) {warning('There are more individuals than markers in this data!')}

  # Check the rownames/colnames of M
  if(is.null(colnames(M))) {
    stop('Marker names not assigned to columns of matrix M.')
  }
  if(is.null(rownames(M))) {
    stop('Individual names not assigned to rows of matrix M.')
  }

  # Call libraries to obtain G matrix
  # Using AGHmatrix
  if (!use.dll) {
    #options(echo=FALSE,message=FALSE)
    if (message){
      GHAT <- Gmatrix(M, method=method, missingValue=na.string, maf=0)
    } else {
      GHAT <- silent_(Gmatrix(M, method=method, missingValue=na.string, maf=0))
    }
    #options(echo=TRUE)

  # Using our own DLL
  } else {
    # # Preparing things for the DLL
    # for (i in 1:p) { M[,i] <- as.double(M[,i]) } # Coercing M to be double precision
    # M <- as.matrix(M)
    # if (method == 'VanRaden') {mm = 2}
    # if (method == 'Yang') {mm = 1}
    # if (method == 'Su') {stop('Method not available in DLL.')}
    # if (method == 'Vitezica') {stop('Method not available in DLL.')}
    #
    # # dyn.load("ghat4.dll", type='Fortran')
    # dyn.load("src/ghat4.dll", type='Fortran') # Not sure this is the best way!
    # GHAT <- base::.Fortran("ghat", molM=M, ghatM=matrix(as.double(0),n,n),
    #                        mm=as.integer(2), n=as.integer(n), m=as.integer(p))$ghatM
    # #dyn.unload("ghat4.dll")
  }

  # round GHAT
  GHAT <- round(GHAT, digits)

  # Making sure GHAT has the rownames and colnames
  #GHAT <- as.matrix(GHAT)
  if (length(rownames(M)) == 0) {
    rownames(GHAT) <- as.character(1:n)
    colnames(GHAT) <- as.character(1:n)
  } else {
    rownames(GHAT) <- rownames(M)
    colnames(GHAT) <- rownames(M)
  }

  if (isTRUE(sparseform)) {
    GHAT.sparse <- full2sparse(GHAT)
    return(list(G.sparse = GHAT.sparse))
  } else{
    return(list(G = GHAT))
  }
}
## End of file: ASRgenomics/R/G_matrix.R
#' Generates the conditional predictions of random effects (BLUPs)
#'
#' @description
#' Predicts random effects values for individuals with unobserved responses (here called \code{x},
#' a vector of length \eqn{nx}) based on known random effect values for individuals with
#' observed responses (here called \code{y}, a vector of length \eqn{ny}). This is done using the
#' common genomic relationship matrix \eqn{\boldsymbol{G}} for all
#' individuals (full matrix of dimension \eqn{(nx + ny) \times (nx + ny)}).
#'
#' The prediction of unobserved responses will be performed through the
#' multivariate Normal conditional distribution. These predictions are identical to
#' what would be obtained if the entire set of individuals (\eqn{nx + ny}) were included into a
#' GBLUP animal model fit with individuals in the set \code{x} coded as missing.
#'
#' The user needs to provide the matrix \eqn{\boldsymbol{G}} in full form.
#' Individual names (\eqn{nx + ny}) should be assigned to \code{rownames} and \code{colnames}, and these
#' can be in any order. If the variance-covariance matrix of the set \code{y} is provided,
#' standard errors of random effects in set \code{x} are calculated.
#'
#' @param G Input of the genomic relationship matrix \eqn{\boldsymbol{G}} in full form
#' (of dimension \eqn{(nx + ny) \times (nx + ny)}) (default = \code{NULL}).
#' @param gy Input of random effects (\emph{e.g.} breeding values) for individuals with known values.
#' Individual names should be assigned to \code{rownames} of this vector
#' and be found on the matrix \eqn{\boldsymbol{G}} (default = \code{NULL}).
#' @param vcov.gy The variance-covariance matrix associated with the random effects from the
#' individuals with known values (set \code{y}, of dimension \eqn{ny \times ny})
#' (default = \code{NULL}).
#'
#' @return A data frame with the predicted random effect values for individuals with
#' unobserved responses in the set \code{x}. If the variance-covariance matrix is provided,
#' standard errors are included in an additional column.
#'
#' @export
#'
#' @examples
#' \dontrun{
#' library(asreml) # Load asreml.
#'
#' # Example: Apple data creating 100 missing observations.
#'
#' # Prepare G (nx + ny).
#' G <- G.matrix(M = geno.apple, method = "VanRaden", sparseform = FALSE)$G
#' dim(G)
#'
#' # Prepare subset of data.
#' # Select only 147 from 247 individuals from pheno.apple and geno.apple.
#' Gy <- G[1:147, 1:147]
#' phenoy <- pheno.apple[1:147, ]
#'
#' # Obtain the BLUPs for the 147 individuals using ASReml-R.
#'
#' # Blend Gy.
#' Gyb <- G.tuneup(G = Gy, blend = TRUE, pblend = 0.02)$Gb
#'
#' # Get the Gy inverse
#' Gyinv <- G.inverse(G = Gyb, sparseform = TRUE)$Ginv.sparse
#'
#' # Fit a GBLUP model
#' phenoy$INDIV <- as.factor(phenoy$INDIV)
#' modelGBLUP <-
#'  asreml(
#'   fixed = JUI_MOT ~ 1,
#'   random = ~vm(INDIV, Gyinv),
#'   workspace = 128e06,
#'   data = phenoy)
#'
#' # Obtain Predictions - BLUP (set y).
#' BLUP <- summary(modelGBLUP, coef = TRUE)$coef.random
#' head(BLUP)
#' gy <- as.matrix(BLUP[, 1])
#' rownames(gy) <- phenoy$INDIV
#'
#' # Ready to make conditional predictions.
#' g.cond <- G.predict(G = G, gy = gy)
#' head(g.cond)
#' }
#'
G.predict <- function(G = NULL, gy = NULL, vcov.gy = NULL){

  # Check if the class of G is matrix
  if (is.null(G) || !inherits(G, "matrix")) {
    stop("G should be a valid object of class matrix.")
  }
  if (is.null(rownames(G))){
    stop("Individual names not assigned to rows of matrix G.")
  }
  if (is.null(colnames(G))){
    stop("Individual names not assigned to columns of matrix G.")
  }
  if ((identical(rownames(G), colnames(G))) == FALSE){
    stop("Rownames and colnames of matrix G do not match.")
  }

  # Check if the class of gy is matrix
  if (is.null(gy) || !inherits(gy, "matrix")) {
    stop("gy should be a valid object of class matrix.")
  }
  if (is.null(rownames(gy))){
    stop("Individual names not assigned to rows of gy.")
  }

  # Check for consistency between G and resp
  resp <- gy
  respind <- rownames(resp)
  Gind <- rownames(G)
  yidx <- which(Gind %in% respind == TRUE)
  xidx <- which(Gind %in% respind == FALSE)
  if (length(xidx) == 0) {
    stop("Individuals in G are the same as those in the vector gy, nothing to predict.")
  }

  # Creating indexes
  G_yy <- G[yidx, yidx]
  G_xy <- G[xidx, yidx]

  # Obtain inverse of G_yy
  G_yy.inv <- G.inverse(G=G_yy, bend=FALSE, blend=FALSE, sparseform=FALSE, message=FALSE)
  if (G_yy.inv$status == 'ill-conditioned') {
    stop("Inverse of matrix G is ill-conditioned, use G.tuneup and provide G.")
  } else {
    G_yy.inv <- G_yy.inv$Ginv
  }

  # Obtain predictions
  beta <- G_xy %*% G_yy.inv
  pred <- beta %*% resp
  pred <- data.frame(predicted.value=pred)

  # Obtaining vcov(preds)
  if (!is.null(vcov.gy)) {
    # Check if the class of vcov.gy is matrix
    if (is.null(vcov.gy) || !inherits(vcov.gy, "matrix")) {
      stop("Input vcov.gy should be a valid object of class matrix.")
    }
    if (is.null(rownames(vcov.gy))){
      stop("Individual names not assigned to rows of matrix vcov.gy.")
    }
    if (is.null(colnames(vcov.gy))){
      stop("Individual names not assigned to columns of matrix vcov.gy.")
    }
    if ((identical(row.names(vcov.gy), colnames(vcov.gy))) == FALSE){
      stop("Rownames and colnames of matrix vcov.gy do not match.")
    }
    vcov.pred <- beta %*% vcov.gy %*% t(beta)
    st.pred <- sqrt(diag(vcov.pred))
    # Append the standard errors to the existing data frame (appending avoids
    # the duplicated column name produced by wrapping the data frame again).
    pred$std.error <- st.pred
  }

  return(pred=pred)
}
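# A minimal sketch (toy values, illustrative only) of the conditional-mean
# calculation performed above: for jointly normal random effects,
# E[g_x | g_y] = G_xy %*% solve(G_yy) %*% g_y.
#   Gtoy <- matrix(c(1.0, 0.5, 0.3,
#                    0.5, 1.0, 0.4,
#                    0.3, 0.4, 1.0), nrow = 3,
#                  dimnames = rep(list(c("A", "B", "C")), 2))
#   gy <- matrix(c(0.8, -0.2), dimnames = list(c("B", "C"), NULL))
#   drop(Gtoy["A", c("B", "C")] %*% solve(Gtoy[c("B", "C"), c("B", "C")]) %*% gy)
# This should equal G.predict(G = Gtoy, gy = gy)$predicted.value for
# individual "A" (up to rounding of the internal inverse).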
## End of file: ASRgenomics/R/G_predict.R
#' Tune-up the genomic relationship matrix G
#'
#' @description
#' Generates a new matrix that can be \emph{blended}, \emph{bended} or \emph{aligned}
#' in order to make it stable for future use or matrix inversion. The input matrix should
#' be of the full form (\eqn{n \times n}) with individual names assigned to
#' \code{rownames} and \code{colnames}.
#'
#' This routine provides three options of tune-up:
#' \itemize{
#' \item{\emph{Blend}. The \eqn{\boldsymbol{G}} matrix is blended (or averaged)
#' with another matrix. \eqn{\boldsymbol{G}^\ast=(1-p) \boldsymbol{G} + p
#' \boldsymbol{A}}, where \eqn{\boldsymbol{G}^\ast} is the blended matrix,
#' \eqn{\boldsymbol{G}} is the original matrix, \eqn{p} is the proportion of
#' the identity matrix or pedigree-based relationship matrix to consider, and
#' \eqn{\boldsymbol{A}} is the matrix to blend. Ideally, the pedigree-based
#' relationship matrix should be used, but if this is not available (or it is
#' of poor quality), then it is replaced by an identity matrix
#' \eqn{\boldsymbol{I}}.}
#' \item{\emph{Bend}. It consists of adjusting the original
#' \eqn{\boldsymbol{G}} matrix to obtain a near positive definite matrix, which
#' is done by making all negative or very small eigenvalues slightly positive.}
#' \item{\emph{Align}. The original \eqn{\boldsymbol{G}} matrix is aligned to
#' the matching pedigree relationship matrix \eqn{\boldsymbol{A}} where
#' \eqn{\alpha} and \eqn{\beta} parameters are obtained. More information can
#' be found in the manual or in Christensen \emph{et al.} (2012).}
#' }
#'
#' The user should provide the matrices \eqn{\boldsymbol{G}} and
#' \eqn{\boldsymbol{A}} in full form (\eqn{n \times n})
#' and matching individual names should be
#' assigned to the \code{rownames} and \code{colnames} of the matrices.
#'
#' Based on procedures published by Nazarian and Gezan (2016).
#'
#' @param G Input of the genomic matrix \eqn{\boldsymbol{G}} to tune-up
#' in full form (\eqn{n \times n}) (default = \code{NULL}).
#' @param A Input of the matching pedigree relationship matrix \eqn{\boldsymbol{A}}
#' in full form (\eqn{n \times n}) (default = \code{NULL}).
#' @param blend If \code{TRUE} a \emph{blending} with identity matrix \eqn{\boldsymbol{I}} or pedigree relationship matrix
#' \eqn{\boldsymbol{A}} (if provided) is performed (default = \code{FALSE}).
#' @param pblend If blending is requested this is the proportion of the identity matrix \eqn{\boldsymbol{I}} or
#' pedigree-based relationship matrix \eqn{\boldsymbol{A}} to blend for (default = \code{0.02}).
#' @param bend If \code{TRUE} a \emph{bending} is performed by making the matrix near positive definite (default = \code{FALSE}).
#' @param eig.tol Defines relative positiveness (\emph{i.e.}, non-zero) of eigenvalues compared to the largest one.
#' It determines which threshold of eigenvalues will be treated as zero (default = \code{1e-06}).
#' @param align If \code{TRUE} the genomic matrix \eqn{\boldsymbol{G}} is \emph{aligned} to the
#' matching pedigree relationship matrix \eqn{\boldsymbol{A}} (default = \code{FALSE}).
#' @param rcn If \code{TRUE} the reciprocal condition number of the original and the bended,
#' blended or aligned matrix will be calculated (default = \code{TRUE}).
#' @param digits Set up the number of digits used to round the output matrix (default = \code{8}).
#' @param sparseform If \code{TRUE} it generates the tuned matrix in sparse form to be used directly in \pkg{asreml} with
#' required attributes (default = \code{FALSE}).
#' @param determinant If \code{TRUE} the determinant will be calculated, otherwise, this is obtained for
#' matrices of a dimension of less than 1,500 \eqn{\times} 1,500 (default = \code{TRUE}).
#' @param message If \code{TRUE} diagnostic messages are printed on screen (default = \code{TRUE}).
#'
#' @return A list with six of the following elements:
#' \itemize{
#' \item{\code{Gb}: the tuned-up \eqn{\boldsymbol{G}} matrix in full form
#' (only if sparseform = \code{FALSE}).}
#' \item{\code{Gb.sparse}: if requested, the tuned-up \eqn{\boldsymbol{G}} matrix in
#' sparse form (only if sparseform = \code{TRUE}).}
#' \item{\code{rcn0}: the reciprocal condition number of the original matrix.
#' Values near zero are associated with an ill-conditioned matrix.}
#' \item{\code{rcnb}: the reciprocal condition number of the blended, bended or aligned
#' matrix. Values near zero are associated with an ill-conditioned matrix.}
#' \item{\code{det0}: if requested, the determinant of the original matrix.}
#' \item{\code{blend}: if the matrix was \emph{blended}.}
#' \item{\code{bend}: if the matrix was \emph{bended}.}
#' \item{\code{align}: if the matrix was \emph{aligned}.}
#' }
#'
#' @references
#' Christensen, O.F., Madsen, P., Nielsen, B., Ostersen, T. and Su, G. 2012.
#' Single-step methods for genomic evaluation in pigs. Animal 6:1565-1571.
#' doi:10.1017/S1751731112000742.
#'
#' Nazarian A., Gezan S.A. 2016. GenoMatrix: A software package for pedigree-based
#' and genomic prediction analyses on complex traits. Journal of Heredity 107:372-379.
#'
#' @export
#'
#' @examples
#' # Example: Apple dataset.
#'
#' # Get G matrix.
#' G <- G.matrix(M = geno.apple, method = "VanRaden")$G
#' G[1:5, 1:5]
#'
#' # Blend G matrix.
#' G_blended <- G.tuneup(G = G, blend = TRUE, pblend = 0.05)
#' G_blended$Gb[1:5, 1:5]
#'
#' # Bend G matrix.
#' G_bended <- G.tuneup(G = G, bend = TRUE, eig.tol = 1e-03)
#' G_bended$Gb[1:5, 1:5]
#'
#' \donttest{
#' # Example: Loblolly Pine dataset with pedigree - Aligned G matrix.
#'
#' A <- AGHmatrix::Amatrix(ped.pine)
#' dim(A)
#'
#' # Read and filter genotypic data.
#' M.clean <- qc.filtering(
#'  M = geno.pine655,
#'  maf = 0.05,
#'  marker.callrate = 0.2, ind.callrate = 0.20,
#'  na.string = "-9")$M.clean
#'
#' # Get G matrix.
#' G <- G.matrix(M = M.clean, method = "VanRaden", na.string = "-9")$G
#' G[1:5, 1:5]
#' dim(G)
#'
#' # Match G and A.
#' Aclean <- match.G2A(A = A, G = G, clean = TRUE, ord = TRUE, mism = TRUE)$Aclean
#'
#' # Align G with A.
#' G_align <- G.tuneup(G = G, A = Aclean, align = TRUE)
#' G_align$Gb[1:5, 1:5]
#' }
#'
G.tuneup <- function(G = NULL, A = NULL, blend = FALSE, pblend = 0.02,
                     bend = FALSE, eig.tol = 1e-06, align = FALSE, rcn = TRUE,
                     digits = 8, sparseform = FALSE, determinant = TRUE,
                     message = TRUE){

  # Check if the class of G is matrix
  if (is.null(G) || !inherits(G, "matrix")) {
    stop('G should be a valid object of class matrix.')
  }
  # Check the rownames/colnames of G
  if (is.null(rownames(G))){
    stop('Individual names not assigned to rows of matrix G.')
  }
  if (is.null(colnames(G))){
    stop('Individual names not assigned to columns of matrix G.')
  }
  if ((identical(rownames(G), colnames(G))) == FALSE){
    stop("Rownames and colnames of matrix G do not match.")
  }

  # Checks on other parameters
  if (pblend < 0 | pblend > 1) {
    stop("Specification of pblend must be between 0 and 1.")
  }
  if (eig.tol <= 0) {
    stop("Value for eig.tol must be positive.")
  }

  # Check option parameters
  if (isTRUE(blend) & isTRUE(bend) & isTRUE(align)){
    stop('More than one option (blend, bend or align) was requested. Choose only one of them.')
  }
  if (!isTRUE(blend) & !isTRUE(bend) & !isTRUE(align)){
    stop('No option (blend, bend or align) was requested.')
  }

  n <- dim(G)[1]
  p <- dim(G)[2]
  #
  #G <- as.matrix(forceSymmetric(G)) #Force a square matrix to be a symmetric Matrix
  #
  if (p != n) { stop('Matrix G is not a square matrix.') }

  if (!is.null(A)) {
    # Check if the class of A is matrix
    if (!inherits(A, "matrix")) {
      stop('A should be a valid object of class matrix.')
    }
    if (is.null(rownames(A))){
      stop('Individual names not assigned to rows of matrix A.')
    }
    if (is.null(colnames(A))){
      stop('Individual names not assigned to columns of matrix A.')
    }
    nA <- dim(A)[1]
    pA <- dim(A)[2]
    #A <- as.matrix(forceSymmetric(A)) #Force a square matrix to be a symmetric Matrix
    if (pA != nA) { stop('Matrix A is not a square matrix.') }
    if (p != pA | n != nA) {
      warning('Matrix A and G are not of the same dimensions.
               Use match.G2A() to obtain subset of matched genotypes.')
    }
  }

  # Reciprocal Condition Number RCN
  if (isTRUE(rcn)){
    rcn0 <- rcond(G)
    if (message) { message('Reciprocal condition number for original matrix is: ', rcn0) }
  } else { rcn0 <- NULL }

  # Checking for determinant
  if (n > 1500) {
    if (!determinant) {
      if (message) { message('Determinant is not calculated as matrix is too large.') }
      detG <- NULL
    } else {
      detG <- det(G)
      if (message) { message('Determinant for original matrix is: ', detG) }
    }
  } else {
    detG <- det(G)
    if (message) { message('Determinant for original matrix is: ', detG) }
  }

  # Performing blend with I if requested
  if (isTRUE(blend)){
    if (is.null(A)) {
      Gb <- (1-pblend)*G + pblend*diag(x=1, nrow=n, ncol=n)
      if (message) { message('Matrix was BLENDED using an identity matrix.') }
    }
    # Performing blend with A if requested
    if (!is.null(A)) {
      if (p != pA | n != nA) {
        stop('Matrix A and G are not of the same dimension.')
      }
      if ( sum(rownames(G) == rownames(A)) < n) {
        warning('Names of rows/columns do not match between G and A matrix.')
        stop('You should use the function match.G2A to match these two matrices.')
      }
      if ( sum(rownames(G) == rownames(A)) == n) {
        Gb <- (1-pblend)*G + pblend*A
        if (message) { message('Matrix was BLENDED using the provided A matrix.') }
      }
    }
  }

  # Obtaining the near positive definite matrix (bend)
  if (isTRUE(bend)) {
    Gb <- as.matrix(Matrix::nearPD(G, corr=FALSE, keepDiag=FALSE, do2eigen=TRUE,
                                   doSym=TRUE, doDykstra=TRUE, only.values=FALSE,
                                   eig.tol=eig.tol, conv.tol=1e-07, posd.tol=1e-02,
                                   maxit=100, conv.norm.type="I", trace=FALSE)$mat)
    if (message) { message('Matrix was BENDED.') }
  }

  # Performing alignment with A matrix (align)
  if (isTRUE(align)) {
    if (is.null(A)) { stop('A matrix needs to be provided for align to be performed.') }
    Xm <- cbind(c(1,1), c(mean(diag(G)), mean(G)))
    y <- as.matrix(c(mean(diag(A)), mean(A)))
    beta <- solve(Xm) %*% y
    Gb <- beta[1] + beta[2]*G
    if (message) { message('Matrix was ALIGNED.') }
  }

  # Rounding Gb
  Gb <- round(Gb, digits)
  ### Need to make sure rownames(Gb) and colnames(Gb) are correct

  # Reciprocal Condition Number RCN
  if (isTRUE(rcn)){
    rcnb <- rcond(Gb)
    if (message) { message('Reciprocal condition number for tune-up matrix is: ', rcnb) }
  } else { rcnb <- NULL }

  # Obtaining Matrix in Sparse Form if requested (ready for ASReml-R v.4)
  if (isTRUE(sparseform)) {
    Gb.sparse <- full2sparse(Gb)
    return(list(Gb.sparse=Gb.sparse, rcn0=rcn0, det0=detG, rcnb=rcnb,
                blend=blend, bend=bend, align=align))
  } else {
    return(list(Gb=Gb, rcn0=rcn0, det0=detG, rcnb=rcnb,
                blend=blend, bend=bend, align=align))
  }
}
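# A quick numerical sketch (toy matrices, illustrative only) of the 'align'
# option above: the two scalars are chosen so the aligned matrix matches A in
# both mean diagonal and overall mean.
#   set.seed(1)
#   Gt <- tcrossprod(matrix(rnorm(12), nrow = 4))/3
#   dimnames(Gt) <- rep(list(letters[1:4]), 2)
#   At <- diag(4); dimnames(At) <- dimnames(Gt)
#   Gal <- G.tuneup(G = Gt, A = At, align = TRUE, message = FALSE)$Gb
#   c(mean(diag(Gal)), mean(Gal))   # ~c(1, 0.25) = c(mean(diag(At)), mean(At))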
## End of file: ASRgenomics/R/G_tuneup.R
#' Generates the inverse of the hybrid H matrix #' #' The single-step GBLUP approach combines the information from the pedigree relationship matrix #' \eqn{\boldsymbol{A}} and the genomic relationship matrix \eqn{\boldsymbol{G}} in one #' hybrid relationship matrix called \eqn{\boldsymbol{H}}. #' This function will calculate directly the inverse of this matrix \eqn{\boldsymbol{H}}. #' The user should provide the matrices \eqn{\boldsymbol{A}} or #' its inverse (only one of these is required) and the #' inverse of the matrix \eqn{\boldsymbol{G}} (\eqn{\boldsymbol{G_{inv}}}) in its full form. Individual names should #' be assigned to \code{rownames} and \code{colnames}, and individuals from #' \eqn{\boldsymbol{G_{inv}}} are verified to be all a subset within individuals from #' \eqn{\boldsymbol{A}} (or \eqn{\boldsymbol{A_{inv}}}). #' #' @param A Input of the pedigree relationship matrix \eqn{\boldsymbol{A}} #' in full form (\eqn{na \times na}) (default = \code{NULL}). #' @param Ainv Input of the inverse of the pedigree relationship matrix #' \eqn{\boldsymbol{A}^{-1}} in full form (\eqn{na \times na}) (default = \code{NULL}). #' @param Ginv Input of the inverse of the genomic relationship matrix #' \eqn{\boldsymbol{G}^{-1}} in full form (\eqn{ng \times ng}) (default = \code{NULL}). #' @param lambda The scaling factor for \eqn{(\boldsymbol{G}^{-1}-\boldsymbol{A}^{-1}_{22})} (default = \code{NULL}). #' @param tau The scaling factor for \eqn{\boldsymbol{G}^{-1}} (default = \code{1}). #' @param omega The scaling factor for \eqn{\boldsymbol{A}^{-1}_{22}} (default = \code{1}). #' @param sparseform If \code{TRUE} it generates the requested matrix in sparse form to be used #' directly in \pkg{asreml} with required attributes (default = \code{FALSE}). #' @param keep.order If \code{TRUE} the original order of the individuals from the #' \eqn{\boldsymbol{A}} or \eqn{\boldsymbol{A_{inv}}} matrix is kept. #' Otherwise the non-genotyped individuals are placed first and #' then genotyped individuals (default = \code{TRUE}). #' @param digits Set up the number of digits used to round the output matrix (default = \code{8}). #' @param inverse If \code{TRUE} it generates the inverse of \eqn{\boldsymbol{H}} matrix (default = \code{TRUE}) #' (to be deprecated). #' @param message If \code{TRUE} diagnostic messages are printed on screen (default = \code{TRUE}). #' #' @return #' The inverse of the hybrid matrix \eqn{\boldsymbol{H}} matrix, in full or sparse form with #' required attributes to be used in \pkg{asreml}. #' #' @details #' The generation of the \eqn{\boldsymbol{H^{-1}}} matrix contains a few scaling factors #' to help with the calculation of this inverse and #' to allow further exploration of the combination of the #' information from the \eqn{\boldsymbol{A^{-1}}} and \eqn{\boldsymbol{G^{-1}}}. #' We follow the specifications described by Martini \emph{et. al} (2018), #' which is done by specifying the parameters \eqn{\lambda}, or the pair #' \eqn{\tau} and \eqn{\omega}. 
#' #' \if{html}{ #' The general expression used is: #' \deqn{\boldsymbol{H^{-1}}=\boldsymbol{A^{-1}}+\begin{bmatrix}\boldsymbol{0}&\boldsymbol{0}\\\boldsymbol{0}&(\tau\boldsymbol{G^{-1}}-\omega\boldsymbol{{A_{22}^{-1}}})\end{bmatrix}} #' } #' #' \if{html}{ #' and a more common representation of the above expression is found when \eqn{\tau = \omega = \lambda}, as shown below: #' \deqn{\boldsymbol{H^{-1}}=\boldsymbol{A^{-1}}+\begin{bmatrix}\boldsymbol{0}&\boldsymbol{0}\\\boldsymbol{0}&\lambda(\boldsymbol{G^{-1}}-\boldsymbol{{A_{22}^{-1}}})\end{bmatrix}} #' } #' #' If \code{inverse = FALSE} the \eqn{\boldsymbol{H}} #' matrix is provided instead of its inverse. This option will be deprecated and #' it is better to use the function \link{H.matrix}. #' #' \if{html}{ #' The \eqn{\boldsymbol{H}} matrix is obtained with the following equations: #' \deqn{\boldsymbol{H}=\boldsymbol{A}+\begin{bmatrix}\boldsymbol{A}_{12}\boldsymbol{A}_{22}^{-1}(\boldsymbol{G}-\boldsymbol{A}_{22})\boldsymbol{A}_{22}^{-1}\boldsymbol{A}_{21}&\boldsymbol{A}_{12}\boldsymbol{A}_{22}^{-1}(\boldsymbol{G}-\boldsymbol{A}_{22})\\(\boldsymbol{G}-\boldsymbol{A}_{22})\boldsymbol{A}_{22}^{-1}\boldsymbol{A}_{21}&(\boldsymbol{G}-\boldsymbol{A}_{22})\end{bmatrix}} #' } #' #' @references #' Christensen, O.F., Lund, M.S. 2010. Genomic prediction matrix when some animals #' are not genotyped. Gen. Sel. Evol. 42(2):1–8. #' #' Christensen, O., Madsen, P., Nielsen, B., Ostersen, T., and Su, G. 2012. Single-step methods #' for genomic evaluation in pigs. Animal 6(10):1565–1571. #' #' Legarra, A., Aguilar, I., and Misztal, I. 2009. A relationship matrix including full #' pedigree and genomic information. J. Dairy Sci. 92:4656-4663. #' #' Martini, J.W.R., Schrauf, M.F., Garcia-Baccino, C.A., Pimentel, E.C.G., Munilla, S., #' Rogberg-Muñoz, A., Cantet, R.J.C., Reimer, C., Gao, N., Wimmer, V., and Simianer, H. 2018. #' The effect of the \eqn{H^{-1}} scaling factors \eqn{\tau} and \eqn{\omega} #' on the structure of \eqn{H} in the single-step procedure. #' Genet. Sel. Evol. 50:1-9. #' #' @export #' #' @examples #' \donttest{ #' # Get A matrix. #' A <- AGHmatrix::Amatrix(data = ped.pine) #' A[1:5,1:5] #' dim(A) #' #' # Read and filter genotypic data. #' M.clean <- qc.filtering( #' M = geno.pine655, #' maf = 0.05, #' marker.callrate = 0.2, ind.callrate = 0.20, #' na.string = "-9", #' plots = FALSE)$M.clean #' #' # Get G matrix. #' G <- G.matrix(M.clean, method = "VanRaden", na.string = "-9")$G #' G[1:5, 1:5] #' dim(G) #' #' # Match G and A. #' check <- match.G2A( #' A = A, G = G, #' clean = TRUE, ord = TRUE, mism = TRUE, RMdiff = TRUE) #' #' # Align G matrix with A. #' G_align <- G.tuneup(G = check$Gclean, A = check$Aclean, align = TRUE, sparseform = FALSE)$Gb #' #' # Get Ginverse using the G aligned. #' Ginv <- G.inverse(G = G_align, sparseform = FALSE)$Ginv #' Ginv[1:5, 1:5] #' dim(Ginv) #' #' # Obtain Hinv. 
#' Hinv <- H.inverse(A = A, Ginv = Ginv, lambda = 0.90, sparseform = TRUE) #' head(Hinv) #' attr(Hinv, "INVERSE") #' } #' H.inverse <- function(A = NULL, Ainv = NULL, Ginv = NULL, lambda = NULL,tau = 1, omega = 1, sparseform = FALSE, keep.order = TRUE, digits = 8, inverse = TRUE, message = TRUE){ # Check if we have lambda and make some checks if(!is.null(lambda)){ tau <- lambda omega <- lambda if(message){ message("A lambda value was provided and it will be used instead of tau and omega.") } if (lambda < 0 | lambda > 1) { stop("Value of lambda must be between 0 and 1.") } }else{ if(message){ message("No lambda value was provided, tau and omega scaling factors will be considered.") } } # Checks on some input parameters if (tau < 0 | tau > 2) { stop("Value of tau must be between 0 and 1.") } if (omega < -1 | omega > 1) { stop("Value of omega must be between -1 and 1.") } if (tau == 0 & omega == 0) { stop("Values of tau and omega can not be both equal to zero.") } # We need A or Ainv if (is.null(A) & is.null(Ainv)) { stop('A or Ainv needs to be provided.') } if (!is.null(Ainv)) { # We have Ainv and get A if (!inherits(Ainv, "matrix")) { stop("Ainv should be a valid object of class matrix.") } if (is.null(rownames(Ainv))){ stop("Individual names not assigned to rows of matrix Ainv.") } if (is.null(colnames(Ainv))){ stop("Individual names not assigned to columns of matrix Ainv.") } if ((identical(rownames(Ainv), colnames(Ainv))) == FALSE){ stop("Rownames and colnames of matrix Ainv do not match.") } crit<-sum(diag(Ainv)>=2) if (crit == 0) { warning('It seems like an A matrix was provided not an Ainv matrix.') } A <- G.inverse(G=Ainv, blend=FALSE, bend=FALSE, message = FALSE)$Ginv attributes(A)$INVERSE <- NULL } else { # Then we have A and we get Ainv to use if (!inherits(A, "matrix")) { stop("A should be a valid object of class matrix.") } if (is.null(rownames(A))){ stop("Individual names not assigned to rows of matrix A.") } if (is.null(colnames(A))){ stop("Individual names not assigned to columns of matrix A.") } if ((identical(rownames(A), colnames(A))) == FALSE){ stop("Rownames and colnames of matrix A do not match.") } crit<-sum(diag(A)>2) if (crit >0) { warning('It seems like an Ainv matrix was provided not an A matrix.') } Ainv <- G.inverse(G=A, blend=FALSE, bend=FALSE, message = FALSE)$Ginv } # We need Ginv # Check components of Ginv if (is.null(Ginv) || !inherits(Ginv, "matrix")) { stop("Ginv should be a valid object of class matrix.") } if (is.null(rownames(Ginv))){ stop("Individual names not assigned to rows of matrix Ginv.") } if (is.null(colnames(Ginv))){ stop("Individual names not assigned to columns of matrix Ginv.") } if ((identical(rownames(Ginv), colnames(Ginv))) == FALSE){ stop("Rownames and colnames of matrix Ginv do not match.") } # Check for consistency between Ainv and Ginv Aind <- rownames(Ainv) Gind <- rownames(Ginv) notGenotyped <- which((Aind %in% Gind) == FALSE) # which ind has Ainv but not Ginv notPedigree <- which(Gind %in% Aind == FALSE) # which ind has Ginv but no Ainv # If Ginv has different ind than Ainv, stop here. 
  if (length(notPedigree) > 0) {
    stop("Matrix Ginv has ", length(notPedigree),
         " individuals that are not present in A or Ainv.")
  }
  # Check if Ainv has different ind than Ginv.
  if (length(notGenotyped) > 0 & message){
    message("Matrix A or Ainv has ", length(notGenotyped),
            " individuals that are not present in Ginv.")
  }
  if (length(notGenotyped) == 0 & message){
    message("Matrix A or Ainv has all individuals that are present in Ginv.")
  }

  # Creating indexes (sorted matrices).
  idA <- rownames(Ainv)
  idG <- rownames(Ginv)
  idH <- unique(c(idG, idA))
  idH <- rev(idH)
  A <- A[idH, idH]
  Ainv <- Ainv[idH, idH]
  genotyped <- idH %in% idG
  A11inv <- Ainv[!genotyped, !genotyped]
  A12inv <- Ainv[!genotyped, genotyped]
  A22inv <- solve(A[genotyped, genotyped])
  Ginv <- Ginv[idH[genotyped], idH[genotyped]]

  # Making sure both Ginv and A22inv agree on sorted individuals.
  if (all(rownames(Ginv) == rownames(A22inv)) == FALSE) {
    stop("Order of matrix Ginv does not match subset of A or Ainv.")
  }

  # Traditional method using Ginv & Ainv to obtain Hinv directly.
  if (isTRUE(inverse)){

    # Obtaining Hinv.
    H11inv <- matrix(0, nrow = dim(A11inv)[1], ncol = dim(A11inv)[2])
    H12inv <- matrix(0, nrow = dim(A12inv)[1], ncol = dim(A12inv)[2])
    H22inv <- tau*Ginv - omega*A22inv
    Hinv <- Ainv + cbind(rbind(H11inv, t(H12inv)), rbind(H12inv, H22inv))

    # Round Hinv.
    Hinv <- round(Hinv, digits)

    # Same order as Ainverse (if requested).
    if (keep.order) {
      Hinv <- Hinv[match(Aind, rownames(Hinv)), match(Aind, rownames(Hinv))]
    }
    attr(Hinv, "INVERSE") <- TRUE

    # Generating sparseform matrix.
    if (sparseform) {
      Hinv <- full2sparse(Hinv)
      attr(Hinv, "INVERSE") <- TRUE
    }

    return(Hinv)

  } else {

    A11 <- A[!genotyped, !genotyped]
    A12 <- A[!genotyped, genotyped]
    A21 <- t(A12)
    A22 <- A[genotyped, genotyped]

    # H22 = solve(tau * Ginv + (1 - omega) * A22inv)
    H22 <- G.inverse((tau * Ginv + (1 - omega) * A22inv), message = FALSE)
    if (H22$status == "ill-conditioned") {
      stop("Matrix H is ill-conditioned, try different scaling factors.")
    } else {
      H22 <- H22$Ginv
    }

    # Old repetitive code.
    # H11 = A12 %*% A22inv %*% (H22 - A22) %*% A22inv %*% A21
    # H12 = A12 %*% A22inv %*% (H22 - A22)
    # H21 = (H22 - A22) %*% A22inv %*% A21
    # H22 = (H22 - A22)

    # Pre-calculated matrices.
    A22A21 <- A22inv %*% A21

    # Finalize multiplications.
    H22 <- (H22 - A22)
    H12 <- A12 %*% A22inv %*% H22
    H11 <- H12 %*% A22A21
    H21 <- H22 %*% A22A21

    # Bind matrices.
    H <- A + cbind(rbind(H11, H21), rbind(H12, H22))

    # Round H.
    H <- round(H, digits)

    # Same order as A (if requested).
    if (keep.order) {
      H <- H[match(Aind, rownames(H)), match(Aind, rownames(H))]
    }

    # Generating sparseform matrix.
    if (sparseform) {
      H <- full2sparse(H)
      attr(H, "INVERSE") <- FALSE
    }

    return(H)
  }
}
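# ---------------------------------------------------------------------------
# Hedged sketch (illustration only, not part of the package API): a minimal
# numeric check of the lambda identity documented above, i.e. that for
# tau = omega = lambda, H.inverse() returns Ainv with lambda * (Ginv - A22inv)
# added to the genotyped block. The toy matrices below are arbitrary
# positive-definite values invented for this check; the block is wrapped in
# if (FALSE) so the file can be sourced without side effects.
if (FALSE) {
  ids <- c("a", "b", "c")
  A.toy <- matrix(c(1.00, 0.50, 0.50,
                    0.50, 1.00, 0.25,
                    0.50, 0.25, 1.00), nrow = 3, dimnames = list(ids, ids))
  G.toy <- matrix(c(1.05, 0.30,
                    0.30, 0.98), nrow = 2,
                  dimnames = list(ids[2:3], ids[2:3]))
  Ginv.toy <- solve(G.toy)

  Hinv.toy <- H.inverse(A = A.toy, Ginv = Ginv.toy, lambda = 0.9,
                        message = FALSE)

  # Manual block reconstruction of the same quantity.
  manual <- solve(A.toy)
  A22inv.toy <- solve(A.toy[2:3, 2:3])
  manual[2:3, 2:3] <- manual[2:3, 2:3] + 0.9 * (Ginv.toy - A22inv.toy)

  max(abs(Hinv.toy - manual))  # Expected ~0, up to the rounding in 'digits'.
}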
/scratch/gouwar.j/cran-all/cranData/ASRgenomics/R/H_Inverse.R
#' Generates the hybrid \eqn{H} matrix
#'
#' The single-step GBLUP approach combines the information from the pedigree relationship matrix
#' \eqn{\boldsymbol{A}} and the genomic relationship matrix \eqn{\boldsymbol{G}} in one
#' hybrid relationship matrix called \eqn{\boldsymbol{H}}.
#' This function calculates this matrix \eqn{\boldsymbol{H}} directly.
#' The user should provide the matrix \eqn{\boldsymbol{A}} or
#' its inverse (only one of these is required) and the
#' inverse of the matrix \eqn{\boldsymbol{G}} (\eqn{\boldsymbol{G_{inv}}}) in its full form.
#' Individual names should
#' be assigned to \code{rownames} and \code{colnames}, and individuals from
#' \eqn{\boldsymbol{G_{inv}}} are verified to all be a subset of the individuals from
#' \eqn{\boldsymbol{A}} (or \eqn{\boldsymbol{A_{inv}}}).
#' This function is a wrapper of the \link{H.inverse} function.
#'
#' @param A Input of the pedigree relationship matrix \eqn{\boldsymbol{A}}
#' in full form (\eqn{na \times na}) (default = \code{NULL}).
#' @param Ainv Input of the inverse of the pedigree relationship matrix
#' \eqn{\boldsymbol{A}^{-1}} in full form (\eqn{na \times na}) (default = \code{NULL}).
#' @param Ginv Input of the inverse of the genomic relationship matrix
#' \eqn{\boldsymbol{G}^{-1}} in full form (\eqn{ng \times ng}) (default = \code{NULL}).
#' @param lambda The scaling factor for \eqn{(\boldsymbol{G}^{-1}-\boldsymbol{A}^{-1}_{22})} (default = \code{NULL}).
#' @param tau The scaling factor for \eqn{\boldsymbol{G}^{-1}} (default = \code{1}).
#' @param omega The scaling factor for \eqn{\boldsymbol{A}^{-1}_{22}} (default = \code{1}).
#' @param sparseform If \code{TRUE} it generates the requested matrix in sparse form to be used
#' directly in \pkg{asreml} with required attributes (default = \code{FALSE}).
#' @param keep.order If \code{TRUE} the original order of the individuals from the
#' \eqn{\boldsymbol{A}} or \eqn{\boldsymbol{A_{inv}}} matrix is kept.
#' Otherwise the non-genotyped individuals are placed first and
#' then genotyped individuals (default = \code{TRUE}).
#' @param digits Set up the number of digits used to round the output matrix (default = \code{8}).
#' @param message If \code{TRUE} diagnostic messages are printed on screen (default = \code{TRUE}).
#'
#' @return
#' The hybrid relationship matrix \eqn{\boldsymbol{H}}, in full or sparse form.
#'
#' @md
#' @details
#' This function is currently equivalent to using \link{H.inverse} with \code{inverse = FALSE}.
#'
#' \if{html}{
#' The \eqn{\boldsymbol{H}} matrix is obtained with the following equation:
#' \deqn{\boldsymbol{H}=\boldsymbol{A}+\begin{bmatrix}\boldsymbol{A}_{12}\boldsymbol{A}_{22}^{-1}(\boldsymbol{G}-\boldsymbol{A}_{22})\boldsymbol{A}_{22}^{-1}\boldsymbol{A}_{21}&\boldsymbol{A}_{12}\boldsymbol{A}_{22}^{-1}(\boldsymbol{G}-\boldsymbol{A}_{22})\\(\boldsymbol{G}-\boldsymbol{A}_{22})\boldsymbol{A}_{22}^{-1}\boldsymbol{A}_{21}&(\boldsymbol{G}-\boldsymbol{A}_{22})\end{bmatrix}}
#' }
#'
#'
#' @references
#' Christensen, O.F., Lund, M.S. 2010. Genomic prediction matrix when some animals
#' are not genotyped. Gen. Sel. Evol. 42(2):1–8.
#'
#' Christensen, O., Madsen, P., Nielsen, B., Ostersen, T., and Su, G. 2012. Single-step methods
#' for genomic evaluation in pigs. Animal 6(10):1565–1571.
#'
#' Legarra, A., Aguilar, I., and Misztal, I. 2009. A relationship matrix including full
#' pedigree and genomic information. J. Dairy Sci. 92:4656-4663.
#'
#' Martini, J.W.R., Schrauf, M.F., Garcia-Baccino, C.A., Pimentel, E.C.G., Munilla, S.,
#' Rogberg-Muñoz, A., Cantet, R.J.C., Reimer, C., Gao, N., Wimmer, V., and Simianer, H. 2018.
#' The effect of the \eqn{H^{-1}} scaling factors \eqn{\tau} and \eqn{\omega}
#' on the structure of \eqn{H} in the single-step procedure.
#' Genet. Sel. Evol. 50:1-9.
#'
#' @export
#'
#' @examples
#' \donttest{
#' # Get A matrix.
#' A <- AGHmatrix::Amatrix(data = ped.pine)
#' A[1:5,1:5]
#' dim(A)
#'
#' # Read and filter genotypic data.
#' M.clean <- qc.filtering(
#'  M = geno.pine655,
#'  maf = 0.05,
#'  marker.callrate = 0.2, ind.callrate = 0.20,
#'  na.string = "-9",
#'  plots = FALSE)$M.clean
#'
#' # Get G matrix.
#' G <- G.matrix(M = M.clean, method = "VanRaden", na.string = "-9")$G
#' G[1:5, 1:5]
#' dim(G)
#'
#' # Match G and A.
#' check <- match.G2A(
#'  A = A, G = G,
#'  clean = TRUE, ord = TRUE, mism = TRUE, RMdiff = TRUE)
#'
#' # Align G matrix with A.
#' G_align <- G.tuneup(G = check$Gclean, A = check$Aclean, align = TRUE, sparseform = FALSE)$Gb
#'
#' # Get Ginverse using the aligned G.
#' Ginv <- G.inverse(G = G_align, sparseform = FALSE)$Ginv
#' Ginv[1:5, 1:5]
#' dim(Ginv)
#'
#' # Obtain H.
#' H <- H.matrix(A = A, Ginv = Ginv, lambda = 0.90, sparseform = FALSE)
#' H[1:5, 1:5]
#' }
#'

H.matrix <- function(A = NULL, Ainv = NULL, Ginv = NULL,
                     lambda = NULL, tau = 1, omega = 1,
                     sparseform = FALSE, keep.order = TRUE, digits = 8,
                     message = TRUE){

  return(
    H.inverse(
      A = A, Ainv = Ainv, Ginv = Ginv,
      lambda = lambda, tau = tau, omega = omega,
      sparseform = sparseform, keep.order = keep.order, digits = digits,
      message = message,
      inverse = FALSE  # This is the only one locked.
    )
  )
}
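# ---------------------------------------------------------------------------
# Hedged sketch (illustration only, not part of the package API): with a
# scaling factor lambda, the genotyped block of H returned by H.matrix() is
# expected to equal solve(lambda * Ginv + (1 - lambda) * A22inv). The toy
# matrices are arbitrary positive-definite values invented for this check,
# kept inside if (FALSE) so nothing runs when the file is sourced.
if (FALSE) {
  ids <- c("a", "b", "c")
  A.toy <- matrix(c(1.00, 0.50, 0.50,
                    0.50, 1.00, 0.25,
                    0.50, 0.25, 1.00), nrow = 3, dimnames = list(ids, ids))
  Ginv.toy <- solve(matrix(c(1.05, 0.30,
                             0.30, 0.98), nrow = 2,
                           dimnames = list(ids[2:3], ids[2:3])))

  H.toy <- H.matrix(A = A.toy, Ginv = Ginv.toy, lambda = 0.9, message = FALSE)

  H22.expected <- solve(0.9 * Ginv.toy + 0.1 * solve(A.toy[2:3, 2:3]))
  max(abs(H.toy[2:3, 2:3] - H22.expected))  # Expected ~0, up to rounding.
}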
/scratch/gouwar.j/cran-all/cranData/ASRgenomics/R/H_matrix.R
#' Enhanced heatmap plot for a kinship matrix K
#'
#' Generates a heatmap with dendrogram based on a provided kinship matrix.
#' This matrix can be a pedigree relationship matrix \eqn{\boldsymbol{A}}, a
#' genomic relationship matrix \eqn{\boldsymbol{G}} or a hybrid relationship
#' matrix \eqn{\boldsymbol{H}}.
#' Individual names should be assigned to \code{rownames} and \code{colnames}.
#' Individuals are sorted according to the dendrogram in both columns and rows.
#'
#' Uses the package \code{superheat} from Barter and Yu (2018) to generate the plots.
#'
#' @param K Input of a kinship matrix in full format (\eqn{n \times n}) (default = \code{NULL}).
#' @param dendrogram If \code{TRUE} a dendrogram is added to the columns based on the
#' kinship matrix (default = \code{TRUE}).
#' @param clustering.method The clustering method considered for the dendrogram.
#' Options are: \code{"hierarchical"} and \code{"kmeans"} (default = \code{"hierarchical"}).
#' @param dist.method The method considered to calculate the distance matrix between
#' individuals used for hierarchical clustering. Options are: \code{"euclidean"},
#' \code{"maximum"}, \code{"manhattan"}, \code{"canberra"}, \code{"binary"} and
#' \code{"minkowski"} (default = \code{"euclidean"}).
#' @param row.label If \code{TRUE} the individual names (\code{rownames}) are added as labels to
#' the left of the heatmap (default = \code{TRUE}).
#' @param col.label If \code{TRUE} the individual names (\code{colnames}) are added as labels to
#' the bottom of the heatmap (default = \code{FALSE}).
#'
#' @return
#' A plot with the properties specified by the above arguments.
#'
#' @references
#' Barter, R.L. and Yu, B. 2018. Superheat: An R package for creating beautiful
#' and extendable heatmaps for visualizing complex data.
#' J. Comput. Graph. Stat. 27(4):910-922.
#'
#' @export
#'
#' @examples
#' # Get G matrix.
#' G <- G.matrix(M = geno.apple, method = "VanRaden")$G
#' G[1:5, 1:5]
#'
#' # Plot a subset of the individuals.
#' kinship.heatmap(K = G[1:10, 1:10], dendrogram = TRUE, row.label = TRUE, col.label = TRUE)
#'

kinship.heatmap <- function(K = NULL, dendrogram = TRUE,
                            clustering.method = c("hierarchical", "kmeans"),
                            dist.method = c("euclidean", "maximum", "manhattan",
                                            "canberra", "binary", "minkowski"),
                            row.label = TRUE, col.label = FALSE){

  # Check if the class of K is matrix.
  if (is.null(K) || !inherits(K, "matrix")) {
    stop("K should be a valid object of class matrix.")
  }
  # Check the rownames/colnames.
  if (is.null(rownames(K))){
    stop("Individual names not assigned to rows of matrix K.")
  }
  if (is.null(colnames(K))){
    stop("Individual names not assigned to columns of matrix K.")
  }
  if ((identical(rownames(K), colnames(K))) == FALSE){
    stop("Rownames and colnames of matrix K do not match.")
  }
  # Check if there are missing values.
  if (any(is.na(K))){
    stop("Matrix K contains some missing data.")
  }

  clustering.method <- match.arg(clustering.method)
  dist.method <- match.arg(dist.method)

  if (isTRUE(col.label)) {
    labCol <- "variable"
  } else {
    labCol <- "none"
  }
  if (isTRUE(row.label)) {
    labRow <- "variable"
  } else {
    labRow <- "none"
  }

  pp <- superheat(K,
                  pretty.order.rows = TRUE, pretty.order.cols = TRUE,
                  col.dendrogram = dendrogram,
                  dist.method = dist.method,
                  clustering.method = clustering.method,
                  scale = FALSE,
                  left.label = labRow, left.label.col = "white",
                  force.left.label = TRUE,
                  bottom.label = labCol, bottom.label.col = "white",
                  bottom.label.text.angle = 90, bottom.label.text.size = 2.5,
                  force.bottom.label = TRUE,
                  print.plot = TRUE,
                  legend.text.size = 10,  # Default is 12.
                  legend.width = 2.0,
                  left.label.text.size = 2.5)

  return(pp)
}
/scratch/gouwar.j/cran-all/cranData/ASRgenomics/R/Kinship_heatmap.R
#' Performs a Principal Component Analysis (PCA) based on a kinship matrix K
#'
#' Generates a PCA and summary statistics from a given kinship matrix to assess
#' population structure. This matrix
#' can be a pedigree-based relationship matrix \eqn{\boldsymbol{A}}, a genomic
#' relationship matrix \eqn{\boldsymbol{G}} or a hybrid relationship matrix
#' \eqn{\boldsymbol{H}}. Individual names should be assigned to \code{rownames} and
#' \code{colnames}. There is additional output such as plots and other data frames
#' to be used on other downstream analyses (such as GWAS).
#'
#' It calls the function \code{eigen()} to obtain eigenvalues and generate the PCA,
#' and the \code{factoextra} R package to extract and visualize results.
#'
#' @param K Input of a kinship matrix in full form (\eqn{n \times n}) (default = \code{NULL}).
#' @param scale If \code{TRUE} the PCA analysis will scale the kinship matrix, otherwise
#' it is used in its original scale (default = \code{TRUE}).
#' @param label If \code{TRUE} individual names are included in the output plot (default = \code{FALSE}).
#' @param ncp The number of PC dimensions to be shown in the screeplot, and to provide
#' in the output data frame (default = \code{10}).
#' @param groups Specifies a vector of class factor that will be used to define different
#' colors for individuals in the PCA plot. It must be presented in the same order as the individuals
#' in the kinship matrix (default = \code{NULL}).
#' @param ellipses If \code{TRUE}, ellipses will be drawn around each of the defined levels in
#' \code{groups} (default = \code{FALSE}).
#'
#' @return A list with the following four elements:
#' \itemize{
#' \item{\code{eigenvalues}: a data frame with the eigenvalues and their associated variances,
#' including only the first \code{ncp} dimensions.}
#' \item{\code{pca.scores}: a data frame with scores (rotated observations on the new components) including
#' only the first \code{ncp} dimensions.}
#' \item{\code{plot.pca}: a scatterplot of the scores on the first two dimensions (PC1 and PC2).}
#' \item{\code{plot.scree}: a barchart with the percentage of variance explained by the \code{ncp} dimensions.}
#' }
#'
#' @export
#'
#' @examples
#' # Get G matrix.
#' G <- G.matrix(M = geno.apple, method = "VanRaden")$G
#' G[1:5, 1:5]
#'
#' # Perform the PCA.
#' G_pca <- kinship.pca(K = G, ncp = 10)
#' ls(G_pca)
#' G_pca$eigenvalues
#' head(G_pca$pca.scores)
#' G_pca$plot.pca
#' G_pca$plot.scree
#'
#' # PCA plot by family (17 groups).
#' grp <- as.factor(pheno.apple$Family)
#' G_pca_grp <- kinship.pca(K = G, groups = grp, label = FALSE, ellipses = FALSE)
#' G_pca_grp$plot.pca
#'

kinship.pca <- function(K = NULL, scale = TRUE, label = FALSE, ncp = 10,
                        groups = NULL, ellipses = FALSE){

  # Check if the class of K is matrix.
  if (is.null(K) || !inherits(K, "matrix")) {
    stop("K should be a valid object of class matrix.")
  }
  # Check the rownames/colnames.
  if (is.null(rownames(K))){
    stop("Individual names not assigned to rows of matrix K.")
  }
  if (is.null(colnames(K))){
    stop("Individual names not assigned to columns of matrix K.")
  }
  if ((identical(rownames(K), colnames(K))) == FALSE){
    stop("Rownames and colnames of matrix K do not match.")
  }
  # Check if the number of individuals is at least 3.
  if (nrow(K) < 3 | ncol(K) < 3){
    stop("Matrix K needs at least 3 individuals.")
  }
  # Check if there are missing values.
  if (any(is.na(K))){
    stop("Matrix K contains some missing data.")
  }
  if (ncp < 1 | ncp > nrow(K)) {
    stop("Value of ncp must be positive and not larger than the number of rows in matrix K.")
  }

  # Generating the pca.
  if (scale) { K <- cov2cor(K) }
  Peig <- eigen(K)                                  # PCA-eigenvalues.
  loadings <- Peig$vectors                          # Loadings.
  PCAcoord <- as.matrix(K) %*% as.matrix(loadings)  # New PCA coordinates.
  colnames(PCAcoord) <- paste('PC', c(1:ncol(K)), sep = '')
  colnames(loadings) <- paste('PC', c(1:ncol(K)), sep = '')
  rownames(loadings) <- rownames(K)

  # Preparing the prcomp class object.
  sdev <- sqrt(Peig$values)
  rotation <- loadings
  x <- PCAcoord
  pca <- list(sdev = sdev, rotation = rotation, center = scale,
              scale = scale, x = x)
  class(pca) <- "prcomp"

  # Percentage of variance explained by each principal component.
  scree_plot <- fviz_eig(pca, addlabels = TRUE, ncp = ncp,
                         barfill = "#0072B2", barcolor = "#0072B2",
                         ggtheme = theme_classic())

  # Extract the eigenvalues/variances of the principal dimensions.
  eig_var <- get_eig(pca)

  # Plot PCA.
  if (isTRUE(label)) {
    if (is.null(groups)) {
      pca_plot <- fviz_pca_ind(pca, geom = c("point", "text"), repel = TRUE,
                               col.ind = "#0072B2", ggtheme = theme_classic())
    } else {
      pca_plot <- fviz_pca_ind(pca, geom = c("point", "text"), repel = TRUE,
                               col.ind = groups,  # Color by groups.
                               mean.point = FALSE,
                               legend.title = "Groups",
                               ggtheme = theme_classic())
    }
  }
  if (isFALSE(label)) {
    if (is.null(groups)) {
      pca_plot <- fviz_pca_ind(pca, geom = "point",
                               col.ind = "#0072B2", ggtheme = theme_classic())
    } else {
      pca_plot <- fviz_pca_ind(pca, geom = "point",
                               col.ind = groups,  # Color by groups.
                               mean.point = FALSE,
                               legend.title = "Groups",
                               ggtheme = theme_classic())
    }
  }

  # Process ellipses if requested.
  if (!is.null(groups) & ellipses){
    # TODO this can be more memory efficient.
    group.comps <- cbind.data.frame(pca$x[, 1:2], groups)

    # Get centroids.
    centroids <- aggregate(cbind(PC1, PC2) ~ groups, data = group.comps,
                           FUN = mean)

    # Get ellipses.
    ellipses.data <- do.call(
      rbind,
      lapply(unique(group.comps$groups), function(t) {
        data.frame(
          groups = as.character(t),
          ellipse(
            cov(group.comps[group.comps$groups == t, 1:2]),
            centre = as.matrix(centroids[t, 2:3]),
            level = 0.95),
          stringsAsFactors = FALSE)
      }
      )
    )

    # Add ellipses to plot.
    pca_plot <- pca_plot +
      geom_path(data = ellipses.data, linewidth = .5, inherit.aes = FALSE,
                aes(x = PC1, y = PC2, color = groups))
  }

  # Scores (rotated X observations on the new components) for ncp components.
  scores <- pca$x[, c(1:ncp)]
  eigenvalues <- eig_var[c(1:ncp), ]

  return(list(pca.scores = scores, eigenvalues = eigenvalues,
              plot.scree = scree_plot, plot.pca = pca_plot))
}
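# ---------------------------------------------------------------------------
# Hedged sketch (illustration only, not part of the package API): the PCA
# scores above are the rows of K projected onto the eigenvectors of K (after
# cov2cor() scaling when scale = TRUE). This recomputes PC1 by hand and
# compares it with the kinship.pca() output; left inside if (FALSE) so it
# never runs when the file is sourced.
if (FALSE) {
  G <- G.matrix(M = geno.apple, method = "VanRaden")$G
  pca <- kinship.pca(K = G, scale = TRUE, ncp = 5)

  Ks <- cov2cor(G)
  scores.byhand <- Ks %*% eigen(Ks)$vectors

  # Eigenvectors are defined only up to sign, so compare absolute values.
  max(abs(abs(pca$pca.scores[, 1]) - abs(scores.byhand[, 1])))  # Expected ~0.
}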
/scratch/gouwar.j/cran-all/cranData/ASRgenomics/R/Kinship_pca.R
#' Function that silences console output (e.g., from \code{cat()} or \code{print()})
#'
#' @param code code to be silenced.
#'
#' @return None.
#'
#' @keywords internal

silent_ <- function(code) {
  sink(tempfile())
  on.exit(sink())
  invisible(force(code))
}

#' Creates a dummy map if not provided
#'
#' @param marker.id vector with names of markers to compose dummy map.
#' @param message logical value indicating whether diagnostic messages should be printed on screen (default = \code{TRUE}).
#'
#' @return Data frame with dummy map. A single chromosome/linkage group is created and marker
#' distances are a sequence from one to the number of markers.
#'
#' @keywords internal

dummy.map_ <- function(marker.id = NULL, message = TRUE) {

  # Report if required.
  if (message) message("Creating dummy map.")

  # Get dummy map.
  map <- data.frame(marker = marker.id, chrom = 1, pos = seq_along(marker.id))

  return(map)
}

#' Assess condition of the inverse of \strong{K}
#'
#' @param Kinv An inverse relationship matrix in full or sparse form.
#'
#' @return An object of class character with the condition of the matrix:
#' well-conditioned or ill-conditioned.
#'
#' @keywords internal

# TODO check if this can be further improved.
Kinv.condition <- function(Kinv){

  if (nrow(Kinv) != ncol(Kinv)){
    Kinv <- sparse2full(Kinv)
  }

  n <- ncol(Kinv)
  CN.1 <- Kinv[1, 2]/sqrt(Kinv[1, 1] * Kinv[2, 2])
  CN.N <- Kinv[(n - 1), n]/sqrt(Kinv[(n - 1), (n - 1)] * Kinv[n, n])
  max_diag <- abs(max(diag(Kinv)))
  max_off_diag <- max(abs(Kinv - diag(diag(Kinv))))

  if (abs(CN.1) > 0.99 | abs(CN.N) > 0.99 | max_diag > 1000 | max_off_diag > 1000) {
    status <- "ill-conditioned"
  } else {
    status <- "well-conditioned"
  }

  return(status)
}
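# ---------------------------------------------------------------------------
# Hedged sketch (illustration only): intended use of the internal helpers
# above. dummy.map_() builds a single-chromosome placeholder map from marker
# names, and silent_() swallows console output from the wrapped call. Kept
# inside if (FALSE) so nothing executes when the file is sourced.
if (FALSE) {
  map <- dummy.map_(marker.id = colnames(geno.apple)[1:5], message = FALSE)
  map  # Data frame with columns marker, chrom (all 1) and pos (1 to 5).

  x <- silent_(print("hidden"))  # Nothing is printed to the console.
  x                              # The value of the call is still returned.
}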
/scratch/gouwar.j/cran-all/cranData/ASRgenomics/R/auxiliar.R
#' Check data class
#'
#' @param data_ Data to be checked.
#' @param class_ The expected class of data_.
#'
#' @keywords internal

check.data_ <- function(data_ = NULL, class_ = NULL){

  object_ <- get(data_, envir = parent.frame())

  # Test if data class is compliant.
  if( !any(class(object_) %in% class_) )  # If data has a different class: break.
    stop(paste0("The ", data_, " argument should be of class(es) ",
                paste0(class_, collapse = " or "), "."))
}

#' Check data mode
#'
#' @param data_ Data to be checked.
#' @param mode_ The expected mode of data_.
#'
#' @keywords internal

check.data.mode_ <- function(data_ = NULL, mode_ = NULL){

  object_ <- get(data_, envir = parent.frame())

  # Test if data mode is compliant.
  if( !any(mode(object_) %in% mode_) )  # If data has a different mode: break.
    stop(paste0("The ", data_, " argument should be of mode(s) \'",
                paste0(mode_, collapse = " or "), "'."))
}

# #' Check class of objects inside list
# #'
# #' @param data_ Data to be checked.
# #' @param class_ The expected class of data_.
# #'
# #' @keywords internal
#
# check.objects.list_ <- function(data_ = NULL,
#                                 class_ = NULL){
#
#   object_ <- get(data_, envir = parent.frame())
#
#   # Test if data class is compliant.
#   if( !any(class(object_) %in% class_) ) # If data has different classes: break
#     stop(paste0("Objects inside list \'", data_, "' should be of class(es) ",
#                 paste0(class_, collapse = " or ") , "."))
#
# }

#' Check logical arguments
#'
#' @param arg_ The boolean argument to be checked.
#'
#' @keywords internal

check.logical_ <- function(arg_ = NULL){

  # Check if logical and stop if not.
  if( !is.logical( get(arg_, envir = parent.frame()) ) )
    stop(paste0("The value of the \'", arg_, "' argument should be of class \'logical' (TRUE or FALSE)."))
}

#' Check string arguments
#'
#' @param data_ Data to be checked.
#' @param mandatory_ If the argument is mandatory for the analysis.
#' @param arg_ The string with the name of the function argument (e.g., \code{"gen"}).
#' @param class_ The expected class of the variable in data.
#' @param class.action_ The action to be taken if the variable has the wrong class
#' (e.g., \code{"message"}, \code{"warning"}, \code{"stop"}).
#' @param mutate_ If the argument should be mutated into the desired class.
#' @param message_ If \code{class.action_ == "message"}, pass \code{message_ = message}
#' so the caller's \code{message} flag is respected.
#'
#' @details This function uses \code{get} and \code{assign}, which need access to objects
#' one environment up in the hierarchy. The \code{envir} is set to \code{parent.frame}. If the function is looking
#' for something two or more environments up, the arguments of \code{parent.frame} have to be changed.
#'
#' @keywords internal

check.args_ <- function(data_ = NULL, mandatory_ = FALSE, arg_ = NULL,
                        class_ = NULL, class.action_ = NULL,
                        mutate_ = NULL, message_ = message){

  # Get data name.
  data.name_ <- deparse(substitute(data_))

  # Get argument name.
  arg.name_ <- deparse(substitute(arg_))

  # Get class.
  class.fun_ <- paste0("is.", class_)

  # Capture relevant info.
  variable.name_ <- get(arg.name_, envir = parent.frame())

  # Evaluate if variable is not null.
  if (!is.null(variable.name_)){

    # Perform actions for each variable passed.
    for (cur.variable.name_ in variable.name_){

      # Check cur.variable.name_ is in data (mandatory stop).
      if (!cur.variable.name_ %in% names(data_))
        stop(paste0("\'", cur.variable.name_,
                    "' does not correspond to a variable name of \'", data.name_, "'."))

      # Check class of variable in data.
if(!getFunction(class.fun_)(data_[[cur.variable.name_]])){ # Do action unless action is message and message is FALSE and there is no mutation. if(!(class.action_ == "message" & isFALSE(message_)) & is.null(mutate_)){ getFunction(class.action_)( paste0("Variable \'", cur.variable.name_, "' must be of class \'", class_, "'.")) } # Mutate variable if requested. if (!is.null(mutate_)){ data_[[cur.variable.name_]] <- getFunction(paste0("as.", class_))(data_[[cur.variable.name_]]) assign(x = data.name_, value = data_, envir = parent.frame()) if (message_){ message("Coercing \'", cur.variable.name_, "' to class \'", class_, "'.") } } } } } # Evaluate if arg is null. if( is.null(variable.name_) & mandatory_ ) stop(paste0("The argument \'", arg.name_, "' is mandatory.")) }
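# ---------------------------------------------------------------------------
# Hedged sketch (illustration only): how check.args_() is meant to be called
# from inside another function of the package. The data frame, function and
# variable names below are made up for this example; the block sits inside
# if (FALSE) so it does not run when the file is sourced.
if (FALSE) {
  toy <- data.frame(gen = c("g1", "g2"), yield = c(10, 12))

  f <- function(data, gen = NULL) {
    # Coerce the column named by 'gen' to factor, reporting the coercion.
    check.args_(data_ = data, mandatory_ = TRUE, arg_ = gen,
                class_ = "factor", class.action_ = "message",
                mutate_ = TRUE, message_ = TRUE)
    str(data)  # 'gen' is now a factor inside this frame.
  }

  f(data = toy, gen = "gen")
}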
/scratch/gouwar.j/cran-all/cranData/ASRgenomics/R/check_args.R
#' Generates a sparse form matrix from a full form matrix
#'
#' Converts the input square matrix into its sparse form, with three columns per line
#' corresponding to the set \code{Row, Col, Value}. The sparse matrix stores the lower
#' triangle of the original matrix row-wise, sorted by columns within rows.
#' Values of zero on the matrix are dropped by default. Individual
#' names should be assigned to \code{rownames} and \code{colnames}.
#'
#' Based on the function published by Borgognone \emph{et al.} (2016).
#'
#' @param K A square matrix in full form (\eqn{n \times n}) (default = \code{NULL}).
#' @param drop.zero If \code{TRUE} observations equal to zero are dropped
#' (default = \code{TRUE}).
#'
#' @return A matrix in sparse form with columns: \code{Row, Col, Value}
#' together with the attributes \code{rowNames} and \code{colNames}.
#' If the attribute \code{INVERSE} is found it is also passed to the sparse matrix.
#'
#' @references
#' Borgognone, M.G., Butler, D.G., Ogbonnaya, F.C. and Dreccer, M.F. 2016.
#' Molecular marker information in the analysis of multi-environment trial helps
#' differentiate superior genotypes from promising parents. Crop Science 56:2612-2628.
#'
#' @export
#'
#' @examples
#' # Get G matrix.
#' G <- G.matrix(M = geno.apple, method = "VanRaden", sparseform = FALSE)$G
#' G[1:5, 1:5]
#'
#' # Transform matrix into sparse.
#' G.sparse <- full2sparse(K = G)
#' head(G.sparse)
#' head(attr(G.sparse, "rowNames"))
#' head(attr(G.sparse, "colNames"))
#'

full2sparse <- function(K = NULL, drop.zero = TRUE) {

  if (is.null(K) || !inherits(K, "matrix")) {
    stop("K should be a valid object of class matrix.")
  }
  # Check the attributes of K.
  if (is.null(rownames(K))){
    stop("Individual names not assigned to rows of matrix K.")
  }
  if (is.null(colnames(K))){
    stop("Individual names not assigned to columns of matrix K.")
  }
  if ((identical(rownames(K), colnames(K))) == FALSE){
    stop("Rownames and colnames of matrix K do not match.")
  }

  INVERSE <- attr(K, "INVERSE")

  # Select the lower-triangle entries to keep.
  if (drop.zero) {
    keep <- (K != 0 & lower.tri(K, diag = TRUE))
  } else {
    keep <- lower.tri(K, diag = TRUE)
  }
  sparse.frame <- data.frame(Row = t(row(K))[t(keep)],
                             Col = t(col(K))[t(keep)],
                             Value = t(K)[t(keep)])
  sparse.frame <- as.matrix(sparse.frame)

  # Add attributes.
  attr(sparse.frame, "rowNames") <- rownames(K)
  attr(sparse.frame, "colNames") <- colnames(K)

  # Pass collected attributes.
  if (!is.null(INVERSE)) { attr(sparse.frame, "INVERSE") <- INVERSE }

  return(sparse.frame)
}
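# ---------------------------------------------------------------------------
# Hedged sketch (illustration only): full2sparse() is expected to round-trip
# with sparse2full() from this package, assuming sparse2full() restores the
# dimnames from the 'rowNames'/'colNames' attributes. Placed inside
# if (FALSE) so nothing executes when the file is sourced.
if (FALSE) {
  G <- G.matrix(M = geno.apple, method = "VanRaden")$G
  G.sparse <- full2sparse(K = G)
  G.back <- sparse2full(G.sparse)
  max(abs(G.back - G[rownames(G.back), colnames(G.back)]))  # Expected ~0.
}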
/scratch/gouwar.j/cran-all/cranData/ASRgenomics/R/full2sparse.R
#' Genotypic data for apple dataset
#'
#' Genotypic data on 247 apple clones (\emph{i.e.}, genotypes) with a total of
#' 2,828 SNP markers (coded as 0, 1 and 2, with no missing records).
#' Dataset obtained from supplementary material in Kumar \emph{et al.} (2015).
#'
#' @docType data
#'
#' @usage geno.apple
#'
#' @format matrix
#'
#' @keywords datasets
#'
#' @references
#' Kumar S., Molloy C., Muñoz P., Daetwyler H., Chagné D., and Volz R. 2015.
#' Genome-enabled estimates of additive and nonadditive genetic variances and prediction
#' of apple phenotypes across environments. G3 Genes, Genomes, Genetics 5:2711-2718.
#'
#' @examples
#' geno.apple[1:5, 1:5]
#'
#' @name geno.apple
NULL
/scratch/gouwar.j/cran-all/cranData/ASRgenomics/R/geno_apple.R