#' Variance estimation for sample surveys in domains by the ultimate cluster method
#'
#' @description Computes the variance estimation for sample surveys in domains by the ultimate cluster method.
#'
#' @param Y Variables of interest. Object convertible to \code{data.table} or variable names as character, column numbers.
#' @param H The unit stratum variable. One dimensional object convertible to one-column \code{data.table} or variable name as character, column number.
#' @param PSU Primary sampling unit variable. One dimensional object convertible to one-column \code{data.table} or variable name as character, column number.
#' @param w_final Weight variable. One dimensional object convertible to one-column \code{data.table} or variable name as character, column number.
#' @param id Optional variable for unit ID codes. One dimensional object convertible to one-column \code{data.table} or variable name as character, column number.
#' @param Dom Optional variables used to define population domains. If supplied, variables of interest are calculated for each domain. An object convertible to \code{data.table} or variable names as character vector, column numbers.
#' @param period Optional variable for survey period. If supplied, residual estimation of calibration is done independently for each time period. One dimensional object convertible to one-column \code{data.table}.
#' @param PSU_sort Optional; if \code{PSU_sort} is defined, the variance is calculated for a systematic sample.
#' @param N_h Number of primary sampling units in the population for each stratum (and period if \code{period} is not \code{NULL}). If \code{N_h = NULL} and \code{fh_zero = FALSE} (default), \code{N_h} is estimated from sample data as the sum of weights (\code{w_final}) in each stratum (and period if \code{period} is not \code{NULL}). Optional for a single-stage sampling design, as it will be estimated from sample data. Recommended for a multi-stage sampling design, as \code{N_h} cannot be correctly estimated from the sample data in this case. If \code{N_h} is not used in case of a multi-stage sampling design (for example, because this information is not available), it is advisable to set \code{fh_zero = TRUE}. If \code{period} \bold{is} \code{NULL}, a two-column matrix with rows for each stratum: the first column should contain the stratum code, the second column the number of primary sampling units in the population of each stratum. If \code{period} \bold{is not} \code{NULL}, a three-column matrix with rows for each intersection of stratum and period: the first column should contain the period, the second column the stratum code, and the third column the number of primary sampling units in the population of each stratum and period.
#' @param fh_zero By default \code{FALSE}; \code{fh} is calculated as the division of n_h by N_h in each stratum; if \code{TRUE}, the \code{fh} value is zero in each stratum.
#' @param PSU_level By default \code{TRUE}; if \code{PSU_level} is \code{TRUE}, in each stratum \code{fh} is calculated as the division of the count of PSUs in the sample (n_h) by the count of PSUs in the frame (N_h). If \code{PSU_level} is \code{FALSE}, in each stratum \code{fh} is calculated as the division of the count of units in the sample (n_h) by the count of units in the frame (N_h), which is calculated as the sum of weights.
#' @param Z Optional variables of denominator for ratio estimation. Object convertible to \code{data.table} or variable names as character, column numbers.
#' @param X Optional matrix of the auxiliary variables for the calibration estimator. Object convertible to \code{data.table} or variable names as character, column numbers.
#' @param ind_gr Optional variable by which the \code{X} matrix of auxiliary variables is split for independent calibration. One dimensional object convertible to one-column \code{data.table} or variable name as character, column number.
#' @param g Optional variable of the g weights. One dimensional object convertible to one-column \code{data.table} or variable name as character, column number.
#' @param q Variable of the positive values accounting for heteroscedasticity. One dimensional object convertible to one-column \code{data.table} or variable name as character, column number.
#' @param dataset Optional survey data object convertible to \code{data.table}.
#' @param confidence Optional positive value for the confidence interval. By default 0.95.
#' @param percentratio Positive numeric value. All linearized variables are multiplied by the \code{percentratio} value; by default 1.
#' @param outp_lin Logical value. If \code{TRUE}, the linearized values of the ratio estimator will be printed out.
#' @param outp_res Logical value. If \code{TRUE}, the estimated residuals of calibration will be printed out.
#'
#' @return A list with the following objects is returned by the function:
#' \itemize{
#'   \item \code{lin_out} - a \code{data.table} containing the linearized values of the ratio estimator with id and PSU.
#'   \item \code{res_out} - a \code{data.table} containing the estimated residuals of calibration with id and PSU.
#'   \item \code{betas} - a numeric \code{data.table} containing the estimated coefficients of calibration.
#'   \item \code{all_result} - a \code{data.table} containing the variables: \cr
#'     \code{variable} - names of variables of interest, \cr
#'     \code{Dom} - optional variable of the population domains, \cr
#'     \code{period} - optional variable of the survey periods, \cr
#'     \code{respondent_count} - the count of respondents, \cr
#'     \code{pop_size} - the estimated size of population, \cr
#'     \code{n_nonzero} - the count of respondents whose answers are larger than zero, \cr
#'     \code{estim} - the estimated value, \cr
#'     \code{var} - the estimated variance, \cr
#'     \code{se} - the estimated standard error, \cr
#'     \code{rse} - the estimated relative standard error (coefficient of variation), \cr
#'     \code{cv} - the estimated relative standard error (coefficient of variation) in percentage, \cr
#'     \code{absolute_margin_of_error} - the estimated absolute margin of error, \cr
#'     \code{relative_margin_of_error} - the estimated relative margin of error in percentage, \cr
#'     \code{CI_lower} - the estimated confidence interval lower bound, \cr
#'     \code{CI_upper} - the estimated confidence interval upper bound, \cr
#'     \code{confidence_level} - the positive value for confidence interval, \cr
#'     \code{S2_y_HT} - the estimated variance of the y variable in case of total, or the estimated variance of the linearised variable in case of the ratio of two totals, using non-calibrated weights, \cr
#'     \code{S2_y_ca} - the estimated variance of the y variable in case of total, or the estimated variance of the linearised variable in case of the ratio of two totals, using calibrated weights, \cr
#'     \code{S2_res} - the estimated variance of the regression residuals, \cr
#'     \code{var_srs_HT} - the estimated variance of the HT estimator under SRS, \cr
#'     \code{var_cur_HT} - the estimated variance of the HT estimator under the current design, \cr
#'     \code{var_srs_ca} - the estimated variance of the calibrated estimator under SRS, \cr
#'     \code{deff_sam} - the estimated design effect of the sample design, \cr
#'     \code{deff_est} - the estimated design effect of the estimator, \cr
#'     \code{deff} - the overall estimated design effect of the sample design and the estimator, \cr
#'     \code{n_eff} - the effective sample size.
#' }
#'
#' @details Calculates variance estimation in domains based on the book of Hansen, Hurwitz and Madow.
#'
#' @references
#' Morris H. Hansen, William N. Hurwitz, William G. Madow, (1953), Sample survey methods and theory Volume I Methods and applications, 257-258, Wiley. \cr
#' Guillaume Osier and Emilio Di Meglio. The linearisation approach implemented by Eurostat for the first wave of EU-SILC: what could be done from the second wave onwards? 2012. \cr
#' Guillaume Osier, Yves Berger, Tim Goedeme, (2013), Standard error estimation for the EU-SILC indicators of poverty and social exclusion, Eurostat Methodologies and Working papers, URL \url{http://ec.europa.eu/eurostat/documents/3888793/5855973/KS-RA-13-024-EN.PDF}. \cr
#' Eurostat Methodologies and Working papers, Handbook on precision requirements and variance estimation for ESS household surveys, 2013, URL \url{http://ec.europa.eu/eurostat/documents/3859598/5927001/KS-RA-13-029-EN.PDF}. \cr
#' Yves G. Berger, Tim Goedeme, Guillame Osier (2013). Handbook on standard error estimation and other related sampling issues in EU-SILC, URL \url{https://ec.europa.eu/eurostat/cros/content/handbook-standard-error-estimation-and-other-related-sampling-issues-ver-29072013_en}. \cr
#' Jean-Claude Deville (1999). Variance estimation for complex statistics and estimators: linearization and residual techniques. Survey Methodology, 25, 193-203, URL \url{https://www150.statcan.gc.ca/n1/pub/12-001-x/1999002/article/4882-eng.pdf}. \cr
#'
#' @seealso \code{\link{domain}},
#'   \code{\link{lin.ratio}},
#'   \code{\link{residual_est}},
#'   \code{\link{vardomh}},
#'   \code{\link{var_srs}},
#'   \code{\link{variance_est}},
#'   \code{\link{variance_othstr}}
#'
#' @keywords vardpoor
#'
#' @examples
#' library("data.table")
#' library("laeken")
#' data(eusilc)
#' dataset1 <- data.table(IDd = paste0("V", 1 : nrow(eusilc)), eusilc)
#'
#' aa <- vardom(Y = "eqIncome", H = "db040", PSU = "db030",
#'              w_final = "rb050", id = "rb030", Dom = "db040",
#'              period = NULL, N_h = NULL, Z = NULL,
#'              X = NULL, g = NULL, q = NULL, dataset = dataset1,
#'              confidence = .95, percentratio = 100,
#'              outp_lin = TRUE, outp_res = TRUE)
#'
#' @import data.table
#' @export vardom

vardom <- function(Y, H, PSU, w_final,
                   id = NULL, Dom = NULL,
                   period = NULL, PSU_sort = NULL,
                   N_h = NULL, fh_zero = FALSE,
                   PSU_level = TRUE, Z = NULL,
                   X = NULL, ind_gr = NULL,
                   g = NULL, q = NULL,
                   dataset = NULL, confidence = .95,
                   percentratio = 1, outp_lin = FALSE,
                   outp_res = FALSE) {

  ### Checking
  fh_zero <- check_var(vars = fh_zero, varn = "fh_zero", varntype = "logical")
  PSU_level <- check_var(vars = PSU_level, varn = "PSU_level", varntype = "logical")
  outp_lin <- check_var(vars = outp_lin, varn = "outp_lin", varntype = "logical")
  outp_res <- check_var(vars = outp_res, varn = "outp_res", varntype = "logical")
  percentratio <- check_var(vars = percentratio, varn = "percentratio", varntype = "pinteger")
  confidence <- check_var(vars = confidence, varn = "confidence", varntype = "numeric01")

  Y <- check_var(vars = Y, varn = "Y", dataset = dataset,
                 check.names = TRUE, isnumeric = TRUE, grepls = "__")
  Ynrow <- nrow(Y)
  Yncol <- ncol(Y)

  H <- check_var(vars = H, varn = "H", dataset = dataset,
                 ncols = 1, Ynrow = Ynrow, isnumeric = FALSE,
                 ischaracter = TRUE)

  period <- check_var(vars = period, varn = "period", dataset = dataset,
                      Ynrow = Ynrow, ischaracter = TRUE,
                      mustbedefined = FALSE, duplicatednames = TRUE)

  id <- check_var(vars = id, varn = "id", dataset = dataset,
                  ncols = 1, Ynrow = Ynrow, ischaracter = TRUE,
                  periods = period)

  PSU <- check_var(vars = PSU, varn = "PSU", dataset = dataset,
                   ncols = 1, Ynrow = Ynrow, ischaracter = TRUE,
                   namesID1 = names(id))

  Dom <- check_var(vars = Dom, varn = "Dom", dataset = dataset,
                   Ynrow = Ynrow, ischaracter = TRUE,
                   mustbedefined = FALSE, duplicatednames = TRUE,
                   grepls = "__")
  namesDom <- names(Dom)

  w_final <- check_var(vars = w_final, varn = "w_final",
                       dataset = dataset, ncols = 1, Ynrow = Ynrow,
                       isnumeric = TRUE, isvector = TRUE)

  Z <- check_var(vars = Z, varn = "Z", dataset = dataset,
                 check.names = TRUE, Yncol = Yncol, Ynrow = Ynrow,
                 isnumeric = TRUE, mustbedefined = FALSE)

  PSU_sort <- check_var(vars = PSU_sort, varn = "PSU_sort", dataset = dataset,
                        ncols = 1, Ynrow = Ynrow, ischaracter = TRUE,
                        isvector = TRUE, mustbedefined = FALSE, PSUs = PSU)

  if (!is.null(X) | !is.null(g) | !is.null(q) | !is.null(ind_gr)) {
    X <- check_var(vars = X, varn = "X", dataset = dataset,
                   check.names = TRUE, Ynrow = Ynrow, isnumeric = TRUE,
                   dif_name = c(names(period), "g", "q"), dX = "X")
    Xnrow <- nrow(X)

    ind_gr <- check_var(vars = ind_gr, varn = "ind_gr",
                        dataset = dataset, ncols = 1, Xnrow = Xnrow,
                        ischaracter = TRUE, dX = "X",
                        dif_name = c(names(period), names(X), "g", "q"))

    g <- check_var(vars = g, varn = "g", dataset = dataset,
                   ncols = 1, Xnrow = Xnrow, isnumeric = TRUE,
                   isvector = TRUE, dX = "X")

    q <- check_var(vars = q, varn = "q", dataset = dataset,
                   ncols = 1, Xnrow = Xnrow, isnumeric = TRUE,
                   isvector = TRUE, dX = "X")
  }
  N <- dataset <- NULL

  # N_h
  np <- sum(ncol(period))
  if (!is.null(N_h)) {
    N_h <- data.table(N_h)
    if (anyNA(N_h)) stop("'N_h' has missing values")
    if (ncol(N_h) != np + 2) stop(paste0("'N_h' should be ", np + 2, " columns"))
    if (!is.numeric(N_h[[ncol(N_h)]])) stop("The last column of 'N_h' should be numeric")
    nams <- c(names(period), names(H))
    if (all(nams %in% names(N_h))) {
      N_h[, (nams) := lapply(.SD, as.character), .SDcols = nams]
    } else stop(paste0("All strata titles of 'H'",
                       ifelse(!is.null(period), " and period titles of 'period'", ""),
                       " are not in 'N_h'"))
    if (is.null(period)) {
      if (any(is.na(merge(unique(H), N_h, by = names(H), all.x = TRUE))))
        stop("'N_h' is not defined for all strata")
      if (any(duplicated(N_h[, head(names(N_h), -1), with = FALSE])))
        stop("Strata values for 'N_h' must be unique")
    } else {
      pH <- data.table(period, H)
      if (any(is.na(merge(unique(pH), N_h, by = names(pH), all.x = TRUE))))
        stop("'N_h' is not defined for all strata and periods")
      if (any(duplicated(N_h[, head(names(N_h), -1), with = FALSE])))
        stop("Strata values for 'N_h' must be unique in all periods")
      pH <- NULL
    }
    setkeyv(N_h, names(N_h)[c(1 : (1 + np))])
  }

  ### Calculation

  # Domains
  if (!is.null(Dom)) {
    Y1 <- domain(Y = Y, D = Dom, dataset = NULL, checking = FALSE)
  } else Y1 <- Y
  Y <- NULL

  n_nonzero <- copy(Y1)
  Z1 <- NULL
  if (!is.null(period)) {
    n_nonzero <- data.table(period, n_nonzero)
    n_nonzero <- n_nonzero[, lapply(.SD, function(x)
                               sum(as.integer(abs(x) > .Machine$double.eps))),
                           keyby = names(period), .SDcols = names(Y1)]
  } else n_nonzero <- n_nonzero[, lapply(.SD, function(x)
                                    sum(as.integer(abs(x) > .Machine$double.eps))),
                                .SDcols = names(Y1)]

  sar_nr <- respondent_count <- pop_size <- NULL
  nhs <- data.table(respondent_count = 1, pop_size = w_final)
  if (!is.null(period)) nhs <- data.table(period, nhs)
  if (!is.null(Dom)) nhs <- data.table(Dom, nhs)
  if (!is.null(c(Dom, period))) {
    nhs <- nhs[, lapply(.SD, sum, na.rm = TRUE),
               keyby = eval(names(nhs)[0 : 1 - ncol(nhs)]),
               .SDcols = c("respondent_count", "pop_size")]
  } else nhs <- nhs[, lapply(.SD, sum, na.rm = TRUE),
                    .SDcols = c("respondent_count", "pop_size")]

  # Design weights
  if (!is.null(X)) w_design <- w_final / g else w_design <- w_final

  # Ratio of two totals
  linratio_outp <- variableZ <- estim <- deff_sam <- NULL
  deff_est <- deff <- var_est2 <- se <- rse <- cv <- NULL
  absolute_margin_of_error <- relative_margin_of_error <- NULL
  S2_y_HT <- S2_y_ca <- S2_res <- CI_lower <- CI_upper <- NULL
  variable <- deff_sam <- deff_est <- deff <- n_eff <- NULL
  aH <- names(H)
  idper <- id
  if (!is.null(period)) idper <- data.table(idper, period)

  if (!is.null(Z)) {
    if (!is.null(Dom)) {
      Z1 <- domain(Y = Z, D = Dom, dataset = NULL, checking = FALSE)
    } else Z1 <- Z
    if (is.null(period)) {
      Y2 <- lin.ratio(Y = Y1, Z = Z1, weight = w_final, Dom = NULL,
                      dataset = NULL, percentratio = percentratio,
                      checking = FALSE)
    } else {
      periodap <- do.call("paste", c(as.list(period), sep = "_"))
      lin1 <- lapply(split(Y1[, .I], periodap), function(i)
                data.table(sar_nr = i,
                           lin.ratio(Y = Y1[i], Z = Z1[i],
                                     weight = w_final[i], Dom = NULL,
                                     dataset = NULL,
                                     percentratio = percentratio,
                                     checking = FALSE)))
      Y2 <- rbindlist(lin1)
      setkeyv(Y2, "sar_nr")
      Y2[, sar_nr := NULL]
    }
    if (any(is.na(Y2))) print("Results are calculated, but there are cases where Z = 0")
    if (outp_lin) linratio_outp <- data.table(idper, PSU, Y2)
  } else {
    Y2 <- Y1
  }

  # Total estimation
  lin1 <- Z <- Y_est <- Z_est <- variableDZ <- NULL

  hY <- data.table(Y1 * w_final)
  if (is.null(period)) {
    Y_est <- hY[, lapply(.SD, sum, na.rm = TRUE), .SDcols = names(Y1)]
  } else {
    hY <- data.table(period, hY)
    Y_est <- hY[, lapply(.SD, sum, na.rm = TRUE),
                keyby = names(period), .SDcols = names(Y1)]
  }
  Y_est <- transpos(Y_est, is.null(period), "Y_est", names(period))
  all_result <- Y_est

  if (!is.null(Z1)) {
    YZnames <- data.table(variable = names(Y1), variableDZ = names(Z1))
    all_result <- merge(all_result, YZnames, all = TRUE, by = "variable")
    hZ <- data.table(Z1 * w_final)
    if (is.null(period)) {
      Z_est <- hZ[, lapply(.SD, sum, na.rm = TRUE), .SDcols = names(Z1)]
    } else {
      hZ <- data.table(period, hZ)
      Z_est <- hZ[, lapply(.SD, sum, na.rm = TRUE),
                  keyby = names(period), .SDcols = names(Z1)]
    }
    Z_est <- transpos(Z_est, is.null(period), "Z_est", names(period), "variableDZ")
    all_result <- merge(all_result, Z_est, all = TRUE,
                        by = c(names(period), "variableDZ"))
  }

  vars <- data.table(variable = names(Y1), nr_names = 1 : ncol(Y1))
  all_result <- merge(vars, all_result, all = TRUE, by = "variable")

  n_nonzero <- transpos(n_nonzero, is.null(period), "n_nonzero", names(period))
  all_result <- merge(all_result, n_nonzero, all = TRUE,
                      by = c(names(period), "variable"))
  n_nonzero <- vars <- Y1 <- Z1 <- Y_est <- Z_est <- hY <- hZ <- YZnames <- NULL

  # Calibration
  res_outp <- NULL
  betas <- NULL
  if (!is.null(X)) {
    if (!is.null(period)) ind_gr <- data.table(ind_gr, period)
    ind_gr1 <- copy(ind_gr)
    ind_gr <- do.call("paste", c(as.list(ind_gr), sep = "_"))

    lin1 <- lapply(split(Y2[, .I], ind_gr), function(i) {
      resid <- residual_est(Y = Y2[i], X = X[i],
                            weight = w_design[i], q = q[i],
                            dataset = NULL, checking = FALSE)
      pers0 <- ind_gr1[i, .N, keyby = c(names(ind_gr1))]
      list(data.table(sar_nr = i, resid$residuals),
           data.table(pers0[, N := NULL], resid$betas))
    })
    Y3 <- rbindlist(lapply(lin1, function(x) x[[1]]))
    betas <- rbindlist(lapply(lin1, function(x) x[[2]]))
    setkeyv(Y3, "sar_nr")
    Y3[, sar_nr := NULL]
    lin1 <- X <- g <- q <- NULL
    if (outp_res) res_outp <- data.table(idper, PSU, Y3)
  } else Y3 <- Y2

  var_est <- variance_est(Y = Y3, H = H, PSU = PSU, w_final = w_final,
                          N_h = N_h, fh_zero = fh_zero,
                          PSU_level = PSU_level, PSU_sort = PSU_sort,
                          period = period, dataset = NULL,
                          msg = "Current variance estimation",
                          checking = FALSE)
  var_est <- transpos(var_est, is.null(period), "var_est", names(period))
  all_result <- merge(all_result, var_est, all = TRUE,
                      by = c(names(period), "variable"))

  # Variance of HT estimator under current design
  var_cur_HT <- variance_est(Y = Y2, H = H, PSU = PSU, w_final = w_design,
                             N_h = N_h, fh_zero = fh_zero,
                             PSU_level = PSU_level, PSU_sort = PSU_sort,
                             period = period, dataset = NULL,
                             msg = "Variance of HT estimator under current design",
                             checking = FALSE)
  idper <- H <- PSU <- PSU_sort <- N_h <- NULL
  var_cur_HT <- transpos(var_cur_HT, is.null(period), "var_cur_HT", names(period))
  all_result <- merge(all_result, var_cur_HT, all = TRUE,
                      by = c(names(period), "variable"))
  var_est <- var_cur_HT <- NULL

  # Variance of HT estimator under SRS
  if (is.null(period)) {
    varsrs <- var_srs(Y = Y2, w = w_design)
    S2_y_HT <- varsrs$S2p
    S2_y_ca <- var_srs(Y = Y2, w = w_final)$S2p
    var_srs_HT <- varsrs$varsrs
  } else {
    period_agg <- unique(period)
    lin1 <- lapply(1 : nrow(period_agg), function(i) {
      per <- period_agg[i, ][rep(1, nrow(Y2)), ]
      ind <- (rowSums(per == period) == ncol(period))
      varsrs <- var_srs(Y = Y2[ind], w = w_design[ind])
      varsca <- var_srs(Y = Y2[ind], w = w_final[ind])
      list(S2p = data.table(period_agg[i, ], varsrs$S2p),
           varsrs = data.table(period_agg[i, ], varsrs$varsrs),
           S2pca = data.table(period_agg[i, ], varsca$S2p))
    })
    S2_y_HT <- rbindlist(lapply(lin1, function(x) x[[1]]))
    var_srs_HT <- rbindlist(lapply(lin1, function(x) x[[2]]))
    S2_y_ca <- rbindlist(lapply(lin1, function(x) x[[3]]))
  }
  Y2 <- w_design <- NULL

  var_srs_HT <- transpos(var_srs_HT, is.null(period), "var_srs_HT", names(period))
  all_result <- merge(all_result, var_srs_HT, all = TRUE,
                      by = c(names(period), "variable"))
  S2_y_HT <- transpos(S2_y_HT, is.null(period), "S2_y_HT", names(period))
  all_result <- merge(all_result, S2_y_HT, all = TRUE,
                      by = c(names(period), "variable"))
  S2_y_ca <- transpos(S2_y_ca, is.null(period), "S2_y_ca", names(period))
  all_result <- merge(all_result, S2_y_ca, all = TRUE,
                      by = c(names(period), "variable"))

  # Variance of calibrated estimator under SRS
  if (is.null(period)) {
    varsres <- var_srs(Y = Y3, w = w_final)
    S2_res <- varsres$S2p
    var_srs_ca <- varsres$varsrs
  } else {
    period_agg <- unique(period)
    lin1 <- lapply(1 : nrow(period_agg), function(i) {
      per <- period_agg[i][rep(1, nrow(Y3))]
      ind <- (rowSums(per == period) == ncol(period))
      varsres <- var_srs(Y = Y3[ind], w = w_final[ind])
      list(S2p = data.table(period_agg[i, ], varsres$S2p),
           varsrs = data.table(period_agg[i, ], varsres$varsrs))
    })
    S2_res <- rbindlist(lapply(lin1, function(x) x[[1]]))
    var_srs_ca <- rbindlist(lapply(lin1, function(x) x[[2]]))
  }
  Y3 <- w_final <- NULL

  var_srs_ca <- transpos(var_srs_ca, is.null(period), "var_srs_ca", names(period))
  all_result <- merge(all_result, var_srs_ca, all = TRUE,
                      by = c(names(period), "variable"))
  S2_res <- transpos(S2_res, is.null(period), "S2_res", names(period), "variable")
  all_result <- merge(all_result, S2_res, all = TRUE,
                      by = c(names(period), "variable"))
  S2_y_HT <- S2_y_ca <- S2_res <- var_srs_HT <- var_srs_ca <- NULL

  all_result[, estim := Y_est]
  if (!is.null(all_result$Z_est)) all_result[, estim := Y_est / Z_est * percentratio]
  if (nrow(all_result[var_est < 0]) > 0) stop("Estimates of the variance are negative!")

  # Design effect of sample design
  all_result[, deff_sam := var_cur_HT / var_srs_HT]

  # Design effect of estimator
  all_result[, deff_est := var_est / var_cur_HT]

  # Overall effect of sample design and estimator
  all_result[, deff := deff_sam * deff_est]

  all_result[, var_est2 := var_est]
  all_result[xor(is.na(var_est2), var_est2 < 0), var_est2 := NA]
  all_result[, se := sqrt(var_est2)]
  all_result[(estim != 0) & !is.nan(estim), rse := se / estim]
  all_result[estim == 0 | is.nan(estim), rse := NA]
  all_result[, cv := rse * 100]

  tsad <- qnorm(0.5 * (1 + confidence))
  all_result[, absolute_margin_of_error := tsad * se]
  all_result[, relative_margin_of_error := tsad * cv]
  all_result[, CI_lower := estim - absolute_margin_of_error]
  all_result[, CI_upper := estim + absolute_margin_of_error]

  variableD <- NULL
  setnames(all_result, c("variable", "var_est"), c("variableD", "var"))
  if (!is.null(all_result$Z_est)) {
    nosrZ <- data.table(all_result[, "variableDZ"],
                        all_result[, tstrsplit(variableDZ, "__")][, 1])
    nosrZ <- nosrZ[!duplicated(nosrZ)]
    setnames(nosrZ, "V1", "variableZ")
    all_result <- merge(all_result, nosrZ, by = "variableDZ")
    nosrZ <- NULL
  }

  nosr <- data.table(all_result[, "variableD"],
                     all_result[, tstrsplit(variableD, "__")])
  nosr <- nosr[!duplicated(nosr)]
  nosr <- nosr[, lapply(nosr, as.character)]
  setnames(nosr, names(nosr)[2], "variable")

  namesDom1 <- namesDom
  if (!is.null(Dom)) {
    setnames(nosr, names(nosr)[3 : ncol(nosr)], paste0(namesDom, "_new"))
    nhs[, (paste0(namesDom, "_new")) := lapply(namesDom, function(x)
                                           make.names(paste0(x, ".", get(x))))]
    namesDom1 <- paste0(namesDom, "_new")
  }
  all_result <- merge(nosr, all_result, by = "variableD")
  namesDom <- nosr <- confidence_level <- NULL

  if (!is.null(all_result$Z_est)) {
    all_result[, variable := paste("R", get("variable"), get("variableZ"), sep = "__")]
  }

  if (!is.null(c(Dom, period))) {
    all_result <- merge(all_result, nhs, all = TRUE,
                        by = c(namesDom1, names(period)))
  } else {
    all_result[, respondent_count := nhs$respondent_count]
    all_result[, pop_size := nhs$pop_size]
  }

  all_result[, n_eff := ifelse(is.na(deff) | deff < .Machine$double.eps,
                               NA, respondent_count / deff)]
  all_result[, confidence_level := confidence]

  variab <- c("respondent_count", "n_nonzero", "pop_size")
  if (!is.null(all_result$Z_est)) variab <- c(variab, "Y_est", "Z_est")
  variab <- c(variab, "estim", "var", "se", "rse", "cv",
              "absolute_margin_of_error", "relative_margin_of_error",
              "CI_lower", "CI_upper", "confidence_level")
  if (is.null(Dom)) variab <- c(variab, "S2_y_HT", "S2_y_ca", "S2_res")
  variab <- c(variab, "var_srs_HT", "var_cur_HT", "var_srs_ca",
              "deff_sam", "deff_est", "deff", "n_eff")
  setkeyv(all_result, c("nr_names", names(Dom), names(period)))
  all_result <- all_result[, c("variable", names(Dom),
                               names(period), variab), with = FALSE]
  list(lin_out = linratio_outp,
       res_out = res_outp,
       betas = betas,
       all_result = all_result)
}

transpos <- function(variable, period_NULL, valnames, pernames, variabname = NULL) {
  if (period_NULL) {
    dati <- data.table(nv = names(variable), t(variable))
    setnames(dati, names(dati), c("variable", valnames))
  } else {
    dati <- melt(variable, id = c(pernames))
    setnames(dati, names(dati)[ncol(dati)], valnames)
  }
  dati[, variable := as.character(variable)]
  if (!is.null(variabname)) {
    setnames(dati, "variable", variabname)
  } else variabname <- "variable"
  setkeyv(dati, c(pernames, variabname))
  return(dati)
}
/scratch/gouwar.j/cran-all/cranData/vardpoor/R/vardom.R
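# --- Illustrative usage (not part of the package source) ---------------------
# The internal helper transpos(), defined at the end of vardom.R above,
# reshapes a one-row wide table of per-variable estimates into a long
# variable/value table keyed for merging (with a period, the melt() branch is
# used instead). A minimal sketch; transpos() is not exported, so this assumes
# the file above has been sourced (or use vardpoor:::transpos from the
# installed package).
library(data.table)
est <- data.table(eqIncome__db040.Tyrol = 100.5, eqIncome__db040.Vienna = 220.3)
transpos(est, period_NULL = TRUE, valnames = "Y_est", pernames = NULL)
# Expected shape (keyed alphabetically by 'variable'):
#                  variable Y_est
#     eqIncome__db040.Tyrol 100.5
#    eqIncome__db040.Vienna 220.3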
#' Variance estimation for sample surveys in domains by two stratifications
#'
#' @description Computes the variance estimation for sample surveys in domains by two stratifications.
#'
#' @param Y Variables of interest. Object convertible to \code{data.table} or variable names as character, column numbers.
#' @param H The unit stratum variable. One dimensional object convertible to one-column \code{data.table} or variable name as character, column number.
#' @param H2 The unit new stratum variable. One dimensional object convertible to one-column \code{data.table} or variable name as character, column number.
#' @param PSU Primary sampling unit variable. One dimensional object convertible to one-column \code{data.table} or variable name as character, column number.
#' @param w_final Weight variable. One dimensional object convertible to one-column \code{data.table} or variable name as character, column number.
#' @param id Optional variable for unit ID codes. One dimensional object convertible to one-column \code{data.table} or variable name as character, column number.
#' @param Dom Optional variables used to define population domains. If supplied, variables of interest are calculated for each domain. An object convertible to \code{data.table} or variable names as character vector, column numbers.
#' @param period Optional variable for survey period. If supplied, residual estimation of calibration is done independently for each time period. One dimensional object convertible to one-column \code{data.table}.
#' @param N_h Optional data object convertible to \code{data.table}. If \code{period} is supplied, the time period comes first in the object, followed by the stratum. If \code{period} is not supplied, the first column in the object is the stratum. The last column contains the total of the population in each stratum.
#' @param N_h2 Optional data object convertible to \code{data.table}. If \code{period} is supplied, the time period comes first in the object, followed by the new stratum. If \code{period} is not supplied, the first column in the object is the new stratum. The last column contains the total of the population in each stratum.
#' @param Z Optional variables of denominator for ratio estimation. Object convertible to \code{data.table} or variable names as character, column numbers.
#' @param X Optional matrix of the auxiliary variables for the calibration estimator. Object convertible to \code{data.table} or variable names as character, column numbers.
#' @param ind_gr Optional variable by which the \code{X} matrix of auxiliary variables is split for independent calibration. One dimensional object convertible to one-column \code{data.table} or variable name as character, column number.
#' @param g Optional variable of the g weights. One dimensional object convertible to one-column \code{data.table} or variable name as character, column number.
#' @param q Variable of the positive values accounting for heteroscedasticity. One dimensional object convertible to one-column \code{data.table} or variable name as character, column number.
#' @param dataset Optional survey data object convertible to \code{data.table}.
#' @param confidence Optional positive value for the confidence interval. By default 0.95.
#' @param percentratio Positive numeric value. All linearized variables are multiplied by the \code{percentratio} value; by default 1.
#' @param outp_lin Logical value. If \code{TRUE}, the linearized values of the ratio estimator will be printed out.
#' @param outp_res Logical value. If \code{TRUE}, the estimated residuals of calibration will be printed out.
#'
#' @return A list with the following objects is returned by the function:
#' \itemize{
#'   \item \code{lin_out} - a \code{data.table} containing the linearized values of the ratio estimator with id and PSU.
#'   \item \code{res_out} - a \code{data.table} containing the estimated residuals of calibration with id and PSU.
#'   \item \code{betas} - a numeric \code{data.table} containing the estimated coefficients of calibration.
#'   \item \code{s2g} - a \code{data.table} containing the \eqn{s^2_g} values.
#'   \item \code{all_result} - a \code{data.table} containing the variables: \cr
#'     \code{respondent_count} - the count of respondents, \cr
#'     \code{pop_size} - the estimated size of population, \cr
#'     \code{n_nonzero} - the count of respondents whose answers are larger than zero, \cr
#'     \code{estim} - the estimated value, \cr
#'     \code{var} - the estimated variance, \cr
#'     \code{se} - the estimated standard error, \cr
#'     \code{rse} - the estimated relative standard error (coefficient of variation), \cr
#'     \code{cv} - the estimated relative standard error (coefficient of variation) in percentage, \cr
#'     \code{absolute_margin_of_error} - the estimated absolute margin of error, \cr
#'     \code{relative_margin_of_error} - the estimated relative margin of error in percentage, \cr
#'     \code{CI_lower} - the estimated confidence interval lower bound, \cr
#'     \code{CI_upper} - the estimated confidence interval upper bound, \cr
#'     \code{confidence_level} - the positive value for confidence interval, \cr
#'     \code{var_srs_HT} - the estimated variance of the HT estimator under SRS, \cr
#'     \code{var_cur_HT} - the estimated variance of the HT estimator under the current design, \cr
#'     \code{var_srs_ca} - the estimated variance of the calibrated estimator under SRS, \cr
#'     \code{deff_sam} - the estimated design effect of the sample design, \cr
#'     \code{deff_est} - the estimated design effect of the estimator, \cr
#'     \code{deff} - the overall estimated design effect of the sample design and the estimator.
#' }
#'
#' @references
#' Jean-Claude Deville (1999). Variance estimation for complex statistics and estimators: linearization and residual techniques. Survey Methodology, 25, 193-203, URL \url{https://www150.statcan.gc.ca/n1/pub/12-001-x/1999002/article/4882-eng.pdf}. \cr
#' M. Liberts. (2004) Non-response Analysis and Bias Estimation in a Survey on Transportation of Goods by Road. \cr
#'
#' @seealso \code{\link{domain}},
#'   \code{\link{lin.ratio}},
#'   \code{\link{residual_est}},
#'   \code{\link{vardomh}},
#'   \code{\link{var_srs}},
#'   \code{\link{variance_est}},
#'   \code{\link{variance_othstr}}
#'
#' @keywords vardpoor
#'
#' @examples
#' library("laeken")
#' library("data.table")
#' data("eusilc")
#'
#' # Example 1
#' eusilc1 <- eusilc[1 : 1000, ]
#' dataset1 <- data.table(IDd = paste0("V", 1 : nrow(eusilc1)), eusilc1)
#' dataset1[, db040_2 := get("db040")]
#' N_h2 <- dataset1[, sum(rb050, na.rm = FALSE), keyby = "db040_2"]
#'
#' aa <- vardom_othstr(Y = "eqIncome", H = "db040", H2 = "db040_2",
#'                     PSU = "db030", w_final = "rb050", id = "rb030",
#'                     Dom = "db040", period = NULL, N_h = NULL,
#'                     N_h2 = N_h2, Z = NULL, X = NULL, g = NULL,
#'                     q = NULL, dataset = dataset1, confidence = .95,
#'                     outp_lin = TRUE, outp_res = TRUE)
#'
#' \dontrun{
#' # Example 2
#' dataset1 <- data.table(IDd = 1 : nrow(eusilc), eusilc)
#' dataset1[, db040_2 := get("db040")]
#' N_h2 <- dataset1[, sum(rb050, na.rm = FALSE), keyby = "db040_2"]
#'
#' aa <- vardom_othstr(Y = "eqIncome", H = "db040", H2 = "db040_2",
#'                     PSU = "db030", w_final = "rb050", id = "rb030",
#'                     Dom = "db040", period = NULL, N_h2 = N_h2,
#'                     Z = NULL, X = NULL, g = NULL, dataset = dataset1,
#'                     q = NULL, confidence = .95, outp_lin = TRUE,
#'                     outp_res = TRUE)
#' aa}
#'
#' @import data.table
#'
#' @export vardom_othstr

vardom_othstr <- function(Y, H, H2, PSU, w_final,
                          id = NULL, Dom = NULL, period = NULL,
                          N_h = NULL, N_h2 = NULL, Z = NULL,
                          X = NULL, ind_gr = NULL, g = NULL,
                          q = NULL, dataset = NULL, confidence = .95,
                          percentratio = 1, outp_lin = FALSE,
                          outp_res = FALSE) {

  ### Checking
  outp_lin <- check_var(vars = outp_lin, varn = "outp_lin", varntype = "logical")
  outp_res <- check_var(vars = outp_res, varn = "outp_res", varntype = "logical")
  percentratio <- check_var(vars = percentratio, varn = "percentratio", varntype = "pinteger")
  confidence <- check_var(vars = confidence, varn = "confidence", varntype = "numeric01")

  Y <- check_var(vars = Y, varn = "Y", dataset = dataset,
                 check.names = TRUE, isnumeric = TRUE, grepls = "__")
  Ynrow <- nrow(Y)
  Yncol <- ncol(Y)

  H <- check_var(vars = H, varn = "H", dataset = dataset,
                 ncols = 1, Ynrow = Ynrow, isnumeric = FALSE,
                 ischaracter = TRUE)

  H2 <- check_var(vars = H2, varn = "H2", dataset = dataset,
                  ncols = 1, Ynrow = Ynrow, isnumeric = FALSE,
                  ischaracter = TRUE, dif_name = names(H))

  period <- check_var(vars = period, varn = "period", dataset = dataset,
                      Ynrow = Ynrow, ischaracter = TRUE,
                      mustbedefined = FALSE, duplicatednames = TRUE)
  np <- sum(ncol(period))

  id <- check_var(vars = id, varn = "id", dataset = dataset,
                  ncols = 1, Ynrow = Ynrow, ischaracter = TRUE,
                  periods = period)

  PSU <- check_var(vars = PSU, varn = "PSU", dataset = dataset,
                   ncols = 1, Ynrow = Ynrow, ischaracter = TRUE,
                   namesID1 = names(id))

  Dom <- check_var(vars = Dom, varn = "Dom", dataset = dataset,
                   Ynrow = Ynrow, ischaracter = TRUE,
                   mustbedefined = FALSE, duplicatednames = TRUE,
                   grepls = "__")
  namesDom <- names(Dom)

  w_final <- check_var(vars = w_final, varn = "w_final",
                       dataset = dataset, ncols = 1, Ynrow = Ynrow,
                       isnumeric = TRUE, isvector = TRUE)

  Z <- check_var(vars = Z, varn = "Z", dataset = dataset,
                 check.names = TRUE, Yncol = Yncol, Ynrow = Ynrow,
                 isnumeric = TRUE, mustbedefined = FALSE)

  if (!is.null(X) | !is.null(ind_gr) | !is.null(g) | !is.null(q)) {
    X <- check_var(vars = X, varn = "X", dataset = dataset,
                   check.names = TRUE, Ynrow = Ynrow, isnumeric = TRUE,
                   dif_name = c(names(period), "g", "q"))
    Xnrow <- nrow(X)

    ind_gr <- check_var(vars = ind_gr, varn = "ind_gr",
                        dataset = dataset, ncols = 1, Xnrow = Xnrow,
                        ischaracter = TRUE,
                        dif_name = c(names(period), "g", "q"))

    g <- check_var(vars = g, varn = "g", dataset = dataset,
                   ncols = 1, Xnrow = Xnrow, isnumeric = TRUE,
                   isvector = TRUE)

    q <- check_var(vars = q, varn = "q", dataset = dataset,
                   ncols = 1, Xnrow = Xnrow, isnumeric = TRUE,
                   isvector = TRUE)
  }
  N <- dataset <- NULL

  # N_h
  if (!is.null(N_h)) {
    N_h <- data.table(N_h)
    if (anyNA(N_h)) stop("'N_h' has missing values")
    if (ncol(N_h) != np + 2) stop(paste0("'N_h' should be ", np + 2, " columns"))
    if (!is.numeric(N_h[[ncol(N_h)]])) stop("The last column of 'N_h' should be numeric")
    nams <- c(names(period), names(H))
    if (all(nams %in% names(N_h))) {
      N_h[, (nams) := lapply(.SD, as.character), .SDcols = nams]
    } else stop(paste0("All strata titles of 'H'",
                       ifelse(!is.null(period), " and period titles of 'period'", ""),
                       " are not in 'N_h'"))
    if (is.null(period)) {
      if (names(H) != names(N_h)[1]) stop("Strata titles for 'H' and 'N_h' are not equal")
      if (any(is.na(merge(unique(H), N_h, by = names(H), all.x = TRUE))))
        stop("'N_h' is not defined for all strata")
      if (any(duplicated(N_h[, head(names(N_h), -1), with = FALSE])))
        stop("Strata values for 'N_h' must be unique")
    } else {
      pH <- data.table(period, H)
      if (any(names(pH) != names(N_h)[c(1 : (1 + np))]))
        stop("Strata titles for 'period' with 'H' and 'N_h' are not equal")
      if (any(is.na(merge(unique(pH), N_h, by = names(pH), all.x = TRUE))))
        stop("'N_h' is not defined for all strata and periods")
      if (any(duplicated(N_h[, head(names(N_h), -1), with = FALSE])))
        stop("Strata values for 'N_h' must be unique in all periods")
      pH <- NULL
    }
    setkeyv(N_h, names(N_h)[c(1 : (1 + np))])
  }

  # N_h2
  if (!is.null(N_h2)) {
    N_h2 <- data.table(N_h2)
    if (anyNA(N_h2)) stop("'N_h2' has missing values")
    if (ncol(N_h2) != np + 2) stop(paste0("'N_h2' should be ", np + 2, " columns"))
    if (!is.numeric(N_h2[[ncol(N_h2)]])) stop("The last column of 'N_h2' should be numeric")
    nams2 <- c(names(period), names(H2))
    if (all(nams2 %in% names(N_h2))) {
      N_h2[, (nams2) := lapply(.SD, as.character), .SDcols = nams2]
    } else stop(paste0("All strata titles of 'H2'",
                       ifelse(!is.null(period), " and period titles of 'period'", ""),
                       " are not in 'N_h2'"))
    if (is.null(period)) {
      if (names(H2) != names(N_h2)[1]) stop("Strata titles for 'H2' and 'N_h2' are not equal")
      if (any(is.na(merge(unique(H2), N_h2, by = names(H2), all.x = TRUE))))
        stop("'N_h2' is not defined for all strata")
    } else {
      pH2 <- data.table(period, H2)
      if (any(names(pH2) != names(N_h2)[c(1 : (1 + np))]))
        stop("Strata titles for 'period' with 'H2' and 'N_h2' are not equal")
      if (any(is.na(merge(unique(pH2), N_h2, by = names(pH2), all.x = TRUE))))
        stop("'N_h2' is not defined for all strata and periods")
    }
    setkeyv(N_h2, names(N_h2)[c(1 : (1 + np))])
  } else stop("'N_h2' is not defined!")

  ### Calculation

  # Domains
  if (!is.null(Dom)) {
    Y1 <- domain(Y = Y, D = Dom, dataset = NULL, checking = FALSE)
  } else Y1 <- Y

  n_nonzero <- copy(Y1)
  Z1 <- NULL
  if (!is.null(Z)) {
    if (!is.null(Dom)) {
      Z1 <- domain(Y = Z, D = Dom, dataset = NULL, checking = FALSE)
    } else Z1 <- Z
    Z0 <- copy(Z1)
    setnames(Z0, names(Z0), names(Y1))
    n_nonzero <- n_nonzero + Z0
    Z0 <- NULL
  }
  if (!is.null(period)) {
    n_nonzero <- data.table(period, n_nonzero)
    n_nonzero <- n_nonzero[, lapply(.SD, function(x)
                               sum(as.integer(abs(x) > .Machine$double.eps))),
                           keyby = names(period), .SDcols = names(Y1)]
  } else n_nonzero <- n_nonzero[, lapply(.SD, function(x)
                                    sum(as.integer(abs(x) > .Machine$double.eps))),
                                .SDcols = names(Y1)]

  respondent_count <- pop_size <- NULL
  nhs <- data.table(respondent_count = 1, pop_size = w_final)
  if (!is.null(period)) nhs <- data.table(period, nhs)
  if (!is.null(Dom)) nhs <- data.table(Dom, nhs)
  if (!is.null(c(Dom, period))) {
    nhs <- nhs[, lapply(.SD, sum, na.rm = TRUE),
               keyby = eval(names(nhs)[0 : 1 - ncol(nhs)]),
               .SDcols = c("respondent_count", "pop_size")]
  } else nhs <- nhs[, lapply(.SD, sum, na.rm = TRUE),
                    .SDcols = c("respondent_count", "pop_size")]

  # Design weights
  if (!is.null(X)) w_design <- w_final / g else w_design <- w_final

  # Ratio of two totals
  linratio_outp <- per <- variableZ <- estim <- deff_sam <- NULL
  deff_est <- deff <- var_est2 <- se <- rse <- cv <- NULL
  absolute_margin_of_error <- relative_margin_of_error <- NULL
  sar_nr <- CI_lower <- CI_upper <- variable <- n_eff <- NULL
  idper <- id
  if (!is.null(period)) idper <- data.table(idper, period)

  if (!is.null(Z)) {
    if (is.null(period)) {
      Y2 <- lin.ratio(Y = Y1, Z = Z1, weight = w_final, Dom = NULL,
                      dataset = NULL, percentratio = percentratio,
                      checking = FALSE)
    } else {
      periodap <- do.call("paste", c(as.list(period), sep = "_"))
      lin1 <- lapply(split(Y1[, .I], periodap), function(i)
                data.table(sar_nr = i,
                           lin.ratio(Y = Y1[i], Z = Z1[i],
                                     weight = w_final[i], Dom = NULL,
                                     dataset = NULL,
                                     percentratio = percentratio,
                                     checking = FALSE)))
      Y2 <- rbindlist(lin1)
      setkeyv(Y2, "sar_nr")
      Y2[, sar_nr := NULL]
    }
    if (any(is.na(Y2))) print("Results are calculated, but there are cases where Z = 0")
    if (outp_lin) linratio_outp <- data.table(idper, PSU, Y2)
  } else {
    Y2 <- Y1
  }
  Y <- Z <- NULL

  # Calibration
  betas <- res_outp <- NULL
  if (!is.null(X)) {
    ind_gr <- data.table(nsk = rep(1, nrow(X)))
    if (!is.null(period)) ind_gr <- data.table(ind_gr, period)
    ind_gr1 <- copy(ind_gr)
    ind_gr <- do.call("paste", c(as.list(ind_gr), sep = "_"))

    lin1 <- lapply(split(Y2[, .I], ind_gr), function(i) {
      resid <- residual_est(Y = Y2[i], X = X[i],
                            weight = w_design[i], q = q[i],
                            dataset = NULL, checking = FALSE)
      pers0 <- ind_gr1[i, .N, keyby = c(names(ind_gr1))]
      list(data.table(sar_nr = i, resid$residuals),
           data.table(pers0[, N := NULL], resid$betas))
    })
    Y3 <- rbindlist(lapply(lin1, function(x) x[[1]]))
    betas <- rbindlist(lapply(lin1, function(x) x[[2]]))
    setkeyv(Y3, "sar_nr")
    Y3[, sar_nr := NULL]
    if (outp_res) res_outp <- data.table(idper, PSU, Y3)
  } else Y3 <- Y2

  var_est <- variance_othstr(Y = Y3, H = H, H2 = H2,
                             w_final = w_final,
                             N_h = N_h, N_h2 = N_h2,
                             period = period, dataset = NULL,
                             checking = FALSE)
  s2g <- var_est$s2g
  var_est <- var_est$var_est
  var_est <- transpos(var_est, is.null(period), "var_est", names(period))
  all_result <- var_est

  n_nonzero <- transpos(n_nonzero, is.null(period), "n_nonzero", names(period))
  all_result <- merge(all_result, n_nonzero, all = TRUE)

  # Variance of HT estimator under current design
  var_cur_HT <- variance_othstr(Y = Y2, H = H, H2 = H2,
                                w_final = w_design,
                                N_h = N_h, N_h2 = N_h2,
                                period = period, dataset = NULL,
                                checking = FALSE)
  var_cur_HT <- var_cur_HT$var_est
  var_cur_HT <- transpos(var_cur_HT, is.null(period), "var_cur_HT", names(period))
  all_result <- merge(all_result, var_cur_HT)
  n_nonzero <- var_est <- var_cur_HT <- NULL

  # Variance of HT estimator under SRS
  if (is.null(period)) {
    var_srs_HT <- var_srs(Y2, w = w_design)$varsrs
  } else {
    period_agg <- unique(period)
    lin1 <- lapply(1 : nrow(period_agg), function(i) {
      per <- period_agg[i, ][rep(1, nrow(Y2)), ]
      ind <- (rowSums(per == period) == ncol(period))
      data.table(period_agg[i, ], var_srs(Y2[ind], w = w_design[ind])$varsrs)
    })
    var_srs_HT <- rbindlist(lin1)
  }
  var_srs_HT <- transpos(var_srs_HT, is.null(period), "var_srs_HT", names(period))
  all_result <- merge(all_result, var_srs_HT)

  # Variance of calibrated estimator under SRS
  if (is.null(period)) {
    var_srs_ca <- var_srs(Y3, w = w_final)$varsrs
  } else {
    period_agg <- unique(period)
    lin1 <- lapply(1 : nrow(period_agg), function(i) {
      per <- period_agg[i, ][rep(1, nrow(Y3)), ]
      ind <- (rowSums(per == period) == ncol(period))
      data.table(period_agg[i, ], var_srs(Y3[ind], w = w_final[ind])$varsrs)
    })
    var_srs_ca <- rbindlist(lin1)
  }
  Y3 <- Y2 <- NULL
  var_srs_ca <- transpos(var_srs_ca, is.null(period), "var_srs_ca", names(period))
  all_result <- merge(all_result, var_srs_ca)

  # Total estimation
  Y_nov <- Z_nov <- .SD <- confidence_level <- NULL

  hY <- data.table(Y1 * w_final)
  if (is.null(period)) {
    Y_nov <- hY[, lapply(.SD, sum, na.rm = TRUE), .SDcols = names(Y1)]
  } else {
    hY <- data.table(period, hY)
    Y_nov <- hY[, lapply(.SD, sum, na.rm = TRUE),
                keyby = names(period), .SDcols = names(Y1)]
  }
  Y_nov <- transpos(Y_nov, is.null(period), "Y_nov", names(period))
  all_result <- merge(all_result, Y_nov)

  if (!is.null(Z1)) {
    YZnames <- data.table(variable = names(Y1), variableDZ = names(Z1))
    all_result <- merge(all_result, YZnames, by = "variable")
    hZ <- data.table(Z1 * w_final)
    if (is.null(period)) {
      Z_nov <- hZ[, lapply(.SD, sum, na.rm = TRUE), .SDcols = names(Z1)]
    } else {
      hZ <- data.table(period, hZ)
      Z_nov <- hZ[, lapply(.SD, sum, na.rm = TRUE),
                  keyby = names(period), .SDcols = names(Z1)]
    }
    Z_nov <- transpos(Z_nov, is.null(period), "Z_nov", names(period), "variableDZ")
    all_result <- merge(all_result, Z_nov, by = "variableDZ")
  }

  vars <- data.table(variable = names(Y1), nr_names = 1 : ncol(Y1))
  all_result <- merge(vars, all_result, by = "variable")
  vars <- idper <- Y1 <- Z1 <- Y_nov <- NULL
  Z_nov <- hY <- hZ <- YZnames <- dati <- NULL

  all_result[, estim := Y_nov]
  if (!is.null(all_result$Z_nov)) all_result[, estim := Y_nov / Z_nov]
  if (nrow(all_result[var_est < 0]) > 0) print("Estimates of the variance are negative!")

  # Design effect of sample design
  all_result[, deff_sam := var_cur_HT / var_srs_HT]

  # Design effect of estimator
  all_result[, deff_est := var_est / var_cur_HT]

  # Overall effect of sample design and estimator
  all_result[, deff := deff_sam * deff_est]

  all_result[, var_est2 := var_est]
  all_result[xor(is.na(var_est2), var_est2 < 0), var_est2 := NA]
  all_result[, se := sqrt(var_est2)]
  all_result[(estim != 0) & !is.nan(estim), rse := se / estim]
  all_result[estim == 0 | is.nan(estim), rse := NA]
  all_result[, cv := rse * 100]

  tsad <- qnorm(0.5 * (1 + confidence))
  all_result[, absolute_margin_of_error := tsad * se]
  all_result[, relative_margin_of_error := tsad * cv]
  all_result[, CI_lower := estim - absolute_margin_of_error]
  all_result[, CI_upper := estim + absolute_margin_of_error]

  setnames(all_result, c("variable", "var_est"), c("variableD", "var"))
  if (!is.null(all_result$Z_nov)) {
    nosrZ <- all_result$variableDZ
    nosrZ <- nosrZ[!duplicated(nosrZ)]
    nosrZ1 <- data.table(variableZ = t(data.frame(strsplit(nosrZ, "__")))[, c(1)])
    nosrZ <- data.table(variableDZ = nosrZ, nosrZ1)
    all_result <- merge(all_result, nosrZ, by = "variableDZ")
    nosrZ <- nosrZ1 <- NULL
  }

  nosr <- data.table(variableD = all_result$variableD,
                     t(data.frame(strsplit(all_result$variableD, "__"))))
  nosr <- nosr[!duplicated(nosr)]
  nosr <- nosr[, lapply(nosr, as.character)]
  setnames(nosr, names(nosr)[2], "variable")

  namesDom1 <- namesDom
  if (!is.null(Dom)) {
    setnames(nosr, names(nosr)[3 : ncol(nosr)], paste0(namesDom, "_new"))
    nhs[, (paste0(namesDom, "_new")) := lapply(namesDom, function(x)
                                           make.names(paste0(x, ".", get(x))))]
    namesDom1 <- paste0(namesDom, "_new")
  }
  all_result <- merge(nosr, all_result, by = "variableD")
  namesDom <- nosr <- NULL

  if (!is.null(all_result$Z_nov)) {
    all_result[, variable := paste("R", get("variable"), get("variableZ"), sep = "__")]
  }

  if (!is.null(c(Dom, period))) {
    all_result <- merge(all_result, nhs, all = TRUE,
                        by = c(namesDom1, names(period)))
  } else {
    all_result[, respondent_count := nhs$respondent_count]
    all_result[, pop_size := nhs$pop_size]
  }

  all_result[, confidence_level := confidence]
  variab <- c("respondent_count", "n_nonzero", "pop_size", "estim",
              "var", "se", "rse", "cv",
              "absolute_margin_of_error", "relative_margin_of_error",
              "CI_lower", "CI_upper", "confidence_level",
              "var_srs_HT", "var_cur_HT", "var_srs_ca",
              "deff_sam", "deff_est", "deff")
  setkeyv(all_result, c("nr_names", names(Dom), names(period)))
  all_result <- all_result[, c("variable", names(Dom),
                               names(period), variab), with = FALSE]
  list(lin_out = linratio_outp,
       res_out = res_outp,
       betas = betas,
       s2g = s2g,
       all_result = all_result)
}
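# --- Illustrative usage (not part of the package source) ---------------------
# A minimal sketch of the 'period' case for vardom_othstr(): with a period
# variable, 'N_h2' needs three columns (period, new stratum, population total),
# as described in the parameter documentation above. The population totals are
# only stand-ins derived from the sample weights, and the two periods are just
# a duplicated eusilc sample.
library(data.table)
library(laeken)
data(eusilc)
datasetp <- rbind(data.table(IDd = paste0("V", 1 : nrow(eusilc)), eusilc, period = 1),
                  data.table(IDd = paste0("W", 1 : nrow(eusilc)), eusilc, period = 2))
datasetp[, db040_2 := db040]
N_h2 <- datasetp[, .(pop = sum(rb050)), keyby = c("period", "db040_2")]
cc <- vardom_othstr(Y = "eqIncome", H = "db040", H2 = "db040_2",
                    PSU = "db030", w_final = "rb050", id = "rb030",
                    Dom = NULL, period = "period", N_h = NULL,
                    N_h2 = N_h2, dataset = datasetp, confidence = 0.95)
cc$all_result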
/scratch/gouwar.j/cran-all/cranData/vardpoor/R/vardom_othstr.R
#' Variance estimation for sample surveys in domain for one or two stage surveys by the ultimate cluster method #' #' @description Computes the variance estimation in domain for ID_level1. #' #' @param Y Variables of interest. Object convertible to \code{data.table} or variable names as character, column numbers. #' @param H The unit stratum variable. One dimensional object convertible to one-column \code{data.table} or variable name as character, column number. #' @param PSU Primary sampling unit variable. One dimensional object convertible to one-column \code{data.table} or variable name as character, column number. #' @param w_final Weight variable. One dimensional object convertible to one-column \code{data.table} or variable name as character, column number. #' @param ID_level1 Variable for level1 ID codes. One dimensional object convertible to one-column \code{data.table} or variable name as character, column number. #' @param ID_level2 Variable for unit ID codes. One dimensional object convertible to one-column \code{data.table} or variable name as character, column number. #' @param period Optional variable for the survey periods. If supplied, the values for each period are computed independently. Object convertible to \code{data.table} or variable names as character, column numbers. #' @param Dom Optional variables used to define population domains. If supplied, values are calculated for each domain. An object convertible to \code{data.table} or variable names as character vector, column numbers. #' @param N_h Number of primary sampling units in population for each stratum (and period if \code{period} is not \code{NULL}). If \code{N_h = NULL} and \code{fh_zero = FALSE} (default), \code{N_h} is estimated from sample data as sum of weights (\code{w_final}) in each stratum (and period if \code{period} is not \code{NULL}) #' Optional for single-stage sampling design as it will be estimated from sample data. Recommended for multi-stage sampling design as \code{N_h} can not be correctly estimated from the sample data in this case. If \code{N_h} is not used in case of multi-stage sampling design (for example, because this information is not available), it is advisable to set \code{fh_zero = TRUE}. #' If \code{period} \bold{is} \code{NULL}. A two-column data object convertible to \code{data.table} with rows for each stratum. The first column should contain stratum code. The second column - the number of primary sampling units in the population of each stratum. #' If \code{period} \bold{is not} \code{NULL}. A three-column data object convertible to \code{data.table} with rows for each intersection of strata and period. The first column should contain period. The second column should contain stratum code. The third column - the number of primary sampling units in the population of each stratum and period. #' @param PSU_sort optional; if PSU_sort is defined, then variance is calculated for systematic sample. #' @param fh_zero by default FALSE; \code{fh} is calculated as division of n_h and N_h in each strata, if TRUE, \code{fh} value is zero in each strata. #' @param PSU_level by default TRUE; if PSU_level is TRUE, in each strata \code{fh} is calculated as division of count of PSU in sample (n_h) and count of PSU in frame (N_h). if PSU_level is FALSE, in each strata \code{fh} is calculated as division of count of units in sample (n_h) and count of units in frame (N_h), which calculated as sum of weights. #' @param Z Optional variables of denominator for ratio estimation. 
Object convertible to \code{data.table} or variable names as character, column numbers or logical vector (length of the vector has to be the same as the column count of \code{dataset}). #' @param dataset Optional survey data object convertible to \code{data.table}. #' @param X Optional matrix of the auxiliary variables for the calibration estimator. Object convertible to \code{data.table} or variable names as character, column numbers. #' @param periodX Optional variable of the survey periods. If supplied, residual estimation of calibration is done independently for each time period. Object convertible to \code{data.table} or variable names as character, column numbers. #' @param X_ID_level1 Variable for level1 ID codes. One dimensional object convertible to one-column \code{data.table} or variable name as character, column number. #' @param ind_gr Optional variable by which divided independently X matrix of the auxiliary variables for the calibration. One dimensional object convertible to one-column \code{data.table} or variable name as character, column number. #' @param g Optional variable of the g weights. One dimensional object convertible to one-column \code{data.table} or variable name as character, column number. #' @param q Variable of the positive values accounting for heteroscedasticity. One dimensional object convertible to one-column \code{data.table} or variable name as character, column number. #' @param datasetX Optional survey data object in level1 convertible to \code{data.table}. #' @param confidence Optional positive value for confidence interval. This variable by default is 0.95. #' @param percentratio Positive numeric value. All linearized variables are multiplied with \code{percentratio} value, by default - 1. #' @param outp_lin Logical value. If \code{TRUE} linearized values of the ratio estimator will be printed out. #' @param outp_res Logical value. If \code{TRUE} estimated residuals of calibration will be printed out. #' #' @return A list with objects are returned by the function: #' \itemize{ #' \item lin_out A \code{data.table} containing the linearized values of the ratio estimator with ID_level2 and PSU. #' \item res_out A \code{data.table} containing the estimated residuals of calibration with ID_level1 and PSU. #' \item betas A numeric \code{data.table} containing the estimated coefficients of calibration. 
#' \item all_result A \code{data.table}, which containing variables: #' \code{variable} - names of variables of interest, \cr #' \code{Dom} - optional variable of the population domains, \cr #' \code{period} - optional variable of the survey periods, \cr #' \code{respondent_count} - the count of respondents, \cr #' \code{pop_size} - the estimated size of population, \cr #' \code{n_nonzero} - the count of respondents, who answers are larger than zero, \cr #' \code{estim} - the estimated value, \cr #' \code{var} - the estimated variance, \cr #' \code{se} - the estimated standard error, \cr #' \code{rse} - the estimated relative standard error (coefficient of variation), \cr #' \code{cv} - the estimated relative standard error (coefficient of variation) in percentage, \cr #' \code{absolute_margin_of_error} - the estimated absolute margin of error, \cr #' \code{relative_margin_of_error} - the estimated relative margin of error in percentage, \cr #' \code{CI_lower} - the estimated confidence interval lower bound, \cr #' \code{CI_upper} - the estimated confidence interval upper bound, \cr #' \code{confidence_level} - the positive value for confidence interval, \cr #' \code{S2_y_HT} - the estimated variance of the y variable in case of total or the estimated variance of the linearised variable in case of the ratio of two totals using non-calibrated weights, \cr #' \code{S2_y_ca} - the estimated variance of the y variable in case of total or the estimated variance of the linearised variable in case of the ratio of two totals using calibrated weights, \cr \code{S2_res} - the estimated variance of the regression residuals, \cr #' \code{S2_res} - the estimated variance of the regression residuals, \cr #' \code{var_srs_HT} - the estimated variance of the HT estimator under SRS for household, \cr #' \code{var_cur_HT} - the estimated variance of the HT estimator under current design for household, \cr #' \code{var_srs_ca} - the estimated variance of the calibrated estimator under SRS for household, \cr #' \code{deff_sam} - the estimated design effect of sample design for household, \cr #' \code{deff_est} - the estimated design effect of estimator for household, \cr #' \code{deff} - the overall estimated design effect of sample design and estimator for household #' } #' #' @details Calculate variance estimation in domains for household surveys based on book of Hansen, Hurwitz and Madow. #' #' @references #' Morris H. Hansen, William N. Hurwitz, William G. Madow, (1953), Sample survey methods and theory Volume I Methods and applications, 257-258, Wiley. \cr #' Guillaume Osier and Emilio Di Meglio. The linearisation approach implemented by Eurostat for the first wave of EU-SILC: what could be done from the second wave onwards? 2012 \cr #' Guillaume Osier, Yves Berger, Tim Goedeme, (2013), Standard error estimation for the EU-SILC indicators of poverty and social exclusion, Eurostat Methodologies and Working papers, URL \url{http://ec.europa.eu/eurostat/documents/3888793/5855973/KS-RA-13-024-EN.PDF}. \cr #' Eurostat Methodologies and Working papers, Handbook on precision requirements and variance estimation for ESS household surveys, 2013, URL \url{http://ec.europa.eu/eurostat/documents/3859598/5927001/KS-RA-13-029-EN.PDF}. \cr #' Yves G. Berger, Tim Goedeme, Guillame Osier (2013). 
Handbook on standard error estimation and other related sampling issues in EU-SILC, URL \url{https://ec.europa.eu/eurostat/cros/content/handbook-standard-error-estimation-and-other-related-sampling-issues-ver-29072013_en} \cr #' Jean-Claude Deville (1999). Variance estimation for complex statistics and estimators: linearization and residual techniques. Survey Methodology, 25, 193-203, URL \url{https://www150.statcan.gc.ca/n1/pub/12-001-x/1999002/article/4882-eng.pdf}. \cr #' #' @seealso \code{\link{domain}}, #' \code{\link{lin.ratio}}, #' \code{\link{residual_est}}, #' \code{\link{var_srs}}, #' \code{\link{variance_est}} #' #' @keywords vardpoor #' #' @examples #' library("data.table") #' library("laeken") #' data("eusilc") #' dataset1 <- data.table(IDd = paste0("V", 1 : nrow(eusilc)), eusilc) #' aa <- vardomh(Y = "eqIncome", H = "db040", PSU = "db030", #' w_final = "rb050", ID_level1 = "db030", #' ID_level2 = "rb030", Dom = "db040", period = NULL, #' N_h = NULL, Z = NULL, dataset = dataset1, X = NULL, #' X_ID_level1 = NULL, g = NULL, q = NULL, #' datasetX = NULL, confidence = 0.95, percentratio = 1, #' outp_lin = TRUE, outp_res = TRUE) #' #' \dontrun{ #' dataset2 <- copy(dataset1) #' dataset1$period <- 1 #' dataset2$period <- 2 #' dataset1 <- data.table(rbind(dataset1, dataset2)) #' #' # by default without using fh_zero (finite population correction) #' aa2 <- vardomh(Y = "eqIncome", H = "db040", PSU = "db030", #' w_final = "rb050", ID_level1 = "db030", #' ID_level2 = "rb030", Dom = "db040", period = "period", #' N_h = NULL, Z = NULL, dataset = dataset1, #' X = NULL, X_ID_level1 = NULL, #' g = NULL, q = NULL, datasetX = NULL, #' confidence = .95, percentratio = 1, #' outp_lin = TRUE, outp_res = TRUE) #' aa2 #' #' # without using fh_zero (finite population correction) #' aa3 <- vardomh(Y = "eqIncome", H = "db040", PSU = "db030", #' w_final = "rb050", ID_level1 = "db030", #' ID_level2 = "rb030", Dom = "db040", #' period = "period", N_h = NULL, fh_zero = FALSE, #' Z = NULL, dataset = dataset1, X = NULL, #' X_ID_level1 = NULL, g = NULL, q = NULL, #' datasetX = NULL, confidence = .95, #' percentratio = 1, outp_lin = TRUE, #' outp_res = TRUE) #' aa3 #' #' # with using fh_zero (finite population correction) #' aa4 <- vardomh(Y = "eqIncome", H = "db040", PSU = "db030", #' w_final = "rb050", ID_level1 = "db030", #' ID_level2 = "rb030", Dom = "db040", #' period = "period", N_h = NULL, fh_zero = TRUE, #' Z = NULL, dataset = dataset1, #' X = NULL, X_ID_level1 = NULL, #' g = NULL, q = NULL, datasetX = NULL, #' confidence = .95, percentratio = 1, #' outp_lin = TRUE, outp_res = TRUE) #' aa4} #' #' #' @import data.table #' @import laeken #' #' @export vardomh vardomh <- function(Y, H, PSU, w_final, ID_level1, ID_level2, Dom = NULL, period = NULL, N_h = NULL, PSU_sort = NULL, fh_zero = FALSE, PSU_level = TRUE, Z = NULL, dataset = NULL, X = NULL, periodX = NULL, X_ID_level1 = NULL, ind_gr = NULL, g = NULL, q = NULL, datasetX = NULL, confidence = .95, percentratio = 1, outp_lin = FALSE, outp_res = FALSE) { ### Checking fh_zero <- check_var(vars = fh_zero, varn = "fh_zero", varntype = "logical") PSU_level <- check_var(vars = PSU_level, varn = "PSU_level", varntype = "logical") outp_lin <- check_var(vars = outp_lin, varn = "outp_lin", varntype = "logical") outp_res <- check_var(vars = outp_res, varn = "outp_res", varntype = "logical") percentratio <- check_var(vars = percentratio, varn = "percentratio", varntype = "pinteger") confidence <- check_var(vars = confidence, varn = "confidence", varntype = 
"numeric01") if(!is.null(X)) { if (is.null(datasetX)) datasetX <- copy(dataset) if (identical(dataset, datasetX) & !is.null(dataset)) X_ID_level1 <- ID_level1 } Y <- check_var(vars = Y, varn = "Y", dataset = dataset, check.names = TRUE, isnumeric = TRUE, grepls = "__") Ynrow <- nrow(Y) Yncol <- ncol(Y) ID_level1 <- check_var(vars = ID_level1, varn = "ID_level1", dataset = dataset, ncols = 1, Ynrow = Ynrow, ischaracter = TRUE) period <- check_var(vars = period, varn = "period", dataset = dataset, Ynrow = Ynrow, ischaracter = TRUE, duplicatednames = TRUE, mustbedefined = FALSE) ID_level2 <- check_var(vars = ID_level2, varn = "ID_level2", dataset = dataset, ncols = 1, Ynrow = Ynrow, ischaracter = TRUE, namesID1 = names(ID_level1), periods = period) H <- check_var(vars = H, varn = "H", dataset = dataset, ncols = 1, Ynrow = Ynrow, ischaracter = TRUE, namesID1 = names(ID_level1), dif_name = "dataH_stratas") w_final <- check_var(vars = w_final, varn = "w_final", dataset = dataset, ncols = 1, Ynrow = Ynrow, isnumeric = TRUE, isvector = TRUE) Z <- check_var(vars = Z, varn = "Z", dataset = dataset, check.names = TRUE, Yncol = Yncol, Ynrow = Ynrow, isnumeric = TRUE, mustbedefined = FALSE) Dom <- check_var(vars = Dom, varn = "Dom", dataset = dataset, Ynrow = Ynrow, ischaracter = TRUE, mustbedefined = FALSE, duplicatednames = TRUE, grepls = "__") PSU <- check_var(vars = PSU, varn = "PSU", dataset = dataset, ncols = 1, Ynrow = Ynrow, ischaracter = TRUE, namesID1 = names(ID_level1)) PSU_sort <- check_var(vars = PSU_sort, varn = "PSU_sort", dataset = dataset, ncols = 1, Ynrow = Ynrow, ischaracter = TRUE, isvector = TRUE, mustbedefined = FALSE, PSUs = PSU) if(!is.null(X) | !is.null(ind_gr) |!is.null(g) | !is.null(q) | !is.null(periodX) | !is.null(X_ID_level1) | !is.null(datasetX)) { X <- check_var(vars = X, varn = "X", dataset = datasetX, check.names = TRUE, isnumeric = TRUE, dif_name = c(names(Y), names(period), "g", "q", "weight"), dX = "X") Xnrow <- nrow(X) ind_gr <- check_var(vars = ind_gr, varn = "ind_gr", dataset = datasetX, ncols = 1, Xnrow = Xnrow, ischaracter = TRUE, dX = "X", dif_name = c(names(Y), names(period), "g", "q", "weight")) g <- check_var(vars = g, varn = "g", dataset = datasetX, ncols = 1, Xnrow = Xnrow, isnumeric = TRUE, isvector = TRUE, dX = "X") q <- check_var(vars = q, varn = "q", dataset = datasetX, ncols = 1, Xnrow = Xnrow, isnumeric = TRUE, isvector = TRUE, dX = "X") periodX <- check_var(vars = periodX, varn = "periodX", dataset = datasetX, ncols = 1, Xnrow = Xnrow, ischaracter = TRUE, mustbedefined = !is.null(period), duplicatednames = TRUE, varnout = "period", varname = names(period), periods = period, dX = "X") X_ID_level1 <- check_var(vars = X_ID_level1, varn = "X_ID_level1", dataset = datasetX, ncols = 1, Xnrow = Xnrow, ischaracter = TRUE, varnout = "ID_level1", varname = names(ID_level1), periods = period, periodsX = periodX, ID_level1 = ID_level1, dX = "X") } N <- dataset <- datasetX <- NULL # N_h np <- sum(ncol(period)) if (!is.null(N_h)) { N_h <- data.table(N_h) if (anyNA(N_h)) stop("'N_h' has missing values") if (ncol(N_h) != np + 2) stop(paste0("'N_h' should be ", np + 2, " columns")) if (!is.numeric(N_h[[ncol(N_h)]])) stop("The last column of 'N_h' should be numeric") nams <- c(names(period), names(H)) if (all(nams %in% names(N_h))) {N_h[, (nams) := lapply(.SD, as.character), .SDcols = nams] } else stop(paste0("All strata titles of 'H'", ifelse(!is.null(period), "and periods titles of 'period'", ""), " have not in 'N_h'")) if (is.null(period)) { if 
(any(is.na(merge(unique(H), N_h, by = names(H), all.x = TRUE)))) stop("'N_h' is not defined for all strata") if (any(duplicated(N_h[, head(names(N_h), -1), with = FALSE]))) stop("Strata values for 'N_h' must be unique") } else { pH <- data.table(period, H) if (any(is.na(merge(unique(pH), N_h, by = names(pH), all.x = TRUE)))) stop("'N_h' is not defined for all strata and periods") if (any(duplicated(N_h[, head(names(N_h), -1), with = FALSE]))) stop("Strata values for 'N_h' must be unique in all periods") pH <- NULL } setkeyv(N_h, names(N_h)[c(1 : (1 + np))]) } ### Calculation # Domains psusn <- as.integer(!is.null(PSU_sort)) namesDom <- names(Dom) aPSU <- names(PSU) if (!is.null(Dom)) Y1 <- domain(Y = Y, D = Dom, dataset = NULL, checking = FALSE) else Y1 <- Y Y <- NULL n_nonzero <- copy(Y1) Z1 <- NULL if (!is.null(Z)) { if (!is.null(Dom)) Z1 <- domain(Y = Z, D = Dom, dataset = NULL, checking = FALSE) else Z1 <- Z Z0 <- copy(Z1) setnames(Z0, names(Z0), names(Y1)) n_nonzero <- n_nonzero + Z0 Z0 <- NULL } if (!is.null(period)) { n_nonzero <- data.table(period, n_nonzero) n_nonzero <- n_nonzero[, lapply(.SD, function(x) sum(as.integer(abs(x) > .Machine$double.eps))), keyby = names(period), .SDcols = names(Y1)] } else n_nonzero <- n_nonzero[, lapply(.SD, function(x) sum(as.integer(abs(x) > .Machine$double.eps))), .SDcols = names(Y1)] respondent_count <- sample_size <- pop_size <- NULL nhs <- data.table(respondent_count = 1, pop_size = w_final) if (!is.null(period)) nhs <- data.table(period, nhs) if (!is.null(Dom)) nhs <- data.table(Dom, nhs) if (!is.null(c(Dom, period))) {nhs <- nhs[, lapply(.SD, sum, na.rm = TRUE), keyby = eval(names(nhs)[0 : 1 - ncol(nhs)]), .SDcols = c("respondent_count", "pop_size")] } else nhs <- nhs[, lapply(.SD, sum, na.rm = TRUE), .SDcols = c("respondent_count", "pop_size")] # Design weights if (!is.null(X)) { ID_level1h <- data.table(ID_level1) if (!is.null(period)) { ID_level1h <- data.table(period, ID_level1h) X_ID_level1 <- data.table(periodX, X_ID_level1) } idhx <- data.table(X_ID_level1, g = g) setnames(idhx, names(idhx)[c(1 : (ncol(idhx) - 1))], names(ID_level1h)) idg <- merge(ID_level1h, idhx, by = names(ID_level1h), sort = FALSE) w_design <- w_final / idg[["g"]] idg <- data.table(idg, w_design = w_design) idh <- idg[, .N, keyby = c(names(ID_level1h), "w_design")] if (nrow(X) != nrow(idh)) stop("Aggregated 'w_design' length must be the same as matrix 'X'") idg <- idhx <- ID_level1h <- NULL } else w_design <- w_final # Ratio of two totals sar_nr <- persort <- linratio_outp <- estim <- NULL var_est2 <- se <- rse <- cv <- absolute_margin_of_error <- NULL relative_margin_of_error <- CI_lower <- S2_y_HT <- NULL S2_y_ca <- S2_res <- CI_upper <- variable <- variableZ <- NULL .SD <- deff_sam <- deff_est <- deff <- n_eff <- NULL aH <- names(H) idper <- ID_level2 period0 <- copy(period) if (!is.null(period)) idper <- data.table(idper, period) if (!is.null(Z)) { if (is.null(period)) { Y2 <- lin.ratio(Y = Y1, Z = Z1, weight = w_final, Dom = NULL, dataset = NULL, percentratio = percentratio, checking = FALSE) } else { periodap <- do.call("paste", c(as.list(period), sep="_")) lin1 <- lapply(split(Y1[, .I], periodap), function(i) data.table(sar_nr = i, lin.ratio(Y = Y1[i], Z = Z1[i], weight = w_final[i], Dom = NULL, dataset = NULL, percentratio = percentratio, checking = FALSE))) Y2 <- rbindlist(lin1) setkeyv(Y2, "sar_nr") Y2[, sar_nr := NULL] } if (any(is.na(Y2))) print("Results are calculated, but there are cases where Z = 0") if (outp_lin) linratio_outp <- data.table(idper, PSU,
Y2) } else { Y2 <- Y1 } # Total estimation lin1 <- Y_est <- Z_est <- .SD <- variableDZ <- NULL hY <- data.table(Y1 * w_final) if (is.null(period)) { Y_est <- hY[, lapply(.SD, sum, na.rm = TRUE), .SDcols = names(Y1)] } else { hY <- data.table(period0, hY) Y_est <- hY[, lapply(.SD, sum, na.rm = TRUE), keyby = names(period), .SDcols = names(Y1)] } Y_est <- transpos(Y_est, is.null(period), "Y_est", names(period)) all_result <- Y_est if (!is.null(Z1)) { YZnames <- data.table(variable = names(Y1), variableDZ = names(Z1)) setkeyv(YZnames, "variable") setkeyv(all_result, "variable") all_result <- merge(all_result, YZnames) hZ <- data.table(Z1 * w_final) if (is.null(period)) { Z_est <- hZ[, lapply(.SD, sum, na.rm = TRUE), .SDcols = names(Z1)] } else { hZ <- data.table(period, hZ) Z_est <- hZ[, lapply(.SD, sum, na.rm = TRUE), keyby = names(period), .SDcols = names(Z1)] } Z_est <- transpos(Z_est, is.null(period), "Z_est", names(period), "variableDZ") all_result <- merge(all_result, Z_est, all = TRUE, by = c(names(period), "variableDZ")) } vars <- data.table(variable = names(Y1), nr_names = 1 : ncol(Y1)) all_result <- merge(vars, all_result, by = "variable") n_nonzero <- transpos(n_nonzero, is.null(period), "n_nonzero", names(period)) all_result <- merge(all_result, n_nonzero, all = TRUE, by = c(names(period), "variable")) n_nonzero <- vars <- Y1 <- Z1 <- Y_est <- Z_est <- hY <- hZ <- YZnames <- NULL # Aggregation at ID_level1 (household) level YY <- data.table(idper, ID_level1, H, PSU, check.names = TRUE) if (!is.null(PSU_sort)) YY <- data.table(YY, PSU_sort, check.names = TRUE) YY <- data.table(YY, w_design, w_final, Y2, check.names = TRUE) YY2 <- YY[, lapply(.SD, sum, na.rm = TRUE), by = c(names(YY)[c(2 : (6 + np + psusn))]), .SDcols = names(YY)[-(1 : (6 + np + psusn))]] Y3 <- YY2[, c(-(1 : (5 + np + psusn))), with = FALSE] idper <- period <- NULL if (np > 0) period <- YY2[, c(1 : np), with = FALSE] ID_level1h <- YY2[, np + 1, with = FALSE] H <- YY2[, np + 2, with = FALSE] setnames(H, names(H), aH) PSU <- YY2[, np + 3, with = FALSE] setnames(PSU, names(PSU), aPSU) if (!is.null(PSU_sort)) PSU_sort <- YY2[[np + 4]] w_design2 <- YY2[[np + 4 + psusn]] w_final2 <- YY2[[np + 5 + psusn]] YY <- YY2 <- NULL # Calibration betas <- res_outp <- NULL if (!is.null(X)) { if (np > 0) ID_level1h <- data.table(period, ID_level1h) X0 <- data.table(X_ID_level1, ind_gr, q, g, X) D1 <- merge(ID_level1h, X0, by = names(ID_level1h), sort = FALSE) ind_gr <- D1[, np + 2, with = FALSE] if (!is.null(period)) ind_gr <- data.table(D1[, names(periodX), with = FALSE], ind_gr) ind_period <- do.call("paste", c(as.list(ind_gr), sep = "_")) lin1 <- lapply(split(Y3[, .I], ind_period), function(i) { resid <- residual_est(Y = Y3[i], X = D1[i, (np + 5) : ncol(D1), with = FALSE], weight = w_design2[i], q = D1[i][["q"]], checking = FALSE) pers0 <- ind_gr[i, .N, keyby = c(names(ind_gr))] list(data.table(sar_nr = i, resid$residuals), data.table(pers0[, N := NULL], resid$betas)) }) Y4 <- rbindlist(lapply(lin1, function(x) x[[1]])) betas <- rbindlist(lapply(lin1, function(x) x[[2]])) setkeyv(Y4, "sar_nr") Y4[, sar_nr := NULL] if (outp_res) res_outp <- data.table(ID_level1h, PSU, w_final2, Y4) } else Y4 <- Y3 X0 <- D1 <- X_ID_level1 <- ID_level1h <- ind_gr <- lin1 <- X <- g <- q <- NULL var_est <- variance_est(Y = Y4, H = H, PSU = PSU, w_final = w_final2, N_h = N_h, fh_zero = fh_zero, PSU_level = PSU_level, PSU_sort = PSU_sort, period = period, dataset = NULL, msg = "Current variance estimation", checking = FALSE) var_est <- transpos(var_est, is.null(period), "var_est",
names(period)) all_result <- merge(all_result, var_est, all = TRUE, by = c(names(period), "variable")) # Variance of HT estimator under current design var_cur_HT <- variance_est(Y = Y3, H = H, PSU = PSU, w_final = w_design2, N_h = N_h, fh_zero = fh_zero, PSU_level = PSU_level, PSU_sort = PSU_sort, period = period, dataset = NULL, msg = "Variance of HT estimator under current design", checking = FALSE) var_cur_HT <- transpos(var_cur_HT, is.null(period), "var_cur_HT", names(period)) all_result <- merge(all_result, var_cur_HT, all = TRUE, by = c(names(period), "variable")) n_nonzero <- var_est <- var_cur_HT <- NULL H <- PSU <- PSU_sort <- N_h <- NULL # Variance of HT estimator under SRS if (is.null(period)) { varsrs <- var_srs(Y = Y3, w = w_design2) S2_y_HT <- varsrs$S2p S2_y_ca <- var_srs(Y = Y3, w = w_final2)$S2p var_srs_HT <- varsrs$varsrs } else { period_agg <- unique(period) lin1 <- lapply(1 : nrow(period_agg), function(i) { per <- period_agg[i,][rep(1, nrow(Y3)),] ind <- (rowSums(per == period) == ncol(period)) varsrs <- var_srs(Y = Y3[ind], w = w_design2[ind]) varsca <- var_srs(Y = Y3[ind], w = w_final2[ind]) list(S2p = data.table(period_agg[i,], varsrs$S2p), varsrs = data.table(period_agg[i,], varsrs$varsrs), S2ca = data.table(period_agg[i,], varsca$S2p)) }) S2_y_HT <- rbindlist(lapply(lin1, function(x) x[[1]])) var_srs_HT <- rbindlist(lapply(lin1, function(x) x[[2]])) S2_y_ca <- rbindlist(lapply(lin1, function(x) x[[3]])) } var_srs_HT <- transpos(var_srs_HT, is.null(period), "var_srs_HT", names(period)) all_result <- merge(all_result, var_srs_HT, all = TRUE, by = c(names(period), "variable")) S2_y_HT <- transpos(S2_y_HT, is.null(period), "S2_y_HT", names(period)) all_result <- merge(all_result, S2_y_HT, all = TRUE, by = c(names(period), "variable")) S2_y_ca <- transpos(S2_y_ca, is.null(period), "S2_y_ca", names(period)) all_result <- merge(all_result, S2_y_ca, all = TRUE, by = c(names(period), "variable")) Y3 <- w_design2 <- var_srs_HT <- S2_y_HT <- S2_y_ca <- NULL # Variance of calibrated estimator under SRS if (is.null(period)) { varsres <- var_srs(Y4, w = w_final2) S2_res <- varsres$S2p var_srs_ca <- varsres$varsrs } else { period_agg <- unique(period) lin1 <- lapply(1 : nrow(period_agg), function(i) { per <- period_agg[i,][rep(1, nrow(Y4)),] ind <- (rowSums(per == period) == ncol(period)) varsres <- var_srs(Y = Y4[ind], w = w_final2[ind]) list(S2p = data.table(period_agg[i,], varsres$S2p), varsrs = data.table(period_agg[i,], varsres$varsrs)) }) S2_res <- rbindlist(lapply(lin1, function(x) x[[1]])) var_srs_ca <- rbindlist(lapply(lin1, function(x) x[[2]])) } var_srs_ca <- transpos(var_srs_ca, is.null(period), "var_srs_ca", names(period), "variable") all_result <- merge(all_result, var_srs_ca, all = TRUE, by = c(names(period), "variable")) S2_res <- transpos(S2_res, is.null(period), "S2_res", names(period), "variable") all_result <- merge(all_result, S2_res, all = TRUE, by = c(names(period), "variable")) Y4 <- w_final2 <- var_srs_ca <- S2_res <- NULL all_result[, estim := Y_est] if (!is.null(all_result$Z_est)) all_result[, estim := Y_est / Z_est * percentratio] if (nrow(all_result[var_est < 0]) > 0) stop("Estimates of variance are negative!") # Design effect of sample design all_result[, deff_sam := var_cur_HT / var_srs_HT] # Design effect of estimator all_result[, deff_est := var_est / var_cur_HT] # Overall effect of sample design and estimator all_result[, deff := deff_sam * deff_est] all_result[, var_est2 := var_est] all_result[xor(is.na(var_est2), var_est2 < 0), var_est2 := NA]
all_result[, se := sqrt(var_est2)] all_result[(estim != 0) & !is.nan(estim), rse := se / estim] all_result[estim == 0 | is.nan(estim), rse := NA] all_result[, cv := rse * 100] tsad <- qnorm(0.5 * (1 + confidence)) all_result[, absolute_margin_of_error := tsad * se] all_result[, relative_margin_of_error := tsad * cv] all_result[, CI_lower := estim - absolute_margin_of_error] all_result[, CI_upper := estim + absolute_margin_of_error] variableD <- NULL setnames(all_result, c("variable", "var_est"), c("variableD", "var")) if (!is.null(all_result$Z_est)) { nosrZ <- data.table(all_result[, "variableDZ"], all_result[, tstrsplit(variableDZ, "__")][, 1]) nosrZ <- nosrZ[!duplicated(nosrZ)] setnames(nosrZ, "V1", "variableZ") all_result <- merge(all_result, nosrZ, by = "variableDZ") nosrZ <- NULL } nosr <- data.table(all_result[, "variableD"], all_result[, tstrsplit(variableD, "__")]) nosr <- nosr[!duplicated(nosr)] nosr <- nosr[, lapply(nosr, as.character)] setnames(nosr, names(nosr)[2], "variable") namesDom1 <- namesDom if (!is.null(Dom)) { setnames(nosr, names(nosr)[3 : ncol(nosr)], paste0(namesDom, "_new")) nhs[, (paste0(namesDom, "_new")) := lapply(namesDom, function(x) make.names(paste0(x,".", get(x))))] namesDom1 <- paste0(namesDom, "_new") } all_result <- merge(nosr, all_result, by="variableD") namesDom <- nosr <- confidence_level <- NULL if (!is.null(all_result$Z_est)) { all_result[, variable := paste("R", get("variable"), get("variableZ"), sep="__")] } if (!is.null(c(Dom, period))) { all_result <- merge(all_result, nhs, all = TRUE, by = c(namesDom1, names(period))) } else { all_result[, respondent_count := nhs$respondent_count] all_result[, pop_size := nhs$pop_size]} all_result[, confidence_level := confidence] variab <- c("respondent_count", "n_nonzero", "pop_size") if (!is.null(all_result$Z_est)) variab <- c(variab, "Y_est", "Z_est") variab <- c(variab, "estim", "var", "se", "rse", "cv", "absolute_margin_of_error", "relative_margin_of_error", "CI_lower", "CI_upper", "confidence_level") if (is.null(Dom)) variab <- c(variab, "S2_y_HT", "S2_y_ca", "S2_res") variab <- c(variab, "var_srs_HT", "var_cur_HT", "var_srs_ca", "deff_sam", "deff_est", "deff") setkeyv(all_result, c("nr_names", names(Dom), names(period))) all_result <- all_result[, c("variable", names(Dom), names(period), variab), with = FALSE] list(lin_out = linratio_outp, res_out = res_outp, all_result = all_result) }
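# Illustration only (not part of the package): a minimal, self-contained sketch
# of how the design-effect columns in 'all_result' returned by vardomh()
# decompose. deff_sam compares the current design with SRS for the HT estimator,
# deff_est compares the calibrated estimator with the HT estimator, and their
# product is the overall deff. All numbers below are made up for demonstration.
local({
  res <- data.table::data.table(var = 4.5, var_cur_HT = 5.0, var_srs_HT = 4.0)
  res[, deff_sam := var_cur_HT / var_srs_HT]  # design effect of the sample design
  res[, deff_est := var / var_cur_HT]         # design effect of the estimator
  res[, deff := deff_sam * deff_est]          # overall design effect
  # the decomposition collapses to var / var_srs_HT
  stopifnot(isTRUE(all.equal(res$deff, res$var / res$var_srs_HT)))
  res
})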
/scratch/gouwar.j/cran-all/cranData/vardpoor/R/vardomh.R
#' Variance estimation for sample surveys by the ultimate cluster method #' #' @description Computes the variance estimation by the ultimate cluster method. #' #' @param Y Variables of interest. Object convertible to \code{data.table} or variable names as character, column numbers. #' @param H The unit stratum variable. One dimensional object convertible to one-column \code{data.table} or variable name as character, column number. #' @param PSU Primary sampling unit variable. One dimensional object convertible to one-column \code{data.table} or variable name as character, column number. #' @param w_final Weight variable. One dimensional object convertible to one-column \code{data.table} or variable name as character, column number. #' @param N_h Number of primary sampling units in population for each stratum (and period if \code{period} is not \code{NULL}). If \code{N_h = NULL} and \code{fh_zero = FALSE} (default), \code{N_h} is estimated from sample data as sum of weights (\code{w_final}) in each stratum (and period if \code{period} is not \code{NULL}). #' Optional for single-stage sampling design as it will be estimated from sample data. Recommended for multi-stage sampling design as \code{N_h} can not be correctly estimated from the sample data in this case. If \code{N_h} is not used in case of multi-stage sampling design (for example, because this information is not available), it is advisable to set \code{fh_zero = TRUE}. #' If \code{period} \bold{is} \code{NULL}. A two-column matrix with rows for each stratum. The first column should contain stratum code. The second column - the number of primary sampling units in the population of each stratum. #' If \code{period} \bold{is not} \code{NULL}. A three-column matrix with rows for each intersection of strata and period. The first column should contain period. The second column should contain stratum code. The third column - the number of primary sampling units in the population of each stratum and period. #' @param fh_zero by default FALSE; \code{fh} is calculated as division of n_h and N_h in each strata, if TRUE, \code{fh} value is zero in each strata. #' @param PSU_level by default TRUE; if PSU_level is TRUE, in each strata \code{fh} is calculated as division of count of PSU in sample (n_h) and count of PSU in frame (N_h). If PSU_level is FALSE, in each strata \code{fh} is calculated as division of count of units in sample (n_h) and count of units in frame (N_h), which is calculated as the sum of weights. #' @param PSU_sort optional; if PSU_sort is defined, then variance is calculated for systematic sample. #' @param period Optional variable for the survey periods. If supplied, the values for each period are computed independently. Object convertible to \code{data.table} or variable names as character, column numbers. #' @param dataset an optional name of the individual dataset \code{data.table}. #' @param msg an optional text to be printed when the function reports an error. #' @param checking Optional logical variable; if this variable is TRUE, then the function checks for data preparation errors, otherwise not checked. This variable by default is TRUE. #' #' @return a \code{data.table} containing the values of the variance estimation by totals.
#' #' @details #'If we assume that \eqn{n_h \geq 2}{n_h>=2} for all \eqn{h}, that is, two or more PSUs are selected from each stratum, then the variance of \eqn{\hat{\theta}}{\theta} can be estimated from the variation among the estimated PSU totals of the variable \eqn{Z}: #' \deqn{\hat{V} \left(\hat{\theta} \right)=\sum\limits_{h=1}^{H} \left(1-f_h \right) \frac{n_h}{n_{h}-1} \sum\limits_{i=1}^{n_h} \left( z_{hi\bullet}-\bar{z}_{h\bullet\bullet}\right)^2, }{V(\theta)=\sum h=1...H (1-f_h)*n_h/(n_h-1)* \sum i=1...n_h ( z_hi.- z_h..)^2, } #' #' where #' \eqn{\bullet}{} #' \eqn{z_{hi\bullet}=\sum\limits_{j=1}^{m_{hi}} \omega_{hij} z_{hij}}{z_hi.=\sum j=1...m_hi \omega_hij * z_hij} #' #' \eqn{\bullet}{} #' \eqn{\bar{z}_{h\bullet\bullet}=\frac{\left( \sum\limits_{i=1}^{n_h} z_{hi\bullet} \right)}{n_h}}{z_h..=(\sum i=1...n_h z_hi.)/n_h} #' #' \eqn{\bullet}{} #' \eqn{f_h} is the sampling fraction of PSUs within stratum #' #' \eqn{\bullet}{} #' \eqn{h} is the stratum number, with a total of H strata #' #' \eqn{\bullet}{} #' \eqn{i} is the primary sampling unit (PSU) number within stratum \eqn{h}, with a total of \eqn{n_h} PSUs #' #' \eqn{\bullet}{} #' \eqn{j} is the household number within cluster \eqn{i} of stratum \eqn{h}, with a total of \eqn{m_{hi}}{m_hi} households #' #' \eqn{\bullet}{} #' \eqn{\omega_{hij}}{\omega_hij} is the sampling weight for household \eqn{j} in PSU \eqn{i} of stratum \eqn{h} #' #' \eqn{\bullet}{} #' \eqn{z_{hij}}{z_hij} denotes the observed value of the analysis variable \eqn{z} for household \eqn{j} in PSU \eqn{i} of stratum \eqn{h} #' #' #' @references #' Morris H. Hansen, William N. Hurwitz, William G. Madow, (1953), Sample survey methods and theory Volume I Methods and applications, 257-258, Wiley. \cr #' Guillaume Osier and Emilio Di Meglio. The linearisation approach implemented by Eurostat for the first wave of EU-SILC: what could be done from the second wave onwards? 2012 \cr #' Eurostat Methodologies and Working papers, Standard error estimation for the EU-SILC indicators of poverty and social exclusion, 2013, URL \url{http://ec.europa.eu/eurostat/documents/3888793/5855973/KS-RA-13-024-EN.PDF}. \cr #' Yves G. Berger, Tim Goedeme, Guillaume Osier (2013). Handbook on standard error estimation and other related sampling issues in EU-SILC, URL \url{https://ec.europa.eu/eurostat/cros/content/handbook-standard-error-estimation-and-other-related-sampling-issues-ver-29072013_en} \cr #' Eurostat Methodologies and Working papers, Handbook on precision requirements and variance estimation for ESS household surveys, 2013, URL \url{http://ec.europa.eu/eurostat/documents/3859598/5927001/KS-RA-13-029-EN.PDF}.
\cr #' #' @seealso \code{\link{domain}}, \code{\link{lin.ratio}}, \code{\link{linarpr}}, #' \code{\link{linarpt}}, \code{\link{lingini}}, \code{\link{lingini2}}, #' \code{\link{lingpg}}, \code{\link{linpoormed}}, \code{\link{linqsr}}, #' \code{\link{linrmpg}}, \code{\link{residual_est}}, \code{\link{vardom}}, #' \code{\link{vardomh}}, \code{\link{varpoord}}, \code{\link{variance_othstr}} #' #' @keywords vardpoor #' #' @examples #' Ys <- rchisq(10, 3) #' w <- rep(2, 10) #' PSU <- 1 : length(Ys) #' H <- rep("Strata_1", 10) #' #' # by default without using fh_zero (finite population correction) #' variance_est(Y = Ys, H = H, PSU = PSU, w_final = w) #' #' #' \dontrun{ #' # without using fh_zero (finite population correction) #' variance_est(Y = Ys, H = H, PSU = PSU, w_final = w, fh_zero = FALSE) #' #' # using fh_zero (finite population correction) #' variance_est(Y = Ys, H = H, PSU = PSU, w_final = w, fh_zero = TRUE) #' } #' #' @import data.table #' @export variance_est variance_est <- function(Y, H, PSU, w_final, N_h = NULL, fh_zero = FALSE, PSU_level = TRUE, PSU_sort = NULL, period = NULL, dataset = NULL, msg = "", checking = TRUE) { ### Checking . <- NULL if (checking) { fh_zero <- check_var(vars = fh_zero, varn = "fh_zero", varntype = "logical") PSU_level <- check_var(vars = PSU_level, varn = "PSU_level", varntype = "logical") Y <- check_var(vars = Y, varn = "Y", dataset = dataset, check.names = TRUE, isnumeric = TRUE) Ynrow <- nrow(Y) Yncol <- ncol(Y) H <- check_var(vars = H, varn = "H", dataset = dataset, ncols = 1, Ynrow = Ynrow, isnumeric = FALSE, ischaracter = TRUE) w_final <- check_var(vars = w_final, varn = "w_final", dataset = dataset, ncols = 1, Ynrow = Ynrow, isnumeric = TRUE, isvector = TRUE) period <- check_var(vars = period, varn = "period", dataset = dataset, Ynrow = Ynrow, ischaracter = TRUE, mustbedefined = FALSE, duplicatednames = TRUE) PSU <- check_var(vars = PSU, varn = "PSU", dataset = dataset, ncols = 1, Ynrow = Ynrow, ischaracter = TRUE) PSU_sort <- check_var(vars = PSU_sort, varn = "PSU_sort", dataset = dataset, ncols = 1, Ynrow = Ynrow, ischaracter = TRUE, isvector = TRUE, mustbedefined = FALSE, PSUs = PSU) } np <- sum(ncol(period)) vars <- names(period) # N_h if (!is.null(N_h)) { N_h <- data.table(N_h) if (anyNA(N_h)) stop("'N_h' has missing values") if (ncol(N_h) != np + 2) stop(paste0("'N_h' should have ", toString(np + 2), " columns")) if (!is.numeric(N_h[[ncol(N_h)]])) stop("The last column of 'N_h' should be numeric") nams <- c(names(period), names(H)) if (all(nams %in% names(N_h))) {N_h[, (nams) := lapply(.SD, as.character), .SDcols = nams] } else stop(paste0("Not all strata titles of 'H'", ifelse(!is.null(period), " and periods titles of 'period'", ""), " are in 'N_h'")) if (is.null(period)) { if (any(is.na(merge(unique(H), N_h, by = names(H), all.x = TRUE)))) stop("'N_h' is not defined for all strata") if (any(duplicated(N_h[, head(names(N_h), -1), with = FALSE]))) stop("Strata values for 'N_h' must be unique") } else { pH <- data.table(period, H) if (any(is.na(merge(unique(pH), N_h, by = names(pH), all.x = TRUE)))) stop("'N_h' is not defined for all strata and periods") if (any(duplicated(N_h[, head(names(N_h), -1), with = FALSE]))) stop("Strata values for 'N_h' must be unique in all periods") } setnames(N_h, names(N_h)[ncol(N_h)], "N_h") setkeyv(N_h, names(N_h)[c(1 : (1 + np))]) } else { Nh <- data.table(H, w_final) if (!is.null(period)) Nh <- data.table(period, Nh) N_h <- Nh[, .(N_h = sum(w_final, na.rm = TRUE)), keyby = c(names(Nh)[1 : (1 + np)])] }
psuag <- pH <- NULL ### Calculation namY <- names(Y) # z_hi ids <- nhc <- f_h <- .SD <- N <- NULL hpY <- data.table(H, PSU, Y * w_final) if (!is.null(PSU_sort)) hpY <- data.table(H, PSU, PSU_sort, Y * w_final) if (!is.null(period)) hpY <- data.table(period, hpY) psusn <- as.integer(!is.null(PSU_sort)) z_hi <- hpY[, lapply(.SD, sum, na.rm = TRUE), keyby = c(names(hpY)[1 : (2 + np + psusn)]), .SDcols = names(hpY)[-(1 : (2 + np + psusn))]] setkeyv(z_hi, names(z_hi)[c(1 : (1 + np))]) # n_h n_h <- data.table(z_hi[, c(1 : (1 + np)), with = FALSE]) n_h <- n_h[, .(n_h = .N), keyby = c(names(n_h)[1 : (1 + np)])] # var_z_hi var_z_hi <- z_hi[, lapply(.SD, var, na.rm = FALSE), keyby = c(names(z_hi)[1 : (1 + np)]), .SDcols = namY] if (!is.null(PSU_sort)) { setkeyv(z_hi, c(names(z_hi)[c(1 : (1 + np), 3 + np)])) z_hi[, (paste0("lag_", namY)) := lapply(.SD, function(x) shift(x, 1)), by = c(names(z_hi)[1 : (1 + np)]), .SDcols = namY] laY <- paste0("lag_", namY[1]) z_hi <- z_hi[!is.na(get(laY))] var_z_hi <- z_hi[, lapply(namY, function(x) sum((get(x) - get(paste0("lag_", x)))^2)), keyby = c(names(z_hi)[1 : (1 + np)])] setnames(var_z_hi, names(var_z_hi)[(2 + np) : ncol(var_z_hi)], namY) } # f_h F_h <- merge(N_h, n_h, by = names(hpY)[c(1 : (1 + np))], sort = TRUE) F_h[, N_h := round2(N_h, 8)] F_h[, f_h := n_h / N_h] if (nrow(F_h[n_h == 1 & f_h != 1]) > 0) { print(msg) print("There are strata where n_h == 1 and f_h <> 1") print("Not possible to estimate the variance in these strata!") print("In these strata the estimation of variance was not calculated") nh <- F_h[n_h == 1 & f_h != 1] print(nh) } if (nrow(F_h[f_h > 1]) > 0) { print(msg) print("There are strata where f_h > 1") print("In these strata the estimation of variance will be 0") print(F_h[f_h > 1]) F_h[f_h > 1, f_h := 1] } # fh1 if (!(PSU_level)) { n_h1 <- Nh1 <- NULL fh1 <- data.table(hpY[, c(1 : (1 + np)), with = FALSE], w_final) fh1 <- fh1[, .(n_h1 = .N, Nh1 = sum(w_final, na.rm = TRUE)), keyby = c(names(fh1)[1 : (1 + np)])] F_h <- merge(F_h, fh1, by = c(names(fh1)[1 : (1 + np)])) F_h[, f_h := n_h1 / Nh1] } var_z_hi <- merge(F_h, var_z_hi, by = c(names(F_h)[1 : (1 + np)])) fh1 <- F_h <- NULL # var_h if (!is.null(PSU_sort)) { var_z_hi[, nhc := ifelse(n_h > 1, n_h / (2 * (n_h - 1)), NA)] } else var_z_hi[, nhc := n_h] var_z_hi[, ids := 1 : .N] var_z_hi[, (paste0("var_", namY)) := lapply(.SD[, namY, with = FALSE], function(x) (1 - f_h * (1 - fh_zero)) * nhc * x), by = "ids"] # Variance_est var_est <- var_z_hi[, lapply(.SD, sum, na.rm = TRUE), keyby = vars, .SDcols = paste0("var_", namY)] setnames(var_est, paste0("var_", namY), namY) return(var_est) } round2 <- function(x, n) { sign(x) * trunc(abs(x) * 10 ^ n + 0.5) / 10 ^ n }
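# Illustration only (not part of the package): for a single stratum with one
# unit per PSU and fh_zero = TRUE, the ultimate cluster estimator documented
# above reduces to n_h / (n_h - 1) * sum_i (z_hi - zbar_h)^2, i.e. n_h * var(z_hi),
# where the z_hi are the weighted PSU totals. The data below are made up,
# mirroring the example in the documentation.
local({
  set.seed(1)
  Ys <- rchisq(10, 3)
  w <- rep(2, 10)
  PSU <- as.character(1 : 10)
  H <- rep("Strata_1", 10)
  z_hi <- Ys * w                        # one unit per PSU: PSU totals are w * y
  by_hand <- length(z_hi) * var(z_hi)   # n_h / (n_h - 1) * sum (z_hi - mean)^2
  pkg <- variance_est(Y = Ys, H = H, PSU = PSU, w_final = w, fh_zero = TRUE)
  stopifnot(isTRUE(all.equal(as.numeric(pkg[[1]]), by_hand)))
})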
/scratch/gouwar.j/cran-all/cranData/vardpoor/R/variance_est.R
#' Variance estimation for sample surveys by the new stratification #' #' @description Computes s2g and the variance estimation by the new stratification. #' #' @param Y Variables of interest. Object convertible to \code{data.table} or variable names as character, column numbers or logical vector with only one \code{TRUE} value (length of the vector has to be the same as the column count of \code{dataset}). #' @param H The unit stratum variable. One dimensional object convertible to one-column \code{data.table} or variable name as character, column number or logical vector with only one \code{TRUE} value (length of the vector has to be the same as the column count of \code{dataset}). #' @param H2 The unit new stratum variable. One dimensional object convertible to one-column \code{data.table} or variable name as character, column number or logical vector with only one \code{TRUE} value (length of the vector has to be the same as the column count of \code{dataset}). #' @param w_final Weight variable. One dimensional object convertible to one-column \code{data.table} or variable name as character, column number or logical vector with only one \code{TRUE} value (length of the vector has to be the same as the column count of \code{dataset}). #' @param N_h optional; a \code{data.frame} with the first column giving the stratum and the second column the total of the population in each stratum. #' @param N_h2 optional; a \code{data.frame} with the first column giving the new stratum and the second column the total of the population in each new stratum. #' @param period Optional variable for the survey periods. If supplied, the values for each period are computed independently. One dimensional object convertible to one-column \code{data.table} or variable name as character, column number or logical vector with only one \code{TRUE} value (length of the vector has to be the same as the column count of \code{dataset}). #' @param dataset Optional survey data object convertible to \code{data.table}. #' @param checking Optional logical variable; if this variable is TRUE, then the function checks for data preparation errors, otherwise not checked. This variable by default is TRUE. #' #' @return A list with objects is returned by the function: #' \itemize{ #' \item \code{s2g} - a \code{data.table} containing the \eqn{s^2_g}{s^2_g} values. #' \item \code{var_est} - a \code{data.table} containing the values of the variance estimation. #' } #' #' #' @details #' #' It is possible to compute the population size \eqn{M_g} from the sampling frame. The variance of the \eqn{g}-th stratum is #' \deqn{S_g^2 =\frac{1}{M_g-1} \sum\limits_{k=1}^{M_g} \left(y_{gk}-\bar{Y}_g \right)^2= \frac{1}{M_g-1} \sum\limits_{k=1}^{M_g} y_{gk}^2 - \frac{M_g}{M_g-1}\bar{Y}_g^2}{S_g^2 =1/(M_g-1) \sum k=1...M_g (y_gk - Ym_g)^2= 1/(M_g-1) \sum k=1...M_g (y_gk)^2 - M_g/(M_g-1)*(Ym_g)^2} #' #'\eqn{\sum\limits_{k=1}^{M_g} y_{gk} ^2}{\sum k=1...M_g (y_gk)^2} and \eqn{\bar{Y}_g^2}{Ym_g^2} have to be estimated to estimate \eqn{S_g^2}. Estimate of \eqn{\sum\limits_{k=1}^{M_g} y_{gk}^2}{\sum k=1...M_g (y_gk)^2} is \eqn{\sum\limits_{h=1}^{H} \frac{N_h}{n_h} \sum\limits_{i=1}^{n_h} y_{gi}^2 z_{hi}}{\sum h=1...H N_h/n_h \sum i=1...n_h (y_gi)^2*z_hi}, where #' #'\eqn{ z_{hi} = \left\{ #' \begin{array}{ll} #' 0, & h_i \notin \theta_g \\ #' 1, & h_i \in \theta_g #' \end{array} #' \right. 
}{z_hi=if(0, h_i notin \theta_g; 1, h_i in \theta_g)} #', \eqn{\theta_g}{\theta_g} is the index group of successfully surveyed units belonging to \eqn{g}{g}-th stratum. #'Estimate of \eqn{\bar{Y}_g^2}{(Y_g)^2} #'is #' #'\deqn{\hat{\bar{Y}}_g^2=\left( \hat{\bar{Y}}_g \right)^2-\hat{Var} \left(\hat{\bar{Y}} \right)}{Ym_g^2=(Ym_g)^2- Var(Ym)} #' #' #' #'\deqn{\hat{\bar{Y}}_g =\frac{\hat{Y}_g}{M_g}= \frac{1}{M_g} \sum\limits_{h=1}^{H} \frac{N_h}{n_h} \sum\limits_{i=1}^{n_h} y_{hi} z_{hi}}{Ym_g =Ym_g/M_g= 1/M_g \sum h=1...H N_h/n_h \sum i=1...n_h y_hi z_hi} #' #' #'So the estimate of \eqn{S_g^2} is #' #'\eqn{s_g^2=\frac{1}{M_g-1} \sum\limits_{h=1}^{H} \frac{N_h}{n_h} \sum\limits_{i=1}^{n_h} y_{hi}^2 z_{hi} -}{s_g^2=1/(M_g-1) \sum h=1...H N_h/n_h \sum i=1...n_h (y_hi)^2 * z_hi -} #' #' #'\eqn{-\frac{M_g}{M_g-1} \left( \left( \frac{1}{M_g} \sum\limits_{h=1}^{H} \frac{N_h}{n_h} \sum\limits_{i=1}^{n_h} y_{hi} z_{hi} \right)^2 - \frac{1}{M_g^2} \sum\limits_{h=1}^{H} N_h^2 \left(\frac{1}{n_h} - \frac{1}{N_h}\right) \frac{1}{n_h-1} \sum\limits_{i=1}^{n_h} \left(y_{hi} z_{hi} - \frac{1}{n_h} \sum\limits_{t=1}^{n_h} y_{ht} z_{ht} \right)^2 \right)}{-M_g/(M_g-1) (1/M_g \sum h=1...H N_h/n_h \sum i=1...n_h y_hi z_hi)^2} #' #' #'Two conditions have to hold to estimate \eqn{S_g^2: n_h>1, \forall g}{S_g^2: n_h>1, forall g} and \eqn{\theta_g \ne 0, \forall g.}{\theta_g <> 0, forall g.} #' #'Variance of \eqn{\hat{Y}}{Y} is #'\deqn{ Var\left( \hat{Y} \right) = \sum\limits_{g=1}^{G} M_g^2 \left( \frac{1}{m_g} - \frac{1}{M_g} \right) S_g^2 }{Var(Y) = \sum g=1...G M_g^2 (1/m_g - 1/M_g)*(S_g)^2 } #' #' #'Estimate of \eqn{\hat{Var}\left( \hat{Y} \right)}{Var(Y)} is #' #'\deqn{\hat{Var}\left( \hat{Y} \right) = \sum\limits_{g=1}^{G} M_g^2 \left( \frac{1}{m_g} - \frac{1}{M_g} \right)s_g^2}{Var(Y)= \sum g=1...G M_g^2 (1/m_g - 1/M_g)*(s_g)^2} #' #' #' @references #' M. Liberts. (2004) Non-response Analysis and Bias Estimation in a Survey on Transportation of Goods by Road. #' #' @seealso #' \code{\link{domain}}, \code{\link{lin.ratio}}, \code{\link{linarpr}}, #' \code{\link{linarpt}}, \code{\link{lingini}}, \code{\link{lingini2}}, #' \code{\link{lingpg}}, \code{\link{linpoormed}}, \code{\link{linqsr}}, #' \code{\link{linrmpg}}, \code{\link{residual_est}}, \code{\link{vardom}}, #' \code{\link{vardom_othstr}}, \code{\link{vardomh}}, \code{\link{varpoord}} #' #' @keywords vardpoor #' @examples #' library("data.table") #' Y <- data.table(matrix(runif(50) * 5, ncol = 5)) #' #' H <- data.table(H = as.integer(trunc(5 * runif(10)))) #' H2 <- data.table(H2 = as.integer(trunc(3 * runif(10)))) #' #' N_h <- data.table(matrix(0 : 4, 5, 1)) #' setnames(N_h, names(N_h), "H") #' N_h[, sk := 10] #' #' N_h2 <- data.table(matrix(0 : 2, 3, 1)) #' setnames(N_h2, names(N_h2), "H2") #' N_h2[, sk2 := 4] #' #' w_final <- rep(2, 10) #' #' vo <- variance_othstr(Y = Y, H = H, H2 = H2, #' w_final = w_final, #' N_h = N_h, N_h2 = N_h2, #' period = NULL, #' dataset = NULL) #' vo #' #' @import data.table #' #' @export variance_othstr variance_othstr <- function(Y, H, H2, w_final, N_h = NULL, N_h2, period = NULL, dataset = NULL, checking = TRUE) { .
<- NULL ### Checking if (checking) { Y <- check_var(vars = Y, varn = "Y", dataset = dataset, check.names = TRUE, isnumeric = TRUE) Ynrow <- nrow(Y) Yncol <- ncol(Y) H <- check_var(vars = H, varn = "H", dataset = dataset, ncols = 1, isnumeric = FALSE, ischaracter = TRUE) H2 <- check_var(vars = H2, varn = "H2", dataset = dataset, ncols = 1, Ynrow = Ynrow, isnumeric = FALSE, ischaracter = TRUE, dif_name = names(H)) w_final <- check_var(vars = w_final, varn = "w_final", dataset = dataset, ncols = 1, Ynrow = Ynrow, isnumeric = TRUE, isvector = TRUE) period <- check_var(vars = period, varn = "period", dataset = dataset, Ynrow = Ynrow, ischaracter = TRUE, mustbedefined = FALSE, duplicatednames = TRUE) } np <- sum(ncol(period)) # N_h if (!is.null(N_h)) { N_h <- data.table(N_h) if (anyNA(N_h)) stop("'N_h' has missing values") if (ncol(N_h) != np + 2) stop(paste0("'N_h' should have ", np + 2, " columns")) if (!is.numeric(N_h[[ncol(N_h)]])) stop("The last column of 'N_h' should be numeric") nams <- c(names(period), names(H)) if (all(nams %in% names(N_h))) {N_h[, (nams) := lapply(.SD, as.character), .SDcols = nams] } else stop(paste0("Not all strata titles of 'H'", ifelse(!is.null(period), " and periods titles of 'period'", ""), " are in 'N_h'")) if (is.null(period)) { if (any(is.na(merge(unique(H), N_h, by = names(H), all.x = TRUE)))) stop("'N_h' is not defined for all strata") if (any(duplicated(N_h[, head(names(N_h), -1), with = FALSE]))) stop("Strata values for 'N_h' must be unique") } else { pH <- data.table(period, H) if (any(is.na(merge(unique(pH), N_h, by = names(pH), all.x = TRUE)))) stop("'N_h' is not defined for all strata and periods") if (any(duplicated(N_h[, head(names(N_h), -1), with = FALSE]))) stop("Strata values for 'N_h' must be unique in all periods") pH <- NULL } setkeyv(N_h, names(N_h)[c(1 : (1 + np))]) } else { Nh <- data.table(H, w_final) if (!is.null(period)) Nh <- data.table(period, Nh) N_h <- Nh[, .(N_h = sum(w_final, na.rm = TRUE)), keyby = c(names(Nh)[1 : (1 + np)])] } Nh1 <- names(N_h)[ncol(N_h)] # N_h2 if (!is.null(N_h2)) { N_h2 <- data.table(N_h2) if (anyNA(N_h2)) stop("'N_h2' has missing values") if (ncol(N_h2) != np + 2) stop(paste0("'N_h2' should have ", np + 2, " columns")) if (!is.numeric(N_h2[[ncol(N_h2)]])) stop("The last column of 'N_h2' should be numeric") nams2 <- c(names(period), names(H2)) if (all(nams2 %in% names(N_h2))) {N_h2[, (nams2) := lapply(.SD, as.character), .SDcols = nams2] } else stop(paste0("Not all strata titles of 'H2'", ifelse(!is.null(period), " and periods titles of 'period'", ""), " are in 'N_h2'")) if (is.null(period)) { if (names(H2) != names(N_h2)[1]) stop("Strata titles for 'H2' and 'N_h2' are not equal") if (any(is.na(merge(unique(H2), N_h2, by = names(H2), all.x = TRUE)))) stop("'N_h2' is not defined for all strata") } else { pH2 <- data.table(period, H2) if (any(names(pH2) != names(N_h2)[c(1 : (1 + np))])) stop("Strata titles for 'period' with 'H2' and 'N_h2' are not equal") if (any(is.na(merge(unique(pH2), N_h2, by = names(pH2), all.x = TRUE)))) stop("'N_h2' is not defined for all strata and periods") } setkeyv(N_h2, names(N_h2)[c(1 : (1 + np))]) } else stop("'N_h2' is not defined!") Nh2 <- names(N_h2)[ncol(N_h2)] ### Calculation # z_hi f_h1 <- .SD <- .N <- NULL Ys <- copy(Y) Ys[, paste0(names(Y),"_sa") := lapply(Y, function(x) w_final * x^2)] Ys[, paste0(names(Y),"_sb") := lapply(Y, function(x) x * w_final)] Ys[, paste0(names(Y),"_sc") := lapply(Y, function(x) x ^ 2)] Ys[, paste0(names(Y),"_sd") := Y] Ys <- data.table(H, H2, Ys) if
(!is.null(period)) Ys <- data.table(period, Ys) # n_h1 n_h1 <- data.table(H) if (!is.null(period)) n_h1 <- data.table(period, n_h1) n_h1 <- n_h1[, .(n_h1 = .N), keyby = c(names(n_h1))] F_h1 <- merge(N_h, n_h1, keyby = c(names(N_h)[1 : (1 + np)])) F_h1[, f_h1 := n_h1 / get(Nh1)] if (nrow(F_h1[n_h1 == 1 & f_h1 != 1]) > 0) { print("There are strata where n_h1 == 1 and f_h1 <> 1") print("Not possible to estimate the variance in these strata!") print("In these strata the estimation of variance was not calculated") nh1 <- F_h1[n_h1 == 1 & f_h1 != 1] print(nh1) } # n_h2 n_h2 <- data.table(H2) if (!is.null(period)) n_h2 <- data.table(period, n_h2) nn_h2 <- names(n_h2) n_h2 <- n_h2[, .(n_h2 = .N), keyby = nn_h2] F_h2 <- merge(N_h2, n_h2, keyby = nn_h2) F_h2[, f_h2 := n_h2 / get(Nh2)] if (nrow(F_h2[n_h2 == 1 & f_h2 != 1]) > 0) { print("There are strata where n_h2 == 1 and f_h2 <> 1") print("Not possible to estimate the variance in these strata!") print("In these strata the estimation of variance was not calculated") nh2 <- F_h2[n_h2 == 1 & f_h2 != 1] print(nh2) } if (nrow(F_h2[f_h2 > 1]) > 0) { print("There are strata where f_h2 > 1") print("In these strata the estimation of variance will be 0") print(F_h2[f_h2 > 1]) F_h2[f_h2 > 1, f_h2 := 1] } z_h_h2 <- Ys[, lapply(.SD, sum, na.rm = TRUE), keyby = c(names(Ys)[1 : (2 + np)]), .SDcols = names(Ys)[-(0 : (ncol(Y) + 2 + np))]] z_h_h2 <- merge(z_h_h2, F_h1, keyby = names(z_h_h2)[c(1 : (1 + np))]) pop <- z_h_h2[[Nh1]] z_h_h2[, paste0(names(Y), "_sc") := lapply(.SD[, paste0(names(Y), "_sc"), with = FALSE], function(x) x * pop ^ 2 * ( 1 / n_h1 - 1 / pop)/(n_h1 - 1))] z_h_h2[, paste0(names(Y), "_sd") := lapply(.SD[, paste0(names(Y), "_sd"), with = FALSE], function(x) (1 / n_h1) * x ^ 2 * pop ^ 2 * (1 / n_h1 - 1 / pop)/(n_h1 - 1))] z_h_h2[n_h1 == 1, paste0(names(Y), "_sc") := NA] z_h_h2[n_h1 == 1, paste0(names(Y), "_sd") := NA] nameszh2 <- names(H2) if (!is.null(period)) nameszh2 <- c(names(period), nameszh2) zh2 <- z_h_h2[, lapply(.SD, sum, na.rm = TRUE), keyby = nameszh2, .SDcols = names(z_h_h2)[-(1 : (2 + np))]] zh2 <- merge(zh2, F_h2, by = nn_h2) pop2 <- zh2[[names(N_h2)[ncol(N_h2)]]] nh2 <- zh2[["n_h2"]] f_h2 <- zh2[["f_h2"]] # s2 s2_g <- zh2[, mapply(function(sa, sb, sc, sd) sa / (pop2 - 1) - pop2 / (pop2 - 1) * ((sb / pop2)^2 - (sc - sd) / pop2^2), zh2[, paste0(names(Y), "_sa"), with = FALSE], zh2[, paste0(names(Y), "_sb"), with = FALSE], zh2[, paste0(names(Y), "_sc"), with = FALSE], zh2[, paste0(names(Y), "_sd"), with = FALSE])] # var_g if (is.null(nrow(s2_g))) s2_g <- t(s2_g) s2_g <- data.table(s2_g) setnames(s2_g, names(s2_g), names(Y)) s2g <- data.table(zh2[, nn_h2, with = FALSE], s2_g) s2_g <- matrix(pop2^2 * 1 / nh2 * (1 - f_h2)) * s2_g if (np > 0) s2_g <- data.table(zh2[, names(period), with = FALSE], s2_g) # Variance_est if (np == 0) {var_est <- data.table(t(colSums(s2_g, na.rm = TRUE))) } else var_est <- s2_g[, lapply(.SD, sum, na.rm = TRUE), keyby = c(names(s2_g)[c(1 : np)]), .SDcols = names(Y)] list(s2g = s2g, var_est = var_est) }
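# Illustration only (not part of the package): once the within-stratum variances
# s_g^2 are estimated, the variance estimate combines them with the population
# sizes M_g and the sample sizes m_g of the new strata as
# sum_g M_g^2 * (1 / m_g - 1 / M_g) * s_g^2, matching the last formula in the
# details above. All numbers below are made up.
local({
  M_g  <- c(40, 60)    # population sizes of the new strata
  m_g  <- c(4, 6)      # realised sample sizes in the new strata
  s2_g <- c(2.5, 1.8)  # estimated within-stratum variances s_g^2
  sum(M_g^2 * (1 / m_g - 1 / M_g) * s2_g)
})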
/scratch/gouwar.j/cran-all/cranData/vardpoor/R/variance_othstr.R
#' Estimation of the variance and deff for sample surveys for indicators on social exclusion and poverty #' #' @description Computes the estimation of the variance for indicators on social exclusion and poverty. #' #' @param Y Study variable (for example equalized disposable income or gross pension income). One dimensional object convertible to one-column \code{data.table} or variable name as character, column number. #' @param w_final Weight variable. One dimensional object convertible to one-column \code{data.table} or variable name as character, column number. #' @param age Age variable. One dimensional object convertible to one-column \code{data.frame} or variable name as character, column number. #' @param pl085 Retirement variable (Number of months spent in retirement or early retirement). One dimensional object convertible to one-column \code{data.table} or variable name as character, column number. #' @param month_at_work Variable for the total number of months at work (sum of the number of months spent at full-time work as employee, number of months spent at part-time work as employee, number of months spent at full-time work as self-employed (including family worker), number of months spent at part-time work as self-employed (including family worker)). One dimensional object convertible to one-column \code{data.table} or variable name as character, column number. #' @param Y_den Denominator variable (for example gross individual earnings). One dimensional object convertible to one-column \code{data.table} or variable name as character, column number. #' @param Y_thres Variable (for example equalized disposable income) used for computation and linearization of poverty threshold. One dimensional object convertible to one-column \code{data.table} or variable name as character, column number. Variable specified for \code{Y} is used as \code{Y_thres} if \code{Y_thres} is not defined. #' @param wght_thres Weight variable used for computation and linearization of poverty threshold. One dimensional object convertible to one-column \code{data.table} or variable name as character, column number. Variable specified for \code{w_final} is used as \code{wght_thres} if \code{wght_thres} is not defined. #' @param ID_level1 Variable for level1 ID codes. One dimensional object convertible to one-column \code{data.table} or variable name as character, column number. #' @param ID_level2 Optional variable for unit ID codes. One dimensional object convertible to one-column \code{data.table} or variable name as character, column number. #' @param H The unit stratum variable. One dimensional object convertible to one-column \code{data.table} or variable name as character, column number. #' @param PSU Primary sampling unit variable. One dimensional object convertible to one-column \code{data.table} or variable name as character, column number. #' @param N_h Number of primary sampling units in population for each stratum (and period if \code{period} is not \code{NULL}). If \code{N_h = NULL} and \code{fh_zero = FALSE} (default), \code{N_h} is estimated from sample data as sum of weights (\code{w_final}) in each stratum (and period if \code{period} is not \code{NULL}). #' Optional for single-stage sampling design as it will be estimated from sample data. Recommended for multi-stage sampling design as \code{N_h} can not be correctly estimated from the sample data in this case.
If \code{N_h} is not used in case of multi-stage sampling design (for example, because this information is not available), it is advisable to set \code{fh_zero = TRUE}. #' If \code{period} \bold{is} \code{NULL}. A two-column data object convertible to \code{data.table} with rows for each stratum. The first column should contain stratum code. The second column - the number of primary sampling units in the population of each stratum. #' If \code{period} \bold{is not} \code{NULL}. A three-column data object convertible to \code{data.table} with rows for each intersection of strata and period. The first column should contain period. The second column should contain stratum code. The third column - the number of primary sampling units in the population of each stratum and period. #' @param PSU_sort optional; if PSU_sort is defined, then variance is calculated for systematic sample. #' @param fh_zero by default FALSE; \code{fh} is calculated as division of n_h and N_h in each strata, if TRUE, \code{fh} value is zero in each strata. #' @param PSU_level by default TRUE; if PSU_level is TRUE, in each strata \code{fh} is calculated as division of count of PSU in sample (n_h) and count of PSU in frame (N_h). If PSU_level is FALSE, in each strata \code{fh} is calculated as division of count of units in sample (n_h) and count of units in frame (N_h), which is calculated as the sum of weights. #' @param sort Optional variable to be used as tie-breaker for sorting. One dimensional object convertible to one-column \code{data.table} or variable name as character, column number. #' @param Dom Optional variables used to define population domains. If supplied, variables are calculated for each domain. An object convertible to \code{data.table} or variable names as character vector, column numbers. #' @param period Optional variable for survey period. If supplied, variables are calculated for each time period. Object convertible to \code{data.table} or variable names as character, column numbers. #' @param gender Numerical variable for gender, where 1 is for males and 2 is for females. One dimensional object convertible to one-column \code{data.table} or variable name as character, column number. #' @param dataset Optional survey data object convertible to \code{data.frame}. #' @param X Optional matrix of the auxiliary variables for the calibration estimator. Object convertible to \code{data.table} or variable names as character, column numbers. #' @param periodX Optional variable of the survey periods. If supplied, residual estimation of calibration is done independently for each time period. Object convertible to \code{data.table} or variable names as character, column numbers. #' @param X_ID_level1 Variable for level1 ID codes. One dimensional object convertible to one-column \code{data.table} or variable name as character, column number. #' @param ind_gr Optional variable by which divided independently X matrix of the auxiliary variables for the calibration. One dimensional object convertible to one-column \code{data.table} or variable name as character, column number. #' @param g Optional variable of the g weights. One dimensional object convertible to one-column \code{data.table} or variable name as character, column number. #' @param q Variable of the positive values accounting for heteroscedasticity. One dimensional object convertible to one-column \code{data.table} or variable name as character, column number. #' @param datasetX Optional survey data object at household level convertible to \code{data.table}.
#' @param percentage A numeric value in range \eqn{[0,100]} for \eqn{p} in the formula for poverty threshold computation: #' \deqn{\frac{p}{100} \cdot Z_{\frac{\alpha}{100}}.}{p/100 * Z(\alpha/100).} #' For example, to compute poverty threshold equal to 60\% of some income quantile, \eqn{p} should be set equal to 60. #' @param order_quant A numeric value in range \eqn{[0,100]} for \eqn{\alpha} in the formula for poverty threshold computation: #' \deqn{\frac{p}{100} \cdot Z_{\frac{\alpha}{100}}.}{p/100 * Z(\alpha/100).} #' For example, to compute poverty threshold equal to some percentage of median income, \eqn{\alpha} should be set equal to 50. #' @param alpha a numeric value in range \eqn{[0,100]} for the order of the income quantile share ratio (in percentage). #' @param confidence Optional positive value for confidence interval. This variable by default is 0.95. #' @param outp_lin Logical value. If \code{TRUE} linearized values of the ratio estimator will be printed out. #' @param outp_res Logical value. If \code{TRUE} estimated residuals of calibration will be printed out. #' @param type a character vector (of length one unless several.ok is TRUE), example "linarpr", "linarpt", "lingpg", "linpoormed", "linrmpg", "lingini", "lingini2", "linqsr", "linarr", "linrmir". #' #' @return A list with objects is returned by the function: #' \itemize{ #' \item \code{lin_out} - a \code{data.table} containing the linearized values of the ratio estimator with ID_level2 and PSU. #' \item \code{res_out} - a \code{data.table} containing the estimated residuals of calibration with ID_level1 and PSU. #' \item \code{betas} - a numeric \code{data.table} containing the estimated coefficients of calibration. #' \item \code{all_result} - a \code{data.table} containing the variables: \cr #' \code{respondent_count} - the count of respondents, \cr #' \code{pop_size} - the estimated size of population, \cr #' \code{n_nonzero} - the count of respondents, whose answers are larger than zero, \cr #' \code{value} - the estimated value, \cr #' \code{var} - the estimated variance, \cr #' \code{se} - the estimated standard error, \cr #' \code{rse} - the estimated relative standard error (coefficient of variation), \cr #' \code{cv} - the estimated relative standard error (coefficient of variation) in percentage, \cr #' \code{absolute_margin_of_error} - the estimated absolute margin of error, \cr #' \code{relative_margin_of_error} - the estimated relative margin of error in percentage, \cr #' \code{CI_lower} - the estimated confidence interval lower bound, \cr #' \code{CI_upper} - the estimated confidence interval upper bound, \cr #' \code{confidence_level} - the positive value for confidence interval, \cr #' \code{S2_y_HT} - the estimated variance of the y variable in case of total or the estimated variance of the linearised variable in case of the ratio of two totals using non-calibrated weights, \cr #' \code{S2_y_ca} - the estimated variance of the y variable in case of total or the estimated variance of the linearised variable in case of the ratio of two totals using calibrated weights, \cr #' \code{S2_res} - the estimated variance of the regression residuals, \cr #' \code{var_srs_HT} - the estimated variance of the HT estimator under SRS for household, \cr #' \code{var_cur_HT} - the estimated variance of the HT estimator under current design for household, \cr #' \code{var_srs_ca} - the estimated variance of the calibrated estimator under SRS for household, \cr #' \code{deff_sam} - the estimated design effect of
sample design for household, \cr #' \code{deff_est} - the estimated design effect of estimator for household, \cr #' \code{deff} - the overall estimated design effect of sample design and estimator for household #' } #' #' #' @references #' Eric Graf and Yves Tille, Variance Estimation Using Linearization for Poverty and Social Exclusion Indicators, Survey Methodology, June 2014, Vol. 40, No. 1, pp. 61-79, Statistics Canada, Catalogue no. 12-001-X, URL \url{https://www150.statcan.gc.ca/n1/pub/12-001-x/12-001-x2014001-eng.pdf} \cr #' Guillaume Osier and Emilio Di Meglio. The linearisation approach implemented by Eurostat for the first wave of EU-SILC: what could be done from the second wave onwards? 2012 \cr #' Guillaume Osier (2009). Variance estimation for complex indicators of poverty and inequality. \emph{Journal of the European Survey Research Association}, Vol.3, No.3, pp. 167-195, ISSN 1864-3361, URL \url{https://ojs.ub.uni-konstanz.de/srm/article/view/369}. \cr #' Eurostat Methodologies and Working papers, Standard error estimation for the EU-SILC indicators of poverty and social exclusion, 2013, URL \url{http://ec.europa.eu/eurostat/documents/3888793/5855973/KS-RA-13-024-EN.PDF}. \cr #' Jean-Claude Deville (1999). Variance estimation for complex statistics and estimators: linearization and residual techniques. Survey Methodology, 25, 193-203, URL \url{https://www150.statcan.gc.ca/n1/pub/12-001-x/1999002/article/4882-eng.pdf}. \cr #' Eurostat Methodologies and Working papers, Handbook on precision requirements and variance estimation for ESS household surveys, 2013, URL \url{http://ec.europa.eu/eurostat/documents/3859598/5927001/KS-RA-13-029-EN.PDF}. \cr #' Matti Langel, Yves Tille, Corrado Gini, a pioneer in balanced sampling and inequality theory. \emph{Metron - International Journal of Statistics}, 2011, vol. LXIX, n. 1, pp. 45-65, URL \url{http://dx.doi.org/10.1007/BF03263549}. \cr #' Morris H. Hansen, William N. Hurwitz, William G. Madow, (1953), Sample survey methods and theory Volume I Methods and applications, 257-258, Wiley. \cr #' Yves G. Berger, Tim Goedeme, Guillaume Osier (2013). Handbook on standard error estimation and other related sampling issues in EU-SILC, URL \url{https://ec.europa.eu/eurostat/cros/content/handbook-standard-error-estimation-and-other-related-sampling-issues-ver-29072013_en} \cr #' Working group on Statistics on Income and Living Conditions (2004) Common cross-sectional EU indicators based on EU-SILC; the gender pay gap.
\emph{EU-SILC 131-rev/04}, Eurostat.\cr #' #' #' @seealso \code{\link{vardom}}, \code{\link{vardomh}}, \code{\link{linarpt}} #' #' @keywords varpoord #' #' #' @examples #' library("data.table") #' library("laeken") #' data("eusilc") #' dataset <- data.table(IDd = paste0("V", 1 : nrow(eusilc)), eusilc) #' dataset1 <- dataset[1 : 1000] #' #' # use dataset1 by default without using fh_zero (finite population correction) #' aa <- varpoord(Y = "eqIncome", w_final = "rb050", #' Y_thres = NULL, wght_thres = NULL, #' ID_level1 = "db030", ID_level2 = "IDd", #' H = "db040", PSU = "rb030", N_h = NULL, #' sort = NULL, Dom = NULL, #' gender = NULL, X = NULL, #' X_ID_level1 = NULL, g = NULL, #' q = NULL, datasetX = NULL, #' dataset = dataset1, percentage = 60, #' order_quant = 50L, alpha = 20, #' confidence = .95, outp_lin = FALSE, #' outp_res = FALSE, type = "linarpt") #' aa #' #' \dontrun{ #' # use dataset1 by default with using fh_zero (finite population correction) #' aa2 <- varpoord(Y = "eqIncome", w_final = "rb050", #' Y_thres = NULL, wght_thres = NULL, #' ID_level1 = "db030", ID_level2 = "IDd", #' H = "db040", PSU = "rb030", N_h = NULL, #' fh_zero = TRUE, sort = NULL, Dom = "db040", #' gender = NULL, X = NULL, X_ID_level1 = NULL, #' g = NULL, datasetX = NULL, dataset = dataset1, #' percentage = 60, order_quant = 50L, #' alpha = 20, confidence = .95, outp_lin = FALSE, #' outp_res = FALSE, type = "linarpt") #' aa2 #' aa2$all_result #' #' #' # using dataset1 #' aa4 <- varpoord(Y = "eqIncome", w_final = "rb050", #' Y_thres = NULL, wght_thres = NULL, #' ID_level1 = "db030", ID_level2 = "IDd", #' H = "db040", PSU = "rb030", N_h = NULL, #' sort = NULL, Dom = "db040", #' gender = NULL, X = NULL, #' X_ID_level1 = NULL, g = NULL, #' datasetX = NULL, dataset = dataset, #' percentage = 60, order_quant = 50L, #' alpha = 20, confidence = .95, #' outp_lin = TRUE, outp_res = TRUE, #' type = "linarpt") #' aa4$lin_out[20 : 40]} #' #' #' @import data.table #' #' @export varpoord varpoord <- function(Y, w_final, age = NULL, pl085 = NULL, month_at_work = NULL, Y_den = NULL, Y_thres = NULL, wght_thres = NULL, ID_level1, ID_level2 = NULL, H, PSU, N_h, PSU_sort = NULL, fh_zero = FALSE, PSU_level = TRUE, sort = NULL, Dom = NULL, period = NULL, gender = NULL, dataset = NULL, X = NULL, periodX = NULL, X_ID_level1 = NULL, ind_gr = NULL, g = NULL, q = NULL, datasetX = NULL, percentage = 60, order_quant = 50, alpha = 20, confidence = .95, outp_lin = FALSE, outp_res = FALSE, type = "linrmpg") { ### Checking all_choices <- c("linarpr","linarpt","lingpg","linpoormed", "linrmpg","lingini","lingini2", "linqsr", "linrmir", "linarr") type <- tolower(type) type <- match.arg(type, all_choices, length(type) > 1) fh_zero <- check_var(vars = fh_zero, varn = "fh_zero", varntype = "logical") PSU_level <- check_var(vars = PSU_level, varn = "PSU_level", varntype = "logical") outp_lin <- check_var(vars = outp_lin, varn = "outp_lin", varntype = "logical") outp_res <- check_var(vars = outp_res, varn = "outp_res", varntype = "logical") percentage <- check_var(vars = percentage, varn = "percentage", varntype = "numeric0100") order_quant <- check_var(vars = order_quant, varn = "order_quant", varntype = "numeric0100") alpha <- check_var(vars = alpha, varn = "alpha", varntype = "numeric0100") confidence <- check_var(vars = confidence, varn = "confidence", varntype = "numeric01") Y <- check_var(vars = Y, varn = "Y", dataset = dataset, ncols = 1, isnumeric = TRUE, isvector = TRUE, grepls = "__") Ynrow <- length(Y) w_final <- check_var(vars = w_final, varn =
"weight", dataset = dataset, ncols = 1, Ynrow = Ynrow, isnumeric = TRUE, isvector = TRUE) age <- check_var(vars = age, varn = "age", dataset = dataset, ncols = 1, Ynrow = Ynrow, isnumeric = TRUE, isvector = TRUE, mustbedefined = any(c("linarr", "linrmir") %in% type)) pl085 <- check_var(vars = pl085, varn = "pl085", dataset = dataset, ncols = 1, Ynrow = Ynrow, isnumeric = TRUE, isvector = TRUE, mustbedefined = any(type == "linarr")) month_at_work <- check_var(vars = month_at_work, varn = "month_at_work", dataset = dataset, ncols = 1, Ynrow = Ynrow, isnumeric = TRUE, isvector = TRUE, mustbedefined = any(type == "linarr")) gender <- check_var(vars = gender, varn = "gender", dataset = dataset, ncols = 1, Ynrow = Ynrow, isnumeric = TRUE, isvector = TRUE, mustbedefined = any(type == "lingpg")) Y_den <- check_var(vars = Y_den, varn = "Y_den", dataset = dataset, ncols = 1, Ynrow = Ynrow, isnumeric = TRUE, isvector = TRUE, mustbedefined = any("linarr" == type)) Y_thres <- check_var(vars = Y_thres, varn = "Y_thres", dataset = dataset, ncols = 1, Ynrow = Ynrow, mustbedefined = FALSE, isnumeric = TRUE, isvector = TRUE) wght_thres <- check_var(vars = wght_thres, varn = "wght_thres", dataset = dataset, ncols = 1, Ynrow = Ynrow, mustbedefined = FALSE, isnumeric = TRUE, isvector = TRUE) ID_level1 <- check_var(vars = ID_level1, varn = "ID_level1", dataset = dataset, ncols = 1, Ynrow = Ynrow, ischaracter = TRUE) ID_level2 <- check_var(vars = ID_level2, varn = "ID_level2", dataset = dataset, ncols = 1, Ynrow = Ynrow, ischaracter = TRUE, namesID1 = names(ID_level1), periods = period) H <- check_var(vars = H, varn = "H", dataset = dataset, ncols = 1, Yncol = 0, Ynrow = Ynrow, ischaracter = TRUE, namesID1 = names(ID_level1), dif_name = "dataH_stratas") sort <- check_var(vars = sort, varn = "sort", dataset = dataset, ncols = 1, Ynrow = Ynrow, mustbedefined = FALSE, isnumeric = TRUE, isvector = TRUE) period <- check_var(vars = period, varn = "period", dataset = dataset, Ynrow = Ynrow, ischaracter = TRUE, mustbedefined = FALSE, duplicatednames = TRUE) Dom <- check_var(vars = Dom, varn = "Dom", dataset = dataset, Ynrow = Ynrow, ischaracter = TRUE, mustbedefined = FALSE, duplicatednames = TRUE, grepls = "__") PSU <- check_var(vars = PSU, varn = "PSU", dataset = dataset, ncols = 1, Yncol = 0, Ynrow = Ynrow, ischaracter = TRUE, namesID1 = names(ID_level1)) PSU_sort <- check_var(vars = PSU_sort, varn = "PSU_sort", dataset = dataset, ncols = 1, Ynrow = Ynrow, ischaracter = TRUE, isvector = TRUE, mustbedefined = FALSE, PSUs = PSU) if(!is.null(X) | !is.null(ind_gr) | !is.null(g) | !is.null(q) | !is.null(periodX) | !is.null(X_ID_level1) | !is.null(datasetX)) { X <- check_var(vars = X, varn = "X", dataset = datasetX, check.names = TRUE, isnumeric = TRUE, dif_name = c(names(period) , "g", "q"), dX = "X") Xnrow <- nrow(X) ind_gr <- check_var(vars = ind_gr, varn = "ind_gr", dataset = datasetX, ncols = 1, Xnrow = Xnrow, dX = "X", ischaracter = TRUE, dif_name = c(names(period) , "g", "q")) g <- check_var(vars = g, varn = "g", dataset = datasetX, ncols = 1, Xnrow = Xnrow, isnumeric = TRUE, isvector = TRUE, dX = "X") q <- check_var(vars = q, varn = "q", dataset = datasetX, ncols = 1, Xnrow = Xnrow, isnumeric = TRUE, isvector = TRUE, dX = "X") periodX <- check_var(vars = periodX, varn = "periodX", dataset = datasetX, ncols = 1, Xnrow = Xnrow, ischaracter = TRUE, mustbedefined = !is.null(period), duplicatednames = TRUE, varnout = "period", varname = names(period), dX = "X") X_ID_level1 <- check_var(vars = X_ID_level1, varn = 
"X_ID_level1", dataset = datasetX, ncols = 1, Xnrow = Xnrow, ischaracter = TRUE, varnout = "ID_level1", varname = names(ID_level1), periods = period, periodsX = periodX, ID_level1 = ID_level1, dX = "X") } # N_h np <- sum(ncol(period)) if (!is.null(N_h)) { N_h <- data.table(N_h) if (anyNA(N_h)) stop("'N_h' has missing values") if (ncol(N_h) != np + 2) stop(paste0("'N_h' should be ", np + 2, " columns")) if (!is.numeric(N_h[[ncol(N_h)]])) stop("The last column of 'N_h' should be numeric") nams <- c(names(period), names(H)) if (all(nams %in% names(N_h))) {N_h[, (nams) := lapply(.SD, as.character), .SDcols = nams] } else stop(paste0("All strata titles of 'H'", ifelse(!is.null(period), "and periods titles of 'period'", ""), " have not in 'N_h'")) if (is.null(period)) { if (any(is.na(merge(unique(H), N_h, by = names(H), all.x = TRUE)))) stop("'N_h' is not defined for all strata") if (any(duplicated(N_h[, head(names(N_h), -1), with = FALSE]))) stop("Strata values for 'N_h' must be unique") } else { pH <- data.table(period, H) if (any(is.na(merge(unique(pH), N_h, by = names(pH), all.x = TRUE)))) stop("'N_h' is not defined for all strata and periods") if (any(duplicated(N_h[, head(names(N_h), -1), with = FALSE]))) stop("Strata values for 'N_h' must be unique in all periods") pH <- NULL } setkeyv(N_h, names(N_h)[c(1 : (1 + np))]) } N <- dataset <- datasetX <- NULL if (is.null(Y_thres)) Y_thres <- Y if (is.null(wght_thres)) wght_thres <- w_final psusn <- as.integer(!is.null(PSU_sort)) # Design weights if (!is.null(X)) { ID_level1h <- data.table(ID_level1) if (!is.null(period)) { ID_level1h <- data.table(period, ID_level1h) X_ID_level1 <- data.table(period, X_ID_level1) } idhx <- data.table(X_ID_level1, g) setnames(idhx, names(idhx)[c(1 : (ncol(idhx) - 1))], names(ID_level1h)) idg <- merge(ID_level1h, idhx, by = names(ID_level1h), sort = FALSE) w_design <- w_final / idg[[ncol(idg)]] idg <- data.table(idg, w_design = w_design) idh <- idg[, .N, keyby = c(names(ID_level1h), "w_design")] if (nrow(X) != nrow(idh)) stop("Aggregated 'w_design' length must the same as matrix 'X'") idg <- idhx <- ID_level1h <- NULL } else w_design <- w_final ### Calculation sar_nr <- respondent_count <- pop_size <- n_nonzero <- NULL nhs <- data.table(respondent_count = 1, pop_size = w_final, n_nonzero = as.integer(abs(Y) > .Machine$double.eps)) if (!is.null(period)) nhs <- data.table(period, nhs) if (!is.null(Dom)) nhs <- data.table(Dom, nhs) if (!is.null(c(Dom, period))) {nhs <- nhs[, lapply(.SD, sum, na.rm = TRUE), keyby = eval(names(nhs)[0:2-ncol(nhs)]), .SDcols = c("respondent_count", "pop_size", "n_nonzero")] } else nhs <- nhs[, lapply(.SD, sum, na.rm=TRUE), .SDcols=c("respondent_count", "pop_size", "n_nonzero")] estim <- c() aH <- names(H) idper <- copy(ID_level2) Y1sort <- Y1asort <- NULL aPSU <- names(PSU) if (!is.null(period)) idper <- data.table(idper, period) Y1 <- data.table(idper, ID_level1, H, PSU, check.names = TRUE) if (!is.null(PSU_sort)) Y1 <- data.table(Y1, PSU_sort, check.names = TRUE) Y1 <- data.table(Y1, w_design, w_final) Y1[, Y1sort := .I] setkeyv(Y1, names(idper)) value <- NULL if ("linarpt" %in% type) { varpt <- linarpt(Y = Y, id = ID_level2, weight = w_final, sort = sort, Dom = Dom, period = period, dataset = NULL, percentage = percentage, order_quant = order_quant, var_name = "lin_arpt", checking = FALSE) Y1 <- merge(Y1, varpt$lin, all.x = TRUE) esti <- data.table("ARPT", varpt$value, NA) setnames(esti, names(esti)[c(1, -1 : 0 + ncol(esti))], c("type", "value", "value_eu")) estim <- rbind(estim, 
esti) varpt <- esti <- NULL } if ("linarpr" %in% type) { varpr <- linarpr(Y = Y, id = ID_level2, weight = w_final, Y_thres = Y_thres, wght_thres = wght_thres, sort = sort, Dom = Dom, period = period, dataset = NULL, percentage = percentage, order_quant = order_quant, var_name = "lin_arpr", checking = FALSE) Y1 <- merge(Y1, varpr$lin, all.x = TRUE) esti <- data.table("ARPR", varpr$value, NA) setnames(esti, names(esti)[c(1, -1 : 0 + ncol(esti))], c("type", "value", "value_eu")) estim <- rbind(estim, esti) varpr <- esti <- NULL } if (("lingpg" %in% type) & all(!is.null(gender))) { vgpg <- lingpg(Y = Y, gender = gender, id = ID_level2, weight = w_final, sort = sort, Dom = Dom, period = period, dataset = NULL, var_name = "lin_gpg", checking = FALSE) Y1 <- merge(Y1, vgpg$lin, all.x = TRUE) esti <- data.table("GPG", vgpg$value, NA) setnames(esti, names(esti)[c(1, -1 : 0 + ncol(esti))], c("type", "value", "value_eu")) estim <- rbind(estim, esti) vgpg <- esti <- NULL } if ("linpoormed" %in% type) { vporm <- linpoormed(Y = Y, id = ID_level2, weight = w_final, sort = sort, Dom = Dom, period = period, dataset = NULL, percentage = percentage, order_quant = order_quant, var_name = "lin_poormed", checking = FALSE) Y1 <- merge(Y1, vporm$lin, all.x = TRUE) esti <- data.table("POORMED", vporm$value, NA) setnames(esti, names(esti)[c(1, -1 : 0 + ncol(esti))], c("type", "value", "value_eu")) estim <- rbind(estim, esti) vporm <- esti <- NULL } if ("linrmpg" %in% type) { vrmpg <- linrmpg(Y = Y, id = ID_level2, weight = w_final, sort = sort, Dom = Dom, period = period, dataset = NULL, percentage = percentage, order_quant = order_quant, var_name = "lin_rmpg", checking = FALSE) Y1 <- merge(Y1, vrmpg$lin, all.x = TRUE) esti <- data.table("RMPG", vrmpg$value, NA) setnames(esti, names(esti)[c(1, -1 : 0 + ncol(esti))], c("type", "value", "value_eu")) estim <- rbind(estim, esti) vrmpg <- esti <- NULL } if ("linqsr" %in% type) { vqsr <- linqsr(Y = Y, id = ID_level2, weight = w_final, sort = sort, Dom = Dom, period = period, dataset = NULL, alpha = alpha, var_name = "lin_qsr", checking = FALSE) Y1 <- merge(Y1, vqsr$lin, all.x = TRUE) esti <- data.table("QSR", vqsr$value) setnames(esti, names(esti)[c(1, -1 : 0 + ncol(esti))], c("type", "value", "value_eu")) estim <- rbind(estim, esti) vqsr <- esti <- NULL } if ("lingini" %in% type) { vgini <- lingini(Y = Y, id = ID_level2, weight = w_final, sort = sort, Dom=Dom, period = period, dataset = NULL, var_name = "lin_gini", checking = FALSE) Y1 <- merge(Y1, vgini$lin, all.x = TRUE) esti <- data.table("GINI", vgini$value) setnames(esti, names(esti)[c(1, -1 : 0 + ncol(esti))], c("type", "value", "value_eu")) estim <- rbind(estim, esti) vgini <- esti <- NULL } if ("lingini2" %in% type) { vgini2 <- lingini2(Y = Y, id = ID_level2, weight = w_final, sort = sort, Dom = Dom, period = period, dataset = NULL, var_name = "lin_gini2", checking = FALSE) Y1 <- merge(Y1, vgini2$lin, all.x = TRUE) esti <- data.table("GINI2", vgini2$value) setnames(esti, names(esti)[c(1, -1 : 0 + ncol(esti))], c("type", "value", "value_eu")) estim <- rbind(estim, esti) vgini2 <- esti <- NULL } if (("linrmir" %in% type) & all(!is.null(age))) { vrmir <- linrmir(Y = Y, id = ID_level2, age = age, weight = w_final, sort = sort, Dom = Dom, period = period, dataset = NULL, order_quant = order_quant, var_name = "lin_rmir", checking = FALSE) Y1 <- merge(Y1, vrmir$lin, all.x = TRUE) esti <- data.table("RMIR", vrmir$value, NA) setnames(esti, names(esti)[c(1, -1 : 0 + ncol(esti))], c("type", "value", "value_eu")) estim <- 
rbind(estim, esti) vrmir <- esti <- NULL } if (("linarr" %in% type) & all(!is.null(age) & !is.null(pl085) & !is.null(month_at_work))) { varr <- linarr(Y = Y, Y_den = Y_den, id = ID_level2, age = age, pl085 = pl085, month_at_work = month_at_work, weight = w_final, sort = sort, Dom = Dom, period = period, dataset = NULL, order_quant = order_quant, var_name = "lin_arr", checking = FALSE) Y1 <- merge(Y1, varr$lin, all.x = TRUE) esti <- data.table("ARR", varr$value, NA) setnames(esti, names(esti)[c(1, -1 : 0 + ncol(esti))], c("type", "value", "value_eu")) estim <- rbind(estim, esti) varr <- esti <- NULL } estim[, variable := paste0("lin_", tolower(type))] nDom <- names(Dom) if (!is.null(nDom)) estim[, (paste0(nDom, "at1at")) := lapply(nDom, function(x) paste(x, get(x), sep = "."))] Dom <- estim[, "variable"] if (!is.null(nDom)) Dom <- estim[, c("variable", paste0(nDom, "at1at")), with = FALSE] estim$variable <- do.call("paste", c(as.list(Dom), sep = "__")) estim[, variable := str_replace_all(variable, "[ ]", ".")] if (!is.null(nDom)) estim[, (paste0(nDom, "at1at")) := NULL] all_result <- estim setkey(Y1, Y1sort) Y1[, Y1sort := NULL] estim <- .SD <- lin_outp <- NULL if (outp_lin) lin_outp <- Y1[, c(-(3 : 5) - np), with = FALSE] Y2 <- Y1[, lapply(.SD, sum, na.rm = TRUE), by = c(names(Y1)[c(2 : (6 + np + psusn))]), .SDcols = names(Y1)[- (1 : (6 + np + psusn))]] Y3 <- Y2[, c(-(1 : (5 + np + psusn))), with = FALSE] idper <- period <- NULL if (np > 0) period <- Y2[, c(1 : np), with = FALSE] ID_level1h <- Y2[, np + 1, with = FALSE] H <- Y2[, np + 2, with = FALSE] setnames(H, names(H), aH) PSU <- Y2[, np + 3, with = FALSE] setnames(PSU, names(PSU), aPSU) if (!is.null(PSU_sort)) PSU_sort <- Y2[[np + 4]] w_design2 <- Y2[[np + 4 + psusn]] w_final2 <- Y2[[np + 5 + psusn]] Y1 <- Y2 <- NULL # Calibration res_outp <- betas <- variable <- NULL if (!is.null(X)) { if (np > 0) ID_level1h <- data.table(period, ID_level1h) setnames(ID_level1h, names(ID_level1h), names(X_ID_level1)) X0 <- data.table(X_ID_level1, ind_gr, q, g, X) D1 <- merge(ID_level1h, X0, by = names(ID_level1h), sort = FALSE) ind_gr <- D1[, np + 2, with = FALSE] if (!is.null(period)) ind_gr <- data.table(D1[, names(periodX), with = FALSE], ind_gr) ind_period <- do.call("paste", c(as.list(ind_gr), sep = "_")) lin1 <- lapply(split(Y3[, .I], ind_period), function(i) { resid <- residual_est(Y = Y3[i], X = D1[i, (np + 5) : ncol(D1), with = FALSE], weight = w_design2[i], q = D1[i][["q"]], dataset = NULL, checking = FALSE) pers0 <- ind_gr[i, .N, keyby = c(names(ind_gr))] list(data.table(sar_nr = i, resid$residuals), data.table(pers0[, N := NULL], resid$betas)) }) Y4 <- rbindlist(lapply(lin1, function(x) x[[1]])) betas <- rbindlist(lapply(lin1, function(x) x[[2]])) setkeyv(Y4, "sar_nr") Y4[, sar_nr := NULL] if (outp_res) res_outp <- data.table(ID_level1h, PSU, w_final2, Y4) } else Y4 <- Y3 lin1 <- X0 <- D1 <- ind_gr <- ID_level1h <- X_ID_level1 <- q <- g <- NULL var_est <- variance_est(Y = Y4, H = H, PSU = PSU, w_final = w_final2, N_h = N_h, fh_zero = fh_zero, PSU_level = PSU_level, PSU_sort = PSU_sort, period = period, dataset = NULL, msg = "Current variance estimation", checking = FALSE) var_est <- transpos(var_est, is.null(period), "var_est", names(period)) all_result <- merge(var_est, all_result, all = TRUE, by = c(names(period), "variable")) # Variance of HT estimator under current design var_cur_HT <- variance_est(Y = Y3, H = H, PSU = PSU, w_final = w_design2, N_h = N_h, fh_zero = fh_zero, PSU_level = PSU_level, PSU_sort = PSU_sort, period = period, 
dataset = NULL, msg = "Variance of HT estimator under current design", checking = FALSE) var_cur_HT <- transpos(var_cur_HT, is.null(period), "var_cur_HT", names(period)) all_result <- merge(all_result, var_cur_HT, by = c(names(period), "variable")) H <- PSU <- PSU_sort <- N_h <- var_est <- var_cur_HT <- NULL # Variance of HT estimator under SRS if (is.null(period)) { varsrs <- var_srs(Y = Y3, w = w_design2) S2_y_HT <- varsrs$S2p S2_y_ca <- var_srs(Y = Y3, w = w_final2)$S2p var_srs_HT <- varsrs$varsrs } else { period_agg <- unique(period) lin1 <- lapply(1 : nrow(period_agg), function(i) { per <- period_agg[i,][rep(1, nrow(Y3)),] ind <- (rowSums(per == period) == ncol(period)) varsrs <- var_srs(Y = Y3[ind], w = w_design2[ind]) S2_y_ca <- var_srs(Y = Y3[ind], w = w_final2[ind])$S2p list(S2p = data.table(period_agg[i,], varsrs$S2p), varsrs = data.table(period_agg[i,], varsrs$varsrs), S2_y_ca = data.table(period_agg[i,], S2_y_ca)) }) S2_y_HT <- rbindlist(lapply(lin1, function(x) x[[1]])) var_srs_HT <- rbindlist(lapply(lin1, function(x) x[[2]])) S2_y_ca <- rbindlist(lapply(lin1, function(x) x[[3]])) } var_srs_HT <- transpos(var_srs_HT, is.null(period), "var_srs_HT", names(period)) all_result <- merge(all_result, var_srs_HT, all = TRUE, by = c(names(period), "variable")) S2_y_HT <- transpos(S2_y_HT, is.null(period), "S2_y_HT", names(period)) all_result <- merge(all_result, S2_y_HT, all = TRUE, by = c(names(period), "variable")) S2_y_ca <- transpos(S2_y_ca, is.null(period), "S2_y_ca", names(period)) all_result <- merge(all_result, S2_y_ca, all = TRUE, by = c(names(period), "variable")) Y3 <- w_design2 <- var_srs_HT <- S2_y_HT <- S2_y_ca <- NULL # Variance of calibrated estimator under SRS if (is.null(period)) { varsres <- var_srs(Y = Y4, w = w_final2) S2_res <- varsres$S2p var_srs_ca <- varsres$varsrs } else { period_agg <- unique(period) lin1 <- lapply(1:nrow(period_agg), function(i) { per <- period_agg[i,][rep(1, nrow(Y4)),] ind <- (rowSums(per == period) == ncol(period)) varsres <- var_srs(Y = Y4[ind], w = w_final2[ind]) list(S2p = data.table(period_agg[i,], varsres$S2p), varsrs = data.table(period_agg[i,], varsres$varsrs)) }) S2_res <- rbindlist(lapply(lin1, function(x) x[[1]])) var_srs_ca <- rbindlist(lapply(lin1, function(x) x[[2]])) } var_srs_ca <- transpos(var_srs_ca, is.null(period), "var_srs_ca", names(period), "variable") all_result <- merge(all_result, var_srs_ca, all = TRUE, by = c(names(period), "variable")) S2_res <- transpos(S2_res, is.null(period), "S2_res", names(period), "variable") all_result <- merge(all_result, S2_res, all = TRUE, by = c(names(period), "variable")) var_srs_ca <- S2_res <- Y4 <- w_final2 <- NULL all_result[, variable := NULL] deff_sam <- deff_est <- deff <- n_eff <- var_est2 <- NULL se <- rse <- cv <- absolute_margin_of_error <- NULL relative_margin_of_error <- CI_lower <- CI_upper <- NULL if (nrow(all_result[var_est < 0]) > 0) stop("Estimation of variance are negative!") # Design effect of sample design all_result[, deff_sam := var_cur_HT / var_srs_HT] # Design effect of estimator all_result[, deff_est := var_est / var_cur_HT] # Overall effect of sample design and estimator all_result[, deff := deff_sam * deff_est] all_result[, var_est2 := var_est] all_result[xor(is.na(var_est2), var_est2 < 0), var_est2 := 0] all_result[, se := sqrt(var_est2)] all_result[xor(is.na(var_est2), var_est2 < 0), se := NA] all_result[(value != 0) & (!is.nan(value)), rse := se / value] all_result[value == 0 | is.nan(value), rse := NA] all_result[, cv := rse * 100] tsad <- qnorm(0.5 * 
(1 + confidence)) all_result[, absolute_margin_of_error := tsad * se] all_result[, relative_margin_of_error := tsad * cv] all_result[, CI_lower := value - absolute_margin_of_error] all_result[, CI_upper := value + absolute_margin_of_error] setnames(all_result, "var_est", "var") if (!is.null(c(nDom, period))) { all_result <- merge(all_result, nhs, all = TRUE, by = c(nDom, names(period))) } else { all_result[, respondent_count := nhs$respondent_count] all_result[, pop_size := nhs$pop_size] all_result[, n_nonzero := nhs$n_nonzero]} variabl <- c("respondent_count", "n_nonzero", "pop_size", "value", "value_eu", "var", "se", "rse", "cv", "absolute_margin_of_error", "relative_margin_of_error", "CI_lower", "CI_upper") if (is.null(nDom)) variabl <- c(variabl, "S2_y_HT", "S2_y_ca", "S2_res") variabl <- c(variabl, "var_srs_HT", "var_cur_HT", "var_srs_ca", "deff_sam", "deff_est", "deff") type <- "type" if (!is.null(period)) type <- c(type, names(period)) setkeyv(all_result, c(type, nDom)) list(lin_out = lin_outp, res_out = res_outp, betas = betas, all_result = all_result[, c(type, nDom, variabl), with = FALSE]) }
## end of vardpoor/R/varpoord.R
## Function Description:
## A function to assess if a vector can be interpreted as numbers
check.numeric <- function(v = NULL, na.rm = FALSE, only.integer = FALSE,
                          exceptions = c(""), ignore.whitespace = TRUE){
    #----[ checking the input ]----#
    {
        # if only.integer is NOT a single TRUE or FALSE
        if (!is.logical(only.integer) | length(only.integer) != 1) {
            # complain
            stop("The parameter \"only.integer\" should be either TRUE or FALSE.")
        }

        # if user has not defined the vector v
        if (is.null(v)) {
            # complain
            stop("The parameter \"v\" is not defined. It can be a character vector, numeric vector, factor vector or logical vector.")
        # if user has defined it but the class is NOT character or factor
        }else if (!inherits(v, c("character", "factor"))) {
            # if the class is NOT numeric or integer either
            if (!inherits(v, c("numeric", "integer", "logical"))) {
                # complain
                stop("The parameter \"v\" can only be a character vector, numeric vector, factor vector or logical vector.")
            # if the class is numeric or integer
            }else{
                # if user wants to specifically filter out non-integers, there
                # is a chance that the vector contains some non-integer numbers,
                # so we should turn the vector to character and run the function
                if(only.integer){
                    # convert the vector to character
                    v <- as.character(v)
                }else{
                    # since it is already a number
                    return(rep(x = TRUE, length(v)))
                }
            }
        }

        #-------[ na.rm ]-------#
        {
            # if na.rm is NOT a single TRUE or FALSE
            if (!is.logical(na.rm) | length(na.rm) != 1) {
                # complain
                stop("The parameter \"na.rm\" should be either TRUE or FALSE.")
            }
        }

        #-------[ ignore.whitespace ]-------#
        {
            # if ignore.whitespace is NOT a single TRUE or FALSE
            if (!is.logical(ignore.whitespace) | length(ignore.whitespace) != 1) {
                # complain
                stop("The parameter \"ignore.whitespace\" should be either TRUE or FALSE.")
            }
        }
    }

    #----[ pre-processing ]----#
    {
        # convert to character if it is a factor
        if (inherits(v, "factor")) {
            # convert to character
            v <- as.character(v)
        }

        # if user wants to ignore NAs
        if (na.rm) {
            v <- stats::na.omit(v)
        }

        # if user wants to ignore leading or trailing white space
        if (ignore.whitespace) {
            # remove whitespace at the beginning and at the end of each item in v
            v <- gsub("^\\s+|\\s+$", "", v)
        }
    }

    #----[ processing ]----#
    {
        # if user wants to only detect integers
        if (only.integer) {
            regexp_pattern <- "(^(-|\\+)?\\d+$)|(^(-|\\+)?(\\d*)e(-|\\+)?(\\d+)$)"
        # if user wants to detect all numbers
        }else{
            #regexp_pattern <- "^(-|\\+)?\\d+(\\.?\\d+)?$"
            regexp_pattern <- "(^(-|\\+)?((\\.?\\d+)|(\\d+\\.\\d+)|(\\d+\\.?))$)|(^(-|\\+)?((\\.?\\d+)|(\\d+\\.\\d+)|(\\d+\\.?))e(-|\\+)?(\\d+)$)"
        }

        # perform the regexp
        output <- grepl(pattern = regexp_pattern, x = v)

        # check for the existence of exceptions
        exception_index <- is.element(v, exceptions)
        # if there is any exception detected
        if (any(exception_index)) {
            # turn their output value to TRUE
            output[exception_index] <- TRUE
        }

        # if user wants to keep NAs
        if (!na.rm) {
            # NAs are marked as FALSE by grepl and we replace them with TRUE instead
            output[is.na(v)] <- TRUE
        }

        # return the result
        return(output)
    }
}
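## Illustrative usage sketch (not from the package source; all values below
## are made up): how check.numeric() behaves on a mixed character vector.
v <- c("1", "2.5", "-3", "1e4", "abc", " 7 ", NA)
check.numeric(v)                       # NA elements are reported as TRUE by default
check.numeric(v, na.rm = TRUE)         # drop the NA elements before checking
check.numeric(v, only.integer = TRUE)  # accept only integers (incl. e-notation)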
## end of varhandle/R/check.numeric.R
## Function Description:
## This function provides a summary of NAs in the given matrix or data.frame,
## either feature-wise (by column) or sample-wise (by row).
inspect.na <- function(d, hist = FALSE, summary = TRUE, byrow = FALSE,
                       barplot = TRUE, na.value = NA){
    # if d was not defined by user
    if (missing("d")) {
        stop("Please provide the d which can be a matrix or data.frame.")
    # if the data was provided, check the class of the provided data
    }else if (inherits(d, c("data.frame", "matrix"))) {
        # use the pin.na function to pin the NAs
        pin.na.output <- pin.na(d, na.value = na.value)
        # check if there is any NA
        if (!is.null(pin.na.output)) {
            # if user wants to have the results row-wise
            if (byrow) {
                # extract the column that contains the rows that contain NA
                pin.na.output.rw <- pin.na.output[, "row_index"]
                # calculate the NA frequency in each of the reported rows
                na.density <- unlist(lapply(unique(pin.na.output.rw), function(x){
                    sum(pin.na.output.rw == x)
                }))
                # calculate the ratio of NAs relative to the total number of values in the row
                na.ratio <- na.density / ncol(d)
                # build the report data.frame
                result <- data.frame(row_index = unique(pin.na.output.rw),
                                     row_name = row.names(d)[unique(pin.na.output.rw)],
                                     number_of_NAs = na.density,
                                     ratio_of_NA = na.ratio)
            # if user wants to have the results column-wise
            }else{
                # extract the column that contains the columns that contain NA
                pin.na.output.clmn <- pin.na.output[, "column_index"]
                # calculate the NA frequency in each of the reported columns
                na.density <- unlist(lapply(unique(pin.na.output.clmn), function(x){
                    sum(pin.na.output.clmn == x)
                }))
                # calculate the ratio of NAs relative to the total number of values in the column
                na.ratio <- na.density / nrow(d)
                ## if d is a matrix and does not have column names, colnames()
                ## will return NULL, so we catch that and use the column index instead
                if (is.null(d.colnames <- colnames(d)[unique(pin.na.output.clmn)])) {
                    d.colnames <- unique(pin.na.output.clmn)
                }
                # build the report data.frame
                result <- data.frame(column_index = unique(pin.na.output.clmn),
                                     column_name = d.colnames,
                                     number_of_NAs = na.density,
                                     ratio_of_NA = na.ratio)
            }

            # if user wants the histogram to be plotted
            if (hist) {
                # plot the histogram of the row indices when byrow = TRUE,
                # otherwise of the column indices (the original code always
                # used the column indices, which are undefined in the byrow branch)
                hist(if (byrow) pin.na.output.rw else pin.na.output.clmn,
                     if (byrow) nrow(d) else ncol(d),
                     xlab = paste("Index number of the",
                                  if (byrow) {"rows"}else{"columns"}),
                     ylab = "NA Frequency")
            }

            # if user wants to have the barplot as well
            if (barplot) {
                ## construct a vector for colors
                colorlist <- rep.int("gray", nrow(result))
                colorlist[result$ratio_of_NA > 0.1] <- "yellow"
                colorlist[result$ratio_of_NA > 0.3] <- "orange"
                colorlist[result$ratio_of_NA > 0.5] <- "red"

                # create a backup of the user's current par() settings
                default_par <- par(no.readonly = TRUE)
                # set all the margins to zero
                par(mar = c(0, 0, 0, 0))
                # define the plot layout
                layout(matrix(c(1, 2, 3), byrow = TRUE, ncol = 1),
                       heights = c(1, 4, 0.7), widths = c(4))

                ## plot{1}
                par(mar = c(0, 0, 0, 1))
                plot.new()
                # text(x = 0.5, y = 0.5, labels = "Missing values", cex = 2, font = 2)
                text(x = 0.5, y = 0.5,
                     labels = paste("Missing values for",
                                    if (byrow) { "rows" }else{ "columns" }),
                     cex = 2, font = 2)

                ## plot{2}
                # make label text perpendicular to the axis and set the margins
                par(las = 2, mar = c(8, 4, 0, 1))
                barplot(result$ratio_of_NA,
                        names.arg = if(byrow) { result$row_name }else{ result$column_name },
                        col = colorlist,
                        ylab = "Ratio of NA",
                        # xlab = if(byrow) {"Row Names"}else{"Column Names"})
                        xlab = "")
                ## draw lines for each cutoff
                abline(h = 0.5, col = "gray88", lty = 2)
                abline(h = 0.3, col = "gray88", lty = 2)
                abline(h = 0.2, col = "gray88", lty = 2)
                abline(h = 0.1, col = "gray88", lty = 2)

                # plot{3}
                par(mar = c(0, 0, 0, 1))
                plot.new()
                legend("top", fill = c("red", "orange", "yellow", "gray"),
                       legend = c(">50%", ">30%", ">10%", "<=10%"), horiz = TRUE,
                       text.width = strwidth(c(">50%", ">30%", ">10%", "<=10%")) * 1.4)

                # set the par() settings we changed back to the user's original values
                par(mfrow = default_par$mfrow, mar = default_par$mar)
            }

            # if user wants to get the summary data.frame
            if (summary) {
                # return the result data.frame to the user
                return(result)
            }
        # if the pin.na function didn't find any NAs
        }else{
            return(NULL)
        }
    }else{
        stop("Please provide the d which can be a matrix or data.frame.")
    }
}
## end of varhandle/R/inspect.na.R
## Function Description:
## This function finds NAs in a vector, data.frame or matrix and returns a
## data.frame with two columns that contain the row number and column number
## of each NA.
pin.na <- function(x = NULL, na.value = NA){
    #----[ checking the input ]----#
    {
        # check if user has not provided the x argument
        if (is.null(x)) {
            stop("Please provide parameter \"x\". It can be a matrix, data.frame or vector.")
        }
    }

    #----[ pre-processing ]----#
    {
        ## convert to character if it is a factor
        if (inherits(x, "factor")) {
            x <- as.character(x)
        }
    }

    #----[ processing ]----#
    {
        ## count the NAs
        # create an empty variable to be filled in the following for loop
        missingness_index <- c()
        # iterate through the items in na.value
        for (i in na.value) {
            # if i is NA
            if (is.na(i)) {
                missingness_index <- c(missingness_index, which(is.na(x)))
            }else{
                missingness_index <- c(missingness_index, which(x == i))
            }
        }

        # if x is either a data.frame or a matrix
        if (inherits(x, c("data.frame", "matrix"))) {
            # if the count of NAs is not zero
            if (length(missingness_index) != 0) {
                # get the column number
                clmn <- (missingness_index %/% dim(x)[1]) + 1
                # get the row number
                rw <- missingness_index %% dim(x)[1]
                clmn[rw == 0] <- clmn[rw == 0] - 1
                rw[rw == 0] <- dim(x)[1]
                # create the output
                output <- data.frame(row_index = rw, column_index = clmn)
            }else{
                ## no NA was found
                output <- NULL
            }
        # if x is either a numeric, integer or character vector
        }else if (inherits(x, c("numeric", "integer", "character"))) {
            # if the count of NAs is not zero
            if (length(missingness_index) != 0) {
                output <- missingness_index
            }else{
                ## no NA was found
                output <- NULL
            }
        }else{
            # if the input is something other than the classes above, we cannot handle it!
            stop("Please provide the x which can be a matrix, data.frame or vector.")
        }

        # return the final result
        return(output)
    }
}
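## Illustrative usage sketch (not from the package source; the data is made
## up): locating NAs, and optionally custom missingness codes, in a
## data.frame and in a plain vector.
m <- data.frame(x = c(1, NA, 3), y = c("a", "b", "-9"))
pin.na(m)                          # row/column indices of each NA
pin.na(m, na.value = c(NA, "-9"))  # also treat "-9" as missing
pin.na(c(5, NA, 7))                # for vectors, the positions are returned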
## end of varhandle/R/pin.na.R
## Function Description:
## This function removes all variables except those which are specified in
## the given character vector, regular expression or both.
rm.all.but <- function(keep = NULL, envir = .GlobalEnv, keep_functions = TRUE,
                       gc_limit = 100, regex = "auto"){
    #----[ checking the input ]----#
    {
        ## Check the envir attribute
        if (!is.environment(envir)) {
            stop("You should specify an existing environment")
        }

        ## check if keep is defined
        if (is.null(keep)) {
            stop("The parameter `keep` is not defined. It should be a character vector with length 1 or more.")
        # if the provided object for keep is not a character vector
        } else if(!inherits(keep, "character")) {
            stop("The parameter `keep` should be a character vector with length 1 or more.")
        }

        ## Check that keep is a character vector
        if ((!inherits(keep, "character")) | typeof(keep) != "character" ) {
            stop("The value of the `keep` parameter should be a character vector with length 1 or more.")
        }

        ## check that the length of keep is more than or equal to 1
        if (!length(keep)) {
            stop("The value of the `keep` parameter should be a character vector with length 1 or more.")
        }

        ## if keep_functions is not a single logical value
        if (!is.logical(keep_functions) | length(keep_functions) != 1) {
            stop("The value of `keep_functions` should be either TRUE or FALSE.")
        }

        ## check that gc_limit has a valid value
        if (!is.numeric(gc_limit) | length(gc_limit) != 1) {
            stop("The value of the `gc_limit` parameter should be numeric with length 1. Its unit is MB.")
        }

        ## check that regex has a valid value
        if (!is.element(regex, c(TRUE, FALSE, "auto", 0, 1))) {
            stop("The value of `regex` should be either TRUE, FALSE or \"auto\".")
        }
    }

    #----[ processing ]----#
    {
        # if user wants to automatically detect regex or manually force it
        if (is.element(regex, c(TRUE, "auto", 1))) {
            # get the indices of the items in keep that possibly contain a regular expression
            regex_index <- grep(x = keep, pattern = "[\\|\\(\\)\\[\\{\\^\\$\\*\\+\\?\\]")
            # if regex_index has one or more indices
            if (length(regex_index)) {
                regex <- TRUE
                ## move the regular expression pattern(s) from keep into a separate variable
                regex_patterns <- keep[regex_index]
                keep <- keep[-regex_index]
            }else{
                regex <- FALSE
            }
        # if user manually forces the code not to consider regular expressions
        }else{
            regex <- FALSE
        }

        # in case user decides to only operate on non-function variables
        if (keep_functions) {
            # only put non-functions in the list
            var_list <- setdiff(ls(envir = as.environment(envir)),
                                lsf.str(envir = as.environment(envir)))
        # in case user wants to consider functions as well and remove them too
        }else{
            # put everything in the list
            var_list <- ls(envir = as.environment(envir))
        }

        #----[ check if user inserted a variable name or regexp that does not exist ]----#
        {
            # create an empty vector to store names and patterns that do not match anything
            bad_input <- c()

            #----[ variable names ]----#
            {
                # If keep has a name that is not in ls(), flag that name as bad input
                if (any(!is.element(keep, var_list))) {
                    # find which input items are not real variables
                    bad_input <- keep[which(is.element(keep, var_list) == FALSE)]
                }
            }

            #----[ regex patterns ]----#
            {
                ## check if there is any regex pattern that does not match anything
                # if there is any regex
                if (regex) {
                    # iterate through the patterns
                    for (i in regex_patterns) {
                        # if the count of found matches for this pattern is zero
                        if (length(grep(pattern = i, x = var_list)) == 0) {
                            # add it to bad_input
                            bad_input <- c(bad_input, i)
                        }
                    }
                }
            }

            # if there is any bad input
            if (length(bad_input) > 0) {
                # complain to user
                stop(paste("All the items in keep should be a real existing variable or a valid regular expression.\nThe following is/are not among the variables of the selected environment or patterns that match anything!\n", bad_input, sep = " "))
            }
        }

        # initialise a variable that contains all the possible variables
        removables <- var_list

        # if user has used a regular expression
        if (regex) {
            ## apply the regex in the following lines
            # iterate through the regex patterns
            for (i in regex_patterns) {
                # get the indices of the items in removables that match the i-th pattern
                tmp_pattern_remove_list <- grep(pattern = i, x = removables)
                # if there was any match based on the pattern
                if (length(tmp_pattern_remove_list)) {
                    # drop those variables from the removables vector
                    removables <- removables[-tmp_pattern_remove_list]
                }
            }
        }

        # if there is anything left in the keep variable
        if (length(keep)) {
            # list the names of the variables that should be removed
            removables <- removables[!(removables %in% keep)]
            # avoid any duplicates which might appear from mixing regexps and normal variable names
            removables <- unique(removables)
        }

        # if anything is left to be removed
        if (length(removables)) {
            # get the total size in bytes of the variables that are going to be removed
            total_size <- sum(sapply(removables, function(x){
                object.size(get(x, envir = as.environment(envir)))
            }))
            # remove the variables
            remove(list = removables, envir = as.environment(envir))
            # if the total size of the removed variables exceeds the threshold
            if (total_size > (gc_limit * 1024 ^ 2)) {
                # call garbage collection
                gc()
            }
        }else{
            warning("Nothing is left to be removed!")
        }
    }
}
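## Illustrative usage sketch (not from the package source): a scratch
## environment is used so the global workspace stays untouched; `keep` mixes
## a plain name with a regular expression.
e <- new.env()
e$a <- 1; e$b <- 2; e$tmp_x <- 3; e$tmp_y <- 4
rm.all.but(keep = c("a", "^tmp_"), envir = e)
ls(e)  # "a" "tmp_x" "tmp_y" -- only "b" was removed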
## end of varhandle/R/rm.all.but.R
## Function Description:
## This function gets a list of variable names as a character vector and saves
## each variable in a separate file, so that they can be loaded separately.
save.var <- function(varlist = ls(envir = as.environment(.GlobalEnv)),
                     path = getwd(), newdir = TRUE, newdirtag = NULL,
                     envir = .GlobalEnv){
    #----[ checking input ]----#
    {
        ## Check the envir attribute
        if (!is.environment(envir)) {
            stop("You should specify an existing environment")
        }

        ## check the varlist attribute
        # Check if the varlist is empty
        if (!length(varlist)) {
            stop("The varlist parameter should contain an existing variable.")
        }
        # If the varlist has a name that is not in the environment, show an
        # error with the location of the bad variable name
        if (sum(is.element(varlist, ls(envir = as.environment(envir)))) != length(varlist)) {
            bad_var_location <- which(is.element(varlist, ls(envir = as.environment(envir))) == FALSE)
            stop(paste("All the items in the varlist should be a real existing variable.\nThe item number", bad_var_location, "is not among the variables of the selected environment!\n", sep = " "))
        }

        ## check the path attribute
        # The path should be a vector of size one and the folder should exist
        if (length(path) != 1) {
            stop("The path attribute should be a vector with length 1.")
        }
        # checking if the path exists
        if (!file.exists(path)) {
            stop("The directory path should fully exist. Please use a correct existing path.")
        }

        ## check the newdir attribute
        if (newdir != TRUE & newdir != FALSE) {
            stop("The newdir attribute only accepts TRUE or FALSE as input.")
        }

        ## check the newdirtag attribute
        if (length(newdirtag)) {
            # The newdirtag should be a vector of size one
            if (length(newdirtag) != 1) {
                stop("The newdirtag attribute should be a vector with length 1.")
            }
            # If the newdirtag contains more than 240 characters
            if (nchar(newdirtag) > 240) {
                stop("The acceptable number of characters for the newdirtag attribute is 1 up to 240.")
            }
        }
    }

    #----[ create new directory ]----#
    {
        # if user wants the files in a new directory
        if (newdir == TRUE) {
            # Get the current time
            time.now <- format(Sys.time(), "%Y%m%d-%H%M%S")
            if (length(newdirtag)) {
                # Create the new folder name with the tag
                final.dir <- paste(time.now, "-", newdirtag, sep = '')
            }else{
                # Create the new folder name without a tag
                final.dir <- time.now
            }
            # create the directory in the correct path
            dir.create(file.path(path, final.dir))
        }else{
            # if user does not want a new folder, use the path itself as the final folder
            final.dir <- ""
        }
    }

    #----[ save variables ]----#
    {
        # Write the variables one by one into separate files.
        for (i in varlist) {
            save(file = file.path(path, final.dir, paste0(i, ".RData")),
                 list = i, envir = envir)
        }
    }
}
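## Illustrative usage sketch (not from the package source; variables are made
## up): two variables saved as separate .RData files inside a time-stamped,
## tagged directory under tempdir().
x <- 1:10
y <- letters
save.var(varlist = c("x", "y"), path = tempdir(), newdirtag = "demo")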
## end of varhandle/R/save.var.R
## Function Description:
## This function turns a categorical character column into multiple binary
## columns, e.g. it changes gender (male/female) into a male column and a
## female column.
to.dummy <- function(v = NULL, prefix = NULL){
    #----[ checking the input ]----#
    {
        # check if user has not provided the prefix argument
        if (is.null(prefix)) {
            stop("The input \"prefix\" is missing. This will be added to the beginning of each column name to avoid conflicts with other column names.")
        }else if (length(prefix) != 1 | nchar(prefix) == 0 | !inherits(x = prefix, what = "character")) {
            stop("The input \"prefix\" should be a character vector with length of 1 and more than 0 characters.")
        }

        # check if user has not provided the v argument
        if (is.null(v)) {
            stop("The input \"v\" is missing. It should be a vector with categories that you are willing to create dummy variables from. It can be a factor, character or numeric vector.")
        }
    }

    #----[ pre-processing ]----#
    {
        ## convert to character vector if the input is a factor
        if (inherits(v, "factor")) {
            # get the levels of v
            v_levels <- levels(v)
            # convert the factor to a character vector
            v <- as.character(v)
        }else{
            # turn the NAs into character to have a separate column for NAs
            v[which(is.na(v))] <- "NA"
            # get the categories
            v_levels <- names(table(v, useNA = "ifany"))
        }
    }

    #----[ processing ]----#
    {
        # go through the categories one by one
        for (i in 1:length(v_levels)) {
            # create a 0/1 vector which has 1 for places that match the category
            assign(x = paste("v", i, sep = ""), value = as.numeric(v == v_levels[i]))
        }
        # build a cbind command and run it; it attaches the variables generated in the for loop above
        df <- eval(parse(text = paste("cbind(", paste('v', 1:i, sep = '', collapse = ", "), ")", collapse = "", sep = "")))
        # strip white space from the beginning and end of each name and replace inner white space with "_"
        factor_levels <- gsub("\\s+", "_", gsub("^\\s+|\\s+$", "", v_levels))
        # if one of the levels is "", we should create a name for it, so we use "BLANK"
        factor_levels[which(factor_levels == "")] <- "BLANK"
        # set the colnames
        colnames(df) <- paste(prefix, factor_levels, sep = ".")
        # return the final matrix
        return(df)
    }
}
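## Illustrative usage sketch (not from the package source; values are made
## up): one indicator column per category; missing values get their own
## "NA" column (the column order may vary with the locale).
gender <- c("male", "female", "female", "male", NA)
to.dummy(gender, prefix = "sex")  # columns sex.female, sex.male and sex.NA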
## end of varhandle/R/to.dummy.R
## Function Description:
## Turn factors back into their real values.
## If given a matrix or data.frame, it detects the factor columns and
## unfactors them. If everything in a column is just numbers and a decimal
## character, the column is changed to numeric, otherwise to character.
unfactor <- function(obj = NULL, auto_class_conversion = TRUE, verbose = FALSE){
    #-------[ checking the input ]-------#
    {
        # if obj was not defined by user
        if (is.null(obj)) {
            stop("Please provide the obj which can be a matrix, data.frame or a vector.")
        }else if (!inherits(obj, c("data.frame", "matrix", "factor"))) {
            # if the provided object is not a data.frame, matrix or factor vector, throw an error
            stop(paste("Please provide the obj which can be a matrix, data.frame or a vector. The provided obj has the class of", paste(class(obj), collapse = ", ")))
        }
    }

    #-------[ internal functions ]-------#
    {
        # an internal function to perform the conversion on one single vector
        inner_func_convert <- function(x){
            # turn x into character
            x <- as.character(x)
            # if user wants the conversion to numeric to happen automatically
            if(auto_class_conversion){
                # check if there is nothing but numbers
                if (all(check.numeric(v = x, na.rm = FALSE))) {
                    # everything is numbers, so change it to numeric
                    x <- as.numeric(x)
                }
            }
            # return the result
            return(x)
        }

        # an internal function to create messages if verbose is TRUE
        inner_func_msg <- function(...){
            message_text <- paste(unlist(list(...)), collapse = "")
            if(verbose){
                message(message_text)
            }
        }
    }

    #-------[ processing ]-------#
    {
        # save the obj class
        obj_class <- class(obj)
        inner_func_msg("The provided object has the following class",
                       ifelse(length(obj_class) > 1, "es", ""), ":\n\t",
                       paste(obj_class, collapse = ", "), "\n")

        # if obj is a factor vector
        if (inherits(obj, "factor")) {
            # convert obj to character
            obj <- as.character(obj)
            # check if there is nothing but numbers
            if (all(check.numeric(v = obj, na.rm = FALSE))) {
                # everything is numbers, so change it to numeric
                obj <- as.numeric(obj)
            }
        # if the object is a matrix or data.frame
        }else{
            # find the indices of the factor columns
            factor_columns_indices <- which(sapply(obj, is.factor))
            inner_func_msg("The provided object has ", length(factor_columns_indices),
                           " columns with class of factor.\n")

            if(length(factor_columns_indices)){
                if(length(factor_columns_indices) == 1) {
                    # convert the single factor column into the appropriate class
                    obj[, factor_columns_indices] <- inner_func_convert(obj[, factor_columns_indices])
                }else{
                    # iterate through the factor columns and turn them into the appropriate class
                    obj[, factor_columns_indices] <- data.frame(lapply(data.frame(obj[, factor_columns_indices]), inner_func_convert), stringsAsFactors = FALSE)
                }
            }else{
                inner_func_msg("No factor was found, hence no action was taken.")
            }
        }

        # return the result
        return(obj)
    }
}
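## Illustrative usage sketch (not from the package source; the data.frame is
## made up): factor columns are converted back to numeric or character
## depending on their contents.
df <- data.frame(id  = factor(c("10", "20", "30")),
                 lab = factor(c("a", "b", "c")))
str(unfactor(df))  # id becomes numeric, lab becomes character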
## end of varhandle/R/unfactor.R
## create a good info table for the variables
# http://stackoverflow.com/questions/1358003/tricks-to-manage-the-available-memory-in-an-r-session
# (reports, among other things, the total memory the variables are using)
var.info <- function(list = "ALL", regex = NULL, envir = .GlobalEnv,
                     human.readable = TRUE, sortby = "size",
                     decreasing = TRUE, n = Inf, beautify = FALSE,
                     progressbar = FALSE){
    #-------[ checking the input ]-------#
    {
        #-------[ envir ]-------#
        {
            if(!is.environment(envir)){
                stop("The provided object for the \"envir\" argument is not an environment. Use the following command to verify that the object is an environment:\n\tis.environment()")
            }
        }

        #-------[ list ]-------#
        {
            if (is.null(list)) {
                stop("The parameter \"list\" should be defined. It should contain a list of variable names with the length of 1 or more.")
            }else if (!inherits(list, "character")) {
                stop('The parameter \"list\" should contain the names of the variables you want to get info about, as a character vector (put the names in "")')
            }else if (length(list) == 1) {
                if (list == "ALL") {
                    # if user has selected nothing or "ALL" for list, the function uses ls() of the provided environment
                    var_list <- ls(envir = envir)
                    # if the variable list is empty
                    if(length(var_list) == 0){
                        # if user has not set a specific environment
                        if(identical(envir, .GlobalEnv)){
                            stop("There is no variable to be processed.")
                        }else{
                            stop("There is no variable in the specified environment to be processed.")
                        }
                    }
                }else{
                    # if user has provided a single name, put it into var_list
                    var_list <- list
                }
            }else{
                # if user has provided a series of names, pour them into var_list
                var_list <- list
            }

            # If there is a name that is not in ls(), show an error listing the bad variable names
            if (any(!is.element(var_list, ls(envir = as.environment(envir))))) {
                # get the indices of the faulty variable names
                bad_var_location <- which(!is.element(var_list, ls(envir = as.environment(envir))))
                stop(paste("All the items in the \"list\" should be real existing variables.\nThe item", paste(var_list[bad_var_location], collapse = ", "), "is not among the variables of the selected environment!\n", sep = " "))
            }
        }

        #-------[ regex ]-------#
        {
            # if user has defined a regular expression to narrow down the variable names
            if(!is.null(regex)){
                if(!any(grepl(x = var_list, pattern = regex))){
                    stop("After applying the provided regex, no variable is left in the list to be processed! Check your regular expression.")
                }
                # apply the regular expression to the list of variables
                var_list <- var_list[grepl(x = var_list, pattern = regex)]
            }
        }

        #-------[ sortby ]-------#
        {
            # check that the `sortby` argument is among the valid column names
            sortby.index <- match(sortby, c("name", "class", "size", "detail"))
            if (is.na(sortby.index) | length(sortby) != 1) {
                stop("The column specified by the parameter \"sortby\" is not valid. Valid columns are:\n name, class, size, detail\nOne column should be selected.")
            }
        }

        #-------[ human.readable ]-------#
        {
            # check the human.readable input
            if (!is.logical(human.readable)) {
                stop("The parameter \"human.readable\" should either be TRUE or FALSE.")
            }
        }

        #-------[ decreasing ]-------#
        {
            # check the decreasing input
            if (!is.logical(decreasing)) {
                stop("The parameter \"decreasing\" should either be TRUE or FALSE.")
            }
        }

        #-------[ n ]-------#
        {
            # check the n input
            if ((!inherits(n, "numeric")) | n < 1 | length(n) != 1) {
                stop("The parameter \"n\" should be a single positive integer number more than zero.")
            }
        }

        #-------[ beautify ]-------#
        {
            # check the beautify input
            if (!is.logical(beautify)) {
                stop("The parameter \"beautify\" should either be TRUE or FALSE.")
            }
        }

        #-------[ progressbar ]-------#
        {
            # check the progressbar input
            if (!is.logical(progressbar)) {
                stop("The parameter \"progressbar\" should either be TRUE or FALSE.")
            }
        }
    }

    #-------[ processing ]-------#
    {
        ## define variables to fill in the for loop
        var.size.readable <- vector()
        var.size.byte <- vector()
        var.class <- vector()
        var.detail <- vector()

        # if user wants to have a progressbar
        if (progressbar) {
            # create the progressbar
            pb <- txtProgressBar(min = 1, max = length(var_list), style = 3)
        }

        # iterate through var_list
        for (i in var_list) {
            # get the variable
            the.var <- get(i, envir = as.environment(envir))

            ## get the size
            # size in byte format
            var.size.byte <- c(var.size.byte, object.size(x = the.var))
            # if user wants to get object sizes in human readable format
            if (human.readable) {
                # this will be in human readable format
                var.size.readable <- c(var.size.readable,
                                       format(object.size(x = the.var), units = "auto"))
            }

            # get the class and append it to the collective variable
            var.class <- c(var.class,
                           paste0(class(get(i, envir = as.environment(envir))), collapse = ", "))

            ## get the dim for matrix/data.frame
            if (inherits(the.var, c("data.frame", "matrix", "Matrix"))) {
                var.detail <- c(var.detail, paste("dimension:", paste0(dim(the.var), collapse = ", ")))
            }else if (inherits(the.var, c("integer", "numeric", "character", "factor", "logical", "list"))) {
                var.detail <- c(var.detail, paste("length:", length(the.var)))
            }else{
                var.detail <- c(var.detail, NA)
            }

            # if user wants to have a progressbar
            if (progressbar) {
                # increment the progressbar by one step
                setTxtProgressBar(pb, getTxtProgressBar(pb) + 1)
            }
        }

        # if user wants to have a progressbar
        if (progressbar) {
            # close the progressbar
            close(pb)
        }

        ## construct the output
        # if user has selected human readable variable size values
        if (human.readable) {
            output <- data.frame(name = var_list, class = var.class,
                                 size = var.size.readable, detail = var.detail)
        }else{
            output <- data.frame(name = var_list, class = var.class,
                                 size = var.size.byte, detail = var.detail)
        }

        ## sort based on user's preference
        if((sortby == "size") & human.readable) {
            output <- output[order(var.size.byte, decreasing = decreasing), ]
        }else{
            output <- output[order(output[, sortby.index], decreasing = decreasing), ]
        }

        # if user has chosen to get a beautified table
        if(beautify){
            # add a visual cue for the sorting direction in the column name
            colnames(output)[sortby.index] <- paste0(ifelse(decreasing == TRUE,
                                                            paste0("[", intToUtf8(0x25BE), "] "),
                                                            paste0("[", intToUtf8(0x25B4), "] ")),
                                                     colnames(output)[sortby.index])
        }

        # correct the rownames
        row.names(output) <- 1:nrow(output)

        ## select the number of desired rows
        if (is.finite(n)) {
            # convert to integer
            n <- as.integer(n)
            # if n is larger than the number of rows we have
            if (nrow(output) < n) {
                # use the number of output rows as n
                n <- nrow(output)
            }
            # select the rows
            output <- output[1:n, ]
        }
    }

    # return the output
    return(output)
}
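## Illustrative usage sketch (not from the package source): summarising the
## objects of a scratch environment, largest first (the default sorting).
e <- new.env()
e$m <- matrix(0, 100, 100)
e$s <- "hello"
var.info(envir = e)  # name/class/size/detail, sorted by size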
## end of varhandle/R/var.info.R
#' @title Analysis of Covariance
#' @param data traits to be analyzed
#' @param genotypes vector containing genotypes
#' @param replication vector containing replications
#' @return ANCOVA, genotypic and phenotypic covariance
#' @importFrom stats pf
#' @export
#' @examples
#' data(vardata)
#' ancova(vardata[3:11],vardata$Genotypes,vardata$Replication)
ancova <- function(data, genotypes, replication){
  convert <- function(data1){
    data1 <- as.data.frame(sapply(data1, as.numeric))
    data1 <- as.list(data1)
    return(data1)
  }
  datam <- convert(data)
  num <- ncol(data)
  analysis <- function(genotypes, replication, trait1, trait2){
    genotypes <- as.factor(genotypes)
    replication <- as.factor(replication)
    sumch1 <- tapply(trait1, genotypes, sum)
    sumch2 <- tapply(trait2, genotypes, sum)
    sumr1 <- tapply(trait1, replication, sum)
    sumr2 <- tapply(trait2, replication, sum)
    repli <- nlevels(replication)
    genotype <- nlevels(genotypes)
    GT1 <- sum(trait1)
    GT2 <- sum(trait2)
    CF <- (GT1 * GT2) / (repli * genotype)
    TSP <- round(sum(trait1 * trait2) - CF, 5)
    GSP <- round((sum(sumch1 * sumch2) / repli) - CF, 4)
    RSP <- round((sum(sumr1 * sumr2) / genotype) - CF, 4)
    ESP <- TSP - GSP - RSP
    DFR <- repli - 1
    DFG <- genotype - 1
    DFE <- DFR * DFG
    RMP <- round(RSP / DFR, 4)
    GMP <- round(GSP / DFG, 4)
    EMP <- round(ESP / DFE, 4)
    RFval <- round(RMP / EMP, 4)
    GFval <- round(GMP / EMP, 4)
    rpvalue <- round(pf(RFval, DFR, DFE, lower.tail = FALSE), 4)
    gpvalue <- round(pf(GFval, DFG, DFE, lower.tail = FALSE), 4)
    EnvCov <- EMP
    GenCov <- round((GMP - EMP) / repli, 4)
    PhenCov <- round(EMP + ((GMP - EMP) / repli), 4)
    ANCOVA <- matrix(data = c(DFR, RSP, RMP, RFval, rpvalue,
                              DFG, GSP, GMP, GFval, gpvalue,
                              DFE, ESP, EMP, NA, NA,
                              EMP, NA, NA, NA, NA,
                              GenCov, NA, NA, NA, NA,
                              PhenCov, NA, NA, NA, NA),
                     dimnames = list(c("Replication", "Genotypes", "Error",
                                       "Environmental Covariance",
                                       "Genotypic Covariance",
                                       "Phenotypic Covariance"),
                                     c("DF", "SP", "MP", "F Cal", "p-value")),
                     nrow = 6, byrow = TRUE)
    table1 <- as.table(ANCOVA, useNa = FALSE)
    return(table1)
  }
  list1 <- list()
  index <- 0
  for (i in 1:(num - 1)){
    for (j in (i + 1):num){
      index <- index + 1
      list1[[index]] <- analysis(genotypes, replication, datam[[i]], datam[[j]])
    }
  }
  naming <- names(datam)
  combi <- c()
  index <- 0
  for (i in 1:(num - 1)){
    for (j in (i + 1):num){
      index <- index + 1
      combi[index] <- paste(naming[i], naming[j])
    }
  }
  names(list1) <- combi
  return(list1)
}
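## Illustrative usage sketch (not from the package source): the return value
## is a named list with one ANCOVA table per trait pair, so individual pairs
## can be pulled out by position or by their "trait1 trait2" name.
data(vardata)
res <- ancova(vardata[3:11], vardata$Genotypes, vardata$Replication)
names(res)  # the trait-pair labels (they depend on the column names)
res[[1]]    # ANCOVA table for the first pair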
## end of variability/R/ancova.R
#' @title Genotypic Correlation Analysis
#' @param data traits to be analyzed
#' @param genotypes vector containing genotypes
#' @param replication vector containing replications
#' @return Genotypic correlation matrix
#' @importFrom stats anova lm pt
#' @examples
#' data(vardata)
#' geno.corr(vardata[3:11],vardata$Genotypes,vardata$Replication)
#' @export
geno.corr <- function(data, genotypes, replication){
  convert <- function(data1){
    data1 <- as.data.frame(sapply(data1, as.numeric))
    data1 <- as.list(data1)
    return(data1)
  }
  genotypes <- as.factor(genotypes)
  replication <- as.factor(replication)
  colnumber <- ncol(data)
  headings <- names(data)
  data2 <- convert(data)
  gen.cor1 <- function(genotypes, replication, trait1, trait2){
    genotypes <- as.factor(genotypes)
    replication <- as.factor(replication)
    sumch1 <- tapply(trait1, genotypes, sum)
    sumch2 <- tapply(trait2, genotypes, sum)
    sumr1 <- tapply(trait1, replication, sum)
    sumr2 <- tapply(trait2, replication, sum)
    repli <- nlevels(replication)
    genotype <- nlevels(genotypes)
    GT1 <- sum(trait1)
    GT2 <- sum(trait2)
    CF <- (GT1 * GT2) / (repli * genotype)
    TSP <- round(sum(trait1 * trait2) - CF, 6)
    GSP <- round((sum(sumch1 * sumch2) / repli) - CF, 6)
    RSP <- round((sum(sumr1 * sumr2) / genotype) - CF, 6)
    ESP <- TSP - GSP - RSP
    DFR <- repli - 1
    DFG <- genotype - 1
    DFE <- DFR * DFG
    RMP <- round(RSP / DFR, 6)
    GMP <- round(GSP / DFG, 6)
    EMP <- round(ESP / DFE, 6)
    EnvCov <- EMP
    GenCov <- round((GMP - EMP) / repli, 6)
    model <- lm(trait1 ~ replication + genotypes)
    anova.model <- anova(model)
    EMS <- anova.model[3, 3]
    GV <- round((anova.model[2, 3] - EMS) / repli, 6)
    model1 <- lm(trait2 ~ replication + genotypes)
    anova.model1 <- anova(model1)
    EMS1 <- anova.model1[3, 3]
    GV1 <- round((anova.model1[2, 3] - EMS1) / repli, 6)
    r <- round(GenCov / sqrt(GV * GV1), 4)
    SEm <- sqrt((abs(1 - (r^2))) / (nlevels(genotypes) - 2))
    tvalue <- r / SEm
    pvalue <- 2 * pt(abs(tvalue), nlevels(genotypes) - 2, lower.tail = FALSE)
    ifelse(pvalue <= 0.01, r <- paste(r, "**"),
           ifelse(pvalue > 0.05, r <- paste(r, "NS"), r <- paste(r, "*")))
    return(r)
  }
  genetic.corr <- c()
  index <- 0
  for (i in 1:(colnumber)){
    for (j in 1:colnumber){
      index <- index + 1
      genetic.corr[index] <- gen.cor1(genotypes, replication, data2[[i]], data2[[j]])
    }
  }
  matrix1 <- noquote(matrix(genetic.corr, nrow = colnumber,
                            dimnames = list(headings, headings)))
  matrix2 <- as.table(matrix1, useNa = FALSE)
  Note1 <- "The significance of the genotypic correlation was tested with a t test (two-tail). The degrees of freedom used are the number of genotypes - 2."
  Note2 <- "If NaNs are produced, check for negative genotypic variance in one or more traits."
  output <- list(GenotypicCorrelation = matrix2, Note1 = Note1, Note2 = Note2)
  return(output)
}
## end of variability/R/gencorr.R
#' @title Genotypic Path Analysis
#' @param dependent.var trait to be used as the dependent variable
#' @param independent.var traits to be used as independent variables
#' @param genotypes vector containing genotypes
#' @param replication vector containing replications
#' @return Direct effects, indirect effects and residual
#' @importFrom stats anova lm
#' @examples
#' data(vardata)
#' # Grain yield is considered as a dependent variable
#' geno.path(vardata[11],vardata[3:10],vardata$Genotypes,vardata$Replication)
#' @export
geno.path <- function(dependent.var, independent.var, genotypes, replication){
  convert <- function(data1){
    data1 <- as.data.frame(sapply(data1, as.numeric))
    data1 <- as.list(data1)
    return(data1)
  }
  genotypes <- as.factor(genotypes)
  replication <- as.factor(replication)
  colnumber <- ncol(independent.var)
  totalnumber <- colnumber + 1
  headings <- names(independent.var)
  totaldata <- data.frame(independent.var, dependent.var)
  data2 <- convert(totaldata)
  gen.cor1 <- function(genotypes, replication, trait1, trait2){
    sumch1 <- tapply(trait1, genotypes, sum)
    sumch2 <- tapply(trait2, genotypes, sum)
    sumr1 <- tapply(trait1, replication, sum)
    sumr2 <- tapply(trait2, replication, sum)
    repli <- nlevels(replication)
    genotype <- nlevels(genotypes)
    GT1 <- sum(trait1)
    GT2 <- sum(trait2)
    CF <- (GT1 * GT2) / (repli * genotype)
    TSP <- round(sum(trait1 * trait2) - CF, 5)
    GSP <- round((sum(sumch1 * sumch2) / repli) - CF, 5)
    RSP <- round((sum(sumr1 * sumr2) / genotype) - CF, 5)
    ESP <- TSP - GSP - RSP
    DFR <- repli - 1
    DFG <- genotype - 1
    DFE <- DFR * DFG
    RMP <- round(RSP / DFR, 5)
    GMP <- round(GSP / DFG, 5)
    EMP <- round(ESP / DFE, 5)
    EnvCov <- EMP
    GenCov <- round((GMP - EMP) / repli, 5)
    model <- lm(trait1 ~ replication + genotypes)
    anova.model <- anova(model)
    EMS <- anova.model[3, 3]
    GV <- abs(round((anova.model[2, 3] - EMS) / repli, 5))
    model1 <- lm(trait2 ~ replication + genotypes)
    anova.model1 <- anova(model1)
    EMS1 <- anova.model1[3, 3]
    GV1 <- abs(round((anova.model1[2, 3] - EMS1) / repli, 5))
    r <- round(GenCov / sqrt(GV * GV1), 5)
    return(r)
  }
  genetic.corr <- c()
  index <- 0
  for (i in 1:(totalnumber)){
    for (j in 1:totalnumber){
      index <- index + 1
      genetic.corr[index] <- gen.cor1(genotypes, replication, data2[[i]], data2[[j]])
    }
  }
  matribapu <- matrix(genetic.corr, nrow = totalnumber)
  corr.ind <- matribapu[1:colnumber, 1:colnumber]
  corr.dep <- matribapu[1:colnumber, totalnumber]
  Direct <- solve(corr.ind, corr.dep)
  Coefficient <- corr.ind
  for (i in 1:colnumber) {
    for (j in 1:colnumber) {
      Coefficient[i, j] <- Direct[j] * corr.ind[i, j]
    }
  }
  Coefficient <- round(Coefficient, 5)
  rownames(Coefficient) <- headings
  colnames(Coefficient) <- headings
  residual <- round(1 - t(Direct) %*% corr.dep, 4)
  finaloutput <- list(Effects = Coefficient, Residual = residual)
  return(finaloutput)
}
## end of variability/R/genopath.R
#' @title Estimation of Genetic Parameters
#' @param data traits to be analyzed
#' @param genotypevector vector containing genotypes
#' @param replicationvector vector containing replications
#' @importFrom stats anova lm qt
#' @return ANOVA, genotypic and phenotypic coefficient of variance, heritability, genetic advance and genetic advance as percentage of mean.
#' @export
#' @examples
#' data(vardata)
#' gen.var(vardata[3:11],vardata$Genotypes,vardata$Replication)
gen.var <- function(data, genotypevector, replicationvector){
  convert <- function(data1){
    data1 <- as.data.frame(sapply(data1, as.numeric))
    data1 <- as.list(data1)
    return(data1)
  }
  analysis <- function(data1, genotypevector, replicationvector){
    data2 <- as.numeric(data1)
    genotype <- as.factor(genotypevector)
    replication <- as.factor(replicationvector)
    r <- nlevels(replication)
    model <- lm(data2 ~ replication + genotype)
    anova.model <- anova(model)
    Maxi <- round(max(data2), 4)
    Mini <- round(min(data2), 4)
    GM <- round(mean(data2), 4)
    EMS <- anova.model[3, 3]
    Env.var <- round(EMS, 4)
    SEm <- round(sqrt(EMS / r), 4)
    CD5 <- round(sqrt(EMS / r) * sqrt(2) * abs(qt(0.025, anova.model[3, 1])), 4)
    if (anova.model[2, 5] > 0.05){
      CD5 <- paste(round(sqrt(EMS / r) * sqrt(2) * abs(qt(0.025, anova.model[3, 1])), 4), "NS")
    }
    CD1 <- round(sqrt(EMS / r) * sqrt(2) * abs(qt(0.005, anova.model[3, 1])), 4)
    if (anova.model[2, 5] > 0.01){
      CD1 <- paste(round(sqrt(EMS / r) * sqrt(2) * abs(qt(0.005, anova.model[3, 1])), 4), "NS")
    }
    GV <- round((anova.model[2, 3] - EMS) / r, 4)
    GVV <- round((anova.model[2, 3] - EMS) / r, 4)
    if (GV < 0){
      GV <- paste("Note: GV is negative", round((anova.model[2, 3] - EMS) / r, 4))
    }
    PV <- round(GVV + EMS, 4)
    if (GVV < 0){
      GCV <- paste("Note: GV is negative, GCV calculated by using absolute GV",
                   round((sqrt(abs(GVV)) / mean(data2)) * 100, 4))
    }else{
      GCV <- round((sqrt(GVV) / mean(data2)) * 100, 4)
    }
    PCV <- round((sqrt(PV) / mean(data2)) * 100, 4)
    ECV <- round((sqrt(EMS) / mean(data2)) * 100, 4)
    hs <- round((GVV / PV), 4)
    ga <- round((GVV / PV) * 2.06 * sqrt(PV), 4)
    gam <- round((ga / mean(data2)) * 100, 4)
    matri <- matrix(data = c(Maxi, Mini, GM, SEm, CD5, CD1, Env.var, GV, PV,
                             ECV, GCV, PCV, hs, ga, gam),
                    dimnames = list(c("Maximum", "Minimum", "Grand Mean",
                                      "Standard Error of Mean (SEm)",
                                      "Critical Difference (CD) 5%",
                                      "Critical Difference (CD) 1%",
                                      "Environmental Variance",
                                      "Genotypic Variance",
                                      "Phenotypic Variance",
                                      "Environmental Coefficient of Variance",
                                      "Genotypic Coefficient of Variance",
                                      "Phenotypic Coefficient of Variance",
                                      "Heritability (Broad Sense)",
                                      "Genetic Advance",
                                      "Genetic Advance as percentage of mean")),
                    nrow = 15)
    table1 <- as.table(matri, useNa = FALSE)
    my.list <- list(anova.model, table1)
    return(my.list)
  }
  fiftn <- convert(data)
  colnumber <- ncol(data)
  output <- list()
  for (j in 1:colnumber) {
    output[[j]] <- analysis(fiftn[[j]], genotypevector, replicationvector)
  }
  names(output) <- names(data)
  return(output)
}
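## Illustrative usage sketch (not from the package source): for every trait
## the function returns a two-element list holding the ANOVA and the table of
## genetic parameters, indexable by trait name or position.
data(vardata)
res <- gen.var(vardata[3:11], vardata$Genotypes, vardata$Replication)
res[[1]][[1]]  # ANOVA for the first trait
res[[1]][[2]]  # genetic parameter table for the first trait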
/scratch/gouwar.j/cran-all/cranData/variability/R/genvar.R
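A minimal sketch (hypothetical balanced data, not from the package) of the variance components gen.var() derives from the ANOVA mean squares; with weak genotype effects GV can come out negative, which gen.var() flags in its output rather than hiding.

set.seed(1)
d   <- data.frame(g = factor(rep(1:10, each = 3)), r = factor(rep(1:3, 10)))
d$y <- as.numeric(d$g) + rnorm(30)       # strong genotype signal so GV > 0 here
a   <- anova(lm(y ~ r + g, data = d))    # rows: replication, genotype, residual
EMS <- a[3, 3]                           # error (environmental) mean square
GV  <- (a[2, 3] - EMS) / 3               # genotypic variance, r = 3 replications
PV  <- GV + EMS                          # phenotypic variance
c(GCV = sqrt(GV) / mean(d$y) * 100,      # genotypic coefficient of variation
  h2  = GV / PV,                         # broad-sense heritability
  GA  = (GV / PV) * 2.06 * sqrt(PV))     # genetic advance at 5% selection intensity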
#' @title Phenotypic Correlation Analysis #' @param data traits to be analyzed #' @param genotypes vector containing genotypes #' @param replication vector containing replications #' @return Phenotypic correlation #' @export #' @importFrom stats anova lm pt #' @examples #' data(vardata) #' pheno.corr(vardata[3:11],vardata$Genotypes,vardata$Replication) pheno.corr<-function(data,genotypes,replication){ convert<-function (data1) { data1 <- as.data.frame(sapply(data1, as.numeric)) data1 <- as.list(data1) return(data1) } genotypes<-as.factor(genotypes) replication<-as.factor(replication) colnumber <- ncol(data) headings<-names(data) data2<-convert(data) phenotypic.cor1<-function(genotypes,replication,trait1,trait2){ genotypes<-as.factor(genotypes) replication<-as.factor(replication) sumch1<-tapply(trait1,genotypes,sum) sumch2<-tapply(trait2,genotypes,sum) sumr1<-tapply(trait1,replication,sum) sumr2<-tapply(trait2,replication,sum) repli<-nlevels(replication) genotype<-nlevels(genotypes) tdf<-repli*genotype GT1<-sum(trait1) GT2<-sum(trait2) CF<-(GT1*GT2)/(repli*genotype) TSP<-round(sum(trait1*trait2)-CF,6) GSP<-round((sum(sumch1*sumch2)/repli)-CF,6) RSP<-round((sum(sumr1*sumr2)/genotype)-CF,6) ESP<-TSP-GSP-RSP DFR<-repli-1 DFG<-genotype-1 DFE<-DFR*DFG RMP<-round(RSP/DFR,6) GMP<-round(GSP/DFG,6) EMP<-round(ESP/DFE,6) EnvCov<-EMP GenCov<-round((GMP-EMP)/repli,6) PhenoCov<-EnvCov+GenCov model <- lm(trait1 ~ replication + genotypes) anova.model <- anova(model) EMS<-anova.model[3,3] GV<-round((anova.model[2,3]-EMS)/repli,6) PV<-GV+EMS model1 <- lm(trait2 ~ replication + genotypes) anova.model1 <- anova(model1) EMS1<-anova.model1[3,3] GV1<-round((anova.model1[2,3]-EMS1)/repli,6) PV1<-EMS1+GV1 r<-round(PhenoCov/sqrt(PV*PV1),4) SEm<-sqrt((abs(1-(r^2)))/(tdf-2)) tvalue<-r/SEm pvalue<-2*pt(abs(tvalue),tdf-2,lower.tail = F) ifelse(pvalue<=0.01,r<-paste(r,"**"),ifelse(pvalue>0.05,r<-paste(r,"NS"),r<-paste(r,"*"))) return(r) } phenotypic.corr <- c() index=0 for (i in 1:(colnumber)){ for (j in 1:colnumber){ index=index+1 phenotypic.corr[index]<-phenotypic.cor1(genotypes,replication,data2[[i]],data2[[j]]) }} matribapu<-noquote(matrix(phenotypic.corr,nrow = colnumber,dimnames =list(headings,headings))) matribapu1<-as.table(matribapu,useNa=F) Note<-"The significance of the phenotypic correlation was tested using a two-tailed t test. The degrees of freedom used are (genotypes * replications) - 2" output<-list(PhenotypicCorrelation=matribapu1,Note=Note) return(output) }
/scratch/gouwar.j/cran-all/cranData/variability/R/phencorr.R
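A minimal sketch (hypothetical correlation and design sizes) of the significance test pheno.corr() attaches to each coefficient: a two-tailed t test with genotypes * replications - 2 degrees of freedom.

r  <- 0.42                               # hypothetical phenotypic correlation
df <- 40 * 3 - 2                         # genotypes * replications - 2
t  <- r / sqrt((1 - r^2) / df)
2 * pt(abs(t), df, lower.tail = FALSE)   # p-value; flagged "**" if <= .01, "*" if <= .05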
#' @title Phenotypic Path Analysis #' @param dependent.var trait to be considered as a dependent variable #' @param independent.var traits to be considered as independent variables #' @param genotypes vector containing genotypes #' @param replication vector containing replications #' @return Direct effects, indirect effects and residual #' @importFrom stats anova lm #' @examples #' data(vardata) #' pheno.path(vardata[11],vardata[3:10],vardata$Genotypes,vardata$Replication) #' @export pheno.path<-function(dependent.var,independent.var,genotypes,replication){ convert<-function (data1) { data1 <- as.data.frame(sapply(data1, as.numeric)) data1 <- as.list(data1) return(data1) } genotypes<-as.factor(genotypes) replication<-as.factor(replication) colnumber <- ncol(independent.var) totalnumber<-colnumber+1 headings<-names(independent.var) totaldata<-data.frame(independent.var,dependent.var) data2<-convert(totaldata) phenotypic.cor1<-function(genotypes,replication,trait1,trait2){ sumch1<-tapply(trait1,genotypes,sum) sumch2<-tapply(trait2,genotypes,sum) sumr1<-tapply(trait1,replication,sum) sumr2<-tapply(trait2,replication,sum) repli<-nlevels(replication) genotype<-nlevels(genotypes) GT1<-sum(trait1) GT2<-sum(trait2) CF<-(GT1*GT2)/(repli*genotype) TSP<-round(sum(trait1*trait2)-CF,3) GSP<-round((sum(sumch1*sumch2)/repli)-CF,3) RSP<-round((sum(sumr1*sumr2)/genotype)-CF,3) ESP<-TSP-GSP-RSP DFR<-repli-1 DFG<-genotype-1 DFE<-DFR*DFG RMP<-round(RSP/DFR,3) GMP<-round(GSP/DFG,3) EMP<-round(ESP/DFE,3) EnvCov<-EMP GenCov<-round((GMP-EMP)/repli,3) PhenCov<-round(EMP+((GMP-EMP)/repli),3) model <- lm(trait1 ~ replication + genotypes) anova.model <- anova(model) EMS<-anova.model[3,3] GV<-abs(round((anova.model[2,3]-EMS)/repli,4)) PV<-round(GV+EMS,4) model1 <- lm(trait2 ~ replication + genotypes) anova.model1 <- anova(model1) EMS1<-anova.model1[3,3] GV1<-abs(round((anova.model1[2,3]-EMS1)/repli,4)) PV1<-round(GV1+EMS1,4) r<-round(PhenCov/sqrt(PV*PV1),4) return(r) } phenotypic.corr <- c() index=0 for (i in 1:(totalnumber)){ for (j in 1:totalnumber){ index=index+1 phenotypic.corr[index]<-phenotypic.cor1(genotypes,replication,data2[[i]],data2[[j]]) }} matribapu<-matrix(phenotypic.corr,nrow = totalnumber) corr.ind<-matribapu[1:colnumber,1:colnumber] corr.dep<-matribapu[1:colnumber,totalnumber] Direct <- solve(corr.ind, corr.dep) Coefficient <- corr.ind for (i in 1:colnumber) { for (j in 1:colnumber) { Coefficient[i, j] <- Direct[j] * corr.ind[i, j] } } Coefficient<-round(Coefficient,5) rownames(Coefficient)<-headings colnames(Coefficient)<-headings residual <- round(1 - t(Direct) %*% corr.dep,4) finaloutput<-list(Effects=Coefficient,Residual=residual) return(finaloutput) }
/scratch/gouwar.j/cran-all/cranData/variability/R/phenopath.R
#' @title Variability Data #' @description The data consists of genotypes, replications and nine traits #' @format The data has 11 columns and 120 rows #' \describe{ #' \item{Genotypes}{40 genotypes} #' \item{Replication}{3 replications} #' \item{DFF}{Days to 50 per cent flowering} #' \item{PH}{Plant height} #' \item{PL}{Panicle length} #' \item{PW}{Panicle weight} #' \item{HI}{Harvest index} #' \item{TW}{Test weight} #' \item{MILL}{Milling percentage} #' \item{HRR}{Head rice recovery} #' \item{GY}{Grain Yield} #' } "vardata"
/scratch/gouwar.j/cran-all/cranData/variability/R/vardata.R
.var <- function(name, desc = NULL) { ret <- list(name = name, desc = desc) class(ret) <- "var" ret } factor_var <- function(name, desc = NULL, levels, ...) { ret <- .var(name = name, desc = desc) ret$support <- factor(levels, levels = levels, labels = levels) class(ret) <- c("factor_var", class(ret)) ret } ordered_var <- function(name, desc = NULL, levels, sparse = FALSE, ...) { ret <- factor_var(name = name, desc = desc, levels = levels) ret$support <- as.ordered(ret$support) ret$sparse <- sparse class(ret) <- c("ordered_var", class(ret)) ret } numeric_var <- function(name, desc = NULL, unit = NULL, support = c(0.0, 1.0), add = c(0, 0), bounds = NULL, ...) { ret <- .var(name = name, desc = desc) ret$unit <- unit stopifnot(length(support) >= 2L) stopifnot(all(is.finite(support))) stopifnot(is.integer(support) || is.double(support)) if (is.integer(support) && length(support) == 2L) support <- support[1]:support[2] ret$support <- support discrete <- is.integer(support) || (length(support) > 2L) if (discrete) { ### <FIXME> why??? stopifnot(is.null(bounds)) ### </FIXME> class(ret) <- c("discrete_var", "numeric_var", class(ret)) return(ret) } if (is.null(bounds)) bounds <- c(-Inf, Inf) stopifnot(bounds[1] <= min(support)) stopifnot(max(support) <= bounds[2]) ret$bounds <- bounds stopifnot(add[1] <= 0 && add[2] >= 0) ret$add <- add class(ret) <- c("continuous_var", "numeric_var", class(ret)) ret } c.var <- function(...) { ret <- list(...) nm <- sapply(ret, variable.names) stopifnot(all(!duplicated(nm))) ### make sure no duplicate names names(ret) <- nm stopifnot(all(sapply(ret, function(x) inherits(x, "var")))) class(ret) <- "vars" ret } variable.names.var <- function(object, ...) object$name variable.names.vars <- function(object, ...) sapply(object, variable.names) desc <- function(object) UseMethod("desc") desc.var <- function(object) object$desc desc.vars <- function(object) sapply(object, desc) unit <- function(object) UseMethod("unit") unit.numeric_var <- function(object) object$unit unit.var <- function(object) return(NA) unit.vars <- function(object) sapply(object, unit) support <- function(object) UseMethod("support") support.var <- function(object) return(structure(list(object$support), names = variable.names(object))) support.vars <- function(object) structure(do.call("c", lapply(object, support)), names = variable.names(object)) levels.factor_var <- function(x) levels(support(x)[[variable.names(x)]]) levels.discrete_var <- function(x) support(x)[[variable.names(x)]] levels.var <- function(x) return(NA) bounds <- function(object) UseMethod("bounds") bounds.continuous_var <- function(object) structure(list(object$bounds), names = variable.names(object)) bounds.discrete_var <- function(object) { s <- support(object)[[variable.names(object)]] structure(list(range(s)), names = variable.names(object)) } bounds.ordered_var <- function(object) { f <- support(object)[[variable.names(object)]] structure(list(f[c(1, nlevels(f))]), names = variable.names(object)) } bounds.vars <- function(object) structure(do.call("c", lapply(object, bounds)), names = variable.names(object)) bounds.default <- function(object) structure(list(NA), names = variable.names(object)) is.bounded <- function(object) UseMethod("is.bounded") is.bounded.continuous_var <- function(object) any(is.finite(bounds(object)[[variable.names(object)]])) is.bounded.var <- function(object) return(TRUE) is.bounded.vars <- function(object) sapply(object, is.bounded) mkgrid <- function(object, ...) 
UseMethod("mkgrid") mkgrid.var <- function(object, ...) return(support(object)) mkgrid.continuous_var <- function(object, n = 2, add = TRUE, ...) { s <- support(object)[[variable.names(object)]] if (add) { add <- object$add } else { add <- c(0, 0) } if (any(max(abs(add)) > 0)) s <- s + add b <- bounds(object)[[variable.names(object)]] if (is.finite(b[1]) & (add[1] == 0)) s[1] <- b[1] if (is.finite(b[2]) & (add[2] == 0)) s[2] <- b[2] stopifnot(n > 0) if (n == 1L) return(structure(list(diff(s)), names = variable.names(object))) return(structure(list(seq(from = s[1], to = s[2], length.out = n)), names = variable.names(object))) } mkgrid.vars <- function(object, ...) structure(do.call("c", lapply(object, mkgrid, ...)), names = variable.names(object)) as.data.frame.vars <- function(x, row.names = NULL, optional = FALSE, n = 1L, ...) { g <- mkgrid(x, n = n) len <- max(sapply(g, length)) as.data.frame(lapply(g, function(x) rep_len(x, length.out = len))) } as.data.frame.var <- as.data.frame.vars as.vars <- function(object) UseMethod("as.vars") as.vars.data.frame <- function(object) { v <- lapply(colnames(object), function(x) { if (is.ordered(object[[x]])) return(ordered_var(x, levels = levels(object[[x]]))) if (is.factor(object[[x]])) return(factor_var(x, levels = levels(object[[x]]))) b <- NULL if (is.integer(object[[x]])) { s <- sort(unique(object[[x]])) } else if (inherits(object[[x]], "Surv")) { ### <FIXME>: only right censored </FIXME> s <- c(.Machine$double.eps, max(object[[x]][,1], na.rm = TRUE)) b <- c(0, Inf) } else { s <- range(object[[x]], na.rm = TRUE) } return(numeric_var(x, support = s, bounds = b)) }) return(do.call("c", v)) } check <- function(object, data) UseMethod("check") check.ordered_var <- function(object, data) { if (!is.atomic(data)) { v <- variable.names(object) stopifnot(v %in% names(data)) data <- data[[v]] ### data might be censored if (inherits(data, "response")) data <- data$cleft } is.ordered(data) && isTRUE(all.equal(levels(data), levels(object))) } check.factor_var <- function(object, data) { if (!is.atomic(data)) { v <- variable.names(object) stopifnot(v %in% names(data)) data <- data[[v]] ### data might be censored if (inherits(data, "response")) data <- data$cleft } is.factor(data) && isTRUE(all.equal(levels(data), levels(object))) } check.discrete_var <- function(object, data) { if (!is.atomic(data)) { v <- variable.names(object) stopifnot(v %in% names(data)) data <- data[[v]] } all(data %in% support(object)[[variable.names(object)]]) } check.continuous_var <- function(object, data) { if (!is.atomic(data)) { v <- variable.names(object) stopifnot(v %in% names(data)) data <- data[[v]] } b <- bounds(object)[[variable.names(object)]] min(data, na.rm = TRUE) >= b[1] && max(data, na.rm = TRUE) <= b[2] } check.vars <- function(object, data) all(sapply(object, check, data = data))
/scratch/gouwar.j/cran-all/cranData/variables/R/variables.R
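A small usage sketch (the variables are hypothetical) for the constructors above, showing how a vars object reports names, builds grids over supports, and validates data against bounds and levels.

v <- c(numeric_var("age", support = c(18, 90), bounds = c(0, Inf)),
       factor_var("sex", levels = c("f", "m")))
variable.names(v)                                  # "age" "sex"
mkgrid(v, n = 5)                                   # grid over age, levels of sex
check(v, data.frame(age = c(25, 40),
                    sex = factor(c("f", "m"))))    # TRUE: data respect both variables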
#' Plot the posterior distributions of the focal parameters from a VM model #' #' This function plots the univariate and bivariate (if applicable) distributions #' of the focal (alpha) parameters from a Variability Model where the variability #' is used as a predictor in a second-stage model. The latent variability estimates are #' referred to as \dQuote{Sigma} and, if used, the latent intercepts are referred #' to as \dQuote{U}. #' #' @param alpha Results from running \code{varian} and \code{extract}ing the #' results. #' @param useU Logical indicating whether to plot the latent intercepts #' (defaults to \code{TRUE}). Must set to \code{FALSE} if not available. #' @param plot Logical whether to plot the results or just return the grob #' for the plots. Defaults to \code{TRUE}. #' @param digits Integer indicating how many digits should be used #' for displaying p-values #' @param \dots Additional arguments (not currently used) #' @return A list containing the \code{Combined} and the \code{Individual} plot objects. #' @author Joshua F. Wiley <josh@@elkhartgroup.com> #' @importFrom grid grid.draw #' @export #' @keywords hplot #' @examples #' # Using made up data because the real models take a long time to run #' set.seed(1234) # make reproducible #' vmp_plot(matrix(rnorm(1000), ncol = 2)) vmp_plot <- function(alpha, useU = TRUE, plot = TRUE, digits = 3, ...) { alpha <- as.data.frame(alpha) n <- ncol(alpha) stopifnot(n == 1 | n == 2) colnames(alpha) <- c("Est_Sigma", "Est_U")[1:n] p.sigma <- ggplot(alpha, aes_string("Est_Sigma")) + geom_histogram(fill = 'white', colour = 'black', binwidth = diff(range(alpha$Est_Sigma, na.rm=TRUE))/50, position = "identity") + theme_classic() sig.sigma <- empirical_pvalue(alpha$Est_Sigma) sig.dat <- data.frame(X = c(0, 0), Count = sig.sigma[1:2], Level = names(sig.sigma)[1:2], Pvalue = c(paste0("P = ", format.pval(sig.sigma["p-value"], digits = digits)), ""), stringsAsFactors = FALSE) if (!useU) { p.sig <- ggplot(sig.dat, aes_string("X", "Count", fill = "Level")) + geom_bar(stat = 'identity', position = 'stack') + scale_fill_manual(values = c("<= 0" = 'grey80', "> 0" = 'grey30')) + scale_x_continuous("", breaks = 0, labels = c("Est_Sigma")) + geom_text(aes_string("X", "1", label = "Pvalue"), vjust = 0) + theme_classic() graphs <- list(p.sigma, p.sig) } else if (useU) { p.u <- ggplot(alpha, aes_string("Est_U")) + geom_histogram(fill = 'white', colour = 'black', binwidth = diff(range(alpha$Est_U, na.rm=TRUE))/50, position = "identity") + theme_classic() p.joint <- ggplot(alpha, aes_string("Est_Sigma", "Est_U")) + geom_point(alpha = .25) + theme_classic() sig.u <- empirical_pvalue(alpha$Est_U) sig.dat <- rbind(sig.dat, data.frame(X = c(1, 1), Count = sig.u[1:2], Level = names(sig.sigma)[1:2], Pvalue = c(paste0("P = ", format.pval(sig.u["p-value"], digits = digits)), ""), stringsAsFactors = FALSE)) p.sig <- ggplot(sig.dat, aes_string("X", "Count", fill = "Level")) + geom_bar(stat = 'identity', position = 'stack') + scale_fill_manual(values = c("<= 0" = 'grey80', "> 0" = 'grey30')) + scale_x_continuous("", breaks = 0:1, labels = c("Est_Sigma", "Est_U")) + geom_text(aes_string("X", "1", label = "Pvalue"), vjust = 0) + theme_classic() graphs <- list(p.sigma, p.u, p.joint, p.sig) } p.out <- do.call(arrangeGrob, c(graphs, ncol = 2)) if (plot) grid.draw(p.out) invisible(list(Combined = p.out, Individual = graphs)) } #' Plot diagnostics from a VM model #' #' This function plots a variety of diagnostics from a Variability Model. 
#' These include a histogram of the Rhat values (so-called potential scale reduction #' factors). An Rhat value of 1 indicates that no reduction in the variability of #' the estimates is possible from running the chain longer. Values below 1.10 or 1.05 #' are typically considered indicative of convergence, with higher values indicating #' the model did not converge and should be changed or run longer. #' A histogram of the effective sample size indicates for every parameter estimated how #' many effective posterior samples are available for inference. Low values may indicate #' high autocorrelation in the samples and may be a sign of failure to converge. #' The maximum possible will be the total iterations available. #' Histograms of the posterior medians for the latent variability and intercept estimates #' are also shown. #' #' @param object Results from running \code{varian}. #' @param plot Logical whether to plot the results or just return the grob #' for the plots. Defaults to \code{TRUE}. #' @param \dots Additional arguments not currently used #' @return A graphical object #' @author Joshua F. Wiley <josh@@elkhartgroup.com> #' @export #' @keywords hplot #' @examples #' # Make Me! vm_diagnostics <- function(object, plot=TRUE, ...) { if (inherits(object, "vm")) { object <- object$results } res.s <- as.data.frame(summary(object)$summary) est <- extract(object, permuted=TRUE) p.rhat <- ggplot(res.s, aes_string("Rhat")) + geom_histogram(fill = 'white', colour = 'black', binwidth = diff(range(res.s$Rhat))/50, position = "identity") + labs(x = "Rhat for all parameters") + theme_classic() p.neff <- ggplot(res.s, aes_string("n_eff")) + geom_histogram(fill = 'white', colour = 'black', binwidth = diff(range(res.s$n_eff))/50, position = "identity") + labs(x = "N_Effective for all parameters") + theme_classic() sigma <- as.data.frame(t(apply(est$Sigma_V, 2, quantile, probs = c(.025, .5, .975), na.rm=TRUE))) colnames(sigma) <- c("LL", "Median", "UL") sigma <- sigma[order(sigma[, "Median"]), ] sigma$Index <- 1:nrow(sigma) U <- as.data.frame(t(apply(est$U, 2, quantile, probs = c(.025, .5, .975), na.rm=TRUE) )) colnames(U) <- c("LL", "Median", "UL") U <- U[order(U[, "Median"]), ] U$Index <- 1:nrow(U) p.sigma.h <- ggplot(sigma, aes_string("Median")) + geom_histogram(fill = 'white', colour = 'black', binwidth = diff(range(sigma$Median))/50, position = "identity") + labs(x = "Median Est_Sigma") + theme_classic() p.u.h <- ggplot(U, aes_string("Median")) + geom_histogram(fill = 'white', colour = 'black', binwidth = diff(range(U$Median))/50, position = "identity") + labs(x = "Median Est_U") + theme_classic() p.sigma <- ggplot(sigma, aes_string("Index", "Median", ymin = "LL", ymax = "UL")) + geom_pointrange() + labs(y = "Median + 95% CI for Sigma") + theme_classic() p.u <- ggplot(U, aes_string("Index", "Median", ymin = "LL", ymax = "UL")) + geom_pointrange() + labs(y = "Median + 95% CI for U") + theme_classic() p.diag <- arrangeGrob( arrangeGrob(p.rhat, p.neff, ncol = 2), arrangeGrob(p.sigma.h, p.u.h, ncol = 2), p.sigma, p.u, ncol = 1) if (plot) { grid.draw(p.diag) } invisible(p.diag) }
/scratch/gouwar.j/cran-all/cranData/varian/R/diagnostics.R
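A hypothetical workflow sketch (commented out because it needs a fitted model) for feeding vmp_plot() real posterior draws rather than the simulated matrix in its example; m is assumed to be the output of varian() with design "V -> Y" and useU = TRUE.

# post  <- rstan::extract(m$results)
# alpha <- cbind(post$Yalpha[, 1], post$Yalpha[, 2])  # Sigma effect, U effect
# vmp_plot(alpha, useU = TRUE)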
#' @name Variability_Measures #' @title Variability Measures #' @rdname VarMeasures #' @aliases by_id #' @aliases sd_id #' @aliases rmssd #' @aliases rmssd_id #' @aliases rolling_diff #' @aliases rolling_diff_id #' #' @note These are a set of functions designed to calculate various #' measures of variability either on a single data vector, or #' calculate them by an ID. #' #' @param x A data vector to operate on. Should be a numeric or #' integer vector, or coercible to such (e.g., logical). #' @param ID an ID variable indicating how to split up the \code{x} #' vector. Should be the same length as \code{x}. #' @param fun The function to calculate by ID #' @param long A logical indicating whether to return results in #' \dQuote{long} form (the default) or wide (if \code{FALSE}). #' @param \dots Additional arguments passed on to \code{fun} #' @author Joshua F. Wiley <josh@@elkhartgroup.com> NULL #' Variability Measures #' #' \code{by_id} - Internal function to allow a simple statistic (e.g., SD) #' to be calculated individually by an ID variable and returned #' either as per ID (i.e., wide form) or for every observation of an #' ID (i.e., long form). #' @return \code{by_id} - A vector the same length as \code{x} #' if \code{long=TRUE}, or the length of unique \code{ID}s if #' \code{long=FALSE}. #' @rdname VarMeasures by_id <- function(x, ID, fun, long=TRUE, ...) { if (long) { ave(x, ID, FUN = function(x) fun(x, ...)) } else { tapply(x, ID, FUN = function(x) fun(x, ...)) } } #' Variability Measures #' #' \code{sd_id} - Calculates the standard deviation of observations by \code{ID}. #' #' @return \code{sd_id} - A vector of the standard deviations by ID #' @keywords utilities #' @export #' @rdname VarMeasures #' @examples #' #' sd_id(mtcars$mpg, mtcars$cyl, long=TRUE) #' sd_id(mtcars$mpg, mtcars$cyl, long=FALSE) sd_id <- function(x, ID, long=TRUE) { by_id(x, ID, fun = sd, long = long, na.rm=TRUE) } #' Variability Measures #' #' \code{rmssd} - Calculates the root mean square of successive differences (RMSSD). #' Note that missing values are removed. #' #' @return \code{rmssd} - The RMSSD for the data. #' @export #' @rdname VarMeasures #' @examples #' rmssd(1:4) #' rmssd(c(1, 3, 2, 4)) rmssd <- function(x) { x <- na.omit(diff(x)) as.vector(sqrt(mean(x^2))) } #' Variability Measures #' #' \code{rmssd_id} - Calculates the RMSSD by ID. #' #' @return \code{rmssd_id} - A vector of the RMSSDs by ID #' @export #' @rdname VarMeasures #' @examples #' rmssd_id(mtcars$mpg, mtcars$cyl) #' rmssd_id(mtcars$mpg, mtcars$cyl, long=FALSE) rmssd_id <- function(x, ID, long=TRUE) { by_id(x, ID, fun = rmssd, long = long) } #' Variability Measures #' #' \code{rolling_diff} - Calculates the average rolling difference of the data. #' Within each window, the difference between the maximum and minimum value is #' computed and these are averaged across all windows. The equation is: #' \deqn{\frac{\sum_{t = 1}^{N - k + 1} max(x_{t}, \ldots, x_{t + k - 1}) - min(x_{t}, \ldots, x_{t + k - 1})}{N - k + 1}} #' #' @param window An integer indicating the size of the rolling window. #' Must not be longer than \code{x}. #' @return \code{rolling_diff} - The average of the rolling differences between maximum and minimum.
#' @export #' @rdname VarMeasures #' @examples #' rolling_diff(1:7, window = 4) #' rolling_diff(c(1, 4, 3, 4, 5)) rolling_diff <- function(x, window = 4) { stopifnot(length(x) >= window) index <- 1:(length(x) + 1 - window) mean(sapply(index, function(i) { x <- na.omit(x[i:(i + window - 1)]) if (length(x) < 2) { NA } else { diff(range(x)) } }), na.rm=TRUE) } #' Variability Measures #' #' \code{rolling_diff_id} - Calculates the average rolling difference by ID #' #' @return \code{rolling_diff_id} - A vector of the average rolling differences by ID #' @export #' @rdname VarMeasures #' @examples #' rolling_diff_id(mtcars$mpg, mtcars$cyl, window = 3) rolling_diff_id <- function(x, ID, long=TRUE, window = 4) { by_id(x, ID, fun = rolling_diff, long = long, window = window) } #' Estimate the parameters for a Gamma distribution #' #' This is a simple function to estimate what the parameters for a Gamma #' distribution would be from a data vector. It is used internally to #' generate start values. #' #' @param x a data vector to operate on #' @return a list of the shape (alpha) and rate (beta) parameters #' and the mean and variance #' @author Joshua F. Wiley <josh@@elkhartgroup.com> #' @keywords utilities gamma_params <- function(x) { m <- mean(x, na.rm=TRUE) v <- var(x, na.rm=TRUE) beta <- m / v alpha <- m * beta list(alpha = alpha, beta = beta, mean = m, variance = v) } #' Estimates the parameters of a Gamma distribution from SDs #' #' This function calculates the parameters of a Gamma distribution #' from the residuals around each individual's own mean. #' That is, the distribution of (standard) deviations from individuals' #' own means is calculated and then an estimate of the parameters of a #' Gamma distribution is calculated. #' #' @param x A data vector to operate on #' @param ID an ID variable of the same length as \code{x} #' @return a list of the shape (alpha) and rate (beta) parameters #' and the mean and variance #' @author Joshua F. Wiley <josh@@elkhartgroup.com> #' @export #' @keywords utilities #' @examples #' #' set.seed(1234) #' y <- rgamma(100, 3, 2) #' x <- rnorm(100 * 10, mean = 0, sd = rep(y, each = 10)) #' ID <- rep(1:100, each = 10) #' res_gamma(x, ID) res_gamma <- function(x, ID) { gamma_params(sd_id(x, ID, long = FALSE)) } #' Calculates an empirical p-value based on the data #' #' This function takes a vector of statistics and calculates #' the empirical p-value, that is, how many fall on the other #' side of zero. It calculates a two-tailed p-value. #' #' @param x a data vector to operate on #' @param na.rm Logical whether to remove NA values. Defaults to \code{TRUE} #' @return a named vector with the number of values falling at #' or below zero, above zero, and the empirical p-value. #' @author Joshua F. Wiley <josh@@elkhartgroup.com> #' @export #' @keywords utilities #' @examples #' #' empirical_pvalue(rnorm(100)) empirical_pvalue <- function(x, na.rm = TRUE) { x <- as.integer(x <= 0) tmp <- table(factor(x, levels = 1:0, labels = c("<= 0", "> 0"))) m <- mean(x, na.rm = na.rm) pval2 <- 2 * min(m, 1 - m) out <- c(as.vector(tmp), pval2) names(out) <- c(names(tmp), "p-value") out } #' nice formatting for p-values #' #' @param p a numeric pvalue #' @param d the digits less than which should be displayed as less than #' @param sd scientific digits for round #' @author Joshua F.
Wiley <josh@@elkhartgroup.com> #' @keywords utilities #' @examples #' varian:::pval_smartformat(c(1, .15346, .085463, .05673, .04837, .015353462, #' .0089, .00164, .0006589, .0000000053326), 3, 5) pval_smartformat <- function(p, d = 3, sd = 5) { p.out <- ifelse(p < 1/(10^d), paste0("< .", paste(rep(0, d - 1), collapse = ""), "1"), format(round(p, digits = d), digits = d, nsmall = d, scientific = sd)) gsub("0\\.", ".", p.out) } #' Calculates summaries for a parameter #' #' This function takes a vector of statistics and calculates #' several summaries: mean, median, 95% CI, and #' the empirical p-value, that is, how many fall on the other #' side of zero. #' #' @param x a data vector to operate on #' @param digits Number of digits to round to for printing #' @param pretty Logical value whether prettified values should be returned. #' Defaults to \code{FALSE}. #' @param \dots Additional arguments passed to \code{pval_smartformat} #' to control p-value printing. #' @param na.rm Logical whether to remove NA values. Defaults to \code{TRUE} #' @return A \code{data.frame} of summary statistics, or a formatted character #' string if \code{pretty = TRUE}. #' @author Joshua F. Wiley <josh@@elkhartgroup.com> #' @export #' @keywords utilities #' @examples #' #' param_summary(rnorm(100)) #' param_summary(rnorm(100), pretty = TRUE) param_summary <- function(x, digits = 2, pretty = FALSE, ..., na.rm = TRUE) { res <- round(data.frame(Mean = mean(x, na.rm = na.rm), Median = median(x, na.rm = na.rm), SE = sd(x, na.rm = na.rm), LL2.5 = as.vector(quantile(x, probs = .025, na.rm = na.rm)), UL97.5 = as.vector(quantile(x, probs = .975, na.rm = na.rm))), digits = digits) p <- pval_smartformat(empirical_pvalue(x)[["p-value"]], ...) if (pretty) { out <- sprintf("%s [%s, %s], %s", as.character(res$Mean), as.character(res$LL2.5), as.character(res$UL97.5), ifelse(grepl("<", p), paste0("p ", p), paste0("p = ", p))) } else { res[, 'p-value'] <- p out <- res } return(out) } #' Simulate a Gamma Variability Model #' #' This function facilitates simulation of a Gamma Variability Model #' and allows the number of units and repeated measures to be varied #' as well as the degree of variability. #' #' @param n The number of repeated measures on each unit #' @param k The number of units #' @param mu The grand mean of the variable #' @param mu.sigma The standard deviation of the random mean of the variable #' @param sigma.shape the shape (alpha) parameter of the Gamma distribution #' controlling the residual variability #' @param sigma.rate the rate (beta) parameter of the Gamma distribution #' controlling the residual variability #' @param seed the random seed, used to make simulations reproducible. #' Defaults to 5346 (arbitrarily). #' @return a list of the data, IDs, and the parameters used for the simulation #' @author Joshua F.
Wiley <josh@@elkhartgroup.com> #' @export #' @import MASS #' @keywords utilities #' @examples #' raw.sim <- simulate_gvm(12, 140, 0, 1, 4, .1, 94367) #' sim.data <- with(raw.sim, { #' set.seed(265393) #' x2 <- MASS::mvrnorm(k, c(0, 0), matrix(c(1, .3, .3, 1), 2)) #' y2 <- rnorm(k, cbind(Int = 1, x2) %*% matrix(c(3, .5, .7)) + sigma, sd = 3) #' data.frame( #' y = Data$y, #' y2 = y2[Data$ID2], #' x1 = x2[Data$ID2, 1], #' x2 = x2[Data$ID2, 2], #' ID = Data$ID2) #' }) simulate_gvm <- function(n, k, mu, mu.sigma, sigma.shape, sigma.rate, seed = 5346) { set.seed(seed) m <- rnorm(k, mu, mu.sigma) sigma <- rgamma(k, sigma.shape, sigma.rate) y <- rnorm(n * k, mean = rep(m, each = n), sd = rep(sigma, each = n)) y2 <- rnorm(k, sigma/sqrt(sigma.shape/(sigma.rate^2)), sd = 3) list( Data = data.frame(y = y, y2 = y2, ID1 = 1:(n * k), ID2 = rep(1:k, each = n)), n = n, k = k, mu = mu, mu.sigma = mu.sigma, sigma.shape = sigma.shape, sigma.rate = sigma.rate, sigma = sigma, seed = seed) } #' Wrapper for the stan function to parallelize chains #' #' This function takes Stan model code, compiles the Stan model, #' and then runs multiple chains in parallel. #' #' @param model_code A character string of Stan code #' @param standata A data list suitable for Stan for the model given #' @param totaliter The total number of iterations for inference. #' Note that the total number of iterations is automatically #' distributed across chains. #' @param warmup How many warmup iterations should be used? Note #' that every chain will use the same number of warmups and these #' will be \emph{added on top of the total iterations} for each chain. #' @param thin The thinning interval, defaulting to 1, which indicates that all #' samples are saved. #' @param chains The number of independent chains to run. #' @param cl (optional) The name of a cluster to use to run the chains. #' If not specified, the function will make a new cluster. #' @param cores (optional) If the \code{cl} argument is not used, #' this specifies the number of cores to make on the new cluster. #' If both \code{cl} and \code{cores} are missing, defaults to #' the minimum of the number of chains specified or the number of #' cores available on the machine. #' @param seeds (optional) A vector of random seeds the same length as the number #' of independent chains being run, to make results replicable. #' If missing, random seeds will be generated and stored for reference #' in the output. #' @param modelfit (optional) A compiled Stan model, if available, saves #' compiling \code{model_code}. #' @param verbose A logical whether to print verbose output #' (defaults to \code{FALSE}) #' @param pars Parameter names from Stan to store #' @param sample_file The sample file for Stan #' @param diagnostic_file The diagnostic file for Stan #' @param init A character string (\dQuote{random}) or a named list of starting values. #' @param \dots Additional arguments, not currently used. #' @return a named list with three elements, the \code{results}, #' compiled Stan \code{model}, and the random \code{seeds} #' @author Joshua F. Wiley <josh@@elkhartgroup.com> #' @export #' @keywords utilities #' @examples #' # Make me! parallel_stan <- function(model_code, standata, totaliter, warmup, thin = 1, chains, cl, cores, seeds, modelfit, verbose = FALSE, pars = NA, sample_file=NA, diagnostic_file = NA, init = "random", ...)
{ if (missing(cl)) { if (missing(cores)) { cores <- detectCores() } # make a cluster with nodes cl <- makeCluster(min(cores, chains)) } on.exit(stopCluster(cl)) if (!missing(modelfit)) { stanmodel <- modelfit } else { if (verbose) cat("Compiling Stan model\n") stanmodel <- stan_model(model_code = model_code, save_dso = TRUE) } if (verbose) cat("loading varian on workers\n") clusterEvalQ(cl, { require(varian) }) if (missing(seeds)) { seeds <- sample(.Random.seed, chains) } eachiter <- ceiling(totaliter/chains) fooenv <- environment() clusterExport(cl, varlist = c( "stanmodel", "standata", "pars", "eachiter", "warmup", "thin", "seeds", "init", "sample_file", "diagnostic_file"), envir = fooenv) if (verbose) cat("Sampling from Stan\n") stanres <- parLapplyLB(cl, 1:chains, function(i) { stanres <- sampling(object = stanmodel, data = standata, pars = pars, chains = 1, iter = eachiter + warmup, warmup = warmup, thin = thin, seed = seeds[i], init = init, check_data = TRUE, sample_file = sample_file, diagnostic_file = diagnostic_file, chain_id = i) }) if (chains > 1) { if (verbose) cat("Combining chains\n") stanres <- tryCatch(sflist2stanfit(stanres), error = function(e) return(e)) } else { stanres <- stanres[[1]] } list(results = stanres, model = stanmodel, seeds = seeds) }
/scratch/gouwar.j/cran-all/cranData/varian/R/helpers.R
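Two quick standalone checks of the helpers above: rmssd() is the root mean square of the lag-1 differences, and empirical_pvalue() doubles the smaller tail around zero.

sqrt(mean(c(2, -1, 2)^2))    # 1.7320..., identical to rmssd(c(1, 3, 2, 4))
set.seed(2)
x <- rnorm(1000, mean = 0.5)
empirical_pvalue(x)          # counts <= 0 and > 0; p = 2 * min of the two proportions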
## # Currently Available ## V -> Y ## V -> M -> Y ## V ## # Planned ## X -> V ## X -> V -> Y ## X -> M -> V ## varian(v, y, m, design = ## c("V -> Y", "V -> M -> Y", "V", ## "X -> V", "X -> V -> Y", "X -> M -> V")) ## data { ## int<lower=1> N; ## real rrt[N]; //outcome ## real so[N]; //predictor ## int<lower=1> I; //number of subjects ## int<lower=1> K; //number of items ## int<lower=1, upper=I> subj[N]; //subject id ## int<lower=1, upper=K> item[N]; //item id ## vector[2] mu_prior; //vector of zeros passed in from R ## } ## parameters { ## vector[2] beta; // intercept and slope ## vector[2] u[I]; // random intercept and slope ## real w[K]; // random intercept item ## real<lower = 0> sigma_e; // residual sd ## vector<lower=0>[2] sigma_u; // subj sd ## real<lower=0> sigma_w; // item sd ## corr_matrix[2] Omega; // correlation matrix for random intercepts and slopes ## } ## transformed parameters { ## matrix[2,2] D; ## D <- diag_matrix(sigma_u); ## } ## model { ## matrix[2,2] L; ## matrix[2,2] DL; ## // priors ## beta ~ normal(0,5); ## sigma_e ~ cauchy(0,2); ## sigma_u ~ cauchy(0,2); ## sigma_w ~ cauchy(0,2); ## Omega ~ lkj_corr(2.0); ## L <- cholesky_decompose(Omega); ## DL <- D * L; ## for (i in 1:I) // loop for subj random effects ## u[i] ~ multi_normal_cholesky(mu_prior, DL); ## for (k in 1:K) // loop for item random effects ## w[k] ~ normal(0,sigma_w); ## // likelihood ## for (n in 1:N) { ## rrt[n] ~ normal(beta[1] + beta[2]*so[n] + u[subj[n], 1] + u[subj[n], 2]*so[n], ## sigma_e); ## } ## } ## generated quantities { ## cov_matrix[2] Sigma; ## Sigma <- D * Omega * D; ## } #' Create a Stan class VM object #' #' Internal function to create and compile a Stan model. #' #' @param design A character string indicating the type of model to be run. One of #' \dQuote{V -> Y} for variability predicting an outcome, #' \dQuote{V -> M -> Y} for mediation of variability on an outcome, #' \dQuote{V} to take posterior samples of individual variability estimates alone. #' @param useU A logical value whether the latent intercept estimated in Stage 1 should #' also be used as a predictor. Defaults to \code{TRUE}. #' @param \dots Additional arguments passed to \code{stan_model}. #' @return A compiled Stan model. #' @author Joshua F. Wiley <josh@@elkhartgroup.com> #' @keywords models #' @examples #' # Make Me! #' \dontrun{ #' test1 <- vm_stan("V -> Y", useU=TRUE) #' test2 <- vm_stan("V -> Y", useU=FALSE) #' test3 <- vm_stan("V -> M -> Y", useU=TRUE) #' test4 <- vm_stan("V -> M -> Y", useU=FALSE) #' test5 <- vm_stan("V") #' } vm_stan <- function(design = c("V -> Y", "V -> M -> Y", "V", "X -> V", "X -> V -> Y", "X -> M -> V"), useU=TRUE, ...) 
{ design <- match.arg(design) if (!design %in% c("V -> Y", "V -> M -> Y", "V")) { stop("Currently only V -> Y, V -> M -> Y, and V are implemented") } ## show the priors ## x <- seq(.001, 50, by = .01) ## plot(x, dcauchy(x, 0, 20), type = "l") model.core.V <- list( data = " int<lower=1> n; int<lower=1> k; int VID[n]; // Data related to V (variability) int<lower=1> pVX; real V[n]; matrix[n, pVX] VX; ", parameters = " // Parameters related to V vector[pVX] VB; real U[k]; real<lower=0> sigma_U; real<lower=0> shape; real<lower=0> rate; real<lower=0> Sigma_V[k]; ", tparameters.declarations = " // Params related to V real V_hat[n]; real Sigma_V_hat[n]; ", tparameters.statements = " // Params related to V for (i in 1:n) { V_hat[i] <- (VX[i] * VB) + U[VID[i]]; Sigma_V_hat[i] <- Sigma_V[VID[i]]; } ", model.declarations = " // Priors for V Location VB ~ normal(0, 1000); U ~ normal(0, sigma_U); // Priors for Stage 1 scale of random location sigma_U ~ cauchy(0, 10); // Priors for Stage 1 Scale shape ~ cauchy(0, 10); rate ~ cauchy(0, 10); // Model for Stage 1 Scale Sigma_V ~ gamma(shape, rate); ", model.statements = " // Likelihood for V V ~ normal(V_hat, Sigma_V_hat); ") model.core.Y <- list( data = " // Data related to Y (outcome) int<lower=1> pYX; int<lower=1> pYX2; real Y[k]; matrix[k, pYX] YX; ", parameters = " // Parameters related to Y vector[pYX] YB; vector[pYX2] Yalpha; real<lower=0> sigma_Y; ", tparameters.declarations = " // Params related to Y real Y_hat[k]; ", tparameters.statements = " // Params related to Y for (i in 1:k) { Y_hat[i] <- (YX[i] * YB) + Yalpha[1] * Sigma_V[i]YuseU; } ", model.declarations = " // Priors for Y location YB ~ normal(0, 1000); Yalpha ~ normal(0, 1000); // Priors for Y scale sigma_Y ~ cauchy(0, 10); ", model.statements = " // Likelihood for Y Y ~ normal(Y_hat, sigma_Y); ") model.core.Y$tparameters.statements <- gsub("YuseU", ifelse(useU, " + Yalpha[2] * U[i]", ""), model.core.Y$tparameters.statements) model.core.M <- list( data = " // Data related to M (mediator) int<lower=1> pMX; int<lower=1> pMX2; real M[k]; matrix[k, pMX] MX; ", parameters = " // Parameters related to M vector[pMX] MB; vector[pMX2] Malpha; real<lower=0> sigma_M; ", tparameters.declarations = " // Params related to M real M_hat[k]; ", tparameters.statements = " // Params related to M for (i in 1:k) { M_hat[i] <- (MX[i] * MB) + Malpha[1] * Sigma_V[i]MuseU; } ", model.declarations = " // Priors for M location MB ~ normal(0, 1000); Malpha ~ normal(0, 1000); // Priors for M scale sigma_M ~ cauchy(0, 10); ", model.statements = " // Likelihood for M M ~ normal(M_hat, sigma_M); ") model.core.M$tparameters.statements <- gsub("MuseU", ifelse(useU, " + Malpha[2] * U[i]", ""), model.core.M$tparameters.statements) model_builder <- function(...) { pieces <- list(...) n <- names(pieces[[1]]) combined <- lapply(n, function(i) { do.call(paste, c(lapply(pieces, function(x) x[[i]]), list(collapse = "\n"))) }) names(combined) <- n with(combined, sprintf(" // upper case letters indicate vectors/matrices // lower case letters indicate scalars data { %s } parameters { %s } transformed parameters { %s %s } model { %s %s } ", data, parameters, tparameters.declarations, tparameters.statements, model.declarations, model.statements)) } model <- switch(design, `V -> Y` = model_builder(model.core.V, model.core.Y), `V -> M -> Y` = model_builder(model.core.V, model.core.Y, model.core.M), `V` = model_builder(model.core.V)) stan_model(model_code = model, save_dso=TRUE, ...)
} #' Calculate Initial Values for Stan VM Model #' #' Internal function used to get rough starting values for a #' variability model in Stan. Uses individual standard deviations, means, #' and linear regressions. #' #' @param stan.data A list containing the data to be passed to Stan #' @param design A character string indicating the type of model to be run. One of #' \dQuote{V -> Y} for variability predicting an outcome, #' \dQuote{V -> M -> Y} for mediation of variability on an outcome, #' \dQuote{V} to take posterior samples of individual variability estimates alone. #' @param useU whether to include the random intercepts #' @param \dots Additional arguments (not currently used) #' @return A named list containing the initial values for Stan. #' @author Joshua F. Wiley <josh@@elkhartgroup.com> #' @keywords models #' @examples #' # make me! stan_inits <- function(stan.data, design = c("V -> Y", "V -> M -> Y", "V", "X -> V", "X -> V -> Y", "X -> M -> V"), useU, ...) { design <- match.arg(design) if (!design %in% c("V -> Y", "V -> M -> Y", "V")) { stop("Currently only V -> Y, V -> M -> Y, and V are implemented") } # V inits rg <- with(stan.data, res_gamma(V, VID)) out <- with(stan.data, list( VB = as.array(coef(lm.fit(VX, V))), U = as.array(by_id(V, VID, mean, FALSE, na.rm=TRUE)-mean(V, na.rm=TRUE)), shape = rg$alpha, rate = rg$beta, Sigma_V = as.array(sd_id(V, VID, FALSE)) )) index <- (out$Sigma_V == 0) | is.na(out$Sigma_V) # if zero or missing, replace with nonmissing minima out$Sigma_V[index] <- min(out$Sigma_V[!index], na.rm=TRUE) out$sigma_U <- sd(out$U, na.rm=TRUE) dv_init <- function(X, dv, k) { b <- coef(lm.fit(X, dv)) s.dv <- sd(dv, na.rm=TRUE) list(sigma_dv = s.dv, b = as.array(b[1:k]), alpha = as.array(b[(k + 1):length(b)])) } if (useU) { tmpV <- cbind(Res = out$Sigma_V, U = out$U) } else { tmpV <- cbind(Res = out$Sigma_V) } out <- c(out, switch(design, `V -> Y` = { tmpY <- with(stan.data, dv_init(cbind(YX, tmpV), Y, ncol(YX))) names(tmpY) <- c("sigma_Y", "YB", "Yalpha") tmpY }, `V -> M -> Y` = { tmpY <- with(stan.data, dv_init(cbind(YX, tmpV), Y, ncol(YX))) names(tmpY) <- c("sigma_Y", "YB", "Yalpha") tmpM <- with(stan.data, dv_init(cbind(MX, tmpV), M, ncol(MX))) names(tmpM) <- c("sigma_M", "MB", "Malpha") c(tmpY, tmpM) }, `V` = c())) return(out) } #' Variability Analysis using a Bayesian Variability Model (VM) #' #' This function uses a linear mixed effects model that assumes the level 1 residual #' variance varies by Level 2 units. That is, rather than assuming a homogeneous residual #' variance, it assumes the residual standard deviations come from a Gamma distribution. #' In the first stage of this model, each Level 2 unit's residual standard deviation is #' estimated, and in the second stage, these standard deviations are used to predict #' another Level 2 outcome. The function uses an intuitive formula interface, but #' the underlying model is implemented in Stan, with minimally informative priors for all #' parameters. #' #' @param y.formula A formula describing a model for the outcome. At present, #' this must be a continuous, normally distributed variable. #' @param v.formula A formula describing a model for the variability. Note #' this must end with \code{ | ID}, where \code{ID} is the name of the #' ID variable in the dataset. At present, this must be a continuous, #' normally distributed variable. #' @param m.formula An optional formula describing a model for a mediator variable. #' At present, this must be a continuous normally distributed variable.
#' @param data A long data frame containing both the Level 2 and Level 1 outcomes, #' as well as all covariates and an ID variable. #' @param design A character string indicating the type of model to be run. One of #' \dQuote{V -> Y} for variability predicting an outcome, #' \dQuote{V -> M -> Y} for mediation of variability on an outcome, #' \dQuote{V} to take posterior samples of individual variability estimates alone. #' @param useU A logical value whether the latent intercept estimated in Stage 1 should #' also be used as a predictor. Defaults to \code{TRUE}. Note if there is a #' mediator as well as main outcome, the latent intercepts will be used as a predictor #' for both. #' @param totaliter The total number of iterations to be used (not including the #' warmup iterations); these are distributed equally across multiple independent #' chains. #' @param warmup The number of warmup iterations. Each independent chain #' has the same number of warmup iterations, before it starts the iterations #' that will be used for inference. #' @param chains The number of independent chains to run (defaults to 1). #' @param inits Initial values passed on to \code{stan}. If \code{NULL}, the default, #' initial values are estimated means, standard deviations, and coefficients from a #' single level linear regression. #' @param modelfit A compiled Stan model (e.g., from a previous run). #' @param opts A list giving options. Currently only \code{SD_Tol} which controls #' the tolerance for how small a variable's standard deviation may be without #' stopping estimation (this guards against duplicate variables, or variables without #' any variability, being included as predictors). #' @param \dots Additional arguments passed to \code{stan}. #' @return A named list containing the model \code{results}, the \code{model}, #' the \code{variable.names}, the \code{data}, the random \code{seeds}, #' and the initial function \code{.call}. #' @author Joshua F. Wiley <josh@@elkhartgroup.com> #' @import Formula #' @export #' @keywords models #' @examples #' \dontrun{ #' sim.data <- with(simulate_gvm(4, 60, 0, 1, 3, 2, 94367), { #' set.seed(265393) #' x2 <- MASS::mvrnorm(k, c(0, 0), matrix(c(1, .3, .3, 1), 2)) #' y2 <- rnorm(k, cbind(Int = 1, x2) %*% matrix(c(3, .5, .7)) + sigma, sd = 3) #' data.frame( #' y = Data$y, #' y2 = y2[Data$ID2], #' x1 = x2[Data$ID2, 1], #' x2 = x2[Data$ID2, 2], #' ID = Data$ID2) #' }) #' m <- varian(y2 ~ x1 + x2, y ~ 1 | ID, data = sim.data, design = "V -> Y", #' totaliter = 10000, warmup = 1500, thin = 10, chains = 4, verbose=TRUE) #' #' # check diagnostics #' vm_diagnostics(m) #' #' sim.data2 <- with(simulate_gvm(21, 250, 0, 1, 3, 2, 94367), { #' set.seed(265393) #' x2 <- MASS::mvrnorm(k, c(0, 0), matrix(c(1, .3, .3, 1), 2)) #' y2 <- rnorm(k, cbind(Int = 1, x2) %*% matrix(c(3, .5, .7)) + sigma, sd = 3) #' data.frame( #' y = Data$y, #' y2 = y2[Data$ID2], #' x1 = x2[Data$ID2, 1], #' x2 = x2[Data$ID2, 2], #' ID = Data$ID2) #' }) #' # warning: may take several minutes #' m2 <- varian(y2 ~ x1 + x2, y ~ 1 | ID, data = sim.data2, design = "V -> Y", #' totaliter = 10000, warmup = 1500, thin = 10, chains = 4, verbose=TRUE) #' # check diagnostics #' vm_diagnostics(m2) #' } varian <- function(y.formula, v.formula, m.formula, data, design = c("V -> Y", "V -> M -> Y", "V", "X -> V", "X -> V -> Y", "X -> M -> V"), useU = TRUE, totaliter = 2000, warmup = 1000, chains = 1, inits = NULL, modelfit, opts = list(SD_Tol = .01, pars = NULL), ...)
{ design <- match.arg(design) if (!design %in% c("V -> Y", "V -> M -> Y", "V")) { stop("Currently only V -> Y, V -> M -> Y, and V are implemented") } stopifnot(is.data.frame(data)) stopifnot(!missing(v.formula)) stopifnot(is.logical(useU)) storedCall <- match.call() # logical flag for mediation med <- !missing(m.formula) # drop any missing levels to avoid redundant dummy codes in model matrix data <- droplevels(data) var.names <- c(list( V = all.vars(terms(as.Formula(v.formula), lhs = 1, rhs = -c(1, 2))), VID = all.vars(terms(as.Formula(v.formula), lhs = -1, rhs = 2))), switch(design, `V -> Y` = list( Y = all.vars(terms(as.Formula(y.formula), lhs = 1, rhs = -1))), `V -> M -> Y` = list( Y = all.vars(terms(as.Formula(y.formula), lhs = 1, rhs = -1)), M = all.vars(terms(as.Formula(m.formula), lhs = 1, rhs = -c(1, 2)))), `V` = list())) all.formula <- switch(design, `V -> Y` = as.Formula(y.formula, v.formula), `V -> M -> Y` = as.Formula(y.formula, v.formula, m.formula), `V` = as.Formula(v.formula)) # make sure ID is a numeric/integer or a factor stopifnot(class(data[, var.names$VID]) %in% c("numeric", "integer", "factor")) test.VID <- sd_id(data[, var.names$V], data[, var.names$VID], long=FALSE) if (!all(test.VID != 0, na.rm=TRUE)) { stop(sprintf("The following IDs have no variability in the first stage outcome:\n%s\nTry using\n%s\nto remove these from the data.", paste(names(test.VID)[which(test.VID == 0)], collapse = ', '), paste0("subset(your_data, sd_id(", var.names$V, ", ", var.names$VID, ") != 0)"))) } key <- list(OriginalID = data[, var.names$VID]) data[, var.names$VID] <- as.integer(data[, var.names$VID]) key$IntegerID <- data[, var.names$VID] data <- data[order(data[, var.names$VID]), ] mf <- model.frame(all.formula, data = data, na.action = na.omit) key$MFOriginalID <- mf[, var.names$VID] mf[, var.names$VID] <- as.integer(factor(mf[, var.names$VID])) key$MFIntegerID <- mf[, var.names$VID] key <- with(key, { tmp1 <- data.frame(OriginalID, IntegerID)[!duplicated(IntegerID), ] tmp2 <- data.frame(MFOriginalID, MFIntegerID)[!duplicated(MFIntegerID), ] data.frame(OriginalID = tmp1[match(tmp2[, 1], tmp1[, 2]), 1], InternalID = tmp2[, 2]) }) vars <- list(V = mf[, var.names$V], VID = mf[, var.names$VID]) keep.obs <- !duplicated(vars$VID) vars <- c(vars, switch(design, `V -> Y` = list( Y = mf[keep.obs, var.names$Y]), `V -> M -> Y` = list( Y = mf[keep.obs, var.names$Y], M = mf[keep.obs, var.names$M]), `V` = list())) mm <- c(list(V = model.matrix(as.Formula(v.formula), data = mf, rhs = 1)), switch(design, `V -> Y` = list( Y = model.matrix(y.formula, data = mf)[keep.obs, , drop = FALSE]), `V -> M -> Y` = list( Y = model.matrix(y.formula, data = mf)[keep.obs, , drop = FALSE], M = model.matrix(m.formula, data = mf)[keep.obs, , drop = FALSE]), `V` = list())) var.names <- c(var.names, list(VX = colnames(mm$V)), switch(design, `V -> Y` = list( YX = colnames(mm$Y)), `V -> M -> Y` = list( YX = colnames(mm$Y), MX = colnames(mm$M)), `V` = list())) # Code to check the scaling of variables v.sds <- switch(design, `V -> Y` = c(sd(vars$Y, na.rm=TRUE), sd(vars$V, na.rm=TRUE), apply(mm$Y, 2, sd, na.rm=TRUE), apply(mm$V, 2, sd, na.rm=TRUE)), `V -> M -> Y` = c(sd(vars$Y, na.rm=TRUE), sd(vars$V, na.rm=TRUE), sd(vars$M, na.rm=TRUE), apply(mm$Y, 2, sd, na.rm=TRUE), apply(mm$V, 2, sd, na.rm=TRUE), apply(mm$M, 2, sd, na.rm=TRUE)), `V` = c(sd(vars$V, na.rm=TRUE), apply(mm$V, 2, sd, na.rm=TRUE))) names(v.sds) <- switch(design, `V -> Y` = with(var.names, c(Y, V, YX, VX)), `V -> M -> Y` = with(var.names, c(Y, V, M, YX, VX,
MX)), `V` = with(var.names, c(V, VX))) v.sds <- v.sds[names(v.sds) != "(Intercept)"] v.sds.index <- is.na(v.sds) | v.sds < opts$SD_Tol | v.sds > 50 if (any(v.sds.index)) { stop(sprintf("The following variables' SDs are either too small or too large.\n Remove or rescale variables before modelling.\n Variables: %s", paste(names(v.sds)[v.sds.index], collapse = ", "))) } class.tests <- c(V = is.numeric(vars$V) & is.vector(vars$V), VX = (is.matrix(mm$V) & is.numeric(mm$V)), switch(design, `V -> Y` = c( Y = is.numeric(vars$Y) & is.vector(vars$Y), YX = is.matrix(mm$Y) & is.numeric(mm$Y), M = TRUE, MX = TRUE), `V -> M -> Y` = c( Y = is.numeric(vars$Y) & is.vector(vars$Y), YX = is.matrix(mm$Y) & is.numeric(mm$Y), M = is.numeric(vars$M) & is.vector(vars$M), MX = is.matrix(mm$M) & is.numeric(mm$M)), `V` = c(Y = TRUE, YX = TRUE, M = TRUE, MX = TRUE))) if (!all(class.tests)) { stop(c("V must be a numeric vector ", "VX must be a numeric matrix ", "Y must be a numeric vector ", "YX must be a numeric matrix ", "M must be a numeric vector ", "MX must be a numeric matrix ")[!class.tests]) } n <- length(vars$V) k <- length(unique(vars$VID)) dimension.tests <- c(V = all(identical(nrow(mm$V), n), identical(length(vars$VID), n)), switch(design, `V -> Y` = c( Y = all(identical(nrow(mm$Y), k), identical(length(vars$Y), k)), M = TRUE), `V -> M -> Y` = c( Y = all(identical(nrow(mm$Y), k), identical(length(vars$Y), k)), M = all(identical(nrow(mm$M), k), identical(length(vars$M), k))), `V` = c(Y = TRUE, M = TRUE))) if (!all(dimension.tests)) { stop(c("The length and rows of VX, V, and VID must all be equal ", "The length and rows of Y, YX, and the unique IDs must all be equal ", "The length and rows of M, MX, and the unique IDs must all be equal ")[!dimension.tests]) } p <- c(list(VX = ncol(mm$V)), switch(design, `V -> Y` = list(YX = ncol(mm$Y), YX2 = 1L + useU), `V -> M -> Y` = list(YX = ncol(mm$Y), YX2 = 1L + useU, MX = ncol(mm$M), MX2 = 1L + useU), `V` = list())) stan.data <- c(list(V = vars$V, VX = mm$V, VID = vars$VID, pVX = p$VX, n = n, k = k), switch(design, `V -> Y` = list(Y = vars$Y, YX = mm$Y, pYX = p$YX, pYX2 = p$YX2), `V -> M -> Y` = list(Y = vars$Y, YX = mm$Y, pYX = p$YX, pYX2 = p$YX2, M = vars$M, MX = mm$M, pMX = p$MX, pMX2 = p$MX2), `V` = list())) if (is.null(inits)) { inits <- tryCatch(list(stan_inits(stan.data, design, useU)), error = function(e) return(e)) if (inherits(inits, "error")) return(list(Inits = inits, stan.data = stan.data)) } if (!missing(modelfit)) { model <- modelfit } else { model <- vm_stan(design, useU=useU) } if (is.null(opts$pars)) { pars <- c("VB", "U", "sigma_U", "shape", "rate", "Sigma_V", switch(design, `V -> Y` = c("YB", "Yalpha", "sigma_Y"), `V -> M -> Y` = c("YB", "Yalpha", "sigma_Y", "MB", "Malpha", "sigma_M"), `V` = c())) } else { pars <- opts$pars } # inits is just one list because even when multiple chains # parallel_stan runs one chain per worker/core res <- parallel_stan(modelfit = model, standata = stan.data, totaliter = totaliter, warmup = warmup, chains = chains, pars = pars, init = inits, ...) out <- list( results = res$results, model = model, variable.names = var.names, data = c(stan.data, list(IDkey = key)), seeds = res$seeds, .call = storedCall, inits = list(inits), design = design ) class(out) <- c("vm", "list") return(out) }
/scratch/gouwar.j/cran-all/cranData/varian/R/modeler.R
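For orientation, a comment-only sketch of the two-stage model the code above assembles (notation mirrors the Stan parameter names; the U term in Stage 2 is included only when useU = TRUE).

# Stage 1 (repeated measures i nested in Level 2 units j):
#   V[i] ~ Normal(VX[i] %*% VB + U[VID[i]], Sigma_V[VID[i]])
#   U[j] ~ Normal(0, sigma_U);  Sigma_V[j] ~ Gamma(shape, rate)
# Stage 2 (one row per unit j):
#   Y[j] ~ Normal(YX[j] %*% YB + Yalpha[1] * Sigma_V[j] + Yalpha[2] * U[j], sigma_Y)
# The mediation design ("V -> M -> Y") adds the same Stage 2 structure for M.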
#' @title The Variability Analysis in R Package #' @docType package #' @name varian #' @import rstan #' @import parallel #' @import ggplot2 #' @import gridExtra #' @import stats NULL
/scratch/gouwar.j/cran-all/cranData/varian/R/varian.R
# #' @export compile_jars<- function(){ spark_2.2 <- spark_compilation_spec( spark_version = "2.2.0", scalac_path = sparklyr::find_scalac("2.11"), jar_name = sprintf("variantspark-2.2-2.11.jar"), jar_path = NULL, scala_filter = NULL ) spark_2.4 <- spark_compilation_spec( spark_version = "2.4.0", scalac_path = sparklyr::find_scalac("2.11"), jar_name = sprintf("variantspark-2.4-2.11.jar"), jar_path = NULL, scala_filter = NULL ) sparklyr::compile_package_jars(spec = spark_2.2) sparklyr::compile_package_jars(spec = spark_2.4) sparklyr::compile_package_jars() }
/scratch/gouwar.j/cran-all/cranData/variantspark/R/compile_jars.R
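A hypothetical developer note (compile_jars() is not exported): rebuilding the bundled jars requires local Spark installations and a Scala 2.11 compiler, which sparklyr can locate.

# sparklyr::find_scalac("2.11")    # verify a scalac 2.11 is available first
# variantspark:::compile_jars()    # compiles the variantspark-2.2/2.4-2.11 jars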
spark_dependencies <- function(spark_version, scala_version, ...) { sparklyr::spark_dependency( packages = c("au.csiro.aehrc.variant-spark:variant-spark_2.11:0.2.0-a1"), repositories = c( "https://mvnrepository.com/artifact/", "http://central.maven.org/maven2/" ) ) } #' @import sparklyr .onLoad <- function(libname, pkgname) { sparklyr::register_extension(pkgname) }
/scratch/gouwar.j/cran-all/cranData/variantspark/R/dependencies.R
#' Extract the importance data frame #' #' This function extracts the importance data frame from the Importance Analysis #' jobj. #' #' @param importance A jobj from the class \code{ImportanceAnalysis}, usually the #' output of \code{vs_importance_analysis()}. #' @param name The name to assign to the copied table in Spark. #' #' @examples #' \dontrun{ #' library(sparklyr) #' sc <- spark_connect(master = "local") #' vsc <- vs_connect(sc) #' #' hipster_vcf <- vs_read_vcf(vsc, #' system.file("extdata/hipster.vcf.bz2", #' package = "variantspark")) #' labels <- vs_read_labels(vsc, #' system.file("extdata/hipster_labels.txt", #' package = "variantspark")) #' #' importance <- vs_importance_analysis(vsc, hipster_vcf, labels, 10) #' importance_tbl(importance) #' } #' #' @export importance_tbl <- function(importance, name = "importance_tbl"){ sparklyr::sdf_register(sparklyr::invoke(importance, "variableImportance"), name = name) }
/scratch/gouwar.j/cran-all/cranData/variantspark/R/importance_tbl.R
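A hypothetical follow-up once the table is registered (commented out; the importance column name is an assumption, so inspect the output of importance_tbl() first):

# tbl <- importance_tbl(importance, name = "importance_tbl")
# tbl %>%
#   dplyr::arrange(dplyr::desc(importance)) %>%   # column name assumed
#   head(10) %>%
#   dplyr::collect()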
#' Display sample names #' #' This function displays the first N sample names. #' #' @param vcf_source An object with \code{VCFFeatureSource} class, usually the #' output of the \code{vs_read_vcf()}. #' @param n_samples The number of samples to display. #' #' @return spark_jobj, shell_jobj #' #' @examples #' \dontrun{ #' library(sparklyr) #' #' sc <- spark_connect(master = "local") #' vsc <- vs_connect(sc) #' #' hipster_vcf <- vs_read_vcf(vsc, #' system.file("extdata/hipster.vcf.bz2", #' package = "variantspark")) #' #' sample_names(hipster_vcf, 3) #' } #' #' @export sample_names <- function(vcf_source, n_samples = NULL){ names <- sparklyr::invoke(vcf_source, "sampleNames") if (!is.null(n_samples)){ names <- sparklyr::invoke(names, "take", as.integer(n_samples)) } names }
/scratch/gouwar.j/cran-all/cranData/variantspark/R/sample_names.R
#' Creating a variantspark connection #' #' To use this extension you need a variantspark connection. You create one by #' passing a spark connection, which you can obtain with #' \code{sparklyr::spark_connect()}. #' #' @param sc A spark connection. #' #' @return A variantspark connection #' #' @examples #' library(sparklyr) #' sc <- spark_connect(master = "spark://HOST:PORT") #' connection_is_open(sc) #' vsc <- vs_connect(sc) #' spark_disconnect(sc) #' #' @export vs_connect <- function(sc){ sparklyr::invoke_new(sc, "au.csiro.variantspark.api.VSContext", spark_session(sc)) }
/scratch/gouwar.j/cran-all/cranData/variantspark/R/vs_connect.R
#' Importance Analysis
#'
#' This function performs an Importance Analysis using the random forest
#' algorithm. For more details, please look at
#' \href{https://variantspark.readthedocs.io/en/latest/overview.html#importance-analysis}{here}.
#'
#' @param vsc A variantspark connection.
#' @param vcf_source An object with \code{VCFFeatureSource} class, usually the
#'   output of the \code{vs_read_vcf()}.
#' @param labels An object with \code{CsvLabelSource} class, usually the output
#'   of the \code{vs_read_labels()}.
#' @param n_trees The number of trees used in the random forest.
#'
#' @return spark_jobj, shell_jobj
#'
#' @examples
#' \dontrun{
#' library(sparklyr)
#' sc <- spark_connect(master = "local")
#' vsc <- vs_connect(sc)
#'
#' hipster_vcf <- vs_read_vcf(vsc,
#'                            system.file("extdata/hipster.vcf.bz2",
#'                                        package = "variantspark"))
#'
#' labels <- vs_read_labels(vsc,
#'                          system.file("extdata/hipster_labels.txt",
#'                                      package = "variantspark"))
#'
#' vs_importance_analysis(vsc, hipster_vcf, labels, 10)
#' }
#'
#' @export
vs_importance_analysis <- function(vsc, vcf_source, labels, n_trees){
  rf_param <- sparklyr::invoke_static(vsc$connection,
                                      "au.csiro.variantspark.api.ImportanceAnalysis",
                                      "defaultRFParams")
  sparklyr::invoke_new(vsc$connection,
                       "au.csiro.variantspark.api.ImportanceAnalysis",
                       sparklyr::invoke(vsc, "sqlContext"),
                       vcf_source, labels, rf_param,
                       as.integer(n_trees), 100L, 3L)
}
/scratch/gouwar.j/cran-all/cranData/variantspark/R/vs_importance_analysis.R
#' Reading a CSV file
#'
#' The \code{vs_read_csv()} reads a CSV file format and returns a \code{jobj}
#' object from the \code{CsvFeatureSource} scala class.
#'
#' @param vsc A variantspark connection.
#' @param path The file's path.
#'
#' @return spark_jobj, shell_jobj
#'
#' @examples
#' \dontrun{
#' library(sparklyr)
#'
#' sc <- spark_connect(master = "local")
#' vsc <- vs_connect(sc)
#'
#' hipster_labels <- vs_read_csv(vsc,
#'                               system.file("extdata/hipster_labels.txt",
#'                                           package = "variantspark"))
#'
#' hipster_labels
#' }
#'
#' @export
vs_read_csv <- function(vsc, path){
  default_csv_format <- sparklyr::invoke_new(vsc$connection,
                                             "au.csiro.variantspark.input.DefaultCSVFormatSpec")
  sparklyr::invoke(vsc, "importCSV", path, default_csv_format)
}
/scratch/gouwar.j/cran-all/cranData/variantspark/R/vs_read_csv.R
#' Reading labels
#'
#' This function reads only the label column of a CSV file and returns a \code{jobj}
#' object from the \code{CsvLabelSource} scala class.
#'
#' @param vsc A variantspark connection.
#' @param path The file's path.
#' @param label A string with the label column name.
#'
#' @return spark_jobj, shell_jobj
#'
#' @examples
#' \dontrun{
#' library(sparklyr)
#'
#' sc <- spark_connect(master = "local")
#' vsc <- vs_connect(sc)
#'
#' labels <- vs_read_labels(vsc,
#'                          system.file("extdata/hipster_labels.txt",
#'                                      package = "variantspark"))
#'
#' labels
#' }
#' @export
vs_read_labels <- function(vsc, path, label = "label"){
  sparklyr::invoke(vsc, "loadLabel", path, label)
}
/scratch/gouwar.j/cran-all/cranData/variantspark/R/vs_read_labels.R
#' Reading a VCF file
#'
#' The Variant Call Format (VCF) specifies the format of a text file used in
#' bioinformatics for storing gene sequence variations. The format has been developed
#' with the advent of large-scale genotyping and DNA sequencing projects, such as
#' the 1000 Genomes Project. The \code{vs_read_vcf()} reads this format and returns
#' a \code{jobj} object from the \code{VCFFeatureSource} scala class.
#'
#' @param vsc A variantspark connection.
#' @param path The file's path.
#'
#' @return spark_jobj, shell_jobj
#'
#' @examples
#' \dontrun{
#' library(sparklyr)
#'
#' sc <- spark_connect(master = "local")
#' vsc <- vs_connect(sc)
#'
#' hipster_vcf <- vs_read_vcf(vsc,
#'                            system.file("extdata/hipster.vcf.bz2",
#'                                        package = "variantspark"))
#'
#' hipster_vcf
#' }
#'
#' @export
vs_read_vcf <- function(vsc, path){
  sparklyr::invoke(vsc, "importVCF", path, 0L)
}
/scratch/gouwar.j/cran-all/cranData/variantspark/R/vs_read_vcf.R
#' Artificial Q-matrix for 30 items 3 attributes
#'
#' This matrix represents an artificial Q-matrix for 30 items and 3 attributes.
#'
#' @source {artificially simulated}
"sim_Q_J30K3"

#' Artificial Q-matrix for 80 items 5 attributes
#'
#' Artificial Q-matrix for an 80-item test measuring 5 attributes.
#'
#' @source {artificially simulated}
"sim_Q_J80K5"

#' Artificial Q-matrix for MC-DINA model
#'
#' Artificial Q-matrix for a 30-item test measuring 5 attributes.
#'
#' @format A matrix with components
#' \describe{
#'   \item{column 1}{Item number}
#'   \item{column 2}{Stem}
#'   \item{column 3 to end}{attributes}
#' }
#'
#' @references Yamaguchi, K. (2020). Variational Bayesian inference for the
#'   multiple-choice DINA model. \emph{Behaviormetrika}, 47(1), 159-187.
#'   \doi{10.1007/s41237-020-00104-w}
"mc_sim_Q"
/scratch/gouwar.j/cran-all/cranData/variationalDCM/R/data.R
#' @title Artificial data generating function for the DINA model based on the given Q-matrix
#'
#' @description \code{dina_data_gen()} returns the artificially generated item response data for the DINA model
#'
#' @param Q the \eqn{J \times K} binary matrix
#' @param I the number of assumed respondents
#' @param attr_cor the true value of the correlation among attributes (default: 0.1)
#' @param s the true value of the slip parameter (default: 0.2)
#' @param g the true value of the guessing parameter (default: 0.2)
#' @param seed the seed value used for random number generation (default: 17)
#' @return A list including:
#' \describe{
#'   \item{X}{the generated artificial item response data}
#'   \item{att_pat}{the generated true value of the attribute mastery pattern}
#' }
#' @references Oka, M., & Okada, K. (2023). Scalable Bayesian Approach for the Dina
#'   Q-Matrix Estimation Combining Stochastic Optimization and Variational Inference.
#'   \emph{Psychometrika}, 88, 302–331. \doi{10.1007/s11336-022-09884-4}
#' @examples
#' # load Q-matrix
#' Q = sim_Q_J80K5
#' sim_data = dina_data_gen(Q=Q,I=200)
#' @export
dina_data_gen = function(Q, I, attr_cor=0.1, s=0.2, g=0.2, seed=17){
  set.seed(seed)
  J = nrow(Q)
  K = ncol(Q)
  oneminus_s = 1 - s
  sigma = (1-attr_cor)*diag(K) + attr_cor*matrix(1,K,K)
  ch = chol(sigma)
  u = matrix(stats::rnorm(I*K), I, K)
  uc = u %*% ch
  cr = stats::pnorm(uc)
  alpha = matrix(0,I,K)
  for (k in 1:K) {
    alpha[,k] = as.integer(cr[,k] >= k/(K+1))
  }
  tm = alpha %*% t(Q)
  natt = rowSums(Q)
  eta_ij = matrix(0,I,J)
  for (i in 1:I) {
    eta_ij[i,] = ifelse(tm[i,] == natt, 1, 0)
  }
  y = ifelse(eta_ij == 1, oneminus_s, g)
  comp = matrix(stats::rnorm(I*J),I,J)
  y = ifelse(y >= comp, 1, 0)
  list(X=y, att_pat=alpha)
}

#
# DINA VB
#
dina = function(
  X,
  Q,
  max_it  = 500,
  epsilon = 1e-04,
  verbose = TRUE,
  # Hyperparameters
  delta_0 = NULL, # For π
  alpha_s = NULL, # For s_j
  beta_s  = NULL, # For s_j
  alpha_g = NULL, # For g_j
  beta_g  = NULL  # For g_j
){
  if(!inherits(X, "matrix")){
    X <- as.matrix(X)
  }
  if(!inherits(Q, "matrix")){
    Q <- as.matrix(Q)
  }
  if(!all(X %in% c(0,1))) stop("item response data should only contain 0/1. \n")
  if(!all(Q %in% c(0,1))) stop("Q-matrix should only contain 0/1. \n")

  # Index
  I <- nrow(X)
  J <- ncol(X)
  K <- ncol(Q)
  L <- 2^K

  # All attribute pattern matrix
  A <- as.matrix(expand.grid(lapply(1:K, function(x)rep(0:1))))
  eta_lj <- A %*% t(Q)
  QQ <- diag(Q %*% t(Q))

  # hyperparameter
  if(is.null(delta_0)){
    delta_0 = rep(1, L) # For π
  }
  if(is.null(alpha_s)){
    alpha_s = 1 # For s_j
  }
  if(is.null(beta_s)){
    beta_s = 1 # For s_j
  }
  if(is.null(alpha_g)){
    alpha_g = 1 # For g_j
  }
  if(is.null(beta_g)){
    beta_g = 1 # For g_j
  }

  #
  # Convert
  #
  for(j in 1:J) eta_lj[,j] <- 1*(eta_lj[,j] == QQ[j])

  #
  # Initialization
  #
  r_il <- matrix(1/L, ncol=L, nrow = I)
  log_rho_il <- matrix(0, ncol=L, nrow = I)
  delta_ast   <- rep(0,L)
  alpha_s_ast <- rep(0,J)
  beta_s_ast  <- rep(0,J)
  alpha_g_ast <- rep(0,J)
  beta_g_ast  <- rep(0,J)
  one_vec = matrix(1, nrow=I, ncol=1)
  m = 1
  l_lb = rep(0, max_it+1)
  l_lb[1] = -Inf

  for(m in 1:max_it){
    #
    # M-step
    #
    delta_ast   <- colSums(r_il) + delta_0
    alpha_s_ast <- colSums((r_il %*% eta_lj) * (1-X)) + alpha_s
    beta_s_ast  <- colSums((r_il %*% eta_lj) * X) + beta_s
    alpha_g_ast <- colSums((1 - r_il %*% eta_lj) * X) + alpha_g
    beta_g_ast  <- colSums((1 - r_il %*% eta_lj) * (1-X)) + beta_g

    #
    # Calculate Expectations
    #
    E_log_s   = digamma(alpha_s_ast) - digamma(alpha_s_ast + beta_s_ast)
    E_log_1_s = digamma(beta_s_ast)  - digamma(alpha_s_ast + beta_s_ast)
    E_log_g   = digamma(alpha_g_ast) - digamma(alpha_g_ast + beta_g_ast)
    E_log_1_g = digamma(beta_g_ast)  - digamma(alpha_g_ast + beta_g_ast)
    E_log_pi  = digamma(delta_ast)   - digamma(sum(delta_ast))

    #
    # E-step
    #
    log_rho_il <- t(eta_lj %*% t(t((E_log_1_s - E_log_g) * t(X)) +
                                   t((E_log_s - E_log_1_g) * t(1-X)))) +
      one_vec %*% E_log_pi
    temp <- exp(log_rho_il)
    r_il <- 1/rowSums(temp) * temp

    #
    # Evidence Lower Bound
    #
    r_eta <- r_il %*% eta_lj
    one_m_r_eta <- 1 - r_eta
    tmp1 <- sum(X * t(t(r_eta)*E_log_1_s + t(one_m_r_eta)*E_log_g) +
                  (1-X) * t(t(r_eta)*E_log_s + t(one_m_r_eta)*E_log_1_g))
    tmp2 <- sum(r_il*(one_vec %*% E_log_pi - log(r_il)))
    tmp3 <- sum(lgamma(delta_ast) - lgamma(delta_0)) + lgamma(sum(delta_0)) -
      lgamma(sum(delta_ast)) + sum(E_log_pi)
    tmp4 <- sum(lbeta(alpha_s_ast, beta_s_ast) - lbeta(alpha_s, beta_s) +
                  (alpha_s - alpha_s_ast)*E_log_s + (beta_s - beta_s_ast)*E_log_1_s)
    tmp5 <- sum(lbeta(alpha_g_ast, beta_g_ast) - lbeta(alpha_g, beta_g) +
                  (alpha_g - alpha_g_ast)*E_log_g + (beta_g - beta_g_ast)*E_log_1_g)
    l_lb[m+1] = tmp1 + tmp2 + tmp3 + tmp4 + tmp5

    if(verbose){
      cat("\riteration = ", m+1, sprintf(",last change = %.05f", abs(l_lb[m] - l_lb[m+1])))
    }
    if(abs(l_lb[m] - l_lb[m+1]) < epsilon){
      if(verbose){
        cat("\nreached convergence.\n")
      }
      break()
    }
  }
  l_lb <- l_lb[-1]

  s_est <- alpha_s_ast/(alpha_s_ast + beta_s_ast)
  g_est <- alpha_g_ast/(alpha_g_ast + beta_g_ast)
  s_sd  <- sqrt(alpha_s_ast*beta_s_ast/(((alpha_s_ast + beta_s_ast)^2)*(alpha_s_ast + beta_s_ast + 1)))
  g_sd  <- sqrt(alpha_g_ast*beta_g_ast/(((alpha_g_ast + beta_g_ast)^2)*(alpha_g_ast + beta_g_ast + 1)))

  #
  # variance of estimates
  #
  delta_sum <- sum(delta_ast)
  pi_est <- delta_ast/delta_sum
  delta_sd <- sqrt(delta_ast*(delta_sum - delta_ast)/(delta_sum^2*(delta_sum + 1)))

  model_params = list(
    s_est = s_est,
    g_est = g_est,
    s_sd = s_sd,
    g_sd = g_sd
  )
  res = list(model_params = model_params,
             alpha_s_ast = alpha_s_ast,
             beta_s_ast = beta_s_ast,
             alpha_g_ast = alpha_g_ast,
             beta_g_ast = beta_g_ast,
             pi_est = pi_est,
             delta_ast = delta_ast,
             delta_sd = delta_sd,
             l_lb = l_lb[l_lb != 0],
             att_pat_est = A[apply(r_il, 1, which.max),],
             eta_lj = eta_lj,
             m = m)
  return(res)
}
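# Usage sketch (illustrative, not part of the package source): the internal
# dina() routine above can be exercised directly on data simulated with
# dina_data_gen(); in normal use it is called through the exported
# variationalDCM() wrapper.
# Q <- sim_Q_J80K5
# sim <- dina_data_gen(Q = Q, I = 500)
# fit <- dina(X = sim$X, Q = Q, max_it = 200, epsilon = 1e-4, verbose = FALSE)
# fit$model_params$s_est   # posterior means of the slip parameters
# fit$model_params$g_est   # posterior means of the guessing parameters
# head(fit$att_pat_est)    # MAP attribute mastery patterns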
/scratch/gouwar.j/cran-all/cranData/variationalDCM/R/dina.R
#
# DINO VB
#
dino = function(
  X,
  Q,
  max_it  = 500,
  epsilon = 1e-04,
  verbose = TRUE,
  # Hyperparameters
  delta_0 = NULL, # For π
  alpha_s = NULL, # For s_j
  beta_s  = NULL, # For s_j
  alpha_g = NULL, # For g_j
  beta_g  = NULL  # For g_j
){
  if(!inherits(X, "matrix")){
    X <- as.matrix(X)
  }
  if(!inherits(Q, "matrix")){
    Q <- as.matrix(Q)
  }
  if(!all(X %in% c(0,1))) stop("item response data should only contain 0/1. \n")
  if(!all(Q %in% c(0,1))) stop("Q-matrix should only contain 0/1. \n")

  # Index
  I <- nrow(X)
  J <- ncol(X)
  K <- ncol(Q)
  L <- 2^K

  #
  # All attribute pattern matrix
  #
  A <- as.matrix(expand.grid(lapply(1:K, function(x)rep(0:1))))
  eta_lj <- (1-A) %*% t(Q)
  QQ <- diag(Q %*% t(Q))

  #
  # hyperparameter
  #
  if(is.null(delta_0)){
    delta_0 = rep(1, L) # For π
  }
  if(is.null(alpha_s)){
    alpha_s = 1 # For s_j
  }
  if(is.null(beta_s)){
    beta_s = 1 # For s_j
  }
  if(is.null(alpha_g)){
    alpha_g = 1 # For g_j
  }
  if(is.null(beta_g)){
    beta_g = 1 # For g_j
  }

  #
  # Convert
  #
  for(j in 1:J) eta_lj[,j] <- 1*(eta_lj[,j] == QQ[j])
  eta_lj <- 1 - eta_lj

  #
  # Initialization
  #
  r_il <- matrix(1/L, ncol=L, nrow = I)
  log_rho_il <- matrix(0, ncol=L, nrow = I)
  delta_ast   <- rep(0,L)
  alpha_s_ast <- rep(0,J)
  beta_s_ast  <- rep(0,J)
  alpha_g_ast <- rep(0,J)
  beta_g_ast  <- rep(0,J)
  one_vec = matrix(1, nrow=I, ncol=1)
  m = 1
  l_lb = rep(0, max_it+1)
  l_lb[1] = -Inf

  for(m in 1:max_it){
    #
    # M-step
    #
    delta_ast   <- colSums(r_il) + delta_0
    alpha_s_ast <- colSums((r_il %*% eta_lj) * (1-X)) + alpha_s
    beta_s_ast  <- colSums((r_il %*% eta_lj) * X) + beta_s
    alpha_g_ast <- colSums((1 - r_il %*% eta_lj) * X) + alpha_g
    beta_g_ast  <- colSums((1 - r_il %*% eta_lj) * (1-X)) + beta_g

    #
    # Calculate Expectations
    #
    E_log_s   = digamma(alpha_s_ast) - digamma(alpha_s_ast + beta_s_ast)
    E_log_1_s = digamma(beta_s_ast)  - digamma(alpha_s_ast + beta_s_ast)
    E_log_g   = digamma(alpha_g_ast) - digamma(alpha_g_ast + beta_g_ast)
    E_log_1_g = digamma(beta_g_ast)  - digamma(alpha_g_ast + beta_g_ast)
    E_log_pi  = digamma(delta_ast)   - digamma(sum(delta_ast))

    #
    # E-step
    #
    log_rho_il <- t(eta_lj %*% t(t((E_log_1_s - E_log_g) * t(X)) +
                                   t((E_log_s - E_log_1_g) * t(1-X)))) +
      one_vec %*% E_log_pi
    temp <- exp(log_rho_il)
    r_il <- 1/rowSums(temp) * temp

    #
    # Evidence Lower Bound
    #
    r_eta <- r_il %*% eta_lj
    one_m_r_eta <- 1 - r_eta
    tmp1 <- sum(X * t(t(r_eta)*E_log_1_s + t(one_m_r_eta)*E_log_g) +
                  (1-X) * t(t(r_eta)*E_log_s + t(one_m_r_eta)*E_log_1_g))
    tmp2 <- sum(r_il*(one_vec %*% E_log_pi - log(r_il)))
    tmp3 <- sum(lgamma(delta_ast) - lgamma(delta_0)) + lgamma(sum(delta_0)) -
      lgamma(sum(delta_ast)) + sum(E_log_pi)
    tmp4 <- sum(lbeta(alpha_s_ast, beta_s_ast) - lbeta(alpha_s, beta_s) +
                  (alpha_s - alpha_s_ast)*E_log_s + (beta_s - beta_s_ast)*E_log_1_s)
    tmp5 <- sum(lbeta(alpha_g_ast, beta_g_ast) - lbeta(alpha_g, beta_g) +
                  (alpha_g - alpha_g_ast)*E_log_g + (beta_g - beta_g_ast)*E_log_1_g)
    l_lb[m+1] = tmp1 + tmp2 + tmp3 + tmp4 + tmp5

    if(verbose){
      cat("\riteration = ", m+1, sprintf(",last change = %.05f", abs(l_lb[m] - l_lb[m+1])))
    }
    if(abs(l_lb[m] - l_lb[m+1]) < epsilon){
      if(verbose){
        cat("\nreached convergence.\n")
      }
      break()
    }
  }
  l_lb <- l_lb[-1]

  s_est <- alpha_s_ast/(alpha_s_ast + beta_s_ast)
  g_est <- alpha_g_ast/(alpha_g_ast + beta_g_ast)
  s_sd  <- sqrt(alpha_s_ast*beta_s_ast/(((alpha_s_ast + beta_s_ast)^2)*(alpha_s_ast + beta_s_ast + 1)))
  g_sd  <- sqrt(alpha_g_ast*beta_g_ast/(((alpha_g_ast + beta_g_ast)^2)*(alpha_g_ast + beta_g_ast + 1)))

  #
  # variance of estimates
  #
  delta_sum <- sum(delta_ast)
  pi_est <- delta_ast/delta_sum
  delta_sd <- sqrt(delta_ast*(delta_sum - delta_ast)/(delta_sum^2*(delta_sum + 1)))

  model_params = list(
    s_est = s_est,
    g_est = g_est,
    s_sd = s_sd,
    g_sd = g_sd
  )
  res = list(model_params = model_params,
             alpha_s_ast = alpha_s_ast,
             beta_s_ast = beta_s_ast,
             alpha_g_ast = alpha_g_ast,
             beta_g_ast = beta_g_ast,
             pi_est = pi_est,
             delta_ast = delta_ast,
             delta_sd = delta_sd,
             l_lb = l_lb[l_lb != 0],
             att_pat_est = A[apply(r_il, 1, which.max),],
             eta_lj = eta_lj,
             m = m)
  return(res)
}
/scratch/gouwar.j/cran-all/cranData/variationalDCM/R/dino.R
#' @title Artificial data generating function for the hidden-Markov DCM based on the given Q-matrix
#'
#' @description \code{hm_dcm_data_gen()} returns the artificially generated item response data for the HM-DCM
#'
#' @param Q the \eqn{J \times K} binary matrix
#' @param I the number of assumed respondents
#' @param min_theta the minimum value of the item parameter \eqn{\theta_{jht}}
#' @param max_theta the maximum value of the item parameter \eqn{\theta_{jht}}
#' @param att_cor the true value of the correlation among attributes (default: 0.1)
#' @param seed the seed value used for random number generation (default: 17)
#' @return A list including:
#' \describe{
#'   \item{X}{the generated artificial item response data}
#'   \item{alpha_true}{the generated true value of the attribute mastery pattern, matrix form}
#'   \item{alpha_patt_true}{the generated true value of the attribute mastery pattern, string form}
#' }
#' @references Yamaguchi, K., & Martinez, A. J. (2024). Variational Bayes
#'   inference for hidden Markov diagnostic classification models. \emph{British Journal
#'   of Mathematical and Statistical Psychology}, 77(1), 55–79. \doi{10.1111/bmsp.12308}
#'
#' @examples
#' indT = 3
#' Q = sim_Q_J30K3
#' hm_sim_Q = lapply(1:indT,function(time_point) Q)
#' hm_sim_data = hm_dcm_data_gen(Q=hm_sim_Q,I=200)
#'
#' @export
hm_dcm_data_gen <- function(I = 500,
                            Q,
                            min_theta = 0.2,
                            max_theta = 0.8,
                            att_cor = 0.1,
                            seed = 17){
  set.seed(seed)
  indI = I
  indK <- ncol(Q[[1]])
  indT <- length(Q)
  indJt <- sapply(Q, nrow)
  indL <- 2^indK
  cut_offs <- stats::qnorm((1:indK + 0.5)/(indK + 1))
  item_par = "random"
  mean_norm <- rep(0, indK)
  vcov_norm <- matrix(att_cor, indK, indK)
  diag(vcov_norm) <- 1
  alpha_cont_t_1 <- mvtnorm::rmvnorm(n = indI, mean = mean_norm, sigma = vcov_norm)
  alpha_true_t_1 <- t((t(alpha_cont_t_1) > cut_offs)*1)

  #
  # All attribute pattern matrix
  #
  K_jt <- sapply(Q, rowSums)
  H_jt <- 2^K_jt
  not_zero_q_t <- lapply(Q, function(y)apply(y, 1, function(x) which(x != 0)))
  A <- as.matrix(expand.grid(lapply(1:indK, function(x)rep(0:1))))
  A_jt <- lapply(not_zero_q_t, function(y)lapply(y, function(x) A[,x,drop=F]))
  A_red <- lapply(A_jt, function(z)lapply(z, function(x) apply(x, 1, function(y) paste0(y, collapse = ""))))
  #
  # Unique correct item response probability label for each time point and item.
  #
  A_red_uni <- lapply(A_jt, function(z)lapply(z, function(x) unique(apply(x, 1, function(y) paste0(y, collapse = "")))))

  #
  # Make G-matrix
  #
  G_jt <- lapply(1:indT, function(time_t)lapply(1:indJt[[time_t]], function(j)
    outer(A_red_uni[[time_t]][[j]], A_red[[time_t]][[j]], function(x,y) (x == y)*1)))
  att_pat <- apply(A, 1, function(x) paste0(x, collapse=""))
  for(time_t in 1:indT){
    for(j in 1:indJt[time_t]) {
      colnames(G_jt[[time_t]][[j]]) <- att_pat
      row.names(G_jt[[time_t]][[j]]) <- A_red_uni[[time_t]][[j]]
    }
  }

  #
  # Transition matrix: Tau
  #
  Tau_true <- matrix(0, indL, indL)
  colnames(Tau_true) <- att_pat
  row.names(Tau_true) <- att_pat
  for(l in 1:indL){
    temp <- rep(0, indL)
    for(ld in 1:indL){
      temp <- A[ld,] - A[l,]
      temp[temp == 1]  <- 3/10
      temp[temp == -1] <- 1/10
      temp[temp == 0]  <- 6/10
      Tau_true[l, ld] <- prod(temp)
    }
  }
  Tau_true <- Tau_true / rowSums(Tau_true)

  #
  # Alpha true, z_i
  #
  z_i_true <- vector("list", indT)
  alpha_patt_true <- vector("list", indT)
  alpha_true <- vector("list", indT)
  alpha_true[[1]] <- alpha_true_t_1
  alpha_patt_true[[1]] <- apply(alpha_true[[1]], 1, function(x) paste0(x, collapse = ""))
  z_i_true[[1]] <- outer(alpha_patt_true[[1]], att_pat, function(x,y) (x == y)*1)
  colnames(z_i_true[[1]]) <- att_pat
  for(time_t in 1:(indT-1)){
    transition_prob <- z_i_true[[time_t]] %*% Tau_true
    z_i_true[[time_t+1]] <- t(apply(transition_prob, 1, function(p)stats::rmultinom(1, 1, p)))
    alpha_true[[time_t+1]] <- z_i_true[[time_t+1]] %*% A
    alpha_patt_true[[time_t+1]] <- apply(alpha_true[[time_t+1]], 1, function(x) paste0(x, collapse = ""))
  }
  n_of_att <- lapply(A_red_uni, function(y)lapply(y, function(x) sapply(strsplit(x, ""), function(y)sum(y == "1"))))

  #
  # π parameter
  #
  cut_off_k_dim_list <- lapply(1:indL, function(l){
    cut_off_k_dim <- matrix(NA, ncol=2, nrow = indK)
    cut_off_k_dim[A[l,] == 1, 1] <- cut_offs[A[l,] == 1]
    cut_off_k_dim[A[l,] == 1, 2] <- Inf
    cut_off_k_dim[A[l,] == 0, 1] <- -Inf
    cut_off_k_dim[A[l,] == 0, 2] <- cut_offs[A[l,] == 0]
    cut_off_k_dim
  })
  names(cut_off_k_dim_list) <- att_pat
  pi_true <- sapply(cut_off_k_dim_list, function(x)mvtnorm::pmvnorm(lower = x[,1], upper = x[,2],
                                                                    mean = mean_norm, sigma = vcov_norm))
  pi_true <- pi_true/sum(pi_true)

  #
  # True theta_jh.
  #
  theta_jht_true <- lapply(indJt, function(Jt)vector("list", Jt))
  for(time_t in 1:indT){
    for(j in 1:indJt[time_t]){
      if(item_par == "random"){
        true_par_temp <- stats::runif(min = min_theta, max = max_theta, max(n_of_att[[time_t]][[j]])+1)
        true_par_temp <- true_par_temp[order(true_par_temp)]
        theta_jht_true[[time_t]][[j]] <- n_of_att[[time_t]][[j]]
      }else{
        true_par_temp <- seq(from = min_theta, to = max_theta, length.out = max(n_of_att[[time_t]][[j]])+1)
      }
      for(k in min(n_of_att[[time_t]][[j]]):max(n_of_att[[time_t]][[j]])){
        theta_jht_true[[time_t]][[j]][n_of_att[[time_t]][[j]] == k] <- true_par_temp[k+1]
      }
      names(theta_jht_true[[time_t]][[j]]) <- A_red_uni[[time_t]][[j]]
    }
  }

  #
  # Generate item response matrix
  #
  X <- rand_mat <- item_res_prob <- vector("list", indT)
  for(time_t in 1:indT){
    item_res_prob[[time_t]] <- sapply(1:indJt[[time_t]], function(j){
      z_i_ast <- z_i_true[[time_t]] %*% t(G_jt[[time_t]][[j]])
      z_i_ast %*% theta_jht_true[[time_t]][[j]]
    })
    rand_mat[[time_t]] <- matrix(stats::runif(indI*indJt[[time_t]]), ncol=indJt[[time_t]], nrow=indI)
    X[[time_t]] <- (item_res_prob[[time_t]] > rand_mat[[time_t]])*1
  }
  list(X = X, alpha_patt_true = alpha_patt_true, alpha_true = alpha_true)
}

hmdcm_vb = function(
  X, Q,
  measurement_model = "general",
  A_0 = NULL, B_0 = NULL, delta_0 = NULL, omega_0 = NULL,
  max_it = 500, epsilon = 1e-04,
  random_start = FALSE, verbose = TRUE,
  random_block_design = FALSE, Test_versions = NULL, Test_order = NULL
){
  if((random_block_design)){
    stop("sorry, current version does not support the case where random_block_design is true when nondecreasing_attribute is false.\n")
  }
  indI <- sapply(X, nrow)[1] # Assume all individuals take all tests
  indK <- ncol(Q[[1]])
  indT <- length(Q)
  indJt <- sapply(Q, nrow)
  indL <- 2^indK

  #
  # All attribute pattern matrix
  #
  not_zero_q_t <- lapply(Q, function(y)apply(y, 1, function(x) which(x != 0)))
  A <- as.matrix(expand.grid(lapply(1:indK, function(x)rep(0:1))))
  A_jt <- lapply(not_zero_q_t, function(y)lapply(y, function(x) A[,x,drop=F]))
  A_red <- lapply(A_jt, function(z)lapply(z, function(x) apply(x, 1, function(y) paste0(y, collapse = ""))))
  # Unique correct item response probability label for each time point and item.
  A_red_uni <- lapply(A_jt, function(z)lapply(z, function(x) unique(apply(x, 1, function(y) paste0(y, collapse = "")))))

  #
  # Make G-matrix
  #
  if(measurement_model == "general"){
    G_jt <- lapply(1:indT, function(time_t)lapply(1:indJt[[time_t]], function(j)
      outer(A_red_uni[[time_t]][[j]], A_red[[time_t]][[j]], function(x,y) (x == y)*1)))
    att_pat <- apply(A, 1, function(x) paste0(x, collapse=""))
    for(time_t in 1:indT){
      for(j in 1:indJt[time_t]) {
        colnames(G_jt[[time_t]][[j]]) <- att_pat
        row.names(G_jt[[time_t]][[j]]) <- A_red_uni[[time_t]][[j]]
      }
    }
  }else if(measurement_model == "dina"){
    G_jt <- lapply(1:indT, function(time_t)lapply(1:indJt[time_t], function(j)matrix(0, ncol=indL, nrow=2)))
    att_pat <- apply(A, 1, function(x) paste0(x, collapse=""))
    for(time_t in 1:indT){
      for(j in 1:indJt[time_t]) {
        temp_eta <- apply(t(t(A) ^ Q[[time_t]][j,]), 1, prod)
        G_jt[[time_t]][[j]][1,] <- 1 - temp_eta
        G_jt[[time_t]][[j]][2,] <- temp_eta
        colnames(G_jt[[time_t]][[j]]) <- att_pat
        row.names(G_jt[[time_t]][[j]]) <- c("0","1")
      }
    }
  } else {
    stop("Error: Specify model general or dina.\n")
  }

  #
  # Hyper parameter
  #
  if(is.null(delta_0)){
    delta_0 = rep(1, indL) # For π
  }
  if(is.null(omega_0)){
    omega_0 = matrix(1, indL, indL) # For Tau matrix
  }

  #
  # Weak monotonicity constraint
  #
  number_of_attributes <- lapply(A_red_uni, function(y)lapply(y, function(x) sapply(strsplit(x, ""), function(y)sum(as.numeric(y)))))
  if(measurement_model == "dina") {
    number_of_attributes <- lapply(1:indT, function(time_t){lapply(1:indJt[time_t], function(j)c(0,1))})
  }
  if(is.null(A_0)){
    A_0_hyperparam <- lapply(number_of_attributes, function(x)seq(from = 1+epsilon, to = 2, length.out = max(unlist(x))+1))
    A_0 <- vector("list", length = indT)
    for(time_t in 1:indT){
      A_0[[time_t]] <- lapply(number_of_attributes[[time_t]], function(x){A_0_hyperparam[[time_t]][x + 1]})
    }
  }
  if(is.null(B_0)){
    B_0_hyperparam <- lapply(number_of_attributes, function(x)seq(from = 2, to = 1+epsilon, length.out = max(unlist(x))+1))
    B_0 <- vector("list", length = indT)
    for(time_t in 1:indT){
      B_0[[time_t]] <- lapply(number_of_attributes[[time_t]], function(x){B_0_hyperparam[[time_t]][x + 1]})
    }
  }

  #
  # Initialization
  #
  if(random_start == TRUE){
    E_z_itl_temp <- lapply(1:indT, function(time_t)matrix(stats::runif(indI*indL), ncol=indL, nrow = indI))
    E_z_itl_temp <- lapply(E_z_itl_temp, function(x)diag(1/rowSums(x)) %*% x)
    E_z_itl <- array(0, dim=c(indI, indL, indT))
    for(time_t in 1:indT){
      E_z_itl[,,time_t] <- E_z_itl_temp[[time_t]]
    }
    E_z_itl_z_itm1l <- array(0, dim=c(indI, indL, indL, indT-1))
    for(i in 1:indI){
      for(time_t in 1:(indT-1)){
        E_z_itl_z_itm1l_temp <- matrix(stats::runif(indL*indL), ncol=indL, nrow = indL)
        E_z_itl_z_itm1l_temp <- E_z_itl_z_itm1l_temp/sum(E_z_itl_z_itm1l_temp)
        E_z_itl_z_itm1l[i,,,time_t] <- E_z_itl_z_itm1l_temp
      }
    }
  }else{
    E_z_itl <- array(0, dim=c(indI, indL, indT))
    for(time_t in 1:indT){
      E_z_itl[,,time_t] <- matrix(1/indL, nrow=indI, ncol=indL)
    }
    E_z_itl_z_itm1l <- array(0, dim=c(indI, indL, indL, indT-1))
    for(i in 1:indI){
      for(time_t in 1:(indT-1)){
        E_z_itl_z_itm1l[i,,,time_t] <- 1/(indL*indL)
      }
    }
  }

  #
  # Evidence Lower Bound
  #
  llb_fun <- function(delta_ast, delta_0, omega_ast, omega_0, A_ast, A_0, B_ast, B_0, log_zeta_sum){
    A_0_unlist <- unlist(A_0)
    B_0_unlist <- unlist(B_0)
    A_ast_unlist <- unlist(A_ast)
    B_ast_unlist <- unlist(B_ast)
    tmp1 <- sum(lbeta(A_ast_unlist, B_ast_unlist) - lbeta(A_0_unlist, B_0_unlist) +
                  (A_0_unlist - A_ast_unlist)*(digamma(A_ast_unlist) - digamma(A_ast_unlist + B_ast_unlist)) +
                  (B_0_unlist - B_ast_unlist)*(digamma(B_ast_unlist) - digamma(A_ast_unlist + B_ast_unlist)))
    tmp2 <- (sum(lgamma(delta_ast)) - lgamma(sum(delta_ast))) -
      (sum(lgamma(delta_0)) - lgamma(sum(delta_0))) +
      sum((delta_0 - delta_ast)*(digamma(delta_ast) - digamma(sum(delta_ast))))
    tmp3 <- 0
    for(l in 1:indL){
      tmp3 <- tmp3 + (sum(lgamma(omega_ast[l,])) - lgamma(sum(omega_ast[l,]))) -
        (sum(lgamma(omega_0[l,])) - lgamma(sum(omega_0[l,]))) +
        sum((omega_0[l,] - omega_ast[l,])*(digamma(omega_ast[l,]) - digamma(sum(omega_ast[l,]))))
    }
    tmp1 + tmp2 + tmp3 + log_zeta_sum
  }

  #
  # Make objects for variational parameters
  #
  E_log_theta <- E_log_1_theta <- B_ast <- A_ast <- A_0
  delta_ast <- delta_0
  omega_ast <- omega_0
  b_z_it <- f_z_it <- array(0, dim=c(indI, indL, indT))
  gamma_t_x_it <- matrix(0, nrow=indI, ncol=indT)
  P_til_x_it_z_it <- array(0, dim=c(indI, indL, indT))
  m = 1
  l_lb = rep(0, max_it+1)
  l_lb[1] = -Inf

  for(m in 1:max_it){
    #
    # M-step and Calculation of Expectations
    #
    delta_ast <- colSums(E_z_itl[,,1]) + delta_0
    omega_ast <- apply(E_z_itl_z_itm1l, c(2,3), sum) + omega_0 # Check this point
    E_log_pi = digamma(delta_ast) - digamma(sum(delta_ast))
    E_log_tau = digamma(omega_ast) - digamma(rowSums(omega_ast))
    for(time_t in 1:indT){
      A_ast[[time_t]] <- lapply(1:indJt[[time_t]], function(j)
        t(G_jt[[time_t]][[j]] %*% (t(E_z_itl[,,time_t]) %*% X[[time_t]][,j]) + A_0[[time_t]][[j]]))
      B_ast[[time_t]] <- lapply(1:indJt[[time_t]], function(j)
        t(G_jt[[time_t]][[j]] %*% (t(E_z_itl[,,time_t]) %*% (1-X[[time_t]][,j])) + B_0[[time_t]][[j]]))
      E_log_theta[[time_t]] = lapply(1:indJt[[time_t]], function(j)
        digamma(A_ast[[time_t]][[j]]) - digamma(A_ast[[time_t]][[j]] + B_ast[[time_t]][[j]]))
      E_log_1_theta[[time_t]] = lapply(1:indJt[[time_t]], function(j)
        digamma(B_ast[[time_t]][[j]]) - digamma(A_ast[[time_t]][[j]] + B_ast[[time_t]][[j]]))
    }

    #
    # E-step: f_z_it and b_z_it
    #
    for(time_t in 1:indT){
      temp <- matrix(0, nrow = indI, ncol=indL)
      for(j in 1:indJt[time_t]){
        temp <- temp + (X[[time_t]][,j] %*% E_log_theta[[time_t]][[j]] +
                          (1-X[[time_t]][,j]) %*% E_log_1_theta[[time_t]][[j]]) %*% G_jt[[time_t]][[j]]
      }
      P_til_x_it_z_it[,,time_t] <- exp(temp)
    }
    f_z_it[,,1] <- exp(t(log(t(P_til_x_it_z_it[,,1])) + E_log_pi))
    gamma_t_x_it[,1] <- rowSums(f_z_it[,,1])
    f_z_it[,,1] <- f_z_it[,,1]/gamma_t_x_it[,1] # Normalize
    b_z_it[,,indT] <- 1

    #
    # Recursive calculation
    #
    for(time_t in 2:indT){
      # calc f
      f_z_it[,,time_t] <- P_til_x_it_z_it[,,time_t] * (f_z_it[,,time_t-1] %*% exp(E_log_tau))
      gamma_t_x_it[,time_t] <- rowSums(f_z_it[,,time_t])
      f_z_it[,,time_t] <- f_z_it[,,time_t]/gamma_t_x_it[,time_t] # Normalize
      # calc b
      b_z_it[,,indT - time_t + 1] <- (P_til_x_it_z_it[,,indT - time_t + 2] * b_z_it[,,indT - time_t + 2]) %*% t(exp(E_log_tau))
      b_z_it[,,indT - time_t + 1] <- b_z_it[,,indT - time_t + 1] / rowSums(b_z_it[,,indT - time_t + 1])
    }
    E_z_itl <- f_z_it*b_z_it
    E_z_itl_temp <- apply(E_z_itl, c(1,3), sum)
    for(time_t in 1:indT){
      E_z_itl[,,time_t] <- E_z_itl[,,time_t]/E_z_itl_temp[,time_t]
    }
    for(l in 1:indL){
      for(time_t in 2:indT){
        E_z_itl_z_itm1l[,l,,time_t-1] <- t(t(P_til_x_it_z_it[,,time_t]*b_z_it[,,time_t]*f_z_it[,l,time_t-1]) * exp(E_log_tau[l,]))
      }
    }
    E_z_itl_z_itm1l_temp <- apply(E_z_itl_z_itm1l, c(1,4), sum)
    for(i in 1:indI){
      for(time_t in 1:(indT-1)){
        E_z_itl_z_itm1l[i,,,time_t] <- E_z_itl_z_itm1l[i,,,time_t]/E_z_itl_z_itm1l_temp[i,time_t]
      }
    }
    log_zeta_sum <- sum(log(gamma_t_x_it))
    l_lb[m+1] <- llb_fun(delta_ast, delta_0, omega_ast, omega_0, A_ast, A_0, B_ast, B_0, log_zeta_sum)
    if(verbose){
      cat("\riteration = ", m+1, sprintf(",last change = %.05f", abs(l_lb[m] - l_lb[m+1])))
    }
    if(abs(l_lb[m] - l_lb[m+1]) < epsilon){
      if(verbose){
        cat("\nreached convergence.\n")
      }
      break()
    }
  }
  l_lb <- l_lb[-1]

  #
  # Calculation of mean and sd of VB posteriors
  #
  delta_sum <- sum(delta_ast)
  pi_est <- delta_ast/delta_sum
  pi_sd <- sqrt(delta_ast*(delta_sum - delta_ast)/(delta_sum^2*(delta_sum+1)))
  names(pi_est) <- att_pat
  names(pi_sd) <- att_pat
  omega_sum <- rowSums(omega_ast)
  Tau_est <- omega_ast/omega_sum
  Tau_sd <- matrix(0, indL, indL)
  for(l in 1:indL) Tau_sd[,l] <- sqrt(omega_ast[,l]*(omega_sum - omega_ast[,l])/(omega_sum^2*(omega_sum+1)))
  colnames(Tau_est) <- att_pat
  row.names(Tau_est) <- att_pat
  colnames(Tau_sd) <- att_pat
  row.names(Tau_sd) <- att_pat
  theta_sd <- theta_est <- vector("list", indT)
  for(time_t in 1:indT){
    theta_est[[time_t]] <- mapply(function(x,y) x/(x+y), A_ast[[time_t]], B_ast[[time_t]])
    theta_sd[[time_t]] <- mapply(function(x,y) sqrt((x*y)/(((x+y)^2)*(x+y+1))), A_ast[[time_t]], B_ast[[time_t]])
  }

  #
  # MAP and EAP of attribute mastery.
  #
  post_max_class <- matrix(0, nrow=indI, ncol=indT)
  EAP_att_pat <- att_master_prob <- MAP_att_pat <- lapply(1:indT, function(time_t) matrix(0, nrow=indI, ncol=indK))
  for(time_t in 1:indT){
    post_max_class[,time_t] <- apply(E_z_itl[,,time_t], 1, function(x)which.max(x))
    MAP_att_pat[[time_t]] <- A[post_max_class[,time_t],]
    att_master_prob[[time_t]] <- E_z_itl[,,time_t] %*% A
    EAP_att_pat[[time_t]] <- (att_master_prob[[time_t]] > 0.5)*1
  }
  list(theta_est = theta_est, theta_sd = theta_sd,
       pi_est = pi_est, pi_sd = pi_sd,
       Tau_est = Tau_est, Tau_sd = Tau_sd,
       post_max_class = post_max_class,
       MAP_att_pat = MAP_att_pat,
       att_master_prob = att_master_prob,
       EAP_att_pat = EAP_att_pat,
       A_ast = A_ast, B_ast = B_ast,
       delta_ast = delta_ast, omega_ast = omega_ast,
       E_z_itl = E_z_itl, E_z_itl_z_itm1l = E_z_itl_z_itm1l,
       A_0 = A_0, B_0 = B_0, delta_0 = delta_0, omega_0 = omega_0,
       l_lb = l_lb,
       gamma_t_x_it = gamma_t_x_it, log_zeta_sum = log_zeta_sum,
       A = A, Q = Q, X = X, G_jt = G_jt, m = m)
}

hmdcm_vb_nondec = function(
  X, Q,
  A_0 = NULL, B_0 = NULL, delta_0 = NULL, omega_0 = NULL,
  max_it = 500, epsilon = 10E-4,
  random_block_design = FALSE, Test_versions = NULL, Test_order = NULL,
  measurement_model = "general",
  random_start = FALSE, verbose = TRUE
){
  if(!(random_block_design)){
    stop("sorry, current version does not support the case where random_block_design is false when nondecreasing_attribute is true.\n")
  }
  indI <- sapply(X, nrow)[1]  # Assume all individuals take all tests.
  indK <- ncol(Q[[1]])        # Assume attributes are the same across time and individuals.
  indT <- length(Q)           # Assume time points are the same across individuals.
  indJt <- sapply(Q, nrow)    # Assume the number of items presented at a time is the same across time.
  indL <- 2^indK

  #
  # All attribute pattern matrix
  #
  not_zero_q_t <- lapply(Q, function(y)apply(y, 1, function(x) which(x != 0)))
  A <- as.matrix(expand.grid(lapply(1:indK, function(x)rep(0:1))))
  A_jt <- lapply(not_zero_q_t, function(y)lapply(y, function(x) A[,x,drop=F]))
  A_red <- lapply(A_jt, function(z)lapply(z, function(x) apply(x, 1, function(y) paste0(y, collapse = ""))))
  # Unique correct item response probability label for each time point and item.
  A_red_uni <- lapply(A_jt, function(z)lapply(z, function(x) unique(apply(x, 1, function(y) paste0(y, collapse = "")))))

  #
  # Make G-matrix
  #
  if(measurement_model == "general"){
    G_jt <- lapply(1:indT, function(time_t)lapply(1:indJt[[time_t]], function(j)
      outer(A_red_uni[[time_t]][[j]], A_red[[time_t]][[j]], function(x,y) (x == y)*1)))
    att_pat <- apply(A, 1, function(x) paste0(x, collapse=""))
    for(time_t in 1:indT){
      for(j in 1:indJt[time_t]) {
        colnames(G_jt[[time_t]][[j]]) <- att_pat
        row.names(G_jt[[time_t]][[j]]) <- A_red_uni[[time_t]][[j]]
      }
    }
  }else if(measurement_model == "dina"){
    G_jt <- lapply(1:indT, function(time_t)lapply(1:indJt[time_t], function(j)matrix(0, ncol=indL, nrow=2)))
    att_pat <- apply(A, 1, function(x) paste0(x, collapse=""))
    for(time_t in 1:indT){
      for(j in 1:indJt[time_t]) {
        temp_eta <- apply(t(t(A) ^ Q[[time_t]][j,]), 1, prod)
        G_jt[[time_t]][[j]][1,] <- 1 - temp_eta
        G_jt[[time_t]][[j]][2,] <- temp_eta
        colnames(G_jt[[time_t]][[j]]) <- att_pat
        row.names(G_jt[[time_t]][[j]]) <- c("0","1")
      }
    }
  } else {
    stop("Error: Specify model general or dina.\n")
  }

  #
  # Hyper parameter
  #
  if(is.null(delta_0)){
    delta_0 = rep(1, indL) # For π
  }
  if(is.null(omega_0)){
    omega_0 = matrix(1, indL, indL) # For Tau matrix
    for(l in 1:indL){
      for(ld in 1:indL){
        dif_pat <- A[l,] - A[ld,]
        omega_0[l,ld] <- ifelse(any(dif_pat > 0), 0, 1)
      }
    }
  }

  #
  # Weak monotonicity constraint
  #
  number_of_attributes <- lapply(A_red_uni, function(y)lapply(y, function(x) sapply(strsplit(x, ""), function(y)sum(as.numeric(y)))))
  if(measurement_model == "dina") {
    number_of_attributes <- lapply(1:indT, function(time_t){lapply(1:indJt[time_t], function(j)c(0,1))})
  }
  if(is.null(A_0)){
    A_0_hyperparam <- lapply(number_of_attributes, function(x)seq(from = 1+epsilon, to = 2, length.out = max(unlist(x))+1))
    A_0 <- vector("list", length = indT)
    for(time_t in 1:indT){
      A_0[[time_t]] <- lapply(number_of_attributes[[time_t]], function(x){A_0_hyperparam[[time_t]][x + 1]})
    }
  }
  if(is.null(B_0)){
    B_0_hyperparam <- lapply(number_of_attributes, function(x)seq(from = 2, to = 1+epsilon, length.out = max(unlist(x))+1))
    B_0 <- vector("list", length = indT)
    for(time_t in 1:indT){
      B_0[[time_t]] <- lapply(number_of_attributes[[time_t]], function(x){B_0_hyperparam[[time_t]][x + 1]})
    }
  }

  #
  # Initialization
  #
  if(random_start == TRUE){
    E_z_itl_temp <- lapply(1:indT, function(time_t)matrix(stats::runif(indI*indL), ncol=indL, nrow = indI))
    E_z_itl_temp <- lapply(E_z_itl_temp, function(x)diag(1/rowSums(x)) %*% x)
    E_z_itl <- array(0, dim=c(indI, indL, indT))
    for(time_t in 1:indT){
      E_z_itl[,,time_t] <- E_z_itl_temp[[time_t]]
    }
    E_z_itl_z_itm1l <- array(0, dim=c(indI, indL, indL, indT-1))
    for(i in 1:indI){
      for(time_t in 1:(indT-1)){
        E_z_itl_z_itm1l_temp <- matrix(stats::runif(indL*indL), ncol=indL, nrow = indL)
        E_z_itl_z_itm1l_temp[omega_0 == 0] <- 0
        E_z_itl_z_itm1l_temp <- E_z_itl_z_itm1l_temp/sum(E_z_itl_z_itm1l_temp)
        E_z_itl_z_itm1l[i,,,time_t] <- E_z_itl_z_itm1l_temp
      }
    }
  }else{
    E_z_itl <- array(0, dim=c(indI, indL, indT))
    for(time_t in 1:indT){
      E_z_itl[,,time_t] <- matrix(1/indL, nrow=indI, ncol=indL)
    }
    E_z_itl_z_itm1l <- array(0, dim=c(indI, indL, indL, indT-1))
    for(i in 1:indI){
      for(time_t in 1:(indT-1)){
        E_z_itl_z_itm1l_temp <- matrix(1/(indL*indL), indL, indL)
        E_z_itl_z_itm1l_temp[omega_0 == 0] <- 0
        E_z_itl_z_itm1l[i,,,time_t] <- E_z_itl_z_itm1l_temp/sum(E_z_itl_z_itm1l_temp)
      }
    }
  }

  #
  # Evidence Lower Bound
  #
  llb_fun <- function(delta_ast, delta_0, omega_ast, omega_0, A_ast, A_0, B_ast, B_0, log_zeta_sum){
    A_0_unlist <- unlist(A_0)
    B_0_unlist <- unlist(B_0)
    A_ast_unlist <- unlist(A_ast)
    B_ast_unlist <- unlist(B_ast)
    tmp1 <- sum(lbeta(A_ast_unlist, B_ast_unlist) - lbeta(A_0_unlist, B_0_unlist) +
                  (A_0_unlist - A_ast_unlist)*(digamma(A_ast_unlist) - digamma(A_ast_unlist + B_ast_unlist)) +
                  (B_0_unlist - B_ast_unlist)*(digamma(B_ast_unlist) - digamma(A_ast_unlist + B_ast_unlist)))
    tmp2 <- (sum(lgamma(delta_ast)) - lgamma(sum(delta_ast))) -
      (sum(lgamma(delta_0)) - lgamma(sum(delta_0))) +
      sum((delta_0 - delta_ast)*(digamma(delta_ast) - digamma(sum(delta_ast))))
    tmp3 <- 0
    for(l in 1:indL){
      omega_not_0 <- omega_0[l,] != 0
      tmp3 <- tmp3 + (sum(lgamma(omega_ast[l,omega_not_0])) - lgamma(sum(omega_ast[l,omega_not_0]))) -
        (sum(lgamma(omega_0[l,omega_not_0])) - lgamma(sum(omega_0[l,omega_not_0]))) +
        sum((omega_0[l,omega_not_0] - omega_ast[l,omega_not_0])*(digamma(omega_ast[l,omega_not_0]) - digamma(sum(omega_ast[l,omega_not_0]))))
    }
    tmp1 + tmp2 + tmp3 + log_zeta_sum
  }

  #
  # Make objects for variational parameters
  #
  E_log_theta <- E_log_1_theta <- B_ast <- A_ast <- A_0
  delta_ast <- delta_0
  omega_ast <- omega_0
  omega_zero_elem <- omega_0 == 0
  b_z_it <- f_z_it <- array(0, dim=c(indI, indL, indT))
  gamma_t_x_it <- matrix(0, nrow=indI, ncol=indT)
  P_til_x_it_z_it <- array(0, dim=c(indI, indL, indT))
  X_reord <- X
  for(i in 1:indI){
    for(time_t in 1:indT){
      X_reord[[Test_order[Test_versions[i], time_t]]][i,] <- X[[time_t]][i,]
    }
  }
  m = 1
  l_lb = rep(0, max_it+1)
  l_lb[1] = -Inf

  for(m in 1:max_it){
    #
    # M-step and Calculation of Expectations
    #
    delta_ast <- colSums(E_z_itl[,,1]) + delta_0
    omega_ast <- apply(E_z_itl_z_itm1l, c(2,3), sum) + omega_0 # Check this point
    E_log_pi = digamma(delta_ast) - digamma(sum(delta_ast))
    E_log_tau = try(digamma(omega_ast), silent = T) - digamma(rowSums(omega_ast))
    E_log_tau[omega_zero_elem] <- 0

    #
    # Reorder
    #
    E_z_itl_reord <- E_z_itl
    for(i in 1:indI){
      for(time_t in 1:indT){
        E_z_itl_reord[i,,Test_order[Test_versions[i], time_t]] <- E_z_itl[i,,time_t]
      }
    }
    for(time_t in 1:indT){
      A_ast[[time_t]] <- lapply(1:indJt[[time_t]], function(j)
        t(G_jt[[time_t]][[j]] %*% (t(E_z_itl_reord[,,time_t]) %*% X_reord[[time_t]][,j]) + A_0[[time_t]][[j]]))
      B_ast[[time_t]] <- lapply(1:indJt[[time_t]], function(j)
        t(G_jt[[time_t]][[j]] %*% (t(E_z_itl_reord[,,time_t]) %*% (1-X_reord[[time_t]][,j])) + B_0[[time_t]][[j]]))
      E_log_theta[[time_t]] = lapply(1:indJt[[time_t]], function(j)
        digamma(A_ast[[time_t]][[j]]) - digamma(A_ast[[time_t]][[j]] + B_ast[[time_t]][[j]]))
      E_log_1_theta[[time_t]] = lapply(1:indJt[[time_t]], function(j)
        digamma(B_ast[[time_t]][[j]]) - digamma(A_ast[[time_t]][[j]] + B_ast[[time_t]][[j]]))
    }

    #
    # E-step
    #
    for(time_t in 1:indT){
      temp <- matrix(0, nrow = indI, ncol=indL)
      for(i in 1:indI){
        for(j in 1:indJt[time_t]){
          temp[i,] <- temp[i,] + (X[[time_t]][i,j] * E_log_theta[[Test_order[Test_versions[i], time_t]]][[j]] +
                                    (1-X[[time_t]][i,j]) * E_log_1_theta[[Test_order[Test_versions[i], time_t]]][[j]]) %*%
            G_jt[[Test_order[Test_versions[i], time_t]]][[j]]
        }
      }
      P_til_x_it_z_it[,,time_t] <- exp(temp)
    }
    f_z_it[,,1] <- exp(t(log(t(P_til_x_it_z_it[,,1])) + E_log_pi))
    gamma_t_x_it[,1] <- rowSums(f_z_it[,,1])
    f_z_it[,,1] <- f_z_it[,,1]/gamma_t_x_it[,1] # Normalize
    b_z_it[,,indT] <- 1

    #
    # Recursive calculation
    #
    exp_E_log_tau <- exp(E_log_tau)
    exp_E_log_tau[omega_zero_elem] <- 0
    for(time_t in 2:indT){
      # calc f
      f_z_it[,,time_t] <- P_til_x_it_z_it[,,time_t] * (f_z_it[,,time_t-1] %*% exp_E_log_tau)
      gamma_t_x_it[,time_t] <- rowSums(f_z_it[,,time_t])
      f_z_it[,,time_t] <- f_z_it[,,time_t]/gamma_t_x_it[,time_t] # Normalize
      # calc b
      b_z_it[,,indT - time_t + 1] <- (P_til_x_it_z_it[,,indT - time_t + 2] * b_z_it[,,indT - time_t + 2]) %*% t(exp_E_log_tau)
      b_z_it[,,indT - time_t + 1] <- b_z_it[,,indT - time_t + 1] / rowSums(b_z_it[,,indT - time_t + 1])
    }
    E_z_itl <- f_z_it*b_z_it
    E_z_itl_temp <- apply(E_z_itl, c(1,3), sum)
    for(time_t in 1:indT){
      E_z_itl[,,time_t] <- E_z_itl[,,time_t]/E_z_itl_temp[,time_t]
    }
    for(l in 1:indL){
      for(time_t in 2:indT){
        E_z_itl_z_itm1l[,l,,time_t-1] <- t(t(P_til_x_it_z_it[,,time_t]*b_z_it[,,time_t]*f_z_it[,l,time_t-1]) * exp_E_log_tau[l,])
      }
    }
    E_z_itl_z_itm1l_temp <- apply(E_z_itl_z_itm1l, c(1,4), sum)
    for(i in 1:indI){
      for(time_t in 1:(indT-1)){
        E_z_itl_z_itm1l[i,,,time_t] <- E_z_itl_z_itm1l[i,,,time_t]/E_z_itl_z_itm1l_temp[i,time_t]
      }
    }
    log_zeta_sum <- sum(log(gamma_t_x_it))
    l_lb[m+1] <- llb_fun(delta_ast, delta_0, omega_ast, omega_0, A_ast, A_0, B_ast, B_0, log_zeta_sum)
    if(verbose){
      cat("\riteration = ", m+1, sprintf(",last change = %.05f", abs(l_lb[m] - l_lb[m+1])))
    }
    if(abs(l_lb[m] - l_lb[m+1]) < epsilon){
      if(verbose){
        cat("\nreached convergence.\n")
      }
      break()
    }
  }
  l_lb <- l_lb[-1]

  #
  # Calculation of mean and sd of VB posteriors
  #
  delta_sum <- sum(delta_ast)
  pi_est <- delta_ast/delta_sum
  pi_sd <- sqrt(delta_ast*(delta_sum - delta_ast)/(delta_sum^2*(delta_sum+1)))
  names(pi_est) <- att_pat
  names(pi_sd) <- att_pat
  omega_sum <- rowSums(omega_ast)
  Tau_est <- omega_ast/omega_sum
  Tau_sd <- matrix(0, indL, indL)
  for(l in 1:indL) Tau_sd[,l] <- sqrt(omega_ast[,l]*(omega_sum - omega_ast[,l])/(omega_sum^2*(omega_sum+1)))
  colnames(Tau_est) <- att_pat
  row.names(Tau_est) <- att_pat
  colnames(Tau_sd) <- att_pat
  row.names(Tau_sd) <- att_pat
  theta_sd <- theta_est <- vector("list", indT)
  for(time_t in 1:indT){
    theta_est[[time_t]] <- mapply(function(x,y) x/(x+y), A_ast[[time_t]], B_ast[[time_t]])
    theta_sd[[time_t]] <- mapply(function(x,y) sqrt((x*y)/(((x+y)^2)*(x+y+1))), A_ast[[time_t]], B_ast[[time_t]])
  }

  #
  # MAP and EAP of attribute mastery.
  #
  post_max_class <- matrix(0, nrow=indI, ncol=indT)
  EAP_att_pat <- att_master_prob <- MAP_att_pat <- lapply(1:indT, function(time_t) matrix(0, nrow=indI, ncol=indK))
  for(time_t in 1:indT){
    post_max_class[,time_t] <- apply(E_z_itl[,,time_t], 1, function(x)which.max(x))
    MAP_att_pat[[time_t]] <- A[post_max_class[,time_t],]
    att_master_prob[[time_t]] <- E_z_itl[,,time_t] %*% A
    EAP_att_pat[[time_t]] <- (att_master_prob[[time_t]] > 0.5)*1
  }
  list(theta_est = theta_est, theta_sd = theta_sd,
       pi_est = pi_est, pi_sd = pi_sd,
       Tau_est = Tau_est, Tau_sd = Tau_sd,
       post_max_class = post_max_class,
       MAP_att_pat = MAP_att_pat,
       att_master_prob = att_master_prob,
       EAP_att_pat = EAP_att_pat,
       A_ast = A_ast, B_ast = B_ast,
       delta_ast = delta_ast, omega_ast = omega_ast,
       E_z_itl = E_z_itl, E_z_itl_z_itm1l = E_z_itl_z_itm1l,
       A_0 = A_0, B_0 = B_0, delta_0 = delta_0, omega_0 = omega_0,
       l_lb = l_lb,
       gamma_t_x_it = gamma_t_x_it, log_zeta_sum = log_zeta_sum,
       A = A, Q = Q, X = X, G_jt = G_jt, m = m)
}

hm_dcm = function(
  X, Q,
  max_it = 500,
  epsilon = 1e-04,
  nondecreasing_attribute = FALSE,
  measurement_model = "general",
  verbose = TRUE,
  random_block_design = FALSE,
  Test_versions = NULL,
  Test_order = NULL,
  random_start = FALSE,
  # hyperparameters
  A_0 = NULL, B_0 = NULL, delta_0 = NULL, omega_0 = NULL
){
  # convert X,Q to list
  if(length(dim(X)) == 3){
    X = lapply(1:dim(X)[3], function(i) X[,,i])
  }
  if(length(dim(Q)) == 3){
    Q = lapply(1:dim(Q)[3], function(i) Q[,,i])
  }
  if(random_block_design){
    if(is.null(Test_versions) || is.null(Test_order)){
      stop("if random_block_design is true, Test_versions and Test_order must be entered.\n")
    }
  }
  if(!nondecreasing_attribute){
    res = hmdcm_vb(X=X, Q=Q, max_it=max_it, epsilon=epsilon, measurement_model=measurement_model,
                   random_block_design=random_block_design,
                   Test_versions=Test_versions, Test_order=Test_order,
                   verbose=verbose, random_start=random_start, A_0=A_0, B_0=B_0,
                   delta_0=delta_0, omega_0=omega_0)
  }else{
    res = hmdcm_vb_nondec(X=X, Q=Q, max_it=max_it, epsilon=epsilon, measurement_model=measurement_model,
                          random_block_design=random_block_design,
                          Test_versions=Test_versions, Test_order=Test_order,
                          verbose=verbose, random_start=random_start, A_0=A_0, B_0=B_0,
                          delta_0=delta_0, omega_0=omega_0)
  }
  model_params = list(
    theta_est = res$theta_est,
    theta_sd = res$theta_sd
  )
  res = list(model_params = model_params,
             pi_est = res$pi_est,
             pi_sd = res$pi_sd,
             Tau_est = res$Tau_est,
             Tau_sd = res$Tau_sd,
             post_max_class = res$post_max_class,
             att_pat_est = res$MAP_att_pat,
             att_master_prob = res$att_master_prob,
             EAP_att_pat = res$EAP_att_pat,
             A_ast = res$A_ast,
             B_ast = res$B_ast,
             delta_ast = res$delta_ast,
             omega_ast = res$omega_ast,
             E_z_itl = res$E_z_itl,
             E_z_itl_z_itm1l = res$E_z_itl_z_itm1l,
             A_0 = res$A_0,
             B_0 = res$B_0,
             delta_0 = res$delta_0,
             omega_0 = res$omega_0,
             l_lb = res$l_lb[res$l_lb != 0],
             G_jt = res$G_jt,
             m = res$m)
  return(res)
}
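# Usage sketch (illustrative, not part of the package source): simulate
# longitudinal responses with hm_dcm_data_gen() and fit them with the
# hm_dcm() wrapper defined above.
# indT <- 3
# Q <- lapply(1:indT, function(t) sim_Q_J30K3)
# sim <- hm_dcm_data_gen(I = 200, Q = Q)
# fit <- hm_dcm(X = sim$X, Q = Q, verbose = FALSE)
# fit$Tau_est                        # estimated attribute-transition matrix
# fit$model_params$theta_est[[1]]    # item parameters at the first time point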
/scratch/gouwar.j/cran-all/cranData/variationalDCM/R/hm_dcm.R
#
# Make G matrix function -----
#
make_G_mat <- function(Q){
  J <- max(Q[,1])
  K <- ncol(Q) - 2
  L <- 2^K
  H <- lapply(1:J, function(x) Q[Q[,1] == x, 2])
  #
  # All attribute pattern matrix
  #
  A <- as.matrix(expand.grid(lapply(1:K, function(x)rep(0:1))))
  G_mat <- vector("list", J)
  for(j in 1:J){
    Q_j <- as.matrix(Q[Q[,1] == j, -c(1:2)])
    if(all(rowSums(Q_j) != 0)){
      Q_j <- rbind(Q_j, 0)
    }
    not_0_col <- colSums(Q_j) != 0
    Q_j <- Q_j[, not_0_col, drop = F]
    A_temp <- A[, not_0_col, drop = F]
    G_j <- matrix(0, nrow=nrow(Q_j), ncol=L)
    q_patt <- apply(Q_j[rowSums(Q_j) != 0, , drop = F], 1, function(x)paste0(x, collapse=""))
    alp_patt <- apply(A_temp, 1, function(x)paste0(x, collapse=""))
    alp_patt_not_in_q_patt <- !(alp_patt %in% q_patt)
    for(h in H[[j]]){
      for(l in 1:L){
        if(any(Q_j[h,] == 1)){
          G_j[h,l] <- all(Q_j[h,] == A_temp[l,])*1
        } else {
          G_j[h,l] <- alp_patt_not_in_q_patt[l]*1
        }
      }
    }
    G_mat[[j]] <- G_j
  }
  list(G_mat = G_mat, H = H, A = A)
}

#' @title Artificial data generating function for the multiple-choice DINA model based on the given Q-matrix
#'
#' @description \code{mc_dina_data_gen()} returns the artificially generated item response data for the MC-DINA model
#'
#' @param Q the \eqn{J \times K} binary matrix
#' @param I the number of assumed respondents
#' @param att_cor the true value of the correlation among attributes (default: 0.1)
#' @param seed the seed value used for random number generation (default: 17)
#' @return A list including:
#' \describe{
#'   \item{X}{the generated artificial item response data}
#'   \item{att_pat}{the generated true value of the attribute mastery pattern}
#' }
#' @references Yamaguchi, K. (2020). Variational Bayesian inference for the
#'   multiple-choice DINA model. \emph{Behaviormetrika}, 47(1), 159-187.
#'   \doi{10.1007/s41237-020-00104-w}
#'
#' @examples
#' # load a simulated Q-matrix
#' mc_Q = mc_sim_Q
#' mc_sim_data = mc_dina_data_gen(Q=mc_Q,I=200)
#'
#' @export
#
# Data generation function ------
#
mc_dina_data_gen <- function(I, Q, att_cor = 0.1, seed = 17){
  set.seed(seed)
  J <- max(Q[,1])
  K = ncol(Q) - 2
  L = 2^K
  match_p = 0.80
  att_threshold = "rand"
  tmp <- make_G_mat(Q)
  A <- tmp$A
  H <- tmp$H
  G_mat <- tmp$G_mat
  #
  # item parameters
  #
  i_par <- vector("list", J)
  not_match = "equal"
  if(not_match == "equal"){
    for(j in 1:J){
      Q_j <- as.matrix(Q[Q[,1] == j, -c(1:2)])
      not_0_col <- colSums(Q_j) != 0
      Q_j <- Q_j[, not_0_col, drop = F]
      A_temp <- A[, not_0_col, drop = F]
      q_patt <- apply(Q_j[rowSums(Q_j) != 0, , drop = F], 1, function(x)paste0(x, collapse=""))
      alp_patt <- apply(A_temp, 1, function(x)paste0(x, collapse=""))
      alp_patt_not_in_q_patt <- !(alp_patt %in% q_patt)
      dim_G_mat <- dim(G_mat[[j]])
      i_par_temp <- array(NA, dim_G_mat)
      H_j <- H[[j]]
      for(h in H_j){
        for(l in 1:L){
          if(alp_patt_not_in_q_patt[l]){
            # not included
            i_par_temp[h,l] <- 1/nrow(Q_j)
          }else if(!alp_patt_not_in_q_patt[l] & (G_mat[[j]][h,l] == 1)){
            # match
            i_par_temp[h,l] <- match_p
          }else{
            # others
            i_par_temp[h,l] <- (1-match_p)/(nrow(Q_j) - 1)
          }
        }
      }
      i_par[[j]] <- i_par_temp
    }
  }else if (not_match == "rand"){
    for(j in 1:J){
      dim_G_mat <- dim(G_mat[[j]])
      i_par_temp <- array(NA, dim_G_mat)
      match_p_temp <- stats::runif(dim_G_mat[1], min=0.5, max=1.0)
      i_par_temp <- diag(match_p_temp)
      i_par_temp[lower.tri(i_par_temp)] <- stats::runif(sum(lower.tri(i_par_temp)), min=0, max=0.5)
      i_par_temp[upper.tri(i_par_temp)] <- stats::runif(sum(upper.tri(i_par_temp)), min=0, max=0.5)
      i_par_temp[,ncol(i_par_temp)] <- stats::runif(nrow(i_par_temp), min=0.2, max=0.8)
      i_par_temp <- t(t(i_par_temp)/colSums(i_par_temp))
      i_par[[j]] <- i_par_temp %*% G_mat[[j]]
    }
  } else {
    stop("Error: You should specify not_match as \"equal\" or \"rand\" ")
  }
  sigma <- diag(K)
  sigma[sigma == 0] <- att_cor
  if(any(att_threshold == "rand")){
    att_threshold = stats::rnorm(K, 0, 1)
  } else if(is.null(att_threshold)) {
    att_threshold = rep(0, K)
  } else if(is.vector(att_threshold)){
  }
  att_value <- mvtnorm::rmvnorm(n = I, mean = rep(0, K), sigma = sigma)
  att_pat <- t((t(att_value) > att_threshold)*1)
  att_all_pat <- apply(A, 1, function(x)paste0(x, collapse = ""))
  cluss_num <- apply(att_pat, 1, function(x) which(paste0(x, collapse = "") == att_all_pat))
  Z <- matrix(0, ncol=L, nrow=I)
  for(i in 1:I){
    Z[i, cluss_num[i]] <- 1
  }
  X <- matrix(NA, ncol=J, nrow = I)
  for(i in 1:I){
    for(j in 1:J){
      jh_prob <- i_par[[j]] %*% Z[i, ]
      X[i,j] <- sample(x = H[[j]], size = 1, prob = jh_prob)
    }
  }
  list(X = X, att_pat = att_pat)
}

extend_X <- function(X){
  I <- nrow(X)
  J <- ncol(X)
  H_max <- max(X, na.rm = T)
  X_ijh <- array(NA, dim = c(I, J, H_max))
  for(i in 1:I){
    tmp <- matrix(0, nrow=J, ncol=H_max)
    for(j in 1:J){
      tmp[j, X[i,j]] <- 1
    }
    X_ijh[i,,] <- tmp
  }
  X_ijh
}

#
# VB script
#
mc_dina = function(
  X, Q,
  max_it = 500,
  epsilon = 1e-04,
  verbose = TRUE,
  # hyperparameter
  delta_0 = NULL,
  a_0 = NULL
){
  if(!inherits(X, "matrix")){
    X <- as.matrix(X)
  }
  if(!inherits(Q, "matrix")){
    Q <- as.matrix(Q)
  }
  # Index
  I <- nrow(X)
  J <- ncol(X)
  K <- ncol(Q) - 2
  L <- 2^K
  #
  # G_matrix
  #
  tmp <- make_G_mat(Q)
  A <- tmp$A
  H <- tmp$H
  G_mat <- tmp$G_mat
  #
  # Data convert
  #
  X_ijh <- extend_X(X)
  #
  # Hyper parameters
  #
  if(is.null(delta_0)){
    delta_0 = rep(1, L)
  }
  if(is.null(a_0)){
    a_0 = lapply(1:J, function(j) matrix(1, nrow = max(H[[j]]), ncol=nrow(G_mat[[j]])))
  }
  #
  # Objects for estimates
  #
  delta_ast <- as.numeric(table(sample(1:L, replace = T, size = I)))
  a_ast <- lapply(1:J, function(j) matrix(0, nrow = max(H[[j]]), ncol=nrow(G_mat[[j]])))
  for(j in 1:J){
    n_h <- ncol(a_ast[[j]])
    for(h in 1:n_h){
      if(h != n_h){
        prob <- rep(1, n_h)
        prob[h] <- 2*prob[h]
        prob <- prob/sum(prob)
        a_ast[[j]][,h] <- table(sample(1:n_h, replace = T, size = I, prob = prob))
      } else{
        a_ast[[j]][,h] <- table(sample(1:n_h, replace = T, size = I))
      }
    }
  }
  r_il <- matrix(1/L, ncol=L, nrow = I)
  one_vec = matrix(1, nrow=I, ncol=1)
  m = 1
  l_lb = rep(0, max_it+1)
  l_lb[1] = -Inf
  for(m in 1:max_it){
    #
    # Expectations
    #
    E_log_pi = digamma(delta_ast) - digamma(sum(delta_ast))
    E_log_theta_jhhd = lapply(1:J, function(j) digamma(a_ast[[j]]) -
                                matrix(1, ncol=1, nrow=nrow(a_ast[[j]])) %*% digamma(colSums(a_ast[[j]])))
    #
    # E-step: r_il
    #
    temp <- matrix(0, ncol=L, nrow = I)
    for(j in 1:J) temp <- temp + X_ijh[,j,H[[j]]] %*% E_log_theta_jhhd[[j]] %*% G_mat[[j]]
    log_rho_il <- t(t(temp) + E_log_pi)
    temp <- exp(log_rho_il)
    r_il = temp / rowSums(temp)
    #
    # M-step
    #
    delta_ast <- colSums(r_il) + delta_0
    a_ast <- lapply(1:J, function(j) t(X_ijh[,j,H[[j]]]) %*% t(G_mat[[j]] %*% t(r_il)) + a_0[[j]])
    temp_order_l <- sample(1:L, L)
    for (l in temp_order_l) {
      f = function(x) {
        lgamma(delta_ast[l] - delta_0[l] + x) - lgamma(sum(delta_ast) - delta_0[l] + x) -
          lgamma(x) + digamma(sum(delta_ast) - delta_0[l] + x)
      }
      temp_max <- stats::optimise(f, maximum = T, lower = 10E-5, upper = 10E5)$maximum
      delta_ast[l] <- delta_ast[l] - delta_0[l] + temp_max
      delta_0[l] <- temp_max
    }
    temp_order_j <- sample(1:J, J)
    for(j in temp_order_j){
      for(hd in H[[j]]){
        a_0_temp <- a_0[[j]][,hd]
        a_ast_temp <- a_ast[[j]][,hd]
        temp_order_h <- sample(H[[j]], max(H[[j]]))
        for(h in temp_order_h){
          f = function(x) {
            lgamma(a_ast_temp[h] - a_0_temp[h] + x) - lgamma(sum(a_ast_temp) - a_0_temp[h] + x) -
              lgamma(x) + digamma(sum(a_ast_temp) - a_0_temp[h] + x)
          }
          temp_max <- stats::optimise(f, maximum = T, lower = 10E-5, upper = 10E5)$maximum
          a_ast_temp[h] <- a_ast_temp[h] - a_0_temp[h] + temp_max
          a_0_temp[h] <- temp_max
        }
        a_ast[[j]][,hd] <- a_ast_temp
        a_0[[j]][,hd] <- a_0_temp
      }
    }
    #
    # Evidence Lower Bound
    #
    tmp1 <- (sum(lgamma(delta_ast)) - lgamma(sum(delta_ast))) -
      (sum(lgamma(delta_0)) + lgamma(sum(delta_0)))
    tmp2 <- sum(unlist(lapply(1:J, function(j)
      (colSums(lgamma(a_ast[[j]])) - lgamma(colSums(a_ast[[j]]))) -
        (colSums(lgamma(a_0[[j]])) + lgamma(colSums(a_0[[j]]))))))
    tmp3 <- -sum(r_il*log(r_il + 10E-100))
    l_lb[m+1] <- tmp1 + tmp2 + tmp3
    if(verbose){
      cat("\riteration = ", m+1, sprintf(",last change = %.05f", abs(l_lb[m] - l_lb[m+1])))
    }
    if(abs(l_lb[m] - l_lb[m+1]) < epsilon){
      if(verbose){
        cat("\nreached convergence.\n")
      }
      break()
    }
  }
  l_lb <- l_lb[-1]
  delta_sum <- sum(delta_ast)
  pi_est <- delta_ast/delta_sum
  pi_sd <- sqrt(delta_ast*(delta_sum - delta_ast)/(delta_sum^2*(delta_sum+1)))
  a_ast_sum <- lapply(1:J, function(j) colSums(a_ast[[j]]))
  theta_est <- lapply(1:J, function(j) a_ast[[j]] %*% diag(1/a_ast_sum[[j]]))
  theta_sd <- lapply(1:J, function(j) sqrt(a_ast[[j]]*(matrix(rep(1, max(H[[j]]), ncol=1)) %*% a_ast_sum[[j]] - a_ast[[j]]) %*% diag(1/(a_ast_sum[[j]]^2*(a_ast_sum[[j]]+1)))))
  model_params = list(
    theta_est = theta_est,
    theta_sd = theta_sd
  )
  res = list(model_params = model_params,
             pi_est = pi_est,
             pi_sd = pi_sd,
             a_ast = a_ast,
             delta_ast = delta_ast,
             a_0 = a_0,
             delta_0 = delta_0,
             l_lb = l_lb[l_lb != 0],
             att_pat_est = A[apply(r_il, 1, which.max),],
             G_mat = G_mat,
             m = m)
  return(res)
}
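# Usage sketch (illustrative, not part of the package source): simulate
# multiple-choice responses with mc_dina_data_gen() and fit them with the
# internal mc_dina() routine above.
# sim <- mc_dina_data_gen(I = 200, Q = mc_sim_Q)
# fit <- mc_dina(X = sim$X, Q = mc_sim_Q, verbose = FALSE)
# fit$model_params$theta_est[[1]]  # option-selection probabilities for item 1
# head(fit$att_pat_est)            # MAP attribute mastery patterns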
/scratch/gouwar.j/cran-all/cranData/variationalDCM/R/mc_dina.R
satu_dcm = function(X, Q, max_it = 500, epsilon = 1e-04, verbose = TRUE,
                    # hyperparameters
                    delta_0 = NULL, A_0 = NULL, B_0 = NULL){

  if(!inherits(X, "matrix")){
    X <- as.matrix(X)
  }
  if(!inherits(Q, "matrix")){
    Q <- as.matrix(Q)
  }
  if(!all(X %in% c(0,1))) stop("item response data should only contain 0/1. \n")
  if(!all(Q %in% c(0,1))) stop("Q-matrix should only contain 0/1. \n")

  I <- nrow(X); J <- nrow(Q); K <- ncol(Q); L <- 2^K
  not_zero_q <- apply(Q, 1, function(x) which(x != 0))

  # Attribute pattern matrix
  A <- as.matrix(expand.grid(lapply(1:K, function(x) rep(0:1))))
  A_j <- lapply(not_zero_q, function(x) A[, x, drop = FALSE])
  A_red <- lapply(A_j, function(x) apply(x, 1, function(y) paste0(y, collapse = "")))
  A_red_uni <- lapply(A_j, function(x) unique(apply(x, 1, function(y) paste0(y, collapse = ""))))

  # Make G-matrix
  G_j <- lapply(1:J, function(j) t(sapply(A_red_uni[[j]], function(x) x == A_red[[j]]))*1)
  att_pat <- apply(A, 1, function(x) paste0(x, collapse = ""))
  for(j in 1:J){
    colnames(G_j[[j]]) <- att_pat
  }

  # Hyperparameters
  if(is.null(delta_0)){
    delta_0 = rep(1, L) # for pi
  }

  # Weakly informative priors
  number_of_attributes <- lapply(A_red_uni, function(x) sapply(strsplit(x, ""), function(y) sum(as.numeric(y))))
  if(is.null(A_0)){
    A_0_hyperparam <- seq(from = 1 + epsilon, to = 2, length.out = max(unlist(number_of_attributes)) + 1)
    A_0 <- lapply(number_of_attributes, function(x){ A_0_hyperparam[x + 1] })
  }
  if(is.null(B_0)){
    B_0_hyperparam <- seq(from = 2, to = 1 + epsilon, length.out = max(unlist(number_of_attributes)) + 1)
    B_0 <- lapply(number_of_attributes, function(x){ B_0_hyperparam[x + 1] })
  }

  # Initialization
  r_il <- matrix(1/L, ncol = L, nrow = I)
  one_vec = matrix(1, nrow = I, ncol = 1)

  # lower bound of log marginal likelihood (ELBO)
  llb_fun <- function(X, G_j, delta_ast, delta_0, A_ast, A_0, B_ast, B_0, r_il){
    tmp1 <- 0
    for(j in 1:length(G_j)){
      tmp1 <- tmp1 + sum(((X[,j] %*% (digamma(A_ast[[j]]) - digamma(A_ast[[j]] + B_ast[[j]])) +
                             (1 - X[,j]) %*% (digamma(B_ast[[j]]) - digamma(A_ast[[j]] + B_ast[[j]]))) %*% G_j[[j]]) * r_il)
    }
    tmp2 <- sum(r_il * (one_vec %*% (digamma(delta_ast) - digamma(sum(delta_ast))) - log(r_il)))
    tmp3 <- sum(lgamma(delta_ast)) - lgamma(sum(delta_ast)) -
      (sum(lgamma(delta_0)) - lgamma(sum(delta_0))) +
      sum((delta_0 - delta_ast)*(digamma(delta_ast) - digamma(sum(delta_ast))))
    A_ast_unlist <- unlist(A_ast)
    B_ast_unlist <- unlist(B_ast)
    A_0_unlist <- unlist(A_0)
    B_0_unlist <- unlist(B_0)
    tmp4 <- sum(lbeta(a = A_ast_unlist, b = B_ast_unlist) - lbeta(a = A_0_unlist, b = B_0_unlist) +
                  (A_0_unlist - A_ast_unlist)*(digamma(A_ast_unlist) - digamma(A_ast_unlist + B_ast_unlist)) +
                  (B_0_unlist - B_ast_unlist)*(digamma(B_ast_unlist) - digamma(A_ast_unlist + B_ast_unlist)))
    tmp1 + tmp2 + tmp3 + tmp4
  }

  l_lb = rep(0, max_it + 1)
  l_lb[1] = -Inf
  m = 1
  for(m in 1:max_it){

    # VM-step
    delta_ast <- colSums(r_il) + delta_0
    A_ast <- lapply(1:J, function(j) t(G_j[[j]] %*% t(r_il) %*% X[,j] + A_0[[j]]))
    B_ast <- lapply(1:J, function(j) t(G_j[[j]] %*% t(r_il) %*% (1 - X[,j]) + B_0[[j]]))
    E_log_pi = digamma(delta_ast) - digamma(sum(delta_ast))
    E_log_theta = lapply(1:J, function(j) digamma(A_ast[[j]]) - digamma(A_ast[[j]] + B_ast[[j]]))
    E_log_1_theta = lapply(1:J, function(j) digamma(B_ast[[j]]) - digamma(A_ast[[j]] + B_ast[[j]]))

    # VE-step: r_il
    temp <- matrix(0, ncol = I, nrow = L)
    for(j in 1:J) temp <- temp + t(G_j[[j]]) %*% (t(E_log_theta[[j]]) %*% t(X[,j,drop=F]) + t(E_log_1_theta[[j]]) %*% t(1 - X[,j,drop=F]))
    log_rho_il <- t(temp + E_log_pi)
    temp <- exp(log_rho_il)
    r_il = temp / rowSums(temp)

    l_lb[m+1] <- llb_fun(X, G_j, delta_ast, delta_0, A_ast, A_0, B_ast, B_0, r_il)
    if(verbose){
      cat("\riteration = ", m+1, sprintf(", last change = %.05f", abs(l_lb[m] - l_lb[m+1])))
    }
    if(abs(l_lb[m] - l_lb[m+1]) < epsilon){
      if(verbose){
        cat("\nreached convergence.\n")
      }
      break()
    }
  }
  l_lb <- l_lb[-1]

  delta_sum <- sum(delta_ast)
  pi_est <- delta_ast/delta_sum
  pi_sd <- sqrt(delta_ast*(delta_sum - delta_ast)/(delta_sum^2*(delta_sum + 1)))
  theta_est <- mapply(function(x, y) x/(x + y), A_ast, B_ast)
  theta_sd <- mapply(function(x, y) sqrt((x*y)/(((x + y)^2)*(x + y + 1))), A_ast, B_ast)

  model_params = list(
    theta_est = theta_est,
    theta_sd = theta_sd
  )
  res = list(
    model_params = model_params,
    pi_est = pi_est,
    pi_sd = pi_sd,
    # r_il = r_il,
    A_ast = A_ast,
    B_ast = B_ast,
    delta_ast = delta_ast,
    A_0 = A_0,
    B_0 = B_0,
    delta_0 = delta_0,
    l_lb = l_lb[l_lb != 0],
    att_pat_est = A[apply(r_il, 1, which.max), ],
    G_j = G_j,
    m = m
  )
  return(res)
}
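## Hedged usage sketch (not part of the original file): satu_dcm() is normally
## reached through variationalDCM(), but it can also be run directly on binary
## data. `sim_Q_J30K3` and `dina_data_gen()` are assumed to be the bundled
## Q-matrix and data generator referenced in the package vignette.
# Q <- sim_Q_J30K3
# sim_data <- dina_data_gen(Q = Q, I = 200)
# fit <- satu_dcm(X = sim_data$X, Q = Q, verbose = FALSE)
# str(fit$model_params)  # posterior means and SDs of the theta parameters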
/scratch/gouwar.j/cran-all/cranData/variationalDCM/R/satu_dcm.R
#' @include variationalDCM.R
#' @export
#' @describeIn variationalDCM print summary information

summary.variationalDCM = function(object, ...){
  output = list(
    attr_mastery_pat = object$att_pat_est,
    ELBO = object$l_lb[length(object$l_lb)],
    time = object$time
  )
  output = c(object$model_params, output)
  class(output) = "summary.variationalDCM"
  output
}
/scratch/gouwar.j/cran-all/cranData/variationalDCM/R/summary.R
#' @title Variational Bayesian estimation for DCMs
#' @description \code{variationalDCM()} fits DCMs by VB algorithms.
#'
#' @section variationalDCM: The \code{variationalDCM()} function performs
#'   recently-developed variational Bayesian inference for various DCMs. The
#'   current version supports the DINA, DINO, MC-DINA, saturated DCM, and
#'   HM-DCM models. We briefly introduce additional arguments that are
#'   specific to each model.
#'
#' @section DINA model: The DINA model has two types of model parameters: slip
#'   \eqn{s_j} and guessing \eqn{g_j} for \eqn{j=1,\cdots,J}. We describe the
#'   hyperparameters for the DINA model: \code{delta_0} is an L-dimensional
#'   vector, which is a hyperparameter \eqn{\boldsymbol{\delta}^0} for the
#'   Dirichlet distribution for the class mixing parameter
#'   \eqn{\boldsymbol{\pi}} (default: NULL). When \code{delta_0} is specified
#'   as \code{NULL}, we set \eqn{\boldsymbol{\delta}^0=\boldsymbol{1}_L}.
#'   \code{alpha_s}, \code{beta_s}, \code{alpha_g}, and \code{beta_g} are
#'   positive values. They are hyperparameters \{\eqn{\alpha_s}, \eqn{\beta_s},
#'   \eqn{\alpha_g}, \eqn{\beta_g}\} that determine the shape of the prior
#'   beta distributions for the slip and guessing parameters (default: NULL).
#'   When they are specified as \code{NULL}, they are set to \eqn{1}.
#'
#' @section DINO model: The DINO model has the same model parameters and
#'   hyperparameters as the DINA model. We thus refer the readers to the DINA
#'   model.
#'
#' @section MC-DINA model: The MC-DINA model has additional arguments
#'   \code{delta_0} and \code{a_0}. \code{a_0} corresponds to positive
#'   hyperparameters \eqn{\mathbf{a}_{jc^\prime}^0} for all \eqn{j} and
#'   \eqn{c^\prime}. \code{a_0} is by default set to \code{NULL}, and then it
#'   is specified as \eqn{1} for all elements.
#'
#' @section Saturated DCM: The saturated DCM is a generalized model such as
#'   the G-DINA and GDM. In the saturated DCM, we have hyperparameters
#'   \eqn{\mathbf{A}^0} and \eqn{\mathbf{B}^0} in addition to
#'   \eqn{\boldsymbol{\delta}^0}, which can be specified as arguments
#'   \code{A_0} and \code{B_0}. They are specified by default as \code{NULL},
#'   and then we set weakly informative priors.
#'
#' @section HM-DCM: When \code{model} is specified as \code{"hm_dcm"}, users
#'   have additional arguments \code{nondecreasing_attribute},
#'   \code{measurement_model}, \code{random_block_design},
#'   \code{Test_versions}, \code{Test_order}, \code{random_start},
#'   \code{A_0}, \code{B_0}, \code{delta_0}, and \code{omega_0}. Users can
#'   accommodate the nondecreasing attribute constraint, which represents the
#'   assumption that mastered attributes are not forgotten, by setting the
#'   logical-valued argument \code{nondecreasing_attribute} to \code{TRUE}
#'   (default: \code{FALSE}). Users can also control the measurement model by
#'   specifying \code{measurement_model} (default: \code{"general"}), and the
#'   current version can deal with the HM-general DCM (\code{"general"}) and
#'   HM-DINA (\code{"dina"}) models. This function can also handle datasets
#'   collected by a random block design by specifying the logical-valued
#'   argument \code{random_block_design} (default: \code{FALSE}). When it is
#'   specified as \code{TRUE}, users must enter \code{Test_versions} and
#'   \code{Test_order}. \code{Test_versions} is an argument indicating which
#'   version of the test each respondent has been assigned to based on a
#'   random block design, while \code{Test_order} indicates the sequence in
#'   which items are rearranged based on the random block design. \code{A_0},
#'   \code{B_0}, \code{delta_0}, and \code{omega_0} correspond to
#'   hyperparameters \eqn{\mathbf{A}^0}, \eqn{\mathbf{B}^0},
#'   \eqn{\boldsymbol{\delta}^0}, and \eqn{\boldsymbol{\Omega}^0}.
#'   \eqn{\boldsymbol{\Omega}^0} gives the nonnegative hyperparameters of the
#'   Dirichlet distributions for the attribute transition probabilities.
#'   \code{omega_0} is by default set to \code{NULL}, and then we set
#'   \eqn{\boldsymbol{\Omega}^0=\mathbf{1}_L\mathbf{1}_L^\top}.
#'
#' @param X \eqn{N \times J} item response data for the DINA, DINO, MC-DINA,
#'   and saturated DCM models. Alternatively, a \eqn{T}-length list or 3-dim
#'   array whose elements are \eqn{N \times J/T} binary item response data
#'   matrices for the HM-DCM
#' @param Q \eqn{J \times K} binary Q-matrix for the DINA, DINO, and saturated
#'   DCM models. For the MC-DINA model, its size should be
#'   \eqn{J \times (K+2)}. Alternatively, a \eqn{T}-length list or 3-dim array
#'   whose elements are \eqn{J/T \times K} Q-matrices for the HM-DCM
#' @param model specify one of "dina", "dino", "mc_dina", "satu_dcm", and
#'   "hm_dcm"
#' @param max_it Maximum number of iterations (default: \code{500})
#' @param epsilon convergence tolerance for iterations (default: \code{1e-4})
#' @param verbose logical, controls whether to print progress (default:
#'   \code{TRUE})
#' @param ... additional arguments such as hyperparameter values
#' @param object the return of the \code{variationalDCM} function and the
#'   argument of our \code{summary} function
#'
#' @return \code{variationalDCM} returns an object of class
#'   \code{variationalDCM}. We provide the \code{summary} function to
#'   summarize a result, and users can check the following information:
#' \describe{
#'   \item{model_params}{estimates of posterior means and posterior standard deviations of model parameters}
#'   \item{attr_mastery_pat}{MAP estimates of attribute mastery patterns}
#'   \item{ELBO}{resulting value of the evidence lower bound}
#'   \item{time}{time spent in computation}
#' }
#'
#' @references Yamaguchi, K., & Okada, K. (2020). Variational Bayes inference
#'   for the DINA model. \emph{Journal of Educational and Behavioral
#'   Statistics}, 45(5), 569-597. \doi{10.3102/1076998620911934}
#'
#'   Yamaguchi, K. (2020). Variational Bayesian inference for the
#'   multiple-choice DINA model. \emph{Behaviormetrika}, 47(1), 159-187.
#'   \doi{10.1007/s41237-020-00104-w}
#'
#'   Yamaguchi, K., & Okada, K. (2020). Variational Bayes inference algorithm
#'   for the saturated diagnostic classification model. \emph{Psychometrika},
#'   85(4), 973-995. \doi{10.1007/s11336-020-09739-w}
#'
#'   Yamaguchi, K., & Martinez, A. J. (2024). Variational Bayes inference for
#'   hidden Markov diagnostic classification models. \emph{British Journal of
#'   Mathematical and Statistical Psychology}, 77(1), 55-79.
#'   \doi{10.1111/bmsp.12308}
#'
#' @examples
#' # fit the DINA model
#' Q = sim_Q_J80K5
#' sim_data = dina_data_gen(Q = Q, I = 200)
#' res = variationalDCM(X = sim_data$X, Q = Q, model = "dina")
#' summary(res)
#'
#' @export

variationalDCM = function(X, Q, model, max_it = 500, epsilon = 1e-04, verbose = TRUE, ...){
  t1 = Sys.time()
  variationalDCMcall = match.call()
  # pass verbose through so the documented progress-printing option takes effect
  if(model == "dina"){
    res = dina(X = X, Q = Q, max_it = max_it, epsilon = epsilon, verbose = verbose, ...)
  } else if(model == "dino"){
    res = dino(X = X, Q = Q, max_it = max_it, epsilon = epsilon, verbose = verbose, ...)
  } else if(model == "satu_dcm"){
    res = satu_dcm(X = X, Q = Q, max_it = max_it, epsilon = epsilon, verbose = verbose, ...)
  } else if(model == "mc_dina"){
    res = mc_dina(X = X, Q = Q, max_it = max_it, epsilon = epsilon, verbose = verbose, ...)
  } else if(model == "hm_dcm"){
    res = hm_dcm(X = X, Q = Q, max_it = max_it, epsilon = epsilon, verbose = verbose, ...)
  }
  t2 = Sys.time()
  res$time = t2 - t1
  res$call = variationalDCMcall
  class(res) = "variationalDCM"
  return(res)
}
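## Hedged usage sketch (not in the original source): the hyperparameters
## documented above are forwarded to the model-specific fitting functions
## through `...`. Here delta_0 is set explicitly for a K-attribute DINA fit;
## sim_Q_J30K3 and dina_data_gen() are the bundled helpers used elsewhere in
## the package examples.
# Q <- sim_Q_J30K3
# sim_data <- dina_data_gen(Q = Q, I = 200)
# res <- variationalDCM(X = sim_data$X, Q = Q, model = "dina",
#                       delta_0 = rep(1, 2^ncol(Q)))  # flat Dirichlet prior over the 2^K classes
# summary(res)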
/scratch/gouwar.j/cran-all/cranData/variationalDCM/R/variationalDCM.R
## ---- eval=FALSE--------------------------------------------------------------
# install.packages("variationalDCM")

## ---- eval=FALSE--------------------------------------------------------------
# if(!require(devtools)){
#   install.packages("devtools")
# }
# devtools::install_github("khijikata/variationalDCM")

## ----eval=FALSE---------------------------------------------------------------
# Q = sim_Q_J30K3
# sim_data = dina_data_gen(Q=Q,I=200)

## ----eval=FALSE---------------------------------------------------------------
# res = variationalDCM(X=sim_data$X, Q=Q, model="dina")
# summary(res)

## ----eval=FALSE---------------------------------------------------------------
# res = variationalDCM(X=sim_data$X, Q=Q, model="satu_dcm")
# summary(res)

## ---- include = FALSE----------------------------------------------------------
knitr::opts_chunk$set(
  collapse = TRUE,
  comment = "#>"
)
/scratch/gouwar.j/cran-all/cranData/variationalDCM/inst/doc/my-vignette.R
---
title: "variationalDCM vignette"
author: "Keiichiro Hijikata"
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
  %\VignetteIndexEntry{variationalDCM vignette}
  %\VignetteEngine{knitr::rmarkdown}
  %\VignetteEncoding{UTF-8}
---

# Introduction

Diagnostic classification models (DCMs) are a class of discrete latent variable models for classifying respondents into latent classes that typically represent distinct combinations of skills they possess. `variationalDCM` is an `R` package that performs recently-developed variational Bayesian inference for various DCMs.

# DCMs

DCMs have been employed for diagnostic assessment in various fields, such as education, psychology, psychiatry, and human development. Diagnostic assessment is an evaluation process that diagnoses the respondent's current status of knowledge, skills, abilities, and other characteristics in a particular domain. These underlying traits are collectively called *attributes* in the DCM literature. By identifying the mastery status of each respondent, the diagnostic results can help tailor instruction, intervention, and support to address the specific needs of the respondent.

Researchers have developed many sub-classes of DCMs; here we introduce the five models that the `variationalDCM` package supports.

## DINA model

The deterministic input noisy AND gate (DINA) model is called a non-compensatory model, which assumes that respondents must have mastered all the required attributes associated with a particular item to respond correctly. The DINA model has two types of model parameters: slip $s_j$ and guessing $g_j$ for $j=1,\dots,J$. The ideal response of the DINA model is given as
$$\eta_{lj} = \prod_k\alpha_{lk}^{q_{jk}}.$$
This ideal response equals one if the attribute mastery pattern $\boldsymbol{\alpha}_l$ of the $l$-th class satisfies $\alpha_{lk} \geq q_{jk}$ for all $k$ and zero otherwise. Using a class indicator vector $\mathbf{z}_i$, the ideal response for respondent $i$ is written as
$$\eta_{ij} = \prod_l\eta_{lj}^{z_{il}}.$$
The item response function of the DINA model can be written as
$$P(X_{ij}=1|\eta_{ij},s_j,g_j)=(1-s_j)^{\eta_{ij}}g_j^{1-\eta_{ij}}.$$

## DINO model

The deterministic input noisy OR gate (DINO) model is another well-known model. In contrast to the DINA model, the DINO model is one of the compensatory models, which assume that obtaining a correct response to an item necessitates mastery of at least one of the relevant attributes. The DINO model can be represented by slightly modifying the DINA model. The ideal response of the DINO model is given as
$$\eta_{lj}=1-\prod_k(1-\alpha_{lk})^{q_{jk}}.$$
The ideal response takes the value of one when a respondent masters at least one of the attributes relevant to an item; otherwise, it is zero.

## Saturated DCM

The saturated DCM is a saturated formulation of DCMs, which contains as many parameters as possible under the given item-attribute relationship. This generalized model includes the DINA and DINO models as the most parsimonious special cases, as well as many other sub-models that differ in their degree of generalization and parsimony. In the saturated DCM, the model parameters are $\theta_{jh}$ for all $j$ and $h$ ($=1,\dots,H_j$), which represent the correct item response probability of the $h$-th item-specific attribute mastery pattern for the $j$-th item.

## MC-DINA model

The MC-DINA (multiple-choice DINA) model is an extension of the DINA model to capture nominal responses. In the MC-DINA model, each item has multiple response options. The model parameter is $\theta_{jcc^\prime}$, the probability that respondents who belong to the $c^\prime$-th attribute mastery pattern choose the $c$-th option of item $j$.

## HM-DCM

HM-DCM was developed by combining the strengths of hidden Markov models and DCMs to extend DCMs to longitudinal data. We assume that the item response data are obtained across $T$ time points. The model parameter is $\theta_{jht}$, which represents the probability that a respondent with the $h$-th item-specific attribute mastery pattern correctly responds to the $j$-th item at time $t$.

# Installation

The CRAN version of `variationalDCM` is installed using the following code:

```{r, eval=FALSE}
install.packages("variationalDCM")
```

Alternatively, the latest development version on GitHub can be installed via the `devtools` package as follows:

```{r, eval=FALSE}
if(!require(devtools)){
  install.packages("devtools")
}
devtools::install_github("khijikata/variationalDCM")
```

# Example

We illustrate some analyses based on artificial data.

## DINA model

First, we fit the DINA model using `variationalDCM`. The analysis requires a Q-matrix and item response data. `variationalDCM` provides three Q-matrices of different sizes, and we load one of those. Moreover, `variationalDCM` also provides data generation functions that generate artificial data based on a pre-specified Q-matrix. We generate artificial data based on the loaded Q-matrix and the DINA model, where we set the number of respondents to 200 as follows:

```{r,eval=FALSE}
Q = sim_Q_J30K3
sim_data = dina_data_gen(Q=Q,I=200)
```

The analysis is performed using the `variationalDCM()` function. When we fit the DINA model, we specify the `model` argument as `"dina"`. After running the analysis, we use the `summary()` function to summarize the estimation results. With `summary()`, we can check the estimated model parameters, scored attribute mastery patterns, the resulting evidence lower bound for evaluating goodness-of-fit, and the estimation time.

```{r,eval=FALSE}
res = variationalDCM(X=sim_data$X, Q=Q, model="dina")
summary(res)
```

## Saturated DCM

Next, we illustrate the analysis of the saturated DCM. When we fit the saturated DCM, we specify `model="satu_dcm"` in `variationalDCM()`. The analysis is done as follows:

```{r,eval=FALSE}
res = variationalDCM(X=sim_data$X, Q=Q, model="satu_dcm")
summary(res)
```

```{r, include = FALSE}
knitr::opts_chunk$set(
  collapse = TRUE,
  comment = "#>"
)
```

# References

- Oka, M., & Okada, K. (2023). Scalable Bayesian approach for the DINA Q-matrix estimation combining stochastic optimization and variational inference. *Psychometrika*, 88, 302--331. <https://doi.org/10.1007/s11336-022-09884-4>

- Yamaguchi, K., & Okada, K. (2020b). Variational Bayes inference for the DINA model. *Journal of Educational and Behavioral Statistics*, 45(5), 569--597. <https://doi.org/10.3102/1076998620911934>

- Yamaguchi, K., & Okada, K. (2020a). Variational Bayes inference algorithm for the saturated diagnostic classification model. *Psychometrika*, 85(4), 973--995. <https://doi.org/10.1007/s11336-020-09739-w>

- Yamaguchi, K. (2020). Variational Bayesian inference for the multiple-choice DINA model. *Behaviormetrika*, 47(1), 159--187. <https://doi.org/10.1007/s41237-020-00104-w>

- Yamaguchi, K., & Martinez, A. J. (2024). Variational Bayes inference for hidden Markov diagnostic classification models. *British Journal of Mathematical and Statistical Psychology*, 77(1), 55--79. <https://doi.org/10.1111/bmsp.12308>
/scratch/gouwar.j/cran-all/cranData/variationalDCM/inst/doc/my-vignette.Rmd
---
title: "variationalDCM vignette"
author: "Keiichiro Hijikata"
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
  %\VignetteIndexEntry{variationalDCM vignette}
  %\VignetteEngine{knitr::rmarkdown}
  %\VignetteEncoding{UTF-8}
---

# Introduction

Diagnostic classification models (DCMs) are a class of discrete latent variable models for classifying respondents into latent classes that typically represent distinct combinations of skills they possess. `variationalDCM` is an `R` package that performs recently-developed variational Bayesian inference for various DCMs.

# DCMs

DCMs have been employed for diagnostic assessment in various fields, such as education, psychology, psychiatry, and human development. Diagnostic assessment is an evaluation process that diagnoses the respondent's current status of knowledge, skills, abilities, and other characteristics in a particular domain. These underlying traits are collectively called *attributes* in the DCM literature. By identifying the mastery status of each respondent, the diagnostic results can help tailor instruction, intervention, and support to address the specific needs of the respondent.

Researchers have developed many sub-classes of DCMs; here we introduce the five models that the `variationalDCM` package supports.

## DINA model

The deterministic input noisy AND gate (DINA) model is called a non-compensatory model, which assumes that respondents must have mastered all the required attributes associated with a particular item to respond correctly. The DINA model has two types of model parameters: slip $s_j$ and guessing $g_j$ for $j=1,\dots,J$. The ideal response of the DINA model is given as
$$\eta_{lj} = \prod_k\alpha_{lk}^{q_{jk}}.$$
This ideal response equals one if the attribute mastery pattern $\boldsymbol{\alpha}_l$ of the $l$-th class satisfies $\alpha_{lk} \geq q_{jk}$ for all $k$ and zero otherwise. Using a class indicator vector $\mathbf{z}_i$, the ideal response for respondent $i$ is written as
$$\eta_{ij} = \prod_l\eta_{lj}^{z_{il}}.$$
The item response function of the DINA model can be written as
$$P(X_{ij}=1|\eta_{ij},s_j,g_j)=(1-s_j)^{\eta_{ij}}g_j^{1-\eta_{ij}}.$$

## DINO model

The deterministic input noisy OR gate (DINO) model is another well-known model. In contrast to the DINA model, the DINO model is one of the compensatory models, which assume that obtaining a correct response to an item necessitates mastery of at least one of the relevant attributes. The DINO model can be represented by slightly modifying the DINA model. The ideal response of the DINO model is given as
$$\eta_{lj}=1-\prod_k(1-\alpha_{lk})^{q_{jk}}.$$
The ideal response takes the value of one when a respondent masters at least one of the attributes relevant to an item; otherwise, it is zero.

## Saturated DCM

The saturated DCM is a saturated formulation of DCMs, which contains as many parameters as possible under the given item-attribute relationship. This generalized model includes the DINA and DINO models as the most parsimonious special cases, as well as many other sub-models that differ in their degree of generalization and parsimony. In the saturated DCM, the model parameters are $\theta_{jh}$ for all $j$ and $h$ ($=1,\dots,H_j$), which represent the correct item response probability of the $h$-th item-specific attribute mastery pattern for the $j$-th item.

## MC-DINA model

The MC-DINA (multiple-choice DINA) model is an extension of the DINA model to capture nominal responses. In the MC-DINA model, each item has multiple response options. The model parameter is $\theta_{jcc^\prime}$, the probability that respondents who belong to the $c^\prime$-th attribute mastery pattern choose the $c$-th option of item $j$.

## HM-DCM

HM-DCM was developed by combining the strengths of hidden Markov models and DCMs to extend DCMs to longitudinal data. We assume that the item response data are obtained across $T$ time points. The model parameter is $\theta_{jht}$, which represents the probability that a respondent with the $h$-th item-specific attribute mastery pattern correctly responds to the $j$-th item at time $t$.

# Installation

The CRAN version of `variationalDCM` is installed using the following code:

```{r, eval=FALSE}
install.packages("variationalDCM")
```

Alternatively, the latest development version on GitHub can be installed via the `devtools` package as follows:

```{r, eval=FALSE}
if(!require(devtools)){
  install.packages("devtools")
}
devtools::install_github("khijikata/variationalDCM")
```

# Example

We illustrate some analyses based on artificial data.

## DINA model

First, we fit the DINA model using `variationalDCM`. The analysis requires a Q-matrix and item response data. `variationalDCM` provides three Q-matrices of different sizes, and we load one of those. Moreover, `variationalDCM` also provides data generation functions that generate artificial data based on a pre-specified Q-matrix. We generate artificial data based on the loaded Q-matrix and the DINA model, where we set the number of respondents to 200 as follows:

```{r,eval=FALSE}
Q = sim_Q_J30K3
sim_data = dina_data_gen(Q=Q,I=200)
```

The analysis is performed using the `variationalDCM()` function. When we fit the DINA model, we specify the `model` argument as `"dina"`. After running the analysis, we use the `summary()` function to summarize the estimation results. With `summary()`, we can check the estimated model parameters, scored attribute mastery patterns, the resulting evidence lower bound for evaluating goodness-of-fit, and the estimation time.

```{r,eval=FALSE}
res = variationalDCM(X=sim_data$X, Q=Q, model="dina")
summary(res)
```

## Saturated DCM

Next, we illustrate the analysis of the saturated DCM. When we fit the saturated DCM, we specify `model="satu_dcm"` in `variationalDCM()`. The analysis is done as follows:

```{r,eval=FALSE}
res = variationalDCM(X=sim_data$X, Q=Q, model="satu_dcm")
summary(res)
```

```{r, include = FALSE}
knitr::opts_chunk$set(
  collapse = TRUE,
  comment = "#>"
)
```

# References

- Oka, M., & Okada, K. (2023). Scalable Bayesian approach for the DINA Q-matrix estimation combining stochastic optimization and variational inference. *Psychometrika*, 88, 302--331. <https://doi.org/10.1007/s11336-022-09884-4>

- Yamaguchi, K., & Okada, K. (2020b). Variational Bayes inference for the DINA model. *Journal of Educational and Behavioral Statistics*, 45(5), 569--597. <https://doi.org/10.3102/1076998620911934>

- Yamaguchi, K., & Okada, K. (2020a). Variational Bayes inference algorithm for the saturated diagnostic classification model. *Psychometrika*, 85(4), 973--995. <https://doi.org/10.1007/s11336-020-09739-w>

- Yamaguchi, K. (2020). Variational Bayesian inference for the multiple-choice DINA model. *Behaviormetrika*, 47(1), 159--187. <https://doi.org/10.1007/s41237-020-00104-w>

- Yamaguchi, K., & Martinez, A. J. (2024). Variational Bayes inference for hidden Markov diagnostic classification models. *British Journal of Mathematical and Statistical Psychology*, 77(1), 55--79. <https://doi.org/10.1111/bmsp.12308>
/scratch/gouwar.j/cran-all/cranData/variationalDCM/vignettes/my-vignette.Rmd
envelope <- function(vario, ...){
  if (!class(vario)[1] %in% c("gstatVariogram", "variogram")) stop(
    "The method 'envelope' must be applied to an object of class either 'gstatVariogram' or 'variogram'")
  UseMethod("envelope", vario)
}

envelope.gstatVariogram <- function(vario, data, locations = coordinates(data),
                                    formula = NULL, cluster = FALSE, n.cluster = NULL,
                                    nsim = 999, conf.level = 0.95, save.sim = FALSE, ...) {

  dots <- list(...)
  variogramDefault <- vario

  if (cluster & is.null(n.cluster)) stop("The number of clusters is not specified.")

  # detrend the data by taking residuals from the supplied formula
  if (!is.null(formula)){
    if (inherits(formula, "formula")){
      dataValues <- lm(formula, data = data)$residuals
      dots$object <- NULL
    }
  }

  simulation <- list()
  if (cluster){
    simulation$ID <- as.factor(kmeans(locations, centers = n.cluster)$cluster)
  } else {
    simulation$ID <- rep(1, length(dataValues))
  }

  dat <- data.frame(dataValues, locations, ID = simulation$ID)
  dat <- dat[order(dat$ID), ]

  # permute data values within each cluster
  simulation$data <- lapply(1:nsim, function(i)
    do.call(rbind, lapply(split(dat, dat$ID), function(x) x[sample(nrow(x)), ])))

  # recompute the empirical variogram for each permutation
  simulation$variogram <- cbind(variogramDefault$gamma,
    sapply(1:nsim, function(i){
      data.temp <- data.frame(simulation$data[[i]][, c(2:3)], dat[, 1])
      names(data.temp) <- c("x", "y", "residuals")
      sp::coordinates(data.temp) = ~ x + y
      gstat::variogram(residuals ~ 1, locations, data.temp, ...)$gamma
    }))

  simulation$upper <- apply(simulation$variogram, 1, quantile, probs = 1 - (1 - conf.level)/2)
  simulation$lower <- apply(simulation$variogram, 1, quantile, probs = (1 - conf.level)/2)
  simulation$data.values <- dataValues
  simulation$variogram0 <- variogramDefault
  simulation$conf.level <- conf.level
  if (!save.sim){ simulation$data <- NULL }

  return(simulation)
}

envelope.variogram <- function(vario, data, locations = data$coords, trend = NULL,
                               cluster = FALSE, n.cluster = NULL, nsim = 999,
                               conf.level = 0.95, save.sim = FALSE, ...) {

  if (requireNamespace("geoR", quietly = TRUE)) {
    dots <- list(...)
    dataValue <- data$data
    variogramDefault <- vario

    if (cluster & is.null(n.cluster)) stop("The number of clusters is not specified.")

    if (!is.null(trend)){
      dataValues <- lm(dataValue ~ trend - 1)$residuals
    } else {
      dataValues <- dataValue
    }

    simulation <- list()
    if (cluster){
      simulation$ID <- as.factor(stats::kmeans(locations, centers = n.cluster)$cluster)
    } else {
      simulation$ID <- rep(1, length(dataValues))
    }

    dat <- data.frame(dataValue, locations, ID = simulation$ID)
    dat <- dat[order(dat$ID), ]

    # permute data values within each cluster
    simulation$data <- lapply(1:nsim, function(i)
      do.call(rbind, lapply(split(dat, dat$ID), function(x) x[sample(nrow(x)), ])))

    # recompute the empirical variogram for each permutation
    simulation$variogram <- cbind(variogramDefault$v,
      sapply(1:nsim, function(i){
        data.temp <- data.frame(simulation$data[[i]][, c(2:3)], dat[, 1])
        data.temp <- geoR::as.geodata(data.temp, coords.col = 1:2, data.col = 3)
        geoR::variog(data.temp, messages = FALSE, ...)$v
      }))

    simulation$upper <- apply(simulation$variogram, 1, quantile, probs = 1 - (1 - conf.level)/2)
    simulation$lower <- apply(simulation$variogram, 1, quantile, probs = (1 - conf.level)/2)
    simulation$data.values <- dataValues
    simulation$variogram0 <- variogramDefault
    simulation$conf.level <- conf.level
    if (!save.sim){ simulation$data <- NULL }

    return(simulation)
  } else {
    stop("The 'geoR' package is not installed, please install it first to use the envelope() function with a 'variogram' class.")
  }
}
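## Hedged usage sketch (not part of the original file), assuming the classic
## meuse data shipped with sp/gstat. The permutation envelope is built from
## residuals of the supplied formula, so `formula` is passed explicitly here.
# library(sp); library(gstat)
# data(meuse); coordinates(meuse) <- ~ x + y
# v <- gstat::variogram(log(zinc) ~ 1, meuse)
# env <- envelope(v, data = meuse, formula = log(zinc) ~ 1, nsim = 199)
# str(env[c("lower", "upper")])  # pointwise envelope bounds per lag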
/scratch/gouwar.j/cran-all/cranData/variosig/R/envelope.R
envplot <- function(envlist, shade = TRUE, shade.color = "lightgrey", show.variance = FALSE,
                    # out.point = list(pch = 20, col = "red"), in.point = list(pch = 20, col = "black"),
                    xlim = NULL, ylim = NULL, main = NULL,
                    xlab = "Distance", ylab = "Semivariance"){

  if (!is.list(envlist)) stop(
    "The method 'envplot' must be applied to an object from the output of envelope()")
  if (is.null(envlist$variogram0)) stop(
    "The method 'envplot' must be applied to an object from the output of envelope()")

  check.arg <- function(x){ (is.null(x) | is.character(x)) }
  if (!check.arg(main)) stop("Argument 'main' must be a vector of type character")
  if (!check.arg(xlab)) stop("Argument 'xlab' must be a vector of type character")
  if (!check.arg(ylab)) stop("Argument 'ylab' must be a vector of type character")
  if (!is.logical(shade)) stop("Argument 'shade' must be a logical")

  switch(class(envlist$variogram0)[1],
    "gstatVariogram" = {
      if (is.null(ylim)){
        ylim <- c(0, max(envlist$upper, envlist$variogram0$gamma)*1.1)
      }
      if (is.null(xlim)){
        xlim <- c(0, max(envlist$variogram0$dist))
      }
      plot(envlist$variogram0$dist, envlist$variogram0$gamma, type = "n",
           xlim = xlim, ylim = ylim, xlab = xlab, ylab = ylab, main = main)
      if (shade){
        polygon(c(envlist$variogram0$dist, rev(envlist$variogram0$dist)),
                c(envlist$lower, rev(envlist$upper)), col = shade.color, border = NA)
      } else {
        lines(envlist$variogram0$dist, envlist$upper, lty = 2)
        lines(envlist$variogram0$dist, envlist$lower, lty = 2)
      }
      # indices of estimates falling outside the envelope
      idx <- c(which(envlist$variogram0$gamma < envlist$lower),
               which(envlist$variogram0$gamma > envlist$upper))
      # Experimental
      # do.call(points,
      #         c(list(x = envlist$variogram0$dist[-idx], y = envlist$variogram0$gamma[-idx]), in.point))
      # do.call(points,
      #         c(list(x = envlist$variogram0$dist[idx], y = envlist$variogram0$gamma[idx]), out.point))
      points(envlist$variogram0$dist[-idx], envlist$variogram0$gamma[-idx], pch = 20)
      points(envlist$variogram0$dist[idx], envlist$variogram0$gamma[idx], pch = 20, col = "red")
      if (show.variance){
        # note: envelope() stores the values as data.values
        abline(h = var(envlist$data.values))
        abline(h = var(envlist$data.values) +
                 qnorm(c(1 - envlist$conf.level/2, envlist$conf.level/2))*sd(envlist$data.values),
               col = "blue")
      }
      outside <- length(idx)
      total <- length(envlist$variogram0$dist)
    },
    "variogram" = {
      if (is.null(ylim)){
        ylim <- c(0, max(envlist$upper, envlist$variogram0$v)*1.1)
      }
      if (is.null(xlim)){
        xlim <- c(0, max(envlist$variogram0$u))
      }
      plot(envlist$variogram0$u, envlist$variogram0$v, type = "n",
           xlim = xlim, ylim = ylim, xlab = xlab, ylab = ylab, main = main)
      if (shade){
        polygon(c(envlist$variogram0$u, rev(envlist$variogram0$u)),
                c(envlist$lower, rev(envlist$upper)), col = shade.color, border = NA)
      } else {
        lines(envlist$variogram0$u, envlist$upper, lty = 2)
        lines(envlist$variogram0$u, envlist$lower, lty = 2)
      }
      idx <- c(which(envlist$variogram0$v < envlist$lower),
               which(envlist$variogram0$v > envlist$upper))
      points(envlist$variogram0$u[-idx], envlist$variogram0$v[-idx], pch = 20)
      points(envlist$variogram0$u[idx], envlist$variogram0$v[idx], pch = 20, col = "red")
      if (show.variance){
        abline(h = var(envlist$data.values))
        abline(h = var(envlist$data.values) +
                 qnorm(c(1 - envlist$conf.level/2, envlist$conf.level/2))*sd(envlist$data.values),
               col = "blue")
      }
      outside <- length(idx)
      total <- length(envlist$variogram0$u)
    })

  print(paste0("There are ", outside, " out of ", total,
               " variogram estimates outside the ", envlist$conf.level*100, "% envelope."))
}
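## Hedged usage sketch (not part of the original file): visualize the envelope
## object computed by envelope() in the sketch above.
# envplot(env)                                            # shaded confidence band
# envplot(env, shade = FALSE, main = "Permutation envelope")  # dashed bounds instead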
/scratch/gouwar.j/cran-all/cranData/variosig/R/envplot.R
envsig <- function(envlist, index = NULL, method = c("eb", "fisher", "min")){

  if (!is.list(envlist)) stop(
    "The method 'envsig' must be applied to an object from the output of envelope()")
  if (is.null(envlist$variogram0)) stop(
    "The method 'envsig' must be applied to an object from the output of envelope()")

  # resolve the default length-3 vector to a single method
  method <- match.arg(method)

  switch(class(envlist$variogram0)[1],
    "gstatVariogram" = {
      dims <- dim(envlist$variogram)
      pvals <- sapply(1:dims[1], function(i){
        # number of more extreme semivariances
        min(table(envlist$variogram0$gamma[i] < envlist$variogram[i, ]))/dims[2]
      })
    },
    "variogram" = {
      dims <- dim(envlist$variogram)
      pvals <- sapply(1:dims[1], function(i){
        min(table(envlist$variogram0$v[i] < envlist$variogram[i, ]))/dims[2]
      })
    })

  if (is.null(index)){
    index <- length(pvals)
  }

  # minimum p-value is 1/nsim
  pvals[pvals == 0] <- 1/dims[2]

  # p-value combination method
  rep <- ncol(envlist$variogram) - 1
  Fxs <- matrix(NA, nrow = rep, ncol = index)
  for (i in 1:index){
    Fxs[, i] <- ecdf(envlist$variogram[i, ])(envlist$variogram[i, -1])
  }
  del <- unique(unlist(sapply(1:index, function(x) which(Fxs[, x] == 1))))

  covs <- matrix(NA, nrow = index, ncol = index)
  for (i in 1:(index - 1)){
    for (j in (i + 1):(index)){
      covs[i, j] <- stats::cov(-2 * log(1 - Fxs[-del, i]), -2 * log(1 - Fxs[-del, j]))
    }
  }

  pvals <- apply(envlist$variogram, 1, function(x){ 1 - mean(x[1] < x[-1]) })[1:index]
  pvals[pvals == 0] <- 1/rep

  switch(method,
    "eb" = {
      # empirical Brown's method: scaled chi-squared approximation
      sum_covs <- sum(covs[upper.tri(covs)])
      E <- 2 * index
      Var <- 4 * index + 2*sum_covs
      f = 2*E^2 / Var
      c = Var/(2*E)
      p.overall <- pchisq(-2*sum(log(pvals))/c, df = f, lower.tail = F)
    },
    "fisher" = {
      p.overall <- pchisq(-2*sum(log(pvals)), df = 2*length(pvals), lower.tail = F)
    },
    "min" = {
      p.overall <- min(pvals)
    }
  )

  return(list(p.pointwise = pvals, p.overall = p.overall))
}
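## Hedged usage sketch (not part of the original file): combine the pointwise
## permutation p-values from envelope() into one overall p-value. "eb" is the
## empirical Brown combination coded above; "fisher" and "min" are the
## alternatives.
# sig <- envsig(env, method = "eb")
# sig$p.overall    # overall spatial-dependence p-value
# sig$p.pointwise  # per-lag p-values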
/scratch/gouwar.j/cran-all/cranData/variosig/R/envsig.R
#' add.option
#'
#' @description
#' Add option to nested list of options. Applied recursively.
#'
#' @param name
#'  Option name. Nesting is indicated by the character specified in nesting.character.
#' @param value
#'  New value of option
#' @param old.options
#'  Nested list the option should be added to
#' @param nesting.character
#'  String giving Regex pattern of nesting indication string. Defaults to '\\.'
#'
#' @return Nested list with updated options
#'
add.option <- function(name, value, old.options, nesting.character = '\\.') {

    # split name of option into the immediate level and subsequent levels
    name.components <- stringr::str_split(
        name,
        pattern = nesting.character,
        n = 2
        )[[ 1 ]];

    option.level.name <- name.components[1];

    new.options <- old.options;

    # if trying to add filters, need to use get.filters to expand default filters
    # treat this case separately and return
    if( 'filters' == name ) {
        new.options$filters <- get.filters(value);
        return(new.options);
    }

    if( 1 == length(name.components) ) {
        # if we're at a leaf - add to list
        new.options[[ option.level.name ]] <- value;
    } else {
        # not at a leaf... recursion!
        if( !(option.level.name %in% names(old.options)) ) {
            error.message <- paste('Key', option.level.name, 'not found in list');
            stop(error.message);
        }

        new.options[[ option.level.name ]] <- add.option(
            name.components[2],
            value,
            new.options[[ option.level.name ]]
            );
    }

    return(new.options);
}
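## Hedged usage sketch (not part of the original file): update one leaf of a
## nested option list. The keys below are illustrative, not required names.
# opts <- list(annotation = list(database = 'annovar'), reference_build = 'grch37');
# opts <- add.option('annotation.database', 'vep', old.options = opts);
# opts$annotation$database;   # "vep"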
/scratch/gouwar.j/cran-all/cranData/varitas/R/add.option.R
#' Make Venn diagram of variant caller overlap
#'
#' @param variants
#'  Data frame containing variants, typically from the merge.variants function
#' @param file.name
#'  Name of output file
#'
caller.overlap.venn.diagram <- function(variants, file.name) {

    ### INPUT TESTS ###########################################################

    if( !('caller' %in% names(variants)) ) {
        stop('variant data frame must have a field caller');
    }

    ### MAIN ##################################################################

    all.callers <- stringr::str_split(variants$caller, pattern = ':');
    unique.callers <- unique( unlist(all.callers) );

    if( length(unique.callers) > 5 ) {
        stop('Cannot make a Venn diagram for more than 5 unique callers');
    }

    # create ID field to uniquely identify variants
    variants$id <- paste0(
        variants$sample.id, '-',
        variants$CHROM, ':', variants$POS, '-',
        variants$REF, '>', variants$ALT
        );

    caller.results <- lapply(
        unique.callers,
        function(caller, variants) {
            return(variants$id[ grepl(caller, variants$caller) ]);
        },
        variants = variants
        );

    names(caller.results) <- capitalize.caller(unique.callers);

    # turn off log files
    if( requireNamespace('futile.logger', quietly = TRUE) ) {
        futile.logger::flog.threshold(futile.logger::ERROR, name = 'VennDiagramLogger');
    }

    colour.scheme <- c(
        '#0039A6', '#FF6319', '#6CBE45', '#996633', '#A7A9AC',
        '#FCCC0A', '#B933AD', '#EE352E', '#808183', '#00933C'
        );

    VennDiagram::venn.diagram(
        caller.results,
        filename = file.name,
        fill = colour.scheme[ 1:length(unique.callers) ],
        ext.text = FALSE,
        ext.percent = rep(0.01, 3),
        cat.pos = 0
        );
}
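## Hedged usage sketch (not part of the original file): the data frame mimics
## the merged-variant format this function expects (sample.id, CHROM, POS,
## REF, ALT, colon-separated caller); the output file name is arbitrary.
# variants <- data.frame(
#     sample.id = c('S1', 'S1', 'S2'),
#     CHROM = c('chr1', 'chr2', 'chr7'),
#     POS = c(100, 2000, 140453136),
#     REF = c('A', 'G', 'A'),
#     ALT = c('T', 'C', 'T'),
#     caller = c('mutect:vardict', 'mutect', 'vardict'),
#     stringsAsFactors = FALSE
#     );
# caller.overlap.venn.diagram(variants, file.name = 'caller_overlap.tiff');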
/scratch/gouwar.j/cran-all/cranData/varitas/R/caller.overlap.venn.diagram.R
#' Convert output of iDES step 1 to variant call format
#'
#' @param filename Path to file
#' @param output Logical indicating whether output should be saved to file. Defaults to TRUE.
#' @param output.suffix Suffix to be appended to input filename if saving results to file
#' @param minreads Minimum number of reads
#' @param mindepth Minimum depth
#'
#' @return potential.calls Data frame of converted iDES calls
#'
convert.ides.output <- function(
    filename,
    output = TRUE,
    output.suffix = '.calls.txt',
    minreads = 5,
    mindepth = 50
    ) {

    ### INPUT TESTS ###########################################################

    ### MAIN ##################################################################

    results <- read.ides.file(filename);
    results <- results[results$Depth >= mindepth, ];

    ref.calls <- results$Rplus + results$Rneg; # why is this Rplus when the others are Apos, Gpos, etc. ?

    # determine total number of reads supporting each base
    A <- results$Apos + results$Aneg;
    C <- results$Cpos + results$Cneg;
    T <- results$Tpos + results$Tneg;
    G <- results$Gpos + results$Gneg;

    reads.per.base <- data.frame(A, C, T, G);

    alt.base <- colnames(reads.per.base)[apply(reads.per.base, 1, which.max)];
    alt.base.calls <- apply(reads.per.base, 1, max);

    # Note: this needs to match Annovar format, hence the duplicated position.
    # From Annovar documentation:
    #   On each line, the first five space- or tab- delimited columns represent
    #   chromosome, start position, end position, the reference nucleotides and the
    #   observed nucleotides.
    all.data <- data.frame(
        'Chr' = results$Chr,
        'Start' = results$Pos,
        'End' = results$Pos,
        'Ref' = results$Ref,
        'Alt' = alt.base,
        'Depth' = results$Depth,
        ref.calls,
        alt.base.calls,
        reads.per.base
        );

    # calculate fraction of reads supporting alternate allele
    all.data$AF <- all.data$alt.base.calls/all.data$Depth;

    # filter on minimum number of reads supporting alternate allele
    potential.calls <- all.data[all.data$alt.base.calls > minreads, ];

    # write output to file if requested
    if(TRUE == output) {
        utils::write.table(
            potential.calls,
            file = paste0(filename, output.suffix),
            sep = "\t",
            row.names = FALSE,
            quote = FALSE,
            col.names = FALSE
            );
    }

    return(potential.calls);
}
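## Hedged usage sketch (not part of the original file): 'sample.ides.txt' is a
## hypothetical iDES step-1 output file; with output = FALSE the converted
## calls are only returned, not written next to the input.
# calls <- convert.ides.output('sample.ides.txt', output = FALSE, mindepth = 100);
# head(calls);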
/scratch/gouwar.j/cran-all/cranData/varitas/R/convert.ides.output.R
#' Filter variants in file.
#'
#' @description
#' Filter variants from file, and save to output. Wrapper function that opens the
#' variant file, calls filter.variants, and saves the result to file.
#'
#' @param variant.file Path to variant file
#' @param output.file Path to output file
#' @inheritParams filter.variants
#'
#' @return None
#'
filter.variant.file <- function(
    variant.file,
    output.file,
    config.file = NULL,
    caller = c('vardict', 'ides', 'mutect', 'pgm', 'consensus')
    ) {

    caller <- match.arg(caller);

    ### INPUT TESTS ###########################################################

    if( !file.exists(variant.file) ) {
        error.message <- paste('File', variant.file, 'does not exist');
        stop(error.message);
    }

    ### MAIN ##################################################################

    variants <- utils::read.table(
        variant.file,
        sep = '\t',
        header = TRUE
        );

    filtered.variants <- filter.variants(
        variants,
        caller = caller,
        config.file = config.file
        );

    utils::write.table(
        filtered.variants,
        output.file,
        sep = '\t',
        row.names = FALSE
        );
}
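## Hedged usage sketch (not part of the original file): both paths are
## hypothetical; the caller name selects which filter block of the VariTAS
## settings is applied.
# filter.variant.file(
#     variant.file = 'sample1.mutect.annotated.txt',
#     output.file = 'sample1.mutect.filtered.txt',
#     caller = 'mutect'
#     );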
/scratch/gouwar.j/cran-all/cranData/varitas/R/filter.variant.file.R
#' Filter variant calls
#'
#' @description
#' Filter data frame of variant calls based on thresholds specified in settings.
#'
#' @param variants Data frame of variant calls with ANNOVAR annotation, or path to variant file.
#' @param caller Name of caller used (needed to match appropriate filters from settings)
#' @param config.file Path to config file to be used. If not supplied, will use the pre-existing VariTAS options.
#' @param verbose Logical indicating whether to output descriptions of filtering steps. Defaults to FALSE; useful for debugging.
#'
#' @return filtered.variants Data frame of filtered variants
#'
filter.variants <- function(
    variants,
    caller = c('vardict', 'ides', 'mutect', 'pgm', 'consensus', 'isis', 'varscan', 'lofreq'),
    config.file = NULL,
    verbose = FALSE
    ) {

    ## TO DO:
    #   - log file outputting how many variants were filtered?

    ### INPUT TESTS ###########################################################

    caller <- match.arg(caller);

    if( !is.data.frame(variants) ) {
        stop('variants must be a data frame');
    }

    ### MAIN ##################################################################

    # if a config file has been passed in, overwrite config options
    # this is useful if we call R from perl.
    if( !is.null(config.file) ) {
        config <- yaml::yaml.load_file(config.file);
        config$pkgname <- get.varitas.options('pkgname');
        options(varitas = config);
    }

    # look up filters after any config file override, so the config file takes effect
    if( !in.varitas.options( paste0('filters.', caller) ) ) {
        error.message <- paste('No filters found in VariTAS settings for variant caller', caller);
        stop(error.message);
    }
    filters <- get.varitas.options( paste0('filters.', caller) );

    print(filters);

    # if American spelling in variant file header, change to British one
    # Do this before saving names to a variable: American spelling should be
    # eradicated, and we do not want to change back!
    names(variants) <- stringr::str_replace(
        names(variants),
        pattern = 'TUMOR',
        replacement = 'TUMOUR'
        );

    ## FILTERING

    old.names <- names(variants);

    # if .DEPTH instead of .DP, fix
    names(variants) <- gsub('.DEPTH', '.DP', names(variants));

    # detect if paired analysis (i.e. normal provided in variants)
    paired <- FALSE;
    if( any(grepl('NORMAL', names(variants))) ) {
        paired <- TRUE;
    }

    filtered.variants <- variants;

    if(verbose) {
        cat('\nApplying filters to', nrow(variants), 'calls from', caller, '\n');
    }

    # NOTES:
    #   1) Want to retain rows with NA in the column we are filtering on.
    #      There is no easy way to do this, so code it explicitly.
    #   2) We are dropping variants at each step => need to call mean.field.value
    #      repeatedly. If not for the consensus option, we could just use the
    #      subset function.

    # reads supporting variant
    if( 'min_tumour_variant_reads' %in% names(filters) ) {
        tumour.af <- mean.field.value(filtered.variants, field = 'TUMOUR.AF', caller = caller);
        tumour.dp <- mean.field.value(filtered.variants, field = 'TUMOUR.DP', caller = caller);

        passed.filter <- (tumour.dp*tumour.af >= filters$min_tumour_variant_reads) | is.na(tumour.dp*tumour.af);
        filtered.variants <- filtered.variants[passed.filter, ];

        if(verbose) {
            cat('Applied min_tumour_variant_reads filter, and removed', sum(!passed.filter), 'variants\n');
        }
    }

    if( paired && 'max_normal_variant_reads' %in% names(filters) && caller != 'lofreq' ) {
        normal.af <- mean.field.value(filtered.variants, field = 'NORMAL.AF', caller = caller);
        normal.dp <- mean.field.value(filtered.variants, field = 'NORMAL.DP', caller = caller);

        passed.filter <- (normal.dp*normal.af <= filters$max_normal_variant_reads) | is.na(normal.dp*normal.af);
        filtered.variants <- filtered.variants[passed.filter, ];

        if(verbose) {
            cat('Applied max_normal_variant_reads filter, and removed', sum(!passed.filter), 'variants\n');
        }
    }

    # depth
    if( 'min_tumour_depth' %in% names(filters) ) {
        tumour.dp <- mean.field.value(filtered.variants, field = 'TUMOUR.DP', caller = caller);

        passed.filter <- (tumour.dp >= filters$min_tumour_depth) | is.na(tumour.dp);
        filtered.variants <- filtered.variants[passed.filter, ];

        if(verbose) {
            cat('Applied min_tumour_depth filter, and removed', sum(!passed.filter), 'variants\n');
        }
    }

    if( paired && 'min_normal_depth' %in% names(filters) ) {
        normal.dp <- mean.field.value(filtered.variants, field = 'NORMAL.DP', caller = caller);

        passed.filter <- (normal.dp >= filters$min_normal_depth) | is.na(normal.dp);
        filtered.variants <- filtered.variants[passed.filter, ];

        if(verbose) {
            cat('Applied min_normal_depth filter, and removed', sum(!passed.filter), 'variants\n');
        }
    }

    # allele frequency
    if( 'min_tumour_allele_frequency' %in% names(filters) ) {
        tumour.af <- mean.field.value(filtered.variants, field = 'TUMOUR.AF', caller = caller);

        passed.filter <- (tumour.af >= filters$min_tumour_allele_frequency) | is.na(tumour.af);
        filtered.variants <- filtered.variants[passed.filter, ];

        if(verbose) {
            cat('Applied min_tumour_allele_frequency filter, and removed', sum(!passed.filter), 'variants\n');
        }
    }

    if( paired && 'max_normal_allele_frequency' %in% names(filters) && caller != 'lofreq' ) {
        normal.af <- mean.field.value(filtered.variants, field = 'NORMAL.AF', caller = caller);

        passed.filter <- (normal.af <= filters$max_normal_allele_frequency) | is.na(normal.af);
        filtered.variants <- filtered.variants[passed.filter, ];

        if(verbose) {
            cat('Applied max_normal_allele_frequency filter, and removed', sum(!passed.filter), 'variants\n');
        }
    }

    # higher threshold for indels
    if( 'indel_min_tumour_allele_frequency' %in% names(filters) ) {
        # not sure if data frame will always have this... recalculate
        mutation.type <- classify.variant(ref = filtered.variants$REF, alt = filtered.variants$ALT);
        tumour.af <- mean.field.value(filtered.variants, field = 'TUMOUR.AF', caller = caller);

        passed.filter <- ('indel' != mutation.type) | tumour.af >= filters$indel_min_tumour_allele_frequency | is.na(tumour.af);
        filtered.variants <- filtered.variants[passed.filter, ];

        if(verbose) {
            cat('Applied indel_min_tumour_allele_frequency filter, and removed', sum(!passed.filter), 'variants\n');
        }
    }

    # quality
    if( 'min_quality' %in% names(filters) ) {
        qual <- mean.field.value(filtered.variants, field = 'QUAL', caller = caller);

        passed.filter <- (qual >= filters$min_quality) | is.na(qual);
        filtered.variants <- filtered.variants[passed.filter, ];

        if(verbose) {
            cat('Applied min_quality filter, and removed', sum(!passed.filter), 'variants\n');
        }
    }

    # FFPE artefact filter
    if( 'ct_min_tumour_allele_frequency' %in% names(filters) ) {
        tumour.af <- mean.field.value(filtered.variants, field = 'TUMOUR.AF', caller = caller);
        base.substitutions <- get.base.substitution(ref = filtered.variants$REF, alt = filtered.variants$ALT);

        # replace NA with blank to avoid selection headaches
        base.substitutions[ is.na(base.substitutions) ] <- '';

        passed.filter <- (base.substitutions != 'C>T') | (tumour.af >= filters$ct_min_tumour_allele_frequency) | is.na(tumour.af);
        filtered.variants <- filtered.variants[passed.filter, ];

        if(verbose) {
            cat('Applied ct_min_tumour_allele_frequency filter, and removed', sum(!passed.filter), 'variants\n');
        }
    }

    # appearance in 1000 genomes
    if( 'remove_1000_genomes' %in% names(filters) && filters$remove_1000_genomes ) {
        # try not to hardcode the 1000 genomes version, in case it updates
        if( sum(grepl('1000g', names(variants))) > 1 ) {
            stop('More than one column matching 1000g found.');
        }

        passed.filter <- '.' == filtered.variants[, grepl('1000g', names(variants))];
        filtered.variants <- filtered.variants[passed.filter, ];

        if(verbose) {
            cat('Applied remove_1000_genomes filter, and removed', sum(!passed.filter), 'variants\n');
        }
    }

    if( 'remove_exac' %in% names(filters) && filters$remove_exac ) {
        # try not to hardcode the ExAC version, in case it updates
        if( sum(grepl('^ExAC', names(variants))) > 1 ) {
            stop('More than one column matching ExAC found.');
        }

        # keep anything at variant allele frequency below 0.01
        passed.filter <- '.' == filtered.variants[, grepl('^ExAC', names(variants))] |
            as.numeric(filtered.variants[, grepl('^ExAC', names(variants))]) < 0.01;
        filtered.variants <- filtered.variants[passed.filter, ];

        if(verbose) {
            cat('Applied remove_exac filter, and removed', sum(!passed.filter), 'variants\n');
        }
    }

    if( 'remove_germline_status' %in% names(filters) && filters$remove_germline_status ) {
        status.columns <- grepl('STATUS$', names(filtered.variants));

        if( 1 == sum(status.columns) ) {
            passed.filter <- is.na(filtered.variants[, status.columns]) | filtered.variants[, status.columns] != 'Germline';
            filtered.variants <- filtered.variants[passed.filter, ];

            # report inside the if-block so passed.filter is always defined
            if(verbose) {
                cat('Applied remove_germline_status filter, and removed', sum(!passed.filter), 'variants\n');
            }
        }
    }

    # change back to original headers
    names(filtered.variants) <- old.names;

    return(filtered.variants);
}
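## Hedged usage sketch (not part of the original file): a toy annotated-variant
## data frame with the TUMOUR.DP/TUMOUR.AF columns the filters operate on.
## This assumes the VariTAS options (and thus filters for 'mutect') have been
## loaded, e.g. via a config file.
# variants <- data.frame(
#     CHROM = c('chr1', 'chr2'),
#     POS = c(100, 2000),
#     REF = c('A', 'G'),
#     ALT = c('T', 'C'),
#     TUMOUR.DP = c(500, 12),
#     TUMOUR.AF = c(0.25, 0.02),
#     stringsAsFactors = FALSE
#     );
# filtered <- filter.variants(variants, caller = 'mutect', verbose = TRUE);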
/scratch/gouwar.j/cran-all/cranData/varitas/R/filter.variants.R
#' Fix variant call column names
#'
#' @description
#' Fix headers of variant calls to prepare for merging. This mostly
#' consists in making sure the column headers will be unique by prefixing
#' the variant caller in question.
#'
#' @param column.names Character vector of column names
#' @param variant.caller String giving name of variant caller
#' @param sample.id Optional sample ID. Used to fix headers.
#'
#' @return new.column.names Vector of column names after fixing
#'
fix.names <- function(column.names, variant.caller, sample.id = NULL) {

    ### INPUT TESTS ###########################################################

    if( !is.character(column.names) ) {
        stop('column.names must be a character vector');
    }

    ### MAIN ##################################################################

    variant.caller <- toupper(variant.caller);

    replacement.key <- c(
        'DP' = 'DEPTH',
        'TUMOUR\\.match' = 'NORMAL',
        # TPU sample IDs
        '^[A-Z]\\d{6}' = 'TUMOUR',
        # MiniSeq IDs
        '^S\\d{1,2}\\.' = 'TUMOUR.',
        # isis variant caller IDs (isis calls allele frequency "variant frequency")
        '.*\\.VF$' = 'TUMOUR.AF',
        '.*\\.GT$' = 'TUMOUR.GT',
        # Varscan labels
        'Sample1' = 'TUMOUR',
        # generic formatting
        'NORMAL' = paste0(variant.caller, '.NORMAL'),
        'TUMOUR' = paste0(variant.caller, '.TUMOUR'),
        'TUMOR' = paste0(variant.caller, '.TUMOUR'),
        '.*((t|T)umour|(m|M)etastasis)(_\\d)?' = paste0(variant.caller, '.TUMOUR'),
        '.*(G|g)ermline(_\\d)?' = paste0(variant.caller, '.NORMAL'),
        'QUAL' = paste0(variant.caller, '.QUAL'),
        'STATUS' = paste0(variant.caller, '.STATUS')
        );

    new.column.names <- column.names;

    if( !is.null(sample.id) ) {
        sample.id <- stringr::str_replace(sample.id, '-', '.');
        regex.sample.id <- paste0('^X?', gsub('\\.', '\\\\.', sample.id), '\\.');

        new.column.names <- stringr::str_replace(
            new.column.names,
            pattern = regex.sample.id,
            replacement = 'TUMOUR.'
            );
    }

    for( pattern in names(replacement.key) ) {
        new.column.names <- stringr::str_replace(
            new.column.names,
            pattern = pattern,
            replacement = replacement.key[ pattern ]
            );
    }

    if( 'ISIS' == variant.caller ) {
        new.column.names <- stringr::str_replace(
            new.column.names,
            pattern = 'DEPTH',
            replacement = 'ISIS.TUMOUR.DEPTH'
            );
    }

    return(new.column.names);
}
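## Hedged usage sketch (not part of the original file): shows how MuTect-style
## headers are prefixed and standardized by the replacement key above.
# fix.names(c('TUMOR.DP', 'NORMAL.AF', 'QUAL'), variant.caller = 'mutect');
# # expected: 'MUTECT.TUMOUR.DEPTH' 'MUTECT.NORMAL.AF' 'MUTECT.QUAL'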
/scratch/gouwar.j/cran-all/cranData/varitas/R/fix.names.R
#' Process sample coverage per amplicon data
#'
#' @description
#' Parse coverageBed output to get coverage by amplicon
#'
#' @references \url{http://bedtools.readthedocs.io/en/latest/content/tools/coverage.html}
#'
#' @inheritParams get.coverage.by.sample.statistics
#'
#' @return combined.data Data frame giving coverage per amplicon per sample.
#'
get.coverage.by.amplicon <- function(project.directory) {

    # This parses the bedtools coverage step (run without -hist option)
    # NOTE: might be able to use bedr for actual bedtools commands, but it's currently not working
    #
    # After each interval in A, bedtools coverage will report:
    #   - The number of features in B that overlapped (by at least one base pair) the A interval.
    #   - The number of bases in A that had non-zero coverage from features in B.
    #   - The length of the entry in A.
    #   - The fraction of bases in A that had non-zero coverage from features in B.
    #
    # These are the last four columns. Earlier columns will depend on the format of the panel BED file.

    coverage.paths <- system.ls(pattern = "*/*.sort.txt", directory = project.directory, error = TRUE);
    sample.ids <- extract.sample.ids(coverage.paths, from.filename = TRUE);

    combined.data <- data.frame();

    for( i in 1:length(coverage.paths) ) {
        path <- coverage.paths[i];
        sample.id <- sample.ids[i];

        coverage.data <- utils::read.delim(
            path,
            sep = "\t",
            as.is = TRUE,
            header = FALSE
            );

        reads.mapped.to.amplicon <- coverage.data[, ncol(coverage.data) - 3];

        if(1 == i) {
            combined.data <- coverage.data[, 1:(ncol(coverage.data) - 4)];
            names(combined.data)[1:3] <- c('chr', 'start', 'end');
        }

        #? do we need input checks here?
        # checking that order matches will slow things down, and mismatch should not
        # happen as long as coverage was computed on the same panel for every sample
        combined.data[, sample.id] <- reads.mapped.to.amplicon;
    }

    return(combined.data);
}
/scratch/gouwar.j/cran-all/cranData/varitas/R/get.coverage.by.amplicon.R
#' Get statistics about coverage per sample
#'
#' @param project.directory Path to project directory. Each sample should have its own subdirectory
#'
#' @return coverage.by.sample.statistics Data frame with coverage statistics per sample
#'
get.coverage.by.sample.statistics <- function(project.directory) {

    ### INPUT TESTS ###########################################################

    if( !is.character(project.directory) || length(project.directory) > 1 ) {
        stop('project.directory must be a string.');
    }

    if( !file.exists(project.directory) ) {
        error.message <- paste('Directory', project.directory, 'does not exist');
        stop(error.message);
    }

    ### MAIN ##################################################################

    # gather data from individual files
    total.coverage.statistics <- process.total.coverage.statistics(project.directory);
    coverage.report.data <- process.coverage.reports(project.directory);

    # merge - this has to be done pairwise
    # does the sample order have to match Ros' ?
    coverage.by.sample.statistics <- merge(
        total.coverage.statistics,
        coverage.report.data,
        by = "sample.id",
        all = TRUE
        );

    return(coverage.by.sample.statistics);
}
/scratch/gouwar.j/cran-all/cranData/varitas/R/get.coverage.by.sample.statistics.R
#' get.filters
#'
#' @description
#' Determine filters per caller, given default and caller-specific values.
#'
#' @param filters
#'  List of filter values. These will be updated to use default as the baseline,
#'  with caller-specific filters taking precedence if supplied.
#'
#' @return
#'  A list with updated filters
#'
get.filters <- function(filters) {

    ### INPUT TESTS ###########################################################

    if( !is.list(filters) ) {
        stop('filters must be a list');
    }

    ### MAIN ##################################################################

    # hardcode a minimal set of callers for now...
    # I suspect there are better ways of doing this
    callers <- union(
        c('mutect', 'pgm', 'vardict', 'isis', 'consensus'),
        names(filters)[ 'default' != names(filters) ]
        );

    # defaults that will be updated by user-specified filters
    baseline.filters <- list();

    if( 'default' %in% names(filters) ) {
        default.filters <- filters$default;

        # set baseline equal to default for each caller
        baseline.filters <- lapply(
            callers,
            function(caller, default.filters) return(default.filters),
            default.filters = default.filters
            );

        names(baseline.filters) <- callers;
    }

    new.filters <- utils::modifyList(baseline.filters, filters);

    return(new.filters);
}
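## Hedged usage sketch (not part of the original file): default filters are
## copied to every caller, then caller-specific values take precedence.
# get.filters(list(
#     default = list(min_tumour_depth = 20),
#     mutect = list(min_quality = 50)
#     ));
# # mutect ends up with min_tumour_depth = 20 AND min_quality = 50;
# # vardict, pgm, isis and consensus each get min_tumour_depth = 20.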
/scratch/gouwar.j/cran-all/cranData/varitas/R/get.filters.R
#' get.gene
#'
#' @description
#' Use guesswork to extract gene from data frame of targeted panel data. The panel
#' designer output can change, so try to guess what the format is.
#'
#' @param bed.data Data frame containing data from bed file
#'
#' @return vector of gene names, one entry for each row of \code{bed.data}
#'
get.gene <- function(bed.data) {

    ### INPUT TESTS ###########################################################

    if( !is.data.frame(bed.data) ) {
        stop('bed.data must be a data frame');
    }

    ### MAIN ##################################################################

    # figure out which column contains gene
    gene.column <- 5;
    if( any( grepl('GENE_ID', bed.data[, 6]) ) ) {
        gene.column <- 6;
    }

    # this needs to be more sophisticated
    if( all( grepl('GENE_ID', bed.data[, gene.column]) ) ) {
        genes <- gsub('(.*)GENE_ID=(.+?)(_|;|$)(.*)', '\\2', bed.data[, gene.column]);
    } else {
        genes <- sapply(
            strsplit(bed.data[, gene.column], split = ' '),
            function(x) x[1]
            );
    }

    return(genes);
}
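## Hedged usage sketch (not part of the original file): a two-row BED-like
## data frame in the 'GENE_ID=' annotation format the parser looks for in
## column 6; the coordinates and gene names are illustrative.
# bed.data <- data.frame(
#     V1 = c('chr17', 'chr7'),
#     V2 = c(7577000, 140453100),
#     V3 = c(7577120, 140453220),
#     V4 = c('amp1', 'amp2'),
#     V5 = c('.', '.'),
#     V6 = c('GENE_ID=TP53;POOL=1', 'GENE_ID=BRAF;POOL=2'),
#     stringsAsFactors = FALSE
#     );
# get.gene(bed.data);   # expected: 'TP53' 'BRAF'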
/scratch/gouwar.j/cran-all/cranData/varitas/R/get.gene.R
#' get.miniseq.sample.files
#'
#' @description
#' Get files for a sample in a directory, ensuring there's only a single match per sample ID.
#'
#' @param sample.ids Vector of sample ids. Should form first part of file name
#' @param directory Directory where files can be found
#' @param file.suffix Regex expression for end of file name. For example, `file.suffix = '_S\\d{1,2}_.*_R1_.*'` will match R1 files.
#'
#' @return Character vector of file paths
#'
get.miniseq.sample.files <- function(sample.ids, directory, file.suffix = '_S\\d{1,2}_.*') {

    files <- sapply(
        sample.ids,
        function(sample.id, directory, file.suffix) {

            # MiniSeq sometimes converts underscores to dashes
            # check for either
            sample.id.regex <- paste0('(', sample.id, '|', gsub('_', '-', sample.id), ')');

            sample.files <- list.files(
                pattern = paste0('^', sample.id.regex, file.suffix),
                path = directory,
                full.names = TRUE
                );

            # be strict: fail if more than one file matches the sample ID
            if( length(sample.files) > 1 ) {
                error.message <- paste(
                    'More than one file found in', directory, 'for sample', sample.id, '\n',
                    paste(sample.files, collapse = ' ')
                    );
                stop(error.message);
            }

            if( 0 == length(sample.files) ) {
                error.message <- paste('No file found in', directory, 'for sample', sample.id);
                stop(error.message);
            }

            return(sample.files);
        },
        directory = directory,
        file.suffix = file.suffix
        );

    return(files);
}
/scratch/gouwar.j/cran-all/cranData/varitas/R/get.miniseq.sample.files.R
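A hypothetical call matching R1 fastq files for two samples; the sample IDs and directory are placeholders.
fastq.r1 <- get.miniseq.sample.files(
    sample.ids = c('Sample_01', 'Sample_02'),
    directory = 'fastq',
    file.suffix = '_S\\d{1,2}_.*_R1_.*'
);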
#' Helper function to recursively get a VariTAS option #' #' @param name Option name #' @param varitas.options Optional list of options to search in #' @param nesting.character Regex pattern separating nesting levels in the option name. Defaults to '\\.' #' #' #' @return value Requested option get.option <- function(name, varitas.options = NULL, nesting.character = '\\.') { # if varitas.options not specified, get from settings if( is.null(varitas.options) ) { varitas.options <- get.varitas.options(); } # split name of option into the immediate level and subsequent levels name.components <- stringr::str_split( name, pattern = nesting.character, n = 2 )[[ 1 ]]; option.level.name <- name.components[1]; if( 1 == length(name.components) ) { # we've reached our destination – get requested option value <- varitas.options[[ option.level.name ]]; } else { if( !(option.level.name %in% names(varitas.options)) ) { stop( paste('Key', option.level.name, 'not found in options list.') ); } # pass nesting.character through so custom separators survive the recursion value <- get.option( name.components[2], varitas.options = varitas.options[[ option.level.name ]], nesting.character = nesting.character ); } return(value); }
/scratch/gouwar.j/cran-all/cranData/varitas/R/get.option.R
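A sketch of the recursive lookup against a hand-built options list rather than the live settings.
opts <- list( filters = list( mutect = list(min_tumour_depth = 20) ) );
get.option('filters.mutect.min_tumour_depth', varitas.options = opts);   # 20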
#' Return VariTAS settings #' #' @param option.name Optional name of option. If no name is supplied, the full list of VariTAS options will be provided. #' @param nesting.character String giving Regex pattern of nesting indication string. Defaults to '\\.' #' #' @return varitas.options list specifying VariTAS options #' #' @examples #' reference.build <- get.varitas.options('reference_build'); #' mutect.filters <- get.varitas.options('filters.mutect'); #' #' @export get.varitas.options <- function(option.name = NULL, nesting.character = '\\.') { if( is.null(option.name) ) { varitas.options <- getOption('varitas'); } else { varitas.options <- get.option( option.name, nesting.character = nesting.character ); } return( varitas.options ); }
/scratch/gouwar.j/cran-all/cranData/varitas/R/get.varitas.options.R
#' tabular.median #' #' @description #' Calculate the median of data in tabular format #' #' @param values #' Vector of values #' @param frequencies #' Frequency corresponding to each value #' #' @param ... #' Additional parameters passed to \code{median} #' #' @return calculated median #' tabular.median <- function(values, frequencies, ...) { # TO DO: # - make this more robust to huge datasets # - input tests expanded.values <- rep(values, frequencies); median.value <- stats::median(expanded.values, ...); return( median.value ); } #' tabular.mean #' #' @description #' Calculate the mean of data in tabular format #' #' @param values #' vector of values #' @param frequencies #' frequency corresponding to each value #' @param ... #' Additional parameters passed to \code{sum} #' #' @return calculated mean #' tabular.mean <- function(values, frequencies, ...) { # TO DO: # - check this is actually robust to ... parameters # - figure out why Ros had as.numeric here mean.value <- sum(values*frequencies, ...)/sum(frequencies, ...); return(mean.value); } #' Extract sample IDs from file paths #' #' @description #' Extract sample IDs from a set of paths to files in sample-specific subfolders #' #' @param paths vector of file paths #' @param from.filename Logical indicating whether sample ID should be extracted from filename rather than path #' #' @return vector of extracted sample IDs extract.sample.ids <- function(paths, from.filename = FALSE) { # get all sample IDs # logic: coverage reports are in subdirectories named for each sample. # get second last component in file path split by / split.paths <- strsplit(paths, '/'); if(from.filename) { filenames <- sapply( split.paths, function(components) { return( components[ length(components) ] ); }); filename.components <- stringr::str_split( filenames, pattern = '\\.', n = 2 ); sample.ids <- sapply( filename.components, function(components) return( components[1] ) ); } else { sample.ids <- sapply( split.paths, function(components) { return( components[ length(components) - 1 ] ); }); } return(sample.ids); } #' Run ls command #' #' @description #' Runs ls command on system. This is a workaround since list.files can not match patterns based on subdirectory structure. #' #' @param pattern pattern to match files #' @param directory base directory command should be run from #' @param error Logical indicating whether to throw an error if no matching files are found. Defaults to FALSE. #' #' @return paths returned by ls command system.ls <- function(pattern = "", directory = "", error = FALSE) { ### INPUT TESTS ### MAIN # if a directory has been passed, make sure it ends with "/" # no need to check if it already does, as "//" will not do any harm if("" != directory) { directory <- paste0(directory, "/"); } system.command <- paste0("ls ", directory, pattern); paths <- try(system(system.command, intern = TRUE), silent = TRUE); if(error && 0 == length(paths)) { error.message <- paste("Did not find any files matching", pattern); if("" != directory) error.message <- paste(error.message, "in", directory); stop(error.message); } return(paths); } #' Get pool corresponding to each amplicon #' #' @description #' The bed files are not consistent, so it's not clear where the pool will appear. 
#' This function parses through the columns to identify where the pool information is stored. #' #' @param panel.data data frame pool should be extracted from #' #' @return pools vector of pool information get.pool.from.panel.data <- function(panel.data) { # first three columns are chr, start, end (per bed file requirements) # => parse through remaining columns from right to left to see if they contain pool information pool.column <- NULL; for(i in seq( ncol(panel.data), 4 )) { contains.pool <- grepl("pool", panel.data[, i], ignore.case = TRUE); if( all(contains.pool) ) { pool.column <- i; break; } } if( is.null(pool.column) ) { warning("Unable to find pool information"); return(NULL); } pools <- stringr::str_extract( panel.data[, pool.column], "(P|p)ool(\\s)?=(\\s)?(.*)(;)?" ); return(pools); } #' Summarise panel coverage by gene #' #' @param panel.file path to panel #' @param gene.col index of column containing gene name #' #' @return panel.coverage.by.gene data frame giving the number of amplicons and their total length by gene get.panel.coverage.by.gene <- function(panel.file, gene.col = 5) { # TO DO: # - warn if content of gene.col does not look like a gene # - standardize whether functions take data frames or file paths panel.data <- utils::read.delim( panel.file, sep = "\t", header = FALSE ); panel.data$length <- panel.data[, 3] - panel.data[, 2]; # get gene without exon specification # column gives gene exX if specified at exon level -> keep anything before first space panel.data$gene <- stringr::str_extract( panel.data[, gene.col], "[^\\s]+" ); panel.coverage.by.gene <- panel.data %>% dplyr::group_by(gene) %>% dplyr::summarise( n.amplicons = dplyr::n(), total.length = sum(length) ); return( data.frame(panel.coverage.by.gene) ); } #' Generate a colour scheme #' #' @param n Number of colours desired #' #' @return Colour.scheme generated colours get.colours <- function(n) { full.colour.scheme <- c( '#1f78b4', '#e31a1c', '#ff7f00', '#6a3d9a', '#b15928', '#33a02c', # second half of pairs '#a6cee3', '#fb9a99', '#fdbf6f', '#cab2d6', '#ffff99', '#b2df8a' ); if( n > length(full.colour.scheme) ) { stop( paste('Only', length(full.colour.scheme), 'colours are available.') ); } colour.scheme <- full.colour.scheme[1:n]; return( colour.scheme ); } #' Make string with command line call from its individual components #' #' @param main.command String or vector of strings giving main part of command (e.g. "python test.py" or c("python", "test.py")) #' @param options Named vector or list giving options #' @param flags Vector giving flags to include. #' @param option.prefix String to preface all options. Defaults to "--" #' @param option.separator String to separate options from their values. Defaults to a single space. #' @param flag.prefix String to preface all flags. Defaults to "--" #' #' @return command string giving command line call #' #' make.command.line.call <- function( main.command, options = NULL, flags = NULL, option.prefix = "--", option.separator = " ", flag.prefix = "--" ) { ### INPUT TESTS if( !is.character(main.command) ) { stop("Argument main.command must be a string or character vector."); } ### MAIN # TO DO: # - add tests # - add input tests # Remove any elements given as NULL or empty vectors. 
# Adding this removal step was easier than making sure the function would never be passed any empty options (for job dependencies) options <- options[!sapply(options, is.null) & sapply(options, length) > 0]; # if options or main command have been passed as vector, parse into string main.command <- paste(main.command, collapse = " "); # merge options into a single string combined.options <- ''; if( !is.null(options) ) { options <- lapply(options, paste, collapse = " "); option.strings <- paste0( option.prefix, names(options), option.separator, options ); combined.options <- paste(option.strings, collapse = " "); } # merge flags into a single string combined.flags <- ''; if( !is.null(flags) ) { flag.strings <- paste0( flag.prefix, flags ); combined.flags <- paste(flag.strings, collapse = " "); } # combine main command and options # if no options or flags were passed in, the resulting extra whitespace is harmless to the shell command <- paste(main.command, combined.options, combined.flags); return(command); } #' Parse job dependencies #' #' @description #' Parse job dependencies to make the functions more robust to alternate inputs (e.g. people writing alignment instead of bwa) #' #' @param dependencies Job dependency strings to be parsed. #' #' @return parsed.dependencies Vector of job dependencies after reformatting. parse.job.dependencies <- function(dependencies) { ### INPUT TESTS if( !is.character(dependencies) ) { stop('The argument dependencies must be a string or vector of strings.'); } ### MAIN # convert to lower case parsed.dependencies <- tolower(dependencies); parsed.dependencies[parsed.dependencies %in% c('bwa', 'alignment', 'bwa mem', 'bwamem', 'align')] <- 'bwa'; return(parsed.dependencies); } #' save.config #' #' @description #' Save current varitas config options to a temporary file, and return filename. #' #' @param output.file Path to output file. If NULL (default), the config file will be saved as a temporary file. #' #' @return Path to config file #' save.config <- function(output.file = NULL) { config <- getOption('varitas'); if( is.null(output.file) ) { config.file <- tempfile(fileext = '.yaml'); } else { config.file <- output.file; } # Note: writeLines does not support indentation # - use cat and capture.output instead utils::capture.output( cat(yaml::as.yaml(config, indent.mapping.sequence = TRUE)), file = config.file ); return(config.file); } #' get.file.path #' #' @description #' Get absolute path to sample-specific file for one or more samples #' #' @param sample.ids #' Vector of sample IDs to match filename on #' @param directory #' Path to directory containing files #' @param extension #' String giving extension of file #' @param allow.multiple #' Boolean indicating whether to allow multiple matching files. #' Defaults to FALSE, which throws an error if the query matches more than one file. #' @param allow.none #' Boolean indicating whether to allow no matching files. #' Defaults to FALSE, which throws an error if the query does not match any files. 
#' #' @return Paths to matched files #' get.file.path <- function( sample.ids, directory, extension = NULL, allow.multiple = FALSE, allow.none = FALSE ) { ### INPUT TESTS ########################################################### # avoid problems with identifying which sample a file belongs to if(allow.multiple && length(sample.ids) > 1) { stop('Cannot match multiple filepaths for more than one sample.'); } ### MAIN ################################################################## paths <- c(); for(sample.id in sample.ids) { if( !is.null(extension) ) { extension.pattern <- paste0('.*\\.', extension, '$'); pattern <- paste0(sample.id, extension.pattern); } else { pattern <- sample.id; } filename <- list.files( pattern = pattern, path = directory ); # error checks if( 0 == length(filename)) { if(allow.none) { return(NULL); } else { error.message <- paste('No file found for', sample.id, 'in directory', directory); stop(error.message); } } if( !allow.multiple && length(filename) > 1) { error.message <- paste( 'Found more than one file for', sample.id, 'in directory', directory, '\n\n', paste(filename, collapse = '\n') ); stop(error.message); } paths <- c(paths, file.path(directory, filename)); } return(paths); } #' create.directories #' #' @description #' Create directories in a given path #' #' @param directory.names #' Vector of names of directories to be created #' @param path #' Path where directories should be created #' create.directories <- function(directory.names, path) { ### INPUT TESTS if( !is.character(directory.names) ) { stop('directory.names must be a character vector.'); } if( !is.character(path) || 1 != length(path) ) { stop('path must be a single string.'); } ### MAIN # loop over directories to create, and create them for( directory.name in directory.names ) { directory.path <- file.path(path, directory.name); if( !dir.exists(directory.path) ) { dir.create(directory.path); } } } #' get.buildver #' #' @description #' Get build version (hg19/hg38) based on settings. #' #' @details #' Parses VariTAS pipeline settings to get the build version. When this function was first developed, the idea was to #' be able to explicitly set ANNOVAR filenames based on the build version. #' #' @return String giving reference genome build version (hg19 or hg38) #' get.buildver <- function() { varitas.options <- get.varitas.options(); reference.build <- tolower( varitas.options$reference_build ); if( 'grch37' == reference.build ) { buildver <- 'hg19'; } else if( 'grch38' == reference.build ) { buildver <- 'hg38'; } else { # unrecognized reference build, throw an error error.message <- paste(reference.build, 'is not a recognized reference build. Please update the VariTAS settings'); stop(error.message); } return(buildver); } #' classify.variant #' #' @description #' Classify a variant as SNV, MNV, or indel based on the reference and alternative alleles #' #' @param ref #' Vector of reference bases #' @param alt #' Vector of alternate bases #' #' @return Character vector giving type of variant. 
#' classify.variant <- function(ref, alt) { ### INPUT TESTS ########################################################### if( length(ref) != length(alt) ) { stop('ref and alt vectors must be the same length'); } ### MAIN ################################################################## is.indel <- nchar(ref) != nchar(alt); is.mnv <- (nchar(ref) == nchar(alt)) & (nchar(ref) > 1); is.snv <- (1 == nchar(ref)) & (1 == nchar(alt)); # quality control checks if( any(is.snv & is.indel) | any(is.mnv & is.indel) | any(is.snv & is.mnv) ) { stop('At least one variant has been classified in two or more categories.'); } variant.types <- rep(NA, length(ref)); variant.types[is.indel] <- 'indel'; variant.types[is.snv] <- 'SNV'; variant.types[is.mnv] <- 'MNV'; return(variant.types); } #' mean.field.value #' #' @description #' Get mean value of a variant annotation field #' #' #' @details #' As part of the variant merging process, annotated variant data frames are merged into #' one, with the value from each caller prefixed by CALLER. For example, the VarDict normal allele #' frequency will have header VARDICT.NORMAL.AF. This function takes the average of all callers' values #' for a given field, removing NA's. If only a single caller is present in the data frame, that value is returned. #' #' @param variants Data frame with variants #' @param field String giving field of interest. #' @param caller String giving caller to calculate values from #' #' #' #' @return Vector of mean values. mean.field.value <- function( variants, field = c('TUMOUR.DP', 'NORMAL.DP', 'NORMAL.AF', 'TUMOUR.AF', 'QUAL'), caller = c('consensus', 'vardict', 'pgm', 'mutect', 'isis', 'varscan', 'lofreq') ) { # TO DO: # - add support for taking average of (say) two out of four variants? field <- match.arg(field); caller <- match.arg(caller); ### INPUT TESTS ########################################################### if( 'QUAL' == field && sum( grepl('QUAL$', names(variants)) ) > 1 && 'consensus' == caller ) { warning('Taking the average of several QUAL fields. Probably not a good idea as they tend to be wildly different'); } ### MAIN ################################################################## if( 'consensus' == caller ) { # add end of line suffix $ to minimize risk of errors values <- rowMeans( variants[ , grepl(paste0(field, '$'), names(variants))], na.rm = TRUE ); # if no non-NA values for a field, NaN is returned. Change back to NA to avoid confusion values[ is.nan(values) ] <- NA; } else { caller.prefix <- ''; if( any( grepl(toupper(caller), names(variants)) ) ) { caller.prefix <- paste0(toupper(caller), '.'); } values <- variants[, paste0(caller.prefix, field)]; } return(values); } #' Get base substitution #' #' @description #' Get base substitution represented by pyrimidine in base pair. #' If more than one base in REF/ALT (i.e. MNV or indel rather than SNV), NA will be returned #' #' @param ref Vector of reference bases #' @param alt Vector of alternate bases #' #' @return base.substitutions Vector of base substitutions get.base.substitution <- function(ref, alt) { variant.key <- c( 'A>C' = 'T>G', 'A>G' = 'T>C', 'A>T' = 'T>A', 'C>A' = 'C>A', 'C>G' = 'C>G', 'C>T' = 'C>T', 'T>G' = 'T>G', 'T>C' = 'T>C', 'T>A' = 'T>A', 'G>T' = 'C>A', 'G>C' = 'C>G', 'G>A' = 'C>T' ); base.substitutions <- variant.key[ paste(ref, alt, sep = '>') ]; return(base.substitutions); } #' build.variant.specification #' #' @description #' Build data frame with paths to variant files. 
#' #' @details #' Parses through sample IDs in a project directory and returns paths to variant files based on #' (theoretical) file name patterns. Useful for testing, or for entering the pipeline at non-traditional stages. #' #' @param sample.ids Vector of sample IDs. Must match subdirectories in project.directory. #' @param project.directory Path to directory where sample subdirectories are located #' #' @return Data frame with paths to variant files. #' #' build.variant.specification <- function(sample.ids, project.directory) { # TO DO: # - support for grch38 ### INPUT TESTS ########################################################### if( !dir.exists(project.directory) ) { error.message <- paste('Directory', project.directory, 'does not exist'); stop(error.message); } ### MAIN ################################################################# caller.file.suffixes <- c( 'vardict' = '.passed.ontarget.vcf.annovar.hg19_multianno.vcf.txt', 'mutect' = '.passed.ontarget.vcf.annovar.hg19_multianno.vcf.txt', 'pgm' = '.snvs_and_cnvs.vcf.annovar.hg19_multianno.vcf.txt' ); # store paths to variant files we come across along the way variant.specification <- list(); for(sample.id in sample.ids) { sample.directory <- file.path(project.directory, sample.id); if( !dir.exists(sample.directory) ) { error.message <- paste('No directory found for sample', sample.id); stop(error.message); } # loop over variant callers to see if file exists for(variant.caller in names(caller.file.suffixes) ) { variant.file <- file.path( sample.directory, variant.caller, paste0(sample.id, caller.file.suffixes[ variant.caller ]) ); if( file.exists(variant.file) ) { variant.specification[[ paste0(sample.id, variant.caller) ]] <- data.frame( sample.id = sample.id, variant.file = variant.file, caller = variant.caller ); } } # end of caller loop } # end of sample loop # make data frame and get rid of uninformative row names variant.specification <- do.call(rbind, variant.specification); rownames(variant.specification) <- NULL; return(variant.specification); } #' capitalize.caller #' #' @description #' Capitalize variant caller name #' #' @param caller #' Character vector of callers to be capitalized #' #' @return Vector of same length as caller where eligible callers have been capitalized #' #' capitalize.caller <- function(caller) { capitalization.key <- c( 'mutect' = 'MuTect', 'vardict' = 'VarDict', 'pgm' = 'PGM', 'isis' = 'Isis' ); capitalized.caller <- capitalization.key[ caller ]; capitalized.caller[ is.na(capitalized.caller) ] <- caller[ is.na(capitalized.caller) ]; return(capitalized.caller); } #' @rdname capitalize.caller #' #' capitalise.caller <- capitalize.caller; #' split.on.column #' #' @description #' Split data frame on a concatenated column. 
#' #' @param dat Data frame to be processed #' @param column Name of column to split on #' @param split.character Pattern giving character to split column on #' #' @return Data frame after splitting on column split.on.column <- function(dat, column, split.character) { ### INPUT TESTS ########################################################### if( !is.character(column) ) { stop('column must be a character corresponding to the column header'); } if( length(column) > 1) { stop('column cannot be a vector.'); } if( !(column %in% names(dat)) ) { error.message <- paste(column, 'not found in data frame'); stop(error.message); } ### MAIN ################################################################## column.components <- stringr::str_split( dat[, column], pattern = split.character ); # variable for processed output new.dat <- list(); for(i in 1:nrow(dat) ) { components <- column.components[[i]]; for( j in 1:length(components) ) { data.row <- dat[i, names(dat) != column]; data.row[, column] <- components[j]; # add to list with a unique identifier to prevent overwriting previous entries new.dat[[ paste(i, j, sep = '-')]] <- data.row; } } # convert to data frame new.dat <- do.call(rbind, new.dat); # remove nonsensical row names rownames(new.dat) <- NULL; # reorder columns new.dat <- new.dat[, names(dat)]; # sanity check if( length(unlist(column.components)) != nrow(new.dat) ) { stop('Unexpected number of rows in processed data frame'); } return(new.dat); } #' get.fasta.chromosomes #' #' @description #' Extract chromosomes from fasta headers. #' #' @param fasta Path to reference fasta #' #' @return Vector containing all chromosomes in fasta file. #' #' get.fasta.chromosomes <- function(fasta) { # grep for headers header.grep.command <- paste( "grep '>'", fasta ); headers <- system(header.grep.command, intern = TRUE); chromosomes <- stringr::str_extract( headers, pattern = '(?<=>)[^\\s]+' # match everything between > and first whitespace ); return(chromosomes); } #' get.vcf.chromosomes #' #' @description #' Extract chromosomes from a VCF file. #' #' @param vcf Path to VCF file #' #' @return Vector containing all chromosomes in VCF #' #' get.vcf.chromosomes <- function(vcf) { chromosome.command <- paste( "grep -v '^#'", vcf, "| awk -F '\t' '{print $1}' | uniq" ); chromosomes <- system(chromosome.command, intern = TRUE); return(chromosomes); } #' get.bed.chromosomes #' #' @description #' Extract chromosomes from bed file #' #' @param bed Path to BED file #' #' @return Vector containing all chromosomes in BED file #' #' get.bed.chromosomes <- function(bed) { # TO DO: allow for header # NOTE: # - bed file does not have to be sorted for our applications, sort here to get unique values chromosome.command <- paste( "awk -F '\t' '{print $1}'", bed, "| sort | uniq" ); chromosomes <- system(chromosome.command, intern = TRUE); # remove anything starting with "track name" – header chromosomes <- chromosomes[ !grepl('^track', chromosomes) ]; return(chromosomes); } #' logical.to.character #' #' @description #' Convert a logical vector to a T/F coded character vector. 
Useful for preventing unwanted T->TRUE nucleotide conversions #' #' @param x Vector to be converted #' #' @return Character vector after converting TRUE/FALSE #' #' #' logical.to.character <- function(x) { character.x <- as.character(x); # only convert if vector was logical to start with # people could have strings 'TRUE' and 'FALSE' and want to keep them (ugh) if( is.logical(x) ) { character.x[ 'TRUE' == character.x ] <- 'T'; character.x[ 'FALSE' == character.x ] <- 'F'; } return(character.x); } #' date.stamp.file.name #' #' @description #' Prefix file name with a date-stamp. #' #' @param file.name File name to be date-stamped #' @param date Date to be added. Defaults to current date. #' @param separator String that should separate the date from the file name. Defaults to a single underscore. #' #' @return String giving the datestamped file name #' #' @examples #' date.stamp.file.name('plot.png'); #' date.stamp.file.name('yesterdays_plot.png', date = Sys.Date() - 1); #' #' @aliases datestamp.file.name datestamp.filename #' #' @export date.stamp.file.name <- datestamp.file.name <- datestamp.filename <- function( file.name, date = Sys.Date(), separator = '_' ) { ### INPUT TESTS ########################################################### if( !is.character(file.name) ) stop('file.name must be a string'); if( grepl('/', file.name) ) stop('Detected forward slash in file.name. Unable to datestamp directories.'); if(grepl('\\s', file.name) ) warning('Your file name contains whitespace - are you sure you want to use it?'); ### MAIN ################################################################## datestamped.file.name <- paste( date, file.name, sep = separator ); return(datestamped.file.name); } #' read.yaml #' #' @description Read a yaml file #' #' @param file.name Path to yaml file #' #' @return list containing contents of yaml file #' #' @examples #' read.yaml(file.path(path.package('varitas'), 'config.yaml')) #' #' @export #' read.yaml <- function(file.name) { # use calling handlers to avoid warning if last line isn't blank # function is perfectly able to handle it, but throws a warning contents <- withCallingHandlers( yaml::yaml.load_file(file.name), warning = function(w) { if( grepl('incomplete final line', w$message) ) invokeRestart('muffleWarning'); } ); return(contents); } #' alternate.gene.sort #' #' @description #' Given a data frame containing coverage statistics and gene information, returns that frame #' with the rows sorted by alternating gene size (for plotting) #' #' @details #' Genes have varying numbers of associated amplicons and when plotting coverage statistics, #' if two genes with very low numbers of amplicons are next to each other, the labels will overlap. #' This function sorts the coverage statistics data frame in a way that places the genes #' with the most amplicons (largest) next to those with the least (smallest). 
#' #' @param coverage.statistics Data frame of coverage statistics #' #' @return Coverage statistics data frame sorted by alternating gene size #' #' #' alternate.gene.sort <- function(coverage.statistics) { genes <- get.gene(coverage.statistics); gene.start <- vapply( unique(genes), function(x, genes) match(x, genes), genes = genes, FUN.VALUE = 0 ); gene.end <- c(gene.start[-1], nrow(coverage.statistics) + 1); lengths <- c() for (i in 1:length(gene.start)) { lengths <- c(lengths, gene.end[i] - gene.start[i]) } combined <- data.frame(gene=unique(genes), length=lengths, stringsAsFactors = FALSE) length.order <- order(lengths, decreasing = TRUE) combined <- combined[ length.order, ] total.genes <- nrow(combined) alternate <- data.frame() alternate <- rbind(alternate, combined[total.genes, ]) big <- TRUE big.iter <- 1 small.iter <- total.genes - 1 for (i in 1:(total.genes-1)) { if (big) { alternate <- rbind(alternate, combined[big.iter, ]) big.iter <- big.iter + 1 big <- FALSE } else { alternate <- rbind(alternate, combined[small.iter, ]) small.iter <- small.iter - 1 big <- TRUE } } new.frame <- data.frame(stringsAsFactors = FALSE) old.frame <- coverage.statistics for (i in 1:total.genes) { sort.gene <- alternate[i,1] for (j in 1:nrow(old.frame)) { test.gene <- genes[j] if (sort.gene == test.gene){ new.frame <- rbind(new.frame, old.frame[j, ]) } } } return(new.frame) } #' fix.varscan.af #' #' @description #' VarScan does not output allele frequencies, so this script calculates them from the #' DP (depth) and AD (variant allele depth) values and adds them to the annotated vcf. #' #' @param variant.specification Data frame of variant file information #' #' #' fix.varscan.af <- function(variant.specification) { for (i in 1:nrow(variant.specification)) { if (variant.specification[i, "caller"] != "varscan") { next } variant.file <- variant.specification[i, "variant.file"] output.dir <- dirname(variant.file) sample.id <- variant.specification[i, "sample.id"] vcf.file <- file.path(output.dir, paste0(sample.id, '.passed.ontarget.vcf')) vcf.somatic.line <- readLines(vcf.file)[4] if (grepl('SOMATIC', vcf.somatic.line, fixed=TRUE)) { somatic <- TRUE } else { somatic <- FALSE } vcf.df <- try(utils::read.table(vcf.file, stringsAsFactors = FALSE, header = FALSE), silent = TRUE) if (inherits(vcf.df, 'try-error')) { next } variant.df <- utils::read.table(variant.file, stringsAsFactors = FALSE, header = TRUE) if (nrow(variant.df) < 1) { next } try(variant.df[,"NORMAL.AF"] <- variant.df[,"NORMAL.AD"] / variant.df[,"NORMAL.DP"], silent = TRUE) try(variant.df[,"TUMOR.AF"] <- variant.df[,"TUMOR.AD"] / variant.df[,"TUMOR.DP"], silent = TRUE) try(variant.df[,"TUMOR.AF"] <- variant.df[,"Sample1.AD"] / variant.df[,"Sample1.DP"], silent = TRUE) if (somatic) { variant.df <- variant.df[which(grepl('SOMATIC', vcf.df$V8, fixed=TRUE)), ] } utils::write.table(variant.df, file = variant.file, sep = '\t', quote = FALSE) } } #' fix.lofreq.af #' #' @description #' LoFreq also does not output allele frequencies, so this script calculates them from the #' DP (depth) and AD (variant allele depth) values--which are also not output nicely-- #' and adds them to the annotated vcf. 
#' #' @param variant.specification Data frame of variant file information #' #' #' fix.lofreq.af <- function(variant.specification) { variant.spec <- variant.specification[variant.specification["caller"] == 'lofreq', ] annotated.files <- variant.spec["variant.file"] if ( nrow(variant.spec) == 1 ) { single.file <- TRUE } else { single.file <- FALSE } for (i in 1:nrow(variant.spec)) { sample.id <- variant.spec[i, "sample.id"] if ( single.file ) { output.dir <- dirname(as.character(annotated.files[i])) } else { output.dir <- dirname(annotated.files[i, "variant.file"]) } vcf.file <- file.path(output.dir, paste0(sample.id, '.passed.ontarget.vcf')) variant.df <- try(utils::read.table(vcf.file, stringsAsFactors = FALSE, header = FALSE), silent = TRUE) if (inherits(variant.df, 'try-error')) { next } DP <- stringr::str_extract(variant.df[,8], "(?<=DP=)\\d+(?=;)") AF <- stringr::str_extract(variant.df[,8], "(?<=AF=)\\d\\.\\d+(?=;)") AD.str <- stringr::str_extract(variant.df[,8], "(?<=DP4=)\\d+,\\d+,\\d+,\\d+") AD <- sapply(AD.str, sum.dp4) if ( single.file ) { annovar.table <- utils::read.table(as.character(annotated.files[i]), stringsAsFactors = FALSE, header = TRUE) } else { annovar.table <- utils::read.table(annotated.files[i, "variant.file"], stringsAsFactors = FALSE, header = TRUE) } try(annovar.table["TUMOUR.DP"] <- DP, silent = TRUE) try(annovar.table["TUMOUR.AF"] <- AF, silent = TRUE) try(annovar.table["TUMOUR.AD"] <- AD, silent = TRUE) # Remove duplicated variants (happens when BED regions overlap) annovar.table <- annovar.table[!duplicated(annovar.table[,1:4]),] if ( single.file ) { utils::write.table(annovar.table, file = as.character(annotated.files[i]), sep = '\t', quote = FALSE) } else { utils::write.table(annovar.table, file = annotated.files[i, "variant.file"], sep = '\t', quote = FALSE) } } } #' sum.dp4 #' #' @description #' Simply calculates the depth of coverage of the variant allele given a string of #' DP4 values #' #' @param dp4.str String of DP4 values in the form "1234,1234,1234,1234" #' #' #' sum.dp4 <- function(dp4.str) { dp4.nums <- strsplit(dp4.str, ',') dp4.nums <- as.integer(dp4.nums[[1]]) return(dp4.nums[3] + dp4.nums[4]) }
/scratch/gouwar.j/cran-all/cranData/varitas/R/helper_functions.R
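Two quick sketches of the helpers above on toy inputs (the REF/ALT pairs and the command line are invented for illustration).
# variant classification and pyrimidine-keyed substitutions
ref <- c('A', 'G', 'AT', 'CG');
alt <- c('C', 'A', 'A', 'AT');
classify.variant(ref, alt);        # 'SNV' 'SNV' 'indel' 'MNV'
get.base.substitution(ref, alt);   # 'T>G' 'C>T' NA NA (non-SNVs give NA)

# assembling a shell command from its parts
make.command.line.call(
    main.command = c('bwa', 'mem'),
    options = list(t = 4),
    option.prefix = '-'
);                                 # "bwa mem -t 4" (plus benign trailing whitespace)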
#' Check if a key is in VariTAS options #' #' @param option.name #' String giving name of option (with different levels joined by \code{nesting.character}) #' @param varitas.options #' VariTAS options as a list. If missing, they will be obtained from \code{get.varitas.options()} #' @inheritParams get.varitas.options #' #' @return in.options Boolean indicating if the option name exists in the current varitas options in.varitas.options <- function( option.name = NULL, varitas.options = NULL, nesting.character = '\\.' ) { if( is.null(varitas.options) ) { varitas.options <- get.varitas.options(); } # split name of option into the immediate level and subsequent levels # (str_split handles the nesting pattern directly, so no pre-normalization is needed) name.components <- stringr::str_split( option.name, pattern = nesting.character, n = 2 )[[ 1 ]]; option.level.name <- name.components[1]; if( option.level.name %in% names(varitas.options) ) { if( 1 == length(name.components) ) { # we have reached a leaf, and all components were in the options # => The option exists! in.options <- TRUE; } else { # we are not at a leaf yet, try next level in.options <- in.varitas.options( option.name = name.components[2], varitas.options = varitas.options[[ option.level.name ]], nesting.character = nesting.character ); } } else { # key not found -> return FALSE in.options <- FALSE; } return(in.options); }
/scratch/gouwar.j/cran-all/cranData/varitas/R/in.varitas.options.R
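A sketch of key lookup against a hand-built options list rather than the live settings.
opts <- list( filters = list( mutect = list(min_tumour_depth = 20) ) );
in.varitas.options('filters.mutect', varitas.options = opts);    # TRUE
in.varitas.options('filters.varscan', varitas.options = opts);   # FALSE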
#' Merge variants #' #' @description #' Merge variants from multiple callers and return a data frame of merged calls. By default filtering #' is also applied, although this behaviour can be turned off by setting apply.filters to FALSE. #' #' @param variant.specification #' Data frame containing details of file paths, sample IDs, and caller. #' @param apply.filters Logical indicating whether to apply filters. Defaults to TRUE. #' @param separate.consensus.filters #' Logical indicating whether to apply different thresholds to variants called by more than one caller #' (specified under consensus in config file). Defaults to FALSE. #' @param remove.structural.variants #' Logical indicating whether structural variants (including CNVs) should be removed. Defaults to TRUE. #' @param verbose Logical indicating whether to print information to screen #' #' @return Data frame of merged (and optionally filtered) variant calls merge.variants <- function( variant.specification, apply.filters = TRUE, remove.structural.variants = TRUE, separate.consensus.filters = FALSE, verbose = FALSE ) { # TO DO: # - file exist tests..? # - verbose output within function itself ### INPUT TESTS ########################################################### # dummy test until you figure out what to do if( !(all( c('sample.id', 'variant.file', 'caller') %in% names(variant.specification) ))) { stop('variant.specification is missing required columns'); } ### MAIN ################################################################## filter.immediately <- apply.filters; if( separate.consensus.filters ) { filter.immediately <- FALSE; } # loop over requested variant callers # make data frame with all variants from that caller, and merge with variants from other callers merged.variants <- NULL; variant.callers <- unique(variant.specification$caller); # keep track of variant callers that actually called mutations for( caller in variant.callers ) { caller.variant.files <- variant.specification[caller == variant.specification$caller, ]; # store caller-specific variants caller.variants <- list(); for( i in 1:nrow(caller.variant.files) ) { variant.file <- caller.variant.files$variant.file[i]; caller <- caller.variant.files$caller[i]; sample.id <- caller.variant.files$sample.id[i]; variant.calls <- read.variant.calls(variant.file, variant.caller = caller); if( !is.null(variant.calls) ) { caller.variants[[ as.character(sample.id) ]] <- data.frame( sample.id = sample.id, variant.calls, stringsAsFactors = FALSE ); } } # if no variants found, skip ahead if( 0 == length(caller.variants) ) { if( verbose ) print( paste('No variants found for caller', caller, '- skipping ahead') ); next; } # make sure all variants have all columns # want to preserve order, so this may seem somewhat roundabout column.names <- lapply(caller.variants, names); unique.column.names <- unique( unlist(column.names) ); full.column.names <- column.names[[ which.max( sapply(column.names, length) ) ]]; # if any are missing from longest column names, add to it if( !all(unique.column.names %in% full.column.names) ) { full.column.names <- c( full.column.names, unique.column.names[ !(unique.column.names %in% full.column.names) ] ); } temp.caller.variants <- list(); for(i in 1:length(caller.variants) ) { temp.data <- caller.variants[[ i ]]; missing.columns <- full.column.names[ !(full.column.names %in% names(temp.data) ) ]; for( column in missing.columns ) temp.data[, column] <- '.'; temp.caller.variants[[ i ]] <- temp.data[, full.column.names]; } caller.variants <- do.call(rbind, temp.caller.variants); # note which caller called the variant caller.variants[, paste0('CALLED.', toupper(caller))] 
<- caller; # apply filters if requested if( filter.immediately ) { caller.variants <- filter.variants( caller.variants, caller = caller, verbose = verbose ); } # merge with other variant callers if( is.null(merged.variants) ) { merged.variants <- caller.variants; } else { merged.variants <- merge( merged.variants, caller.variants, all = TRUE ); } } if( verbose ) { print( utils::str(merged.variants) ); print( paste0('CALLED.', toupper(variant.callers)) ); } ### POST-PROCESSING if( length(variant.callers) > 1) { # throws an error if only one caller, handle separately merged.variants$caller <- apply( merged.variants[, grepl('^CALLED\\.', names(merged.variants))], 1, FUN = function(x) paste(x[!is.na(x)], collapse = ':') ); } else { merged.variants$caller <- variant.callers; } merged.variants[, paste0('CALLED.', toupper(variant.callers)) ] <- NULL; merged.variants$Type <- classify.variant(ref = merged.variants$REF, alt = merged.variants$ALT); ### FILTERING IF CONSENSUS RESCUING HAS BEEN REQUESTED # use filtered.variants from here on out – if merged.variants appears again it is a bug!! filtered.variants <- merged.variants; if( apply.filters && separate.consensus.filters ) { # want to identify variants that have been called by more than one variant caller, and apply less stringent # filters to them for(caller in variant.callers) { # identify variants that have only been called by this caller, and apply filters caller.only <- caller == filtered.variants$caller; filtered.caller.variants <- filter.variants( filtered.variants[caller.only, ], caller = caller ); filtered.variants <- rbind( filtered.variants[!caller.only, ], filtered.caller.variants ); } # FILTER CONSENSUS CALLS # get rows corresponding to variants called by multiple callers multiple.callers <- grepl(':', filtered.variants$caller); filtered.consensus.variants <- filter.variants( filtered.variants[multiple.callers, ], caller = 'consensus' ); filtered.variants <- rbind( filtered.variants[!multiple.callers, ], filtered.consensus.variants ); } ### REMOVE SVs IF REQUESTED # generally do not trust CNV calling from targeted panel # remove all entries with anything other than a specific ALT base, e.g. <CNV> if( remove.structural.variants ) { filtered.variants <- filtered.variants[!grepl('<', filtered.variants$ALT), ]; } ## MEAN TUMOUR ALLELE FREQUENCY is.tumour.af.column <- grepl('TUMOUR.AF$', names(filtered.variants)); filtered.variants$MEAN.TUMOUR.AF <- apply( filtered.variants[, is.tumour.af.column], 1, FUN = function(AFs) { # remove blank values – caller doesn't have an estimate AFs <- AFs[ !is.na(AFs) & '' != AFs ]; return( mean( as.numeric(AFs)) ); } ); return(filtered.variants); }
/scratch/gouwar.j/cran-all/cranData/varitas/R/merge.variants.R
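The expected shape of variant.specification, sketched with placeholder paths; merge.variants reads each file with read.variant.calls, so the files must exist for a real run.
variant.specification <- data.frame(
    sample.id = c('S1', 'S1'),
    variant.file = c(
        'S1/vardict/S1.passed.ontarget.vcf.annovar.hg19_multianno.vcf.txt',
        'S1/mutect/S1.passed.ontarget.vcf.annovar.hg19_multianno.vcf.txt'
    ),
    caller = c('vardict', 'mutect'),
    stringsAsFactors = FALSE
);
# illustrative call (requires the files above to exist)
merged.variants <- merge.variants(variant.specification, apply.filters = FALSE);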
#' overwrite.varitas.options #' #' @description #' Overwrite VariTAS options with options provided in config file. #' #' @param config.file Path to config file that should be used to overwrite options #' #' @return None #' @examples #' \dontrun{ #' config <- file.path(path.package('varitas'), 'config.yaml') #' overwrite.varitas.options(config) #' } #' #' @export overwrite.varitas.options <- function(config.file) { ### INPUT TESTS ########################################################### if( !file.exists(config.file) ) { error.message <- paste('File', config.file, 'not found'); stop(error.message); } ### MAIN ################################################################## config <- yaml::yaml.load_file(config.file); config$pkgname <- get.varitas.options('pkgname'); # if a mode has been set, start by setting those settings # they will later be overwritten by any values specified in the config file if( 'mode' %in% names(config) ) { # convert to lower case to allow users to specify ctDNA rather than ctdna config$mode <- tolower(config$mode); # only ctDNA and tumour supported if( !( config$mode %in% c('ctdna', 'tumour') ) ) { stop('mode must be either ctDNA or tumour'); } # read mode defaults from file mode.default.file <- system.file( paste0(config$mode, '_defaults.yaml'), package = get.varitas.options('pkgname') ); mode.defaults <- yaml::yaml.load_file( mode.default.file ); # update mode defaults with specified values config <- utils::modifyList(mode.defaults, config); } # update filters so defaults are considered as baseline for each caller if( 'filters' %in% names(config) ) { config$filters <- get.filters(config$filters); } options(varitas = config); }
/scratch/gouwar.j/cran-all/cranData/varitas/R/overwrite.varitas.options.R
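A sketch of loading a minimal config; the key shown is illustrative, and note that the call replaces the full option set with the config contents (plus any mode defaults).
config.file <- tempfile(fileext = '.yaml');
writeLines('reference_build: grch38', config.file);
overwrite.varitas.options(config.file);
get.varitas.options('reference_build');   # 'grch38'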
#' plot.amplicon.coverage.per.sample #' #' @description #' Create one scatterplot per sample, showing coverage per amplicon, and an additional plot giving the median #' #' @param coverage.statistics #' Data frame containing coverage per amplicon per sample, typically from \code{get.coverage.by.amplicon}. #' @param output.directory #' Directory where per sample plots should be saved #' #' @return None plot.amplicon.coverage.per.sample <- function( coverage.statistics, output.directory ) { # TO DO: # - figure out a smarter way of extracting the gene # - look into ordering by chromosome rather than lexicographic order # get first column containing a sample – first numeric one after chromosome coordinates first.sample.column <- 4; while( !is.numeric(coverage.statistics[, first.sample.column]) && first.sample.column <= ncol(coverage.statistics) ) { if( ncol(coverage.statistics) == first.sample.column ) { stop('Cannot find first sample column'); } first.sample.column <- first.sample.column + 1; } #config <- read.yaml(save.config()) #target.panel <- read.table(config[['target_panel']], stringsAsFactors = FALSE) #first.sample.column <- ncol(target.panel) + 1 #coverage.statistics <- alternate.gene.sort(coverage.statistics) genes <- get.gene(coverage.statistics); gene.start <- vapply( unique(genes), function(x, genes) match(x, genes), genes = genes, FUN.VALUE = 0 ); gene.end <- c(gene.start[-1], nrow(coverage.statistics) + 1); midpoints <- gene.start + (gene.end - gene.start)/2; chr.nums <- sapply(coverage.statistics$chr, function(x) substr(x, 4, nchar(x))) to.remain <- sapply(chr.nums, function(x) x != 'X' && x != 'Y') old.names <- names(coverage.statistics) coverage.statistics <- cbind(genes, chr.nums, coverage.statistics) names(coverage.statistics) <- c('gene', 'chr.no', old.names) first.sample.column <- first.sample.column + 2 for(i in which(sapply(coverage.statistics, class) == "factor")) coverage.statistics[[i]] = as.character(coverage.statistics[[i]]) colours <- c() shapes <- c() chr.palette = c( '#8DD3C7', '#081D58', '#BEBADA', '#FB8072', '#CCEBC5', '#FDB462', '#999999', '#FCCDE5', '#FC8D59', '#35978F', '#F781BF', '#FFED6F', '#E41A1C', '#377EB8', '#4DAF4A', '#984EA3', '#A65628', '#80B1D3', '#252525', '#A6761D', '#B3DE69', '#F0027F', '#FFFFCC', '#FDDBC7', '#004529' ) # print(length(sample.coverage)) # Sort by chromosome and position (sex chromosomes at the end) sex.chr.rows <- coverage.statistics[!to.remain, ] coverage.statistics <- coverage.statistics[to.remain, ] coverage.statistics$chr.no <- as.integer(coverage.statistics$chr.no) coverage.order <- order(coverage.statistics$chr.no, coverage.statistics$start, coverage.statistics$end); coverage.statistics <- coverage.statistics[ coverage.order, ]; sex.chr.order <- order(sex.chr.rows$chr.no, sex.chr.rows$start, sex.chr.rows$end); sex.chr.rows <- sex.chr.rows[ sex.chr.order, ]; coverage.statistics <- rbind(coverage.statistics, sex.chr.rows) genes <- unique(coverage.statistics$gene) # chr.list <- c() # for (g in 1:length(genes)) { # chr.list <- c(chr.list, unique(coverage.statistics[which(coverage.statistics$gene==genes[g]),2])) # } chr.list <- coverage.statistics$chr.no # Red and blue for odd/even chromosome numbers # for (j in 1:length(chr.list)) { # chr.ending <- chr.list[j] # if (chr.ending == 'X' || chr.ending == 'Y') { # colours <- c(colours, 'grey') # shapes <- c(shapes, 23) # next # } # chromosome <- as.integer(chr.ending) # # print(substr(coverage.statistics[j,2], 4, nchar(coverage.statistics[j,2]))) # if (chromosome %% 2 == 0) { # 
colour <- 'red' # shape <- 21 # } else { # colour <- 'blue' # shape <- 22 # } # shapes <- c(shapes, shape) # colours <- c(colours, colour) # } # Alternating red and blue for each chromosome present red <- TRUE prev.chr <- '' for (j in 1:length(chr.list)) { chr.ending <- chr.list[j] if (chr.ending != prev.chr){ if (red) { colours <- c(colours, 'red') shapes <- c(shapes, 21) red <- FALSE } else { colours <- c(colours, 'blue') shapes <- c(shapes, 22) red <- TRUE } } else { colours <- c(colours, colours[length(colours)]) shapes <- c(shapes, shapes[length(shapes)]) } prev.chr <- chr.ending } # Colour for each chromosome # for (j in 1:length(chr.list)) { # chr.ending <- chr.list[j] # if (chr.ending == 'X') { # colours <- c(colours, chr.palette[24]) # } else if (chr.ending == 'Y') { # colours <- c(colours, chr.palette[25]) # } else { # colours <- c(colours, chr.palette[as.integer(chr.ending)]) # } # } # loop over columns and plot all of them for(i in first.sample.column:ncol(coverage.statistics) ) { sample.id <- names(coverage.statistics)[i]; sample.coverage <- coverage.statistics[, i]; # sample.coverage <- tapply(coverage.statistics[, i], coverage.statistics$gene, sum) # sample.coverage <- sample.coverage[match(genes, names(sample.coverage))] # x coordinate for each amplicon row: the index of its gene in plotting order # (replaces a nested loop that reused the outer loop variable i) x <- match(coverage.statistics$gene, unique(genes)) grDevices::png( file.path(output.directory, paste0(sample.id, '.png')), height = 4, width = 7, units = 'in', res = 400 ); graphics::par( mar = c(3.2, 4, 1.2, 0.2), cex.axis = 0.6, font.axis = 1, oma = c(0, 0, 0, 0), las = 2, tcl = -0.2 ); graphics::plot( x = jitter(x, amount = 0.15), y = sample.coverage, main = sample.id, cex = 0.8, pch = shapes, # pch = 21, bg = colours, col = 'black', xlab = '', ylab = 'Coverage', xaxt = 'n', xaxs = 'r' ); #graphics::abline(v = gene.start[-1], col = 'grey', lty = 'dashed'); graphics::axis(1, at = 1:length(unique(genes)), labels = unique(genes), font = 2); grDevices::dev.off(); } # TO DO: consolidate this plot with the one above into a function # this will also solve vignette issue # plot median if (first.sample.column < ncol(coverage.statistics)) { avg.coverage.stats <- stats::aggregate.data.frame( coverage.statistics[, first.sample.column:ncol(coverage.statistics)], list(coverage.statistics$gene), sum ); avg.coverage.stats <- avg.coverage.stats[match(genes, avg.coverage.stats$Group.1),] median.coverage <- apply( avg.coverage.stats[, 2:ncol(avg.coverage.stats)], 1, stats::median ); } else { # Only one sample median.coverage <- stats::median(coverage.statistics[, first.sample.column]) } grDevices::png( file.path(output.directory, 'median.png'), height = 4, width = 7, units = 'in', res = 400 ); graphics::par( mar = c(3.2, 4, 1.2, 0.2), cex.axis = 0.6, font.axis = 1, oma = c(0, 0, 0, 0), las = 2, tcl = -0.2 ); graphics::plot( x = seq_along(median.coverage), y = median.coverage, main = 'Median Coverage', cex = 0.8, pch = 22, bg = 'grey', col = 'black', xlab = '', ylab = 'Coverage', xaxt = 'n', xaxs = 'r' ); #graphics::abline(v = gene.start[-1], col = 'grey', lty = 'dashed'); graphics::axis(1, at = 1:length(unique(genes)), labels = unique(genes), font = 2); grDevices::dev.off(); }
/scratch/gouwar.j/cran-all/cranData/varitas/R/plot.amplicon.coverage.per.sample.R
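The coverage.statistics input is expected to start with bed-style columns followed by one numeric column per sample; a toy two-sample frame (values invented) illustrating the layout.
coverage.statistics <- data.frame(
    chr = c('chr1', 'chr1', 'chr7'),
    start = c(100, 500, 900),
    end = c(250, 650, 1050),
    id = c('amp_1', 'amp_2', 'amp_3'),
    target = c('GENE_A ex1', 'GENE_A ex2', 'GENE_B ex1'),
    S1 = c(1500, 1320, 980),
    S2 = c(1720, 1490, 1100),
    stringsAsFactors = FALSE
);
plot.amplicon.coverage.per.sample(coverage.statistics, output.directory = tempdir());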
#' Plot amplicon coverage by genome order #' #' @description #' Use values obtained by bedtools coverage to make a plot of coverage by genome order #' #' @param coverage.data data frame with results from bedtools coverage command plot.coverage.by.genome.order <- function(coverage.data) { # TO DO: # - debug pool colour part of this function # make sure it is ordered by genome location coverage.data <- coverage.data[order(coverage.data[, 1], coverage.data[, 2]), ]; # not clear how many columns exist in this, as that will depend on the panel BED file reads.mapped.to.amplicon <- coverage.data[, ncol(coverage.data) - 3]; # if pool information is available, colour by it pools <- get.pool.from.panel.data(coverage.data); if(is.null(pools)) { point.colours <- "black"; } else { # create a colour scheme to recode pool vector by unique.pools <- unique(pools); colour.scheme <- get.colours(length(unique.pools)); names(colour.scheme) <- unique.pools; point.colours <- colour.scheme[pools]; } graphics::plot( reads.mapped.to.amplicon, xlab = "Genome order", ylab = "Coverage", pch = 16, col = point.colours ); }
/scratch/gouwar.j/cran-all/cranData/varitas/R/plot.coverage.by.genome.order.R
#' plot.coverage.by.sample #' #' @description #' Make a barplot of coverage per sample #' #' @param coverage.sample Data frame of coverage data, typically from \code{get.coverage.by.sample.statistics} #' @param file.name Name of output file #' @param statistic Statistic to be plotted (mean or median) #' #' @return None plot.coverage.by.sample <- function( coverage.sample, file.name, statistic = c('mean', 'median') ) { statistic <- match.arg(statistic); ### INPUT TESTS ########################################################### statistic.column <- paste0(statistic, '.coverage'); if( !( statistic.column %in% names(coverage.sample) ) ) { error.message <- paste( 'coverage.sample data frame does not contain a column', statistic.column ); stop(error.message); } ### MAIN ################################################################## barplot.data <- coverage.sample[, statistic.column]; names(barplot.data) <- coverage.sample$sample.id; cex.axis <- 0.7; if( nrow(coverage.sample) > 30) cex.axis <- 0.4; yaxis.label <- c( 'mean' = 'Mean coverage', 'median' = 'Median coverage' ); if( !is.null(file.name) ) { grDevices::png( file.name, width = 7, height = 5, units = 'in', res = 400 ); } graphics::par( mar = c(5.3, 4, 0.5, 0.5), cex.axis = cex.axis, font.axis = 2, oma = c(0, 0, 0, 0), tcl = -0.2, las = 2, mgp = c(3, 0.25, 0) ); graphics::barplot( barplot.data, ylab = yaxis.label[ statistic ] ); if( !is.null(file.name) ) grDevices::dev.off(); }
/scratch/gouwar.j/cran-all/cranData/varitas/R/plot.coverage.by.sample.R
#' plot.ontarget.percent #' #' @description #' Make a barplot of ontarget percent per sample #' #' @param coverage.sample Data frame of coverage data, typically from \code{get.coverage.by.sample.statistics} #' @param file.name Name of output file #' #' @return None plot.ontarget.percent <- function(coverage.sample, file.name) { ### INPUT TESTS ########################################################### if( !('ontarget.percent' %in% names(coverage.sample) ) ) { stop('coverage.sample data frame does not contain a column ontarget.percent'); } ### MAIN ################################################################## barplot.data <- 100*coverage.sample$ontarget.percent; names(barplot.data) <- coverage.sample$sample.id; cex.axis <- 0.7; if( nrow(coverage.sample) > 30) cex.axis <- 0.4; if( !is.null(file.name) ) { grDevices::png( file.name, width = 7, height = 5, units = 'in', res = 400 ); } graphics::par( mar = c(5.1, 4, 0.5, 0.5), cex.axis = cex.axis, font.axis = 2, oma = c(0, 0, 0, 0), tcl = -0.2, las = 2, mgp = c(3, 0.25, 0) ); graphics::barplot( barplot.data, ylab = 'Ontarget reads (%)' ); if( !is.null(file.name) ) grDevices::dev.off(); }
/scratch/gouwar.j/cran-all/cranData/varitas/R/plot.ontarget.percent.R
#' plot.paired.percent #' #' @description #' Make a barplot of percent paired reads per sample #' #' @param coverage.sample Data frame of coverage data, typically from \code{get.coverage.by.sample.statistics} #' @param file.name Name of output file #' #' @return None plot.paired.percent <- function(coverage.sample, file.name) { ### INPUT TESTS ########################################################### if( !('paired.percent' %in% names(coverage.sample) ) ) { stop('coverage.sample data frame does not contain a column paired.percent'); } ### MAIN ################################################################## barplot.data <- 100*coverage.sample$paired.percent; names(barplot.data) <- coverage.sample$sample.id; cex.axis <- 0.7; if( nrow(coverage.sample) > 30) cex.axis <- 0.4; if( !is.null(file.name) ) { grDevices::png( file.name, width = 7, height = 5, units = 'in', res = 400 ); } graphics::par( mar = c(5.1, 4, 0.5, 0.5), cex.axis = cex.axis, font.axis = 2, oma = c(0, 0, 0, 0), tcl = -0.2, las = 2, mgp = c(3, 0.25, 0) ); graphics::barplot( barplot.data, ylab = 'Paired reads (%)' ); if( !is.null(file.name) ) grDevices::dev.off(); }
/scratch/gouwar.j/cran-all/cranData/varitas/R/plot.paired.percent.R
#' Post-processing of variants to generate outputs #' #' @param variant.specification #' Data frame specifying variants to be processed, or path to data frame (useful if calling from Perl) #' @param project.directory #' Directory where output should be stored. Output files will be saved to a datestamped subdirectory #' @param config.file #' Path to config file specifying post-processing options. If not provided, the current options are used (i.e. from \code{get.varitas.options()}) #' @param variant.callers #' Optional vector of variant callers for which filters should be included in Excel file #' @param sleep #' Logical indicating whether script should sleep for five minutes before starting. #' @param verbose #' Logical indicating whether to print verbose output #' @inheritParams merge.variants #' #' @return None #' #' post.processing <- function( variant.specification, project.directory, config.file = NULL, variant.callers = NULL, remove.structural.variants = TRUE, separate.consensus.filters = FALSE, sleep = FALSE, verbose = FALSE ) { # some problems with hard drive being too slow to save all files # try to get around this by sleeping and hoping this is enough to let things settle if( sleep ) { Sys.sleep(5*60); } # TO DO: # - specification data frame for coverage QC? (if we could bugfix bedtools that would be ideal...) ### INPUT TESTS ########################################################### # project.directory assertthat::assert_that( is.character(project.directory) ); assertthat::assert_that( length(project.directory) == 1 ); assertthat::assert_that( dir.exists(project.directory), msg = paste( 'Directory', project.directory, 'does not exist or is not a directory' ) ); # variant.specification assertthat::assert_that( is.character(variant.specification) || is.data.frame(variant.specification), msg = 'variant.specification should be either a data frame or a file path' ); assertthat::assert_that( is.data.frame(variant.specification) || file.exists(variant.specification), msg = paste( 'variant.specification file', variant.specification, 'does not exist' ) ); ### MAIN ################################################################## if( !is.null(config.file) ) overwrite.varitas.options(config.file); if( is.character(variant.specification) ) { variant.specification <- utils::read.table( variant.specification, sep = '\t', header = TRUE, stringsAsFactors = FALSE ); } # keep track of all sample IDs sample.ids <- unique( variant.specification$sample.id ); # Get callers all.callers <- variant.specification$caller; unique.callers <- unique(all.callers); if( is.null(variant.callers) ) { variant.callers <- unique.callers } else { variant.callers <- union(variant.callers, unique.callers); } # Fix allele frequencies if necessary if ( "varscan" %in% variant.callers ) { fix.varscan.af(variant.specification) } if ( "lofreq" %in% variant.callers ) { fix.lofreq.af(variant.specification) } # create output directory and directory for plots # this directory contains the files that are meant to be sent to collaborators # (variant calls, read statistics, PDF report) output.directory <- file.path( project.directory, paste0(Sys.Date(), '-variant-data') ); # directory for raw plots # not meant to be sent to collaborators, but contains plots from PDF report in # PNG format plotting.directory <- file.path( project.directory, paste0(Sys.Date(), '-plots') ); if( !dir.exists(output.directory) ) dir.create(output.directory); if( !dir.exists(plotting.directory) ) dir.create(plotting.directory); ### QUALITY CONTROL DATA 
if( length( system.ls('*/*.stats', project.directory) ) > 0 ) { coverage.sample <- get.coverage.by.sample.statistics(project.directory); coverage.amplicon <- get.coverage.by.amplicon(project.directory); # plot amplicon coverage per sample sample.scatterplot.directory <- file.path(plotting.directory, 'sample-coverage'); if( !dir.exists(sample.scatterplot.directory) ) dir.create(sample.scatterplot.directory); plot.amplicon.coverage.per.sample( coverage.amplicon, sample.scatterplot.directory ); # Ontarget reads ontarget.percent.file.name <- file.path(plotting.directory, 'ontarget_percent.png'); plot.ontarget.percent( coverage.sample, file.name = ontarget.percent.file.name ); # Paired reads – not the most informative plot, but useful to detect if pairing worked paired.percent.file.name <- file.path(plotting.directory, 'paired_percent.png'); plot.paired.percent( coverage.sample, file.name = paired.percent.file.name ); # Mean coverage per sample mean.coverage.file.name <- file.path(plotting.directory, 'mean_coverage.png'); plot.coverage.by.sample( coverage.sample[order(-coverage.sample$mean.coverage),], file.name = mean.coverage.file.name, statistic = 'mean' ); # median coverage per sample median.coverage.file.name <- file.path(plotting.directory, 'median_coverage.png'); plot.coverage.by.sample( coverage.sample[order(-coverage.sample$median.coverage),], file.name = median.coverage.file.name, statistic = 'median' ); # Coverage statistics Excel file # Should be sent to collaborators, i.e. save in output directory file.name <- file.path(output.directory, 'Coverage_statistics.xlsx'); save.coverage.excel( project.directory = project.directory, file.name = file.name, overwrite = TRUE ); } ### GET VARIANTS # pass the filtering options through from the post.processing() arguments filtered.variants <- merge.variants( variant.specification, apply.filters = TRUE, remove.structural.variants = remove.structural.variants, separate.consensus.filters = separate.consensus.filters, verbose = verbose ); # keep track of all sample IDs so they appear in plots filtered.variants$sample.id <- factor( filtered.variants$sample.id, levels = sample.ids ); # save to txt -> probably a good idea to keep raw data utils::write.table( filtered.variants, file.path(output.directory, 'filtered_variants.txt'), sep = '\t', row.names = FALSE ); # save to Excel filters <- get.varitas.options('filters')[ variant.callers ]; save.variants.excel( variants = filtered.variants, file.name = file.path(output.directory, 'Filtered_variants.xlsx'), filters = filters ); ### PLOTS # Caller overlap venn diagram caller.overlap.venn.diagram( filtered.variants, file.name = file.path(plotting.directory, 'caller_overlap.png') ); variants.sample.barplot( filtered.variants, file.name = file.path(plotting.directory, 'variants_per_sample.png') ); variants.caller.barplot( filtered.variants, file.name = file.path(plotting.directory, 'variants_caller_type.png'), group.by = 'type' ); variants.caller.barplot( filtered.variants, file.name = file.path(plotting.directory, 'variants_caller_substitution.png'), group.by = 'substitution' ); trinucleotide.barplot( filtered.variants, file.name = file.path(plotting.directory, 'trinucleotide_substitutions.png') ); variant.recurrence.barplot( filtered.variants, file.name = file.path(plotting.directory, 'variant_recurrence.png') ); ### REPORT report.template <- system.file('report_template.Rmd', package = 'varitas'); rmarkdown::render( report.template, output_format = 'pdf_document', output_file = file.path(output.directory, 'pipeline_report.pdf') ); }
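# Standalone usage sketch (not executed as part of the package; all paths are
# hypothetical): post.processing() expects a variant specification with one row
# per sample/caller combination, with annotated variant files already in place.
# The file naming below mirrors the per-caller pattern used in read.all.calls().
#
# variant.spec <- data.frame(
#     sample.id = c('sample1', 'sample1'),
#     variant.file = c(
#         'sample1/vardict/sample1.passed.ontarget.vcf.annovar.hg19_multianno.vcf.txt',
#         'sample1/mutect/sample1.passed.ontarget.vcf.annovar.hg19_multianno.vcf.txt'
#         ),
#     caller = c('vardict', 'mutect'),
#     stringsAsFactors = FALSE
#     );
# post.processing(
#     variant.specification = variant.spec,
#     project.directory = 'my_project',
#     verbose = TRUE
#     );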
/scratch/gouwar.j/cran-all/cranData/varitas/R/post.processing.R
#' Prepare BAM specification data frame to standardized format for downstream analyses. #' #' @description #' This function prepares a data frame that can be used to run variant callers. #' For matched normal variant calling, this data frame will contain three columns with names: sample.id, tumour.bam, normal.bam #' For unpaired variant calling, the data frame will contain two columns with names: sample.id, tumour.bam #' #' @param sample.details Data frame where each row represents a sample to be run. #' Must contain sample ID, path to tumour BAM, and, for paired analyses, path to normal BAM. #' @param paired Logical indicating whether the sample specification is for a paired analysis. #' @param sample.id.column Index or string giving column of sample.details that contains the sample ID #' @param tumour.bam.column Index or string giving column of sample.details that contains the path to the tumour BAM #' @param normal.bam.column Index or string giving column of sample.details that contains the path to the normal BAM #' #' @return bam.specification Data frame with one row per sample to be run #' #' prepare.bam.specification <- function( sample.details, paired = TRUE, sample.id.column = 1, tumour.bam.column = 2, normal.bam.column = 3 ) { ### INPUT TESTS ########################################################### # Verify that tumour and normal BAM columns exist # They can be passed in as either an index or a string # => treat two cases separately tumour.bam.error.message <- 'tumour.bam.column not found in input data.'; if( is.character(tumour.bam.column) && !(tumour.bam.column %in% names(sample.details)) ) { stop(tumour.bam.error.message); } if( is.numeric(tumour.bam.column) && tumour.bam.column > ncol(sample.details) ) { stop(tumour.bam.error.message); } # only require the normal BAM column for paired analyses normal.bam.error.message <- 'normal.bam.column not found in input data.'; if( paired && is.character(normal.bam.column) && !(normal.bam.column %in% names(sample.details)) ) { stop(normal.bam.error.message); } if( paired && is.numeric(normal.bam.column) && normal.bam.column > ncol(sample.details) ) { stop(normal.bam.error.message); } ### MAIN ################################################################## # make sure BAM specification is in a format we like bam.specification <- data.frame( sample.id = sample.details[, sample.id.column], tumour.bam = sample.details[, tumour.bam.column], stringsAsFactors = FALSE ); if(paired) { bam.specification$normal.bam <- sample.details[, normal.bam.column]; } # verify sample sheet has correct format verify.bam.specification(bam.specification); return(bam.specification); }
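# Standalone usage sketch (hypothetical paths; verification may additionally
# require the BAM files to exist on disk): a sample sheet with ID, tumour BAM
# and normal BAM in the first three columns is standardized as follows.
#
# sample.details <- data.frame(
#     id = c('patient1', 'patient2'),
#     tumour = c('patient1.tumour.bam', 'patient2.tumour.bam'),
#     normal = c('patient1.normal.bam', 'patient2.normal.bam'),
#     stringsAsFactors = FALSE
#     );
# bam.specification <- prepare.bam.specification(sample.details, paired = TRUE);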
/scratch/gouwar.j/cran-all/cranData/varitas/R/prepare.bam.specification.R
#' prepare.fastq.specification #' #' @description #' Prepare FASTQ specification data frame to standardized format for downstream analyses. #' #' @details #' This function prepares a data frame that can be used to run alignment. #' For paired-end reads, this data frame will contain three columns with names: sample.id, reads, mates #' For single-end reads, the data frame will contain two columns with names: sample.id, reads #' #' @param sample.details #' Data frame where each row represents a sample to be run. Must contain sample ID and path(s) to FASTQ file(s). #' @param sample.id.column #' Index or string giving column of \code{sample.details} that contains the sample ID #' @param fastq.columns #' Index or string giving column(s) of \code{sample.details} that contain path to FASTQ files #' @param patient.id.column #' Index or string giving column of \code{sample.details} that contains the patient ID #' @param tissue.column #' Index or string giving column of \code{sample.details} that contains information on tissue (tumour/ normal) #' #' @return Data frame with one row per sample to be run #' #' prepare.fastq.specification <- function( sample.details, sample.id.column = 1, fastq.columns = c(2, 3), patient.id.column = NA, tissue.column = NA ) { ### INPUT TESTS ########################################################### # Note: Need to do some input tests here to avoid errors in assembling # the data frame to start with. # More tests are run in verification step # Verify that FASTQ columns exist # They can be passed in as either an index or a string # => treat two cases separately fastq.error.message <- 'FASTQ columns not found in input data.'; if( is.character(fastq.columns) && !all(fastq.columns %in% names(sample.details)) ) { stop(fastq.error.message); } if( is.numeric(fastq.columns) && max(fastq.columns) > ncol(sample.details)) { stop(fastq.error.message); } if( 0 == length(fastq.columns) ) { stop('Must supply at least one FASTQ column.'); } if( length(fastq.columns) > 2 ) { stop('Can accept at most two FASTQ columns.'); } ### MAIN ################################################################## fastq.specification <- data.frame( 'sample.id' = sample.details[, sample.id.column], 'reads' = sample.details[, fastq.columns[1]], stringsAsFactors = FALSE ); if( 2 == length(fastq.columns) ) { fastq.specification[, 'mates'] <- sample.details[, fastq.columns[2]]; } if( !is.na(patient.id.column) ) { fastq.specification[, 'patient.id'] <- sample.details[, patient.id.column]; } if( !is.na(tissue.column) ) { fastq.specification[, 'tissue'] <- sample.details[, tissue.column]; } verify.fastq.specification(fastq.specification); return(fastq.specification); }
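# Standalone usage sketch (hypothetical file names; verification may require
# the FASTQ files to exist): paired-end reads with patient/tissue annotation,
# producing the input format expected by run.alignment().
#
# sample.details <- data.frame(
#     id = c('sample1', 'sample2'),
#     r1 = c('sample1_R1.fastq.gz', 'sample2_R1.fastq.gz'),
#     r2 = c('sample1_R2.fastq.gz', 'sample2_R2.fastq.gz'),
#     patient = c('patient1', 'patient1'),
#     tissue = c('tumour', 'normal'),
#     stringsAsFactors = FALSE
#     );
# fastq.specification <- prepare.fastq.specification(
#     sample.details,
#     sample.id.column = 'id',
#     fastq.columns = c('r1', 'r2'),
#     patient.id.column = 'patient',
#     tissue.column = 'tissue'
#     );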
/scratch/gouwar.j/cran-all/cranData/varitas/R/prepare.fastq.specification.R
#' prepare.miniseq.specifications #' #' @description #' Process a MiniSeq directory and sample sheet to get specification data frames #' that can be used to run the VariTAS pipeline. #' #' Note: This assumes normal samples are not available. #' #' @param sample.sheet #' Data frame containing sample information, or path to a MiniSeq sample sheet #' @param miniseq.directory #' Path to directory with MiniSeq files #' #' @return A list with specification data frames 'fastq', 'bam', and 'vcf' (as applicable) #' @examples #' miniseq.sheet <- file.path(path.package('varitas'), 'extdata/miniseq/Example_template.csv') #' miniseq.directory <- file.path(path.package('varitas'), 'extdata/miniseq') #' miniseq.info <- prepare.miniseq.specifications(miniseq.sheet, miniseq.directory) #' #' #' @export prepare.miniseq.specifications <- function( sample.sheet, miniseq.directory ) { # TO DO: # - make a read.miniseq.sample.sheet function (more robust version of current implementation) ### INPUT TESTS ########################################################### if( !dir.exists(miniseq.directory) ) { stop( paste(miniseq.directory, 'does not exist or is not a directory') ); } ### MAIN ################################################################## if( is.character(sample.sheet) ) { # check format – if starts with [Header], skip first 29 lines # as it means the file contains metadata, etc. first.line <- readLines(sample.sheet, n = 1); skip <- 0; if( '[Header]' == first.line ) skip <- 29; sample.sheet <- utils::read.csv( sample.sheet, skip = skip, stringsAsFactors = FALSE, row.names = NULL ); } directories <- file.path(miniseq.directory, c('fastq', 'bam_bai', 'isis')); names(directories) <- c('fastq', 'bam', 'vcf'); # the VCF directory could be named either vcf or isis, make sure you check both if( !dir.exists(directories['vcf']) ) { directories['vcf'] <- file.path(miniseq.directory, 'vcf'); } directories <- directories[ dir.exists(directories) ]; if( length(directories) > 0 ) { cat('Found directories', paste(names(directories), collapse = ' '), '\n'); } else { cat('No fastq/bam/vcf directories found\n'); return(NULL); } # use sample ID column, take everything until the first dash sample.ids <- sapply( strsplit(sample.sheet$Sample_ID, split = '-'), function(x) x[1] ); specifications <- list(); if( 'fastq' %in% names(directories) ) { r1.files <- get.miniseq.sample.files( sample.ids = sample.ids, directory = directories[ 'fastq' ], file.suffix = '_S\\d{1,2}_.*_R1_.*\\.fastq(\\.gz)?' ); r2.files <- get.miniseq.sample.files( sample.ids = sample.ids, directory = directories[ 'fastq' ], file.suffix = '_S\\d{1,2}_.*_R2_.*\\.fastq(\\.gz)?' ); fastq.specification <- data.frame( sample.id = sample.ids, reads = r1.files, mates = r2.files ); verify.fastq.specification(fastq.specification); specifications[[ 'fastq' ]] <- fastq.specification; } if( 'bam' %in% names(directories) ) { tumour.bams <- get.miniseq.sample.files( sample.ids = sample.ids, directory = directories[ 'bam' ], file.suffix = '_S\\d{1,2}.*\\.bam' ); bam.specification <- data.frame( sample.id = sample.ids, tumour.bam = tumour.bams ); specifications[[ 'bam' ]] <- bam.specification; } if( 'vcf' %in% names(directories) ) { vcf.files <- get.miniseq.sample.files( sample.ids = sample.ids, directory = directories[ 'vcf' ], file.suffix = '_S\\d{1,2}.*\\.vcf$' ); vcf.specification <- data.frame(sample.id = sample.ids, vcf = vcf.files); specifications[[ 'vcf' ]] <- vcf.specification; } return(specifications); }
/scratch/gouwar.j/cran-all/cranData/varitas/R/prepare.miniseq.specifications.R
#' prepare.vcf.specification #' #' @description #' Prepare VCF specification data frame for annotation #' #' @param vcf.details #' Data frame containing details of VCF files #' @param sample.id.column #' Identifier of column in \code{vcf.details} containing sample IDs (index or name) #' @param vcf.column #' Identifier of column in \code{vcf.details} containing VCF file (index or name) #' @param job.dependency.column #' Identifier of column in \code{vcf.details} containing job dependency (index or name) #' @param caller.column #' Identifier of column in \code{vcf.details} containing caller (index or name) #' #' @return Properly formatted VCF details #' #' prepare.vcf.specification <- function( vcf.details, sample.id.column = 1, vcf.column = 2, job.dependency.column = NA, caller.column = NA ) { ### INPUT TESTS ########################################################### if( !is.data.frame(vcf.details) ) { stop('vcf.details must be a data frame'); } # use lists rather than c() so mixed index/name identifiers are not coerced to a common type required.columns <- list( 'sample.id.column' = sample.id.column, 'vcf.column' = vcf.column ); for( column in names(required.columns) ) { column.identifier <- required.columns[[ column ]]; error.message <- paste(column, 'not found in vcf.details data frame'); if( is.numeric(column.identifier) && column.identifier > ncol(vcf.details) ) { stop(error.message); } if( is.character(column.identifier) && !(column.identifier %in% names(vcf.details)) ) { stop(error.message); } } optional.columns <- list( 'job.dependency.column' = job.dependency.column, 'caller.column' = caller.column ); for( column in names(optional.columns) ) { column.identifier <- optional.columns[[ column ]]; # if NA, skip ahead if( is.na(column.identifier) ) next; # if not NA, subject to same quality input tests as required columns error.message <- paste(column, 'not found in vcf.details data frame'); if( is.numeric(column.identifier) && column.identifier > ncol(vcf.details) ) { stop(error.message); } if( is.character(column.identifier) && !(column.identifier %in% names(vcf.details)) ) { stop(error.message); } } ### MAIN ################################################################## vcf.specification <- data.frame( sample.id = vcf.details[, sample.id.column], vcf = vcf.details[, vcf.column], stringsAsFactors = FALSE ); if( !is.na(job.dependency.column) ) { vcf.specification$job.dependency <- vcf.details[, job.dependency.column]; } if( !is.na(caller.column) ) { vcf.specification$caller <- vcf.details[, caller.column]; } verify.vcf.specification(vcf.specification); return(vcf.specification); }
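# Standalone usage sketch (hypothetical paths; verification may require the
# VCF files to exist): caller information is carried along so downstream
# annotation and merging can apply caller-specific handling.
#
# vcf.details <- data.frame(
#     id = c('sample1', 'sample2'),
#     vcf = c('sample1.vcf', 'sample2.vcf'),
#     caller = c('mutect', 'mutect'),
#     stringsAsFactors = FALSE
#     );
# vcf.specification <- prepare.vcf.specification(
#     vcf.details,
#     caller.column = 'caller'
#     );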
/scratch/gouwar.j/cran-all/cranData/varitas/R/prepare.vcf.specification.R
#' Process coverageBed reports #' #' @description #' Process the coverage reports generated by bedtools coverage tool. #' #' @inheritParams get.coverage.by.sample.statistics #' #' @return final.statistics data frame of coverage statistics generated by parsing through coverage reports process.coverage.reports <- function(project.directory) { # TO DO: # - add tests for reports having expected format # - ask Ros what the "cumulative" coverage numbers are supposed to mean coverage.report.paths <- system.ls(pattern = "*/*all.coverage.report", directory = project.directory, error = TRUE); sample.ids <- extract.sample.ids(coverage.report.paths, from.filename = TRUE); single.sample <- !(length(sample.ids) > 1) # store mean and median coverage per patient mean.median.by.sample <- list(); # initialize data frame to store coverage data for all samples merged.coverage.data <- data.frame(); ### PROCESS EACH SAMPLE for(i in seq_along(coverage.report.paths)) { path <- coverage.report.paths[i]; sample.id <- sample.ids[i]; # generated from coverageBed, based on all target regions # 1) region (or 'all') # 2) depth # 3) no. of bases at depth # 4) size of A # 5) % of A at depth coverage.data <- utils::read.delim( path, header = FALSE, stringsAsFactors = FALSE ); depth.values <- coverage.data[, 2]; depth.frequencies <- coverage.data[, 3]; # data frame for merging with all other patients patient.coverage <- data.frame(depth.values, depth.frequencies); names(patient.coverage) <- c('depth', sample.id); # get the median coverage for the sample median.coverage <- tabular.median( values = depth.values, frequencies = depth.frequencies ); # get the mean coverage for each sample mean.coverage <- tabular.mean( values = depth.values, frequencies = depth.frequencies ); mean.median.by.sample[[ sample.id ]] <- data.frame( "sample.id" = sample.id, "mean.coverage" = mean.coverage, "median.coverage" = median.coverage ); # merge with full data frame if( 0 == nrow(merged.coverage.data) ) { merged.coverage.data <- patient.coverage; } else { merged.coverage.data <- merge( merged.coverage.data, patient.coverage, by.x = "depth", by.y = "depth", all = TRUE ); } } ### POST-PROCESSING mean.median.by.sample <- do.call(rbind, mean.median.by.sample); merged.coverage.data$bin <- cut( merged.coverage.data$depth, breaks = c(-Inf, 1, 10, 20, 30, 40, Inf), labels = c("0", "1-10", "10-20", "20-30", "30-40", "40+") ); # for each patient, get proportion of frequencies falling within each depth category if (!single.sample){ coverage.statistics <- apply( merged.coverage.data[, 2:(ncol(merged.coverage.data) - 1)], 2, FUN = function(x, coverage.bin) { tapply(x, coverage.bin, sum, na.rm = TRUE )/ sum(x, na.rm = TRUE); }, coverage.bin = merged.coverage.data$bin ); } else { coverage.statistics <- data.frame(sample.ids=tapply(merged.coverage.data[, 2], merged.coverage.data$bin, sum, na.rm = TRUE) / sum(merged.coverage.data[ ,2], na.rm = TRUE)) } # for all categories except the first one, get the proportion of frequencies falling into that category or higher # - first category (coverage zero) is still just the proportion falling in that category for( i in 2:(nrow(coverage.statistics)-1) ) { if (!single.sample) { coverage.statistics[i,] <- apply(coverage.statistics[i:nrow(coverage.statistics),], 2, sum, na.rm = TRUE) } else { coverage.statistics[i,] <- sum(coverage.statistics[i:nrow(coverage.statistics),], na.rm = TRUE) } } # transpose names(coverage.statistics) <- sample.ids; coverage.statistics <-
t(coverage.statistics); # add mean/ median per sample # make sure they're ordered the same way – anything else will lead to disappointment if (!single.sample) { mean.median.by.sample <- mean.median.by.sample[rownames(coverage.statistics), ]; } # sanity check if( !identical(rownames(coverage.statistics), rownames(mean.median.by.sample) ) ) { stop("coverage.statistics and mean.median.by.sample do not appear to be in the same order. Please investigate."); } # assemble final data frame final.statistics <- cbind( coverage.statistics, mean.median.by.sample ); return(final.statistics); }
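# Self-contained toy illustration of the binning and cumulative-proportion
# logic above, using a made-up depth histogram (depth value, number of bases
# at that depth) for a single sample:
depth.values <- c(0, 5, 15, 25, 35, 50);
depth.frequencies <- c(100, 200, 300, 250, 100, 50);
coverage.bin <- cut(
    depth.values,
    breaks = c(-Inf, 1, 10, 20, 30, 40, Inf),
    labels = c('0', '1-10', '10-20', '20-30', '30-40', '40+')
    );
# proportion of bases falling within each depth category
bin.proportions <- tapply(depth.frequencies, coverage.bin, sum, na.rm = TRUE)/sum(depth.frequencies);
# convert all categories except the first to 'proportion at this depth or higher'
for( i in 2:(length(bin.proportions) - 1) ) {
    bin.proportions[i] <- sum(bin.proportions[i:length(bin.proportions)], na.rm = TRUE);
}
# bin.proportions is now: 0 -> 0.10, 1-10 or higher -> 0.90, 10-20 or higher -> 0.70, ...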
/scratch/gouwar.j/cran-all/cranData/varitas/R/process.coverage.reports.R
#' Process sample contamination checks #' #' @description #' Takes *selfSM reports generated by VerifyBamID during alignment, and returns a data frame of freemix scores. #' The freemix score is a sequence-only estimate of sample contamination that ranges from 0 to 1. #' #' Note: Targeted panels are often too small for this step to work properly. #' #' @inheritParams get.coverage.by.sample.statistics #' #' @return freemix.scores Data frame giving sample contamination (column freemix) score per sample. #' #' @references \url{https://genome.sph.umich.edu/wiki/VerifyBamID} process.sample.contamination.checks <- function(project.directory) { sample.contamination.check.paths <- system.ls(pattern = "*/*selfSM", directory = project.directory, error = TRUE); sample.ids <- extract.sample.ids(sample.contamination.check.paths, from.filename = TRUE); freemix.scores <- list(); for(i in seq_along(sample.contamination.check.paths)) { path <- sample.contamination.check.paths[i]; sample.id <- sample.ids[i]; # Single row data frame, where header gives variable and the row gives value # The sample contamination score is stored in the column called FREEMIX. # For more information, see https://genome.sph.umich.edu/wiki/VerifyBamID#Column_information_in_the_output_files contamination.check <- utils::read.delim( path, sep = "\t", header = TRUE, stringsAsFactors = FALSE ); freemix.scores[[ sample.id ]] <- data.frame( "sample.id" = sample.id, "freemix" = contamination.check[1, "FREEMIX"] ); } freemix.scores <- do.call(rbind, freemix.scores); return(freemix.scores); }
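# Toy illustration of the FREEMIX extraction, substituting an inline two-column
# table for a real *.selfSM file (actual VerifyBamID output has many more
# columns, but only FREEMIX is used here):
contamination.check <- utils::read.delim(
    text = 'SEQ_ID\tFREEMIX\nsample1\t0.002',
    sep = '\t',
    header = TRUE,
    stringsAsFactors = FALSE
    );
contamination.check[1, 'FREEMIX'];
# [1] 0.002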
/scratch/gouwar.j/cran-all/cranData/varitas/R/process.sample.contamination.checks.R
#' Process total coverage statistics #' #' @description #' Process reports generated by flagstat. #' Assumes reports for before and after off-target filtering have been written to the same file, with separating headers #' #' @inheritParams get.coverage.by.sample.statistics #' #' @return data frame with extracted statistics process.total.coverage.statistics <- function(project.directory) { flagstat.report.paths <- system.ls(pattern = "*/*stats", directory = project.directory, error = TRUE); sample.ids <- extract.sample.ids(flagstat.report.paths, from.filename = TRUE); total.coverage.statistics <- list(); for(i in seq_along(flagstat.report.paths)) { ### READ DATA filepath <- flagstat.report.paths[i]; sample.id <- sample.ids[i]; input.lines <- scan( filepath, what = character(), sep = "\n", quiet = TRUE ); ### INPUT TESTS if(28 != length(input.lines)) { error.message <- paste("Read", length(input.lines), "lines from flagstat report. Expected 28 lines."); stop(error.message); } if("Flagstat before filtering off-target reads" != input.lines[1]) { stop('Expected line 1 to be "Flagstat before filtering off-target reads"'); } if("Flagstat after filtering off-target reads" != input.lines[15]) { stop('Expected line 15 to be "Flagstat after filtering off-target reads"'); } ### EXTRACT NUMBERS # # this is based on order of flagstat report => not very robust # have tried to add lots of input tests to compensate ontarget.reads <- stringr::str_extract(input.lines[16], "\\d+") %>% as.numeric; total.reads <- stringr::str_extract(input.lines[2], "\\d+") %>% as.numeric; mapped.reads <- stringr::str_extract(input.lines[6], "\\d+") %>% as.numeric; paired.reads <- stringr::str_extract(input.lines[7], "\\d+") %>% as.numeric; sample.coverage.statistics <- data.frame( "sample.id" = sample.id, "total.reads" = total.reads, "mapped.reads" = mapped.reads, "ontarget.reads" = ontarget.reads, "ontarget.percent" = ontarget.reads/total.reads, "mapped.percent" = mapped.reads/total.reads, "paired.reads" = paired.reads, "paired.percent" = paired.reads/total.reads ); total.coverage.statistics[[ sample.id ]] <- sample.coverage.statistics; } total.coverage.statistics <- do.call(rbind, total.coverage.statistics); ### SANITY CHECKS if( any(total.coverage.statistics$mapped.reads > total.coverage.statistics$total.reads) ) { stop("One or more samples has more mapped reads than total reads. That can't be right."); } if( any(total.coverage.statistics$paired.reads > total.coverage.statistics$total.reads) ) { stop("One or more samples has more paired reads than total reads. That can't be right."); } if( any(total.coverage.statistics$ontarget.percent > 1) ) { stop("One or more samples has on-target percent greater than 100."); } return(total.coverage.statistics); }
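# Toy illustration of the count extraction above: each relevant flagstat line
# starts with the read count, so the first run of digits is pulled out and
# converted (the example line is paraphrased from samtools flagstat output):
example.line <- '123456 + 0 mapped (95.21% : N/A)';
as.numeric( stringr::str_extract(example.line, '\\d+') );
# [1] 123456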
/scratch/gouwar.j/cran-all/cranData/varitas/R/process.total.coverage.statistics.R
#' read.all.calls #' #' @description #' Read all calls made with a certain caller #' #' @param sample.ids #' Vector giving sample IDs to process #' @param caller #' String indicating which caller was used #' @param project.directory #' Path to project directory #' @param patient.ids #' Optional vector giving patient ID (or other group) corresponding to each sample #' @param apply.filters #' Logical indicating whether filters specified in VariTAS options should be applied. Defaults to TRUE. #' @param variant.file.pattern #' Pattern indicating where the variant file can be found. Sample ID should be indicated by SAMPLE_ID #' #' @return combined.variant.calls Data frame with variant calls from all patients #' #' read.all.calls <- function( sample.ids, caller = c('vardict', 'mutect', 'pgm'), project.directory, patient.ids = NULL, apply.filters = TRUE, variant.file.pattern = NULL ) { # TO DO: # - move fix.names option to a two-tier system (no need to add variant caller to start with) # - add support for data frame specification – much more robust ### INPUT TESTS ########################################################### if( !is.character(project.directory) ) { stop('project.directory must be a character string'); } if( !dir.exists(project.directory) ) { error.message <- paste('Directory', project.directory, 'not found'); stop(error.message); } if( !is.character(sample.ids) ) { stop('sample.ids must be character vector'); } if( !is.null(patient.ids) && !is.character(patient.ids) ) { stop('patient.ids must be a character vector'); } ### MAIN ################################################################## caller <- match.arg(caller); # match caller to a pattern indicating where variants can be found # SAMPLE_ID will be replaced with sample.id variable file.path.patterns <- c( 'vardict' = 'SAMPLE_ID/vardict/SAMPLE_ID.passed.ontarget.vcf.annovar.hg19_multianno.vcf.txt', 'mutect' = 'SAMPLE_ID/mutect/SAMPLE_ID.passed.ontarget.vcf.annovar.hg19_multianno.vcf.txt', 'pgm' = 'SAMPLE_ID/pgm/SAMPLE_ID.snvs_and_cnvs.vcf.annovar.hg19_multianno.vcf.txt' ); uppercase.caller <- toupper(caller); combined.variant.calls <- list(); # fall back to the default pattern for the caller if none was supplied if( is.null(variant.file.pattern) ) { variant.file.pattern <- file.path.patterns[ caller ]; } # loop over sample IDs and read in variants for(i in 1:length(sample.ids) ) { sample.id <- sample.ids[i]; variant.file.path <- file.path( project.directory, gsub('SAMPLE_ID', sample.id, variant.file.pattern) ); # warn and skip ahead if the expected file is missing
if( !file.exists(variant.file.path) ) { warning.message <- paste('File', variant.file.path, 'not found.'); warning(warning.message); next; } variant.calls <- utils::read.table( variant.file.path, header = TRUE, sep = '\t', stringsAsFactors = FALSE ); # if no variants in file, skip ahead if( 0 == nrow(variant.calls) ) { warning.message <- paste('File', variant.file.path, 'exists, but does not contain any variants'); warning(warning.message); next; } names(variant.calls) <- fix.names(names(variant.calls), caller); # add number of reads supporting variant variant.calls[, paste0(uppercase.caller, '.VARIANT.READS')] <- variant.calls[, paste0(uppercase.caller, '.TUMOUR.AF')]*variant.calls[, paste0(uppercase.caller, '.TUMOUR.DEPTH')]; # add to list # - need to do this separately based on whether patient.ids is defined (want PatientID to be the first column) if( !is.null(patient.ids) ) { combined.variant.calls[[ sample.id ]] <- data.frame( PatientID = rep(patient.ids[i], nrow(variant.calls)), SampleID = rep(sample.id, nrow(variant.calls)), variant.calls ); } else { combined.variant.calls[[ sample.id ]] <- data.frame( SampleID = rep(sample.id, nrow(variant.calls)), variant.calls ); } } # end of per sample for loop # consolidate to data frame combined.variant.calls <- do.call(rbind, combined.variant.calls); combined.variant.calls[, paste0('CALLED.', uppercase.caller)] <- tolower(caller); if(apply.filters) { combined.variant.calls <- filter.variants( variants = combined.variant.calls, caller = caller ); } return(combined.variant.calls); }
/scratch/gouwar.j/cran-all/cranData/varitas/R/read.all.calls.R
#' Read iDES output #' #' @description #' Read output from iDES_step1.pl and return data frame #' #' @param filename path to file #' #' @return ides.data data frame read from iDES output read.ides.file <- function(filename) { ides.data <- utils::read.table( filename, sep = '\t', as.is = TRUE, header = TRUE ); # TO DO: why inconsistency in Rplus vs Apos ? colnames(ides.data) <- c( 'Chr', 'Pos', 'Depth', 'Ref', 'Rplus', 'Rneg', 'Apos', 'Aneg', 'Cpos', 'Cneg', 'Tpos', 'Tneg', 'Gpos', 'Gneg' ); return(ides.data); } #' Merge potential iDES calls with variant annotation. #' #' @details #' The VarDict variant calling includes a GATK call merging the call vcf file (allele frequency information etc.) with #' the ANNOVAR annotation, and saving the result as a table. This function is an attempt to emulate that step #' for the iDES calls. #' #' @param ides.filename #' Path to formatted iDES output (typically from convert.ides.output file) #' @param annovar.suffix.pattern #' Suffix to match ANNOVAR file #' @inheritParams convert.ides.output #' #' @return annotated.calls Data frame of annotations and iDES output. merge.ides.annotation <- function( ides.filename, output = TRUE, output.suffix = '.ann.txt', annovar.suffix.pattern = '.annovar.hg(\\d{2})_multianno.txt' ) { # open ides file ides.calls <- utils::read.delim( ides.filename, sep = '\t', header = FALSE ); # TO DO: # - make sure VarDict depth (DP) has same definition names(ides.calls) <- c( 'Chr', 'Start', 'End', 'Ref', 'Alt', 'DP', 'RefCalls', 'AltCalls', 'A', 'C', 'G', 'T', 'AF' ); # check if annovar file exists # regex pattern to match filename of ANNOVAR annotation file annovar.pattern <- paste0(basename(ides.filename), annovar.suffix.pattern) # match regex in same directory as ides file annovar.file.matches <- list.files( pattern = annovar.pattern, path = dirname(ides.filename) ); if( 0 == length(annovar.file.matches) ) { stop('No ANNOVAR annotation file found.'); } annovar.annotation <- utils::read.delim( file.path(dirname(ides.filename), annovar.file.matches[1]), sep = '\t', header = TRUE ); # merge ANNOVAR annotation and iDES output merged.data <- merge(ides.calls, annovar.annotation); # make sure columns appear with chr, start, end, ref, alt first annotated.calls <- data.frame(merged.data)[, union(names(ides.calls), names(annovar.annotation))]; # sort by chromosome and position (start and end position are equal since only dealing with SNVs) # merging step treats chromosome as a character, sorting does not work as expected annotated.calls <- annotated.calls[order(annotated.calls$Chr, annotated.calls$Start), ]; # rename to match GATK VariantsToTable output # (both isis and VarDict use that tool) annotated.calls$End <- NULL; names(annotated.calls)[1:4] <- c('CHROM', 'POS', 'REF', 'ALT'); # write to file if requested if( TRUE == output ) { utils::write.table( annotated.calls, paste0(ides.filename, output.suffix), sep = "\t", row.names = FALSE, quote = FALSE ); } return(annotated.calls); }
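# Toy illustration of the annotation merge above: calls and annotation join on
# their shared columns (here Chr and Start), the call columns are kept first,
# and rows are re-sorted by position afterwards (all values are made up):
ides.calls <- data.frame(
    Chr = c('1', '1'),
    Start = c(200, 100),
    Ref = c('A', 'C'),
    Alt = c('G', 'T'),
    AF = c(0.1, 0.3)
    );
annovar.annotation <- data.frame(
    Chr = c('1', '1'),
    Start = c(100, 200),
    Gene = c('geneA', 'geneB')
    );
merged <- merge(ides.calls, annovar.annotation);
merged <- data.frame(merged)[, union(names(ides.calls), names(annovar.annotation))];
merged <- merged[order(merged$Chr, merged$Start), ];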
/scratch/gouwar.j/cran-all/cranData/varitas/R/read.ides.file.R
#' Read variant calls from file and format for ease of downstream analyses. #' #' @param variant.file #' Path to variant file. #' @param variant.caller #' String indicating which variant caller was used. Needed to format the headers. #' #' @return variant.calls Data frame of variant calls #' #' read.variant.calls <- function(variant.file, variant.caller) { ### INPUT TESTS ########################################################### if( is.factor(variant.file) ) variant.file <- as.character(variant.file); if( !is.character(variant.file) ) { stop('variant.file must be a string.'); } if( !file.exists(variant.file) ) { error.message <- paste('File', variant.file, 'does not exist'); stop(error.message); } ### MAIN ################################################################## # get sample ID sample.id <- gsub('(.*)\\.passed\\.ontarget\\.vcf\\.annovar.*', '\\1', basename(variant.file) ); uppercase.caller <- toupper(variant.caller); variant.calls <- utils::read.table( variant.file, header = TRUE, sep = '\t', stringsAsFactors = FALSE, quote = "" ); # if no variants in file, skip ahead if( 0 == nrow(variant.calls) ) { warning.message <- paste('File', variant.file, 'exists, but does not contain any variants'); warning(warning.message); return(NULL); } # make sure REF and ALT are character vectors, don't want any accidental T -> TRUE conversion variant.calls$REF <- logical.to.character(variant.calls$REF); variant.calls$ALT <- logical.to.character(variant.calls$ALT); names(variant.calls) <- fix.names(names(variant.calls), variant.caller, sample.id = sample.id); if(nrow(variant.calls) > 0) { variant.calls[, paste0(uppercase.caller, '.VARIANT.READS')] <- variant.calls[, paste0(uppercase.caller, '.TUMOUR.AD')]; } return(variant.calls); }
/scratch/gouwar.j/cran-all/cranData/varitas/R/read.variant.calls.R
#' Run alignment #' #' @details #' Runs alignment (and related processing steps) on each sample. #' #' @param fastq.specification #' Data frame detailing FASTQ files to be processed, typically from prepare.fastq.specification #' @param output.directory #' Path to project directory #' @param paired.end #' Logical indicating whether paired-end sequencing was performed #' @param sample.directories #' Logical indicating whether all sample files should be saved to sample-specific subdirectories (will be created) #' @param output.subdirectory #' If further nesting is required, name of subdirectory. If no further nesting, set to FALSE #' @param job.name.prefix #' Prefix for job names on the cluster #' @param job.group #' Group job should be associated with on cluster #' @param quiet #' Logical indicating whether to print commands to screen rather than submit them #' @param verify.options #' Logical indicating whether to run verify.varitas.options #' #' @examples #' run.alignment( #' fastq.specification = data.frame( #' sample.id = c('1', '2'), #' reads = c('1-R1.fastq.gz', '2-R1.fastq.gz'), #' mates = c('1-R2.fastq.gz', '2-R2.fastq.gz'), #' patient.id = c('P1', 'P1'), #' tissue = c('tumour', 'normal') #' ), #' output.directory = '.', #' quiet = TRUE, #' paired.end = TRUE #' ) #' #' @return None #' #' @export run.alignment <- function( fastq.specification, output.directory, paired.end = FALSE, sample.directories = TRUE, output.subdirectory = FALSE, job.name.prefix = NULL, job.group = 'alignment', quiet = FALSE, verify.options = !quiet ) { # TO DO: # - make job.dependencies argument more robust to misspecification # avoid factor problems fastq.specification$sample.id <- as.character(fastq.specification$sample.id); fastq.specification$reads <- as.character(fastq.specification$reads); if(paired.end) fastq.specification$mates <- as.character(fastq.specification$mates); ### INPUT TESTS ########################################################### # Check that output directory exists. # In theory we could create the directory if it does not exist, but it would have to be created recursively. # To be safe, I have opted to throw an error. if( !quiet && !dir.exists(output.directory) ) { error.message <- paste('Directory', output.directory, 'does not exist.'); stop(error.message); } # if not submitting commands, do not check that files exist verify.fastq.specification( fastq.specification, paired.end = paired.end, files.ready = !quiet ); # play it safe by refusing to proceed if unclear about whether it's paired-end if( 'mates' %in% names(fastq.specification) && !paired.end ) { stop('fastq.specification contains a column mates, but paired.end = FALSE'); } ### MAIN ################################################################## if( verify.options ) { verify.varitas.options(stages.to.run = 'alignment'); } # create directories for log files and code # - should this be parameterized? 
if( !quiet ) { create.directories( directory.names = c('log', 'code'), path = output.directory ); } # determine output path for each sample if(sample.directories) { fastq.specification$output.path <- file.path( output.directory, fastq.specification$sample.id ); } else { fastq.specification$output.path <- output.directory; } # if further nesting has been requested, add to path if( !identical(output.subdirectory, FALSE) && !is.null(output.subdirectory) ) { fastq.specification$output.path <- file.path( fastq.specification$output.path, output.subdirectory ); } # Loop over samples and run each one bam.specification <- list(); # Special handling for normal samples if( 'tissue' %in% names(fastq.specification) ) { dict <- c() keys <- c() for (i in 1:nrow(fastq.specification)) { if (fastq.specification$tissue[i] == 'tumour') { keys <- c(keys, fastq.specification$patient.id[i]) dict <- c(dict, fastq.specification$sample.id[i]) } } names(dict) <- keys } for( i in 1:nrow(fastq.specification) ) { sample.id <- fastq.specification$sample.id[i]; sample.output.directory <- fastq.specification$output.path[i]; fastq.files <- fastq.specification$reads[i]; if(paired.end) { fastq.files <- c(fastq.files, fastq.specification$mates[i]); } # should the output pattern be parameterized ? output.filename <- paste0(sample.id, '.sorted.bam.ontarget.bam'); output.file <- file.path(sample.output.directory, output.filename); job.name <- paste0('align_', sample.id); if( !is.null(job.name.prefix) && '' != job.name.prefix ) { job.name <- paste(job.name.prefix, job.name, sep = '_'); } if( 'tissue' %in% names(fastq.specification) ) { if ( 'normal' == fastq.specification$tissue[i] ) { print(paste(sample.id, 'goes with', dict[fastq.specification$patient.id[i]])) tumour.sample <- dict[fastq.specification$patient.id[i]] sample.id <- paste0(tumour.sample, '-NORMAL') } } # should the config file be passed through? 
run.alignment.sample( sample.id = sample.id, fastq.files = fastq.files, output.directory = sample.output.directory, output.filename = output.filename, code.directory = file.path(output.directory, 'code'), log.directory = file.path(output.directory, 'log'), job.group = job.group, job.name = job.name, verify.options = FALSE, quiet = quiet ); # TO DO: figure out how to deal with tumour/ normal issue sample.bam.specification <- data.frame( sample.id = sample.id, bam.file = output.file, job.dependency = job.name ); # add patient ID and tissue to BAM specification if the information is available if( 'patient.id' %in% names(fastq.specification) ) { sample.bam.specification$patient.id <- fastq.specification$patient.id[i]; } if( 'tissue' %in% names(fastq.specification) ) { sample.bam.specification$tissue <- fastq.specification$tissue[i]; } bam.specification[[ i ]] <- sample.bam.specification; } bam.specification <- do.call(rbind, bam.specification); # If tumour and normal are present, reformat to tumour.bam and normal.bam format # this should probably be moved to a separate helper function reformatted.bam.specification <- list(); if( 'patient.id' %in% names(bam.specification) && 'tissue' %in% names(bam.specification) ) { tumour.bams <- bam.specification['tumour' == bam.specification$tissue, ]; for( i in 1:nrow(tumour.bams) ) { sample.id <- tumour.bams$sample.id[i]; patient.id <- tumour.bams$patient.id[i]; patient.normal.samples <- bam.specification['normal' == bam.specification$tissue & patient.id == bam.specification$patient.id, ]; if( 0 == nrow(patient.normal.samples) ) { # no normal BAM normal.bam <- NA; normal.job.dependency <- NA; } else if ( 1 == nrow(patient.normal.samples) ) { normal.bam <- patient.normal.samples$bam.file[1]; normal.job.dependency <- patient.normal.samples$job.dependency[1]; } else { stop('Multi-region normal samples not supported - sorry!'); } merged.job.dependency <- tumour.bams$job.dependency[i]; if( !is.na(normal.job.dependency) ) { merged.job.dependency <- paste(merged.job.dependency, normal.job.dependency); } reformatted.bam.specification[[ sample.id ]] <- data.frame( sample.id = sample.id, tumour.bam = tumour.bams$bam.file[i], normal.bam = normal.bam, job.dependency = merged.job.dependency ); } bam.specification <- do.call(rbind, reformatted.bam.specification); } else { # no need to reformat; just rename the BAM file column names(bam.specification)[ 'bam.file' == names(bam.specification) ] <- 'tumour.bam'; } return(bam.specification); }
/scratch/gouwar.j/cran-all/cranData/varitas/R/run.alignment.R
#' Run alignment for a single sample #' #' @param fastq.files Paths to FASTQ files (one file if single-end reads, two files if paired-end) #' @inheritParams run.vardict.sample #' run.alignment.sample <- function( fastq.files, sample.id, output.directory = NULL, output.filename = NULL, code.directory = NULL, log.directory = NULL, config.file = NULL, job.dependencies = NULL, job.name = NULL, job.group = NULL, quiet = FALSE, verify.options = !quiet ) { ### INPUT TESTS ########################################################### # FASTQ files assertthat::assert_that( length(fastq.files) <= 2, msg = 'Cannot accept more than two FASTQ files' ); assertthat::assert_that( 0 != length(fastq.files), msg = 'Must supply at least one FASTQ file' ); assertthat::assert_that( quiet || !is.null(job.dependencies) || all( file.exists(fastq.files) ), msg = 'No job dependency supplied, yet FASTQ files do not exist' ); # sample ID assertthat::assert_that( 1 == length(sample.id), msg = 'sample.id must have length 1' ); # could possibly allow numeric.. but be strict to start with if( is.factor(sample.id) ) sample.id <- as.character(sample.id); assertthat::assert_that( is.character(sample.id) ); ### MAIN ################################################################## if( verify.options ) { verify.varitas.options( varitas.options = config.file, stages.to.run = 'alignment' ); } # if no config.file has been supplied, save a temporary one and pass to Perl if( is.null(config.file) ) { config.file <- save.config(); } script <- system.file('perl', 'run_alignment.pl', package = get.varitas.options('pkgname') ); # Might want to rename the ontarget_bam_filename option... command <- make.command.line.call( main.command = c('perl', script), options = list( 'fastq' = fastq.files, 'sample_id' = sample.id, 'config_file' = config.file, 'output_directory' = output.directory, 'ontarget_bam_filename' = output.filename, 'code_directory' = code.directory, 'log_directory' = log.directory, 'job_dependencies' = job.dependencies, 'job_name' = job.name, 'job_group' = job.group ) ); if(quiet) { cat(command, '\n'); } else { system(command); } }
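# Dry-run sketch in the style of the run.alignment() example above: with
# quiet = TRUE the assembled Perl command is printed rather than submitted,
# so the (hypothetical) FASTQ files do not need to exist.
run.alignment.sample(
    fastq.files = c('sample1_R1.fastq.gz', 'sample1_R2.fastq.gz'),
    sample.id = 'sample1',
    output.directory = '.',
    quiet = TRUE
    );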
/scratch/gouwar.j/cran-all/cranData/varitas/R/run.alignment.sample.R
globalVariables('i') #' Run all the generated bash scripts without HPC commands #' #' @description #' Run all the scripts generated by previous parts of the pipeline, without using HPC commands #' @param output.directory #' Main directory where all files should be saved #' @param stages.to.run #' A character vector of all stages that need running #' @param variant.callers #' A character vector of variant callers to run #' @param quiet #' Logical indicating whether to print commands to screen rather than submit jobs. Defaults to FALSE, #' can be useful to set to TRUE for testing. #' #' @return None #' #' @importFrom foreach '%dopar%' #' #' #' #' run.all.scripts <- function( output.directory, stages.to.run = c('alignment', 'qc', 'calling', 'annotation', 'merging'), variant.callers = NULL, quiet = FALSE ) { ### INPUT TESTS ############################################################# ### MAIN #################################################################### script.directory <- file.path(output.directory, 'code') log.directory <- file.path(output.directory, 'log/') ### SET UP DOPARALLEL config <- read.yaml(save.config()) num.cores <- config[['num_cpu']] doParallel::registerDoParallel(cores = num.cores) ### RUN STAGES print('Note: due to the nature of using multiple cores, jobs may complete out of order') if ('alignment' %in% stages.to.run){ print('Aligning...') script.files <- list.files(pattern = '.*align.*\\.sh$', path = script.directory, full.names = TRUE) script.names <- list.files(pattern = '.*align.*\\.sh$', path = script.directory, full.names = FALSE) if (length(script.files) > 0) { foreach::foreach(i=1:length(script.files)) %dopar% { if (quiet) { print(paste0('bash ', script.files[i], ' &> ', log.directory, script.names[i], '.out')) } else { system(paste0('bash ', script.files[i], ' &> ', log.directory, script.names[i], '.out')) print(paste0('Completed job ', i, ' of ', length(script.files), ' alignment jobs')) } } } } if ('qc' %in% stages.to.run){ print('Running QC...') script.files <- list.files(pattern = '.*target_qc.*\\.sh$', path = script.directory, full.names = TRUE) script.names <- list.files(pattern = '.*target_qc.*\\.sh$', path = script.directory, full.names = FALSE) if (length(script.files) > 0) { foreach::foreach(i=1:length(script.files)) %dopar% { if (quiet) { print(paste0('bash ', script.files[i], ' &> ', log.directory, script.names[i], '.out')) } else { system(paste0('bash ', script.files[i], ' &> ', log.directory, script.names[i], '.out')) print(paste0('Completed job ', i, ' of ', length(script.files), ' QC jobs')) } } } } if ('calling' %in% stages.to.run){ if ('mutect' %in% variant.callers){ print('Running Mutect...') script.files <- list.files(pattern = '.*mutect.*\\.sh$', path = script.directory, full.names = TRUE) script.names <- list.files(pattern = '.*mutect.*\\.sh$', path = script.directory, full.names = FALSE) if (length(script.files) > 0) { foreach::foreach(i=1:length(script.files)) %dopar% { if (quiet) { print(paste0('bash ', script.files[i], ' &> ', log.directory, script.names[i], '.out')) } else { system(paste0('bash ', script.files[i], ' &> ', log.directory, script.names[i], '.out')) print(paste0('Completed job ', i, ' of ', length(script.files), ' MuTect jobs')) } } } } if ('vardict' %in% variant.callers) { print('Running VarDict...') script.files <- list.files(pattern = '.*vardict.*\\.sh$', path = script.directory, full.names = TRUE) script.names <- list.files(pattern = '.*vardict.*\\.sh$', path = script.directory, full.names = FALSE) if
(length(script.files) > 0) { foreach::foreach(i=1:length(script.files)) %dopar% { if (quiet) { print(paste0('bash ', script.files[i], ' &> ', log.directory, script.names[i], '.out')) } else { system(paste0('bash ', script.files[i], ' &> ', log.directory, script.names[i], '.out')) print(paste0('Completed job ', i, ' of ', length(script.files), ' VarDict jobs')) } } } } } if ('annotation' %in% stages.to.run){ print('Annotating...') script.files <- list.files(pattern = '.*annotate.*\\.sh$', path = script.directory, full.names = TRUE) script.names <- list.files(pattern = '.*annotate.*\\.sh$', path = script.directory, full.names = FALSE) if (length(script.files) > 0) { foreach::foreach(i=1:length(script.files)) %dopar% { if (quiet) { print(paste0('bash ', script.files[i], ' &> ', log.directory, script.names[i], '.out')) } else { system(paste0('bash ', script.files[i], ' &> ', log.directory, script.names[i], '.out')) print(paste0('Completed job ', i, ' of ', length(script.files), ' annotation jobs')) } } } } if ('merging' %in% stages.to.run){ print('Merging...') # post-processing scripts live one level above the code directory script.files <- list.files(pattern = '.*post_processing.*\\.sh$', path = file.path(script.directory, '..'), full.names = TRUE) script.names <- list.files(pattern = '.*post_processing.*\\.sh$', path = file.path(script.directory, '..'), full.names = FALSE) if (length(script.files) > 0) { foreach::foreach(i=1:length(script.files)) %dopar% { if (quiet) { print(paste0('bash ', script.files[i], ' &> ', log.directory, script.names[i], '.out')) } else { system(paste0('bash ', script.files[i], ' &> ', log.directory, script.names[i], '.out')) } } } } print('All jobs executed') }
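# Self-contained illustration of the foreach/%dopar% dispatch pattern used
# above: jobs run across the registered cores and may finish out of order,
# which is why the completion messages are numbered.
library(foreach);
doParallel::registerDoParallel(cores = 2);
squares <- foreach::foreach(i = 1:4) %dopar% {
    # each iteration runs independently on a worker
    i^2;
};
unlist(squares);
# [1]  1  4  9 16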
/scratch/gouwar.j/cran-all/cranData/varitas/R/run.all.scripts.R
#' Run annotation on a set of VCF files #' #' @description #' Takes a data frame with paths to VCF files, and runs ANNOVAR annotation on each file. #' To allow for smooth connections with downstream pipeline steps, the function returns a variant #' specification data frame that can be used as input to merging steps. #' #' @param vcf.specification #' Data frame detailing VCF files to be processed, from \code{prepare.vcf.specification}. #' @param output.directory #' Path to folder where code and log files should be stored in their respective subdirectories. #' If not supplied, code and log files will be stored in the directory with each VCF file. #' @param job.name.prefix #' Optional prefix to be added before the VCF name in the job name. Should be set when #' running multiple callers to avoid job name collisions. #' @param job.group #' Group job should be associated with on cluster. Defaults to 'annovar'. #' @param quiet #' Logical indicating whether to print commands to screen rather than submit them #' @param verify.options #' Logical indicating whether to run verify.varitas.options #' #' @return Data frame with details of variant files #' #' @examples #' run.annotation( #' data.frame( #' sample.id = c('a', 'b'), #' vcf = c('a.vcf', 'b.vcf'), #' caller = c('mutect', 'mutect') #' ), #' output.directory = '.', #' quiet = TRUE #' ) #' #' @export run.annotation <- function( vcf.specification, output.directory = NULL, job.name.prefix = NULL, job.group = 'annovar', quiet = FALSE, verify.options = !quiet ) { ### INPUT TESTS ########################################################### if( !is.null(output.directory) && !dir.exists(output.directory) && !quiet ) { error.message <- paste('Directory', output.directory, 'does not exist or is not a directory'); stop(error.message); } if( !is.null(job.name.prefix) && !is.character(job.name.prefix) ) { stop('job.name.prefix must be a string'); } if( !is.null(job.name.prefix) && length(job.name.prefix) != 1 && length(job.name.prefix) != nrow(vcf.specification) ) { stop('job.name.prefix must have length 1 or length equal to vcf.specification'); } ### MAIN ################################################################## # make sure to avoid factor errors vcf.specification$sample.id <- as.character(vcf.specification$sample.id); vcf.specification$vcf <- as.character(vcf.specification$vcf); if('caller' %in% names(vcf.specification) ) { vcf.specification$caller <- as.character( vcf.specification$caller ); } verify.vcf.specification(vcf.specification); if( verify.options) { verify.varitas.options( stages.to.run = 'annotation' ); } vcf.specification$job.name.prefix <- job.name.prefix; # create directories for log files and code # - should this be parameterized? code.directory <- NULL; log.directory <- NULL; if( !is.null(output.directory) ) { if( !quiet ) { create.directories( directory.names = c('log', 'code'), path = output.directory ); } code.directory <- file.path(output.directory, 'code'); log.directory <- file.path(output.directory, 'log'); } # want to store information about file paths and job dependencies to use downstream variant.specification <- list(); # store paths to output files, and their job dependencies for(i in 1:nrow(vcf.specification) ) { # TO DO: should VCF specification contain sample ID at all?
sample.id <- vcf.specification$sample.id[i]; vcf.file <- vcf.specification$vcf[i]; # save to same directory as VCF annotation.output.directory <- dirname(vcf.file); vcf.filename <- basename(vcf.file); # get name of annotated output file – specifying this here allows us to control where the output ends up buildver <- get.buildver(); annotation.filename <- paste0(vcf.filename, '.annovar.', buildver, '_multianno.vcf.txt'); # give job a name if( 'caller' %in% names(vcf.specification) ) { job.name <- paste(vcf.specification$caller[i], vcf.filename, sep = '_'); if( 'isis' == as.character(vcf.specification$caller[i]) ) { isis <- TRUE; } else { isis <- FALSE; } } else { job.name <- vcf.filename; isis <- FALSE; } # sort out job dependencies # (assume to be comma or semicolon separated – verify this in other scripts!) job.dependencies <- NULL; if( 'job.dependency' %in% names(vcf.specification) && '' != vcf.specification$job.dependency[i] && !is.na(vcf.specification$job.dependency[i]) ) { job.dependencies <- stringr::str_split( vcf.specification$job.dependency[i], pattern = ',(\\s)?|;(\\s)?|\\s' )[[1]]; } # add prefix to job name if( !is.null(job.name.prefix) && '' != job.name.prefix ) { job.name <- paste(job.name.prefix, job.name, sep = '_'); } print(job.name); run.annovar.vcf( vcf.file = vcf.file, output.filename = annotation.filename, output.directory = annotation.output.directory, code.directory = code.directory, log.directory = log.directory, job.name = job.name, job.dependencies = job.dependencies, job.group = job.group, verify.options = FALSE, isis = isis, quiet = quiet ); sample.variant.specification <- data.frame( sample.id = sample.id, variant.file = file.path(annotation.output.directory, annotation.filename), job.dependency = job.name, stringsAsFactors = FALSE ); if( 'caller' %in% names(vcf.specification) ) { sample.variant.specification$caller <- vcf.specification$caller[i]; } variant.specification[[ i ]] <- sample.variant.specification; } # make data frame of variant specifications variant.specification <- do.call(rbind, variant.specification); return(variant.specification); }
/scratch/gouwar.j/cran-all/cranData/varitas/R/run.annotation.R
#' Run ANNOVAR on a VCF file #' #' @param vcf.file Path to VCF file #' @param isis Logical indicating whether VCF files are from the isis (MiniSeq) variant caller #' @inheritParams run.vardict.sample #' #' @return None #' #' #' #' run.annovar.vcf <- function( vcf.file, output.directory = NULL, output.filename = NULL, code.directory = NULL, log.directory = NULL, config.file = NULL, job.dependencies = NULL, job.group = NULL, job.name = NULL, isis = FALSE, quiet = FALSE, verify.options = !quiet ) { ### INPUT TESTS ########################################################### if( 0 == length(vcf.file) ) { stop('vcf.file must be provided.'); } if( length(job.name) > 1 ) { stop('job.name must have length 1'); } ### MAIN ################################################################## if( verify.options ) { verify.varitas.options( varitas.options = config.file, stages.to.run = 'annotation' ); } if(is.null(config.file)) { config.file <- save.config(); } if(isis) { script <- system.file('perl', 'run_annovar_isis.pl', package = getOption('varitas')$pkgname ); } else { script <- system.file('perl', 'run_annovar.pl', package = getOption('varitas')$pkgname ); } command <- make.command.line.call( main.command = c('perl', script), options = c( 'vcf_file' = vcf.file, 'config_file' = config.file, 'output_directory' = output.directory, 'output_filename' = output.filename, 'code_directory' = code.directory, 'log_directory' = log.directory, 'job_dependencies' = job.dependencies, 'job_name' = job.name, 'job_group' = job.group ) ); if(quiet) { cat(command, '\n'); } else { system(command); } }
/scratch/gouwar.j/cran-all/cranData/varitas/R/run.annovar.vcf.R
#' Run filtering on an ANNOVAR-annotated txt file #' #' @param variant.file Path to variant file #' @param caller String giving variant caller that was used (affects which filters are applied). #' @inheritParams run.alignment.sample run.filtering.txt <- function( variant.file, caller = c('consensus', 'vardict', 'ides', 'mutect'), output.directory = NULL, output.filename = NULL, code.directory = NULL, log.directory = NULL, config.file = NULL, job.dependencies = NULL, job.group = NULL, quiet = FALSE ) { caller <- match.arg(caller); ### INPUT TESTS ########################################################### # TO DO: # - txt file extension # - required headers ### MAIN ################################################################## if( is.null(config.file) ) { # need to save to non-temporary directory to avoid errors # => perl calls R again, temp files already deleted config.output.directory <- NULL; config.output.filename <- paste0('config_filter_', caller, '_', basename(variant.file), '.yaml'); if( !is.null(code.directory) ) { config.output.directory <- code.directory; } else if( !is.null(output.directory) ) { config.output.directory <- output.directory; } if( !is.null(config.output.directory) ) { config.output.path <- file.path(config.output.directory, config.output.filename); } else { config.output.path <- config.output.filename; } config.file <- save.config(output.file = config.output.path); } script <- system.file('perl', 'filter.pl', package = getOption('varitas')$pkgname ); command <- make.command.line.call( main.command = c('perl', script), options = c( 'variant_file' = variant.file, 'variant_caller' = caller, 'config_file' = config.file, 'output_directory' = output.directory, 'output_filename' = output.filename, 'code_directory' = code.directory, 'log_directory' = log.directory, 'job_dependencies' = job.dependencies, 'job_group' = job.group ) ); if(quiet) { print(command); } else { system(command); } }
/scratch/gouwar.j/cran-all/cranData/varitas/R/run.filtering.txt.R
#' Run iDES #' #' @details #' Run iDES step 1 on each sample, to tally up calls by strand. #' Files are output to the sample subdirectory #' #' @param project.directory #' Directory containing files #' @param sample.id.pattern #' Regex pattern to match sample IDs #' @param sample.ids #' Vector of sample IDs #' @param job.dependencies #' Vector of job dependencies #' #' @return None #' #' @note #' Deprecated function for running iDES. #' Follows previous development package without specification data frames #' #' @references \url{https://cappseq.stanford.edu/ides/} #' run.ides <- function( project.directory, sample.id.pattern = "._S\\d+$", sample.ids = NULL, job.dependencies = NULL ) { # TO DO: # - should wrapper functions that submit jobs be named in a way that indicates this? ### INPUT TESTS ########################################################### if( !is.character(project.directory) || length(project.directory) > 1 ) { stop('project.directory should be a single string giving path to project directory'); } if( !dir.exists(project.directory) ) { stop( paste(project.directory, 'is not a directory.') ); } for(sample.id in sample.ids) { if( !dir.exists(file.path(project.directory, sample.id)) ) { stop.message <- paste('Subdirectory', sample.id, 'does not exist in', project.directory); stop(stop.message); } } ### MAIN ################################################################## # if sample IDs have not been explicitly named, find all the directories # matching pattern in project directory if( is.null(sample.ids) ) { sample.ids <- dir( path = project.directory, pattern = sample.id.pattern ); } # TO DO: change this if we change the name! iDES.script <- system.file("perl", "run_iDES_sample.pl", package = getOption('varitas')$pkgname ); config.file <- save.config(); # loop over samples to submit alignment job for each sample for(sample.id in sample.ids) { command <- make.command.line.call( main.command = c("perl", iDES.script), options = list( "project_directory" = project.directory, "sample_id" = sample.id, "config_file" = config.file, "job_dependencies" = job.dependencies ) ); # submit to system system(command); } }
/scratch/gouwar.j/cran-all/cranData/varitas/R/run.ides.R
#' Run LoFreq for a sample #' #' @inheritParams run.vardict.sample #' #' #' #' run.lofreq.sample <- function( tumour.bam, sample.id, paired, normal.bam = NULL, output.directory = NULL, output.filename = NULL, code.directory = NULL, log.directory = NULL, config.file = NULL, job.dependencies = NULL, quiet = FALSE, job.name = NULL, verify.options = !quiet, job.group = NULL ) { ### INPUT TESTS ########################################################### # make sure no factors have been passed in assertthat::assert_that( is.character(tumour.bam) ); assertthat::assert_that( is.character(sample.id) ); assertthat::assert_that( is.null(normal.bam) || is.character(normal.bam)); assertthat::assert_that( is.null(output.directory) || is.character(output.directory) ); if(paired && is.null(normal.bam)) { stop('paired is set to true but no normal sample BAM has been supplied.'); } if(!paired && !is.null(normal.bam)) { stop('Getting mixed signals: paired is set to false but a normal sample BAM has been supplied.'); } ### MAIN ################################################################## if( verify.options ) { verify.varitas.options( varitas.options = config.file, stages.to.run = 'calling', variant.callers = 'lofreq' ); } # save temporary config file that can be read by Perl if(is.null(config.file)) { config.file <- save.config(); } # sort out whether to pass paired flag to Perl flags <- NULL; if(paired) { flags <- 'paired'; } script <- system.file('perl', 'run_lofreq.pl', package = get.varitas.options('pkgname') ); command <- make.command.line.call( main.command = c('perl', script), options = c( 'tumour_bam' = tumour.bam, 'normal_bam' = normal.bam, 'sample_id' = sample.id, 'config_file' = config.file, 'output_directory' = output.directory, 'output_filename' = output.filename, 'code_directory' = code.directory, 'log_directory' = log.directory, 'job_dependencies' = job.dependencies, 'job_name' = job.name, 'job_group' = job.group ), flags = flags ); if( quiet ) { cat(command, '\n'); } else { system(command); } }
/scratch/gouwar.j/cran-all/cranData/varitas/R/run.lofreq.sample.R
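# Usage sketch for run.lofreq.sample() (hypothetical paths, not package code).
# With the default verify.options = !quiet, quiet = TRUE skips option
# verification and just prints the command. Tumour-only calling, hence
# paired = FALSE and no normal.bam:
run.lofreq.sample(
    tumour.bam = 'patient1/patient1.sorted.bam.ontarget.bam',
    sample.id = 'patient1',
    paired = FALSE,
    output.directory = 'patient1/lofreq',
    config.file = 'config.yaml',
    quiet = TRUE
    );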
#' Run MuSE for a sample #' #' @inheritParams run.vardict.sample #' #' #' #' run.muse.sample <- function( tumour.bam, sample.id, paired, normal.bam = NULL, output.directory = NULL, output.filename = NULL, code.directory = NULL, log.directory = NULL, config.file = NULL, job.dependencies = NULL, quiet = FALSE, job.name = NULL, verify.options = !quiet, job.group = NULL ) { ### INPUT TESTS ########################################################### # make sure no factors have been passed in assertthat::assert_that( is.character(tumour.bam) ); assertthat::assert_that( is.character(sample.id) ); assertthat::assert_that( is.null(normal.bam) || is.character(normal.bam)); assertthat::assert_that( is.null(output.directory) || is.character(output.directory) ); if(paired && is.null(normal.bam)) { stop('paired is set to true but no normal sample BAM has been supplied.'); } if(!paired && !is.null(normal.bam)) { stop('Getting mixed signals: paired is set to false but a normal sample BAM has been supplied.'); } ### MAIN ################################################################## if( verify.options ) { verify.varitas.options( varitas.options = config.file, stages.to.run = 'calling', variant.callers = 'muse' ); } # save temporary config file that can be read by Perl if(is.null(config.file)) { config.file <- save.config(); } # sort out whether to pass paired flag to Perl flags <- NULL; if(paired) { flags <- 'paired'; } script <- system.file('perl', 'run_muse.pl', package = get.varitas.options('pkgname') ); command <- make.command.line.call( main.command = c('perl', script), options = c( 'tumour_bam' = tumour.bam, 'normal_bam' = normal.bam, 'sample_id' = sample.id, 'config_file' = config.file, 'output_directory' = output.directory, 'output_filename' = output.filename, 'code_directory' = code.directory, 'log_directory' = log.directory, 'job_dependencies' = job.dependencies, 'job_name' = job.name, 'job_group' = job.group ), flags = flags ); if( quiet ) { cat(command, '\n'); } else { system(command); } }
/scratch/gouwar.j/cran-all/cranData/varitas/R/run.muse.sample.R
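# Usage sketch for run.muse.sample() (hypothetical paths, not package code).
# MuSE requires a matched normal, so paired = TRUE and normal.bam must both
# be supplied; the input tests above reject any other combination.
run.muse.sample(
    tumour.bam = 'patient1-tumour.bam',
    normal.bam = 'patient1-normal.bam',
    sample.id = 'patient1',
    paired = TRUE,
    output.directory = 'patient1/muse',
    config.file = 'config.yaml',
    quiet = TRUE
    );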
#' Run MuTect for a sample #' #' @inheritParams run.vardict.sample #' #' #' #' run.mutect.sample <- function( tumour.bam, sample.id, paired, normal.bam = NULL, output.directory = NULL, output.filename = NULL, code.directory = NULL, log.directory = NULL, config.file = NULL, job.dependencies = NULL, quiet = FALSE, job.name = NULL, verify.options = !quiet, job.group = NULL ) { ### INPUT TESTS ########################################################### # make sure no factors have been passed in assertthat::assert_that( is.character(tumour.bam) ); assertthat::assert_that( is.character(sample.id) ); assertthat::assert_that( is.null(normal.bam) || is.character(normal.bam)); assertthat::assert_that( is.null(output.directory) || is.character(output.directory) ); if(paired && is.null(normal.bam)) { stop('paired is set to true but no normal sample BAM has been supplied.'); } if(!paired && !is.null(normal.bam)) { stop('Getting mixed signals: paired is set to false but a normal sample BAM has been supplied.'); } ### MAIN ################################################################## if( verify.options ) { verify.varitas.options( varitas.options = config.file, stages.to.run = 'calling', variant.callers = 'mutect' ); } # save temporary config file that can be read by Perl if(is.null(config.file)) { config.file <- save.config(); } # sort out whether to pass paired flag to Perl flags <- NULL; if(paired) { flags <- 'paired'; } script <- system.file('perl', 'run_mutect.pl', package = get.varitas.options('pkgname') ); command <- make.command.line.call( main.command = c('perl', script), options = c( 'tumour_bam' = tumour.bam, 'normal_bam' = normal.bam, 'sample_id' = sample.id, 'config_file' = config.file, 'output_directory' = output.directory, 'output_filename' = output.filename, 'code_directory' = code.directory, 'log_directory' = log.directory, 'job_dependencies' = job.dependencies, 'job_name' = job.name, 'job_group' = job.group ), flags = flags ); if( quiet ) { cat(command, '\n'); } else { system(command); } }
/scratch/gouwar.j/cran-all/cranData/varitas/R/run.mutect.sample.R
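# Usage sketch for run.mutect.sample() (hypothetical paths, not package code).
# The paired flag must agree with normal.bam: paired = TRUE without a normal
# BAM, or a normal BAM with paired = FALSE, triggers the input-test errors above.
run.mutect.sample(
    tumour.bam = 'patient2-tumour.bam',
    sample.id = 'patient2',
    paired = FALSE,
    output.directory = 'patient2/mutect',
    config.file = 'config.yaml',
    quiet = TRUE
    );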
#' run.post.processing #' #' @description #' Submit post-processing job to the cluster with appropriate job dependencies #' #' @param variant.specification #' Data frame specifying files to be processed #' @param output.directory #' Path to directory where output should be saved #' @param code.directory #' Directory where code should be saved #' @param log.directory #' Directory where log files should be saved #' @param config.file #' Path to config file #' @param job.name.prefix #' Prefix for job names on the cluster #' @param quiet #' Logical indicating whether to print commands to screen rather than submit the job #' @param email #' Email address that should be notified when job finishes. If NULL or FALSE, no email is sent #' @param verify.options #' Logical indicating whether \code{verify.varitas.options()} should be run. #' #' @return None #' @examples #' run.post.processing( #' variant.specification = data.frame( #' sample.id = c('a', 'b'), #' vcf = c('a.vcf', 'b.vcf'), #' caller = c('mutect', 'mutect'), #' job.dependency = c('example1', 'example2') #' ), #' output.directory = '.', #' quiet = TRUE #' ) #' #' @export run.post.processing <- function( variant.specification, output.directory, code.directory = NULL, log.directory = NULL, config.file = NULL, job.name.prefix = NULL, quiet = FALSE, email = NULL, verify.options = !quiet ) { ### INPUT TESTS ########################################################### if( !is.data.frame(variant.specification) ) { stop('variant.specification must be a data frame.'); } if( !is.character(output.directory) || length(output.directory) > 1 ) { stop('output.directory must be a single string'); } if( !dir.exists(output.directory) && !quiet) { error.message <- paste('Directory', output.directory, 'does not exist or is not a directory'); stop(error.message); } if( identical(email, FALSE) ) email <- NULL; if( !is.null(email) && !is.character(email) ) { stop('email should be a character string'); } ### MAIN ################################################################## # if a job.dependency column is present in variant.specification, treat it as the # job dependencies; less error-prone than passing them separately job.dependencies <- NULL; if( 'job.dependency' %in% names(variant.specification) ) { job.dependencies <- unlist(stringr::str_split( variant.specification$job.dependency, pattern = ',(\\s)?|;(\\s)?|\\s' )); } # save variant specification to file variant.specification.file <- file.path( output.directory, paste0(Sys.Date(), '_variant_specification.txt') ); if( !quiet ) { utils::write.table( variant.specification, variant.specification.file, sep = '\t', row.names = FALSE ); } # config file # NOTE: this step is currently redundant as the merging step has no config dependency # - include it anyways in case that changes in the future # # NOTE 2: run this before saving the config to file; don't want to save wrong settings to disk! 
if( verify.options ) { verify.varitas.options( varitas.options = config.file, stages.to.run = 'merging' ); } if( is.null(config.file) ) { config.file <- file.path( output.directory, paste0(Sys.Date(), '_config.yaml') ); if( !quiet ) save.config(output.file = config.file); } job.name <- 'post_processing'; if( !is.null(job.name.prefix) && '' != job.name.prefix ) { job.name <- paste(job.name.prefix, job.name, sep = '_'); } script <- system.file('perl', 'run_post_processing.pl', package = get.varitas.options('pkgname') ); post.processing.options <- list( 'variant_specification' = variant.specification.file, 'config_file' = config.file, 'output_directory' = output.directory, 'code_directory' = code.directory, 'log_directory' = log.directory, 'job_dependencies' = job.dependencies, 'job_name' = job.name ); if( !is.null(email) ) { post.processing.options[[ 'email' ]] <- email; } command <- make.command.line.call( main.command = c('perl', script), options = post.processing.options ); if(quiet) { cat(command, '\n'); } else { system(command); } }
/scratch/gouwar.j/cran-all/cranData/varitas/R/run.post.processing.R
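# Small demonstration (not package code) of how the job.dependency column is
# parsed above: the str_split() pattern accepts comma-, semicolon- or
# whitespace-separated job names.
library(stringr);
unlist(str_split('job1, job2;job3 job4', pattern = ',(\\s)?|;(\\s)?|\\s'));
# [1] "job1" "job2" "job3" "job4"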
#' Perform sample QC by looking at target coverage. #' #' @inheritParams run.variant.calling #' @param project.directory #' Path to project directory where code and log files should be saved #' @param paired #' Logical indicating whether the analysis is paired. This does not affect QC directly, but means normal samples get nested. #' @param output.subdirectory #' If further nesting is required, name of subdirectory. If no further nesting, set to FALSE #' @param job.group #' Group job should be associated with on cluster #' #' run.target.qc <- function( bam.specification, project.directory, sample.directories = TRUE, paired = FALSE, output.subdirectory = FALSE, quiet = FALSE, job.name.prefix = NULL, verify.options = FALSE, job.group = 'target_qc' ) { ### INPUT TESTS ########################################################### verify.bam.specification(bam.specification); ### MAIN ################################################################## bam.specification$sample.id <- as.character(bam.specification$sample.id); bam.specification$tumour.bam <- as.character(bam.specification$tumour.bam); if( 'normal.bam' %in% names(bam.specification) ) { bam.specification$normal.bam <- as.character(bam.specification$normal.bam); } if( verify.options ) { # TO DO! } # create directories for log files and code # - should this be parameterized? if( !quiet ) { create.directories( directory.names = c('log', 'code'), path = project.directory ); } # add a column to bam.specification data frame with path to # output directory for that specific sample if( sample.directories ) { bam.specification$output.path <- file.path( project.directory, bam.specification$sample.id ); } else { bam.specification$output.path <- project.directory; } # if further nesting has been requested, add to path if( !identical(output.subdirectory, FALSE) ) { bam.specification$output.path <- file.path( bam.specification$output.path, output.subdirectory ); } job.depends <- c(); for( i in seq_len( nrow(bam.specification) ) ) { sample.id <- bam.specification$sample.id[i]; tumour.bam <- bam.specification$tumour.bam[i]; sample.output.directory <- bam.specification$output.path[i]; job.dependencies <- NULL; if( 'job.dependency' %in% names(bam.specification) && '' != bam.specification$job.dependency[i] && !is.na(bam.specification$job.dependency[i]) ) { job.dependencies <- unlist(stringr::str_split( bam.specification$job.dependency[i], pattern = '\\s+' )); } job.name <- paste0('target_qc_', sample.id); if( !is.null(job.name.prefix) && '' != job.name.prefix ) { job.name <- paste(job.name.prefix, job.name, sep = '_'); } run.target.qc.sample( bam.file = tumour.bam, sample.id = sample.id, output.directory = sample.output.directory, job.dependencies = job.dependencies, code.directory = file.path(project.directory, 'code'), log.directory = file.path(project.directory, 'log'), job.group = job.group, job.name = job.name, quiet = quiet ); bam.specification$tumour.bam[i] <- file.path(sample.output.directory, paste0(sample.id, '.sorted.bam.ontarget.bam')); job.dependency <- job.name; if( paired ) { normal.bam <- bam.specification$normal.bam[i]; normal.job.name <- paste0('target_qc_', sample.id, '-NORMAL'); if( !is.null(job.name.prefix) && '' != job.name.prefix ) { normal.job.name <- paste(job.name.prefix, normal.job.name, sep = '_'); } run.target.qc.sample( bam.file = normal.bam, sample.id = paste0(sample.id, '-NORMAL'), output.directory = sample.output.directory, job.dependencies = job.dependencies, code.directory = file.path(project.directory, 'code'), log.directory = file.path(project.directory, 'log'), job.name = normal.job.name, job.group = job.group, quiet = quiet ); bam.specification$normal.bam[i] <- file.path(sample.output.directory, paste0(sample.id, '-NORMAL', '.sorted.bam.ontarget.bam')); job.dependency <- paste(job.dependency, normal.job.name); } job.depends <- c(job.depends, job.dependency); } bam.specification['job.dependency'] <- job.depends; return(bam.specification); }
/scratch/gouwar.j/cran-all/cranData/varitas/R/run.target.qc.R
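# Usage sketch for run.target.qc() (hypothetical paths, not package code;
# assumes the VariTAS options have been set up, since save.config() is called
# internally). With quiet = TRUE the per-sample commands are printed. The
# returned data frame points tumour.bam at the ontarget BAMs and carries the
# QC job names in a job.dependency column, so it can be passed straight to
# run.variant.calling().
updated.bams <- run.target.qc(
    bam.specification = data.frame(
        sample.id = c('Z', 'Y'),
        tumour.bam = c('Z.bam', 'Y.bam')
        ),
    project.directory = '.',
    quiet = TRUE
    );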
#' Get ontarget reads and run coverage quality control #' #' @param bam.file Path to BAM file #' @inheritParams run.alignment.sample #' run.target.qc.sample <- function( bam.file, sample.id, output.directory = NULL, code.directory = NULL, log.directory = NULL, config.file = NULL, job.dependencies = NULL, job.name = NULL, job.group = NULL, quiet = FALSE ) { ### INPUT TESTS ########################################################### ### MAIN ################################################################## if(is.null(config.file)) { config.file <- save.config(); } script <- system.file('perl', 'target_qc.pl', package = getOption('varitas')$pkgname ); # TO DO: # - add ontarget_bam_filename option (can name it output.filename for consistency?) command <- make.command.line.call( main.command = c('perl', script), options = c( 'bam_file' = bam.file, 'sample_id' = sample.id, 'config_file' = config.file, 'output_directory' = output.directory, 'code_directory' = code.directory, 'log_directory' = log.directory, 'job_dependencies' = job.dependencies, 'job_name' = job.name, 'job_group' = job.group ) ); if(quiet) { cat(command, '\n'); } else { system(command); } }
/scratch/gouwar.j/cran-all/cranData/varitas/R/run.target.qc.sample.R
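# Usage sketch for run.target.qc.sample() (hypothetical paths, not package
# code): submits a single target_qc.pl job for one BAM; with quiet = TRUE the
# command is printed rather than run.
run.target.qc.sample(
    bam.file = 'patient1.sorted.bam',
    sample.id = 'patient1',
    output.directory = 'patient1',
    config.file = 'config.yaml',
    quiet = TRUE
    );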
#' run.vardict.sample #' #' @description #' Run VarDict on a sample. Idea: have a low-level function that simply submits a job to Perl, after BAM paths have been found #' and output paths have been decided upon. #' #' @param tumour.bam #' Path to tumour sample BAM file. #' @param sample.id #' Sample ID for labelling #' @param paired #' Logical indicating whether to do variant calling with a matched normal. #' @param proton #' Logical indicating whether the data was generated by proton sequencing. Defaults to FALSE (i.e. Illumina) #' @param normal.bam #' Path to normal BAM file if \code{paired = TRUE} #' @param output.directory #' Path to output directory #' @param output.filename #' Name of resulting VCF file (defaults to SAMPLE_ID.vcf) #' @param code.directory #' Path to directory where code should be stored #' @param log.directory #' Path to directory where log files should be stored #' @param config.file #' Path to config file #' @param job.dependencies #' Vector with names of job dependencies #' @param job.name #' Name of job to be submitted #' @param job.group #' Group job should belong to #' @param verify.options #' Logical indicating whether to run verify.varitas.options #' @param quiet #' Logical indicating whether to print command to screen rather than submit it to the system. Defaults to FALSE; useful for debugging. #' #' #' #' run.vardict.sample <- function( tumour.bam, sample.id, paired, proton = FALSE, normal.bam = NULL, output.directory = NULL, output.filename = NULL, code.directory = NULL, log.directory = NULL, config.file = NULL, job.dependencies = NULL, job.name = NULL, job.group = NULL, quiet = FALSE, verify.options = !quiet ) { ### INPUT TESTS ########################################################### if(paired && is.null(normal.bam)) { stop('paired is set to true but no normal sample BAM has been supplied.'); } if(!paired && !is.null(normal.bam)) { stop('Getting mixed signals: paired is set to false but a normal sample BAM has been supplied.'); } ### MAIN ################################################################## if( verify.options ) { verify.varitas.options( varitas.options = config.file, stages.to.run = 'calling', variant.callers = 'vardict' ); } if(is.null(config.file)) { config.file <- save.config(); } # sort out flags flags <- c(); if(paired) { flags <- c('paired', flags); } if(proton) { flags <- c('proton', flags); } script <- system.file('perl', 'run_vardict.pl', package = get.varitas.options('pkgname') ); command <- make.command.line.call( main.command = c('perl', script), options = c( 'tumour_bam' = tumour.bam, 'normal_bam' = normal.bam, 'sample_id' = sample.id, 'config_file' = config.file, 'output_directory' = output.directory, 'output_filename' = output.filename, 'code_directory' = code.directory, 'log_directory' = log.directory, 'job_dependencies' = job.dependencies, 'job_name' = job.name, 'job_group' = job.group ), flags = flags ); if(quiet) { cat(command, '\n'); } else { system(command); } }
/scratch/gouwar.j/cran-all/cranData/varitas/R/run.vardict.sample.R
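# Usage sketch for run.vardict.sample() (hypothetical paths, not package
# code). The proton flag only changes which flags are forwarded to
# run_vardict.pl; everything else matches the other per-sample callers.
run.vardict.sample(
    tumour.bam = 'patient1.bam',
    sample.id = 'patient1',
    paired = FALSE,
    proton = TRUE,
    output.directory = 'patient1/vardict',
    config.file = 'config.yaml',
    quiet = TRUE
    );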
#' run.variant.calling #' #' @description #' Run variant calling for all samples #' #' @details #' Run each requested variant caller on each sample. #' Files are output to a caller-specific subdirectory within each sample directory. #' #' @param bam.specification #' Data frame containing details of BAM files to be processed, typically from \code{prepare.bam.specification}. #' @param output.directory #' Path to directory where output should be saved #' @param variant.callers #' Character vector of variant callers to be used #' @param paired #' Logical indicating whether to do variant calling with a matched normal #' @param proton #' Logical indicating whether data was generated by proton sequencing (ignored if running MuTect) #' @param sample.directories #' Logical indicating whether output for each sample should be put in its own directory (within output.directory) #' @param job.name.prefix #' Prefix for job names on the cluster #' @param quiet #' Logical indicating whether to print commands to screen rather than submit the job #' @param verify.options #' Logical indicating whether to run verify.varitas.options #' #' @return Data frame with details of the VCF files to be produced (sample ID, VCF path, job dependency, and caller), suitable as a VCF specification for the annotation step. #' #' @examples #' run.variant.calling( #' data.frame(sample.id = c('Z', 'Y'), tumour.bam = c('Z.bam', 'Y.bam')), #' output.directory = '.', #' variant.callers = c('lofreq', 'mutect'), #' quiet = TRUE, #' paired = FALSE #' ) #' #' @export run.variant.calling <- function( bam.specification, output.directory, variant.callers = c('vardict', 'mutect', 'varscan', 'lofreq', 'muse'), paired = TRUE, proton = FALSE, sample.directories = TRUE, job.name.prefix = NULL, quiet = FALSE, verify.options = !quiet ) { ### INPUT TESTS ########################################################### # Check that output directory exists. # In theory we could create the directory if it does not exist, but it would have to be created recursively. # To be safe, I have opted to throw an error. if( !quiet && !dir.exists(output.directory) ) { error.message <- paste('Directory', output.directory, 'does not exist or is not a directory.'); stop(error.message); } variant.callers <- match.arg(variant.callers, several.ok = TRUE); if( 'muse' %in% variant.callers && !paired ) { error.message <- 'MuSE can only be run on paired data, provide matched normal samples or do not use MuSE'; stop(error.message); } ### MAIN ################################################################## # make sure all columns are characters, not factors! bam.specification$sample.id <- as.character(bam.specification$sample.id); bam.specification$tumour.bam <- as.character(bam.specification$tumour.bam); if( 'normal.bam' %in% names(bam.specification) ) { bam.specification$normal.bam <- as.character(bam.specification$normal.bam); } if( verify.options ) { verify.varitas.options( stages.to.run = 'calling', variant.callers = variant.callers ); } # create directories for log files and code # - should this be parameterized? if( !quiet ) { create.directories( directory.names = c('log', 'code'), path = output.directory ); } # start assembling a data frame of VCF files to be annotated downstream annovar.specification <- list(); # loop over callers and samples, submitting one calling job per combination for( caller in variant.callers ) { # add a column to bam.specification data frame with path to # output directory for that specific sample if( sample.directories ) { bam.specification$output.path <- file.path( output.directory, bam.specification$sample.id, caller ); } else { bam.specification$output.path <- file.path( output.directory, caller ); } for( i in seq_len( nrow(bam.specification) ) ) { sample.id <- bam.specification$sample.id[i]; tumour.bam <- bam.specification$tumour.bam[i]; sample.output.directory <- bam.specification$output.path[i]; job.dependencies <- NULL; if( 'job.dependency' %in% names(bam.specification) && '' != bam.specification$job.dependency[i] && !is.na(bam.specification$job.dependency[i]) ) { job.dependencies <- unlist(stringr::str_split( bam.specification$job.dependency[i], pattern = '\\s+' )); } normal.bam <- NULL; if(paired) { normal.bam <- bam.specification$normal.bam[i]; } # Sort out what will be the name of the job and the path to the final output file of interest # this will be used as input to the next step in the pipeline: the job dependency and the file to run annotation on job.name <- paste(caller, sample.id, sep = '_'); if( !is.null(job.name.prefix) && '' != job.name.prefix ) { job.name <- paste(job.name.prefix, job.name, sep = '_'); } output.filename <- paste0(sample.id, '.passed.ontarget.vcf'); output.vcf <- file.path(sample.output.directory, output.filename); if( 'vardict' == caller ) { run.vardict.sample( sample.id = sample.id, tumour.bam = tumour.bam, normal.bam = normal.bam, paired = paired, proton = proton, output.directory = sample.output.directory, output.filename = output.filename, code.directory = file.path(output.directory, 'code'), log.directory = file.path(output.directory, 'log'), job.dependencies = job.dependencies, job.name = job.name, job.group = caller, verify.options = FALSE, quiet = quiet ); } else if( 'mutect' == caller ) { run.mutect.sample( sample.id = sample.id, tumour.bam = tumour.bam, normal.bam = normal.bam, paired = paired, output.directory = sample.output.directory, output.filename = output.filename, code.directory = file.path(output.directory, 'code'), log.directory = file.path(output.directory, 'log'), job.dependencies = job.dependencies, job.group = caller, job.name = job.name, verify.options = FALSE, quiet = quiet ); } else if( 'varscan' == caller ) { run.varscan.sample( sample.id = sample.id, tumour.bam = tumour.bam, normal.bam = normal.bam, paired = paired, output.directory = sample.output.directory, output.filename = output.filename, code.directory = file.path(output.directory, 'code'), log.directory = file.path(output.directory, 'log'), job.dependencies = job.dependencies, job.group = caller, job.name = job.name, verify.options = FALSE, quiet = quiet ); } else if( 'lofreq' == caller ) { run.lofreq.sample( sample.id = sample.id, tumour.bam = tumour.bam, normal.bam = normal.bam, paired = paired, output.directory = sample.output.directory, output.filename = output.filename, code.directory = file.path(output.directory, 'code'), log.directory = file.path(output.directory, 'log'), job.dependencies = job.dependencies, job.group = caller, job.name = job.name, verify.options = FALSE, quiet = quiet ); } else if( 'muse' == caller ) { run.muse.sample( sample.id = sample.id, tumour.bam = tumour.bam, normal.bam = normal.bam, paired = paired, output.directory = sample.output.directory, output.filename = output.filename, code.directory = file.path(output.directory, 'code'), log.directory = file.path(output.directory, 'log'), job.dependencies = job.dependencies, job.group = caller, job.name = job.name, verify.options = FALSE, quiet = quiet ); } annovar.specification[[ paste(caller, sample.id, sep = '-') ]] <- data.frame( 'sample.id' = sample.id, 'vcf' = output.vcf, 'job.dependency' = job.name, 'caller' = caller, stringsAsFactors = FALSE ); } } annovar.specification <- do.call(rbind, annovar.specification); # sanity check to make sure format is as expected verify.vcf.specification(annovar.specification); return(annovar.specification); }
/scratch/gouwar.j/cran-all/cranData/varitas/R/run.variant.calling.R
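# Sketch of how the returned specification chains into annotation (file names
# hypothetical; assumes VariTAS options are configured, since save.config() is
# called internally). Each row carries the expected VCF path and the name of
# the calling job as its dependency, which run.annotation() and
# run.post.processing() consume further downstream.
vcf.spec <- run.variant.calling(
    data.frame(sample.id = 'Z', tumour.bam = 'Z.bam'),
    output.directory = '.',
    variant.callers = 'vardict',
    paired = FALSE,
    quiet = TRUE
    );
print(vcf.spec[, c('sample.id', 'vcf', 'job.dependency', 'caller')]);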
#' Run VariTAS pipeline in full. #' #' @description #' Run all steps in VariTAS processing pipeline, with appropriate dependencies. #' #' @param file.details #' Data frame containing details of files to be used during first processing step. #' Depending on what you want to be the first step in the pipeline, this can either be #' FASTQ files, BAM files, VCF files, or variant (txt) files. #' @param output.directory #' Main directory where all files should be saved #' @param run.name #' Name of pipeline run. Will be added as a prefix to all LSF jobs. #' @param start.stage #' String indicating which stage pipeline should start at. If starting at a later stage #' of the pipeline, appropriate input files must be provided. For example, if starting with annotation, #' VCF files with variant calls must be provided. #' @param variant.callers #' Vector specifying which variant callers should be run. #' @param proton #' Logical indicating if data was generated by proton sequencing. Used to set base quality #' thresholds in variant calling steps. #' @param quiet #' Logical indicating whether to print commands to screen rather than submit jobs. Defaults to FALSE, #' can be useful to set to TRUE for testing. #' @param email #' Email address that should be notified when pipeline finishes. If NULL or FALSE, no email is sent. #' @param verify.options #' Logical indicating whether to run verify.varitas.options #' @param save.specification.files #' Logical indicating if specification files should be saved to project directory #' #' @return None #' @examples #' run.varitas.pipeline( #' file.details = data.frame( #' sample.id = c('1', '2'), #' reads = c('1-R1.fastq.gz', '2-R1.fastq.gz'), #' mates = c('1-R2.fastq.gz', '2-R2.fastq.gz'), #' patient.id = c('P1', 'P1'), #' tissue = c('tumour', 'normal') #' ), #' output.directory = '.', #' quiet = TRUE, #' run.name = "Test", #' variant.callers = c('mutect', 'varscan') #' ) #' #' @export run.varitas.pipeline <- function( file.details, output.directory, run.name = NULL, start.stage = c('alignment', 'qc', 'calling', 'annotation', 'merging'), variant.callers = NULL, proton = FALSE, quiet = FALSE, email = NULL, verify.options = !quiet, save.specification.files = !quiet ) { logo <- c("====================================================================", "`7MMF' `7MF' db MMP\"\"MM\"\"YMM db .M\"\"\"bgd ", " `MA ,V P' MM `7 ;MM: ,MI \"Y ", " VM: ,V ,6\"Yb. `7Mb,od8 `7MM MM ,V^MM. `MMb. ", " MM. M'8) MM MM' \"' MM MM ,M `MM `YMMNq. ", " `MM A' ,pm9MM MM MM MM AbmmmqMA . `MM ", " :MM; 8M MM MM MM MM A' VML Mb dM ", " VF `Moo9^Yo..JMML. .JMML. .JMML..AMA. 
.AMMA.P\"Ybmmd\" ", "====================================================================" ) cat(logo, sep = '\n') start.stage <- match.arg(start.stage); if( !is.null(variant.callers) ) variant.callers <- tolower(variant.callers); ### INPUT TESTS ########################################################### if( ( !quiet || save.specification.files) && !dir.exists(output.directory) ) { error.message <- paste('Directory', output.directory, 'does not exist'); stop(error.message); } if( start.stage %in% c('alignment', 'qc', 'calling') && is.null(variant.callers) ) { stop('Variant callers must be provided when starting pipeline at variant calling or earlier.'); } # check that all variant callers are supported supported.callers <- c('mutect', 'vardict', 'varscan', 'lofreq', 'muse'); if( !all(variant.callers %in% supported.callers) ) { unrecognized.callers <- variant.callers[ !(variant.callers %in% supported.callers ) ]; error.message <- paste( 'Unrecognized variant callers:', paste(unrecognized.callers, collapse = ' ') ); stop(error.message); } # warn if doing alignment on proton data - should be using BAMs from machine if( 'alignment' == start.stage && proton ) { warning('Running alignment on proton sequencing data. This is probably not a good idea - using BAMs from the machine would be better'); } ### MAIN ################################################################## ### SORT OUT STAGES # which stages should be run stage.order <- c('alignment', 'qc', 'calling', 'annotation', 'merging'); start.stage.index <- match(start.stage, stage.order); stages.to.run <- stage.order[ seq_along(stage.order) >= start.stage.index ]; # Make sure options contain all required fields if( verify.options ) { verify.varitas.options( stages.to.run = stages.to.run, variant.callers = variant.callers ); } # QC is not run as a separate stage when starting from alignment if ( 'alignment' %in% stages.to.run ) { stages.to.run <- setdiff(stages.to.run, 'qc'); } cat('RUNNING STAGES:', paste(stages.to.run, collapse = ' '), '\n'); # what kind of input was provided # - might have to update this if adding support for multiple start points if( 'alignment' == start.stage ) { fastq.specification <- file.details; } else if( 'qc' == start.stage ) { bam.specification <- file.details; } else if( 'calling' == start.stage ) { bam.specification <- file.details; } else if( 'annotation' == start.stage ) { vcf.specification <- file.details; } else if( 'merging' == start.stage ) { variant.specification <- file.details; } # Determine if running on HPC HPC <- TRUE; config <- read.yaml(save.config()); if (config[['cluster_scheduler']] == 'none') { HPC <- FALSE; } ### RUN PIPELINE # STAGE 1: ALIGNMENT if( 'alignment' %in% stages.to.run ) { # is this the best way to run it? 
paired.end <- FALSE; if( 'mates' %in% names(fastq.specification) ) { paired.end <- TRUE; } bam.specification <- run.alignment( fastq.specification = fastq.specification, output.directory = output.directory, paired.end = paired.end, job.group = 'alignment', job.name.prefix = run.name, quiet = quiet, verify.options = FALSE ); if( save.specification.files ) { file.name <- date.stamp.file.name('bam_specification.txt'); utils::write.table( bam.specification, file.path(output.directory, file.name), sep = '\t', row.names = FALSE ); } } # STAGE 2: QUALITY CONTROL if( 'qc' %in% stages.to.run ) { # TO DO: # - add job dependencies from QC to post processing stage paired <- FALSE; if( 'normal.bam' %in% names(bam.specification) ) { paired <- TRUE; } bam.specification <- run.target.qc( bam.specification, project.directory = output.directory, job.name.prefix = run.name, paired = paired, quiet = quiet ); } # STAGE 3: VARIANT CALLING if( 'calling' %in% stages.to.run ) { paired <- FALSE; if( 'normal.bam' %in% names(bam.specification) ) { paired <- TRUE; } vcf.specification <- run.variant.calling( bam.specification = bam.specification, output.directory = output.directory, variant.callers = variant.callers, job.name.prefix = run.name, paired = paired, proton = proton, quiet = quiet, verify.options = FALSE ); if( save.specification.files ) { file.name <- date.stamp.file.name('variant_vcf_specification.txt'); utils::write.table( vcf.specification, file.path(output.directory, file.name), sep = '\t', row.names = FALSE ); } } # STAGE 4: ANNOTATION if( 'annotation' %in% stages.to.run ) { job.name.prefix <- 'annotate'; # add name of run if it exists if( !is.null(run.name) && '' != run.name ) { job.name.prefix <- paste(run.name, job.name.prefix, sep = '_'); } variant.specification <- run.annotation( vcf.specification = vcf.specification, output.directory = output.directory, job.group = 'annotation', job.name.prefix = job.name.prefix, quiet = quiet, verify.options = FALSE ); if( save.specification.files ) { file.name <- date.stamp.file.name('annotated_variants_specification.txt'); utils::write.table( variant.specification, file.path(output.directory, file.name), sep = '\t', row.names = FALSE ); } } # STAGE 5: MERGING if( 'merging' %in% stages.to.run ) { run.post.processing( variant.specification = variant.specification, output.directory = output.directory, job.name.prefix = run.name, quiet = quiet, email = email ); } # RUN ALL SCRIPTS IF NOT ON HPC if (!HPC) { run.all.scripts( output.directory = output.directory, stages.to.run = stages.to.run, variant.callers = variant.callers, quiet = quiet ); } }
/scratch/gouwar.j/cran-all/cranData/varitas/R/run.varitas.pipeline.R
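# Sketch of starting the pipeline at a later stage (hypothetical files, not
# package code; assumes VariTAS options are configured). When
# start.stage = 'annotation', file.details must be a VCF specification with
# caller names, as produced by run.variant.calling().
run.varitas.pipeline(
    file.details = data.frame(
        sample.id = c('1', '2'),
        vcf = c('1.vcf', '2.vcf'),
        caller = c('vardict', 'vardict')
        ),
    output.directory = '.',
    start.stage = 'annotation',
    quiet = TRUE
    );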
#' run.varitas.pipeline.hybrid #' #' @description #' Run VariTAS pipeline starting from both VCF files and BAM/ FASTQ files. #' Useful for processing data from the Ion PGM or MiniSeq where variant calling has been done on the machine, #' but you are interested in running more variant callers. #' #' @param vcf.specification #' Data frame containing details of vcf files to be processed. Must contain columns sample.id, vcf, and caller #' @param output.directory #' Main directory where all files should be saved #' @param run.name #' Name of pipeline run. Will be added as a prefix to all LSF jobs. #' @param fastq.specification #' Data frame containing details of FASTQ files to be processed #' @param bam.specification #' Data frame containing details of BAM files to be processed #' @param variant.callers #' Vector specifying which variant callers should be run. #' @param proton #' Logical indicating if data was generated by proton sequencing. Used to set base quality #' thresholds in variant calling steps. #' @param quiet #' Logical indicating whether to print commands to screen rather than submit jobs. Defaults to FALSE, #' can be useful to set to TRUE for testing. #' @param email #' Email address that should be notified when pipeline finishes. If NULL or FALSE, no email is sent. #' @param verify.options #' Logical indicating whether to run verify.varitas.options #' @param save.specification.files #' Logical indicating if specification files should be saved to project directory #' #' @return None #' @examples #' run.varitas.pipeline.hybrid( #' bam.specification = data.frame(sample.id = c('Z', 'Y'), tumour.bam = c('Z.bam', 'Y.bam')), #' vcf.specification = data.frame( #' sample.id = c('a', 'b'), #' vcf = c('a.vcf', 'b.vcf'), #' caller = c('pgm', 'pgm') #' ), #' output.directory = '.', #' quiet = TRUE, #' run.name = "Test", #' variant.callers = c('mutect', 'varscan') #' ) #' #' @export run.varitas.pipeline.hybrid <- function( vcf.specification, output.directory, run.name = NULL, fastq.specification = NULL, bam.specification = NULL, variant.callers = c('mutect', 'vardict', 'varscan', 'lofreq', 'muse'), proton = FALSE, quiet = FALSE, email = NULL, verify.options = !quiet, save.specification.files = !quiet ) { logo <- c("====================================================================", "`7MMF' `7MF' db MMP\"\"MM\"\"YMM db .M\"\"\"bgd ", " `MA ,V P' MM `7 ;MM: ,MI \"Y ", " VM: ,V ,6\"Yb. `7Mb,od8 `7MM MM ,V^MM. `MMb. ", " MM. M'8) MM MM' \"' MM MM ,M `MM `YMMNq. ", " `MM A' ,pm9MM MM MM MM AbmmmqMA . `MM ", " :MM; 8M MM MM MM MM A' VML Mb dM ", " VF `Moo9^Yo..JMML. .JMML. .JMML..AMA. 
.AMMA.P\"Ybmmd\" ", "====================================================================" ) cat(logo, sep = '\n') variant.callers <- match.arg(variant.callers, several.ok = TRUE); old.vcf.specification <- vcf.specification; ### INPUT TESTS ########################################################### if( is.null(fastq.specification) && is.null(bam.specification) ) { stop('Must provide either fastq.specification or bam.specification'); } if( !is.null(fastq.specification) && !is.null(bam.specification) ) { stop('Can only handle one of fastq.specification and bam.specification'); } if( !( 'caller' %in% names(vcf.specification)) ) { stop("vcf.specification must contain a column 'caller'"); } if( is.null(fastq.specification) ) { stages.to.run <- c('qc', 'calling', 'annotation', 'merging'); } else { stages.to.run <- c('alignment', 'calling', 'annotation', 'merging'); } ### MAIN ################################################################## if( verify.options ) { verify.varitas.options( stages.to.run = stages.to.run, variant.callers = variant.callers ); } # Determine if running on HPC HPC <- TRUE config <- read.yaml(save.config()) if (config[['cluster_scheduler']] == 'none') { HPC <- FALSE } # STAGE 1: ALIGNMENT if( !is.null(fastq.specification) ) { paired.end <- FALSE; if( 'mates' %in% names(fastq.specification) ) { paired.end <- TRUE; } bam.specification <- run.alignment( fastq.specification = fastq.specification, output.directory = output.directory, paired.end = paired.end, job.group = 'alignment', job.name.prefix = run.name, quiet = quiet, verify.options = FALSE ); if( save.specification.files ) { file.name <- date.stamp.file.name('bam_specification.txt'); utils::write.table( bam.specification, file.path(output.directory, file.name), sep = '\t', row.names = FALSE ); } } # STAGE 2: QC paired <- FALSE; if( 'normal.bam' %in% names(bam.specification) ) { paired <- TRUE; } if ('qc' %in% stages.to.run) { bam.specification <- run.target.qc( bam.specification, project.directory = output.directory, job.name.prefix = run.name, paired = paired, quiet = quiet, verify.options = FALSE ); } # STAGE 3: VARIANT CALLING # store VCF specification for new variant calls new.vcf.specification <- run.variant.calling( bam.specification = bam.specification, output.directory = output.directory, variant.callers = variant.callers, job.name.prefix = run.name, paired = paired, proton = proton, quiet = quiet, verify.options = FALSE ); # make sure VCF specifications match missing.columns <- names(new.vcf.specification)[ !(names(new.vcf.specification) %in% names(old.vcf.specification)) ]; for(column in missing.columns) { old.vcf.specification[, column] <- ''; } # merge into a single vcf specification vcf.specification <- rbind(new.vcf.specification, old.vcf.specification); # Run scripts if not on HPC if (!HPC) { if (!is.null(fastq.specification)) { stages.to.run = c('alignment', 'qc', 'calling') } else { stages.to.run = c('qc', 'calling') } run.all.scripts( output.directory = output.directory, stages.to.run = stages.to.run, variant.callers = variant.callers, quiet = quiet ) } # STAGE 4: REST OF PIPELINE run.varitas.pipeline( vcf.specification, start.stage = 'annotation', run.name = run.name, output.directory = output.directory, quiet = quiet, verify.options = FALSE, email = email ); }
/scratch/gouwar.j/cran-all/cranData/varitas/R/run.varitas.pipeline.hybrid.R
#' Run VarScan for a sample #' #' @inheritParams run.vardict.sample #' #' #' #' run.varscan.sample <- function( tumour.bam, sample.id, paired, normal.bam = NULL, output.directory = NULL, output.filename = NULL, code.directory = NULL, log.directory = NULL, config.file = NULL, job.dependencies = NULL, quiet = FALSE, job.name = NULL, verify.options = !quiet, job.group = NULL ) { ### INPUT TESTS ########################################################### # make sure no factors have been passed in assertthat::assert_that( is.character(tumour.bam) ); assertthat::assert_that( is.character(sample.id) ); assertthat::assert_that( is.null(normal.bam) || is.character(normal.bam)); assertthat::assert_that( is.null(output.directory) || is.character(output.directory) ); if(paired && is.null(normal.bam)) { stop('paired is set to true but no normal sample BAM has been supplied.'); } if(!paired && !is.null(normal.bam)) { stop('Getting mixed signals: paired is set to false but a normal sample BAM has been supplied.'); } ### MAIN ################################################################## if( verify.options ) { verify.varitas.options( varitas.options = config.file, stages.to.run = 'calling', variant.callers = 'varscan' ); } # save temporary config file that can be read by Perl if(is.null(config.file)) { config.file <- save.config(); } # sort out whether to pass paired flag to Perl flags <- NULL; if(paired) { flags <- 'paired'; } script <- system.file('perl', 'run_varscan.pl', package = get.varitas.options('pkgname') ); command <- make.command.line.call( main.command = c('perl', script), options = c( 'tumour_bam' = tumour.bam, 'normal_bam' = normal.bam, 'sample_id' = sample.id, 'config_file' = config.file, 'output_directory' = output.directory, 'output_filename' = output.filename, 'code_directory' = code.directory, 'log_directory' = log.directory, 'job_dependencies' = job.dependencies, 'job_name' = job.name, 'job_group' = job.group ), flags = flags ); if( quiet ) { cat(command, '\n'); } else { system(command); } }
/scratch/gouwar.j/cran-all/cranData/varitas/R/run.varscan.sample.R