#' Read GPX File
#'
#' Reads data in GPX form.
#' Examples of tourist routes saved in this format can be downloaded from
#' mapa-turystyczna.pl.
#'
#' @param path path to the gpx file with information about the route
#' @param name name of the route
#' @param uniform if TRUE then the route will be converted into a uniform grid of points
#' @param dx if uniform is TRUE then dx is the grid size
#' @param span if uniform is TRUE then span is the smoothing parameter
#' @param x routes to be plotted
#' @param color names of colors for lines
#' @param type what should be plotted? 'profile' for profiles, 'difference' for the derivative, 'boxplot' for the absolute derivative
#' @param ... other parameters
#'
#' @author
#' Przemyslaw Biecek
#' @importFrom stats loess predict
#'
#' @rdname read_gpx
#' @export
read_gpx <- function(path, name = NULL, uniform = TRUE, dx = 25, span = 0.1) {
  if (is.null(name)) {
    name <- gsub(path, pattern = ".gpx", replacement = "")
  }
  pfile <- XML::htmlTreeParse(path, error = function(...) {}, useInternalNodes = TRUE)
  # Get all elevations, times and coordinates via the respective xpath
  elevations <- as.numeric(XML::xpathSApply(pfile, path = "//trkpt/ele", XML::xmlValue))
  coords <- XML::xpathSApply(pfile, path = "//trkpt", XML::xmlAttrs)
  # Extract latitude and longitude from the coordinates
  lats <- as.numeric(coords["lat", ])
  lons <- as.numeric(coords["lon", ])
  # convert to geo points
  points <- sp::SpatialPoints(data.frame(lons, lats))
  mat_dists <- raster::pointDistance(points, lonlat = TRUE)
  cons_dists <- diag(mat_dists[-1, ])
  df <- data.frame(lon = lons, lat = lats, ele = elevations,
                   ele0 = elevations - elevations[1],
                   dist = cumsum(c(0, cons_dists)),
                   d_ele = c(0, diff(elevations)),
                   d_dist = c(0, cons_dists),
                   dd = c(0, diff(elevations)) / c(0, cons_dists),
                   name = name)
  class(df) = c("gpx_file", "data.frame")
  if (uniform) {
    dist_seq <- seq(min(df$dist), max(df$dist), by = dx)
    ele_seq <- predict(loess(ele ~ dist, data = df, span = span, degree = 1),
                       data.frame(dist = dist_seq))
    df <- data.frame(ele = ele_seq, ele0 = ele_seq - ele_seq[1],
                     dist = dist_seq,
                     d_ele = c(0, diff(ele_seq)),
                     d_dist = c(0, diff(dist_seq)),
                     dd = c(0, diff(ele_seq)) / c(0, diff(dist_seq)),
                     name = df$name[1])
    class(df) = c("gpx_file", "data.frame")
  }
  df
}

#' @rdname read_gpx
#' @export
plot.gpx_file <- function(x, ..., type = "profile", color = "magenta") {
  for (sx in list(...)) {
    if ("gpx_file" %in% class(sx)) {
      x <- rbind(x, sx)
    }
  }
  dist = ele = dd = name = route = NULL
  # one plot or more?
  if (length(unique(x$name)) > 1) {
    x$name <- factor(x$name, levels = unique(x$name))
    x$route <- as.numeric(x$name)
    # more than one profile
    if (type == "profile") {
      pl <- ggplot2::ggplot(x, ggplot2::aes(dist, ele, color = route, group = name)) +
        ggplot2::geom_line() + ggplot2::geom_point() +
        DALEX::theme_ema() +
        ggplot2::ylab("Wysokosc n.p.m. [m]") +
        ggplot2::xlab("Odleglosc od poczatku szlaku [m]") +
        ggplot2::ggtitle(paste0(unique(x$name), collapse = ", ")) +
        ggplot2::scale_color_gradient(low = color, high = "black")
    }
    if (type == "difference") {
      pl <- ggplot2::ggplot(x, ggplot2::aes(dist, dd, color = route, group = name)) +
        ggplot2::geom_smooth(se = FALSE, span = 0.2, method = "loess", formula = y ~ x) +
        DALEX::theme_ema() +
        ggplot2::geom_hline(yintercept = 0) +
        ggplot2::ylab("Zmiana wysokosci [m/m]") +
        ggplot2::xlab("Odleglosc od poczatku szlaku [m]") +
        ggplot2::ggtitle(paste0(unique(x$name), collapse = ", ")) +
        ggplot2::scale_color_gradient(low = color, high = "black")
    }
    if (type == "boxplot") {
      pl <- ggplot2::ggplot(x, ggplot2::aes(abs(dd), color = route, group = name)) +
        ggplot2::geom_boxplot(size = 2, alpha = .20) +
        DALEX::theme_ema_vertical() +
        ggplot2::ylab("") +
        ggplot2::xlab("Bezwzgledne nachylenie szlaku [m/m]") +
        ggplot2::scale_y_continuous("", breaks = NULL) +
        ggplot2::scale_color_gradient(low = color, high = "black") +
        ggplot2::facet_wrap(~name, ncol = 1) +
        ggplot2::ggtitle("")
    }
  } else {
    # just one plot
    if (type == "profile") {
      pl <- ggplot2::ggplot(x, ggplot2::aes(dist, ele)) +
        ggplot2::geom_line(color = color) +
        ggplot2::geom_point(color = color) +
        DALEX::theme_ema() +
        ggplot2::ylab("Wysokosc n.p.m. [m]") +
        ggplot2::xlab("Odleglosc od poczatku szlaku [m]") +
        ggplot2::ggtitle(paste0(unique(x$name)))
    }
    if (type == "difference") {
      pl <- ggplot2::ggplot(x, ggplot2::aes(dist, dd)) +
        ggplot2::geom_smooth(se = FALSE, span = 0.2, method = "loess", formula = y ~ x) +
        DALEX::theme_ema() +
        ggplot2::geom_hline(yintercept = 0) +
        ggplot2::ylab("Zmiana wysokosci [m/m]") +
        ggplot2::xlab("Odleglosc od poczatku szlaku [m]") +
        ggplot2::ggtitle(paste0(unique(x$name)))
    }
    if (type == "boxplot") {
      pl <- ggplot2::ggplot(x, ggplot2::aes(abs(dd))) +
        ggplot2::geom_boxplot(color = color, size = 2, alpha = .20) +
        DALEX::theme_ema_vertical() +
        ggplot2::ylab("") +
        ggplot2::xlab("Bezwzgledne nachylenie szlaku [m/m]") +
        ggplot2::scale_y_continuous("", breaks = NULL) +
        ggplot2::ggtitle(paste0(unique(x$name)))
    }
  }
  pl
}
/scratch/gouwar.j/cran-all/cranData/BetaBit/R/read_gpx.R
#' @title The Regression Game
#'
#' @description
#' The \code{regression} function is used for solving problems in the data-based
#' game ,,The Regression Game''.
#'
#' @param ... \code{regression} function is called with different arguments, which
#' vary depending on a problem that Beta and Bit are trying to solve. See
#' \code{Details} in order to learn more about the list of possible arguments.
#'
#' @details Whenever some additional hints are needed, one should add
#' \code{hint = TRUE} or \code{techHint = TRUE} argument to the \code{regression} function.
#' Technical hints will point out R packages and/or functions which might help
#' you to solve the task while "normal" hints provide you with methodological
#' advice.
#'
#' In this game you are helping Professor Pearson.
#' You can communicate with him through the \code{regression} function.
#'
#' In each call include the \code{subject} parameter (indicating which task
#' you are trying to answer) and the \code{content} parameter (providing
#' information Professor Pearson is asking you for in a given task).
#'
#' Data used in the game comes from the study of Polish upper-secondary
#' schools' first-grade students. It was conducted together with the
#' PISA 2009 study using the same cognitive tests and questionnaires as
#' in PISA 2009 but on a different group of students (in Poland most of the
#' students in a PISA sample attend lower-secondary schools). The students who
#' participated in the first wave of the study were followed in the 2nd grade of
#' upper-secondary school within the research program \emph{Our further study
#' and work} (\emph{Nasza Dalsza Nauka i Praca}). Both studies were conducted by
#' the Institute of Philosophy and Sociology of the Polish Academy of Sciences.
#' \strong{The original data was changed a little, to better fit the purpose of
#' the game.}
#'
#' ,,The Regression Game'' is a free-of-charge, educational project of the
#' SmarterPoland.pl Foundation.
#' @return
#' The function returns one of three possible values:
#' \itemize{
#'   \item{\code{TRUE} if you provided a correct answer to a task,}
#'   \item{\code{FALSE} if you provided a wrong answer to a task,}
#'   \item{\code{NULL} if the function can't identify the task you wanted to answer.}
#' }
#' @author
#' \itemize{
#'   \item{Tomasz Zoltak - the idea and the implementation,}
#'   \item{Mateusz Zoltak - comments, contribution to hints,}
#'   \item{Zuzanna Brzozowska - proofreading,}
#'   \item{Przemyslaw Biecek - comments and the integration with the `BetaBit` package.}
#' }
#' @examples
#' regression()
#' regression(hint = TRUE)
#' regression(techHint = TRUE)
#' @rdname regression
#' @importFrom stats lm deviance formula
#' @export
regression <- function(...) {
  args = list(...)
  textsRegression = as.list(dcode(.regression.))
  if (length(args) == 0) {
    cat(textsRegression$regressionInit)
    return(invisible(NULL))
  }
  if ("subject" %in% names(args)) {
    args$subject = tolower(args$subject)
    . = list(
      structure(c(-0.143966665283887, -0.137825041716732, -0.157386920578125),
                .Names = c("MATH_2009", "READ_2009", "SCIE_2009")),
      "highconf",
      NULL,
      list(c("RAVEN_WYN", "STAI_C_WYN", "ZAMPS_WYN"),
           c("csesi", "STAI_C_WYN", "ZAMPS_WYN")),
      c(-0.6375, 1.1661, 1.5848, 0.2752, 1.26, 0.2914, 0.4058, 0.2478, 0.7394,
        0.8916, 0.0043, 0.7219, -0.282, -1.2267, 1.5022, 0.8074, 1.1414, 0.8687,
        -1.3389, -0.0902, 4.945, 0.5476, -0.2839, -0.1818, -0.4485, -0.074,
        0.2379, 0.249, 2.4501, -0.1493, -0.6062, -0.4345, -0.6615, 0.1084,
        0.4113, 1.2611, -0.3029, 0.5105, 0.3736, 0.1127, -0.1324, 0.8799,
        -1.9888, 1.1488, -0.1969, 0.3077, 0.8966, -0.3707, 1.8101, 0.0376,
        0.622, 1.0357, -0.8864, 1.3858, 0.8317, -0.4309, 0.4432, 0.4627,
        -0.0403, 0.4621, -1.5229, 0.7644, 0.0242, 1.0579, 0.4841, -0.9998,
        0.9073, -0.0962, 0.4208, 1.1767, 0.5754, 1.4375, -1.6987, 1.8664,
        0.5893, 0.837, 0.0181, -0.1621, -0.0472, 0.5972, -0.1009, -0.0312,
        -1.5385, 0.825, -0.9025, 0.9718, 0.0498, 0.4716, 0.9132, 2.2402,
        1.6773, 0.3976, 0.9948, 3.1998, -2.0272, 2.1335, 1.1378, 0.7727,
        -0.9487, 1.6824, 0.8196, -0.9819, -1.1, -0.8967, -1.7019, -0.7351,
        2.1406, -0.7031, 0.7061, 1.4704, 0.575, -2.271, -0.2446, 0.677,
        1.0731, 0.5211, -1.3844, -0.3239, 1.9043, 1.6658, -1.4841, 2.8874,
        2.064, -0.4097, 1.582, -0.5039, 1.2635, 0.4873, -0.2557, -0.4544,
        -0.4648, 1.3875, -1.7898, 1.2693, 2.4197, 3.1531, 0.3177, -1.7153,
        -1.4094, 0.2531, 0.2441, -0.4066, 0.0512, -0.4713, -0.0557, 5.334,
        0.0388, 0.0423, 1.3605, -0.2314, 1.4575, 0.5591, -0.5954, 0.2199,
        3.2709, 0.1701, -1.3674, -0.3077, -0.1179, -0.7117, -1.2782, -0.3209,
        -0.983, -1.298, 1.6415, 3.7454, 1.2008, 1.6935, 0.1726, -0.2236,
        -0.4053, 0.1983, -0.1504, 4.4081, 0.3223, 0.4518, -1.0513, -0.2394,
        3.6358, 0.9655, 0.2501, -0.0744, -1.3324, 1.8961, -0.4012, 1.5268,
        4.609, -0.3739, 0.7202, -0.5722, 0.3523, 1.1486, 1.5051, 1.7594,
        2.9057, 1.0941, -1.1194),
      list(c(43, 61, 73, 83, 94, 95, 96, 105, 117, 133, 136, 146, 155, 190),
           c(43, 61, 73, 83, 94, 95, 96, 105, 112, 117, 133, 136, 146, 155, 190)),
      24446455.2489998)
    if (args$subject == "summer internship") {
      if ("hint" %in% names(args)) {
        cat(textsRegression$hint0)
        return(invisible(FALSE))
      }
      if ("techHint" %in% names(args)) {
        cat(textsRegression$techHint0)
        return(invisible(FALSE))
      }
      cat(textsRegression$task1)
      return(invisible(TRUE))
    } else if (!any(c("content", "hint") %in% names(args))) {
      cat("You forgot to send the results!\nUse argument `content` of the `regression()` function to send the data.\n")
      return(invisible(FALSE))
    }
    # task 1 - correlations
    if (args$subject == "correlations") {
      if ("hint" %in% names(args)) {
        cat(textsRegression$hint1)
        return(invisible(FALSE))
      }
      if ("techHint" %in% names(args)) {
        cat(textsRegression$techHint1)
        return(invisible(FALSE))
      }
      if (!is.vector(args$content)) {
        cat("Argument `content` must be a vector of mode `numeric` and length of 3.\n")
        return(invisible(FALSE))
      } else if (!is.numeric(args$content) | length(args$content) != 3) {
        cat("Argument `content` must be a vector of mode `numeric` and length of 3.\n")
        return(invisible(FALSE))
      }
      if (!is.null(names(args$content)) & is.vector(args$content)) {
        args$content = args$content[order(names(args$content))]
      }
      if (all.equal(unname(args$content), unname(.[[1]]))[1] %in% TRUE) {
        cat(textsRegression$task2)
        return(invisible(TRUE))
      } else {
        cat(textsRegression$wrongAnswer)
        return(invisible(FALSE))
      }
    # task 2 - linearity diagnostics
    } else if (args$subject == "name of the variable") {
      if ("hint" %in% names(args)) {
        cat(textsRegression$hint2)
        return(invisible(FALSE))
      }
      if ("techHint" %in% names(args)) {
        cat(textsRegression$techHint2)
        return(invisible(FALSE))
      }
      if (!is.vector(args$content)) {
        cat("Argument `content` must be a character vector of length 1.\n")
        return(invisible(FALSE))
      } else if (!is.character(args$content) | length(args$content) != 1) {
        cat("Argument `content` must be a character vector of length 1.\n")
        return(invisible(FALSE))
      }
      if (all.equal(unname(args$content), .[[2]])[1] %in% TRUE) {
        cat(textsRegression$task3)
        return(invisible(TRUE))
      } else {
        cat(textsRegression$wrongAnswer)
        return(invisible(FALSE))
      }
    # task 3 - transformation of the independent variable
    } else if (args$subject == "transformation") {
      if ("hint" %in% names(args)) {
        cat(textsRegression$hint3)
        return(invisible(FALSE))
      }
      if ("techHint" %in% names(args)) {
        cat(textsRegression$techHint3)
        return(invisible(FALSE))
      }
      if (!is.vector(args$content)) {
        cat("Argument `content` must contain an expression.\n")
        return(invisible(FALSE))
      } else if (!is.expression(args$content[1])) {
        cat("Argument `content` must contain an expression.\n")
        return(invisible(FALSE))
      } else if (!all(all.vars(args$content[1]) %in% "income")) {
        cat("There should be no variables other than `income` in your expression.\n")
        return(invisible(FALSE))
      }
      incomeTr = tryCatch(
        eval(args$content[1], BetaBit::FSW),
        error = function(e) {
          cat("Trying to evaluate your expression: `",
              as.character(args$content)[1], "` causes an error:\n\n", sep = "")
          print(e)
          return(NULL)
        }
      )
      if (is.null(incomeTr)) {
        return(invisible(FALSE))
      }
      mTemp = with(BetaBit::FSW, lm(READ_2009 ~ cultpos + incomeTr))
      if (summary(mTemp)$coef[3, 4] <= 0.05) {
        functionsUsed = setdiff(all.names(args$content[1]), "income")
        if (length(functionsUsed) == 1 & all(functionsUsed %in% c("log2", "log10"))) {
          commentReplace = paste0("It's nice you decided to use a logarithmic transformation. The slope parameter for the transformed income has a clear interpretation: that's the change in prediction when the value of income rises ",
                                  ifelse(functionsUsed == "log2", "twice", "ten times"), ".")
        } else if (length(functionsUsed) == 1 & all(functionsUsed %in% "log")) {
          commentReplace = "It's nice you decided to use a logarithmic transformation. However, if you used base 2 or 10 instead of e, it would be a little easier to interpret the value of the slope parameter. It looks quite good, but there is still a little problem. Perhaps there is something wrong with the way you determine the value of SCHOOL_ID on the basis of the names (or perhaps the order) of the model contrasts (dummy variables). Please check what values of SCHOOL_ID appear in the dataset and compare them with how they are described in the names of the model contrasts."
        } else {
          commentReplace = paste0("Note however that if you used a base 2 logarithm to ",
                                  "transform `income`, then the slope parameter would be ",
                                  "more easily interpretable.")
        }
        # examine the solution and swap in the matching comment
        textsRegression$task4 = sub("comment on 3rd task", commentReplace,
                                    textsRegression$task4)
        cat(textsRegression$task4)
        return(invisible(TRUE))
      } else {
        cat("Unfortunately, after this transformation `income` is still insignificant.\n")
        return(invisible(FALSE))
      }
    # task 4 - collinearity
    } else if (args$subject == "collinearity") {
      if ("hint" %in% names(args)) {
        cat(textsRegression$hint4)
        return(invisible(FALSE))
      }
      if ("techHint" %in% names(args)) {
        cat(textsRegression$techHint4)
        return(invisible(FALSE))
      }
      varsTemp = all.vars(~ SEX + SCHOOL_TYPE + log(income) + homepos + hisei +
                            csesi + RAVEN_WYN + STAI_C_WYN + STAI_S_WYN +
                            SES_WYN + ZAMPS_WYN)
      if (!is.vector(args$content)) {
        cat("Argument `content` must be a vector of mode `character`.\n")
        return(invisible(FALSE))
      } else if (!is.character(args$content)) {
        cat("Argument `content` must be a vector of mode `character`.\n")
        return(invisible(FALSE))
      } else if (!all(args$content %in% varsTemp)) {
        cat("Some of the variables you gave don't appear in the model. Check the variable names.\n")
        return(invisible(FALSE))
      } else if (any(sapply(.[[4]],
                            function(x, y) {return(all(y %in% x))},
                            y = args$content))) {
        cat(textsRegression$task5)
        return(invisible(TRUE))
      }
      varsTemp = setdiff(varsTemp, args$content)
      varsTemp = sub("income", "log(income)", varsTemp)
      varsTemp = formula(paste("READ_2009 ~ ", paste(varsTemp, collapse = "+")))
      mTemp = lm(varsTemp, BetaBit::FSW)
      if (any(summary(mTemp)$coef[-1, 4] > 0.05)) {
        cat("Unfortunately, there is/are still some insignificant parameter(s) in the model.\n")
        print(summary(mTemp))
        return(invisible(FALSE))
      } else {
        cat("All variables in the model are statistically significant, but you removed more variables than in the optimal solution. Try removing other variables.")
      }
    # task 5 - regression within groups (interactions I)
    } else if (args$subject == "groups") {
      if ("hint" %in% names(args)) {
        cat(textsRegression$hint5)
        return(invisible(FALSE))
      }
      if ("techHint" %in% names(args)) {
        cat(textsRegression$techHint5)
        return(invisible(FALSE))
      }
      if (!is.data.frame(args$content)) {
        cat("Argument `content` must be a data frame with two columns: `SCHOOL_ID` and `par_hisei`.\n")
        return(invisible(FALSE))
      } else if (!(all(names(args$content) %in% c("SCHOOL_ID", "par_hisei")))) {
        cat("Argument `content` must be a data frame with two columns: `SCHOOL_ID` and `par_hisei`.\n")
        return(invisible(FALSE))
      }
      args$content = args$content[order(args$content$SCHOOL_ID), ]
      if (all.equal(args$content$par_hisei, .[[5]], tolerance = 0.001)[1] %in% TRUE) {
        cat(textsRegression$task6)
        return(invisible(TRUE))
      } else {
        cat(textsRegression$wrongAnswer)
        return(invisible(FALSE))
      }
    # task 6 - significance of the differences (interactions II)
    } else if (args$subject == "significant differences") {
      if ("hint" %in% names(args)) {
        cat(textsRegression$hint6)
        return(invisible(FALSE))
      }
      if ("techHint" %in% names(args)) {
        cat(textsRegression$techHint6)
        return(invisible(FALSE))
      }
      if (!is.vector(args$content)) {
        cat("Argument `content` must be a vector of mode `numeric`.\n")
        return(invisible(FALSE))
      } else if (!is.numeric(args$content)) {
        cat("Argument `content` must be a vector of mode `numeric`.\n")
        return(invisible(FALSE))
      } else if (!all(args$content %in% BetaBit::FSW$SCHOOL_ID)) {
        cat("Some values you gave don't appear in `FSW$SCHOOL_ID`.\n")
        return(invisible(FALSE))
      } else if (any(sapply(.[[6]][1:2],
                            function(x, y) {return(all(y %in% x))},
                            y = args$content))) {
        if (length(args$content) == length(.[[6]][[1]])) {
          commentReplace = "Note however, that you treated the mean value of the slope parameters as if it were estimated without any error. Do you know what you can do to account for this error while checking the significance of the differences?"
        } else {
          commentReplace = "It's nice you took into account that the mean value of the slope parameters is also estimated with error."
        }
        textsRegression$task7 = sub("comment on 6th task", commentReplace,
                                    textsRegression$task7)
        cat(textsRegression$task7)
        return(invisible(TRUE))
      } else if (any(sapply(.[[6]][3:4],
                            function(x, y) {return(all(y %in% x))},
                            y = args$content))) {
        cat("You are close to the right solution but it looks like you mixed up the SCHOOL_ID values/labels. Please compare the SCHOOL_ID values from the dataset with the names of the contrast variables in the model. If you use `contr.sum`, note that it does not use the names (labels) of factor levels to construct the names of contrasts (this behaviour differs from that of `contr.treatment`).\n")
        return(invisible(FALSE))
      } else {
        cat(textsRegression$wrongAnswer)
        return(invisible(FALSE))
      }
    # task 7 - modeling age
    } else if (args$subject == "age") {
      if ("hint" %in% names(args)) {
        cat(textsRegression$hint7)
        return(invisible(FALSE))
      }
      if ("techHint" %in% names(args)) {
        cat(textsRegression$techHint7)
        return(invisible(FALSE))
      }
      if (!("formula" %in% class(args$content))) {
        cat("Argument `content` must be a model formula.\n")
        return(invisible(FALSE))
      } else if (args$content[[2]] != "READ_2009") {
        cat("There should be simply `READ_2009` on the left side of the model formula given by `content`.\n")
        return(invisible(FALSE))
      }
      if ("vars" %in% names(args)) {
        if (!is.list(args$vars)) {
          cat("Argument `vars` must be a list of expressions.\n")
          return(invisible(FALSE))
        } else if (!all(sapply(args$vars, is.expression))) {
          cat("Argument `vars` must be a list of expressions.\n")
          return(invisible(FALSE))
        } else if (!all(sapply(args$vars,
                               function(x) {all(all.vars(x) %in% "RAVEN_AGE")}))) {
          cat("No variable other than `RAVEN_AGE` can appear in the expressions given in the `vars` argument.\n")
          return(invisible(FALSE))
        } else if (!all(all.vars(args$content) %in%
                        c("READ_2009", "RAVEN_AGE", names(args$vars)))) {
          cat("Expressions defining some variables that appear in the model formula given by argument `content` do not appear in argument `vars`. Check your formula and the names of the elements of the list of expressions.\n")
          return(invisible(FALSE))
        }
        varsTemp = lapply(args$vars, function(x) {
          return(tryCatch(
            eval(x, BetaBit::FSW),
            error = function(e) {
              cat("Trying to evaluate your expression: `", as.character(x),
                  "` causes an error:\n\n", sep = "")
              print(e)
              return(NULL)
            }
          ))
        })
        if (any(sapply(varsTemp, is.null))) {
          return(invisible(FALSE))
        }
        dataTemp = cbind(BetaBit::FSW, as.data.frame(varsTemp))
      } else {
        if (!all(all.vars(args$content[[3]]) %in% "RAVEN_AGE")) {
          cat("No variable other than `RAVEN_AGE` can appear on the right side of the model formula unless you provide expressions describing how to compute them by specifying the `vars` argument.\n")
          return(invisible(FALSE))
        }
        dataTemp = BetaBit::FSW
      }
      mTemp = tryCatch(
        lm(args$content, dataTemp),
        error = function(e) {
          cat("Trying to estimate the regression model caused an error. Probably there's something wrong with the model formula you provided.\n\n")
          print(e)
          return(NULL)
        }
      )
      if (is.null(mTemp)) {
        return(invisible(FALSE))
      }
      if (deviance(mTemp) <= .[[7]]) {
        cat(textsRegression$congratulations)
        return(invisible(TRUE))
      } else {
        cat("Try to change something - your model should fit the data better.")
        return(invisible(FALSE))
      }
    # invalid `subject`
    } else {
      cat("Please check the subject. Something is wrong there!")
      return(invisible(NULL))
    }
  } else if ("hint" %in% names(args)) {
    cat("Just type `regression()` into the console and hit `enter` :)")
    return(invisible(FALSE))
  }
  return(invisible(NULL))
}
/scratch/gouwar.j/cran-all/cranData/BetaBit/R/regression.R
#' The vector of 1000 most popular passwords.
#'
#' The character vector of the 1000 most commonly used passwords.
#' It is sorted by frequency of usage. The first passwords in the vector are
#' the most frequently used.
#'
#' @docType data
#' @keywords datasets
#' @name top1000passwords
#' @format a character vector with 1000 elements.
NULL
/scratch/gouwar.j/cran-all/cranData/BetaBit/R/top1000passwords.R
#' The vector of 100 most common words in English.
#'
#' The character vector of the 100 most commonly used words in English.
#' It is sorted by frequency of usage.
#' May be used to refine the transliteration.
#'
#' @docType data
#' @keywords datasets
#' @name top100commonWords
#' @usage data(top100commonWords)
#' @format a character vector with 100 elements.
NULL
/scratch/gouwar.j/cran-all/cranData/BetaBit/R/top100commonWords.R
#' The data frame containing labels of the variables from \code{dataDNiP} and
#' \code{DNiP} datasets.
#'
#' @docType data
#' @keywords datasets
#' @name varLabels
#' @format data frame: 54 obs. of 2 variables
NULL
/scratch/gouwar.j/cran-all/cranData/BetaBit/R/varLabels.R
#' List with quotes in 18 languages.
#'
#' The named list with quotes in 18 languages. Based on <https://wikiquote.org/>.
#'
#' @docType data
#' @keywords datasets
#' @name wikiquotes
#' @usage data(wikiquotes)
#' @format a named list with 18 elements.
NULL
/scratch/gouwar.j/cran-all/cranData/BetaBit/R/wikiquotes.R
.onAttach <- function(...) {
  BB.start = "
 _____     _              _    _____ _ _      _____
| __  |___| |_ ___   ___ ___ _| |  | __  |_| |_   |   __|___ _____ ___ ___
| __ -| -_|  _| .'| | .'|   | . |  | __ -| |  _|  |  |  | .'|     | -_|_ -|
|_____|___|_| |__,| |__,|_|_|___|  |_____|_|_|    |_____|__,|_|_|_|___|___|

Choose your game. Just type the name of the selected game in the console.
It's a function, so do not forget about parentheses!

1. proton()
2. frequon()
3. regression()
"
  packageStartupMessage(BB.start)
}

dcode <- function(tex) {
  tmp1 <- c(LETTERS, letters)
  tmp2 <- setdiff(unique(unlist(strsplit(tex, split = ""))), tmp1)
  let <- c(tmp1, tmp2)
  names(let) <- c(rev(tmp1), tmp2)
  sapply(strsplit(tex, split = ""), function(x) {
    paste(let[x], collapse = "")
  })
}

.pouch <- new.env()
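
# dcode() above is a simple substitution cipher over the 52 upper- and
# lower-case letters: the alphabet is paired with its own reversal, so the
# mapping is an involution (applying it twice restores the input). A quick
# sanity check, assuming access to the unexported function:
if (FALSE) {
  dcode("Abc")          # "zYX" - letter-for-letter substitution
  dcode(dcode("Abc"))   # "Abc" - decoding is its own inverse
}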
/scratch/gouwar.j/cran-all/cranData/BetaBit/R/zzz.R
betapwr <- function(mu0, sd0, mu1, sampsize, trials, seed, link.type,
                    equal.precision, sd1, sig.level) {
  betapwr.base <- function(seed) {
    # Set seed
    set.seed(seed)
    # Set parameters
    phi <- ((mu0 * (1 - mu0)) / (sd0 * sd0)) - 1
    if (phi < 0) {
      stop("phi must be greater than 0")
    }
    a0 <- mu0 * phi
    b0 <- (1 - mu0) * phi
    if (equal.precision == TRUE) {
      a1 <- mu1 * phi
      b1 <- (1 - mu1) * phi
    } else {
      if (is.null(sd1) == TRUE) {
        stop("sd1 is missing under the unequal dispersion parameter assumption")
      } else {
        phi1 <- ((mu1 * (1 - mu1)) / (sd1 * sd1)) - 1
        if (phi1 < 0) {
          stop("phi1 must be greater than 0")
        }
        a1 <- mu1 * phi1
        b1 <- (1 - mu1) * phi1
      }
    }
    Y.H0 <- cbind(rep(1:sampsize, trials), rep(1:trials, rep(sampsize, trials)),
                  rbeta(sampsize * trials, a0, b0))
    Y.Ha <- cbind(rep(1:sampsize, trials), rep(1:trials, rep(sampsize, trials)),
                  rbeta(sampsize * trials, a1, b1))
    # Combine Y.H0 and Y.Ha
    Y.mat <- rbind(Y.H0, Y.Ha)
    colnames(Y.mat) <- c("sample", "trials", "y")
    tmt <- c(rep(0, (trials * sampsize)), rep(1, (trials * sampsize)))
    # Combine "sample trial y" with "tmt" (0/1)
    # Set the simulation matrix as sim, ordered by trials
    sim <- data.frame(Y.mat, tmt)
    if (max(sim[, 3]) > (1 - 1e-16) | min(sim[, 3]) < 1e-16) {
      sim[, 3] <- (sim[, 3] * (sampsize - 1) + 0.5) / sampsize
    }
    if (link.type == "wilcoxon") {
      outtest <- sapply(1:trials, function(i) {
        sub.sim <- subset(sim, trials == i)
        out.wil <- wilcox.test(sub.sim[which(sub.sim[, 4] == 0), 3],
                               sub.sim[which(sub.sim[, 4] == 1), 3])
        return(as.numeric(out.wil$p.value))
      })
      Power <- mean(as.numeric(outtest < sig.level))
    } else {
      outtest <- sapply(1:trials, function(i) {
        sub.sim <- subset(sim, trials == i)
        X <- cbind(rep(1, nrow(sub.sim)), sub.sim$tmt)
        colnames(X) <- c("(Intercept)", "tmt")
        fit1 <- suppressWarnings(do.call(betareg::betareg.fit,
                                         list(x = X, y = as.numeric(sub.sim$y),
                                              link = link.type, type = "ML")))
        cf <- as.vector(do.call("c", fit1$coefficients))
        se <- sqrt(diag(fit1$vcov))
        wald.pvalue <- 2 * pnorm(-abs(cf / se))[2]
        return(wald.pvalue)
      })
      Power <- mean(as.numeric(outtest < sig.level))
    }
    return(Power)
  }
  seed.new <- seed
  Power <- tryCatch(betapwr.base(seed.new), error = function(e) {return(NA)})
  while (is.na(Power[1])) {
    seed.new <- seed.new + 1
    Power <- tryCatch(betapwr.base(seed.new), error = function(e) {return(NA)})
  }
  return(Power)
}

#' @export
print.betapower <- function(x, ...) {
  cat(" Two beta-distributed samples power calculation\n")
  cat("\n mu0 = ", x$mu0, "\n sd0 = ", x$sd0, "\n")
  if (x$equal.precision == FALSE) {
    cat(" sd1 = ", x$sd1, "\n")
  }
  cat(" sig.level = ", x$sig.level, "\n number of trials = ", x$trials,
      "\n link.type = ", x$link.type, "\n \n")
  cat(" Estimated power\n")
  print.default(x$Power.matrix, ...)
}

#' @export
plot.betapower <- function(x, ..., link.type, by) {
  betapower.matrix <- data.frame(x$Power.matrix, check.names = FALSE)
  if (link.type[1] == "all") {
    link.type <- c("logit", "probit", "cloglog", "log", "loglog")
  }
  if (by != "linktype" & by != "samplesize" & by != "mu1") {
    stop("Wrong plot type")
  }
  name.plot <- c(paste0("beta regression(", link.type, ")"), "Wilcoxon")
  output.name.plot <- c(paste0("Power: beta regression(", link.type, ")"),
                        "Power: Wilcoxon")
  input.data <- reshape(betapower.matrix, varying = name.plot, v.names = "power",
                        timevar = "subj", times = output.name.plot,
                        direction = "long",
                        new.row.names = c(1:(length(name.plot) * nrow(betapower.matrix))))
  if (by == "linktype") {
    input.data$`sample size` <- as.factor(input.data$`sample size`)
    levels(input.data$`sample size`) <- paste0("sample size = ", levels(input.data$`sample size`))
    Labels <- as.factor(input.data$`sample size`)
    input.data$subj <- as.factor(input.data$subj)
    g <- ggplot2::ggplot(data = input.data,
                         ggplot2::aes_string(x = "mu1", y = "power", colour = "Labels")) +
      ggplot2::geom_line(ggplot2::aes(linetype = Labels)) +
      ggplot2::geom_point(ggplot2::aes(shape = Labels)) +
      ggplot2::ylab("Power") + ggplot2::xlab("mu1") +
      ggplot2::facet_grid(~ input.data$subj)
  } else if (by == "samplesize") {
    Labels <- as.factor(input.data$subj)
    input.data$`sample size` <- as.factor(input.data$`sample size`)
    levels(input.data$`sample size`) <- paste0("sample size = ", levels(input.data$`sample size`))
    g <- ggplot2::ggplot(data = input.data,
                         ggplot2::aes_string(x = "mu1", y = "power", colour = "Labels")) +
      ggplot2::geom_line(ggplot2::aes(linetype = Labels)) +
      ggplot2::geom_point(ggplot2::aes(shape = Labels)) +
      ggplot2::ylab("Power") + ggplot2::xlab("mu1") +
      ggplot2::facet_grid(~ input.data$`sample size`)
  } else if (by == "mu1") {
    Labels <- as.factor(input.data$subj)
    input.data$mu1 <- as.factor(input.data$mu1)
    levels(input.data$mu1) <- paste0("mu1 = ", levels(input.data$mu1))
    g <- ggplot2::ggplot(data = input.data,
                         ggplot2::aes_string(x = input.data[, "sample size"], y = "power", colour = "Labels")) +
      ggplot2::geom_line(ggplot2::aes(linetype = Labels)) +
      ggplot2::geom_point(ggplot2::aes(shape = Labels)) +
      ggplot2::ylab("Power") + ggplot2::xlab("Sample Size") +
      ggplot2::facet_grid(~ input.data$mu1)
  }
  text_size <- 12
  panel_spacing <- 1
  g + ggplot2::theme(
    axis.line = ggplot2::element_blank(),
    axis.text.x = ggplot2::element_text(size = text_size * 0.8, lineheight = 0.9, vjust = 1),
    axis.text.y = ggplot2::element_text(size = text_size * 0.8, lineheight = 0.9, hjust = 1),
    axis.ticks = ggplot2::element_line(colour = "black", size = 0.2),
    axis.title.x = ggplot2::element_text(size = text_size, vjust = 1),
    axis.title.y = ggplot2::element_text(size = text_size, angle = 90, vjust = 0.5),
    axis.ticks.length = ggplot2::unit(0.3, "lines"),
    legend.justification = c(1, 0),
    legend.background = ggplot2::element_rect(colour = NA),
    legend.key = ggplot2::element_rect(colour = "grey80"),
    legend.key.size = ggplot2::unit(1.2, "lines"),
    legend.text = ggplot2::element_text(size = text_size * 0.8),
    legend.title = ggplot2::element_text(size = text_size * 0.8, face = "bold", hjust = 0),
    legend.position = "right",
    panel.background = ggplot2::element_rect(fill = "white", colour = NA),
    panel.border = ggplot2::element_rect(fill = NA, colour = "grey50"),
    panel.grid.major = ggplot2::element_line(colour = "grey90", size = 0.2),
    panel.grid.minor = ggplot2::element_line(colour = "grey98", size = 0.5),
    panel.spacing = ggplot2::unit(panel_spacing, "lines"),
    aspect.ratio = 2,
    plot.background = ggplot2::element_rect(colour = NA),
    plot.title = ggplot2::element_text(size = text_size * 1.2),
    plot.margin = ggplot2::unit(c(1, 1, 0.5, 0.5), "lines"),
    strip.background = ggplot2::element_rect(fill = "grey", size = 1)
  ) + ggplot2::scale_y_continuous(breaks = seq(0, 1, 0.1))
}

#' @title Find Power with Beta distribution
#' @description Find the power for a given sample size when testing the null
#' hypothesis that the means for the control and treatment groups are equal
#' against a two-sided alternative.
#' @details The betapower function allows you to control the number of trials
#' in the simulation, the sample sizes used, and the alternative means.
#' You can fix the alternative and vary the sample size to match a desired power;
#' you can fix the sample size and vary the alternative to see which will match
#' a desired power; or you can vary both.
#' Start with a small number of trials (say 100) to determine the rough range
#' of sample sizes or alternatives, then use a larger number of trials (say
#' 1000) to get better estimates.\cr
#' The plot function returns different plots depending on the `by` argument.
#' The type of link used in the beta regression can be one or more of the
#' following: "logit", "probit", "cloglog", "cauchit", "log", "loglog", "all".\cr
#' by = "linktype": return graphs that plot power against mu1,
#' where mu1 is the mean for the treatment group under the alternative.
#' The number of plots varies depending on the number of link types selected,
#' with the last plot showing power based on the Wilcoxon Rank Sum Test.
#' The first one or several plots show comparisons of power with different
#' sample sizes, using the GLM method with one or several link types.
#' The last plot shows a comparison of the power with different sample sizes
#' using the Wilcoxon Rank Sum Test.
#' The Y-axis denotes power and the X-axis denotes mu1, the mean for the
#' treatment group under the alternative.\cr
#' by = "samplesize": return a number of plots equal to the number of sample
#' sizes tested. Each plot compares power calculated with different link types
#' and the Wilcoxon Rank Sum Test.
#' The Y-axis denotes power and the X-axis denotes mu1, the mean for the
#' treatment group under the alternative.\cr
#' by = "mu1": return a number of plots equal to the number of mu1 used in the
#' procedure. Each plot compares power calculated with different link types
#' and the Wilcoxon Rank Sum Test.
#' The Y-axis denotes power and the X-axis denotes sample size.\cr
#' @usage betapower(mu0, sd0, mu1.start, mu1.end = NULL, mu1.by = NULL,
#' ss.start, ss.end = NULL, ss.by = NULL, sig.level = 0.05,
#' trials = 100, seed = 1, link.type="logit",
#' equal.precision=TRUE, sd1 = NULL)
#' @param mu0 mean for the control group
#' @param sd0 standard deviation for the control group
#' @param mu1.start starting value of mean for the treatment group under the alternative mu1
#' @param mu1.end ending value of mean for the treatment group under the alternative mu1
#' @param mu1.by step length of mean for the treatment group under the alternative mu1
#' @param ss.start starting value of sample size
#' @param ss.end ending value of sample size
#' @param ss.by step length of sample size
#' @param sig.level significance level of the test; default value is 0.05
#' @param trials number of trials
#' @param seed seed used in the simulation
#' @param link.type type of link used in the beta regression. Default value is
#' "logit", or you can use "all" or choose one or more of the following:
#' "logit", "probit", "cloglog", "cauchit", "log", "loglog"
#' @param equal.precision equal dispersion parameter assumption in simulation
#' @param sd1 standard deviation for the treatment group. Only applicable when equal.precision = FALSE
#' @return Return a betapower object including basic settings (mean and standard deviation for the control group,
#' significance level, number of trials and link types), and a matrix of estimated power with given sample size and mu1.
#' \item{beta regression(link name)}{estimated power using the beta regression method; the power for every link is returned if you use link.type = "all".}
#' \item{Wilcoxon}{estimated power from the Wilcoxon Rank Sum test.}
#' \item{sample size}{sample size.}
#' \item{mu1}{mean for the treatment group under the alternative.}
#' @examples
#' BPmat <- betapower(mu0 = 0.56, sd0 = 0.255, mu1.start = .70, mu1.end = .75, mu1.by = .05,
#' ss.start = 30, ss.end = 50, ss.by = 20, trials = 100)
#' ## show the results
#' BPmat
#' ## add plot
#' plot(BPmat, link.type = "logit", by = "mu1")
#' @importFrom stats rbeta wilcox.test pnorm reshape
#' @export
betapower <- function(mu0, sd0, mu1.start, mu1.end = NULL, mu1.by = NULL,
                      ss.start, ss.end = NULL, ss.by = NULL, sig.level = 0.05,
                      trials = 100, seed = 1, link.type = "logit",
                      equal.precision = TRUE, sd1 = NULL) {
  message("An updated version is available on CRAN as PASSED.\nPlease check CRAN for more details.")
  # define link.type = "all"
  if (link.type[1] == "all") {
    link.type <- c("logit", "probit", "cloglog", "log", "loglog")
  }
  # if mu1.end & mu1.by are NULL, set mu1.end to mu1.start
  if (is.null(mu1.end) & is.null(mu1.by)) {
    mu1.end <- mu1.start
    mu1.by <- 0
  }
  # if ss.end & ss.by are NULL, set ss.end to ss.start
  if (is.null(ss.end) & is.null(ss.by)) {
    ss.end <- ss.start
    ss.by <- 0
  }
  Power.matrix <- pbapply::pbmapply(function(mu1, ss) {
    Power.PAR <- sapply(link.type, function(link.type.unit) {
      return(do.call("betapwr", list(mu0 = mu0, sd0 = sd0, mu1 = mu1,
                                     sampsize = ss, trials = trials, seed = seed,
                                     link.type = link.type.unit,
                                     equal.precision = equal.precision,
                                     sd1 = sd1, sig.level = sig.level)))
    })
    Power.NPAR <- do.call("betapwr", list(mu0 = mu0, sd0 = sd0, mu1 = mu1,
                                          sampsize = ss, trials = trials, seed = seed,
                                          link.type = "wilcoxon",
                                          equal.precision = equal.precision,
                                          sd1 = sd1, sig.level = sig.level))
    power.unit <- c(Power.PAR, Power.NPAR, ss, mu1)
    return(power.unit)
  },
  rep(seq(mu1.start, mu1.end, mu1.by), length(seq(ss.start, ss.end, ss.by))),
  rep(seq(ss.start, ss.end, ss.by),
      rep(length(seq(mu1.start, mu1.end, mu1.by)), length(seq(ss.start, ss.end, ss.by)))))
  Power.matrix <- matrix(Power.matrix, ncol = (length(link.type) + 3), byrow = TRUE)
  Power.names <- paste0("beta regression(", link.type, ")")
  colnames(Power.matrix) <- c(Power.names, "Wilcoxon", "sample size", "mu1")
  Power.list <- list(Power.matrix = Power.matrix, mu0 = mu0, sd0 = sd0,
                     trials = trials, link.type = link.type,
                     equal.precision = equal.precision, sd1 = sd1,
                     sig.level = sig.level)
  class(Power.list) <- "betapower"
  # output power table
  return(Power.list)
}
/scratch/gouwar.j/cran-all/cranData/BetaPASS/R/betapower.R
betapwr <- function(mu0, sd0, mu1, sampsize, trials, seed, link.type,
                    equal.precision, sd1, sig.level) {
  betapwr.base <- function(seed) {
    # Set seed
    set.seed(seed)
    # Set parameters
    phi <- ((mu0 * (1 - mu0)) / (sd0 * sd0)) - 1
    if (phi < 0) {
      stop("phi must be greater than 0")
    }
    a0 <- mu0 * phi
    b0 <- (1 - mu0) * phi
    if (equal.precision == TRUE) {
      a1 <- mu1 * phi
      b1 <- (1 - mu1) * phi
    } else {
      if (is.null(sd1) == TRUE) {
        stop("sd1 is missing under the unequal dispersion parameter assumption")
      } else {
        phi1 <- ((mu1 * (1 - mu1)) / (sd1 * sd1)) - 1
        if (phi1 < 0) {
          stop("phi1 must be greater than 0")
        }
        a1 <- mu1 * phi1
        b1 <- (1 - mu1) * phi1
      }
    }
    Y.H0 <- cbind(rep(1:sampsize, trials), rep(1:trials, rep(sampsize, trials)),
                  rbeta(sampsize * trials, a0, b0))
    Y.Ha <- cbind(rep(1:sampsize, trials), rep(1:trials, rep(sampsize, trials)),
                  rbeta(sampsize * trials, a1, b1))
    # Combine Y.H0 and Y.Ha
    Y.mat <- rbind(Y.H0, Y.Ha)
    colnames(Y.mat) <- c("sample", "trials", "y")
    tmt <- c(rep(0, (trials * sampsize)), rep(1, (trials * sampsize)))
    # Combine "sample trial y" with "tmt" (0/1)
    # Set the simulation matrix as sim, ordered by trials
    sim <- data.frame(Y.mat, tmt)
    if (max(sim[, 3]) > (1 - 1e-16) | min(sim[, 3]) < 1e-16) {
      sim[, 3] <- (sim[, 3] * (sampsize - 1) + 0.5) / sampsize
    }
    if (link.type == "wilcoxon") {
      outtest <- sapply(1:trials, function(i) {
        sub.sim <- subset(sim, trials == i)
        out.wil <- wilcox.test(sub.sim[which(sub.sim[, 4] == 0), 3],
                               sub.sim[which(sub.sim[, 4] == 1), 3])
        return(as.numeric(out.wil$p.value))
      })
      Power <- mean(as.numeric(outtest < sig.level))
    } else {
      outtest <- sapply(1:trials, function(i) {
        sub.sim <- subset(sim, trials == i)
        X <- cbind(rep(1, nrow(sub.sim)), sub.sim$tmt)
        colnames(X) <- c("(Intercept)", "tmt")
        fit1 <- suppressWarnings(do.call(betareg::betareg.fit,
                                         list(x = X, y = as.numeric(sub.sim$y),
                                              link = link.type, type = "ML")))
        cf <- as.vector(do.call("c", fit1$coefficients))
        se <- sqrt(diag(fit1$vcov))
        wald.pvalue <- 2 * pnorm(-abs(cf / se))[2]
        return(wald.pvalue)
      })
      Power <- mean(as.numeric(outtest < sig.level))
    }
    return(Power)
  }
  seed.new <- seed
  Power <- tryCatch(betapwr.base(seed.new), error = function(e) {return(NA)})
  while (is.na(Power[1])) {
    seed.new <- seed.new + 1
    Power <- tryCatch(betapwr.base(seed.new), error = function(e) {return(NA)})
  }
  return(Power)
}

sample.size.mid <- function(mu0, sd0, mu1, power.min, sig.level, trials, delta,
                            seed, link.type, equal.precision, sd1) {
  # use a two-sample t test to get a starting value estimate
  sample.size.starting <- round(do.call("power.t.test",
                                        list(delta = (mu1 - mu0), sd = sd0,
                                             sig.level = sig.level,
                                             power = power.min))$n, 0)
  # step 1: get an interval of sample sizes [ss.lower, ss.upper] which
  # satisfies power.ss.lower < target power < power.ss.upper
  reach.flag <- 0
  ss.lower <- ss.upper <- sample.size.starting
  power.lower <- power.upper <- do.call("betapwr",
                                        list(mu0 = mu0, sd0 = sd0, mu1 = mu1,
                                             sampsize = sample.size.starting,
                                             trials = trials, seed = seed,
                                             link.type = link.type,
                                             equal.precision = equal.precision,
                                             sd1 = sd1, sig.level = sig.level))
  while (reach.flag == 0) {
    if (power.lower > power.min) {
      ss.upper <- ss.lower
      power.upper <- power.lower
      # sample size should be greater than or equal to 3
      ss.lower <- max(floor(ss.lower / 2), 3)
      power.lower <- do.call("betapwr",
                             list(mu0 = mu0, sd0 = sd0, mu1 = mu1,
                                  sampsize = ss.lower, trials = trials,
                                  seed = seed, link.type = link.type,
                                  equal.precision = equal.precision,
                                  sd1 = sd1, sig.level = sig.level))
    }
    if (power.upper < power.min) {
      ss.lower <- ss.upper
      power.lower <- power.upper
      ss.upper <- sample.size.starting * 2
      power.upper <- do.call("betapwr",
                             list(mu0 = mu0, sd0 = sd0, mu1 = mu1,
                                  sampsize = ss.upper, trials = trials,
                                  seed = seed, link.type = link.type,
                                  equal.precision = equal.precision,
                                  sd1 = sd1, sig.level = sig.level))
    }
    if ((power.lower <= power.min & power.min <= power.upper) | (ss.upper <= 3)) {
      reach.flag <- 1
    }
  }
  # step 2: find the exact sample size by bisection
  reach.flag <- 0
  while (reach.flag == 0) {
    if ((ss.upper - ss.lower) <= delta) {
      reach.flag <- 1
      sample.size.min <- ss.upper
      power.output <- power.upper
    } else {
      sample.size.midpt <- (ss.upper + ss.lower) %/% 2
      power.midpt <- do.call("betapwr",
                             list(mu0 = mu0, sd0 = sd0, mu1 = mu1,
                                  sampsize = sample.size.midpt, trials = trials,
                                  seed = seed, link.type = link.type,
                                  equal.precision = equal.precision,
                                  sd1 = sd1, sig.level = sig.level))
      if (power.midpt < power.min) {
        ss.lower <- sample.size.midpt
        power.lower <- power.midpt
      } else {
        ss.upper <- sample.size.midpt
        power.upper <- power.midpt
      }
    }
  }
  output <- matrix(c(sample.size.min, power.output), nrow = 1)
  colnames(output) <- c("minimum sample size", "minimum power")
  return(output)
}

#' @export
print.samplesize <- function(x, ...) {
  cat(" Two beta-distributed samples sample size calculation\n")
  cat("\n mu0 = ", x$mu0, "\n sd0 = ", x$sd0, "\n")
  if (x$equal.precision == FALSE) {
    cat(" sd1 = ", x$sd1, "\n")
  }
  cat(" sig.level = ", x$sig.level, "\n number of trials = ", x$trials, "\n \n")
  cat(" Minimum sample size(corresponding power)\n")
  betareg.links <- setdiff(x$method, "wilcoxon")
  print.minss <- NULL
  if (length(betareg.links) > 0) {
    betareg.minss <- mapply(function(i, j)
      return(paste0(x$Power.matrix[i, paste("minimum sample size:", j)], "(",
                    x$Power.matrix[i, paste("minimum power:", j)], ")")),
      rep(1:nrow(x$Power.matrix), length(betareg.links)),
      rep(betareg.links, rep(nrow(x$Power.matrix), length(betareg.links))))
    betareg.minss <- matrix(betareg.minss, nrow = nrow(x$Power.matrix))
    betareg.links <- paste0("beta regression(", betareg.links, ")")
    colnames(betareg.minss) <- betareg.links
    print.minss <- cbind(print.minss, betareg.minss)
  }
  if ("wilcoxon" %in% x$method) {
    Wilcoxon <- sapply(1:nrow(x$Power.matrix), function(i)
      return(paste0(paste(x$Power.matrix[i, grep("wilcoxon", colnames(x$Power.matrix))],
                          sep = "", collapse = "("), ")")))
    print.minss <- cbind(print.minss, Wilcoxon)
  }
  if (nrow(print.minss) == 1) {
    print.minss <- cbind(print.minss,
                         matrix(x$Power.matrix[, c("target power", "mu1")], nrow = 1))
    colnames(print.minss)[(ncol(print.minss) - 1):ncol(print.minss)] <- c("target power", "mu1")
  } else {
    print.minss <- cbind(print.minss, x$Power.matrix[, c("target power", "mu1")])
  }
  print.noquote(print.minss, ...)
}

#' @export
plot.samplesize <- function(x, ..., link.type) {
  SS.matrix <- data.frame(x$Power.matrix, check.names = FALSE)
  if (link.type[1] == "all") {
    link.type <- c("logit", "probit", "cloglog", "log", "loglog")
  }
  name.plot <- c(paste("minimum sample size:", link.type))
  output.name.plot <- c(paste0("beta regression(", link.type, ")"))
  output.name.plot[grep("wilcoxon", link.type)]
  # combine minimum sample size
  input.data <- reshape(SS.matrix, varying = name.plot, v.names = "SS",
                        timevar = "subj", times = output.name.plot,
                        direction = "long",
                        new.row.names = c(1:(length(name.plot) * nrow(SS.matrix))))
  # combine minimum power
  Power.loc.col <- rep(c(1:length(link.type)), rep(nrow(SS.matrix), length(link.type)))
  minimum.power <- sapply(1:nrow(input.data),
                          function(i) return(input.data[i, Power.loc.col[i]]))
  input.data <- cbind(input.data, minimum.power)
  text_size <- 12
  panel_spacing <- 1
  Labels <- as.factor(input.data$subj)
  input.data$mu1 <- as.factor(input.data$mu1)
  levels(input.data$mu1) <- paste0("mu1 = ", levels(input.data$mu1))
  ggplot2::ggplot(data = input.data,
                  ggplot2::aes_string(x = input.data[, "minimum.power"], y = "SS", colour = "Labels")) +
    ggplot2::geom_line(ggplot2::aes(linetype = Labels)) +
    ggplot2::geom_point(ggplot2::aes(shape = Labels)) +
    ggplot2::ylab("Minimum Sample Size") + ggplot2::xlab("Minimum Power") +
    ggplot2::facet_wrap(~ input.data$mu1, scales = "free") +
    ggplot2::theme(
      axis.line = ggplot2::element_blank(),
      axis.text.x = ggplot2::element_text(size = text_size * 0.8, lineheight = 0.9, vjust = 1),
      axis.text.y = ggplot2::element_text(size = text_size * 0.8, lineheight = 0.9, hjust = 1),
      axis.ticks = ggplot2::element_line(colour = "black", size = 0.2),
      axis.title.x = ggplot2::element_text(size = text_size, vjust = 1),
      axis.title.y = ggplot2::element_text(size = text_size, angle = 90, vjust = 0.5),
      axis.ticks.length = ggplot2::unit(0.3, "lines"),
      legend.justification = c(1, 0),
      legend.background = ggplot2::element_rect(colour = NA),
      legend.key = ggplot2::element_rect(colour = "grey80"),
      legend.key.size = ggplot2::unit(1.2, "lines"),
      legend.text = ggplot2::element_text(size = text_size * 0.8),
      legend.title = ggplot2::element_text(size = text_size * 0.8, face = "bold", hjust = 0),
      legend.position = "right",
      panel.background = ggplot2::element_rect(fill = "white", colour = NA),
      panel.border = ggplot2::element_rect(fill = NA, colour = "grey50"),
      panel.grid.major = ggplot2::element_line(colour = "grey90", size = 0.2),
      panel.grid.minor = ggplot2::element_line(colour = "grey98", size = 0.5),
      panel.spacing = ggplot2::unit(panel_spacing, "lines"),
      aspect.ratio = 2,
      plot.background = ggplot2::element_rect(colour = NA),
      plot.title = ggplot2::element_text(size = text_size * 1.2),
      plot.margin = ggplot2::unit(c(1, 1, 0.5, 0.5), "lines"),
      strip.background = ggplot2::element_rect(fill = "grey", size = 1)
    )
}

#' @title Find minimum sample size with Beta distribution
#' @description Find minimum sample sizes with Beta distribution and given mu0, sd0, mu1 and target powers.
#' @details The samplesize function allows you to control the number of trials
#' in the simulation, the target power, delta, and the alternative means.
#' You can fix the alternative and vary power to match a desired sample size.
#' Use default values for the number of trials for a quick view;
#' use a larger number of trials (say 1000) and a smaller delta (say 1) to get
#' better estimates.\cr
#' The plot function returns a series of plots, one for each mu1 used in the
#' procedure. The type of link used in the beta regression can be one or more
#' of the following: "logit", "probit", "cloglog", "cauchit", "log", "loglog",
#' "all". The Y-axis denotes minimum sample size and the X-axis denotes minimum
#' power.\cr
#' @usage samplesize(mu0, sd0, mu1.start, mu1.end = NULL, mu1.by = NULL,
#' power.start, power.end = NULL, power.by = NULL, sig.level = 0.05,
#' trials = 100, delta = 1, seed = 1, link.type = "logit",
#' equal.precision = TRUE, sd1 = NULL)
#' @param mu0 mean for the control group
#' @param sd0 standard deviation for the control group
#' @param mu1.start starting value of mean for the treatment group under the alternative mu1
#' @param mu1.end ending value of mean for the treatment group under the alternative mu1
#' @param mu1.by step length of mean for the treatment group under the alternative mu1
#' @param power.start starting value of target power
#' @param power.end ending value of target power
#' @param power.by step length of target power
#' @param sig.level significance level; default value is 0.05
#' @param trials number of trials; default value is 100
#' @param delta accuracy of the result; must be an integer
#' @param seed seed used in the simulation
#' @param link.type type of link used in the beta regression. Default is
#' "logit"; other options are "probit", "cloglog", "log", "loglog", "wilcoxon",
#' or "all" for all link types
#' @param equal.precision equal dispersion parameter assumption in simulation
#' @param sd1 standard deviation for the treatment group. Only applicable when equal.precision = FALSE
#' @return Return a samplesize object including basic settings (mean and standard deviation for the control group,
#' significance level, number of trials and link types), and a matrix of estimated power with given mu1 and target power.
#' \item{minimum sample size: link type:}{minimum sample size for given mu0, sd0, mu1, target power and type of link.}
#' \item{minimum power: link type:}{the minimum power greater than or equal to the target power.}
#' \item{target power:}{target power.}
#' \item{mu1:}{mean for the treatment group under the alternative.}
#' @examples
#' SSmat <- samplesize(mu0=0.56, sd0=0.255, mu1.start = 0.75,
#' power.start = 0.8, power.end = 0.9, power.by = 0.1,
#' trials = 25, link.type = c("log","wilcoxon"))
#' ## show the results
#' SSmat
#' ## add plot
#' plot(SSmat, link.type = c("log","wilcoxon"))
#' @importFrom stats rbeta wilcox.test pnorm reshape
#' @export
samplesize <- function(mu0, sd0, mu1.start, mu1.end = NULL, mu1.by = NULL,
                       power.start, power.end = NULL, power.by = NULL,
                       sig.level = 0.05, trials = 100, delta = 1, seed = 1,
                       link.type = "logit", equal.precision = TRUE, sd1 = NULL) {
  message("An updated version is available on CRAN as PASSED.\nPlease check CRAN for more details.")
  # provide an "all" option for link types
  if (link.type[1] == "all") {
    link.type <- c("logit", "probit", "cloglog", "log", "loglog", "wilcoxon")
  }
  # allow single mu1 and power situations
  if (is.null(mu1.end) & is.null(mu1.by)) {
    mu1.end <- mu1.start
    mu1.by <- 0
  }
  if (is.null(power.end) & is.null(power.by)) {
    power.end <- power.start
    power.by <- 0
  }
  # restrict target power to (0,1)
  if (max(c(power.start, power.end)) >= 1 | min(c(power.start, power.end)) <= 0) {
    stop("target power should be within (0,1)")
  }
  Power.matrix <- pbapply::pbmapply(function(mu1, power.target) {
    power.unit <- sapply(link.type, function(link.type.unit) {
      return(as.numeric(do.call("sample.size.mid",
                                list(mu0 = mu0, sd0 = sd0, mu1 = mu1,
                                     power.min = power.target,
                                     sig.level = sig.level, trials = trials,
                                     delta = delta, seed = seed,
                                     link.type = link.type.unit,
                                     equal.precision = equal.precision,
                                     sd1 = sd1))))
    })
    return.unit <- c(power.unit, power.target, mu1)
    return(return.unit)
  },
  rep(seq(mu1.start, mu1.end, mu1.by), length(seq(power.start, power.end, power.by))),
  rep(seq(power.start, power.end, power.by),
      rep(length(seq(mu1.start, mu1.end, mu1.by)), length(seq(power.start, power.end, power.by)))))
  Power.matrix <- matrix(Power.matrix, ncol = (length(link.type) * 2 + 2), byrow = TRUE)
  Power.names <- paste(c("minimum sample size:", "minimum power:"),
                       rep(link.type, rep(2, length(link.type))))
  colnames(Power.matrix) <- c(Power.names, "target power", "mu1")
  Power.list <- list(Power.matrix = Power.matrix, mu0 = mu0, sd0 = sd0,
                     trials = trials, method = link.type,
                     equal.precision = equal.precision, sd1 = sd1,
                     sig.level = sig.level)
  class(Power.list) <- "samplesize"
  return(Power.list)
}
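
# A usage sketch of the bracketing-plus-bisection search implemented above,
# mirroring the @examples block (a small `trials` keeps it fast at the cost
# of noisier Monte Carlo power estimates; these are not package defaults):
if (FALSE) {
  SSmat <- samplesize(mu0 = 0.56, sd0 = 0.255, mu1.start = 0.75,
                      power.start = 0.8, power.end = 0.9, power.by = 0.1,
                      trials = 25, link.type = c("log", "wilcoxon"))
  SSmat                                       # minimum sample size (power)
  plot(SSmat, link.type = c("log", "wilcoxon"))
}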
/scratch/gouwar.j/cran-all/cranData/BetaPASS/R/samplesize.R
## ----setup, include = FALSE---------------------------------------------------
knitr::opts_chunk$set(
  collapse = TRUE,
  comment = "#>"
)

## ---- eval=FALSE---------------------------------------------------------------
#  if (!require(BetaPASS)){
#    devtools::install_github("CastleLi/draft/BetaPASS")
#    Needed_packages <- c("Rcpp","betareg","ggplot2")
#    install.packages(Needed_packages)
#  }

## ---- results='hide'-----------------------------------------------------------
library(BetaPASS)
Power.mat <- betapower(mu0 = 0.56, sd0 = 0.255, mu1.start = .70, mu1.end = .75, mu1.by = .05,
                       ss.start = 30, ss.end = 50, ss.by = 5, trials = 40,
                       seed = 1, link.type = c("logit"))

## ---- echo=FALSE, results='asis'-----------------------------------------------
knitr::kable(Power.mat$Power.matrix)

## ---- fig.show='hold', fig.width = 9, fig.height =6-----------------------------
plot(Power.mat, link.type = "logit", by = "mu1")

## ------------------------------------------------------------------------------
samplesize(mu0=0.56, sd0=0.255, mu1.start = 0.75,
           power.start = 0.8, trials = 40, link.type = c("logit","wilcoxon"))

## ------------------------------------------------------------------------------
SS.mat <- samplesize(mu0=0.56, sd0=0.255, mu1.start = 0.70, mu1.end = 0.75, mu1.by = 0.05,
                     power.start = 0.8, power.end = 0.9, power.by = 0.1,
                     trials = 40, link.type = c("logit","wilcoxon"))

## ---- echo=FALSE, results='asis'-----------------------------------------------
knitr::kable(SS.mat$Power.matrix)

## ---- fig.show='hold', fig.width = 9, fig.height =6-----------------------------
plot(SS.mat, link.type = c("logit","wilcoxon"))
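
## ------------------------------------------------------------------------------
# Supplementary check (not in the original vignette): the moment matching
# quoted in the vignette text. beta(1.56, 1.22) has mean 0.56 and variance
# close to 0.0625, and its CDF reproduces the "Beta model" interval
# probabilities.
a <- 1.56; b <- 1.22
c(mean = a / (a + b), var = a * b / ((a + b)^2 * (a + b + 1)))
round(diff(pbeta(c(0, .25, .5, .75, 1), a, b)), 3)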
/scratch/gouwar.j/cran-all/cranData/BetaPASS/inst/doc/BetaPASS.R
--- title: "Calculating Power and Sample Size with BetaPASS" date: "`r Sys.Date()`" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{Calculating Power and Sample Size with BetaPASS} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ## Important Notes An updated version is available on CRAN as **PASSED**. ```{r setup, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ``` ## Introduction Assume the response variable $Y$ is a proportion or otherwise constrained between 0 and 1 which can be modeled with a beta dbn. This program will help find the power for a given sample size or the sample size given power, when testing the null hypothesis that the means for the control and treatment groups are equal against a two-sided alternative. The user must supply the mean and standard deviation for the control group ($\mu_0$ and $sd_0$) as well as the mean for the treatment group under the alternative, namely $\mu_1$. If $Y \sim beta(a,b)$, then $\mu=\frac{a}{a+b}$ and the variance of $Y$ can be expressed in terms of $\mu$ using the parameter $\phi$ as $Var(Y)=\frac{\mu {(1-\mu)}}{1+\phi}$. The value of $\phi$ is found from $\mu_0$ and $sd_0$. That value is then used to find the variance under the alternative. Given $\mu$ and $\phi$ the parameters $a$ and $b$ can be found from $a = \mu \phi$ and $b=(1-\mu) \phi$. The values of $a$ and $b$ are then used to generate random beta variables for the simulation. ## Calculating Power To illustrate this we use an example. A client proposed a study with an intervention designed to increase 'adherence' to protocols designed to minimize effects of lymphedema (LE). Each patient has her own set of activities to be done on a regular basis. The adherence score is based on a weekly diary where the subject notes which activities were done each day. The client has not used the diary before and has no pilot information. Nevertheless she wants to know what sample size (for the treatment group and the control group) is needed to have 'good power'. The client did find a similar study (using a different diary) that gave the following information: Collectively, BCRL self-care adherence among the 128 women prescribed with one or more lymphedema self-care modality was as follows: 16 (13$\%$) reported a mean of less than 25$\%$ of adherence, 36 (28$\%$) reported a mean of 25$\%$-49$\%$ of adherence, 40 (31$\%$) reported a mean of 50$\%$-74$\%$ of adherence, and 36 (28$\%$) reported a mean of 75$\%$-100$\%$ of adherence. Using the mid-points of the intervals we have the following: | Range | Mid-pt | Prob | |------:|:------:|------| | 0-.25| .125| 0.13| |.25-.50| .375| 0.28| |.50-.75| .625| 0.31| |.75-1.0| .875| 0.28| Treating this as a probability distribution, the mean and variance are 0.56 and 0.0625. A beta distribution with a=1.56 and b=1.22 (or $\mu$=0.56 and $\phi$=2.78) would have the same mean and variance. | Range | Mid-pt | Prob | Beta model| |------:|:------:|------|:---------:| | 0-.25| .125| 0.13| 0.144| |.25-.50| .375| 0.28| 0.263| |.50-.75| .625| 0.31| 0.312| |.75-1.0| .875| 0.28| 0.281| The client thought that a difference of 0.56 in the control group and 0.70 or 0.75 in the treatment group would be clinically important. If the mean and variance of the beta distribution under Ho are 0.56 and 0.0625, what sample sizes would give good power under Ha: $\mu$=0.70 or 0.75? This problem can be addressed parametrically or nonparametrically. Non-parametric method uses Wilcoxon Rank sum test. 
The client thought that a difference of 0.56 in the control group and 0.70 or 0.75 in the treatment group would be clinically important. If the mean and variance of the beta distribution under Ho are 0.56 and 0.0625, what sample sizes would give good power under Ha: $\mu$=0.70 or 0.75?

This problem can be addressed parametrically or nonparametrically. The non-parametric method uses the Wilcoxon rank-sum test. A parametric approach is to assume the underlying distribution is beta, then generate beta data under the null hypothesis and under the alternative and run simulations to estimate the probability of rejecting the null hypothesis based on the GLM method. You can vary the sample size and/or the alternative.

In this case, you can run the following code (if necessary, first install BetaPASS and the prerequisite packages):

```{r, eval=FALSE}
if (!require(BetaPASS)){
  devtools::install_github("CastleLi/draft/BetaPASS")
  Needed_packages <- c("Rcpp","betareg","ggplot2")
  install.packages(Needed_packages)
}
```

Next load BetaPASS, and then use the betapower function:

```{r, results='hide'}
library(BetaPASS)
Power.mat <- betapower(mu0 = 0.56, sd0 = 0.255, mu1.start = .70, mu1.end = .75, mu1.by = .05,
                       ss.start = 30, ss.end = 50, ss.by = 5, trials = 40,
                       seed = 1, link.type = c("logit"))
```

The output gives the estimated power for each sample size and alternative mean combination, for both the parametric and non-parametric approaches.

```{r, echo=FALSE, results='asis'}
knitr::kable(Power.mat$Power.matrix)
```

You can generate plots comparing the power of the Wilcoxon rank-sum test and the GLM method with the following code:

```{r, fig.show='hold', fig.width = 9, fig.height =6}
plot(Power.mat, link.type = "logit", by = "mu1")
```

It appears that the parametric test does better (a saving of about 10% in sample size).

## Calculating Sample Size

You can also calculate the minimum sample size directly for a given power and alternative using the following code:

```{r}
samplesize(mu0=0.56, sd0=0.255, mu1.start = 0.75,
           power.start = 0.8, trials = 40, link.type = c("logit","wilcoxon"))
```

If you want to compare the minimum sample sizes for different powers and alternatives, or different link types, you can use the following code:

```{r}
SS.mat <- samplesize(mu0=0.56, sd0=0.255, mu1.start = 0.70, mu1.end = 0.75, mu1.by = 0.05,
                     power.start = 0.8, power.end = 0.9, power.by = 0.1,
                     trials = 40, link.type = c("logit","wilcoxon"))
```

The output gives the estimated sample size for each target power and alternative mean combination.

```{r, echo=FALSE, results='asis'}
knitr::kable(SS.mat$Power.matrix)
```

You can generate plots comparing the sample sizes with the following code:

```{r, fig.show='hold', fig.width = 9, fig.height =6}
plot(SS.mat, link.type = c("logit","wilcoxon"))
```
/scratch/gouwar.j/cran-all/cranData/BetaPASS/inst/doc/BetaPASS.Rmd
--- title: "Calculating Power and Sample Size with BetaPASS" date: "`r Sys.Date()`" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{Calculating Power and Sample Size with BetaPASS} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ## Important Notes An updated version is available on CRAN as **PASSED**. ```{r setup, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ``` ## Introduction Assume the response variable $Y$ is a proportion or otherwise constrained between 0 and 1 which can be modeled with a beta dbn. This program will help find the power for a given sample size or the sample size given power, when testing the null hypothesis that the means for the control and treatment groups are equal against a two-sided alternative. The user must supply the mean and standard deviation for the control group ($\mu_0$ and $sd_0$) as well as the mean for the treatment group under the alternative, namely $\mu_1$. If $Y \sim beta(a,b)$, then $\mu=\frac{a}{a+b}$ and the variance of $Y$ can be expressed in terms of $\mu$ using the parameter $\phi$ as $Var(Y)=\frac{\mu {(1-\mu)}}{1+\phi}$. The value of $\phi$ is found from $\mu_0$ and $sd_0$. That value is then used to find the variance under the alternative. Given $\mu$ and $\phi$ the parameters $a$ and $b$ can be found from $a = \mu \phi$ and $b=(1-\mu) \phi$. The values of $a$ and $b$ are then used to generate random beta variables for the simulation. ## Calculating Power To illustrate this we use an example. A client proposed a study with an intervention designed to increase 'adherence' to protocols designed to minimize effects of lymphedema (LE). Each patient has her own set of activities to be done on a regular basis. The adherence score is based on a weekly diary where the subject notes which activities were done each day. The client has not used the diary before and has no pilot information. Nevertheless she wants to know what sample size (for the treatment group and the control group) is needed to have 'good power'. The client did find a similar study (using a different diary) that gave the following information: Collectively, BCRL self-care adherence among the 128 women prescribed with one or more lymphedema self-care modality was as follows: 16 (13$\%$) reported a mean of less than 25$\%$ of adherence, 36 (28$\%$) reported a mean of 25$\%$-49$\%$ of adherence, 40 (31$\%$) reported a mean of 50$\%$-74$\%$ of adherence, and 36 (28$\%$) reported a mean of 75$\%$-100$\%$ of adherence. Using the mid-points of the intervals we have the following: | Range | Mid-pt | Prob | |------:|:------:|------| | 0-.25| .125| 0.13| |.25-.50| .375| 0.28| |.50-.75| .625| 0.31| |.75-1.0| .875| 0.28| Treating this as a probability distribution, the mean and variance are 0.56 and 0.0625. A beta distribution with a=1.56 and b=1.22 (or $\mu$=0.56 and $\phi$=2.78) would have the same mean and variance. | Range | Mid-pt | Prob | Beta model| |------:|:------:|------|:---------:| | 0-.25| .125| 0.13| 0.144| |.25-.50| .375| 0.28| 0.263| |.50-.75| .625| 0.31| 0.312| |.75-1.0| .875| 0.28| 0.281| The client thought that a difference of 0.56 in the control group and 0.70 or 0.75 in the treatment group would be clinically important. If the mean and variance of the beta distribution under Ho are 0.56 and 0.0625, what sample sizes would give good power under Ha: $\mu$=0.70 or 0.75? This problem can be addressed parametrically or nonparametrically. Non-parametric method uses Wilcoxon Rank sum test. 
A parametric approach is to assume the underlying distribution is beta. Then generate beta data under the null hypothesis and under the alternative and run simulations to estimate the probability of rejecting the null hypothesis based on GLM method. You can vary the sample size and/or the alternative. In this case, you can run following codes (if necessary, first install the BetaPASS and prerequisite library): ```{r, eval=FALSE} if (!require(BetaPASS)){ devtools::install_github("CastleLi/draft/BetaPASS") Needed_packages <- c("Rcpp","betareg","ggplot") install.packages(Needed_packages) } ``` Next load the BetaPASS, and then use betapower function: ```{r, results='hide'} library(BetaPASS) Power.mat <- betapower(mu0 = 0.56, sd0 = 0.255, mu1.start = .70, mu1.end = .75, mu1.by = .05, ss.start = 30, ss.end = 50, ss.by = 5, trials = 40, seed = 1, link.type = c("logit")) ``` The output will give the estimated power for each sample size and alternative mean combination, for both parametrical and non-parametrical approach. ```{r, echo=FALSE, results='asis'} knitr::kable(Power.mat$Power.matrix) ``` You can generate the plots comparing the power using the Wilcoxon Rank Sum test and GLM method with following codes: ```{r, fig.show='hold', fig.width = 9, fig.height =6} plot(Power.mat, link.type = "logit", by = "mu1") ``` It appears that the parametric test does better(a savings of about 10% in sample size). ## Calculating Sample Size Also you can calculate the minimum sample size directly with given power and alternative using following codes: ```{r} samplesize(mu0=0.56, sd0=0.255, mu1.start = 0.75, power.start = 0.8, trials = 40, link.type = c("logit","wilcoxon")) ``` If you want to compare the minimum sample sizes with different powers and alternatives, or different types of link, you can use following codes: ```{r} SS.mat <- samplesize(mu0=0.56, sd0=0.255, mu1.start = 0.70, mu1.end = 0.75, mu1.by = 0.05, power.start = 0.8, power.end = 0.9, power.by = 0.1, trials = 40, link.type = c("logit","wilcoxon")) ``` The output will give the estimated sample size for each target power and alternative mean combination. ```{r, echo=FALSE, results='asis'} knitr::kable(SS.mat$Power.matrix) ``` You can generate the plots comparing the sample size with following codes: ```{r, fig.show='hold', fig.width = 9, fig.height =6} plot(SS.mat, link.type = c("logit","wilcoxon")) ```
/scratch/gouwar.j/cran-all/cranData/BetaPASS/vignettes/BetaPASS.Rmd
#'Compute Likelihood Ratio Chi-square for Binomial Logistic Regression with up to 10 predictors
#'
#'@param data name of your datafile, loaded
#'@param y dependent variable name
#'@param x1 first predictor variable name
#'@param x2 second predictor variable name
#'@param x3 third predictor variable name
#'@param x4 fourth predictor variable name
#'@param x5 fifth predictor variable name
#'@param x6 sixth predictor variable name
#'@param x7 seventh predictor variable name
#'@param x8 eighth predictor variable name
#'@param x9 ninth predictor variable name
#'@param x10 tenth predictor variable name
#'@param numpred number of predictors
#'@examples
#'LRchi(data=testlog, y="dv", x1="iv1", x2="iv2", numpred=2)
#'@importFrom stats anova binomial glm hatvalues cor lm var
#'@importFrom utils tail
#'@export
#'
#'
LRchi<-function(data=NULL, y=NULL, x1=NULL, x2=NULL, x3=NULL, x4=NULL, x5=NULL,
                x6=NULL, x7=NULL, x8=NULL, x9=NULL, x10=NULL, numpred=NULL){
  # Collect the predictor names actually supplied and build the analysis frame
  preds <- c(x1, x2, x3, x4, x5, x6, x7, x8, x9, x10)[seq_len(numpred)]
  xx <- as.data.frame(dplyr::select(data, dplyr::all_of(c(y, preds))))
  # Full model containing every predictor
  Model <- glm(xx[, 1] ~ ., data = xx[-1], family = binomial())
  # For each predictor, refit the model without it and test the change in deviance
  for (i in seq_len(numpred)) {
    Model.i <- glm(xx[, 1] ~ ., data = xx[-c(1, i + 1)], family = binomial())
    comp <- anova(Model.i, Model, test = "Chisq")
    LR <- round(comp$Deviance[2], 2)
    p  <- round(comp$`Pr(>Chi)`[2], 2)
    message("Predictor: ", preds[i], "; LR chi-square ", LR, ", p = ", p)
  }
}
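# Illustrative check (not part of the package): for the bundled 'testlog' data,
# the drop-one test for iv1 reported above can be reproduced by hand:
#   full    <- glm(dv ~ iv1 + iv2, data = testlog, family = binomial())
#   reduced <- glm(dv ~ iv2, data = testlog, family = binomial())
#   anova(reduced, full, test = "Chisq")  # Deviance column is the LR chi-square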
/scratch/gouwar.j/cran-all/cranData/BetterReg/R/LRchi.R
#'Compute Mahalanobis Distance for Multiple Regression
#'
#'@param model name of model
#'@param pred number of predictors
#'@param values number of Mahal values to print (highest values). Default is 5
#'
#'@examples
#'mymodel<-lm(y~x1+x2+x3+x4, testreg)
#'Mahal(model=mymodel, pred=4, values = 10)
#'
#'@return Mahalanobis Distance to detect MV outliers
#'@export
#'
#'
Mahal<-function(model=NULL, pred=NULL, values=5){
  hat<-hatvalues(model)            # leverage for each observation
  n<-model$df.residual + pred + 1  # recover sample size from residual df
  mah<-((n-1)*((hat)))-1           # convert leverage to Mahalanobis distance
  tail(sort(mah),values)           # report the 'values' largest distances
}
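# Background note (illustrative, not part of the package): leverage and
# Mahalanobis distance are related by h = 1/n + D^2/(n - 1), so
# D^2 = (n - 1)*(h - 1/n); the function above uses the large-n shortcut
# (n - 1)*h - 1. The exact version can be checked with:
#   mymodel <- lm(y ~ x1 + x2 + x3 + x4, testreg)
#   h <- hatvalues(mymodel); n <- length(h)
#   tail(sort((n - 1) * (h - 1/n)), 10)  # ten largest exact distances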
/scratch/gouwar.j/cran-all/cranData/BetterReg/R/Mahal.R
#'R-square change for Hierarchical Multiple Regression
#'@param model1 first regression model
#'@param model2 second regression model
#'@examples
#'mymodel1<-lm(y~x1+x2, data=testreg)
#'mymodel2<-lm(y~x1+x2+x3+x4, data=testreg)
#'R2change(model1=mymodel1, model2=mymodel2)
#'@export

R2change<-function(model1=NULL, model2=NULL){
  comp<-stats::anova(model1,model2)   # compare the nested models
  df1<-comp$Df[2]                     # numerator df: predictors added in model2
  df2<-model2$df.residual             # denominator df: residual df of model2
  m1<-summary(model1)
  m2<-summary(model2)
  r2.1<-m1$r.squared
  r2.2<-m2$r.squared
  R2change<-round((r2.2-r2.1),3)
  Fchange<-round((comp$F[2]),3)       # F for the R-square change (avoids masking F)
  p<-comp$`Pr(>F)`[2]
  message("R-square change = ", R2change)
  message("F(", df1,",", df2,") = ", Fchange, ", p = ", p)
}
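# Equivalence note (illustrative): the F reported by anova(model1, model2)
# matches the textbook R-square-change test,
#   F = ((R2_2 - R2_1)/df1) / ((1 - R2_2)/df2),
# where df1 is the number of predictors added in model2 and df2 is the
# residual df of model2.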
/scratch/gouwar.j/cran-all/cranData/BetterReg/R/R2change.R
#' testreg #' #' A dataset to test regression functions #' #' @format A data frame with 1000 rows and 6 variables: #' \describe{ #' \item{y}{DV} #' \item{x1}{1st predictor} #' \item{x2}{2nd predictor} #' \item{x3}{3rd predictor} #' \item{x4}{4th predictor} #' \item{x5}{5th predictor} #' } #' "testreg" #' #' testlog #' #' A dataset to test logistic regression functions #' #' @format A data frame with 164 rows and 11 variables: #' \describe{ #' \item{dv}{DV} #' \item{iv1}{1st predictor} #' \item{iv2}{2nd predictor} #' \item{iv3}{3rd predictor} #' \item{iv4}{4th predictor} #' \item{iv5}{5th predictor} #' \item{iv6}{6th predictor} #' \item{iv7}{7th predictor} #' \item{iv8}{8th predictor} #' \item{iv9}{9th predictor} #' \item{iv10}{10th predictor} #' } "testlog"
/scratch/gouwar.j/cran-all/cranData/BetterReg/R/data.R
#'Comparing Dependent Coefficients in Multiple Regression with Two to Five Predictors
#'
#'Tests all pairwise differences between coefficients from the same model.
#'@param data name of your datafile, loaded
#'@param y dependent variable name
#'@param x1 first predictor variable name
#'@param x2 second predictor variable name
#'@param x3 third predictor variable name
#'@param x4 fourth predictor variable name
#'@param x5 fifth predictor variable name
#'@param numpred number of predictors
#'@param comps Type of comparison, "abs" for absolute values or "raw" for raw coefficients
#'@examples
#'depbcomp(data=testreg,y=y,x1=x1,x2=x2,x3=x3,x4=x4,x5=x5, numpred=5,comps="abs")
#'@return Comparing Dependent Coefficients in Multiple Regression
#'@export
#'
depbcomp<-function(data=NULL, y=NULL, x1=NULL, x2=NULL, x3=NULL, x4=NULL, x5=NULL,
                   numpred=NULL, comps="abs") {
  # Select the DV plus the first 'numpred' predictors and fit the full model
  xx <- switch(as.character(numpred),
               "2" = dplyr::select(data, y, x1, x2),
               "3" = dplyr::select(data, y, x1, x2, x3),
               "4" = dplyr::select(data, y, x1, x2, x3, x4),
               "5" = dplyr::select(data, y, x1, x2, x3, x4, x5))
  xx <- as.data.frame(xx)
  values <- summary(lm(xx[, 1] ~ ., data = xx[-1]))
  b   <- values$coefficients[-1, 1]   # slopes
  seb <- values$coefficients[-1, 2]   # their standard errors
  df  <- values$df[2]
  # Correlation matrix of the predictors (absolute values for comps = "abs")
  mat <- cor(xx[-1])
  if (comps == "abs") { mat <- abs(mat); b <- abs(b) }
  inv <- solve(mat) * mat
  # Dependent-coefficient t test for every pair of predictors
  for (i in 1:(numpred - 1)) {
    for (j in (i + 1):numpred) {
      den  <- (seb[i]^2 + seb[j]^2 - 2 * seb[i] * seb[j] * inv[i, j] / (inv[i, i] + inv[j, j]))^.5
      tval <- if (comps == "abs") abs(b[i] - b[j]) / den else (b[i] - b[j]) / den
      pval <- 2 * (1 - (pt(q = abs(tval), df = df, lower.tail = TRUE)))
      message("Pred ", i, " vs. Pred ", j, " : t = ", round(tval, 3), ", p = ", pval)
    }
  }
}
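# Method note (illustrative): each comparison is a dependent-coefficient test
#   t = (b_i - b_j) / sqrt(se_i^2 + se_j^2 - 2*se_i*se_j*c_ij),
# where c_ij = inv[i,j]/(inv[i,i] + inv[j,j]) adjusts for the correlation
# between the two estimates. Example with the bundled data:
#   depbcomp(data = testreg, y = y, x1 = x1, x2 = x2, numpred = 2, comps = "raw")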
/scratch/gouwar.j/cran-all/cranData/BetterReg/R/depbcomp.R
utils::globalVariables(c("mymodel")) utils::globalVariables(c("pt")) utils::globalVariables(c("mymodel1")) utils::globalVariables(c("mymodel2"))
/scratch/gouwar.j/cran-all/cranData/BetterReg/R/global.R
#'Comparing Independent Coefficients in Multiple Regression #'@param model1 Summary of first model (see example for how to summarize) #'@param model2 Summary of second model (see example for how to summarize) #'@param comps Type of comparison. "abs" - absolute value of coefficient #'(recommended). "raw" raw values of coefficient #'@examples #'y_1<-rnorm(200); x1_1<-rnorm(200); x2_1<-rnorm(200) #'y_2<-rnorm(200); x1_2<-rnorm(200);x2_2<-rnorm(200) #'df1<-as.data.frame(cbind(y_1, x1_1,x2_1)) #'df2<-as.data.frame(cbind(y_2, x1_2,x2_2)) #'model1_2<-summary(lm(y_1~x1_1+x2_1, data=df1)) #'model2_2<-summary(lm(y_2~x1_2+x2_2, data=df2)) #'indbcomp(model1 = model1_2, model2 = model2_2, comps="abs") #'@return Comparing Independent Coefficients in Multiple Regression #'@export #' indbcomp<-function(model1=NULL, model2=NULL, comps="abs") { pred1<-model1$df[1]-1 pred2<-model2$df[1]-1 try(if(pred1!=pred2) stop("Models must be identical")) if ((pred1 =="2") && (comps=="abs")){ b1_1<-(model1$coefficients)[2,1] b2_1<-(model1$coefficients)[3,1] b1_2<-(model2$coefficients)[2,1] b2_2<-(model2$coefficients)[3,1] seb1_1<-(model1$coefficients)[2,2] seb2_1<-(model1$coefficients)[3,2] seb1_2<-(model2$coefficients)[2,2] seb2_2<-(model2$coefficients)[3,2] sebb1<-((seb1_1^2)+(seb2_1^2))^.5 sebb2<-((seb1_2^2)+(seb2_2^2))^.5 t1<-abs(abs(b1_1)-abs(b1_2))/sebb1 t2<-abs(abs(b2_1)-abs(b2_2))/sebb2 df1<-model1$df[2]+model2$df[2] df2<-model2$df[2] p1<-2*round(1-(pt(q=abs(t1), df=df1, lower.tail=TRUE)),3) p2<-2*round(1-(pt(q=abs(t2), df=df2, lower.tail=TRUE)),3) t1<-round((t1),3) t2<-round((t2),3) message("Predictor 1: "," t = ", t1,", p = ", p1) message("Predictor 2: "," t = ", t2,", p = ", p2) } if ((pred1 =="3") && (comps=="abs")){ b1_1<-(model1$coefficients)[2,1] b2_1<-(model1$coefficients)[3,1] b3_1<-(model1$coefficients)[4,1] b1_2<-(model2$coefficients)[2,1] b2_2<-(model2$coefficients)[3,1] b3_2<-(model2$coefficients)[4,1] seb1_1<-(model1$coefficients)[2,2] seb2_1<-(model1$coefficients)[3,2] seb3_1<-(model1$coefficients)[4,2] seb1_2<-(model2$coefficients)[2,2] seb2_2<-(model2$coefficients)[3,2] seb3_2<-(model2$coefficients)[4,2] sebb1<-((seb1_1^2)+(seb1_2^2))^.5 sebb2<-((seb2_1^2)+(seb2_2^2))^.5 sebb3<-((seb3_1^2)+(seb3_2^2))^.5 t1<-abs(abs(b1_1)-abs(b1_2))/sebb1 t2<-abs(abs(b2_1)-abs(b2_2))/sebb2 t3<-abs(abs(b3_1)-abs(b3_2))/sebb3 df1<-model1$df[2]+model2$df[2] p1<-2*round(1-(pt(q=abs(t1), df=df1, lower.tail=TRUE)),3) p2<-2*round(1-(pt(q=abs(t2), df=df1, lower.tail=TRUE)),3) p3<-2*round(1-(pt(q=abs(t3), df=df1, lower.tail=TRUE)),3) t1<-round((t1),3) t2<-round((t2),3) t3<-round((t3),3) message("Predictor 1: "," t = ", t1,", p = ", p1) message("Predictor 2: "," t = ", t2,", p = ", p2) message("Predictor 3: "," t = ", t3,", p = ", p3) } if ((pred1 =="4") && (comps=="abs")){ b1_1<-(model1$coefficients)[2,1] b2_1<-(model1$coefficients)[3,1] b3_1<-(model1$coefficients)[4,1] b4_1<-(model1$coefficients)[5,1] b1_2<-(model2$coefficients)[2,1] b2_2<-(model2$coefficients)[3,1] b3_2<-(model2$coefficients)[4,1] b4_2<-(model2$coefficients)[5,1] seb1_1<-(model1$coefficients)[2,2] seb2_1<-(model1$coefficients)[3,2] seb3_1<-(model1$coefficients)[4,2] seb4_1<-(model1$coefficients)[5,2] seb1_2<-(model2$coefficients)[2,2] seb2_2<-(model2$coefficients)[3,2] seb3_2<-(model2$coefficients)[4,2] seb4_2<-(model2$coefficients)[5,2] sebb1<-((seb1_1^2)+(seb1_2^2))^.5 sebb2<-((seb2_1^2)+(seb2_2^2))^.5 sebb3<-((seb3_1^2)+(seb3_2^2))^.5 sebb4<-((seb4_1^2)+(seb4_2^2))^.5 t1<-abs(abs(b1_1)-abs(b1_2))/sebb1 t2<-abs(abs(b2_1)-abs(b2_2))/sebb2 
t3<-abs(abs(b3_1)-abs(b3_2))/sebb3 t4<-abs(abs(b4_1)-abs(b4_2))/sebb4 df1<-model1$df[2]+model2$df[2] p1<-2*round(1-(pt(q=abs(t1), df=df1, lower.tail=TRUE)),3) p2<-2*round(1-(pt(q=abs(t2), df=df1, lower.tail=TRUE)),3) p3<-2*round(1-(pt(q=abs(t3), df=df1, lower.tail=TRUE)),3) p4<-2*round(1-(pt(q=abs(t4), df=df1, lower.tail=TRUE)),3) t1<-round((t1),3) t2<-round((t2),3) t3<-round((t3),3) t4<-round((t4),3) message("Predictor 1: "," t = ", t1,", p = ", p1) message("Predictor 2: "," t = ", t2,", p = ", p2) message("Predictor 3: "," t = ", t3,", p = ", p3) message("Predictor 4: "," t = ", t4,", p = ", p4) } if ((pred1 =="5") && (comps=="abs")){ b1_1<-(model1$coefficients)[2,1] b2_1<-(model1$coefficients)[3,1] b3_1<-(model1$coefficients)[4,1] b4_1<-(model1$coefficients)[5,1] b5_1<-(model1$coefficients)[6,1] b1_2<-(model2$coefficients)[2,1] b2_2<-(model2$coefficients)[3,1] b3_2<-(model2$coefficients)[4,1] b4_2<-(model2$coefficients)[5,1] b5_2<-(model2$coefficients)[6,1] seb1_1<-(model1$coefficients)[2,2] seb2_1<-(model1$coefficients)[3,2] seb3_1<-(model1$coefficients)[4,2] seb4_1<-(model1$coefficients)[5,2] seb5_1<-(model1$coefficients)[6,2] seb1_2<-(model2$coefficients)[2,2] seb2_2<-(model2$coefficients)[3,2] seb3_2<-(model2$coefficients)[4,2] seb4_2<-(model2$coefficients)[5,2] seb5_2<-(model2$coefficients)[6,2] sebb1<-((seb1_1^2)+(seb1_2^2))^.5 sebb2<-((seb2_1^2)+(seb2_2^2))^.5 sebb3<-((seb3_1^2)+(seb3_2^2))^.5 sebb4<-((seb4_1^2)+(seb4_2^2))^.5 sebb5<-((seb5_1^2)+(seb5_2^2))^.5 t1<-abs(abs(b1_1)-abs(b1_2))/sebb1 t2<-abs(abs(b2_1)-abs(b2_2))/sebb2 t3<-abs(abs(b3_1)-abs(b3_2))/sebb3 t4<-abs(abs(b4_1)-abs(b4_2))/sebb4 t5<-abs(abs(b5_1)-abs(b5_2))/sebb5 df1<-model1$df[2]+model2$df[2] p1<-2*round(1-(pt(q=abs(t1), df=df1, lower.tail=TRUE)),3) p2<-2*round(1-(pt(q=abs(t2), df=df1, lower.tail=TRUE)),3) p3<-2*round(1-(pt(q=abs(t3), df=df1, lower.tail=TRUE)),3) p4<-2*round(1-(pt(q=abs(t4), df=df1, lower.tail=TRUE)),3) p5<-2*round(1-(pt(q=abs(t5), df=df1, lower.tail=TRUE)),3) t1<-round((t1),3) t2<-round((t2),3) t3<-round((t3),3) t4<-round((t4),3) t5<-round((t5),3) message("Predictor 1: "," t = ", t1,", p = ", p1) message("Predictor 2: "," t = ", t2,", p = ", p2) message("Predictor 3: "," t = ", t3,", p = ", p3) message("Predictor 4: "," t = ", t4,", p = ", p4) message("Predictor 5: "," t = ", t5,", p = ", p5) } if ((pred1 =="6") && (comps=="abs")){ b1_1<-(model1$coefficients)[2,1] b2_1<-(model1$coefficients)[3,1] b3_1<-(model1$coefficients)[4,1] b4_1<-(model1$coefficients)[5,1] b5_1<-(model1$coefficients)[6,1] b6_1<-(model1$coefficients)[7,1] b1_2<-(model2$coefficients)[2,1] b2_2<-(model2$coefficients)[3,1] b3_2<-(model2$coefficients)[4,1] b4_2<-(model2$coefficients)[5,1] b5_2<-(model2$coefficients)[6,1] b6_2<-(model2$coefficients)[7,1] seb1_1<-(model1$coefficients)[2,2] seb2_1<-(model1$coefficients)[3,2] seb3_1<-(model1$coefficients)[4,2] seb4_1<-(model1$coefficients)[5,2] seb5_1<-(model1$coefficients)[6,2] seb6_1<-(model1$coefficients)[7,2] seb1_2<-(model2$coefficients)[2,2] seb2_2<-(model2$coefficients)[3,2] seb3_2<-(model2$coefficients)[4,2] seb4_2<-(model2$coefficients)[5,2] seb5_2<-(model2$coefficients)[6,2] seb6_2<-(model2$coefficients)[7,2] sebb1<-((seb1_1^2)+(seb1_2^2))^.5 sebb2<-((seb2_1^2)+(seb2_2^2))^.5 sebb3<-((seb3_1^2)+(seb3_2^2))^.5 sebb4<-((seb4_1^2)+(seb4_2^2))^.5 sebb5<-((seb5_1^2)+(seb5_2^2))^.5 sebb6<-((seb6_1^2)+(seb6_2^2))^.5 t1<-abs(abs(b1_1)-abs(b1_2))/sebb1 t2<-abs(abs(b2_1)-abs(b2_2))/sebb2 t3<-abs(abs(b3_1)-abs(b3_2))/sebb3 t4<-abs(abs(b4_1)-abs(b4_2))/sebb4 
t5<-abs(abs(b5_1)-abs(b5_2))/sebb5 t6<-abs(abs(b6_1)-abs(b6_2))/sebb6 df1<-model1$df[2]+model2$df[2] p1<-2*round(1-(pt(q=abs(t1), df=df1, lower.tail=TRUE)),3) p2<-2*round(1-(pt(q=abs(t2), df=df1, lower.tail=TRUE)),3) p3<-2*round(1-(pt(q=abs(t3), df=df1, lower.tail=TRUE)),3) p4<-2*round(1-(pt(q=abs(t4), df=df1, lower.tail=TRUE)),3) p5<-2*round(1-(pt(q=abs(t5), df=df1, lower.tail=TRUE)),3) p6<-2*round(1-(pt(q=abs(t6), df=df1, lower.tail=TRUE)),3) t1<-round((t1),3) t2<-round((t2),3) t3<-round((t3),3) t4<-round((t4),3) t5<-round((t5),3) t6<-round((t6),3) message("Predictor 1: "," t = ", t1,", p = ", p1) message("Predictor 2: "," t = ", t2,", p = ", p2) message("Predictor 3: "," t = ", t3,", p = ", p3) message("Predictor 4: "," t = ", t4,", p = ", p4) message("Predictor 5: "," t = ", t5,", p = ", p5) message("Predictor 6: "," t = ", t6,", p = ", p6) } if ((pred1 =="7") && (comps=="abs")){ b1_1<-(model1$coefficients)[2,1] b2_1<-(model1$coefficients)[3,1] b3_1<-(model1$coefficients)[4,1] b4_1<-(model1$coefficients)[5,1] b5_1<-(model1$coefficients)[6,1] b6_1<-(model1$coefficients)[7,1] b7_1<-(model1$coefficients)[8,1] b1_2<-(model2$coefficients)[2,1] b2_2<-(model2$coefficients)[3,1] b3_2<-(model2$coefficients)[4,1] b4_2<-(model2$coefficients)[5,1] b5_2<-(model2$coefficients)[6,1] b6_2<-(model2$coefficients)[7,1] b7_2<-(model2$coefficients)[8,1] seb1_1<-(model1$coefficients)[2,2] seb2_1<-(model1$coefficients)[3,2] seb3_1<-(model1$coefficients)[4,2] seb4_1<-(model1$coefficients)[5,2] seb5_1<-(model1$coefficients)[6,2] seb6_1<-(model1$coefficients)[7,2] seb7_1<-(model1$coefficients)[8,2] seb1_2<-(model2$coefficients)[2,2] seb2_2<-(model2$coefficients)[3,2] seb3_2<-(model2$coefficients)[4,2] seb4_2<-(model2$coefficients)[5,2] seb5_2<-(model2$coefficients)[6,2] seb6_2<-(model2$coefficients)[7,2] seb7_2<-(model2$coefficients)[8,2] sebb1<-((seb1_1^2)+(seb1_2^2))^.5 sebb2<-((seb2_1^2)+(seb2_2^2))^.5 sebb3<-((seb3_1^2)+(seb3_2^2))^.5 sebb4<-((seb4_1^2)+(seb4_2^2))^.5 sebb5<-((seb5_1^2)+(seb5_2^2))^.5 sebb6<-((seb6_1^2)+(seb6_2^2))^.5 sebb7<-((seb7_1^2)+(seb7_2^2))^.5 t1<-abs(abs(b1_1)-abs(b1_2))/sebb1 t2<-abs(abs(b2_1)-abs(b2_2))/sebb2 t3<-abs(abs(b3_1)-abs(b3_2))/sebb3 t4<-abs(abs(b4_1)-abs(b4_2))/sebb4 t5<-abs(abs(b5_1)-abs(b5_2))/sebb5 t6<-abs(abs(b6_1)-abs(b6_2))/sebb6 t7<-abs(abs(b7_1)-abs(b7_2))/sebb7 df1<-model1$df[2]+model2$df[2] p1<-2*round(1-(pt(q=abs(t1), df=df1, lower.tail=TRUE)),3) p2<-2*round(1-(pt(q=abs(t2), df=df1, lower.tail=TRUE)),3) p3<-2*round(1-(pt(q=abs(t3), df=df1, lower.tail=TRUE)),3) p4<-2*round(1-(pt(q=abs(t4), df=df1, lower.tail=TRUE)),3) p5<-2*round(1-(pt(q=abs(t5), df=df1, lower.tail=TRUE)),3) p6<-2*round(1-(pt(q=abs(t6), df=df1, lower.tail=TRUE)),3) p7<-2*round(1-(pt(q=abs(t7), df=df1, lower.tail=TRUE)),3) t1<-round((t1),3) t2<-round((t2),3) t3<-round((t3),3) t4<-round((t4),3) t5<-round((t5),3) t6<-round((t6),3) t7<-round((t7),3) message("Predictor 1: "," t = ", t1,", p = ", p1) message("Predictor 2: "," t = ", t2,", p = ", p2) message("Predictor 3: "," t = ", t3,", p = ", p3) message("Predictor 4: "," t = ", t4,", p = ", p4) message("Predictor 5: "," t = ", t5,", p = ", p5) message("Predictor 6: "," t = ", t6,", p = ", p6) message("Predictor 7: "," t = ", t7,", p = ", p7) } if ((pred1 =="8") && (comps=="abs")){ b1_1<-(model1$coefficients)[2,1] b2_1<-(model1$coefficients)[3,1] b3_1<-(model1$coefficients)[4,1] b4_1<-(model1$coefficients)[5,1] b5_1<-(model1$coefficients)[6,1] b6_1<-(model1$coefficients)[7,1] b7_1<-(model1$coefficients)[8,1] b8_1<-(model1$coefficients)[9,1] 
b1_2<-(model2$coefficients)[2,1] b2_2<-(model2$coefficients)[3,1] b3_2<-(model2$coefficients)[4,1] b4_2<-(model2$coefficients)[5,1] b5_2<-(model2$coefficients)[6,1] b6_2<-(model2$coefficients)[7,1] b7_2<-(model2$coefficients)[8,1] b8_2<-(model2$coefficients)[9,1] seb1_1<-(model1$coefficients)[2,2] seb2_1<-(model1$coefficients)[3,2] seb3_1<-(model1$coefficients)[4,2] seb4_1<-(model1$coefficients)[5,2] seb5_1<-(model1$coefficients)[6,2] seb6_1<-(model1$coefficients)[7,2] seb7_1<-(model1$coefficients)[8,2] seb8_1<-(model1$coefficients)[9,2] seb1_2<-(model2$coefficients)[2,2] seb2_2<-(model2$coefficients)[3,2] seb3_2<-(model2$coefficients)[4,2] seb4_2<-(model2$coefficients)[5,2] seb5_2<-(model2$coefficients)[6,2] seb6_2<-(model2$coefficients)[7,2] seb7_2<-(model2$coefficients)[8,2] seb8_2<-(model2$coefficients)[9,2] sebb1<-((seb1_1^2)+(seb1_2^2))^.5 sebb2<-((seb2_1^2)+(seb2_2^2))^.5 sebb3<-((seb3_1^2)+(seb3_2^2))^.5 sebb4<-((seb4_1^2)+(seb4_2^2))^.5 sebb5<-((seb5_1^2)+(seb5_2^2))^.5 sebb6<-((seb6_1^2)+(seb6_2^2))^.5 sebb7<-((seb7_1^2)+(seb7_2^2))^.5 sebb8<-((seb8_1^2)+(seb8_2^2))^.5 t1<-abs(abs(b1_1)-abs(b1_2))/sebb1 t2<-abs(abs(b2_1)-abs(b2_2))/sebb2 t3<-abs(abs(b3_1)-abs(b3_2))/sebb3 t4<-abs(abs(b4_1)-abs(b4_2))/sebb4 t5<-abs(abs(b5_1)-abs(b5_2))/sebb5 t6<-abs(abs(b6_1)-abs(b6_2))/sebb6 t7<-abs(abs(b7_1)-abs(b7_2))/sebb7 t8<-abs(abs(b8_1)-abs(b8_2))/sebb8 df1<-model1$df[2]+model2$df[2] p1<-2*round(1-(pt(q=abs(t1), df=df1, lower.tail=TRUE)),3) p2<-2*round(1-(pt(q=abs(t2), df=df1, lower.tail=TRUE)),3) p3<-2*round(1-(pt(q=abs(t3), df=df1, lower.tail=TRUE)),3) p4<-2*round(1-(pt(q=abs(t4), df=df1, lower.tail=TRUE)),3) p5<-2*round(1-(pt(q=abs(t5), df=df1, lower.tail=TRUE)),3) p6<-2*round(1-(pt(q=abs(t6), df=df1, lower.tail=TRUE)),3) p7<-2*round(1-(pt(q=abs(t7), df=df1, lower.tail=TRUE)),3) p8<-2*round(1-(pt(q=abs(t8), df=df1, lower.tail=TRUE)),3) t1<-round((t1),3) t2<-round((t2),3) t3<-round((t3),3) t4<-round((t4),3) t5<-round((t5),3) t6<-round((t6),3) t7<-round((t7),3) t8<-round((t8),3) message("Predictor 1: "," t = ", t1,", p = ", p1) message("Predictor 2: "," t = ", t2,", p = ", p2) message("Predictor 3: "," t = ", t3,", p = ", p3) message("Predictor 4: "," t = ", t4,", p = ", p4) message("Predictor 5: "," t = ", t5,", p = ", p5) message("Predictor 6: "," t = ", t6,", p = ", p6) message("Predictor 7: "," t = ", t7,", p = ", p7) message("Predictor 8: "," t = ", t8,", p = ", p8) } if ((pred1 =="9") && (comps=="abs")){ b1_1<-(model1$coefficients)[2,1] b2_1<-(model1$coefficients)[3,1] b3_1<-(model1$coefficients)[4,1] b4_1<-(model1$coefficients)[5,1] b5_1<-(model1$coefficients)[6,1] b6_1<-(model1$coefficients)[7,1] b7_1<-(model1$coefficients)[8,1] b8_1<-(model1$coefficients)[9,1] b9_1<-(model1$coefficients)[10,1] b1_2<-(model2$coefficients)[2,1] b2_2<-(model2$coefficients)[3,1] b3_2<-(model2$coefficients)[4,1] b4_2<-(model2$coefficients)[5,1] b5_2<-(model2$coefficients)[6,1] b6_2<-(model2$coefficients)[7,1] b7_2<-(model2$coefficients)[8,1] b8_2<-(model2$coefficients)[9,1] b9_2<-(model2$coefficients)[10,1] seb1_1<-(model1$coefficients)[2,2] seb2_1<-(model1$coefficients)[3,2] seb3_1<-(model1$coefficients)[4,2] seb4_1<-(model1$coefficients)[5,2] seb5_1<-(model1$coefficients)[6,2] seb6_1<-(model1$coefficients)[7,2] seb7_1<-(model1$coefficients)[8,2] seb8_1<-(model1$coefficients)[9,2] seb9_1<-(model1$coefficients)[10,2] seb1_2<-(model2$coefficients)[2,2] seb2_2<-(model2$coefficients)[3,2] seb3_2<-(model2$coefficients)[4,2] seb4_2<-(model2$coefficients)[5,2] seb5_2<-(model2$coefficients)[6,2]
seb6_2<-(model2$coefficients)[7,2] seb7_2<-(model2$coefficients)[8,2] seb8_2<-(model2$coefficients)[9,2] seb9_2<-(model2$coefficients)[10,2] sebb1<-((seb1_1^2)+(seb1_2^2))^.5 sebb2<-((seb2_1^2)+(seb2_2^2))^.5 sebb3<-((seb3_1^2)+(seb3_2^2))^.5 sebb4<-((seb4_1^2)+(seb4_2^2))^.5 sebb5<-((seb5_1^2)+(seb5_2^2))^.5 sebb6<-((seb6_1^2)+(seb6_2^2))^.5 sebb7<-((seb7_1^2)+(seb7_2^2))^.5 sebb8<-((seb8_1^2)+(seb8_2^2))^.5 sebb9<-((seb9_1^2)+(seb9_2^2))^.5 t1<-abs(abs(b1_1)-abs(b1_2))/sebb1 t2<-abs(abs(b2_1)-abs(b2_2))/sebb2 t3<-abs(abs(b3_1)-abs(b3_2))/sebb3 t4<-abs(abs(b4_1)-abs(b4_2))/sebb4 t5<-abs(abs(b5_1)-abs(b5_2))/sebb5 t6<-abs(abs(b6_1)-abs(b6_2))/sebb6 t7<-abs(abs(b7_1)-abs(b7_2))/sebb7 t8<-abs(abs(b8_1)-abs(b8_2))/sebb8 t9<-abs(abs(b9_1)-abs(b9_2))/sebb9 df1<-model1$df[2]+model2$df[2] p1<-2*round(1-(pt(q=abs(t1), df=df1, lower.tail=TRUE)),3) p2<-2*round(1-(pt(q=abs(t2), df=df1, lower.tail=TRUE)),3) p3<-2*round(1-(pt(q=abs(t3), df=df1, lower.tail=TRUE)),3) p4<-2*round(1-(pt(q=abs(t4), df=df1, lower.tail=TRUE)),3) p5<-2*round(1-(pt(q=abs(t5), df=df1, lower.tail=TRUE)),3) p6<-2*round(1-(pt(q=abs(t6), df=df1, lower.tail=TRUE)),3) p7<-2*round(1-(pt(q=abs(t7), df=df1, lower.tail=TRUE)),3) p8<-2*round(1-(pt(q=abs(t8), df=df1, lower.tail=TRUE)),3) p9<-2*round(1-(pt(q=abs(t9), df=df1, lower.tail=TRUE)),3) t1<-round((t1),3) t2<-round((t2),3) t3<-round((t3),3) t4<-round((t4),3) t5<-round((t5),3) t6<-round((t6),3) t7<-round((t7),3) t8<-round((t8),3) t9<-round((t9),3) message("Predictor 1: "," t = ", t1,", p = ", p1) message("Predictor 2: "," t = ", t2,", p = ", p2) message("Predictor 3: "," t = ", t3,", p = ", p3) message("Predictor 4: "," t = ", t4,", p = ", p4) message("Predictor 5: "," t = ", t5,", p = ", p5) message("Predictor 6: "," t = ", t6,", p = ", p6) message("Predictor 7: "," t = ", t7,", p = ", p7) message("Predictor 8: "," t = ", t8,", p = ", p8) message("Predictor 9: "," t = ", t9,", p = ", p9) } if ((pred1 =="10") && (comps=="abs")){ b1_1<-(model1$coefficients)[2,1] b2_1<-(model1$coefficients)[3,1] b3_1<-(model1$coefficients)[4,1] b4_1<-(model1$coefficients)[5,1] b5_1<-(model1$coefficients)[6,1] b6_1<-(model1$coefficients)[7,1] b7_1<-(model1$coefficients)[8,1] b8_1<-(model1$coefficients)[9,1] b9_1<-(model1$coefficients)[10,1] b10_1<-(model1$coefficients)[11,1] b1_2<-(model2$coefficients)[2,1] b2_2<-(model2$coefficients)[3,1] b3_2<-(model2$coefficients)[4,1] b4_2<-(model2$coefficients)[5,1] b5_2<-(model2$coefficients)[6,1] b6_2<-(model2$coefficients)[7,1] b7_2<-(model2$coefficients)[8,1] b8_2<-(model2$coefficients)[9,1] b9_2<-(model2$coefficients)[10,1] b10_2<-(model2$coefficients)[11,1] seb1_1<-(model1$coefficients)[2,2] seb2_1<-(model1$coefficients)[3,2] seb3_1<-(model1$coefficients)[4,2] seb4_1<-(model1$coefficients)[5,2] seb5_1<-(model1$coefficients)[6,2] seb6_1<-(model1$coefficients)[7,2] seb7_1<-(model1$coefficients)[8,2] seb8_1<-(model1$coefficients)[9,2] seb9_1<-(model1$coefficients)[10,2] seb10_1<-(model1$coefficients)[11,2] seb1_2<-(model2$coefficients)[2,2] seb2_2<-(model2$coefficients)[3,2] seb3_2<-(model2$coefficients)[4,2] seb4_2<-(model2$coefficients)[5,2] seb5_2<-(model2$coefficients)[6,2] seb6_2<-(model2$coefficients)[7,2] seb7_2<-(model2$coefficients)[8,2] seb8_2<-(model2$coefficients)[9,2] seb9_2<-(model2$coefficients)[10,2] seb10_2<-(model2$coefficients)[11,2] sebb1<-((seb1_1^2)+(seb1_2^2))^.5 sebb2<-((seb2_1^2)+(seb2_2^2))^.5 sebb3<-((seb3_1^2)+(seb3_2^2))^.5 sebb4<-((seb4_1^2)+(seb4_2^2))^.5 sebb5<-((seb5_1^2)+(seb5_2^2))^.5 sebb6<-((seb6_1^2)+(seb6_2^2))^.5 
sebb7<-((seb7_1^2)+(seb7_2^2))^.5 sebb8<-((seb8_1^2)+(seb8_2^2))^.5 sebb9<-((seb9_1^2)+(seb9_2^2))^.5 sebb10<-((seb10_1^2)+(seb10_2^2))^.5 t1<-abs(abs(b1_1)-abs(b1_2))/sebb1 t2<-abs(abs(b2_1)-abs(b2_2))/sebb2 t3<-abs(abs(b3_1)-abs(b3_2))/sebb3 t4<-abs(abs(b4_1)-abs(b4_2))/sebb4 t5<-abs(abs(b5_1)-abs(b5_2))/sebb5 t6<-abs(abs(b6_1)-abs(b6_2))/sebb6 t7<-abs(abs(b7_1)-abs(b7_2))/sebb7 t8<-abs(abs(b8_1)-abs(b8_2))/sebb8 t9<-abs(abs(b9_1)-abs(b9_2))/sebb9 t10<-abs(abs(b10_1)-abs(b10_2))/sebb10 df1<-model1$df[2]+model2$df[2] p1<-2*round(1-(pt(q=abs(t1), df=df1, lower.tail=TRUE)),3) p2<-2*round(1-(pt(q=abs(t2), df=df1, lower.tail=TRUE)),3) p3<-2*round(1-(pt(q=abs(t3), df=df1, lower.tail=TRUE)),3) p4<-2*round(1-(pt(q=abs(t4), df=df1, lower.tail=TRUE)),3) p5<-2*round(1-(pt(q=abs(t5), df=df1, lower.tail=TRUE)),3) p6<-2*round(1-(pt(q=abs(t6), df=df1, lower.tail=TRUE)),3) p7<-2*round(1-(pt(q=abs(t7), df=df1, lower.tail=TRUE)),3) p8<-2*round(1-(pt(q=abs(t8), df=df1, lower.tail=TRUE)),3) p9<-2*round(1-(pt(q=abs(t9), df=df1, lower.tail=TRUE)),3) p10<-2*round(1-(pt(q=abs(t10), df=df1, lower.tail=TRUE)),3) t1<-round((t1),3) t2<-round((t2),3) t3<-round((t3),3) t4<-round((t4),3) t5<-round((t5),3) t6<-round((t6),3) t7<-round((t7),3) t8<-round((t8),3) t9<-round((t9),3) t10<-round((t10),3) message("Predictor 1: "," t = ", t1,", p = ", p1) message("Predictor 2: "," t = ", t2,", p = ", p2) message("Predictor 3: "," t = ", t3,", p = ", p3) message("Predictor 4: "," t = ", t4,", p = ", p4) message("Predictor 5: "," t = ", t5,", p = ", p5) message("Predictor 6: "," t = ", t6,", p = ", p6) message("Predictor 7: "," t = ", t7,", p = ", p7) message("Predictor 8: "," t = ", t8,", p = ", p8) message("Predictor 9: "," t = ", t9,", p = ", p9) message("Predictor 10: "," t = ", t10,", p = ", p10) } if ((pred1 =="2") && (comps=="raw")){ b1_1<-(model1$coefficients)[2,1] b2_1<-(model1$coefficients)[3,1] b1_2<-(model2$coefficients)[2,1] b2_2<-(model2$coefficients)[3,1] seb1_1<-(model1$coefficients)[2,2] seb2_1<-(model1$coefficients)[3,2] seb1_2<-(model2$coefficients)[2,2] seb2_2<-(model2$coefficients)[3,2] sebb1<-((seb1_1^2)+(seb1_2^2))^.5 sebb2<-((seb2_1^2)+(seb2_2^2))^.5 t1<-((b1_1-b1_2))/sebb1 t2<-((b2_1-b2_2))/sebb2 df1<-model1$df[2]+model2$df[2] p1<-2*round(1-(pt(q=abs(t1), df=df1, lower.tail=TRUE)),3) p2<-2*round(1-(pt(q=abs(t2), df=df1, lower.tail=TRUE)),3) t1<-round((t1),3) t2<-round((t2),3) message("Predictor 1: "," t = ", t1,", p = ", p1) message("Predictor 2: "," t = ", t2,", p = ", p2) } if ((pred1 =="3") && (comps=="raw")){ b1_1<-(model1$coefficients)[2,1] b2_1<-(model1$coefficients)[3,1] b3_1<-(model1$coefficients)[4,1] b1_2<-(model2$coefficients)[2,1] b2_2<-(model2$coefficients)[3,1] b3_2<-(model2$coefficients)[4,1] seb1_1<-(model1$coefficients)[2,2] seb2_1<-(model1$coefficients)[3,2] seb3_1<-(model1$coefficients)[4,2] seb1_2<-(model2$coefficients)[2,2] seb2_2<-(model2$coefficients)[3,2] seb3_2<-(model2$coefficients)[4,2] sebb1<-((seb1_1^2)+(seb1_2^2))^.5 sebb2<-((seb2_1^2)+(seb2_2^2))^.5 sebb3<-((seb3_1^2)+(seb3_2^2))^.5 t1<-((b1_1-b1_2))/sebb1 t2<-((b2_1-b2_2))/sebb2 t3<-((b3_1-b3_2))/sebb3 df1<-model1$df[2]+model2$df[2] p1<-2*round(1-(pt(q=abs(t1), df=df1, lower.tail=TRUE)),3) p2<-2*round(1-(pt(q=abs(t2), df=df1, lower.tail=TRUE)),3) p3<-2*round(1-(pt(q=abs(t3), df=df1, lower.tail=TRUE)),3) t1<-round((t1),3) t2<-round((t2),3) t3<-round((t3),3) message("Predictor 1: "," t = ", t1,", p = ", p1) message("Predictor 2: "," t = ", t2,", p = ", p2) message("Predictor 3: "," t = ", t3,", p = ", p3) }
if ((pred1 =="4") && (comps=="raw")){ b1_1<-(model1$coefficients)[2,1] b2_1<-(model1$coefficients)[3,1] b3_1<-(model1$coefficients)[4,1] b4_1<-(model1$coefficients)[5,1] b1_2<-(model2$coefficients)[2,1] b2_2<-(model2$coefficients)[3,1] b3_2<-(model2$coefficients)[4,1] b4_2<-(model2$coefficients)[5,1] seb1_1<-(model1$coefficients)[2,2] seb2_1<-(model1$coefficients)[3,2] seb3_1<-(model1$coefficients)[4,2] seb4_1<-(model1$coefficients)[5,2] seb1_2<-(model2$coefficients)[2,2] seb2_2<-(model2$coefficients)[3,2] seb3_2<-(model2$coefficients)[4,2] seb4_2<-(model2$coefficients)[5,2] sebb1<-((seb1_1^2)+(seb1_2^2))^.5 sebb2<-((seb2_1^2)+(seb2_2^2))^.5 sebb3<-((seb3_1^2)+(seb3_2^2))^.5 sebb4<-((seb4_1^2)+(seb4_2^2))^.5 t1<-((b1_1-b1_2))/sebb1 t2<-((b2_1-b2_2))/sebb2 t3<-((b3_1-b3_2))/sebb3 t4<-((b4_1-b4_2))/sebb4 df1<-model1$df[2]+model2$df[2] p1<-2*round(1-(pt(q=abs(t1), df=df1, lower.tail=TRUE)),3) p2<-2*round(1-(pt(q=abs(t2), df=df1, lower.tail=TRUE)),3) p3<-2*round(1-(pt(q=abs(t3), df=df1, lower.tail=TRUE)),3) p4<-2*round(1-(pt(q=abs(t4), df=df1, lower.tail=TRUE)),3) t1<-round((t1),3) t2<-round((t2),3) t3<-round((t3),3) t4<-round((t4),3) message("Predictor 1: "," t = ", t1,", p = ", p1) message("Predictor 2: "," t = ", t2,", p = ", p2) message("Predictor 3: "," t = ", t3,", p = ", p3) message("Predictor 4: "," t = ", t4,", p = ", p4) } if ((pred1 =="5") && (comps=="raw")){ b1_1<-(model1$coefficients)[2,1] b2_1<-(model1$coefficients)[3,1] b3_1<-(model1$coefficients)[4,1] b4_1<-(model1$coefficients)[5,1] b5_1<-(model1$coefficients)[6,1] b1_2<-(model2$coefficients)[2,1] b2_2<-(model2$coefficients)[3,1] b3_2<-(model2$coefficients)[4,1] b4_2<-(model2$coefficients)[5,1] b5_2<-(model2$coefficients)[6,1] seb1_1<-(model1$coefficients)[2,2] seb2_1<-(model1$coefficients)[3,2] seb3_1<-(model1$coefficients)[4,2] seb4_1<-(model1$coefficients)[5,2] seb5_1<-(model1$coefficients)[6,2] seb1_2<-(model2$coefficients)[2,2] seb2_2<-(model2$coefficients)[3,2] seb3_2<-(model2$coefficients)[4,2] seb4_2<-(model2$coefficients)[5,2] seb5_2<-(model2$coefficients)[6,2] sebb1<-((seb1_1^2)+(seb1_2^2))^.5 sebb2<-((seb2_1^2)+(seb2_2^2))^.5 sebb3<-((seb3_1^2)+(seb3_2^2))^.5 sebb4<-((seb4_1^2)+(seb4_2^2))^.5 sebb5<-((seb5_1^2)+(seb5_2^2))^.5 t1<-((b1_1-b1_2))/sebb1 t2<-((b2_1-b2_2))/sebb2 t3<-((b3_1-b3_2))/sebb3 t4<-((b4_1-b4_2))/sebb4 t5<-((b5_1-b5_2))/sebb5 df1<-model1$df[2]+model2$df[2] p1<-2*round(1-(pt(q=abs(t1), df=df1, lower.tail=TRUE)),3) p2<-2*round(1-(pt(q=abs(t2), df=df1, lower.tail=TRUE)),3) p3<-2*round(1-(pt(q=abs(t3), df=df1, lower.tail=TRUE)),3) p4<-2*round(1-(pt(q=abs(t4), df=df1, lower.tail=TRUE)),3) p5<-2*round(1-(pt(q=abs(t5), df=df1, lower.tail=TRUE)),3) t1<-round((t1),3) t2<-round((t2),3) t3<-round((t3),3) t4<-round((t4),3) t5<-round((t5),3) message("Predictor 1: "," t = ", t1,", p = ", p1) message("Predictor 2: "," t = ", t2,", p = ", p2) message("Predictor 3: "," t = ", t3,", p = ", p3) message("Predictor 4: "," t = ", t4,", p = ", p4) message("Predictor 5: "," t = ", t5,", p = ", p5) } if ((pred1 =="6") && (comps=="raw")){ b1_1<-(model1$coefficients)[2,1] b2_1<-(model1$coefficients)[3,1] b3_1<-(model1$coefficients)[4,1] b4_1<-(model1$coefficients)[5,1] b5_1<-(model1$coefficients)[6,1] b6_1<-(model1$coefficients)[7,1] b1_2<-(model2$coefficients)[2,1] b2_2<-(model2$coefficients)[3,1] b3_2<-(model2$coefficients)[4,1] b4_2<-(model2$coefficients)[5,1] b5_2<-(model2$coefficients)[6,1] b6_2<-(model2$coefficients)[7,1] seb1_1<-(model1$coefficients)[2,2] seb2_1<-(model1$coefficients)[3,2] 
seb3_1<-(model1$coefficients)[4,2] seb4_1<-(model1$coefficients)[5,2] seb5_1<-(model1$coefficients)[6,2] seb6_1<-(model1$coefficients)[7,2] seb1_2<-(model2$coefficients)[2,2] seb2_2<-(model2$coefficients)[3,2] seb3_2<-(model2$coefficients)[4,2] seb4_2<-(model2$coefficients)[5,2] seb5_2<-(model2$coefficients)[6,2] seb6_2<-(model2$coefficients)[7,2] sebb1<-((seb1_1^2)+(seb1_2^2))^.5 sebb2<-((seb2_1^2)+(seb2_2^2))^.5 sebb3<-((seb3_1^2)+(seb3_2^2))^.5 sebb4<-((seb4_1^2)+(seb4_2^2))^.5 sebb5<-((seb5_1^2)+(seb5_2^2))^.5 sebb6<-((seb6_1^2)+(seb6_2^2))^.5 t1<-((b1_1-b1_2))/sebb1 t2<-((b2_1-b2_2))/sebb2 t3<-((b3_1-b3_2))/sebb3 t4<-((b4_1-b4_2))/sebb4 t5<-((b5_1-b5_2))/sebb5 t6<-((b6_1-b6_2))/sebb6 df1<-model1$df[2]+model2$df[2] p1<-2*round(1-(pt(q=abs(t1), df=df1, lower.tail=TRUE)),3) p2<-2*round(1-(pt(q=abs(t2), df=df1, lower.tail=TRUE)),3) p3<-2*round(1-(pt(q=abs(t3), df=df1, lower.tail=TRUE)),3) p4<-2*round(1-(pt(q=abs(t4), df=df1, lower.tail=TRUE)),3) p5<-2*round(1-(pt(q=abs(t5), df=df1, lower.tail=TRUE)),3) p6<-2*round(1-(pt(q=abs(t6), df=df1, lower.tail=TRUE)),3) t1<-round((t1),3) t2<-round((t2),3) t3<-round((t3),3) t4<-round((t4),3) t5<-round((t5),3) t6<-round((t6),3) message("Predictor 1: "," t = ", t1,", p = ", p1) message("Predictor 2: "," t = ", t2,", p = ", p2) message("Predictor 3: "," t = ", t3,", p = ", p3) message("Predictor 4: "," t = ", t4,", p = ", p4) message("Predictor 5: "," t = ", t5,", p = ", p5) message("Predictor 6: "," t = ", t6,", p = ", p6) } if ((pred1 =="7") && (comps=="raw")){ b1_1<-(model1$coefficients)[2,1] b2_1<-(model1$coefficients)[3,1] b3_1<-(model1$coefficients)[4,1] b4_1<-(model1$coefficients)[5,1] b5_1<-(model1$coefficients)[6,1] b6_1<-(model1$coefficients)[7,1] b7_1<-(model1$coefficients)[8,1] b1_2<-(model2$coefficients)[2,1] b2_2<-(model2$coefficients)[3,1] b3_2<-(model2$coefficients)[4,1] b4_2<-(model2$coefficients)[5,1] b5_2<-(model2$coefficients)[6,1] b6_2<-(model2$coefficients)[7,1] b7_2<-(model2$coefficients)[8,1] seb1_1<-(model1$coefficients)[2,2] seb2_1<-(model1$coefficients)[3,2] seb3_1<-(model1$coefficients)[4,2] seb4_1<-(model1$coefficients)[5,2] seb5_1<-(model1$coefficients)[6,2] seb6_1<-(model1$coefficients)[7,2] seb7_1<-(model1$coefficients)[8,2] seb1_2<-(model2$coefficients)[2,2] seb2_2<-(model2$coefficients)[3,2] seb3_2<-(model2$coefficients)[4,2] seb4_2<-(model2$coefficients)[5,2] seb5_2<-(model2$coefficients)[6,2] seb6_2<-(model2$coefficients)[7,2] seb7_2<-(model2$coefficients)[8,2] sebb1<-((seb1_1^2)+(seb1_2^2))^.5 sebb2<-((seb2_1^2)+(seb2_2^2))^.5 sebb3<-((seb3_1^2)+(seb3_2^2))^.5 sebb4<-((seb4_1^2)+(seb4_2^2))^.5 sebb5<-((seb5_1^2)+(seb5_2^2))^.5 sebb6<-((seb6_1^2)+(seb6_2^2))^.5 sebb7<-((seb7_1^2)+(seb7_2^2))^.5 t1<-((b1_1-b1_2))/sebb1 t2<-((b2_1-b2_2))/sebb2 t3<-((b3_1-b3_2))/sebb3 t4<-((b4_1-b4_2))/sebb4 t5<-((b5_1-b5_2))/sebb5 t6<-((b6_1-b6_2))/sebb6 t7<-((b7_1-b7_2))/sebb7 df1<-model1$df[2]+model2$df[2] p1<-2*round(1-(pt(q=abs(t1), df=df1, lower.tail=TRUE)),3) p2<-2*round(1-(pt(q=abs(t2), df=df1, lower.tail=TRUE)),3) p3<-2*round(1-(pt(q=abs(t3), df=df1, lower.tail=TRUE)),3) p4<-2*round(1-(pt(q=abs(t4), df=df1, lower.tail=TRUE)),3) p5<-2*round(1-(pt(q=abs(t5), df=df1, lower.tail=TRUE)),3) p6<-2*round(1-(pt(q=abs(t6), df=df1, lower.tail=TRUE)),3) p7<-2*round(1-(pt(q=abs(t7), df=df1, lower.tail=TRUE)),3) t1<-round((t1),3) t2<-round((t2),3) t3<-round((t3),3) t4<-round((t4),3) t5<-round((t5),3) t6<-round((t6),3) t7<-round((t7),3) message("Predictor 1: "," t = ", t1,", p = ", p1) message("Predictor 2: "," t = ", t2,", p = ", p2) 
message("Predictor 3: "," t = ", t3,", p = ", p3) message("Predictor 4: "," t = ", t4,", p = ", p4) message("Predictor 5: "," t = ", t5,", p = ", p5) message("Predictor 6: "," t = ", t6,", p = ", p6) message("Predictor 7: "," t = ", t7,", p = ", p7) } if ((pred1 =="8") && (comps=="raw")){ b1_1<-(model1$coefficients)[2,1] b2_1<-(model1$coefficients)[3,1] b3_1<-(model1$coefficients)[4,1] b4_1<-(model1$coefficients)[5,1] b5_1<-(model1$coefficients)[6,1] b6_1<-(model1$coefficients)[7,1] b7_1<-(model1$coefficients)[8,1] b8_1<-(model1$coefficients)[9,1] b1_2<-(model2$coefficients)[2,1] b2_2<-(model2$coefficients)[3,1] b3_2<-(model2$coefficients)[4,1] b4_2<-(model2$coefficients)[5,1] b5_2<-(model2$coefficients)[6,1] b6_2<-(model2$coefficients)[7,1] b7_2<-(model2$coefficients)[8,1] b8_2<-(model2$coefficients)[9,1] seb1_1<-(model1$coefficients)[2,2] seb2_1<-(model1$coefficients)[3,2] seb3_1<-(model1$coefficients)[4,2] seb4_1<-(model1$coefficients)[5,2] seb5_1<-(model1$coefficients)[6,2] seb6_1<-(model1$coefficients)[7,2] seb7_1<-(model1$coefficients)[8,2] seb8_1<-(model1$coefficients)[9,2] seb1_2<-(model2$coefficients)[2,2] seb2_2<-(model2$coefficients)[3,2] seb3_2<-(model2$coefficients)[4,2] seb4_2<-(model2$coefficients)[5,2] seb5_2<-(model2$coefficients)[6,2] seb6_2<-(model2$coefficients)[7,2] seb7_2<-(model2$coefficients)[8,2] seb8_2<-(model2$coefficients)[9,2] sebb1<-((seb1_1^2)+(seb1_2^2))^.5 sebb2<-((seb2_1^2)+(seb2_2^2))^.5 sebb3<-((seb3_1^2)+(seb3_2^2))^.5 sebb4<-((seb4_1^2)+(seb4_2^2))^.5 sebb5<-((seb5_1^2)+(seb5_2^2))^.5 sebb6<-((seb6_1^2)+(seb6_2^2))^.5 sebb7<-((seb7_1^2)+(seb7_2^2))^.5 sebb8<-((seb8_1^2)+(seb8_2^2))^.5 t1<-((b1_1-b1_2))/sebb1 t2<-((b2_1-b2_2))/sebb2 t3<-((b3_1-b3_2))/sebb3 t4<-((b4_1-b4_2))/sebb4 t5<-((b5_1-b5_2))/sebb5 t6<-((b6_1-b6_2))/sebb6 t7<-((b7_1-b7_2))/sebb7 t8<-((b8_1-b8_2))/sebb8 df1<-model1$df[2]+model2$df[2] p1<-2*round(1-(pt(q=abs(t1), df=df1, lower.tail=TRUE)),3) p2<-2*round(1-(pt(q=abs(t2), df=df1, lower.tail=TRUE)),3) p3<-2*round(1-(pt(q=abs(t3), df=df1, lower.tail=TRUE)),3) p4<-2*round(1-(pt(q=abs(t4), df=df1, lower.tail=TRUE)),3) p5<-2*round(1-(pt(q=abs(t5), df=df1, lower.tail=TRUE)),3) p6<-2*round(1-(pt(q=abs(t6), df=df1, lower.tail=TRUE)),3) p7<-2*round(1-(pt(q=abs(t7), df=df1, lower.tail=TRUE)),3) p8<-2*round(1-(pt(q=abs(t8), df=df1, lower.tail=TRUE)),3) t1<-round((t1),3) t2<-round((t2),3) t3<-round((t3),3) t4<-round((t4),3) t5<-round((t5),3) t6<-round((t6),3) t7<-round((t7),3) t8<-round((t8),3) message("Predictor 1: "," t = ", t1,", p = ", p1) message("Predictor 2: "," t = ", t2,", p = ", p2) message("Predictor 3: "," t = ", t3,", p = ", p3) message("Predictor 4: "," t = ", t4,", p = ", p4) message("Predictor 5: "," t = ", t5,", p = ", p5) message("Predictor 6: "," t = ", t6,", p = ", p6) message("Predictor 7: "," t = ", t7,", p = ", p7) message("Predictor 8: "," t = ", t8,", p = ", p8) } if ((pred1 =="9") && (comps=="raw")){ b1_1<-(model1$coefficients)[2,1] b2_1<-(model1$coefficients)[3,1] b3_1<-(model1$coefficients)[4,1] b4_1<-(model1$coefficients)[5,1] b5_1<-(model1$coefficients)[6,1] b6_1<-(model1$coefficients)[7,1] b7_1<-(model1$coefficients)[8,1] b8_1<-(model1$coefficients)[9,1] b9_1<-(model1$coefficients)[10,1] b1_2<-(model2$coefficients)[2,1] b2_2<-(model2$coefficients)[3,1] b3_2<-(model2$coefficients)[4,1] b4_2<-(model2$coefficients)[5,1] b5_2<-(model2$coefficients)[6,1] b6_2<-(model2$coefficients)[7,1] b7_2<-(model2$coefficients)[8,1] b8_2<-(model2$coefficients)[9,1] b9_2<-(model2$coefficients)[10,1] seb1_1<-(model1$coefficients)[2,2] 
seb2_1<-(model1$coefficients)[3,2] seb3_1<-(model1$coefficients)[4,2] seb4_1<-(model1$coefficients)[5,2] seb5_1<-(model1$coefficients)[6,2] seb6_1<-(model1$coefficients)[7,2] seb7_1<-(model1$coefficients)[8,2] seb8_1<-(model1$coefficients)[9,2] seb9_1<-(model1$coefficients)[10,2] seb1_2<-(model2$coefficients)[2,2] seb2_2<-(model2$coefficients)[3,2] seb3_2<-(model2$coefficients)[4,2] seb4_2<-(model2$coefficients)[5,2] seb5_2<-(model2$coefficients)[6,2] seb6_2<-(model2$coefficients)[7,2] seb7_2<-(model2$coefficients)[8,2] seb8_2<-(model2$coefficients)[9,2] seb9_2<-(model2$coefficients)[10,2] sebb1<-((seb1_1^2)+(seb1_2^2))^.5 sebb2<-((seb2_1^2)+(seb2_2^2))^.5 sebb3<-((seb3_1^2)+(seb3_2^2))^.5 sebb4<-((seb4_1^2)+(seb4_2^2))^.5 sebb5<-((seb5_1^2)+(seb5_2^2))^.5 sebb6<-((seb6_1^2)+(seb6_2^2))^.5 sebb7<-((seb7_1^2)+(seb7_2^2))^.5 sebb8<-((seb8_1^2)+(seb8_2^2))^.5 sebb9<-((seb9_1^2)+(seb9_2^2))^.5 t1<-((b1_1-b1_2))/sebb1 t2<-((b2_1-b2_2))/sebb2 t3<-((b3_1-b3_2))/sebb3 t4<-((b4_1-b4_2))/sebb4 t5<-((b5_1-b5_2))/sebb5 t6<-((b6_1-b6_2))/sebb6 t7<-((b7_1-b7_2))/sebb7 t8<-((b8_1-b8_2))/sebb8 t9<-((b9_1-b9_2))/sebb9 df1<-model1$df[2]+model2$df[2] p1<-2*round(1-(pt(q=abs(t1), df=df1, lower.tail=TRUE)),3) p2<-2*round(1-(pt(q=abs(t2), df=df1, lower.tail=TRUE)),3) p3<-2*round(1-(pt(q=abs(t3), df=df1, lower.tail=TRUE)),3) p4<-2*round(1-(pt(q=abs(t4), df=df1, lower.tail=TRUE)),3) p5<-2*round(1-(pt(q=abs(t5), df=df1, lower.tail=TRUE)),3) p6<-2*round(1-(pt(q=abs(t6), df=df1, lower.tail=TRUE)),3) p7<-2*round(1-(pt(q=abs(t7), df=df1, lower.tail=TRUE)),3) p8<-2*round(1-(pt(q=abs(t8), df=df1, lower.tail=TRUE)),3) p9<-2*round(1-(pt(q=abs(t9), df=df1, lower.tail=TRUE)),3) t1<-round((t1),3) t2<-round((t2),3) t3<-round((t3),3) t4<-round((t4),3) t5<-round((t5),3) t6<-round((t6),3) t7<-round((t7),3) t8<-round((t8),3) t9<-round((t9),3) message("Predictor 1: "," t = ", t1,", p = ", p1) message("Predictor 2: "," t = ", t2,", p = ", p2) message("Predictor 3: "," t = ", t3,", p = ", p3) message("Predictor 4: "," t = ", t4,", p = ", p4) message("Predictor 5: "," t = ", t5,", p = ", p5) message("Predictor 6: "," t = ", t6,", p = ", p6) message("Predictor 7: "," t = ", t7,", p = ", p7) message("Predictor 8: "," t = ", t8,", p = ", p8) message("Predictor 9: "," t = ", t9,", p = ", p9) } if ((pred1 =="10") && (comps=="raw")){ b1_1<-(model1$coefficients)[2,1] b2_1<-(model1$coefficients)[3,1] b3_1<-(model1$coefficients)[4,1] b4_1<-(model1$coefficients)[5,1] b5_1<-(model1$coefficients)[6,1] b6_1<-(model1$coefficients)[7,1] b7_1<-(model1$coefficients)[8,1] b8_1<-(model1$coefficients)[9,1] b9_1<-(model1$coefficients)[10,1] b10_1<-(model1$coefficients)[11,1] b1_2<-(model2$coefficients)[2,1] b2_2<-(model2$coefficients)[3,1] b3_2<-(model2$coefficients)[4,1] b4_2<-(model2$coefficients)[5,1] b5_2<-(model2$coefficients)[6,1] b6_2<-(model2$coefficients)[7,1] b7_2<-(model2$coefficients)[8,1] b8_2<-(model2$coefficients)[9,1] b9_2<-(model2$coefficients)[10,1] b10_2<-(model2$coefficients)[11,1] seb1_1<-(model1$coefficients)[2,2] seb2_1<-(model1$coefficients)[3,2] seb3_1<-(model1$coefficients)[4,2] seb4_1<-(model1$coefficients)[5,2] seb5_1<-(model1$coefficients)[6,2] seb6_1<-(model1$coefficients)[7,2] seb7_1<-(model1$coefficients)[8,2] seb8_1<-(model1$coefficients)[9,2] seb9_1<-(model1$coefficients)[10,2] seb10_1<-(model1$coefficients)[11,2] seb1_2<-(model2$coefficients)[2,2] seb2_2<-(model2$coefficients)[3,2] seb3_2<-(model2$coefficients)[4,2] seb4_2<-(model2$coefficients)[5,2] seb5_2<-(model2$coefficients)[6,2] seb6_2<-(model2$coefficients)[7,2] 
seb7_2<-(model2$coefficients)[8,2] seb8_2<-(model2$coefficients)[9,2] seb9_2<-(model2$coefficients)[10,2] seb10_2<-(model2$coefficients)[11,2] sebb1<-((seb1_1^2)+(seb1_2^2))^.5 sebb2<-((seb2_1^2)+(seb2_2^2))^.5 sebb3<-((seb3_1^2)+(seb3_2^2))^.5 sebb4<-((seb4_1^2)+(seb4_2^2))^.5 sebb5<-((seb5_1^2)+(seb5_2^2))^.5 sebb6<-((seb6_1^2)+(seb6_2^2))^.5 sebb7<-((seb7_1^2)+(seb7_2^2))^.5 sebb8<-((seb8_1^2)+(seb8_2^2))^.5 sebb9<-((seb9_1^2)+(seb9_2^2))^.5 sebb10<-((seb10_1^2)+(seb10_2^2))^.5 t1<-((b1_1-b1_2))/sebb1 t2<-((b2_1-b2_2))/sebb2 t3<-((b3_1-b3_2))/sebb3 t4<-((b4_1-b4_2))/sebb4 t5<-((b5_1-b5_2))/sebb5 t6<-((b6_1-b6_2))/sebb6 t7<-((b7_1-b7_2))/sebb7 t8<-((b8_1-b8_2))/sebb8 t9<-((b9_1-b9_2))/sebb9 t10<-((b10_1-b10_2))/sebb10 df1<-model1$df[2]+model2$df[2] p1<-2*round(1-(pt(q=abs(t1), df=df1, lower.tail=TRUE)),3) p2<-2*round(1-(pt(q=abs(t2), df=df1, lower.tail=TRUE)),3) p3<-2*round(1-(pt(q=abs(t3), df=df1, lower.tail=TRUE)),3) p4<-2*round(1-(pt(q=abs(t4), df=df1, lower.tail=TRUE)),3) p5<-2*round(1-(pt(q=abs(t5), df=df1, lower.tail=TRUE)),3) p6<-2*round(1-(pt(q=abs(t6), df=df1, lower.tail=TRUE)),3) p7<-2*round(1-(pt(q=abs(t7), df=df1, lower.tail=TRUE)),3) p8<-2*round(1-(pt(q=abs(t8), df=df1, lower.tail=TRUE)),3) p9<-2*round(1-(pt(q=abs(t9), df=df1, lower.tail=TRUE)),3) p10<-2*round(1-(pt(q=abs(t10), df=df1, lower.tail=TRUE)),3) t1<-round((t1),3) t2<-round((t2),3) t3<-round((t3),3) t4<-round((t4),3) t5<-round((t5),3) t6<-round((t6),3) t7<-round((t7),3) t8<-round((t8),3) t9<-round((t9),3) t10<-round((t10),3) message("Predictor 1: "," t = ", t1,", p = ", p1) message("Predictor 2: "," t = ", t2,", p = ", p2) message("Predictor 3: "," t = ", t3,", p = ", p3) message("Predictor 4: "," t = ", t4,", p = ", p4) message("Predictor 5: "," t = ", t5,", p = ", p5) message("Predictor 6: "," t = ", t6,", p = ", p6) message("Predictor 7: "," t = ", t7,", p = ", p7) message("Predictor 8: "," t = ", t8,", p = ", p8) message("Predictor 9: "," t = ", t9,", p = ", p9) message("Predictor 10: "," t = ", t10,", p = ", p10) } }
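# Editorial note: every branch above implements the same independent-samples
# coefficient test, t = (b_1 - b_2) / sqrt(se_1^2 + se_2^2) on df_1 + df_2
# residual degrees of freedom, with coefficients replaced by their absolute
# values when comps = "abs". The function below is a hypothetical vectorised
# sketch of that computation, not part of the package; it assumes, as above,
# that 'model1' and 'model2' are summary() objects from lm() fits sharing the
# same predictors. It rounds p after doubling, whereas the branches above
# round before doubling.
compare_coefs_sketch <- function(model1, model2, comps = c("raw", "abs")) {
  comps <- match.arg(comps)
  b1 <- model1$coefficients[-1, 1]; se1 <- model1$coefficients[-1, 2]
  b2 <- model2$coefficients[-1, 1]; se2 <- model2$coefficients[-1, 2]
  if (comps == "abs") { b1 <- abs(b1); b2 <- abs(b2) }
  t_stat <- (b1 - b2) / sqrt(se1^2 + se2^2)   # pooled-SE t statistic
  if (comps == "abs") t_stat <- abs(t_stat)
  df_pooled <- model1$df[2] + model2$df[2]    # summed residual df
  p <- 2 * pt(abs(t_stat), df = df_pooled, lower.tail = FALSE)
  data.frame(t = round(t_stat, 3), p = round(p, 3))
}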
/scratch/gouwar.j/cran-all/cranData/BetterReg/R/indbcomp.R
#'Compute squared semi partial correlations for Multiple Regression #' #'@param model name of model #'@param pred number of predictors #' #'@examples #'mymodel<-lm(y~x1+x2+x3+x4+x5, data=testreg) #'parts(model=mymodel, pred=5) #' #'@return Squared semipartial correlations for MRC with up to 10 predictors #'@export #' #' parts<-function(model=NULL, pred=NULL){ values<-summary(model) R2<-values$r.squared[1] dfr<-values$df[2] if (pred=="2"){ t1<-values$coefficients[2,3] t2<-values$coefficients[3,3] #Calculates the sr2 values using the formula earlier in the handout sr1<-round(((t1^2)/dfr)*(1-R2),3) sr2<-round(((t2^2)/dfr)*(1-R2),3) #Calculates semi partial corr semi1<-round(sqrt(sr1),3) semi2<-round(sqrt(sr2),3) message("Predictor 1: semi partial = ", semi1, "; squared semipartial = ",sr1) message("Predictor 2: semi partial = ", semi2, "; squared semipartial = ",sr2) } if (pred=="3"){ t1<-values$coefficients[2,3] t2<-values$coefficients[3,3] t3<-values$coefficients[4,3] #Calculates the sr2 values using the formula earlier in the handout sr1<-round(((t1^2)/dfr)*(1-R2),3) sr2<-round(((t2^2)/dfr)*(1-R2),3) sr3<-round(((t3^2)/dfr)*(1-R2),3) #Calculates semi partial corr semi1<-round(sqrt(sr1),3) semi2<-round(sqrt(sr2),3) semi3<-round(sqrt(sr3),3) message("Predictor 1: semi partial = ", semi1, "; squared semipartial = ",sr1) message("Predictor 2: semi partial = ", semi2, "; squared semipartial = ",sr2) message("Predictor 3: semi partial = ", semi3, "; squared semipartial = ",sr3) } if (pred=="4"){ t1<-values$coefficients[2,3] t2<-values$coefficients[3,3] t3<-values$coefficients[4,3] t4<-values$coefficients[5,3] #Calculates the sr2 values using the formula earlier in the handout sr1<-round(((t1^2)/dfr)*(1-R2),3) sr2<-round(((t2^2)/dfr)*(1-R2),3) sr3<-round(((t3^2)/dfr)*(1-R2),3) sr4<-round(((t4^2)/dfr)*(1-R2),3) #Calculates semi partial corr semi1<-round(sqrt(sr1),3) semi2<-round(sqrt(sr2),3) semi3<-round(sqrt(sr3),3) semi4<-round(sqrt(sr4),3) message("Predictor 1: semi partial = ", semi1, "; squared semipartial = ",sr1) message("Predictor 2: semi partial = ", semi2, "; squared semipartial = ",sr2) message("Predictor 3: semi partial = ", semi3, "; squared semipartial = ",sr3) message("Predictor 4: semi partial = ", semi4, "; squared semipartial = ",sr4) } if (pred=="5"){ t1<-values$coefficients[2,3] t2<-values$coefficients[3,3] t3<-values$coefficients[4,3] t4<-values$coefficients[5,3] t5<-values$coefficients[6,3] #Calculates the sr2 values using the formula earlier in the handout sr1<-round(((t1^2)/dfr)*(1-R2),3) sr2<-round(((t2^2)/dfr)*(1-R2),3) sr3<-round(((t3^2)/dfr)*(1-R2),3) sr4<-round(((t4^2)/dfr)*(1-R2),3) sr5<-round(((t5^2)/dfr)*(1-R2),3) #Calculates semi partial corr semi1<-round(sqrt(sr1),3) semi2<-round(sqrt(sr2),3) semi3<-round(sqrt(sr3),3) semi4<-round(sqrt(sr4),3) semi5<-round(sqrt(sr5),3) message("Predictor 1: semi partial = ", semi1, "; squared semipartial = ",sr1) message("Predictor 2: semi partial = ", semi2, "; squared semipartial = ",sr2) message("Predictor 3: semi partial = ", semi3, "; squared semipartial = ",sr3) message("Predictor 4: semi partial = ", semi4, "; squared semipartial = ",sr4) message("Predictor 5: semi partial = ", semi5, "; squared semipartial = ",sr5) } if (pred=="6"){ t1<-values$coefficients[2,3] t2<-values$coefficients[3,3] t3<-values$coefficients[4,3] t4<-values$coefficients[5,3] t5<-values$coefficients[6,3] t6<-values$coefficients[7,3] #Calculates the sr2 values using the formula earlier in the handout sr1<-round(((t1^2)/dfr)*(1-R2),3) 
sr2<-round(((t2^2)/dfr)*(1-R2),3) sr3<-round(((t3^2)/dfr)*(1-R2),3) sr4<-round(((t4^2)/dfr)*(1-R2),3) sr5<-round(((t5^2)/dfr)*(1-R2),3) sr6<-round(((t6^2)/dfr)*(1-R2),3) #Calculates semi partial corr semi1<-round(sqrt(sr1),3) semi2<-round(sqrt(sr2),3) semi3<-round(sqrt(sr3),3) semi4<-round(sqrt(sr4),3) semi5<-round(sqrt(sr5),3) semi6<-round(sqrt(sr6),3) message("Predictor 1: semi partial = ", semi1, "; squared semipartial = ",sr1) message("Predictor 2: semi partial = ", semi2, "; squared semipartial = ",sr2) message("Predictor 3: semi partial = ", semi3, "; squared semipartial = ",sr3) message("Predictor 4: semi partial = ", semi4, "; squared semipartial = ",sr4) message("Predictor 5: semi partial = ", semi5, "; squared semipartial = ",sr5) message("Predictor 6: semi partial = ", semi6, "; squared semipartial = ",sr6)} if (pred=="7"){ t1<-values$coefficients[2,3] t2<-values$coefficients[3,3] t3<-values$coefficients[4,3] t4<-values$coefficients[5,3] t5<-values$coefficients[6,3] t6<-values$coefficients[7,3] t7<-values$coefficients[8,3] #Calculates the sr2 values using the formula earlier in the handout sr1<-round(((t1^2)/dfr)*(1-R2),3) sr2<-round(((t2^2)/dfr)*(1-R2),3) sr3<-round(((t3^2)/dfr)*(1-R2),3) sr4<-round(((t4^2)/dfr)*(1-R2),3) sr5<-round(((t5^2)/dfr)*(1-R2),3) sr6<-round(((t6^2)/dfr)*(1-R2),3) sr7<-round(((t7^2)/dfr)*(1-R2),3) #Calculates semi partial corr semi1<-round(sqrt(sr1),3) semi2<-round(sqrt(sr2),3) semi3<-round(sqrt(sr3),3) semi4<-round(sqrt(sr4),3) semi5<-round(sqrt(sr5),3) semi6<-round(sqrt(sr6),3) semi7<-round(sqrt(sr7),3) message("Predictor 1: semi partial = ", semi1, "; squared semipartial = ",sr1) message("Predictor 2: semi partial = ", semi2, "; squared semipartial = ",sr2) message("Predictor 3: semi partial = ", semi3, "; squared semipartial = ",sr3) message("Predictor 4: semi partial = ", semi4, "; squared semipartial = ",sr4) message("Predictor 5: semi partial = ", semi5, "; squared semipartial = ",sr5) message("Predictor 6: semi partial = ", semi6, "; squared semipartial = ",sr6) message("Predictor 7: semi partial = ", semi7, "; squared semipartial = ",sr7)} if (pred=="8"){ t1<-values$coefficients[2,3] t2<-values$coefficients[3,3] t3<-values$coefficients[4,3] t4<-values$coefficients[5,3] t5<-values$coefficients[6,3] t6<-values$coefficients[7,3] t7<-values$coefficients[8,3] t8<-values$coefficients[9,3] #Calculates the sr2 values using the formula earlier in the handout sr1<-round(((t1^2)/dfr)*(1-R2),3) sr2<-round(((t2^2)/dfr)*(1-R2),3) sr3<-round(((t3^2)/dfr)*(1-R2),3) sr4<-round(((t4^2)/dfr)*(1-R2),3) sr5<-round(((t5^2)/dfr)*(1-R2),3) sr6<-round(((t6^2)/dfr)*(1-R2),3) sr7<-round(((t7^2)/dfr)*(1-R2),3) sr8<-round(((t8^2)/dfr)*(1-R2),3) #Calculates semi partial corr semi1<-round(sqrt(sr1),3) semi2<-round(sqrt(sr2),3) semi3<-round(sqrt(sr3),3) semi4<-round(sqrt(sr4),3) semi5<-round(sqrt(sr5),3) semi6<-round(sqrt(sr6),3) semi7<-round(sqrt(sr7),3) semi8<-round(sqrt(sr8),3) message("Predictor 1: semi partial = ", semi1, "; squared semipartial = ",sr1) message("Predictor 2: semi partial = ", semi2, "; squared semipartial = ",sr2) message("Predictor 3: semi partial = ", semi3, "; squared semipartial = ",sr3) message("Predictor 4: semi partial = ", semi4, "; squared semipartial = ",sr4) message("Predictor 5: semi partial = ", semi5, "; squared semipartial = ",sr5) message("Predictor 6: semi partial = ", semi6, "; squared semipartial = ",sr6) message("Predictor 7: semi partial = ", semi7, "; squared semipartial = ",sr7) message("Predictor 8: semi partial = ", semi8, "; 
squared semipartial = ",sr8)} if (pred=="9"){ t1<-values$coefficients[2,3] t2<-values$coefficients[3,3] t3<-values$coefficients[4,3] t4<-values$coefficients[5,3] t5<-values$coefficients[6,3] t6<-values$coefficients[7,3] t7<-values$coefficients[8,3] t8<-values$coefficients[9,3] t9<-values$coefficients[10,3] #Calculates the sr2 values using the formula earlier in the handout sr1<-round(((t1^2)/dfr)*(1-R2),3) sr2<-round(((t2^2)/dfr)*(1-R2),3) sr3<-round(((t3^2)/dfr)*(1-R2),3) sr4<-round(((t4^2)/dfr)*(1-R2),3) sr5<-round(((t5^2)/dfr)*(1-R2),3) sr6<-round(((t6^2)/dfr)*(1-R2),3) sr7<-round(((t7^2)/dfr)*(1-R2),3) sr8<-round(((t8^2)/dfr)*(1-R2),3) sr9<-round(((t9^2)/dfr)*(1-R2),3) #Calculates semi partial corr semi1<-round(sqrt(sr1),3) semi2<-round(sqrt(sr2),3) semi3<-round(sqrt(sr3),3) semi4<-round(sqrt(sr4),3) semi5<-round(sqrt(sr5),3) semi6<-round(sqrt(sr6),3) semi7<-round(sqrt(sr7),3) semi8<-round(sqrt(sr8),3) semi9<-round(sqrt(sr9),3) message("Predictor 1: semi partial = ", semi1, "; squared semipartial = ",sr1) message("Predictor 2: semi partial = ", semi2, "; squared semipartial = ",sr2) message("Predictor 3: semi partial = ", semi3, "; squared semipartial = ",sr3) message("Predictor 4: semi partial = ", semi4, "; squared semipartial = ",sr4) message("Predictor 5: semi partial = ", semi5, "; squared semipartial = ",sr5) message("Predictor 6: semi partial = ", semi6, "; squared semipartial = ",sr6) message("Predictor 7: semi partial = ", semi7, "; squared semipartial = ",sr7) message("Predictor 8: semi partial = ", semi8, "; squared semipartial = ",sr8) message("Predictor 9: semi partial = ", semi9, "; squared semipartial = ",sr9)} if (pred=="10"){ t1<-values$coefficients[2,3] t2<-values$coefficients[3,3] t3<-values$coefficients[4,3] t4<-values$coefficients[5,3] t5<-values$coefficients[6,3] t6<-values$coefficients[7,3] t7<-values$coefficients[8,3] t8<-values$coefficients[9,3] t9<-values$coefficients[10,3] t10<-values$coefficients[11,3] #Calculates the sr2 values using the formula earlier in the handout sr1<-round(((t1^2)/dfr)*(1-R2),3) sr2<-round(((t2^2)/dfr)*(1-R2),3) sr3<-round(((t3^2)/dfr)*(1-R2),3) sr4<-round(((t4^2)/dfr)*(1-R2),3) sr5<-round(((t5^2)/dfr)*(1-R2),3) sr6<-round(((t6^2)/dfr)*(1-R2),3) sr7<-round(((t7^2)/dfr)*(1-R2),3) sr8<-round(((t8^2)/dfr)*(1-R2),3) sr9<-round(((t9^2)/dfr)*(1-R2),3) sr10<-round(((t10^2)/dfr)*(1-R2),3) #Calculates semi partial corr semi1<-round(sqrt(sr1),3) semi2<-round(sqrt(sr2),3) semi3<-round(sqrt(sr3),3) semi4<-round(sqrt(sr4),3) semi5<-round(sqrt(sr5),3) semi6<-round(sqrt(sr6),3) semi7<-round(sqrt(sr7),3) semi8<-round(sqrt(sr8),3) semi9<-round(sqrt(sr9),3) semi10<-round(sqrt(sr10),3) message("Predictor 1: semi partial = ", semi1, "; squared semipartial = ",sr1) message("Predictor 2: semi partial = ", semi2, "; squared semipartial = ",sr2) message("Predictor 3: semi partial = ", semi3, "; squared semipartial = ",sr3) message("Predictor 4: semi partial = ", semi4, "; squared semipartial = ",sr4) message("Predictor 5: semi partial = ", semi5, "; squared semipartial = ",sr5) message("Predictor 6: semi partial = ", semi6, "; squared semipartial = ",sr6) message("Predictor 7: semi partial = ", semi7, "; squared semipartial = ",sr7) message("Predictor 8: semi partial = ", semi8, "; squared semipartial = ",sr8) message("Predictor 9: semi partial = ", semi9, "; squared semipartial = ",sr9) message("Predictor 10: semi partial = ", semi10, "; squared semipartial = ",sr10)} }
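# Editorial note: each branch above computes, for predictor i, the squared
# semipartial correlation sr_i^2 = (t_i^2 / df_res) * (1 - R^2) and its square
# root. A hypothetical vectorised sketch (illustrative, not part of the
# package):
parts_sketch <- function(model) {
  values <- summary(model)
  tvals <- values$coefficients[-1, 3]   # t statistics, intercept row dropped
  sr2 <- (tvals^2 / values$df[2]) * (1 - values$r.squared)
  data.frame(semipartial = round(sqrt(sr2), 3), squared_semipartial = round(sr2, 3))
}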
/scratch/gouwar.j/cran-all/cranData/BetterReg/R/parts.R
#'Pseudo R-square Values for Binomial Logistic Regression #' #'@param model name of model #'@examples #'mymodel<-glm(dv~iv1+iv2+iv3+iv4, testlog,family = binomial()) #'pseudo(model=mymodel) #' #'@return Pseudo R-square Values for Logistic Regression #'@export pseudo<-function(model=NULL) { mymodel<-model nulldev<-mymodel$null.deviance moddev<-mymodel$deviance n<-mymodel$df.null+1 R2_L<-round(1-(moddev/nulldev),3) R2_M <-round(1-exp(-(nulldev-moddev)/n),3) R2_N <-round(R2_M/(1-exp(-(nulldev/n))),3) message("Likelihood Ratio R-squared (McFadden, Recommended) = ",R2_L) message("Cox-Snell R-squared = ",R2_M) message("Nagelkerke R-squared = ",R2_N) }
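# Editorial note: a self-contained consistency check for the formulas above.
# For a binomial GLM with a 0/1 response the deviance equals -2 * logLik, so
# all three pseudo R-squared values can be recovered from logLik() directly.
# The data below are simulated purely for illustration; 'pseudo_check' is a
# hypothetical helper, not part of the package.
pseudo_check <- function(n = 200) {
  set.seed(1)
  d <- data.frame(x = rnorm(n))
  d$y <- rbinom(n, 1, plogis(d$x))
  fit <- glm(y ~ x, data = d, family = binomial())
  ll1 <- as.numeric(logLik(fit))                  # fitted model
  ll0 <- as.numeric(logLik(update(fit, . ~ 1)))   # intercept-only model
  c(mcfadden   = 1 - ll1 / ll0,
    cox_snell  = 1 - exp(2 * (ll0 - ll1) / n),
    nagelkerke = (1 - exp(2 * (ll0 - ll1) / n)) / (1 - exp(2 * ll0 / n)))
}
# pseudo_check() should agree with pseudo() on the same fit, up to rounding.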
/scratch/gouwar.j/cran-all/cranData/BetterReg/R/pseudo.R
#'Compute tolerance for Multiple Regression #'@param model name of model #'@examples #'mymodel<-lm(y~x1+x2+x3+x4+x5, data=testreg) #'tolerance(model=mymodel) #'@return Tolerance for MR #'@export #' tolerance<-function(model=NULL){ 1/car::vif(model)}
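# Editorial note: tolerance is the reciprocal of the variance inflation
# factor, i.e. tolerance_i = 1 - R_i^2, where R_i^2 is from regressing
# predictor i on the remaining predictors; values near 0 flag collinearity.
# Hypothetical usage, assuming the 'testreg' data from the examples:
# mymodel <- lm(y ~ x1 + x2 + x3 + x4 + x5, data = testreg)
# tolerance(model = mymodel)                    # named vector, one value per predictor
# all.equal(tolerance(mymodel), 1 / car::vif(mymodel))   # TRUE by construction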
/scratch/gouwar.j/cran-all/cranData/BetterReg/R/tolerance.R
# Generated by using Rcpp::compileAttributes() -> do not edit by hand # Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393 bevimed_mc <- function(its, y, var_block_start_index, var_block_stop_index, cases, counts, min_ac, tau_shape1, tau_shape2, pi_shape1, pi_shape2, z_shape1, z_shape2, z0, estimate_logit_z_rate, logit_z_rates, logit_z_rate_proposal_sds, z_weights, estimate_phi, log_phis, log_phi_mean, log_phi_sd, log_phi_proposal_sds, t, swaps, annealing, tandem_variant_updates, y1_case_block_start_index, y1_case_block_stop_index, y1_variants, return_z_trace, return_x_trace) { .Call('_BeviMed_bevimed_mc', PACKAGE = 'BeviMed', its, y, var_block_start_index, var_block_stop_index, cases, counts, min_ac, tau_shape1, tau_shape2, pi_shape1, pi_shape2, z_shape1, z_shape2, z0, estimate_logit_z_rate, logit_z_rates, logit_z_rate_proposal_sds, z_weights, estimate_phi, log_phis, log_phi_mean, log_phi_sd, log_phi_proposal_sds, t, swaps, annealing, tandem_variant_updates, y1_case_block_start_index, y1_case_block_stop_index, y1_variants, return_z_trace, return_x_trace) }
/scratch/gouwar.j/cran-all/cranData/BeviMed/R/RcppExports.R
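# Editorial note: the generated wrapper above binds its arguments to
# '_BeviMed_bevimed_mc' by position, not by name. The internal caller
# 'call_cpp' (defined later in this package's source) passes its arguments
# unnamed in the same order, so its differently named parameters (e.g.
# 'omega_shape' and 'logit_omegas') feed the 'z_shape1'/'z_shape2' and
# 'logit_z_rates' slots declared here; only the argument order matters.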
subset_bevimed_m <- function(G, min_ac=1L, variant_weights=NULL, ...) { vars <- subset_variants(G=G, min_ac=min_ac, return_variants=TRUE) bevimed_m( G=G[,vars,drop=FALSE], min_ac=min_ac, variant_weights=if (is.null(variant_weights)) NULL else variant_weights[vars], ... ) } #' @title Bayesian Evaluation of Variant Involvement in Mendelian Disease #' #' @description Infer probabilities of association between disease label and locus and posterior parameter values under BeviMed model. #' #' @template y #' @template G_matrix #' @template ploidy #' @template prior_prob_association #' @template prior_prob_dominant #' @param dominant_args Arguments to pass to \code{\link{bevimed_m}} conditioning on dominant inheritance. #' @param recessive_args Arguments to pass to \code{\link{bevimed_m}} conditioning on recessive inheritance. #' @param ... Arguments to be passed to \code{\link{bevimed_m}} for both modes of inheritance. #' @return \code{BeviMed} object containing results of inference. #' @export #' @seealso \code{\link{prob_association}}, \code{\link{bevimed_m}}, \code{\link{summary.BeviMed}}, \code{\link{bevimed_polytomous}} #' @template paper bevimed <- function( y, G, ploidy=rep(2L, length(y)), prior_prob_association=0.01, prior_prob_dominant=0.5, dominant_args=NULL, recessive_args=NULL, ... ) { if (prior_prob_association > 1 | prior_prob_association < 0) stop("'prior_prob_association' must be between 0 and 1") if (prior_prob_dominant > 1 | prior_prob_dominant < 0) stop("'prior_prob_dominant' must be between 0 and 1") priors <- prior_prob_association * c(dominant=prior_prob_dominant, recessive=1-prior_prob_dominant) bevimed_polytomous( y=y, G=G, ploidy=ploidy, prior_prob_association=priors, variant_sets=list(dominant=seq(length.out=ncol(G)), recessive=seq(length.out=ncol(G))), moi=c("dominant","recessive"), model_specific_args=list(dominant=dominant_args, recessive=recessive_args), ... ) } #' @title Calculate marginal probability of observed case-control status y under model gamma = 0 #' @description Marginal probability calculated exactly by integration. #' @template y #' @param tau0_shape Beta shape hyper-priors for prior on rate of case labels #' @return Log marginal likelihood. #' @export #' @seealso \code{\link{bevimed}}, \code{\link{gamma1_evidence}} gamma0_evidence <- function( y, tau0_shape=c(1, 1) ) { lbeta(sum(y)+tau0_shape[1], length(y)-sum(y)+tau0_shape[2]) - lbeta(tau0_shape[1], tau0_shape[2]) } #' @title Extract evidence for model gamma = 1 #' #' @description Extract evidence from \code{BeviMed_m} object. #' @template x_BeviMed_m #' @return Log marginal likelihood. #' @export #' @seealso \code{\link{gamma1_evidence}}, \code{\link{bevimed_m}} extract_gamma1_evidence <- function(x) { stopifnot(class(x) == "BeviMed_m") sum_ML_over_PP(x[["traces"]][["y_log_lik_t_equals_1"]], x[["parameters"]][["temperatures"]]) } #' @title Calculate evidence under model gamma = 1 #' #' @description Use \code{\link{bevimed_m}} to perform inference under model gamma = 1 and return only the log evidence/integrated likelihood. #' #' @template dots_to_bevimed_m #' @return Log marginal likelihood. #' @export #' @seealso \code{\link{bevimed_m}}, \code{\link{extract_gamma1_evidence}} gamma1_evidence <- function( ... ) { bv <- subset_bevimed_m( return_z_trace=FALSE, return_x_trace=FALSE, ... ) extract_gamma1_evidence(bv) } #' @title Extract expected number of explained cases #' #' @description Extract expected number of cases explained by pathogenic configurations of alleles from \code{BeviMed_m} object. 
#' #' @template x_BeviMed_m #' @return Numeric value. #' @export #' @seealso \code{\link{expected_explained}}, \code{\link{bevimed_m}} extract_expected_explained <- function(x) { stopifnot(class(x) == "BeviMed_m") if (!("x" %in% names(x[["traces"]])) || nrow(x[["traces"]][["x"]]) == 0) stop("Must make sure to set 'return_x_trace=TRUE' in call to 'bevimed_m' to use this function") mean(apply(x$traces$x[,x$parameters$y,drop=FALSE], 1, sum)) } #' @title Calculate expected number of explained cases #' #' @description Use \code{\link{bevimed_m}} to perform inference under model gamma = 1 and return only the expected number of cases explained by pathogenic allele configurations. #' #' @template dots_to_bevimed_m #' @return Numeric value. #' @export #' @seealso \code{\link{bevimed_m}}, \code{\link{extract_expected_explained}} expected_explained <- function(...) { extract_expected_explained(subset_bevimed_m( return_z_trace=FALSE, return_x_trace=TRUE, ... )) } #' @title Extract expected number of pathogenic variants in cases #' @description Extract expected number of variants involved in cases explained by pathogenic configurations of alleles from \code{BeviMed_m} object. #' #' @template x_BeviMed_m #' @return Numeric value. #' @export #' @seealso \code{\link{explaining_variants}}, \code{\link{bevimed_m}} extract_explaining_variants <- function(x) { stopifnot(class(x) == "BeviMed_m") if (!("z" %in% names(x[["traces"]])) || nrow(x[["traces"]][["z"]]) == 0) stop("Must make sure to set 'return_z_trace=TRUE' and 'return_x_trace=TRUE' in call to 'bevimed_m' to use this function") G <- x$parameters$G logicalG <- matrix(G > 0, nrow=nrow(G), ncol=ncol(G)) y <- x$parameters$y mean(mapply( SIMPLIFY=TRUE, FUN=function(z, x) sum(apply(logicalG[y & x, z, drop=FALSE], 2, any)), split(x$traces$z[,seq(to=ncol(x$traces$z), length.out=x$parameters$k),drop=FALSE], seq(length.out=nrow(x$traces$z))), split(x$traces$x, seq(length.out=nrow(x$traces$z))) )) } #' @title Calculate expected number of pathogenic variants in cases #' #' @description Use \code{\link{bevimed_m}} to perform inference under model gamma = 1 and return only the expected number of pathogenic variants in cases. #' #' @template dots_to_bevimed_m #' @return Numeric value. #' @export #' @seealso \code{\link{extract_explaining_variants}}, \code{\link{bevimed_m}} explaining_variants <- function(...) { extract_explaining_variants(subset_bevimed_m(return_z_trace=TRUE, return_x_trace=TRUE, ...)) } #' Calculate log Bayes factor between an association model with a given mode of inheritance and model gamma = 0 #' #' @description Compute the log Bayes factor between an association model and model gamma = 0. #' #' @template y #' @template dots_to_bevimed_m #' @template tau0_shape #' @return Log Bayes factor. #' @export #' @seealso \code{\link{bevimed_m}}, \code{\link{prob_association_m}} log_BF <- function( y, tau0_shape=c(1, 1), ... ) { gamma1_evidence( y=y, ... ) - gamma0_evidence(y, tau0_shape=tau0_shape) } #' @title Calculate probability of association for one mode of inheritance #' #' @description Equivalent to \code{\link{prob_association}} where the prior probability of one mode of inheritance is 1. This function is faster, as it only calls \code{\link{bevimed_m}} once. #' #' @template y #' @template min_ac #' @template prior_prob_association #' @param ... Other arguments to pass to \code{\link{log_BF}}. #' @return Probability value.
#' @export #' @seealso \code{\link{log_BF}}, \code{\link{prob_association}}, \code{\link{bevimed_m}} prob_association_m <- function( y, min_ac=1L, prior_prob_association=0.01, ... ) { bf <- log_BF(y, min_ac=min_ac, ...) num <- prior_prob_association*exp(bf) num/(1-prior_prob_association+num) } #' @title Extract the posterior probability of association #' #' @description Get posterior probability of association as numeric value, or optionally as numeric vector of length two with probabilities broken down by mode of inheritance (by passing \code{by_model=TRUE}), from a \code{BeviMed} object. #' @template x_BeviMed #' @template by_model #' @return Probability values. #' @export #' @seealso \code{\link{prob_association}}, \code{\link{bevimed}} extract_prob_association <- function(x, by_model=FALSE) { stopifnot(class(x) == "BeviMed") priors_by_model <- x[["parameters"]][["prior_prob_association"]] evidence_by_model <- sapply(x[["models"]], extract_gamma1_evidence) g0_ev <- gamma0_evidence(y=x[["parameters"]][["y"]], tau0_shape=x[["parameters"]][["tau0_shape"]]) modal_model_ev <- max(c(evidence_by_model, g0_ev)) numerator <- priors_by_model * exp(evidence_by_model-modal_model_ev) numerator_g0 <- (1-sum(priors_by_model)) * exp(g0_ev-modal_model_ev) denom <- sum(c(numerator, numerator_g0)) probs_by_model <- numerator/denom if (by_model) probs_by_model else sum(probs_by_model) } #' @title Calculate probability of association #' #' @description Calculate probability of an association between case/control label and allele configuration, optionally broken down by mode of inheritance/model. #' @template by_model #' @param ... Arguments to pass to \code{\link{bevimed}}. #' @return Probability of association. #' @export #' @seealso \code{\link{bevimed}}, \code{\link{extract_prob_association}} prob_association <- function( by_model=FALSE, ... ) { bv <- bevimed(..., return_z_trace=FALSE, return_x_trace=FALSE) extract_prob_association(bv, by_model=by_model) } variant_marginals <- function(var_probs_by_model, variant_sets, k) { if (k == 0) numeric(0) else apply(matrix(nrow=k, ncol=length(variant_sets), data=mapply( SIMPLIFY=TRUE, FUN=function(probs, inds) { p <- rep(0, k) p[inds] <- probs p }, var_probs_by_model, variant_sets )), 1, sum) } #' @title Extract variant marginal probabilities of pathogenicity #' #' @description Extract the marginal probability of pathogenicity for individual variants from \code{BeviMed} object, optionally broken down by mode of inheritance/model. #' #' @template x_BeviMed #' @template by_model #' @return A vector of probabilities of pathogenicity for individual variants, or if \code{by_model} is \code{TRUE}, then a matrix of probabilities, with rows corresponding to modes of inheritance and columns to variants. #' @export #' @seealso \code{\link{prob_pathogenic}}, \code{\link{bevimed}} extract_prob_pathogenic <- function(x, by_model=TRUE) { probs <- extract_prob_association(x, by_model=TRUE) var_probs_by_model <- mapply(SIMPLIFY=FALSE, FUN="*", probs, lapply(x$models, extract_conditional_prob_pathogenic)) if (by_model) { var_probs_by_model } else { k <- ncol(x[["parameters"]][["G"]]) variant_marginals(var_probs_by_model, x[["parameters"]][["variant_sets"]], k) } } #' Calculate variant marginal probabilities of pathogenicity #' #' Calls \code{\link{bevimed}} and \code{\link{extract_prob_pathogenic}} to obtain marginal probabilities of pathogenicity. #' #' @template by_model #' @param ... Arguments to pass to \code{\link{bevimed}}.
#' @return If \code{by_model} is \code{FALSE}, a vector of probabilities of pathogenicity for each variant, otherwise a list of vectors of probabilities of pathogenicity conditional on each compared association model. #' @export #' @seealso \code{\link{extract_prob_pathogenic}}, \code{\link{bevimed}} prob_pathogenic <- function( by_model=FALSE, ... ) { bv <- bevimed(..., return_z_trace=TRUE, return_x_trace=FALSE) extract_prob_pathogenic(bv, by_model=by_model) } t1_z_trace <- function(x) { k <- x[["parameters"]][["k"]] x[["traces"]][["z"]][,k*(length(x[["parameters"]][["temperatures"]])-1L)+seq(length.out=k),drop=FALSE] } #' @title Extract probability of pathogenicity for variant conditional on a given association model #' #' @description Extract the probability of pathogenicity for individual variants from a \code{BeviMed_m} object. #' #' @template x_BeviMed_m #' @return Vector of probabilities of pathogenicity for individual variants. #' @export #' @seealso \code{\link{conditional_prob_pathogenic}}, \code{\link{bevimed_m}} extract_conditional_prob_pathogenic <- function(x) { if (x[["parameters"]][["k"]] == 0) numeric(0) else apply(t1_z_trace(x), 2, mean) } #' Calculate probability of pathogenicity for variants conditional on mode of inheritance. #' #' Calls \code{\link{bevimed_m}} and \code{\link{extract_conditional_prob_pathogenic}} to obtain probabilities of pathogenicity. #' @template dots_to_bevimed_m #' @return Probabilities of pathogenicity. #' @export #' @seealso \code{\link{extract_conditional_prob_pathogenic}}, \code{\link{bevimed_m}} conditional_prob_pathogenic <- function( ... ) { bv <- bevimed_m( return_z_trace=TRUE, return_x_trace=FALSE, ... ) extract_conditional_prob_pathogenic(bv) } #' @title Model selection for multiple association models #' @description Apply bevimed to the no-association model (gamma = 0) and multiple association models for different sets of variants, for instance corresponding to different functional consequences. #' @template y #' @template G_matrix #' @template ploidy #' @param variant_sets List of integer vectors corresponding to sets of indices of \code{G}, each of which is to be considered in a model explaining the phenotype, \code{y}. #' @template prior_prob_association #' @template tau0_shape #' @param moi Character vector giving mode of inheritance for each model. #' @param model_specific_args List of named lists of parameters to use in \code{\link{bevimed_m}} applications for specific models. #' @param ... Other arguments to pass to \code{\link{bevimed_m}}. #' @return \code{BeviMed} object containing results of inference. #' @seealso \code{\link{bevimed_m}}, \code{\link{bevimed}} #' @template paper #' @export bevimed_polytomous <- function( y, G, ploidy=rep(2L, length(y)), variant_sets, prior_prob_association=rep(0.01/length(variant_sets), length(variant_sets)), tau0_shape=c(1, 1), moi=rep("dominant", length(variant_sets)), model_specific_args=vector(mode="list", length=length(variant_sets)), ...
) { if (sum(prior_prob_association) > 1 | sum(prior_prob_association) < 0) stop("The sum of 'prior_prob_association' must be between 0 and 1") if (any(prior_prob_association > 1) | any(prior_prob_association < 0)) stop("Each element of 'prior_prob_association' must be between 0 and 1") n_models <- length(variant_sets) if (length(model_specific_args) != n_models) stop("The length of 'variant_sets' must be the same as 'model_specific_args'") if (length(prior_prob_association) != n_models) stop("The length of 'variant_sets' must be the same as 'prior_prob_association'") if (length(moi) != n_models) stop("The length of 'variant_sets' must be the same as 'moi'") if (!all(unlist(use.names=FALSE, variant_sets) %in% seq(length.out=ncol(G)))) stop("All indices given in 'variant_sets' must be between one and the number of variants in 'G'") shared <- list(...) structure( class="BeviMed", list( parameters=list( tau0_shape=tau0_shape, prior_prob_association=setNames(nm=names(variant_sets), prior_prob_association), ploidy=ploidy, y=y, G=G, variant_table=to_var_tab(get_G_args(G)), variant_sets=variant_sets, moi=moi ), models=Map( f=function(var_inds, min_ac, args) { do.call(what=bevimed_m, c(list(y=y, G=G[,var_inds,drop=FALSE], min_ac=min_ac), args, shared)) }, variant_sets, lapply(moi, function(m) if (m=="dominant") { 1L } else { if (length(ploidy) == 0) 2L else ploidy }), model_specific_args ) ) ) }
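# Editorial note: a numerical check of gamma0_evidence() defined earlier in
# this file. Under model gamma = 0 the case labels are Bernoulli(tau0) with a
# Beta(a, b) prior on tau0, giving the closed-form log marginal likelihood
# lbeta(s + a, n - s + b) - lbeta(a, b) for s cases among n subjects. The
# helper below is an illustrative sketch, not part of the package.
gamma0_evidence_check <- function(y, tau0_shape = c(1, 1)) {
  s <- sum(y); n <- length(y)
  a <- tau0_shape[1]; b <- tau0_shape[2]
  numeric_integral <- log(stats::integrate(
    function(tau) tau^s * (1 - tau)^(n - s) * stats::dbeta(tau, a, b),
    lower = 0, upper = 1)$value)
  closed_form <- lbeta(s + a, n - s + b) - lbeta(a, b)
  c(numeric = numeric_integral, closed_form = closed_form)  # agree up to quadrature error
}
# gamma0_evidence_check(y = c(TRUE, TRUE, FALSE, TRUE, FALSE))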
/scratch/gouwar.j/cran-all/cranData/BeviMed/R/convenience.R
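# Editorial note: 'prob_association_m' in the file above converts a log Bayes
# factor into a posterior probability of association via Bayes' rule over two
# hypotheses. A standalone sketch of that conversion (illustrative only; the
# function name is hypothetical):
posterior_from_log_bf <- function(log_bf, prior = 0.01) {
  num <- prior * exp(log_bf)
  num / ((1 - prior) + num)
}
# posterior_from_log_bf(log(10))  # ~0.092: a Bayes factor of 10 with a 1% prior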
#' @title R interface to BeviMed c++ MCMC procedure #' @description Allows other functions in the package to call the c++ function passing arguments more succinctly and by name. #' @template samples_per_chain #' @param y Logical vector of subject affectedness status. #' @param block_starts Integer vector of k 0-indexed start positions (with respect to \code{cases} and \code{counts}) for contiguous blocks relating to the k variants. #' @param block_ends Integer vector of k 0-indexed (exclusive) end positions. #' @param cases 0-based vector of case indices with respect to y. #' @param counts Vector of variant counts. #' @template min_ac #' @param tau_shape Beta distribution parameterisation of benign variant configuration rate of affection, q. #' @param pi_shape Beta distribution parameterisation of pathogenic variant configuration rate of affection, p. #' @param omega_shape Beta distribution parameterisation of global rate of pathogenicity of variants in gene given pathogenicity of gene, omega. #' @template temperatures #' @param z0_matrix Matrix of logicals, where the rows are used as initial values of z for the chains. #' @param estimate_omega Logical value determining whether to estimate the parameter omega. #' @param logit_omegas Numeric vector of logit omega values, one value per chain. #' @param logit_omega_proposal_sds Numeric vector of proposal standard deviations for Metropolis-Hastings sampling of logit omega parameter, one value per chain. #' @template variant_weights #' @template estimate_phi #' @param log_phis Numeric vector of log phi values, one value per chain. #' @template log_phi_mean #' @template log_phi_sd #' @param log_phi_proposal_sds Numeric vector of proposal standard deviations for Metropolis-Hastings sampling of log phi parameter, one value per chain. #' @param chain_swaps_per_cycle Number of chain swaps to propose per update cycle. #' @param annealing Logical value determining whether to anneal the chains, e.g. for optimisation. #' @template tandem_variant_updates #' @param comphet_variant_block_starts 0-indexed start positions for contiguous blocks of variants in \code{comphet_variants}. #' @param comphet_variant_block_ends As \code{comphet_variant_block_starts} for (exclusive) stop positions. #' @param comphet_variants Integer vector giving variant numbers (0-based, i.e. between 0 and k-1) from which pairs of variants for tandem updates are picked. #' @template return_z_trace #' @template return_x_trace #' @template burn #' @param check Logical value indicating whether to perform validation on the arguments before calling the c++ function. #' @return Object of class \code{BeviMed_raw}, containing the output of the MCMC sampling.
#' @importFrom Rcpp evalCpp
#' @useDynLib BeviMed
call_cpp <- function(
	samples_per_chain,
	y,
	block_starts,
	block_ends,
	cases,
	counts,
	min_ac,
	tau_shape,
	pi_shape,
	omega_shape,
	temperatures,
	z0_matrix,
	estimate_omega,
	logit_omegas,
	logit_omega_proposal_sds,
	variant_weights,
	estimate_phi,
	log_phis,
	log_phi_mean,
	log_phi_sd,
	log_phi_proposal_sds,
	chain_swaps_per_cycle,
	annealing,
	tandem_variant_updates,
	comphet_variant_block_starts,
	comphet_variant_block_ends,
	comphet_variants,
	return_z_trace,
	return_x_trace,
	burn=0,
	check=TRUE
) {
	if (check) {
		stopifnot(length(omega_shape) == 2 & min(omega_shape) > 0)
		stopifnot(length(tau_shape) == 2 & min(tau_shape) > 0)
		stopifnot(length(pi_shape) == 2 & min(pi_shape) > 0)
		stopifnot(is.matrix(z0_matrix))
		stopifnot(ncol(z0_matrix) == length(variant_weights))
		stopifnot(length(temperatures) == nrow(z0_matrix))
		stopifnot(length(temperatures) == length(logit_omegas))
		stopifnot(length(temperatures) == length(log_phis))
		stopifnot(length(temperatures) == length(logit_omega_proposal_sds))
		stopifnot(length(temperatures) == length(log_phi_proposal_sds))
		stopifnot(estimate_phi | all(log_phis == 0))
		stopifnot(length(block_ends) == length(block_starts))
		stopifnot(block_ends[length(block_ends)] == length(cases))
		stopifnot(length(cases) == length(counts))
		if (tandem_variant_updates > 0 & length(unique(comphet_variants)) < 2)
			stop("Must have more than 1 variant to select from if making tandem updates")
	}
	raw <- bevimed_mc(
		samples_per_chain,
		y,
		block_starts,
		block_ends,
		cases,
		counts,
		if (length(min_ac) == length(y)) min_ac else rep(min_ac, length(y)),
		tau_shape[1], tau_shape[2],
		pi_shape[1], pi_shape[2],
		omega_shape[1], omega_shape[2],
		z0_matrix,
		estimate_omega,
		logit_omegas,
		logit_omega_proposal_sds,
		variant_weights,
		estimate_phi,
		log_phis,
		log_phi_mean,
		log_phi_sd,
		log_phi_proposal_sds,
		temperatures,
		chain_swaps_per_cycle,
		annealing,
		tandem_variant_updates,
		comphet_variant_block_starts,
		comphet_variant_block_ends,
		comphet_variants,
		return_z_trace,
		return_x_trace
	)
	if (burn > 0) {
		raw$traces <- lapply(
			raw$traces,
			function(x) x[-seq(length.out=burn),,drop=FALSE]
		)
		raw$swaps <- lapply(
			raw$swaps,
			function(x) x[-seq(length.out=chain_swaps_per_cycle * burn)]
		)
	}
	structure(class="BeviMed_raw", raw)
}

#' @title Calculate marginal likelihood from power posteriors output
#' @description Calculate the marginal likelihood by summation over power posterior likelihood expectations.
#' @template y_log_lik_t_equals_1_traces
#' @param temperatures Numeric vector of temperatures used to produce \code{y_log_lik_t_equals_1_traces}.
#' @return Numeric value of estimated log marginal likelihood.
sum_ML_over_PP <- function(y_log_lik_t_equals_1_traces, temperatures) {
	sum(mapply(
		FUN=function(y_lik, t_diff) {
			# log of the mean of exp(t_diff * y_lik), computed stably by
			# factoring out the maximum
			log(mean(exp(t_diff*y_lik-max(t_diff*y_lik))))+max(t_diff*y_lik)
		},
		split(t(y_log_lik_t_equals_1_traces)[-length(temperatures),], seq(length.out=length(temperatures)-1)),
		diff(temperatures)
	))
}

#' @title Concatenate objects of class \code{BeviMed_raw}
#' @description This function can be used to stitch together consecutive chains to create one larger sampled set of states from the MCMC procedure.
#' @param objects \code{list} of \code{BeviMed_raw} objects.
#' @return \code{BeviMed} object.
#' @importFrom stats setNames
stack_BeviMeds <- function(objects) {
	stopifnot(all(sapply(objects, class) == "BeviMed_raw"))
	structure(
		list(
			# bind the per-block traces row-wise and concatenate the
			# per-block swap records element-wise
			traces=do.call(what=Map, c(list(f=rbind), lapply(objects, "[[", "traces"))),
			swaps=do.call(what=Map, c(list(f=c), lapply(objects, "[[", "swaps"))),
			final=objects[[length(objects)]]$final
		),
		class="BeviMed_raw"
	)
}

#' @title Estimate confidence interval for estimated marginal likelihood
#' @description The central limit theorem is not applicable here, so simulation is used to estimate a confidence interval for the evidence.
#' @template temperatures
#' @template y_log_lik_t_equals_1_traces
#' @template confidence
#' @template simulations
#' @return Confidence interval as numeric vector of length 2.
#' @importFrom stats rt quantile
CI_gamma1_evidence <- function(
	temperatures,
	y_log_lik_t_equals_1_traces,
	confidence=0.95,
	simulations=1000
) {
	# we want all blocks to have the same length
	a <- b <- as.integer(sqrt(nrow(y_log_lik_t_equals_1_traces)))
	if (a < 2) stop("Longer sample-block lengths are required to estimate a confidence interval")
	# with columns for batches and rows for temperatures...
	batch_means <- simplify2array(tapply(
		X=(nrow(y_log_lik_t_equals_1_traces)-a*b+1):nrow(y_log_lik_t_equals_1_traces),
		INDEX=gl(n=a, k=b),
		FUN=function(rows) apply(MARGIN=1, FUN=mean, X=exp(t(y_log_lik_t_equals_1_traces[rows,-ncol(y_log_lik_t_equals_1_traces),drop=FALSE]) * diff(temperatures)))
	))
	overall_mean <- apply(MARGIN=1, FUN=mean, X=exp(t(y_log_lik_t_equals_1_traces[,-ncol(y_log_lik_t_equals_1_traces)]) * diff(temperatures)))
	estimate_var_by_temp <- mapply(
		FUN=function(overall, batch) b / (a-1) * sum((batch-overall)^2),
		overall_mean,
		split(batch_means, seq(length.out=nrow(batch_means)))
	)
	samples <- mapply(
		SIMPLIFY=TRUE,
		FUN=function(est_mean, est_var) est_mean + (rt(df=a-1, n=simulations) * sqrt(est_var) / sqrt(a)),
		overall_mean,
		estimate_var_by_temp
	)
	simulated_MLs <- apply(samples, 1, function(expectance_at_temp_i) sum(log(ifelse(expectance_at_temp_i < 0, 0, ifelse(expectance_at_temp_i > 1, 1, expectance_at_temp_i)))))
	quantile(probs=c((1-confidence)/2,1-(1-confidence)/2), simulated_MLs)
}

#' @title Remove variants with no data for pathogenicity
#' @description Subset an allele count matrix given a minimum allele count threshold for pathogenicity per individual, so that only variants with data relevant to pathogenicity are retained. This is useful to apply before running \code{\link{bevimed}} as it reduces the size of the parameter space used in the inference.
#' @template G_matrix
#' @template min_ac
#' @param return_variants Logical value determining whether to return an integer vector of indices of retained variants or the subsetted allele count matrix.
#' @export
subset_variants <- function(G, min_ac=1L, return_variants=FALSE) {
	# keep variants carried by at least one individual who passes the
	# minimum allele count threshold
	vars <- which(apply(G[apply(G, 1, sum) >= min_ac,,drop=FALSE], 2, sum) > 0L)
	if (return_variants) vars else G[,vars,drop=FALSE]
}

#' @title Apply the MCMC algorithm in blocks until conditions are met
#' @description Sample blocks of a given size until either the estimated log marginal likelihood falls within a given confidence interval, there is sufficient confidence that the evidence for model gamma = 1 is at most a certain quantity, or a certain number of blocks have been sampled.
#' @template y
#' @param blocks_remaining Maximum number of blocks left before termination.
#' @param start_zs Initial (logical) z-matrix.
#' @param start_logit_omegas Initial values of logit_omega (numeric vector - one value per chain).
#' @param start_log_phis Initial values of log_phi (numeric vector - one value per chain).
#' @template temperatures
#' @param tolerance Maximum width of the confidence interval of the log marginal likelihood to allow before stopping the chain.
#' @template confidence
#' @template simulations
#' @param log_evidence_threshold Numeric value used to determine whether to stop the sampling procedure after successive blocks. If we are confident (to the level of \code{confidence}) that the evidence for model gamma = 1 is under this value, sampling is halted.
#' @template y_log_lik_t_equals_1_traces
#' @param full_block_traces List of outputs of calls to MCMC routine.
#' @template verbose
#' @param ... Other arguments passed to \code{\link{call_cpp}}.
#' @return An object of class \code{BeviMed}.
stop_chain <- function(
	y,
	blocks_remaining,
	start_zs,
	start_logit_omegas,
	start_log_phis,
	temperatures,
	tolerance=1,
	confidence=0.95,
	simulations=1000,
	log_evidence_threshold=-Inf,
	y_log_lik_t_equals_1_traces=matrix(ncol=length(temperatures),nrow=0),
	full_block_traces=list(),
	verbose=FALSE,
	...
) {
	if (verbose) cat("Sampling up to ", blocks_remaining, " more blocks to get marginal likelihood within tolerance of ", tolerance, "\n", sep="")
	mc <- call_cpp(
		z0_matrix=start_zs,
		logit_omegas=start_logit_omegas,
		log_phis=start_log_phis,
		temperatures=temperatures,
		y=y,
		...
	)
	y_ll <- rbind(
		y_log_lik_t_equals_1_traces,
		mc[["traces"]][["y_log_lik_t_equals_1"]]
	)
	confidence_interval <- CI_gamma1_evidence(temperatures, y_ll, confidence=confidence, simulations=simulations)
	if (verbose) {
		cat(paste0(round(confidence * 100), "% confidence interval, width = ", round(digits=2, diff(confidence_interval)), ":\n"))
		print(round(digits=2, confidence_interval))
	}
	if (diff(confidence_interval) < tolerance | confidence_interval[2] < log_evidence_threshold | blocks_remaining <= 1) {
		stack_BeviMeds(c(full_block_traces, list(mc)))
	} else {
		# otherwise recurse, starting the next block from the final state
		# of this one
		stop_chain(
			y=y,
			blocks_remaining=blocks_remaining - 1,
			tolerance=tolerance,
			confidence=confidence,
			start_zs=mc[["final"]][["z"]],
			start_logit_omegas=mc[["final"]][["logit_omega"]],
			start_log_phis=mc[["final"]][["log_phi"]],
			temperatures=temperatures,
			log_evidence_threshold=log_evidence_threshold,
			y_log_lik_t_equals_1_traces=y_ll,
			full_block_traces=c(full_block_traces, list(mc)),
			verbose=verbose,
			...
		)
	}
}

#' @title Tune proposal standard deviation for MH sampled parameters
#' @description Tune the proposal standard deviations for the Metropolis-Hastings updates of either phi or omega.
#' @param tune_for Character vector of length one, naming which variable to tune the proposal SDs for: either \code{"logit_omega"} or \code{"log_phi"}.
#' @param initial_proposal_sds Numeric vector with the initial values of the proposal SDs.
#' @param target_acceptance_range Numeric vector of length 2 where the first element is the lower bound for the acceptance interval and the second is the upper bound.
#' @param other_param_proposal_sd The proposal SD to use for \code{log_phi} when tuning \code{logit_omega} or vice versa.
#' @param max_tuning_cycles Maximum number of tuning cycles to perform before returning the proposal SDs as they are.
#' @param initial_rate Initial rate at which to mutate the proposal SDs.
#' @param rate_decay Geometric rate of decay for size of proposal SD mutation with each successive tuning cycle.
#' @template verbose
#' @param ... Other arguments to be passed to \code{\link{call_cpp}}.
#' @return Numeric vector of proposal SDs for the different temperature chains.
tune_proposal_sds <- function(
	tune_for=c("logit_omega"),
	initial_proposal_sds,
	target_acceptance_range=c(0.3,0.7),
	other_param_proposal_sd=0.7,
	max_tuning_cycles=10,
	initial_rate=1,
	rate_decay=1.2,
	verbose=FALSE,
	...
) {
	stopifnot(all(tune_for %in% c("logit_omega", "log_phi")))
	if (verbose) {
		cat("Tuning proposal standard deviations for ", tune_for[1], " targeting acceptance rate range (", target_acceptance_range[1], ",", target_acceptance_range[2], ")\n", sep="")
	}
	current_proposal_sds <- initial_proposal_sds
	acceptances <- rep(-Inf, length(current_proposal_sds))
	cycle <- 0
	while (
		cycle <= max_tuning_cycles & (
			any(acceptances < target_acceptance_range[1]) |
			any(acceptances > target_acceptance_range[2]))
	) {
		if (verbose) cat("Tuning cycle ", cycle, "\n\tTest proposal SDs:\n\t\t", paste0(collapse=" : ", round(digits=2, current_proposal_sds)), "\n", sep="")
		out <- if (tune_for[1] == "logit_omega")
			call_cpp(logit_omega_proposal_sds=current_proposal_sds, log_phi_proposal_sds=rep(other_param_proposal_sd, length(current_proposal_sds)), ...)
		else
			call_cpp(log_phi_proposal_sds=current_proposal_sds, logit_omega_proposal_sds=rep(other_param_proposal_sd, length(current_proposal_sds)), ...)
		# estimate the acceptance rate for each chain as the proportion of
		# successive samples which differ
		acceptances <- apply(out[["traces"]][[tune_for[1]]], 2, function(var_vals) mean(var_vals[-length(var_vals)] != var_vals[-1]))
		current_proposal_sds <- mapply(
			SIMPLIFY=TRUE,
			FUN=function(prop_sd, acc_rate)
				if (acc_rate < target_acceptance_range[1] | acc_rate > target_acceptance_range[2]) {
					match.fun(if (acc_rate < target_acceptance_range[1]) "/" else "*")(prop_sd, (1 + initial_rate * rate_decay ^ (-cycle+1)))
				} else {
					prop_sd
				},
			current_proposal_sds,
			acceptances
		)
		if (verbose) cat("\tAcceptance rates:\n\t\t", paste0(collapse=" : ", round(digits=2, acceptances)), "\n", sep="")
		cycle <- cycle + 1
	}
	if (verbose) cat("Terminating\n")
	current_proposal_sds
}

#' @title Tune temperatures
#' @description Tune temperatures using interval bisection to minimise the Kullback-Leibler divergence between adjacent power posteriors.
#' @param number_of_temperatures Integer value giving number of tuned temperatures (including 0 and 1) to obtain.
#' @param return_temperatures Logical value determining whether to return just the numeric vector of tuned temperatures or to return the \code{BeviMed_m}-classed object containing the output of the MCMC sampling.
#' @param ... Other arguments to pass to \code{call_cpp}.
#' @return If \code{return_temperatures == TRUE}, a numeric vector of tuned temperatures, otherwise an object of class \code{BeviMed_m}.
#' @importFrom stats var
tune_temperatures <- function(
	number_of_temperatures,
	return_temperatures=FALSE,
	...
) {
	temperatures <- 0:1
	chains <- lapply(temperatures, function(t) call_cpp(
		temperatures=t,
		...
	))
	E <- sapply(chains, function(ch) mean(ch[["traces"]][["y_log_lik_t_equals_1"]]))
	V <- sapply(chains, function(ch) var(ch[["traces"]][["y_log_lik_t_equals_1"]]))
	while (length(temperatures) <= number_of_temperatures) {
		# bisect the interval between the pair of adjacent temperatures
		# spanning the greatest area
		areas <- diff(temperatures) * diff(E)
		largest <- which.max(areas)
		t_pair <- c(largest, largest+1)
		t_intersect <- mean(temperatures[t_pair])
		temperatures <- c(temperatures[seq(length.out=largest)], t_intersect, temperatures[(largest+1):length(temperatures)])
		chains <- c(chains[seq(length.out=largest)], list(call_cpp(temperatures=t_intersect, ...)), chains[(largest+1):length(chains)])
		E <- sapply(chains, function(ch) mean(ch[["traces"]][["y_log_lik_t_equals_1"]]))
		V <- sapply(chains, function(ch) var(ch[["traces"]][["y_log_lik_t_equals_1"]]))
	}
	if (return_temperatures) temperatures else list(chains=chains, E=E, V=V, temperatures=temperatures)
}

#' @importFrom methods is slot
get_G_args <- function(G) {
	if (is.matrix(G)) {
		counts <- as.integer(G)
		variants <- rep(seq(length.out=ncol(G)), each=nrow(G))
		cases <- rep(seq(length.out=nrow(G)), times=ncol(G))
		block_ends <- unname(cumsum(lapply(split(counts, factor(variants, levels=seq(length.out=ncol(G)))), function(cnts) sum(cnts > 0))))
		block_starts <- if (length(block_ends) > 0) c(0, block_ends[-length(block_ends)]) else integer(0)
		list(
			cases=cases[counts > 0],
			counts=counts[counts > 0],
			block_ends=as.integer(block_ends),
			block_starts=as.integer(block_starts)
		)
	} else if (is(G, "sparseMatrix")) {
		p <- slot(G, "p")
		G_i <- slot(G, "i")
		return(list(
			cases=rep(1L, length(G_i))+G_i,
			counts=as.integer(slot(G, "x")),
			block_ends=p[-1L],
			block_starts=p[-length(p)]
		))
	} else {
		stop("'G' must be a matrix!")
	}
}

to_var_tab <- function(G_args) {
	data.frame(
		variant=unlist(mapply(SIMPLIFY=FALSE, FUN=rep, seq(length.out=length(G_args$block_starts)), G_args$block_ends-G_args$block_starts)),
		case=G_args$cases,
		count=G_args$counts
	)
}

#' @title Perform inference under model gamma = 1 conditional on mode of inheritance
#'
#' @description Sample from the posterior distribution of parameters under model gamma = 1 and conditional on mode of inheritance, set via the \code{min_ac} argument.
#' @template y
#' @template G_matrix
#' @template min_ac
#' @template tau_shape
#' @template pi_shape
#' @template omega_shape
#' @template samples_per_chain
#' @param stop_early Logical value determining whether to attempt to stop the sampling as soon as certain conditions are met (i.e. either the estimated marginal log likelihood lies within a certain confidence interval, or we are sufficiently confident that the log Bayes factor of model gamma = 1 over model gamma = 0 is sufficiently low).
#' @param blocks Maximum number of blocks of \code{samples_per_chain} samples to draw before terminating the sampling, whether or not the confidence interval for the marginal likelihood under model gamma = 1 has become sufficiently small. This parameter is ignored unless \code{stop_early==TRUE}.
#' @template burn
#' @template temperatures
#' @param tune_temps Integer value - if greater than 0, the \code{temperatures} argument is ignored, and instead \code{tune_temps} tuned temperatures are used.
#' @template return_z_trace
#' @template return_x_trace
#' @param raw_only Logical value determining whether to return the raw output of the MCMC routine only.
#' @param swaps Number of swaps between adjacent tempered chains to perform per update cycle.
#' @param optimise_z0 Logical value determining whether to use a simulated annealing optimisation run to tune the initial values of \code{z}.
#' @param tune_omega_and_phi_proposal_sd Logical value determining whether the proposal SDs of the Metropolis-Hastings estimated parameters should be tuned for a target acceptance range.
#' @param tune_block_size Integer value giving number of samples to draw when estimating the acceptance rate of the omega/phi proposals.
#' @template variant_weights
#' @param standardise_weights Logical value determining whether weights should be standardised by subtracting their mean and dividing by their sample standard deviation. If \code{FALSE}, weights are untransformed.
#' @template log_phi_mean
#' @param log_phi_sd SD for normal prior on scaling factor phi. Setting to 0 causes the weights to be fixed and not estimated.
#' @template tandem_variant_updates
#' @param ... Other arguments to be passed to \code{\link{stop_chain}} and/or \code{\link{tune_proposal_sds}}.
#' @return An object of class \code{BeviMed_m}.
#' @details A \code{BeviMed_m} object is a list containing elements:
#' \itemize{
#' \item `parameters': a list containing arguments used in the function call, including the adjusted weights used in the inference in the `c_weights' slot,
#' \item `traces': a list of traces of model parameters from all MCMC chains for each parameter. Parameters sampled are z, omega, phi and x (the indicator of having a pathogenic configuration of alleles). The list of traces is named by parameter name, and each is a matrix where the rows correspond to samples. $z has k columns for each temperature, with the samples from the true posterior (i.e. with temperature equal to 1) of z corresponding to the final k columns. Likewise, the true posterior is given by the final column for the traces of phi and omega. The trace of x is only given for temperature equal to 1 to reduce memory usage.
#' \item `final': a list named by model parameter giving the final sample of each,
#' \item `swaps': a list with an element named `accept' which is a logical vector whose ith element indicates whether the ith swap between adjacent tempered chains was accepted or not, and an element named `at_temperature', an integer vector whose ith element indicates which pair of consecutive temperatures was the ith to be proposed for swapping (giving the lowest one).
#' }
#' @export
#' @importFrom stats rnorm runif rbeta sd
#' @importFrom methods is
#' @importFrom Matrix rowSums
#' @seealso \code{\link{bevimed}}, \code{\link{prob_association_m}}
#' @template paper
bevimed_m <- function(
	y,
	G,
	min_ac=1L,
	tau_shape=c(1, 1),
	pi_shape=c(6, 1),
	omega_shape=if (max(min_ac) == 1L) c(2, 8) else c(2, 2),
	samples_per_chain=1000,
	stop_early=FALSE,
	blocks=5,
	burn=as.integer(samples_per_chain/10),
	temperatures=(0:6/6)^2,
	tune_temps=0,
	return_z_trace=TRUE,
	return_x_trace=TRUE,
	raw_only=FALSE,
	swaps=as.integer(length(temperatures)/2),
	optimise_z0=FALSE,
	tune_omega_and_phi_proposal_sd=FALSE,
	tune_block_size=100,
	variant_weights=NULL,
	standardise_weights=TRUE,
	log_phi_mean=-0.15,
	log_phi_sd=sqrt(0.3),
	tandem_variant_updates=if (max(min_ac) == 1) 0 else min(sum(y), ncol(G)),
	...
) { stopifnot(is.matrix(G)||is(G, "sparseMatrix")) stopifnot(nrow(G)==length(y)) stopifnot(!is.matrix(G)||is.numeric(G)) stopifnot(is.logical(y)) stopifnot(min_ac > 0) stopifnot(identical(as.numeric(range(temperatures)), as.numeric(c(0, 1)))) estimate_phi <- !((log_phi_sd == 0) | is.null(variant_weights)) c_weights <- if (is.null(variant_weights)) { rep(0, ncol(G)) } else { stopifnot(is.numeric(variant_weights)) stopifnot(length(variant_weights) == ncol(G)) if (standardise_weights) { if (length(variant_weights) == 1L | sd(variant_weights) == 0) rep(0, ncol(G)) else (variant_weights-mean(variant_weights))/sd(variant_weights) } else { variant_weights } } G_args <- get_G_args(G) G_logical <- G > 0 comphet_cases <- rowSums(G_logical) > 1L comphet_variants <- lapply(split(as.matrix(G_logical[comphet_cases,,drop=FALSE]), seq(length.out=sum(comphet_cases))), which) comphet_block_ends <- unname(cumsum(lapply(comphet_variants, length))) comphet_block_starts <- if (length(comphet_block_ends) > 0) c(0, comphet_block_ends[-length(comphet_block_ends)]) else integer(0) adjusted_tvu <- if (sum(comphet_cases) > 0) tandem_variant_updates else 0 initial_log_phi_proposal_sd <- 0.5 initial_logit_omega_proposal_sd <- 1 estimate_omega <- !is.null(variant_weights) reused_arguments <- list( y=y, block_starts=G_args$block_starts, block_ends=G_args$block_ends, cases=G_args$cases-1L, counts=G_args$counts, min_ac=min_ac, tau_shape=tau_shape, pi_shape=pi_shape, omega_shape=omega_shape, estimate_omega=estimate_omega, variant_weights=c_weights, estimate_phi=estimate_phi, log_phi_mean=log_phi_mean, log_phi_sd=log_phi_sd, chain_swaps_per_cycle=swaps, tandem_variant_updates=adjusted_tvu, comphet_variant_block_starts=comphet_block_starts, comphet_variant_block_ends=comphet_block_ends, comphet_variants=unlist(use.names=FALSE, comphet_variants)-1 ) if (tune_temps > 0) { temperatures <- do.call( what=tune_temperatures, c( reused_arguments, list( samples_per_chain=samples_per_chain, number_of_temperatures=tune_temps, return_temperatures=TRUE, z0_matrix=matrix(runif(ncol(G)) < omega_shape[1]/sum(omega_shape), nrow=1, ncol=ncol(G)), logit_omegas=local({ w <- rbeta(n=1, shape1=omega_shape[1], shape2=omega_shape[2]); log(w)-log(1-w) }), logit_omega_proposal_sds=rep(initial_logit_omega_proposal_sd, 1), log_phis=if (estimate_phi) rnorm(n=1, mean=log_phi_mean, sd=log_phi_sd) else rep(0, 1), log_phi_proposal_sds=rep(initial_log_phi_proposal_sd, 1), annealing=FALSE, return_z_trace=FALSE, return_x_trace=FALSE ) ) ) } initial_z <- if (optimise_z0) { do.call(what=call_cpp, c( reused_arguments, list( samples_per_chain=samples_per_chain, z0_matrix=matrix(runif(ncol(G)*length(temperatures)) < omega_shape[1]/sum(omega_shape), nrow=length(temperatures), ncol=ncol(G)), logit_omegas=local({ w <- rbeta(n=temperatures, shape1=omega_shape[1], shape2=omega_shape[2]); log(w)-log(1-w) }), logit_omega_proposal_sds=rep(initial_logit_omega_proposal_sd, length(temperatures)), log_phis=if (estimate_phi) rnorm(n=length(temperatures), mean=log_phi_mean, sd=log_phi_sd) else rep(0, length(temperatures)), log_phi_proposal_sds=rep(initial_log_phi_proposal_sd, length(temperatures)), temperatures=rep(1, length(temperatures)), chain_swaps_per_cycle=swaps, annealing=TRUE, return_z_trace=FALSE, return_x_trace=FALSE ) ))[["final"]][["z"]] } else { matrix(runif(ncol(G) * length(temperatures)) < omega_shape[1]/sum(omega_shape), nrow=length(temperatures), ncol=ncol(G)) } proposal_sds <- lapply( setNames(nm=c("logit_omega", "log_phi")), FUN=if 
(tune_omega_and_phi_proposal_sd & (estimate_phi | estimate_omega)) { function(tune_for) do.call(what=tune_proposal_sds, c( reused_arguments, list( tune_for=tune_for, initial_proposal_sds=rep(if (tune_for == "log_phi") initial_log_phi_proposal_sd else initial_logit_omega_proposal_sd, length(temperatures)), samples_per_chain=tune_block_size, burn=0, z0_matrix=initial_z, logit_omegas=local({ w <- rbeta(n=length(temperatures), shape1=omega_shape[1], shape2=omega_shape[2]); log(w)-log(1-w) }), log_phis=if (estimate_phi) rnorm(n=length(temperatures), mean=log_phi_mean, sd=log_phi_sd) else rep(0, length(temperatures)), temperatures=temperatures, annealing=FALSE, return_z_trace=FALSE, return_x_trace=FALSE ), list(...)[intersect(names(list(...)), names(formals(tune_proposal_sds)))] )) } else { function(tune_for) rep(if (tune_for == "log_phi") initial_log_phi_proposal_sd else initial_logit_omega_proposal_sd, length(temperatures)) }) result <- if (stop_early) { burn <- do.call(what=call_cpp, c( reused_arguments, list( samples_per_chain=samples_per_chain, burn=0, z0_matrix=initial_z, logit_omegas=local({ w <- rbeta(n=length(temperatures), shape1=omega_shape[1], shape2=omega_shape[2]); log(w)-log(1-w) }), logit_omega_proposal_sds=proposal_sds[["logit_omega"]], log_phis=if (estimate_phi) rnorm(n=length(temperatures), mean=log_phi_mean, sd=log_phi_sd) else rep(0, length(temperatures)), log_phi_proposal_sds=proposal_sds[["log_phi"]], temperatures=temperatures, annealing=FALSE, return_z_trace=FALSE, return_x_trace=FALSE ) )) do.call(what=stop_chain, c( reused_arguments, list( samples_per_chain=samples_per_chain, y_log_lik_t_equals_1=matrix(ncol=length(temperatures),nrow=0), burn=0, start_zs=burn[["final"]][["z"]], start_logit_omegas=burn[["final"]][["logit_omega"]], start_log_phis=burn[["final"]][["log_phi"]], temperatures=temperatures, blocks_remaining=max(1, blocks), logit_omega_proposal_sds=proposal_sds[["logit_omega"]], log_phi_proposal_sds=proposal_sds[["log_phi"]], annealing=FALSE, return_z_trace=return_z_trace, return_x_trace=return_x_trace ), list(...)[intersect(names(list(...)), names(formals(stop_chain)))] )) } else { do.call( what=call_cpp, c( reused_arguments, list( samples_per_chain=samples_per_chain, burn=burn, z0_matrix=initial_z, logit_omegas=local({ w <- rbeta(n=length(temperatures), shape1=omega_shape[1], shape2=omega_shape[2]); log(w)-log(1-w) }), logit_omega_proposal_sds=proposal_sds[["logit_omega"]], log_phis=if (estimate_phi) rnorm(n=length(temperatures), mean=log_phi_mean, sd=log_phi_sd) else rep(0, length(temperatures)), log_phi_proposal_sds=proposal_sds[["log_phi"]], temperatures=temperatures, annealing=FALSE, return_z_trace=return_z_trace, return_x_trace=return_x_trace ) ) ) } if (raw_only) result else structure( class="BeviMed_m", c( result, list(parameters=list( omega_shape=omega_shape, pi_shape=pi_shape, tau_shape=tau_shape, variant_weights=variant_weights, temperatures=temperatures, estimate_phi=estimate_phi, estimate_omega=estimate_omega, y=y, min_ac=min_ac, variant_table=to_var_tab(G_args), c_weights=c_weights, G=G, N=length(y), k=ncol(G) )) ) ) }
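## A minimal usage sketch (the simulated data below are illustrative
## assumptions, not from the package sources): conditional inference for a
## dominant model (min_ac=1), summarised to extract the evidence under
## gamma = 1.
# set.seed(1)
# G <- matrix(rbinom(n=50*10, size=2, prob=0.05), nrow=50, ncol=10)
# y <- apply(G[, 1:2, drop=FALSE], 1, sum) > 0
# m <- bevimed_m(y=y, G=G, min_ac=1L, samples_per_chain=500)
# summary(m)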
/scratch/gouwar.j/cran-all/cranData/BeviMed/R/functions.R
#' Summarise a \code{BeviMed_m} object
#'
#' Create a summary of inference conditional on mode of inheritance.
#'
#' @param object Object of class \code{BeviMed_m}. See function \code{\link{bevimed_m}}.
#' @template confidence
#' @template simulations
#' @param ... Unused arguments.
#' @details Returns a \code{BeviMed_m_summary} object, which is a list containing elements:
#' \itemize{
#' \item `gamma1_evidence': the log evidence under model gamma = 1,
#' \item `gamma1_evidence_confidence_interval': a confidence interval for the log evidence under model gamma = 1,
#' \item `conditional_prob_pathogenic': vector of marginal probabilities of pathogenicity for individual variants,
#' \item `expected_explained': the expected number of cases with a pathogenic configuration of alleles,
#' \item `explaining_variants': the expected number of variants for which cases harbour a rare allele,
#' \item `number_of_posterior_samples': the number of samples from the posterior distribution of the model parameters upon which the summary is based,
#' \item `omega_estimated': logical value indicating whether the parameter omega was estimated,
#' \item `omega': the posterior mean of omega,
#' \item `omega_acceptance_rate': if omega was estimated, the rate of acceptance of proposed omega values in the Metropolis-Hastings sampling routine,
#' \item `phi_estimated': logical value indicating whether the parameter phi was estimated,
#' \item `phi': the posterior mean of phi,
#' \item `phi_acceptance_rate': if phi was estimated, the rate of acceptance of proposed phi values in the Metropolis-Hastings sampling routine,
#' \item `N': number of samples in the analysis,
#' \item `k': number of variants in the analysis,
#' \item `variant_counts': list of counts of each variant for cases and controls,
#' \item `temperatures': numeric vector of temperatures used for the tempered MCMC chains.
#' }
#' @return Object of class \code{BeviMed_m_summary}.
#' @method summary BeviMed_m
#' @export
#' @seealso \code{\link{summary.BeviMed}}
summary.BeviMed_m <- function(object, confidence=0.95, simulations=1000, ...)
{ vt <- object[["parameters"]][["variant_table"]] y <- object[["parameters"]][["y"]] k <- object[["parameters"]][["k"]] variant_counts <- lapply(setNames(nm=c(F,T)), function(y_is) as.integer(table(factor(vt$variant[y[vt$case]==y_is], levels=seq(length.out=k))))) gamma1_evidence <- extract_gamma1_evidence(object) temps <- object[["parameters"]][["temperatures"]] num_temps <- length(temps) phi_estimated <- object[["parameters"]][["estimate_phi"]] omega_estimated <- object[["parameters"]][["estimate_omega"]] has_z <- dim(object[["traces"]][["z"]])[1] > 0 has_x <- dim(object[["traces"]][["x"]])[1] > 0 structure(list( gamma1_evidence=gamma1_evidence, gamma1_evidence_confidence_interval=CI_gamma1_evidence( temperatures=temps, y_log_lik_t_equals_1_traces=object[["traces"]][["y_log_lik_t_equals_1"]], confidence=confidence, simulations=simulations ), conditional_prob_pathogenic=if (has_z) setNames(nm=colnames(object[["parameters"]][["G"]]), extract_conditional_prob_pathogenic(object)), expected_explained=if (has_x) extract_expected_explained(object) else NULL, explaining_variants=if (has_z & has_x) extract_explaining_variants(object) else NULL, number_of_posterior_samples=nrow(object[["traces"]][["y_log_lik_t_equals_1"]]), omega_estimated=omega_estimated, phi_estimated=phi_estimated, phi=if (phi_estimated) mean(exp(object[["traces"]][["log_phi"]][,num_temps])) else NA, omega=if (omega_estimated) { mean(1-1/(1+exp(object[["traces"]][["logit_omega"]][,num_temps]))) } else { if (has_z & all(object[["parameters"]][["c_weights"]] == 0)) { sumZ <- apply(t1_z_trace(object),1,sum) (object[["parameters"]][["omega_shape"]][1] + mean(sumZ))/sum(c(k,object[["parameters"]][["omega_shape"]])) } else { NA } }, phi_acceptance_rate=if (phi_estimated) apply(object[["traces"]][["log_phi"]], 2, function(log_phis) mean(log_phis[-length(log_phis)] != log_phis[-1])) else NA, omega_acceptance_rate=if (omega_estimated) apply(object[["traces"]][["logit_omega"]], 2, function(logit_omegas) mean(logit_omegas[-length(logit_omegas)] != logit_omegas[-1])) else NA, N=length(object[["parameters"]][["y"]]), k=object[["parameters"]][["k"]], variant_counts=variant_counts, temperatures=temps ), class="BeviMed_m_summary") } #' Summarise a \code{BeviMed} object #' #' Create a summary of inference over model gamma = 0 and association models. #' #' @param object Object of class \code{BeviMed}. #' @param ... Arguments passed to \code{summary.BeviMed_m}. #' @details Returns a \code{BeviMed_summary} object, which is a list containing elements: #' \itemize{ #' \item `prob_association`: the probability of association under each association model, #' \item `prior_prob_association`: the prior probability of association for each association model, #' \item `gamma0_evidence': the log evidence under model gamma = 0, #' \item `models': a list of summaries of model conditional inferences, i.e. objects of class \code{BeviMed_m_summary}. See \code{\link{summary.BeviMed_m}} for more details. #' } #' @return Object of class \code{BeviMed_summary}. #' @method summary BeviMed #' @seealso \code{\link{summary.BeviMed_m}} #' @export summary.BeviMed <- function(object, ...) 
{
	structure(
		class="BeviMed_summary",
		c(
			object[["parameters"]][c(
				"prior_prob_association",
				"variant_sets",
				"moi"
			)],
			list(
				gamma0_evidence=gamma0_evidence(y=object[["parameters"]][["y"]], tau0_shape=object[["parameters"]][["tau0_shape"]]),
				prob_association=extract_prob_association(object, by_model=TRUE),
				models=lapply(object[["models"]], summary.BeviMed_m, ...),
				N=length(object[["parameters"]][["y"]]),
				k=ncol(object[["parameters"]][["G"]]),
				variant_names=colnames(object[["parameters"]][["G"]])
			)
		)
	)
}

#' Print readable summary of \code{BeviMed_summary} object
#'
#' @template print_description
#' @param x \code{BeviMed_summary} object.
#' @param print_prob_pathogenic Logical value indicating whether to print the list of marginal probabilities of \code{z_j = 1} for all variants \code{j} under each mode of inheritance.
#' @param ... Unused arguments.
#' @return Prints a summary.
#' @method print BeviMed_summary
#' @export
print.BeviMed_summary <- function(x, print_prob_pathogenic=TRUE, ...) {
	stopifnot(class(x) == "BeviMed_summary")
	dashed <- paste0(rep("-", getOption("width")), collapse="")
	cat(dashed, "\n")
	cat("Posterior probability of association: \n\t", round(sum(x[["prob_association"]]), digits=3), " [prior: ", round(sum(x[["prior_prob_association"]]), digits=3), "]\n", sep="")
	cat(dashed, "\n")
	model_names <- if (!is.null(names(x[["prob_association"]]))) names(x[["prob_association"]]) else seq(length.out=length(x[["prob_association"]]))
	summary_mat <- data.frame(
		check.names=FALSE,
		stringsAsFactors=FALSE,
		Model=model_names,
		`MOI`=substr(x[["moi"]], 1, 3),
		`Prior`=x[["prior_prob_association"]]/sum(x[["prior_prob_association"]]),
		`Post`=x[["prob_association"]]/sum(x[["prob_association"]]),
		`Cases`=sapply(x[["models"]], function(m) { ee <- "expected_explained"; if (ee %in% names(m)) { if (is.null(m[[ee]])) NA else m[[ee]] } else { NA } }),
		`Variants`=sapply(x[["models"]], function(m) { ee <- "explaining_variants"; if (ee %in% names(m)) { if (is.null(m[[ee]])) NA else m[[ee]] } else { NA } })
	)[order(x[["prob_association"]], decreasing=TRUE),]
	print(row.names=FALSE, digits=3, summary_mat)
	cat("\n")
	cat("MOI: mode of inheritance, dominant (dom) or recessive (rec)\n")
	cat("Prior: prior probability of model given association\n")
	cat("Post: posterior probability of model given association\n")
	cat("Cases: posterior expected number of cases explained\n")
	cat("Variants: posterior expected number of variants involved in explained cases\n")
	cat(dashed, "\n")
	if (print_prob_pathogenic) {
		if (x[["k"]] > 0) {
			cat("Probabilities of pathogenicity for individual variants given association\n\n")
			patho <- variant_marginals(Map(f="*", lapply(x[["models"]], "[[", "conditional_prob_pathogenic"), x[["prob_association"]]), x[["variant_sets"]], x[["k"]])/sum(x[["prob_association"]])
			patho_names <- if (!is.null(x[["variant_names"]])) x[["variant_names"]] else seq_along(patho)
			patho_rounded <- lapply(patho, function(vals) sprintf("%.2f", vals))
			# draw a simple text bar for each variant's probability
			bar_width <- 17
			print(row.names=FALSE, data.frame(
				check.names=FALSE,
				stringsAsFactors=FALSE,
				Var=substr(patho_names, 1, 22),
				`Probability pathogenic`=sapply(seq(length.out=length(patho)), function(j) paste0("[", patho_rounded[j], " ", paste0(collapse="", rep("=", as.integer(round(patho[j]*bar_width, digits=0L)))), paste0(collapse="", rep(" ", bar_width-as.integer(round(patho[j]*bar_width, digits=0L)))), "]"))
			))
		} else {
			cat("Specified models contain no variants\n")
		}
		cat(dashed, "\n")
	}
}

#' @title Print readable summary of \code{BeviMed} object
#'
#' @template print_description
#' @param x \code{BeviMed} object.
#' @param ... Arguments passed to \code{\link{summary.BeviMed}}.
#' @return Prints a summary.
#' @method print BeviMed
#' @export
#' @seealso \code{\link{summary.BeviMed}}
print.BeviMed <- function(x, ...) {
	stopifnot(class(x) == "BeviMed")
	print(summary(x, ...))
}

#' @title Print \code{BeviMed_m} object
#'
#' @description Print summary statistics for \code{BeviMed_m} object.
#' @template x_BeviMed_m
#' @param ... Unused arguments.
#' @return Prints a summary.
#' @seealso \code{\link{summary.BeviMed_m}}
#' @export
#' @method print BeviMed_m
print.BeviMed_m <- function(x, ...) {
	stopifnot(class(x) == "BeviMed_m")
	print(summary(x, ...))
}
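## Illustrative usage of the methods above (assumes 'y' and 'G' as in the
## package vignettes): printing a BeviMed object calls summary() internally,
## and storing the summary instead of the full object saves memory when
## scanning many loci.
# obj <- bevimed(y=y, G=G)
# s <- summary(obj)                       # BeviMed_summary object
# print(s, print_prob_pathogenic=FALSE)   # suppress the per-variant table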
/scratch/gouwar.j/cran-all/cranData/BeviMed/R/generics.R
## ----setup,echo=FALSE,results='hide',include=FALSE, cache=FALSE---------------
library(knitr)
theme <- list(
	highlight=paste0(collapse="\n", c(
		"\\definecolor{fgcolor}{rgb}{0, 0, 0}",
		"\\newcommand{\\hlnum}[1]{\\textcolor[rgb]{0,0,0}{#1}}%",
		"\\newcommand{\\hlstr}[1]{\\textcolor[rgb]{0, 0, 0}{#1}}%",
		"\\newcommand{\\hlcom}[1]{\\textcolor[rgb]{0,0,0}{\\textit{#1}}}%",
		# dollar
		"\\newcommand{\\hlopt}[1]{\\textcolor[rgb]{0,0,0}{\\textbf{#1}}}%",
		"\\newcommand{\\hlstd}[1]{\\textcolor[rgb]{0,0,0}{#1}}%",
		# 'function'
		"\\newcommand{\\hlkwa}[1]{\\textcolor[rgb]{0,0,0}{\\textbf{#1}}}%",
		# assign to
		"\\newcommand{\\hlkwb}[1]{\\textcolor[rgb]{0,0,0}{\\textbf{#1}}}%",
		# argument names
		"\\newcommand{\\hlkwc}[1]{\\textcolor[rgb]{0,0,0}{#1}}%",
		# function names
		"\\newcommand{\\hlkwd}[1]{\\textcolor[rgb]{0,0,0}{\\textbf{#1}}}%",
		"\\let\\hlipl\\hlkwb"
	)),
	background="#ffffff",
	foreground="#000000"
)
knit_theme$set(theme)
opts_chunk$set(prompt=TRUE)
library(BeviMed)
set.seed(1)
N <- 10
k <- 5
af <- 0.1
G <- matrix(nrow=N, ncol=k, data=rbinom(n=N*k, size=2, prob=af))
k_patho <- 3
z <- c(rep(TRUE, k_patho), rep(FALSE, k-k_patho))
y <- apply(G[,z,drop=FALSE], 1, sum) > 0

## ----simple, echo=TRUE---------------------------------------------------------
obj <- bevimed(y=y, G=G)

## ----print, echo=TRUE----------------------------------------------------------
obj

## ----polytomous, echo=TRUE-----------------------------------------------------
bevimed_polytomous(y=G[,1] > 0, G=G, variant_sets=list(`first`=1, `all`=1:ncol(G)))

## ----multiple, eval=FALSE, echo=TRUE-------------------------------------------
# source(paste0(system.file(package="BeviMed", "/scripts/vcf.R")))
# all_variants <- vcf2matrix("my-vcf.vcf.gz", chr="1", from=1, to=1e9, include_variant_info=TRUE)
# row_indices_per_gene <- lapply(1:nrow(chr1genes), function(i) {
# 	which(all_variants$info$POS >= chr1genes$start[i] & all_variants$info$POS <= chr1genes$end[i])
# })
# names(row_indices_per_gene) <- chr1genes$gene
# 
# results <- mclapply(
# 	mc.cores=16L,
# 	X=chr1genes$gene,
# 	FUN=function(gene) {
# 		G <- all_variants$G[row_indices_per_gene[[gene]],,drop=FALSE]
# 		c(
# 			list(gene=gene),
# 			summary(bevimed(y=y, G=G))) })
# 
# results_table <- do.call(what=rbind, lapply(results, function(x) data.frame(
# 	Gene=x[["gene"]],
# 	`Prob. assoc`=sum(x[["prob_association"]]),
# 	`Prob. dominance`=x[["prob_association"]]["dominant"]/sum(x[["prob_association"]]),
# 	check.names=FALSE,
# 	stringsAsFactors=FALSE
# )))
/scratch/gouwar.j/cran-all/cranData/BeviMed/inst/doc/Guide.R
## -----------------------------------------------------------------------------
library(BeviMed)
set.seed(0)

## -----------------------------------------------------------------------------
G <- matrix(rbinom(size=2, prob=0.02, n=100*20), nrow=100, ncol=20)
y_random <- runif(n=nrow(G)) < 0.1

prob_association(G=G, y=y_random)

## -----------------------------------------------------------------------------
y_dependent <- apply(G, 1, function(variants) sum(variants[1:3]) > 0)

prob_association(G=G, y=y_dependent)

## -----------------------------------------------------------------------------
output <- summary(bevimed(G=G, y=y_dependent))
output
/scratch/gouwar.j/cran-all/cranData/BeviMed/inst/doc/Intro.R
--- title: "BeviMed Introduction" author: "Daniel Greene" date: "`r Sys.Date()`" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{BeviMed Introduction} %\VignetteEngine{knitr::rmarkdown} %\usepackage[utf8]{inputenc} --- `BeviMed`, which stands for *Bayesian Evaluation of Variant Involvement in Mendelian Disease* [1], is an association test which estimates the probability of an association between a given set of variants and a case/control label, the mode of inheritance for the disease, and the probability that each individual variant is pathogenic. This vignette gives a quick description of what can be done with the package and demonstrates how to use it on simulated data. For more detailed explanations, see the 'BeviMed Guide' vignette and the individual function help pages. Inference is performed by the `bevimed` function, which evaluates the data with respect to three models: a model of no association between case/control label and allele counts, and models of dominant and recessive association. The function depends on input parameters: * case-control label `y`, a length `N` (number of samples) `logical` vector, * genotype matrix `G`, an `N` by `k` integer matrix of allele counts for `N` individuals at `k` rare variant sites, * `prior_prob_association` - the prior probability of association between the disease label and the variants in the locus. Defaults to `0.01`. * `prior_prob_dominant` - the prior probability of dominant as opposed to recessive inheritance, given an association with the locus. Defaults to `0.5`. * `ploidy` - a vector the same length as the case-control label `y` giving the ploidy of each individual in the locus. Defaults to `2` for each sample. * other arguments controlling the inference procedure and prior distributions of parameters (see `?bevimed` for more details). `bevimed` returns an object of class `BeviMed` which contains the output of the (MCMC-based) inference procedure, including samples from the posterior distributions of the model parameters. The object can be evaluated at the command line to print a summary of inference, telling you summary statistics of interest, including the probability of association. The object is likely to take up a lot of memory, so it is useful to store a summary, computed with `summary`, for each result if the procedure is being applied to multiple loci. Summary statistics can also be computed directly from the arguments using the functions (see help for individual functions for more information): * `prob_association` - returning the probability of association, optionally broken down by mode of inheritance. * `conditional_prob_pathogenic` - the probabilities of pathogenicity for the individual variants conditional on a mode of inheritance. * `expected_explained` - the expected number of cases explained by variants. * `explaining_variants` - the expected number of variants involved in explained cases. Here we demonstrate a simple application of BeviMed for some simulated data. ```{r} library(BeviMed) set.seed(0) ``` Firstly, we'll generate a random allele-count matrix `G` for 100 samples at 20 variant sites (each with an allele frequency of 0.02) and an independently generated case-control label, `y_random`. ```{r} G <- matrix(rbinom(size=2, prob=0.02, n=100*20), nrow=100, ncol=20) y_random <- runif(n=nrow(G)) < 0.1 prob_association(G=G, y=y_random) ``` The results indicate that there is a low probability of association. 
We now generate a new case control label `y_dependent` which depends on `G` - specifically, we treat variants 1 to 3 as 'pathogenic', and label any samples harbouring alleles for any of these variants as cases. ```{r} y_dependent <- apply(G, 1, function(variants) sum(variants[1:3]) > 0) prob_association(G=G, y=y_dependent) ``` Notice that there is now a higher estimated probability of association. By default, `prob_association` integrates over mode of inheritance (e.g. are at least 1 or 2 pathogenic variants required for a pathogenic configuration?). The probabilities of association with each mode of inheritance can by shown by passing the option `by_MOI=TRUE` (for more details, including how to set the ploidy of the samples within the region, see `?prob_pathogenic`). For a more detailed output, the `bevimed` function can be used, and it's returned values can be summarised and stored/printed. ```{r} output <- summary(bevimed(G=G, y=y_dependent)) output ``` ## References 1. Greene et al., A Fast Association Test for Identifying Pathogenic Variants Involved in Rare Diseases, The American Journal of Human Genetics (2017), http://dx.doi.org/10.1016/j.ajhg.2017.05.015
/scratch/gouwar.j/cran-all/cranData/BeviMed/inst/doc/Intro.Rmd
## -----------------------------------------------------------------------------
library(BeviMed)
source(paste0(system.file(package="BeviMed", "/scripts/vcf.R")))

## ----eval=FALSE----------------------------------------------------------------
# ac_matrix <- vcf2matrix("my-vcf.vcf.gz", chr="2", from=1, to=1e4)
# pheno <- read.table(file="my-phenotype-data.txt", header=TRUE)
# 
# bevimed(y=pheno$disease_status, G=ac_matrix)
/scratch/gouwar.j/cran-all/cranData/BeviMed/inst/doc/vcf.R
--- title: "BeviMed with VCFs" author: "Daniel Greene" date: "`r Sys.Date()`" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{BeviMed with VCFs} %\VignetteEngine{knitr::rmarkdown} %\usepackage[utf8]{inputenc} --- The `BeviMed` package comes with a script containing functions to simplify reading allele count matrices from VCF files. The functions depend on `tabix`, but have the advantage of allowing variants in local regions to be read in, reducing the amount of memory consumed at any one time. However, if you want to analyse many regions, it may be more efficient to read in larger parts of the file - in which case, a package such as `vcfR` might be more appropriate. In order to make the functions available, we must source the script: ```{r} library(BeviMed) source(paste0(system.file(package="BeviMed", "/scripts/vcf.R"))) ``` The script creates the function `vcf2matrix`, which depends on the external program `tabix` (available from http://www.htslib.org/download/) for reading allele count matrices from VCF files. It uses arguments: * `vcf_file_name` - path to vcf file. * `chr` - `character` value giving chromosome. * `from`/`to` - `integer` values giving from/to coordinates for chromosome. * `samples` - `character` vector of sample names as used in the VCF. * `include_variant_info` - `boolean` value determining whether to return just a matrix of allele counts (`TRUE`, default) or a list of allele count matrix `G` and `data.frame` of variant information `info` (`FALSE`). The variant information `info` could be useful for filtering the variants, for example if the VCF has not been pre-filtered for rare variants. * `description_columns` - `integer` value giving number of columns of description fields in the VCF file (i.e. before the genotype columns begin), defaults to `9`. * `warn_if_AF_greater_than` - `numeric` value giving threshold allele frequency for generating a warning. You can invoke the function simply to obtain the allele count matrix and pass straight to `bevimed`, along with phenotype label: ```{r eval=FALSE} ac_matrix <- vcf2matrix("my-vcf.vcf.gz", chr="2", from=1, to=1e4) pheno <- read.table(file="my-phenotype-data.txt", header=TRUE) bevimed(y=pheno$disease_status, G=ac_matrix) ```
/scratch/gouwar.j/cran-all/cranData/BeviMed/inst/doc/vcf.Rmd
get_sample_names <- function(file, connection_type=base::file, description_columns=9) {
	cnx <- connection_type(file, open="r")
	on.exit(close(cnx))
	# scan the header for the '#CHROM' line, whose trailing fields are the
	# sample names
	headings <- NULL
	while (is.null(headings)) {
		l <- readLines(cnx, n=1)
		if (grepl(x=l, pattern="^#CHROM")) headings <- strsplit(l, split="\t")[[1]]
	}
	headings[(description_columns+1):length(headings)]
}

compressed_vcf_sample_names <- function(vcf_file_name) {
	get_sample_names(paste0("zcat ", vcf_file_name), connection_type=pipe)
}

just_counts <- function(parts, description_columns=9) {
	y <- sapply(parts, "[", -(1:description_columns))
	# allele count per genotype field: one for each non-reference allele in
	# an 'a/b' or 'a|b' genotype
	structure(grepl(x=y, pattern="^[^0.][/|].") + grepl(x=y, pattern="^.[/|][^0.]"), dim=c(if (length(parts) > 0) length(parts[[1]])-description_columns else 0, length(parts)))
}

var_info <- function(parts, description_columns=9) structure(dimnames=list(NULL, c("CHROM","POS","ID","REF","ALT","QUAL","FILTER","INFO","FORMAT")), structure(dim=c(length(parts),description_columns), t(sapply(parts, "[", seq(length.out=description_columns)))))

get_block_parts <- function(vcf_file_name, chr, from, to) {
	cmd <- paste("tabix ", vcf_file_name, " ", chr, ":", from, "-", to, sep="")
	z <- pipe(cmd)
	lines <- grep(value=TRUE, pattern="^#", invert=TRUE, x=readLines(z))
	close(z)
	strsplit(lines, split="\t")
}

vcf2matrix <- function(vcf_file_name, chr, from, to, samples=compressed_vcf_sample_names(vcf_file_name), include_variant_info=FALSE, description_columns=9, warn_if_AF_greater_than=0.1) {
	file_samples <- compressed_vcf_sample_names(vcf_file_name)
	sample_inds <- match(samples, file_samples)
	parts <- get_block_parts(vcf_file_name, chr, from, to)
	info <- var_info(parts, description_columns)
	if (length(parts) == 0) stop("No data")
	if (any(grepl(",",info[,"ALT"]))) stop("Region contains variants with multiple alternate alleles: please use 'bcftools norm' to split such variants onto separate rows")
	counts <- just_counts(parts, description_columns)[sample_inds,,drop=FALSE]
	if (any(apply(counts > 0, 2, mean) > warn_if_AF_greater_than)) warning(paste0("Allele count matrix contains variants with allele frequency greater than ", warn_if_AF_greater_than))
	if (include_variant_info) list(info=info, G=counts) else counts
}
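## Example usage (requires the external 'tabix' program and a bgzipped,
## tabix-indexed VCF; the file name and region below are placeholders):
# res <- vcf2matrix("my-vcf.vcf.gz", chr="1", from=1, to=1e6,
#	include_variant_info=TRUE)
# dim(res$G)       # samples x variants allele count matrix
# head(res$info)   # CHROM/POS/ID/REF/ALT/QUAL/FILTER/INFO/FORMAT per variant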
/scratch/gouwar.j/cran-all/cranData/BeviMed/inst/scripts/vcf.R
--- title: "BeviMed Introduction" author: "Daniel Greene" date: "`r Sys.Date()`" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{BeviMed Introduction} %\VignetteEngine{knitr::rmarkdown} %\usepackage[utf8]{inputenc} --- `BeviMed`, which stands for *Bayesian Evaluation of Variant Involvement in Mendelian Disease* [1], is an association test which estimates the probability of an association between a given set of variants and a case/control label, the mode of inheritance for the disease, and the probability that each individual variant is pathogenic. This vignette gives a quick description of what can be done with the package and demonstrates how to use it on simulated data. For more detailed explanations, see the 'BeviMed Guide' vignette and the individual function help pages. Inference is performed by the `bevimed` function, which evaluates the data with respect to three models: a model of no association between case/control label and allele counts, and models of dominant and recessive association. The function depends on input parameters: * case-control label `y`, a length `N` (number of samples) `logical` vector, * genotype matrix `G`, an `N` by `k` integer matrix of allele counts for `N` individuals at `k` rare variant sites, * `prior_prob_association` - the prior probability of association between the disease label and the variants in the locus. Defaults to `0.01`. * `prior_prob_dominant` - the prior probability of dominant as opposed to recessive inheritance, given an association with the locus. Defaults to `0.5`. * `ploidy` - a vector the same length as the case-control label `y` giving the ploidy of each individual in the locus. Defaults to `2` for each sample. * other arguments controlling the inference procedure and prior distributions of parameters (see `?bevimed` for more details). `bevimed` returns an object of class `BeviMed` which contains the output of the (MCMC-based) inference procedure, including samples from the posterior distributions of the model parameters. The object can be evaluated at the command line to print a summary of inference, telling you summary statistics of interest, including the probability of association. The object is likely to take up a lot of memory, so it is useful to store a summary, computed with `summary`, for each result if the procedure is being applied to multiple loci. Summary statistics can also be computed directly from the arguments using the functions (see help for individual functions for more information): * `prob_association` - returning the probability of association, optionally broken down by mode of inheritance. * `conditional_prob_pathogenic` - the probabilities of pathogenicity for the individual variants conditional on a mode of inheritance. * `expected_explained` - the expected number of cases explained by variants. * `explaining_variants` - the expected number of variants involved in explained cases. Here we demonstrate a simple application of BeviMed for some simulated data. ```{r} library(BeviMed) set.seed(0) ``` Firstly, we'll generate a random allele-count matrix `G` for 100 samples at 20 variant sites (each with an allele frequency of 0.02) and an independently generated case-control label, `y_random`. ```{r} G <- matrix(rbinom(size=2, prob=0.02, n=100*20), nrow=100, ncol=20) y_random <- runif(n=nrow(G)) < 0.1 prob_association(G=G, y=y_random) ``` The results indicate that there is a low probability of association. 
We now generate a new case control label `y_dependent` which depends on `G` - specifically, we treat variants 1 to 3 as 'pathogenic', and label any samples harbouring alleles for any of these variants as cases. ```{r} y_dependent <- apply(G, 1, function(variants) sum(variants[1:3]) > 0) prob_association(G=G, y=y_dependent) ``` Notice that there is now a higher estimated probability of association. By default, `prob_association` integrates over mode of inheritance (e.g. are at least 1 or 2 pathogenic variants required for a pathogenic configuration?). The probabilities of association with each mode of inheritance can by shown by passing the option `by_MOI=TRUE` (for more details, including how to set the ploidy of the samples within the region, see `?prob_pathogenic`). For a more detailed output, the `bevimed` function can be used, and it's returned values can be summarised and stored/printed. ```{r} output <- summary(bevimed(G=G, y=y_dependent)) output ``` ## References 1. Greene et al., A Fast Association Test for Identifying Pathogenic Variants Involved in Rare Diseases, The American Journal of Human Genetics (2017), http://dx.doi.org/10.1016/j.ajhg.2017.05.015
/scratch/gouwar.j/cran-all/cranData/BeviMed/vignettes/Intro.Rmd
--- title: "BeviMed with VCFs" author: "Daniel Greene" date: "`r Sys.Date()`" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{BeviMed with VCFs} %\VignetteEngine{knitr::rmarkdown} %\usepackage[utf8]{inputenc} --- The `BeviMed` package comes with a script containing functions to simplify reading allele count matrices from VCF files. The functions depend on `tabix`, but have the advantage of allowing variants in local regions to be read in, reducing the amount of memory consumed at any one time. However, if you want to analyse many regions, it may be more efficient to read in larger parts of the file - in which case, a package such as `vcfR` might be more appropriate. In order to make the functions available, we must source the script: ```{r} library(BeviMed) source(paste0(system.file(package="BeviMed", "/scripts/vcf.R"))) ``` The script creates the function `vcf2matrix`, which depends on the external program `tabix` (available from http://www.htslib.org/download/) for reading allele count matrices from VCF files. It uses arguments: * `vcf_file_name` - path to vcf file. * `chr` - `character` value giving chromosome. * `from`/`to` - `integer` values giving from/to coordinates for chromosome. * `samples` - `character` vector of sample names as used in the VCF. * `include_variant_info` - `boolean` value determining whether to return just a matrix of allele counts (`TRUE`, default) or a list of allele count matrix `G` and `data.frame` of variant information `info` (`FALSE`). The variant information `info` could be useful for filtering the variants, for example if the VCF has not been pre-filtered for rare variants. * `description_columns` - `integer` value giving number of columns of description fields in the VCF file (i.e. before the genotype columns begin), defaults to `9`. * `warn_if_AF_greater_than` - `numeric` value giving threshold allele frequency for generating a warning. You can invoke the function simply to obtain the allele count matrix and pass straight to `bevimed`, along with phenotype label: ```{r eval=FALSE} ac_matrix <- vcf2matrix("my-vcf.vcf.gz", chr="2", from=1, to=1e4) pheno <- read.table(file="my-phenotype-data.txt", header=TRUE) bevimed(y=pheno$disease_status, G=ac_matrix) ```
/scratch/gouwar.j/cran-all/cranData/BeviMed/vignettes/vcf.Rmd
Benf.val <- function(fig, dig=1) {
	if (dig == 1) {
		# Benford's law for the first digit: P(d) = log10(1 + 1/d), d in 1..9
		if (fig == 0) return("The first digit cannot be 0")
		else return(log(1 + 1/fig, 10))
	} else {
		# for later digit positions, sum over all possible leading digit
		# prefixes i
		k <- 0
		for (i in 10**(dig-2):(10**(dig-1)-1)) k <- k + log(1 + 1/(10*i + fig), 10)
		return(k)
	}
}
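## Quick usage sketch (values in comments are approximate):
# Benf.val(1)                 # first-digit probability of 1: log10(2) ~ 0.301
# sum(sapply(1:9, Benf.val))  # the nine first-digit probabilities sum to 1
# Benf.val(0, dig=2)          # probability that the second digit is 0, ~ 0.120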
/scratch/gouwar.j/cran-all/cranData/BeyondBenford/R/Benf.val.R
Blon.val <- function(lwbound=10**(dig-1), upbound, fig, dig=1, sd=0) {
	if (lwbound > upbound) return("lwbound must be less than upbound")
	if (upbound < 10**(dig-1)) return("upbound does not have enough digits")
	if (lwbound < (10**(dig-1))) return("lwbound does not have enough digits")
	# average, over all upper bounds i in [lwbound, upbound], of the
	# proportion of integers in [lwbound, i] whose dig-th digit is fig
	count <- 0
	prop_sum <- 0
	for (i in (lwbound:upbound)) {
		if (as.numeric(substr(i, dig, dig)) == fig) count <- count + 1
		prop_sum <- prop_sum + count/(i - lwbound + 1)
	}
	if (sd == 0) return(prop_sum/(upbound - lwbound + 1))
	else return(data.frame(Blon.pro=c("The probability is:", prop_sum/(upbound - lwbound + 1)), stan.dev=c("The standard deviation is:", Blon.val.sd(lwbound, upbound, fig, dig))))
}
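## Usage sketch (a plausible call pattern; the bounds are illustrative):
# Blon.val(lwbound=1, upbound=9999, fig=1)    # probability of first digit 1
# Blon.val(upbound=999, fig=5, dig=2, sd=1)   # second digit 5, with its SD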
/scratch/gouwar.j/cran-all/cranData/BeyondBenford/R/Blon.val.R
Blon.val.sd <- function(lwbound = 10**(dig - 1), upbound, fig, dig = 1) {
  if (lwbound > upbound) return("lwbound must be less than upbound")
  if (upbound < 10**(dig - 1)) return("upbound does not have enough digits")
  if (lwbound < (10**(dig - 1))) return("lwbound does not have enough digits")
  count <- 0
  var_sum <- 0
  for (i in (lwbound:upbound)) {
    if (as.numeric(substr(i, dig, dig)) == fig) count <- count + 1
    var_sum <- var_sum + count*(i - lwbound + 1 - count)/(i - lwbound + 1)**2
  }
  return(var_sum**0.5/(upbound - lwbound + 1))
}
/scratch/gouwar.j/cran-all/cranData/BeyondBenford/R/Blon.val.sd.R
# Chi-squared goodness-of-fit statistic comparing the observed digit
# distribution of `dat` with the Benford (mod = "ben") or Blondeau Da Silva
# (any other `mod`) theoretical distribution. With pval != 0 the p-value is
# returned as well.
chi2 <- function(dat, mod = "ben",
                 lwbound = max(floor(min(abs(dat))) + 1, (10**(dig - 1))),
                 upbound = ceiling(max(dat)), dig = 1, pval = 0) {
  prep <- function(dat) {
    dat <- as.data.frame(dat)
    rownb <- dim(dat)[1]; colnb <- dim(dat)[2]
    for (j in 1:colnb) if (is.numeric(dat[, j]) == FALSE) dat[, j] <- as.numeric(as.character(dat[, j]))
    for (i in 1:rownb) for (j in 1:colnb) if (is.na(dat[i, j]) == TRUE) dat[i, j] <- 0
    return(dat)
  }
  dat <- prep(dat)
  size <- sum(obs.numb.dig(dat, dig))
  if (mod == "ben") {
    if (size*Benf.val(9, dig) < 5) {
      return("Chi2 can not be applied: at least one insufficient theoretical frequency")
    }
    if (dig == 1) {
      chi <- 0
      for (fig in 1:9) chi <- chi + ((obs.numb.dig(dat)/size)[fig] - Benf.val(fig))**2/Benf.val(fig)
      if (pval != 0) return(data.frame(chi2 = c("Chi2 value is:", size*chi), pval = c("The p-value is:", 1 - pchisq(size*chi, 8))))
      return(data.frame(chi2 = c("Chi2 value is:"), stat = c(size*chi)))
    }
    chi <- 0
    for (fig in 1:10) chi <- chi + ((obs.numb.dig(dat, dig)/size)[fig] - Benf.val(fig - 1, dig))**2/Benf.val(fig - 1, dig)
    if (pval != 0) return(data.frame(chi2 = c("Chi2 value is:", size*chi), pval = c("The p-value is:", 1 - pchisq(size*chi, 9))))
    return(data.frame(chi2 = c("Chi2 value is:"), stat = c(size*chi)))
  }
  if (lwbound > upbound) return("lwbound must be less than upbound")
  if (upbound < 10**(dig - 1)) return("upbound does not have enough digits")
  if (lwbound < (10**(dig - 1))) return("lwbound does not have enough digits")
  if (size*Blon.val(lwbound, upbound, 9, dig) < 5) {
    return("Chi2 can not be applied: at least one insufficient theoretical frequency")
  }
  if (dig == 1) {
    chi <- 0
    for (fig in 1:9) chi <- chi + ((obs.numb.dig(dat, dig)/size)[fig] - Blon.val(lwbound, upbound, fig, dig))**2/Blon.val(lwbound, upbound, fig, dig)
    if (pval != 0) return(data.frame(chi2 = c("Chi2 value is:", size*chi), pval = c("The p-value is:", 1 - pchisq(size*chi, 8))))
    return(data.frame(chi2 = c("Chi2 value is:"), stat = c(size*chi)))
  }
  chi <- 0
  for (fig in 1:10) chi <- chi + ((obs.numb.dig(dat, dig)/size)[fig] - Blon.val(lwbound, upbound, fig - 1, dig))**2/Blon.val(lwbound, upbound, fig - 1, dig)
  if (pval != 0) return(data.frame(chi2 = c("Chi2 value is:", size*chi), pval = c("The p-value is:", 1 - pchisq(size*chi, 9))))
  return(data.frame(chi2 = c("Chi2 value is:"), stat = c(size*chi)))
}
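# Illustrative usage (not part of the original file); any `mod` other than
# "ben" selects the Blondeau Da Silva model:
# x <- rlnorm(1000, meanlog = 5, sdlog = 2)
# chi2(x, mod = "ben", pval = 1)
# chi2(x, mod = "blo", lwbound = 10, upbound = 5000, pval = 1)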
/scratch/gouwar.j/cran-all/cranData/BeyondBenford/R/chi2.R
# Histogram of the data, optionally overlaid (theor = TRUE) with the ideal
# theoretical Blondeau Da Silva distribution; with nchi > 0 a chi-squared
# goodness-of-fit test over nchi equal-width classes is returned as well.
dat.distr <- function(dat, xlab = "Data", ylab = "Frequency", main = "Distribution of data",
                      theor = TRUE, nclass = 50, col = "lightblue", conv = 0,
                      lwbound = max(floor(min(abs(dat))) + 1, (10**(dig - 1))),
                      upbound = ceiling(max(dat)), dig = 1, colt = "red", ylim = NULL,
                      border = "blue", nchi = 0, legend = TRUE, bg.leg = "gray85") {
  prep <- function(dat) {
    dat <- as.data.frame(dat)
    rownb <- dim(dat)[1]; colnb <- dim(dat)[2]
    for (j in 1:colnb) if (is.numeric(dat[, j]) == FALSE) dat[, j] <- as.numeric(as.character(dat[, j]))
    for (i in 1:rownb) for (j in 1:colnb) if (is.na(dat[i, j]) == TRUE) dat[i, j] <- 0
    return(dat)
  }
  dat <- prep(dat)
  rownb <- dim(dat)[1]; colnb <- dim(dat)[2]
  num.elig.val <- 0
  # conv == 1: rescale by a power of ten so the smallest non-zero absolute value is >= 1
  if (conv == 1) {
    min <- max(abs(dat))
    for (i in 1:rownb) for (j in 1:colnb) if (dat[i, j] != 0) if (abs(dat[i, j]) < min) min <- abs(dat[i, j])
    k <- 0
    while (min*10**k < 1) k <- k + 1
    dat <- 10**k*dat
  }
  for (i in 1:rownb) for (j in 1:colnb) if (dat[i, j] >= lwbound) num.elig.val <- num.elig.val + 1
  if (num.elig.val == 0) return("No eligible value")
  data <- rep(0, num.elig.val)
  l <- 1
  for (i in 1:rownb) for (j in 1:colnb) if (dat[i, j] >= lwbound) { data[l] <- dat[i, j]; l <- l + 1 }
  width <- (max(data) - min(data))/nclass
  if (width == 0) return("Be careful, all selected data are equal")
  dev.new()
  if (theor == TRUE) {
    hist(data, breaks = (c(0:nclass)*width + min(data)), main = main, xlab = xlab,
         xlim = c(lwbound, max(upbound, max(dat))), ylim = ylim, ylab = ylab,
         col = col, border = border)
    if (lwbound > upbound) return("lwbound must be less than upbound")
    if (upbound < 10**(dig - 1)) return("upbound does not have enough digits")
    if (lwbound < (10**(dig - 1))) return("lwbound does not have enough digits")
    # NB: `dig` is matched positionally to theor.distr.val's first argument (lwbound)
    lines(c(rep(0, lwbound), theor.distr.val(upbound = upbound, dig)*num.elig.val*width),
          col = colt, type = "p", pch = 20, cex = 0.2)
    box()
    if (legend == TRUE) legend("topright", legend = c("Ideal theoretical distribution"),
                               col = colt, inset = .05, lty = 1:2, cex = 0.8, bg = bg.leg)
  } else {
    hist(data, breaks = (c(0:nclass)*width + min(data)), main = main, xlab = xlab,
         ylab = ylab, col = col, border = border)
    box()
  }
  if (nchi != 0) {
    max <- max(data, upbound)
    # observed frequencies on nchi equal-width classes over [lwbound, max]
    freq.obs <- rep(0, nchi)
    for (k in 1:num.elig.val) {
      if (data[k] == max) freq.obs[nchi] <- freq.obs[nchi] + 1
      else freq.obs[floor(((data[k] - lwbound)/(max - lwbound))*nchi) + 1] <- freq.obs[floor(((data[k] - lwbound)/(max - lwbound))*nchi) + 1] + 1
    }
    # theoretical class frequencies accumulated from theor.distr.val
    Blon.class <- rep(0, nchi)
    count <- 0
    k <- lwbound
    tdv <- theor.distr.val(lwbound = lwbound, upbound = upbound, dig = dig)
    while (count < (nchi - 1)) {
      while (k < (lwbound + (count + 1)*(max - lwbound)/nchi) & k <= upbound) {
        Blon.class[count + 1] <- tdv[k - lwbound + 1]*num.elig.val + Blon.class[count + 1]
        k <- k + 1
      }
      count <- count + 1
    }
    if (k <= upbound) for (l in k:upbound) Blon.class[nchi] <- tdv[l - lwbound + 1]*num.elig.val + Blon.class[nchi]
    print(c("Class freq.: ", freq.obs))
    print(c("Theor. freq.:", Blon.class))
    if (Blon.class[nchi] < 5) return("Chi2 can not be applied: at least one insufficient theoretical frequency")
    chi <- 0
    for (i in 1:nchi) chi <- chi + (freq.obs[i] - Blon.class[i])**2/Blon.class[i]
    return(data.frame(chi2 = c("Chi2 value is:", chi), pval = c("The p-value is:", 1 - pchisq(chi, nchi - 1))))
  }
}
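# Illustrative usage (not part of the original file); with nchi > 0 a
# chi-squared test against the theoretical class frequencies is returned:
# x <- sample(50:5000, 300, replace = TRUE)
# dat.distr(x, lwbound = 50, upbound = 5000, nchi = 5)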
/scratch/gouwar.j/cran-all/cranData/BeyondBenford/R/dat.distr.R
digit.distr <- function(dat,mod="ben",lwbound=max(floor(min(abs(dat)))+1,(10**(dig-1))),upbound=ceiling(max(dat)),dig=1,col=c('#E69F00','#999999'),colbl=c('#AAFFAA','#999999'),colbebl=c('#E69F00','#AAFFAA','#999999'),main="Distribution of digits",No.sd=0,Sd.pr=0){ prep<-function(dat){dat=as.data.frame(dat); rownb=dim(dat)[1]; colnb=dim(dat)[2]; for (j in 1:colnb) if (is.numeric(dat[,j])==FALSE) dat[,j]=as.numeric(as.character(dat[,j])); for (i in 1:rownb) for (j in 1:colnb) if (is.na(dat[i,j])==TRUE) dat[i,j]=0; return(dat);} dat=prep(dat); rownb=dim(dat)[1]; colnb=dim(dat)[2]; num.elig.val=0; for (i in 1:rownb) for (j in 1:colnb) if (dat[i,j]>=lwbound) num.elig.val=num.elig.val+1; if (num.elig.val==0) return("No eligible value"); if (lwbound>upbound) return("lwbound must be less than upbound") if (upbound<10**(dig-1)) return("upbound does not have enough digits") if (lwbound<(10**(dig-1))) return("lwbound does not have enough digits") ond=obs.numb.dig(dat,dig); size=sum(ond); ofd=ond/size; coeff=((upbound-lwbound+1)/size)**0.5; if (mod=="ben" & dig==1) {dev.new(); Distribution=c("Observed","Benford","Observed","Benford","Observed","Benford","Observed", "Benford","Observed","Benford","Observed","Benford","Observed","Benford","Observed","Benford","Observed","Benford"); Digit=c(1,1,2,2,3,3,4,4,5,5,6,6,7,7,8,8,9,9); Freq.Prob=c(ofd[1],Benf.val(1),ofd[2],Benf.val(2),ofd[3],Benf.val(3),ofd[4],Benf.val(4),ofd[5],Benf.val(5),ofd[6],Benf.val(6),ofd[7],Benf.val(7),ofd[8],Benf.val(8),ofd[9],Benf.val(9)); Stan.Dev=c(Sd.pr*(ofd[1]*(1-ofd[1])/size)**0.5,0,Sd.pr*(ofd[2]*(1-ofd[2])/size)**0.5,0,Sd.pr*(ofd[3]*(1-ofd[3])/size)**0.5,0,Sd.pr*(ofd[4]*(1-ofd[4])/size)**0.5,0,Sd.pr*(ofd[5]*(1-ofd[5])/size)**0.5,0,Sd.pr*(ofd[6]*(1-ofd[6])/size)**0.5,0,Sd.pr*(ofd[7]*(1-ofd[7])/size)**0.5,0,Sd.pr*(ofd[8]*(1-ofd[8])/size)**0.5,0,Sd.pr*(ofd[9]*(1-ofd[9])/size)**0.5,0); tab=data.frame(Distribution,Digit,Freq.Prob,Stan.Dev) tab$Digit=as.factor(tab$Digit) p=ggplot(tab, aes(x=Digit, y=Freq.Prob, fill=Distribution))+ geom_bar(stat="identity", color="black", position=position_dodge())+ geom_errorbar(aes(ymin=Freq.Prob-No.sd*Stan.Dev, ymax=Freq.Prob+No.sd*Stan.Dev), width=0.3, position=position_dodge(0.9))+ labs(title=main, x="Digit", y = "Probabilities")+ theme_classic() + scale_fill_manual(values=col) print(p)} else if (mod=="ben") {dev.new(); Distribution=c("Observed","Benford","Observed","Benford","Observed","Benford","Observed", "Benford","Observed","Benford","Observed","Benford","Observed","Benford","Observed","Benford","Observed","Benford","Observed","Benford"); Digit=c(0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,8,8,9,9); Freq.Prob=c(ofd[1],Benf.val(0,dig),ofd[2],Benf.val(1,dig),ofd[3],Benf.val(2,dig),ofd[4],Benf.val(3,dig),ofd[5],Benf.val(4,dig),ofd[6],Benf.val(5,dig),ofd[7],Benf.val(6,dig),ofd[8],Benf.val(7,dig),ofd[9],Benf.val(8,dig),ofd[10],Benf.val(9,dig)); Stan.Dev=c(Sd.pr*(ofd[1]*(1-ofd[1])/size)**0.5,0,Sd.pr*(ofd[2]*(1-ofd[2])/size)**0.5,0,Sd.pr*(ofd[3]*(1-ofd[3])/size)**0.5,0,Sd.pr*(ofd[4]*(1-ofd[4])/size)**0.5,0,Sd.pr*(ofd[5]*(1-ofd[5])/size)**0.5,0,Sd.pr*(ofd[6]*(1-ofd[6])/size)**0.5,0,Sd.pr*(ofd[7]*(1-ofd[7])/size)**0.5,0,Sd.pr*(ofd[8]*(1-ofd[8])/size)**0.5,0,Sd.pr*(ofd[9]*(1-ofd[9])/size)**0.5,0,Sd.pr*(ofd[10]*(1-ofd[10])/size)**0.5,0); tab=data.frame(Distribution, Digit,Freq.Prob,Stan.Dev) tab$Digit=as.factor(tab$Digit) p=ggplot(tab, aes(x=Digit, y=Freq.Prob, fill=Distribution))+ geom_bar(stat="identity", color="black", position=position_dodge())+ geom_errorbar(aes(ymin=Freq.Prob-No.sd*Stan.Dev, 
ymax=Freq.Prob+No.sd*Stan.Dev), width=0.3, position=position_dodge(0.9))+ labs(title=main, x="Digit", y = "Probabilities")+ theme_classic() + scale_fill_manual(values=col) print(p)} else if (mod=="ben&blo") {if (dig==1) {dev.new(); Distribution=c("Observed","Benford","Blondeau","Observed","Benford","Blondeau","Observed","Benford","Blondeau", "Observed","Benford","Blondeau","Observed","Benford","Blondeau","Observed","Benford","Blondeau","Observed", "Benford","Blondeau","Observed","Benford","Blondeau", "Observed","Benford","Blondeau"); Digit=c(1,1,1,2,2,2,3,3,3,4,4,4,5,5,5,6,6,6,7,7,7, 8,8,8,9,9,9); Stan.Dev=c(Sd.pr*(ofd[1]*(1-ofd[1])/size)**0.5,0,coeff*Blon.val.sd(lwbound,upbound,1),Sd.pr*(ofd[2]*(1-ofd[2])/size)**0.5,0,coeff*Blon.val.sd(lwbound,upbound,2),Sd.pr*(ofd[3]*(1-ofd[3])/size)**0.5,0,coeff*Blon.val.sd(lwbound,upbound,3),Sd.pr*(ofd[4]*(1-ofd[4])/size)**0.5,0,coeff*Blon.val.sd(lwbound,upbound,4),Sd.pr*(ofd[5]*(1-ofd[5])/size)**0.5,0,coeff*Blon.val.sd(lwbound,upbound,5),Sd.pr*(ofd[6]*(1-ofd[6])/size)**0.5,0,coeff*Blon.val.sd(lwbound,upbound,6),Sd.pr*(ofd[7]*(1-ofd[7])/size)**0.5,0,coeff*Blon.val.sd(lwbound,upbound,7),Sd.pr*(ofd[8]*(1-ofd[8])/size)**0.5,0,coeff*Blon.val.sd(lwbound,upbound,8),Sd.pr*(ofd[9]*(1-ofd[9])/size)**0.5,0,coeff*Blon.val.sd(lwbound,upbound,9)); Freq.Prob=c(ofd[1],Benf.val(1),Blon.val(lwbound,upbound,1),ofd[2],Benf.val(2),Blon.val(lwbound,upbound,2),ofd[3],Benf.val(3),Blon.val(lwbound,upbound,3),ofd[4],Benf.val(4),Blon.val(lwbound,upbound,4),ofd[5],Benf.val(5),Blon.val(lwbound,upbound,5),ofd[6],Benf.val(6),Blon.val(lwbound,upbound,6),ofd[7],Benf.val(7),Blon.val(lwbound,upbound,7),ofd[8],Benf.val(8),Blon.val(lwbound,upbound,8),ofd[9],Benf.val(9),Blon.val(lwbound,upbound,9)); tab=data.frame(Distribution, Digit, Freq.Prob,Stan.Dev) tab$Digit=as.factor(tab$Digit) p=ggplot(tab, aes(x=Digit, y=Freq.Prob, fill=Distribution))+ geom_bar(stat="identity", color="black", position=position_dodge())+ geom_errorbar(aes(ymin=Freq.Prob-No.sd*Stan.Dev, ymax=Freq.Prob+No.sd*Stan.Dev), width=0.3, position=position_dodge(0.9))+ labs(title=main, x="Digit", y = "Probabilities")+ theme_classic() + scale_fill_manual(values=colbebl) print(p)} else { Distribution=c("Observed","Benford","Blondeau","Observed","Benford","Blondeau","Observed","Benford","Blondeau","Observed","Benford","Blondeau","Observed", "Benford","Blondeau","Observed","Benford","Blondeau","Observed","Benford","Blondeau","Observed","Benford","Blondeau","Observed","Benford", "Blondeau","Observed","Benford", "Blondeau"); Digit=c(0,0,0,1,1,1,2,2,2,3,3,3,4,4,4,5,5,5,6,6,6,7,7,7,8,8,8,9,9,9); Freq.Prob=c(ofd[1],Benf.val(0,dig),Blon.val(lwbound,upbound,0,dig),ofd[2],Benf.val(1,dig),Blon.val(lwbound,upbound,1,dig),ofd[3],Benf.val(2,dig),Blon.val(lwbound,upbound,2,dig),ofd[4],Benf.val(3,dig),Blon.val(lwbound,upbound,3,dig),ofd[5],Benf.val(4,dig),Blon.val(lwbound,upbound,4,dig),ofd[6],Benf.val(5,dig),Blon.val(lwbound,upbound,5,dig),ofd[7],Benf.val(6,dig),Blon.val(lwbound,upbound,6,dig),ofd[8],Benf.val(7,dig),Blon.val(lwbound,upbound,7,dig),ofd[9],Benf.val(8,dig),Blon.val(lwbound,upbound,8,dig),ofd[10],Benf.val(9,dig),Blon.val(lwbound,upbound,9,dig)); 
Stan.Dev=c(Sd.pr*(ofd[1]*(1-ofd[1])/size)**0.5,0,coeff*Blon.val.sd(lwbound,upbound,0,dig),Sd.pr*(ofd[2]*(1-ofd[2])/size)**0.5,0,coeff*Blon.val.sd(lwbound,upbound,1,dig),Sd.pr*(ofd[3]*(1-ofd[3])/size)**0.5,0,coeff*Blon.val.sd(lwbound,upbound,2,dig),Sd.pr*(ofd[4]*(1-ofd[4])/size)**0.5,0,coeff*Blon.val.sd(lwbound,upbound,3,dig),Sd.pr*(ofd[5]*(1-ofd[5])/size)**0.5,0,coeff*Blon.val.sd(lwbound,upbound,4,dig),Sd.pr*(ofd[6]*(1-ofd[6])/size)**0.5,0,coeff*Blon.val.sd(lwbound,upbound,5,dig),Sd.pr*(ofd[7]*(1-ofd[7])/size)**0.5,0,coeff*Blon.val.sd(lwbound,upbound,6,dig),Sd.pr*(ofd[8]*(1-ofd[8])/size)**0.5,0,coeff*Blon.val.sd(lwbound,upbound,7,dig),Sd.pr*(ofd[9]*(1-ofd[9])/size)**0.5,0,coeff*Blon.val.sd(lwbound,upbound,8,dig),Sd.pr*(ofd[10]*(1-ofd[10])/size)**0.5,0,coeff*Blon.val.sd(lwbound,upbound,9,dig)); tab=data.frame(Distribution, Digit,Freq.Prob,Stan.Dev) tab$Digit=as.factor(tab$Digit) p=ggplot(tab, aes(x=Digit, y=Freq.Prob, fill=Distribution))+ geom_bar(stat="identity", color="black", position=position_dodge())+ geom_errorbar(aes(ymin=Freq.Prob-No.sd*Stan.Dev, ymax=Freq.Prob+No.sd*Stan.Dev), width=0.3, position=position_dodge(0.9))+ labs(title=main, x="Digit", y = "Probabilities")+ theme_classic() + scale_fill_manual(values=colbebl) print(p)}} else if((mod!="ben") & (mod!="ben&blo")) {{if (dig==1) {dev.new(); Distribution=c("Observed","Blondeau","Observed","Blondeau","Observed","Blondeau","Observed", "Blondeau","Observed","Blondeau","Observed","Blondeau","Observed","Blondeau","Observed","Blondeau","Observed","Blondeau"); Digit=c(1,1,2,2,3,3,4,4,5,5,6,6,7,7,8,8,9,9); Freq.Prob=c(ofd[1],Blon.val(lwbound,upbound,1),ofd[2],Blon.val(lwbound,upbound,2),ofd[3],Blon.val(lwbound,upbound,3),ofd[4],Blon.val(lwbound,upbound,4),ofd[5],Blon.val(lwbound,upbound,5),ofd[6],Blon.val(lwbound,upbound,6),ofd[7],Blon.val(lwbound,upbound,7),ofd[8],Blon.val(lwbound,upbound,8),ofd[9],Blon.val(lwbound,upbound,9)); Stan.Dev=c(Sd.pr*(ofd[1]*(1-ofd[1])/size)**0.5,coeff*Blon.val.sd(lwbound,upbound,1),Sd.pr*(ofd[2]*(1-ofd[2])/size)**0.5,coeff*Blon.val.sd(lwbound,upbound,2),Sd.pr*(ofd[3]*(1-ofd[3])/size)**0.5,coeff*Blon.val.sd(lwbound,upbound,3),Sd.pr*(ofd[4]*(1-ofd[4])/size)**0.5,coeff*Blon.val.sd(lwbound,upbound,4),Sd.pr*(ofd[5]*(1-ofd[5])/size)**0.5,coeff*Blon.val.sd(lwbound,upbound,5),Sd.pr*(ofd[6]*(1-ofd[6])/size)**0.5,coeff*Blon.val.sd(lwbound,upbound,6),Sd.pr*(ofd[7]*(1-ofd[7])/size)**0.5,coeff*Blon.val.sd(lwbound,upbound,7),Sd.pr*(ofd[8]*(1-ofd[8])/size)**0.5,coeff*Blon.val.sd(lwbound,upbound,8),Sd.pr*(ofd[9]*(1-ofd[9])/size)**0.5,coeff*Blon.val.sd(lwbound,upbound,9)); tab=data.frame(Distribution,Digit,Freq.Prob,Stan.Dev) tab$Digit=as.factor(tab$Digit) p=ggplot(tab, aes(x=Digit, y=Freq.Prob, fill=Distribution))+ geom_bar(stat="identity", color="black", position=position_dodge())+ geom_errorbar(aes(ymin=Freq.Prob-No.sd*Stan.Dev, ymax=Freq.Prob+No.sd*Stan.Dev), width=0.3, position=position_dodge(0.9))+ labs(title=main, x="Digit", y = "Probabilities")+ theme_classic() + scale_fill_manual(values=colbl) print(p)} else {dev.new(); Distribution=c("Observed","Blondeau","Observed","Blondeau","Observed","Blondeau","Observed", "Blondeau","Observed","Blondeau","Observed","Blondeau","Observed","Blondeau","Observed","Blondeau","Observed","Blondeau","Observed", "Blondeau"); Digit=c(0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,8,8,9,9); 
Freq.Prob=c(ofd[1],Blon.val(lwbound,upbound,0,dig),ofd[2],Blon.val(lwbound,upbound,1,dig),ofd[3],Blon.val(lwbound,upbound,2,dig),ofd[4],Blon.val(lwbound,upbound,3,dig),ofd[5],Blon.val(lwbound,upbound,4,dig),ofd[6],Blon.val(lwbound,upbound,5,dig),ofd[7],Blon.val(lwbound,upbound,6,dig),ofd[8],Blon.val(lwbound,upbound,7,dig),ofd[9],Blon.val(lwbound,upbound,8,dig),ofd[10],Blon.val(lwbound,upbound,9,dig)); Stan.Dev=c(Sd.pr*(ofd[1]*(1-ofd[1])/size)**0.5,coeff*Blon.val.sd(lwbound,upbound,0,dig),Sd.pr*(ofd[2]*(1-ofd[2])/size)**0.5,coeff*Blon.val.sd(lwbound,upbound,1,dig),Sd.pr*(ofd[3]*(1-ofd[3])/size)**0.5,coeff*Blon.val.sd(lwbound,upbound,2,dig),Sd.pr*(ofd[4]*(1-ofd[4])/size)**0.5,coeff*Blon.val.sd(lwbound,upbound,3,dig),Sd.pr*(ofd[5]*(1-ofd[5])/size)**0.5,coeff*Blon.val.sd(lwbound,upbound,4,dig),Sd.pr*(ofd[6]*(1-ofd[6])/size)**0.5,coeff*Blon.val.sd(lwbound,upbound,5,dig),Sd.pr*(ofd[7]*(1-ofd[7])/size)**0.5,coeff*Blon.val.sd(lwbound,upbound,6,dig),Sd.pr*(ofd[8]*(1-ofd[8])/size)**0.5,coeff*Blon.val.sd(lwbound,upbound,7,dig),Sd.pr*(ofd[9]*(1-ofd[9])/size)**0.5,coeff*Blon.val.sd(lwbound,upbound,8,dig),Sd.pr*(ofd[10]*(1-ofd[10])/size)**0.5,coeff*Blon.val.sd(lwbound,upbound,9,dig)); tab=data.frame(Distribution,Digit,Freq.Prob,Stan.Dev) tab$Digit=as.factor(tab$Digit) p=ggplot(tab, aes(x=Digit, y=Freq.Prob, fill=Distribution))+ geom_bar(stat="identity", color="black", position=position_dodge())+ geom_errorbar(aes(ymin=Freq.Prob-No.sd*Stan.Dev, ymax=Freq.Prob+No.sd*Stan.Dev), width=0.3, position=position_dodge(0.9))+ labs(title=main, x="Digit", y = "Probabilities")+ theme_classic() + scale_fill_manual(values=colbl) print(p)} }}}
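# Illustrative usage (not part of the original file):
# x <- rlnorm(1000, meanlog = 5, sdlog = 2)
# digit.distr(x, mod = "ben")                                     # observed vs Benford, first digit
# digit.distr(x, mod = "ben&blo", lwbound = 10, upbound = 5000)   # Benford and Blondeau side by side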
/scratch/gouwar.j/cran-all/cranData/BeyondBenford/R/digit.distr.R
# Count how often each figure occurs as digit number `dig` (1 = first
# significant digit, up to 4) over all entries of `dat`. Returns a vector of
# length 9 for dig == 1 (the first digit is never 0) and of length 10
# otherwise (digits 0-9); entries whose requested digit does not exist
# (including zeros) are skipped.
obs.numb.dig <- function(dat, dig = 1) {
  prep <- function(dat) {
    dat <- as.data.frame(dat)
    rownb <- dim(dat)[1]; colnb <- dim(dat)[2]
    for (j in 1:colnb) if (is.numeric(dat[, j]) == FALSE) dat[, j] <- as.numeric(as.character(dat[, j]))
    for (i in 1:rownb) for (j in 1:colnb) if (is.na(dat[i, j]) == TRUE) dat[i, j] <- 0
    return(dat)
  }
  # helpers returning the requested digit of n, or "?" if it does not exist
  firstdigit <- function(n) {
    n <- abs(n)
    if (n >= 1) {
      k <- 1; while (n >= 10**k) k <- k + 1
      return(floor(n/10**(k - 1)))
    }
    k <- 1; while (n*10**k < 1) k <- k + 1
    return(floor(n*10**k))
  }
  secondigit <- function(n) {
    n <- abs(n)
    if (n >= 10) {
      k <- 1; while (n >= 10**k) k <- k + 1
      return(floor(n/10**(k - 2)) - 10*floor(n/10**(k - 1)))
    }
    if (n >= 1) {
      if (floor(n) == n) return("?")
      return(floor(10*n) - 10*floor(n))
    }
    k <- 1; while (n*10**k < 1) k <- k + 1
    if (floor(n*10**k) == n*10**k) return("?")
    return(floor(n*10**(k + 1)) - 10*floor(n*10**k))
  }
  thirdigit <- function(n) {
    n <- abs(n)
    if (n >= 100) {
      k <- 1; while (n >= 10**k) k <- k + 1
      return(floor(n/10**(k - 3)) - 10*floor(n/10**(k - 2)))
    }
    if (n >= 10) {
      if (floor(n) == n) return("?")
      return(floor(10*n) - 10*floor(n))
    }
    if (n >= 1) {
      if (floor(n*10) == n*10) return("?")
      return(floor(100*n) - 10*floor(10*n))
    }
    k <- 1; while (n*10**k < 1) k <- k + 1
    if (floor(n*10**(k + 1)) == n*10**(k + 1)) return("?")
    return(floor(n*10**(k + 2)) - 10*floor(n*10**(k + 1)))
  }
  fourthdigit <- function(n) {
    n <- abs(n)
    if (n >= 1000) {
      k <- 1; while (n >= 10**k) k <- k + 1
      return(floor(n/10**(k - 4)) - 10*floor(n/10**(k - 3)))
    }
    if (n >= 100) {
      if (floor(n) == n) return("?")
      return(floor(10*n) - 10*floor(n))
    }
    if (n >= 10) {
      if (floor(n*10) == n*10) return("?")
      return(floor(100*n) - 10*floor(10*n))
    }
    if (n >= 1) {
      if (floor(n*100) == n*100) return("?")
      return(floor(1000*n) - 10*floor(n*100))
    }
    k <- 1; while (n*10**k < 1) k <- k + 1
    if (floor(n*10**(k + 2)) == n*10**(k + 2)) return("?")
    return(floor(n*10**(k + 3)) - 10*floor(n*10**(k + 2)))
  }
  digit <- function(n, dig = 1) {
    if (n == 0) return("?")
    if (dig == 1) return(firstdigit(n))
    if (dig == 2) return(secondigit(n))
    if (dig == 3) return(thirdigit(n))
    return(fourthdigit(n))
  }
  dat <- prep(dat)
  if (dig == 1) {
    vecnum <- rep(0, 9)
    for (i in 1:dim(dat)[1]) for (j in 1:dim(dat)[2])
      if (digit(dat[i, j]) != "?") vecnum[digit(dat[i, j])] <- vecnum[digit(dat[i, j])] + 1
    return(vecnum)
  }
  if (dig <= 4) {
    vecnum <- rep(0, 10)
    for (i in 1:dim(dat)[1]) for (j in 1:dim(dat)[2])
      if (digit(dat[i, j], dig = dig) != "?") vecnum[digit(dat[i, j], dig = dig) + 1] <- vecnum[digit(dat[i, j], dig = dig) + 1] + 1
    return(vecnum)
  }
  return("Invalid digit: dig must be between 1 and 4")
}
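# Illustrative usage (not part of the original file):
# obs.numb.dig(c(123, 45.6, 0.078, 9))           # first-digit counts (vector of length 9)
# obs.numb.dig(c(123, 45.6, 0.078, 9), dig = 2)  # second-digit counts (length 10; 9 has no 2nd digit)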
/scratch/gouwar.j/cran-all/cranData/BeyondBenford/R/obs.numb.dig.R
prep <- function(dat) {
  dat <- as.data.frame(dat)
  rownb <- dim(dat)[1]
  colnb <- dim(dat)[2]
  for (j in 1:colnb) if (is.numeric(dat[, j]) == FALSE) dat[, j] <- as.numeric(as.character(dat[, j]))
  for (i in 1:rownb) for (j in 1:colnb) if (is.na(dat[i, j]) == TRUE) dat[i, j] <- 0
  return(dat)
}
/scratch/gouwar.j/cran-all/cranData/BeyondBenford/R/prep.R
# Theoretical probability of each value k in {lwbound, ..., upbound} under the
# Blondeau Da Silva model: P(k) = (1/N) * sum over i >= k of 1/(i - lwbound + 1),
# where N = upbound - lwbound + 1, i.e. the data are uniform on {lwbound, ..., i}
# with the upper limit i itself uniform on {lwbound, ..., upbound}.
theor.distr.val <- function(lwbound, upbound, dig = 1) {
  if (lwbound > upbound) return("lwbound must be less than upbound")
  if (upbound < 10**(dig - 1)) return("upbound does not have enough digits")
  if (lwbound < (10**(dig - 1))) return("lwbound does not have enough digits")
  val <- rep(0, upbound - lwbound + 1)
  for (k in lwbound:upbound) {
    sum <- 0
    for (i in (k + 1 - lwbound):(upbound + 1 - lwbound)) sum <- sum + 1/i
    val[k - lwbound + 1] <- sum
  }
  val <- val/(upbound + 1 - lwbound)
  return(val)
}
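# Illustrative usage (not part of the original file):
# p <- theor.distr.val(lwbound = 10, upbound = 99)
# sum(p)                       # probabilities sum to 1
# plot(10:99, p, type = "h")   # decreasing in k, as the model predicts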
/scratch/gouwar.j/cran-all/cranData/BeyondBenford/R/theor.distr.val.R
##' @keywords internal
"_PACKAGE"

## usethis namespace: start
## usethis namespace: end
NULL
/scratch/gouwar.j/cran-all/cranData/Bhat/R/Bhat-package.R
##' Generalized inverse-logit transform
##'
##' maps the real line onto the open interval (xl, xu) using the transform
##' y = (exp(xt) * xu + xl)/(1.+exp(xt)) where xt is a numeric vector with
##' -Inf < xt < Inf
##'
##' @param xt a numeric vector
##' @param xl a numeric vector of same length as xt
##' @param xu a numeric vector of same length as xt, and xu > xl
##' @return returns the inverse-logit transform (numeric) of xt
##' @author E. Georg Luebeck (FHCRC)
##' @seealso \code{\link{ftrf}}
##' @keywords optimize misc
##' @export
"btrf" <- function(xt, xl, xu) {
  #### back transformation
  #### this assumes logit transformations of the parameters
  #### bounded from below by xl and from above by xu
  rho <- exp(xt)
  return((rho * xu + xl)/(1. + rho))
}
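# Illustrative round trip (not part of the original file): btrf inverts the
# forward transform ftrf (defined in ftrf.R), mapping unconstrained values
# back into the box (xl, xu).
# xt <- ftrf(c(0.2, 5), xl = c(0, 0), xu = c(1, 10))
# btrf(xt, xl = c(0, 0), xu = c(1, 10))   # recovers c(0.2, 5)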
/scratch/gouwar.j/cran-all/cranData/Bhat/R/btrf.R
##' Function minimization with box-constraints ##' ##' This Davidon-Fletcher-Powell optimization algorithm has been `hand-tuned' ##' for minimal setup configuration and for efficency. It uses an internal ##' logit-type transformation based on the pre-specified box-constraints. ##' Therefore, it usually does not require rescaling (see help for the R optim ##' function). \code{dfp} automatically computes step sizes for each parameter ##' to operate with sufficient sensitivity in the functional output. ##' Performance is comparable to the BFGS algorithm in the R function ##' \code{optim}. \code{dfp} interfaces with \code{newton} to ascertain ##' convergence, compute the eigenvalues of the Hessian, and provide 95\% ##' confidence intervals when the function to be minimized is a negative ##' log-likelihood. ##' ##' The dfp function minimizes a function \code{f} over the parameters ##' specified in the input list \code{x}. The algorithm is based on Fletcher's ##' "Switching Method" (Comp.J. 13,317 (1970)) ##' ##' the code has been 'transcribed' from Fortran source code into R ##' ##' @param x a list with components 'label' (of mode character), 'est' (the ##' parameter vector with the initial guess), 'low' (vector with lower bounds), ##' and 'upp' (vector with upper bounds) ##' @param f the function that is to be minimized over the parameter vector ##' defined by the list \code{x} ##' @param tol a tolerance used to determine when convergence should be ##' indicated ##' @param nfcn number of function calls ##' @param ... other parameters to be passed to `f` ##' @return list with the following components: \item{fmin }{ the function ##' value f at the minimum } \item{label }{ the labels taken from list \code{x} ##' } \item{est }{ a vector of the estimates at the minimum. dfp does not ##' overwrite \code{x} } \item{status }{ 0 indicates convergence, 1 indicates ##' non-convergence } \item{nfcn }{ no. of function calls } ##' @note This function is part of the Bhat exploration tool ##' @author E. Georg Luebeck (FHCRC) ##' @seealso optim, \code{\link{newton}}, \code{\link{ftrf}}, ##' \code{\link{btrf}}, \code{\link{logit.hessian}} ##' @references Fletcher's Switching Method (Comp.J. 13,317, 1970) ##' @keywords optimize methods ##' @examples ##' ##' # generate some Poisson counts on the fly ##' dose <- c(rep(0,50),rep(1,50),rep(5,50),rep(10,50)) ##' data <- cbind(dose,rpois(200,20*(1+dose*.5*(1-dose*0.05)))) ##' ##' # neg. log-likelihood of Poisson model with 'linear-quadratic' mean: ##' lkh <- function (x) { ##' ds <- data[, 1] ##' y <- data[, 2] ##' g <- x[1] * (1 + ds * x[2] * (1 - x[3] * ds)) ##' return(sum(g - y * log(g))) ##' } ##' ##' # for example define ##' x <- list(label=c("a","b","c"),est=c(10.,10.,.01),low=c(0,0,0),upp=c(100,20,.1)) ##' ##' # call: ##' results <- dfp(x,f=lkh) ##' ##' @export ##' "dfp" <- function (x, f, tol=1e-5, nfcn = 0, ...) { # Function Minimization for R. # This function is part of the Bhat exploration tool and is # based on Fletcher's "Switching Method" (Comp.J. 13,317 (1970)). # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. 
# E Georg Luebeck ([email protected]) ff = function(x) f(x, ...) ## 1-arg function (closure) to be optimized xt.inf <- 16 slamin <- 0.2 slamax <- 3 tlamin <- 0.05 tlamax <- 6 iter <- 0 status <- 1 if (!is.list(x)) { cat("x is not a list! see help file", "\n") return() } names(x)[1] <- "label" names(x)[2] <- "est" names(x)[3] <- "low" names(x)[4] <- "upp" npar <- length(x$est) #### objects: if (npar <= 0) { warning("no. of parameters < 1") stop() } g <- numeric(npar) gs <- numeric(npar) g2 <- numeric(npar) xt <- numeric(npar) xxs <- numeric(npar) dirin <- numeric(npar) delgam <- numeric(npar) vg <- numeric(npar) #### first call cat(date(), "\n","\n") xt <- ftrf(x$est, x$low, x$upp) fmin <- ff(x$est) nfcn <- nfcn + 1 cat('starting at','\n') cat(format(nfcn), " fmin: ", fmin, " ", format(x$est), "\n") isw2 <- 0 nretry <- 0 nfcnmx <- 10000 vtest <- 0.001 apsi <- 0.05 #### or .01 for better convergence? up <- 1 #### memorize current no. of function calls npfn <- nfcn rho2 <- 10 * apsi rostop <- tol * apsi trace <- 1 fs <- fmin cat("\n") # cat("rostop: ", rostop, "\n", "apsi: ", apsi, "\n", "vtest: ", vtest, "\n") dfp.loop <- 0 # while() while (dfp.loop < 10) { #### 1, 2 #### ?? cat("COVARIANCE MATRIX NOT POSITIVE-DEFINITE","\n") #### define step sizes dirin d <- dqstep(list(label=x$label,est=btrf(xt, x$low, x$upp),low=x$low,upp=x$upp),ff,sens=.01) if (isw2 >= 1) d <- 0.02 * sqrt(abs(diag(v)) * up) dirin <- d ##### obtain gradients, second derivatives, search for pos. curvature ######### ntry <- 0 negg2 <- 1 # loop 10 while (negg2 >= 1 & ntry <= 6) { negg2 <- 0 #for(id in 1:npar) loop 10 for (i in 1:npar) { d <- dirin[i] xtf <- xt[i] xt[i] <- xtf + d fs1 <- ff(btrf(xt, x$low, x$upp)) nfcn <- nfcn + 1 xt[i] <- xtf - d fs2 <- ff(btrf(xt, x$low, x$upp)) nfcn <- nfcn + 1 xt[i] <- xtf gs[i] <- (fs1 - fs2)/(2 * d) g2[i] <- (fs1 + fs2 - 2 * fmin)/d^2 #if (g2[i] <= 0.) if (g2[i] <= 0) { #### search if g2 <= 0. . . cat("covariance matrix is not positive-definite or nearly singular", "\n") negg2 <- negg2 + 1 ntry <- ntry + 1 d <- 50 * abs(dirin[i]) xbeg <- xtf if (gs[i] < 0) { dirin[i] <- -dirin[i] } kg <- 0 nf <- 0 ns <- 0 # while(ns < 10 ...) while (ns < 10 & nf < 10) { xt[i] <- xtf + d f0 <- ff(btrf(xt, x$low, x$upp)) nfcn <- nfcn + 1 # cat("dfp search intermediate output:","\n") # cat("f0: ",f0," fmin: ",fmin," nfcn: ",nfcn,"\n") # cat("xt: ",xt,"\n") if (xt[i] > xt.inf) { cat("parameter ", x$label[i], " near upper boundary","\n")} if (xt[i] < -xt.inf) { cat("parameter ", x$label[i], " near lower boundary","\n")} if (f0 == "NaN") { warning("f0 is NaN") nf <- 10 break } if (f0 <= fmin & abs(xt[i]) < xt.inf) { # success xtf <- xt[i] d <- 3 * d fmin <- f0 kg <- 1 ns <- ns + 1 } else { if (kg == 1) { ns <- 0 nf <- 0 # failure break } else { kg <- -1 nf <- nf + 1 d <- (-0.4) * d } } } if (nf == 10) { d <- 1000 * d xtf <- xbeg g2[i] <- 1 negg2 <- negg2 - 1 } if (ns == 10) { if (fmin >= fs) { d <- 0.001 * d xtf <- xbeg g2[i] <- 1 negg2 <- negg2 - 1 } } xt[i] <- xtf dirin[i] <- 0.1 * d fs <- fmin } } } #### provide output if (ntry > 6) { warning("could not find pos. def. covariance - procede with DFP")} ntry <- 0 matgd <- 1 #### diagonal matrix #### get sigma and set up loop if (isw2 <= 1) { ntry <- 1 matgd <- 0 v <- matrix(0, npar, npar) diag(v) <- 2/g2 } if (!all(diag(v) > 0)) { ntry <- 1 matgd <- 0 v <- matrix(0, npar, npar) # check whether always g2 > 0 ? 
diag(v) <- 2/g2 } xxs <- xt sigma <- as.vector(gs %*% (v %*% gs)) * 0.5 if (sigma < 0) { cat("covariance matrix is not positive-definite", "\n") if (ntry == 0) { #try one more time (setting ntry=1) ntry <- 1 matgd <- 0 v <- matrix(0, npar, npar) diag(v) <- 2/g2 xxs <- xt sigma <- as.vector(gs %*% (v %*% gs)) * 0.5 #### provide output if (sigma < 0) { isw2 <- 0 warning("could not find pos. def. covariance") x$est <- btrf(xt, x$low, x$upp) return(list(fmin = fmin, x = x)) } } } else { isw2 <- 1 iter <- 0 } x$est <- btrf(xt, x$low, x$upp) cat("dfp search intermediate output:", "\n") cat("iter: ", iter, " fmin: ", fmin, " nfcn: ", nfcn, "\n") #### start main loop ####################################### if(dfp.loop == 0) {cat("start main loop:", "\n")} else { cat("restart main loop:", "\n")} f.main <- 0 #exit main if: f.main > 0 while (f.main == 0) { #vector dirin <- -0.5 * as.vector(v %*% gs) #scalar gdel <- as.vector(dirin %*% gs) #### linear search along -vg . . . xt <- xxs + dirin xt[xt > xt.inf] <- xt.inf xt[xt < -xt.inf] <- -xt.inf f0 <- ff(btrf(xt, x$low, x$upp)) nfcn <- nfcn + 1 #### cat(format(nfcn)," f0: ",f0," ",format(xt),"\n","\n") #### change to output on orig. scale #### quadr interp using slope gdel denom <- 2 * (f0 - fmin - gdel) if (denom <= 0) { slam <- slamax } else { slam <- -gdel/denom if (slam > slamax) { #### cat("slam: ",format(slam),"\n") slam <- slamax } else { if (slam < slamin) slam <- slamin } } # if (abs(slam-1.) >= 0.1) if (abs(slam - 1) >= 0.1) { # else go to 70 xt <- xxs + slam * dirin xt[xt > xt.inf] <- xt.inf xt[xt < -xt.inf] <- -xt.inf f2 <- ff(btrf(xt, x$low, x$upp)) nfcn <- nfcn + 1 #### cat(format(nfcn)," f2: ",f2," ",format(xt),"\n") #### quadr interp using 3 points aa <- fs/slam bb <- f0/(1 - slam) cc <- f2/(slam * (slam - 1)) denom <- 2 * (aa + bb + cc) if (denom <= 0) { tlam <- tlamax } else { tlam <- (aa * (slam + 1) + bb * slam + cc)/denom #### cat("tlam: ",format(tlam),"\n"); cat(f0,f2,fs,'\n') if (tlam > tlamax) { tlam <- tlamax } else { if (tlam < tlamin) tlam <- tlamin } } xt <- xxs + tlam * dirin xt[xt > xt.inf] <- xt.inf xt[xt < -xt.inf] <- -xt.inf f3 <- ff(btrf(xt, x$low, x$upp)) nfcn <- nfcn + 1 if (f0 >= fmin & f2 >= fmin & f3 >= fmin) { f.main <- 200 cat("break 200", "\n") #### cat(format(nfcn)," f3: ",f3," ",format(xt),"\n") break } if (f0 < f2 & f0 < f3) { slam <- 1 } else { if (f2 < f3) { f0 <- f2 } else { ## 55? 
f0 <- f3 slam <- tlam } } dirin <- dirin * slam xt <- xxs + dirin xt[xt > xt.inf] <- xt.inf xt[xt < -xt.inf] <- -xt.inf } fmin <- f0 isw2 <- 2 if (sigma + fs - fmin < rostop) { f.main <- 170 #### stop/convergence criteria break } apsi.c <- sigma + rho2 + fs - fmin if (apsi.c <= apsi) { if (trace < vtest) { f.main <- 170 break } } #non-convergence if (nfcn - npfn >= nfcnmx) { f.main <- 190 break } iter <- iter + 1 #### get gradient and sigma #### compute first and second (diagonal) derivatives fmin <- ff(btrf(xt, x$low, x$upp)) nfcn <- nfcn + 1 #### cat(format(nfcn)," ",fmin," ",format(xt),"\n") #### cat("___________________________________________","\n") for (i in 1:npar) { vii <- v[i, i] if (vii > 0) { d <- 0.002 * sqrt(abs(vii) * up) } else { d <- 0.02 } xtf <- xt[i] xt[i] <- xtf + d fs1 <- ff(btrf(xt, x$low, x$upp)) nfcn <- nfcn + 1 xt[i] <- xtf - d fs2 <- ff(btrf(xt, x$low, x$upp)) nfcn <- nfcn + 1 xt[i] <- xtf g[i] <- (fs1 - fs2)/(2 * d) g2[i] <- (fs1 + fs2 - 2 * fmin)/d^2 } rho2 <- sigma vg <- 0.5 * as.vector(v %*% (g - gs)) gvg <- as.vector((g - gs) %*% vg) delgam <- as.vector(dirin %*% (g - gs)) sigma <- 0.5 * as.vector(g %*% (v %*% g)) if (sigma >= 0) { if (gvg <= 0 | delgam <= 0) { { if (sigma < 0.1 * rostop) { f.main <- 170 break } else { f.main <- 1 break } } } } else { f.main <- 1 break } #### update covariance matrix trace <- 0 vii <- diag(v) for (i in 1:npar) { for (j in 1:npar) { d <- dirin[i] * dirin[j]/delgam - vg[i] * vg[j]/gvg v[i, j] <- v[i, j] + 2 * d } } if (delgam > gvg) { flnu <- dirin/delgam - vg/gvg for (i in 1:npar) { for (j in 1:npar) { v[i, j] <- v[i, j] + 2 * gvg * flnu[i] * flnu[j] } } } xxs <- xt gs <- g trace <- sum(((diag(v) - vii)/(diag(v) + vii))^2) trace <- sqrt(trace/npar) fs <- f0 } # end main loop ########################################### if (f.main == 1) { x$est <- btrf(xt, x$low, x$upp) dfp.loop <- dfp.loop + 1 cat("dfp loop: ", dfp.loop, "\n") } if (f.main == 190) { #exceeds nfcn isw1 <- 1 f.main <- 230 status <- 1 break } if (f.main == 200) { cat("dfp search fails to converge, restart ...", "\n") xt <- xxs x$est <- btrf(xt, x$low, x$upp) isw2 <- 1 cat("sigma: ", sigma, "\n") if (sigma < rostop) { f.main <- 170 } else { if (matgd >= 0) { ################################### dfp.loop <- dfp.loop + 1 cat("dfp loop: ", dfp.loop, "\n") } else { status <- 1 break } } } #### CONVERGENCE if (f.main == 170) { status <- 0 cat("DFP search completed with status ",trunc(status),"\n","\n") isw2 <- 3 if (matgd == 0) { npargd <- npar * (npar + 5)/2 if (nfcn - npfn < npargd) { cat("covariance matrix inaccurate - calculate Hessian","\n") if (isw2 >= 2) { isw2 <- 3 cat("perhaps dfp started near minimum - try newton-raphson", "\n") } } } break } } fmin <- ff(btrf(xt, x$low, x$upp)); nfcn <- nfcn + 1 x$est <- btrf(xt, x$low, x$upp) #### compute error (logit scale) del <- dqstep(x,ff,sens=.01) h <- logit.hessian(x,ff,del,dapprox=FALSE,nfcn); nfcn <- h$nfcn v <- solve(h$ddf) xtl <- xt-1.96*sqrt(diag(v)) xtu <- xt+1.96*sqrt(diag(v)) #### dfp search final output: xl <- btrf(xtl, x$low, x$upp) xu <- btrf(xtu, x$low, x$upp) cat("Optimization Result:", "\n") cat("iter: ", iter, " fmin: ", fmin, " nfcn: ", nfcn, "\n") cat("\n") m.out <- cbind(x$label, signif(x$est,8), signif(xl,8), signif(xu,8)) dimnames(m.out) <- list(1:npar, c("label", "estimate", "low", "high")) print(m.out, quote = FALSE) cat("\n") return(list(fmin = fmin, label = x$label, est = x$est, low=xl, high=xu, status=status, nfcn=nfcn)) }
/scratch/gouwar.j/cran-all/cranData/Bhat/R/dfp.R
##' step size generator ##' ##' \code{dqstep} determines the smallest steps ds from s so that ##' abs(f(s+ds)-f(s)) equals a pre-specified sensitivity ##' ##' uses simple quadratic interpolation ##' ##' @param x a list with components 'label' (of mode character), 'est' (the ##' parameter vector with the initial guess), 'low' (vector with lower bounds), ##' and 'upp' (vector with upper bounds) ##' @param f the function that is to be minimized over the parameter vector ##' defined by the list \code{x} ##' @param sens target sensitivity (i.e. the value of f(s+ds)-f(s)) ##' @return returns a vector with the desired step sizes ##' @note This function is part of the Bhat exploration tool ##' @author E. Georg Luebeck (FHCRC) ##' @seealso \code{\link{dfp}}, \code{\link{newton}}, ##' \code{\link{logit.hessian}} ##' @keywords optimize iteration ##' @examples ##' ##' ## Rosenbrock Banana function ##' fr <- function(x) { ##' x1 <- x[1] ##' x2 <- x[2] ##' 100 * (x2 - x1 * x1)^2 + (1 - x1)^2 ##' } ##' ## define ##' x <- list(label=c("a","b"),est=c(1,1),low=c(0,0),upp=c(100,100)) ##' dqstep(x,fr,sens=1) ##' ##' @export ##' "dqstep" <- function(x,f,sens) { # fix nfcn counter later npar <- length(x$est) step <- .001; dsteps <- rep(step,npar) # OBTAIN REFERENCE POINT VALUE xt <- ftrf(x$est, x$low, x$upp) f0 <- f(x$est) for(i in 1:npar) { stepi <- step flag <- 0 xt.new <- xt while(flag==0) { flag <- 1 xt.new[i] <- xt[i]-stepi; x1 <- -stepi f1 <- f(btrf(xt.new, x$low, x$upp)) xt.new[i] <- xt[i]+stepi; x2 <- stepi f2 <- f(btrf(xt.new, x$low, x$upp)) # handle exceptions if (is.na(f2)) f2 <- Inf if(f2==Inf | f2==-Inf) { warning('Infs - reducing step size') stepi <- stepi/10; flag <- 0} if(f2==f0 & f1==f0) { cat('increasing step size','\n') stepi <- stepi*10; flag <- 0} if(abs(f2-f0) > (.5 * sens) | abs(f1-f0) > (.5 * sens)) { cat('reducing step size','\n') stepi <- stepi/10; flag <- 0 # cat(stepi,f1,f2,'\n') } } b <- ((f1-f0)*x2-(f2-f0)*x1)/(x1*x1*x2-x2*x2*x1) a <- (f1-f0)/x1-b*x1 # ***roots r <- a*a+4*b*sens if(r < 0 | is.na(r) | b ==0) { warning('oops: unable to find stepsize, use default') cat('problem with ',x$label[i],'\n') break } xs1 <- 0.5*(-a-sqrt(a*a+4*b*sens))/b xs2 <- 0.5*(-a+sqrt(a*a+4*b*sens))/b if(abs(xs1) <= abs(xs2)) {xs <- xs1} else { xs <- xs2} # *** see where we end up xt.new[i] <- xt[i]+xs f2 <- f(btrf(xt.new, x$low, x$upp)) if (is.na(f2)) f2 <- Inf if(f2==Inf | f2==-Inf) { warning('oops: unable to find stepsize, use default') break } dsteps[i] <- xs # cat('DSTEP:',x$label[i],signif(xs,6),(f2-f0),'\n') } return(dsteps) }
/scratch/gouwar.j/cran-all/cranData/Bhat/R/dqstep.R
##' Generalized logit transform
##'
##' maps a bounded parameter x onto the real line according to
##' y = log((x-xl)/(xu-x)), with xl < x < xu. If this constraint is violated,
##' an error occurs. x may be a vector
##'
##' @param x a numeric vector
##' @param xl a numeric vector of same length as x with x > xl
##' @param xu a numeric vector of same length as x with x < xu
##' @return returns numerical vector of transforms
##' @author E. Georg Luebeck (FHCRC)
##' @seealso \code{\link{btrf}}
##' @keywords optimize misc
##' @export
"ftrf" <- function(x, xl, xu) {
  #### forward transformation
  #### this assumes logit transformations of the parameters
  #### bounded from below by xl and from above by xu
  if (any((x - xl) <= 0)) stop('ftrf requires x > xl')
  if (any((xu - x) <= 0)) stop('ftrf requires x < xu')
  return(log((x - xl)/(xu - x)))
}
/scratch/gouwar.j/cran-all/cranData/Bhat/R/ftrf.R
##' Random search for a global function minimum ##' ##' This function generates MCMC samples from a (posterior) density function f ##' (not necessarily normalized) in search of a global minimum of f. It uses a ##' simple Metropolis algorithm to generate the samples. Global monitors the ##' mcmc samples and returns the minimum value of f, as well as a sample ##' covariance (covm) that can be used as input for the Bhat function mymcmc. ##' ##' standard output reports a summary of the acceptance fraction, the current ##' values of nlogf and the parameters for every (100*skip) th cycle. Plotted ##' chains show values only for every (skip) th cycle. ##' ##' @param x a list with components 'label' (of mode character), 'est' (the ##' parameter vector with the initial guess), 'low' (vector with lower bounds), ##' and 'upp' (vector with upper bounds) ##' @param nlogf negative log of the density function (not necessarily ##' normalized) ##' @param beta 'inverse temperature' parameter ##' @param mc length of MCMC search run ##' @param scl not used ##' @param skip number of cycles skipped for graphical output ##' @param nfcn number of function calls ##' @param plot logical variable. If TRUE the chain and the negative log ##' density (nlogf) is plotted ##' @return list with the following components: \item{fmin }{ minimum value of ##' nlogf for the samples obtained } \item{xmin }{ parameter values at fmin } ##' \item{covm }{ covariance matrix of differences between consecutive samples ##' in chain } ##' @note This function is part of the Bhat package ##' @author E. Georg Luebeck (FHCRC) ##' @seealso \code{\link{dfp}}, \code{\link{newton}}, ##' \code{\link{logit.hessian}} \code{\link{mymcmc}} ##' @references too numerous to be listed here ##' @keywords iteration methods optimize ##' @examples ##' ##' # generate some Poisson counts on the fly ##' dose <- c(rep(0,50),rep(1,50),rep(5,50),rep(10,50)) ##' data <- cbind(dose,rpois(200,20*(1+dose*.5*(1-dose*0.05)))) ##' ##' # neg. log-likelihood of Poisson model with 'linear-quadratic' mean: ##' nlogf <- function (x) { ##' ds <- data[, 1] ##' y <- data[, 2] ##' g <- x[1] * (1 + ds * x[2] * (1 - x[3] * ds)) ##' return(sum(g - y * log(g))) ##' } ##' ##' # initialize global search ##' x <- list(label=c("a","b","c"), est=c(10, 0.25, 0.05), low=c(0,0,0), upp=c(100,10,.1)) ##' # samples from posterior density (~exp(-nlogf))) with non-informative ##' # (random uniform) priors for "a", "b" and "c". ##' out <- global(x, nlogf, beta = 1., mc=1000, scl=2, skip=1, nfcn = 0, plot=TRUE) ##' # start MCMC from some other point: e.g. try x$est <- c(16,.2,.02) ##' ##' @export ##' "global" <- function (x, nlogf, beta = 1., mc=1000, scl=2, skip=1, nfcn = 0, plot=FALSE) { # simple MCMC/MH sampler for R # This module is part of the Bhat likelihood exploration tool. # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # E. Georg Luebeck ([email protected]) if (!is.list(x)) { cat("x is not a list! 
see help file", "\n") return() } names(x)[1] <- "label" names(x)[2] <- "est" names(x)[3] <- "low" names(x)[4] <- "upp" npar <- length(x$est) x0 <- x #### objects: if (npar <= 0) stop('no. of parameters must be >= 1') eps <- .05 # *** initialize graphical output if(plot==TRUE) { graphics::par(mfrow=c(npar+1,1),bg="grey")} # *** initialize f.old cat(date(), "\n") fp <- nlogf(x$est); nfcn <- nfcn + 1 # initialize monitors f.mon <- numeric(mc); x.mon <- matrix(0,ncol=npar,nrow=mc) f.mon[1] <- fp; x.mon[1,] <- x$est # *** scale dsc: used in defining proposal dsc <- rep(.05,npar) fmin <- fp # --- start M chain ------------------------------------------------------------ sum1 <- sum2 <- sum3 <- n.accept2 <- sm <- c.sum <- 0 n.tried <- n.accepted <- numeric(npar) acc <- 1+numeric(npar) #?? for(n in 1:mc) { # ** parameters in turn for(i in 1:npar) { acc.p1 <- acc.p2 <- 0 # *** create suitable random uniform proposal kernel (width) di <- dsc[i] * min(abs(x$est[i]-x$low[i]),abs(x$est[i]-x$upp[i])) dih <- di/2 x0$est[i] = x$est[i] - dih + di*stats::runif(1) if(x0$est[i] > x$low[i] & x0$est[i] < x$upp[i]) { f0 <- nlogf(x0$est); nfcn <- nfcn + 1 # IF(FP.EQ.nan .or. FP.EQ.-nan) GOTO 4 # IF(FP.EQ.inf .or. FP.EQ.-inf) GOTO 4 acc[i] = exp(beta*(fp-f0)) acc.p1 <- max(eps,min(1,acc[i])) # max(eps,...) avoids freezing at initialization n.tried[i] <- n.tried[i]+1 } if(stats::runif(1) <= acc.p1) { x$est[i] <- x0$est[i]; fp <- f0; n.accept2 <- n.accept2+1 n.accepted[i] <- n.accepted[i]+1 } if(fmin>f0) {fmin <- f0; xmin <- x0$est} } f.mon[n] <- fp; x.mon[n,] <- x$est # *** adjust di to obtain acceptance near 50% if(n%%100==0) { # *** update proposal width array DI every 100 updates for acceptance near 50 % cat("Parameter","no. tried","no. accepted","ratio",'\n') freq <- n.accepted/n.tried for (i in 1:npar) { cat(i,' ',n.tried[i],' ',n.accepted[i],' ',freq[i],'\n') dsc[i] <- max(1.e-10,dsc[i]*(freq[i]+.5)^2) } n.accepted <- n.tried <- numeric(npar); n.accept2 <- 0 n.skip <- seq(skip,n,skip) if(plot==TRUE) { graphics::par(mar=c(0, 5, 0.1, 4) + 0.1) plot(f.mon[n.skip], type='l', xlab = " ", ylab = "-log-density",col=2) for (i in 1:(npar-1)) { graphics::par(mar=c(0, 5, 0, 4) + 0.1) plot(x.mon[n.skip,i], type='l', xlab = " ", ylab = x$label[i], col=2) } graphics::par(mar=c(0.1, 5, 0, 4) + 0.1) plot(x.mon[n.skip,npar], type='l', xlab = " ", xaxt='n', ylab = x$label[npar], col=2) } } } covm <- stats::cov(x.mon[2:mc,]-x.mon[1:(mc-1),]) cat('\n','\n') # print(covm,quote=FALSE) # cat('\n','\n') # *** pilot run 2 *** includes m2 cycles for incremental adjustment of covm cat('second pilot run:','\n','\n') eig <- eigen(covm) cat('eigen values:',eig$values,'\n','\n') return(list(fmin=fmin,xmin=xmin,covm=covm)) }
/scratch/gouwar.j/cran-all/cranData/Bhat/R/global.R
##' Hessian (curvature matrix) ##' ##' Numerical evaluation of the Hessian of a real function f: \eqn{R^n }{R^n -> ##' R}\eqn{ \rightarrow R}{R^n -> R} on a generalized logit scale, i.e. using ##' transformed parameters according to x'=log((x-xl)/(xu-x))), with xl < x < ##' xu. ##' ##' This version uses a symmetric grid for the numerical evaluation computation ##' of first and second derivatives. ##' ##' @param x a list with components 'label' (of mode character), 'est' (the ##' parameter vector with the initial guess), 'low' (vector with lower bounds), ##' and 'upp' (vector with upper bounds) ##' @param f the function for which the Hessian is to be computed at point x ##' @param del step size on logit scale (numeric) ##' @param dapprox logical variable. If TRUE the off-diagonal elements are set ##' to zero. If FALSE (default) the full Hessian is computed ##' @param nfcn number of function calls ##' @return returns list with \item{df }{ first derivatives (logit scale) } ##' \item{ddf }{ Hessian (logit scale) } \item{nfcn }{ number of function calls ##' } \item{eigen }{ eigen values (logit scale) } ##' @note This function is part of the Bhat exploration tool ##' @author E. Georg Luebeck (FHCRC) ##' @seealso \code{\link{dfp}}, \code{\link{newton}}, \code{\link{ftrf}}, ##' \code{\link{btrf}}, \code{\link{dqstep}} ##' @keywords array iteration methods optimize ##' @examples ##' ##' ## Rosenbrock Banana function ##' fr <- function(x) { ##' x1 <- x[1] ##' x2 <- x[2] ##' 100 * (x2 - x1 * x1)^2 + (1 - x1)^2 ##' } ##' ## define ##' x <- list(label=c("a","b"),est=c(1,1),low=c(-100,-100),upp=c(100,100)) ##' logit.hessian(x,f=fr,del=dqstep(x,f=fr,sens=0.01)) ##' ## shows the differences in curvature at the minimum of the Banana ##' ## function along principal axis (in a logit-transformed coordinate system) ##' ##' @export ##' "logit.hessian" <- function(x=x,f=f,del=rep(.002,length(x$est)),dapprox=FALSE,nfcn=0) { # computes the hessian of f on logit scale small <- 1.e-8 npar <- length(x$est) nlm <- 2*npar*npar np2 <- 2*npar xt <- ftrf(x$est, x$low, x$upp) f0 <- f(x$est); nfcn <- nfcn + 1 # cat('compute internal Hessian with function value at:',f0,'\n') # *** INITIALIZE NEWTON BY CALCULATION OF A # NLM POINT SIMPLEX GEOMETRY IN N DIM PARAMETER SPACE xn <- matrix(rep(xt,nlm),ncol=nlm) # *** ON AXES # *** compute delta distance in each direction for(i in 1:npar) { j.even <- 2*i j.odd <- j.even-1 xn[i,j.even] <- xn[i,j.even]-del[i] xn[i,j.odd] <- xn[i,j.odd] +del[i] } if(dapprox==FALSE) { # *** OFF AXIS if(npar > 1) { mc <- np2+1 for(i in 2:npar) { for(j in 1:(i-1)) { xn[i,mc] <- xn[i,mc]+del[i] xn[j,mc] <- xn[j,mc]+del[j] mc <- mc+1 xn[i,mc] <- xn[i,mc]-del[i] xn[j,mc] <- xn[j,mc]+del[j] mc <- mc+1 xn[i,mc] <- xn[i,mc]-del[i] xn[j,mc] <- xn[j,mc]-del[j] mc <- mc+1 xn[i,mc] <- xn[i,mc]+del[i] xn[j,mc] <- xn[j,mc]-del[j] mc <- mc+1 }} } f.vec <- numeric(nlm) for(i in 1:nlm) { f.vec[i] <- f(btrf(xn[,i], x$low, x$upp)) nfcn <- nfcn + 1 } } else { # ignore off diagonal elements f.vec <- numeric(np2) for(i in 1:np2) { f.vec[i] <- f(btrf(xn[,i], x$low, x$upp)) nfcn <- nfcn + 1 } } # *** FIRST AND DIAGONAL SECOND DERIVATIVES i <- 1:npar; i.even <- 2*i; i.odd <- i.even-1 df <- (f.vec[i.even]-f.vec[i.odd])/2/del if(npar > 1) {ddf <- diag((f.vec[i.even]+f.vec[i.odd]-2*f0)/(del**2))/2 } else {ddf <- (f.vec[i.even]+f.vec[i.odd]-2*f0)/(del**2)/2 } # print(format(ddf),quote=FALSE) if(dapprox==FALSE) { # *** SECOND DERIVATIVES if(npar > 1) { mc <- np2+1 for(i in 2:npar) { for(j in 1:(i-1)) { mc1 <- mc; mc2 <- mc1+1; mc3 
<- mc2+1; mc4 <- mc3+1 ddf[i,j] <- (f.vec[mc1]+f.vec[mc3]-f.vec[mc2]-f.vec[mc4])/del[i]/del[j]/4 mc <- mc4+1 }} ddf <- ddf+t(ddf) } } else {ddf <- 2 * ddf} # *** EIGEN VALUES and MATRIX INVERSION eig <- eigen(ddf) # cat('eigen values: ',format(sort(eig$values)),'\n') if(any(eig$values < 0)) { warning('hessian not pos. definite') } if(any(abs(eig$values) < small)) { warning('hessian may be singular') } # *** ADJUSTMENT OF del (feature not implemented) # ddf.diag <- diag(ddf) # del[ddf.diag > 0] <- .002/sqrt(ddf.diag[ddf.diag > 0]) return(list(df=df,ddf=ddf,nfcn=nfcn,eigen=eig$values)) }
/scratch/gouwar.j/cran-all/cranData/Bhat/R/logit.hessian.R
##' Adaptive Multivariate MCMC sampler ##' ##' This function generates MCMC-based samples from a (posterior) density f ##' (not necessarily normalized). It uses a Metropolis algorithm in conjunction ##' with a multivariate normal proposal distribution which is updated ##' adaptively by monitoring the correlations of succesive increments of at ##' least 2 pilot chains. The method is described in De Gunst, Dewanji and ##' Luebeck (submitted). The adaptive method is similar to the one proposed in ##' Gelfand and Sahu (JCGS 3:261--276, 1994). ##' ##' standard output reports a summary of the acceptance fraction, the current ##' values of nlogf and the parameters for every (100*skip) th cycle. Plotted ##' chains show values only for every (skip) th cycle. ##' ##' @param x a list with components 'label' (of mode character), 'est' (the ##' parameter vector with the initial guess), 'low' (vector with lower bounds), ##' and 'upp' (vector with upper bounds) ##' @param nlogf negative log of the density function (not necessarily ##' normalized) for which samples are to be obtained ##' @param m1 length of first pilot run (not used when covm supplied) ##' @param m2 length of second pilot run (not used when covm supplied ) ##' @param m3 length of final run ##' @param scl1 scale for covariance of mv normal proposal (second pilot run) ##' @param scl2 scale for covariance of mv normal proposal (final run) ##' @param skip number of cycles skipped for graphical output ##' @param covm covariance matrix for multivariate normal proposal ##' distribution. If supplied, all pilot runs will be skipped and a run of ##' length m3 will be produced. Useful to continue a simulation from a given ##' point with specified covm ##' @param nfcn number of function calls ##' @param plot logical variable. If TRUE the chain and the negative log ##' density (nlogf) is plotted. The first m1+m2 cycles are shown in green, ##' other cycles in red ##' @param plot.range [Not documented. Leave as default] ##' @return list with the following components: \item{f }{ values of nlogf for ##' the samples obtained } \item{mcmc }{ the chain (samples obtained) } ##' \item{covm }{ current covariance matrix for mv normal proposal ##' distribution} ##' @note This function is part of the Bhat exploration tool ##' @author E. Georg Luebeck (FHCRC) ##' @seealso \code{\link{dfp}}, \code{\link{newton}}, ##' \code{\link{logit.hessian}} ##' @references too numerous to be listed here ##' @keywords iteration methods optimize ##' @examples ##' ##' # generate some Poisson counts on the fly ##' dose <- c(rep(0,50),rep(1,50),rep(5,50),rep(10,50)) ##' data <- cbind(dose,rpois(200,20*(1+dose*.5*(1-dose*0.05)))) ##' ##' # neg. log-likelihood of Poisson model with 'linear-quadratic' mean: ##' nlogf <- function (x) { ##' ds <- data[, 1] ##' y <- data[, 2] ##' g <- x[1] * (1 + ds * x[2] * (1 - x[3] * ds)) ##' return(sum(g - y * log(g))) ##' } ##' ##' # start MCMC near mle ##' x <- list(label=c("a","b","c"), est=c(20, 0.5, 0.05), low=c(0,0,0), upp=c(100,10,.1)) ##' # samples from posterior density (~exp(-nlogf))) with non-informative ##' # (random uniform) priors for "a", "b" and "c". ##' out <- mymcmc(x, nlogf, m1=2000, m2=2000, m3=10000, scl1=0.5, scl2=2, skip=10, plot=TRUE) ##' # start MCMC from some other point: e.g. 
try x$est <- c(16,.2,.02) ##' ##' @export ##' "mymcmc" <- function (x, nlogf, m1, m2=m1, m3, scl1=0.5, scl2=2, skip=1, covm=0, nfcn = 0, plot=FALSE, plot.range=0) { # MCMC/MH sampler for R # This module is part of the Bhat likelihood function exploration tool. # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # E. Georg Luebeck ([email protected]) # last updated: 07-27-2015 if (!is.list(x)) { cat("x is not a list! see help file", "\n") return() } names(x)[1] <- "label" names(x)[2] <- "est" names(x)[3] <- "low" names(x)[4] <- "upp" npar <- length(x$est) #### objects: if (npar <= 0) stop('no. of parameters must be >= 1') small <- 1.e-8 sens <- 1. xinf <- 20. # *** initialize graphical output if(plot==TRUE) { if(plot.range==0) { graphics::par(mfrow=c(npar+1,1),bg="grey") } } # *** initialize f.old cat(date(), "\n") xt <- ftrf(x$est, x$low, x$upp) f.old <- nlogf(x$est); nfcn <- nfcn + 1 f.first <- f.old acc.count <- 0 # *** if COVM is not provided if(!is.matrix(covm)) { # *** initialize monitors f.mon <- numeric(m1+m3); x.mon <- matrix(0,ncol=npar,nrow=m1+m3) cat('trying a proposal COVM using the Hessian','\n') del <- dqstep(x,nlogf,sens) h <- logit.hessian(x,nlogf,del,dapprox=FALSE,nfcn) # returns list: $df,$ddf,$nfcn nfcn <- h$nfcn ddf <- h$ddf # *** EIGEN VALUES and MATRIX INVERSION eig <- eigen(ddf) if(any(eig$values < small)) { cat('Hessian may not be positive definite','\n') cat('trying a proposal COVM using dqstep with sensitivity sens','\n') del <- dqstep(x,nlogf,sens) # eig <- .1/del/del; ddf <- diag(eig); eig <- eigen(ddf) inv.ddf <- diag(del*del) } else { # we've just eigendecomposed the matrix, let's use it for inverting inv.ddf <- 0.5 * eig$vectors %*% diag(1/eig$values, nrow=length(eig$values)) %*% t(eig$vectors) } # ggf <- 0.5 * solve(ddf,diag(1,npar),tol=1.e-10) # cat('unity test:','\n'); print(format(ggf %*% ddf),quote=FALSE) cat('first pilot chain:','\n','\n') nc <- 0 for (n in 1:m1) { accept <- 1 # *** compute proposal x' (=yt). If unacceptable, reject # dx <- t(eig$vectors) %*% rnorm(npar,0,1/(0.5*sqrt(eig$values))) dx <- MASS::mvrnorm(n = 1, mu = rep(0,npar), Sigma = inv.ddf, tol = 1e-6, empirical = FALSE, EISPACK = FALSE) yt <- xt + scl1 * dx Xt <- btrf(xt, x$low, x$upp) Yt <- btrf(yt, x$low, x$upp) # *** get log-likelihood f.new <- nlogf(Yt); nfcn <- nfcn + 1 # *** Jacobian of logit-transform djac <- sum(log((Yt-x$low)*(x$upp-Yt)) - log((Xt-x$low)*(x$upp-Xt))) # *** boundary checks from within func ... 
# *** R ratio if(any(abs(yt) > xinf)) { cat('cycle',n,': mymcmc close to boundary, move rejected!','\n'); accept <- 0} else { accept <- min(accept,exp(-f.new+f.old+djac)) } if(accept == 1) {xt <- yt; f.old <- f.new; acc.count <- acc.count+1 xt.maxl <- yt; f.maxl <- f.new # approximate/search max-likelihood } else { if(stats::runif(1) <= accept) { xt <- yt; f.old <- f.new; acc.count <- acc.count+1} } # *** record to monitor f.mon[n] <- f.old; x.mon[n,] <- btrf(xt, x$low, x$upp) # *** regular output and graphical monitor if(n%%(100*skip) == 0) { m.out <- c("n:",signif(n,3),"acceptance:",signif(acc.count/100/skip,3),"-log lkh:",signif(f.new,8),signif(x.mon[n,],6)) cat(m.out,'\n') acc.count <- 0 n.skip <- seq(skip,n,skip) if(plot==TRUE) { graphics::par(mar=c(0, 5, 0.1, 4) + 0.1) plot(f.mon[n.skip], type='l', xlab = " ", ylab = "-log-density",col=2) for (i in 1:(npar-1)) { graphics::par(mar=c(0, 5, 0, 4) + 0.1) plot(x.mon[n.skip,i], type='l', xlab = " ", ylab = x$label[i], col=2) } graphics::par(mar=c(0.1, 5, 0, 4) + 0.1) plot(x.mon[n.skip,npar], type='l', xlab = " ", xaxt='n', ylab = x$label[npar], col=2) nc <- nc + 1 } } } # *** obtain empirical covariance of increments covm <- stats::cov(x.mon[2:m1,]-x.mon[1:(m1-1),]) # now redundant cat('\n','\n') # print(covm,quote=FALSE) # cat('\n','\n') # *** pilot run 2 *** includes m2 cycles for incremental adjustment of covm cat('second pilot run:','\n','\n') eig <- eigen(covm) cat('eigen values:',eig$values,'\n','\n') } else { # covm for mvn proposal distribution given as input if(nrow(covm)!=length(x$est)) {stop('dimension of covm not specified correctly')} nc <- 0 # re-initialize monitors m1 <- 1 f.mon <- numeric(m1+m3); x.mon <- matrix(0,ncol=npar,nrow=m1+m3) # eig <- eigen(covm); x.mon[1,] <- x$est; f.mon <- f.first } # *** continue 'production' run on original scale acc.count <- 0 for (n in (m1+1):(m1+m3)) { x.mon[n,] <- x.mon[n-1,]; f.mon[n] <- f.mon[n-1] accept <- 1 # *** compute proposal x' (=yt). If unacceptable, reject # dx <- t(eig$vectors) %*% rnorm(npar,0,sqrt(eig$values)) dx <- MASS::mvrnorm(n = 1, mu = rep(0,npar), Sigma = covm, tol = 1e-6, empirical = FALSE, EISPACK = FALSE) y <- x.mon[n-1,] + scl2 * dx if(any((y-x$low) < small)) {cat('move rejected (lower bound)','\n'); accept <- 0} if(any((x$upp-y) < small)) {cat('move rejected (upper bound)','\n'); accept <- 0} # *** boundary checks from within func ... 
if(accept > 0) { # *** get log-likelihood f.new <- nlogf(y); nfcn <- nfcn + 1 # *** acceptance ratio and updates accept <- min(accept,exp(-f.new+f.mon[n])) if(accept == 1) {x.mon[n,] <- y; f.mon[n] <- f.new; acc.count <- acc.count+1 x.maxl <- y; f.maxl <- f.new # approximate/search max-likelihood } else { if(stats::runif(1) <= accept) { x.mon[n,] <- y; f.mon[n] <- f.new; acc.count <- acc.count+1 } } } # *** regular output and graphical monitor if(n%%(100*skip) == 0) { m.out <- c("n:",signif(n,3),"acceptance:",signif(acc.count/100/skip,3),"-log lkh:",signif(f.new,8),signif(x.mon[n,],6)) cat(m.out,'\n') acc.count <- 0 n.skip.1 <- seq(skip,n,skip) n.skip.2 <- seq(skip,min(n,m1+m2),skip) if(plot==TRUE) { if(m1 > 1) {brncol <- 3} else {brncol <- 2} graphics::par(mar=c(0, 5, 0.1, 4) + 0.1) plot(f.mon[n.skip.1], type='l', xlab = " ", ylab = "-log-density",col=2) graphics::lines(f.mon[n.skip.2],col=brncol) #pilot cycles for (i in 1:(npar-1)) { graphics::par(mar=c(0, 5, 0, 4) + 0.1) plot(x.mon[n.skip.1,i], type='l', xlab = " ", ylab = x$label[i], col=2) graphics::lines(x.mon[n.skip.2,i],col=brncol) #pilot cycles } graphics::par(mar=c(0.1, 5, 0, 4) + 0.1) plot(x.mon[n.skip.1,npar], type='l', xlab = " ", xaxt='n', ylab = x$label[npar], col=2) graphics::lines(x.mon[n.skip.2,npar],col=brncol) #pilot cycles nc <- nc + 1 } if(m1 > 1) { #note: when covm is passed to mymcmc, m1 is set to 1 # update covariance using sampled increments (m1+1):n if(n <= m1+m2) { covm <- stats::cov(x.mon[2:n,]-x.mon[1:(n-1),]) eig <- eigen(covm) if(any(eig$values < small)) {warning('covm nearly singular')} } } } } return(list(f=f.mon,mcmc=x.mon,covm=covm)) }
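# A minimal usage sketch of mymcmc() (not from the original Bhat sources; the
# toy data, bounds, and chain lengths below are illustrative assumptions),
# mirroring the Poisson example used for newton() and plkhci() elsewhere in
# this package:
if (FALSE) {
  dose <- c(rep(0, 100), rep(1, 100), rep(5, 100), rep(10, 100))
  data <- cbind(dose, rpois(400, 20 * (1 + dose * .5 * (1 - dose * 0.05))))
  # negative log-likelihood of the 'linear-quadratic' Poisson model
  nlogf <- function(x) {
    g <- x[1] * (1 + data[, 1] * x[2] * (1 - x[3] * data[, 1]))
    sum(g - data[, 2] * log(g))
  }
  x <- list(label = c("a", "b", "c"), est = c(10, 10, .01),
            low = c(0, 0, 0), upp = c(100, 20, .1))
  out <- mymcmc(x, nlogf, m1 = 1000, m2 = 1000, m3 = 5000)
  str(out)  # list with $f (-log density trace), $mcmc (samples), $covm
}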
# ---- Bhat/R/mcmc.R ----
##' Function minimization with box-constraints ##' ##' Newton-Raphson algorithm for minimizing a function \code{f} over the ##' parameters specified in the input list \code{x}. Note, a Newton-Raphson ##' search is very efficient in the 'quadratic region' near the optimum. In ##' higher dimensions it tends to be rather unstable and may behave ##' chaotically. Therefore, a (local or global) minimum should be available to ##' begin with. Use the \code{optim} or \code{dfp} functions to search for ##' optima. ##' ##' ##' @param x a list with components 'label' (of mode character), 'est' (the ##' parameter vector with the initial guess), 'low' (vector with lower bounds), ##' and 'upp' (vector with upper bounds) ##' @param f the function that is to be minimized over the parameter vector ##' defined by the list \code{x} ##' @param eps converges when all (logit-transformed) derivatives are smaller ##' \code{eps} ##' @param itmax maximum number of Newton-Raphson iterations ##' @param relax numeric. If 0, take full Newton step, otherwise 'relax' step ##' incrementally until a better value is found ##' @param nfcn number of function calls ##' @return list with the following components: \item{fmin }{ the function ##' value f at the minimum } \item{label }{ the labels } \item{est }{ a vector ##' of the parameter estimates at the minimum. newton does not overwrite ##' \code{x} } \item{low }{ lower 95\% (Wald) confidence bound } \item{upp }{ ##' upper 95\% (Wald) confidence bound } The confidence bounds assume that the ##' function \code{f} is a negative log-likelihood ##' @note \code{newton} computes the (logit-transformed) Hessian of \code{f} ##' (using logit.hessian). This function is part of the Bhat exploration tool ##' @author E. Georg Luebeck (FHCRC) ##' @seealso \code{\link{dfp}}, \code{\link{ftrf}}, \code{\link{btrf}}, ##' \code{\link{logit.hessian}}, \code{\link{plkhci}} ##' @keywords optimize methods ##' @examples ##' ##' # generate some Poisson counts on the fly ##' dose <- c(rep(0,100),rep(1,100),rep(5,100),rep(10,100)) ##' data <- cbind(dose,rpois(400,20*(1+dose*.5*(1-dose*0.05)))) ##' ##' # neg. log-likelihood of Poisson model with 'linear-quadratic' mean: ##' lkh <- function (x) { ##' ds <- data[, 1] ##' y <- data[, 2] ##' g <- x[1] * (1 + ds * x[2] * (1 - x[3] * ds)) ##' return(sum(g - y * log(g))) ##' } ##' ##' # for example define ##' x <- list(label=c("a","b","c"),est=c(10.,10.,.01),low=c(0,0,0),upp=c(100,20,.1)) ##' ##' # calls: ##' r <- dfp(x,f=lkh) ##' x$est <- r$est ##' results <- newton(x,lkh) ##' ##' @export ##' "newton" <- function (x, f, eps=1e-1, itmax=10, relax=0, nfcn = 0) { # General Newton-Raphson module written in R. # This module is part of the Bhat likelihood exploration tool. # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # E. Georg Luebeck, Fred Hutchinson Cancer Research Center # Fairview Avenue N, Seattle, WA 98109-1024 if (!is.list(x)) { cat("x is not a list! 
see help file", "\n") return() } names(x)[1] <- "label" names(x)[2] <- "est" names(x)[3] <- "low" names(x)[4] <- "upp" npar <- length(x$est ) #### objects: if (npar <= 0) { warning("no. of parameters < 1") stop() } small <- 1.e-8 nlm <- (npar*npar+3*npar)/2 np2 <- 2*npar sens <- 0.1 flag.hessian <- 1 #### first call cat(date(), "\n") xt <- ftrf(x$est, x$low, x$upp) f0 <- f(x$est); nfcn <- nfcn + 1 for(n in 1:itmax) { cat(nfcn, ' fmin: ', format(f0), " ", format(x$est), '\n') # *** INITIALIZE NEWTON BY CALCULATION OF A # NLM POINT SIMPLEX GEOMETRY IN NPAR DIM PARAMETER SPACE xn <- matrix(rep(xt,nlm),ncol=nlm) # *** ON AXES if(n==1) del <- dqstep(x,f,sens) if(flag.hessian==1) { h <- logit.hessian(x,f,del,dapprox=FALSE,nfcn) # returns list: $df,$ddf,$nfcn nfcn <- h$nfcn ddf <- h$ddf; df <- h$df } else { # *** compute delta distance in each direction for(i in 1:npar) { j.even <- 2*i j.odd <- j.even-1 xn[i,j.even] <- xn[i,j.even]-del[i] xn[i,j.odd] <- xn[i,j.odd] +del[i] } # *** OFF AXIS if(npar > 1) { mc <- np2+1 for(i in 2:npar) { for(j in 1:(i-1)) { xn[i,mc] <- xn[i,mc]+del[i] xn[j,mc] <- xn[j,mc]+del[j] mc <- mc+1 }} } f.vec <- numeric(nlm) for(i in 1:nlm) { f.vec[i] <- f(btrf(xn[,i], x$low, x$upp)) nfcn <- nfcn + 1 } # *** FIRST AND DIAGONAL SECOND DERIVATIVES i <- 1:npar i.even <- 2*i i.odd <- i.even-1 df <- (f.vec[i.even]-f.vec[i.odd])/2/del ddf <- diag((f.vec[i.even]+f.vec[i.odd]-2*f0)/del/del/2) # *** SECOND DERIVATIVES if(npar > 1) { mc <- np2+1 for(i in 2:npar) { for(j in 1:(i-1)) { ip <- 2*i-1; jp <- 2*j-1 ddf[i,j] <- (f0+f.vec[mc]-f.vec[jp]-f.vec[ip])/del[i]/del[j] mc <- mc+1 }} ddf <- ddf+t(ddf) } # *** EIGEN VALUES and MATRIX INVERSION eig <- eigen(ddf) cat('approx. eigen values: ',format(eig$values),'\n') if(any(eig$values < 0)) { warning('approx. hessian not pos. definite') } if(any(abs(eig$values) < small)) { warning('approx. 
hessian may be singular') } } # cat('compare:','\n') # print(format(h$ddf),quote=FALSE) # print(format(ddf),quote=FALSE) b <- diag(1,npar) # *** if inversion fails need to return or reinitialize NR (not implemented) ggf <- solve(ddf,b,tol=1.e-10) # cat('unity test:','\n'); print(format(ggf %*% ddf),quote=FALSE) # --- variable Newton stepping disc <- ggf %*% df disc0 <- disc rneps <- 1; nepsc <- 0; xt0 <- xt; f1 <- f0 xt <- xt0 + disc # cat(xt0,'\n',xt,'\n',df,'\n') tmp <- numeric(npar) tmp[abs(df) <= eps] <- 1 nz <- sum(tmp) f0 <- f(btrf(xt, x$low, x$upp)); nfcn <- nfcn + 1 if(relax > 0) { while(nepsc < 8 && f0 > f1 ) { rneps <- .75*rneps cat('reducing step size: ',format(rneps,f0),'\n') nepsc <- nepsc+1 disc <- rneps * disc0 xt <- xt0 + disc f0 <- f(btrf(xt, x$low, x$upp)); nfcn <- nfcn + 1 } } if(nepsc == 8) warning('problem finding lower function value, will continue') # --- STOP-CRITERION FOR ITERATION -------------------------------------- if(nz == npar && min(h$eigen) > small) { status <- 'converged' # --- compute Hessian symmetrically: x$est <- btrf(xt,x$low,x$upp) # h <- logit.hessian(x,f,del,dapprox=FALSE,nfcn) # returns list: $df,$ddf,$nfcn b <- diag(1,npar) ggf <- solve(ddf,b,tol=1.e-10) xtl <- rep(NA,npar); xtu <- rep(NA,npar) se <- numeric(npar) ggf.diag <- diag(ggf) se[ggf.diag >= 0] <- sqrt(ggf.diag[ggf.diag >= 0]) xtl <- xt - 1.96 * se xtu <- xt + 1.96 * se x$est <- btrf(xt, x$low, x$upp) xlow <- btrf(xtl, x$low, x$upp) xupp <- btrf(xtu, x$low, x$upp) cat('\n') cat('Bhat run: ',date(),' status: ',status,'\n') # cat('Optimization Result:', '\n') cat("iter: ", n, " fmin: ", format(f0), " nfcn: ", nfcn, "\n") cat("\n") m.out <- cbind(x$label, round(x$est,6), round(h$df,6), round(xlow,6), round(xupp,6)) dimnames(m.out) <- list(1:npar, c("label", "estimate", "log deriv", "lower 95%" , "upper 95%")) print(m.out, quote=FALSE) cat("\n") return(list(fmin = f0, label = x$label, est = x$est, low = xlow, upp = xupp)) } # --- graphical diagnostic (do be done) # *** DURING NON-CONVERGEING CYCLES status <- 'non-converged' xtl <- rep(NA,npar); xtu <- rep(NA,npar) se <- numeric(npar) ggf.diag <- diag(ggf) se[ggf.diag >= 0] <- sqrt(ggf.diag[ggf.diag >= 0]) xtl <- xt - 1.96 * se xtu <- xt + 1.96 * se x$est <- btrf(xt, x$low, x$upp) xlow <- btrf(xtl, x$low, x$upp) xupp <- btrf(xtu, x$low, x$upp) if(n == itmax) { cat('\n') cat('Bhat run: ',date(),' status: ',status,'\n') # cat('Optimization Result:', '\n') } cat('\n',"iter: ", n, " fmin: ", f0, " nfcn: ", nfcn, "\n") cat("\n") vbar <- rep("|",npar) m.out <- cbind(x$label, round(x$est,6), round(df,6), round(xlow,6), round(xupp,6)) dimnames(m.out) <- list(1:npar, c("label", "estimate", "log deriv", "lower 95%" , "upper 95%")) print(m.out, quote = FALSE) cat("\n") } return(list(fmin = f0, label = x$label, est = x$est, low = xlow, upp = xupp)) }
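# A standalone sketch (not from the original Bhat sources) of the core update
# performed above: one Newton step on the logit scale followed by 1.96
# standard-error Wald bounds, back-transformed with btrf(). It assumes a
# gradient df and curvature matrix ddf evaluated at the current transformed
# point xt, as newton() obtains them from logit.hessian().
demo_newton_step <- function(xt, df, ddf, low, upp) {
  ggf <- solve(ddf)                        # inverse curvature
  xt.new <- as.vector(xt + ggf %*% df)     # Newton update, as in newton()
  se <- sqrt(pmax(diag(ggf), 0))           # Wald standard errors
  list(est   = btrf(xt.new, low, upp),
       low95 = btrf(xt.new - 1.96 * se, low, upp),
       upp95 = btrf(xt.new + 1.96 * se, low, upp))
}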
# ---- Bhat/R/newton.R ----
# Program: plkhci.R # modified newton-raphson optimizer # to obtain profile-likelihood-based confidence intervals # # Method: D.J. VENZON and S.H. MOOLGAVKAR # " A method for computing profile-likelihood-based # confidence intervals " # Journal of the Royal Statistical Society, Series C # volume 37, no.1, 1988, pp. 87-94 # not yet fully implemented, but may converge # # Notes: p: identifies which parameter (among npar) is to be used for profile # likelihood calc. # npar: total number of parameters in model # modified 04/26/2004 by Russell Millar Dept of Stats U. Akld # to handle case of npar=1 ##' Profile-likelihood based confidence intervals ##' ##' function to find \code{prob}*100\% confidence intervals using ##' profile-likelihood. Numerical solutions are obtained via a modified ##' Newton-Raphson algorithm. The method is described in Venzon and Moolgavkar, ##' Journal of the Royal Statistical Society, Series C vol 37, no.1, 1988, pp. ##' 87-94. ##' ##' ##' @param x a list with components 'label' (of mode character), 'est' (the ##' parameter vector with the initial guess), 'low' (vector with lower bounds), ##' and 'upp' (vector with upper bounds) ##' @param nlogf the negative log of the density function (not necessarily ##' normalized) for which samples are to be obtained ##' @param label parameter for which confidence bounds are computed ##' @param prob probability associated with the confidence interval ##' @param eps a numerical value. Convergence results when all ##' (logit-transformed) derivatives are smaller \code{eps} ##' @param nmax maximum number of Newton-Raphson iterations in each direction ##' @param nfcn number of function calls ##' @return 2 component vector giving lower and upper p\% confidence bounds ##' @note At this point, only a single parameter label can be passed to plkhci. ##' This function is part of the Bhat exploration tool ##' @author E. Georg Luebeck (FHCRC) ##' @seealso \code{\link{dfp}}, \code{\link{newton}}, ##' \code{\link{logit.hessian}} ##' @keywords distribution multivariate ##' @examples ##' ##' # generate some Poisson counts on the fly ##' dose <- c(rep(0,50),rep(1,50),rep(5,50),rep(10,50)) ##' data <- cbind(dose,rpois(200,20*(1+dose*.5*(1-dose*0.05)))) ##' ##' # neg. log-likelihood of Poisson model with 'linear-quadratic' mean: ##' nlogf <- function (x) { ##' ds <- data[, 1] ##' y <- data[, 2] ##' g <- x[1] * (1 + ds * x[2] * (1 - x[3] * ds)) ##' return(sum(g - y * log(g))) ##' } ##' ##' # for example define ##' x <- list(label=c("a","b","c"),est=c(10.,10.,.01),low=c(0,0,0),upp=c(100,20,.1)) ##' ##' # get MLEs using dfp: ##' r <- dfp(x,f=nlogf) ##' x$est <- r$est ##' plkhci(x,nlogf,"a") ##' plkhci(x,nlogf,"b") ##' plkhci(x,nlogf,"c") ##' # e.g. 90% confidence bounds for "c" ##' plkhci(x,nlogf,"c",prob=0.9) ##' ##' ##' @export ##' plkhci <- function (x, nlogf, label, prob = 0.95, eps = 0.001, nmax = 10, nfcn = 0) { if (!is.list(x)) { cat("x is not a list! see help file", "\n") return() } names(x)[1] <- "label" names(x)[2] <- "est" names(x)[3] <- "low" names(x)[4] <- "upp" npar <- length(x$est) if (npar <= 0) { warning("no. of parameters < 1") stop() } small <- 1e-08 nd1 <- npar - 1 sens <- 0.5 rhs <- numeric(npar) disc <- numeric(npar) qchi <- stats::qchisq(prob, 1) if (!is.character(label)) stop("label needs to be of type character") p <- match(label, x$label) if (is.na(p)) stop("label not found") xt <- ftrf(x$est, x$low, x$upp) fmle <- nlogf(x$est) nfcn <- nfcn + 1 cat(" neg. log. 
likelihood: ", fmle, "\n", "\n") cat(" will attempt to compute both bounds (+/- direction)", "\n") del <- dqstep(x, nlogf, sens) h <- logit.hessian(x, nlogf, del, dapprox = FALSE, nfcn) nfcn <- h$nfcn ddf <- as.matrix(h$ddf) df <- h$df dbb0 <- ddf[p, p] tmp0 <- 0 if(npar > 1) { ddl <- ddf[-p, -p] dbo <- ddf[p, -p] b <- diag(1, nd1) ggl <- solve(ddl, b, tol = 1e-16) dbl0 <- ggl %*% dbo tmp0 <- t(dbo) %*% dbl0 } cat("\n") for (idir in c(1, -1)) { xt0 <- xt xt0[p] <- xt0[p] + idir * sqrt(qchi/(dbb0 - tmp0))/2 if(npar > 1) xt0[-p] <- xt0[-p] - idir * as.vector(dbl0) * sqrt(qchi/(dbb0 - tmp0))/2 f0 <- nlogf(btrf(xt0, x$low, x$upp)) nfcn <- nfcn + 1 if (idir == 1) cat("trying lower bound ------------------------", "\n") if (idir == -1) cat("trying upper bound ------------------------", "\n") cat("starting at: ", 2 * (f0 - fmle), "\n") cat("initial guess: ", btrf(xt0, x$low, x$upp), "\n") cat("\n") cat("begin Newton-Raphson search for profile lkh conf. bounds:", "\n") cat("eps value for stop criterium:", eps, "\n") cat("nmax :", nmax, "\n") for (n in 1:nmax) { x0 <- x x0$est <- btrf(xt0, x$low, x$upp) f0 <- nlogf(x0$est) nfcn <- nfcn + 1 h <- logit.hessian(x0, nlogf, del, dapprox = FALSE, nfcn) nfcn <- h$nfcn ddf <- as.matrix(h$ddf) df <- h$df fdd <- ddf fdd[1, 1] <- -df[p] rhs[1] <- (fmle - f0 + 0.5 * qchi) if(npar > 1) { ddl <- ddf[-p, -p] dbo <- ddf[p, -p] fdd[1, 2:npar] <- -df[-p] rhs[2:npar] <- df[-p] fdd[-1, -1] <- ddl fdd[2:npar, 1] <- dbo } ggl <- solve(fdd, tol = 1e-16) if (n <= 2) rneps <- 0.2 if (n > 2 && n <= 4) rneps <- 0.8 if (n > 4) rneps <- 1 nepsc <- 0 xt00 <- xt0 disc0 <- disc <- as.vector(ggl %*% rhs) rhs1 <- 1000 disc <- rneps * disc0 nz <- 0 xt0[p] <- xt00[p] + disc[1] if(npar > 1) xt0[-p] <- xt00[-p] + disc[2:npar] tmp <- numeric(npar) tmp[abs(disc) <= eps] <- 1 nz <- sum(tmp) f0 <- nlogf(btrf(xt0, x$low, x$upp)) nfcn <- nfcn + 1 rhs1 <- abs(fmle - f0 + 0.5 * qchi) estimate <- btrf(xt0, x$low, x$upp) if (nz == npar && nepsc == 0) { cat("\n", "CONVERGENCE: ", trunc(n), " iterations", "\n") status <- "converged" cat("\n") cat("chisquare value is: ", 2 * (f0 - fmle), "\n") cat("confidence bound of ", x$label[p], " is ", estimate[p], "\n") if(npar > 1) cat("log derivatives: ", df[-p], "\n") m.out <- cbind(x$label, signif(estimate, 6), signif(df, 6), signif(diag(ddf), 6)) dimnames(m.out) <- list(1:npar, c("label", "estimate", "log deriv", "log curv")) print(m.out, quote = FALSE) cat("\n") break } } if (idir == -1) { xlow <- estimate[p] } else { xupp <- estimate[p] } } return(c(xlow, xupp)) }
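# The defining condition plkhci() drives to zero (a standalone illustration,
# not from the original Bhat sources): at a prob-level profile bound the
# deviance equals the chi-square quantile, 2 * (nlogf(bound) - fmle) ==
# qchisq(prob, 1). A hypothetical helper evaluating that residual:
demo_plkh_residual <- function(f_at_bound, fmle, prob = 0.95) {
  2 * (f_at_bound - fmle) - stats::qchisq(prob, 1)  # zero exactly at a bound
}
# e.g. demo_plkh_residual(101.92, 100) is ~0, since qchisq(0.95, 1)/2 ~ 1.92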
# ---- Bhat/R/plkhci.R ----
#' @title A biclustering algorithm for extracting bit-patterns from binary datasets
#'
#' @description BiBitR is a simple R wrapper which directly calls the original Java code for applying the BiBit algorithm.
#' The original Java code can be found at \url{http://eps.upo.es/bigs/BiBit.html} by Domingo S. Rodriguez-Baena, Antonia J. Perez-Pulido and Jesus S. Aguilar-Ruiz.
#'
#' The BiBitR package also includes the following functions and/or workflows:
#' \itemize{
#' \item A slightly adapted version of the original BiBit algorithm which now allows noise when adding rows to the bicluster (\code{\link{bibit2}}).
#' \item A function which accepts a pattern and, using the BiBit algorithm, will find biclusters fully or partly fitting the given pattern (\code{\link{bibit3}}).
#' \item A workflow which can discover larger patterns (and their biclusters) using BiBit and classic hierarchical clustering approaches (\code{\link{BiBitWorkflow}}).
#' }
#'
#' @references Domingo S. Rodriguez-Baena, Antonia J. Perez-Pulido and Jesus S. Aguilar-Ruiz (2011), "A biclustering algorithm for extracting bit-patterns from binary datasets", \emph{Bioinformatics}
#'
#' @docType package
#' @name BiBitR
NULL
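# A minimal sketch of the bit-pattern idea behind BiBit (illustration only;
# the actual algorithm runs in the bundled Java code, which encodes rows as
# bit words): the motif of a row pair is the AND of the two binary rows, and
# a further row joins the bicluster when it matches the motif within 'noise'
# allowed zeros, as described for bibit2() below.
demo_bibit_motif <- function(mat, i, j, noise = 0) {
  motif <- mat[i, ] & mat[j, ]               # columns where both rows are 1
  zeros <- apply(mat[, motif, drop = FALSE], 1,
                 function(r) sum(r == 0))    # per-row zeros inside the motif
  list(cols = which(motif), rows = which(zeros <= noise))
}
# e.g. for a binary matrix m: demo_bibit_motif(m, 1, 2, noise = 1)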
# ---- BiBitR/R/BiBitR.R ----
## IMPORTS ## #' @importFrom foreign write.arff read.arff #' @import biclust #' @importFrom methods new setGeneric setMethod #' @importFrom utils read.table write.table combn txtProgressBar setTxtProgressBar capture.output #' @importFrom viridis viridis #' @importFrom cluster agnes clusGap maxSE #' @importFrom dendextend color_branches #' @importFrom stats hclust as.hclust cutree as.dendrogram as.dist fisher.test heatmap lm #' @importFrom lattice levelplot #' @importFrom grDevices dev.new dev.off pdf png #' @importFrom graphics abline barplot image legend par plot points text #' @importFrom randomcoloR distinctColorPalette NULL #' @title The BiBit Algorithm #' #' @description A R-wrapper which directly calls the original Java code for the BiBit algorithm (\url{http://eps.upo.es/bigs/BiBit.html}) and transforms it to the output format of the \code{Biclust} R package. #' #' @details This function uses the original Java code directly (with the intended input and output). Because the Java code was not refactored, the \code{rJava} package could not be used. #' The \code{bibit} function does the following: #' \enumerate{ #' \item Convert R matrix to a \code{.arff} output file. #' \item Use the \code{.arff} file as input for the Java code which is called by \code{system()}. #' \item The outputted \code{.txt} file from the Java BiBit algorithm is read in and transformed to a \code{Biclust} object. #' } #' Because of this, there is a chance of \emph{overhead} when applying the algorithm on large datasets. Make sure your machine has enough RAM available when applying to big data. #' #' @author Ewoud De Troyer #' #' @references Domingo S. Rodriguez-Baena, Antonia J. Perez-Pulido and Jesus S. Aguilar-Ruiz (2011), "A biclustering algorithm for extracting bit-patterns from binary datasets", \emph{Bioinformatics} #' #' @export #' @param matrix The binary input matrix. #' @param minr The minimum number of rows of the Biclusters. #' @param minc The minimum number of columns of the Biclusters. #' @param arff_row_col If you want to circumvent the internal R function to convert the matrix to \code{.arff} format, provide the pathname of this file here. Additionally, two \code{.csv} files should be provided containing 1 column of row and column names. These two files should not contain a header or quotes around the names, simply 1 column with the names.\cr #' (\emph{Example}: \code{arff_row_col=c("...\\\\data\\\\matrix.arff","...\\\\data\\\\rownames.csv","...\\\\data\\\\colnames.csv")})\cr #' \emph{Note:} These files can be generated with the \code{\link{make_arff_row_col}} function. #' \cr \strong{Warning:} Should you use the \code{write.arff} function from the \code{foreign} package, remember to transpose the matrix first. #' @param output_path If as output, the original txt output of the Java code is desired, provide the outputh path here (without extension). In this case the \code{bibit} function will skip the transformation to a Biclust class object and simply return \code{NULL}.\cr #' (\emph{Example}: \code{output_path="...\\\\out\\\\bibitresult"}) #' \cr #' (\emph{Description Output}: The following information about every bicluster generated will be printed in the output file: number of rows, number of columns, name of rows and name of columns. #' @return A Biclust S4 Class object. 
#' #' @examples #' \dontrun{ #' data <- matrix(sample(c(0,1),100*100,replace=TRUE,prob=c(0.9,0.1)),nrow=100,ncol=100) #' data[1:10,1:10] <- 1 # BC1 #' data[11:20,11:20] <- 1 # BC2 #' data[21:30,21:30] <- 1 # BC3 #' data <- data[sample(1:nrow(data),nrow(data)),sample(1:ncol(data),ncol(data))] #' result <- bibit(data,minr=5,minc=5) #' result #' MaxBC(result) #' } bibit <- function(matrix=NULL,minr=2,minc=2,arff_row_col=NULL,output_path=NULL){ pm <- match.call() if(is.null(arff_row_col)){ time_arff <- round(proc.time()['elapsed']/60,2) # Check if matrix is binary (DISCRETIZED NOT YET IMPLEMENTED!) if(class(matrix)!="matrix"){stop("matrix parameter should contain a matrix object",call.=FALSE)} if(!identical(as.numeric(as.vector(matrix)),as.numeric(as.logical(matrix)))){stop("matrix is not a binary matrix!",call.=FALSE)} if(is.null(rownames(matrix))){rownames(matrix) <- paste0("Row",c(1:nrow(matrix)))} if(is.null(colnames(matrix))){colnames(matrix) <- paste0("Col",c(1:ncol(matrix)))} # Check if rownames & colnames contain ; or , -> should be deleted and give warnings it was deleted rowdot <- grepl(",",rownames(matrix)) if(sum(rowdot)>0){ rownames(matrix) <- gsub(",","",rownames(matrix)) warning(paste0("Row names ",paste0(which(rowdot),collapse = ",")," contained a ',' which was deleted."),call.=FALSE) } rowsc <- grepl(";",rownames(matrix)) if(sum(rowsc)>0){ rownames(matrix) <- gsub(";","",rownames(matrix)) warning(paste0("Row names ",paste0(which(rowsc),collapse = ",")," contained a ';' which was deleted."),call.=FALSE) } coldot <- grepl(",",colnames(matrix)) if(sum(coldot)>0){ colnames(matrix) <- gsub(",","",colnames(matrix)) warning(paste0("Column names ",paste0(which(coldot),collapse = ",")," contained a ',' which was deleted."),call.=FALSE) } colsc <- grepl(";",colnames(matrix)) if(sum(colsc)>0){ colnames(matrix) <- gsub(";","",colnames(matrix)) warning(paste0("Column names ",paste0(which(colsc),collapse = ",")," contained a ';' which was deleted."),call.=FALSE) } # No duplicate row names allowed! if(sum(table(rownames(matrix))>1)){stop("No duplicate row names allowed!")} # Transform data into arff format cat("Transform matrix into arff format...") bibitdata_path <- tempfile("bibitdata",fileext=".arff") bibitrows_path <- tempfile("bibitrows",fileext=".csv") bibitcols_path <- tempfile("bibitcols",fileext=".csv") write.arff(t(matrix),file=bibitdata_path) write.table(matrix(rownames(matrix),ncol=1),quote=FALSE,row.names=FALSE,col.names=FALSE,file=bibitrows_path) write.table(matrix(colnames(matrix),ncol=1),quote=FALSE,row.names=FALSE,col.names=FALSE,file=bibitcols_path) cat("DONE\n") cat("\n") time_arff <- round(proc.time()['elapsed']/60-time_arff,2) }else{ time_arff <- 0 if(length(arff_row_col)!=3){stop("arff_row_col should contain 3 elements",call.=FALSE)} bibitdata_path <- arff_row_col[1] bibitrows_path <- arff_row_col[2] bibitcols_path <- arff_row_col[3] } if(is.null(output_path)){ bibitoutput_path <- tempfile("bibitoutput",fileext = "") }else{ bibitoutput_path <- output_path } time_bibit <- proc.time()['elapsed']/60 javaloc <- paste0(find.package("BiBitR")[1],"/java/BiBit.jar") # javaloc <- gsub("/","\\\\",javaloc) # BiBit.jar location needs to be standardized for package location! 
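  # The command assembled below shells out to the bundled jar as:
  #   java -jar -Xmx1000M BiBit.jar <data.arff> 1 <minr> <minc> <output> <rows.csv> <cols.csv> 1
  # with every path wrapped in escaped quotes so that locations containing
  # spaces survive the system() call.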
# .libPaths() # command <- paste("java -jar -Xmx1000M",javaloc,bibitdata_path,"1",minr,minc,bibitoutput_path,bibitrows_path,bibitcols_path,1) command <- paste("java -jar -Xmx1000M",paste0("\"",javaloc,"\""),paste0("\"",bibitdata_path,"\""),"1",minr,minc,paste0("\"",bibitoutput_path,"\""),paste0("\"",bibitrows_path,"\""),paste0("\"",bibitcols_path,"\""),1) system(command) time_bibit <- round(proc.time()['elapsed']/60-time_bibit,2) if(is.null(output_path)){ cat("\n") cat("Transforming into biclust output...") time_biclust <- round(proc.time()['elapsed']/60,2) result <- bibit2biclust(data=matrix,resultpath=paste0(bibitoutput_path,"_1.txt"),arff_row_col = arff_row_col) cat("DONE\n") time_biclust <- round(proc.time()['elapsed']/60-time_biclust,2) if(!is.null(result)){ result2 <- new("Biclust",Parameters=list(Call=pm,Method="BiBit"), RowxNumber=result$RowxNumber, NumberxCol=result$NumberxCol, Number=result$Number, info=list(Time_Min=list(arff=time_arff,bibit=time_bibit,biclust=time_biclust,full=time_arff+time_bibit+time_biclust))) }else{ if(!is.null(arff_row_col)){ rownames.data <- as.character(read.table(arff_row_col[2],header=FALSE)[,1]) colnames.data <- as.character(read.table(arff_row_col[3],header=FALSE)[,1]) nrow.data <- length(rownames.data) ncol.data <- length(colnames.data) }else{ nrow.data <- nrow(matrix) ncol.data <- ncol(matrix) } result2 <- new("Biclust",Parameters=list(Call=pm,Method="BiBit"), RowxNumber=matrix(FALSE,nrow=nrow.data,ncol=1), NumberxCol=matrix(FALSE,nrow=1,ncol=ncol.data), Number=0, info=list(Time_Min=list(arff=time_arff,bibit=time_bibit,biclust=time_biclust,full=time_arff+time_bibit+time_biclust))) } return(result2) }else{ return(NULL) } } bibit2biclust <- function(data,resultpath,arff_row_col){ result <- read.table(resultpath,header=TRUE,sep=";") if(is.null(arff_row_col)){ rownames.data <- rownames(data) colnames.data <- colnames(data) nrow.data <- nrow(data) ncol.data <- ncol(data) }else{ rownames.data <- as.character(read.table(arff_row_col[2],header=FALSE)[,1]) colnames.data <- as.character(read.table(arff_row_col[3],header=FALSE)[,1]) nrow.data <- length(rownames.data) ncol.data <- length(colnames.data) } if(dim(result)[1]>0){ result$Rows <- as.character(result$Rows) result$Columns <- as.character(result$Columns) Number <- nrow(result) rowlist <- strsplit(result$Rows,",") # for(i in 1:length(rowlist)){ # rowlist[[i]] <- rowlist[[i]][1:result$NumOfRows[i]] # } collist <- strsplit(result$Columns,", ") # for(i in 1:length(collist)){ # collist[[i]] <- collist[[i]][1:result$NumOfColumns[i]] # } # Let's add a quick to avoid problems... 
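  # The identical() checks below compare the parsed row/column name lists
  # against the NumOfRows/NumOfColumns counts reported in the BiBit output
  # file; a mismatch flags a parsing problem with a warning rather than an
  # error.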
if(!identical(result$NumOfRows,unlist(lapply(rowlist,FUN=length)))){warning("Issue reading row names...")} if(!identical(result$NumOfColumns,unlist(lapply(collist,FUN=length)))){warning("Issue reading column names...")} rowlist_index <- lapply(rowlist,FUN=function(x){rownames.data %in% x}) collist_index <- lapply(collist,FUN=function(x){colnames.data %in% x}) RowxNumber <- matrix(unlist(rowlist_index),byrow=FALSE,nrow=nrow.data,ncol=Number) NumberxCol <- matrix(unlist(collist_index),byrow=TRUE,nrow=Number,ncol=ncol.data) # again quick BC dimension check if(!identical(result$NumOfRows,as.integer(colSums(RowxNumber)))){warning("Issue row BC dimension")} if(!identical(result$NumOfColumns,as.integer(rowSums(NumberxCol)))){warning("Issue column BC dimension")} # Temporart list output, needs to be changed to biclust object return(list(Parameters=list(),Number=Number,RowxNumber=RowxNumber,NumberxCol=NumberxCol,info=list())) }else{ return(NULL) } } #' @title The BiBit Algorithm with Noise Allowance #' #' @description Same function as \code{\link{bibit}} with an additional new noise parameter which allows 0's in the discovered biclusters (See Details for more info). #' #' @section Details - General: #' \code{bibit2} follows the same steps as described in the Details section of \code{\link{bibit}}.\cr #' Following the general steps of the BiBit algorithm, the allowance for noise in the biclusters is inserted in the original algorithm as such: #' \enumerate{ #' \item Binary data is encoded in bit words. #' \item Take a pair of rows as your starting point. #' \item Find the maximal overlap of 1's between these two rows and save this as a pattern/motif. You now have a bicluster of 2 rows and N columns in which N is the number of 1's in the motif. #' \item Check all remaining rows if they match this motif, \emph{however} allow a specific amount of 0's in this matching as defined by the \code{noise} parameter. Those rows that match completely or those within the allowed noise range are added to bicluster. #' \item Go back to \emph{Step 2} and repeat for all possible row pairs. #' } #' \emph{Note:} Biclusters are only saved if they satisfy the \code{minr} and \code{minc} parameter settings and if the bicluster is not already contained completely within another bicluster.\cr #' \cr #' What you will end up with are biclusters not only consisting out of 1's, but biclusters in which 2 rows (the starting pair) are all 1's and in which the other rows could contain 0's (= noise).\cr #' \cr #' \emph{Note:} Because of the extra checks involved in the noise allowance, using noise might increase the computation time a little bit. #' #' #' @section Details - Column Extension: #' An optional procedure which can be applied \emph{after} applying the BiBit algorithm (with noise) is called \emph{Column Extension}. #' The procedure will add extra columns to a BiBit bicluster, keeping into account the allowed \code{extend_noise} level in each row. #' The primary goal is to, after applying BiBit with noise, to also try and add some noise to the 2 initial `perfect` rows. #' Other parameters like \code{extend_mincol} and \code{extend_limitcol} can also further restrict which extensions should be discovered. #' \cr This procedure can be done either \emph{naively} (fast) or \emph{recursively} (more slow and thorough) with the \code{extend_columns} parameter. #' #' \describe{ #' \item{\code{"naive"}}{Subsetting on the bicluster rows, the column candidates are ordered based on the most 1's in a column. 
Afterwards, in this order, each column is sequentially checked and added when the resulted BC is still within row noise levels. #' \cr This has 2 major consequences: #' \itemize{ #' \item{If 2 columns are identical, the first in the dataset is added, while the second isn't (depending on the noise level allowed per row).} #' \item{If 2 non-identical columns are viable to be added (correct row noise), the column with the most 1's is added. Afterwards the second column might not be viable anymore.} #' } #' Note that using this method will always result in a maximum of 1 extended bicluster per original bicluster. #' } #' \item{\code{"recursive"}}{ #' Conditioning the group of candidates for the allowed row noise level, each possible/allowed combination of adding columns to the bicluster is checked. Only the resulted biclusters with the highest number of extra columns are saved. #' Of course this could result in multiple extensions for 1 bicluster if there are multiple `maximum added columns` results. #' #' } #' } #' \emph{Note:} These procedures are followed by a fast check if the extensions resulted in any duplicate biclusters. If so, these are deleted from the final result. #' #' #' #' @author Ewoud De Troyer #' #' @references Domingo S. Rodriguez-Baena, Antonia J. Perez-Pulido and Jesus S. Aguilar-Ruiz (2011), "A biclustering algorithm for extracting bit-patterns from binary datasets", \emph{Bioinformatics} #' #' @export #' @param matrix The binary input matrix. #' @param minr The minimum number of rows of the Biclusters. #' @param minc The minimum number of columns of the Biclusters. #' @param noise Noise parameter which determines the amount of zero's allowed in the bicluster (i.e. in the extra added rows to the starting row pair). #' \itemize{ #' \item \code{noise=0}: No noise allowed. This gives the same result as using the \code{\link{bibit}} function. (default) #' \item \code{0<noise<1}: The \code{noise} parameter will be a noise percentage. The number of allowed 0's in a (extra) row in the bicluster will depend on the column size of the bicluster. #' More specifically \code{zeros_allowed = ceiling(noise * columnsize)}. For example for \code{noise=0.10} and a bicluster column size of \code{5}, the number of allowed 0's would be \code{1}. #' \item \code{noise>=1}: The \code{noise} parameter will be the number of allowed 0's in a (extra) row in the bicluster independent from the column size of the bicluster. In this noise option, the noise parameter should be an integer. #' } #' #' @param arff_row_col If you want to circumvent the internal R function to convert the matrix to \code{.arff} format, provide the pathname of this file here. Additionally, two \code{.csv} files should be provided containing 1 column of row and column names. These two files should not contain a header or quotes around the names, simply 1 column with the names.\cr #' (\emph{Example}: \code{arff_row_col=c("...\\\\data\\\\matrix.arff","...\\\\data\\\\rownames.csv","...\\\\data\\\\colnames.csv")})\cr #' \emph{Note:} These files can be generated with the \code{\link{make_arff_row_col}} function. #' \cr \strong{Warning:} Should you use the \code{write.arff} function from the \code{foreign} package, remember to transpose the matrix first. #' @param output_path If as output, the original txt output of the Java code is desired, provide the outputh path here (without extension). 
In this case the \code{bibit} function will skip the transformation to a Biclust class object and simply return \code{NULL}.\cr #' (\emph{Example}: \code{output_path="...\\\\out\\\\bibitresult"}) #' \cr #' (\emph{Description Output}: The following information about every bicluster generated will be printed in the output file: number of rows, number of columns, name of rows and name of columns. #' @param extend_columns \emph{Column Extension Parameter}\cr Can be one of the following: \code{"none"}, \code{"naive"}, \code{"recursive"} which will apply either a naive or recursive column extension procedure. (See Details Section for more information.) #' \cr Based on the extension, additional biclusters will be created in the Biclust object which can be seen in the column and row names of the \code{RowxNumber} and \code{NumberxCol} slots (\code{"_Ext"} suffix). #' \cr The \code{info} slot will also contain some additional information. Inside this slot, \code{BC.Extended} contains info on which original biclusters were extended, how many columns were added, and in how many extra extended biclusters this resulted. #' \cr \cr \strong{Warning:} Using a percentage-based \code{extend_noise} (or \code{noise} by default) in combination with the recursive procedure will result in a large amount of biclusters and increase the computation time a lot. Depending on the data when using recursive in combination with a noise percentage, it is advised to keep it reasonable small (e.g. 10\%). Another remedy is to sufficiently increase the \code{extend_limitcol} either as a percentage or integer to limit the candidates of columns. #' @param extend_mincol \emph{Column Extension Parameter}\cr A minimum number of columns that a bicluster should be able to be extended with before saving the result. (Default=1) #' @param extend_limitcol \emph{Column Extension Parameter}\cr The number (\code{extend_limitcol>=1}) or percentage (\code{0<extend_limitcol<1}) of 1's that a column (subsetted on the BC rows) should at least contain for it to be a candidate to be added to the bicluster as an extension. (Default=1) (Increase this parameter if the recursive extension takes too long. Limiting the pool of candidates will decrease computation time, but restrict the results more.) #' #' @param extend_noise \emph{Column Extension Parameter}\cr The maximum allowed noise (in each row) when extending the columns of the bicluster. Can take the same as the \code{noise} parameter. By default this is the same value as \code{noise}. #' @param extend_contained \emph{Column Extension Parameter}\cr Logical value if extended results should be checked if they contain each other (and deleted if this is the case). Default = \code{FALSE}. This can be a lengthy procedure for a large amount of biclusters (>1000). #' #' #' @return A Biclust S4 Class object. 
#' #' @examples #' \dontrun{ #' data <- matrix(sample(c(0,1),100*100,replace=TRUE,prob=c(0.9,0.1)),nrow=100,ncol=100) #' data[1:10,1:10] <- 1 # BC1 #' data[11:20,11:20] <- 1 # BC2 #' data[21:30,21:30] <- 1 # BC3 #' data <- data[sample(1:nrow(data),nrow(data)),sample(1:ncol(data),ncol(data))] #' #' result1 <- bibit2(data,minr=5,minc=5,noise=0.2) #' result1 #' MaxBC(result1,top=1) #' #' result2 <- bibit2(data,minr=5,minc=5,noise=3) #' result2 #' MaxBC(result2,top=2) #' } bibit2 <- function(matrix=NULL,minr=2,minc=2,noise=0,arff_row_col=NULL,output_path=NULL, extend_columns="none",extend_mincol=1,extend_limitcol=1,extend_noise=noise,extend_contained=FALSE ){ pm <- match.call() # Various parameter checks if(noise<0){stop("noise parameter can not be negative",call.=FALSE)} if(noise>=1){noise <- as.integer(noise)} if(extend_noise<0){stop("extend_noise parameter can not be negative",call.=FALSE)} if(extend_noise>=1){extend_noise <- as.integer(extend_noise)} if(extend_noise<noise){stop("extend_noise can't be lower than noise",call.=FALSE)} if(length(extend_columns)!=1){stop("extend_columns needs 1 input",call.=FALSE)} if(!(extend_columns)%in%c("none","naive","recursive")){stop("extend_columns should be \"none\", \"naive\" or \"recursive\"",call.=FALSE)} if(extend_limitcol<=0){stop("extend_limitcol should be larger than 0",call.=FALSE)} if(extend_mincol<1){stop("extend_mincol should be larger than or equal to 1",call.=FALSE)} if(is.null(arff_row_col)){ time_arff <- round(proc.time()['elapsed']/60,2) # Check if matrix is binary (DISCRETIZED NOT YET IMPLEMENTED!) if(class(matrix)!="matrix"){stop("matrix parameter should contain a matrix object",call.=FALSE)} if(!identical(as.numeric(as.vector(matrix)),as.numeric(as.logical(matrix)))){stop("matrix is not a binary matrix!",call.=FALSE)} if(is.null(rownames(matrix))){rownames(matrix) <- paste0("Row",c(1:nrow(matrix)))} if(is.null(colnames(matrix))){colnames(matrix) <- paste0("Col",c(1:ncol(matrix)))} # Check if rownames & colnames contain ; or , -> should be deleted and give warnings it was deleted rowdot <- grepl(",",rownames(matrix)) if(sum(rowdot)>0){ rownames(matrix) <- gsub(",","",rownames(matrix)) warning(paste0("Row names ",paste0(which(rowdot),collapse = ",")," contained a ',' which was deleted."),call.=FALSE) } rowsc <- grepl(";",rownames(matrix)) if(sum(rowsc)>0){ rownames(matrix) <- gsub(";","",rownames(matrix)) warning(paste0("Row names ",paste0(which(rowsc),collapse = ",")," contained a ';' which was deleted."),call.=FALSE) } coldot <- grepl(",",colnames(matrix)) if(sum(coldot)>0){ colnames(matrix) <- gsub(",","",colnames(matrix)) warning(paste0("Column names ",paste0(which(coldot),collapse = ",")," contained a ',' which was deleted."),call.=FALSE) } colsc <- grepl(";",colnames(matrix)) if(sum(colsc)>0){ colnames(matrix) <- gsub(";","",colnames(matrix)) warning(paste0("Column names ",paste0(which(colsc),collapse = ",")," contained a ';' which was deleted."),call.=FALSE) } # No duplicate row names allowed! 
if(sum(table(rownames(matrix))>1)){stop("No duplicate row names allowed!")} # Transform data into arff format cat("Transform matrix into arff format...") bibitdata_path <- tempfile("bibitdata",fileext=".arff") bibitrows_path <- tempfile("bibitrows",fileext=".csv") bibitcols_path <- tempfile("bibitcols",fileext=".csv") write.arff(t(matrix),file=bibitdata_path) write.table(matrix(rownames(matrix),ncol=1),quote=FALSE,row.names=FALSE,col.names=FALSE,file=bibitrows_path) write.table(matrix(colnames(matrix),ncol=1),quote=FALSE,row.names=FALSE,col.names=FALSE,file=bibitcols_path) cat("DONE\n") cat("\n") time_arff <- round(proc.time()['elapsed']/60-time_arff,2) }else{ time_arff <- 0 if(length(arff_row_col)!=3){stop("arff_row_col should contain 3 elements",call.=FALSE)} bibitdata_path <- arff_row_col[1] bibitrows_path <- arff_row_col[2] bibitcols_path <- arff_row_col[3] } if(is.null(output_path)){ bibitoutput_path <- tempfile("bibitoutput",fileext = "") }else{ bibitoutput_path <- output_path } time_bibit <- proc.time()['elapsed']/60 javaloc <- paste0(find.package("BiBitR")[1],"/java/BiBit2.jar") # javaloc <- gsub("/","\\\\",javaloc) # BiBit.jar location needs to be standardized for package location! # .libPaths() command <- paste("java -jar -Xmx1000M",paste0("\"",javaloc,"\""),paste0("\"",bibitdata_path,"\""),"1",minr,minc,paste0("\"",bibitoutput_path,"\""),paste0("\"",bibitrows_path,"\""),paste0("\"",bibitcols_path,"\""),1,paste0(" ",noise)) # cat(command,"\n") ## APPLY JAVA ALGORITHM OF BIBIT ## system(command) time_bibit <- round(proc.time()['elapsed']/60-time_bibit,2) ## TRANSFROM OUTPUT TO BICLUST RESULT ## if(is.null(output_path)){ cat("\n") cat("Transforming into biclust output... ") time_biclust <- round(proc.time()['elapsed']/60,2) result <- bibit2biclust(data=matrix,resultpath=paste0(bibitoutput_path,"_1.txt"),arff_row_col = arff_row_col) cat("DONE\n") time_biclust <- round(proc.time()['elapsed']/60-time_biclust,2) if(!is.null(result)){ result2 <- new("Biclust",Parameters=list(Call=pm,Method="BiBit"), RowxNumber=result$RowxNumber, NumberxCol=result$NumberxCol, Number=result$Number, info=list(Time_Min=list(arff=time_arff,bibit=time_bibit,biclust=time_biclust,full=time_arff+time_bibit+time_biclust))) ## COLUMN EXTENSION PROCEDURE ## if(!is.null(arff_row_col) & extend_columns!="none"){ matrix <- read.arff(arff_row_col[1]) rownames.data <- as.character(read.table(arff_row_col[2],header=FALSE)[,1]) colnames.data <- as.character(read.table(arff_row_col[3],header=FALSE)[,1]) if(length(rownames.data)!=nrow(matrix)){ matrix <- t(matrix) } rownames(matrix) <- rownames.data colnames(matrix) <- colnames.data } result2 <- extension_procedure(result2=result2,data=matrix,extend_noise=extend_noise,extend_mincol=extend_mincol,extend_limitcol=extend_limitcol,extend_columns=extend_columns,extend_contained) }else{ if(!is.null(arff_row_col)){ rownames.data <- as.character(read.table(arff_row_col[2],header=FALSE)[,1]) colnames.data <- as.character(read.table(arff_row_col[3],header=FALSE)[,1]) nrow.data <- length(rownames.data) ncol.data <- length(colnames.data) }else{ nrow.data <- nrow(matrix) ncol.data <- ncol(matrix) } result2 <- new("Biclust",Parameters=list(Call=pm,Method="BiBit"), RowxNumber=matrix(FALSE,nrow=nrow.data,ncol=1), NumberxCol=matrix(FALSE,nrow=1,ncol=ncol.data), Number=0, info=list(Time_Min=list(arff=time_arff,bibit=time_bibit,biclust=time_biclust,full=time_arff+time_bibit+time_biclust))) } return(result2) }else{ return(NULL) } } #' @title The BiBit Algorithm with Noise Allowance guided by 
Provided Patterns. #' #' @description Same function as \code{\link{bibit2}} but only aims to discover biclusters containing the (sub) pattern of provided patterns or their combinations. #' @details The goal of the \code{\link{bibit3}} function is to provide one or multiple patterns in order to only find those biclusters exhibiting those patterns. #' Multiple patterns can be given in matrix format, \code{pattern_matrix}, and their pairwise combinations can automatically be added to this matrix by setting \code{pattern_combinations=TRUE}. #' All discovered biclusters are still subject to the provided \code{noise} level. #' #' Three types of Biclusters can be discovered: #' \describe{ #' \item{\emph{Full Pattern: }}{Bicluster which overlaps completely (within allowed noise levels) with the provided pattern. The column size of this bicluster is always equal to the number of 1's in the pattern.} #' \item{\emph{Sub Pattern: }}{Biclusters which overlap with a part of the provided pattern within allowed noise levels. Will only be given if \code{subpattern=TRUE} (default). Setting this option to \code{FALSE} decreases computation time.} #' \item{\emph{Extended: }}{Using the resulting biclusters from the full and sub patterns, other columns will be attempted to be added to the biclusters while keeping the noise as low as possible (the number of rows in the BC stays constant). #' This can be done either with \code{extend_columns} equal to \code{"naive"} or \code{"recursive"}. More info on the difference can be found in the Details Section of \code{\link{bibit2}}. #' \cr Naturally the articially added pattern rows will not be taken into account with the noise levels as they are 0 in each other column. #' \cr The question which is attempted to be answered here is \emph{`Do the rows, which overlap partly or fully with the given pattern, have other similarities outside the given pattern?`} #' } #' } #' #' \emph{How?} #' \cr The BiBit algorithm is applied to a data matrix that contains 2 identical artificial rows at the top which contain the given pattern. #' The default algorithm is then slightly altered to only start from this articial row pair (=Full Pattern) or from 1 artificial row and 1 other row (=Sub Pattern). #' #' \emph{Note 1 - Large Data:} #' \cr The \code{arff_row_col} can still be provided in case of large data matrices, but the \code{.arff} file should already contain the pattern of interest in the first two rows. Consequently not more than 1 pattern at a time can be investigated with a single call of \code{bibit3}. #' #' \emph{Note 2 - Viewing Results:} #' \cr A \code{print} and \code{summary} method has been implemented for the output object of \code{bibit3}. It gives an overview of the amount of discovered biclusters and their dimensions #' \cr Additionally, the \code{\link{bibit3_patternBC}} function can extract a Bicluster and add the artificial pattern rows to investigate the results. #' #' @author Ewoud De Troyer #' #' @references Domingo S. Rodriguez-Baena, Antonia J. Perez-Pulido and Jesus S. Aguilar-Ruiz (2011), "A biclustering algorithm for extracting bit-patterns from binary datasets", \emph{Bioinformatics} #' #' @export #' @param matrix The binary input matrix. #' @param minr The minimum number of rows of the Biclusters. (Note that in contrast to \code{\link{bibit}} and \code{\link{bibit2}}, this can be be set to 1 since we are looking for additional rows to the provided pattern.) #' @param minc The minimum number of columns of the Biclusters. 
#' @param noise Noise parameter which determines the amount of zero's allowed in the bicluster (i.e. in the extra added rows to the starting row pair). #' \itemize{ #' \item \code{noise=0}: No noise allowed. This gives the same result as using the \code{\link{bibit}} function. (default) #' \item \code{0<noise<1}: The \code{noise} parameter will be a noise percentage. The number of allowed 0's in a (extra) row in the bicluster will depend on the column size of the bicluster. #' More specifically \code{zeros_allowed = ceiling(noise * columnsize)}. For example for \code{noise=0.10} and a bicluster column size of \code{5}, the number of allowed 0's would be \code{1}. #' \item \code{noise>=1}: The \code{noise} parameter will be the number of allowed 0's in a (extra) row in the bicluster independent from the column size of the bicluster. In this noise option, the noise parameter should be an integer. #' } #' @param pattern_matrix Matrix (Number of Patterns x Number of Data Columns) containing the patterns of interest. #' @param subpattern Boolean value if sub patterns are of interest as well (default=TRUE). #' @param pattern_combinations Boolean value if the pairwise combinations of patterns (the intersecting 1's) should also used as starting points (default=FALSE). #' @param arff_row_col Same argument as in \code{\link{bibit}} and \code{\link{bibit2}}. However you can only provide 1 pattern by using this option. For \code{bibit3} to work, the pattern has to be added 2 times on top of the matrix (= identical first 2 rows). #' @param extend_columns \emph{Column Extension Parameter}\cr Can be one of the following: \code{"none"}, \code{"naive"}, \code{"recursive"} which will apply either a naive or recursive column extension procedure. (See Details Section for more information.) #' \cr Based on the extension, additional biclusters will be created in the Biclust object which can be seen in the column and row names of the \code{RowxNumber} and \code{NumberxCol} slots (\code{"_Ext"} suffix). #' \cr The \code{info} slot will also contain some additional information. Inside this slot, \code{BC.Extended} contains info on which original biclusters were extended, how many columns were added, and in how many extra extended biclusters this resulted. #' \cr \cr \strong{Warning:} Using a percentage-based \code{extend_noise} (or \code{noise} by default) in combination with the recursive procedure will result in a large amount of biclusters and increase the computation time a lot. Depending on the data when using recursive in combination with a noise percentage, it is advised to keep it reasonable small (e.g. 10\%). Another remedy is to sufficiently increase the \code{extend_limitcol} either as a percentage or integer to limit the candidates of columns. #' @param extend_mincol \emph{Column Extension Parameter}\cr A minimum number of columns that a bicluster should be able to be extended with before saving the result. (Default=1) #' @param extend_limitcol \emph{Column Extension Parameter}\cr The number (\code{extend_limitcol>=1}) or percentage (\code{0<extend_limitcol<1}) of 1's that a column (subsetted on the BC rows) should at least contain for it to be a candidate to be added to the bicluster as an extension. (Default=1) (Increase this parameter if the recursive extension takes too long. Limiting the pool of candidates will decrease computation time, but restrict the results more.) 
#' #' @param extend_noise \emph{Column Extension Parameter}\cr The maximum allowed noise (in each row) when extending the columns of the bicluster. Can take the same as the \code{noise} parameter. By default this is the same value as \code{noise}. #' @param extend_contained \emph{Column Extension Parameter}\cr Logical value if extended results should be checked if they contain each other (and deleted if this is the case). Default = \code{FALSE}. This can be a lengthy procedure for a large amount of biclusters (>1000). #' #' @return A S3 list object, \code{"bibit3"} in which each element (apart from the last one) corresponds with a provided pattern or combination thereof. \cr #' Each element is a list containing: #' \describe{ #' \item{\code{Number}: }{Number of Initially found BC's by applying BiBit with the provided pattern.} #' \item{\code{Number_Extended}: }{Number of additional discovered BC's by extending the columns.} #' \item{\code{FullPattern}: }{Biclust S4 Class Object containing the Bicluster with the Full Pattern.} #' \item{\code{SubPattern}: }{Biclust S4 Class Object containing the Biclusters showing parts of the pattern.} #' \item{\code{Extended}: }{Biclust S4 Class Object containing the additional Biclusters after extending the biclusters (column wise) of the full and sub patterns} #' \item{\code{info}: }{Contains \code{Time_Min} element which includes the elapsed time of parts and the full analysis.} #' } #' The last element in the list is a matrix containing all the investigated patterns. #' #' @examples #' \dontrun{ #' set.seed(1) #' data <- matrix(sample(c(0,1),100*100,replace=TRUE,prob=c(0.9,0.1)),nrow=100,ncol=100) #' data[1:10,1:10] <- 1 # BC1 #' data[11:20,11:20] <- 1 # BC2 #' data[21:30,21:30] <- 1 # BC3 #' colsel <- sample(1:ncol(data),ncol(data)) #' data <- data[sample(1:nrow(data),nrow(data)),colsel] #' #' pattern_matrix <- matrix(0,nrow=3,ncol=100) #' pattern_matrix[1,1:7] <- 1 #' pattern_matrix[2,11:15] <- 1 #' pattern_matrix[3,13:20] <- 1 #' #' pattern_matrix <- pattern_matrix[,colsel] #' #' #' out <- bibit3(matrix=data,minr=2,minc=2,noise=0.1,pattern_matrix=pattern_matrix, #' subpattern=TRUE,extend_columns=TRUE,pattern_combinations=TRUE) #' out # OR print(out) OR summary(out) #' #' #' bibit3_patternBC(result=out,matrix=data,pattern=c(1),type=c("full","sub","ext"),BC=c(1,2)) #' } bibit3 <- function(matrix=NULL,minr=1,minc=2,noise=0,pattern_matrix=NULL,subpattern=TRUE,pattern_combinations=FALSE,arff_row_col=NULL, extend_columns="none",extend_mincol=1,extend_limitcol=1,extend_noise=noise,extend_contained=FALSE){ pm <- match.call() minr <- minr + 2 ### # Legacy compatibility for GUI if(is.logical(extend_columns)){ extend_columns <- ifelse(extend_columns,"naive","none") } if(noise<0){stop("noise parameter can not be negative",call.=FALSE)} if(noise>=1){noise <- as.integer(noise)} ## Extend parameters if(extend_noise<0){stop("extend_noise parameter can not be negative",call.=FALSE)} if(extend_noise>=1){extend_noise <- as.integer(extend_noise)} if(extend_noise<noise){stop("extend_noise can't be lower than noise",call.=FALSE)} if(length(extend_columns)!=1){stop("extend_columns needs 1 input",call.=FALSE)} if(!(extend_columns)%in%c("none","naive","recursive")){stop("extend_columns should be \"none\", \"naive\" or \"recursive\"",call.=FALSE)} if(extend_limitcol<=0){stop("extend_limitcol should be larger than 0",call.=FALSE)} if(extend_mincol<1){stop("extend_mincol should be larger than or equal to 1",call.=FALSE)} ### if(is.null(arff_row_col)){ # Check if matrix is 
binary (DISCRETIZED NOT YET IMPLEMENTED!) if(class(matrix)!="matrix"){stop("matrix parameter should contain a matrix object",call.=FALSE)} if(!identical(as.numeric(as.vector(matrix)),as.numeric(as.logical(matrix)))){stop("matrix is not a binary matrix!",call.=FALSE)} if(is.null(rownames(matrix))){rownames(matrix) <- paste0("Row",c(1:nrow(matrix)))} if(is.null(colnames(matrix))){colnames(matrix) <- paste0("Col",c(1:ncol(matrix)))} # Check if rownames & colnames contain ; or , -> should be deleted and give warnings it was deleted rowdot <- grepl(",",rownames(matrix)) if(sum(rowdot)>0){ rownames(matrix) <- gsub(",","",rownames(matrix)) warning(paste0("Row names ",paste0(which(rowdot),collapse = ",")," contained a ',' which was deleted."),call.=FALSE) } rowsc <- grepl(";",rownames(matrix)) if(sum(rowsc)>0){ rownames(matrix) <- gsub(";","",rownames(matrix)) warning(paste0("Row names ",paste0(which(rowsc),collapse = ",")," contained a ';' which was deleted."),call.=FALSE) } coldot <- grepl(",",colnames(matrix)) if(sum(coldot)>0){ colnames(matrix) <- gsub(",","",colnames(matrix)) warning(paste0("Column names ",paste0(which(coldot),collapse = ",")," contained a ',' which was deleted."),call.=FALSE) } colsc <- grepl(";",colnames(matrix)) if(sum(colsc)>0){ colnames(matrix) <- gsub(";","",colnames(matrix)) warning(paste0("Column names ",paste0(which(colsc),collapse = ",")," contained a ';' which was deleted."),call.=FALSE) } # No duplicate row names allowed! if(sum(table(rownames(matrix))>1)){stop("No duplicate row names allowed!")} # Check pattern matrix if(is.null(pattern_matrix)){stop("pattern_matrix needs to be provided",call.=FALSE)} if(class(pattern_matrix)!="matrix"){stop("pattern_matrix parameter should contain a matrix object",call.=FALSE)} if(!identical(as.numeric(as.vector(pattern_matrix)),as.numeric(as.logical(pattern_matrix)))){stop("pattern_matrix is not a binary matrix!",call.=FALSE)} if(is.null(rownames(pattern_matrix))){rownames(pattern_matrix) <- paste0("Pattern",1:nrow(pattern_matrix))} if(ncol(pattern_matrix)!=ncol(matrix)){stop("matrix and pattern_matrix have a different number of columns",call.=FALSE)} # If combinations required, add to pattern! 
if(pattern_combinations & nrow(pattern_matrix)>1){ cat("Computing pattern combinations...") comb_temp <- combn(1:nrow(pattern_matrix),2) comb_matrix <- matrix(NA,nrow=ncol(comb_temp),ncol=ncol(pattern_matrix),dimnames=list(paste0("comb",1:ncol(comb_temp)))) for(i.comb in 1:ncol(comb_temp)){ comb_matrix[i.comb,] <- ((pattern_matrix[comb_temp[1,i.comb],]+pattern_matrix[comb_temp[2,i.comb],])==2)+0 rownames(comb_matrix)[i.comb] <- paste0(rownames(pattern_matrix)[comb_temp[1,i.comb]],"_",rownames(pattern_matrix)[comb_temp[2,i.comb]]) } pattern_matrix <- rbind(pattern_matrix,comb_matrix) cat("DONE\n\n") } # Delete zero-rows zero_rows <- which(rowSums(pattern_matrix)==0) if(length(zero_rows)>0){ pattern_matrix <- pattern_matrix[-zero_rows,,drop=FALSE] } nPatterns <- nrow(pattern_matrix) if(nPatterns==0){stop("No viable patterns in pattern_matrix, all zero values.")} }else{ time_arff <- 0 if(length(arff_row_col)!=3){stop("arff_row_col should contain 3 elements",call.=FALSE)} bibitdata_path <- arff_row_col[1] bibitrows_path <- arff_row_col[2] bibitcols_path <- arff_row_col[3] pattern_matrix <- matrix(NA,nrow=1,ncol=1,dimnames=list("arff_Pattern")) nPatterns <- 1 } ############################################# ## PREPARE BASIC ARFF FILE & READ IN LINES ## ############################################# cat("Transform matrix into arff format...") bibitbasic_path <- tempfile("bibitbasic",fileext=".arff") write.arff(t(matrix),file=bibitbasic_path) basic_file <- file(bibitbasic_path) basic_lines <- readLines(basic_file) close(basic_file) number_white <- nrow(matrix)+2 cat("DONE\n\n") ###################################### ## START FOR LOOP OVER ALL PATTERNS ## ###################################### FINAL_RESULT <- vector("list",nPatterns) names(FINAL_RESULT) <- rownames(pattern_matrix) for(i.pattern in 1:nPatterns){ if(i.pattern>1){cat("\n=============================================================================\n\n")} cat(toupper(rownames(pattern_matrix)[i.pattern]),"\n\n") if(is.null(arff_row_col)){ time_arff <- round(proc.time()['elapsed']/60,2) # Add patterns to matrix matrix_with_pattern <- rbind(matrix(rep(pattern_matrix[i.pattern,],2),nrow=2,byrow=TRUE,dimnames = list(paste0(rownames(pattern_matrix)[i.pattern],"_Art",c(1,2)))),matrix) # Transform data into arff format cat("Changing arff file...",rownames(pattern_matrix)[i.pattern],"...") bibitdata_path <- tempfile("bibitdata",fileext=".arff") bibitrows_path <- tempfile("bibitrows",fileext=".csv") bibitcols_path <- tempfile("bibitcols",fileext=".csv") new_lines_meta <- basic_lines[1:number_white] new_lines_data <- basic_lines[(number_white+1):length(basic_lines)] pattern <- apply(cbind(matrix_with_pattern[1,],matrix_with_pattern[2,]),MARGIN=1,FUN=paste0,collapse=",") new_rownames <- rownames(matrix_with_pattern)[c(1,2)] meta1 <- new_lines_meta[1] new_lines_meta <- new_lines_meta[-1] new_lines_meta <- c(meta1,paste0("@attribute ",new_rownames," numeric"),new_lines_meta) new_lines_data <- apply(cbind(pattern,new_lines_data),MARGIN=1,FUN=paste0,collapse=",") new_file <- file(bibitdata_path) writeLines(c(new_lines_meta,new_lines_data),new_file) close(new_file) write.table(matrix(rownames(matrix_with_pattern),ncol=1),quote=FALSE,row.names=FALSE,col.names=FALSE,file=bibitrows_path) write.table(matrix(colnames(matrix_with_pattern),ncol=1),quote=FALSE,row.names=FALSE,col.names=FALSE,file=bibitcols_path) cat("DONE\n") cat("\n") time_arff <- round(proc.time()['elapsed']/60-time_arff,2) }else{ matrix_with_pattern <- NULL if(extend_columns!="none"){ 
matrix_with_pattern <- read.arff(bibitdata_path) rownames.data <- as.character(read.table(bibitrows_path,header=FALSE)[,1]) colnames.data <- as.character(read.table(bibitcols_path,header=FALSE)[,1]) if(length(rownames.data)!=nrow(matrix_with_pattern)){ matrix_with_pattern <- t(matrix_with_pattern) } rownames(matrix_with_pattern) <- rownames.data colnames(matrix_with_pattern) <- colnames.data } } # Apply BiBit Algorithm cat("Initiate BiBit for",rownames(pattern_matrix)[i.pattern],"...\n") cat("\n") bibitoutput_path <- tempfile("bibitoutput",fileext = "") time_bibit <- proc.time()['elapsed']/60 javaloc <- paste0(find.package("BiBitR")[1],"/java/BiBit3.jar") # javaloc <- paste0(getwd(),"/inst/java/BiBit3.jar") subpat <- ifelse(subpattern,1,0) # BiBit.jar location needs to be standardized for package location! # .libPaths() command <- paste("java -jar -Xmx1000M",paste0("\"",javaloc,"\""),paste0("\"",bibitdata_path,"\""),"1",minr,minc,paste0("\"",bibitoutput_path,"\""),paste0("\"",bibitrows_path,"\""),paste0("\"",bibitcols_path,"\""),1,paste0(" ",noise),paste0(" ",subpat)) # cat(command,"\n") system(command) time_bibit <- round(proc.time()['elapsed']/60-time_bibit,2) cat("\n") cat("Transforming into biclust output...") time_biclust <- round(proc.time()['elapsed']/60,2) result <- bibit2biclust(data=matrix_with_pattern,resultpath=paste0(bibitoutput_path,"_1.txt"),arff_row_col = arff_row_col) cat("DONE\n") time_biclust <- round(proc.time()['elapsed']/60-time_biclust,2) # Small prep if(!is.null(arff_row_col)){ rownames.data <- as.character(read.table(arff_row_col[2],header=FALSE)[,1]) colnames.data <- as.character(read.table(arff_row_col[3],header=FALSE)[,1]) nrow.data <- length(rownames.data) ncol.data <- length(colnames.data) }else{ nrow.data <- nrow(matrix_with_pattern) ncol.data <- ncol(matrix_with_pattern) } # Look for and label the Biclusters (Full Pattern (zero or not)/Sub Pattern) if(!is.null(result)){ result2 <- new("Biclust",Parameters=list(Call=pm,Method="BiBit"), RowxNumber=result$RowxNumber, NumberxCol=result$NumberxCol, Number=result$Number, info=list(Time_Min=list(arff=time_arff,bibit=time_bibit,biclust=time_biclust,full=time_arff+time_bibit+time_biclust))) FullPattern <- new("Biclust",Parameters=list(Call=pm,Method="BiBit"), RowxNumber=result2@RowxNumber[,1,drop=FALSE], NumberxCol=result2@NumberxCol[1,,drop=FALSE], Number=1, info=list()) if(subpattern & result2@Number>1){ SubPattern <- new("Biclust",Parameters=list(Call=pm,Method="BiBit"), RowxNumber=result2@RowxNumber[,2:result2@Number,drop=FALSE], NumberxCol=result2@NumberxCol[2:result2@Number,,drop=FALSE], Number=result2@Number-1, info=list()) }else{ SubPattern <- new("Biclust",Parameters=list(Call=pm,Method="BiBit"), RowxNumber=matrix(FALSE,nrow=nrow.data,ncol=1), NumberxCol=matrix(FALSE,nrow=1,ncol=ncol.data), Number=0, info=list()) } if(extend_columns!="none"){ # Reduce matrix and result only for Extended part (Artificial rows may not influence extension procedure) ######################## ######################## result2_temp <- result2 result2_temp@RowxNumber <- result2_temp@RowxNumber[-c(1,2),,drop=FALSE] if(result2_temp@Number>0){ deleteBC_index <- which(colSums(result2_temp@RowxNumber)==0) if(length(deleteBC_index)>0){ if(length(deleteBC_index)==result2_temp@Number){ result2_temp <- new("Biclust",Parameters=list(Call=pm,Method="BiBit"), RowxNumber=matrix(FALSE,nrow=nrow.data,ncol=1), NumberxCol=matrix(FALSE,nrow=1,ncol=ncol.data), Number=0, info=list()) }else{ result2_temp@RowxNumber <- 
result2_temp@RowxNumber[,-deleteBC_index]
            result2_temp@NumberxCol <- result2_temp@NumberxCol[-deleteBC_index,]
            result2_temp@Number <- ncol(result2_temp@RowxNumber)
          }
        }
      }
      ########################
      ########################

      # Use extension_procedure, delete original BC's, check if there were extensions...
      # check for BC.Extender, if NULL, then make similar object below, otherwise delete parts
      Extended <- extension_procedure(result2=result2_temp,data=matrix_with_pattern[-c(1,2),],extend_noise=extend_noise,extend_mincol=extend_mincol,extend_limitcol=extend_limitcol,extend_columns=extend_columns,extend_contained=extend_contained)

      if(!is.null(Extended@info$BC.Extended)){
        original_index <- which(!grepl("_Ext",colnames(Extended@RowxNumber)))
        Extended@Number <- Extended@Number - length(original_index)
        Extended@RowxNumber <- Extended@RowxNumber[,-original_index,drop=FALSE]
        Extended@NumberxCol <- Extended@NumberxCol[-original_index,,drop=FALSE]
        time_extend <- Extended@info$Time_Min$extend
        Extended@info$Time_Min <- NULL
        Number_Extended <- Extended@Number
      }else{
        Extended <- new("Biclust",Parameters=list(Call=pm,Method="BiBit"),
                        RowxNumber=matrix(FALSE,nrow=nrow.data,ncol=1),
                        NumberxCol=matrix(FALSE,nrow=1,ncol=ncol.data),
                        Number=0,
                        info=list())
        time_extend <- 0
        Number_Extended <- 0
      }
      # TO DO: take time extend from result + TO DO: add and check parameters + add documentation

    }else{
      Extended <- new("Biclust",Parameters=list(Call=pm,Method="BiBit"),
                      RowxNumber=matrix(FALSE,nrow=nrow.data,ncol=1),
                      NumberxCol=matrix(FALSE,nrow=1,ncol=ncol.data),
                      Number=0,
                      info=list())
      time_extend <- 0
      Number_Extended <- 0
    }

    time_final <- list(arff=result2@info$Time_Min$arff,bibit=result2@info$Time_Min$bibit,biclust=result2@info$Time_Min$biclust,extend=time_extend,full=result2@info$Time_Min$full+time_extend)

    FINAL_RESULT[[i.pattern]] <- list(Number=result2@Number,Number_Extended=Number_Extended,FullPattern=FullPattern,SubPattern=SubPattern,Extended=Extended,info=list(Time_Min=time_final))

  }else{
    result2 <- new("Biclust",Parameters=list(Call=pm,Method="BiBit"),
                   RowxNumber=matrix(FALSE,nrow=nrow.data,ncol=1),
                   NumberxCol=matrix(FALSE,nrow=1,ncol=ncol.data),
                   Number=0,
                   info=list())
    FINAL_RESULT[[i.pattern]] <- list(Number=0,Number_Extended=0,FullPattern=result2,SubPattern=result2,Extended=result2,info=list(Time_Min=list(arff=time_arff,bibit=time_bibit,biclust=time_biclust,extend=0,full=time_arff+time_bibit+time_biclust)))
  }
}

# DELETE ARTIFICIAL ROWS FROM BC RESULTS, if no other rows remain, go to empty result
for(i.list in 1:length(FINAL_RESULT)){
  for(j.list in c("FullPattern","SubPattern")){
    FINAL_RESULT[[i.list]][[j.list]]@RowxNumber <- FINAL_RESULT[[i.list]][[j.list]]@RowxNumber[-c(1,2),,drop=FALSE]
    if(FINAL_RESULT[[i.list]][[j.list]]@Number>0){
      deleteBC_index <- which(colSums(FINAL_RESULT[[i.list]][[j.list]]@RowxNumber)==0)
      if(length(deleteBC_index)>0){
        if(length(deleteBC_index)==FINAL_RESULT[[i.list]][[j.list]]@Number){
          FINAL_RESULT[[i.list]][[j.list]] <- new("Biclust",Parameters=list(Call=pm,Method="BiBit"),
                                                  RowxNumber=matrix(FALSE,nrow=nrow.data,ncol=1),
                                                  NumberxCol=matrix(FALSE,nrow=1,ncol=ncol.data),
                                                  Number=0,
                                                  info=list())
        }else{
          FINAL_RESULT[[i.list]][[j.list]]@RowxNumber <- FINAL_RESULT[[i.list]][[j.list]]@RowxNumber[,-deleteBC_index]
          FINAL_RESULT[[i.list]][[j.list]]@NumberxCol <- FINAL_RESULT[[i.list]][[j.list]]@NumberxCol[-deleteBC_index,]
          FINAL_RESULT[[i.list]][[j.list]]@Number <- ncol(FINAL_RESULT[[i.list]][[j.list]]@RowxNumber)
        }
      }
    }
  }
}

# END RESULT
FINAL_RESULT$pattern_matrix <- pattern_matrix
class(FINAL_RESULT) <- "bibit3"
return(FINAL_RESULT)
}


#' @title Column Extension Procedure
#'
#' @description Function which accepts a result from \code{\link{bibit}}, \code{\link{bibit2}} or \code{\link{bibit3}} and will (re-)apply the column extension procedure. This means that if the result already contained extended biclusters, these will be deleted first.
#'
#' @section Details - Column Extension:
#' An optional procedure which can be applied \emph{after} applying the BiBit algorithm (with noise) is called \emph{Column Extension}.
#' The procedure will add extra columns to a BiBit bicluster, taking into account the allowed \code{extend_noise} level in each row.
#' The primary goal is, after applying BiBit with noise, to also try and add some noise to the 2 initial `perfect` rows.
#' Other parameters like \code{extend_mincol} and \code{extend_limitcol} can also further restrict which extensions should be discovered.
#' \cr This procedure can be done either \emph{naively} (fast) or \emph{recursively} (slower and more thorough) with the \code{extend_columns} parameter.
#'
#' \describe{
#' \item{\code{"naive"}}{Subsetting on the bicluster rows, the column candidates are ordered by the number of 1's they contain. Afterwards, in this order, each column is sequentially checked and added when the resulting BC is still within row noise levels.
#' \cr This has 2 major consequences:
#' \itemize{
#' \item{If 2 columns are identical, the first in the dataset is added, while the second isn't (depending on the noise level allowed per row).}
#' \item{If 2 non-identical columns are viable to be added (correct row noise), the column with the most 1's is added. Afterwards the second column might not be viable anymore.}
#' }
#' Note that using this method will always result in a maximum of 1 extended bicluster per original bicluster.
#' }
#' \item{\code{"recursive"}}{
#' Conditioning the group of candidates on the allowed row noise level, each possible/allowed combination of adding columns to the bicluster is checked. Only the resulting biclusters with the highest number of extra columns are saved.
#' Of course this could result in multiple extensions for 1 bicluster if there are multiple `maximum added columns` results.
#' }
#' }
#' \emph{Note:} These procedures are followed by a fast check whether the extensions resulted in any duplicate biclusters. If so, these are deleted from the final result.
#'
#' @export
#' @param result Result from \code{\link{bibit}}, \code{\link{bibit2}} or \code{\link{bibit3}}.
#' @param matrix The binary input matrix.
#' @param arff_row_col The same file directories (with the same limitations) as given in \code{\link{bibit}}, \code{\link{bibit2}} or \code{\link{bibit3}}.
#' @param BC A numeric/integer vector of BC's which should be extended. Different behaviour for the 3 types of input results:
#' \describe{
#' \item{\code{bibit}}{\code{BC} directly takes the corresponding biclusters from the result and extends them. (e.g. \code{BC=c(1,10)} is then remapped to \code{c("BC1","BC1_Ext1","BC2","BC2_Ext1")} in the new output)}
#' \item{\code{bibit2}}{\code{BC} corresponds with the original non-extended biclusters from the \code{\link{bibit2}} result. These original biclusters are selected and extended. (e.g. \code{BC=c(1,10)} selects biclusters \code{c("BC1","BC10")} which are then remapped to \code{c("BC1","BC1_Ext1","BC2","BC2_Ext1")} in the new output)}
#' \item{\code{bibit3}}{\code{BC} corresponds with the biclusters when combining the FULLPATTERN and SUBPATTERN result together.
#' For example choosing \code{BC=1} would only select the 1 FULLPATTERN bicluster for each pattern and try to extend it. (e.g. \code{BC=c(1,10)} selects biclusters 1 and 10 from the combined fullpattern and subpattern result (meaning the full pattern BC and the 9th subpattern BC) which are then remapped to \code{c("BC1","BC1_Ext1","BC2","BC2_Ext1")} in the new output)
#' }
#' }
#'
#' @param extend_columns \emph{Column Extension Parameter}\cr Can be one of the following: \code{"naive"} or \code{"recursive"} which will apply either a naive or recursive column extension procedure. (See Details Section for more information.)
#' \cr Based on the extension, additional biclusters will be created in the Biclust object which can be seen in the column and row names of the \code{RowxNumber} and \code{NumberxCol} slots (\code{"_Ext"} suffix).
#' \cr The \code{info} slot will also contain some additional information. Inside this slot, \code{BC.Extended} contains info on which original biclusters were extended, how many columns were added, and in how many extra extended biclusters this resulted.
#' \cr \cr \strong{Warning:} Using a percentage-based \code{extend_noise} in combination with the recursive procedure will result in a large amount of biclusters and increase the computation time a lot. When using the recursive procedure in combination with a noise percentage, it is advised to keep the percentage reasonably small (e.g. 10\%), depending on the data. Another remedy is to sufficiently increase the \code{extend_limitcol} either as a percentage or integer to limit the candidates of columns.
#' @param extend_mincol \emph{Column Extension Parameter}\cr A minimum number of columns that a bicluster should be able to be extended with before saving the result. (Default=1)
#' @param extend_limitcol \emph{Column Extension Parameter}\cr The number (\code{extend_limitcol>=1}) or percentage (\code{0<extend_limitcol<1}) of 1's that a column (subsetted on the BC rows) should at least contain for it to be a candidate to be added to the bicluster as an extension. (Default=1) (Increase this parameter if the recursive extension takes too long. Limiting the pool of candidates will decrease computation time, but restrict the results more.)
#'
#' @param extend_noise \emph{Column Extension Parameter}\cr The maximum allowed noise (in each row) when extending the columns of the bicluster. Can take the same values as the \code{noise} parameter.
#' @param extend_contained \emph{Column Extension Parameter}\cr Logical value indicating whether extended results should be checked for containment within each other (and deleted if this is the case). Default = \code{FALSE}. This can be a lengthy procedure for a large amount of biclusters (>1000).
#'
#' @author Ewoud De Troyer
#' @return A Biclust S4 Class object or bibit3 S3 list Class object
#'
#' @examples
#' \dontrun{
#'
#' set.seed(1)
#' data <- matrix(sample(c(0,1),100*100,replace=TRUE,prob=c(0.9,0.1)),nrow=100,ncol=100)
#' data[1:10,1:10] <- 1 # BC1
#' data[11:20,11:20] <- 1 # BC2
#' data[21:30,21:30] <- 1 # BC3
#' data <- data[sample(1:nrow(data),nrow(data)),sample(1:ncol(data),ncol(data))]
#'
#' result <- bibit2(data,minr=5,minc=5,noise=0.1,extend_columns = "recursive",
#'                  extend_mincol=1,extend_limitcol=1)
#' result
#' result2 <- bibit_columnextension(result=result,matrix=data,arff_row_col=NULL,BC=c(1,10),
#'                                  extend_columns="recursive",extend_mincol=1,
#'                                  extend_limitcol=1,extend_noise=2,extend_contained=FALSE)
#' result2
#' }
bibit_columnextension <- function(result,matrix,arff_row_col=NULL,BC=NULL,
                                  extend_columns="naive",extend_mincol=1,extend_limitcol=1,extend_noise=1,extend_contained=FALSE){
  # explain what BC does (note: name will change to order in new result!)
  # (a commented toy sketch of the naive column extension can be found after this function)

  # Check if result comes from bibit/bibit2/bibit3
  if(class(result)=="Biclust"){
    if(result@Parameters$Method!="BiBit"){stop("result is Biclust class but does not come from bibit")}
  }else if(class(result)!="bibit3"){
    stop("result does not come from bibit/bibit2 or bibit3 function")
  }

  pm <- match.call()

  # Check parameters
  if(!is.null(BC)){
    if(!(class(BC) %in% c("numeric","integer"))){stop("BC should be a numeric or integer vector",call.=FALSE)}
    BC <- as.integer(BC)
  }
  if(extend_noise<0){stop("extend_noise parameter can not be negative",call.=FALSE)}
  if(extend_noise>=1){extend_noise <- as.integer(extend_noise)}
  if(length(extend_columns)!=1){stop("extend_columns needs 1 input",call.=FALSE)}
  if(!(extend_columns)%in%c("naive","recursive")){stop("extend_columns should be \"naive\" or \"recursive\"",call.=FALSE)}
  if(extend_limitcol<=0){stop("extend_limitcol should be larger than 0",call.=FALSE)}
  if(extend_mincol<1){stop("extend_mincol should be larger than or equal to 1",call.=FALSE)}

  # Check matrix
  if(is.null(arff_row_col)){
    # Check if matrix is binary (DISCRETIZED NOT YET IMPLEMENTED!)
    if(class(matrix)!="matrix"){stop("matrix parameter should contain a matrix object",call.=FALSE)}
    if(!identical(as.numeric(as.vector(matrix)),as.numeric(as.logical(matrix)))){stop("matrix is not a binary matrix!",call.=FALSE)}
    if(is.null(rownames(matrix))){rownames(matrix) <- paste0("Row",c(1:nrow(matrix)))}
    if(is.null(colnames(matrix))){colnames(matrix) <- paste0("Col",c(1:ncol(matrix)))}

    # Check if rownames & colnames contain ; or , -> should be deleted and give a warning that it was deleted
    rowdot <- grepl(",",rownames(matrix))
    if(sum(rowdot)>0){
      rownames(matrix) <- gsub(",","",rownames(matrix))
      warning(paste0("Row names ",paste0(which(rowdot),collapse = ",")," contained a ',' which was deleted."),call.=FALSE)
    }
    rowsc <- grepl(";",rownames(matrix))
    if(sum(rowsc)>0){
      rownames(matrix) <- gsub(";","",rownames(matrix))
      warning(paste0("Row names ",paste0(which(rowsc),collapse = ",")," contained a ';' which was deleted."),call.=FALSE)
    }
    coldot <- grepl(",",colnames(matrix))
    if(sum(coldot)>0){
      colnames(matrix) <- gsub(",","",colnames(matrix))
      warning(paste0("Column names ",paste0(which(coldot),collapse = ",")," contained a ',' which was deleted."),call.=FALSE)
    }
    colsc <- grepl(";",colnames(matrix))
    if(sum(colsc)>0){
      colnames(matrix) <- gsub(";","",colnames(matrix))
      warning(paste0("Column names ",paste0(which(colsc),collapse = ",")," contained a ';' which was deleted."),call.=FALSE)
    }

    # No duplicate row names allowed!
if(sum(table(rownames(matrix))>1)){stop("No duplicate row names allowed!")} }else{ # Make matrix if arffpath is given time_arff <- 0 if(length(arff_row_col)!=3){stop("arff_row_col should contain 3 elements",call.=FALSE)} matrix <- read.arff(arff_row_col[1]) rownames.data <- as.character(read.table(arff_row_col[2],header=FALSE)[,1]) colnames.data <- as.character(read.table(arff_row_col[3],header=FALSE)[,1]) if(length(rownames.data)!=nrow(matrix)){ matrix <- t(matrix) } rownames(matrix) <- rownames.data colnames(matrix) <- colnames.data } # Depending on result (bibit/bibit2/bibit3) # Check if there is extended result, if so delete it and reapply extension if(class(result)=="Biclust"){ if(!is.null(result@info$BC.Extended)){ ext_index <- which(grepl("_Ext",colnames(result@RowxNumber))) result@Number <- result@Number-length(ext_index) result@RowxNumber <- result@RowxNumber[,-ext_index,drop=FALSE] result@NumberxCol <- result@NumberxCol[-ext_index,,drop=FALSE] result@info$BC.Extended <- NULL }else{ colnames(result@RowxNumber) <- paste0("BC",1:result@Number) rownames(result@NumberxCol) <- paste0("BC",1:result@Number) } if(!is.null(result@info$Time_Min$extend)){ result@info$Time_Min$full <- result@info$Time_Min$full - result@info$Time_Min$extend result@info$Time_Min$extend <- NULL } result@Parameters$Call <- pm if(!is.null(BC)){ BC_names <- paste0("BC",BC) BC_index <- sapply(BC_names,FUN=function(x){which(x==colnames(result@RowxNumber))}) result@Number <- length(BC_index) result@RowxNumber <- result@RowxNumber[,BC_index,drop=FALSE] result@NumberxCol <- result@NumberxCol[BC_index,,drop=FALSE] cat("\nChosen specific BC's:\n") for(i.BC in 1:length(BC)){ cat(BC_names[i.BC],"mapped to",paste0("BC",i.BC),"\n") } cat("\n") } cat("Number of Original BC's:",result@Number) result <- extension_procedure(result2=result,data=matrix,extend_noise=extend_noise,extend_mincol=extend_mincol,extend_limitcol=extend_limitcol,extend_columns=extend_columns,extend_contained=extend_contained) return(result) }else if(class(result)=="bibit3"){ for(i.pattern in 1:(length(result)-1)){ cat(paste0("\n",names(result)[i.pattern])) cat("\n----------\n") result2 <- result[[i.pattern]] if(!is.null(result2$info$Time_Min$extend)){ result2$info$Time_Min$full <- result2$info$Time_Min$full - result2$info$Time_Min$extend result2$info$Time_Min$extend <- 0 } # Combine Fullpattern, subpattern result, then apply extension and overwrite previous Extended result result_temp <- new("Biclust",Parameters=list(Call=pm,method="BiBit"), RowxNumber=cbind(result2$FullPattern@RowxNumber,result2$SubPattern@RowxNumber), NumberxCol=rbind(result2$FullPattern@NumberxCol,result2$SubPattern@NumberxCol), Number=result2$Number, info=list(Time_Min=list( arff=result2$info$Time_Min$arff, bibit=result2$info$Time_Min$bibit, biclust=result2$info$Time_Min$biclust, full=result2$info$Time_Min$full )) ) if(!is.null(BC)){ result_temp@Number <- length(BC) result_temp@RowxNumber <- result_temp@RowxNumber[,BC,drop=FALSE] result_temp@NumberxCol <- result_temp@NumberxCol[BC,,drop=FALSE] cat("\nChosen specific BC's:\n") for(i.BC in 1:length(BC)){ cat(paste0("BC",BC[i.BC]),"mapped to",paste0("BC",i.BC),"\n") } cat("\n") } cat("Number of Original BC's:",result_temp@Number) result2$Extended <- extension_procedure(result2=result_temp,data=matrix,extend_noise=extend_noise,extend_mincol=extend_mincol,extend_limitcol=extend_limitcol,extend_columns=extend_columns,extend_contained=extend_contained) if(!is.null(result2$Extended@info$BC.Extended)){ # Don't forget to change Number_Extended, 
change time in result2, delete time in result2$Extended and delete original BC's! ext_index <- which(grepl("_Ext",colnames(result2$Extended@RowxNumber))) result2$Extended@Number <- length(ext_index) result2$Extended@RowxNumber <- result2$Extended@RowxNumber[,ext_index,drop=FALSE] result2$Extended@NumberxCol <- result2$Extended@NumberxCol[ext_index,,drop=FALSE] result2$Number_Extended <- result2$Extended@Number result2$info$Time_Min$extend <- result2$Extended@info$Time_Min$extend result2$info$Time_Min$full <- result2$Extended@info$Time_Min$full result2$Extended@info$Time_Min <- NULL }else{ result2$Number_Extended <- 0 result2$Extended <- new("Biclust",Parameters=list(Call=pm,Method="BiBit"), RowxNumber=matrix(FALSE,nrow=nrow(matrix),ncol=1), NumberxCol=matrix(FALSE,nrow=1,ncol=ncol(matrix)), Number=0, info=list()) } result[[i.pattern]] <- result2 } return(result) } }
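# ------------------------------------------------------------------------------
# Illustrative sketch (kept as a comment so it does not execute on package load)
# of the *naive* column extension described in the Details section of
# bibit_columnextension above. All objects below (data, bc_rows, bc_cols, noise)
# are hypothetical toy inputs, not part of the package; the actual
# implementation lives in BC_column_extension() in extension.R.
#
# set.seed(1)
# data <- matrix(rbinom(8*6,1,0.6),nrow=8,ncol=6)
# bc_rows <- 1:4                         # row indices of a found bicluster
# bc_cols <- c(TRUE,TRUE,rep(FALSE,4))   # its current column membership
# noise <- 1                             # allowed number of 0's per row
# # candidates ordered by the number of 1's inside the bicluster rows:
# cand <- order(colSums(data[bc_rows,,drop=FALSE]),decreasing=TRUE)
# cand <- cand[!bc_cols[cand]]
# for(j in cand){
#   cols_try <- bc_cols
#   cols_try[j] <- TRUE
#   zeros_per_row <- rowSums(data[bc_rows,cols_try,drop=FALSE]==0)
#   if(!all(zeros_per_row<=noise)) break  # stop at the first failing candidate
#   bc_cols <- cols_try                   # otherwise keep column j
# }
# which(bc_cols)   # extended column set of the bicluster
# ------------------------------------------------------------------------------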
# --- end of source file: /scratch/gouwar.j/cran-all/cranData/BiBitR/R/bibit.R ---
.EnvBIBIT <- new.env() .GetEnvBIBIT <- function(x){ if(!exists(x,envir=.EnvBIBIT,inherits=FALSE)){ return(NULL) } else{ return(get(x=x,envir=.EnvBIBIT,inherits=FALSE)) } } .AssignEnvBIBIT <- function(x,value){ assign(x=x,value=value,envir=.EnvBIBIT) } # Function which checks which one of the candidates are viable to be added as extra columns check_candidates <- function(data,included,candidates,noise){ if(length(candidates)>0){ noise <- ifelse(((noise<1)&(noise>0)),ceiling(noise*(length(included)+1)),noise) noise_in_rows <- ncol(data[,included,drop=FALSE])-rowSums(data[,included,drop=FALSE]) rows_noise_allowed <- which((noise - noise_in_rows)>0) compatible_candidates <- sapply(candidates,FUN=function(x){ zero_rows <- which(data[,x]==0) if(all(zero_rows %in% rows_noise_allowed)){ return(TRUE) }else{ return(FALSE) } }) return(candidates[compatible_candidates]) }else{ return(candidates) } } # Function to extend the columns of the bibit bicluster RECURSIVELY BC_column_extension_recursive <- function(result,data,noise,extend_limitcol=1,extend_mincol=1){ # Check parameters & prepare list objects if(extend_limitcol<=0){stop("Unallowed extend_limitcol")} if(extend_mincol<1){stop("Unallowed extend_mincol")} extend_mincol <- as.integer(extend_mincol) time_extend <- round(proc.time()['elapsed']/60,2) BC.extended_list <- rowxnumber_list <- numberxcol_list <- vector("list",result@Number) pb <- txtProgressBar(min=1,max=result@Number,initial=1,style=3) # Apply extension for each BC for(i.BC in 1:result@Number){ # Progress print setTxtProgressBar(pb,i.BC) # progress_dots(i=i.BC,nBC=result@Number) # cat("BC",i.BC,"\n") ####### if(extend_limitcol<1){ extend_limitcol2 <- ceiling(extend_limitcol*sum(result@RowxNumber[,i.BC])) }else{ extend_limitcol2 <- min(extend_limitcol,sum(result@RowxNumber[,i.BC])) } .AssignEnvBIBIT(x="extensions",value=list()) included_temp <- which(result@NumberxCol[i.BC,]) candidates_temp <- which(!result@NumberxCol[i.BC,]) # Before going into recursion which checks candidates, already delete candidates which have a full-zero column when extending this BC # + delete candidates which do not have enough 1's (depending on extend_limitcol parameter) candidates_temp <- candidates_temp[(which(colSums(data[result@RowxNumber[,i.BC],candidates_temp,drop=FALSE])>=extend_limitcol2))] # Apply recursive procedure temp <- extension_recursive(data=data[result@RowxNumber[,i.BC],],included=included_temp,candidates=candidates_temp,noise=noise,startlength = length(included_temp)) # Have we found extensions? 
extensions <- .GetEnvBIBIT("extensions") # If we found extensions, check if enough columns were added and add each extension to a biclust result if(length(extensions)>0){ number_columns <- unlist(lapply(extensions,FUN=function(x){x$number_columns})) max_extracol <- max(number_columns) if(max_extracol>=extend_mincol){ selected_ext <- which(number_columns==max_extracol) numberxcol <- matrix(rep(result@NumberxCol[i.BC,],length(selected_ext)+1),nrow=length(selected_ext)+1,byrow=TRUE) for(i.ext in 1:length(selected_ext)){numberxcol[i.ext+1,extensions[[selected_ext[i.ext]]]$included] <- TRUE} rowxnumber <- matrix(rep(result@RowxNumber[,i.BC],length(selected_ext)+1),ncol=length(selected_ext)+1) names_temp <- c(paste0("BC",i.BC),paste0("BC",i.BC,"_Ext",1:length(selected_ext))) rownames(numberxcol) <- names_temp colnames(rowxnumber) <- names_temp rowxnumber_list[[i.BC]] <- rowxnumber numberxcol_list[[i.BC]] <- numberxcol BC.extended_list[[i.BC]] <- data.frame(BC_Original=i.BC,Number_Extended=length(selected_ext),Number_AddedColumns=max_extracol) }else{ rowxnumber_list[[i.BC]] <- result@RowxNumber[,i.BC,drop=FALSE] colnames(rowxnumber_list[[i.BC]]) <- paste0("BC",i.BC) numberxcol_list[[i.BC]] <- result@NumberxCol[i.BC,,drop=FALSE] rownames(numberxcol_list[[i.BC]]) <- paste0("BC",i.BC) BC.extended_list[[i.BC]] <- NULL } }else{ rowxnumber_list[[i.BC]] <- result@RowxNumber[,i.BC,drop=FALSE] colnames(rowxnumber_list[[i.BC]]) <- paste0("BC",i.BC) numberxcol_list[[i.BC]] <- result@NumberxCol[i.BC,,drop=FALSE] rownames(numberxcol_list[[i.BC]]) <- paste0("BC",i.BC) BC.extended_list[[i.BC]] <- NULL } } close(pb) # Combine all extensions from all BC's into a single Biclust result #make a object with which BC's were extended, how many resulted BC's (when equal adding length), how many EXTRA columns BC.extended <- do.call(rbind,BC.extended_list) # do a do call rbind/cbind on lists to make final result RowxNumber <- do.call(cbind,rowxnumber_list) NumberxCol <- do.call(rbind,numberxcol_list) time_extend <- round(proc.time()['elapsed']/60-time_extend,2) info_temp <- result@info info_temp$Time_Min$extend <- time_extend info_temp$Time_Min$full <- info_temp$Time_Min$full + time_extend info_temp$BC.Extended <- BC.extended OUT <- new("Biclust",Parameters=result@Parameters,RowxNumber=RowxNumber,NumberxCol=NumberxCol,Number=ncol(RowxNumber),info=info_temp) return(OUT) } # Function for recursive column extension algorithm extension_recursive <- function(data,included,candidates,noise,startlength){ # Filter for viable candidates candidates <- check_candidates(data=data,included=included,candidates,noise) STOP <- FALSE # A stopping procedure which prevents recursion from continuing if we encounter an already saved combination (e.g. 
if something starts with 2,1 and we already did 1,2 then we can stop) if(length(candidates)>0){ extensions_list <- .GetEnvBIBIT("extensions") # Check if we already have had this combination of added columns up till now, if so, we can stop, it will only result in the same BC if(length(extensions_list)>0 & (length(included)>(startlength+1))){ added_length <- length(included)-startlength comb_list <- lapply(extensions_list,FUN=function(x){ if(length(x$included)>=added_length){ return(sort(x$included[1:added_length])) } }) added_columns <- included[-c(1:startlength)] if(sum(unlist( lapply(comb_list,FUN=function(x){ if(!is.null(x)){ return(all(sort(added_columns)==x)) }else{return(FALSE)} }) ))>0){ STOP <- TRUE # cat("stopped\n") } } if(!STOP){ for(i.candidates in candidates){ included_new <- included included_new[length(included_new)+1] <- i.candidates candidates_new <- candidates[!candidates==i.candidates] temp <- extension_recursive(data=data,included=included_new,candidates=candidates_new,noise=noise,startlength=startlength) } } }else if(length(included)>startlength){ extensions_list <- .GetEnvBIBIT("extensions") extensions_list[[length(extensions_list)+1]] <- list( number_columns=(length(included)-startlength), included=included[-c(1:startlength)] ) .AssignEnvBIBIT(x="extensions",value=extensions_list) } } # Function to extend the columns of the bibit bicluster NAIVELY (follow order of most 1`s columns) BC_column_extension <- function(result,data,noise,extend_mincol=1,extend_limitcol=1){ # Check parameters & prepare list objects time_extend <- round(proc.time()['elapsed']/60,2) if(extend_limitcol<=0){stop("Unallowed extend_limitcol")} if(extend_mincol<1){stop("Unallowed extend_mincol")} extend_mincol <- as.integer(extend_mincol) BC.extended_list <- rowxnumber_list <- numberxcol_list <- vector("list",result@Number) # Apply extension for each BC for(i.BC in 1:result@Number){ included_columns <- result@NumberxCol[i.BC,] colsums_temp <- colSums(data[result@RowxNumber[,i.BC],]) column_candidates <- order(colsums_temp,decreasing=TRUE) if(extend_limitcol<1){ extend_limitcol2 <- ceiling(extend_limitcol*sum(result@RowxNumber[,i.BC])) }else{ extend_limitcol2 <- min(extend_limitcol,sum(result@RowxNumber[,i.BC])) } column_candidates <- column_candidates[which(colsums_temp[column_candidates]>=extend_limitcol2)] GO <- TRUE i.candidate <- 1 while(GO & (i.candidate<=length(column_candidates))){ if(!included_columns[column_candidates[i.candidate]]){ included_columns_temp <- included_columns included_columns_temp[column_candidates[i.candidate]] <- TRUE zeros_allowed <- ifelse(((noise<1)&(noise>0)),ceiling(noise*sum(included_columns_temp)),noise) zeros_in_rows <- apply(data[result@RowxNumber[,i.BC],included_columns_temp],MARGIN=1,FUN=function(x){sum(x==0)}) if(all(zeros_in_rows<=zeros_allowed)){ included_columns <- included_columns_temp i.candidate <- i.candidate+1 }else{ GO <- FALSE } }else{ i.candidate <- i.candidate+1 } } n_addedcolumns <- sum(included_columns)-sum(result@NumberxCol[i.BC,]) if(n_addedcolumns>=extend_mincol){ rowxnumber <- matrix(rep(result@RowxNumber[,i.BC],2),ncol=2) numberxcol <- matrix(FALSE,nrow=2,ncol=ncol(data)) numberxcol[1,] <- result@NumberxCol[i.BC,] numberxcol[2,included_columns] <- TRUE names_temp <- c(paste0("BC",i.BC),paste0("BC",i.BC,"_Ext1")) colnames(rowxnumber) <- names_temp rownames(numberxcol) <- names_temp rowxnumber_list[[i.BC]] <- rowxnumber numberxcol_list[[i.BC]] <- numberxcol BC.extended_list[[i.BC]] <- 
data.frame(BC_Original=i.BC,Number_Extended=1,Number_AddedColumns=n_addedcolumns) }else{ rowxnumber_list[[i.BC]] <- result@RowxNumber[,i.BC,drop=FALSE] colnames(rowxnumber_list[[i.BC]]) <- paste0("BC",i.BC) numberxcol_list[[i.BC]] <- result@NumberxCol[i.BC,,drop=FALSE] rownames(numberxcol_list[[i.BC]]) <- paste0("BC",i.BC) BC.extended_list[[i.BC]] <- NULL } } # Recombine all BC extensions into a single Biclust result BC.extended <- do.call(rbind,BC.extended_list) RowxNumber <- do.call(cbind,rowxnumber_list) NumberxCol <- do.call(rbind,numberxcol_list) time_extend <- round(proc.time()['elapsed']/60-time_extend,2) info_temp <- result@info info_temp$Time_Min$extend <- time_extend info_temp$Time_Min$full <- info_temp$Time_Min$full + time_extend info_temp$BC.Extended <- BC.extended OUT <- new("Biclust",Parameters=result@Parameters,RowxNumber=RowxNumber,NumberxCol=NumberxCol,Number=ncol(RowxNumber),info=info_temp) return(OUT) } extension_procedure <- function(result2,data,extend_noise,extend_mincol,extend_limitcol,extend_columns,extend_contained){ if(extend_columns!="none" & (result2@Number>0)){ Number_result_noEXT <- result2@Number cat("\nExtending Columns using:",toupper(extend_columns)) if(extend_columns=="naive"){ cat("...") result2 <- BC_column_extension(result=result2,data=data,noise=extend_noise,extend_mincol = extend_mincol,extend_limitcol = extend_limitcol) }else if(extend_columns=="recursive"){ cat("\n") result2 <- BC_column_extension_recursive(result=result2,data=data,noise=extend_noise,extend_mincol = extend_mincol,extend_limitcol = extend_limitcol) } cat("DONE\n") # Delete duplicate BC's (While putting the original BC's first so they do not get deleted) # n_added_BC <- result2@Number-Number_result_noEXT cat("Total BC after extending:",result2@Number,paste0("(Extra BC's: ",n_added_BC,")"),"\n\n") if(n_added_BC>0){ indices_original <- which(!grepl("_Ext",colnames(result2@RowxNumber))) cat("Checking for Duplicate Biclusters... 
") # In order to quickly delete duplicates, BC row and column memberships are encoded to 16bit words first nrow_data <- nrow(data) ncol_data <- ncol(data) nblocksrow <- ceiling(nrow_data/16) nblockscol <- ceiling(ncol_data/16) decBC_mat <- matrix(NA,nrow=result2@Number,ncol=nblocksrow+nblockscol,dimnames=list(colnames(result2@RowxNumber),NULL)) temp <- 1:nrow_data rowchunks <- split(temp,ceiling(seq_along(temp)/16)) temp <- 1:ncol_data colchuncks <- split(temp,ceiling(seq_along(temp)/16)) for(i.decBC in 1:result2@Number){ for(i.rowblock in 1:nblocksrow){ decBC_mat[i.decBC,i.rowblock] <- strtoi(paste0(result2@RowxNumber[rowchunks[[i.rowblock]],i.decBC]+0,collapse=""),2) } for(i.colblock in 1:nblockscol){ matindex <- i.colblock+nblocksrow decBC_mat[i.decBC,matindex] <- strtoi(paste0(result2@NumberxCol[i.decBC,colchuncks[[i.colblock]]]+0,collapse=""),2) } } # Change order to original BC's appear first order_temp <- 1:nrow(decBC_mat) order_temp <- c(order_temp[indices_original],order_temp[-indices_original]) decBC_mat <- decBC_mat[order_temp,] dup_temp <- duplicated(decBC_mat,MARGIN=1) temp_index <- which(dup_temp) if(length(temp_index)>0){ dup_names <- rownames(decBC_mat)[temp_index] decBC_mat <- decBC_mat[-temp_index,] dup_index <- sapply(dup_names,FUN=function(x){which(x==colnames(result2@RowxNumber))}) result2@RowxNumber <- result2@RowxNumber[,-dup_index,drop=FALSE] result2@NumberxCol <- result2@NumberxCol[-dup_index,,drop=FALSE] result2@Number <- nrow(result2@NumberxCol) } cat("DONE\n") cat("Number of duplicate BC's deleted:",sum(dup_temp),"\n") # If required, check if biclusters are contained within each other (excluding original biclusters) # if(extend_contained & n_added_BC>1){ cat("\nChecking for contained Biclusters... \n") # Exclude original biclusters from this procedure indices_original <- which(!grepl("_Ext",rownames(decBC_mat))) decBC_mat <- decBC_mat[-indices_original,,drop=FALSE] contained_vector <- rep(NA,nrow(decBC_mat)) # note: go through all, but skip if current i.decBC or j.decBC is already in contained_vector pb <- txtProgressBar(min=1,max=(nrow(decBC_mat)-1),initial=1,style=3) for(i.decBC in 1:(nrow(decBC_mat)-1)){ ## Progress dots # progress_dots(i=i.decBC,nBC=nrow(decBC_mat)-1) setTxtProgressBar(pb,i.decBC) # if(!(i.decBC%in%contained_vector)){ for(j.decBC in (i.decBC+1):(nrow(decBC_mat))){ if(!(j.decBC%in%contained_vector)){ current_comp <- c(i.decBC,j.decBC) contained <- BCcontained(decBC_mat[i.decBC,],decBC_mat[j.decBC,]) if(!is.null(contained)){ contained_vector[current_comp[contained]] <- current_comp[contained] } } } } # cat(length(contained_vector[!is.na(contained_vector)]),"\n") } close(pb) contained_vector <- contained_vector[!is.na(contained_vector)] if(length(contained_vector)>0){ contained_names <- rownames(decBC_mat)[contained_vector] contained_index <- sapply(contained_names,FUN=function(x){which(x==colnames(result2@RowxNumber))}) result2@RowxNumber <- result2@RowxNumber[,-contained_index] result2@NumberxCol <- result2@NumberxCol[-contained_index,] result2@Number <- nrow(result2@NumberxCol) } cat("DONE\n") cat("Number of contained within BC's deleted:",length(contained_vector),"\n\n") } cat("Final Total of Biclusters:",result2@Number,"\n\n") # Fix BC naming in rowxnumber and numberxcol (so Ext starts with 1 again and goes further +1) indices_original <- which(!grepl("_Ext",colnames(result2@RowxNumber))) names_list <- vector("list",length(indices_original)) if(length(names_list)>1){ for(i in 1:(length(indices_original)-1)){ names_list[[i]] <- 
colnames(result2@RowxNumber)[indices_original[i]:(indices_original[i+1]-1)] } } names_list[[length(indices_original)]] <- colnames(result2@RowxNumber)[indices_original[length(indices_original)]:length(colnames(result2@RowxNumber))] newnames <- unlist(lapply(names_list,FUN=function(x){ if(length(x)>1){x[2:length(x)] <- paste0(x[1],"_Ext",1:(length(x)-1))} return(x) })) colnames(result2@RowxNumber) <- newnames rownames(result2@NumberxCol) <- newnames # Adapt BC.Extended info + make NULL object if no extensions BC.Extended <- result2@info$BC.Extended if(!is.null(BC.Extended)){ for(i in 1:nrow(BC.Extended)){ BC.Extended$Number_Extended[i] <- length(names_list[[BC.Extended$BC_Original[i]]])-1 } result2@info$BC.Extended <- BC.Extended[BC.Extended$Number_Extended>0,] } } } return(result2) } # BC_column_extension_pattern <- function(result,data,noise){ # # # BC_extended <- rep(FALSE,result@Number) # # for(i.BC in 1:result@Number){ # included_columns <- result@NumberxCol[i.BC,] # # column_candidates <- order(colSums(data[result@RowxNumber[,i.BC],]),decreasing=TRUE) # # GO <- TRUE # i.candidate <- 1 # # # while(GO & (i.candidate<=length(column_candidates))){ # # if(!included_columns[column_candidates[i.candidate]]){ # # included_columns_temp <- included_columns # included_columns_temp[column_candidates[i.candidate]] <- TRUE # # zeros_allowed <- ifelse(((noise<1)&(noise>0)),ceiling(noise*sum(included_columns_temp)),noise) # # zeros_in_rows_withoutpattern <- apply(data[result@RowxNumber[,i.BC],included_columns_temp],MARGIN=1,FUN=function(x){sum(x==0)})[-c(1,2)] # # if(all(zeros_in_rows_withoutpattern<=zeros_allowed)){ # # included_columns <- included_columns_temp # i.candidate <- i.candidate+1 # # # }else{ # GO <- FALSE # } # }else{ # i.candidate <- i.candidate+1 # } # # } # # if(sum(included_columns)>sum(result@NumberxCol[i.BC,])){BC_extended[i.BC] <- TRUE} # result@NumberxCol[i.BC,] <- included_columns # # } # # if(sum(BC_extended)>=1){ # result@RowxNumber <- result@RowxNumber[,BC_extended,drop=FALSE] # result@NumberxCol <- result@NumberxCol[BC_extended,,drop=FALSE] # result@Number <- sum(BC_extended) # result@info <- list() # }else{ # nrow.data <- nrow(result@RowxNumber) # ncol.data <- ncol(result@NumberxCol) # # result <- new("Biclust",Parameters=result@Parameters, # RowxNumber=matrix(FALSE,nrow=nrow.data,ncol=1), # NumberxCol=matrix(FALSE,nrow=1,ncol=ncol.data), # Number=0, # info=list()) # } # # # return(result) # }
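# ------------------------------------------------------------------------------
# Small commented illustration of how the `noise` argument is interpreted in
# check_candidates() and BC_column_extension() above: a value strictly between
# 0 and 1 acts as a percentage of the current bicluster column size, while a
# value >= 1 is an absolute number of allowed 0's per row. `allowed_zeros` is a
# hypothetical helper that just isolates the ifelse() used in those functions.
#
# allowed_zeros <- function(noise,ncol_bc){
#   ifelse(((noise<1)&(noise>0)),ceiling(noise*ncol_bc),noise)
# }
# allowed_zeros(0.1,5)    # 10% of 5 columns  -> 1 zero allowed per row
# allowed_zeros(0.1,25)   # 10% of 25 columns -> 3 zeros allowed per row
# allowed_zeros(2,25)     # absolute          -> 2 zeros allowed per row
# ------------------------------------------------------------------------------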
# --- end of source file: /scratch/gouwar.j/cran-all/cranData/BiBitR/R/extension.R ---
#' Finding Maximum Size Biclusters #' #' Simple function which scans a \code{Biclust} result and returns which biclusters have maximum row, column or size (row*column). #' #' @author Ewoud De Troyer #' #' @export #' @param result A \code{Biclust} result. (e.g. The return object from \code{bibit} or \code{bibit2}) #' @param top The number of top row/col/size dimension which are searched for. (e.g. default \code{top=1} gives only the maximum) #' #' @return A list containing: #' \itemize{ #' \item \code{$row}: A matrix containing in the columns the Biclusters which had maximum rows, and in the rows the Row Dimension, Column Dimension and Size. #' \item \code{$column}: A matrix containing in the columns the Biclusters which had maximum columns, and in the rows the Row Dimension, Column Dimension and Size. #' \item \code{$size}: A matrix containing in the columns the Biclusters which had maximum size, and in the rows the Row Dimension, Column Dimension and Size. #' } #' #' @examples #' \dontrun{ #' data <- matrix(sample(c(0,1),100*100,replace=TRUE,prob=c(0.9,0.1)),nrow=100,ncol=100) #' data[1:10,1:10] <- 1 # BC1 #' data[11:20,11:20] <- 1 # BC2 #' data[21:30,21:30] <- 1 # BC3 #' data <- data[sample(1:nrow(data),nrow(data)),sample(1:ncol(data),ncol(data))] #' result <- bibit(data,minr=2,minc=2) #' #' MaxBC(result) #' #' } MaxBC <- function(result,top=1){ if(class(result)!="Biclust" & class(result)!="iBBiG"){stop("result needs to be of class 'Biclust'")} rowsum <- colSums(result@RowxNumber) colsum <- rowSums(result@NumberxCol) sizesum <- rowsum*colsum top.col <- sort(unique(colsum),decreasing=TRUE)[1:top] top.row <- sort(unique(rowsum),decreasing=TRUE)[1:top] top.size <- sort(unique(sizesum),decreasing=TRUE)[1:top] for(i in 1:top){ ind.colmax <- which(top.col[i]==colsum) ind.rowmax <- which(top.row[i]==rowsum) ind.sizemax <- which(top.size[i]==sizesum) if(i==1){ row <- rbind(RowDim=rowsum[ind.rowmax],ColDim=colsum[ind.rowmax],SizeDim=sizesum[ind.rowmax]) colnames(row) <- paste0("BC",ind.rowmax) column <- rbind(RowDim=rowsum[ind.colmax],ColDim=colsum[ind.colmax],SizeDim=sizesum[ind.colmax]) colnames(column) <- paste0("BC",ind.colmax) size <- rbind(RowDim=rowsum[ind.sizemax],ColDim=colsum[ind.sizemax],SizeDim=sizesum[ind.sizemax]) colnames(size) <- paste0("BC",ind.sizemax) }else{ if(length(ind.rowmax)>0){ row.temp <- rbind(RowDim = rowsum[ind.rowmax], ColDim = colsum[ind.rowmax], SizeDim = sizesum[ind.rowmax]) colnames(row.temp) <- paste0("BC", ind.rowmax) row <- cbind(row, row.temp) } if(length(ind.colmax)>0){ column.temp <- rbind(RowDim = rowsum[ind.colmax], ColDim = colsum[ind.colmax], SizeDim = sizesum[ind.colmax]) colnames(column.temp) <- paste0("BC", ind.colmax) column <- cbind(column, column.temp) } if(length(ind.sizemax)>0){ size.temp <- rbind(RowDim = rowsum[ind.sizemax], ColDim = colsum[ind.sizemax], SizeDim = sizesum[ind.sizemax]) colnames(size.temp) <- paste0("BC", ind.sizemax) size <- cbind(size, size.temp) } } } return(list(row=row,column=column,size=size)) } #' Transform R matrix object to BiBit input files. #' #' Transform the R matrix object to 1 \code{.arff} for the data and 2 \code{.csv} files for the row and column names. These are the 3 files required for the original BiBit Java algorithm #' The path of these 3 files can then be used in the \code{arff_row_col} parameter of the \code{bibit} function. #' #' @author Ewoud De Troyer #' #' @export #' @param matrix The binary input matrix. #' @param name Basename for the 3 input files. 
#' @param path Directory path to which the 3 input files are written.
#'
#' @return 3 input files for BiBit:
#' \itemize{
#' \item One \code{.arff} file containing the data.
#' \item One \code{.csv} file for the row names. The file contains 1 column of names without quotation.
#' \item One \code{.csv} file for the column names. The file contains 1 column of names without quotation.
#' }
#'
#' @examples
#' \dontrun{
#' data <- matrix(sample(c(0,1),100*100,replace=TRUE,prob=c(0.9,0.1)),nrow=100,ncol=100)
#' data[1:10,1:10] <- 1 # BC1
#' data[11:20,11:20] <- 1 # BC2
#' data[21:30,21:30] <- 1 # BC3
#' data <- data[sample(1:nrow(data),nrow(data)),sample(1:ncol(data),ncol(data))]
#'
#' make_arff_row_col(matrix=data,name="data",path="")
#'
#' result <- bibit(data,minr=5,minc=5,
#'                 arff_row_col=c("data_arff.arff","data_rownames.csv","data_colnames.csv"))
#' }
make_arff_row_col <- function(matrix,name="data",path=""){

  if(class(matrix)!="matrix"){stop("matrix parameter should contain a matrix object",call.=FALSE)}
  if(!identical(as.numeric(as.vector(matrix)),as.numeric(as.logical(matrix)))){stop("matrix is not a binary matrix!",call.=FALSE)}
  if(is.null(rownames(matrix))){rownames(matrix) <- paste0("Row",c(1:nrow(matrix)))}
  if(is.null(colnames(matrix))){colnames(matrix) <- paste0("Col",c(1:ncol(matrix)))}

  # Check if rownames & colnames contain ; or , -> should be deleted and give a warning that it was deleted
  rowdot <- grepl(",",rownames(matrix))
  if(sum(rowdot)>0){
    rownames(matrix) <- gsub(",","",rownames(matrix))
    warning(paste0("Row names ",paste0(which(rowdot),collapse = ",")," contained a ',' which was deleted."),call.=FALSE)
  }
  rowsc <- grepl(";",rownames(matrix))
  if(sum(rowsc)>0){
    rownames(matrix) <- gsub(";","",rownames(matrix))
    warning(paste0("Row names ",paste0(which(rowsc),collapse = ",")," contained a ';' which was deleted."),call.=FALSE)
  }
  coldot <- grepl(",",colnames(matrix))
  if(sum(coldot)>0){
    colnames(matrix) <- gsub(",","",colnames(matrix))
    warning(paste0("Column names ",paste0(which(coldot),collapse = ",")," contained a ',' which was deleted."),call.=FALSE)
  }
  colsc <- grepl(";",colnames(matrix))
  if(sum(colsc)>0){
    colnames(matrix) <- gsub(";","",colnames(matrix))
    warning(paste0("Column names ",paste0(which(colsc),collapse = ",")," contained a ';' which was deleted."),call.=FALSE)
  }

  # No duplicate row names allowed!
if(sum(table(rownames(matrix))>1)){stop("No duplicate row names allowed!")} write.arff(t(matrix),file=paste0(getwd(),"/",path,"/",name,"_arff.arff")) write.table(matrix(rownames(matrix),ncol=1),quote=FALSE,row.names=FALSE,col.names=FALSE,file=paste0(getwd(),"/",path,"/",name,"_rownames.csv")) write.table(matrix(colnames(matrix),ncol=1),quote=FALSE,row.names=FALSE,col.names=FALSE,file=paste0(getwd(),"/",path,"/",name,"_colnames.csv")) } rows_in_BC <- function(bicresult,rows){ if(class(bicresult)!="Biclust"){stop("bicresult is not a Biclust class object",call.=FALSE)} BC.boolean <- sapply(1:bicresult@Number,FUN=function(x){ return(all(rows%in%which(bicresult@RowxNumber[,x]))) }) BC.sel <- which(BC.boolean) rowdim <- colSums(bicresult@RowxNumber[,BC.sel,drop=FALSE]) coldim <- rowSums(bicresult@NumberxCol[BC.sel,,drop=FALSE]) sizedim <- rowdim*coldim out <- matrix(c(rowdim,coldim,sizedim),byrow=TRUE,nrow=3,ncol=length(BC.sel),dimnames=list(c("RowDim","ColDim","SizeDim"),paste0("BC",BC.sel))) out <- out[,order(sizedim,decreasing=TRUE)] return(out) } rows_full1_in_BC <- function(matrix,bicresult,rows){ if(class(bicresult)!="Biclust"){stop("bicresult is not a Biclust class object",call.=FALSE)} if(class(matrix)!="matrix"){stop("matrix parameter should contain a matrix object",call.=FALSE)} BC.boolean <- sapply(1:bicresult@Number,FUN=function(x){ if(all(rows%in%which(bicresult@RowxNumber[,x]))){ submat <- matrix[rows,bicresult@NumberxCol[x,]] return(all(submat==1)) }else{ return(FALSE) } }) BC.sel <- which(BC.boolean) rowdim <- colSums(bicresult@RowxNumber[,BC.sel,drop=FALSE]) coldim <- rowSums(bicresult@NumberxCol[BC.sel,,drop=FALSE]) sizedim <- rowdim*coldim out <- matrix(c(rowdim,coldim,sizedim),byrow=TRUE,nrow=3,ncol=length(BC.sel),dimnames=list(c("RowDim","ColDim","SizeDim"),paste0("BC",BC.sel))) out <- out[,order(sizedim,decreasing=TRUE)] return(out) } fitness_score <- function(BC,alpha=1){ if(!identical(as.numeric(as.vector(BC)),as.numeric(as.logical(BC)))){stop("BC is not a binary matrix!",call.=FALSE)} W_ik <- apply(BC,MARGIN=1,FUN=sum) p_i <- W_ik/ncol(BC) H_i <- sapply(p_i,FUN=function(x){ if(x==0 | x==1){ return(0) }else{ return(-x*log(x,base=2)-(1-x)*log(1-x,base=2)) } }) S_i <- rep(0,length(H_i)) S_i[p_i>0.5] <- W_ik[p_i>0.5]*(1-H_i[p_i>0.5])^alpha return(list(S_i=S_i,score=sum(S_i),score_idea=sum(S_i)/(nrow(BC)*ncol(BC)))) } GOF <- function(matrix,bicresult,alpha=1,verbose=FALSE){ if(class(bicresult)!="Biclust"){stop("bicresult is not a Biclust class object",call.=FALSE)} if(class(matrix)!="matrix"){stop("matrix parameter should contain a matrix object",call.=FALSE)} if(alpha<0 | alpha>1){stop("alpha should be between 0 and 1")} outputlist_S <- vector("list",bicresult@Number) names(outputlist_S) <- paste0("BC",1:bicresult@Number) outputdf <- matrix(NA,nrow=bicresult@Number,ncol=2,dimnames=list(names(outputlist_S),c("score","score_idea"))) for(i in 1:length(outputlist_S)){ BC <- matrix[bicresult@RowxNumber[,i],bicresult@NumberxCol[i,]] temp <- fitness_score(BC,alpha=alpha) outputlist_S[[i]] <- temp$S_i outputdf[i,] <- c(temp$score,temp$score_idea) } output <- list(fitness=as.data.frame(outputdf),S_i=outputlist_S) class(output) <- "GOFBC" # Do a summary print if(verbose){summary(output)} return(output) } summary.GOFBC <- function(object,...){ score <- object$fitness$score score_idea <- object$fitness$score_idea names(score) <- names(score_idea) <- rownames(object$fitness) selnumber <- ifelse(length(score)>=5,5,length(score)) cat("Top 5 Fitness Scores:\n") 
cat("---------------------\n") print(sort(score,decreasing=TRUE)[1:selnumber]) cat("\n") cat("Top 5 Experimental Fitness Scores:\n") cat("----------------------------------\n") print(sort(score_idea,decreasing=TRUE)[1:selnumber]) } #' @export print.bibit3 <- function(x,...){ for(i in 1:(length(x)-1)){ cat("\n") cat(toupper(names(x)[i]),"\n") cat(paste0(rep("-",nchar(toupper(names(x)[i]))),collapse=""),"\n\n") patterns <- c("FullPattern","SubPattern","Extended") for(j in 1:length(patterns)){ cat(paste0(toupper(patterns[j]),":"),"\n") object <- x[[i]][[patterns[j]]] n<-object@Number if(n>1) { cat("\nNumber of Clusters found: ",object@Number, "\n") cat("\nCluster sizes:\n") rowcolsizes<-rbind(colSums(object@RowxNumber[,1:n]),rowSums(object@NumberxCol[1:n,])) rownames(rowcolsizes)<-c("Number of Rows:","Number of Columns:") colnames(rowcolsizes)<-paste("BC", 1:n) #print.default(format(rowcolsizes, print.gap = 2, quote = FALSE)) print(rowcolsizes) } else { if(n==1) cat("\nThere was one cluster found with\n ",sum(object@RowxNumber[,1]), "Rows and ", sum(object@NumberxCol), "columns\n") if(n==0) cat("\nThere was no cluster found\n") } cat("\n\n") } } } #' @export summary.bibit3 <- function(object,...){ print(object) } #' @title Extract BC from \code{bibit3} result and add pattern #' #' @description Function which will print the BC matrix and add 2 duplicate articial pattern rows on top. The function allows you to see the BC and the pattern the BC was guided towards to. #' @author Ewoud De Troyer #' #' @export #' @param result Result produced by \code{\link{bibit3}} #' @param matrix The binary input matrix. #' @param pattern Vector containing either the number or name of which patterns the BC results should be extracted. #' @param type Vector for which BC results should be printed. #' \itemize{ #' \item Full Pattern (\code{"full"}) #' \item Sub Pattern (\code{"sub"}) #' \item Extended (\code{"ext"}) #' } #' @param BC Vector of BC indices which should be printed, conditioned on \code{pattern} and \code{type}. #' @return Prints queried biclusters. #' @examples #' \dontrun{ #' set.seed(1) #' data <- matrix(sample(c(0,1),100*100,replace=TRUE,prob=c(0.9,0.1)),nrow=100,ncol=100) #' data[1:10,1:10] <- 1 # BC1 #' data[11:20,11:20] <- 1 # BC2 #' data[21:30,21:30] <- 1 # BC3 #' colsel <- sample(1:ncol(data),ncol(data)) #' data <- data[sample(1:nrow(data),nrow(data)),colsel] #' #' pattern_matrix <- matrix(0,nrow=3,ncol=100) #' pattern_matrix[1,1:7] <- 1 #' pattern_matrix[2,11:15] <- 1 #' pattern_matrix[3,13:20] <- 1 #' #' pattern_matrix <- pattern_matrix[,colsel] #' #' #' out <- bibit3(matrix=data,minr=2,minc=2,noise=0.1,pattern_matrix=pattern_matrix, #' subpattern=TRUE,extend_columns=TRUE,pattern_combinations=TRUE) #' out # OR print(out) OR summary(out) #' #' #' bibit3_patternBC(result=out,matrix=data,pattern=c(1),type=c("full","sub","ext"),BC=c(1,2)) #' } bibit3_patternBC <- function(result,matrix,pattern=c(1),type=c("full","sub","ext"),BC=c(1)){ if(class(result)!="bibit3"){stop("result is not a `bibit3' S3 object")} # Check if matrix is binary (DISCRETIZED NOT YET IMPLEMENTED!) 
if(class(matrix)!="matrix"){stop("matrix parameter should contain a matrix object",call.=FALSE)} if(!identical(as.numeric(as.vector(matrix)),as.numeric(as.logical(matrix)))){stop("matrix is not a binary matrix!",call.=FALSE)} if(is.null(rownames(matrix))){rownames(matrix) <- paste0("Row",c(1:nrow(matrix)))} if(is.null(colnames(matrix))){colnames(matrix) <- paste0("Col",c(1:ncol(matrix)))} # Checking other input nPatterns <- nrow(result$pattern_matrix) if(class(pattern)=="character"){ if(sum(!(pattern %in% names(result)))>0){stop("One of the patterns is not in the result object")} pattern <- sapply(pattern,FUN=function(x){which(x==names(result))}) } if(class(pattern)!="numeric"){stop("pattern should be numeric vector")} if(sum(pattern>nPatterns)>0){stop("One of the patterns is not in the result object")} if(sum(!(type %in% c("full","sub","ext")))>0){stop("type contains wrong input")} type <- sapply(type,FUN=function(x){switch(x,full="FullPattern",sub="SubPattern",ext="Extended")}) names(type) <- NULL if(!(class(BC)%in%c("integer","numeric"))){stop("BC should be numeric vector")} # Printing for(i.pattern in pattern){ for(i.type in type){ for(i.BC in BC){ if(i.BC<=result[[i.pattern]][[i.type]]@Number){ cat(paste0(toupper(names(result)[i.pattern])," - ",i.type," - BC ",i.BC)) extra_rows <- matrix(rep(result$pattern_matrix[i.pattern,],2),nrow=2,byrow=TRUE,dimnames=list(paste0(names(result)[i.pattern],c("_Art1","_Art2")),colnames(matrix))) BCprint <- matrix[result[[i.pattern]][[i.type]]@RowxNumber[,i.BC],result[[i.pattern]][[i.type]]@NumberxCol[i.BC,]] cat("\n\n") print(rbind(extra_rows[,result[[i.pattern]][[i.type]]@NumberxCol[i.BC,]],BCprint)) cat("\n\n") } } } } } jaccard_bc <- function(result,BC1,BC2){ combine.m <- result1.m <- result2.m <- matrix(0,nrow=dim(result@RowxNumber)[1],ncol=dim(result@NumberxCol)[2]) result1.m[result@RowxNumber[,BC1],result@NumberxCol[BC1,]] <- 1 combine.m[result@RowxNumber[,BC1],result@NumberxCol[BC1,]] <- 1 result2.m[result@RowxNumber[,BC2],result@NumberxCol[BC2,]] <- 1 combine.m[result@RowxNumber[,BC2],result@NumberxCol[BC2,]] <- 1 m1 <- sum(result1.m) m2 <- sum(result2.m) m12 <- sum(combine.m) JI <- (m1+m2-(m12))/(m12) return(JI) } same_bc <- function(result,BC1,BC2){ if((all((result@RowxNumber[,BC1]-result@RowxNumber[,BC2])==0))&(all((result@NumberxCol[BC1,]-result@NumberxCol[BC2,])==0))){return(TRUE)}else{FALSE} } # Small function which checks if 1 BC is contained in another. 
# The input should be a bit word representation of the row and column booleans of both BC's
BCcontained <- function(BC1word,BC2word){
  intersectword <- apply(rbind(BC1word,BC2word),MARGIN=2,FUN=function(x){bitwAnd(x[1],x[2])})
  if(all(intersectword==BC1word)){return(1)}
  if(all(intersectword==BC2word)){return(2)}
}

progress_dots <- function(i,nBC){
  if(nBC<=200){
    if(i%%40==0 | i==nBC){
      cat(".",i,"\n")
    }else{
      cat(".")
    }
  }else{
    linenumber <- ceiling(nBC/5)
    numberperdot <- round(linenumber/40,digits=0)
    if(i%%linenumber==0 | i==nBC){
      cat(".",i,"\n")
    }else{
      if(i%%numberperdot==0){cat(".")}
    }
  }
}

biclust_correctdim <- function(result,matrix){
  if(class(result)=="Biclust"){
    if((nrow(matrix)!=nrow(result@RowxNumber))|(ncol(matrix)!=ncol(result@NumberxCol))){stop("result and matrix have incompatible dimensions",call.=FALSE)}
  }else if(class(result)=="BiBitWorkflow"){
    if((nrow(matrix)!=nrow(result$Biclust@RowxNumber))|(ncol(matrix)!=ncol(result$Biclust@NumberxCol))){stop("result and matrix have incompatible dimensions",call.=FALSE)}
  }
}

#' @title Summary Method for Biclust Class
#' @description Summary Method for Biclust Class
#' @param object Biclust S4 Object
#' @export
setMethod("summary", "Biclust", function(object) {
  cat("\nAn object of class",class(object),"\n\n")
  cat("call:", deparse(object@Parameters$Call,0.75*getOption("width")), sep="\n\t")
  n<-object@Number
  if(n>1) {
    cat("\nNumber of Clusters found: ",object@Number, "\n")
    cat("\nCluster sizes:\n")
    rowcolsizes<-rbind(colSums(object@RowxNumber[,1:n]),rowSums(object@NumberxCol[1:n,]))
    rownames(rowcolsizes)<-c("Number of Rows:","Number of Columns:")
    colnames(rowcolsizes)<-paste("BC", 1:n)
    #print.default(format(rowcolsizes, print.gap = 2, quote = FALSE))
    print(rowcolsizes)
  }
  else {
    if(n==1) cat("\nThere was one cluster found with\n ",sum(object@RowxNumber[,1]), "Rows and ", sum(object@NumberxCol), "columns")
    if(n==0) cat("\nThere was no cluster found")
  }
  cat("\n\n")
})
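# ------------------------------------------------------------------------------
# Commented sketch of the bit-word encoding that extension_procedure() and
# BCcontained() rely on: row/column membership vectors are packed into integers
# (16 columns per word in the actual code) so that duplicate and containment
# checks reduce to integer comparisons and bitwAnd(). The vectors below are
# hypothetical toy memberships.
#
# bc1 <- c(1,1,0,0,1,0)                        # membership of BC1
# bc2 <- c(1,1,1,0,1,0)                        # membership of BC2 (superset)
# w1 <- strtoi(paste0(bc1,collapse=""),base=2) # 110010 -> 50
# w2 <- strtoi(paste0(bc2,collapse=""),base=2) # 111010 -> 58
# bitwAnd(w1,w2)==w1    # TRUE: intersection equals BC1, so BC1 is contained
# BCcontained(w1,w2)    # returns 1, the index of the contained bicluster
# ------------------------------------------------------------------------------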
# --- end of source file: /scratch/gouwar.j/cran-all/cranData/BiBitR/R/utilities.R ---
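# ------------------------------------------------------------------------------
# Commented toy illustration of the Jaccard Index similarity used in Step 2 of
# BiBitWorkflow below (cf. jaccard_bc() in utilities.R): for two biclusters the
# index is |intersection|/|union| of their memberships; with
# similarity_type="col" only the column memberships enter the computation.
# `colpat1`/`colpat2` are hypothetical logical column-membership vectors.
#
# colpat1 <- c(TRUE,TRUE,TRUE,FALSE,FALSE)
# colpat2 <- c(FALSE,TRUE,TRUE,TRUE,FALSE)
# sum(colpat1 & colpat2)/sum(colpat1 | colpat2)   # 2/4 = 0.5
# ------------------------------------------------------------------------------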
# library(biclust)
# library(BiBitR)
# library(viridis) # viridis
# library(cluster) # agnes
# library(dendextend) # color_branches
# # stats: hclust, as.hclust, cutree
# library(lattice) # levelplot

# TO DO:
# - complete documentation
# - add memory option to bibit/bibit2/bibit3
# - Make CompareResultJI more efficient for single result

#' @title BiBit Workflow
#' @description Workflow to discover larger (noisy) patterns in big data using BiBit
#' @details Looking for Noisy Biclusters in large data using BiBit (\code{\link{bibit2}}) often results in many (overlapping) biclusters.
#' In order to decrease the number of biclusters and find larger meaningful patterns which make up noisy biclusters, the following workflow can be applied.
#' Note that this workflow is primarily used for data where there are many more rows (e.g. patients) than columns (e.g. symptoms). For example, the workflow would discover larger meaningful symptom patterns which subsets of the patients share, conditioned on the allowed noise/zeros.
#' \enumerate{
#' \item Apply BiBit with \emph{no noise} (Preferably with high enough \code{minr} and \code{minc}).
#' \item Compute Similarity Matrix (Jaccard Index) of all biclusters. By default this measure is only based on column similarity.
#' This implies that the rows of the BC's are not of interest in this step. The goal then would be to discover highly overlapping column patterns and, in the next steps, merge them together.
#' \item Apply Agglomerative Hierarchical Clustering on the Similarity Matrix (default = average link)
#' \item Cut the dendrogram of the clustering result and merge the biclusters based on this. (default = number of clusters is determined by the Tibs2001SEmax Gap Statistic)
#' \item Extract Column Memberships of the Merged Biclusters. These are saved as the new column \emph{Patterns}.
#' \item Starting from these patterns, \emph{(noisy) rows} are grown which match the pattern, creating a single final bicluster for each pattern. At the end duplicate/non-maximal BC's are deleted.
#' }
#' Using the described workflow (and column similarity in Step 2), the final result will contain biclusters which focus on larger column patterns.
#' @author Ewoud De Troyer
#' @export
#' @param matrix The binary input matrix.
#' @param minr The minimum number of rows of the Biclusters.
#' @param minc The minimum number of columns of the Biclusters.
#' @param similarity_type Which dimension to use for the Jaccard Index in Step 2. This is either columns (\code{"col"}, default) or both (\code{"both"}).
#' @param func Which clustering function to use in Step 3. Either \code{"agnes"} (= default) or \code{"hclust"}.
#' @param link Which clustering link to use in Step 3. The available links (depending on \code{func}) are:
#' \itemize{
#' \item{\code{hclust}: }\code{"ward.D"}, \code{"ward.D2"}, \code{"single"}, \code{"complete"}, \code{"average"}, \code{"mcquitty"}, \code{"median"} or \code{"centroid"}
#' \item{\code{agnes}: }\code{"average"} (default), \code{"single"}, \code{"complete"}, \code{"ward"}, \code{"weighted"}, \code{"gaverage"} or \code{"flexible"}
#' }
#' (More details in \code{\link[stats]{hclust}} and \code{\link[cluster]{agnes}})
#' @param par.method Additional parameters used for the flexible link (See \code{\link[cluster]{agnes}}). Default is \code{c(0.625)}
#' @param cut_type Which method should be used to decide the number of clusters in the tree in Step 4?
#' \itemize{
#' \item \code{"gap"}: Use the Gap Statistic (default).
#' \item \code{"number"}: Select a set number of clusters.
#' \item \code{"height"}: Cut the tree at a specific dissimilarity height.
#' }
#'
#' @param cut_pm Cut Parameter (depends on \code{cut_type}) for Step 4
#' \itemize{
#' \item Gap Statistic (\code{cut_type="gap"}): How to compute the optimal number of clusters? Choose one of the following: \code{"Tibs2001SEmax"} (default), \code{"globalmax"}, \code{"firstmax"}, \code{"firstSEmax"} or \code{"globalSEmax"}.
#' \item Number (\code{cut_type="number"}): Integer for the number of clusters.
#' \item Height (\code{cut_type="height"}): Numeric dissimilarity value where the tree should be cut (\code{[0,1]}).
#' }
#'
#' @param gap_B Number of bootstrap samples (default=500) for the Gap Statistic (\code{\link[cluster]{clusGap}}).
#' @param gap_maxK Number of clusters to consider (default=50) for the Gap Statistic (\code{\link[cluster]{clusGap}}).
#'
#' @param noise The allowed noise level when growing the rows on the merged patterns in Step 6. (default=\code{0.1}, namely allow 10\% noise.)
#' \itemize{
#' \item \code{noise=0}: No noise allowed.
#' \item \code{0<noise<1}: The \code{noise} parameter will be a noise percentage. The number of allowed 0's in a row in the bicluster will depend on the column size of the bicluster.
#' More specifically \code{zeros_allowed = ceiling(noise * columnsize)}. For example for \code{noise=0.10} and a bicluster column size of \code{5}, the number of allowed 0's would be \code{1}.
#' \item \code{noise>=1}: The \code{noise} parameter will be the number of allowed 0's in a row in the bicluster, independently of the column size of the bicluster. In this noise option, the noise parameter should be an integer.
#' }
#' @param noise_select Should the allowed noise level be automatically selected for each pattern? (Using an ad hoc method to find the elbow/kink in the Noise Scree plots)
#' \itemize{
#' \item \code{noise_select=0}: Do \emph{NOT} automatically select the noise levels. Use the noise level given in the \code{noise} parameter (default).
#' \item \code{noise_select=1}: Using the Noise Scree plot (with 'Added Rows' on the y-axis), find the noise level where the current number of added rows at this noise level is larger than the mean of 'added rows' at the lower noise levels.
#' After locating this noise level, lower the noise level by 1. This is your automatically selected elbow/kink and therefore your noise level.
#' \item \code{noise_select=2}: Applies the same steps as for \code{noise_select=1}, but instead of decreasing the noise level by only 1, keep decreasing the noise level until the number of added rows isn't decreasing anymore either.
#' }
#'
#' @param plots Vector for which plots to draw:
#' \enumerate{
#' \item Image plot of the similarity matrix computed in Step 2.
#' \item Same as \code{plots=1}, but the rows and columns are reordered with the hierarchical tree.
#' \item Dendrogram of the tree, its clusters colored after the chosen cut has been applied.
#' \item Noise Scree plots for all the Saved Patterns. Two plots will be plotted, both with Noise on the x-axis. The first one will have the Added Number of Rows at that noise level on the y-axis, while the second will have the Total Number of Rows (i.e. cumulative of the first).
#' If the title of one of the subplots is red, then this means that the Bicluster grown from this pattern, using the chosen noise level, was eventually deleted due to being a duplicate or non-maximal.
#' \item Image plot of the Jaccard Index similarity matrix between the final biclusters after Step 6. #' } #' @param BCresult Import a BiBit Biclust result for Step 1 (e.g. extract from an older BiBitWorkflow object \code{$info$BiclustInitial}). This can be useful if you want to cut the tree differently/make different plots, but don't want to do the BiBit calculation again. #' @param simmatresult Import a (custom) Similarity Matrix (e.g. extract from older BiBitWorkflow object \code{$info$BiclustSimInitial}). Note that Step 1 (BiBit) will still be executed if \code{BCresult} is not provided. #' @param treeresult Import a (custom) tree (\code{hclust} object) based on the BiBit/Similarity (e.g. extract from older BiBitWorkflow object \code{$info$Tree}). #' @param plot.type Output Type #' \itemize{ #' \item \code{"device"}: All plots are outputted to new R graphics devices (default). #' \item \code{"file"}: All plots are saved in external files. Plots 1 and 2 are saved in separate \code{.png} files while all other plots are joint together in a single \code{.pdf} file. #' \item \code{"other"}: All plots are outputted to the current graphics device, but will overwrite each other. Use this if you want to include one or more plots in a sweave/knitr file or if you want to export a single plot by your own chosen format. #' } #' @param filename Base filename (with/without directory) for the plots if \code{plot.type="file"} (default=\code{"BiBitWorkflow"}). #' @param verbose Logical value if progress of workflow should be printed. #' #' @return A BiBitWorkflow S3 List Object with 3 slots: #' \itemize{ #' \item \code{Biclust}: Biclust Class Object of Final Biclustering Result (after Step 6). #' \item \code{BiclustSim}: Jaccard Index Similarity Matrix of Final Biclustering Result (after Step 6). #' \item \code{info}: List Object containing: #' \itemize{ #' \item \code{BiclustInitial}: Biclust Class Object of Initial Biclustering Result (after Step 1). #' \item \code{BiclustSimInitial}: Jaccard Index Similarity Matrix of Initial Biclustering Result (after Step 1). #' \item \code{Tree}: Hierarchical Tree of \code{BiclustSimInitial} as \code{hclust} object. #' \item \code{Number}: Vector containing the initial number of biclusters (\code{InitialNumber}), the number of saved patterns after cutting the tree (\code{PatternNumber}) and the final number of biclusters (\code{FinalNumber}). #' \item \code{GapStat}: Vector containing all different optimal cluster numbers based on the Gap Statistic. #' \item \code{BC.Merge}: A list (length of merged saved patterns) containing which biclusters were merged together after cutting the tree. #' \item \code{MergedColPatterns}: A list (length of merged saved patterns) containing the indices of which columns make up that pattern. #' \item \code{MergedNoiseThresholds}: A vector containing the selected noise levels for the merged saved patterns. #' \item \code{Coverage}: A list containing: 1. a vector of the total number (and percentage) of unique rows the final biclusters cover. 2. a table showing how many rows are used more than a single time in the final biclusters. #' \item \code{Call}: A match.call of the original function call. 
#' } #' } #' #' @examples #' \dontrun{ #' ## Simulate Data ## #' # DATA: 10000x50 #' # BC1: 200x10 #' # BC2: 100x10 #' # BC1 and BC2 overlap 5 columns #' #' # BC3: 200x10 #' # BC4: 100x10 #' # BC3 and bC4 overlap 2 columns #' #' # Background 1 percentage: 0.15 #' # BC Signal Percentage: 0.9 #' #' set.seed(273) #' mat <- matrix(sample(c(0,1),10000*50,replace=TRUE,prob=c(1-0.15,0.15)), #' nrow=10000,ncol=50) #' mat[1:200,1:10] <- matrix(sample(c(0,1),200*10,replace=TRUE,prob=c(1-0.9,0.9)), #' nrow=200,ncol=10) #' mat[300:399,6:15] <- matrix(sample(c(0,1),100*10,replace=TRUE,prob=c(1-0.9,0.9)), #' nrow=100,ncol=10) #' mat[400:599,21:30] <- matrix(sample(c(0,1),200*10,replace=TRUE,prob=c(1-0.9,0.9)), #' nrow=200,ncol=10) #' mat[700:799,29:38] <- matrix(sample(c(0,1),100*10,replace=TRUE,prob=c(1-0.9,0.9)), #' nrow=100,ncol=10) #' mat <- mat[sample(1:10000,10000,replace=FALSE),sample(1:50,50,replace=FALSE)] #' #' #' # Computing gap statistic for initial 1381 BC takes approx. 15 min. #' # Gap Statistic chooses 4 clusters. #' out <- BiBitWorkflow(matrix=mat,minr=50,minc=5,noise=0.2) #' summary(out$Biclust) #' #' # Reduce computation by selecting number of clusters manually. #' # Note: The "ClusterRowCoverage" function can be used to provided extra info #' # on the number of cluster choice. #' # How? #' # - More clusters result in smaller column patterns and more matching rows. #' # - Less clusters result in larger column patterns and less matching rows. #' # Step 1: Initial Workflow Run #' out2 <- BiBitWorkflow(matrix=mat,minr=50,minc=5,noise=0.2,cut_type="number",cut_pm=10) #' # Step 2: Use ClusterRowCoverage #' temp <- ClusterRowCoverage(result=out2,matrix=mat,noise=0.2,plots=2) #' # Step 3: Use BiBitWorkflow again (using previously computed parts) with new cut parameter #' out3 <- BiBitWorkflow(matrix=mat,minr=50,minc=5,noise=0.2,cut_type="number",cut_pm=4, #' BCresult = out2$info$BiclustInitial, #' simmatresult = out2$info$BiclustSimInitial) #' summary(out3$Biclust) #' } BiBitWorkflow <- function(matrix,minr=2,minc=2, similarity_type="col", func="agnes",link="average",par.method=0.625, cut_type="gap",cut_pm="Tibs2001SEmax",gap_B=500,gap_maxK=50, noise=0.1,noise_select=0, plots=c(3:5), BCresult=NULL,simmatresult=NULL,treeresult=NULL, plot.type="device",filename="BiBitWorkflow", verbose=TRUE){ # Plots # # 1. image plot of sim_mat # 2. image plot of sim_mat, reordened with tree # 3. Hierarchical tree, colored with cut # 4. Noise Scree Plot # 5. 
  if(!all(plots %in% c(1:5))){stop("plots should be part of c(1,2,3,4,5)")}
  if(length(plot.type)!=1){stop("plot.type should be of length 1",call.=FALSE)}
  if(!(plot.type %in% c("device","file","other"))){stop("plot.type should be 'device', 'file' or 'other'",call.=FALSE)}
  FIRSTPLOT <- TRUE

  pm <- match.call()

  ## PARAMETER CHECKS ##
  if(!is.matrix(matrix)){stop("matrix parameter should contain a matrix object",call.=FALSE)}
  if(!identical(as.numeric(as.vector(matrix)),as.numeric(as.logical(matrix)))){stop("matrix is not a binary matrix!",call.=FALSE)}
  if(is.null(rownames(matrix))){rownames(matrix) <- paste0("Row",c(1:nrow(matrix)))}
  if(is.null(colnames(matrix))){colnames(matrix) <- paste0("Col",c(1:ncol(matrix)))}

  if(noise<0){stop("noise parameter can not be negative",call.=FALSE)}
  if(!(noise_select %in% c(0,1,2))){stop("noise_select must be 0, 1 or 2")}

  if(!is.null(simmatresult)){if(!is.matrix(simmatresult)){stop("simmatresult needs to be a matrix")}}

  if(length(similarity_type)>1){stop("similarity_type can only be of length 1")}
  if(!(similarity_type %in% c("col","both"))){stop("similarity_type needs to be \"col\" or \"both\"")}

  if(length(func)>1){stop("func can only be of length 1")}
  if(!(func %in% c("hclust","agnes"))){stop("func needs to be \"hclust\" or \"agnes\"")}
  if(length(link)>1){stop("link can only be of length 1")}

  hclust_links <- c("ward.D","ward.D2","single","complete","average","mcquitty","median","centroid")
  agnes_links <- c("average","single","complete","ward","weighted","gaverage","flexible")
  link_options <- list(hclust=hclust_links,agnes=agnes_links)
  other_func <- setdiff(c("hclust","agnes"),func)

  if(!(link %in% link_options[[func]])){
    if(link %in% link_options[[other_func]]){
      message(paste0(link," link was not available for ",func," so ",other_func," was used.\n\n"))
      func <- other_func
    }else{
      stop(paste0(link," link not available for hclust or agnes"))
    }
  }

  if(length(cut_type)>1){stop("cut_type can only be of length 1")}
  if(length(cut_pm)>1){stop("cut_pm can only be of length 1")}
  if(!(cut_type %in% c("gap","number","height"))){stop("cut_type should be \"gap\", \"number\" or \"height\"")}
  if(cut_type=="gap" & similarity_type=="both"){stop("Gap Statistic not possible for a 2-dimensional similarity measure")}

  JI <- NULL
  number <- NULL

  if(cut_type=="gap"){
    gap_options <- c("firstSEmax", "Tibs2001SEmax", "globalSEmax", "firstmax", "globalmax")
    if(!(cut_pm %in% gap_options)){stop(paste0("for \"gap\" cut_pm should be one of the following: ",paste0(sapply(gap_options,FUN=function(y){return(paste0("\"",y,"\""))}),collapse=", ")))}
  }
  if(cut_type=="number"|cut_type=="height"){
    if(!is.numeric(cut_pm)){stop("for \"number\" or \"height\", cut_pm should be a numeric or integer")}
    if(cut_type=="number"){
      cut_pm <- as.numeric(as.integer(cut_pm))
      number <- cut_pm
    }else{
      cut_pm <- as.numeric(cut_pm)
      JI <- 1-cut_pm
    }
  }

  if(verbose){
    ## APPLY ORIGINAL BIBIT WITHOUT NOISE ##
    cat("STEP 1: ORIGINAL BIBIT WITHOUT NOISE\n")
    cat("------------------------------------\n\n")
  }

  if(!is.null(BCresult)){
    if(!inherits(BCresult,"Biclust")){stop("BCresult is not a Biclust class object")}
    result1 <- BCresult
    if(verbose){cat("BCresult was used\n")}
  }else{
    if(verbose){
      result1 <- bibit(matrix,minr=minr,minc=minc)
    }else{
      temp <- capture.output({
        result1 <- bibit(matrix,minr=minr,minc=minc)
      })
    }
  }
  if(verbose){cat("Total BC's found: ",result1@Number,"\n\n")}
  if(result1@Number<=1){stop("Not enough BC's to continue analysis.")}

  ## COMPUTE INITIAL SIMILARITY MATRIX ##
  if(verbose){
    cat("STEP 2: SIMILARITY MATRIX\n")
    cat("-------------------------\n\n")
  }
  if(!is.null(simmatresult)){
    if(verbose){cat("simmatresult was used\n")}
    sim_mat <- simmatresult
  }else{
    if(verbose){cat(paste0("Compute Jaccard Index Similarity (",similarity_type,") of ",result1@Number," BC's\n"))}
    sim_mat <- workflow_simmat(result1,type=similarity_type,verbose=verbose)
  }

  # Plot 1 #
  if(1 %in% plots){
    if(verbose){cat("Plot 1: Image Plot of Similarity Matrix\n")}
    if(plot.type=="device"){dev.new()}else if(plot.type=="file"){png(paste0(filename,"_plot1.png"))}
    image(sim_mat,col=viridis(256),axes=FALSE,main=paste0("Heatmap of JI Similarity (",similarity_type,")"))
    if(plot.type=="file"){dev.off()}
  }

  if(verbose){
    cat("\nSTEP 3: AGGLOMERATIVE HIERARCHICAL CLUSTERING TREE\n")
    cat("-------------------------------------------------\n")
    cat("Link =",link," | Function =",func,"\n\n")
  }

  # Create tree based on parameters
  if(!is.null(treeresult)){
    tree_init <- treeresult
    if(verbose){cat("treeresult was used\n")}
  }else{
    if(func=="hclust"){
      tree_init <- hclust(as.dist(1-sim_mat),method=link)
    }
    if(func=="agnes"){
      tree_init <- as.hclust(agnes(as.dist(1-sim_mat),diss=TRUE,method=link,par.method = par.method))
    }
  }

  # Plot 2 #
  if(2 %in% plots){
    if(verbose){cat("Plot 2: Image plot of Similarity Matrix, reordered with clustering tree\n\n")}
    if(plot.type=="device"){dev.new()}else if(plot.type=="file"){png(paste0(filename,"_plot2.png"))}
    heatmap((sim_mat),Rowv=as.dendrogram(tree_init),col=viridis(256),main=paste0("Heatmap of JI Similarity (",similarity_type,") - Reordered"))
    if(plot.type=="file"){dev.off()}
  }

  ## COMPUTE GAP STATISTIC ##
  gap_out <- NULL
  if(cut_type=="gap"){
    if(verbose){cat("Computing Gap Statistic...\n")}

    if(similarity_type=="col"){
      gapdata <- as.data.frame(result1@NumberxCol+0)
    }
    if(similarity_type=="row"){
      gapdata <- as.data.frame(t(result1@RowxNumber+0))
    }

    if(result1@Number<50){gap_maxK <- result1@Number-1}
    gap_stat <- clusGap((gapdata),verbose=verbose,B=gap_B,K.max=gap_maxK,FUNcluster=function(x,k){list(cluster=cutree((tree_init),k=k))})
    # gap_stat <- cluster::clusGap(as.data.frame(result1@NumberxCol),B=gap_B,K.max=gap_maxK,FUNcluster=function(x,k){list(cluster=cutree(tree_init,k=k))})
    number <- maxSE(gap_stat$Tab[, 3], gap_stat$Tab[,4], cut_pm)
    if(verbose){cat(paste0("Gap Statistic '",cut_pm,"': ",number," clusters\n"))}

    gap_out <- sapply(gap_options,FUN=function(x){maxSE(gap_stat$Tab[, 3], gap_stat$Tab[,4], x)})

    # Check for singletons
    tree_1 <- which(tree_init$height==1)
    if(length(tree_1)>0){
      merge_1 <- as.vector(tree_init$merge[tree_1,])
      singletons <- merge_1[which(merge_1<0)]*(-1)
      if(length(singletons)>0){
        message(paste0("Singletons detected: Original BC ",paste0(singletons,collapse=", "),"\nGap Statistics might not be accurate and manual number of cluster selection might be necessary."))
      }
    }

    # Check if number=1
    if(number==1){
      number <- 2
      warning("Only 1 cluster selected by gap statistic. This was increased to 2 clusters in order to complete the workflow.")
    }
  }

  # Plot 3 #
  if(3 %in% plots){
    if(is.null(JI)){
      h_temp <- NULL
      cut_txt <- paste0(number," clusters")
    }else{
      h_temp <- 1-JI
      cut_txt <- paste0(round(h_temp,2)," distance")
    }
    if(verbose){cat("Plot 3:",paste0("Dendrogram - ",func," - ",link," link (",cut_txt,")"),"\n")}
    if(plot.type=="device"){
      dev.new()
    }else if(plot.type=="file" & FIRSTPLOT){
      pdf(paste0(filename,"_plot",paste0(intersect(plots,3:5),collapse=""),".pdf"))
      FIRSTPLOT <- FALSE
    }
    plot(color_branches(as.dendrogram(tree_init),h=h_temp,k=number),main=paste0("Dendrogram - ",func," - ",link," link (",cut_txt,")"))
  }

  ## MERGE BICLUSTERS ##
  if(verbose){
    cat("\nSTEP 4: MERGING BICLUSTERS BASED ON HIERARCHICAL TREE\n")
    cat("------------------------------------------------------\n\n")
  }
  result2 <- workflow_mergeBC(result=result1,tree=tree_init,JI=JI,number=number,verbose=verbose)

  # Remove BC.Merge to put it in general info + make a list of original patterns
  BC.Merge <- result2@info$BC.Merge
  result2@info$BC.Merge <- NULL
  MergedColPatterns <- lapply(as.list(seq_len(result2@Number)),FUN=function(pattern){
    return(which(result2@NumberxCol[pattern,]))
  })
  names(MergedColPatterns) <- paste0("Pat",seq_len(result2@Number))

  ## COMPUTE NOISE FOR MERGED PATTERNS & AUTOMATIC THRESHOLDS ##
  NoisexNumber <- apply(result2@NumberxCol,MARGIN=1,FUN=function(pattern){
    return(apply(matrix[,pattern],MARGIN=1,FUN=function(row){return(sum(row==0))}))
  })
  data_noisescree <- lapply(seq_len(ncol(NoisexNumber)),FUN=function(pattern){
    tab <- table(NoisexNumber[,pattern])
    return(data.frame(Noise=as.numeric(names(tab)),Total=as.numeric(tab)))
  })
  noise_threshold <- workflow_noisethreshold(noise,noise_select,data_noisescree)
  result2@info$Noise.Threshold <- noise_threshold

  ## APPLY PATTERN BIBIT/ROWGROWING ##
  if(verbose){
    cat("\nSTEP 5: GROWING ROWS FOR MERGED PATTERNS\n")
    cat("----------------------------------------\n\n")
  }
  result3 <- workflow_UpdateBiclust_RowNoise(result=result2,matrix=matrix,
                                             noise=result2@info$Noise.Threshold,removeBC=TRUE,NoisexNumber=NoisexNumber,verbose=verbose)

  # Plot 4 #
  if(4 %in% plots){
    if(verbose){cat("\n Plot 4: Noise Scree Plot of Merged Patterns\n")}
    # Plot with added number of rows & Plot with total number of rows
    tempdim <- sqrt(result2@Number)
    extradim <- ifelse((floor(tempdim)*ceiling(tempdim))<result2@Number,1,0)
    patcols <- apply(result2@NumberxCol,MARGIN=1,FUN=sum)

    for(i.plot in c(1,2)){
      if(plot.type=="device"){
        dev.new()
      }else if(plot.type=="file" & FIRSTPLOT){
        pdf(paste0(filename,"_plot",paste0(intersect(plots,3:5),collapse=""),".pdf"))
        FIRSTPLOT <- FALSE
      }
      par(mfrow=c(floor(tempdim)+extradim,ceiling(tempdim)))

      pattern_number <- 1
      for(i.pattern in seq_len(length(data_noisescree))){
        temp_data <- data_noisescree[[i.pattern]]
        if(i.plot==2){
          temp_data$Total <- cumsum(temp_data$Total)
          ylab_temp <- "Total Number"
        }else{
          ylab_temp <- "Added Number"
        }
        if(i.pattern %in% result3@info$Deleted.Patterns){
          main_temp <- paste0("Deleted Pat"," (",patcols[i.pattern]," cols)")
          col.main <- "red"
        }else{
          main_temp <- paste0("Pat",pattern_number," (",patcols[i.pattern]," cols)")
          col.main <- "black"
          pattern_number <- pattern_number+1
        }
        plot(temp_data$Noise,temp_data$Total,main=main_temp,col.main=col.main,xlab="Noise",ylab=ylab_temp,pch=19)
        points(temp_data$Noise,temp_data$Total,type="l")
        text(temp_data$Noise,temp_data$Total,as.character(temp_data$Total),pos=3)
        abline(v=ifelse((noise_threshold[i.pattern]<1) & (noise_threshold[i.pattern]>0),
                        ceiling(noise_threshold[i.pattern]*patcols[i.pattern]),
                        noise_threshold[i.pattern]),col="blue")
      }
      par(mfrow=c(1,1))
    }
  }

  # Similarity Matrix of final result
  if(result3@Number>1){
    sim_mat_result3 <- workflow_simmat(result3,type="both",verbose=FALSE)
  }else{
    sim_mat_result3 <- matrix(1,nrow=1,ncol=1)
  }
  rownames(sim_mat_result3) <- colnames(sim_mat_result3) <- paste0("BC",1:nrow(sim_mat_result3))

  # Plot 5 #
  if(5 %in% plots){
    if(verbose){cat("\nPlot 5: Image Plot of Similarity Matrix of Final Result\n")}
    if(plot.type=="device"){
      dev.new()
    }else if(plot.type=="file" & FIRSTPLOT){
      pdf(paste0(filename,"_plot",paste0(intersect(plots,3:5),collapse=""),".pdf"))
      FIRSTPLOT <- FALSE
    }
    # image(sim_mat_result3,col=viridis(256),axes=FALSE,main=paste0("Heatmap of JI Similarity"))
    # axis(1,at=seq(0,1,length.out=ncol(sim_mat_result3)),labels=colnames(sim_mat_result3),tick=FALSE,las=2,cex.axis=0.5)
    # axis(2,at=seq(0,1,length.out=nrow(sim_mat_result3)),labels=rownames(sim_mat_result3),tick=FALSE,las=2,cex.axis=0.5)
    print(levelplot(sim_mat_result3,col.regions=viridis(256),main="Heatmap of JI Similarity",ylab="",xlab="",
                    scales = list(tck = c(1,0))))
  }

  ## Compute row coverage ##
  BC_rownames <- unlist(apply(result3@RowxNumber,MARGIN=2,FUN=function(x){return(rownames(matrix)[x])}))
  nBC_rownames <- length(unique(BC_rownames))
  coverage <- c(NumberRows=round(nBC_rownames,0),RowPerc=round((nBC_rownames/nrow(matrix))*100,2))
  coverage_table <- (table(table(BC_rownames)))
  names(coverage_table) <- paste0("TimesRowsUsed ",names(coverage_table))

  ## FINAL RESULT ##
  OUT <- list(
    Biclust=result3,
    BiclustSim=sim_mat_result3,
    info=list(
      BiclustInitial=result1,
      BiclustSimInitial=sim_mat,
      Tree=tree_init,
      Number=c(InitialNumber=result1@Number,PatternNumber=result2@Number,FinalNumber=result3@Number),
      GapStat=gap_out,
      BC.Merge=BC.Merge,
      MergedColPatterns=MergedColPatterns,
      MergedNoiseThresholds=result2@info$Noise.Threshold,
      Coverage=list(RowCoverage=coverage,RowCoverageTable=coverage_table),
      call=pm
    )
  )
  class(OUT) <- "BiBitWorkflow"

  if(plot.type=="file" & any(plots %in% 3:5)){dev.off()}

  # results to save:
  # - Final result
  # - no noise result
  # - similarity matrix
  # - final similarity matrix
  # - somewhere how many BC's we had, then patterns, then final BC's
  # - also save all gap statistics (not only the chosen one)
  # - save info of coverage

  return(OUT)
}
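
# Illustration (dev sketch, kept out of execution with if(FALSE)): how a percentage
# noise level translates into the number of allowed zeros per row when growing rows
# in Step 6, following the documented rule zeros_allowed = ceiling(noise * columnsize).
# The 'noise' and 'columnsize' values below are hypothetical.
if(FALSE){
  noise <- 0.1
  columnsize <- c(5, 10, 23)             # column sizes of three merged patterns
  zeros_allowed <- ceiling(noise * columnsize)
  zeros_allowed                          # 1 1 3
}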
#' @title Apply Fisher Exact Test on Bicluster Rows
#' @description Accepts a Biclust or BiBitWorkflow result and applies the Fisher Exact Test for each row (see Details).
#' @param result A Biclust or BiBitWorkflow Object.
#' @param matrix Accompanying binary data matrix which was used to obtain \code{result}.
#' @param p.adjust Which method to use when adjusting p-values, see \code{\link[stats]{p.adjust}} (default=\code{"BH"}).
#' @param alpha Significance level (adjusted p-values) when constructing the \code{FisherInfo} object (default=0.05).
#' @param pattern Numeric vector for which patterns/biclusters the Fisher Exact Test needs to be computed (default = all patterns/biclusters).
#' @details Extracts the patterns from either a \code{Biclust} or \code{BiBitWorkflow} object (see below).
#' Afterwards, for each pattern, all rows will be tested using the Fisher Exact Test. This test compares the part of the row inside the pattern (of the bicluster) with the part of the row outside the pattern.
#' The Fisher Exact Test gives you some information on whether the row is uniquely active for this pattern.
#'
#' Depending on the \code{result} input, different patterns will be extracted and different info will be returned:
#' \describe{
#' \item{\emph{Biclust S4 Object}}{
#'
#' Using the column patterns of the Biclust result, all rows are tested using the Fisher Exact Test.
#' Afterwards the following 2 objects are added to the \code{info} slot of the Biclust object:
#' \itemize{
#' \item \code{FisherResult}: A list object (one element for each pattern) of data frames (Number of Rows \eqn{\times} 6) which contain the names of the rows (\code{Names}), the noise level of the row inside the pattern (\code{Noise}), the signal percentage inside the pattern (\code{InsidePerc1}), the signal percentage outside the pattern (\code{OutsidePerc1}), the p-value of the Fisher Exact Test (\code{Fisher_pvalue}) and the adjusted p-value of the Fisher Exact Test (\code{Fisher_pvalue_adj}).
#' \item \code{FisherInfo}: Info object which contains a comparison of the current row membership for each pattern with a 'new' row membership based on the significant rows (from the Fisher Exact Test) for each pattern.
#' It is a list object (one element for each pattern) of lists (6 elements). These list objects per pattern contain the number of new, removed and identical rows (\code{NewRows}, \code{RemovedRows}, \code{SameRows}) when comparing the significant rows with the original row membership (as well as their indices (\code{NewRows_index}, \code{RemovedRows_index})). The \code{MaxNoise} element contains the maximum noise of all Fisher significant rows.
#' }
#' }
#' \item{\emph{BiBitWorkflow S3 Object}}{
#'
#' The merged column patterns (after cutting the hierarchical tree) are extracted from the BiBitWorkflow object, namely the \code{$info$MergedColPatterns} slot.
#' Afterwards the following object is added to the \code{$info} slot of the BiBitWorkflow object:
#' \itemize{
#' \item \code{FisherResult}: Same as above
#' }
#' }
#' }
#' @author Ewoud De Troyer
#' @export
#' @return Depending on \code{result}, a \code{FisherResult} and/or \code{FisherInfo} object will be added to the \code{result} and returned (see Details).
#' @examples
#' \dontrun{
#' ## Prepare some data ##
#' set.seed(254)
#' mat <- matrix(sample(c(0,1),5000*50,replace=TRUE,prob=c(1-0.15,0.15)),
#'               nrow=5000,ncol=50)
#' mat[1:200,1:10] <- matrix(sample(c(0,1),200*10,replace=TRUE,prob=c(1-0.9,0.9)),
#'                           nrow=200,ncol=10)
#' mat[300:399,6:15] <- matrix(sample(c(0,1),100*10,replace=TRUE,prob=c(1-0.9,0.9)),
#'                             nrow=100,ncol=10)
#' mat[400:599,21:30] <- matrix(sample(c(0,1),200*10,replace=TRUE,prob=c(1-0.9,0.9)),
#'                              nrow=200,ncol=10)
#' mat[700:799,29:38] <- matrix(sample(c(0,1),100*10,replace=TRUE,prob=c(1-0.9,0.9)),
#'                              nrow=100,ncol=10)
#' mat <- mat[sample(1:5000,5000,replace=FALSE),sample(1:50,50,replace=FALSE)]
#'
#' ## Apply BiBitWorkflow ##
#' out <- BiBitWorkflow(matrix=mat,minr=50,minc=5,noise=0.2,cut_type="number",cut_pm=4)
#'
#' ## Apply RowTest_Fisher on Biclust Object -> returns Biclust Object ##
#' out_new <- RowTest_Fisher(result=out$Biclust,matrix=mat)
#' # FisherResult output in info slot
#' str(out_new@info$FisherResult)
#' # FisherInfo output in info slot (comparison with original BC's)
#' str(out_new@info$FisherInfo)
#'
#'
#' ## Apply RowTest_Fisher on BiBitWorkflow Object -> returns BiBitWorkflow Object ##
#' out_new2 <- RowTest_Fisher(result=out,matrix=mat)
#' # FisherResult output in BiBitWorkflow info element
#' str(out_new2$info$FisherResult)
#' # Fisher output is added to "NoiseScree" plot
#' NoiseScree(result=out_new2,matrix=mat,type="Added")
#' }
RowTest_Fisher <- function(result,matrix,p.adjust="BH",alpha=0.05,pattern=NULL){
  # Accepts Biclust output
  # Takes output of workflow
  # Shows significant rows (which are new, which are gone,...) (only when not a workflow output)

  if(!inherits(result,"Biclust") & !inherits(result,"BiBitWorkflow")){stop("result needs to be of class 'Biclust' or 'BiBitWorkflow'")}
  if(!is.matrix(matrix)){stop("matrix parameter should contain a matrix object",call.=FALSE)}
  if(!identical(as.numeric(as.vector(matrix)),as.numeric(as.logical(matrix)))){stop("matrix is not a binary matrix!",call.=FALSE)}
  if(is.null(rownames(matrix))){rownames(matrix) <- paste0("Row",c(1:nrow(matrix)))}
  if(is.null(colnames(matrix))){colnames(matrix) <- paste0("Col",c(1:ncol(matrix)))}
  if(!(p.adjust %in% c("holm", "hochberg", "hommel", "bonferroni", "BH", "BY","fdr", "none"))){stop("Incorrect p.adjust")}
  if(length(p.adjust)!=1){stop("p.adjust needs to be of length 1")}

  biclust_correctdim(result=result,matrix=matrix)

  workflow <- FALSE
  if(inherits(result,"BiBitWorkflow")){
    workflow_result <- result
    workflow <- TRUE
    cat("BiBitWorkflow Object: ",length(workflow_result$info$MergedColPatterns),"Patterns\n")

    NumberxCol <- do.call(rbind,lapply(workflow_result$info$MergedColPatterns,FUN=function(pattern){
      temp <- logical(ncol(matrix))
      temp[pattern] <- TRUE
      return(temp)
    }))

    result <- new("Biclust",
                  Parameters=workflow_result$Biclust@Parameters,
                  RowxNumber=matrix(FALSE,nrow=nrow(matrix),ncol=nrow(NumberxCol)),
                  NumberxCol=NumberxCol,
                  Number=nrow(NumberxCol),
                  info=workflow_result$Biclust@info
    )
  }

  # Select Patterns
  if(is.null(pattern)){pattern <- seq_len(result@Number)}
  if(any(pattern>result@Number)|any(pattern<=0)){stop("Incorrect pattern choice.")}

  # Apply Fisher
  fisher_output <- workflow_test_all_rows(result=result,matrix=matrix,p.adjust=p.adjust,pattern=pattern)

  if(workflow){
    workflow_result$info$FisherResult <- fisher_output
    return(workflow_result)
  }else{
    # If biclust input, we can compare with BC result
    fisher_info <- vector("list",length(pattern))
    names(fisher_info) <- names(fisher_output)

    for(i.pat in seq_len(length(pattern))){
      sign_rows <- which(fisher_output[[i.pat]]$Fisher_pvalue_adj<=alpha)
      BC_rows <- which(result@RowxNumber[,pattern[i.pat]])

      NewRows <- (setdiff(sign_rows,BC_rows))
      RemovedRows <- (setdiff(BC_rows,sign_rows))
      SameRows <- intersect(BC_rows,sign_rows)

      fisher_info[[i.pat]] <- list(NewRows=length(NewRows),
                                   RemovedRows=length(RemovedRows),
                                   SameRows=length(SameRows),
                                   NewRows_index=NewRows,
                                   RemovedRows_index=RemovedRows,
                                   MaxNoise=max(apply(matrix[sign_rows,result@NumberxCol[pattern[i.pat],]],MARGIN=1,FUN=function(x){sum(x==0)})))
    }

    result@info$FisherResult <- fisher_output
    result@info$FisherInfo <- fisher_info
    return(result)
  }
}

# NOTE: REMEMBER TO LINK EARLIER BACK TO THIS FUNCTION!!! (in noise_select)
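
# Conceptual sketch (dev note, kept out of execution with if(FALSE)): for a single
# row, the Fisher Exact Test compares the 1/0 counts inside the column pattern with
# those outside it. The 'row' and 'pattern' values below are hypothetical; the
# actual table construction lives in workflow_test_all_rows() and may differ in detail.
if(FALSE){
  row <- c(1,1,0,1, 0,0,1,0,0,0)          # one row of the binary matrix
  pattern <- c(rep(TRUE,4), rep(FALSE,6)) # column membership of the pattern
  tab <- rbind(inside  = c(sum(row[pattern]==1),  sum(row[pattern]==0)),
               outside = c(sum(row[!pattern]==1), sum(row[!pattern]==0)))
  colnames(tab) <- c("ones","zeros")
  fisher.test(tab)$p.value                # small p-value -> row active mainly inside
}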
#' @title Noise Scree Plots
#' @description Extract patterns from either a Biclust or BiBitWorkflow object (see Details) and plot the Noise Scree plot (same as plot 4 in \code{\link{BiBitWorkflow}}). Additionally, if \code{FisherResult} is available (from \code{\link{RowTest_Fisher}}), this info will be added to the plot.
#' @param result A Biclust or BiBitWorkflow Object.
#' @param matrix Accompanying binary data matrix which was used to obtain \code{result}.
#' @param type Either \code{"Added"} or \code{"Total"}. Should the noise level be plotted against the number of added rows (at that noise level) or the total number of rows (up to that noise level)?
#' @param pattern Numeric vector for which patterns the noise scree plot should be drawn (default = all patterns).
#' @param noise_select Should an automatic noise selection be applied and drawn (blue vertical line) on the plot? (Using an ad hoc method to find the elbow/kink in the Noise Scree plots)
#' \itemize{
#' \item \code{noise_select=0}: No noise selection is applied and no line is drawn (default).
#' \item \code{noise_select=1}: Using the Noise Scree plot (with 'Added Rows' on the y-axis), find the noise level where the current number of added rows at this noise level is larger than the mean of 'added rows' at the lower noise levels.
#' After locating this noise level, lower the noise level by 1. This is your automatically selected elbow/kink and therefore your noise level.
#' \item \code{noise_select=2}: Applies the same steps as for \code{noise_select=1}, but instead of decreasing the noise level by only 1, keep decreasing the noise level until the number of added rows isn't decreasing anymore either.
#' }
#' @param alpha If info from the Fisher Exact Test is available, which significance level should be used in the plot (Noise versus Significant Fisher Exact Test rows). (default=0.05)
#' @details
#' \describe{
#' \item{\emph{Biclust S4 Object}}{
#'
#' Using the column patterns of the Biclust result, the noise level is plotted versus the number of \code{"Total"} or \code{"Added"} rows.
#' }
#' \item{\emph{BiBitWorkflow S3 Object}}{
#'
#' The merged column patterns (after cutting the hierarchical tree) are extracted from the BiBitWorkflow object, namely the \code{$info$MergedColPatterns} slot.
#' These patterns are used to plot the noise level versus the number of \code{"Total"} or \code{"Added"} rows.
#' }
#' }
#' If information on the Fisher Exact Test is available, then this info will be added to the plot (noise level versus significant rows).
#' @author Ewoud De Troyer
#' @export
#' @return \code{NULL}
#' @examples \dontrun{
#' ## Prepare some data ##
#' set.seed(254)
#' mat <- matrix(sample(c(0,1),5000*50,replace=TRUE,prob=c(1-0.15,0.15)),
#'               nrow=5000,ncol=50)
#' mat[1:200,1:10] <- matrix(sample(c(0,1),200*10,replace=TRUE,prob=c(1-0.9,0.9)),
#'                           nrow=200,ncol=10)
#' mat[300:399,6:15] <- matrix(sample(c(0,1),100*10,replace=TRUE,prob=c(1-0.9,0.9)),
#'                             nrow=100,ncol=10)
#' mat[400:599,21:30] <- matrix(sample(c(0,1),200*10,replace=TRUE,prob=c(1-0.9,0.9)),
#'                              nrow=200,ncol=10)
#' mat[700:799,29:38] <- matrix(sample(c(0,1),100*10,replace=TRUE,prob=c(1-0.9,0.9)),
#'                              nrow=100,ncol=10)
#' mat <- mat[sample(1:5000,5000,replace=FALSE),sample(1:50,50,replace=FALSE)]
#'
#' ## Apply BiBitWorkflow ##
#' out <- BiBitWorkflow(matrix=mat,minr=50,minc=5,noise=0.2,cut_type="number",cut_pm=4)
#' # Make Noise Scree Plot - Default
#' NoiseScree(result=out,matrix=mat,type="Added")
#' NoiseScree(result=out,matrix=mat,type="Total")
#' # Make Noise Scree Plot - Use Automatic Noise Selection
#' NoiseScree(result=out,matrix=mat,type="Added",noise_select=2)
#' NoiseScree(result=out,matrix=mat,type="Total",noise_select=2)
#'
#' ## Apply RowTest_Fisher on BiBitWorkflow Object ##
#' out2 <- RowTest_Fisher(result=out,matrix=mat)
#' # Fisher output is added to "NoiseScree" plot
#' NoiseScree(result=out2,matrix=mat,type="Added")
#' NoiseScree(result=out2,matrix=mat,type="Total")
#' }
NoiseScree <- function(result,matrix,type=c("Added","Total"),pattern=NULL,noise_select=0,alpha=0.05){
  # accepts biclust output OR list of patterns OR give workflow a class
  # if Fisher info available added to plot (no noise select)
  # able to choose added or total
  # choose all or specific BC

  ## PARAMETER CHECKS ##
  if(!is.matrix(matrix)){stop("matrix parameter should contain a matrix object",call.=FALSE)}
  if(!identical(as.numeric(as.vector(matrix)),as.numeric(as.logical(matrix)))){stop("matrix is not a binary matrix!",call.=FALSE)}
  if(!inherits(result,"Biclust") & !inherits(result,"BiBitWorkflow")){stop("result needs to be of class 'Biclust' or 'BiBitWorkflow'")}
  if(is.null(rownames(matrix))){rownames(matrix) <- paste0("Row",c(1:nrow(matrix)))}
  if(is.null(colnames(matrix))){colnames(matrix) <- paste0("Col",c(1:ncol(matrix)))}
  if(length(type)!=1){stop("type needs to be of length 1")}
  if(!(type %in% c("Added","Total"))){stop("type needs to be \"Added\" or \"Total\"")}

  biclust_correctdim(result=result,matrix=matrix)

  if(inherits(result,"BiBitWorkflow")){
    workflow_result <- result
    cat("BiBitWorkflow Object: ",length(workflow_result$info$MergedColPatterns),"Patterns\n")

    NumberxCol <- do.call(rbind,lapply(workflow_result$info$MergedColPatterns,FUN=function(pattern){
      temp <- logical(ncol(matrix))
      temp[pattern] <- TRUE
      return(temp)
    }))

    result <- new("Biclust",
                  Parameters=workflow_result$Biclust@Parameters,
                  RowxNumber=matrix(FALSE,nrow=nrow(matrix),ncol=nrow(NumberxCol)),
                  NumberxCol=NumberxCol,
                  Number=nrow(NumberxCol),
                  info=workflow_result$Biclust@info
    )
    if("FisherResult" %in% names(workflow_result$info)){result@info$FisherResult <- workflow_result$info$FisherResult}
  }

  # Fisher result available? If so, see for which patterns
  fisher <- ifelse("FisherResult"%in%names(result@info),TRUE,FALSE)
  if(fisher){
    fisher_patterns <- as.numeric(gsub("Pattern","",names(result@info$FisherResult)))
  }

  # Select Patterns
  if(is.null(pattern)){pattern <- seq_len(result@Number)}
  if(any(pattern>result@Number)){stop("Incorrect pattern choice.")}

  # Calculate NoisexNumber
  NoisexNumber <- apply(result@NumberxCol,MARGIN=1,FUN=function(pattern){
    return(apply(matrix[,pattern],MARGIN=1,FUN=function(row){return(sum(row==0))}))
  })

  # Calculate data_noisescree
  data_noisescree <- lapply(seq_len(ncol(NoisexNumber)),FUN=function(pattern){
    tab <- table(NoisexNumber[,pattern])
    return(data.frame(Noise=as.numeric(names(tab)),Total=as.numeric(tab)))
  })

  # Automatic Noise choice
  # if(noise_select>0){
  noise <- workflow_noisethreshold(noise=1,noise_select=noise_select,data_noisescree=data_noisescree)
  # }

  # Make Plot
  tempdim <- sqrt(length(pattern))
  extradim <- ifelse((floor(tempdim)*ceiling(tempdim))<length(pattern),1,0)
  par(mfrow=c(floor(tempdim)+extradim,ceiling(tempdim)))
  patcols <- apply(result@NumberxCol,MARGIN=1,FUN=sum)

  for(i.pat in pattern){
    temp_data <- data_noisescree[[i.pat]]
    if(type=="Total"){
      temp_data$Total <- cumsum(temp_data$Total)
      ylab_temp <- "Total Number"
    }else{
      ylab_temp <- "Added Number"
    }
    plot(temp_data$Noise,temp_data$Total,main=paste0("Pat",i.pat," (",patcols[i.pat]," cols)"),xlab="Noise",ylab=ylab_temp,pch=19)
    points(temp_data$Noise,temp_data$Total,type="l")
    text(temp_data$Noise,temp_data$Total,as.character(temp_data$Total),pos=3)

    # Add fisher line if available
    if(fisher){
      if((i.pat %in% fisher_patterns)){
        index <- which(i.pat==fisher_patterns)
        fishernoise <- (result@info$FisherResult[[index]]$Noise[result@info$FisherResult[[index]]$Fisher_pvalue_adj<=alpha])
        fisherdata <- data.frame(Noise=temp_data$Noise,Total=sapply(temp_data$Noise,FUN=function(x){return(sum(fishernoise==x))}))
        if(type=="Total"){
          fisherdata$Total <- cumsum(fisherdata$Total)
        }
        points(fisherdata$Noise,fisherdata$Total,pch=19,col="red")
        points(fisherdata$Noise,fisherdata$Total,type="l",col="red")
        text(fisherdata$Noise,fisherdata$Total,as.character(fisherdata$Total),col="red",pos=1,xpd=TRUE)
        legend("topleft",c("All","Fisher Sign."),bty="n",lty=1,col=c("black","red"),cex=0.8,seg.len=0.5)
      }
    }

    if(noise_select>0){
      abline(v=noise[i.pat],col="blue")
    }else{
      abline(v=ifelse((noise[i.pat]<1) & (noise[i.pat]>0),ceiling(noise[i.pat]*patcols[i.pat]),noise[i.pat]),col="blue")
    }
  }
  par(mfrow=c(1,1))

  return(NULL)
}
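
# Dev sketch (kept out of execution with if(FALSE)): the ad hoc elbow rule behind
# noise_select=1, as described in the documentation above. The real logic lives in
# workflow_noisethreshold(); the 'added' vector below is hypothetical (rows added
# at noise levels 0, 1, 2, ...).
if(FALSE){
  added <- c(120, 40, 15, 10, 300, 800)
  kink <- which(sapply(2:length(added), function(i){
    added[i] > mean(added[1:(i-1)])       # first level exceeding the running mean
  }))[1] + 1                              # convert to an index into 'added'
  selected_noise <- (kink - 1) - 1        # noise level of that index, stepped back by 1
  selected_noise                          # 3 for this example
}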
# Help function which takes a BiBitWorkflow result and reapplies it multiple times for different numbers of clusters, then plots row coverage
# More clusters: more row coverage, smaller patterns ; Less clusters: less coverage, larger patterns (depending on initial bibit result)

#' @title Row Coverage Plots
#' @description Plotting function to be used with the \code{\link{BiBitWorkflow}} output. It plots the number of clusters (of the hierarchical tree) versus the number/percentage of row coverage and number of final biclusters (see Details for more information).
#' @param result A BiBitWorkflow Object.
#' @param matrix Accompanying binary data matrix which was used to obtain \code{result}.
#' @param maxCluster Maximum number of clusters to cut the tree at (default=20).
#' @param noise The allowed noise level when growing the rows on the merged patterns after cutting the tree. (default=\code{0.1}, namely allow 10\% noise.)
#' \itemize{
#' \item \code{noise=0}: No noise allowed.
#' \item \code{0<noise<1}: The \code{noise} parameter will be a noise percentage. The number of allowed 0's in a row in the bicluster will depend on the column size of the bicluster.
#' More specifically \code{zeros_allowed = ceiling(noise * columnsize)}. For example for \code{noise=0.10} and a bicluster column size of \code{5}, the number of allowed 0's would be \code{1}.
#' \item \code{noise>=1}: The \code{noise} parameter will be the number of allowed 0's in a row in the bicluster independent from the column size of the bicluster. In this noise option, the noise parameter should be an integer.
#' }
#' @param noise_select Should the allowed noise level be automatically selected for each pattern? (Using an ad hoc method to find the elbow/kink in the Noise Scree plots)
#' \itemize{
#' \item \code{noise_select=0}: Do \emph{NOT} automatically select the noise levels. Use the noise level given in the \code{noise} parameter (default).
#' \item \code{noise_select=1}: Using the Noise Scree plot (with 'Added Rows' on the y-axis), find the noise level where the current number of added rows at this noise level is larger than the mean of 'added rows' at the lower noise levels.
#' After locating this noise level, lower the noise level by 1. This is your automatically selected elbow/kink and therefore your noise level.
#' \item \code{noise_select=2}: Applies the same steps as for \code{noise_select=1}, but instead of decreasing the noise level by only 1, keep decreasing the noise level until the number of added rows isn't decreasing anymore either.
#' }
#' @param plots Vector for which plots to draw:
#' \enumerate{
#' \item Number of Clusters versus Row Coverage Percentage
#' \item Number of Clusters versus Number of Row Coverage
#' \item Number of Clusters versus Final Number of Biclusters
#' }
#' @param verbose Logical value if the progress bar of merging/growing the biclusters should be shown. (default=\code{TRUE})
#' @param plot.type Output Type
#' \itemize{
#' \item \code{"device"}: All plots are outputted to new R graphics devices (default).
#' \item \code{"file"}: All plots are saved in external files. Plots are joint together in a single \code{.pdf} file.
#' \item \code{"other"}: All plots are outputted to the current graphics device, but will overwrite each other. Use this if you want to include one or more plots in a sweave/knitr file or if you want to export a single plot by your own chosen format.
#' }
#' @param filename Base filename (with/without directory) for the plots if \code{plot.type="file"} (default=\code{"RowCoverage"}).
#' @details The graph of number of chosen tree clusters versus the final row coverage can help you to make a decision on how many clusters to choose in the hierarchical tree.
#' The more clusters you choose, the smaller (albeit more similar) the patterns are and the more rows will fit your patterns (i.e. more row coverage).
#' @author Ewoud De Troyer
#' @export
#' @return A data frame containing the number of clusters and the corresponding number of row coverage, percentage of row coverage and the number of final biclusters.
#' @examples \dontrun{
#' ## Prepare some data ##
#' set.seed(254)
#' mat <- matrix(sample(c(0,1),5000*50,replace=TRUE,prob=c(1-0.15,0.15)),
#'               nrow=5000,ncol=50)
#' mat[1:200,1:10] <- matrix(sample(c(0,1),200*10,replace=TRUE,prob=c(1-0.9,0.9)),
#'                           nrow=200,ncol=10)
#' mat[300:399,6:15] <- matrix(sample(c(0,1),100*10,replace=TRUE,prob=c(1-0.9,0.9)),
#'                             nrow=100,ncol=10)
#' mat[400:599,21:30] <- matrix(sample(c(0,1),200*10,replace=TRUE,prob=c(1-0.9,0.9)),
#'                              nrow=200,ncol=10)
#' mat[700:799,29:38] <- matrix(sample(c(0,1),100*10,replace=TRUE,prob=c(1-0.9,0.9)),
#'                              nrow=100,ncol=10)
#' mat <- mat[sample(1:5000,5000,replace=FALSE),sample(1:50,50,replace=FALSE)]
#'
#' ## Apply BiBitWorkflow ##
#' out <- BiBitWorkflow(matrix=mat,minr=50,minc=5,noise=0.2,cut_type="number",cut_pm=10)
#' # Make ClusterRowCoverage Plots
#' ClusterRowCoverage(result=out,matrix=mat,maxCluster=20,noise=0.2)
#' }
ClusterRowCoverage <- function(result,matrix,maxCluster=20,
                               noise=0.1,noise_select = 0,
                               plots=c(1:3),
                               verbose=TRUE,
                               plot.type="device",filename="RowCoverage"){

  if(!all(plots %in% c(1:3))){stop("plots should be part of c(1,2,3)")}
  if(length(plot.type)!=1){stop("plot.type should be of length 1",call.=FALSE)}
  if(!(plot.type %in% c("device","file","other"))){stop("plot.type should be 'device', 'file' or 'other'",call.=FALSE)}
  FIRSTPLOT <- TRUE

  ## PARAMETER CHECKS ##
  if(!inherits(result,"BiBitWorkflow")){stop("result needs to be of class 'BiBitWorkflow'")}
  if(!is.matrix(matrix)){stop("matrix parameter should contain a matrix object",call.=FALSE)}
  if(!identical(as.numeric(as.vector(matrix)),as.numeric(as.logical(matrix)))){stop("matrix is not a binary matrix!",call.=FALSE)}
  if(is.null(rownames(matrix))){rownames(matrix) <- paste0("Row",c(1:nrow(matrix)))}
  if(is.null(colnames(matrix))){colnames(matrix) <- paste0("Col",c(1:ncol(matrix)))}

  biclust_correctdim(result=result,matrix=matrix)

  nrow <- length(2:maxCluster)
  cov_df <- data.frame(clusters=rep(NA,nrow),NumberRows=rep(NA,nrow),RowPerc=rep(NA,nrow),FinalBC=rep(NA,nrow))

  if(verbose){
    cat("Merging clusters and growing rows:\n")
    pb <- txtProgressBar(min=0,max=sum(1:nrow),initial=0,style=3)
  }

  for(i in 1:nrow){
    # cat(i,"\n")
    # temp <- capture.output({
    #   out_temp <- BiBitWorkflow(matrix=matrix,
    #                             cut_type="number",cut_pm=i+1,
    #                             noise=noise,noise_select = noise_select,
    #                             plots=c(),
    #                             BCresult = result$info$BiclustInitial,
    #                             simmatresult = result$info$BiclustSimInitial,
    #                             treeresult = result$info$Tree)
    # })
    # cov_df[i,] <- c(i+1,out_temp$info$Coverage$RowCoverage,out_temp$Biclust@Number)

    temp <- capture.output({
      result2 <- workflow_mergeBC(result=result$info$BiclustInitial,tree=result$info$Tree,JI=NULL,number=i+1)

      BC.Merge <- result2@info$BC.Merge
      MergedColPatterns <- lapply(as.list(seq_len(result2@Number)),FUN=function(pattern){
        return(which(result2@NumberxCol[pattern,]))
      })
      names(MergedColPatterns) <- paste0("Pat",seq_len(result2@Number))

      NoisexNumber <- apply(result2@NumberxCol,MARGIN=1,FUN=function(pattern){
        return(apply(matrix[,pattern],MARGIN=1,FUN=function(row){return(sum(row==0))}))
      })
      data_noisescree <- lapply(seq_len(ncol(NoisexNumber)),FUN=function(pattern){
        tab <- table(NoisexNumber[,pattern])
        return(data.frame(Noise=as.numeric(names(tab)),Total=as.numeric(tab)))
      })
      noise_threshold <- workflow_noisethreshold(noise,noise_select,data_noisescree)
      result2@info$Noise.Threshold <- noise_threshold

      result3 <- workflow_UpdateBiclust_RowNoise(result=result2,matrix=matrix,
                                                 noise=result2@info$Noise.Threshold,removeBC=TRUE,NoisexNumber=NoisexNumber)
    })

    BC_rownames <- unlist(apply(result3@RowxNumber,MARGIN=2,FUN=function(x){return(rownames(matrix)[x])}))
    nBC_rownames <- length(unique(BC_rownames))
    coverage <- c(NumberRows=round(nBC_rownames,0),RowPerc=round((nBC_rownames/nrow(matrix))*100,2))

    cov_df[i,] <- c(i+1,coverage,result3@Number)

    if(verbose){
      setTxtProgressBar(pb,sum(1:i))
    }
  }
  if(verbose){
    close(pb)
  }

  if(1 %in% plots){
    if(plot.type=="device"){
      dev.new()
    }else if(plot.type=="file" & FIRSTPLOT){
      pdf(paste0(filename,".pdf"))
      FIRSTPLOT <- FALSE
    }
    plot(cov_df$clusters,cov_df$RowPerc,main="Clusters vs Row Coverage Perc.",xlab="n clusters",ylab="Row CovPerc",pch=19)
    points(cov_df$clusters,cov_df$RowPerc,type="l")
    text(cov_df$clusters,cov_df$RowPerc,as.character(round(cov_df$RowPerc,2)),pos=3)
  }
  if(2 %in% plots){
    if(plot.type=="device"){
      dev.new()
    }else if(plot.type=="file" & FIRSTPLOT){
      pdf(paste0(filename,".pdf"))
      FIRSTPLOT <- FALSE
    }
    plot(cov_df$clusters,cov_df$NumberRows,main="Clusters vs Total Number Rows",xlab="n clusters",ylab="Total Number Rows",pch=19)
    points(cov_df$clusters,cov_df$NumberRows,type="l")
    text(cov_df$clusters,cov_df$NumberRows,as.character(cov_df$NumberRows),pos=3)
  }
  if(3 %in% plots){
    if(plot.type=="device"){
      dev.new()
    }else if(plot.type=="file" & FIRSTPLOT){
      pdf(paste0(filename,".pdf"))
      FIRSTPLOT <- FALSE
    }
    plot(cov_df$clusters,cov_df$FinalBC,main="Clusters vs Final Number BC",xlab="n clusters",ylab="Final BC",pch=19)
    points(cov_df$clusters,cov_df$FinalBC,type="l")
    text(cov_df$clusters,cov_df$FinalBC,as.character(cov_df$FinalBC),pos=3)
  }

  if(plot.type=="file" & length(plots)>0){
    dev.off()
  }

  return(cov_df)
}
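
# Dev sketch (kept out of execution with if(FALSE)): the row-coverage computation
# used above, in isolation. 'result3' stands for any Biclust object and 'matrix'
# for the accompanying binary data matrix.
if(FALSE){
  BC_rownames  <- unlist(apply(result3@RowxNumber, MARGIN=2,
                               FUN=function(x) rownames(matrix)[x]))
  nBC_rownames <- length(unique(BC_rownames))       # rows covered by >= 1 BC
  c(NumberRows = nBC_rownames,
    RowPerc    = round(100 * nBC_rownames / nrow(matrix), 2))
}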
#' @title Compare Biclustering Results using Jaccard Index
#' @description Creates a heatmap and returns a similarity matrix of the Jaccard Index (Row, Column or both dimensions) in order to compare 2 different biclustering results or compare the biclusters of a single result.
#' @details The Jaccard Index between two biclusters is calculated as follows:
#' \deqn{JI(BC1,BC2) = \frac{(m_1+m_2-m_{12})}{m_{12}}}
#' in which
#' \itemize{
#' \item \code{type="row"} or \code{type="col"}
#' \itemize{
#' \item \eqn{m_1=} Number of rows/columns of BC1
#' \item \eqn{m_2=} Number of rows/columns of BC2
#' \item \eqn{m_{12}=} Number of rows/columns of the union of the row/column membership of BC1 and BC2
#' }
#' \item \code{type="both"}
#' \itemize{
#' \item \eqn{m_1=} Size of BC1 (rows times columns)
#' \item \eqn{m_2=} Size of BC2 (rows times columns)
#' \item \eqn{m_{12}= m_1+m_2 -} size of the overlapping BC of BC1 and BC2
#' }
#' }
#' @param BCresult1 A S4 Biclust object. If only this input Biclust object is given, the biclusters of this single result will be compared.
#' @param BCresult2 A second S4 Biclust object to which \code{BCresult1} should be compared. (default=\code{NULL})
#' @param type Of which dimension should the Jaccard Index be computed? Can be \code{"row"}, \code{"col"} or \code{"both"} (default).
#' @param plot Logical value if plot should be outputted (default=\code{TRUE}).
#' @author Ewoud De Troyer
#' @export
#' @return A list containing
#' \itemize{
#' \item \code{SimMat}: The JI Similarity Matrix between the compared biclusters.
#' \item \code{MaxSim}: A list containing the maximum values on each row (\code{BCResult1}) and each column (\code{BCResult2}).
#' }
#' @examples
#' \dontrun{
#' data <- matrix(sample(c(0,1),100*100,replace=TRUE,prob=c(0.9,0.1)),nrow=100,ncol=100)
#' data[1:10,1:10] <- 1 # BC1
#' data[11:20,11:20] <- 1 # BC2
#' data[21:30,21:30] <- 1 # BC3
#' data <- data[sample(1:nrow(data),nrow(data)),sample(1:ncol(data),ncol(data))]
#'
#' # Result 1
#' result1 <- bibit(data,minr=5,minc=5)
#' result1
#'
#' # Result 2
#' result2 <- bibit(data,minr=2,minc=2)
#' result2
#'
#' ## Compare all BC's of Result 1 ##
#' Sim1 <- CompareResultJI(BCresult1=result1,type="both")
#' Sim1$SimMat
#'
#' ## Compare BC's of Result 1 and 2 ##
#' Sim12 <- CompareResultJI(BCresult1=result1,BCresult2=result2,type="both",plot=FALSE)
#' str(Sim12)
#' }
CompareResultJI <- function(BCresult1,BCresult2=NULL,type="both",plot=TRUE){

  if(!inherits(BCresult1,"Biclust") & !inherits(BCresult1,"iBBiG")){stop("BCresult1 is not a Biclust object")}
  if(!is.null(BCresult2)){
    if(!inherits(BCresult2,"Biclust") & !inherits(BCresult2,"iBBiG")){stop("BCresult2 is not a Biclust object")}
    name_temp <- "Result2_BC"
  }else{
    BCresult2 <- BCresult1
    name_temp <- "Result1_BC"
  }
  if(length(type)>1){stop("type can only have 1 argument")}
  if(!(type %in% c("both","row","col"))){stop("type incorrect")}

  simmat <- matrix(0,nrow=BCresult1@Number,ncol=BCresult2@Number,dimnames=list(paste0("Result1_BC",1:BCresult1@Number),paste0(name_temp,1:BCresult2@Number)))

  if(type=="both"){
    main.temp <- "JI"
    for(i in 1:nrow(simmat)){
      for(j in 1:ncol(simmat)){
        row_contain_temp <- sum(which(BCresult1@RowxNumber[,i])%in%which(BCresult2@RowxNumber[,j]))
        col_contain_temp <- sum(which(BCresult1@NumberxCol[i,])%in%which(BCresult2@NumberxCol[j,]))
        m1 <- sum(BCresult1@RowxNumber[,i])*sum(BCresult1@NumberxCol[i,])
        m2 <- sum(BCresult2@RowxNumber[,j])*sum(BCresult2@NumberxCol[j,])
        m12 <- m1+m2-row_contain_temp*col_contain_temp
        simmat[i,j] <- (m1+m2-(m12))/(m12)
      }
    }
  }else if(type=="row"){
    main.temp <- "Row"
    for(i in 1:nrow(simmat)){
      for(j in 1:ncol(simmat)){
        x1 <- BCresult1@RowxNumber[,i]
        x2 <- BCresult2@RowxNumber[,j]
        m1 <- sum(x1)
        m2 <- sum(x2)
        m12 <- sum(as.logical(x1+x2))
        simmat[i,j] <- (m1+m2-m12)/m12
      }
    }
  }else if(type=="col"){
    main.temp <- "Col"
    for(i in 1:nrow(simmat)){
      for(j in 1:ncol(simmat)){
        x1 <- BCresult1@NumberxCol[i,]
        x2 <- BCresult2@NumberxCol[j,]
        m1 <- sum(x1)
        m2 <- sum(x2)
        m12 <- sum(as.logical(x1+x2))
        simmat[i,j] <- (m1+m2-m12)/m12
      }
    }
  }

  MaxSim1 <- apply(simmat,MARGIN=1,FUN=max)
  MaxSim2 <- apply(simmat,MARGIN=2,FUN=max)

  simmat_temp <- simmat
  rownames(simmat_temp) <- paste0(rownames(simmat_temp)," (",as.character(round(MaxSim1,2)),")")
  colnames(simmat_temp) <- paste0(colnames(simmat_temp)," (",as.character(round(MaxSim2,2)),")")

  if(plot){
    # image(t(simmat),col=viridis(256),main=paste0("Comparison ",main.temp," Matrix"),axes=FALSE)
    # axis(1,at=seq(0,1,length.out=ncol(simmat)),labels=colnames(simmat),tick=FALSE,las=2,cex.axis=0.5)
    # axis(2,at=seq(0,1,length.out=nrow(simmat)),labels=rownames(simmat),tick=FALSE,las=2,cex.axis=0.5)
    # axis(3,at=seq(0,1,length.out=ncol(simmat)),labels=as.character(round(MaxSim2,2)),tick=FALSE,cex.axis=0.6)
    # axis(4,at=seq(0,1,length.out=nrow(simmat)),labels=as.character(round(MaxSim1,2)),tick=FALSE,las=2,cex.axis=0.6)
    print(levelplot(t(simmat_temp),col.regions=viridis(256),main=paste0("Comparison ",main.temp," Matrix"),ylab="",xlab="",scales=list(x=list(rot=90),tck = c(1,0))))
  }

  return(list(SimMat=simmat,MaxSim=list(BCResult1=MaxSim1,BCResult2=MaxSim2)))
}
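
# Worked example (dev sketch, kept out of execution with if(FALSE)) of the row-based
# Jaccard Index defined above: two biclusters with 10 and 8 rows sharing 6 rows.
# The membership vectors below are hypothetical.
if(FALSE){
  x1 <- logical(20); x1[1:10] <- TRUE     # rows of BC1
  x2 <- logical(20); x2[5:12] <- TRUE     # rows of BC2
  m1  <- sum(x1); m2 <- sum(x2)
  m12 <- sum(as.logical(x1 + x2))         # size of the union (12)
  (m1 + m2 - m12) / m12                   # 6/12 = 0.5
}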
#' @author Ewoud De Troyer #' @export #' @return A \code{Biclust} or \code{BiBitWorkflow} Object (See Details) #' @examples #' \dontrun{ #' ## Prepare some data ## #' set.seed(254) #' mat <- matrix(sample(c(0,1),5000*50,replace=TRUE,prob=c(1-0.15,0.15)), #' nrow=5000,ncol=50) #' mat[1:200,1:10] <- matrix(sample(c(0,1),200*10,replace=TRUE,prob=c(1-0.9,0.9)), #' nrow=200,ncol=10) #' mat[300:399,6:15] <- matrix(sample(c(0,1),100*10,replace=TRUE,prob=c(1-0.9,0.9)), #' nrow=100,ncol=10) #' mat[400:599,21:30] <- matrix(sample(c(0,1),200*10,replace=TRUE,prob=c(1-0.9,0.9)), #' nrow=200,ncol=10) #' mat[700:799,29:38] <- matrix(sample(c(0,1),100*10,replace=TRUE,prob=c(1-0.9,0.9)), #' nrow=100,ncol=10) #' mat <- mat[sample(1:5000,5000,replace=FALSE),sample(1:50,50,replace=FALSE)] #' #' ## Apply BiBitWorkflow ## #' out <- BiBitWorkflow(matrix=mat,minr=50,minc=5,noise=0.1,cut_type="number",cut_pm=4) #' summary(out$Biclust) #' #' ## Update Rows with new noise level on Biclust Obect -> returns Biclust Object ## #' out_new <- UpdateBiclust_RowNoise(result=out$Biclust,matrix=mat,noise=0.3) #' summary(out_new) #' out_new@info$Noise.Threshold # New Noise Levels #' #' ## Update Rows with new noise level on BiBitWorkflow Obect -> returns BiBitWorkflow Object ## #' out_new2 <- UpdateBiclust_RowNoise(result=out,matrix=mat,noise=0.2) #' summary(out_new2$Biclust) #' out_new2$info$MergedNoiseThresholds # New Noise Levels #' } UpdateBiclust_RowNoise <- function(result,matrix, noise=0.1,noise_select=0,removeBC=FALSE){ ## PARAMETER CHECKS ## if(class(matrix)!="matrix"){stop("matrix parameter should contain a matrix object",call.=FALSE)} if(!identical(as.numeric(as.vector(matrix)),as.numeric(as.logical(matrix)))){stop("matrix is not a binary matrix!",call.=FALSE)} if(any(noise<0)){stop("noise parameter can not be negative",call.=FALSE)} if(class(result)!="Biclust" & class(result)!="BiBitWorkflow"){stop("result needs to be of class 'Biclust' or 'BiBitWorkflow'")} if(is.null(rownames(matrix))){rownames(matrix) <- paste0("Row",c(1:nrow(matrix)))} if(is.null(colnames(matrix))){colnames(matrix) <- paste0("Col",c(1:ncol(matrix)))} biclust_correctdim(result=result,matrix=matrix) workflow <- FALSE if(class(result)=="BiBitWorkflow"){ workflow <- TRUE workflow_result <- result cat("BiBitWorkflow Object: ",length(workflow_result$info$MergedColPatterns),"Patterns\n") NumberxCol <- do.call(rbind,lapply(workflow_result$info$MergedColPatterns,FUN=function(pattern){ temp <- logical(ncol(matrix)) temp[pattern] <- TRUE return(temp) })) result <- new("Biclust", Parameters=workflow_result$Biclust@Parameters, RowxNumber=matrix(FALSE,nrow=nrow(matrix),ncol=nrow(NumberxCol)), NumberxCol=NumberxCol, Number=nrow(NumberxCol), info=workflow_result$Biclust@info ) # Same parameters, but add noise! 
} if(length(noise)==1){noise <- rep(noise,result@Number)}else if(length(noise)!=result@Number){stop("length of noise should be 1 or equal to the number of BC's")} # Prep to know which patterns are removed due to no fitting rows/duplicates/containedwithin all_patterns <- seq_len(result@Number) current_patterns <- all_patterns # Transform the percentage noise values into integers perc_index <- which(noise<1 & noise>0) if(length(perc_index)>0){ coldim <- apply(result@NumberxCol[perc_index,,drop=FALSE],MARGIN=1,FUN=sum) noise[perc_index] <- ceiling(noise*coldim) } # Calculate NoisexNumber NoisexNumber <- apply(result@NumberxCol,MARGIN=1,FUN=function(pattern){ return(apply(matrix[,pattern],MARGIN=1,FUN=function(row){return(sum(row==0))})) }) # Select rows and adapt Biclust object RowxNumber <- do.call(cbind,lapply(as.list(seq_len(result@Number)),FUN=function(pattern){ return(NoisexNumber[,pattern]<=noise[pattern]) })) rownames(RowxNumber) <- NULL result@RowxNumber <- RowxNumber if(noise_select>0){ # Calculate data_noisescree data_noisescree <- lapply(seq_len(ncol(NoisexNumber)),FUN=function(pattern){ tab <- table(NoisexNumber[,pattern]) return(data.frame(Noise=as.numeric(names(tab)),Total=as.numeric(tab))) }) noise <- workflow_noisethreshold(noise=noise[1],noise_select=noise_select,data_noisescree=data_noisescree) } result@info$Noise.Threshold <- noise # Check for no-row BC BC.delete <- apply(result@RowxNumber,MARGIN=2,FUN=sum)==0 if(sum(BC.delete)>0){ index <- which(BC.delete) result@Number <- result@Number - length(index) result@RowxNumber <- result@RowxNumber[,-index,drop=FALSE] result@NumberxCol <- result@NumberxCol[-index,,drop=FALSE] result@info$Noise.Threshold <- result@info$Noise.Threshold[-index] current_patterns <- current_patterns[-index] cat("Number of biclusters deleted due to no rows fitting the pattern with allowed noise:",length(index),"\n") } # Save current patterns temporarily in the result (this vector might decrease) result@info$current_patterns <- current_patterns if(removeBC){ result <- workflow_duplicate_BC(result) } if(removeBC | (sum(BC.delete)>0)){ cat("Total final BC's:",result@Number,"\n") } # Saving deleted patterns and remove temporary current patterns result@info$Deleted.Patterns <- setdiff(all_patterns,result@info$current_patterns) result@info$current_patterns <- NULL # Different output depending on Biclust of BiBitWorkflow input if(workflow){ # Some elements of the BiBitWorkflow need to be updated workflow_result$Biclust <- result workflow_result$info$Number["FinalNumber"] <- result@Number workflow_result$info$MergedNoiseThresholds <- noise ## Compute row coverage ## BC_rownames <- unlist(apply(result@RowxNumber,MARGIN=2,FUN=function(x){return(rownames(matrix)[x])})) nBC_rownames <- length(unique(BC_rownames)) coverage <- c(NumberRows=round(nBC_rownames,0),RowPerc=round((nBC_rownames/nrow(matrix))*100,2)) coverage_table <- (table(table(BC_rownames))) names(coverage_table) <- paste0("TimesRowsUsed ",names(coverage_table)) workflow_result$info$Coverage <- list(RowCoverage=coverage,RowCoverageTable=coverage_table) return(workflow_result) }else{ return(result) } } workflow_UpdateBiclust_RowNoise <- function(result,matrix, noise,removeBC=FALSE,NoisexNumber,verbose=TRUE){ ## PARAMETER CHECKS ## if(class(matrix)!="matrix"){stop("matrix parameter should contain a matrix object",call.=FALSE)} if(!identical(as.numeric(as.vector(matrix)),as.numeric(as.logical(matrix)))){stop("matrix is not a binary matrix!",call.=FALSE)} if(length(noise)==1){noise <- 
rep(noise,result@Number)}else if(length(noise)!=result@Number){stop("length of noise should be 1 or equal to the number of BC's")} if(any(noise<0)){stop("noise parameter can not be negative",call.=FALSE)} if(class(result)!="Biclust"){stop("result needs to be of class 'Biclust'")} # Prep to know which patterns are removed due to no fitting rows/duplicates/containedwithin all_patterns <- seq_len(result@Number) current_patterns <- all_patterns # Transform the percentage noise values into integers perc_index <- which(noise<1 & noise>0) if(length(perc_index)>0){ coldim <- apply(result@NumberxCol[perc_index,,drop=FALSE],MARGIN=1,FUN=sum) noise[perc_index] <- ceiling(noise*coldim) } # Select rows and adapt Biclust object RowxNumber <- do.call(cbind,lapply(as.list(seq_len(result@Number)),FUN=function(pattern){ return(NoisexNumber[,pattern]<=noise[pattern]) })) rownames(RowxNumber) <- NULL result@RowxNumber <- RowxNumber # Check for no-row BC BC.delete <- apply(result@RowxNumber,MARGIN=2,FUN=sum)==0 if(sum(BC.delete)>0){ index <- which(BC.delete) result@Number <- result@Number - length(index) result@RowxNumber <- result@RowxNumber[,-index,drop=FALSE] result@NumberxCol <- result@NumberxCol[-index,,drop=FALSE] result@info$Noise.Threshold <- result@info$Noise.Threshold[-index] current_patterns <- current_patterns[-index] if(verbose){cat("Number of biclusters deleted due to no rows fitting the pattern with allowed noise:",length(index),"\n")} } # Save current patterns temporarily in the result (this vector might decrease) result@info$current_patterns <- current_patterns if(removeBC & result@Number>1){ result <- workflow_duplicate_BC(result,verbose=verbose) } if(removeBC | (sum(BC.delete)>0)){ if(verbose){cat("Total final BC's:",result@Number,"\n")} } # Saving deleted patterns and remove temporary current patterns result@info$Deleted.Patterns <- setdiff(all_patterns,result@info$current_patterns) result@info$current_patterns <- NULL return(result) } workflow_simmat <- function(result,type="both",verbose=TRUE){ mat_placeholder <- matrix(0,nrow=result@Number,ncol=result@Number) total_number <- (nrow(mat_placeholder)-1)*nrow(mat_placeholder)/2 if(verbose){ current_number <- 0 pb <- txtProgressBar(min=0,max=((nrow(mat_placeholder)-1)*(ncol(mat_placeholder))/2),initial=0,style=3) } for(i in 1:(nrow(mat_placeholder)-1)){ for(j in (i+1):(ncol(mat_placeholder))){ mat_placeholder[i,j] <- workflow_jaccard_bc(result,i,j,type=type) if(verbose){ current_number <- current_number+1 setTxtProgressBar(pb,value=current_number) } } } if(verbose){close(pb)} mat_placeholder <- mat_placeholder+t(mat_placeholder) diag(mat_placeholder) <- 1 return(mat_placeholder) } workflow_mergeBC <- function(result,tree,JI=NULL,number=NULL,verbose=TRUE){ cut <- cutree(tree,h=(1-JI),k=number) new_number <- length(unique(cut)) if(verbose){ cat("Number of BC before merge:",result@Number,"\n") cat("Number of BC after merge:",new_number,"\n") cat("Merging...") } out <- new("Biclust",Parameters=result@Parameters, RowxNumber=matrix(FALSE,nrow=nrow(result@RowxNumber),ncol=new_number), NumberxCol=matrix(FALSE,nrow=new_number,ncol=ncol(result@NumberxCol)), Number=new_number, info=result@info) out@info$BC.Merge <- vector("list",new_number) for(i in sort(unique(cut))){ BC.index <- which(i==cut) out@info$BC.Merge[[i]] <- BC.index out@RowxNumber[,i] <- as.logical(rowSums(result@RowxNumber[,BC.index,drop=FALSE])) out@NumberxCol[i,] <- as.logical(colSums(result@NumberxCol[BC.index,,drop=FALSE])) } if(verbose){cat("DONE\n")} return(out) } 
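# Illustrative sketch (not part of the package API): the cutree() call used in
# workflow_mergeBC() cuts the hierarchical tree either at height h = 1 - JI or
# into `number` groups. On a tree built from Jaccard *distances*
# (1 - similarity), cutting at h = 1 - JI keeps together exactly those
# biclusters that were joined at a distance of at most 1 - JI. The similarity
# matrix below is hypothetical; the block is wrapped in if(FALSE) so it never
# runs at package load.
if(FALSE){
  sim <- matrix(c(1.0, 0.9, 0.1,
                  0.9, 1.0, 0.2,
                  0.1, 0.2, 1.0), nrow=3)
  tree <- stats::hclust(stats::as.dist(1 - sim), method="average")
  stats::cutree(tree, h = 1 - 0.8) # BC's 1 and 2 merge; BC 3 stays separate
}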
workflow_noisethreshold <- function(noise,noise_select,data_noisescree){ if(noise_select!=0){ ### Find where it starts increasing too much # 1. Find NOISE where ADD goes over mean (excluding mean of only first) # 2. Go back to NOISE before first increase now noise_threshold <- unlist(lapply(data_noisescree,FUN=function(data_temp){ stop1 <- which(cumsum(data_temp$Total)/c(1:nrow(data_temp))<data_temp$Total) stop1 <- stop1[!stop1==2][1] if(all(data_temp$Total[1:(stop1-1)]==0)){ return(data_temp$Noise[stop1]) } else if(noise_select==1){ return(data_temp$Noise[stop1-1]) }else if(noise_select==2){ while((data_temp$Total[stop1] - data_temp$Total[stop1-1])>0){ stop1 <- stop1-1 if(stop1==1){break} } return(data_temp$Noise[stop1]) } })) }else{ noise_threshold <- rep(noise,length(data_noisescree)) } return(noise_threshold) } # workflow_patternstep <- function(result,matrix,noise,noise_select,NoisexNumber,data_noisescree){ # # # # # #### ALTERNATIVE WAY: USING PATTERNBIBIT -> BUT NOT INTERESTED IN SUBPATTERNS SO OVERKILL! # # # # pattern_matrix <- result@NumberxCol+0 # # # # out <- BiBitR:::bibit3_alt(matrix,noise=noise,pattern_matrix=pattern_matrix,subpattern = FALSE) # # # # # # out2 <- new("Biclust",Parameters=result@Parameters, # # RowxNumber=do.call(cbind,lapply(out[1:result@Number],FUN=function(x){x$FullPattern@RowxNumber})), # # NumberxCol=do.call(rbind,lapply(out[1:result@Number],FUN=function(x){x$FullPattern@NumberxCol})), # # Number=result@Number, # # info=result@info) # # # # BC.delete <- rep(FALSE,out2@Number) # # for(i in 1:out2@Number){ # # if(sum(out2@RowxNumber[,i])==0 & sum(out2@NumberxCol[i,])==0){BC.delete[i] <- TRUE} # # } # # if(sum(BC.delete)>0){ # # index <- which(BC.delete) # # out2@Number <- out2@Number - length(index) # # out2@RowxNumber <- out2@RowxNumber[,-index,drop=FALSE] # # out2@NumberxCol <- out2@NumberxCol[-index,,drop=FALSE] # # out2@info$BC.Merge <- out2@info$BC.Merge[-index] # # cat("Number of biclusters deleted due to no rows fitting the pattern:",length(index),"\n") # # } # # # # out2 <- workflow_duplicate_BC(out2) # # cat("Total final BC's:",out2@Number,"\n") # # # # return(out2) # } workflow_duplicate_BC <- function(result,verbose=TRUE){ # cat("Checking for Duplicate Biclusters... 
") # In order to quickly delete duplicates, BC row and column memberships are encoded to 16bit words first nrow_data <- nrow(result@RowxNumber) ncol_data <- ncol(result@NumberxCol) nblockscol <- ceiling(ncol_data/16) nblocksrow <- ceiling(nrow_data/16) decBC_mat <- matrix(NA,nrow=result@Number,ncol=nblocksrow+nblockscol,dimnames=list(colnames(result@RowxNumber),NULL)) temp <- 1:nrow_data rowchunks <- split(temp,ceiling(seq_along(temp)/16)) temp <- 1:ncol_data colchuncks <- split(temp,ceiling(seq_along(temp)/16)) for(i.decBC in 1:result@Number){ for(i.rowblock in 1:nblocksrow){ decBC_mat[i.decBC,i.rowblock] <- strtoi(paste0(result@RowxNumber[rowchunks[[i.rowblock]],i.decBC]+0,collapse=""),2) } for(i.colblock in 1:nblockscol){ matindex <- i.colblock+nblocksrow decBC_mat[i.decBC,matindex] <- strtoi(paste0(result@NumberxCol[i.decBC,colchuncks[[i.colblock]]]+0,collapse=""),2) } } # Change order to original BC's appear first dup_temp <- duplicated(decBC_mat,MARGIN=1) dup_index <- which(dup_temp) if(length(dup_index)>0){ result@RowxNumber <- result@RowxNumber[,-dup_index,drop=FALSE] result@NumberxCol <- result@NumberxCol[-dup_index,,drop=FALSE] result@Number <- nrow(result@NumberxCol) result@info$Noise.Threshold <- result@info$Noise.Threshold[-dup_index] result@info$current_patterns <- result@info$current_patterns[-dup_index] decBC_mat <- decBC_mat[-dup_index,] } # cat("DONE\n") if(verbose){cat("Number of duplicate BC's deleted:",sum(dup_temp),"\n")} ######################## Check for contained if(verbose){cat("Number of contained within BC's deleted:")} contained_vector <- rep(NA,nrow(decBC_mat)) # note: go through all, but skip if current i.decBC or j.decBC is already in contained_vector # pb <- txtProgressBar(min=1,max=(nrow(decBC_mat)-1),initial=1,style=3) for(i.decBC in 1:(nrow(decBC_mat)-1)){ ## Progress dots # progress_dots(i=i.decBC,nBC=nrow(decBC_mat)-1) # setTxtProgressBar(pb,i.decBC) # if(!(i.decBC%in%contained_vector)){ for(j.decBC in (i.decBC+1):(nrow(decBC_mat))){ if(!(j.decBC%in%contained_vector)){ current_comp <- c(i.decBC,j.decBC) contained <- workflow_BCcontained(decBC_mat[i.decBC,],decBC_mat[j.decBC,]) if(!is.null(contained)){ contained_vector[current_comp[contained]] <- current_comp[contained] } } } } # cat(length(contained_vector[!is.na(contained_vector)]),"\n") } # close(pb) contained_vector <- contained_vector[!is.na(contained_vector)] if(length(contained_vector)>0){ contained_index <- contained_vector result@RowxNumber <- result@RowxNumber[,-contained_index] result@NumberxCol <- result@NumberxCol[-contained_index,] result@Number <- nrow(result@NumberxCol) result@info$Noise.Threshold <- result@info$Noise.Threshold[-contained_index] result@info$current_patterns <- result@info$current_patterns[-contained_index] } # cat("DONE\n") if(verbose){cat(length(contained_vector),"\n\n")} return(result) } workflow_BCcontained <- function(BC1word,BC2word){ intersectword <- apply(rbind(BC1word,BC2word),MARGIN=2,FUN=function(x){bitwAnd(x[1],x[2])}) if(all(intersectword==BC1word)){return(1)} if(all(intersectword==BC2word)){return(2)} } workflow_jaccard_bc <- function(result,BC1,BC2,type="both"){ if(length(type)>1){stop("type can only have 1 argument")} if(!(type%in%c("both","row","col"))){stop("type incorrect")} if(type=="both"){ row_contain_temp <- sum(which(result@RowxNumber[,BC1])%in%which(result@RowxNumber[,BC2])) col_contain_temp <- sum(which(result@NumberxCol[BC1,])%in%which(result@NumberxCol[BC2,])) m1 <- sum(result@RowxNumber[,BC1])*sum(result@NumberxCol[BC1,]) m2 <- 
sum(result@RowxNumber[,BC2])*sum(result@NumberxCol[BC2,]) m12 <- m1+m2-row_contain_temp*col_contain_temp JI <- (m1+m2-(m12))/(m12) return(JI) }else{ if(type=="row"){ x1 <- result@RowxNumber[,BC1] x2 <- result@RowxNumber[,BC2] } if(type=="col"){ x1 <- result@NumberxCol[BC1,] x2 <- result@NumberxCol[BC2,] } m1 <- sum(x1) m2 <- sum(x2) m12 <- sum(as.logical(x1+x2)) JI <- (m1+m2-m12)/m12 return(JI) } } workflow_test_all_rows <- function(result,matrix,p.adjust="BH",pattern=NULL){ cat("Applying Fisher Test to all rows...\n\n") out <- vector("list",length(pattern)) names(out) <- paste0("Pattern",pattern) # # Extract all column indices which are involved in patterns # columns_ignore <- unique(do.call(c,lapply(split(result@NumberxCol,seq(result@Number)),FUN=which))) # for(i.pattern in seq_len(length(pattern))){ cat("Pattern",pattern[i.pattern],"\n") out_df <- apply(matrix,MARGIN=1,FUN=function(row){ inside <- row[result@NumberxCol[pattern[i.pattern],]] outside <- row[!result@NumberxCol[pattern[i.pattern],]] out_df_temp <- data.frame( Noise=sum(inside==0), InsidePerc1=sum(inside==1)/length(inside), OutsidePerc1=sum(outside==1)/length(outside), # Prop_pvalue=prop.test(matrix(c(sum(inside==1),sum(inside==0), # sum(outside==1),sum(outside==0)),nrow=2,ncol=2,byrow=TRUE), # alternative="greater")$p.value, Fisher_pvalue=fisher.test(matrix(c(sum(inside==1),sum(outside==1), sum(inside==0),sum(outside==0)),nrow=2,ncol=2,byrow=TRUE))$p.value ) return(out_df_temp) }) out_df <- do.call(rbind,out_df) out_df <- cbind(data.frame(Names=rownames(matrix)),out_df) # out_df$Prop_pvalue_adj <- p.adjust(out_df$Prop_pvalue,method=p.adjust) out_df$Fisher_pvalue_adj <- p.adjust(out_df$Fisher_pvalue,method=p.adjust) out_df$Names <- as.character(out_df$Names) # out_df$index <- 1:nrow(out_df) rownames(out_df) <- NULL out[[i.pattern]] <- out_df } return(out) } clust_sel = function(x,y,jrange=3:25,dd=2) { ## x is an array, ## y is an hclust object wss4 = function(x,y,w = rep(1, length(y))) sum(lm(x~factor(y),weights = w)$resid^2*w) ### wss4 calculates within cluster sum of squares sm1 <- NULL for(i in jrange) sm1[i] = wss4(x,cutree(y,i)) sm1=sm1[jrange] k = if(dd==1) sm1[-1] else -diff(sm1) plot(jrange[-length(k)+1:0], -diff(k)/k[-length(k)]*100) jrange [sort.list(diff(k)/k[-length(k)]*100)[1:4]] } workflow_alternativeclusterselect <- function(result){ if(class(result)!="BiBitWorkflow"){stop("result needs to be a BiBitWorkflow object")} return(clust_sel(result$info$BiclustInitial@NumberxCol,result$info$Tree)) } #' @title Column Info of Biclusters #' @description Function that returns which column labels are part of the pattern derived from the biclusters. #' Additionally, a biclustmember plot and a general barplot of the column labels (retrieved from the biclusters) can be drawn. #' @author Ewoud De Troyer #' @export #' @param result A Biclust Object. #' @param matrix Accompanying data matrix which was used to obtain \code{result}. #' @param plots Which plots to draw: #' \enumerate{ #' \item Barplot of number of appearances of column labels in bicluster results. #' \item Biclustmember plot of BC results (see \code{\link[biclust]{biclustmember}}). #' } #' @param plot.type Output Type #' \itemize{ #' \item \code{"device"}: All plots are outputted to new R graphics devices (default). #' \item \code{"file"}: All plots are saved in external files. Plots are joint together in a single \code{.pdf} file. #' \item \code{"other"}: All plots are outputted to the current graphics device, but will overwrite each other. 
Use this if you want to include one or more plots in a sweave/knitr file or if you want to export a single plot in your own chosen format.
#' }
#' @param filename Base filename (with/without directory) for the plots if \code{plot.type="file"} (default=\code{"ColInfo"}).
#' @return A list object (length equal to number of Biclusters) in which vectors of column labels are saved.
#' @examples \dontrun{
#' data <- matrix(sample(c(0,1),100*100,replace=TRUE,prob=c(0.9,0.1)),nrow=100,ncol=100)
#' data[1:10,1:10] <- 1 # BC1
#' data[11:20,11:20] <- 1 # BC2
#' data[21:30,21:30] <- 1 # BC3
#' data <- data[sample(1:nrow(data),nrow(data)),sample(1:ncol(data),ncol(data))]
#' result <- bibit(data,minr=5,minc=5)
#' ColInfo(result=result,matrix=data)
#' }
ColInfo <- function(result,matrix,plots=c(1,2),plot.type="device",filename="ColInfo"){

  if(length(plot.type)!=1){stop("plot.type should be of length 1",call.=FALSE)}
  if(!(plot.type %in% c("device","file","other"))){stop("plot.type should be 'device', 'file' or 'other'",call.=FALSE)}
  if(!all(plots%in%c(1:2))){stop("plots should be part of c(1,2)")}
  FIRSTPLOT <- TRUE

  ## PARAMETER CHECKS ##
  # is.matrix()/inherits() used since class(x) has length 2 for matrices in R >= 4.0.0
  if(!is.matrix(matrix)){stop("matrix parameter should contain a matrix object",call.=FALSE)}
  if(!inherits(result,"Biclust")){stop("result needs to be of class 'Biclust'")}

  if(is.null(rownames(matrix))){rownames(matrix) <- paste0("Row",c(1:nrow(matrix)))}
  if(is.null(colnames(matrix))){colnames(matrix) <- paste0("Col",c(1:ncol(matrix)))}

  biclust_correctdim(result=result,matrix=matrix)

  out <- lapply(as.list(1:result@Number),FUN=function(i){
    return(colnames(matrix)[(result@NumberxCol[i,])])
  })
  names(out) <- paste0("BC",1:length(out))

  if(1 %in% plots){
    if(plot.type=="device"){
      dev.new()
    }else if(plot.type=="file" & FIRSTPLOT){
      pdf(paste0(filename,".pdf"))
      FIRSTPLOT <- FALSE
    }
    tab <- table(unlist(out))
    barplot(tab[order(tab,decreasing=TRUE)],las=2,col="lightblue",main="Number Column Appearances")
  }

  if(2 %in% plots){
    if(plot.type=="device"){
      dev.new()
    }else if(plot.type=="file" & FIRSTPLOT){
      pdf(paste0(filename,".pdf"))
      FIRSTPLOT <- FALSE
    }
    col_t <- viridis(101)
    biclustmember(x=matrix,bicResult=result,color=col_t)
    # col_t <- diverge_hcl(101, h = c(0, 130))
    legend(c(0.1,1.2),c(as.character(min(matrix)),as.character(max(matrix))),col=c(col_t[1],col_t[length(col_t)]),xpd=TRUE,bty="n",pch=15)
  }

  if(plot.type=="file" & length(plots)>0){dev.off()}

  return(out)
}

#' @title Barplots of Column Noise for Biclusters
#' @description Draws barplots of column noise of chosen biclusters. This plot can be helpful in determining which column label is often zero in noisy biclusters.
#' @author Ewoud De Troyer
#' @export
#' @param result A Biclust Object.
#' @param matrix Accompanying binary data matrix which was used to obtain \code{result}.
#' @param BC Numeric vector to select for which BC's a column noise bar plot should be drawn.
#' @param plot.type Output Type
#' \itemize{
#' \item \code{"device"}: All plots are outputted to new R graphics devices (default).
#' \item \code{"file"}: All plots are saved in external files. Plots are joined together in a single \code{.pdf} file.
#' \item \code{"other"}: All plots are outputted to the current graphics device, but will overwrite each other. Use this if you want to include one or more plots in a sweave/knitr file or if you want to export a single plot in your own chosen format.
#' }
#' @param filename Base filename (with/without directory) for the plots if \code{plot.type="file"} (default=\code{"ColNoise"}).
#' @return NULL
#' @examples \dontrun{
#' data <- matrix(sample(c(0,1),100*100,replace=TRUE,prob=c(0.9,0.1)),nrow=100,ncol=100)
#' data[1:10,1:10] <- 1 # BC1
#' data[11:20,11:20] <- 1 # BC2
#' data[21:30,21:30] <- 1 # BC3
#' data <- data[sample(1:nrow(data),nrow(data)),sample(1:ncol(data),ncol(data))]
#' result <- bibit2(data,minr=5,minc=5,noise=1)
#' ColNoiseBC(result=result,matrix=data,BC=1:3)
#' }
ColNoiseBC <- function(result,matrix,BC=1:result@Number,
                       plot.type="device",filename="ColNoise"){

  if(length(plot.type)!=1){stop("plot.type should be of length 1",call.=FALSE)}
  if(!(plot.type %in% c("device","file","other"))){stop("plot.type should be 'device', 'file' or 'other'",call.=FALSE)}
  FIRSTPLOT <- TRUE

  ## PARAMETER CHECKS ##
  # is.matrix()/is.numeric() used since class(x) has length 2 for matrices in R >= 4.0.0
  if(!inherits(result,"Biclust")){stop("result needs to be of class 'Biclust'")}
  if(!is.matrix(matrix)){stop("matrix parameter should contain a matrix object",call.=FALSE)}
  if(!identical(as.numeric(as.vector(matrix)),as.numeric(as.logical(matrix)))){stop("matrix is not a binary matrix!",call.=FALSE)}

  if(is.null(rownames(matrix))){rownames(matrix) <- paste0("Row",c(1:nrow(matrix)))}
  if(is.null(colnames(matrix))){colnames(matrix) <- paste0("Col",c(1:ncol(matrix)))}

  biclust_correctdim(result=result,matrix=matrix)

  if(!is.numeric(BC)){stop("BC should be a numeric vector")}
  if(any(BC<0)){stop("BC cannot be negative")}
  if(any(BC>result@Number)){stop(paste0("BC contains an unavailable BC. The biclustering result only has ",result@Number," BC's"))}

  for(i in BC){
    temp <- 1-apply(matrix[result@RowxNumber[,i],result@NumberxCol[i,]],MARGIN=2,FUN=sum)/sum(result@RowxNumber[,i])

    if(plot.type=="device"){
      dev.new()
    }else if(plot.type=="file" & FIRSTPLOT){
      pdf(paste0(filename,".pdf"))
      FIRSTPLOT <- FALSE
    }

    set.seed(1)
    col <- distinctColorPalette(length(temp))
    barplot(temp,ylim=c(0,1),main=paste0("Column Noise - BC ",i),xlab="",ylab="Noise Percentage",col=col,las=2,cex.names=0.8)
  }

  if(plot.type=="file"){
    dev.off()
  }

  return(NULL)
}

# ################
# library(xtable)
# summary_t <- function(result){
#   m <- rbind(apply(result@RowxNumber,MARGIN=2,FUN=sum),apply(result@NumberxCol,MARGIN=1,FUN=sum))
#   rownames(m) <- c("Number of Rows","Number of Columns")
#   colnames(m) <- paste0("BC",1:ncol(m))
#   print(xtable(m,align=paste0("r|",paste0(rep("r",ncol(m)),collapse=""))),hline.after=c(0),table.placement="H")
# }
# ---- End of file: BiBitR/R/workflowfunction.R ----
#'@title CausalGraphInferMainFunc function
#' @description
#' A framework to infer causality on binary data using techniques in frequent pattern mining and estimation statistics. Given a set of individual vectors S=\{x\} where x(i) is a realization value of binary variable i, the framework infers empirical causal relations of binary variables i,j from S in the form of a causal graph G=(V,E) where V is a set of nodes representing binary variables and there is an edge from i to j in E if the variable i causes j. The framework determines dependency among variables as well as analyzing confounding factors before deciding whether i causes j.
#'
#' Note that all statistics (e.g. means) and confidence intervals as well as hypothesis testing are inferred by bootstrapping.
#'
#' @param mat is a matrix n by d where n is a number of transactions or samples and d is a number of dimensions.
#' @param alpha is a significance threshold for hypothesis tests (Mann-Whitney)
#' that is deployed for testing degrees of dependency, association direction, and causal direction. The default is 0.05.
#' @param nboot is a number of bootstrap replicates for bootstrapping deployed to infer confidence intervals and distributions for hypothesis tests. The default is 100.
#' @param IndpThs is a threshold for the degree of dependency. In the independence test, to claim that any variables are dependent, the dependency degree must be greater than this value significantly. The default is 0.05.
#' @param CausalThs is a threshold for the degree of causal direction. In the causal-direction test, to claim that any variables have causal relations, the degree of causal direction must be greater than this value significantly. The default is 0.1.
#'
#' @return This function returns causal inference results.
#' \item{depRes}{The result of inferring dependencies between all pairs of variables.}
#' \item{ConfoundRes}{The result of filtering associations without true causal directions from any confounding factor.}
#' \item{CausalGRes}{The result of inferring causal directions between all pairs of dependent variables that have no confounding factors.}
#' \item{depRes$E0}{An adjacency matrix of undirected graph where there is an edge between any pair of variables if they are dependent.}
#' \item{depRes$E0pval}{A matrix of p-values from independence test of pairs of variables.}
#' \item{depRes$E0mean}{A matrix of means of dependency degrees between variables.}
#' \item{depRes$E0lowbound}{A matrix of lower bounds of dependency-degree confidence intervals between variables.}
#' \item{depRes$depInfo$'i,j'$bmean}{A mean of dependency degrees between variables i and j.}
#' \item{depRes$depInfo$'i,j'$confInv}{An \code{alpha}*100th percentile confidence interval of dependency degrees between variables i and j.}
#' \item{depRes$depInfo$'i,j'$testRes}{A Mann-Whitney hypothesis test result for an independence test between variables i and j. The null hypothesis is that the distributions of dependency degrees of i,j differ by a location shift of \code{IndpThs} and the alternative is that distributions of dependency degrees of i,j is shifted greater than \code{IndpThs}. }
#' \item{depRes$depInfo$'i,j'$indices}{A pair of indices of i and j in a numeric vector.}
#' \item{depRes$Dboot}{A list of \code{D}s (aligned list of transactions) that are generated from sampling with replacement on input samples (\code{mat}) \code{nboot} times.
}
#' \item{ConfoundRes$E1}{An adjacency matrix of undirected graph after filtering associations without true causal directions from any confounding factor.}
#' \item{ConfoundRes$E2}{A matrix of associations that have confounding factors where \code{E2[i,j]=0} if no confounding factor and \code{E2[i,j]=k} if k is a confounding factor of i and j.}
#' \item{CausalGRes$Ehat}{An adjacency matrix of directed causal graph where \code{CausalGRes$Ehat[i,j]=1} implies i causes j.}
#' \item{CausalGRes$EValHat}{An adjacency matrix of weighted directed causal graph where edge weights are estimated means of probabilities of effect being 1 given cause being either 1 for positive association or 0 for negative association, estimated using CondProb() and bootstrapping.}
#' \item{CausalGRes$causalInfo$'i,j'$CDirConfValInv}{An \code{alpha}*100th percentile confidence interval of estimated conditional probability of effect j being 1/0 given cause i's value being either the same (positive association) or opposite (negative association).}
#' \item{CausalGRes$causalInfo$'i,j'$CDirConfInv}{An \code{alpha}*100th percentile confidence interval of estimated causal direction degree of i cause j. }
#' \item{CausalGRes$causalInfo$'i,j'$CDirmean}{A mean-estimated-causal-direction degree of i cause j.}
#' \item{CausalGRes$causalInfo$'i,j'$testRes2}{A Mann-Whitney hypothesis test result for existence of causal direction. The null hypothesis is that the distributions of causal-direction degrees of i,j differ by a location shift of \code{CausalThs} and the alternative is that distributions of causal-direction degrees of i,j is shifted greater than \code{CausalThs}.}
#' \item{CausalGRes$causalInfo$'i,j'$testRes1}{A Mann-Whitney hypothesis test result for existence of association by odd differences from \code{oddDiffFunc()}. The null hypothesis is that the distributions of absolute odd difference of i,j differ by a location shift of \code{IndpThs} and the alternative is that distributions of absolute odd difference of i,j is shifted greater than \code{IndpThs}.}
#' \item{CausalGRes$causalInfo$'i,j'$sign}{A direction of i,j association: 1 for positive, 0 for negative, and -1 for no association.}
#' \item{CausalGRes$causalInfo$'i,j'$SignConfInv}{An \code{alpha}*100th percentile confidence interval of i,j odd difference from bootstrapping. }
#' \item{CausalGRes$causalInfo$'i,j'$Signmean}{A mean of i,j odd difference from bootstrapping.}
#'
#' @examples
#' \donttest{resC<-CausalGraphInferMainFunc(mat = mat, nboot =50)}
#'
#'@export
#'
CausalGraphInferMainFunc<-function(mat,alpha=0.05,nboot=100,IndpThs=0.05,CausalThs = 0.1)
{
  message("Inferring dependent graph")
  res<-bSCMDepndentGraphFunc(mat,nboot=nboot,alpha=alpha,IndpThs=IndpThs)
  message("Removing confounder(s)")
  res2<-bSCMdeConfoundingGraphFunc(res,IndpThs=IndpThs,alpha=alpha)
  message("Inferring causal graph")
  res3<-bSCMCausalGraphFunc(res2$E1,res$Dboot ,alpha=alpha,SignThs=IndpThs,CausalThs = CausalThs)
  return(list(depRes = res, ConfoundRes= res2, CausalGRes= res3))
}
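# End-to-end usage sketch, assuming the simulated `mat` shipped with the
# package (see data.R). Wrapped in if(FALSE) so it never runs at load time.
if(FALSE){
  resC <- CausalGraphInferMainFunc(mat = mat, nboot = 50)
  # list inferred causal edges: each row gives (cause index, effect index)
  which(resC$CausalGRes$Ehat == 1, arr.ind = TRUE)
}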
# ---- End of file: BiCausality/R/CausalGraphInferMainFunc.R ----
#'@title CondProb function
#'
#' @description
#' This function computes a confidence value of \code{y} given \code{z},
#' i.e. \code{conf(y|z)}, from an aligned list \code{D}.
#' For any \code{y[k]} and \code{z[k]}, their values are -1 by default.
#' The function computes the numbers of transactions
#' that satisfy the following conditions.
#'
#' 1) All transactions must have values at any k position equal to \code{z[k]}
#' for any \code{z[k]} that is not -1.
#' Let \code{countTotal} be the number of these transactions in \code{D}.
#' 2) Out of these transactions, all transactions must also have values at any k position equal to \code{y[k]}
#' for any \code{y[k]} that is not -1. Let \code{count} be the number of these transactions.
#'
#' @param D is an aligned list of transactions that was converted from any matrix n by d \code{mat} using
#' \code{D<-VecAlignment(mat)} where n is a number of transactions or samples
#' and d is a number of dimensions for each sample.
#' @param y is a d-dimensional vector.
#' @param z is a d-dimensional vector.
#'
#' @return This function returns the ratio \code{condP=count/countTotal}, which is the confidence of \code{y} given \code{z}.
#' \item{condP}{The confidence of \code{y} given \code{z} in \code{D}. }
#' \item{nD}{ The subset of \code{D} such that all transactions
#' have values at any position similar to \code{z[k]} when \code{z[k]} is not -1. }
#' \item{count}{ A number of transactions that have values at any position similar
#' to both \code{z[k]} and \code{y[k]} that are not -1. }
#' \item{countTotal}{ A number of transactions in \code{nD} }
#'
#'@examples
#'d=10 # dimensions of example vectors
#'z<-numeric(d)-1
#'y<-numeric(d)-1
#'y[1]<-c(1)
#'z[c(2,3)]<-c(1,1)
#'CondProb(BiCausality::D,y=y,z=z)$condP # conf(inx1 is 1 | inx 2,3 are 1), i.e. conf(y|z)
#'
#' @export
#'
CondProb<-function(D,y,z)
{
  p<-0
  filter<-z != -1
  if(sum(filter) == 0) # use the full D when there are no conditioning variables
    nD<-D
  else # keep only the transactions whose bits match the given z
  {
    nD<-list()
    for(i in seq(length(D)) ) # for each unique binary pattern D[[i]]
    {
      flag<-sum(D[[i]]$name[filter] == z[filter]) # check whether D[[i]] bits in z's positions are the same as z's
      if(flag == sum(filter)) # if so, add D[[i]] into nD
      {
        nD[[names(D)[i] ]] <- D[[i]]
      }
    }
  }
  if(length(nD)==0) # for the case when there is no transaction for z
  {
    return(list("condP"=NA, nD=nD,countTotal=0,count=0 ))
  }

  filterY<- y!= -1
  count<-0
  countTotal<-0
  for(i in seq(length(nD)) ) # for each unique binary pattern nD[[i]] in nD
  {
    flag<-sum(nD[[i]]$name[filterY] == y[filterY])
    countTotal<-countTotal+nD[[i]]$count
    if(flag == sum(filterY)) # check whether nD[[i]] bits in y's positions are the same as y's
    {
      count<-count+nD[[i]]$count
    }
  }
  return(list("condP"=count/countTotal, nD=nD,countTotal=countTotal,count=count ))
}
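# Worked example (sketch, not package API): with d = 3 binary variables,
# conf(x3 = 1 | x1 = 1, x2 = 1) = count/countTotal, where countTotal counts
# transactions with x1 = x2 = 1 and count additionally requires x3 = 1.
if(FALSE){
  m <- matrix(c(1,1,1,
                1,1,0,
                0,1,1), nrow=3, byrow=TRUE)
  D3 <- VecAlignment(m)
  y <- c(-1,-1, 1) # ask about x3 = 1
  z <- c( 1, 1,-1) # condition on x1 = 1 and x2 = 1
  CondProb(D3, y=y, z=z)$condP # 1/2: one of the two matching transactions has x3 = 1
}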
# ---- End of file: BiCausality/R/CondProb.R ----
#'@title VecAlignment function
#'
#' @description
#' This function rearranges the samples in \code{mat} into
#' an aligned list of transactions, which is mainly used by other functions in the package.
#' Suppose \code{mat[i,]} is a binary vector we are interested in; we use \code{A<-bin2dec(mat[i,])}
#' to store the decimal value of \code{mat[i,]} in \code{A}. Then, we call \code{D[[A]]$count}
#' to get the number of samples in \code{mat} that are similar to \code{mat[i,]}, and
#' \code{D[[A]]$name} is \code{mat[i,]}.
#'
#' @param mat is a matrix n by d where n is a number of transactions or samples and d is a number of dimensions.
#'
#' @return This function returns an aligned list of transactions \code{D}, which was converted from any matrix n by d \code{mat}.
#'
#' @examples
#' VecAlignment(mat=mat)
#'
#'@export
#'
VecAlignment<-function(mat)
{
  # mat must have the dimension n by d where n and d must be greater than 1.
  D<-list()
  n<-dim(mat)[1] # number of samples (rows)
  for(i in seq(n))
  {
    key<-sprintf("%d",bin2dec(mat[i,])) # decimal value of the i-th row, used as the list key
    D[[key]]$name<-mat[i,]
    if(is.null(D[[key]]$count)==TRUE )
      D[[key]]$count<-1
    else
      D[[key]]$count<-D[[key]]$count+1
  }
  return(D)
}

#'@title bin2dec function
#'
#'@description
#'This function converts a binary vector into its decimal value.
#'@param X is a binary vector where \code{X[i]} is the ith bit of the vector.
#'
#'@return This function returns a decimal value of \code{X}.
#'
#'@examples
#'bin2dec(X=c(1,1,1,0))
#'
#'@export
#'
bin2dec<-function(X)
{
  newx<-0
  d<-length(X)
  for(i in seq(d))
  {
    newx <- newx+X[i]*2^(d-i) # X[1] is the most significant bit
  }
  return(newx)
}
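# Worked example (sketch): bin2dec() treats the first element as the most
# significant bit, so c(1,1,1,0) maps to 2^3 + 2^2 + 2^1 = 14, and
# VecAlignment() uses that decimal value as the list key.
if(FALSE){
  bin2dec(c(1,1,1,0)) # 14
  m <- rbind(c(1,0), c(1,0), c(0,1))
  D2 <- VecAlignment(m)
  D2[["2"]]$count # 2: the pattern c(1,0) (key bin2dec(c(1,0)) = 2) occurs twice
}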
# ---- End of file: BiCausality/R/VecAlignment.R ----
#'@title adjustmentProb function #'@description #' This function evaluates the P(Y=yflag|do(X=xflag)) given only marginal distributions using parent adjustment method. #' #'@param EValHat is an adjacency matrix of weighted directed causal graph where edge weights are P(Y=yflag|X=xflag) or probabilities of effect being 1 given cause being either 1 for positive association or 0 for negative association. #'@param mat is a matrix n by d where n is a number of transactions or samples and d is a number of dimensions. #'@param yflag is value set for Y in P(Y=yflag|X=xflag,z) for the adjustment method. #'@param xflag is value set for X in P(Y=yflag|X=xflag,z) for the adjustment method. #' #'@return This function returns an adjacency matrix of weighted directed causal graph where the edge weights are P(Y=yflag|do(X=xflag) ). #' #' @examples #' adjustmentProb(resC$CausalGRes$EValHat,mat) #' #'@export #' adjustmentProb<-function(EValHat,mat,yflag=1,xflag=1) { #Work with positive association P(Y=yflag|X=xflag,Z) d<-dim(EValHat)[1] D<-VecAlignment(mat) adjEValHat<-matrix(0,nrow = d, ncol=d) for(i in seq(d)) for(j in seq(d)) { if(EValHat[i,j]>0) # P(Xj|Xi)>0 or xi -> xj { # if xi has no parent np<-sum(EValHat[,i]) if(np==0) { adjEValHat[i,j]<-EValHat[i,j] } else # if xi has any parent(s), it needs adjustment { filter1<-EValHat[,i]>0 # detect parents of xi (Zi -> xi) nBits<-2^np #print(filter1) for(b in seq(nBits)) # loop to all combination of Zi { # Z is a parent of xi b1<- b-1 vec1<-num2Bits(b1,n=np) z1<-numeric(d)-1 z1[filter1]<-vec1 pZi<-CondProb(D,z1,z=numeric(d)-1)$condP # P(Z=b1) message(sprintf("P(Z=%d)=%.2f",b1,pZi)) y1<-numeric(d)-1 y1[j]<-c(yflag) z2<-z1 z2[i]<-xflag # set Xi=xflag #print(z2) # P(Xj=yflag|Xi=xflag,Z=b1)*P(Z=b1) a1<-CondProb(D,y=y1,z=z2)$condP a1<-ifelse(is.na(a1),0,a1) message(sprintf("P(X%d =%d|X%d = %d,z%d)=%.2f",j,yflag,i,xflag,b1,a1)) adjEValHat[i,j]<-adjEValHat[i,j]+a1*pZi } } } } return(adjEValHat) } #'@title num2Bits function #'@description #'Given a natural number and number of bits, the function provides an n-dimensional vector of bits that represents \code{num}. #' The ith bits of binary vector represents the ith bit of \code{num}. #' For example, if \code{vec<-num2Bits(num=2,n=4)}, the first bit \code{vec[1]} is 0 and the second bit \code{vec[2]} is 1. #' #'@param num is a natural number. #'@param n is a number of bits representing \code{num}. #' #'@return This function returns an n-dimensional vector of bits that represents \code{num}. #' #' @examples #' num2Bits(num=10,n=4) #' #'@export #' num2Bits<-function(num,n=32) { res <- sapply(num,function(x){ as.integer(intToBits(x))}) return(as.numeric(res[1:n]) ) }
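# Note (sketch): num2Bits() is little-endian -- element 1 is the *least*
# significant bit, since it is based on intToBits() -- whereas bin2dec() in
# VecAlignment.R treats element 1 as the *most* significant bit, so the two
# representations are bit-reversed with respect to each other.
if(FALSE){
  num2Bits(num=14, n=4)               # c(0,1,1,1)
  bin2dec(rev(num2Bits(num=14, n=4))) # 14: reversing recovers the value
}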
# ---- End of file: BiCausality/R/adjustmentProb.R ----
#'@title assocSignTest function
#'
#' @description
#' This function infers association signs (positive/negative association) between i and j.
#' If there is a positive association, it implies i and j tend to have a similar value.
#' For a negative association, however, i and j tend to have an opposite value.
#'
#' @param mat is a matrix n by d where n is a number of transactions or samples and d is a number of dimensions.
#' @param i is an ith dimension in \code{mat}.
#' @param j is an jth dimension in \code{mat}.
#' @param z is a conditioning d-dimensional vector on \code{mat}.
#' Given k non-negative-bit positions of \code{z}, all k bit positions of samples in the subset of \code{mat} must have similar values with these bits.
#' @param alpha is a significance threshold for hypothesis tests (Mann-Whitney)
#' that is deployed for testing degrees of dependency, association direction, and causal direction. The default is 0.05.
#' @param nboot is a number of bootstrap replicates for bootstrapping deployed to infer confidence intervals and distributions for hypothesis tests. The default is 100.
#' @param IndpThs is a threshold for the degree of dependency. In the independence test, to claim that any variables are dependent, the dependency degree must be greater than this value significantly. The default is 0.05.
#'
#' @return This function returns results of inference of association signs (positive/negative association) between i and j.
#' \item{bmean}{A mean of sign dependency degrees between variables i and j.}
#' \item{confInv}{An \code{alpha}*100th percentile confidence interval of sign dependency degrees between variables i and j.}
#' \item{testRes}{A Mann-Whitney hypothesis test result for an independence test between variables i and j. The null hypothesis is that the distributions of dependency degrees of i,j differ by a location shift of \code{IndpThs} and the alternative is that distributions of dependency degrees of i,j is shifted greater than \code{IndpThs}. }
#'
#' @examples
#' assocSignTest(mat=mat,i=1,j=2)
#'@export
#'
assocSignTest<-function(mat,i,j,z=c(),alpha=0.05,IndpThs = 0.05,nboot=100)
{
  d<-dim(mat)[2]
  if(is.null(z))
    z<-numeric(d)-1
  n<-dim(mat)[1]
  bDx<-matrix(0,nboot,n)
  bSignDist<-numeric(nboot)
  for(k in seq(nboot))
  {
    bDx[k,]<-sample(1:n,length(1:n),replace = TRUE)
    nMat<-mat[bDx[k,],]
    D<-VecAlignment(nMat)
    bSignDist[k]<-oddDiffFunc(D,i,j,z=z)
  }
  testRes<-wilcox.test(x=abs(bSignDist), mu = IndpThs, alternative = "greater")
  confInv<-quantile(bSignDist, c(0+alpha/2, 1-alpha/2))
  bmean <- mean(bSignDist)
  return(list(testRes=testRes,confInv=confInv,bmean=bmean))
}
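# Usage sketch: the sign of `bmean` indicates the association direction
# (positive means variables i and j tend to take the same value), and a
# `testRes` p-value below alpha supports a non-zero association.
if(FALSE){
  res <- assocSignTest(mat=mat, i=1, j=2)
  res$bmean           # > 0 suggests a positive association
  res$testRes$p.value # compare against the chosen alpha
}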
# ---- End of file: BiCausality/R/assocSignTest.R ----
#'@title bIndpTest function
#'
#' @description
#' This function infers dependency for a pair of variables i,j with bootstrapping.
#'
#' @param mat is a matrix n by d where n is a number of transactions or samples and d is a number of dimensions.
#' @param i is an ith dimension in \code{mat}.
#' @param j is an jth dimension in \code{mat}.
#' @param z is a conditioning d-dimensional vector on \code{mat}.
#' Given k non-negative-bit positions of \code{z}, all k bit positions of samples in the subset of \code{mat} must have similar values with these bits.
#' @param alpha is a significance threshold for hypothesis tests (Mann-Whitney)
#' that is deployed for testing degrees of dependency, association direction, and causal direction. The default is 0.05.
#' @param nboot is a number of bootstrap replicates for bootstrapping deployed to infer confidence intervals and distributions for hypothesis tests. The default is 100.
#' @param IndpThs is a threshold for the degree of dependency. In the independence test, to claim that any variables are dependent, the dependency degree must be greater than this value significantly. The default is 0.05.
#' @param pflag is a flag for printing progress message (TRUE). The default is FALSE (no printing).
#'
#' @return This function returns results of dependency inference between i and j.
#' \item{bmean}{A mean of dependency degrees between variables i and j.}
#' \item{confInv}{An \code{alpha}*100th percentile confidence interval of dependency degrees between variables i and j.}
#' \item{testRes}{A Mann-Whitney hypothesis test result for an independence test between variables i and j. The null hypothesis is that the distributions of dependency degrees of i,j differ by a location shift of \code{IndpThs} and the alternative is that distributions of dependency degrees of i,j is shifted greater than \code{IndpThs}. }
#'
#' @examples
#' bIndpTest(mat=mat,i=1,j=2)
#'
#'@export
#'
bIndpTest<-function(mat,i,j,z=c(),alpha=0.05,IndpThs = 0.05,nboot=100,pflag=FALSE)
{
  d<-dim(mat)[2]
  if(is.null(z))
    z<-numeric(d)-1
  #z[2]<- -1
  n<-dim(mat)[1]
  bDx<-matrix(0,nboot,n)
  bIndpDist<-numeric(nboot)
  for(k in seq(nboot))
  {
    if(pflag==TRUE)
      message(sprintf("bIndpTest-boot#%d",k))
    bDx[k,]<-sample(1:n,length(1:n),replace = TRUE) # sampling index vector
    nMat<-mat[bDx[k,],] # sample from mat and save to nMat
    D<-VecAlignment(nMat) # align
    bIndpDist[k]<-indpFunc(D,i,j,z=z)
  }
  testRes<-wilcox.test(x=bIndpDist, mu = IndpThs, alternative = "greater")
  confInv<-quantile(bIndpDist, c(0+alpha/2, 1-alpha/2))
  bmean <- mean(bIndpDist)
  return(list(testRes=testRes,confInv=confInv,bmean=bmean))
}
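# Usage sketch: declare variables i and j dependent when the Mann-Whitney
# p-value is at most alpha; `confInv` is the bootstrap confidence interval
# of the dependency degree.
if(FALSE){
  res <- bIndpTest(mat=mat, i=1, j=2, nboot=50)
  res$testRes$p.value <= 0.05 # TRUE => variables 1 and 2 are dependent
  res$confInv
}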
# ---- End of file: BiCausality/R/bIndpTest.R ----
#'@title bSCMCausalGraphFunc function
#'
#' @description
#' This function infers a causal graph from a result of confounding factor filtering by \code{bSCMdeConfoundingGraphFunc()}.
#'
#' @param E1 is an adjacency matrix of undirected graph after filtering associations without true causal directions from any confounding factor.
#' @param alpha is a significance threshold for hypothesis tests (Mann-Whitney)
#' that is deployed for testing degrees of dependency, association direction, and causal direction. The default is 0.05.
#' @param Dboot is a list of \code{D}s (aligned list of transactions) that are generated from sampling with replacement on input samples (\code{mat}) \code{nboot} times.
#' @param SignThs is a threshold for the degree of dependency for association direction inference. In the independence test of sign direction, to claim that any variables are dependent, the dependency degree must be greater than this value significantly. The default is 0.05.
#' @param CausalThs is a threshold for the degree of causal direction. In the causal-direction test, to claim that any variables have causal relations, the degree of causal direction must be greater than this value significantly. The default is 0.25.
#'
#' @return This function returns causal inference results from the E1 matrix that is an output of \code{bSCMdeConfoundingGraphFunc}.
#' \item{Ehat}{An adjacency matrix of directed causal graph where \code{CausalGRes$Ehat[i,j]=1} implies i causes j.}
#' \item{EValHat}{An adjacency matrix of weighted directed causal graph where edge weights are estimated means of probabilities of effect being 1 given cause being either 1 for positive association or 0 for negative association, estimated using CondProb() and bootstrapping.}
#' \item{i}{An index}
#' \item{j}{An index}
#' \item{causalInfo$'i,j'$CDirConfValInv}{An \code{alpha}*100th percentile confidence interval of estimated conditional probability of effect j being 1/0 given cause i's value being either the same (positive association) or opposite (negative association).}
#' \item{causalInfo$'i,j'$CDirConfInv}{An \code{alpha}*100th percentile confidence interval of estimated causal direction degree of i cause j. }
#' \item{causalInfo$'i,j'$CDirmean}{A mean-estimated-causal-direction degree of i cause j.}
#' \item{causalInfo$'i,j'$testRes2}{A Mann-Whitney hypothesis test result for existence of causal direction. The null hypothesis is that the distributions of causal-direction degrees of i,j differ by a location shift of \code{CausalThs} and the alternative is that distributions of causal-direction degrees of i,j is shifted greater than \code{CausalThs}.}
#' \item{causalInfo$'i,j'$testRes1}{A Mann-Whitney hypothesis test result for existence of association by odd differences from \code{oddDiffFunc()}. The null hypothesis is that the distributions of absolute odd difference of i,j differ by a location shift of \code{IndpThs} and the alternative is that distributions of absolute odd difference of i,j is shifted greater than \code{IndpThs}.}
#' \item{causalInfo$'i,j'$sign}{A direction of i,j association: 1 for positive, 0 for negative, and -1 for no association.}
#' \item{causalInfo$'i,j'$SignConfInv}{An \code{alpha}*100th percentile confidence interval of i,j odd difference from bootstrapping.
} #' \item{causalInfo$'i,j'$Signmean}{A mean of i,j odd difference from bootstrapping.} #' #' @examples #' bSCMCausalGraphFunc(resC$ConfoundRes$E1,resC$depRes$Dboot) #'@export #' bSCMCausalGraphFunc<-function(E1,Dboot,alpha=0.05,SignThs=0.05,CausalThs = 0.25) { d<-dim(E1)[1] Ehat<-matrix(0,d,d) EValHat<-matrix(0,d,d) nboot= length(Dboot) bSignDist<-numeric(nboot) inxList<-c() causalInfo<-list() if(sum(E1) ==0) return(list(Ehat=E1,causalInfo=causalInfo)) for(i in seq(1,d-1)) for(j in seq(i+1,d)) { if(E1[i,j]==1) inxList<-rbind(inxList, c(i,j)) } signFlag<-numeric(dim(inxList)[1])-1 # == Create the bootstrapping sequence of bSignDist for( itr in seq(dim(inxList)[1]) ) { inx<-inxList[itr,] i=inx[1] j=inx[2] str<-sprintf("%d,%d",i,j) #print(str) for(k in seq(nboot)) { bSignDist[k]<-oddDiffFunc(D=Dboot[[k]],i=inx[1],j=inx[2]) } testRes1<-wilcox.test(x=abs(bSignDist), mu = SignThs, alternative = "greater") if(testRes1$p.value<=alpha) { if(mean(bSignDist) >0) signFlag[itr]<-1 else signFlag[itr]<-0 } if(signFlag[itr] != -1){ #========= dir inference bCausalDirDist<-numeric(nboot) bCausalDirValDistA<-numeric(nboot) # Y given Z bCausalDirValDistB<-numeric(nboot) # Z given Y for(k in seq(nboot)) { D<-Dboot[[k]] z1<-numeric(d)-1 y1<-numeric(d)-1 y1[j]<-1 z1[i]<-(signFlag[itr]) a1<-CondProb(D,y=y1,z=z1)$condP b1<-CondProb(D,y=z1,z=y1)$condP bCausalDirDist[k]<-a1-b1 bCausalDirValDistA[k]<-a1 bCausalDirValDistB[k]<-b1 } testRes2<-wilcox.test(x=abs(bCausalDirDist), mu = CausalThs, alternative = "greater") bmean <- mean( (bCausalDirDist) ) #print(bmean) dirFlag=1 if(testRes2$p.value<=alpha) { if(bmean >0) # i -> j { Ehat[i,j]<-1 EValHat[i,j]<-mean(bCausalDirValDistA) causalInfo[[str]]$CDirConfValInv<-abs(quantile(bCausalDirValDistA, c(0+alpha/2, 1-alpha/2)) ) } else #j -> i { Ehat[j,i]<-1 EValHat[j,i]<-mean(bCausalDirValDistB) str<-sprintf("%d,%d",j,i) dirFlag=-1 causalInfo[[str]]$CDirConfValInv<-abs(quantile(bCausalDirValDistB, c(0+alpha/2, 1-alpha/2)) ) } causalInfo[[str]]$CDirConfInv<-abs(quantile(dirFlag*bCausalDirDist, c(0+alpha/2, 1-alpha/2)) ) causalInfo[[str]]$CDirmean<-abs(bmean) causalInfo[[str]]$testRes2<-testRes2 causalInfo[[str]]$testRes1<-testRes1 causalInfo[[str]]$sign<-signFlag[itr] causalInfo[[str]]$SignConfInv<-quantile(bSignDist, c(0+alpha/2, 1-alpha/2)) causalInfo[[str]]$Signmean<-mean(bSignDist) } } } return(list(Ehat=Ehat,causalInfo=causalInfo,EValHat=EValHat) ) }
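# Usage sketch: turn the inferred adjacency matrix into a readable edge list,
# with the estimated conditional probability as the edge weight.
if(FALSE){
  cg <- bSCMCausalGraphFunc(resC$ConfoundRes$E1, resC$depRes$Dboot)
  e <- which(cg$Ehat == 1, arr.ind = TRUE)
  data.frame(cause = e[,1], effect = e[,2], weight = cg$EValHat[e])
}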
# ---- End of file: BiCausality/R/bSCMCausalGraphFunc.R ----
#'@title bSCMDepndentGraphFastFunc function
#' @description
#' This function infers dependencies for all pairs of variables without bootstrapping.
#'
#' @param mat is a matrix n by d where n is a number of transactions or samples and d is a number of dimensions.
#' @param IndpThs is a threshold for the degree of dependency. In the independence test, to claim that any variables are dependent, the dependency degree must be greater than this value significantly. The default is 0.05.
#'
#' @return This function returns results of dependency inference among variables.
#' \item{E0}{An adjacency matrix of undirected graph where there is an edge between any pair of variables if they are dependent.}
#' \item{E0raw}{A matrix of the degrees of dependency of variable pairs.}
#' @examples
#' bSCMDepndentGraphFastFunc(mat)
#'
#'@export
#'
bSCMDepndentGraphFastFunc<-function(mat,IndpThs=0.05)
{
  n<-dim(mat)[1]
  d<-dim(mat)[2]
  # Align the samples into a list of unique transactions (no bootstrapping here)
  D<-VecAlignment(mat)

  # Check dependency of all pairs
  E0<-matrix(0,nrow=d,ncol=d) # adjacency matrix
  E0raw<-E0                   # raw dependency degrees
  depInfo<-list()
  for(i in seq(d-1))
    for(j in seq(i+1,d))
    {
      str<-sprintf("%d,%d",i,j)
      #print(str)
      bIndpDist<-indpFunc(D,i,j,z=c())
      # check whether i is dependent with j
      E0raw[i,j]<-bIndpDist
      E0raw[j,i]<-bIndpDist
      if(bIndpDist>=IndpThs)
      {
        E0[i,j]<-1
        E0[j,i]<-1
      }
    }
  return(list(E0=E0,E0raw=E0raw) )
}
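# Usage sketch: E0raw keeps the raw dependency degrees, so the adjacency
# matrix can be re-derived under a stricter threshold without rerunning.
if(FALSE){
  fast <- bSCMDepndentGraphFastFunc(mat)
  E0strict <- (fast$E0raw >= 0.1)*1 # stricter than the default IndpThs = 0.05
}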
# ---- End of file: BiCausality/R/bSCMDepndentGraphFastFunc.R ----
#'@title bSCMDepndentGraphFunc function
#'
#' @description
#' This function infers dependencies for all pairs of variables with bootstrapping.
#'
#' @param mat is a matrix n by d where n is a number of transactions or samples and d is a number of dimensions.
#' @param alpha is a significance threshold for hypothesis tests (Mann-Whitney)
#' that is deployed for testing degrees of dependency, association direction, and causal direction. The default is 0.05.
#' @param nboot is a number of bootstrap replicates for bootstrapping deployed to infer confidence intervals and distributions for hypothesis tests. The default is 100.
#' @param IndpThs is a threshold for the degree of dependency. In the independence test, to claim that any variables are dependent, the dependency degree must be greater than this value significantly. The default is 0.05.
#' @param pflag is a flag for printing progress message (TRUE). The default is FALSE (no printing).
#'
#' @return This function returns results of dependency inference among variables.
#' \item{E0}{An adjacency matrix of undirected graph where there is an edge between any pair of variables if they are dependent.}
#' \item{E0pval}{A matrix of p-values from independence test of pairs of variables.}
#' \item{E0mean}{A matrix of means of dependency degrees between variables.}
#' \item{E0lowbound}{A matrix of lower bounds of dependency-degree confidence intervals between variables.}
#' \item{depInfo$'i,j'$bmean}{A mean of dependency degrees between variables i and j.}
#' \item{depInfo$'i,j'$confInv}{An \code{alpha}*100th percentile confidence interval of dependency degrees between variables i and j.}
#' \item{depInfo$'i,j'$testRes}{A Mann-Whitney hypothesis test result for an independence test between variables i and j. The null hypothesis is that the distributions of dependency degrees of i,j differ by a location shift of \code{IndpThs} and the alternative is that distributions of dependency degrees of i,j is shifted greater than \code{IndpThs}. }
#' \item{depInfo$'i,j'$indices}{A pair of indices of i and j in a numeric vector.}
#' \item{Dboot}{A list of \code{D}s (aligned list of transactions) that are generated from sampling with replacement on input samples (\code{mat}) \code{nboot} times.
} #' #' @examples #' \donttest{bSCMDepndentGraphFunc(mat, nboot=50)} #' #'@export #' bSCMDepndentGraphFunc<-function(mat,nboot=100,alpha=0.05,IndpThs=0.05,pflag=FALSE) { n<-dim(mat)[1] d<-dim(mat)[2] nMatboot<-list() Dboot<-list() bDx<-matrix(0,nboot,n) # == Create the bootstrapping sequence of D for(k in seq(nboot)) { if(pflag==TRUE) print(sprintf("bIndpTest-boot#%d",k)) bDx[k,]<-sample(1:n,length(1:n),replace = TRUE) nMatboot[[k]]<-mat[bDx[k,],] Dboot[[k]]<-VecAlignment(nMatboot[[k]]) } #Check dependency of all pairwises E0<-matrix(0,nrow=d,ncol=d) # save mean E0pval<-matrix(0,nrow=d,ncol=d) E0mean<-matrix(0,nrow=d,ncol=d) E0lowbound<-matrix(0,nrow=d,ncol=d) depInfo<-list() for(i in seq(d-1)) for(j in seq(i+1,d)) { str<-sprintf("%d,%d",i,j) #print(str) bIndpDist<-numeric(nboot) for(k in seq(nboot)) { D<-Dboot[[k]] bIndpDist[k]<-indpFunc(D,i,j,z=c()) } testRes<-wilcox.test(x=bIndpDist, mu = IndpThs, alternative = "greater") confInv<-quantile(bIndpDist, c(0+alpha/2, 1-alpha/2)) bmean <- mean(bIndpDist) #save value in matrices E0pval[i,j]<-testRes$p.value E0pval[j,i]<-E0pval[i,j] E0mean[i,j]<-bmean E0mean[j,i]<-E0mean[i,j] E0lowbound[i,j]<-confInv[1] E0lowbound[j,i]<-E0lowbound[i,j] #check whether i is dependent with j if(testRes$p.value<=alpha) { E0[i,j]<-1 E0[j,i]<-E0[i,j] depInfo[[str]]$bmean<-bmean depInfo[[str]]$confInv<-confInv depInfo[[str]]$testRes<-testRes depInfo[[str]]$indices<-c(i,j) } } return(list(E0=E0,E0pval=E0pval,E0mean=E0mean,E0lowbound=E0lowbound,depInfo=depInfo,Dboot=Dboot) ) }
# ---- End of file: BiCausality/R/bSCMDepndentGraphFunc.R ----
#'@title bSCMdeConfoundingGraphFunc function
#'
#' @description
#' This function removes any association/dependency of variables i,j that have any confounding factor k s.t. given k, i and j are independent.
#'
#' @param dat is the result of inferring dependencies between all pairs of variables from \code{bSCMDepndentGraphFunc()}.
#' @param alpha is a significance threshold for hypothesis tests (Mann-Whitney)
#' that is deployed for testing degrees of dependency, association direction, and causal direction. The default is 0.05.
#' @param IndpThs is a threshold for the degree of dependency. In the independence test, to claim that any variables are dependent, the dependency degree must be greater than this value significantly. The default is 0.05.
#'
#' @return This function returns an adjacency matrix of dependencies that have no confounding factors.
#' \item{E1}{An adjacency matrix of undirected graph after filtering associations without true causal directions from any confounding factor.}
#' \item{E2}{A matrix of associations that have confounding factors where \code{E2[i,j]=0} if no confounding factor and \code{E2[i,j]=k} if k is a confounding factor of i and j.}
#'
#' @examples
#' bSCMdeConfoundingGraphFunc(resC$depRes)
#'
#' @importFrom stats quantile wilcox.test
#'@export
#'
bSCMdeConfoundingGraphFunc<-function(dat,IndpThs=0.05,alpha=0.05)
{
  E0<-dat$E0
  d<-dim(E0)[1]
  E1<-matrix(0,d,d)
  E2<-matrix(0,d,d)
  nboot<-length(dat$Dboot)
  for(info in dat$depInfo)
  {
    i<-info$indices[1]
    j<-info$indices[2]
    # i and j have another dependent variable
    if(E0[i,j]==1 && sum(E0[i,])>1 && sum(E0[j,])>1)
    {
      z<- 1:d
      z<-z[E0[i,] & E0[j,]]
      flag=0
      if(length(z)>0)
        for(z0 in z)
        {
          #print(sprintf("%d,%d | %d",i,j,z0))
          bIndpDist<-numeric(nboot)
          z1<-numeric(d)-1
          z1[z0]<-1
          z2<-numeric(d)-1
          z2[z0]<-0
          for(b in seq(nboot) )
          {
            nD<-dat$Dboot[[b]]
            bIndpDist[b]<- max(c(indpFunc(nD,i,j,z1), indpFunc(nD,i,j,z2) ) )
          }
          testRes<-wilcox.test(x=bIndpDist, mu = IndpThs, alternative = "greater")
          #print(sprintf("pval:%f",testRes$p.value))
          # check whether i is dependent with j given z0
          if(testRes$p.value>alpha)
          {
            flag=1
            E2[i,j]<-z0
            break
          }
        }
      if(flag==0)
      {
        E1[i,j]<-1
        E1[j,i]<-1
      }
    }
    else
    {
      E1[i,j]<-1
      E1[j,i]<-1
    }
  }
  return(list(E1=E1,E2=E2))
}
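# Usage sketch: non-zero entries of E2 name the confounding variable, i.e.
# E2[i,j] = k means the i-j association disappears once k is conditioned on.
if(FALSE){
  conf <- bSCMdeConfoundingGraphFunc(resC$depRes)
  idx <- which(conf$E2 != 0, arr.ind = TRUE)
  cbind(i = idx[,1], j = idx[,2], confounder = conf$E2[idx])
}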
# ---- End of file: BiCausality/R/bSCMdeConfoundingGraphFunc.R ----
#' @title comparePredAdjMatrix2TrueAdjMat
#'
#' @description
#'
#' comparePredAdjMatrix2TrueAdjMat is a support function that compares two adjacency matrices: a ground-truth matrix and an inferred matrix.
#'
#' @param trueAdjMat a ground-truth matrix.
#' @param adjMat an inferred matrix.
#'
#' @return This function returns a list of precision \code{prec}, recall \code{rec}, and F1 score \code{F1} of inferred vs. groundtruth matrices.
#'
#' @examples
#' # Generate simulation data
#' G<-matrix(FALSE,10,10) # groundtruth
#' G[1,c(4,7,8,10)]<-TRUE
#' G[2,c(5,7,9,10)]<-TRUE
#' G[3,c(6,8,9,10)]<-TRUE
#' comparePredAdjMatrix2TrueAdjMat(trueAdjMat=G,adjMat=G)
#'
#'@export
comparePredAdjMatrix2TrueAdjMat<-function(trueAdjMat,adjMat)
{
  TP<-0
  FP<-0
  FN<-0
  # Iterate over the full matrix dimensions instead of a hard-coded 10,
  # so that matrices of any (matching) size are supported.
  for(i in seq(nrow(trueAdjMat)))
    for(j in seq(ncol(trueAdjMat)))
    {
      if(trueAdjMat[i,j] && adjMat[i,j])
        TP<-TP+1
      else if( (!trueAdjMat[i,j]) && adjMat[i,j])
        FP<-FP+1
      else if(trueAdjMat[i,j] && (!adjMat[i,j]) )
        FN<-FN+1
    }
  prec<-TP/(TP+FP)
  rec<-TP/(TP+FN)
  F1<-2*prec*rec/(prec+rec)
  return(list(prec=prec,rec=rec,F1=F1))
}
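# Usage sketch: the ground-truth graph of the simulated `mat` (see data.R)
# has edges 2->1, 3->1, 2->4, 5->4, 1->6 and 4->6, so an inferred graph can
# be scored directly against it.
if(FALSE){
  G <- matrix(FALSE, 10, 10)
  G[2,1] <- G[3,1] <- TRUE # x1 is caused by x2 and x3
  G[2,4] <- G[5,4] <- TRUE # x4 is caused by x2 and x5
  G[1,6] <- G[4,6] <- TRUE # x6 is caused by x1 and x4
  comparePredAdjMatrix2TrueAdjMat(trueAdjMat = G, adjMat = resC$CausalGRes$Ehat)
}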
# ---- End of file: BiCausality/R/comparePredAdjMatrix2TrueAdjMat.R ----
#'@title confNetFunc function
#' @description
#' This function computes a confidence network in data mining.
#' Given a set of n transactions or samples in \code{mat} s.t. each transaction has d binary items,
#' \code{conf(mat[,j]=1|mat[,i]=1)} is the ratio of the number of samples whose jth and ith dimensions are both one
#' divided by the number of samples whose ith dimension is one.
#' The confNetFunc computes the network where the nodes are dimensions and the edge weights are \code{conf(mat[,j]=1|mat[,i]=1)} for any directed edge from i to j.
#'
#' @param mat is a matrix n by d where n is a number of transactions or samples and d is a number of dimensions.
#' @param ths is a threshold parameter for cutting off the edge weights. There exists a directed edge from i to j if its edge weight is above or equal to \code{ths}.
#' @return This function returns a binary adjacency matrix \code{confNet} and the weighted adjacency matrix \code{confValMat}.
#' \item{confNet}{A binary adjacency matrix that has \code{confNet[i,j]=1} if \code{confValMat[i,j]>=ths}. Otherwise, it is zero.}
#' \item{confValMat}{A weighted adjacency matrix where \code{confValMat[i,j]} is \code{conf(mat[,j]=1|mat[,i]=1)}.}
#'
#' @examples
#' res<-confNetFunc(mat)
#'
#'@export
#'
confNetFunc<-function(mat,ths=0.1)
{
  d<-dim(mat)[2]
  confNet<-matrix(0,nrow=d,ncol=d)
  D<-VecAlignment(mat)
  confValMat<-matrix(0,nrow=d,ncol=d)
  #===
  for(i in seq(d-1))
    for(j in seq(i+1,d))
    {
      z<-numeric(d)-1
      y<-numeric(d)-1
      y[j]<-c(1)
      z[i]<-c(1)
      a1<-CondProb(D,y=y,z=z)$condP # conf(y|z)
      b1<-CondProb(D,y=z,z=y)$condP # conf(z|y)
      confValMat[i,j]<-a1
      confValMat[j,i]<-b1
      if((a1-b1)>0 && a1>=ths)
        confNet[i,j]<-1
      else if((b1-a1)>0 && b1>= ths)
        confNet[j,i]<-1
      else
        confNet[i,j]<-0
    }
  return(list(confNet=confNet,confValMat=confValMat) )
}
/scratch/gouwar.j/cran-all/cranData/BiCausality/R/confNetFunc.R
#' A simulation dataset
#'
#'@description
#' A dataset containing simulated data that is used for examples in the package.
#' The matrix \code{mat} is generated by the following code.
#'
#' \code{seedN<-2022}
#'
#'\code{n<-200} # 200 individuals
#'
#'\code{d<-10} # 10 variables
#'
#'\code{mat<-matrix(nrow=n,ncol=d)} # the input of framework
#'
#' #Simulate binary data from a Bernoulli distribution where the probability of a value being 1 is 0.5.
#'
#'\code{for(i in seq(n)) }
#'\code{ \{ set.seed(seedN+i) }
#'
#'\code{ mat[i,] <- rbinom(n=d, size=1, prob=0.5) \} }
#'
#'
#'\code{mat[,1]<-mat[,2] | mat[,3] } # 1 is caused by 2 and 3
#'
#'\code{mat[,4] <-mat[,2] | mat[,5] }# 4 is caused by 2 and 5
#'
#'\code{mat[,6] <- mat[,1] | mat[,4] } # 6 is caused by 1 and 4
#'
#' @format A matrix with 200 samples and 10 dimensions generated from a Bernoulli distribution.
#' \describe{
#' \item{mat}{ It is a 200 by 10 matrix where n is a number of transactions or samples and d is a number of dimensions.}
#' ...
#' }
"mat"

#' An example of an aligned list of transactions
#'
#'@description
#' A dataset containing simulated data that is used for examples in the package.
#'
#' The \code{D} is an aligned list of transactions that was converted by using \code{D<-VecAlignment(mat)}.
#'
#' @format An aligned list of a matrix with 200 samples and 10 dimensions generated from a Bernoulli distribution.
#' \describe{
#' \item{D}{It is an aligned list of transactions that was converted from \code{mat}.}
#' }
#'
"D"

#' An example of a causal inference result
#'
#'@description
#' A dataset containing a result of causal inference from simulated data that is used for examples in the package.
#'
#' @format A result of causal inference using \code{mat} as an input.
#' \describe{
#' \item{resC}{It is a result of causal inference using \code{mat} as an input, obtained by running
#' \code{resC<-BiCausality::CausalGraphInferMainFunc(mat = mat,CausalThs=0.1, nboot =50, IndpThs=0.05)}.}
#' }
#'
"resC"
/scratch/gouwar.j/cran-all/cranData/BiCausality/R/data.R
#' getReachableNodes function
#'
#' getReachableNodes is a support function for inferring reachable nodes that have some directed path to a node \code{targetNode}.
#' This function uses the breadth-first search (BFS) algorithm.
#'
#' @param adjMat is an adjacency matrix of a directed graph of which its elements are binary: zero for no edge, and one for having an edge.
#' @param targetNode is a node in a graph for which we want to find the set of nodes that can reach this target node via some paths.
#'
#' @return This function returns a set of node IDs that have some directed path to a node \code{targetNode}.
#'
#' @examples
#' # Given an example of adjacency matrix
#' A<-matrix(FALSE,5,5)
#' A[2,1]<-TRUE
#' A[c(3,4),2]<-TRUE
#' A[5,3]<-TRUE
#' # Get a set of reachable nodes of targetNode.
#'
#' followers<-getReachableNodes(adjMat=A,targetNode=1)
#'
#'@export
#'
getReachableNodes<-function(adjMat,targetNode)
{
  N<-dim(adjMat)[1]
  IDs<-1:N
  Qmembers<-IDs[as.logical(adjMat[,targetNode])] # finding nodes that have directed edges to targetNode
  flag<-logical(N)
  flag[Qmembers]<-TRUE
  while(length(Qmembers)>0) # loop until no members remain within the queue Qmembers (BFS)
  {
    ID<-Qmembers[1] # read the head of the queue
    Qmembers<-Qmembers[-1] # dequeue
    currNewMembers<-IDs[as.logical(adjMat[,ID])] # list all followers of ID
    if(length(currNewMembers) <1) # if no follower, then just skip this iteration
      next
    for(i in currNewMembers) # for each follower in currNewMembers
    {
      if(!flag[i]) # if i is a new follower never detected before, then add i to the queue
      {
        flag[i]<-TRUE
        Qmembers<-c(Qmembers,i)
      }
    }
  }
  members<-IDs[flag] # return all reachable nodes that have some path to targetNode
  return(members)
}

#' getTransitiveClosureMat function
#'
#' getTransitiveClosureMat is a support function for inferring a transitive-closure adjacency matrix.
#'
#' @param adjMat is an adjacency matrix of a directed graph of which its elements are binary: zero for no edge, and one for having an edge.
#'
#' @return This function returns a transitive-closure adjacency matrix.
#' @examples
#' # Given an example of adjacency matrix
#' A<-matrix(FALSE,5,5)
#' A[2,1]<-TRUE
#' A[c(3,4),2]<-TRUE
#' A[5,3]<-TRUE
#' # Get the transitive-closure adjacency matrix.
#'
#' trsClosureMat<-getTransitiveClosureMat(adjMat=A)
#'
#' @export
getTransitiveClosureMat<-function(adjMat)
{
  d<-dim(adjMat)[2]
  clsMat<-matrix(0,d,d)
  for(i in seq(1,d))
  {
    row<-getReachableNodes(adjMat = adjMat,targetNode = i)
    if(length(row) != 0)
      clsMat[row,i]<-TRUE
  }
  return(clsMat)
}
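# A worked sketch on the adjacency matrix from the examples above: node 1 is
# reached directly from node 2, node 2 from nodes 3 and 4, and node 3 from
# node 5, so the nodes that can reach node 1 are 2, 3, 4 and 5.
if(FALSE){
  A <- matrix(FALSE,5,5)
  A[2,1] <- TRUE
  A[c(3,4),2] <- TRUE
  A[5,3] <- TRUE
  getReachableNodes(adjMat = A, targetNode = 1)  # expected: 2 3 4 5
  getTransitiveClosureMat(adjMat = A)[,1]        # column 1 marks the same set
}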
/scratch/gouwar.j/cran-all/cranData/BiCausality/R/getReachableNodes.R
#'@title indpFunc function
#'
#' @description This function computes the degree of dependency between variables.
#' Let i and j be variables; if they are independent, then |p(i,j) - p(i)*p(j)| should be zero.
#' Given the samples in the n by d matrix \code{mat} where n is a number of samples and d is a number of dimensions,
#' an aligned list of transactions \code{D} is computed by
#' \code{D<-VecAlignment(mat)}.
#'
#'@param D is an aligned list of transactions that was converted from \code{mat}.
#'@param i is the ith dimension in \code{mat}.
#'@param j is the jth dimension in \code{mat}.
#'@param z is a conditioning d-dimensional vector on \code{D}.
#' Given k non-negative-bit positions of \code{z}, all k bit positions of samples in the subset of \code{D} must have the same values as these bits.
#'
#'@return This function returns the degree of dependency between variables:
#' zero implies that both variables are independent, and a non-zero value implies the degree of dependency (higher implies a higher degree of dependency).
#'
#' @examples
#' indpFunc(D,i=1,j=2)
#'
#'@export
#'
indpFunc<-function(D,i,j,z=c())
{
  d<-length(D[[1]]$name)
  if(is.null(z)) # go over the full D without conditional variables
    z<-numeric(d)-1
  res<-CondProb(D,y=numeric(d)-1,z=z) # get the total n
  D<-res$nD
  n<-res$countTotal
  indMagnitude<-0
  z1<-numeric(d)-1
  for(i1 in c(0,1) )
    for(j1 in c(0,1))
    {
      y1<-numeric(d) -1
      y1[c(i,j)] <- c(i1,j1) # supp(i,j)
      y2<-numeric(d) -1
      y2[c(i)] <- i1 # supp(i)
      y3<-numeric(d) -1
      y3[c(j)] <- j1 # supp(j)

      res2<-CondProb(D,y1,z1)
      n2<-res2$count
      condPair<-res2$condP
      condi<-CondProb(D,y2,z1)$condP
      condj<-CondProb(D,y3,z1)$condP
      # |p(i,j) - p(i)*p(j)| * weight
      indMagnitude<-indMagnitude+ (abs(condPair - condi*condj)*(n2/n) )
    }
  return(indMagnitude)
}
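# A direct re-computation of the dependency measure from the raw matrix 'mat'
# (a sketch, assuming the packaged data 'mat' with D <- VecAlignment(mat) and
# no conditioning): the measure is the sum over the four joint states of
# |p(i,j) - p(i)*p(j)| weighted by the empirical joint probability p(i,j).
if(FALSE){
  i <- 1; j <- 2
  dep <- 0
  for(a in 0:1) for(b in 0:1){
    pab <- mean(mat[,i]==a & mat[,j]==b)
    dep <- dep + abs(pab - mean(mat[,i]==a)*mean(mat[,j]==b))*pab
  }
  dep   # should agree with indpFunc(D, i=1, j=2)
}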
/scratch/gouwar.j/cran-all/cranData/BiCausality/R/indpFunc.R
#'@title oddRatioFunc function
#'
#'@description
#'Given the samples in the n by d matrix \code{mat} where n is a number of samples and d is a number of dimensions,
#'this function computes an odds ratio of the variables of the ith and jth dimensions from
#'a given aligned list of transactions \code{D} (computed by \code{D<-VecAlignment(mat)}).
#'
#'@param D is an aligned list of transactions that was converted from \code{mat}.
#'@param i is the ith dimension in \code{mat} for computing the odds ratio.
#'@param j is the jth dimension in \code{mat} for computing the odds ratio.
#'@param z is a conditioning d-dimensional vector on \code{D}.
#' Given k non-negative-bit positions of \code{z}, all k bit positions of samples in the subset of \code{D} must have the same values as these bits.
#'@param slack is a parameter to prevent the issue of division by zero.
#'
#'@return This function returns an odds ratio of the variables of the ith and jth dimensions from \code{D}.
#'
#' @examples
#' oddRatioFunc(D,i=1,j=2)
#'
#'@export
#'
oddRatioFunc<-function(D,i,j,z=c(),slack=0.001)
{
  d<-length(D[[1]]$name)
  if(is.null(z))
    z<-numeric(d)-1
  res<-CondProb(D,y=numeric(d)-1,z=z)
  D<-res$nD
  n<-res$countTotal
  z1<-numeric(d)-1
  y<-numeric(d)-1

  y[c(i,j)]<-c(0,0)
  a1<-CondProb(D,y,z1)$condP+slack
  y[c(i,j)]<-c(1,1)
  b1<-CondProb(D,y,z1)$condP+slack
  y[c(i,j)]<-c(1,0)
  c1<-CondProb(D,y,z1)$condP+slack
  y[c(i,j)]<-c(0,1)
  d1<-CondProb(D,y,z1)$condP+slack

  return(a1*b1/(c1*d1))
}

#'@title oddDiffFunc function
#'@description
#'Given the samples in the n by d matrix \code{mat} where n is a number of samples and d is a number of dimensions,
#'this function computes an odds difference of the variables of the ith and jth dimensions from
#'a given aligned list of transactions \code{D} (computed by \code{D<-VecAlignment(mat)}).
#'
#'@param D is an aligned list of transactions that was converted from \code{mat}.
#'@param i is the ith dimension in \code{mat} for computing the odds difference.
#'@param j is the jth dimension in \code{mat} for computing the odds difference.
#'@param z is a conditioning d-dimensional vector on \code{D}.
#' Given k non-negative-bit positions of \code{z}, all k bit positions of samples in the subset of \code{D} must have the same values as these bits.
#'
#'@return This function returns an odds difference of the variables of the ith and jth dimensions from \code{D}.
#'
#' @examples
#' oddDiffFunc(D,i=1,j=2)
#'
#'@export
#'
oddDiffFunc<-function(D,i,j,z=c())
{
  d<-length(D[[1]]$name)
  if(is.null(z))
    z<-numeric(d)-1
  res<-CondProb(D,y=numeric(d)-1,z=z)
  D<-res$nD
  n<-res$countTotal
  z1<-numeric(d)-1
  y<-numeric(d)-1

  y[c(i,j)]<-c(0,0)
  a1<-CondProb(D,y,z1)$condP
  y[c(i,j)]<-c(1,1)
  b1<-CondProb(D,y,z1)$condP
  y[c(i,j)]<-c(1,0)
  c1<-CondProb(D,y,z1)$condP
  y[c(i,j)]<-c(0,1)
  d1<-CondProb(D,y,z1)$condP

  return( a1*b1-(c1*d1) )
}
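# A direct 2x2 re-computation of the odds difference (a sketch, assuming the
# packaged data 'mat' with D <- VecAlignment(mat) and no conditioning):
# oddDiffFunc returns P(i=0,j=0)*P(i=1,j=1) - P(i=1,j=0)*P(i=0,j=1).
if(FALSE){
  i <- 1; j <- 2
  p00 <- mean(mat[,i]==0 & mat[,j]==0); p11 <- mean(mat[,i]==1 & mat[,j]==1)
  p10 <- mean(mat[,i]==1 & mat[,j]==0); p01 <- mean(mat[,i]==0 & mat[,j]==1)
  p00*p11 - p10*p01   # should agree with oddDiffFunc(D, i=1, j=2)
}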
/scratch/gouwar.j/cran-all/cranData/BiCausality/R/oddRatioFunc.R
#'@title supp function
#'
#' @description
#' This function computes a support value from a matrix \code{X} given a vector \code{values}.
#'
#' @param X is a matrix n by d where n is a number of transactions or samples
#' and d is a number of dimensions for each sample.
#' @param values is a d-dimensional vector whose occurrences within \code{X} we want to count.
#'
#' @return This function returns the support of \code{values} in \code{X}:
#' the fraction of samples in \code{X} that are equal to \code{values}.
#'@examples
#' x <- rbinom(n=100, size=1, prob=0.5)
#' ny<-rbinom(n=100, size=1, prob=0.25)
#' y <- x | ny
#' supp(X=cbind(x,y),values=c(1,1) )
#'
#' @export
#'
supp<-function(X,values)
{
  count<-0
  n<-0
  flag=0
  if(is.null(dim(X)[1]))
  {
    n<-length(X)
    flag=1
  }
  else
    n<-dim(X)[1]

  for(i in seq(n))
  {
    if(flag==1)
      row<-X[i]
    else
      row<-X[i,]

    if(sum(row==values) == length(values))
      count=count+1
  }
  return(count/n)
}
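# A direct check of the support computation (a sketch): the support of
# values=c(1,1) is simply the fraction of rows equal to (1,1).
if(FALSE){
  set.seed(1)
  x <- rbinom(n=100, size=1, prob=0.5)
  y <- x | rbinom(n=100, size=1, prob=0.25)
  supp(X=cbind(x,y), values=c(1,1))
  mean(x==1 & y==1)   # the same ratio computed directly
}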
/scratch/gouwar.j/cran-all/cranData/BiCausality/R/supp.R
## -----------------------------------------------------------------------------
seedN<-2022
n<-200 # 200 individuals
d<-10 # 10 variables
mat<-matrix(nrow=n,ncol=d) # the input of framework

#Simulate binary data from a Bernoulli distribution where the probability of a value being 1 is 0.5.
for(i in seq(n))
{
 set.seed(seedN+i)
 mat[i,] <- rbinom(n=d, size=1, prob=0.5)
}

mat[,1]<-mat[,2] | mat[,3] # 1 is caused by 2 and 3
mat[,4] <-mat[,2] | mat[,5] # 4 is caused by 2 and 5
mat[,6] <- mat[,1] | mat[,4] # 6 is caused by 1 and 4

## -----------------------------------------------------------------------------
# Run the function
library(BiCausality)
resC<-BiCausality::CausalGraphInferMainFunc(mat = mat,CausalThs=0.1, nboot =50, IndpThs=0.05)

## -----------------------------------------------------------------------------
resC$CausalGRes$Ehat

## -----------------------------------------------------------------------------
library(igraph)
net <- graph_from_adjacency_matrix(resC$CausalGRes$Ehat ,weighted = NULL)
plot(net, edge.arrow.size = 0.3, vertex.size =20 , vertex.color = '#D4C8E9',layout=layout_with_kk)

## -----------------------------------------------------------------------------
resC$CausalGRes$causalInfo[['2,1']]
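## -----------------------------------------------------------------------------
# A sanity-check sketch (mirrors the comparison at the end of the vignette):
# encode the ground-truth edges of the simulation above and compute precision,
# recall and F1 of the inferred graph.
trueAdj <- matrix(0, 10, 10)
trueAdj[2,1] <- trueAdj[3,1] <- 1 # 1 is caused by 2 and 3
trueAdj[2,4] <- trueAdj[5,4] <- 1 # 4 is caused by 2 and 5
trueAdj[1,6] <- trueAdj[4,6] <- 1 # 6 is caused by 1 and 4
comparePredAdjMatrix2TrueAdjMat(trueAdjMat = trueAdj, adjMat = resC$CausalGRes$Ehat)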
/scratch/gouwar.j/cran-all/cranData/BiCausality/inst/doc/BiCausality_demo.R
title: "BiCausality: Binary Causality Inference Framework" author: " C. Amornbunchornvej" date: "`r Sys.Date()`" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{BiCausality_demo} %\VignetteEngine{knitr::knitr} \usepackage[utf8]{inputenc} --- Example: Inferred binary causal graph from simulation ---------------------------------------------------------------------------------- In the first step, we generate a simulation dataset as an input. ```{r} seedN<-2022 n<-200 # 200 individuals d<-10 # 10 variables mat<-matrix(nrow=n,ncol=d) # the input of framework #Simulate binary data from binomial distribution where the probability of value being 1 is 0.5. for(i in seq(n)) { set.seed(seedN+i) mat[i,] <- rbinom(n=d, size=1, prob=0.5) } mat[,1]<-mat[,2] | mat[,3] # 1 causes by 2 and 3 mat[,4] <-mat[,2] | mat[,5] # 4 causses by 2 and 5 mat[,6] <- mat[,1] | mat[,4] # 6 causes by 1 and 4 ``` We use the following function to infer whether X causes Y. ``` {r} # Run the function library(BiCausality) resC<-BiCausality::CausalGraphInferMainFunc(mat = mat,CausalThs=0.1, nboot =50, IndpThs=0.05) ``` The result of the adjacency matrix of the directed causal graph is below: ```{r} resC$CausalGRes$Ehat ``` The value in the element EValHat[i,j] represents that i causes j if the value is not zero. For example, EValHat[2,1] = 1 implies node 2 causes node 1, which is correct since node 1 have nodes 2 and 3 as causal nodes. The directed causal graph also can be plot using the code below. ```{r} library(igraph) net <- graph_from_adjacency_matrix(resC$CausalGRes$Ehat ,weighted = NULL) plot(net, edge.arrow.size = 0.3, vertex.size =20 , vertex.color = '#D4C8E9',layout=layout_with_kk) ``` For the causal relation of variables 2 and 1, we can use the command below to see further information. **Note that the odd difference between X and Y denoted oddDiff(X,Y) is define as |P (X = 1, Y = 1) P (X = 0, Y = 0) −P (X = 0, Y = 1) P (X = 1, Y = 0)|. If X is directly proportional to Y, then oddDiff(X,Y) is close to 1. If X is inverse of Y, then oddDiff(X,Y) is close to -1. If X and Y have no association, then oddDiff(X,Y) is close to zero. ```{r} resC$CausalGRes$causalInfo[['2,1']] ``` Below are the details of result explanation. ``` #This value represents the 95th percentile confidence interval of P(Y=1|X=1). $CDirConfValInv 2.5% 97.5% 1 1 #This value represents the 95th percentile confidence interval of |P(Y=1|X=1) - P(X=1|Y=1)|. $CDirConfInv 2.5% 97.5% 0.3217322 0.4534494 #This value represents the mean of |P(Y=1|X=1) - P(X=1|Y=1)|. $CDirmean [1] 0.3787904 #The test that has the null hypothesis that |P(Y=1|X=1) - P(X=1|Y=1)| below #or equal the argument of parameter "CausalThs" and the alternative hypothesis #is that |P(Y=1|X=1) - P(X=1|Y=1)| is greater than "CausalThs". $testRes2 Wilcoxon signed rank test with continuity correction data: abs(bCausalDirDist) V = 1275, p-value = 3.893e-10 alternative hypothesis: true location is greater than 0.1 #The test that has the null hypothesis that |oddDiff(X,Y)| below #or equal the argument of parameter "IndpThs" and the alternative hypothesis is #that |oddDiff(X,Y)| is greater than "IndpThs". $testRes1 Wilcoxon signed rank test with continuity correction data: abs(bSignDist) V = 1275, p-value = 3.894e-10 alternative hypothesis: true location is greater than 0.05 #If the test above rejects the null hypothesis with the significance threshold #alpha (default alpha=0.05), then the value "sign=1", otherwise, it is zero. 
$sign [1] 1 #This value represents the 95th percentile confidence interval of oddDiff(X,Y) $SignConfInv 2.5% 97.5% 0.08670325 0.13693900 #This value represents the mean of oddDiff(X,Y) $Signmean [1] 0.1082242 ```
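As a final sanity check, we can compare the inferred adjacency matrix with the
ground truth of the simulation. The sketch below encodes the true edges used to
generate the data (2,3 -> 1; 2,5 -> 4; 1,4 -> 6) and computes precision, recall
and the F1 score with `comparePredAdjMatrix2TrueAdjMat`.

```{r}
trueAdj <- matrix(0, 10, 10)
trueAdj[2,1] <- trueAdj[3,1] <- 1 # 1 is caused by 2 and 3
trueAdj[2,4] <- trueAdj[5,4] <- 1 # 4 is caused by 2 and 5
trueAdj[1,6] <- trueAdj[4,6] <- 1 # 6 is caused by 1 and 4
comparePredAdjMatrix2TrueAdjMat(trueAdjMat = trueAdj, adjMat = resC$CausalGRes$Ehat)
```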
/scratch/gouwar.j/cran-all/cranData/BiCausality/inst/doc/BiCausality_demo.Rmd
title: "BiCausality: Binary Causality Inference Framework" author: " C. Amornbunchornvej" date: "`r Sys.Date()`" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{BiCausality_demo} %\VignetteEngine{knitr::knitr} \usepackage[utf8]{inputenc} --- Example: Inferred binary causal graph from simulation ---------------------------------------------------------------------------------- In the first step, we generate a simulation dataset as an input. ```{r} seedN<-2022 n<-200 # 200 individuals d<-10 # 10 variables mat<-matrix(nrow=n,ncol=d) # the input of framework #Simulate binary data from binomial distribution where the probability of value being 1 is 0.5. for(i in seq(n)) { set.seed(seedN+i) mat[i,] <- rbinom(n=d, size=1, prob=0.5) } mat[,1]<-mat[,2] | mat[,3] # 1 causes by 2 and 3 mat[,4] <-mat[,2] | mat[,5] # 4 causses by 2 and 5 mat[,6] <- mat[,1] | mat[,4] # 6 causes by 1 and 4 ``` We use the following function to infer whether X causes Y. ``` {r} # Run the function library(BiCausality) resC<-BiCausality::CausalGraphInferMainFunc(mat = mat,CausalThs=0.1, nboot =50, IndpThs=0.05) ``` The result of the adjacency matrix of the directed causal graph is below: ```{r} resC$CausalGRes$Ehat ``` The value in the element EValHat[i,j] represents that i causes j if the value is not zero. For example, EValHat[2,1] = 1 implies node 2 causes node 1, which is correct since node 1 have nodes 2 and 3 as causal nodes. The directed causal graph also can be plot using the code below. ```{r} library(igraph) net <- graph_from_adjacency_matrix(resC$CausalGRes$Ehat ,weighted = NULL) plot(net, edge.arrow.size = 0.3, vertex.size =20 , vertex.color = '#D4C8E9',layout=layout_with_kk) ``` For the causal relation of variables 2 and 1, we can use the command below to see further information. **Note that the odd difference between X and Y denoted oddDiff(X,Y) is define as |P (X = 1, Y = 1) P (X = 0, Y = 0) −P (X = 0, Y = 1) P (X = 1, Y = 0)|. If X is directly proportional to Y, then oddDiff(X,Y) is close to 1. If X is inverse of Y, then oddDiff(X,Y) is close to -1. If X and Y have no association, then oddDiff(X,Y) is close to zero. ```{r} resC$CausalGRes$causalInfo[['2,1']] ``` Below are the details of result explanation. ``` #This value represents the 95th percentile confidence interval of P(Y=1|X=1). $CDirConfValInv 2.5% 97.5% 1 1 #This value represents the 95th percentile confidence interval of |P(Y=1|X=1) - P(X=1|Y=1)|. $CDirConfInv 2.5% 97.5% 0.3217322 0.4534494 #This value represents the mean of |P(Y=1|X=1) - P(X=1|Y=1)|. $CDirmean [1] 0.3787904 #The test that has the null hypothesis that |P(Y=1|X=1) - P(X=1|Y=1)| below #or equal the argument of parameter "CausalThs" and the alternative hypothesis #is that |P(Y=1|X=1) - P(X=1|Y=1)| is greater than "CausalThs". $testRes2 Wilcoxon signed rank test with continuity correction data: abs(bCausalDirDist) V = 1275, p-value = 3.893e-10 alternative hypothesis: true location is greater than 0.1 #The test that has the null hypothesis that |oddDiff(X,Y)| below #or equal the argument of parameter "IndpThs" and the alternative hypothesis is #that |oddDiff(X,Y)| is greater than "IndpThs". $testRes1 Wilcoxon signed rank test with continuity correction data: abs(bSignDist) V = 1275, p-value = 3.894e-10 alternative hypothesis: true location is greater than 0.05 #If the test above rejects the null hypothesis with the significance threshold #alpha (default alpha=0.05), then the value "sign=1", otherwise, it is zero. 
$sign [1] 1 #This value represents the 95th percentile confidence interval of oddDiff(X,Y) $SignConfInv 2.5% 97.5% 0.08670325 0.13693900 #This value represents the mean of oddDiff(X,Y) $Signmean [1] 0.1082242 ```
/scratch/gouwar.j/cran-all/cranData/BiCausality/vignettes/BiCausality_demo.Rmd
#' Asiamat #' #' An adjacency matrix representing the ground truth DAG used to generate a synthetic dataset from #' Lauritzen and Spiegelhalter (1988) about lung #' diseases (tuberculosis, lung cancer or bronchitis) and visits to Asia. #' #' @source \url{https://www.bnlearn.com/bnrepository/} #' @format A binary matrix with 8 rows and 8 columns representing an adjacency matrix of a DAG with 8 nodes: #' \itemize{ #' \item D (dyspnoea), binary 1/0 corresponding to "yes" and "no" #' \item T (tuberculosis), binary 1/0 corresponding to "yes" and "no" #' \item L (lung cancer), binary 1/0 corresponding to "yes" and "no" #' \item B (bronchitis), binary 1/0 corresponding to "yes" and "no" #' \item A (visit to Asia), binary 1/0 corresponding to "yes" and "no" #' \item S (smoking), binary 1/0 corresponding to "yes" and "no" #' \item X (chest X-ray), binary 1/0 corresponding to "yes" and "no" #' \item E (tuberculosis versus lung cancer/bronchitis), binary 1/0 corresponding to "yes" and "no" #' } #'@references Lauritzen S, Spiegelhalter D (1988). `Local Computation with Probabilities on Graphical Structures and their Application to Expert Systems (with discussion)'. #'Journal of the Royal Statistical Society: Series B 50, 157-224. #' "Asiamat"
/scratch/gouwar.j/cran-all/cranData/BiDAG/R/Asiaadj.R
#' Simulated data set from a 2-step dynamic Bayesian network
#'
#' @description
#' A synthetic dataset containing 100 observations generated from a random dynamic Bayesian network with 12 continuous dynamic nodes and 3 static nodes.
#' The DBN includes observations from 5 time slices.
#'
#' @format A data frame with 100 rows and 63 (3+12*5) columns representing observations of 15 variables: 3 static variables (first 3 columns) which do not change over time and 12 dynamic variables observed in 5 consecutive time slices.
#'
"DBNdata"
/scratch/gouwar.j/cran-all/cranData/BiDAG/R/DBNdata.R
#'Deriving an adjacency matrix of a full DBN
#'
#'This function transforms a compact 2-slice adjacency matrix of a DBN into a full T-slice adjacency matrix
#'
#' @param DBNmat a square matrix, representing initial and transitional structure of a DBN; the size of the matrix is 2*dyn+b
#' @param slices integer, number of slices in an unrolled DBN
#' @param b integer, number of static variables
#' @return an adjacency matrix of an unrolled DBN
#' @examples
#' compact2full(DBNmat, slices=5, b=3)
#' @export
compact2full<-function(DBNmat,slices,b=0) {
  dyn<-(ncol(DBNmat)-b)/2
  if(slices<3) {
    return(DBNmat)
  } else {
    if(all(is.character(colnames(DBNmat)))){
      baseall<-colnames(DBNmat)
      basenames<-colnames(DBNmat)[1:dyn+b]
    } else {
      if(b!=0) {
        staticnames<-paste("s",1:b,sep="")
        basenames<-paste("v",1:dyn,sep="")
        baseall<-c(staticnames,basenames)
      } else {
        basenames<-paste("v",1:dyn,sep="")
        baseall<-basenames
      }
    }
    for(i in 3:slices) {
      baseall<-c(baseall,paste(basenames,".",i,sep=""))
    }
    nbig<-slices*dyn+b
    DBNbig<-matrix(0,nrow=nbig,ncol=nbig)
    colnames(DBNbig)<-baseall
    rownames(DBNbig)<-baseall
    DBNbig[1:(dyn+b),1:dyn+b]<-DBNmat[1:(dyn+b),1:dyn+b] #copying initial structure
    intStruct<-DBNmat[1:dyn+dyn+b,1:dyn+dyn+b] #internal structure
    transStruct<-DBNmat[1:dyn+b,1:dyn+dyn+b] #transitional structure
    if(b>0) {
      bgStrct<-DBNmat[1:b,1:dyn+dyn+b] #edges from static variables
    }
    for(i in 1:(slices-1)) {
      if(b>0) {
        DBNbig[1:b,1:dyn+i*dyn+b]<-bgStrct
      }
      DBNbig[1:dyn+(i-1)*dyn+b,1:dyn+i*dyn+b]<-transStruct
      DBNbig[1:dyn+i*dyn+b,1:dyn+i*dyn+b]<-intStruct
    }
    return(DBNbig)
  }
}

#'Deriving a compact adjacency matrix of a DBN
#'
#'This function transforms an unrolled adjacency matrix of a DBN into a compact representation
#'
#' @param DBNmat a square matrix, representing the structure of an unrolled DBN; the size of matrix is slices*dyn+b; all static variables are assumed to be in the first b rows and columns of the matrix
#' @param b integer, number of static variables; 0 by default
#' @examples
#' full2compact(DBNunrolled,b=3)
#'@export
full2compact<-function(DBNmat,b=0) {
  dyn<-(ncol(DBNmat)-b)/2
  DBNcompact<-DBNmat[1:(2*dyn+b),1:(2*dyn+b)]
  return(DBNcompact)
}

# turns the internal representation into the user-friendly one
DBNtransform<-function(DBNmat,param) {
  newDBNmat<-Matrix(0,nrow=param$n+param$nsmall,ncol=param$n+param$nsmall,sparse=TRUE)
  colnames(newDBNmat)<-param$labels.short
  rownames(newDBNmat)<-param$labels.short
  newDBNmat[param$usrinitstr$rows,param$usrinitstr$cols]<-DBNmat[param$intstr$rows,param$intstr$cols]
  newDBNmat[param$usrintstr$rows,param$usrintstr$cols]<-DBNmat[param$intstr$rows,param$intstr$cols]
  newDBNmat[param$usrtrans$rows,param$usrtrans$cols]<-DBNmat[param$trans$rows,param$trans$cols]
  return(newDBNmat)
}

# turns the internal representation into the user-friendly one
DBNtransform.init<-function(DBNmat,param) {
  if(param$bgn>0) {
    newDBNmat<-matrix(0,nrow=param$bgn+param$nsmall,ncol=param$bgn+param$nsmall)
    colnames(newDBNmat)<-param$labels.short[1:param$n]
    rownames(newDBNmat)<-param$labels.short[1:param$n]
    newDBNmat[,1:param$bgn]<-DBNmat[,1:param$bgn+param$nsmall]
    newDBNmat[,1:param$nsmall+param$bgn]<-DBNmat[,1:param$nsmall]
    DBNmat<-newDBNmat
    newDBNmat[1:param$bgn,]<-DBNmat[1:param$bgn+param$nsmall,]
    newDBNmat[1:param$nsmall+param$bgn,]<-DBNmat[1:param$nsmall,]
    return(newDBNmat)
  } else {
    return(DBNmat)
  }
}

# turns the user-friendly representation into the internal one
DBNbacktransform<-function(DBNmat,param,coln=FALSE,nozero=FALSE) {
  if(!is.null(colnames(DBNmat))) {
    oldnodelabels<-colnames(DBNmat)
    newnodelabels<-oldnodelabels
newnodelabels[param$intstr$cols]<-oldnodelabels[param$usrtrans$cols] if(param$bgn==0) newnodelabels[param$trans$rows]<-oldnodelabels[param$usrinitstr$rows] else { newnodelabels[c(param$intstr$rows[1:param$bgn],param$trans$rows)]<-oldnodelabels[param$usrinitstr$rows] } } if(nozero) newDBNmat<-DBNmat else { newDBNmat<-Matrix(0,nrow=param$n+param$nsmall,ncol=param$n+param$nsmall) } newDBNmat[param$intstr$rows,param$intstr$cols]<-1*(DBNmat[param$usrintstr$rows,param$usrintstr$cols]|DBNmat[param$usrinitstr$rows,param$usrinitstr$cols]) newDBNmat[param$trans$rows,param$trans$cols]<-DBNmat[param$usrtrans$rows,param$usrtrans$cols] if(!param$split) { if(coln) colnames(newDBNmat)<-rownames(newDBNmat)<-newnodelabels return(newDBNmat) } else { res<-list() initDBNmat<-DBNmat[1:param$n,1:param$n] newinitDBNmat<-DBNmat[1:param$n,1:param$n] if(param$bgn>0) { newinitDBNmat[,1:param$bgn+param$nsmall]<-initDBNmat[,1:param$bgn] } newinitDBNmat[,1:param$nsmall]<-initDBNmat[,1:param$nsmall+param$bgn] initDBNmat<-newinitDBNmat if(param$bgn>0) { newinitDBNmat[1:param$bgn+param$nsmall,]<-initDBNmat[1:param$bgn,] } newinitDBNmat[1:param$nsmall,]<-initDBNmat[1:param$nsmall+param$bgn,] res$init<-newinitDBNmat if(nozero) { transDBNmat<-DBNmat } else { transDBNmat<-Matrix(0,nrow=2*param$nsmall+param$bgn,ncol=2*param$nsmall+param$bgn) DBNmat<-DBNcut(DBNmat,dyn=param$nsmall,b=param$bgn)} transDBNmat[param$intstr$rows,param$intstr$cols]<-DBNmat[param$usrintstr$rows,param$usrintstr$cols] transDBNmat[param$trans$rows,param$trans$cols]<-DBNmat[param$usrtrans$rows,param$usrtrans$cols] res$trans<-transDBNmat return(res) } } DBNcut<-function(adj,dyn,b){ adj[,1:(dyn+b)]<-0 return(adj) } DBNinit<-function(adj,dyn,b){ adj<-adj[1:(b+dyn),1:(b+dyn)] if(b>0) { adj[,1:b]<-0 } return(adj) } #Combining initial and transition DBN structures in one matrix mergeDBNstr<-function(initStruct,transStruct) { n<-ncol(initStruct) if(is(initStruct,"graphNEL")) { initStruct<-graph2m(initStruct) } else if(!is.matrix(initStruct)) { initStruct<-as.matrix(initStruct) } if(is(transStruct,"graphNEL")) { transStruct<-graph2m(transStruct) }else if(!is.matrix(transStruct)) { transStruct<-as.matrix(transStruct) } n<-ncol(initStruct) transStruct[1:n,1:n]<-initStruct return(transStruct) } #Combining orders for a DBN mergeDBNord<-function(initorder,transorder) { return(c(initorder,transorder)) } #Combining order scores for a DBN mergeDBNscore<-function(initscore,transscore) { return(initscore+transscore) } #this function produces common result for DBN structure learning when samestruct=FALSE mergeDBNres<-function(result.init,result.trans,scorepar,algo) { res<-list() maxtrans<-DBNtransform(result.trans$DAG,scorepar) maxinit<-DBNtransform.init(result.init$DAG,scorepar) res$DAG<-mergeDBNstr(maxinit,maxtrans) res$order<-mergeDBNord(result.init$order,result.trans$order) res$score<-mergeDBNscore(result.init$score,result.trans$score) if(!is.null(result.init$traceadd)) { result.init$traceadd$incidence<-lapply(result.init$traceadd$incidence,DBNtransform.init,param=scorepar) result.trans$traceadd$incidence<-lapply(result.trans$traceadd$incidence,DBNtransform,param=scorepar) result.trans$traceadd$incidence<-lapply(result.trans$traceadd$incidence,DBNcut,dyn=scorepar$nsmall,b=scorepar$bgn) res$traceadd$incidence<-mapply(mergeDBNstr,result.init$traceadd$incidence,result.trans$traceadd$incidence,SIMPLIFY = FALSE) res$trace<-mapply(mergeDBNscore,result.init$trace,result.trans$trace) if(algo=="order"){ 
res$traceadd$orders<-mapply(mergeDBNord,result.init$traceadd$orders,result.trans$traceadd$orders,SIMPLIFY = FALSE) res$traceadd$orderscores<-mapply(mergeDBNscore,result.init$traceadd$orderscores,result.trans$traceadd$orderscores) } else if (algo=="partition") { res$traceadd$order<-mapply(mergeDBNord,result.init$traceadd$order,result.trans$traceadd$order,SIMPLIFY = FALSE) res$traceadd$partitionscores<-mapply(mergeDBNscore,result.init$traceadd$partitionscores,result.trans$traceadd$partitionscores) } } attr(res,"class")<-"MCMCres" return(res) } #this function produces common result for DBN iterative structure learning when samestruct=FALSE mergeDBNres.it<-function(result.init,result.trans,scorepar) { res<-list() res$init<-result.init res$trans<-result.trans maxtrans<-DBNtransform(result.trans$DAG,scorepar) maxinit<-DBNtransform.init(result.init$DAG,scorepar) for(i in 1:length(res$trans$maxtrace)) { res$trans$maxtrace[[i]]$DAG<-DBNtransform(res$trans$maxtrace[[i]]$DAG,scorepar) res$trans$maxtrace[[i]]$DAG<-DBNcut(res$trans$maxtrace[[i]]$DAG,dyn=scorepar$nsmall,b=scorepar$bgn) } for(i in 1:length(res$init$maxtrace)) { res$init$maxtrace[[i]]$DAG<-DBNtransform.init(res$init$maxtrace[[i]]$DAG,scorepar) res$init$maxtrace[[i]]$DAG<-DBNinit(res$init$maxtrace[[i]]$DAG,dyn=scorepar$nsmall,b=scorepar$bgn) } res$DAG<-mergeDBNstr(maxinit,maxtrans) res$order<-mergeDBNord(result.init$order,result.trans$order) res$score<-mergeDBNscore(result.init$score,result.trans$score) endinit<-DBNtransform.init(result.init$endspace,scorepar) endtrans<-DBNtransform(result.trans$endspace,scorepar) startinit<-DBNtransform.init(result.init$startspace,scorepar) starttrans<-DBNtransform(result.trans$startspace,scorepar) res$endspace<-mergeDBNstr(endinit,endtrans) res$startspace<-mergeDBNstr(startinit,starttrans) return(res) }
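# A minimal consistency sketch for the conversions above (assumes the packaged
# example matrices 'DBNmat' (27x27 compact, 12 dynamic + 3 static variables)
# and 'DBNunrolled' (63x63, 5 slices)): unrolling the compact matrix should
# give a matrix of size 3 + 12*5 = 63 matching the packaged unrolled version.
if(FALSE){
  unrolled <- compact2full(DBNmat, slices = 5, b = 3)
  dim(unrolled)                  # 63 63
  all(unrolled == DBNunrolled)   # expected TRUE if both encode the same DBN
}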
/scratch/gouwar.j/cran-all/cranData/BiDAG/R/DBNfns.R
#' An adjacency matrix of a dynamic Bayesian network
#'
#' An adjacency matrix representing the ground truth DBN used to generate a synthetic dataset \code{\link{DBNdata}}. The matrix is a compact representation of a 2-step DBN, such that the initial structure is stored in the first 15 columns of the matrix and the transitional structure is stored in the last 12 columns of the matrix.
#'
#' @format A binary matrix with 27 rows and 27 columns representing an adjacency matrix of a DBN. Rows and columns of the matrix correspond to 3 static and 12 dynamic variables of a DBN across 2 time slices (3 + 12*2 = 27).
#'
"DBNmat"
/scratch/gouwar.j/cran-all/cranData/BiDAG/R/DBNmat.R
#' An unrolled adjacency matrix of a dynamic Bayesian network #' #' An adjacency matrix representing the ground truth DBN used to generate a synthetic dataset \code{\link{DBNdata}}. The matrix is an unrolled representation of a 2-step DBN, such that the static variables are represented in the first 3 columns/rows of the matrix. #' #' @format A binary matrix with 63 rows and 63 columns representing an adjacency matrix of a DBN. Rows and columns of the matrix correspond to 15 variables (s1, s2, s3, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12) of a DBN across 5 time slices. #' "DBNunrolled"
/scratch/gouwar.j/cran-all/cranData/BiDAG/R/DBNunrolled.R
#' kirc dataset
#'
#' Mutation data from the TCGA kidney renal clear cell cohort (KIRC).
#' Mutations are selected according to the q-value computed
#' by MutSig2CV (q<0.1) or because they are connected in networks discovered by Kuipers et al. 2018.
#'
# @source \url{https://portal.gdc.cancer.gov/}
#' @details {
#' Each variable represents a gene. If gene j contains a mutation in sample i, then the j-th element in
#' row i equals 1, and 0 otherwise.
#' The rows are named according to sample names in TCGA.
#' The columns are named according to gene symbols.
#' }
#'@references \url{https://portal.gdc.cancer.gov/}
#'@references \url{http://firebrowse.org/iCoMut/?cohort=kirc}
#'@references Lawrence, M. et al. Mutational heterogeneity in cancer and the search for new cancer-associated genes. Nature 499, 214-218 (2013)
#'
"kirc"
/scratch/gouwar.j/cran-all/cranData/BiDAG/R/KIRC.R
#' kirp dataset
#'
#' Mutation data from the TCGA kidney renal papillary cell cohort (KIRP).
#' Mutations are selected according to the q-value computed
#' by MutSigCV (q<0.1) or because they are connected in networks discovered by Kuipers et al. 2018.
#'
# @source \url{https://portal.gdc.cancer.gov/}
#' @details {
#' Each variable represents a gene. If gene j contains a mutation in sample i, then the j-th element in
#' row i equals 1, and 0 otherwise.
#' The rows are named according to sample names in TCGA.
#' The columns are named according to gene symbols.
#' }
#'@references \url{https://portal.gdc.cancer.gov/}
#'@references \url{http://firebrowse.org/iCoMut/?cohort=kirp}
#'@references Lawrence, M. et al. Mutational heterogeneity in cancer and the search for new cancer-associated genes. Nature 499, 214-218 (2013)
#'
"kirp"
/scratch/gouwar.j/cran-all/cranData/BiDAG/R/KIRP.R
# Generated by using Rcpp::compileAttributes() -> do not edit by hand # Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393 extractT <- function(xs, ys, ts) { .Call(`_BiDAG_extractT`, xs, ys, ts) } collectC <- function(xs, ys, n) { .Call(`_BiDAG_collectC`, xs, ys, n) } collectCcatwt <- function(xs, ys, ws, n, m) { .Call(`_BiDAG_collectCcatwt`, xs, ys, ws, n, m) } collectCcat <- function(xs, ys, n, m) { .Call(`_BiDAG_collectCcat`, xs, ys, n, m) } takefirst <- function(xs, pos) { .Call(`_BiDAG_takefirst`, xs, pos) } takelast <- function(xs, pos, n) { .Call(`_BiDAG_takelast`, xs, pos, n) }
/scratch/gouwar.j/cran-all/cranData/BiDAG/R/RcppExports.R
weightedbinCItest <- function(x,y,S,suffStat) { ## p-value: CIbincoretest(x, y, S, suffStat) } weightedcatCItest <- function(x,y,S,suffStat) { ## p-value: CIcatcoretest(x, y, S, suffStat) } #user defined CI test usrCItest <- function(x,y,S,suffStat) { ## p-value: #insert test here (I have inserted categorical data CI test) CIcatcoretest(x, y, S, suffStat) } #user defined core CI test CIusrcoretest<-function(j,k,parentnodes,suffStat) { #insert core function here (I have inserted categorical data CI test) lp <- length(parentnodes) # number of parents Cj <- suffStat$Cvec[j] # number of levels of j Ck <- suffStat$Cvec[k] # number of levels of k switch(as.character(lp), "0"={# no parents Cp <- 1 # effectively 1 parent level summys <- rep(0, nrow(suffStat$data)) }, "1"={# one parent Cp <- suffStat$Cvec[parentnodes] # number of parent levels summys <- suffStat$data[,parentnodes] }, { # more parents Cp <- prod(suffStat$Cvec[parentnodes]) # use mixed radix mapping to unique parent states summys<-colSums(cumprod(c(1,suffStat$Cvec[parentnodes[-lp]]))*t(suffStat$data[,parentnodes])) }) datasummy <- colSums(c(1,Cj)*t(suffStat$data[,c(j,k)])) # tabulate the observed counts if(!is.null(suffStat$weightvector)){ Ns <- collectCcatwt(summys, datasummy, suffStat$weightvector, Cp, Cj*Ck) } else{ Ns <- collectCcat(summys, datasummy, Cp, Cj*Ck) } ### We could run over the data again for the marginals # margj <- collectCcat(summys, suffStat$data[,j], Cp, Cj) # margk <- collectCcat(summys, suffStat$data[,k], Cp, Ck) ### instead we combine the sums already margj <- Ns[,1:Cj] + Ns[,Cj+1:Cj] if(Ck>2){ for(ii in 3:Ck-1){ margj <- margj + Ns[,ii*Cj+1:Cj] } } margk <- Ns[,Cj*(1:Ck-1)+1] + Ns[,Cj*(1:Ck-1)+2] if(Cj>2){ for(ii in 3:Cj-1){ margk <- margk + Ns[,Cj*(1:Ck-1)+ii+1] } } if(lp==0){# then we have vectors! 
Es <- c(margj*margk[1],margj*margk[2]) if(Ck>2){ for(ii in 3:Ck){ Es <- c(Es,margj*margk[ii]) } } } else { Es <- cbind(margj*margk[,1],margj*margk[,2]) if(Ck>2){ for(ii in 3:Ck){ Es <- cbind(Es,margj*margk[,ii]) } } } Es <- Es/rowSums(Ns) # normalise G2 <- 2*Ns*log(Ns/Es) G2[is.nan(G2)] <- 0 Gsquared <- sum(G2) df <- Cp*(Cj-1)*(Ck-1) pvally<-pchisq(Gsquared, df, lower.tail = FALSE) return(pvally) } # this version uses the C code and runs through the data 4 times CIbincoretest<-function(j,k,parentnodes,suffStat){ lp<-length(parentnodes) # number of parents noparams<-2^lp # number of binary states of the parents switch(as.character(lp), "0"={# no parents # note the weighting is already in the first term P11<-sum(suffStat$d1[,j]*suffStat$data[,k],na.rm=TRUE) P10<-sum(suffStat$d1[,j]*(1-suffStat$data[,k]),na.rm=TRUE) P01<-sum(suffStat$d0[,j]*suffStat$data[,k],na.rm=TRUE) P00<-sum(suffStat$d0[,j]*(1-suffStat$data[,k]),na.rm=TRUE) PT<-P11+P10+P01+P00 N1<-P11+P10 N0<-P01+P00 NT<-N0+N1 M1<-P11+P01 M0<-P10+P00 MT<-M0+M1 # calculate the statistic in the tedious way if(P11>0){ part1<-P11*(log(P11)+log(PT)-log(N1)-log(M1)) } else{ part1<-0 } if(P10>0){ part2<-P10*(log(P10)+log(PT)-log(N1)-log(M0)) } else{ part2<-0 } if(P01>0){ part3<-P01*(log(P01)+log(PT)-log(N0)-log(M1)) } else{ part3<-0 } if(P00>0){ part4<-P00*(log(P00)+log(PT)-log(N0)-log(M0)) } else{ part4<-0 } Gsquared<-2*(part1+part2+part3+part4) }, "1"={ # 1 parent Gsquared<-0 summys<-suffStat$data[,parentnodes] for(i in 1:noparams-1){ totest<-which(summys==i) # note the weighting is already in the first term P11<-sum(suffStat$d1[totest,j]*suffStat$data[totest,k],na.rm=TRUE) P10<-sum(suffStat$d1[totest,j]*(1-suffStat$data[totest,k]),na.rm=TRUE) P01<-sum(suffStat$d0[totest,j]*suffStat$data[totest,k],na.rm=TRUE) P00<-sum(suffStat$d0[totest,j]*(1-suffStat$data[totest,k]),na.rm=TRUE) PT<-P11+P10+P01+P00 N1<-P11+P10 N0<-P01+P00 NT<-N0+N1 M1<-P11+P01 M0<-P10+P00 MT<-M0+M1 # calculate the statistic in the tedious way if(P11>0){ part1<-P11*(log(P11)+log(PT)-log(N1)-log(M1)) } else{ part1<-0 } if(P10>0){ part2<-P10*(log(P10)+log(PT)-log(N1)-log(M0)) } else{ part2<-0 } if(P01>0){ part3<-P01*(log(P01)+log(PT)-log(N0)-log(M1)) } else{ part3<-0 } if(P00>0){ part4<-P00*(log(P00)+log(PT)-log(N0)-log(M0)) } else{ part4<-0 } Gsquared<-Gsquared+2*(part1+part2+part3+part4) } }, { # more parents summys<-colSums(2^(c(0:(lp-1)))*t(suffStat$data[,parentnodes])) P11vec<-suffStat$d1[,j]*suffStat$data[,k] # these include the weighting in the first term P01vec<-suffStat$d0[,j]*suffStat$data[,k] tokeep<-which(!is.na(summys+P11vec)) # remove NAs either in the parents or the children if(length(tokeep)<length(summys)){ P11s<-collectC(summys[tokeep],P11vec[tokeep],noparams) P10s<-collectC(summys[tokeep],suffStat$d1[tokeep,j],noparams)-P11s P01s<-collectC(summys[tokeep],P01vec[tokeep],noparams) P00s<-collectC(summys[tokeep],suffStat$d0[tokeep,j],noparams)-P01s } else { P11s<-collectC(summys,P11vec,noparams) P10s<-collectC(summys,suffStat$d1[,j],noparams)-P11s P01s<-collectC(summys,P01vec,noparams) P00s<-collectC(summys,suffStat$d0[,j],noparams)-P01s } PTs<-P11s+P10s+P01s+P00s # collect all marginal counts N1s<-P11s+P10s N0s<-P01s+P00s NTs<-N0s+N1s M1s<-P11s+P01s M0s<-P10s+P00s MTs<-M0s+M1s # calculate the statistic in the tedious way part1s<-P11s*(log(P11s)+log(PTs)-log(N1s)-log(M1s)) part1s[which(P11s==0)]<-0 # put in limit by hand part1<-sum(part1s) part2s<-P10s*(log(P10s)+log(PTs)-log(N1s)-log(M0s)) part2s[which(P10s==0)]<-0 part2<-sum(part2s) 
part3s<-P01s*(log(P01s)+log(PTs)-log(N0s)-log(M1s)) part3s[which(P01s==0)]<-0 part3<-sum(part3s) part4s<-P00s*(log(P00s)+log(PTs)-log(N0s)-log(M0s)) part4s[which(P00s==0)]<-0 part4<-sum(part4s) Gsquared<-2*(part1+part2+part3+part4) }) pvally<-pchisq(Gsquared, noparams, lower.tail = FALSE) return(pvally) } # this version uses the C code and runs through the data once CIcatcoretest<-function(j,k,parentnodes,suffStat){ lp <- length(parentnodes) # number of parents Cj <- suffStat$Cvec[j] # number of levels of j Ck <- suffStat$Cvec[k] # number of levels of k switch(as.character(lp), "0"={# no parents Cp <- 1 # effectively 1 parent level summys <- rep(0, nrow(suffStat$data)) }, "1"={# one parent Cp <- suffStat$Cvec[parentnodes] # number of parent levels summys <- suffStat$data[,parentnodes] }, { # more parents Cp <- prod(suffStat$Cvec[parentnodes]) # use mixed radix mapping to unique parent states summys<-colSums(cumprod(c(1,suffStat$Cvec[parentnodes[-lp]]))*t(suffStat$data[,parentnodes])) }) datasummy <- colSums(c(1,Cj)*t(suffStat$data[,c(j,k)])) # tabulate the observed counts if(!is.null(suffStat$weightvector)){ Ns <- collectCcatwt(summys, datasummy, suffStat$weightvector, Cp, Cj*Ck) } else{ Ns <- collectCcat(summys, datasummy, Cp, Cj*Ck) } ### We could run over the data again for the marginals # margj <- collectCcat(summys, suffStat$data[,j], Cp, Cj) # margk <- collectCcat(summys, suffStat$data[,k], Cp, Ck) ### instead we combine the sums already margj <- Ns[,1:Cj] + Ns[,Cj+1:Cj] if(Ck>2){ for(ii in 3:Ck-1){ margj <- margj + Ns[,ii*Cj+1:Cj] } } margk <- Ns[,Cj*(1:Ck-1)+1] + Ns[,Cj*(1:Ck-1)+2] if(Cj>2){ for(ii in 3:Cj-1){ margk <- margk + Ns[,Cj*(1:Ck-1)+ii+1] } } if(lp==0){# then we have vectors! Es <- c(margj*margk[1],margj*margk[2]) if(Ck>2){ for(ii in 3:Ck){ Es <- c(Es,margj*margk[ii]) } } } else { Es <- cbind(margj*margk[,1],margj*margk[,2]) if(Ck>2){ for(ii in 3:Ck){ Es <- cbind(Es,margj*margk[,ii]) } } } Es <- Es/rowSums(Ns) # normalise G2 <- 2*Ns*log(Ns/Es) G2[is.nan(G2)] <- 0 Gsquared <- sum(G2) df <- Cp*(Cj-1)*(Ck-1) pvally<-pchisq(Gsquared, df, lower.tail = FALSE) return(pvally) }
/scratch/gouwar.j/cran-all/cranData/BiDAG/R/WeightedCI.R
#' Converting a single BiDAG chain to mcmc object
#'
#' This function converts a single object of one of the BiDAG classes,
#' namely 'orderMCMC' or 'partitionMCMC', to an object of class 'mcmc'. This object can
#' be further used for the convergence and mixing diagnostics implemented in the package coda.
#'
#' @param MCMCtrace object of class \code{orderMCMC} or \code{partitionMCMC}
#' @param edges logical, when FALSE (default), then only DAG score trace is extracted; when TRUE, a trace of posterior probabilities is extracted for every edge (based on the sampled DAGs defined by parameters 'window' and 'cumulative') resulting in up to n^2 trace vectors, where n is the number of nodes in the network
#' @param pdag logical, when edges=TRUE, defines if the DAGs are converted to CPDAGs prior to computing posterior probabilities; ignored otherwise
#' @param p numeric, between 0 and 1; defines the minimum probability for including posterior traces in the returned objects (for probabilities close to 0, PSRF diagnostics may be too conservative)
#' @param burnin numeric between \code{0} and \code{1}, indicates the percentage of the samples which will be discarded as 'burn-in' of the MCMC chain; the rest of the samples will be used to calculate the posterior probabilities; 0.2 by default
#' @param window integer, defines a number of DAG samples for averaging and computing edges' posterior probabilities; ignored when edges=FALSE
#' @param cumulative logical, indicates if posterior probabilities should be calculated based on a cumulative sample of DAGs, where 25\% of the first samples are discarded
#' @return Object of class \code{mcmc} from the package \pkg{coda}
#'@examples
#'\dontrun{
#'library(coda)
#'myscore<-scoreparameters("bde",Asia)
#'ordersample<-sampleBN(myscore,"order")
#'order_mcmc<-bidag2coda(ordersample)
#'par(mfrow=c(1,2))
#'densplot(order_mcmc)
#'traceplot(order_mcmc)
#'}
#'@author Polina Suter
#'@export
bidag2coda<-function(MCMCtrace, edges=FALSE, pdag=TRUE, p=0.1, burnin=0.2, window=100, cumulative=FALSE) {
  thin<-MCMCtrace$info$iterations/MCMCtrace$info$samplesteps-1
  lmcmc<-length(MCMCtrace$trace)
  firstsample<-ceiling(lmcmc*burnin)
  if(!edges) {
    mcmcobj<-mcmc(data=as.matrix(MCMCtrace$trace[firstsample:lmcmc],nrow=1), start = firstsample, thin = thin)
  } else {
    MCMCtrace<-MCMCtrace$traceadd$incidence
    if(is.null(MCMCtrace)) {
      stop("no saved MCMC steps found! try chainout=TRUE when sampling")
    }
    lchain<-length(MCMCtrace)
    if(pdag==TRUE) {
      MCMCtrace<-lapply(MCMCtrace,dagadj2cpadj)
    }
    countmatrix<-MCMCtrace[[1]]
    posteriors<-as.vector(MCMCtrace[[1]])
    curstartstep<-1
    for (i in 2:lchain) {
      if(cumulative) {
        newstartstep<-ceiling(0.25*i)
        countmatrix<-countmatrix+MCMCtrace[[i]]
        if(newstartstep>curstartstep) {
          countmatrix<-countmatrix-MCMCtrace[[curstartstep]]
          curstartstep<-newstartstep
        }
        posteriors<-cbind(posteriors,as.vector(as.matrix(countmatrix)/(i-curstartstep)))
      } else {
        if(i<(window+1)) {
          countmatrix<-countmatrix+MCMCtrace[[i]]
          posteriors<-cbind(posteriors,as.vector(as.matrix(countmatrix)/i))
        } else {
          countmatrix<-countmatrix+MCMCtrace[[i]]-MCMCtrace[[i-window]]
          posteriors<-cbind(posteriors,as.vector(as.matrix(countmatrix)/window))
        }
      }
    }
    if(p>0) {
      edg<-which(posteriors[,ncol(posteriors)]>p) # keep edges whose final posterior exceeds p (misplaced bracket fixed)
      posteriors<-posteriors[edg,]
    }
    mcmcobj<-mcmc(data=t(posteriors[,firstsample:lmcmc]), start = firstsample, thin = thin)
  }
  return(mcmcobj)
}

#' Converting multiple BiDAG chains to mcmc.list
#'
#' This function converts a list of objects of classes
#' 'orderMCMC' or 'partitionMCMC' to an object of class 'mcmc.list'. This object can
#' be further used for the convergence and mixing diagnostics implemented in the R-package coda.
#'
#' @param MCMClist a list of objects of classes \code{orderMCMC} or \code{partitionMCMC}
#' @param edges logical, when FALSE (default), then only DAG score trace is extracted; when TRUE, a trace of posterior probabilities is extracted for every edge (based on the sampled DAGs defined by parameters 'window' and 'cumulative') resulting in up to n^2 trace vectors, where n is the number of nodes in the network
#' @param pdag logical, when edges=TRUE, defines if the DAGs are converted to CPDAGs prior to computing posterior probabilities; ignored otherwise
#' @param p numeric, between 0 and 1; defines the minimum probability for including posterior traces in the returned objects (for probabilities close to 0, PSRF diagnostics may be too conservative; a threshold above 0 is recommended)
#' @param burnin numeric between \code{0} and \code{1}, indicates the percentage of the samples which will be discarded as 'burn-in' of the MCMC chain; the rest of the samples will be used to calculate the posterior probabilities; 0.2 by default
#' @param window integer, defines a number of DAG samples for averaging and computing edges' posterior probabilities; ignored when edges=FALSE
#' @param cumulative logical, indicates if posterior probabilities should be calculated based on a cumulative sample of DAGs, where 25\% of the first samples are discarded
#' @return Object of class \code{mcmc.list} from the package \pkg{coda}
#'@examples
#'\dontrun{
#'library(coda)
#'scoreBoston<-scoreparameters("bge",Boston)
#'ordershort<-list()
#'#run very short chains -> convergence issues
#'ordershort[[1]] <- sampleBN(scoreBoston, algorithm = "order", iterations=2000)
#'ordershort[[2]] <- sampleBN(scoreBoston, algorithm = "order", iterations=2000)
#'codashort_edges<-bidag2codalist(ordershort,edges=TRUE,pdag=TRUE,p=0.05,burnin=0.2,window=10)
#'gd_short<-gelman.diag(codashort_edges, transform=FALSE, autoburnin=FALSE, multivariate=FALSE)
#'length(which(gd_short$psrf[,1]>1.1))/(length(gd_short$psrf[,1]))
#'#=>more MCMC iterations are needed, try 100000
#'}
#'@author Polina Suter
#'@references Robert J. B. Goudie and Sach Mukherjee (2016). A Gibbs Sampler for Learning DAGs. J Mach Learn Res. 2016 Apr; 17(30): 1-39.
#'@export
bidag2codalist<-function(MCMClist, edges=FALSE, pdag=TRUE, p=0.1, burnin=0.2, window=10, cumulative=FALSE) {
  thin<-vector()
  lmcmc<-length(MCMClist[[1]]$trace)
  firstsample<-ceiling(lmcmc*burnin)
  for(i in 1:length(MCMClist)) {
    thin[i]<-MCMClist[[i]]$info$iterations/MCMClist[[i]]$info$samplesteps-1
  }
  if(edges==FALSE | p==0) {
    for(i in 1:length(MCMClist)) {
      MCMClist[[i]]<-bidag2coda(MCMClist[[i]],edges=edges,pdag=pdag,p=0)
    }
  } else {
    for(i in 1:length(MCMClist)) {
      MCMClist[[i]]<-bidag2codacore(MCMClist[[i]],edges=edges,pdag=pdag,burnin=burnin,window=window,cumulative=cumulative)
    }
    edg<-c()
    for(i in 1:length(MCMClist)) {
      edg<-union(edg,which(MCMClist[[i]][,ncol(MCMClist[[i]])]>p))
    }
    for(i in 1:length(MCMClist)) {
      MCMClist[[i]]<- MCMClist[[i]][edg,]
      MCMClist[[i]]<-mcmc(data=t(MCMClist[[i]][,firstsample:lmcmc]), start = firstsample, thin = thin[i])
    }
  }
  return(mcmc.list(MCMClist))
}

bidag2codacore<-function(MCMCtrace, edges=FALSE, pdag=TRUE, burnin=0.2, window=100, cumulative=FALSE) {
  MCMCtrace<-MCMCtrace$traceadd$incidence
  if(is.null(MCMCtrace)) {
    stop("no saved MCMC steps found!
try chainout=TRUE when sampling") } lchain<-length(MCMCtrace) if(pdag==TRUE) { MCMCtrace<-lapply(MCMCtrace,dagadj2cpadj) } countmatrix<-MCMCtrace[[1]] posteriors<-as.vector(MCMCtrace[[1]]) curstartstep<-1 for (i in 2:lchain) { if(cumulative) { newstartstep<-ceiling(0.25*i) countmatrix<-countmatrix+MCMCtrace[[i]] if(newstartstep>curstartstep) { countmatrix<-countmatrix-MCMCtrace[[curstartstep]] curstartstep<-newstartstep } posteriors<-cbind(posteriors,as.vector(as.matrix(countmatrix)/(i-curstartstep))) } else { if(i<(window+1)) { countmatrix<-countmatrix+MCMCtrace[[i]] posteriors<-cbind(posteriors,as.vector(as.matrix(countmatrix)/i)) } else { countmatrix<-countmatrix+MCMCtrace[[i]]-MCMCtrace[[i-window]] posteriors<-cbind(posteriors,as.vector(as.matrix(countmatrix)/window)) } } } return(posteriors) }
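# A minimal diagnostic sketch with coda (assumes 'ordersample' as in the
# roxygen example above): effectiveSize and traceplot operate directly on the
# returned 'mcmc' object.
if(FALSE){
  library(coda)
  om <- bidag2coda(ordersample)
  effectiveSize(om)   # effective sample size of the DAG score trace
  traceplot(om)
}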
/scratch/gouwar.j/cran-all/cranData/BiDAG/R/bidag2coda.R
# The log of the BGe/BDe score, but simplified as much as possible # see arXiv:1402.6863 DAGcorescore<-function(j,parentnodes,n,param) { if(param$DBN){ if(param$stationary) { internalparents <- parentnodes[which(parentnodes<=param$nsmall)] corescore <- DAGcorescore(j,parentnodes,param$n+param$nsmall,param$otherslices)+ DAGcorescore(j,internalparents,param$n,param$firstslice) } else { corescore<-0 for(i in 1:(length(param$paramsets)-1)) { corescore<-corescore+DAGcorescore(j,parentnodes,param$n+param$nsmall, param$paramsets[[i]]) } internalparents <- parentnodes[which(parentnodes<=param$nsmall)] corescore<-corescore+DAGcorescore(j,internalparents,param$n, param$paramsets[[length(param$paramsets)]]) } } else if(param$MDAG) { corescore<-0 for(i in 1:length(param$paramsets)) { corescore<-corescore+DAGcorescore(j,parentnodes,param$n, param$paramsets[[i]]) } }else if(param$type=="bge"){ TN<-param$TN awpN<-param$awpN scoreconstvec<-param$scoreconstvec lp<-length(parentnodes) #number of parents awpNd2<-(awpN-n+lp+1)/2 A<-TN[j,j] switch(as.character(lp), "0"={# just a single term if no parents corescore <- scoreconstvec[lp+1] -awpNd2*log(A) }, "1"={# no need for matrices D<-TN[parentnodes,parentnodes] logdetD<-log(D) B<-TN[j,parentnodes] logdetpart2<-log(A-B^2/D) corescore <- scoreconstvec[lp+1]-awpNd2*logdetpart2 - logdetD/2 if (!is.null(param$logedgepmat)) { # if there is an additional edge penalisation corescore <- corescore - param$logedgepmat[parentnodes, j] } }, "2"={# can do matrix determinant and inverse explicitly # but this is numerically unstable for large matrices! # so we use the same approach as for 3 parents D<-TN[parentnodes,parentnodes] detD<-dettwobytwo(D) logdetD<-log(detD) B<-TN[j,parentnodes] #logdetpart2<-log(A-(D[2,2]*B[1]^2+D[1,1]*B[2]^2-2*D[1,2]*B[1]*B[2])/detD) #also using symmetry of D logdetpart2<-log(dettwobytwo(D-(B)%*%t(B)/A))+log(A)-logdetD corescore <- scoreconstvec[lp+1]-awpNd2*logdetpart2 - logdetD/2 if (!is.null(param$logedgepmat)) { # if there is an additional edge penalisation corescore <- corescore - sum(param$logedgepmat[parentnodes, j]) } }, {# otherwise we use cholesky decomposition to perform both D<-as.matrix(TN[parentnodes,parentnodes]) choltemp<-chol(D) logdetD<-2*log(prod(choltemp[(lp+1)*c(0:(lp-1))+1])) B<-TN[j,parentnodes] logdetpart2<-log(A-sum(backsolve(choltemp,B,transpose=TRUE)^2)) corescore <- scoreconstvec[lp+1]-awpNd2*logdetpart2 - logdetD/2 if (!is.null(param$logedgepmat)) { # if there is an additional edge penalisation corescore <- corescore - sum(param$logedgepmat[parentnodes, j]) } }) } else if (param$type=="bde"){ lp<-length(parentnodes) # number of parents noparams<-2^lp # number of binary states of the parents chi<-param$chi scoreconstvec<-param$scoreconstvec switch(as.character(lp), "0"={# no parents N1<-sum(param$d1[,j]) N0<-sum(param$d0[,j]) NT<-N0+N1 corescore <- scoreconstvec[lp+1] + lgamma(N0+chi/(2*noparams)) + lgamma(N1+chi/(2*noparams)) - lgamma(NT+chi/noparams) }, "1"={# one parent corescore<-scoreconstvec[lp+1] summys<-param$data[,parentnodes] for(i in 1:noparams-1){ totest<-which(summys==i) N1<-sum(param$d1[totest,j]) N0<-sum(param$d0[totest,j]) NT<-N0+N1 corescore <- corescore + lgamma(N0+chi/(2*noparams)) + lgamma(N1+chi/(2*noparams)) - lgamma(NT+chi/noparams) } if (!is.null(param$logedgepmat)) { # if there is an additional edge penalisation corescore <- corescore - param$logedgepmat[parentnodes, j] } }, { # more parents summys<-colSums(2^(c(0:(lp-1)))*t(param$data[,parentnodes])) N1s<-collectC(summys,param$d1[,j],noparams) 
N0s<-collectC(summys,param$d0[,j],noparams) #N1s<-1 #!changeback!! #N0s<-1 #!changeback!! NTs<-N1s+N0s corescore <- scoreconstvec[lp+1] + sum(lgamma(N0s+chi/(2*noparams))) + sum(lgamma(N1s+chi/(2*noparams))) - sum(lgamma(NTs+chi/noparams)) if (!is.null(param$logedgepmat)) { # if there is an additional edge penalisation corescore <- corescore - sum(param$logedgepmat[parentnodes, j]) } }) } else if (param$type=="bdecat"){ lp<-length(parentnodes) # number of parents chi<-param$chi corescore <- param$scoreconstvec[lp+1] # starting value Cj <- param$Cvec[j] # number of levels of j switch(as.character(lp), "0"={# no parents Cp <- 1 # effectively 1 parent level summys <- rep(0, nrow(param$data)) }, "1"={# one parent Cp <- param$Cvec[parentnodes] # number of parent levels summys <- param$data[,parentnodes] if (!is.null(param$logedgepmat)) { # if there is an additional edge penalisation corescore <- corescore - param$logedgepmat[parentnodes, j] } }, { # more parents Cp <- prod(param$Cvec[parentnodes]) # use mixed radix mapping to unique parent states summys<-colSums(cumprod(c(1,param$Cvec[parentnodes[-lp]]))*t(param$data[,parentnodes])) if (!is.null(param$logedgepmat)) { # if there is an additional edge penalisation corescore <- corescore - sum(param$logedgepmat[parentnodes, j]) } }) if(!is.null(param$weightvector)){ Ns <- collectCcatwt(summys, param$data[,j], param$weightvector, Cp, Cj) } else{ Ns <- collectCcat(summys, param$data[,j], Cp, Cj) } NTs <- rowSums(Ns) corescore <- corescore + sum(lgamma(Ns+chi/(Cp*Cj))) - sum(lgamma(NTs + chi/Cp)) + Cp*lgamma(chi/Cp) - (Cp*Cj)*lgamma(chi/(Cp*Cj)) } else if(param$type=="usr"){ corescore <- usrDAGcorescore(j,parentnodes,n,param) } return(corescore) } dettwobytwo <- function(D) { D[1,1]*D[2,2]-D[1,2]*D[2,1] } # The determinant of a 3 by 3 matrix detthreebythree <- function(D){ D[1,1]*(D[2,2]*D[3,3]-D[2,3]*D[3,2])-D[1,2]*(D[2,1]*D[3,3]-D[2,3]*D[3,1])+D[1,3]*(D[2,1]*D[2,3]-D[2,2]*D[3,1]) } # The log of the BGe/BDe score, but simplified as much as possible # see arXiv:1402.6863 dettwobytwo <- function(D) { D[1,1]*D[2,2]-D[1,2]*D[2,1] } # The determinant of a 3 by 3 matrix detthreebythree <- function(D){ D[1,1]*(D[2,2]*D[3,3]-D[2,3]*D[3,2])-D[1,2]*(D[2,1]*D[3,3]-D[2,3]*D[3,1])+D[1,3]*(D[2,1]*D[2,3]-D[2,2]*D[3,1]) }
/scratch/gouwar.j/cran-all/cranData/BiDAG/R/corescore.R
#' interactions dataset
#'
#' A data frame containing possible interactions between genes from the \code{kirp} and \code{kirc}
#' data sets
#'
#' @source \url{https://string-db.org/}
#' @format A data frame with 179 rows and 3 columns;
#' \itemize{
#' \item node1 character, name of a gene
#' \item node2 character, name of a gene
#' \item combined_score interaction score, reflecting confidence in the fact that an interaction between node1 and node2 is possible
#'}
#' each row represents a possible interaction between two genes
#'
"interactions"

#' mapping dataset
#'
#' A data frame containing a mapping between the names of genes used in the \code{kirp}/\code{kirc}
#' data sets and the names used in the STRING interactions list (see \code{\link{interactions}}).
#'
#' @source \url{https://string-db.org/}
#' @format A data frame with 46 rows and two columns:
#' \itemize{
#' \item queryItem character, name used for structure learning
#' \item preferredName character, name used in the STRING interactions data set
#'}
#'
"mapping"
/scratch/gouwar.j/cran-all/cranData/BiDAG/R/dataSTRING.R
#' Extracting adjacency matrix (DAG) from MCMC object
#'
#' This function extracts an adjacency matrix of
#' a maximum scoring DAG from the result of the MCMC run.
#'
#' @param x object of class 'orderMCMC','partitionMCMC' or 'iterativeMCMC'
#' @param amat logical, when TRUE an adjacency matrix is returned and an object of class 'graphNEL' otherwise
#' @param cp logical, when TRUE the CPDAG (equivalence class) is returned and the DAG otherwise; FALSE by default
#' @return adjacency matrix of a maximum scoring DAG (or CPDAG) discovered/sampled in one MCMC run
#' @examples
#'myscore<-scoreparameters("bge", Boston)
#'\dontrun{
#'itfit<-learnBN(myscore,algorithm="orderIter")
#'maxEC<-getDAG(itfit,cp=TRUE)
#'}
#' @export
getDAG<-function(x,amat=TRUE,cp=FALSE) {
  if (cp) graphy<-x$CPDAG else graphy<-x$DAG
  if(amat) return(graphy) else return(m2graph(graphy))
}

#' Extracting scorespace from MCMC object
#'
#' This function extracts an object of class 'scorespace'
#' from the result of the MCMC run when the parameter 'scoreout' was set to TRUE; otherwise it extracts
#' only the adjacency matrix of the final search space without the score tables.
#'
#' @param x object of class 'orderMCMC','partitionMCMC' or 'iterativeMCMC'
#' @return an object of class 'scorespace' or an adjacency binary matrix corresponding to a search space last used in MCMC
#' @examples
#'myscore<-scoreparameters("bge", Boston)
#'\dontrun{
#'itfit<-learnBN(myscore,algorithm="orderIter",scoreout=TRUE)
#'itspace<-getSpace(itfit)
#'}
#' @export
getSpace<-function(x) {
  if(is.null(x$scoretable)) {
    warning("object x does not contain score tables!
            set the parameter 'scoreout' to TRUE when running MCMC
            only the adjacency matrix is returned")
    return(x$endspace)
  } else {
    return(x$scoretable)
  }
}

#' Extracting score from MCMC object
#'
#' This function extracts the score of a maximum DAG sampled in the MCMC run.
#'
#' @param x object of class 'orderMCMC','partitionMCMC' or 'iterativeMCMC'
#' @return a score of a maximum-scoring DAG found/sampled in one MCMC run
#' @examples
#'myscore<-scoreparameters("bge", Boston)
#'\dontrun{
#'itfit<-learnBN(myscore,algorithm="orderIter")
#'getMCMCscore(itfit)
#'}
#' @export
getMCMCscore<-function(x) {
  return(x$score)
}

#' Extracting trace from MCMC object
#'
#' This function extracts a trace of
#' \itemize{
#' \item DAG scores
#' \item DAG adjacency matrices
#' \item orders
#' \item order scores
#' }
#' from the result of the MCMC run. Note that the last three options
#' work only when the parameter 'chainout' was set to TRUE.
#'
#' @param x object of class 'orderMCMC','partitionMCMC' or 'iterativeMCMC'
#' @param which integer, indicating which trace is returned: DAG scores (which = 0), DAGs (which = 1),
#' orders (which = 2), order scores (which = 3)
#' @return a list or a vector of objects representing the MCMC trace, depending on the parameter 'which'; by default, the trace of DAG scores is returned
#' @examples
#'myscore<-scoreparameters("bge",Boston)
#'\dontrun{
#'orderfit<-sampleBN(myscore,algorithm="order")
#'DAGscores<-getTrace(orderfit,which=0)
#'DAGtrace<-getTrace(orderfit,which=1)
#'orderscores<-getTrace(orderfit,which=3)
#'}
#' @export
getTrace<-function(x,which=0) {
  if(is.null(x$traceadd) & which>0) {
    warning("the result does not contain all sampled DAGs (only maximum)!
set the parameter 'chainout' to TRUE when running MCMC returning DAG scores") return(x$trace) } else { if(which==0) return(x$trace) else return(x$traceadd[[which]]) } } #' Extracting runtime #' #' This function extracts runtime of a particular step of order and partition MCMC. #' #' @param x object of class 'orderMCMC'or 'partitionMCMC' #' @param which integer, defines if the runtime is extracted for: computing score tables (which = 1), running MCMC chain (which = 2) #' @return runtime of a particular step of MCMC scheme or total runtime #' @examples #'myscore<-scoreparameters("bge",Boston) #'\dontrun{ #'orderfit<-sampleBN(myscore,algorithm="order") #'(getRuntime(orderfit,1)) #'(getRuntime(orderfit,2)) #'} #' @export getRuntime<-function(x,which=0) { if(which==1) return(x$info$runtimes["scoretables"]) else return(x$info$runtimes["MCMCchain"]) }
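# A minimal post-processing sketch, assuming a fit obtained with sampleBN()
# as in the examples above; the extractors can then be chained to inspect
# the run (the plot of the DAG score trace is a simple convergence check).
library(BiDAG)
myscore <- scoreparameters("bge", Boston)
orderfit <- sampleBN(myscore, algorithm = "order")
scores <- getTrace(orderfit, which = 0)   # vector of DAG log-scores
plot(scores, type = "l", xlab = "saved MCMC step", ylab = "DAG log-score")
getMCMCscore(orderfit)                    # score of the best DAG found
getRuntime(orderfit, 2)                   # runtime of the MCMC chain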
/scratch/gouwar.j/cran-all/cranData/BiDAG/R/extractors.R
#'Deriving an adjacency matrix of a graph
#'
#'This function derives the adjacency matrix corresponding to a graph object
#'
#'@param g graph, object of class \code{\link[graph]{graphNEL}} (package `graph')
#'@return a square matrix whose dimensions are the number of nodes in the graph g, where element
#' \code{[i,j]} equals \code{1} if there is a directed edge from node \code{i} to node \code{j} in the graph \code{g},
#' and \code{0} otherwise
#' @examples
#' Asiagraph<-m2graph(Asiamat)
#' Asia.adj<-graph2m(Asiagraph)
#'@export
graph2m<-function(g) {
  l<-length(g@edgeL)
  adj<-matrix(rep(0,l*l),nrow=l,ncol=l)
  for (i in 1:l) {
    adj[g@edgeL[[i]]$edges,i]<-1}
  rownames(adj)<-g@nodes
  colnames(adj)<-g@nodes
  return(t(adj))
}

#'Deriving connected subgraph
#'
#'This function derives an adjacency matrix of a subgraph whose nodes are connected to at least one other node in a graph
#'
#'@param adj square adjacency matrix with elements in \code{\{0,1\}}, representing a graph
#'@return adjacency matrix of a subgraph of the graph represented by 'adj' whose nodes have at least one connection
#'@examples
#'dim(gsimmat) #full graph contains 100 nodes
#'gconn<-connectedSubGraph(gsimmat) #removing disconnected nodes
#'dim(gconn) #connected subgraph contains 93 nodes
#'@export
connectedSubGraph<-function(adj) {
  n<-ncol(adj)
  col2del<-vector()
  for(i in 1:n) {
    if(sum(adj[i,],adj[,i])==0) {
      col2del<-c(col2del,i)
    }
  }
  if(length(col2del)>0) {
    adj<-adj[,-col2del]
    adj<-adj[-col2del,]
  }
  return(adj)
}

#'Deriving subgraph
#'
#'This function derives an adjacency matrix of a subgraph based on the adjacency matrix of a full graph and a list of nodes
#'
#'@param adj square adjacency matrix with elements in \code{\{0,1\}}, representing a graph
#'@param nodes vector of node names of the subgraph; should be a subset of column names of 'adj'
#'@return adjacency matrix of a subgraph which includes all 'nodes'
#'@examples
#'getSubGraph(Asiamat,c("E","B","D","X"))
#'@export
getSubGraph<-function(adj,nodes) {
  if(!all(nodes%in%colnames(adj))) {
    stop("some 'nodes' can't be found in column names of adj!")
  }
  adj<-adj[nodes,nodes]
  return(adj)
}

#'Deriving a graph from an adjacency matrix
#'
#'This function derives a graph object corresponding to an adjacency matrix
#'
#'@param adj square adjacency matrix with elements in \code{\{0,1\}}, representing a graph
#'@param nodes (optional) labels of the nodes, \code{c(1:n)} are used by default
#'@return object of class \code{\link[graph]{graphNEL}} (package `graph'); if element \code{adj[i,j]} equals \code{1}, then there is a directed edge from node \code{i} to node \code{j} in the graph, and no edge otherwise
#'@examples
#'m2graph(Asiamat)
#'@export
m2graph<-function(adj,nodes=NULL) {
  l<-ncol(adj)
  if (is.null(nodes)) {
    if (!all(is.character(colnames(adj)))) {
      V <- c(1:l)
      edL <- vector("list", length=l)
      names(edL) <- sapply(V,toString)
    } else {
      edL <- vector("list", length=l)
      names(edL) <- colnames(adj)
      V<-colnames(adj)
    }
    for(i in 1:l) edL[[i]] <- list(edges=which(adj[i,]==1))
  } else {
    V <- nodes
    edL <- vector("list", length=l)
    names(edL) <- V
    for(i in 1:l) edL[[i]] <- list(edges=which(adj[i,]==1))
  }
  gR <- new("graphNEL", nodes=sapply(V,toString), edgeL=edL,edgemode="directed")
  return(gR)
}

#'Comparing two graphs
#'
#'This function compares one (estimated) graph to another graph (true graph), returning a vector of 8 values:
#'\itemize{
#' \item the number of true positive edges ('TP') is the number of edges in the skeleton of 'egraph' which are also present in the skeleton of 'truegraph'
#' \item the number of false positive edges ('FP') is the number of edges in the skeleton of 'egraph' which are absent in the skeleton of 'truegraph'
#' \item the number of false negative edges ('FN') is the number of edges in the skeleton of 'truegraph' which are absent in the skeleton of 'egraph'
#' \item the structural Hamming distance ('SHD') between the two graphs is computed as FP+FN+the number of edges with an error in direction
#' \item TPR equals TP/(TP+FN)
#' \item FPR equals FP/(TN+FP) (TN stands for true negative edges)
#' \item FPRn equals FP/(TP+FN)
#' \item FDR equals FP/(TP+FP)
#' }
#'
#'@param egraph an object of class \code{\link[graph]{graphNEL}} (package `graph'), representing the graph which should be compared to a ground truth graph, or an adjacency matrix corresponding to the graph
#'@param truegraph an object of class \code{\link[graph]{graphNEL}} (package `graph'), representing the ground truth graph, or an adjacency matrix corresponding to this graph
#'@param cpdag logical, if TRUE (FALSE by default) both graphs are first converted to their respective equivalence class (CPDAG); this affects the SHD calculation
#'@param rnd integer, the number of decimal places used for rounding when computing TPR, FPR, FPRn and FDR
#'@return a named numeric vector of 8 elements: number of true positive edges (TP), number of false positive edges (FP), number of false negative edges (FN), true positive rate (TPR),
#'false positive rate (FPR), false positive rate normalized to the true number of edges (FPRn), false discovery rate (FDR) and structural Hamming distance (SHD)
#'@examples
#'Asiascore<-scoreparameters("bde", Asia)
#'\dontrun{
#'eDAG<-learnBN(Asiascore,algorithm="order")
#'compareDAGs(eDAG$DAG,Asiamat)
#'}
#'@export
compareDAGs<-function(egraph, truegraph, cpdag=FALSE, rnd=2) {
  if(is.matrix(egraph)) {
    egraph<-m2graph(egraph)
  } else if (is(egraph,"dgCMatrix") | is(egraph,"dtCMatrix")) {
    egraph<-as.matrix(egraph)
    egraph<-m2graph(egraph)
  }
  if(is.matrix(truegraph)) {
    truegraph<-m2graph(truegraph)
  } else if (is(truegraph,"dgCMatrix") | is(truegraph,"dtCMatrix")) {
    truegraph<-as.matrix(truegraph)
    truegraph<-m2graph(truegraph)
  }
  skeleton1<-graph2skeleton(egraph)
  n<-ncol(skeleton1)
  skeleton2<-graph2skeleton(truegraph)
  numedges1<-sum(skeleton1)
  numedges2<-sum(skeleton2)
  TN<-(n*n-n)/2-numedges2
  diff2<-skeleton2-skeleton1
  res<-vector()
  res[1]<-numedges1-sum(diff2<0) #TP
  res[2]<-sum(diff2<0) #FP
  res[3]<-sum(diff2>0) #FN
  res[4]<-round(res[1]/numedges2, rnd) #TPR
  res[5]<-round(res[2]/TN, rnd) #FPR
  res[6]<-round(res[2]/numedges2, rnd) #FPRn
  res[7]<-round(res[2]/(res[1]+res[2]),rnd) #FDR
  if(cpdag) {
    res[8]<-pcalg::shd(dag2cpdag(egraph), dag2cpdag(truegraph))
  } else res[8]<-pcalg::shd(egraph, truegraph)
  names(res)<-c("TP", "FP", "FN", "TPR", "FPR", "FPRn", "FDR", "SHD")
  return(res)
}

#'Comparing two DBNs
#'
#'This function compares one (estimated) DBN structure to another DBN (true DBN). Either the initial or the transitional structure is compared, depending on the parameter \code{struct}.
#'
#'@param eDBN an object of class \code{\link[graph]{graphNEL}} (or an adjacency matrix corresponding to this DBN), representing the DBN which should be compared to a ground truth DBN
#'@param trueDBN an object of class \code{\link[graph]{graphNEL}} (or an adjacency matrix corresponding to this DBN), representing the ground truth DBN
#'@param struct option used to determine if the initial or the transitional structure should be compared; acceptable values are 'init' and 'trans'
#'@param b number of static variables in one time slice of a DBN; note that for the function to work correctly all static variables have to be in the first b columns of the matrix
#'@return a vector of 5 elements: SHD, number of true positive edges, number of false positive edges, number of false negative edges and true positive rate
#'@examples
#'testscore<-scoreparameters("bge", DBNdata, DBN=TRUE,
#'dbnpar=list(samestruct=TRUE, slices=5, b=3))
#'\dontrun{
#'DBNfit<-learnBN(testscore, algorithm="orderIter",moveprobs=c(0.11,0.84,0.04,0.01))
#'compareDBNs(DBNfit$DAG,DBNmat, struct="trans", b=3)
#'}
#'@export
compareDBNs<-function(eDBN, trueDBN, struct=c("init","trans"), b=0) {
  if(length(struct)>1) {
    struct<-"trans"
    warning("parameter struct was not defined, 'trans' used by default")
  }
  dyn<-(ncol(eDBN)-b)/2
  n<-b+dyn
  matsize<-b+2*dyn
  if(is.matrix(eDBN)) {
    adj1<-eDBN
  } else if (is(eDBN,"dtCMatrix") | is(eDBN,"dgCMatrix")) {
    adj1<-as.matrix(eDBN)
  } else {
    adj1<-graph2m(eDBN)
  }
  if(!is.matrix(trueDBN)) {
    if(is(trueDBN,"graphNEL")) {
      adj2<-graph2m(trueDBN)
    } else {
      adj2<-as.matrix(trueDBN)
    }} else {
      adj2<-trueDBN
    }
  adj1<-adj1[1:matsize,1:matsize]
  adj2<-adj2[1:matsize,1:matsize]
  skeleton1<-graph2skeleton(eDBN)
  skeleton2<-graph2skeleton(trueDBN)
  if(struct=="trans") {
    skeleton1[,1:n]<-0
    skeleton2[,1:n]<-0
    adj1[,1:n]<-0
    adj2[,1:n]<-0
  } else if (struct=="init"){
    adj1<-adj1[1:n,1:n]
    adj2<-adj2[1:n,1:n]
    skeleton1<-skeleton1[1:n,1:n]
    skeleton2<-skeleton2[1:n,1:n]
  } else {
    stop("'struct' must be either 'init' or 'trans'!" )
  }
  eDAG<-m2graph(adj1)
  trueDAG<-m2graph(adj2)
  numedges1<-sum(skeleton1)
  numedges2<-sum(skeleton2)
  diff2<-skeleton2-skeleton1
  res<-vector()
  res["SHD"]<-pcalg::shd(eDAG, trueDAG)
  res["TP"]<-numedges1-sum(diff2<0) #TP
  res["FP"]<-sum(diff2<0) #FP
  res["FN"]<-sum(diff2>0) #FN
  res["TPR"]<-res["TP"]/numedges2 #TPR
  return(res)
}

#'Deriving interactions matrix
#'
#'This function transforms a list of possible interactions between proteins downloaded from the STRING database
#'into a matrix which can be used for blacklisting/penalization in BiDAG.
#'
#' @param curnames character vector with gene names which will be used in the \code{BiDAG} learning function
#' @param int data frame, representing interactions between genes/proteins downloaded from STRING (\url{https://string-db.org/}); two columns are necessary: 'node1' and 'node2'
#' @param mapping (optional) data frame, representing a mapping between 'curnames' (gene names, usually the column names of 'data') and gene names used in interactions downloaded from STRING (\url{https://string-db.org/}); two columns are necessary: 'queryItem' and 'preferredName'
#' @param type character, defines how interactions will be reflected in the output matrix; \code{int} will result in a matrix whose entries equal 1 if an interaction is present in the list of interactions \code{int} and 0 otherwise; \code{blacklist} results in a matrix whose entries equal 0 when an interaction is present in the list of interactions and 1 otherwise;
#' \code{pf} results in a matrix whose entries equal 1 if an interaction is present in the list of interactions \code{int} and \code{pf} otherwise; "int" by default
#' @param pf penalization factor for interactions, needed if \code{type} equals "pf"
#'@return square matrix whose entries correspond to the list of interactions and the parameter \code{type}
#'@examples
#'curnames<-colnames(kirp)
#'intmat<-string2mat(curnames, interactions, mapping, type="pf")
#'@export
string2mat<-function(curnames, int, mapping=NULL, type=c("int"), pf=2) {
  if(is.null(mapping)) {
    mapping<-cbind(curnames,curnames)
    colnames(mapping)<-c("queryItem","preferredName")
    mapping<-as.data.frame(mapping)
  }
  rownames(mapping)<-mapping$queryItem
  n<-length(curnames)
  aliases<-as.character(mapping[curnames,]$preferredName)
  nagenes<-which(is.na(aliases))
  if(length(nagenes)>0) {
    aliases[nagenes]<-curnames[nagenes]
    warning(paste(curnames[nagenes], "were not found in mapping; no interactions inferred"))
  }
  space<-matrix(0,nrow=n,ncol=n)
  rownames(space)<-aliases
  colnames(space)<-aliases
  for(i in 1:n) {
    curnode<-colnames(space)[i]
    nodz1<-intersect(int[which(int$node1==curnode),]$node2,aliases)
    nodz2<-intersect(int[which(int$node2==curnode),]$node1,aliases)
    if(length(nodz1)>0){
      space[curnode,nodz1]<-1
    }
    if(length(nodz2)>0){
      space[curnode,nodz2]<-1
    }
  }
  colnames(space)<-curnames
  rownames(space)<-curnames
  if(type=="int") space else if(type=="blacklist") 1*(!space) else (pf-1)*(!space)+1
}

#returns a matrix of a CPDAG corresponding to a given DAG
dagadj2cpadj<-function(adj) {
  g<-m2graph(adj)
  cpg<-pcalg::dag2cpdag(g)
  return(graph2m(cpg))
}

#returns a symmetric matrix of a skeleton corresponding to a given CPDAG
#UPPER TRIANGULAR VERSION!
adjacency2skeleton<-function(adj) { skel<-1*(adj|transp(adj)) skel<-ifelse(upper.tri(skel)==TRUE,skel,0) return(skel) } #returns a list of edges corresponding to an adjacency matrix adjacency2edgel<-function(adj,nodes=NULL) { l<-ncol(adj) if (is.null(nodes)) { if (!all(is.character(colnames(adj)))) { V <- c(1:l) edL <- vector("list", length=l) names(edL) <- sapply(V,toString) } else { edL <- vector("list", length=l) names(edL) <- colnames(adj) V<-colnames(adj) } for(i in 1:l) edL[[i]] <- list(edges=which(adj[i,]==1))} else { V <- nodes edL <- vector("list", length=l) names(edL) <- V for(i in 1:l) edL[[i]] <- list(edges=which(adj[i,]==1)) } return(edL) } #Deriving an adjacency matrix of the skeleton of a graph # #This function derives the skeleton matrix corresponding to a graph object # #g graph, object of class \code{\link[graph]{graphNEL}} (package `graph') #returns a symmetric square matrix whose dimensions are the number of nodes in the graph \code{g}, # where element \code{[i,j]} equals \code{1} if there is a directed edge from node \code{i} to node \code{j}, # or from node \code{j} to node \code{i}, in the graph \code{g}, and \code{0} otherwise # examples #myDAG<-pcalg::randomDAG(20, prob=0.15, lB = 0.4, uB = 2) #graph2skeleton.m(myDAG) graph2skeleton<-function(g,upper=TRUE,outmat=TRUE) { if (is.matrix(g)) { adj<-g } else if(is(g,"graphNEL")) { adj<-graph2m(g) colnames(adj)<-g@nodes rownames(adj)<-g@nodes }else { adj<-as.matrix(g) } skel<-1*(adj|transp(adj)) if(upper) { skel<-ifelse(upper.tri(skel)==TRUE,skel,0) } if(outmat) { return(skel) } else { return(m2graph(skel)) } } getRepr<-function(pdag,dag) { n<-ncol(pdag) for(i in 1:n) { for(j in 1:n) { if(pdag[i,j]==pdag[j,i] & pdag[i,j]==1) { pdag[i,j]<-dag[i,j] pdag[j,i]<-dag[j,i] } } } if(orderdag(pdag)!="error1") { return(pdag) } else { stop("not possible to resolve cycles!") } } orderdag<-function(adj) { n<-ncol(adj) allnodes<-c(1:n) curnodes<-c(1) order<-c() cntr<-1 while(length(curnodes)<n & cntr<n) { npar<-apply(adj,2,sum) curnodes<-which(npar==0) order<-c(setdiff(curnodes,order),order) adj[curnodes,]<-0 cntr<-cntr+1 } if(sum(adj)==0) return(order) else return("error1") }
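# A minimal sketch illustrating the conversions above and the behaviour of
# compareDAGs when a graph is compared to itself, using the bundled 'Asiamat'
# adjacency matrix:
library(BiDAG)
g <- m2graph(Asiamat)          # adjacency matrix -> graphNEL
m <- graph2m(g)                # graphNEL -> adjacency matrix
all(m == Asiamat)              # the round trip recovers the matrix
compareDAGs(Asiamat, Asiamat)  # TP equals the number of edges, FP = FN = SHD = 0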
/scratch/gouwar.j/cran-all/cranData/BiDAG/R/graphhelpfns.R
#' An adjacency matrix of a simulated dataset #' #' An adjacency matrix representing the ground truth DAG used to generate a synthetic dataset with observations of 100 continuous variables. #' #' @format A binary matrix with 100 rows and 100 columns representing an adjacency matrix of a DAG with 100 nodes: V1, ..., V100 #' "gsimmat"
/scratch/gouwar.j/cran-all/cranData/BiDAG/R/gsimadj.R
#'Initializing score object
#'
#'This function returns an object of class scoreparameters containing the data and parameters needed for calculation of the BDe/BGe score, or a user defined score.
#' @param data the data matrix with n columns (the number of variables) and a number of rows equal to the number of observations
#' @param scoretype the score to be used to assess the DAG structure:
#' "bge" for Gaussian data, "bde" for binary data,
#' "bdecat" for categorical data,
#' "usr" for a user defined score; when the "usr" score is chosen, one must define a function (which evaluates the log score of a node given its parents) in the following format: usrDAGcorescore(j,parentnodes,n,param), where 'j' is the node to be scored, 'parentnodes' are the parents of this node, 'n' is the number of nodes in the network and 'param' is an object of class 'scoreparameters'
#' @param bgepar a list which contains parameters for the BGe score:
#' \itemize{
#' \item am (optional) a positive numerical value, 1 by default
#' \item aw (optional) a positive numerical value, should be more than \code{n+1}, \code{n+am+1} by default
#' \item edgepf (optional) a positive numerical value providing the edge penalization factor to be combined with the BGe score, 1 by default (no penalization)
#' }
#' @param bdepar a list which contains parameters for the BDe score for binary data:
#' \itemize{
#' \item chi (optional) a positive number of prior pseudo counts used by the BDe score, 0.5 by default
#' \item edgepf (optional) a positive numerical value providing the edge penalization factor to be combined with the BDe score, 2 by default
#' }
#' @param bdecatpar a list which contains parameters for the BDe score for categorical data:
#' \itemize{
#' \item chi (optional) a positive number of prior pseudo counts used by the BDe score, 0.5 by default
#' \item edgepf (optional) a positive numerical value providing the edge penalization factor to be combined with the BDe score, 2 by default
#' }
#' @param dbnpar a list which contains parameters used when scoring dynamic Bayesian networks:
#' \itemize{
#' \item samestruct logical, when TRUE the structure of the first time slice is assumed to be the same as the internal structure of all other time slices
#' \item slices integer representing the number of time slices in a DBN
#' \item b the number of static variables; all static variables have to be in the first b columns of the data; for DBNs static variables have the same meaning as bgnodes for usual Bayesian networks; for DBNs the parameter \code{bgnodes} is ignored
#' \item rowids optional vector of time IDs; useful for identifying data for the initial time slice
#' \item datalist indicates if data is passed as a list for a two-step DBN; useful for an unbalanced number of samples in time slices
#' }
#'@param mixedpar a list which contains parameters for the BGe and BDe score for mixed data:
#' \itemize{
#' \item nbin a positive integer number of binary nodes in the network (the binary nodes are always assumed in the first nbin columns of the data)
#' }
#' @param usrpar a list which contains parameters for the user defined score
#' \itemize{
#' \item pctesttype (optional) conditional independence test ("bde","bge","bdecat")
#' }
#'@param MDAG logical, when TRUE the score is initialized for a model with multiple sets of parameters but the same structure
#'@param DBN logical, when TRUE the score is initialized for a dynamic Bayesian network; FALSE by default
#'@param weightvector (optional) a numerical vector of positive values representing the weight of each observation; should be NULL (default) for non-weighted data
#'@param bgnodes (optional) a vector that contains column indices in the data defining the nodes that are forced to be root nodes in the sampled graphs; root nodes are nodes which have no parents but can be parents of other nodes in the network; in the case of DBNs bgnodes represent static variables and are defined via the element \code{b} of the parameter \code{dbnpar}; the parameter \code{bgnodes} is ignored for DBNs
#'@param edgepmat (optional) a matrix of positive numerical values providing the per edge penalization factor to be added to the score, NULL by default
#'@param nodeslabels (optional) a vector of characters which denote the names of nodes in the Bayesian network; by default column names of the data will be taken
#'@return an object of class \code{scoreparameters}, which includes all necessary information for calculating the BDe/BGe score
#'@references Geiger D and Heckerman D (2002). Parameter priors for directed acyclic graphical models and the characterization of several probability distributions. The Annals of Statistics 30, 1412-1440.
#'@references Kuipers J, Moffa G and Heckerman D (2014). Addendum on the scoring of Gaussian acyclic graphical models. The Annals of Statistics 42, 1689-1691.
#'@references Heckerman D and Geiger D (1995). Learning Bayesian networks: A unification for discrete and Gaussian domains. In Eleventh Conference on Uncertainty in Artificial Intelligence, pages 274-284.
#'@references Scutari M (2016). An Empirical-Bayes Score for Discrete Bayesian Networks. Journal of Machine Learning Research 52, 438-448
#'@examples
#' myDAG<-pcalg::randomDAG(20, prob=0.15, lB = 0.4, uB = 2)
#' myData<-pcalg::rmvDAG(200, myDAG)
#' myScore<-scoreparameters("bge", myData)
#'@author Polina Suter, Jack Kuipers
#'@export
# a constructor function for the "scoreparameters" class
scoreparameters<-function(scoretype=c("bge","bde","bdecat","usr"), data,
                          bgepar=list(am=1, aw=NULL, edgepf=1),
                          bdepar=list(chi=0.5, edgepf=2),
                          bdecatpar=list(chi=0.5, edgepf=2),
                          dbnpar=list(samestruct=TRUE, slices=2, b=0, stationary=TRUE, rowids=NULL, datalist=NULL, learninit=TRUE),
                          usrpar=list(pctesttype=c("bge","bde","bdecat")),
                          mixedpar=list(nbin=0),
                          MDAG=FALSE, DBN=FALSE,
                          weightvector=NULL, bgnodes=NULL, edgepmat=NULL, nodeslabels=NULL) {
  initparam<-list()
  if(DBN) {
    dbnpardef<-list(samestruct=TRUE, slices=2, b=0, stationary=TRUE, rowids=NULL, datalist=NULL,learninit=TRUE)
    dbnpardef[names(dbnpar)]<-dbnpar[names(dbnpar)]
    dbnpar<-dbnpardef
    if(is.null(dbnpar$b)) bgnodes<-NULL else if(dbnpar$b>0) bgnodes<-c(1:dbnpar$b) else bgnodes<-NULL
    initparam$learninit<-dbnpar$learninit
    if (!is.null(dbnpar$samestruct)) {
      initparam$split<-!dbnpar$samestruct
    } else {
      initparam$split<-FALSE
    }
    if(dbnpar$slices>2 & !dbnpar$stationary) MDAG<-TRUE
  }
  bgn<-length(bgnodes)
  if(DBN) {
    if(is.null(dbnpar$datalist)) {
      n<-(ncol(data)-bgn)/dbnpar$slices+bgn
    } else {
      n<-(ncol(data[[2]])-bgn)/2+bgn
    }} else n<-ncol(data)
  nsmall<-n-bgn #number of nodes in the network excluding root nodes
  if (!(scoretype%in%c("bge", "bde", "bdecat","usr","mixed"))) { #add mixed later
    stop("Scoretype should be bge (for continuous data), bde (for binary data), bdecat (for categorical data) or usr (for user defined)")
  }
  if(!DBN) {
    if (anyNA(data)) {
      stop("Dataset contains missing data")
    }
    if (ncol(data)!=nsmall+bgn) {
      stop("n and the number of columns in the data do not match")
    }
  } else {
    if(dbnpar$stationary) {
      if(is.null(dbnpar$datalist)) {
        if (ncol(data)!=nsmall*dbnpar$slices+bgn) {
          stop("n, bgn and the number of columns in the data do not
match") } } } } if (!is.null(weightvector)) { if (length(weightvector)!=nrow(data)) { stop("Length of the weightvector does not match the number of rows (observations) in data") } } if (scoretype=="bde") { if (!all(sapply(data,function(x)x%in%c(0,1,NA)))) { stop("Dataset contains non-binary values") } } if (scoretype=="bdecat") { # convert factors to levels indx <- sapply(data, is.factor) data[indx] <- lapply(data[indx], function(x) as.numeric(x)-1) if (!all(unlist(lapply(data, function(x) setequal(unique(x),c(0:max(x))))))) { stop("Some variable levels are not present in the data") } } if (is.null(nodeslabels)) { if(!DBN){ if(all(is.character(colnames(data)))){ nodeslabels<-colnames(data) } else { nodeslabels<-sapply(c(1:n), function(x)paste("v",x,sep="")) } } else { if(dbnpar$stationary & is.null(dbnpar$datalist)) { if(all(is.character(colnames(data)))){ nodeslabels<-colnames(data) } else { if(!is.null(bgnodes)) { staticnames<-sapply(c(1:bgn), function(x)paste("s",x,sep="")) dynamicnames<-rep(sapply(c(1:nsmall), function(x)paste("v",x,sep="")),dbnpar$slices) for(i in 2:dbnpar$slices) { dynamicnames[1:nsmall+(i-1)*nsmall]<-paste(dynamicnames[1:nsmall+(i-1)*nsmall],".",i,sep="") } nodeslabels<-c(staticnames,dynamicnames) } else { nodeslabels<-rep(sapply(c(1:n), function(x)paste("v",x,sep="")),dbnpar$slices) for(i in 2:dbnpar$slices) { nodeslabels[1:nsmall+(i-1)*nsmall]<-paste(nodeslabels[1:nsmall+(i-1)*nsmall],".",i,sep="") } } } } else { nodeslabels<-colnames(data[[2]]) } } } multwv<-NULL if (is.null(dbnpar$datalist)) colnames(data)<-nodeslabels initparam$labels<-nodeslabels initparam$type<-scoretype initparam$DBN<-DBN initparam$MDAG<-MDAG initparam$weightvector<-weightvector initparam$data<-data if(DBN) { initparam$bgnodes<-c(1:n+nsmall) if(bgn>0) { initparam$static<-c(1:bgn) } initparam$mainnodes<-c(1:nsmall+bgn) } else { initparam$bgnodes<-bgnodes initparam$static<-bgnodes if(!is.null(bgnodes)) { initparam$mainnodes<-c(1:n)[-bgnodes] } else initparam$mainnodes<-c(1:n) } initparam$bgn<-bgn initparam$n<-n initparam$nsmall<-nsmall if(DBN) { if(dbnpar$stationary) { initparam$labels.short<-initparam$labels[1:(n+nsmall)] } else { nodeslabels<-colnames(data[[1]]) initparam$labels<-nodeslabels initparam$labels.short<-colnames(data[[1]]) }} else { initparam$labels.short<-initparam$labels } if (is.null(edgepmat)) { initparam$logedgepmat <- NULL } else { if(all(edgepmat>0)) { initparam$logedgepmat <- log(edgepmat) } else stop("all entries of edgepmat matrix must be bigger than 0! 
1 corresponds to no penalization") } if(DBN) { if(!dbnpar$stationary) { #initparam$split=FALSE #changed initparam$stationary<-FALSE initparam$slices<-length(data) initparam$intstr<-list() initparam$trans<-list() initparam$usrinitstr<-list() initparam$usrintstr<-list() initparam$usrtrans<-list() initparam$usrinitstr$rows<-c(1:n) initparam$usrinitstr$cols<-c(1:nsmall+bgn) if(bgn==0) initparam$usrintstr$rows<-c(1:nsmall+n) else initparam$usrintstr$rows<-c(1:bgn,1:nsmall+n) initparam$usrintstr$cols<-c(1:nsmall+n) initparam$usrtrans$rows<-c(1:nsmall+bgn) initparam$usrtrans$cols<-c(1:nsmall+n) if(bgn!=0) { initparam$intstr$rows<-c(1:bgn+nsmall,1:nsmall) } else { initparam$intstr$rows<-c(1:nsmall) } initparam$intstr$cols<-c(1:nsmall) initparam$trans$rows<-c(1:nsmall+n) initparam$trans$cols<-c(1:nsmall) initparam$paramsets<-list() #initparam$split<-FALSE if(!is.null(edgepmat)) { edgepmatfirst<-edgepmat[1:n,1:n] edgepmat <-DBNbacktransform(edgepmat,initparam,nozero=TRUE)$trans #initparam$logedgepmat <- log(edgepmat) } else { edgepmatfirst<-NULL } initparam$nsets<-length(data) datalocal<-data[[length(data)]] if(bgn>0) datalocal <- datalocal[,c(1:nsmall+bgn,1:bgn)] initparam$paramsets[[length(data)]]<-scoreparameters(scoretype=scoretype, datalocal, weightvector=NULL, bgnodes=NULL, bgepar=bgepar, bdepar=bdepar, bdecatpar=bdecatpar, dbnpar=dbnpar, edgepmat=edgepmatfirst, DBN=FALSE) for(i in 1:(length(data)-1)) { datalocal<-data[[i]] if(bgn>0) datalocal <- datalocal[,c(1:nsmall+nsmall+bgn,1:bgn,1:nsmall+bgn)] else { datalocal <- datalocal[,c(1:nsmall+nsmall,1:nsmall)] } initparam$paramsets[[i]]<-scoreparameters(scoretype=scoretype, datalocal, weightvector=NULL, bgnodes=initparam$bgnodes, bgepar=bgepar, bdepar=bdepar, bdecatpar=bdecatpar, dbnpar=dbnpar, edgepmat=edgepmat, DBN=FALSE) } } else { initparam$stationary<-TRUE initparam$slices<-dbnpar$slices # other slices we layer the data, if(!is.null(dbnpar$datalist)) { datalocal<-data[[2]] collabels<-colnames(datalocal) if(bgn>0) newbgnodes<-bgnodes+nsmall else newbgnodes<-bgnodes } else { datalocal <- data[,1:(2*nsmall+bgn)] collabels<-colnames(datalocal) if (bgn>0){ bgdata<-data[,bgnodes] if(dbnpar$slices > 2){ # layer on later time slices for(jj in 1:(dbnpar$slices-2)){ datatobind<-cbind(bgdata,data[,nsmall*jj+1:(2*nsmall)+bgn]) colnames(datatobind)<-collabels datalocal <- rbind(datalocal,datatobind) } } newbgnodes<-bgnodes+nsmall #since we change data columns bgnodes change indices } else { if(dbnpar$slices > 2){ # layer on later time slices for(jj in 1:(dbnpar$slices-2)){ datatobind<-data[,n*jj+1:(2*n)] colnames(datatobind)<-collabels datalocal <- rbind(datalocal,datatobind) } } newbgnodes<-bgnodes } } # and have earlier times on the right hand side! 
(bgnodes if present go between two time slices) if(bgn>0) { datalocal <- datalocal[,c(1:nsmall+nsmall+bgn,1:bgn,1:nsmall+bgn)] } else { datalocal <- datalocal[,c(1:n+n,1:n)] } #define column and row ranges in a compact adjacency matrix initparam$intstr<-list() initparam$trans<-list() initparam$usrinitstr<-list() initparam$usrintstr<-list() initparam$usrtrans<-list() initparam$usrinitstr$rows<-c(1:n) initparam$usrinitstr$cols<-c(1:nsmall+bgn) if(bgn==0) initparam$usrintstr$rows<-c(1:nsmall+n) else initparam$usrintstr$rows<-c(1:bgn,1:nsmall+n) initparam$usrintstr$cols<-c(1:nsmall+n) initparam$usrtrans$rows<-c(1:nsmall+bgn) initparam$usrtrans$cols<-c(1:nsmall+n) if(bgn!=0) { initparam$intstr$rows<-c(1:bgn+nsmall,1:nsmall) } else { initparam$intstr$rows<-c(1:nsmall) } initparam$intstr$cols<-c(1:nsmall) initparam$trans$rows<-c(1:nsmall+n) initparam$trans$cols<-c(1:nsmall) if(!is.null(weightvector)) { weightvector.other<-rep(weightvector,dbnpar$slices-1) } else { weightvector.other<-weightvector } #removing rows containing missing data lNA<-0 if (anyNA(datalocal)) { NArows<-which(apply(datalocal,1,anyNA)==TRUE) lNA<-length(NArows) datalocal<-datalocal[-NArows,] if(!is.null(weightvector)) { weightvector.other<-weightvector.other[-NArows] } } if(!is.null(edgepmat)) { edgepmatfirst<-edgepmat[1:n,1:n] edgepmat <-DBNbacktransform(edgepmat,initparam,nozero=TRUE) if(initparam$split) { edgepmat<-edgepmat$trans } else { initparam$logedgepmat <- log(edgepmat) } } else { edgepmatfirst<-NULL } initparam$otherslices <- scoreparameters(scoretype=scoretype, datalocal, weightvector=weightvector.other, bgnodes=initparam$bgnodes, bgepar=bgepar, bdepar=bdepar, bdecatpar=bdecatpar, dbnpar=dbnpar, edgepmat=edgepmat, DBN=FALSE) # first slice we just take the first block bdecatpar$edgepf <- 1 # we don't want any additional edge penalisation bdepar$edgepf <- 1 if (!is.null(dbnpar$datalist)) { datalocal<-data[[1]] } else { datalocal<-data[,1:(nsmall+bgn)] } if(bgn==0) { datalocal <- datalocal[,c(1:nsmall)] } else { datalocal <- datalocal[,c(1:nsmall+bgn,1:bgn)] #move static variables to the right hand side } if(!is.null(dbnpar$rowids)) { datalocal<-datalocal[which(dbnpar$rowids==1),] } #removing rows containing missing data if (anyNA(datalocal)) { NArows<-which(apply(datalocal,1,anyNA)==TRUE) lNA<-lNA+length(NArows) datalocal<-datalocal[-NArows,] if(!is.null(weightvector)) { weightvector<-weightvector[-NArows] } } if(lNA>0) { cat(paste(lNA, "rows were removed due to missing data"),"\n") } initparam$firstslice <- scoreparameters(scoretype=scoretype, datalocal, weightvector=weightvector, bgnodes=newbgnodes, bgepar=bgepar, bdepar=bdepar, bdecatpar=bdecatpar, dbnpar=dbnpar, edgepmat=edgepmatfirst, DBN=FALSE) } } else if(scoretype=="bge") { if(is.null(bgepar$am)) { bgepar$am<-1 } if(is.null(bgepar$aw)) { bgepar$aw<-n+bgepar$am+1 } if(is.null(bgepar$edgepf)) { bgepar$edgepf<-1 } if (is.null(weightvector)) { N<-nrow(data) covmat<-cov(data)*(N-1) means<-colMeans(data) } else { N<-sum(weightvector) forcov<-cov.wt(data,wt=weightvector,cor=TRUE,method="ML") covmat<-forcov$cov*N means<-forcov$center } initparam$am <- bgepar$am # store parameters initparam$aw <- bgepar$aw initparam$pf <- bgepar$edgepf initparam$N <- N # store effective sample size #initparam$covmat <- (N-1)*covmat initparam$means <- means # store means mu0<-numeric(n) #https://arxiv.org/pdf/1302.6808.pdf page 10 T0scale <- bgepar$am*(bgepar$aw-n-1)/(bgepar$am+1) # This follows from equations (19) and (20) of [GH2002] T0<-diag(T0scale,n,n) initparam$TN <- T0 + covmat 
+ ((bgepar$am*N)/(bgepar$am+N))* (mu0 - means)%*%t(mu0 - means) initparam$awpN<-bgepar$aw+N constscorefact<- -(N/2)*log(pi) + (1/2)*log(bgepar$am/(bgepar$am+N)) initparam$muN <- (N*means + bgepar$am*mu0)/(N + bgepar$am) # posterior mean mean initparam$SigmaN <- initparam$TN/(initparam$awpN-n-1) # posterior mode covariance matrix initparam$scoreconstvec<-numeric(n) for (j in (1:n)) {# j represents the number of parents plus 1 awp<-bgepar$aw-n+j initparam$scoreconstvec[j]<-constscorefact - lgamma(awp/2) + lgamma((awp+N)/2) + ((awp+j-1)/2)*log(T0scale) - j*log(initparam$pf) } } else if (scoretype=="bde") { if(is.null(bdepar$chi)) {bdepar$chi<-0.5} if(is.null(bdepar$edgepf)) {bdepar$edgepf<- 2} if (is.null(weightvector)) { initparam$N<-nrow(data) initparam$d1<-data initparam$d0<-(1-data) } else { initparam$N<-sum(weightvector) initparam$d1<-data*weightvector initparam$d0<-(1-data)*weightvector } maxparents<-n-1 initparam$scoreconstvec<-rep(0,maxparents+1) initparam$chi<-bdepar$chi #1 initparam$pf<-bdepar$edgepf for(i in 0:maxparents){ # constant part of the score depending only on the hyperparameters noparams<-2^i initparam$scoreconstvec[i+1]<-noparams*lgamma(initparam$chi/noparams)-2*noparams*lgamma(initparam$chi/(2*noparams))-i*log(initparam$pf) # } } else if (scoretype=="bdecat") { if(is.null(bdecatpar$chi)) {bdecatpar$chi<-0.5} if(is.null(bdecatpar$edgepf)) {bdecatpar$edgepf<-2} maxparents<-n-1 initparam$chi<-bdecatpar$chi initparam$pf<-bdecatpar$edgepf initparam$scoreconstvec <- -c(0:maxparents)*log(initparam$pf) # just edge penalisation here initparam$Cvec <- apply(initparam$data,2,max)+1 # number of levels of each variable } else if (scoretype=="usr"){ if(is.null(usrpar$pctesttype)){usrpar$pctesttype <- "usr"} initparam$pctesttype <- usrpar$pctesttype initparam <- usrscoreparameters(initparam, usrpar) } else if (scoretype=="mixed") { initparam$nbin<-mixedpar$nbin initparam$binpar<-scoreparameters("bde", data[,1:mixedpar$nbin], bdepar=bdepar, nodeslabels=nodeslabels[1:mixedpar$nbin],weightvector=weightvector) initparam$gausspar<-scoreparameters("bge",data,bgnodes = c(1:mixedpar$nbin), bgepar=bgepar, nodeslabels = nodeslabels, weightvector=weightvector) } attr(initparam, "class") <- "scoreparameters" return(initparam) # #this if for future models # if(!is.null(multwv)) { # initparam$paramsets<-list() # for(i in 1:length(multwv)) { # # initparam$paramsets[[i]]<-scoreparameters(scoretype=scoretype, # # data, weightvector=multwv[[i]], # # bgnodes=initparam$bgnodes, # # bgepar=bgepar, bdepar=bdepar, bdecatpar=bdecatpar, dbnpar=dbnpar, # # edgepmat=edgepmat, DBN=FALSE,MDAG=FALSE,multwv=NULL) # } # } } #Add later #@param mixedpar a list which contains parameters for the BGe and BDe score for mixed data: #\itemize{ #\item nbin a positive integer number of binary nodes in the network (the binary nodes are always assumed in first nbin columns of the data) # } # else if (scoretype=="mixed") { # initparam$nbin<-mixedpar$nbin # initparam$binpar<-scoreparameters("bde", data[,1:mixedpar$nbin], bdepar=bdepar, # nodeslabels=nodeslabels[1:mixedpar$nbin],weightvector=weightvector) # initparam$gausspar<-scoreparameters("bge",data,bgnodes = c(1:mixedpar$nbin), bgepar=bgepar, # nodeslabels = nodeslabels, weightvector=weightvector) # }
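# A minimal sketch of initializing a BGe score object with per-edge
# penalization via 'edgepmat' (the penalization matrix below is illustrative);
# entries larger than 1 penalize the corresponding edges, 1 means no
# penalization, and all entries must be positive.
library(BiDAG)
n <- ncol(Boston)
pmat <- matrix(2, n, n)   # penalize every edge by a factor of 2 ...
pmat[1:3, 1:3] <- 1       # ... except edges among the first three variables
myscore <- scoreparameters("bge", Boston, edgepmat = pmat)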
/scratch/gouwar.j/cran-all/cranData/BiDAG/R/initpar.R
iterativeMCMCplus1<-function(param,iterations,stepsave,plus1it=NULL,MAP=TRUE, posterior=0.5,
                             startorder=NULL,moveprobs,softlimit=9,hardlimit=14,chainout=FALSE,
                             scoreout=FALSE,startspace=NULL,blacklist=NULL,gamma=1,verbose=FALSE,alpha=NULL,
                             cpdag=FALSE,mergecp="skeleton",addspace=NULL,scoretable=NULL,
                             accum,alphainit=NULL,compress=TRUE) {
  n<-param$n
  nsmall<-param$nsmall
  matsize<-ifelse(param$DBN,n+nsmall,n)
  objsizes<-list()
  maxlist<-list()
  maxobj<-list()
  updatenodeslist<-list()
  MCMCtraces<-list()
  if(!param$DBN) {
    if(param$bgn!=0) {
      updatenodes<-c(1:n)[-param$bgnodes]
    } else {
      updatenodes<-c(1:n)
    }
  } else {
    updatenodes<-c(1:nsmall)
  }
  if (is.null(blacklist)) {
    blacklist<-matrix(0,nrow=matsize,ncol=matsize)
  }
  diag(blacklist)<-1
  if(!is.null(param$bgnodes)) {
    for(i in param$bgnodes) {
      blacklist[,i]<-1
    }
  }
  #defining startskel
  if (!is.null(scoretable)) {
    startskel<-scoretable$adjacency
    blacklist<-scoretable$blacklist
    scoretable<-scoretable$tables
  } else {
    if (is.null(startspace)){
      startspace<-definestartspace(alpha,param,cpdag=cpdag,algo="pc",alphainit=alphainit)
    }
    startskeleton<-1*(startspace&!blacklist)
    if(!is.null(addspace)) {
      startskel<-1*((addspace|startskeleton)&!blacklist)
    } else {startskel<-startskeleton }
  }
  blacklistparents<-list()
  for (i in 1:matsize) {
    blacklistparents[[i]]<-which(blacklist[,i]==1)
  }
  if(verbose) {
    cat(paste("maximum parent set size is", max(apply(startskel,2,sum))),"\n")
  }
  if(max(apply(startskel,2,sum))>hardlimit) {
    stop("the size of the maximal parent set is higher than the hardlimit; redefine the search space or increase the hardlimit!")
  }
  maxorder<-startorder
  ptab<-listpossibleparents.PC.aliases(startskel,isgraphNEL=FALSE,n,updatenodes)
  if (verbose) {
    cat("core space defined, score tables are being computed \n")
    flush.console()
  }
  ##################################
  #calculating initial score tables#
  ##################################
  parenttable<-ptab$parenttable # basic parenttable without plus1 lists
  aliases<-ptab$aliases #aliases for each node since all nodes in parent tables are done as 1,2,3,4... not real parent names
  numberofparentsvec<-ptab$numberofparentsvec
  numparents<-ptab$numparents
  plus1lists<-PLUS1(matsize,aliases,updatenodes,blacklistparents)
  rowmaps<-parentsmapping(parenttable,numberofparentsvec,n,updatenodes)
  if(is.null(scoretable)) {
    scoretable<-scorepossibleparents.PLUS1(parenttable=parenttable,plus1lists=plus1lists,
                                           n=n,param=param,updatenodes=updatenodes,
                                           rowmaps,numparents,numberofparentsvec)
  }
  posetparenttable<-poset(parenttable,numberofparentsvec,rowmaps,n,updatenodes)
  if(MAP==TRUE){
    maxmatrices<-posetscoremax(posetparenttable,scoretable,numberofparentsvec,
                               rowmaps,n,plus1lists=plus1lists,updatenodes)
  } else {
    bannedscore<-poset.scores(posetparenttable,scoretable,ptab$numberofparentsvec,rowmaps,
                              n,plus1lists=plus1lists,ptab$numparents,updatenodes)
  }
  oldadj<-startskeleton
  ############
  #MCMC chain#
  ############
  i<-1
  if(is.null(plus1it)) plus1it<-100
  while (length(updatenodes)>0 & i<=plus1it){
    if(i>1){
      newptab<-listpossibleparents.PC.aliases(newadj,isgraphNEL=FALSE,n,updatenodes)
      parenttable[updatenodes]<-newptab$parenttable[updatenodes] # basic parenttable without plus1 lists
      aliases[updatenodes]<-newptab$aliases[updatenodes] #aliases for each node since all nodes in parent tables are done as 1,2,3,4...
not real parent names numberofparentsvec[updatenodes]<-newptab$numberofparentsvec[updatenodes] numparents[updatenodes]<-newptab$numparents[updatenodes] newplus1lists<-PLUS1(matsize,aliases,updatenodes,blacklistparents) plus1lists$mask[updatenodes]<- newplus1lists$mask[updatenodes] plus1lists$parents[updatenodes]<- newplus1lists$parents[updatenodes] plus1lists$aliases[updatenodes]<- newplus1lists$aliases[updatenodes] rowmaps[updatenodes]<-parentsmapping(parenttable,numberofparentsvec,n,updatenodes)[updatenodes] scoretable[updatenodes]<-scorepossibleparents.PLUS1(parenttable,plus1lists,n,param,updatenodes,rowmaps,numparents,numberofparentsvec)[updatenodes] posetparenttable[updatenodes]<-poset(parenttable,numberofparentsvec,rowmaps,n,updatenodes)[updatenodes] if (MAP) { newmaxmatrices<-posetscoremax(posetparenttable,scoretable,numberofparentsvec, rowmaps,n,plus1lists=plus1lists,updatenodes) maxmatrices$maxmatrix[updatenodes]<- newmaxmatrices$maxmatrix[updatenodes] maxmatrices$maxrow[updatenodes]<- newmaxmatrices$maxrow[updatenodes] } else { newbannedscore<-poset.scores(posetparenttable,scoretable,numberofparentsvec,rowmaps, n,plus1lists=plus1lists,numparents,updatenodes) bannedscore[updatenodes]<-newbannedscore[updatenodes] } if(verbose) { cat(paste("search space expansion",i, "\n")) flush.console() } } else { if(verbose) { cat(paste("score tables completed, iterative MCMC is running", "\n")) flush.console() } } if(MAP) { MCMCresult<-orderMCMCplus1max(n,nsmall,startorder,iterations,stepsave,moveprobs,parenttable, scoretable,aliases,numparents,rowmaps,plus1lists,maxmatrices,numberofparentsvec, gamma=gamma,bgnodes=param$bgnodes,matsize=matsize,chainout=chainout,compress=compress) } else { MCMCresult<-orderMCMCplus1(n,nsmall,startorder,iterations,stepsave,moveprobs,parenttable, scoretable,aliases,numparents,rowmaps,plus1lists, bannedscore,numberofparentsvec,gamma=gamma,bgnodes=param$bgnodes, matsize=matsize,chainout=TRUE,compress=compress) } MCMCtraces$DAGscores[[i]]<-MCMCresult$DAGscores if(chainout) { if(param$DBN) { MCMCtraces$incidence[[i]]<-lapply(MCMCresult$incidence,function(x)DBNtransform(x,param=param)) MCMCtraces$orders[[i]]<-lapply(MCMCresult$orders,order2var,varnames=param$firstslice$labels) } else { MCMCtraces$incidence[[i]]<-lapply(MCMCresult$incidence,function(x)assignLabels(x,param$labels)) MCMCtraces$orders[[i]]<-lapply(MCMCresult$orders,order2var,varnames=param$labels) } MCMCtraces$orderscores[[i]]<-MCMCresult$orderscores } maxobj<-storemaxMCMC(MCMCresult,param) maxlist[[i]]<-maxobj maxN<-which.max(MCMCresult$DAGscores) if(i>1){ if (maxobj$score>maxscore){ maxDAG<-maxobj$DAG maxorder<-maxobj$order maxscore<-maxobj$score maxit<-i } } else { maxDAG<-maxobj$DAG maxscore<-maxobj$score maxorder<-maxobj$order maxit<-1 } if (MAP) { newadj<-newspacemap(n,startskeleton,oldadj,softlimit,hardlimit,blacklist, maxdag=MCMCresult$maxdag,mergetype=mergecp, accum=accum) } else { newadj<-newspaceskel(n,startskeleton,oldadj,softlimit,hardlimit,posterior, blacklist,MCMCtrace=MCMCresult[[1]],mergetype=mergecp) } updatenodes<-which(apply(newadj==oldadj,2,all)==FALSE) updatenodeslist[[i]]<-updatenodes if(is.null(plus1it)) { oldadj<-newadj } else if(i<plus1it) { oldadj<-newadj } else if (!scoreout) { oldadj<-newadj } startorder<-c(MCMCresult$orders[[maxN]],param$bgnodes) i<-i+1 } addedge<-sum(newadj)-sum(startskeleton) result<-list() if (scoreout){ if(chainout){output<-4} else{output<-3} } else { if(chainout) {output<-2} else {output<-1} } result$maxtrace<-maxlist result$DAG<-maxobj$DAG 
  result$CPDAG<-Matrix(graph2m(dag2cpdag(m2graph(result$DAG))),sparse = TRUE)
  result$score<-maxobj$score
  result$maxorder<-maxobj$order
  result$trace<-MCMCtraces$DAGscores
  MCMCtraces$DAGscores<-NULL
  if(param$DBN) {
    result$startspace<-DBNtransform(startskeleton,param)
    result$endspace<-DBNtransform(oldadj,param)
  } else {
    result$startspace<-startskeleton
    result$endspace<-oldadj
  }
  switch(as.character(output),
         "1"={ # return only the maximum DAG and order
           # nothing else needs to be added
         },
         "2"={ # return all saved MCMC steps (incidence, DAG scores, order scores and orders) and the max result
           result$traceadd<-MCMCtraces
         },
         "3"={ # return the max DAG, order, last search space incidence and all score tables
           result$scoretable<-list()
           result$scoretable$adjacency<-result$endspace
           result$scoretable$tables<-scoretable
           result$scoretable$blacklist<-blacklist
           attr(result$scoretable,"class")<-"scorespace"
         },
         "4"={ # return all saved MCMC steps, the max result, the last search space and score tables
           result$traceadd<-MCMCtraces
           result$scoretable<-list()
           result$scoretable$adjacency<-result$endspace
           result$scoretable$tables<-scoretable
           result$scoretable$blacklist<-blacklist
           attr(result$scoretable,"class")<-"scorespace"
         }
  )
  return(result)
}
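# A minimal sketch, assuming the exported iterativeMCMC()/learnBN() interface
# (see learnBN.R below): the internal loop above is driven through these
# wrappers, and 'plus1it' caps the number of search-space expansion iterations.
library(BiDAG)
myscore <- scoreparameters("bge", Boston)
itfit <- iterativeMCMC(myscore, plus1it = 3, verbose = TRUE)
itfit$score   # log-score of the best DAG found over all expansions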
/scratch/gouwar.j/cran-all/cranData/BiDAG/R/iterativeMCMC.R
#'Bayesian network structure learning
#'
#'This function can be used for finding the maximum a posteriori (MAP) DAG using stochastic search relying on MCMC schemes. Due to the superexponential size of the search space, it
#'must be reduced. By default the search space is limited to the skeleton found through the PC algorithm by means of conditional independence tests
#'(using the functions \code{\link[pcalg]{skeleton}} and \code{\link[pcalg]{pc}} from the `pcalg' package [Kalisch et al, 2012]).
#'It is also possible to define an arbitrary search space by inputting an adjacency matrix, for example estimated by partial correlations or other network algorithms. The order MCMC scheme (\code{algorithm="order"})
#'performs the search for a maximum scoring order and selects a maximum scoring DAG from this order as MAP. To avoid discovering a suboptimal graph due to the absence
#'of some of the true positive edges in the search space, the function includes the possibility to expand the default or input search space, by allowing each node in the network to have one additional parent (\code{plus1=TRUE}).
#'This offers improvements in the learning of Bayesian networks. The iterative MCMC (\code{algorithm="orderIter"}) scheme allows for iterative expansions of the search space.
#'This is useful in cases when the initial search space is poor in the sense that it contains only a limited number of true positive edges. Iterative expansions of the search space
#'efficiently solve this issue. However, this scheme requires longer runtimes due to the need to run multiple consecutive MCMC chains.
#'This function is a wrapper for the individual structure learning functions that implement each of the described algorithms; for details see \code{\link{orderMCMC}}
#' and \code{\link{iterativeMCMC}}.
#' @param scorepar an object of class \code{scoreparameters}, containing the data and score parameters, see constructor function \code{\link{scoreparameters}}
#' @param algorithm MCMC scheme to be used for MAP structure learning; possible options are "order" (\code{\link{orderMCMC}}) or "orderIter" (\code{\link{iterativeMCMC}})
#' @param chainout logical, if TRUE the saved MCMC steps are returned, FALSE by default
#' @param scoreout logical, if TRUE the search space and score tables are returned; FALSE by default for "order", TRUE for "orderIter"
#' @param moveprobs a numerical vector of 4 (for "order" and "orderIter" algorithms) or 5 values (for "partition" algorithm) representing probabilities of the different moves in the space of
#' order and partitions accordingly. The moves are described in the corresponding algorithm specific functions \code{\link{orderMCMC}} and \code{\link{partitionMCMC}}
#' @param iterations integer, the number of MCMC steps, the default value is \eqn{6n^{2}\log{n}} for orderMCMC, \eqn{20n^{2}\log{n}} for partitionMCMC and \eqn{3.5n^{2}\log{n}} for iterativeMCMC; where n is the number of nodes in the Bayesian network
#' @param stepsave integer, thinning interval for the MCMC chain, indicating the number of steps between two output iterations, the default is \code{iterations/1000}
#' @param alpha numerical significance value in \code{(0,1)} for the conditional independence tests at the PC algorithm stage
#' @param gamma tuning parameter which transforms the score by raising it to this power, 1 by default
#' @param cpdag logical, if TRUE the CPDAG returned by the PC algorithm will be used as the search
#'space, if FALSE (default) the full undirected skeleton will be used as the search space
#' @param hardlimit integer, limit on the size of parent sets in the search space; 12 by default
#' @param verbose logical, if TRUE messages about the algorithm's progress will be printed, FALSE by default
#' @param compress logical, if TRUE adjacency matrices representing sampled graphs will be stored as a sparse Matrix (recommended); TRUE by default
#' @param startspace (optional) a square sparse or ordinary matrix, of dimensions equal to the number of nodes, which defines the search space for the order MCMC in the form of an adjacency matrix. If NULL, the skeleton obtained from the PC-algorithm will be used. If \code{startspace[i,j]} equals 1 (0) it means that the edge from node \code{i} to node \code{j} is included (excluded) from the search space. To include an edge in both directions, both \code{startspace[i,j]} and \code{startspace[j,i]} should be 1.
#' @param blacklist (optional) a square sparse or ordinary matrix, of dimensions equal to the number of nodes, which defines edges to exclude from the search space. If \code{blacklist[i,j]} equals 1 it means that the edge from node \code{i} to node \code{j} is excluded from the search space.
#' @param scoretable (optional) object of class \code{scorespace} containing list of score tables calculated for example by the last iteration of the function \code{iterativeMCMC}. When not NULL, parameter \code{startspace} is ignored.
#' @param startpoint (optional) integer vector of length n (representing an order when \code{algorithm="order"} or \code{algorithm="orderIter"}) or an adjacency matrix or sparse adjacency matrix (representing a DAG when \code{algorithm="partition"}), which will be used as the starting point in the MCMC algorithm, the default starting point is random
#' @param plus1 logical, if TRUE (default) the search is performed on the extended search space; only changeable for orderMCMC; for other algorithms it is fixed to TRUE
#' @param iterpar additional list of parameters for the MCMC scheme implementing iterative expansions of the search space; for more details see \code{\link{iterativeMCMC}}; list(posterior = 0.5, softlimit = 9, mergetype = "skeleton", accum = FALSE,
#'plus1it = NULL, addspace = NULL, alphainit = NULL)
#' @return Depending on the value of the parameter \code{algorithm} returns an object of class \code{orderMCMC} or \code{iterativeMCMC} which contains the log-score trace of sampled DAGs as well
#' as the adjacency matrix of the maximum scoring DAG(s), its score and the order or partition score.
The output can optionally include DAGs sampled in MCMC iterations and the score tables. #' Optional output is regulated by the parameters \code{chainout} and \code{scoreout}. See \code{\link{orderMCMC class}}, \code{\link{iterativeMCMC class}} for a detailed description of the classes' structures. #' @note see also extractor functions \code{\link{getDAG}}, \code{\link{getTrace}}, \code{\link{getSpace}}, \code{\link{getMCMCscore}}. #'@references P. Suter, J. Kuipers, G. Moffa, N.Beerenwinkel (2023) <doi:10.18637/jss.v105.i09> #'@references Friedman N and Koller D (2003). A Bayesian approach to structure discovery in bayesian networks. Machine Learning 50, 95-125. #'@references Kalisch M, Maechler M, Colombo D, Maathuis M and Buehlmann P (2012). Causal inference using graphical models with the R package pcalg. Journal of Statistical Software 47, 1-26. #'@references Geiger D and Heckerman D (2002). Parameter priors for directed acyclic graphical models and the characterization of several probability distributions. The Annals of Statistics 30, 1412-1440. #'@references Kuipers J, Moffa G and Heckerman D (2014). Addendum on the scoring of Gaussian acyclic graphical models. The Annals of Statistics 42, 1689-1691. #'@references Spirtes P, Glymour C and Scheines R (2000). Causation, Prediction, and Search, 2nd edition. The MIT Press. #'@examples #'\dontrun{ #'myScore<-scoreparameters("bge",Boston) #'mapfit<-learnBN(myScore,"orderIter") #'summary(mapfit) #'plot(mapfit) #'} #'@author Polina Suter, Jack Kuipers, the code partly derived from the order MCMC implementation from Kuipers J, Moffa G (2017) <doi:10.1080/01621459.2015.1133426> #'@export learnBN<-function(scorepar, algorithm = c("order", "orderIter"), chainout = FALSE, scoreout = ifelse(algorithm=="orderIter",TRUE,FALSE), alpha = 0.05, moveprobs = NULL, iterations = NULL, stepsave = NULL, gamma = 1, verbose = FALSE, compress = TRUE, startspace = NULL, blacklist = NULL, scoretable = NULL, startpoint = NULL, plus1 = TRUE, iterpar = list(softlimit = 9, mergetype = "skeleton", accum = FALSE, plus1it = NULL, addspace = NULL, alphainit = NULL), cpdag = FALSE, hardlimit = 12){ if(length(algorithm)>1) algorithm<-"order" if(algorithm=="order") { MCMCresult<-orderMCMC(scorepar,chainout=chainout,scoreout=scoreout,alpha=alpha,moveprobs=moveprobs, iterations=iterations,stepsave=stepsave,gamma=gamma,verbose=verbose,compress=compress, startspace=startspace,blacklist=blacklist,scoretable=scoretable,startorder=startpoint, plus1=plus1,cpdag=cpdag,hardlimit=hardlimit) } else { iterpardef<-list(softlimit = 9, mergetype = "skeleton", accum = FALSE, plus1it = NULL, addspace = NULL, alphainit = NULL) iterpardef[names(iterpar)]<-iterpar[names(iterpar)] iterpar<-iterpardef MCMCresult<-iterativeMCMC(scorepar,chainout=chainout,scoreout=scoreout,alpha=alpha,moveprobs=moveprobs, iterations=iterations,stepsave=stepsave,gamma=gamma,verbose=verbose,compress=compress, startspace=startspace,blacklist=blacklist,scoretable=scoretable,startorder=startpoint, cpdag=cpdag,hardlimit=hardlimit,softlimit=iterpar$softlimit, mergetype=iterpar$mergetype,accum=iterpar$accum,plus1it=iterpar$plus1it, addspace=iterpar$addspace,alphainit=iterpar$alphainit) } return(MCMCresult) }
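# A minimal sketch contrasting the two schemes wrapped by learnBN: the
# iterative scheme can reach a higher-scoring DAG when the PC-based core
# search space misses true positive edges.
library(BiDAG)
myscore <- scoreparameters("bge", Boston)
fit1 <- learnBN(myscore, algorithm = "order")
fit2 <- learnBN(myscore, algorithm = "orderIter")
c(order = getMCMCscore(fit1), orderIter = getMCMCscore(fit2))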
/scratch/gouwar.j/cran-all/cranData/BiDAG/R/learnBN.R
#' Asia dataset
#'
#' A synthetic dataset from Lauritzen and Spiegelhalter (1988) about lung
#' diseases (tuberculosis, lung cancer or bronchitis) and visits to Asia.
#'
#' @source \url{https://www.bnlearn.com/bnrepository/}
#' @format A data frame with 5000 rows and 8 binary variables:
#' \itemize{
#' \item D (dyspnoea), binary 1/0 corresponding to "yes" and "no"
#' \item T (tuberculosis), binary 1/0 corresponding to "yes" and "no"
#' \item L (lung cancer), binary 1/0 corresponding to "yes" and "no"
#' \item B (bronchitis), binary 1/0 corresponding to "yes" and "no"
#' \item A (visit to Asia), binary 1/0 corresponding to "yes" and "no"
#' \item S (smoking), binary 1/0 corresponding to "yes" and "no"
#' \item X (chest X-ray), binary 1/0 corresponding to "yes" and "no"
#' \item E (tuberculosis versus lung cancer/bronchitis), binary 1/0 corresponding to "yes" and "no"
#' }
#'@references Lauritzen S, Spiegelhalter D (1988). `Local Computation with Probabilities on Graphical Structures and their Application to Expert Systems (with discussion)'.
#'Journal of the Royal Statistical Society: Series B 50, 157-224.
#'
"Asia"

#' Boston housing data
#'
#' A dataset containing information collected by the U.S. Census Service concerning housing
#' in the area of Boston, originally published by Harrison and Rubinfeld (1978).
#'
#' @source \url{http://lib.stat.cmu.edu/datasets/boston}
#' @format A data frame with 506 rows and 14 variables:
#' \itemize{
#' \item CRIM - per capita crime rate by town
#' \item ZN - proportion of residential land zoned for lots over 25,000 sq.ft.
#' \item INDUS - proportion of non-retail business acres per town
#' \item CHAS - Charles River dummy variable (1 if tract bounds river; 0 otherwise)
#' \item NOX - nitric oxides concentration (parts per 10 million)
#' \item RM - average number of rooms per dwelling
#' \item AGE - proportion of owner-occupied units built prior to 1940
#' \item DIS - weighted distances to five Boston employment centres
#' \item TAX - full-value property-tax rate per $10,000
#' \item RAD - index of accessibility to radial highways
#' \item PTRATIO - pupil-teacher ratio by town
#' \item B - 1000(Bk - 0.63)^2 where Bk is the proportion of blacks by town
#' \item LSTAT - percentage lower status of the population
#' \item MEDV - median value of owner-occupied homes in $1000's
#' }
#'
#'@references Harrison, D and Rubinfeld, DL (1978)
#' `Hedonic prices and the demand for clean air',
#' Journal of Environmental Economics and Management 5, 81-102.
#'
"Boston"

#' A simulated data set from a Gaussian continuous Bayesian network
#'
#' A synthetic dataset containing 1000 observations generated from a random DAG with 100 continuous nodes.
#' Functions 'randomDAG' and 'rmvDAG' from the R-package 'pcalg' were used to generate the data.
#'
#' @format A data frame with 1000 rows representing observations of 100 continuous variables: V1, ..., V100
#'
"gsim"

#' A simulated data set from a Gaussian continuous Bayesian network
#'
#' A synthetic dataset containing 100 observations generated from a random DAG with 100 continuous nodes.
#' Functions 'randomDAG' and 'rmvDAG' from the R-package 'pcalg' were used to generate the data.
#'
#' @format A data frame with 100 rows representing observations of 100 continuous variables: V1, ..., V100
#'
"gsim100"
/scratch/gouwar.j/cran-all/cranData/BiDAG/R/learningdata.R
#'Structure learning with the order MCMC algorithm
#'
#'This function implements the order MCMC algorithm for the structure learning of Bayesian networks. This function can be used
#'for MAP discovery and for sampling from the posterior distribution of DAGs given the data.
#'Due to the superexponential size of the search space as the number of nodes increases, the
#'MCMC search is performed on a reduced search space.
#'By default the search space is limited to the skeleton found through the PC algorithm by means of conditional independence tests
#'(using the functions \code{\link[pcalg]{skeleton}} and \code{\link[pcalg]{pc}} from the `pcalg' package [Kalisch et al, 2012]).
#'It is also possible to define an arbitrary search space by inputting an adjacency matrix, for example estimated by partial correlations or other network algorithms.
#'Also implemented is the possibility to expand the default or input search space, by allowing each node in the network to have one additional parent. This offers improvements in the learning and sampling of Bayesian networks.
#' @param scorepar an object of class \code{scoreparameters}, containing the data and score parameters, see constructor function \code{\link{scoreparameters}}
#' @param MAP logical, if TRUE (default) the search targets the MAP DAG (a DAG with maximum score),
#' if FALSE at each MCMC step a DAG is sampled from the order proportionally to its score
#' @param plus1 logical, if TRUE (default) the search is performed on the extended search space
#' @param chainout logical, if TRUE the saved MCMC steps are returned, FALSE by default
#' @param scoreout logical, if TRUE the search space and score tables are returned, FALSE by default
#' @param moveprobs a numerical vector of 4 values in \code{[0,1]} corresponding to the probabilities of the following MCMC moves in the order space
#' \itemize{
#' \item exchanging 2 random nodes in the order
#' \item exchanging 2 adjacent nodes in the order
#' \item placing a single node elsewhere in the order
#' \item staying still
#' }
#' @param iterations integer, the number of MCMC steps, the default value is \eqn{6n^{2}\log{n}}
#' @param stepsave integer, thinning interval for the MCMC chain, indicating the number of steps between two output iterations, the default is \code{iterations/1000}
#' @param alpha numerical significance value in \code{(0,1)} for the conditional independence tests at the PC algorithm stage
#' @param gamma tuning parameter which transforms the score by raising it to this power, 1 by default
#' @param cpdag logical, if TRUE the CPDAG returned by the PC algorithm will be used as the search
#'space, if FALSE (default) the full undirected skeleton will be used as the search space
#' @param hardlimit integer, limit on the size of parent sets in the search space; by default 14 when plus1=TRUE and 20 when plus1=FALSE
#' @param verbose logical, if TRUE messages about the algorithm's progress will be printed, FALSE by default
#' @param compress logical, if TRUE adjacency matrices representing sampled graphs will be stored as a sparse Matrix (recommended); TRUE by default
#' @param startspace (optional) a square matrix, of dimensions equal to the number of nodes, which defines the search space for the order MCMC in the form of an adjacency matrix. If NULL, the skeleton obtained from the PC-algorithm will be used. If \code{startspace[i,j]} equals 1 (0) it means that the edge from node \code{i} to node \code{j} is included (excluded) from the search space.
To include an edge in both directions, both \code{startspace[i,j]} and \code{startspace[j,i]} should be 1. #' @param blacklist (optional) a square matrix, of dimensions equal to the number of nodes, which defines edges to exclude from the search space. If \code{blacklist[i,j]} equals to 1 it means that the edge from node \code{i} to node \code{j} is excluded from the search space. #' @param scoretable (optional) object of class \code{scorespace} containing list of score tables calculated for example by the last iteration of the function \code{iterativeMCMC}. When not NULL, parameter \code{startspace} is ignored. #' @param startorder (optional) integer vector of length n, which will be used as the starting order in the MCMC algorithm, the default order is random #' @return Object of class \code{orderMCMC}, which contains log-score trace of sampled DAGs as well #' as adjacency matrix of the maximum scoring DAG, its score and the order score. The output can optionally include DAGs sampled in MCMC iterations and the score tables. #' Optional output is regulated by the parameters \code{chainout} and \code{scoreout}. See \code{\link{orderMCMC class}} for a detailed class structure. #' @note see also extractor functions \code{\link{getDAG}}, \code{\link{getTrace}}, \code{\link{getSpace}}, \code{\link{getMCMCscore}}. #'@references P. Suter, J. Kuipers, G. Moffa, N.Beerenwinkel (2023) <doi:10.18637/jss.v105.i09> #'@references Friedman N and Koller D (2003). A Bayesian approach to structure discovery in bayesian networks. Machine Learning 50, 95-125. #'@references Kalisch M, Maechler M, Colombo D, Maathuis M and Buehlmann P (2012). Causal inference using graphical models with the R package pcalg. Journal of Statistical Software 47, 1-26. #'@references Geiger D and Heckerman D (2002). Parameter priors for directed acyclic graphical models and the characterization of several probability distributions. The Annals of Statistics 30, 1412-1440. #'@references Kuipers J, Moffa G and Heckerman D (2014). Addendum on the scoring of Gaussian acyclic graphical models. The Annals of Statistics 42, 1689-1691. #'@references Spirtes P, Glymour C and Scheines R (2000). Causation, Prediction, and Search, 2nd edition. The MIT Press. 
#'@examples #'\dontrun{ #'#find a MAP DAG with search space defined by PC and plus1 neighbourhood #'Bostonscore<-scoreparameters("bge",Boston) #'#estimate MAP DAG #'orderMAPfit<-orderMCMC(Bostonscore) #'summary(orderMAPfit) #'#sample DAGs from the posterior distribution #'ordersamplefit<-orderMCMC(Bostonscore,MAP=FALSE,chainout=TRUE) #'plot(ordersamplefit) #'} #'@author Polina Suter, Jack Kuipers, the code partly derived from the order MCMC implementation from Kuipers J, Moffa G (2017) <doi:10.1080/01621459.2015.1133426> #'@export orderMCMC<-function(scorepar, MAP=TRUE, plus1=TRUE,chainout=FALSE, scoreout=FALSE, moveprobs=NULL, iterations=NULL, stepsave=NULL, alpha=0.05, cpdag=FALSE, gamma=1, hardlimit=ifelse(plus1,14,20),verbose=FALSE,compress=TRUE, startspace=NULL, blacklist=NULL,startorder=NULL, scoretable=NULL) { if (is.null(moveprobs)) { prob1<-99 if(scorepar$nsmall>3){ prob1<-round(6*99*scorepar$nsmall/(scorepar$nsmall^2+10*scorepar$nsmall-24)) } prob1<-prob1/100 moveprobs<-c(prob1,0.99-prob1,0.01) moveprobs<-moveprobs/sum(moveprobs) moveprobs<-c(moveprobs[c(1,2)],0,moveprobs[3]) } if(is.null(iterations)){ if(scorepar$nsmall<26){ iterations<-30000 } else { iterations<-(6*scorepar$nsmall*scorepar$nsmall*log(scorepar$nsmall))-(6*scorepar$nsmall*scorepar$nsmall*log(scorepar$nsmall)) %% 1000 } } if(is.null(stepsave)){ stepsave<-floor(iterations/1000) } ordercheck<-checkstartorder(startorder,varnames=scorepar$labels.short,mainnodes=scorepar$mainnodes, bgnodes=scorepar$static,DBN=scorepar$DBN,split=scorepar$split) if(ordercheck$errorflag) { stop(ordercheck$message) } else { startorder<-ordercheck$order } if(scorepar$DBN) { #flag for DBN structure learning with different initial and transition structures if(!is.null(blacklist)) { blacklist<-DBNbacktransform(blacklist,scorepar) } if(!is.null(scoretable)) { scoretable$adjacency<-DBNbacktransform(scoretable$adjacency,scorepar) scoretable$blacklist<-DBNbacktransform(scoretable$blacklist,scorepar) } if(!is.null(startspace)) { startspace<-DBNbacktransform(startspace,scorepar) } if(scorepar$split) { #we learn initial and transition structures separately if(scorepar$MDAG) { param1<-scorepar$paramsets[[scorepar$nsets]] param2<-scorepar$paramsets[[1]] param2$paramsets<-scorepar$paramsets[1:(scorepar$nsets-1)] param2$MDAG<-TRUE } else { param1<-scorepar$firstslice param2<-scorepar$otherslices } if(scoreout | !is.null(scoretable)) { cat("option scoreout always equals FALSE for DBNs with samestruct=FALSE, scoretable parameter is ignored \n") } result.trans<-orderMCMCmain(param=param2,iterations,stepsave,startorder=startorder$trans, moveprobs=moveprobs,alpha=alpha,cpdag=cpdag,scoretable=NULL, plus1=plus1,MAP=MAP,chainout=chainout, scoreout=FALSE, startspace=startspace$trans,blacklist=blacklist$trans,gamma=gamma,verbose=verbose, hardlimit=hardlimit,compress=compress) if(scorepar$learninit) { result.init<-orderMCMCmain(param=param1,iterations,stepsave,startorder=startorder$init, moveprobs=moveprobs,alpha=alpha,cpdag=cpdag,scoretable=NULL, plus1=plus1,MAP=MAP,chainout=chainout, scoreout=FALSE, startspace=startspace$init,blacklist=blacklist$init,gamma=gamma,verbose=verbose, hardlimit=hardlimit,compress=compress) result<-mergeDBNres(result.init,result.trans,scorepar,algo="order") } else { result<-result.trans } } else { result<-orderMCMCmain(param=scorepar,iterations,stepsave,startorder=startorder, moveprobs=moveprobs,alpha=alpha,cpdag=cpdag,scoretable=scoretable, plus1=plus1,MAP=MAP,chainout=chainout, scoreout=scoreout, 
startspace=startspace,blacklist=blacklist,gamma=gamma,verbose=verbose, hardlimit=hardlimit,compress=compress) } } else { result<-orderMCMCmain(param=scorepar,iterations,stepsave,startorder=startorder, moveprobs=moveprobs,alpha=alpha,cpdag=cpdag,scoretable=scoretable, plus1=plus1,MAP=MAP,chainout=chainout, scoreout=scoreout, startspace=startspace,blacklist=blacklist,gamma=gamma,verbose=verbose, hardlimit=hardlimit,compress=compress) } if(plus1) { result$info$algo<-"plus1 order MCMC" } else { result$info$algo<-"base order MCMC" } result$info$DBN<-scorepar$DBN if(scorepar$DBN) { result$info$nsmall<-scorepar$nsmall result$info$bgn<-scorepar$bgn result$info$split<-scorepar$split } if(is.null(startspace)) { result$info$spacealgo<-"PC" } else { result$info$spacealgo<-"user defined matrix" } result$info$iterations<-iterations result$info$samplesteps<-length(result$trace) if(MAP) { result$info$sampletype<-"MAP" } else { result$info$sampletype<-"sample" } result$info$startorder<-startorder result$info$fncall<-match.call() attr(result,"class")<-"orderMCMC" return(result) } #'DAG structure sampling with partition MCMC #' #'This function implements the partition MCMC algorithm for the structure learning of Bayesian networks. This procedure provides an unbiased sample from the posterior distribution of DAGs given the data. #'The search space can be defined either by a preliminary run of the function \code{iterativeMCMC} or by a given adjacency matrix (which can be the full matrix with zero on the diagonal, to consider the entire space of DAGs, feasible only for a limited number of nodes). #' #' @param scorepar an object of class \code{scoreparameters}, containing the data and scoring parameters; see constructor function \code{\link{scoreparameters}}. #' @param moveprobs (optional) a numerical vector of 5 values in \code{\{0,1\}} corresponding to the following MCMC move probabilities in the space of partitions: #' \itemize{ #' \item swap any two elements from different partition elements #' \item swap any two elements in adjacent partition elements #' \item split a partition element or join one #' \item move a single node into another partition element or into a new one #' \item stay still #' } #' @param iterations integer, the number of MCMC steps, the default value is \eqn{20n^{2}\log{n}} #' @param stepsave integer, thinning interval for the MCMC chain, indicating the number of steps between two output iterations, the default is \code{iterations/1000} #' @param alpha numerical significance value in \code{\{0,1\}} for the conditional independence tests at the PC algorithm stage #' @param gamma tuning parameter which transforms the score by raising it to this power, 1 by default #' @param verbose logical, if set to TRUE (default) messages about progress will be printed #' @param scoreout logical, if TRUE the search space and score tables are returned, FALSE by default #' @param compress logical, if TRUE adjacency matrices representing sampled graphs will be stored as a sparse Matrix (recommended); TRUE by default #' @param startspace (optional) a square matrix, of dimensions equal to the number of nodes, which defines the search space for the order MCMC in the form of an adjacency matrix; if NULL, the skeleton obtained from the PC-algorithm will be used. If \code{startspace[i,j]} equals to 1 (0) it means that the edge from node \code{i} to node \code{j} is included (excluded) from the search space. To include an edge in both directions, both \code{startspace[i,j]} and \code{startspace[j,i]} should be 1. 
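
# --- Illustrative sketch, not part of the BiDAG API ---
# demoOrderMCMCspace() is a hypothetical helper showing how to restrict the
# search space by hand with startspace and blacklist, as documented above.
# The matrices chosen here are arbitrary; the Asia data ships with BiDAG.
demoOrderMCMCspace <- function() {
  myScore <- scoreparameters("bde", Asia)
  n <- ncol(Asia)
  fullspace <- matrix(1, n, n) - diag(n)  # allow every edge except self-loops
  blmat <- matrix(0, n, n)
  blmat[1, 2] <- 1                        # forbid the edge from node 1 to node 2
  fit <- orderMCMC(myScore, startspace = fullspace, blacklist = blmat)
  getDAG(fit)                             # adjacency matrix of the best DAG found
}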
#' DAG structure sampling with partition MCMC
#'
#' This function implements the partition MCMC algorithm for the structure learning of Bayesian networks. The procedure provides an unbiased sample from the posterior distribution of DAGs given the data.
#' The search space can be defined either by a preliminary run of the function \code{iterativeMCMC} or by a given adjacency matrix (which can be the full matrix with zero on the diagonal, to consider the entire space of DAGs, feasible only for a limited number of nodes).
#'
#' @param scorepar an object of class \code{scoreparameters}, containing the data and scoring parameters; see constructor function \code{\link{scoreparameters}}
#' @param moveprobs (optional) a numerical vector of 5 values in \code{[0,1]} corresponding to the following MCMC move probabilities in the space of partitions:
#' \itemize{
#' \item swap any two elements from different partition elements
#' \item swap any two elements in adjacent partition elements
#' \item split a partition element or join one
#' \item move a single node into another partition element or into a new one
#' \item stay still
#' }
#' @param iterations integer, the number of MCMC steps, the default value is \eqn{20n^{2}\log{n}}
#' @param stepsave integer, thinning interval for the MCMC chain, indicating the number of steps between two output iterations, the default is \code{iterations/1000}
#' @param alpha numerical significance value in \code{[0,1]} for the conditional independence tests at the PC algorithm stage
#' @param gamma tuning parameter which transforms the score by raising it to this power, 1 by default
#' @param verbose logical, if set to TRUE messages about progress will be printed, FALSE by default
#' @param scoreout logical, if TRUE the search space and score tables are returned, FALSE by default
#' @param compress logical, if TRUE adjacency matrices representing sampled graphs will be stored as a sparse Matrix (recommended); TRUE by default
#' @param startspace (optional) a square matrix, of dimensions equal to the number of nodes, which defines the search space in the form of an adjacency matrix; if NULL, the skeleton obtained from the PC-algorithm will be used. If \code{startspace[i,j]} equals 1 (0) the edge from node \code{i} to node \code{j} is included in (excluded from) the search space. To include an edge in both directions, both \code{startspace[i,j]} and \code{startspace[j,i]} should be 1.
#' @param blacklist (optional) a square matrix, of dimensions equal to the number of nodes, which defines edges to exclude from the search space; if \code{blacklist[i,j]=1} the edge from node \code{i} to node \code{j} is excluded from the search space
#' @param scoretable (optional) object of class \code{scorespace} containing a list of score tables calculated, for example, by the last iteration of the function \code{iterativeMCMC}. When not NULL, the parameter \code{startspace} is ignored
#' @param startDAG (optional) an adjacency matrix of dimensions equal to the number of nodes, representing a DAG in the search space defined by startspace. If startspace is defined but \code{startDAG} is not, an empty DAG will be used by default
#' @return Object of class \code{partitionMCMC}, which contains the log-score trace as well
#' as the adjacency matrix of the maximum scoring DAG, its score and the order score. Additionally, returns all sampled DAGs (represented by their adjacency matrices), their scores,
#' orders and partitions. See \code{\link{partitionMCMC class}}.
#' @note see also extractor functions \code{\link{getDAG}}, \code{\link{getTrace}}, \code{\link{getSpace}}, \code{\link{getMCMCscore}}.
#' @references P. Suter, J. Kuipers, G. Moffa, N. Beerenwinkel (2023) <doi:10.18637/jss.v105.i09>
#' @references Kuipers J and Moffa G (2017). Partition MCMC for inference on acyclic digraphs. Journal of the American Statistical Association 112, 282-299.
#' @references Geiger D and Heckerman D (2002). Parameter priors for directed acyclic graphical models and the characterization of several probability distributions. The Annals of Statistics 30, 1412-1440.
#' @references Heckerman D and Geiger D (1995). Learning Bayesian networks: A unification for discrete and Gaussian domains. In Eleventh Conference on Uncertainty in Artificial Intelligence, pages 274-284.
#' @references Kalisch M, Maechler M, Colombo D, Maathuis M and Buehlmann P (2012). Causal inference using graphical models with the R package pcalg. Journal of Statistical Software 47, 1-26.
#' @references Kuipers J, Moffa G and Heckerman D (2014). Addendum on the scoring of Gaussian directed acyclic graphical models. The Annals of Statistics 42, 1689-1691.
#' @examples
#' \dontrun{
#' myScore <- scoreparameters("bge", Boston)
#' partfit <- partitionMCMC(myScore)
#' plot(partfit)
#' }
#' @import pcalg
#' @author Jack Kuipers, Polina Suter, the code partly derived from the partition MCMC implementation from Kuipers J, Moffa G (2017) <doi:10.1080/01621459.2015.1133426>
#' @export
partitionMCMC <- function(scorepar, moveprobs = NULL, iterations = NULL, stepsave = NULL, alpha = 0.05,
                          gamma = 1, verbose = FALSE, scoreout = FALSE, compress = TRUE, startspace = NULL,
                          blacklist = NULL, scoretable = NULL, startDAG = NULL) {
  if (is.null(moveprobs)) { # default move probabilities, scaled with the number of nodes
    prob1start <- 40/100
    prob1 <- prob1start*100
    if (scorepar$nsmall > 3) {
      prob1 <- round(6*prob1*scorepar$nsmall/(scorepar$nsmall^2 + 10*scorepar$nsmall - 24))
    }
    prob1 <- prob1/100
    prob2start <- 99/100 - prob1start
    prob2 <- prob2start*100
    if (scorepar$nsmall > 3) {
      prob2 <- round(6*prob2*scorepar$nsmall/(scorepar$nsmall^2 + 10*scorepar$nsmall - 24))
    }
    prob2 <- prob2/100
    moveprobs.partition <- c(prob1, prob1start - prob1, prob2start - prob2, prob2, 0.01)
    moveprobs <- moveprobs.partition/sum(moveprobs.partition) # normalisation
  }
  if (is.null(iterations)) {
    if (scorepar$nsmall < 20) {
      iterations <- 20000
    } else { # 20*n^2*log(n), rounded down to a multiple of 1000
      iterations <- (20*scorepar$nsmall*scorepar$nsmall*log(scorepar$nsmall)) - (20*scorepar$nsmall*scorepar$nsmall*log(scorepar$nsmall)) %% 1000
    }
  }
  if (is.null(stepsave)) {
    stepsave <- floor(iterations/1000)
  }
  if (scorepar$DBN) { # flag for DBN structure learning with different initial and transition structures
    if (!is.null(blacklist)) {
      blacklist <- DBNbacktransform(blacklist, scorepar)
    }
    if (!is.null(startspace)) {
      startspace <- DBNbacktransform(startspace, scorepar)
    }
    if (!is.null(startDAG)) {
      startDAG <- DBNbacktransform(startDAG, scorepar)
    }
    if (scorepar$split) { # we learn initial and transition structures separately
      if (scorepar$MDAG) {
        param1 <- scorepar$paramsets[[scorepar$nsets]]
        param2 <- scorepar$paramsets[[1]]
        param2$paramsets <- scorepar$paramsets[1:(scorepar$nsets - 1)]
        param2$MDAG <- TRUE
      } else {
        param1 <- scorepar$firstslice
        param2 <- scorepar$otherslices
      }
      if (!is.null(scoretable)) {
        warning("for DBNs with samestruct=FALSE 'scoretable' parameter is ignored")
      }
      result.init <- partitionMCMCplus1sample(param = param1, startspace = startspace$init,
                                              blacklist = blacklist$init, moveprobs = moveprobs,
                                              numit = iterations, DAG = startDAG$init, stepsave = stepsave,
                                              scoretable = NULL, verbose = verbose,
                                              gamma = gamma, compress = compress, alpha = alpha)
      result.trans <- partitionMCMCplus1sample(param = param2, startspace = startspace$trans,
                                               blacklist = blacklist$trans, moveprobs = moveprobs,
                                               numit = iterations, DAG = startDAG$trans, stepsave = stepsave,
                                               scoretable = NULL, verbose = verbose, gamma = gamma,
                                               compress = compress, alpha = alpha)
      result <- mergeDBNres(result.init, result.trans, scorepar, algo = "partition")
    } else {
      result <- partitionMCMCplus1sample(param = scorepar, startspace = startspace, blacklist = blacklist,
                                         moveprobs = moveprobs, numit = iterations, DAG = startDAG,
                                         stepsave = stepsave, scoretable = scoretable, verbose = verbose,
                                         gamma = gamma, compress = compress, alpha = alpha)
    }
  } else {
    result <- partitionMCMCplus1sample(param = scorepar, startspace = startspace, blacklist = blacklist,
                                       moveprobs = moveprobs, numit = iterations, DAG = startDAG,
                                       stepsave = stepsave, scoretable = scoretable, verbose = verbose,
                                       gamma = gamma, compress = compress, alpha = alpha)
  }
  result$info$DBN <- scorepar$DBN
  if (scorepar$DBN) {
    result$info$nsmall <- scorepar$nsmall
    result$info$bgn <- scorepar$bgn
    result$info$split <- scorepar$split
  }
  result$info$algo <- "plus1 partition MCMC"
  if (is.null(startspace)) {
    result$info$spacealgo <- "PC + iterative plus1 order MCMC"
  } else {
    result$info$spacealgo <- "user defined matrix"
  }
  result$info$iterations <- iterations
  result$info$samplesteps <- length(result$trace)
  result$info$sampletype <- "sample"
  result$info$fncall <- match.call()
  attr(result, "class") <- "partitionMCMC"
  return(result)
}
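
# --- Illustrative sketch, not part of the BiDAG API ---
# demoPartitionFromItSearch() is a hypothetical helper for the workflow the
# documentation above suggests: define the search space by a preliminary run
# of iterativeMCMC, then sample with partition MCMC. It assumes getSpace()
# returns the scorespace saved when scoreout = TRUE; Boston ships with BiDAG.
demoPartitionFromItSearch <- function() {
  myScore <- scoreparameters("bge", Boston)
  itfit <- iterativeMCMC(myScore, scoreout = TRUE) # expand and score the space
  partfit <- partitionMCMC(myScore, scoretable = getSpace(itfit))
  getMCMCscore(partfit)                            # log-score of the best sampled DAG
}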
#' Structure learning with an iterative order MCMC algorithm on an expanded search space
#'
#' This function implements an iterative search for the maximum a posteriori (MAP) DAG
#' by means of order MCMC (arXiv:1803.07859v3). At each iteration, the current search space is expanded by
#' allowing each node to have up to one additional parent not already included in the search space.
#' By default the initial search space is obtained through the PC-algorithm (using the functions \code{\link[pcalg]{skeleton}} and \code{\link[pcalg]{pc}} from the `pcalg' package [Kalisch et al, 2012]).
#' At each iteration order MCMC is employed to search for the MAP DAG.
#' The edges in the MAP DAG are added to the initial search space to provide
#' the search space for the next iteration. The algorithm iterates until no
#' further score improvements can be achieved by expanding the search space.
#' The final search space may be used for the sampling versions of \code{\link{orderMCMC}} and \code{\link{partitionMCMC}}.
#'
#' @param scorepar an object of class \code{scoreparameters}, containing the data and scoring parameters; see constructor function \code{\link{scoreparameters}}
#' @param moveprobs a numerical vector of 4 values in \code{[0,1]} corresponding to the probabilities of the following MCMC moves in the order space:
#' \itemize{
#' \item exchanging 2 random nodes in the order
#' \item exchanging 2 adjacent nodes in the order
#' \item placing a single node elsewhere in the order
#' \item staying still
#' }
#' @param iterations integer, the number of MCMC steps, the default value is \eqn{3.5n^{2}\log{n}}
#' @param stepsave integer, thinning interval for the MCMC chain, indicating the number of steps between two output iterations, the default is \code{iterations/1000}
#' @param MAP logical, if TRUE (default) the search targets the MAP DAG (a DAG with maximum score),
#' if FALSE at each MCMC step a DAG is sampled from the order proportionally to its score; when expanding the search space with MAP=TRUE all edges from the maximum scoring DAG are added
#' to the new space, with MAP=FALSE only edges whose posterior probability exceeds the threshold defined by the parameter \code{posterior} are added to the search space
#' @param posterior numerical, when \code{MAP} is set to FALSE defines the posterior probability threshold for adding edges to the search space
#' @param alpha numerical significance value in \code{[0,1]} for the conditional independence tests in the PC-stage
#' @param gamma tuning parameter which transforms the score by raising it to this power, 1 by default
#' @param startorder integer vector of length n, which will be used as the starting order in the MCMC algorithm, the default order is random
#' @param softlimit integer, limit on the size of parent sets beyond which adding undirected edges is restricted; below this
#' limit edges are added to expand the parent sets based on the undirected skeleton of the MAP DAG (or from its CPDAG, depending
#' on the parameter \code{mergetype}), above the limit only the directed edges are added from the MAP DAG; the limit is 9 by default
#' @param hardlimit integer, limit on the size of parent sets beyond which the search space is not further expanded to prevent long runtimes; the limit is 12 by default
#' @param cpdag logical, if set to TRUE the equivalence class (CPDAG) found by the PC algorithm is used as the search
#' space, when FALSE (default) the undirected skeleton is used as the search space
#' @param mergetype defines which edges are added to the search space at each expansion iteration; three options are available:
#' \itemize{
#' \item "dag", then edges from the maximum scoring DAG are added;
#' \item "cpdag", then the maximum scoring DAG is first converted to the CPDAG, from which all edges are added to the search space;
#' \item "skeleton", then the maximum scoring DAG is first converted to the skeleton, from which all edges are added to the search space
#' }
#' "skeleton" by default
#' @param accum logical, when TRUE at each search step expansion new edges are added to the current search space; when FALSE (default) the new edges are added to the starting space
#' @param compress logical, if TRUE adjacency matrices representing sampled graphs will be stored as a sparse Matrix (recommended); TRUE by default
#' @param plus1it (optional) integer, the number of iterations of search space expansion; by default the algorithm iterates until no score improvement can be achieved by further expanding the search space
#' @param startspace (optional) a square matrix, of dimensions equal to the number of nodes, which defines the search space for the order MCMC in the form of an adjacency matrix; if NULL, the skeleton obtained from the PC-algorithm will be used; if \code{startspace[i,j]} equals 1 (0) the edge from node \code{i} to node \code{j} is included in (excluded from) the search space; to include an edge in both directions, both \code{startspace[i,j]} and \code{startspace[j,i]} should be 1
#' @param scoretable (optional) object of class \code{scorespace}. When not NULL, parameters \code{startspace} and \code{addspace} are ignored.
#' @param addspace (optional) a square matrix, of dimensions equal to the number of nodes, which defines edges which are added to the search space only at the first iteration of the iterative search and do not necessarily stay afterwards; defined in the form of an adjacency matrix; if \code{addspace[i,j]} equals 1 (0) the edge from node \code{i} to node \code{j} is included in (excluded from) the search space; to include an edge in both directions, both \code{addspace[i,j]} and \code{addspace[j,i]} should be 1
#' @param blacklist (optional) a square matrix, of dimensions equal to the number of nodes, which defines edges to exclude from the search space; if \code{blacklist[i,j]} equals 1 the edge from node \code{i} to node \code{j} is excluded from the search space
#' @param verbose logical, if TRUE (default) prints messages on the progress of execution
#' @param chainout logical, if TRUE the saved MCMC steps are returned, FALSE by default
#' @param scoreout logical, if TRUE the search space from the last plus1 iteration and the corresponding score tables are returned, FALSE by default
#' @param alphainit (optional) numerical, defines the alpha that is used by the PC algorithm to learn the initial structure of a DBN, ignored in the static case
#' @return Object of class \code{iterativeMCMC}, which contains the log-score trace as well as the adjacency matrix of the maximum scoring DAG, its score and the order score.
#' The output can optionally include DAGs sampled in MCMC iterations and the score tables. Optional output is regulated by the parameters \code{chainout} and \code{scoreout}. See \code{\link{iterativeMCMC class}} for a detailed class structure.
#' @note see also extractor functions \code{\link{getDAG}}, \code{\link{getTrace}}, \code{\link{getSpace}}, \code{\link{getMCMCscore}}.
#' @references P. Suter, J. Kuipers, G. Moffa, N. Beerenwinkel (2023) <doi:10.18637/jss.v105.i09>
#' @references Kuipers J, Suter P and Moffa G (2020). Efficient Sampling and Structure Learning of Bayesian Networks. (arXiv:1803.07859v3)
#' @references Friedman N and Koller D (2003). A Bayesian approach to structure discovery in Bayesian networks. Machine Learning 50, 95-125.
#' @references Kalisch M, Maechler M, Colombo D, Maathuis M and Buehlmann P (2012). Causal inference using graphical models with the R package pcalg. Journal of Statistical Software 47, 1-26.
#' @references Geiger D and Heckerman D (2002). Parameter priors for directed acyclic graphical models and the characterization of several probability distributions. The Annals of Statistics 30, 1412-1440.
#' @references Kuipers J, Moffa G and Heckerman D (2014). Addendum on the scoring of Gaussian directed acyclic graphical models. The Annals of Statistics 42, 1689-1691.
#' @references Spirtes P, Glymour C and Scheines R (2000). Causation, Prediction, and Search, 2nd edition. The MIT Press.
#' @examples
#' \dontrun{
#' Bostonpar <- scoreparameters("bge", Boston)
#' itfit <- iterativeMCMC(Bostonpar, chainout = TRUE, scoreout = TRUE)
#' plot(itfit)
#' }
#' @import pcalg
#' @importFrom methods new
#' @importFrom graphics lines
#' @importFrom graphics par
#' @importFrom graphics layout
#' @importFrom graphics legend
#' @importFrom stats cor
#' @importFrom stats cov
#' @importFrom stats cov.wt
#' @importFrom stats pchisq
#' @importFrom stats runif
#' @importFrom stats rnorm
#' @importFrom utils data
#' @importFrom utils flush.console
#' @importFrom utils tail
#' @importFrom Rgraphviz makeNodeAttrs
#' @importFrom graph subGraph
#' @importFrom graph nodes
#' @importFrom graph nodeRenderInfo
#' @importFrom graph graph.par
#' @importFrom graph plot
#' @importFrom graph numNodes
#' @importFrom graphics text
#' @importFrom Rcpp evalCpp
#' @importFrom graphics abline
#' @importFrom utils head
#' @importFrom Matrix Matrix
#' @importFrom methods is
#' @importFrom coda mcmc
#' @importFrom coda mcmc.list
#' @useDynLib BiDAG, .registration=TRUE
#' @rdname iterativeMCMC
#' @export iterativeMCMC
#' @author Polina Suter, Jack Kuipers
iterativeMCMC <- function(scorepar, MAP = TRUE, posterior = 0.5, softlimit = 9, hardlimit = 12, alpha = 0.05,
                          gamma = 1, verbose = TRUE, chainout = FALSE, scoreout = FALSE, cpdag = FALSE,
                          mergetype = "skeleton", iterations = NULL, moveprobs = NULL, stepsave = NULL,
                          startorder = NULL, accum = FALSE, compress = TRUE, plus1it = NULL, startspace = NULL,
                          blacklist = NULL, addspace = NULL, scoretable = NULL, alphainit = NULL) {
  if (is.null(moveprobs)) { # default move probabilities, scaled with the number of nodes
    prob1 <- 99
    if (scorepar$nsmall > 3) {
      prob1 <- round(6*99*scorepar$nsmall/(scorepar$nsmall^2 + 10*scorepar$nsmall - 24))
    }
    prob1 <- prob1/100
    moveprobs <- c(prob1, 0.99 - prob1, 0.01)
    moveprobs <- moveprobs/sum(moveprobs) # normalisation
    moveprobs <- c(moveprobs[c(1, 2)], 0, moveprobs[3])
  }
  if (is.null(iterations)) {
    if (scorepar$nsmall < 26) {
      iterations <- 25000
    } else { # 3.5*n^2*log(n), rounded down to a multiple of 1000
      iterations <- (3.5*scorepar$nsmall*scorepar$nsmall*log(scorepar$nsmall)) - (3.5*scorepar$nsmall*scorepar$nsmall*log(scorepar$nsmall)) %% 1000
    }
  }
  if (is.null(stepsave)) {
    stepsave <- floor(iterations/1000)
  }
  ordercheck <- checkstartorder(startorder, varnames = scorepar$labels.short, mainnodes = scorepar$mainnodes,
                                bgnodes = scorepar$static, DBN = scorepar$DBN, split = scorepar$split)
  if (ordercheck$errorflag) {
    stop(ordercheck$message)
  } else {
    startorder <- ordercheck$order
  }
  if (scorepar$DBN) { # flag for DBN structure learning with different initial and transition structures
    if (!is.null(blacklist)) {
      blacklist <- DBNbacktransform(blacklist, scorepar)
    }
    if (!is.null(startspace)) {
      startspace <- DBNbacktransform(startspace, scorepar)
    }
    if (!is.null(addspace)) {
      addspace <- DBNbacktransform(addspace, scorepar)
    }
    if (scorepar$split) { # we learn initial and transition structures separately
      if (scorepar$MDAG) {
        param1 <- scorepar$paramsets[[scorepar$nsets]]
        param2 <- scorepar$paramsets[[1]]
        param2$paramsets <- scorepar$paramsets[1:(scorepar$nsets - 1)]
        param2$MDAG <- TRUE
      } else {
        param1 <- scorepar$firstslice
        param2 <- scorepar$otherslices
      }
      if (scoreout | !is.null(scoretable)) {
        cat("option scoreout always equals FALSE for DBNs with samestruct=FALSE, scoretable parameter is ignored \n")
      }
      cat("learning initial structure...\n")
      result.init <- iterativeMCMCplus1(param = param1, iterations, stepsave, plus1it = plus1it, MAP = MAP,
                                        posterior = posterior, alpha = alpha, cpdag = cpdag,
                                        moveprobs = moveprobs, softlimit = softlimit, hardlimit = hardlimit,
                                        startspace = startspace$init, blacklist = blacklist$init, gamma = gamma,
                                        verbose = verbose, chainout = chainout, scoreout = FALSE, mergecp = mergetype,
                                        addspace = addspace$init, scoretable = NULL, startorder = startorder$init,
                                        accum = accum, alphainit = alphainit, compress = compress)
      cat("learning transition structure...\n")
      result.trans <- iterativeMCMCplus1(param = param2, iterations, stepsave, plus1it = plus1it, MAP = MAP,
                                         posterior = posterior, alpha = alpha, cpdag = cpdag,
                                         moveprobs = moveprobs, softlimit = softlimit, hardlimit = hardlimit,
                                         startspace = startspace$trans, blacklist = blacklist$trans, gamma = gamma,
                                         verbose = verbose, chainout = chainout, scoreout = FALSE, mergecp = mergetype,
                                         addspace = addspace$trans, scoretable = NULL, startorder = startorder$trans,
                                         accum = accum, alphainit = alphainit, compress = compress)
      result <- mergeDBNres.it(result.init, result.trans, scorepar)
    } else {
      result <- iterativeMCMCplus1(param = scorepar, iterations, stepsave, plus1it = plus1it, MAP = MAP,
                                   posterior = posterior, alpha = alpha, cpdag = cpdag,
                                   moveprobs = moveprobs, softlimit = softlimit, hardlimit = hardlimit,
                                   startspace = startspace, blacklist = blacklist, gamma = gamma,
                                   verbose = verbose, chainout = chainout, scoreout = scoreout, mergecp = mergetype,
                                   addspace = addspace, scoretable = scoretable, startorder = startorder,
                                   accum = accum, alphainit = alphainit, compress = compress)
    }
  } else {
    result <- iterativeMCMCplus1(param = scorepar, iterations, stepsave, plus1it = plus1it, MAP = MAP,
                                 posterior = posterior, alpha = alpha, cpdag = cpdag,
                                 moveprobs = moveprobs, softlimit = softlimit, hardlimit = hardlimit,
                                 startspace = startspace, blacklist = blacklist, gamma = gamma,
                                 verbose = verbose, chainout = chainout, scoreout = scoreout, mergecp = mergetype,
                                 addspace = addspace, scoretable = scoretable, startorder = startorder,
                                 accum = accum, compress = compress)
  }
  result$info <- list()
  result$info$DBN <- scorepar$DBN
  if (scorepar$DBN) {
    result$info$nsmall <- scorepar$nsmall
    result$info$bgn <- scorepar$bgn
    result$info$split <- scorepar$split
  }
  result$info$algo <- "iterative order MCMC"
  if (is.null(startspace)) {
    result$info$spacealgo <- "PC"
  } else {
    result$info$spacealgo <- "user defined matrix"
  }
  result$info$iterations <- iterations
  result$info$plus1it <- length(result$max)
  result$info$samplesteps <- floor(iterations/stepsave) + 1
  if (MAP) {
    result$info$sampletype <- "MAP"
  } else {
    result$info$sampletype <- "sample"
    result$info$threshold <- posterior
  }
  result$info$fncall <- match.call()
  attr(result, "class") <- "iterativeMCMC"
  return(result)
}
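
# --- Illustrative sketch, not part of the BiDAG API ---
# demoItMCMCinfo() is a hypothetical helper that inspects how many expansion
# iterations were performed (recorded in the info slot above) and reuses the
# expanded space for posterior sampling; getSpace() returning the saved
# scorespace is an assumption of this sketch. Boston ships with BiDAG.
demoItMCMCinfo <- function() {
  Bostonpar <- scoreparameters("bge", Boston)
  itfit <- iterativeMCMC(Bostonpar, scoreout = TRUE)
  cat("expansion iterations:", itfit$info$plus1it, "\n")
  sampfit <- orderMCMC(Bostonpar, MAP = FALSE, chainout = TRUE,
                       scoretable = getSpace(itfit))
  plot(sampfit)
}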
#' Calculating the BGe/BDe score of a single DAG
#'
#' This function calculates the score of a DAG defined by its adjacency matrix.
#' Acceptable data matrices are homogeneous with all variables of the same type:
#' continuous, binary or categorical. The BGe score is evaluated in the case of
#' continuous data and the BDe score is evaluated for binary and categorical variables.
#'
#' @param scorepar an object of class \code{scoreparameters}, containing the data and
#' scoring parameters; see constructor function \code{\link{scoreparameters}}
#' @param incidence a square matrix of dimensions equal to the number of nodes, representing the adjacency matrix of a DAG; the matrix entries are in \code{\{0,1\}} such that \code{incidence[i,j]} equals 1 if there is a directed edge from node \code{i} to node \code{j} in the DAG and
#' \code{incidence[i,j]} equals 0 otherwise
#' @return the log of the BGe or BDe score of the DAG
#' @references Geiger D and Heckerman D (2002). Parameter priors for directed acyclic graphical models and the characterization of several probability distributions. The Annals of Statistics 30, 1412-1440.
#' @references Heckerman D and Geiger D (1995). Learning Bayesian networks: A unification for discrete and Gaussian domains. In Eleventh Conference on Uncertainty in Artificial Intelligence, pages 274-284.
#' @references Kuipers J, Moffa G and Heckerman D (2014). Addendum on the scoring of Gaussian directed acyclic graphical models. The Annals of Statistics 42, 1689-1691.
#' @examples
#' myScore <- scoreparameters("bde", Asia)
#' DAGscore(myScore, Asiamat)
#' @import pcalg
#' @export
#' @author Jack Kuipers, Polina Suter, the code partly derived from the order MCMC implementation from Kuipers J, Moffa G (2017) <doi:10.1080/01621459.2015.1133426>
DAGscore <- function(scorepar, incidence) {
  if (scorepar$DBN) {
    stop("To calculate the score of a DBN, DBNscore should be used!")
  }
  n <- ncol(scorepar$data)
  if (scorepar$bgn == 0) {
    mainnodes <- c(1:scorepar$n)
  } else {
    mainnodes <- c(1:n)[-scorepar$bgnodes]
  }
  P_local <- numeric(n)
  for (j in mainnodes) { # j is a node at which scoring is done
    parentnodes <- which(incidence[, j] == 1)
    P_local[j] <- DAGcorescore(j, parentnodes, scorepar$n, scorepar)
  }
  return(sum(P_local))
}

#' Calculating the BGe/BDe score of a single DBN
#'
#' This function calculates the score of a DBN defined by its compact adjacency matrix.
#' Acceptable data matrices are homogeneous with all variables of the same type: continuous,
#' binary or categorical. The BGe score is evaluated in the case of continuous data and the BDe score is evaluated for binary and categorical variables.
#'
#' @param scorepar an object of class \code{scoreparameters}, containing the data and scoring parameters; see constructor function \code{\link{scoreparameters}}
#' @param incidence a square matrix, representing the initial and transitional structure of a DBN; the size of the matrix is 2*nsmall+bgn, where nsmall is the number of variables per time slice excluding static nodes and bgn is the number of static variables;
#' the matrix entries are in \code{\{0,1\}} such that \code{incidence[i,j]} equals
#' 1 if there is a directed edge from node \code{i} to node \code{j} in the DAG and
#' \code{incidence[i,j]} equals 0 otherwise
#' @return the log of the BGe or BDe score of the DBN
#' @examples
#' testscore <- scoreparameters("bge", DBNdata, DBN = TRUE, dbnpar = list(slices = 5, b = 3))
#' DBNscore(testscore, DBNmat)
#'
#' @export
#' @author Polina Suter, Jack Kuipers
DBNscore <- function(scorepar, incidence) {
  if (nrow(incidence) == ncol(incidence) & ncol(incidence) == (2*scorepar$nsmall + scorepar$bgn)) {
    incidence <- DBNbacktransform(incidence, scorepar)
    if (!scorepar$split) {
      P_local <- numeric(scorepar$nsmall)
      for (j in 1:scorepar$nsmall) { # j is a node at which scoring is done
        parentnodes <- which(incidence[, j] == 1)
        P_local[j] <- DAGcorescore(j, parentnodes, scorepar$n, scorepar)
      }
      return(sum(P_local))
    } else {
      P_local <- numeric(2*scorepar$nsmall)
      for (j in 1:scorepar$nsmall) { # score the initial structure
        parentnodes <- which(incidence$init[, j] == 1)
        P_local[j] <- DAGcorescore(j, parentnodes, scorepar$n, scorepar$firstslice)
      }
      for (j in 1:scorepar$nsmall) { # score the transition structure
        parentnodes <- which(incidence$trans[, j] == 1)
        P_local[j + scorepar$nsmall] <- DAGcorescore(j, parentnodes, scorepar$otherslices$n, scorepar$otherslices)
      }
      return(sum(P_local))
    }
  } else {
    stop("wrong dimensions of the adjacency matrix!")
  }
}
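
# --- Illustrative sketch, not part of the BiDAG API ---
# demoScoreComparison() is a hypothetical helper comparing the log BDe scores
# of two candidate structures on the Asia data; under equal structure priors
# the difference of log scores is the log Bayes factor between the two DAGs.
# The edge removed below is arbitrary and for illustration only.
demoScoreComparison <- function() {
  myScore <- scoreparameters("bde", Asia)
  altmat <- Asiamat
  altmat[which(altmat == 1)[1]] <- 0 # drop one edge of the reference DAG
  DAGscore(myScore, Asiamat) - DAGscore(myScore, altmat)
}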
/scratch/gouwar.j/cran-all/cranData/BiDAG/R/main.R
DAGbinarytablescore <- function(j, parentnodes, n, param, parenttable, tablemaps, numparents, numberofparentsvec) {
  lp <- length(parentnodes) # number of parents
  noparams <- 2^lp # number of binary states of the parents
  corescores <- rep(NA, noparams)
  chi <- param$chi
  scoreconstvec <- param$scoreconstvec
  N1slist <- vector("list", noparams)
  N0slist <- vector("list", noparams)
  if (lp == 0) { # no parents
    N1 <- sum(param$d1[, j])
    N0 <- sum(param$d0[, j])
    NT <- N0 + N1
    corescores[noparams] <- scoreconstvec[lp + 1] + lgamma(N0 + chi/(2*noparams)) + lgamma(N1 + chi/(2*noparams)) - lgamma(NT + chi/noparams)
  } else {
    if (lp == 1) {
      summys <- param$data[, parentnodes]
    } else {
      summys <- colSums(2^(c(0:(lp - 1)))*t(param$data[, parentnodes]))
    }
    N1s <- collectC(summys, param$d1[, j], noparams)
    N0s <- collectC(summys, param$d0[, j], noparams)
    N1slist[[noparams]] <- N1s
    N0slist[[noparams]] <- N0s
    NTs <- N1s + N0s
    corescores[noparams] <- scoreconstvec[lp + 1] + sum(lgamma(N0s + chi/(2*noparams))) + sum(lgamma(N1s + chi/(2*noparams))) - sum(lgamma(NTs + chi/noparams))
    if (!is.null(param$logedgepmat)) { # if there is an additional edge penalisation
      corescores[noparams] <- corescores[noparams] - sum(param$logedgepmat[parentnodes, j])
    }
    for (jj in (noparams - 1):1) { # use poset to combine sets
      lplocal <- numberofparentsvec[jj] # size of parent set
      noparamslocal <- 2^lplocal # number of parameters of this size, for scoring later
      missingparentindex <- parenttable[tablemaps$backwards[noparams - tablemaps$forward[jj] + 1], 1] # get first element of complement of parent set
      higherlayer <- tablemaps$backwards[tablemaps$forward[jj] + 2^(missingparentindex - 1)] # get row of poset element with this missing parent included
      missingparent <- which(parenttable[higherlayer, ] == missingparentindex) # which component it is in the higher layer
      N1stemp <- N1slist[[higherlayer]] # map to the previous lists with missing parent added
      N0stemp <- N0slist[[higherlayer]]
      # we take the N1s and N0s calculated previously for the case with this element included
      # since we know which power of 2 it takes in the mapping to summys, we can marginalise it out
      size1 <- 2^(missingparent - 1)
      size2 <- 2^(lplocal - missingparent + 1)
      elementstocombine <- as.vector(t(t(matrix(c(1:size1), nrow = size1, ncol = size2)) + 2*(c(1:size2) - 1)*size1)) # collect the elements we want to combine to remove the missing parent from the previous tables
      N1s <- N1stemp[elementstocombine] + N1stemp[elementstocombine + size1]
      N0s <- N0stemp[elementstocombine] + N0stemp[elementstocombine + size1]
      N1slist[[jj]] <- N1s
      N0slist[[jj]] <- N0s
      NTs <- N1s + N0s
      corescores[jj] <- scoreconstvec[lplocal + 1] + sum(lgamma(N0s + chi/(2*noparamslocal))) + sum(lgamma(N1s + chi/(2*noparamslocal))) - sum(lgamma(NTs + chi/noparamslocal))
      if (!is.null(param$logedgepmat)) { # if there is an additional edge penalisation
        if (lplocal > 0) {
          localparents <- parentnodes[parenttable[jj, 1:lplocal]]
          if (length(localparents) > 0) {
            corescores[jj] <- corescores[jj] - sum(param$logedgepmat[localparents, j])
          }
        }
      }
    }
  }
  return(corescores)
}

DAGbinarytablescoreplus1 <- function(j, parentnodes, additionalparent, n, param, parenttable, tablemaps, numparents, numberofparentsvec) {
  lp <- length(parentnodes) # number of parents
  noparams <- 2^lp # number of binary states of the parents
  allparents <- c(parentnodes, additionalparent) # combine the sets, but put the additional one last!
  lpadd <- lp + 1 # including the additional parent
  noparamsadd <- 2*noparams
  chi <- param$chi
  scoreconstvec <- param$scoreconstvec
  corescores <- rep(NA, noparams)
  N1slist <- vector("list", noparams)
  N0slist <- vector("list", noparams)
  if (lpadd == 1) {
    summys <- param$data[, allparents]
  } else {
    summys <- colSums(2^(c(0:(lpadd - 1)))*t(param$data[, allparents]))
  }
  N1s <- collectC(summys, param$d1[, j], noparamsadd)
  N0s <- collectC(summys, param$d0[, j], noparamsadd)
  N1slist[[noparams]] <- N1s
  N0slist[[noparams]] <- N0s
  NTs <- N1s + N0s
  corescores[noparams] <- scoreconstvec[lpadd + 1] + sum(lgamma(N0s + chi/(2*noparamsadd))) + sum(lgamma(N1s + chi/(2*noparamsadd))) - sum(lgamma(NTs + chi/noparamsadd))
  if (!is.null(param$logedgepmat)) { # if there is an additional edge penalisation
    corescores[noparams] <- corescores[noparams] - sum(param$logedgepmat[allparents, j])
  }
  if (lpadd > 1) { # otherwise there are no further terms to compute!
    for (jj in (noparams - 1):1) { # use poset to combine sets
      lplocal <- numberofparentsvec[jj] + 1 # size of parent set
      noparamslocal <- 2^lplocal # number of parameters of this size, for scoring later
      missingparentindex <- parenttable[tablemaps$backwards[noparams - tablemaps$forward[jj] + 1], 1] # get first element of complement of parent set
      higherlayer <- tablemaps$backwards[tablemaps$forward[jj] + 2^(missingparentindex - 1)] # get row of poset element with this missing parent included
      missingparent <- which(parenttable[higherlayer, ] == missingparentindex) # which component it is in the higher layer
      N1stemp <- N1slist[[higherlayer]] # map to the previous lists with missing parent added
      N0stemp <- N0slist[[higherlayer]]
      # we take the N1s and N0s calculated previously for the case with this element included
      # since we know which power of 2 it takes in the mapping to summys, we can marginalise it out
      size1 <- 2^(missingparent - 1)
      size2 <- 2^(lplocal - missingparent + 1)
      elementstocombine <- as.vector(t(t(matrix(c(1:size1), nrow = size1, ncol = size2)) + 2*(c(1:size2) - 1)*size1)) # collect the elements we want to combine to remove the missing parent from the previous tables
      N1s <- N1stemp[elementstocombine] + N1stemp[elementstocombine + size1]
      N0s <- N0stemp[elementstocombine] + N0stemp[elementstocombine + size1]
      N1slist[[jj]] <- N1s
      N0slist[[jj]] <- N0s
      NTs <- N1s + N0s
      # lplocal+1 because we have 1 additional parent and indexing in scoreconstvec started with 0
      corescores[jj] <- scoreconstvec[lplocal + 1] + sum(lgamma(N0s + chi/(2*noparamslocal))) + sum(lgamma(N1s + chi/(2*noparamslocal))) - sum(lgamma(NTs + chi/noparamslocal))
      if (!is.null(param$logedgepmat)) { # if there is an additional edge penalisation
        if (lplocal > 1) {
          localparents <- c(parentnodes[parenttable[jj, 1:(lplocal - 1)]], additionalparent)
        } else {
          localparents <- additionalparent
        }
        corescores[jj] <- corescores[jj] - sum(param$logedgepmat[localparents, j])
      }
    }
  }
  return(corescores)
}
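
# --- Illustrative sketch, not part of the BiDAG API ---
# demoBinaryStateIndex() is a hypothetical helper showing how the functions
# above encode binary parent configurations: each observation's parent values
# are read as bits, giving an integer in 0:(2^lp - 1) that collectC uses to
# bin the counts. Toy data only.
demoBinaryStateIndex <- function() {
  set.seed(1)
  dat <- matrix(rbinom(20, 1, 0.5), ncol = 4) # 5 observations, 4 binary nodes
  parents <- c(1, 3)                          # hypothetical parent set
  lp <- length(parents)
  summys <- colSums(2^(0:(lp - 1))*t(dat[, parents])) # parent state index per observation
  table(factor(summys, levels = 0:(2^lp - 1)))        # counts per parent configuration
}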
/scratch/gouwar.j/cran-all/cranData/BiDAG/R/newbinaryscoring.R
DAGcattablescore <- function(j, parentnodes, n, param, parenttable, tablemaps, numparents, numberofparentsvec) {
  lp <- length(parentnodes) # number of parents
  noparams <- 2^lp # number of parent subsets
  corescores <- rep(NA, noparams)
  chi <- param$chi
  scoreconstvec <- param$scoreconstvec
  Cj <- param$Cvec[j] # number of levels of j
  Nslist <- vector("list", noparams)
  if (lp == 0) { # no parents
    Cp <- 1 # effectively 1 parent level
    summys <- rep(0, nrow(param$data))
    if (!is.null(param$weightvector)) {
      Ns <- collectCcatwt(summys, param$data[, j], param$weightvector, Cp, Cj)
    } else {
      Ns <- collectCcat(summys, param$data[, j], Cp, Cj)
    }
    NTs <- rowSums(Ns)
    corescores[noparams] <- scoreconstvec[lp + 1] + sum(lgamma(Ns + chi/(Cp*Cj))) - sum(lgamma(NTs + chi/Cp)) + Cp*lgamma(chi/Cp) - (Cp*Cj)*lgamma(chi/(Cp*Cj))
  } else {
    if (lp == 1) {
      Cp <- param$Cvec[parentnodes] # number of parent levels
      summys <- param$data[, parentnodes]
    } else {
      Cp <- prod(param$Cvec[parentnodes]) # use mixed radix mapping to unique parent states
      summys <- colSums(cumprod(c(1, param$Cvec[parentnodes[-lp]]))*t(param$data[, parentnodes]))
    }
    if (!is.null(param$weightvector)) {
      Ns <- collectCcatwt(summys, param$data[, j], param$weightvector, Cp, Cj)
    } else {
      Ns <- collectCcat(summys, param$data[, j], Cp, Cj)
    }
    Nslist[[noparams]] <- Ns
    NTs <- rowSums(Ns)
    corescores[noparams] <- scoreconstvec[lp + 1] + sum(lgamma(Ns + chi/(Cp*Cj))) - sum(lgamma(NTs + chi/Cp)) + Cp*lgamma(chi/Cp) - (Cp*Cj)*lgamma(chi/(Cp*Cj))
    if (!is.null(param$logedgepmat)) { # if there is an additional edge penalisation
      corescores[noparams] <- corescores[noparams] - sum(param$logedgepmat[parentnodes, j])
    }
    for (jj in (noparams - 1):1) { # use poset to combine sets
      lplocal <- numberofparentsvec[jj] # size of parent set
      if (lplocal > 0) {
        localparents <- parentnodes[parenttable[jj, 1:lplocal]]
        Cp <- prod(param$Cvec[localparents])
      } else {
        Cp <- 1
      }
      missingparentindex <- parenttable[tablemaps$backwards[noparams - tablemaps$forward[jj] + 1], 1] # get first element of complement of parent set
      higherlayer <- tablemaps$backwards[tablemaps$forward[jj] + 2^(missingparentindex - 1)] # get row of poset element with this missing parent included
      missingparent <- which(parenttable[higherlayer, ] == missingparentindex) # which component it is in the higher layer
      Nstemp <- Nslist[[higherlayer]] # map to the previous lists with missing parent added
      # we take the Ns calculated previously for the case with this element included
      # since we know which power it takes in the mapping to summys, we can marginalise it out
      if (missingparent > 1) {
        size1 <- prod(param$Cvec[parentnodes[parenttable[higherlayer, 1:(missingparent - 1)]]])
      } else {
        size1 <- 1
      }
      if (missingparent < (lplocal + 1)) {
        size2 <- prod(param$Cvec[parentnodes[parenttable[higherlayer, missingparent:lplocal + 1]]])
      } else {
        size2 <- 1
      }
      Cmissing <- param$Cvec[parentnodes[parenttable[higherlayer, missingparent]]]
      elementstocombine <- as.vector(t(t(matrix(c(1:size1), nrow = size1, ncol = size2)) + Cmissing*(c(1:size2) - 1)*size1)) # collect the elements we want to combine to remove the missing parent from the previous tables
      Ns <- Nstemp[elementstocombine, ] + Nstemp[elementstocombine + size1, ]
      if (Cmissing > 2) {
        for (ii in 3:Cmissing - 1) Ns <- Ns + Nstemp[elementstocombine + ii*size1, ] # note: 3:Cmissing-1 is (3:Cmissing)-1, i.e. 2:(Cmissing-1), adding the remaining level offsets
      }
      Nslist[[jj]] <- Ns
      if (lplocal == 0) { # the combining turns the matrix into a vector at the end
        NTs <- sum(Ns)
      } else {
        NTs <- rowSums(Ns)
      }
      corescores[jj] <- scoreconstvec[lplocal + 1] + sum(lgamma(Ns + chi/(Cp*Cj))) - sum(lgamma(NTs + chi/Cp)) + Cp*lgamma(chi/Cp) - (Cp*Cj)*lgamma(chi/(Cp*Cj))
      if (!is.null(param$logedgepmat)) { # if there is an additional edge penalisation
        if (lplocal > 0) {
          corescores[jj] <- corescores[jj] - sum(param$logedgepmat[localparents, j])
        }
      }
    }
  }
  return(corescores)
}

DAGcattablescoreplus1 <- function(j, parentnodes, additionalparent, n, param, parenttable, tablemaps, numparents, numberofparentsvec) {
  lp <- length(parentnodes) # number of parents
  noparams <- 2^lp # number of parent subsets
  corescores <- rep(NA, noparams)
  allparents <- c(parentnodes, additionalparent) # combine the sets, but put the additional one last!
  lpadd <- lp + 1 # including the additional parent
  chi <- param$chi
  scoreconstvec <- param$scoreconstvec
  Cj <- param$Cvec[j] # number of levels of j
  Nslist <- vector("list", noparams)
  if (lpadd == 1) {
    Cp <- param$Cvec[allparents] # number of parent levels
    summys <- param$data[, allparents]
  } else {
    Cp <- prod(param$Cvec[allparents])
    summys <- colSums(cumprod(c(1, param$Cvec[allparents[-lpadd]]))*t(param$data[, allparents]))
  }
  if (!is.null(param$weightvector)) {
    Ns <- collectCcatwt(summys, param$data[, j], param$weightvector, Cp, Cj)
  } else {
    Ns <- collectCcat(summys, param$data[, j], Cp, Cj)
  }
  Nslist[[noparams]] <- Ns
  NTs <- rowSums(Ns)
  corescores[noparams] <- scoreconstvec[lpadd + 1] + sum(lgamma(Ns + chi/(Cp*Cj))) - sum(lgamma(NTs + chi/Cp)) + Cp*lgamma(chi/Cp) - (Cp*Cj)*lgamma(chi/(Cp*Cj))
  if (!is.null(param$logedgepmat)) { # if there is an additional edge penalisation
    corescores[noparams] <- corescores[noparams] - sum(param$logedgepmat[allparents, j])
  }
  if (lpadd > 1) { # otherwise there are no further terms to compute!
    for (jj in (noparams - 1):1) { # use poset to combine sets
      lplocal <- numberofparentsvec[jj] + 1 # size of parent set
      if (lplocal > 1) {
        localparents <- c(parentnodes[parenttable[jj, 1:(lplocal - 1)]], additionalparent)
        Cp <- prod(param$Cvec[localparents])
      } else {
        localparents <- additionalparent
        Cp <- param$Cvec[localparents]
      }
      missingparentindex <- parenttable[tablemaps$backwards[noparams - tablemaps$forward[jj] + 1], 1] # get first element of complement of parent set
      higherlayer <- tablemaps$backwards[tablemaps$forward[jj] + 2^(missingparentindex - 1)] # get row of poset element with this missing parent included
      missingparent <- which(parenttable[higherlayer, ] == missingparentindex) # which component it is in the higher layer
      Nstemp <- Nslist[[higherlayer]] # map to the previous lists with missing parent added
      # we take the Ns calculated previously for the case with this element included
      # since we know which power it takes in the mapping to summys, we can marginalise it out
      if (missingparent > 1) {
        size1 <- prod(param$Cvec[parentnodes[parenttable[higherlayer, 1:(missingparent - 1)]]])
      } else {
        size1 <- 1
      }
      if (missingparent < lplocal) {
        size2 <- prod(param$Cvec[c(parentnodes[parenttable[higherlayer, (missingparent + 1):lplocal]], additionalparent)])
      } else {
        size2 <- param$Cvec[additionalparent]
      }
      Cmissing <- param$Cvec[parentnodes[parenttable[higherlayer, missingparent]]]
      elementstocombine <- as.vector(t(t(matrix(c(1:size1), nrow = size1, ncol = size2)) + Cmissing*(c(1:size2) - 1)*size1)) # collect the elements we want to combine to remove the missing parent from the previous tables
      Ns <- Nstemp[elementstocombine, ] + Nstemp[elementstocombine + size1, ]
      if (Cmissing > 2) {
        for (ii in 3:Cmissing - 1) Ns <- Ns + Nstemp[elementstocombine + ii*size1, ] # note: 3:Cmissing-1 is (3:Cmissing)-1, i.e. 2:(Cmissing-1)
      }
      Nslist[[jj]] <- Ns
      NTs <- rowSums(Ns)
      # lplocal+1 because we have 1 additional parent and indexing in scoreconstvec started with 0
      corescores[jj] <- scoreconstvec[lplocal + 1] + sum(lgamma(Ns + chi/(Cp*Cj))) - sum(lgamma(NTs + chi/Cp)) + Cp*lgamma(chi/Cp) - (Cp*Cj)*lgamma(chi/(Cp*Cj))
      if (!is.null(param$logedgepmat)) { # if there is an additional edge penalisation
        corescores[jj] <- corescores[jj] - sum(param$logedgepmat[localparents, j])
      }
    }
  }
  return(corescores)
}
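
# --- Illustrative sketch, not part of the BiDAG API ---
# demoMixedRadixIndex() is a hypothetical helper showing the mixed-radix
# mapping used above: with level counts Cvec and values coded 0,...,C-1, each
# observation's parent configuration maps to a single integer in
# 0:(prod(Cvec[parents]) - 1). Toy data only.
demoMixedRadixIndex <- function() {
  Cvec <- c(3, 2, 4)                   # hypothetical numbers of levels per node
  dat <- rbind(c(0, 1, 3), c(2, 0, 1)) # two observations, values in 0:(C-1)
  parents <- c(1, 3)                   # hypothetical parent set
  lp <- length(parents)
  colSums(cumprod(c(1, Cvec[parents[-lp]]))*t(dat[, parents])) # state index per observation
}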
/scratch/gouwar.j/cran-all/cranData/BiDAG/R/newcatscoring.R