"threeDarr" <- function (..., rep=1, union=TRUE, slicenames=NULL) { fun.copyright <- "Placed in the public domain 2011-2014 by Burns Statistics Ltd." fun.version <- "threeDarr 004" dots <- list(...) ndot <- length(dots) if(ndot == 1 && rep <= 1) { stop(paste("expecting at least 2 inputs when 'rep'", "is not greater than 1")) } else if(ndot == 0) { stop("no inputs given") } ddims <- lapply(dots, dim) if(any(unlist(lapply(ddims, length)) != 2)) { stop(paste(sum(unlist(lapply(ddims, length)) != 2), "input(s) do not have length 2 dim")) } ddimnam <- lapply(dots, dimnames) if(ndot == 1) { rnam <- ddimnam[[1]][[1]] cnam <- ddimnam[[1]][[2]] rnc <- nchar(rnam) cnc <- nchar(cnam) if(any(rnc == 0)) { rnsub <- paste("R", 1:length(rnam), sep="") rnam[rnc == 0] <- rnsub[rnc == 0] dimnames(dots[[1]])[[1]] <- rnam } if(any(cnc == 0)) { cnsub <- paste("C", 1:length(cnam), sep="") cnam[cnc == 0] <- cnsub[cnc == 0] dimnames(dots[[1]])[[2]] <- cnam } } else { rtest <- unlist(lapply(ddimnam, function(x) length(x[[1]]))) ctest <- unlist(lapply(ddimnam, function(x) length(x[[2]]))) if((any(rtest == 0) && any(rtest > 0)) || (any(ctest == 0) && any(ctest > 0))) { stop(paste(sum(rtest == 0), "inputs without rownames", "and", sum(ctest == 0), "without colnames", "while at least one does have these")) } if(union) { rnam <- unique(unlist(lapply(ddimnam, function(x) x[[1]]))) cnam <- unique(unlist(lapply(ddimnam, function(x) x[[2]]))) } else { rnam <- ddimnam[[1]][[1]] cnam <- ddimnam[[1]][[2]] if(ndot > 1) { for(i in 2:ndot) { rnam <- intersect(rnam, ddimnam[[i]][[1]]) cnam <- intersect(cnam, ddimnam[[i]][[2]]) } } } } if(any(nchar(rnam) == 0) || any(duplicated(rnam))) { stop("when row names exist, all rows must have unique names") } if(any(nchar(cnam) == 0) || any(duplicated(cnam))) { stop(paste("when column names exist, all columns must have", "unique names")) } if(!length(rnam)) { nr <- unlist(lapply(ddims, function(x) x[1])) if(diff(range(nr))) { stop(paste("no (suitable) row names and variable", "number of rows in inputs")) } nr <- nr[1] } else { nr <- length(rnam) } if(!length(cnam)) { nc <- unlist(lapply(ddims, function(x) x[2])) if(diff(range(nc))) { stop(paste("no (suitable) column names and variable", "number of columns in inputs")) } nc <- nc[1] } else { nc <- length(cnam) } if(length(slicenames)) { if(length(slicenames) != ndot * rep) { stop(paste("length of 'slicenames' is", length(slicenames), "-- should be", ndot * rep)) } snam <- slicenames } else { snam <- names(dots) if(length(snam) && rep > 1) { snam <- paste(snam, rep(1:rep, each=ndot), sep=".") } } ans <- array(NA, c(nr, nc, ndot * rep), list(rnam, cnam, snam)) ncode <- paste(if(length(rnam)) "R" else "N", if(length(cnam)) "C" else "N", sep="") switch(ncode, RC={ for(i in 1:ndot) { thismat <- as.matrix(dots[[i]]) thisr <- intersect(dimnames(thismat)[[1]], rnam) thisc <- intersect(dimnames(thismat)[[2]], cnam) ans[thisr, thisc, i] <- thismat[thisr, thisc] } }, RN={ for(i in 1:ndot) { thismat <- as.matrix(dots[[i]]) thisr <- intersect(dimnames(thismat)[[1]], rnam) ans[thisr,, i] <- thismat[thisr,] } }, NC={ for(i in 1:ndot) { thismat <- as.matrix(dots[[i]]) thisc <- intersect(dimnames(thismat)[[2]], cnam) ans[,thisc, i] <- thismat[,thisc] } }, NN={ for(i in 1:ndot) { ans[,,i] <- as.matrix(dots[[i]]) } } ) if(rep > 1) { orig <- ans[,,1:ndot] for(i in 2:rep) { ans[,, 1:ndot + (i-1)*ndot] <- orig } } ans }
/scratch/gouwar.j/cran-all/cranData/BurStFin/R/threeDarr.R
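A minimal usage sketch (the matrices below are made up, not from the package): threeDarr aligns matrices by their dimnames and stacks them along a third dimension.

m1 <- matrix(1:4, 2, 2, dimnames = list(c("r1", "r2"), c("c1", "c2")))
m2 <- matrix(5:8, 2, 2, dimnames = list(c("r2", "r3"), c("c1", "c2")))
a3 <- threeDarr(first = m1, second = m2)  # union=TRUE: rows r1, r2, r3
dim(a3)                    # 3 2 2, slices named "first" and "second"
a3["r3", "c1", "second"]   # 6; cells absent from an input stay NA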
"var.add.benchmark" <- function (variance, benchmark.weights, name="benchmark", sum.to.one=TRUE) { fun.copyright <- "Placed in the public domain 2009-2014 by Burns Statistics Ltd." fun.version <- "var.add.benchmark 005" subfun.varadd <- function(varmat, lwt, p, pseq) { bcov <- drop(varmat %*% lwt) sf.ans <- array(NA, dim(varmat) + 1) sf.ans[pseq, pseq] <- varmat sf.ans[pseq, p+1] <- bcov sf.ans[p+1, pseq] <- bcov sf.ans[p+1, p+1] <- sum(lwt * bcov) sf.ans } # # start of main function # vnam <- dimnames(variance)[[1]] bnam <- names(benchmark.weights) if(!length(vnam)) { if(!length(bnam)) { stop(paste("need asset names for both", "'variance' and 'benchmark.weights'")) } stop("no asset names for 'variance'") } else if(!length(bnam)) { stop("no asset names for 'benchmark.weights'") } if(any(nchar(c(vnam, bnam)) == 0)) stop("no asset name may be missing") if(any(duplicated(bnam))) { stop("duplicate names in 'benchmark.weights'") } if(length(unique(intersect(bnam, vnam))) < length(bnam)) { nmiss <- length(bnam) - length(unique(intersect(bnam, vnam))) stop(paste(nmiss, "asset(s) in 'benchmark.weights' are", "not in 'variance'")) } if(any(is.na(benchmark.weights))) { stop(paste(sum(is.na(benchmark.weights)), "missing value(s) in 'benchmark.weights'")) } if(sum.to.one && abs(sum(abs(benchmark.weights)) - 1) > 1e-10) { wsum <- sum(abs(benchmark.weights)) warning(paste("absolute of 'benchmark.weights' sums to", wsum, " adjusting so it sums to 1", "-- use 'sum.to.one=FALSE' to avoid adjustment")) benchmark.weights <- benchmark.weights / wsum } dv <- dim(variance) ldv <- length(dv) if(ldv != 2 && ldv != 3) { stop("'variance' must be a matrix or 3D array") } p <- dv[1] if(dv[2] != p || any(dimnames(variance)[[2]] != vnam)) { stop(paste("second dimension of 'variance' does not match", "the first")) } if(any(is.na(variance))) { stop(paste(sum(is.na(variance)), "missing value(s) in variance")) } pseq <- 1:p lwt <- rep(0, p) names(lwt) <- vnam lwt[bnam] <- benchmark.weights vnamp <- c(vnam, name) if(ldv == 2) { ans <- subfun.varadd(variance, lwt, p, pseq) dimnames(ans) <- list(vnamp, vnamp) } else { ans <- array(NA, dv + c(1,1,0)) for(i in 1:dv[3]) { ans[, , i] <- subfun.varadd(variance[,, i], lwt, p, pseq) } dimnames(ans) <- list(vnamp, vnamp, dimnames(variance)[[3]]) } ans }
/scratch/gouwar.j/cran-all/cranData/BurStFin/R/var.add.benchmark.R
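A usage sketch with a toy two-asset variance matrix (made up for illustration): the added row/column holds the covariance of each asset with the weighted benchmark, and the corner holds the benchmark variance.

v <- matrix(c(0.04, 0.01, 0.01, 0.09), 2, 2,
    dimnames = list(c("A", "B"), c("A", "B")))
vb <- var.add.benchmark(v, c(A = 0.6, B = 0.4))
vb["benchmark", "benchmark"]  # w'Vw = 0.0336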
"var.relative.benchmark" <- function (variance, benchmark) { fun.copyright <- "Placed in the public domain 2009-2012 Burns Statistics Ltd." fun.version <- "var.relative.benchmark 004" subfun.varrel <- function(varmat, b.ind) { sf.ans <- varmat[-b.ind, -b.ind, drop=FALSE] + varmat[b.ind, b.ind] bencor <- varmat[-b.ind, b.ind] sf.ans <- t(sf.ans - bencor) - bencor sf.ans } # # start of main function # dv <- dim(variance) ldv <- length(dv) if(ldv != 2 && ldv != 3) { stop("'variance' must be a matrix or 3D array") } if(dv[1] != dv[2]) stop("bad dimensions for 'variance'") vnam <- dimnames(variance)[[1]] if(!length(vnam)) { stop("no asset names for 'variance'") } if(any(dimnames(variance)[[2]] != vnam)) { stop("mismatch of 'variance' names in 1st and 2nd dimensions") } if(length(benchmark) != 1 || !is.character(benchmark) || nchar(benchmark) == 0) { stop(paste("'benchmark' must be a single non-empty", "character string -- given has mode", mode(benchmark), "and length", length(benchmark))) } b.ind <- match(benchmark, vnam, nomatch=NA) if(is.na(b.ind)) { stop(paste("benchmark (", benchmark, ") not an asset in 'variance'", sep="")) } if(ldv == 2) { ans <- subfun.varrel(variance, b.ind) } else { ans <- array(NA, dv - c(1, 1, 0), list(vnam[-b.ind], vnam[-b.ind], dimnames(variance)[[3]])) for(i in 1:dv[3]) { ans[, , i] <- subfun.varrel(variance[, , i, drop=TRUE], b.ind) } } attr(ans, "call") <- match.call() ans }
/scratch/gouwar.j/cran-all/cranData/BurStFin/R/var.relative.benchmark.R
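A usage sketch (same toy matrix as above, made up): the variance relative to asset "B" is var(A) + var(B) - 2*cov(A, B).

v <- matrix(c(0.04, 0.01, 0.01, 0.09), 2, 2,
    dimnames = list(c("A", "B"), c("A", "B")))
var.relative.benchmark(v, "B")  # 1x1 matrix: 0.04 + 0.09 - 2*0.01 = 0.11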
"var.shrink.eqcor" <- function (x, weights=seq(0.5, 1.5, length=nt), shrink=NULL, center=TRUE, vol.shrink=0, sd.min=20, quan.sd=0.9, tol=1e-4, compatible=FALSE, verbose=2) { fun.copyright <- "Placed in the public domain 2009-2014 by Burns Statistics Ltd." fun.version <- "var.shrink.eqcor 004" x <- as.matrix(x) nassets <- ncol(x) if(nassets < 2) stop("'x' must have at least 2 columns") nt <- nrow(x) if(nt < 2) stop("'x' must have at least 2 rows") # for use in finance, try to check if prices rather than returns if(verbose >= 1 && min(x, na.rm=TRUE) >= 0) { warning(paste("minimum value in 'x' is", min(x, na.rm=TRUE), "are you giving a price", "matrix rather than a return matrix?", "(warning suppressed with verbose < 1)")) } if(is.null(weights)) weights <- rep(1, nt) if(any(weights < 0)) { stop(paste(sum(weights < 0), "negative value(s) in 'weights'")) } if(length(weights) != nt) { if(length(weights) == 1 && weights > 0) { weights <- rep(1, nt) } else { stop(paste("bad length for 'weights' argument", "-- length must be the number of rows of 'x'", "or a single positive number (meaning", "equal weighting)")) } } if(any(weights == 0)) { x <- x[weights > 0, , drop=FALSE] nt <- nrow(x) weights <- weights[weights > 0] } if(!is.numeric(sd.min) || length(sd.min) != 1) { stop(paste("'sd.min' should be a single number -- given", "has mode", mode(sd.min), "and length", length(sd.min))) } else if(sd.min < 2) { stop("'sd.min' must be at least 2") } if(!is.numeric(quan.sd) || length(quan.sd) != 1) { stop(paste("'quan.sd' should be a single number -- given", "has mode", mode(quan.sd), "and length", length(quan.sd))) } nobs <- nt - colSums(is.na(x)) good <- nobs >= min(sd.min, nt) if(sum(good) < 2) { stop(paste("not enough columns with enough data", "-- fewer than 2 columns with at least", sd.min, "non-missing observations")) } weights <- weights / mean(weights) if(!is.numeric(tol) || length(tol) != 1) { stop(paste("bad value for 'tol' which should be a", "single numeric value -- given has mode", mode(tol), "and length", length(tol))) } if(is.logical(center)) { if(center) { center <- colMeans(x * weights, na.rm=TRUE) } else { center <- rep(0, nassets) } } else if(length(center) != nassets || !is.numeric(center)) { stop(paste("bad value for 'center' which should be", "either a single logical value or a numeric vector", "of length", nassets, "-- given has mode", mode(center), "and length", length(center))) } x <- sweep(x, 2, center, "-") svar <- var(x * sqrt(weights), use='pairwise') if(compatible) { # to match Ledoit-Wolf code, do rescaling as in next line svar <- svar * (nt - 1) / nt if(diff(range(weights)) > 1e-7) { warning("in compatible mode but time weighting") } } svar.sup <- svar[good, good, drop=FALSE] sdiag <- sqrt(diag(svar)) sdiag[!good] <- quantile(sdiag[good], quan.sd) if(vol.shrink > 0) { if(vol.shrink > 1) vol.shrink <- 1 meanvol <- mean(sdiag) sdiag <- vol.shrink * meanvol + (1 - vol.shrink) * sdiag } meancor <- mean(cov2cor(svar.sup)[which(lower.tri(svar.sup, diag=FALSE))]) prior <- svar prior[] <- meancor diag(prior) <- 1 prior <- sdiag * prior * rep(sdiag, each=nassets) svar.sup <- which(is.na(svar)) svar[svar.sup] <- prior[svar.sup] if(length(shrink) != 1) { if(length(shrink)) { if(!is.numeric(shrink)) { stop(paste("'shrink' should be either NULL", "or a single numeric value -- given", "has mode", mode(shrink), "and length", length(shrink))) } warning(paste("'shrink' ignored, it has length", length(shrink), "-- should be either NULL", "or a single numeric value")) } gamma <- sum((svar - 
prior)^2) x[which(is.na(x))] <- 0 pi.mat <- theta1.mat <- theta2.mat <- array(0, c(nassets, nassets)) for(i in 1:nt) { this.wt <- weights[i] this.cross <- crossprod(x[i, , drop=FALSE]) - svar pi.mat <- pi.mat + this.wt * this.cross^2 theta1.mat <- theta1.mat + this.wt * diag(this.cross) * this.cross theta2.mat <- theta2.mat + rep(diag(this.cross), each=nassets) * this.cross * this.wt } pi.hat <- sum(pi.mat)/nt theta1.mat <- theta1.mat / nt theta2.mat <- theta2.mat / nt diag(theta1.mat) <- 0 diag(theta2.mat) <- 0 theta1.mat <- rep(sdiag, each=nassets) * theta1.mat / sdiag theta2.mat <- rep(1/sdiag, each=nassets) * theta2.mat * sdiag rho <- sum(diag(pi.mat)) / nt + meancor * 0.5 * (sum(theta1.mat) + sum(theta2.mat)) shrink <- (pi.hat - rho) / gamma / nt # allow memory clean up pi.mat <- theta1.mat <- theta2.mat <- this.cross <- NULL } else { if(!is.numeric(shrink)) { stop(paste("'shrink' should be either NULL or", "a single numeric value -- given has mode", mode(shrink), "and length", length(shrink))) } if(is.na(shrink)) { stop("missing value for 'shrink' argument") } } shrink <- min(1, max(0, shrink)) # overwrite svar to save space svar <- shrink * prior + (1 - shrink) * svar if(tol > 0 || any(is.na(x))) { svar.sup <- eigen(svar, symmetric=TRUE) tol <- tol * max(svar.sup$values) if(min(svar.sup$values) < tol) { vals <- svar.sup$values vals[which(vals < tol)] <- tol svar[] <- svar.sup$vectors %*% (vals * t(svar.sup$vectors)) } } attr(svar, "shrink") <- shrink attr(svar, "timestamp") <- date() svar }
/scratch/gouwar.j/cran-all/cranData/BurStFin/R/var.shrink.eqcor.R
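A usage sketch with simulated returns (made-up data): the shrinkage intensity toward the equal-correlation prior is estimated from the data when 'shrink' is NULL.

set.seed(42)
retmat <- matrix(rnorm(200, sd = 0.01), 50, 4,
    dimnames = list(NULL, paste0("asset", 1:4)))
v.shr <- var.shrink.eqcor(retmat)
attr(v.shr, "shrink")  # estimated shrinkage intensity, clamped to [0, 1]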
"Cfrag.list" <- function (x, file=NULL, item.num=c(3,10,5), indent=c("\t", "\t\t"), declaration.only=FALSE, long=FALSE, append=FALSE) { fun.copyright <- "Placed in the public domain 2009 by Burns Statistics Ltd." fun.version <- "Cfrag.list 003" subfun.bp <- function(z, inum, indent2) { zlen <- length(z) start <- seq(1, zlen, by=inum) end <- c(start[-1] - 1, zlen) cm1 <- paste("paste(z[", start, ":", end, "], collapse=', ')", sep="") cm2 <- paste("paste(c(", paste(cm1, collapse=", "), "))") subans <- eval(parse(text=cm2)) paste(indent2, subans, rep(c(",", ""), c(length(start)-1, 1)), sep="") } # start of main function decl <- unlist(lapply(x, storage.mode)) prefix <- rep("", length(decl)) pdm <- match(decl, c("double", "integer", "character"), nomatch=NA) if(any(is.na(pdm))) stop("at least one storage mode that can not be handled") decl[pdm == 2] <- if(long) "long" else "int" decl[pdm == 3] <- "char" prefix[pdm == 3] <- "*" dec.out <- paste(decl, " ", prefix, names(x), "[]", sep="") if(declaration.only) { dec.out <- paste(indent[1], dec.out, ";", sep="") if(length(file) && nchar(file)) { cat(dec.out, sep="\n", file=file, append=append) return(file[1]) } else { return(dec.out) } } item.num <- rep(item.num, length=3) indent <- rep(indent, length=2) ans <- NULL for(i in 1:length(x)) { switch(decl[i], double= { t.inum <- item.num[1] }, long=, int={ t.inum <- item.num[2] }, char={ t.inum <- item.num[3] x[[i]] <- paste('"', x[[i]], '"', sep="") }) t.sa <- subfun.bp(x[[i]], t.inum, indent[2]) ans <- c(ans, paste(indent[1], dec.out[i], " = {", sep=""), t.sa, paste(indent[1], "};", sep="")) } if(length(file) && nchar(file)) { cat(ans, sep="\n", file=file, append=append) return(file[1]) } else { return(ans) } }
/scratch/gouwar.j/cran-all/cranData/BurStMisc/R/Cfrag.list.R
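A usage sketch (made-up inputs): each list component becomes a C array initializer, with doubles kept as double, integers mapped to int (or long), and strings to char*. Roughly (tabs shown as spaces):

cat(Cfrag.list(list(wts = c(0.5, 0.25, 0.25), ids = 1:4)), sep = "\n")
##   double wts[] = {
##       0.5, 0.25, 0.25
##   };
##   int ids[] = {
##       1, 2, 3, 4
##   };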
"corner" <- function (x, corner = "tlffff", n = 6) { if (length(corner) != 1) stop("corner must be a single string") dx <- dim(x) ldx <- length(dx) if (ldx < 2) { if (substring(corner, 1, 1) == "t") return(head(x, n = n[1])) else return(tail(x, n = n[1])) } clen <- nchar(corner) if (clen < ldx) { if (clen == 0) { corner <- paste("tl", paste(rep("f", ldx - 2), collapse = ""), sep = "") } else if (clen == 1) corner <- paste(corner, "l", paste(rep("f", ldx - 1), collapse = ""), sep = "") else corner <- paste(corner, paste(rep("f", ldx - clen), collapse = ""), sep = "") } corner <- substring(corner, 1:ldx, 1:ldx) n <- rep(n, length = ldx) if (corner[1] == "t") rsub <- seq(length = min(n[1], dx[1])) else rsub <- seq(to = dx[1], length = min(n[1], dx[1])) if (corner[2] == "l") csub <- seq(length = min(n[2], dx[2])) else csub <- seq(to = dx[2], length = min(n[2], dx[2])) if (ldx == 2) { return(x[rsub, csub, drop = FALSE]) } subv <- vector("list", ldx + 1) subv[[1]] <- rsub subv[[2]] <- csub for (i in 3:ldx) { if (corner[i] == "f") subv[[i]] <- seq(length = min(n[i], dx[i])) else subv[[i]] <- seq(to = dx[i], length = min(n[i], dx[i])) } names(subv) <- c(rep("", ldx), "drop") subv[[ldx + 1]] <- FALSE do.call("[", c(list(x), subv)) }
/scratch/gouwar.j/cran-all/cranData/BurStMisc/R/corner.R
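A usage sketch (made-up matrix): the corner string picks top/bottom and left/right, and n gives the extent in each dimension.

m <- matrix(1:30, nrow = 5)
corner(m, "tl", n = c(2, 3))  # top-left 2x3 block
corner(m, "br", n = 2)        # bottom-right 2x2 block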
## number of days elapsed since 1980-09-28 (an internal, undocumented function)
.jpTogether <- function() {
    Sys.Date() - as.Date('1980-09-28')
}
/scratch/gouwar.j/cran-all/cranData/BurStMisc/R/dot_jpTogether.R
"genopt" <- function (fun, population, lower = -Inf, upper = Inf, scale = dcontrol["eps"], add.args = NULL, control = genopt.control(...), ...) { if(!exists(".Random.seed")) runif(1) random.seed <- .Random.seed if (is.character(fun)) fun <- get(fun, mode = "function") fun.args <- c(list(NULL), add.args) go.rectify <- function(pars, lower, upper) { pars[pars < lower] <- lower[pars < lower] pars[pars > upper] <- upper[pars > upper] pars } if (is.list(population)) { objective <- population$objective funevals <- population$funevals population <- population$population popsize <- ncol(population) if (is.null(popsize) || length(objective) != popsize) stop("bad input population") if (!is.numeric(funevals) || is.na(funevals)) { funevals <- 0 warning("funevals starting at 0") } } else { if (!is.matrix(population)) stop("bad input population") popsize <- ncol(population) objective <- numeric(popsize) npar <- nrow(population) lower <- rep(lower, length = npar) upper <- rep(upper, length = npar) if (any(upper < lower)) stop("upper element smaller than lower") for (i in 1:popsize) { population[, i] <- fun.args[[1]] <- go.rectify(population[, i], lower, upper) objective[i] <- do.call("fun", fun.args) } funevals <- popsize } icontrol <- control$icontrol dcontrol <- control$dcontrol trace <- icontrol["trace"] minobj <- min(objective) npar <- nrow(population) if (trace) { cat("objectives go from", format(minobj), "to", format(max(objective)), "\n") } if (icontrol["random.n"]) { par.range <- apply(population, 1, range) par.range[2, par.range[2, ] == par.range[1, ]] <- par.range[2, par.range[2, ] == par.range[1, ]] + dcontrol["scale.min"] maxobj <- max(objective) for (i in 1:icontrol["random.n"]) { fun.args[[1]] <- runif(npar, par.range[1, ], par.range[2, ]) this.obj <- do.call("fun", fun.args) if (this.obj < maxobj) { maxind <- order(objective)[popsize] population[, maxind] <- fun.args[[1]] objective[maxind] <- this.obj maxobj <- max(objective) } } if (trace) { cat("objectives go from", format(minobj), "to", format(maxobj), "\n") } } njit <- icontrol["jitters.n"] lower <- rep(lower, length = npar) upper <- rep(upper, length = npar) if (any(upper < lower)) stop("upper element smaller than lower") scale[scale < dcontrol["scale.min"]] <- dcontrol["scale.min"] scale <- rep(scale, length = npar) prob <- dcontrol["prob"] prob <- c(prob, 1 - prob) maxeval <- icontrol["maxeval"] for (i in 1:icontrol["births"]) { if (funevals >= maxeval) break parents <- sample(popsize, 2) child <- population[, parents[1]] cloc <- sample(c(TRUE, FALSE), npar, replace = TRUE, prob = prob) if (all(cloc)) cloc[sample(npar, 1)] <- FALSE else if (all(!cloc)) cloc[sample(npar, 1)] <- TRUE child[cloc] <- population[cloc, parents[2]] fun.args[[1]] <- child child.obj <- do.call("fun", fun.args) funevals <- funevals + 1 parent.obj <- objective[parents] survive <- child.obj < max(parent.obj) if (trace) { cat(i, "parents:", parent.obj, "child:", format(child.obj), if (survive) "(improve)", "\n") } if (survive || (child.obj == parent.obj[1] && child.obj == parent.obj[2])) { if (parent.obj[1] > parent.obj[2]) out <- parents[1] else out <- parents[2] population[, out] <- child objective[out] <- child.obj if (trace && child.obj < minobj) { minobj <- child.obj cat("new minimum\n") } for (i in seq(length = njit)) { fun.args[[1]] <- jchild <- go.rectify(rnorm(npar, child, scale), lower, upper) jchild.obj <- do.call("fun", fun.args) funevals <- funevals + 1 if (jchild.obj < child.obj) { child <- population[, out] <- jchild child.obj <- objective[out] <- 
jchild.obj if (trace) { cat("jitter successsful:", format(jchild.obj), "\n") if (jchild.obj < minobj) { cat("new minimum\n") minobj <- jchild.obj } } } } } } ord <- order(objective) answer <- list(population = population[, ord], objective=objective[ord], funevals=funevals, random.seed=random.seed, call=match.call()) class(answer) <- "genopt" answer }
/scratch/gouwar.j/cran-all/cranData/BurStMisc/R/genopt.R
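A usage sketch (made-up objective and population): each column of the population matrix is a candidate parameter vector, and genopt evolves them toward smaller objective values.

set.seed(1)
pop0 <- matrix(runif(40, -5, 5), nrow = 4)  # 4 parameters, 10 candidates
opt <- genopt(function(p) sum(p^2), pop0, lower = -5, upper = 5,
    control = genopt.control(births = 50, trace = FALSE))
opt$objective[1]     # best objective value (results are sorted best-first)
opt$population[, 1]  # corresponding parameter vector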
"genopt.control" <- function (births = 100, random.n = 0, jitters.n = 3, trace = TRUE, eps = 0.1, prob = 0.4, scale.min = 1e-12, maxeval = Inf) { dcon <- c(eps = eps, prob = prob, scale.min = scale.min) icon <- c(births = births, random.n = random.n, jitters.n = jitters.n, trace = trace, maxeval = maxeval) list(icontrol = icon, dcontrol = dcon) }
/scratch/gouwar.j/cran-all/cranData/BurStMisc/R/genopt.control.R
ntile <- function (x, ngroups, na.rm=FALSE, result="list", reverse=FALSE,
    checkBleed=TRUE)
{
    # placed in the public domain 2012-2016 by Burns Statistics

    stopifnot(is.numeric(ngroups), length(ngroups) == 1, ngroups > 0)
    result.menu <- c("list", "numeric", "factor")
    result.num <- pmatch(result, result.menu, nomatch=0)
    if(result.num == 0L) {
        stop("'result' must be (an abbreviation of) one of: ",
            paste(result.menu, collapse=", "))
    }
    result <- result.menu[result.num]
    if(na.rm) {
        x <- x[!is.na(x)]
    } else if(nas <- sum(is.na(x))) {
        stop(nas, " missing values present")
    }
    nx <- length(x)
    if(nx < ngroups) {
        stop("more groups (", ngroups, ") than observations (", nx, ")")
    }
    basenum <- nx %/% ngroups
    extra <- nx %% ngroups
    repnum <- rep(basenum, ngroups)
    if(extra) {
        eloc <- seq(floor((ngroups - extra)/2 + 1), length=extra)
        repnum[eloc] <- repnum[eloc] + 1
    }
    if(reverse) {
        groupvec <- rep(ngroups:1, rev(repnum))[order(order(x))]
    } else {
        groupvec <- rep(1:ngroups, repnum)[order(order(x))]
    }
    names(groupvec) <- names(x)
    grouplist <- split(x, groupvec)
    if(checkBleed && ngroups > 1) {
        bleeding <- rep(FALSE, ngroups)
        if(reverse) {
            for(i in 2:ngroups) {
                if(max(grouplist[[i]]) >= min(grouplist[[i-1L]])) {
                    bleeding[(i-1L):i] <- TRUE
                }
            }
        } else {
            for(i in 2:ngroups) {
                if(max(grouplist[[i-1L]]) >= min(grouplist[[i]])) {
                    bleeding[(i-1L):i] <- TRUE
                }
            }
        }
        if(any(bleeding)) {
            warning("common values across groups: ",
                paste(which(bleeding), collapse=", "))
        }
    }
    switch(result,
        list={
            grouplist
        },
        numeric={
            groupvec
        },
        factor={
            ordered(groupvec, levels=if(reverse) ngroups:1 else 1:ngroups)
        })
}
/scratch/gouwar.j/cran-all/cranData/BurStMisc/R/ntile.R
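A usage sketch (made-up data): ten values split into quartile groups; the remainder observations go to the middle groups, giving sizes 2, 3, 3, 2.

x <- c(5, 1, 9, 3, 7, 2, 8, 4, 6, 10)
ntile(x, 4, result = "numeric")  # group number per observation, 1 = smallest
ntile(x, 4)                      # the same split returned as a list of groups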
"permutation.test.discrete" <- function (x, y = NULL, scores, alternative = "greater", trials = 1000) { if (length(y)) { n <- length(y) if (length(x) != n) stop("x and y have different lengths") } else { if (ncol(x) != 2) stop("x does not have 2 columns and y is missing") y <- x[, 2] x <- x[, 1] n <- length(y) } x <- as.character(x) y <- as.character(y) if (length(alternative) != 1 || !is.character(alternative)) stop("alternative must be a single character string") altnum <- pmatch(alternative, c("greater", "less"), nomatch = NA) if (is.na(altnum)) stop("alternative must partially match 'greater' or 'less'") alternative <- c("greater", "less")[altnum] orig.tab <- table(x, y) otd <- dim(orig.tab) odnam <- dimnames(orig.tab) scnam <- dimnames(scores) if (!is.matrix(scores) || length(scnam) != 2 || !is.numeric(scores)) stop("scores must be a numeric matrix with dimnames") scd <- dim(scores) if (any(scd != otd) && any(rev(scd) != otd)) { stop(paste("scores is not the proper size, should be", otd[1], "by", otd[2])) } if (any(scd != otd)) { scores <- t(scores) scd <- dim(scores) scnam <- dimnames(scores) reverse <- TRUE } else { reverse <- FALSE } rownum <- match(scnam[[1]], odnam[[1]], nomatch = NA) if (any(is.na(rownum))) { if (reverse || otd[1] != otd[2]) stop("bad dimnames for scores") scores <- t(scores) scd <- dim(scores) scnam <- dimnames(scores) rownum <- match(scnam[[1]], odnam[[1]], nomatch = NA) if (any(is.na(rownum))) stop("bad dimnames for scores") } colnum <- match(scnam[[2]], odnam[[2]], nomatch = NA) if (any(is.na(colnum))) stop("bad dimnames for scores") scores <- scores[rownum, colnum] if(!exists(".Random.seed")) runif(1) ranseed <- .Random.seed orig.score <- sum(orig.tab * scores) perm.scores <- numeric(trials) for (i in 1:trials) { perm.scores[i] <- sum(table(x, sample(y)) * scores) } if (alternative == "greater") { extreme <- sum(perm.scores >= orig.score) } else { extreme <- sum(perm.scores <= orig.score) } ans <- list(original.score = orig.score, perm.scores = perm.scores, stats = c(nobs = n, trials = trials, extreme = extreme), alternative = alternative, random.seed = ranseed, call = match.call()) class(ans) <- "permtstBurSt" ans }
/scratch/gouwar.j/cran-all/cranData/BurStMisc/R/permutation.test.discrete.R
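A usage sketch (made-up data): the scores matrix rewards agreement between two categorical vectors, and the permutation distribution calibrates the observed score.

set.seed(2)
fcast <- rep(c("up", "down"), 50)
outcome <- sample(c("up", "down"), 100, replace = TRUE)
sc <- matrix(c(1, 0, 0, 1), 2, 2,
    dimnames = list(c("down", "up"), c("down", "up")))
pt <- permutation.test.discrete(fcast, outcome, scores = sc, trials = 200)
pt        # print method reports the permutation p-value
plot(pt)  # scores at least as extreme as observed are highlighted in red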
"permutation.test.fun" <- function (x, y = NULL, fun = function(x, y) sum(x * y), alternative = "greater", trials = 1000) { if (length(y)) { n <- length(y) if (length(x) != n) stop("x and y have different lengths") if (!is.numeric(y)) stop("y must be numeric") } else { if (ncol(x) != 2) stop("x does not have 2 columns and y is missing") x <- as.matrix(x) if (!is.numeric(x)) stop("x must be numeric") y <- x[, 2] x <- x[, 1] n <- length(y) } if (length(alternative) != 1 || !is.character(alternative)) stop("alternative must be a single character string") altnum <- pmatch(alternative, c("greater", "less"), nomatch = NA) if (is.na(altnum)) stop("alternative must partially match 'greater' or 'less'") alternative <- c("greater", "less")[altnum] if(!exists(".Random.seed")) runif(1) ranseed <- .Random.seed orig.score <- fun(x, y) if (length(orig.score) != 1) stop("fun must return a single number") perm.scores <- numeric(trials) for (i in 1:trials) { perm.scores[i] <- fun(x, sample(y)) } if (alternative == "greater") { extreme <- sum(perm.scores >= orig.score) } else { extreme <- sum(perm.scores <= orig.score) } ans <- list(original.score = orig.score, perm.scores = perm.scores, stats = c(nobs = n, trials = trials, extreme = extreme), alternative = alternative, random.seed = ranseed, call = match.call()) class(ans) <- "permtstBurSt" ans }
/scratch/gouwar.j/cran-all/cranData/BurStMisc/R/permutation.test.fun.R
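A usage sketch (made-up data) using the default score function sum(x * y):

set.seed(3)
x <- rnorm(50)
y <- x + rnorm(50)
pt <- permutation.test.fun(x, y, trials = 500)
pt$stats["extreme"] / pt$stats["trials"]  # one-sided permutation p-value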
"plot.permtstBurSt" <- function (x, col = c("black", "red"), width = 10, uniqlim = 10, main = "", xlab = "Scores", ...) { orig.score <- x$original.score ulen <- length(unique(x$perm.scores)) if (ulen > uniqlim) { hist(x$perm.scores, xlim = range(x$perm.scores, orig.score), main = main, xlab = xlab, ...) box() abline(v = orig.score, col = col[2]) } else { ptab <- table(x$perm.scores) vals <- as.numeric(names(ptab)) if (x$alternative == "greater") { extreme <- vals >= orig.score } else { extreme <- vals <= orig.score } if (all(vals > orig.score) || all(vals < orig.score)) { xrng <- range(vals, orig.score) } else { xrng <- range(vals) } plot(vals, ptab, type = "n", xlab = xlab, ylab = "Count", xlim = xrng, ...) points(vals[!extreme], ptab[!extreme], type = "h", col = col[1], lwd = width, ...) points(vals[extreme], ptab[extreme], type = "h", col = col[2], lwd = width, ...) if (nchar(main)) title(main = main) } }
/scratch/gouwar.j/cran-all/cranData/BurStMisc/R/plot.permtstBurSt.R
"print.permtstBurSt" <- function (x, digits = 4, ...) { cat("Call:\n") print(x$call) cat("\nOriginal value:", x$original.score, " Number of observations:", x$stats["nobs"], "\n") cat("Number of random permutations:", x$stats["trials"], " Alternative:", x$alternative, " p-value:", round(x$stats["extreme"]/x$stats["trials"], digits), "\n") invisible(x) }
/scratch/gouwar.j/cran-all/cranData/BurStMisc/R/print.permtstBurSt.R
scriptSearch <- function(pattern, path=".", subdirs=TRUE, suffix="\\.[rR]$",
    commentsIncluded=FALSE, ..., verbose=FALSE)
{
    grepFile <- function(x, commentsIncluded, ...) {
        if(!file.exists(x)) {
            warning("non-existent file: ", x)
            return(NULL)
        }
        suppressWarnings(theText <- readLines(x))
        subans <- trimws(grep(pattern, value=TRUE, x=theText, ...))
        if(!commentsIncluded && length(subans)) {
            subans <- subans[substr(subans, 1, 1) != "#"]
        }
        subans
    }

    # start of main function
    if(!file.exists(path)) {
        path <- substr(path, 1, nchar(path) - 1)
        if(!file.exists(path)) {
            stop("'path' does not exist, it seems")
        }
    }
    fileList <- list.files(path=path, pattern=suffix, full.names=TRUE,
        recursive=subdirs)
    answer <- setNames(vector("list", length(fileList)), fileList)
    for(i in seq_along(fileList)) {
        if(verbose) {
            cat("checking:", fileList[i], "\n")
        }
        answer[[i]] <- grepFile(fileList[i],
            commentsIncluded=commentsIncluded, ...)
    }
    answer[lengths(answer) > 0L]
}
/scratch/gouwar.j/cran-all/cranData/BurStMisc/R/scriptSearch.R
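A usage sketch (the directory path here is hypothetical): the result is a named list with one element per file that has at least one non-comment match.

hits <- scriptSearch("sample\\(", path = "~/R/scripts", subdirs = FALSE)
names(hits)  # files containing lines that match the pattern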
summary.genopt <- function(object, ...)
{
    answer <- list(call=object$call,
        summary.objectives=summary(object$objective),
        best.solution=object$population[, 1L])
    # return the result visibly; the bare assignment above would
    # otherwise make the summary invisible at top level
    answer
}
/scratch/gouwar.j/cran-all/cranData/BurStMisc/R/summary.genopt.R
writeExpectTest <- function(expr, filename="", ...)
{
    tfile <- tempfile()
    on.exit(unlink(tfile))
    exsub <- deparse(substitute(expr))
    result <- paste(deparse(dput(expr, file=tfile)), collapse="\n")
    output <- paste0("expect_equal(", exsub, ",\n", result, "\n)")
    cat(output, file=filename, ...)
}
/scratch/gouwar.j/cran-all/cranData/BurStMisc/R/writeExpectTest.R
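A usage sketch: the expression is deparsed twice, once literally and once after evaluation, to form a testthat expectation.

writeExpectTest(1:3 + 1)
## writes to the console:
## expect_equal(1:3 + 1,
## c(2, 3, 4)
## )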
businessDuration <- function(startdate="", enddate="", starttime=NA, endtime=NA,
    weekendlist=c("Saturday","Sunday"), holidaylist=c(), unit='min'){
  ## relies on times(), hours(), minutes() and seconds() from the 'chron' package
  result <- class(startdate)
  if(!result[1] %in% c("POSIXlt","POSIXct")){
    return("startdate must be in POSIXlt/POSIXct format")
  }
  result <- class(enddate)
  if(!result[1] %in% c("POSIXlt","POSIXct")){
    return("enddate must be in POSIXlt/POSIXct format")
  }
  if(is.na(startdate)){
    return(NA)
  }
  if(is.na(enddate)){
    return(NA)
  }
  if(startdate > enddate){
    return(NA)
  }
  if(!is.na(starttime) && !is.na(endtime)){
    #Business start & end time supplied
    result <- try(expr = times(starttime), silent = TRUE)
    if(class(result)=="try-error"){
      return("format must be in hh:mm:ss")
    }else{
      starttime <- times(starttime)
    }
    result <- try(expr = times(endtime), silent = TRUE)
    if(class(result)=="try-error"){
      return("format must be in hh:mm:ss")
    }else{
      endtime <- times(endtime)
    }
  }else if((!is.na(starttime) & is.na(endtime)) || (is.na(starttime) & !is.na(endtime))){
    return(NA) #Either Business start/end time is supplied but not both
  }else{
    #Both Business start & end time are not supplied-NA
  }
  #Creating sequence of days
  working_days <- seq(from = as.Date(startdate), to = as.Date(enddate), by = "day")
  #Removing weekends
  working_days <- working_days[!(weekdays(working_days) %in% weekendlist)]
  #Removing Public Holidays
  if(length(holidaylist)!=0){
    working_days <- working_days[!working_days %in% holidaylist]
  }
  #No. of working days
  len_working_days <- length(working_days)
  if(len_working_days == 0){
    return(NA)
  }else if(len_working_days == 1){ #1 working day
    if(!as.Date(startdate) %in% working_days){
      startdate <- strptime(paste(working_days[1], times("00:00:00")), format = "%Y-%m-%d %H:%M:%S")
    }
    if(!as.Date(enddate) %in% working_days){
      enddate <- strptime(paste(working_days[1], times("23:59:59")), format = "%Y-%m-%d %H:%M:%S")
    }
    startdatetime = times(strftime(x = startdate, format = "%H:%M:%S"))
    enddatetime = times(strftime(x = enddate, format = "%H:%M:%S"))
    if(starttime <= endtime){# Eg. 9AM - 6PM
      if(startdatetime < starttime){
        open_time <- starttime
      }else if(startdatetime >= starttime && startdatetime <= endtime){
        open_time <- startdatetime
      }else{
        # open_time <- times("00:00:00")
        return(NA)
      }
      if(enddatetime < starttime){
        # close_time <- times("00:00:00")
        return(NA)
      }else if(enddatetime >= starttime && enddatetime <= endtime){
        close_time <- enddatetime
      }else{
        close_time <- endtime
      }
    }else{# Eg. 9PM - 3AM
      midnight_time <- times("23:59:59")
      if(startdatetime < starttime){
        open_time <- starttime
        close_time <- midnight_time
      }else{
        open_time <- startdatetime
        close_time <- midnight_time
      }
    }
    add_seconds <- ((hours(close_time)*60*60)+(minutes(close_time)*60)+seconds(close_time)) -
      ((hours(open_time)*60*60)+(minutes(open_time)*60)+seconds(open_time))
  }else if(len_working_days == 2){ #2 working day
    if(!as.Date(startdate) %in% working_days){
      startdate <- strptime(paste(working_days[1], times("00:00:00")), format = "%Y-%m-%d %H:%M:%S")
    }
    if(!as.Date(enddate) %in% working_days){
      enddate <- strptime(paste(working_days[len_working_days], times("23:59:59")), format = "%Y-%m-%d %H:%M:%S")
    }
    startdatetime = times(strftime(x = startdate, format = "%H:%M:%S"))
    enddatetime = times(strftime(x = enddate, format = "%H:%M:%S"))
    add_seconds <- 0
    if(starttime <= endtime){# Eg. 9AM - 6PM
      if(startdatetime < starttime){
        open_time <- starttime
        close_time <- endtime
      }else if(startdatetime >= starttime && startdatetime <= endtime){
        open_time <- startdatetime
        close_time <- endtime
      }else{
        open_time <- times("00:00:00")
        close_time <- times("00:00:00")
      }
      add_seconds <- add_seconds + ((hours(close_time)*60*60)+(minutes(close_time)*60)+seconds(close_time)) -
        ((hours(open_time)*60*60)+(minutes(open_time)*60)+seconds(open_time))
      #Calculating Closing day time in seconds
      if(enddatetime < starttime){
        open_time <- times("00:00:00")
        close_time <- times("00:00:00")
      }else if(enddatetime >= starttime && enddatetime <= endtime){
        open_time <- starttime
        close_time <- enddatetime
      }else{
        open_time <- starttime
        close_time <- endtime
      }
      add_seconds <- add_seconds + ((hours(close_time)*60*60)+(minutes(close_time)*60)+seconds(close_time)) -
        ((hours(open_time)*60*60)+(minutes(open_time)*60)+seconds(open_time))
    }else{ #Eg. 9PM - 3AM
      #Calculating starting day time in seconds
      midnight_time <- times("23:59:59")
      if(startdatetime < starttime){
        open_time <- starttime
        close_time <- midnight_time
      }else{
        open_time <- startdatetime
        close_time <- midnight_time
      }
      add_seconds <- add_seconds + ((hours(close_time)*60*60)+(minutes(close_time)*60)+seconds(close_time)) -
        ((hours(open_time)*60*60)+(minutes(open_time)*60)+seconds(open_time))
      #Calculate Closing day time in seconds
      if(enddatetime <= endtime){
        open_time <- times("00:00:00")
        close_time <- enddatetime
      }else{
        open_time <- times("00:00:00")
        close_time <- endtime
      }
      add_seconds <- add_seconds + ((hours(close_time)*60*60)+(minutes(close_time)*60)+seconds(close_time)) -
        ((hours(open_time)*60*60)+(minutes(open_time)*60)+seconds(open_time))
    }
  }else{ #more than 2 working day
    add_seconds = 0
    if(!as.Date(startdate) %in% working_days){
      startdate <- strptime(paste(working_days[1], times("00:00:00")), format = "%Y-%m-%d %H:%M:%S")
    }
    if(!as.Date(enddate) %in% working_days){
      enddate <- strptime(paste(working_days[len_working_days], times("23:59:59")), format = "%Y-%m-%d %H:%M:%S")
    }
    startdatetime = times(strftime(x = startdate, format = "%H:%M:%S"))
    enddatetime = times(strftime(x = enddate, format = "%H:%M:%S"))
    in_between_days <- len_working_days - 2
    if(starttime <= endtime){ #Eg. 9AM - 6PM
      #Calculate Starting day time in seconds
      if(startdatetime < starttime){
        open_time <- starttime
        close_time <- endtime
      }else if(startdatetime >= starttime && startdatetime <= endtime){
        open_time <- startdatetime
        close_time <- endtime
      }else{
        open_time <- times("00:00:00")
        close_time <- times("00:00:00")
      }
      add_seconds <- add_seconds + ((hours(close_time)*60*60)+(minutes(close_time)*60)+seconds(close_time)) -
        ((hours(open_time)*60*60)+(minutes(open_time)*60)+seconds(open_time))
      #Calculate Closing day time in seconds
      if(enddatetime < starttime){
        open_time <- times("00:00:00")
        close_time <- times("00:00:00")
      }else if(enddatetime >= starttime && enddatetime <= endtime){
        open_time <- starttime
        close_time <- enddatetime
      }else{
        open_time <- starttime
        close_time <- endtime
      }
      add_seconds <- add_seconds + ((hours(close_time)*60*60)+(minutes(close_time)*60)+seconds(close_time)) -
        ((hours(open_time)*60*60)+(minutes(open_time)*60)+seconds(open_time))
      #Calculate in between days in seconds
      in_between_days_seconds <- in_between_days*(((hours(endtime)*60*60)+(minutes(endtime)*60)+seconds(endtime)) -
        ((hours(starttime)*60*60)+(minutes(starttime)*60)+seconds(starttime)))
      add_seconds <- add_seconds + in_between_days_seconds
    }else{ #Eg. 9PM - 3AM
      #Calculate Starting day time in seconds
      midnight_time <- times("23:59:59")
      if(startdatetime < starttime){
        open_time <- starttime
        close_time <- midnight_time
      }else{
        open_time <- startdatetime
        close_time <- midnight_time
      }
      add_seconds <- add_seconds + ((hours(close_time)*60*60)+(minutes(close_time)*60)+seconds(close_time)) -
        ((hours(open_time)*60*60)+(minutes(open_time)*60)+seconds(open_time))
      #Calculate Closing day time in seconds
      if(enddatetime <= endtime){
        open_time <- times("00:00:00")
        close_time <- enddatetime
      }else{
        open_time <- times("00:00:00")
        close_time <- endtime
      }
      add_seconds <- add_seconds + ((hours(close_time)*60*60)+(minutes(close_time)*60)+seconds(close_time)) -
        ((hours(open_time)*60*60)+(minutes(open_time)*60)+seconds(open_time))
      #Calculating business hours between days in seconds
      half1 <- ((hours(midnight_time)*60*60)+(minutes(midnight_time)*60)+seconds(midnight_time)) -
        ((hours(starttime)*60*60)+(minutes(starttime)*60)+seconds(starttime))
      half2 <- ((hours(close_time)*60*60)+(minutes(close_time)*60)+seconds(close_time))
      in_between_days_seconds <- in_between_days*(half1+half2)
      add_seconds <- add_seconds + in_between_days_seconds
    }
  }
  if(unit == "sec"){
    bd = add_seconds
  }else if(unit == "min"){
    bd = add_seconds/60
  }else if(unit == "hour"){
    bd = (add_seconds/60)/60
  }else if(unit == "day"){
    bd = ((add_seconds/60)/60)/24
  }else{
    bd <- NA
  }
  return(bd)
}
/scratch/gouwar.j/cran-all/cranData/BusinessDuration/R/business_duration.R
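A usage sketch (dates made up; requires the chron package for times()): hours worked between a Monday morning and a Wednesday afternoon with a 9:00-17:00 business day.

st <- as.POSIXct("2023-05-01 10:00:00")  # a Monday
en <- as.POSIXct("2023-05-03 16:00:00")  # the Wednesday after
businessDuration(startdate = st, enddate = en,
    starttime = "09:00:00", endtime = "17:00:00", unit = "hour")
## 7h Monday + 8h Tuesday + 7h Wednesday = 22 hours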
BuyseTest.env <- new.env() # create a specific environment for the package

'.onAttach' <- function(libname, pkgname="BuyseTest") {
    desc <- utils::packageDescription(pkgname)
    packageStartupMessage(desc$Package, " version ", desc$Version)
    BuyseTest.options(reinitialise = TRUE) # generate .BuyseTest-options when loading the package
}

riskRegression_transformT <- get("transformT",
    envir = asNamespace("riskRegression"), inherits = FALSE)
riskRegression_transformIID <- get("transformIID",
    envir = asNamespace("riskRegression"), inherits = FALSE)
/scratch/gouwar.j/cran-all/cranData/BuyseTest/R/0-onLoad.R
## * Allocator alloc (for BuyseTest-options)
setGeneric(name = "alloc",
    def = function(object, ...){standardGeneric("alloc")}
)

## * Selector select (for BuyseTest-options)
setGeneric(name = "select",
    def = function(object, ...){standardGeneric("select")}
)

## * Selector getCount (for S4BuyseTest)
#' @rdname S4BuyseTest-getCount
#' @exportMethod getCount
#' @keywords methods
setGeneric(name = "getCount",
    def = function(object, type){standardGeneric("getCount")}
)

## * Selector getPairScore (for S4BuyseTest)
#' @rdname S4BuyseTest-getPairScore
#' @exportMethod getPairScore
#' @keywords methods
setGeneric(name = "getPairScore",
    def = function(object, endpoint = NULL, strata = NULL, sum = FALSE,
        rm.withinStrata = TRUE, rm.strata = is.na(object@strata),
        rm.indexPair = TRUE, rm.weight = FALSE,
        rm.corrected = (object@correction.uninf==0),
        unlist = TRUE, trace = 1){
        standardGeneric("getPairScore")
    }
)

## * Selector getPseudovalue (for S4BuyseTest)
#' @rdname S4BuyseTest-getPseudovalue
#' @exportMethod getPseudovalue
#' @keywords methods
setGeneric(name = "getPseudovalue",
    def = function(object, statistic = NULL, endpoint = NULL){
        standardGeneric("getPseudovalue")
    }
)

## * Selector getSurvival (for S4BuyseTest)
#' @rdname S4BuyseTest-getSurvival
#' @exportMethod getSurvival
#' @keywords methods
setGeneric(name = "getSurvival",
    def = function(object, type = NULL, endpoint = NULL, strata = NULL,
        unlist = TRUE, trace = TRUE){
        standardGeneric("getSurvival")
    }
)

## * Selector getIid (for S4BuyseTest)
#' @rdname S4BuyseTest-getIid
#' @exportMethod getIid
#' @keywords methods
setGeneric(name = "getIid",
    def = function(object, endpoint = NULL, statistic = NULL, strata = FALSE,
        cumulative = TRUE, center = TRUE, scale = TRUE, type = "all",
        cluster = NULL, simplify = FALSE){
        standardGeneric("getIid")
    }
)

## * method sensitivity (for BuyseTest)
#' @rdname S4BuyseTest-sensitivity
#' @exportMethod sensitivity
#' @keywords htest
setGeneric(name = "sensitivity",
    def = function(object, ...){standardGeneric("sensitivity")}
)
/scratch/gouwar.j/cran-all/cranData/BuyseTest/R/1-setGeneric.R
### multcomp.R ---
##----------------------------------------------------------------------
## Author: Brice Ozenne
## Created: okt  4 2021 (16:17)
## Version:
## Last-Updated: jul 17 2023 (17:29)
##           By: Brice Ozenne
##     Update #: 294
##----------------------------------------------------------------------
##
### Commentary:
##
### Change Log:
##----------------------------------------------------------------------
##
### Code:

## * BuyseMultComp (documentation)
##' @title Adjustment for Multiple Comparisons
##' @description Adjust p-values and confidence intervals estimated via GPC for multiple comparisons.
##' @name BuyseMultComp
##'
##' @param object A BuyseTest object or a list of BuyseTest objects. All objects should contain the same endpoints.
##' @param cluster [character] name of the variable identifying the observations in the dataset used by each BuyseTest model.
##' Only relevant when using a list of BuyseTest objects to correctly combine the influence functions.
##' If NULL, then it is assumed that the BuyseTest objects correspond to different groups of individuals.
##' @param linfct [numeric matrix] a contrast matrix of size the number of endpoints times the number of BuyseTest models.
##' @param rhs [numeric vector] the values for which the test statistic should be tested against. Should have the same number of rows as \code{linfct}.
##' @param endpoint [character or numeric vector] the endpoint(s) to be considered.
##' @param statistic [character] the statistic summarizing the pairwise comparison:
##' \code{"netBenefit"} displays the net benefit, as described in Buyse (2010) and Peron et al. (2016),
##' \code{"winRatio"} displays the win ratio, as described in Wang et al. (2016),
##' \code{"favorable"} displays the proportion in favor of the treatment (also called Mann-Whitney parameter), as described in Fay et al. (2018).
##' \code{"unfavorable"} displays the proportion in favor of the control.
##' Default value read from \code{BuyseTest.options()}.
#' @param cumulative [logical] should the summary statistic be cumulated over endpoints?
#' Otherwise display the contribution of each endpoint.
#' @param conf.level [numeric] confidence level for the confidence intervals.
#' Default value read from \code{BuyseTest.options()}.
#' @param band [logical] Should confidence intervals and p-values adjusted for multiple comparisons be computed.
#' @param global [logical] Should global test (intersection of all null hypotheses) be made?
#' @param alternative [character] the type of alternative hypothesis: \code{"two.sided"}, \code{"greater"}, or \code{"less"}.
#' Default value read from \code{BuyseTest.options()}.
#' @param transformation [logical] should the CI be computed on the logit scale / log scale for the net benefit / win ratio and backtransformed.
#' Otherwise they are computed without any transformation.
#' Default value read from \code{BuyseTest.options()}. Not relevant when using permutations or percentile bootstrap.
#' @param ... argument passed to the function \code{transformCIBP} of the riskRegression package.
#'
#' @details Simultaneous confidence intervals and adjusted p-values are computed using a single-step max-test approach via the function \code{transformCIBP} of the riskRegression package.
#' This corresponds to the single-step Dunnett described in Dmitrienko et al (2013) in table 2 and section 7.
#'
#' @return An S3 object of class \code{BuyseMultComp}.
#'
#' @keywords htest
#'
#' @references Dmitrienko, A. and D'Agostino, R., Sr (2013), Traditional multiplicity adjustment methods in clinical trials. Statist. Med., 32: 5172-5218. https://doi.org/10.1002/sim.5990
#'
#' @examples
#' #### simulate data ####
#' set.seed(10)
#' df.data <- simBuyseTest(1e2, n.strata = 3)
#'
#' #### adjustment for all univariate analyses ####
#' ff1 <- treatment ~ TTE(eventtime, status = status, threshold = 0.1)
#' ff2 <- update(ff1, .~. + cont(score, threshold = 1))
#' BT2 <- BuyseTest(ff2, data= df.data, trace = FALSE)
#'
#' ## (require riskRegression >= 2021.10.04 to match)
#' confint(BT2, cumulative = FALSE) ## not adjusted
#' confintAdj <- BuyseMultComp(BT2, cumulative = FALSE, endpoint = 1:2) ## adjusted
#' confintAdj
#' if(require(lava)){
#' cor(lava::iid(confintAdj)) ## correlation between test-statistic
#' }
#'
#' #### 2- adjustment for multi-arm trial ####
#' ## case where we have more than two treatment groups
#' ## here strata will represent the treatment groups
#' df.data$strata <- as.character(df.data$strata)
#' df.data$id <- paste0("Id",1:NROW(df.data)) ## define id variable
#'
#' BT1ba <- BuyseTest(strata ~ TTE(eventtime, status = status, threshold = 1),
#'                    data= df.data[strata %in% c("a","b"),], trace = FALSE)
#' BT1ca <- BuyseTest(strata ~ TTE(eventtime, status = status, threshold = 0.1),
#'                    data= df.data[strata %in% c("a","c"),], trace = FALSE)
#' BT1cb <- BuyseTest(strata ~ TTE(eventtime, status = status, threshold = 0.1),
#'                    data= df.data[strata %in% c("b","c"),], trace = FALSE)
#' rbind("b-a" = confint(BT1ba),
#'       "c-a" = confint(BT1ca),
#'       "c-b" = confint(BT1cb)) ## not adjusted
#' confintAdj <- BuyseMultComp(list("b-a" = BT1ba, "c-a" = BT1ca, "c-b" = BT1cb),
#'                             cluster = "id", global = TRUE)
#' confintAdj
#' if(require(lava)){
#' cor(lava::iid(confintAdj))
#' }

## * BuyseMultComp (code)
##' @rdname BuyseMultComp
##' @export
BuyseMultComp <- function(object, cluster = NULL, linfct = NULL, rhs = NULL,
                          endpoint = NULL, statistic = NULL, cumulative = TRUE,
                          conf.level = NULL, band = TRUE, global = FALSE,
                          alternative = NULL, transformation = NULL, ...){

    ## ** normalize arguments
    option <- BuyseTest.options()
    call <- match.call()

    ## object
    if(inherits(object,"S4BuyseTest")){
        test.list <- FALSE
        if(any(object@weightObs!=1)){
            stop("Cannot currently handle weighted observations. \n")
        }
        name.object <- NULL
    }else if(all(sapply(object,inherits,"S4BuyseTest"))){
        n.object <- length(object)
        test.list <- TRUE
        if(is.null(names(object))){
            names(object) <- paste0("test",1:n.object)
        }
        if(any(sapply(object, function(iO){any(iO@weightObs!=1)}))){
            stop("Cannot currently handle weighted observations. \n")
        }
        name.object <- names(object)
    }else{
        stop("Incorrect \'object\': should be a BuyseTest object or a list of BuyseTest objects. \n")
    }

    ## statistic
    if(is.null(statistic)){
        statistic <- tolower(option$statistic)
    }else{
        if(length(statistic)>1){
            stop("Argument \'statistic\' must have length 1. \n")
        }
        statistic <- match.arg(statistic, c("netbenefit","winratio","favorable","unfavorable"), several.ok = FALSE)
    }

    ## endpoint
    if(test.list){
        valid.endpoint <- lapply(object,function(iBT){names(iBT@endpoint)})
        if(is.null(endpoint)){
            endpoint <- unlist(lapply(valid.endpoint, function(iE){iE[length(iE)]}))
        }else if(is.vector(endpoint)){
            if(length(endpoint)==1){
                endpoint <- rep(endpoint, n.object)
            }else if(length(endpoint)!=n.object){
                stop("Argument \'endpoint\' misspecified. \n",
                     "Should have length 1 or length ",n.object," (i.e. the number of objects in the list). \n")
            }
            if(is.numeric(endpoint)){
                for(iE in 1:length(endpoint)){
                    if(endpoint[iE]<=0 || endpoint[iE]>length(valid.endpoint[[iE]])){
                        stop("The ",iE," element in argument \'endpoint\' should be between 1 and ",length(valid.endpoint[[iE]]),". \n")
                    }
                }
            }else{
                for(iE in 1:length(endpoint)){
                    if(endpoint[iE] %in% valid.endpoint[[iE]] == FALSE){
                        stop("The ",iE," element in argument \'endpoint\' should be one of \"",paste(valid.endpoint[[iE]],collapse = "\" \""),"\". \n")
                    }
                }
            }
        }else{
            stop("Argument \'endpoint\' should be a vector. \n")
        }
    }else{
        valid.endpoint <- names(object@endpoint)
        if(is.null(endpoint)){
            endpoint <- valid.endpoint[length(valid.endpoint)]
        }else if(is.numeric(endpoint)){
            validInteger(endpoint, name1 = "endpoint", min = 1, max = length(valid.endpoint),
                         valid.length = NULL, method = "iid[BuyseTest]")
            endpoint <- valid.endpoint[endpoint]
        }else{
            validCharacter(endpoint, valid.length = 1:length(valid.endpoint),
                           valid.values = valid.endpoint, refuse.NULL = FALSE)
        }
        n.endpoint <- length(endpoint)
    }

    ## conf.level
    if(is.null(conf.level)){
        conf.level <- option$conf.level
    }

    ## alternative
    if(is.null(alternative)){
        alternative <- option$alternative
    }

    ## transformation
    if(is.null(transformation)){
        transformation <- option$transformation
    }

    ## type of transformation
    if(transformation){
        type <- switch(statistic,
                       "netbenefit" = "atanh",
                       "winratio" = "log",
                       "favorable" = "atanh2",
                       "unfavorable" = "atanh2",
                       "none" = "none")
    }else{
        type <- "none"
    }

    ## weights

    ## ** extract iid and coefficients
    if(test.list){
        if(is.null(name.object)){
            iName <- endpoint
        }else{
            if(any(duplicated(name.object))){
                iName <- paste0(name.object,": ",endpoint)
            }else{
                iName <- name.object
            }
        }
        if(any(duplicated(iName))){
            stop("Duplicated names for the estimates: provide unique name to each element of the list containing the S4BuyseTest objects. \n")
        }
        ls.beta <- lapply(1:n.object, function(iO){
            coef(object[[iO]], endpoint = endpoint[iO], statistic = statistic,
                 cumulative = cumulative, strata = "global")
        })
        vec.beta <- stats::setNames(unlist(ls.beta), iName)
        ls.iid <- lapply(1:n.object, function(iO){ ## iO <- 1
            iIID <- getIid(object[[iO]], endpoint = endpoint[iO], statistic = statistic,
                           cumulative = cumulative, strata = "global", simplify = FALSE)[["global"]]
            colnames(iIID) <- iName[iO]
            return(iIID)
        })

        if(is.null(cluster)){
            ## seqn.id <- sapply(ls.iid,NROW)
            ## cumseqn.id <- cumsum(seqn.id)
            ## n.id <- cumseqn.id[n.object]
            ## M.iid <- matrix(0, nrow = n.id, ncol = n.object*n.endpoint, dimnames = list(NULL, iName))
            ## for(iObject in 1:length(name.object)){
            ##     iStart <- c(1,cumseqn.id+1)[iObject]
            ##     iStop <- cumseqn.id[iObject]
            ##     M.iid[iStart:iStop,iName[iObject]] <- ls.iid[[iObject]]
            ## }
            stop("The argument \'cluster\' must be specified to identify the common individuals across the BuyseTest object. \n")
        }else{
            ## retrieve data
            if(is.numeric(cluster)){
                if(length(unique(sapply(ls.iid,length)))>1){
                    stop("Argument \'cluster\' cannot be numeric when the BuyseTest are performed on different number of observations. \n",
                         "Number of observations per BuyseTest: ",paste(sapply(ls.iid,length), collapse = ", "),".\n")
                }
                if(any(length(cluster) != length(ls.iid[[1]]))){
                    stop("Incorrect length for argument \'cluster\': when numeric it should have length the number of observations ",length(ls.iid[[1]]),".\n",
                         "Current length: ",length(cluster),".\n")
                }
                if(any(cluster %in% 1:length(ls.iid[[1]]) == FALSE)){
                    stop("Incorrect values for argument \'cluster\': when numeric it should takes integer values between 1 and ",length(ls.iid[[1]]),".\n",
                         "Example of incorrect value: ",cluster[cluster %in% 1:length(ls.iid[[1]]) == FALSE][1],".\n")
                }
                cluster <- lapply(1:n.object, function(iO){cluster})
            }else if(is.list(cluster)){
                if(any(sapply(cluster,is.numeric) == FALSE)){
                    stop("When a list, argument \'cluster\' should contain integers indexing the clusters")
                }
                if(any(sapply(cluster,length) != sapply(ls.iid,length))){
                    stop("Incorrect argument \'cluster\': when a list, the length of each element should match the number of observations the BuyseTest.\n",
                         "Length argument vs. BuyseTest: ",paste(paste(sapply(cluster,length), sapply(ls.iid,length), sep = " vs. "), collapse = ", "),".\n")
                }
                if(any(sapply(cluster,max) < 1) || any(unlist(lapply(cluster, `%%`, 1))!=0)){
                    stop("Incorrect argument \'cluster\': when a list, element should contain integers indexing the clusters.\n")
                }
            }else{
                cluster.var <- cluster
                cluster <- lapply(object, function(iO){
                    try(as.character(iO@call$data[[cluster.var]]), silent = TRUE)
                })
                if(any(sapply(cluster, inherits, "try-error"))){
                    indexPb <- which(sapply(cluster, inherits, "try-error"))
                    stop("Could not retrieve the cluster column \"",cluster.var,"\" from the evaluation of the call of the BuyseTest objects. \n",
                         "Problematic object(s): ",paste(iName[indexPb], collapse = ","),"\n")
                }
                if(any(sapply(cluster, length)==0)){
                    indexPb <- which(sapply(cluster, length)==0)
                    stop("Could not retrieve the cluster column \"",cluster.var,"\" from the evaluation of the call of the BuyseTest objects. \n",
                         "Problematic object(s): ",paste(iName[indexPb], collapse = ","),"\n")
                }
            }

            ## find unique clusters
            Ucluster <- unique(unlist(cluster))
            cluster.factor <- lapply(cluster, factor, levels = Ucluster)
            n.id <- length(Ucluster)

            ## store iid according to the clusters
            M.iid <- matrix(0, nrow = n.id, ncol = n.object, dimnames = list(Ucluster, iName))
            for(iObject in 1:n.object){ ## iObject <- 1
                M.iid[,iName[iObject]] <- tapply(ls.iid[[iObject]],cluster.factor[[iObject]],sum, default = 0)
            }
        }
    }else{
        iName <- endpoint
        vec.beta <- stats::setNames(coef(object, endpoint = endpoint, statistic = statistic,
                                         cumulative = cumulative, strata = "global"), iName)
        M.iid <- getIid(object, endpoint = endpoint, statistic = statistic,
                        cumulative = cumulative, strata = "global")
        colnames(M.iid) <- iName
    }
    n.beta <- length(vec.beta)
    vec.name <- names(vec.beta)

    ## ** create and apply linfct matrix
    if(is.null(linfct)){
        linfct <- diag(1, nrow = n.beta, ncol = n.beta)
        dimnames(linfct) = list(vec.name,vec.name)
    }else{
        if(NCOL(linfct) != n.beta){
            stop("Incorrect argument \'linfct\': should have ",n.beta," columns. \n")
        }
        if(is.null(colnames(linfct))){
            colnames(linfct) <- names(vec.beta)
        }else{
            if(any(vec.name %in% colnames(linfct) == FALSE)){
                stop("Missing column \"",paste0(vec.name[vec.name %in% colnames(linfct)==FALSE],collapse="\" \""),"\" in argument \'linfct\'. \n")
            }
            linfct <- linfct[,vec.name,drop=FALSE]
        }
    }
    vec.Cbeta <- t(linfct %*% vec.beta)
    M.Ciid <- M.iid %*% t(linfct)
    n.C <- NROW(linfct)
    vec.Cse <- rbind(sqrt(diag(crossprod(M.Ciid))))
    A.Ciid <- array(NA, dim = c(NROW(M.Ciid), NCOL(M.Ciid),1))
    A.Ciid[,,1] <- M.Ciid

    ## ** create rhs vector
    if(is.null(rhs)){
        rhs <- rep(switch(statistic,
                          "netbenefit" = 0,
                          "winratio" = 1,
                          "favorable" = 0.5,
                          "unfavorable" = 0.5),
                   n.C)
    }else{
        if(length(rhs) != n.C){
            stop("Length of argument \'rhs\' does not match the number of row of argument \'contrast\' (",length(rhs)," vs. ",NROW(linfct),"). \n")
        }
    }

    ## ** perform global test (single multivariate Wald test)
    if(global){
        if(transformation){
            vec.CbetaTrans <- riskRegression_transformT(estimate = vec.Cbeta, se = 1, null = rhs,
                                                        type = type, alternative = alternative)
            M.CsigmaTrans <- crossprod(riskRegression_transformIID(estimate = vec.Cbeta, iid = A.Ciid, type = type)[,,1])
            dimnames(M.CsigmaTrans) <- list(colnames(M.iid),colnames(M.iid))
        }else{
            vec.CbetaTrans <- vec.Cbeta
            M.CsigmaTrans <- crossprod(M.Ciid)
        }
        iStat <- try(as.double(vec.CbetaTrans %*% solve(M.CsigmaTrans) %*% t(vec.CbetaTrans)), silent = TRUE)
        if(inherits(iStat,"try-error")){
            out.chisq <- iStat
        }else{
            out.chisq <- data.frame(statistic = iStat, df = n.C, p.value = 1 - stats::pchisq(iStat, df = n.C))
        }
    }else{
        out.chisq <- data.frame(statistic = NA, df = NA, p.value = NA)
    }

    ## ** perform adjustment for multiple comparisons (several univariate Wald tests)
    min.value <- switch(statistic,
                        "netbenefit" = -1,
                        "winratio" = 0,
                        "favorable" = 0,
                        "unfavorable" = 0)
    max.value <- switch(statistic,
                        "netbenefit" = 1,
                        "winratio" = Inf,
                        "favorable" = 1,
                        "unfavorable" = 1)
    dots <- list(...)
    if("seed" %in% names(dots) == FALSE){
        dots$seed <- NA
    }
    if("method.band" %in% names(dots) == FALSE){
        dots$method.band <- "maxT-integration"
    }
    requireNamespace("riskRegression")
    iBand <- do.call(riskRegression::transformCIBP,
                     args = c(list(estimate = vec.Cbeta,
                                   se = vec.Cse,
                                   iid = A.Ciid,
                                   null = rhs,
                                   conf.level = conf.level,
                                   alternative = alternative,
                                   ci = TRUE, type = type,
                                   min.value = min.value, max.value = max.value,
                                   band = band, p.value = TRUE),
                              dots))

    ## ** export
    out <- list(table.uni = NULL,
                table.multi = out.chisq,
                iid = M.Ciid,
                linfct = linfct,
                quantileBand = iBand$quantile)
    if(band){
        out$table.uni <- data.frame(estimate = as.double(vec.Cbeta),
                                    se = as.double(vec.Cse),
                                    lower.ci = as.double(iBand$lower),
                                    upper.ci = as.double(iBand$upper),
                                    null = rhs,
                                    p.value = as.double(iBand$p.value),
                                    lower.band = as.double(iBand$lowerBand),
                                    upper.band = as.double(iBand$upperBand),
                                    adj.p.value = as.double(iBand$adj.p.value))
    }else{
        out$table.uni <- data.frame(estimate = as.double(vec.Cbeta),
                                    se = as.double(vec.Cse),
                                    lower.ci = as.double(iBand$lower),
                                    upper.ci = as.double(iBand$upper),
                                    null = rhs,
                                    p.value = as.double(iBand$p.value),
                                    lower.band = NA,
                                    upper.band = NA,
                                    adj.p.value = NA)
    }
    rownames(out$table.uni) <- iName
    class(out) <- append("BuyseMultComp",class(out))
    return(out)
}

## * as.data.frame.BuyseMultComp
##' @method as.data.frame BuyseMultComp
##' @export
as.data.frame.BuyseMultComp <- function(x, row.names = NULL, optional = FALSE, ...){
    return(as.data.frame(x$table.uni, row.names = row.names, optional = optional, ...))
}

## * as.data.table.BuyseMultComp
##' @method as.data.table BuyseMultComp
##' @export
as.data.table.BuyseMultComp <- function(x, keep.rownames = NULL, ...){
    return(as.data.table(x$table.uni, keep.rownames = keep.rownames, ...))
}

## * coef.BuyseMultComp
##' @method coef BuyseMultComp
##' @export
coef.BuyseMultComp <- function(object, ...){
    out <- stats::setNames(object$table.uni$estimate,rownames(object$table.uni))
    return(out)
}

## * confint.BuyseMultComp
##' @method confint BuyseMultComp
##' @export
confint.BuyseMultComp <- function(object, parm, level = 0.95, ...){
    out <- object$table.uni[,c("estimate","lower.band","upper.band","adj.p.value")]
    return(out)
}

## * iid.BuyseMultComp
##' @method iid BuyseMultComp
##' @export
iid.BuyseMultComp <- function(x, keep.rownames = NULL, ...){
    return(x$iid)
}

## * model.tables.BuyseMultComp
##' @method model.tables BuyseMultComp
##' @export
model.tables.BuyseMultComp <- function(x, type = "univariate", ...){
    type <- match.arg(type, c("univariate","multivariate"))
    if(type == "univariate"){
        return(x$table.uni)
    }else if(type == "multivariate"){
        return(x$table.multi)
    }
}

## * print.BuyseMultComp
##' @method print BuyseMultComp
##' @export
print.BuyseMultComp <- function(x, ...){
    dots <- list(...)
    if(!all(is.na(x$table.multi))){
        cat(" - Multivariate test: p.value = ",x$table.multi[,"p.value"]," (df = ",x$table.multi[,"df"],")\n",sep="")
    }
    cat(" - Univariate tests:\n",sep="")
    if(any(dots$cols %in% names(x$table.uni) == FALSE)){
        stop("Incorrect argument \'cols\'. \n",
             "Valid values: \"",paste(names(x$table.uni), collapse = "\" \""),"\".\n")
    }
    if(!is.null(dots$cols)){
        print(x$table.uni[,dots$cols,drop=FALSE])
    }else{
        print(x$table.uni)
    }
    return(invisible(NULL))
}

##----------------------------------------------------------------------
### multcomp.R ends here
/scratch/gouwar.j/cran-all/cranData/BuyseTest/R/BuyseMultComp.R
### BuyseTTEM.R --- ##---------------------------------------------------------------------- ## Author: Brice Ozenne ## Created: nov 18 2020 (12:15) ## Version: ## Last-Updated: jul 18 2023 (10:18) ## By: Brice Ozenne ## Update #: 665 ##---------------------------------------------------------------------- ## ### Commentary: ## ### Change Log: ##---------------------------------------------------------------------- ## ### Code: ## * BuyseTTEM (documentation) #' @title Time to Event Model #' @name BuyseTTEM #' #' @description Pre-compute quantities of a time to event model useful for predictions. #' Only does something for prodlim objects. #' #' @param object time to event model. #' @param treatment [character] Name of the treatment variable. #' @param iid [logical] Should the iid decomposition of the predictions be output. #' @param iid.surv [character] Estimator of the survival used when computing the influence function. #' Can be the product limit estimator (\code{"prodlim"}) or an exponential approximation (\code{"exp"}, same as in \code{riskRegression::predictCoxPL}). #' @param n.grid [integer, >0] Number of timepoints used to discretize the time scale. Not relevant for prodlim objects. #' @param ... additional arguments passed to lower lever methods. #' #' @return An S3 object of class \code{BuyseTTEM}. #' @keywords models #' #' @examples #' library(prodlim) #' library(data.table) #' #' tau <- seq(0,3,length.out=10) #' #' #### survival case #### #' set.seed(10) #' df.data <- simBuyseTest(1e2, n.strata = 2) #' #' e.prodlim <- prodlim(Hist(eventtime,status)~treatment+strata, data = df.data) #' ## plot(e.prodlim) #' #' e.prodlim2 <- BuyseTTEM(e.prodlim, treatment = "treatment", iid = TRUE) #' #' predict(e.prodlim2, time = tau, treatment = "T", strata = "a") #' predict(e.prodlim, times = tau, newdata = data.frame(treatment = "T", strata = "a")) #' #' predict(e.prodlim2, time = tau, treatment = "C", strata = "a") #' predict(e.prodlim, times = tau, newdata = data.frame(treatment = "C", strata = "a")) #' #' #### competing risk case #### #' df.dataCR <- copy(df.data) #' df.dataCR$status <- rbinom(NROW(df.dataCR), prob = 0.5, size = 2) #' #' e.prodlimCR <- prodlim(Hist(eventtime,status)~treatment+strata, data = df.dataCR) #' ## plot(e.prodlimCR) #' #' e.prodlimCR2 <- BuyseTTEM(e.prodlimCR, treatment = "treatment", iid = TRUE) #' #' predict(e.prodlimCR2, time = tau, treatment = "T", strata = "a") #' predict(e.prodlimCR, times = tau, newdata = data.frame(treatment = "T", strata = "a"), cause = 1) #' #' predict(e.prodlimCR2, time = tau, treatment = "C", strata = "a") #' predict(e.prodlimCR, times = tau, newdata = data.frame(treatment = "C", strata = "a"), cause = 1) #' @export `BuyseTTEM` <- function(object,...) UseMethod("BuyseTTEM") ## * BuyseTTEM.default #' @rdname BuyseTTEM #' @export BuyseTTEM.formula <- function(object, treatment, iid, iid.surv = "exp", ...){ e.prodlim <- prodlim(object, ...) return(BuyseTTEM(e.prodlim, treatment = treatment, iid = iid, iid.surv = iid.surv, ...)) } ## * BuyseTTEM.prodlim #' @rdname BuyseTTEM #' @export BuyseTTEM.prodlim <- function(object, treatment, iid, iid.surv = "exp", ...){ tol12 <- 1e-12 tol11 <- 1e-11 dots <- list(...) 
## ** check arguments if(any(object$time<=0)){ stop("Only handles strictly positive event times \n") } iid.surv <- match.arg(iid.surv, c("prodlim","exp")) level.treatment <- dots$level.treatment level.strata <- dots$level.strata ## ** prepare output test.CR <- switch(object$type, "surv" = FALSE, "risk" = TRUE) if(test.CR){ n.CR <- length(object$cuminc) }else{ n.CR <- 1 object$cuminc <- list(1-object$surv) object$cause.hazard <- list(object$hazard) } ## Note: object$xlevels is not in the right order object$peron <- .initPeron(X = object$X, treatment = treatment, level.treatment = level.treatment, level.strata = level.strata, xlevels = NULL, n.CR = n.CR) X <- object$peron$X treatment <- object$peron$treatment level.treatment <- object$peron$level.treatment level.strata <- object$peron$level.strata n.strata <- object$peron$n.strata strata.var <- object$peron$strata.var object.stratified <- NCOL(object$X)>1 ## ** compute start/stop indexes per strata iIndexX.C <- which(X[[treatment]]==0) iIndexX.T <- which(X[[treatment]]==1) ## position in the results of the pair (treatment,strata) iMindexX <- do.call(rbind,lapply(1:n.strata, function(iStrata){ if(object.stratified){ iIndexS <- which(X[["..strata.."]]==iStrata) return(c(C = intersect(iIndexX.C,iIndexS), T = intersect(iIndexX.T,iIndexS))) }else{ return(c(C = iIndexX.C, T = iIndexX.T)) } })) index.start <- matrix(NA, nrow = n.strata, ncol = 2, dimnames = list(NULL,level.treatment)) index.start[] <- object$first.strata[iMindexX] index.stop <- matrix(NA, nrow = n.strata, ncol = 2, dimnames = list(NULL,level.treatment)) index.stop[] <- object$first.strata[iMindexX] + object$size.strata[iMindexX] - 1 ## ** find last CIF value object$peron$last.time[,1] <- object$time[index.stop[,1]] object$peron$last.time[,2] <- object$time[index.stop[,2]] for(iCause in 1:n.CR){ object$peron$last.estimate[,1,iCause] <- object$cuminc[[iCause]][index.stop[,1]] object$peron$last.estimate[,2,iCause] <- object$cuminc[[iCause]][index.stop[,2]] } ## ** table CIF index.allJump <- sort(unlist(lapply(object$cause.hazard, function(iVec){which(iVec>0)}))) for(iStrata in 1:n.strata){ ## iStrata <- 1 object$peron$jumpSurvHaz[[iStrata]] <- setNames(vector(mode = "list", length=2), level.treatment) for(iTreat in 1:2){ ## iTreat <- 1 iMissingCIF <- sum(object$peron$last.estimate[iStrata,iTreat,])<(1-tol12) ## jump time (for all causes) in this strata iIndex.jump <- intersect(index.allJump, index.start[iStrata,iTreat]:index.stop[iStrata,iTreat]) object$peron$jumpSurvHaz[[iStrata]][[iTreat]] <- data.frame(index.jump = iIndex.jump, time.jump = object$time[iIndex.jump], survival = object$surv[iIndex.jump], do.call(cbind,setNames(lapply(object$cause.hazard, function(iVec){iVec[iIndex.jump]}), paste0("hazard",1:n.CR)))) object$peron$jumpSurvHaz[[iStrata]][[iTreat]] <- object$peron$jumpSurvHaz[[iStrata]][[iTreat]][order(object$peron$jumpSurvHaz[[iStrata]][[iTreat]]$time.jump),] iIndex.jump <- object$peron$jumpSurvHaz[[iStrata]][[iTreat]]$index.jump iStrata.nJump <- length(iIndex.jump) for(iEvent in 1:n.CR){ ## iEvent <- 1 ## CIF at each jump (if any) and add time 0 if(iStrata.nJump>0){ iStrata.extcuminc <- c(0,object$cuminc[[iEvent]][iIndex.jump]) iStrata.exttime.jump <- c(-tol12,object$time[iIndex.jump]) }else{ iStrata.extcuminc <- 0 iStrata.exttime.jump <- -tol12 } ## CIF just after the last observations if(iMissingCIF){ iStrata.extcuminc <- c(iStrata.extcuminc,NA) iStrata.exttime.jump <- c(iStrata.exttime.jump, as.double(object$peron$last.time[iStrata,iTreat]) + tol11) } ## index of the 
jumps if(iStrata.nJump>0){ iStrata.index.afterJump <- c(1,prodlim::sindex(jump.times = iStrata.exttime.jump, eval.times = object$time[iIndex.jump] + tol12)) }else{ iStrata.index.afterJump <- 1 } if(iMissingCIF){ iStrata.index.afterJump <- c(iStrata.index.afterJump, NA) } ## *** store object$peron$cif[[iStrata]][[iTreat]][[iEvent]] <- data.frame(time = iStrata.exttime.jump, cif = iStrata.extcuminc, index = iStrata.index.afterJump - 1) ## move to C++ indexing } } } ## cif <- object$peron$cif[[1]][[1]][[1]] ## (cif[cif[,"index.cif.after"]+1,"cif"] - cif[cif[,"index.cif.before"]+1,"cif"]) - cif[,"dcif"] ## ** table iid if(iid){ n.obs <- NROW(object$model.response) vec.eventtime <- object$model.response[object$originalDataOrder,"time"] if(test.CR){ vec.status <- object$model.response[object$originalDataOrder,"event"] ## 1:n.CR event, n.CR+1 censoring }else{ vec.status <- object$model.response[object$originalDataOrder,"status"] ## 0 censoring, 1 event } model.matrix <- object$model.matrix[object$originalDataOrder,,drop=FALSE] model.matrix[[treatment]] <- factor(model.matrix[[treatment]], labels = level.treatment) if(is.null(attr(strata.var,"original"))){ model.matrix <- cbind(model.matrix, "..strata.." = 1) }else if(!identical(attr(strata.var,"original"),"..strata..")){ model.matrix <- cbind(model.matrix, "..strata.." = as.numeric(factor(interaction(model.matrix[,strata.var,drop=FALSE]), levels = level.strata))) } object$peron$iid.hazard <- setNames(vector(mode = "list", length=n.strata), level.strata) object$peron$iid.survival <- setNames(vector(mode = "list", length=n.strata), level.strata) object$peron$iid.cif <- setNames(vector(mode = "list", length=n.strata), level.strata) for(iStrata in 1:n.strata){ ## iStrata <- 1 object$peron$iid.hazard[[iStrata]] <- setNames(vector(mode = "list", length=2), level.treatment) object$peron$iid.survival[[iStrata]] <- setNames(vector(mode = "list", length=2), level.treatment) object$peron$iid.cif[[iStrata]] <- setNames(vector(mode = "list", length=2), level.treatment) for(iTreat in 1:2){ ## iTreat <- 1 object$peron$iid.hazard[[iStrata]][[iTreat]] <- vector(mode = "list", length=n.CR) object$peron$iid.cif[[iStrata]][[iTreat]] <- vector(mode = "list", length=n.CR) if(object.stratified){ iIndStrata <- intersect(which(model.matrix[[treatment]]==level.treatment[iTreat]), which(model.matrix[["..strata.."]]==iStrata)) }else{ iIndStrata <- which(model.matrix[[treatment]]==level.treatment[iTreat]) } iIndex.jump <- object$peron$jumpSurvHaz[[iStrata]][[iTreat]]$index.jump iN.jump <- length(iIndex.jump) if(length(iIndex.jump)==0){ ## no event: iid = 0 for(iEvent in 1:n.CR){ ## iEvent <- 1 object$peron$iid.hazard[[iStrata]][[iTreat]][[iEvent]] <- matrix(0, nrow = n.obs, ncol = 1) object$peron$iid.survival[[iStrata]][[iTreat]][[iEvent]] <- matrix(0, nrow = n.obs, ncol = 1) object$peron$iid.cif[[iStrata]][[iTreat]][[iEvent]] <- matrix(0, nrow = n.obs, ncol = 1) } next } ## *** influence function for each cause-specific hazard for(iEvent in 1:n.CR){ ## iEvent <- 1 iJump.time <- object$time[iIndex.jump] iHazard <- object$peron$jumpSurvHaz[[iStrata]][[iTreat]][[paste0("hazard",iEvent)]] iStatus <- do.call(cbind,lapply(iJump.time, function(iTime){ (abs(vec.eventtime[iIndStrata]-iTime)<tol11)*(vec.status[iIndStrata]==iEvent) })) iAtRisk <- do.call(cbind,lapply(iJump.time, function(iTime){ vec.eventtime[iIndStrata] >= iTime })) object$peron$iid.hazard[[iStrata]][[iTreat]][[iEvent]] <- .rowScale_cpp(iStatus - .rowMultiply_cpp(iAtRisk, scale=iHazard), scale = colSums(iAtRisk)) } 
## *** influence function for the overall survival if(iid.surv == "exp"){ iSurv <- exp(-cumsum(rowSums(object$peron$jumpSurvHaz[[iStrata]][[iTreat]][,paste0("hazard",1:n.CR),drop=FALSE]))) }else if(iid.surv == "prodlim"){ iSurv <- object$peron$jumpSurvHaz[[iStrata]][[iTreat]]$survival } object$peron$iid.survival[[iStrata]][[iTreat]] <- -.rowMultiply_cpp(.rowCumSum_cpp(Reduce("+",object$peron$iid.hazard[[iStrata]][[iTreat]])), iSurv) ## *** influence function for the cumulative incidence for(iCR in 1:n.CR){ ## iCR <- 1 iParam <- na.omit(unique(object$peron$cif[[iStrata]][[iTreat]][[iCR]]$index)) object$peron$iid.cif[[iStrata]][[iTreat]][[iCR]] <- matrix(0, nrow = n.obs, ncol = length(iParam)) if(test.CR){ iHazard <- object$peron$jumpSurvHaz[[iStrata]][[iTreat]][[paste0("hazard",iCR)]] ## at t iHazard.iid <- object$peron$iid.hazard[[iStrata]][[iTreat]][[iCR]] ## at t iSurvival <- c(1,object$peron$jumpSurvHaz[[iStrata]][[iTreat]][1:(iN.jump-1),"survival"]) ## at t- iSurvival.iid <- cbind(0,object$peron$iid.survival[[iStrata]][[iTreat]][,1:(iN.jump-1),drop=FALSE]) ## at t- ## add iid at time 0 and (if censoring) NA after the last event object$peron$iid.cif[[iStrata]][[iTreat]][[iCR]][iIndStrata,] <- cbind(0,.rowCumSum_cpp(.rowMultiply_cpp(iSurvival.iid,iHazard) + .rowMultiply_cpp(iHazard.iid,iSurvival))) }else{ ## add iid at time 0 and (if censoring) NA after the last event object$peron$iid.cif[[iStrata]][[iTreat]][[iCR]][iIndStrata,] <- cbind(0,-object$peron$iid.survival[[iStrata]][[iTreat]]) } } }} } ## ** export class(object) <- append("BuyseTTEM",class(object)) return(object) } ## * BuyseTTEM.survreg #' @rdname BuyseTTEM #' @export BuyseTTEM.survreg <- function(object, treatment, n.grid = 1e3, iid, ...){ tol12 <- 1e-12 tol11 <- 1e-11 dots <- list(...) 
level.treatment <- dots$level.treatment
    level.strata <- dots$level.strata

    ## ** check arguments and prepare output
    mf <- stats::model.frame(object)
    object$peron <- .initPeron(X = mf[,-1,drop=FALSE], ## first column for the Surv object,
                               treatment = treatment,
                               level.treatment = level.treatment,
                               level.strata = level.strata,
                               xlevels = object$xlevels,
                               n.CR = 1)
    n.strata <- object$peron$n.strata
    treatment <- object$peron$treatment
    level.treatment <- object$peron$level.treatment
    level.strata <- object$peron$level.strata
    object.stratified <- NCOL(object$X)>1

    if(is.null(object$xlevels[[treatment]])){
        mf[[treatment]] <- factor(mf[[treatment]], levels = sort(unique(mf[[treatment]])), labels = level.treatment)
    }else{
        mf[[treatment]] <- factor(mf[[treatment]], levels = object$xlevels[[treatment]], labels = level.treatment)
    }
    if(any(object$y[,"time"]<=0)){
        stop("Only handles strictly positive event times \n")
    }

    ## ** handling competing risks
    object$peron$n.CR <- 1 ## survival case (one type of event)

    ## ** prepare for iid
    if(iid){
        ## *** extract information
        n.obs <- stats::nobs(object)
        X <- stats::model.matrix(stats::formula(object), mf)
        beta <- stats::coef(object)
        sigma <- object$scale
        object.iid <- lava::iid(object)
        object.iid.beta <- object.iid[,setdiff(colnames(object.iid), "logsigma"),drop=FALSE]
        if("logsigma" %in% colnames(object.iid)){
            object.iid.sigma <- object.iid[,"logsigma",drop=FALSE]
        }else{
            object.iid.sigma <- matrix(0, nrow = n.obs, ncol = 1, dimnames = list(1:n.obs,"logsigma"))
        }

        ## *** extract link and derivative
        object.dist <- survival::survreg.distributions[[object$dist]]
        object.dist$quantileM1 <- switch(object.dist$dist, ## inverse of the quantile function (i.e. cdf)
                                         "t" = function(q,df){stats::pt(q,df)},
                                         "gaussian" = function(q,parms){stats::pnorm(q)},
                                         "logistic" = function(q,parms){1/(1+exp(-q))},
                                         "extreme" = function(q,parms){1-exp(-exp(q))},
                                         NULL)
        object.dist$dquantileM1 <- switch(object.dist$dist, ## derivative of the inverse quantile function (i.e. density)
                                          "t" = function(q,df){stats::dt(q,df)},
                                          "gaussian" = function(q,parms){stats::dnorm(q)},
                                          "logistic" = function(q,parms){exp(-q)/(1+exp(-q))^2},
                                          "extreme" = function(q,parms){exp(q)*exp(-exp(q))},
                                          NULL)
        if("quantile" %in% names(object.dist) == FALSE){
            object.dist$quantile <- survival::survreg.distributions[[object.dist$dist]]$quantile
        }
        if(is.null(object.dist$itrans)){
            object.dist$trans <- function(x){x}
            object.dist$itrans <- function(x){x}
            object.dist$dtrans <- function(x){1}
        }
    }

    ## ** table CIF and iid
    grid.quantile <- seq(from=0,to=1-1/n.grid,length.out=n.grid)
    object$peron$last.estimate[] <- utils::tail(grid.quantile,1) ## final modeled survival value close to 0 i.e.
CIF close to 1 for(iStrata in 1:n.strata){ ## iStrata <- 1 for(iTreat in level.treatment){ ## iTreat <- 1 if(object.stratified){ iIndex.obs <- intersect( intersect(which(mf[[treatment]]==iTreat), which(object$peron$X[,"..strata.."]==iStrata)), which(mf[,1][,2]==1) ) }else{ iIndex.obs <- intersect(which(mf[[treatment]]==iTreat), which(mf[,1][,2]==1)) } ## jump time in this strata iNewdata <- mf[iIndex.obs[1],,drop=FALSE] iJump <- predict(object, newdata = iNewdata, p = grid.quantile, type = "quantile") object$peron$jumpSurvHaz[[iStrata]][[iTreat]] <- data.frame(index.jump = NA, time.jump = iJump, survival = 1-grid.quantile) iTime <- sort(mf[iIndex.obs,1][,1]) iIndex.param <- prodlim::sindex(jump.times = iJump, eval.times = iTime + tol12) iSurv <- object$peron$jumpSurvHaz[[iStrata]][[iTreat]]$survival[iIndex.param] object$peron$cif[[iStrata]][[iTreat]][[1]] <- data.frame(time = c(-tol12,iTime), cif = c(0,1-iSurv), index = c(0,iIndex.param-1)) object$peron$last.time[iStrata,iTreat] <- utils::tail(iJump,1) if(iid){ iP <- NROW(object$peron$jumpSurvHaz[[iStrata]][[iTreat]]) iLP <- drop(X[iIndex.obs[1],,drop=FALSE] %*% beta) object.iid.iLP <- object.iid.beta %*% t(X[iIndex.obs[1],,drop=FALSE]) ## *** compute time with se [not used] ## fit.trans <- iLP + object.dist$quantile(grid.quantile) * sigma ## range(object.dist$itrans(fit.trans) - iJump) ## fit.iid <- .rowScale_cpp(.colCenter_cpp(object.iid.sigma %*% object.dist$quantile(grid.quantile), center = - object.iid.iLP), object.dist$dtrans(object.dist$itrans(fit.trans))) ## fit.se <- sqrt(colSums(fit.iid^2)) ## quantile(predict(object, type = "quantile", p = grid.quantile, newdata = mf[iIndex.obs[1],,drop=FALSE],se = TRUE)$se - fit.se, na.rm = TRUE) ## *** compute survival with se iPred <- (object.dist$trans(iJump) - iLP)/sigma ## range(grid.quantile - object.dist$quantileM1(iPred)) object$peron$iid.cif[[iStrata]][[iTreat]][[1]] <- .rowMultiply_cpp(.colCenter_cpp(- object.iid.sigma %*% (object.dist$trans(iJump) - iLP) / sigma^2, center = object.iid.iLP / sigma), scale = object.dist$dquantileM1(iPred)) object$peron$iid.cif[[iStrata]][[iTreat]][[1]][,iJump==0] <- 0 ## range(object$peron$iid.cif[[iStrata]][[iTreat]][[1]][,10] - (- object.iid.sigma * (object.dist$trans(iJump[10]) - iLP) /sigma^2 - object.iid.iLP / sigma) * object.dist$dquantileM1(iPred[10])) } } } ## ** export class(object) <- append("BuyseTTEM",class(object)) return(object) } ## * BuyseTTEM.BuyseTTEM #' @rdname BuyseTTEM #' @export BuyseTTEM.BuyseTTEM <- function(object, ...){ return(object) } ## * .initPeron ## X (data.frame) containing the stratification variables (including treatment) ## treatment (character) variable identifying the treatment variable in X ## levels.treatment (character vector) possible values of the treatment variable ## n.CR (integer) number of competing risks ## xlevels (list) order of the levels of each factor variable (optional) .initPeron <- function(X, treatment, level.treatment, level.strata, n.CR, xlevels){ ## ** automatically set treatment and level.treatment if necessary and possible if(missing(treatment)){ if(NCOL(X)==1){ treatment <- names(X)[1] }else{ stop("Argument \'treatment\' is missing \n") } } if(treatment %in% names(X) == FALSE){ stop("Wrong specification of the argument \'treatment\' \n", "Could not be found in the prodlim object, e.g. in object$X. 
\n") } if(is.null(level.treatment)){ if(!is.null(xlevels)){ level.treatment <- xlevels[[treatment]] }else{ level.treatment <- unique(X[[treatment]]) } }else if(length(level.treatment) != length(unique(X[[treatment]]))){ stop("Wrong specification of the argument \'level.treatment\' \n", "Does not match the number of possible values in object$X. \n") } ## ** normalize treatment and strata variable to numeric ## treatment variable (convert to numeric) if(!is.numeric(X[[treatment]]) || any(X[[treatment]] %in% 0:1 == FALSE)){ X[[treatment]] <- as.numeric(factor(X[[treatment]], levels = level.treatment))-1 } ## strata variable (convert to factor with the right order) strata.var <- setdiff(names(X),treatment) if(length(strata.var)==0){ ## no existing strata variable X <- cbind(X, "..strata.." = 1) ## set all observation to strata 1 strata.var <- "..strata.." attr(strata.var,"original") <- NULL if(is.null(level.strata)){level.strata <- "REF"} }else if(identical(strata.var,"..strata..")){ ## unique strata variable already in the right format attr(strata.var,"original") <- "..strata.." if(is.null(level.strata)){level.strata <- unique(X[["..strata.."]])} }else{ ## check name of the strata variables if(any(strata.var == "..strata..")){ stop("Incorrect strata variable \n", "Cannot use \"..strata..\" as it will be used internally \n.") } attr(strata.var,"original") <- strata.var ## update the design matrix with the right ordering of the factors for(iVar in setdiff(names(xlevels),treatment)){ if(!is.null(xlevels)){ X[[iVar]] <- factor(X[[iVar]], levels = xlevels[[iVar]]) }else{ X[[iVar]] <- factor(X[[iVar]]) } } ## create the unique strata variable UX.strata <- interaction(X[,strata.var,drop=FALSE]) if(is.null(level.strata)){level.strata <- levels(UX.strata)} X <- cbind(X, "..strata.." 
= as.numeric(factor(UX.strata, levels = level.strata))) } n.strata <- length(level.strata) ## ** collect elements out <- list(cif = setNames(lapply(1:n.strata, function(iS){setNames(lapply(1:2, function(iT){vector(mode = "list", length = n.CR)}), level.treatment)}), level.strata), ## cif at each obsevation time iid.cif = setNames(lapply(1:n.strata, function(iS){setNames(lapply(1:2, function(iT){vector(mode = "list", length = n.CR)}), level.treatment)}), level.strata), ## iid of the cif over time n.CR = n.CR, ## number of competing risks X = X, ## design matrix treatment = treatment, ## name of the treatment variable level.treatment = level.treatment,## levels of the treatment variable strata.var = strata.var, ## name of the original strata values (outside the treatment) level.strata = level.strata, ## vector contain all possible strata values (outside the treatment) n.strata = n.strata, ## number of strata (outside the treatment) last.estimate = array(NA, dim = c(n.strata, 2, n.CR), dimnames = list(level.strata,level.treatment,NULL)), ## estimate of each cumulative incidence at the last observed event (array strata x treatment x cause) last.time = matrix(NA, nrow = n.strata, ncol = 2, dimnames = list(level.strata,level.treatment)), ## time of the last observed event (matrix strata x treatment) jumpSurvHaz = setNames(lapply(1:n.strata, function(iS){setNames(vector(mode = "list", length = 2), level.treatment)}), level.strata) ## index of the jump times/suvival/cause-specific hazard ## for non-censored events (all causes) in the strata (list strata x treatment) ) ## ** export return(out) } ## * predict.BuyseTTEM #' @title Prediction with Time to Event Model #' @name predict.BuyseTTEM #' #' @description Evaluate the cumulative incidence function (cif) / survival in one of the treatment groups. #' #' @param object time to event model. #' @param time [numeric vector] time at which to evaluate the cif/survival. #' @param treatment [character/integer] Treatment or index of the treatment group. #' @param strata [character/integer] Strata or index of the strata. #' @param cause [integer] The cause relative to which the cif will be evaluated. #' @param iid [logical] Should the influence function associated with the cif/survival be output? #' @param ... not used, for compatibility with the generic method. #' #' @return a list containing the survival (element \code{survival}) or the cumulative incidence function (element \code{cif}), #' and possible standard errors (element \code{.se}) and influence function (element \code{.iid}). 
#'
#' @keywords methods
#'
#' @export
predict.BuyseTTEM <- function(object, time, treatment, strata, cause = 1, iid = FALSE, ...){

    ## ** normalize input
    if(!is.numeric(treatment)){
        treatment <- which(treatment == object$peron$level.treatment)
    }
    if(missing(strata) && object$peron$n.strata == 1){
        strata <- 1
    }
    type <- ifelse(object$peron$n.CR==1,"survival","competing.risks")

    ## ** output last cif estimate
    if(identical(time,"last")){
        if(type=="survival"){
            return(1-object$peron$last.estimate[strata,treatment,cause])
        }else{
            return(object$peron$last.estimate[strata,treatment,cause])
        }
    }else if(identical(time,"jump")){
        return(object$peron$jumpSurvHaz[[strata]][[treatment]]$time.jump)
    }

    ## ** extract information
    out <- list()
    table.cif <- object$peron$cif[[strata]][[treatment]][[cause]]
    index.table <- pmax(1,prodlim::sindex(jump.times = table.cif$time, eval.times = time)) ## pmax with 1 takes care of evaluation times before the first entry (e.g. negative times)
    out$index <- table.cif[index.table,"index"]

    if(type=="survival"){
        out$survival <- 1-table.cif[index.table,"cif"]
        if(iid){
            out$survival.iid <- lava::iid(object, strata = strata, treatment = treatment, cause = cause)[,out$index+1,drop=FALSE]
            out$survival.se <- sqrt(colSums(out$survival.iid^2))
        }
    }else{
        out$cif <- table.cif[index.table,"cif"]
        if(iid){
            out$cif.iid <- lava::iid(object, strata = strata, treatment = treatment, cause = cause)[,out$index+1,drop=FALSE]
            out$cif.se <- sqrt(colSums(out$cif.iid^2))
        }
    }

    ## ** export
    return(out)
}

## * iid.BuyseTTEM
#' @export
iid.BuyseTTEM <- function(x, treatment, strata, cause = 1, ...){
    object <- x
    if(is.null(object$peron$iid.cif[[strata]][[treatment]][[cause]])){
        stop("iid decomposition not available - consider setting the argument \'iid\' to TRUE when calling BuyseTTEM. \n")
    }
    type <- ifelse(object$peron$n.CR==1,"survival","competing.risks")
    if(type=="survival"){
        return(-object$peron$iid.cif[[strata]][[treatment]][[cause]])
    }else{
        return(object$peron$iid.cif[[strata]][[treatment]][[cause]])
    }
}

######################################################################
### BuyseTTEM.R ends here
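
## * Illustration (editor's sketch, not part of the original file)
## A minimal sketch of predict.BuyseTTEM with influence functions, re-using the
## simulated data from the roxygen example of BuyseTTEM above; everything else
## is assumed for illustration only.
if(FALSE){
    library(prodlim)
    set.seed(10)
    df.data <- simBuyseTest(1e2, n.strata = 2)
    e.ttem <- BuyseTTEM(prodlim(Hist(eventtime,status) ~ treatment + strata, data = df.data),
                        treatment = "treatment", iid = TRUE)
    iPred <- predict(e.ttem, time = c(0.5,1,2), treatment = "T", strata = "a", iid = TRUE)
    iPred$survival     ## point estimates
    iPred$survival.se  ## standard errors derived from the influence function
    predict(e.ttem, time = "last", treatment = "T", strata = "a") ## survival at the last observed event
}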
/scratch/gouwar.j/cran-all/cranData/BuyseTest/R/BuyseTTEM.R
### BuyseTest-Peron.R --- ##---------------------------------------------------------------------- ## Author: Brice Ozenne ## Created: okt 12 2020 (11:10) ## Version: ## Last-Updated: jul 18 2023 (12:03) ## By: Brice Ozenne ## Update #: 548 ##---------------------------------------------------------------------- ## ### Commentary: ## ### Change Log: ##---------------------------------------------------------------------- ## ### Code: ## * calcPeron #' @noRd calcPeron <- function(data, model.tte, fitter, args, method.score, paired, treatment, level.treatment, endpoint, endpoint.TTE, endpoint.UTTE, status, status.TTE, status.UTTE, D.TTE, D.UTTE, level.strata, n.strata, strata, threshold, restriction, precompute, iidNuisance, out){ zeroPlus <- 1e-12 ## ** fit model for the cumulative incidence function (survival case or competing risk case) ls.indexAssociatedEndpoint <- setNames(vector(mode = "list", length = D.UTTE), endpoint.UTTE) test.CR <- setNames(vector(mode = "logical", length = D.UTTE), endpoint.UTTE) ## prepare formula if(is.null(model.tte)){ model.tte <- vector(length = D.UTTE, mode = "list") names(model.tte) <- endpoint.UTTE tofit <- TRUE if(length(args)==0){args <- NULL} txt.fitter <- sapply(fitter, switch, "prodlim" = "prodlim::Hist", "survreg" = "survival::Surv", NA) if(paired){ txt.modelUTTE <- paste0(txt.fitter,"(",endpoint.UTTE,",",status.UTTE,") ~ ",treatment) }else{ txt.modelUTTE <- paste0(txt.fitter,"(",endpoint.UTTE,",",status.UTTE,") ~ ",treatment," + ..strata..") } }else{ tofit <- FALSE } ## fit survival model and prepare for extracting survival for(iUTTE in 1:D.UTTE){ ## iUTTE <- 1 ls.indexAssociatedEndpoint[[iUTTE]] <- which(endpoint == endpoint.UTTE[iUTTE]) test.CR[iUTTE] <- any(method.score[ls.indexAssociatedEndpoint[[iUTTE]]]=="CRPeron") if(fitter[iUTTE]=="prodlim"){ if(tofit){ model.tte[[iUTTE]] <- do.call(prodlim::prodlim, args = c(list(as.formula(txt.modelUTTE[iUTTE]), data = data, discrete.level = 1e5), args)) } }else if(fitter[iUTTE]=="survreg"){ if(tofit){ model.tte[[iUTTE]] <- do.call(survival::survreg, args = c(list(as.formula(txt.modelUTTE[iUTTE]), data = data), args)) } } model.tte[[iUTTE]] <- BuyseTTEM(model.tte[[iUTTE]], treatment = treatment, level.treatment = level.treatment, level.strata = level.strata, iid = iidNuisance) } ## ** estimate quantities for scoring pairs for(iUTTE in 1:D.UTTE){ ## iUTTE <- 1 iN.CR <- model.tte[[iUTTE]]$peron$n.CR ## fitted survival at event timepoints for(iStrata in 1:n.strata){ for(iTreat in level.treatment){ iStoreJump <- c("survJumpC","survJumpT")[(iTreat==level.treatment[2])+1] iStoreTime <- c("survTimeC","survTimeT")[(iTreat==level.treatment[2])+1] iStoreP <- c("p.C","p.T")[(iTreat==level.treatment[2])+1] iTreat.num <- as.numeric(iTreat==level.treatment[2]) iTime <- data[list(iTreat.num,iStrata),.SD[[endpoint.UTTE[iUTTE]]],on=c(treatment,"..strata..")] iTime.jump <- predict(model.tte[[iUTTE]], time = "jump", strata = iStrata, treatment = iTreat) iRestriction <- restriction[ls.indexAssociatedEndpoint[[iUTTE]][1]] if(!is.na(iRestriction)){ iTime <- pmin(iTime, iRestriction) ## take minimum between outcome and restriction iTime.jump <- iTime.jump[iTime.jump <= iRestriction] ## remove jumps after restriction } if(length(iTime.jump)==0){iTime.jump <- 0} iPred1.C <- predict(model.tte[[iUTTE]], time = iTime, treatment = level.treatment[1], strata = iStrata, cause = 1) iPred1.T <- predict(model.tte[[iUTTE]], time = iTime, treatment = level.treatment[2], strata = iStrata, cause = 1) if(iN.CR>1){ iPred2 <- 
predict(model.tte[[iUTTE]], time = iTime, treatment = iTreat, strata = iStrata, cause = 2) } iPred1.iTreat.beforeJump <- predict(model.tte[[iUTTE]], time = iTime.jump-zeroPlus, treatment = iTreat, strata = iStrata, iid = iidNuisance) iPred1.iTreat.afterJump <- predict(model.tte[[iUTTE]], time = iTime.jump+zeroPlus, treatment = iTreat, strata = iStrata, iid = iidNuisance) ## technically already computed in the previous lines iLastEstimate <- sapply(1:iN.CR, function(iCause){ if(is.na(iRestriction) || model.tte[[iUTTE]]$peron$last.time[iStrata,iTreat]<=iRestriction){ return(predict(model.tte[[iUTTE]], time = "last", strata = iStrata, treatment = iTreat, cause = iCause)) }else{ ## no remainder term if end of the survival curve after restriction (i.e. fully known survival up to the restriction) return(0) } }) for(iEndpoint in ls.indexAssociatedEndpoint[[iUTTE]]){ iThreshold <- threshold[iEndpoint] ## last estimate of the survival/cif out$lastSurv[[iEndpoint]][iStrata,seq(from=iTreat.num+1, by = 2, length=iN.CR)] <- iLastEstimate if(test.CR[iUTTE]){ ## *** CIF at jump times iPred1.iOther.beforeTau <- predict(model.tte[[iUTTE]], time = iTime.jump - iThreshold, treatment = setdiff(level.treatment,iTreat), strata = iStrata) iPred1.iOther.afterTau <- predict(model.tte[[iUTTE]], time = iTime.jump + iThreshold, treatment = setdiff(level.treatment,iTreat), strata = iStrata) out[[iStoreJump]][[iEndpoint]][[iStrata]] <- cbind("time" = iTime.jump, "CIF1-threshold" = iPred1.iOther.beforeTau$cif, "CIF1+threshold" = iPred1.iOther.afterTau$cif, "dCIF" = iPred1.iTreat.afterJump$cif - iPred1.iTreat.beforeJump$cif, "index.CIF1-threshold" = iPred1.iOther.beforeTau$index, "index.CIF1+threshold" = iPred1.iOther.afterTau$index, "index.dCIF11" = iPred1.iTreat.beforeJump$index, "index.dCIF12" = iPred1.iTreat.afterJump$index) if(iidNuisance){ out$iid[[iStoreJump]][[iUTTE]][[iStrata]] <- cbind(lava::iid(model.tte[[iUTTE]], strata = iStrata, treatment = iTreat, cause = 1), lava::iid(model.tte[[iUTTE]], strata = iStrata, treatment = iTreat, cause = 2)) out[[iStoreP]][iStrata, iEndpoint] <- NCOL(out$iid[[iStoreJump]][[iUTTE]][[iStrata]]) } ## *** CIF at observation time (+/- threshold) iPred1.C.beforeTau <- predict(model.tte[[iUTTE]], time = iTime - iThreshold, treatment = level.treatment[1], strata = iStrata) iPred1.C.afterTau <- predict(model.tte[[iUTTE]], time = iTime + iThreshold, treatment = level.treatment[1], strata = iStrata) iPred1.T.beforeTau <- predict(model.tte[[iUTTE]], time = iTime - iThreshold, treatment = level.treatment[2], strata = iStrata) iPred1.T.afterTau <- predict(model.tte[[iUTTE]], time = iTime + iThreshold, treatment = level.treatment[2], strata = iStrata) out[[iStoreTime]][[iEndpoint]][[iStrata]] <- cbind("time" = iTime, ## 0 "CIF1C-threshold" = iPred1.C.beforeTau$cif, ## 1 "CIF1C_0" = iPred1.C$cif, ## 2 "CIF1C+threshold" = iPred1.C.afterTau$cif, ## 3 "CIF1T-threshold" = iPred1.T.beforeTau$cif, ## 4 "CIF1T_0" = iPred1.T$cif, ## 5 "CIF1T+threshold" = iPred1.T.afterTau$cif, ## 6 "CIF2_0" = iPred2$cif, ## 7 "index.CIF1C-threshold" = iPred1.C.beforeTau$index, ## 8 "index.CIF1C_0" = iPred1.C$index, ## 9 "index.CIF1C+threshold" = iPred1.C.afterTau$index, ## 10 "index.CIF1T-threshold" = iPred1.T.beforeTau$index, ## 11 "index.CIF1T_0" = iPred1.T$index, ## 12 "index.CIF1T+threshold" = iPred1.T.afterTau$index, ## 13 "index.CIF2_0" = iPred2$index) ## 14 }else{ ## *** survival at jump time iTimeTau.jump <- iTime.jump + iThreshold if(!is.na(iRestriction)){ ## remove jump that are such that jump+threshold are 
after restriction iSubset.restriction <- which(iTimeTau.jump<=iRestriction) }else{ iSubset.restriction <- 1:length(iTimeTau.jump) } if(iidNuisance){ out$iid[[iStoreJump]][[iUTTE]][[iStrata]] <- lava::iid(model.tte[[iUTTE]], strata = iStrata, treatment = iTreat) out[[iStoreP]][iStrata, iEndpoint] <- NCOL(out$iid[[iStoreJump]][[iUTTE]][[iStrata]]) if(any(is.na(out$iid[[iStoreJump]][[iUTTE]][[iStrata]]))){ stop("NA in the iid decomposition of the survival model. \n") } } if(length(iSubset.restriction)==0){ out[[iStoreJump]][[iEndpoint]][[iStrata]] <- cbind(time = 0, ## jump time survival = 1, dSurvival = 0, index.survival = NA, ## index of the survival parameter at t+\tau index.dsurvival1 = NA, ## index of the survival parameter before the jump index.dsurvival2 = NA) ## index of the survival parameter after the jump }else{ iSurvTau.jump <- predict(model.tte[[iUTTE]], time = iTimeTau.jump[iSubset.restriction], treatment = setdiff(level.treatment, iTreat), strata = iStrata, iid = iidNuisance) out[[iStoreJump]][[iEndpoint]][[iStrata]] <- cbind(time = iTime.jump[iSubset.restriction], ## jump time survival = iSurvTau.jump$survival, dSurvival = iPred1.iTreat.afterJump$survival[iSubset.restriction] - iPred1.iTreat.beforeJump$survival[iSubset.restriction], index.survival = iSurvTau.jump$index, ## index of the survival parameter at t+\tau index.dsurvival1 = iPred1.iTreat.beforeJump$index[iSubset.restriction], ## index of the survival parameter before the jump index.dsurvival2 = iPred1.iTreat.afterJump$index[iSubset.restriction]) ## index of the survival parameter after the jump } ## *** survival at observation time (+/- threshold) iPred.C.beforeTau <- predict(model.tte[[iUTTE]], time = iTime - iThreshold, treatment = level.treatment[1], strata = iStrata) iPred.C.afterTau <- predict(model.tte[[iUTTE]], time = iTime + iThreshold, treatment = level.treatment[1], strata = iStrata) iPred.T.beforeTau <- predict(model.tte[[iUTTE]], time = iTime - iThreshold, treatment = level.treatment[2], strata = iStrata) iPred.T.afterTau <- predict(model.tte[[iUTTE]], time = iTime + iThreshold, treatment = level.treatment[2], strata = iStrata) out[[iStoreTime]][[iEndpoint]][[iStrata]] <- cbind("time" = iTime, "survivalC-threshold" = iPred.C.beforeTau$survival, "survivalC_0" = iPred1.C$survival, "survivalC+threshold" = iPred.C.afterTau$survival, "survivalT-threshold" = iPred.T.beforeTau$survival, "survivalT_0" = iPred1.T$survival, "survivalT+threshold" = iPred.T.afterTau$survival, "index.survivalC-threshold" = iPred.C.beforeTau$index, "index.survivalC_0" = iPred1.C$index, "index.survivalC+threshold" = iPred.C.afterTau$index, "index.survivalT-threshold" = iPred.T.beforeTau$index, "index.survivalT_0" = iPred1.T$index, "index.survivalT+threshold" = iPred.T.afterTau$index ) } ## End if CR } ## End if iEndpoint } ## End if treatment } ## End if strata } ## End if UTTE ## ** pre-compute integrals for(iEndpoint in 1:length(endpoint)){ ## iEndpoint <- 1 if(!precompute || method.score[iEndpoint] %in% c("continuous","gaussian")){next} ## only relevant for survival/ competing risk with Peron for(iStrata in 1:n.strata){ ## iStrata <- 1 if(method.score[iEndpoint]=="SurvPeron"){ ## compute integral at any jump time ls.intC <- calcIntegralSurv2_cpp(time = out$survJumpC[[iEndpoint]][[iStrata]][,"time"], survival = out$survJumpC[[iEndpoint]][[iStrata]][,"survival"], dSurvival = out$survJumpC[[iEndpoint]][[iStrata]][,"dSurvival"], index_survival = out$survJumpC[[iEndpoint]][[iStrata]][,"index.survival"], index_dSurvival1 = 
out$survJumpC[[iEndpoint]][[iStrata]][,"index.dsurvival1"], index_dSurvival2 = out$survJumpC[[iEndpoint]][[iStrata]][,"index.dsurvival2"], lastSurv = out$lastSurv[[iEndpoint]][iStrata,2], lastdSurv = out$lastSurv[[iEndpoint]][iStrata,1], iidNuisance = iidNuisance, nJump = NROW(out$survJumpC[[iEndpoint]][[iStrata]])) ls.intT <- calcIntegralSurv2_cpp(time = out$survJumpT[[iEndpoint]][[iStrata]][,"time"], survival = out$survJumpT[[iEndpoint]][[iStrata]][,"survival"], dSurvival = out$survJumpT[[iEndpoint]][[iStrata]][,"dSurvival"], index_survival = out$survJumpT[[iEndpoint]][[iStrata]][,"index.survival"], index_dSurvival1 = out$survJumpT[[iEndpoint]][[iStrata]][,"index.dsurvival1"], index_dSurvival2 = out$survJumpT[[iEndpoint]][[iStrata]][,"index.dsurvival2"], lastSurv = out$lastSurv[[iEndpoint]][iStrata,1], lastdSurv = out$lastSurv[[iEndpoint]][iStrata,2], iidNuisance = iidNuisance, nJump = NROW(out$survJumpT[[iEndpoint]][[iStrata]])) ## evaluate compute integral just before the observation time (possibly shifted by tau) ## e.g. jump.times = 1:3, eval.times = c(0,1,1.1,2,3,4) should give c(1,2,2,3,4,4) index.dSurvivalT.tau <- prodlim::sindex(jump.times = ls.intT$time, eval.times = out$survTimeC[[iEndpoint]][[iStrata]][,"time"] - threshold[iEndpoint]) + 1 index.dSurvivalC.0 <- prodlim::sindex(jump.times = ls.intC$time, eval.times = out$survTimeC[[iEndpoint]][[iStrata]][,"time"]) + 1 index.dSurvivalC.tau <- prodlim::sindex(jump.times = ls.intC$time, eval.times = out$survTimeT[[iEndpoint]][[iStrata]][,"time"] - threshold[iEndpoint]) + 1 index.dSurvivalT.0 <- prodlim::sindex(jump.times = ls.intT$time, eval.times = out$survTimeT[[iEndpoint]][[iStrata]][,"time"]) + 1 ## get survivals out$survTimeC[[iEndpoint]][[iStrata]] <- cbind(out$survTimeC[[iEndpoint]][[iStrata]], "int.dSurvivalT-threshold_lower" = ls.intT$intSurv_lower[index.dSurvivalT.tau], "int.dSurvivalT-threshold_upper" = ls.intT$intSurv_upper[index.dSurvivalT.tau], "int.dSurvivalC_0_lower" = ls.intC$intSurv_lower[index.dSurvivalC.0], "int.dSurvivalC_0_upper" = ls.intC$intSurv_upper[index.dSurvivalC.0]) out$survTimeT[[iEndpoint]][[iStrata]] <- cbind(out$survTimeT[[iEndpoint]][[iStrata]], "int.dSurvivalC-threshold_lower" = ls.intC$intSurv_lower[index.dSurvivalC.tau], "int.dSurvivalC-threshold_upper" = ls.intC$intSurv_upper[index.dSurvivalC.tau], "int.dSurvivalT_0_lower" = ls.intT$intSurv_lower[index.dSurvivalT.0], "int.dSurvivalT_0_upper" = ls.intT$intSurv_upper[index.dSurvivalT.0]) }else{ stop("precompute terms for competing risks not implemented") ## to be done } if(iidNuisance){ if(method.score[iEndpoint]=="SurvPeron"){ colnames(ls.intC$intSurv_deriv) <- c("index.jump","time","index.param.surv","value.surv","index.param.dsurv1","value.dsurv1","index.param.dsurv2","value.dsurv2") colnames(ls.intT$intSurv_deriv) <- c("index.jump","time","index.param.surv","value.surv","index.param.dsurv1","value.dsurv1","index.param.dsurv2","value.dsurv2") out$survJumpC[[iEndpoint]][[iStrata]] <- ls.intC$intSurv_deriv out$survJumpT[[iEndpoint]][[iStrata]] <- ls.intT$intSurv_deriv out$survTimeC[[iEndpoint]][[iStrata]] <- cbind(out$survTimeC[[iEndpoint]][[iStrata]], "index.int.dSurvivalT-threshold" = index.dSurvivalT.tau-1, "indexMax.int.dSurvivalT-threshold" = NROW(ls.intT$intSurv_deriv)-1, "index.int.dSurvivalC_0" = index.dSurvivalC.0-1, "indexMax.int.dSurvivalC_0" = NROW(ls.intC$intSurv_deriv)-1) out$survTimeT[[iEndpoint]][[iStrata]] <- cbind(out$survTimeT[[iEndpoint]][[iStrata]], "index.int.dSurvivalC-threshold" = index.dSurvivalC.tau-1, 
"indexMax.int.dSurvivalC-threshold" = NROW(ls.intC$intSurv_deriv)-1, "index.int.dSurvivalT_0" = index.dSurvivalT.0-1, "indexMax.int.dSurvivalT_0" = NROW(ls.intT$intSurv_deriv)-1) }else{ ## to be done } }else{ out$survJumpC[[iEndpoint]][[iStrata]] <- matrix(0, nrow = 0, ncol = 0) out$survJumpT[[iEndpoint]][[iStrata]] <- matrix(0, nrow = 0, ncol = 0) } } } ## ** export return(out) } ## ## * .sindex2 ## ## e.g. jump.times = 1:3, eval.times = c(0,1,1.1,2,3,4) should give c(1,2,2,3,4,4) ## ##' .sindex2(jump.times = 1:3, eval.times = c(0,1,1.1,2,3,4)) ## ##' prodlim::sindex(jump.times = 1:3, eval.times = c(0,1,1.1,2,3,4))+1 ## ##' 3 - prodlim::sindex(jump.times = 1:3, eval.times = c(0,1,1.1,2,3,4), strict = TRUE, comp = "greater") + 1 ## .sindex2 <- function(jump.times, eval.times){ ## return(length(jump.times)-prodlim::sindex(jump.times = jump.times, eval.times = eval.times, strict = TRUE, comp = "greater")+1) ## } ###################################################################### ### BuyseTest-Peron.R ends here
/scratch/gouwar.j/cran-all/cranData/BuyseTest/R/BuyseTest-Peron.R
### BuyseTest-check.R ---
##----------------------------------------------------------------------
## Author: Brice Ozenne
## Created: apr 27 2018 (23:32)
## Version:
## Last-Updated: jul 18 2023 (09:34)
## By: Brice Ozenne
## Update #: 344
##----------------------------------------------------------------------
##
### Commentary:
##
### Change Log:
##----------------------------------------------------------------------
##
### Code:

## * testArgs
##' @title Check Arguments Passed to BuyseTest
##' @description Check the validity of the arguments passed to the BuyseTest function by the user.
##' @noRd
##'
##' @author Brice Ozenne
testArgs <- function(name.call, status, correction.uninf, cpus, data, endpoint, engine, formula,
                     iid, iidNuisance, keep.pairScore, scoring.rule, pool.strata, model.tte,
                     method.inference, n.resampling, strata.resampling, hierarchical,
                     neutral.as.uninf, add.halfNeutral, operator, censoring, restriction, seed,
                     strata, threshold, trace, treatment, type, weightEndpoint, weightObs, ...){

    ## ** data
    if (!data.table::is.data.table(data)) {
        if(inherits(data,"function")){
            stop("Argument \'data\' is misspecified. \n",
                 "\'data\' cannot be a function. \n")
        }
        data <- data.table::as.data.table(data)
    }else{
        data <- data.table::copy(data)
    }
    if("..rowIndex.." %in% names(data)){
        stop("BuyseTest: Argument \'data\' must not contain a column \"..rowIndex..\". \n")
    }
    if("..NA.." %in% names(data)){
        stop("BuyseTest: Argument \'data\' must not contain a column \"..NA..\". \n")
    }
    if("..strata.." %in% names(data)){
        stop("BuyseTest: Argument \'data\' must not contain a column \"..strata..\". \n")
    }
    if("..weight.." %in% names(data)){
        stop("BuyseTest: Argument \'data\' must not contain a column \"..weight..\". \n")
    }

    ## ** extract useful quantities
    argnames <- c("treatment", "endpoint", "type", "threshold", "status", "strata")

    D <- length(endpoint)
    D.TTE <- sum(type == "tte") # number of time to event endpoints
    level.treatment <- levels(as.factor(data[[treatment]]))
    if(is.null(strata)){
        n.strata <- 1
    }else{
        indexT <- which(data[[treatment]] == level.treatment[2])
        indexC <- which(data[[treatment]] == level.treatment[1])
        if(any(strata %in% names(data) == FALSE)){
            stop("Strata variable(s) \"",paste0(strata,collapse="\" \""),"\" not found in argument \'data\' \n")
        }
        strataT <- interaction(data[indexT,strata,with=FALSE], drop = TRUE, lex.order=FALSE, sep=".")
        strataC <- interaction(data[indexC,strata,with=FALSE], drop = TRUE, lex.order=FALSE, sep=".")
        level.strata <- levels(strataT)
        n.strata <- length(level.strata)
    }

    ## ** status
    if(length(status) != D){
        stop("BuyseTest: \'status\' does not match \'endpoint\' size. \n",
             "length(status): ",length(status),"\n",
             "length(endpoint) : ",D,"\n")
    }
    if(any(is.na(status))){
        stop("BuyseTest: \'status\' must not contain NA. \n")
    }
    index.pb <- which(status[type=="tte"] == "..NA..")
    if(length(index.pb)>0){
        if(all(attr(censoring,"original")[index.pb] %in% names(data))){
            stop("BuyseTest: wrong specification of \'status\'. \n",
                 "\'status\' must indicate a variable in data for TTE endpoints. \n",
                 "\'censoring\' is used to indicate whether there is left or right censoring. \n",
                 "Consider changing \'censoring =\' into \'status =\' in the argument \'formula\' \n")
        }else{
            stop("BuyseTest: wrong specification of \'status\'. \n",
                 "\'status\' must indicate a variable in data for TTE endpoints. \n",
                 "TTE endpoints: ",paste(endpoint[type=="tte"],collapse=" "),"\n",
                 "proposed \'status\' for these endpoints: ",paste(status[type=="tte"],collapse=" "),"\n")
        }
    }
    index.pb <- which(status[type=="gaus"] == "..NA..")
    if(length(index.pb)>0){
        stop("BuyseTest: wrong specification of \'std\'. \n",
             "\'std\' must indicate a variable in data for Gaussian endpoints. \n",
             "Gaussian endpoints: ",paste(endpoint[type=="gaus"],collapse=" "),"\n",
             "proposed \'std\' for these endpoints: ",paste(status[type=="gaus"],collapse=" "),"\n")
    }
    if(any(status[type %in% c("bin","cont")] !="..NA..") ){
        stop("BuyseTest: wrong specification of \'status\'. \n",
             "\'status\' must be \"..NA..\" for binary or continuous endpoints. \n",
             "endpoints : ",paste(endpoint[type %in% c("bin","cont")],collapse=" "),"\n",
             "proposed \'status\' for these endpoints: ",paste(status[type %in% c("bin","cont")],collapse=" "),"\n")
    }
    Ustatus.TTE <- unique(status[type=="tte"])
    if(any(Ustatus.TTE %in% names(data) == FALSE)){
        stop("BuyseTest: variable(s) \'status\': \"",paste0(Ustatus.TTE[Ustatus.TTE %in% names(data) == FALSE], collapse = "\" \""),"\" \n",
             "not found in argument \'data\'.\n")
    }
    if(is.null(strata)){
        if(any(sapply(Ustatus.TTE, function(iS){sum(data[[iS]]!=0)})==0)){
            warning("BuyseTest: time to event variables with only censored events \n")
        }
    }else{
        strata.tempo <- data[[strata]]
        if(is.factor(strata.tempo)){strata.tempo <- droplevels(strata.tempo)} ## otherwise the next tapply statement generates NA when there are empty levels which leads to an error
        ## if non-paired data (i.e. more than 2 obs per strata)
        if(any(table(strata.tempo)>2) && any(sapply(Ustatus.TTE, function(iS){tapply(data[[iS]], strata.tempo, function(iVec){sum(iVec!=0)})})==0)){
            warning("BuyseTest: time to event variables with only censored events in at least one strata \n")
        }
    }

    ## ** censoring
    if(any(type=="gaus")){ ## iid has been internally stored in the censoring variable
        censoring.gaus <- na.omit(censoring[type=="gaus"])
        if(length(censoring.gaus)>0 && any(censoring.gaus %in% names(data) == FALSE)){
            stop("BuyseTest: wrong specification of \'iid\'. \n",
                 "\'iid\' must indicate a variable in argument \'data\'. \n",
                 "incorrect \'iid\' value(s): \"",paste(censoring.gaus[censoring.gaus %in% names(data) == FALSE], collapse = "\" \""),"\" \n")
        }
    }else if(any(type=="tte")){
        censoring.tte <- censoring[type=="tte"]
        if(any(is.na(censoring.tte))){
            stop("BuyseTest: wrong specification of \'censoring\'. \n",
                 "\'censoring\' must be \"left\", or \"right\" for time to event endpoints. \n",
                 "incorrect \'censoring\' value(s): \"",paste(censoring.tte[is.na(censoring.tte)], collapse = "\" \""),"\" \n")
        }
        if(any(censoring.tte %in% c("left","right") == FALSE)){
            stop("BuyseTest: wrong specification of \'censoring\'. \n",
                 "\'censoring\' must be \"left\", or \"right\" \n",
                 "incorrect \'censoring\' value(s): \"",paste(censoring.tte[censoring.tte %in% c("left","right") == FALSE], collapse = "\" \""),"\" \n")
        }
    }

    ## ** restriction
    if(any(type[which(!is.na(restriction))] %in% c("tte","cont") == FALSE)){
        stop("Type(s) \"",paste(unique(setdiff(type[which(!is.na(restriction))], c("tte","cont"))), collapse = "\" \""),"\" do not support argument restriction. \n")
    }

    ## ** cpus
    if(cpus>1){
        validInteger(cpus,
                     valid.length = 1,
                     min = 1,
                     max = parallel::detectCores(),
                     method = "BuyseTest")
    }

    ## ** scoring.rule
    ## must be before time to event endpoints
    if(is.na(scoring.rule)){
        stop("BuyseTest: wrong specification of \'scoring.rule\'. \n",
             "valid values: \"Gehan\" \"Gehan corrected\" \"Peron\" \"Peron corrected\". \n")
    }
    if(scoring.rule>0 && any(censoring=="left")){
        warning("The Peron's scoring rule does not support left-censored endpoints \n",
                "For those endpoints, the Gehan's scoring rule will be used instead.")
    }

    ## ** pool.strata
    if(is.na(pool.strata)){
        stop("BuyseTest: wrong specification of \'pool.strata\'. \n",
             "valid values: \"Buyse\", \"CMH\", \"equal\", \"var-favorable\", \"var-unfavorable\", \"var-netBenefit\", \"var-winRatio\". \n")
    }

    ## ** model.tte
    if(!is.null(model.tte)){
        endpoint.UTTE <- unique(endpoint[type=="tte"])
        D.UTTE <- length(endpoint.UTTE)

        if(!is.list(model.tte) || length(model.tte) != D.UTTE){
            stop("BuyseTest: argument \'model.tte\' must be a list containing ",D.UTTE," elements. \n",
                 "(one for each unique time to event endpoint). \n")
        }
        if(is.null(names(model.tte)) || any(names(model.tte) != endpoint.UTTE)){
            stop("BuyseTest: argument \'model.tte\' must be a named list. \n",
                 "valid sequence of names: \"",paste0(endpoint.UTTE, collapse = "\" \""),"\" \n",
                 "proposed names: \"",paste0(names(model.tte), collapse = "\" \""),"\" \n")
        }
        valid.class <- setdiff(utils::methods(generic.function = "BuyseTTEM"), c("BuyseTTEM.formula"))
        vec.class <- sapply(model.tte, function(iTTE){any(paste0("BuyseTTEM.",class(iTTE)) %in% valid.class)})
        if(any(vec.class == FALSE) ){
            stop("BuyseTest: argument \'model.tte\' must be a list of \"",paste0(gsub("BuyseTTEM\\.","",valid.class), collapse = "\", or \""),"\" objects. \n")
        }
        test.prodlim.continuous <- sapply(model.tte, function(iModel){
            inherits(iModel,"prodlim") && length(iModel$continuous.predictor)>0
        })
        if(any(test.prodlim.continuous)){
            stop("Incorrect model for time to event: cannot handle continuous variables. \n",
                 "Consider setting the argument \"discrete.level\" to a large value when calling prodlim for endpoint(s) \"",paste(names(test.prodlim.continuous)[test.prodlim.continuous], collapse = "\" \""),"\". \n")
        }
        vec.predictors <- sapply(model.tte, function(iTTE){identical(sort(all.vars(stats::update(stats::formula(iTTE), "0~."))), sort(c(treatment,strata)))})
        if(any(vec.predictors == FALSE) ){
            stop("BuyseTest: argument \'model.tte\' must be a list of objects with \"",paste0(c(treatment,strata),collapse = "\" \""),"\" as predictors. \n")
        }
    }

    ## ** data (endpoints)

    ## *** binary endpoints
    index.Bin <- which(type=="bin")
    if(length(index.Bin)>0){
        for(iBin in index.Bin){ ## iterY <- 1
            if(length(unique(na.omit(data[[endpoint[iBin]]])))>2){
                stop("Binary endpoint cannot have more than 2 levels.
\n", "endpoint: ",endpoint[iBin],"\n") } ## if(any(is.na(data[[endpoint[iBin]]]))){ ## warning("BuyseTest: endpoint ",endpoint[iBin]," contains NA \n") ## } } } ## *** continuous endpoints index.Cont <- which(type=="cont") if(length(index.Cont)>0){ for(iCont in index.Cont){ validNumeric(data[[endpoint[iCont]]], name1 = endpoint[iCont], valid.length = NULL, refuse.NA = FALSE, method = "BuyseTest") ## if(any(is.na(data[[endpoint[iCont]]]))){ ## warning("BuyseTest: endpoint ",endpoint[iCont]," contains NA \n") ## } } } ## *** time to event endpoint index.TTE <- which(type=="tte") status.TTE <- status[type=="tte"] if(length(index.TTE)>0){ validNames(data, name1 = "data", required.values = status.TTE, valid.length = NULL, refuse.NULL = FALSE, method = "BuyseTest") valid.values.status <- 0:2 for(iTTE in index.TTE){ validNumeric(data[[endpoint[iTTE]]], name1 = endpoint[iTTE], valid.length = NULL, refuse.NA = TRUE, method = "BuyseTest") validNumeric(unique(data[[status.TTE[which(index.TTE == iTTE)]]]), name1 = status.TTE[which(index.TTE == iTTE)], valid.values = valid.values.status, valid.length = NULL, method = "BuyseTest") } } ## *** Gaussian endpoints index.Gaus <- which(type=="gaus") if(length(index.Gaus)>0){ for(iGaus in index.Gaus){ validNumeric(data[[endpoint[iGaus]]], name1 = endpoint[iGaus], valid.length = NULL, refuse.NA = FALSE, method = "BuyseTest") validNumeric(data[[status[iGaus]]], name1 = status[iGaus], valid.length = NULL, refuse.NA = FALSE, method = "BuyseTest") } } ## ** endpoint validNames(data, name1 = "data", required.values = endpoint, valid.length = NULL, method = "BuyseTest") ## ** formula if(!is.null(formula) && any(name.call %in% argnames)){ txt <- paste(name.call[name.call %in% argnames], collapse = "\' \'") warning("BuyseTest: argument",if(length(txt)>1){"s"}," \'",txt,"\' ha",if(length(txt)>1){"ve"}else{"s"}," been ignored. \n", "when specified, only argument \'formula\' is used. \n") } ## ** keep.pairScore validLogical(keep.pairScore, valid.length = 1, method = "BuyseTest") ## ** correction.uninf validInteger(correction.uninf, valid.length = 1, min = 0, max = 3, method = "BuyseTest") ## ** method.inference if(length(method.inference)!=1){ stop("Argument \'method.inference\' must have length 1. \n") } if(method.inference != "u-statistic-bebu"){ ## asympototic bebu - hidden value only for debugging validCharacter(method.inference, valid.length = 1, valid.values = c("none","u-statistic","permutation", "studentized permutation", "bootstrap", "studentized bootstrap"), method = "BuyseTest") } if(pool.strata>3 && method.inference %in% c("u-statistic","studentized permutation","studentized bootstrap")){ stop("Only bootstrap and permutation can be used to quantify uncertainty when weighting strata-specific effects by the inverse of the variance. \n") } if(method.inference != "none" && any(table(data[[treatment]])<2) ){ warning("P-value/confidence intervals will not be valid with only one observation. \n") } if(!is.na(attr(method.inference,"resampling-strata")) && any(attr(method.inference,"resampling-strata") %in% names(data) == FALSE)){ stop("Incorrect value for argument \'strata.resampling\': must correspond to a column in argument \'data\'. \n") } if(!is.na(attr(method.inference,"resampling-strata")) && attr(method.inference,"permutation") && any(attr(method.inference,"resampling-strata") == treatment)){ stop("Argument \'strata.resampling\' should not contain the variable used to form the treatment groups when using a permutation test. 
\n") } if(iid && correction.uninf > 0){ warning("The current implementation of the asymptotic distribution is valid when using a correction. \n", "Standard errors / confidence intervals / p-values may not be correct. \n", "Consider using a resampling approach or checking the control of the type 1 error with powerBuyseTest. \n") } ## ** n.resampling if(method.inference %in% c("bootstrap","permutation","stratified bootstrap","stratified permutation")){ validInteger(n.resampling, valid.length = 1, min = 1, method = "BuyseTest") if(!is.null(seed)){ tol.seed <- attr(seed,"max") if(n.resampling>tol.seed){ stop("Cannot set a seed per sample when considering more than ",tol.seed," samples. \n") } } } ## ** hierarchical validLogical(hierarchical, valid.length = 1, method = "BuyseTest") ## ** neutral.as.uninf validLogical(neutral.as.uninf, valid.length = D, method = "BuyseTest") ## ** add.halfNeutral validLogical(add.halfNeutral, valid.length = 1, method = "BuyseTest") ## ** operator if(any(is.na(operator))){ stop("BuyseTest: wrong specification of \'operator\'. \n", "Should be either \"<0\" (lower is better) or \">0\" (higher is better)") } ## ** restriction if(any(tapply(restriction,endpoint,function(iRestriction){length(unique(iRestriction))})>1)){ stop("BuyseTest: wrong specification of \'restriction\'. \n", "Should not vary when the same endpoint is used at different priorities.") } ## ** seed validInteger(seed, valid.length = 1, refuse.NULL = FALSE, min = 1, method = "BuyseTest") ## ** strata if (!is.null(strata)) { validNames(data, name1 = "data", required.values = strata, valid.length = NULL, method = "BuyseTest") if(length(level.strata) != length(levels(strataC)) || any(level.strata != levels(strataC))){ stop("BuyseTest: wrong specification of \'strata\'. \n", "different levels between Control and Treatment \n", "levels(strataT) : ",paste(levels(strataT),collapse=" "),"\n", "levels(strataC) : ",paste(levels(strataC),collapse=" "),"\n") } } ## ** threshold ## check numeric and no NA validNumeric(threshold, valid.length = D, min = 0, refuse.NA = TRUE, method = "BuyseTest") ## check threshold at 1/2 for binary endpoints if(any(threshold[type=="bin"]>1e-12)){ stop("BuyseTest: wrong specification of \'threshold\'. \n", "\'threshold\' must be 1e-12 for binary endpoints (or equivalently NA) \n", "proposed \'threshold\' : ",paste(threshold[type=="bin"],collapse=" "),"\n", "binary endpoint(s) : ",paste(endpoint[type=="bin"],collapse=" "),"\n") } ## Check that the thresholds related to the same endoints are strictly decreasing ## is.unsorted(rev(2:1)) ## is.unsorted(rev(1:2)) vec.test <- tapply(threshold,endpoint, function(x){ test.unsorted <- is.unsorted(rev(x)) test.duplicated <- any(duplicated(x)) return(test.unsorted+test.duplicated) }) if(any(vec.test>0)){ stop("BuyseTest: wrong specification of \'endpoint\' or \'threshold\'. \n", "Endpoints must be used with strictly decreasing threshold when re-used with lower priority. \n", "Problematic endpoints: \"",paste0(names(vec.test)[vec.test>0], collapse = "\" \""),"\"\n") } ## ** trace validInteger(trace, valid.length = 1, min = 0, max = 2, method = "BuyseTest") ## ** treatment validCharacter(treatment, valid.length = 1, method = "BuyseTest") validNames(data, name1 = "data", required.values = treatment, valid.length = NULL, method = "BuyseTest") if (length(level.treatment) != 2) { stop("BuyseTest: wrong specification of \'treatment\'. \n", "The corresponding column in \'data\' must have exactly 2 levels. 
\n", "Proposed levels : ",paste(level.treatment,collapse = " "),"\n") } if(any(table(data[[treatment]])==0)){ txt.stop <- names(which(table(data[[treatment]])==0)) stop("BuyseTest: wrong specification of \'data\'. \n", "No observation taking level ",txt.stop," in the treatment variable. \n") } ## ** type if(any(type %in% c("bin","cont","tte","gaus") == FALSE)){ txt <- type[type %in% c("bin","cont","tte","gaus") == FALSE] stop("BuyseTest: wrong specification of \'type\' \n", "valid values: \"binary\" \"continuous\" \"timetoevent\" \n", "incorrect values: \"",paste(txt, collapse = "\" \""),"\" \n") } n.typePerEndpoint <- tapply(type, endpoint, function(x){length(unique(x))}) if(any(n.typePerEndpoint>1)){ message <- paste0("several types have been specified for endpoint(s) ", paste0(unique(endpoint)[n.typePerEndpoint>1],collapse = ""), "\n") stop("BuyseTest: wrong specification of \'endpoint\' or \'type\' \n",message) } ## ** weightEndpoint if(length(weightEndpoint) != D){ stop("BuyseTest: argument \'weightEndpoint\' must have length the number of endpoints \n") } if(hierarchical){ if(any(weightEndpoint!=1) || any(is.na(weightEndpoint))){ stop("BuyseTest: all the weights for the endpoints must be 1 when using hierarchical GPC \n") } } ## ** weightObs if(!is.null(weightObs)){ test1 <- is.character(weightObs) && length(weightObs) == 1 && weightObs %in% names(data) test2 <- is.numeric(weightObs) && length(weightObs) == NROW(data) if((test1 == FALSE) && (test2 == FALSE)){ stop("BuyseTest: argument \'weightObs\' must correspond to a column in argument \'data\'", "or must have as many element as rows in argument \'data\'. \n") } if(engine == "GPC_cpp"){ stop("Cannot weigth observations with engine GPC_cpp. \n") } } if(hierarchical){ if(any(weightEndpoint!=1) || any(is.na(weightEndpoint))){ stop("BuyseTest: all the weights for the endpoints must be 1 when using hierarchical GPC \n") } } ## ** export return(invisible(TRUE)) } ##---------------------------------------------------------------------- ### BuyseTest-check.R ends here
/scratch/gouwar.j/cran-all/cranData/BuyseTest/R/BuyseTest-check.R
## * inferenceResampling inferenceResampling <- function(envir){ cpus <- envir$outArgs$cpus D <- envir$outArgs$D endpoint <- envir$outArgs$endpoint iid <- envir$outArgs$iid level.strata <- envir$outArgs$level.strata method.inference <- envir$outArgs$method.inference n.resampling <- envir$outArgs$n.resampling n.strata <- envir$outArgs$n.strata seed <- envir$outArgs$seed if (!is.null(seed)) { if(!is.null(get0(".Random.seed"))){ ## avoid error when .Random.seed do not exists, e.g. fresh R session with no call to RNG old <- .Random.seed # to save the current seed on.exit(.Random.seed <<- old) # restore the current seed (before the call to the function) }else{ on.exit(rm(.Random.seed, envir=.GlobalEnv)) } tol.seed <- attr(seed,"max") set.seed(seed) seqSeed <- sample.int(tol.seed, n.resampling, replace = FALSE) } trace <- envir$outArgs$trace ## re-order dataset according to the strata used when resampling if(!is.na(attr(method.inference,"resampling-strata"))){ envir$outArgs$data[,c("..rowIndex..") := 1:.N] data.table::setkeyv(envir$outArgs$data, cols = attr(method.inference,"resampling-strata")) envir$outArgs$M.endpoint <- envir$outArgs$M.endpoint[envir$outArgs$data[["..rowIndex.."]],,drop=FALSE] envir$outArgs$M.status <- envir$outArgs$M.status[envir$outArgs$data[["..rowIndex.."]],,drop=FALSE] envir$outArgs$index.C <- which(envir$outArgs$data[[envir$outArgs$treatment]] == 0) envir$outArgs$index.T <- which(envir$outArgs$data[[envir$outArgs$treatment]] == 1) envir$outArgs$index.strata <- tapply(1:NROW(envir$outArgs$data), envir$outArgs$data[["..strata.."]], list) envir$outArgs$data[,c("..rowIndex..") := NULL,] } ## ** computation if (cpus == 1) { ## *** sequential resampling test if (trace > 0) { requireNamespace("pbapply") method.loop <- pbapply::pblapply }else{ method.loop <- lapply } ls.resampling <- do.call(method.loop, args = list(X = 1:n.resampling, FUN = function(iB){ if(!is.null(seed)){set.seed(seqSeed[iB])} iOut <- .BuyseTest(envir = envir, iid = iid, method.inference = method.inference, pointEstimation = FALSE ) if(!is.null(seed)){ return(c(iOut,list(seed = seqSeed[iB]))) }else{ return(iOut) } }) ) }else { ## *** parallel resampling test ## define cluster cl <- parallel::makeCluster(cpus) if(trace>0){ pb <- utils::txtProgressBar(max = n.resampling, style = 3) progress <- function(n){utils::setTxtProgressBar(pb, n)} opts <- list(progress = progress) }else{ opts <- list() } ## link to foreach doSNOW::registerDoSNOW(cl) ## seed if (!is.null(seed)) { parallel::clusterExport(cl, varlist = "seqSeed", envir = environment()) } ## export package parallel::clusterCall(cl, fun = function(x){ suppressPackageStartupMessages(library(BuyseTest, quietly = TRUE, warn.conflicts = FALSE, verbose = FALSE)) }) ## export functions toExport <- c(".BuyseTest","calcPeron","calcSample") iB <- NULL ## [:forCRANcheck:] foreach ls.resampling <- foreach::`%dopar%`( foreach::foreach(iB=1:n.resampling, .export = toExport, .packages = "data.table", .options.snow = opts), { if(!is.null(seed)){set.seed(seqSeed[iB])} iOut <- .BuyseTest(envir = envir, iid = iid, method.inference = method.inference, pointEstimation = FALSE) if(!is.null(seed)){ return(c(iOut,list(seed = seqSeed[iB]))) }else{ return(iOut) } }) parallel::stopCluster(cl) if(trace>0){close(pb)} } ## ** post treatment test.resampling <- which(unlist(lapply(ls.resampling,is.null)) == FALSE) if(length(test.resampling) != n.resampling){ n.failure <- n.resampling - length(test.resampling) warning("The resampling procedure failed for ",n.failure," samples 
(",round(100*n.failure/n.resampling,2),"%)") } dim.delta <- c(n.resampling, n.strata, D, 6) dimnames.delta <- list(as.character(1:n.resampling), level.strata, endpoint, c("favorable","unfavorable","neutral","uninf","netBenefit","winRatio")) out <- list(deltaResampling = array(NA, dim = dim.delta, dimnames = dimnames.delta), DeltaResampling = array(NA, dim = dim.delta[c(1,3,4)], dimnames = dimnames.delta[c(1,3,4)]), weightStrataResampling = matrix(NA, nrow = n.resampling, ncol = n.strata, dimnames = list(NULL, level.strata)) ) if(!is.null(seed)){ out$seed <- rep(NA, n.resampling) } if(iid){ out$covarianceResampling = array(NA, dim = c(n.resampling, D, 5), dimnames = list(as.character(1:n.resampling), endpoint, c("favorable", "unfavorable", "covariance", "netBenefit", "winRatio"))) }else{ out$covarianceResampling <- array(NA, dim = c(0,0,0)) } for(iR in test.resampling){ ## iR <- 1 out$deltaResampling[iR,,,] <- ls.resampling[[iR]]$delta out$DeltaResampling[iR,,] <- ls.resampling[[iR]]$Delta out$weightStrataResampling[iR,] <- ls.resampling[[iR]]$weightStrata if(!is.null(seed)){ out$seed[iR] <- seqSeed[iR] } if(iid){ out$covarianceResampling[iR,,] <- ls.resampling[[iR]]$covariance } } ## ** export return(out) } ## * inference U-statistic (Bebu et al 2015) ##' @description Implement the computation of the asymptotic variance as described in Bebu et al (2015) ##' Should give results equivalent to inferenceUstatistic ##' NOTE: arguments subset.C and subset.T were used for BuysePower to re-compute statistics on a subset of the data ##' but this happens to be slower than just re-running the test so is not used ##' @noRd ##' @references Large sample inference for a win ratio analysis of a composite outcome based on prioritized components Biostatistics (2015), pp. 1–10 doi:10.1093/biostatistics/kxv032 inferenceUstatisticBebu <- function(tablePairScore, subset.C = NULL, subset.T = NULL, order, weightEndpoint, n.pairs, n.C, n.T, level.strata, n.strata, n.endpoint, endpoint){ . 
<- NULL ## for CRAN test out <- list() ## ** extract informations n.endpoint <- length(endpoint) ntot.pairs <- sum(n.pairs) ## ** merge tables ls.table <- wsumPairScore(tablePairScore, weightEndpoint = weightEndpoint, subset.C = subset.C, subset.T = subset.T) out$n.pairs <- NROW(ls.table[[1]]) count.favorable <- do.call(cbind,lapply(ls.table, function(iTable){sum(iTable$favorable)})) count.unfavorable <- do.call(cbind,lapply(ls.table, function(iTable){sum(iTable$unfavorable)})) p.favorable <- as.double(count.favorable/sum(n.pairs)) p.unfavorable <- as.double(count.unfavorable/sum(n.pairs)) ## out$Delta <- unname(cbind(p.favorable, ## p.unfavorable, ## p.favorable - p.unfavorable, ## count.favorable / count.unfavorable)) ## out$delta <- array(NA, dim = c(n.strata, n.endpoint, 4)) ## out$count_favorable <- matrix(NA, nrow = n.strata, ncol = n.endpoint) ## out$count_unfavorable <- matrix(NA, nrow = n.strata, ncol = n.endpoint) ## out$count_neutral <- matrix(NA, nrow = n.strata, ncol = n.endpoint) ## out$count_uninf <- matrix(NA, nrow = n.strata, ncol = n.endpoint) ## ** compute variance component over strata M.cov <- matrix(0, nrow = n.endpoint, ncol = 3, dimnames = list(endpoint, c("favorable","unfavorable","covariance"))) strataSum <- matrix(NA, nrow = 6, ncol = n.endpoint, dimnames = list(c("favorableT","favorableC","unfavorableT","unfavorableC","mixedC","mixedT"), endpoint)) for(iStrata in 1:n.strata){ ## iStrata <- 1 iN.strata <- n.pairs[iStrata] iLS.table <- lapply(ls.table, function(iT){iT[iT$strata == level.strata[iStrata]]}) iDT.nCT <- iLS.table[[1]][,.(n.C = length(unique(.SD$indexWithinStrata.C)),n.T = length(unique(.SD$indexWithinStrata.T)))] iN.C <- iDT.nCT$n.C iN.T <- iDT.nCT$n.T iP.favorable <- unlist(lapply(iLS.table, function(iT){sum(iT$favorable)}))/n.pairs[iStrata] iP.unfavorable <- unlist(lapply(iLS.table, function(iT){sum(iT$unfavorable)}))/n.pairs[iStrata] for(iE in 1:n.endpoint){ ## iE <- 1 iTable <- iLS.table[[iE]] index2originalOrder.C <- iTable[!duplicated(iTable$index.C), stats::setNames(.SD$index.C,.SD$indexWithinStrata.C)] index2originalOrder.T <- iTable[!duplicated(iTable$index.T), stats::setNames(.SD$index.T,.SD$indexWithinStrata.T)] ## *** Hajek projection ## \E[X_i>=Y_j+\tau|X_i] and \E[X_i+\tau<=Y_j|X_i] sumPair.T <- iTable[, .(pairs = .N, favorable = sum(.SD$favorable), unfavorable = sum(.SD$unfavorable)), by = "indexWithinStrata.T"] sumPair.T[, c("E.favorable") := .SD$favorable/.SD$pairs] sumPair.T[, c("E.unfavorable") := .SD$unfavorable/.SD$pairs] iN.setT <- NROW(sumPair.T) ## \E[X_i>=Y_j+\tau|Y_j] and \E[X_i+\tau<=Y_j|Y_j] sumPair.C <- iTable[, .(pairs = .N, favorable = sum(.SD$favorable), unfavorable = sum(.SD$unfavorable)), by = "indexWithinStrata.C"] sumPair.C[, c("E.favorable") := .SD$favorable/.SD$pairs] sumPair.C[, c("E.unfavorable") := .SD$unfavorable/.SD$pairs] iN.setC <- NROW(sumPair.C) ## *** variance ## P[1(X_i,Y_j)1(X_i,Y_k)] = 1/nm(m-1) sum_i sum_j 1(X_i,Y_j) sum_k neq j 1(X_i,Y_j) ## = 1/nm(m-1) sum_i sum_j 1(X_i,Y_j) ( sum_k 1(X_i,Y_k) - 1(X_i,Y_j) ) ## here we compute sum_k 1(X_i,Y_k) and m-1 iTable[, c("sumFavorable.T") := sumPair.T$favorable[.SD$indexWithinStrata.T]] iTable[, c("sumUnfavorable.T") := sumPair.T$unfavorable[.SD$indexWithinStrata.T]] iTable[, c("sumFavorable.C") := sumPair.C$favorable[.SD$indexWithinStrata.C]] iTable[, c("sumUnfavorable.C") := sumPair.C$unfavorable[.SD$indexWithinStrata.C]] if(iN.setT > 0){ ## E[ 1(X_i>Y_j) 1(X_i>Y_k) ] strataSum["favorableT",iE] <- iTable[,sum(.SD$sumFavorable.T * .SD$favorable)] / 
(iN.strata*iN.setT) ## E[ 1(X_i<Y_j) 1(X_i<Y_k) ] strataSum["unfavorableT",iE] <- iTable[,sum(.SD$sumUnfavorable.T * .SD$unfavorable)] / (iN.strata*iN.setT) ## E[ 1(X_i>Y_j) 1(X_i<Y_k) ] strataSum["mixedT",iE] <- iTable[,sum(.SD$sumUnfavorable.T * .SD$favorable)] / (iN.strata*iN.setT) } if(iN.setC > 0){ ## E[ 1(X_i>Y_j) 1(X_k>Y_j) ] strataSum["favorableC",iE] <- iTable[,sum(.SD$sumFavorable.C * .SD$favorable)] / (iN.strata*iN.setC) ## E[ 1(X_i<Y_j) 1(X_k<Y_j) ] strataSum["unfavorableC",iE] <- iTable[,sum(.SD$sumUnfavorable.C * .SD$unfavorable)] / (iN.strata*iN.setC) ## E[ 1(X_i>Y_j) 1(X_k<Y_j) ] strataSum["mixedC",iE] <- iTable[,sum(.SD$sumUnfavorable.C * .SD$favorable)] / (iN.strata*iN.setC) } } ## *** first order terms: compute xi ## P[X1>Y1 & X1>Y1'] - P[X1>Y1]^2 xi_10_11 <- strataSum["favorableT",] - iP.favorable^2 ## P[X1>Y1 & X1'>Y1] - P[X1>Y1]^2 xi_01_11 <- strataSum["favorableC",] - iP.favorable^2 ## P[X1<Y1 & X1<Y1'] - P[X1<Y1]^2 xi_10_22 <- strataSum["unfavorableT",] - iP.unfavorable^2 ## P[X1<Y1 & X1'<Y1] - P[X1<Y1]^2 xi_01_22 <- strataSum["unfavorableC",] - iP.unfavorable^2 ## P[X1>Y1 & X1<Y1'] - P[X1>Y1]*P[X1<Y1] xi_10_12 <- strataSum["mixedC",] - iP.favorable * iP.unfavorable ## P[X1>Y1 & X1'<Y1] - P[X1>Y1]*P[X1<Y1] xi_01_12 <- strataSum["mixedT",] - iP.favorable * iP.unfavorable ## *** second order terms if(order == 2){ Mfav <- do.call(cbind,lapply(iLS.table,"[[","favorable")) Munfav <- do.call(cbind,lapply(iLS.table,"[[","unfavorable")) varUfav <- colMeans(Mfav^2) - colMeans(Mfav)^2 ##instead of p1.favorable*(1-p1.favorable) varUunfav <- colMeans(Munfav^2) - colMeans(Munfav)^2 ##instead of p1.unfavorable*(1-p1.unfavorable) covUfavunfav <- colMeans(Mfav*Munfav) - colMeans(Mfav)*colMeans(Munfav) ##instead of -p1.favorable*p1.unfavorable H2.favorable <- (varUfav - xi_10_11 - xi_01_11)/(iN.strata) H2.unfavorable <- (varUunfav - xi_10_22 - xi_01_22)/(iN.strata) H2.covariance <- (covUfavunfav - xi_10_12 - xi_01_12)/(iN.strata) }else{ H2.favorable <- 0 H2.unfavorable <- 0 H2.covariance <- 0 } ## ** compute sigma ## NO STRATA: ## N.TC = N.T+N.C ## Sigma = N.TC/N.C SigmaC + N.TT/N.T SigmaT ## asymptotic variance i.e. sqrt(N.TC)(Uhat - U) \sim N(0,Sigma) ## scaled asymptotic variance i.e. 
(Uhat - U) \sim N(0,Sigma/N.TC) = N(0,1/N.C SigmaC + 1/N.T SigmaT) ## ## STRATA: ## same but adding a factor n.strata / N.TC to accound for pooling M.cov <- M.cov + (iN.strata/ntot.pairs)^2 * cbind(favorable = xi_10_11 / iN.C + xi_01_11 / iN.T + H2.favorable, unfavorable = xi_10_22 / iN.C + xi_01_22 / iN.T + H2.unfavorable, covariance = xi_10_12 / iN.C + xi_01_12 / iN.T + H2.covariance) } ## ** export out$Sigma <- cbind(M.cov, "netBenefit" = M.cov[,"favorable"] + M.cov[,"unfavorable"] - 2 * M.cov[,"covariance"], "winRatio" = M.cov[,"favorable"]/p.unfavorable^2 + M.cov[,"unfavorable"]*p.favorable^2/p.unfavorable^4 - 2 * M.cov[,"covariance"]*p.favorable/p.unfavorable^3 ) return(out) } ## * wsumPairScore ##' @description cumulate over endpoint the scores ##' @noRd wsumPairScore <- function(pairScore, weightEndpoint, subset.C, subset.T){ keep.col <- c("strata","index.C","index.T","index.pair","indexWithinStrata.C", "indexWithinStrata.T","favorableC","unfavorableC","neutralC","uninfC") old.col <- c("favorableC","unfavorableC","neutralC","uninfC") new.col <- c("favorable","unfavorable","neutral","uninf") n.endpoint <- length(pairScore) out <- vector(mode = "list", length = n.endpoint) if(!is.null(subset.C)){ subset.numC <- sort(unique(pairScore[[1]]$index.C))[subset.C] } if(!is.null(subset.T)){ subset.numT <- sort(unique(pairScore[[1]]$index.T))[subset.T] } ## indexPair <- stats::setNames(1:NROW(pairScore[[1]]),pairScore[[1]]$index.pair) for(iE in 1:n.endpoint){ ## iE <- 2 iTable <- data.table::copy(pairScore[[iE]][,.SD,.SDcols = keep.col]) if(!is.null(subset.C)){ iTable <- iTable[iTable$index.C %in% subset.numC] } if(!is.null(subset.T)){ iTable <- iTable[iTable$index.T %in% subset.numT] } data.table::setnames(iTable, old = old.col, new = new.col) iTable[,c("favorable") := .SD$favorable * weightEndpoint[iE]] iTable[,c("unfavorable") := .SD$unfavorable * weightEndpoint[iE]] iTable[,c("neutral") := .SD$neutral * weightEndpoint[iE]] iTable[,c("uninf") := .SD$uninf * weightEndpoint[iE]] if(iE==1){ out[[iE]] <- iTable }else{ out[[iE]] <- data.table::copy(out[[iE-1]]) indexMatch <- match(iTable$index.pair,out[[iE]]$index.pair) ## indexMatch - iTable$index.pair out[[iE]][indexMatch, c("favorable") := .SD$favorable + iTable$favorable] out[[iE]][indexMatch, c("unfavorable") := .SD$unfavorable + iTable$unfavorable] out[[iE]][indexMatch, c("neutral") := .SD$neutral + iTable$neutral] out[[iE]][indexMatch, c("uninf") := .SD$uninf + iTable$uninf] } } return(out) }
/scratch/gouwar.j/cran-all/cranData/BuyseTest/R/BuyseTest-inference.R
## * Documentation initialization functions called by BuyseTest #' @title internal functions for BuyseTest - initialization #' @name BuyseTest-initialization #' @description Functions called by \code{\link{BuyseTest}} to initialize the arguments. #' @noRd #' #' @details #' #' \code{initializeArgs}: Normalize the argument #' \itemize{ #' \item scoring.rule, pool.strata, neutral.as.uninf, add.halfNeutral, keep.pairScore, n.resampling, seed, cpus, trace: set to default value when not specified. #' \item formula: call \code{initializeFormula} to extract arguments. #' \item type: convert to numeric. #' \item status: only keep status relative to TTE endpoint. Set to \code{NULL} if no TTE endpoint. #' \item threshold: set default threshold to 1e-12. #' the rational being we consider a pair favorable if X>Y ie X>=Y+1e-12. #' When using a threshold e.g. 5 we want X>=Y+5 and not X>Y+5, especially when the measurement is discrete. \cr #' \item data: convert to data.table object. #' \item scoring.rule: convert to numeric. #' } #' #' \code{initializeFormula}: extract \code{treatment}, \code{type}, \code{endpoint}, \code{threshold}, \code{status}, \code{operator}, and \code{strata} #' from the formula. \cr \cr #' #' \code{initializeData}: Divide the dataset into two, one relative to the treatment group and the other relative to the control group. #' Merge the strata into one with the interaction variable. #' Extract for each strata the index of the observations within each group. #' #' @author Brice Ozenne ## * initializeArgs initializeArgs <- function(status, correction.uninf = NULL, cpus = NULL, data, endpoint, formula, hierarchical = NULL, keep.pairScore = NULL, method.inference = NULL, scoring.rule = NULL, pool.strata = NULL, model.tte, n.resampling = NULL, strata.resampling = NULL, name.call, neutral.as.uninf = NULL, add.halfNeutral = NULL, operator = NULL, censoring, restriction, option, seed = NULL, strata, threshold, trace = NULL, treatment, type, weightEndpoint = NULL, weightObs = NULL, envir){ ## ** apply default options if(is.null(cpus)){ cpus <- option$cpus } if(is.null(keep.pairScore)){ keep.pairScore <- option$keep.pairScore } if(is.null(scoring.rule)){ scoring.rule <- option$scoring.rule } if(is.null(hierarchical)){ hierarchical <- option$hierarchical } if(is.null(correction.uninf)){ correction.uninf <- option$correction.uninf } if(is.null(method.inference)){ method.inference <- option$method.inference } if(is.null(n.resampling)){ n.resampling <- option$n.resampling } if(is.null(strata.resampling)){ strata.resampling <- option$strata.resampling } if(is.null(neutral.as.uninf)){ neutral.as.uninf <- option$neutral.as.uninf } if(is.null(add.halfNeutral)){ add.halfNeutral <- option$add.halfNeutral } if(is.null(trace)){ trace <- option$trace } fitter.model.tte <- option$fitter.model.tte engine <- option$engine alternative <- option$alternative precompute <- option$precompute ## ** convert formula into separate arguments if(!missing(formula)){ ## the missing is for BuysePower where the arguments are not necessarily specified test.null <- c(status = !missing(status) && !is.null(status), endpoint = !missing(endpoint) && !is.null(endpoint), operator = !missing(operator) && !is.null(operator), censoring = !missing(censoring) && !is.null(censoring), restriction = !missing(restriction) && !is.null(restriction), strata = !missing(strata) && !is.null(strata), threshold = !missing(threshold) && !is.null(threshold), treatment = !missing(treatment) && !is.null(treatment), type = !missing(type) && 
!is.null(type), weightEndpoint = !missing(weightEndpoint) && !is.null(weightEndpoint) ) if(any(test.null)){ txt <- names(test.null)[test.null] warning("Argument",if(sum(test.null)>1){"s"}," \'",paste(txt, collpase="\' \'"),if(sum(test.null)>1){" are "}else{" is "}," ignored when argument \'formula\' has been specified\n") } resFormula <- initializeFormula(formula, hierarchical = hierarchical, envir = envir) treatment <- resFormula$treatment type <- resFormula$type endpoint <- resFormula$endpoint threshold <- resFormula$threshold status <- resFormula$status weightEndpoint <- resFormula$weightEndpoint operator <- resFormula$operator censoring <- resFormula$censoring restriction <- resFormula$restriction strata <- resFormula$strata }else{ formula <- NULL } ## ** type validType1 <- paste0("^",c("b","bin","binary"),"$") validType2 <- paste0("^",c("c","cont","continuous"),"$") validType3 <- paste0("^",c("t","tte","time","timetoevent"),"$") ## [if modified, remember to change the corresponding vector in initFormula] validType4 <- paste0("^",c("g","gaus","gaussian"),"$") ## [if modified, remember to change the corresponding vector in initFormula] type <- tolower(type) type[grep(paste(validType1,collapse="|"), type)] <- "bin" type[grep(paste(validType2,collapse="|"), type)] <- "cont" type[grep(paste(validType3,collapse="|"), type)] <- "tte" type[grep(paste(validType4,collapse="|"), type)] <- "gaus" ## ** endpoint index.typeTTE <- which(type=="tte") endpoint.TTE <- endpoint[index.typeTTE] threshold.TTE <- threshold[index.typeTTE] D <- length(endpoint) D.TTE <- length(endpoint.TTE) Uendpoint <- unique(endpoint) Uendpoint.TTE <- unique(endpoint.TTE) ## ** default values if(is.null(formula)){ if(is.null(threshold)){ threshold <- rep(10^{-12},D) # if no treshold is proposed all threshold are by default set to 10^{-12} } if(is.null(restriction)){ restriction <- rep(as.numeric(NA),D) } if(is.null(operator)){ operator <- rep(">0",D) } if(is.null(weightEndpoint)){ if(hierarchical){ weightEndpoint <- rep(1,D) }else{ weightEndpoint <- rep(1/D,D) } } if(is.null(status)){ status <- rep("..NA..",D) }else if(length(status) != D && length(status) == D.TTE){ status.save <- status status <- rep("..NA..", D) status[index.typeTTE] <- status.save } if(is.null(censoring)){ censoring <- rep("right",D) }else if(length(status) != D && length(status) == D.TTE){ censoring.save <- status censoring <- rep("right", D) censoring[index.typeTTE] <- status.save } } ## ** status Ustatus <- unique(status) status.TTE <- status[index.typeTTE] ## from now, status contains for each endpoint the name of variable indicating status (0) or event (1) or NA ## ** censoring ## ## if(any(type %in% 1:2)){ ## ## censoring[type %in% 1:2] <- as.character(NA) ## ## } ## if(!is.numeric(censoring)){ ## censoring.save <- censoring ## censoring <- sapply(unname(censoring),function(iC){ ## if(identical(iC,"NA")){ ## return(0) ## }else if(identical(iC,"right")){ ## return(1) ## }else if(identical(iC,"left")){ ## return(2) ## }else{ ## return(NA) ## } ## }) ## attr(censoring,"original") <- censoring.save ## } ## ** scoring.rule ## WARNING: choices must be lower cases ## remember to update check scoring.rule (in BuyseTest-check.R) if(is.character(scoring.rule)){ scoring.rule <- switch(tolower(scoring.rule), "gehan" = 0, "peron" = 1, NA ) } if (D.TTE == 0) { scoring.rule <- 0 if ("scoring.rule" %in% name.call && trace > 0) { message("NOTE : there is no survival endpoint, \'scoring.rule\' argument is ignored \n") } } ## ** pool.strata if(is.null(strata)){ 
pool.strata <- 0 attr(pool.strata,"type") <- "none" attr(pool.strata,"original") <- NA }else if(is.null(pool.strata)){ pool.strata <- switch(tolower(option$pool.strata), "buyse" = 0, "cmh" = 1, "equal" = 2, "var-favorable" = 3.1, "var-unfavorable" = 3.2, "var-netbenefit" = 3.3, "var-winratio" = 3.4, NA ) attr(pool.strata,"type") <- option$pool.strata attr(pool.strata,"original") <- NA }else if(is.character(pool.strata)){ pool.strata_save <- tolower(pool.strata) pool.strata <- switch(pool.strata_save, "buyse" = 0, "cmh" = 1, "equal" = 2, "var-favorable" = 3.1, "var-unfavorable" = 3.2, "var-netbenefit" = 3.3, "var-winratio" = 3.4, NA ) attr(pool.strata,"type") <- pool.strata_save attr(pool.strata,"original") <- pool.strata_save }else{ pool.strata <- NA } ## ** threshold if(any(is.na(threshold))){ threshold[which(is.na(threshold))] <- 10^{-12} } if(any(abs(threshold)<10^{-12})){ threshold[which(abs(threshold)<10^{-12})] <- 10^{-12} } ## ** method.inference method.inference <- tolower(method.inference) attr(method.inference,"permutation") <- grepl("permutation",method.inference) attr(method.inference,"bootstrap") <- grepl("bootstrap",method.inference) attr(method.inference,"studentized") <- grepl("studentized",method.inference) attr(method.inference,"ustatistic") <- grepl("u-statistic",method.inference) if(is.na(strata.resampling) || length(strata.resampling)== 0){ attr(method.inference,"resampling-strata") <- as.character(NA) }else{ attr(method.inference,"resampling-strata") <- strata.resampling } ## ** neutral.as.uninf if(length(neutral.as.uninf)==1 && D>1){ neutral.as.uninf <- rep(neutral.as.uninf,D) } ## ** correction.uninf correction.uninf <- as.numeric(correction.uninf) if(correction.uninf>0){ engine <- "GPC_cpp" } ## ** model.tte if(identical(scoring.rule,1)){ if((!is.null(model.tte)) && (length(unique(endpoint.TTE)) == 1) && !inherits(model.tte, "list")){ attr.save <- attr(model.tte,"iidNuisance") model.tte <- list(model.tte) names(model.tte) <- unique(endpoint.TTE) attr(model.tte,"iidNuisance") <- attr.save } }else{ model.tte <- NULL } if(!is.null(model.tte)){ fitter.model.tte <- unlist(lapply(model.tte, class)) }else{ fitter.model.tte <- setNames(rep(fitter.model.tte, length(Uendpoint.TTE)), Uendpoint.TTE) } ## ** iid iid <- attr(method.inference,"studentized") || (method.inference == "u-statistic") if(iid){ attr(method.inference,"hprojection") <- option$order.Hprojection }else{ attr(method.inference,"hprojection") <- NA } iidNuisance <- iid && identical(scoring.rule,1) && (is.null(model.tte) || identical(attr(model.tte,"iidNuisance"),TRUE)) ## ** cpu if (cpus == "all") { cpus <- parallel::detectCores() # this function detect the number of CPU cores } ## ** trace if(is.logical(trace)){ trace <- as.numeric(trace) } ## ** seed if(!is.null(seed)){ attr(seed,"max") <- 10^(floor(log10(.Machine$integer.max))-1) } ## ** operator if(!is.numeric(operator)){ operator <- sapply(operator, switch, ">0"=1, "<0"=-1, NA) } ## ** export return(list( name.call = name.call, status = status, status.TTE = status.TTE, correction.uninf = correction.uninf, cpus = cpus, D = D, D.TTE = D.TTE, data = data, endpoint = endpoint, endpoint.TTE = endpoint.TTE, engine = engine, fitter.model.tte = fitter.model.tte, formula = formula, iid = iid, iidNuisance = iidNuisance, index.endpoint = match(endpoint, Uendpoint) - 1, index.status = match(status, Ustatus) - 1, keep.pairScore = keep.pairScore, keep.survival = option$keep.survival, scoring.rule = scoring.rule, pool.strata = pool.strata, model.tte = model.tte, 
method.inference = method.inference, n.resampling = n.resampling, hierarchical = hierarchical, neutral.as.uninf = neutral.as.uninf, add.halfNeutral = add.halfNeutral, operator = operator, censoring = censoring, restriction = restriction, order.Hprojection = option$order.Hprojection, precompute = precompute, seed = seed, strata = strata, threshold = threshold, trace = trace, treatment = treatment, type = type, Uendpoint = Uendpoint, Ustatus = Ustatus, weightEndpoint = weightEndpoint, weightObs = weightObs, debug = option$debug )) } ## * initializeData initializeData <- function(data, type, endpoint, Uendpoint, D, scoring.rule, status, Ustatus, method.inference, censoring, strata, pool.strata, treatment, hierarchical, copy, keep.pairScore, endpoint.TTE, status.TTE, iidNuisance, weightEndpoint, weightObs){ if (!data.table::is.data.table(data)) { data <- data.table::as.data.table(data) }else if(copy){ data <- data.table::copy(data) } ## ** convert character/factor to numeric for binary endpoints name.bin <- endpoint[which(type == "bin")] if(length(name.bin)>0){ data.class <- sapply(data,class) test.num <- (data.class %in% c("numeric","integer")) if(any(test.num==FALSE)){ endpoint.char <- setdiff(names(data.class)[test.num==FALSE],c(treatment,strata)) for(iE in endpoint.char){ data[, c(iE) := as.double(as.factor(.SD[[1]]))-1.0, .SDcols = iE] } } } ## ** n.obs n.obs <- data[,.N] ## ** strata if(!is.null(strata)){ data[ , c("..strata..") := interaction(.SD, drop = TRUE, lex.order = FALSE, sep = "."), .SDcols = strata] level.strata <- levels(data[["..strata.."]]) data[ , c("..strata..") := as.numeric(.SD[["..strata.."]])] # convert to numeric n.obsStrata <- data[,.N, by = "..strata.."][,stats::setNames(.SD[[1]],.SD[[2]]),.SD = c("N","..strata..")] }else{ data[ , c("..strata..") := 1] n.obsStrata <- n.obs level.strata <- 1 } n.strata <- length(level.strata) ## ** convert treatment to binary indicator level.treatment <- levels(as.factor(data[[treatment]])) trt2bin <- stats::setNames(0:1,level.treatment) data[ , c(treatment) := trt2bin[as.character(.SD[[1]])], .SDcols = treatment] ## ** rowIndex data[,c("..rowIndex..") := 1:.N] ## ** unique status if(any(status == "..NA..")){ data[,c("..NA..") := -100] } ## ** TTE with status if(scoring.rule>0){ test.status <- sapply(status.TTE, function(iC){any(data[[iC]]==0)}) if(all(test.status==FALSE)){ scoring.rule <- 0 iidNuisance <- FALSE }else if(identical(attr(method.inference,"hprojection"),2)){ keep.pairScore <- TRUE ## need the detail of the score to perform the 2nd order projection } ## distinct time to event endpoints endpoint.UTTE <- unique(endpoint.TTE[test.status]) status.UTTE <- unique(status.TTE[test.status]) D.UTTE <- length(endpoint.UTTE) ## correspondance endpoint, TTE endpoint (non TTEe endpoint are set to -100) index.UTTE <- match(endpoint, endpoint.UTTE, nomatch = -99) - 1 }else{ endpoint.UTTE <- numeric(0) status.UTTE <- numeric(0) D.UTTE <- 0 index.UTTE <- rep(-100, D) } ## ** scoring method for each endpoint ## check if status n.CR <- sapply(status, function(iC){max(data[[iC]])}) test.CR <- n.CR[status]>1 test.censoring <- sapply(Ustatus, function(iC){any(data[[iC]]==0)})[status] method.score <- sapply(1:D, function(iE){ ## iE <- 1 if(type[iE] %in% c("bin","cont")){ return("continuous") }else if(type[iE] == "gaus"){ return("gaussian") }else if(type[iE] == "tte"){ if(test.censoring[iE]==FALSE && test.CR[iE]==FALSE){ return("continuous") }else if(scoring.rule == 0){ ## 3/4 Gehan (right/left censoring) return(switch(censoring[iE], "left" = 
"TTEgehan2", "right" = "TTEgehan")) }else if(scoring.rule == 1){ return(switch(as.character(test.CR[iE]), "FALSE" = "SurvPeron", "TRUE" = "CRPeron")) } } }) attr(method.score,"test.censoring") <- test.censoring attr(method.score,"test.CR") <- test.CR paired <- all(n.obsStrata==2) ## ** previously analyzed distinct TTE endpoints if((scoring.rule==1) && hierarchical){ ## only relevant when using Peron scoring rule with hierarchical GPC ## number of distinct, previously analyzed, TTE endpoints nUTTE.analyzedPeron_M1 <- sapply(1:D, function(iE){ if(iE>1){ sum(endpoint.UTTE %in% endpoint[1:(iE-1)]) }else{ return(0) } }) }else{ nUTTE.analyzedPeron_M1 <- rep(0,D) } ## ** number of observations per strata used when resampling index.C <- which(data[[treatment]] == 0) index.T <- which(data[[treatment]] == 1) if(!is.na(attr(method.inference,"resampling-strata"))){ n.obsStrataResampling <- table(data[,interaction(.SD), .SDcols = attr(method.inference,"resampling-strata")]) }else{ n.obsStrataResampling <- n.obs } ## ** skeleton for survival proba (only relevant for Peron scoring rule) skeletonPeron <- list(survTimeC = lapply(1:D, function(iE){lapply(1:n.strata, function(iS){matrix(nrow=0,ncol=0)})}), survTimeT = lapply(1:D, function(iE){lapply(1:n.strata, function(iS){matrix(nrow=0,ncol=0)})}), survJumpC = lapply(1:D, function(iE){lapply(1:n.strata, function(iS){matrix(nrow=0,ncol=0)})}), survJumpT = lapply(1:D, function(iE){lapply(1:n.strata, function(iS){matrix(nrow=0,ncol=0)})}), lastSurv = lapply(1:D, function(iS){matrix(nrow = n.strata, ncol = 2*max(1,n.CR[iS]))}), ## 4 for competing risk setting, 2 is enough for survival p.C = matrix(-100, nrow = n.strata, ncol = D), p.T = matrix(-100, nrow = n.strata, ncol = D), iid = list(survJumpC = lapply(1:D.UTTE, function(IE){lapply(1:n.strata, matrix, nrow = 0, ncol = 0)}), survJumpT = lapply(1:D.UTTE, function(IE){lapply(1:n.strata, matrix, nrow = 0, ncol = 0)}) ) ) ## ** iid for gaussian endpoints n.endpoint <- length(endpoint) index.gaussiid <- which(type == "gaus") if(length(index.gaussiid)>0 && any(!is.na(censoring[index.gaussiid]))){ index.gaussiid2 <- intersect(which(!is.na(censoring)),index.gaussiid) for(iE in index.gaussiid2){ ## iE <- 1 skeletonPeron$survTimeC[[iE]] <- data[index.C,list(list(do.call(cbind,.SD[[1]]))), .SDcols = censoring[iE], by = "..strata.."][[2]] skeletonPeron$survTimeT[[iE]] <- data[index.T,list(list(do.call(cbind,.SD[[1]]))), .SDcols = censoring[iE], by = "..strata.."][[2]] } } ## ** weightEndpoint if(missing(weightObs) || is.null(weightObs)){ data$..weight.. <- 1 weightObs <- data$..weight.. }else if( (length(weightObs)==1) && (weightObs %in% names(data)) ){ names(data)[names(data)==weightObs] <- "..weight.." weightObs <- data$..weight.. } ## ** pool.strata ## set default pool.strata to Buyse for paired data ## otherwise pooling will do something strange if(paired && pool.strata !=0){ if(is.na(attr(pool.strata,"original"))){ pool.strata[] <- 0 }else{ warning("Weights from the \"buyse\" pooling scheme (argument \'pool.strata\') are recommended for paired data. 
\n") } } ## ** export keep.cols <- union(c(treatment, "..strata.."), na.omit(attr(method.inference,"resampling-strata"))) return(list(data = data[,.SD,.SDcols = keep.cols], M.endpoint = as.matrix(data[, .SD, .SDcols = Uendpoint]), M.status = as.matrix(data[, .SD, .SDcols = Ustatus]), index.C = index.C, index.T = index.T, weightObs = weightObs, index.strata = tapply(data[["..rowIndex.."]], data[["..strata.."]], list), level.treatment = level.treatment, level.strata = level.strata, pool.strata = pool.strata, method.score = method.score, paired = paired, n.strata = n.strata, n.obs = n.obs, n.obsStrata = n.obsStrata, n.obsStrataResampling = n.obsStrataResampling, cumn.obsStrataResampling = c(0,cumsum(n.obsStrataResampling)), skeletonPeron = skeletonPeron, scoring.rule = scoring.rule, iidNuisance = iidNuisance, nUTTE.analyzedPeron_M1 = nUTTE.analyzedPeron_M1, endpoint.UTTE = endpoint.UTTE, status.UTTE = status.UTTE, D.UTTE = D.UTTE, index.UTTE = index.UTTE, keep.pairScore = keep.pairScore )) } ## * initializeFormula initializeFormula <- function(x, hierarchical, envir){ validClass(x, valid.class = "formula") ## ** extract treatment treatment <- setdiff(all.vars(x), all.vars(stats::delete.response(stats::terms(x)))) if(length(treatment)!=1){ stop("initFormula: there must be exactly one response variable in formula\n", "number of response variables founded: ",length(treatment),"\n") } if(length(as.character(x))!=3){ stop("initFormula: formula with unexpected length, as.character(x) should have length 3\n", "length founded: ",length(as.character(x)),"\n") } ## ** restrict to the right side of the formula x.rhs <- as.character(x)[3] ## remove all blanks x.rhs <- gsub("[[:blank:]]|\n", "", x.rhs) ## find endpoints ## https://stackoverflow.com/questions/35347537/using-strsplit-in-r-ignoring-anything-in-parentheses/35347645 ## (*SKIP)(*FAIL): ignore ## \\( \\): inside brackets ## [^()]*: anything but () magic.formula <- "\\([^()]*\\)(*SKIP)(*FAIL)|\\h*\\+\\h*" vec.x.rhs <- unlist(strsplit(x.rhs, split = magic.formula, perl = TRUE)) ## find all element in the vector corresponding to endpoints (i.e. ...(...) 
) ## \\w* any letter/number ## [[:print:]]* any letter/number/punctuation/space index.endpoint <- grep("\\w*\\([[:print:]]*\\)$", vec.x.rhs) index.strata <- setdiff(1:length(vec.x.rhs), index.endpoint) ## ** strata variables if(length(index.strata)==0){ strata <- NULL }else{ strata <- vec.x.rhs[index.strata] } ## ** number of endpoint variables vec.x.endpoint <- vec.x.rhs[index.endpoint] n.endpoint <- length(vec.x.endpoint) if(n.endpoint==0){ stop("initFormula: x must contain endpoints \n", "nothing of the form type(endpoint,threshold,status) found in the formula \n") } ## ** extract endpoints and additional arguments threshold <- NULL status <- NULL endpoint <- NULL operator <- NULL censoring <- NULL restriction <- NULL weightEndpoint <- NULL type <- NULL validArgs <- c("endpoint","mean", "status", "std", "iid", "threshold","operator","weight","censoring","restriction") ## split around parentheses ls.x.endpoint <- strsplit(vec.x.endpoint, split = "(", fixed = TRUE) for(iE in 1:n.endpoint){ ## extract type candidate <- tolower(ls.x.endpoint[[iE]][1]) if(candidate %in% c("b","bin","binary")){ type <- c(type, candidate) iValidArgs <- setdiff(validArgs,c("status","threshold","mean","std","iid","restriction")) default.censoring <- "right" }else if(candidate %in% c("c","cont","continuous")){ type <- c(type, candidate) iValidArgs <- setdiff(validArgs,c("status","mean","std","iid")) default.censoring <- "right" }else if(candidate %in% c("t","tte","time","timetoevent")){ type <- c(type, candidate) iValidArgs <- setdiff(validArgs,c("mean","std","iid")) default.censoring <- "right" }else if(candidate %in% c("g","gaus","gaussian")){ type <- c(type, candidate) iValidArgs <- setdiff(validArgs, c("endpoint","status","censoring","restriction")) default.censoring <- as.character(NA) }else if(candidate %in% c("s","strat","strata")){ strata <- c(strata, gsub(")", replacement = "",ls.x.endpoint[[iE]][2])) next }else{ stop("initFormula: cannot convert the element ",paste(ls.x.endpoint[[iE]],collapse="(")," in the formula to a useful information.\n") } ## get each argument iVec.args <- strsplit(gsub(")", replacement = "",ls.x.endpoint[[iE]][2]), split = ",", fixed = TRUE)[[1]] n.args <- length(iVec.args) ## check size if(n.args==0){ stop("initFormula: invalid formula \n", vec.x.rhs[iE]," must contain the name of the endpoint between the parentheses \n" ) } if(n.args>4){ stop("initFormula: invalid formula \n", x[iE]," has too many arguments (maximum 4: endpoint, threshold, status variable, operator) \n") } ## extract name of each argument iIndex.name <- grep("=",iVec.args) iArg <- gsub("^[[:print:]]*=", replacement = "", iVec.args) iName <- rep(as.character(NA),n.args) ## use existing names if(length(iIndex.name)>0){ iiName <- gsub("=[[:print:]]*$","",iVec.args[iIndex.name]) iName[iIndex.name] <- iiName if(any(iiName %in% iValidArgs == FALSE)){ stop("initFormula: invalid formula \n", vec.x.rhs[iE]," contains arguments that are not \"",paste0(iValidArgs,sep = "\" \""),"\" \n") } if( any(duplicated(iiName)) ){ stop("initFormula: invalid formula \n", vec.x.rhs[iE]," contains arguments with the same name \n") } }else{ iiName <- NULL } ## add missing names n.missingNames <- n.args - length(iiName) if(n.missingNames>0){ iName[setdiff(1:n.args,iIndex.name)] <- setdiff(iValidArgs,iiName)[1:n.missingNames] } ## rename if("mean" %in% iName){ iName[iName=="mean"] <- "endpoint" } if("std" %in% iName){ iName[iName=="std"] <- "status" } if("iid" %in% iName){ iName[iName=="iid"] <- "censoring" } ## extract arguments endpoint 
<- c(endpoint, gsub("\"","",iArg[iName=="endpoint"])) if("threshold" %in% iName){ thresholdTempo <- try(eval(expr = parse(text = iArg[iName=="threshold"])), silent = TRUE) if(inherits(thresholdTempo,"try-error")){ thresholdTempo <- try(eval(expr = parse(text = iArg[iName=="threshold"]), envir = envir), silent = TRUE) if(inherits(thresholdTempo,"try-error")){ stop(iArg[iName=="threshold"]," does not refer to a valid threshold \n", "Should be numeric or the name of a variable in the global workspace \n") } } if(inherits(thresholdTempo, "function")){ packageTempo <- environmentName(environment(thresholdTempo)) if(nchar(packageTempo)>0){ txt <- paste0("(package ",packageTempo,")") }else{ txt <- "" } stop(iArg[iName=="threshold"]," is already defined as a function ",txt,"\n", "cannot be used to specify the threshold \n") } threshold <- c(threshold, as.numeric(thresholdTempo)) }else{ threshold <- c(threshold, NA) } if("status" %in% iName){ status <- c(status, gsub("\"","",iArg[iName=="status"])) }else{ status <- c(status, "..NA..") } if("operator" %in% iName){ operator <- c(operator, gsub("\"","",iArg[iName=="operator"])) }else{ operator <- c(operator, ">0") } if("weight" %in% iName){ weightEndpoint <- c(weightEndpoint, as.numeric(eval(expr = parse(text = iArg[iName=="weight"])))) }else if(hierarchical){ weightEndpoint <- c(weightEndpoint, 1) }else{ weightEndpoint <- c(weightEndpoint, as.numeric(NA)) } if("censoring" %in% iName){ censoring <- c(censoring, gsub("\"","",iArg[iName=="censoring"])) }else{ censoring <- c(censoring, default.censoring) } if("restriction" %in% iName){ restrictionTempo <- try(eval(expr = parse(text = iArg[iName=="restriction"])), silent = TRUE) if(inherits(restrictionTempo,"try-error")){ restrictionTempo <- try(eval(expr = parse(text = iArg[iName=="restriction"]), envir = envir), silent = TRUE) if(inherits(restrictionTempo,"try-error")){ stop(iArg[iName=="restriction"]," does not refer to a valid restriction \n", "Should be numeric or the name of a variable in the global workspace \n") } } if(inherits(restrictionTempo, "function")){ packageTempo <- environmentName(environment(restrictionTempo)) if(nchar(packageTempo)>0){ txt <- paste0("(package ",packageTempo,")") }else{ txt <- "" } stop(iArg[iName=="restriction"]," is already defined as a function ",txt,"\n", "cannot be used to specify the restriction \n") } restriction <- c(restriction, as.numeric(restrictionTempo)) }else{ restriction <- c(restriction, as.numeric(NA)) } } ## ** export if(all(is.na(weightEndpoint))){ weightEndpoint <- rep(1/length(weightEndpoint), length(weightEndpoint)) }else if(sum(weightEndpoint, na.rm = TRUE)<1){ weightEndpoint[!is.na(weightEndpoint)] <- (1-sum(weightEndpoint, na.rm = TRUE))/sum(is.na(weightEndpoint)) } out <- list(treatment = treatment, type = type, endpoint = endpoint, threshold = threshold, status = status, operator = operator, weightEndpoint = weightEndpoint, censoring = censoring, restriction = restriction, strata = strata) return(out) }
/scratch/gouwar.j/cran-all/cranData/BuyseTest/R/BuyseTest-initialization.R
#' @docType package #' @title BuyseTest package: Generalized Pairwise Comparisons #' @name BuyseTest-package #' #' @description Implementation of the Generalized Pairwise Comparisons. #' \code{\link{BuyseTest}} is the main function of the package. See the vignette for an overview of the functionalities of the package. #' Run \code{citation("BuyseTest")} in R for how to cite this package in scientific publications. #' See the section reference below for examples of application in clinical studies. #' #' The Generalized Pairwise Comparisons form all possible pairs of observations, #' one observation being taken from the intervention group and the other from the control group, #' and compare the difference in endpoints (\eqn{Y-X}) to the threshold of clinical relevance (\eqn{\tau}). #' #' For a single endpoint, #' if the difference is greater than or equal to the threshold of clinical relevance (\eqn{Y \ge X + \tau}), #' the pair is classified as favorable (i.e. win). #' If the difference is less than or equal to minus the threshold of clinical relevance (\eqn{X \ge Y + \tau}), #' the pair is classified as unfavorable (i.e. loss). #' Otherwise the pair is classified as neutral. In the presence of censoring, it might not be possible to compare the difference to the threshold. In such cases the pair #' is classified as uninformative. #' #' Simultaneous analysis of several endpoints is performed by prioritizing the endpoints, assigning the highest priority to the endpoint considered the most clinically relevant. #' The endpoint with highest priority is analyzed first, and neutral and uninformative pairs are analyzed regarding endpoints of lower priority. #' #' \strong{Keywords}: documented methods/functions are classified according to the following keywords \itemize{ #' \item models: function fitting a statistical model/method based on a dataset (e.g. \code{\link{auc}}, \code{\link{brier}}, \code{\link{BuyseTest}}, \code{\link{BuyseTTEM}}, \code{\link{CasinoTest}}, \code{\link{performance}}) #' \item htest: methods performing statistical inference based on an existing model (e.g. \code{\link{BuyseMultComp}}, \code{\link{performanceResample}}, \code{\link{powerBuyseTest}}, \code{\link{sensitivity}}) #' \item methods: extractors (e.g. \code{\link{getCount}}, \code{\link{getPairScore}}, \code{\link{getPseudovalue}}, \code{\link{getSurvival}}, \code{\link{getIid}}) #' \item print: concise display of an object in the console (e.g. \code{print}, \code{summary}) #' \item utilities: function used to facilitate user interactions (e.g. \code{\link{BuyseTest.options}}, \code{\link{constStrata}}) #' \item hplot: graphical display (e.g. \code{\link{autoplot.S3sensitivity}}) #' \item internal: function used internally but that needs to be exported for parallel calculations (e.g. \code{\link{GPC_cpp}}) #' \item datagen: function for generating data sets (e.g. \code{\link{simBuyseTest}}, \code{\link{simCompetingRisks}}) #' \item classes: definition of S4 classes #' } #' #' @references #' Method papers on the GPC procedure and its extensions: #' On the GPC procedure: Marc Buyse (2010). \bold{Generalized pairwise comparisons of prioritized endpoints in the two-sample problem}. \emph{Statistics in Medicine} 29:3245-3257 \cr #' On the win ratio: D. Wang, S. Pocock (2016). \bold{A win ratio approach to comparing continuous non-normal outcomes in clinical trials}. \emph{Pharmaceutical Statistics} 15:238-245 \cr #' On the Peron's scoring rule: J. Peron, M. Buyse, B. Ozenne, L. Roche and P. Roy (2018). 
\bold{An extension of generalized pairwise comparisons for prioritized outcomes in the presence of censoring}. \emph{Statistical Methods in Medical Research} 27: 1230-1239. \cr #' On the Gehan's scoring rule: Gehan EA (1965). \bold{A generalized two-sample Wilcoxon test for doubly censored data}. \emph{Biometrika} 52(3):650-653 \cr #' On inference in GPC using the U-statistic theory: Ozenne B, Budtz-Jorgensen E, Peron J (2021). \bold{The asymptotic distribution of the Net Benefit estimator in presence of right-censoring}. \emph{Statistical Methods in Medical Research} 2021 doi:10.1177/09622802211037067 \cr #' On how to handle right-censoring: J. Peron, M. Idlhaj, D. Maucort-Boulch, et al. (2021) \bold{Correcting the bias of the net benefit estimator due to right-censored observations}. \emph{Biometrical Journal} 63: 893–906. \cr #' #' Examples of application in clinical studies: \cr #' J. Peron, P. Roy, K. Ding, W. R. Parulekar, L. Roche, M. Buyse (2015). \bold{Assessing the benefit-risk of new treatments using generalized pairwise comparisons: the case of erlotinib in pancreatic cancer}. \emph{British journal of cancer} 112:(6)971-976. \cr #' J. Peron, P. Roy, T. Conroy, F. Desseigne, M. Ychou, S. Gourgou-Bourgade, T. Stanbury, L. Roche, B. Ozenne, M. Buyse (2016). \bold{An assessment of the benefit-risk balance of FOLFORINOX in metastatic pancreatic adenocarcinoma}. \emph{Oncotarget} 7:82953-60, 2016. \cr #' #' Comparison between the net benefit and alternative measures of treatment effect: \cr #' J. Peron, P. Roy, B. Ozenne, L. Roche, M. Buyse (2016). \bold{The net chance of a longer survival as a patient-oriented measure of benefit in randomized clinical trials}. \emph{JAMA Oncology} 2:901-5. \cr #' E. D. Saad , J. R. Zalcberg, J. Peron, E. Coart, T. Burzykowski, M. Buyse (2018). \bold{Understanding and communicating measures of treatment effect on survival: can we do better?}. \emph{J Natl Cancer Inst}. #' @useDynLib BuyseTest, .registration=TRUE #' @import data.table #' @importFrom scales percent #' @importFrom ggplot2 autoplot #' @importFrom rlang .data #' @importFrom lava categorical coxExponential.lvm distribution eventTime iid lvm sim vars latent<- #' @import methods #' @importFrom parallel detectCores #' @import Rcpp #' @importFrom stats as.formula delete.response formula na.omit rbinom predict setNames terms #' @importFrom stats4 summary #' @importFrom prodlim prodlim Hist #' @importFrom utils capture.output tail NULL
/scratch/gouwar.j/cran-all/cranData/BuyseTest/R/BuyseTest-package.R
## * Documentation - print function called by BuyseTest #' @name internal-print #' @title internal functions for BuyseTest - display #' @description Functions called by \code{\link{BuyseTest}} to display the settings. #' @noRd #' #' @author Brice Ozenne ## * Function printGeneral printGeneral <- function(status, D, D.TTE, data, endpoint, hierarchical, level.strata, level.treatment, scoring.rule, M.status, method.score, paired, neutral.as.uninf, correction.uninf, operator, restriction, strata, threshold, trace, treatment, type, weightEndpoint, Wscheme, ...){ if(!is.null(strata)){ n.strata <- length(level.strata) }else{ n.strata <- 1 } ## ** Prepare ## endpoint name.col <- c("NA", "endpoint","type","operator","restriction","threshold","event") df.endpoint <- data.frame(matrix(NA, nrow = D, ncol = 7, dimnames = list(NULL, name.col) ), stringsAsFactors = FALSE) if(hierarchical){ df.endpoint[,1] <- paste0(" ",1:D) names(df.endpoint)[1] <- " priority" }else{ df.endpoint[,1] <- paste0(" ",weightEndpoint) names(df.endpoint)[1] <- " weight" } df.endpoint$endpoint <- endpoint if(any(type=="gaus")){ df.endpoint$endpoint[type=="gaus"] <- paste0(df.endpoint$endpoint[type=="gaus"],",",status[type=="gaus"]) } df.endpoint$type <- sapply(type,switch, "bin"="binary", "cont"="continuous", "tte"="time to event", "gaus"="gaussian") df.endpoint$operator <- ifelse(operator>0,"higher is favorable","lower is favorable") df.endpoint$threshold[type!="bin"] <- threshold[type!="bin"] df.endpoint$restriction <- restriction df.endpoint$event[type=="tte"] <- status[type=="tte"] ## add white space df.endpoint$endpoint <- paste0(df.endpoint$endpoint," ") df.endpoint$type <- paste0(df.endpoint$type," ") df.endpoint$operator <- paste0(df.endpoint$operator," ") if(all(threshold <= 1e-12)){ df.endpoint$threshold <- NULL }else{ df.endpoint$threshold <- ifelse(df.endpoint$threshold<=1e-12,NA,paste0(df.endpoint$threshold," ")) } if(all(is.na(restriction))){ df.endpoint$restriction <- NULL }else{ df.endpoint$restriction <- ifelse(is.na(df.endpoint$restriction),NA,paste0(df.endpoint$restriction," ")) } if(all(type!="tte")){ df.endpoint$event <- NULL }else{ txt.eventType <- sapply(status[type=="tte"], function(iC){ return(paste0(" (",paste(sort(unique(M.status[,iC])), collapse = " "),")")) }) df.endpoint$event[type=="tte"] <- paste0(df.endpoint$event[type=="tte"],txt.eventType) } df.endpoint[is.na(df.endpoint)] <- "" ## ** Display cat("Settings \n") cat(" - 2 groups ",if(D>1){" "},": Control = ",level.treatment[1]," and Treatment = ",level.treatment[2],"\n", sep = "") cat(" - ",D," endpoint",if(D>1){"s"},": \n", sep = "") print(df.endpoint, row.names = FALSE, quote = FALSE, right = FALSE) if(paired){ txt.variable <- switch(as.character(length(strata)), "1" = "variable", "variables") cat(" - ", n.strata, " pairs (",txt.variable,": ",paste(strata, collapse = " "),") \n", sep = "") }else if(n.strata>1){ txt.variable <- switch(as.character(length(strata)), "1" = "variable", "variables") cat(" - ", n.strata, " strata : levels ",paste(level.strata, collapse = " ") , " (",txt.variable,": ",paste(strata, collapse = " "),") \n", sep = "") } if(D>1){ cat(" - neutral pairs: ") if(all(neutral.as.uninf)){ cat("re-analyzed using lower priority endpoints \n") }else if(all(!neutral.as.uninf)){ cat("ignored at lower priority endpoints \n") }else{ cat("re-analyzed using lower priority endpoints for endpoint ", paste(which(neutral.as.uninf), collapse = ", "), " \n otherwise ignored at lower priority endpoints \n",sep="") } } if(D.TTE>0){ cat(" - 
right-censored pairs: ") n.CR <- sum(grep("2", txt.eventType)) if(n.CR==D.TTE){ txt.Peron <- "cif" }else if(n.CR==0){ txt.Peron <- "survival" }else{ txt.Peron <- "survival/cif" } switch(as.character(scoring.rule), "0" = cat("deterministic score or uninformative \n"), "1" = cat("probabilistic score based on the ",txt.Peron," curves \n",sep="") ) } ## if(trace>2){ ## if ( (scoring.rule == "3" || correction.uninf) && D > 1) { ## cat(" - Current contribution of a pair based on the weights computed at previous enpoints: \n") ## print(Wscheme) ## } ## } return(NULL) } ## * Function printInference printInference <- function(method.inference, n.resampling, cpus, seed, ...){ if(method.inference != "none"){ ## method if(attr(method.inference,"ustatistic")){ txt.type <- "moments of the U-statistic" }else if(attr(method.inference,"bootstrap")){ txt.type <- paste0("non-parametric bootstrap with ",n.resampling," samples") }else if(attr(method.inference,"permutation")){ txt.type <- paste0("permutation test with ",n.resampling," permutations") } if(!is.na(attr(method.inference,"resampling-strata"))){ txt.type <- paste0(txt.type, " (stratified by \"",paste(attr(method.inference,"resampling-strata"),sep="\" \""),"\")") } ## display cat("Estimation of the estimator's distribution \n", " - method: ",txt.type,"\n", sep = "") if(!attr(method.inference,"ustatistic")){ cat(" - cpus : ",cpus,"\n", sep = "") if (!is.null(seed)) { cat(" - seeds : ",seed, sep = "") } } } return(NULL) }
/scratch/gouwar.j/cran-all/cranData/BuyseTest/R/BuyseTest-print.R
## * Documentation - BuyseTest #' @name BuyseTest #' @title Two-group GPC #' #' @description Performs Generalized Pairwise Comparisons (GPC) between two groups. #' Can handle one or several binary, continuous and time-to-event endpoints. #' #' @param formula [formula] a symbolic description of the GPC model, #' typically \code{treatment ~ type1(endpoint1) + type2(endpoint2, threshold2) + strata}. #' See Details, section "Specification of the GPC model". #' @param treatment,endpoint,type,threshold,status,operator,censoring,restriction,strata Alternative to \code{formula} for describing the GPC model. #' See Details, section "Specification of the GPC model". #' @param data [data.frame] dataset. #' @param scoring.rule [character] method used to compare the observations of a pair in presence of right censoring (i.e. \code{"timeToEvent"} endpoints). #' Can be \code{"Gehan"} or \code{"Peron"}. #' See Details, section "Handling missing values". #' @param pool.strata [character] weights used to combine estimates across strata. Can be #' \code{"Buyse"} to weight proportionally to the number of pairs in the strata, #' \code{"CMH"} to weight proportionally to the ratio between the number of pairs in the strata and the number of observations in the strata. #' \code{"equal"} to weight equally each strata, #' or \code{"var-netBenefit"} to weight each strata proportionally to the precision of its estimated net benefit (similar syntax for the win ratio: \code{"var-winRatio"}) #' @param correction.uninf [integer] should a correction be applied to remove the bias due to the presence of uninformative pairs? #' 0 indicates no correction, 1 impute the average score of the informative pairs, and 2 performs IPCW. #' See Details, section "Handling missing values". #' @param model.tte [list] optional survival models relative to each time to each time to event endpoint. #' Models must \code{prodlim} objects and stratified on the treatment and strata variable. When used, the uncertainty from the estimates of these survival models is ignored. #' @param method.inference [character] method used to compute confidence intervals and p-values. #' Can be \code{"none"}, \code{"u-statistic"}, \code{"permutation"}, \code{"studentized permutation"}, \code{"bootstrap"}, \code{"studentized bootstrap"}. #' See Details, section "Statistical inference". #' @param n.resampling [integer] the number of permutations/samples used for computing the confidence intervals and the p.values. #' See Details, section "Statistical inference". #' @param strata.resampling [character] the variable on which the permutation/sampling should be stratified. #' See Details, section "Statistical inference". #' @param hierarchical [logical] should only the uninformative pairs be analyzed at the lower priority endpoints (hierarchical GPC)? #' Otherwise all pairs will be compaired for all endpoint (full GPC). #' @param weightEndpoint [numeric vector] weights used to cumulating the pairwise scores over the endpoints. #' Only used when \code{hierarchical=FALSE}. Disregarded if the argument \code{formula} is defined. #' @param weightObs [character or numeric vector] weights or variable in the dataset containing the weight associated to each observation. #' These weights are only considered when performing GPC (but not when fitting surival models). #' @param neutral.as.uninf [logical vector] should paired classified as neutral be re-analyzed using endpoints of lower priority (as it is done for uninformative pairs). 
#' See Details, section "Handling missing values". #' @param add.halfNeutral [logical] should half of the neutral score be added to the favorable and unfavorable scores? #' @param keep.pairScore [logical] should the result of each pairwise comparison be kept? #' @param seed [integer, >0] Random number generator (RNG) state used when starting resampling. #' If \code{NULL} no state is set. #' @param cpus [integer, >0] the number of CPU to use. #' Only the permutation test can use parallel computation. #' See Details, section "Statistical inference". #' @param trace [integer] should the execution of the function be traced ? \code{0} remains silent #' and \code{1}-\code{3} correspond to a more and more verbose output in the console. #' #' @details #' #' \bold{Specification of the GPC model} \cr #' There are two way to specify the GPC model in \code{BuyseTest}. #' A \emph{Formula interface} via the argument \code{formula} where the response variable should be a binary variable defining the treatment arms. #' The rest of the formula should indicate the endpoints by order of priority and the strata variables (if any). #' A \emph{Vector interface} using the following arguments \itemize{ #' \item \code{treatment}: [character] name of the treatment variable identifying the control and the experimental group. #' Must have only two levels (e.g. \code{0} and \code{1}). #' \item \code{endpoint}: [character vector] the name of the endpoint variable(s). #' \item \code{threshold}: [numeric vector] critical values used to compare the pairs (threshold of minimal important difference). #' A pair will be classified as neutral if the difference in endpoint is strictly below this threshold. #' There must be one threshold for each endpoint variable; it must be \code{NA} for binary endpoints and positive for continuous or time to event endpoints. #' \item \code{status}: [character vector] the name of the binary variable(s) indicating whether the endpoint was observed or censored. #' Must value \code{NA} when the endpoint is not a time to event. #' \item \code{operator}: [character vector] the sign defining a favorable endpoint. #' \code{">0"} indicates that higher values are favorable while "<0" indicates the opposite. #' \item \code{type}: [character vector] indicates whether it is #' a binary outcome (\code{"b"}, \code{"bin"}, or \code{"binary"}), #' a continuous outcome (\code{"c"}, \code{"cont"}, or \code{"continuous"}), #' or a time to event outcome (\code{"t"}, \code{"tte"}, \code{"time"}, or \code{"timetoevent"}) #' \item \code{censoring}: [character vector] is the endpoint subject to right or left censoring (\code{"left"} or \code{"right"}). The default is right-censoring. #' \item \code{restriction}: [numeric vector] value above which any difference is classified as neutral. #' \item \code{strata}: [character vector] if not \code{NULL}, the GPC will be applied within each group of patient defined by the strata variable(s). #' } #' The formula interface can be more concise, especially when considering few outcomes, but may be more difficult to apprehend for new users. #' Note that arguments \code{endpoint}, \code{threshold}, \code{status}, \code{operator}, \code{type}, and \code{censoring} must have the same length. 
\cr \cr \cr #' #' #' \bold{GPC procedure} \cr #' The GPC procedure forms all pairs of observations, one belonging to the experimental group and the other to the control group, and classifies them into 4 categories: \itemize{ #' \item \emph{Favorable pair}: the endpoint is better for the observation in the experimental group. #' \item \emph{Unfavorable pair}: the endpoint is better for the observation in the control group. #' \item \emph{Neutral pair}: the difference between the endpoints of the two observations is (in absolute value) below the threshold. When \code{threshold=0}, neutral pairs correspond to pairs with equal endpoint. Lower-priority outcomes (if any) are then used to classify the pair into favorable/unfavorable. #' \item \emph{Uninformative pair}: censoring/missingness prevents the pair from being classified as favorable, unfavorable or neutral. #' } #' With complete data, pairs can be decidedly classified as favorable/unfavorable/neutral. #' In presence of missing values, the GPC procedure uses the scoring rule (argument \code{scoring.rule}) and the correction for uninformative pairs (argument \code{correction.uninf}) to classify the pairs. #' The classification may not be 0 or 1, e.g. it is the probability that the pair is favorable/unfavorable/neutral with the Peron's scoring rule. #' To export the classification of each pair set the argument \code{keep.pairScore} to \code{TRUE} and call the function \code{getPairScore} on the result of the \code{BuyseTest} function. \cr \cr \cr #' #' #' \bold{Handling missing values} #' \itemize{ #' \item \code{scoring.rule}: indicates how to handle right-censoring in time to event endpoints using information from the survival curves. #' The Gehan's scoring rule (argument \code{scoring.rule="Gehan"}) only scores pairs that can be decidedly classified as favorable, unfavorable, or neutral #' while the Peron's scoring rule (argument \code{scoring.rule="Peron"}) uses the empirical survival curves of each group to also score the pairs that cannot be decidedly classified. #' The Peron's scoring rule is the recommended scoring rule but only handles right-censoring. #' \item \code{correction.uninf}: indicates how to handle missing values that could not be classified by the scoring rule. \describe{ #' \item{\code{correction.uninf=0}}{ treat them as uninformative: this is equivalent to a complete case analysis when \code{neutral.as.uninf=FALSE}, while when \code{neutral.as.uninf=TRUE}, uninformative pairs are treated as neutral, i.e., analyzed at the following endpoint (if any). This approach will (generally) lead to biased estimates for the proportion of favorable, unfavorable, or neutral pairs.} #' \item{\code{correction.uninf=1}}{ imputes to the uninformative pairs the average score of the informative pairs, i.e. assumes that uninformative pairs would on average behave like informative pairs. This is therefore the recommended approach when this assumption is reasonable, typically when the tail of the survival function estimated by the Kaplan–Meier method is close to 0.} #' \item{\code{correction.uninf=2}}{ uses inverse probability of censoring weights (IPCW), i.e. up-weights informative pairs to represent uninformative pairs. 
It also assumes that uninformative pairs would on average behave like informative pairs and is only recommended when the analysis is stopped after the first endpoint with uninformative pairs.} #' } #' Note that both corrections will convert the whole proportion of uninformative pairs of a given endpoint into favorable, unfavorable, or neutral pairs. See Peron et al (2021) for further details and recommendations. \cr \cr #' } #' #' #' \bold{Statistical inference} \cr #' The argument \code{method.inference} defines how to approximate the distribution of the GPC estimators and so how standard errors, confidence intervals, and p-values are computed. #' Available methods are: #' \itemize{ #' \item argument \code{method.inference="none"}: only the point estimate is computed which makes the execution of the \code{BuyseTest} faster than with the other methods. #' \item argument \code{method.inference="u-statistic"}: uses a Gaussian approximation to obtain the distribution of the GPC estimators. #' The U-statistic theory indicates that this approximation is asymptotically exact. #' The variance is computed using an H-projection of order 1 (default option), which is a consistent but downward biased estimator. #' An unbiased estimator can be obtained using an H-projection of order 2 (only available for the uncorrected Gehan's scoring rule, see \code{BuyseTest.options}). #' \bold{WARNING}: the current implementation of the H-projection is not valid when using corrections for uninformative pairs (\code{correction.uninf=1}, or \code{correction.uninf=2}). #' \item argument \code{method.inference="permutation"}: performs a permutation test, estimating in each sample the summary statistics (net benefit, win ratio). #' \item argument \code{method.inference="studentized permutation"}: performs a permutation test, estimating in each sample the summary statistics (net benefit, win ratio) and the variance-covariance matrix of the estimator. #' \item argument \code{method.inference="bootstrap"}: performs a non-parametric bootstrap, estimating in each sample the summary statistics (net benefit, win ratio). #' \item argument \code{method.inference="studentized bootstrap"}: performs a non-parametric bootstrap, estimating in each sample the summary statistics (net benefit, win ratio) and the variance-covariance matrix of the estimator. #' } #' Additional arguments for permutation and bootstrap resampling: #' \itemize{ #' \item \code{strata.resampling} If \code{NA} or of length 0, the permutation/non-parametric bootstrap will be performed by resampling in the whole sample. #' Otherwise, the permutation/non-parametric bootstrap will be performed separately for each level that the variable defined in \code{strata.resampling} takes. #' \item \code{n.resampling} sets the number of permutations/samples used. #' A large number of permutations (e.g. \code{n.resampling=10000}) is needed to obtain accurate CIs and p-values. See (Buyse et al., 2010) for more details. #' \item \code{seed}: the seed is used to generate one seed per sample. These seeds are the same whether one or several CPUs are used. #' \item \code{cpus} indicates whether the resampling procedure can be split across several cpus to save time. Can be set to \code{"all"} to use all available cpus. #' The detection of the number of cpus relies on the \code{detectCores} function from the \emph{parallel} package. 
\cr \cr #' } #' #' \bold{Pooling results across strata} \cr Consider \eqn{K} strata and denote by \eqn{m_k} and \eqn{n_k} the sample size in the control and active arm (respectively) for strata \eqn{k}. Let \eqn{\sigma_k} be the standard error of the strata-specific summary statistic (e.g. net benefit). The strata-specific weights, \eqn{w_k}, are given by: #' \itemize{ #' \item \code{"CMH"}: \eqn{w_k=\frac{\frac{m_k \times n_k}{m_k + n_k}}{\sum_{l=1}^K \frac{m_l \times n_l}{m_l + n_l}}}. Optimal if the odds ratios are constant across strata. #' \item \code{"equal"}: \eqn{w_k=\frac{1}{K}}. #' \item \code{"Buyse"}: \eqn{w_k=\frac{m_k \times n_k}{\sum_{l=1}^K m_l \times n_l}}. Optimal if the risk difference is constant across strata. #' \item \code{"var-*"} (e.g. \code{"var-netBenefit"}): \eqn{w_k=\frac{1/\sigma^2_k}{\sum_{l=1}^K 1/\sigma^2_l}}\cr \cr #' } #' #' \bold{Default values} \cr #' The default values of the arguments #' \code{scoring.rule}, \code{correction.uninf}, \code{method.inference}, \code{n.resampling}, #' \code{hierarchical}, \code{neutral.as.uninf}, \code{keep.pairScore}, \code{strata.resampling}, #' \code{cpus}, \code{trace} are read from \code{BuyseTest.options()}. \cr #' Additional (hidden) arguments are \itemize{ #' \item \code{alternative} [character] the alternative hypothesis. Must be one of "two.sided", "greater" or "less" (used by \code{confint}). #' \item \code{conf.level} [numeric] level for the confidence intervals (used by \code{confint}). #' \item \code{keep.survival} [logical] export the survival values used by the Peron's scoring rule. #' \item \code{order.Hprojection} [1 or 2] the order of the H-projection used to compute the variance when \code{method.inference="u-statistic"}. #' } #' #' @return An \R object of class \code{\linkS4class{S4BuyseTest}}. #' #' @references #' On the GPC procedure: Marc Buyse (2010). \bold{Generalized pairwise comparisons of prioritized endpoints in the two-sample problem}. \emph{Statistics in Medicine} 29:3245-3257 \cr #' On the win ratio: D. Wang, S. Pocock (2016). \bold{A win ratio approach to comparing continuous non-normal outcomes in clinical trials}. \emph{Pharmaceutical Statistics} 15:238-245 \cr #' On the Peron's scoring rule: J. Peron, M. Buyse, B. Ozenne, L. Roche and P. Roy (2018). \bold{An extension of generalized pairwise comparisons for prioritized outcomes in the presence of censoring}. \emph{Statistical Methods in Medical Research} 27: 1230-1239. \cr #' On the Gehan's scoring rule: Gehan EA (1965). \bold{A generalized two-sample Wilcoxon test for doubly censored data}. \emph{Biometrika} 52(3):650-653 \cr #' On inference in GPC using the U-statistic theory: Ozenne B, Budtz-Jorgensen E, Peron J (2021). \bold{The asymptotic distribution of the Net Benefit estimator in presence of right-censoring}. \emph{Statistical Methods in Medical Research}. doi:10.1177/09622802211037067 \cr #' On how to handle right-censoring: J. Peron, M. Idlhaj, D. Maucort-Boulch, et al. (2021) \bold{Correcting the bias of the net benefit estimator due to right-censored observations}. \emph{Biometrical Journal} 63: 893–906. #' #' @seealso #' \code{\link{S4BuyseTest-summary}} for a summary of the results of generalized pairwise comparison. \cr #' \code{\link{S4BuyseTest-confint}} for exporting estimates with confidence intervals and p-values. \cr #' \code{\link{S4BuyseTest-model.tables}} for exporting the number or percentage of favorable/unfavorable/neutral/uninformative pairs. 
\cr #' \code{\link{S4BuyseTest-sensitivity}} for performing a sensitivity analysis on the choice of the threshold(s). \cr #' \code{\link{S4BuyseTest-plot}} for graphical display of the pairs across endpoints. \cr #' \code{\link{S4BuyseTest-getIid}} for exporting the first order H-decomposition. \cr #' \code{\link{S4BuyseTest-getPairScore}} for exporting the scoring of each pair. #' @keywords models #' @author Brice Ozenne ## * BuyseTest (example) #' @rdname BuyseTest #' @examples #' library(data.table) #' #' #### simulate some data #### #' set.seed(10) #' df.data <- simBuyseTest(1e2, n.strata = 2) #' #' ## display #' if(require(prodlim)){ #' resKM_tempo <- prodlim(Hist(eventtime,status)~treatment, data = df.data) #' plot(resKM_tempo) #' } #' #' #### one time to event endpoint #### #' BT <- BuyseTest(treatment ~ TTE(eventtime, status = status), data= df.data) #' #' summary(BT) ## net benefit #' model.tables(BT) ## export the table at the end of summary #' summary(BT, percentage = FALSE) #' summary(BT, statistic = "winRatio") ## win Ratio #' #' ## permutation instead of asymptotics to compute the p-value #' \dontrun{ #' BTperm <- BuyseTest(treatment ~ TTE(eventtime, status = status), data=df.data, #' method.inference = "permutation", n.resampling = 1e3) #' } #' \dontshow{ #' BTperm <- BuyseTest(treatment ~ TTE(eventtime, status = status), data=df.data, #' method.inference = "permutation", n.resampling = 1e1, trace = 0) #' } #' summary(BTperm) #' summary(BTperm, statistic = "winRatio") #' #' ## same with parallel calculations #' \dontrun{ #' BTperm <- BuyseTest(treatment ~ TTE(eventtime, status = status), data=df.data, #' method.inference = "permutation", n.resampling = 1e3, cpus = 8) #' summary(BTperm) #' } #' #' ## method Gehan is much faster but does not optimally handle censored observations #' BT <- BuyseTest(treatment ~ TTE(eventtime, status = status), data=df.data, #' scoring.rule = "Gehan", trace = 0) #' summary(BT) #' #' #### one time to event endpoint: only differences in survival over 1 unit #### #' BT <- BuyseTest(treatment ~ TTE(eventtime, threshold = 1, status = status), data=df.data) #' summary(BT) #' #' #### one time to event endpoint with a strata variable #' BTS <- BuyseTest(treatment ~ strata + TTE(eventtime, status = status), data=df.data) #' summary(BTS) #' #' #### several endpoints with a strata variable #' ff <- treatment ~ strata + T(eventtime, status, 1) + B(toxicity) #' ff <- update(ff, #' ~. + T(eventtime, status, 0.5) + C(score, 1) + T(eventtime, status, 0.25)) #' #' BTM <- BuyseTest(ff, data=df.data) #' summary(BTM) #' plot(BTM) #' #' #### real example: veteran dataset of the survival package #### #' ## Only one endpoint. Type = Time-to-event. Threshold = 0. 
Stratification by histological subtype #' ## scoring.rule = "Gehan" #' #' if(require(survival)){ #' \dontrun{ #' data(cancer, package = "survival") ## import veteran #' #' ## scoring.rule = "Gehan" #' BT_Gehan <- BuyseTest(trt ~ celltype + TTE(time,threshold=0,status=status), #' data=veteran, scoring.rule="Gehan") #' #' summary_Gehan <- summary(BT_Gehan) #' summary_Gehan <- summary(BT_Gehan, statistic = "winRatio") #' #' ## scoring.rule = "Peron" #' BT_Peron <- BuyseTest(trt ~ celltype + TTE(time,threshold=0,status=status), #' data=veteran, scoring.rule="Peron") #' #' summary(BT_Peron) #' } #' } ## * BuyseTest (code) ##' @export BuyseTest <- function(formula, data, scoring.rule = NULL, pool.strata = NULL, correction.uninf = NULL, model.tte = NULL, method.inference = NULL, n.resampling = NULL, strata.resampling = NULL, hierarchical = NULL, weightEndpoint = NULL, weightObs = NULL, neutral.as.uninf = NULL, add.halfNeutral = NULL, keep.pairScore = NULL, seed = NULL, cpus = NULL, trace = NULL, treatment = NULL, endpoint = NULL, type = NULL, threshold = NULL, status = NULL, operator = NULL, censoring = NULL, restriction = NULL, strata = NULL){ mycall <- match.call() name.call <- names(mycall) option <- BuyseTest.options() ## ** compatibility with previous version if(!is.null(method.inference) && (method.inference=="asymptotic")){ stop("Value \"asymptotic\" for argument \'method.inference\' is obsolete. \n", "Use \"u-statistic\" instead \n") } ## ** initialize arguments (all except data, which is just converted to data.table) ## initialized arguments are stored in outArgs outArgs <- initializeArgs(status = status, correction.uninf = correction.uninf, cpus = cpus, data = data, endpoint = endpoint, formula = formula, hierarchical = hierarchical, keep.pairScore = keep.pairScore, method.inference = method.inference, scoring.rule = scoring.rule, pool.strata = pool.strata, model.tte = model.tte, n.resampling = n.resampling, strata.resampling = strata.resampling, name.call = name.call, neutral.as.uninf = neutral.as.uninf, add.halfNeutral = add.halfNeutral, operator = operator, censoring = censoring, option = option, seed = seed, strata = strata, threshold = threshold, restriction = restriction, trace = trace, treatment = treatment, type = type, weightEndpoint = weightEndpoint, weightObs = weightObs, envir = parent.frame()) ## ** test arguments if(option$check){ outTest <- do.call(testArgs, args = outArgs) } ## ** initialization data ## WARNING when updating code: names in the c() must precisely match output of initializeData, in the same order out.name <- c("data","M.endpoint","M.status", "index.C","index.T","weightObs","index.strata", "level.treatment","level.strata", "pool.strata", "method.score", "paired", "n.strata","n.obs","n.obsStrata","n.obsStrataResampling","cumn.obsStrataResampling","skeletonPeron", "scoring.rule", "iidNuisance", "nUTTE.analyzedPeron_M1", "endpoint.UTTE", "status.UTTE", "D.UTTE","index.UTTE","keep.pairScore") outArgs[out.name] <- initializeData(data = outArgs$data, type = outArgs$type, endpoint = outArgs$endpoint, Uendpoint = outArgs$Uendpoint, D = outArgs$D, scoring.rule = outArgs$scoring.rule, status = outArgs$status, Ustatus = outArgs$Ustatus, method.inference = outArgs$method.inference, censoring = outArgs$censoring, strata = outArgs$strata, pool.strata = outArgs$pool.strata, treatment = outArgs$treatment, hierarchical = outArgs$hierarchical, copy = TRUE, keep.pairScore = outArgs$keep.pairScore, endpoint.TTE = outArgs$endpoint.TTE, status.TTE = outArgs$status.TTE, iidNuisance 
= outArgs$iidNuisance, weightEndpoint = outArgs$weightEndpoint, weightObs = outArgs$weightObs) if(option$check){ if(outArgs$iidNuisance && any(outArgs$method.score == "CRPeron")){ warning("Inference via the asymptotic theory for competing risks when using the Peron's scoring rule has not been validated \n", "Consider setting \'method.inference\' to \"none\", \"bootstrap\", or \"permutation\" \n") } ## if(outArgs$precompute && any(outArgs$method.score == "CRPeron")){ ## stop("Option \'precompute\' is not available for the Peron scoring rule in the competing risk case \n") ## } if(outArgs$precompute && any(outArgs$method.score == "CRPeron")){ outArgs$precompute <- FALSE } } ## ** Display if (outArgs$trace > 1) { cat("\n Generalized Pairwise Comparisons\n\n") do.call(printGeneral, args = outArgs) cat("\n") } ## ** define environment envirBT <- environment() envirBT$.BuyseTest <- .BuyseTest envirBT$initializeData <- initializeData envirBT$calcPeron <- calcPeron envirBT$outArgs$args.model.tte <- option$args.model.tte ## ** Point estimation if (outArgs$trace > 1) { if(outArgs$iid){ cat("Point estimation and calculation of the iid decomposition") }else{ cat("Point estimation") } } outPoint <- .BuyseTest(envir = envirBT, iid = outArgs$iid, method.inference = "none", ## do not use outArgs$method.inference as when it is equal to "bootstrap" or "permutation" we need the point estimate first. pointEstimation = TRUE) if (outArgs$trace > 1) { cat("\n\n") } ## check number of pairs if(option$check){ vec.nPair <- (outPoint$count_favorable + outPoint$count_unfavorable + outPoint$count_neutral + outPoint$count_uninf )[,1] if(any(abs(outPoint$n_pairs - vec.nPair) > 0.01)){ warning("Incorrect estimation of the number of pairs \n", "Something probably went wrong - contact the package maintainer\n") } } ## convert from a list of vector (output of C++) to a list of data.table if(outArgs$keep.pairScore){ ## needed for inference with bebu outPoint$tableScore <- pairScore2dt(outPoint$tableScore, level.treatment = outArgs$level.treatment, level.strata = outArgs$level.strata, n.strata = outArgs$n.strata, endpoint = outArgs$endpoint, threshold = outArgs$threshold, restriction = outArgs$restriction) } ## ** Inference if((outArgs$method.inference != "none") && (outArgs$trace > 1)){ do.call(printInference, args = outArgs) } outResampling <- NULL if(outArgs$method.inference == "u-statistic"){ ## done in the C++ code }else if(outArgs$method.inference == "u-statistic-bebu"){ if(outArgs$keep.pairScore == FALSE){ stop("Argument \'keep.pairScore\' needs to be TRUE when argument \'method.inference\' is \"u-statistic-bebu\" \n") } ## direct computation of the variance outCovariance <- inferenceUstatisticBebu(tablePairScore = outPoint$tableScore, order = option$order.Hprojection, weightEndpoint = outArgs$weightEndpoint, n.pairs = outPoint$n_pairs, n.C = length(envirBT$outArgs$index.C), n.T = length(envirBT$outArgs$index.T), level.strata = outArgs$level.strata, n.strata = outArgs$n.strata, endpoint = outArgs$endpoint) outPoint$covariance <- outCovariance$Sigma attr(outArgs$method.inference,"Hprojection") <- option$order.Hprojection }else if(grepl("bootstrap|permutation",outArgs$method.inference)){ outResampling <- inferenceResampling(envirBT) } if((outArgs$method.inference != "none") && (outArgs$trace > 1)){ cat("\n") } ## ** Gather results into a S4BuyseTest object if(outArgs$trace > 1){ cat("Gather the results in a S4BuyseTest object \n") } keep.args <- c("index.T", "index.C", "index.strata", 
"type","endpoint","level.strata","level.treatment","scoring.rule","hierarchical","neutral.as.uninf","add.halfNeutral", "correction.uninf","method.inference","method.score","strata","threshold","restriction","weightObs","weightEndpoint","pool.strata","n.resampling","paired") mycall2 <- setNames(as.list(mycall),names(mycall)) if(!missing(formula)){ mycall2$formula <- formula ## change name of the variable into actual value } mycall2$data <- data ## change name of the variable into actual value BuyseTest.object <- do.call("S4BuyseTest", args = c(list(call = mycall2), outPoint, outArgs[keep.args], outResampling)) if(outArgs$trace > 1){ cat("\n") } ## ** export return(BuyseTest.object) } ## * .BuyseTest (code) .BuyseTest <- function(envir, iid, method.inference, pointEstimation){ ## ** Resampling outSample <- calcSample(envir = envir, method.inference = method.inference) if(is.null(outSample)){return(NULL)} ## ** Estimate survival curves with its iid if(envir$outArgs$scoring.rule == 0){ ## Gehan outSurv <- envir$outArgs$skeletonPeron }else{ ## Peron outSurv <- calcPeron(data = outSample$data, model.tte = envir$outArgs$model.tte, method.score = envir$outArgs$method.score, paired = envir$outArgs$paired, treatment = envir$outArgs$treatment, level.treatment = envir$outArgs$level.treatment, endpoint = envir$outArgs$endpoint, endpoint.TTE = envir$outArgs$endpoint.TTE, endpoint.UTTE = envir$outArgs$endpoint.UTTE, status = envir$outArgs$status, status.TTE = envir$outArgs$status.TTE, status.UTTE = envir$outArgs$status.UTTE, D.TTE = envir$outArgs$D.TTE, D.UTTE = envir$outArgs$D.UTTE, threshold = envir$outArgs$threshold, restriction = envir$outArgs$restriction, level.strata = envir$outArgs$level.strata, n.strata = envir$outArgs$n.strata, strata = envir$outArgs$strata, precompute = envir$outArgs$precompute, iidNuisance = envir$outArgs$iidNuisance * iid, out = envir$outArgs$skeletonPeron, fitter = envir$outArgs$fitter.model.tte, args = envir$outArgs$args.model.tte) index.test <- which(envir$outArgs$method.score == "SurvPeron") if(!grepl("permutation|bootstrap",method.inference) && envir$outArgs$correction.uninf>0 && length(index.test)>0 && all(is.na(envir$outArgs$restriction))){ maxLastSurv <- setNames(sapply(outSurv$lastSurv[index.test],max),envir$outArgs$endpoint[index.test])[!duplicated(envir$outArgs$endpoint[index.test])] Wtau <- BuyseTest.options("warning.correction") if(any(maxLastSurv>Wtau)){ warning("Some of the survival curves for endpoint(s) \"",paste(names(which(maxLastSurv>Wtau)),collapse = "\", \""),"\" are unknown beyond a survival of ",Wtau,".\n", "The correction of uninformative pairs assume that uninformative pairs would on average behave like informative pairs. \n", "This can be a strong assumption and have substantial impact when the tail of the survival curve is unknown. 
\n") } } } ## ** Restriction if(any(!is.na(envir$outArgs$restriction))){ ## index restricted endpoint index.rendpoint <- setdiff(which(!is.na(envir$outArgs$restriction)), ## non-NA value which(duplicated(envir$outArgs$index.endpoint))) ## not already visitied for(iE in index.rendpoint){ ## iE <- 1 iRestriction <- envir$outArgs$restriction[iE] iStatus <- envir$outArgs$index.status[iE]+1 if(envir$outArgs$operator[iE]==1){ ## ">0" if(envir$outArgs$method.score[iE] %in% c("TTEgehan","SurvPeron","CRPeron")){ ## right censoring envir$outArgs$M.status[envir$outArgs$M.endpoint[,iE]>iRestriction,iStatus] <- 1/2 } envir$outArgs$M.endpoint[envir$outArgs$M.endpoint[,iE]>iRestriction,iE] <- iRestriction }else if(envir$outArgs$operator[iE]==-1){ ## "<0" if(envir$outArgs$method.score[iE] %in% c("TTEgehan2")){ ## left censoring envir$outArgs$M.status[envir$outArgs$M.endpoint[,iE]<iRestriction,iStatus] <- 1/2 } envir$outArgs$M.endpoint[envir$outArgs$M.endpoint[,iE]<iRestriction,iE] <- iRestriction } } } ## ** Perform GPC resBT <- do.call(envir$outArgs$engine, args = list(endpoint = envir$outArgs$M.endpoint, status = envir$outArgs$M.status, indexC = outSample$ls.indexC, posC = outSample$ls.posC, indexT = outSample$ls.indexT, posT = outSample$ls.posT, threshold = envir$outArgs$threshold, restriction = envir$outArgs$restriction, weightEndpoint = envir$outArgs$weightEndpoint, weightObs = envir$outArgs$weightObs, method = sapply(envir$outArgs$method.score, switch, "continuous" = 1, "gaussian" = 2, "TTEgehan" = 3, "TTEgehan2" = 4, "SurvPeron" = 5, "CRPeron" = 6), pool = envir$outArgs$pool.strata, op = envir$outArgs$operator, D = envir$outArgs$D, D_UTTE = envir$outArgs$D.UTTE, n_strata = envir$outArgs$n.strata, nUTTE_analyzedPeron_M1 = envir$outArgs$nUTTE.analyzedPeron_M1, index_endpoint = envir$outArgs$index.endpoint, index_status = envir$outArgs$index.status, index_UTTE = envir$outArgs$index.UTTE, list_survTimeC = outSurv$survTimeC, list_survTimeT = outSurv$survTimeT, list_survJumpC = outSurv$survJumpC, list_survJumpT = outSurv$survJumpT, list_lastSurv = outSurv$lastSurv, p_C = outSurv$p.C, p_T = outSurv$p.T, iid_survJumpC = outSurv$iid$survJumpC, iid_survJumpT = outSurv$iid$survJumpT, zeroPlus = 1e-8, correctionUninf = envir$outArgs$correction.uninf, hierarchical = envir$outArgs$hierarchical, hprojection = envir$outArgs$order.Hprojection, neutralAsUninf = envir$outArgs$neutral.as.uninf, addHalfNeutral = envir$outArgs$add.halfNeutral, keepScore = (pointEstimation && envir$outArgs$keep.pairScore), precompute = envir$outArgs$precompute, paired = envir$outArgs$paired, returnIID = c(iid,envir$outArgs$iidNuisance), debug = envir$outArgs$debug )) ## ** export if(pointEstimation){ if(envir$outArgs$keep.survival){ ## useful to test initSurvival resBT$tableSurvival <- outSurv } return(resBT) }else{ ## index <- 5 ## resBT$Delta[,index] ## sum(resBT$delta[,,index][,1] * resBT$weightStrata) return(list(delta = resBT$delta, Delta = resBT$Delta, weightStrata = resBT$weightStrata, covariance = resBT$covariance)) } } ## * calcSample calcSample <- function(envir, method.inference){ ## ** initialization out <- list(## rows in M.endpoint/M.status corresponding to observations from the control/treatment group (not unique when boostraping) ls.indexC = vector(mode = "list", length = envir$outArgs$n.strata), ls.indexT = vector(mode = "list", length = envir$outArgs$n.strata), ## identifier for each observation from the control/treatment group (unique even when boostrap) ls.posC = vector(mode = "list", length = envir$outArgs$n.strata), 
ls.posT = vector(mode = "list", length = envir$outArgs$n.strata), ## dataset data = data.table::data.table() ) if(method.inference %in% c("none","u-statistic")){ ## ** no resampling if(envir$outArgs$n.strata==1){ out$ls.indexC[[1]] <- envir$outArgs$index.C - 1 out$ls.indexT[[1]] <- envir$outArgs$index.T - 1 }else{ for(iStrata in 1:envir$outArgs$n.strata){ ## iStrata <- 1 out$ls.indexC[[iStrata]] <- intersect(envir$outArgs$index.C, envir$outArgs$index.strata[[iStrata]]) - 1 out$ls.indexT[[iStrata]] <- intersect(envir$outArgs$index.T, envir$outArgs$index.strata[[iStrata]]) - 1 } } out$ls.posC <- out$ls.indexC out$ls.posT <- out$ls.indexT if(envir$outArgs$scoring.rule>0){ out$data <- data.table::data.table(envir$outArgs$data, envir$outArgs$M.endpoint, envir$outArgs$M.status) } }else{ ## ** stratified resampling n.strataResampling <- length(envir$outArgs$n.obsStrataResampling) index.resampling <- NULL for (iSR in 1:n.strataResampling) { ## iSR <- 1 index.resampling <- c(index.resampling, envir$outArgs$cumn.obsStrataResampling[iSR] + sample.int(envir$outArgs$n.obsStrataResampling[iSR], replace = attr(method.inference, "bootstrap"))) } ## ** reconstruct groups ## index: index of the new observations in the old dataset by treatment group ## pos: unique identifier for each observation if(envir$outArgs$n.strata==1){ ## no strata if(grepl("permutation",method.inference)){ out$ls.indexC[[1]] <- which(index.resampling %in% envir$outArgs$index.C) - 1 out$ls.indexT[[1]] <- which(index.resampling %in% envir$outArgs$index.T) - 1 out$ls.posC[[1]] <- out$ls.indexC[[1]] out$ls.posT[[1]] <- out$ls.indexT[[1]] }else if(grepl("bootstrap",method.inference)){ out$ls.posC[[1]] <- which(index.resampling %in% envir$outArgs$index.C) - 1 out$ls.posT[[1]] <- which(index.resampling %in% envir$outArgs$index.T) - 1 out$ls.indexC[[1]] <- index.resampling[out$ls.posC[[1]] + 1] - 1 out$ls.indexT[[1]] <- index.resampling[out$ls.posT[[1]] + 1] - 1 } ## check that each group has at least one observation if(length(out$ls.indexC[[1]])==0 || length(out$ls.indexT[[1]])==0){return(NULL)} ## out$data[treatment == 0,eventtime1] - envir$outArgs$M.endpoint[out$ls.indexC[[1]]+1,1] ## out$data[treatment == 1,eventtime1] - envir$outArgs$M.endpoint[out$ls.indexT[[1]]+1,1] }else{ ## strata if (grepl("permutation",method.inference)) { index.C <- which(index.resampling %in% envir$outArgs$index.C) index.T <- which(index.resampling %in% envir$outArgs$index.T) } for(iStrata in 1:envir$outArgs$n.strata){ ## iStrata <- 1 ## index of the new observations in the old dataset by treatment group if(grepl("permutation",method.inference)){ out$ls.indexC[[iStrata]] <- intersect(index.C, envir$outArgs$index.strata[[iStrata]]) - 1 out$ls.indexT[[iStrata]] <- intersect(index.T, envir$outArgs$index.strata[[iStrata]]) - 1 out$ls.posC[[iStrata]] <- out$ls.indexC[[iStrata]] out$ls.posT[[iStrata]] <- out$ls.indexT[[iStrata]] }else if(grepl("bootstrap",method.inference)){ out$ls.posC[[iStrata]] <- which(index.resampling %in% intersect(envir$outArgs$index.C, envir$outArgs$index.strata[[iStrata]])) - 1 out$ls.posT[[iStrata]] <- which(index.resampling %in% intersect(envir$outArgs$index.T, envir$outArgs$index.strata[[iStrata]])) - 1 out$ls.indexC[[iStrata]] <- index.resampling[out$ls.posC[[iStrata]] + 1] - 1 out$ls.indexT[[iStrata]] <- index.resampling[out$ls.posT[[iStrata]] + 1] - 1 } ## check that each group has at least one observation if(length(out$ls.indexC[[iStrata]])==0 || length(out$ls.indexT[[iStrata]])==0){return(NULL)} } } ## ** rebuild dataset 
if(envir$outArgs$scoring.rule>0){ if(grepl("permutation",method.inference)){ out$data <- data.table::data.table(envir$outArgs$data[[envir$outArgs$treatment]][index.resampling], "..strata.." = envir$outArgs$data[["..strata.."]], envir$outArgs$M.endpoint,envir$outArgs$M.status) data.table::setnames(out$data, old = names(out$data)[1], new = envir$outArgs$treatment) }else{ out$data <- data.table::data.table(envir$outArgs$data[,.SD,.SDcols = c(envir$outArgs$treatment,"..strata..")], envir$outArgs$M.endpoint, envir$outArgs$M.status)[index.resampling] } } } return(out) }
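## Illustration (not part of the package): a minimal sketch of the strata weights
## documented in the section "Pooling results across strata" of ?BuyseTest,
## computed by hand from per-strata sample sizes. The function name and the
## example sample sizes below are made up for this sketch.
.sketchPoolWeights <- function(m, n, pool = "Buyse"){ ## m, n: control/active sample sizes per strata
    w <- switch(pool,
                "Buyse" = m * n,             ## proportional to the number of pairs in the strata
                "CMH" = (m * n) / (m + n),   ## number of pairs over the strata sample size
                "equal" = rep(1, length(m))) ## same weight for every strata
    w / sum(w) ## normalize so the weights sum to one
}
## .sketchPoolWeights(m = c(20, 30), n = c(25, 25), pool = "CMH")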
/scratch/gouwar.j/cran-all/cranData/BuyseTest/R/BuyseTest.R
## * Documentation - BuyseTest.options #' @title Global options for BuyseTest package #' @name BuyseTest.options #' @include 0-onLoad.R #' #' @description Update or select global options for the BuyseTest package. #' #' @param ... options to be selected or updated #' @param reinitialise should all the global parameters be set to their default values #' #' @keywords utilities #' #' @examples #' library(data.table) #' #' ## see all global parameters #' BuyseTest.options() #' #' ## see some of the global parameters #' BuyseTest.options("n.resampling", "trace") #' #' ## update some of the global parameters #' BuyseTest.options(n.resampling = 10, trace = 1) #' BuyseTest.options("n.resampling", "trace") #' #' ## reinitialise all global parameters #' BuyseTest.options(reinitialise = TRUE) ## * Function BuyseTest.options #' @rdname BuyseTest.options #' @export BuyseTest.options <- function(..., reinitialise = FALSE){ if (reinitialise == TRUE) { assign(".BuyseTest-options", new("BuyseTest.options", add.halfNeutral = FALSE, ## default value of argument add.halfNeutral in BuyseTest() add.1.presample = TRUE, ## if TRUE p-value when using permutation is computed as (#more extreme+1)/(#sample+1) otherwise #more extreme/#sample. ## Another correction is used for the bootstrap to ensure strictly positive p-values alternative = "two.sided", ## type of alternative hypothesis when doing hypothesis testing: less, greater, two.sided args.model.tte = list(), ## additional argument passed to prodlim when fitting the survival model in BuyseTest() --> calcPeron check = TRUE, ## should arguments be checked when running BuyseTest() conf.level = 0.95, ## coverage of confidence intervals correction.uninf = 0, ## default value of argument correction.uninf in BuyseTest() cpus = 1, ## cpus used to perform inference via resampling in BuyseTest() debug = -1, ## hidden argument in BuyseTest to display progress of the C++ code engine = "GPC2_cpp", ## C++ function used to perform GPC calculation for BuyseTest() fitter.model.tte = "prodlim", ## survival model in BuyseTest() --> calcPeron hierarchical = TRUE, ## default value of argument hierarchical in BuyseTest() keep.pairScore = FALSE, ## default value of argument keep.pairScore in BuyseTest() keep.survival = FALSE, ## hidden argument to export survival values for the Peron Scoring rule in BuyseTest() method.inference = "u-statistic", ## default value of argument method.inference in BuyseTest() n.resampling = 1000, ## default value of argument n.resampling in BuyseTest() neutral.as.uninf = TRUE, ## default value of argument neutral.as.uninf in BuyseTest() order.Hprojection = 1, ## hidden argument in BuyseTest() to control the type of H-projection when using method.inference="u-statistic". Can be 1 or 2 pool.strata = "CMH", ## default weighting scheme to pool estimates across strata in BuyseTest(). Can be "Buyse" (weighted average proportional to number of pairs per strata), ## "CMH" (Cochran-Mantel-Haenszel weights) ## "equal" (equal weights for all strata) precompute = TRUE, ## hidden argument in BuyseTest() to pre-compute integrals over time before the C++ routine print.display = c("endpoint","restriction","threshold","delta","Delta"), ## what to display when showing a S4BuyseTest object scoring.rule = "Peron", ## default value of argument scoring.rule in BuyseTest() statistic = "netBenefit", ## what is the default statistic output by summary, confint ... 
strata.resampling = as.character(NA), ## default value of argument strata.resampling in BuyseTest() summary.display = list(c("endpoint","restriction","threshold","weight","strata","total","favorable","unfavorable","neutral","uninf","delta","Delta","CI","p.value","significance"), c("endpoint","restriction","threshold","weight","strata","favorable","unfavorable","delta","Delta","Delta(%)","information(%)")), transformation = TRUE, ## should p-value/CI be computed after transformation (and appropriate backtransformation) trace = 2, ## default value of argument trace in BuyseTest() warning.correction = 0.25), ## display a warning when the correction leads to a very large change in estimate envir = BuyseTest.env) return(invisible(get(".BuyseTest-options", envir = BuyseTest.env))) }else{ args <- list(...) object <- get(".BuyseTest-options", envir = BuyseTest.env) if (!is.null(names(args))) { # write validCharacter(names(args), name1 = "...", valid.length = NULL, valid.values = slotNames(object), refuse.duplicates = TRUE, refuse.NULL = FALSE, method = "BuyseTest.options") value <- alloc(object, field = args) assign(".BuyseTest-options", value, envir = BuyseTest.env) return(invisible(value)) } else {# read validCharacter(args, name1 = "...", valid.length = NULL, valid.values = slotNames(object), refuse.duplicates = TRUE, refuse.NULL = FALSE, method = "BuyseTest.options") value <- select(object, name.field = unlist(args)) return(value) } } }
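## Illustration (not part of the package): a minimal sketch of how a hidden option
## interacts with BuyseTest(). As documented in ?BuyseTest, the second order
## H-projection is only available for the uncorrected Gehan scoring rule. The
## dataset is simulated and the if(FALSE) guard keeps the sketch from running
## when the file is sourced.
if(FALSE){
    df.ex <- simBuyseTest(100)
    BuyseTest.options(order.Hprojection = 2)
    BT.ex <- BuyseTest(treatment ~ TTE(eventtime, status = status), data = df.ex,
                       scoring.rule = "Gehan", correction.uninf = 0)
    confint(BT.ex) ## variance from the second order H-projection
    BuyseTest.options(reinitialise = TRUE) ## restore the default options
}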
/scratch/gouwar.j/cran-all/cranData/BuyseTest/R/BuyseTest.options.R
### CasinoTest.R --- ##---------------------------------------------------------------------- ## Author: Brice Ozenne ## Created: mar 22 2023 (15:15) ## Version: ## Last-Updated: jul 18 2023 (12:06) ## By: Brice Ozenne ## Update #: 111 ##---------------------------------------------------------------------- ## ### Commentary: ## ### Change Log: ##---------------------------------------------------------------------- ## ### Code: ## * CasinoTest (documentation) ##' @title Multi-group GPC (EXPERIMENTAL) ##' @description Performs Generalized Pairwise Comparisons (GPC) for two or more groups. ##' Can handle one or several binary, continuous and time-to-event endpoints. ##' ##' @param formula [formula] a symbolic description of the GPC model, see the \code{BuyseTest} function. ##' @param data [data.frame] dataset. ##' @param type [character] Type of estimator: can be \code{"unweighted"} or \code{"weighted"}. ##' @param add.halfNeutral [logical] should half of the neutral score be added to the favorable and unfavorable scores? ##' @param method.inference [character] method used to compute confidence intervals and p-values. ##' Can be \code{"none"}, \code{"u-statistic"}, or \code{"rank"}. ##' @param method.multcomp [character] method used to adjust for multiple comparisons. ##' Can be any element of \code{p.adjust.methods} (e.g. \code{"holm"}), \code{"maxT-integration"}, or \code{"maxT-simulation"}. ##' @param conf.level [numeric] confidence level for the confidence intervals. ##' Default value read from \code{BuyseTest.options()}. ##' @param alternative [character] the type of alternative hypothesis: \code{"two.sided"}, \code{"greater"}, or \code{"less"}. ##' Default value read from \code{BuyseTest.options()}. ##' @param transformation [logical] should the CI be computed on the inverse hyperbolic tangent scale / log scale for the net benefit / win ratio and backtransformed. ##' Otherwise they are computed without any transformation. ##' Default value read from \code{BuyseTest.options()}. Not relevant when using permutations or percentile bootstrap. ##' @param seed [integer, >0] Random number generator (RNG) state used when adjusting for multiple comparisons. ##' If \code{NULL} no state is set. ##' ##' @details Requires the packages riskRegression and BuyseTest to be installed. ##' ##' Setting argument \code{method.inference} to \code{"rank"} uses a U-statistic approach with a small sample correction to match the variance estimator derived in Result 4.16 page 228 of Brunner (2018). ##' ##' @return An S3 object of class \code{CasinoTest} that inherits from data.frame. ##' @references Edgar Brunner, Arne C Bathke, and Frank Konietschke (2018). \bold{Rank and pseudo-rank procedures for independent observations in factorial designs}. Springer. 
##' @keywords models ##' ## * CasinoTest (example) ##' @examples ##' library(data.table) ##' library(BuyseTest) ##' ##' #### simulate data #### ##' set.seed(11) ##' n <- 4 ##' dt <- rbind(data.table(score = rnorm(n), group = "A"), ##' data.table(score = rnorm(2*n), group = "B"), ##' data.table(score = rnorm(3*n), group = "C")) ##' dt$index <- 1:NROW(dt) ##' ##' #### estimation #### ##' score.casino <- dt$score ##' ##' ## naive casino (by hand) ##' M.score <- outer(dt[group=="A",score],score.casino,function(x,y){x>y+0.5*(x==y)}) ##' mean(M.score) ##' ##' ## naive casino (via BuyseTest) ##' CasinoTest(group ~ cont(score), data = dt, type = "weighted") ##' ##' ## harmonic casino (by hand) ##' hweight <- unlist(tapply(dt$group, dt$group, function(x){rep(1/length(x),length(x))})) ##' M.scoreW <- sweep(M.score, MARGIN = 2, FUN = "*", STATS = NROW(dt)*hweight/3) ##' mean(M.scoreW) ##' ##' ## harmonic casino (via BuyseTest) ##' CasinoTest(group ~ cont(score), data = dt, type = "unweighted") ##' ##' #### Relative liver weights data (Brunner 2018, table 4.1, page 183) #### ##' liverW <- rbind( ##' data.frame(value = c(3.78, 3.40, 3.29, 3.14, 3.55, 3.76, 3.23, 3.31), ##' group = "Placebo"), ##' data.frame(value = c(3.46,3.98,3.09,3.49,3.31,3.73,3.23), ##' group = "Dose 1"), ##' data.frame(value = c(3.71, 3.36, 3.38, 3.64, 3.41, 3.29, 3.61, 3.87), ##' group = "Dose 2"), ##' data.frame(value = c(3.86,3.80,4.14,3.62,3.95,4.12,4.54), ##' group = "Dose 3"), ##' data.frame(value = c(4.14,4.11,3.89,4.21,4.81,3.91,4.19, 5.05), ##' group = "Dose 4") ##' ) ##' liverW$valueU <- liverW$value + (1:NROW(liverW))/1e6 ##' ##' ## same as table 4.1, page 183 in Brunner et al (2018) ##' CasinoTest(group ~ cont(value), data = liverW, type = "weighted", add.halfNeutral = TRUE) ##' CasinoTest(group ~ cont(valueU), data = liverW, type = "unweighted", add.halfNeutral = TRUE) ## * CasinoTest (code) ##' @export CasinoTest <- function(formula, data, type = "unweighted", add.halfNeutral = NULL, method.inference = "u-statistic", conf.level = NULL, transformation = NULL, alternative = NULL, method.multcomp = "none", seed = NA){ requireNamespace("riskRegression") ## for confidence bands ## ** normalize arguments option <- BuyseTest.options() if(is.null(conf.level)){ conf.level <- option$conf.level } if(is.null(transformation)){ transformation <- option$transformation } if(is.null(alternative)){ alternative <- option$alternative } if(tolower(type)=="un-weighted"){ type <- "unweighted" } type <- match.arg(type, c("weighted","unweighted")) if("XXindexXX" %in% names(data)){ stop("Argument \'data\' should not contain a column named \"XXindexXX\" as this name is used internally by the CasinoTest function. \n") } if("XXweightXX" %in% names(data)){ stop("Argument \'data\' should not contain a column named \"XXweightXX\" as this name is used internally by the CasinoTest function. 
\n") } data <- as.data.frame(data) data$XXindexXX <- 1:NROW(data) method.inference <- match.arg(method.inference, c("none","u-statistic","rank")) if(method.inference=="rank"){ method.inference <- "u-statistic" ssc <- TRUE }else{ ssc <- FALSE } ## ** read formula details.formula <- initializeFormula(formula, hierarchical = TRUE, envir = environment()) name.treatment <- details.formula$treatment name.endpoint <- details.formula$endpoint if(!is.factor(data[[name.treatment]])){ data[[name.treatment]] <- as.factor(data[[name.treatment]]) }else{ data[[name.treatment]] <- droplevels(data[[name.treatment]]) } level.treatment <- levels(data[[name.treatment]]) elevel.treatement <- paste(level.treatment,collapse=".") n.treatment <- length(level.treatment) n.obs <- NROW(data) ## prepare normalization n.group <- table(data[[name.treatment]]) norm.groupvar <- 1/n.group norm.group <- norm.groupvar[data[[name.treatment]]] ## ** re-organize data (split by treatment and duplicate with new treatment level) ls.data <- by(data,data[[name.treatment]],function(x){x}) ls.data2 <- lapply(ls.data, function(x){ x[[name.treatment]] <- elevel.treatement return(x) }) data2 <- do.call(rbind,ls.data2) ## ** pairwise comparisons ## grid grid <- .unorderedPairs(level.treatment) n.grid <- NCOL(grid) grid.BT <- vector(length = n.grid, mode = "list") ## prepare to store output M.estimate <- matrix(NA, nrow = n.treatment, ncol = n.treatment, dimnames = list(level.treatment, level.treatment)) if(method.inference == "u-statistic"){ M.iid <- array(0, dim = c(n.obs,n.treatment,n.treatment), dimnames = list(NULL, level.treatment, level.treatment)) }else{ M.iid <- NULL } M.null <- matrix(NA, nrow = n.treatment, ncol = n.treatment, dimnames = list(level.treatment, level.treatment)) ## loop for(iGrid in 1:n.grid){ ## iGrid <- 1 iTreat1 <- grid[1,iGrid] iTreat2 <- grid[2,iGrid] iData <- rbind(ls.data[[iTreat1]],ls.data2[[iTreat2]]) iData[[name.treatment]] <- droplevels(stats::relevel(iData[[name.treatment]], iTreat1)) ## GPC grid.BT[[iGrid]] <- BuyseTest(formula, data = iData, method.inference = method.inference, add.halfNeutral = add.halfNeutral, trace = FALSE) ## store estimate iInference <- confint(grid.BT[[iGrid]], statistic = "favorable") M.estimate[iTreat1,iTreat2] <- iInference$estimate M.null[iTreat1,iTreat2] <- iInference$null if(iTreat1!=iTreat2){ M.estimate[iTreat2,iTreat1] <- coef(grid.BT[[iGrid]], statistic = "unfavorable") M.null[iTreat2,iTreat1] <- iInference$null } ## store iid if(method.inference == "u-statistic"){ if(iTreat1==iTreat2){ iIndex <- unique(sort(iData$XXindexXX)) M.iid[iIndex,iTreat1,iTreat1] <- getIid(grid.BT[[iGrid]], statistic = "favorable", scale = FALSE, center = TRUE, cluster = iData$XXindexXX)/n.group[iTreat1] }else{ iIndex <- iData$XXindexXX M.iid[iIndex,iTreat1,iTreat2] <- getIid(grid.BT[[iGrid]], statistic = "favorable", scale = TRUE, center = TRUE) M.iid[iIndex,iTreat2,iTreat1] <- getIid(grid.BT[[iGrid]], statistic = "unfavorable", scale = TRUE, center = TRUE) } } } ## ** collect results averaged over all treatments out.estimate <- as.data.frame(matrix(NA, nrow = n.treatment, ncol = 6, dimnames = list(level.treatment, c("estimate","se","lower.ci","upper.ci","null","p.value")))) if(type=="weighted"){ weight.GPC <- n.group/n.obs }else if(type=="unweighted"){ weight.GPC <- rep(1/n.treatment, n.treatment) } out.estimate$estimate <- colSums(.colMultiply_cpp(M.estimate, weight.GPC)) out.estimate$null <- colSums(.colMultiply_cpp(M.null, weight.GPC)) if(method.inference!="none"){ out.iid <- matrix(NA, 
nrow = n.obs, ncol = n.treatment, dimnames = list(NULL, level.treatment)) for(iT in 1:n.treatment){ ## iT <- 1 out.iid[,iT] <- rowSums(.rowMultiply_cpp(M.iid[,,iT], weight.GPC)) if(ssc){ out.iid[,iT] <- out.iid[,iT]*sqrt(n.group/(n.group-1))[data[[name.treatment]]] } } ## print(tapply(out.iid[,1]^2,data[[name.treatment]],sum)) ## ** statistical inference if(transformation){ type.trans <- "atanh2" }else{ type.trans <- "none" } out.estimate$se <- sqrt(colSums(out.iid^2)) out.estimate$lower.ci <- NA out.estimate$upper.ci <- NA out.estimate$p.value <- NA e.Band <- riskRegression::transformCIBP(estimate = rbind(out.estimate$estimate), se = rbind(out.estimate$se), iid = array(out.iid, dim = c(n.obs,n.treatment,1)), null = out.estimate$null, conf.level = conf.level, alternative = alternative, ci = TRUE, type = type.trans, min.value = 0, max.value = 1, p.value = TRUE, band = method.multcomp!="none", method.band = method.multcomp, seed = seed) out.estimate$lower.ci <- e.Band$lower[1,] out.estimate$upper.ci <- e.Band$upper[1,] out.estimate$p.value <- e.Band$p.value[1,] if(method.multcomp!="none"){ out.estimate$lower.band <- e.Band$lowerBand[1,] out.estimate$upper.band <- e.Band$upperBand[1,] out.estimate$adj.p.value <- e.Band$adj.p.value[1,] } attr(out.estimate,"iid") <- out.iid } ## ** export class(out.estimate) <- append("CasinoTest",class(out.estimate)) return(out.estimate) } ## * .unorderedPairs (from LMMstar package) .unorderedPairs <- function (x, distinct = FALSE){ n.x <- length(x) out <- do.call(cbind, lapply(1:n.x, function(iK) { rbind(x[iK], x[iK:n.x]) })) if (distinct) { return(out[, out[1, ] != out[2, ], drop = FALSE]) } else { return(out) } } ## alternative implementation ## if(method.inference == "u-statistic"){ ## if(iTreat1==iTreat2){ ## iIndex <- unique(sort(iData$XXindexXX)) ## M.iid[iIndex,iTreat1,iTreat1] <- getIid(grid.BT[[iGrid]], statistic = "favorable", scale = FALSE, center = FALSE, cluster = iData$XXindexXX) ## }else{ ## iIndex <- iData$XXindexXX ## M.iid[iIndex,iTreat1,iTreat2] <- getIid(grid.BT[[iGrid]], statistic = "favorable", scale = FALSE, center = FALSE) ## M.iid[iIndex,iTreat2,iTreat1] <- getIid(grid.BT[[iGrid]], statistic = "unfavorable", scale = FALSE, center = FALSE) ## } ## } ## } ## ## ** collect results averaged over all treatments ## out.estimate <- as.data.frame(matrix(NA, nrow = n.treatment, ncol = 6, ## dimnames = list(level.treatment, c("estimate","se","lower.ci","upper.ci","null","p.value")))) ## out.iid <- matrix(NA, nrow = n.obs, ncol = n.treatment, ## dimnames = list(NULL, level.treatment)) ## if(type=="weighted"){ ## weight.GPC <- n.group/n.obs ## }else if(type=="unweighted"){ ## weight.GPC <- rep(1/n.treatment, n.treatment) ## } ## out.estimate$estimate <- colSums(.colMultiply_cpp(M.estimate, weight.GPC)) ## out.estimate$null <- colSums(.colMultiply_cpp(M.null, weight.GPC)) ## for(iT in 1:n.treatment){ ## iT <- 1 ## iExpectation <- rowSums(.rowMultiply_cpp(M.iid[,,iT], weight.GPC)) ## iCenter <- tapply(iExpectation, data[[name.treatment]], mean) ## if(ssc){ ## ## one n.group is used to evaluate the variance of the H-decomposition, i.e. 
becomes n.group-1 ## ## one n.group is from the H-decomposition (1/n \sum H) ## out.iid[,iT] <- (iExpectation-iCenter[data[[name.treatment]]])/sqrt(n.group[data[[name.treatment]]]*(n.group[data[[name.treatment]]]-1)) ## }else{ ## out.iid[,iT] <- (iExpectation-iCenter[data[[name.treatment]]])/n.group[data[[name.treatment]]] ## } ## } ##---------------------------------------------------------------------- ### CasinoTest.R ends here
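## Illustration (not part of the package): .unorderedPairs builds the grid of
## two-group comparisons used by CasinoTest, self-pairs included. A quick check
## on three hypothetical levels, guarded by if(FALSE) so it does not run when
## the file is sourced:
if(FALSE){
    .unorderedPairs(c("A","B","C"))
    ##      [,1] [,2] [,3] [,4] [,5] [,6]
    ## [1,] "A"  "A"  "A"  "B"  "B"  "C"
    ## [2,] "A"  "B"  "C"  "B"  "C"  "C"
    .unorderedPairs(c("A","B","C"), distinct = TRUE) ## drop the self-pairs
}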
/scratch/gouwar.j/cran-all/cranData/BuyseTest/R/CasinoTest.R
### tablePairScore.R --- ##---------------------------------------------------------------------- ## Author: Brice Ozenne ## Created: maj 26 2018 (14:54) ## Version: ## Last-Updated: Dec 21 2021 (17:01) ## By: Brice Ozenne ## Update #: 137 ##---------------------------------------------------------------------- ## ### Commentary: ## ### Change Log: ##---------------------------------------------------------------------- ## ### Code: ## * pairScore2dt ## Convert output of .BuyseTest (list of vector) into a list of data.table pairScore2dt <- function(pairScore, level.treatment, level.strata, n.strata, endpoint, threshold, restriction){ ## Rcpp outputs vectors: convert to matrices and rename name.tempo <- c("strata", "index.C", "index.T", "index.pair", "indexWithinStrata.C", "indexWithinStrata.T", "favorable","unfavorable","neutral","uninf", "weight", "favorableC","unfavorableC","neutralC","uninfC") p <- length(pairScore) pairScore2 <- vector(mode = "list", length = p) for(iL in 1:p){ pairScore2[[iL]] <- data.table::as.data.table(matrix(pairScore[[iL]], ncol = 15, byrow = FALSE, dimnames = list(NULL,name.tempo))) pairScore2[[iL]][, c("strata") := factor(.SD[["strata"]], levels = 0:(n.strata-1), labels = level.strata)] ## recall that indexes start at 1 in R and not at 0 as in C++ pairScore2[[iL]][, c("index.C") := .SD$index.C + 1] ## restore position in the original dataset, not the datasets relative to T and C pairScore2[[iL]][, c("index.T") := .SD$index.T + 1] ## restore position in the original dataset, not the datasets relative to T and C pairScore2[[iL]][, c("index.pair") := .SD$index.pair + 1] pairScore2[[iL]][, c("indexWithinStrata.T") := .SD$indexWithinStrata.T + 1] pairScore2[[iL]][, c("indexWithinStrata.C") := .SD$indexWithinStrata.C + 1] data.table::setkeyv(pairScore2[[iL]], c("index.T","index.C")) } names(pairScore2) <- paste0(endpoint,ifelse(!is.na(restriction),paste0("_r",restriction),""),ifelse(threshold>1e-12,paste0("_t",threshold),"")) return(pairScore2) }
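## Illustration (not part of the package): pairScore2dt is called internally by
## BuyseTest() when keep.pairScore=TRUE; per the BuyseTest documentation, the
## resulting per-pair tables are then retrieved with getPairScore. The dataset
## below is simulated for the sketch and the if(FALSE) guard keeps it from
## running when the file is sourced.
if(FALSE){
    df.ex <- simBuyseTest(100)
    BT.ex <- BuyseTest(treatment ~ TTE(eventtime, status = status),
                       data = df.ex, keep.pairScore = TRUE)
    head(getPairScore(BT.ex)) ## one data.table row per pair: indexes, scores, weight
}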
/scratch/gouwar.j/cran-all/cranData/BuyseTest/R/PairScore.R
# Generated by using Rcpp::compileAttributes() -> do not edit by hand # Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393 #' @title C++ function performing the pairwise comparison over several endpoints. #' @description \code{GPC_cpp} calls for each endpoint and each strata the pairwise comparison function suited to the type of endpoint and stores the results. #' @name GPC_cpp #' #' @param endpoint A matrix containing the values of each endpoint (in columns) for each observation (in rows). #' @param status A matrix containing the values of the status variables relative to each endpoint (in columns) for each observation (in rows). #' @param indexC A list containing, for each strata, which rows of the endpoint and status matrices correspond to the control observations. Not unique when bootstrapping. #' @param posC A list containing, for each strata, the unique identifier of each control observation. #' @param indexT A list containing, for each strata, which rows of the endpoint and status matrices correspond to the treatment observations. Not unique when bootstrapping. #' @param posT A list containing, for each strata, the unique identifier of each treatment observation. #' @param threshold Store the thresholds associated to each endpoint. Must have length D. The threshold is ignored for binary endpoints. #' @param restriction Store the restriction time associated to each endpoint. Must have length D. #' @param weightEndpoint Store the weight associated to each endpoint. Must have length D. #' @param weightObs A vector containing the weight associated to each observation. #' @param method The index of the method used to score the pairs. Must have length D. 1 for binary/continuous, 2 for Gaussian, 3/4 for Gehan (left or right-censoring), and 5/6 for Peron (right-censoring survival or competing risks). #' @param pool The index of the method used to pool results across strata. Can be 0 (weight inversely proportional to the sample size), 1 (Mantel-Haenszel weights), 2 (equal weights), 3 (precision weights). #' @param op The index of the operator used to score the pairs. Must have length D. 1 for larger is better, -1 for smaller is better. #' @param D The number of endpoints. #' @param D_UTTE The number of distinct time to event endpoints. #' @param n_strata The number of strata. #' @param nUTTE_analyzedPeron_M1 The number of unique time-to-event endpoints that have been analyzed with the Peron scoring rule before the current endpoint. Must have length D. #' @param index_endpoint The position of the endpoint at each priority in the argument endpoint. Must have length D. #' @param index_status The position of the status at each priority in the argument status. Must have length D. #' @param index_UTTE The position, among all the unique tte endpoints, of the TTE endpoints. Equals -1 for non tte endpoints. Must have length n_TTE. #' @param list_survTimeC A list of matrix containing the survival estimates (-threshold, 0, +threshold ...) for each event of the control group (in rows). #' @param list_survTimeT A list of matrix containing the survival estimates (-threshold, 0, +threshold ...) for each event of the treatment group (in rows). #' @param list_survJumpC A list of matrix containing the survival estimates and survival jumps when the survival for the control arm jumps. #' @param list_survJumpT A list of matrix containing the survival estimates and survival jumps when the survival for the treatment arm jumps. 
#' @param list_lastSurv A list of matrix containing the last survival estimate in each strata (rows) and treatment group (columns). #' @param p_C Number of nuisance parameters in the survival model for the control group, for each endpoint and strata. #' @param p_T Number of nuisance parameters in the survival model for the treatment group, for each endpoint and strata. #' @param iid_survJumpC A list of matrix containing the iid of the survival estimates in the control group. #' @param iid_survJumpT A list of matrix containing the iid of the survival estimates in the treatment group. #' @param zeroPlus Value under which doubles are considered 0. #' @param correctionUninf Should the uninformative weight be re-distributed to favorable and unfavorable? #' @param hierarchical Should only the uninformative pairs be analyzed at the lower priority endpoints (hierarchical GPC)? Otherwise all pairs will be compared for all endpoints (full GPC). #' @param hprojection Order of the H-projection used to compute the variance. #' @param neutralAsUninf Should pairs classified as neutral be re-analyzed using endpoints of lower priority? #' @param addHalfNeutral Should half of the neutral score be added to the favorable and unfavorable scores? #' @param keepScore Should the result of each pairwise comparison be kept? #' @param precompute Have the integrals relative to the survival already been computed and stored in list_survTimeC/list_survTimeT and list_survJumpC/list_survJumpT (derivatives). #' @param paired In case of paired data, the variance of the summary statistic across strata will be added to the variance of the pooled statistic. #' @param returnIID Should the iid be computed? Second element: is there any nuisance parameter? #' @param debug Print messages tracing the execution of the function to help debugging. The amount of messages increases with the value of debug (0-5). #' #' @details GPC_cpp implements GPC looping first over endpoints and then over pairs. #' To handle multiple endpoints, it stores some of the results which can be memory demanding when considering large samples - especially when computing the iid decomposition. #' GPC2_cpp implements GPC looping first over pairs and then over endpoints. It has rather minimal memory requirements but does not handle correction for uninformative pairs. 
#' #' @keywords internal #' #' @author Brice Ozenne NULL #' @name GPC_cpp #' @export GPC_cpp <- function(endpoint, status, indexC, posC, indexT, posT, threshold, restriction, weightEndpoint, weightObs, method, pool, op, D, D_UTTE, n_strata, nUTTE_analyzedPeron_M1, index_endpoint, index_status, index_UTTE, list_survTimeC, list_survTimeT, list_survJumpC, list_survJumpT, list_lastSurv, p_C, p_T, iid_survJumpC, iid_survJumpT, zeroPlus, correctionUninf, hierarchical, hprojection, neutralAsUninf, addHalfNeutral, keepScore, precompute, paired, returnIID, debug) { .Call(`_BuyseTest_GPC_cpp`, endpoint, status, indexC, posC, indexT, posT, threshold, restriction, weightEndpoint, weightObs, method, pool, op, D, D_UTTE, n_strata, nUTTE_analyzedPeron_M1, index_endpoint, index_status, index_UTTE, list_survTimeC, list_survTimeT, list_survJumpC, list_survJumpT, list_lastSurv, p_C, p_T, iid_survJumpC, iid_survJumpT, zeroPlus, correctionUninf, hierarchical, hprojection, neutralAsUninf, addHalfNeutral, keepScore, precompute, paired, returnIID, debug) } #' @name GPC_cpp #' @export GPC2_cpp <- function(endpoint, status, indexC, posC, indexT, posT, threshold, restriction, weightEndpoint, weightObs, method, pool, op, D, D_UTTE, n_strata, nUTTE_analyzedPeron_M1, index_endpoint, index_status, index_UTTE, list_survTimeC, list_survTimeT, list_survJumpC, list_survJumpT, list_lastSurv, p_C, p_T, iid_survJumpC, iid_survJumpT, zeroPlus, correctionUninf, hierarchical, hprojection, neutralAsUninf, addHalfNeutral, keepScore, precompute, paired, returnIID, debug) { .Call(`_BuyseTest_GPC2_cpp`, endpoint, status, indexC, posC, indexT, posT, threshold, restriction, weightEndpoint, weightObs, method, pool, op, D, D_UTTE, n_strata, nUTTE_analyzedPeron_M1, index_endpoint, index_status, index_UTTE, list_survTimeC, list_survTimeT, list_survJumpC, list_survJumpT, list_lastSurv, p_C, p_T, iid_survJumpC, iid_survJumpT, zeroPlus, correctionUninf, hierarchical, hprojection, neutralAsUninf, addHalfNeutral, keepScore, precompute, paired, returnIID, debug) } #' @title C++ Function Computing the Integral Terms for the Peron Method in the survival case. #' @description Compute the integral with respect to the jump in survival for pairs where both outcomes are censored. #' #' @param survival [matrix] Contains the jump times in the first column, #' the survival in the other arm at times plus threshold in the second column, #' and the jump in survival in the third column. #' @param start [integer] time at which to start the integral. #' @param lastSurv [numeric,>0] last survival value for the survival function in the second column. #' @param lastdSurv [numeric,>0] last survival value for the survival function in the third column. #' @param returnDeriv [logical] should the derivative regarding the survival parameters be returned. #' @param derivSurv [matrix] one-column matrix filled with 0 whose number of rows is the number of parameters of the survival. #' @param derivSurvD [matrix] one-column matrix filled with 0 whose number of rows is the number of parameters of the survival used to compute the jumps. #' #' @keywords function Cpp internal #' @author Brice Ozenne #' @export .calcIntegralSurv_cpp <- function(survival, start, lastSurv, lastdSurv, returnDeriv, derivSurv, derivSurvD) { .Call(`_BuyseTest_calcIntegralSurv_cpp`, survival, start, lastSurv, lastdSurv, returnDeriv, derivSurv, derivSurvD) } #' @title C++ Function Computing the Integral Terms for the Peron Method in the presence of competing risks (CR). 
#' @description Compute the integral with respect to the jump in CIF for pairs where both outcomes are censored. #' #' @param cifJump [matrix] cif[1] = jump times in control group (event of interest), cif[2-3] = CIF of event of interest in group #' T at times - tau and times + tau, cif[4] : jump in cif of control group at times (event of interest). #' @param start_val [numeric] Time at which to start the integral. #' @param stop_val [numeric] Time at which to stop the integral. #' @param cifTimeT [numeric] CIF of event of interest in group T evaluated at the observed time of the treatment patient. #' @param lastCIF [numeric, >0] last value of CIF of event type 1 in group T. #' @param type [numeric] Indicates the type of integral to compute (1 for wins, 2 for losses, 3 for neutral pairs with two #' events of interest - integral with t+tau and xi - and 4 for neutral pairs with two events of interest - integral with #' t+tau and t-tau). #' @param returnDeriv [logical] should the derivative regarding the survival parameters be returned? #' @param derivSurv [matrix] one-column matrix of 0s with one row per parameter of the survival model. #' @param derivSurvD [matrix] one-column matrix of 0s with one row per parameter of the survival model used to compute the jumps. #' #' @keywords function Cpp internal #' @author Eva Cantagallo #' @export .calcIntegralCif_cpp <- function(cifJump, start_val, stop_val, cifTimeT, lastCIF, type, returnDeriv, derivSurv, derivSurvD) { .Call(`_BuyseTest_calcIntegralCif_cpp`, cifJump, start_val, stop_val, cifTimeT, lastCIF, type, returnDeriv, derivSurv, derivSurvD) } #' @title C++ Function pre-computing the Integral Terms for the Peron Method in the survival case. #' @description Compute the integral with respect to the jump in survival for pairs where both outcomes are censored, i.e. \eqn{\int S1(t+\tau) dS2(t)}. #' @name calcIntegralSurv2_cpp #' #' @param time [numeric vector] vector of jump times for S2. #' @param survival [numeric vector] the survival at each jump time: \eqn{S1(t+\tau)}. #' @param dSurvival [numeric vector] the jump in survival at each jump time: \eqn{S2(t+)-S2(t-)}. #' @param index_survival [numeric vector] the position of survival parameter \eqn{S1(t+\tau)} among all parameters relative to S1. #' @param index_dSurvival1 [numeric vector] the position of survival parameter \eqn{S2(t-)} among all parameters relative to S2. #' @param index_dSurvival2 [numeric vector] the position of survival parameter \eqn{S2(t+)} among all parameters relative to S2. #' @param lastSurv [numeric] the value of S1 at the end of the follow-up. #' @param lastdSurv [numeric] the value of S2 at the end of the follow-up. #' @param iidNuisance [logical] should the derivative of the integral relative to the S1 and S2 parameters be output? #' @param nJump [integer] the number of jump times relative to S2. #' #' @keywords function Cpp internal #' @author Brice Ozenne #' @export calcIntegralSurv2_cpp <- function(time, survival, dSurvival, index_survival, index_dSurvival1, index_dSurvival2, lastSurv, lastdSurv, iidNuisance, nJump) { .Call(`_BuyseTest_calcIntegralSurv2_cpp`, time, survival, dSurvival, index_survival, index_dSurvival1, index_dSurvival2, lastSurv, lastdSurv, iidNuisance, nJump) } #' Row-wise cumulative sum #' #' @description Fast computation of t(apply(X,1,cumsum)) #' @param X A matrix. #' @return A matrix of same size as X. 
#' @keywords utilities .rowCumSum_cpp <- function(X) { .Call(`_BuyseTest_rowCumSum_cpp`, X) } #' Column-wise cumulative sum #' #' @description Fast computation of apply(X,2,cumsum) #' @param X A matrix. #' @return A matrix of same size as X. #' @keywords utilities .colCumSum_cpp <- function(X) { .Call(`_BuyseTest_colCumSum_cpp`, X) } #' Apply cumprod in each row #' #' @description Fast computation of t(apply(X,1,cumprod)) #' @param X A matrix. #' @return A matrix of same size as X. #' @keywords utilities .rowCumProd_cpp <- function(X) { .Call(`_BuyseTest_rowCumProd_cpp`, X) } #' Subtract a vector of values in each column #' #' @description Fast computation of sweep(X, FUN = "-", STATS = center, MARGIN = 1) #' @param X A matrix. #' @param center A vector with length the number of rows of X. #' @return A matrix of same size as X. #' @keywords utilities .colCenter_cpp <- function(X, center) { .Call(`_BuyseTest_colCenter_cpp`, X, center) } #' Subtract a vector of values in each row #' #' @description Fast computation of sweep(X, FUN = "-", STATS = center, MARGIN = 2) #' @param X A matrix. #' @param center A vector with length the number of columns of X. #' @return A matrix of same size as X. #' @keywords utilities .rowCenter_cpp <- function(X, center) { .Call(`_BuyseTest_rowCenter_cpp`, X, center) } #' Divide by a vector of values in each column #' #' @description Fast computation of sweep(X, FUN = "/", STATS = scale, MARGIN = 1) #' @param X A matrix. #' @param scale A vector with length the number of rows of X. #' @return A matrix of same size as X. #' @keywords utilities .colScale_cpp <- function(X, scale) { .Call(`_BuyseTest_colScale_cpp`, X, scale) } #' Divide by a vector of values in each row #' #' @description Fast computation of sweep(X, FUN = "/", STATS = scale, MARGIN = 2) #' @param X A matrix. #' @param scale A vector with length the number of columns of X. #' @return A matrix of same size as X. #' @keywords utilities .rowScale_cpp <- function(X, scale) { .Call(`_BuyseTest_rowScale_cpp`, X, scale) } #' Multiply by a vector of values in each column #' #' @description Fast computation of sweep(X, FUN = "*", STATS = scale, MARGIN = 1) #' @param X A matrix. #' @param scale A vector with length the number of rows of X. #' @return A matrix of same size as X. #' @keywords utilities .colMultiply_cpp <- function(X, scale) { .Call(`_BuyseTest_colMultiply_cpp`, X, scale) } #' Multiply by a vector of values in each row #' #' @description Fast computation of sweep(X, FUN = "*", STATS = scale, MARGIN = 2) #' @param X A matrix. #' @param scale A vector with length the number of columns of X. #' @return A matrix of same size as X. #' @keywords utilities .rowMultiply_cpp <- function(X, scale) { .Call(`_BuyseTest_rowMultiply_cpp`, X, scale) }
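## Hedged illustration (not part of the auto-generated bindings): a minimal
## sketch checking the documented equivalences between the C++ helpers above
## and their base-R counterparts. Wrapped in if(FALSE) so it never runs at
## load time; the ::: access assumes a compiled installation of BuyseTest.
if(FALSE){
    X <- matrix(stats::rnorm(6), nrow = 2)
    all.equal(BuyseTest:::.rowCumSum_cpp(X), t(apply(X, 1, cumsum)))
    all.equal(BuyseTest:::.colCumSum_cpp(X), apply(X, 2, cumsum))
    all.equal(BuyseTest:::.colCenter_cpp(X, c(1, 2)),       ## center: one value per row
              sweep(X, MARGIN = 1, STATS = c(1, 2), FUN = "-"))
    all.equal(BuyseTest:::.rowScale_cpp(X, c(1, 2, 3)),     ## scale: one value per column
              sweep(X, MARGIN = 2, STATS = c(1, 2, 3), FUN = "/"))
}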
/scratch/gouwar.j/cran-all/cranData/BuyseTest/R/RcppExports.R
### S4-BuysePower-model.tables.R --- ##---------------------------------------------------------------------- ## Author: Brice Ozenne ## Created: jun 27 2023 (14:29) ## Version: ## Last-Updated: Jul 3 2023 (10:53) ## By: Brice Ozenne ## Update #: 46 ##---------------------------------------------------------------------- ## ### Commentary: ## ### Change Log: ##---------------------------------------------------------------------- ## ### Code: ## * model.tables (documentation) #' @docType methods #' @name S4BuysePower-model.tables #' @title Extract Summary for Class "S4BuysePower" #' @aliases model.tables,S4BuysePower-method #' @include S4-BuysePower.R #' #' @description Extract a summary of the results from the \code{\link{powerBuyseTest}} function. #' #' @param x output of \code{\link{powerBuyseTest}} #' @param type [character] should a summary of the results (\code{"summary"}) or the raw results (\code{"raw"}) be output? #' @param statistic [character] statistic relative to which the power should be computed: #' \code{"netBenefit"} displays the net benefit, as described in Buyse (2010) and Peron et al. (2016), #' \code{"winRatio"} displays the win ratio, as described in Wang et al. (2016), #' \code{"mannWhitney"} displays the proportion in favor of the treatment (also called Mann-Whitney parameter), as described in Fay et al. (2018). #' Default value read from \code{BuyseTest.options()}. #' @param endpoint [character vector] the endpoints to be displayed: must be the name of the endpoint followed by an underscore and then by the threshold. #' @param transformation [logical] should the CI be computed on the logit scale / log scale for the net benefit / win ratio and backtransformed. #' @param order.Hprojection [integer 1,2] the order of the H-projection to be used to compute the variance of the net benefit/win ratio. #' #' @seealso #' \code{\link{powerBuyseTest}} for performing a simulation study for generalized pairwise comparison. 
\cr #' #' @return data.frame #' @keywords methods #' @author Brice Ozenne ## * model.tables (code) #' @exportMethod model.tables setMethod(f = "model.tables", signature = "S4BuysePower", definition = function(x, type = "summary", statistic = NULL, endpoint = NULL, order.Hprojection = NULL, transformation = NULL){ dt.res <- slot(x, name = "results") object.endpoint <- slot(x, name = "endpoint") object.seed <- slot(x, name = "seed") args <- slot(x, name = "args") alpha <- 1-args$conf.level null <- args$null method.inference <- args$method.inference object.restriction <- args$restriction object.threshold <- args$threshold object.type <- args$type ## ** normalize and check arguments type <- match.arg(type, c("raw","summary")) valid.endpoint <- names(object.endpoint) valid.statistic <- unique(dt.res$statistic) valid.order <- unique(dt.res$order) valid.transformation <- unique(dt.res$transformation) option <- BuyseTest.options() if(is.null(statistic)){ statistic <- unique(dt.res$statistic) } if(is.null(endpoint)){ endpoint <- utils::tail(valid.endpoint, 1) }else if(identical(endpoint,"all")){ endpoint <- valid.endpoint }else if(is.numeric(endpoint) && all(endpoint %in% 1:length(valid.endpoint))){ endpoint <- valid.endpoint[endpoint] } if(is.null(order.Hprojection)){ order.Hprojection <- max(dt.res$order.Hprojection) } if(is.null(transformation)){ transformation <- any(dt.res$transformation!="none") } statistic <- sapply(gsub("[[:blank:]]", "", tolower(statistic)), switch, "netbenefit" = "netBenefit", "winratio" = "winRatio", "favorable" = "favorable", "unfavorable" = "unfavorable", statistic) validCharacter(statistic, name1 = "statistic", valid.values = valid.statistic, valid.length = 1:2, method = "summary[S4BuysePower]") validCharacter(endpoint, name1 = "endpoint", valid.length = NULL, valid.values = valid.endpoint, refuse.duplicates = TRUE, refuse.NULL = TRUE, method = "summary[S4BuysePower]") validLogical(transformation, name1 = "transformation", valid.length = 1, method = "summary[S4BuysePower]") validInteger(order.Hprojection, name1 = "order.Hprojection", valid.length = 1, min = min(valid.order), max = max(valid.order), method = "summary[S4BuysePower]") ## ** subset if(transformation){ index.subset <- which((dt.res$endpoint %in% endpoint) * (dt.res$order == order.Hprojection) * (dt.res$transformation != "none") == 1) }else{ index.subset <- which((dt.res$endpoint %in% endpoint) * (dt.res$order == order.Hprojection) * (dt.res$transformation == "none") == 1) } if(type == "summary"){ if(method.inference == "none"){ dtS.res <- dt.res[index.subset,list(rep.estimate = sum(!is.na(.SD$estimate)), mean.estimate = mean(.SD$estimate, na.rm = TRUE)), by = c("n.T","n.C","endpoint","statistic"),] col.value <- c("mean.estimate","rep.estimate") }else{ dtS.res <- dt.res[index.subset,list(rep.estimate = sum(!is.na(.SD$estimate)), rep.se = sum(!is.na(.SD$se)), mean.estimate = mean(.SD$estimate, na.rm = TRUE), sd.estimate = stats::sd(.SD$estimate, na.rm = TRUE), mean.se = mean(.SD$se, na.rm = TRUE), rejection.rate = mean(.SD$p.value<=alpha, na.rm = TRUE)), by = c("n.T","n.C","endpoint","statistic"),] col.value <- c("mean.estimate","sd.estimate","mean.se","rejection.rate","rep.estimate","rep.se") } index.endpoint <- match(dtS.res$endpoint, valid.endpoint) dtS.res$endpoint <- object.endpoint[index.endpoint] dtS.res$threshold <- object.threshold[index.endpoint] dtS.res$restriction <- object.restriction[index.endpoint] if(any(object.type[index.endpoint]=="bin")){ 
dtS.res$threshold[object.type[index.endpoint]=="bin"] <- NA } data.table::setkeyv(dtS.res, c("endpoint","n.T")) data.table::setcolorder(dtS.res, neworder = c("statistic","endpoint","restriction","threshold","n.T","n.C",col.value)) }else if(type == "raw"){ dtS.res <- dt.res[index.subset] } ## ** export if(method.inference == "u-statistic"){ if(transformation){ attr(dtS.res,"transformation") <- stats::setNames(dt.res[index.subset,.SD$transformation],dt.res[index.subset,.SD$statistic])[!duplicated(dt.res[index.subset,.SD$transformation])] } attr(dtS.res,"order.Hprojection") <- order.Hprojection } return(dtS.res) } ) ##---------------------------------------------------------------------- ### S4-BuysePower-model.tables.R ends here
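## Hedged usage sketch (illustration only, wrapped in if(FALSE) so it never
## runs at load time): how model.tables is meant to be called on a
## powerBuyseTest output. The data generator 'simFCT' and the argument values
## are hypothetical; see the powerBuyseTest documentation for the exact
## interface.
if(FALSE){
    simFCT <- function(n.C, n.T){ ## toy generator: one continuous endpoint
        data.table::data.table(Y = stats::rnorm(n.C + n.T),
                               group = rep(c(0, 1), c(n.C, n.T)))
    }
    e.power <- powerBuyseTest(sim = simFCT, sample.size = c(25, 50), n.rep = 10,
                              formula = group ~ cont(Y),
                              method.inference = "u-statistic")
    model.tables(e.power, type = "summary") ## one row per sample size and statistic
    model.tables(e.power, type = "raw")     ## one row per simulated dataset
}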
/scratch/gouwar.j/cran-all/cranData/BuyseTest/R/S4-BuysePower-model.tables.R
### S4-BuysePower-nobs.R --- ##---------------------------------------------------------------------- ## Author: Brice Ozenne ## Created: Jul 3 2023 (10:00) ## Version: ## Last-Updated: Jul 3 2023 (10:34) ## By: Brice Ozenne ## Update #: 29 ##---------------------------------------------------------------------- ## ### Commentary: ## ### Change Log: ##---------------------------------------------------------------------- ## ### Code: ## * Documentation - nobs #' @docType methods #' @name S4BuysePower-nobs #' @title Sample Size for Class "S4BuysePower" #' @aliases nobs,S4BuysePower-method #' @include S4-BuysePower.R #' #' @description Display the sample size in each treatment arm used for the simulation. #' #' @param object an \R object of class \code{S4BuysePower}, i.e., output of \code{\link{powerBuyseTest}} #' @param ... not used, for compatibility with the generic method. #' #' @return A matrix with two columns, one for each treatment group, and as many rows as sample sizes used for the simulation. #' #' @keywords methods #' @author Brice Ozenne ## * Method - nobs #' @rdname S4BuysePower-nobs #' @exportMethod nobs setMethod(f = "nobs", signature = "S4BuysePower", definition = function(object, ...){ return([email protected]) } ) ##---------------------------------------------------------------------- ### S4-BuysePower-nobs.R ends here
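## Hedged illustration (wrapped in if(FALSE) so it never runs at load time):
## with 'e.power' a hypothetical S4BuysePower object, e.g. built as in the
## model.tables sketch of this package, nobs returns the simulated sample
## sizes, one row per scenario.
if(FALSE){
    nobs(e.power) ## matrix with one column per treatment group ("C" and "T")
}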
/scratch/gouwar.j/cran-all/cranData/BuyseTest/R/S4-BuysePower-nobs.R
## * Documentation - print #' @docType methods #' @name S4BuysePower-print #' @title Print Method for Class "S4BuysePower" #' @aliases print,S4BuysePower-method #' @include S4-BuysePower.R S4-BuysePower-summary.R #' #' @description Display the main results stored in a \code{S4BuysePower} object. #' #' @param x an \R object of class \code{S4BuysePower}, i.e., output of \code{\link{powerBuyseTest}} #' @param ... additional arguments passed to the summary method. #' #' @seealso #' \code{\link{powerBuyseTest}} for performing power calculation based on GPC. \cr #' \code{\link{S4BuysePower-summary}} for a more detailed presentation of the \code{S4BuysePower} object. #' #' @return invisible list of tables, one per statistic #' @keywords print #' @author Brice Ozenne ## * Method - print #' @rdname S4BuysePower-print #' @exportMethod print setMethod(f = "print", signature = "S4BuysePower", definition = function(x, ...){ ## compute summary statistics outSummary <- summary(x, print = FALSE, ...) print(outSummary[[1]], row.names = FALSE, quote = FALSE) return(invisible(outSummary)) } )
/scratch/gouwar.j/cran-all/cranData/BuyseTest/R/S4-BuysePower-print.R
## * Documentation - show #' @docType methods #' @name S4BuysePower-show #' @title Show Method for Class "S4BuysePower" #' @aliases show,S4BuysePower-method #' @include S4-BuysePower.R S4-BuysePower-print.R #' #' @description Display the main results stored in a \code{S4BuysePower} object. #' #' @param object an \R object of class \code{S4BuysePower}, i.e., output of \code{\link{powerBuyseTest}} #' #' @seealso #' \code{\link{powerBuyseTest}} for performing power calculation based on GPC. \cr #' \code{\link{S4BuysePower-summary}} for a more detailed presentation of the \code{S4BuysePower} object. #' #' @return invisible \code{NULL} #' @keywords print #' #' @author Brice Ozenne ## * Method - show #' @rdname S4BuysePower-show #' @exportMethod show setMethod(f = "show", signature = "S4BuysePower", definition = function(object){ print(object) return(invisible(NULL)) } )
/scratch/gouwar.j/cran-all/cranData/BuyseTest/R/S4-BuysePower-show.R
## * Documentation - summary #' @docType methods #' @name S4BuysePower-summary #' @title Summary Method for Class "S4BuysePower" #' @aliases summary,S4BuysePower-method #' @include S4-BuysePower.R #' #' @description Summarize the results from the \code{\link{powerBuyseTest}} function. #' #' @param object output of \code{\link{powerBuyseTest}} #' @param statistic [character] statistic relative to which the power should be computed: #' \code{"netBenefit"} displays the net benefit, as described in Buyse (2010) and Peron et al. (2016), #' \code{"winRatio"} displays the win ratio, as described in Wang et al. (2016), #' \code{"mannWhitney"} displays the proportion in favor of the treatment (also called Mann-Whitney parameter), as described in Fay et al. (2018). #' Default value read from \code{BuyseTest.options()}. #' @param endpoint [character vector] the endpoints to be displayed: must be the name of the endpoint followed by an underscore and then by the threshold. #' @param transformation [logical] should the CI be computed on the logit scale / log scale for the net benefit / win ratio and backtransformed. #' @param order.Hprojection [integer 1,2] the order of the H-projection to be used to compute the variance of the net benefit/win ratio. #' @param print [logical] Should the table be displayed? #' @param digit [integer vector] the number of digits to use for printing the counts and the delta. #' @param legend [logical] should explanations about the content of each column be displayed? #' @param col.rep [logical] should the number of successful simulations be displayed? #' @param ... Not used. For compatibility with the generic method. #' #' @seealso #' \code{\link{powerBuyseTest}} for performing a simulation study for generalized pairwise comparison. \cr #' #' @return data.frame #' @keywords print #' @author Brice Ozenne ## * method - summary #' @rdname S4BuysePower-summary #' @exportMethod summary setMethod(f = "summary", signature = "S4BuysePower", definition = function(object, statistic = NULL, endpoint = NULL, order.Hprojection = NULL, transformation = NULL, print = TRUE, legend = TRUE, col.rep = FALSE, digit = 4, ...){ ## ** normalize and check arguments option <- BuyseTest.options() dots <- list(...) if(length(dots)>0){ stop("Unknown argument(s) \'",paste(names(dots),collapse="\' \'"),"\'. 
\n") } validLogical(print, name1 = "print", valid.length = 1, method = "summary[S4BuysePower]") args <- slot(object, name = "args") null <- args$null power <- args$power alpha <- 1-args$conf.level n.rep <- args$n.rep restriction <- args$restriction method.inference <- args$method.inference max.sample.size <- args$max.sample.size dtS.res <- model.tables(object, statistic = statistic, endpoint = endpoint, order.Hprojection = order.Hprojection, transformation = transformation) col.value <- intersect(names(dtS.res),c("mean.estimate","sd.estimate","mean.se","rejection.rate","rep.estimate","rep.se")) statistic <- unique(dtS.res$statistic) order.Hprojection <- attr(dtS.res,"order.Hprojection") transformation <- attr(dtS.res,"transformation") nobs <- nobs(object) ## ** print ls.df.print <- stats::setNames(lapply(statistic, function(iStat){ ## iStat <- dtS.res$statistic[1] iDF <- as.data.frame(dtS.res[dtS.res$statistic == iStat]) iDF$statistic <- NULL iDF[,col.value] <- round(iDF[,col.value], digits = digit) if(col.rep == FALSE){ iDF$rep.estimate <- NULL iDF$rep.se <- NULL } iDF[duplicated(iDF[,c("endpoint","restriction","threshold")]),c("endpoint","restriction","threshold")] <- as.character(NA) if(all(is.na(iDF$restriction))){ iDF$restriction <- NULL } if(all(is.na(iDF$threshold))){ iDF$threshold <- NULL } iDF[] <- lapply(iDF, as.character) iDF[is.na(iDF)] <- "" return(iDF) }), statistic) if(print){ if(!is.null(power)){ range.sampleC <- c(ceiling(min(attr(nobs,"sample")[,"C"])), ceiling(max(attr(nobs,"sample")[,"C"]))) range.sampleT <- c(ceiling(min(attr(nobs,"sample")[,"T"])), ceiling(max(attr(nobs,"sample")[,"T"]))) cat(" Sample size calculation with Generalized pairwise comparison\n", sep = "") cat(" for a power of ",power," and type 1 error rate of ",alpha," \n\n", sep = "") cat(" - estimated sample size (mean [min;max]): ",nobs[,"C"]," [",range.sampleC[1],";",range.sampleC[2],"] controls\n", " ",nobs[,"T"]," [",range.sampleT[1],";",range.sampleT[2],"] treated\n\n",sep="") }else{ cat(" Simulation study with Generalized pairwise comparison\n", sep = "") cat(" with ",n.rep[1]," samples\n\n", sep = "") } rm.duplicate <- c("n.T", "n.C", "rep.estimate", "rep.se", "mean.estimate", "sd.estimate") for(iStatistic in statistic){ if(all(is.na(restriction))){ name.statistic <- switch(iStatistic, "netBenefit" = "net benefit", "winRatio" = "win ratio", "favorable" = "proportion in favor of treatment", "unfavorable" = "proportion in favor of control" ) }else{ name.statistic <- switch(iStatistic, "netBenefit" = "restricted net benefit", "winRatio" = "restricted win ratio", "favorable" = "restricted proportion in favor of treatment", "unfavorable" = "restricted proportion in favor of control" ) } cat(" - ",name.statistic," statistic (null hypothesis Delta=",null[statistic],")\n", sep = "") print(ls.df.print[[iStatistic]], row.names = FALSE, quote = FALSE) cat("\n") } if(legend){ M <- rbind(c(" n.T",":","number of observations in the treatment group"), c(" n.C",":","number of observations in the control group"), c(" mean.estimate",":","average estimate over simulations"), c(" sd.estimate",":","standard deviation of the estimate over simulations")) if(method.inference != "none"){ M <- rbind(M, c(" mean.se",":","average estimated standard error of the estimate over simulations"), c(" rejection",":","frequency of the rejection of the null hypothesis over simulations") ) txt.note <- paste0("(standard error: H-projection of order ",order.Hprojection,"| p-value:") if(!is.null(transformation)){ txt.note <- 
paste0(txt.note," after transformation) \n", sep="") }else{ txt.note <- paste0(txt.note," original scale) \n", sep="") } }else{ txt.note <- NULL } if(col.rep){ M <- rbind(M, c(" rep.estimate",":","number of sucessful simulations for the point estimation"), c(" rep.se",":","number of sucessful simulations for the estimation of the standard error"), ) } nchar.1 <- sapply(M[,1],nchar) M[,1] <- paste0(M[,1], sapply(max(nchar.1) - nchar.1, function(iX){paste0(rep(" ",time = iX),collapse = "")})) txt.legend <- apply(M, 1, function(iRow){paste(iRow[1],iRow[2]," ",iRow[3],"\n",sep = "")}) cat(txt.legend,sep ="") cat(txt.note,sep ="") cat("\n") } } ## ** export return(invisible(ls.df.print)) } )
/scratch/gouwar.j/cran-all/cranData/BuyseTest/R/S4-BuysePower-summary.R
## * Documentation S4BuysePower #' @name S4BuysePower-class #' @title Class "S4BuysePower" (output of powerBuyseTest) #' #' @description A \code{\link{powerBuyseTest}} output is reported in a \code{S4BuysePower} object. #' #' @seealso #' \code{\link{powerBuyseTest}} for the function performing power calculation based on GPC. \cr #' \code{\link{S4BuysePower-summary}} for the summary of the powerBuyseTest function results. #' #' @keywords classes #' @author Brice Ozenne ## * Class S4BuysePower #' @rdname S4BuysePower-class #' @exportClass S4BuysePower setClass( Class = "S4BuysePower", representation( args = "list", endpoint = "character", results = "data.table", sample.size = "matrix", seed = "numeric" ) ) ## * Initialize S4BuysePower objects methods::setMethod( f = "initialize", signature = "S4BuysePower", definition = function(.Object, alternative, method.inference, conf.level, endpoint, null, power, n.rep, results, threshold, restriction, type, max.sample.size, sample.sizeC, sample.sizeT, seed){ ## ** store .Object@args <- list(alternative = alternative, conf.level = conf.level, method.inference = method.inference, n.rep = n.rep, null = null, restriction = restriction, threshold = threshold, type = type ) .Object@endpoint <- stats::setNames(endpoint,paste0(endpoint,ifelse(!is.na(restriction),paste0("_r",restriction),""),ifelse(threshold>1e-12,paste0("_t",threshold),""))) .Object@results <- results [email protected] <- cbind("C" = sample.sizeC, "T" = sample.sizeT) if(!is.null(power)){ .Object@args$power <- power .Object@args$max.sample.size <- max.sample.size attr([email protected], "sample") <- cbind("C" = attr(sample.sizeC,"sample"), "T" = attr(sample.sizeT, "sample")) } if(!is.null(seed)){ .Object@seed <- seed } ## ** export ## validObject(.Object) return(.Object) }) ## * Constructor S4BuysePower objects S4BuysePower <- function(...) new("S4BuysePower", ...)
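## Hedged illustration (wrapped in if(FALSE) so it never runs at load time):
## the slots can be inspected directly, although the dedicated methods
## (summary, model.tables, nobs) are the intended interface. 'e.power' is a
## hypothetical S4BuysePower object.
if(FALSE){
    slotNames("S4BuysePower") ## "args" "endpoint" "results" "sample.size" "seed"
    slot(e.power, "results")  ## data.table gathering the simulation results
}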
/scratch/gouwar.j/cran-all/cranData/BuyseTest/R/S4-BuysePower.R
### S4BuyseTest-coef.R --- ##---------------------------------------------------------------------- ## Author: Brice Ozenne ## Created: apr 12 2019 (10:45) ## Version: ## Last-Updated: jul 18 2023 (09:28) ## By: Brice Ozenne ## Update #: 365 ##---------------------------------------------------------------------- ## ### Commentary: ## ### Change Log: ##---------------------------------------------------------------------- ## ### Code: ## * Documentation - coef #' @docType methods #' @name S4BuyseTest-coef #' @title Extract Summary Statistics from GPC #' @aliases coef,S4BuyseTest-method #' @include S4-BuyseTest.R #' #' @description Extract summary statistics (net benefit, win ratio, ...) from GPC. #' #' @param object a \code{S4BuyseTest} object, output of \code{\link{BuyseTest}}. #' @param statistic [character] the type of summary statistic. See the detail section. #' @param endpoint [character] for which endpoint(s) the summary statistic should be output? #' If \code{NULL} returns the summary statistic for all endpoints. #' @param strata [character vector] the strata relative to which the statistic should be output. #' Can also be \code{"global"} or \code{FALSE} to output the statistic pooled over all strata, #' or \code{TRUE} to output each strata-specific statistic. #' @param cumulative [logical] should the summary statistic be cumulated over endpoints? #' Otherwise display the contribution of each endpoint. #' @param resampling [logical] should the summary statistic obtained by resampling be output? #' @param simplify [logical] should the result be coerced to the lowest possible dimension? #' @param ... ignored. #' #' @details #' One of the following statistics can be specified: #' \itemize{ #' \item \code{"netBenefit"}: returns the net benefit. #' \item \code{"winRatio"}: returns the win ratio. #' \item \code{"favorable"}: returns the proportion in favor of the treatment (also called Mann-Whitney parameter). #' \item \code{"unfavorable"}: returns the proportion in favor of the control. #' \item \code{"neutral"}: returns the proportion of neutral pairs. #' \item \code{"uninf"}: returns the proportion of uninformative pairs. #' \item \code{"count.favorable"}: returns the number of pairs in favor of the treatment. #' \item \code{"count.unfavorable"}: returns the number of pairs in favor of the control. #' \item \code{"count.neutral"}: returns the number of neutral pairs. #' \item \code{"count.uninf"}: returns the number of uninformative pairs. #' } #' #' @return When \code{resampling=FALSE} and \code{simplify=FALSE}, a matrix (strata, endpoint). #' When \code{resampling=TRUE} and \code{simplify=FALSE}, an array (sample, strata, endpoint). #' #' @keywords method #' @author Brice Ozenne ## * method - coef #' @rdname S4BuyseTest-coef #' @exportMethod coef setMethod(f = "coef", signature = "S4BuyseTest", definition = function(object, endpoint = NULL, statistic = NULL, strata = FALSE, cumulative = NULL, resampling = FALSE, simplify = TRUE, ...){ ## ** normalize arguments option <- BuyseTest.options() mycall <- match.call() dots <- list(...) if(length(dots)>0){ stop("Unknown argument(s) \'",paste(names(dots),collapse="\' \'"),"\'. 
\n") } ## statistic if(is.null(statistic)){ statistic <- option$statistic } statistic <- switch(gsub("[[:blank:]]", "", tolower(statistic)), "netbenefit" = "netBenefit", "winratio" = "winRatio", "favorable" = "favorable", "unfavorable" = "unfavorable", "uninformative" = "uninf", statistic) type.count <- c("count.favorable","count.unfavorable","count.neutral","count.uninf") validCharacter(statistic, name1 = "statistic", valid.values = c("netBenefit","winRatio","favorable","unfavorable","neutral","uninf",type.count), valid.length = 1, method = "coef[S4BuyseTest]") if(is.null(cumulative)){ if(statistic %in% c("count.neutral","neutral","count.uninf","uninf")){ cumulative <- FALSE }else{ cumulative <- TRUE } }else if(statistic %in% c("count.neutral","neutral","count.uninf","uninf") && cumulative){ stop("Argument \'cumulative\' must be FALSE when argument \'statistic\' is set to \"neutral\" or \"uninf\". \n") } ## endpoint valid.endpoint <- names(object@endpoint) n.endpoint <- length(valid.endpoint) if(is.null(endpoint)){ endpoint <- valid.endpoint }else if(!is.null(endpoint)){ if(is.numeric(endpoint)){ validInteger(endpoint, name1 = "endpoint", min = 1, max = length(valid.endpoint), valid.length = NULL, method = "iid[BuyseTest]") endpoint <- valid.endpoint[endpoint] }else{ validCharacter(endpoint, valid.length = 1:length(valid.endpoint), valid.values = valid.endpoint, refuse.NULL = FALSE) } } weightEndpoint <- slot(object, "weightEndpoint") ## strata level.strata <- [email protected] n.strata <- length(level.strata) weightStrata <- object@weightStrata type.weightStrata <- attr(weightStrata,"type") if(is.null(strata)){ if(length(level.strata)==1){ strata <- "global" }else{ strata <- c("global", level.strata) } }else if(identical(strata,FALSE)){ strata <- "global" }else if(identical(strata,TRUE)){ strata <- level.strata }else if(is.numeric(strata)){ validInteger(strata, name1 = "strata", valid.length = NULL, min = 1, max = length(level.strata), refuse.NULL = TRUE, refuse.duplicates = TRUE, method = "autoplot[S4BuyseTest]") }else{ validCharacter(strata, name1 = "strata", valid.length = NULL, valid.values = c("global",level.strata), refuse.NULL = FALSE, method = "coef[S4BuyseTest]") } ## resampling if(resampling){ if(!attr(slot(object, "method.inference"),"permutation") && !attr(slot(object, "method.inference"),"bootstrap")){ stop("No resampling procedure was performed so cannot output the corresponding coefficients. \n") } if(statistic %in% type.count){ stop("The number of ",gsub("count.","",statistic)," pairs when performing resampling is not saved. 
\n") } n.resampling <- slot(object, "n.resampling") weightStrataResampling <- slot(object, "weightStrataResampling") } ## ** normalize element in object (add global or stratified result) if(statistic %in% type.count){ object.statistic <- slot(object, statistic) delta <- rbind(global = colSums(object.statistic), object.statistic) Delta <- matrix(.rowCumSum_cpp(delta), nrow = n.strata+1, ncol = n.endpoint, dimnames = list(c("global",level.strata), valid.endpoint)) }else if(resampling == FALSE){ object.delta <- matrix(slot(object, "delta")[,,statistic], nrow = n.strata, ncol = n.endpoint, dimnames = list(level.strata, valid.endpoint)) object.Delta <- matrix(slot(object, "Delta")[,statistic], nrow = 1, ncol = n.endpoint, dimnames = list("global", valid.endpoint)) if(statistic != "winRatio"){ delta <- rbind(global = colSums(.colMultiply_cpp(object.delta, weightStrata)), object.delta) Delta.strata <- .rowCumSum_cpp(.rowMultiply_cpp(object.delta, weightEndpoint)) rownames(Delta.strata) <- rownames(object.delta) Delta <- rbind(object.Delta, Delta.strata) }else if(statistic == "winRatio"){ if(type.weightStrata == "var-winratio"){ delta <- rbind(global = colSums(.colMultiply_cpp(object.delta, weightStrata)), object.delta) }else{ out.fav <- coef(object, statistic = "favorable", endpoint = valid.endpoint, strata = "global", cumulative = FALSE, resampling = FALSE, simplify = FALSE) out.unfav <- coef(object, statistic = "unfavorable", endpoint = valid.endpoint, strata = "global", cumulative = FALSE, resampling = FALSE, simplify = FALSE) delta <- rbind(global = out.fav/out.unfav, object.delta) } out.cumFav <- coef(object, statistic = "favorable", endpoint = valid.endpoint, strata = level.strata, cumulative = TRUE, resampling = FALSE, simplify = FALSE) out.cumUnfav <- coef(object, statistic = "unfavorable", endpoint = valid.endpoint, strata = level.strata, cumulative = TRUE, resampling = FALSE, simplify = FALSE) Delta <- rbind(object.Delta, out.cumFav/out.cumUnfav) } }else if(resampling){ object.deltaResampling <- array(slot(object, "deltaResampling")[,,,statistic], dim = c(n.resampling, n.strata, n.endpoint), dimnames = list(NULL, level.strata, valid.endpoint)) object.DeltaResampling <- matrix(slot(object, "DeltaResampling")[,,statistic], ncol = n.endpoint, dimnames = list(NULL, valid.endpoint)) deltaResampling <- array(NA, dim = c(n.resampling, n.strata+1, n.endpoint), dimnames = list(NULL, c("global",level.strata), valid.endpoint)) deltaResampling[,level.strata,valid.endpoint] <- object.deltaResampling DeltaResampling <- array(NA, dim = c(n.resampling, n.strata+1, n.endpoint), dimnames = list(NULL, c("global",level.strata), valid.endpoint)) DeltaResampling[,"global",valid.endpoint] <- object.DeltaResampling if(statistic == "winRatio"){ if(statistic == "winRatio" && type.weightStrata != "var-winratio"){ favorableResampling <- coef(object, statistic = "favorable", endpoint = valid.endpoint, strata = "global", cumulative = FALSE, resampling = TRUE, simplify = FALSE) unfavorableResampling <- coef(object, statistic = "unfavorable", endpoint = valid.endpoint, strata = "global", cumulative = FALSE, resampling = TRUE, simplify = FALSE) } cumFavorableResampling <- coef(object, statistic = "favorable", endpoint = valid.endpoint, strata = level.strata, cumulative = TRUE, resampling = TRUE, simplify = FALSE) cumUnfavorableResampling <- coef(object, statistic = "unfavorable", endpoint = valid.endpoint, strata = level.strata, cumulative = TRUE, resampling = TRUE, simplify = FALSE) } for(iE in 1:n.endpoint){ ## iE <- 
1 if(n.strata == 1){ deltaResampling[,"global",iE] <- object.deltaResampling[,1,iE] }else{ if(statistic != "winRatio" || type.weightStrata == "var-winratio"){ deltaResampling[,"global",iE] <- rowSums(object.deltaResampling[,,iE]*weightStrataResampling) }else{ deltaResampling[,"global",iE] <- favorableResampling[,"global",iE]/unfavorableResampling[,"global",iE] } } } for(iS in 1:n.strata){ ## iS <- 1 if(n.endpoint == 1){ DeltaResampling[,iS+1,1] <- object.DeltaResampling }else{ if(statistic != "winRatio"){ DeltaResampling[,iS+1,] <- .rowCumSum_cpp(.rowMultiply_cpp(object.deltaResampling[,iS,], weightEndpoint)) }else{ DeltaResampling[,iS+1,] <- cumFavorableResampling[,iS,]/cumUnfavorableResampling[,iS,] } } } } ## ** extract information if(resampling == FALSE){ if(cumulative==TRUE){ out <- Delta[strata,endpoint,drop=simplify] }else if(cumulative == FALSE){ out <- delta[strata,endpoint,drop=simplify] } }else if(resampling){ if(cumulative==TRUE){ out <- DeltaResampling[,strata,endpoint,drop=simplify] }else if(cumulative == FALSE){ out <- deltaResampling[,strata,endpoint,drop=simplify] } } ## ** export return(out) }) ###################################################################### ### S4BuyseTest-coef.R ends here
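## Hedged usage sketch (illustration only, wrapped in if(FALSE) so it never
## runs at load time): extracting summary statistics from a BuyseTest fit.
## The call to simBuyseTest and the formula follow the package documentation;
## treat the exact argument values as illustrative.
if(FALSE){
    set.seed(10)
    df <- simBuyseTest(100)
    e.BT <- BuyseTest(treatment ~ tte(eventtime, status = status) + cont(score),
                      data = df, trace = 0)
    coef(e.BT)                                              ## cumulative, default statistic
    coef(e.BT, statistic = "favorable", cumulative = FALSE) ## per-endpoint contribution
    coef(e.BT, statistic = "count.neutral")                 ## number of neutral pairs
}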
/scratch/gouwar.j/cran-all/cranData/BuyseTest/R/S4-BuyseTest-coef.R
### BuyseTest-confint.R --- ##---------------------------------------------------------------------- ## Author: Brice Ozenne ## Created: May 19 2018 (23:37) ## Version: ## By: Brice Ozenne ## Update #: 1103 ##---------------------------------------------------------------------- ## ### Commentary: ## ### Change Log: ##---------------------------------------------------------------------- ## ### Code: ## * Documentation - confint #' @docType methods #' @name S4BuyseTest-confint #' @title Extract Confidence Interval from GPC #' @aliases confint,S4BuyseTest-method #' @include S4-BuyseTest.R #' #' @description Extract confidence intervals for summary statistics (net benefit, win ratio, ...) estimated by GPC. #' #' @param object an \R object of class \code{\linkS4class{S4BuyseTest}}, i.e., output of \code{\link{BuyseTest}} #' @param statistic [character] the statistic summarizing the pairwise comparison: #' \code{"netBenefit"} displays the net benefit, as described in Buyse (2010) and Peron et al. (2016), #' \code{"winRatio"} displays the win ratio, as described in Wang et al. (2016), #' \code{"favorable"} displays the proportion in favor of the treatment (also called Mann-Whitney parameter), as described in Fay et al. (2018). #' \code{"unfavorable"} displays the proportion in favor of the control. #' Default value read from \code{BuyseTest.options()}. #' @param endpoint [character] for which endpoint(s) the confidence intervals should be output? #' If \code{NULL} returns the confidence intervals for all endpoints. #' @param strata [character] the strata relative to which the statistic should be output. #' Can also be \code{"global"} or \code{FALSE} to output the statistic pooled over all strata, #' or \code{TRUE} to output each strata-specific statistic. #' @param cumulative [logical] should the summary statistic be cumulated over endpoints? #' Otherwise display the contribution of each endpoint. #' @param null [numeric] right hand side of the null hypothesis (used for the computation of the p-value). #' @param conf.level [numeric] confidence level for the confidence intervals. #' Default value read from \code{BuyseTest.options()}. #' @param alternative [character] the type of alternative hypothesis: \code{"two.sided"}, \code{"greater"}, or \code{"less"}. #' Default value read from \code{BuyseTest.options()}. #' @param transformation [logical] should the CI be computed on the inverse hyperbolic tangent scale / log scale for the net benefit / win ratio and backtransformed. #' Otherwise they are computed without any transformation. #' Default value read from \code{BuyseTest.options()}. Not relevant when using permutations or percentile bootstrap. #' @param order.Hprojection [integer, 1-2] order of the H-decomposition used to compute the variance. #' @param method.ci.resampling [character] the method used to compute the confidence intervals and p-values when using bootstrap or permutation (\code{"percentile"}, \code{"gaussian"}, \code{"studentized"}). #' See the details section. #' @param cluster [numeric vector] Group of observations for which the iid assumption holds. #' @param sep [character] character string used to separate the endpoint and the strata when naming the statistics. #' #' @seealso #' \code{\link{BuyseTest}} for performing a generalized pairwise comparison. \cr #' \code{\link{S4BuyseTest-summary}} for a more detailed presentation of the \code{S4BuyseTest} object. 
#' #' @details #' \bold{statistic}: when considering a single endpoint and denoting #' \eqn{Y} the endpoint in the treatment group, #' \eqn{X} the endpoint in the control group, #' and \eqn{\tau} the threshold of clinical relevance, #' the net benefit is \eqn{P[Y \ge X + \tau] - P[X \ge Y + \tau]}, #' the win ratio is \eqn{\frac{P[Y \ge X + \tau]}{P[X \ge Y + \tau]}}, #' the proportion in favor of treatment is \eqn{P[Y \ge X + \tau]}, #' the proportion in favor of control is \eqn{P[X \ge Y + \tau]}. #' #' \bold{method.ci.resampling}: when using bootstrap/permutation, p-values and confidence intervals are computed as follows: \itemize{ #' \item \code{percentile} (bootstrap): compute the confidence interval using the quantiles of the bootstrap estimates. #' Compute the p-value by finding the confidence level at which a bound of the confidence interval equals the null hypothesis. #' #' \item \code{percentile} (permutation): apply the selected transformation to the estimate and permutation estimates. #' Compute the confidence interval by (i) shifting the estimate by the quantiles of the centered permutation estimates and (ii) back-transforming. #' Compute the p-value as the relative frequency at which the estimate is less extreme than the permutation estimates. #' #' \item \code{gaussian} (bootstrap and permutation): apply the selected transformation to the estimate and bootstrap/permutation estimates. #' Estimate the variance of the estimator using the empirical variance of the transformed bootstrap/permutation estimates. #' Compute confidence intervals and p-values under the normality assumption and back-transform the confidence intervals. #' #' \item \code{studentized} (bootstrap): apply the selected transformation to the estimate, its standard error, the bootstrap estimates, and their standard error. #' Compute the studentized bootstrap estimates by dividing the centered bootstrap estimates by their standard error. #' Compute the confidence interval based on the standard error of the estimate and the quantiles of the studentized bootstrap estimates, and back-transform. #' Compute the p-value by finding the confidence level at which a bound of the confidence interval equals the null hypothesis. #' #' \item \code{studentized} (permutation): apply the selected transformation to the estimate, its standard error, the permutation estimates, and their standard error. #' Compute the studentized permutation estimates by dividing the centered permutation estimates by their standard error. #' Compute the confidence interval based on the standard error of the estimate and the quantiles of the studentized permutation estimates, and back-transform. #' Compute the p-value as the relative frequency at which the studentized estimate is less extreme than the permutation studentized estimates. #' #' } #' #' \bold{WARNING}: when using a permutation test, the uncertainty associated with the estimator is computed under the null hypothesis. #' Thus the confidence interval may not be valid if the null hypothesis is false. \cr #' #' @return A matrix containing a column for the estimated statistic (over all strata), #' the lower bound and upper bound of the confidence intervals, and the associated p-values. #' When using resampling methods: #' \itemize{ #' \item an attribute \code{n.resampling} specifying how many samples were used to compute the confidence intervals and the p-values. #' \item an attribute \code{method.ci.resampling} indicating the method used to compute the confidence intervals and p-values. 
#' } #' #' @references #' On the GPC procedure: Marc Buyse (2010). \bold{Generalized pairwise comparisons of prioritized endpoints in the two-sample problem}. \emph{Statistics in Medicine} 29:3245-3257 \cr #' On the win ratio: D. Wang, S. Pocock (2016). \bold{A win ratio approach to comparing continuous non-normal outcomes in clinical trials}. \emph{Pharmaceutical Statistics} 15:238-245 \cr #' On the Mann-Whitney parameter: Fay, Michael P. et al (2018). \bold{Causal estimands and confidence intervals associated with Wilcoxon-Mann-Whitney tests in randomized experiments}. \emph{Statistics in Medicine} 37:2923-2937 \cr #' #' @keywords method #' @author Brice Ozenne ## * Method - confint #' @rdname S4BuyseTest-confint #' @exportMethod confint setMethod(f = "confint", signature = "S4BuyseTest", definition = function(object, endpoint = NULL, statistic = NULL, strata = FALSE, cumulative = TRUE, null = NULL, conf.level = NULL, alternative = NULL, method.ci.resampling = NULL, order.Hprojection = NULL, transformation = NULL, cluster = NULL, sep="."){ option <- BuyseTest.options() sep <- "." D <- length(object@endpoint) method.inference <- [email protected] add.halfNeutral <- [email protected] if(is.null(statistic)){ statistic <- option$statistic } if(is.null(transformation)){ transformation <- option$transformation } if(is.null(conf.level)){ if(!attr(method.inference,"permutation")){ conf.level <- option$conf.level }else{ conf.level <- NA } } if(is.null(alternative)){ alternative <- option$alternative } ## ** normalize and check arguments ## statistic statistic <- switch(gsub("[[:blank:]]", "", tolower(statistic)), "netbenefit" = "netBenefit", "winratio" = "winRatio", "favorable" = "favorable", "unfavorable" = "unfavorable", statistic) validCharacter(statistic, name1 = "statistic", valid.values = c("netBenefit","winRatio","favorable","unfavorable"), valid.length = 1, method = "confint[S4BuyseTest]") ## strata level.strata <- [email protected] if(is.null(strata)){ if(length(level.strata)==1){ strata <- "global" }else{ strata <- c("global", level.strata) } }else if(identical(strata,FALSE)){ strata <- "global" }else if(identical(strata,TRUE)){ strata <- level.strata }else if(is.numeric(strata)){ validInteger(strata, name1 = "strata", valid.length = NULL, min = 1, max = length(level.strata), refuse.NULL = TRUE, refuse.duplicates = TRUE, method = "autoplot[S4BuyseTest]") }else{ validCharacter(strata, name1 = "strata", valid.length = NULL, valid.values = c("global",level.strata), refuse.NULL = FALSE, method = "confint[S4BuyseTest]") } if(attr(slot(object,"scoring.rule"), "test.paired") && any(level.strata %in% strata)){ stop("Cannot output p-values or confidence intervals for stratified statistics with paired data. 
\n") } ## method.ci if(attr(method.inference,"permutation") || attr(method.inference,"bootstrap")){ if(is.null(method.ci.resampling)){ if(attr(method.inference,"studentized")){ method.ci.resampling <- "studentized" }else{ method.ci.resampling <- "percentile" } }else{ method.ci.resampling <- tolower(method.ci.resampling) } validCharacter(method.ci.resampling, name1 = "method.ci.resampling", valid.values = c("percentile","gaussian","studentized"), valid.length = 1, refuse.NULL = FALSE, method = "confint[S4BuyseTest]") if(method.ci.resampling == "studentized" && !attr(method.inference,"studentized")){ stop("Argument \'method.ci.resampling\' cannot be set to \'studentized\' unless a studentized bootstrap/permutation has been performed.\n", "Consider setting \'method.ci.resampling\' to \"percentile\" or \"gaussian\" \n", "or setting \'method.inference\' to \"studentized bootstrap\" or \"studentized permutation\" when calling BuyseTest. \n") } if(method.ci.resampling == "studentized" && cumulative == FALSE){ stop("Endpoint specific confidence intervals are not available with studentized bootstrap/permutation. \n", "Consider applying the BuyseTest function separately to each endpoint \n", "or set \'method.inference\' to \"studentized bootstrap\" or \"studentized permutation\" when calling BuyseTest. \n") } }else if(!is.null(method.ci.resampling)){ warning("Argument \'method.ci.resampling\' is disregarded when not using resampling\n") } if(attr(method.inference,"studentized") && (any(strata != "global") || (cumulative!=TRUE)) ){ stop("Can only perform statistical inference based on studentized resampling for global cumulative effects. \n", "Consider setting argument \'strata\' to FALSE and argument \'cumulative\' to TRUE. \n") } ## order.Hprojection if(attr(method.inference,"ustatistic")){ if(!is.null(order.Hprojection) && order.Hprojection != attr(method.inference,"hprojection")){ validInteger(order.Hprojection, name1 = "order.Hprojection", min = 1, max = 2, valid.length = 1, method = "confint[S4BuyseTest]") if(order.Hprojection > attr(method.inference,"hprojection")){ stop("Cannot find the second order of the H-decomposition. \n", "Consider setting order.Hprojection to 2 in BuyseTest.options before calling BuyseTest. 
\n") } object.hprojection <- FALSE ## move from order H-decomposition of order 2 to H-decomposition of order 1 }else{ object.hprojection <- TRUE } }else{ object.hprojection <- TRUE } ## conf.level validNumeric(conf.level, name1 = "conf.level", min = 0, max = 1, refuse.NA = FALSE, valid.length = 1, method = "confint[S4BuyseTest]") alpha <- 1-conf.level ## alternative validCharacter(alternative, name1 = "alternative", valid.values = c("two.sided","less","greater"), valid.length = 1, method = "confint[S4BuyseTest]") ## transformation validLogical(transformation, name1 = "transformation", valid.length = 1, method = "confint[S4BuyseTest]") ## endpoint valid.endpoint <- names(object@endpoint) if(!is.null(endpoint)){ if(is.numeric(endpoint)){ validInteger(endpoint, name1 = "endpoint", min = 1, max = length(valid.endpoint), valid.length = NULL, method = "iid[BuyseTest]") endpoint <- valid.endpoint[endpoint] }else{ validCharacter(endpoint, valid.length = 1:length(valid.endpoint), valid.values = valid.endpoint, refuse.NULL = FALSE) } }else{ endpoint <- valid.endpoint } n.endpoint <- length(endpoint) ## safety test.model.tte <- all(unlist(lapply(object@iidNuisance,dim))==0) if(method.inference %in% c("u-statistic","u-statistic-bebu") && [email protected] > 0){ warning("The current implementation of the asymptotic distribution is not valid when using a correction. \n", "Standard errors / confidence intervals / p-values may not be correct. \n", "Consider using a resampling approach or checking the control of the type 1 error with powerBuyseTest. \n") } ## weight if(!is.null(cluster) && any(object@weightObs!=1)){ stop("Cannot handle clustered observations when observations are weighted. \n") } ## ** extract estimate all.endpoint <- names(object@endpoint) DeltaW <- coef(object, endpoint = endpoint, statistic = statistic, strata = strata, cumulative = cumulative, resampling = FALSE, simplify = FALSE) if(length(strata)==1 && all(strata=="global")){ Delta <- stats::setNames(DeltaW["global",], endpoint) }else{ DeltaL <- stats::reshape(data.frame(strata = strata, DeltaW), direction = "long", varying = endpoint, times = endpoint, v.names = "statistic") Delta <- stats::setNames(DeltaL$statistic, paste(DeltaL$time, DeltaL$strata, sep = sep)) } if(attr(method.inference,"permutation") || attr(method.inference,"bootstrap")){ DeltaW.resampling <- coef(object, endpoint = endpoint, statistic = statistic, strata = strata, cumulative = cumulative, resampling = TRUE, simplify = FALSE) if(length(strata)==1 && all(strata=="global")){ Delta.resampling <- matrix(DeltaW.resampling[,"global",], ncol = length(endpoint), dimnames = list(NULL, endpoint)) }else{ Delta.resampling <- do.call(cbind,apply(DeltaW.resampling, MARGIN = 3 , FUN = base::identity, simplify = FALSE)) colnames(Delta.resampling) <- unlist(lapply(endpoint, paste, strata, sep = sep)) Delta.resampling <- Delta.resampling[,names(Delta),drop=FALSE] } }else{ Delta.resampling <- NULL } ## ** extract standard error if(attr(method.inference,"ustatistic") || attr(method.inference,"studentized")){ if(object.hprojection && is.null(cluster) && (length(strata)==1 && all(strata=="global")) && cumulative == TRUE){ Delta.se <- sqrt(object@covariance[endpoint,statistic]) if(attr(method.inference,"studentized")){ Delta.se.resampling <- matrix(sqrt(object@covarianceResampling[,endpoint,statistic]), ncol = n.endpoint, dimnames = list(NULL, endpoint)) }else{ Delta.se.resampling <- NULL } }else{ if(identical(order.Hprojection,2)){ warning("Inference will be performed using a first 
order H projection. \n") } ls.Delta.iid <- getIid(object, statistic = statistic, cumulative = cumulative, endpoint = endpoint, strata = strata, cluster = cluster, simplify = FALSE) if(length(strata)==1 || all(strata=="global")){ Delta.iid <- ls.Delta.iid[["global"]][,names(Delta),drop=FALSE] }else{ for(iS in 1:length(strata)){ ## iS <- 1 colnames(ls.Delta.iid[[iS]]) <- paste(colnames(ls.Delta.iid[[iS]]), strata[iS], sep = sep) } Delta.iid <- do.call(cbind,ls.Delta.iid)[,names(Delta),drop=FALSE] } if(is.null(cluster) && any(object@weightObs!=1)){ Delta.iid <- .colMultiply_cpp(Delta.iid, sqrt(object@weightObs)) } M.se <- sqrt(colSums(Delta.iid^2)) Delta.se <- stats::setNames(as.double(M.se), names(Delta)) Delta.se.resampling <- NULL } }else{ Delta.se <- NULL Delta.se.resampling <- NULL } ## ** null hypothesis if(attr(method.inference,"permutation") && !add.halfNeutral && is.null(null) && statistic %in% c("favorable","unfavorable")){ null <- NA }else if(is.null(null)){ null <- switch(statistic, "netBenefit" = 0, "winRatio" = 1, "favorable" = 1/2, "unfavorable" = 1/2) }else { validNumeric(null, valid.length = 1, refuse.NA = !attr(method.inference,"permutation"), min = if("statistic"=="netBenefit"){-1}else{0}, max = if("statistic"=="winRatio"){Inf}else{1}) } null <- rep(null, length(Delta)) ## ** method if(method.inference == "none"){ method.confint <- confint_none transformation <- FALSE }else if(attr(method.inference,"ustatistic")){ method.confint <- confint_Ustatistic }else if(attr(method.inference,"permutation")){ method.confint <- switch(method.ci.resampling, "percentile" = confint_percentilePermutation, "gaussian" = confint_gaussian, "studentized" = confint_studentPermutation) }else if(attr(method.inference,"bootstrap")){ method.confint <- switch(method.ci.resampling, "percentile" = confint_percentileBootstrap, "gaussian" = confint_gaussian, "studentized" = confint_studentBootstrap) if(method.ci.resampling=="percentile"){ transformation <- FALSE } } ## ** transformation if(transformation){ if(object@hierarchical){ trans.weight <- 1 }else{ trans.weight <- sum(object@weightEndpoint) } trans.name <- switch(statistic, "netBenefit" = "atanh", "winRatio" = "log", "favorable" = "atanh", "unfavorable" = "atanh" ) trans.delta <- switch(statistic, "netBenefit" = function(x){if(is.null(x)){x}else{atanh(x/trans.weight)}}, "winRatio" = function(x){if(is.null(x)){x}else{log(x)}}, "favorable" = function(x){if(is.null(x)){x}else{atanh(2*(x/trans.weight-1/2))}}, "unfavorable" = function(x){if(is.null(x)){x}else{atanh(2*(x/trans.weight-1/2))}} ) itrans.delta <- switch(statistic, "netBenefit" = function(x){if(is.null(x)){x}else{trans.weight*tanh(x)}}, "winRatio" = function(x){if(is.null(x)){x}else{exp(x)}}, "favorable" = function(x){if(is.null(x)){x}else{trans.weight*(tanh(x)/2+1/2)}}, "unfavorable" = function(x){if(is.null(x)){x}else{trans.weight*(tanh(x)/2+1/2)}} ) trans.se.delta <- switch(statistic, "netBenefit" = function(x,se){ if(is.null(se)){ out <- se }else{ out <- (se/trans.weight)/(1-(x/trans.weight)^2) if(any(na.omit(se)==0)){ out[se==0] <- 0 } } return(out) }, "winRatio" = function(x,se){ if(is.null(se)){ out <- se }else{ out <- se/x if(any(na.omit(se)==0)){ out[se==0] <- 0 } } return(out) }, "favorable" = function(x,se){ if(is.null(se)){ out <- se }else{ out <- 2*(se/trans.weight)/(1-(2*(x/trans.weight-1/2))^2) if(any(na.omit(se)==0)){ out[se==0] <- 0 } } return(out) }, "unfavorable" = function(x,se){ if(is.null(se)){ out <- se }else{ out <- 2*(se/trans.weight)/(1-(2*(x/trans.weight-1/2))^2) 
if(any(na.omit(se)==0)){ out[se==0] <- 0 } } return(out) }) itrans.se.delta <- switch(statistic, "netBenefit" = function(x,se){ if(is.null(se)){ out <- se }else{ out <- trans.weight*se*(1-(itrans.delta(x)/trans.weight)^2) if(any(na.omit(se)==0)){ out[se==0] <- 0 } } return(out) }, "winRatio" = function(x,se){ if(is.null(se)){ out <- se }else{ out <- se*itrans.delta(x) if(any(na.omit(se)==0)){ out[se==0] <- 0 } } return(out) }, "favorable" = function(x,se){ if(is.null(se)){ out <- se }else{ out <- trans.weight*(se/2)*(1-(2*(itrans.delta(x)/trans.weight-1/2))^2) if(any(na.omit(se)==0)){ out[se==0] <- 0 } } return(out) }, "unfavorable" = function(x,se){ if(is.null(se)){ out <- se }else{ out <- trans.weight*(se/2)*(1-(2*(itrans.delta(x)/trans.weight-1/2))^2) if(any(na.omit(se)==0)){ out[se==0] <- 0 } } return(out) }) }else{ trans.name <- "id" trans.delta <- function(x){x} itrans.delta <- function(x){x} trans.se.delta <- function(x,se){se} itrans.se.delta <- function(x,se){se} } ## ** compute the confidence intervals outConfint <- do.call(method.confint, args = list(Delta = trans.delta(Delta), Delta.resampling = trans.delta(Delta.resampling), Delta.se = trans.se.delta(Delta, se = Delta.se), Delta.se.resampling = trans.se.delta(Delta.resampling, se = Delta.se.resampling), alternative = alternative, null = trans.delta(null), alpha = alpha, endpoint = names(Delta), backtransform.delta = itrans.delta, backtransform.se = itrans.se.delta)) ## do not output CI or p-value when the estimate has not been identified index.NA <- union(which(is.infinite(outConfint[,"estimate"])),which(is.na(outConfint[,"estimate"]))) if(length(index.NA)>0){ outConfint[index.NA,c("se","lower.ci","upper.ci","p.value")] <- NA } outConfint <- as.data.frame(outConfint) ## ** number of permutations if(method.inference != "none" && (attr(method.inference,"permutation") || attr(method.inference,"bootstrap"))){ attr(outConfint, "n.resampling") <- colSums(!is.na(Delta.resampling)) }else{ attr(outConfint, "n.resampling") <- stats::setNames(rep(as.numeric(NA), D), all.endpoint) } attr(outConfint,"method.ci.resampling") <- method.ci.resampling ## ** transform if(attr(method.inference,"ustatistic")){ attr(outConfint,"nametransform") <- trans.name attr(outConfint,"transform") <- trans.delta attr(outConfint,"backtransform") <- itrans.delta } ## ** export if(attr(method.inference,"permutation") && !is.na(conf.level)){ if(is.null(attr(conf.level,"warning.permutation")) || !identical(attr(conf.level,"warning.permutation"),FALSE)){ warning("Confidence intervals are computed under the null hypothesis and therefore may not be valid. 
\n") } attr(outConfint,"warning") <- "Confidence intervals are computed under the null hypothesis" } return(outConfint) }) ## * confint_percentilePermutation (called by confint) confint_percentilePermutation <- function(Delta, Delta.resampling, null, alternative, alpha, endpoint, backtransform.delta, ...){ n.endpoint <- length(endpoint) outTable <- matrix(as.numeric(NA), nrow = n.endpoint, ncol = 6, dimnames = list(endpoint, c("estimate","se","lower.ci","upper.ci","null","p.value"))) ## ** point estimate outTable[,"estimate"] <- backtransform.delta(Delta) ## ** standard error outTable[,"se"] <- apply(backtransform.delta(Delta.resampling), MARGIN = 2, FUN = stats::sd, na.rm = TRUE) ## ** confidence interval if(!is.na(alpha)){ Delta.resamplingH0 <- apply(Delta.resampling, MARGIN = 2, FUN = scale, scale = FALSE, center = TRUE) outTable[,"lower.ci"] <- backtransform.delta(switch(alternative, "two.sided" = Delta + apply(Delta.resamplingH0, MARGIN = 2, FUN = stats::quantile, probs = alpha/2, na.rm = TRUE), "less" = -Inf, "greater" = Delta + apply(Delta.resamplingH0, MARGIN = 2, FUN = stats::quantile, probs = alpha, na.rm = TRUE) )) outTable[,"upper.ci"] <- backtransform.delta(switch(alternative, "two.sided" = Delta + apply(Delta.resamplingH0, MARGIN = 2, FUN = stats::quantile, probs = 1 - alpha/2, na.rm = TRUE), "less" = Delta + apply(Delta.resamplingH0, MARGIN = 2, FUN = stats::quantile, probs = 1 - alpha, na.rm = TRUE), "greater" = Inf )) } ## ** null if(any(is.na(null))){ null[is.na(null)] <- apply(Delta.resampling,2,stats::median)[is.na(null)] } outTable[,"null"] <- backtransform.delta(null) ## ** p-value add.1 <- BuyseTest.options()$add.1.presample outTable[,"p.value"] <- sapply(1:n.endpoint, FUN = function(iE){ ## iE <- 1 test.alternative <- switch(alternative, # test whether each sample is has a cumulative proportions in favor of treatment more extreme than the point estimate "two.sided" = abs(Delta[iE] - null[iE]) <= abs(Delta.resampling[,iE] - null[iE]), "less" = Delta[iE] >= Delta.resampling[,iE], "greater" = Delta[iE] <= Delta.resampling[,iE] ) p.alternative <- (add.1 + sum(test.alternative, na.rm = TRUE)) / (add.1 + sum(!is.na(test.alternative), na.rm = TRUE)) return(p.alternative) }) ## ** export return(outTable) } ## * confint_percentileBootstrap (called by confint) confint_percentileBootstrap <- function(Delta, Delta.resampling, null, alternative, alpha, endpoint, backtransform.delta, ...){ n.endpoint <- length(endpoint) outTable <- matrix(as.numeric(NA), nrow = n.endpoint, ncol = 6, dimnames = list(endpoint, c("estimate","se","lower.ci","upper.ci","null","p.value"))) ## ** point estimate outTable[,"estimate"] <- Delta ## ** standard error outTable[,"se"] <- apply(Delta.resampling, MARGIN = 2, FUN = stats::sd, na.rm = TRUE) ## ** confidence interval if(!is.na(alpha)){ outTable[,"lower.ci"] <- switch(alternative, "two.sided" = apply(Delta.resampling, MARGIN = 2, FUN = stats::quantile, probs = alpha/2, na.rm = TRUE), "less" = -Inf, "greater" = apply(Delta.resampling, MARGIN = 2, FUN = stats::quantile, probs = alpha, na.rm = TRUE) ) outTable[,"upper.ci"] <- switch(alternative, "two.sided" = apply(Delta.resampling, MARGIN = 2, FUN = stats::quantile, probs = 1 - alpha/2, na.rm = TRUE), "less" = apply(Delta.resampling, MARGIN = 2, FUN = stats::quantile, probs = 1 - alpha, na.rm = TRUE), "greater" = Inf ) } ## ** p.values outTable[,"null"] <- backtransform.delta(null) add.1 <- BuyseTest.options()$add.1.presample for(iE in which(!is.na(null))){ outTable[iE, "p.value"] <- 
boot2pvalue(stats::na.omit(Delta.resampling[,iE]), null = null[iE], estimate = Delta[iE], alternative = alternative, add.1 = add.1) } ## quantileCI(Delta.resampling[,iE], alternative = "two.sided", p.value = 0.64, sign.estimate = 1) ## quantileCI(Delta.resampling[,iE], alternative = "two.sided", p.value = 0.66, sign.estimate = 1) ## ** export return(outTable) } ## * confint_gaussian (called by confint) confint_gaussian <- function(Delta, Delta.resampling, null, alternative, alpha, endpoint, backtransform.delta, ...){ n.endpoint <- length(endpoint) outTable <- matrix(as.numeric(NA), nrow = n.endpoint, ncol = 6, dimnames = list(endpoint, c("estimate","se","lower.ci","upper.ci","null","p.value"))) ## ** point estimate outTable[,"estimate"] <- backtransform.delta(Delta) ## ** standard error Delta.se <- apply(Delta.resampling, MARGIN = 2, FUN = stats::sd, na.rm = TRUE) ## computed based on the sample if(any(is.infinite(Delta.resampling))){ if(abs(Delta!=outTable[,"estimate"])>1e-12){ warning("Infinite value for the summary statistic after transformation in some of the bootstrap samples. \n", "Cannot compute confidence intervals or p-value under Gaussian approximation. \n", "Consider setting the argument \'transform\' to FALSE. \n") }else{ warning("Infinite value for the summary statistic in some of the bootstrap samples. \n", "Cannot compute confidence intervals or p-value under Gaussian approximation. \n") } } outTable[,"se"] <- apply(backtransform.delta(Delta.resampling), MARGIN = 2, FUN = stats::sd, na.rm = TRUE) ## ** confidence interval if(!is.na(alpha)){ outTable[,"lower.ci"] <- backtransform.delta(switch(alternative, "two.sided" = Delta + stats::qnorm(alpha/2) * Delta.se, "less" = -Inf, "greater" = Delta + stats::qnorm(alpha) * Delta.se )) outTable[,"upper.ci"] <- backtransform.delta(switch(alternative, "two.sided" = Delta + stats::qnorm(1-alpha/2) * Delta.se, "less" = Delta + stats::qnorm(1-alpha) * Delta.se, "greater" = Inf )) } ## ** p-value outTable[,"null"] <- backtransform.delta(null) outTable[,"p.value"] <- switch(alternative, "two.sided" = 2*(1-stats::pnorm(abs((Delta-null)/Delta.se))), "less" = stats::pnorm((Delta-null)/Delta.se), "greater" = 1-stats::pnorm((Delta-null)/Delta.se) ) ## ** export return(outTable) } ## * confint_studentPermutation (called by confint) confint_studentPermutation <- function(Delta, Delta.se, Delta.resampling, Delta.se.resampling, null, alternative, alpha, endpoint, backtransform.delta, backtransform.se, ...){ n.endpoint <- length(endpoint) outTable <- matrix(as.numeric(NA), nrow = n.endpoint, ncol = 6, dimnames = list(endpoint, c("estimate","se","lower.ci","upper.ci","null","p.value"))) ## identify special case (no variability in the estimate) test.variability <- colSums(Delta.se.resampling!=0)+(apply(Delta.resampling,2,function(iDelta){length(unique(iDelta))})>1)+(Delta.se!=0) index.novar <- which(test.variability==0) index.var <- which(test.variability!=0) ## ** point estimate outTable[,"estimate"] <- backtransform.delta(Delta) ## ** standard error outTable[,"se"] <- backtransform.se(Delta, se = Delta.se) ## ** null if(any(is.na(null))){ null[is.na(null)] <- apply(Delta.resampling,2,stats::median)[is.na(null)] } outTable[,"null"] <- backtransform.delta(null) ## ** critical quantile if(!is.na(alpha) && length(index.var)>0){ Delta.statH0.resampling <- .rowCenter_cpp(Delta.resampling[,index.var,drop=FALSE],null[index.var])/Delta.se.resampling[,index.var,drop=FALSE] Delta.qInf <- switch(alternative, "two.sided" = apply(Delta.statH0.resampling, MARGIN = 
2, FUN = stats::quantile, na.rm = TRUE, probs = alpha/2), "less" = -Inf, "greater" = apply(Delta.statH0.resampling, MARGIN = 2, FUN = stats::quantile, na.rm = TRUE, probs = alpha) ) Delta.qSup <- switch(alternative, "two.sided" = apply(Delta.statH0.resampling, MARGIN = 2, FUN = stats::quantile, na.rm = TRUE, probs = 1-alpha/2), "less" = apply(Delta.statH0.resampling, MARGIN = 2, FUN = stats::quantile, na.rm = TRUE, probs = 1-alpha), "greater" = Inf ) } ## ** confidence interval if(!is.na(alpha) && length(index.var)>0){ outTable[index.var,"lower.ci"] <- backtransform.delta(Delta[index.var] + Delta.qInf * Delta.se[index.var]) outTable[index.var,"upper.ci"] <- backtransform.delta(Delta[index.var] + Delta.qSup * Delta.se[index.var]) } if(length(index.novar)>0){ outTable[index.novar,"lower.ci"] <- backtransform.delta(Delta[index.novar]) outTable[index.novar,"upper.ci"] <- backtransform.delta(Delta[index.novar]) } ## ** p.value add.1 <- BuyseTest.options()$add.1.presample if(length(index.var)>0){ Delta.stat <- (Delta-null)/Delta.se Delta.stat.resampling <- (Delta.resampling-null)/Delta.se.resampling outTable[index.var,"p.value"] <- sapply(index.var, FUN = function(iE){ ## iE <- 1 test.alternative <- switch(alternative, # test whether each sample is has a cumulative proportions in favor of treatment more extreme than the point estimate "two.sided" = abs(Delta.stat[iE]) <= abs(Delta.stat.resampling[,iE]), "less" = Delta.stat[iE] >= Delta.stat.resampling[,iE], "greater" = Delta.stat[iE] <= Delta.stat.resampling[,iE] ) p.alternative <- (add.1 + sum(test.alternative, na.rm = TRUE)) / (add.1 + sum(!is.na(test.alternative), na.rm = TRUE)) return(p.alternative) }) } if(length(index.novar)>0){ outTable[index.novar,c("p.value")] <- 1 } ## ** export return(outTable) } ## * confint_studentBootstrap (called by confint) confint_studentBootstrap <- function(Delta, Delta.se, Delta.resampling, Delta.se.resampling, null, alternative, alpha, endpoint, backtransform.delta, backtransform.se, ...){ n.endpoint <- length(endpoint) outTable <- matrix(as.numeric(NA), nrow = n.endpoint, ncol = 6, dimnames = list(endpoint, c("estimate","se","lower.ci","upper.ci","null","p.value"))) ## identify special case (no variability in the estimate) test.variability <- colSums(Delta.se.resampling!=0)+(apply(Delta.resampling,2,function(iDelta){length(unique(iDelta))})>1)+(Delta.se!=0) index.novar <- which(test.variability==0) index.var <- which(test.variability!=0) ## ** point estimate outTable[,"estimate"] <- backtransform.delta(Delta) ## ** standard error outTable[,"se"] <- backtransform.se(Delta, se = Delta.se) ## ** critical quantile ## z-transformation: center around estimate and divide by estimated se if(length(index.var)>0){ Delta.statH0.resampling <- sweep(Delta.resampling[,index.var,drop=FALSE], MARGIN = 2, FUN = "-", STATS = Delta)/Delta.se.resampling[,index.var,drop=FALSE] ## Delta.statH0.resampling <- apply(Delta.resampling[,index.var,drop=FALSE], MARGIN = 2, FUN = scale, scale = FALSE, center = TRUE)/Delta.se.resampling[,index.var,drop=FALSE] if(!is.na(alpha)){ Delta.qInf <- switch(alternative, "two.sided" = apply(Delta.statH0.resampling, MARGIN = 2, FUN = stats::quantile, na.rm = TRUE, probs = alpha/2), "less" = -Inf, "greater" = apply(Delta.statH0.resampling, MARGIN = 2, FUN = stats::quantile, na.rm = TRUE, probs = alpha) ) Delta.qSup <- switch(alternative, "two.sided" = apply(Delta.statH0.resampling, MARGIN = 2, FUN = stats::quantile, na.rm = TRUE, probs = 1-alpha/2), "less" = apply(Delta.statH0.resampling, MARGIN = 2, 
FUN = stats::quantile, na.rm = TRUE, probs = 1-alpha), "greater" = Inf ) } } ## ** confidence interval ## normal case if(!is.na(alpha) && length(index.var)>0){ outTable[index.var,"lower.ci"] <- backtransform.delta(Delta[index.var] + Delta.qInf * Delta.se[index.var]) outTable[index.var,"upper.ci"] <- backtransform.delta(Delta[index.var] + Delta.qSup * Delta.se[index.var]) } ## special case if(!is.na(alpha) && length(index.novar)>0){ test.diff <- colSums(Delta.resampling[,index.novar,drop=FALSE] != matrix(Delta, nrow = NROW(Delta.resampling), ncol = length(Delta), byrow = TRUE)) outTable[index.novar[test.diff],c("lower.ci","upper.ci")] <- NA outTable[index.novar[test.diff==0],c("lower.ci","upper.ci")] <- backtransform.delta(Delta[index.novar[test.diff==0]]) } ## ** p.value outTable[, "null"] <- backtransform.delta(null) add.1 <- BuyseTest.options()$add.1.presample for(iE in index.var){ ## iE <- 1 outTable[iE, "p.value"] <- boot2pvalue(stats::na.omit(Delta[iE] + Delta.se[iE] * Delta.statH0.resampling[,iE]), null = null[iE], estimate = Delta[iE], ## note: estimate is not used to produce the ci, just for knowing the sign alternative = alternative, add.1 = add.1) } ## special case if(length(index.novar)>0){ outTable[index.novar[test.diff],c("p.value")] <- NA outTable[index.novar[test.diff==0],c("p.value")] <- (null==Delta) + add.1*(null!=Delta)/(NROW(Delta.resampling)+1) } ## ** export return(outTable) } ## * confint_Ustatistic (called by confint) confint_Ustatistic <- function(Delta, Delta.se, statistic, null, alternative, alpha, endpoint, backtransform.delta, backtransform.se, ...){ n.endpoint <- length(endpoint) outTable <- matrix(as.numeric(NA), nrow = n.endpoint, ncol = 6, dimnames = list(endpoint, c("estimate","se","lower.ci","upper.ci","null","p.value"))) ## ** point estimate outTable[,"estimate"] <- backtransform.delta(Delta) ## ** standard error outTable[,"se"] <- backtransform.se(Delta, se = Delta.se) ## ** confidence interval if(!is.na(alpha)){ outTable[,"lower.ci"] <- backtransform.delta(switch(alternative, "two.sided" = Delta + stats::qnorm(alpha/2) * Delta.se, "less" = -Inf, "greater" = Delta + stats::qnorm(alpha) * Delta.se )) outTable[,"upper.ci"] <- backtransform.delta(switch(alternative, "two.sided" = Delta + stats::qnorm(1-alpha/2) * Delta.se, "less" = Delta + stats::qnorm(1-alpha) * Delta.se, "greater" = Inf )) } ## ** p-value outTable[,"null"] <- backtransform.delta(null) outTable[,"p.value"] <- switch(alternative, "two.sided" = 2*(1-stats::pnorm(abs((Delta-null)/Delta.se))), "less" = stats::pnorm((Delta-null)/Delta.se), "greater" = 1-stats::pnorm((Delta-null)/Delta.se) ) ## special case with no variability if(any(na.omit((Delta==null)*(Delta.se==0)) == 1)){ outTable[(Delta==null)*(Delta.se==0) == 1,"p.value"] <- 1 } ## ** export return(outTable) } ## * confint_none (called by confint) confint_none <- function(Delta, endpoint, ...){ n.endpoint <- length(endpoint) outTable <- matrix(NA, nrow = n.endpoint, ncol = 6, dimnames = list(endpoint, c("estimate","se","lower.ci","upper.ci","null","p.value"))) ## ** point estimate outTable[,"estimate"] <- Delta ## ** return return(outTable) } ##---------------------------------------------------------------------- ### S4BuyseTest-confint.R ends here
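
## * illustration: transformation-based confidence interval (not run)
## The u-statistic confidence intervals above are, by default, computed on a
## transformed scale (atanh for the net benefit, log for the win ratio) and
## back-transformed. A minimal sketch of that computation for the net benefit,
## assuming a point estimate 'Delta' and standard error 'Delta.se' (made-up
## numbers, no hierarchical weighting):
## Delta <- 0.2; Delta.se <- 0.05; alpha <- 0.05
## z <- atanh(Delta)                              ## transformed estimate
## z.se <- Delta.se/(1 - Delta^2)                 ## delta-method standard error
## tanh(z + qnorm(c(alpha/2, 1 - alpha/2))*z.se)  ## back-transformed 95% CI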
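
## * illustration: resampling p-value (not run)
## confint_percentilePermutation and confint_studentPermutation compute the
## p-value as the proportion of resampled statistics at least as extreme as the
## observed one, possibly adding 1 to numerator and denominator
## (BuyseTest.options()$add.1.presample). A two-sided sketch with toy numbers:
## obs <- 0.25                             ## observed statistic (null = 0)
## perm <- rnorm(1e3, mean = 0, sd = 0.1)  ## permutation distribution (toy)
## add.1 <- 1
## (add.1 + sum(abs(obs) <= abs(perm)))/(add.1 + sum(!is.na(perm)))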
## * getCount (documentation)
#' @name S4BuyseTest-getCount
#' @title Extract the Number of Favorable, Unfavorable, Neutral, Uninformative Pairs
#' @aliases getCount,S4BuyseTest-method
#' @include S4-BuyseTest.R
#'
#' @description Extract the number of favorable, unfavorable, neutral, and uninformative pairs.
#'
#' @param object an \R object of class \code{\linkS4class{S4BuyseTest}}, i.e., output of \code{\link{BuyseTest}}
#' @param type the type of pairs to be counted. Can be \code{"favorable"}, \code{"unfavorable"}, \code{"neutral"}, or \code{"uninf"}. Can also be \code{"all"} to select all of them.
#'
#' @return
#' A \code{vector} containing the number of pairs.
#'
#' @keywords get S4BuyseTest-method
#' @author Brice Ozenne

## * getCount (code)
#' @rdname S4BuyseTest-getCount
setMethod(f = "getCount",
          signature = "S4BuyseTest",
          definition = function(object, type){

              if (missing(type)) {
                  type <- c("favorable","unfavorable","neutral","uninf")
              }

              validCharacter(type,
                             valid.length = NULL,
                             valid.values = c("favorable","unfavorable","neutral","uninf"),
                             method = "getCount")

              out <- NULL
              if ("favorable" %in% type) {out <- c(out, favorable = [email protected])}
              if ("unfavorable" %in% type) {out <- c(out, unfavorable = [email protected])}
              if ("neutral" %in% type) {out <- c(out, neutral = [email protected])}
              if ("uninf" %in% type) {out <- c(out, uninf = [email protected])}

              return(out)
          }
          )

## * getIid (documentation)
#' @name S4BuyseTest-getIid
#' @title Extract the H-decomposition of the Estimator
#' @aliases getIid,S4BuyseTest-method
#' @include S4-BuyseTest.R
#'
#' @description Extract the H-decomposition of the GPC estimator.
#'
#' @param object an \R object of class \code{\linkS4class{S4BuyseTest}}, i.e., output of \code{\link{BuyseTest}}
#' @param endpoint [character] for which endpoint(s) the H-decomposition should be output?
#' If \code{NULL} returns the sum of the H-decomposition over all endpoints.
#' @param type [character] type of H-decomposition to be output.
#' Can be only for the nuisance parameters (\code{"nuisance"}),
#' or for the u-statistic given the nuisance parameters (\code{"u-statistic"}),
#' or both.
#' @param center [logical] if \code{TRUE} the H-decomposition is centered around 0 (estimated statistic is subtracted).
#' @param scale [logical] if \code{TRUE} the H-decomposition is rescaled (by the sample size in the corresponding arm) such that its sum of squares approximates the variance of the estimator.
#' @param cluster [numeric vector] return the H-decomposition aggregated by cluster.
#' @param statistic [character] statistic relative to which the H-decomposition should be output.
#' @param cumulative [logical] should the H-decomposition be cumulated over endpoints?
#' Otherwise display the contribution of each endpoint.
#' @param strata [character vector] the strata relative to which the H-decomposition of the statistic should be output.
#' Can also be \code{"global"} or \code{FALSE} to output the H-decomposition of the pooled statistic,
#' or \code{TRUE} to output the H-decomposition of each strata-specific statistic.
#' @param simplify [logical] should the result be coerced to the lowest possible dimension?
#'
#' @details WARNING: arguments \code{scale} and \code{center} should be used with care as when set to \code{FALSE} they may not lead to a meaningful decomposition.
#'
#' @return A list of matrices, each element of the list corresponding to a statistic (global or strata-specific) and each matrix having as many columns as endpoints and rows as observations.
#' #' @seealso #' \code{\link{BuyseTest}} for performing a generalized pairwise comparison. \cr #' \code{\link{S4BuyseTest-summary}} for a more detailed presentation of the \code{S4BuyseTest} object. #' #' @keywords S4BuyseTest-method #' @author Brice Ozenne ## * getIid (code) #' @rdname S4BuyseTest-getIid setMethod(f = "getIid", signature = "S4BuyseTest", definition = function(object, endpoint = NULL, statistic = NULL, strata = FALSE, cumulative = TRUE, center = TRUE, scale = TRUE, type = "all", cluster = NULL, simplify = TRUE){ ## ** normalize user input option <- BuyseTest.options() n.obs <- NROW(object@iidAverage$favorable) ## iid has been stored in object if([email protected] != "u-statistic"){ stop("No H-decomposition in the object \n", "Set the argument \'method.inference\' to \"u-statistic\" when calling BuyseTest \n") } ## strata level.strata <- [email protected] n.strata <- length(level.strata) weightStrata <- object@weightStrata indexStrata <- attr(level.strata,"index") attr(level.strata,"index") <- NULL if(is.null(strata)){ if(length(level.strata)==1){ strata <- "global" }else{ strata <- c("global", level.strata) } }else if(identical(strata,FALSE)){ strata <- "global" }else if(identical(strata,TRUE)){ strata <- level.strata }else if(is.numeric(strata)){ validInteger(strata, name1 = "strata", valid.length = NULL, min = 1, max = length(level.strata), refuse.NULL = TRUE, refuse.duplicates = TRUE, method = "autoplot[S4BuyseTest]") }else{ validCharacter(strata, name1 = "strata", valid.length = NULL, valid.values = c("global",level.strata), refuse.NULL = FALSE, method = "coef[S4BuyseTest]") } ## cluster if(!is.null(cluster)){ if(length(cluster) != n.obs){ stop("Incorrect length for argument \'cluster\'. Should have length ",n.obs ,".\n") } if(!is.factor(cluster)){ cluster <- as.factor(cluster) } Ucluster <- levels(cluster) cluster <- as.numeric(cluster) } ## endpoint valid.endpoint <- names(object@endpoint) n.endpoint <- length(valid.endpoint) weightEndpoint <- object@weightEndpoint if(is.numeric(endpoint)){ validInteger(endpoint, name1 = "endpoint", min = 1, max = length(valid.endpoint), valid.length = NULL, method = "iid[BuyseTest]") endpoint <- valid.endpoint[endpoint] }else if(is.null(endpoint)){ endpoint <- valid.endpoint }else{ } validCharacter(endpoint, valid.length = 1:length(valid.endpoint), valid.values = valid.endpoint) ## pairs n.pairs <- [email protected] ntot.pair <- sum(n.pairs) ## index group indexC <- attr([email protected],"indexC") indexT <- attr([email protected],"indexT") ## type validCharacter(type, valid.length = 1, valid.values = c("all","nuisance","u-statistic"), refuse.NULL = FALSE) ## statistic if(is.null(statistic)){ statistic <- option$statistic }else{ statistic <- sapply(tolower(statistic), function(iChar){ switch(gsub("[[:blank:]]", "", iChar), "netbenefit" = "netBenefit", "winratio" = "winRatio", "favorable" = "favorable", "unfavorable" = "unfavorable", statistic)}) validCharacter(statistic, name1 = "statistic", valid.values = c("netBenefit","winRatio","favorable","unfavorable"), valid.length = 1, refuse.duplicates = TRUE, method = "getIid[S4BuyseTest]") } ## if(length(statistic)>1 && any(strata %in% level.strata)){ ## stop("Argument \'statistic\' must be of length one when asking for strata-specific H-decomposition. 
\n") ## } ## ** extract H-decomposition if(type %in% c("all","u-statistic")){ object.iid <- object@iidAverage[c("favorable","unfavorable")] }else{ object.iid <- list(favorable = matrix(0, nrow = n.obs, ncol = n.endpoint, dimnames = list(NULL, valid.endpoint)), unfavorable = matrix(0, nrow = n.obs, ncol = n.endpoint, dimnames = list(NULL, valid.endpoint)) ) } if(type %in% c("all","nuisance") && ([email protected]=="Peron")){ if(length(object@iidNuisance$favorable)>0){ object.iid$favorable <- object.iid$favorable + object@iidNuisance$favorable } ## otherwise model.tte has been passed as argument and there is no uncertainty regarding nuisance if(length(object@iidNuisance$unfavorable)>0){ object.iid$unfavorable <- object.iid$unfavorable + object@iidNuisance$unfavorable } ## otherwise model.tte has been passed as argument and there is no uncertainty regarding nuisance } ## ** remove normalization if((center==FALSE) || (scale==FALSE)){ if(center==FALSE){ delta.favorable <- coef(object, endpoint = valid.endpoint, cumulative = FALSE, statistic = "favorable", strata = level.strata, resampling = FALSE, simplify = FALSE) delta.unfavorable <- coef(object, endpoint = valid.endpoint, cumulative = FALSE, statistic = "unfavorable", strata = level.strata, resampling = FALSE, simplify = FALSE) } for(iter_strata in 1:n.strata){ ## iter_strata <- 1 iStrataC <- intersect(indexC,indexStrata[[iter_strata]]) iStrataT <- intersect(indexT,indexStrata[[iter_strata]]) ## remove scaling (by the sample size: \sum_i IF_i^2 -> 1/n^2 \sum_i IF_i^2) if(scale==FALSE || center == FALSE){ object.iid$favorable[iStrataC,] <- length(iStrataC) * object.iid$favorable[iStrataC,,drop=FALSE] object.iid$favorable[iStrataT,] <- length(iStrataT) * object.iid$favorable[iStrataT,,drop=FALSE] object.iid$unfavorable[iStrataC,] <- length(iStrataC) * object.iid$unfavorable[iStrataC,,drop=FALSE] object.iid$unfavorable[iStrataT,] <- length(iStrataT) * object.iid$unfavorable[iStrataT,,drop=FALSE] } ## remove centering if(center==FALSE){ object.iid$favorable[c(iStrataC,iStrataT),] <- .rowCenter_cpp(object.iid$favorable[c(iStrataC,iStrataT),,drop=FALSE], - delta.favorable[iter_strata,,drop=FALSE]) object.iid$unfavorable[c(iStrataC,iStrataT),] <- .rowCenter_cpp(object.iid$unfavorable[c(iStrataC,iStrataT),,drop=FALSE], -delta.unfavorable[iter_strata,,drop=FALSE]) if(scale){ ## restaure scaling object.iid$favorable[iStrataC,] <- object.iid$favorable[iStrataC,,drop=FALSE]/length(iStrataC) object.iid$favorable[iStrataT,] <- object.iid$favorable[iStrataT,,drop=FALSE]/length(iStrataT) object.iid$unfavorable[iStrataC,] <- object.iid$unfavorable[iStrataC,,drop=FALSE]/length(iStrataC) object.iid$unfavorable[iStrataT,] <- object.iid$unfavorable[iStrataT,,drop=FALSE]/length(iStrataT) } } } } ## ** cumulate over endpoints if(cumulative && n.endpoint > 1){ keep.names <- list(favorable = colnames(object.iid$favorable), unfavorable = colnames(object.iid$unfavorable)) object.iid$favorable <- .rowCumSum_cpp(.rowMultiply_cpp(object.iid$favorable, weightEndpoint)) colnames(object.iid$favorable) <- keep.names$favorable object.iid$unfavorable <- .rowCumSum_cpp(.rowMultiply_cpp(object.iid$unfavorable, weightEndpoint)) colnames(object.iid$unfavorable) <- keep.names$unfavorable } ## ** add strata weights MweightStrata <- matrix(NA, nrow = n.obs, ncol = n.endpoint, dimnames = list(NULL,valid.endpoint)) MweightStrata[unlist(indexStrata),] <- do.call(rbind,lapply(1:n.strata, function(iS){ matrix(weightStrata[iS], nrow = length(indexStrata[[iS]]), ncol = n.endpoint) })) 
ls.iid.favorable <- c(list(global = object.iid$favorable*MweightStrata), stats::setNames(lapply(1:n.strata, function(iStrata){ ## iStrata <- 1 iM <- object.iid$favorable iM[-indexStrata[[iStrata]],] <- 0 return(iM) }), level.strata)) ls.iid.unfavorable <- c(list(global = object.iid$unfavorable*MweightStrata), stats::setNames(lapply(1:n.strata, function(iStrata){ ## iStrata <- 1 iM <- object.iid$unfavorable iM[-indexStrata[[iStrata]],] <- 0 return(iM) }), level.strata)) ## ** aggregate at a cluster level if(!is.null(cluster)){ ls.iid.favorable <- lapply(ls.iid.favorable, function(iIID){ ## iIID <- ls.iid.favorable[[1]] do.call(rbind,by(iIID,cluster,colSums, simplify = FALSE)) }) ls.iid.unfavorable <- lapply(ls.iid.unfavorable, function(iIID){ do.call(rbind,by(iIID,cluster,colSums, simplify = FALSE)) }) } ## ** iid decomposition of the chosen statistic for each endpoint if("winRatio" %in% statistic){ Delta.favorable <- coef(object, endpoint = endpoint, cumulative = cumulative, statistic = "favorable", strata = strata, resampling = FALSE, simplify = FALSE) Delta.unfavorable <- coef(object, endpoint = endpoint, cumulative = cumulative, statistic = "unfavorable", strata = strata, resampling = FALSE, simplify = FALSE) } out <- stats::setNames(lapply(strata, function(iS){ ## iS <- strata[1] iIID.fav <- ls.iid.favorable[[iS]][,endpoint,drop=FALSE] iIID.unfav <- ls.iid.unfavorable[[iS]][,endpoint,drop=FALSE] if(statistic == "favorable"){ iOut <- iIID.fav }else if(statistic == "unfavorable"){ iOut <- iIID.unfav }else if(statistic == "netBenefit"){ iOut <- iIID.fav - iIID.unfav }else if(statistic == "winRatio"){ iOut <- .rowScale_cpp(iIID.fav,Delta.unfavorable[iS,]) - .rowMultiply_cpp(iIID.unfav, Delta.favorable[iS,]/Delta.unfavorable[iS,]^2) colnames(iOut) <- colnames(iIID.fav) } return(iOut) }), strata) ## ** output H-decomposition if(simplify && length(strata)==1){ out <- out[[1]] } if(simplify && length(endpoint)==1){ if(length(strata)==1){ out <- out[,1] }else{ out <- do.call(cbind,out) colnames(out) <- strata } } return(out) }) ## * getPairScore (documentation) #' @name S4BuyseTest-getPairScore #' @title Extract the Score of Each Pair #' @aliases getPairScore,S4BuyseTest-method #' @include S4-BuyseTest.R #' #' @description Extract the score of each pair. #' #' @param object an \R object of class \code{\linkS4class{S4BuyseTest}}, i.e., output of \code{\link{BuyseTest}} #' @param endpoint [integer/character vector] the endpoint for which the scores should be output. #' @param strata [character vector] the strata relative to which the score should be output. #' @param rm.withinStrata [logical] should the columns indicating the position of each member of the pair #' within each treatment group be removed? #' @param rm.strata [logical] should the column containing the level of the strata variable be removed from the output? #' @param rm.indexPair [logical] should the column containing the number associated to each pair be removed from the output? #' @param rm.weight [logical] should the column weight be removed from the output? #' @param rm.corrected [logical] should the columns corresponding to the scores after weighting be removed from the output? #' @param sum [logical] should the scores be cumulated over endpoints? #' @param unlist [logical] should the structure of the output be simplified when possible? #' @param trace [logical] should a message be printed to explain what happened #' when the function returned \code{NULL}? #' #' @details The maximal output (i.e. 
with all columns) contains for each endpoint, a data.table with: #' \itemize{ #' \item \code{"strata"}: the name of the strata to which the pair belongs. #' \item \code{"index.T"}: the index of the treatment observation in the pair relative to the original dataset. #' \item \code{"index.C"}: the index of the control observation in the pair relative to the original dataset. #' \item \code{"indexWithinStrata.T"}: the index of the treatment observation in the pair relative to the treatment group and the strata. #' \item \code{"indexWithinStrata.C"}: the index of the control observation in the pair relative to the control group and the strata. #' \item \code{"favorable"}: the probability that the endpoint is better in the treatment arm vs. in the control arm. #' \item \code{"unfavorable"}: the probability that the endpoint is worse in the treatment arm vs. in the control arm. #' \item \code{"neutral"}: the probability that the endpoint is no different in the treatment arm vs. in the control arm. #' \item \code{"uninformative"}: the weight of the pair that cannot be attributed to favorable/unfavorable/neutral. #' \item \code{"weight"}: the residual weight of the pair to be analyzed at the current outcome. Each pair starts with a weight of 1. #' \item \code{"favorable.corrected"}: same as \code{"favorable"} after weighting. #' \item \code{"unfavorable.corrected"}: same as \code{"favorable"} after weighting. #' \item \code{"neutral.corrected"}: same as \code{"favorable"} after weighting. #' \item \code{"uninformative.corrected"}: same as \code{"favorable"} after weighting. #' } #' Note that the \code{.T} and \code{.C} may change since they correspond of the label of the treatment and control arms. #' The first weighting consists in multiplying the probability by the residual weight of the pair #' (i.e. the weight of the pair that was not informative at the previous endpoint). This is always performed. #' For time to event endpoint an additional weighting may be performed to avoid a possible bias in presence of censoring. #' @keywords get S4BuyseTest-method #' @author Brice Ozenne ## * getPairScore (examples) #' @rdname S4BuyseTest-getPairScore #' @examples #' library(data.table) #' library(prodlim) #' #' ## run BuyseTest #' library(survival) ## import veteran #' #' BT.keep <- BuyseTest(trt ~ tte(time, threshold = 20, status = "status") + cont(karno), #' data = veteran, keep.pairScore = TRUE, #' trace = 0, method.inference = "none") #' #' ## Extract scores #' pScore <- getPairScore(BT.keep, endpoint = 1) #' #' ## look at one pair #' indexPair <- intersect(which(pScore$index.1 == 22), #' which(pScore$index.2 == 71)) #' pScore[indexPair] #' #' ## retrive pair in the original dataset #' pVeteran <- veteran[pScore[indexPair,c(index.1,index.2)],] #' pVeteran #' #' ## the observation from the control group is censored at 97 #' ## the observation from the treatment group has an event at 112 #' ## since the threshold is 20, and (112-20)<97 #' ## we know that the pair is not in favor of the treatment #' #' ## the formula for probability in favor of the control is #' ## Sc(97)/Sc(112+20) #' ## where Sc(t) is the survival at time t in the control arm. 
#' #' ## we first estimate the survival in each arm #' e.KM <- prodlim(Hist(time,status)~trt, data = veteran) #' #' ## and compute the survival #' iSurv <- predict(e.KM, times = c(97,112+20), #' newdata = data.frame(trt = 1, stringsAsFactors = FALSE))[[1]] #' #' ## the probability in favor of the control is then #' pUF <- iSurv[2]/iSurv[1] #' pUF #' ## and the complement to one of that is the probability of being neutral #' pN <- 1 - pUF #' pN #' #' if(require(testthat)){ #' testthat::expect_equal(pUF, pScore[indexPair, unfavorable]) #' testthat::expect_equal(pN, pScore[indexPair, neutral]) #' } ## * getPairScore (code) setMethod(f = "getPairScore", signature = "S4BuyseTest", definition = function(object, endpoint, strata, sum, rm.withinStrata, rm.strata, rm.indexPair, rm.weight, rm.corrected, unlist, trace){ if(length(object@tablePairScore)==0){ if(trace){ cat("pairScore was not exported from the object \n", "Consider setting the argument \'keep.pairScore\' to \"TRUE\" when calling the \"BuyseTest\" function \n", sep = "") } return(invisible(NULL)) }else{ out <- data.table::copy(object@tablePairScore) endpoint.names <- object@endpoint strata.names <- [email protected] if(!is.null(endpoint)){ if(is.numeric(endpoint)){ validInteger(endpoint, min = 1, max = length(endpoint.names), valid.length = NULL, refuse.duplicates = TRUE) }else if(is.character(endpoint)){ validCharacter(endpoint, valid.length = NULL, valid.values = endpoint.names, refuse.duplicates = TRUE) endpoint <- match(endpoint, endpoint.names) }else{ stop("Argument \'endpoint\' must be a numeric of character vector \n") } out <- out[endpoint] } if(!is.null(strata)){ if(is.numeric(strata)){ validInteger(strata, min = 1, max = length(strata.names), valid.length = NULL, refuse.duplicates = TRUE) }else if(is.character(strata)){ validCharacter(strata, valid.length = NULL, valid.values = strata.names, refuse.duplicates = TRUE) strata <- match(strata, strata.names) }else{ stop("Argument \'endpoint\' must be a numeric of character vector \n") } for(iEndpoint in 1:length(out)){ ## iEndpoint <- 1 index.strata <- which(out[[iEndpoint]]$strata %in% strata) out[[iEndpoint]][, c("strata") := factor(.SD$strata, levels = 1:length(strata.names), labels = strata.names)] out[[iEndpoint]] <- out[[iEndpoint]][index.strata] } } old.names <- c("index.C", "index.T", "indexWithinStrata.C", "indexWithinStrata.T") new.names <- c(paste0("index.",[email protected]), paste0("indexWithinStrata.",[email protected])) if(sum && length(out)>1){ out.save <- out out <- out.save[1] for(iEndpoint in 2:length(out.save)){ out[[1]][out.save[[iEndpoint]]$index.pair, c("favorable") := .SD$favorable + out.save[[iEndpoint]]$favorable] out[[1]][out.save[[iEndpoint]]$index.pair, c("unfavorable") := .SD$unfavorable + out.save[[iEndpoint]]$unfavorable] out[[1]][out.save[[iEndpoint]]$index.pair, c("neutral") := .SD$neutral + out.save[[iEndpoint]]$neutral] out[[1]][out.save[[iEndpoint]]$index.pair, c("uninf") := .SD$uninf + out.save[[iEndpoint]]$uninf] out[[1]][out.save[[iEndpoint]]$index.pair, c("weight") := .SD$weight + out.save[[iEndpoint]]$weight] out[[1]][out.save[[iEndpoint]]$index.pair, c("favorableC") := .SD$favorableC + out.save[[iEndpoint]]$favorableC] out[[1]][out.save[[iEndpoint]]$index.pair, c("unfavorableC") := .SD$unfavorableC + out.save[[iEndpoint]]$unfavorableC] out[[1]][out.save[[iEndpoint]]$index.pair, c("neutralC") := .SD$neutralC + out.save[[iEndpoint]]$neutralC] out[[1]][out.save[[iEndpoint]]$index.pair, c("uninfC") := .SD$uninfC + 
out.save[[iEndpoint]]$uninfC] } } for(iEndpoint in 1:length(out)){ ## iEndpoint <- 1 if(rm.withinStrata){ out[[iEndpoint]][,c("indexWithinStrata.T","indexWithinStrata.C") := NULL] data.table::setnames(out[[iEndpoint]], old = old.names[1:2], new = new.names[1:2]) }else{ data.table::setnames(out[[iEndpoint]], old = old.names, new = new.names) } if(rm.indexPair){ out[[iEndpoint]][,c("index.pair") := NULL] } if(rm.strata){ out[[iEndpoint]][,c("strata") := NULL] } if(rm.weight){ out[[iEndpoint]][,c("weight") := NULL] } if(rm.corrected){ out[[iEndpoint]][,c("favorableC","unfavorableC","neutralC","uninfC") := NULL] } } if(length(out) == 1 && unlist == TRUE){ out <- out[[1]] } return(out[]) } }) ## * getPseudovalue (documentation) #' @name S4BuyseTest-getPseudovalue #' @title Extract the pseudovalues of the Estimator #' @aliases getPseudovalue,S4BuyseTest-method #' @include S4-BuyseTest.R #' #' @description Extract the pseudovalues of the estimator. #' The average of the pseudovalues is the estimate and their standard deviation the standard error of the estimate times a factor n #' (i.e. a t-test on their mean will give asymptotically valid confidence intervals and p-values). #' #' @param object an \R object of class \code{\linkS4class{S4BuyseTest}}, i.e., output of \code{\link{BuyseTest}} #' @param endpoint [character] for which endpoint(s) the pseudovalues should be output? #' If \code{NULL} returns the sum of the H-decomposition over all endpoints. #' @param statistic [character] the type of statistic relative to which the pseudovalues should be computed. #' Can be \code{"netBenefit"}, \code{"winRatio"}, \code{"favorable"}, or \code{"unfavorable"}. #' @seealso #' \code{\link{BuyseTest}} for performing a generalized pairwise comparison. \cr #' \code{\link{S4BuyseTest-summary}} for a more detailed presentation of the \code{S4BuyseTest} object. 
#' #' @keywords method #' @author Brice Ozenne ## * getPseudovalue (examples) #' @rdname S4BuyseTest-getPseudovalue #' @examples #' set.seed(10) #' n <- 250 #' d <- simBuyseTest(n) #' #' e.BT <- BuyseTest(treatment ~ tte(eventtime,status,2) + bin(toxicity), #' data = d, trace = 0) #' #' #### net Benefit #' pseudo <- getPseudovalue(e.BT) #' summary(lm(pseudo~1))$coef #' ## asymptotically equivalent to #' confint(e.BT, transformation = TRUE) #' ## (small differences: small sample corrections) #' #' summary(lm(getPseudovalue(e.BT, endpoint = 1)~1))$coef #' #' #### win Ratio #' pseudo <- getPseudovalue(e.BT, statistic = "winRatio") #' summary(lm(pseudo~1))$coef ## wrong p-value (should compare to 1 instead of 0) #' ## asymptotically equivalent to #' confint(e.BT, statistic = "winRatio", transformation = TRUE) #' #' #### favorable #' pseudo <- getPseudovalue(e.BT, statistic = "favorable") #' summary(lm(pseudo~1))$coef ## wrong p-value (should compare to 1/2 instead of 0) #' ## asymptotically equivalent to #' confint(e.BT, statistic = "favorable", transformation = TRUE) #' #' #### unfavorable #' pseudo <- getPseudovalue(e.BT, statistic = "unfavorable") #' summary(lm(pseudo~1))$coef ## wrong p-value (should compare to 1/2 instead of 0) #' ## asymptotically equivalent to #' confint(e.BT, statistic = "unfavorable", transformation = TRUE) ## * getPseudovalue (code) setMethod(f = "getPseudovalue", signature = "S4BuyseTest", definition = function(object, statistic = NULL, endpoint = NULL){ option <- BuyseTest.options() ## ** normalize arguments ## endpoint valid.endpoint <- names(object@endpoint) if(is.null(endpoint)){ endpoint <- utils::tail(valid.endpoint,1) }else if(is.numeric(endpoint)){ validInteger(endpoint, name1 = "endpoint", min = 1, max = length(valid.endpoint), valid.length = 1, method = "iid[BuyseTest]") endpoint <- valid.endpoint[endpoint] }else{ validCharacter(endpoint, valid.length = 1, valid.values = valid.endpoint, refuse.NULL = FALSE) } ## statistics if(is.null(statistic)){ statistic <- option$statistic }else{ statistic <- switch(gsub("[[:blank:]]", "", tolower(statistic)), "netbenefit" = "netBenefit", "winratio" = "winRatio", "favorable" = "favorable", "unfavorable" = "unfavorable", statistic) } validCharacter(statistic, name1 = "statistic", valid.values = c("netBenefit","winRatio","favorable","unfavorable"), valid.length = 1, method = "getPseudovalue[S4BuyseTest]") ## ** compute pseudovalue object.delta <- coef(object, statistic = statistic)[endpoint] count.favorable <- coef(object, statistic = "favorable")[endpoint] count.unfavorable <- coef(object, statistic = "unfavorable")[endpoint] object.iid <- data.frame(favorable = unname(getIid(object, endpoint = endpoint, statistic = "favorable", strata = "global", simplify = FALSE)[["global"]]), unfavorable = unname(getIid(object, endpoint = endpoint, statistic = "unfavorable", strata = "global", simplify = FALSE)[["global"]])) n.obs <- NROW(object.iid) out <- switch(statistic, "favorable" = n.obs * object.iid$favorable + object.delta, "unfavorable" = n.obs * object.iid$unfavorable + object.delta, "netBenefit" = n.obs * (object.iid$favorable - object.iid$unfavorable) + object.delta, "winRatio" = n.obs * (object.iid$favorable / count.unfavorable - object.iid$unfavorable * (count.favorable/count.unfavorable^2)) + object.delta, ) ## ** export return(out) }) ## * getSurvival (documentation) #' @name S4BuyseTest-getSurvival #' @title Extract the Survival and Survival Jumps #' @aliases getSurvival,S4BuyseTest-method #' @include S4-BuyseTest.R 
#' #' @description Extract the survival and survival jumps. #' #' @param object an \R object of class \code{\linkS4class{S4BuyseTest}}, i.e., output of \code{\link{BuyseTest}} #' @param type [character vector] the type of survival to be output. See details. #' @param endpoint [integer/character vector] the endpoint for which the survival should be output. #' @param strata [integer/character vector] the strata relative to which the survival should be output. #' @param unlist [logical] should the structure of the output be simplified when possible. #' @param trace [logical] should a message be printed to explain what happened #' when the function returned \code{NULL}. #' #' @details The argument \code{type} can take any of the following values: #' \itemize{ #' \item \code{"survTimeC"}: survival at the event times for the observations of the control arm. #' \item \code{"survTimeT"}: survival at the event times for the observations of the treatment arm. #' \item \code{"survJumpC"}: survival at the jump times for the survival model in the control arm. #' \item \code{"survJumpT"}: survival at the time times for the survival model in the treatment arm. #' \item \code{"lastSurv"}: survival at the last event time. #' } #' #' @keywords get S4BuyseTest-method #' @author Brice Ozenne ## * getSurvival (code) #' @rdname S4BuyseTest-getSurvival setMethod(f = "getSurvival", signature = "S4BuyseTest", definition = function(object, type, endpoint, strata, unlist, trace){ if(length(object@tableSurvival)==0){ if(trace>0){ if(all(tolower(object@type)!="timetoevent")){ add.txt <- "No endpoint of type time to event \n" }else if(tolower([email protected])!="peron"){ add.txt <- "Consider setting the argument \'scoring.rule\' to \"Peron\" when calling BuyseTest \n" }else{ add.txt <- "Consider setting the argument \'keep.survival\' to TRUE in BuyseTest.options \n" } cat("Survival was not exported from the object \n", add.txt, sep = "") } return(invisible(NULL)) }else{ if(is.null(type)){ type <- c("survTimeC","survTimeT","survJumpC","survJumpT","lastSurv") }else{ validCharacter(type, valid.length = NULL, refuse.duplicates = TRUE, valid.values = c("survTimeC","survTimeT","survJumpC","survJumpT","lastSurv")) } if(!is.null(type)){ out <- object@tableSurvival[type] }else{ out <- data.table::copy(object@tableSurvival) } endpoint.names <- object@endpoint strata.names <- [email protected] if(!is.null(endpoint)){ if(is.numeric(endpoint)){ validInteger(endpoint, min = 1, max = length(endpoint.names), valid.length = NULL, refuse.duplicates = TRUE) }else if(is.character(endpoint)){ validCharacter(endpoint, valid.length = NULL, valid.values = endpoint.names, refuse.duplicates = TRUE) endpoint <- match(endpoint, endpoint.names) }else{ stop("Argument \'endpoint\' must be a numeric of character vector \n") } if("survTimeC" %in% type){ out$survTimeC <- out$survTimeC[endpoint] } if("survTimeT" %in% type){ out$survTimeT <- out$survTimeT[endpoint] } if("survJumpC" %in% type){ out$survJumpC <- out$survJumpC[endpoint] } if("survJumpT" %in% type){ out$survJumpT <- out$survJumpT[endpoint] } if("lastSurv" %in% type){ out$lastSurv <- out$lastSurv[endpoint] } } if(!is.null(strata)){ if(is.numeric(strata)){ validInteger(strata, min = 1, max = length(strata.names), valid.length = NULL, refuse.duplicates = TRUE) }else if(is.character(strata)){ validCharacter(strata, valid.length = NULL, valid.values = strata.names, refuse.duplicates = TRUE) endpoint <- match(strata, strata.names) }else{ stop("Argument \'endpoint\' must be a numeric of character 
vector \n") } for(iEndpoint in 1:length(out[[1]])){ if(length(strata)==1 && unlist == TRUE){ if("survTimeC" %in% type){ out$survTimeC[[iEndpoint]] <- out$survTimeC[[iEndpoint]][[1]] } if("survTimeT" %in% type){ out$survTimeT[[iEndpoint]] <- out$survTimeT[[iEndpoint]][[1]] } if("survJumpC" %in% type){ out$survJumpC[[iEndpoint]] <- out$survJumpC[[iEndpoint]][[1]] } if("survJumpT" %in% type){ out$survJumpT[[iEndpoint]] <- out$survJumpT[[iEndpoint]][[1]] } if("lastSurv" %in% type){ out$lastSurv[[iEndpoint]] <- out$lastSurv[[iEndpoint]][1,] } }else{ if("survTimeC" %in% type){ out$survTimeC[[iEndpoint]] <- out$survTimeC[[iEndpoint]][strata] } if("survTimeT" %in% type){ out$survTimeT[[iEndpoint]] <- out$survTimeT[[iEndpoint]][strata] } if("survJumpC" %in% type){ out$survJumpC[[iEndpoint]] <- out$survJumpC[[iEndpoint]][strata] } if("survJumpT" %in% type){ out$survJumpT[[iEndpoint]] <- out$survJumpT[[iEndpoint]][strata] } if("lastSurv" %in% type){ out$lastSurv[[iEndpoint]] <- out$lastSurv[[iEndpoint]][strata,,drop=FALSE] } } } } if(length(endpoint) == 1 && unlist == TRUE){ if("survTimeC" %in% type){ out$survTimeC <- out$survTimeC[[1]] } if("survTimeT" %in% type){ out$survTimeT <- out$survTimeT[[1]] } if("survJumpC" %in% type){ out$survJumpC <- out$survJumpC[[1]] } if("survJumpT" %in% type){ out$survJumpT <- out$survJumpT[[1]] } if("lastSurv" %in% type){ out$lastSurv <- out$lastSurv[[1]] } } if(length(type) == 1 && unlist == TRUE){ out <- out[[type]] } return(out) } })
##----------------------------------------------------------------------
### S4-BuyseTest-get.R ends here
## * model.tables (documentation)
#' @docType methods
#' @name S4BuyseTest-model.tables
#' @title Extract Summary for Class "S4BuyseTest"
#' @aliases model.tables,S4BuyseTest-method
#' @include S4-BuyseTest.R
#'
#' @description Extract a summary of the results from the \code{\link{BuyseTest}} function.
#'
#' @param x output of \code{\link{BuyseTest}}
#' @param percentage [logical] Should the percentage of pairs of each type be displayed? Otherwise the number of pairs is displayed.
#' @param statistic [character] the statistic summarizing the pairwise comparison:
#' \code{"netBenefit"} displays the net benefit, as described in Buyse (2010) and Peron et al. (2016),
#' \code{"winRatio"} displays the win ratio, as described in Wang et al. (2016),
#' \code{"favorable"} displays the proportion in favor of the treatment (also called the Mann-Whitney parameter), as described in Fay et al. (2018),
#' \code{"unfavorable"} displays the proportion in favor of the control.
#' Default value read from \code{BuyseTest.options()}.
#' @param conf.level [numeric] confidence level for the confidence intervals.
#' Default value read from \code{BuyseTest.options()}.
#' @param strata [logical] should the strata-specific results be displayed or the results pooled across strata?
#' Can also be \code{NULL} to display both.
#' @param columns [character vector] subset of columns to be output (e.g. \code{"endpoint"}, \code{"favorable"}, ...).
#' Can also be \code{"summary"} or \code{"print"} to only select columns displayed in the summary or print. \code{NULL} will select all columns.
#' @param ... arguments to be passed to \code{\link{S4BuyseTest-confint}}
#'
#' @seealso
#' \code{\link{BuyseTest}} for performing a generalized pairwise comparison. \cr
#' \code{\link{S4BuyseTest-class}} for a presentation of the \code{S4BuyseTest} object. \cr
#' \code{\link{S4BuyseTest-confint}} to output confidence intervals and p-values in a matrix format.
#' #' @examples #' library(data.table) #' #' dt <- simBuyseTest(1e2, n.strata = 3) #' #' \dontrun{ #' BT <- BuyseTest(treatment ~ TTE(eventtime, status = status) + Bin(toxicity), data=dt) #' } #' \dontshow{ #' BT <- BuyseTest(treatment ~ TTE(eventtime, status = status) + Bin(toxicity), data=dt, n.resampling = 10, trace = 0) #' } #' model.tables(BT) #' model.tables(BT, percentage = FALSE) #' model.tables(BT, statistic = "winRatio") #' #' @keywords methods #' @author Brice Ozenne ## * mode.tables (code) #' @rdname S4BuyseTest-model.tables #' @exportMethod model.tables setMethod(f = "model.tables", signature = "S4BuyseTest", definition = function(x, percentage = TRUE, statistic = NULL, conf.level = NULL, strata = NULL, columns = "summary", ...){ ## ** normalize and check arguments option <- BuyseTest.options() mycall <- match.call() if(is.null(statistic)){ statistic <- option$statistic } validLogical(percentage, name1 = "percentage", valid.length = 1, refuse.NA = FALSE, method = "model.tables[S4BuyseTest]") statistic <- switch(gsub("[[:blank:]]", "", tolower(statistic)), "netbenefit" = "netBenefit", "winratio" = "winRatio", "favorable" = "favorable", "unfavorable" = "unfavorable", statistic) validCharacter(statistic, name1 = "statistic", valid.values = c("netBenefit","winRatio","favorable","unfavorable"), valid.length = 1, method = "model.tables[S4BuyseTest]") scoring.rule <- slot(x,"scoring.rule") strata.level <- [email protected] if(is.null(strata)){ if(length(strata.level)==1 || (attr(scoring.rule,"test.paired") && "strata" %in% names(mycall) == FALSE)){ strata <- "global" }else{ strata <- c("global",strata.level) } }else if(identical(strata,FALSE)){ strata <- "global" }else if(identical(strata,TRUE)){ strata <- strata.level }else if(is.numeric(strata)){ validInteger(strata, name1 = "strata", valid.length = NULL, min = 1, max = length(level.strata), refuse.NULL = TRUE, refuse.duplicates = TRUE, method = "model.tables[S4BuyseTest]") }else{ validCharacter(strata, name1 = "strata", valid.length = NULL, valid.values = c("global",strata.level), refuse.NULL = FALSE, method = "model.tables[S4BuyseTest]") } ## ** load info from object hierarchical <- slot(x,"hierarchical") endpoint <- slot(x,"endpoint") n.endpoint <- length(endpoint) level.strata <- slot(x,"level.strata") n.strata <- length(level.strata) n.pairs <- slot(x,"n.pairs") count.favorable <- slot(x,"count.favorable") count.unfavorable <- slot(x,"count.unfavorable") count.neutral <- slot(x,"count.neutral") count.uninf <- slot(x,"count.uninf") delta <- coef(x, statistic = statistic, cumulative = FALSE, strata = c("global",strata.level), simplify = FALSE) Delta <- coef(x, statistic = statistic, cumulative = TRUE, strata = "global", simplify = FALSE) n.resampling <- [email protected] method.inference <- [email protected] ## ** compute confidence intervals and p-values outConfint <- confint(x, statistic = statistic, cumulative = TRUE, strata = "global", conf.level = conf.level, ...) 
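## confidence intervals and p-values are computed for the cumulative pooled
## ("global") statistic only; they are merged into the global rows of the table below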
## ** generate summary table ## *** prepare table <- data.frame(matrix(NA,nrow=(n.strata+1)*n.endpoint,ncol=20), stringsAsFactors = FALSE) names(table) <- c("endpoint","restriction","threshold","weight","strata", "total","favorable","unfavorable","neutral","uninf", "delta","Delta","Delta(%)","information(%)", "lower.ci","upper.ci","null","p.value","significance","n.resampling") endpoint.restriction.threshold <- rep(NA, (n.strata+1)*n.endpoint) ## *** global statistic index.global <- seq(0,n.endpoint-1,by=1)*(n.strata+1)+1 table[index.global,"favorable"] <- as.double(colSums(count.favorable)) table[index.global,"unfavorable"] <- as.double(colSums(count.unfavorable)) table[index.global,"neutral"] <- as.double(colSums(count.neutral)) table[index.global,"uninf"] <- as.double(colSums(count.uninf)) table[index.global,"total"] <- rowSums(table[index.global,c("favorable","unfavorable","neutral","uninf")]) table[index.global,"restriction"] <- x@restriction table[index.global,"endpoint"] <- x@endpoint endpoint.restriction.threshold[index.global] <- names(x@endpoint) table[index.global,"threshold"] <- x@threshold table[index.global,"weight"] <- x@weightEndpoint table[index.global,"strata"] <- "global" table[index.global,"delta"] <- delta["global",] table[index.global,"Delta"] <- Delta["global",] table[index.global,"Delta(%)"] <- 100*Delta["global",]/Delta["global",n.endpoint] ## *** strata-sepcific statistic for(iStrata in 1:n.strata){ index.strata <- seq(0,n.endpoint-1,by=1)*(n.strata+1)+1+iStrata table[index.strata,"favorable"] <- count.favorable[iStrata,] table[index.strata,"unfavorable"] <- count.unfavorable[iStrata,] table[index.strata,"neutral"] <- count.neutral[iStrata,] table[index.strata,"uninf"] <- count.uninf[iStrata,] table[index.strata,"strata"] <- level.strata[iStrata] table[index.strata,"endpoint"] <- x@endpoint endpoint.restriction.threshold[index.strata] <- names(x@endpoint) table[index.strata,"threshold"] <- x@threshold table[index.strata,"restriction"] <- x@restriction table[index.strata,"weight"] <- x@weightEndpoint table[index.strata,"delta"] <- delta[iStrata+1,] } ## *** total table[,"total"] <- rowSums(table[,c("favorable","unfavorable","neutral","uninf")]) ## *** percentage if(identical(percentage, TRUE)){ nTot.pairs <- sum(n.pairs) table$total <- 100*table$total/nTot.pairs table$favorable <- 100*table$favorable/nTot.pairs table$unfavorable <- 100*table$unfavorable/nTot.pairs table$neutral <- 100*table$neutral/nTot.pairs table$uninf <- 100*table$uninf/nTot.pairs } ## *** information fraction and co table[index.global,"information(%)"] <- 100*cumsum(colSums(count.favorable+count.unfavorable)/sum(count.favorable+count.unfavorable)) ## *** compute CI and p-value if("lower.ci" %in% names(outConfint)){ table[index.global,"lower.ci"] <- outConfint[,"lower.ci"] } if("upper.ci" %in% names(outConfint)){ table[index.global,"upper.ci"] <- outConfint[,"upper.ci"] } table[index.global,"null"] <- outConfint[,"null"] table[index.global,"p.value"] <- outConfint[,"p.value"] table[index.global,"n.resampling"] <- attr(outConfint,"n.resampling") ## *** restrict to strata if(!is.null(strata)){ endpoint.restriction.threshold <- endpoint.restriction.threshold[table$strata %in% strata] table <- table[table$strata %in% strata,,drop = FALSE] } ## ** subset columns if(identical(columns,"summary") || any(sapply(1:9, function(iNum){identical(columns,paste0("summary",iNum))})) || identical(columns,"print")){ if(identical(columns,"summary")){ columns <- option$summary.display[[1]] }else 
if(identical(columns,"print")){ columns <- option$print.display }else{ columns <- option$summary.display[[which(paste0("summary",1:9)==columns)]] } columns <- setdiff(columns,"significance") if("CI" %in% columns){ index.CI <- which(columns == "CI") if(index.CI == 1){ columns <- c("lower","upper",columns[2:length(columns)]) }else if(index.CI == length(columns)){ columns <- c(columns[1:(index.CI-1)],"lower.ci","upper.ci") }else{ columns <- c(columns[1:(index.CI-1)],"lower.ci","upper.ci",columns[(index.CI+1):length(columns)]) } } if(all(is.na(table$restriction))){ columns <- setdiff(columns,"restriction") } if(all(table$weight==1)){ columns <- setdiff(columns,"weight") } if(all(abs(table$threshold)<=1e-12)){ columns <- setdiff(columns, "threshold") } if(identical(strata, "global")){ columns <- setdiff(columns,"strata") } if("delta" %in% columns && "Delta" %in% columns && n.endpoint == 1 && (n.strata==1 || identical(strata, "global"))){ columns <- setdiff(columns, "delta") } if(method.inference == "none"){ columns <- setdiff(columns,c("lower.ci","upper.ci","p.value","n.resampling")) }else if(attr(method.inference,"ustatistic")){ columns <- setdiff(columns,"n.resampling") }else if(attr(method.inference,"permutation")){ columns <- setdiff(columns,c("lower.ci","upper.ci")) } } if(!is.null(columns)){ validCharacter(columns, name1 = "columns", valid.length = NULL, valid.values = names(table), refuse.NULL = FALSE, method = "summary[S4BuyseTest]") table <- table[,columns,drop=FALSE] } ## ** export attr(table,"endpoint") <- endpoint.restriction.threshold attr(table,"transform") <- attr(outConfint,"nametransform") attr(table,"n.resampling") <- attr(outConfint,"n.resampling") attr(table,"method.ci.resampling") <- attr(outConfint,"method.ci.resampling") attr(table,"warning") <- attr(outConfint,"warning") return(table) } )
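
## * illustration: row layout of the summary table (not run)
## rows are interleaved per endpoint: one "global" row followed by one row per
## stratum (see index.global above). With 2 endpoints and 2 strata the row
## order is: endpoint1-global, endpoint1-stratum1, endpoint1-stratum2,
## endpoint2-global, endpoint2-stratum1, endpoint2-stratum2; the argument
## 'strata' then subsets these rows.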
##----------------------------------------------------------------------
### S4-BuyseTest-model.tables.R ends here
### S4-BuyseTest-nobs.R ---
##----------------------------------------------------------------------
## Author: Brice Ozenne
## Created: Jul  3 2023 (10:00)
## Version:
## Last-Updated: jul 18 2023 (09:29)
##           By: Brice Ozenne
##     Update #: 39
##----------------------------------------------------------------------
##
### Commentary:
##
### Change Log:
##----------------------------------------------------------------------
##
### Code:

## * Documentation - nobs
#' @docType methods
#' @name S4BuyseTest-nobs
#' @title Sample Size for Class "S4BuyseTest"
#' @aliases nobs,S4BuyseTest-method
#' @include S4-BuyseTest.R
#'
#' @description Display the sample size in each treatment arm as well as the number of pairs.
#'
#' @param object an \R object of class \code{S4BuyseTest}, i.e., output of \code{\link{BuyseTest}}
#' @param strata [character vector] the strata relative to which the number of pairs should be output.
#' Can also be \code{"global"} or \code{FALSE} to output the total number of pairs (i.e. across all strata),
#' or \code{TRUE} to output each strata-specific number of pairs.
#' @param simplify [logical] should the result be coerced to the lowest possible dimension?
#' @param ... not used, for compatibility with the generic method.
#'
#' @return A vector (when argument \code{strata} is \code{FALSE}) or a data.frame (when argument \code{strata} is \code{TRUE}). In the latter case each line corresponds to a stratum.
#'
#' @keywords methods
#' @author Brice Ozenne

## * Method - nobs
#' @rdname S4BuyseTest-nobs
#' @exportMethod nobs
setMethod(f = "nobs",
          signature = "S4BuyseTest",
          definition = function(object, strata = FALSE, simplify = TRUE, ...){

              ## ** normalize arguments
              indexC <- attr([email protected],"indexC")
              indexT <- attr([email protected],"indexT")
              level.strata <- [email protected]
              index.strata <- attr(level.strata,"index")

              if(is.null(strata)){
                  if(length(level.strata)==1){
                      strata <- "global"
                  }else{
                      strata <- c("global", level.strata)
                  }
              }else if(identical(strata,FALSE)){
                  strata <- "global"
              }else if(identical(strata,TRUE)){
                  strata <- level.strata
              }else if(is.numeric(strata)){
                  validInteger(strata, name1 = "strata",
                               valid.length = NULL, min = 1, max = length(level.strata),
                               refuse.NULL = TRUE, refuse.duplicates = TRUE,
                               method = "nobs[S4BuyseTest]")
              }else{
                  validCharacter(strata, name1 = "strata",
                                 valid.length = NULL, valid.values = c("global",level.strata),
                                 refuse.NULL = FALSE, method = "nobs[S4BuyseTest]")
              }

              ## ** extract
              Mout <- rbind(c(length(indexC), length(indexT), sum([email protected])),
                            cbind(sapply(lapply(index.strata, intersect, indexC), length),
                                  sapply(lapply(index.strata, intersect, indexT), length),
                                  [email protected]))
              rownames(Mout) <- c("global", level.strata)
              colnames(Mout) <- c([email protected], "pairs")

              ## ** export
              out <- Mout[strata,,drop=simplify]
              if(is.matrix(out)){
                  return(as.data.frame(out))
              }else{
                  return(out)
              }
          }
          )

##----------------------------------------------------------------------
### S4-BuyseTest-nobs.R ends here
/scratch/gouwar.j/cran-all/cranData/BuyseTest/R/S4-BuyseTest-nobs.R
### S4-BuyseTest-plot.R --- ##---------------------------------------------------------------------- ## Author: Brice Ozenne ## Created: jun 23 2023 (10:44) ## Version: ## Last-Updated: Jun 29 2023 (10:25) ## By: Brice Ozenne ## Update #: 131 ##---------------------------------------------------------------------- ## ### Commentary: ## ### Change Log: ##---------------------------------------------------------------------- ## ### Code: ## ## * plot (documentation) #' @docType methods #' @name S4BuyseTest-plot #' @title Graphical Display for GPC #' @aliases plot,S4BuyseTest,ANY-method #' @include S4-BuyseTest.R #' #' @description Graphical display of the percentage of favorable, unfavorable, neutral, and uninformative pairs per endpoint. #' #' @param x an \R object of class \code{\linkS4class{S4BuyseTest}}, i.e., output of \code{\link{BuyseTest}} #' @param type [character] type of plot: histogram (\code{"hist"}), pie chart (\code{"pie"}), or nested pie charts (\code{"racetrack"}). #' @param strata [character vector] strata(s) relative to which the percentage should be displayed. #' @param endpoint [character vector] endpoint(s) relative to which the percentage should be displayed. #' @param label.strata [character vector] new labels for the strata levels. Should match the length of argument \code{strata}. #' @param label.endpoint [character vector] new labels for the endpoints. Should match the length of argument \code{endpoint}. #' @param color [character vector] colors used to display the percentages for each type of pair. #' @param plot [logical] should the graphic be displayed in a graphical window. #' @param ... not used, for compatibility with the generic function. #' #' @return an invisible list containing the data and the ggplot object used for graphical display. #' @keywords hplot #' #' @examples #' if(require(ggplot2)){ #' #' ## simulate data #' set.seed(10) #' df.data <- simBuyseTest(1e2, n.strata = 2) #' #' ff1 <- treatment ~ bin(toxicity) + TTE(eventtime, status = status, #' restriction = 1, threshold = 0.5) #' BT1 <- BuyseTest(ff1, data= df.data) #' plot(BT1, type = "hist") #' plot(BT1, type = "pie") #' plot(BT1, type = "racetrack") #' #' ff2 <- update(ff1, ~.+cont(score)) #' BT2 <- BuyseTest(ff2, data= df.data) #' plot(BT2, type = "hist") #' plot(BT2, type = "pie") #' plot(BT2, type = "racetrack") #' #' } ## * plot (code) #' @export setMethod(f = "plot", signature = "S4BuyseTest", definition = function(x, type = "hist", strata = "global", endpoint = NULL, label.strata = NULL, label.endpoint = NULL, plot = TRUE, color = c("#7CAE00", "#F8766D", "#C77CFF", "#00BFC4"), ...){ gg <- autoplot(x, type = type, strata = strata, endpoint = endpoint, label.strata = label.strata, label.endpoint = label.endpoint, color = color) ## ** display if(plot){ print(gg) } ## ** export return(invisible(list(plot = gg, data = gg$data))) }) ##---------------------------------------------------------------------- ### S4-BuyseTest-plot.R ends here
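## * Illustration (not run)
## The method above returns its ggplot object invisibly, so the display can be
## customized afterwards. Object names are assumptions for illustration.
if(FALSE){
    df.sketch <- simBuyseTest(100)
    e.BT <- BuyseTest(treatment ~ bin(toxicity) + cont(score), data = df.sketch, trace = 0)

    res <- plot(e.BT, type = "hist", plot = FALSE)
    res$plot + ggplot2::ggtitle("Scoring of the pairs per endpoint")
}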
/scratch/gouwar.j/cran-all/cranData/BuyseTest/R/S4-BuyseTest-plot.R
## * Documentation - print #' @docType methods #' @name S4BuyseTest-print #' @title Print Method for Class "S4BuyseTest" #' @aliases print,S4BuyseTest-method #' @include S4-BuyseTest.R S4-BuyseTest-summary.R #' #' @description Display the main results stored in a \code{S4BuyseTest} object. #' #' @param x an \R object of class \code{S4BuyseTest}, i.e., output of \code{\link{BuyseTest}} #' @param ... additional arguments passed to the summary method. #' #' @seealso #' \code{\link{BuyseTest}} for performing a generalized pairwise comparison. \cr #' \code{\link{S4BuyseTest-summary}} for a more detailed presentation of the \code{S4BuyseTest} object. #' #' @keywords print #' @author Brice Ozenne ## * Method - print #' @rdname S4BuyseTest-print #' @exportMethod print setMethod(f = "print", signature = "S4BuyseTest", definition = function(x, ...){ ## compute summary statistics outSummary <- summary(x, print = FALSE, ...) ## remove significance column table.print <- outSummary[,setdiff(names(outSummary), "significance"),drop=FALSE] print(table.print, row.names = FALSE) return(invisible(table.print)) } )
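## * Illustration (not run)
## print forwards the dots to summary, so summary arguments can be used directly;
## e.BT denotes an S4BuyseTest object (assumption for illustration).
if(FALSE){
    print(e.BT)                     ## same table as summary, without the stars column
    print(e.BT, percentage = FALSE) ## counts instead of percentages
}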
/scratch/gouwar.j/cran-all/cranData/BuyseTest/R/S4-BuyseTest-print.R
### sensitivity.R --- ##---------------------------------------------------------------------- ## Author: Brice Ozenne ## Created: mar 31 2021 (14:07) ## Version: ## Last-Updated: okt 3 2023 (19:06) ## By: Brice Ozenne ## Update #: 346 ##---------------------------------------------------------------------- ## ### Commentary: ## ### Change Log: ##---------------------------------------------------------------------- ## ### Code: ## * sensitivity (documentation) #' @docType methods #' @name S4BuyseTest-sensitivity #' @title Sensitivity Analysis for the Choice of the Thresholds #' @aliases sensitivity,S4BuyseTest-method #' @include S4-BuyseTest.R #' #' @description Evaluate a summary statistic (net benefit, win ratio, ...) using GPC along various thresholds of clinical relevance. #' #' @param object an \R object of class \code{\linkS4class{S4BuyseTest}}, i.e., output of \code{\link{BuyseTest}} #' @param threshold [list] a list containing for each endpoint the thresholds to be considered. #' @param statistic [character] the statistic summarizing the pairwise comparison: #' \code{"netBenefit"} displays the net benefit, as described in Buyse (2010) and Peron et al. (2016)), #' \code{"winRatio"} displays the win ratio, as described in Wang et al. (2016), #' \code{"favorable"} displays the proportion in favor of the treatment (also called Mann-Whitney parameter), as described in Fay et al. (2018). #' \code{"unfavorable"} displays the proportion in favor of the control. #' Default value read from \code{BuyseTest.options()}. #' @param null [numeric] right hand side of the null hypothesis (used for the computation of the p-value). #' @param conf.level [numeric] confidence level for the confidence intervals. #' Default value read from \code{BuyseTest.options()}. #' @param alternative [character] the type of alternative hypothesis: \code{"two.sided"}, \code{"greater"}, or \code{"less"}. #' Default value read from \code{BuyseTest.options()}. #' @param band [logical] should simulateneous confidence intervals be computed? #' @param adj.p.value [logical] should p-value adjusted for multiple comparisons be computed? #' @param transformation [logical] should the CI be computed on the logit scale / log scale for the net benefit / win ratio and backtransformed. #' Otherwise they are computed without any transformation. #' Default value read from \code{BuyseTest.options()}. Not relevant when using permutations or percentile bootstrap. #' @param cpus [integer, >0] the number of CPU to use. Default value is 1. #' @param trace [logical] Should the execution of the function be traced? #' @param ... argument passsed to the function \code{transformCIBP} of the riskRegression package. #' #' @details Simulateneous confidence intervals and adjusted p-values are computed using a single-step max-test approach via the function \code{transformCIBP} of the riskRegression package. #' #' @return An S3 object of class \code{S3sensitivity}. #' @keywords htest #' ## * sensitivity (example) #' @examples #' #' \dontrun{ #' require(ggplot2) #' #' ## simulate data #' set.seed(10) #' df.data <- simBuyseTest(1e2, n.strata = 2) #' #' ## with one endpoint #' ff1 <- treatment ~ TTE(eventtime, status = status, threshold = 0.1) #' BT1 <- BuyseTest(ff1, data= df.data) #' se.BT1 <- sensitivity(BT1, threshold = seq(0,2,0.25), band = TRUE) #' plot(se.BT1) #' #' ## with two endpoints #' ff2 <- update(ff1, .~. 
+ cont(score, threshold = 1)) #' BT2 <- BuyseTest(ff2, data= df.data) #' se.BT2 <- sensitivity(BT2, threshold = list(eventtime = seq(0,2,0.25), score = 0:2), #' band = TRUE) #' plot(se.BT2) #' plot(se.BT2, col = NA) #' } ## * sensitivity (method) #' @rdname S4BuyseTest-sensitivity #' @exportMethod sensitivity setMethod(f = "sensitivity", signature = "S4BuyseTest", definition = function(object, threshold, statistic = NULL, band = FALSE, conf.level = NULL, null = NULL, transformation = NULL, alternative = NULL, adj.p.value = FALSE, trace = TRUE, cpus = 1, ...){ ## ** normalize user input ## band if([email protected]!="u-statistic"){ stop("Cannot compute confidence bands when \'method.inference\' used to obtain the object is not \"u-statistic\". \n") } ## endpoint name.endpoint <- object@endpoint n.endpoint <- length(name.endpoint) option <- BuyseTest.options() if(is.null(statistic)){ statistic <- option$statistic } if(is.null(conf.level)){ conf.level <- option$conf.level } if(is.null(alternative)){ alternative <- option$alternative } if(is.null(null)){ null <- switch(statistic, "netBenefit" = 0, "winRatio" = 1, "favorable" = 1/2, "unfavorable" = 1/2) }else{ validNumeric(null, valid.length = 1, min = if("statistic"=="netBenefit"){-1}else{0}, max = if("statistic"=="winRatio"){Inf}else{1}) } ## threshold if(is.matrix(threshold) || is.data.frame(threshold)){ if(is.matrix(threshold)){ threshold <- as.data.frame(threshold) } if(NCOL(threshold)!=n.endpoint){ stop("When a matrix, the argument \'threshold\' should contain ",n.endpoint," columns (and not ",length(threshold),"). \n", "Each column corresponds to a prioritized endpoint. \n") } grid.threshold <- threshold if(is.null(names(grid.threshold))){ names(grid.threshold) <- names(name.endpoint) }else{ if(any(duplicated(names(grid.threshold)))){ stop("Duplicated column names in argument \"threshold\": \"",paste0(names(grid.threshold)[duplicated(names(grid.threshold))], collapse= "\" \""),"\".\n") } if(any(names(grid.threshold) %in% names(name.endpoint) == FALSE)){ stop("Incorrect column names in argument \"threshold\": \"",paste0(names(grid.threshold)[names(grid.threshold) %in% names(name.endpoint) == FALSE], collapse= "\" \""),"\".\n", "Valid names: \"",paste0(setdiff(names(name.endpoint), names(grid.threshold)), collapse= "\" \""),"\".\n") } grid.threshold <- grid.threshold[,names(name.endpoint)] } }else if(is.list(threshold) || is.vector(threshold)){ if(any(duplicated(name.endpoint))){ stop("Argument \'threshold\' must be a matrix when some endpoints are repeteadly used (i.e at different priorities) with different thresholds. \n") } if(!is.list(threshold)){ threshold <- list(threshold) } if(is.null(names(threshold))){ if(length(threshold)!=n.endpoint){ stop("When a list, the argument \'threshold\' should have length ",n.endpoint," (and not ",length(threshold),"). \n", "Each element of the list corresponds to a prioritized endpoint. \n") } }else{ if(any(duplicated(names(threshold)))){ stop("Argument \'threshold\' must not contain duplicated names. \n", "Duplicated names: \"",paste0(names(threshold)[duplicated(names(threshold))], collapse = "\" \""),"\". \n") } if(any(names(threshold) %in% name.endpoint == FALSE)){ stop("Some names used in the argument \'threshold\' does not match the existing endpoints. \n", "Incorrect names: \"",paste0(names(threshold)[names(threshold) %in% name.endpoint == FALSE], collapse = "\" \""),"\". \n", "Possible names: \"",paste0(setdiff(name.endpoint,names(threshold)), collapse = "\" \""),"\". 
\n") } threshold.save <- threshold threshold <- setNames(vector(mode = "list", length = n.endpoint), name.endpoint) threshold[names(threshold.save)] <- threshold.save } if(any(sapply(threshold,length)==0)){ threshold[sapply(threshold,length)==0] <- object@threshold[sapply(threshold,length)==0] } grid.threshold <- expand.grid(threshold) colnames(grid.threshold) <- name.endpoint }else{ stop("Argument \'threshold\' should be a list or a matrix \n") } ## formula ls.args <- object@call if("formula" %in% names(ls.args)){ ls.args$formula <- NULL args.tempo <- initializeFormula(object@call$formula, hierarchical = object@hierarchical) ls.args[names(args.tempo)] <- args.tempo } ls.args$trace <- 0 if (cpus == "all") { cpus <- parallel::detectCores() # this function detect the number of CPU cores } if(band && any(object@weightObs!=1)){ stop("Confidence bands cannot not currently be derived with weighted observations. \n") } ## ** run BuyseTest n.se <- NROW(grid.threshold) test.varying <- apply(grid.threshold,2,function(iX){length(unique(iX))!=1}) if(all(test.varying==FALSE)){ stop("Only a single combination of thresholds. No need for a sensitivity analysis.\n") } gridRed.threshold <- grid.threshold[,which(test.varying),drop=FALSE] if(trace>0){cat("Run ",n.se," GPC analyses: \n", sep = "")} if (cpus == 1) { ls.confint <- vector(mode="list", length = n.se) ls.iid <- vector(mode="list", length = n.se) if(trace>0){pb <- utils::txtProgressBar(max = n.se, style = 3)} for(iSe in 1:n.se){ if(trace>0){utils::setTxtProgressBar(pb, iSe)} iLS.args <- ls.args iLS.args$threshold <- as.double(grid.threshold[iSe,]) iBT <- do.call(BuyseTest, args = iLS.args) iConfint <- confint(iBT, statistic = statistic, null = null, conf.level = conf.level, alternative = alternative, transformation = transformation)[n.endpoint,] ls.confint[[iSe]] <- data.frame(c(gridRed.threshold[iSe,,drop=FALSE], iConfint)) if([email protected]=="u-statistic"){ ls.iid[[iSe]] <- getIid(iBT, statistic = statistic,simplify=FALSE)$global[,n.endpoint] } } if(trace>0){close(pb)} }else{ if(trace>0){ cl <- suppressMessages(parallel::makeCluster(cpus, outfile = "")) pb <- utils::txtProgressBar(max = n.se, style = 3) }else{ cl <- parallel::makeCluster(cpus) } test.lazyeval <- sapply(ls.args,function(x){inherits(x,"name")}) if(any(test.lazyeval)){ toExport <- unlist(lapply(ls.args[test.lazyeval],deparse)) }else{ toExport <- NULL } i <- NULL ## [:forCRANcheck:] foreach ls.sensitivity <- foreach::`%dopar%`( foreach::foreach(i=1:n.se, .export = toExport), { if(trace>0){utils::setTxtProgressBar(pb, i)} iLS.args <- ls.args iLS.args$threshold <- as.double(grid.threshold[i,]) iBT <- do.call(BuyseTest, args = iLS.args) iConfint <- confint(iBT, statistic = statistic, null = null, conf.level = conf.level, alternative = alternative, transformation = transformation)[n.endpoint,] iOut <- list(confint = data.frame(c(gridRed.threshold[i,,drop=FALSE], iConfint))) if([email protected]=="u-statistic"){ iOut[["iid"]] <- getIid(iBT, statistic = statistic)[,n.endpoint] } return(iOut) }) parallel::stopCluster(cl) if(trace>0){close(pb)} ls.confint <- lapply(ls.sensitivity,"[[","confint") if([email protected]=="u-statistic"){ ls.iid <- lapply(ls.sensitivity,"[[","iid") } } df.confint <- as.data.frame(do.call(rbind,ls.confint)) if([email protected]=="u-statistic"){ attr(df.confint, "iid") <- do.call(cbind,ls.iid) } ## ** compute confidence bands if(band || adj.p.value){ requireNamespace("riskRegression") A.iid <- array(NA, dim = c(NROW(attr(df.confint, "iid")), NCOL(attr(df.confint, 
"iid")),1)) A.iid[,,1] <- attr(df.confint, "iid") min.value <- switch(statistic, "netBenefit" = -1, "winRatio" = 0, "favorable" = 0, "unfavorable" = 0) max.value <- switch(statistic, "netBenefit" = 1, "winRatio" = Inf, "favorable" = 1, "unfavorable" = 1) ## temporary fix: the next few lines should be remove when riskRegression will be updated if(is.null(transformation) || identical(transformation,TRUE)){ if(statistic %in% c("none","netBenefit","winRatio") || !inherits(try(riskRegression::transformCIBP(estimate = 1, se = 1, type = "atanh2", seed = NA, band = FALSE, alternative = "two.sided"),silent=TRUE),"try-error")){ type <- switch(statistic, "netBenefit" = "atanh", "winRatio" = "log", "favorable" = "cloglog",## note: not the same transformation as confint "unfavorable" = "cloglog", ## note: not the same transformation as confint "none" = "none") }else{ type <- switch(statistic, "netBenefit" = "atanh", "winRatio" = "log", "favorable" = "atanh2", "unfavorable" = "atanh2", "none" = "none") } }else if(identical(transformation,FALSE)){ type <- "none" }else{ type <- transformation } dots <- list(...) if("seed" %in% names(dots) == FALSE){ dots$seed <- NA } if("method.band" %in% names(dots) == FALSE){ dots$method.band <- "maxT-integration" } if("n.sim" %in% names(dots) == FALSE){ dots$n.sim <- 10^4 } iBand <- do.call(riskRegression::transformCIBP, args = c(list(estimate = rbind(df.confint$estimate[df.confint$se>0]), se = rbind(df.confint$se[df.confint$se>0]), iid = A.iid[,df.confint$se>0,,drop=FALSE], null = null, conf.level = conf.level, alternative = alternative, ci = TRUE, type = type, min.value = min.value, max.value = max.value, band = TRUE, p.value = adj.p.value), dots)) if(band){ attr(df.confint,"quantileBand") <- iBand$quantile df.confint$lower.band <- rep(0,length(df.confint$se)) df.confint$lower.band[df.confint$se>0] <- iBand$lowerBand[1,] df.confint$upper.band <- rep(0,length(df.confint$se)) df.confint$upper.band[df.confint$se>0] <- iBand$upperBand[1,] } if(adj.p.value==TRUE){ df.confint$adj.p.value <- rep(1,length(df.confint$se)) df.confint$adj.p.value[df.confint$se>0] <- iBand$adj.p.value[1,] } } ## ** export attr(df.confint,"statistic") <- statistic attr(df.confint,"grid") <- grid.threshold attr(df.confint,"gridRed") <- gridRed.threshold class(df.confint) <- append("S3sensitivity",class(df.confint)) return(df.confint) }) ##---------------------------------------------------------------------- ### sensitivity.R ends here
/scratch/gouwar.j/cran-all/cranData/BuyseTest/R/S4-BuyseTest-sensitivity.R
## * Documentation - show #' @docType methods #' @name S4BuyseTest-show #' @title Show Method for Class "S4BuyseTest" #' @aliases show,S4BuyseTest-method #' @include S4-BuyseTest.R S4-BuyseTest-summary.R #' #' @description Display the main results stored in a \code{S4BuyseTest} object. #' #' @param object an \R object of class \code{S4BuyseTest}, i.e., output of \code{\link{BuyseTest}} #' #' @seealso #' \code{\link{BuyseTest}} for performing a generalized pairwise comparison. \cr #' \code{\link{S4BuyseTest-summary}} for a more detailed presentation of the \code{S4BuyseTest} object. #' #' @keywords print #' @author Brice Ozenne ## * Method - show #' @rdname S4BuyseTest-show #' @exportMethod show setMethod(f = "show", signature = "S4BuyseTest", definition = function(object){ ## compute summary statistics outSummary <- summary(object, print = FALSE, strata = "global") ## only keep certain columns type.display <- BuyseTest.options()$print.display vec.tfunu <- c("total","favorable","unfavorable","neutral","uninformative") if(any(vec.tfunu %in% type.display)){ type.display[type.display %in% vec.tfunu] <- paste0(type.display[type.display %in% vec.tfunu],"(%)") } if("CI" %in% type.display){ type.display <- c(setdiff(type.display,"CI"),grep("^CI",names(outSummary),value=TRUE)) } type.display <- intersect(names(outSummary),type.display) ## display table.print <- outSummary[,type.display,drop=FALSE] if("significance" %in% names(table.print)){ names(table.print)[names(table.print) == "significance"] <- "" } print(table.print, row.names = FALSE) return(invisible(NULL)) } )
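## * Illustration (not run)
## show is dispatched when an S4BuyseTest object is auto-printed at the prompt,
## so the two calls below are equivalent (e.BT: assumption for illustration).
if(FALSE){
    e.BT        ## implicit show
    show(e.BT)
}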
/scratch/gouwar.j/cran-all/cranData/BuyseTest/R/S4-BuyseTest-show.R
## * Documentation - summary #' @docType methods #' @name S4BuyseTest-summary #' @title Summary Method for Class "S4BuyseTest" #' @aliases summary,S4BuyseTest-method #' @include S4-BuyseTest.R #' #' @description Summarize the results from the \code{\link{BuyseTest}} function. #' #' @param object output of \code{\link{BuyseTest}} #' @param print [logical] Should the results be displayed in the console? #' @param percentage [logical] Should the percentage of pairs of each type be displayed ? Otherwise the number of pairs is displayed. #' @param statistic [character] the statistic summarizing the pairwise comparison: #' \code{"netBenefit"} displays the net benefit, as described in Buyse (2010) and Peron et al. (2016)), #' \code{"winRatio"} displays the win ratio, as described in Wang et al. (2016), #' \code{"favorable"} displays the proportion in favor of the treatment (also called Mann-Whitney parameter), as described in Fay et al. (2018). #' \code{"unfavorable"} displays the proportion in favor of the control. #' Default value read from \code{BuyseTest.options()}. #' @param conf.level [numeric] confidence level for the confidence intervals. #' Default value read from \code{BuyseTest.options()}. #' @param strata [logical] should the strata-specific results be displayed or the results pooled across strata? #' Can also be \code{NULL} to display both. #' @param type.display [numeric or character] the results/summary statistics to be displayed. #' Either an integer indicating refering to a type of display in \code{BuyseTest.options()} #' or the name of the column to be output (e.g. \code{c("strata","Delta","p.value")}). #' @param digit [integer vector] the number of digit to use for printing the counts and the delta. #' @param ... arguments to be passed to \code{\link{S4BuyseTest-confint}} #' #' @details #' \bold{Content of the output} \cr #' The "results" table in the output show the result of the GPC at each endpoint, as well as its contribution to the global statistics. #' More precisely, the column: #' \itemize{ #' \item \code{endpoint} lists the endpoints, by order of priority. #' \item \code{threshold} lists the threshold associated to each endpoint. #' \item \bold{weight:} lists the weight of each priority. #' \item \bold{strata:} list the strata relative to which the results of the priority are displayed. If \code{"global"}, then the results are over all strata at a given priority. #' \item \code{total} (or \code{total(\%)}) lists the number (or percentage) of pairs to be analyzed at the current priority (or strata). #' \item \code{favorable} (or \code{favorable(\%)}) lists the number (or percentage) of pairs classified in favor of the treatment group at the current priority (or strata). #' \item \code{unfavorable} (or \code{unfavorable(\%)}) lists the number (or percentage) of pairs classified in favor of the control group at the current priority (or strata). #' \item \code{neutral} (or \code{neutral(\%)}) lists the number (or percentage) of pairs classified as neither in favor of the treatment group nor in favor of the control group at the current priority (or strata). #' \item \code{uninf} (or \code{uninf(\%)}) lists the number (or percentage) of pairs that could not be classified at the current priority (or strata) due to missing values/censoring. #' \item \code{delta} lists the value of the priority-specific statistic (e.g. net benefit or win ratio), i.e. computed on the pairs analyzed at the current priority only. 
#' \item \code{Delta} lists the value of the cumulative statistic (e.g. net benefit or win ratio), i.e. computed on all the pairs analyzed up to the current priority. #' \item \code{Delta(\%)} lists the relative statistic (i.e. statistic up to the current priority divided by the final statistic). #' \item \code{information(\%)} lists the information fraction (i.e. number of favorable and unfavorable pairs up to the current priority divided by the final number of favorable and unfavorable pairs). #' \item \code{CI} lists the confidence intervals for \code{Delta} (not adjusted for multiple comparison). #' \item \code{null} lists the null hypothesis (\code{Delta=null}). #' \item \code{p.value} p-value relative to the null hypothesis (not adjusted for multiple comparison). #' \item \code{resampling} number of samples used to compute the confidence intervals or p-values from permutations or bootstrap samples. #' Only displayed if some bootstrap samples have been discarded, for example, they did not lead to sample any case or control. #' } #' Note: when using the Peron scoring rule or a correction for uninformative pairs, the columns \code{total}, \code{favorable}, \code{unfavorable}, \code{neutral}, and \code{uninf} are computing by summing the contribution of the pairs. This may lead to a decimal value. #' #' \bold{Statistic}: when considering a single endpoint and denoting #' \eqn{Y} the endpoint in the treatment group, #' \eqn{X} the endpoint in the control group, #' and \eqn{\tau} the threshold of clinical relevance, #' the net benefit is \eqn{P[Y \ge X + \tau] - P[X \ge Y + \tau]}, #' the win ratio is \eqn{\frac{P[Y \ge X + \tau]}{P[X \ge Y + \tau]}}, #' the proportion in favor of treatment is \eqn{P[Y \ge X + \tau]}, #' the proportion in favor of control is \eqn{P[X \ge Y + \tau]}. #' #' \bold{Statistical inference} \cr #' When the interest is in obtaining p-values, we recommand the use of a permutation test. #' However, when using a permutation test confidence intervals are not displayed in the summary. #' This is because there is no (to the best of our knowledge) straightforward way to obtain good confidence intervals with permutations. #' An easy way consist in using the quantiles of the permutation distribution and then shift by the point estimate of the statistic. #' This is what is output by \code{\link{S4BuyseTest-confint}}. #' However this approach leads to a much too high coverage when the null hypothesis is false. #' The limits of the confidence interval can also end up being outside of the interval of definition of the statistic #' (e.g. outside [-1,1] for the proportion in favor of treatment). #' Therefore, for obtaining confidence intervals, we recommand the boostrap method or the u-statistic method. #' #' \bold{Win ratio} \cr #' For the win ratio, the proposed implementation enables the use of thresholds and endpoints that are not time to events #' as well as the correction proposed in Peron et al. (2016) to account for censoring. #' These development have not been examined by Wang et al. (2016), or in other papers (to the best of our knowledge). #' They are only provided here by implementation convenience. #' #' \bold{Competing risks} \cr #' In presence of competing risks, looking at the net benefit/win ratio computed with respect to the event of interest #' will likely not give a full picture of the difference between the two groups. #' For instance a treatment may decrease the risk of the event of interest (i.e. 
increase the net benefit for this event) #' by increasing the risk of the competing event. If the competing event is death, this is not desirable. It is therefore advised to #' taking into consideration the risk of the competing event, e.g. by re-running BuyseTest where cause 1 and 2 have been inverted. #' #' @seealso #' \code{\link{BuyseTest}} for performing a generalized pairwise comparison. \cr #' \code{\link{S4BuyseTest-model.tables}} to obtain the table displayed at the end of the summary method in a \code{data.frame} format. #' \code{\link{S4BuyseTest-confint}} to output estimate, standard errors, confidence interval and p-values. #' \code{\link{S4BuyseTest-plot}} for a graphical display of the scoring of the pairs. #' \code{\link{BuyseMultComp}} for efficient adjustment for multiple comparisons. #' #' @examples #' library(data.table) #' #' dt <- simBuyseTest(1e2, n.strata = 3) #' #' \dontrun{ #' BT <- BuyseTest(treatment ~ TTE(eventtime, status = status) + Bin(toxicity), data=dt) #' } #' \dontshow{ #' BT <- BuyseTest(treatment ~ TTE(eventtime, status = status) + Bin(toxicity), data=dt, n.resampling = 10, trace = 0) #' } #' summary(BT) #' summary(BT, percentage = FALSE) #' summary(BT, statistic = "winRatio") #' #' @references #' On the GPC procedure: Marc Buyse (2010). \bold{Generalized pairwise comparisons of prioritized endpoints in the two-sample problem}. \emph{Statistics in Medicine} 29:3245-3257 \cr #' On the win ratio: D. Wang, S. Pocock (2016). \bold{A win ratio approach to comparing continuous non-normal outcomes in clinical trials}. \emph{Pharmaceutical Statistics} 15:238-245 \cr #' On the Mann-Whitney parameter: Fay, Michael P. et al (2018). \bold{Causal estimands and confidence intervals asscoaited with Wilcoxon-Mann-Whitney tests in randomized experiments}. \emph{Statistics in Medicine} 37:2923-2937. 
#' #' @keywords print #' @author Brice Ozenne ## * method - summary #' @rdname S4BuyseTest-summary #' @exportMethod summary setMethod(f = "summary", signature = "S4BuyseTest", definition = function(object, print = TRUE, percentage = TRUE, statistic = NULL, conf.level = NULL, strata = NULL, type.display = 1, digit = c(2,4,5), ...){ ## ** normalize and check arguments mycall <- match.call() option <- BuyseTest.options() if(length(digit) == 1){digit <- rep(digit,3)} validInteger(digit, name1 = "digit", min = 0, valid.length = 3, method = "summary[S4BuyseTest]") if(is.numeric(type.display)){ validInteger(type.display, name1 = "type.display", min = 1, max = 9, ## limitation in model.tables valid.length = 1) type.display.original <- option$summary.display[[type.display]] type.display <- paste0("summary",type.display) }else{ validCharacter(type.display, name1 = "type.display", valid.values = c("endpoint","restriction","threshold","strata","weight","total","favorable","unfavorable","neutral","uninf","information(%)", "delta","Delta","Delta(%)", "p.value","CI","significance"), valid.length = NULL) type.display.original <- type.display } if(is.null(conf.level)){ conf.level <- option$conf.level } alpha <- 1-conf.level method.inference <- slot(object,"method.inference") hierarchical <- slot(object,"hierarchical") scoring.rule <- slot(object,"scoring.rule") if(is.null(statistic)){ statistic <- option$statistic }else{ statistic <- switch(gsub("[[:blank:]]", "", tolower(statistic)), "netbenefit" = "netBenefit", "winratio" = "winRatio", "favorable" = "favorable", "unfavorable" = "unfavorable", statistic) } endpoint <- slot(object,"endpoint") n.endpoint <- length(endpoint) n.strata <- length(slot(object,"level.strata")) if(attr(scoring.rule,"test.paired") && "strata" %in% names(mycall) == FALSE){ strata <- FALSE } ## ** build table if(attr(method.inference,"permutation")){ attr(conf.level,"warning.permutation") <- FALSE } table.print <- model.tables(object, percentage = percentage, statistic = statistic, conf.level = conf.level, strata = strata, columns = type.display, ...) 
name.print <- names(table.print) endpoint.restriction.threshold <- attr(table.print,"endpoint") transform <- attr(table.print,"transform") n.resampling <- attr(table.print,"n.resampling") method.ci.resampling <- attr(table.print,"method.ci.resampling") ## CI when the estimate is not defined if("lower.ci" %in% name.print && "upper.ci" %in% name.print){ index.ci <- intersect(which(!is.na(table.print$lower.ci)), which(!is.na(table.print$upper.ci))) if("Delta" %in% name.print){ index.ci <- intersect(index.ci, intersect(which(!is.infinite(table.print$Delta)), which(!is.na(table.print$Delta)))) } }else{ index.ci <- NULL } ## ** reformat table ## *** add column with stars if(("p.value" %in% name.print) && ("significance" %in% type.display.original)){ colStars <- sapply(table.print[,"p.value"],function(x){ if(is.na(x)){""}else if(x<0.001){"***"}else if(x<0.01){"**"}else if(x<0.05){"*"}else if(x<0.1){"."}else{""} }) table.print[,"significance"] <- colStars } ## *** rounding ## counts col.pairs <- intersect(name.print, c("total","favorable","unfavorable","neutral","uninf")) if(!is.na(digit[1]) && length(col.pairs)>0){ table.print[,col.pairs] <- sapply(table.print[,col.pairs,drop=FALSE], round, digits = digit[1]) } col.inference <- intersect(name.print, c("delta","Delta","lower.ci","upper.ci","Delta(%)","information(%)")) if(!is.na(digit[2]) && length(col.inference)>0){ table.print[,col.inference] <- sapply(table.print[,col.inference,drop=FALSE], round, digits = digit[2]) } if(!is.na(digit[3]) && ("p.value" %in% name.print)){ table.print[!is.na(table.print$p.value),"p.value"] <- format.pval(table.print[!is.na(table.print$p.value),"p.value"], digits = digit[3]) } ## *** set Inf to NA in summary ## e.g. in the case of no unfavorable pairs the win ratio is Inf ## this is not a valid estimate and it is set to NA if("delta" %in% name.print){ table.print[is.infinite(table.print$delta), "delta"] <- NA table.print[is.nan(table.print$delta), "delta"] <- NA } if("Delta" %in% name.print){ table.print[is.infinite(table.print$Delta), "Delta"] <- NA table.print[is.nan(table.print$Delta), "Delta"] <- NA } ## *** convert NA to "" if("threshold" %in% name.print){ table.print$threshold[table.print$threshold<=1e-12] <- "" } if("restriction" %in% name.print){ table.print[is.na(table.print$restriction), "restriction"] <- "" } if("Delta" %in% name.print){ table.print[is.na(table.print$Delta), "Delta"] <- "" } if("lower.ci" %in% name.print){ table.print[is.na(table.print$lower), "lower.ci"] <- "" } if("upper.ci" %in% name.print){ table.print[is.na(table.print$upper), "upper.ci"] <- "" } if("p.value" %in% name.print){ table.print[is.na(table.print$p.value), "p.value"] <- "" } ## *** remove duplicated values in endpoint/threshold if("endpoint" %in% name.print){ table.print$endpoint[duplicated(endpoint.restriction.threshold)] <- "" } if("threshold" %in% name.print){ table.print$threshold[duplicated(endpoint.restriction.threshold)] <- "" } if("restriction" %in% name.print){ table.print$restriction[duplicated(endpoint.restriction.threshold)] <- "" } if("weight" %in% name.print){ table.print$weight[duplicated(endpoint.restriction.threshold)] <- "" } ## *** merge CI inf and CI sup column if("CI" %in% type.display.original && "lower.ci" %in% name.print && "upper.ci" %in% name.print){ qInf <- round(100*alpha/2, digits = digit[2]) qSup <- round(100*(1-alpha/2), digits = digit[2]) name.ci <- paste0("CI [",qInf,"% ; ",qSup,"%]") table.print$upper.ci[index.ci] <- paste0("[",table.print[index.ci,"lower.ci"], 
";",table.print[index.ci,"upper.ci"],"]") names(table.print)[names(table.print) == "upper.ci"] <- name.ci table.print$lower.ci <- NULL } ## *** rename percentage columns vec.tfunu <- intersect(c("total","favorable","unfavorable","neutral","uninf"), name.print) if(identical(percentage,TRUE) & length(vec.tfunu)>0){ names(table.print)[match(vec.tfunu, names(table.print))] <- paste0(names(table.print)[names(table.print) %in% vec.tfunu],"(%)") } ## ** display ## *** additional text if(n.endpoint>1){ txt.endpoint <- paste0("with ",n.endpoint," ",ifelse(hierarchical, "prioritized ", ""),"endpoints", sep = "") }else{ txt.endpoint <- paste0("with 1 endpoint") } txt.strata <- if(n.strata>1){paste0(" and ",n.strata," strata")}else{""} ## *** display if(print){ cat(" Generalized pairwise comparisons ",txt.endpoint,txt.strata,"\n\n", sep = "") if(statistic == "winRatio"){ cat(" - statistic : ",if(any(!is.na(object@restriction))){"restricted "}else{""},"win ratio (delta: endpoint specific, Delta: global) \n", " - null hypothesis : Delta == 1 \n", sep = "") }else if(statistic == "netBenefit"){ cat(" - statistic : ",if(any(!is.na(object@restriction))){"restricted "}else{""},"net benefit (delta: endpoint specific, Delta: global) \n", " - null hypothesis : Delta == 0 \n", sep = "") }else if(statistic == "favorable"){ cat(" - statistic : ",if(any(!is.na(object@restriction))){"restricted "}else{""},"proportion in favor of treatment (delta: endpoint specific, Delta: global) \n", " - null hypothesis : Delta == 1/2 \n", sep = "") }else if(statistic == "favorable"){ cat(" - statistic : ",if(any(!is.na(object@restriction))){"restricted "}else{""},"proportion in favor of control (delta: endpoint specific, Delta: global) \n", " - null hypothesis : Delta == 1/2 \n", sep = "") } if(method.inference != "none"){ cat(" - confidence level: ",1-alpha," \n", sep = "") if(attr(method.inference,"permutation")){ txt.method <- "permutation test" }else if(attr(method.inference,"bootstrap")){ txt.method <- "bootstrap resampling" }else if(attr(method.inference,"ustatistic")){ test.model.tte <- all(unlist(lapply(object@iidNuisance,dim))==0) txt.method <- paste0("H-projection of order ",attr(method.inference,"hprojection")) if(transform != "id"){ txt.method <- paste0(txt.method," after ",transform," transformation \n") }else{ txt.method <- paste0(txt.method," \n") } if(test.model.tte && (scoring.rule == "Peron" || [email protected] > 0)){ txt.method <- paste0(txt.method," (ignoring the uncertainty of the nuisance parameters)") } } if(attr(method.inference,"permutation") || attr(method.inference,"bootstrap") ){ ok.resampling <- all(n.resampling[1]==n.resampling) if(ok.resampling){ txt.method <- paste0(txt.method, " with ",n.resampling[1]," samples \n") table.print$n.resampling <- NULL }else{ txt.method <- paste0(txt.method, " with [",min(n.resampling)," ; ",max(n.resampling),"] samples \n") } if(attr(method.inference,"permutation")){ txt.method.ci <- switch(method.ci.resampling, "percentile" = "p-value computed using the permutation distribution", "studentized" = "p-value computed using the studentized permutation distribution", ) }else if(attr(method.inference,"bootstrap")){ txt.method.ci <- switch(method.ci.resampling, "percentile" = "CI computed using the percentile method; p-value by test inversion", "gaussian" = "CI/p-value computed assuming normality", "studentized" = "CI computed using the studentized method; p-value by test inversion", ) } txt.method <- paste0(txt.method," ",txt.method.ci," \n") } cat(" - inference : 
",txt.method, sep = "") } cat(" - treatment groups: ",[email protected][2]," (treatment) vs. ",[email protected][1]," (control) \n", sep = "") if(attr(scoring.rule,"test.paired")){ table.weightStrata <- table(paste0(round(100*object@weightStrata, digit[1]),"%")) cat(" - pair weights : ",paste(names(table.weightStrata), collapse = ", ")," (K=",paste(table.weightStrata, collapse=","),")\n", sep = "") }else if(n.strata>1){ cat(" - strata weights : ",paste(paste0(round(100*object@weightStrata, digit[1]),"%"), collapse = ", ")," \n", sep = "") } if(any(object@type == "tte") && any(attr(scoring.rule,"test.censoring"))){ if(all(attr(scoring.rule,"method.score")[object@type=="tte"]=="CRPeron")){ txt.Peron <- "cif" }else if(all(attr(scoring.rule,"method.score")[object@type=="tte"]=="SurvPeron")){ txt.Peron <- "survival" }else{ txt.Peron <- "survival/cif" } txt.scoring.rule <- switch(scoring.rule, "Gehan" = "deterministic score or uninformative", "Peron" = paste0("probabilistic score based on the ",txt.Peron," curves") ) cat(" - censored pairs : ",txt.scoring.rule,"\n", sep = "") } if(hierarchical && n.endpoint>1 && any([email protected]>0)){ Uneutral.as.uninf <- unique([email protected]) if(identical(Uneutral.as.uninf,TRUE)){ txt.neutral <- "re-analyzed using lower priority endpoints" }else if(identical(Uneutral.as.uninf,FALSE)){ txt.neutral <- "ignored at lower priority endpoints" }else{ txt.neutral <- paste0("re-analyzed using lower priority endpoints for endpoint ", paste(which([email protected]), collapse = ", "), " \n otherwise ignored at lower priority endpoints") } cat(" - neutral pairs : ",txt.neutral,"\n", sep = "") } if(!( ([email protected] == 0) && (all([email protected]==0)) )){ txt.uninf <- switch(as.character([email protected]), "0" = if(n.endpoint==1){"no contribution"}else{"no contribution at the current endpoint, analyzed at later endpoints"}, "1" = "score equals the averaged score of all informative pairs", "2" = "no contribution, their weight is passed to the informative pairs using IPCW" ) cat(" - uninformative pairs: ",txt.uninf,"\n", sep = "") } cat(" - results\n") table.print2 <- table.print if("significance" %in% names(table.print)){ names(table.print2)[names(table.print2) == "significance"] <- "" } print(table.print2, row.names = FALSE) } ## ** export return(invisible(table.print)) } )
/scratch/gouwar.j/cran-all/cranData/BuyseTest/R/S4-BuyseTest-summary.R
## * Documentation S4BuyseTest #' @name S4BuyseTest-class #' @title Class "S4BuyseTest" (output of BuyseTest) #' #' @description A \code{\link{BuyseTest}} output is reported in a \code{S4BuyseTest} object. #' #' @seealso #' \code{\link{BuyseTest}} for the function computing generalized pairwise comparisons. \cr #' \code{\link{S4BuyseTest-summary}} for the summary of the BuyseTest function results #' #' @keywords classes #' @author Brice Ozenne ## * Class S4BuyseTest #' @rdname S4BuyseTest-class #' @exportClass S4BuyseTest setClass( Class = "S4BuyseTest", representation( call = "list", count.favorable = "matrix", count.unfavorable = "matrix", count.neutral = "matrix", count.uninf = "matrix", n.pairs = "numeric", delta = "array", Delta = "matrix", type = "vector", endpoint = "vector", level.treatment = "vector", level.strata = "vector", scoring.rule = "character", hierarchical = "logical", neutral.as.uninf = "logical", add.halfNeutral = "logical", correction.uninf = "numeric", method.inference = "character", strata = "vector", threshold = "numeric", restriction = "numeric", n.resampling = "numeric", deltaResampling = "array", DeltaResampling = "array", covariance = "matrix", covarianceResampling = "array", weightObs = "numeric", weightEndpoint = "numeric", weightStrata = "numeric", weightStrataResampling = "array", iidAverage = "list", iidNuisance = "list", seed = "numeric", tablePairScore = "list", tableSurvival = "list" ) ) ## * Initialize S4BuyseTest objects methods::setMethod( f = "initialize", signature = "S4BuyseTest", definition = function(.Object, call, count_favorable, ## from cpp object count_unfavorable, ## from cpp object count_neutral, ## from cpp object count_uninf, ## from cpp object delta, ## from cpp object Delta, ## from cpp object n_pairs, ## from cpp object iidAverage_favorable, ## from cpp object iidAverage_unfavorable, ## from cpp object iidAverage_neutral, ## from cpp object iidNuisance_favorable, ## from cpp object iidNuisance_unfavorable, ## from cpp object iidNuisance_neutral, ## from cpp object covariance, ## from cpp object tableScore, ## from cpp object tableSurvival = NULL, ## added to the cpp object by .BuyseTest when requested by the user index.C, index.T, index.strata, type, endpoint, level.strata, level.treatment, scoring.rule, paired, hierarchical, neutral.as.uninf, add.halfNeutral, correction.uninf, method.inference, method.score, seed, strata, threshold, restriction, weightObs, weightEndpoint, weightStrata, pool.strata, n.resampling, deltaResampling = NULL, ## from inferenceResampling DeltaResampling = NULL, ## from inferenceResampling weightStrataResampling = NULL, ## from inferenceResampling covarianceResampling = NULL ## from inferenceResampling ){ name.endpoint <- paste0(endpoint,ifelse(!is.na(restriction),paste0("_r",restriction),""),ifelse(threshold>1e-12,paste0("_t",threshold),"")) ## ** call call <- call[-1] ## ** count dimnames(count_favorable) <- list(level.strata, name.endpoint) dimnames(count_unfavorable) <- list(level.strata, name.endpoint) dimnames(count_neutral) <- list(level.strata, name.endpoint) dimnames(count_uninf) <- list(level.strata, name.endpoint) ## ** delta/Delta dimnames(delta) <- list(level.strata, name.endpoint, c("favorable","unfavorable","neutral","uninf","netBenefit","winRatio")) dimnames(Delta) <- list(name.endpoint, c("favorable","unfavorable","neutral","uninf","netBenefit","winRatio")) ## ** n_pairs names(n_pairs) <- level.strata ## ** iid and variance if(!is.null(iidAverage_favorable) && NCOL(iidAverage_favorable)>0){ 
colnames(iidAverage_favorable) <- name.endpoint
              }
              if(!is.null(iidAverage_unfavorable) && NCOL(iidAverage_unfavorable)>0){
                  colnames(iidAverage_unfavorable) <- name.endpoint
              }
              if(!is.null(iidAverage_neutral) && NCOL(iidAverage_neutral)>0){
                  colnames(iidAverage_neutral) <- name.endpoint
              }
              if(!is.null(iidNuisance_favorable) && NCOL(iidNuisance_favorable)>0){
                  colnames(iidNuisance_favorable) <- name.endpoint
              }
              if(!is.null(iidNuisance_unfavorable) && NCOL(iidNuisance_unfavorable)>0){
                  colnames(iidNuisance_unfavorable) <- name.endpoint
              }
              if(!is.null(iidNuisance_neutral) && NCOL(iidNuisance_neutral)>0){
                  colnames(iidNuisance_neutral) <- name.endpoint
              }
              if(!is.null(covariance) && length(covariance)>0){
                  dimnames(covariance) <- list(name.endpoint,
                                               c("favorable","unfavorable","covariance","netBenefit","winRatio"))
              }

              ## ** tableScore
              if(!is.null(tableScore) && length(tableScore)>0 && any(sapply(tableScore, data.table::is.data.table)==FALSE)){
                  tableScore <- pairScore2dt(tableScore,
                                             level.treatment = level.treatment,
                                             level.strata = level.strata,
                                             n.strata = length(level.strata),
                                             endpoint = endpoint,
                                             threshold = threshold,
                                             restriction = restriction)
              }

              ## ** tableSurvival

              ## ** type
              type <- stats::setNames(type, name.endpoint)

              ## ** endpoint
              names(endpoint) <- name.endpoint

              ## ** level.strata
              attr(level.strata,"index") <- index.strata

              ## ** level.treatment
              attr(level.treatment,"indexC") <- index.C
              attr(level.treatment,"indexT") <- index.T

              ## ** scoring.rule
              scoring.rule <- c("Gehan","Peron")[scoring.rule+1]
              attr(scoring.rule,"test.censoring") <- attr(method.score, "test.censoring")
              attr(method.score, "test.censoring") <- NULL
              attr(scoring.rule,"test.CR") <- attr(method.score, "test.CR")
              attr(method.score, "test.CR") <- NULL
              attr(scoring.rule,"test.paired") <- paired
              attr(method.score, "test.paired") <- NULL
              attr(scoring.rule,"method.score") <- stats::setNames(method.score, name.endpoint)

              ## ** hierarchical
              ## ** neutral.as.uninf
              ## ** add.halfNeutral
              ## ** correction.uninf
              ## ** method.inference
              ## ** method.score

              ## ** strata
              if(is.null(strata)){
                  strata <- as.character(NA)
              }

              ## ** restriction
              names(restriction) <- name.endpoint

              ## ** threshold
              names(threshold) <- name.endpoint

              ## ** weightEndpoint
              names(weightEndpoint) <- name.endpoint

              ## ** weightStrata
              weightStrata <- as.double(weightStrata)
              attr(weightStrata,"type") <- attr(pool.strata,"type")

              ## ** n.resampling
              if(!is.null(deltaResampling)){
                  dimnames(deltaResampling)[[3]] <- name.endpoint
                  dimnames(DeltaResampling)[[2]] <- name.endpoint
                  if(attr(method.inference,"studentized")){
                      dimnames(covarianceResampling)[[2]] <- name.endpoint
                  }
              }

              ## ** resampling

              ## ** store
              ## *** from c++ object
              .Object@count.favorable <- count_favorable
              .Object@count.unfavorable <- count_unfavorable
              .Object@count.neutral <- count_neutral
              .Object@count.uninf <- count_uninf
              .Object@n.pairs <- n_pairs
              .Object@delta <- delta
              .Object@Delta <- Delta
              .Object@iidAverage <- list(favorable = iidAverage_favorable,
                                         unfavorable = iidAverage_unfavorable,
                                         neutral = iidAverage_neutral)
              .Object@iidNuisance <- list(favorable = iidNuisance_favorable,
                                          unfavorable = iidNuisance_unfavorable,
                                          neutral = iidNuisance_neutral)
              if(!is.null(covariance)){
                  .Object@covariance <- covariance
              }
              .Object@tablePairScore <- tableScore

              ## *** required additional information
              .Object@call <- call
              .Object@type <- type
              .Object@endpoint <- endpoint
              .Object@level.strata <- level.strata
              .Object@level.treatment <- level.treatment
              .Object@scoring.rule <- scoring.rule
              .Object@hierarchical <- hierarchical
              .Object@neutral.as.uninf <- neutral.as.uninf
              .Object@add.halfNeutral <- add.halfNeutral
              .Object@correction.uninf <- correction.uninf
              .Object@method.inference <- method.inference
              .Object@strata <- strata
              .Object@threshold <- threshold
              .Object@restriction <- restriction
              .Object@weightObs <- weightObs
              .Object@weightEndpoint <- weightEndpoint
              .Object@weightStrata <- weightStrata
              .Object@n.resampling <- n.resampling
              if(!missing(seed)){
                  .Object@seed <- seed
              }

              ## *** optional information
              ## resampling
              if(!is.null(deltaResampling)){
                  .Object@deltaResampling <- deltaResampling
                  .Object@DeltaResampling <- DeltaResampling
                  .Object@weightStrataResampling <- weightStrataResampling
                  .Object@covarianceResampling <- covarianceResampling
              }
              ## survival
              if(!is.null(tableSurvival)){
                  .Object@tableSurvival <- tableSurvival
              }

              ## ** export
              ## validObject(.Object)
              return(.Object)
          })

## * Constructor S4BuyseTest objects
S4BuyseTest <- function(...) new("S4BuyseTest", ...)
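## * Illustration (not run)
## S4BuyseTest objects are normally created by BuyseTest() rather than by calling
## the constructor directly; slots can then be inspected with @ or slot()
## (e.BT: assumption for illustration).
if(FALSE){
    methods::slotNames("S4BuyseTest")
    e.BT@Delta                    ## cumulative statistics per endpoint
    slot(e.BT, "count.favorable") ## counts of favorable pairs per strata/endpoint
}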
/scratch/gouwar.j/cran-all/cranData/BuyseTest/R/S4-BuyseTest.R
## * Documentation BuyseTest.options
#' @title Class "BuyseTest.options" (global setting for the BuyseTest package)
#' @name BuyseTest.options-class
#' @include 1-setGeneric.R
#'
#' @description Class defining the global settings for the BuyseTest package.
#'
#' @seealso
#' \code{\link{BuyseTest.options}} to select or update global settings.
#'
#' @keywords classes
#' @author Brice Ozenne

## * Class BuyseTest.options
#' @rdname BuyseTest.options-class
setClass(
    Class = "BuyseTest.options",
    representation(
        alternative = "character",
        args.model.tte = "list",
        check = "logical",
        conf.level = "numeric",
        correction.uninf = "numeric",
        cpus = "numeric",
        debug = "numeric",
        engine = "character",
        fitter.model.tte = "character",
        hierarchical = "logical",
        keep.pairScore = "logical",
        keep.survival = "logical",
        method.inference = "character",
        scoring.rule = "character",
        n.resampling = "numeric",
        strata.resampling = "character",
        neutral.as.uninf = "logical",
        add.halfNeutral = "logical",
        add.1.presample = "logical",
        order.Hprojection = "numeric",
        pool.strata = "character",
        precompute = "logical",
        print.display = "character",
        statistic = "character",
        summary.display = "list",
        trace = "numeric",
        transformation = "logical",
        warning.correction = "numeric"
    ),

### ** Check validity of the object
    validity = function(object){
        validNames.summary <- c("endpoint","restriction","threshold","weight","strata","total","favorable","unfavorable","neutral","uninf","information(%)",
                                "delta","Delta","Delta(%)",
                                "p.value","CI","significance")

        validCharacter(object@alternative,
                       name1 = "@alternative",
                       valid.values = c("two.sided","greater","less"),
                       valid.length = 1,
                       method = "Class BuyseTest.options")
        validLogical(object@check,
                     name1 = "@check",
                     valid.length = 1,
                     method = "Class BuyseTest.options")
        validNumeric(object@conf.level,
                     name1 = "@conf.level",
                     min = 0, max = 1,
                     valid.length = 1,
                     method = "Class BuyseTest.options")
        validInteger(object@correction.uninf,
                     name1 = "@correction.uninf",
                     min = 0, max = 2,
                     valid.length = 1,
                     method = "Class BuyseTest.options")
        validInteger(object@cpus,
                     name1 = "@cpus",
                     min = 1,
                     valid.length = 1,
                     method = "Class BuyseTest.options")
        validInteger(object@debug,
                     name1 = "@debug",
                     valid.length = 1,
                     method = "Class BuyseTest.options")
        validCharacter(object@engine,
                       name1 = "@engine",
                       valid.values = c("GPC_cpp","GPC2_cpp"),
                       valid.length = 1,
                       method = "Class BuyseTest.options")
        validCharacter(object@fitter.model.tte,
                       name1 = "@fitter.model.tte",
                       valid.values = c("prodlim", "survreg"),
                       valid.length = 1,
                       method = "Class BuyseTest.options")
        validLogical(object@hierarchical,
                     name1 = "@hierarchical",
                     valid.length = 1,
                     method = "Class BuyseTest.options")
        validLogical(object@keep.pairScore,
                     name1 = "@keep.pairScore",
                     valid.length = 1,
                     method = "Class BuyseTest.options")
        validLogical(object@keep.survival,
                     name1 = "@keep.survival",
                     valid.length = 1,
                     method = "Class BuyseTest.options")
        validCharacter(object@method.inference,
                       name1 = "@method.inference",
                       valid.values = c("bootstrap", "stratified bootstrap", "studentized bootstrap", "studentized stratified bootstrap",
                                        "permutation", "stratified permutation",
                                        "none", "u-statistic", "u-statistic-bebu"),
                       valid.length = 1,
                       method = "Class BuyseTest.options")
        validCharacter(object@scoring.rule,
                       name1 = "@scoring.rule",
                       valid.values = c("Gehan","Peron"),
                       valid.length = 1,
                       method = "Class BuyseTest.options")
        validInteger(object@n.resampling,
                     name1 = "@n.resampling",
                     min = 0,
                     valid.length = 1,
                     method = "Class BuyseTest.options")
        validCharacter(object@strata.resampling,
                       name1 = "@strata.resampling",
                       valid.values = c(as.character(NA),"treatment","strata"),
                       valid.length = 1,
                       method = "Class BuyseTest.options")
        validLogical(object@neutral.as.uninf,
                     name1 = "@neutral.as.uninf",
                     valid.length = 1,
                     method = "Class BuyseTest.options")
        validLogical(object@add.halfNeutral,
                     name1 = "@add.halfNeutral",
                     valid.length = 1,
                     method = "Class BuyseTest.options")
        validInteger(object@order.Hprojection,
                     name1 = "@order.Hprojection",
                     min = 1, max = 2,
                     valid.length = 1,
                     method = "Class BuyseTest.options")
        validCharacter(object@pool.strata,
                       name1 = "@pool.strata",
                       valid.values = c("Buyse","CMH","equal","var-favorable","var-unfavorable","var-netBenefit","var-winRatio"),
                       valid.length = 1,
                       method = "Class BuyseTest.options")
        validLogical(object@precompute,
                     name1 = "@precompute",
                     valid.length = 1,
                     method = "Class BuyseTest.options")
        validCharacter(object@print.display,
                       name1 = "@print.display",
                       valid.values = validNames.summary,
                       valid.length = NULL,
                       method = "Class BuyseTest.options")
        validCharacter(object@statistic,
                       name1 = "@statistic",
                       valid.values = c("netBenefit","winRatio","favorable","unfavorable"),
                       valid.length = 1,
                       method = "Class BuyseTest.options")
        lapply(object@summary.display, validCharacter,
               name1 = "@summary.display",
               valid.values = validNames.summary,
               valid.length = NULL,
               method = "Class BuyseTest.options")
        validInteger(object@trace,
                     name1 = "@trace",
                     min = 0, max = 2,
                     valid.length = 1,
                     method = "Class BuyseTest.options")
        validLogical(object@transformation,
                     name1 = "@transformation",
                     valid.length = 1,
                     method = "Class BuyseTest.options")
        validNumeric(object@warning.correction,
                     name1 = "@warning.correction",
                     min = 0, max = 1,
                     valid.length = 1,
                     method = "Class BuyseTest.options")

        return(TRUE)
    }
)

#' @title Methods for the class "BuyseTest.options"
#' @name BuyseTest.options-methods
#' @aliases alloc,BuyseTest.options-method
#' @description Methods to update or select global settings
#'
#' @param object an object of class \code{BuyseTest.options}.
#' @param field a \code{list} named with the name of the fields to update and containing the values to assign to these fields
#' @param name.field a \code{character vector} containing the names of the field to be selected.

## * Alloc BuyseTest.options
#' @rdname BuyseTest.options-methods
setMethod(f = "alloc",
          signature = "BuyseTest.options",
          definition = function(object, field){

              name.field <- names(field)
              n.field <- length(field)

              for (iField in 1:n.field) {
                  slot(object, name.field[iField]) <- field[[iField]]
              }
              validObject(object)
              return(object)
          }
)

## * Select BuyseTest.options
#' @rdname BuyseTest.options-methods
setMethod(f = "select",
          signature = "BuyseTest.options",
          definition = function(object, name.field){

              if (is.null(name.field)) {
                  name.field <- slotNames(object)
              }
              n.field <- length(name.field)
              ls.slots <- stats::setNames(vector(mode = "list", length = n.field), name.field)
              for (iField in 1:n.field) {
                  ls.slots[[iField]] <- slot(object, name.field[iField])
              }

              return(ls.slots)
          }
)
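## * Illustration (not run)
## The alloc/select methods above are the back-end of the exported
## BuyseTest.options() interface (defined elsewhere in the package); the calling
## convention below is an assumption for illustration.
if(FALSE){
    BuyseTest.options("trace")   ## select: read one field
    BuyseTest.options(trace = 0) ## alloc: update one field
}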
/scratch/gouwar.j/cran-all/cranData/BuyseTest/R/S4-BuyseTest.options.R
### as.data.table.performance.R --- ##---------------------------------------------------------------------- ## Author: Brice Ozenne ## Created: dec 9 2021 (10:04) ## Version: ## Last-Updated: jun 27 2023 (09:55) ## By: Brice Ozenne ## Update #: 95 ##---------------------------------------------------------------------- ## ### Commentary: ## ### Change Log: ##---------------------------------------------------------------------- ## ### Code: ## * as.data.table.performance ##' @title Convert Performance Objet to data.table ##' @description Extract the AUC/brier score values or the prediction into a data.table format. ##' ##' @param x object of class \code{"performance"}. ##' @param type [character] either \code{"metric"} to extract AUC/brier score or \code{"prediction"} to extract predictions. ##' @param format [character] should the result be outcome in the long format (\code{"long"}) or in the wide format (\code{"wide"}). ##' Note relevant when using \code{type="metric"}. ##' @param keep.rownames Not used. For compatibility with the generic method. ##' @param ... Not used. For compatibility with the generic method. ##' ##' @return A data.table object ##' @keywords methods ##' ##' @export as.data.table.performance <- function(x, keep.rownames = FALSE, type = "performance", format = NULL, ...){ ## ** normalize user input if(length(type)!=1){ stop("Argument \'type\' must have length 1.") } type <- match.arg(type, c("performance", "prediction",paste0("prediction-",names(x$prediction)), "roc",paste0("roc-",names(x$prediction)), "fold")) if(!is.null(format)){ format <- match.arg(format, c("long","wide")) } ## ** extract data if(type=="performance"){ return(as.data.table(x$performance)) }else if(type %in% c("prediction","prediction-internal","prediction-external","prediction-cv")){ x.prediction <- x$prediction x.response <- x$response out <- NULL if(type=="prediction-internal"){ x.prediction <- x.prediction["internal"] x.response <- x.response["internal"] }else if(type=="prediction-external"){ x.prediction <- x.prediction["external"] x.response <- x.response["external"] }else if(type=="prediction-cv"){ x.prediction <- x.prediction["cv"] x.response <- x.response["cv"] } for(iType in names(x.prediction)){ ## iType <- names(x.prediction)[3] if(iType == "internal"){ iX.prediction <- data.table(method = "internal", outcome = x.response[[iType]], x.prediction[[iType]], observation = 1:NROW(x.prediction[[iType]]), repetition = as.numeric(NA), fold = as.numeric(NA)) }else if(iType == "external"){ iX.prediction <- data.table(method = "external", outcome = x.response[[iType]], x.prediction[[iType]], observation = 1:NROW(x.prediction[[iType]]), repetition = as.numeric(NA), fold = as.numeric(NA)) }else if(iType == "cv"){ iX <- x.prediction[[iType]] iIndex <- attr(iX,"index") attr(iX,"index") <- NULL iX.prediction <- data.table(method = "cv", outcome = as.numeric(NA),do.call(rbind,lapply(apply(iX,3,list),"[[",1)),do.call(rbind,lapply(apply(iIndex,3,list),"[[",1))) iX.prediction$outcome <- x.response[[iType]][iX.prediction$observation] } out <- rbind(out, iX.prediction) } if(!is.null(format) && format == "long"){ out <- data.table::melt(out, id.vars = intersect(names(out),c("method","outcome","observation","repetition","fold")), variable.name = "model", value.name = "prediction") } return(out) }else if(type %in% c("roc","roc-internal","roc-external","roc-cv")){ newx <- as.data.table.performance(x, type = gsub("roc","prediction",type), format = "long") Umethod <- unique(newx$method) Umodel <- unique(x$model) 
out <- NULL for(iMethod in Umethod){ iNewx <- newx[newx$method==iMethod] setkeyv(iNewx, c("repetition","model","prediction")) ## se: among those who have the outcome P[score>=threshold|Y=1] ## sp: among those who do not have the outcome P[score<threshold|Y=0] iOut <- iNewx[,list("observation"=c(NA,.SD$observation), "threshold"=c(0,.SD$prediction), "se"=rev(cumsum(c(0,rev(.SD$outcome))==1))/sum(.SD$outcome==1), "sp"=cumsum(c(1,.SD$outcome)==0)/sum(.SD$outcome==0)), ## below threshold classified as 1: sp is the number of 0 divided by the number of negative by = c("repetition","model")] out <- rbind(out,iOut) } if(!is.null(format) && format == "wide"){ out <- data.table::dcast(out, formula = repetition+observation~model, value.var = c("threshold","se","sp"),sep=".") }else{ out <- out[order(out[["repetition"]],out$model,1-out$sp,out$se)] } return(out) }else if(type == "fold"){ if("cv" %in% names(x$prediction) == FALSE){ message("No fold to extract as no cross-validation was performed. \n") return(NULL) } out <- as.data.table(do.call(rbind,lapply(1:dim(attr(x$prediction$cv,"index"))[3], function(k){attr(x$prediction$cv,"index")[,,k]}))) return(out) } } ## * as.data.table.performanceResample ##' @export as.data.table.performanceResample <- function(x, ...){ return(as.data.table(unclass(x))) } ##---------------------------------------------------------------------- ### as.data.table.performance.R ends here
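## Usage sketch (added for illustration; 'fit' stands for a hypothetical object
## of class "performance", it is not defined in this file):
## as.data.table(fit, type = "performance")                 ## AUC/Brier score per model
## as.data.table(fit, type = "prediction", format = "long") ## one row per model and prediction
## as.data.table(fit, type = "roc-cv", format = "wide")     ## ROC coordinates from cross-validation
## as.data.table(fit, type = "fold")                        ## observation/fold assignment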
/scratch/gouwar.j/cran-all/cranData/BuyseTest/R/as.data.table.performance.R
### auc.R ---
##----------------------------------------------------------------------
## Author: Brice Ozenne
## Created: dec 2 2019 (16:29)
## Version:
## Last-Updated: oct 25 2023 (11:23)
## By: Brice Ozenne
## Update #: 470
##----------------------------------------------------------------------
##
### Commentary:
##
### Change Log:
##----------------------------------------------------------------------
##
### Code:

## * auc (documentation)
#' @title Estimation of the Area Under the ROC Curve (EXPERIMENTAL)
#' @name auc
#'
#' @description Estimation of the Area Under the ROC curve, possibly after cross validation,
#' to assess the discriminant ability of a biomarker regarding a disease status.
#'
#' @param labels [integer/character vector] the disease status (should only take two different values).
#' @param predictions [numeric vector] A vector with the same length as \code{labels} containing the biomarker values.
#' @param fold [character/integer vector] If using cross validation, the index of the fold.
#' Should have the same length as \code{labels}.
#' @param observation [integer vector] If using cross validation, the index of the corresponding observation in the original dataset.
#' Necessary to compute the standard error when using cross validation.
#' @param direction [character] \code{">"} leads to estimate P[Y>X],
#' \code{"<"} to estimate P[Y<X],
#' and \code{"auto"} to estimate max(P[Y>X],P[Y<X]).
#' @param add.halfNeutral [logical] should half of the neutral score be added to the favorable and unfavorable scores?
#' Useful to match the usual definition of the AUC in the presence of ties.
#' @param pooling [character] method used to compute the global AUC from the fold-specific AUC: either an empirical average \code{"mean"}
#' or a weighted average with weights proportional to the number of pairs of observations in each fold \code{"pairs"}.
#' @param null [numeric, 0-1] the value against which the AUC should be compared when computing the p-value.
#' @param conf.level [numeric, 0-1] the confidence level of the confidence intervals.
#' @param transformation [logical] should a log-log transformation be used when computing the confidence intervals and the p-value.
#' @param order.Hprojection [1,2] the order of the H-projection used to linearize the statistic when computing the standard error.
#' 2 involves more calculations but is more accurate in small samples. Only active when the \code{fold} argument is \code{NULL}.
#'
#' @details The iid decomposition of the AUC is based on a first-order decomposition,
#' so the sum of its squared values will not exactly match the squared standard error estimated with a second-order H-projection.
#'
#' @return An S3 object of class \code{BuyseTestAUC} that inherits from data.frame.
#' The last line of the object contains the global AUC value with its standard error.
#'
#' @references Erin LeDell, Maya Petersen, and Mark van der Laan (2015). \bold{Computationally efficient confidence intervals for cross-validated area under the ROC curve estimates}. \emph{Electron J Stat.} 9(1):1583–1607.
#' \cr
#'
#' @keywords models

## * auc (example)
#' @rdname auc
#' @examples
#' library(data.table)
#'
#' n <- 200
#' set.seed(10)
#' X <- rnorm(n)
#' dt <- data.table(Y = as.factor(rbinom(n, size = 1, prob = 1/(1+exp(1/2-X)))),
#'                  X = X,
#'                  fold = unlist(lapply(1:10,function(iL){rep(iL,n/10)})))
#'
#' ## compute auc
#' auc(labels = dt$Y, predictions = dt$X, direction = ">")
#'
#' ## compute auc after 10-fold cross-validation
#' auc(labels = dt$Y, predictions = dt$X, fold = dt$fold, observation = 1:NROW(dt))
#'

## * auc (code)
#' @export
auc <- function(labels, predictions, fold = NULL, observation = NULL,
                direction = ">", add.halfNeutral = TRUE,
                null = 0.5, conf.level = 0.95, transformation = TRUE,
                order.Hprojection = 2, pooling = "mean"){

    ## ** Normalize user input
    if(length(unique(labels))!=2){
        stop("Argument \'labels\' must have exactly two different values \n")
    }
    if(any(is.na(predictions))){
        warning("Missing values in argument \'predictions\'. \n")
    }
    if(!is.null(fold)){
        if(length(fold)!=length(predictions)){
            stop("When not NULL, argument \'fold\' must have the same length as argument \'predictions\' \n")
        }
        if(length(observation)!=length(predictions)){
            stop("When argument \'fold\' is not NULL, argument \'observation\' must have the same length as argument \'predictions\' \n")
        }
        if(length(labels)!=length(predictions)){ ## either the user provides the outcome for each observation
            n.obs <- length(labels)
            if(!is.null(observation) && any(observation %in% 1:n.obs == FALSE)){
                stop("When not NULL, argument \'observation\' must take integer values between 1 and ",n.obs,"\n", sep = "")
            }
        }else{ ## or the user provides the outcome for each prediction (i.e. several times the same value as some predictions correspond to the same obs)
            n.obs <- length(unique(observation))
            observation <- as.numeric(as.factor(observation))
        }
        if(any(tapply(observation,fold, function(iObs){any(duplicated(iObs))}))){
            stop("The same observation cannot appear twice in the same fold. \n")
        }
    }else{
        n.obs <- length(labels)
        if(n.obs!=length(predictions)){
            stop("Argument \'labels\' and \'predictions\' must have the same length \n")
        }
        if(!is.null(observation)){
            stop("Argument \'observation\' is only useful when argument \'fold\' is specified \n")
        }
        observation <- 1:n.obs
    }
    direction <- match.arg(direction, c(">","<","auto"), several.ok = TRUE)
    pooling <- match.arg(pooling, c("pairs","mean"), several.ok = TRUE)
    if(!is.logical(transformation)){
        stop("Argument \'transformation\' must be TRUE or FALSE \n")
    }
    if(length(labels)==length(predictions)){
        df <- data.frame(Y = labels, X = predictions, observation = observation, stringsAsFactors = FALSE)
    }else{
        df <- data.frame(Y = labels[observation], X = predictions, observation = observation, stringsAsFactors = FALSE)
    }
    formula0 <- Y ~ cont(X)
    if(!is.null(fold)){
        df$fold <- fold
        formula <- stats::update(formula0,.~.+fold)
        name.fold <- sort(unique(df$fold))
        n.fold <- length(name.fold)
    }else{
        df$fold <- 1
        formula <- formula0
        name.fold <- NULL
        n.fold <- 0
    }
    if(!identical(direction,"auto") && (length(direction) %in% c(1,max(n.fold,1)) == FALSE)){
        stop("Argument \'direction\' must have length 1 or the number of folds (here ",n.fold,"). \n")
    }
    if(is.na(conf.level)){
        method.inference <- "none"
    }else{
        method.inference <- "u-statistic"
    }

    ## ** Prepare
    ## *** Make sure that all predictions are in the increasing means outcome direction
    direction.save <- direction
    if(direction == "auto"){
        e0.BT <- BuyseTest(formula, method.inference = "none", data = df,
                           trace = 0, add.halfNeutral = add.halfNeutral)
        if(sum(e0.BT@count.favorable)>=sum(e0.BT@count.unfavorable)){
            direction <- rep(">",max(n.fold,1))
        }else{
            direction <- rep("<",max(n.fold,1))
            df$X <- -df$X
        }
        Udirection <- as.character(NA)
    }else if(length(direction)==1){
        if(direction=="<"){
            df$X <- -df$X
        }
        direction <- rep(direction, max(n.fold,1))
        Udirection <- direction.save
    }else{
        for(iFold in 1:n.fold){
            if(direction[iFold]=="<"){
                df[df$fold==name.fold[iFold],"X"] <- -df[df$fold==name.fold[iFold],"X"]
            }
        }
        Udirection <- as.character(NA)
    }
    if(is.null(fold)){
        out <- data.frame(fold = "global", direction = direction,
                          estimate = 0, se = 0,
                          stringsAsFactors = FALSE)
    }else{
        out <- data.frame(fold = c(name.fold,"global"), direction = c(direction,Udirection),
                          estimate = 0, se = 0,
                          stringsAsFactors = FALSE)
    }
    if(method.inference!="none"){
        attr(out,"iid") <- matrix(NA, nrow = n.obs, ncol = n.fold+1,
                                  dimnames = list(NULL,c(name.fold,"global")))
    }else{
        out$se <- NA
    }

    ## ** Global AUC
    order.save <- BuyseTest.options()$order.Hprojection
    if(order.save!=order.Hprojection){
        BuyseTest.options(order.Hprojection = order.Hprojection)
        on.exit(BuyseTest.options(order.Hprojection = order.save))
    }
    e.BT <- BuyseTest(formula, method.inference = method.inference, data = df,
                      trace = 0, add.halfNeutral = add.halfNeutral)

    ## store
    if(is.null(fold)){
        out[out$fold=="global","estimate"] <- as.double(coef(e.BT, statistic = "favorable"))
        if(method.inference!="none"){
            out[out$fold=="global","se"] <- as.double(confint(e.BT, statistic = "favorable")[,"se"]) ## may differ from iid when second order H-decomposition
            attr(out,"iid")[sort(unique(observation)),out$fold=="global"] <- getIid(e.BT, scale = TRUE, center = TRUE, statistic = "favorable") ## no need for cluster argument when fold=NULL
        }
    }else if(pooling == "mean"){ ## Here: strata have the same weight
        ## WARNING: cannot use the "global" results because if there is not the same number of pairs in all strata
        ## it will weight the strata-specific AUCs differently
        if(method.inference!="none"){
            attr(out,"iid")[] <- 0
        }
    }else if(pooling == "pairs"){ ## Here: strata are weighted according to the number of pairs
        out[out$fold=="global","estimate"] <- as.double(coef(e.BT, statistic = "favorable"))
        if(method.inference!="none"){
            out[out$fold=="global","se"] <- as.double(confint(e.BT, cluster = observation, statistic = "favorable")[,"se"])
            attr(out,"iid")[sort(unique(observation)),out$fold=="global"] <- getIid(e.BT, cluster = observation, scale = TRUE, center = TRUE, statistic = "favorable")
            ## sqrt(as.double(crossprod(attr(out,"iid")[,out$fold=="global"])))
        }
    }

    ## ** Fold-specific AUC
    if(!is.null(fold)){
        if(order.Hprojection==1){
            ePOINT.BT <- coef(e.BT, statistic = "favorable", strata = TRUE)[,1]
            normWithinStrata <- FALSE
            attr(normWithinStrata, "skipScaleCenter") <- TRUE
            out[match(name.fold,out$fold),"estimate"] <- as.double(ePOINT.BT)
            if(method.inference!="none"){
                iIID.BT <- getIid(e.BT, scale = normWithinStrata, center = normWithinStrata, statistic = "favorable")[,1]
                out[match(name.fold,out$fold),"se"] <- sqrt(as.double(tapply(iIID.BT, fold, crossprod)))
                ## iE.BT <- BuyseTest(formula0, method.inference = "u-statistic", data = df[df$fold==name.fold[2],,drop=FALSE], trace = 0, add.halfNeutral = add.halfNeutral)
                ## confint(iE.BT, statistic = "favorable")
                for(iFold in 1:n.fold){
                    attr(out,"iid")[observation[fold==name.fold[iFold]],iFold] <- iIID.BT[fold==name.fold[iFold]]
                }
            }
        }else{
            for(iFold in 1:n.fold){ ## iFold <- 1
                iData <- df[df$fold==name.fold[iFold],,drop=FALSE]
                iE.BT <- BuyseTest(formula0, method.inference = method.inference, data = iData,
                                   trace = 0, add.halfNeutral = add.halfNeutral)
                iConfint <- confint(iE.BT, statistic = "favorable")
                out[match(name.fold[iFold],out$fold),"estimate"] <- as.double(iConfint$estimate)
                if(method.inference!="none"){
                    out[match(name.fold[iFold],out$fold),"se"] <- as.double(iConfint$se)
                    attr(out,"iid")[iData$observation,iFold] <- getIid(iE.BT, scale = TRUE, center = TRUE, statistic = "favorable")
                }
            }
        }
        if(pooling == "mean"){ ## same weight to each fold
            out[out$fold=="global","estimate"] <- mean(out[out$fold!="global","estimate"])
            if(method.inference!="none"){
                attr(out,"iid")[,"global"] <- rowMeans(attr(out,"iid")[,1:n.fold,drop=FALSE])
                out[out$fold=="global","se"] <- sqrt(as.double(crossprod(attr(out,"iid")[,"global"])))
                ## may not match sum(out[out$fold!="global","se"]^2)/n.fold^2 with non-independent folds
                ## also does not have 2nd order term
            }
        }
    }

    ## ** P-value and confidence interval
    if(method.inference!="none"){
        alpha <- 1-conf.level
        qinf <- stats::qnorm(alpha/2)
        qsup <- stats::qnorm(1-alpha/2)
        ## riskRegression:::transformCIBP(estimate = cbind(out$estimate), se = cbind(out$se), null = 1/2, conf.level = 0.95, type = "none",
        ##                                ci = TRUE, band = FALSE, p.value = TRUE,
        ##                                min.value = 0, max.value = 1)
        ## riskRegression:::transformCIBP(estimate = cbind(out$estimate), se = cbind(out$se), null = 1/2, conf.level = 0.95, type = "loglog",
        ##                                ci = TRUE, band = FALSE, p.value = TRUE,
        ##                                min.value = 0, max.value = 1)
        if(all(out$estimate==1)){
            out$lower <- 1
            out$upper <- 1
            out$p.value <- as.numeric(null==1)
        }else if(all(out$estimate==0)){
            out$lower <- 0
            out$upper <- 0
            out$p.value <- as.numeric(null==0)
        }else if(transformation){
            newse <- out$se / (- out$estimate * log(out$estimate))
            z.stat <- (log(-log(out$estimate)) - log(-log(null)))/newse
            out$lower <- as.double(out$estimate ^ exp(qsup * newse))
            out$upper <- as.double(out$estimate ^ exp(qinf * newse))
            out$p.value <- 2*(1-stats::pnorm(abs(z.stat)))
        }else{
            z.stat <- as.double((out[,"estimate"]-null)/out[,"se"])
            out$lower <- as.double(out[,"estimate"] + qinf * out[,"se"])
            out$upper <- as.double(out[,"estimate"] + qsup * out[,"se"])
            out$p.value <- 2*(1-stats::pnorm(abs(z.stat)))
        }
    }else{
        out$lower <- NA
        out$upper <- NA
        out$p.value <- NA
    }

    ## ** Export
    attr(out, "n.fold") <- n.fold
    class(out) <- append("BuyseTestAuc",class(out))
    attr(out, "contrast") <- e.BT@level.treatment
    return(out)
}

## * Utilities
## ** print.auc
#' @exportMethod print
print.BuyseTestAuc <- function(x, ...){
    ## if(attr(x,"n.fold")==0){
    print.data.frame(x, ...)
    ## }else{
    ##     label.upper <- paste0(attr(x,"contrast")[2],">",attr(x,"contrast")[1])
    ##     label.lower <- paste0(attr(x,"contrast")[1],">",attr(x,"contrast")[2])
    ##     x$direction <- sapply(x$direction, function(iD){
    ##         if(iD==">"){return(label.upper)}else if(iD=="<"){return(label.lower)}else{return(iD)}
    ##     })
    ##     print.data.frame(x[x$fold == "global",c("direction","estimate","se","lower","upper","p.value")], row.names = FALSE)
    ## }
}

## ** coef.auc
#' @title Extract the AUC Value
#'
#' @description Extract the AUC value.
#'
#' @param object object of class \code{BuyseTestAUC} (output of the \code{auc} function).
#' @param ... not used. For compatibility with the generic function.
#'
#' @return Estimated value for the AUC (numeric).
#'
#' @method coef BuyseTestAuc
#'
#' @export
coef.BuyseTestAuc <- function(object,...){
    object[object$fold=="global","estimate"]
}

## ** confint.auc
#' @title Extract the AUC value with its Confidence Interval
#'
#' @description Extract the AUC value with its Confidence Interval and p-value testing whether the AUC equals 0.5.
#'
#' @param object object of class \code{BuyseTestAUC} (output of the \code{auc} function).
#' @param ... not used. For compatibility with the generic function.
#'
#' @return Estimated value for the AUC, its standard error, the lower and upper bound of the confidence interval and the p-value.
#'
#' @method confint BuyseTestAuc
#' @export
confint.BuyseTestAuc <- function(object,...){
    out <- object[object$fold=="global",c("estimate","se","lower","upper","p.value")]
    rownames(out) <- NULL
    return(out)
}

## ** iid.auc
#' @title Extract the iid Decomposition for the AUC
#'
#' @description Extract the iid decomposition relative to the AUC estimate.
#'
#' @param x object of class \code{BuyseTestAUC} (output of the \code{auc} function).
#' @param ... not used. For compatibility with the generic function.
#'
#' @return A column vector.
#'
#' @method iid BuyseTestAuc
#' @export
iid.BuyseTestAuc <- function(x,...){
    object <- x
    return(attr(object,"iid")[,"global",drop=FALSE])
}

######################################################################
### auc.R ends here
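## Accessor sketch (added for illustration): summaries of the object returned
## by auc(), reusing the 'dt' simulated in the examples above.
## res <- auc(labels = dt$Y, predictions = dt$X, direction = ">")
## coef(res)    ## global AUC point estimate
## confint(res) ## estimate, se, confidence interval and p-value
## iid(res)     ## influence function of the global AUC (one value per observation)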
/scratch/gouwar.j/cran-all/cranData/BuyseTest/R/auc.R
### autoplot-S4BuyseTest.R ---
##----------------------------------------------------------------------
## Author: Brice Ozenne
## Created: Jun 29 2023 (09:27)
## Version:
## Last-Updated: jul 18 2023 (09:29)
## By: Brice Ozenne
## Update #: 25
##----------------------------------------------------------------------
##
### Commentary:
##
### Change Log:
##----------------------------------------------------------------------
##
### Code:

## * autoplot (documentation)
#' @title Graphical Display for GPC
#' @description Graphical display of the percentage of favorable, unfavorable, neutral, and uninformative pairs per endpoint.
#' @rdname autoplot-S4BuyseTest
#'
#' @param object an \R object of class \code{\linkS4class{S4BuyseTest}}, i.e., output of \code{\link{BuyseTest}}
#' @param type [character] type of plot: histogram (\code{"hist"}), pie chart (\code{"pie"}), or nested pie charts (\code{"racetrack"}).
#' @param strata [character vector] strata(s) relative to which the percentage should be displayed.
#' @param endpoint [character vector] endpoint(s) relative to which the percentage should be displayed.
#' @param label.strata [character vector] new labels for the strata levels. Should match the length of argument \code{strata}.
#' @param label.endpoint [character vector] new labels for the endpoints. Should match the length of argument \code{endpoint}.
#' @param color [character vector] colors used to display the percentages for each type of pair.
#' @param ... not used, for compatibility with the generic function.
#'
#' @return a ggplot object.
#' @method autoplot S4BuyseTest
#' @keywords hplot
#' @export
autoplot.S4BuyseTest <- function(object, type = "hist", strata = "global", endpoint = NULL,
                                 label.strata = NULL, label.endpoint = NULL,
                                 color = c("#7CAE00", "#F8766D", "#C77CFF", "#00BFC4"),
                                 ...){

    objectS <- model.tables(object, percentage = TRUE,
                            columns = c("endpoint","threshold","restriction","strata","total","favorable","unfavorable","neutral","uninf"))
    Ustrata <- slot(object,"level.strata")
    Uendpoint <- slot(object,"endpoint")
    objectS$endpoint2 <- attr(objectS,"endpoint")

    ## ** normalize arguments
    ## type
    type <- match.arg(type, c("hist","pie","racetrack"))

    ## strata
    level.strata <- object@level.strata
    if(is.null(strata)){
        if(length(level.strata)==1){
            strata <- "global"
        }else{
            strata <- c("global", level.strata)
        }
    }else if(identical(strata,FALSE)){
        strata <- "global"
    }else if(identical(strata,TRUE)){
        strata <- level.strata
    }else if(is.numeric(strata)){
        validInteger(strata, name1 = "strata", valid.length = NULL,
                     min = 1, max = length(level.strata),
                     refuse.NULL = TRUE, refuse.duplicates = TRUE, method = "autoplot[S4BuyseTest]")
    }else{
        validCharacter(strata, name1 = "strata", valid.length = NULL,
                       valid.values = c("global",level.strata),
                       refuse.NULL = FALSE, method = "autoplot[S4BuyseTest]")
    }

    ## endpoint
    if(!is.null(endpoint)){
        if(is.numeric(endpoint)){
            if(any(endpoint %in% 1:length(Uendpoint)==FALSE)){
                stop("Incorrect argument \'endpoint\': when numeric should be an integer vector with values between 1 and ",length(Uendpoint),".\n", sep ="")
            }
            endpoint <- names(Uendpoint)[endpoint]
        }else if(all(!duplicated(Uendpoint)) && all(endpoint %in% Uendpoint)){
            endpoint <- names(Uendpoint)[match(endpoint, Uendpoint)]
        }else{
            endpoint <- match.arg(endpoint, names(Uendpoint), several.ok = TRUE)
        }
    }else{
        endpoint <- names(Uendpoint)
    }

    objectSS <- objectS[objectS$strata %in% strata & objectS$endpoint2 %in% endpoint,
                        c("endpoint2","strata","total","favorable","unfavorable","neutral","uninf"),drop=FALSE]
    if(!is.null(label.strata)){
        if(length(label.strata) != length(strata)){
            stop("Length of argument \'label.strata\' must match the length of argument \'strata\' (here ",length(strata),").\n")
        }
        objectSS$strata <- factor(objectSS$strata, levels = strata, labels = label.strata)
    }else{
        objectSS$strata <- factor(objectSS$strata, levels = strata)
    }
    if(!is.null(label.endpoint)){
        if(length(label.endpoint) != length(endpoint)){
            stop("Length of argument \'label.endpoint\' must match the length of argument \'endpoint\' (here ",length(endpoint),").\n")
        }
        objectSS$endpoint <- factor(objectSS$endpoint2, levels = endpoint, labels = label.endpoint)
    }else{
        objectSS$endpoint <- factor(objectSS$endpoint2, levels = endpoint)
    }

    ## ** reshape data
    objectSSL <- stats::reshape(objectSS, idvar = c("endpoint","strata","total"), direction = "long",
                                varying = c("favorable","unfavorable","neutral","uninf"),
                                times = c("favorable","unfavorable","neutral","uninf"),
                                timevar = "type", v.names = "percentage")
    rownames(objectSSL) <- NULL
    objectSSL$strata <- factor(objectSSL$strata, levels = levels(objectSS$strata))
    objectSSL$endpoint <- factor(objectSSL$endpoint, levels = levels(objectSS$endpoint))
    objectSSL$type <- factor(objectSSL$type, c("favorable","unfavorable","neutral","uninf"))

    ## ** graphical display
    if(type == "pie"){
        gg <- ggplot2::ggplot(objectSSL, ggplot2::aes(x="", y=.data$percentage, fill=.data$type))
        gg <- gg + ggplot2::geom_bar(stat="identity", width = 1)
        gg <- gg + ggplot2::coord_polar("y", start=0) + ggplot2::labs(x = "", y = "", fill = "Pair (%)")
        if(length(strata)>1 && length(endpoint)>1){
            gg <- gg + ggplot2::facet_grid(strata~endpoint)
        }else if(length(strata)>1){
            gg <- gg + ggplot2::facet_wrap(~strata)
        }else if(length(endpoint)>1){
            gg <- gg + ggplot2::facet_wrap(~endpoint)
        }
    }else if(type == "racetrack"){
        gg <- ggplot2::ggplot(objectSSL, ggplot2::aes(x=rev(.data$endpoint), y=.data$percentage))
        gg <- gg + ggplot2::geom_bar(stat="identity", width = 1, ggplot2::aes(fill=.data$type))
        gg <- gg + ggplot2::coord_polar("y", start=0) + ggplot2::labs(x = "", y = "", fill = "Pair (%)")
        ## gg <- gg + ggplot2::theme(axis.text.y=ggplot2::element_blank())
        ## gg <- gg + ggplot2::geom_text(data = data.frame(endpoint=levels(objectSSL$endpoint),percentage=0), mapping = ggplot2::aes(label = .data$endpoint), size = size.text)
        ## gg <- gg + scale_x_discrete(expand = expand_scale(add = c(1,1)))
        if(length(strata)>1){
            gg <- gg + ggplot2::facet_wrap(~strata)
        }
    }else if(type == "hist"){
        gg <- ggplot2::ggplot(objectSSL, ggplot2::aes(x=.data$endpoint, y=.data$percentage/100))
        gg <- gg + ggplot2::geom_bar(ggplot2::aes(fill=.data$type), position = "stack", stat = "identity")
        gg <- gg + ggplot2::scale_y_continuous(labels=scales::percent)
        gg <- gg + ggplot2::labs(x = "", y = "", fill = "Pair (%)")
        if(length(strata)>1){
            gg <- gg + ggplot2::facet_wrap(~strata)
        }
    }
    gg <- gg + ggplot2::scale_fill_manual(values = color)

    ## ** export
    return(gg)
}

##----------------------------------------------------------------------
### autoplot-S4BuyseTest.R ends here
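## Usage sketch (added for illustration): the two-group GPC call below relies on
## the formula interface with cont() used elsewhere in this package; the data
## and column names are invented for the example.
## set.seed(10)
## df <- data.frame(treatment = rep(c("C","T"), each = 50), score = rnorm(100))
## e.BT <- BuyseTest(treatment ~ cont(score), data = df, trace = 0)
## autoplot(e.BT, type = "hist")
## autoplot(e.BT, type = "pie")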
/scratch/gouwar.j/cran-all/cranData/BuyseTest/R/autoplot.S4BuyseTest.R
### brier.R ---
##----------------------------------------------------------------------
## Author: Brice Ozenne
## Created: aug 5 2021 (13:44)
## Version:
## Last-Updated: jun 27 2023 (10:04)
## By: Brice Ozenne
## Update #: 192
##----------------------------------------------------------------------
##
### Commentary:
##
### Change Log:
##----------------------------------------------------------------------
##
### Code:

## * brier (documentation)
#' @title Estimation of the Brier Score (EXPERIMENTAL)
#' @name brier
#'
#' @description Estimation of the Brier score, possibly after cross validation,
#' to assess the discriminant ability and calibration of a biomarker regarding a disease status.
#'
#' @param labels [integer/character vector] the disease status (should only take two different values).
#' @param predictions [numeric vector] A vector with the same length as \code{labels} containing the biomarker values.
#' @param iid [array, optional] influence function of the predictions. For cross validation (CV) should be a 3 dimensional array (one slice per CV fold).
#' Otherwise a matrix with as many columns as observations and rows as predictions.
#' @param fold [character/integer vector] If using cross validation, the index of the fold.
#' Should have the same length as \code{labels}.
#' @param observation [integer vector] If using cross validation, the index of the corresponding observation in the original dataset.
#' Necessary to compute the standard error when using cross validation.
#' @param null [numeric, 0-1] the value against which the Brier score should be compared when computing the p-value.
#' @param conf.level [numeric, 0-1] the confidence level of the confidence intervals.
#' @param transformation [logical] should a log transformation be used when computing the confidence intervals and the p-value.
#'
#' @keywords models
#'
#' @return An S3 object of class \code{BuyseTestBrier} that inherits from data.frame.

## * brier (code)
#' @export
brier <- function(labels, predictions, iid = NULL, fold = NULL, observation = NULL,
                  null = NA, conf.level = 0.95, transformation = TRUE){

    ## ** normalize user input
    if(length(unique(labels))!=2){
        stop("Argument \'labels\' must have exactly two different values. \n")
    }
    if(is.numeric(labels)){
        labels.num <- labels
        labels.factor <- as.factor(labels)
    }else if(is.factor(labels)){
        labels.num <- as.numeric(labels)-1
        labels.factor <- labels
    }else{
        stop("Argument \'labels\' must be a numeric or factor vector. \n")
    }
    n.obs <- length(labels)
    if(identical(fold,0)){fold <- NULL}
    if(!is.null(iid) && is.null(fold)){
        if(NCOL(iid) != n.obs){
            stop("Argument \'iid\' must have one column per observation. \n")
        }
    }
    if(!is.null(fold)){
        Ufold <- unique(fold)
        n.fold <- length(Ufold)
        nFold.obs <- table(fold)
        if(length(predictions)!=length(fold)){
            stop("When not NULL, argument \'fold\' must have the same length as argument \'predictions\' \n")
        }
        if(length(observation)!=length(predictions)){
            stop("When argument \'fold\' is not NULL, argument \'observation\' must have the same length as argument \'predictions\' \n")
        }
        if(!is.null(observation) && any(observation %in% 1:n.obs == FALSE)){
            stop("When not NULL, argument \'observation\' must take integer values between 1 and ",n.obs,"\n", sep = "")
        }
        if(any(sapply(tapply(observation,fold,duplicated),any))==TRUE){
            stop("Cannot quantify uncertainty when the same observation appears several times in the same fold. \n")
        }
        if(!is.null(iid)){
            if(n.fold!=dim(iid)[3]){
                stop("The third dimension of argument \'iid\' should equal the number of folds. \n")
            }
            UnFold.obs <- unique(nFold.obs)
            if(length(UnFold.obs)!=1){
                stop("The number of observations should be the same in each fold. \n")
            }
            if(UnFold.obs!=dim(iid)[2]){
                stop("The second dimension of argument \'iid\' should equal the number of observations per fold. \n")
            }
        }
    }else{
        external <- FALSE
        if(n.obs!=length(predictions)){
            stop("Argument \'labels\' and \'predictions\' must have the same length. \n")
        }
        if(identical(observation,"external")){
            observation <- NULL
            external <- TRUE
        }else if(!is.null(observation)){
            stop("Argument \'observation\' is only useful when argument \'fold\' is specified \n")
        }
        observation <- 1:n.obs
    }
    if(!is.logical(transformation)){
        stop("Argument \'transformation\' must be TRUE or FALSE \n")
    }
    if(is.na(conf.level)){
        se <- FALSE
    }else{
        se <- TRUE
    }

    ## ** prepare export
    if(!is.null(fold)){
        name.fold <- as.character(sort(unique(fold)))
        n.fold <- length(name.fold)
        out <- data.frame(fold = c(name.fold,"global"),
                          estimate = as.numeric(NA),
                          se = as.numeric(NA),
                          lower = as.numeric(NA),
                          upper = as.numeric(NA),
                          p.value = as.numeric(NA))
    }else{
        out <- data.frame(fold = "global",
                          estimate = as.numeric(NA),
                          se = as.numeric(NA),
                          lower = as.numeric(NA),
                          upper = as.numeric(NA),
                          p.value = as.numeric(NA))
    }

    ## ** compute brier score
    if(is.null(fold)){
        iBrier <- (predictions - labels.num)^2
        out$estimate <- mean(iBrier)
        if(se){
            iidAverage <- (iBrier-out$estimate)/(sqrt(n.obs)*sqrt(n.obs-1))
            if(is.null(iid)){
                attr(out,"iid") <- iidAverage
                ## se: stats::sd(iBrier)/sqrt(n.obs)
            }else{
                ## sqrt(crossprod(iidAverage)) - stats::sd(iBrier)/sqrt(n.obs)
                iidNuisance <- rowMeans(.rowMultiply_cpp(iid, 2*(predictions - labels.num)))
                if(external){
                    attr(out,"iid") <- c(iidNuisance, iidAverage)
                }else{
                    attr(out,"iid") <- iidAverage + iidNuisance
                }
            }
            out$se <- sqrt(crossprod(attr(out,"iid")))
        }
    }

    ## ** compute brier score (CV)
    if(!is.null(fold)){
        Uobservation <- unique(sort(observation))
        n.Uobservation <- length(Uobservation)

        iBrier <- rep(0, length = n.obs)
        iFactor <- vector(mode = "list", length = n.obs)
        for(iObs in 1:n.obs){ ## iObs <- 1
            if(any(observation==iObs)){
                iFactor[[iObs]] <- setNames(n.Uobservation/(nFold.obs[as.character(fold[observation==iObs])]*n.fold),fold[observation==iObs])
                iBrier[iObs] <- sum((predictions[observation==iObs] - labels.num[iObs])^2*iFactor[[iObs]])
            }
        }
        out$estimate[match(name.fold,out$fold)] <- tapply((predictions-labels.num[observation])^2, fold, mean)[name.fold]
        out$estimate[out$fold=="global"] <- mean(iBrier[Uobservation]) ## mean(iBrier[Uobservation]) - mean(out$estimate[1:10])
        if(se){
            if(is.null(iid)){
                out$se[match(name.fold,out$fold)] <- tapply((predictions-labels.num[observation])^2, fold, function(iDiff){sqrt(stats::var(iDiff)/length(iDiff))})[name.fold]
                out$se[out$fold=="global"] <- stats::sd(iBrier[Uobservation])/sqrt(n.Uobservation)
                ## out$se - mean(tapply((predictions-labels[observation])^2,fold,sd)) ## no need to be equal
                attr(out,"iid") <- rep(0, length = n.obs)
                attr(out,"iid")[Uobservation] <- (iBrier[Uobservation]-out$estimate[out$fold=="global"])/(sqrt(n.Uobservation)*sqrt(n.Uobservation-1))
                ## out$se[out$fold=="global"] - sqrt(crossprod(attr(out,"iid")))
            }else{
                iidAverage <- rep(0, length = n.obs)
                iidNuisance <- rep(0, length = n.obs)
                iidAverage[Uobservation] <- (iBrier[Uobservation]-out$estimate[out$fold=="global"])/(sqrt(n.Uobservation)*sqrt(n.Uobservation-1))
                ## stats::sd(iBrier[Uobservation])/sqrt(n.Uobservation) - sqrt(crossprod(iidAverage)) ## should be equal
                for(iFold in 1:n.fold){ ## iFold <- 1
                    iiFactor <- sapply(iFactor[observation[fold==name.fold[iFold]]],function(iVec){iVec[name.fold[iFold]]})
                    iStat <- 2*(predictions[fold==name.fold[iFold]] - labels.num[observation[fold==name.fold[iFold]]])
                    iidNuisance <- iidNuisance + rowMeans(.rowMultiply_cpp(iid[,,iFold], iStat*iiFactor))
                    ## in each fold because of CV the training and test set are separate so the uncertainties are independent
                    term1 <- stats::sd((predictions[fold==name.fold[iFold]] - labels.num[observation[fold==name.fold[iFold]]])^2)
                    term2 <- sqrt(crossprod(rowMeans(.rowMultiply_cpp(iid[,,iFold], iStat)))/sum(fold==name.fold[iFold]))
                    out[out$fold==name.fold[iFold],"se"] <- term1 + term2
                }
                attr(out,"iid") <- iidAverage + iidNuisance/sqrt(n.obs)
                out$se[out$fold=="global"] <- sqrt(crossprod(attr(out,"iid")))
            }
        }
    }

    ## ** P-value and confidence interval
    if(se){
        alpha <- 1-conf.level
        qinf <- stats::qnorm(alpha/2)
        qsup <- stats::qnorm(1-alpha/2)
        if(transformation){
            newse <- out$se / out$estimate
            out$lower <- as.double(out$estimate * exp(qinf * newse))
            out$upper <- as.double(out$estimate * exp(qsup * newse))
            if(!is.na(null)){
                z.stat <- (log(out$estimate) - log(null))/newse
                out$p.value <- 2*(1-stats::pnorm(abs(z.stat)))
            }
        }else{
            out$lower <- as.double(out[,"estimate"] + qinf * out[,"se"])
            out$upper <- as.double(out[,"estimate"] + qsup * out[,"se"])
            if(!is.na(null)){
                z.stat <- as.double((out[,"estimate"]-null)/out[,"se"])
                out$p.value <- 2*(1-stats::pnorm(abs(z.stat)))
            }
        }
    }

    ## ** Export
    class(out) <- append("BuyseTestBrier",class(out))
    attr(out, "contrast") <- levels(labels.factor)
    ## attr(out, "n.fold") <- n.fold
    return(out)
}

## * Utilities
## ** print.BuyseTestBrier
##' @exportMethod print
print.BuyseTestBrier <- function(x, ...){
    print.data.frame(x)
}

## ** coef.BuyseTestBrier
##' @title Extract the Brier Score
##'
##' @description Extract the Brier score.
##'
##' @param object object of class \code{BuyseTestBrier} (output of the \code{brier} function).
##' @param ... not used. For compatibility with the generic function.
##'
##' @return Estimated value for the Brier score (numeric).
##'
##' @method coef BuyseTestBrier
##' @keywords methods
##'
##' @export
coef.BuyseTestBrier <- function(object,...){
    object[,"estimate"]
}

## ** confint.BuyseTestBrier
##' @title Extract the Brier Score with its Confidence Interval
##'
##' @description Extract the Brier score with its Confidence Interval and possibly a p-value.
##'
##' @param object object of class \code{BuyseTestBrier} (output of the \code{brier} function).
##' @param ... not used. For compatibility with the generic function.
##'
##' @return Estimated value for the Brier score, its standard error, the lower and upper bound of the confidence interval and the p-value.
##'
##' @method confint BuyseTestBrier
##' @keywords methods
##'
##' @export
confint.BuyseTestBrier <- function(object,...){
    out <- object[object$fold=="global",c("estimate","se","lower","upper","p.value")]
    rownames(out) <- NULL
    return(out)
}

## ** iid.BuyseTestBrier
##' @title Extract the iid Decomposition for the Brier Score
##'
##' @description Extract the iid decomposition relative to the Brier score estimate.
##'
##' @param x object of class \code{BuyseTestBrier} (output of the \code{brier} function).
##' @param ... not used. For compatibility with the generic function.
##'
##' @return A column vector.
##' ##' @method iid BuyseTestBrier ##' @keywords methods ##' ##' @export iid.BuyseTestBrier <- function(x,...){ object <- x return(attr(object,"iid")) } ##---------------------------------------------------------------------- ### brier.R ends here
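## Usage sketch (added for illustration, no cross-validation): the labels and
## predictions below are simulated purely to show the calling convention.
## set.seed(10)
## y <- rbinom(100, size = 1, prob = 0.5)
## p <- stats::plogis(rnorm(100))
## brier(labels = y, predictions = p)           ## Brier score with confidence interval
## confint(brier(labels = y, predictions = p))  ## global row only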
/scratch/gouwar.j/cran-all/cranData/BuyseTest/R/brier.R
## * Documentation - constStrata
#' @title Strata creation
#' @name constStrata
#'
#' @description Create strata from several variables.
#'
#' @param data [data.frame] dataset.
#' @param strata [character vector] A vector of the variables capturing the stratification factors.
#' @param sep [character] string to construct the new level labels by joining the constituent ones.
#' @param lex.order [logical] Should the order of factor concatenation be lexically ordered?
#' @param trace [logical] Should the execution of the function be traced?
#' @param as.numeric [logical] Should the strata be converted from factors to numeric?
#'
#' @details
#' This function uses the \code{interaction} function from the \emph{base} package to form the strata.
#'
#' @return A \emph{factor vector} or a \emph{numeric vector}.
#' @keywords utilities
#'
#' @examples
#' library(data.table)
#'
#' library(survival) ## import veteran
#'
#' # strata with two variables : celltype and karno
#' veteran$strata1 <- constStrata(veteran,c("celltype","karno"))
#' table(veteran$strata1)
#'
#' # strata with three variables : celltype, karno and age dichotomized at 60 years
#' veteran$age60 <- veteran$age>60
#' veteran$age60 <- factor(veteran$age60,labels=c("<=60",">60")) # convert to factor with labels
#' veteran$strata2 <- constStrata(veteran,c("celltype","karno","age60"))
#' table(veteran$strata2) # factor strata variable
#'
#' veteran$strata2 <- constStrata(veteran,c("celltype","karno","age60"), as.numeric=TRUE)
#' table(veteran$strata2) # numeric strata variable
#'
#' @keywords function
#' @author Brice Ozenne

## * Function constStrata
#' @rdname constStrata
#' @export
constStrata <- function(data,strata,sep=".",lex.order = FALSE,trace=TRUE,as.numeric=FALSE){

    if(any(strata %in% names(data) == FALSE)){
        stop("constStrata : wrong specification of \'strata\' \n",
             "some columns requested are missing in data \n",
             "missing strata : ",paste(strata[strata %in% names(data) == FALSE],collapse=" "),"\n",
             "available variables in data : ",paste(names(data)[names(data) %in% strata == FALSE],collapse=" "),"\n")
    }

    if(data.table::is.data.table(data)){
        resInteractions <- data[,interaction(.SD[[1]],drop = TRUE,lex.order=lex.order,sep=sep), .SDcols = strata]
    }else{
        resInteractions <- interaction(as.list(data[,strata]),drop = TRUE,lex.order=lex.order,sep=sep)
    }
    levels <- levels(resInteractions)
    n.levels <- length(levels)

    ## ** display
    if(trace==TRUE){
        table_tempo <- as.numeric(table(resInteractions))
        max.num <- 5 #nchar(max(n.levels))
        ncharLevels <- nchar(levels)

        textLevels <- sapply(1:n.levels,function(x){
            paste(levels[x],paste(rep(" ",max(6-ncharLevels[x],max(ncharLevels)-ncharLevels[x])),collapse="")," : ",table_tempo[x],sep="")
        })

        cat(n.levels," strata were found from the ",length(strata)," strata variable",if(length(strata)>1){"s"}," (",paste(strata,collapse=" "),")\n",
            "(",rep("#",max.num),") strata ",paste(rep(" ",max(0,max(ncharLevels)-6)),collapse=""),": number of observations \n",sep="")
        for(iLevel in 1:n.levels){
            cat("(",iLevel,")",paste(rep(" ",max.num-nchar(iLevel)),collapse="")," ",textLevels[[iLevel]],"\n",sep="")
        }
        cat("(total) ",rep(" ",max(ncharLevels,6))," : ",length(resInteractions),"\n",sep="")
    }

    ## ** conversion
    if(as.numeric==TRUE){
        resInteractions <- as.numeric(resInteractions)
    }

    ## ** export
    return(resInteractions)
}
/scratch/gouwar.j/cran-all/cranData/BuyseTest/R/constStrata.R
### discreteRoot.R ---
##----------------------------------------------------------------------
## Author: Brice Ozenne
## Created: nov 22 2017 (13:39)
## Version:
## Last-Updated: sep 27 2023 (17:33)
## By: Brice Ozenne
## Update #: 315
##----------------------------------------------------------------------
##
### Commentary:
##
### Change Log:
##----------------------------------------------------------------------
##
### Code:

## * discreteRoot - Documentation
#' @title Dichotomic search for monotone function
#' @description Find the root of a monotone function on a discrete grid of values using dichotomic search.
#' @noRd
#'
#' @param fn [function] objective function to minimize in absolute value.
#' @param grid [vector] possible minimizers.
#' @param increasing [logical] is the function fn increasing?
#' @param check [logical] should the program check that fn takes a different sign for the first vs. the last value of the grid?
#' @param tol [numeric] the absolute convergence tolerance.
#' @author Brice Ozenne

## * discreteRoot
discreteRoot <- function(fn, grid, increasing = TRUE, check = TRUE,
                         tol = .Machine$double.eps ^ 0.5) {

    n.grid <- length(grid)
    value.grid <- rep(NA, n.grid)
    iter <- 1
    ncv <- TRUE
    iSet <- 1:n.grid
    factor <- c(-1,1)[increasing+1]

    ## ** Check
    if(check){
        value.grid[1] <- fn(grid[1])
        value.grid[n.grid] <- fn(grid[n.grid])
        if(sign(value.grid[1])==sign(value.grid[n.grid])){
            return(list(par = NA, value = NA, counts = 0, cv = 1,
                        message = "Cannot find a solution because the function does not change sign \n"))
        }
        if(increasing && value.grid[1] > value.grid[n.grid]){
            return(list(par = NA, value = NA, counts = 0, cv = 1,
                        message = "Cannot find a solution - argument \'increasing\' does not match the variations of the functions \n"))
        }
        if(!increasing && value.grid[1] < value.grid[n.grid]){
            return(list(par = NA, value = NA, counts = 0, cv = 1,
                        message = "Cannot find a solution - argument \'increasing\' does not match the variations of the functions \n"))
        }
    }

    ## ** Explore the grid using dichotomic search
    while(iter <= n.grid && ncv && length(iSet)>0){
        iMiddle <- ceiling(length(iSet)/2)
        iIndexInSet <- iSet[iMiddle]
        if(check==FALSE || iIndexInSet %in% c(1,n.grid) == FALSE){
            ## if the current index we are looking at has not already been computed,
            ## then evaluate the objective function.
            ## this is only the case when check is TRUE and we look at the borders
            value.grid[iIndexInSet] <- fn(grid[iIndexInSet])
        }
        if(is.na(value.grid[iIndexInSet])){
            ## handle NA value by just removing the observation from the set of possibilities
            iSet <- setdiff(iSet,iMiddle)
            iter <- iter + 1
        }else if(factor*value.grid[iIndexInSet] > tol){
            ## look in subgrid corresponding to the lowest values (left part)
            iSet <- iSet[setdiff(1:iMiddle,iMiddle)]
            iter <- iter + 1
        }else if(factor*value.grid[iIndexInSet] < -tol){
            ## look in subgrid corresponding to the largest values (right part)
            iN.set <- length(iSet)
            iSet <- iSet[setdiff(iMiddle:iN.set,iMiddle)]
            iter <- iter + 1
        }else{
            ## convergence
            ncv <- FALSE
            solution <- grid[iIndexInSet]
            value <- value.grid[iIndexInSet]
        }
    }

    ## ** If we did not find a value whose image matches tol, return the closest solution
    if(ncv){
        iIndexInSet <- which.min(abs(value.grid))
        ncv <- FALSE
        solution <- grid[iIndexInSet]
        value <- value.grid[iIndexInSet]
    }

    return(list(par = solution,
                value = value,
                index = iIndexInSet,
                ## grid = stats::setNames(value.grid,grid),
                counts = iter,
                cv = (ncv==FALSE),
                message = NULL))
}

## * boot2pvalue (documentation)
#' @title Compute the p.value from the distribution under H1
#' @description Compute the p.value associated with the estimated statistic
#' using a bootstrap sample of its distribution under H1.
#' @noRd
#'
#' @param x [numeric vector] a vector of bootstrap estimates of the statistic.
#' @param null [numeric] value of the statistic under the null hypothesis.
#' @param estimate [numeric] the estimated statistic.
#' @param alternative [character] a character string specifying the alternative hypothesis, must be one of "two.sided" (default), "greater" or "less".
#' @param FUN.ci [function] the function used to compute the confidence interval.
#' Must take \code{x}, \code{alternative}, \code{level}, \code{sign.estimate}, and \code{type.quantile} as arguments.
#' It only returns the relevant limit (either upper or lower) of the confidence interval.
#' @param checkSign [logical] should a warning be output if the sign of the estimate differs from the sign of the mean bootstrap value?
#' @param tol [numeric] the absolute convergence tolerance.
#' @param type.quantile [integer, 1-9] quantile algorithm to be used to evaluate quantiles. Passed to \code{stats::quantile}.
#' @param add.1 [logical] conservative correction ensuring that the p-value is strictly positive.
#'
#' @details
#' For a test statistic close to 0, this function returns 1. \cr \cr
#'
#' For a positive test statistic, this function searches for the quantile alpha such that:
#'\itemize{
#' \item \code{quantile(x, probs = alpha)=0} when the argument alternative is set to \code{"greater"}.
#' \item \code{quantile(x, probs = 0.5*alpha)=0} when the argument alternative is set to \code{"two.sided"}.
#' }
#' If the argument alternative is set to \code{"less"}, it returns 1. \cr \cr
#'
#' For a negative test statistic, this function searches for the quantile alpha such that:
#' \itemize{
#' \item \code{quantile(x, probs = 1-alpha)=0} when the argument alternative is set to \code{"less"}.
#' \item \code{quantile(x, probs = 1-0.5*alpha)=0} when the argument alternative is set to \code{"two.sided"}.
#' }
#' If the argument alternative is set to \code{"greater"}, it returns 1.
#'
#' @examples
#' set.seed(10)
#'
#' #### no effect ####
#' x <- rnorm(1e3)
#' boot2pvalue(x, null = 0, estimate = mean(x), alternative = "two.sided")
#' ## expected value of 1
#' boot2pvalue(x, null = 0, estimate = mean(x), alternative = "greater")
#' ## expected value of 0.5
#' boot2pvalue(x, null = 0, estimate = mean(x), alternative = "less")
#' ## expected value of 0.5
#'
#' #### positive effect ####
#' x <- rnorm(1e3, mean = 1)
#' boot2pvalue(x, null = 0, estimate = 1, alternative = "two.sided")
#' ## expected value of 0.32 = 2*pnorm(q = 0, mean = -1) = 2*mean(x<=0)
#' boot2pvalue(x, null = 0, estimate = 1, alternative = "greater")
#' ## expected value of 0.16 = pnorm(q = 0, mean = 1) = mean(x<=0)
#' boot2pvalue(x, null = 0, estimate = 1, alternative = "less")
#' ## expected value of 0.84 = 1-pnorm(q = 0, mean = 1) = mean(x>=0)
#'
#' #### negative effect ####
#' x <- rnorm(1e3, mean = -1)
#' boot2pvalue(x, null = 0, estimate = -1, alternative = "two.sided")
#' ## expected value of 0.32 = 2*(1-pnorm(q = 0, mean = -1)) = 2*mean(x>=0)
#' boot2pvalue(x, null = 0, estimate = -1, alternative = "greater")
#' ## expected value of 0.84 = pnorm(q = 0, mean = -1) = mean(x<=0)
#' boot2pvalue(x, null = 0, estimate = -1, alternative = "less") # pnorm(q = 0, mean = -1)
#' ## expected value of 0.16 = 1-pnorm(q = 0, mean = -1) = mean(x>=0)

## * boot2pvalue (code)
boot2pvalue <- function(x, null, estimate = NULL, alternative = "two.sided",
                        FUN.ci = .quantileCI, checkSign = TRUE,
                        tol = .Machine$double.eps ^ 0.5, type.quantile = NULL, add.1 = FALSE){

    if(all(is.na(x))){
        stop("Incorrect argument \'x\': only contains NA values. \n")
    }
    x.boot <- stats::na.omit(x)
    n.boot <- length(x.boot)
    if(any(is.infinite(x.boot))){
        statistic.boot <- stats::median(x.boot, na.rm = TRUE) - null
    }else{
        statistic.boot <- mean(x.boot, na.rm = TRUE) - null
    }
    if(is.null(estimate)){
        statistic <- statistic.boot
    }else{
        statistic <- estimate - null
        if(checkSign && sign(statistic.boot)!=sign(statistic)){
            message("the estimate and the average bootstrap estimate do not have the same sign \n")
        }
    }
    sign.statistic <- statistic>=0

    if(add.1){
        zero <- 1/(n.boot+1)
    }else{
        zero <- 0
    }

    if(abs(statistic) < tol){ ## too small test statistic
        p.value <- 1
    }else if(n.boot < 10){ ## too few bootstrap samples
        p.value <- as.numeric(NA)
    }else if(all(x.boot>null)){ ## clear p.value
        p.value <- switch(alternative,
                          "two.sided" = 0,
                          "less" = 1,
                          "greater" = zero)
    }else if(all(x.boot<null)){ ## clear p.value
        p.value <- switch(alternative,
                          "two.sided" = zero,
                          "less" = zero,
                          "greater" = 1)
    }else{ ## need search to obtain p.value
        ## when the p.value (=1-coverage) increases, does the quantile increase?
        increasing <- switch(alternative,
                             "two.sided" = sign.statistic,
                             "less" = FALSE,
                             "greater" = TRUE)
        ## grid of confidence levels
        grid <- seq(0,by=1/n.boot,length.out=n.boot+1)
        ## search for the critical confidence level
        resSearch <- discreteRoot(fn = function(p.value){
            CI <- FUN.ci(x = x.boot, level = p.value,
                         alternative = alternative,
                         sign.estimate = sign.statistic,
                         type.quantile = type.quantile)
            return(CI[1]-null)
        }, grid = grid, increasing = increasing, check = FALSE)

        ## cv check
        if(is.na(resSearch$value) || length(resSearch$value)==0 || resSearch$par<0 || resSearch$par>1 || resSearch$cv == FALSE){
            warning("incorrect convergence of the algorithm finding the critical quantile \n",
                    "p-value may not be reliable \n")
        }
        ## ## ensures a conservative estimate by taking the first level where the confidence interval excludes 0 (i.e. the next level if the confidence interval still includes 0)
        ## if((resSearch$index+1<length(grid)) && ((resSearch$value>0 && sign.statistic==FALSE) || (resSearch$value<0 && sign.statistic==TRUE))){
        ##     p.value <- grid[resSearch$index+1]
        ## }else{
        ##     p.value <- resSearch$par
        ## }
        p.value <- max(resSearch$par, zero)
    }

    if(p.value %in% c(0,1)){
        message("Estimated p-value of ",p.value," - consider increasing the number of bootstrap samples \n")
    }

    return(p.value)
}

## * quantileCI
.quantileCI <- function(x, alternative, level, sign.estimate, type.quantile){
    probs <- switch(alternative,
                    "two.sided" = c(level/2,1-level/2)[2-sign.estimate], ## if positive p.value/2 otherwise 1-p.value/2
                    "less" = 1-level,
                    "greater" = level)
    if(!is.null(type.quantile)){
        bound <- stats::quantile(x, probs = probs, type = type.quantile)[1]
    }else{
        bound <- stats::quantile(x, probs = probs)[1]
    }
    return(bound)
}

##----------------------------------------------------------------------
### discreteRoot.R ends here
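## Illustration (added): dichotomic search for the zero of an increasing
## function over a discrete grid; the grid point closest to the root is returned.
## discreteRoot(fn = function(x){x - 0.3},
##              grid = seq(0, 1, by = 0.01),
##              increasing = TRUE)$par  ## returns 0.3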
/scratch/gouwar.j/cran-all/cranData/BuyseTest/R/discreteRoot.R
### iid.S3sensitivity.R --- ##---------------------------------------------------------------------- ## Author: Brice Ozenne ## Created: jun 27 2023 (14:28) ## Version: ## Last-Updated: jun 27 2023 (14:28) ## By: Brice Ozenne ## Update #: 2 ##---------------------------------------------------------------------- ## ### Commentary: ## ### Change Log: ##---------------------------------------------------------------------- ## ### Code: ## * iid.S3sensitivity #' @export iid.S3sensitivity <- function(x, ...){ return(attr(x,"iid")) } ##---------------------------------------------------------------------- ### iid.S3sensitivity.R ends here
/scratch/gouwar.j/cran-all/cranData/BuyseTest/R/iid.S3sensitivity.R
### prodlim-iid.R ---
##----------------------------------------------------------------------
## Author: Brice Ozenne
## Created: apr 1 2019 (23:06)
## Version:
## Last-Updated: jan 23 2024 (13:54)
## By: Brice Ozenne
## Update #: 133
##----------------------------------------------------------------------
##
### Commentary:
##
### Change Log:
##----------------------------------------------------------------------
##
### Code:

## * iid.prodlim - documentation
#' @title Extract i.i.d. decomposition from a prodlim model
#' @description Compute the influence function for each observation used to estimate the model
#' @name iid.prodlim
#'
#' @param x A prodlim object.
#' @param add0 [logical] add the 0 to the vector of relevant times.
#' @param ... not used. For compatibility with the generic method.
#'
#' @details
#' This function is a simplified version of the iidCox function of the riskRegression package.
#' Formulas for the influence function can be found in (Ozenne et al., 2017).
#'
#' @return A list containing:
#' \itemize{
#' \item IFbeta: Influence function for the regression coefficient.
#' \item IFhazard: Time differential of the influence function of the hazard.
#' \item IFcumhazard: Influence function of the cumulative hazard.
#' \item time: Times at which the influence function has been evaluated.
#' \item etime.max: Last observation time (i.e. jump or censoring) in each stratum.
#' \item label.strata: Stratum to which each observation belongs.
#' \item X: Design matrix.
#' \item table: Hazard at each time for each stratum.
#' }
#'
#' @keywords methods
#'
#' @references
#' Brice Ozenne, Anne Lyngholm Sorensen, Thomas Scheike, Christian Torp-Pedersen and Thomas Alexander Gerds.
#' riskRegression: Predicting the Risk of an Event using Cox Regression Models.
#' The R Journal (2017) 9:2, pages 440-460.
#' #' @author Brice Ozenne ## * iid.prodlim - examples #' @rdname iid.prodlim #' @examples #' library(data.table) #' library(prodlim) #' #' set.seed(10) #' dt <- simBuyseTest(10) #' setkeyv(dt, "treatment") #' #' e.KM <- prodlim(Hist(eventtime,status)~treatment, data = dt) #' lava::iid(e.KM) ## * iid.prodlim - code #' @export iid.prodlim <- function(x, add0 = FALSE, ...){ object <- x if(object$type!="surv"){ stop("Influence function only available for survival models \n") } ## ** extract elements from object X <- object$X is.strata <- !is.null(X) strataVar <- names(X) n.strataVar <- NCOL(X) if(is.strata){ n.strata <- NROW(X) }else{ n.strata <- 1 } level.strata <- as.character(interaction(X)) vec.strata <- factor(interaction(object$model.matrix[object$originalDataOrder,,drop=FALSE]), levels = level.strata) vec.strataNum <- as.numeric(vec.strata) vec.eventtime <- object$model.response[object$originalDataOrder,1] vec.status <- object$model.response[object$originalDataOrder,2] ## ** Extract baseline hazard + number at risk ## baseline hazard df.strata <- do.call(rbind,lapply(1:n.strata, function(iS){ M <- matrix(X[iS,], ncol = n.strataVar, nrow = object$size.strata[iS], byrow = TRUE, dimnames = list(NULL, strataVar)) cbind("strata.index" = iS, data.frame(M, stringsAsFactors = FALSE)) })) tableHazard <- data.table::data.table(df.strata, hazard = object$hazard, survival = object$surv, time = object$time, event = object$n.event, atrisk = object$n.risk) tableHazard.red <- tableHazard[tableHazard$event>0] if(add0){ tableHazard0 <- tableHazard[,.SD[1], by = "strata.index"] tableHazard0[,c("hazard","survival","time","event","atrisk") := list(rep(0,n.strata), rep(1,n.strata), rep(0,n.strata), rep(0,n.strata), as.double(table(vec.strata)) )] tableHazard.red <- rbind(tableHazard0,tableHazard.red) data.table::setkeyv(tableHazard.red, c("strata.index","time")) } n.times <- NROW(tableHazard.red) n.obs <- NROW(object$model.matrix) ## ** Computation of the influence function ## -\Ind[strata] \int(\lambda0/S0) - jump/S0) IFhazard <- vector(mode = "list", length = n.strata) IFcumhazard <- vector(mode = "list", length = n.strata) IFsurvival <- vector(mode = "list", length = n.strata) IFcif <- vector(mode = "list", length = n.strata) ls.Utime1 <- vector(mode = "list", length = n.strata) for(iStrata in 1:n.strata){ ## iStrata <- 1 iTableHazard <- tableHazard.red[tableHazard.red$strata.index == iStrata] ls.Utime1[[iStrata]] <- iTableHazard$time iN.time <- length(ls.Utime1[[iStrata]]) iHazard <- iTableHazard$hazard iSurvival <- iTableHazard$survival ## prepare IFhazard[[iStrata]] <- matrix(0, nrow = n.obs, ncol = iN.time) IFcumhazard[[iStrata]] <- matrix(0, nrow = n.obs, ncol = iN.time) IFsurvival[[iStrata]] <- matrix(0, nrow = n.obs, ncol = iN.time) IFcif[[iStrata]] <- matrix(0, nrow = n.obs, ncol = iN.time) ## only keep observation in the strata and with eventtime at or after the first jump iSubsetObs <- intersect(which(vec.strataNum==iStrata), which(vec.eventtime>=min(ls.Utime1[[iStrata]]))) iVec.eventtime <- vec.eventtime[iSubsetObs] iVec.status <- vec.status[iSubsetObs] iIndexJump <- prodlim::sindex(jump.times = ls.Utime1[[iStrata]], eval.times = iVec.eventtime) iDelta_iS0 <- iVec.status / iTableHazard$atrisk[iIndexJump] ## hazard iHazard_iS0 <- iHazard/iTableHazard$atrisk iIndEvent <- do.call(cbind, lapply(ls.Utime1[[iStrata]], function(iT){ (abs(iT - iVec.eventtime ) < 1e-12) * iDelta_iS0 })) iRatio <- do.call(cbind, lapply(1:iN.time, function(iT){ (iT <= iIndexJump) * iHazard_iS0[iT] })) 
        IFhazard[[iStrata]][iSubsetObs,] <- - iRatio + iIndEvent

        ## cumulative hazard
        IFcumhazard[[iStrata]][iSubsetObs,] <- .rowCumSum_cpp(IFhazard[[iStrata]][iSubsetObs,,drop=FALSE])

        ## survival
        ## note: uses exp(-cumhazard) instead of the product-limit estimator for consistency with riskRegression
        IFsurvival[[iStrata]][iSubsetObs,] <- .rowMultiply_cpp(-IFcumhazard[[iStrata]][iSubsetObs,,drop=FALSE],
                                                               scale = exp(-cumsum(iTableHazard$hazard)))
        ## the influence function of the cumulative incidence (1-survival) is minus that of the survival
        IFcif[[iStrata]][iSubsetObs,] <- - IFsurvival[[iStrata]][iSubsetObs,]
    }

    ## ** Modification used by BuyseTest to enable the user to easily specify model.tte
    if(!is.null(object$XX) && !identical(object$X,object$XX)){
        oldlevel.strata <- level.strata
        level.strata <- as.character(interaction(object$XX))
        X <- object$XX

        index.strata <- match(tableHazard.red[,interaction(.SD),.SDcols = names(object$X)], oldlevel.strata)
        tableHazard.red[, c(names(object$X)) := NULL]
        tableHazard.red <- cbind(tableHazard.red[,.SD, .SDcols = "strata.index"],
                                 object$XX[index.strata,,drop=FALSE],
                                 tableHazard.red[,.SD, .SDcols = c("hazard","survival","time","event","atrisk")])
    }

    ## ** Export
    return(list(IFhazard = IFhazard,
                IFcumhazard = IFcumhazard,
                IFsurvival = IFsurvival,
                time = ls.Utime1,
                etime.max = tableHazard[,max(.SD$time),by = "strata.index"][[2]],
                label.strata = level.strata,
                X = X,
                table = tableHazard.red
                ))
}

##----------------------------------------------------------------------
### prodlim-iid.R ends here
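## Sanity-check sketch (added): the influence functions are centered, so within
## each stratum the columns of IFhazard sum to zero over the observations.
## Reuses the 'e.KM' object fitted in the example above.
## res <- lava::iid(e.KM)
## range(colSums(res$IFhazard[[1]]))  ## numerically close to 0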
/scratch/gouwar.j/cran-all/cranData/BuyseTest/R/iid.prodlim.R
### normexp.R --- ##---------------------------------------------------------------------- ## Author: Brice Ozenne ## Created: maj 6 2020 (14:06) ## Version: ## Last-Updated: jun 28 2023 (14:14) ## By: Brice Ozenne ## Update #: 75 ##---------------------------------------------------------------------- ## ### Commentary: ## ### Change Log: ##---------------------------------------------------------------------- ## ### Code: ## * exponential distribution ## cumulative distribution fuction for Z = X + \rho Y ## where X follows a standard normal distribution ## and Y an exponential distribution with rate parameter \lambda ## denoting \Phi the cumulative distribution function of the standard normal distribution: ## F_Z(z) = \Prob[X + \rho Y < z] ## = \int f(x,y) \Ind[x + \rho y < z] dx dy ########################### rho>0 ## = \int_{x \in [-\inf;z]} f(x) \int_{y \in [0;(z-x)/\rho] f(y) dx dy ## = \int_{x \in [-\inf;z]} f(x) (1-\exp(-(z-x)*\lambda/\rho)) dx ## = \Phi(z) - \exp(-z\lambda/rho)/\sqrt{2\pi} \int_{x \in [-\inf;z]} \exp(-x^2/2)\exp(x*\lambda/\rho) dx ## = \Phi(z) - \exp(-z\lambda/rho+\lambda^2/(2\rho^2))/\sqrt{2\pi} \int_{x \in [-\inf;z]} \exp(-(x-\lambda/rho)^2/2) dx ## = \Phi(z) - \exp(-z\lambda/rho+\lambda^2/(2\rho^2)) \Phi(z-\lambda/\rho) ##' @title Cumulative Distribution Function of a Gaussian Variable Plus an Exponential Variable ##' @noRd ##' ##' @examples ##' \dontrun{ ##' n <- 1e6 ##' ##' ## rho > 0 ##' mean(rnorm(n) + 1.5 * rexp(n, rate = 2) <= 0.1) ##' pnormexp(0.1, rate = 2, rho = 1.5) ##' mean(rnorm(n) + 1.5 * rexp(n, rate = 2) <= 0.9) ##' pnormexp(0.9, rate = 2, rho = 1.5) ##' ##' ## rho < 0 ##' mean(rnorm(n) - 1.5 * rexp(n, rate = 2) <= 0.1) ##' pnormexp(0.1, rate = 2, rho = -1.5) ##' mean(rnorm(n) - 1.5 * rexp(n, rate = 2) <= 0.9) ##' pnormexp(0.9, rate = 2, rho = -1.5) ##' } pnormexp <- function(q, rate, rho){ if(abs(rho)<1e-12){ out <- stats::pnorm(q) }else if(rho>0){ out <- stats::pnorm(q) - exp(-(rate/rho)*q+(rate/rho)^2/2)*stats::pnorm(q, mean = rate/rho) }else{ ## mean(rnorm(1e5) + rho * rexp(1e5, rate = 2) <= q) ## MONTE CARLO ## cubature::adaptIntegrate(f = function(x){dnorm(x[1])*dexp(x[2], rate = rate)*(x[1]+rho*x[2]<q)}, lowerLimit = c(-10,-10), upperLimit = c(10,10)) ## NUMERIC INTEGRATION ## integrate(f = function(x){ dnorm(x) * sapply(x, function(iX){integrate(f = function(y){dexp(y, rate = rate)}, lower = (q-iX)/rho, upper = 10)$value})}, lower = -10, upper = 10) ## integrate(f = function(x){ dnorm(x) * (1-pexp(q = (q-x)/rho, rate = rate))}, lower = -10, upper = 10) ## 1 - integrate(f = function(x){ dnorm(x) * pexp(q = (q-x)/rho, rate = rate)}, lower = -10, upper = 10)$value ## 1 - integrate(f = function(x){ dnorm(x) * pexp(q = (q-x)/rho, rate = rate)}, lower = q, upper = 10)$value ## 1 - integrate(f = function(x){ dnorm(x) * (1 - exp(-(q-x)*rate/rho))}, lower = q, upper = 10)$value ## pnorm(q) + integrate(f = function(x){ dnorm(x) * exp(-(q-x)*rate/rho)}, lower = q, upper = 10)$value out <- stats::pnorm(q) + exp(-(rate/rho)*q+(rate/rho)^2/2)*(1-stats::pnorm(q, mean = rate/rho)) } return(out) } ##' @title Density of a Gaussian Variable Plus an Exponential Variable ##' @noRd ##' ##' @examples ##' \dontrun{ ##' n <- 1e6 ##' ##' c(qnormexp(0.5, rate = 2, rho = 1.5), quantile(rnorm(n) + 1.5 * rexp(n, rate = 2), 0.5)) ##' c(qnormexp(0.95, rate = 1/10, rho = 1.5), quantile(rnorm(n) + 1.5 * rexp(n, rate = 1/10), 0.95)) ##' ##' c(qnormexp(0.5, rate = 2, rho = -1.5), quantile(rnorm(n) - 1.5 * rexp(n, rate = 2), 0.5)) ##' c(qnormexp(0.95, rate = 1/10, rho = 
##' @title Quantile Function of a Gaussian Variable Plus an Exponential Variable
##' @noRd
##'
##' @examples
##' \dontrun{
##' n <- 1e6
##'
##' c(qnormexp(0.5, rate = 2, rho = 1.5), quantile(rnorm(n) + 1.5 * rexp(n, rate = 2), 0.5))
##' c(qnormexp(0.95, rate = 1/10, rho = 1.5), quantile(rnorm(n) + 1.5 * rexp(n, rate = 1/10), 0.95))
##'
##' c(qnormexp(0.5, rate = 2, rho = -1.5), quantile(rnorm(n) - 1.5 * rexp(n, rate = 2), 0.5))
##' c(qnormexp(0.95, rate = 1/10, rho = -1.5), quantile(rnorm(n) - 1.5 * rexp(n, rate = 1/10), 0.95))
##' }
qnormexp <- function(p, rate, rho){
    if(abs(rho)<1e-12){
        out <- stats::qnorm(p)
    }else if(rho>0){
        out <- sapply(p, function(iP){
            stats::uniroot(function(x){pnormexp(x, rate = rate, rho = rho) - iP},
                           lower = stats::qnorm(iP),
                           upper = (stats::qnorm(iP)+3) + (stats::qexp(iP, rate = rate) + 5/rate))$root
        })
        ## iP <- tail(p,1)
        ## pnormexp(stats::qnorm(iP), rate = rate, rho = rho) - iP
        ## pnormexp((stats::qnorm(iP)+3) + (stats::qexp(iP, rate = rate) + 5/rate), rate = rate, rho = rho) - iP
        ## hist(rnorm(1e4) + rho * rexp(1e4, rate = rate))
    }else if(rho<0){
        out <- sapply(p, function(iP){
            stats::uniroot(function(x){pnormexp(x, rate = rate, rho = rho) - iP},
                           lower = (stats::qnorm(iP)-3) - (stats::qexp(iP, rate = rate) + 5/rate),
                           upper = stats::qnorm(iP))$root
        })
    }
    return(out)
}

## * Weibull distribution
## cumulative distribution function for Z = X + \rho Y
## where X follows a standard normal distribution
## and Y a Weibull distribution with scale parameter \lambda and shape parameter k.
## Denoting \Phi the cumulative distribution function of the standard normal distribution:
## F_Z(z) = \Prob[X + \rho Y < z]
##        = \int f(x,y) \Ind[x + \rho y < z] dx dy
##        = \int_{x \in (-\infty;z]} f(x) \int_{y \in [0;(z-x)/\rho]} f(y) dy dx
##        = \int_{x \in (-\infty;z]} f(x) (1-\exp(-(z-x)^k/(\lambda\rho)^k)) dx
##        = \Phi(z) - \int_{x \in (-\infty;z]} \exp(-x^2/2-(z-x)^k/(\lambda\rho)^k)/\sqrt{2\pi} dx

##' @title Cumulative Distribution Function of a Gaussian Variable Plus a Weibull Variable
##' @noRd
##'
##' @examples
##' \dontrun{
##' n <- 1e6
##'
##' pnormweibull(0.1, scale = 1/2, shape = 1, rho = 1.5)
##' pnormweibull(0.8, scale = 1/2, shape = 1, rho = 1.5)
##' mean(rnorm(n) + 1.5 * rweibull(n, scale = 1/2, shape = 1) <= 0.1)
##' mean(rnorm(n) + 1.5 * rweibull(n, scale = 1/2, shape = 1) <= 0.8)
##'
##' pnormweibull(0.1, scale = 1/2, shape = 1, rho = -1.5)
##' pnormweibull(0.8, scale = 1/2, shape = 1, rho = -1.5)
##' mean(rnorm(n) - 1.5 * rweibull(n, scale = 1/2, shape = 1) <= 0.1)
##' mean(rnorm(n) - 1.5 * rweibull(n, scale = 1/2, shape = 1) <= 0.8)
##'
##' pnormweibull(0.1, scale = 1/2, shape = 2, rho = -1.5)
##' pnormweibull(0.8, scale = 1/2, shape = 2, rho = -1.5)
##' mean(rnorm(n) - 1.5 * rweibull(n, scale = 1/2, shape = 2) <= 0.1)
##' mean(rnorm(n) - 1.5 * rweibull(n, scale = 1/2, shape = 2) <= 0.8)
##' }
pnormweibull <- function(q, scale, shape, rho){
    if(abs(rho)<1e-12){
        out <- stats::pnorm(q)
    }else{
        if(rho>0){
            if(shape==1){
                out <- stats::pnorm(q) - exp(-(1/(scale*rho))*q+(1/(scale*rho))^2/2)*stats::pnorm(q, mean = 1/(scale*rho))
            }else{
                I <- stats::integrate(f = function(x){exp(-x^2/2)/sqrt(2*pi)*exp(-((q-x)/(rho*scale))^shape)},
                                      lower = min(-4,q - 7^(1/shape)*rho*scale), upper = q)
                out <- stats::pnorm(q) - I$value
            }
        }else if(rho<0){
            if(shape==1){
                out <- stats::pnorm(q) + exp(-(1/(scale*rho))*q+(1/(scale*rho))^2/2)*(1-stats::pnorm(q, mean = 1/(scale*rho)))
            }else{
                I <- stats::integrate(f = function(x){exp(-x^2/2)/sqrt(2*pi)*exp(-((q-x)/(rho*scale))^shape)},
                                      lower = q, upper = max(4,q - 7^(1/shape)*rho*scale))
                out <- stats::pnorm(q) + I$value
            }
        }
    }
    return(out)
}
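
## Sanity check (illustration only, arbitrary values): a Weibull distribution
## with shape 1 and scale s is an exponential distribution with rate 1/s, so
## pnormweibull with shape = 1 should agree with pnormexp, e.g.:
## pnormweibull(0.3, scale = 1/2, shape = 1, rho = 1.5) - pnormexp(0.3, rate = 2, rho = 1.5) ## ~0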
##' @title Quantile Function of a Gaussian Variable Plus a Weibull Variable
##' @noRd
##'
##' @examples
##' \dontrun{
##' n <- 5e6
##'
##' c(qnormweibull(0.5, scale = 1/2, shape = 1, rho = 1.5),
##'   quantile(rnorm(n) + 1.5 * rweibull(n, scale = 1/2, shape = 1), 0.5))
##' c(qnormweibull(0.95, scale = 10, shape = 1, rho = 1.5),
##'   quantile(rnorm(n) + 1.5 * rweibull(n, scale = 10, shape = 1), 0.95))
##'
##' c(qnormweibull(0.5, scale = 1/2, shape = 2, rho = 1.5),
##'   quantile(rnorm(n) + 1.5 * rweibull(n, scale = 1/2, shape = 2), 0.5))
##' c(qnormweibull(0.95, scale = 10, shape = 2, rho = 1.5),
##'   quantile(rnorm(n) + 1.5 * rweibull(n, scale = 10, shape = 2), 0.95))
##'
##' c(qnormweibull(0.5, scale = 1/2, shape = 1, rho = -1.5),
##'   quantile(rnorm(n) - 1.5 * rweibull(n, scale = 1/2, shape = 1), 0.5))
##' c(qnormweibull(0.95, scale = 10, shape = 1, rho = -1.5),
##'   quantile(rnorm(n) - 1.5 * rweibull(n, scale = 10, shape = 1), 0.95))
##'
##' c(qnormweibull(0.5, scale = 1/2, shape = 2, rho = -1.5),
##'   quantile(rnorm(n) - 1.5 * rweibull(n, scale = 1/2, shape = 2), 0.5))
##' c(qnormweibull(0.95, scale = 10, shape = 2, rho = -1.5),
##'   quantile(rnorm(n) - 1.5 * rweibull(n, scale = 10, shape = 2), 0.95))
##' }
qnormweibull <- function(p, scale, shape, rho){
    if(abs(rho)<1e-12){
        out <- stats::qnorm(p)
    }else if(rho>0){
        out <- sapply(p, function(iP){
            stats::uniroot(function(x){pnormweibull(x, scale = scale, shape = shape, rho = rho) - iP},
                           lower = stats::qnorm(iP),
                           upper = (stats::qnorm(iP)+3) + (stats::qweibull(iP, scale = scale, shape = shape) + 5*scale))$root
        })
    }else if(rho<0){
        out <- sapply(p, function(iP){
            stats::uniroot(function(x){pnormweibull(x, scale = scale, shape = shape, rho = rho) - iP},
                           lower = (stats::qnorm(iP)-3) - (stats::qweibull(iP, scale = scale, shape = shape) + 5*scale),
                           upper = stats::qnorm(iP))$root
        })
    }
    return(out)
}

##----------------------------------------------------------------------
### normexp.R ends here
/scratch/gouwar.j/cran-all/cranData/BuyseTest/R/normexp.R
### performance.R ---
##----------------------------------------------------------------------
## Author: Brice Ozenne
## Created: aug 3 2021 (11:17)
## Version:
## Last-Updated: jul 4 2023 (18:46)
##           By: Brice Ozenne
##     Update #: 1191
##----------------------------------------------------------------------
##
### Commentary:
##
### Change Log:
##----------------------------------------------------------------------
##
### Code:

##' @title Assess Performance of a Classifier
##' @description Assess the performance in terms of AUC and Brier score of one or several binary classifiers.
##' Currently limited to logistic regressions and random forests.
##'
##' @param object a \code{glm} or \code{ranger} object, or a list of such objects.
##' @param data [data.frame] the training data.
##' @param newdata [data.frame] an external dataset used to assess the performance.
##' @param fold.size [double, >0] either the size of the test dataset (when >1) or the fraction of the dataset (when <1) to be used for testing when using cross-validation.
##' @param fold.repetition [integer] when strictly positive, the number of times the cross-validation is repeated. If 0 then no cross-validation is performed.
##' @param fold.balance [logical] should the outcome distribution in the folds of the cross-validation be similar to the one of the original dataset?
##' @param impute [character] in presence of missing values in the regressors of the training dataset, should a complete case analysis be performed (\code{"none"})
##' or should the median/mean (\code{"median"}/\code{"mean"}) value be imputed? For categorical variables, the most frequent value is imputed.
##' @param individual.fit [logical] if \code{TRUE} the predictive model is refit for each individual using only the predictors with non-missing values.
##' @param name.response [character] the name of the response variable (i.e. the one containing the categories).
##' @param null [numeric vector of length 2] the right-hand side of the null hypothesis relative to each metric.
##' @param se [logical] should the uncertainty about the AUC/Brier score be computed?
##' When \code{TRUE}, adapts the method of LeDell et al. (2015) to repeated cross-validation for the AUC and the Brier score.
##' @param conf.level [numeric] confidence level for the confidence intervals.
##' @param auc.type [character] should the AUC be computed approximating the predicted probability by a Dirac distribution (\code{"classical"}, usual AUC formula)
##' or by a normal distribution (\code{"probabilistic"}).
##' @param transformation [logical] should the CI be computed on the logit scale / log scale for the net benefit / win ratio and backtransformed?
##' Otherwise they are computed without any transformation.
##' @param trace [logical] Should the execution of the function be traced.
##' @param simplify [logical] should the number of folds and the size of the folds used for the cross-validation be removed from the output?
##' @param seed [integer, >0] Random number generator (RNG) state used when starting the data splitting.
##' If \code{NULL} no state is set.
##'
##' @references LeDell E, Petersen M, van der Laan M. Computationally efficient confidence intervals for cross-validated area under the ROC curve estimates. Electron J Stat. 2015;9(1):1583-1607. doi:10.1214/15-EJS1035
##'
##' @return An S3 object of class \code{performance}.
##' @keywords model
##'
##' @examples
##' ## Simulate data
##' set.seed(10)
##' n <- 100
##' df.train <- data.frame(Y = rbinom(n, prob = 0.5, size = 1), X1 = rnorm(n), X2 = rnorm(n))
##' df.test <- data.frame(Y = rbinom(n, prob = 0.5, size = 1), X1 = rnorm(n), X2 = rnorm(n))
##'
##' ## fit logistic models
##' e.null <- glm(Y~1, data = df.train, family = binomial(link="logit"))
##' e.logit1 <- glm(Y~X1, data = df.train, family = binomial(link="logit"))
##' e.logit2 <- glm(Y~X1+X2, data = df.train, family = binomial(link="logit"))
##'
##' ## assess performance on the training set (biased)
##' ## and on an external dataset
##' performance(e.logit1, newdata = df.test)
##' e.perf <- performance(list(null = e.null, p1 = e.logit1, p2 = e.logit2),
##'                       newdata = df.test)
##' e.perf
##' summary(e.perf, order.model = c("null","p2","p1"))
##'
##' ## assess performance using cross-validation
##' \dontrun{
##' set.seed(10)
##' performance(e.logit1, fold.repetition = 10, se = FALSE)
##' set.seed(10)
##' performance(list(null = e.null, prop = e.logit1), fold.repetition = 10)
##' performance(e.logit1, fold.repetition = c(50,20,10))
##' }

## * performance
##' @export
performance <- function(object, data = NULL, newdata = NA, individual.fit = FALSE, impute = "none",
                        name.response = NULL, fold.size = 1/10, fold.repetition = 0, fold.balance = FALSE,
                        null = c(brier = NA, AUC = 0.5), conf.level = 0.95, se = TRUE,
                        transformation = TRUE, auc.type = "classical", simplify = TRUE, trace = TRUE, seed = NULL){

    out <- list(call = match.call(),
                response = list(),
                performance = NULL,
                prediction = list(),
                iid.auc = list(),
                iid.brier = list())
    if(se==FALSE){
        out$iid.auc <- NULL
        out$iid.brier <- NULL
    }

    ## ** fix randomness
    stop.after.init <- FALSE
    if(identical(seed,"only")){
        stop.after.init <- TRUE
    }else if(!is.null(seed)){
        if(!is.null(get0(".Random.seed"))){ ## avoid error when .Random.seed does not exist, e.g. fresh R session with no call to the RNG
            old <- .Random.seed ## save the current seed
            on.exit(.Random.seed <<- old) ## restore the current seed (before the call to the function)
        }else{
            on.exit(rm(.Random.seed, envir=.GlobalEnv))
        }
        set.seed(seed)
    }

    ## ** normalize user input
    init.args <- .performance_args(object = object, data = data, newdata = newdata, individual.fit = individual.fit,
                                   impute = impute, name.response = name.response,
                                   fold.size = fold.size, fold.repetition = fold.repetition, fold.balance = fold.balance,
                                   null = null, conf.level = conf.level, se = se, transformation = transformation,
                                   auc.type = auc.type, simplify = simplify, trace = trace, seed = seed)
    for(iL in init.args$args){
        assign(x = iL, init.args[[iL]])
    }
    internal <- init.args$internal
    fold.allnumber <- init.args$fold.allnumber
    fold.group <- init.args$fold.group
    fold.test <- init.args$fold.test
    save.data <- init.args$save.data
    names.object <- names(object)
    n.object <- length(object)

    ## ** initialize data
    init.data <- .performance_init(object = object, data = data, newdata = newdata, individual.fit = individual.fit,
                                   name.response = name.response, fold.size = fold.size, fold.repetition = fold.repetition,
                                   fold.balance = fold.balance, fold.group = fold.group, fold.test = fold.test,
                                   internal = internal, trace = trace)
    data <- init.data$data
    newdata <- init.data$newdata
    ref.response <- init.data$ref.response
    ref.response.num <- init.data$ref.response.num
    nobs.object <- init.data$nobs.object
    data.missingPattern <- init.data$data.missingPattern ## only data
    newdata.missingPattern <- init.data$newdata.missingPattern ## data, data+newdata, or newdata according to argument internal
    external <- !is.null(newdata)
    nobs.newdata <- NROW(newdata)
    fold.size <- init.data$fold.size
    fold.test <- init.data$fold.test
    ## any(sapply(fold.test, function(x){any(duplicated(x))}))
    ## sapply(fold.test, function(x){min(x, na.rm=TRUE)})
    ## sapply(fold.test, function(x){max(x, na.rm=TRUE)})
    ## sapply(fold.test, function(x){sum(!is.na(x))})
    if(stop.after.init){
        return(fold.test)
    }

    ## ** predictions
    if(trace){
        cat(" Assessment of the predictive performance of ",n.object," model", if(n.object>1){"s"},"\n\n",sep="")
        cat("- Prediction: ")
    }

    ## *** internal/external
    if(internal||external){
        if(trace){
            txt <- NULL
            if(internal){txt <- c(txt,"internal")}
            if(external){txt <- c(txt,"external")}
            cat(paste(txt, collapse = " and "), sep = "")
        }
        if(internal){
            data.test <- rbind(data, newdata[,colnames(data),drop=FALSE])
        }else{
            data.test <- newdata
        }
        perf.intext <- .performance_runfold(object, names.object = names.object, n.object = n.object,
                                            data.train = data, n.train = nobs.object, impute = impute,
                                            data.test = data.test, n.test = NROW(data.test),
                                            missingPattern.test = newdata.missingPattern,
                                            individual.fit = individual.fit, auc.type = auc.type, se = se, trace = trace-1)
        if(internal){
            internal.predictions <- perf.intext$estimate[1:NROW(data),,drop=FALSE]
            if(auc.type == "probabilistic"){
                internal.se.predictions <- perf.intext$se[1:NROW(data),,drop=FALSE]
            }else{
                internal.se.predictions <- NULL
            }
            if(se>1){
                internal.iid.predictions <- perf.intext$iid[1:NROW(data),,,drop=FALSE]
            }else{
                internal.iid.predictions <- NULL
            }
        }
        if(external){
            if(internal){
                external.predictions <- perf.intext$estimate[(NROW(data)+1):(NROW(data)+NROW(newdata)),,drop=FALSE]
                if(auc.type == "probabilistic"){
                    external.se.predictions <- perf.intext$se[(NROW(data)+1):(NROW(data)+NROW(newdata)),,drop=FALSE]
                }else{
                    external.se.predictions <- NULL
                }
                if(se>1){
                    external.iid.predictions <- perf.intext$iid[(NROW(data)+1):(NROW(data)+NROW(newdata)),,,drop=FALSE]
                }else{
                    external.iid.predictions <- NULL
                }
            }else{
                external.predictions <- perf.intext$estimate
                if(auc.type == "probabilistic"){
                    external.se.predictions <- perf.intext$se
                }else{
                    external.se.predictions <- NULL
                }
                if(se>1){
                    external.iid.predictions <- perf.intext$iid
                }else{
                    external.iid.predictions <- NULL
                }
            }
        }
        if(trace){
            cat("\n")
        }
    }

    ## *** cross validation
    if(any(fold.repetition>0)){
        if(trace){
            if(internal||external){
                space <- rep(" ",14)
            }else{
                space <- ""
            }
            if(fold.group>1){
                cat(space,fold.group," folds cross-validation repeated ",fold.repetition," times \n",sep="")
            }else{
                cat(space," 1 fold cross-validation with ",fold.size," samples repeated ",fold.repetition," times \n",sep="")
            }
        }

        ## *** get predictions
        cv.indexing <- array(NA, dim = c(sum(fold.size), 3, fold.repetition),
                             dimnames = list(NULL, c("observation","repetition","fold"), NULL))
        cv.predictions <- array(NA, dim = c(sum(fold.size), n.object, fold.repetition),
                                dimnames = list(NULL, names.object, NULL))
        if(auc.type == "probabilistic"){
            cv.se.predictions <- array(NA, dim = c(sum(fold.size), n.object, fold.repetition),
                                       dimnames = list(NULL, names.object, NULL))
        }
        if(se>1){
            cv.iid.predictions <- setNames(lapply(1:n.object, function(iFold){
                array(0, dim = c(nobs.object, sum(fold.size), fold.repetition))
            }), names.object)
        }
        if(trace){
            pb <- utils::txtProgressBar(max = fold.repetition, style = 3)
        }

        for(iRepeat in 1:fold.repetition){ ## iRepeat <- 1
            if(trace){
                utils::setTxtProgressBar(pb, iRepeat)
            }
            cv.indexing[,"repetition",iRepeat] <- iRepeat

            for(iFold in 1:fold.group){ ## iFold <- 1
                indexData.iFoldTest <- na.omit(fold.test[[iRepeat]][iFold,])
                indexData.iFoldTrain <- setdiff(1:nobs.object,indexData.iFoldTest)
                indexStore.iFoldTest <- (1+sum(c(0,fold.size)[1:iFold])):sum(fold.size[1:iFold])
                cv.indexing[indexStore.iFoldTest,"observation",iRepeat] <- indexData.iFoldTest
                cv.indexing[indexStore.iFoldTest,"fold",iRepeat] <- iFold

                if(!is.null(data.missingPattern)){
                    iData.missingPattern <- lapply(data.missingPattern, function(iModel){ ## iModel <- newdata.missingPattern[[1]]
                        iOut <- droplevels(iModel[indexData.iFoldTest])
                        attr(iOut, "index") <- lapply(attr(iModel, "index"), FUN = function(iVec){which(indexData.iFoldTest %in% iVec)})
                        attr(iOut, "index")[sapply(attr(iOut, "index"), length)==0] <- NULL
                        attr(iOut, "formula") <- attr(iModel, "formula")[names(attr(iOut, "index"))]
                        return(iOut)
                    })
                }else{
                    iData.missingPattern <- NULL
                }

                iPerf.fold <- .performance_runfold(object, names.object = names.object, n.object = n.object,
                                                   data.train = data[indexData.iFoldTrain,,drop=FALSE], n.train = length(indexData.iFoldTrain),
                                                   impute = impute,
                                                   data.test = data[indexData.iFoldTest,,drop=FALSE], n.test = length(indexData.iFoldTest),
                                                   missingPattern.test = iData.missingPattern,
                                                   individual.fit = individual.fit, auc.type = auc.type, se = se, trace = FALSE)
                cv.predictions[indexStore.iFoldTest,,iRepeat] <- iPerf.fold$estimate
                if(auc.type == "probabilistic"){
                    cv.se.predictions[indexStore.iFoldTest,,iRepeat] <- iPerf.fold$se
                }
                if(se>1){
                    for(iO in 1:n.object){
                        cv.iid.predictions[[iO]][indexData.iFoldTrain,indexStore.iFoldTest,iRepeat] <- iPerf.fold$iid[[iO]]
                    }
                }
            }
        }
    }
    if(trace){cat("\n")}

    ## ** Performance
    if(trace){cat("- Performance:")}
    ls.auc <- list()
    ls.brier <- list()

    ## *** internal
    if(internal){
        if(trace){cat(" internal")}
        internal.auc <- data.frame(matrix(NA, nrow = n.object, ncol = 7,
                                          dimnames = list(names.object,c("model","estimate","se","lower","upper","p.value","p.value_comp"))))
        internal.brier <- data.frame(matrix(NA, nrow = n.object, ncol = 7,
                                            dimnames = list(names.object,c("model","estimate","se","lower","upper","p.value","p.value_comp"))))
        if(se>0){
            internal.iid.auc <- matrix(NA, nrow = NROW(data), ncol = n.object, dimnames = list(NULL,names.object))
            internal.iid.brier <- matrix(NA, nrow = NROW(data), ncol = n.object, dimnames = list(NULL,names.object))
        }
        ls.auc$internal <- setNames(vector(mode = "list", length = n.object), names.object)
        ls.brier$internal <- setNames(vector(mode = "list", length = n.object), names.object)

        for(iO in 1:n.object){ ## iO <- 1
            if(any(is.na(internal.predictions[,iO]))){
                internal.auc[iO,"model"] <- names.object[iO]
                internal.brier[iO,"model"] <- names.object[iO]
                next
            }
            ## AUC
            ls.auc$internal[[iO]] <- auc(labels = data$XXresponseXX, predictions = internal.predictions[,iO],
                                         add.halfNeutral = TRUE, null = null["AUC"], conf.level = conf.level,
                                         transformation = transformation)
            internal.auc[iO,c("model","estimate","se","lower","upper","p.value")] <- cbind(model = names.object[iO], confint(ls.auc$internal[[iO]]))
            if(se>0){
                internal.iid.auc[,iO] <- iid(ls.auc$internal[[iO]])
                if(iO>1){
                    iStat <- (internal.auc[iO,"estimate"] - internal.auc[iO-1,"estimate"]) / sqrt(crossprod(internal.iid.auc[,iO]-internal.iid.auc[,iO-1]))
                    internal.auc[iO,"p.value_comp"] <- 2*(1-stats::pnorm(abs(iStat)))
                }
            }
            ## Brier score
            ls.brier$internal[[iO]] <- brier(labels = data$XXresponseXX, predictions = internal.predictions[,iO],
                                             iid = internal.iid.predictions[[iO]],
                                             null = null["brier"], conf.level = conf.level, transformation = transformation)
            internal.brier[iO,c("model","estimate","se","lower","upper","p.value")] <- cbind(model = names.object[iO], confint(ls.brier$internal[[iO]]))
            if(se>0){
                internal.iid.brier[,iO] <- iid(ls.brier$internal[[iO]])
                if(iO>1){
                    iStat <- (internal.brier[iO,"estimate"] - internal.brier[iO-1,"estimate"]) / sqrt(crossprod(internal.iid.brier[,iO]-internal.iid.brier[,iO-1]))
                    internal.brier[iO,"p.value_comp"] <- 2*(1-stats::pnorm(abs(iStat)))
                }
            }
        }

        ## export
        out$performance <- rbind(out$performance,
                                 cbind(method = "internal", metric = "auc", internal.auc),
                                 cbind(method = "internal", metric = "brier", internal.brier))
        out$response[["internal"]] <- data$XXresponseXX
        out$prediction[["internal"]] <- internal.predictions
        if(se>0){
            out$iid.auc[["internal"]] <- internal.iid.auc
            out$iid.brier[["internal"]] <- internal.iid.brier
        }
        if(trace){cat("(done)")}
    }
    ## *** external
    if(external){
        if(trace){cat(" external")}
        external.auc <- data.frame(matrix(NA, nrow = n.object, ncol = 7,
                                          dimnames = list(NULL,c("model","estimate","se","lower","upper","p.value","p.value_comp"))))
        external.brier <- data.frame(matrix(NA, nrow = n.object, ncol = 7,
                                            dimnames = list(NULL,c("model","estimate","se","lower","upper","p.value","p.value_comp"))))
        if(se>0){
            external.iid.auc <- matrix(NA, nrow = nobs.newdata, ncol = n.object, dimnames = list(NULL,names.object))
            external.iid.brier <- matrix(NA, nrow = NROW(data) + nobs.newdata, ncol = n.object, dimnames = list(NULL,names.object))
        }
        ls.auc$external <- setNames(vector(mode = "list", length = n.object), names.object)
        ls.brier$external <- setNames(vector(mode = "list", length = n.object), names.object)

        for(iO in 1:n.object){
            if(any(is.na(external.predictions[,iO]))){
                external.auc[iO,"model"] <- names.object[iO]
                external.brier[iO,"model"] <- names.object[iO]
                next
            }
            ## AUC
            ls.auc$external[[iO]] <- auc(labels = newdata$XXresponseXX, predictions = external.predictions[,iO],
                                         add.halfNeutral = TRUE, null = null["AUC"], conf.level = conf.level,
                                         transformation = transformation)
            external.auc[iO,c("model","estimate","se","lower","upper","p.value")] <- cbind(model = names.object[iO], confint(ls.auc$external[[iO]]))
            if(se>0){
                external.iid.auc[,iO] <- iid(ls.auc$external[[iO]])
                if(iO>1){
                    iStat <- (external.auc[iO,"estimate"] - external.auc[iO-1,"estimate"]) / sqrt(crossprod(external.iid.auc[,iO]-external.iid.auc[,iO-1]))
                    external.auc[iO,"p.value_comp"] <- 2*(1-stats::pnorm(abs(iStat)))
                }
            }
            ## Brier score
            ls.brier$external[[iO]] <- brier(labels = newdata$XXresponseXX, predictions = external.predictions[,iO],
                                             iid = external.iid.predictions[[iO]], observation = "external",
                                             null = NA, conf.level = conf.level, transformation = transformation)
            external.brier[iO,c("model","estimate","se","lower","upper","p.value")] <- cbind(model = names.object[iO], confint(ls.brier$external[[iO]]))
            if(se>0){
                external.iid.brier[,iO] <- iid(ls.brier$external[[iO]])
                if(iO>1){
                    iStat <- (external.brier[iO,"estimate"] - external.brier[iO-1,"estimate"]) / sqrt(crossprod(external.iid.brier[,iO]-external.iid.brier[,iO-1]))
                    external.brier[iO,"p.value_comp"] <- 2*(1-stats::pnorm(abs(iStat)))
                }
            }
        }

        ## export
        out$performance <- rbind(out$performance,
                                 cbind(method = "external", metric = "auc", external.auc),
                                 cbind(method = "external", metric = "brier", external.brier))
        out$response[["external"]] <- newdata$XXresponseXX
        out$prediction[["external"]] <- external.predictions
        if(se>0){
            out$iid.auc[["external"]] <- external.iid.auc
            out$iid.brier[["external"]] <- external.iid.brier
        }
        if(trace){cat("(done)")}
    }

    ## *** cross-validation
    if(fold.repetition>0){
        if(trace){cat(" CV")}
        n.number <- length(fold.allnumber)
        name.col <- c("model","estimate","se","lower","upper","p.value","p.value_comp","foldCV.number","foldCV.size")
        cv.auc <- data.frame(matrix(NA, nrow = n.object*n.number, ncol = length(name.col), dimnames = list(NULL,name.col)))
        cv.brier <- data.frame(matrix(NA, nrow = n.object*n.number, ncol = length(name.col), dimnames = list(NULL,name.col)))
        if(se>0){
            cv.iid.auc <- matrix(NA, nrow = NROW(data), ncol = n.object, dimnames = list(NULL,names.object))
            cv.iid.brier <- matrix(NA, nrow = NROW(data), ncol = n.object, dimnames = list(NULL,names.object))
        }
        ls.auc$cv <- setNames(vector(mode = "list", length = n.object), names.object)
        ls.brier$cv <- setNames(vector(mode = "list", length = n.object), names.object)

        for(iO in 1:n.object){ ## iO <- 2
            for(iNumber in 1:n.number){ ## iNumber <- 1
                ## prepare
                iObs <- as.double(cv.indexing[,"observation",1:fold.allnumber[iNumber]])
                iRepeat <- as.double(cv.indexing[,"repetition",1:fold.allnumber[iNumber]])
                iPred <- as.double(cv.predictions[,iO,1:fold.allnumber[iNumber]])
                if(se>1){
                    iCV.iid <- cv.iid.predictions[[iO]][,,1:fold.allnumber[iNumber],drop=FALSE]
                }else{
                    iCV.iid <- NULL
                }
                iCurrent <- (iO-1)*n.number+iNumber
                iPrevious <- (iO-2)*n.number+iNumber

                ## remove folds with missing values
                iRepeat.NA <- unique(iRepeat[is.na(iPred)])
                if(length(iRepeat.NA)>0){
                    iPred <- iPred[iRepeat %in% iRepeat.NA == FALSE]
                    iObs <- iObs[iRepeat %in% iRepeat.NA == FALSE]
                    if(se>1){
                        iCV.iid <- iCV.iid[,,-iRepeat.NA,drop=FALSE]
                    }
                    iRepeat <- iRepeat[iRepeat %in% iRepeat.NA == FALSE]
                    if(length(iRepeat)==0){
                        cv.auc[iCurrent,"model"] <- names.object[iO]
                        cv.brier[iCurrent,"model"] <- names.object[iO]
                        next
                    }
                }

                ## AUC
                iAUC <- auc(labels = data$XXresponseXX, predictions = iPred, fold = iRepeat, observation = iObs,
                            add.halfNeutral = TRUE, null = null["AUC"], conf.level = conf.level, transformation = transformation)
                cv.auc[iCurrent,setdiff(name.col,"p.value_comp")] <- cbind(model = names.object[iO], confint(iAUC),
                                                                           foldCV.number = fold.allnumber[iNumber], foldCV.size = sum(fold.size))
                if(iNumber==1){
                    ls.auc$cv[[iO]] <- iAUC
                    if(se>0){
                        cv.iid.auc[,iO] <- iid(iAUC)
                        if(iO>1){
                            iStat <- (cv.auc[iCurrent,"estimate"] - cv.auc[iPrevious,"estimate"]) / sqrt(crossprod(cv.iid.auc[,iO]-cv.iid.auc[,iO-1]))
                            cv.auc[iCurrent,"p.value_comp"] <- 2*(1-stats::pnorm(abs(iStat)))
                        }
                    }
                }
                ## sqrt(crossprod(rowMeans(attr(ls.auc[[iO]],"iid")))) - confint(ls.auc[[iO]])["se"]

                ## Brier score
                iBrier <- brier(labels = data$XXresponseXX, predictions = iPred, fold = iRepeat, observation = iObs,
                                iid = iCV.iid, null = null["brier"], conf.level = conf.level, transformation = transformation)
                cv.brier[iCurrent,setdiff(name.col,"p.value_comp")] <- cbind(model = names.object[iO], confint(iBrier),
                                                                             foldCV.number = fold.allnumber[iNumber], foldCV.size = sum(fold.size))
                if(iNumber==1){
                    ls.brier$cv[[iO]] <- iBrier
                    if(se>0){
                        cv.iid.brier[,iO] <- iid(iBrier)
                        if(iO>1){
                            iStat <- (cv.brier[iCurrent,"estimate"] - cv.brier[iPrevious,"estimate"]) / sqrt(crossprod(cv.iid.brier[,iO]-cv.iid.brier[,iO-1]))
                            cv.brier[iCurrent,"p.value_comp"] <- 2*(1-stats::pnorm(abs(iStat)))
                        }
                    }
                }
            }
        }

        ## export
        if(!is.null(out$performance)){
            out$performance$foldCV.size <- NA
            out$performance$foldCV.number <- NA
        }
        out$performance <- rbind(out$performance,
                                 cbind(method = "cv", metric = "auc", cv.auc),
                                 cbind(method = "cv", metric = "brier", cv.brier))
        if(simplify){
            out$performance$foldCV.size <- NULL
            if(n.number==1){
                out$performance$foldCV.number <- NULL
            }
        }
        out$response[["cv"]] <- data$XXresponseXX
        out$prediction[["cv"]] <- cv.predictions
        attr(out$prediction[["cv"]],"index") <- cv.indexing
        if(se>0){
            out$iid.auc[["cv"]] <- cv.iid.auc
            out$iid.brier[["cv"]] <- cv.iid.brier
        }
        if(trace){cat("(done)")}
    }
    out$auc <- ls.auc
    out$brier <- ls.brier
    if(trace){cat("\n")}

    ## ** export
    if(save.data){out$data <- data}
    out$args <- list(individual.fit = individual.fit, impute = impute, name.response = name.response,
                     fold.size = fold.size, fold.repetition = fold.repetition, fold.balance = fold.balance,
                     null = null, conf.level = conf.level, transformation = transformation, auc.type = auc.type,
                     seed = seed)
    rownames(out$performance) <- NULL
    class(out) <- append("performance",class(out))
    return(out)
}
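
## Usage sketch (not run; e.logit1 refers to a fitted logistic model as in the
## examples above): the special value seed = "only" makes performance() stop
## right after initialization and return the cross-validation split (a list
## with one matrix of test indices per repetition), which helps inspecting or
## reusing the folds:
## set.seed(10)
## ls.fold <- performance(e.logit1, fold.repetition = 2, seed = "only")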
## * .performance_args
## normalize user input
.performance_args <- function(object, data, newdata, individual.fit, impute, name.response,
                              fold.size, fold.repetition, fold.balance, null, conf.level, se,
                              transformation, auc.type, simplify, trace, seed){

    ## ** object argument
    ## convert object into a list of models with names
    if(inherits(object,"ranger")){
        if(any(names(object$call)[-1]=="")){
            stop("All arguments must be named when calling ranger. \n")
        }
        if("probability" %in% names(object$call) == FALSE || object$call$probability == FALSE){
            stop("Argument \'probability\' must be set to TRUE when calling ranger. \n")
        }
        object <- list(ranger1 = object)
    }else if(inherits(object,"randomForest")){
        object <- list(randomForest1 = object)
    }else if(inherits(object,"glm")){
        if(object$family$family %in% c("binomial","quasibinomial") == FALSE){
            stop("Cannot handle glm with family other than binomial or quasibinomial. \n")
        }
        if(object$family$link!="logit"){
            stop("Cannot handle glm with link other than logit. \n")
        }
        object <- list(logit1 = object)
    }else if(inherits(object,"miss.glm")){
        object <- list(logit1 = object)
    }else if(!is.list(object)){
        stop("Argument \'object\' should be a \'glm\' object, a \'miss.glm\' object, or a \'ranger\' object. \n")
    }else{
        possible.names <- lapply(1:length(object), function(iO){
            if(inherits(object[[iO]],"ranger")){
                if(any(names(object[[iO]]$call)[-1]=="")){
                    stop("All arguments must be named when calling ranger. \n")
                }
                if("probability" %in% names(object[[iO]]$call) == FALSE || object[[iO]]$call$probability == FALSE){
                    stop("Argument \'probability\' must be set to TRUE when calling ranger. \n")
                }
                return(paste0("ranger",iO))
            }else if(inherits(object[[iO]],"randomForest")){
                return(paste0("randomForest",iO))
            }else if(inherits(object[[iO]],"glm")){
                if(object[[iO]]$family$family %in% c("binomial","quasibinomial") == FALSE){
                    stop("Cannot handle glm with family other than binomial or quasibinomial. \n")
                }
                if(object[[iO]]$family$link!="logit"){
                    stop("Cannot handle glm with link other than logit. \n")
                }
                return(paste0("logit",iO))
            }else if(inherits(object[[iO]],"miss.glm")){
                ## nothing
            }else{
                stop("Argument \'object\' should be a list containing \'glm\', \'miss.glm\' or \'ranger\' objects. \n")
            }
        })
        if(is.null(names(object))){
            names(object) <- possible.names
        }
    }
\n") }else{ possible.names <- lapply(1:length(object), function(iO){ if(inherits(object[[iO]],"ranger")){ if(any(names(object[[iO]]$call)[-1]=="")){ stop("All arguments must be named when calling ranger. \n") } if("probability" %in% names(object[[iO]]$call) == FALSE || object[[iO]]$call$probability == FALSE){ stop("Argument \'probability\' must be set to TRUE when calling ranger. \n") } return(paste0("ranger",iO)) }else if(inherits(object[[iO]],"randomForest")){ return(paste0("randomForest",iO)) }else if(inherits(object[[iO]],"glm")){ if(object[[iO]]$family$family %in% c("binomial","quasibinomial") == FALSE){ stop("Cannot handle glm with family other than binomial or quasibinomial. \n") } if(object[[iO]]$family$link!="logit"){ stop("Cannot handle glm with link other than logit. \n") } return(paste0("logit",iO)) }else if(inherits(object[[iO]],"miss.glm")){ ## nothing }else{ stop("Argument \'object\' should be a list containing \'glm\', \'miss.glm\' or \'ranger\' objects. \n") } }) if(is.null(names(object))){ names(object) <- possible.names } } ## *** impute argument impute <- match.arg(impute, c("none","median","mean")) ## *** name.response argument ## extract name of the outcome variable if(is.null(name.response)){ name.response <- all.vars(formula(object[[1]]))[1] } ## *** data argument if(identical(data,FALSE) || identical(data,NA)){ internal <- FALSE data <- NULL }else if(identical(attr(data,"internal"), FALSE)){ internal <- FALSE }else{ internal <- TRUE } ## extract full training data if(is.null(data)){ ls.data <- lapply(object, stats::model.frame) if(length(unique(sapply(object, NROW)))>1){ stop("All models should be fitted using the same dataset. \n") } ls.names <- lapply(ls.data,names) index.max <- which.max(sapply(ls.names, length))[1] max.names <- ls.names[[index.max]] test.max <- sapply(ls.names, function(iNames){all(iNames %in% max.names)}) data <- ls.data[[index.max]] if(all(test.max)==FALSE){ for(iObj in 1:length(object)){ add.cols <- ls.names[[iObj]][ls.names[[iObj]] %in% colnames(data)] if(length(add.cols)>1){ data <- cbind(data,ls.data[[iObj]][,add.cols]) } } } save.data <- TRUE }else{ save.data <- FALSE } data <- as.data.frame(data) if(name.response %in% names(data) == FALSE){ stop("Could not find the variable ",name.response," in argument \'data\'. \n") } if(any(is.na(data[[name.response]]))){ stop("Cannot handle missing data in the outcome variable (argument data). \n") } ## *** newdata argument if(identical(newdata,NA) || identical(newdata,FALSE)){ newdata <- NULL } if(!is.null(newdata)){ if(name.response %in% names(newdata) == FALSE){ stop("Could not find the variable ",name.response," in argument \'newdata\'. \n") } if(any(is.na(newdata[[name.response]]))){ stop("Cannot handle missing data in the outcome variable (argument newdata). \n") } if(any(names(data) %in% names(newdata) == FALSE)){ stop("Argument \'newdata\' should contain column(s) \"",paste(names(data)[names(data) %in% names(newdata) == FALSE], collapse = "\" \""),"\" \n") } } ## *** null argument ## null hypothesis if("brier" %in% names(null) == FALSE){ stop("Argument \'null\' should be a vector with an element called brier. \n", "(corresponding to the null hypothesis for the brier score, possibly NA)") } if("AUC" %in% names(null) == FALSE){ stop("Argument \'null\' should be a vector with an element called AUC. 
\n", "(corresponding to the null hypothesis for the AUC, possibly NA)") } ## *** auc.type argument auc.type <- match.arg(auc.type,c("classical","probabilistic")) if(auc.type %in% "probabilistic"){ stop("Probabilistic AUC not yet implemented. \n", "Consider setting the argument \'auc.type\' to \"classical\". \n") } ## *** conf.level argument if(se==FALSE){ conf.level <- NA }else if(is.na(conf.level)){ se <- FALSE } ## *** fold.repetition arguments if(is.data.frame(fold.repetition)){ if(any(names(fold.repetition) %in% c("observation","fold","repetition") == FALSE)){ stop("When a data.frame, argument \'fold.repetition\' should contain columns \"observation\", \"fold\", \"repetition\". \n") } df.fold <- fold.repetition fold.repetition <- length(unique(df.fold$repetition)) fold.size <- max(tapply(df.fold$repetition,interaction(df.fold$repetition,df.fold$fold), length)) fold.group <- length(unique(df.fold$fold)) if(any(df.fold$repetition %in% 1:fold.repetition==FALSE)){ stop("Incorrect values for argument \'fold.repetition\' in column \"repetition\". \n", "Should contain values between 1 and ",fold.repetition,". \n") } if(any(df.fold$fold %in% 1:fold.group==FALSE)){ stop("Incorrect values for argument \'fold.repetition\' in column \"fold\". \n", "Should contain values between 1 and ",fold.group,". \n") } if(any(df.fold$observation %in% 1:NROW(data)==FALSE)){ stop("Incorrect values for argument \'fold.repetition\' in column \"observation\". \n", "Should contain values between 1 and ",NROW(data),". \n") } size.tempo <- unique(tapply(df.fold$repetition,df.fold$repetition, length)) if(length(size.tempo)>1){ stop("Incorrect structure for argument \'fold.repetition\'. \n", "Should contain the same number of lines for each repetitions. \n") } fold.test <- lapply(1:fold.repetition, function(iRep){ matrix(NA, nrow = fold.group, ncol = fold.size) }) for(iRep in 1:fold.repetition){ iIndex <- df.fold[df.fold$repetition==iRep,c("observation","fold","repetition")] for(iFold in 1:fold.group){ fold.test[[iRep]][iFold,1:sum(iIndex$fold == iFold)] <- iIndex[iIndex$fold == iFold,"observation"] } } }else{ fold.test <- NULL } if(is.na(fold.repetition) || is.null(fold.repetition)){ fold.repetition <- 0 } if(any(fold.repetition<0)){ stop("Argument \'fold.repetition\' must be positive \n") } if(any(fold.repetition %% 1 > 0)){ stop("Argument \'fold.repetition\' must be an integer \n") } ## *** fold.size arguments if(fold.size<=0){ stop("Argument \'fold.size\' must be strictly positive \n") } ## *** fold.repetition arguments if(any(fold.repetition>0)){ fold.allnumber <- fold.repetition if(length(fold.repetition)>1){ ## enable to run once performance and get the results with various number of repetitions fold.repetition <- max(fold.allnumber) } nobs.object <- NROW(data) if(fold.size<1){ fold.group <- ceiling(1/fold.size) fold.size <- rep(ceiling(nobs.object / fold.group),fold.group) if(sum(fold.size)>nobs.object){ n.extra <- sum(fold.size)-nobs.object fold.size[(fold.group-n.extra+1):fold.group] <- fold.size[(fold.group-n.extra+1):fold.group]-1 } }else{ fold.group <- 1 } }else{ fold.allnumber <- 0 fold.group <- NA } ## ** export args <- names(match.call()[-1]) n.args <- length(args) out <- stats::setNames(vector(mode = "list", length = n.args), args) for(iL in 1:n.args){ iE <- environment()[[args[iL]]] if(!is.null(iE)){ out[[args[iL]]] <- iE } } out$args <- args out$internal <- internal out$fold.allnumber <- fold.allnumber out$fold.group <- fold.group out$fold.test <- fold.test out$save.data <- save.data return(out) } ## 
## * .performance_init
## initialize data and missing data patterns
.performance_init <- function(object, data, newdata, individual.fit, name.response,
                              fold.size, fold.repetition, fold.balance, fold.group, fold.test,
                              internal, trace){

    ## ** extract reference level of the outcome variable
    ref.response <- sort(data[[name.response]], decreasing = TRUE)[1]

    ## ** normalize the outcome variable in data
    if("XXresponseXX" %in% names(data)){
        stop("No column called \"XXresponseXX\" should exist in argument \'data\'. \n",
             "This name is used internally.\n")
    }
    if(is.character(data[[name.response]])){
        data$XXresponseXX <- as.factor(data[[name.response]])
    }else if(is.logical(data[[name.response]])){
        data$XXresponseXX <- as.numeric(data[[name.response]])
    }else{
        data$XXresponseXX <- data[[name.response]]
    }
    if(is.factor(data$XXresponseXX)){
        if(length(levels(data$XXresponseXX))==2){
            data$XXresponseXX <- as.numeric(data$XXresponseXX)-1
            ref.response.num <- as.numeric(ref.response)-1
        }else stop("In argument \'data\', the column corresponding to the argument \'name.response\' should take exactly two different values. \n",
                   "Unique values found: \"",paste0(levels(data$XXresponseXX), collapse = "\" \""),"\".\n")
    }else if(any(data$XXresponseXX %in% 0:1 == FALSE)){
        stop("In argument \'data\', the column corresponding to the argument \'name.response\' should correspond to a binary variable. \n",
             "Unique values found: \"",paste0(levels(data$XXresponseXX), collapse = "\" \""),"\".\n")
    }else{
        ref.response.num <- ref.response
    }

    ## ** normalize the outcome variable in newdata
    if(!is.null(newdata)){
        newdata <- as.data.frame(newdata)
        if("XXresponseXX" %in% names(newdata)){
            stop("No column called \"XXresponseXX\" should exist in argument \'newdata\'. \n",
                 "This name is used internally.\n")
        }
        if(is.character(newdata[[name.response]])){
            newdata$XXresponseXX <- as.factor(newdata[[name.response]])
        }else if(is.logical(newdata[[name.response]])){
            newdata$XXresponseXX <- as.numeric(newdata[[name.response]])
        }else{
            newdata$XXresponseXX <- newdata[[name.response]]
        }
        if(is.factor(newdata$XXresponseXX)){
            if(length(levels(newdata$XXresponseXX))==2){
                newdata$XXresponseXX <- as.numeric(newdata$XXresponseXX)-1
            }else stop("In argument \'newdata\', the column corresponding to the argument \'name.response\' should take exactly two different values. \n",
                       "Unique values found: \"",paste0(levels(newdata$XXresponseXX), collapse = "\" \""),"\".\n")
        }else if(any(newdata$XXresponseXX %in% 0:1 == FALSE)){
            stop("In argument \'newdata\', the column corresponding to the argument \'name.response\' should correspond to a binary variable. \n",
                 "Unique values found: \"",paste0(levels(newdata$XXresponseXX), collapse = "\" \""),"\".\n")
        }
    }

    ## ** extract sample size of the training set
    nobs.object <- unname(unique(sapply(object,function(iO){ ## iO <- object$RF
        if(inherits(iO,"glm")){
            return(stats::nobs(iO))
        }else if(inherits(iO,"ranger")){
            return(iO$num.samples)
        }else if(inherits(iO,"randomForest")){
            return(NROW(iO$votes))
        }
    })))
    if(length(nobs.object)!=1){
        if(trace){
            message("The training set seems to differ in size between models: ",paste0(nobs.object, collapse = ", "),". \n")
        }
        nobs.object <- max(nobs.object)
    }
\n") } nobs.object <- max(nobs.object) } ## ** define missing data patterns if(individual.fit){ ## and formula/index of observations for each missing data pattern object.formula <- lapply(object,function(iO){ ff <- try(formula(iO), silent = TRUE) if(inherits(ff,"try-error")){ return(eval(iO$call[[2]])) }else{ return(ff) } }) object.xvar <- lapply(object.formula, function(iF){all.vars(stats::delete.response(stats::terms(iF)))}) ## data nobs.object <- NROW(data) object.iformula <- setNames(vector(mode = "list", length = length(object)), names(object)) data.missingPattern <- setNames(vector(mode = "list", length = length(object)), names(object)) for(iO in 1:length(object)){ ## iO <- 1 iTest.na <- is.na(data[,object.xvar[[iO]],drop=FALSE]) ## fields::image.plot(iTest.na) object.iformula[[iO]] <- apply(iTest.na,1,function(iRow){ if(any(iRow)){ iNewFF <- as.formula(paste(".~.-",paste(colnames(iTest.na)[iRow],collapse="-"))) return(stats::update(object.formula[[iO]],iNewFF)) }else{ return(object.formula[[iO]]) } }) data.missingPattern[[iO]] <- interaction(as.data.frame(1*iTest.na),drop=TRUE) attr(data.missingPattern[[iO]],"index") <- tapply(1:length(data.missingPattern[[iO]]),data.missingPattern[[iO]],list) attr(data.missingPattern[[iO]],"formula") <- lapply(attr(data.missingPattern[[iO]],"index"),function(iVec){object.iformula[[iO]][[iVec[1]]]}) } ## newdata if(!is.null(newdata)){ newdata.missingPattern <- setNames(vector(mode = "list", length = length(object)), names(object)) if(internal){ newdata2 <- rbind(data,newdata) }else{ newdata2 <- newdata } for(iO in 1:length(object)){ ## iO <- 1 iTest.na <- is.na(newdata2[,object.xvar[[iO]],drop=FALSE]) newdata.missingPattern[[iO]] <- interaction(as.data.frame(1*iTest.na),drop=TRUE) attr(newdata.missingPattern[[iO]],"index") <- tapply(1:length(newdata.missingPattern[[iO]]),newdata.missingPattern[[iO]],list) attr(newdata.missingPattern[[iO]],"formula") <- lapply(attr(newdata.missingPattern[[iO]],"index"),function(iObs){ ## iObs <- 3 iVar.rm <- colnames(iTest.na)[iTest.na[iObs[1],]] if(length(iVar.rm)>0){ return(stats::update(object.formula[[iO]],as.formula(paste(".~.-",paste(iVar.rm,collapse="-"))))) }else{ return(object.formula[[iO]]) } }) } }else if(internal){ newdata.missingPattern <- data.missingPattern }else{ newdata.missingPattern <- NULL } }else{ data.missingPattern <- NULL newdata.missingPattern <- NULL } ## ** prepare folds for cross validation if(!is.null(fold.test)){ ## nothing to do }else if(fold.repetition>0){ index.response1 <- which(data$XXresponseXX==ref.response.num) index.response0 <- which(data$XXresponseXX!=ref.response.num) prevalence <- length(index.response1)/length(index.response0) if(fold.balance){ ## position of sampled observations in each fold (such that prevalence is preserved) index.sample0 <- .balanceFold(n.obs = length(index.response0), n.fold = fold.group) index.sample1 <- .balanceFold(n.obs = length(index.response1), n.fold = fold.group) fold.size <- sapply(index.sample0,length)+sapply(index.sample1,length) } fold.test <- lapply(1:fold.repetition, function(iRepeat){ ## iRepeat <- 1 iM <- matrix(NA, nrow = fold.group, ncol = max(fold.size)) if(fold.balance){ iSample0 <- sample(index.response0, replace = FALSE) iSample1 <- sample(index.response1, replace = FALSE) for(iFold in 1:fold.group){ ## iFold <- 1 iM[iFold,1:(length(index.sample0[[iFold]])+length(index.sample1[[iFold]]))] <- c(iSample0[index.sample0[[iFold]]], iSample1[index.sample1[[iFold]]]) } }else{ ## make sure there is at least one 0 and one 1 in the test 
## * .performance_predict
##' @description Compute predictions with uncertainty for various types of models.
##' @param object model from which predictions should be evaluated.
##' @param n.obs [integer] number of observations in the training set.
##' @param newdata [data.frame] test set.
##' @param se [logical] should the uncertainty (i.e. standard error, influence function)
##' associated to the predictions be extracted when possible?
##' @noRd
.performance_predict <- function(object, n.obs, newdata, se){

    ## ** prepare output
    out <- list(estimate = NULL)
    out$se <- rep(NA, NROW(newdata))
    out$iid <- matrix(0, nrow = n.obs, ncol = NROW(newdata))

    ## ** predictions
    if(inherits(object,"ranger")){
        if(se>0){
            iPred <- predict(object, data = newdata, type = "se")
            out$estimate <- iPred$predictions[,which(object$forest$class.values==1)]
            out$se <- iPred$se
        }else{
            iPred <- predict(object, data = newdata, type = "response")
            out$estimate <- iPred$predictions[,which(object$forest$class.values==1)]
        }
    }else if(inherits(object,"randomForest")){
        out$estimate <- stats::predict(object, newdata = newdata, type = "prob")[,2]
    }else if(inherits(object,"glm")){
        if(object$converged==FALSE){return(NULL)}
        if(se>0){
            iPred <- .predict.logit(object, newdata = newdata)
            out$estimate <- iPred["estimate",]
            out$se <- iPred["se",]
            out$iid <- attr(iPred,"iid")
        }else{
            out$estimate <- stats::predict(object, newdata = newdata, type = "response")
        }
    }else if(inherits(object,"miss.glm")){
        Xb <- stats::model.matrix(stats::formula(object), newdata) %*% stats::coef(object)
        out$estimate <- as.double(1/(1+exp(-Xb)))
    }

    ## ** export
    return(out)
}
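
## Return value (sketch, for a hypothetical glm fitted on 100 observations and
## evaluated on 10 test rows with se = TRUE): a list with
## - estimate: the 10 predicted probabilities,
## - se: their standard errors,
## - iid: a 100 x 10 influence-function matrix (training observations in rows),
## or NULL when the glm did not converge.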
## * .performance_runfold
.performance_runfold <- function(object, names.object, n.object, data.train, n.train, impute,
                                 data.test, n.test, missingPattern.test, individual.fit, auc.type, se, trace){

    ## ** prepare output
    out <- list(estimate = matrix(NA, nrow = n.test, ncol = n.object, dimnames = list(NULL, names.object)))
    if(auc.type == "probabilistic"){
        out$se <- matrix(NA, nrow = n.test, ncol = n.object, dimnames = list(NULL, names.object))
    }
    if(se>1){
        out$iid <- setNames(vector(mode = "list", length = n.object), names.object)
    }

    ## ** imputation
    if(any(is.na(data.train)) && impute != "none"){
        data.train0 <- data.train ## keep a copy with missing values (used for miss.glm objects)
        col.impute <- names(which(colSums(is.na(data.train))>0))
        for(iCol in col.impute){ ## iCol <- "X1"
            if(is.numeric(data.train[[iCol]])){
                data.train[is.na(data.train[[iCol]]),iCol] <- do.call(impute, args = list(data.train[[iCol]], na.rm = TRUE))
            }else{
                ## categorical variable: impute the most frequent value
                data.train[is.na(data.train[[iCol]]),iCol] <- names(sort(table(data.train[[iCol]], useNA = "no"), decreasing = TRUE))[1]
            }
        }
    }

    ## ** evaluate predictions over models
    for(iO in 1:n.object){ ## iO <- 3
        if(individual.fit){
            ## *** Missing data
            ## Refit the model for each observation depending on the available predictors
            ## Median imputation for the predictors
            iIndex.pattern <- attr(missingPattern.test[[iO]],"index")
            iFormula.pattern <- attr(missingPattern.test[[iO]],"formula")
            iPred.pattern <- attr(missingPattern.test[[iO]],"prediction") ## previously fit model
            if(se>1){
                out$iid[[iO]] <- matrix(0, nrow = n.train, ncol = n.test)
            }

            ## For each set of non-missing predictors
            for(iPattern in 1:length(iIndex.pattern)){ ## iPattern <- 1
                iName.pattern <- names(iIndex.pattern)[iPattern]

                ## update the model and compute the predictions
                iData.test <- data.test[iIndex.pattern[[iPattern]],all.vars(iFormula.pattern[[iPattern]]),drop=FALSE]
                if(inherits(object[[iO]],"miss.glm")){
                    iData.train <- data.train0[,all.vars(iFormula.pattern[[iPattern]]),drop=FALSE]
                    iIndex.train <- 1:NROW(iData.train)
                }else{
                    iData.train <- data.train[,all.vars(iFormula.pattern[[iPattern]]),drop=FALSE]
                    iIndex.train <- which(rowSums(is.na(iData.train)) == 0)
                }
                iObject <- stats::update(object[[iO]], formula = iFormula.pattern[[iPattern]], data = iData.train[iIndex.train,,drop=FALSE])
                iPred <- .performance_predict(iObject, n.obs = length(iIndex.train), newdata = iData.test, se = se>1)

                ## store results
                if(!is.null(iPred)){ ## convergence check
                    out$estimate[iIndex.pattern[[iPattern]],iO] <- as.double(iPred$estimate)
                    if(auc.type == "probabilistic"){
                        out$se[iIndex.pattern[[iPattern]],iO] <- as.double(iPred$se)
                    }
                    if(se>1){
                        out$iid[[iO]][iIndex.pattern[[iPattern]],iIndex.train] <- iPred$iid
                    }
                }
            }
        }else{
            ## *** Complete case
            if(inherits(object[[iO]],"miss.glm")){
                iData.train <- data.train0
            }else{
                iData.train <- data.train
            }
            if(!is.null(data.train)){
                iPred <- .performance_predict(stats::update(object[[iO]], data = data.train), n.obs = n.train, newdata = data.test, se = se>1)
            }else{
                iPred <- .performance_predict(object[[iO]], n.obs = n.train, newdata = data.test, se = se>1)
            }
            if(!is.null(iPred)){ ## convergence check
                out$estimate[,iO] <- as.double(iPred$estimate)
                if(auc.type == "probabilistic"){
                    out$se[,iO] <- as.double(iPred$se)
                }
                if(se>1){
                    out$iid[[iO]] <- iPred$iid
                }
            }
        }
    }
    if(trace){cat(" ")}

    ## ** export
    return(out)
}

## * .balanceFold
##' @description Generate indexes relative to each fold.
##' @param n.obs number of observations.
##' @param n.fold number of folds.
##' @noRd
.balanceFold <- function(n.obs, n.fold){
    if(n.obs < n.fold){
        return(c(lapply(1:(n.fold-n.obs), function(i){NULL}),as.list(1:n.obs)))
    }
    ## number of observations per fold
    nobs.fold <- rep(floor(n.obs/n.fold),n.fold)
    nobs.fold <- nobs.fold + c(rep(1,n.obs-sum(nobs.fold)),rep(0,n.fold-(n.obs-sum(nobs.fold))))
    ## list of consecutive indexes, one element per fold,
    ## e.g. .balanceFold(n.obs = 10, n.fold = 3) returns list(1:4, 5:7, 8:10)
    out <- mapply(x = cumsum(c(0,nobs.fold[-n.fold]-1)+1), y = cumsum(nobs.fold), function(x,y){list(x:y)})
    return(out)
}

##----------------------------------------------------------------------
### performance.R ends here
/scratch/gouwar.j/cran-all/cranData/BuyseTest/R/performance.R
### performanceResample.R ---
##----------------------------------------------------------------------
## Author: Brice Ozenne
## Created: mar 3 2022 (12:01)
## Version:
## Last-Updated: jul 4 2023 (18:46)
##           By: Brice Ozenne
##     Update #: 188
##----------------------------------------------------------------------
##
### Commentary:
##
### Change Log:
##----------------------------------------------------------------------
##
### Code:

## * performanceResample (documentation)
##' @title Uncertainty About Performance of a Classifier (EXPERIMENTAL)
##' @description Use resampling to quantify uncertainties about the performance of one or several binary classifiers evaluated via cross-validation.
##'
##' @param object a \code{glm} or \code{ranger} object, or a list of such objects.
##' @param data [data.frame] the training data.
##' @param name.response [character] The name of the response variable (i.e. the one containing the categories).
##' @param type.resampling [character] Should non-parametric bootstrap (\code{"bootstrap"}) or permutation of the outcome (\code{"permutation"}) be used?
##' @param n.resampling [integer,>0] Number of bootstrap samples or permutations.
##' @param fold.repetition [integer,>0] Number of repetitions of the cross-validation. Should be strictly positive.
##' @param conf.level [numeric, 0-1] confidence level for the confidence intervals.
##' @param cpus [integer, >0] the number of CPUs to use. If strictly greater than 1, the resampling is performed in parallel.
##' @param seed [integer, >0] Random number generator (RNG) state used when starting the resampling.
##' If \code{NULL} no state is set.
##' @param trace [logical] Should the execution of the function be traced.
##' @param filename [character] Prefix for the files containing each result.
##' @param ... arguments passed to \code{\link{performance}}.
##'
##' @details WARNING: using bootstrap after cross-validation may not provide valid variance/CI/p-value estimates.
##'
##' @return An S3 object of class \code{performance}.
##' @keywords htest

## * performanceResample (code)
##' @export
performanceResample <- function(object, data = NULL, name.response = NULL,
                                type.resampling = "permutation", n.resampling = 1000, fold.repetition = 0,
                                conf.level = 0.95, cpus = 1, seed = NULL, trace = TRUE, filename = NULL, ...){

    ## ** Normalize arguments
    type.resampling <- match.arg(type.resampling, c("permutation", "bootstrap"))
    if(length(n.resampling)==1){
        vec.resampling <- 1:n.resampling
    }else{
        vec.resampling <- n.resampling
        n.resampling <- length(vec.resampling)
    }

    ## ** fix randomness
    if(!is.null(seed)){
        tol.seed <- 10^(floor(log10(.Machine$integer.max))-1)
        if(n.resampling>tol.seed){
            stop("Cannot set a seed per sample when considering more than ",tol.seed," samples. \n")
        }
        if(!is.null(get0(".Random.seed"))){ ## avoid error when .Random.seed does not exist, e.g. fresh R session with no call to the RNG
            old <- .Random.seed ## save the current seed
            on.exit(.Random.seed <<- old) ## restore the current seed (before the call to the function)
        }else{
            on.exit(rm(.Random.seed, envir=.GlobalEnv))
        }
        set.seed(seed)
        seqSeed <- sample.int(tol.seed, max(vec.resampling), replace = FALSE)
    }else{
        seqSeed <- NULL
    }

    ## ** Point estimate
    initPerf <- performance(object, data = data, name.response = name.response,
                            fold.repetition = fold.repetition, se = FALSE, trace = FALSE, seed = seqSeed[1], ...)
    if(!is.null(filename)){
        if(!is.null(seed)){
            filename <- paste0(filename,"-seed",seqSeed[1])
        }
        saveRDS(initPerf, file = paste0(filename,".rds"))
    }
    if(is.null(data)){
        data <- initPerf$data
    }
    if(is.null(name.response)){
        name.response <- initPerf$args$name.response
    }
    data[["XXresponseXX"]] <- NULL

    ## ** single run function
    if(type.resampling=="permutation"){
        dataResample <- as.data.frame(data)
        attr(dataResample,"internal") <- attr(data,"internal") ## only do CV

        warperResampling <- function(i){
            test.factice <- i %in% vec.resampling == FALSE
            dataResample[[name.response]] <- sample(data[[name.response]])
            iPerf <- try(suppressWarnings(performance(object, data = dataResample, name.response = name.response,
                                                      fold.repetition = fold.repetition, trace = trace-1, se = FALSE,
                                                      seed = seqSeed[i], ...)),
                         silent = FALSE)
            if(inherits(iPerf, "try-error") || test.factice){
                return(NULL)
            }else{
                dt.iPerf <- as.data.table(iPerf, type = "performance")
                if(!is.null(filename)){
                    saveRDS(cbind(sample = i, dt.iPerf[,c("method","metric","model","estimate")]),
                            file = paste0(filename,"-",type.resampling,i,".rds"))
                }
                return(cbind(sample = i, dt.iPerf[,c("method","metric","model","estimate")]))
            }
        }
    }else if(type.resampling=="bootstrap"){
        warperResampling <- function(i){
            test.factice <- i %in% vec.resampling == FALSE
            dataResample <- data[sample(NROW(data), size = NROW(data), replace = TRUE),,drop=FALSE]
            attr(dataResample,"internal") <- attr(data,"internal") ## only do CV
            iPerf <- try(suppressWarnings(performance(object, data = dataResample, name.response = name.response,
                                                      fold.repetition = fold.repetition, trace = trace-1, se = FALSE,
                                                      seed = seqSeed[i], ...)),
                         silent = FALSE)
            if(inherits(iPerf, "try-error") || test.factice){
                return(NULL)
            }else{
                dt.iPerf <- as.data.table(iPerf, type = "performance")
                if(!is.null(filename)){
                    saveRDS(cbind(sample = i, dt.iPerf[,c("method","metric","model","estimate")]),
                            file = paste0(filename,"-",type.resampling,i,".rds"))
                }
                return(cbind(sample = i, dt.iPerf[,c("method","metric","model","estimate")]))
            }
        }
    }
    ## warperResampling(5)

    if(cpus==1){ ## serial calculations

        ## ** method to loop
        if(trace > 0){
            requireNamespace("pbapply")
            method.loop <- pbapply::pblapply
        }else{
            method.loop <- lapply
        }

        ## ** loop
        ls.resampling <- do.call(method.loop,
                                 args = list(X = 1:max(vec.resampling),
                                             FUN = warperResampling))

    }else{ ## parallel calculations

        ## define cluster
        cl <- parallel::makeCluster(cpus)
        if(trace>0){
            pb <- utils::txtProgressBar(max = max(vec.resampling), style = 3)
            progress <- function(n){utils::setTxtProgressBar(pb, n)}
            opts <- list(progress = progress)
        }else{
            opts <- list()
        }
        ## link to foreach
        doSNOW::registerDoSNOW(cl)
        ## seed
        if(!is.null(seed)){
            parallel::clusterExport(cl, varlist = "seqSeed", envir = environment())
        }
        ## export package
        parallel::clusterCall(cl, fun = function(x){
            suppressPackageStartupMessages(library(BuyseTest, quietly = TRUE, warn.conflicts = FALSE, verbose = FALSE))
        })
        toExport <- NULL
        iB <- NULL ## [:forCRANcheck:] foreach
        ls.resampling <- foreach::`%dopar%`(
            foreach::foreach(iB = 1:max(vec.resampling), .export = toExport, .packages = "data.table", .options.snow = opts), {
                return(warperResampling(iB))
            })
        parallel::stopCluster(cl)
        if(trace>0){close(pb)}
    }

    ## ** statistical inference
    dt.resampling <- data.table::as.data.table(do.call(rbind, ls.resampling[sapply(ls.resampling,length)>0]))
    new.performance <- .performanceResample_inference(performance = initPerf$performance[,c("method","metric","model","estimate")],
                                                      resampling = dt.resampling,
                                                      type.resampling = type.resampling,
                                                      conf.level = conf.level)
    ## ** gather results
    out <- list(call = match.call(),
                response = initPerf$response,
                performance = new.performance,
                prediction = initPerf$prediction,
                resampling = dt.resampling,
                auc = initPerf$auc,
                brier = initPerf$brier,
                data = initPerf$data,
                args = initPerf$args)
    out$args$transformation <- NA
    out$args$null <- NULL
    out$args$conf.level <- conf.level
    out$args$n.resampling <- n.resampling
    out$args$type.resampling <- type.resampling
    out$args$filename <- filename

    ## ** export
    class(out) <- append("performance",class(out))
    return(out)
}
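
## Usage sketch (not run; e.null and e.logit1 as in the performance() examples,
## with a small n.resampling to keep the run short): permutation-based p-values
## for the cross-validated AUC and Brier score.
## set.seed(10)
## performanceResample(list(null = e.null, p1 = e.logit1),
##                     fold.repetition = 10, type.resampling = "permutation",
##                     n.resampling = 100)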
1-conf.level, endpoint = colnames(M.resampling), backtransform.delta = function(x){x}) out$p.value_comp <- deltaout[,"p.value"] }else{ out$p.value_comp <- NA } names(out)[names(out) == "lower.ci"] <- "lower" names(out)[names(out) == "upper.ci"] <- "upper" out$null <- NULL } ## ** export return(as.data.frame(out)) } ##---------------------------------------------------------------------- ### performanceResample.R ends here
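## Illustration (not part of the package): the permutation p-values computed by
## .performanceResample_inference above use the add-one estimator
## (#{resampled statistics at least as extreme} + 1) / (B + 1), which can never be
## exactly zero. A minimal self-contained sketch with made-up numbers -- `obs` and
## `perm` are hypothetical objects, not produced by the package:
if(FALSE){
    obs <- 0.75                                  ## observed AUC
    perm <- c(0.52, 0.48, 0.61, 0.77, 0.55)      ## B = 5 permutation AUCs
    (sum(obs <= perm) + 1) / (length(perm) + 1)  ## one-sided p-value: (1+1)/6
}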
/scratch/gouwar.j/cran-all/cranData/BuyseTest/R/performanceResample.R
### plot.S3sensitivity.R --- ##---------------------------------------------------------------------- ## Author: Brice Ozenne ## Created: dec 10 2021 (09:34) ## Version: ## Last-Updated: jun 27 2023 (14:24) ## By: Brice Ozenne ## Update #: 26 ##---------------------------------------------------------------------- ## ### Commentary: ## ### Change Log: ##---------------------------------------------------------------------- ## ### Code: ## * plot - sensitivity #' @rdname plot-sensitivity #' @method plot S3sensitivity #' @export plot.S3sensitivity <- function(x, plot = TRUE, ...){ out <- autoplot(x, ...) if(plot){ print(out) } return(invisible(list(plot = out, data = out$data))) } ## * autoplot - sensitivity ##' @title Graphical Display for Sensitivity Analysis ##' @description Display the statistic of interest across various threshold values, possibly with confidence intervals. ##' Currently only works when varying thresholds relative to one or two variables. ##' @rdname plot-sensitivity ##' ##' @param object,x output of the sensitivity method ##' @param plot [logical] should the graph be displayed in a graphical window ##' @param col [character vector] color used to identify the thresholds relative to a second variable. ##' @param ci [logical] should the confidence intervals be displayed? ##' @param band [logical] should the simultaneous confidence intervals be displayed? ##' @param label [character] text used before the name of the variables in the legend. ##' @param size.line [numeric] width of the line connecting the point estimates. ##' @param size.point [numeric] size of the point representing the point estimates. ##' @param size.ci [numeric] width of the lines representing the confidence intervals. ##' @param alpha [numeric] transparency for the area representing the simultaneous confidence intervals. ##' @param position relative position of the error bars for a given x value. Can for instance be \code{position_dodge(width = 5)}. ##' @param ... not used. For compatibility with the generic method. ##' ##' @details The \code{autoplot} and \code{plot} methods are very similar. The main difference is that the former returns a ggplot2 object whereas the latter automatically displays the figure in a graphical window and returns an (invisible) list with the plot and the data. ##' ##' @return a ggplot2 object ##' @method autoplot S3sensitivity ##' @keywords hplot ##' ##' @export autoplot.S3sensitivity <- function(object, col = NULL, ci = TRUE, band = TRUE, label = "Threshold for", position = NULL, size.line = 1, size.point = 1.75, size.ci = 0.5, alpha = 0.1, ...){ grid <- attr(object,"gridRed") statistic <- switch(attr(object,"statistic"), "netBenefit" = "Net benefit", "winRatio" = "Win ratio", "favorable" = "Proportion of favorable pairs", "unfavorable" = "Proportion of unfavorable pairs") if(NCOL(grid)>2){ stop("No graphical display available when the sensitivity analysis is performed on more than 2 thresholds\n") } nU.var <- apply(grid,2,function(x){length(unique(x))}) name.var <- names(sort(nU.var, decreasing = TRUE)) n.var <- length(name.var) name.col <- name.var if(n.var==1 || (!is.null(col) && all(is.na(col)))){ if("XXindexXX" %in% names(object)){ stop("No endpoint should be named \"XXindexXX\" as this name is used internally.
\n") } name.col[2] <- "XXindexXX" object <- data.frame(XXindexXX = "1", object) }else{ object[[name.var[2]]] <- factor(object[[name.var[2]]], levels = sort(unique(object[[name.var[2]]]))) } ## ** display ## error bar in the legend draw_key.save <- ggplot2::GeomErrorbar$draw_key GeomErrorbar$draw_key <- function (data, params, size) { ## https://stackoverflow.com/questions/53490654/adding-the-errorbar-icon-in-legend-in-ggplot .pt <- get(".pt", envir = as.environment("package:ggplot2")) data$linetype[is.na(data$linetype)] <- 0 out <- grid::segmentsGrob(c(0.2, 0.2, 0.5), c(0.2, 0.8, 0.2), c(0.8, 0.8, 0.5), c(0.2, 0.8, 0.8), gp = grid::gpar(col = alpha(data$colour, data$alpha), lwd = data$linewidth * .pt, lty = data$linetype, lineend = "butt"), arrow = params$arrow) return(out) } on.exit(GeomErrorbar$draw_key <- draw_key.save) if(length(name.var)==1){ gg <- ggplot2::ggplot(data = object, mapping = ggplot2::aes(x = .data[[name.var[1]]], y = .data$estimate)) }else{ gg <- ggplot2::ggplot(data = object, mapping = ggplot2::aes(x = .data[[name.var[1]]], y = .data$estimate, group = .data[[name.var[2]]])) } if(band && "lower.band" %in% names(object) && "upper.band" %in% names(object)){ gg <- gg + ggplot2::geom_ribbon(ggplot2::aes(ymin=.data$lower.band, ymax = .data$upper.band, fill = .data[[name.col[2]]]), alpha = alpha) }else{ band <- FALSE } gg <- gg + ggplot2::geom_point(ggplot2::aes(color = .data[[name.col[2]]]), size = size.point) + ggplot2::geom_line(ggplot2::aes(color = .data[[name.col[2]]]), linewidth = size.line) gg <- gg + ggplot2::xlab(paste(label,name.var[1],sep=" ")) gg <- gg + ggplot2::ylab(statistic) + ggplot2::theme(legend.position = "bottom") if(ci && "lower.ci" %in% names(object) && "upper.ci" %in% names(object)){ if(!is.null(position)){ gg <- gg + ggplot2::geom_errorbar(ggplot2::aes(ymin=.data$lower.ci, ymax = .data$upper.ci, color = .data[[name.col[2]]]), size = size.ci, position = position) }else{ gg <- gg + ggplot2::geom_errorbar(ggplot2::aes(ymin=.data$lower.ci, ymax = .data$upper.ci, color = .data[[name.col[2]]]), size = size.ci) } }else{ ci <- FALSE } if(n.var==1){ if(is.null(col) || all(is.na(col))){ col <- "black" }else if(length(col)!=1){ stop("Argument \'col\' should have lenght one when the sensitivity analysis is performed on one threshold. 
\n") } if(ci && "lower.ci" %in% names(object) && "upper.ci" %in% names(object)){ gg <- gg + ggplot2::scale_color_manual("CIs", values = col, labels = "") }else{ gg <- gg + ggplot2::scale_color_manual("Point estimate", values = col, labels = "") } if(band){ gg <- gg + ggplot2::scale_fill_manual("Simulatenous CIs", values = col, labels = "") } }else if(n.var==2){ if(!is.null(col) && all(is.na(col))){ Ulevel.var2 <- unique(object[[name.var[2]]]) label_facet <- setNames(unique(paste(label,name.var[[2]]," : ",Ulevel.var2,sep=" ")), Ulevel.var2) gg <- gg + ggplot2::facet_grid(as.formula(paste0("~",name.var[2])), labeller = ggplot2::as_labeller(label_facet)) if(ci){ gg <- gg + ggplot2::scale_color_manual("CIs", values = "black", labels = "") }else{ gg <- gg + ggplot2::scale_color_manual("Point estimate", values = "black", labels = "") } if(band){ gg <- gg + ggplot2::scale_fill_manual("Simulatenous CIs", values = "black", labels = "") } }else if(is.null(col)){ if(ci){ gg <- gg + ggplot2::labs(color = paste0("CIs \n (",paste(c(tolower(label),name.col[2]),collapse=" "),")")) }else{ gg <- gg + ggplot2::labs(color = paste0("Point estimate \n (",paste(c(tolower(label),name.col[2]),collapse=" "),")")) } if(band){ gg <- gg + ggplot2::labs(fill = paste0("Simulatenous CIs \n (",paste(c(tolower(label),name.col[2]),collapse=" "),")")) } }else{ if(length(col)!=nU.var[[name.var[2]]]){ stop("Argument \'col\' should have lenght ",nU.var[[name.var[2]]],", the number of unique thresholds relative to the endpoint \"",name.var[2],"\". \n") } if(ci){ gg <- gg + ggplot2::scale_color_manual(paste0("CIs \n (",paste(c(tolower(label),name.col[2]),collapse=" "),")"), values = col) }else{ gg <- gg + ggplot2::scale_color_manual(paste0("Point estimate \n (",paste(c(tolower(label),name.col[2]),collapse=" "),")"), values = col) } if(band){ gg <- gg + ggplot2::scale_fill_manual(paste0("Simulatenous CIs \n (",paste(c(tolower(label),name.col[2]),collapse=" "),")"), values = col) } } } return(gg) } ##---------------------------------------------------------------------- ### plot.S3sensitivity.R ends here
/scratch/gouwar.j/cran-all/cranData/BuyseTest/R/plot.S3sensitivity.R
### powerBuyseTest.R --- ##---------------------------------------------------------------------- ## Author: Brice Ozenne ## Created: sep 26 2018 (12:57) ## Version: ## Last-Updated: jul 18 2023 (12:00) ## By: Brice Ozenne ## Update #: 1266 ##---------------------------------------------------------------------- ## ### Commentary: ## ### Change Log: ##---------------------------------------------------------------------- ## ### Code: ## * Documentation - powerBuyseTest #' @name powerBuyseTest #' @title Performing simulation studies with BuyseTest #' #' @description Performs simulation studies for several sample sizes. #' Returns estimates, their standard deviation, the average estimated standard error, and the rejection rate. #' Can also be used for power calculation or to approximate the sample size needed to reach a specific power. #' #' @param sim [function] takes two arguments: #' the sample size in the control group (\code{n.C}) and the sample size in the treatment group (\code{n.T}) #' and generates datasets. The datasets must be data.frame objects or inherit from data.frame. #' @param sample.size [integer vector or matrix, >0] the group specific sample sizes relative to which the simulations should be performed. #' When a vector, the same sample size is used for each group. Alternatively can be a matrix with two columns, one for each group (respectively T and C). #' @param n.rep [integer, >0] the number of simulations. #' When specifying the power instead of the sample size, should be a vector of length 2 where the second element indicates the number of simulations used to identify the sample size. #' @param null [numeric vector] For each statistic of interest, the null hypothesis to be tested. #' The vector should be named with the names of the statistics. #' @param cpus [integer, >0] the number of CPUs to use. Default value is 1. #' @param export.cpus [character vector] name of the variables to export to each cluster. #' @param seed [integer, >0] Random number generator (RNG) state used when starting the simulation study. #' If \code{NULL} no state is set. #' @param alternative [character] the type of alternative hypothesis: \code{"two.sided"}, \code{"greater"}, or \code{"less"}. #' Default value read from \code{BuyseTest.options()}. #' @param conf.level [numeric, 0-1] confidence level. #' Default value read from \code{BuyseTest.options()}. #' @param power [numeric, 0-1] statistical power (i.e. 1 minus the type 2 error level) used to determine the sample size. Only relevant when \code{sample.size} is not given. See details. #' @param max.sample.size [integer, >0] sample size used to approximate the sample size achieving the requested type 1 and type 2 error (see details). #' Can have length 2 to indicate the sample in each group (respectively T and C) when the groups have unequal sample size. #' @param trace [integer] should the execution of the function be traced? #' @param transformation [logical] should the CI be computed on the logit scale / log scale for the net benefit / win ratio and backtransformed. #' Otherwise they are computed without any transformation. #' Default value read from \code{BuyseTest.options()}. #' @param order.Hprojection [integer 1,2] the order of the H-projection to be used to compute the variance of the net benefit/win ratio. #' Default value read from \code{BuyseTest.options()}. #' @param ... other arguments (e.g. \code{scoring.rule}, \code{method.inference}) to be passed to \code{initializeArgs}.
#' #' @details \bold{Sample size calculation}: to approximate the sample size achieving the requested type 1 (\eqn{\alpha}) and type 2 error (\eqn{\beta}), #' GPC are applied on a large sample (as defined by the argument \code{max.sample.size}): \eqn{N^*=m^*+n^*} where \eqn{m^*} is the sample size in the control group and \eqn{n^*} is the sample size in the active group. #' Then the effect (\eqn{\delta}) and the asymptotic variance of the estimator (\eqn{\sigma^2}) are estimated. The total sample size is then deduced as (two-sided case): #' \deqn{\hat{N} = \hat{\sigma}^2\frac{(u_{1-\alpha/2}+u_{1-\beta})^2}{\hat{\delta}^2}} from which the group specific sample sizes are deduced: \eqn{\hat{m}=\hat{N}\frac{m^*}{N^*}} and \eqn{\hat{n}=\hat{N}\frac{n^*}{N^*}}. Here \eqn{u_x} denotes the x-quantile of the normal distribution. \cr #' This approximation can be improved by increasing the sample size (argument \code{max.sample.size}) and/or by performing it multiple times based on different datasets and averaging the estimated sample size per group (second element of argument \code{n.rep}). \cr #' To evaluate the approximation, a simulation study is then performed with the estimated sample size. It will not exactly match the requested power but should provide a reasonable guess which can be refined with further simulation studies. The larger the sample size (and/or number of CPUs) the more accurate the approximation. #' #' \bold{seed}: the seed is used to generate one seed per simulation. These simulation seeds are the same whether one or several CPUs are used. #' #' @return An S4 object of class \code{\linkS4class{S4BuysePower}}. #' @keywords htest #' #' @author Brice Ozenne ## * powerBuyseTest (examples) ##' @rdname powerBuyseTest ##' @examples ##' library(data.table) ##' ##' #### Using simBuyseTest #### ##' ## only point estimate ##' \dontrun{ ##' pBT <- powerBuyseTest(sim = simBuyseTest, sample.size = c(10, 25, 50, 75, 100), ##' formula = treatment ~ bin(toxicity), seed = 10, n.rep = 1000, ##' method.inference = "none", keep.pairScore = FALSE, cpus = 5) ##' summary(pBT) ##' model.tables(pBT) ##' } ##' ##' ## point estimate with rejection rate ##' \dontshow{ ##' powerBuyseTest(sim = simBuyseTest, sample.size = c(10, 50, 100), ##' formula = treatment ~ bin(toxicity), seed = 10, n.rep = 10, ##' method.inference = "u-statistic", trace = 4) ##' } ##' \dontrun{ ##' powerBuyseTest(sim = simBuyseTest, sample.size = c(10, 50, 100), ##' formula = treatment ~ bin(toxicity), seed = 10, n.rep = 1000, ##' method.inference = "u-statistic", trace = 4) ##' } ##' ##' #### Using user defined simulation function #### ##' ## power calculation for Wilcoxon test ##' simFCT <- function(n.C, n.T){ ##' out <- rbind(cbind(Y=stats::rt(n.C, df = 5), group=0), ##' cbind(Y=stats::rt(n.T, df = 5), group=1) + 1) ##' return(data.table::as.data.table(out)) ##' } ##' simFCT2 <- function(n.C, n.T){ ##' out <- rbind(cbind(Y=stats::rt(n.C, df = 5), group=0), ##' cbind(Y=stats::rt(n.T, df = 5), group=1) + 0.25) ##' return(data.table::as.data.table(out)) ##' } ##' ##' \dontshow{ ##' powerW <- powerBuyseTest(sim = simFCT, sample.size = c(5, 10,20,30,50,100), ##' n.rep = 10, formula = group ~ cont(Y)) ##' summary(powerW) ##' } ##' \dontrun{ ##' powerW <- powerBuyseTest(sim = simFCT, sample.size = c(5,10,20,30,50,100), ##' n.rep = 1000, formula = group ~ cont(Y), cpus = "all") ##' summary(powerW) ##' } ##' ##' ## sample size needed to reach (approximately) a power ##' ## based on summary statistics obtained on a large sample ##' \dontrun{
##' sampleW <- powerBuyseTest(sim = simFCT, power = 0.8, formula = group ~ cont(Y), ##' n.rep = c(1000,10), max.sample.size = 2000, cpus = 5, ##' seed = 10) ##' nobs(sampleW) ##' summary(sampleW) ## not very accurate but gives an order of magnitude ##' ##' sampleW2 <- powerBuyseTest(sim = simFCT2, power = 0.8, formula = group ~ cont(Y), ##' n.rep = c(1000,10), max.sample.size = 2000, cpus = 5, ##' seed = 10) ##' summary(sampleW2) ## more accurate when the sample size needed is not too small ##' } ##' ## * powerBuyseTest (code) ##' @export powerBuyseTest <- function(sim, sample.size, n.rep = c(1000,10), null = c("netBenefit" = 0), cpus = 1, export.cpus = NULL, seed = NULL, conf.level = NULL, power = NULL, max.sample.size = 2000, alternative = NULL, order.Hprojection = NULL, transformation = NULL, trace = 1, ...){ call <- match.call() ## ** normalize and check arguments name.call <- names(call) option <- BuyseTest.options() if(is.null(conf.level)){ conf.level <- option$conf.level } if(is.null(alternative)){ alternative <- option$alternative } if(is.null(order.Hprojection)){ order.Hprojection <- option$order.Hprojection } if(is.null(transformation)){ transformation <- option$transformation } alpha <- 1 - conf.level outArgs <- initializeArgs(cpus = cpus, option = option, name.call = name.call, data = NULL, model.tte = NULL, ...) outArgs$call <- setNames(as.list(call),names(call)) ## power if(!is.null(power) && (!missing(sample.size) && !is.null(sample.size))){ warning("Argument power is disregarded when arguments \'sample.size\' is specified. \n") power <- NULL }else if(is.null(power) && (missing(sample.size) || is.null(sample.size))){ stop("Argument \'sample.size\' or argument \'power\' must be specified. \n") } ## sample size if(is.null(power)){ if(!is.numeric(sample.size) || (!is.vector(sample.size) && !is.matrix(sample.size))){ stop("Argument \'sample.size\' must be a vector of integers or a matrix of integers with two-columns (one for each group). \n") } if(any(sample.size<=0) || any(sample.size %% 1 != 0)){ stop("Argument \'sample.size\' must only contain strictly positive integers. \n") } if(is.matrix(sample.size)){ if(NCOL(sample.size)!=2){ stop("When a matrix, argument \'sample.size\' must have two-columns (one for each group). \n") } if(is.null(colnames(sample.size))){ sample.sizeT <- sample.size[,1] sample.sizeC <- sample.size[,2] }else{ if(any(c("C","T") %in% colnames(sample.size) == FALSE)){ stop("When a matrix, argument \'sample.size\' must have column names \"C\" and \"T\" \n") } sample.sizeT <- sample.size[,"T"] sample.sizeC <- sample.size[,"C"] } }else{ sample.sizeT <- sample.size sample.sizeC <- sample.size } }else{ if(length(n.rep)==1){ rep2rep <- function(x){sapply(x, function(iX){ceiling(log10(iX) + 3*pmax(0,log10(iX)-1) + pmax(0,log10(iX)-2) + 5*pmax(0,log10(iX)-3))})} ## rep2rep(10^(1:5)) ## [1] 1 5 10 20 30 n.rep <- 10000 n.rep <- c(n.rep, ceiling(log10(n.rep) + 3*pmax(0,log10(n.rep)-1) + pmax(0,log10(n.rep)-2) + 5*pmax(0,log10(n.rep)-3))) } if(!is.vector(max.sample.size)){ stop("Argument \'max.sample.size\' must be a vector. \n") } if(length(max.sample.size) %in% 1:2 == FALSE){ stop("Argument \'max.sample.size\' must have length 1 or 2. \n") } if(length(max.sample.size) == 2){ if(is.null(names(max.sample.size))){ names(max.sample.size) <- c("T","C") }else if(any(c("C","T") %in% names(max.sample.size) == FALSE)){ stop("When a vector of length 2, argument \'max.sample.size\' must have names T and C. 
\n") } } } ## statistic statistic <- names(null) validCharacter(statistic, name1 = "names(null)", valid.length = 1:4, valid.values = c("favorable","unfavorable","netBenefit","winRatio"), refuse.NULL = TRUE, refuse.duplicates = TRUE, method = "BuyseTest") ## sim dt.tempo <- sim(n.C = 10, n.T = 10) if(!inherits(dt.tempo, "data.frame")){ stop("The function defined by the argument \'sim\' must return a data.frame or an object that inherits from data.frame.\n") } ## cluster if(identical(cpus,"all")){ cpus <- parallel::detectCores() }else if(cpus>1){ validInteger(cpus, valid.length = 1, min = 1, max = parallel::detectCores(), method = "powerBuyseTest") } ## seed if (!is.null(seed)) { tol.seed <- 10^(floor(log10(.Machine$integer.max))-1) if(n.rep[1]>tol.seed){ stop("Cannot set a seed per simulation when considering more than ",tol.seed," similations. \n") } if(!is.null(get0(".Random.seed"))){ ## avoid error when .Random.seed do not exists, e.g. fresh R session with no call to RNG old <- .Random.seed # to save the current seed on.exit(.Random.seed <<- old) # restore the current seed (before the call to the function) }else{ on.exit(rm(.Random.seed, envir=.GlobalEnv)) } set.seed(seed) seqSeed <- sample.int(tol.seed, n.rep[1], replace = FALSE) }else{ seqSeed <- NULL } ## trace if (trace > 0) { requireNamespace("pbapply") method.loop <- pbapply::pblapply }else{ method.loop <- lapply } ## ** initialize cluster if(cpus>1){ cl <- parallel::makeCluster(cpus) ## link to foreach doSNOW::registerDoSNOW(cl) ## export if(!is.null(export.cpus)){ parallel::clusterExport(cl, export.cpus) } ## seed if (!is.null(seed)) { parallel::clusterExport(cl, varlist = "seqSeed", envir = environment()) } ## export package parallel::clusterCall(cl, fun = function(x){ suppressPackageStartupMessages(library(BuyseTest, quietly = TRUE, warn.conflicts = FALSE, verbose = FALSE)) }) } ## ** initialize sample size if(!is.null(power)){ if(length(max.sample.size)==1){ max.sample.size <- c(T = max.sample.size, C = max.sample.size) } if (trace > 1) { cat(" Determination of the sample using a large sample (T=",max.sample.size[1],", C=",max.sample.size[2],") \n\n",sep="") } if (cpus == 1) { ls.BTmax <- do.call(method.loop, args = list(X = 1:n.rep[2], FUN = function(X){ if(!is.null(seed)){set.seed(seqSeed[X])} iOut <- BuyseTest(..., data = sim(n.T = max.sample.size["T"], n.C = max.sample.size["C"]), trace = 0) return(iOut) }) ) }else if(cpus > 1){ ## define progress bar if(trace>0){ pb <- utils::txtProgressBar(max = n.rep[2], style = 3) progress <- function(n){utils::setTxtProgressBar(pb, n)} opts <- list(progress = progress) }else{ opts <- list() } ls.BTmax <- foreach::`%dopar%`( foreach::foreach(i=1:n.rep[2], .options.snow = opts), { if(!is.null(seed)){set.seed(seqSeed[i])} iOut <- BuyseTest(..., data = sim(n.T = max.sample.size["T"], n.C = max.sample.size["C"]), trace = 0) return(iOut) }) if(trace>0){close(pb)} if(n.rep[1]<=0){parallel::stopCluster(cl)} } if(ls.BTmax[[1]]@method.inference == "u-statistic"){ DeltaMax <- sapply(ls.BTmax, function(iBT){utils::tail(coef(iBT, statistic = names(null)),1) - null}) IidMax <- do.call(cbind,lapply(ls.BTmax, FUN = getIid, statistic = names(null), scale = FALSE)) ratio <- c(T = as.double(max.sample.size["T"]/sum(max.sample.size)), C = as.double(max.sample.size["C"]/sum(max.sample.size))) indexT <- attr(ls.BTmax[[1]]@level.treatment,"indexT") indexC <- attr(ls.BTmax[[1]]@level.treatment,"indexC") sigma2Max <- colMeans(IidMax[indexC,,drop=FALSE]^2)/ratio["C"] + 
colMeans(IidMax[indexT,,drop=FALSE]^2)/ratio["T"] if(alternative=="two.sided"){ n.approx <- sigma2Max*(stats::qnorm(1-alpha/2) + stats::qnorm(power))^2/DeltaMax^2 }else if(alternative=="less"){ if(DeltaMax<0){ n.approx <- sigma2Max*(stats::qnorm(1-alpha) + stats::qnorm(power))^2/DeltaMax^2 }else{ message("No power: positive effect detected. \n") return(invisible(DeltaMax)) } }else if(alternative=="greater"){ if(DeltaMax>0){ n.approx <- sigma2Max*(stats::qnorm(1-alpha) + stats::qnorm(power))^2/DeltaMax^2 }else{ message("No power: positive effect detected. \n") return(invisible(DeltaMax)) } } sample.sizeC <- ceiling(mean(n.approx*ratio["C"])) attr(sample.sizeC,"sample") <- unname(n.approx*ratio["C"]) sample.sizeT <- ceiling(mean(n.approx*ratio["T"])) attr(sample.sizeT,"sample") <- unname(n.approx*ratio["C"]) ## (mean(IidMax[attr([email protected],"indexC"),]^2) + mean(IidMax[attr([email protected],"indexT"),]^2))*(stats::qnorm(1-alpha/2)+stats::qnorm(power))^2/DeltaMax^2 if (trace > 1) { if(cpus==1){ cat(" - estimated effect (variance): ",unname(DeltaMax)," (",sigma2Max,")\n",sep="") cat(" - estimated sample size : (m=",sample.sizeC,", n=",sample.sizeT,")\n\n",sep="") }else{ cat(" - average estimated effect (variance): ",unname(mean(DeltaMax))," (",mean(sigma2Max),")\n",sep="") cat(" - average estimated sample size [min;max] : (m=", sample.sizeC," [",ceiling(min(n.approx*ratio["C"])),";",ceiling(max(n.approx*ratio["C"])),"], n=", sample.sizeT," [",ceiling(min(n.approx*ratio["T"])),";",ceiling(max(n.approx*ratio["T"])),"]\n\n",sep="") } } }else{ stop("Can only determine the sample size when argument \'method.inference\' equals \"u-statistic\". \n") } } ## ** test arguments if(option$check){ index.pb <- which(outArgs$status[outArgs$type=="tte"] == "..NA..") if(length(index.pb)>0){ if(any(attr(outArgs$censoring,"original") %in% c("left","right") == FALSE)){ stop("BuyseTest: wrong specification of \'status\'. \n", "\'status\' must indicate a variable in data for TTE endpoints. \n", "\'censoring\' is used to indicate whether there is left or right censoring. \n", "Consider changing \'censoring =\' into \'status =\' when in the argument \'formula\' \n") }else{ stop("BuyseTest: wrong specification of \'status\'. \n", "\'status\' must indicate a variable in data for TTE endpoints. 
\n", "TTE endoints: ",paste(outArgs$endpoint[outArgs$type=="tte"],collapse=" "),"\n", "proposed \'status\' for these endoints: ",paste(outArgs$status[outArgs$type=="tte"],collapse=" "),"\n") } } ## outTest <- do.call(testArgs, args = outArgs) ## outTest <- do.call(testArgs, args = c(outArgs[setdiff(names(outArgs),"data")], list(data = dt.tempo))) ## if(!is.null(outArgs$strata)){ ## stop("Cannot use argument \'strata\' with powerBuyseTest \n") ## } ## if(outArgs$method.inference %in% c("none","u-statistic") == FALSE){ ## stop("Argument \'method.inference\' must be \"none\" or \"u-statistic\" \n") ## } } cpus <- outArgs$cpus outArgs$cpus <- 1 outArgs$trace <- 0 ## ** initialization sample size and data n.sample.size <- length(sample.sizeT) sample.sizeCmax <- sample.sizeC[n.sample.size] sample.sizeTmax <- sample.sizeT[n.sample.size] outArgs$level.treatment <- levels(as.factor(dt.tempo[[outArgs$treatment]])) ## outArgs$n.strata <- 1 ## outArgs$level.strata <- "1" ## outArgs$allstrata <- NULL ## ** Display if (trace > 1) { cat(" Simulation study with BuyseTest \n\n") if(trace > 2){ argsInit <- setdiff(names(as.list(args(initializeData))), c("","copy","data")) resInitData <- do.call(initializeData, args = c(outArgs[argsInit], list(copy = FALSE, data = dt.tempo))) do.call(printGeneral, args = c(outArgs, list(M.status = resInitData$M.status, paired = resInitData$paired))) if(outArgs$method.inference!="none"){ do.call(printInference, args = outArgs) } } if(!missing(sample.size) && !is.null(sample.size)){ text.sample.size <- paste0(" - sample size: ",paste(sample.size, collapse = " "),"\n") }else{ text.sample.size <- paste0(" - sample size: ",paste(sample.sizeC, collapse = " ")," (control)\n", " : ",paste(sample.sizeT, collapse = " ")," (treatment)\n") } cat("Simulation\n", " - repetitions: ",n.rep[1],"\n", " - cpus : ",cpus,"\n", sep = "") cat(" \n") } ## ** define environment envirBT <- new.env() ## envirBT[[deparse(call)]] <- sim name.copy <- c("sim", "option", "outArgs", "sample.sizeTmax", "sample.sizeCmax", "n.sample.size", "sample.sizeC", "sample.sizeT", "n.rep", "statistic", "null", "conf.level", "alternative", "transformation", "order.Hprojection", ".BuyseTest",".powerBuyseTest") for(iObject in name.copy){ ## iObject <- name.copy[2] envirBT[[iObject]] <- eval(parse(text = iObject)) } ## ** simulation study if (cpus == 1) { ## *** sequential simulation ls.simulation <- do.call(method.loop, args = list(X = 1:n.rep[1], FUN = function(X){ if(!is.null(seed)){set.seed(seqSeed[X])} iOut <- .powerBuyseTest(i = X, envir = envirBT, statistic = statistic, null = null, conf.level = conf.level, alternative = alternative, transformation = transformation, order.Hprojection = order.Hprojection) if(!is.null(seed)){ return(cbind(iOut, seed = seqSeed[X])) }else{ return(iOut) } }) ) }else { ## *** parallel simulation ## define progress bar if(trace>0){ pb <- utils::txtProgressBar(max = n.rep[1], style = 3) progress <- function(n){utils::setTxtProgressBar(pb, n)} opts <- list(progress = progress) }else{ opts <- list() } ## try sim test <- try(parallel::clusterCall(cl, fun = function(x){ sim(n.T = sample.sizeTmax, n.C = sample.sizeCmax) }), silent = TRUE) if(inherits(test,"try-error")){ stop(paste0("Could not run argument \'sim\' when using multiple CPUs. \n Consider trying first to run powerBuyseTest with cpus=1. \n If it runs, make sure that \'sim\' does not depend on any variable in the global environment or package without explicit mention of the namespace. 
\n",test)) } ## run simul i <- NULL ## [:forCRANcheck:] foreach ## not recognized by parallel::clusterExport since not exported by the package toExport <- c(".BuyseTest", ".powerBuyseTest", "wsumPairScore", "S4BuyseTest", "initializeData", "calcSample", "calcPeron", "pairScore2dt", "confint_Ustatistic", "validNumeric") ls.simulation <- foreach::`%dopar%`( foreach::foreach(i=1:n.rep[1], .export = toExport, .options.snow = opts), { if(!is.null(seed)){set.seed(seqSeed[i])} iOut <- .powerBuyseTest(i = i, envir = envirBT, statistic = statistic, null = null, conf.level = conf.level, alternative = alternative, transformation = transformation, order.Hprojection = order.Hprojection) if(!is.null(seed)){ return(cbind(iOut,seed = seqSeed[i])) }else{ return(iOut) } }) parallel::stopCluster(cl) if(trace>0){close(pb)} } dt.out <- data.table::as.data.table(do.call(rbind, ls.simulation)) ## ** export BuysePower.object <- S4BuysePower( alternative = alternative, method.inference = outArgs$method.inference, conf.level = conf.level, endpoint = outArgs$endpoint, threshold = outArgs$threshold, restriction = outArgs$restriction, type = outArgs$type, null = null, max.sample.size = max.sample.size, power = power, n.rep = n.rep, results = dt.out, sample.sizeT = sample.sizeT, sample.sizeC = sample.sizeC, seed = seqSeed ) return(BuysePower.object) } ## * .powerBuyseTest .powerBuyseTest <- function(i, envir, statistic, null, conf.level, alternative, transformation, order.Hprojection){ out <- NULL allBT <- vector(mode = "list", length = envir$n.sample.size) scoring.rule <- envir$outArgs$scoring.rule iidNuisance <- envir$outArgs$iidNuisance n.endpoint <- length(envir$outArgs$endpoint) n.statistic <- length(statistic) rerun <- (envir$n.sample.size>1) ## when creating S4 object keep.args <- c("index.C", "index.T", "index.strata", "type","endpoint","level.strata","level.treatment","scoring.rule","hierarchical","neutral.as.uninf","add.halfNeutral", "correction.uninf","method.inference","method.score","paired","strata","threshold","restriction","weightObs","weightEndpoint","pool.strata","n.resampling","call") ## ** Simulate data data <- data.table::as.data.table(envir$sim(n.T = envir$sample.sizeTmax, n.C = envir$sample.sizeCmax)) iInit <- initializeData(data = data, type = envir$outArgs$type, endpoint = envir$outArgs$endpoint, Uendpoint = envir$outArgs$Uendpoint, D = envir$outArgs$D, scoring.rule = envir$outArgs$scoring.rule, status = envir$outArgs$status, Ustatus = envir$outArgs$Ustatus, method.inference = envir$outArgs$method.inference, censoring = envir$outArgs$censoring, strata = envir$outArgs$strata, pool.strata = envir$outArgs$pool.strata, treatment = envir$outArgs$treatment, hierarchical = envir$outArgs$hierarchical, copy = FALSE, keep.pairScore = envir$outArgs$keep.pairScore, endpoint.TTE = envir$outArgs$endpoint.TTE, status.TTE = envir$outArgs$status.TTE, iidNuisance = envir$outArgs$iidNuisance) out.name <- names(iInit) envir$outArgs[out.name] <- iInit ## save for subsetting the data set with other sample sizes index.C <- envir$outArgs$index.C index.T <- envir$outArgs$index.T ## ** Point estimate for the largest sample size if(envir$outArgs$method.inference %in% c("none","u-statistic")){ ## compute estimate and possibly uncertainty outPoint <- .BuyseTest(envir = envir, method.inference = envir$outArgs$method.inference, iid = envir$outArgs$iid, pointEstimation = TRUE) ## create S4 object allBT[[envir$n.sample.size]] <- do.call("S4BuyseTest", args = c(outPoint, envir$outArgs[keep.args])) }else{ data[["..strata.."]] <- 
NULL data[["..rowIndex.."]] <- NULL data[["..NA.."]] <- NULL allBT[[envir$n.sample.size]] <- BuyseTest(data = data, scoring.rule = envir$outArgs$scoring.rule, pool.strata = envir$outArgs$pool.strata, correction.uninf = envir$outArgs$correction.uninf, model.tte = envir$outArgs$model.tte, method.inference = envir$outArgs$method.inference, n.resampling = envir$outArgs$n.resampling, strata.resampling = envir$outArgs$strata.resampling, hierarchical = envir$outArgs$hierarchical, weightEndpoint = envir$outArgs$weightEndpoint, neutral.as.uninf = envir$outArgs$neutral.as.uninf, add.halfNeutral = envir$outArgs$add.halfNeutral, trace = FALSE, treatment = envir$outArgs$treatment, endpoint = envir$outArgs$endpoint, type = envir$outArgs$type, threshold = envir$outArgs$threshold, restriction = envir$outArgs$restriction, status = envir$outArgs$status, operator = envir$outArgs$operator, censoring = envir$outArgs$censoring, strata = envir$outArgs$strata) } ## ** Loop over other sample sizes if(rerun>0){ ## test.bebu <- envir$outArgs$keep.pairScore && (envir$outArgs$method.inference %in% c("none","u-statistic")) && all(scoring.rule <= 4) && (envir$outArgs$correction.uninf == 0) for(iSize in 1:(envir$n.sample.size-1)){ ## if(test.bebu){ REMOVED AS IT IS SLOWER TO KEEP pairScore THAN RE-RUN THE C++ CODE ## outCov2 <- inferenceUstatisticBebu(tablePairScore = allBT[[envir$n.sample.size]]@tablePairScore, ## subset.C = 1:envir$sample.sizeC[iSize], ## subset.T = 1:envir$sample.sizeT[iSize], ## order = envir$outArgs$order.Hprojection, ## weightEndpoint = envir$outArgs$weightEndpoint, ## n.pairs = envir$sample.sizeC[iSize]*envir$sample.sizeT[iSize], ## n.C = envir$sample.sizeC[iSize], ## n.T = envir$sample.sizeT[iSize], ## level.strata = envir$outArgs$level.strata, ## n.strata = envir$outArgs$n.strata, ## n.endpoint = n.endpoint, ## endpoint = envir$outArgs$endpoint) ## outPoint2 <- list(count_favorable = outCov2$count_favorable, ## count_unfavorable = outCov2$count_unfavorable, ## count_neutral = outCov2$count_neutral, ## count_uninf = outCov2$count_uninf, ## outPoint$count_uninf ## delta = outCov2$delta, ## Delta = outCov2$Delta, ## outPoint$Delta ## n_pairs = outCov2$n.pairs, ## iidAverage_favorable = matrix(nrow = 0, ncol = 0), ## iidAverage_unfavorable = matrix(nrow = 0, ncol = 0), ## iidAverage_neutral = matrix(nrow = 0, ncol = 0), ## iidNuisance_favorable = matrix(nrow = 0, ncol = 0), ## iidNuisance_unfavorable = matrix(nrow = 0, ncol = 0), ## iidNuisance_neutral = matrix(nrow = 0, ncol = 0), ## covariance = outCov2$Sigma, ## tableScore = list() ## ) ## allBT[[iSize]] <- do.call("S4BuyseTest", args = c(outPoint2, envir$outArgs[keep.args])) ## }else{ iData <- rbind(data[index.C[1:envir$sample.sizeC[iSize]]], data[index.T[1:envir$sample.sizeT[iSize]]]) if(envir$outArgs$method.inference %in% c("none","u-statistic")){ envir$outArgs[out.name] <- initializeData(data = iData, type = envir$outArgs$type, endpoint = envir$outArgs$endpoint, Uendpoint = envir$outArgs$Uendpoint, D = envir$outArgs$D, scoring.rule = scoring.rule, status = envir$outArgs$status, Ustatus = envir$outArgs$Ustatus, method.inference = envir$outArgs$method.inference, censoring = envir$outArgs$censoring, strata = envir$outArgs$strata, pool.strata = envir$outArgs$pool.strata, treatment = envir$outArgs$treatment, hierarchical = envir$outArgs$hierarchical, copy = FALSE, keep.pairScore = envir$outArgs$keep.pairScore, endpoint.TTE = envir$outArgs$endpoint.TTE, status.TTE = envir$outArgs$status.TTE, iidNuisance = iidNuisance) outPoint <- .BuyseTest(envir = 
envir, iid = envir$outArgs$iid, method.inference = envir$outArgs$method.inference, pointEstimation = TRUE) allBT[[iSize]] <- do.call("S4BuyseTest", args = c(outPoint, envir$outArgs[keep.args])) }else{ iData[["..strata.."]] <- NULL iData[["..rowIndex.."]] <- NULL iData[["..NA.."]] <- NULL allBT[[iSize]] <- BuyseTest(data = iData, scoring.rule = envir$outArgs$scoring.rule, pool.strata = envir$outArgs$pool.strata, correction.uninf = envir$outArgs$correction.uninf, model.tte = envir$outArgs$model.tte, method.inference = envir$outArgs$method.inference, n.resampling = envir$outArgs$n.resampling, strata.resampling = envir$outArgs$strata.resampling, hierarchical = envir$outArgs$hierarchical, weightEndpoint = envir$outArgs$weightEndpoint, neutral.as.uninf = envir$outArgs$neutral.as.uninf, add.halfNeutral = envir$outArgs$add.halfNeutral, trace = FALSE, treatment = envir$outArgs$treatment, endpoint = envir$outArgs$endpoint, type = envir$outArgs$type, threshold = envir$outArgs$threshold, restriction = envir$outArgs$restriction, status = envir$outArgs$status, operator = envir$outArgs$operator, censoring = envir$outArgs$censoring, strata = envir$outArgs$strata) } ## } } } ## ** Inference for(iSize in 1:envir$n.sample.size){ for(iStatistic in statistic){ for(iTransformation in transformation){ for(iOrder.Hprojection in order.Hprojection){ iCI <- suppressMessages(confint(allBT[[iSize]], statistic = iStatistic, null = null[iStatistic], conf.level = conf.level, alternative = alternative, order.Hprojection = iOrder.Hprojection, transformation = iTransformation)) iTransform <- "none" if(!is.null(attr(iCI,"nametransform"))){ iTransform <- attr(iCI,"nametransform") }else{ iTransform <- "none" } out <- rbind(out, cbind(data.table::data.table(n.T = envir$sample.sizeT[[iSize]], n.C = envir$sample.sizeC[[iSize]], endpoint = rownames(iCI), statistic = iStatistic, transformation = iTransform, order.Hprojection = iOrder.Hprojection, stringsAsFactors = FALSE), iCI) ) } } } } ## ** Export rownames(out) <- NULL return(out) } ###################################################################### ### powerBuyseTest.R ends here
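## Worked example (illustration only): the sample size approximation used above,
## N = sigma2 * (qnorm(1-alpha/2) + qnorm(power))^2 / delta^2 (two-sided case),
## with made-up values for the estimated effect `delta` and asymptotic variance
## `sigma2` (both are hypothetical, not outputs of the package):
if(FALSE){
    delta <- 0.15                    ## estimated net benefit on the large sample
    sigma2 <- 1.2                    ## estimated asymptotic variance
    alpha <- 0.05 ; power <- 0.8
    N <- sigma2 * (stats::qnorm(1 - alpha/2) + stats::qnorm(power))^2 / delta^2
    ceiling(N * c(C = 0.5, T = 0.5)) ## group-specific sizes for a 1:1 ratio
}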
/scratch/gouwar.j/cran-all/cranData/BuyseTest/R/powerBuyseTest.R
### iid.logit.R --- ##---------------------------------------------------------------------- ## Author: Brice Ozenne ## Created: aug 3 2021 (11:55) ## Version: ## Last-Updated: jun 28 2023 (14:15) ## By: Brice Ozenne ## Update #: 95 ##---------------------------------------------------------------------- ## ### Commentary: ## ### Change Log: ##---------------------------------------------------------------------- ## ### Code: ## * .predict.logit (documentation) ##' @title Predicted Probability with Influence Function ##' @description Compute the predicted probabilities from a logistic regression, ##' with their (robust) standard error, ##' and the corresponding influence function. ##' @noRd ##' ##' @param object a logistic model. ##' @param newdata [data.frame] dataset containing the covariates for which the predicted probabilities should be computed. ##' @param level [character] level of the outcome for which the probability should be computed. ##' @param robust [logical] when FALSE uses the individual contribution to the modeled variance-covariance matrix as iid decomposition. ##' ## * .predict.logit (examples) ##' @examples ##' \dontrun{ ## will not run as .predict.logit is not exported ##' set.seed(10) ##' n <- 100 ##' df <- data.frame(Y = rbinom(n, prob = 0.5, size = 1), X1 = rnorm(n), X2 = rnorm(n)) ##' e.logit <- glm(Y~X1+X2, data = df, family = binomial(link="logit")) ##' ##' test1 <- .predict.logit(e.logit, newdata = df[1:5,]) ##' test1["se",] - sqrt(diag(crossprod(attr(test1,"iid")))) ## exact ##' test1["var.se",] - diag(crossprod(attr(test1,"iid.se"))) ## exact ##' ##' GS <- predict(e.logit, newdata = df[1:5,], se = TRUE, type = "response") ##' test2 <- .predict.logit(e.logit, newdata = df[1:5,], robust = FALSE) ##' test2["estimate",] - GS$fit ## exact ##' test2["se",] - GS$se.fit ## exact ##' test2["se",] - sqrt(diag(crossprod(attr(test2,"iid")))) ## approximate ##' ## since it uses the robust estimator of the correlation ##' ## (and the modeled estimator of the variance) ##' ##' ## Sanity check (fully stratified model) ##' df <- data.frame(Y = rbinom(n, prob = 0.5, size = 1), ##' X1 = rnorm(n), ##' X2 = as.factor(rbinom(n, size = 1, prob = 0.5))) ##' newdata <- data.frame(X1=c(0,1),X2=as.factor(0:1)) ##' ##' e.logit <- glm(Y~X1+X2, data = df, family = binomial(link="logit")) ##' e.predlogit <- .predict.logit(e.logit, newdata = newdata) ##' cor(attr(e.predlogit,"iid")) ##' ##' e.logitS <- glm(Y~X1*X2, data = df, family = binomial(link="logit")) ##' e.predlogitS <- .predict.logit(e.logitS, newdata = newdata) ##' cor(attr(e.predlogitS,"iid")) ##' } ## * .predict.logit (simulation study) ## warper <- function(sim,n){ ## df <- data.frame(Y = rbinom(n, prob = 0.5, size = 1), X1 = rnorm(n), X2 = rnorm(n)) ## e.logit <- glm(Y~X1+X2, data = df, family = binomial(link="logit")) ## res <- .predict.logit(e.logit, newdata = data.frame(X1=2,X2=0.5)) ## out <- cbind(sim = sim, n=n,t(res)) ## return(out) ## } ## library(pbapply) ## n.sim <- 1000 ## ls.res <- pblapply(1:n.sim, FUN = function(iSim){ ## rbind(warper(sim = iSim, n = 50), ## warper(sim = iSim, n = 100), ## warper(sim = iSim, n = 250), ## warper(sim = iSim, n = 500), ## warper(sim = iSim, n = 1000)) ## }) ## dtW.res <- as.data.table(do.call(rbind,ls.res)) ## dtW.res[,empirical.se := sd(estimate), by = "n"] ## dtW.res[,empirical.var.se := var(se), by = "n"] ## gg.se <- ggplot(dtW.res, aes(x=as.factor(n))) + geom_boxplot(aes(y=se)) + geom_point(aes(y=empirical.se), shape = 2, size = 2) + geom_line(aes(y=empirical.se, group = "d")) ## gg.var.se <- ggplot(dtW.res, aes(x=as.factor(n))) +
geom_boxplot(aes(y=var.se)) + geom_point(aes(y=empirical.var.se), shape = 2, size = 2) + geom_line(aes(y=empirical.var.se, group = "d")) ## gg.var.se + coord_cartesian(ylim = c(0,0.00001)) ## * .predict.logit (code) .predict.logit <- function(object, newdata, level = NULL, robust = TRUE){ ## ** check input if (!inherits(object,"glm")){ stop("Only implemented for glm objects. \n") } if (object$family$family %in% c("binomial","quasibinomial") == FALSE){ stop("Not implemented for other families than binomial or quasibinomial. \n") } if (object$family$link!="logit"){ stop("Not implemented for other link function than logit. \n") } n.newobs <- NROW(newdata) ## ** prepare ff <- stats::formula(object) X <- stats::model.matrix(delete.response(terms(ff)), newdata) beta <- coef(object) name.param <- names(beta) n.param <- length(beta) iid.beta <- lava::iid(object) n.obs <- NROW(iid.beta) ## ** compute predictions Xbeta <- as.double(X %*% beta) ## Xbeta - predict(object, type = "link", newdata = newdata, se = FALSE) pred <- 1/(1+exp(-Xbeta)) ## pred - predict(object, type = "response", newdata = newdata, se = FALSE) ## ** compute variance of the predictions if(robust){ Sigma.beta <- crossprod(iid.beta) }else{ Sigma.beta <- stats::vcov(object) } ## var.Xbeta <- X %*% Sigma.beta %*% t(X) var.Xbeta <- rowSums((X %*% Sigma.beta) * X) ## var.pred <- var.Xbeta * (-exp(-Xbeta) / (1+exp(-Xbeta))^2)^2 var.pred <- var.Xbeta * exp(-2*Xbeta) / (1+exp(-Xbeta))^4 se.pred <- sqrt(var.pred) ## se.pred - predict(object, type = "response", newdata = newdata, se = TRUE)$se.fit ## ** compute influence function of the predictions if(robust == FALSE){ seR.beta <- sqrt(colSums(iid.beta^2)) se.beta <- sqrt(diag(Sigma.beta)) iid.beta <- .rowMultiply_cpp(iid.beta, se.beta/seR.beta) } iid.pred <- iid.beta %*% t(.colMultiply_cpp(X, scale = exp(-Xbeta)/(1+exp(-Xbeta))^2)) ## colSums(iid.pred) ## colSums(iid.pred^2) - var.pred ## ** compute the influence function of the variance of the prediction if(robust){ iid.Sigma.beta <- array(unlist(lapply(1:n.obs, function(i){crossprod(iid.beta[i,,drop=FALSE])-Sigma.beta/n.obs})), dim = c(n.param,n.param,n.obs), dimnames = list(name.param,name.param,NULL)) }else{ iid.Sigma.beta <- .vcov.logit(object, indiv = TRUE, center = TRUE) } ## apply(iid.Sigma.beta,1:2,sum) iid.var.Xbeta <- do.call(rbind,lapply(1:n.obs, FUN = function(iObs){rowSums((X %*% iid.Sigma.beta[,,iObs]) * X)})) iid.var.e2XB_1eXb4 <- iid.beta %*% t(.colMultiply_cpp(X, scale = -2*exp(-2*Xbeta)/(1+exp(-Xbeta))^4 + 4*exp(-3*Xbeta)/(1+exp(-Xbeta))^5)) ## colSums(iid.var.Xbeta) iid.var.pred <- .rowMultiply_cpp(iid.var.Xbeta, exp(-2*Xbeta) / (1+exp(-Xbeta))^4) + .rowMultiply_cpp(iid.var.e2XB_1eXb4, var.Xbeta) iid.se.pred <- .rowScale_cpp(iid.var.pred, 2*se.pred) ## ** set correct level if(!is.null(level)){ matching.Ylevel <- table(object$data[[all.vars(formula(object))[1]]], object$y) all.levels <- rownames(matching.Ylevel) level <- match.arg(level, all.levels) index.level <- which(matching.Ylevel[level,]>0) if(length(index.level) > 1){ stop("Unknown value for the outcome variable \n") }else if(index.level == 1){ pred <- 1 - pred iid.pred <- - iid.pred } } ## ** export out <- rbind(estimate = pred, se = se.pred, var.se = colSums(iid.se.pred^2)) attr(out,"iid") <- iid.pred attr(out,"iid.se") <- iid.se.pred return(out) } ## * .score.logit #' @title Score for Logistic Regressions #' @description Compute the first derivative of the log-likelihood of IPCW logistic regressions.
#' @noRd #' #' @param object a glm object corresponding to a logistic regression. #' @param indiv [logical] should the individual contribution be output instead of the total score? #' .score.logit <- function(object, indiv){ X <- stats::model.matrix(object) pi <- stats::predict(object, type = "response") Y <- object$y W <- object$prior.weights out <- .colMultiply_cpp(X, scale = W*(Y - pi)) colnames(out) <- colnames(X) if(indiv){ return(out) }else{ return(colSums(out)) } } ## * .information.logit #' @title Information for Logistic Regressions #' @description Compute the information (i.e. opposite of the expectation of the second derivative of the log-likelihood) for logistic regressions. #' @noRd #' #' @param object a glm object corresponding to a logistic regression. #' @param indiv [logical] should the individual contribution be output instead of the total information? #' @param center [logical] should the individual contribution be centered around the average? #' .information.logit <- function(object, indiv, center){ X <- stats::model.matrix(object) n.obs <- NROW(X) n.param <- NCOL(X) pi <- stats::predict(object, type = "response") W <- object$prior.weights tXWpi <- t(.colMultiply_cpp(X, scale = W*pi*(1-pi))) if(indiv){ if(center){ Info <- tXWpi %*% X out <- array(unlist(lapply(1:n.obs, function(i){tXWpi[,i,drop=FALSE] %*% X[i,,drop=FALSE]-Info/n.obs})), dim = c(n.param,n.param,n.obs), dimnames = list(colnames(X),colnames(X),NULL)) }else{ out <- array(unlist(lapply(1:n.obs, function(i){tXWpi[,i,drop=FALSE] %*% X[i,,drop=FALSE]})), dim = c(n.param,n.param,n.obs), dimnames = list(colnames(X),colnames(X),NULL)) } return(out) ## apply(out, MARGIN = 1:2, FUN = sum) - tXWpi %*% X }else{ out <- tXWpi %*% X rownames(out) <- colnames(out) return(out) } } ## * .vcov.logit #' @title Variance-covariance matrix for Logistic Regressions #' @description Compute the variance covariance matrix (i.e. inverse of the information) for logistic regressions. #' @noRd #' #' @param object a glm object corresponding to a logistic regression. #' @param indiv [logical] should the individual contribution be output instead of the total variance-covariance? #' @param center [logical] should the individual contribution be centered around the average? #' .vcov.logit <- function(object, indiv, center){ X <- stats::model.matrix(object) n.obs <- NROW(X) n.param <- NCOL(X) pi <- stats::predict(object, type = "response") W <- object$prior.weights tXWpi <- t(.colMultiply_cpp(X, scale = W*pi*(1-pi))) Sigma <- solve(tXWpi %*% X) if(indiv){ if(center){ out <- array(unlist(lapply(1:n.obs, function(i){Sigma %*% tXWpi[,i,drop=FALSE] %*% X[i,,drop=FALSE] %*% Sigma - Sigma/n.obs})), dim = c(n.param,n.param,n.obs), dimnames = list(colnames(X),colnames(X),NULL)) }else{ out <- array(unlist(lapply(1:n.obs, function(i){Sigma %*% tXWpi[,i,drop=FALSE] %*% X[i,,drop=FALSE] %*% Sigma})), dim = c(n.param,n.param,n.obs), dimnames = list(colnames(X),colnames(X),NULL)) } return(out) ## apply(out, MARGIN = 1:2, FUN = sum) - tXWpi %*% X }else{ out <- Sigma rownames(out) <- colnames(out) return(out) } } ##---------------------------------------------------------------------- ### iid.logit.R ends here
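## Sanity check (illustration only): the delta-method factor used in .predict.logit
## above. With eta = X beta and p = 1/(1+exp(-eta)), dp/deta = p*(1-p), so that
## Var(p) is approximated by Var(eta) * exp(-2*eta)/(1+exp(-eta))^4, the expression
## used in the function. The value of `eta` below is arbitrary.
if(FALSE){
    eta <- 0.7
    p <- 1/(1 + exp(-eta))
    all.equal(p * (1 - p), exp(-eta)/(1 + exp(-eta))^2) ## TRUE: two ways to write dp/deta
}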
/scratch/gouwar.j/cran-all/cranData/BuyseTest/R/predict.logit.R
### rbind.performanceResample.R --- ##---------------------------------------------------------------------- ## Author: Brice Ozenne ## Created: apr 21 2022 (10:05) ## Version: ## Last-Updated: apr 22 2022 (10:08) ## By: Brice Ozenne ## Update #: 34 ##---------------------------------------------------------------------- ## ### Commentary: ## ### Change Log: ##---------------------------------------------------------------------- ## ### Code: ## * rbind.performanceResample (documentation) ##' @title Combine Resampling Results For Performance Objects ##' @description Combine permutation or bootstrap samples. ##' Useful to run parallel calculations (see example below). ##' ##' @param ... performance objects. ##' @param tolerance [numeric] maximum acceptable difference between the point estimates. ##' Can be \code{NA} to skip this sanity check. ##' ##' @examples ##' if(FALSE){ ##' ##' #### simulate data #### ##' set.seed(10) ##' n <- 100 ##' df.train <- data.frame(Y = rbinom(n, prob = 0.5, size = 1), ##' X1 = rnorm(n), X2 = rnorm(n), X3 = rnorm(n), X4 = rnorm(n), ##' X5 = rnorm(n), X6 = rnorm(n), X7 = rnorm(n), X8 = rnorm(n), ##' X9 = rnorm(n), X10 = rnorm(n)) ##' df.train$Y <- rbinom(n, size = 1, ##' prob = 1/(1+exp(-df.train$X5 - df.train$X6 - df.train$X7))) ##' ##' #### fit models #### ##' e.null <- glm(Y~1, data = df.train, family = binomial(link="logit")) ##' e.logit <- glm(Y~X1+X2, data = df.train, family = binomial(link="logit")) ##' e.logit2 <- glm(Y~X1+X2+X3+X4+X5+X6+X7+X8+X9+X10, data = df.train, ##' family = binomial(link="logit")) ##' ##' #### evaluate model (same seed) #### ##' fold.repetition <- 5 ## 0: internal perf (fast) ##' ## >0: 10 fold CV repeated (slow) ##' test <- performanceResample(list(e.logit,e.logit2), seed = 10, ##' fold.repetition = fold.repetition, n.resampling = 100) ##' test.1 <- performanceResample(list(e.logit,e.logit2), seed = 10, ##' fold.repetition = fold.repetition, n.resampling = 1:50) ##' test.2 <- performanceResample(list(e.logit,e.logit2), seed = 10, ##' fold.repetition = fold.repetition, n.resampling = 51:100) ##' rbind(test.1,test.2) ##' test ##' ##' ## Note: when the prediction model calls the RNG then test.1 and test.2 may not give test ##' ##' #### evaluate model (different seed) #### ##' test.3 <- performanceResample(list(e.logit,e.logit2), seed = 11, ##' fold.repetition = fold.repetition, n.resampling = 1:50) ##' test.4 <- performanceResample(list(e.logit,e.logit2), seed = 12, ##' fold.repetition = fold.repetition, n.resampling = 51:100) ##' rbind(test.3,test.4, tolerance = NA) ## does not check equality of the point estimate ##' ## between test.3 and test.4 ##' test ##' } ## * rbind.performanceResample (code) ##' @export rbind.performance <- function(..., tolerance = 1e-5){ ls.perf <- list(...) out <- ls.perf[[1]] if(length(ls.perf)==1){ return(out) } if(any(sapply(ls.perf, function(iX){is.null(iX[["resampling"]])}))){ stop("Performance objects should contain permutation or bootstrap samples. \n", "(i.e. be an output of the performanceResample function) \n") } if(!is.na(tolerance)){ test <- lapply(ls.perf[-1], function(x){ all.equal(x$performance[,c("method","metric","model","estimate")], ls.perf[[1]]$performance[,c("method","metric","model","estimate")], tolerance = tolerance) }) test.logical <- sapply(test, function(iT){any(iT!=TRUE)}) if(any(test.logical)){ stop("Discrepancy between the point estimates: 1 vs.
",paste(which(test.logical)+1, collapse = ", ")," \n") } } ls.resampling <- lapply(ls.perf, "[[", "resampling") if(any(duplicated(unlist(lapply(ls.resampling, function(iL){unique(iL$sample)}))))){ stop("Same sample name among different performance objects. \n") } out$resampling <- do.call(rbind,ls.resampling) out$resampling$sample <- as.numeric(factor(out$resampling$sample)) out$resampling <- out$resampling[order(out$resampling$sample),] out$args$n.resampling <- max(out$resampling$sample) out$performance <- .performanceResample_inference(performance = out$performance[,c("method","metric","model","estimate")], resampling = out$resampling, type.resampling = out$args$type.resampling, conf.level = out$args$conf.level) return(out) } ##---------------------------------------------------------------------- ### rbind.performance.R ends here
/scratch/gouwar.j/cran-all/cranData/BuyseTest/R/rbind.performanceResample.R
## * Documentation - simBuyseTest #' @title Simulation of data for the BuyseTest #' @description Simulate categorical, continuous or time to event endpoints, possibly along with a strata variable. #' Categorical endpoints are simulated by thresholding a latent Gaussian variable (tobit model), #' continuous endpoints are simulated using a Gaussian distribution, #' and time to event endpoints are simulated using Weibull distributions for the event of interest, competing events, and censoring. #' This function is built upon the \code{lvm} and \code{sim} functions from the lava package. #' #' @param n.T [integer, >0] number of patients in the treatment arm #' @param n.C [integer, >0] number of patients in the control arm #' @param format [character] the format of the output. Can be \code{"data.table"}, \code{"data.frame"} or \code{"matrix"}. #' @param argsBin [list] arguments to be passed to \code{simBuyseTest_bin}. They specify the distribution parameters of the categorical endpoints. #' @param argsCont [list] arguments to be passed to \code{simBuyseTest_continuous}. They specify the distribution parameters of the continuous endpoints. #' @param argsTTE [list] arguments to be passed to \code{simBuyseTest_TTE}. They specify the distribution parameters of the time to event endpoints. #' @param n.strata [integer, >0] number of strata. \code{NULL} indicates no strata. #' @param level.strata [list of character vector] value associated to each strata. Must have same length as \code{n.strata}. #' @param names.strata [character vector] name of the strata variables. Must have same length as \code{n.strata}. #' @param name.cluster [character] name of the cluster variable. If \code{NULL} no cluster variable is created. #' @param prefix.cluster [character] character string to be added to the cluster index. #' @param name.treatment [character] name of the treatment variable. #' @param level.treatment [character vector of length 2] levels of the treatment variable. #' @param latent [logical] If \code{TRUE} also export the latent variables (e.g. censoring times or event times). #' #' @return A data.frame, data.table, or matrix depending of the argument \code{format}. #' #' @details #' Endpoints are simulated independently of the strata variable and independently of each other, #' with the exception of categorical endpoint and the time to event endpoints that can be correlated #' by specifying a non-0 value for the \code{rho.T} and \code{rho.C} elements of the argument \code{argsBin}. #' #' Arguments in the list \code{argsBin}: #' \itemize{ #' \item\code{p.T} list of probabilities for the values taken by each endpoint (categorical endpoint, treatment group). #' \item\code{p.C} same as \code{p.T} but for the control group. #' \item\code{rho.T} value of the regression coefficient between the underlying latent variable and the survival time. #' Only implemented for weibull distributed survival times. #' \item\code{rho.C} same as \code{rho.T} but for the control group. #' \item\code{name} names of the binary variables. #' } #' #' Arguments in the list \code{argsCont}: #' \itemize{ #' \item\code{mu.T} expected value of each endpoint (continuous endpoint, treatment group). #' \item\code{mu.C} same as \code{mu.C} but for the control group. #' \item\code{sigma.T} standard deviation of the values of each endpoint (continuous endpoint, treatment group). #' \item\code{sigma.C} same as \code{sigma.T} but for the control group. #' \item\code{name} names of the continuous variables. 
#' } #' #' Arguments in the list \code{argsTTE}: #' \itemize{ #' \item\code{CR} should competing risks be simulated? #' \item\code{scale.T,scale.C,scale.CR,scale.censoring.T,scale.censoring.C} scale parameter of the Weibull distribution for, respectively, #' the event of interest in the treatment group, #' the event of interest in the control group, #' the competing event in both groups, #' the censoring mechanism in the treatment group, #' the censoring mechanism in the control group #' \item\code{shape.T,shape.C,shape.CR,shape.censoring.T,shape.censoring.C} shape parameter of the Weibull distribution for, respectively, #' the event of interest in the treatment group, #' the event of interest in the control group, #' the competing event in both groups, #' the censoring mechanism in the treatment group, #' the censoring mechanism in the control group #' \item\code{dist.T,dist.C,dist.CR,dist.censoring.T,dist.censoring.C} type of distribution (\code{"weibull"}, \code{"uniform"}, \code{"piecewiseExp"}) for, respectively, #' the event of interest in the treatment group, #' the event of interest in the control group, #' the competing event in both groups, #' the censoring mechanism in the treatment group, #' the censoring mechanism in the control group. #' For uniform distirbutions the (scale,shape) parameters becomes the support (min, max) of the censoring distribution. #' For piecewise exponential distributions the (scale,shape) should be lists of numeric (see example) #' and the shape parameters becomes the time parameters (first element should be 0). #' \item\code{name} names of the time to event variables. #' \item\code{name.censoring} names of the event type indicators. #' #' } #' #' @examples #' library(data.table) #' #' n <- 1e2 #' #' #### by default #### #' simBuyseTest(n) #' #' ## with a strata variable having 5 levels #' simBuyseTest(n, n.strata = 5) #' ## with a strata variable named grade #' simBuyseTest(n, n.strata = 5, names.strata = "grade") #' ## several strata variables #' simBuyseTest(1e3, n.strata = c(2,4), names.strata = c("Gender","AgeCategory")) #' #' #### only categorical endpoints #### #' args <- list(p.T = list(c(low=0.1,moderate=0.5,high=0.4))) #' dt.bin <- simBuyseTest(n, argsBin = args, argsCont = NULL, argsTTE = NULL) #' table(dt.bin$toxicity)/NROW(dt.bin) #' #' args <- list(p.T = list(c(low=0.1,moderate=0.5,high=0.4), c(0.1,0.9))) #' dt.bin <- simBuyseTest(n, argsBin = args, argsCont = NULL, argsTTE = NULL) #' table(dt.bin$toxicity1)/NROW(dt.bin) #' table(dt.bin$toxicity2)/NROW(dt.bin) #' #' #### only continuous endpoints #### #' args <- list(mu.T = c(3:5/10), sigma.T = rep(1,3)) #' dt.cont <- simBuyseTest(n, argsBin = NULL, argsCont = args, argsTTE = NULL) #' c(mean(dt.cont$score1), mean(dt.cont$score2), mean(dt.cont$score3)) #' c(sd(dt.cont$score1), sd(dt.cont$score2), sd(dt.cont$score3)) #' #' #### only TTE endpoints #### #' ## weibull distributed #' args <- list(scale.T = c(3:5/10), scale.censoring.T = rep(1,3)) #' dt.tte <- simBuyseTest(n, argsBin = NULL, argsCont = NULL, argsTTE = args) #' 1/c(sum(dt.tte$eventtime1)/sum(dt.tte$status1), #' sum(dt.tte$eventtime2)/sum(dt.tte$status2), #' sum(dt.tte$eventtime3)/sum(dt.tte$status3)) #' #' 1/c(sum(dt.tte$eventtime1)/sum(dt.tte$status1==0), #' sum(dt.tte$eventtime2)/sum(dt.tte$status2==0), #' sum(dt.tte$eventtime3)/sum(dt.tte$status3==0)) #' #' hist(dt.tte$eventtime1) #' #' ## uniform distributed #' args <- list(scale.T = 0, shape.T = 1, dist.T = "uniform", scale.censoring.T = 1e5, #' scale.C = 0, shape.C = 2, 
dist.C = "uniform", scale.censoring.C = 1e5) #' dt.tte <- simBuyseTest(n, argsBin = NULL, argsCont = NULL, argsTTE = args) #' #' par(mfrow=c(1,2)) #' hist(dt.tte$eventtime[dt.tte$treatment=="C"]) #' hist(dt.tte$eventtime[dt.tte$treatment=="T"]) #' #' ## piecewise constant exponential distributed #' ## time [0;4]: scale parameter 10 #' ## time [4;12]: scale parameter 13 #' ## time [12;18.]: scale parameter 18 #' ## time [18.5;36]: scale parameter 31 #' ## after that: scale parameter 37 #' vec.scale <- list(c(10,13,18,31,100)) #' vec.time <- list(c(0,4,12,18.5,36)) #' args <- list(scale.T = vec.scale, shape.T = vec.time, dist.T = "piecewiseExp", #' scale.C = 10, shape.C = 1, dist.C = "weibull", #' scale.censoring.T = 1e5) #' dt.tte <- simBuyseTest(n, argsBin = NULL, argsCont = NULL, argsTTE = args) #' #' if(require(prodlim)){ #' plot(prodlim(Hist(eventtime,status)~treatment, data = dt.tte)) #' } #' #' #### correlated categorical / time to event endpoint #### #' ## WARNING: only for weibull distributed time to event endpoint #' args.bin <- list(p.T = list(c(low=0.1,moderate=0.5,high=0.4)), rho.T = 1) #' args.tte <- list(scale.T = 2, scale.censoring.T = 1) #' dt.corr <- simBuyseTest(n, argsBin = args.bin, argsCont = NULL, argsTTE = args.tte) #' #' 1/(sum(dt.corr$eventtime)/sum(dt.corr$status)) #' 1/(sum(dt.corr$eventtime)/sum(dt.corr$status==0)) #' table(dt.corr$toxicity)/NROW(dt.corr) #' #' boxplot(eventtime ~ toxicity, data = dt.corr) #' #' @keywords datagen #' @author Brice Ozenne ## * Function simBuyseTest #' @export simBuyseTest <- function(n.T, n.C = NULL, argsBin = list(), argsCont = list(), argsTTE = list(), names.strata = NULL, level.strata = NULL, n.strata = NULL, name.cluster = "id", prefix.cluster = NULL, name.treatment = "treatment", level.treatment = c("C","T"), format = "data.table", latent = FALSE){ option <- BuyseTest.options() ## ** normalize arguments if(!is.null(level.strata)){ if(!is.null(n.strata)){ message("Argument \'n.strata\' ignored when argument \'level.strata\' is specified. \n") } if(!is.list(level.strata) && length(names.strata)==1){ level.strata <- list(level.strata) } n.strata <- sapply(level.strata,length) }else if(is.null(level.strata) && !is.null(n.strata)){ level.strata <- lapply(n.strata, function(iN){letters[1:iN]}) }else if(!is.null(names.strata)){ stop("Argument \'n.strata\' or \'names.strata\' must be specified to indicate the number of possible values per strata variable. \n") } if(is.null(names.strata) && !is.null(level.strata)){ if(!is.null(names(level.strata))){ names.strata <- names(level.strata) }else if(length(level.strata)==1){ names.strata <- "strata" }else{ names.strata <- paste0("strataVar",1:n.strata) } }else if(is.null(names.strata) && !is.null(n.strata)){ if(length(n.strata)==1){ names.strata <- "strata" }else{ names.strata <- paste0("strataVar",1:n.strata) } } ## ** check arguments if(is.null(n.C)){n.C <- n.T} if(option$check){ validNumeric(n.C, min = 0, valid.length = 1, method = "simBuyseTest") validNumeric(n.T, min = 0, valid.length = 1, method = "simBuyseTest") validInteger(n.strata, valid.length = length(names.strata), refuse.NULL = FALSE, min = 1, method = "simBuyseTest") if(!is.null(names.strata) && !is.list(level.strata)){ stop("Argument \'level.strata \' must be a list. \n") } if(!is.null(names.strata) && length(level.strata)!=length(names.strata)){ stop("Argument \'level.strata \' must be a list of ",length(names.strata)," elements. \n", "Each element indicate the possible values for each strata variable. 
\n") } validCharacter(level.treatment, valid.length = 2, refuse.NULL = TRUE, method = "simBuyseTest") validCharacter(name.treatment, valid.length = 1, refuse.NULL = TRUE, method = "simBuyseTest") validCharacter(format, valid.length = 1, valid.values = c("data.table","data.frame","matrix"), method = "simBuyseTest") } ## ** build the generative model mT.lvm <- lvm() mC.lvm <- lvm() lava::categorical(mC.lvm,labels=level.treatment[1]) <- name.treatment lava::categorical(mT.lvm,labels=level.treatment[2]) <- name.treatment if(!is.null(argsTTE)){ newLVM <- do.call("simBuyseTest_TTE", args = c(list(modelT = mT.lvm, modelC = mC.lvm, check = option$check), argsTTE)) mT.lvm <- newLVM$modelT mC.lvm <- newLVM$modelC latentTTE <- newLVM$latent0 scale.T <- newLVM$scale.T scale.C <- newLVM$scale.C shape.T <- newLVM$shape.T shape.C <- newLVM$shape.C }else{ latentTTE <- NULL scale.T <- NULL scale.C <- NULL shape.T <- NULL shape.C <- NULL } if(!is.null(argsBin)){ testW.T <- !is.null(argsTTE$dist.T) && any("weibull" %in% argsTTE$dist.T == FALSE) testW.C <- !is.null(argsTTE$dist.C) && any("weibull" %in% argsTTE$dist.C == FALSE) if((testW.T || testW.C) && (!is.null(argsBin$rho.T) || !is.null(argsBin$rho.C))){ stop("Simulating correlated survival times and categorical outcomes only implemented for weibull distributed times") } newLVM <- do.call("simBuyseTest_bin", args = c(list(modelT = mT.lvm, modelC = mC.lvm, check = option$check, latentTTE = latentTTE, scale.T = scale.T, scale.C = scale.C, shape.T = shape.T, shape.C = shape.C), argsBin)) mT.lvm <- newLVM$modelT mC.lvm <- newLVM$modelC } if(!is.null(argsCont)){ newLVM <- do.call("simBuyseTest_cont", args = c(list(modelT = mT.lvm, modelC = mC.lvm,check = option$check), argsCont)) mT.lvm <- newLVM$modelT mC.lvm <- newLVM$modelC } ## ** add strata variable to the generative model if(!is.null(names.strata)){ for(iterS in 1:length(n.strata)){ if(any(names.strata[iterS] %in% lava::vars(mT.lvm))){ stop("simBuyseTest: variable already in the LVM \n", "variable: ",paste(names.strata[iterS][names.strata[iterS] %in% lava::vars(mT.lvm)], collapse = " "),"\n") } lava::categorical(mT.lvm, labels = level.strata[[iterS]]) <- names.strata[iterS] lava::categorical(mC.lvm, labels = level.strata[[iterS]]) <- names.strata[iterS] } } ## ** simulate data from the generative model df.T <- lava::sim(mT.lvm, n.T, latent = latent) df.C <- lava::sim(mC.lvm, n.C, latent = latent) ## ** export if(!is.null(name.cluster)){ if(is.null(prefix.cluster)){ res <- cbind(1:(n.T+n.C), do.call(format, args = rbind(df.C, df.T))) }else{ res <- cbind(paste0(prefix.cluster,1:(n.T+n.C)), do.call(format, args = rbind(df.C, df.T))) } names(res)[1] <- name.cluster }else{ res <- do.call(format, args = rbind(df.C, df.T)) } return(res) } ## * Function simBuyseTest_bin simBuyseTest_bin <- function(modelT, modelC, check, latentTTE, scale.T, scale.C, shape.T, shape.C, p.T = c("yes" = 0.5, "no" = 0.5), p.C = NULL, rho.T = NULL, rho.C = NULL, name = NULL){ ## ** initialisation if(!is.null(p.T) && !is.list(p.T)){ p.T <- list(p.T) } n.endpoints <- length(p.T) if(is.null(name)){ if(n.endpoints == 1){name <- "toxicity"}else{name <- paste0("toxicity",1:n.endpoints)} } if(is.null(p.C)){ p.C <- p.T }else if(!is.list(p.C)){ p.C <- list(p.C) } if(is.null(rho.T)){ rho.T <- rep(0, n.endpoints) } if(is.null(rho.C)){ rho.C <- rho.T } index.rho <- union(which(rho.T!=0), which(rho.C!=0)) names.values <- vector(mode = "list", length = n.endpoints) for(iterE in 1:n.endpoints){ if(is.null(names(p.T[[iterE]]))){ names.values[[iterE]] 
<- 1:length(p.T[[iterE]])
        }else{
            names.values[[iterE]] <- names(p.T[[iterE]])
        }
    }

    ## ** tests
    if(check){
        if(length(p.C)!=length(p.T)){
            stop("Arguments \'p.C\' and \'p.T\' must be a list with the same number of elements. \n",
                 "(each element defines the probability distribution of an endpoint; there must be the same number of endpoints in both groups) \n")
        }
        if(n.endpoints!=length(name)){
            stop("The length of argument \'name\' does not match the number of endpoints defined by argument \'p.T\' \n")
        }
        validNumeric(rho.T, valid.length = n.endpoints, method = "simBuyseTest")
        validNumeric(rho.C, valid.length = n.endpoints, method = "simBuyseTest")
        if(any(index.rho %in% seq_along(latentTTE) == FALSE)){
            stop("Each correlated toxicity endpoint must have a corresponding time to event endpoint when specifying a correlation parameter \n")
        }
        for(iterE in 1:n.endpoints){
            validNumeric(p.T[[iterE]], min = 0, max = 1, valid.length = NULL, name1 = "p.T", method = "simBuyseTest")
            if(abs(sum(p.T[[iterE]])-1)>1e-6){
                stop("For each endpoint, the sum of the probabilities in argument \'p.T\' must be 1. \n")
            }
            validNumeric(p.C[[iterE]], min = 0, max = 1, valid.length = length(p.T[[iterE]]), name1 = "p.C", method = "simBuyseTest")
            if(abs(sum(p.C[[iterE]])-1)>1e-6){
                stop("For each endpoint, the sum of the probabilities in argument \'p.C\' must be 1. \n")
            }
            if(!identical(names(p.T[[iterE]]),names(p.C[[iterE]]))){
                stop("The names in arguments \'p.T\' and \'p.C\' must be the same. \n")
            }
        }
    }

    ## ** model
    for(iterE in 1:n.endpoints){
        if(any(name[iterE] %in% lava::vars(modelT))){
            stop("simBuyseTest_bin: variable already in the LVM \n",
                 "variable: ",paste(name[iterE][name[iterE] %in% lava::vars(modelT)], collapse = " "),"\n")
        }
        iLatent.T <- paste0("eta_",name[iterE])
        iLatent.C <- paste0("eta_",name[iterE])
        iCut.T <- qnormweibull(cumsum(p.T[[iterE]])[-length(p.T[[iterE]])], scale = scale.T[iterE], shape = shape.T[iterE], rho = rho.T[iterE])
        iFct.T <- paste0("function(x, xcut = c(",paste0(iCut.T,collapse=","),"), xname = c(\"",paste0(names.values[[iterE]],collapse="\",\""),"\")){\n",
                         "    return(factor(findInterval(x[,1], vec = xcut), levels = 0:length(xcut), labels = xname))\n",
                         "}")
        if(abs(rho.T[iterE]) > 1e-12){
            lava::regression(modelT) <- as.formula(paste0(iLatent.T," ~ ",rho.T[iterE]," * ",latentTTE[iterE]))
        }
        modelT <- lava::`transform<-`(modelT, as.formula(paste0(name[iterE],"~",iLatent.T)), value = eval(parse(text = iFct.T)))
        lava::latent(modelT) <- as.formula(paste0("~",iLatent.T))

        iCut.C <- qnormweibull(cumsum(p.C[[iterE]])[-length(p.C[[iterE]])], scale = scale.C[iterE], shape = shape.C[iterE], rho = rho.C[iterE])
        iFct.C <- paste0("function(x, xcut = c(",paste0(iCut.C,collapse=","),"), xname = c(\"",paste0(names.values[[iterE]],collapse="\",\""),"\")){\n",
                         "    return(factor(findInterval(x[,1], vec = xcut), levels = 0:length(xcut), labels = xname))\n",
                         "}")
        if(abs(rho.C[iterE]) > 1e-12){
            lava::regression(modelC) <- as.formula(paste0(iLatent.C," ~ ",rho.C[iterE]," * ",latentTTE[iterE]))
        }
        modelC <- lava::`transform<-`(modelC, as.formula(paste0(name[iterE],"~",iLatent.C)), value = eval(parse(text = iFct.C)))
        lava::latent(modelC) <- as.formula(paste0("~",iLatent.C))
    }

    ## ** export
    return(list(modelT = modelT, modelC = modelC))
}

## * Function simBuyseTest_cont
simBuyseTest_cont <- function(modelT, modelC, check,
                              mu.T = 0, sigma.T = 1, mu.C = NULL, sigma.C = NULL, name = NULL){

    ## ** initialisation
    n.endpoints <- length(mu.T)
    if(is.null(name)){
        if(n.endpoints == 1){name <- "score"}else{name <- paste0("score",1:n.endpoints)}
    }
    if(is.null(mu.C)){mu.C <- 
mu.T} if(is.null(sigma.C)){sigma.C <- sigma.T} ## ** tests if(check){ validNumeric(mu.T, valid.length = NULL, method = "simBuyseTest") validNumeric(sigma.T, valid.length = n.endpoints, min = 0, method = "simBuyseTest") validNumeric(mu.C, valid.length = n.endpoints, method = "simBuyseTest") validNumeric(sigma.C, valid.length = n.endpoints, min = 0, method = "simBuyseTest") validCharacter(name, valid.length = n.endpoints, method = "simBuyseTest") } ## ** model for(iterE in 1:n.endpoints){ if(any(name[iterE] %in% lava::vars(modelT))){ stop("simBuyseTest_cont: variable already in the LVM \n", "variable: ",paste(name[iterE][name[iterE] %in% lava::vars(modelT)], collapse = " "),"\n") } lava::distribution(modelT, name[iterE]) <- lava::gaussian.lvm(link = "identity", mean = mu.T[iterE], sd = sigma.T[iterE]) lava::distribution(modelC, name[iterE]) <- lava::gaussian.lvm(link = "identity", mean = mu.C[iterE], sd = sigma.C[iterE]) } ## ** export return(list(modelT = modelT, modelC = modelC)) } ## * Function simBuyseTest_TTE simBuyseTest_TTE <- function(modelT, modelC, CR = FALSE, scale.T = 1/2, shape.T = rep(1, length(scale.T)), dist.T = rep("weibull", length(scale.T)), scale.C = NULL, shape.C = NULL, dist.C = NULL, scale.CR = NULL, shape.CR = NULL, dist.CR = NULL, scale.censoring.T = rep(1, length(scale.T)), shape.censoring.T = rep(1, length(scale.T)), dist.censoring.T = rep("weibull", length(scale.T)), scale.censoring.C = NULL, shape.censoring.C = NULL, dist.censoring.C = NULL, name = NULL, name.censoring = NULL, check){ ## ** initialisation n.endpoints <- length(scale.T) if(is.null(name)){ if(n.endpoints == 1){name <- "eventtime"}else{name <- paste0("eventtime",1:n.endpoints)} } if(is.null(name.censoring)){ if(n.endpoints == 1){name.censoring <- "status"}else{name.censoring <- paste0("status",1:n.endpoints)} } if(is.null(scale.C)){scale.C <- scale.T} if(is.null(shape.C)){shape.C <- shape.T} if(is.null(dist.C)){dist.C <- dist.T} if(is.null(scale.CR)){scale.CR <- scale.T} if(is.null(shape.CR)){shape.CR <- shape.T} if(is.null(dist.CR)){dist.CR <- dist.T} if(is.null(scale.censoring.C)){scale.censoring.C <- scale.censoring.T} if(is.null(shape.censoring.C)){shape.censoring.C <- shape.censoring.T} if(is.null(dist.censoring.C)){dist.censoring.C <- dist.censoring.T} name0 <- paste0(name,"Uncensored") if(CR){ nameCR <- paste0(name,"CompetingRisk") } nameC <- paste0(name,"Censoring") ## ** tests if(check){ ## Note: scale and shape are list of numeric when considering piecewise constant hazards validNumeric(scale.T, valid.length = NULL, unlist = is.list(scale.T), method = "simBuyseTest") validNumeric(shape.T, valid.length = n.endpoints, unlist = is.list(shape.T), method = "simBuyseTest") validCharacter(dist.T, valid.values = c("weibull", "uniform","piecewiseExp"), valid.length = n.endpoints, method = "simBuyseTest") validNumeric(scale.C, valid.length = n.endpoints, unlist = is.list(scale.C), min = 0, method = "simBuyseTest") validNumeric(shape.C, valid.length = n.endpoints, unlist = is.list(shape.C), min = 0, method = "simBuyseTest") validCharacter(dist.C, valid.values = c("weibull","uniform","piecewiseExp"), valid.length = n.endpoints, method = "simBuyseTest") validLogical(CR, valid.length = 1, method = "simBuyseTest") if(CR){ validNumeric(scale.CR, valid.length = n.endpoints, unlist = is.list(scale.CR), min = 0, method = "simBuyseTest") validNumeric(shape.CR, valid.length = n.endpoints, unlist = is.list(shape.CR), min = 0, method = "simBuyseTest") validCharacter(dist.CR, valid.values = 
c("weibull","uniform","piecewiseExp"), valid.length = n.endpoints, method = "simBuyseTest") } validNumeric(scale.censoring.T, valid.length = n.endpoints, unlist = is.list(scale.censoring.T), min = 0, method = "simBuyseTest") validNumeric(shape.censoring.T, valid.length = n.endpoints, unlist = is.list(shape.censoring.T), min = 0, method = "simBuyseTest") validCharacter(dist.censoring.T, valid.values = c("weibull","uniform","piecewiseExp"), valid.length = n.endpoints, method = "simBuyseTest") validNumeric(scale.censoring.C, valid.length = n.endpoints, unlist = is.list(scale.censoring.C), min = 0, method = "simBuyseTest") validNumeric(shape.censoring.C, valid.length = n.endpoints, unlist = is.list(shape.censoring.C), min = 0, method = "simBuyseTest") validCharacter(dist.censoring.C, valid.values = c("weibull","uniform","piecewiseExp"), valid.length = n.endpoints, method = "simBuyseTest") validCharacter(name, valid.length = n.endpoints, method = "simBuyseTest") validCharacter(name.censoring, valid.length = n.endpoints, method = "simBuyseTest") } ## ** model for(iterE in 1:n.endpoints){ allvarE <- c(name[iterE], name0[iterE], nameC[iterE], name.censoring[iterE]) if(any(allvarE %in% lava::vars(modelT))){ stop("simBuyseTest_TTE: variable already in the LVM \n", "variable: ",paste(allvarE[allvarE %in% lava::vars(modelT)], collapse = " "),"\n") } if(dist.T[iterE]=="uniform"){ lava::distribution(modelT, name0[iterE]) <- lava::uniform.lvm(a = scale.T[[iterE]], b = shape.T[[iterE]]) }else if(dist.T[iterE]=="weibull"){ lava::distribution(modelT, name0[iterE]) <- lava::weibull.lvm(scale = scale.T[[iterE]], shape = 1/shape.T[[iterE]]) }else if(dist.T[iterE]=="piecewiseExp"){ lava::distribution(modelT, name0[iterE]) <- lava::coxExponential.lvm(scale = scale.T[[iterE]], timecut = shape.T[[iterE]]) } if(dist.censoring.T[iterE]=="uniform"){ lava::distribution(modelT, nameC[iterE]) <- lava::uniform.lvm(a = scale.censoring.T[[iterE]], b = shape.censoring.T[[iterE]]) }else if(dist.censoring.T[iterE]=="weibull"){ lava::distribution(modelT, nameC[iterE]) <- lava::weibull.lvm(scale = scale.censoring.T[[iterE]], shape = 1/shape.censoring.T[[iterE]]) }else if(dist.censoring.T[iterE]=="piecewiseExp"){ lava::distribution(modelT, nameC[iterE]) <- lava::coxExponential.lvm(scale = scale.censoring.T[[iterE]], timecut = shape.censoring.T[[iterE]]) } if(CR){ if(dist.CR[iterE]=="uniform"){ lava::distribution(modelT, nameCR[iterE]) <- lava::uniform.lvm(a = scale.CR[[iterE]], b = shape.CR[[iterE]]) }else if(dist.CR[iterE]=="weibull"){ lava::distribution(modelT, nameCR[iterE]) <- lava::weibull.lvm(scale = scale.CR[[iterE]], shape = 1/shape.CR[[iterE]]) }else if(dist.CR[iterE]=="piecewiseExp"){ lava::distribution(modelT, nameCR[iterE]) <- lava::coxExponential.lvm(scale = scale.CR[[iterE]], timecut = shape.CR[[iterE]]) } txtSurv <- paste0(name[iterE], "~min(",nameCR[iterE],"=2,",name0[iterE],"=1,",nameC[iterE],"=0)") }else{ txtSurv <- paste0(name[iterE], "~min(",name0[iterE],"=1,",nameC[iterE],"=0)") } modelT <- lava::eventTime(modelT, stats::as.formula(txtSurv), name.censoring[iterE]) if(dist.C[iterE]=="uniform"){ lava::distribution(modelC, name0[iterE]) <- lava::uniform.lvm(a = scale.C[[iterE]], b = shape.C[[iterE]]) }else if(dist.C[iterE]=="weibull"){ lava::distribution(modelC, name0[iterE]) <- lava::weibull.lvm(scale = scale.C[[iterE]], shape = 1/shape.C[[iterE]]) }else if(dist.C[iterE]=="piecewiseExp"){ lava::distribution(modelC, name0[iterE]) <- lava::coxExponential.lvm(scale = scale.C[[iterE]], timecut = shape.C[[iterE]]) 
}

        if(dist.censoring.C[iterE]=="uniform"){
            lava::distribution(modelC, nameC[iterE]) <- lava::uniform.lvm(a = scale.censoring.C[[iterE]], b = shape.censoring.C[[iterE]])
        }else if(dist.censoring.C[iterE]=="weibull"){
            lava::distribution(modelC, nameC[iterE]) <- lava::weibull.lvm(scale = scale.censoring.C[[iterE]], shape = 1/shape.censoring.C[[iterE]])
        }else if(dist.censoring.C[iterE]=="piecewiseExp"){
            lava::distribution(modelC, nameC[iterE]) <- lava::coxExponential.lvm(scale = scale.censoring.C[[iterE]], timecut = shape.censoring.C[[iterE]])
        }

        if(CR){
            if(dist.CR[iterE]=="uniform"){
                lava::distribution(modelC, nameCR[iterE]) <- lava::uniform.lvm(a = scale.CR[[iterE]], b = shape.CR[[iterE]])
            }else if(dist.CR[iterE]=="weibull"){
                lava::distribution(modelC, nameCR[iterE]) <- lava::weibull.lvm(scale = scale.CR[[iterE]], shape = 1/shape.CR[[iterE]])
            }else if(dist.CR[iterE]=="piecewiseExp"){
                lava::distribution(modelC, nameCR[iterE]) <- lava::coxExponential.lvm(scale = scale.CR[[iterE]], timecut = shape.CR[[iterE]])
            }
            txtSurv <- paste0(name[iterE], "~min(",nameCR[iterE],"=2,",name0[iterE],"=1,",nameC[iterE],"=0)")
        }else{
            txtSurv <- paste0(name[iterE], "~min(",name0[iterE],"=1,",nameC[iterE],"=0)")
        }
        modelC <- lava::eventTime(modelC, stats::as.formula(txtSurv), name.censoring[iterE])

        if(CR){
            formula.latent <- as.formula(paste0("~",name0[iterE],"+",nameC[iterE],"+",nameCR[iterE]))
        }else{
            formula.latent <- as.formula(paste0("~",name0[iterE],"+",nameC[iterE]))
        }
        lava::latent(modelT) <- formula.latent
        lava::latent(modelC) <- formula.latent
    }

    ## ** export
    return(list(modelT = modelT, modelC = modelC,
                latent0 = name0, latentC = nameC,
                scale.T = scale.T, scale.C = scale.C, shape.T = shape.T, shape.C = shape.C))
}
/scratch/gouwar.j/cran-all/cranData/BuyseTest/R/simBuyseTest.R
## * Documentation - simCompetingRisks
#' @title Simulation of Gompertz competing risks data for the BuyseTest
#' @description Simulate Gompertz competing risks data with proportional (via prespecified sub-distribution hazard ratio) or
#' non-proportional sub-distribution hazards. A treatment variable with two groups (treatment and control) is created.
#' @name simCompetingRisks
#'
#' @param n.T [integer, >0] number of patients in the treatment arm
#' @param n.C [integer, >0] number of patients in the control arm
#' @param p.1C [double, 0-1] proportion of events of interest in the control group. Can be NULL if and only if \code{(b.1T, b.1C, b.2T, b.2C)}
#' are provided.
#' @param sHR [double, >0] pre-specified sub-distribution hazard ratio for the event of interest. Can be NULL if and only if
#' \code{(b.1T, b.1C, b.2T, b.2C)} are provided.
#' @param v.1C,v.1T,v.2C,v.2T [double, <0] shape parameters for the Gompertz distribution of the time to the event of interest in the control/treatment (C/T)
#' group and of the time to the competing event in the control/treatment (C/T) group respectively
#' @param b.1C,b.1T,b.2C,b.2T [double, >0] rate parameters for the Gompertz distribution of the time to the event of interest in the control/treatment (C/T)
#' group and of the time to the competing event in the control/treatment (C/T) group respectively. Can be NULL if and only if \code{(p.1C, sHR)} are
#' provided.
#' @param cens.distrib [character] censoring distribution. Can be \code{"exponential"} for exponential censoring or \code{"uniform"} for
#' uniform censoring. NULL means no censoring.
#' @param param.cens [>0] parameter for the censoring distribution. Should be a double for the rate parameter of the exponential censoring distribution
#' or a vector of doubles for the lower and upper bounds of the uniform censoring distribution. NULL means no censoring
#' @param latent [logical] If \code{TRUE}, also export the latent variables (e.g. true event times, true event types and censoring times).
#' NULL sets this parameter to \code{FALSE}.
#'
#' @details
#' The times to the event of interest and to the competing event in each group follow an improper Gompertz distribution
#' (see Jeong and Fine, 2006), whose cumulative distribution function is
#'
#' F(t; b, v) = 1 - exp(b (1 - exp(v t)) / v) \cr
#'
#' and whose hazard function is
#'
#' h(t; b, v) = b exp(v t)\cr
#'
#' The shape parameters must be negative to have improper distributions for the times to the two events in each group. Note however that
#' in each group, the overall cumulative incidence function must be proper (i.e. the maximum values of the cumulative incidence of each
#' event type sum up to 1 in each group). When only providing the shape parameters, the rate parameters are
#' computed to fulfill this condition. In case you wish to provide the rate parameters too, make sure that the condition is met.
#'
#' @examples
#'
#' #### Providing p.1C and sHR ####
#' d <- simCompetingRisks(n.T = 100, n.C = 100, p.1C = 0.55, v.1C = -0.30,
#' v.1T = -0.30, v.2C = -0.30, v.2T = -0.30, sHR = 0.5, b.1T = NULL,
#' b.1C = NULL, b.2T = NULL, b.2C = NULL)
#'
#' #### Providing the rate parameters ####
#' d <- simCompetingRisks(n.T = 100, n.C = 100, p.1C = NULL, v.1C = -0.30,
#' v.1T = -0.30, v.2C = -0.30, v.2T = -0.30, sHR = NULL, b.1T = 0.12,
#' b.1C = 0.24, b.2T = 0.33, b.2C = 0.18)
#'
#' #### With exponential censoring ####
#' d <- simCompetingRisks(n.T = 100, n.C = 100, p.1C = 0.55, v.1C = -0.30,
#' v.1T = -0.30, v.2C = -0.30, v.2T = -0.30, sHR = 0.5, b.1T = NULL,
#' b.1C = NULL, b.2T = NULL, b.2C = NULL, cens.distrib = "exponential",
#' param.cens = 0.8, latent = TRUE)
#'
#' #### With uniform censoring ####
#' d <- simCompetingRisks(n.T = 100, n.C = 100, p.1C = 0.55, v.1C = -0.30,
#' v.1T = -0.30, v.2C = -0.30, v.2T = -0.30, sHR = 0.5, b.1T = NULL,
#' b.1C = NULL, b.2T = NULL, b.2C = NULL, cens.distrib = "uniform",
#' param.cens = c(0, 7), latent=TRUE)
#'
#' @references Jeong J-H. and Fine J. (2006) \bold{Direct parametric inference for the cumulative incidence function}. \emph{Journal of the Royal Statistical
#' Society} 55: 187-200 \cr
#'
#' @author Eva Cantagallo
#'
#' @keywords datagen
#' @return A data.frame
#'

## * Function simCompetingRisks
#' @rdname simCompetingRisks
#' @export
#'
simCompetingRisks <- function(n.T, n.C, p.1C = NULL, v.1C, v.1T, v.2C, v.2T, sHR = NULL,
                              b.1T = NULL, b.1C = NULL, b.2T = NULL, b.2C = NULL,
                              cens.distrib = NULL, param.cens = NULL, latent = NULL) {

  # Compute rate parameters if not provided
  if(!is.null(b.1T) & !is.null(b.1C) & !is.null(b.2T) & !is.null(b.2C)) {
    p.1T <- 1 - exp(b.1T / v.1T)
    p.1C <- 1 - exp(b.1C / v.1C)
  } else if(!is.null(p.1C) & !is.null(sHR)) {
    b.1C <- v.1C * log(1 - p.1C)
    b.1T <- b.1C * sHR
    p.1T <- 1 - exp(b.1T / v.1T); p.2T <- 1 - p.1T
    p.2C <- 1 - p.1C
    b.2C <- v.2C * log(1 - p.2C)
    b.2T <- v.2T * log(1 - p.2T)
  } else {
    stop("Missing input argument: please provide either (b.1T, b.1C, b.2T, b.2C) or (p.1C, sHR)")
  }

  rF1T <- function(x) log(1 - v.1T * log(1 - x) / b.1T) / v.1T
  rF1C <- function(x) log(1 - v.1C * log(1 - x) / b.1C) / v.1C
  rF2T <- function(x) log(1 - v.2T * log(1 - x) / b.2T) / v.2T
  rF2C <- function(x) log(1 - v.2C * log(1 - x) / b.2C) / v.2C

  n <- (n.T + n.C)
  u <- stats::runif(n, 0, 1)
  data <- data.frame(treatment = c(rep(1, n.T), rep(0, n.C)), event.time = rep(0, n), event.type = rep(0, n))

  indexT1 <- which(data$treatment == 1 & u < p.1T)
  indexT2 <- which(data$treatment == 1 & u >= p.1T)
  indexC1 <- which(data$treatment == 0 & u < p.1C)
  indexC2 <- which(data$treatment == 0 & u >= p.1C)

  data$event.time[indexT1] <- rF1T(u[indexT1])
  data$event.type[indexT1] <- 1
  data$event.time[indexT2] <- rF2T(u[indexT2] - p.1T)
  data$event.type[indexT2] <- 2
  data$event.time[indexC1] <- rF1C(u[indexC1])
  data$event.type[indexC1] <- 1
  data$event.time[indexC2] <- rF2C(u[indexC2] - p.1C)
  data$event.type[indexC2] <- 2

  if(!is.null(cens.distrib)) {
    if(cens.distrib == "exponential") {
      data$censoring.time <- stats::rexp(n, rate = param.cens[1])
    } else if (cens.distrib == "uniform") {
      if(is.na(param.cens[2])) {
        stop("Missing parameter for uniform censoring distribution")
      }
      data$censoring.time <- stats::runif(n, min = param.cens[1], max = param.cens[2])
    }
    data$time <- apply(data[, c("event.time", "censoring.time")], 1, min)
    data$status <- ifelse(data$time == data$event.time, data$event.type, 0)
    ## test is.null() first: '!latent' on a NULL value would otherwise error
    if(is.null(latent) || !latent) {
      data_final <- data[, c('treatment', 'time', 
'status')] } else { data_final <- data } } else { data_final <- data colnames(data_final) <- c('treatment', 'time', 'status') } return(data_final) }
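## Quick empirical check (kept as a comment; the call below is illustrative):
## with the (p.1C, sHR) parameterisation and no censoring, the proportion of
## events of interest in the control arm should be close to p.1C.
## d <- simCompetingRisks(n.T = 1e4, n.C = 1e4, p.1C = 0.55, v.1C = -0.30,
##                        v.1T = -0.30, v.2C = -0.30, v.2T = -0.30, sHR = 0.5)
## mean(d$status[d$treatment == 0] == 1)  ## approximately 0.55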
/scratch/gouwar.j/cran-all/cranData/BuyseTest/R/simCompetingRisks.R
### summary.performance.R --- 
##----------------------------------------------------------------------
## Author: Brice Ozenne
## Created: apr  6 2022 (14:56) 
## Version: 
## Last-Updated: jun 27 2023 (14:19) 
##           By: Brice Ozenne
##     Update #: 54
##----------------------------------------------------------------------
## 
### Commentary: 
## 
### Change Log:
##----------------------------------------------------------------------
## 
### Code:

## * summary.performance
##' @title Summary Method for Performance Objects
##' @description Summary of the performance of binary classifiers
##'
##' @param object output of performance.
##' @param digits [numeric vector of length 2] number of digits used for the estimates and p-values.
##' @param print [logical] should the performance be printed in the console.
##' @param order.model [character vector] ordering of the models.
##' @param ... not used.
##'
##' @keywords print
##' @method summary performance
##' @export
summary.performance <- function(object, order.model = NULL, digits = c(3,3), print = TRUE, ...){

    ## ** re-order models
    if(!is.null(order.model)){
        if(any(duplicated(order.model))){
            stop("Argument \'order.model\' should not contain duplicated values. \n")
        }
        Umodel <- unique(object$performance$model)
        if(is.numeric(order.model)){
            if(!identical(sort(order.model), 1:length(order.model))){
                stop("Argument \'order.model\' should contain integers from 1 to ",length(order.model)," when numeric. \n")
            }
            order.model <- Umodel[order.model]
        }else{
            if(any(order.model %in% Umodel == FALSE)){
                stop("Unknown value \"",paste(order.model[order.model %in% Umodel == FALSE], collapse = "\" \""),"\" in argument \'order.model\'. \n")
            }
            if(any(Umodel %in% order.model == FALSE)){
                stop("Missing model \"",paste(Umodel[Umodel %in% order.model == FALSE], collapse = "\" \""),"\" in argument \'order.model\'. 
\n") } } if(!is.null(object$prediction$internal)){ object$prediction$internal <- object$prediction$internal[,order.model,drop=FALSE] } if(!is.null(object$prediction$external)){ object$prediction$external <- object$prediction$external[,order.model,drop=FALSE] } if(!is.null(object$prediction$cv)){ object$prediction$cv <- object$prediction$cv[,order.model,,drop=FALSE] } if(!is.null(object$resampling)){ object$resampling[,c("model") := factor(.SD$model,levels = order.model)] data.table::setkeyv(object$resampling, c("sample","method","metric","model")) object$performance <- .performanceResample_inference(performance = object$performance[order(factor(object$performance$model, levels = order.model)), c("method","metric","model","estimate")], resampling = object$resampling, type.resampling = object$args$type.resampling, conf.level = object$args$conf.level) }else{ object$performance <- object$performance[order(factor(object$performance$method, levels = c("internal","external","cv")), factor(object$performance$metric, levels = c("auc","brier")), factor(object$performance$model,levels=order.model)),,drop=FALSE] if(length(order.model)>1){ object$performance$p.value_comp <- NA for(iMethod in unique(object$performance$method)){ ## iMethod <- "internal" for(iMetric in c("auc","brier")){ ## iMetric <- "auc" iIndex <- which(object$performance$method==iMethod & object$performance$metric==iMetric) iBeta <- object$performance[iIndex,"estimate"] iIID <- object[[paste0("iid.",iMetric)]][[iMethod]][,order.model] iStat <- c(NA,diff(iBeta)) / c(NA,sqrt(colSums((iIID[,1:(NCOL(iIID)-1),drop=FALSE]-iIID[,-1,drop=FALSE])^2))) object$performance[iIndex,"p.value_comp"] <- 2*(1-stats::pnorm(abs(iStat))) } } } } object$auc <- lapply(object$auc, function(iL){iL[order.model]}) object$brier <- lapply(object$brier, function(iL){iL[order.model]}) } ## ** display results df.print <- object$performance df.print$p.value <- base::format.pval(df.print$p.value, digits = digits[1], eps = 10^(-digits[2])) df.print$p.value[is.na(object$performance$p.value)] <- "" df.print$p.value_comp <- base::format.pval(df.print$p.value_comp, digits = digits[1], eps = 10^(-digits[2])) df.print$p.value_comp[is.na(object$performance$p.value_comp)] <- "" df.print <- df.print[,union(names(which(colSums(!is.na(object$performance))>0)),"estimate")] print(df.print, digits = digits[1]) return(invisible(object$performance)) } ## * summary.performance ##' @method print performance ##' @export print.performance <- function(x, ...){ out <- summary(x) return(invisible(NULL)) } ##---------------------------------------------------------------------- ### summary.performance.R ends here
/scratch/gouwar.j/cran-all/cranData/BuyseTest/R/summary.performance.R
## * Documentation
#' @name validFCTs
#' @aliases validClass
#' @aliases validDimension
#' @aliases validInteger
#' @aliases validLogical
#' @aliases validNames
#' @aliases validNumeric
#' @aliases validPath
#' @title Check Arguments of a function.
#' 
#' @description Check the validity of the arguments in functions.
#' 
#' @param value1 the value of the (first) argument to be checked
#' @param value2 the second value of a second argument whose dimensions should be consistent with the first one
#' @param name1 the name of the (first) argument.
#' @param name2 the name of the second argument.
#' @param valid.class the acceptable class(es) for the argument.
#' @param valid.dimension the acceptable dimension for the argument. If \code{NULL} then name2 is used as a reference.
#' @param valid.length the acceptable length(s) for the argument. If \code{NULL} no test is performed.
#' @param valid.values the acceptable value(s) for the argument. If \code{NULL} no test is performed. Can also be "character" or "character_or_logical".
#' @param refuse.NULL should an error be output if value is \code{NULL}.
#' @param refuse.NA should an error be output if value contains \code{NA}.
#' @param refuse.duplicates should an error be output if value contains duplicated values.
#' @param refuse.values values that must not appear in the argument
#' @param type For \code{validDimension}: the type of operator used to check the dimensions. For \code{validPath} either "dir" or "file" to check whether the path points to an existing directory or file.
#' @param required.values values that must appear in the argument
#' @param min the minimum acceptable value
#' @param max the maximum acceptable value
#' @param extension filter the files by the type of extension.
#' @param method the name of the function using the argument.
#' @param check.fsep display a warning when the file separator (e.g. "/") is missing at the end of a directory path.
#' @param addPP add ": " after the name of the function in the error message.
#' @param unlist [logical] flatten argument before check.
#' 
#' @return An invisible \code{TRUE} or an error message. 
#' 
#' @concept check
#' @keywords internal

## * validCharacter
#' @rdname validFCTs
validCharacter <- function(value1, name1 = as.character(substitute(value1)),
                           valid.length, valid.values = "character",
                           refuse.NULL = TRUE, refuse.duplicates = FALSE,
                           method = NULL, addPP = TRUE){

    if(!is.null(method) && addPP){
        method <- paste0(method, ": ")
    }

    if(is.null(value1)){

        if(refuse.NULL == TRUE){
            stop(method, "\'", name1, "\' must not be NULL \n")
        }

    }else{

        #### check size
        n.value1 <- length(value1)
        if(!is.null(valid.length) && n.value1 %in% valid.length == FALSE){
            stop(method, "\'", name1, "\' must have length ", paste(valid.length, collapse = " or "), " \n",
                 "length(", name1, ") : ", n.value1, "\n")
        }

        #### check duplicates
        if(refuse.duplicates == TRUE && any(duplicated(value1))){
            stop(method, "\'", name1, "\' contains duplicated values: ",
                 "\"",paste(unique(value1[duplicated(value1)]), collapse = "\" \""), "\" \n")
        }

        #### check values
        if(identical(valid.values,"character")){

            if(any(is.character(value1) == FALSE)){
                stop(method, "\'", name1, "\' must be a ", if(n.value1 == 1){"character"}else{"vector of characters"}," \n",
                     "is(", name1, ") : ", paste(is(value1), collapse = " "), "\n")
            }

        } else if(identical(valid.values,"character_or_logical")){

            if(any( (is.character(value1) == FALSE) * (is.logical(value1) == FALSE) > 0 )){
                stop(method, "\'", name1, "\' must be a ", if(n.value1 == 1){"character or logical"}else{"vector of characters or logicals"}," \n",
                     "is(", name1, ") : ", paste(is(value1), collapse = " "), "\n")
            }

        } else if(!is.null(valid.values) && any(value1 %in% valid.values == FALSE)){

            stop(method, "wrong specification of \'", name1, "\' \n",
                 "valid values for \'", name1, "\' : ", if(refuse.NULL == FALSE){"NULL"}, " \"", paste(valid.values, collapse = "\" \""), "\" \n",
                 "refused value",if(sum(value1 %in% valid.values == FALSE)>1){"s"}," for \'", name1, "\' : \"", paste(value1[value1 %in% valid.values == FALSE], collapse = "\" \""), "\"\n")

        }
    }

    return(invisible(TRUE))
}

## * validClass
#' @rdname validFCTs
validClass <- function(value1, name1 = as.character(substitute(value1)),
                       valid.class, type = "inherits",
                       method = NULL, addPP = TRUE){

    if(!is.null(method) && addPP){
        method <- paste0(method, ": ")
    }

    if(type == "inherits"){
        if(inherits(value1, valid.class) == FALSE){
            stop(method, "class of \'", name1, "\' must inherit of \"", paste(valid.class,collapse="\" \""), "\" \n")
        }
    }else if(type == "is"){
        if( all(is(value1) %in% valid.class == FALSE) ){
            stop(method, "class of \'", name1, "\' must be one of the following \"", paste(valid.class,collapse="\" \""), "\" \n",
                 "current superclass : \"", paste(is(value1),collapse="\" \""), "\" \n")
        }
    }else if(type == "class"){
        if( class(value1)[[1]] %in% valid.class == FALSE){
            stop(method, "class of \'", name1, "\' must be \"", paste(valid.class,collapse="\" \""),"\" \n",
                 "current class : ", class(value1)[[1]], "\n")
        }
    }

    return(invisible(TRUE))
}

## * validDimension
#' @rdname validFCTs
validDimension <- function(value1, value2 = NULL,
                           name1 = as.character(substitute(value1)),
                           name2 = as.character(substitute(value2)),
                           valid.dimension = NULL,
                           type = c("NROW","NCOL"),
                           method = NULL, addPP = TRUE){

    if(!is.null(method) && addPP){
        method <- paste0(method, ": ")
    }
    n.type <- length(type)

    #### dimension 1
    test.dimension <- sapply(1:n.type, function(x){
        do.call(type[x], list(value1))
    })

    #### dimension 2
    if(is.null(valid.dimension)){
        valid.dimension <- sapply(1:n.type, function(x){
            do.call(type[x], list(value2))
        })
        test.valid.dimension <- TRUE
    }else if(is.null(name2)){
test.valid.dimension <- FALSE
    }else{
        test.valid.dimension <- TRUE
    }

    #### main
    for(iType in 1:n.type){
        if(test.dimension[iType] != valid.dimension[iType]){
            if(test.valid.dimension){
                stop(method, "dimension mismatch between argument \'", name1, "\' and argument \'", name2, "\' \n",
                     type[iType],"(", name1, ") = ", test.dimension[iType], " \n",
                     type[iType],"(", name2, ") = ", valid.dimension[iType], " \n")
            }else{
                stop(method, "wrong dimension of argument \'", name1, "\' \n",
                     type[iType],"(", name1, ") = ", test.dimension[iType], " \n",
                     "required value: ", valid.dimension[iType], " \n")
            }
        }
    }

    return(invisible(TRUE))
}

## * validInteger
#' @rdname validFCTs
validInteger <- function(value1, name1 = as.character(substitute(value1)),
                         valid.length, min = NULL, max = NULL,
                         refuse.NA = TRUE, refuse.NULL = TRUE, refuse.duplicates = FALSE,
                         method = NULL, addPP = TRUE){

    if(!is.null(method) && addPP){
        method <- paste0(method, ": ")
    }

    validNumeric(value1 = value1, name1 = name1, valid.length = valid.length,
                 min = min, max = max, refuse.NA = refuse.NA, refuse.NULL = refuse.NULL,
                 refuse.duplicates = refuse.duplicates, method = method)

    ## check integer
    if(!is.null(value1) && any(value1 %% 1 > 0)){
        stop(method, "\'", name1, "\' must contain integers not doubles \n",
             "invalid value(s) in ", name1, " : ", paste(value1[value1 %% 1 > 0], collapse = " "), "\n")
    }

    return(invisible(TRUE))
}

## * validLogical
#' @rdname validFCTs
validLogical <- function(value1, name1 = as.character(substitute(value1)),
                         valid.length,
                         refuse.NULL = TRUE, refuse.NA = TRUE,
                         method = NULL, addPP = TRUE){

    if(!is.null(method) && addPP){
        method <- paste0(method, ": ")
    }

    if(is.null(value1)){

        #### NULL
        if(refuse.NULL == TRUE){
            stop(method, "\'", name1, "\' must be logical ",if(refuse.NA == FALSE){"or NA"}," and not NULL \n")
        }

    }else{

        #### Size
        if(!is.null(valid.length) && length(value1) %in% valid.length == FALSE){
            stop(method, "\'", name1, "\' must have length ", paste(valid.length, collapse = " or "), " \n",
                 "length(", name1, ") : ", length(value1), "\n")
        }

        #### Type
        if(any(is.logical(value1) == FALSE)){
            stop(method, "\'", name1, "\' must be ", if(refuse.NULL == FALSE){"NULL or "}, if(refuse.NA == FALSE){"NA or "},"TRUE or FALSE \n",
                 "is(", name1, ") : ", paste(is(value1), collapse = " "), "\n")
        }
        if(refuse.NA == TRUE && any(is.na(value1)) ){
            stop(method, "\'", name1, "\' must be logical ",if(refuse.NULL == FALSE){"or NULL"}," and not NA \n")
        }
    }

    return(invisible(TRUE))
}

## * validNames
#' @rdname validFCTs
validNames <- function(value1, name1 = as.character(substitute(value1)),
                       refuse.NULL = TRUE,
                       valid.length = NULL, valid.values = NULL,
                       required.values = NULL, refuse.values = NULL,
                       method = NULL, addPP = TRUE){

    if(!is.null(method) && addPP){
        method <- paste0(method, ": ")
    }

    ## type
    if(is.matrix(value1)){
        value1 <- colnames(value1)
    }
    if(inherits(value1,"data.frame") || is.list(value1)){
        value1 <- names(value1)
    }

    ## tests
    if(is.null(value1)){

        if(refuse.NULL == TRUE){
            stop(method, "names of \'", name1, "\' must not be NULL \n")
        }

    }else{

        #### check size
        n.value1 <- length(value1)
        if(!is.null(valid.length) && n.value1 %in% valid.length == FALSE){
            stop(method, "\'", name1, "\' must have ", paste(valid.length, collapse = " or ")," names \n",
                 "length(names(", name1, ")) : ", n.value1, "\n")
        }

        #### check content
        if(!is.null(required.values) && any(required.values %in% value1 == FALSE)){
            stop(method, "\'", name1, "\' must contain specific names \n",
                 "missing names : 
\"",paste(required.values[required.values %in% value1 == FALSE], collapse = "\" \""),"\" \n", "possible names : \"", paste(value1, collapse = "\" \""), "\"\n") } if(!is.null(valid.values) && any(value1 %in% valid.values == FALSE)){ stop(method, "wrong specification of \'", name1, "\' \n", "valid names for \'", name1, "\' : \"",paste(valid.values, collapse = "\" \""),"\" \n", "refused names : \"", paste(value1[value1 %in% valid.values == FALSE], collapse = " "), "\"\n") } if(!is.null(refuse.values) && any(value1 %in% refuse.values)){ stop(method, "\'", name1, "\' contains forbidden names:", paste(value1[value1 %in% refuse.values], collapse = " "), "\"\n") } if(any(duplicated(value1))){ stop(method, "\'", name1, "\' must not contain duplicated names \n", "duplicated names : \"", paste(value1[duplicated(value1)], collapse = " "), "\"\n") } } return(invisible(TRUE)) } ## * validNumeric #' @rdname validFCTs validNumeric <- function(value1, name1 = as.character(substitute(value1)), valid.length, valid.values = NULL , min = NULL, max = NULL, refuse.NA = TRUE, refuse.NULL = TRUE, refuse.duplicates = FALSE, method = NULL, addPP = TRUE, unlist = FALSE){ if(!is.null(method) && addPP){ method <- paste0(method, ": ") } if(is.null(value1)){ if(refuse.NULL == TRUE){ stop(method, "\'", name1, "\' must not be NULL \n") } }else{ #### check length if(!is.null(valid.length) && length(value1) %in% valid.length == FALSE){ stop(method, "\'", name1, "\' must have length ", paste(valid.length, collapse = " or "), " \n", "length(", name1, ") : ", length(value1), "\n") } #### check NA if(unlist){ value1 <- unlist(value1) } if(refuse.NA == TRUE && any(is.na(value1))){ stop(method, "\'", name1, "\' must not contain any NA \n", "index of NA values : ", paste(which(is.na(value1)), collapse = " "), "\n") } #### check numeric if(any( (is.numeric(value1) == FALSE) * (is.na(value1) == FALSE) )){ stop(method, "\'", name1, "\' must be numeric \n", "is(", name1, ") : ", paste(is(value1), collapse = " "), "\n") } #### check duplicates if(refuse.duplicates == TRUE && any(duplicated(value1))){ stop(method, "\'", name1, "\' contains duplicated values: ", paste(unique(value1[duplicated(value1)]), collapse = " "), "\n") } #### check min value1 if(!is.null(min) && any(stats::na.omit(value1) < min)){ stop(method, "\'", name1, "\' must be bigger than ", min, " \n", "invalid value(s): ", paste(value1[stats::na.omit(value1) < min], collapse = " "), "\n") } #### check max value1 if(!is.null(max) && any(stats::na.omit(value1) > max)){ stop(method, "\'", name1, "\' must be smaller than ", max, " \n", "invalid value(s): ", paste(value1[stats::na.omit(value1) > max], collapse = " "), "\n") } #### check valid values if(!is.null(valid.values) && any(stats::na.omit(value1) %in% valid.values == FALSE)){ stop(method, "\'", name1, "\' contains invalid values \n", "valid values for \'", name1, "\' : ", if(refuse.NULL == FALSE){"NULL"}, " \"", paste(valid.values, collapse = "\" \""), "\" \n", "refused value",if(sum(value1 %in% valid.values == FALSE)>1){"s"}," for \'", name1, "\' : \"", paste(value1[value1 %in% valid.values == FALSE], collapse = " "), "\"\n", sep = "") } } return(invisible(TRUE)) } ## * validPath #' @rdname validFCTs validPath <- function(value1, name1 = as.character(substitute(value1)), type, method = NULL, addPP = TRUE, extension = NULL, check.fsep = FALSE){ if(!is.null(method) && addPP){ method <- paste0(method, ": ") } validCharacter(type, valid.length = 1, valid.values = c("file", "dir")) try_path <- switch(type, file = 
file.exists(value1), dir = dir.exists(value1) ) if(try_path == FALSE){ stop(method, "\'", name1, "\' does not lead to an existing ",switch(type,"file"="file","dir"="directory")," \n", "current value: \"", value1, "\"\n", "current path: ", getwd(), "\n") } if(type == "dir"){ if(check.fsep == TRUE && substr(value1, start = nchar(value1), stop = nchar(value1)) != "/"){ warning(method, "possible bad specification of \'", name1, "\' \n", "it should end with a fsep (e.g. \"/\") \n") } }else if(type == "file" && !is.null(extension)){ fileExtension <- tools::file_ext(value1) if(fileExtension %in% extension == FALSE){ stop(method, "\'", name1, "\' has not the expected extension \n", "current extension: \"", fileExtension, "\" \n", "expected extension: \"", paste(extension, collapse = "\" \""), "\"\n") } } return(invisible(TRUE)) }
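## Illustrative calls (kept as comments since these helpers are internal;
## the argument values below are made up):
## validNumeric(0.5, name1 = "alpha", valid.length = 1, min = 0, max = 1)  ## invisible(TRUE)
## validCharacter("weibull", valid.length = 1,
##                valid.values = c("weibull","uniform","piecewiseExp"))    ## invisible(TRUE)
## validInteger(2.5, name1 = "n", valid.length = 1)  ## error: must contain integers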
/scratch/gouwar.j/cran-all/cranData/BuyseTest/R/valid.R
---
title: "Dealing with CR"
author: "Eva Cantagallo"
date: "April 3, 2019"
output:
  pdf_document: default
  html_document:
    df_print: paged
---

```{r setup, include = FALSE}
knitr::opts_chunk$set(echo = TRUE, comment = NA)
library(survival)
library(BuyseTest)
```

\section{Dealing with competing risks}

GPC can be used to analyze time-to-event endpoints subject to competing risks. Only two competing risks can be handled for each endpoint so far: the event of interest and one competing event. The former should be denoted by 1 in the corresponding censoring variable while the latter should be denoted by 2. Note that no competing event should be present without an event of interest. As an example, let us analyze the time to an event of interest, say death due to cancer, with death due to other causes being a competing event, in the following dataset:

```{r}
set.seed(2)
dt.CR <- simBuyseTest(5e2, argsCont = NULL,
                      argsTTE = list(CR = TRUE, scale.T = 0.8, scale.C = 0.5, scale.CR = 0.6))
head(dt.CR)
```

The \texttt{censoring} variable, called status in the dataset, contains the following levels:

```{r}
levels(as.factor(dt.CR[,status]))
```

with 0 indicating a censored observation, 1 an event of interest (time to death due to cancer is observed) and 2 a competing event (time to death due to other causes is observed).

We perform the GPC analysis on this time-to-event endpoint with a threshold of 1 year, setting \texttt{scoring.rule} to \texttt{"Peron"}. In the presence of competing risks, the \texttt{"Peron"} method uses the estimates of the cumulative incidence functions to compute the pairs' scores:

```{r}
BT.CR <- BuyseTest(treatment ~ tte(eventtime, threshold = 1, status = "status"), data = dt.CR,
                   trace = 0, method.inference = "none", scoring.rule = "Peron",
                   keep.pairScore = TRUE)
summary(BT.CR)
```

If we display the scores of some pairs:

```{r}
getPairScore(BT.CR)
```

we see that the first pair, composed of observations 1 and 501, is classified as favorable. Let us look closer at these two observations:

```{r}
dt.CR[c(1,501),]
```

The treatment patient of the pair experienced the competing event while the control patient experienced the event of interest. In the GPC method, a patient who experiences the competing event is considered to never experience the event of interest at a later time, so their time to the event of interest is treated as infinite. The treatment patient therefore has a longer (infinite) time to the event of interest than the control patient, and the pair is classified as favorable. Similarly, pairs where the control patient has the competing event and the treatment patient has the event of interest are classified as unfavorable. Finally, pairs with two competing events are classified as neutral, as both patients have an infinite time to the event of interest.

In this example, the net benefit is estimated at 0.1408. This positive value indicates that a longer time to death due to cancer is more likely under the treatment than under the control, which suggests a beneficial effect of the treatment on the event of interest.

\textbf{What about the competing event?}

The net benefit focuses on the time to the event of interest of the patients of the two treatment groups.
The time to the competing event in both groups should therefore be analyzed too, to avoid missing important information about the experimental treatment and its effect on the competing event. This can be done by performing a log-rank test to determine whether there is a significant difference between the cause-specific hazards of the competing event in the two groups (see the example at the end of this document).

\textbf{What about censoring?}

As in the absence of competing risks, censoring can be handled in several ways. The first consists of applying the \texttt{Peron} scoring rule instead of the \texttt{Gehan} scoring rule, so that the estimated cumulative incidence functions are used to compute the pairs' scores (see the example above). Another way is the use of a correction, either the IPCW correction or the correction at the pair level, to completely remove the uninformative pairs:

```{r}
BT.CR.corr <- BuyseTest(treatment ~ tte(eventtime, threshold = 1, status = "status"), data = dt.CR,
                        trace = 0, method.inference = "none", scoring.rule = "Peron",
                        correction.uninf = 1)
summary(BT.CR.corr)
```

Note that in this particular example, the effect of the correction at the pair level is quite moderate, as the original proportion of uninformative pairs was small.
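\textbf{Complementary analyses}

As mentioned above, a log-rank test on the cause-specific hazard of the competing event can complement the GPC analysis. A minimal sketch, treating the competing event (status 2) as the event and everything else as censored:

```{r}
survdiff(Surv(eventtime, status == 2) ~ treatment, data = dt.CR)
```

The IPCW correction can be requested through the same \texttt{correction.uninf} argument; we assume here that the value 2 selects the IPCW correction (see \texttt{?BuyseTest} for the exact coding of this argument):

```{r}
BT.CR.ipcw <- BuyseTest(treatment ~ tte(eventtime, threshold = 1, status = "status"), data = dt.CR,
                        trace = 0, method.inference = "none", scoring.rule = "Peron",
                        correction.uninf = 2)
summary(BT.CR.ipcw)
```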
/scratch/gouwar.j/cran-all/cranData/BuyseTest/vignettes/vignette_CR.Rmd
EEB <- function(beta,nu,delta=0,S=1,alpha=0.05,type=c("marginal","cond_NRej","cond_Rej"),tol=1e-4,max.itr=5000) { if(type[1]=="marginal") { return(EEB.mar(beta,nu,delta=delta,S=S,alpha=alpha,tol=tol,max.itr=max.itr)) } if(type[1]=="cond_NRej") { return(EEB.conNRej(beta,nu,delta=delta,S=S,alpha=alpha,tol=tol,max.itr=max.itr)) } if(type[1]=="cond_Rej") { return(EEB.conRej(beta,nu,delta=delta,S=S,alpha=alpha,tol=tol,max.itr=max.itr)) } }
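## Illustrative usage (kept as a comment; the numeric values are made up):
## EEB(beta = 0.8, nu = 20, delta = 0, S = 1, alpha = 0.05, type = "marginal")
## returns the benchmark b at which the corresponding pB() curve reaches beta,
## so that pB(EEB(0.8, 20), nu = 20, type = "marginal") is approximately 0.8.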
/scratch/gouwar.j/cran-all/cranData/Bvalue/R/EEB.R
EEB.conNRej <- function(beta,nu,delta=0,S=1,alpha=0.05,tol=1e-4,max.itr=5000) { tv<-qt(1-alpha,df=nu) if(delta==0) { return(S*(qt((beta*(1-alpha)+1)/2,df=nu)+tv)) }else { #---------------------------- # Bisection method a<-tv*S b<-100 s<-0 while(s<max.itr) { c<-(a+b)/2 diff<-(b-a)/2 g<-pB.conNRej(c,nu=nu,delta=delta,S=S,alpha=alpha)-beta if(abs(g)<tol|diff<tol) { break }else { if(g<0) { a<-c }else { b<-c } s<-s+1 } } return(c) #---------------------------- } }
/scratch/gouwar.j/cran-all/cranData/Bvalue/R/EEB.conNRej.R
EEB.conRej <- function(beta,nu,delta=0,S=1,alpha=0.05,tol=1e-4,max.itr=5000) { tv<-qt(1-alpha,df=nu) if(delta==0) { return(S*(qt(1-(alpha*(1-beta)/2),df=nu)+tv)) }else { #---------------------------- # Bisection method a<-S*(tv+qt(1-alpha/2,df=nu)) b<-100 s<-0 while(s<max.itr) { c<-(a+b)/2 diff<-(b-a)/2 g<-pB.conRej(c,nu=nu,delta=delta,S=S,alpha=alpha)-beta if(abs(g)<tol|diff<tol) { break }else { if(g<0) { a<-c }else { b<-c } s<-s+1 } } return(c) #---------------------------- } }
/scratch/gouwar.j/cran-all/cranData/Bvalue/R/EEB.conRej.R
EEB.mar <- function(beta,nu,delta=0,S=1,alpha=0.05,tol=1e-4,max.itr=5000) { tv<-qt(1-alpha,df=nu) #---------------------------- # Bisection method a<-0 b<-100 s<-0 while(s<max.itr) { c<-(a+b)/2 diff<-(b-a)/2 g<-pB(c,nu=nu,delta=delta,S=S,alpha=alpha)-beta if(abs(g)<tol|diff<tol) { break }else { if(g<0) { a<-c }else { b<-c } s<-s+1 } } return(c) #---------------------------- }
/scratch/gouwar.j/cran-all/cranData/Bvalue/R/EEB.mar.R
pB <- function(b,nu,delta=0,S=1,alpha=0.05,type=c("marginal","cond_NRej","cond_Rej")) { if(type[1]=="marginal") { return(pB.mar(b,nu,delta=delta,S=S,alpha=alpha)) } if(type[1]=="cond_NRej") { return(pB.conNRej(b,nu,delta=delta,S=S,alpha=alpha)) } if(type[1]=="cond_Rej") { return(pB.conRej(b,nu,delta=delta,S=S,alpha=alpha)) } }
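## Illustrative usage (kept as a comment; the numeric values are made up):
## pB(3, nu = 20, type = "marginal")   ## probability in [0, 1]
## pB(3, nu = 20, type = "cond_NRej")  ## conditional on non-rejection
## pB(3, nu = 20, type = "cond_Rej")   ## conditional on rejection; returns 0
##                                     ## when b is below the rejection threshold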
/scratch/gouwar.j/cran-all/cranData/Bvalue/R/pB.R
pB.conNRej <- function(b,nu,delta=0,S=1,alpha=0.05) { tv1<-qt(1-alpha,df=nu) tv2<-qt(1-alpha/2,df=nu) dn<-pt(tv2-delta/S,df=nu)-pt(-tv2-delta/S,df=nu) if(length(b)==1) { if(b<S*tv1) { return(0) }else { nu1<-pt(min((b-delta)/S-tv1,tv2-delta/S),df=nu) nu2<-pt(max(tv1-(b+delta)/S,-tv2-delta/S),df=nu) return((nu1-nu2)/dn) } }else { ot1<-apply(as.matrix((b-delta)/S-tv1),1,function(x){return(min(x,tv2-delta/S))}) nu1<-pt(ot1,df=nu) ot2<-apply(as.matrix(tv1-(b+delta)/S),1,function(x){return(max(x,-tv2-delta/S))}) nu2<-pt(ot2,df=nu) out<-(nu1-nu2)/dn out[which(b<S*tv1)]<-0 return(out) } }
/scratch/gouwar.j/cran-all/cranData/Bvalue/R/pB.conNRej.R
pB.conRej <- function(b,nu,delta=0,S=1,alpha=0.05) { tv1<-qt(1-alpha,df=nu) tv2<-qt(1-alpha/2,df=nu) dn<-pt(delta/S-tv2,df=nu)+pt(-tv2-delta/S,df=nu) if(length(b)==1) { if(b<S*(tv1+tv2)) { return(0) }else { nu1<-pt((b-delta)/S-tv1,df=nu)-pt(tv2-delta/S,df=nu) nu2<-pt(-tv2-delta/S,df=nu)-pt(tv1+(-b-delta)/S,df=nu) return((nu1+nu2)/dn) } }else { nu1<-pt((b-delta)/S-tv1,df=nu)-pt(tv2-delta/S,df=nu) nu2<-pt(-tv2-delta/S,df=nu)-pt(tv1+(-b-delta)/S,df=nu) out<-(nu1+nu2)/dn out[which(b<S*(tv1+tv2))]<-0 return(out) } }
/scratch/gouwar.j/cran-all/cranData/Bvalue/R/pB.conRej.R
pB.mar <- function(b,nu,delta=0,S=1,alpha=0.05) { tv<-qt(1-alpha,df=nu) out<-rep(NA,length(b)) out[which(b<S*tv)]<-0 itmp<-which(b>=S*tv) if(length(itmp)>0) { p1<-pt(-tv+(b[itmp]-delta)/S,df=nu) p2<-pt(tv+(-b[itmp]-delta)/S,df=nu) out[itmp]<-p1-p2 } return(out) }
/scratch/gouwar.j/cran-all/cranData/Bvalue/R/pB.mar.R
RT <- function(x,y,tau){ if(!is.numeric(tau)|!is.vector(tau)|any(!is.finite(tau))) stop("The parameter 'tau' must be a single number between 0 and 1") if(length(tau)!=1) stop("The parameter 'tau' must be a single number between 0 and 1") if(!is.numeric(tau)|(tau>=1)|(tau<=0)) stop("The parameter 'tau' must be a single number between 0 and 1") if (!is.numeric(x)){stop("'x' must be numeric")} if (!is.numeric(y)){stop("'y' must be numeric")} if(!length(x)>1 | !length(y)>1){stop("'x' and 'y' must be numeric vectors with the same length")} if(length(x)!=length(y)){stop("'x' and 'y' must be numeric vectors with the same length")} iaux=complete.cases(x,y) if(sum(!iaux)!=0){ii=which(iaux==FALSE);x=x[-ii];y=y[-ii];warning("Missing values have been removed from 'x' and 'y'")} n=length(x);ran=max(x)-min(x) RK<-1/(2*sqrt(pi)) Rd1K<-integrate(function(x){(-x*exp(-x^2/2)/sqrt(2*pi))^2},lower=-Inf,upper=Inf)$value Rd2K<-integrate(function(x){((x^2-1)*exp(-x^2/2)/sqrt(2*pi))^2},lower=-Inf,upper=Inf)$value mu2=1 ; mu4=3 ; mu6=15 RKc=integrate(function(x){(exp(-x^2/4)/sqrt(4*pi))^2},lower=-Inf,upper=Inf)$value IN2x4<-integrate(function(x){x^4*(exp(-x^2/2)/sqrt(2*pi))^2},lower=-Inf,upper=Inf)$value IN2x2<-integrate(function(x){x^2*(exp(-x^2/2)/sqrt(2*pi))^2},lower=-Inf,upper=Inf)$value d1N<-function (x, mu, sigma){-(exp(-((x - mu)^2/(2 * sigma^2))) * (x - mu)/(sigma^2 * sqrt(2 * (pi * sigma^2))))} d2N<-function (x, mu, sigma) {-((1 - (x - mu)^2/sigma^2) * exp(-((x - mu)^2/(2 * sigma^2)))/(sigma^2* sqrt(2 * (pi *sigma^2))))} d3N<-function(x,mu,sigma){2 * (((1 - (x - mu)^2/sigma^2)/2 + 1) * exp(-((x - mu)^2/(2 *sigma^2))) * (x - mu)/(sigma^4 * sqrt(2 * (pi *sigma^2))))} d4N<-function (x, mu, sigma){ .e1 <- sigma^2 ; .e2 <- (x - mu)^2 ; .e4 <- (1 - .e2/.e1)/2 + 1 2 * ((.e4 - 2 * ((.e4/2 + 0.5) * .e2/.e1)) * exp(-(.e2/(2 *.e1)))/(sigma^4 * sqrt(2 * (pi * .e1)))) } D=mu2*mu4*mu6-mu4^3-mu2^3*mu6+mu2^2*mu4^2 a31=-mu2^2*mu6+mu2*mu4^2;alfa31=a31/D a33=mu2*mu6-mu4^2;alfa33=a33/D delta1=(alfa31*mu4+alfa33*mu6)/6; delta2=4*(alfa31^2*RK+alfa33^2*IN2x4+2*alfa31*alfa33*IN2x2) C2I=((5*delta2)/(2*delta1))^(1/7);C2II=(delta2/delta1)^(1/7) ind=sort(x,ind=TRUE)$ix;x_ord=x[ind];y_ord=y[ind];dm=cbind(x_ord,x_ord^2,x_ord^3,x_ord^4) N_star=5;Nmax=max(min(n%/%20,N_star),1) RSS=numeric(Nmax) if(Nmax==1){Nb=1}else{ RSSb=numeric(Nmax);ent=n%/%Nmax for(b in 1:(Nmax-1)){ modelb<-try(rq(y_ord[(1+(b-1)*ent):(b*ent)]~dm[(1+(b-1)*ent):(b*ent),],tau=tau),TRUE) if(class(modelb)=="rq"){ RSSb[b]=sum(tau*residuals(modelb)*(residuals(modelb)>=0)+(tau-1)*residuals(modelb)*(residuals(modelb)<0)) }else{RSSb[b]=Inf} } modelB=try(rq(y_ord[((Nmax-1)*ent+1):n]~dm[((Nmax-1)*ent+1):n,],tau=tau),TRUE) if(class(modelB)=="rq"){ RSSb[Nmax]=sum(tau*residuals(modelB)*(residuals(modelB)>=0)+(tau-1)*residuals(modelB)*(residuals(modelB)<0)) }else{RSSb[Nmax]=Inf} RSS[Nmax]=sum(RSSb) modelb=rq(y_ord~dm,tau=tau) RSS[1]=sum(tau*residuals(modelb)*(residuals(modelb)>=0)+(tau-1)*residuals(modelb)*(residuals(modelb)<0)) for(i in 2:(Nmax-1)){ RSSb=numeric(i) for(b in 1:(i-1)){ ent=n%/%i modelb<-try(rq(y_ord[(1+(b-1)*ent):(b*ent)]~dm[(1+(b-1)*ent):(b*ent),],tau=tau),TRUE) if(class(modelb)=="rq"){ RSSb[b]=sum(tau*residuals(modelb)*(residuals(modelb)>=0)+(tau-1)*residuals(modelb)*(residuals(modelb)<0)) }else{RSSb[b]=Inf} } modelb=try(rq(y_ord[((i-1)*ent+1):n]~dm[((i-1)*ent+1):n,],tau=tau),TRUE) if(class(modelb)=="rq"){ RSSb[i]=sum(tau*residuals(modelb)*(residuals(modelb)>=0)+(tau-1)*residuals(modelb)*(residuals(modelb)<0)) }else{RSSb[i]=Inf} RSS[i]=sum(RSSb) } 
Nmax=max((RSS!=Inf)*1:Nmax);Cp=numeric(Nmax) Cp[Nmax]=RSS[Nmax]/(RSS[Nmax]/(n-5*Nmax))-(n-10*Nmax) for(b in 1:(Nmax-1)){Cp[b]=RSS[b]/(RSS[Nmax]/(n-5*Nmax))-(n-10*b)} Nb=which.min(Cp) } ent=n%/%Nb curvb1=numeric(Nb);curvb2=numeric(Nb);sparsityb=numeric(Nb) d2sb=numeric(Nb); Idtauq2=numeric(Nb);Iaux1=numeric(Nb);Iaux2=numeric(Nb);Iaux3=numeric(Nb) Ig=numeric(Nb);lb=numeric(Nb);nb=numeric(Nb) for(b in 1:Nb){ if(b==Nb){ xb=dm[((Nb-1)*ent+1):n,];yb=y_ord[((Nb-1)*ent+1):n] }else{ xb=dm[(1+(b-1)*ent):(b*ent),];yb=y_ord[(1+(b-1)*ent):(b*ent)] } nb[b]=dim(xb)[1];lb[b]=max(xb[,1])-min(xb[,1]) xseq=seq(min(xb[,1]),max(xb[,1]),by=0.01) modelb=rq(yb~xb,tau=tau); coefb=coef(modelb) curvb1[b]=sum((2*coefb[3]+6*coefb[4]*xb[,1]+12*coefb[5]*xb[,2])^2) curvb2[b]=sum((2*coefb[3]+6*coefb[4]*xb[,1]+12*coefb[5]*xb[,2])*24*coefb[5]) q2<-function(x){2*coefb[3]+6*coefb[4]*x+12*coefb[5]*x^2} q2_2<-function(x){(2*coefb[3]+6*coefb[4]*x+12*coefb[5]*x^2)^2} residuosb=as.numeric(sort(residuals(modelb))) mu_est=mean(residuosb);sigma_est=sd(residuosb) hB=bwB(residuosb,tau) tau1=min(tau+hB,1);j1=ceiling(tau1*nb[b]) tau2=max(tau-hB,0);j2=max(ceiling(tau2*nb[b]),1) sparsityb[b]=max(0.005,(residuosb[j1]-residuosb[j2]))/(2*hB) m=(0.25*nb[b]^(8/9))%/%1 ntau=(nb[b]*tau)%/%1 d2sb[b]=0.5*(nb[b]/m)^3*(residuosb[min(ntau+2*m,nb[b])]-2*residuosb[min(ntau+m,nb[b])]+2*residuosb[max(ntau-m,1)]-residuosb[max(ntau-2*m,1)]) hBn=nb[b]^(-0.2) * ((4.5 * dnorm(qnorm(tau))^4)/(2 * qnorm(tau)^2 + 1)^2)^0.2 tauplus=min(tau+hBn,0.99);tauminus=max(tau-hBn,0.01) modelbplus=rq(yb~xb,tau=tauplus);coefbplus=coef(modelbplus) modelbminus=rq(yb~xb,tau=tauminus);coefbminus=coef(modelbminus) dtauq2=function(x){(2*coefbplus[3]+6*coefbplus[4]*x+12*coefbplus[5]*x^2-2*coefbminus[3]-6*coefbminus[4]*x-12*coefbminus[5]*x^2)/(2*hBn)} Idtauq2[b]=integrate(dtauq2,min(xb[,1]),max(xb[,1]))$value Rf3<- integrate(function(x,mu,sigma){d3N(x,mu,sigma)*d3N(x,mu,sigma)},lower=min(xb),upper=max(xb),mu=mu_est,sigma=sigma_est)$value;Rf3 Rf4<-integrate(function(x,mu,sigma){d4N(x,mu,sigma)*d4N(x,mu,sigma)},lower=min(xb),upper=max(xb),mu=mu_est,sigma=sigma_est)$value;Rf4 h_pil_d1<-(3*Rd1K/(mu2^2*Rf3))^(1/7)*nb[b]^(-1/7); h_pil_d2<-(5*Rd2K/(mu2^2*Rf4))^(1/7)*nb[b]^(-1/9) prediction=predict(modelb) fd1<-function(x,h){sum(d1N((x-prediction)/h,mu=0,sigma=1))/(nb[b]*h^2)} fd2<-function(x,h){sum(d2N((x-prediction)/h,mu=0,sigma=1))/(nb[b]*h^3)} pred_new=as.numeric(cbind(rep(1,length(xseq)),xseq,xseq^2,xseq^3,xseq^4)%*%coefb) Iaux1[b]=simpson(sapply(pred_new,fd1,h=h_pil_d1)^2*q2_2(xseq),min(xb[,1]),max(xb[,1])) Iaux2[b]=simpson(sapply(pred_new,fd2,h=h_pil_d2)*q2_2(xseq),min(xb[,1]),max(xb[,1])) Iaux3[b]=simpson(sapply(pred_new,fd1,h=h_pil_d1)*q2(xseq)*dtauq2(xseq),min(xb[,1]),max(xb[,1])) Ig[b]=n*lb[b]/nb[b] } curv1=sum(curvb1)/n curv2=sum(curvb2)/n sparsity=sum(sparsityb^2*lb) IA=RK*sum(sparsityb^2*Ig*lb)/2 IB=sum(sparsityb*d2sb*lb)/3 IC=sum(sparsityb*Idtauq2)*mu2 IC2=(sum(sparsityb^3*Iaux1)-sum(sparsityb^2*Iaux2)-sum(2*sparsityb*Iaux3))*mu4/4 ID=sum(sparsityb^4*Ig*lb)*2 IE=(0.5*RKc-RK)*sum(sparsityb^4*Ig^2*lb) if(curv2>0){g1=C2I*(((tau*(1-tau)*sparsity)/(curv2*n))^(1/7))}else{g1=C2II*(((tau*(1-tau)*sparsity)/(abs(curv2)*n))^(1/7))} h_pil=((RK*tau*(1-tau)*sparsity)/(n*(mu2^2)*curv1))^(1/5) return(list("h"=h_pil,"g"=g1,"IA"=IA,"IB"=IB,"IC"=IC,"IC2"=IC2,"ID"=ID,"IE"=IE)) }
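## Illustrative usage (kept as a comment; the data below are made up).
## RT() relies on quantreg::rq internally and returns the rule-of-thumb
## bandwidths 'h' and 'g' together with the integral terms reused by bwPI().
## set.seed(1)
## x <- runif(200); y <- sin(2 * pi * x) + rnorm(200, sd = 0.3)
## RT(x, y, tau = 0.5)$h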
/scratch/gouwar.j/cran-all/cranData/BwQuant/R/RT.R
bwB <- function (x,tau) { if(!is.numeric(tau)|!is.vector(tau)|any(!is.finite(tau))) stop("The parameter 'tau' must be a single number between 0 and 1") if(length(tau)!=1) stop("The parameter 'tau' must be a single number between 0 and 1") if(!is.numeric(tau)|(tau>=1)|(tau<=0)) stop("The parameter 'tau' must be a single number between 0 and 1") if (!is.numeric(x)){stop("'x' must be numeric")} if(!length(x)>1 ){stop("'x' must be a numeric vector")} iaux=complete.cases(x) if(sum(!iaux)!=0){ii=which(iaux==FALSE);x=x[-ii];warning("Missing values have been removed from 'x'")} x=sort(x);n=length(x) m=(0.25*n^(8/9))%/%1;n2=(n*tau)%/%1 h0=n^(-0.2) * ((4.5 * dnorm(qnorm(tau))^4)/(2 * qnorm(tau)^2 + 1)^2)^0.2 j1=ceiling(min(tau+h0,1)*n);j2=max(ceiling(max(tau-h0,0)*n),1) s=(x[j1]-x[j2])/(2*h0) Z=0.5*(n/m)^3*(x[min(n2+2*m,n)]-2*x[min(n2+m,n)]+2*x[max(n2-m,1)]-x[max(n2-2*m,1)]) return(((4.5*(s/Z)^2)/n)^(1/5)) }
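# Illustrative use (a minimal sketch): bwB() selects a Bofinger-type
# bandwidth from a univariate sample; it is called internally by RT() and
# bwRT() on sorted residuals to estimate the sparsity function.
# set.seed(1)
# res <- rnorm(200)
# bwB(res, tau = 0.5)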
/scratch/gouwar.j/cran-all/cranData/BwQuant/R/bwB.R
bwCV <- function(x,y,hseq,tau){
  if(!is.numeric(tau)|!is.vector(tau)|any(!is.finite(tau))) stop("The parameter 'tau' must be a single number between 0 and 1")
  if(length(tau)!=1) stop("The parameter 'tau' must be a single number between 0 and 1")
  if(!is.numeric(tau)|(tau>=1)|(tau<=0)) stop("The parameter 'tau' must be a single number between 0 and 1")
  if (!is.numeric(x)){stop("'x' must be numeric")}
  if (!is.numeric(y)){stop("'y' must be numeric")}
  if(!length(x)>1 | !length(y)>1){stop("'x' and 'y' must be numeric vectors with the same length")}
  if(length(x)!=length(y)){stop("'x' and 'y' must be numeric vectors with the same length")}
  #build the default grid before validating 'hseq'; with the checks first, a NULL 'hseq' could never reach this point
  if(is.null(hseq)){ hseq=seq(sort(abs(outer(x,x,"-"))[outer(x,x,"-")!=0])[2],diff(range(x))/2,length=20)}
  if(!is.numeric(hseq)|!is.vector(hseq)|any(!is.finite(hseq))) stop("'hseq' must be a sequence of values where the cross-validation function will be evaluated")
  if(!length(hseq)>1){stop("'hseq' must be a sequence of values where the cross-validation function will be evaluated")}
  iaux=complete.cases(x,y)
  if(sum(!iaux)!=0){ii=which(iaux==FALSE);x=x[-ii];y=y[-ii];warning("Missing values have been removed from 'x' and 'y'")}
  nh=length(hseq); n=length(y)-1;result=numeric(nh);beta0=1;beta1=0;zz=0;yy=0;weight=0
  storage.mode(n)="integer";storage.mode(zz)="double";storage.mode(yy)="double";storage.mode(weight)="double"
  storage.mode(beta0)="double";storage.mode(beta1)="double";storage.mode(tau)="double"
  fun_CV<-function(h){
    result=0
    for (i in 1:length(y)){
      zz<-x[-i]-x[i]
      aux<-dnorm(zz/h)
      if((sum(aux)!=0)&(max(aux/sum(aux))<0.9999)){
        weight=aux/sum(aux)
        yy=y[-i]
        yi<-.Fortran("barro", n, zz, yy, weight, beta0, beta1,tau)[[5]]
      }else{yi<-Inf}
      result=result+(y[i]-yi)*(tau-((y[i]-yi)<0))
    }
    return(result)
  }
  return(hseq[which.min(sapply(hseq,fun_CV))])
}
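# Illustrative use (a minimal sketch; the grid below is an arbitrary choice,
# not a package default):
# set.seed(1)
# x <- runif(100); y <- x^2 + rnorm(100, sd = 0.1)
# h.cv <- bwCV(x, y, hseq = seq(0.05, 0.5, length = 20), tau = 0.25)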
/scratch/gouwar.j/cran-all/cranData/BwQuant/R/bwCV.R
bwPI <- function(x,y,tau){ if(!is.numeric(tau)|!is.vector(tau)|any(!is.finite(tau))) stop("The parameter 'tau' must be a single number between 0 and 1") if(length(tau)!=1) stop("The parameter 'tau' must be a single number between 0 and 1") if(!is.numeric(tau)|(tau>=1)|(tau<=0)) stop("The parameter 'tau' must be a single number between 0 and 1") if (!is.numeric(x)){stop("'x' must be numeric")} if (!is.numeric(y)){stop("'y' must be numeric")} if(!length(x)>1 | !length(y)>1){stop("'x' and 'y' must be numeric vectors with the same length")} if(length(x)!=length(y)){stop("'x' and 'y' must be numeric vectors with the same length")} iaux=complete.cases(x,y) if(sum(!iaux)!=0){ii=which(iaux==FALSE);x=x[-ii];y=y[-ii];warning("Missing values have been removed from 'x' and 'y'")} n=length(x);a=min(x);b=max(x);Rk=1/(2*sqrt(pi));mu=1 point=seq(min(x),max(x),length=100);lp=length(point) x0=numeric(2);sparsity_p=numeric(length(point));q_second=numeric(n) alfa=0.05;c1=(1-alfa)*a+alfa*b;c2=alfa*a+(1-alfa)*b RuleThumb=RT(x,y,tau); g1=RuleThumb$g for(i in 1:n){ if((c1<x[i])&(c2>x[i])){ z1<-x-x[i];z2=z1^2;z3=z1^3 aux<-dnorm(z1/g1);weight=aux/sum(aux) model=rq(y~z1+z2+z3,weights=weight,tau=tau);q_second[i]=2*coef(model)[3] }else{q_second[i]=0} } curvature=mean(q_second^2) x0[1]=max(0.0051,RuleThumb$h) x0[2]=n^(-0.2) * ((4.5 * dnorm(qnorm(tau))^4)/(2 * qnorm(tau)^2 + 1)^2)^0.2 if(x0[2]>(1-tau)){x0[2]=0.99-tau}else if(tau<x0[2]){x0[2]=tau-0.01} IA=RuleThumb$IA; IB=RuleThumb$IB; IC=RuleThumb$IC; IC2=RuleThumb$IC2; ID=RuleThumb$ID; IE=RuleThumb$IE Fun2 <- function(p){(IA/(n*p[1]*p[2])+IB*p[2]^2+IC*p[1]^2+IC2*p[1]^4)^2+ID/(n*p[2])+IE/(n^2*p[2]^2*p[1])} Amat <- matrix(c(0,0,0,1,-1,-1,1,0), nrow=4, ncol=2) bvec <- c((-tau+0.01), (tau-0.99),0.01, 0.005) band.pil<-constrOptim(x0, Fun2, NULL, ui = Amat, ci = bvec,control = list(reltol = 1e-100))$par y_estimate_sup<-numeric(lp);y_estimate_inf<-numeric(lp) beta0=1;beta1=0;z=0;weight=0 storage.mode(n)="integer";storage.mode(z)="double";storage.mode(y)="double";storage.mode(weight)="double" storage.mode(beta0)="double";storage.mode(beta1)="double" tau_sup=min((tau+band.pil[2]),0.99);storage.mode(tau_sup)="double" tau_inf=max((tau-band.pil[2]),0.01);storage.mode(tau_inf)="double" for (i in 1:lp){ z<-x-point[i] aux<-dnorm(z/band.pil[1]) weight=aux/sum(aux) if((max(weight)>=0.9999)){ y_estimate_sup[i]<-y[which.max(weight)] y_estimate_inf[i]<-y[which.max(weight)] }else{ modelo_sup<-.Fortran("barro", n, z, y, weight, beta0, beta1,tau_sup) y_estimate_sup[i]<-modelo_sup[[5]] modelo_inf<-.Fortran("barro", n, z, y, weight, beta0, beta1,tau_inf) y_estimate_inf[i]<-modelo_inf[[5]] } } sparsity_p=(y_estimate_sup-y_estimate_inf)/(2*band.pil[2]); fp=(sparsity_p)^2; int_p=simpson(fp,a,b) h_PI=((Rk*tau*(1-tau)*int_p)/(n*mu^2*curvature))^(1/5) return(h_PI) }
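# Illustrative use of the plug-in selector (a minimal sketch with simulated
# data):
# set.seed(1)
# x <- runif(100); y <- x^2 + rnorm(100, sd = 0.1)
# h.pi <- bwPI(x, y, tau = 0.5)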
/scratch/gouwar.j/cran-all/cranData/BwQuant/R/bwPI.R
bwRT <- function(x,y,tau){
  if(!is.numeric(tau)|!is.vector(tau)|any(!is.finite(tau))) stop("The parameter 'tau' must be a single number between 0 and 1")
  if(length(tau)!=1) stop("The parameter 'tau' must be a single number between 0 and 1")
  if(!is.numeric(tau)|(tau>=1)|(tau<=0)) stop("The parameter 'tau' must be a single number between 0 and 1")
  if (!is.numeric(x)){stop("'x' must be numeric")}
  if (!is.numeric(y)){stop("'y' must be numeric")}
  if(!length(x)>1 | !length(y)>1){stop("'x' and 'y' must be numeric vectors with the same length")}
  if(length(x)!=length(y)){stop("'x' and 'y' must be numeric vectors with the same length")}
  iaux=complete.cases(x,y)
  if(sum(!iaux)!=0){ii=which(iaux==FALSE);x=x[-ii];y=y[-ii];warning("Missing values have been removed from 'x' and 'y'")}
  n=length(x)
  N<-function(x,mu,sigma){exp(-(x-mu)^2/(2*sigma^2))/sqrt(2*sigma^2*pi)}
  RK<-integrate(function(x,mu,sigma){N(x,mu,sigma)^2},lower=-Inf,upper=Inf,mu=0,sigma=1)$value
  mu2=integrate(function(x){(x^2)*exp(-x^2/2)/sqrt(2*pi)},lower=-Inf,upper=Inf)$value
  ind=sort(x,ind=TRUE)$ix;x_ord=x[ind];y_ord=y[ind];dm=cbind(x_ord,x_ord^2,x_ord^3,x_ord^4)
  N_star=5; Nmax=max(min(n%/%20,N_star),1); RSS=numeric(Nmax)
  if(Nmax==1){Nb=1}else{
    RSSb=numeric(Nmax);ent=n%/%Nmax
    for(b in 1:(Nmax-1)){
      modelb<-try(rq(y_ord[(1+(b-1)*ent):(b*ent)]~dm[(1+(b-1)*ent):(b*ent),],tau=tau),TRUE)
      if(inherits(modelb,"rq")){
        RSSb[b]=sum(tau*residuals(modelb)*(residuals(modelb)>=0)+(tau-1)*residuals(modelb)*(residuals(modelb)<0))
      }else{RSSb[b]=Inf}
    }
    modelB=try(rq(y_ord[((Nmax-1)*ent+1):n]~dm[((Nmax-1)*ent+1):n,],tau=tau),TRUE)
    if(inherits(modelB,"rq")){
      RSSb[Nmax]=sum(tau*residuals(modelB)*(residuals(modelB)>=0)+(tau-1)*residuals(modelB)*(residuals(modelB)<0))
    }else{RSSb[Nmax]=Inf}
    RSS[Nmax]=sum(RSSb)
    modelb=rq(y_ord~dm,tau=tau)
    RSS[1]=sum(tau*residuals(modelb)*(residuals(modelb)>=0)+(tau-1)*residuals(modelb)*(residuals(modelb)<0))
    if(Nmax>2){ #guard: for Nmax==2 the sequence 2:(Nmax-1) would run backwards and overwrite RSS[1]
      for(i in 2:(Nmax-1)){
        RSSb=numeric(i)
        for(b in 1:(i-1)){
          ent=n%/%i
          modelb<-try(rq(y_ord[(1+(b-1)*ent):(b*ent)]~dm[(1+(b-1)*ent):(b*ent),],tau=tau),TRUE)
          if(inherits(modelb,"rq")){
            RSSb[b]=sum(tau*residuals(modelb)*(residuals(modelb)>=0)+(tau-1)*residuals(modelb)*(residuals(modelb)<0))
          }else{RSSb[b]=Inf}
        }
        modelb=try(rq(y_ord[((i-1)*ent+1):n]~dm[((i-1)*ent+1):n,],tau=tau),TRUE)
        if(inherits(modelb,"rq")){
          RSSb[i]=sum(tau*residuals(modelb)*(residuals(modelb)>=0)+(tau-1)*residuals(modelb)*(residuals(modelb)<0))
        }else{RSSb[i]=Inf}
        RSS[i]=sum(RSSb)
      }
    }
    Nmax=max((RSS!=Inf)*1:Nmax);Cp=numeric(Nmax)
    Cp[Nmax]=RSS[Nmax]/(RSS[Nmax]/(n-5*Nmax))-(n-10*Nmax)
    for(b in 1:(Nmax-1)){Cp[b]=RSS[b]/(RSS[Nmax]/(n-5*Nmax))-(n-10*b)}
    Nb=which.min(Cp)
  }
  ent=n%/%Nb;curvb1=numeric(Nb);sparsityb=numeric(Nb);lb=numeric(Nb);nb=numeric(Nb)
  for(b in 1:Nb){
    if(b==Nb){
      xb=dm[((Nb-1)*ent+1):n,];yb=y_ord[((Nb-1)*ent+1):n]
    }else{
      xb=dm[(1+(b-1)*ent):(b*ent),];yb=y_ord[(1+(b-1)*ent):(b*ent)]
    }
    nb[b]=dim(xb)[1];lb[b]=max(xb[,1])-min(xb[,1])
    modelb=rq(yb~xb,tau=tau); coefb=coef(modelb)
    curvb1[b]=sum((2*coefb[3]+6*coefb[4]*xb[,1]+12*coefb[5]*xb[,2])^2)
    residuosb=as.numeric(sort(residuals(modelb)))
    hB=bwB(residuosb,tau)
    tau1=min(tau+hB,1);j1=ceiling(tau1*nb[b])
    tau2=max(tau-hB,0);j2=max(ceiling(tau2*nb[b]),1)
    sparsityb[b]=max(0.005,(residuosb[j1]-residuosb[j2]))/(2*hB)
  }
  curv1=sum(curvb1)/n
  sparsity=sum(sparsityb^2*lb)
  h_pil=((RK*tau*(1-tau)*sparsity)/(n*(mu2^2)*curv1))^(1/5)
  return(h_pil)
}
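# Illustrative use of the rule-of-thumb selector (a minimal sketch):
# set.seed(1)
# x <- runif(100); y <- x^2 + rnorm(100, sd = 0.1)
# h.rt <- bwRT(x, y, tau = 0.5)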
/scratch/gouwar.j/cran-all/cranData/BwQuant/R/bwRT.R
bwYJ <- function(x,y,tau){ if(!is.numeric(tau)|!is.vector(tau)|any(!is.finite(tau))) stop("The parameter 'tau' must be a single number between 0 and 1") if(length(tau)!=1) stop("The parameter 'tau' must be a single number between 0 and 1") if(!is.numeric(tau)|(tau>=1)|(tau<=0)) stop("The parameter 'tau' must be a single number between 0 and 1") if (!is.numeric(x)){stop("'x' must be numeric")} if (!is.numeric(y)){stop("'y' must be numeric")} if(!length(x)>1 | !length(y)>1){stop("'x' and 'y' must be numeric vectors with the same length")} if(length(x)!=length(y)){stop("'x' and 'y' must be numeric vectors with the same length")} iaux=complete.cases(x,y) if(sum(!iaux)!=0){ii=which(iaux==FALSE);x=x[-ii];y=y[-ii];warning("Missing values have been removed from 'x' and 'y'")} dpill(x,y)*((tau*(1-tau)/(dnorm(qnorm(tau)))^2)^(1/5)) }
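# bwYJ() rescales the mean-regression plug-in bandwidth of KernSmooth's
# dpill() by the Yu-Jones factor (tau*(1-tau)/dnorm(qnorm(tau))^2)^(1/5).
# Illustrative use (a minimal sketch):
# set.seed(1)
# x <- runif(100); y <- x^2 + rnorm(100, sd = 0.1)
# h.yj <- bwYJ(x, y, tau = 0.5)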
/scratch/gouwar.j/cran-all/cranData/BwQuant/R/bwYJ.R
llqr <- function(x,y,tau,t,h){
  if(!is.numeric(tau)|!is.vector(tau)|any(!is.finite(tau))) stop("The parameter 'tau' must be a single number between 0 and 1")
  if(length(tau)!=1) stop("The parameter 'tau' must be a single number between 0 and 1")
  if(!is.numeric(tau)|(tau>=1)|(tau<=0)) stop("The parameter 'tau' must be a single number between 0 and 1")
  if (!is.numeric(x)){stop("'x' must be numeric")}
  if (!is.numeric(y)){stop("'y' must be numeric")}
  if(!length(x)>1 | !length(y)>1){stop("'x' and 'y' must be numeric vectors with the same length")}
  if(length(x)!=length(y)){stop("'x' and 'y' must be numeric vectors with the same length")}
  iaux=complete.cases(x,y)
  if(sum(!iaux)!=0){ii=which(iaux==FALSE);x=x[-ii];y=y[-ii];warning("Missing values have been removed from 'x' and 'y'")}
  nt=length(t);n=length(x)
  beta0=1;beta1=0;z=0;weight=0
  storage.mode(n)="integer"; storage.mode(z)="double"; storage.mode(y)="double"
  storage.mode(weight)="double"; storage.mode(beta0)="double"; storage.mode(beta1)="double"
  y.estimated<-numeric(nt)
  for (i in 1:nt){
    z<-x-t[i]
    aux<-dnorm(z/h);weight=aux/sum(aux)
    if(max(weight)>=0.9999){
      y.estimated[i]<-y[which.max(weight)]
    }else{
      result<-.Fortran("barro", n, z, y, weight, beta0, beta1,tau)
      y.estimated[i]<-result[[5]]
    }
  }
  return(list("x.values"=t,"y.values"=y.estimated))
}
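# Illustrative use, combining a selector above with the estimator (a minimal
# sketch):
# set.seed(1)
# x <- runif(100); y <- x^2 + rnorm(100, sd = 0.1)
# h <- bwRT(x, y, tau = 0.5)
# fit <- llqr(x, y, tau = 0.5, t = seq(0, 1, by = 0.01), h = h)
# plot(x, y); lines(fit$x.values, fit$y.values)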
/scratch/gouwar.j/cran-all/cranData/BwQuant/R/llqr.R
simpson <- function(fxs,a,b){ np<-length(fxs);h=(b-a)/(np-1) int<-3*(fxs[1]+fxs[np])/8+7*(fxs[2]+fxs[np-1])/6+23*(fxs[3]+fxs[np-2])/24+sum(fxs[4:(np-3)]) return(int*h) }
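# simpson() integrates tabulated values fxs of a function on an evenly spaced
# grid over [a,b], using a composite rule with boundary-corrected weights
# (3/8, 7/6, 23/24 near the ends, 1 in the interior). Quick sanity check
# (a minimal sketch):
# xs <- seq(0, 1, length = 101)
# simpson(xs^2, 0, 1)   # close to 1/3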
/scratch/gouwar.j/cran-all/cranData/BwQuant/R/simpson.R
#' Plot a clusterforest object
#'
#' A function that can be used to plot a clusterforest object, either by returning plots
#' with information such as average silhouette width and within cluster similarity on the cluster solutions,
#' or plots of the medoid trees of each solution.
#'
#' This function can be used to plot a clusterforest object in two ways. If it's used without specifying a solution,
#' then the average silhouette width, and within cluster similarity measures are plotted for each solution.
#' If additionally, predictive_plots=TRUE, two more plots are returned, namely a plot showing for each solution the
#' predictive accuracy when making predictions based on the medoid trees, and a plot showing for each solution the agreement between
#' the class label for each object predicted on the basis of the random forest as a whole versus based on the medoid trees.
#' These plots may be helpful in deciding how many clusters are needed to summarize the forest (see Sies & Van Mechelen, 2020).
#'
#' If the function is used with the clusterforest object and the number of the solution, then the medoid tree(s)
#' of that solution are plotted.
#'
#' @param x A clusterforest object
#' @param solution The solution to plot the medoid trees from. If NULL, plots with the average silhouette width, within cluster similarity
#' (and predictive accuracy) per solution are returned. Default = NULL
#' @param predictive_plots Indicating whether predictive plots should be returned: A plot showing the predictive accuracy
#' when making predictions based on the medoid trees, and a plot of the agreement between the class label
#' for each object predicted on the basis of the random forest as a whole versus based on the medoid trees. Default = FALSE.
#' @param ... Additional arguments that can be used in generic plot function, or in plot.party.
#' @export
#' @importFrom cluster pam
#' @importFrom graphics axis plot mtext
#' @import MASS
#' @references \cite{Sies, A. & Van Mechelen I. (2020). C443: An R-package to see a forest for the trees. Journal of Classification.}
#' @examples
#' require(MASS)
#' require(rpart)
#'#Function to draw a bootstrap sample from a dataset
#'DrawBoots <- function(dataset, i){
#'set.seed(2394 + i)
#'Boot <- dataset[sample(1:nrow(dataset), size = nrow(dataset), replace = TRUE),]
#'return(Boot)
#'}
#'
#'#Function to grow a tree using rpart on a dataset
#'GrowTree <- function(x,y,BootsSample, minsplit = 40, minbucket = 20, maxdepth =3){
#' controlrpart <- rpart.control(minsplit = minsplit, minbucket = minbucket,
#' maxdepth = maxdepth, maxsurrogate = 0, maxcompete = 0)
#' tree <- rpart(as.formula(paste(noquote(paste(y, "~")),
#' noquote(paste(x, collapse="+")))), data = BootsSample,
#' control = controlrpart)
#' return(tree)
#'}
#'
#'#Use functions to draw 10 bootstrap samples and grow a tree on each sample
#'Boots<- lapply(1:10, function(k) DrawBoots(Pima.tr ,k))
#'Trees <- lapply(1:10, function (i) GrowTree(x=c("npreg", "glu", "bp",
#' "skin", "bmi", "ped", "age"), y="type",
#'Boots[[i]] ))
#'
#'ClusterForest<- clusterforest(observeddata=Pima.tr,treedata=Boots,trees=Trees,m=1,
#' fromclus=1, toclus=5, sameobs=FALSE, no_cores=2)
#'plot(ClusterForest)
#'plot(ClusterForest,2)
plot.clusterforest <- function(x, solution=NULL, predictive_plots=FALSE, ...) {
  clusters=x$clusters
  medoids=x$medoids
  mds=x$medoidtrees
  sil=x$avgsilwidth
  sums=x$withinsim
  agreement=x$agreement
  accuracy=x$accuracy
  if(is.null(solution)){
    # Within plot
    # base-graphics plot() is called for its side effect; it returns NULL, so
    # the axis()/mtext() decorations are added by direct calls rather than
    # by "adding" them to the plot object
    sums[unlist(lapply(sums, is.null))] <- NA
    M<- unlist(sums)
    plot(M, main="Within-cluster similarity plot", xlab="Number of clusters", ylab="Within-cluster similarity", xlim=c(1,length(medoids)), xaxt="n")
    axis(1, at = seq(from = 1, to = length(medoids), by = 1))
    #### Silhouette plot
    sil[unlist(lapply(sil, is.null))] <- NA
    sil<- unlist(sil)
    plot(sil, main = "Silhouette plot", xlab = "Number of clusters", ylab = "Average Silhouette width", xlim=c(1,length(medoids)), xaxt="n",...)
    axis(1, at = seq(from = 1, to = length(medoids), by = 1))
    if(predictive_plots==TRUE){
      ## accuracy
      accuracy[unlist(lapply(accuracy, is.null))] <- NA
      accuracy<- unlist(accuracy)
      plot(accuracy, main= "Accuracy of predictions for each solution", xlab = "Number of clusters", ylab = "accuracy", xlim = c(1,length(medoids)), xaxt = "n", ylim=c(0.3,1))
      axis(1, at = seq(from = 1, to = length(medoids), by = 1))
      mtext(paste("Accuracy full forest = ", accuracy[length(accuracy)-1], ", proportion most frequent class = ", accuracy[length(accuracy)]))
      agreement[unlist(lapply(agreement, is.null))] <- NA
      agreement<- unlist(agreement)
      plot(agreement, main= "Agreement in predicted labels by medoids vs forest ", xlab = "Number of clusters", ylab = "Agreement", xlim = c(1,length(medoids)), xaxt = "n", ylim=c(0.3,1))
      axis(1, at = seq(from = 1, to = length(medoids), by = 1))
    }
  } else{
    for(i in 1:solution){
      plot(x$medoidtrees[[solution]][[i]],...)
    }
  }
}

#' Print a clusterforest object
#'
#' A function that can be used to print a clusterforest object.
#'
#' @param x A clusterforest object
#' @param solution The solution to print the medoid trees from. Default = 1
#' @param ... Additional arguments that can be used in the generic print function.
#' @export
print.clusterforest<- function(x,solution=1,...){
  print(unlist(x$medoidtrees[solution], recursive=FALSE), ...)
  cat("Cluster to which each tree is assigned: " ,unlist(x$clusters[solution], recursive=FALSE))
}

#' Summarize a clusterforest object
#'
#' A function to summarize a clusterforest object.
#' @param object A clusterforest object
#' @param ... Additional arguments that can be used in the generic summary function.
#' @export
summary.clusterforest<- function(object,...){
  cat("Solutions checked: " , length(object$medoids), "\n")
  cat("Average Silhouette Width per solution: \n" , unlist(object$avgsilwidth), "\n")
  cat("Within cluster similarity per solution:\n " , unlist(object$withinsim), "\n")
  cat("Agreement predicted labels medoids vs forest per solution:\n " , unlist(object$agreement), "\n")
}

#' Get the cluster assignments for a solution of a clusterforest object
#'
#' A function to get the cluster assignments for a given solution of a clusterforest object.
#' @param clusterforest A clusterforest object
#' @param solution The solution for which cluster assignments should be returned. Default = 1
#' @export
clusters <- function(clusterforest, solution){
  UseMethod("clusters",clusterforest)
}

#' Get the cluster assignments for a solution of a clusterforest object
#'
#' A function to get the cluster assignments for a given solution of a clusterforest object.
#' @param clusterforest The clusterforest object
#' @param solution The solution
#' @export
clusters.default <- function(clusterforest, solution)
{
  print("Make sure that the clusterforest argument is an object from class clusterforest.")
}

#' Get the cluster assignments for a solution of a clusterforest object
#'
#' A function to get the cluster assignments for a given solution of a clusterforest object.
#' @param clusterforest The clusterforest object
#' @param solution The solution
#' @export
clusters.clusterforest<- function(clusterforest, solution=1){
  return(unlist(clusterforest$clusters[solution], recursive=FALSE))
}

#' Get the medoid trees for a solution of a clusterforest object
#'
#' A function to get the medoid trees for a given solution of a clusterforest object.
#' @param clusterforest A clusterforest object
#' @param solution The solution for which medoid trees should be returned. Default = 1
#' @export
medoidtrees <- function(clusterforest, solution){
  UseMethod("medoidtrees",clusterforest)
}

#' Get the medoid trees for a solution of a clusterforest object
#'
#' A function to get the medoid trees for a given solution of a clusterforest object.
#'
#' @param clusterforest A clusterforest object
#' @param solution The solution for which medoid trees should be returned. Default = 1
#' @export
medoidtrees.default <- function(clusterforest, solution)
{
  print("Make sure that the clusterforest argument is an object from class clusterforest.")
}

#' Get the medoid trees for a solution of a clusterforest object
#'
#' A function to get the medoid trees for a given solution of a clusterforest object.
#'
#' @param clusterforest A clusterforest object
#' @param solution The solution for which medoid trees should be returned. Default = 1
#' @export
medoidtrees.clusterforest<- function(clusterforest, solution=1){
  return(unlist(clusterforest$medoidtrees[solution], recursive=FALSE))
}

#' Get the similarity matrix that was used to create a clusterforest object
#'
#' A function to get the similarity matrix used to obtain a clusterforest object.
#'
#' @param clusterforest A clusterforest object
#' @export
treesimilarities <- function(clusterforest){
  UseMethod("treesimilarities",clusterforest)
}

#' Get the similarity matrix that was used to create a clusterforest object
#'
#' A function to get the similarity matrix used to obtain a clusterforest object.
#'
#' @param clusterforest A clusterforest object
#' @export
treesimilarities.default <- function(clusterforest)
{
  print("Make sure that the clusterforest argument is an object from class clusterforest.")
}

#' Get the similarity matrix that was used to create a clusterforest object
#'
#' A function to get the similarity matrix used to obtain a clusterforest object.
#'
#' @param clusterforest A clusterforest object
#' @export
treesimilarities.clusterforest<- function(clusterforest){
  return(clusterforest$treesimilarities)
}
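# Illustrative use of the accessors (a minimal sketch; 'cf' stands for a
# clusterforest object such as the one built in the clusterforest() examples):
# clusters(cf, solution = 2)      # cluster assignment of each tree
# medoidtrees(cf, solution = 2)   # medoid trees of the 2-cluster solution
# treesimilarities(cf)            # similarity matrix used for the clustering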
/scratch/gouwar.j/cran-all/cranData/C443/R/clusterMethods.R
#' Clustering the classification trees in a forest based on similarities #' #' A function to get insight into a forest of classification trees by clustering the trees in a forest using Partitioning Around Medoids (PAM, Kaufman & Rousseeuw, 2009), based on user provided similarities, #' or based on similarities calculated by the package using a similarity measure chosen by the user (see Sies & Van Mechelen, 2020). #' #' The user should provide the number of clusters that the solution should contain, or a range of numbers that should be explored. #' In the latter case, the resulting clusterforest object will contain clustering results for each solution. #' On this clusterforest object, several methods, such as plot, print and summary, can be used. #' #' @param observeddata The entire observed dataset #' @param treedata A list of dataframes on which the trees are based. Not necessary if the data set is included in the tree object already. #' @param trees A list of trees of class party, classes inheriting from party (e.g., glmtree), classes that can be coerced to party (i.e., rpart, Weka_tree, XMLnode), or a randomForest or ranger object. #' @param simmatrix A similaritymatrix with the similarities between all trees. Should be square, symmetric and have ones on the diagonal. Default=NULL #' @param m Similarity measure that should be used to calculate similarities, in the case that no similarity matrix was provided by the user. Default=NULL. #' m=1 is based on counting common predictors; #' m=2 is based on counting common predictor-split point combinations; #' m=3 is based on common ordered sets of predictor-range part combinations (see Shannon & Banks (1999)); #' m=4 is based on the agreement of partitions implied by leaf membership (Chipman, 1998); #' m=5 is based on the agreement of partitions implied by class labels (Chipman, 1998); #' m=6 is based on the number of predictor occurrences in definitions of leaves with same class label; #' m=7 is based on the number of predictor-split point combinations in definitions of leaves with same class label #' m=8 measures closeness to logical equivalence (applicable in case of binary predictors only) #' @param tol A vector with for each predictor a number that defines the tolerance zone within which two split points of the predictor in question are assumed equal. For example, if the tolerance for predictor X #' is 1, then a split on that predictor in tree A will be assumed equal to a split in tree B as long as the splitpoint in tree B is within the splitpoint in tree A + or - 1. Only applicable for m=1 and m=6. Default=NULL #' @param weight If 1, the number of dissimilar paths in the Shannon and Banks measure (m=2), should be weighted by 1/their length (Otherwise they are weighted equally). Only applicable for m=2. Default=NULL #' @param fromclus The lowest number of clusters for which the PAM algorithm should be run. Default=1. #' @param toclus The highest number of clusters for which the PAM algorithm should be run. Default=1. #' @param treecov A vector/dataframe with the covariate value(s) for each tree in the forest (1 column per covariate) in the case of known #' sources of variation underlying the forest, that should be linked to the clustering solution. #' @param sameobs Are the same observations included in every tree data set? For example, in the case of subsamples or bootstrap samples, the answer is no. Default=FALSE #' @param seed A seed number that should be used for the multi start procedure (based on which initial medoids are assigned). 
Default=NULL. #' @param no_cores Number of CPU cores used for computations. Default=detectCores(logical=FALSE) #' @return The function returns an object of class clusterforest, with attributes: #' \item{medoids}{the position of the medoid trees in the forest (i.e., which element of the list of partytrees)} #' \item{medoidtrees}{the medoid trees} #' \item{clusters}{The cluster to which each tree in the forest is assigned} #' \item{avgsilwidth}{The average silhouette width for each solution (see Kaufman and Rousseeuw, 2009)} #' \item{accuracy}{For each solution, the accuracy of the predicted class labels based on the medoids.} #' \item{agreement}{For each solution, the agreement between the predicted class label for each observation based on the forest as a whole, and those based on the #' medoids only (see Sies & Van Mechelen,2020)} #' \item{withinsim}{Within cluster similarity for each solution (see Sies & Van Mechelen, 2020)} #' \item{treesimilarities}{Similarity matrix on which clustering was based} #' \item{treecov}{covariate value(s) for each tree in the forest} #' \item{seed}{seed number that was used for the multi start procedure (based on which initial medoids were assigned)} #' @export #' @importFrom cluster pam #' @importFrom partykit nodeids data_party id_node kids_node varid_split split_node index_split breaks_split right_split node_party #' @importFrom stats predict #' @importFrom graphics axis plot #' @importFrom parallel detectCores makeCluster clusterExport parSapply stopCluster #' @importFrom igraph graph_from_incidence_matrix max_bipartite_match #' @importFrom stats complete.cases #' @importFrom methods is #' @importFrom ranger treeInfo #' @importFrom randomForest getTree #' @importFrom foreach foreach %do% %dopar% #' @importFrom doParallel registerDoParallel #' @import MASS #' @import partykit #' @import rpart #' #' @references \cite{Kaufman, L., & Rousseeuw, P. J. (2009). Finding groups in data: an introduction to cluster analysis (Vol. 344). John Wiley & Sons.} #' @references \cite{Sies, A. & Van Mechelen I. (2020). C443: An R-package to see a forest for the trees. Journal of Classification.} #' @references \cite{Shannon, W. D., & Banks, D. (1999). Combining classification trees using MLE. Statistics in medicine, 18(6), 727-740.} #' @references \cite{Chipman, H. A., George, E. I., & McCulloh, R. E. (1998). Making sense of a forest of trees. 
Computing Science and Statistics, 84-92.} #' @examples #' require(MASS) #' require(ranger) #' require(rpart) #'#Function to draw a bootstrap sample from a dataset #'DrawBoots <- function(dataset, i){ #'set.seed(2394 + i) #'Boot <- dataset[sample(1:nrow(dataset), size = nrow(dataset), replace = TRUE),] #'return(Boot) #'} #' #'#Function to grow a tree using rpart on a dataset #'GrowTree <- function(x,y,BootsSample, minsplit = 40, minbucket = 20, maxdepth =3){ #' controlrpart <- rpart.control(minsplit = minsplit, minbucket = minbucket, maxdepth = maxdepth, #' maxsurrogate = 0, maxcompete = 0) #' tree <- rpart(as.formula(paste(noquote(paste(y, "~")), noquote(paste(x, collapse="+")))), #' data = BootsSample, control = controlrpart) #' return(tree) #'} #' #'#Use functions to draw 10 boostrapsamples and grow a tree on each sample #'Boots<- lapply(1:10, function(k) DrawBoots(Pima.tr ,k)) #'Trees <- lapply(1:10, function (i) GrowTree(x=c("npreg", "glu", "bp", "skin", #' "bmi", "ped", "age"), y="type", Boots[[i]] )) #' #'#Clustering the trees in this forest #'ClusterForest<- clusterforest(observeddata=Pima.tr,treedata=Boots,trees=Trees,m=1, #'fromclus=1, toclus=2, sameobs=FALSE, no_cores=2) #' #'#Example RandomForest #'Pima.tr.ranger <- ranger(type ~ ., data = Pima.tr, keep.inbag = TRUE, num.trees=20, #'max.depth=3) #' #'ClusterForest<- clusterforest(observeddata=Pima.tr,trees=Pima.tr.ranger,m=5, #' fromclus=1, toclus=2, sameobs=FALSE, no_cores=2) clusterforest <- function (observeddata, treedata=NULL, trees, simmatrix=NULL, m=NULL, tol=NULL, weight=NULL,fromclus=1, toclus=1, treecov=NULL, sameobs=FALSE, seed=NULL, no_cores = detectCores(logical=FALSE)){ ############################## Check forest input ##################### ### Some checks whether correct forest information is provided by user if(typeof(trees) != "list" ) { cat("trees must be a list of party tree objects, a list of trees that can be converted to party objects, or a randomforest or ranger object") return(NULL) } if('ranger' %in% class(trees)){ trees<-sapply(1:trees$num.trees, function (k) ranger2party(observeddata, trees, k)) }else if('randomForest' %in% class(trees)){ trees<-sapply(1:trees$ntree, function (k) randomForest2party(observeddata, trees, k)) }else if(!'party' %in% class(trees[[1]])){ tryCatch(trees<- lapply(1:length(trees), function (i) as.party(trees[[i]])), error=function(e){cat("trees must be a list of party tree objects or objects that can be coerced to party trees")}) } if(!is.null(trees[[1]]$data)){ treedt<- lapply(1:length(trees), function(k) trees[[k]]$data) } if(is.null(trees[[1]]$data)){ if(is.null(treedata)){ cat("please submit treedata") return(NULL) } if(typeof(treedata) != "list" || !is(treedata[[1]], "data.frame")) { cat("please submit correct treedata: a list of data frames on which the trees were grown") return(NULL) } if(length(treedata) != length(trees)){ cat("the number of data frames provided must be the same as the number of trees") return(NULL) } treedt=treedata } cl <- makeCluster(no_cores) doParallel::registerDoParallel(cl) ## Turn user provided forest information into object of class forest. 
forest <- list(partytrees = trees, treedata = treedt, observeddata=observeddata)
class(forest) <- append(class(forest), "forest")

#########################################################################
########################## Calculate Similarities #######################
# Check whether at least one of the arguments is not null (user provided
# similarity matrix or similarity measure)
if (is.null(simmatrix) & is.null(m)){
  cat("Either a similarity matrix (simmatrix) should be provided, or a similarity measure (m) should be chosen")
  return(NULL)
}

# If user provided similarity matrix, check whether it is a square matrix,
# whether it's symmetric and whether it contains ones on the diagonal.
if (!is.null(simmatrix)){
  #check square
  if(nrow(simmatrix) != ncol(simmatrix)){
    cat("The similarity matrix should be a square matrix")
    return(NULL)
  }
  #check symmetric
  if(!isSymmetric(simmatrix)){
    cat("The similarity matrix should be symmetric")
    return(NULL)
  }
  #check ones on diagonal
  if(!sum(diag(simmatrix) == 1) == nrow(simmatrix)){
    cat("The similarity matrix should have ones on the diagonal")
    return(NULL)
  }
  if(nrow(simmatrix) != length(trees)){
    cat("The similarity matrix should have the same dimensions as the number of trees in the forest")
    return(NULL)
  }
  #turn into treesimilarities object
  treesimilarities <- simmatrix
  attr(treesimilarities, "class") <- "treesimilarities"
}

# Predictor and outcome names; these are also needed in the clustering step
# below, so they are computed regardless of whether a similarity matrix was
# supplied (previously Y was undefined when the user provided 'simmatrix')
X= unlist(unique(lapply(1:length(forest$partytrees), function (k) attr(forest$partytrees[[k]]$terms, "term.labels"))))
Y= unlist(lapply(1:length(forest$partytrees), function(k) colnames(forest$treedata[[k]])[!colnames(forest$treedata[[k]]) %in% X]))

# If user didn't provide similarity matrix -- calculate using chosen measure
if (is.null(simmatrix) & !is.null(m)){
  if (m == 1){
    sim <- M1(forest$observeddata, forest$treedata, Y, X, forest$partytrees, tol=NULL)
  }
  if(m == 2){
    if(is.null(tol)){
      cat("Please provide a tolerance zone for each predictor")
      return(NULL)
    } else{
      sim <- M1(forest$observeddata, forest$treedata, Y, X, forest$partytrees, tol)
    }
  }
  if (m == 3){
    Xclass<- sapply(1:length(X), function(i) is(forest$observeddata[,X[i]], "integer" )|is(forest$observeddata[,X[i]],"numeric" ))
    if("FALSE" %in% Xclass){
      cat("This measure only works on numerical splitvariables (class integer or numeric)")
      return(NULL)
    } else{
      sim <- M2(forest$observeddata, forest$treedata, Y, forest$partytrees, weight)
    }
  }
  if (m == 4){
    sim <- M4(forest$observeddata, forest$treedata, Y, forest$partytrees,sameobs)
  }
  if (m == 5){
    sim <- M3(forest$observeddata, forest$treedata, Y,forest$partytrees, sameobs)
  }
  if (m == 6){
    Xclass<- sapply(1:length(X), function(i) is(forest$observeddata[,X[i]] , "integer")|is(forest$observeddata[,X[i]], "numeric" ) )
    if("FALSE" %in% Xclass){
      cat("This measure only works on numerical splitvariables (class integer or numeric)")
      return(NULL)
    }
    sim <- M6(forest$observeddata, forest$treedata, Y, X, forest$partytrees, tol=NULL)
  }
  if(m==7){
    Xclass<- sapply(1:length(X), function(i) is(forest$observeddata[,X[i]], "integer" )|is(forest$observeddata[,X[i]], "numeric" ) )
    if("FALSE" %in% Xclass){
      cat("This measure only works on numerical splitvariables (class integer or numeric)")
      return(NULL)
    }
    if(is.null(tol)){
      cat("Please provide a tolerance zone for each predictor")
      return(NULL)
    } else{
      sim <- M6(forest$observeddata, forest$treedata, Y, X, forest$partytrees, tol)
    }
  }
  if (m == 8){
    #the forest object stores the full data as 'observeddata' (there is no 'fulldata' element)
    Xclass<- sapply(1:length(X), function(i) is(forest$observeddata[,X[i]], "factor" ) )
    if("FALSE" %in% Xclass){
      cat("This measure only works on binary splitvariables (class factor)")
      return(NULL)
    }else{
      #binary means exactly two factor levels
      Xbin<- sapply(1:length(X), function(i) nlevels(forest$observeddata[,X[i]]) == 2 )
      if("FALSE" %in% Xbin){
        cat("This measure only works on binary splitvariables (class factor)")
        return(NULL)
      } else{
        sim <- M5(forest$observeddata, forest$treedata, Y, X, forest$partytrees, sameobs)
      }
    }
  }
  # Turn similarity matrix into treesimilarities object
  treesimilarities <- sim
  attr(treesimilarities, "class") <- "treesimilarities"
}
################################# End similarities #######################

############################## Clustering ################################
trees=forest$partytrees
treedata=forest$treedata
observeddata=forest$observeddata
medoids<- list(0)
clusters<- list(0)
mds<- list(0)
sil<- list(0)
agreement<-list(0)
sums<- list(0)
meds<- list(0)
obj<- c(0)
medsseeds<- list(0)
correct<- list(0)

pamtrees <- lapply(1:length(trees), function (i) pamtree(observeddata, treedata[[i]], Y[i], trees[[i]]))
g <- lapply(1:length(trees), function(k) pamtrees[[k]]$predresp)
g_matrix <- t(sapply(1:length(trees), function(k) as.numeric(levels(g[[k]])[g[[k]]])))
forpred <- function(k) {
  levels_g1[which.max( c(sum(g_matrix[,k] == levels_g1[1], na.rm=TRUE), sum(g_matrix[,k] == levels_g1[2], na.rm=TRUE)))]
}
levels_g1 = levels(g[[1]])
forestpred <- sapply(1:nrow(observeddata), forpred)

for (i in fromclus:toclus) {
  # first do the clustering with BUILD and SWAP phase
  clustering <- pam(x = 1 - treesimilarities, k = i, diss = TRUE, pamonce=3)
  if(is.null(seed)){
    seed <- round(runif(1,0,100000))
  }
  #Then try 100 random starts + SWAP Phase
  for (j in 1:100){
    set.seed(seed+j)
    initmedoids = sample(1:nrow(treesimilarities),i)
    medsseeds[[j]] <- pam(1-treesimilarities,k=i,medoids=initmedoids, diss=TRUE, pamonce=3)
    obj[j] <- medsseeds[[j]]$objective[2]
  }
  # check whether objective function of one of the multistarts is better than the one of the build algorithm
  # and if so, continue with the best result (which.min must be taken over the
  # vector of objectives, not over its minimum)
  if( round(min(obj), 10) < round(clustering$objective[2], 10) ){
    clustering <- medsseeds[[which.min(obj)]]
  }
  medoids[[i]] <- clustering$medoids
  clusters[[i]] <- clustering$clustering
  meds <- vector(mode = "list", length = i)
  for(j in 1:i){
    meds[[j]] <- trees[[medoids[[i]][j]]]
  }
  mds[[i]] <- meds
  sil[[i]] <- clustering$silinfo$avg.width
  gmed <- g[c(medoids[[i]])]
  w <- table(clusters[[i]])
  #unweighted
  #medpred1 <- sapply(1:nrow(observeddata), function (k) levels(gmed[[1]])[which.max( c(sum(unlist(lapply(gmed, `[[`, k)) == levels(unlist(lapply(gmed, `[[`, k)))[1], na.rm=TRUE), sum(unlist(lapply(gmed, `[[`, k)) == levels(unlist(lapply(gmed, `[[`, k)))[2], na.rm=TRUE) ) )] )
  #gmed_matrix <- as.numeric(levels(gmed[[1]])[gmed[[1]]])
  gmed_matrix <- t(sapply(1:i, function(k) as.numeric(levels(gmed[[k]])[gmed[[k]]])))
  levels_gmed1 = levels(gmed[[1]])
  medpred_weighted <- function(k) {
    levels_gmed1[which.max( c(sum((gmed_matrix[,k] == levels_gmed1[1]) * w , na.rm=TRUE), sum((gmed_matrix[,k] == levels_gmed1[2]) * w, na.rm=TRUE)))]
  }
  #weighted
  medpred <- sapply(1:nrow(observeddata), medpred_weighted)
  agreement[[i]] <- mean(forestpred == medpred)
  correct[[i]] <- mean(forest$observeddata[,Y][1] == medpred)
  sumw<- numeric(0)
  for (j in 1:i){
    sumw[j] <- sum(treesimilarities[clusters[[i]]==j, medoids[[i]][j]])
  }
  sums[[i]] <- sum(sumw) / dim(treesimilarities)[1]
}
stopCluster(cl)
correct[[toclus + 1]] <- mean(forest$observeddata[,Y][1]== forestpred)
correct[[toclus + 2]] <- max(table(forest$observeddata[,Y][1]))/sum(table(forest$observeddata[,Y][1]))
value <- list(medoids = medoids,
medoidtrees = mds, clusters=clusters, avgsilwidth=sil, agreement=agreement, accuracy=correct, withinsim=sums, treesimilarities=treesimilarities, treecov=treecov, seed=seed) attr(value, "class") <- "clusterforest" return(value) } ################# subfunctions ############################# #Measure 1: number of splitvariables & splitpoints in common/total number of splitvariables largest tree #If tol = null, only splitvariables taken into account M1 <- function (observeddata, treedata, Y, X, trees, tol){ #check whether there are any categorical predictors #if so, this measure with splitvariables can not be used. Xclass<- sapply(1:length(X), function(i) class(observeddata[,X[i]] ) == "integer"|class(observeddata[,X[i]] ) == "numeric") if(!is.null(tol)){ if("FALSE" %in% Xclass){ cat("This measure only works on numerical splitvariables (class integer or numeric)") return(NULL) } } pamtrees <- lapply(1:length(trees), function (i) pamtree(observeddata,treedata[[i]], Y[i],trees[[i]])) simmatrix <- matrix(c(0), length(trees), length(trees)) splits <- lapply(1:length(trees), function(i) splitv(pamtrees[[i]], tol)) s <- sapply(1:length(trees), function (k) sapply(k:length(trees), function (l) sim(splits1=splits[[k]], splits2=splits[[l]], X=X, tol=tol))) #replace naNs -- trees without splits should have similarity of 1 with each other for (i in 1:length(trees)){ si<- s[[i]] si[is.nan(si)] <- 1 simmatrix[i, c(i:length(trees))] <- si } ind <- lower.tri(simmatrix) #make matrix symmetric simmatrix[ind] <- t(simmatrix)[ind] return(simmatrix) } ### Measure 2: Paths M2<- function(observeddata, treedata, Y, trees, weight){ if (is.null(weight)){weight=0} pamtrees <- lapply(1:length(trees), function (i) pamtree(observeddata,treedata[[i]], Y[i],trees[[i]])) n <- length(pamtrees) simmatrix <- matrix(c(0), n, n) subs <- sapply (1:n, function (k) returnsubpaths(pamtrees[[k]])) #split up each path into all subpaths dis <- matrix(c(0), length(pamtrees), length(pamtrees)) d <- sapply(1:n, function (s) sapply(s:n, function (j) dissim(subs[[s]], subs[[j]], weight) )) for (i in 1:n){ dis[i, c(i:n)] <- d[[i]] } ind <- lower.tri(dis) #make matrix symmetric dis[ind] <- t(dis)[ind] sim <- 1 - round(dis,4) return(sim) } ####### Measure 3: classification labels agreement M3<- function (observeddata, treedata, Y, trees, sameobs){ pamtrees<- lapply(1:length(trees), function (i) pamtree(observeddata,treedata[[i]], Y[i],trees[[i]])) n <- length(pamtrees) sim <- matrix(0, length(pamtrees), length(pamtrees)) # if treedata contains same observations as fulldata, then use the training data to evaluate agreement, #otherwise use fulldata if(sameobs==TRUE){ s <- sapply(1:n, function (s) sapply(s:n, function (j) mean(pamtrees[[s]]$predresptrain==pamtrees[[j]]$predresptrain))) }else{ s <- sapply(1:n, function (s) sapply(s:n, function (j) mean(pamtrees[[s]]$predresp==pamtrees[[j]]$predresp))) } for (i in 1:n){ sim[i,c(i:n)] <- s[[i]] } ind <- lower.tri(sim) #make matrix symmetric sim[ind] <- t(sim)[ind] return(sim) } ###### Measure 4: Partition metric ################ M4 <- function (observeddata, treedata, Y, trees, sameobs){ pamtrees <- lapply(1:length(trees), function (i) pamtree(observeddata,treedata[[i]], Y[i],trees[[i]])) n <- length(trees) if(sameobs==TRUE){ par<- lapply(1:n, function (s) part(treedata[[s]], pamtrees[[s]]$prednodetrain)) }else{ par<- lapply(1:n, function (s) part(treedata[[s]], pamtrees[[s]]$prednode)) } par<- lapply(1:n, function (s) part(treedata[[s]], pamtrees[[s]]$prednode)) si <- sapply(1:n, function (s) sapply 
(s:n, function (j) metr(par[[s]], par[[j]], treedata[[s]]))) sim <- matrix(0, length(trees), length(trees)) for (i in 1:n){ sim[i, c(i:n)] <- si[[i]] } ind <- lower.tri(sim) #make matrix symmetric sim[ind] <- t(sim)[ind] return(sim) } ### set of splitvariables and splitpoints and the prediction of a leaf M6 <- function (observeddata, treedata, Y, X, trees, tol){ pamtrees <- lapply(1:length(trees), function (i) pamtree(observeddata,treedata[[i]], Y[i],trees[[i]])) n <- length(pamtrees) s <- lapply(1:n, function (k) splitvsets(pamtrees[[k]]$path1)) s0<- lapply(1:n, function (k) splitvsets(pamtrees[[k]]$path0)) for (k in 1:n){ if(is(treedata[[1]][,Y[1]], "factor") ) { treedata[[k]][,Y[k]] <- as.factor(treedata[[k]][,Y[k]]) }else{ treedata[[k]] = treedata[[k]] }#check whether why is a factor, if not-- make it a factor } best<- lapply(1:n, function(k) (sum(treedata[[k]][,Y[k]] == levels(treedata[[k]][,Y[k]])[2])/nrow(treedata[[k]] ))>0.5) simmatrix <- foreach (k = 1:n, .combine=cbind) %dopar% { sim <- numeric(n) for (l in k:n) { sim[l] <- simsets(paths1=s[[k]], paths2=s[[l]], paths01=s0[[k]], paths02=s0[[l]], X=X, tol=tol, best1=best[[k]], best2=best[[l]]) } sim } for (k in 1:n) { for (l in k:n) { simmatrix[k, l] = simmatrix[l, k] } } simmatrix = matrix(simmatrix, ncol=length(pamtrees), nrow=length(pamtrees), dimnames = NULL) return(simmatrix) } M5 <- function(observeddata, treedata, Y, X, trees, sameobs){ pamtrees <- lapply(1:length(trees), function (i) pamtree(observeddata,treedata[[i]], Y[i],trees[[i]])) n <-length(pamtrees) s <- matrix(0, length(pamtrees), length(pamtrees)) di <- lapply(1:length(pamtrees), function (i) disjnorm(pamtrees[[i]], trees[[i]],observeddata, treedata[[i]] ,X, Y[i], sameobs)) if(sameobs==TRUE){ si <- sapply(1:n, function (i) sapply (i:n, function (j) dis(di[[i]], di[[j]], pamtrees[[i]]$predresptrain, pamtrees[[j]]$predresptrain))) } else{ si <- sapply(1:n, function (i) sapply (i:n, function (j) dis(di[[i]], di[[j]], pamtrees[[i]]$predresp, pamtrees[[j]]$predresp))) } si <- sapply(1:n, function (i) sapply (i:n, function (j) dis(di[[i]], di[[j]], pamtrees[[i]]$predresp, pamtrees[[j]]$predresp))) for (i in 1:n){ s[i, c(i:n)] <- si[[i]] } ind <- lower.tri(s) #make matrix symmetric s[ind] <- t(s)[ind] s<- round(s, digits=3) return(s) } ## Function to turn each tree into a pam tree, containing the set of rules, the predictions on the full dataset and the nodes ## observeddata is the full dataset, treedata is the dataset for the current tree, Y is a vector with the outcome for each tree and tree are the partytrees pamtree <- function(observeddata,treedata,Y,tree){ if(length(tree) > 2){ ## Check whether there was a split paths <- listrulesparty(x=tree) # lists all the paths from root to leave prednode <- predict(tree, newdata = observeddata, type = "node") #predicts node for every row of full data if(!is(treedata[,Y], "factor" )) {treedata[,Y] <- as.factor(treedata[,Y])} #check whether y is a factor, if not-- make it a factor if(!is(observeddata[,Y], "factor") ) {observeddata[,Y] <- as.factor(observeddata[,Y])} #check whether y is a factor, if not-- make it a factor predresp <- predict(tree, newdata= observeddata, type="response") #predicts response for every row of full data predresp <- factor(predresp, levels=c(levels(observeddata[,Y])[1], levels(observeddata[,Y])[2])) predresptrain <- predict(tree, newdata = treedata, type="response") #predicts response for every row of full data predresptrain <- factor(predresptrain, levels=c(levels(observeddata[,Y])[1], 
levels(observeddata[,Y])[2])) prednodetrain <- predict(tree, newdata = treedata, type = "node") #predicts node for every row of tree data frame <- matrix(c(0), length(unique(prednodetrain)), 2) #create matrix with one row for each node value frame[, 1] <- sort(unique(prednodetrain)) frame[, 2] <- sapply(sort(unique(prednodetrain)), function(k) levels(predresptrain[prednodetrain == k][1])[predresptrain[prednodetrain == k][1]]) # check the predicted response for every node #the paths that lead to a response of the second level of y path1 <- paths[frame[, 2] == levels(observeddata[,Y])[2]] path0 <- paths[frame[, 2] == levels(observeddata[,Y])[1]] paths <- sapply(1:length(paths), function (k) strsplit(paths[k], " & ")) #split rules with multiple conditions in substrings path1 <- sapply(1:length(path1), function (k) strsplit(path1[k], " & ")) path0 <- sapply(1:length(path0), function (k) strsplit(path0[k], " & ")) #if there was no split }else{ paths<- NULL path1<-NULL path0<- NULL tree <- list(numeric(0), numeric(0)) if(is(treedata[,Y], "factor") ) {treedata[,Y] <- as.factor(treedata[,Y])} y <- treedata[, Y] #check what class is most prevalent if(sum(y == levels(y)[1], na.rm=T) > sum(y == levels(y)[2], na.rm=T)){ g1 <- levels(y)[1] } else{ g1<- levels(y)[2] } predresp <- rep(g1, length(y)) predresp<- factor(predresp, levels=levels(treedata[,Y])) prednode <- rep(1, length(y)) prednodetrain <- rep(1, length(y)) predresptrain<- rep(g1, length(y)) } value <- list(pamtree = paths, path0=path0, path1= path1, prednode = prednode, predresp=predresp,prednodetrain=prednodetrain, predresptrain=predresptrain) attr(value, "class") <- "pamtree" return(value) } #### grow party tree (for turning ranger/randomforest tree into partytree) grow.party.tree <- function(party.tree, ranger.tree, data, factor.terms.index, currentNodeNumber) { node <- ranger.tree[ranger.tree$nodeID == currentNodeNumber, ] factor.terms.index.left <- factor.terms.index factor.terms.index.right <- factor.terms.index # Create individual node if (node$terminal == TRUE) { newNode <- list(id = node$nodeID) } else { data.is.factor <- is.factor(data[[node$splitvarName]]) data.is.ordered <- is.ordered(data[[node$splitvarName]]) if (data.is.factor && !data.is.ordered) { index <- factor.terms.index[[node$splitvarName]] index[as.integer(unlist(strsplit(node$splitval, ',')))] = 2L newNode <- list(id = node$nodeID, split = partysplit( varid = as.integer(node$splitvarID + 1), index = index ), kids = c(as.integer(node$leftChild), as.integer(node$rightChild))) factor.terms.index.left[[node$splitvarName]] <- replace(index, index==2L, NA) factor.terms.index.right[[node$splitvarName]] <- replace(replace(index, index==1L, NA), index==2L, 1L) } else { newNode <- list(id = node$nodeID, split = partysplit(varid = as.integer(node$splitvarID + 1), breaks = as.numeric(node$splitval)), kids = c(as.integer(node$leftChild), as.integer(node$rightChild))) } } # Traverse tree recursively if (node$terminal == FALSE) { leftChildren <- grow.party.tree(party.tree, ranger.tree, data, factor.terms.index.left, node$leftChild) rightChildren <- grow.party.tree(party.tree, ranger.tree, data, factor.terms.index.right, node$rightChild) party.tree <- c(party.tree, leftChildren, rightChildren) } # Add newly created node to list of nodes party.tree <- c(party.tree, list(newNode)) party.tree } generic2party <- function(data, generic.tree, inbag, formula, weights) { response <- all.vars(formula)[1] terms <- terms(formula, data = data) factor.terms <- all.vars(terms)[-1] 
factor.terms.index <- list() for(factor.term in factor.terms) { factor.terms.index[[factor.term]] <- rep(1L, length(levels(data[[factor.term]]))) } data <- data[complete.cases(data), c(all.vars(terms)[-1], response)] data <- as.data.frame(lapply(data, rep, inbag)) if (is.null(weights)) { weights <- rep(1L, nrow(data)) } nodelist = list() nodelist <- grow.party.tree(nodelist, generic.tree, data, factor.terms.index, 0) nodes <- as.partynode(nodelist) fitted <- fitted_node(nodes, data = data) tree <- party(nodes, data = data, fitted = data.frame("(fitted)" = fitted, "(response)" = data[[response]], "(weights)" = weights, check.names = FALSE), terms = terms(formula, data = data) ) as.constparty(tree) } ranger2party <- function(data, ranger.forest, treeNumber, weights = NULL) { if (!exists("inbag.counts", where=ranger.forest)) { stop("Run ranger with the keep.inbag=T parameter") } ranger.tree <- treeInfo(ranger.forest, tree = treeNumber) formula <- formula(ranger.forest$call[[2]]) inbag <- ranger.forest$inbag.counts[[treeNumber]] generic2party(data, ranger.tree, inbag, formula, weights) } randomForest2party <- function(data, randomForest.forest, treeNumber, weights = NULL) { if (!exists("inbag", where=randomForest.forest)) { stop("Run randomForest with the keep.inbag=T parameter") } randomForest.tree.without.labels <- data.frame(getTree(randomForest.forest, k = treeNumber, labelVar = F)) randomForest.tree.with.labels <- data.frame(getTree(randomForest.forest, k = treeNumber, labelVar = T)) # Convert randomForest format to Ranger format colnames(randomForest.tree.with.labels) <- c("leftChild", "rightChild", "splitvarName", "splitval", "terminal", "prediction") colnames(randomForest.tree.without.labels) <- c("leftChild", "rightChild", "splitvarID", "splitval", "terminal", "prediction") nodeID <- 0:(nrow(randomForest.tree.with.labels) -1) leftChild <- randomForest.tree.with.labels$leftChild - 1L rightChild <- randomForest.tree.with.labels$rightChild - 1L splitvarID <- randomForest.tree.without.labels$splitvarID - 1L splitvarName <- as.character(randomForest.tree.with.labels$splitvarName) splitval <- randomForest.tree.with.labels$splitval terminal <- ifelse(randomForest.tree.without.labels$terminal == -1, TRUE, FALSE) is.na(rightChild) <- is.na(splitvarID) <- is.na(leftChild) <- is.na(splitval) <- terminal prediction <- randomForest.tree.with.labels$prediction generic.tree <- data.frame(nodeID, leftChild, rightChild, splitvarID, splitvarName, splitval, terminal, prediction) idx.unordered <- apply(array(splitvarName), 1, function(x) { !("ordered" %in% class(data[[x]]) || "numeric" %in% class(data[[x]]))}) idx.unordered[terminal] <- FALSE if (any(idx.unordered)) { if (any(generic.tree$splitval[idx.unordered] > (2^31 - 1))) { warning("Unordered splitting levels can only be shown for up to 31 levels.") generic.tree$splitval[idx.unordered] <- NA } else { generic.tree$splitval[idx.unordered] <- sapply(generic.tree$splitval[idx.unordered], function(x) { paste(which(as.logical(intToBits(2^31-1-x))), collapse = ",") }) } } formula <- formula(randomForest.forest$call[[2]]) inbag <- randomForest.forest$inbag[, treeNumber] generic2party(data, generic.tree, inbag, formula, weights) } ### partykit:::.list.rules.party listrulesparty <- function (x, i = NULL, ...) 
{ if (is.null(i)) i <- nodeids(x, terminal = TRUE) if (length(i) > 1) { ret <- sapply(i, listrulesparty, x = x) names(ret) <- if (is.character(i)) i else names(x)[i] return(ret) } if (is.character(i) && !is.null(names(x))) i <- which(names(x) %in% i) stopifnot(length(i) == 1 & is.numeric(i)) stopifnot(i <= length(x) & i >= 1) i <- as.integer(i) dat <- data_party(x, i) if (!is.null(x$fitted)) { findx <- which("(fitted)" == names(dat))[1] fit <- dat[, findx:ncol(dat), drop = FALSE] dat <- dat[, -(findx:ncol(dat)), drop = FALSE] if (ncol(dat) == 0) dat <- x$data } else { fit <- NULL dat <- x$data } rule <- c() recFun <- function(node) { if (id_node(node) == i) return(NULL) kid <- sapply(kids_node(node), id_node) whichkid <- max(which(kid <= i)) split <- split_node(node) ivar <- varid_split(split) svar <- names(dat)[ivar] index <- index_split(split) if (is.factor(dat[, svar])) { if (is.null(index)) index <- ((1:nlevels(dat[, svar])) > breaks_split(split)) + 1 slevels <- levels(dat[, svar])[index == whichkid] srule <- paste(svar, " %in% c(\"", paste(slevels, collapse = "\", \"", sep = ""), "\")", sep = "") } else { if (is.null(index)) index <- 1:length(kid) breaks <- cbind(c(-Inf, breaks_split(split)), c(breaks_split(split), Inf)) sbreak <- breaks[index == whichkid, ] right <- right_split(split) srule <- c() if (is.finite(sbreak[1])) srule <- c(srule, paste(svar, ifelse(right, ">", ">="), sbreak[1])) if (is.finite(sbreak[2])) srule <- c(srule, paste(svar, ifelse(right, "<=", "<"), sbreak[2])) srule <- paste(srule, collapse = " & ") } rule <<- c(rule, srule) return(recFun(node[[whichkid]])) } node <- recFun(node_party(x)) paste(rule, collapse = " & ") } #### Subfunctions M1 ###### #get splitvariables and splitpoints splitv <- function (tree, tol){ paths <- tree$pamtree # check whether there was a split if(length(paths) > 0){ pathsw <- paths leaves <- length(paths) l <- sapply(1:leaves, function (k) length(paths[[k]])) spaths <- lapply(1:leaves, function(k) sub(" <=.", ':', paths[[k]])) spaths <- lapply(1:leaves, function(k) sub(" <.", ':', spaths[[k]])) spaths <- lapply(1:leaves, function(k) sub(" >=.", ':', spaths[[k]])) spaths <- lapply(1:leaves, function(k) sub(" >.", ':', spaths[[k]])) spaths <- lapply(1:leaves, function(k) sub(" %in%.*", '', spaths[[k]])) splitvarsa <- unique(unlist(spaths)) splitvars <- sub(":.*", '', splitvarsa) if(!is.null(tol)){ splitvarsa <- sub(".*:", '', splitvarsa) splitvarsa <- as.numeric(splitvarsa) } else{ splitvarsa<- NULL } nsplitvar1 <- length(splitvars) # number of splits in tree i }else{ splitvars <- NULL splitvarsa <- NULL nsplitvar1 <- 0 } return(list(splitvars = splitvars, splitpoints = splitvarsa, nsplits = nsplitvar1)) } #calculate jaccard index sim<- function (splits1, splits2, X, tol){ ### Only predictors if(is.null(tol)){ common1 <- length(splits1$splitvars[pmatch(splits1$splitvars, splits2$splitvars, nomatch = 0)]) #pmatch: no doubles total1 <- splits1$nsplits + splits2$nsplits - common1 Jaccard <- common1 / total1 } ### Also splitpoints if(!is.null(tol)){ # Create matrix That shows for each variable of the splits1 whether it is equal to each variable in splits2 same <- matrix(c(0), length(splits1[[1]]), length(splits2[[1]])) if(length(same > 0)){ for (i in 1:length(splits1[[1]])){ for(j in 1:length(splits2[[1]])){ same[i, j] <- splits1[[1]][[i]] == splits2[[1]][[j]] } } # For those variables that are the same in both trees, put splitpoints of tree 2 in the matrix splitpoints <- matrix(c(rep(splits2$splitpoints, nrow(same))), nrow(same), ncol(same), 
byrow=T) s <- c(splitpoints) sm <- c(same) s[sm == 0] <- NA splitpoints <- matrix(s, nrow(same), ncol(same), byrow=F) # Get the right tolerance for each variables in splits1 tsa <- splits1$splitvars t <- match(tsa,X) correct<- matrix(c(0), nrow(same), ncol(same)) # Look whether splitpoint of splits2 is within tolerance zone of splits1 for (i in 1:length(splits1$splitvars)){ d <- as.numeric(tol[t[i]]) correct[i, ] <- findInterval(splitpoints[i, ], c(as.numeric(splits1$splitpoints[i]) - d, as.numeric(splits1[[2]][i]) + d) ) == 1 } correct[is.na(correct)] <- 0 g <- graph_from_incidence_matrix(correct) common<- max_bipartite_match(g)$matching_size total<- splits1[[3]] + splits2[[3]] - common Jaccard<- common / total } else{ if(length(splits1[[1]]) == 0 & length(splits2[[1]]) == 0){ Jaccard <- 1 } else{Jaccard <- 0} } } return(Jaccard) } # function returns all the subpaths, takes away splitpoints and removes direction of last split of each path. # then returns only unique subpaths ## subfunction M2 returnsubpaths <- function(tree){ paths <- tree$pamtree if(length(paths) > 0){ leaves<- length(paths) l<- sapply(1:leaves, function (k) length(paths[[k]])) lastpaths <- lapply(1:leaves, function(k) sub("<.*", '', paths[[k]][l[k]])) # remove direction and splitpoint last attribut of path lastpaths <- lapply(1:leaves, function(k) sub(">.*", '', lastpaths[[k]])) # place it back in paths for(j in 1:leaves){ paths[[j]][l[j]]<- lastpaths[j] } paths <- lapply(1:leaves, function(k) sub("<.*", '-', paths[[k]])) #replace splitpoints with - or + paths <- lapply(1:leaves, function(k) sub(">.*", '+', paths[[k]])) upaths <- unique(paths) subpaths <- list(0) for(j in 1:length(upaths)){ d<- list(0) d[[1]] <- upaths[[j]] if(length(upaths[[j]]) > 1){ #check whether more than one split (otherwise no subpaths and d has just one element) for (i in 2:length(upaths[[j]])){ d[[i]] <- d[[i - 1]][- length(d[[i - 1]])] #Split each path up into all subpaths } } subpaths[[j]] <- d } subpaths <- unlist(subpaths, recursive=FALSE) lastsubpaths <- lapply(1:length(subpaths), function(k) gsub("[[:punct:]]", '', subpaths[[k]][length(subpaths[[k]])])) # remove punctuation from last attribute of each subpath for(j in 1:length(subpaths)){ subpaths[[j]][length(subpaths[[j]])] <- lastsubpaths[j] } subpaths<- unique(subpaths) }else{subpaths<- NULL} return(subpaths) } #calculates number of distinct subpaths in two sets of subpaths dissim<- function (subs1,subs2,weights){ l <- sapply(1:length(subs1), function (k) length(subs1[[k]])) # length of each subpath of each path d <- c(0) l2 <- sapply(1:length(subs2), function (k) length(subs2[[k]])) #length of each subpath of each path for (k in 1:(max(max(l), max(l2)))){ #iterate until longest subpath of the two trees a <- as.numeric(subs1[l == k] %in% subs2[l2 == k]) # number of subpaths of tree i in j b <- as.numeric(subs2[l2 == k] %in% subs1[l == k]) # number of subpaths of tree j in i if(weights==0){d[k] <- (length(a) + length(b)) - (sum(a) + sum(b))} # if weighs 0 every dissimilar subpath weighted equally if(weights==1){d[k] <- 1 / k * ((length(a) + length(b)) - (sum(a) + sum(b)))} # if weights 1 every dissimilar subpath weighted by 1/length subpath } if(weights == 0){dis <- sum(d) / (length(l[l > 0]) + length(l2[l2 > 0]))} # divide #dissimilar subpaths by maximum dissimilar subpaths wl <- sum(1 / l) wl2 <- sum(1 / l2) wl[is.infinite(wl)] <- 0 wl2[is.infinite(wl2)] <- 0 if(weights == 1){dis <- sum(d) / (wl+wl2)} #divide weighted #dissimilar subpaths by maximum weighted # dissimilar subpaths 
dis[is.na(dis)] <- 0 return(dis) } ### Subfunctions M4 part <- function (data, tree1){ t1 <- matrix(c(0), nrow(data), nrow(data)) for(i in 1:nrow(data) - 1){ t1[i,] <- tree1[i] == tree1 # for each observation with each other observation: Same leaf? } return(t1) } metr<- function (t1, t2, data){ ind <- upper.tri(t1) part<- sum(t1[ind] == t2[ind]) / choose(nrow(data), 2) return(part) } #### Subfunctions M6 # function to get back the splitvariables and slit points for each path splitvsets <- function (paths){ splitvars<- list(length(paths)) splitvarsa<- list(length(paths)) nsplitvar1<- list(length(paths)) #paths <- trees[[1]] if(length(paths) > 0){ for(i in 1:length(paths)){ pathsw <- paths[[i]] # splitvarsa1 <- unique(unlist(spaths)) splitvarsa1<- unique(unlist(pathsw)) splitvars1<- sub("\\-.*$", '', splitvarsa1) splitvars[[i]]<- sub("\\d.*$", '', splitvars1) splitvarsa1 <- sub(".*=", '', pathsw) splitvarsa1 <- sub(".*>", '', splitvarsa1) splitvarsa1 <- sub(".*<", '', splitvarsa1) splitvarsa[[i]] <- c(as.numeric(splitvarsa1)) } }else{ splitvars <- NULL splitvarsa <- NULL #nsplitvar1 <- 0 } return(list(splitvars = splitvars, splitvarsa = splitvarsa)) } simsets<- function (paths1, paths2, paths01, paths02, X, tol,best1,best2){ if(length(paths1[[1]]) > 0 & length(paths2[[1]]) > 0) { if(is.null(tol)){ if(identical(paths1$splitvars, paths2$splitvars) & identical(paths01$splitvars, paths02$splitvars)){ sim <- 1 } else { J1 <- JaccardPaths(paths1, paths2, tol,X) J0 <- JaccardPaths(paths01, paths02,tol,X) sim <- (J1+J0)/2 } } else { J1 <- JaccardPaths(paths1, paths2, tol,X) J0 <- JaccardPaths(paths01, paths02,tol,X) sim <- (J1+J0)/2 } } else{ if(length(paths1[[1]]) == 0 & length(paths2[[1]]) == 0){ if(best1==best2){ sim <- 1 } else{sim <- 0} } else{sim <- 0} } return(sim) } JaccardPaths <- function(paths1, paths2, tol,X){ Jaccard<- matrix(c(0),length(paths1[[1]]),length(paths2[[1]])) ## for each path in tree 1 and for each path in tree 2 for (i in 1:length(paths1[[1]])){ for (j in 1:length(paths2[[1]])){ same <- matrix(c(0), length(paths1[[1]][[i]]), length(paths2[[1]][[j]])) # for the path check whether each variable has a match in the other path if(length(same > 0)){ for (k in 1:length(paths1[[1]][[i]])){ for(m in 1:length(paths2[[1]][[j]])){ same[k, m] <- paths1[[1]][[i]][[k]] == paths2[[1]][[j]][[m]] } } ### In case splitpoints are taken into account if (!is.null(tol)){ # For those variables that are the same in both trees, put splitpoints in the matrix splitpoints <- matrix(c(rep(paths2[[2]][[j]], nrow(same))), nrow(same), ncol(same), byrow=T) s <- c(splitpoints) sm <- c(same) s[sm == 0] <- NA splitpoints <- matrix(s, nrow(same), ncol(same), byrow=F) tsa<- paths1[[1]][[i]] tsa<- sub(" <=.", '', tsa) tsa<- sub(" <.", '', tsa) tsa<- sub(" >=.", '', tsa) tsa<- sub(" >.", '', tsa) t <- match(tsa,X) correct<- matrix(c(0), nrow(same), ncol(same)) # Look whether splitpoint of splits2 is within tolerance zone of splits1 for (h in 1:length(paths1[[1]][[i]])){ d <- as.numeric(tol[t[h]]) correct[h, ] <- findInterval(splitpoints[h, ], c(as.numeric(paths1[[2]][[i]][h]) - d, as.numeric(paths1[[2]][[i]][h]) + d) ) == 1 } }else{ correct<- same } correct[is.na(correct)] <- 0 g <- graph_from_incidence_matrix(correct) common<- max_bipartite_match(g)$matching_size total<- length(paths1[[1]][[i]]) + length(paths2[[2]][[j]]) - common Jaccard[i,j]<- common / total }else{ if(length(paths1[[1]][[i]]) == 0 & length(paths2[[1]][[j]]) == 0){ Jaccard[i,j] <- 1 } else{Jaccard[i,j] <- 0} } } } g <- 
graph_from_incidence_matrix(Jaccard, weighted=TRUE) common<- max_bipartite_match(g)$matching_weight sim<- common/min(length(paths1[[1]]), length(paths2[[2]])) return(sim) } ##### sUBFUNCTIONS M5 ############################################################## dis <- function (tree1,tree2, predresp1, predresp2){ if( ! is.null(tree1) & ! is.null(tree2)){ common <- matrix(c(0), length(tree1), length(tree2)) # how many matches in each rule for(i in 1:length(tree1)){ for(j in 1: length(tree2)){ common[i, j] <- length(unlist(tree1[i])[pmatch(unlist(tree1[i]), unlist(tree2[j]), nomatch = 0)]) } } rows <- apply(common, 2, max) cols <- apply(common, 1, max) #common rules are those that have all their elementary conjuncts in common #calculate jaccard common <- min(sum(rows == length(tree1[[1]])), sum(cols == length(tree1[[1]]))) total <- length(tree1) + length(tree2) - common s <- common / total }else if(is.null(tree1) & ! is.null(tree2) | ! is.null(tree1) & is.null(tree2)){ s <- 0 }else{ #check what prediction was made by the trivial rule if(predresp1[1] == predresp2[1]){ s <- 1 } else{s<-0} } return(s) } disjnorm <- function (pamtree, tree,observeddata, treedata,X, Y, sameobs){ path <- pamtree$path1 #Paths that lead to leaf with predicted class 1 if(sameobs==TRUE){ predresp<- pamtree$predresptrain }else{ predresp<- pamtree$predresp } if(length(path) > 0){ leaves <- length(path) # number of leaves l <- sapply(1:leaves, function (k) length(path[[k]])) # for each path number of elements levels <- matrix(c(0), length(X), 2) for (i in 1:length(X)){ levels[i, ] <- levels(observeddata[, X[i]]) } levels <- cbind(X, levels) npaths <-path spaths <- lapply(1:leaves, function(k) sapply(1:l[k], function (j) sub(" %in%.*", '', path[[k]][j]))) ## for(i in 1:length(X)){ npaths <- lapply(1:leaves, function(k) sapply(1:l[k], function (j) sub(paste(levels[i,2]), '-', npaths[[k]][j]))) } for(i in 1:length(X)){ npaths <- lapply(1:leaves, function(k) sapply(1:l[k], function (j) sub(paste(levels[i,3]), '+', npaths[[k]][j]))) } for(i in 1:length(X)){ signs <- lapply(1:leaves, function (i) gsub("[^-+]+", "", npaths[[i]])) } paths <- lapply(1:leaves, function (i) paste(spaths[[i]], signs[[i]])) b <- sapply (1:length(spaths), function (s) X[pmatch(X, spaths[[s]], nomatch = 0) == 0]) if(length(b) > 0){ if(is.list(b)){ for(i in 1:length(b)){ now <- b[[i]] nowe <- now[now != ""] nowe <- sort(nowe) # Add those variables in plus and minus form to those paths if(length(nowe > 0)){ a <- matrix(c(0), 2, length(nowe)) a[1, ] <- c(paste(nowe, '+', sep=' ')) a[2, ] <- c(paste(nowe, '-', sep=' ')) grid <- do.call(expand.grid, split(a, col(a))) for (k in 1:nrow(grid)){ paths <- c(paths, list(unlist(c(paths[i], paste(unlist(grid[k,c(1:ncol(grid))])))))) } } else {paths <- paths} } }else{ now <- b nowe <- now[now != ""] nowe <- sort(nowe) # Add those variables in plus and minus form to those paths if(length(nowe > 0)){ a <- matrix(c(0), 2, length(nowe)) a[1, ] <- c(paste(nowe, '+', sep=' ')) a[2, ] <- c(paste(nowe, '-', sep=' ')) grid <- do.call(expand.grid, split(a, col(a))) paths <- lapply (1:nrow(grid), function (k) c(paths[[1]], paste(unlist(grid[k,c(1:ncol(grid))])))) } else { paths <- paths } } } else{paths <- paths} l <- sapply(1:length(paths), function (k) length(paths[[k]])) paths <- unique(paths) paths <- paths[lengths(paths) == max(l)] }else{ ### if tree only root if(predresp[1] == levels(observeddata[,Y])[1]){ paths <- NULL }else{ a <- matrix(c(0), 2, length(X)) a[1, ] <- c(paste(X, '+', sep=' ')) a[2, ] <- c(paste(X, '-', sep=' 
')) grid <- do.call(expand.grid, split(a, col(a))) paths <- lapply (1:nrow(grid), function (k) paste(unlist(grid[k, c(1:ncol(grid))]))) } } return(paths) }
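# --- Illustrative sketch (editor's addition, not part of the package) --------
# The similarity functions above ('sim' and 'JaccardPaths') count matched
# splits between two trees via a maximum bipartite matching on a 0/1
# incidence matrix. A minimal toy example of that core step, using the same
# igraph calls as the code above:
#
#   correct <- matrix(c(1, 0, 0,
#                       0, 1, 1), nrow = 2, byrow = TRUE)
#   g <- igraph::graph_from_incidence_matrix(correct)
#   common <- igraph::max_bipartite_match(g)$matching_size        # 2 matches
#   jaccard <- common / (nrow(correct) + ncol(correct) - common)  # 2 / 3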
/scratch/gouwar.j/cran-all/cranData/C443/R/clusterforest.R
#' Drug consumption data set
#'
#' A dataset collected by Fehrman et al. (2017), freely available on the UCI Machine Learning Repository (Lichman, 2013), containing records of 1885 respondents regarding their use of 18 types of drugs, and their measurements on 12 predictors.
#'
#' All predictors were originally categorical and were quantified by Fehrman et al. (2017). The meaning of the values can be found on
#' \url{https://archive.ics.uci.edu/dataset/373/drug+consumption+quantified}.
#' The original response categories for each drug were: never used the drug, used it over a decade ago, or in the last decade, year, month, week, or day.
#' We transformed these into binary response categories, where 0 (non-user) consists of the categories never used the drug and used it over a decade ago, and 1 (user) consists of all other categories.
#' @format A data frame with 1885 rows and 32 variables:
#' \describe{
#'   \item{ID}{Respondent ID}
#'   \item{Age}{Age of respondent}
#'   \item{Gender}{Gender of respondent, where 0.48 denotes female and -0.48 denotes male}
#'   \item{Edu}{Level of education of participant}
#'   \item{Country}{Country of current residence of participant}
#'   \item{Ethn}{Ethnicity of participant}
#'   \item{Neuro}{NEO-FFI-R Neuroticism score}
#'   \item{Extr}{NEO-FFI-R Extraversion score}
#'   \item{Open}{NEO-FFI-R Openness to experience score}
#'   \item{Agree}{NEO-FFI-R Agreeableness score}
#'   \item{Consc}{NEO-FFI-R Conscientiousness score}
#'   \item{Impul}{Impulsiveness score measured by BIS-11}
#'   \item{Sensat}{Sensation seeking score measured by ImpSS}
#'   \item{Alc}{Alcohol user (1) or non-user (0)}
#'   \item{Amphet}{Amphetamine user (1) or non-user (0)}
#'   \item{Amyl}{Amyl nitrite user (1) or non-user (0)}
#'   \item{Benzos}{Benzodiazepine user (1) or non-user (0)}
#'   \item{Caff}{Caffeine user (1) or non-user (0)}
#'   \item{Can}{Cannabis user (1) or non-user (0)}
#'   \item{Choco}{Chocolate user (1) or non-user (0)}
#'   \item{Coke}{Coke user (1) or non-user (0)}
#'   \item{Crack}{Crack user (1) or non-user (0)}
#'   \item{Ecst}{Ecstasy user (1) or non-user (0)}
#'   \item{Her}{Heroin user (1) or non-user (0)}
#'   \item{Ket}{Ketamine user (1) or non-user (0)}
#'   \item{Leghighs}{Legal Highs user (1) or non-user (0)}
#'   \item{LSD}{LSD user (1) or non-user (0)}
#'   \item{Meth}{Methadone user (1) or non-user (0)}
#'   \item{Mush}{Magical Mushroom user (1) or non-user (0)}
#'   \item{Nico}{Nicotine user (1) or non-user (0)}
#'   \item{Semeron}{Semeron user (1) or non-user (0), a fictitious drug to identify over-claimers}
#'   \item{VSA}{Volatile substance abuse user (1) or non-user (0)}
#'
#' }
#' @source \url{https://archive.ics.uci.edu/dataset/373/drug+consumption+quantified}
#' @references \cite{Fehrman, E., Muhammad, A. K., Mirkes, E. M., Egan, V., & Gorban, A. N. (2017). The Five Factor Model of personality and evaluation of drug consumption risk. In Data Science (pp. 231-242). Springer, Cham.}
#' \cite{Lichman, M. (2013). UCI machine learning repository.}
"drugs"
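# Illustrative usage (editor's addition): the binary user/non-user coding
# described above can be inspected directly, e.g.
#   data(drugs)
#   table(drugs$Can)   # cannabis: non-users (0) vs users (1)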
/scratch/gouwar.j/cran-all/cranData/C443/R/drugs.R
#' Mapping the tree clustering solution to a known source of variation underlying the forest #' #' A function that can be used to get insight into a clusterforest solution, in the case that there are known #' sources of variation underlying the forest. These known sources of variation must be included in the clusterforest object #' (and thus must be defined when running the clusterforest function) #' In case of a categorical covariate, it visualizes the number of trees from each value of the covariate that belong to each cluster. #' In case of a continuous covariate, it returns the mean and standard deviation of the covariate in each cluster. #' @param clusterforest The clusterforest object, indluding the treecov attribute. #' @param solution The solution #' @return \item{multiplot}{In case of categorical covariate, for each value of the covariate, a bar plot with the number of trees that belong to each cluster} #' \item{heatmap}{In case of a categorical covariate, a heatmap with for each value of the covariate, the number of trees that belong to each cluster} #' \item{clustermeans}{In case of a continuous covariate, the mean of the covariate in each cluster} #' \item{clusterstds}{In case of a continuous covariate, the standard deviation of the covariate in each cluster} #' @export #' @importFrom plyr mapvalues #' @importFrom methods is #' @examples #' require(rpart) #' data_Amphet <-drugs[,c ("Amphet","Age", "Gender", "Edu", "Neuro", "Extr", "Open", "Agree", #' "Consc", "Impul","Sensat")] #' data_cocaine <-drugs[,c ("Coke","Age", "Gender", "Edu", "Neuro", "Extr", "Open", "Agree", #' "Consc", "Impul","Sensat")] #' #' #'#Function to draw a bootstrap sample from a dataset #'DrawBoots <- function(dataset, i){ #'set.seed(2394 + i) #'Boot <- dataset[sample(1:nrow(dataset), size = nrow(dataset), replace = TRUE),] #'return(Boot) #'} #' #'#Function to grow a tree using rpart on a dataset #'GrowTree <- function(x,y,BootsSample, minsplit = 40, minbucket = 20, maxdepth =3){ #' #' controlrpart <- rpart.control(minsplit = minsplit, minbucket = minbucket, maxdepth = maxdepth, #' maxsurrogate = 0, maxcompete = 0) #' tree <- rpart(as.formula(paste(noquote(paste(y, "~")), noquote(paste(x, collapse="+")))), #' data = BootsSample, control = controlrpart) #' return(tree) #'} #' #' #Draw bootstrap samples and grow trees #' BootsA<- lapply(1:5, function(k) DrawBoots(data_Amphet,k)) #' BootsC<- lapply(1:5, function(k) DrawBoots(data_cocaine,k)) #' Boots = c(BootsA,BootsC) #' #' TreesA <- lapply(1:5, function (i) GrowTree(x=c ("Age", "Gender", "Edu", "Neuro", #' "Extr", "Open", "Agree","Consc", "Impul","Sensat"), y="Amphet", BootsA[[i]] )) #' TreesC <- lapply(1:5, function (i) GrowTree(x=c ( "Age", "Gender", "Edu", "Neuro", #' "Extr", "Open", "Agree", "Consc", "Impul","Sensat"), y="Coke", BootsC[[i]] )) #' Trees=c(TreesA,TreesC) #' #'#Cluster the trees #'ClusterForest<- clusterforest(observeddata=drugs,treedata=Boots,trees=Trees,m=1, #'fromclus=2, toclus=2, treecov=rep(c("Amphet","Coke"),each=5), sameobs=FALSE, no_cores=2) #' #' #Link cluster result to known source of variation #' treesource(ClusterForest, 2) treesource <- function(clusterforest, solution){ UseMethod("treesource",clusterforest) } #' Mapping the tree clustering solution to a known source of variation underlying the forest #' #' A function that can be used to get insight into a clusterforest solution, in the case that there is a known #' source of variation underlying the forest. 
#' It visualizes the number of trees from each source that belong to each cluster.
#' @param clusterforest The clusterforest object
#' @param solution The solution
#' @export
treesource.default <- function(clusterforest, solution)
{
  print("Make sure that the clustering argument is an object from class clusterforest.")
}

#' Mapping the tree clustering solution to a known source of variation underlying the forest
#'
#' A function that can be used to get insight into a clusterforest solution, in the case that there is a known
#' source of variation underlying the forest.
#' It visualizes the number of trees from each source that belong to each cluster.
#' @param clusterforest The clusterforest object
#' @param solution The solution
#' @export
#' @importFrom plyr mapvalues
#' @importFrom RColorBrewer brewer.pal
#' @importFrom grDevices colorRampPalette
#' @importFrom gridExtra grid.arrange
#' @importFrom ggplot2 ggplot aes geom_tile scale_fill_gradientn geom_bar ggtitle ylim
#' @importFrom stats frequency runif sd
#'
treesource.clusterforest <- function(clusterforest, solution)
{
  if(is.null(clusterforest$treecov)){
    cat('The clusterforest object should contain a treecov attribute. Make sure you provide it as an argument when using the clusterforest() function')
    return(NULL)
  }

  # check whether the covariate input is a matrix or a single vector
  if(is.null(ncol(clusterforest$treecov))){
    ncov = 1
  } else{
    ncov = ncol(clusterforest$treecov)
  }

  mean_c = list(0)
  sd_c = list(0)
  heatmaps = list(0)
  multiplots = list(0)

  for(k in 1:ncov){
    if(is.null(ncol(clusterforest$treecov))){
      covx <- clusterforest$treecov
    } else{
      covx <- clusterforest$treecov[,k]
    }

    if(is(covx, "numeric") | is(covx, "integer")){
      clustering <- clusterforest$clusters[[solution]]
      clevels = sort(unique(clustering))
      mean_c[[k]] <- sapply(1:length(unique(clustering)), function (i) mean(covx[clustering == clevels[i]]))
      sd_c[[k]] <- sapply(1:length(unique(clustering)), function (i) sd(covx[clustering == clevels[i]]))
    }

    if(!is(covx, "numeric") & !is(covx, "integer")){
      Clusters <- Sources <- freq <- cluster <- NULL
      source <- covx
      treesource <- as.numeric(mapvalues(source, from = c(unique(source)), to = seq(1, length(unique(source)))))
      clustering <- clusterforest$clusters[[solution]]

      Real <- matrix(c(0), length(unique(source)), length(unique(clustering)))
      for(i in 1:length(unique(clustering))){
        for (j in 1:length(unique(treesource))){
          Real[j,i] <- length(clustering[clustering == i & treesource == j])
        }
      }

      row.names(Real) <- c(paste(unique(source)))
      colnames(Real) <- c(paste(unique(clustering)))

      # heatmap
      df.data <- expand.grid(Sources = c(paste(unique(source))), Clusters = c(paste(unique(clustering))))
      df.data$freq <- as.vector(Real)
      heatmap <- ggplot(data = df.data, aes(x = Clusters, y = Sources)) + geom_tile(aes(fill = freq))
      hm.palette <- colorRampPalette(brewer.pal(9, 'YlOrRd'), space = 'Lab')
      heatmaps[[k]] <- heatmap + scale_fill_gradientn(colours = hm.palette(100))

      ## plot per cov level
      p <- list()
      for(i in 1:length(unique(source))){
        R <- data.frame(cluster = seq(1:length(unique(clustering))), frequency = Real[i, ])
        p[[i]] <- ggplot(data = R, aes(x = cluster, y = frequency)) + geom_bar(stat = "identity") + ylim(0, sum(as.numeric(source == unique(source[1]))))
        p[[i]] <- p[[i]] + ggtitle(paste(unique(source)[i]))
      }
      multiplots[[k]] <- do.call(grid.arrange, p)
    }
  }
  return(list(clustermeans = mean_c, clusterstds = sd_c, multiplot = multiplots, heatmap = heatmaps))
}
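# Illustrative sketch (editor's addition, reusing the objects from the
# examples above): with a numeric tree covariate the solution is summarized
# numerically rather than plotted, e.g.
#   res <- treesource(ClusterForest, 2)
#   res$clustermeans   # one vector of per-cluster covariate means per covariate
#   res$clusterstds    # the matching per-cluster standard deviations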
/scratch/gouwar.j/cran-all/cranData/C443/R/treesourceClusterMethod.R
#' @export
C5.0 <- function(x, ...) UseMethod("C5.0")

#' C5.0 Decision Trees and Rule-Based Models
#'
#' Fit classification tree models or rule-based models using
#' Quinlan's C5.0 algorithm
#'
#' This model extends the C4.5 classification algorithms described
#' in Quinlan (1992). The details of the extensions are largely
#' undocumented. The model can take the form of a full decision
#' tree or a collection of rules (or boosted versions of either).
#'
#' When using the formula method, factors and other classes are
#' preserved (i.e. dummy variables are not automatically created).
#' This particular model handles non-numeric data of some types
#' (such as character, factor and ordered data).
#'
#' The cost matrix should be CxC, where C is the number of
#' classes. Diagonal elements are ignored. Columns should
#' correspond to the true classes and rows are the predicted
#' classes. For example, if C = 3 with classes Red, Blue and Green
#' (in that order), a value of 5 in the (2,3) element of the matrix
#' would indicate that the cost of predicting a Green sample as
#' Blue is five times the usual value (of one). Note that when
#' costs are used, class probabilities cannot be generated using
#' [predict.C5.0()].
#'
#' Internally, the code will attempt to halt boosting if it
#' appears to be ineffective. For this reason, the value of
#' `trials` may be different from what the model actually
#' produced. There is an option to turn this off in
#' [C5.0Control()].
#'
#' @aliases C5.0.default C5.0.formula C5.0
#' @param x a data frame or matrix of predictors.
#' @param y a factor vector with 2 or more levels
#' @param trials an integer specifying the number of boosting
#' iterations. A value of one indicates that a single model is
#' used.
#' @param rules A logical: should the tree be decomposed into a
#' rule-based model?
#' @param weights an optional numeric vector of case weights. Note
#' that the data used for the case weights will not be used as a
#' splitting variable in the model (see
#' \url{http://www.rulequest.com/see5-win.html#CASEWEIGHT} for
#' Quinlan's notes on case weights).
#' @param control a list of control parameters; see
#' [C5.0Control()]
#' @param costs a matrix of costs associated with the possible
#' errors. The matrix should have C columns and rows where C is the
#' number of class levels.
#' @param formula a formula, with a response and at least one predictor.
#' @param data an optional data frame in which to interpret the
#' variables named in the formula.
#' @param subset optional expression saying that only a subset of
#' the rows of the data should be used in the fit.
#' @param na.action a function which indicates what should happen
#' when the data contain `NA`. The default is to include
#' missing values since the model can accommodate them.
#' @param \dots other options to pass into the function (not
#' currently used with default method)
#' @return An object of class `C5.0` with elements:
#'
#' \item{boostResults}{ a parsed version of the boosting table(s)
#' shown in the output }
#' \item{call}{ the function call }
#' \item{caseWeights}{ not currently supported. }
#' \item{control}{ an echo of the specifications from
#' [C5.0Control()] }
#' \item{cost}{ the text version of the cost matrix (or "") }
#' \item{costMatrix}{ an echo of the model argument }
#' \item{dims}{ original dimensions of the predictor matrix or
#' data frame }
#' \item{levels}{ a character vector of factor levels for the
#' outcome }
#' \item{names}{ a string version of the names file }
#' \item{output}{ a string version of the command line output }
#' \item{predictors}{ a character vector of predictor names }
#' \item{rbm}{ a logical for rules }
#' \item{rules}{ a character version of the rules file }
#' \item{size}{ an integer vector of the tree/rule size (or sizes
#' in the case of boosting) }
#' \item{tree}{ a string version of the tree file }
#' \item{trials}{ a named vector with elements `Requested`
#' (an echo of the function call) and `Actual` (how many the
#' model used) }
#' @note The command line version currently supports more data
#' types than the R port. Currently, numeric, factor and ordered
#' factors are allowed as predictors.
#' @author Original GPL C code by Ross Quinlan, R code and
#' modifications to C by Max Kuhn, Steve Weston and Nathan Coulter
#' @seealso [C5.0Control()], [summary.C5.0()],
#' [predict.C5.0()], [C5imp()]
#' @references Quinlan R (1993). C4.5: Programs for Machine
#' Learning. Morgan Kaufmann Publishers,
#' \url{http://www.rulequest.com/see5-unix.html}
#' @keywords models
#' @useDynLib C50
#' @examples
#'
#' library(modeldata)
#' data(mlc_churn)
#'
#' treeModel <- C5.0(x = mlc_churn[1:3333, -20], y = mlc_churn$churn[1:3333])
#' treeModel
#' summary(treeModel)
#'
#' ruleModel <- C5.0(churn ~ ., data = mlc_churn[1:3333, ], rules = TRUE)
#' ruleModel
#' summary(ruleModel)
#'
#' @export
#' @rawNamespace export(C5.0.default)
#' @rdname C5.0
#' @importFrom Cubist makeDataFile makeNamesFile QuinlanAttributes
C5.0.default <- function(x,
                         y,
                         trials = 1,
                         rules = FALSE,
                         weights = NULL,
                         control = C5.0Control(),
                         costs = NULL,
                         ...) {
  funcCall <- match.call(expand.dots = TRUE)
  if (!is.factor(y))
    stop("C5.0 models require a factor outcome", call. = FALSE)
  if (is.null(colnames(x)))
    stop("column names are required", call. = FALSE)

  if (control$bands > 2 & !rules) {
    warning("rule banding only works with rules; ",
            "'rules' was changed to TRUE", call. = FALSE)
    rules <- TRUE
  }

  ## to do add weightings

  lvl <- levels(y)
  nClass <- length(lvl)

  if (!is.null(costs)) {
    if (!is.matrix(costs))
      stop("'costs' should be a matrix", call. = FALSE)
    if (ncol(costs) != nClass | nrow(costs) != nClass)
      stop("'costs' should be a ", nClass, "x", nClass, " matrix",
           call. = FALSE)
    if (is.null(dimnames(costs))) {
      warning("no dimnames were given for the cost matrix; ",
              "the factor levels will be used", call. = FALSE)
      colnames(costs) <- lvl
      rownames(costs) <- lvl
    } else {
      if (is.null(colnames(costs)) | is.null(rownames(costs)))
        stop("both row and column names are needed", call. = FALSE)
    }
    costString <- makeCostFile(costs)
  } else
    costString <- ""

  maxtrials <- 100
  if (trials < 1 | trials > maxtrials)
    stop("number of boosting iterations must be between 1 and ",
         maxtrials, call. = FALSE)

  if (!is.data.frame(x) & !is.matrix(x))
    stop("x must be a matrix or data frame", call. = FALSE)

  if (inherits(x, "tbl_df")) {
    x <- as.data.frame(x)
  }

  if (!is.null(weights) && !is.numeric(weights))
    stop("case weights must be numeric", call.
= FALSE) ## TODO: add case weights to these files when needed namesString <- makeNamesFile(x, y, w = weights, label = control$label, comments = TRUE) dataString <- makeDataFile(x, y, weights) Z <- .C( "C50", as.character(namesString), as.character(dataString), as.character(costString), as.logical(control$subset), # -s "use the Subset option" var name: SUBSET as.logical(rules), # -r "use the Ruleset option" var name: RULES ## for the bands option, I'm not sure what the default should be. as.integer(control$bands), # -u "sort rules by their utility into bands" var name: UTILITY ## The documentation has two options for boosting: ## -b use the Boosting option with 10 trials ## -t trials ditto with specified number of trial ## I think we should use -t as.integer(trials), # -t : " ditto with specified number of trial", var name: TRIALS as.logical(control$winnow), # -w "winnow attributes before constructing a classifier" var name: WINNOW as.double(control$sample), # -S : use a sample of x% for training # and a disjoint sample for testing var name: SAMPLE as.integer(control$seed), # -I : set the sampling seed value as.integer(control$noGlobalPruning), # -g: "turn off the global tree pruning stage" var name: GLOBAL as.double(control$CF), # -c: "set the Pruning CF value" var name: CF ## Also, for the number of minimum cases, I'm not sure what the ## default should be. The code looks like it dynamically sets the ## value (as opposed to a static, universal integer as.integer(control$minCases), # -m : "set the Minimum cases" var name: MINITEMS as.logical(control$fuzzyThreshold), # -p "use the Fuzzy thresholds option" var name: PROBTHRESH as.logical(control$earlyStopping), # toggle C5.0 to check to see if we should stop boosting early ## the model is returned in 2 files: .rules and .tree tree = character(1), # pass back C5.0 tree as a string rules = character(1), # pass back C5.0 rules as a string output = character(1), # get output that normally goes to screen PACKAGE = "C50" ) ## Figure out how may trials were actually used. modelContent <- strsplit( if (rules) Z$rules else Z$tree, "\n" )[[1]] entries <- grep("^entries", modelContent, value = TRUE) if (length(entries) > 0) { actual <- as.numeric(substring(entries, 10, nchar(entries) - 1)) } else actual <- trials if (trials > 1) { boostResults <- getBoostResults(Z$output) ## This next line is here to avoid a false positive warning in R ## CMD check: ## * checking R code for possible problems ... NOTE ## C5.0.default: no visible binding for global variable 'Data' Data <- NULL size <- if (!is.null(boostResults)) subset(boostResults, Data == "Training Set")$Size else NA } else { boostResults <- NULL size <- length(grep("[0-9])$", strsplit(Z$output, "\n")[[1]])) } out <- list( names = namesString, cost = costString, costMatrix = costs, caseWeights = !is.null(weights), control = control, trials = c(Requested = trials, Actual = actual), rbm = rules, boostResults = boostResults, size = size, dims = dim(x), call = funcCall, levels = levels(y), output = Z$output, tree = Z$tree, predictors = colnames(x), rules = Z$rules ) class(out) <- "C5.0" out } #' @export #' @rawNamespace export(C5.0.formula) #' @rdname C5.0 #' @importFrom stats na.pass model.extract .getXlevels terms C5.0.formula <- function (formula, data, weights, subset, na.action = na.pass, ...) { call <- match.call() m <- match.call(expand.dots = FALSE) m$rules <- m$trails <- m$control <- m$cost <- m$... 
<- NULL
  m$na.action <- na.action
  m[[1L]] <- as.name("model.frame")
  m <- eval(m, parent.frame())
  Terms <- attr(m, "terms")
  y <- model.extract(m, "response")
  wt <- model.extract(m, "weights")
  if (length(wt) == 0L)
    wt <- NULL
  if ("(weights)" %in% colnames(m))
    m[, "(weights)"] <- NULL
  m <- m[, -1, drop = FALSE]
  out <- C5.0.default(x = m, y = y, weights = wt, ...)
  out$call <- call
  out$Terms <- Terms
  out$xlevels <- .getXlevels(Terms, m)
  out
}

#' Control for C5.0 Models
#'
#' Various parameters that control aspects of the C5.0 fit.
#'
#' @param subset A logical: should the model evaluate groups of
#' discrete predictors for splits? Note: the C5.0 command line
#' version defaults this parameter to `FALSE`, meaning no
#' attempted groupings will be evaluated during the tree growing
#' stage.
#' @param bands An integer between 2 and 10000. If used, the
#' model orders the rules by their effect on the error rate and
#' groups the rules into the specified number of bands. This
#' modifies the output so that the effect on the error rate can be
#' seen for the groups of rules within a band. If this option is
#' selected and `rules = FALSE`, a warning is issued and
#' `rules` is changed to `TRUE`.
#' @param winnow A logical: should predictor winnowing (i.e.
#' feature selection) be used?
#' @param noGlobalPruning A logical to toggle whether the final,
#' global pruning step should be used to simplify the tree.
#' @param CF A number in (0, 1) for the confidence factor.
#' @param minCases an integer for the smallest number of samples
#' that must be put in at least two of the splits.
#' @param fuzzyThreshold A logical toggle to evaluate possible
#' advanced splits of the data. See Quinlan (1993) for details and
#' examples.
#' @param sample A value between (0, .999) that specifies the
#' random proportion of the data that should be used to train the
#' model. By default, all the samples are used for model training.
#' Samples not used for training are used to evaluate the accuracy
#' of the model in the printed output.
#' @param seed An integer for the random number seed within the C
#' code.
#' @param earlyStopping A logical to toggle whether the internal
#' method for stopping boosting should be used.
#' @param label A character label for the outcome used in the
#' output.
#' @return A list of options.
#' @author Original GPL C code by Ross Quinlan, R code and
#' modifications to C by Max Kuhn, Steve Weston and Nathan Coulter
#' @seealso [C5.0()], [predict.C5.0()],
#' [summary.C5.0()], [C5imp()]
#' @references Quinlan R (1993). C4.5: Programs for Machine
#' Learning. Morgan Kaufmann Publishers,
#' \url{http://www.rulequest.com/see5-unix.html}
#' @keywords models
#' @examples
#' library(modeldata)
#' data(mlc_churn)
#'
#' treeModel <- C5.0(x = mlc_churn[1:3333, -20],
#'                   y = mlc_churn$churn[1:3333],
#'                   control = C5.0Control(winnow = TRUE))
#' summary(treeModel)
#'
#' @export
C5.0Control <- function(subset = TRUE,  ## in C, equals SUBSET=0, /* subset tests allowed */
                        bands = 0,
                        winnow = FALSE,
                        noGlobalPruning = FALSE,
                        CF = 0.25,
                        minCases = 2,
                        fuzzyThreshold = FALSE,
                        sample = 0.0,
                        seed = sample.int(4096, size = 1) - 1L,
                        earlyStopping = TRUE,
                        label = "outcome") {
  if (CF < 0 | CF > 1)
    stop("confidence level must be between 0 and 1", call. = FALSE)
  if (sample < 0.0 | sample > .999)
    stop("sampling percentage must be between 0.0 and .999", call. = FALSE)
  if (bands == 1 | bands > 10000)
    stop("if used, bands must be between 2 and 10000", call.
= FALSE) list( subset = subset, bands = bands, winnow = winnow, noGlobalPruning = noGlobalPruning, CF = CF, minCases = minCases, fuzzyThreshold = fuzzyThreshold, sample = sample, earlyStopping = earlyStopping, label = label, seed = seed %% 4096L ) } #' @export print.C5.0 <- function(x, ...) { cat("\nCall:\n", truncateText(deparse(x$call, width.cutoff = 500)), "\n\n", sep = "") if (x$rbm) cat("Rule-Based Model\n") else cat("Classification Tree\n") cat("Number of samples:", x$dims[1], "\nNumber of predictors:", x$dims[2], "\n\n") if (x$trials["Requested"] > 1) { if (x$trials[1] == x$trials[2]) { cat("Number of boosting iterations:", x$trials["Requested"], "\n") } else { cat( "Number of boosting iterations:", x$trials["Requested"], "requested; ", x$trials["Actual"], "used due to early stopping\n" ) } if (!all(is.na(x$size))) cat( ifelse(x$rbm, "Average number of rules:", "Average tree size:"), round(mean(x$size, na.rm = TRUE), 1), "\n\n" ) else cat("\n") } else cat(ifelse(x$rbm, "Number of Rules:", "Tree size:"), x$size, "\n\n") otherOptions <- NULL if (x$control$subset) otherOptions <- c(otherOptions, "attempt to group attributes") if (x$control$winnow) otherOptions <- c(otherOptions, "winnowing") if (x$control$noGlobalPruning) otherOptions <- c(otherOptions, "no global pruning") if (x$control$CF != 0.25) otherOptions <- c(otherOptions, paste("confidence level: ", x$control$CF, sep = "")) if (x$control$minCases != 2) otherOptions <- c(otherOptions, paste("minimum number of cases: ", x$control$minCases, sep = "")) if (x$control$fuzzyThreshold) otherOptions <- c(otherOptions, "fuzzy thresholds") if (x$control$bands > 0) otherOptions <- c(otherOptions, paste(x$control$bands, " utility bands", sep = "")) if (!x$control$earlyStopping & x$trials["Requested"] > 1) otherOptions <- c(otherOptions, "early stopping for boosting") if (x$control$sample > 0) otherOptions <- c(otherOptions, paste(round(100 * x$control$sample, 1), "% sub-sampling", sep = "")) if (!is.null(otherOptions)) { cat(truncateText(paste( "Non-standard options:", paste(otherOptions, collapse = ", ") ))) cat("\n\n") } if (x$cost != "") { cat("Cost Matrix:\n") print(x$costMatrix) } output <- strsplit(x$output, "\n")[[1]] sizeIndex <- grep("^\t.*Size", output) if (length(sizeIndex) > 0 & FALSE) { out <- strsplit(output[sizeIndex + 2], " ")[[1]] out <- out[!(out %in% c("\t", ""))] out <- out[!grepl("[[:punct:]]", out)] if (length(out) > 0) cat("Tree Size: ", out, "\n") } } #' Summaries of C5.0 Models #' #' This function prints out detailed summaries for C5.0 models. #' #' The output of this function mirrors the output of the C5.0 #' command line version. #' #' The terminal nodes have text indicating the number of samples #' covered by the node and the number that were incorrectly #' classified. Note that, due to how the model handles missing #' values, the sample numbers may be fractional. #' #' There is a difference in the attribute usage numbers between #' this output and the nominal command line output. Although the #' calculations are almost exactly the same (we do not add 1/2 to #' everything), the C code does not display that an attribute was #' used if the percentage of training samples covered by the #' corresponding splits is very low. Here, the threshold was #' lowered and the fractional usage is shown. 
#' #' @param object an object of class `C5.0` #' @param \dots other options (not currently used) #' @return A list with values \item{output }{a single text string #' with the model output} \item{comp2 }{the call to this function} #' @author Original GPL C code by Ross Quinlan, R code and #' modifications to C by Max Kuhn, Steve Weston and Nathan Coulter #' @seealso [C5.0()], [C5.0Control()], #' [summary.C5.0()], [C5imp()] #' @references Quinlan R (1993). C4.5: Programs for Machine #' Learning. Morgan Kaufmann Publishers, #' \url{http://www.rulequest.com/see5-unix.html} #' @keywords models #' @examples #' #' library(modeldata) #' data(mlc_churn) #' #' treeModel <- C5.0(x = mlc_churn[1:3333, -20], y = mlc_churn$churn[1:3333]) #' summary(treeModel) #' #' @export #' @method summary C5.0 summary.C5.0 <- function(object, ...) { out <- list(output = object$output, call = object$call) class(out) <- "summary.C5.0" out } #' @export print.summary.C5.0 <- function(x, ...) { cat("\nCall:\n", truncateText(deparse(x$call, width.cutoff = 500)), "\n\n", sep = "") cat(x$output) cat("\n") invisible(x) } truncateText <- function(x) { if (length(x) > 1) x <- paste(x, collapse = "") w <- options("width")$width if (nchar(x) <= w) return(x) cont <- TRUE out <- x while (cont) { tmp <- out[length(out)] tmp2 <- substring(tmp, 1, w) spaceIndex <- gregexpr("[[:space:]]", tmp2)[[1]] stopIndex <- spaceIndex[length(spaceIndex) - 1] - 1 tmp <- c(substring(tmp2, 1, stopIndex), substring(tmp, stopIndex + 1)) out <- if (length(out) == 1) tmp else c(out[1:(length(x) - 1)], tmp) if (all(nchar(out) <= w)) cont <- FALSE } paste(out, collapse = "\n") } #' Variable Importance Measures for C5.0 Models #' #' This function calculates the variable importance (aka attribute usage) for #' C5.0 models. #' #' #' By default, C5.0 measures predictor importance by determining the percentage #' of training set samples that fall into all the terminal nodes after the #' split (this is used when `metric = "usage"`). For example, the #' predictor in the first split automatically has an importance measurement of #' 100 percent. Other predictors may be used frequently in splits, but if the #' terminal nodes cover only a handful of training set samples, the importance #' scores may be close to zero. The same strategy is applied to rule-based #' models as well as the corresponding boosted versions of the model. #' #' There is a difference in the attribute usage numbers between this output and #' the nominal command line output. Although the calculations are almost #' exactly the same (we do not add 1/2 to everything), the C code does not #' display that an attribute was used if the percentage of training samples #' covered by the corresponding splits is very low. Here, the threshold was #' lowered and the fractional usage is shown. #' #' When `metric = "splits"`, the percentage of splits associated with each #' predictor is calculated. #' #' @param object an object of class `C5.0` #' @param metric either 'usage' or 'splits' (see Details below) #' @param pct a logical: should the importance values be converted to be #' between 0 and 100? #' @param \dots other options (not currently used) #' @return a data frame with a column `Overall` with the predictor usage #' values. The row names indicate the predictor. #' @author Original GPL C code by Ross Quinlan, R code and modifications to C #' by Max Kuhn, Steve Weston and Nathan Coulter #' @seealso [C5.0()], [C5.0Control()], #' [summary.C5.0()],[predict.C5.0()] #' @references Quinlan R (1993). 
C4.5: Programs for Machine Learning. Morgan #' Kaufmann Publishers, \url{http://www.rulequest.com/see5-unix.html} #' @keywords models #' @examples #' #' library(modeldata) #' data(mlc_churn) #' #' treeModel <- C5.0(x = mlc_churn[1:3333, -20], y = mlc_churn$churn[1:3333]) #' C5imp(treeModel) #' C5imp(treeModel, metric = "splits") #' #' @export C5imp <- function(object, metric = "usage", pct = TRUE, ...) { if (!(metric %in% c("usage", "splits"))) stop("metric should be either 'usage' or 'splits'") allVar <- getOriginalVars(object) allVar <- gsub("\\", "", allVar, fixed = TRUE) if (metric == "usage") { object$output <- strsplit(object$output, "\n")[[1]] usageIndex <- grep("Attribute usage:", object$output, fixed = TRUE) if (length(usageIndex) == 0) stop("Error in parsing model output") object$output <- object$output[usageIndex:length(object$output)] usageData <- grep("%\t", object$output, fixed = TRUE, value = TRUE) usageData <- strsplit(usageData, "%", fixed = TRUE) if (!all(unlist(lapply(usageData, length)) == 2)) stop("Error in parsing model output") usageData <- lapply(usageData, function(x) gsub("[[:blank:]]", "", x)) usageData <- as.data.frame(do.call("rbind", usageData), stringsAsFactors = FALSE) elim <- allVar[!(allVar %in% usageData$V2)] if (length(elim) > 0) { elimVars <- data.frame(V1 = 0, V2 = elim, stringsAsFactors = FALSE) usageData <- rbind(usageData, elimVars) } out <- data.frame(Overall = as.numeric(as.character(usageData$V1))) rownames(out) <- usageData$V2 } else { varData <- strsplit(paste(object$tree, object$rules), "\n")[[1]] varData <- grep("att=", varData, value = TRUE) varData <- breakUp(varData) varData <- unlist(lapply(varData, function(x) x["att"])) varData <- as.data.frame(table(varData), stringsAsFactors = FALSE) elim <- allVar[!(allVar %in% varData$varData)] if (length(elim) > 0) { elimVars <- data.frame(varData = elim, Freq = 0, stringsAsFactors = FALSE) varData <- rbind(varData, elimVars) } out <- data.frame(Overall = as.numeric(as.character(varData$Freq))) if (pct) out$Overall <- out$Overall / sum(out$Overall) * 100 rownames(out) <- varData$varData } out[order(out$Overall, decreasing = TRUE), , drop = FALSE] } breakUp <- function(y) { y <- gsub("\"", "", y) y <- strsplit(y, " ", fixed = TRUE) y <- lapply( y, function(z) { z <- strsplit(z, "=", fixed = TRUE) nms <- unlist(lapply(z, function(a) a[1])) val <- unlist(lapply(z, function(a) a[2])) names(val) <- nms val } ) y } getOriginalVars <- function(x) { treeDat <- strsplit(x$names, "\n")[[1]] varStart <- grep(paste(x$control$label, ":", sep = ""), treeDat) if (length(varStart) == 0) stop("cannot parse names file") treeDat <- treeDat[(varStart + 1):length(treeDat)] treeDat <- strsplit(treeDat, ":") unlist(lapply(treeDat, function(x) x[1])) } getVars <- function(x) { ## One of these is always "" treeDat <- paste(x$tree, x$rules) treeDat <- strsplit(treeDat, "\n")[[1]] treeDat <- grep("att=", treeDat, value = TRUE) treeDat } getAtt <- function(x) { strt <- regexpr("att=", x) if (length(strt) == 0) stop("cannot parse model file") strt <- strt + 5 stp <- regexpr("(forks=)|(cut=)|(val=)", x) if (length(stp) == 0) stop("cannot parse model file") stp <- stp - 3 substring(x, strt, stp) } C5predictors <- function(x, ...) 
unique(getAtt(getVars(x))) getBoostResults <- function(x) { output <- strsplit(x, "\n")[[1]] ## what above when sampling is used srt <- grep("^Trial\t", output) stp <- grep("^boost\t", output) ## error check for srt, stp if (length(srt) == 0 | length(stp) == 0) return(NULL) if (any(stp - srt <= 0)) return(NULL) if (length(srt) != length(stp)) return(NULL) if (length(stp) > 1) { trainSrt <- grep("Evaluation on training data", output) testSrt <- grep("Evaluation on test data", output) if (testSrt < trainSrt) { srt <- rev(srt) stp <- rev(stp) } trainBoost <- parseBoostTable(output[(srt[1] + 4):(stp[1] - 1)]) trainBoost$Data <- "Training Set" testBoost <- parseBoostTable(output[(srt[2] + 4):(stp[2] - 1)]) testBoost$Data <- "Test Set" boostResults <- rbind(trainBoost, testBoost) } else { boostResults <- parseBoostTable(output[(srt[1] + 4):(stp[1] - 1)]) boostResults$Data <- "Training Set" } boostResults } parseBoostTable <- function(x) { x <- gsub("(", " ", x, fixed = TRUE) x <- gsub("%)", "", x, fixed = TRUE) x <- strsplit(x, "[[:space:]]") x <- lapply(x, function(x) x[x != ""]) if (all(unlist(lapply(x, length)) %in% 4:5)) { x <- do.call("rbind", x) x <- matrix(as.numeric(x), ncol = ncol(x)) x <- as.data.frame(x) cls <- c("Trial", "Size", "Errors", "Percent", "Cost") colnames(x) <- cls[1:ncol(x)] x$Trial <- x$Trial + 1 } else x <- NULL x }
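# Illustrative sketch (editor's addition): getBoostResults() locates the
# boosting table(s) in the raw text output and parseBoostTable() turns each
# row into a data frame. Assuming rows shaped like the usual C5.0 output
# (Trial, Size, Errors, and an error percentage in parentheses), e.g.
#   parseBoostTable(c("   0    12  209( 6.3%)",
#                     "   1    14  190( 5.7%)"))
# would yield columns Trial, Size, Errors and Percent, with Trial re-indexed
# to start at 1.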
/scratch/gouwar.j/cran-all/cranData/C50/R/C5.0.R
#' Plot a decision tree #' #' Plot a decision tree. #' #' #' @param x an object of class `C5.0` #' @param trial an integer for how many boosting iterations are #' used for prediction. NOTE: the internals of `C5.0` are #' zero-based so to get the initial decision tree you must use #' `trial = 0`. If `trial` is set too large, it is reset #' to the largest value and a warning is given. #' @param subtree an optional integer that can be used to isolate #' nodes below the specified split. See #' [partykit::party()] for more details. #' @param ... options passed to [partykit::plot.party()] #' @return No value is returned; a plot is rendered. #' @author Mark Culp, Max Kuhn #' @seealso [C5.0()], [partykit::party()] #' @references Quinlan R (1993). C4.5: Programs for Machine #' Learning. Morgan Kaufmann Publishers, #' \url{http://www.rulequest.com/see5-unix.html} #' @keywords models #' @examples #' #' mod1 <- C5.0(Species ~ ., data = iris) #' plot(mod1) #' plot(mod1, subtree = 3) #' #' #' mod2 <- C5.0(Species ~ ., data = iris, trials = 10) #' plot(mod2) ## should be the same as above #' #' ## plot first weighted tree #' plot(mod2, trial = 1) #' #' @export #' @method plot C5.0 #' @importFrom stats model.frame model.weights as.formula na.omit #' @importFrom stats delete.response #' @importFrom partykit as.partynode partynode partysplit as.party #' @importFrom partykit fitted_node party #' @importFrom graphics plot plot.C5.0 <- function(x, trial = 0, subtree = NULL, ...) { if (x$rules != "") stop("tree models only", call. = FALSE) if (trial > x$trials["Actual"] - 1) { warning( paste( "Only", x$trials["Actual"], "trials are in the model.", "Setting 'trial' to", x$trials["Actual"] - 1, "(the plot code is zero-based)." ), call. = FALSE ) trial <- x$trials["Actual"] - 1 } x <- as.party(x, trial = trial) if (!is.null(subtree)) { if (subtree < 0 || subtree > length(x)) stop("For this model, 'subtree' should be between zero and ", length(x), call. = FALSE) else x <- x[subtree] } if (any(names(list(...)) == "trials")) { warning( "The option 'trials' was passed and will be ignored. ", " Did you mean to use 'trial'?", call. = FALSE ) } plot(x, ...) } #' @importFrom stats terms model.response model.weights model.frame.C5.0 <- function (formula, ...) { if (!is.null(formula$model)) return(formula$model) mf <- formula$call mf <- mf[c(1L, match( c("formula", "data", "subset", "na.action", "weights"), names(mf), 0L ))] if (is.null(mf$na.action)) mf$na.action <- na.omit mf$drop.unused.levels <- FALSE mf[[1L]] <- as.name("model.frame") env <- if (!is.null(environment(formula$Terms))) environment(formula$Terms) else parent.frame() mf <- eval(mf, env) term_info <- terms(mf) # Now we want to get the appropriate columns back in a certain # order and with some potential name changes. # First get the predictors x_names <- labels(term_info) # in case of non-standard names: x_names <- gsub("`", "", x_names) dat <- mf[, x_names, drop = FALSE] # Add the outcome column with the right name all_names <- all.vars(attr(term_info, "predvars")) y_name <- all_names[attr(term_info, "response")] dat[[y_name]] <- model.response(mf) # Potentially get weights wts <- model.weights(mf) if (!is.null(wts)) dat$`(weights)` <- wts return(dat) } #' Convert C5.0 object to party format #' @param obj A `C5.0` class object' #' @param trial An integer for the specific tree to plot. #' @param ... Not currently used. 
#' @return A `party` object #' @keywords internal #' @method as.party C5.0 #' @export #' @export as.party.C5.0 as.party.C5.0 <- function(obj, trial = 0, ...) { out <- strsplit(obj$output, "\n")[[1]] out <- out[out != ""] out <- out[grep("^\t", out, invert = TRUE)] out <- out[grep("\\*\\*\\*", out, invert = TRUE)] tr <- as.vector(obj$trials)[2] if (tr > 1) { if (trial > (tr - 1)) { trial = tr - 1 } iv1 <- match(paste("----- Trial ", trial, ": -----", sep = ""), out) out <- out[grep("^\t", out, invert = TRUE)] iv2 <- match(paste("----- Trial ", trial + 1, ": -----", sep = ""), out) if (is.na(iv2)) { iv2 <- grep("Evaluation on training data", out) } out <- out[(iv1 + 2):(iv2 - 1)] } else{ indx <- 1:which(out == "Decision tree:") out <- out[-indx] l1 = length(out) out <- out[1:(l1 - 2)] } check1 <- cbind(grep("\\{", out), grep("\\}", out)) indv1 <- which(check1[, 1] != check1[, 2]) if (length(indv1) > 0) { a17 <- check1[indv1, , drop = FALSE] rml = NULL for (j in 1:dim(a17)[1]) { nterms = diff(a17[j, ]) vlaps = NULL for (i in 1:nterms) { iv37 <- out[a17[j, 1]:a17[j, 2]][-1][i] if (i == nterms) { vlap <- strsplit(iv37, ":")[[1]] arv12 = gsub(" ", "", vlap[(length(vlap) - 1)][1]) if (is.na(match("", arv12))) { vlap <- paste(arv12, vlap[length(vlap)], sep = ":") } else{ vlap = vlap[length(vlap)] vlap = gsub(" ", "", vlap) vlap <- paste(vlap, ":", sep = "") } } else{ v1 <- strsplit(iv37, " ")[[1]] vlap <- v1[length(v1)] } vlaps = paste(vlaps, vlap, sep = "") } vlap <- paste(out[a17[j, 1]], vlaps, sep = "") out[a17[j, 1]] = vlap pts <- unique(sort((a17[j, 1] + 1):a17[j, 2])) rml = c(rml, pts) } out = out[-rml] } indtrees <- grep("SubTree", out) if (length(indtrees) > 0) { while (length(indtrees) > 0) { xval <- t(sapply(1:length(indtrees), function(i) grep(paste("[S", i, "]", sep = ""), out, fixed = TRUE))) end1 = length(out) j = length(indtrees) ind.x = xval[j, 1] torb <- sapply(1:length(obj$predictors), function(i) { v <- grep(obj$predictors[i], out[ind.x], fixed = TRUE) if (length(v) < 1) v = 0 v }) adj <- strsplit(out[ind.x], obj$pred[which(torb > 0)])[[1]][1] ind.y <- xval[j, 2] + 1 stree <- paste(adj, out[ind.y:end1], sep = " ") out <- c(out[1:ind.x], stree, out[-c(1:ind.x, ind.y:end1)]) out <- out[-length(out)] out[ind.x] = gsub(paste(" \\[S", length(indtrees), "\\]", sep = ""), "", out[ind.x]) indtrees <- grep("SubTree", out) } } is.default <- !("Terms" %in% names(obj)) if (!is.default) { mf <- model.frame(obj) } else{ xspot <- match("x", names(obj$call))[1] yspot <- match("y", names(obj$call))[1] wspot <- match("weights", names(obj$call))[1] if (is.na(wspot)) { mf <- data.frame(x = eval(parse(text = paste(obj$call)[xspot])), y = eval(parse(text = paste(obj$call)[yspot]))) names(mf) <- c(obj$pred, "y") } else{ mf <- data.frame(eval(parse(text = paste(obj$call)[xspot])), eval(parse(text = paste(obj$call)[yspot])), eval(parse(text = paste(obj$call)[wspot]))) ind1 <- length(names(mf)) - 1 ind2 <- length(names(mf)) names(mf) <- c(obj$pred, "y", "(weights)") } } if (length(out) == 1) { pn <- as.partynode(partynode(1L), from = 1L) } else{ n.cat <-sapply(1:length(obj$pred), function(i)is.factor(mf[, obj$pred[i]])) adj.pred<-as.vector(sapply(obj$pred,function(i){gsub("`","",i)})) f.mat <- lapply(1:length(out), function(i) { valpred<-integer(0) vec<-strsplit(out[i],":")[[1]] vec<-vec[vec!=""] varp<-as.vector(sapply(adj.pred,function(j){ ind<-grep(paste0(j,' '),vec) if(length(ind)==0)return(-1) return(ind) })) if(!any(varp>0)){ stop("Variable match was not found.") } valpred<-as.vector(which(varp>0)) 
valpred<-valpred[which.max(nchar(adj.pred[valpred]))] a1<-gsub(obj$pred[valpred],"",out[i]) if(n.cat[valpred]){ ##process this if(length(grep(" in \\{",a1))>0){ vec<-a1 while(length(grep("^in",vec))==0){ vec<-sub("^.","",vec) } a2<-sub("in \\{","",vec) if(length(grep(":",a2))>0){ a2<-strsplit(a2,"\\}:") if(length(a2)>2){ stop("The code currently does not work with factor levels or responses that have the symbol '}:' in them.") } }else{ a2<-sub("\\}$","",a2) } a2<-a2[[1]][1] a1<-sub(a2,"X",vec) a2<-paste0("{",a2,"}",collapse="") }else{ vec<-a1 while(length(grep("^=",vec))==0){ vec<-sub("^.","",vec) } a2<-sub("^= ","",vec) a2<-strsplit(a2,":") if(length(a2)>2){ stop("The code currently does not work with factor levels or responses that have the symbol ':' in them.") } a2<-a2[[1]][1] a1<-sub(a2,"X",vec) } } a1 <- strsplit(a1, " ")[[1]] a1 <- gsub(":", "", a1) a1 <- gsub("\\.\\.\\.", "", a1) a1 <- a1[a1 != ""] if(n.cat[valpred]){ a1[2]<-a2 } as.vector(c(adj.pred[valpred],a1)) }) indvars <- sapply(1:length(f.mat), function(i) { v = match(obj$predictors, f.mat[[i]][1]) a1 <- which(!is.na(v)) if (length(a1) == 0) a1 = 0 a1 }) treestr <- sapply(1:length(out), function(i) { avec = obj$predictors[indvars[i]] avec = nchar(strsplit(out[[i]], avec)[[1]][1]) avec }) treestr <- as.numeric(as.factor(treestr)) indclass <- sapply(1:length(f.mat), function(i) { v <- match(obj$lev, f.mat[[i]][4]) if (any(!is.na(v))) { which(!is.na(v)) } else{ NA } }) cuts <- sapply(1:length(f.mat), function(i) f.mat[[i]][3]) vars <- sapply(1:length(f.mat), function(i) strsplit(f.mat[[i]][2], "=")[[1]][1]) xlevels <- list() if (sum(n.cat) > 0) { r1 = 1 for (i in 1:length(n.cat)) { if (n.cat[i]) { xlevels[[r1]] <- list(varid = obj$pred[i], lev = levels(mf[, obj$pred[i]])) r1 = r1 + 1 } } } c5.split <- function(i, j, r, k = NULL) { ##i=variable, j=cuts, r=TRUE,xlevs if (!n.cat[i]) { partysplit( varid = as.integer(i), breaks = as.numeric(j[1]), right = r, info = k, prob = NULL ) } else{ ind1 <- match(obj$pred[i], sapply(1:length(xlevels), function(i) xlevels[[i]]$varid)) xlev <- xlevels[[ind1]]$lev lj = length(j) j<- gsub('[\\}\\{]','',j) a1s = sapply(j, function(i) strsplit(i, ",")) index = rep(NA, length(xlev)) for (i1 in 1:lj) { index[match(a1s[[i1]], xlev)] = as.integer(i1) } partysplit( varid = as.integer(i), index = index, info = k, prob = NULL ) } } c5.node <- function(tvec, vvec, bvec, vvars) { if (length(tvec) == 1 | any(tvec < 0)) { return(partynode(1L)) } l <- list() ind <- which(tvec == 1) lind <- length(ind) ind2 <- !vvars[ind[1]] == ">" split1 <- c5.split(vvec[ind[1]], bvec[ind], TRUE) for (i in 1:lind) { str = ind[i] if (i == lind) { term = length(tvec) } else{ term = ind[i + 1] - 1 } val = ind[i]:term l[[i]] = list( tvec = tvec[val] - 1, vec = vvec[val], bvec = bvec[val], vvars = vvars[val] ) } if (!ind2) { tmp = l[[1]] l[[1]] = l[[2]] l[[2]] = tmp } partynode(1L, split = split1, kids = lapply(1:length(l), function(i) { i = l[[i]] c5.node(i$tvec, i$vec, i$bvec, i$vvars) })) } pn <- as.partynode(c5.node(treestr, indvars, cuts, vars), from = 1L) } if (is.default) { if (is.na(wspot)) { p <- dim(mf)[2] dat1 <- data.frame( "(fitted)" = fitted_node(pn, data = mf), "(response)" = mf[, p], check.names = FALSE ) } else{ p <- dim(mf)[2] - 1 dat1 <- data.frame( "(fitted)" = fitted_node(pn, data = mf), "(response)" = mf[, p], "(weights)" = model.weights(mf), check.names = FALSE ) } fn = as.formula(paste("y ~ ", paste(obj$pred, collapse = " + "), sep = "")) g7 <- party(pn, data = mf[0L, ], fitted = dat1 , terms = terms(fn)) } 
else{ p1 <- all.vars(attr(obj$Terms, "predvars"))[attr(obj$Terms, "response")] if (is.na(p1)) { stop("Error in Response") } C5.0_fitted <- function(p1) { ret <- as.data.frame(matrix(nrow = NROW(mf), ncol = 0)) ret[["(fitted)"]] <- fitted_node(pn, data = mf) ret[["(response)"]] <- mf[, p1] ret[["(weights)"]] <- model.weights(mf) ret } fitted <- C5.0_fitted(p1) g7 <- party( pn, data = mf[0L, , drop = FALSE], fitted = fitted, terms = obj$Terms, info = list(method = "C5.0") ) } class(g7) <- c("constparty", class(g7)) g7 }
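# Illustrative usage (editor's addition): the conversion above is what
# plot.C5.0() calls internally, but it can also be used directly (assuming
# partykit is installed), e.g.
#   mod <- C5.0(Species ~ ., data = iris)
#   pty <- partykit::as.party(mod)   # dispatches to as.party.C5.0()
#   print(pty)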
/scratch/gouwar.j/cran-all/cranData/C50/R/as.party.C5.0.R
makeCostFile <- function(cst) {
  classes <- colnames(cst)
  out <- ""
  # Build the text of a C5.0 costs file: one "row-class, column-class: cost"
  # line for every off-diagonal cell with a cost greater than one.
  for (i in 1:nrow(cst)) {
    for (j in 1:ncol(cst)) {
      if (i != j && cst[i, j] > 1) {
        out <- paste(out,
                     paste(classes[i], ", ", classes[j], ": ",
                           cst[i, j], "\n", sep = ""),
                     sep = "")
      }
    }
  }
  out
}
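# Illustrative sketch (editor's addition): with the 2 x 2 cost matrix used in
# the package vignette,
#   cst <- matrix(c(0, 2, 1, 0), nrow = 2,
#                 dimnames = list(c("bad", "good"), c("bad", "good")))
#   cat(makeCostFile(cst))
# the only off-diagonal cost greater than one is cst["good", "bad"] = 2, so
# the generated costs file holds the single line "good, bad: 2".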
/scratch/gouwar.j/cran-all/cranData/C50/R/cost_matrix.R
## From http://www.rulequest.com/see5-unix.html: ## Names, labels, classes, and discrete values are represented by ## arbitrary strings of characters, with some fine print: ## ## - Tabs and spaces are permitted inside a name or value, but C5.0 ## collapses every sequence of these characters to a single space. ## ## - Special characters (comma, colon, period, vertical bar '|') can ## appear in names and values, but must be prefixed by the escape ## character '\'. For example, the name "Filch, Grabbit, and Co." ## would be written as 'Filch\, Grabbit\, and Co\.'. (Colons in ## times and periods in numbers do not need to be escaped.) formatCharacters <- function(x) { ## Note that "useBytes=TRUE" is specified to avoid errors ## such as "input string 18 is invalid in this locale". ## for some reason, escaping : doesn't work... x <- gsub(":", ".", x, fixed = TRUE, useBytes=TRUE) ## gsub special chars with escapes gsub("([^[:alnum:]^[:space:]])", '\\\\\\1' , x, useBytes=TRUE) } if(FALSE) { for(i in levels(ticdata$STYPE)) { print(i) l1 <- c("level.a","level:a") set.seed(2) testData <- data.frame(class = sample(LETTERS[1:2], size = 20, replace = TRUE)) testData$A = ifelse(testData$class == "A", l1[1], l1[2]) testData$A[1:5] <- l1[1] testData$B <- rnorm(nrow(testData)) names(testData)[2] <- "A" test1 <- C5.0(testData[,-1], testData[,1]) summary(test1) } }
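# Illustrative sketch (editor's addition): the escaping implemented by
# formatCharacters() matches the C5.0 naming rules quoted at the top of this
# file, e.g.
#   formatCharacters("Filch, Grabbit, and Co.")
# yields the string Filch\, Grabbit\, and Co\. (one backslash before each
# special character; spaces are left alone and ':' is first turned into '.').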
/scratch/gouwar.j/cran-all/cranData/C50/R/formatAttributes.R
#' Predict new samples using a C5.0 model
#'
#' This function produces predicted classes or confidence values
#' from a C5.0 model.
#'
#' Note that the number of trials in the object may be less than
#' what was specified originally (unless `earlyStopping = FALSE`
#' was used in [C5.0Control()]). If the number requested
#' is larger than the actual number available, the maximum actual
#' is used and a warning is issued.
#'
#' Model confidence values reflect the distribution of the classes
#' in terminal nodes or within rules.
#'
#' For rule-based models (i.e. not boosted), the predicted
#' confidence value is the confidence value from the most specific,
#' active rule. Note that C4.5 sorts the rules, and uses the first
#' active rule for prediction. However, the default in the original
#' sources did not normalize the confidence values. For example,
#' for two classes it was possible to get confidence values of
#' (0.3815, 0.8850) or (0.0000, 0.922), which do not add to one.
#' For rules, this code divides the values by their sum. The
#' previous values would be converted to (0.3012, 0.6988) and (0,
#' 1). There are also cases where no rule is activated. Here, equal
#' values are assigned to each class.
#'
#' For boosting, the per-class confidence values are aggregated
#' over all of the trees created during the boosting process and
#' these aggregate values are normalized so that the overall
#' per-class confidence values sum to one.
#'
#' When the `cost` argument is used in the main function, class
#' probabilities derived from the class distribution in the
#' terminal nodes may not be consistent with the final predicted
#' class. For this reason, requesting class probabilities from a
#' model using unequal costs will throw an error.
#'
#' @param object an object of class `C5.0`
#' @param newdata a matrix or data frame of predictors
#' @param trials an integer for how many boosting iterations are
#'  used for prediction. See the note below.
#' @param type either `"class"` for the predicted class or
#'  `"prob"` for model confidence values.
#' @param na.action when using a formula for the original model
#'  fit, how should missing values be handled?
#' @param \dots other options (not currently used)
#' @return when `type = "class"`, a factor vector is returned.
#'  When `type = "prob"`, a matrix of confidence values is returned
#'  (one column per class).
#' @author Original GPL C code by Ross Quinlan, R code and
#'  modifications to C by Max Kuhn, Steve Weston and Nathan Coulter
#' @seealso [C5.0()], [C5.0Control()],
#'  [summary.C5.0()], [C5imp()]
#' @references Quinlan R (1993). C4.5: Programs for Machine
#'  Learning. Morgan Kaufmann Publishers,
#'  \url{http://www.rulequest.com/see5-unix.html}
#' @keywords models
#' @examples
#'
#' library(modeldata)
#' data(mlc_churn)
#'
#' treeModel <- C5.0(x = mlc_churn[1:3333, -20], y = mlc_churn$churn[1:3333])
#' predict(treeModel, mlc_churn[3334:3350, -20])
#' predict(treeModel, mlc_churn[3334:3350, -20], type = "prob")
#'
#'
#' @export
#' @rawNamespace export(predict.C5.0)
#' @importFrom Cubist makeDataFile makeNamesFile QuinlanAttributes
predict.C5.0 <- function (object,
                          newdata = NULL,
                          trials = object$trials["Actual"],
                          type = "class",
                          na.action = na.pass,
                          ...) {
  if (!(type %in% c("class", "prob")))
    stop("type should be either 'class' or 'prob'", call. = FALSE)
  if (object$cost != "" & type == "prob")
    stop("confidence values (i.e. class probabilities) should ",
         "not be used with costs", call. = FALSE)
  if (is.null(newdata))
    stop("newdata must be non-null", call. = FALSE)

  if (!is.null(object$Terms)) {
    object$Terms <- delete.response(object$Terms)
    newdata <- model.frame(object$Terms, newdata,
                           na.action = na.action,
                           xlev = object$xlevels)
  } else
    newdata <- newdata[, object$predictors, drop = FALSE]

  if (is.null(colnames(newdata)))
    stop("column names are required", call. = FALSE)

  if (length(trials) > 1)
    stop("only one value of trials is allowed")
  if (trials > object$trials["Actual"])
    warning(
      paste(
        "'trials' should be <=",
        object$trials["Actual"],
        "for this object. Predictions generated using",
        object$trials["Actual"],
        "trials"
      ),
      call. = FALSE
    )

  ## If there are case weights used during training, the C code
  ## will expect a column of weights in the new data but the
  ## values will be ignored. `makeDataFile` puts those last in
  ## the data when `C5.0.default` is run, so we will add a
  ## column of NA values at the end here
  if (object$caseWeights)
    newdata$case_weight_pred <- NA

  ## make cases file
  caseString <- makeDataFile(x = newdata, y = NULL)

  ## When passing trials to the C code, convert to
  ## zero if the original version of trials is used
  if (trials <= 0)
    stop("'trials' should be a positive integer", call. = FALSE)
  if (trials == object$trials["Actual"])
    trials <- 0

  ## Add trials (not object$trials) as an argument
  Z <- .C(
    "predictions",
    as.character(caseString),
    as.character(object$names),
    as.character(object$tree),
    as.character(object$rules),
    as.character(object$cost),
    pred = integer(nrow(newdata)),
    confidence = double(length(object$levels) * nrow(newdata)),
    trials = as.integer(trials),
    output = character(1),
    PACKAGE = "C50"
  )

  if (any(grepl("Error limit exceeded", Z$output)))
    stop(Z$output, call. = FALSE)

  if (type == "class") {
    out <- factor(object$levels[Z$pred], levels = object$levels)
  } else {
    out <- matrix(Z$confidence,
                  ncol = length(object$levels),
                  byrow = TRUE)
    if (!is.null(rownames(newdata)))
      rownames(out) <- rownames(newdata)
    colnames(out) <- object$levels
  }
  out
}
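## A minimal sketch (an editorial illustration, not part of the package)
## of the rule-confidence normalization described in the documentation
## above: the unnormalized per-class confidence values are divided by
## their sum so that they add to one.
raw_conf <- c(0.3815, 0.8850)  # example unnormalized pair from the docs
raw_conf / sum(raw_conf)       # 0.3012 0.6988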
/scratch/gouwar.j/cran-all/cranData/C50/R/predict.C5.0.R
## ----setup, include = FALSE---------------------------------------------------
knitr::opts_chunk$set(echo = TRUE)
library(C50)
library(modeldata)

## ----credit-data--------------------------------------------------------------
library(modeldata)
data(credit_data)

## ----credit-vars--------------------------------------------------------------
vars <- c("Home", "Seniority")
str(credit_data[, c(vars, "Status")])

# a simple split
set.seed(2411)
in_train <- sample(1:nrow(credit_data), size = 3000)
train_data <- credit_data[ in_train,]
test_data  <- credit_data[-in_train,]

## ----tree-mod------------------------------------------------------------------
library(C50)
tree_mod <- C5.0(x = train_data[, vars], y = train_data$Status)
tree_mod

## ----tree-summ-----------------------------------------------------------------
summary(tree_mod)

## ----tree-plot, fig.width = 10-------------------------------------------------
plot(tree_mod)

## ----tree-boost----------------------------------------------------------------
tree_boost <- C5.0(x = train_data[, vars], y = train_data$Status, trials = 3)
summary(tree_boost)

## ----rule-mod------------------------------------------------------------------
rule_mod <- C5.0(x = train_data[, vars], y = train_data$Status, rules = TRUE)
rule_mod
summary(rule_mod)

## ----pred----------------------------------------------------------------------
predict(rule_mod, newdata = test_data[1:3, vars])
predict(tree_boost, newdata = test_data[1:3, vars], type = "prob")

## ----cost----------------------------------------------------------------------
cost_mat <- matrix(c(0, 2, 1, 0), nrow = 2)
rownames(cost_mat) <- colnames(cost_mat) <- c("bad", "good")
cost_mat

cost_mod <- C5.0(x = train_data[, vars], y = train_data$Status,
                 costs = cost_mat)
summary(cost_mod)

# more samples predicted as "bad"
table(predict(cost_mod, test_data[, vars]))

# than previously
table(predict(tree_mod, test_data[, vars]))
/scratch/gouwar.j/cran-all/cranData/C50/inst/doc/C5.0.R
--- title: "C5.0 Classification Models" vignette: > %\VignetteEngine{knitr::rmarkdown} %\VignetteIndexEntry{C5.0 Classification Models} output: knitr:::html_vignette: toc: yes --- ```{r setup, include = FALSE} knitr::opts_chunk$set(echo = TRUE) library(C50) library(modeldata) ``` The `C50` package contains an interface to the C5.0 classification model. The main two modes for this model are: * a basic tree-based model * a rule-based model Many of the details of this model can be found in [Quinlan (1993)](https://books.google.com/books?id=b3ujBQAAQBAJ&lpg=PP1&ots=sQ2nTTEpC1&dq=C4.5%3A%20Programs%20for%20Machine%20Learning&lr&pg=PR6#v=onepage&q=C4.5:%20Programs%20for%20Machine%20Learning&f=false) although the model has new features that are described in [Kuhn and Johnson (2013)](http://appliedpredictivemodeling.com/). The main public resource on this model comes from the [RuleQuest website](http://www.rulequest.com/see5-info.html). To demonstrate a simple model, we'll use the credit data that can be accessed in the [`modeldata` package](https://github.com/tidymodels/modeldata): ```{r credit-data} library(modeldata) data(credit_data) ``` The outcome is in a column called `Status` and, to demonstrate a simple model, the `Home` and `Seniority` predictors will be used. ```{r credit-vars} vars <- c("Home", "Seniority") str(credit_data[, c(vars, "Status")]) # a simple split set.seed(2411) in_train <- sample(1:nrow(credit_data), size = 3000) train_data <- credit_data[ in_train,] test_data <- credit_data[-in_train,] ``` ## Classification Trees To fit a simple classification tree model, we can start with the non-formula method: ```{r tree-mod} library(C50) tree_mod <- C5.0(x = train_data[, vars], y = train_data$Status) tree_mod ``` To understand the model, the `summary` method can be used to get the default `C5.0` command-line output: ```{r tree-summ} summary(tree_mod) ``` A graphical method for examining the model can be generated by the `plot` method: ```{r tree-plot, fig.width = 10} plot(tree_mod) ``` A variety of options are outlines in the documentation for `C5.0Control` function. Another option that can be used is the `trials` argument which enables a boosting procedure. This method is model similar to AdaBoost than to more statistical approaches such as stochastic gradient boosting. For example, using three iterations of boosting: ```{r tree-boost} tree_boost <- C5.0(x = train_data[, vars], y = train_data$Status, trials = 3) summary(tree_boost) ``` Note that the counting is zero-based. The `plot` method can also show a specific tree in the ensemble using the `trial` option. # Rule-Based Models C5.0 can create an initial tree model then decompose the tree structure into a set of mutually exclusive rules. These rules can then be pruned and modified into a smaller set of _potentially_ overlapping rules. The rules can be created using the `rules` option: ```{r rule-mod} rule_mod <- C5.0(x = train_data[, vars], y = train_data$Status, rules = TRUE) rule_mod summary(rule_mod) ``` Note that no pruning was warranted for this model. There is no `plot` method for rule-based models. # Predictions The `predict` method can be used to get hard class predictions or class probability estimates (aka "confidence values" in documentation). ```{r pred} predict(rule_mod, newdata = test_data[1:3, vars]) predict(tree_boost, newdata = test_data[1:3, vars], type = "prob") ``` # Cost-Sensitive Models A cost-matrix can also be used to emphasize certain classes over others. 
For example, to get more of the "bad" samples correct: ```{r cost} cost_mat <- matrix(c(0, 2, 1, 0), nrow = 2) rownames(cost_mat) <- colnames(cost_mat) <- c("bad", "good") cost_mat cost_mod <- C5.0(x = train_data[, vars], y = train_data$Status, costs = cost_mat) summary(cost_mod) # more samples predicted as "bad" table(predict(cost_mod, test_data[, vars])) # that previously table(predict(tree_mod, test_data[, vars])) ```
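To see how these cost-sensitive predictions line up with the observed classes, a cross-tabulation can be used (a minimal illustrative chunk; the `cost-check` label and the `predicted`/`observed` names are ours, not part of the original analysis):

```{r cost-check}
# rows: predicted class under unequal costs, columns: observed class
table(predicted = predict(cost_mod, test_data[, vars]),
      observed  = test_data$Status)
```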
/scratch/gouwar.j/cran-all/cranData/C50/inst/doc/C5.0.Rmd
## ---- echo = FALSE, results = "hide",message=FALSE,warning=FALSE--------------
library(C50)
library(knitr)
opts_chunk$set(comment = NA, digits = 3, prompt = TRUE, tidy = FALSE)

## -----------------------------------------------------------------------------
library(C50)
mod <- C5.0(Species ~ ., data = iris)
summary(mod)

## -----------------------------------------------------------------------------
predict(mod, iris[130,], type = "prob")
/scratch/gouwar.j/cran-all/cranData/C50/inst/doc/Class_Probability_Calcs.R
--- title: "Class Probability Calculations" vignette: > %\VignetteEngine{knitr::rmarkdown} %\VignetteIndexEntry{Class Probability Calculations} output: knitr:::html_vignette: toc: yes --- ```{r, echo = FALSE, results = "hide",message=FALSE,warning=FALSE} library(C50) library(knitr) opts_chunk$set(comment = NA, digits = 3, prompt = TRUE, tidy = FALSE) ``` This document describes exactly how the model computes class probabilities using the data in the terminal nodes. Here is an example model using the iris data: ```{r} library(C50) mod <- C5.0(Species ~ ., data = iris) summary(mod) ``` Suppose that we are predicting the sample in row 130 with a petal length of `r iris[130,"Petal.Length"]` and a petal width of `r iris[130,"Petal.Width"]`. From this tree, the terminal node shows `virginica (6/2)` which means a predicted class of the virginica species with a probability of 4/6 = 0.66667. However, we get a different predicted probability: ```{r} predict(mod, iris[130,], type = "prob") ``` When we wanted to describe the technical aspects of the [C5.0](https://www.rulequest.com/see5-info.html) and [cubist](https://www.rulequest.com/cubist-info.html) models, the main source of information on these models was the raw C source code from the [RuleQuest website](https://www.rulequest.com/download.html). For many years, both of these models were proprietary commercial products and we only recently open-sourced. Our intuition is that Quinlan quietly evolved these models from the versions described in the most recent publications to what they are today. For example, it would not be unreasonable to assume that C5.0 uses [AdaBoost](https://en.wikipedia.org/wiki/AdaBoost). From the sources, a similar reweighting scheme is used but it does not appear to be the same. For classifying new samples, the C sources have ```c ClassNo PredictTreeClassify(DataRec Case, Tree DecisionTree){ ClassNo c, C; double Prior; /* Save total leaf count in ClassSum[0] */ ForEach(c, 0, MaxClass) { ClassSum[c] = 0; } PredictFindLeaf(Case, DecisionTree, Nil, 1.0); C = SelectClassGen(DecisionTree->Leaf, (Boolean)(MCost != Nil), ClassSum); /* Set all confidence values in ClassSum */ ForEach(c, 1, MaxClass){ Prior = DecisionTree->ClassDist[c] / DecisionTree->Cases; ClassSum[c] = (ClassSum[0] * ClassSum[c] + Prior) / (ClassSum[0] + 1); } Confidence = ClassSum[C]; return C; } ``` Here: * The predicted probability is the "confidence" value * The prior is the class probabilities from the training set. For the iris data, this value is 1/3 for each of the classes * The array `ClassSum` is the probabilities of each class in the terminal node although `ClassSum[0]` is the number of samples in the terminal node (which, if there are missing values, can be fractional). For sample 130, the virginica values are: ``` (ClassSum[0] * ClassSum[c] + Prior) / (ClassSum[0] + 1) = ( 6 * (4/6) + (1/3)) / ( 6 + 1) = 0.6190476 ``` Why is it doing this? This will tend to avoid class predictions that are absolute zero or one. Basically, it can be viewed to be _similar_ to how Bayesian methods operate where the simple probability estimates are "shrunken" towards the prior probabilities. Note that, as the number of samples in the terminal nodes (`ClassSum[0]`) becomes large, this operation has less effect on the final results. Suppose `ClassSum[0] = 10000`, then the predicted virginica probability would be 0.6663337, which is closer to the simple estimate. This is very much related to the [Laplace Correction](https://en.wikipedia.org/wiki/Additive_smoothing). 
Traditionally, we would add a value of one to the denominator of the simple estimate and add the number of classes to the bottom, resulting in `(4+1)/(6+3) = 0.5555556`. C5.0 is substituting the prior probabilities and their sum (always one) into this equation instead. To be fair, there are well known Bayesian estimates of the sample proportions under different prior distributions for the two class case. For example, if there were two classes, the estimate of the class probability under a uniform prior would be the same as the basic Laplace correction (using the integers and not the fractions). A more flexible Bayesian approach is the [Beta-Binomial model](https://en.wikipedia.org/wiki/Beta-binomial_distribution), which uses a Beta prior instead of the uniform. The downside here is that two extra parameters need to be estimated (and it only is defined for two classes)
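The numbers quoted above are easy to verify directly (a small illustrative chunk; the chunk label and variable names are ours):

```{r confidence-check}
node_n    <- 6      # leaf count, i.e. ClassSum[0]
node_prob <- 4 / 6  # simple class proportion in the leaf
prior     <- 1 / 3  # training-set prior for virginica

(node_n * node_prob + prior) / (node_n + 1)  # C5.0 confidence: 0.6190476
(4 + 1) / (6 + 3)                            # classic Laplace: 0.5555556
(10000 * node_prob + prior) / (10000 + 1)    # large leaf:      0.6666333
```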
/scratch/gouwar.j/cran-all/cranData/C50/inst/doc/Class_Probability_Calcs.Rmd