DispConPwr <- function(gamma.theta.0, group1.name, group2.name) {
## Prints the calculated conditional power.
##
## Args:
## gamma.theta.0: Conditional power.
## group1.name: Name of group 1.
## group2.name: Name of group 2.
##
## Results:
  ##   Prints the calculated conditional power.
cat("Conditional Power",
formatC(x = gamma.theta.0,
digits = 4,
format = "f"),
"\n\n")
message(paste("Note: Conditional power is calculated in view of the hazard ratio which is defined as the ratio of the hazard of group ",
group2.name,
" to the hazard of group ",
group1.name,
".",
sep = ""))
}
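## Illustrative call (hypothetical values; group names as in GenerateDataFrame below):
## DispConPwr(gamma.theta.0 = 0.8123,
##            group1.name = "A",
##            group2.name = "B")
## # prints "Conditional Power 0.8123" and a note that the hazard ratio
## # is the hazard of group "B" divided by the hazard of group "A".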
|
/scratch/gouwar.j/cran-all/cranData/CP/R/DispConPwr.R
|
DispConPwrAll <- function(gamma.theta.0.exp,
gamma.theta.0.nonmix.exp,
gamma.theta.0.nonmix.wei,
gamma.theta.0.nonmix.gamma,
group1.name, group2.name) {
## Prints the calculated conditional power.
##
## Args:
## gamma.theta.0.exp: Conditional power within the exponential model.
## gamma.theta.0.nonmix.exp: Conditional power within the non-mixture model
## with exponential survival.
## gamma.theta.0.nonmix.wei: Conditional power within the non-mixture model
## with Weibull type survival.
## gamma.theta.0.nonmix.gamma: Conditional power within the non-mixture model
## with Gamma type survival.
## group1.name: Name of group 1.
## group2.name: Name of group 2.
##
## Results:
  ##   Prints the calculated conditional power.
res <- data.frame(c(formatC(x = gamma.theta.0.exp,
digits = 4,
format = "f"),
formatC(x = gamma.theta.0.nonmix.exp,
digits = 4,
format = "f"),
formatC(x = gamma.theta.0.nonmix.wei,
digits = 4,
format = "f"),
formatC(x = gamma.theta.0.nonmix.gamma,
digits = 4,
format = "f")),
row.names = c("Exponential",
"Non-Mixture-Exponential",
"Non-Mixture-Weibull",
"Non-Mixture-Gamma"))
colnames(x = res) <- "Conditional Power"
cat("Conditional Power", "\n")
cat("-----------------", "\n")
print(x = res)
cat("\n")
message(paste("Note: Conditional power is calculated in view of the hazard ratio which is defined as the ratio of the hazard of group ",
group2.name,
" to the hazard of group ",
group1.name,
".",
sep = ""))
}
|
/scratch/gouwar.j/cran-all/cranData/CP/R/DispConPwrAll.R
|
DispDataAll <- function(group1.name, group2.name,
interim.data1, interim.data2,
summary.exp,
summary.nonmix.exp,
summary.nonmix.wei,
summary.nonmix.gamma,
theta.0) {
## Displays the passed parameters.
##
## Args:
## Parameters from power calculations.
##
## Results:
## Displays the passed parameters.
# interim analysis
res1 <- data.frame(cbind(c(interim.data1[3],
interim.data1[1],
interim.data1[4],
floor(x = interim.data1[2])),
c(interim.data2[3],
interim.data2[1],
interim.data2[4],
floor(x = interim.data2[2]))),
row.names = c("Patients",
"Death Events",
"Censored",
"Person Months"))
colnames(res1) <- c(group1.name, group2.name)
# exponential model
res2 <- data.frame(cbind(c(formatC(x = summary.exp[1],
digits = 4,
format = "f"),
formatC(x = summary.exp[3],
digits = 4,
format = "f"),
formatC(x = summary.exp[5],
digits = 4,
format = "f"),
floor(x = summary.exp[7])),
c(formatC(x = summary.exp[2],
digits = 4,
format = "f"),
formatC(x = summary.exp[4],
digits = 4,
format = "f"),
formatC(x = summary.exp[6],
digits = 4,
format = "f"),
floor(x = summary.exp[8]))),
row.names = c("log(Likelihood)",
"AIC",
"lambda",
"Further Person Months"))
colnames(res2) <- c(group1.name, group2.name)
# non-mixture model with exponential survival
res3 <- data.frame(cbind(c(formatC(x = summary.nonmix.exp[1],
digits = 4,
format = "f"),
formatC(x = summary.nonmix.exp[3],
digits = 4,
format = "f"),
formatC(x = summary.nonmix.exp[5],
digits = 4,
format = "f"),
formatC(x = summary.nonmix.exp[6],
digits = 4,
format = "f"),
floor(x = summary.nonmix.exp[9])),
c(formatC(x = summary.nonmix.exp[2],
digits = 4,
format = "f"),
formatC(x = summary.nonmix.exp[4],
digits = 4,
format = "f"),
formatC(x = summary.nonmix.exp[7],
digits = 4,
format = "f"),
formatC(x = summary.nonmix.exp[8],
digits = 4,
format = "f"),
floor(x = summary.nonmix.exp[10]))),
row.names = c("log(Likelihood)",
"AIC",
"lambda",
"c",
"Further Person Months"))
colnames(res3) <- c(group1.name, group2.name)
# non-mixture model with Weibull type survival
res4 <- data.frame(cbind(c(formatC(x = summary.nonmix.wei[1],
digits = 4,
format = "f"),
formatC(x = summary.nonmix.wei[3],
digits = 4,
format = "f"),
formatC(x = summary.nonmix.wei[5],
digits = 4,
format = "f"),
formatC(x = summary.nonmix.wei[6],
digits = 4,
format = "f"),
formatC(x = summary.nonmix.wei[7],
digits = 4,
format = "f"),
floor(x = summary.nonmix.wei[11])),
c(formatC(x = summary.nonmix.wei[2],
digits = 4,
format = "f"),
formatC(x = summary.nonmix.wei[4],
digits = 4,
format = "f"),
formatC(x = summary.nonmix.wei[8],
digits = 4,
format = "f"),
formatC(x = summary.nonmix.wei[9],
digits = 4,
format = "f"),
formatC(x = summary.nonmix.wei[10],
digits = 4,
format = "f"),
floor(x = summary.nonmix.wei[12]))),
row.names = c("log(Likelihood)",
"AIC",
"lambda",
"k",
"c",
"Further Person Months"))
colnames(res4) <- c(group1.name, group2.name)
# non-mixture model with Gamma type survival
res5 <- data.frame(cbind(c(formatC(x = summary.nonmix.gamma[1],
digits = 4,
format = "f"),
formatC(x = summary.nonmix.gamma[3],
digits = 4,
format = "f"),
formatC(x = summary.nonmix.gamma[5],
digits = 4,
format = "f"),
formatC(x = summary.nonmix.gamma[6],
digits = 4,
format = "f"),
formatC(x = summary.nonmix.gamma[7],
digits = 4,
format = "f"),
floor(x = summary.nonmix.gamma[11])),
c(formatC(x = summary.nonmix.gamma[2],
digits = 4,
format = "f"),
formatC(x = summary.nonmix.gamma[4],
digits = 4,
format = "f"),
formatC(x = summary.nonmix.gamma[8],
digits = 4,
format = "f"),
formatC(x = summary.nonmix.gamma[9],
digits = 4,
format = "f"),
formatC(x = summary.nonmix.gamma[10],
digits = 4,
format = "f"),
floor(x = summary.nonmix.gamma[12]))),
row.names = c("log(Likelihood)",
"AIC",
"a",
"b",
"c",
"Further Person Months"))
colnames(res5) <- c(group1.name, group2.name)
cat("Interim Analysis", "\n")
cat("----------------", "\n")
print(x = res1)
cat("\n")
cat("Postulated Hazard Ratio:", theta.0, "\n\n\n\n")
cat("Exponential", "\n")
cat("AIC =",
formatC(x = (summary.exp[3] + summary.exp[4]),
digits = 4,
format = "f"),
"\n")
cat("-----------", "\n")
print(x = res2)
cat("\n")
cat("Estimated Hazard Ratio:",
formatC(x = summary.exp[9],
digits = 4,
format = "f"),
"\n\n\n\n")
cat("Non-Mixture-Exponential", "\n")
cat("AIC =",
formatC(x = (summary.nonmix.exp[3] + summary.nonmix.exp[4]),
digits = 4,
format = "f"),
"\n")
cat("-----------------------", "\n")
print(x = res3)
cat("\n")
cat("Estimated Hazard Ratio:",
formatC(x = summary.nonmix.exp[11],
digits = 4,
format = "f"),
"\n\n\n\n")
cat("Non-Mixture-Weibull", "\n")
cat("AIC =",
formatC(x = (summary.nonmix.wei[3] + summary.nonmix.wei[4]),
digits = 4,
format = "f"),
"\n")
cat("-------------------", "\n")
print(x = res4)
cat("\n")
cat("Estimated Hazard Ratio:",
formatC(x = summary.nonmix.wei[13],
digits = 4,
format = "f"),
"\n\n\n\n")
cat("Non-Mixture-Gamma", "\n")
cat("AIC =",
formatC(x = (summary.nonmix.gamma[3] + summary.nonmix.gamma[4]),
digits = 4,
format = "f"),
"\n")
cat("-----------------", "\n")
print(x = res5)
cat("\n")
cat("Estimated Hazard Ratio:",
formatC(x = summary.nonmix.gamma[13],
digits = 4,
format = "f"),
"\n\n\n\n")
}
|
/scratch/gouwar.j/cran-all/cranData/CP/R/DispDataAll.R
|
DispDataExp <- function(group1.name, n1, d1, n1.alive, o1, lambda1.hat, O1.star,
group2.name, n2, d2, n2.alive, o2, lambda2.hat, O2.star,
theta.0, theta.hat) {
## Displays the passed parameters.
##
## Args:
## Parameters from exponential power calculations.
##
## Results:
## Displays the passed parameters.
res1 <- data.frame(cbind(c(n1,
d1,
n1.alive,
floor(x = o1),
formatC(x = lambda1.hat,
digits = 4,
format = "f"),
floor(x = O1.star)),
c(n2,
d2,
n2.alive,
floor(x = o2),
formatC(x = lambda2.hat,
digits = 4,
format = "f"),
floor(x = O2.star))),
row.names = c("Patients",
"Death Events",
"Censored",
"Person Months",
"Estimated Hazard",
"Further Person Months"))
colnames(res1) <- c(group1.name, group2.name)
res2 <- data.frame(c(formatC(x = theta.0,
digits = 4,
format = "f"),
formatC(x = theta.hat,
digits = 4,
format = "f")),
row.names = c("Postulated Hazard Ratio",
"Estimated Hazard Ratio"))
colnames(res2) <- ""
print(x = res1)
print(x = res2)
cat("\n")
}
|
/scratch/gouwar.j/cran-all/cranData/CP/R/DispDataExp.R
|
DispDataNonMixExp <- function(group1.name, n1, d1, n1.alive, o1, lambda1.hat, c1.hat, O1.star,
group2.name, n2, d2, n2.alive, o2, lambda2.hat, c2.hat, O2.star,
theta.0, theta.hat) {
## Displays the passed parameters.
##
## Args:
## Parameters from non-mixture
## exponential power calculations.
##
## Results:
## Displays the passed parameters.
res1 <- data.frame(cbind(c(n1,
d1,
n1.alive,
floor(x = o1),
formatC(x = lambda1.hat,
digits = 4,
format = "f"),
formatC(x = c1.hat,
digits = 4,
format = "f"),
floor(x = O1.star)),
c(n2,
d2,
n2.alive,
floor(x = o2),
formatC(x = lambda2.hat,
digits = 4,
format = "f"),
formatC(x = c2.hat,
digits = 4,
format = "f"),
floor(x = O2.star))),
row.names = c("Patients",
"Death Events",
"Censored",
"Person Months",
"Estimated Parameter",
"Estimated Survival Fraction",
"Further Person Months"))
colnames(res1) <- c(group1.name, group2.name)
res2 <- data.frame(c(formatC(x = theta.0,
digits = 4,
format = "f"),
formatC(x = theta.hat,
digits = 4,
format = "f")),
row.names = c("Postulated Hazard Ratio",
"Estimated Hazard Ratio"))
colnames(res2) <- ""
print(x = res1)
print(x = res2)
cat("\n")
}
|
/scratch/gouwar.j/cran-all/cranData/CP/R/DispDataNonMixExp.R
|
DispDataNonMixGamma <- function(group1.name, n1, d1, n1.alive, o1, a1.hat, b1.hat, c1.hat, O1.star,
group2.name, n2, d2, n2.alive, o2, a2.hat, b2.hat, c2.hat, O2.star,
theta.0, theta.hat) {
## Displays the passed parameters.
##
## Args:
## Parameters from non-mixture
## Gamma type power calculations.
##
## Results:
## Displays the passed parameters.
res1 <- data.frame(cbind(c(n1,
d1,
n1.alive,
floor(x = o1),
formatC(x = a1.hat,
digits = 4,
format = "f"),
formatC(x = b1.hat,
digits = 4,
format = "f"),
formatC(x = c1.hat,
digits = 4,
format = "f"),
floor(x = O1.star)),
c(n2,
d2,
n2.alive,
floor(x = o2),
formatC(x = a2.hat,
digits = 4,
format = "f"),
formatC(x = b2.hat,
digits = 4,
format = "f"),
formatC(x = c2.hat,
digits = 4,
format = "f"),
floor(x = O2.star))),
row.names = c("Patients",
"Death Events",
"Censored",
"Person Months",
"Estimated Shape Parameter",
"Estimated Rate Parameter",
"Estimated Survival Fraction",
"Further Person Months"))
colnames(res1) <- c(group1.name, group2.name)
res2 <- data.frame(c(formatC(x = theta.0,
digits = 4,
format = "f"),
formatC(x = theta.hat,
digits = 4,
format = "f")),
row.names = c("Postulated Hazard Ratio",
"Estimated Hazard Ratio"))
colnames(res2) <- ""
print(x = res1)
print(x = res2)
cat("\n")
}
|
/scratch/gouwar.j/cran-all/cranData/CP/R/DispDataNonMixGamma.R
|
DispDataNonMixWei <- function(group1.name, n1, d1, n1.alive, o1, lambda1.hat, k1.hat, c1.hat, O1.star,
group2.name, n2, d2, n2.alive, o2, lambda2.hat, k2.hat, c2.hat, O2.star,
theta.0, theta.hat) {
## Displays the passed parameters.
##
## Args:
## Parameters from non-mixture
## Weibull type power calculations.
##
## Results:
## Displays the passed parameters.
res1 <- data.frame(cbind(c(n1,
d1,
n1.alive,
floor(x = o1),
formatC(x = lambda1.hat,
digits = 4,
format = "f"),
formatC(x = k1.hat,
digits = 4,
format = "f"),
formatC(x = c1.hat,
digits = 4,
format = "f"),
floor(x = O1.star)),
c(n2,
d2,
n2.alive,
floor(x = o2),
formatC(x = lambda2.hat,
digits = 4,
format = "f"),
formatC(x = k2.hat,
digits = 4,
format = "f"),
formatC(x = c2.hat,
digits = 4,
format = "f"),
floor(x = O2.star))),
row.names = c("Patients",
"Death Events",
"Censored",
"Person Months",
"Estimated Scale Parameter",
"Estimated Shape Parameter",
"Estimated Survival Fraction",
"Further Person Months"))
colnames(res1) <- c(group1.name, group2.name)
res2 <- data.frame(c(formatC(x = theta.0,
digits = 4,
format = "f"),
formatC(x = theta.hat,
digits = 4,
format = "f")),
row.names = c("Postulated Hazard Ratio",
"Estimated Hazard Ratio"))
colnames(res2) <- ""
print(x = res1)
print(x = res2)
cat("\n")
}
|
/scratch/gouwar.j/cran-all/cranData/CP/R/DispDataNonMixWei.R
|
FctPersMonNonMixExp <- function(data, lambda.hat, group.name) {
## Calculates the values of some function of the person months
## in the non-mixture model with exponential survival, i. e.
## S(t) = c^[1 - exp(- lambda * t)], lambda > 0, 0 < c < 1, t >= 0.
##
## Args:
## data: Data frame which consists of at least three columns for the group
## in the first (all of the same group),
## status (1 = event, 0 = censored) in the second
## and event time in the third column.
## lambda.hat: Estimator of the parameter lambda.
## group.name: Name of the group.
##
## Results:
## Returns the value of some function of the person months.
# function of person months, and their verification
o.stroke <- sum(1 - exp(- lambda.hat * data[, 3]))
if (o.stroke <= 0) {
stop(paste("Number of person months in", group.name, "must be bigger than 0.",
call. = FALSE))
}
return(o.stroke)
}
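## Illustrative check (hypothetical numbers): for the three observation
## times 1, 2 and 4 and lambda.hat = 0.5 the transformed person months are
## sum(1 - exp(-0.5 * c(1, 2, 4))) = 0.3935 + 0.6321 + 0.8647 = 1.8903.
## d <- data.frame(group = rep("A", 3),
##                 stat  = c(1, 1, 0),
##                 time  = c(1, 2, 4))
## FctPersMonNonMixExp(data = d, lambda.hat = 0.5, group.name = "A")
## # returns approximately 1.8903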
|
/scratch/gouwar.j/cran-all/cranData/CP/R/FctPersMonNonMixExp.R
|
FctPersMonNonMixGamma <- function(data, a.hat, b.hat, group.name) {
## Calculates the values of some function of the person months
## in the non-mixture model with Gamma type survival, i. e.
## S(t) = c^Gamma^(0)(a, b * t), a > 0, b > 0, 0 < c < 1, t >= 0,
  ## with Gamma^(0) being the regularized lower incomplete Gamma function
  ## evaluated with upper integration limit b * t.
##
## Args:
## data: Data frame which consists of at least three columns for the group
## in the first (all of the same group),
## status (1 = event, 0 = censored) in the second
## and event time in the third column.
  ##   a.hat: Estimator of the parameter a.
  ##   b.hat: Estimator of the parameter b.
## group.name: Name of the group.
##
## Results:
## Returns the value of some function of the person months.
# function of person months, and their verification
o.stroke <- sum(stats::pgamma(q = data[, 3],
shape = a.hat,
rate = b.hat))
if (o.stroke <= 0) {
stop(paste("Number of person months in", group.name, "must be bigger than 0.",
call. = FALSE))
}
return(o.stroke)
}
|
/scratch/gouwar.j/cran-all/cranData/CP/R/FctPersMonNonMixGamma.R
|
FctPersMonNonMixWei <- function(data, lambda.hat, k.hat, group.name) {
  ## Calculates the value of some function of the person months
## in the non-mixture model with Weibull type survival, i. e.
## S(t) = c^[1 - exp(- lambda * t^k)], lambda > 0, k > 0, 0 < c < 1, t >= 0.
##
## Args:
## data: Data frame which consists of at least three columns for the group
## in the first (all of the same group),
## status (1 = event, 0 = censored) in the second
## and event time in the third column.
## lambda.hat: Estimator of the parameter lambda.
## k.hat: Estimator of the parameter k.
## group.name: Name of the group.
##
## Results:
  ##   Returns the value of some function of the person months.
  # function of person months, and their verification
o.stroke <- sum(1 - exp(- lambda.hat * data[, 3]^k.hat))
if (o.stroke <= 0) {
stop(paste("Number of person months in", group.name, "must be bigger than 0.",
call. = FALSE))
}
return(o.stroke)
}
|
/scratch/gouwar.j/cran-all/cranData/CP/R/FctPersMonNonMixWei.R
|
GenerateDataFrame <- function() {
## Generates a data frame for testing conditional power calculations
## where the data is created by random.
## The data frame consists of three columns:
## First = group (two different expressions: 'A' and 'B')
## Second = status (1 = event, 0 = censored)
## Third = event time
##
## Args:
## None
##
## Results:
## Returns data frame for testing conditional power calculations.
# number of patients (circa 200)
n <- stats::rpois(n = 1,
lambda = 200)
# initialization of group, status and event time vectors
group <- vector(mode = "character",
length = 0)
stat <- vector(mode = "numeric",
length = 0)
time <- vector(mode = "numeric",
length = 0)
# probability of being censored for each patient of group A
# respectively group B between 0.4 and 0.6
# to avoid extreme (implausible) values
cens.prob.A <- stats::runif(n = 1,
min = 0.4,
max = 0.6)
cens.prob.B <- stats::runif(n = 1,
min = 0.4,
max = 0.6)
# generating data rowwise
# by choice of group with probability 0.5
for (i in 1 : n) {
if (stats::rbinom(n = 1,
size = 1,
prob = 0.5) == 1) {
group <- c(group, "A")
stat <- c(stat, stats::rbinom(n = 1,
size = 1,
prob = (1 - cens.prob.A)))
time <- c(time, (0.01 + round(x = stats::rexp(n = 1,
rate = (1 - cens.prob.A)),
digits = 2)))
}
else {
group <- c(group, "B")
stat <- c(stat, stats::rbinom(n = 1,
size = 1,
prob = (1 - cens.prob.B)))
time <- c(time, (0.01 + round(x = stats::rexp(n = 1,
rate = (1 - cens.prob.B)),
digits = 2)))
}
}
# data frame
data <- data.frame(group, stat, time)
return(data)
}
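## Example usage (the data are random, so results differ between calls):
## set.seed(1)                       # optional, for reproducibility
## test.data <- GenerateDataFrame()
## str(test.data)                    # roughly 200 rows: group, stat, time
## table(test.data$group)            # roughly equal numbers of "A" and "B"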
|
/scratch/gouwar.j/cran-all/cranData/CP/R/GenerateDataFrame.R
|
InitValLikelihoodNonMixExp <- function(data) {
## Calculates initial values for the maximum likelihood estimation
  ## in the non-mixture model with exponential survival, i. e.
## S(t) = c^[1 - exp(- lambda * t)], lambda > 0, 0 < c < 1, t >= 0,
## via maximum likelihood estimation of lambda as the parameter
## in the simple exponential model (taking into account only
## the patients who died) and using the relative frequency of survival
## for the estimation of c.
##
## Args:
  ##   data: Data frame which consists of at least three columns
## with the group in the first,
## status (1 = event, 0 = censored) in the second
## and event time in the third column.
##
## Results:
## Returns calculated initial values for the maximum likelihood estimation.
# auxiliary variables
d <- sum(data[, 2])
o.d <- sum(data[, 3][data[, 2] == 1])
n.alive <- sum(1 - data[, 2])
n <- length(x = data[, 1])
# initial values...
lambda.0 <- d / o.d
c.0 <- n.alive / n
# ...and if applicable projection into feasible region
eps <- 1e-6
if (lambda.0 < eps) {
lambda.0 <- eps
}
if (c.0 < eps) {
c.0 <- eps
}
if (c.0 > 1 - eps) {
c.0 <- 1 - eps
}
return(c(lambda.0, c.0))
}
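## Illustrative example (hypothetical data of one group): three deaths at
## times 1, 2 and 3 plus two censored patients give
## lambda.0 = 3 / 6 = 0.5 and c.0 = 2 / 5 = 0.4.
## d <- data.frame(group = rep("A", 5),
##                 stat  = c(1, 1, 1, 0, 0),
##                 time  = c(1, 2, 3, 4, 5))
## InitValLikelihoodNonMixExp(data = d)
## # returns c(0.5, 0.4)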
|
/scratch/gouwar.j/cran-all/cranData/CP/R/InitValLikelihoodNonMixExp.R
|
InitValLikelihoodNonMixGamma <- function(data) {
## Calculates initial values for the maximum likelihood estimation
  ## in the non-mixture model with Gamma type survival, i. e.
## S(t) = c^Gamma^(0)(a, b * t), a > 0, b > 0, 0 < c < 1, t >= 0,
## via method of moments for a and b as the parameters
## in the simple Gamma model (taking into account only
  ## the patients who died) and using the relative frequency of survival
## for the estimation of c.
##
## Args:
## data: Data frame which consists of at least three columns with the group
## in the first (all of the same group),
## status (1 = event, 0 = censored) in the second
## and event time in the third column.
##
## Results:
## Returns calculated initial values for the maximum likelihood estimation.
# auxiliary variables
t <- data[, 3][data[, 2] == 1]
n.d <- length(x = t)
m1 <- sum(t) / n.d
m2 <- sum(t^2) / n.d
n.alive <- sum(1 - data[, 2])
n <- length(x = data[, 1])
# initial values...
b.0 <- m1 / (m2 - m1^2)
a.0 <- m1 * b.0
c.0 <- n.alive / n
# ...and if applicable projection into feasible region
eps <- 1e-6
if (a.0 < eps) {
a.0 <- eps
}
if (b.0 < eps) {
b.0 <- eps
}
if (c.0 < eps) {
c.0 <- eps
}
if (c.0 > 1 - eps) {
c.0 <- 1 - eps
}
return(c(a.0, b.0, c.0))
}
|
/scratch/gouwar.j/cran-all/cranData/CP/R/InitValLikelihoodNonMixGamma.R
|
InitValLikelihoodNonMixWei <- function(data) {
## Calculates initial values for the maximum likelihood estimation
  ## in the non-mixture model with Weibull type survival, i. e.
## S(t) = c^[1 - exp(- lambda * t^k)], lambda > 0, k > 0, 0 < c < 1, t >= 0
## via least square method for lambda and k as the parameters
## in the simple Weibull model (taking into account only
  ## the patients who died) and using the relative frequency of survival
## for the estimation of c.
##
## Args:
## data: Data frame which consists of at least three columns with the group
## in the first (all of the same group),
## status (1 = event, 0 = censored) in the second
## and event time in the third column.
##
## Results:
## Returns calculated initial values for the maximum likelihood estimation.
# auxiliary variables
t <- data[, 3][data[, 2] == 1]
sort.t <- sort(x = t)
log.sort.t <- log(sort.t)
n.d <- length(x = t)
i <- 1 : n.d
F <- (i - 0.3) / (n.d + 0.4)
log.minus.log.S <- log(- log(1 - F))
# parameters from least square method
b <- ((sum(log.sort.t * log.minus.log.S) - sum(log.sort.t) * sum(log.minus.log.S) / n.d)
/ (sum(log.sort.t^2) - sum(log.sort.t)^2 / n.d))
a <- sum(log.minus.log.S) / n.d - b * sum(log.sort.t) / n.d
# further auxiliary variables
n.alive <- sum(1 - data[, 2])
n <- length(x = data[, 1])
# initial values...
lambda.0 <- exp(a)
k.0 <- b
c.0 <- n.alive / n
# ...and if applicable projection into feasible region
eps <- 1e-6
if (lambda.0 < eps) {
lambda.0 <- eps
}
if (k.0 < eps) {
k.0 <- eps
}
if (c.0 < eps) {
c.0 <- eps
}
if (c.0 > 1 - eps) {
c.0 <- 1 - eps
}
return(c(lambda.0, k.0, c.0))
}
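## The least squares step above is a Weibull probability plot: under
## S(t) = exp(- lambda * t^k) one has log(- log(S(t))) = log(lambda) + k * log(t),
## so the fitted slope b estimates k and exp(a) estimates lambda.
## Illustrative call on hypothetical data (one of four patients censored):
## d <- data.frame(group = rep("A", 4),
##                 stat  = c(1, 1, 1, 0),
##                 time  = c(0.5, 1.0, 2.0, 3.0))
## InitValLikelihoodNonMixWei(data = d)
## # returns c(lambda.0, k.0, c.0) with c.0 = 1 / 4 = 0.25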
|
/scratch/gouwar.j/cran-all/cranData/CP/R/InitValLikelihoodNonMixWei.R
|
InterimData <- function(data, group.name) {
## Calculates number of death events, person months, number of patients
## and number of patients still alive out of the passed data frame.
##
## Args:
## data: Data frame which consists of at least three columns for the group
## in the first (all of the same group),
## status (1 = event, 0 = censored) in the second
## and event time in the third column.
## group.name: Name of the group.
##
## Results:
## Returns number of death events, person months, number of patients
## and number of patients still alive.
# number of death events, and its verification
d <- sum(data[, 2])
if (d < 0) {
stop(paste("Number of death events in", group.name, "must be bigger than or equal to 0.",
call. = FALSE))
}
# person months, and its verification
o <- sum(data[, 3])
if (o <= 0) {
stop(paste("Number of person months in", group.name, "must be bigger than 0.",
call. = FALSE))
}
# number of patients at the beginning
n <- length(data[, 1])
# number of patients still alive
n.alive <- sum(1 - data[, 2])
return(c(d, o, n, n.alive))
}
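## Illustrative call (hypothetical data of one group):
## d <- data.frame(group = rep("A", 4),
##                 stat  = c(1, 0, 1, 0),
##                 time  = c(2, 3, 1, 4))
## InterimData(data = d, group.name = "A")
## # returns c(2, 10, 4, 2): death events, person months,
## # patients at the beginning, patients still alive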
|
/scratch/gouwar.j/cran-all/cranData/CP/R/InterimData.R
|
IsValid <- function(data, cont.time, new.pat, theta.0, alpha, disp.data, plot.km) {
## Checks the passed parameters.
##
## Args:
## data: Data frame which consists of at least three columns with the group
## (two different expressions) in the first,
## status (1 = event, 0 = censored) in the second
## and event time in the third column.
## cont.time: Period of time of continuing the trial.
## new.pat: 2-dimensional vector which consists of numbers of new patients
## who will be recruited each time unit
## (first component = group 1, second component = group 2).
## theta.0: Originally postulated clinically relevant difference (hazard ratio).
## alpha: Significance level for conditional power calculations.
## disp.data: Logical value indicating if all calculated data should be displayed.
## plot.km: Logical value indicating if Kaplan-Meier curves should be plotted.
##
## Returns:
  ##   Stops the calculations if any parameter is invalid; otherwise nothing is returned.
# check of data
if (length(x = data) < 3) {
stop("data must have three or more columns.",
call. = FALSE)
}
if (length(x = data[, 1]) <= 0) {
stop("Columns of data must have dimension bigger than 0.",
call. = FALSE)
}
if (length(x = levels(x = as.factor(x = data[, 1]))) != 2) {
stop("First column of data must consist of exactly two different groups.",
call. = FALSE)
}
if (is.numeric(x = data[, 2]) == FALSE) {
stop("Entries in the second column must be numerical.",
call. = FALSE)
}
length <- length(x = unique(x = data[, 2]))
if (length != 2 &&
length != 1) {
stop("Second column of data must consist of one or two different values.",
call. = FALSE)
}
unique <- unique(x = data[, 2])
if (length == 2) {
if (sort(x = unique)[1] != 0 ||
sort(x = unique)[2] != 1) {
stop("Entries in the second column must be 0 and 1.",
call. = FALSE)
}
}
else { # length == 1
if (unique != 1) {
stop("Entries in the second column must be 1 (and 0).",
call. = FALSE)
}
}
data0 <- split(x = data,
f = data[, 1])
if (sum(data0[[1]][, 2]) <= 0 ||
sum(data0[[2]][, 2]) <= 0) {
stop("There must be minimum one event in each group.",
call. = FALSE)
}
if (is.numeric(x = data[, 3]) == FALSE) {
stop("Entries in the third column of data must be numerical.",
call. = FALSE)
}
if (min(sort(x = data[, 3])) <= 0) {
stop("Entries in the third column must be bigger than 0.",
call. = FALSE)
}
# check of cont.time
if (length(x = cont.time) != 1) {
stop("cont.time must be a single value.",
call. = FALSE)
}
if (is.numeric(x = cont.time) == FALSE) {
stop("cont.time must be numerical.",
call. = FALSE)
}
if (cont.time < 0) {
stop("cont.time must be bigger than or equal to 0.",
call. = FALSE)
}
# check of new.pat
if (length(x = new.pat) != 2) {
stop("new.pat must be a two-dimensional vector.",
call. = FALSE)
}
if (is.numeric(x = new.pat) == FALSE) {
stop("Entries in new.pat must be numerical.",
call. = FALSE)
}
if (new.pat[1] < 0 ||
new.pat[2] < 0) {
stop("Entries in new.pat must be bigger than or equal to 0.",
call. = FALSE)
}
# check of theta.0
if (length(x = theta.0) != 1) {
stop("theta.0 must be a single value.",
call. = FALSE)
}
if (is.numeric(x = theta.0) == FALSE) {
stop("theta.0 must be numerical.",
call. = FALSE)
}
if (theta.0 <= 0) {
stop("theta.0 must be bigger than 0.",
call. = FALSE)
}
# check of alpha
if (length(x = alpha) != 1) {
stop("alpha must be a single value.",
call. = FALSE)
}
if (is.numeric(x = alpha) == FALSE) {
stop("alpha must be numerical.",
call. = FALSE)
}
if (alpha < 0 ||
alpha > 1) {
stop("alpha must be choosen between 0 and 1, respectively equal to 0 or 1.",
call. = FALSE)
}
# check of disp.data
if (length(x = disp.data) != 1) {
stop("disp.data must be a single value.",
call. = FALSE)
}
if (is.logical(x = disp.data) == FALSE) {
stop("disp.data must be logical.",
call. = FALSE)
}
# check of plot.km
if (length(x = plot.km) != 1) {
stop("plot.km must be a single value.",
call. = FALSE)
}
if (is.logical(x = plot.km) == FALSE) {
stop("plot.km must be logical.",
call. = FALSE)
}
}
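## Example of a failing check (hypothetical data containing only one group):
## bad.data <- data.frame(group = rep("A", 4),
##                        stat  = c(1, 0, 1, 0),
##                        time  = c(2, 3, 1, 4))
## IsValid(data = bad.data, cont.time = 6, new.pat = c(2, 2),
##         theta.0 = 0.75, alpha = 0.05,
##         disp.data = FALSE, plot.km = FALSE)
## # stops with "First column of data must consist of exactly two different groups."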
|
/scratch/gouwar.j/cran-all/cranData/CP/R/IsValid.R
|
LikelihoodNonMixExp <- function(data1, data2, data,
lambda1.0, c1.0,
lambda2.0, c2.0,
lambda.0, c.0) {
## Calculates the maximum likelihood estimators
## of the parameters lambda and c in the non-mixture model
## with exponential survival, i. e.
## S(t) = c^[1 - exp(- lambda * t)], lambda > 0, 0 < c < 1, t >= 0.
##
## Args:
## data1: Data frame which consists of at least three columns
## with the group in the first (all of group 1),
## status (1 = event, 0 = censored) in the second
## and event time in the third column.
## data2: Data frame which consists of at least three columns
## with the group in the first (all of group 2),
## status (1 = event, 0 = censored) in the second
## and event time in the third column.
## data: Data frame which consists of at least three columns
## with the group in the first,
## status (1 = event, 0 = censored) in the second
## and event time in the third column.
## lambda1.0: Initial value for the estimate of the parameter lambda in group 1.
## c1.0: Initial value for the estimate of the parameter c in group 1.
## lambda2.0: Initial value for the estimate of the parameter lambda in group 2.
## c2.0: Initial value for the estimate of the parameter c in group 2.
## lambda.0: Initial value for the estimate of the parameter lambda for all data.
## c.0: Initial value for the estimate of the parameter c for all data.
##
## Returns:
## Returns the maximum likelihood estimates for the parameters
## lambda and c, i. e. estimates within the ordinary model
## and estimates under the proportional hazard assumption.
# GROUP 1 (data1) -> lambda1.hat, c1.hat
# log-likelihood function of parameters lambda and c for data of group 1
l1 <- function(x) {
(log(x[1]) * sum(data1[, 2])
- x[1] * sum(data1[, 2] * data1[, 3])
+ log(- log(x[2])) * sum(data1[, 2])
+ log(x[2]) * sum(1 - exp(- x[1] * data1[, 3])))
}
# gradient of log-likelihood function
l1Grad <- function(x) {
c(1 / x[1] * sum(data1[, 2])
- sum(data1[, 2] * data1[, 3])
+ log(x[2]) * sum(data1[, 3] * exp(- x[1] * data1[, 3])),
1 / (x[2] * log(x[2])) * sum(data1[, 2])
+ 1 / x[2] * sum(1 - exp(- x[1] * data1[, 3])))
}
# constraints for parameters lambda and c for data of group 1
eps <- 1e-7
A1 <- matrix(data = c(1, 0,
0, 1,
0, - 1),
nrow = 3,
ncol = 2,
byrow = TRUE)
b1 <- c(eps, eps, - (1 - eps))
# maximum likelihood estimators for parameters lambda and c for data of group 1
res1 <- stats::constrOptim(theta = c(lambda1.0, c1.0),
f = l1,
grad = l1Grad,
ui = A1,
ci = b1,
control = list(fnscale = -1))
# GROUP 2 (data2) -> lambda2.hat, c2.hat
# log-likelihood function of parameters lambda and c for data of group 2
l2 <- function(x) {
(log(x[1]) * sum(data2[, 2])
- x[1] * sum(data2[, 2] * data2[, 3])
+ log(- log(x[2])) * sum(data2[, 2])
+ log(x[2]) * sum(1 - exp(- x[1] * data2[, 3])))
}
# gradient of log-likelihood function
l2Grad <- function(x) {
c(1 / x[1] * sum(data2[, 2])
- sum(data2[, 2] * data2[, 3])
+ log(x[2]) * sum(data2[, 3] * exp(- x[1] * data2[, 3])),
1 / (x[2] * log(x[2])) * sum(data2[, 2])
+ 1 / x[2] * sum(1 - exp(- x[1] * data2[, 3])))
}
# constraints for parameters lambda and c for data of group 2
eps <- 1e-7
A2 <- matrix(data = c(1, 0,
0, 1,
0, - 1),
nrow = 3,
ncol = 2,
byrow = TRUE)
b2 <- c(eps, eps, - (1 - eps))
# maximum likelihood estimators for parameters lambda and c for data of group 2
res2 <- stats::constrOptim(theta = c(lambda2.0, c2.0),
f = l2,
grad = l2Grad,
ui = A2,
ci = b2,
control = list(fnscale = -1))
# ALL DATA (data) -> lambda.hat
# log-likelihood function of parameters lambda and c for all data
lAll <- function(x) {
(log(x[1]) * sum(data[, 2])
- x[1] * sum(data[, 2] * data[, 3])
+ log(- log(x[2])) * sum(data[, 2])
+ log(x[2]) * sum(1 - exp(- x[1] * data[, 3])))
}
# gradient of log-likelihood function
lAllGrad <- function(x) {
c(1 / x[1] * sum(data[, 2])
- sum(data[, 2] * data[, 3])
+ log(x[2]) * sum(data[, 3] * exp(- x[1] * data[, 3])),
1 / (x[2] * log(x[2])) * sum(data[, 2])
+ 1 / x[2] * sum(1 - exp(- x[1] * data[, 3])))
}
# constraints for parameters lambda and c for all data
eps <- 1e-7
AAll <- matrix(data = c(1, 0,
0, 1,
0, - 1),
nrow = 3,
ncol = 2,
byrow = TRUE)
bAll <- c(eps, eps, - (1 - eps))
# maximum likelihood estimators for parameters lambda and c for all data
resAll <- stats::constrOptim(theta = c(lambda.0, c.0),
f = lAll,
grad = lAllGrad,
ui = AAll,
ci = bAll,
control = list(fnscale = -1))
lambda.hat <- resAll$par[1]
# GROUP 1 (data1) with fixed lambda -> c1.cond.hat
# log-likelihood function of parameter c for data of group 1
l1.cond <- function(x) {
(log(lambda.hat) * sum(data1[, 2])
- lambda.hat * sum(data1[, 2] * data1[, 3])
+ log(- log(x)) * sum(data1[, 2])
+ log(x) * sum(1 - exp(- lambda.hat * data1[, 3])))
}
# gradient of log-likelihood function
l1Grad.cond <- function(x) {
(1 / (x * log(x)) * sum(data1[, 2])
+ 1 / x * sum(1 - exp(- lambda.hat * data1[, 3])))
}
# constraints for parameter c for data of group 1
eps <- 1e-7
A1.cond <- matrix(data = c(1,
- 1),
nrow = 2,
ncol = 1,
byrow = TRUE)
b1.cond <- c(eps, - (1 - eps))
# maximum likelihood estimator for parameter c for data of group 1
res1.cond <- stats::constrOptim(theta = c1.0,
f = l1.cond,
grad = l1Grad.cond,
ui = A1.cond,
ci = b1.cond,
control = list(fnscale = -1))
# GROUP 2 (data2) with fixed lambda -> c2.cond.hat
# log-likelihood function of parameter c for data of group 2
l2.cond <- function(x) {
(log(lambda.hat) * sum(data2[, 2])
- lambda.hat * sum(data2[, 2] * data2[, 3])
+ log(- log(x)) * sum(data2[, 2])
+ log(x) * sum(1 - exp(- lambda.hat * data2[, 3])))
}
# gradient of log-likelihood function
l2Grad.cond <- function(x) {
(1 / (x * log(x)) * sum(data2[, 2])
+ 1 / x * sum(1 - exp(- lambda.hat * data2[, 3])))
}
# constraints for parameter c for data of group 2
eps <- 1e-7
A2.cond <- matrix(data = c(1,
- 1),
nrow = 2,
ncol = 1,
byrow = TRUE)
b2.cond <- c(eps, - (1 - eps))
# maximum likelihood estimator for parameter c for data of group 2
res2.cond <- stats::constrOptim(theta = c2.0,
f = l2.cond,
grad = l2Grad.cond,
ui = A2.cond,
ci = b2.cond,
control = list(fnscale = -1))
return(c(res1$par[1], res1$par[2],
res2$par[1], res2$par[2],
resAll$par[1],
res1.cond$par,
res2.cond$par,
res1.cond$value,
res2.cond$value))
}
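## Small numeric illustration of the linear constraints used above:
## stats::constrOptim() requires ui %*% par - ci >= 0, which here encodes
## lambda >= eps and eps <= c <= 1 - eps.
## eps <- 1e-7
## ui  <- matrix(c(1,  0,     # lambda >= eps
##                 0,  1,     # c      >= eps
##                 0, -1),    # -c >= -(1 - eps), i.e. c <= 1 - eps
##               nrow = 3, byrow = TRUE)
## ci  <- c(eps, eps, -(1 - eps))
## all(ui %*% c(0.5, 0.4) - ci >= 0)   # TRUE for a feasible (lambda, c)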
|
/scratch/gouwar.j/cran-all/cranData/CP/R/LikelihoodNonMixExp.R
|
LikelihoodNonMixGamma <- function(data1, data2, data,
a1.0, b1.0, c1.0,
a2.0, b2.0, c2.0,
a.0, b.0, c.0) {
## Calculates the maximum likelihood estimators
## of the parameters a, b and c in the non-mixture model
## with Gamma type survival, i. e.
## S(t) = c^Gamma^(0)(a, b * t), a > 0, b > 0, 0 < c < 1, t >= 0,
  ## with Gamma^(0) being the regularized lower incomplete Gamma function
  ## evaluated with upper integration limit b * t.
##
## Args:
## data1: Data frame which consists of at least three columns with the group
## in the first (all of group 1),
## status (1 = event, 0 = censored) in the second
## and event time in the third column.
## data2: Data frame which consists of at least three columns with the group
## in the first (all of group 2),
## status (1 = event, 0 = censored) in the second
## and event time in the third column.
## data: Data frame which consists of at least three columns with the group
## in the first (all of the same group),
## status (1 = event, 0 = censored) in the second
## and event time in the third column.
## a1.0: Initial value for the estimate of the parameter a in group 1.
## b1.0: Initial value for the estimate of the parameter b in group 1.
## c1.0: Initial value for the estimate of the parameter c in group 1.
## a2.0: Initial value for the estimate of the parameter a in group 2.
## b2.0: Initial value for the estimate of the parameter b in group 2.
## c2.0: Initial value for the estimate of the parameter c in group 2.
## a.0: Initial value for the estimate of the parameter a for all data.
## b.0: Initial value for the estimate of the parameter b for all data.
## c.0: Initial value for the estimate of the parameter c for all data.
##
## Returns:
## Returns the maximum likelihood estimates for the parameters
## a, b and c, i. e. estimates within the ordinary model
## and estimates under the proportional hazard assumption.
# GROUP 1 (data1) -> a1.hat, b1.hat, c1.hat
# log-likelihood function of parameters a, b and c for data of group 1
l1 <- function(x) {
(x[1] * log(x[2]) * sum(data1[, 2])
- lgamma(x = x[1]) * sum(data1[, 2])
+ (x[1] - 1) * sum(data1[, 2] * log(data1[, 3]))
- x[2] * sum(data1[, 2] * data1[, 3])
+ log(- log(x[3])) * sum(data1[, 2])
+ log(x[3]) * sum(stats::pgamma(q = data1[, 3],
shape = x[1],
rate = x[2])))
}
# auxiliary function
Integrand1 <- function(x, par) {
log(x) * x^(par - 1) * exp(-x)
}
# gradient of log-likelihood function
l1Grad <- function(x) {
c(log(x[2]) * sum(data1[, 2])
- digamma(x = x[1]) * sum(data1[, 2])
+ sum(data1[, 2] * log(data1[, 3]))
+ log(x[3]) * sum((gamma(x = x[1])
* sapply(data1[, 3], function(t) {
stats::integrate(f = Integrand1,
lower = 0,
upper = x[2] * t,
par = x[1])$value
})
- gamma(x = x[1]) * stats::pgamma(q = data1[, 3],
shape = x[1],
rate = x[2])
* stats::integrate(f = Integrand1,
lower = 0,
upper = Inf,
par = x[1])$value)
/ gamma(x = x[1])^2),
x[1] / x[2] * sum(data1[, 2])
- sum(data1[, 2] * data1[, 3])
+ log(x[3]) * sum((x[2] * data1[, 3])^(x[1] - 1) * exp(- x[2] * data1[, 3]) * data1[, 3]
/ gamma(x = x[1])),
1 / (x[3] * log(x[3])) * sum(data1[, 2])
+ 1 / x[3] * sum(stats::pgamma(q = data1[, 3],
shape = x[1],
rate = x[2])))
}
# constraints for parameters a, b and c for data of group 1
eps <- 1e-7
A1 <- matrix(data = c(1, 0, 0,
0, 1, 0,
0, 0, 1,
0, 0, - 1),
nrow = 4,
ncol = 3,
byrow = TRUE)
b1 <- c(eps, eps, eps, - (1 - eps))
# maximum likelihood estimators for parameters a, b and c for data of group 1
res1 <- stats::constrOptim(theta = c(a1.0, b1.0, c1.0),
f = l1,
grad = l1Grad,
ui = A1,
ci = b1,
control = list(fnscale = -1))
# GROUP 2 (data2) -> a2.hat, b2.hat, c2.hat
# log-likelihood function of parameters a, b and c for data of group 2
l2 <- function(x) {
(x[1] * log(x[2]) * sum(data2[, 2])
- lgamma(x = x[1]) * sum(data2[, 2])
+ (x[1] - 1) * sum(data2[, 2] * log(data2[, 3]))
- x[2] * sum(data2[, 2] * data2[, 3])
+ log(- log(x[3])) * sum(data2[, 2])
+ log(x[3]) * sum(stats::pgamma(q = data2[, 3],
shape = x[1],
rate = x[2])))
}
# auxiliary function
Integrand2 <- function(x, par) {
log(x) * x^(par - 1) * exp(-x)
}
# gradient of log-likelihood function
l2Grad <- function(x) {
c(log(x[2]) * sum(data2[, 2])
- digamma(x = x[1]) * sum(data2[, 2])
+ sum(data2[, 2] * log(data2[, 3]))
+ log(x[3]) * sum((gamma(x = x[1])
* sapply(data2[, 3], function(t) {
stats::integrate(f = Integrand2,
lower = 0,
upper = x[2] * t,
par = x[1])$value
})
- gamma(x = x[1]) * stats::pgamma(q = data2[, 3],
shape = x[1],
rate = x[2])
* stats::integrate(f = Integrand2,
lower = 0,
upper = Inf,
par = x[1])$value)
/ gamma(x = x[1])^2),
x[1] / x[2] * sum(data2[, 2])
- sum(data2[, 2] * data2[, 3])
+ log(x[3]) * sum((x[2] * data2[, 3])^(x[1] - 1) * exp(- x[2] * data2[, 3]) * data2[, 3]
/ gamma(x = x[1])),
1 / (x[3] * log(x[3])) * sum(data2[, 2])
+ 1 / x[3] * sum(stats::pgamma(q = data2[, 3],
shape = x[1],
rate = x[2])))
}
# constraints for parameters a, b and c for data of group 2
eps <- 1e-7
A2 <- matrix(data = c(1, 0, 0,
0, 1, 0,
0, 0, 1,
0, 0, - 1),
nrow = 4,
ncol = 3,
byrow = TRUE)
b2 <- c(eps, eps, eps, - (1 - eps))
# maximum likelihood estimators for parameters a, b and c for data of group 2
res2 <- stats::constrOptim(theta = c(a2.0, b2.0, c2.0),
f = l2,
grad = l2Grad,
ui = A2,
ci = b2,
control = list(fnscale = -1))
# ALL DATA (data) -> a.hat, b.hat
# log-likelihood function of parameters a, b and c for all data
lAll <- function(x) {
(x[1] * log(x[2]) * sum(data[, 2])
- lgamma(x = x[1]) * sum(data[, 2])
+ (x[1] - 1) * sum(data[, 2] * log(data[, 3]))
- x[2] * sum(data[, 2] * data[, 3])
+ log(- log(x[3])) * sum(data[, 2])
+ log(x[3]) * sum(stats::pgamma(q = data[, 3],
shape = x[1],
rate = x[2])))
}
# auxiliary function
IntegrandAll <- function(x, par) {
log(x) * x^(par - 1) * exp(-x)
}
# gradient of log-likelihood function
lAllGrad <- function(x) {
c(log(x[2]) * sum(data[, 2])
- digamma(x = x[1]) * sum(data[, 2])
+ sum(data[, 2] * log(data[, 3]))
+ log(x[3]) * sum((gamma(x = x[1])
* sapply(data[, 3], function(t) {
stats::integrate(f = IntegrandAll,
lower = 0,
upper = x[2] * t,
par = x[1])$value
})
- gamma(x = x[1]) * stats::pgamma(q = data[, 3],
shape = x[1],
rate = x[2])
* stats::integrate(f = IntegrandAll,
lower = 0,
upper = Inf,
par = x[1])$value)
/ gamma(x = x[1])^2),
x[1] / x[2] * sum(data[, 2])
- sum(data[, 2] * data[, 3])
+ log(x[3]) * sum((x[2] * data[, 3])^(x[1] - 1) * exp(- x[2] * data[, 3]) * data[, 3]
/ gamma(x = x[1])),
1 / (x[3] * log(x[3])) * sum(data[, 2])
+ 1 / x[3] * sum(stats::pgamma(q = data[, 3],
shape = x[1],
rate = x[2])))
}
# constraints for parameters a, b and c for all data
eps <- 1e-7
AAll <- matrix(data = c(1, 0, 0,
0, 1, 0,
0, 0, 1,
0, 0, - 1),
nrow = 4,
ncol = 3,
byrow = TRUE)
bAll <- c(eps, eps, eps, - (1 - eps))
# maximum likelihood estimators for parameters a, b and c for all data
resAll <- stats::constrOptim(theta = c(a.0, b.0, c.0),
f = lAll,
grad = lAllGrad,
ui = AAll,
ci = bAll,
control = list(fnscale = -1))
a.hat <- resAll$par[1]
b.hat <- resAll$par[2]
# GROUP 1 (data1) with fixed a and b -> c1.cond.hat
# log-likelihood function of parameter c for data of group 1
l1.cond <- function(x) {
(a.hat * log(b.hat) * sum(data1[, 2])
- lgamma(x = a.hat) * sum(data1[, 2])
+ (a.hat - 1) * sum(data1[, 2] * log(data1[, 3]))
- b.hat * sum(data1[, 2] * data1[, 3])
+ log(- log(x)) * sum(data1[, 2])
+ log(x) * sum(stats::pgamma(q = data1[, 3],
shape = a.hat,
rate = b.hat)))
}
# auxiliary function
Integrand1.cond <- function(x, par) {
log(x) * x^(par - 1) * exp(-x)
}
# gradient of log-likelihood function
l1Grad.cond <- function(x) {
(1 / (x * log(x)) * sum(data1[, 2])
+ 1 / x * sum(stats::pgamma(q = data1[, 3],
shape = a.hat,
rate = b.hat)))
}
# constraints for parameter c for data of group 1
eps <- 1e-7
A1.cond <- matrix(data = c(1,
- 1),
nrow = 2,
ncol = 1,
byrow = TRUE)
b1.cond <- c(eps, - (1 - eps))
# maximum likelihood estimator for parameter c for data of group 1
res1.cond <- stats::constrOptim(theta = c1.0,
f = l1.cond,
grad = l1Grad.cond,
ui = A1.cond,
ci = b1.cond,
control = list(fnscale = -1))
# GROUP 2 (data2) with fixed a and b -> c2.cond.hat
# log-likelihood function of parameter c for data of group 2
l2.cond <- function(x) {
(a.hat * log(b.hat) * sum(data2[, 2])
- lgamma(x = a.hat) * sum(data2[, 2])
+ (a.hat - 1) * sum(data2[, 2] * log(data2[, 3]))
- b.hat * sum(data2[, 2] * data2[, 3])
+ log(- log(x)) * sum(data2[, 2])
+ log(x) * sum(stats::pgamma(q = data2[, 3],
shape = a.hat,
rate = b.hat)))
}
# auxiliary function
Integrand2.cond <- function(x, par) {
log(x) * x^(par - 1) * exp(-x)
}
# gradient of log-likelihood function
l2Grad.cond <- function(x) {
(1 / (x * log(x)) * sum(data2[, 2])
+ 1 / x * sum(stats::pgamma(q = data2[, 3],
shape = a.hat,
rate = b.hat)))
}
# constraints for parameter c for data of group 2
eps <- 1e-7
A2.cond <- matrix(data = c(1,
- 1),
nrow = 2,
ncol = 1,
byrow = TRUE)
b2.cond <- c(eps, - (1 - eps))
# maximum likelihood estimator for parameter c for data of group 2
res2.cond <- stats::constrOptim(theta = c2.0,
f = l2.cond,
grad = l2Grad.cond,
ui = A2.cond,
ci = b2.cond,
control = list(fnscale = -1))
return(c(res1$par[1], res1$par[2], res1$par[3],
res2$par[1], res2$par[2], res2$par[3],
resAll$par[1], resAll$par[2],
res1.cond$par,
res2.cond$par,
res1.cond$value,
res2.cond$value))
}
|
/scratch/gouwar.j/cran-all/cranData/CP/R/LikelihoodNonMixGamma.R
|
LikelihoodNonMixWei <- function(data1, data2, data,
lambda1.0, k1.0, c1.0,
lambda2.0, k2.0, c2.0,
lambda.0, k.0, c.0) {
## Calculates the maximum likelihood estimators
## of the parameters lambda, k and c in the non-mixture model
## with Weibull type survival, i. e.
## S(t) = c^[1 - exp(- lambda * t^k)], lambda > 0, k > 0, 0 < c < 1, t >= 0.
##
## Args:
## data1: Data frame which consists of at least three columns with the group
## in the first (all of group 1),
## status (1 = event, 0 = censored) in the second
## and event time in the third column.
## data2: Data frame which consists of at least three columns with the group
## in the first (all of group 2),
## status (1 = event, 0 = censored) in the second
## and event time in the third column.
## data: Data frame which consists of at least three columns with the group
## in the first (all of the same group),
## status (1 = event, 0 = censored) in the second
## and event time in the third column.
## lambda1.0: Initial value for the estimate of the parameter lambda in group 1.
## k1.0: Initial value for the estimate of the parameter k in group 1.
## c1.0: Initial value for the estimate of the parameter c in group 1.
## lambda2.0: Initial value for the estimate of the parameter lambda in group 2.
## k2.0: Initial value for the estimate of the parameter k in group 2.
## c2.0: Initial value for the estimate of the parameter c in group 2.
## lambda.0: Initial value for the estimate of the parameter lambda for all data.
## k.0: Initial value for the estimate of the parameter k for all data.
## c.0: Initial value for the estimate of the parameter c for all data.
##
## Returns:
## Returns the maximum likelihood estimates for the parameters
## lambda, k and c, i. e. estimates within the ordinary model
## and estimates under the proportional hazard assumption.
# GROUP 1 (data1) -> lambda1.hat, k1.hat, c1.hat
# log-likelihood function of parameters lambda, k and c for data of group 1
l1 <- function(x) {
(log(x[1]) * sum(data1[, 2])
+ log(x[2]) * sum(data1[, 2])
+ (x[2] - 1) * sum(data1[, 2] * log(data1[, 3]))
- x[1] * sum(data1[, 2] * data1[, 3]^x[2])
+ log(- log(x[3])) * sum(data1[, 2])
+ log(x[3]) * sum(1 - exp(- x[1] * data1[, 3]^x[2])))
}
# gradient of log-likelihood function
l1Grad <- function(x) {
c(1 / x[1] * sum(data1[, 2])
- sum(data1[, 2] * data1[, 3]^x[2])
+ log(x[3]) * sum(data1[, 3]^x[2] * exp(- x[1] * data1[, 3]^x[2])),
1 / x[2] * sum(data1[, 2])
+ sum(data1[, 2] * log(data1[, 3]))
- x[1] * sum(data1[, 2] * data1[, 3]^x[2] * log(data1[, 3]))
+ x[1] * log(x[3]) * sum(exp(- x[1] * data1[, 3]^x[2]) * data1[, 3]^x[2] * log(data1[, 3])),
1 / (x[3] * log(x[3])) * sum(data1[, 2])
+ 1 / x[3] * sum(1 - exp(- x[1] * data1[, 3]^x[2])))
}
# constraints for parameters lambda, k and c for data of group 1
eps <- 1e-7
A1 <- matrix(data = c(1, 0, 0,
0, 1, 0,
0, 0, 1,
0, 0, - 1),
nrow = 4,
ncol = 3,
byrow = TRUE)
b1 <- c(eps, eps, eps, - (1 - eps))
# maximum likelihood estimators for parameters lambda, k and c for data of group 1
res1 <- stats::constrOptim(theta = c(lambda1.0, k1.0, c1.0),
f = l1,
grad = l1Grad,
ui = A1,
ci = b1,
control = list(fnscale = -1))
# GROUP 2 (data2) -> lambda2.hat, k2.hat, c2.hat
# log-likelihood function of parameters lambda, k and c for data of group 2
l2 <- function(x) {
(log(x[1]) * sum(data2[, 2])
+ log(x[2]) * sum(data2[, 2])
+ (x[2] - 1) * sum(data2[, 2] * log(data2[, 3]))
- x[1] * sum(data2[, 2] * data2[, 3]^x[2])
+ log(- log(x[3])) * sum(data2[, 2])
+ log(x[3]) * sum(1 - exp(- x[1] * data2[, 3]^x[2])))
}
# gradient of log-likelihood function
l2Grad <- function(x) {
c(1 / x[1] * sum(data2[, 2])
- sum(data2[, 2] * data2[, 3]^x[2])
+ log(x[3]) * sum(data2[, 3]^x[2] * exp(- x[1] * data2[, 3]^x[2])),
1 / x[2] * sum(data2[, 2])
+ sum(data2[, 2] * log(data2[, 3]))
- x[1] * sum(data2[, 2] * data2[, 3]^x[2] * log(data2[, 3]))
+ x[1] * log(x[3]) * sum(exp(- x[1] * data2[, 3]^x[2]) * data2[, 3]^x[2] * log(data2[, 3])),
1 / (x[3] * log(x[3])) * sum(data2[, 2])
+ 1 / x[3] * sum(1 - exp(- x[1] * data2[, 3]^x[2])))
}
# constraints for parameters lambda, k and c for data of group 2
eps <- 1e-7
A2 <- matrix(data = c(1, 0, 0,
0, 1, 0,
0, 0, 1,
0, 0, - 1),
nrow = 4,
ncol = 3,
byrow = TRUE)
b2 <- c(eps, eps, eps, - (1 - eps))
# maximum likelihood estimators for parameters lambda, k and c for data of group 2
res2 <- stats::constrOptim(theta = c(lambda2.0, k2.0, c2.0),
f = l2,
grad = l2Grad,
ui = A2,
ci = b2,
control = list(fnscale = -1))
# ALL DATA (data) -> lambda.hat, k.hat
# log-likelihood function of parameters lambda, k and c for all data
lAll <- function(x) {
(log(x[1]) * sum(data[, 2])
+ log(x[2]) * sum(data[, 2])
+ (x[2] - 1) * sum(data[, 2] * log(data[, 3]))
- x[1] * sum(data[, 2] * data[, 3]^x[2])
+ log(- log(x[3])) * sum(data[, 2])
+ log(x[3]) * sum(1 - exp(- x[1] * data[, 3]^x[2])))
}
# gradient of log-likelihood function
lAllGrad <- function(x) {
c(1 / x[1] * sum(data[, 2])
- sum(data[, 2] * data[, 3]^x[2])
+ log(x[3]) * sum(data[, 3]^x[2] * exp(- x[1] * data[, 3]^x[2])),
1 / x[2] * sum(data[, 2])
+ sum(data[, 2] * log(data[, 3]))
- x[1] * sum(data[, 2] * data[, 3]^x[2] * log(data[, 3]))
+ x[1] * log(x[3]) * sum(exp(- x[1] * data[, 3]^x[2]) * data[, 3]^x[2] * log(data[, 3])),
1 / (x[3] * log(x[3])) * sum(data[, 2])
+ 1 / x[3] * sum(1 - exp(- x[1] * data[, 3]^x[2])))
}
# constraints for parameters lambda, k and c for all data
eps <- 1e-7
AAll <- matrix(data = c(1, 0, 0,
0, 1, 0,
0, 0, 1,
0, 0, - 1),
nrow = 4,
ncol = 3,
byrow = TRUE)
bAll <- c(eps, eps, eps, - (1 - eps))
# maximum likelihood estimators for parameters lambda, k and c for all data
resAll <- stats::constrOptim(theta = c(lambda.0, k.0, c.0),
f = lAll,
grad = lAllGrad,
ui = AAll,
ci = bAll,
control = list(fnscale = -1))
lambda.hat <- resAll$par[1]
k.hat <- resAll$par[2]
# GROUP 1 (data1) with fixed lambda and k -> c1.cond.hat
# log-likelihood function of parameter c for data of group 1
l1.cond <- function(x) {
(log(lambda.hat) * sum(data1[, 2])
+ log(k.hat) * sum(data1[, 2])
+ (k.hat - 1) * sum(data1[, 2] * log(data1[, 3]))
- lambda.hat * sum(data1[, 2] * data1[, 3]^k.hat)
+ log(- log(x)) * sum(data1[, 2])
+ log(x) * sum(1 - exp(- lambda.hat * data1[, 3]^k.hat)))
}
# gradient of log-likelihood function
l1Grad.cond <- function(x) {
(1 / (x * log(x)) * sum(data1[, 2])
+ 1 / x * sum(1 - exp(- lambda.hat * data1[, 3]^k.hat)))
}
# constraints for parameter c for data of group 1
eps <- 1e-7
A1.cond <- matrix(data = c(1,
- 1),
nrow = 2,
ncol = 1,
byrow = TRUE)
b1.cond <- c(eps, - (1 - eps))
# maximum likelihood estimator for parameter c for data of group 1
res1.cond <- stats::constrOptim(theta = c1.0,
f = l1.cond,
grad = l1Grad.cond,
ui = A1.cond,
ci = b1.cond,
control = list(fnscale = -1))
# GROUP 2 (data2) with fixed lambda and k -> c2.cond.hat
# log-likelihood function of parameter c for data of group 2
l2.cond <- function(x) {
(log(lambda.hat) * sum(data2[, 2])
+ log(k.hat) * sum(data2[, 2])
+ (k.hat - 1) * sum(data2[, 2] * log(data2[, 3]))
- lambda.hat * sum(data2[, 2] * data2[, 3]^k.hat)
+ log(- log(x)) * sum(data2[, 2])
+ log(x) * sum(1 - exp(- lambda.hat * data2[, 3]^k.hat)))
}
# gradient of log-likelihood function
l2Grad.cond <- function(x) {
(1 / (x * log(x)) * sum(data2[, 2])
+ 1 / x * sum(1 - exp(- lambda.hat * data2[, 3]^k.hat)))
}
# constraints for parameter c for data of group 2
eps <- 1e-7
A2.cond <- matrix(data = c(1,
- 1),
nrow = 2,
ncol = 1,
byrow = TRUE)
b2.cond <- c(eps, - (1 - eps))
# maximum likelihood estimator for parameter c for data of group 2
res2.cond <- stats::constrOptim(theta = c2.0,
f = l2.cond,
grad = l2Grad.cond,
ui = A2.cond,
ci = b2.cond,
control = list(fnscale = -1))
return(c(res1$par[1], res1$par[2], res1$par[3],
res2$par[1], res2$par[2], res2$par[3],
resAll$par[1], resAll$par[2],
res1.cond$par,
res2.cond$par,
res1.cond$value,
res2.cond$value))
}
|
/scratch/gouwar.j/cran-all/cranData/CP/R/LikelihoodNonMixWei.R
|
PersMonExp <- function(d, o, n.alive, new.pat, cont.time) {
## Calculates the further person months
## in the exponential survival, i. e.
## S(t) = exp( - lambda * t), lambda > 0, t >= 0,
## from the interim analysis until the end of time of continuation.
##
## Args:
  ##   d: Number of death events observed at the interim analysis.
  ##   o: Person months observed at the interim analysis.
## n.alive: Number of patients still alive.
## new.pat: Number of patients who will be recruited each time unit.
## cont.time: Period of time of continuing the trial.
##
## Results:
## Further person months.
# calculation of further person months
O.star <- (n.alive * (1 - exp(- d / o * cont.time)) / (d / o)
+ new.pat * cont.time / (d / o)
- new.pat * (1 - exp(- d / o * cont.time)) / (d / o)^2)
return(O.star)
}
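## Sanity check of the closed form above (hypothetical numbers), assuming
## d death events and o person months were observed, so lambda = d / o:
## the first summand equals n.alive times the integral of exp(- lambda * t)
## over [0, cont.time], and the remaining two summands equal new.pat times
## the integral of (1 - exp(- lambda * (cont.time - s))) / lambda over the
## accrual times s in [0, cont.time].
## d <- 10; o <- 100; n.alive <- 20; new.pat <- 2; cont.time <- 12
## lambda  <- d / o
## closed  <- PersMonExp(d, o, n.alive, new.pat, cont.time)
## numeric <- (n.alive * stats::integrate(f = function(t) exp(- lambda * t),
##                                        lower = 0,
##                                        upper = cont.time)$value
##             + new.pat * stats::integrate(f = function(s) (1 - exp(- lambda * (cont.time - s))) / lambda,
##                                          lower = 0,
##                                          upper = cont.time)$value)
## all.equal(closed, numeric, tolerance = 1e-6)   # TRUE up to integration error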
|
/scratch/gouwar.j/cran-all/cranData/CP/R/PersMonExp.R
|
PersMonNonMixExp <- function(lambda, c, n.alive, new.pat, cont.time) {
## Calculates the further person months
## in the non-mixture model with exponential survival, i. e.
## S(t) = c^[1 - exp(- lambda * t)], lambda > 0, 0 < c < 1, t >= 0,
## from the interim analysis until the end of time of continuation.
##
## Args:
  ##   lambda: Parameter lambda of the survival function.
  ##   c: Parameter c (survival fraction).
## n.alive: Number of patients still alive.
## new.pat: Number of patients who will be recruited each time unit.
## cont.time: Period of time of continuing the trial.
##
## Results:
## Further person months.
# auxiliary functions / integrands
Integrand1 <- function(x) {
c^(1 - exp(- lambda * x))
}
Integrand2 <- function(x) {
c^exp(- lambda * x)
}
Integrand3 <- function(x) {
(stats::integrate(f = Integrand2,
lower = 0,
upper = x)$value * c^(1 - exp(- lambda * x)))
}
# calculation of further person months
O.star <- (n.alive * stats::integrate(f = Integrand1,
lower = 0,
upper = cont.time)$value
+ new.pat / c * stats::integrate(f = Vectorize(Integrand3),
lower = 0,
upper = cont.time)$value)
return(O.star)
}
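## Integrand3 above calls stats::integrate internally and therefore only
## accepts scalar arguments, which is why it is wrapped in Vectorize()
## before being passed to the outer integral.
## Illustrative call (hypothetical parameters):
## PersMonNonMixExp(lambda = 0.1, c = 0.3,
##                  n.alive = 20, new.pat = 2, cont.time = 12)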
|
/scratch/gouwar.j/cran-all/cranData/CP/R/PersMonNonMixExp.R
|
PersMonNonMixGamma <- function(a, b, c, n.alive, new.pat, cont.time) {
## Calculates the further person months
## in the non-mixture model with Gamma type survival, i. e.
## S(t) = c^Gamma^(0)(a, b * t), a > 0, b > 0, 0 < c < 1, t >= 0,
  ## with Gamma^(0) being the regularized lower incomplete Gamma function with upper integration limit b * t,
## from the interim analysis until the end of time of continuation.
##
## Args:
  ##   a: Shape parameter a.
  ##   b: Rate parameter b.
  ##   c: Parameter c (survival fraction).
## n.alive: Number of patients still alive.
## new.pat: Number of patients who will be recruited each time unit.
## cont.time: Period of time of continuing the trial.
##
## Results:
## Further person months.
# auxiliary functions / integrands
Integrand1 <- function(x) {
c^stats::pgamma(q = x,
shape = a,
rate = b)
}
Integrand2 <- function(x) {
c^(1 - stats::pgamma(q = x,
shape = a,
rate = b))
}
Integrand3 <- function(x) {
(stats::integrate(f = Integrand2,
lower = 0,
upper = x)$value * c^stats::pgamma(q = x,
shape = a,
rate = b))
}
# calculation of further person months
O.star <- (n.alive * stats::integrate(f = Integrand1,
lower = 0,
upper = cont.time)$value
+ new.pat / c * stats::integrate(f = Vectorize(Integrand3),
lower = 0,
upper = cont.time)$value)
return(O.star)
}
|
/scratch/gouwar.j/cran-all/cranData/CP/R/PersMonNonMixGamma.R
|
PersMonNonMixWei <- function(lambda, k, c, n.alive, new.pat, cont.time) {
## Calculates the further person months
## in the non-mixture model with Weibull type survival, i. e.
## S(t) = c^[1 - exp(- lambda * t^k)], lambda > 0, k > 0, 0 < c < 1, t >= 0,
## from the interim analysis until the end of time of continuation.
##
## Args:
  ##   lambda: Scale parameter lambda.
  ##   k: Shape parameter k.
  ##   c: Parameter c (survival fraction).
## n.alive: Number of patients still alive.
## new.pat: Number of patients who will be recruited each time unit.
## cont.time: Period of time of continuing the trial.
##
## Results:
## Further person months.
# auxiliary functions / integrands
Integrand1 <- function(x) {
c^(1 - exp(- lambda * x^k))
}
Integrand2 <- function(x) {
c^exp(- lambda * x^k)
}
Integrand3 <- function(x) {
(stats::integrate(f = Integrand2,
lower = 0,
upper = x)$value * c^(1 - exp(- lambda * x^k)))
}
# calculation of further person months
O.star <- (n.alive * stats::integrate(f = Integrand1,
lower = 0,
upper = cont.time)$value
+ new.pat / c * stats::integrate(f = Vectorize(Integrand3),
lower = 0,
upper = cont.time)$value)
return(O.star)
}
|
/scratch/gouwar.j/cran-all/cranData/CP/R/PersMonNonMixWei.R
|
PlotConPwr <- function(theta, gamma.theta,
theta.0, gamma.theta.0,
group1.name, group2.name,
model.name) {
## Plots the conditional power curve.
##
## Args:
## theta: Vector of hazard ratios for plotting.
## gamma.theta: Conditional power according to theta.
## theta.0: Originally postulated clinically relevant difference
## (hazard ratio = hazard of group 2 / hazard of group 1).
## gamma.theta.0: Conditional power according to theta.0.
## group1.name: Name of group 1.
## group2.name: Name of group 2.
## model.name: Name of the used model for estimation.
##
## Returns:
  ##   Plot of the conditional power curve.
graphics::plot(x = log(theta),
y = gamma.theta,
type = "l",
main = model.name,
#sub = paste("log(",
# theta.0,
# ") = ",
# formatC(x = log(theta.0),
# digits = 4,
# format = "f"),
# " ",
# "CP(",
# theta.0,
# ") = ",
# formatC(x = gamma.theta.0,
# digits = 4,
# format = "f"),
# sep = ""),
xlab = paste("log(Hazard Ratio) = log(Hazard ",
group2.name,
" / Hazard ",
group1.name,
")",
sep = ""),
ylab = "Conditional Power",
col = "red",
ylim = c(0, 1))
graphics::abline(v = log(theta.0),
lty = 3)
graphics::abline(h = gamma.theta.0,
lty = 3)
}
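## Illustrative call with a hypothetical conditional power curve
## (any values in [0, 1] over a grid of hazard ratios will do):
## theta       <- seq(from = 0.25, to = 2, by = 0.01)
## gamma.theta <- pnorm(-2 * log(theta))
## PlotConPwr(theta = theta, gamma.theta = gamma.theta,
##            theta.0 = 0.75, gamma.theta.0 = pnorm(-2 * log(0.75)),
##            group1.name = "A", group2.name = "B",
##            model.name = "Exponential")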
|
/scratch/gouwar.j/cran-all/cranData/CP/R/PlotConPwr.R
|
PlotConPwrAll <- function(theta,
gamma.theta.exp,
gamma.theta.nonmix.exp,
gamma.theta.nonmix.wei,
gamma.theta.nonmix.gamma,
theta.0,
gamma.theta.0.exp,
gamma.theta.0.nonmix.exp,
gamma.theta.0.nonmix.wei,
gamma.theta.0.nonmix.gamma,
group1.name, group2.name) {
## Plots the conditional power curves.
##
## Args:
## theta: Vector of hazard ratios for plotting.
## gamma.theta.exp: Conditional power within the exponential model.
## gamma.theta.nonmix.exp: Conditional power within the non-mixture model
## with exponential survival.
## gamma.theta.nonmix.wei: Conditional power within the non-mixture model
## with Weibull type survival.
## gamma.theta.nonmix.gamma: Conditional power within the non-mixture model
## with Gamma type survival.
## theta.0: Originally postulated clinically relevant difference
## (hazard ratio = hazard of group 2 / hazard of group 1).
## gamma.theta.0.exp: Conditional power within the exponential model
## according to theta.0.
## gamma.theta.0.nonmix.exp: Conditional power within the non-mixture model
## with exponential survival
## according to theta.0.
## gamma.theta.0.nonmix.wei: Conditional power within the non-mixture model
## with Weibull type survival
## according to theta.0.
## gamma.theta.0.nonmix.gamma: Conditional power within the non-mixture model
## with Gamma type survival
## according to theta.0.
## group1.name: Name of group 1.
## group2.name: Name of group 2.
##
## Returns:
  ##   Plot of the conditional power curves.
graphics::plot(x = log(theta),
y = gamma.theta.exp,
type = "l",
xlab = paste("log(Hazard Ratio) = log(Hazard ",
group2.name,
" / Hazard ",
group1.name,
")",
sep = ""),
ylab = "Conditional Power",
col = "red",
ylim = c(0, 1))
graphics::lines(x = log(theta),
y = gamma.theta.nonmix.exp,
col = "blue")
graphics::lines(x = log(theta),
y = gamma.theta.nonmix.wei,
col = "green")
graphics::lines(x = log(theta),
y = gamma.theta.nonmix.gamma,
col = "yellow")
graphics::abline(v = log(theta.0),
lty = 3)
graphics::legend(x = "topright",
legend = c("Exponential",
"Non-Mixture-Exponential",
"Non-Mixture-Weibull",
"Non-Mixture-Gamma"),
col = c("red",
"blue",
"green",
"yellow"),
lty = c(1,
1,
1,
1),
bg = "white")
}
|
/scratch/gouwar.j/cran-all/cranData/CP/R/PlotConPwrAll.R
|
PlotEstExp <- function(data1, data2,
lambda1.hat, lambda2.hat,
group1.name, group2.name) {
## Plots the survival curves
## under the exponential survival model, i. e.
## S(t) = exp(- lambda * t), lambda > 0, t >= 0,
## with the estimated parameters.
##
## Args:
## data1: Data frame which consists of at least three columns with the group
## in the first (all of group 1),
## status (1 = event, 0 = censored) in the second
## and event time in the third column.
## data2: Data frame which consists of at least three columns with the group
## in the first (all of group 2),
## status (1 = event, 0 = censored) in the second
## and event time in the third column.
## lambda1.hat: Maximum likelihood estimator for lambda1.
## lambda2.hat: Maximum likelihood estimator for lambda2.
## group1.name: Expression for group 1.
## group2.name: Expression for group 2.
##
## Results:
## Plots the survival curves
## under the exponential survival model
  ##   in a 1 x 2 plot array
## in which the first component consists of the Kaplan-Meier curves
## and the second component is still empty.
# range of time for survival curves
# of group 1 and group 2
max1 <- max(data1[, 3])
max2 <- max(data2[, 3])
# choice of suitable stepwidth of time variables
# of group 1 and group 2
if (max1 / 0.01 <= 1000) {
t1 <- seq(from = 0,
to = max1,
by = 0.01)
}
else {
t1 <- seq(from = 0,
to = max1,
length.out = 1000)
}
if (max2 / 0.01 <= 1000) {
t2 <- seq(from = 0,
to = max2,
by = 0.01)
}
else {
t2 <- seq(from = 0,
to = max2,
length.out = 1000)
}
# survival curves of group 1 and group 2
S1 <- exp(- lambda1.hat * t1)
S2 <- exp(- lambda2.hat * t2)
# plot of survival curve of group 1
graphics::lines(x = t1,
y = S1,
col = "blue")
# plot of survival curve of group 2
graphics::lines(x = t2,
y = S2,
col = "green")
graphics::legend(x = "topright",
legend = c(group1.name, group2.name),
col = c("blue", "green"),
lty = c(1, 1),
pch = c(3, 3),
bg = "white")
}
|
/scratch/gouwar.j/cran-all/cranData/CP/R/PlotEstExp.R
|
PlotEstNonMixExp <- function(data1, data2,
lambda1.hat, c1.hat,
lambda2.hat, c2.hat,
group1.name, group2.name) {
## Plots the survival curves
## under the non-mixture model with exponential survival, i. e.
## S(t) = c^[1 - exp(- lambda * t)], lambda > 0, 0 < c < 1, t >= 0,
## with the estimated parameters.
##
## Args:
## data1: Data frame which consists of at least three columns with the group
## in the first (all of group 1),
## status (1 = event, 0 = censored) in the second
## and event time in the third column.
## data2: Data frame which consists of at least three columns with the group
## in the first (all of group 2),
## status (1 = event, 0 = censored) in the second
## and event time in the third column.
## lambda1.hat: Maximum likelihood estimator for lambda1.
## c1.hat: Maximum likelihood estimator for c1.
## lambda2.hat: Maximum likelihood estimator for lambda2.
## c2.hat: Maximum likelihood estimator for c2.
## group1.name: Expression for group 1.
## group2.name: Expression for group 2.
##
## Results:
## Plots the survival curves
## under the non-mixture model with exponential survival
  ##   in a 1 x 2 plot array
## in which the first component consists of the Kaplan-Meier curves
## and the second component is still empty.
# range of time for survival curves
# of group 1 and group 2
max1 <- max(data1[, 3])
max2 <- max(data2[, 3])
# choice of suitable stepwidth of time variables
# of group 1 and group 2
if (max1 / 0.01 <= 1000) {
t1 <- seq(from = 0,
to = max1,
by = 0.01)
}
else {
t1 <- seq(from = 0,
to = max1,
length.out = 1000)
}
if (max2 / 0.01 <= 1000) {
t2 <- seq(from = 0,
to = max2,
by = 0.01)
}
else {
t2 <- seq(from = 0,
to = max2,
length.out = 1000)
}
# survival curves of group 1 and group 2
S1 <- c1.hat^(1 - exp(- lambda1.hat * t1))
S2 <- c2.hat^(1 - exp(- lambda2.hat * t2))
# plot of survival curve of group 1
graphics::lines(x = t1,
y = S1,
col = "blue")
# plot of survival curve of group 2
graphics::lines(x = t2,
y = S2,
col = "green")
graphics::legend(x = "topright",
legend = c(group1.name, group2.name),
col = c("blue", "green"),
lty = c(1, 1),
pch = c(3, 3),
bg = "white")
}
|
/scratch/gouwar.j/cran-all/cranData/CP/R/PlotEstNonMixExp.R
|
PlotEstNonMixGamma <- function(data1, data2,
a1.hat, b1.hat, c1.hat,
a2.hat, b2.hat, c2.hat,
group1.name, group2.name) {
## Plots the survival curves
## under the non-mixture model with Gamma type survival, i. e.
## S(t) = c^Gamma^(0)(a, b * t), a > 0, b > 0, 0 < c < 1, t >= 0,
  ##   with Gamma^(0) being the regularized lower incomplete Gamma function of the upper integration limit b * t,
## with the estimated parameters.
##
## Args:
## data1: Data frame which consists of at least three columns with the group
## in the first (all of group 1),
## status (1 = event, 0 = censored) in the second
## and event time in the third column.
## data2: Data frame which consists of at least three columns with the group
## in the first (all of group 2),
## status (1 = event, 0 = censored) in the second
## and event time in the third column.
## a1.hat: Maximum likelihood estimator for a1.
## b1.hat: Maximum likelihood estimator for b1.
## c1.hat: Maximum likelihood estimator for c1.
## a2.hat: Maximum likelihood estimator for a2.
  ##   b2.hat: Maximum likelihood estimator for b2.
## c2.hat: Maximum likelihood estimator for c2.
## group1.name: Expression for group 1.
## group2.name: Expression for group 2.
##
## Results:
## Plots the survival curves
## under the non-mixture model with Gamma type survival
  ##   in a 1 x 2 plot array
## in which the first component consists of the Kaplan-Meier curves
## and the second component is still empty.
# range of time for survival curves
# of group 1 and group 2
max1 <- max(data1[, 3])
max2 <- max(data2[, 3])
# choice of suitable stepwidth of time variables
# of group 1 and group 2
if (max1 / 0.01 <= 1000) {
t1 <- seq(from = 0,
to = max1,
by = 0.01)
}
else {
t1 <- seq(from = 0,
to = max1,
length.out = 1000)
}
if (max2 / 0.01 <= 1000) {
t2 <- seq(from = 0,
to = max2,
by = 0.01)
}
else {
t2 <- seq(from = 0,
to = max2,
length.out = 1000)
}
# survival curves of group 1 and group 2
S1 <- c1.hat^stats::pgamma(q = t1,
shape = a1.hat,
rate = b1.hat)
S2 <- c2.hat^stats::pgamma(q = t2,
shape = a2.hat,
rate = b2.hat)
# plot of survival curve of group 1
graphics::lines(x = t1,
y = S1,
col = "blue")
# plot of survival curve of group 2
graphics::lines(x = t2,
y = S2,
col = "green")
graphics::legend(x = "topright",
legend = c(group1.name, group2.name),
col = c("blue", "green"),
lty = c(1, 1),
pch = c(3, 3),
bg = "white")
}
|
/scratch/gouwar.j/cran-all/cranData/CP/R/PlotEstNonMixGamma.R
|
PlotEstNonMixWei <- function(data1, data2,
lambda1.hat, k1.hat, c1.hat,
lambda2.hat, k2.hat, c2.hat,
group1.name, group2.name) {
## Plots the survival curves
## under the non-mixture model with Weibull type survival, i. e.
## S(t) = c^[1 - exp(- lambda * t^k)], lambda > 0, k > 0, 0 < c < 1, t >= 0,
## with the estimated parameters.
##
## Args:
## data1: Data frame which consists of at least three columns with the group
## in the first (all of group 1),
## status (1 = event, 0 = censored) in the second
## and event time in the third column.
## data2: Data frame which consists of at least three columns with the group
## in the first (all of group 2),
## status (1 = event, 0 = censored) in the second
## and event time in the third column.
## lambda1.hat: Maximum likelihood estimator for lambda1.
## k1.hat: Maximum likelihood estimator for k1.
## c1.hat: Maximum likelihood estimator for c1.
## lambda2.hat: Maximum likelihood estimator for lambda2.
  ##   k2.hat: Maximum likelihood estimator for k2.
## c2.hat: Maximum likelihood estimator for c2.
## group1.name: Expression for group 1.
## group2.name: Expression for group 2.
##
## Results:
## Plots the survival curves
## under the non-mixture model with Weibull type survival
  ##   in a 1 x 2 plot array
## in which the first component consists of the Kaplan-Meier curves
## and the second component is still empty.
# range of time for survival curves
# of group 1 and group 2
max1 <- max(data1[, 3])
max2 <- max(data2[, 3])
# choice of suitable stepwidth of time variables
# of group 1 and group 2
if (max1 / 0.01 <= 1000) {
t1 <- seq(from = 0,
to = max1,
by = 0.01)
}
else {
t1 <- seq(from = 0,
to = max1,
length.out = 1000)
}
if (max2 / 0.01 <= 1000) {
t2 <- seq(from = 0,
to = max2,
by = 0.01)
}
else {
t2 <- seq(from = 0,
to = max2,
length.out = 1000)
}
# survival curves of group 1 and group 2
S1 <- c1.hat^(1 - exp(- lambda1.hat * t1^k1.hat))
S2 <- c2.hat^(1 - exp(- lambda2.hat * t2^k2.hat))
# plot of survival curve of group 1
graphics::lines(x = t1,
y = S1,
col = "blue")
# plot of survival curve of group 2
graphics::lines(x = t2,
y = S2,
col = "green")
graphics::legend(x = "topright",
legend = c(group1.name, group2.name),
col = c("blue", "green"),
lty = c(1, 1),
pch = c(3, 3),
bg = "white")
}
|
/scratch/gouwar.j/cran-all/cranData/CP/R/PlotEstNonMixWei.R
|
PlotKM <- function(data, model.name) {
## Plots the Kaplan-Meier curves for the passed data frame.
##
## Args:
## data: Data frame which consists of at least three columns with the group
## (values 1 and 2) in the first,
## status (1 = event, 0 = censored) in the second
## and event time in the third column.
## model.name: Name of the used model for estimation.
##
## Returns:
## Plots the Kaplan-Meier curves
  ##   being the first component in a 1 x 2 array
## in which the second component is still empty.
# package 'survival' for survival analysis
km <- survival::survfit(formula = survival::Surv(time = data[, 3],
event = data[, 2]) ~ data[, 1])
# plot of Kaplan-Meier curves
graphics::plot(x = km,
main = model.name,
xlab = "Time",
ylab = "Survival",
col = c("blue", "green"),
ylim = c(0, 1))
}
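# Illustrative call order (a sketch; in the package the exported power
# calculation routines set up the 1 x 2 plot array and call these helpers).
# PlotKM() draws the Kaplan-Meier curves first, and the PlotEst*() helpers
# defined above then overlay the fitted survival curves via graphics::lines().
# With hypothetical estimates:
# PlotKM(data = dat, model.name = "Exponential")
# PlotEstExp(data1 = dat1, data2 = dat2,
#            lambda1.hat = 0.05, lambda2.hat = 0.03,
#            group1.name = "A", group2.name = "B")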
|
/scratch/gouwar.j/cran-all/cranData/CP/R/PlotKM.R
|
SplitData <- function(data) {
## Splits data frame into two data frames, each for one group,
## and converts group expressions for internal calculations
## into values 1 and 2.
##
## Args:
## data: Data frame which consists of at least three columns with the group
## (two different expressions) in the first,
## status (1 = event, 0 = censored) in the second
## and event time in the third column.
##
## Results:
## Returns two data frames, each for one group,
## and the original names of the two groups.
data0 <- split(x = data,
f = data[, 1])
data1 <- data0[[1]]
data2 <- data0[[2]]
group1.name <- as.character(x = data1[1, 1])
group2.name <- as.character(x = data2[1, 1])
data1[, 1] <- as.numeric(x = 1)
data2[, 1] <- as.numeric(x = 2)
return(list(data1, group1.name, data2, group2.name))
}
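# Usage sketch with hypothetical data:
# dat <- data.frame(group  = c("A", "A", "B", "B"),
#                   status = c(1, 0, 1, 1),
#                   time   = c(2.3, 5.1, 1.7, 4.0))
# parts <- SplitData(dat)
# parts[[1]]  # data frame of the first group, group column recoded to 1
# parts[[2]]  # "A", the original name of group 1
# parts[[3]]  # data frame of the second group, group column recoded to 2
# parts[[4]]  # "B", the original name of group 2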
|
/scratch/gouwar.j/cran-all/cranData/CP/R/SplitData.R
|
################################################################################
# ChangePointTests.R
################################################################################
# 2018-08-10
# Curtis Miller
################################################################################
# Definition of all change point statistical tests.
################################################################################
################################################################################
# GENERAL STATISTICAL FUNCTIONS
################################################################################
#' Variance Estimation Consistent Under Change
#'
#' Estimate the variance (using the sum of squared errors) with an estimator
#' that is consistent when the mean changes at a known point.
#'
#' This is the estimator
#'
#' \deqn{\hat{\sigma}^2_{T,t} = T^{-1}\left(\sum_{s = 1}^t \left(X_s -
#' \bar{X}_t\right)^2 + \sum_{s = t + 1}^{T}\left(X_s - \tilde{X}_{T - t}
#' \right)^2\right)}
#'
#' where \eqn{\bar{X}_t = t^{-1}\sum_{s = 1}^t X_s} and \eqn{\tilde{X}_{T - t} =
#' (T - t)^{-1} \sum_{s = t + 1}^{T} X_s}. In this implementation, \eqn{T} is
#' computed automatically as \code{length(x)} and \code{k} corresponds to
#' \eqn{t}, a potential change point.
#'
#' @param x A numeric vector for the data set
#' @param k The potential change point at which the data set is split
#' @import stats
#' @return The estimated change-consistent variance
#' @examples
#' CPAT:::cpt_consistent_var(c(rnorm(500, mean = 0), rnorm(500, mean = 1)), k = 500)
cpt_consistent_var <- function(x, k) {
n <- length(x)
  if (n < k | k < 1) {stop("k must be an integer between 1 and length(x)")}
x1 <- x[1:k]
x2 <- x[(k + 1):n]
mu1 <- mean(x1)
mu2 <- mean(x2)
sse1 <- sum((x1 - mu1)^2)
sse2 <- sum((x2 - mu2)^2)
(sse1 + sse2)/n
}
#' Weights for Long-Run Variance
#'
#' Compute some weights for long-run variance. This code comes directly from the
#' source code of \pkg{cointReg}; see \code{\link[cointReg]{getLongRunWeights}}.
#'
#' @param n Length of weights' vector
#' @param bandwidth A number for the bandwidth
#' @param kernel The kernel function; see \code{\link[cointReg]{getLongRunVar}}
#' for possible values
#' @return List with components \code{w} containing the vector of weights and
#' \code{upper}, the index of the largest non-zero entry in \code{w}
#' @examples
#' CPAT:::getLongRunWeights(10, 1)
getLongRunWeights <- function(n, bandwidth, kernel = "ba") {
w <- numeric(n - 1)
  bw <- bandwidth
if (kernel == "tr") {
w <- w + 1
upper <- min(bw, n - 1)
}
else if (kernel == "ba") {
upper <- ceiling(bw) - 1
if (upper > 0) {
j <- 1:upper
}
else {
j <- 1
}
w[j] <- 1 - j/bw
}
else if (kernel == "pa") {
upper1 <- floor(bw/2)
if (upper1 > 0) {
j <- 1:upper1
}
else {
j <- 1
}
jj <- j/bw
w[j] <- 1 - 6 * jj^2 + 6 * jj^3
j2 <- (floor(bw/2) + 1):bw
jj2 <- j2/bw
w[j2] <- 2 * (1 - jj2)^3
upper <- ceiling(bw) - 1
}
else if (kernel == "bo") {
upper <- ceiling(bw) - 1
if (upper > 0) {
j <- 1:upper
}
else {
j <- 1
}
jj <- j/bw
w[j] <- (1 - jj) * cos(pi * jj) + sin(pi * jj)/pi
}
else if (kernel == "da") {
upper <- n - 1
j <- 1:upper
w[j] <- sin(pi * j/bw)/(pi * j/bw)
}
else if (kernel == "qs") {
sc <- 1.2 * pi
upper <- n - 1
j <- 1:upper
jj <- j/bw
w[j] <- 25/(12 * pi^2 * jj^2) * (sin(sc * jj)/(sc * jj) -
cos(sc * jj))
}
if (upper <= 0)
upper <- 1
list(w = w, upper = upper)
}
#' Long-Run Variance Estimation With Possible Change Points
#'
#' Computes the estimates of the long-run variance in a change point context, as
#' described in \insertCite{horvathricemiller19}{CPAT}. By default it uses
#' kernel and bandwidth selection as used in the package \pkg{cointReg}, though
#' changing the parameters \code{kernel} and \code{bandwidth} can change this
#' behavior. If \pkg{cointReg} is not installed, the Bartlett kernel (defined
#' internally) will be used and the bandwidth will be the square root of the
#' sample size.
#'
#' @param dat The data vector
#' @param kernel If character, the identifier of the kernel function as used in
#' \pkg{cointReg} (see \code{\link[cointReg]{getLongRunVar}}); if
#' function, the kernel function to be used for long-run variance
#' estimation (default is the Bartlett kernel in \pkg{cointReg})
#' @param bandwidth If character, the identifier for how to compute the
#' bandwidth as defined in \pkg{cointReg} (see
#' \code{\link[cointReg]{getBandwidth}}); if function, a function
#' to use for computing the bandwidth; if numeric, the bandwidth
#' value to use (the default is to use Andrews' method, as used in
#' \pkg{cointReg})
#' @return A vector of estimates of the long-run variance
#' @references
#' \insertAllCited{}
#' @examples
#' x <- rnorm(1000)
#' CPAT:::get_lrv_vec(x)
#' CPAT:::get_lrv_vec(x, kernel = "pa", bandwidth = "nw")
get_lrv_vec <- function(dat, kernel = "ba", bandwidth = "and") {
has_cointreg <- requireNamespace("cointReg", quietly = TRUE)
n <- length(dat)
if (is.character(bandwidth)) {
if (!has_cointreg) {
warning("cointReg is not installed! Defaulting to sqrt.")
bandwidth <- sqrt
} else {
if (is.character(kernel) &&
kernel %in% c("ba", "pa", "qs", "th", "tr")) {
kervar <- kernel
} else {
kervar = "ba"
}
h <- cointReg::getBandwidth(dat, bandwidth = bandwidth, kernel = kervar)
}
} else if (is.numeric(bandwidth)) {
if (bandwidth <= 0) {
stop("Bandwidth must be greater than zero.")
} else {
h <- bandwidth
}
} else if (!is.function(bandwidth)) {
stop(paste("bandwidth must be a function, a valid character string, or a",
"non-negative number."))
} else {
h <- bandwidth(n)
}
if (is.character(kernel)) {
kern_vals <- c(1, getLongRunWeights(n, kernel = kernel,
bandwidth = h)$w[-n])
} else if (!is.function(kernel)) {
stop("kernel must be a function or a valid character string.")
} else {
kern_vals <- sapply(1:(n - 1)/h, kernel)
}
# The maximum lag that needs to be checked
max_l <- max(which(kern_vals != 0))
# dat <- rnorm(100)
#dat <- 1:10
dat_mat <- matrix(rep(dat, times = n), byrow = FALSE, nrow = n)
dat_lower <- dat_mat; dat_upper <- dat_mat;
dat_lower[lower.tri(dat_mat, diag = FALSE)] <- NA
dat_upper[!lower.tri(dat_mat, diag = FALSE)] <- NA
x_bar <- colMeans(dat_lower, na.rm = TRUE)
x_tilda <- colMeans(dat_upper, na.rm = TRUE)
mean_mat <- matrix(rep(x_bar, times = n), byrow = TRUE, nrow = n)
mean_mat[lower.tri(diag(n), diag = FALSE)] <- matrix(rep(x_tilda, times = n),
byrow = TRUE,
nrow = n)[lower.tri(
diag(n), diag = FALSE)]
# l increases by row (starts at 0), t by column
y <- dat_mat - mean_mat
# Function implemented in Rcpp for speed
# It accepts the matrix y and the evaluation of the kernel function stored
# in kern_vals, and returns a vector containing estimated long-run variances
# at points t
sigma <- get_lrv_vec_cpp(y, kern_vals, max_l)
# "Equivalent" R code (slower)
# covs <- sapply(1:n, function(t) {
# sapply(0:(n - 1), function(l) {
# mean(y[1:(n - l),t]*y[(1 + l):n,t])
# })
# })
#
# sigma <- sapply(2:(n - 2), function(t) {
# covs[0 + 1, t] + 2 * sum(vkernel(1:(n - 1)/h) * covs[1:(n - 1), t])
# })
if (any(sigma < 0)) {
warning(paste("A negative variance was computed! This may be due to a bad",
"kernel being chosen."))
}
sigma
}
################################################################################
# TEST STATISTIC COMPUTATION FUNCTIONS
################################################################################
#' Compute the CUSUM Statistic
#'
#' This function computes the CUSUM statistic (and can compute weighted/trimmed
#' variants, depending on the values of \code{kn} and \code{tau}).
#'
#' The definition of the statistic is
#'
#' \deqn{T^{-1/2} \max_{1 \leq t \leq T} \hat{\sigma}_{t,T}^{-1} \left|
#' \sum_{s = 1}^t X_s - \frac{t}{T}\sum_{s = 1}^T X_s \right|}
#'
#' A more general version is
#'
#' \deqn{T^{-1/2} \max_{t_T \leq t \leq T - t_T} \hat{\sigma}_{t,T}^{-1}
#' \left(\frac{t}{T}
#' \left(\frac{T - t}{T}\right)\right)^{\tau} \left| \sum_{s = 1}^t X_s -
#' \frac{t}{T}\sum_{s = 1}^T X_s \right|}
#'
#' The parameter \code{kn} corresponds to the trimming parameter \eqn{t_T} and
#' the parameter \code{tau} corresponds to \eqn{\tau}.
#'
#' See \insertCite{horvathricemiller19}{CPAT} for more details.
#'
#' @param dat The data vector
#' @param kn A function corresponding to the trimming parameter \eqn{t_T} in the
#' trimmed CUSUM variant; by default, is a function returning 1 (for
#' no trimming)
#' @param tau The weighting parameter \eqn{\tau} for the weighted CUSUM
#' statistic; by default, is 0 (for no weighting)
#' @param estimate Set to \code{TRUE} to return the estimated location of the
#' change point
#' @param use_kernel_var Set to \code{TRUE} to use kernel methods for long-run
#' variance estimation (typically used when the data is
#' believed to be correlated); if \code{FALSE}, then the
#' long-run variance is estimated using
#' \eqn{\hat{\sigma}^2_{T,t} = T^{-1}\left(
#' \sum_{s = 1}^t \left(X_s - \bar{X}_t\right)^2 +
#' \sum_{s = t + 1}^{T}\left(X_s -
#' \tilde{X}_{T - t}\right)^2\right)}, where
#' \eqn{\bar{X}_t = t^{-1}\sum_{s = 1}^t X_s} and
#' \eqn{\tilde{X}_{T - t} = (T - t)^{-1}
#' \sum_{s = t + 1}^{T} X_s}
#' @param custom_var Can be a vector the same length as \code{dat} consisting of
#' variance-like numbers at each potential change point (so
#' each entry of the vector would be the "best estimate" of
#' the long-run variance if that location were where the
#'                   change point occurred) or a function taking two parameters
#' \code{x} and \code{k} that can be used to generate this
#' vector, with \code{x} representing the data vector and
#' \code{k} the position of a potential change point; if
#' \code{NULL}, this argument is ignored
#' @param kernel If character, the identifier of the kernel function as used in
#' \pkg{cointReg} (see \code{\link[cointReg]{getLongRunVar}}); if
#' function, the kernel function to be used for long-run variance
#' estimation (default is the Bartlett kernel in \pkg{cointReg})
#' @param bandwidth If character, the identifier for how to compute the
#' bandwidth as defined in \pkg{cointReg} (see
#' \code{\link[cointReg]{getBandwidth}}); if function, a function
#' to use for computing the bandwidth; if numeric, the bandwidth
#' value to use (the default is to use Andrews' method, as used in
#' \pkg{cointReg})
#' @param get_all_vals If \code{TRUE}, return all values for the statistic at
#' every tested point in the data set
#' @return If both \code{estimate} and \code{get_all_vals} are \code{FALSE}, the
#' value of the test statistic; otherwise, a list that contains the test
#' statistic and the other values requested (if both are \code{TRUE},
#' the test statistic is in the first position and the estimated change
#' point in the second)
#' @references
#' \insertAllCited{}
#' @examples
#' CPAT:::stat_Vn(rnorm(1000))
#' CPAT:::stat_Vn(rnorm(1000), kn = function(n) {0.1 * n}, tau = 1/2)
#' CPAT:::stat_Vn(rnorm(1000), use_kernel_var = TRUE, bandwidth = "nw", kernel = "bo")
stat_Vn <- function(dat, kn = function(n) {1}, tau = 0, estimate = FALSE,
use_kernel_var = FALSE, custom_var = NULL, kernel = "ba",
bandwidth = "and", get_all_vals = FALSE) {
# Formerly named statVn()
# Here is equivalent (slow) R code
# n = length(dat)
# return(n^(-1/2)*max(sapply(
# floor(max(kn(n),1)):min(n - floor(kn(n)),n-1), function(k)
# (k/n*(1 - k/n))^(-tau)/
# sqrt((sum((dat[1:k] -
# mean(dat[1:k]))^2)+sum((dat[(k+1):n] -
# mean(dat[(k+1):n]))^2))/n)*
# abs(sum(dat[1:k]) - k/n*sum(dat))
# )))
if (use_kernel_var) {
lrv <- get_lrv_vec(dat, kernel, bandwidth)
} else if (!is.null(custom_var)) {
    use_kernel_var <- TRUE # Otherwise stat_Vn_cpp() will ignore lrv
if (is.function(custom_var)) {
# This may seem silly, but this is so that error codes refer to
# custom_var, and we don't want recursion either
custom_var_temp <- custom_var
custom_var <- purrr::partial(custom_var_temp, x = dat, .lazy = FALSE)
custom_var_vec <- vapply(1:length(dat), custom_var,
FUN.VALUE = numeric(1))
} else if (is.numeric(custom_var)) {
if (length(custom_var) < length(dat)) stop("custom_var must have" %s%
"length at least" %s%
length(dat) %s0% ", the" %s%
"length of the data set")
custom_var_vec <- custom_var
} else {
stop("Don't know how to handle custom_var of class" %s% class(custom_var))
}
if (any(custom_var_vec < 0)) stop("custom_var suggests a negative" %s%
"variance, which is impossible.")
lrv <- custom_var_vec
} else {
    # A vector must be passed to stat_Vn_cpp, so we'll pass one with an
# impossible value
lrv <- c(-1)
}
res <- stat_Vn_cpp(dat, kn(length(dat)), tau, use_kernel_var, lrv,
get_all_vals)
res[[2]] <- as.integer(res[[2]])
if (!estimate & !get_all_vals) {
return(res[[1]])
} else {
return(res[c(TRUE, estimate, get_all_vals)])
}
}
#' Compute the Darling-Erdös Statistic
#'
#' This function computes the Darling-Erdös statistic.
#'
#' If \eqn{\bar{A}_T(\tau, t_T)} is the weighted and trimmed CUSUM statistic
#' with weighting parameter \eqn{\tau} and trimming parameter \eqn{t_T} (see
#' \code{\link{stat_Vn}}), then the Darling-Erdös statistic is
#'
#' \deqn{l(a_T) \bar{A}_T(1/2, 1) - u(b_T)}
#'
#' with \eqn{l(x) = \sqrt{2 \log x}} and \eqn{u(x) = 2 \log x + \frac{1}{2} \log
#' \log x - \frac{1}{2} \log \pi} (\eqn{\log x} is the natural logarithm of
#' \eqn{x}). The parameter \code{a} corresponds to \eqn{a_T} and \code{b} to
#' \eqn{b_T}; these are both \code{log} by default.
#'
#' See \insertCite{horvathricemiller19}{CPAT} to learn more.
#'
#' @param dat The data vector
#' @param a The function that will be composed with
#' \eqn{l(x) = (2 \log x)^{1/2}}
#' @param b The function that will be composed with
#' \eqn{u(x) = 2 \log x + \frac{1}{2} \log \log x - \frac{1}{2} \log
#' \pi}
#' @param estimate Set to \code{TRUE} to return the estimated location of the
#' change point
#' @param use_kernel_var Set to \code{TRUE} to use kernel methods for long-run
#' variance estimation (typically used when the data is
#' believed to be correlated); if \code{FALSE}, then the
#' long-run variance is estimated using
#' \eqn{\hat{\sigma}^2_{T,t} = T^{-1}\left(
#' \sum_{s = 1}^t \left(X_s - \bar{X}_t\right)^2 +
#' \sum_{s = t + 1}^{T}\left(X_s -
#' \tilde{X}_{T - t}\right)^2\right)}, where
#' \eqn{\bar{X}_t = t^{-1}\sum_{s = 1}^t X_s} and
#' \eqn{\tilde{X}_{T - t} = (T - t)^{-1}
#' \sum_{s = t + 1}^{T} X_s}
#' @param kernel If character, the identifier of the kernel function as used in
#' \pkg{cointReg} (see \code{\link[cointReg]{getLongRunVar}}); if
#' function, the kernel function to be used for long-run variance
#' estimation (default is the Bartlett kernel in \pkg{cointReg})
#' @param bandwidth If character, the identifier for how to compute the
#' bandwidth as defined in \pkg{cointReg} (see
#' \code{\link[cointReg]{getBandwidth}}); if function, a function
#' to use for computing the bandwidth; if numeric, the bandwidth
#' value to use (the default is to use Andrews' method, as used in
#' \pkg{cointReg})
#' @param custom_var Can be a vector the same length as \code{dat} consisting of
#' variance-like numbers at each potential change point (so
#' each entry of the vector would be the "best estimate" of
#' the long-run variance if that location were where the
#'                   change point occurred) or a function taking two parameters
#' \code{x} and \code{k} that can be used to generate this
#' vector, with \code{x} representing the data vector and
#' \code{k} the position of a potential change point; if
#' \code{NULL}, this argument is ignored
#' @param get_all_vals If \code{TRUE}, return all values for the statistic at
#' every tested point in the data set
#' @return If both \code{estimate} and \code{get_all_vals} are \code{FALSE}, the
#' value of the test statistic; otherwise, a list that contains the test
#' statistic and the other values requested (if both are \code{TRUE},
#'         the test statistic is in the first position and the estimated change
#' point in the second)
#' @references
#' \insertAllCited{}
#' @examples
#' CPAT:::stat_de(rnorm(1000))
#' CPAT:::stat_de(rnorm(1000), use_kernel_var = TRUE, bandwidth = "nw", kernel = "bo")
stat_de <- function(dat, a = log, b = log, estimate = FALSE,
use_kernel_var = FALSE, custom_var = NULL, kernel = "ba",
bandwidth = "and", get_all_vals = FALSE) {
# Formerly known as statDE()
n <- length(dat)
l <- function(x) {sqrt(2*log(x))}
u <- function(x) {2*log(x) + 1/2*log(log(x)) - 1/2*log(pi)}
res <- stat_Vn(dat, kn = function(n) {1}, tau = 1/2, estimate = TRUE,
use_kernel_var = use_kernel_var, kernel = kernel,
bandwidth = bandwidth, get_all_vals = get_all_vals,
custom_var = custom_var)
res[[2]] <- as.integer(res[[2]])
  res[[1]] <- l(a(n)) * res[[1]] - u(b(n))
if (get_all_vals) {
    res[[3]] <- l(a(n)) * res[[3]] - u(b(n))
}
if (!estimate & !get_all_vals) {
return(res[[1]])
} else {
return(res[c(TRUE, estimate, get_all_vals)])
}
}
#' Compute the Hidalgo-Seo Statistic
#'
#' This function computes the Hidalgo-Seo statistic for a change in mean model.
#'
#' For a data set \eqn{x_t} with \eqn{n} observations, the test statistic is
#'
#' \deqn{\max_{1 \leq s \leq n - 1} (\mathcal{LM}(s) - B_n)/A_n}
#'
#' where \eqn{\hat{u}_t = x_t - \bar{x}} (\eqn{\bar{x}} is the sample mean),
#' \eqn{a_n = (2 \log \log n)^{1/2}}, \eqn{b_n = a_n^2 - \frac{1}{2} \log \log
#' \log n - \log \Gamma (1/2)}, \eqn{A_n = b_n / a_n^2}, \eqn{B_n =
#' b_n^2/a_n^2}, \eqn{\hat{\Delta} = \hat{\sigma}^2 = n^{-1} \sum_{t = 1}^{n}
#' \hat{u}_t^2}, and \eqn{\mathcal{LM}(s) = n (n - s)^{-1} s^{-1}
#' \hat{\Delta}^{-1} \left( \sum_{t = 1}^{s} \hat{u}_t\right)^2}.
#'
#' If \code{corr} is \code{FALSE}, then the residuals are assumed to be
#' uncorrelated. Otherwise, the residuals are assumed to be correlated and
#' \eqn{\hat{\Delta} = \hat{\gamma}(0) + 2 \sum_{j = 1}^{\lfloor \sqrt{n}
#' \rfloor} (1 - \frac{j}{\sqrt{n}}) \hat{\gamma}(j)} with \eqn{\hat{\gamma}(j)
#' = \frac{1}{n}\sum_{t = 1}^{n - j} \hat{u}_t \hat{u}_{t + j}}.
#'
#' This statistic was presented in \insertCite{hidalgoseo13}{CPAT}.
#'
#' @param dat The data vector
#' @param estimate Set to \code{TRUE} to return the estimated location of the
#' change point
#' @param corr If \code{TRUE}, the long-run variance will be computed under the
#' assumption of correlated residuals; ignored if \code{custom_var}
#' is not \code{NULL} or \code{use_kernel_var} is \code{TRUE}
#' @param get_all_vals If \code{TRUE}, return all values for the statistic at
#' every tested point in the data set
#' @param custom_var Can be a vector the same length as \code{dat} consisting of
#' variance-like numbers at each potential change point (so
#' each entry of the vector would be the "best estimate" of
#' the long-run variance if that location were where the
#'                   change point occurred) or a function taking two parameters
#' \code{x} and \code{k} that can be used to generate this
#' vector, with \code{x} representing the data vector and
#' \code{k} the position of a potential change point; if
#' \code{NULL}, this argument is ignored
#' @param use_kernel_var Set to \code{TRUE} to use kernel methods for long-run
#' variance estimation (typically used when the data is
#' believed to be correlated); if \code{FALSE}, then the
#' long-run variance is estimated using
#' \eqn{\hat{\sigma}^2_{T,t} = T^{-1}\left(
#' \sum_{s = 1}^t \left(X_s - \bar{X}_t\right)^2 +
#' \sum_{s = t + 1}^{T}\left(X_s -
#' \tilde{X}_{T - t}\right)^2\right)}, where
#' \eqn{\bar{X}_t = t^{-1}\sum_{s = 1}^t X_s} and
#' \eqn{\tilde{X}_{T - t} = (T - t)^{-1}
#' \sum_{s = t + 1}^{T} X_s}; if \code{custom_var} is not
#' \code{NULL}, this argument is ignored
#' @param kernel If character, the identifier of the kernel function as used in
#' \pkg{cointReg} (see \code{\link[cointReg]{getLongRunVar}}); if
#' function, the kernel function to be used for long-run variance
#' estimation (default is the Bartlett kernel in \pkg{cointReg})
#' @param bandwidth If character, the identifier for how to compute the
#' bandwidth as defined in \pkg{cointReg} (see
#' \code{\link[cointReg]{getBandwidth}}); if function, a function
#' to use for computing the bandwidth; if numeric, the bandwidth
#' value to use (the default is to use Andrews' method, as used in
#' \pkg{cointReg})
#' @return If both \code{estimate} and \code{get_all_vals} are \code{FALSE}, the
#' value of the test statistic; otherwise, a list that contains the test
#' statistic and the other values requested (if both are \code{TRUE},
#' the test statistic is in the first position and the estimated change
#' point in the second)
#' @references
#' \insertAllCited{}
#' @examples
#' CPAT:::stat_hs(rnorm(1000))
#' CPAT:::stat_hs(rnorm(1000), corr = FALSE)
stat_hs <- function(dat, estimate = FALSE, corr = TRUE, get_all_vals = FALSE,
custom_var = NULL, use_kernel_var = FALSE, kernel = "ba",
bandwidth = "and") {
# Formerly named statHS()
n <- length(dat)
mu <- mean(dat)
u <- dat - mu
if (corr) {
m <- sqrt(n)
Delta <- sum(u^2) / n + sum(sapply(1:m, function(j) {
2 * (1 - j/m) * sum(u[(j + 1):n] * u[1:(n - j)])/n
}))
} else {
# Delta will be the analog of lrv in stat_Zn, but this variable was already
# defined and the naming is in alignment with Hidalgo's paper
Delta <- ((n-1)/n) * var(u)
}
# To implement kernel LRV estimation that is consistent under H_A, I play
# around with custom_var; this will be the avenue through which that
# functionality is implemented.
if (is.null(custom_var)) {
if (use_kernel_var) {
Delta <- get_lrv_vec(dat, kernel, bandwidth)
} else {
Delta <- rep(Delta, length(dat))
}
} else {
if (is.function(custom_var)) {
# To allow greater flexibility, Delta will be made a vector so that Delta
# can take values at different possible change points; this allows for
# greater experimentation on our part
# This may seem silly, but this is so that error codes refer to
# custom_var, and we don't want recursion either
custom_var_temp <- custom_var
custom_var <- purrr::partial(custom_var_temp, x = dat, .lazy = FALSE)
custom_var_vec <- vapply(1:length(dat), custom_var,
FUN.VALUE = numeric(1))
} else if (is.numeric(custom_var)) {
if (length(custom_var) < length(dat)) stop("custom_var must have" %s%
"length at least" %s%
length(dat) %s0% ", the" %s%
"length of the data set")
custom_var_vec <- custom_var
} else {
stop("Don't know how to handle custom_var of class" %s% class(custom_var))
}
if (any(custom_var_vec < 0, na.rm = TRUE)) {
stop("custom_var suggests a negative variance, which is impossible.")
}
Delta <- custom_var_vec
}
la_mu <- sapply(1:(n - 1), function(s) {
n/(n - s) * 1/s * sum(u[1:s])^2/Delta[s]
})
a_n <- sqrt(2 * log(log(n)))
b_n <- 2 * log(log(n)) - log(log(log(n)))/2 - log(pi/4)/2
A_n <- b_n/a_n^2 # Matching notation in Hidalgo and Seo (2013)
B_n <- A_n * b_n
stat <- (max(la_mu) - B_n)/A_n
est <- which.max(la_mu)
res <- list("statistic" = stat,
"estimate" = est,
"stat_vals" = (la_mu - B_n)/A_n)
if (!estimate & !get_all_vals) {
return(res[[1]])
} else {
return(res[c(TRUE, estimate, get_all_vals)])
}
}
#' Univariate Andrews Test for End-of-Sample Structural Change
#'
#' This implements Andrews' test for end-of-sample change, as described by
#' \insertCite{andrews03;textual}{CPAT}. This test was derived for detecting a
#' change in univariate data. See \insertCite{andrews03}{CPAT} for
#' a description of the test.
#'
#' @param x Vector of the data to test
#' @param M Numeric index of the location of the first potential change point
#' @param pval If \code{TRUE}, return a p-value
#' @param stat If \code{TRUE}, return a test statistic
#' @return If both \code{pval} and \code{stat} are \code{TRUE}, a list
#' containing both; otherwise, a number for one or the other, depending
#' on which is \code{TRUE}
#' @references
#' \insertAllCited{}
#' @examples
#' CPAT:::andrews_test(rnorm(1000), M = 900)
andrews_test <- function(x, M, pval = TRUE, stat = TRUE) {
mu <- mean(x)
u <- x - mu
m <- length(x) - M # Deriving m and n as described in Andrews (2003)
n <- length(x) - m
Sigma <- Reduce("+", lapply(1:(n + 1), function(j)
{u[j:(j + m - 1)] %*% t(u[j:(j + m - 1)])})) /
(n + 1)
S <- (t(u[(n + 1):(n + m)]) %*% solve(Sigma) %*% u[(n + 1):(n + m)])[1,1]
submu <- sapply(1:(n - m + 1),
function(j) {
(sum(x[1:(j - 1)]) + sum(u[(j + ceiling(m/2)):n]))/
(n - ceiling(m/2))})
Sj <- sapply(1:(n - m + 1), function(j) {
uj <- x - submu[j]
(t(uj[j:(j + m - 1)]) %*% solve(Sigma) %*%
uj[j:(j + m - 1)])[1,1]
})
res <- list("pval" = mean(S <= Sj), "stat" = S)[c(pval, stat)]
if (length(res) == 1) {
return(res[[1]])
} else {
return(res)
}
}
#' Compute the Rényi-Type Statistic
#'
#' This function computes the Rényi-type statistic.
#'
#' The definition of the statistic is
#'
#' \deqn{\max_{t_T \leq t \leq T - t_T} \hat{\sigma}_{t,T}^{-1}
#' \left|t^{-1}\sum_{s = 1}^{t}X_s - (T - t)^{-1}\sum_{s = t + 1}^{T}
#' X_s \right|}
#'
#' The parameter \code{kn} corresponds to the trimming parameter \eqn{t_T}.
#'
#' @param dat The data vector
#' @param kn A function corresponding to the trimming parameter \eqn{t_T}; by
#' default, the square root function
#' @param estimate Set to \code{TRUE} to return the estimated location of the
#' change point
#' @param use_kernel_var Set to \code{TRUE} to use kernel methods for long-run
#' variance estimation (typically used when the data is
#' believed to be correlated); if \code{FALSE}, then the
#' long-run variance is estimated using
#' \eqn{\hat{\sigma}^2_{T,t} = T^{-1}\left(
#' \sum_{s = 1}^t \left(X_s - \bar{X}_t\right)^2 +
#' \sum_{s = t + 1}^{T}\left(X_s -
#' \tilde{X}_{T - t}\right)^2\right)}, where
#' \eqn{\bar{X}_t = t^{-1}\sum_{s = 1}^t X_s} and
#' \eqn{\tilde{X}_{T - t} = (T - t)^{-1}
#' \sum_{s = t + 1}^{T} X_s}; if \code{custom_var} is not
#' \code{NULL}, this argument is ignored
#' @param custom_var Can be a vector the same length as \code{dat} consisting of
#' variance-like numbers at each potential change point (so
#' each entry of the vector would be the "best estimate" of
#' the long-run variance if that location were where the
#'                   change point occurred) or a function taking two parameters
#' \code{x} and \code{k} that can be used to generate this
#' vector, with \code{x} representing the data vector and
#' \code{k} the position of a potential change point; if
#' \code{NULL}, this argument is ignored
#' @param kernel If character, the identifier of the kernel function as used in
#' \pkg{cointReg} (see \code{\link[cointReg]{getLongRunVar}}); if
#' function, the kernel function to be used for long-run variance
#' estimation (default is the Bartlett kernel in \pkg{cointReg})
#' @param bandwidth If character, the identifier for how to compute the
#' bandwidth as defined in \pkg{cointReg} (see
#' \code{\link[cointReg]{getBandwidth}}); if function, a function
#' to use for computing the bandwidth; if numeric, the bandwidth
#' value to use (the default is to use Andrews' method, as used in
#' \pkg{cointReg})
#' @param get_all_vals If \code{TRUE}, return all values for the statistic at
#' every tested point in the data set
#' @return If both \code{estimate} and \code{get_all_vals} are \code{FALSE}, the
#' value of the test statistic; otherwise, a list that contains the test
#' statistic and the other values requested (if both are \code{TRUE},
#' the test statistic is in the first position and the estimated change
#' point in the second)
#' @examples
#' CPAT:::stat_Zn(rnorm(1000))
#' CPAT:::stat_Zn(rnorm(1000), kn = function(n) {floor(log(n))})
#' CPAT:::stat_Zn(rnorm(1000), use_kernel_var = TRUE, bandwidth = "nw",
#' kernel = "bo")
stat_Zn <- function(dat, kn = function(n) {floor(sqrt(n))}, estimate = FALSE,
use_kernel_var = FALSE, custom_var = NULL, kernel = "ba",
bandwidth = "and", get_all_vals = FALSE) {
# Formerly known as statZn()
if (use_kernel_var) {
lrv <- get_lrv_vec(dat, kernel, bandwidth)
} else if (!is.null(custom_var)) {
use_kernel_var <- TRUE # Otherwise stat_Zn_cpp() will ignore lrv
if (is.function(custom_var)) {
# This may seem silly, but this is so that error codes refer to
# custom_var, and we don't want recursion either
custom_var_temp <- custom_var
custom_var <- purrr::partial(custom_var_temp, x = dat, .lazy = FALSE)
custom_var_vec <- vapply(1:length(dat), custom_var,
FUN.VALUE = numeric(1))
} else if (is.numeric(custom_var)) {
if (length(custom_var) < length(dat)) stop("custom_var must have" %s%
"length at least" %s%
length(dat) %s0% ", the" %s%
"length of the data set")
custom_var_vec <- custom_var
} else {
stop("Don't know how to handle custom_var of class" %s% class(custom_var))
}
if (any(custom_var_vec < 0)) stop("custom_var suggests a negative" %s%
"variance, which is impossible.")
lrv <- custom_var_vec
} else {
# A vector must be passed to stat_Zn_cpp, so we'll pass one with an
# impossible value
lrv <- c(-1)
}
res <- stat_Zn_cpp(dat, kn(length(dat)), use_kernel_var, lrv, get_all_vals)
res[[2]] <- as.integer(res[[2]])
if (!estimate & !get_all_vals) {
return(res[[1]])
} else {
return(res[c(TRUE, estimate, get_all_vals)])
}
  # Here is equivalent (slow) R code
#n = length(dat)
#return(sqrt(kn(n))*max(sapply(
# floor(kn(n)):(n - floor(kn(n))), function(k)
# abs(1/k*sum(dat[1:k]) -
# 1/(n-k)*sum(dat[(k+1):n]))/sqrt((sum((dat[1:k] -
# mean(dat[1:k]))^2)+sum((dat[(k+1):n] -
# mean(dat[(k+1):n]))^2))/n))))
}
#' Multivariate Andrews' Test for End-of-Sample Structural Change
#'
#' This implements Andrews' test for end-of-sample change, as described by
#' \insertCite{andrews03;textual}{CPAT}. This test was derived for detecting a
#' change in multivariate data, as originally described. See
#' \insertCite{andrews03}{CPAT} for a description of the test.
#'
#' @param formula The regression formula, which will be passed to
#' \code{\link[stats]{lm}}
#' @param data \code{data.frame} containing the data
#' @inheritParams andrews_test
#' @return If both \code{pval} and \code{stat} are \code{TRUE}, a list
#' containing both; otherwise, a number for one or the other, depending
#' on which is \code{TRUE}
#' @references
#' \insertAllCited{}
#' @examples
#' x <- rnorm(1000)
#' y <- 1 + 2 * x + rnorm(1000)
#' df <- data.frame(x, y)
#' CPAT:::andrews_test_reg(y ~ x, data = df, M = 900)
andrews_test_reg <- function(formula, data, M, pval = TRUE, stat = TRUE) {
if (!methods::is(formula, "formula")) stop("Bad formula passed to" %s%
"argument \"formula\"")
fit <- lm(formula = formula, data = data)
beta <- coefficients(fit)
d <- length(beta)
X <- model.matrix(fit)
u <- residuals(fit)
y <- fit$model[[1]]
m <- nrow(X) - M # Deriving m and n as described in Andrews (2003)
n <- nrow(X) - m
Sigma <- Reduce("+", lapply(1:(n + 1), function(j)
{u[j:(j + m - 1)] %*% t(u[j:(j + m - 1)])})) /
(n + 1)
if (d <= m) {
V <- t(X[(n + 1):(n + m),]) %*% solve(Sigma) %*% X[(n + 1):(n + m),]
A <- t(X[(n + 1):(n + m),]) %*% solve(Sigma) %*% u[(n + 1):(n + m)]
S <- (t(A) %*% solve(V) %*% A)[1,1]
} else {
S <- (t(u[(n + 1):(n + m)]) %*% solve(Sigma) %*% u[(n + 1):(n + m)])[1,1]
}
subbeta <- lapply(1:(n - m + 1),
function(j) {
coefficients(lm(formula = formula,
data = data[c(1:(j - 1),
(j + ceiling(m/2)):n),]))
})
Sj <- sapply(1:(n - m + 1), function(j) {
yj <- y[j:(j + m - 1)]
Xj <- X[j:(j + m - 1),]
uj <- yj - Xj %*% subbeta[[j]]
Vj <- t(Xj) %*%
solve(Sigma) %*% Xj
Aj <- t(Xj) %*%
solve(Sigma) %*% uj
    (t(Aj) %*% solve(Vj) %*% Aj)[1,1] # Sj
})
res <- list("pval" = mean(S <= Sj), "stat" = S)[c(pval, stat)]
if (length(res) == 1) {
return(res[[1]])
} else {
return(res)
}
}
################################################################################
# STATISTICAL TEST INTERFACES
################################################################################
#' CUSUM Test
#'
#' Performs the (univariate) CUSUM test for change in mean, as described in
#' \insertCite{horvathricemiller19}{CPAT}. This is effectively an interface to
#' \code{\link{stat_Vn}}; see its documentation for more details. p-values are
#' computed using \code{\link{pkolmogorov}}, which represents the limiting
#' distribution of the statistic under the null hypothesis.
#'
#' @param x Data to test for change in mean
#' @param stat_plot Whether to create a plot of the values of the statistic at
#' all potential change points
#' @inheritParams stat_Vn
#' @return A \code{htest}-class object containing the results of the test
#' @references
#' \insertAllCited{}
#' @examples
#' CUSUM.test(rnorm(1000))
#' CUSUM.test(rnorm(1000), use_kernel_var = TRUE, kernel = "bo",
#' bandwidth = "nw")
#' @export
CUSUM.test <- function(x, use_kernel_var = FALSE, stat_plot = FALSE,
kernel = "ba", bandwidth = "and") {
testobj <- list()
testobj$method <- "CUSUM Test for Change in Mean"
testobj$data.name <- deparse(substitute(x))
res <- stat_Vn(x,
estimate = TRUE,
use_kernel_var = use_kernel_var,
kernel = kernel,
bandwidth = bandwidth,
get_all_vals = stat_plot)
stat <- res[[1]]
est <- res[[2]]
if (stat_plot) {
plot.ts(res[[3]], main = "Value of Test Statistic", ylab = "Statistic")
}
attr(stat, "names") <- "A"
attr(est, "names") <- "t*"
testobj$p.value <- 1 - pkolmogorov(res[[1]])
testobj$estimate <- est
testobj$statistic <- stat
class(testobj) <- "htest"
testobj
}
#' Darling-Erdös Test
#'
#' Performs the (univariate) Darling-Erdös test for change in mean, as described
#' in \insertCite{horvathricemiller19}{CPAT}. This is effectively an interface
#' to \code{\link{stat_de}}; see its documentation for more details. p-values
#' are computed using \code{\link{pdarling_erdos}}, which represents the
#' limiting distribution of the test statistic under the null hypothesis when
#' \code{a} and \code{b} are chosen appropriately. (Change those parameters at
#' your own risk!)
#'
#' @param x Data to test for change in mean
#' @param stat_plot Whether to create a plot of the values of the statistic at
#' all potential change points
#' @inheritParams stat_de
#' @return A \code{htest}-class object containing the results of the test
#' @references
#' \insertAllCited{}
#' @examples
#' DE.test(rnorm(1000))
#' DE.test(rnorm(1000), use_kernel_var = TRUE, kernel = "bo", bandwidth = "nw")
#' @export
DE.test <- function(x, a = log, b = log, use_kernel_var = FALSE,
stat_plot = FALSE, kernel = "ba", bandwidth = "and") {
l <- function(x) {sqrt(2*log(x))}
u <- function(x) {2*log(x) + 1/2*log(log(x)) - 1/2*log(pi)}
testobj <- list()
testobj$method <- "Darling-Erdos Test for Change in Mean"
testobj$data.name <- deparse(substitute(x))
params <- c(l(a(length(x))), u(b(length(x))))
names(params) <- c("a(" %s0% deparse(substitute(a)) %s0% "(T))", "b(" %s0%
deparse(substitute(b)) %s0% "(T))")
testobj$parameter <- params
res <- stat_de(x,
estimate = TRUE,
a = a,
b = b,
use_kernel_var = use_kernel_var,
kernel = kernel,
bandwidth = bandwidth,
get_all_vals = stat_plot)
stat <- res[[1]]
est <- res[[2]]
if (stat_plot) {
plot.ts(res[[3]], main = "Value of Test Statistic", ylab = "Statistic")
}
attr(stat, "names") <- "A"
attr(est, "names") <- "t*"
testobj$p.value <- 1 - pdarling_erdos(res[[1]])
testobj$estimate <- est
testobj$statistic <- stat
class(testobj) <- "htest"
testobj
}
#' Rényi-Type Test
#'
#' Performs the (univariate) Rényi-type test for change in mean, as described in
#' \insertCite{horvathricemiller19}{CPAT}. This is effectively an interface to
#' \code{\link{stat_Zn}}; see its documentation for more details. p-values are
#' computed using \code{\link{pZn}}, which represents the limiting distribution
#' of the test statistic under the null hypothesis when
#' \code{kn} represents a sequence \eqn{t_T} satisfying \eqn{t_T \to \infty}
#' and \eqn{t_T/T \to 0} as \eqn{T \to \infty}. (\code{\link[base]{log}} and
#' \code{\link[base]{sqrt}} should be good choices.)
#'
#' @param x Data to test for change in mean
#' @param stat_plot Whether to create a plot of the values of the statistic at
#' all potential change points
#' @inheritParams stat_Zn
#' @return A \code{htest}-class object containing the results of the test
#' @references
#' \insertAllCited{}
#' @examples
#' HR.test(rnorm(1000))
#' HR.test(rnorm(1000), use_kernel_var = TRUE, kernel = "bo", bandwidth = "nw")
#' @export
HR.test <- function(x, kn = log, use_kernel_var = FALSE, stat_plot = FALSE,
kernel = "ba", bandwidth = "and") {
testobj <- list()
testobj$method <- "Horvath-Rice Test for Change in Mean"
testobj$data.name <- deparse(substitute(x))
res <- stat_Zn(x,
kn = kn,
estimate = TRUE,
use_kernel_var = use_kernel_var,
kernel = kernel,
bandwidth = bandwidth,
get_all_vals = stat_plot)
stat <- res[[1]]
est <- res[[2]]
if (stat_plot) {
series <- ts(res[[3]], start = ceiling(kn(length(x))))
    plot.ts(series, main = "Value of Test Statistic", ylab = "Statistic")
}
kn_val <- kn(length(x))
attr(kn_val, "names") <- deparse(substitute(kn)) %s0% "(T)"
attr(stat, "names") <- "D"
attr(est, "names") <- "t*"
testobj$parameters <- kn_val
testobj$p.value <- 1 - pZn(res[[1]])
testobj$estimate <- est
testobj$statistic <- stat
class(testobj) <- "htest"
testobj
}
#' Hidalgo-Seo Test
#'
#' Performs the (univariate) Hidalgo-Seo test for change in mean, as described
#' in \insertCite{horvathricemiller19}{CPAT}. This is effectively an interface
#' to \code{\link{stat_hs}}; see its documentation for more details. p-values
#' are computed using \code{\link{phidalgo_seo}}, which represents the limiting
#' distribution of the test statistic when the null hypothesis is true.
#'
#' @param x Data to test for change in mean
#' @param stat_plot Whether to create a plot of the values of the statistic at
#' all potential change points
#' @inheritParams stat_hs
#' @return A \code{htest}-class object containing the results of the test
#' @references
#' \insertAllCited{}
#' @examples
#' HS.test(rnorm(1000))
#' HS.test(rnorm(1000), corr = FALSE)
#' @export
HS.test <- function(x, corr = TRUE, stat_plot = FALSE) {
testobj <- list()
testobj$method <- "Hidalgo-Seo Test for Change in Mean"
testobj$data.name <- deparse(substitute(x))
params <- c(corr)
names(params) <- c("Correlated Residuals")
testobj$parameter <- params
res <- stat_hs(x, estimate = TRUE, corr = corr, get_all_vals = stat_plot)
stat <- res[[1]]
est <- res[[2]]
if (stat_plot) {
plot.ts(res[[3]], main = "Value of Test Statistic", ylab = "Statistic")
}
attr(stat, "names") <- "A"
attr(est, "names") <- "t*"
testobj$p.value <- 1 - phidalgo_seo(res[[1]])
testobj$estimate <- est
testobj$statistic <- stat
class(testobj) <- "htest"
testobj
}
#' Andrews' Test for End-of-Sample Structural Change
#'
#' Performs Andrews' test for end-of-sample structural change, as described in
#' \insertCite{andrews03}{CPAT}. This function works for both univariate and
#' multivariate data depending on the nature of \code{x} and whether
#' \code{formula} is specified. This function is thus an interface to
#' \code{\link{andrews_test}} and \code{\link{andrews_test_reg}}; see the
#' documentation of those functions for more details.
#'
#' @param x Data to test for change in mean (either a vector or
#' \code{data.frame})
#' @inheritParams andrews_test_reg
#' @return A \code{htest}-class object containing the results of the test
#' @references
#' \insertAllCited{}
#' @examples
#' Andrews.test(rnorm(1000), M = 900)
#' x <- rnorm(1000)
#' y <- 1 + 2 * x + rnorm(1000)
#' df <- data.frame(x, y)
#' Andrews.test(df, y ~ x, M = 900)
#' @export
Andrews.test <- function(x, M, formula = NULL) {
testobj <- list()
testobj$method <- "Andrews' Test for Structural Change"
testobj$data.name <- deparse(substitute(x))
if (is.numeric(x)) {
mchange <- length(x) - M
res <- andrews_test(x, M, pval = TRUE, stat = TRUE)
} else if (is.data.frame(x)) {
mchange <- nrow(x) - M
res <- andrews_test_reg(formula, x, M, pval = TRUE, stat = TRUE)
} else {
stop("x must be vector-like or a data frame")
}
stat <- res[["stat"]]
attr(mchange, "names") <- "m"
attr(stat, "names") <- "S"
testobj$p.value <- res[["pval"]]
testobj$parameters <- mchange
testobj$statistic <- stat
class(testobj) <- "htest"
testobj
}
|
/scratch/gouwar.j/cran-all/cranData/CPAT/R/ChangePointTests.R
|
################################################################################
# Data.R
################################################################################
# 2018-09-22
# Curtis Miller
################################################################################
# Documentation for package datasets
################################################################################
#' Fama-French Five Factors
#'
#' Data set containing the five factors described by
#' \insertCite{famafrench15;textual}{CPAT}, from the data library maintained by
#' Kenneth French. Data ranges from July 1, 1963 to October
#' 31, 2017.
#'
#' @format A data frame with 13679 rows and 6 variables:
#' \describe{
#' \item{Mkt.RF}{Market excess returns}
#' \item{RF}{The risk-free rate of return}
#' \item{SMB}{The return on a diversified portfolio of small stocks minus
#' return on a diversified portfolio of big stocks}
#' \item{HML}{The return of a portfolio of stocks with a high
#' book-to-market (B/M) ratio minus the return of a portfolio
#' of stocks with a low B/M ratio}
#' \item{RMW}{The return of a portfolio of stocks with robust profitability
#' minus a portfolio of stocks with weak profitability}
#' \item{CMA}{The return of a portfolio of stocks with conservative
#' investment minus the return of a portfolio of stocks with
#' aggressive investment}
#' }
#'
#' Row names are dates in YYYYMMDD format.
#'
#' @source \url{http://mba.tuck.dartmouth.edu/pages/faculty/ken.french/data_library.html}
"ff"
#' Bank Portfolio Returns
#'
#' Data set representing the returns of an industry portfolio representing the
#' banking industry based on company four-digit SIC codes, obtained from the
#' data library maintained by Kenneth French. Data ranges from July 1, 1926
#' to October 31, 2017.
#'
#' @format A data frame with 24099 rows and 1 variable:
#' \describe{
#' \item{Banks}{The return of a portfolio representing the banking industry}
#' }
#'
#' Row names are dates in YYYY-MM-DD format.
#'
#' @source \url{http://mba.tuck.dartmouth.edu/pages/faculty/ken.french/data_library.html}
"banks"
|
/scratch/gouwar.j/cran-all/cranData/CPAT/R/Data.R
|
################################################################################
# ProbabilityFunctions.R
################################################################################
# 2018-08-10
# Curtis Miller
################################################################################
# Functions for probability distributions and simulating random variables
################################################################################
################################################################################
# DENSITY FUNCTIONS
################################################################################
#' Rényi-Type Statistic Limiting Distribution Density Function
#'
#' Function for computing the value of the density function of the limiting
#' distribution of the Rényi-type statistic.
#'
#' @param x Point at which to evaluate the density function (note that this
#' parameter is not vectorized)
#' @param summands Number of summands to use in summation (the default should be
#' machine accurate)
#' @return Value of the density function at \eqn{x}
#' @examples
#' CPAT:::dZn(1)
dZn <- function(x, summands = NULL) {
if (!is.numeric(summands)) {
if (x > 6) {
      return(0)  # the density is numerically negligible this far into the tail
} else if (x <= 0) {
return(0)
} else {
# Used some rootfinding procedures to find the proper number of summands
# to guarantee machine-level accuracy, thus yielding these numbers
best_summands <- c(5, 9, 14, 18, 23, 27, 32)
summands <- best_summands[ceiling(x)]
}
}
if (x <= 0) return(0)
2 * pi * sqrt(pZn(x, summands = summands)) * sum((-1)^(0:summands) *
(2 * (0:summands) + 1)/x^3 * exp(-pi^2*(2*(0:summands)+1)^2/
(8*x^2)))
}
dZn <- Vectorize(dZn, "x")
################################################################################
# CDF FUNCTIONS
################################################################################
#' Kolmogorov CDF
#'
#' CDF of the Kolmogorov distribution.
#'
#' @param q Quantile input to CDF
#' @param summands Number of summands for infinite sum (the default should have
#' machine accuracy)
#' @return If \eqn{Z} is the random variable following the Kolmogorov
#' distribution, the quantity \eqn{P(Z \leq q)}
#' @examples
#' CPAT:::pkolmogorov(0.1)
pkolmogorov <- function(q, summands = ceiling(q * sqrt(72) + 3/2)) {
# Formerly called pKolmogorov
sqrt(2 * pi) * sapply(q, function(x) { if (x > 0) {
sum(exp(-(2 * (1:summands) - 1)^2 * pi^2/(8 * x^2)))/x
} else {
0
}})
}
pkolmogorov <- Vectorize(pkolmogorov, "q")
#' Darling-Erdös Statistic CDF
#'
#' CDF for the limiting distribution of the Darling-Erdös statistic.
#'
#' @param q Quantile input to CDF
#' @return If \eqn{Z} is the random variable with this distribution, the
#' quantity \eqn{P(Z \leq q)}
#' @examples
#' CPAT:::pdarling_erdos(0.1)
pdarling_erdos <- function(q) {
# Formerly called pDarlingErdos
exp(-2 * exp(-q))
}
pdarling_erdos <- Vectorize(pdarling_erdos, "q")
#' Rényi-Type Statistic CDF
#'
#' CDF for the limiting distribution of the Rényi-type statistic.
#'
#' @param q Quantile input to CDF
#' @param summands Number of summands for infinite sum; if \code{NULL},
#' automatically determined
#' @return If \eqn{Z} is the random variable following the limiting
#' distribution, the quantity \eqn{P(Z \leq q)}
#' @examples
#' CPAT:::pZn(0.1)
pZn <- function(q, summands = NULL) {
if (!is.numeric(summands)) {
if (q > 6) {
return(1)
} else if (q <= 0) {
return(0)
} else {
# Used some rootfinding procedures to find the proper number of summands
# to guarantee machine-level accuracy, thus yielding these numbers
best_summands <- c(5, 9, 14, 18, 23, 27, 32)
summands <- best_summands[ceiling(q)]
}
}
if (q > 100) return(1)
sapply(q, function(x) {(4/pi * sum((-1)^(0:summands)/(2*(0:summands) + 1) *
exp(-pi^2 * (2 * (0:summands) + 1)^2/
(8 * x^2))))^2})
}
pZn <- Vectorize(pZn, "q")
#' Hidalgo-Seo Statistic CDF
#'
#' CDF of the limiting distribution of the Hidalgo-Seo statistic
#'
#' @param q Quantile input to CDF
#' @return If \eqn{Z} is the random variable following the limiting
#' distribution, the quantity \eqn{P(Z \leq q)}
#' @examples
#' CPAT:::phidalgo_seo(0.1)
phidalgo_seo <- function(q) {
# Formerly called pHidalgoSeo
pdarling_erdos(q/2)
}
################################################################################
# QUANTILE FUNCTIONS
################################################################################
#' Darling-Erdös Statistic Limiting Distribution Quantile Function
#'
#' Quantile function for the limiting distribution of the Darling-Erdös
#' statistic.
#'
#' @param p The probability associated with the desired quantile
#' @return The quantile associated with \code{p}
#' @examples
#' CPAT:::qdarling_erdos(0.5)
qdarling_erdos <- function(p) {
# Formerly called qDarlingErdos
-log(log(1/sqrt(p)))
}
qdarling_erdos <- Vectorize(qdarling_erdos, "p")
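# Informal round-trip check: pdarling_erdos() applied to qdarling_erdos()
# should recover the input probabilities, since the two functions are exact
# inverses. Guarded by if (FALSE) so it is not run when the file is sourced.
if (FALSE) {
  p <- c(0.1, 0.5, 0.9)
  pdarling_erdos(qdarling_erdos(p))  # should return p up to rounding error
}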
#' Hidalgo-Seo Statistic Limiting Distribution Quantile Function
#'
#' Quantile function for the limiting distribution of the Hidalgo-Seo statistic
#'
#' @param p The probability associated with the desired quantile
#' @return The quantile associated with \code{p}
#' @examples
#' CPAT:::qhidalgo_seo(0.5)
qhidalgo_seo <- function(p) {
# Formerly called qHidalgoSeo
2 * qdarling_erdos(p)
}
#' Rényi-Type Statistic Quantile Function
#'
#' Quantile function for the limiting distribution of the Rényi-type statistic.
#'
#' This function uses \code{\link[stats]{uniroot}} for finding this quantity,
#' and many of the accepted parameters are arguments for that function; see
#' its documentation for more details.
#'
#' @param p Value of the CDF at the quantile
#' @param summands Number of summands for infinite sum
#' @param interval,tol,... Arguments to be passed to
#' \code{\link[stats]{uniroot}}
#' @return The quantile associated with \code{p}
#' @examples
#' CPAT:::qZn(0.5)
qZn <- function(p, summands = 500, interval = c(0, 100),
tol = .Machine$double.eps, ...) {
if (p == 1) return(Inf)
if (p == 0) return(0)
if (p < 0 | p > 1) return(NaN)
objective <- function(q) {pZn(q, summands = summands) - p}
# Set up arguments for uniroot()
args <- list(...); args$tol <- tol; args$interval <- interval
args$f <- objective
res <- do.call(uniroot, args)
res$root
}
qZn <- Vectorize(qZn, "p")
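# Informal round-trip check: pZn() evaluated at qZn() should approximately
# recover the input probabilities, up to the root-finding tolerance. Guarded
# by if (FALSE) so it is not run when the file is sourced.
if (FALSE) {
  p <- c(0.25, 0.5, 0.95)
  pZn(qZn(p))  # should be close to p
}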
#' Kolmogorov Distribution Quantile Function
#'
#' Quantile function for the Kolmogorov distribution.
#'
#' This function uses \code{\link[stats]{uniroot}} for finding this quantity,
#' and many of the accepted parameters are arguments for that function; see
#' its documentation for more details.
#'
#' @param p Value of the CDF at the quantile
#' @param summands Number of summands for infinite sum
#' @param interval,tol,... Arguments to be passed to
#' \code{\link[stats]{uniroot}}
#' @return The quantile associated with \code{p}
#' @examples
#' CPAT:::qkolmogorov(0.5)
qkolmogorov <- function(p, summands = 500, interval = c(0, 100),
tol = .Machine$double.eps, ...) {
if (p == 1) return(Inf)
if (p == 0) return(0)
if (p < 0 | p > 1) return(NaN)
objective <- function(q) {pkolmogorov(q, summands = summands) - p}
# Set up arguments for uniroot()
args <- list(...); args$tol <- tol; args$interval <- interval
args$f <- objective
res <- do.call(uniroot, args)
res$root
}
qkolmogorov <- Vectorize(qkolmogorov, "p")
################################################################################
# SIMULATION FUNCTIONS
################################################################################
#' Rényi-Type Statistic Simulation (Assuming Variance)
#'
#' Simulates multiple realizations of the Rényi-type statistic when the long-run
#' variance of the data is known.
#'
#' @param size Number of realizations to simulate
#' @param kn A function returning a positive integer that is used in the
#'           definition of the Rényi-type statistic effectively setting the
#'           bounds over which the maximum is taken
#' @param n The sample size for each realization
#' @param gen_func The function generating the random sample from which the
#' statistic is computed
#' @param args A list of arguments to be passed to \code{gen_func}
#' @param sd The square root of the second moment of the data
#' @return A vector of simulated realizations of the Rényi-type statistic
#' @examples
#' CPAT:::sim_Zn(100, kn = function(n) {floor(log(n))})
#' CPAT:::sim_Zn(100, kn = function(n) {floor(log(n))},
#' gen_func = CPAT:::rchangepoint, args = list(changepoint = 250,
#' mean2 = 1))
sim_Zn <- function(size, kn, n = 500, gen_func = rnorm, args = NULL, sd = 1) {
# Formerly called simZn
Zn_realization <- function() {
# Generate data set
if (!is.list(args)) {
dataset <- do.call(gen_func, list(n = n))
} else {
dataset <- do.call(gen_func, c(list(n = n), args))
}
max(sapply(floor(kn(n)):(n - floor(kn(n))), function(k)
abs(1/k*sum(dataset[1:k]) -
1/(n-k)*sum(dataset[(k+1):n]))
))
}
sqrt(kn(n))/sd * sapply(1:size, function(throwaway) Zn_realization())
}
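# Rough illustration (not a formal test): for moderately large n, empirical
# quantiles of the simulated statistics should be in the same ballpark as the
# limiting quantiles returned by qZn(). Guarded by if (FALSE) so it is not run
# when the file is sourced.
if (FALSE) {
  set.seed(123)
  draws <- sim_Zn(200, kn = function(n) {floor(sqrt(n))}, n = 500)
  quantile(draws, c(0.5, 0.95))  # empirical quantiles
  qZn(c(0.5, 0.95))              # limiting-distribution quantiles
}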
#' CUSUM Statistic Simulation (Assuming Variance)
#'
#' Simulates multiple realizations of the CUSUM statistic when the long-run
#' variance of the data is known.
#'
#' @param size Number of realizations to simulate
#' @param n The sample size for each realization
#' @param gen_func The function generating the random sample from which the
#' statistic is computed
#' @param sd The square root of the second moment of the data
#' @param args A list of arguments to be passed to \code{gen_func}
#' @return A vector of simulated realizations of the CUSUM statistic
#' @examples
#' CPAT:::sim_Vn(100)
#' CPAT:::sim_Vn(100, gen_func = CPAT:::rchangepoint,
#' args = list(changepoint = 250, mean2 = 1))
sim_Vn <- function(size, n = 500, gen_func = rnorm, sd = 1, args = NULL) {
# Formerly called simVn
Vn_realization <- function() {
# Generate data set
if (!is.list(args)) {
dataset <- do.call(gen_func, list(n = n))
} else {
dataset <- do.call(gen_func, c(list(n = n),
args))
}
dataset_mean <- mean(dataset)
max(sapply(1:n, function(k) abs(sum(dataset[1:k]) - k * dataset_mean)))
}
(1/(sd*sqrt(n))) * sapply(1:size, function(throwaway) Vn_realization())
}
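# Rough illustration: under the null hypothesis with known variance, the CUSUM
# statistic has the Kolmogorov distribution as its limit, so the simulated
# values should exceed qkolmogorov(0.95) about 5% of the time. Guarded by
# if (FALSE) so it is not run when the file is sourced.
if (FALSE) {
  set.seed(123)
  draws <- sim_Vn(500, n = 500)
  mean(draws > qkolmogorov(0.95))  # should be roughly 0.05
}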
#' CUSUM Statistic Simulation
#'
#' Simulates multiple realizations of the CUSUM statistic.
#'
#' This differs from \code{sim_Vn()} in that the long-run variance is estimated
#' with this function, while \code{sim_Vn()} assumes the long-run variance is
#' known. Estimation can be done in a variety of ways. If \code{use_kernel_var}
#' is set to \code{TRUE}, long-run variance estimation using kernel-based
#' techniques will be employed; otherwise, a technique resembling standard
#' variance estimation will be employed. Any technique employed, though, will
#' account for the potential break points, as described in
#' \insertCite{horvathricemiller19;textual}{CPAT}. See the documentation for
#' \code{\link{stat_Vn}} for more details.
#'
#' The parameters \code{kernel} and \code{bandwidth} control parameters for
#' long-run variance estimation using kernel methods. These parameters will be
#' passed directly to \code{\link{stat_Vn}}.
#'
#' Versions of the CUSUM statistic, such as the weighted or trimmed statistics,
#' can be simulated with the function by passing values to \code{kn} and
#' \code{tau}; again, see the documentation for \code{\link{stat_Vn}}.
#'
#' @param size Number of realizations to simulate
#' @param kn A function returning a positive integer that is used in the
#'           definition of the trimmed CUSUM statistic effectively setting the
#'           bounds over which the maximum is taken
#' @param tau The weighting parameter for the weighted CUSUM statistic (defaults
#' to zero for no weighting)
#' @param use_kernel_var Set to \code{TRUE} to use kernel-based long-run
#' variance estimation (\code{FALSE} means this is not
#' employed)
#' @param kernel If character, the identifier of the kernel function as used in
#' the \pkg{cointReg} (see documentation for
#' \code{cointReg::getLongRunVar}); if function, the kernel
#' function to be used for long-run variance estimation (default
#' is the Bartlett kernel in \pkg{cointReg}); this parameter
#' has no effect if \code{use_kernel_var} is \code{FALSE}
#' @param bandwidth If character, the identifier of how to compute the bandwidth
#' as defined in the \pkg{cointReg} package (see
#' documentation for \code{cointReg::getLongRunVar}); if
#' function, a function to use for computing the bandwidth; if
#' numeric, the bandwidth to use (the default behavior is to
#' use the method described in \insertCite{andrews91b}{CPAT},
#' as used in \pkg{cointReg}); this parameter has no effect if
#' \code{use_kernel_var} is \code{FALSE}
#' @param n The sample size for each realization
#' @param gen_func The function generating the random sample from which the
#' statistic is computed
#' @param args A list of arguments to be passed to \code{gen_func}
#' @param parallel Whether to use the \pkg{foreach} and \pkg{doParallel}
#' packages to parallelize simulation (which needs to be
#' initialized in the global namespace before use)
#' @return A vector of simulated realizations of the CUSUM statistic
#' @references
#' \insertAllCited{}
#' @examples
#' CPAT:::sim_Vn_stat(100)
#' CPAT:::sim_Vn_stat(100, kn = function(n) {floor(0.1 * n)}, tau = 1/3,
#' use_kernel_var = TRUE, gen_func = CPAT:::rchangepoint,
#' args = list(changepoint = 250, mean2 = 1))
sim_Vn_stat <- function(size, kn = function(n) {1}, tau = 0,
use_kernel_var = FALSE, kernel = "ba",
bandwidth = "and", n = 500, gen_func = rnorm,
args = NULL, parallel = FALSE) {
# Formerly called simVnStat
Vn_realization <- function() {
# Generate data set
if (!is.list(args)) {
dataset <- do.call(gen_func, list(n = n))
} else {
dataset <- do.call(gen_func, c(list(n = n),
args))
}
stat_Vn(dataset, kn = kn, tau = tau, use_kernel_var = use_kernel_var,
kernel = kernel, bandwidth = bandwidth)
# The following should be equivalent (yet slow) R code
#n = length(dataset)
#dataset_mean = mean(dataset)
#return(max(sapply(
# 1:n, function(k)
# abs(sum(dataset[1:k]) -
# k*dataset_mean)/sqrt((sum((dataset[1:k] -
# mean(dataset[1:k]))^2)+sum((dataset[(k+1):n] -
# mean(dataset[(k+1):n]))^2))/n)))/sqrt(n))
}
has_parallel <- requireNamespace("foreach", quietly = TRUE) &&
requireNamespace("doParallel", quietly = TRUE)
if (parallel & !has_parallel) {
warning("Either foreach or doParallel is not available; defaulting" %s%
"to non-parallel implementation")
parallel <- FALSE
  } else if (has_parallel) {  # only set up foreach helpers when foreach is installed
times <- foreach::times
`%dopar%` <- foreach::`%dopar%`
}
if (parallel) {
foreach::foreach(i = 1:size, .combine = 'c') %dopar% Vn_realization()
} else {
sapply(1:size, function(throwaway) Vn_realization())
}
}
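# Minimal sketch of the parallel setup the documentation refers to: a parallel
# backend must be registered (here via doParallel, with an arbitrary choice of
# two workers) before calling with parallel = TRUE. Guarded by if (FALSE) so
# it is not run when the file is sourced.
if (FALSE) {
  doParallel::registerDoParallel(cores = 2)
  sim_Vn_stat(100, parallel = TRUE)
}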
#' Rényi-Type Statistic Simulation
#'
#' Simulates multiple realizations of the Rényi-type statistic.
#'
#' This differs from \code{sim_Zn()} in that the long-run variance is estimated
#' with this function, while \code{sim_Zn()} assumes the long-run variance is
#' known. Estimation can be done in a variety of ways. If \code{use_kernel_var}
#' is set to \code{TRUE}, long-run variance estimation using kernel-based
#' techniques will be employed; otherwise, a technique resembling standard
#' variance estimation will be employed. Any technique employed, though, will
#' account for the potential break points, as described in
#' \insertCite{horvathricemiller19;textual}{CPAT}. See the documentation for
#' \code{\link{stat_Zn}} for more details.
#'
#' The parameters \code{kernel} and \code{bandwidth} control parameters for
#' long-run variance estimation using kernel methods. These parameters will be
#' passed directly to \code{\link{stat_Zn}}.
#'
#' @param size Number of realizations to simulate
#' @param kn A function returning a positive integer that is used in the
#'           definition of the Rényi-type statistic effectively setting the
#'           bounds over which the maximum is taken
#' @param use_kernel_var Set to \code{TRUE} to use kernel-based long-run
#' variance estimation (\code{FALSE} means this is not
#' employed)
#' @param kernel If character, the identifier of the kernel function as used in
#' the \pkg{cointReg} (see documentation for
#' \code{cointReg::getLongRunVar}); if function, the kernel
#' function to be used for long-run variance estimation (default
#' is the Bartlett kernel in \pkg{cointReg}); this parameter
#' has no effect if \code{use_kernel_var} is \code{FALSE}
#' @param bandwidth If character, the identifier of how to compute the bandwidth
#' as defined in the \pkg{cointReg} package (see
#' documentation for \code{cointReg::getLongRunVar}); if
#' function, a function to use for computing the bandwidth; if
#' numeric, the bandwidth to use (the default behavior is to
#' use the \insertCite{andrews91b;textual}{CPAT} method, as
#' used in \pkg{cointReg}); this parameter has no effect if
#' \code{use_kernel_var} is \code{FALSE}
#' @param n The sample size for each realization
#' @param gen_func The function generating the random sample from which the
#' statistic is computed
#' @param args A list of arguments to be passed to \code{gen_func}
#' @param parallel Whether to use the \pkg{foreach} and \pkg{doParallel}
#' packages to parallelize simulation (which needs to be
#' initialized in the global namespace before use)
#' @return A vector of simulated realizations of the Rényi-type statistic
#' @references
#' \insertAllCited{}
#' @examples
#' CPAT:::sim_Zn_stat(100)
#' CPAT:::sim_Zn_stat(100, kn = function(n) {floor(log(n))},
#' use_kernel_var = TRUE, gen_func = CPAT:::rchangepoint,
#' args = list(changepoint = 250, mean2 = 1))
sim_Zn_stat <- function(size, kn = function(n) {floor(sqrt(n))},
use_kernel_var = FALSE, kernel = "ba",
bandwidth = "and", n = 500, gen_func = rnorm,
args = NULL, parallel = FALSE) {
# Formerly called simZnStat
Zn_realization <- function() {
# Generate data set
if (!is.list(args)) {
dataset <- do.call(gen_func, list(n = n))
} else {
dataset <- do.call(gen_func, c(list(n = n),
args))
}
stat_Zn(dataset, kn, use_kernel_var = use_kernel_var,
kernel = kernel, bandwidth = bandwidth)
}
has_parallel <- requireNamespace("foreach", quietly = TRUE) &&
requireNamespace("doParallel", quietly = TRUE)
if (parallel & !has_parallel) {
warning("Either foreach or doParallel is not available; defaulting" %s%
"to non-parallel implementation")
parallel <- FALSE
  } else if (has_parallel) {  # only set up foreach helpers when foreach is installed
times <- foreach::times
`%dopar%` <- foreach::`%dopar%`
}
if (parallel) {
foreach::foreach(i = 1:size, .combine = 'c') %dopar% Zn_realization()
} else {
sapply(1:size, function(throwaway) Zn_realization())
}
}
#' Darling-Erdös Statistic Simulation
#'
#' Simulates multiple realizations of the Darling-Erdös statistic.
#'
#' If \code{use_kernel_var} is set to \code{TRUE}, long-run variance estimation
#' using kernel-based techniques will be employed; otherwise, a technique
#' resembling standard variance estimation will be employed. Any technique
#' employed, though, will account for the potential break points, as described
#' in \insertCite{horvathricemiller19;textual}{CPAT}. See the documentation for
#' \code{\link{stat_de}} for more details.
#'
#' The parameters \code{kernel} and \code{bandwidth} control parameters for
#' long-run variance estimation using kernel methods. These parameters will be
#' passed directly to \code{\link{stat_de}}.
#'
#' @param size Number of realizations to simulate
#' @param a The function that will be composed with
#'          \eqn{l(x) = (2 \log(x))^{1/2}}
#' @param b The function that will be composed with
#'          \eqn{u(x) = 2 \log(x) + \frac{1}{2} \log(\log(x)) -
#'          \frac{1}{2}\log(\pi)}
#' @param use_kernel_var Set to \code{TRUE} to use kernel-based long-run
#' variance estimation (\code{FALSE} means this is not
#' employed)
#' @param kernel If character, the identifier of the kernel function as used in
#' the \pkg{cointReg} (see documentation for
#' \code{cointReg::getLongRunVar}); if function, the kernel
#' function to be used for long-run variance estimation (default
#' is the Bartlett kernel in \pkg{cointReg}); this parameter
#' has no effect if \code{use_kernel_var} is \code{FALSE}
#' @param bandwidth If character, the identifier of how to compute the bandwidth
#' as defined in the \pkg{cointReg} package (see
#' documentation for \code{cointReg::getLongRunVar}); if
#' function, a function to use for computing the bandwidth; if
#' numeric, the bandwidth to use (the default behavior is to
#' use the \insertCite{andrews91b;textual}{CPAT} method, as
#' used in \pkg{cointReg}); this parameter has no effect if
#' \code{use_kernel_var} is \code{FALSE}
#' @param n The sample size for each realization
#' @param gen_func The function generating the random sample from which the
#' statistic is computed
#' @param args A list of arguments to be passed to \code{gen_func}
#' @param parallel Whether to use the \pkg{foreach} and \pkg{doParallel}
#' packages to parallelize simulation (which needs to be
#' initialized in the global namespace before use)
#' @return A vector of simulated realizations of the Darling-Erdös statistic
#' @references
#' \insertAllCited{}
#' @examples
#' CPAT:::sim_de_stat(100)
#' CPAT:::sim_de_stat(100, use_kernel_var = TRUE,
#' gen_func = CPAT:::rchangepoint,
#' args = list(changepoint = 250, mean2 = 1))
sim_de_stat <- function(size, a = log, b = log, use_kernel_var = FALSE,
kernel = "ba", bandwidth = "and", n = 500,
gen_func = rnorm, args = NULL, parallel = FALSE) {
# Formerly called simDEStat
de_realization <- function() {
# Generate data set
if (!is.list(args)) {
dataset <- do.call(gen_func, list(n = n))
} else {
dataset <- do.call(gen_func, c(list(n = n), args))
}
stat_de(dataset, a = a, b = b, use_kernel_var = use_kernel_var,
kernel = kernel, bandwidth = bandwidth)
}
has_parallel <- requireNamespace("foreach", quietly = TRUE) &&
requireNamespace("doParallel", quietly = TRUE)
if (parallel & !has_parallel) {
warning("Either foreach or doParallel is not available; defaulting" %s%
"to non-parallel implementation")
parallel <- FALSE
  } else if (has_parallel) {  # only set up foreach helpers when foreach is installed
times <- foreach::times
`%dopar%` <- foreach::`%dopar%`
}
if (parallel) {
foreach::foreach(i = 1:size, .combine = 'c') %dopar% de_realization()
} else {
sapply(1:size, function(throwaway) de_realization())
}
}
#' Hidalgo-Seo Statistic Simulation
#'
#' Simulates multiple realizations of the Hidalgo-Seo statistic.
#'
#' If \code{corr} is \code{TRUE}, then the residuals of the data-generating
#' process are assumed to be correlated and the test accounts for this in
#' long-run variance estimation; see the documentation for \code{\link{stat_hs}}
#' for more details. Otherwise, the sample variance is the estimate for the
#' long-run variance, as described in \insertCite{hidalgoseo13;textual}{CPAT}.
#'
#' @param size Number of realizations to simulate
#' @param corr Whether long-run variance should be computed under the assumption
#' of correlated residuals
#' @param use_kernel_var Set to \code{TRUE} to use kernel-based long-run
#' variance estimation (\code{FALSE} means this is not
#' employed); \emph{TODO: NOT CURRENTLY IMPLEMENTED}
#' @param kernel If character, the identifier of the kernel function as used in
#' the \pkg{cointReg} (see documentation for
#' \code{cointReg::getLongRunVar}); if function, the kernel
#' function to be used for long-run variance estimation (default
#' is the Bartlett kernel in \pkg{cointReg}); this parameter
#' has no effect if \code{use_kernel_var} is \code{FALSE};
#' \emph{TODO: NOT CURRENTLY IMPLEMENTED}
#' @param bandwidth If character, the identifier of how to compute the bandwidth
#' as defined in the \pkg{cointReg} package (see
#' documentation for \code{cointReg::getLongRunVar}); if
#' function, a function to use for computing the bandwidth; if
#' numeric, the bandwidth to use (the default behavior is to
#' use the \insertCite{andrews91b;textual}{CPAT} method, as
#' used in \pkg{cointReg}); this parameter has no effect if
#' \code{use_kernel_var} is \code{FALSE}; \emph{TODO: NOT
#' CURRENTLY IMPLEMENTED}
#' @param n The sample size for each realization
#' @param gen_func The function generating the random sample from which the
#' statistic is computed
#' @param args A list of arguments to be passed to \code{gen_func}
#' @param parallel Whether to use the \pkg{foreach} and \pkg{doParallel}
#' packages to parallelize simulation (which needs to be
#' initialized in the global namespace before use)
#' @return A vector of simulated realizations of the Hidalgo-Seo statistic
#' @references
#' \insertAllCited{}
#' @examples
#' CPAT:::sim_hs_stat(100)
#' CPAT:::sim_hs_stat(100, gen_func = CPAT:::rchangepoint,
#' args = list(changepoint = 250, mean2 = 1))
sim_hs_stat <- function(size, corr = TRUE, gen_func = rnorm, args = NULL,
n = 500, parallel = FALSE, use_kernel_var = FALSE,
kernel = "ba", bandwidth = "and") {
# Formerly known as simHSStat
hs_realization <- function() {
# Generate data set
if (!is.list(args)) {
dataset <- do.call(gen_func, list(n = n))
} else {
dataset <- do.call(gen_func, c(list(n = n),
args))
}
stat_hs(dataset, corr = corr)
}
has_parallel <- requireNamespace("foreach", quietly = TRUE) &&
requireNamespace("doParallel", quietly = TRUE)
if (parallel & !has_parallel) {
warning("Either foreach or doParallel is not available; defaulting" %s%
"to non-parallel implementation")
parallel <- FALSE
  } else if (has_parallel) {  # only set up foreach helpers when foreach is installed
times <- foreach::times
`%dopar%` <- foreach::`%dopar%`
}
if (parallel) {
foreach::foreach(i = 1:size, .combine = 'c') %dopar% hs_realization()
} else {
sapply(1:size, function(throwaway) hs_realization())
}
}
#' Simulate Univariate Data With a Single Change Point
#'
#' This function simulates univariate data with a structural change.
#'
#' This function generates artificial change point data, where up to the
#' specified change point the data has one mean, and after the point it has a
#' different mean. By default, the function simulates standard Normal data with
#' no change. If \code{changepoint} is \code{NULL}, then by default the change
#' point will be at about the middle of the data.
#'
#' @param n An integer for the data set's sample size
#' @param changepoint An integer for where the change point occurs
#' @param mean1 The mean prior to the change point
#' @param mean2 The mean after the change point
#' @param dist The function with which random data will be generated
#' @param meanparam A string for the parameter in \code{dist} representing the
#' mean
#' @param ... Other arguments to be passed to dist
#' @return A vector of the simulated data
#' @examples
#' CPAT:::rchangepoint(500)
#' CPAT:::rchangepoint(500, changepoint = 10, mean2 = 2, sd = 2)
#' CPAT:::rchangepoint(500, changepoint = 250, dist = rexp, meanparam = "rate",
#' mean1 = 1, mean2 = 2)
rchangepoint <- function(n, changepoint = NULL, mean1 = 0, mean2 = 0,
dist = rnorm, meanparam = "mean", ...) {
if (is.null(changepoint)) {
changepoint <- ceiling(n/2)
} else if (!is.integer(changepoint)) {
changepoint <- round(changepoint)
}
# Arguments to be passed to dist prior to changepoint
distargs_pre <- list(...)
distargs_pre$n <- changepoint
distargs_pre[[meanparam]] <- mean1
# Arguments to be passed to dist after changepoint
distargs_post <- list(...)
distargs_post$n <- n - changepoint
distargs_post[[meanparam]] <- mean2
# Get and return data
datavec1 <- do.call(dist, distargs_pre)
datavec2 <- do.call(dist, distargs_post)
c(datavec1, datavec2)
}
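# Rough illustration combining rchangepoint() with the simulation functions
# above: with a mean shift in the data, simulated CUSUM statistics should
# exceed the null 95% quantile far more often than 5% of the time. Guarded by
# if (FALSE) so it is not run when the file is sourced.
if (FALSE) {
  set.seed(123)
  null_draws <- sim_Vn(500, n = 500)
  alt_draws  <- sim_Vn(500, n = 500, gen_func = rchangepoint,
                       args = list(changepoint = 250, mean2 = 1))
  crit <- qkolmogorov(0.95)
  c(null_rejection = mean(null_draws > crit),  # roughly 0.05
    alt_rejection  = mean(alt_draws > crit))   # should be much larger
}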
|
/scratch/gouwar.j/cran-all/cranData/CPAT/R/ProbabilityFunctions.R
|
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
stat_Vn_cpp <- function(dat, kn, tau, use_kernel_var, lrv_est, get_all_vals) {
.Call('_CPAT_stat_Vn_cpp', PACKAGE = 'CPAT', dat, kn, tau, use_kernel_var, lrv_est, get_all_vals)
}
stat_Zn_cpp <- function(dat, kn, use_kernel_var, lrv_est, get_all_vals) {
.Call('_CPAT_stat_Zn_cpp', PACKAGE = 'CPAT', dat, kn, use_kernel_var, lrv_est, get_all_vals)
}
get_lrv_vec_cpp <- function(Y, kern, max_l) {
.Call('_CPAT_get_lrv_vec_cpp', PACKAGE = 'CPAT', Y, kern, max_l)
}
cond_var_gradient_hessian_cpp <- function(var, eps, omega, alpha, beta, init_vals) {
.Call('_CPAT_cond_var_gradient_hessian_cpp', PACKAGE = 'CPAT', var, eps, omega, alpha, beta, init_vals)
}
|
/scratch/gouwar.j/cran-all/cranData/CPAT/R/RcppExports.R
|
################################################################################
# StartupMessage.R
################################################################################
# 2018-09-20
# Curtis Miller
################################################################################
# Package startup message functions, for fancy loading.
################################################################################
#' Create Package Startup Message
#'
#' Makes package startup message.
#'
#' @import utils
#' @examples
#' CPAT:::CPAT_startup_message()
CPAT_startup_message <- function() {
c(paste0(" ________ _________ _________ __________\n / // __",
"_ // ___ // /\n / ____// / / // / / //___ ",
" ___/\n / / / /__/ // /__/ / / /\n / /___ / __",
"____// ___ / / /\n / // / / / / / / /\n/",
"_______//__/ /__/ /__/ /__/ v. ",
utils::packageVersion("CPAT")),
"\nType citation(\"CPAT\") for citing this R package in publications")
}
#' Package Attach Hook Function
#'
#' Hook triggered when package attached
#'
#' @param lib a character string giving the library directory where the package
#' defining the namespace was found
#' @param pkg a character string giving the name of the package
#' @examples
#' CPAT:::.onAttach(.libPaths()[1], "CPAT")
.onAttach <- function(lib, pkg) {
msg <- CPAT_startup_message()
if (!interactive())
msg[1] <- paste("Package 'CPAT' version", packageVersion("CPAT"))
packageStartupMessage(msg)
invisible()
}
|
/scratch/gouwar.j/cran-all/cranData/CPAT/R/StartupMessage.R
|
################################################################################
# Utils.R
################################################################################
# 2018-08-27
# Curtis Miller
################################################################################
# Functions and other extras used throughout the package
################################################################################
################################################################################
# ROXYGEN2 TAGS
################################################################################
#' @useDynLib CPAT
#' @importFrom Rcpp sourceCpp
NULL
################################################################################
# OPERATORS
################################################################################
#' Concatenate (With Space)
#'
#' Concatenate and form strings (with space separation)
#'
#' @param x One object
#' @param y Another object
#' @return A string combining \code{x} and \code{y} with a space separating them
#' @examples
#' `%s%` <- CPAT:::`%s%`
#' "Hello" %s% "world"
`%s%` <- function(x, y) {paste(x, y)}
#' Concatenate (Without Space)
#'
#' Concatenate and form strings (no space separation)
#'
#' @inheritParams %s%
#' @return A string combining \code{x} and \code{y}
#' @examples
#' `%s0%` <- CPAT:::`%s0%`
#' "Hello" %s0% "world"
`%s0%` <- function(x, y) {paste0(x, y)}
|
/scratch/gouwar.j/cran-all/cranData/CPAT/R/Utils.R
|
#' CPBayes: An R-package implementing a Bayesian meta analysis method for studying cross-phenotype
#' genetic associations.
#'
#' Simultaneous analysis of genetic associations with multiple phenotypes may reveal shared
#' genetic susceptibility across traits (pleiotropy). CPBayes is a Bayesian meta analysis
#' method for studying cross-phenotype genetic associations. It uses summary-level data
#' across multiple phenotypes to simultaneously measure the evidence of aggregate-level
#' pleiotropic association and estimate an optimal subset of traits associated with the
#' risk locus. CPBayes is based on a spike and slab prior.
#'
#' The package consists of following functions:
#'\code{\link{analytic_locFDR_BF_uncor}}, \code{\link{cpbayes_uncor}}; \code{\link{analytic_locFDR_BF_cor}}, \code{\link{cpbayes_cor}}; \code{\link{post_summaries}}, \code{\link{forest_cpbayes}}, \code{\link{estimate_corln}}.
#'
#' @section Functions:
#' \describe{
#' \item{\code{\link{analytic_locFDR_BF_uncor}}}{It analytically computes the local FDR (locFDR)
#' and Bayes factor (BF) quantifying the evidence of aggregate-level pleiotropic association
#' for uncorrelated summary statistics.}
#' \item{\code{\link{cpbayes_uncor}}}{It implements CPBayes (based on MCMC) for uncorrelated summary statistics to
#' figure out the optimal subset of non-null traits underlying a pleiotropic signal and other insights.
#' The summary statistics across traits/studies are uncorrelated when the studies
#' have no overlapping/genetically related subjects.}
#' \item{\code{\link{analytic_locFDR_BF_cor}}}{It analytically computes the local FDR (locFDR)
#' and Bayes factor (BF) for correlated summary statistics.}
#' \item{\code{\link{cpbayes_cor}}}{It implements CPBayes (based on MCMC) for correlated summary statistics to figure out
#' the optimal subset of non-null traits underlying a pleiotropic signal and other insights. The summary statistics across
#' traits/studies are correlated when the studies have overlapping/genetically related subjects
#' or the phenotypes were measured in a cohort study.}
#' \item{\code{\link{post_summaries}}}{It summarizes the MCMC data produced by
#' \code{\link{cpbayes_uncor}} or \code{\link{cpbayes_cor}}.
#' It computes additional summaries to provide a better insight into a pleiotropic signal.
#' It works in the same way for both \code{\link{cpbayes_uncor}} and \code{\link{cpbayes_cor}}.}
#' \item{\code{\link{forest_cpbayes}}}{It creates a forest plot presenting the pleiotropy result obtained by
#' \code{\link{cpbayes_uncor}} or \code{\link{cpbayes_cor}}. It works in the same way for
#' both \code{\link{cpbayes_uncor}} and \code{\link{cpbayes_cor}}.}
#' \item{\code{\link{estimate_corln}}}{It computes an approximate correlation matrix of
#' the beta-hat vector for multiple overlapping case-control studies using the
#' sample-overlap count matrices.}
#' }
#' @references Majumdar A, Haldar T, Bhattacharya S, Witte JS (2018) An efficient Bayesian meta analysis approach for studying cross-phenotype genetic associations. PLoS Genet 14(2): e1007139.
#'
#'
#' @docType package
#'
#' @name CPBayes
#'
#' @importFrom mvtnorm dmvnorm
#' @importFrom purrr map_dbl
#' @importFrom MASS mvrnorm
#' @importFrom utils combn
#' @importFrom stats runif rnorm rbeta quantile qchisq qbeta pchisq pbeta p.adjust dist aggregate sd dnorm qnorm
#' @importFrom forestplot forestplot fpColors
#' @importFrom grDevices dev.off pdf
NULL
|
/scratch/gouwar.j/cran-all/cranData/CPBayes/R/CPBayes.R
|
## Function to call CPBayes_cor (CPBayes function for correlated summary statistics)
#' Run correlated version of CPBayes.
#'
#' Run correlated version of CPBayes when the main genetic effect (beta/log(odds ratio)) estimates across
#' studies/traits are correlated.
#' @param BetaHat A numeric vector of length K where K is the number of phenotypes. It
#' contains the beta-hat values across studies/traits. No default is specified.
#' @param SE A numeric vector with the same dimension as BetaHat providing the standard errors
#' corresponding to BetaHat. Every element of SE must be positive. No default is specified.
#' @param Corln A numeric square matrix of order K by K providing the correlation matrix of BetaHat.
#' The number of rows & columns of Corln must be the same as the length of BetaHat. No default
#' is specified. See \code{\link{estimate_corln}}.
#' @param Phenotypes A character vector of the same length as BetaHat providing the name
#' of the phenotypes. Default is specified as trait1, trait2, . . . , traitK. Note that
#' BetaHat, SE, Corln, and Phenotypes must be in the same order.
#' @param Variant A character vector of length one providing the name of the genetic variant.
#' Default is `Variant'.
#' @param UpdateSlabVar A logical vector of length one. If TRUE, the variance of the slab distribution
#' that presents the prior distribution of non-null effects is
#' updated at each MCMC iteration in a range (MinSlabVar -- MaxSlabVar) (see next). If FALSE,
#' it is fixed at (MinSlabVar + MaxSlabVar)/2. Default is TRUE.
#' @param MinSlabVar A numeric value greater than 0.01 providing the minimum value of the
#' variance of the slab distribution. Default is 0.6.
#' @param MaxSlabVar A numeric value smaller than 10.0 providing the maximum value of the
#' variance of the slab distribution. Default is 1.0. **Note that,
#' a smaller value of the slab variance will increase the sensitivity of CPBayes while selecting the optimal
#' subset of associated traits but at the expense of lower specificity. Hence the slab variance
#' parameter in CPBayes is inversely related to the level of false discovery rate (FDR) in a frequentist
#' FDR controlling procedure. For a specific dataset, a user
#' can experiment with different choices of these three arguments: UpdateSlabVar, MinSlabVar, and MaxSlabVar.
#' @param MCMCiter A positive integer greater than or equal to 2200 providing the total number of
#' iterations in the MCMC. Default is 7500.
#' @param Burnin A positive integer greater than or equal to 200 providing the burn in period
#' in the MCMC. Default is 500. Note that the MCMC sample size (MCMCiter - Burnin) must be at least 2000, which is 7000 by default.
#' @return The output produced by \code{\link{cpbayes_cor}} is a list which consists of various components.
#' \item{variantName}{It is the name of the genetic variant provided by the user. If
#' not specified by the user, default name is `Variant'.}
#' \item{log10_BF}{It provides the log10(Bayes factor) produced by CPBayes that measures the
#' evidence of the overall pleiotropic association.}
#' \item{locFDR}{It provides the local false discovery rate (posterior probability of null association) produced by
#' CPBayes which is a measure of the evidence
#' of aggregate-level pleiotropic association. Bayes factor is adjusted for prior odds, but
#' locFDR is solely a function of the posterior odds. locFDR can sometimes be small
#' indicating an association, but log10_BF may not indicate an association. Hence, always check both log10_BF and locFDR.}
#' \item{subset}{It provides the optimal subset of associated/non-null traits selected by
#' CPBayes. It is NULL if no phenotype is selected.}
#' \item{important_traits}{It provides the traits which yield a trait-specific posterior
#' probability of association (PPAj) > 20\%. Even if a phenotype is not
#' selected in the optimal subset of non-null traits, it can produce a non-negligible
#' value of PPAj. Note that, `important_traits' is expected to include the traits already
#' contained in `subset'. It provides both the name of the important traits and their
#' corresponding value of PPAj. Always check 'important_traits' even if 'subset' contains
#' a single trait. It helps to better explain an observed pleiotropic signal.}
#' \item{auxi_data}{It contains supplementary data including the MCMC data which is used later
#' by \code{\link{post_summaries}} and \code{\link{forest_cpbayes}}:
#' \enumerate{
#' \item traitNames: Name of all the phenotypes.
#' \item K: Total number of phenotypes.
#' \item mcmc.samplesize: MCMC sample size.
#' \item PPAj: Trait-specific posterior probability of association for all the traits.
#' \item Z.data: MCMC data on the latent association status of all the traits (Z).
#' \item sim.beta: MCMC data on the unknown true genetic effect (beta) on each trait.
#' \item betahat: The beta-hat vector provided by the user which will be used by \code{\link{forest_cpbayes}}.
#' \item se: The standard error vector provided by the user which will be used by \code{\link{forest_cpbayes}}.
#' }
#' }
#' \item{uncor_use}{'Yes' or 'No'. Whether the combined strategy of CPBayes (implemented for correlated
#' summary statistics) used the uncorrelated version or not.}
#' \item{runtime}{It provides the runtime (in seconds) taken by \code{\link{cpbayes_cor}}. It will help the user to
#' plan the whole analysis.}
#'
#' @references Majumdar A, Haldar T, Bhattacharya S, Witte JS (2018) An efficient Bayesian meta analysis approach for studying cross-phenotype genetic associations. PLoS Genet 14(2): e1007139.
#'
#' @seealso \code{\link{analytic_locFDR_BF_cor}}, \code{\link{estimate_corln}}, \code{\link{post_summaries}}, \code{\link{forest_cpbayes}}, \code{\link{analytic_locFDR_BF_uncor}}, \code{\link{cpbayes_uncor}}
#'
#' @examples
#' data(ExampleDataCor)
#' BetaHat <- ExampleDataCor$BetaHat
#' BetaHat
#' SE <- ExampleDataCor$SE
#' SE
#' cor <- ExampleDataCor$cor
#' cor
#' traitNames <- paste("Disease", 1:10, sep = "")
#' SNP1 <- "rs1234"
#' result <- cpbayes_cor(BetaHat, SE, cor, Phenotypes = traitNames, Variant = SNP1)
#' str(result)
#'
#' @export
cpbayes_cor <- function(BetaHat, SE, Corln, Phenotypes, Variant, UpdateSlabVar = TRUE, MinSlabVar = 0.6, MaxSlabVar = 1.0, MCMCiter = 7500, Burnin = 500)
{
UpdateDE <- UpdateSlabVar
# Check whether any of the primary arguments is missing
if(missing(BetaHat) || missing (SE))
stop("BetaHat or SE vector is missing!", call. = FALSE)
if(missing(Corln))
stop("Correlation matrix is missing!", call. = FALSE)
# Argument 1 :: BetaHat
BetaHat <- checkPrimaryVar(BetaHat, "BetaHat")
# Argument 2 :: SE
SE <- checkPrimaryVar(SE, "SE")
# Check whether all entries are strictly positive
if(!all(SE > 0))
stop("One or more elements in the SE vector are not positive!", call. = FALSE)
# Argument 1 and 2 ::
if(length(BetaHat) != length(SE))
stop("BetaHat and SE vectors must have the same number of elements!", call. = FALSE)
# Argument 3 :: Correlation
COR <- checkCorln(Corln, BetaHat)
# Argument 4 :: Phenotype names
if(!missing(Phenotypes))
checkPhen(Phenotypes, BetaHat)
else Phenotypes = paste("trait", 1:length(BetaHat), sep = "")
# Argument 5 :: Variant name
if(!missing(Variant))
{
Variant <- checkVarName(Variant)
variantName <- unname(Variant) # Assignment
}
else variantName <- "Variant"
# Argument 6 :: Update model parameter DE
# Check whether argument 6 is a vector of length 1
if(!is.vector(UpdateDE) || (length(UpdateDE) != 1))
{
warning("UpdateSlabVar is not a scalar (default option used).", call. = FALSE)
UpdateDE <- TRUE
}
if(!is.logical(UpdateDE))
{
warning("UpdateSlabVar not provided as logical (default option used).", call. = FALSE)
UpdateDE <- TRUE
}
	# Argument 7 & 8 :: Minimum and maximum value of slab variance
SlabVarList <- chkSlabVar(MinSlabVar, MaxSlabVar)
MinSlabVar <- SlabVarList[["MinSlabVar"]]
MaxSlabVar <- SlabVarList[["MaxSlabVar"]]
	# Argument 9 & 10 :: Number of MCMC iterations and burn-in period
mcmcParam <- chkMCMCparam(MCMCiter, Burnin)
MCMCiter <- mcmcParam[["MCMCiter"]]
Burnin <- mcmcParam[["Burnin"]]
# Call CPBayes function
RESULT <- combined_CPBayes( variantName, Phenotypes, BetaHat, SE, COR, UpdateDE, MinSlabVar, MaxSlabVar, MCMCiter, Burnin )
print_result(RESULT)
invisible(RESULT)
}
|
/scratch/gouwar.j/cran-all/cranData/CPBayes/R/FuncUserCor.R
|
## Function to call cpbayes_uncor (CPBayes function for uncorrelated phenotypes)
#' Run uncorrelated version of CPBayes.
#'
#' Run uncorrelated version of CPBayes when the main genetic effect (beta/log(odds ratio)) estimates across
#' studies/traits are uncorrelated.
#' @param BetaHat A numeric vector of length K where K is the number of phenotypes. It
#' contains the beta-hat values across studies/traits. No default is specified.
#' @param SE A numeric vector with the same dimension as BetaHat providing the standard errors
#' corresponding to BetaHat. Every element of SE must be positive. No default is specified.
#' @param Phenotypes A character vector of the same length as BetaHat providing the name of
#' the phenotypes. Default is specified as trait1, trait2, . . . , traitK. Note that BetaHat,
#' SE, and Phenotypes must be in the same order.
#' @param Variant A character vector of length one specifying the name of the genetic variant.
#' Default is `Variant'.
#' @param UpdateSlabVar A logical vector of length one. If TRUE, the variance of the slab distribution
#' that presents the prior distribution of non-null effects
#' is updated at each MCMC iteration in a range (MinSlabVar -- MaxSlabVar) (see next). If FALSE,
#' it is fixed at (MinSlabVar + MaxSlabVar)/2. Default is TRUE.
#' @param MinSlabVar A numeric value greater than 0.01 providing the minimum value of
#' the variance of the slab distribution. Default is 0.6.
#' @param MaxSlabVar A numeric value smaller than 10.0 providing the maximum value of
#' the variance of the slab distribution. Default is 1.0. **Note that,
#' a smaller value of the slab variance will increase the sensitivity of CPBayes while selecting the optimal
#' subset of associated traits but at the expense of lower specificity. Hence the slab variance
#' parameter in CPBayes is inversely related to the level of false discovery rate (FDR) in a frequentist
#' FDR controlling procedure. For a specific dataset, a user
#' can experiment with different choices of these three arguments: UpdateSlabVar, MinSlabVar, and MaxSlabVar.
#' @param MCMCiter A positive integer greater than or equal to 2200 providing the total number of
#' iterations in the MCMC. Default is 7500.
#' @param Burnin A positive integer greater than or equal to 200 providing the burn in period
#' in the MCMC. Default is 500. Note that the MCMC sample size (MCMCiter - Burnin) must be at least 2000, which is 7000 by default.
#' @return The output produced by the function is a list which consists of various components.
#' \item{variantName}{It is the name of the genetic variant provided by the user. If not
#' specified by the user, default name is `Variant'.}
#' \item{log10_BF}{It provides the log10(Bayes factor) produced by CPBayes that measures the
#' evidence of the overall pleiotropic association.}
#' \item{locFDR}{It provides the local false discovery rate (posterior probability of null association) produced by CPBayes
#' which is a measure of the evidence of the
#' aggregate-level pleiotropic association. Bayes factor is adjusted for prior odds, but
#' locFDR is solely a function of the posterior odds. locFDR can sometimes be small
#' indicating an association, but log10_BF may not indicate an association. Hence, always check both log10_BF and locFDR.}
#' \item{subset}{It provides the optimal subset of associated/non-null traits selected
#' by CPBayes. It is NULL if no phenotype is selected.}
#' \item{important_traits}{It provides the traits which yield a trait-specific posterior probability of
#' association (PPAj) > 20\%. Even if a phenotype is not selected in the optimal subset of non-null
#' traits, it can produce a non-negligible value of trait-specific posterior probability of
#' association (PPAj). Note that, `important_traits' is expected to include the traits
#' already contained in `subset'. It provides both the name of the important traits and
#' their corresponding values of PPAj. Always check 'important_traits' even if 'subset' contains
#' a single trait. It helps to better explain an observed pleiotropic signal.}
#' \item{auxi_data}{It contains supplementary data including the MCMC data which is used later
#' by \code{\link{post_summaries}} and \code{\link{forest_cpbayes}}:
#' \enumerate{
#' \item traitNames: Name of all the phenotypes.
#' \item K: Total number of phenotypes.
#' \item mcmc.samplesize: MCMC sample size.
#' \item PPAj: Trait-specific posterior probability of association for all the traits.
#' \item Z.data: MCMC data on the latent association status of all the traits (Z).
#' \item sim.beta: MCMC data on the unknown true genetic effect (beta) on all the traits.
#' \item betahat: The beta-hat vector provided by the user which will be used by \code{\link{forest_cpbayes}}.
#' \item se: The standard error vector provided by the user which will be used by \code{\link{forest_cpbayes}}.
#' }
#' }
#' \item{runtime}{It provides the runtime (in seconds) taken by \code{\link{cpbayes_uncor}}. It will help the user
#' to plan the whole analysis.}
#'
#' @references Majumdar A, Haldar T, Bhattacharya S, Witte JS (2018) An efficient Bayesian meta analysis approach for studying cross-phenotype genetic associations. PLoS Genet 14(2): e1007139.
#'
#' @seealso \code{\link{analytic_locFDR_BF_uncor}}, \code{\link{post_summaries}}, \code{\link{forest_cpbayes}}, \code{\link{analytic_locFDR_BF_cor}}, \code{\link{cpbayes_cor}}, \code{\link{estimate_corln}}
#'
#' @examples
#' data(ExampleDataUncor)
#' BetaHat <- ExampleDataUncor$BetaHat
#' BetaHat
#' SE <- ExampleDataUncor$SE
#' SE
#' traitNames <- paste("Disease", 1:10, sep = "")
#' SNP1 <- "rs1234"
#' result <- cpbayes_uncor(BetaHat, SE, Phenotypes = traitNames, Variant = SNP1)
#' str(result)
#'
#' @export
cpbayes_uncor <- function(BetaHat, SE, Phenotypes, Variant, UpdateSlabVar = TRUE, MinSlabVar = 0.6, MaxSlabVar = 1.0, MCMCiter = 7500, Burnin = 500)
{
UpdateDE <- UpdateSlabVar
# Check whether any of the primary arguments is missing
if(missing(BetaHat) || missing (SE))
stop("BetaHat or SE vector is missing!", call. = FALSE)
# Argument 1 :: BetaHat
BetaHat <- checkPrimaryVar(BetaHat, "BetaHat")
# Argument 2 :: SE
SE <- checkPrimaryVar(SE, "SE")
# Check whether all entries are strictly positive
if(!all(SE > 0))
stop("One or more elements in the SE vector are not positive!", call. = FALSE)
# Argument 1 and 2 ::
if(length(BetaHat) != length(SE))
stop("BetaHat and SE vectors must have the same number of elements!", call. = FALSE)
# Argument 3 :: Phenotype names
if(!missing(Phenotypes))
checkPhen(Phenotypes, BetaHat)
else Phenotypes = paste("trait", 1:length(BetaHat), sep = "")
# Argument 4 :: Variant name
if(!missing(Variant))
{
Variant <- checkVarName(Variant)
variantName <- unname(Variant) # Assignment
}
else variantName <- "Variant"
# Argument 5 :: Update model parameter DE
# Check whether argument 5 is a vector of length 1
if(!is.vector(UpdateDE) || (length(UpdateDE) != 1))
{
warning("UpdateSlabVar is not a scalar (default option used).", call. = FALSE)
UpdateDE <- TRUE
}
if(!is.logical(UpdateDE))
{
warning("UpdateSlabVar not provided as logical (default option used).", call. = FALSE)
UpdateDE <- TRUE
}
# Argument 6 & 7:: Minimum and maximum value of slab variance
SlabVarList <- chkSlabVar(MinSlabVar, MaxSlabVar)
MinSlabVar <- SlabVarList[["MinSlabVar"]]
MaxSlabVar <- SlabVarList[["MaxSlabVar"]]
	# Argument 8 & 9 :: Number of MCMC iterations and burn-in period
mcmcParam <- chkMCMCparam(MCMCiter, Burnin)
MCMCiter <- mcmcParam[["MCMCiter"]]
Burnin <- mcmcParam[["Burnin"]]
# Call CPBayes function
RESULT <- CPBayes_uncor( variantName, Phenotypes, BetaHat, SE, UpdateDE, MinSlabVar, MaxSlabVar, MCMCiter, Burnin )
print_result(RESULT)
invisible(RESULT)
}
|
/scratch/gouwar.j/cran-all/cranData/CPBayes/R/FuncUserUncor.R
|
## This file contains MCMC functions for updating the beta parameters.
## update beta in uncorrelated case
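## Each X_k is modelled as N(beta_k, s.e._k^2) and beta_k has a mean-zero
## normal prior with sd TAU_k (tau for the spike, tau/de for the slab when
## Z_k = 1), so the full conditional of beta_k is normal with
##   variance = 1 / (1/s.e._k^2 + 1/TAU_k^2)  and  mean = variance * X_k / s.e._k^2,
## which is what the function below samples from.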
uncorrelated_beta_update <- function(K, s.e., tau, de, Z, X)
{
s2 = (s.e.)^2
TAU = rep(tau,K)
indx = which(Z==1)
if(length(indx) > 0) TAU[indx] = tau/de
sigma2.inv <- (1/s2) + (1/TAU^2)
sigma <- sqrt(1/sigma2.inv)
mean <- (sigma^2/s2)*X
beta = rnorm(K,mean,sigma) ;
return(beta) ;
}
## Update the beta parameters in correlated case
correlated_beta_update <- function(K, tau, de, Z, X, Sig1.inv)
{
TAU = rep(tau,K)
indx = which(Z==1)
if(length(indx) > 0) TAU[indx] = tau/de
Sig2.inv = diag(1/(TAU^2))
combo.inv <- solve(Sig1.inv+Sig2.inv) ;
mean <- combo.inv %*% Sig1.inv %*% X ;
Sigma <- combo.inv ;
beta <- mvrnorm(1,mean,Sigma)
}
|
/scratch/gouwar.j/cran-all/cranData/CPBayes/R/MCMC_beta_update_functions.R
|
##=================================**********************************************===============================##
## This file contains functions that are called by both the uncorrelated and correlated versions of CPBayes,
## for example, to initialize the parameter values in the MCMC and to update Z, q, and de. Note that the full
## conditional posterior distributions of these parameters are the same in the uncorrelated and correlated versions.
##=================================**********************************************===============================##
################################ Function implementing the FDR procedure #############################
BH_selection <- function(pv, traits, level){
K <- length(pv) ## Number of phenotypes
pval <- matrix(0, K, 2)
pval[ ,1] <- seq(1,K)
pval[ ,2] <- pv
pval <- pval[order(pval[,2]), ]
cutoff <- seq(1,K) * (1/K) * level
comparison <- pval[ ,2] - cutoff
selected_traits <- NULL
if(any(comparison <= 0) == TRUE){
BH_posi <- max(which(comparison <= 0))
select_index <- sort(pval[1:BH_posi,1])
selected_traits <- traits[select_index]
}
return(selected_traits)
}
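# Worked illustration with made-up p-values (purely hypothetical numbers):
# with K = 5 traits and level = 0.05 the step-up cutoffs are 0.01, 0.02, 0.03,
# 0.04, 0.05, so only the three smallest p-values below should survive.
# Guarded by if (FALSE) so it is not run when the file is sourced.
if (FALSE) {
  pv <- c(1e-6, 0.004, 0.03, 0.20, 0.65)
  BH_selection(pv, traits = paste0("Trait", 1:5), level = 0.05)
  # expected: "Trait1" "Trait2" "Trait3"
}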
##################################***************************************###################################
##*************** initialization for the CpgBayes MCMC ****************##
initiate_MCMC <- function( K, X, s.e. )
{
## minimum and maximum choices of the intial value of q
min.q = 0.05
max.q = 0.95
q.mode = min.q
## level of FDR correction in the BY or BH procedure
level = 0.01
pv = pchisq( (X/s.e.)^2, df=1, lower.tail=F )
traits <- paste0("Trait", 1:K)
select <- BH_selection(pv, traits, level)
K1.FDR <- length(select)
nonnull.set = 0
Z = rep(0,K) ## Introduce the allocations
if( K1.FDR > 0 )
{
nonnull.set <- match(select, traits)
Z[nonnull.set] <- rep(1, K1.FDR)
q.mode = K1.FDR/K
}
if( q.mode == 1 ) q.mode <- max.q
beta = X ## beta = X, under continuous spike, beta is always non-zero
data <- list( beta = beta, Z = Z, q = q.mode, K1_FDR = K1.FDR )
}
##*************** Update allocations (Z) for both uncorrelated and correlated cases ****************##
##*************** for the model in which q is included in the MCMC to be updated ****************##
Z_update <- function(K, q, tau, de, beta)
{
Z = rep(0,K) ;
log.ratio = log(q) - log(1-q) + log(de) - ( ((beta^2)/(2*(tau^2))) * ((de^2)-1) ) ;
pr0 = 1/(1+exp(log.ratio)) ;
u = runif(K,0,1) ;
non_zero = which(u>pr0) ;
if(length(non_zero)>0) Z[non_zero] = 1
data <- list(prob = pr0, Z = Z)
return(data)
}
##*************** Update q for both of uncorrelated and correlated versions of CPBayes ****************##
q_update <- function(K, Z, shape1, shape2)
{
k1 = sum(Z)
k2 = K-k1
sh1 = shape1+k1
sh2 = shape2+k2
q = rbeta(1,sh1,sh2)
return(q)
}
##*************** Update allocations (Z) for both uncorrelated and correlated cases ****************##
##*************** for the model in which q is integrated out from the model ****************##
Z_integrated_update <- function(K, log_ratio_p1, tau_const, de, beta)
{
Z = rep(0,K) ;
log.ratio = log_ratio_p1 + log(de) + ( ((beta^2)/tau_const) * (1-(de^2)) )
pr0 = 1/(1+exp(log.ratio))
u = runif(K,0,1)
non_zero = which(u > pr0)
if(length(non_zero) > 0) Z[non_zero] = 1
data <- list(prob = pr0, Z = Z)
return(data)
}
##*************** Function to update the 'de' parameter ****************##
##*************** for both uncorrelated and correlated versions for CPBayes ***************##
## Updating the 'de' parameter when length(Z==1) > 0
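## The draw below uses the inverse-CDF method on a truncated distribution:
## with const = sum(beta_j^2) / (2 * tau^2) taken over the slab components, the
## full conditional of de is proportional to de^(shape1_de + K1 - 1) * exp(-const * de^2),
## so y = 2 * const * de^2 follows a chi-square distribution with
## df = shape1_de + K1, truncated so that de stays in [min_de, max_de]. The
## truncation bounds are mapped through pchisq(), a uniform draw is taken
## between the two CDF values, inverted with qchisq(), and transformed back.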
de_update1 <- function(min_de, max_de, shape1_de, beta, Z, tau)
{
K1 = sum(Z)
non_zero = which(Z>0)
beta2 = beta^2
const = (1/(2*(tau^2))) * sum(beta2[non_zero])
y1 = (min_de^2)*2*const
y2 = (max_de^2)*2*const
u = runif(1,0,1)
df = shape1_de+K1
prob1 = pchisq(y1, df=df, ncp=0, lower.tail=TRUE)
prob2 = pchisq(y2, df=df, ncp=0, lower.tail=TRUE)
prob = prob2-prob1
p = prob1 + (u*prob)
y = qchisq(p, df=df, ncp=0, lower.tail=TRUE)
d = sqrt(y/(2*const))
return(d)
}
## Updating the 'de' parameter when length(Z==1) = 0
de_update0 <- function(min_de, max_de, shape1_de)
{
u = runif(1,0,1)
prob1 = pbeta(min_de, shape1_de, 1, lower.tail = TRUE)
prob2 = pbeta(max_de, shape1_de, 1, lower.tail = TRUE)
prob = prob2-prob1
p = prob1 + (u*prob)
d = qbeta(p, shape1_de, 1, lower.tail = TRUE)
return(d)
}
|
/scratch/gouwar.j/cran-all/cranData/CPBayes/R/common_MCMC_functions.R
|
##=================================********** Correlated version of CPBayes ***********==============================##
## This function is the main MCMC function for implementing CPBayes in case of correlated summary statistics.
## Correlated summary statistics arise for case-control statistics with overlapping subjects or cohort data.
## It calls the function: initiate_MCMC() from 'common_MCMC_functions.R' to initialize the parameters in the MCMC.
## It calls: correlated_beta_update() from 'cor_MCMC_functions.R'; & Z_update(), q_update(), de_update1(), de_update0()
## from 'common_MCMC_functions.R' to update different parameters in MCMC.
## It calls select_subset(), overall_pleio_measure() from 'summary_functions.R' to summarize the MCMC data.
##=================================***************************************************===============================##
##=================================**************Argument explainations***************===============================##
## variantName: name of the genetic variant. It must be a character vector of length of 1.
## traitNames: name of phenotypes. It must be a character vector containing the phenotypes names.
## X: beta hat vector.
## S: covariance matrix of X (beta hat) for the phenotypes.
## updateDE: logical. Indicates whether to update the parameter in MCMC or not.
## RP: total number of replications in the MCMC.
## burn.in: burn in period - after which the MCMC sample should be selected.
## Note that, traitNames, X, s.e., and S, all will have the same order in phenotypes.
##=================================***************************************************===============================##
##library("MASS") ## MASS package is required
CPBayes_cor = function(variantName, traitNames, X, s.e., corln, updateDE, MinSlabVar, MaxSlabVar, RP, burn.in)
{
set.seed(10)
K = length(X)
PPAj_thr = 0.20 ## PPAj threshold
## If the covariance matrix is not positive definite, the diagonal elements are incremented to make it PD
S <- diag(s.e.)%*%corln%*%diag(s.e.)
epsilon = 10^(-5)
increment = rep(epsilon,K)
while(det(S) <= 0)
{ diag(S) = diag(S)+increment
}
S.inv = solve(S) ## inverse of the cov matrix to be passed in MCMC function
## set the initial choices of parameters
tau <- 0.01 ## spike sd
CentralSlabVar <- (MinSlabVar+MaxSlabVar)/2
nonNullVar <- CentralSlabVar ## central value of slab variance
de <- sqrt(tau^2/nonNullVar) ## 1/de - ratio of spike and slab variances
## specfications for updating 'de' parameter
	min_var <- MinSlabVar ## minimum value of slab variance
max_var <- MaxSlabVar ## maximum value of slab variance
max_de <- tau/sqrt(min_var) ## maximum choice of 'de' parameter
min_de <- tau/sqrt(max_var) ## minimum choice of 'de'
shape1_de <- 1 ## choice of shape1 parameter of Beta(shape1,1) prior of updating 'de'
## an informed initialization
initiate <- initiate_MCMC( K, X, s.e. )
beta <- initiate$beta
Z <- initiate$Z
q <- initiate$q
nA <- initiate$K1_FDR ## number of associated traits
## specify the shape parameters of 'q' prior
shape2 <- 1 ## shape2 parameter for Beta prior of q
LB <- 0.1; UB <- 0.5;
qm <- nA/K
if(qm < LB) qm <- LB
if(qm > UB) qm <- UB
#qm <- 0.25
shape1 <- (qm/(1-qm)) * shape2
thinning <- 1 ## thinning period for MCMC
mcmc.samplesize <- (RP-burn.in) %/% thinning; Z.data <- matrix(0,mcmc.samplesize,K); row <- 0 ;
sim.beta <- matrix(0,mcmc.samplesize,K); sample_probZ_zero <- matrix(0,mcmc.samplesize,K);
for(rp in 1:RP)
{
## Update the main beta parameters
beta = correlated_beta_update(K, tau, de, Z, X, S.inv)
## Update Z
res_Z <- Z_update(K, q, tau, de, beta)
Z <- res_Z$Z
probZ_zero <- res_Z$prob
## Update q
q = q_update(K, Z, shape1, shape2)
## Update de
if(updateDE == TRUE){
if(sum(Z) > 0)
de <- de_update1(min_de, max_de, shape1_de, beta, Z, tau)
else de <- de_update0(min_de, max_de, shape1_de)
}
if(rp > burn.in && rp%%thinning == 0)
{ row = row + 1 ;
Z.data[row,] = Z ;
sim.beta[row,] = beta ;
sample_probZ_zero[row,] = probZ_zero ;
}
} # close rp
##......................................... Compute the summary .................................................##
## selection of subset
cor.subset <- select_subset( K, Z.data, mcmc.samplesize )
selected_traits <- NULL
if( length(cor.subset) > 0 ) selected_traits <- traitNames[cor.subset]
## extracting traits having PPAj > PPAj_thr
asso.pr = colSums(Z.data)/mcmc.samplesize
which_traits = which(asso.pr > PPAj_thr)
imp_PPAj = 0; imp_traits = 0; important_phenos = NULL
if(length(which_traits) > 0){
imp_PPAj = asso.pr[which_traits]
imp_traits = traitNames[which_traits]
important_phenos = data.frame( traits = imp_traits, PPAj = imp_PPAj, stringsAsFactors = FALSE)
}
## calculate the Bayes factor and PPNA
pleio_evidence <- overall_pleio_measure( K, shape1, shape2, sample_probZ_zero )
log10_BF_cor <- pleio_evidence$log10_BF
PPNA.cor <- pleio_evidence$PPNA
## return the outputs
data = list( variantName = variantName, log10_BF = log10_BF_cor, locFDR = PPNA.cor, subset = selected_traits,
important_traits = important_phenos, auxi_data = list( traitNames = traitNames, K = K,
mcmc.samplesize = mcmc.samplesize, PPAj = asso.pr, Z.data = Z.data, sim.beta = sim.beta, betahat = X, se = s.e. ) )
}
##=================================***********Combined strategy of CPBayes************===============================##
## This function combines the uncorrelated and correlated versions of CPBayes to propose a combined strategy.
## It first runs the correlated version of CPBayes.
## Then it checks whether the traits selected are those having the smallest univariate p-values.
## If not, it runs the uncorrelated version and accepts the results obtained from that run.
## So, it first calls CPBayes_cor(), then, if necessary, CPBayes_uncor().
## This combined strategy is the primary analysis routine used for correlated summary statistics.
##=================================***************************************************===============================##
##=================================**************Argument explainations***************===============================##
## variantName: name of the genetic variant. It must be a character vector of length of 1
## traitNames: name of phenotypes. It must be a character vector containing the phenotypes names
## X: beta hat vector
## s.e.: standard error vector
## corln: correlation matrix of X (beta hat) for the phenotypes
## updateDE: logical. Indicates whether to update the slab-variance parameter 'de' in the MCMC or not
## MinSlabVar: minimum value of the slab variance
## MaxSlabVar: maximum value of the slab variance
## RP: total number of replications in the MCMC
## burn.in: burn in period, after which the MCMC sample should be selected
## Note that traitNames, X, s.e., and corln must all follow the same ordering of the phenotypes
##=================================***************************************************===============================##
## Combined strategy using both the correlated and uncorrelated versions of the Bayesian meta-analysis, for use with correlated summary statistics
combined_CPBayes = function(variantName, traitNames, X, s.e., corln, updateDE, MinSlabVar, MaxSlabVar, RP, burn.in)
{
ptm1 <- proc.time()
K = length(X)
## First, run correlated version of CPBayes
res_cor = CPBayes_cor(variantName, traitNames, X, s.e., corln, updateDE, MinSlabVar, MaxSlabVar, RP, burn.in)
selected_traits = res_cor$subset
cor.subset <- match(selected_traits, traitNames)
## compute the length of the subset of traits selected by CPBayes
K1_cor = length(cor.subset)
if(K1_cor > 1) cor.subset = sort(cor.subset)
indi_uncor = 0 ## whether the uncorrelated version of CPBayes was chosen for final analysis
if(K1_cor > 0){
pv = pchisq( (X/s.e.)^2, df=1, lower.tail=F )
sort.pv = sort(pv, index.return = TRUE)
sorted = sort.pv$x
sorted.index = sort.pv$ix
pv.subset = sorted.index[1:K1_cor]
pv.subset = sort(pv.subset)
## check whether correlated CPBayes selected the traits having the smallest p-values
diff = pv.subset-cor.subset
Sum = diff%*%diff
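## Sum equals zero if and only if the subset selected by correlated CPBayes coincides with the set of
## K1_cor traits having the smallest univariate p-values (both index vectors are sorted).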
## implementing uncor CPBayes if the checking is not satisfied
if(Sum > 0){
res_uncor = CPBayes_uncor( variantName, traitNames, X, s.e., updateDE, MinSlabVar, MaxSlabVar, RP, burn.in )
indi_uncor = 1
}
}
combined_res <- 0
uncor_use <- 0
if(indi_uncor == 0){
combined_res <- res_cor; uncor_use <- "No";
}else{
combined_res <- res_uncor; uncor_use <- "Yes";
}
ptm2 <- proc.time()
ptm <- ptm2-ptm1
#print(ptm2-ptm1)
combined_res$uncor_use <- uncor_use ## Did the combined strategy choose uncorrelated version
combined_res$runtime <- ptm
#combined_res$corCPBayes <- res_cor ## collecting the output generated by Correlated CPBayes
return(combined_res)
}
## ===================== end of R/cor_CPBayes_functions.R =====================
## Function to estimate correlation using case-control sample overlap matrix
#' Estimate correlation structure of beta-hat vector for multiple overlapping case-control studies
#' using sample-overlap matrices.
#'
#' It computes an approximate correlation matrix of the estimated beta (log odds ratio) vector for multiple overlapping
#' case-control studies using the sample-overlap matrices which describe
#' the number of cases or controls shared between studies/traits, and the number of subjects
#' who are case for one study/trait but control for another study/trait. For a cohort study,
#' the phenotypic correlation matrix should be a reasonable substitute of this correlation matrix.
#' These approximations are accurate when none of the diseases/traits is associated with
#' the environmental covariates and genetic variant.
#'
#'***Important note on the estimation of correlation structure of correlated beta-hat vector:***
#' In general, environmental covariates are expected to be present in a study and associated
#' with the phenotypes of interest. Also, a small proportion of genome-wide genetic variants
#' are expected to be associated. Hence the above approximation of the correlation matrix
#' may not be accurate. So in general, we recommend
#' an alternative strategy to estimate the correlation matrix using the genome-wide summary
#' statistics data across traits as follows. First, extract all the SNPs for each of which the
#' trait-specific univariate association p-value across all the traits are > 0.1. The
#' trait-specific univariate association p-values are obtained using the beta-hat
#' and standard error for each trait. Each of the SNPs selected in this way is either weakly
#' or not associated with any of the phenotypes (null SNP). Next, select a set of independent
#' null SNPs from the initial set of null SNPs by using a threshold of r^2 < 0.01 (r: the
#' correlation between the genotypes at a pair of SNPs). In the absence of in-sample
#' linkage disequilibrium (LD) information, one can use the reference panel LD information
#' for this screening. Finally, compute the correlation
#' matrix of the effect estimates (beta-hat vector) as the sample correlation matrix of the
#' beta-hat vector across all the selected independent null SNPs. This strategy is more
#' general and applicable to a cohort study or multiple overlapping studies for binary or
#' quantitative traits with arbitrary distributions. It is also useful when the beta-hat vector
#' for multiple non-overlapping studies become correlated due to genetically related
#' individuals across studies. Misspecification of the correlation
#' structure can affect the results produced by CPBayes to some extent. Hence, if
#' genome-wide summary statistics data across traits is available, we highly recommend to use
#' this alternative strategy to estimate the correlation matrix of the beta-hat vector.
#' See our paper for more details.
#'
#' @param n11 An integer square matrix (number of rows must be the same as the number of
#' studies/traits) providing the
#' number of cases shared between all possible pairs of studies/traits. So (k,l)-th element of n11
#' is the number of subjects who are case for both k-th and l-th study/trait. Note that the diagonal elements of
#' n11 are the number of cases in the studies/traits. If no case is shared between studies/traits,
#' the off-diagonal elements of n11 will be zero. No default is specified.
#' @param n00 An integer square matrix (number of rows must be the same as the
#' number of studies/traits) providing the
#' number of controls shared between all possible pairs of studies/traits. So (k,l)-th element of n00
#' is the number of subjects who are control for both k-th and l-th study/trait. Note that the diagonal
#' elements of n00 are the number of controls in the studies/traits. If no control is
#' shared between studies/traits,
#' the off-diagonal elements will be zero. No default is specified.
#' @param n10 An integer square matrix (number of rows must be the same as the
#' number of studies/traits) providing the
#' number of subjects who are case for one study/trait and control for another study/trait.
#' Clearly, the diagonal elements
#' will be zero. An off diagonal element, e.g., (k,l)-th element of n10 is the number of subjects who
#' are case for k-th study/trait and control for l-th study/trait. If there is no such overlap,
#' all the elements
#' of n10 will be zero. No default is specified.
#' @return This function returns an approximate correlation matrix of the beta-hat vector for
#' multiple overlapping case-control studies. See the example below.
#'
#' @references Majumdar A, Haldar T, Bhattacharya S, Witte JS (2018) An efficient Bayesian meta analysis approach for studying cross-phenotype genetic associations. PLoS Genet 14(2): e1007139.
#'
#' @seealso \code{\link{cpbayes_cor}}
#'
#' @examples
#' data(SampleOverlapMatrix)
#' n11 <- SampleOverlapMatrix$n11
#' n11
#' n00 <- SampleOverlapMatrix$n00
#' n00
#' n10 <- SampleOverlapMatrix$n10
#' n10
#' cor <- estimate_corln(n11, n00, n10)
#' cor
#'
#' @export
estimate_corln <- function(n11, n00, n10)
{
## Checking of the three matrices n11, n00, n10
## checking of n11 - diagonal elements > 0, (symmetric)
## checking of n00 - diagonal elements > 0 (symmetric)
## checking of n10 - diagonal elements = 0
## each matrix will be integer square matrix, of same dimension
if(missing(n11) || missing(n00) || missing(n10))
stop("n11, n00 or n10 matrix is missing!", call. = FALSE)
else
chkEstCorln(n11, n00, n10)
n01 <- t(n10) #n01 = transpose(n10)
n1 <- diag(n11) #number of cases for different traits
n0 <- diag(n00) #number of control for different traits
n <- n1+n0 #total sample size of different studies
sqrt_n <- sqrt(n) #square root of sample size of different studies
sqrt_samp_size_prod <- sqrt_n%*%t(sqrt_n) #sqrt of sample size product matrix
sqrt_n0 <- sqrt(n0) #sqrt of number of controls across studies
t_sqrt_n0 <- t(sqrt_n0) #transpose of sqrt of number of controls column vector
sqrt_n1 <- sqrt(n1) #sqrt of number of cases across studies
t_sqrt_n1 <- t(sqrt_n1) #transpose of number of cases column vector
sqrt_n0_n0_prod <- sqrt_n0 %*% t_sqrt_n0
sqrt_n1_n1_prod <- sqrt_n1 %*% t_sqrt_n1
sqrt_n1_n0_prod <- sqrt_n1 %*% t_sqrt_n0
sqrt_n0_n1_prod <- sqrt_n0 %*% t_sqrt_n1
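## The (k,l)-th element of the approximate correlation matrix computed below is:
## corr_kl = [ n11_kl*sqrt(n0k*n0l/(n1k*n1l)) - n10_kl*sqrt(n0k*n1l/(n1k*n0l))
## - n01_kl*sqrt(n1k*n0l/(n0k*n1l)) + n00_kl*sqrt(n1k*n1l/(n0k*n0l)) ] / sqrt(nk*nl),
## where n1k, n0k and nk denote the numbers of cases, controls and total subjects in study/trait k.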
a1 <- n11/sqrt_samp_size_prod #1st component of 1st part of correlation
a2 <- sqrt_n0_n0_prod/sqrt_n1_n1_prod #2nd component of 1st part of correlation
a <- a1*a2
b1 <- n10/sqrt_samp_size_prod #1st component of 2nd part of correlation
b2 <- sqrt_n0_n1_prod/sqrt_n1_n0_prod #2nd component of 2nd part of correlation
b <- b1*b2
c1 <- n01/sqrt_samp_size_prod #1st component of 3rd part of correlation
c2 <- sqrt_n1_n0_prod/sqrt_n0_n1_prod #2nd component of 3rd part of correlation
c <- c1*c2
d1 <- n00/sqrt_samp_size_prod #1st component of 4th part of correlation
d2 <- sqrt_n1_n1_prod/sqrt_n0_n0_prod #2nd component of 4th part of correlation
d <- d1*d2
corr <- a-b-c+d
diag(corr) <- rep(1,nrow(n11))
return(corr)
}
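## The roxygen note for estimate_corln() recommends, as a more general alternative, estimating the
## correlation matrix of the beta-hat vector from genome-wide summary statistics using null SNPs.
## A minimal sketch of that strategy follows. It is NOT an exported or documented part of CPBayes:
## the function name and its inputs are hypothetical. It assumes beta_mat and se_mat are numeric
## matrices with one row per SNP (row names = SNP IDs) and one column per trait, and that
## pruned_snps is a character vector of SNP IDs already LD-pruned at r^2 < 0.01 by the user.
estimate_corln_null_snps <- function(beta_mat, se_mat, pruned_snps, pv_thr = 0.1)
{
## univariate association p-value for every SNP (row) and trait (column)
pv_mat <- pchisq((beta_mat/se_mat)^2, df = 1, lower.tail = FALSE)
## keep SNPs that are 'null' for every trait (all trait-specific p-values above the threshold)
null_snps <- rownames(beta_mat)[apply(pv_mat, 1, function(p) all(p > pv_thr))]
## restrict to the user-supplied set of independent (LD-pruned) SNPs
null_snps <- intersect(null_snps, pruned_snps)
## sample correlation matrix of beta-hat across the selected independent null SNPs
cor(beta_mat[null_snps, , drop = FALSE])
}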
## ===================== end of R/corln_estimation.R =====================
# Documentation of the example data set to demonstrate how to run the uncorrelated version of CPBayes
#'An example data for uncorrelated summary statistics.
#'
#'ExampleDataUncor is a list which has two components: BetaHat, SE. The numeric vector
#'ExampleDataUncor$BetaHat contains the main genetic effect (beta/log(odds ratio)) estimates
#'for a single nucleotide polymorphism (SNP) obtained from 10 separate case-control studies
#'for 10 different diseases. In each case-control study comprising a
#'distinct set of 7000 cases and 10000 controls, we fit a logistic regression
#'of the case-control status on the genotype coded as the minor allele count
#'for all the individuals in the sample. One can also include various covariates,
#'such as, age, gender, principal components (PCs) of ancestries in the logistic regression.
#'From each logistic regression for a
#'disease, we obtain the estimate of the main genetic association parameter
#'(beta/log(odds ratio)) along with the corresponding standard error.
#'Since the studies do not have any overlapping subjects, the beta-hat values across the traits are uncorrelated.
#'ExampleDataUncor$SE is the second numeric vector that contains the standard errors corresponding to
#'the uncorrelated beta-hat vector.
#' @usage data(ExampleDataUncor)
#'
#' @format A list of two numeric vectors each of length 10 (for 10 studies):
#' \describe{
#' \item{BetaHat}{beta hat vector of length 10.}
#' \item{SE}{standard error vector corresponding to beta-hat vector.}
#' }
#'
#' @examples
#' data(ExampleDataUncor)
#' BetaHat <- ExampleDataUncor$BetaHat
#' BetaHat
#' SE <- ExampleDataUncor$SE
#' SE
#' \donttest{cpbayes_uncor(BetaHat, SE)}
"ExampleDataUncor"
#' An example data for correlated summary statistics.
#'
#'ExampleDataCor is a list consisting of three components: BetaHat, SE, cor. ExampleDataCor$BetaHat is a
#'numeric vector that contains the main genetic effect (beta/log(odds ratio)) estimates
#'for a SNP across 10 overlapping case-control studies for 10 different diseases. Each of the 10 studies has
#'a distinct set of 7000 cases and a common set of 10000 controls shared across all the studies.
#'In each case-control study, we fit a logistic regression of the case-control status on the genotype
#'coded as the minor allele count for all the individuals in the sample. One can also include various
#'covariates, such as, age, gender, principal components (PCs) of ancestries in the logistic regression.
#'From each logistic regression for a disease, we obtain the estimate of the main genetic association
#'parameter (beta/log(odds ratio)) along with the corresponding standard error. Since the studies have overlapping
#'subjects, the beta-hat values across traits are correlated. ExampleDataCor$SE contains the standard error vector
#'corresponding to the correlated beta-hat vector. ExampleDataCor$cor is a numeric square matrix providing
#'the correlation matrix of the correlated beta-hat vector.
#'
#' @usage data(ExampleDataCor)
#'
#' @format A list consisting of two numeric vectors (each of length 10) and a numeric square matrix of
#'dimension 10 by 10:
#'\describe{
#' \item{BetaHat}{beta hat vector of length 10.}
#' \item{SE}{standard error vector corresponding to the beta-hat vector.}
#' \item{cor}{correlation matrix of the beta-hat vector.}
#'}
#'
#' @examples
#' data(ExampleDataCor)
#' BetaHat <- ExampleDataCor$BetaHat
#' BetaHat
#' SE <- ExampleDataCor$SE
#' SE
#' cor <- ExampleDataCor$cor
#' cor
#' \donttest{cpbayes_cor(BetaHat, SE, cor)}
"ExampleDataCor"
#' An example data of sample-overlap matrices.
#'
#' An example data of sample-overlap matrices for five different diseases in the Kaiser
#' GERA cohort (a real data).
#' SampleOverlapMatrix is a list that contains an example of the sample overlap matrices
#' for five different diseases in the Kaiser GERA cohort. SampleOverlapMatrix$n11
#' provides the number of cases shared between all possible pairs of diseases.
#' SampleOverlapMatrix$n00 provides the number of controls shared between all possible
#' pairs of diseases. SampleOverlapMatrix$n10 provides the number of subjects who
#' are case for one disease and control for another disease.
#'
#' @usage data(SampleOverlapMatrix)
#'
#' @format A list consisting of three integer square matrices (each of dimension 5 by 5):
#'\describe{
#' \item{n11}{number of cases shared between all possible pairs of diseases.}
#' \item{n00}{number of controls shared between all possible pairs of diseases.}
#' \item{n10}{number of subjects who are case for one disease and control for another disease.}
#'}
#' @examples
#' data(SampleOverlapMatrix)
#' n11 <- SampleOverlapMatrix$n11
#' n11
#' n00 <- SampleOverlapMatrix$n00
#' n00
#' n10 <- SampleOverlapMatrix$n10
#' n10
#'\donttest{estimate_corln(n11,n00,n10)}
"SampleOverlapMatrix"
## ===================== end of R/data.R =====================
##============================ Some supporting functions for functions for users ===========================##
## Check primary variables
checkPrimaryVar <- function(VAR, nameVAR)
{
# Check whether VAR is a vector
if(!is.vector(VAR))
{
if(is.matrix(VAR) && any(dim(VAR)==1))
{
VAR <- as.vector(VAR)
warning( paste(nameVAR, "is a matrix!"), call. = FALSE)
}
else
stop(paste(nameVAR, "must be a vector."), call. = FALSE)
}
# Check whether VAR is a numeric vector
if(!is.numeric(VAR))
stop(paste(nameVAR, "must be a numeric vector."), call. = FALSE)
# Check whether there is any NA
if(any(is.na(VAR)))
stop(paste(nameVAR, "for one or more phenotypes are missing!"), call. = FALSE)
# Check whether there is more than one non-missing arguments
if(length(VAR) <= 1)
stop(paste("Number of elements in the", nameVAR, "vector must be more than 1!"), call. = FALSE)
return(VAR)
}
## Check input variable 'Phenotypes'
checkPhen <- function(Phenotypes, BetaHat)
{
# Check whether argument 3 is a vector
if(!is.vector(Phenotypes))
stop("Phenotypes must be a vector.", call. = FALSE)
# Check whether argument 3 is a character vector
if(!is.character(Phenotypes))
stop("Phenotypes must be a character vector.", call. = FALSE)
# Check whether there are duplicate phenotypes
if(length(Phenotypes) > length(unique(Phenotypes)))
stop("Two or more phenotypes have the same name!", call. = FALSE)
# Check whether Phenotypes and BetaHat vectors have the same length
if(length(Phenotypes) != length(BetaHat))
stop("BetaHat and Phenotypes vectors must have the same number of elements!", call. = FALSE)
}
## Check input variable
checkVarName <- function(variantName)
{
# Check whether argument 4 is a vector
if(!is.vector(variantName))
stop("Variant must be a vector.", call. = FALSE)
# Check whether argument 4 is a vector of length 1
if(length(variantName) > 1)
stop("Variant must be a vector of length 1.", call. = FALSE)
# Check whether argument 4 is NA
if(is.na(variantName))
{
warning("Variant is NA!", call. = FALSE)
variantName <- as.character(variantName)
}
# Check whether argument 4 is not NA but numeric
if(!is.na(variantName) && !is.character(variantName))
{
warning("Variant is not a character vactor!", call. = FALSE)
variantName <- as.character(variantName)
}
return(variantName)
}
checkCorln <- function(Corln, BetaHat)
{
# Check whether Corln is a data.frame
if(is.data.frame(Corln))
stop("Corln must be a matrix not a data.frame. Use as.matrix() to convert the data.frame into matrix.", call. = FALSE)
# Check whether Corln is a matrix
if(!is.matrix(Corln))
stop("Corln must be a matrix.", call. = FALSE)
# Check whether Corln is numeric
if(!is.numeric(Corln))
stop("Corln must be a numeric matrix.", call. = FALSE)
# Check whether there is any missing value (NA)
if(any(is.na(Corln)))
stop("One or more entries of Corln are missing!", call. = FALSE)
# Check for Corln, whether number of rows = number of columns
if(nrow(Corln) != ncol(Corln))
stop("Number of rows and columns of Corln are different!", call. = FALSE)
# Check whether number of rows of corln matrix is same as no. of entries in BetaHat
if(nrow(Corln) != length(BetaHat))
stop("Number of rows of Corln and length of BetaHat do not match!", call. = FALSE)
# Save as matrix
Cor <- as.matrix(Corln)
# Check whether a symmetric matrix. First, make row names and col names identical.
row.names(Cor) <- colnames(Cor)
if(!isSymmetric(Cor))
stop("Corln is not symmetric!", call. = FALSE)
# Check whether the determinant is negative (i.e., Corln is not positive semi-definite)
if(det(Cor) < 0)
stop("Corln is not positive semi-definite!", call. = FALSE)
# Check whether diagonal elements are 1
if(dist(rbind(diag(Cor), rep(1, dim(Cor)[1]))) != 0)
stop("Diagonal elements of Corln are not 1!", call. = FALSE)
# Check whether a singular matrix
if(det(Cor) == 0)
warning("Corln is a singular matrix!", call. = FALSE)
return(Cor)
}
print_result <- function(input)
{
#cat("RESULT ::", "\n")
#gvar <- paste(" genetic_variant", input$genetic_variant, sep = " : ")
#cat(gvar, "\t")
#BF <- input$log10_BF
#BF <- round(BF, digits = 2)
#BF <- paste("log10_BF", BF, sep = ": ")
#cat(BF, "\n")
#PPNA <- input$locFDR
#x <- PPNA
#count <- 0
#while(x < 1){ x <- 10*x; count <- count+1 }
#PPNA <- round(PPNA, digits = count+1)
#locFDR <- paste("locFDR", PPNA, sep = " : ")
#cat(locFDR, "\n")
cat("Important traits with trait-specific posterior prob of assoc: \n")
if(sum(dim(input$important_traits)) > 0){
dat = input$important_traits
dat$PPAj = round(dat$PPAj, 2)
print(dat)
cat("\n")
}else{
cat("None\n")
}
}
chk_n_stp1 <- function(arg, argName)
{
# Check whether arg is a data.frame
if(is.data.frame(arg))
stop(paste(argName, "must be a matrix not a data.frame. Use as.matrix() to convert the data.frame into matrix."), call. = FALSE)
# Check whether arg is a matrix
if(!is.matrix(arg))
stop(paste(argName, "must be a matrix."), call. = FALSE)
# Check for arg, whether number of rows = number of columns
if(nrow(arg) != ncol(arg))
stop(paste("Number of rows and columns of", argName, "are different!"), call. = FALSE)
# Check whether there is any missing value (NA)
if(any(is.na(arg)))
stop(paste("One or more entries of", argName, "are missing!"), call. = FALSE)
# Check whether arg is numeric
if(!is.numeric(arg))
stop(paste(argName, "must be a numeric matrix."), call. = FALSE)
# Check whether non-negative integer matrix
if(any(arg%%1 != 0) || any(arg < 0))
stop(paste("Every element of", argName, "must be a non-negative integer."), call. = FALSE)
}
chkEstCorln <- function(n11, n00, n10)
{
chk_n_stp1(n11, "n11")
chk_n_stp1(n00, "n00")
chk_n_stp1(n10, "n10")
if((nrow(n11) != nrow(n00)) || (nrow(n10) != nrow(n00)))
stop("n11, n00, n10 matrices must have same dimension.", call. = FALSE)
row.names(n11) <- colnames(n11)
row.names(n00) <- colnames(n00)
row.names(n10) <- colnames(n10)
if(!isSymmetric(n11))
stop("n11 must be symmetric!", call. = FALSE)
if(!isSymmetric(n00))
stop("n00 must be symmetric!", call. = FALSE)
if(any(diag(n11) == 0))
stop("Diagonal elements of n11 must be positive integer.", call. = FALSE)
if(any(diag(n00) == 0))
stop("Diagonal elements of n00 must be positive integer.", call. = FALSE)
if(any(diag(n10) != 0))
stop("Diagonal elements of n10 must be zero.", call. = FALSE)
}
chkSlabVarSpikeVar <- function(SpikeVar=0.0001, SlabVar=0.8){
defaultVal <- list("SpikeVar" = 0.0001, "SlabVar" = 0.8)
minVal <- list("SpikeVar" = 0, "SlabVar" = 0)
var_list <- list("SpikeVar" = defaultVal[["SpikeVar"]], "SlabVar" = defaultVal[["SlabVar"]])
if(!missing(SpikeVar)) var_list[["SpikeVar"]] <- SpikeVar
if(!missing(SlabVar)) var_list[["SlabVar"]] <- SlabVar
for(var in names(var_list)) {
if(!is.vector(var_list[[var]]) || length(var_list[[var]]) != 1) {
warning(var, " is not a scalar (default option used).", call. = FALSE)
var_list[[var]] <- defaultVal[[var]]
}
else if(!is.numeric(var_list[[var]])) {
warning(var, " is not numeric (default option used).", call. = FALSE)
var_list[[var]] <- defaultVal[[var]]
}
else if(var_list[[var]] <= minVal[[var]]) {
warning(var, " is not positive (default option used).", call. = FALSE)
var_list[[var]] <- defaultVal[[var]]
}
}
return(var_list)
}
chkSlabVar <- function(MinSlabVar = 0.6, MaxSlabVar = 1.0) {
defaultVal <- list("MinSlabVar" = 0.6, "MaxSlabVar" = 1.0)
boundVal <- list("MinSlabVar" = 0.01, "MaxSlabVar" = 10.0)
var_list <- list("MinSlabVar" = defaultVal[["MinSlabVar"]], "MaxSlabVar" = defaultVal[["MaxSlabVar"]])
if(!missing(MinSlabVar)) var_list[["MinSlabVar"]] <- MinSlabVar
if(!missing(MaxSlabVar)) var_list[["MaxSlabVar"]] <- MaxSlabVar
for(var in names(var_list)) {
# Check whether argument 6 is a vector of length 1
if(!is.vector(var_list[[var]]) || (length(var_list[[var]]) != 1)) {
warning(var, " is not a scalar (default option used).", call. = FALSE)
var_list[[var]] <- defaultVal[[var]]
}
# Check whether argument 6 is numeric
else if(!is.numeric(var_list[[var]])) {
warning(var, " is not numeric (default option used).", call. = FALSE)
var_list[[var]] <- defaultVal[[var]]
}
}
# Check whether argument 6 is more than its minimum bound
if(var_list[["MinSlabVar"]] < boundVal[["MinSlabVar"]]){
warning("MinSlabVar should not be smaller than ", boundVal[["MinSlabVar"]], ", so it is assigned to ", boundVal[["MinSlabVar"]], ".", call. = FALSE)
var_list[["MinSlabVar"]] <- boundVal[["MinSlabVar"]]
}
if(var_list[["MaxSlabVar"]] > boundVal[["MaxSlabVar"]]){
warning("MaxSlabVar should not be bigger than ", boundVal[["MaxSlabVar"]], ", so it is assigned to ", boundVal[["MaxSlabVar"]], ".", call. = FALSE)
var_list[["MaxSlabVar"]] <- boundVal[["MaxSlabVar"]]
}
# Argument 6 and 7 :: Checking MinSlabVar < MaxSlabVar
if(var_list[["MinSlabVar"]] >= var_list[["MaxSlabVar"]]){
print(var_list[["MinSlabVar"]])
print(var_list[["MaxSlabVar"]])
warning("MaxSlabVar is not bigger than MinSlabVar! (default option used).", call. = FALSE)
var_list[["MaxSlabVar"]] <- defaultVal[["MaxSlabVar"]]
var_list[["MinSlabVar"]] <- defaultVal[["MinSlabVar"]]
}
return(var_list)
}
chkMCMCparam <- function(MCMCiter = 7500, Burnin = 500) {
defaultVal <- list("MCMCiter" = 7500, "Burnin" = 500)
lBound <- list("MCMCiter" = 2200, "Burnin" = 200)
min_mcmc_sample_size = lBound$MCMCiter - lBound$Burnin
var_list <- list("MCMCiter" = defaultVal[["MCMCiter"]], "Burnin" = defaultVal[["Burnin"]])
if(!missing(MCMCiter)) var_list[["MCMCiter"]] <- MCMCiter
if(!missing(Burnin)) var_list[["Burnin"]] <- Burnin
for(var in names(var_list)) {
if(!is.vector(var_list[[var]]) || (length(var_list[[var]]) != 1)){
warning(var, " is not a scalar (default option used).", call. = FALSE)
var_list[[var]] <- defaultVal[[var]]
}else if(!is.numeric(var_list[[var]]) || var_list[[var]]%%1 != 0){
print(var_list[[var]])
warning(var, " not provided as integer (default option used).", call. = FALSE)
var_list[[var]] <- defaultVal[[var]]
}else if(var_list[[var]] < lBound[[var]]){
warning(var, " should be at least ", lBound[[var]], " (default option used).", call. = FALSE)
var_list[[var]] <- defaultVal[[var]]
}
}
if((var_list[["MCMCiter"]] - var_list[["Burnin"]]) < min_mcmc_sample_size)
{
warning("MCMC sample size (MCMCiter - Burnin) provided less than 2000 (default options used)", call. = FALSE)
var_list[["MCMCiter"]] <- defaultVal[["MCMCiter"]]
var_list[["Burnin"]] <- defaultVal[["Burnin"]]
}
return(var_list)
}
## ===================== end of R/help.R =====================
## DEPENDENCY: source("common_MCMC_functions.R"); library("mvtnorm"); library("purrr")
## Additional functions to compute the locFDR and Bayes factor analytically (in a straightforward way) for the uncorrelated and correlated cases.
##============= choose the shape parameters of the Beta distribution for 'q' prior. ==================
choose_shape_parameters = function(K, X, SE){
## an informed initialization
level = 0.01
pv = pchisq( (X/SE)^2, df=1, lower.tail=FALSE)
traits <- paste0("Trait", 1:K)
select <- BH_selection(pv, traits, level)
nA <- length(select)
## specify the shape parameters of 'q' prior
shape2 <- 1 ## shape2 parameter for Beta prior of q
LB <- 0.1; UB <- 0.5;
qm <- nA/K
if(qm < LB) qm <- LB
if(qm > UB) qm <- UB
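## shape1 is chosen so that the prior mean of q, shape1/(shape1 + shape2), equals qm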
shape1 <- (qm/(1-qm)) * shape2
shapes <- list(shape1 = shape1, shape2 = shape2)
shapes
}
###################################################################################################
################################# UNCORRELATED SUMMARY STATISTICS ##################################
####################################################################################################
## helper: log marginal density of a beta-hat x when the true effect has a N(0, v^2) prior and the sampling sd is s, i.e., log N(x; 0, s^2 + v^2)
logexponent = function(x, s, v){
## x = betahat, s = standard error, v = prior sd
s2 = s^2
v2 = v^2
sigma2 = 1/((1/s2) + (1/v2))
sigma = sqrt(sigma2)
mu = (sigma2/s2) * x
part1 = -log(sqrt(2*pi)) + log(sigma) - log(s) - log(v)
part2 = (mu^2/(2*sigma2)) - (x^2/(2*s2))
log_exponent = part1 + part2
}
################################## Most recent code for computing locFDR theoretically ##########################
##-------------------------------------------------------------------------------------------------------------##
#' Analytic calculation of the local FDR & Bayes factor for uncorrelated summary statistics.
#'
#' Run the \code{\link{analytic_locFDR_BF_uncor}} function to analytically compute the local FDR & Bayes factor (BF)
#' that quantifies the evidence of aggregate-level pleiotropic association for uncorrelated summary statistics.
#' Here a fixed value of the slab variance is considered, instead of the range of slab variances used in \code{\link{cpbayes_uncor}}.
#' @param BetaHat A numeric vector of length K where K is the number of phenotypes. It
#' contains the beta-hat values across studies/traits. No default.
#' @param SE A numeric vector with the same dimension as BetaHat providing the standard errors
#' corresponding to BetaHat. Every element of SE must be positive. No default.
#' @param SpikeVar Variance of spike (normal distribution with small variance) representing the null effect distribution.
#' Default is 10^(-4).
#' @param SlabVar Variance of slab normal distribution representing the non-null effect distribution.
#' Default is 0.8.
#' @return The output produced by the function is a list which consists of the local FDR and log10(Bayes factor).
#' \item{locFDR}{It provides the analytically computed local false discovery rate (posterior probability of null association) under CPBayes model
#' (a Bayesian analog of the p-value) which is a measure of the evidence of the
#' aggregate-level pleiotropic association. Bayes factor is adjusted for prior odds, but
#' locFDR is solely a function of the posterior odds.}
#' \item{log10_BF}{It provides the analytically computed log10(Bayes factor) produced by CPBayes that measures the
#' evidence of the overall pleiotropic association.}
#'
#' @references Majumdar A, Haldar T, Bhattacharya S, Witte JS (2018) An efficient Bayesian meta analysis approach for studying cross-phenotype genetic associations. PLoS Genet 14(2): e1007139.
#'
#' @seealso \code{\link{cpbayes_uncor}}, \code{\link{analytic_locFDR_BF_cor}}, \code{\link{cpbayes_cor}}, \code{\link{estimate_corln}}, \code{\link{post_summaries}}, \code{\link{forest_cpbayes}}
#'
#' @examples
#' data(ExampleDataUncor)
#' BetaHat <- ExampleDataUncor$BetaHat
#' BetaHat
#' SE <- ExampleDataUncor$SE
#' SE
#' result <- analytic_locFDR_BF_uncor(BetaHat, SE)
#' str(result)
#'
#' @export
analytic_locFDR_BF_uncor = function(BetaHat, SE, SpikeVar=0.0001, SlabVar=0.8){
# Check whether any of the primary arguments is missing
if(missing(BetaHat) || missing (SE))
stop("BetaHat or SE vector is missing!", call. = FALSE)
# Argument 1 :: BetaHat
BetaHat <- checkPrimaryVar(BetaHat, "BetaHat")
# Argument 2 :: SE
SE <- checkPrimaryVar(SE, "SE")
# Check whether all entries are strictly positive
if(!all(SE > 0))
stop("One or more elements in the SE vector are not positive!", call. = FALSE)
# Argument 1 and 2 ::
if(length(BetaHat) != length(SE))
stop("BetaHat and SE vectors must have the same number of elements!", call. = FALSE)
# Argument 3 and 4 ::
chkVar = chkSlabVarSpikeVar(SpikeVar, SlabVar)
spikevar = chkVar[["SpikeVar"]]
slabvar = chkVar[["SlabVar"]]
X = BetaHat
K = length(X) ## number of traits
shapes = choose_shape_parameters(K, X, SE) ## get the shape params
shape1 = shapes$shape1; shape2 = shapes$shape2;
p = shape1/(shape1+shape2) ;
logp = log(p); logq = log(1-p);
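## Closed form computed below (traits independent):
## locFDR = P(all Z_j = 0 | X)
## = prod_j (1-p)*N(x_j; 0, s_j^2 + SpikeVar) / prod_j [ p*N(x_j; 0, s_j^2 + SlabVar) + (1-p)*N(x_j; 0, s_j^2 + SpikeVar) ],
## where p is the prior probability that a single trait is non-null.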
## log null density under all traits null.
trait_index = 1:K; zeromean = numeric(K); Variance = (SE)^2
var_null = Variance + rep(spikevar, K); sd_null = sqrt(var_null);
null_log_density = dnorm(X, mean = zeromean, sd = sd_null, log = TRUE)
null_log_density = sum(null_log_density)
null_log_density = null_log_density + (K*logq) ## combine with null prior probability
## compute the complete data likelihood
log_compL = as.list(1:2)
var_nonnull = Variance + rep(slabvar, K); sd_nonnull = sqrt(var_nonnull);
log_compL[[1]] = logp + dnorm(X, mean = zeromean, sd = sd_nonnull, log = TRUE) ## under non-null
log_compL[[2]] = logq + dnorm(X, mean = zeromean, sd = sd_null, log = TRUE) ## under null
total = exp(log_compL[[1]]) + exp(log_compL[[2]])
log_density = log(total)
logL = sum(log_density)
## compute local FDR.
locFDR = exp(null_log_density - logL)
## compute log10BF
prior_prob_null = exp(K*logq);
logBF = log(1-locFDR) - log(locFDR) + log(prior_prob_null) - log(1-prior_prob_null)
log10_BF = log10(exp(logBF))
if(log10_BF == Inf) log10_BF <- 300
pleio_measure = list(locFDR = locFDR, log10_BF = log10_BF)
pleio_measure
}
###################################################################################################
################################## CORRELATED SUMMARY STATISTICS ##################################
####################################################################################################
########### Compute the log-likelihood under a causal configuration of traits' association status.
non_null_density_computation = function(c, K, spikevar, slabvar, zeromean, X, S){
TAU = rep(spikevar, K); TAU[c] = slabvar;
Sigma = (diag(TAU)) + S
log_density = dmvnorm(X, mean=zeromean, sigma=Sigma, log=TRUE)
log_density
}
########### Main function to compute the local FDR and the Bayes factor for correlated summary statistics ##########
#' Analytic calculation of the local FDR & Bayes factor for correlated summary statistics.
#'
#' Run the \code{\link{analytic_locFDR_BF_cor}} function to analytically compute the local FDR & Bayes factor (BF)
#' that quantifies the evidence of aggregate-level pleiotropic association for correlated summary statistics.
#' Here a fixed value of the slab variance is considered, instead of the range of slab variances used in \code{\link{cpbayes_cor}}.
#' @param BetaHat A numeric vector of length K where K is the number of phenotypes. It
#' contains the beta-hat values across studies/traits. No default.
#' @param SE A numeric vector with the same dimension as BetaHat providing the standard errors
#' corresponding to BetaHat. Every element of SE must be positive. No default.
#' @param Corln A numeric square matrix of order K by K providing the correlation matrix of BetaHat.
#' The number of rows & columns of Corln must be the same as the length of BetaHat. No default
#' is specified. See \code{\link{estimate_corln}}.
#' @param SpikeVar Variance of spike (normal distribution with small variance) representing the null effect distribution.
#' Default is 10^(-4).
#' @param SlabVar Variance of slab normal distribution representing the non-null effect distribution.
#' Default is 0.8.
#' @return The output produced by the function is a list which consists of the local FDR and log10(Bayes factor).
#' \item{locFDR}{It provides the analytically computed local false discovery rate (posterior probability of null association) under CPBayes model
#' (a Bayesian analog of the p-value) which is a measure of the evidence of the
#' aggregate-level pleiotropic association. Bayes factor is adjusted for prior odds, but
#' locFDR is solely a function of the posterior odds.}
#' \item{log10_BF}{It provides the analytically computed log10(Bayes factor) produced by CPBayes that measures the
#' evidence of the overall pleiotropic association.}
#'
#' @references Majumdar A, Haldar T, Bhattacharya S, Witte JS (2018) An efficient Bayesian meta analysis approach for studying cross-phenotype genetic associations. PLoS Genet 14(2): e1007139.
#'
#' @seealso \code{\link{cpbayes_cor}}, \code{\link{estimate_corln}}, \code{\link{analytic_locFDR_BF_uncor}}, \code{\link{cpbayes_uncor}}, \code{\link{post_summaries}}, \code{\link{forest_cpbayes}}
#'
#' @examples
#' data(ExampleDataCor)
#' BetaHat <- ExampleDataCor$BetaHat
#' BetaHat
#' SE <- ExampleDataCor$SE
#' SE
#' cor <- ExampleDataCor$cor
#' cor
#' result <- analytic_locFDR_BF_cor(BetaHat, SE, cor)
#' str(result)
#'
#' @export
analytic_locFDR_BF_cor = function(BetaHat, SE, Corln, SpikeVar=0.0001, SlabVar=0.8){
# Check whether any of the primary arguments is missing
if(missing(BetaHat) || missing (SE))
stop("BetaHat or SE vector is missing!", call. = FALSE)
if(missing(Corln))
stop("Correlation matrix is missing!", call. = FALSE)
# Argument 1 :: BetaHat
BetaHat <- checkPrimaryVar(BetaHat, "BetaHat")
# Argument 2 :: SE
SE <- checkPrimaryVar(SE, "SE")
# Check whether all entries are strictly positive
if(!all(SE > 0))
stop("One or more elements in the SE vector are not positive!", call. = FALSE)
# Argument 1 and 2 ::
if(length(BetaHat) != length(SE))
stop("BetaHat and SE vectors must have the same number of elements!", call. = FALSE)
# Argument 3 :: Correlation
corln <- checkCorln(Corln, BetaHat)
# Argument 4 and 5 ::
chkVar = chkSlabVarSpikeVar(SpikeVar, SlabVar)
spikevar = chkVar[["SpikeVar"]]
slabvar = chkVar[["SlabVar"]]
X = BetaHat
K = length(X) ## number of traits
shapes = choose_shape_parameters(K, X, SE) ## get the shape params
shape1 = shapes$shape1; shape2 = shapes$shape2;
p = shape1/(shape1+shape2) ;
logp = log(p); logq = log(1-p);
## If the covariance matrix is not positive definite, the diagonal elements are incremented to make it PD
S <- diag(SE) %*% corln %*% diag(SE)
epsilon = 10^(-5)
increment = rep(epsilon,K)
while(det(S) <= 0) diag(S) = diag(S)+increment
### start computing log-likelihood under different causal configurations
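## The marginal likelihood below sums over all 2^K causal configurations c of the K traits:
## P(X) = sum_c P(c) * MVN(X; 0, S + diag(tau_c)), where tau_cj = SlabVar if trait j is causal in c and SpikeVar otherwise,
## and P(c) = p^(#causal in c) * (1-p)^(K - #causal in c).
## locFDR = (1-p)^K * MVN(X; 0, S + SpikeVar*I) / P(X).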
trait_index = 1:K; zeromean = numeric(K);
## log null density under all traits null.
Sigma = ( spikevar * diag(K) ) + S ## the covariance matrix
null_log_density = dmvnorm(X, mean=zeromean, sigma=Sigma, log=TRUE)
null_log_density = null_log_density + (K*logq) ## combine with the prior prob of Z=0
total_density = exp(null_log_density) ## total data likelihood
## computing the log likelihood under the non-null configuration of the causal status of the traits
for(k in 1:K){
cn = combn(trait_index, k, simplify = FALSE)
log_density = map_dbl(cn, non_null_density_computation, K=K, spikevar=spikevar, slabvar=slabvar, zeromean=zeromean, X=X, S=S)
log_prior_prob = (k*logp) + ((K-k)*logq)
log_density = log_density + log_prior_prob ## combine with the prior prob of the causal configuration.
total_density = total_density + sum(exp(log_density))
}
locFDR = exp(null_log_density - log(total_density))
prior_prob_null = exp(K*logq);
logBF = log(1-locFDR) - log(locFDR) + log(prior_prob_null) - log(1-prior_prob_null)
log10_BF = log10(exp(logBF))
if(log10_BF == Inf) log10_BF <- 300
pleio_measure = list(locFDR = locFDR, log10_BF = log10_BF)
pleio_measure
}
## ===================== end of R/locFDR_BF_theoretic.R =====================
##=================================************************************************=================================##
## This file contains functions that are used for summarizing the MCMC data generated to make inference on pleiotropy.
## These functions are also the same for both uncorrelated and correlated versions of CPBayes.
## There is also one function which can be used for making some post summaries for interesting variants
## For example, estimating the direction of associations and posterior mean/median of beta or odds ratios
##=================================************************************************=================================##
##======================********** subset selection using the MCMC sample data ***********==========================##
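## The selected subset corresponds to the binary association-status vector Z that occurs most frequently
## in the MCMC sample, i.e., an MCMC estimate of the maximum a posteriori (MAP) configuration of Z.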
select_subset <- function( K, Z.data, mcmc.samplesize )
{
#asso.pr = colSums(Z.data)/mcmc.samplesize
z = as.data.frame(Z.data)
Z.summary = aggregate(z, by=z, length)[1:(ncol(z)+1)]
row.no = which.max(Z.summary[,K+1])
select.Z = Z.summary[row.no,]
select.Z = select.Z[-(K+1)]
subset = which(select.Z==1)
#Z.sort.summary.uncor = Z.summary[order(Z.summary[,K+1]),] ;
#data <- list( subset = subset, asso.pr = asso.pr )
return(subset)
}
##======================********** compute log10BF and PPNA using the MCMC sample data ***********==================##
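## Sketch of the quantities computed below:
## P0 = P(all Z_j = 0 | data) is estimated by averaging, over MCMC iterations, the product over traits of
## P(Z_j = 0 | rest) (a Rao-Blackwellized estimate); posterior odds of overall association = (1 - P0)/P0.
## Prior P(all Z_j = 0) = p0^K with p0 = shape2/(shape1 + shape2); prior odds = (1 - p0^K)/p0^K.
## Bayes factor = posterior odds / prior odds; PPNA (locFDR) = 1/(1 + posterior odds).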
overall_pleio_measure <- function( K, shape1, shape2, sample_probZ_zero )
{
## calculate the Bayes factor, first compue the posterior odds
log_probZ_zero <- log(sample_probZ_zero)
log_probZ_zero <- rowSums(log_probZ_zero)
probZ_zero <- exp(log_probZ_zero)
poste_deno <- mean(probZ_zero)
log_posterior_deno <- log(poste_deno)
log_posterior_nume <- log(1-poste_deno)
log_posterior_odds <- log_posterior_nume - log_posterior_deno
## compute the prior odds
p1 <- shape1/(shape1+shape2)
p0 <- 1-p1; log.p0 <- log(p0); log.nume <- log(1-(p0^K));
log.deno = K*log.p0; log.prior.odds = log.nume - log.deno;
log_BF <- log_posterior_odds-log.prior.odds
log10_BF <- log10(exp(log_BF))
if(log10_BF == Inf) log10_BF <- 300 ## assigning an upper bound if the BF becomes infinity
## compute the PPNA using the Posterior odds computed in the above
a <- exp(log_posterior_odds)
log_deno <- log(1+a)
PPNA <- exp(-log_deno)
if(PPNA == 0) PPNA <- 10^(-300) ## assigning a lower bound if it becomes zero
data <- list( log10_BF = log10_BF, PPNA = PPNA )
return(data)
}
##===============********** Estimate directions of associations using the MCMC sample data ***********==============##
estimate_directions <- function( K, sim.beta)
{
## estimation of the effect directions
direction <- sign(sim.beta)
direction <- (direction+1)/2 ## convert from +1/-1 scale to 0/1 scale
direction_prob <- colMeans(direction) ## probability of an effect being positive
positive_association <- direction_prob>0.5
effect_direction <- character(K)
effect_direction[which(positive_association==TRUE)] <- "positive"
effect_direction[which(positive_association==FALSE)] <- "negative"
direction <- effect_direction
}
##===========================********** post summaries for a pleiotropy signal ***********==========================##
## (1-level)% credible interval to be computed by CPBayes
## default value of level = 0.05. It also computes posterior summary of beta and odds ratio
##===========================*************************************************************==========================##
## Post processing of the mcmc data genarated by MCMC.
#' Post summary of the MCMC data generated by the uncorrelated or correlated version of CPBayes.
#'
#' Run the \code{\link{post_summaries}} function to summarize the MCMC data produced by
#' \code{\link{cpbayes_uncor}} or \code{\link{cpbayes_cor}} and obtain meaningful insights
#' into an observed pleiotropic signal.
#'
#' @param mcmc_output A list returned by either
#' \code{\link{cpbayes_uncor}} or \code{\link{cpbayes_cor}}. This list
#' contains the primary results and MCMC data produced by \code{\link{cpbayes_uncor}}
#' or \code{\link{cpbayes_cor}}. No default is specified. See the example below.
#' @param level A numeric value. (1-level)\% credible interval (Bayesian analog of the
#' confidence interval) of the unknown true genetic effect (beta/odds ratio)
#' on each trait is computed. Default choice is 0.05.
#' @return The output produced by this function is a list that consists of various components.
#' \item{variantName}{It is the name of the genetic variant provided by the user. If not
#' specified by the user, default name is `Variant'.}
#' \item{log10_BF}{It provides the log10(Bayes factor) produced by CPBayes that measures
#' the evidence of the overall pleiotropic association.}
#' \item{locFDR}{It provides the local false discovery rate (posterior probability of null association) produced by
#' CPBayes (a Bayesian analog of the p-value) which is a measure of the evidence
#' of aggregate-level pleiotropic association. Bayes factor is adjusted for prior odds, but
#' locFDR is solely a function of posterior odds. locFDR can sometimes be significantly small
#' indicating an association, but log10_BF may not. Hence, always check both log10_BF and locFDR.}
#' \item{subset}{A data frame providing the optimal subset of associated/non-null traits
#' along with their trait-specific posterior probability of association (PPAj) and direction
#' of associations. It is NULL if no phenotype is selected by CPBayes.}
#' \item{important_traits}{It provides the traits which yield a trait-specific posterior
#' probability of association (PPAj) > 20\%. Even if a phenotype is not selected in the
#' optimal subset of non-null traits, it can produce a non-negligible value of
#' trait-specific posterior probability of association. We note that `important_traits'
#' is expected to include the traits already contained in `subset'. It provides the
#' name of the important traits and their trait-specific posterior probability of
#' association (PPAj) and the direction of associations. Always check
#' 'important_traits' even if 'subset' contains a single trait.
#' It helps to better explain an observed pleiotropic signal.}
#' \item{traitNames}{It returns the name of all the phenotypes specified by the user.
#' Default is trait1, trait2, ... , traitK.}
#' \item{PPAj}{Data frame providing the trait-specific posterior probability of
#' association for all the phenotypes.}
#' \item{poste_summary_beta}{Data frame providing the posterior summary of the
#' unknown true genetic effect (beta) on each trait. It gives posterior mean,
#' median, standard error, credible interval (lower and upper limits) of the
#' true beta corresponding to each trait.}
#' \item{poste_summary_OR}{Data frame providing the posterior summary of the unknown
#' true genetic effect (odds ratio) on each trait. It gives posterior mean, median,
#' standard error, credible interval (lower and upper limits) of the true odds
#' ratio corresponding to each trait.}
#'
#' @references Majumdar A, Haldar T, Bhattacharya S, Witte JS (2018) An efficient Bayesian meta analysis approach for studying cross-phenotype genetic associations. PLoS Genet 14(2): e1007139.
#'
#' @seealso \code{\link{cpbayes_uncor}}, \code{\link{cpbayes_cor}}
#'
#' @examples
#' data(ExampleDataUncor)
#' BetaHat <- ExampleDataUncor$BetaHat
#' BetaHat
#' SE <- ExampleDataUncor$SE
#' SE
#' traitNames <- paste("Disease", 1:10, sep = "")
#' SNP1 <- "rs1234"
#' result <- cpbayes_uncor(BetaHat, SE, Phenotypes = traitNames, Variant = SNP1)
#' PleioSumm <- post_summaries(result, level = 0.05)
#' str(PleioSumm)
#'
#' @export
post_summaries <- function( mcmc_output, level = 0.05 )
{
genetic_variant <- mcmc_output$variantName
log10_BF <- mcmc_output$log10_BF
PPNA <- mcmc_output$locFDR
subset <- mcmc_output$subset
important_phenos <- mcmc_output$important_traits
MCMC_data <- mcmc_output$auxi_data
traitNames <- MCMC_data$traitNames
K <- MCMC_data$K
mcmc.samplesize <- MCMC_data$mcmc.samplesize
asso.pr <- MCMC_data$PPAj
Z.data <- MCMC_data$Z.data
sim.beta <- MCMC_data$sim.beta
asso_prob = data.frame( traits = traitNames, PPAj = asso.pr, stringsAsFactors = FALSE)
## estimate the direction of associations
alldirection <- estimate_directions( K, sim.beta )
optimal_subset <- data.frame( traits = subset, stringsAsFactors = FALSE)
if( length(subset) > 0 ){
selected <- match(subset, traitNames)
optimal_subset$PPAj <- asso.pr[selected]
optimal_subset$direction <- alldirection[selected]
}
imp_traits <- important_phenos$traits
if( length(imp_traits) > 0 ){
selected <- match(imp_traits, traitNames)
direct <- alldirection[selected]
important_phenos$direction = direct
}
## for summarizing the 'beta' parameters themselves
poste_mean_beta <- 0
poste_median_beta <- 0
poste_se_beta <- 0
CI_l <- 0 ## lower limit of the (1-level)% credible interval for beta
CI_u <- 0 ## upper limit of the (1-level)% credible interval for beta
poste_mean_OR <- 0 ## for summarizing the odds ratio (OR) = exp(beta)
poste_median_OR <- 0
CI_OR_l <- 0 ## lower limit of the (1-level)% credible interval for odds ratios
CI_OR_u <- 0 ## upper limit of the (1-level)% credible interval for odds ratios
lev <- level
lev1 <- lev/2
lev2 <- 1-(lev/2)
for( k in 1:K ){
poste_mean_beta[k] <- mean(sim.beta[,k])
poste_se_beta[k] <- sd(sim.beta[,k])
beta_qtls <- quantile( sim.beta[,k], prob = c( lev1, 0.5, lev2 ) )
CI_l[k] <- beta_qtls[1]
poste_median_beta[k] <- beta_qtls[2]
CI_u[k] <- beta_qtls[3]
ORs <- exp(sim.beta[,k])
poste_mean_OR[k] <- mean(ORs)
or_qtls <- quantile( ORs, prob = c( lev1, 0.5, lev2 ) )
CI_OR_l[k] <- or_qtls[1]
poste_median_OR[k] <- or_qtls[2]
CI_OR_u[k] <- or_qtls[3]
}
poste_summary_beta = data.frame( traits = traitNames, poste_mean = poste_mean_beta,
poste_median = poste_median_beta, poste_se = poste_se_beta, lCl = CI_l,
uCl = CI_u, stringsAsFactors = FALSE)
poste_summary_OR = data.frame( traits = traitNames, poste_mean = poste_mean_OR, poste_median = poste_median_OR,
lCl = CI_OR_l, uCl = CI_OR_u, stringsAsFactors = FALSE)
data = list( variantName = genetic_variant, log10_BF = log10_BF, locFDR = PPNA, subset = optimal_subset,
important_traits = important_phenos, traitNames = traitNames, PPAj = asso_prob,
poste_summary_beta = poste_summary_beta, poste_summary_OR = poste_summary_OR )
}
##===========================********** Forest plot for a pleiotropy signal ***********==========================##
## (1-level)% confidence interval of beta parameter is to be plotted in the forest plot. default value of level = 0.05.
##===========================*************************************************************==========================##
## Forest plot presenting pleiotropy result obtained by CPBayes.
#' Forest plot presenting pleiotropy result obtained by CPBayes.
#'
#' Run the \code{\link{forest_cpbayes}} function to create a forest plot that presents the pleiotropy result obtained
#' by \code{\link{cpbayes_uncor}} or \code{\link{cpbayes_cor}}.
#'
#' @param mcmc_output A list returned by either
#' \code{\link{cpbayes_uncor}} or \code{\link{cpbayes_cor}}. This list
#' contains all the primary results and MCMC data produced by \code{\link{cpbayes_uncor}}
#' or \code{\link{cpbayes_cor}}. No default is specified. See the example below.
#' @param level A numeric value. (1-level)\% confidence interval of the unknown true genetic effect (beta/log(odds ratio))
#' on each trait is plotted in the forest plot. Default choice is 0.05.
#' @param PPAj_cutoff A numeric value. It's a user-specified threshold of PPAj (trait-specific posterior probability
#' of association). Only those traits having PPAj values above this cut-off are included in the forest plot. So, the choice of
#' this variable as '0.0' includes all traits in the forest plot. Default is 0.01.
#' @return The output produced by this function is a diagram file in .pdf format. The details of the diagram are as follows:
#' \item{file_name}{The pdf file is named after the genetic variant. So, if the argument `Variant'
#' in \code{\link{cpbayes_uncor}} or \code{\link{cpbayes_cor}} is specified as 'rs1234', the figure file is named as rs1234.pdf.}
#' \item{Column1}{First column in the figure specifies the name of the phenotypes.}
#' \item{Column2}{Second column provides the trait-specific univariate association p-value for a trait.}
#' \item{Column3}{Third column provides the trait-specific posterior probability of association (PPAj) produced by CPBayes.}
#' \item{Column4}{Fourth column states whether a phenotype was selected in the optimal subset of associated/non-null traits
#' detected by CPBayes. If a phenotype was not selected, selected and positively associated, selected and negatively associated,
#' its association status is stated as null, positive and negative, respectively.}
#' \item{Column5}{In the right section of the figure, the point estimate and confidence interval of the beta/log odds ratio parameter for
#' each trait are plotted.}
#' @references Majumdar A, Haldar T, Bhattacharya S, Witte JS (2018) An efficient Bayesian meta analysis approach for studying cross-phenotype genetic associations. PLoS Genet 14(2): e1007139.
#'
#' @seealso \code{\link{cpbayes_uncor}}, \code{\link{cpbayes_cor}}
#'
#' @examples
#' data(ExampleDataUncor)
#' BetaHat <- ExampleDataUncor$BetaHat
#' SE <- ExampleDataUncor$SE
#' traitNames <- paste("Disease", 1:10, sep = "")
#' SNP1 <- "rs1234"
#' result <- cpbayes_uncor(BetaHat, SE, Phenotypes = traitNames, Variant = SNP1)
#' \dontrun{forest_cpbayes(result, level = 0.05)}
#'
#' @export
forest_cpbayes <- function(mcmc_output, level = 0.05, PPAj_cutoff = 0.01){
result <- mcmc_output
betahat <- result$auxi_data$betahat
se <- result$auxi_data$se
summ <- post_summaries(result) ## summary of CPBayes results
traits <- summ$traitNames ## phenotypes
K <- length(traits) ## number of traits
selection <- rep("null", K) ## if no trait is selected
if(is.null(summ$subset) == FALSE){
selected_traits <- summ$subset$traits
select_trait_posi <- match(selected_traits, traits)
direction <- summ$subset$direction
selection[select_trait_posi] <- direction
}
upper_alfa <- abs(qnorm( (level/2), 0,1))
shift <- upper_alfa*se
lowCIbeta <- betahat - shift ## lower confidence limit of beta
upCIbeta <- betahat + shift ## upper confidence limit of beta
pvalues <- pchisq( (betahat/se)^2, df=1, lower.tail=F )
for(j in 1:K){
x <- pvalues[j]
count <- 0
while(x < 1){ x <- 10*x; count <- count+1 }
pvalues[j] <- round(pvalues[j], digits = count) ## round to the first significant digit
}
PPAj <- summ$PPAj$PPAj
select = which(PPAj > PPAj_cutoff)
PPAj <- round(100*summ$PPAj$PPAj, digits = 1)
PPAj <- paste(as.character(PPAj), "%", sep = "")
labeltext <- data.frame( Trait = traits, Pvalue = as.character(pvalues), PPAj = PPAj, selection = selection, stringsAsFactors = FALSE )
df_names <- data.frame( Trait = "Trait", Pvalue = "pvalue", PPAj = "PPAj", selection = "association", stringsAsFactors=F)
labeltext = rbind(df_names,labeltext)
betahat <- c(NA, betahat)
lowCIbeta <- c(NA, lowCIbeta)
upCIbeta <- c(NA, upCIbeta)
log10BF <- round(summ$log10_BF, digits = 2)
PPNA <- summ$locFDR
x <- PPNA
count <- 0
while(x < 1){ x <- 10*x; count <- count+1 }
PPNA <- round(PPNA, digits = count+1)
if(length(select) > 0){
select <- select+1
select <- c(1,select)
title <- summ$variantName
pdffile <- paste(title, ".pdf", sep = "")
title <- paste("Pleiotropy at ", title, sep = "") ## "PPNA = ", PPNA,
pdf(pdffile)
forestplot(labeltext[select, ], betahat[select], lowCIbeta[select], upCIbeta[select],
zero = 0, lineheight = "auto", boxsize = 0.12, xlab = "Estimate and CI of log(OR)",
col = fpColors(lines="red", box="darkred"), title = title, new_page = FALSE)
# boxsize = 0.12,
dev.off()
}else{
print("Forest plot not created, because no trait has a PPA_j value above the threshold specified.")
}
}
## ===================== end of R/summary_functions.R =====================
##=================================****** uncorrelated version of CPBayes ******================================##
## This function is the main MCMC function for implementing CPBayes in case of uncorrelated summary statistics.
## Uncorrelated summary statistics arise for separate case-control statistics without any overlapping subjects.
## It calls the function: initiate_MCMC() from 'common_MCMC_functions.R' to initialize the parameters in the MCMC.
## It calls: uncorrelated_beta_update(), Z_update(), q_update(), de_update1(), de_update0()
## from 'uncor_MCMC_functions.R' to update different parameters in MCMC.
## It calls select_subset(), overall_pleio_measure() from 'summary_functions.R' to summarize the MCMC data.
##=================================**********************************************===============================##
#library("MASS")
CPBayes_uncor = function( variantName, traitNames, X, s.e., updateDE, MinSlabVar, MaxSlabVar, RP, burn.in )
{
ptm1 <- proc.time()
set.seed(10)
K = length(X)
PPAj_thr = 0.20 ## PPAj threshold
tau <- 0.01 ## choice of spike sd (var = tau^2)
CentralSlabVar <- (MinSlabVar+MaxSlabVar)/2
nonNullVar <- CentralSlabVar ## central choice of slab variance
de <- sqrt(tau^2/nonNullVar) ## de = spike sd / slab sd, so 1/de is the ratio of slab sd to spike sd
## v0 (minimum of slab variance - min_var), v1 (maximum of slab variance - max_var)
min_var <- MinSlabVar
max_var <- MaxSlabVar
max_de <- tau/sqrt(min_var) ## maximum value of 'de'
min_de <- tau/sqrt(max_var) ## minimum value of 'de'
shape1_de <- 1 ## shape1 parameter of the Beta prior of 'de' (shape2 parameter = 1, always)
## an informed initialization of the MCMC parameters
initiate <- initiate_MCMC( K, X, s.e. )
beta <- initiate$beta
Z <- initiate$Z
q <- initiate$q
nA <- initiate$K1_FDR ## number of associated traits
shape2 <- 1 ## shape2 parameter for Beta prior of q
LB <- 0.1; UB <- 0.5;
qm <- nA/K
if(qm < LB) qm <- LB
if(qm > UB) qm <- UB
#qm <- 0.25
shape1 <- (qm/(1-qm)) * shape2
thinning <- 1 ## thinning period in the MCMC
mcmc.samplesize <- (RP-burn.in) %/% thinning; Z.data <- matrix(0,mcmc.samplesize,K); row <- 0 ;
sim.beta <- matrix(0,mcmc.samplesize,K); sample_probZ_zero <- matrix(0,mcmc.samplesize,K);
for( rp in 1:RP )
{
## Update beta
beta = uncorrelated_beta_update(K, s.e., tau, de, Z, X)
## Update Z using the q-included version
res_Z <- Z_update(K, q, tau, de, beta)
## using the q-integrated out version
#res_Z <- Z_integrated_update(K, log_ratio_p1, tau_const, de, beta)
## collecting the output from the Z-update function
Z <- res_Z$Z
probZ_zero <- res_Z$prob
q = q_update(K, Z, shape1, shape2) ## Update q
## Update de
if(updateDE == TRUE){
if(sum(Z) > 0)
de <- de_update1(min_de, max_de, shape1_de, beta, Z, tau)
else de <- de_update0(min_de, max_de, shape1_de)
}
## collecting the MCMC sample obtained after the burn in period
if(rp > burn.in && rp%%thinning == 0)
{
row = row + 1 ;
Z.data[row,] = Z ;
sim.beta[row,] = beta ;
sample_probZ_zero[row,] = probZ_zero;
}
} ## closing the loop for MCMC iterations
##----------------------- Compute the summary obtained from the MCMC data ------------------------------------##
## selection of subset
uncor.subset <- select_subset( K, Z.data, mcmc.samplesize )
selected_traits <- NULL
if( length(uncor.subset) > 0 )
selected_traits <- traitNames[uncor.subset]
## extracting traits having PPAj > PPAj_thr
asso.pr = colSums(Z.data)/mcmc.samplesize
which_traits = which(asso.pr > PPAj_thr)
imp_PPAj = 0; imp_traits = 0; important_phenos = NULL
if(length(which_traits) > 0){
imp_PPAj = asso.pr[which_traits]
imp_traits = traitNames[which_traits]
important_phenos = data.frame( traits = imp_traits, PPAj = imp_PPAj, stringsAsFactors = FALSE)
}
## calculate the Bayes factor and PPNA
pleio_evidence <- overall_pleio_measure( K, shape1, shape2, sample_probZ_zero )
log10_BF_uncor <- pleio_evidence$log10_BF
PPNA.uncor <- pleio_evidence$PPNA
ptm2 <- proc.time() ## time taken for the analysis
ptm <- ptm2-ptm1
#cat(" run time (in seconds):", "\n")
#print(ptm2-ptm1)
## return the outputs. A post summary from the MCMC data can be computed for interesting variants
data = list( variantName = variantName, log10_BF = log10_BF_uncor, locFDR = PPNA.uncor,
subset = selected_traits, important_traits = important_phenos, auxi_data = list( traitNames = traitNames,
K = K, mcmc.samplesize = mcmc.samplesize, PPAj = asso.pr, Z.data = Z.data, sim.beta = sim.beta, betahat = X, se = s.e.), runtime = ptm )
return(data)
}
|
/scratch/gouwar.j/cran-all/cranData/CPBayes/R/uncor_CPBayes_functions.R
|
## ----install_package, eval=FALSE, collapse = TRUE-----------------------------
# install.packages("CPBayes")
# library("CPBayes")
## ----load_Beta, collapse = TRUE-----------------------------------------------
library("CPBayes")
# Load the beta hat vector
BetaHatfile <- system.file("extdata", "BetaHat.rda", package = "CPBayes")
load(BetaHatfile)
BetaHat
## ----load_SE, collapse = TRUE-------------------------------------------------
# Load the standard error vector
SEfile <- system.file("extdata", "SE.rda", package = "CPBayes")
load(SEfile)
SE
## ----names, collapse = TRUE---------------------------------------------------
# Specify the name of the traits and the genetic variant.
traitNames <- paste("Disease", 1:10, sep = "")
SNP1 <- "rs1234"
traitNames
SNP1
## ----example_analytic_calculation_uncor, collapse = TRUE----------------------
#Run analytic_locFDR_BF_uncor function to analytically compute locFDR and log10BF for uncorrelated summary statistics.
result <- analytic_locFDR_BF_uncor(BetaHat, SE)
str(result)
## ----uncor_example, collapse = TRUE-------------------------------------------
# Run the uncorrelated version of CPBayes (based on MCMC).
result <- cpbayes_uncor(BetaHat, SE, Phenotypes = traitNames, Variant = SNP1, MCMCiter = 5500, Burnin = 500)
## ----result_structure, collapse= TRUE-----------------------------------------
# Overall summary of the primary results produced by cpbayes_uncor.
str(result)
## ----post_summary, collapse= TRUE---------------------------------------------
# Post summary of the MCMC data produced by cpbayes_uncor.
PleioSumm <- post_summaries(result, level = 0.05)
str(PleioSumm)
## ----forest_plot, eval = FALSE, collapse= TRUE--------------------------------
# # Forest plot for the pleiotropy result obtained by cpbayes_uncor.
# forest_cpbayes(result, level = 0.05)
## ----load_cBeta, collapse = TRUE----------------------------------------------
# Load the beta-hat vector
datafile <- system.file("extdata", "cBetaHat.rda", package = "CPBayes")
load(datafile)
cBetaHat
## ----load_cSE, collapse = TRUE------------------------------------------------
# Load the standard error vector
datafile <- system.file("extdata", "cSE.rda", package = "CPBayes")
load(datafile)
cSE
## ----load_cor, collapse = TRUE------------------------------------------------
# Load the correlation matrix of the beta-hat vector (cBetaHat)
datafile <- system.file("extdata", "cor.rda", package = "CPBayes")
load(datafile)
cor
## ----example_analytic_calculation_cor, collapse = TRUE------------------------
# Run analytic_locFDR_BF_cor function to analytically compute locFDR and log10BF for correlated summary statistics.
result <- analytic_locFDR_BF_cor(cBetaHat, cSE, cor)
str(result)
## ----cor_example, collapse = TRUE---------------------------------------------
# Run the correlated version of CPBayes.
result <- cpbayes_cor(cBetaHat, cSE, cor, Phenotypes = traitNames, Variant = SNP1, MCMCiter = 5500, Burnin = 500)
## ----result_structure_cor, collapse= TRUE-------------------------------------
# Overall summary of the primary results produced by cpbayes_cor.
str(result)
## ----post_summary_cor, collapse= TRUE-----------------------------------------
# Post summary of the MCMC data produced by cpbayes_cor.
PleioSumm <- post_summaries(result, level = 0.05)
str(PleioSumm)
## ----forest_plot_cor, eval=FALSE, collapse= TRUE------------------------------
# # Forest plot for the pleiotropy result obtained by cpbayes_cor.
# forest_cpbayes(result, level = 0.05)
## ----corln_estimation_example, collapse = TRUE--------------------------------
# Example data of sample-overlap matrices
SampleOverlapMatrixFile <- system.file("extdata", "SampleOverlapMatrix.rda", package = "CPBayes")
load(SampleOverlapMatrixFile)
SampleOverlapMatrix
## ----run_corln_estimation, collapse = TRUE------------------------------------
# Estimate the correlation matrix of correlated beta-hat vector
n11 <- SampleOverlapMatrix$n11
n00 <- SampleOverlapMatrix$n00
n10 <- SampleOverlapMatrix$n10
cor <- estimate_corln(n11, n00, n10)
cor
|
/scratch/gouwar.j/cran-all/cranData/CPBayes/inst/doc/cpbayes.R
|
---
title: "CPBayes (Bayesian meta analysis for studying cross-phenotype genetic associations) package"
author: "Arunabha Majumdar, Tanushree Haldar, John Witte"
date: "`r Sys.Date()`"
output:
rmarkdown::html_vignette:
fig_width: 7
fig_height: 6
vignette: >
%\VignetteIndexEntry{CPBayes (Bayesian meta analysis for studying cross-phenotype genetic associations) package}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
# Introduction
Simultaneous analysis of genetic associations with multiple phenotypes may reveal shared genetic susceptibility across traits (pleiotropy). CPBayes is a Bayesian meta analysis method for studying cross-phenotype genetic associations. It uses summary-level data across multiple phenotypes to simultaneously measure the evidence of aggregate-level pleiotropic association and estimate an optimal subset of traits associated with the risk locus. CPBayes model is based on a spike and slab prior.
This R package consists of the following functions:
1. analytic_locFDR_BF_uncor: This function analytically computes the local FDR & Bayes factor (BF) that quantifies the evidence of aggregate-level pleiotropic association for uncorrelated summary statistics.
1. cpbayes_uncor: It implements CPBayes for uncorrelated summary statistics to figure out the optimal subset of non-null traits underlying a pleiotropic signal and other insights. The summary statistics across traits/studies are uncorrelated when the studies have no overlapping subjects.
1. analytic_locFDR_BF_cor: This function analytically computes the local FDR & Bayes factor (BF) that quantifies the evidence of aggregate-level pleiotropic association for correlated summary statistics.
1. cpbayes_cor: It implements CPBayes for correlated summary statistics to figure out the optimal subset of non-null traits underlying a pleiotropic signal and other insights. The summary statistics across traits/studies are correlated when the studies have overlapping subjects or the phenotypes were measured in a cohort study.
1. post\_summaries: It summarizes the MCMC data produced by cpbayes\_uncor or cpbayes\_cor. It computes additional summaries to provide a better insight into a pleiotropic signal. It works in the same way for both cpbayes\_uncor and cpbayes\_cor.
1. forest\_cpbayes: It creates a forest plot presenting the pleiotropy result obtained by cpbayes\_uncor or cpbayes\_cor. It works in the same way for both cpbayes\_uncor and cpbayes\_cor.
1. estimate\_corln: It computes an approximate correlation matrix of the beta-hat vector for multiple overlapping case-control studies using the sample-overlap matrices.
# Installation
You can install CPBayes from CRAN.
```{r install_package, eval=FALSE, collapse = TRUE}
install.packages("CPBayes")
library("CPBayes")
```
# How to run CPBayes for uncorrelated summary statistics.
Load the example data.
```{r load_Beta, collapse = TRUE}
library("CPBayes")
# Load the beta hat vector
BetaHatfile <- system.file("extdata", "BetaHat.rda", package = "CPBayes")
load(BetaHatfile)
BetaHat
```
BetaHat contains an example data of the main genetic effect (beta/log odds ratio) estimates for a single nucleotide polymorphism (SNP) obtained from 10 separate case-control studies for 10 different diseases. In each case-control study comprising a distinct set of 7000 cases and 10000 controls, we fit a logistic regression of the case-control status on the genotype coded as the minor allele count for all the individuals in the sample. One can also include various covariates, such as, age, gender, principal components (PCs) of ancestries in the logistic regression. From each logistic regression for a disease, we obtain the estimate of the main genetic association parameter (beta/logodds ratio) along with the corresponding standard error. Since the studies do not have any overlapping subject, beta-hat across the diseases are uncorrelated.
```{r load_SE, collapse = TRUE}
# Load the standard error vector
SEfile <- system.file("extdata", "SE.rda", package = "CPBayes")
load(SEfile)
SE
```
SE contains the standard errors corresponding to the above beta hat vector across 10 separate case-control studies.
Next we specify the name of the diseases/phenotypes and the genetic variant.
```{r names, collapse = TRUE}
# Specify the name of the traits and the genetic variant.
traitNames <- paste("Disease", 1:10, sep = "")
SNP1 <- "rs1234"
traitNames
SNP1
```
Now, since the studies are non-overlapping, the summary statistics across traits are uncorrelated. Here we run the analytic_locFDR_BF_uncor function for this example data.
```{r example_analytic_calculation_uncor, collapse = TRUE}
#Run analytic_locFDR_BF_uncor function to analytically compute locFDR and log10BF for uncorrelated summary statistics.
result <- analytic_locFDR_BF_uncor(BetaHat, SE)
str(result)
```
So, locFDR [result\$locFDR] was analytically computed as 3.43*10^(-6) and log10(Bayes factor) [result\$log10_BF] was estimated as 3.93 indicating an aggregate-level pleiotropic association. While analytically computing locFDR (BF), a fixed value of slab variance is considered.
Next we implement CPBayes for this example data. Since the studies are non-overlapping, the summary statistics across traits are uncorrelated. Hence we run the cpbayes\_uncor function.
```{r uncor_example, collapse = TRUE}
# Run the uncorrelated version of CPBayes (based on MCMC).
result <- cpbayes_uncor(BetaHat, SE, Phenotypes = traitNames, Variant = SNP1, MCMCiter = 5500, Burnin = 500)
```
There are more optional arguments that can be passed into the function (see the Arguments section of cpbayes\_uncor in the CPBayes manual). After running cpbayes\_uncor, it prints the list of important traits for which the trait-specific posterior probability of association (PPAj) > 20%. However, the printed output is only a part of 'result', which is a list consisting of various components. An overall summary of 'result' can be seen by using the str() function (as shown below).
```{r result_structure, collapse= TRUE}
# Overall summary of the primary results produced by cpbayes_uncor.
str(result)
```
Here, result\$variantName returns the name of the genetic variant specified by the user, which here is 'rs1234'. Estimated from the MCMC posterior sample, result\$log10\_BF provides the log10(Bayes factor) and result\$locFDR provides the local false discovery rate (posterior probability of null association) evaluating the aggregate-level pleiotropic association. These values differ from those obtained by the analytical version (above). The main reason is that the locFDR (log10BF) obtained by the cpbayes\_uncor() function is estimated from the MCMC sample and a range of slab variance is considered. We recommend using the locFDR (log10BF) value obtained from the analytical version. Next, result\$subset provides the optimal subset of associated/non-null traits selected by CPBayes. CPBayes selected Disease7, Disease9, and Disease10 as associated/non-null. It also gives a list of important traits (important\_traits) comprising phenotypes having PPAj > 20%. Even if a phenotype is not selected in the optimal subset of non-null traits, it can produce a non-negligible value of the trait-specific posterior probability of association (PPAj) and may still be a promising pleiotropic candidate. For example, Disease8 had a PPAj of 21% and was listed in important\_traits, but was not included in the optimal subset. A detailed interpretation of all the outputs is provided in the Value section of cpbayes\_uncor in the CPBayes manual.
The post\_summaries function provides important insights into an observed pleiotropic signal, e.g., the direction of associations, trait-specific posterior probabilities of association (PPAj), the posterior mean/median and 95\% credible interval (Bayesian analog of the confidence interval) of the unknown true genetic effect (beta/odds ratio) on each trait, etc.
```{r post_summary, collapse= TRUE}
# Post summary of the MCMC data produced by cpbayes_uncor.
PleioSumm <- post_summaries(result, level = 0.05)
str(PleioSumm)
```
So we have to pass the list 'result' returned by cpbayes\_uncor as the first argument and the 'level' as the second argument into the post\_summaries function. If 'level' is not specified, the default value is 0.05. Note that post\_summaries computes (1-level)\% credible interval of the unknown true genetic effect (beta/odds ratio) on each trait. It estimates the direction of association with the important traits, the vector of trait-specific posterior probability of association (PPAj), etc. For detailed description of different outputs provided by this function, see the Value section of post_summaries in the CPBayes manual.
Next we run the forest\_cpbayes function to create a forest plot that presents the pleiotropy result produced by cpbayes\_uncor.
```{r forest_plot, eval = FALSE, collapse= TRUE}
# Forest plot for the pleiotropy result obtained by cpbayes_uncor.
forest_cpbayes(result, level = 0.05)
```
As with the post\_summaries function, we need to pass the same list 'result' returned by cpbayes\_uncor as the first argument into the function. The second argument is the level, whose default value is 0.05. In the forest plot, the (1-level)% confidence interval of the beta/log odds ratio parameter is plotted for each trait. For more details, please see the section on the forest_cpbayes function in the CPBayes manual.
# How to run CPBayes for correlated summary statistics.
Next we demonstrate how to run CPBayes for correlated summary statistics. Get the path to the data.
```{r load_cBeta, collapse = TRUE}
# Load the beta-hat vector
datafile <- system.file("extdata", "cBetaHat.rda", package = "CPBayes")
load(datafile)
cBetaHat
```
Here, 'c' in cBetaHat stands for correlated case. cBetaHat contains an example data of the main genetic association parameter (beta/log odds ratio) estimates for a SNP across 10 overlapping case-control studies for 10 different diseases. Each of the 10 studies has a distinct set of 7000 cases and a common set of 10000 controls shared across all the studies. In each case-control study, we fit a logistic regression of the case-control status on the genotype coded as the minor allele count for all the individuals in the sample. One can also include various covariates, such as, age, gender, principal components (PCs) of ancestries in the logistic regression. From each logistic regression for a disease, we obtain the estimate of the main genetic effect (beta/log odds ratio) along with the corresponding standard error. Since the studies have overlapping subjects, beta-hat across the diseases are correlated.
```{r load_cSE, collapse = TRUE}
# Load the standard error vector
datafile <- system.file("extdata", "cSE.rda", package = "CPBayes")
load(datafile)
cSE
```
cSE contains the standard errors corresponding to the above beta hat vector across 10 overlapping case-control studies.
```{r load_cor, collapse = TRUE}
# Load the correlation matrix of the beta-hat vector (cBetaHat)
datafile <- system.file("extdata", "cor.rda", package = "CPBayes")
load(datafile)
cor
```
Since the summary statistics across traits are correlated, we run the analytic_locFDR_BF_cor function for this example data.
```{r example_analytic_calculation_cor, collapse = TRUE}
# Run analytic_locFDR_BF_cor function to analytically compute locFDR and log10BF for correlated summary statistics.
result <- analytic_locFDR_BF_cor(cBetaHat, cSE, cor)
str(result)
```
So the locFDR [result\$locFDR] was analytically computed as 3.54*10^(-10) and log10(Bayes factor) [result\$log10_BF] was estimated as 9.18 indicating an aggregate-level pleiotropic association.
The correlation matrix of the beta-hat vector (cBetaHat) is given by 'cor' which we estimated by employing the estimate_corln function (demonstrated later in this tutorial) using the sample-overlap matrices (explained later in this tutorial). Next we run the correlated version of CPBayes based on MCMC for this example data.
```{r cor_example, collapse = TRUE}
# Run the correlated version of CPBayes.
result <- cpbayes_cor(cBetaHat, cSE, cor, Phenotypes = traitNames, Variant = SNP1, MCMCiter = 5500, Burnin = 500)
```
There are more optional arguments that can be passed into the function (see the Arguments section of cpbayes\_cor in the CPBayes manual). After running cpbayes\_cor, it prints the list of important traits for which the trait-specific posterior probability of association (PPAj) > 20%. However, the printed output is only a part of 'result', which is a list consisting of various components. An overall summary of 'result' can be seen by using the str() function (as shown below).
```{r result_structure_cor, collapse= TRUE}
# Overall summary of the primary results produced by cpbayes_cor.
str(result)
```
Here, result\$variantName returns the name of the genetic variant specified by the user, which here is 'rs1234'. Estimated from the MCMC sample, result\$log10\_BF provides the log10(Bayes factor) and result\$locFDR provides the local false discovery rate (posterior probability of null association) measuring the evidence of aggregate-level pleiotropic association. Again, these values differ from those obtained by the analytical version. The main reason is that the locFDR (log10BF) obtained by the cpbayes\_cor() function is estimated from the MCMC sample and a range of slab variance is considered. We recommend using the locFDR (log10BF) value obtained from the analytical version. However, for a large number of traits (say > 25), analytic_locFDR_BF_cor may be slow to run. Next, result\$subset provides the optimal subset of associated traits selected by CPBayes. A detailed interpretation of all the outputs is provided in the Value section of cpbayes\_cor in the CPBayes manual.
The post\_summaries function provides important insights into an observed pleiotropic signal, e.g., the direction of associations, trait-specific posterior probability of associations (PPAj), posterior mean/median and 95\% credible interval (Bayesian analog of the confidence interval) of the unknown true genetic effect (beta/odds ratio) on each trait, etc.
```{r post_summary_cor, collapse= TRUE}
# Post summary of the MCMC data produced by cpbayes_cor.
PleioSumm <- post_summaries(result, level = 0.05)
str(PleioSumm)
```
Note that post\_summaries works in exactly the same way for both cpbayes\_cor and cpbayes\_uncor. For a detailed description of the different outputs provided by post\_summaries, see the Value section of post_summaries in the CPBayes manual.
Next we run the forest\_cpbayes function to create a forest plot that presents the pleiotropy result produced by cpbayes\_cor.
```{r forest_plot_cor, eval=FALSE, collapse= TRUE}
# Forest plot for the pleiotropy result obtained by cpbayes_cor.
forest_cpbayes(result, level = 0.05)
```
Note that forest\_cpbayes works in exactly the same way for both cpbayes_cor and cpbayes_uncor. For more details, see the section on the forest_cpbayes function in the CPBayes manual.
# How to run estimate_corln.
The function estimate\_corln estimates the correlation matrix of the beta-hat vector for multiple overlapping case-control studies using the sample-overlap matrices which describe the number of cases or controls shared between studies/traits, and the number of subjects who are case for one study/trait but control for another study/trait. For a cohort study, the phenotypic correlation matrix should be a reasonable substitute of this correlation matrix.
```{r corln_estimation_example, collapse = TRUE}
# Example data of sample-overlap matrices
SampleOverlapMatrixFile <- system.file("extdata", "SampleOverlapMatrix.rda", package = "CPBayes")
load(SampleOverlapMatrixFile)
SampleOverlapMatrix
```
SampleOverlapMatrix is a list that contains an example of the sample-overlap matrices for five different diseases in the Kaiser GERA cohort (a real dataset). The list consists of three matrices, as follows. SampleOverlapMatrix\$n11 provides the number of cases shared between all possible pairs of studies/traits. SampleOverlapMatrix\$n00 provides the number of controls shared between all possible pairs of studies/traits. SampleOverlapMatrix\$n10 provides the number of subjects who are cases for one study/trait and controls for another study/trait. For a more detailed explanation, see the Arguments section of estimate\_corln in the CPBayes manual.
```{r run_corln_estimation, collapse = TRUE}
# Estimate the correlation matrix of correlated beta-hat vector
n11 <- SampleOverlapMatrix$n11
n00 <- SampleOverlapMatrix$n00
n10 <- SampleOverlapMatrix$n10
cor <- estimate_corln(n11, n00, n10)
cor
```
The function estimate\_corln computes an approximate correlation matrix of the correlated beta-hat vector obtained from multiple overlapping case-control studies using the sample-overlap matrices. Note that for a cohort study, the phenotypic correlation matrix should be a reasonable substitute of this correlation matrix. These approximations of the correlation structure are accurate when none of the diseases/traits is associated with the environmental covariates and genetic variant. While demonstrating cpbayes\_cor, we used simulated data for 10 overlapping case-control studies with each study having a distinct set of 7000 cases and a common set of 10000 controls shared across all the studies. We used the estimate\_corln function to estimate the correlation matrix of the correlated beta-hat vector using the sample-overlap matrices.
***Important note on the estimation of the correlation structure of the correlated beta-hat vector:*** In general, environmental covariates are expected to be present in a study and associated with the phenotypes of interest. Also, a small proportion of genome-wide genetic variants are expected to be associated. Hence the above approximations of the correlation matrix may not be accurate. So, in general, we recommend an alternative strategy to estimate the correlation matrix using the genome-wide summary statistics data across traits, as follows. First, extract all the SNPs for each of which the trait-specific univariate association p-values across all the traits are > 0.1. The trait-specific univariate association p-values are obtained using the beta-hat and standard error for each trait. Each of the SNPs selected in this way is either weakly or not associated with any of the phenotypes (a null SNP). Next, select a set of independent null SNPs from the initial set of null SNPs by using a threshold of r^2 < 0.01 (r: the correlation between the genotypes at a pair of SNPs). In the absence of in-sample linkage disequilibrium (LD) information, one can use reference panel LD information for this screening. Finally, compute the correlation matrix of the effect estimates (beta-hat vector) as the sample correlation matrix of the beta-hat vector across all the selected independent null SNPs. This strategy is more general and applicable to a cohort study or multiple overlapping studies for binary or quantitative traits with arbitrary distributions. It is also useful when the beta-hat vector for multiple non-overlapping studies becomes correlated due to genetically related individuals across studies. Misspecification of the correlation structure can affect the results produced by CPBayes to some extent. Hence, if genome-wide summary statistics data across traits are available, we recommend this alternative strategy to estimate the correlation matrix of the beta-hat vector. A brief code sketch of this strategy is given below.
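The following is a minimal sketch of this strategy (it is not a CPBayes function). It assumes hypothetical genome-wide matrices `beta.mat` and `se.mat` (rows = SNPs, columns = traits) holding the effect estimates and standard errors, and a hypothetical vector `ld.pruned.snps` of SNP IDs that survived LD pruning at r^2 < 0.01 using an external tool (e.g., PLINK).
```{r alt_corln_sketch, eval = FALSE, collapse = TRUE}
# Hypothetical inputs: beta.mat, se.mat (SNP x trait matrices of genome-wide effect
# estimates and standard errors) and ld.pruned.snps (IDs of LD-pruned SNPs, r^2 < 0.01).
# Univariate association p-values computed from beta-hat and SE (Wald test).
pval.mat <- 2 * pnorm(-abs(beta.mat / se.mat))
# Step 1: retain SNPs whose p-values exceed 0.1 for every trait (null SNPs).
null.snps <- rownames(beta.mat)[apply(pval.mat > 0.1, 1, all)]
# Step 2: restrict to the independent (LD-pruned) null SNPs.
independent.null.snps <- intersect(null.snps, ld.pruned.snps)
# Step 3: sample correlation matrix of the beta-hat vector across the selected SNPs.
cor.est <- cor(beta.mat[independent.null.snps, ])
```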
See our paper for more details: Majumdar A, Haldar T, Bhattacharya S, Witte JS (2018) An efficient Bayesian meta analysis approach for studying cross-phenotype genetic associations. PLoS Genet 14(2): e1007139.
|
/scratch/gouwar.j/cran-all/cranData/CPBayes/inst/doc/cpbayes.Rmd
|
#' Cluster-Polarization Coefficient
#'
#' Implements clustering algorithms and calculates cluster-polarization coefficient.
#' Contains support for hierarchical clustering, k-means clustering, partitioning
#' around medoids, density-based spatial clustering with noise, and manual assignment
#' of cluster membership.
#'
#' @details
#' \code{type} must take one of six values: \cr
#' \code{"hclust"}: agglomerative hierarchical clustering with \code{\link{hclust}()}, \cr
#' \code{"diana"}: divisive hierarchical clustering with \code{\link{diana}()}, \cr
#' \code{"kmeans"}: k-means clustering with \code{\link{kmeans}()}, \cr
#' \code{"pam"}: k-medoids clustering with \code{\link{pam}()}, \cr
#' \code{"dbscan"}: density-based clustering with \code{\link{dbscan}()}, \cr
#' \code{"manual"}: no clustering is necessary, researcher has specified cluster assignments.
#'
#' For all clustering methods, additional arguments to fine-tune clustering
#' performance, such as the specific algorithm to be used, should be passed to
#' \code{CPC()} and will be inherited by the specified clustering function. In
#' particular, if \code{type = "kmeans"}, using a large number of random starts is
#' recommended. This can be specified with the \code{nstart} argument to
#' \code{\link{kmeans}()}, passed directly to \code{CPC()}.
#'
#' If \code{type = "manual"}, \code{data} must contain a vector identifying cluster
#' membership for each observation, and \code{cols} and \code{clusters} must be
#' defined.
#'
#' @param data a numeric vector or \code{n x k} matrix or data frame. If
#' \code{type = "manual"}, \code{data} must be a matrix containing a vector
#' identifying cluster membership for each observation, to be passed to
#' \code{clusters} argument.
#' @param type a character string giving the type of clustering method to be used.
#' See Details.
#' @param k the desired number of clusters. Required if \code{type} is one of \code{"hclust"},
#' \code{"diana"}, \code{"kmeans"}, or \code{"pam"}.
#' @param epsilon radius of epsilon neighborhood. Required if \code{type = "dbscan"}.
#' @param model a logical indicating whether clustering model output should be
#' returned. Defaults to \code{FALSE}.
#' @param adjust a logical indicating whether the adjusted CPC should be calculated.
#' Defaults to \code{FALSE}. Note that both CPC and adjusted CPC are automatically
#' calculated and returned if \code{model = TRUE}.
#' @param cols columns of \code{data} to be used in CPC calculation. Only used if
#' \code{type = "manual"}.
#' @param clusters column of \code{data} indicating cluster membership for each
#' observation. Only used if \code{type = "manual"}.
#' @param ... arguments passed to other functions.
#'
#' @return If \code{model = TRUE}, \code{CPC()} returns a list with components
#' containing output from the specified clustering function, all sums of squares, the
#' CPC, the adjusted CPC, and associated standard errors. If \code{model = FALSE}, \code{CPC()} returns
#' a numeric vector of length 1 giving the CPC (if \code{adjust = FALSE}) or adjusted CPC (if
#' \code{adjust = TRUE}).
#'
#' @examples
#' data <- matrix(c(rnorm(50, 0, 1), rnorm(50, 5, 1)), ncol = 2, byrow = TRUE)
#' clusters <- matrix(c(rep(1, 25), rep(2, 25)), ncol = 1)
#' data <- cbind(data, clusters)
#'
#' CPC(data[,c(1:2)], "kmeans", k = 2)
#' CPC(data, "manual", cols = 1:2, clusters = 3)
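#'
#' # As noted in Details, additional arguments for the underlying clustering function
#' # can be passed directly to CPC(); for example, a larger number of random starts
#' # for kmeans (the value 25 is illustrative):
#' CPC(data[,c(1:2)], "kmeans", k = 2, nstart = 25)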
#'
#' @import stats
#' @importFrom cluster pam diana
#' @importFrom dbscan dbscan
#' @export
CPC <- function(data, type, k = NULL, epsilon = NULL, model = FALSE, adjust = FALSE,
cols = NULL, clusters = NULL, ...) {
data <- as.matrix(data)
input <- data[colSums(!is.na(data)) > 0]
input <- matrix(na.omit(input), ncol = ncol(data))
cluster <- NULL
k <- ifelse(!type %in% c("dbscan", "manual"), k, 0)
if(length(unique(input)) < k){
warning("More clusters than unique data points; NAs generated")
return(NA)
}
else{
switch (type,
dbscan = {
output_dbscan <- dbscan(x = input, eps = epsilon, ...)
new_dbscan <- cbind(input, unlist(output_dbscan$cluster))
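               # drop noise points, which dbscan() labels as cluster 0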
new_dbscan <- subset(new_dbscan, new_dbscan[,ncol(new_dbscan)] != 0)
new_dbscan <- CPCdata.frame(data = new_dbscan,
cols = -ncol(new_dbscan),
clusters = ncol(new_dbscan))
data_dbscan <- as.matrix(new_dbscan[,-ncol(new_dbscan)])
data_dbscan <- apply(data_dbscan, 2, as.numeric)
WSS_dbscan <- c()
for (i in unique(new_dbscan$cluster)) {
data_temp <- new_dbscan[new_dbscan$cluster == i,]
data_temp <- as.matrix(data_temp[,-ncol(new_dbscan)])
data_temp <- apply(data_temp, 2, as.numeric)
WSS <- SS(as.matrix(data_temp))
WSS_dbscan <- c(WSS_dbscan, WSS)
}
TSS_dbscan <- SS(as.matrix(data_dbscan))
TWSS_dbscan <- sum(WSS_dbscan)
BSS_dbscan <- TSS_dbscan - TWSS_dbscan
n_i <- nrow(as.matrix(data_dbscan))
n_j <- ncol(as.matrix(data_dbscan))
n_k <- length(unique(new_dbscan$cluster))
CPC <- BSS_dbscan/TSS_dbscan
CPC_sd <- sqrt((2*(n_j*n_k - n_j)*(n_i - n_j*n_k))/(((n_i - n_j)^2)*(n_i - n_j + 1)))
CPC.adj <- 1 - (TWSS_dbscan/TSS_dbscan)*((n_i - n_j)/(n_i - n_j*n_k))
CPC.adj_sd <- sqrt((2*(n_j*n_k - n_j))/((n_i - n_j*n_k)*(n_i - n_j + 1)))
if(model){
list(cluster = output_dbscan$cluster,
minPts = output_dbscan$minPts,
data = input,
WSS = WSS_dbscan,
TWSS = TWSS_dbscan,
BSS = BSS_dbscan,
TSS = TSS_dbscan,
CPC = CPC,
CPC_sd = CPC_sd,
CPC.adj = CPC.adj,
CPC.adj_sd = CPC.adj_sd)
}
else{
if(adjust){
CPC.adj
}
else{
CPC
}
}
},
hclust = {
input <- apply(input, 2, as.numeric)
input_dist <- dist(input)
output_hclust <- hclust(input_dist, ...)
cut_hclust <- as.data.frame(cutree(output_hclust, k = k))
colnames(cut_hclust) <- "cluster"
new_hclust <- cbind(input, cut_hclust)
WSS_hclust <- c()
for (i in 1:k) {
WSS <- SS(new_hclust[new_hclust$cluster == i,])
WSS_hclust <- c(WSS_hclust, WSS)
}
TSS_hclust <- SS(input)
TWSS_hclust <- sum(WSS_hclust)
BSS_hclust <- TSS_hclust - TWSS_hclust
n_i <- nrow(input)
n_j <- ncol(input)
CPC <- BSS_hclust/TSS_hclust
CPC_sd <- sqrt((2*(n_j*k - n_j)*(n_i - n_j*k))/(((n_i - n_j)^2)*(n_i - n_j + 1)))
CPC.adj <- 1 - (TWSS_hclust/TSS_hclust)*((n_i - n_j)/(n_i - n_j*k))
CPC.adj_sd <- sqrt((2*(n_j*k - n_j))/((n_i - n_j*k)*(n_i - n_j + 1)))
if(model){
list(merge = output_hclust$merge,
height = output_hclust$height,
order = output_hclust$order,
labels = output_hclust$labels,
method = output_hclust$method,
call = output_hclust$call,
dist.method = output_hclust$dist.method,
data = new_hclust,
WSS = WSS_hclust,
TWSS = TWSS_hclust,
BSS = BSS_hclust,
TSS = TSS_hclust,
CPC = CPC,
CPC_sd = CPC_sd,
CPC.adj = CPC.adj,
CPC.adj_sd = CPC.adj_sd)
}
else{
if(adjust){
CPC.adj
}
else{
CPC
}
}
},
diana = {
input <- apply(input, 2, as.numeric)
input_dist <- dist(input)
output_diana <- diana(input_dist, ...)
cut_diana <- as.data.frame(cutree(output_diana, k = k))
colnames(cut_diana) <- "cluster"
new_diana <- cbind(input, cut_diana)
WSS_diana <- c()
for (i in 1:k) {
WSS <- SS(new_diana[new_diana$cluster == i,])
WSS_diana <- c(WSS_diana, WSS)
}
TSS_diana <- SS(input)
TWSS_diana <- sum(WSS_diana)
BSS_diana <- TSS_diana - TWSS_diana
n_i <- nrow(input)
n_j <- ncol(input)
CPC <- BSS_diana/TSS_diana
CPC_sd <- sqrt((2*(n_j*k - n_j)*(n_i - n_j*k))/(((n_i - n_j)^2)*(n_i - n_j + 1)))
CPC.adj <- 1 - (TWSS_diana/TSS_diana)*((n_i - n_j)/(n_i - n_j*k))
CPC.adj_sd <- sqrt((2*(n_j*k - n_j))/((n_i - n_j*k)*(n_i - n_j + 1)))
if(model){
list(order = output_diana$order,
height = output_diana$height,
dc = output_diana$merge,
diss = output_diana$diss,
call = output_diana$call,
data = new_diana,
WSS = WSS_diana,
TWSS = TWSS_diana,
BSS = BSS_diana,
TSS = TSS_diana,
CPC = CPC,
CPC_sd = CPC_sd,
CPC.adj = CPC.adj,
CPC.adj_sd = CPC.adj_sd)
}
else{
if(adjust){
CPC.adj
}
else{
CPC
}
}
},
kmeans = {
input <- apply(input, 2, as.numeric)
output_kmeans <- kmeans(x = input, centers = k, ...)
cluster_kmeans <- as.data.frame(output_kmeans$cluster)
colnames(cluster_kmeans) <- "cluster"
new_kmeans <- cbind(input, cluster_kmeans)
n_i <- nrow(input)
n_j <- ncol(input)
CPC <- output_kmeans$betweenss/output_kmeans$totss
CPC_sd <- sqrt((2*(n_j*k - n_j)*(n_i - n_j*k))/(((n_i - n_j)^2)*(n_i - n_j + 1)))
CPC.adj <- 1 -
(output_kmeans$tot.withinss/output_kmeans$totss)*((n_i - n_j)/(n_i - n_j*k))
CPC.adj_sd <- sqrt((2*(n_j*k - n_j))/((n_i - n_j*k)*(n_i - n_j + 1)))
if(model){
list(centers = output_kmeans$centers,
size = output_kmeans$size,
iter = output_kmeans$iter,
ifault = output_kmeans$ifault,
data = new_kmeans,
WSS = output_kmeans$withinss,
TWSS = output_kmeans$tot.withinss,
BSS = output_kmeans$betweenss,
TSS = output_kmeans$totss,
CPC = CPC,
CPC_sd = CPC_sd,
CPC.adj = CPC.adj,
CPC.adj_sd = CPC.adj_sd)
}
else{
if(adjust){
CPC.adj
}
else{
CPC
}
}
},
pam = {
input <- apply(input, 2, as.numeric)
output_pam <- pam(x = input, k = k, ...)
cluster_pam <- as.data.frame(output_pam$clustering)
colnames(cluster_pam) <- "cluster"
new_pam <- cbind(input, cluster_pam)
WSS_pam <- c()
for (i in 1:k) {
WSS <- SS(new_pam[new_pam$cluster == i,])
WSS_pam <- c(WSS_pam, WSS)
}
TSS_pam <- SS(input)
TWSS_pam <- sum(WSS_pam)
BSS_pam <- TSS_pam - TWSS_pam
n_i <- nrow(input)
n_j <- ncol(input)
CPC <- BSS_pam/TSS_pam
CPC_sd <- sqrt((2*(n_j*k - n_j)*(n_i - n_j*k))/(((n_i - n_j)^2)*(n_i - n_j + 1)))
CPC.adj <- 1 - (TWSS_pam/TSS_pam)*((n_i - n_j)/(n_i - n_j*k))
CPC.adj_sd <- sqrt((2*(n_j*k - n_j))/((n_i - n_j*k)*(n_i - n_j + 1)))
if(model){
list(medoids = output_pam$medoids,
id.med = output_pam$id.med,
objective = output_pam$objective,
isolation = output_pam$isolation,
clusinfo = output_pam$clusinfo,
silinfo = output_pam$silinfo,
diss = output_pam$diss,
call = output_pam$call,
data = new_pam,
                  WSS = WSS_pam,
                  TWSS = TWSS_pam,
                  BSS = BSS_pam,
                  TSS = TSS_pam,
CPC = CPC,
CPC_sd = CPC_sd,
CPC.adj = CPC.adj,
CPC.adj_sd = CPC.adj_sd)
}
else{
if(adjust){
CPC.adj
}
else{
CPC
}
}
},
manual = {
input <- CPCdata.frame(data = data, cols = cols, clusters = clusters)
data_manual <- as.matrix(input[, -ncol(input)])
data_manual <- apply(data_manual, 2, as.numeric)
WSS_manual <- c()
for (i in unique(input$cluster)) {
data_temp <- input[input$cluster == i,]
data_temp <- as.matrix(data_temp[, -ncol(input)])
data_temp <- apply(data_temp, 2, as.numeric)
WSS <- SS(as.matrix(data_temp))
WSS_manual <- c(WSS_manual, WSS)
}
TSS_manual <- SS(as.matrix(data_manual))
TWSS_manual <- sum(WSS_manual)
BSS_manual <- TSS_manual - TWSS_manual
n_i <- nrow(input)
n_j <- ncol(input)
n_k <- length(unique(input$cluster))
CPC <- BSS_manual/TSS_manual
CPC_sd <- sqrt((2*(n_j*n_k - n_j)*(n_i - n_j*n_k))/(((n_i - n_j)^2)*(n_i - n_j + 1)))
CPC.adj <- 1 - (TWSS_manual/TSS_manual)*((n_i - n_j)/(n_i - n_j*n_k))
CPC.adj_sd <- sqrt((2*(n_j*n_k - n_j))/((n_i - n_j*n_k)*(n_i - n_j + 1)))
if(model){
list(data = input,
WSS = WSS_manual,
TWSS = TWSS_manual,
BSS = BSS_manual,
TSS = TSS_manual,
CPC = CPC,
CPC_sd = CPC_sd,
CPC.adj = CPC.adj,
CPC.adj_sd = CPC.adj_sd)
}
else{
if(adjust){
CPC.adj
}
else{
CPC
}
}
}
)
}
}
|
/scratch/gouwar.j/cran-all/cranData/CPC/R/CPC.R
|
#' Data Manipulation for CPC Calculation
#'
#' Converts numeric matrix to data frame with necessary format for
#' \code{"manual"} \code{\link{CPC}()} calculation.
#'
#' @param data a numeric \code{n x k} matrix or data frame.
#' @param cols columns in \code{data} to be used for calculating \code{\link{CPC}()}.
#' @param clusters column in \code{data} giving cluster membership.
#'
#' @return Returns a data frame with dimensions identical to those of \code{data}.
#'
#' @examples
#' data <- matrix(c(rnorm(50, 0, 1), rnorm(50, 5, 1)), ncol = 2, byrow = TRUE)
#' clusters <- matrix(c(rep(1, 25), rep(2, 25)), ncol = 1)
#' data <- cbind(data, clusters)
#' CPCdata.frame(data, 1:2, 3)
#'
#' @export
CPCdata.frame <- function(data, cols, clusters) {
new_data <- as.data.frame(data[,cols])
new_clusters <- as.data.frame(data[,clusters])
colnames(new_clusters) <- "cluster"
na.omit(cbind(new_data, new_clusters))
}
|
/scratch/gouwar.j/cran-all/cranData/CPC/R/CPCdata.frame.R
|
#' Euclidean Distance from Dimension Means
#'
#' Calculates two-dimensional Euclidean distance between all points and dimension means.
#'
#' @param data an \code{n x 2} matrix or data frame.
#'
#' @return Returns a numeric vector of length 1.
#'
#' @examples
#' data <- matrix(c(rnorm(50, 0, 1), rnorm(50, 5, 1)), ncol = 2, byrow = TRUE)
#'
#' Euclidean(data)
#'
#' @import stats
#'
#' @export
Euclidean <- function(data) {
data <- as.data.frame(na.omit(data))
colnames(data) <- c("x", "y")
data$x_mean <- mean(data$x)
data$y_mean <- mean(data$y)
data$distance <- sqrt((data$x - data$x_mean)^2 + (data$y - data$y_mean)^2)
out <- sum(data$distance)/nrow(data)
return(out)
}
|
/scratch/gouwar.j/cran-all/cranData/CPC/R/Euclidean.R
|
#' Sum-of-Squares Calculation
#'
#' Calculates sums of squares for uni- or multi-dimensional numeric data using the
#' distance matrix.
#'
#' @param data a numeric vector or \code{n x k} matrix or data frame.
#' @param ... arguments passed to \code{\link{dist}()}.
#'
#' @return Returns a numeric vector of length 1.
#'
#' @examples
#' data <- matrix(c(rnorm(50, 0, 1), rnorm(50, 5, 1)), ncol = 2, byrow = TRUE)
#' SS(data)
#'
#' @import stats
#' @importFrom Rfast Dist
#'
#' @export
SS <- function(data, ...) {
sum(as.matrix(Dist(data)^2))/(2*nrow(data))
}
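
## Illustrative check (not run as package code): the pairwise-distance identity
## used above agrees with the usual sum of squared deviations from the column
## means, since sum_{i,j} d_ij^2 = 2n * sum_i ||x_i - colMeans||^2.
## m <- matrix(rnorm(20), ncol = 2)
## all.equal(SS(m), sum(scale(m, scale = FALSE)^2))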
|
/scratch/gouwar.j/cran-all/cranData/CPC/R/SS.R
|
#' Test for Bivariate Correlation
#'
#' Calculates correlation coefficient between two variables and returns a list containing the
#' correlation estimate, its standard error, the p-value of a null-hypothesis significance test, and the
#' number of observations used.
#'
#' @details
#' Additional arguments to alter the type of null hypothesis significance test, the method used to
#' calculate the correlation coefficient, the confidence level, or other options should be passed to
#' \code{correlate}() and will be inherited by \code{\link{cor.test}()}. Note that unlike
#' \code{\link{cor.test}()}, both arguments \code{x} and \code{y} are required.
#'
#' @param x a numeric vector.
#' @param y a numeric vector.
#' @param ... arguments passed to \code{\link{cor.test}()}.
#'
#' @return Returns a list with elements containing the correlation coefficient estimate, its associated
#' standard error, the p-value of a null-hypothesis significance test, and the number of observations
#' used, all as numeric vectors of length 1.
#'
#' @examples
#' data <- matrix(c(rnorm(50, 0, 1), rnorm(50, 5, 1)), ncol = 2, byrow = TRUE)
#'
#' correlate(data[, 1], data[, 2])
#'
#' @import stats
#'
#' @export
correlate <- function(x, y, ...) {
cor_result <- cor.test(x, y, ...)
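# standard error of r via the usual normal-theory approximation:
# se(r) = sqrt((1 - r^2)/(n - 2)), where n - 2 is the df reported by cor.test()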
cor_se <- sqrt((1 - cor_result$estimate[["cor"]]^2)/(cor_result$parameter[["df"]]))
out <- list("estimate" = cor_result$estimate[["cor"]],
"se" = cor_se,
"p-value" = cor_result$p.value,
"observations" = cor_result$parameter[["df"]] + 2)
return(out)
}
|
/scratch/gouwar.j/cran-all/cranData/CPC/R/correlate.R
|
#' Multidimensional Difference-in-Means
#'
#' Calculates average Euclidean distance between means in arbitrary dimensions.
#'
#' @param data a numeric vector or \code{n x k} matrix or data frame containing a vector
#' identifying cluster membership for each observation, to be passed to
#' \code{clusters} argument.
#' @param cols columns of \code{data} to be used in difference-in-means calculation.
#' @param clusters column of \code{data} indicating cluster membership for each
#' observation.
#'
#' @return Returns a numeric vector of length 1.
#'
#' @examples
#' data <- matrix(c(rnorm(50, 0, 1), rnorm(50, 5, 1)), ncol = 2, byrow = TRUE)
#' clusters <- matrix(c(rep(1, 25), rep(2, 25)), ncol = 1)
#' data <- cbind(data, clusters)
#'
#' diff_multidim(data, 1:2, 3)
#'
#' @import stats
#'
#' @export
diff_multidim <- function(data, cols, clusters) {
input <- CPCdata.frame(data, cols, clusters)
means <- c()
for (i in unique(input$cluster)) {
name <- paste0("cluster_", i)
assign(name, apply(as.data.frame(input[input$cluster == i, -ncol(input)]), 2, as.numeric))
obs <- nrow(input[input$cluster == i,])
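# clusters with more than one observation use column means directly;
# a cluster with a single observation is itself the cluster mean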
if (obs > 1) {
means <- c(means, colMeans(eval(parse(text = name))))
}
else {
means <- c(means, colMeans(t(as.matrix(eval(parse(text = name))))))
}
}
diff <- mean(as.numeric(dist(matrix(means, ncol = length(unique(cols)), byrow = TRUE))))
return(diff)
}
|
/scratch/gouwar.j/cran-all/cranData/CPC/R/diff_multidim.R
|
.onAttach <- function(libname, pkgname) {
packageStartupMessage("\nTo cite in publications and working papers, please use:\n")
packageStartupMessage(" Mehlhaff, Isaac D. A Group-Based Approach to Measuring Polarization. American Political Science Review (forthcoming).\n")
}
|
/scratch/gouwar.j/cran-all/cranData/CPC/R/zzz.R
|
#Qianxing Mo, [email protected]
#Department of Epidemiology and Biostatistics
#Memorial Sloan-Kettering Cancer Center, NY 10021
#The input for the phcpe function must be a 'coxph' or 'cph' object
#Note, the default setting for model.matrix has been changed, now it doesn't
#need to exclude the first column of the return matrix.
# old: design = model.matrix(coxfit)[,-1]; updated: design = model.matrix(coxfit)
phcpe <- function(coxfit, CPE.SE=FALSE,out.ties=FALSE) {
if(class(coxfit)[1] != "coxph" && class(coxfit)[1] != "cph"){
stop("Error! Input must be a coxph or cph object")
}
row <- as.integer(sum(coxfit$n))
col <- as.integer(length(coxfit$coefficients))
design <- model.matrix(coxfit)
design <- as.double(as.vector(t(design)))
xbeta <- as.double(as.vector(coxfit$linear.predictors))
varbeta <- as.double(as.vector(t(coxfit$var)))
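#rule-of-thumb smoothing bandwidth used by the coxcpe/cpeNoTies C routines:
#0.5 * sd(linear predictor) * n^(-1/3)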
bandwidth <- as.double(0.5*sd(coxfit$linear.predictors)*(row^(-1/3)))
if(CPE.SE==TRUE){
if(row >= 3000) {
message("It may take about n*n minutes to calculate 10000*n rows of data.")
}
if(out.ties == FALSE){
res <- .C("coxcpe",row,col,bandwidth,xbeta,design,varbeta,out=as.double(rep(0, 3)),PACKAGE="CPE")
}else{
res <- .C("cpeNoTies",row,col,bandwidth,xbeta,design,varbeta,out=as.double(rep(0, 3)),PACKAGE="CPE")
}
return(list(CPE = res$out[1], CPE.SE = res$out[3]))
}else {
if(out.ties == FALSE){
res <- .C("coxcpeOnly",row,xbeta,out=as.double(0), PACKAGE="CPE")
}else {
res <- .C("cpeOnlyNoTies",row,xbeta,out=as.double(0), PACKAGE="CPE")
}
return(list(CPE=res$out))
}
}
|
/scratch/gouwar.j/cran-all/cranData/CPE/R/phcpe.R
|
#Qianxing Mo, [email protected]
#Department of Epidemiology and Biostatistics
#Memorial Sloan-Kettering Cancer Center, NY 10021
#The input for the phcpe2 function is a coefficient vector, its covariance matrix, and a design matrix
phcpe2 <- function(coef,coef.var,design,CPE.SE=FALSE,out.ties=FALSE){
covar = as.matrix(coef.var)
design = as.matrix(design)
row <- as.integer(nrow(design))
col <- as.integer(ncol(design))
if(dim(covar)[1] != dim(covar)[2] || dim(covar)[1] != length(coef) || length(coef) != col){
message("Error: the dimensions of coef, coef.var, or design do not match!\n")
stop("length(coef) == ncol(design) == dim(coef.var)[1] == dim(coef.var)[2]\n")
}
xbeta <- as.double(as.vector(design%*%coef))
design <- as.double(as.vector(t(design)))
varbeta <- as.double(as.vector(t(covar)))
bandwidth <- as.double(0.5*sd(xbeta)*(row^(-1/3)))
if(CPE.SE==TRUE){
if(row >= 3000) {
message("It may take about n*n minutes to calculate 10000*n rows of data.")
}
if(out.ties == FALSE){
res <- .C("coxcpe",row,col,bandwidth,xbeta,design,varbeta,out=as.double(rep(0, 3)),PACKAGE="CPE")
}else{
res <- .C("cpeNoTies",row,col,bandwidth,xbeta,design,varbeta,out=as.double(rep(0, 3)),PACKAGE="CPE")
}
return(list(CPE = res$out[1], CPE.SE = res$out[3]))
}else {
if(out.ties == FALSE){
res <- .C("coxcpeOnly",row,xbeta,out=as.double(0), PACKAGE="CPE")
}else {
res <- .C("cpeOnlyNoTies",row,xbeta,out=as.double(0), PACKAGE="CPE")
}
return(list(CPE=res$out))
}
}
|
/scratch/gouwar.j/cran-all/cranData/CPE/R/phcpe2.R
|
### create a simple data set for testing
### (coxph() and cph() below come from the survival and rms packages; attach
### them explicitly so this demo also runs as a standalone script)
library(survival)
library(rms)
set.seed(199)
nn <- 1000
time <- rexp(nn)
status <- sample(0:1, nn, replace=TRUE)
covar <- matrix(rnorm(3*nn), ncol=3)
survd <- data.frame(time, status, covar)
names(survd) <- c("time","status","x1","x2","x3")
coxph.fit <- coxph(Surv(time,status)~x1+x2+x3,data=survd)
### Calculate CPE and CPE.SE
phcpe(coxph.fit)
### Calculate CPE only (needs much less time)
phcpe(coxph.fit, CPE.SE=FALSE)
#*** For unknown reason, 'coxph.fit' may need to be removed before running cph()***
rm(coxph.fit)
cph.fit <- cph(Surv(time, status)~x1+x2+x3, data=survd)
### Calculate CPE and CPE.SE
phcpe(cph.fit)
### Calculate CPE only (needs much less time)
phcpe(cph.fit, CPE.SE=FALSE)
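
### Illustrative addition (not part of the original demo): phcpe2() computes the
### CPE directly from coefficient estimates, their covariance matrix, and the
### design matrix. The Cox model is refitted here because 'coxph.fit' was
### removed above.
coxph.fit2 <- coxph(Surv(time,status)~x1+x2+x3,data=survd)
phcpe2(coef = coxph.fit2$coefficients, coef.var = coxph.fit2$var,
       design = model.matrix(coxph.fit2))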
|
/scratch/gouwar.j/cran-all/cranData/CPE/demo/phcpe.R
|
#'
#' @title Competing Proximal Gradients Library for Ensembles of Generalized Linear Models
#'
#' @description \code{cpg} computes the coefficients for ensembles of generalized linear models via competing proximal gradients.
#'
#' @param x Design matrix.
#' @param y Response vector.
#' @param glm_type Description of the error distribution and link function to be used for the model. Must be one of "Linear" or
#' "Logistic". Default is "Linear".
#' @param G Number of groups in the ensemble.
#' @param include_intercept Argument to determine whether there is an intercept. Default is TRUE.
#' @param alpha_s Sparsity mixing parameter. Default is 3/4.
#' @param alpha_d Diversity mixing parameter. Default is 1.
#' @param lambda_sparsity Sparsity tuning parameter value.
#' @param lambda_diversity Diversity tuning parameter value.
#' @param tolerance Convergence criteria for the coefficients. Default is 1e-8.
#' @param max_iter Maximum number of iterations in the algorithm. Default is 1e5.
#'
#' @return An object of class \code{CPGLIB}.
#'
#' @export
#'
#' @author Anthony-Alexander Christidis, \email{[email protected]}
#'
#' @seealso \code{\link{coef.CPGLIB}}, \code{\link{predict.CPGLIB}}
#'
#' @examples
#' \donttest{
#' # Data simulation
#' set.seed(1)
#' n <- 50
#' N <- 2000
#' p <- 300
#' beta.active <- c(abs(runif(p, 0, 1/2))*(-1)^rbinom(p, 1, 0.3))
#' # Parameters
#' p.active <- 150
#' beta <- c(beta.active[1:p.active], rep(0, p-p.active))
#' Sigma <- matrix(0, p, p)
#' Sigma[1:p.active, 1:p.active] <- 0.5
#' diag(Sigma) <- 1
#'
#' # Train data
#' x.train <- mvnfast::rmvn(n, mu = rep(0, p), sigma = Sigma)
#' prob.train <- exp(x.train %*% beta)/
#' (1+exp(x.train %*% beta))
#' y.train <- rbinom(n, 1, prob.train)
#' # Test data
#' x.test <- mvnfast::rmvn(N, mu = rep(0, p), sigma = Sigma)
#' prob.test <- exp(x.test %*% beta)/
#' (1+exp(x.test %*% beta))
#' y.test <- rbinom(N, 1, prob.test)
#'
#' # CPGLIB - Multiple Groups
#' cpg.out <- cpg(x.train, y.train,
#' glm_type = "Logistic",
#' G = 5, include_intercept = TRUE,
#' alpha_s = 3/4, alpha_d = 1,
#' lambda_sparsity = 0.01, lambda_diversity = 1,
#' tolerance = 1e-5, max_iter = 1e5)
#'
#' # Predictions
#' cpg.prob <- predict(cpg.out, newx = x.test, class_type = "prob",
#'                     groups = 1:cpg.out$G, ensemble_type = "Model-Avg")
#' cpg.class <- predict(cpg.out, newx = x.test, class_type = "class",
#'                      groups = 1:cpg.out$G, ensemble_type = "Model-Avg")
#' plot(prob.test, cpg.prob, pch = 20)
#' abline(h = 0.5,v = 0.5)
#' mean((prob.test-cpg.prob)^2)
#' mean(abs(y.test-cpg.class))
#'
#' }
#'
cpg <- function(x, y,
glm_type = c("Linear", "Logistic")[1],
G = 5,
include_intercept = TRUE,
alpha_s = 3/4, alpha_d = 1,
lambda_sparsity, lambda_diversity,
tolerance = 1e-8, max_iter = 1e5){
# Check response data
y <- Check_Response_CPGLIB(y, glm_type)
# Check data
Check_Data_ProxGrad(x, y,
glm_type,
alpha_s,
lambda_sparsity,
tolerance, max_iter)
# Shuffling the data
n <- nrow(x)
random.permutation <- sample(1:n, n)
x.permutation <- x[random.permutation, ]
y.permutation <- y[random.permutation]
# Setting the model type
type.cpp <- switch(glm_type,
"Linear" = 1,
"Logistic" = 2)
# Setting to include intercept parameter for CPP computation
include_intercept.cpp <- sum(include_intercept)
# Source code computation
cpg.out <- CPGLIB_Main(x.permutation, y.permutation,
type.cpp,
G,
include_intercept.cpp,
alpha_s, alpha_d,
lambda_sparsity, lambda_diversity,
tolerance, max_iter)
# Object construction
cpg.out <- construct.CPGLIB(cpg.out, match.call(), glm_type, G, lambda_sparsity, lambda_diversity)
# Return source code output
return(cpg.out)
}
|
/scratch/gouwar.j/cran-all/cranData/CPGLIB/R/CPGLIB.R
|
#'
#' @title Coefficients for CPGLIB Object
#'
#' @description \code{coef.CPGLIB} returns the coefficients for a CPGLIB object.
#'
#' @method coef CPGLIB
#'
#' @param object An object of class CPGLIB.
#' @param groups The groups in the ensemble for the coefficients. Default is all of the groups in the ensemble.
#' @param ensemble_average Option to return the average of the coefficients over all the groups in the ensemble. Default is FALSE.
#' @param ... Additional arguments for compatibility.
#'
#' @return The coefficients for the CPGLIB object.
#'
#' @export
#'
#' @author Anthony-Alexander Christidis, \email{[email protected]}
#'
#' @seealso \code{\link{cpg}}
#'
#' @examples
#' \donttest{
#' # Data simulation
#' set.seed(1)
#' n <- 50
#' N <- 2000
#' p <- 300
#' beta.active <- c(abs(runif(p, 0, 1/2))*(-1)^rbinom(p, 1, 0.3))
#' # Parameters
#' p.active <- 150
#' beta <- c(beta.active[1:p.active], rep(0, p-p.active))
#' Sigma <- matrix(0, p, p)
#' Sigma[1:p.active, 1:p.active] <- 0.5
#' diag(Sigma) <- 1
#'
#' # Train data
#' x.train <- mvnfast::rmvn(n, mu = rep(0, p), sigma = Sigma)
#' prob.train <- exp(x.train %*% beta)/
#' (1+exp(x.train %*% beta))
#' y.train <- rbinom(n, 1, prob.train)
#' # Test data
#' x.test <- mvnfast::rmvn(N, mu = rep(0, p), sigma = Sigma)
#' prob.test <- exp(x.test %*% beta)/
#' (1+exp(x.test %*% beta))
#' y.test <- rbinom(N, 1, prob.test)
#'
#' # CPGLIB - Multiple Groups
#' cpg.out <- cpg(x.train, y.train,
#' glm_type="Logistic",
#' G=5, include_intercept=TRUE,
#' alpha_s=3/4, alpha_d=1,
#' lambda_sparsity=0.01, lambda_diversity=1,
#' tolerance=1e-5, max_iter=1e5)
#'
#' # Coefficients for each group
#' cpg.coef <- coef(cpg.out, ensemble_average = FALSE)
#' }
#'
coef.CPGLIB <- function(object, groups = NULL, ensemble_average = FALSE, ...){
# Check input data
if(!any(class(object) %in% "CPGLIB"))
stop("The object should be of class \"CPGLIB\"")
# Checking groups
if(is.null(groups))
groups <- 1:object$G else if(!is.null(groups) && !all(groups %in% (1:object$G)))
stop("The groups specified are not valid.")
# Return of coefficients
if(!ensemble_average)
return(rbind(t(object$Intercept[groups,]), object$Betas[,groups, drop=FALSE])) else
return(apply(rbind(t(object$Intercept[groups,]), object$Betas[,groups, drop=FALSE]), 1, mean))
}
#'
#' @title Coefficients for cv.CPGLIB Object
#'
#' @method coef cv.CPGLIB
#'
#' @description \code{coef.cv.CPGLIB} returns the coefficients for a cv.CPGLIB object.
#'
#' @param object An object of class cv.CPGLIB.
#' @param groups The groups in the ensemble for the coefficients. Default is all of the groups in the ensemble.
#' @param ensemble_average Option to return the average of the coefficients over all the groups in the ensemble. Default is FALSE.
#' @param ... Additional arguments for compatibility.
#'
#' @return The coefficients for the cv.CPGLIB object.
#'
#' @export
#'
#' @author Anthony-Alexander Christidis, \email{[email protected]}
#'
#' @seealso \code{\link{cv.cpg}}
#'
#' @examples
#' \donttest{
#' # Data simulation
#' set.seed(1)
#' n <- 50
#' N <- 2000
#' p <- 300
#' beta.active <- c(abs(runif(p, 0, 1/2))*(-1)^rbinom(p, 1, 0.3))
#' # Parameters
#' p.active <- 150
#' beta <- c(beta.active[1:p.active], rep(0, p-p.active))
#' Sigma <- matrix(0, p, p)
#' Sigma[1:p.active, 1:p.active] <- 0.5
#' diag(Sigma) <- 1
#'
#' # Train data
#' x.train <- mvnfast::rmvn(n, mu = rep(0, p), sigma = Sigma)
#' prob.train <- exp(x.train %*% beta)/
#' (1+exp(x.train %*% beta))
#' y.train <- rbinom(n, 1, prob.train)
#' # Test data
#' x.test <- mvnfast::rmvn(N, mu = rep(0, p), sigma = Sigma)
#' prob.test <- exp(x.test %*% beta)/
#' (1+exp(x.test %*% beta))
#' y.test <- rbinom(N, 1, prob.test)
#' mean(y.test)
#'
#' # CV CPGLIB - Multiple Groups
#' cpg.out <- cv.cpg(x.train, y.train,
#' glm_type = "Logistic",
#' G = 5, include_intercept = TRUE,
#' alpha_s = 3/4, alpha_d = 1,
#' n_lambda_sparsity = 100, n_lambda_diversity = 100,
#' tolerance = 1e-5, max_iter = 1e5)
#' cpg.coef <- coef(cpg.out)
#'
#' # Coefficients for each group
#' cpg.coef <- coef(cpg.out, ensemble_average = FALSE)
#'
#' }
#'
#'
coef.cv.CPGLIB <- function(object, groups = NULL, ensemble_average = FALSE, ...){
# Check input data
if(!any(class(object) %in% "cv.CPGLIB"))
stop("The object should be of class \"cv.CPGLIB\"")
# Checking groups
if(is.null(groups))
groups <- 1:object$G else if(!is.null(groups) && !all(groups %in% (1:object$G)))
stop("The groups specified are not valid.")
# Extracting coefficients
if(length(groups)==1)
extracted.coef <- as.matrix(c(object$Intercept[groups, object$Optimal_Index, drop=TRUE],
object$Betas[, groups, object$Optimal_Index, drop=TRUE]),
ncol=1) else
extracted.coef <- rbind(t(object$Intercept[groups, object$Optimal_Index, drop=TRUE]),
object$Betas[, groups, object$Optimal_Index, drop=TRUE])
# Return of coefficients
if(!ensemble_average)
return(extracted.coef) else
return(apply(extracted.coef, 1, mean))
}
|
/scratch/gouwar.j/cran-all/cranData/CPGLIB/R/CPGLIB_Coefficient_Functions.R
|
# -----------------------------------------------------------------------
# Object Construction for CPGLIB object
#
# object: the CPGLIB object
# fn_call: the function call
# -----------------------------------------------------------------------
construct.CPGLIB <- function(object, fn_call, glm_type, G, lambda_sparsity, lambda_diversity){
class(object) <- append("CPGLIB", class(object))
object$call <- fn_call
object$glm_type <- glm_type
object$G <- G
object$lambda_sparsity <- lambda_sparsity
object$lambda_diversity <- lambda_diversity
return(object)
}
# -----------------------------------------------------------------------
# Object Construction for cv.CPGLIB object
#
# object: the cv.CPGLIB object
# fn_call: the function call
# -----------------------------------------------------------------------
construct.cv.CPGLIB <- function(object, fn_call, glm_type, G, n_lambda_sparsity, n_lambda_diversity){
class(object) <- append("cv.CPGLIB", class(object))
object$call <- fn_call
object$glm_type <- glm_type
object$G <- G
object$n_lambda_sparsity <- n_lambda_sparsity
object$n_lambda_diversity <- n_lambda_diversity
return(object)
}
|
/scratch/gouwar.j/cran-all/cranData/CPGLIB/R/CPGLIB_Construction_Functions.R
|
#'
#' @title Predictions for CPGLIB Object
#'
#' @description \code{predict.CPGLIB} returns the predictions for a CPGLIB object.
#'
#' @method predict CPGLIB
#'
#' @param object An object of class CPGLIB.
#' @param newx New data for predictions.
#' @param groups The groups in the ensemble for the predictions. Default is all of the groups in the ensemble.
#' @param ensemble_type The type of ensembling function for the models. Options are "Model-Avg", "Coef-Avg", "Weighted-Prob" or
#'  "Majority-Vote"; the last two apply to classification predictions only. Default is "Model-Avg".
#' @param class_type The type of predictions for classification. Options are "prob" and "class". Default is "prob".
#' @param ... Additional arguments for compatibility.
#'
#' @return The predictions for the CPGLIB object.
#'
#' @export
#'
#' @author Anthony-Alexander Christidis, \email{[email protected]}
#'
#' @seealso \code{\link{cpg}}
#'
#' @examples
#' \donttest{
#' # Data simulation
#' set.seed(1)
#' n <- 50
#' N <- 2000
#' p <- 300
#' beta.active <- c(abs(runif(p, 0, 1/2))*(-1)^rbinom(p, 1, 0.3))
#' # Parameters
#' p.active <- 150
#' beta <- c(beta.active[1:p.active], rep(0, p-p.active))
#' Sigma <- matrix(0, p, p)
#' Sigma[1:p.active, 1:p.active] <- 0.5
#' diag(Sigma) <- 1
#'
#' # Train data
#' x.train <- mvnfast::rmvn(n, mu = rep(0, p), sigma = Sigma)
#' prob.train <- exp(x.train %*% beta)/
#' (1+exp(x.train %*% beta))
#' y.train <- rbinom(n, 1, prob.train)
#' # Test data
#' x.test <- mvnfast::rmvn(N, mu = rep(0, p), sigma = Sigma)
#' prob.test <- exp(x.test %*% beta)/
#' (1+exp(x.test %*% beta))
#' y.test <- rbinom(N, 1, prob.test)
#'
#' # CPGLIB - Multiple Groups
#' cpg.out <- cpg(x.train, y.train,
#' glm_type = "Logistic",
#' G = 5, include_intercept = TRUE,
#' alpha_s = 3/4, alpha_d = 1,
#' lambda_sparsity = 0.01, lambda_diversity = 1,
#' tolerance = 1e-5, max_iter = 1e5)
#'
#' # Predictions
#' cpg.prob <- predict(cpg.out, newx = x.test, class_type = "prob",
#'                     groups = 1:cpg.out$G, ensemble_type = "Model-Avg")
#' cpg.class <- predict(cpg.out, newx = x.test, class_type = "class",
#'                      groups = 1:cpg.out$G, ensemble_type = "Model-Avg")
#' plot(prob.test, cpg.prob, pch=20)
#' abline(h=0.5,v=0.5)
#' mean((prob.test-cpg.prob)^2)
#' mean(abs(y.test-cpg.class))
#'
#' }
#'
predict.CPGLIB <- function(object, newx,
groups = NULL,
ensemble_type = c("Model-Avg", "Coef-Avg", "Weighted-Prob", "Majority-Vote")[1],
class_type = c("prob", "class")[1],
...){
# Check input data
if(!any(class(object) %in% "CPGLIB"))
stop("The object should be of class \"CPGLIB\"")
# Checking groups
if(is.null(groups))
groups <- 1:object$G else if(!is.null(groups) && !all(groups %in% (1:object$G)))
stop("The groups specified are not valid.")
# Check ensemble function
if(!any(ensemble_type %in% c("Model-Avg", "Coef-Avg", "Weighted-Prob", "Majority-Vote")))
stop("The argument \"ensemble_type\" must be one of \"Model-Avg\", \"Coef-Avg\", \"Weighted-Prob\" or \"Majority-Vote\".")
  # Argument compatibility
if(object$glm_type!="Logistic" && any(ensemble_type %in% c("Weighted-Prob", "Majority-Vote")))
stop("The \"ensemble_type\" argument is incompatible with the GLM type.") else{
if((ensemble_type %in% c("Weighted-Prob", "Majority-Vote")) && class_type=="prob")
stop("The options \"Weighted-Prob\" or \"Majority-Vote\" must have the argument \"class_type\" set to \"class\".")
}
if(object$glm_type=="Linear"){ # LINEAR MODEL
cpg.coef <- coef(object, groups=groups, ensemble_average=TRUE)
return(cpg.coef[1] + newx %*% cpg.coef[-1])
} else if(object$glm_type=="Logistic"){ # LOGISTIC MODEL
if(!(class_type %in% c("prob", "class")))
stop("The variable \"type\" must be one of: \"prob\", or \"class\".")
if(ensemble_type=="Model-Avg"){
cpg.coef <- coef(object)
logistic.prob <- sapply(groups, function(cpg.coef, x)
return(exp(cpg.coef[1,x] + newx %*% cpg.coef[-1,x])/(1+exp(cpg.coef[1,x] + newx %*% cpg.coef[-1,x]))),
cpg.coef=cpg.coef)
logistic.prob <- apply(logistic.prob, 1, mean)
if(class_type=="prob")
return(logistic.prob) else if(class_type=="class")
return(round(logistic.prob, 0))
} else if(ensemble_type=="Coef-Avg"){
cpg.coef <- coef(object, ensemble_average=TRUE)
logistic.prob <- exp(cpg.coef[1] + newx %*% cpg.coef[-1])/(1+exp(cpg.coef[1] + newx %*% cpg.coef[-1]))
if(class_type=="prob")
return(logistic.prob) else if(class_type=="class")
return(round(logistic.prob, 0))
} else if(ensemble_type=="Weighted-Prob"){
cpg.coef <- coef(object)
logistic.prob <- sapply(groups, function(cpg.coef, x)
return(exp(cpg.coef[1,x] + newx %*% cpg.coef[-1,x])/(1+exp(cpg.coef[1,x] + newx %*% cpg.coef[-1,x]))),
cpg.coef=cpg.coef)
return(as.numeric(apply(logistic.prob, 1, function(x) return(prod(x)>prod(1-x)))))
} else if(ensemble_type=="Majority-Vote"){
cpg.coef <- coef(object)
logistic.prob <- sapply(groups, function(cpg.coef, x)
return(exp(cpg.coef[1,x] + newx %*% cpg.coef[-1,x])/(1+exp(cpg.coef[1,x] + newx %*% cpg.coef[-1,x]))),
cpg.coef=cpg.coef)
return(as.numeric(apply(2*round(logistic.prob, 0), 1, mean)>=1))
}
} else if(object$glm_type=="Gamma"){ # GAMMA MODEL
if(ensemble_type=="Model-Avg"){
cpg.coef <- coef(object)
gamma.predictions <- sapply(groups, function(x, cpg.coef)
exp(cpg.coef[1,x] + newx %*% cpg.coef[-1,x]),
cpg.coef=cpg.coef)
return(apply(gamma.predictions, 1, mean))
} else if(ensemble_type=="Coef-Avg"){
cpg.coef <- coef(object, groups=groups, ensemble_average=TRUE)
return(exp(cpg.coef[1] + newx %*% cpg.coef[-1]))
}
} else if(object$glm_type=="Poisson"){ # POISSON MODEL
if(ensemble_type=="Model-Avg"){
cpg.coef <- coef(object)
poisson.predictions <- sapply(groups, function(x, cpg.coef)
exp(cpg.coef[1,x] + newx %*% cpg.coef[-1,x]),
cpg.coef=cpg.coef)
return(apply(poisson.predictions, 1, mean))
} else if(ensemble_type=="Coef-Avg"){
cpg.coef <- coef(object, groups=groups, ensemble_average=TRUE)
return(exp(cpg.coef[1] + newx %*% cpg.coef[-1]))
}
}
}
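# Toy illustration of the classification ensembling rules above (hypothetical
# per-group probabilities 0.6, 0.7, 0.4 for a single observation):
# "Majority-Vote": round() gives classes 1, 1, 0, so mean(2*c(1, 1, 0)) = 4/3 >= 1 -> class 1.
# "Weighted-Prob": prod(c(0.6, 0.7, 0.4)) = 0.168 > prod(1 - c(0.6, 0.7, 0.4)) = 0.072 -> class 1.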
#'
#' @title Predictions for cv.CPGLIB Object
#'
#' @description \code{predict.cv.CPGLIB} returns the predictions for a cv.CPGLIB object.
#'
#' @method predict cv.CPGLIB
#'
#' @param object An object of class cv.CPGLIB.
#' @param newx New data for predictions.
#' @param groups The groups in the ensemble for the predictions. Default is all of the groups in the ensemble.
#' @param ensemble_type The type of ensembling function for the models. Options are "Model-Avg", "Coef-Avg", "Weighted-Prob" or
#'  "Majority-Vote"; the last two apply to classification predictions only. Default is "Model-Avg".
#' @param class_type The type of predictions for classification. Options are "prob" and "class". Default is "prob".
#' @param ... Additional arguments for compatibility.
#'
#' @return The predictions for the cv.CPGLIB object.
#'
#' @export
#'
#' @author Anthony-Alexander Christidis, \email{[email protected]}
#'
#' @seealso \code{\link{cv.cpg}}
#'
#' @examples
#' \donttest{
#' # Data simulation
#' set.seed(1)
#' n <- 50
#' N <- 2000
#' p <- 300
#' beta.active <- c(abs(runif(p, 0, 1/2))*(-1)^rbinom(p, 1, 0.3))
#' # Parameters
#' p.active <- 150
#' beta <- c(beta.active[1:p.active], rep(0, p-p.active))
#' Sigma <- matrix(0, p, p)
#' Sigma[1:p.active, 1:p.active] <- 0.5
#' diag(Sigma) <- 1
#'
#' # Train data
#' x.train <- mvnfast::rmvn(n, mu = rep(0, p), sigma = Sigma)
#' prob.train <- exp(x.train %*% beta)/
#' (1+exp(x.train %*% beta))
#' y.train <- rbinom(n, 1, prob.train)
#' # Test data
#' x.test <- mvnfast::rmvn(N, mu = rep(0, p), sigma = Sigma)
#' prob.test <- exp(x.test %*% beta)/
#' (1+exp(x.test %*% beta))
#' y.test <- rbinom(N, 1, prob.test)
#' mean(y.test)
#'
#' # CV CPGLIB - Multiple Groups
#' cpg.out <- cv.cpg(x.train, y.train,
#' glm_type = "Logistic",
#' G = 5, include_intercept = TRUE,
#' alpha_s = 3/4, alpha_d = 1,
#' n_lambda_sparsity = 100, n_lambda_diversity = 100,
#' tolerance = 1e-5, max_iter = 1e5)
#'
#' # Predictions
#' cpg.prob <- predict(cpg.out, newx = x.test, class_type = "prob",
#'                     groups = 1:cpg.out$G, ensemble_type = "Model-Avg")
#' cpg.class <- predict(cpg.out, newx = x.test, class_type = "class",
#'                      groups = 1:cpg.out$G, ensemble_type = "Model-Avg")
#' plot(prob.test, cpg.prob, pch = 20)
#' abline(h = 0.5,v = 0.5)
#' mean((prob.test-cpg.prob)^2)
#' mean(abs(y.test-cpg.class))
#'
#' }
#'
#'
predict.cv.CPGLIB <- function(object, newx,
groups = NULL,
ensemble_type = c("Model-Avg", "Coef-Avg", "Weighted-Prob", "Majority-Vote")[1],
class_type = c("prob", "class")[1],
...){
# Check input data
if(!any(class(object) %in% "cv.CPGLIB"))
stop("The object should be of class \"cv.CPGLIB\"")
# Checking groups
if(is.null(groups))
groups <- 1:object$G else if(!is.null(groups) && !all(groups %in% (1:object$G)))
stop("The groups specified are not valid.")
# Check ensemble function
if(!any(ensemble_type %in% c("Model-Avg", "Coef-Avg", "Weighted-Prob", "Majority-Vote")))
stop("The argument \"ensemble_type\" must be one of \"Model-Avg\", \"Coef-Avg\", \"Weighted-Prob\" or \"Majority-Vote\".")
  # Argument compatibility
if(object$glm_type!="Logistic" && any(ensemble_type %in% c("Weighted-Prob", "Majority-Vote")))
stop("The \"ensemble_type\" argument is incompatible with the GLM type.") else{
if((ensemble_type %in% c("Weighted-Prob", "Majority-Vote")) && class_type=="prob")
stop("The options \"Weighted-Prob\" or \"Majority-Vote\" must have the argument \"class_type\" set to \"class\".")
}
if(object$glm_type=="Linear"){ # LINEAR MODEL
cpg.coef <- coef(object, groups=groups, ensemble_average=TRUE)
return(cpg.coef[1] + newx %*% cpg.coef[-1])
} else if(object$glm_type=="Logistic"){ # LOGISTIC MODEL
if(!(class_type %in% c("prob", "class")))
stop("The variable \"type\" must be one of: \"prob\", or \"class\".")
if(ensemble_type=="Model-Avg"){
cpg.coef <- coef(object)
logistic.prob <- sapply(groups, function(cpg.coef, x)
return(exp(cpg.coef[1,x] + newx %*% cpg.coef[-1,x])/(1+exp(cpg.coef[1,x] + newx %*% cpg.coef[-1,x]))),
cpg.coef=cpg.coef)
logistic.prob <- apply(logistic.prob, 1, mean)
if(class_type=="prob")
return(logistic.prob) else if(class_type=="class")
return(round(logistic.prob, 0))
} else if(ensemble_type=="Coef-Avg"){
cpg.coef <- coef(object, groups=groups, ensemble_average=TRUE)
logistic.prob <- exp(cpg.coef[1] + newx %*% cpg.coef[-1])/(1+exp(cpg.coef[1] + newx %*% cpg.coef[-1]))
if(class_type=="prob")
return(logistic.prob) else if(class_type=="class")
return(round(logistic.prob, 0))
} else if(ensemble_type=="Weighted-Prob"){
cpg.coef <- coef(object)
logistic.prob <- sapply(groups, function(cpg.coef, x)
return(exp(cpg.coef[1,x] + newx %*% cpg.coef[-1,x])/(1+exp(cpg.coef[1,x] + newx %*% cpg.coef[-1,x]))),
cpg.coef=cpg.coef)
return(as.numeric(apply(logistic.prob, 1, function(x) return(prod(x)>prod(1-x)))))
} else if(ensemble_type=="Majority-Vote"){
cpg.coef <- coef(object)
logistic.prob <- sapply(groups, function(cpg.coef, x)
return(exp(cpg.coef[1,x] + newx %*% cpg.coef[-1,x])/(1+exp(cpg.coef[1,x] + newx %*% cpg.coef[-1,x]))),
cpg.coef=cpg.coef)
return(as.numeric(apply(2*round(logistic.prob, 0), 1, mean)>=1))
}
}
}
|
/scratch/gouwar.j/cran-all/cranData/CPGLIB/R/CPGLIB_Prediction_Functions.R
|
# -----------------------------------------------------------------------
# Checking Input Data for CPGLIB Object
# -----------------------------------------------------------------------
Check_Data_CPGLIB <- function(x, y,
glm_type,
G,
alpha_s, alpha_d,
lambda_sparsity, lambda_diversity,
tolerance, max_iter){
# Check for design matrix and response vector
if(all(!inherits(x, "matrix"), !inherits(x, "data.frame"))) {
stop("x should belong to one of the following classes: matrix, data.frame.")
} else if(all(!inherits(y, "matrix"), !inherits(y, "numeric"), !inherits(y, "integer"))) {
stop("y should belong to one of the following classes: matrix, numeric, integer.")
} else if(any(anyNA(x), any(is.nan(x)), any(is.infinite(x)))) {
stop("x should not have missing, infinite or nan values.")
} else if(any(anyNA(y), any(is.nan(y)), any(is.infinite(y)))) {
stop("y should not have missing, infinite or nan values.")
} else {
if(inherits(y, "matrix")) {
if(ncol(y)>1){
stop("y should be a vector")
}
# Force to vector if input was a matrix
y <- as.numeric(y)
}
len_y <- length(y)
if(len_y != nrow(x)) {
stop("y and x should have the same number of rows.")
}
}
# Check input for GLM type
if(!(glm_type %in% c("Linear", "Logistic")))
stop("The GLM type specified in invalid.")
# Check number of groups
if(!inherits(G, "numeric")) {
stop("G should be numeric.")
} else if(any(!G == floor(G), G < 1)) {
stop("G should be an integer, one or greater.")
}
# Check alpha_s value
if(!inherits(alpha_s, "numeric")) {
stop("alpha_s should be numeric.")
} else if(!all(alpha_s <= 1, alpha_s > 0)) {
stop("alpha_s should be between 0 and 1.")
}
# Check alpha_d value
if(!inherits(alpha_d, "numeric")) {
stop("alpha_d should be numeric.")
} else if(!all(alpha_d <= 1, alpha_d > 0)) {
stop("alpha_d should be between 0 and 1.")
}
# Check input for lambda_sparsity
if(!inherits(lambda_sparsity, "numeric")) {
stop("lambda_sparsity should be numeric.")
} else if(lambda_sparsity < 0) {
stop("lambda_sparsity should be a positive.")
}
# Check input for lambda_diversity
if(!inherits(lambda_diversity, "numeric")) {
stop("lambda_diversity should be numeric.")
} else if(lambda_diversity < 0) {
stop("lambda_diversity should be a positive.")
}
# Check tolerance
if(!inherits(tolerance, "numeric")) {
stop("tolerance should be numeric.")
} else if(!all(tolerance < 1, tolerance > 0)) {
stop("tolerance should be between 0 and 1.")
}
# Check maximum number of iterations
if(!inherits(max_iter, "numeric")) {
stop("max_iter should be numeric.")
} else if(any(!max_iter == floor(max_iter), max_iter <= 0)) {
stop("max_iter should be a positive integer.")
}
}
# -----------------------------------------------------------------------
# Checking Input Data for cv.CPGLIB Object
# -----------------------------------------------------------------------
Check_Data_CV_CPGLIB <- function(x, y,
glm_type,
G,
alpha_s, alpha_d,
n_lambda_sparsity, n_lambda_diversity,
tolerance, max_iter,
n_folds,
n_threads){
# Check for design matrix and response vector
if(all(!inherits(x, "matrix"), !inherits(x, "data.frame"))) {
stop("x should belong to one of the following classes: matrix, data.frame.")
} else if(all(!inherits(y, "matrix"), !inherits(y, "numeric"), !inherits(y, "integer"))) {
stop("y should belong to one of the following classes: matrix, numeric, integer.")
} else if(any(anyNA(x), any(is.nan(x)), any(is.infinite(x)))) {
stop("x should not have missing, infinite or nan values.")
} else if(any(anyNA(y), any(is.nan(y)), any(is.infinite(y)))) {
stop("y should not have missing, infinite or nan values.")
} else {
if(inherits(y, "matrix")) {
if(ncol(y)>1){
stop("y should be a vector")
}
# Force to vector if input was a matrix
y <- as.numeric(y)
}
len_y <- length(y)
if(len_y != nrow(x)) {
stop("y and x should have the same number of rows.")
}
}
# Check tolerance
if(!inherits(tolerance, "numeric")) {
stop("tolerance should be numeric.")
} else if(!all(tolerance < 1, tolerance > 0)) {
stop("tolerance should be between 0 and 1.")
}
# Check alpha_s value
if(!inherits(alpha_s, "numeric")) {
stop("alpha_s should be numeric.")
} else if(!all(alpha_s <= 1, alpha_s > 0)) {
stop("alpha_s should be between 0 and 1.")
}
# Check alpha_d value
if(!inherits(alpha_d, "numeric")) {
stop("alpha_d should be numeric.")
} else if(!all(alpha_d <= 1, alpha_d > 0)) {
stop("alpha_d should be between 0 and 1.")
}
# Check maximum number of iterations
if(!inherits(max_iter, "numeric")) {
stop("max_iter should be numeric.")
} else if(any(!max_iter == floor(max_iter), max_iter <= 0)) {
stop("max_iter should be a positive integer.")
}
# Check input for number of candidates for sparsity value
if(!inherits(n_lambda_sparsity, "numeric")) {
stop("n_lambda_sparsity should be numeric")
} else if(any(!n_lambda_sparsity == floor(n_lambda_sparsity), n_lambda_sparsity <= 0)) {
stop("n_lambda_sparsity should be a positive integer")
}
# Check input for number of candidates for diversity value
if(!inherits(n_lambda_diversity, "numeric")) {
stop("n_lambda_diversity should be numeric")
} else if(any(!n_lambda_diversity == floor(n_lambda_diversity), n_lambda_diversity <= 0)) {
stop("n_lambda_diversity should be a positive integer")
}
# Check input for number of folds
if(!inherits(n_folds, "numeric")) {
stop("n_folds should be numeric")
} else if(any(!n_folds == floor(n_folds), n_folds <= 0)) {
stop("n_folds should be a positive integer")
}
# Check input for number of threads
if(!inherits(n_threads, "numeric")) {
stop("n_threads should be numeric")
} else if(any(!n_threads == floor(n_threads), n_threads <= 0)) {
stop("n_threads should be a positive integer")
}
}
# -----------------------------------------------------------------------
# Checking Response Data for CPGLIB and cv.CPGLIB Object
# -----------------------------------------------------------------------
# Modifying response input data
Check_Response_CPGLIB <- function(y, glm_type){
if(glm_type=="Logistic"){
if(length(unique(y))!=2)
stop("The response vector \"y\" must contain at most 2 classes if \"glm_type\" is \"Logistic\".") else{
if(!all(y %in% c(0,1)))
return(ifelse(y==y[1], 1, 0)) else
return(y)
}
} else if(glm_type=="Linear")
return(y)
}
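# Example of the recoding above (illustrative): a two-class response that is not
# already coded 0/1 is mapped so that the first observed value becomes 1, e.g.
# Check_Response_CPGLIB(c("b", "a", "b"), "Logistic") returns c(1, 0, 1).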
|
/scratch/gouwar.j/cran-all/cranData/CPGLIB/R/Check_Data_CPGLIB.R
|
# -----------------------------------------------------------------------
# Checking Input Data for ProxGrad Object
# -----------------------------------------------------------------------
Check_Data_ProxGrad <- function(x, y,
glm_type,
alpha_s,
lambda_sparsity,
tolerance, max_iter){
# Check for design matrix and response vector
if(all(!inherits(x, "matrix"), !inherits(x, "data.frame"))) {
stop("x should belong to one of the following classes: matrix, data.frame.")
} else if(all(!inherits(y, "matrix"), !inherits(y, "numeric"), !inherits(y, "integer"))) {
stop("y should belong to one of the following classes: matrix, numeric, integer.")
} else if(any(anyNA(x), any(is.nan(x)), any(is.infinite(x)))) {
stop("x should not have missing, infinite or nan values.")
} else if(any(anyNA(y), any(is.nan(y)), any(is.infinite(y)))) {
stop("y should not have missing, infinite or nan values.")
} else {
if(inherits(y, "matrix")) {
if(ncol(y)>1){
stop("y should be a vector")
}
# Force to vector if input was a matrix
y <- as.numeric(y)
}
len_y <- length(y)
if(len_y != nrow(x)) {
stop("y and x should have the same number of rows.")
}
}
# Check input for GLM type
if(!(glm_type %in% c("Linear", "Logistic")))
stop("The GLM type specified in invalid.")
# Check alpha_s value
if(!inherits(alpha_s, "numeric")) {
stop("alpha_s should be numeric.")
} else if(!all(alpha_s <= 1, alpha_s > 0)) {
stop("alpha_s should be between 0 and 1.")
}
# Check input for lambda_sparsity
if(!inherits(lambda_sparsity, "numeric")) {
stop("lambda_sparsity should be numeric.")
} else if(lambda_sparsity < 0) {
stop("lambda_sparsity should be a positive.")
}
# Check tolerance
if(!inherits(tolerance, "numeric")) {
stop("tolerance should be numeric.")
} else if(!all(tolerance < 1, tolerance > 0)) {
stop("tolerance should be between 0 and 1.")
}
# Check maximum number of iterations
if(!inherits(max_iter, "numeric")) {
stop("max_iter should be numeric.")
} else if(any(!max_iter == floor(max_iter), max_iter <= 0)) {
stop("max_iter should be a positive integer.")
}
}
# -----------------------------------------------------------------------
# Checking Input Data for cv.ProxGrad Object
# -----------------------------------------------------------------------
Check_Data_CV_ProxGrad <- function(x, y,
glm_type,
alpha_s,
n_lambda_sparsity,
tolerance, max_iter,
n_folds,
n_threads){
# Check for design matrix and response vector
if(all(!inherits(x, "matrix"), !inherits(x, "data.frame"))) {
stop("x should belong to one of the following classes: matrix, data.frame.")
} else if(all(!inherits(y, "matrix"), !inherits(y, "numeric"), !inherits(y, "integer"))) {
stop("y should belong to one of the following classes: matrix, numeric, integer.")
} else if(any(anyNA(x), any(is.nan(x)), any(is.infinite(x)))) {
stop("x should not have missing, infinite or nan values.")
} else if(any(anyNA(y), any(is.nan(y)), any(is.infinite(y)))) {
stop("y should not have missing, infinite or nan values.")
} else {
if(inherits(y, "matrix")) {
if(ncol(y)>1){
stop("y should be a vector")
}
# Force to vector if input was a matrix
y <- as.numeric(y)
}
len_y <- length(y)
if(len_y != nrow(x)) {
stop("y and x should have the same number of rows.")
}
}
# Check input for GLM type
if(!(glm_type %in% c("Linear", "Logistic")))
stop("The GLM type specified in invalid.")
# Check alpha_s value
if(!inherits(alpha_s, "numeric")) {
stop("alpha_s should be numeric.")
} else if(!all(alpha_s <= 1, alpha_s > 0)) {
stop("alpha_s should be between 0 and 1.")
}
# Check input for number of candidates for sparsity value
if(!inherits(n_lambda_sparsity, "numeric")) {
stop("n_lambda_sparsity should be numeric")
} else if(any(!n_lambda_sparsity == floor(n_lambda_sparsity), n_lambda_sparsity <= 0)) {
stop("n_lambda_sparsity should be a positive integer")
}
# Check tolerance
if(!inherits(tolerance, "numeric")) {
stop("tolerance should be numeric.")
} else if(!all(tolerance < 1, tolerance > 0)) {
stop("tolerance should be between 0 and 1.")
}
# Check maximum number of iterations
if(!inherits(max_iter, "numeric")) {
stop("max_iter should be numeric.")
} else if(any(!max_iter == floor(max_iter), max_iter <= 0)) {
stop("max_iter should be a positive integer.")
}
# Check input for number of folds
if(!inherits(n_folds, "numeric")) {
stop("n_folds should be numeric")
} else if(any(!n_folds == floor(n_folds), n_folds <= 0)) {
stop("n_folds should be a positive integer")
}
# Check input for number of threads
if(!inherits(n_threads, "numeric")) {
stop("n_threads should be numeric")
} else if(any(!n_threads == floor(n_threads), n_threads <= 0)) {
stop("n_threads should be a positive integer")
}
}
# -----------------------------------------------------------------------
# Checking Response Data for ProxGrad and cv.ProxGrad Object
# -----------------------------------------------------------------------
# Modifying response input data
Check_Response <- function(y, glm_type){
if(glm_type=="Logistic"){
if(length(unique(y))!=2)
stop("The response vector \"y\" must contain at most 2 classes if \"glm_type\" is \"Logistic\".") else{
if(!all(y %in% c(0,1)))
return(ifelse(y==y[1], 1, 0)) else
return(y)
}
} else if(glm_type=="Linear")
return(y)
}
|
/scratch/gouwar.j/cran-all/cranData/CPGLIB/R/Check_Data_ProxGrad.R
|
# -----------------------------------------------------------------------
# Object Construction for ProxGrad object
#
# object: the ProxGrad object
# fn_call: the function call
# -----------------------------------------------------------------------
construct.ProxGrad <- function(object, fn_call, glm_type, lambda_sparsity){
class(object) <- append("ProxGrad", class(object))
object$call <- fn_call
object$coef <- c(object$Intercept, object$Betas)
object$lambda_sparsity <- lambda_sparsity
return(object)
}
# -----------------------------------------------------------------------
# Object Construction for cv.ProxGrad object
#
# object: the cv.ProxGrad object
# fn_call: the function call
# -----------------------------------------------------------------------
construct.cv.ProxGrad <- function(object, fn_call, glm_type){
class(object) <- append("cv.ProxGrad", class(object))
object$call <- fn_call
object$glm_type <- glm_type
object$coef <- c(object$Intercept[object$Optimal_Index], object$Betas[,object$Optimal_Index])
return(object)
}
|
/scratch/gouwar.j/cran-all/cranData/CPGLIB/R/Construction_Functions.R
|
#'
#' @useDynLib CPGLIB
#' @importFrom Rcpp sourceCpp
#'
#'
#' @importFrom stats coef predict
#'
#' @title Generalized Linear Models via Proximal Gradients
#'
#' @description \code{ProxGrad} computes the coefficients for generalized linear models using proximal gradients.
#'
#' @param x Design matrix.
#' @param y Response vector.
#' @param glm_type Description of the error distribution and link function to be used for the model. Must be one of "Linear" or
#' "Logistic". Default is "Linear".
#' @param include_intercept Argument to determine whether there is an intercept. Default is TRUE.
#' @param alpha_s Elastic net mixing parameter. Default is 3/4.
#' @param lambda_sparsity Sparsity tuning parameter value.
#' @param tolerance Convergence criteria for the coefficients. Default is 1e-8.
#' @param max_iter Maximum number of iterations in the algorithm. Default is 1e5.
#'
#' @return An object of class ProxGrad.
#'
#' @export
#'
#' @author Anthony-Alexander Christidis, \email{[email protected]}
#'
#' @seealso \code{\link{coef.ProxGrad}}, \code{\link{predict.ProxGrad}}
#'
#' @examples
#' \donttest{
#' # Data simulation
#' set.seed(1)
#' n <- 50
#' N <- 2000
#' p <- 1000
#' beta.active <- c(abs(runif(p, 0, 1/2))*(-1)^rbinom(p, 1, 0.3))
#' # Parameters
#' p.active <- 100
#' beta <- c(beta.active[1:p.active], rep(0, p-p.active))
#' Sigma <- matrix(0, p, p)
#' Sigma[1:p.active, 1:p.active] <- 0.5
#' diag(Sigma) <- 1
#'
#' # Train data
#' x.train <- mvnfast::rmvn(n, mu = rep(0, p), sigma = Sigma)
#' prob.train <- exp(x.train %*% beta)/
#' (1+exp(x.train %*% beta))
#' y.train <- rbinom(n, 1, prob.train)
#' # Test data
#' x.test <- mvnfast::rmvn(N, mu = rep(0, p), sigma = Sigma)
#' prob.test <- exp(x.test %*% beta)/
#' (1+exp(x.test %*% beta))
#' y.test <- rbinom(N, 1, prob.test)
#'
#' # ProxGrad - Single Group
#' proxgrad.out <- ProxGrad(x.train, y.train,
#' glm_type = "Logistic",
#' include_intercept = TRUE,
#' alpha_s = 3/4,
#' lambda_sparsity = 0.01,
#' tolerance = 1e-5, max_iter = 1e5)
#'
#' # Predictions
#' proxgrad.prob <- predict(proxgrad.out, newx = x.test, type = "prob")
#' proxgrad.class <- predict(proxgrad.out, newx = x.test, type = "class")
#' plot(prob.test, proxgrad.prob, pch = 20)
#' abline(h = 0.5,v = 0.5)
#' mean((prob.test-proxgrad.prob)^2)
#' mean(abs(y.test-proxgrad.class))
#'
#' }
#'
ProxGrad <- function(x, y,
glm_type = c("Linear", "Logistic")[1],
include_intercept=TRUE,
alpha_s = 3/4,
lambda_sparsity,
tolerance = 1e-8, max_iter = 1e5){
# Check response data
y <- Check_Response(y, glm_type)
# Check data
Check_Data_ProxGrad(x, y,
glm_type,
alpha_s,
lambda_sparsity,
tolerance, max_iter)
# Shuffling the data
n <- nrow(x)
random.permutation <- sample(1:n, n)
x.permutation <- x[random.permutation, ]
y.permutation <- y[random.permutation]
# Setting the model type
type.cpp <- switch(glm_type,
"Linear" = 1,
"Logistic" = 2)
# Setting to include intercept parameter for CPP computation
include_intercept.cpp <- sum(include_intercept)
# Source code computation
ProxGrad.out <- ProxGrad_Main(x.permutation, y.permutation,
type.cpp,
include_intercept.cpp,
alpha_s,
lambda_sparsity,
tolerance, max_iter)
  # Object construction
ProxGrad.out <- construct.ProxGrad(ProxGrad.out, match.call(), glm_type, lambda_sparsity)
# Return source code output
return(ProxGrad.out)
}
|
/scratch/gouwar.j/cran-all/cranData/CPGLIB/R/ProxGrad.R
|
#'
#' @title Coefficients for ProxGrad Object
#'
#' @description \code{coef.ProxGrad} returns the coefficients for a ProxGrad object.
#'
#' @method coef ProxGrad
#'
#' @param object An object of class ProxGrad.
#' @param ... Additional arguments for compatibility.
#'
#' @return The coefficients for the ProxGrad object.
#'
#' @export
#'
#' @author Anthony-Alexander Christidis, \email{[email protected]}
#'
#' @seealso \code{\link{ProxGrad}}
#'
#' @examples
#' \donttest{
#' # Data simulation
#' set.seed(1)
#' n <- 50
#' N <- 2000
#' p <- 1000
#' beta.active <- c(abs(runif(p, 0, 1/2))*(-1)^rbinom(p, 1, 0.3))
#' # Parameters
#' p.active <- 100
#' beta <- c(beta.active[1:p.active], rep(0, p-p.active))
#' Sigma <- matrix(0, p, p)
#' Sigma[1:p.active, 1:p.active] <- 0.5
#' diag(Sigma) <- 1
#'
#' # Train data
#' x.train <- mvnfast::rmvn(n, mu = rep(0, p), sigma = Sigma)
#' prob.train <- exp(x.train %*% beta)/
#' (1+exp(x.train %*% beta))
#' y.train <- rbinom(n, 1, prob.train)
#' # Test data
#' x.test <- mvnfast::rmvn(N, mu = rep(0, p), sigma = Sigma)
#' prob.test <- exp(x.test %*% beta)/
#' (1+exp(x.test %*% beta))
#' y.test <- rbinom(N, 1, prob.test)
#'
#' # ProxGrad - Single Group
#' proxgrad.out <- ProxGrad(x.train, y.train,
#' glm_type = "Logistic",
#' include_intercept = TRUE,
#' alpha_s = 3/4,
#' lambda_sparsity = 0.01,
#' tolerance = 1e-5, max_iter = 1e5)
#'
#' # Coefficients
#' coef(proxgrad.out)
#'
#' }
#'
coef.ProxGrad <- function(object, ...){
# Check input data
if(!any(class(object) %in% "ProxGrad"))
stop("The object should be of class \"ProxGrad\"")
return(object$coef)
}
#'
#' @title Coefficients for cv.ProxGrad Object
#'
#' @method coef cv.ProxGrad
#'
#' @description \code{coef.cv.ProxGrad} returns the coefficients for a cv.ProxGrad object.
#'
#' @param object An object of class cv.ProxGrad.
#' @param ... Additional arguments for compatibility.
#'
#' @return The coefficients for the cv.ProxGrad object.
#'
#' @export
#'
#' @author Anthony-Alexander Christidis, \email{[email protected]}
#'
#' @seealso \code{\link{cv.ProxGrad}}
#'
#' @examples
#' \donttest{
#' # Data simulation
#' set.seed(1)
#' n <- 50
#' N <- 2000
#' p <- 1000
#' beta.active <- c(abs(runif(p, 0, 1/2))*(-1)^rbinom(p, 1, 0.3))
#' # Parameters
#' p.active <- 100
#' beta <- c(beta.active[1:p.active], rep(0, p-p.active))
#' Sigma <- matrix(0, p, p)
#' Sigma[1:p.active, 1:p.active] <- 0.5
#' diag(Sigma) <- 1
#'
#' # Train data
#' x.train <- mvnfast::rmvn(n, mu = rep(0, p), sigma = Sigma)
#' prob.train <- exp(x.train %*% beta)/
#' (1+exp(x.train %*% beta))
#' y.train <- rbinom(n, 1, prob.train)
#' # Test data
#' x.test <- mvnfast::rmvn(N, mu = rep(0, p), sigma = Sigma)
#' prob.test <- exp(x.test %*% beta)/
#' (1+exp(x.test %*% beta))
#' y.test <- rbinom(N, 1, prob.test)
#'
#' # CV ProxGrad - Single Group
#' proxgrad.out <- cv.ProxGrad(x.train, y.train,
#' glm_type = "Logistic",
#' include_intercept = TRUE,
#' alpha_s = 3/4,
#' n_lambda_sparsity = 100,
#' tolerance = 1e-5, max_iter = 1e5)
#'
#' # Coefficients
#' coef(proxgrad.out)
#'
#' }
#'
coef.cv.ProxGrad <- function(object, ...){
# Check input data
if(!any(class(object) %in% "cv.ProxGrad"))
stop("The object should be of class \"cv.ProxGrad\"")
return(object$coef)
}
|
/scratch/gouwar.j/cran-all/cranData/CPGLIB/R/ProxGrad_Coefficient_Functions.R
|
# -----------------------------------------------------------------------
# Object Construction for ProxGrad object
#
# object: the ProxGrad object
# fn_call: the function call
# -----------------------------------------------------------------------
construct.ProxGrad <- function(object, fn_call, glm_type, lambda_sparsity){
class(object) <- append("ProxGrad", class(object))
object$call <- fn_call
object$glm_type <- glm_type
object$lambda_sparsity <- lambda_sparsity
object$coef <- c(object$Intercept, object$Betas)
return(object)
}
# -----------------------------------------------------------------------
# Object Construction for cv.ProxGrad object
#
# object: the cv.ProxGrad object
# fn_call: the function call
# -----------------------------------------------------------------------
construct.cv.ProxGrad <- function(object, fn_call, glm_type, n_lambda_sparsity){
class(object) <- append("cv.ProxGrad", class(object))
object$call <- fn_call
object$glm_type <- glm_type
object$n_lambda_sparsity <- n_lambda_sparsity
object$coef <- c(object$Intercept[object$Optimal_Index], object$Betas[,object$Optimal_Index])
return(object)
}
|
/scratch/gouwar.j/cran-all/cranData/CPGLIB/R/ProxGrad_Construction_Functions.R
|
#'
#' @title Predictions for ProxGrad Object
#'
#' @description \code{predict.ProxGrad} returns the predictions for a ProxGrad object.
#'
#' @method predict ProxGrad
#'
#' @param object An object of class ProxGrad
#' @param newx New data for predictions.
#' @param type The type of predictions for binary response. Options are "prob" (default) and "class".
#' @param ... Additional arguments for compatibility.
#'
#' @return The predictions for the ProxGrad object.
#'
#' @export
#'
#' @author Anthony-Alexander Christidis, \email{[email protected]}
#'
#' @seealso \code{\link{ProxGrad}}
#'
#' @examples
#' \donttest{
#' # Data simulation
#' set.seed(1)
#' n <- 50
#' N <- 2000
#' p <- 1000
#' beta.active <- c(abs(runif(p, 0, 1/2))*(-1)^rbinom(p, 1, 0.3))
#' # Parameters
#' p.active <- 100
#' beta <- c(beta.active[1:p.active], rep(0, p-p.active))
#' Sigma <- matrix(0, p, p)
#' Sigma[1:p.active, 1:p.active] <- 0.5
#' diag(Sigma) <- 1
#'
#' # Train data
#' x.train <- mvnfast::rmvn(n, mu = rep(0, p), sigma = Sigma)
#' prob.train <- exp(x.train %*% beta)/
#' (1+exp(x.train %*% beta))
#' y.train <- rbinom(n, 1, prob.train)
#' # Test data
#' x.test <- mvnfast::rmvn(N, mu = rep(0, p), sigma = Sigma)
#' prob.test <- exp(x.test %*% beta)/
#' (1+exp(x.test %*% beta))
#' y.test <- rbinom(N, 1, prob.test)
#'
#' # ProxGrad - Single Group
#' proxgrad.out <- ProxGrad(x.train, y.train,
#' glm_type = "Logistic",
#' include_intercept = TRUE,
#' alpha_s = 3/4,
#' lambda_sparsity = 0.01,
#' tolerance = 1e-5, max_iter = 1e5)
#'
#' # Predictions
#' proxgrad.prob <- predict(proxgrad.out, newx = x.test, type = "prob")
#' proxgrad.class <- predict(proxgrad.out, newx = x.test, type = "class")
#' plot(prob.test, proxgrad.prob, pch = 20)
#' abline(h = 0.5,v = 0.5)
#' mean((prob.test-proxgrad.prob)^2)
#' mean(abs(y.test-proxgrad.class))
#'
#' }
#'
predict.ProxGrad <- function(object, newx, type = c("prob", "class")[1], ...){
# Check input data
if(!any(class(object) %in% "ProxGrad"))
stop("The object should be of class \"ProxGrad\"")
split.coef <- coef(object)
if(object$glm_type=="Linear"){
return(split.coef[1] + newx %*% split.coef[-1])
} else if(object$glm_type=="Logistic"){
if(!(type %in% c("prob", "class")))
stop("The variable \"type\" must be one of: \"prob\", or \"class\".")
logistic.prob <- exp(split.coef[1] + newx %*% split.coef[-1])/(1+exp(split.coef[1] + newx %*% split.coef[-1]))
if(type=="prob")
return(logistic.prob) else if(type=="class")
return(round(logistic.prob, 0))
}
}
#'
#' @title Predictions for cv.ProxGrad Object
#'
#' @description \code{predict.cv.ProxGrad} returns the predictions for a cv.ProxGrad object.
#'
#' @method predict cv.ProxGrad
#'
#' @param object An object of class cv.ProxGrad.
#' @param newx New data for predictions.
#' @param type The type of predictions for binary response. Options are "prob" (default) and "class".
#' @param ... Additional arguments for compatibility.
#'
#' @return The predictions for the cv.ProxGrad object.
#'
#' @export
#'
#' @author Anthony-Alexander Christidis, \email{[email protected]}
#'
#' @seealso \code{\link{cv.ProxGrad}}
#'
#' @examples
#' \donttest{
#' # Data simulation
#' set.seed(1)
#' n <- 50
#' N <- 2000
#' p <- 1000
#' beta.active <- c(abs(runif(p, 0, 1/2))*(-1)^rbinom(p, 1, 0.3))
#' # Parameters
#' p.active <- 100
#' beta <- c(beta.active[1:p.active], rep(0, p-p.active))
#' Sigma <- matrix(0, p, p)
#' Sigma[1:p.active, 1:p.active] <- 0.5
#' diag(Sigma) <- 1
#'
#' # Train data
#' x.train <- mvnfast::rmvn(n, mu = rep(0, p), sigma = Sigma)
#' prob.train <- exp(x.train %*% beta)/
#' (1+exp(x.train %*% beta))
#' y.train <- rbinom(n, 1, prob.train)
#' # Test data
#' x.test <- mvnfast::rmvn(N, mu = rep(0, p), sigma = Sigma)
#' prob.test <- exp(x.test %*% beta)/
#' (1+exp(x.test %*% beta))
#' y.test <- rbinom(N, 1, prob.test)
#'
#' # CV ProxGrad - Single Group
#' proxgrad.out <- cv.ProxGrad(x.train, y.train,
#' glm_type = "Logistic",
#' include_intercept = TRUE,
#' alpha_s = 3/4,
#' n_lambda_sparsity = 100,
#' tolerance = 1e-5, max_iter = 1e5)
#'
#' # Predictions
#' proxgrad.prob <- predict(proxgrad.out, newx = x.test, type = "prob")
#' proxgrad.class <- predict(proxgrad.out, newx = x.test, type = "class")
#' plot(prob.test, proxgrad.prob, pch = 20)
#' abline(h = 0.5,v = 0.5)
#' mean((prob.test-proxgrad.prob)^2)
#' mean(abs(y.test-proxgrad.class))
#'
#' }
#'
predict.cv.ProxGrad <- function(object, newx, type = c("prob", "class")[1], ...){
# Check input data
if(!any(class(object) %in% "cv.ProxGrad"))
stop("The object should be of class \"cv.ProxGrad\".")
split.coef <- coef(object)
if(object$glm_type=="Linear"){
return(split.coef[1] + newx %*% split.coef[-1])
} else if(object$glm_type=="Logistic"){
if(!(type %in% c("prob", "class")))
stop("The variable \"type\" must be one of: \"prob\", or \"class\".")
logistic.prob <- exp(split.coef[1] + newx %*% split.coef[-1])/(1+exp(split.coef[1] + newx %*% split.coef[-1]))
if(type=="prob")
return(logistic.prob) else if(type=="class")
return(round(logistic.prob, 0))
} else if(object$glm_type=="Gamma"){
return(-1/(split.coef[1] + newx %*% split.coef[-1]))
} else if(object$glm_type=="Poisson"){
return(exp(split.coef[1] + newx %*% split.coef[-1]))
}
}
|
/scratch/gouwar.j/cran-all/cranData/CPGLIB/R/ProxGrad_Prediction_Functions.R
|
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
CPGLIB_Main <- function(x, y, type, G, include_intercept, alpha_s, alpha_d, lambda_sparsity, lambda_diversity, tolerance, max_iter) {
.Call('_CPGLIB_CPGLIB_Main', PACKAGE = 'CPGLIB', x, y, type, G, include_intercept, alpha_s, alpha_d, lambda_sparsity, lambda_diversity, tolerance, max_iter)
}
CV_CPGLIB_Main <- function(x, y, type, G, full_diversity, include_intercept, alpha_s, alpha_d, n_lambda_sparsity, n_lambda_diversity, tolerance, max_iter, n_folds, n_threads) {
.Call('_CPGLIB_CV_CPGLIB_Main', PACKAGE = 'CPGLIB', x, y, type, G, full_diversity, include_intercept, alpha_s, alpha_d, n_lambda_sparsity, n_lambda_diversity, tolerance, max_iter, n_folds, n_threads)
}
CV_ProxGrad_Main <- function(x, y, type, include_intercept, alpha_s, n_lambda_sparsity, tolerance, max_iter, n_folds, n_threads) {
.Call('_CPGLIB_CV_ProxGrad_Main', PACKAGE = 'CPGLIB', x, y, type, include_intercept, alpha_s, n_lambda_sparsity, tolerance, max_iter, n_folds, n_threads)
}
ProxGrad_Main <- function(x, y, type, include_intercept, alpha_s, lambda_sparsity, tolerance, max_iter) {
.Call('_CPGLIB_ProxGrad_Main', PACKAGE = 'CPGLIB', x, y, type, include_intercept, alpha_s, lambda_sparsity, tolerance, max_iter)
}
|
/scratch/gouwar.j/cran-all/cranData/CPGLIB/R/RcppExports.R
|
#'
#' @title Competing Proximal Gradients Library for Ensembles of Generalized Linear Models - Cross-Validation
#'
#' @description \code{cv.cpg} computes and cross-validates the coefficients for ensembles of generalized linear models via competing proximal gradients.
#'
#' @param x Design matrix.
#' @param y Response vector.
#' @param glm_type Description of the error distribution and link function to be used for the model. Must be one of "Linear" or
#' "Logistic". Default is "Linear".
#' @param G Number of groups in the ensemble.
#' @param full_diversity Argument to determine if the overlap between the models should be zero. Default is FALSE.
#' @param include_intercept Argument to determine whether there is an intercept. Default is TRUE.
#' @param alpha_s Sparsity mixing parameter. Default is 3/4.
#' @param alpha_d Diversity mixing parameter. Default is 1.
#' @param n_lambda_sparsity Number of candidates for sparsity tuning parameter. Default is 100.
#' @param n_lambda_diversity Number of candidates for the diversity tuning parameter. Default is 100.
#' @param tolerance Convergence criteria for the coefficients. Default is 1e-8.
#' @param max_iter Maximum number of iterations in the algorithm. Default is 1e5.
#' @param n_folds Number of cross-validation folds. Default is 10.
#' @param n_threads Number of threads. Default is a single thread.
#'
#' @return An object of class \code{cv.cpg}
#'
#' @export
#'
#' @author Anthony-Alexander Christidis, \email{[email protected]}
#'
#' @seealso \code{\link{coef.cv.CPGLIB}}, \code{\link{predict.cv.CPGLIB}}
#'
#' @examples
#' \donttest{
#' # Data simulation
#' set.seed(1)
#' n <- 50
#' N <- 2000
#' p <- 300
#' beta.active <- c(abs(runif(p, 0, 1/2))*(-1)^rbinom(p, 1, 0.3))
#' # Parameters
#' p.active <- 150
#' beta <- c(beta.active[1:p.active], rep(0, p-p.active))
#' Sigma <- matrix(0, p, p)
#' Sigma[1:p.active, 1:p.active] <- 0.5
#' diag(Sigma) <- 1
#'
#' # Train data
#' x.train <- mvnfast::rmvn(n, mu = rep(0, p), sigma = Sigma)
#' prob.train <- exp(x.train %*% beta)/
#' (1+exp(x.train %*% beta))
#' y.train <- rbinom(n, 1, prob.train)
#' # Test data
#' x.test <- mvnfast::rmvn(N, mu = rep(0, p), sigma = Sigma)
#' prob.test <- exp(x.test %*% beta)/
#' (1+exp(x.test %*% beta))
#' y.test <- rbinom(N, 1, prob.test)
#'
#' # CV CPGLIB - Multiple Groups
#' cpg.out <- cv.cpg(x.train, y.train,
#' glm_type = "Logistic",
#' G = 5, include_intercept = TRUE,
#' alpha_s = 3/4, alpha_d = 1,
#' n_lambda_sparsity = 100, n_lambda_diversity = 100,
#' tolerance = 1e-5, max_iter = 1e5)
#'
#' # Predictions
#' cpg.prob <- predict(cpg.out, newx = x.test, type = "prob",
#' groups = 1:cpg.out$G, ensemble_type = "Model-Avg")
#' cpg.class <- predict(cpg.out, newx = x.test, type = "class",
#' groups = 1:cpg.out$G, ensemble_type = "Model-Avg")
#' plot(prob.test, cpg.prob, pch = 20)
#' abline(h = 0.5,v = 0.5)
#' mean((prob.test-cpg.prob)^2)
#' mean(abs(y.test-cpg.class))
#'
#' }
#'
cv.cpg <- function(x, y,
glm_type = c("Linear", "Logistic")[1],
G = 5,
full_diversity = FALSE,
include_intercept=TRUE,
alpha_s = 3/4, alpha_d = 1,
n_lambda_sparsity = 100, n_lambda_diversity = 100,
tolerance = 1e-8, max_iter = 1e5,
n_folds = 10,
n_threads = 1){
# Check response data
y <- Check_Response_CPGLIB(y, glm_type)
# Check data
Check_Data_CV_CPGLIB(x, y,
glm_type,
G,
alpha_s, alpha_d,
n_lambda_sparsity, n_lambda_diversity,
tolerance, max_iter,
n_folds,
n_threads)
# Shuffling the data
n <- nrow(x)
random.permutation <- sample(1:n, n)
x.permutation <- x[random.permutation, ]
y.permutation <- y[random.permutation]
# Setting the model type
type.cpp <- switch(glm_type,
"Linear" = 1,
"Logistic" = 2)
# Setting to return fully diverse models for CPP computation
full_diversity.cpp <- sum(full_diversity)
# Setting to include intercept parameter for CPP computation
include_intercept.cpp <- sum(include_intercept)
# Source code computation
cpg.out <- CV_CPGLIB_Main(x.permutation, y.permutation,
type.cpp,
G,
full_diversity.cpp,
include_intercept.cpp,
alpha_s, alpha_d,
n_lambda_sparsity, n_lambda_diversity,
tolerance, max_iter,
n_folds,
n_threads)
# Object construction
cpg.out <- construct.cv.CPGLIB(cpg.out, match.call(), glm_type, G, n_lambda_sparsity, n_lambda_diversity)
# Return source code output
return(cpg.out)
}
|
/scratch/gouwar.j/cran-all/cranData/CPGLIB/R/cvCPGLIB.R
|
#'
#' @title Generalized Linear Models via Proximal Gradients - Cross-validation
#'
#' @description \code{cv.ProxGrad} computes and cross-validates the coefficients for generalized linear models using proximal gradients.
#'
#' @param x Design matrix.
#' @param y Response vector.
#' @param glm_type Description of the error distribution and link function to be used for the model. Must be one of "Linear" or
#' "Logistic". Default is "Linear".
#' @param include_intercept Argument to determine whether there is an intercept. Default is TRUE.
#' @param alpha_s Elastic net mixing parameter. Default is 3/4.
#' @param n_lambda_sparsity Number of candidates for the sparsity tuning parameter. Default is 100.
#' @param tolerance Convergence criteria for the coefficients. Default is 1e-8.
#' @param max_iter Maximum number of iterations in the algorithm. Default is 1e5.
#' @param n_folds Number of cross-validation folds. Default is 10.
#' @param n_threads Number of threads. Default is a single thread.
#'
#' @return An object of class cv.ProxGrad
#'
#' @export
#'
#' @author Anthony-Alexander Christidis, \email{[email protected]}
#'
#' @seealso \code{\link{coef.cv.ProxGrad}}, \code{\link{predict.cv.ProxGrad}}
#'
#' @examples
#' \donttest{
#' # Data simulation
#' set.seed(1)
#' n <- 50
#' N <- 2000
#' p <- 1000
#' beta.active <- c(abs(runif(p, 0, 1/2))*(-1)^rbinom(p, 1, 0.3))
#' # Parameters
#' p.active <- 100
#' beta <- c(beta.active[1:p.active], rep(0, p-p.active))
#' Sigma <- matrix(0, p, p)
#' Sigma[1:p.active, 1:p.active] <- 0.5
#' diag(Sigma) <- 1
#'
#' # Train data
#' x.train <- mvnfast::rmvn(n, mu = rep(0, p), sigma = Sigma)
#' prob.train <- exp(x.train %*% beta)/
#' (1+exp(x.train %*% beta))
#' y.train <- rbinom(n, 1, prob.train)
#' # Test data
#' x.test <- mvnfast::rmvn(N, mu = rep(0, p), sigma = Sigma)
#' prob.test <- exp(x.test %*% beta)/
#' (1+exp(x.test %*% beta))
#' y.test <- rbinom(N, 1, prob.test)
#'
#' # CV ProxGrad - Single Group
#' proxgrad.out <- cv.ProxGrad(x.train, y.train,
#' glm_type = "Logistic",
#' include_intercept = TRUE,
#' alpha_s = 3/4,
#' n_lambda_sparsity = 100,
#' tolerance = 1e-5, max_iter = 1e5)
#'
#' # Predictions
#' proxgrad.prob <- predict(proxgrad.out, newx = x.test, type = "prob")
#' proxgrad.class <- predict(proxgrad.out, newx = x.test, type = "class")
#' plot(prob.test, proxgrad.prob, pch = 20)
#' abline(h = 0.5,v = 0.5)
#' mean((prob.test-proxgrad.prob)^2)
#' mean(abs(y.test-proxgrad.class))
#'
#' }
#'
cv.ProxGrad <- function(x, y,
glm_type = c("Linear", "Logistic")[1],
include_intercept=TRUE,
alpha_s = 3/4,
n_lambda_sparsity = 100,
tolerance = 1e-8, max_iter = 1e5,
n_folds = 10,
n_threads = 1){
# Check response data
y <- Check_Response(y, glm_type)
# Check data
Check_Data_CV_ProxGrad(x, y,
glm_type,
alpha_s,
n_lambda_sparsity,
tolerance, max_iter,
n_folds,
n_threads)
# Shuffling the data
n <- nrow(x)
random.permutation <- sample(1:n, n)
x.permutation <- x[random.permutation, ]
y.permutation <- y[random.permutation]
# Setting the model type
type.cpp <- switch(glm_type,
"Linear" = 1,
"Logistic" = 2)
# Setting to include intercept parameter for CPP computation
include_intercept.cpp <- sum(include_intercept)
# Source code computation
cv.ProxGrad.out <- CV_ProxGrad_Main(x.permutation, y.permutation,
type.cpp,
include_intercept.cpp,
alpha_s,
n_lambda_sparsity,
tolerance, max_iter,
n_folds,
n_threads)
  # Object construction
cv.ProxGrad.out <- construct.cv.ProxGrad(cv.ProxGrad.out, match.call(), glm_type, n_lambda_sparsity)
# Return source code output
return(cv.ProxGrad.out)
}
|
/scratch/gouwar.j/cran-all/cranData/CPGLIB/R/cvProxGrad.R
|
Boxcox <- function(boxcox, varcod)
{
  # Builds one transformed column of the coding variable per Box-Cox parameter:
  # lambda = 0 gives log(varcod), otherwise (varcod^lambda - 1)/lambda.
  if (missing(boxcox)) {
    X2 <- NULL
    coup1 <- NULL
  } else {
    X2 <- matrix(nrow = length(varcod), ncol = length(boxcox))
    for (i in 1:length(boxcox)) {
      if (boxcox[i] == 0) {
        X2[, i] <- log(varcod)
      } else {
        X2[, i] <- (1/boxcox[i]) * (varcod^boxcox[i] - 1)
      }
    }
    # Each Box-Cox coding keeps the variable continuous: one degree of freedom each
    coup1 <- rep(1, length(boxcox))
  }
  res <- list(X2, coup1)
}
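
# Minimal usage sketch (illustrative only; 'varcod.demo' is a hypothetical
# variable, not an object created by the package): one transformed column is
# produced per Box-Cox parameter, and each counts for a single degree of freedom.
varcod.demo <- rexp(100) + 1
bc.demo <- Boxcox(boxcox = c(0, 0.5, 1, 2), varcod = varcod.demo)
dim(bc.demo[[1]])   # 100 x 4: one column per lambda
bc.demo[[2]]        # 1 1 1 1: one degree of freedom per transformation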
|
/scratch/gouwar.j/cran-all/cranData/CPMCGLM/R/Boxcox.R
|
CPMCGLM<-function(formula,family,link,data,varcod,dicho,nb.dicho,categ,nb.categ,boxcox,nboxcox,FP,N=1000,cutpoint)
{
if(missing(dicho) & missing(nb.dicho) & missing(categ) & missing(nb.categ) & missing(boxcox) & missing(nboxcox) & missing(cutpoint) & missing(FP) ){stop("You need to enter at least one transformation in the function")}
if(!missing(dicho) & !missing(nb.dicho))stop("Specify only one argument for the dichotomous transformation ('dicho' or 'nb.dicho')")
if(!missing(categ) & !missing(nb.categ))stop("Specify only one argument for the categorical transformation ('categ' or 'nb.categ')")
if(!missing(boxcox) & !missing(nboxcox))stop("Specify only one argument for the boxcox transformation ('boxcox' or 'nboxcox')")
continuous<-"FALSE"
if (missing(boxcox))
{
if(!missing(nboxcox)){
		if((nboxcox > 5)){stop("The nboxcox argument must not be greater than 5")}
if(nboxcox<6){
if (nboxcox==0) {boxcox<-NULL}
if (nboxcox==1) {boxcox<-1}
if (nboxcox==2) {boxcox<-c(1,0)}
if (nboxcox==3) {boxcox<-c(1,0,2)}
if (nboxcox==4) {boxcox<-c(1,0,2,0.5)}
if (nboxcox==5) {boxcox<-c(1,0,2,0.5,1.5)}
}
}
}
if(missing(dicho) & (!missing(nb.dicho))){
dicho<-c((1:nb.dicho)/(nb.dicho+1))
}
if(missing(categ) & (!missing(nb.categ))){
categ<-matrix(NA,ncol=nb.categ+1,nrow=nb.categ)
for(i in 1:nb.categ)
{
categ[i,1:(i+1)]<-c(1:(1+i))/(i+2)
}
}
if(missing(categ) & (!missing(dicho))){
quantile<-matrix(NA,ncol=1,nrow=length(dicho))
for( i in 1:length(dicho))
{
quantile[i,1]<-dicho[i]
}
}
if(missing(categ) & (missing(dicho))){quantile<-NULL}
if(!missing(categ) & (missing(dicho))){quantile<-categ}
if(!missing(categ) & (!missing(dicho))){
binc<-matrix(NA,ncol=ncol(categ),nrow=length(dicho))
for( i in 1:length(dicho))
{
binc[i,1]<-dicho[i]
}
quantile<-rbind(binc,categ)
}
call <- match.call()
	if(!(family %in% c("binomial","gaussian","poisson"))) stop("This family of distributions is not supported by this function")
family1<- switch(family,
"binomial"=binomial,
"gaussian"=gaussian ,
"poisson"=poisson
)
#========================== 14/02/2012 =========================
if(missing(data)) stop("Missing data argument.")
if(missing(family)) stop("Missing family argument.")
if(missing(link)) stop("Missing link argument.")
if(missing(varcod)) stop("Missing varcod argument.")
if(missing(dicho) & missing(boxcox) & missing(categ) & missing(nb.dicho) & missing(nboxcox) & missing(nb.categ) & missing(cutpoint) & missing(FP)){
stop("No transformation is available")
}
	if(!inherits(formula,"formula")) stop("The argument formula must be a formula.")
	if(!inherits(data,"data.frame")) stop("The argument data must be a data frame.")
	if(!is.character(varcod)) stop("The argument varcod must be a character string.")
if(!missing(boxcox)){
if(is.vector(boxcox)=="FALSE") stop("The argument boxcox must be a vector.")
}
	# Processing the formula argument
m <- match.call(expand.dots=FALSE)
m$family <- m$link <- m$varcod <- m$quantile <- m$FP <- m$dicho <- m$nb.dicho<- m$nb.categ<- m$categ<- m$continuous <- m$boxcox<- m$nboxcox <-m$cutpoint <- m$N <- NULL
m[[1]] <- as.name("model.frame")
m <- eval(m)
	# dependent variable
NamesY <- all.names(update(formula,"~1"))[2]
Y <- as.vector(model.response(model.frame(formula=update(formula,"~1"),data=data)))
	# explanatory variables
mat.exp <- if (!is.empty.model(attr(m,"terms")))model.matrix(attr(m,"terms"),m, contrasts)
mtt <- model.matrix(attr(m,"terms"),m, contrasts)
Nom1 <- attributes(mtt)$dimnames[[2]]
type <- paste("factor\\(",varcod,"\\)",sep="")
if(length(grep(type,Nom1))!= 0){
stop("The coding variable needs to be continuous")
}
	if(ncol(mat.exp)==1)stop("The model must contain the varcod variable.")
mat.exp <- mat.exp[,-1]
# add
factor.names <- function(x){
x <- matrix(x,nrow=1)
Names <- apply(x,MARGIN=2,FUN=function(x){
if(length(grep("factor",x))!= 0){
pos1 <- grep("\\(",unlist(strsplit(x,split="")))+1
pos2 <- grep("\\)",unlist(strsplit(x,split="")))-1
compris.factor <- substr(x,start=pos1,stop=pos2)
after.factor <- substr(x,start=(pos2+2),stop=length(unlist(strsplit(x,split=""))))
paste(compris.factor,after.factor,sep=".")
}else{
x
}
}
)
return(Names)
}
if(is.matrix(mat.exp)){
colnames(mat.exp) <-factor.names(colnames(mat.exp))
}
if(is.vector(mat.exp)){
Z <- NULL
nb <- 0
namesZ <- NULL
var.cod <- as.vector(mat.exp)
ind.ajust <- 0
}
	# matrix of adjustment variables: Z
if(!is.vector(mat.exp)){
		if(!(varcod %in% colnames(mat.exp)))stop("The varcod variable is not present in the dataset.")
ind.ajust <- 1
if(ncol(mat.exp)!=2){
Z <- mat.exp[,-grep(varcod,colnames(mat.exp))]
nb <- ncol(Z)
namesZ <- NULL
names1 <- unlist(colnames(Z))
for(i in 1:nb){
if(i < nb) namesZ <- paste(namesZ,names1[i],"+")
else namesZ <- paste(namesZ,colnames(Z)[i])
}
}else{
Z <- as.matrix(mat.exp[,-grep(varcod,colnames(mat.exp))])
nb <- 1
namesZ <- NULL
names1 <- as.matrix(colnames(mat.exp))
posi<-grep(varcod,names1)
namesZ <-names1[-posi,]
colnames(Z)<-namesZ
}
var.cod <- as.vector(mat.exp[,varcod])
}
n<-nrow(data)
coup<-NULL
a <- Codage(data,quantile,var.cod)
X3 <- a[[1]]
coup2<- a[[2]]
b <- Boxcox(boxcox,var.cod)
X2<- b[[1]]
coup1<- b[[2]]
d <- codcut(data,cutpoint,var.cod)
X4<- d [[1]]
coup4<-d [[2]]
e <- PF(FP,var.cod)
X5 <- e[[1]]
coup5 <- e[[2]]
if(continuous=="TRUE"){
X1<-cbind(X3,X4,var.cod,X2)
coup3<-1
}else{
X1<-cbind(X3,X4,X2)
coup3<-NULL
}
if( missing(dicho) & missing(nb.dicho) & missing(categ) & missing(nb.categ) & missing(boxcox) & missing(nboxcox) & missing(cutpoint)){
X1ter <- X5
}else{
if(missing(FP)){X1ter <- X1}else{
X1bis <- array(data=NA,c(length(var.cod),ncol(FP),ncol(X1)))
for(i in 1:ncol(X1)){X1bis[,1,i] <- X1[,i]}
X1ter <- abind(X1bis,X5)
}
}
coup<-c(coup2,coup4,coup3,coup1,coup5)
if(ind.ajust==1){
formula1 <-update(formula,paste("~",namesZ))
}else{
formula1 <-update(formula,paste("~",1))
}
data <- as.data.frame(cbind(Y,mat.exp))
colnames(data)[1] <- NamesY
t.obs<-test.score(formula=formula1,data=data,codage=X1ter,Z=Z,Y=Y,family1=family1,family=family,link1=link,ind.ajust=ind.ajust)
pval<-p.val<-NULL
if(missing(FP)){nbtransf1 <- ncol(X1ter) }else{ nbtransf1 <- dim(X1ter)[3] }
for (i in 1:nbtransf1){
pval[i]<-(1-pchisq(t.obs[i],coup[i]))
p.val[i]<- min(p.val[i-1],pval[i])
}
	if(sum(is.na(p.val))!=0){stop("At least one of the codings is not adapted to the model specification")}
	# Parameter initialization
pval.exact1<-pval.bonf1<-pval.naive1<-NULL
control<-rep(1,nbtransf1)
if(all.equal(coup,control)==TRUE){
pval.exact1<-test.liquet(formula=formula1,data=data,codage=X1ter,Z=Z,Y=Y,family=family,link1=link,family1=family1)}
else {pval.exact1<-"Correction not available for these codings"}
	# Resampling
	### Parameter initialization
Y1<-matrix(ncol=N,nrow=n)
tboot1<-matrix(nrow=N,ncol=nbtransf1)
alphaboot<-NULL
alphapermut<-NULL
tpermut1<-matrix(nrow=N,ncol=nbtransf1)
#bootstrap
	# Coefficients under H0
link0 <- link
beta<-glm(formula1,family=family1(link0),data)$coeff
res<-glm(formula1,family=family1(link0),data)$res
design <- cbind(rep(1,n),Z)
	# depends on the link function
param<- switch(link,
"logit"= exp(design%*%beta)/(1+exp(design%*%beta)),
"probit"= pnorm(design%*%beta),
"log"=exp(design%*%beta),
"identity"=(design%*%beta)
)
tboot<-matrix(ncol=nbtransf1,nrow=N)
pval.boot<-matrix(ncol=nbtransf1,nrow=N)
	# Simulating the Y1 responses under H0
	progress_bar_tk <- create_progress_bar (title="Step 1: Parametric bootstrap resampling","tk")
progress_bar_tk $ init (N)
for (j in 1:N){
progress_bar_tk $ step ()
		# Draw the resampled response according to the specified family
Y1[,j]<- switch(family,
"gaussian"= rnorm(n,mean=mean(param),sd=sd(res)) ,
"binomial"= rbinom(n,1,prob=param) ,
"poisson"= rpois(n,mean(param)),
)
if(ind.ajust==1){
f1<-paste("~",namesZ)
}else{
f1<-"~1"
}
tboot[j,]<-tmultboot(f1,data,Y1=Y1[,j],Z,X1ter,family1,family=family,link1=link)
for (i in 1:nbtransf1){
pval.boot[j,i]<-(1-pchisq(tboot[j,i],coup[i]))
}
}
cat("\n")
progress_bar_tk $ term ()
#Permutation
tpermut<-matrix(ncol=nbtransf1,nrow=N)
pval.permut<-matrix(ncol=nbtransf1,nrow=N)
	# progress bar
	progress_bar_tk <- create_progress_bar (title="Step 2: Permutation resampling","tk")
progress_bar_tk $ init (N)
for (k in 1:N){
data1<-NULL
progress_bar_tk $ step ()
ind.permut <- permut(length(Y))
if(ind.ajust==1){
if(ncol(Z)==1)
{
Z11<-as.matrix(Z[ind.permut,])
colnames(Z11)<-namesZ
formula2 <-formula(paste("Y","~",namesZ))
data.permut <- data.frame(Y=Y[ind.permut],varcod=var.cod,Z11)
tpermut[k,]<-test.score(formula=formula2,data=data.permut,codage=X1ter,Z=Z11,Y=Y[ind.permut],family1,family=family,link1=link,ind.ajust=ind.ajust)
}
else{
formula2 <-formula(paste("Y","~",namesZ))
data.permut <- data.frame(Y=Y[ind.permut],varcod=var.cod,Z[ind.permut,])
tpermut[k,]<-test.score(formula=formula2,data=data.permut,codage=X1ter,Z=Z[ind.permut,],Y=Y[ind.permut],family1,family=family,link1=link,ind.ajust=ind.ajust)
}
}else{
formula2<-formula(paste("Y","~",1))
data.permut <- data.frame(Y=Y[ind.permut],varcod=var.cod)
tpermut[k,]<-test.score(formula=formula2,data=data.permut,codage=X1ter,Z=Z,Y=Y[ind.permut],family1=family1,family=family,link1=link,ind.ajust=ind.ajust)
}
for (i in 1:nbtransf1){
pval.permut[k,i]<-(1-pchisq(tpermut[k,i],coup[i]))
}
}
cat("\n")
progress_bar_tk $ term ()
pval.boot2<-pval.permut2<-matrix(0,ncol=nbtransf1,nrow=N)
for (i in 1:nbtransf1)
{
for (j in 1:N){
pval.boot[is.na(pval.boot)] <- 9999
pval.permut[is.na(pval.permut)] <- 9999
if (i>1){
pval.boot2[j,i]<-min(pval.boot2[j,i-1],min(pval.boot[j,i]))
pval.permut2[j,i]<-min(pval.permut2[j,i-1],min(pval.permut[j,i]))
}
else{
pval.boot2[j,i]<-min(pval.boot[j,i])
pval.permut2[j,i]<-min(pval.permut[j,i])
}
}
}
cat("\n")
for (i in 1:nbtransf1){
		# Computing the different adjusted p-values
pval.bonf<-p.val[i]*i
pval.bonf1<-c(pval.bonf1,pval.bonf)
pval.naive<-p.val[i]
pval.naive1<-c(pval.naive1,pval.naive)
#Bootstrap
		# Proportion of resampled minimum p-values smaller than the p-value observed on the original data
for (ii in 1:N){
if(pval.boot2[ii,i]< p.val[i]){tboot1[ii,i]<-1}
}
alphaboot[i]<-(length(which(tboot1[,i]>0))/N)
#Permutation
		# Proportion of resampled minimum p-values smaller than the p-value observed on the original data
for (l in 1:N){
if(pval.permut2[l,i]< p.val[i]){tpermut1[l,i]<-1}
}
alphapermut[i]<-(length(which(tpermut1[,i]>0))/N)
}
cat("\n")
	#### ADDITIONAL INFORMATION ####
trans<-transf(quantile,continuous,boxcox,cutpoint,FP)
adj<-ncol(Z)
	# Number of transformations per type
if(continuous=="TRUE"){q<-"TRUE"}else{q<-"FALSE"}
if(class(quantile)=="NULL"){nbq=0}else{nbq=nrow(quantile)}
if(missing(cutpoint)){nbc=0}else{nbc=nrow(cutpoint)}
if(missing(boxcox)){nbb=0}else{nbb=length(boxcox)}
if(missing(FP)){nbf=0}else{nbf=nrow(FP)}
	# Best transformation
BC<-trans[which.min(pval.naive1)]
print(substr(BC,1,22))
	# Best coding
bestcod1<-bestcod(continuous=continuous,quantile=quantile,boxcox=boxcox,cutpoint=cutpoint,PF=FP,pval.naive=pval.naive1)
print(bestcod1)
bcod<-which(is.na(bestcod1)==TRUE)
if((is.numeric(bestcod1)) & (length(bcod!=0)) ) {bestcod1<-bestcod1[-bcod]}
info<-list("link"=link,"nbt"=nbtransf1,"nbb"=nbb,"nbq"=nbq,"nbf"=nbf,"nbcont"=q,"adj"=adj,"trans"=trans,"BC"=BC,"bestcod"=bestcod1,"nbc"=nbc)
if(substr(BC,1,8)=="Quantile"){
vq<-quantile(var.cod,bestcod1)
}else{vq<-NULL}
out <- NULL
out$call <- call
## Complementary information
out$n <- n
out$N <- N
out$family <- family
out$link <- link
out$nbt <- nbtransf1
out$nbb <- nbb
out$nbq <- nbq
out$nbf <- nbf
out$nbcont <- q
out$nbc <- nbc
out$adj <- adj
out$vq<-vq
out$trans <- trans
out$BC <- BC
out$bestcod <-bestcod1
##
out$naive.pvalue <- pval.naive1[nbtransf1]
if(is.numeric(pval.exact1)==TRUE)
{out$exact.pvalue <- pval.exact1[nbtransf1]}
else{out$exact.pvalue<-pval.exact1}
out$bonferroni.adjusted.pvalue <- pval.bonf1[nbtransf1]
out$parametric.bootstrap.adjusted.pvalue <- alphaboot[nbtransf1]
out$permutation.adjusted.pvalue <- alphapermut[nbtransf1]
class(out) <- c("CPMCGLM")
out
}
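
# Illustrative sketch (the values below are assumptions, not package defaults):
# how the argument pre-processing at the top of CPMCGLM expands 'nb.dicho' and
# 'nb.categ' into the quantile orders actually used to build the codings.
nb.dicho.demo <- 3
dicho.demo <- c((1:nb.dicho.demo)/(nb.dicho.demo + 1))        # 0.25 0.50 0.75
nb.categ.demo <- 2
categ.demo <- matrix(NA, ncol = nb.categ.demo + 1, nrow = nb.categ.demo)
for (i in 1:nb.categ.demo) {
	categ.demo[i, 1:(i + 1)] <- c(1:(1 + i))/(i + 2)
}
categ.demo   # row 1: 1/3, 2/3, NA ; row 2: 0.25, 0.50, 0.75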
## end
|
/scratch/gouwar.j/cran-all/cranData/CPMCGLM/R/CPMCGLM.R
|
Codage<-function(data,quantile,var.cod)
{
if (class(quantile)=="NULL")
{
X1<-NULL
coup2<-NULL
}else{
X1<-matrix(nrow=length(var.cod),ncol=nrow(quantile))
		# For each set of cutpoints defined in 'quantile', a categorical coding of the variable is created
for (i in 1:nrow(quantile)){
X1[,i]<- cut(var.cod,breaks=c(min(var.cod),quantile(var.cod,quantile[i,1:(ncol(quantile))]),max(var.cod)),include.lowest = TRUE)
}
coup2<-NULL
for (i in 1:nrow(quantile)){
coup2[i]<-unique(max(X1[,i]))-1
}
X1<-as.matrix(X1)-1
}
res<-list(X1,coup2)
}
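
# Minimal usage sketch (hypothetical inputs; the 'data' argument is not used by
# the function body, so NULL is passed here): each row of 'quantile' gives the
# quantile orders of one coding, and the returned codes start at 0.
var.demo <- rnorm(200)
quantile.demo <- matrix(c(0.5, 0.75), ncol = 1)   # two dichotomisations
cod.demo <- Codage(data = NULL, quantile = quantile.demo, var.cod = var.demo)
table(cod.demo[[1]][, 1])   # split at the median: codes 0 and 1
cod.demo[[2]]               # 1 1: one degree of freedom per dichotomisation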
|
/scratch/gouwar.j/cran-all/cranData/CPMCGLM/R/Codage.R
|
PF <- function(FP,varcod){
coup5 <- NULL
X5 <- NULL
if (min(varcod)<0){
varcod <- varcod - floor(min(varcod))
}
if (missing(FP)){
XFP <- NULL
coup5 <- NULL
} else {
XFP <-array(dim=c(length(varcod),ncol(FP),nrow(FP)))
for (k in 1:nrow(FP)){
coup5 <- c(coup5,length(na.omit(FP[k,])))
for(j in 1:length(na.omit(FP[k,]))){
for(i in 1:length(varcod)){
if(j==1){
XFP[i,j,k]<-varcod[i]^(FP[k,j])
} else {
if(FP[k,j]!=FP[k,j-1]){
XFP[i,j,k]<-varcod[i]^(FP[k,j])
} else {
XFP[i,j,k] <- XFP[i,j-1,k]*log(varcod[i])
}
}
}
}
}
}
res <- list(X5=XFP, coup5=coup5)
}
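
# Minimal usage sketch (hypothetical inputs): each row of 'FP' defines one
# fractional polynomial transformation; a repeated power p,p yields x^p and
# x^p * log(x), as in the loop above.
x.demo <- runif(50, 1, 10)
FP.demo <- matrix(c(1, 2,
                    0.5, 0.5), nrow = 2, byrow = TRUE)
fp.demo <- PF(FP = FP.demo, varcod = x.demo)
dim(fp.demo$X5)   # 50 x 2 x 2: observations x powers x transformations
fp.demo$coup5     # 2 2: degrees of freedom of each transformation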
|
/scratch/gouwar.j/cran-all/cranData/CPMCGLM/R/FP.R
|
bestcod<-function(quantile,continuous,boxcox,cutpoint,PF,pval.naive){
posminp<-which.min(pval.naive)
if(missing(PF)){
if (missing(cutpoint)){
if (class(quantile)=="NULL"){
if (missing(boxcox)){
if (continuous=="TRUE"){
transf<-"original continuous variable"
}else{
transf<-"Coding definition is missing"}
}else{
if (continuous=="TRUE"){
if(posminp==1){
transf<-"original continuous variable"}
if(posminp>1){
transf<-boxcox[posminp-1] }
}else{
transf<-boxcox[posminp]
}
}
}else{
if (missing(boxcox)){
if (continuous=="TRUE"){
if(posminp<nrow(quantile)+1){transf<-quantile[posminp,]}
if(posminp>nrow(quantile)){transf<-"original continuous variable"}
}else{
transf<-quantile[posminp,]}
}else{
if (continuous=="TRUE") {
if(posminp<nrow(quantile)+1){transf<-quantile[posminp,]}
if(posminp==nrow(quantile)+1){transf<-"original continuous variable"}
				if(posminp>nrow(quantile)+1){transf<-boxcox[posminp-(nrow(quantile)+1)] }
}else{
if(posminp<nrow(quantile)+1){transf<-quantile[posminp,]}
if(posminp>nrow(quantile)){transf<-boxcox[posminp-nrow(quantile)]}
}
}
}
}else{
if (class(quantile)=="NULL"){
if (missing(boxcox)){
if (continuous=="TRUE"){
if(posminp<nrow(cutpoint)+1){transf<-cutpoint[posminp,]}
if(posminp>nrow(cutpoint)){transf<-"original continuous variable"}
}else{
transf<-cutpoint[posminp,]
}
}else{
if (continuous=="TRUE"){
if(posminp<nrow(cutpoint)+1){transf<-cutpoint[posminp,]}
if(posminp==nrow(cutpoint)+1){transf<-"original continuous variable"}
				if(posminp>nrow(cutpoint)+1){transf<-boxcox[posminp-(nrow(cutpoint)+1)]}
}else{
if(posminp<nrow(cutpoint)+1){transf<-cutpoint[posminp,]}
				if(posminp>nrow(cutpoint)){transf<-boxcox[posminp-nrow(cutpoint)]}
}
}
}else{
if (missing(boxcox)){
if(continuous=="TRUE"){
if(posminp<nrow(quantile)+1){transf<-quantile[posminp,]}
				if( (posminp>nrow(quantile)) & (posminp<nrow(quantile)+nrow(cutpoint)+1) ){transf<-cutpoint[posminp-nrow(quantile),]}
if(posminp==(nrow(quantile)+nrow(cutpoint)+1)){transf<-"original continuous variable"}
}else{
if(posminp<nrow(quantile)+1){transf<-quantile[posminp,]}
				if((posminp>nrow(quantile)) & (posminp<nrow(quantile)+nrow(cutpoint)+1) ){transf<-cutpoint[posminp-nrow(quantile),]}
}
}else{
if (continuous=="TRUE"){
if(posminp<nrow(quantile)+1){transf<-quantile[posminp,]}
				if( (posminp>nrow(quantile)) & (posminp<nrow(quantile)+nrow(cutpoint)+1) ){transf<-cutpoint[posminp-nrow(quantile),]}
if(posminp==(nrow(quantile)+nrow(cutpoint)+1)){transf<-"original continuous variable"}
if(posminp>(nrow(quantile)+nrow(cutpoint)+1)){transf<-boxcox[posminp-(nrow(quantile)+nrow(cutpoint)+1)]}
}else{
if(posminp<nrow(quantile)+1){transf<-quantile[posminp,]}
				if( (posminp>nrow(quantile)) & (posminp<nrow(quantile)+nrow(cutpoint)+1) ){transf<-cutpoint[posminp-nrow(quantile),]}
if(posminp>(nrow(quantile)+nrow(cutpoint))){transf<-boxcox[posminp-(nrow(quantile)+nrow(cutpoint))]}
}
}
}
}
}else{
if (missing(cutpoint)){
if (class(quantile)=="NULL"){
if (missing(boxcox)){
if (continuous=="TRUE"){
if(posminp==1){transf<-"original continuous variable"}
if(posminp>1){transf <- PF[posminp-1,]}
}else{
				transf<-PF[posminp,]}
}else{
if (continuous=="TRUE"){
if(posminp==1){transf<-"original continuous variable"}
if((posminp>1)&(posminp<length(boxcox)+2)){transf<-boxcox[posminp-1]}
if(posminp>length(boxcox)+1){transf<-PF[posminp-(length(boxcox)+1),]}
}else{
if(posminp<length(boxcox)+1){transf<-boxcox[posminp]}
if(posminp>length(boxcox)){transf<-PF[posminp-(length(boxcox)),]}
}
}
}else{
if (missing(boxcox)){
if (continuous=="TRUE"){
if(posminp<nrow(quantile)+1){transf<-quantile[posminp,]}
if(posminp==nrow(quantile)+1){transf<-"original continuous variable"}
				if(posminp>nrow(quantile)+1){transf<-PF[posminp-(nrow(quantile)+1),]}
}else{
if(posminp<nrow(quantile)+1){transf<-quantile[posminp,]}
if(posminp>nrow(quantile)){transf<-PF[posminp-nrow(quantile),]}}
}else{
if (continuous=="TRUE") {
if(posminp<nrow(quantile)+1){transf<-quantile[posminp,]}
if(posminp==nrow(quantile)+1){transf<-"original continuous variable"}
if((posminp>nrow(quantile)+1)&(posminp<(nrow(quantile)+length(boxcox)+2))){transf<-boxcox[posminp-(nrow(quantile)+1)]}
if(posminp>(nrow(quantile)+length(boxcox)+1)){transf<-PF[posminp-(nrow(quantile)+length(boxcox)+1),]}
}else{
if(posminp<nrow(quantile)+1){transf<-quantile[posminp,]}
if((posminp>nrow(quantile))&(posminp<(nrow(quantile)+length(boxcox)+1))){transf<-boxcox[posminp-(nrow(quantile))]}
if(posminp>(nrow(quantile)+length(boxcox))){transf<-PF[posminp-(nrow(quantile)+length(boxcox)),]}
}
}
}
}else{
if (class(quantile)=="NULL"){
if (missing(boxcox)){
if (continuous=="TRUE"){
if(posminp<nrow(cutpoint)+1){transf<-cutpoint[posminp,]}
if(posminp==nrow(cutpoint)+1){transf<-"original continuous variable"}
				if(posminp>nrow(cutpoint)+1){transf<-PF[posminp-(nrow(cutpoint)+1),]}
}else{
if(posminp<nrow(cutpoint)+1){transf<-cutpoint[posminp,]}
if(posminp>nrow(cutpoint)){transf<-PF[posminp-nrow(cutpoint),]}}
}else{
if (continuous=="TRUE"){
if(posminp<nrow(cutpoint)+1){transf<-cutpoint[posminp,]}
if(posminp==nrow(cutpoint)+1){transf<-"original continuous variable"}
if((posminp>nrow(cutpoint)+1)&(posminp<nrow(cutpoint)+1+length(boxcox)+1)){transf <- boxcox[posminp-(nrow(cutpoint)+1)]}
if(posminp>(nrow(cutpoint)+1+length(boxcox))){transf<-PF[posminp-(nrow(cutpoint)+1+length(boxcox)),]}
}else{
if(posminp<nrow(cutpoint)+1){transf<-cutpoint[posminp,]}
if((posminp>nrow(cutpoint))&(posminp<nrow(cutpoint)+length(boxcox)+1)){transf <- boxcox[posminp-(nrow(cutpoint))]}
if(posminp>nrow(cutpoint)+length(boxcox)){transf<-PF[posminp-(nrow(cutpoint)+length(boxcox)),]}
}
}
}else{
if (missing(boxcox)){
if (continuous=="TRUE"){
if(posminp<nrow(quantile)+1){transf<-quantile[posminp,]}
if((posminp>nrow(quantile))&(posminp<nrow(cutpoint)+nrow(quantile)+1)){transf<-cutpoint[posminp-nrow(quantile),]}
if(posminp==nrow(cutpoint)+nrow(quantile)+1){transf<-"original continuous variable"}
if(posminp>nrow(cutpoint)+nrow(quantile)+1){transf<-PF[posminp-(nrow(quantile)+nrow(cutpoint)+1),]}
}else{
if(posminp<nrow(quantile)+1){transf<-quantile[posminp,]}
if((posminp>nrow(quantile))&(posminp<nrow(cutpoint)+nrow(quantile)+1)){transf<-cutpoint[posminp-nrow(quantile),]}
if(posminp>nrow(cutpoint)+nrow(quantile)){transf<-PF[posminp-(nrow(quantile)+nrow(cutpoint)),]}
}
}else{
if (continuous=="TRUE"){
if(posminp<nrow(quantile)+1){transf<-quantile[posminp,]}
				if( (posminp>nrow(quantile)) & (posminp<nrow(quantile)+nrow(cutpoint)+1) ){transf<-cutpoint[posminp-nrow(quantile),]}
if(posminp==(nrow(quantile)+nrow(cutpoint)+1)){transf<-"original continuous variable"}
if((posminp>(nrow(quantile)+nrow(cutpoint)+1))&(posminp<(nrow(quantile)+nrow(cutpoint)+length(boxcox)+2))){transf<-boxcox[posminp-(nrow(quantile)+nrow(cutpoint)+1)]}
if(posminp>(nrow(quantile)+nrow(cutpoint)+length(boxcox)+1)){transf<-PF[posminp-(nrow(quantile)+nrow(cutpoint)+1+length(boxcox)),]}
}else{
if(posminp<nrow(quantile)+1){transf<-quantile[posminp,]}
				if( (posminp>nrow(quantile)) & (posminp<nrow(quantile)+nrow(cutpoint)+1) ){transf<-cutpoint[posminp-nrow(quantile),]}
if((posminp>(nrow(quantile)+nrow(cutpoint)))&(posminp<(nrow(quantile)+nrow(cutpoint)+length(boxcox)+1))){transf<-boxcox[posminp-(nrow(quantile)+nrow(cutpoint))]}
if(posminp>(nrow(quantile)+nrow(cutpoint)+length(boxcox))){transf<-PF[posminp-(nrow(quantile)+nrow(cutpoint)+length(boxcox)),]}
}
}
}
}
}
return(transf)
}
|
/scratch/gouwar.j/cran-all/cranData/CPMCGLM/R/bestcod.R
|
codcut<-function(data,cutpoint,var.cod)
{
coup3<-NULL
if (missing(cutpoint))
{
X1<-NULL
}else{
X1<-matrix(nrow=length(var.cod),ncol=nrow(cutpoint))
for (i in 1:nrow(cutpoint))
{
X1[,i]<- cut(var.cod,breaks=c(min(var.cod),cutpoint[i,1:(ncol(cutpoint))],max(var.cod)),include.lowest = TRUE)
}
for (i in 1:nrow(cutpoint))
{
coup3[i]<-unique(max(X1[,i]))-1
}
X1<-as.matrix(X1)-1
}
res<-list(X1,coup3)
}
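
# Minimal usage sketch (hypothetical inputs; the 'data' argument is unused by
# the function body): each row of 'cutpoint' holds cutpoints on the original
# scale of the variable, padded with NA when rows have different lengths.
age.demo <- seq(20, 80, by = 0.5)
cut.demo <- matrix(c(40, NA,
                     35, 60), nrow = 2, byrow = TRUE)
cc.demo <- codcut(data = NULL, cutpoint = cut.demo, var.cod = age.demo)
table(cc.demo[[1]][, 2])   # three classes: [20,35], (35,60], (60,80]
cc.demo[[2]]               # 1 2: degrees of freedom of each coding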
|
/scratch/gouwar.j/cran-all/cranData/CPMCGLM/R/codcut.R
|
permut<-function(n)
{
ind<-1:n
a<-sample(ind,n)
asort <- sort(a)
b <- sample(asort,n)
ind <- b
return(ind)
}
|
/scratch/gouwar.j/cran-all/cranData/CPMCGLM/R/permut.R
|
print.CPMCGLM <-function(x, ...)
{
	if(!inherits(x,"CPMCGLM")) stop("The argument x must be of class CPMCGLM.")
cl <- x$call
if (!is.null(cl)){
cat("Call:\n")
dput(cl)
cat("\n")
}
	cat("Generalized Linear Model Summary \n")
cat(" Family:","",x$family,"\n")
cat(" Link:","",x$link,"\n")
	cat(" Number of subjects:","",x$n,"\n")
	cat(" Number of adjustment variables:","",x$adj,"\n")
cat("\n")
cat("Resampling \n")
cat(" N: ",x$N,"\n")
n.pvalue<-round(x$naive.pvalue,4)
if(is.numeric(x$exact.pvalue)==T){e.pvalue<-round(x$exact.pvalue,4)}
else{e.pvalue<-x$exact.pvalue}
b.pvalue<-round(x$bonferroni.adjusted.pvalue,4)
pa.pvalue<-round(x$parametric.bootstrap.adjusted.pvalue,4)
if(is.numeric(x$permutation.adjusted.pvalue)){pe.pvalue<-round(x$permutation.adjusted.pvalue,4)}
else{pe.pvalue<-x$permutation.adjusted.pvalue}
cat("\nBest coding \n")
if(substr(x$BC,1,8)=="Quantile"){
if (length(x$bestcod)==1){
cat("Method:","","Dichotomous","","transformation","\n")}
else{cat("Method:","",substr(x$BC,1,8),"\n")}
		cat("Quantile orders of the cutpoints:","",x$bestcod,"\n")
		cat("Values of the quantile cutpoints:","",x$vq,"\n")
}
if(substr(x$BC,1,8)=="Cutpoint"){
cat("Method:","",substr(x$BC,1,8),"\n")
		cat("Values of the cutpoints:","",x$bestcod,"\n")
}
if(substr(x$BC,1,6)=="Boxcox"){
cat("Method:","",substr(x$BC,1,6),"\n")
cat("Value of the BoxCox parameter:","",x$bestcod,"\n")
}
if(substr(x$BC,1,22)=="Fractionnal Polynomial"){
cat("Method:","",substr(x$BC,1,22),"\n")
cat("Value of the Fractionnal Polynomial parameters:","",na.omit(x$bestcod),"\n")
}
cat("\n")
cat("Corresponding adjusted pvalue: \n")
cat("\n")
if(is.numeric(e.pvalue)==T){
R1<-data.frame(rbind(n.pvalue,e.pvalue,b.pvalue,pa.pvalue,pe.pvalue))
R1[R1<0.0001]<-paste("<0.",paste(rep("0",3),collapse=""),"1",sep="")
R1[R1>1]<- 1
colnames(R1)<-rep("Adjusted pvalue",length(b.pvalue))
rownames(R1)<-c("naive","exact","bonferroni","bootstrap","permutation")
print(R1,digits=3)
}else{
R1<-data.frame(rbind(n.pvalue,b.pvalue,pa.pvalue,pe.pvalue))
R1[R1==0]<-paste("<0.",paste(rep("0",3),collapse=""),"1",sep="")
R1[R1>1]<- 1
colnames(R1)<-rep("Adjusted pvalue",length(b.pvalue))
rownames(R1)<-c("naive","bonferroni","bootstrap","permutation")
print(R1,digits=3)
cat("exact:","",e.pvalue)
cat("\n")
}
}
|
/scratch/gouwar.j/cran-all/cranData/CPMCGLM/R/print.CPMCGLM.R
|
summary.CPMCGLM <-function(object, ...)
{
	if(!inherits(object,"CPMCGLM")) stop("The argument object must be of class CPMCGLM.")
cat("Summary of CPMCGLM Package\n")
n.pvalue<-round(object$naive.pvalue,4)
if(is.numeric(object$exact.pvalue)==T){e.pvalue<-round(object$exact.pvalue,4)}
else{e.pvalue<-object$exact.pvalue}
b.pvalue<-round(object$bonferroni.adjusted.pvalue,4)
pa.pvalue<-round(object$parametric.bootstrap.adjusted.pvalue,4)
pe.pvalue<-round(object$permutation.adjusted.pvalue,4)
cat("\nBest coding \n")
if(substr(object$BC,1,8)=="Quantile"){
cat("Method:","",substr(object$BC,1,8),"\n")
		cat("Quantile orders of the cutpoints:","",object$bestcod,"\n")
}
if(substr(object$BC,1,8)=="Cutpoint"){
cat("Method:","",substr(object$BC,1,8),"\n")
		cat("Values of the cutpoints:","",object$bestcod,"\n")
}
if(substr(object$BC,1,6)=="Boxcox"){
cat("Method:","",substr(object$BC,1,6),"\n")
cat("Value of the BoxCox parameter:","",object$bestcod,"\n")
}
if(object$BC=="Original"){
cat("The best association is found with the original continuous variable","\n")
}
cat("\n")
cat("Corresponding adjusted pvalue: \n")
cat("\n")
if(is.numeric(e.pvalue)==T){
R1<-data.frame(rbind(n.pvalue,e.pvalue,b.pvalue,pa.pvalue,pe.pvalue))
R1[R1<0.0001]<-paste("<0.",paste(rep("0",3),collapse=""),"1",sep="")
R1[R1>1]<- 1
colnames(R1)<-rep("Adjusted pvalue",length(b.pvalue))
rownames(R1)<-c("naive","exact","bonferroni","bootstrap","permutation")
print(R1,digits=3)
}else{
R1<-data.frame(rbind(n.pvalue,b.pvalue,pa.pvalue,pe.pvalue))
R1[R1==0]<-paste("<0.",paste(rep("0",3),collapse=""),"1",sep="")
R1[R1>1]<- 1
colnames(R1)<-rep("Adjusted pvalue",length(b.pvalue))
rownames(R1)<-c("naive","bonferroni","bootstrap","permutation")
print(R1,digits=3)
cat("exact:","",e.pvalue)
cat("\n")
}
}
|
/scratch/gouwar.j/cran-all/cranData/CPMCGLM/R/summary.CPMCGLM.R
|
test.liquet<-function(formula,data,codage,Z,Y,family,link1,family1)
{
if (length(dim(codage))==3){
Xcod <- matrix(NA,nrow=nrow(data),ncol=dim(codage)[3])
for (i in 1:dim(codage)[3]){
Xcod[,i] <- codage[,1,i]
}
codage <- Xcod
}
model.h0 <- glm(formula,family=family1(link=link1),data)
tol <- 10E-40
	# Dispersion parameter of the exponential family
phi<-switch(family,
"binomial"=1,
"gaussian"=var(Y),
"poisson"=1,
)
pi3 <- model.h0$fitted
V<-switch(family,
"binomial"=diag(pi3*(1-pi3),length(Y)),
"gaussian"=diag(var(Y),length(Y)),
"poisson"=diag(model.h0$weight,length(Y)),
)
I<-diag(nrow(codage))
ZZ<-cbind(rep(1,nrow(data)),Z)
H<-V%*%ZZ%*%(solve(t(ZZ)%*%V%*%ZZ))%*%t(ZZ)
	# parameter initialization
num<-NULL
num1<-NULL
J<-NULL
t<-NULL
# t1<-NULL
Var<-NULL
ZZ1<-NULL
for(i in 1:ncol(codage))
{
num[i]<- (1/phi)*t(codage[,i])%*%(Y-pi3)
Var[i]<- (1/phi)^2*t(codage[,i])%*%(I-H)%*%V%*%codage[,i]
# t[i]<-num[i]/(sqrt(Var[i]))
ZZ1[[i]] <- cbind(rep(1,nrow(data)),Z,codage[,i])
num1[[i]] <- t(ZZ1[[i]])%*%(Y-pi3)
temp <- t(ZZ1[[i]])%*%V%*%ZZ1[[i]]
if((det(temp)==0)||(1/det(temp)<tol)){
t[i] <- NA
}else{
J[[i]] <- solve(temp,tol=tol)
t[i] <- sqrt(t(num1[[i]])%*%J[[i]]%*%num1[[i]])
}
}
Corr<-matrix(nrow=ncol(codage),ncol=ncol(codage))
for (i in 1:ncol(codage))
{
for (j in 1:ncol(codage))
{
Corr[i,j]<-((1/phi)^2*codage[,i]%*%(I-H)%*%V%*%codage[,j])/(sqrt(Var[i])%*%sqrt(Var[j]))
}
}
pval.exact<-NULL
for(i in 1:ncol(codage))
{
pval.exact<-c(pval.exact,1-pmvnorm(lower=-rep(max(abs(t[1:i])),i),upper=rep(max(abs(t[1:i])),i),mean=rep(0,i),sigma=Corr[1:i,1:i]))
}
res <- pval.exact
}
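
# Minimal sketch (toy values, not taken from any dataset): the exact adjustment
# computed in the final loop above. Given standardised score statistics and
# their correlation matrix, the adjusted p-value is one minus the multivariate
# normal probability that all statistics lie below the largest observed
# absolute value. Requires the 'mvtnorm' package, already used above via pmvnorm.
t.demo <- c(1.8, 2.3)
Corr.demo <- matrix(c(1, 0.6, 0.6, 1), 2, 2)
1 - mvtnorm::pmvnorm(lower = -rep(max(abs(t.demo)), 2),
                     upper = rep(max(abs(t.demo)), 2),
                     mean = rep(0, 2), sigma = Corr.demo)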
|
/scratch/gouwar.j/cran-all/cranData/CPMCGLM/R/test.liquet.R
|
test.score <- function(formula,data,codage,Z,Y,family,family1,link1,ind.ajust){
tol=10E-40
model.h0 <- glm(formula,family=family1(link=link1),data=data)
pi2 <- model.h0$fitted
V<-switch(family,
"binomial"=diag(pi2*(1-pi2),length(Y)),
"gaussian"=diag(var(Y),length(Y)),
"poisson"=diag(model.h0$weight,length(Y)),
)
ZZ<-J<-t<-NULL
num<-NULL
if (length(dim(codage))==2){
for (i in 1:ncol(codage)){
ZZ[[i]] <- cbind(rep(1,nrow(data)),Z,matrix(codage[,i],nrow=nrow(data)))
ZZ[[i]] <- ZZ[[i]][,colSums(is.na(ZZ[[i]])) != nrow(ZZ[[i]])]
num[[i]] <- t(ZZ[[i]])%*%(Y-pi2)
temp <- t(ZZ[[i]])%*%V%*%ZZ[[i]]
if((det(temp)==0)||(1/det(temp)<tol)){
t[i] <- NA
}else{
J[[i]] <- solve(temp,tol=tol)
t[i] <- t(num[[i]])%*%J[[i]]%*%num[[i]]
}
}
} else {
for (i in 1:dim(codage)[3]){
ZZ[[i]] <- cbind(rep(1,nrow(data)),Z,matrix(codage[,,i],nrow=nrow(data)))
ZZ[[i]] <- ZZ[[i]][,colSums(is.na(ZZ[[i]])) != nrow(ZZ[[i]])]
num[[i]] <- t(ZZ[[i]])%*%(Y-pi2)
temp <- t(ZZ[[i]])%*%V%*%ZZ[[i]]
if((det(temp)==0)||(1/det(temp)<tol)){
t[i] <- NA
}else{
J[[i]] <- solve(temp,tol=tol)
t[i] <- t(num[[i]])%*%J[[i]]%*%num[[i]]
}
}
}
res<-t
}
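
# Minimal sketch (simulated toy data): the score statistic assembled above,
# written out for a single coding under a logistic model with only an intercept
# in H0. The p-value uses as many degrees of freedom as the coding has columns,
# mirroring the pchisq() calls made in CPMCGLM.
set.seed(123)
y.demo <- rbinom(100, 1, 0.4)
x.demo <- rnorm(100)                              # one candidate coding
fit0 <- glm(y.demo ~ 1, family = binomial)
pi0 <- fit0$fitted
V.demo <- diag(pi0 * (1 - pi0))
Z1 <- cbind(1, x.demo)                            # intercept + coding
u <- t(Z1) %*% (y.demo - pi0)                     # score vector under H0
J <- t(Z1) %*% V.demo %*% Z1                      # information matrix
t.demo <- as.numeric(t(u) %*% solve(J) %*% u)     # score statistic
1 - pchisq(t.demo, df = 1)                        # naive p-value for this coding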
|
/scratch/gouwar.j/cran-all/cranData/CPMCGLM/R/test.score.R
|
#### Score test
tmultboot <- function(formula,data,Y1,Z,codage,family1,family=family,link1){
	# Tolerance threshold
tol <- 10E-40
	# Formula under H0
form<-formula(paste("Y1",formula))
	# Fitting the model under H0
model.h0 <- glm(formula=form,data=data,family=family1(link=link1))
pi1 <- model.h0$fitted
V<-switch(family,
"binomial"=diag(pi1*(1-pi1),length(Y1)),
"gaussian"=diag(var(Y1),length(Y1)),
"poisson"=diag(model.h0$weight,length(Y1)),
)
ZZ<-J<-t<-NULL
num<-NULL
	# Computing the test statistic for each coding on the resampled data
if (length(dim(codage))==2){
for (j in 1:ncol(codage))
{
ZZ[[j]] <- cbind(rep(1,nrow(data)),Z,
matrix(codage[,j],nrow=nrow(data)))
ZZ[[j]] <- ZZ[[j]][,colSums(is.na(ZZ[[j]])) != nrow(ZZ[[j]])]
num[[j]] <- t(ZZ[[j]])%*%(Y1-pi1)
temp <- t(ZZ[[j]])%*%V%*%ZZ[[j]]
if((det(temp)==0)||(1/det(temp)<tol)){
t[j] <- NA
}else{
J[[j]] <- solve(temp,tol=tol)
t[j] <- t(num[[j]])%*%J[[j]]%*%num[[j]]
}
}
}
else
{
for (j in 1:dim(codage)[3])
{
ZZ[[j]] <- cbind(rep(1,nrow(data)),Z,
matrix(codage[,,j],nrow=nrow(data)))
ZZ[[j]] <- ZZ[[j]][,colSums(is.na(ZZ[[j]])) != nrow(ZZ[[j]])]
num[[j]] <- t(ZZ[[j]])%*%(Y1-pi1)
temp <- t(ZZ[[j]])%*%V%*%ZZ[[j]]
if((det(temp)==0)||(1/det(temp)<tol)){
t[j] <- NA
}else{
J[[j]] <- solve(temp,tol=tol)
t[j] <- t(num[[j]])%*%J[[j]]%*%num[[j]]
}
}
}
res<-t
}
|
/scratch/gouwar.j/cran-all/cranData/CPMCGLM/R/tmultboot.R
|
transf<-function(quantile,continuous,boxcox,cutpoint,FP)
{
	# Vector of transformation labels
if(missing(FP)){
if (missing(cutpoint))
{
if (class(quantile)=="NULL")
{
if (missing(boxcox))
{
if (continuous=="TRUE")
{
trans<-c("Original")
}
else{
trans<-"Coding definition is missing"
}
}
else{
if (continuous=="TRUE")
{
trans<-c("Original",paste("Boxcox",1:length(boxcox),sep=""))
}
else{
trans<-c(paste("Boxcox",1:length(boxcox),sep=""))
}
}
}
else{
if (missing(boxcox))
{
if (continuous=="TRUE")
{
trans<-c(paste("Quantile",1:nrow(quantile),sep=""),"Original")
}
else{
trans<-c(paste("Quantile",1:nrow(quantile),sep=""))
}
}
else{
if (continuous=="TRUE")
{
trans<-c(paste("Quantile",1:nrow(quantile),sep=""),"Original",paste("Boxcox",1:length(boxcox),sep=""))
}
else{
trans<-c(paste("Quantile",1:nrow(quantile),sep=""),paste("Boxcox",1:length(boxcox),sep=""))
}
}
}
}
else{
if (class(quantile)=="NULL")
{
if (missing(boxcox))
{
if (continuous=="TRUE")
{
trans<-c(paste("Cutpoint",1:nrow(cutpoint),sep=""),"Original")
}
else{
trans<-c(paste("Cutpoint",1:nrow(cutpoint),sep=""))
}
}
else{
if (continuous=="TRUE")
{
trans<-c(paste("Cutpoint",1:nrow(cutpoint),sep=""),"Original",paste("Boxcox",1:length(boxcox),sep=""))
}
else{
trans<-c(paste("Cutpoint",1:nrow(cutpoint),sep=""),paste("Boxcox",1:length(boxcox),sep=""))
}
}
}
else{
if (missing(boxcox))
{
if (continuous=="TRUE")
{
trans<-c(paste("Quantile",1:nrow(quantile),sep=""),paste("Cutpoint",1:nrow(cutpoint),sep=""),"Original")
}
else{
trans<-c(paste("Quantile",1:nrow(quantile),sep=""),paste("Cutpoint",1:nrow(cutpoint),sep=""))
}
}
else{
if (continuous=="TRUE")
{
trans<-c(paste("Quantile",1:nrow(quantile),sep=""),paste("Cutpoint",1:nrow(cutpoint),sep=""),"Original",paste("Boxcox",1:length(boxcox),sep=""))
}
else{
trans<-c(paste("Quantile",1:nrow(quantile),sep=""),paste("Cutpoint",1:nrow(cutpoint),sep=""),paste("Boxcox",1:length(boxcox),sep=""))
}
}
}
}
}else{
if (missing(cutpoint))
{
if (class(quantile)=="NULL")
{
if (missing(boxcox))
{
if (continuous=="TRUE")
{
trans<-c("Original",paste("Fractionnal Polynomial",1:nrow(FP),sep=""))
}
else{
trans<-c(paste("Fractionnal Polynomial",1:nrow(FP),sep=""))
}
}
else{
if (continuous=="TRUE")
{
trans<-c("Original",paste("Boxcox",1:length(boxcox),sep=""),paste("Fractionnal Polynomial",1:nrow(FP),sep=""))
}
else{
trans<-c(paste("Boxcox",1:length(boxcox),sep=""),paste("Fractionnal Polynomial",1:nrow(FP),sep=""))
}
}
}
else{
if (missing(boxcox))
{
if (continuous=="TRUE")
{
trans<-c(paste("Quantile",1:nrow(quantile),sep=""),"Original",paste("Fractionnal Polynomial",1:nrow(FP),sep=""))
}
else{
trans<-c(paste("Quantile",1:nrow(quantile),sep=""),paste("Fractionnal Polynomial",1:nrow(FP),sep=""))
}
}
else{
if (continuous=="TRUE")
{
trans<-c(paste("Quantile",1:nrow(quantile),sep=""),"Original",paste("Boxcox",1:length(boxcox),sep=""),paste("Fractionnal Polynomial",1:nrow(FP),sep=""))
}
else{
trans<-c(paste("Quantile",1:nrow(quantile),sep=""),paste("Boxcox",1:length(boxcox),sep=""),paste("Fractionnal Polynomial",1:nrow(FP),sep=""))
}
}
}
}
else{
if (class(quantile)=="NULL")
{
if (missing(boxcox))
{
if (continuous=="TRUE")
{
trans<-c(paste("Cutpoint",1:nrow(cutpoint),sep=""),"Original",paste("Fractionnal Polynomial",1:nrow(FP),sep=""))
}
else{
trans<-c(paste("Cutpoint",1:nrow(cutpoint),sep=""),paste("Fractionnal Polynomial",1:nrow(FP),sep=""))
}
}
else{
if (continuous=="TRUE")
{
trans<-c(paste("Cutpoint",1:nrow(cutpoint),sep=""),"Original",paste("Boxcox",1:length(boxcox),sep=""),paste("Fractionnal Polynomial",1:nrow(FP),sep=""))
}
else{
trans<-c(paste("Cutpoint",1:nrow(cutpoint),sep=""),paste("Boxcox",1:length(boxcox),sep=""),paste("Fractionnal Polynomial",1:nrow(FP),sep=""))
}
}
}
else{
if (missing(boxcox))
{
if (continuous=="TRUE")
{
trans<-c(paste("Quantile",1:nrow(quantile),sep=""),paste("Cutpoint",1:nrow(cutpoint),sep=""),"Original",paste("Fractionnal Polynomial",1:nrow(FP),sep=""))
}
else{
trans<-c(paste("Quantile",1:nrow(quantile),sep=""),paste("Cutpoint",1:nrow(cutpoint),sep=""),paste("Fractionnal Polynomial",1:nrow(FP),sep=""))
}
}
else{
if (continuous=="TRUE")
{
trans<-c(paste("Quantile",1:nrow(quantile),sep=""),paste("Cutpoint",1:nrow(cutpoint),sep=""),"Original",paste("Boxcox",1:length(boxcox),sep=""),paste("Fractionnal Polynomial",1:nrow(FP),sep=""))
}
else{
trans<-c(paste("Quantile",1:nrow(quantile),sep=""),paste("Cutpoint",1:nrow(cutpoint),sep=""),paste("Boxcox",1:length(boxcox),sep=""),paste("Fractionnal Polynomial",1:nrow(FP),sep=""))
}
}
}
}
}
return(trans)
}
|
/scratch/gouwar.j/cran-all/cranData/CPMCGLM/R/transf.R
|
#' Example of the dataset used in the paper.
#'
#' A real dataset from Spanish speakers, translated using automatic algorithms.
#'
#' @format A data frame with 4364 rows and 3 variables:
#' \describe{
#' \item{ID}{Id of the people}
#' \item{Concept}{Concept being described by the person ID}
#' \item{Property}{A property mentioned for the corresponding concept for the person ID}
#' }
"data_paper"
|
/scratch/gouwar.j/cran-all/cranData/CPNCoverageAnalysis/R/data_paper.R
|
#' Test example dataset.
#'
#' A toy dataset containing the descriptions given by ten people, over 3 concepts, with multiple properties.
#'
#' @format A data frame with 65 rows and 3 variables:
#' \describe{
#' \item{ID}{Id of the people}
#' \item{Concept}{Concept being described by the person ID}
#' \item{Property}{A property mentioned for the corresponding concept for the person ID}
#' }
"data_test"
|
/scratch/gouwar.j/cran-all/cranData/CPNCoverageAnalysis/R/data_test.R
|
#' Estimate the number of additional participants needed and the expected number of unique properties for a given coverage, based on the estimated norms
#'
#' @param est_norms a data frame with the estimated norms (generated by generateNorms)
#' @param target_cover float between 0 and 1, corresponding to coverage (the fraction of the total incidence probabilities of the reported properties that are in the reference sample)
#' @return a data frame with, for each concept, the additional number of participants needed to achieve the specified coverage and the expected number of unique properties listed by that enlarged sample
#' @export
#' @examples
#' estimated_norms=generate_norms(data_test)
#' estimate_participant(estimated_norms,0.8)
estimate_participant <- function(est_norms, target_cover) {
    # Creating the initial output data frame
est_t_S <- data.frame(matrix(0, dim(est_norms)[1], 4))
est_t_S[, 1] <- est_norms$Concept
    # Estimating the new number of participants needed to achieve the coverage
est_t_S[, 2] <- (log((est_norms$U/est_norms$Q1) * (1 - target_cover))/log((((est_norms$T - 1) * est_norms$Q1)/(((est_norms$T -
1) * est_norms$Q1) + 2 * est_norms$Q2)))) - 1
# Approximating to the highest integer
est_t_S[, 2] <- ceiling(est_t_S[, 2])
# Obtaining the indexes for the special cases
indexT2T <- est_t_S[, 2] > 2*est_norms$T
est_t_S[, 2][indexT2T] = 2*est_norms$T[indexT2T]
indexTL0 <- (est_t_S[, 2] <= 0) & (est_t_S[, 2] > -Inf)
indexTinf <- est_norms$Q2 == 0
# Estimating Q0 (the properties that has not been listed)
estQ0 <- est_norms$S_hat - est_norms$S_obs
# Estimating the expected number of unique listed properties
est_t_S[, 3] <- est_norms$S_obs + estQ0 * (1 - (1 - (est_norms$Q1/(est_norms$T * estQ0 + est_norms$Q1)))^est_t_S[,
2])
    # Eliminating negative participant counts (the objective is already fulfilled)
est_t_S[, 2][indexTL0] <- 0
est_t_S[, 3][indexTL0] <- est_norms$S_hat[indexTL0]
    # If the expected number of new participants is more than twice the current number of participants,
    # a warning is added
est_t_S[, 4] <- ""
est_t_S[, 4][indexT2T] <- "t_star > 2T, t_star = 2T"
est_t_S[, 4][indexTinf] <- "Q2 = 0, cannot calculate t_star"
# Labeling the dataframe
colnames(est_t_S) <- c("Concept", "T_star", "S_hat_star", "Warning")
return(est_t_S)
}
|
/scratch/gouwar.j/cran-all/cranData/CPNCoverageAnalysis/R/estimate_participant.R
|
#' Calculate all the norms from a Conceptual properties
#'
#' @param orig_data a data frame of size nx3 (id, concept, property)
#' @return a data frame with all the estimations
#' @export
#' @examples
#' generate_norms(data_test)
generate_norms <- function(orig_data) {
# preprocessing the data eliminating repeated rows ------------------------
orig_data <- unique(orig_data)
    # converting all words to uppercase and trimming surrounding spaces
orig_data[, 2] <- toupper(trimws(orig_data[, 2]))
orig_data[, 3] <- toupper(trimws(orig_data[, 3]))
# Obtaining the unique concepts -------------------------------------------
vConcept <- as.vector(unique(orig_data[, 2]))
vConcept <- sort(vConcept)
numConcepts <- length(vConcept)
# Creating empty data frame with the elements to create and adding the concepts
results <- data.frame(matrix(0, numConcepts, 11))
names(results) <- c("Concepts", "Q1", "Q2", "T", "S_obs", "U", "S_hat", "sd_S_hat", "CI_l", "CI_U",
"C_T")
results[, 1] <- vConcept
# Estimating the characteristics for each concept ------------------------
for (i in c(1:numConcepts)) {
# Obtaining the set of data corresponding to each unique concept
tempData = orig_data[orig_data[, 2] == vConcept[i], ]
# list with unique and number of users for the concept
users <- as.vector(unique(tempData[, 1]))
numUsers <- length(users)
results[i, 4] <- numUsers
# List with unique features and number of features
features <- as.vector(unique(tempData[, 3]))
numFeatures <- length(features) #S0
results[i, 5] <- numFeatures
# Transforming the data to a binary matrix of featuresXusers mtI[i,j]=1 implies feature i was
# mentioned by user j
lstI <- tapply(X = tempData[, 3], INDEX = tempData[, 1], FUN = function(x) {
features %in% x
})
mtI <- matrix(unlist(lstI), ncol = numUsers, byrow = F)
mtI <- mtI * 1
# obtaining the value of U
U <- sum(sum(mtI))
results[i, 6] <- U
# frequency of each feature
frequencyFeature <- rowSums(mtI)
        # vector of Q: vectorQ[2] = 5 implies that 5 features were named exactly two times
vectorQ <- numeric(numUsers)
vectorQ[as.numeric(names(table(frequencyFeature)))] <- table(frequencyFeature)
# Obtaining Q1 and Q2
if (length(vectorQ)==1){
vectorQ[2]=0 #in this case Q2 does not exist
}
results[i, 2:3] = vectorQ[1:2]
        # Calculating the variance for the estimation of S (Shat)
A <- (numUsers - 1)/numUsers
# Analyzing the possible cases for Q
if (vectorQ[2] > 0) {
Q0hat <- A * (vectorQ[1]^2)/(2 * vectorQ[2])
# estimating S and its variance
Shat <- results[i, 5] + Q0hat
            Q1_2 <- vectorQ[1]/vectorQ[2] # ratio between Q1 and Q2
varShat <- vectorQ[2] * ((A/2) * Q1_2^2 + A^2 * Q1_2^3 + (A^2)/4 * Q1_2^4)
} else {
Q0hat <- A * (vectorQ[1] * (vectorQ[1] - 1))/2
# estimating S and its variance
Shat <- results[i, 5] + Q0hat
varShat <- A * vectorQ[1] * (vectorQ[1] - 1)/2 + A^2 * vectorQ[1] * (2 * vectorQ[1] - 1)^2/4 -
A^2 * vectorQ[1]^4/(4 * Shat)
}
# Recording the obtained results
results[i, 7] <- Shat
results[i, 8] <- sqrt(varShat)
        # Estimating the confidence intervals. Note: a log-normal distribution is assumed
D <- exp(1.96 * sqrt(log(1 + (varShat/(Shat - numFeatures)^2))))
results[i, 9] <- numFeatures + ((Shat - numFeatures)/D)
results[i, 10] <- numFeatures + ((Shat - numFeatures) * D)
# Estimating the coverage
results[i, 11] <- 1 - vectorQ[1]/U * ((vectorQ[1] * (numUsers - 1))/(vectorQ[1] * (numUsers -
1) + 2 * vectorQ[2]))
}
row.names(results) <- NULL
return(results)
}
|
/scratch/gouwar.j/cran-all/cranData/CPNCoverageAnalysis/R/generate_norms.R
|
#' Simulate properties based on the empirical distribution of the original data and new words with frequency one
#'
#' @param orig_data a data frame of size nx3 (id, concept, property). The empirical distribution is generated from these data
#' @param new_words integer greater than 0, corresponding to the number of words with frequency one that should be added to the empirical distribution
#' @param number_subjects number of subjects to be sampled. Each subject generates new properties
#' @return a data frame with the simulated properties for the new subjects (one row per sampled subject-property pair)
#' @importFrom graphics hist
#' @importFrom stats rnorm
#' @importFrom stats sd
#' @export
#' @examples
#' orig_data=data_paper[data_paper[,2]=="Decision",]
#' property_simulator(orig_data, 84, 15)
property_simulator <- function(orig_data, new_words, number_subjects) {
#Obtaining the number of words per user
concepts_per_subject=hist(orig_data[,1],breaks=unique(orig_data[,1]),plot=F)
mean_cps=mean(concepts_per_subject$counts)
std_cps=sd(concepts_per_subject$counts)
#Adding new words for the tail of the distribution
temp_data=orig_data[,3]
levels(temp_data)=c(levels(temp_data),as.character(c(1:new_words)))
temp_data[(length(temp_data)+1):(length(temp_data)+new_words)]=as.character(c(1:new_words))
  #Estimating the empirical distribution using the original data and the added new words
emp_dist=table(temp_data)/length(temp_data)
#Final sampling process
#Generating an empty data frame
new_properties=data.frame(ID=numeric(0),Concept=character(0),Property=character(0))
for (i in c(1:number_subjects)){
#Sampling the number of properties to be sampled
num_properties=round(rnorm(1,mean=mean_cps,sd=std_cps))
#If the number of properties is less than 1, one property is sampled
if (num_properties<1){
num_properties=1
}
#If the number of properties is more than the possible number of properties, all properties are sampled
if (num_properties>nlevels(temp_data)){
num_properties=nlevels(temp_data)
}
#Sampling the new properties
sampled_properties=sample(x=row.names(emp_dist),prob = emp_dist,replace=F,size=num_properties)
    #Creating the data frame with the sampled properties
temp_properties=data.frame(Subject=i,Concept=orig_data[1,2],Code=sampled_properties)
#Adding the sampled properties to the data frame
new_properties=rbind(new_properties, temp_properties)
}
return(new_properties)
}
|
/scratch/gouwar.j/cran-all/cranData/CPNCoverageAnalysis/R/property_simulator.R
|
#' Aggregation of experts' estimates by similarity of values
#' @description This function computes the aggregated value of different experts' estimates, using Beta PERT distributions to randomize the decision matrix.
#' @param x Decision matrix of expert estimates (rows) and criteria (columns). Benefit criteria must be positive and cost criteria must be negative.
#' @param min Vector of minimum values in each criterion scale. For common scales to all criteria, the vector must repeat the minimum value as many times as the number of criteria.
#' @param max Vector of maximum values in each criterion scale. For common scales to all criteria, the vector must repeat the maximum value as many times as the number of criteria.
#' @param s Shape of a Beta PERT distribution, as described in the package 'mc2d'. There is no default value, however the higher the shape the higher the kurtosis of the random variable.
#' @param w Weights describing the expert experience in the subject matter.
#' @param b Beta describes the balance between the expert weights and their opinions. Beta varies in the interval [0,1]. The higher the index, the higher the importance of weights.
#' @return SM are the Similarity Matrices per criterion. CDC describes the Consensus Coefficient matrix. Agg.value gives the aggregated value of expert opinions per criterion.
#' @examples
#' ## Experts' estimates on four criteria
#' Exp.1 = c(4,7,6,8)
#' Exp.2 = c(4,3,6,5)
#' Exp.3 = c(3,8,2,9)
#' Exp.4 = c(6,8,9,7)
#' Exp.5 = c(5,9,2,4)
#' Exp.6 = c(7,6,5,5)
#' x = rbind(Exp.1,Exp.2,Exp.3,Exp.4,Exp.5) # Decision matrix
#' min = c(0,0,0,0) # Minimum scale values.
#' max = c(10,10,10,10) # Maximum scale values.
#' s = 4 # Shape
#' w = c(0.4,0.3,0.2,0.06,0.04) # Expert relevance.
#' b = 0.4
#' Agg.Sim(x,min,max,s,w,b)
#' @importFrom mc2d dpert ppert
#' @importFrom stats integrate
#' @export
Agg.Sim = function(x,min,max,s,w,b){
PMax = x
A = x
B = x
for (j in 1:ncol(x))
{
for (i in 1:nrow(x))
{
PMax[i,j] = (integrate(Vectorize(function(x) {prod(ppert(x,min[j],A[,j][-i],max[j],s))*dpert(x,min[j],B[,j][[i]],max[j],s)}),min[j],max[j])) $value
}}
PMax = PMax[,]
####
PMin = x
max = apply(x,2,max)
min = apply(x,2,min)
for (j in 1:ncol(x))
{
for (i in 1:nrow(x))
{
PMin[i,j] = (integrate(Vectorize(function(x) {prod(1-ppert(x,min[j],A[,j][-i],max[j],s))*dpert(x,min[j],B[,j][[i]],max[j],s)}),min[j],max[j])) $value
}}
PMin = PMin[,]
# Number of Experts for looping
NrEsp = nrow(x)
A = x
B = x
  # Cross matrices PMax and PMin
PMax = vector("list", ncol(x))
PMin = vector("list", ncol(x))
### P.MAX e P.min
for (k in 1:(ncol(x)))
{
PMax[[k]] = matrix(0,nrow=NrEsp,ncol=NrEsp)
PMin[[k]] = matrix(0,nrow=NrEsp,ncol=NrEsp)
for (j in 1:(NrEsp))
{
for (i in 1:(NrEsp))
{
PMax[[k]][i,j] = (integrate(Vectorize(function(x) {(ppert(x,min[k],A[i,k],max[k],s))*dpert(x,min[k],B[j,k],max[k],s)}),min[k],max[k]))$value
PMin[[k]][i,j] = (integrate(Vectorize(function(x) {(1-ppert(x,min[k],A[i,k],max[k],s))*dpert(x,min[k],B[j,k],max[k],s)}),min[k],max[k]))$value
}}}
# Similarity matrices per criterion
S = vector("list", ncol(x))
for (k in 1:(ncol(x)))
{
S[[k]] = PMax[[k]]/PMin[[k]]
S[[k]] = ifelse(S[[k]]>1,1/S[[k]],S[[k]])
S[[k]] = ifelse(S[[k]]>0.999,1,S[[k]])
rownames(S[[k]]) = paste0("Exp",1:NrEsp)
colnames(S[[k]]) = paste0("Exp",1:NrEsp)
}
names(S) = paste0("Crit",1:ncol(x))
  ### Aggregation of estimates
AE = vector("list", ncol(x))
for (k in 1:(ncol(x)))
{
AE[[k]] = apply(S[[k]],1,sum)
AE[[k]] = (AE[[k]]-1)/(NrEsp-1)
}
names(AE) = paste0("Crit",1:ncol(x))
  ### Relative Aggregation (RAD) and Coefficient of Consensus (CDC)
Soma = vector("list", ncol(x))
RAD = vector("list", ncol(x))
CDC = vector("list", ncol(x))
for (k in 1:(ncol(x)))
{
Soma[[k]] = sum(AE[[k]])
RAD[[k]] = AE[[k]]/Soma[[k]]
CDC[[k]] = b*w+(1-b)*RAD[[k]]
}
names(CDC) = paste0("Crit",1:ncol(x))
CDC = t(matrix(unlist(CDC),ncol(x),NrEsp))
rownames(CDC) = paste0("Exp",1:NrEsp)
colnames(CDC) = paste0("Crit",1:ncol(x))
### Final aggregation (New alternative parameters)
A.mode = vector("list", ncol(x))
for (k in 1:(ncol(x)))
{
A.mode[[k]] = sum(CDC[[k]]*A[,k])
}
names(A.mode) = paste0("Crit",1:ncol(x))
A.mode = unlist(A.mode)
Result = list(SM = S, CDC = CDC, Agg.value = A.mode)
Result
}
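
## Illustrative sketch (not part of the package): how the consensus coefficient
## CDC = b*w + (1-b)*RAD blends the expert weights 'w' with the relative
## agreement degrees 'RAD' of a single criterion. All numbers below are
## hypothetical; in Agg.Sim the RAD values come from the similarity matrices.
w.sketch   <- c(0.4, 0.3, 0.2, 0.06, 0.04)    # expert relevance, sums to 1
RAD.sketch <- c(0.25, 0.22, 0.18, 0.20, 0.15) # relative agreement, sums to 1
b.sketch   <- 0.4                             # balance between weights and agreement
CDC.sketch <- b.sketch * w.sketch + (1 - b.sketch) * RAD.sketch
round(CDC.sketch, 4)  # consensus coefficients per expert, still summing to 1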
|
/scratch/gouwar.j/cran-all/cranData/CPP/R/agg_sim.R
|
#' Probabilistic AHP using Beta PERT distributions
#' @description This function computes criteria weights, using AHP and random pair-wise evaluations drawn from Beta PERT distributions.
#' @param n Number of random matrices to simulate from Beta PERT distributions, using the parameters 'min', 'mean' and 'max' of each pair-wise criteria comparison elicited from the experts.
#' @param s Shape of a Beta PERT distribution, as described in the package "mc2d". There is no default value; the higher the shape, the higher the kurtosis, which emulates the precision of data elicited from experts.
#' @param list List of pair-wise comparison matrices of expert opinions, built with the base R function 'list'.
#' @return Weights returned from a simulation of AHP with Beta PERT distributions. The weights are derived from the simulated matrix with the lowest AHP consistency ratio.
#' @references Saaty, Thomas L. (1980). The analytic hierarchy process: planning, priority setting, resource allocation, McGraw-Hill.
#' @examples
#' n=5000 # Number of simulated matrices
#' s=6 # Shape of the Beta PERT distribution
#' # Expert pair-wise evaluations
#' Exp.1 = matrix(c(1,0.2,0.3,5,1,0.2,3,5,1),3,3)
#' Exp.2 = matrix(c(1,2,8,0.5,1,6,0.12,0.16,1),3,3)
#' Exp.3 = matrix(c(1,0.5,0.5,2,1,6,2,0.16,1),3,3)
#' Exp.4 = matrix(c(1,3,4,0.3,1,0.5,0.25,0.3,1),3,3)
#' Exp.5 = matrix(c(1,4,5,0.25,1,1,0.2,1,1),3,3)
#' list = list(Exp.1,Exp.2,Exp.3,Exp.4,Exp.5)
#' AHP.Beta(n,s,list)
#' @importFrom mc2d dpert ppert rpert
#' @export
AHP.Beta = function (n,s,list) {
min = apply(simplify2array(list), 1:2, min)
mean = apply(simplify2array(list), 1:2, mean)
max = apply(simplify2array(list), 1:2, max)
c=nrow(min)
d = c^2
simu = vector("list", d)
k=1
for (i in 1:c)
{
for (j in 1:c)
{
simu[[k]] <- rpert(n,min[i,j],mean[i,j],max[i,j],s)
k = k+1
}}
un = unlist(simu)
abc = matrix(un,c^2,n,byrow=TRUE)
### AHP
m = vector("list", n)
weight = vector("list", n)
CI = vector("list", n)
for (a in 1:n)
{
m[[a]] = as.vector(abc[,a])
matrix = matrix(m[[a]],nrow = c,ncol = c, byrow = TRUE)
for (i in 1:c)
{
weight[[a]][i] <- prod(matrix[i,])^(1/c)
}
temp_sum <- sum(weight[[a]])
weight[[a]] <- weight[[a]]/temp_sum
lambda_max <- Re(eigen(matrix)$values[1])
CI[[a]] <- (lambda_max-c)/(c-1)
}
### Saaty's Random Index
RI = c(0,0,0.58,0.9,1.12,1.24,1.32,1.41,1.45,1.49)
min = which.min(CI)
index = CI[[min]]/RI[c]
w.min = weight[[min]]
Result = c(Weight=w.min)
Result
}
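
## Illustrative sketch (not part of the package): geometric-mean weights and the
## consistency check for a single, fixed 3x3 pair-wise matrix. AHP.Beta repeats
## this computation for every simulated matrix and keeps the weights of the one
## with the lowest consistency index. The matrix below is hypothetical.
A.sketch <- matrix(c(1,   1/3, 1/5,
                     3,   1,   1/2,
                     5,   2,   1), nrow = 3, byrow = TRUE)
w.sketch <- apply(A.sketch, 1, prod)^(1/3)        # row geometric means
w.sketch <- w.sketch / sum(w.sketch)              # normalized priority weights
lambda.max <- Re(eigen(A.sketch)$values[1])       # principal eigenvalue
CI.sketch  <- (lambda.max - 3) / (3 - 1)          # consistency index
CR.sketch  <- CI.sketch / 0.58                    # ratio, using Saaty's RI for n = 3
round(c(w.sketch, CR = CR.sketch), 4)             # CR < 0.1 is usually acceptable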
|
/scratch/gouwar.j/cran-all/cranData/CPP/R/ahp_beta.R
|
#' Probabilistic AHP using Uniform distributions
#' @description This function computes criteria weights, using AHP and random pair-wise evaluations drawn from Uniform distributions.
#' @param n Number of random matrices to simulate from Uniform distributions, using the parameters 'min' and 'max' of each pair-wise criteria comparison elicited from the experts.
#' @param list Pair-wise comparison matrices of expert opinions, built with the base R function 'list'.
#' @return Weights returned from a simulation of AHP with Uniform distributions. The weights are derived from the simulated matrix with the lowest AHP consistency index.
#' @references Saaty, Thomas L. (1980). The analytic hierarchy process: planning, priority setting, resource allocation, McGraw-Hill.
#' @examples
#' n=5000 # Simulation
#'# Expert pair-wise evaluations
#' Exp.1 = matrix(c(1,0.2,0.3,5,1,0.2,3,5,1),3,3)
#' Exp.2 = matrix(c(1,2,8,0.5,1,6,0.12,0.16,1),3,3)
#' Exp.3 = matrix(c(1,0.5,0.5,2,1,6,2,0.16,1),3,3)
#' Exp.4 = matrix(c(1,3,4,0.3,1,0.5,0.25,0.3,1),3,3)
#' Exp.5 = matrix(c(1,4,5,0.25,1,1,0.2,1,1),3,3)
#' list = list(Exp.1,Exp.2,Exp.3,Exp.4,Exp.5)
#' AHP.Unif(n,list)
#' @importFrom stats runif
#' @export
AHP.Unif = function (n,list) {
min = apply(simplify2array(list), 1:2, min)
max = apply(simplify2array(list), 1:2, max)
c=nrow(min)
d = c^2
simu = vector("list", d)
k=1
for (i in 1:c)
{
for (j in 1:c)
{
simu[[k]] <- runif(n,min[i,j],max[i,j])
k = k+1
}}
un = unlist(simu)
abc = matrix(un,c^2,n,byrow=TRUE)
### AHP
m = vector("list", n)
weight = vector("list", n)
CI = vector("list", n)
for (a in 1:n)
{
m[[a]] = as.vector(abc[,a])
matrix = matrix(m[[a]],nrow = c,ncol = c, byrow = TRUE)
for (i in 1:c)
{
weight[[a]][i] <- prod(matrix[i,])^(1/c)
}
temp_sum <- sum(weight[[a]])
weight[[a]] <- weight[[a]]/temp_sum
lambda_max <- Re(eigen(matrix)$values[1])
CI[[a]] <- (lambda_max-c)/(c-1)
}
### Saaty's Random Index
RI = c(0,0,0.58,0.9,1.12,1.24,1.32,1.41,1.45,1.49)
min = which.min(CI)
index = CI[[min]]/RI[c]
w.min = weight[[min]]
Result = c(Weight=w.min)
Result
}
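
## Illustrative sketch (not part of the package): one Uniform draw of a
## pair-wise matrix, cell by cell, mirroring the inner simulation of AHP.Unif
## (each cell is sampled independently, so reciprocity is not enforced).
## The bounds below are hypothetical.
set.seed(1)
lo <- matrix(c(1, 0.2,  0.3,
               2, 1,    0.2,
               2, 0.16, 1), 3, 3, byrow = TRUE)
hi <- matrix(c(1, 0.5, 0.5,
               5, 1,   6,
               8, 6,   1), 3, 3, byrow = TRUE)
draw <- matrix(runif(9, lo, hi), 3, 3)            # runif recycles min/max cell-wise
w.draw <- apply(draw, 1, prod)^(1/3)
w.draw <- w.draw / sum(w.draw)                    # weights of this single draw
CI.draw <- (Re(eigen(draw)$values[1]) - 3) / 2    # consistency index of the draw
round(c(w.draw, CI = CI.draw), 4)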
|
/scratch/gouwar.j/cran-all/cranData/CPP/R/ahp_unif.R
|
#' CPP Additive Weighting with Probabilistic AHP using Beta PERT distributions
#' @description This function computes the CPP by additive weighting. Experts' estimates are based on pair-wise comparisons of criteria and are joined in a list of matrices. The estimates are used as parameters of probabilistic distributions: the minimum, mean and maximum values of each pair of criteria model Beta PERT distributions. Random values are generated and applied to the AHP method, and the simulated matrix with the lowest AHP consistency index is used to return the criteria weights.
#' @param n Number of random matrices to simulate from Beta PERT distributions, using the parameters 'min', 'mean' and 'max' of each pair-wise criteria comparison elicited from the experts.
#' @param s Shape of a Beta PERT distribution, as described in the package 'mc2d'. There is no default value; the higher the shape, the higher the kurtosis, which emulates the precision of data elicited from experts.
#' @param list Pair-wise comparison matrices of expert opinions, built with the base R function 'list'.
#' @param x Decision matrix of Alternatives (rows) and Criteria (columns). Benefit criteria must be positive and cost criteria must be negative.
#' @return Weights returned from the AHP method. PMax are the joint probabilities of each alternative being higher than the others, per criterion. CPP gives the final scores and ranks of alternatives by weighted sum.
#' @references Sant'Anna, Annibal P. (2015). Probabilistic Composition of Preferences: Theory and Applications, Springer.
#' @references Saaty, Thomas L. (1980). The analytic hierarchy process: planning, priority setting, resource allocation, McGraw-Hill.
#' @examples
#' n=5000 # simulation
#' s=6 # shape of Beta PERT distribution
#' # Expert pair-wise evaluations
#' Exp.1 = matrix(c(1,0.2,0.3,5,1,0.2,3,5,1),3,3)
#' Exp.2 = matrix(c(1,2,8,0.5,1,6,0.12,0.16,1),3,3)
#' Exp.3 = matrix(c(1,0.5,0.5,2,1,6,2,0.16,1),3,3)
#' Exp.4 = matrix(c(1,3,4,0.3,1,0.5,0.25,0.3,1),3,3)
#' Exp.5 = matrix(c(1,4,5,0.25,1,1,0.2,1,1),3,3)
#' list = list(Exp.1,Exp.2,Exp.3,Exp.4,Exp.5)
#' # Alternatives' original scores
#' Alt.1 = c(30,86,-5)
#' Alt.2 = c(26,77,-12)
#' Alt.3 = c(22,93,-4)
#' Alt.4 = c(34,65,-10)
#' Alt.5 = c(31,80,-8)
#' Alt.6 = c(29,79,-9)
#' Alt.7 = c(37,55,-15)
#' Alt.8 = c(21,69,-11)
#' x = rbind(Alt.1,Alt.2,Alt.3,Alt.4,Alt.5,Alt.6,Alt.7,Alt.8) # Decision matrix
#' CPP.AHP.Beta(n,s,list,x)
#' @importFrom mc2d dpert ppert rpert
#' @importFrom stats integrate dnorm pnorm sd
#' @export
CPP.AHP.Beta = function (n,s,list,x){
min = apply(simplify2array(list), 1:2, min)
mean = apply(simplify2array(list), 1:2, mean)
max = apply(simplify2array(list), 1:2, max)
c=nrow(min)
d = c^2
simu = vector("list", d)
k=1
for (i in 1:c)
{
for (j in 1:c)
{
simu[[k]] <- rpert(n,min[i,j],mean[i,j],max[i,j],s)
k = k+1
}}
un = unlist(simu)
abc = matrix(un,c^2,n,byrow=TRUE)
### AHP
m = vector("list", n)
weight = vector("list", n)
CI = vector("list", n)
for (a in 1:n)
{
m[[a]] = as.vector(abc[,a])
matrix = matrix(m[[a]],nrow = c,ncol = c, byrow = TRUE)
for (i in 1:c)
{
weight[[a]][i] <- prod(matrix[i,])^(1/c)
}
temp_sum <- sum(weight[[a]])
weight[[a]] <- weight[[a]]/temp_sum
lambda_max <- Re(eigen(matrix)$values[1])
CI[[a]] <- (lambda_max-c)/(c-1)
}
### Saaty's Random Indices (RI)
RI = c(0,0,0.58,0.9,1.12,1.24,1.32,1.41,1.45,1.49)
min = which.min(CI)
index = CI[[min]]/RI[c]
w.min = weight[[min]]
w = w.min
### Decision matrix normalization
y = t(as.matrix(apply(x,2,sum)))
dadosn=x
for (j in 1:ncol(x))
{
for (i in 1:nrow(x))
{
dadosn[i,j] = x[i,j]/y[j]
}}
dadosn = replace(dadosn, dadosn == 0, 0.0000000001)
### PMax by Normal distributions
x = dadosn
PMax = x
mat = x
sd = apply(x,2,sd)
for (j in 1:ncol(x))
{
for (i in 1:nrow(x))
{
PMax[i,j] = (integrate(Vectorize(function(x) {prod(pnorm(x,mat[,j][-i],sd[j]))*dnorm(x,mat[,j][[i]],sd[j])}),-2,2)) $value
}}
PMax = PMax[,]
### SAW
saw = PMax%*%w
rank = rank(-saw)
SAW = cbind(saw, rank)
colnames(SAW) = c("SAW","Rank")
Result = list(Weights.AHP=w, PMax=PMax, CPP=SAW)
Result
}
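
## Illustrative sketch (not part of the package): the probability that one
## alternative has the largest score in a criterion column, treating each
## normalized score as the mean of a Normal distribution whose standard
## deviation is the column standard deviation, as in the PMax step above.
## The sketch integrates over the whole real line, while the function above
## truncates the integral to (-2, 2); the column below is hypothetical.
v.sketch  <- c(0.30, 0.25, 0.20, 0.15, 0.10)   # one normalized criterion column
sd.sketch <- sd(v.sketch)
pmax.1 <- integrate(Vectorize(function(z) {
  prod(pnorm(z, mean = v.sketch[-1], sd = sd.sketch)) *
    dnorm(z, mean = v.sketch[1], sd = sd.sketch)
}), lower = -Inf, upper = Inf)$value
round(pmax.1, 4)  # probability that alternative 1 is the best on this criterion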
|
/scratch/gouwar.j/cran-all/cranData/CPP/R/cpp_ahp_beta.R
|
#' CPP Additive Weighting with Probabilistic AHP using Uniform distributions
#' @description This function computes the CPP by additive weighting. Experts' estimates are based on pair-wise comparisons of criteria and are joined in a list of matrices. The estimates are used as parameters of probabilistic distributions: the minimum and maximum values of each pair of criteria model Uniform distributions. Random values are generated and applied to the AHP method, and the simulated matrix with the lowest AHP consistency index is used to return the criteria weights.
#' @param n Number of random matrices to simulate from Uniform distributions, using the parameters 'min' and 'max' of each pair-wise criteria comparison elicited from the experts.
#' @param list Pair-wise comparison matrices of expert opinions, built with the base R function 'list'.
#' @param x Decision matrix of Alternatives (rows) and Criteria (columns). Benefit criteria must be positive and cost criteria must be negative.
#' @return Weights returned from the AHP method. PMax are the joint probabilities of each alternative being higher than the others, per criterion. CPP gives the final scores and ranks of alternatives by weighted sum.
#' @references Sant'Anna, Annibal P. (2015). Probabilistic Composition of Preferences: Theory and Applications, Springer
#' @references Saaty, Thomas L. (1980). The analytic hierarchy process: planning, priority setting, resource allocation, McGraw-Hill.
#' @examples
#' # Computing weights by the AHP method, with 'n' simulated matrices.
#' n=5000 # simulation
#' # Expert pair-wise evaluations
#' Exp.1 = matrix(c(1,0.2,0.3,5,1,0.2,3,5,1),3,3)
#' Exp.2 = matrix(c(1,2,8,0.5,1,6,0.12,0.16,1),3,3)
#' Exp.3 = matrix(c(1,0.5,0.5,2,1,6,2,0.16,1),3,3)
#' Exp.4 = matrix(c(1,3,4,0.3,1,0.5,0.25,0.3,1),3,3)
#' Exp.5 = matrix(c(1,4,5,0.25,1,1,0.2,1,1),3,3)
#' list = list(Exp.1,Exp.2,Exp.3,Exp.4,Exp.5)
#' # Alternatives' original scores
#' Alt.1 = c(30,86,-5)
#' Alt.2 = c(26,77,-12)
#' Alt.3 = c(22,93,-4)
#' Alt.4 = c(34,65,-10)
#' Alt.5 = c(31,80,-8)
#' Alt.6 = c(29,79,-9)
#' Alt.7 = c(37,55,-15)
#' Alt.8 = c(21,69,-11)
#' x = rbind(Alt.1,Alt.2,Alt.3,Alt.4,Alt.5,Alt.6,Alt.7,Alt.8) # Decision matrix
#' CPP.AHP.Unif(n,list,x)
#' @importFrom stats integrate dnorm pnorm runif sd
#' @export
CPP.AHP.Unif = function (n,list,x){
### Computing weights by AHP with Uniform distributions
min = apply(simplify2array(list), 1:2, min)
max = apply(simplify2array(list), 1:2, max)
c=nrow(min)
d = c^2
simu = vector("list", d)
k=1
for (i in 1:c)
{
for (j in 1:c)
{
simu[[k]] <- runif(n,min[i,j],max[i,j])
k = k+1
}}
un = unlist(simu)
abc = matrix(un,c^2,n,byrow=TRUE)
### AHP
m = vector("list", n)
weight = vector("list", n)
CI = vector("list", n)
for (a in 1:n)
{
m[[a]] = as.vector(abc[,a])
matrix = matrix(m[[a]],nrow = c,ncol = c, byrow = TRUE)
for (i in 1:c)
{
weight[[a]][i] <- prod(matrix[i,])^(1/c)
}
temp_sum <- sum(weight[[a]])
weight[[a]] <- weight[[a]]/temp_sum
lambda_max <- Re(eigen(matrix)$values[1])
CI[[a]] <- (lambda_max-c)/(c-1)
}
### Saaty's Random Index
RI = c(0,0,0.58,0.9,1.12,1.24,1.32,1.41,1.45,1.49)
min = which.min(CI)
index = CI[[min]]/RI[c]
w.min = weight[[min]]
w = w.min
### Decision matrix normalization
y = t(as.matrix(apply(x,2,sum)))
dadosn=x
for (j in 1:ncol(x))
{
for (i in 1:nrow(x))
{
dadosn[i,j] = x[i,j]/y[j]
}}
dadosn = replace(dadosn, dadosn == 0, 0.0000000001)
### PMax by Normal distributions
x = dadosn
PMax = x
mat = x
sd = apply(x,2,sd)
for (j in 1:ncol(x))
{
for (i in 1:nrow(x))
{
PMax[i,j] = (integrate(Vectorize(function(x) {prod(pnorm(x,mat[,j][-i],sd[j]))*dnorm(x,mat[,j][[i]],sd[j])}),-2,2)) $value
}}
PMax = PMax[,]
### SAW
saw = PMax%*%w
rank = rank(-saw)
SAW = cbind(saw, rank)
colnames(SAW) = c("SAW","Rank")
Result = list(Weights.AHP=w, PMax=PMax, CPP=SAW)
Result
}
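
## Illustrative sketch (not part of the package): the final additive (SAW) step,
## aggregating the per-criterion probabilities of being the best alternative
## (PMax) with the AHP criteria weights, as done at the end of the function
## above. The PMax matrix and the weights below are hypothetical.
PMax.sketch <- rbind(c(0.40, 0.10, 0.35),
                     c(0.35, 0.30, 0.25),
                     c(0.25, 0.60, 0.40))
w.sketch    <- c(0.5, 0.3, 0.2)                      # AHP weights, summing to 1
saw.sketch  <- as.vector(PMax.sketch %*% w.sketch)   # weighted sum per alternative
cbind(SAW = round(saw.sketch, 4), Rank = rank(-saw.sketch))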
|
/scratch/gouwar.j/cran-all/cranData/CPP/R/cpp_ahp_unif.R
|
#' CPP by axes using Beta PERT distributions
#' @description This function computes the CPP by axes, using Beta PERT distributions to randomize the decision matrix. The CPP by axes is used to rank alternatives in multicriteria decision problems. The "Progressive-Conservative" and the "Optimist-Pessimist" axes emulate four points of view of the decision maker.
#' @param x Decision matrix of Alternatives (rows) and Criteria (columns). Benefit criteria must be positive and cost criteria must be negative.
#' @param s Shape of a Beta PERT distribution, as described in the package 'mc2d'. There is no default value, however the higher the shape the higher the kurtosis, which emulates the precision of data.
#' @return PMax are the joint probabilities of each alternative being higher than the others, per criterion. PMin are the joint probabilities of each alternative being lower than the others, also per criterion. Axes returns the alternatives' scores by axis and their ranking for decision making.
#' @references Sant'Anna, Annibal P. (2015). Probabilistic Composition of Preferences: Theory and Applications, Springer.
#' @references Garcia, Pauli A. A. & Sant'Anna, Annibal P. (2015). Vendor and logistics provider selection in the construction sector: A probabilistic preferences composition approach. Pesquisa Operacional 35.2: 363-375.
#' @examples
#' # Alternatives' original scores
#' Alt.1 = c(2,30,86,-5)
#' Alt.2 = c(4,26,77,-12)
#' Alt.3 = c(3,22,93,-4)
#' Alt.4 = c(6,34,65,-10)
#' Alt.5 = c(5,31,80,-8)
#' x = rbind(Alt.1,Alt.2,Alt.3,Alt.4,Alt.5) # Decision matrix
#' s = 4 # Shape
#' CPP.Axes.Beta(x,s)
#' @importFrom mc2d dpert ppert
#' @importFrom stats integrate
#' @export
CPP.Axes.Beta = function (x,s) {
PMax = x
m = x
max = apply(x,2,max)
min = apply(x,2,min)
for (j in 1:ncol(x))
{
for (i in 1:nrow(x))
{
PMax[i,j] = (integrate(Vectorize(function(x) {prod(ppert(x,min[j],m[,j][-i],max[j],shape=s))*dpert(x,min[j],m[,j][[i]],max[j],shape=s)}),min[j],max[j])) $value
}}
PMax = PMax[,]
####
PMin = x
max = apply(x,2,max)
min = apply(x,2,min)
for (j in 1:ncol(x))
{
for (i in 1:nrow(x))
{
PMin[i,j] = (integrate(Vectorize(function(x) {prod(1-ppert(x,min[j],m[,j][-i],max[j],shape=s))*dpert(x,min[j],m[,j][[i]],max[j],shape=s)}),min[j],max[j])) $value
}}
PMin = PMin[,]
### Composition by axes
# PP point of view
PP = apply(PMax,1,prod)
PP.rank = rank(-PP)
# PO point of view
Probs.m = 1-PMax
PO = 1-(apply(Probs.m,1,prod))
PO.rank = rank(-PO)
# CP point of view
Probs.mm = 1-PMin
CP = apply(Probs.mm,1,prod)
CP.rank = rank(-CP)
# CO point of view
CO = 1-(apply(PMin,1,prod))
CO.rank = rank(-CO)
Result = cbind(PP,PP.rank,PO,PO.rank,CP,CP.rank,CO,CO.rank)
colnames(Result) = c("PP","R","PO","R","CP","R","CO","R")
Result <- list(PMax=PMax, PMin=PMin, Axes=Result)
Result
}
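
## Illustrative sketch (not part of the package): the four points of view
## composed from PMax (probability of being the best) and PMin (probability of
## being the worst), assuming independence between criteria, exactly as in the
## composition step above. The small matrices below are hypothetical
## (3 alternatives, 2 criteria).
PMax.sketch <- rbind(c(0.50, 0.20),
                     c(0.30, 0.30),
                     c(0.20, 0.50))
PMin.sketch <- rbind(c(0.15, 0.55),
                     c(0.35, 0.30),
                     c(0.50, 0.15))
PP.sketch <- apply(PMax.sketch, 1, prod)              # best in every criterion
PO.sketch <- 1 - apply(1 - PMax.sketch, 1, prod)      # best in at least one criterion
CP.sketch <- apply(1 - PMin.sketch, 1, prod)          # worst in no criterion
CO.sketch <- 1 - apply(PMin.sketch, 1, prod)          # not worst in at least one criterion
round(cbind(PP = PP.sketch, PO = PO.sketch, CP = CP.sketch, CO = CO.sketch), 4)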
|
/scratch/gouwar.j/cran-all/cranData/CPP/R/cpp_axes_beta.R
|