#' CPP by axes using Normal distributions
#' @description This function computes the CPP by axes, using Normal distributions to randomize the decision matrix. The CPP by axes is used to rank alternatives in multicriteria decision problems. The "Progressive-Conservative" and the "Optimist-Pessimist" axes emulate four points of view of the decision maker.
#' @param x Decision matrix of Alternatives (rows) and Criteria (columns). Benefit criteria must be positive and cost criteria must be negative.
#' @return PMax are the joint probabilities of each alternative being higher than the others, per criterion. PMin are the joint probabilities of each alternative being lower than the others, also per criterion. Axes returns the alternatives' scores by axis and their ranking for decision making.
#' @references Sant'Anna, Annibal P. (2015). Probabilistic Composition of Preferences: Theory and Applications, Springer.
#' @references Garcia, Pauli A. A. & Sant'Anna, Annibal P. (2015). Vendor and logistics provider selection in the construction sector: A probabilistic preferences composition approach. Pesquisa Operacional 35.2: 363-375.
#' @examples
#' # Alternatives' original scores
#' Alt.1 = c(2,30,86,-5)
#' Alt.2 = c(4,26,77,-12)
#' Alt.3 = c(3,22,93,-4)
#' Alt.4 = c(6,34,65,-10)
#' Alt.5 = c(5,31,80,-8)
#' x = rbind(Alt.1,Alt.2,Alt.3,Alt.4,Alt.5) # Decision matrix
#' CPP.Axes.Normal(x)
#' @export
CPP.Axes.Normal = function (x) {
### Normalization of the decision matrix
y = t(as.matrix(apply(x,2,sum)))
dadosn=x
for (j in 1:ncol(x))
{
for (i in 1:nrow(x))
{
dadosn[i,j] = x[i,j]/y[j]
}}
dadosn = replace(dadosn, dadosn == 0, 0.0000000001)
apply(dadosn,2,sum)
x = dadosn
PMax = x
mat = x
sd = apply(x,2,sd)
for (j in 1:ncol(x))
{
for (i in 1:nrow(x))
{
PMax[i,j] = (integrate(Vectorize(function(x) {prod(pnorm(x,mat[,j][-i],sd[j]))*dnorm(x,mat[,j][[i]],sd[j])}),-2,2)) $value
}}
PMax = PMax[,]
####
PMin = x
mat = x
for (j in 1:ncol(x))
{
for (i in 1:nrow(x))
{
PMin[i,j] = (integrate(Vectorize(function(x) {prod(1-pnorm(x,mat[,j][-i],sd[j]))*dnorm(x,mat[,j][[i]],sd[j])}),-2,2)) $value
}}
PMin = PMin[,]
### Composition by axes
# PP point of view
PP = apply(PMax,1,prod)
PP.rank = rank(-PP)
# PO point of view
Probs.m = 1-PMax
PO = 1-(apply(Probs.m,1,prod))
PO.rank = rank(-PO)
# CP point of view
Probs.mm = 1-PMin
CP = apply(Probs.mm,1,prod)
CP.rank = rank(-CP)
# CO point of view
CO = 1-(apply(PMin,1,prod))
CO.rank = rank(-CO)
Result = cbind(PP,PP.rank,PO,PO.rank,CP,CP.rank,CO,CO.rank)
colnames(Result) = c("PP","Rank","PO","Rank","CP","Rank","CO","Rank")
Result = list(PMax=PMax, PMin=PMin, Axes=Result)
Result
}
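## Illustrative sketch, not part of the package: how the four axes combine
## PMax and PMin under an independence assumption. The toy matrices below
## (3 alternatives x 2 criteria) are made-up values; CPP.Axes.Normal()
## obtains the real ones by numerical integration.
PMax.toy = matrix(c(0.6,0.2,0.2, 0.5,0.3,0.2), nrow = 3)
PMin.toy = matrix(c(0.1,0.4,0.5, 0.2,0.3,0.5), nrow = 3)
PP.toy = apply(PMax.toy, 1, prod)          # Progressive-Pessimist: best on every criterion
PO.toy = 1 - apply(1-PMax.toy, 1, prod)    # Progressive-Optimist: best on at least one criterion
CP.toy = apply(1-PMin.toy, 1, prod)        # Conservative-Pessimist: worst on no criterion
CO.toy = 1 - apply(PMin.toy, 1, prod)      # Conservative-Optimist: not the worst on all criteria simultaneously
cbind(PP.toy, PO.toy, CP.toy, CO.toy)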
/scratch/gouwar.j/cran-all/cranData/CPP/R/cpp_axes_normal.R
#' CPP by Choquet integrals, using Beta PERT distributions
#' @description This function computes the CPP by Choquet integrals, using Beta PERT distributions to randomize the decision matrix. The CPP by Choquet integrals is used to rank alternatives in multicriteria decision problems.
#' @param x Decision matrix of Alternatives (rows) and Criteria (columns). Benefit criteria must be positive and cost criteria must be negative.
#' @param s Shape of a Beta PERT distribution, as described in the package 'mc2d'. There is no default value, however the higher the shape the higher the kurtosis, which emulates the precision of data elicited from experts.
#' @return PMax are the joint probabilities of each alternative being higher than the others, per criterion. Capacities are the interactions of all combined criteria, computed by the Progressive-Optimistic (PO) point of view. Choq returns the alternatives' scores by Choquet integrals and their respective rankings for decision making. Shap returns the Shapley indices, which are associated with criteria weights.
#' @references Sant'Anna, Annibal P. (2015). Probabilistic Composition of Preferences: Theory and Applications, Springer.
#' @examples
#' # Alternatives' original scores
#' Alt.1 = c(2,30,86,-5)
#' Alt.2 = c(4,26,77,-12)
#' Alt.3 = c(3,22,93,-4)
#' Alt.4 = c(6,34,65,-10)
#' Alt.5 = c(5,31,80,-8)
#' x = rbind(Alt.1,Alt.2,Alt.3,Alt.4,Alt.5) # Decision matrix
#' s = 4 # Shape
#' CPP.Choquet.Beta(x,s)
#' @importFrom kappalab capacity Choquet.integral Shapley.value
#' @importFrom mc2d dpert ppert rpert
#' @importFrom stats quantile
#' @importFrom utils combn
#' @export
CPP.Choquet.Beta = function(x,s) {
### PMax Beta
m = x
PMax = x
max = apply(x,2,max)
min = apply(x,2,min)
for (j in 1:ncol(x))
{
for (i in 1:nrow(x))
{
PMax[i,j] = (integrate(Vectorize(function(x) {prod(ppert(x,min[j],m[,j][-i],max[j],s))*dpert(x,min[j],m[,j][[i]],max[j],s)}),min[j],max[j])) $value
}}
PMax[,]
rownames(PMax) = paste0("Alt",1:nrow(m))
colnames(PMax) = paste0("Crit",1:ncol(m))
### Capacities
PMax.m = 1-PMax
### Union of subsets
n = ncol(m)-1
p = ncol(m)
# Combinations 2:n
syn = vector("list", n)
for (i in 2:n)
{
syn[[i]] = 1-(t(apply(PMax.m, 1, function(x){combn(x,i,prod)})))
}
syn = do.call(cbind, syn)
# Subset of all criteria
ult.syn = 1-(apply(PMax.m, 1, function(x){combn(x,p,prod)}))
### MAX value determination
mat.syn = cbind(PMax,syn,ult.syn)
colnames(mat.syn)=NULL
max1.syn = apply(mat.syn,2,max)
max2.syn = max(max1.syn)
### capacities
cap.syn = max1.syn/max2.syn
kappa = c(0,cap.syn)
kappa2= capacity(kappa)
### Choquet integrals and Shapley indices
choquet = matrix(0,nrow(m),1)
for (i in 1:nrow(m))
{
choquet[i] = Choquet.integral(kappa2,PMax[i,])
}
choquet.r = apply(-choquet,2,rank)
choquet = cbind(choquet,choquet.r)
rownames(choquet) = paste0("Alt",1:nrow(m))
colnames(choquet) = c("CPP.Choquet","Rank")
shapley = t(as.matrix(Shapley.value(kappa2)))
colnames(shapley) = paste0("Crit",1:ncol(m))
Result = list(PMax = PMax, Capacities = kappa2, Choq = choquet, Shap = shapley)
Result
}
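## Minimal sketch of the Choquet integral formula, computed in base R with a
## toy two-criteria capacity (the function above relies on 'kappalab' for the
## same computation). Capacity values and scores below are assumptions made
## only for illustration.
choquet.by.hand = function(f, mu){
  # f: scores per criterion; mu: capacity value for each non-empty subset,
  # named by the concatenated criteria indices ("1", "2", "12", ...)
  ord = order(f)                          # criteria sorted by increasing score
  f.sorted = c(0, f[ord])
  value = 0
  for (k in seq_along(ord)){
    A = sort(ord[k:length(ord)])          # criteria whose score is >= f.sorted[k+1]
    value = value + (f.sorted[k+1] - f.sorted[k]) * mu[[paste(A, collapse = "")]]
  }
  value
}
mu.toy = list("1" = 0.4, "2" = 0.5, "12" = 1)   # monotone toy capacity with mu(all) = 1
choquet.by.hand(c(0.3, 0.8), mu.toy)            # 0.3*1 + (0.8-0.3)*0.5 = 0.55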
/scratch/gouwar.j/cran-all/cranData/CPP/R/cpp_chq_beta.R
#' CPP by the Gini Index, using Beta PERT distributions
#' @description The CPP by the Gini Index is used to rank alternatives by evenness of evaluations, in multicriteria decision problems.
#' @param x Decision matrix of Alternatives (rows) and Criteria (columns). Benefit criteria must be positive and cost criteria must be negative.
#' @param s Shape of Beta PERT distribution, as described in the package 'mc2d'. There is no default value, however the higher the shape the higher the kurtosis.
#' @return PMax are the joint probabilities of each alternative being higher than the others, per criterion. CPP.Gini returns the alternatives' scores by the Gini Index and their respective preference ranks for decision making.
#' @references Sant'Anna, Annibal P. (2015). Probabilistic Composition of Preferences: Theory and Applications, Springer
#' @references Gaviao, Luiz O. & Lima, Gilson B.A. (2017) Support decision to player selection: an application of the CPP in soccer, Novas Edições Acadêmicas [in Portuguese].
#' @examples
#' # Alternatives' original scores
#' Alt.1 = c(2,30,86,-5)
#' Alt.2 = c(4,26,77,-12)
#' Alt.3 = c(3,22,93,-4)
#' Alt.4 = c(6,34,65,-10)
#' Alt.5 = c(5,31,80,-8)
#' x = rbind(Alt.1,Alt.2,Alt.3,Alt.4,Alt.5) # Decision matrix
#' s = 4 # Shape
#' CPP.Gini(x,s)
#' @importFrom ineq ineq
#' @importFrom mc2d dpert ppert
#' @export
CPP.Gini = function(x,s) {
b = x
PMax = x
max = apply(b,2,max)
min = apply(b,2,min)
for (j in 1:ncol(b))
{
for (i in 1:nrow(b))
{
PMax[i,j] = (integrate(Vectorize(function(x) {prod(ppert(x,min[j],b[,j][-i],max[j],s))*dpert(x,min[j],b[,j][[i]],max[j],s)}),min[j],max[j])) $value
}}
b = PMax[,]
### Gini Index
gin = matrix(0,nrow(b),1)
for (i in 1:nrow(b))
{
m = as.vector(b[i,])
gin[i] = ineq(m, parameter = NULL, type = "Gini")
}
r.gin = rank(gin)
index = cbind(gin,r.gin)
colnames(index) = c("Index","Rank")
Result = list(PMax = PMax,CPP.Gini = index)
Result
}
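## Illustration only: the Gini index of one row of joint probabilities,
## computed from the mean-absolute-difference formula. The vector 'g' is a
## made-up PMax row; ineq::ineq(g, type = "Gini") returns the same value.
g = c(0.10, 0.25, 0.40, 0.25)
n = length(g)
sum(abs(outer(g, g, "-"))) / (2 * n^2 * mean(g))   # Gini index of the row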
/scratch/gouwar.j/cran-all/cranData/CPP/R/cpp_gini.R
#' CPP by the Malmquist Index, using Beta PERT distributions
#' @description The CPP-Malmquist is used for the dynamic evaluation of alternatives in multicriteria problems, considering two different moments in time.
#' @param m1 Decision matrix of Alternatives (rows) and Criteria (columns) in moment '1'. Benefit criteria must be positive and cost criteria must be negative.
#' @param m2 Decision matrix of Alternatives (rows) and Criteria (columns) in the following moment '2'. Benefit criteria must be positive and cost criteria must be negative.
#' @param s Shape of a Beta PERT distribution, as described in the package 'mc2d'. There is no default value, however the higher the shape the higher the kurtosis, which emulates the precision of data.
#' @return MC gives the Malmquist Conservative index. MP gives the Malmquist Progressive index. Finally, Index gives the CPP-Malmquist of all alternatives and their rankings for decision making. Indices greater than one represent a relative evolution of the alternative between the two periods, while indices lower than one reveal alternatives whose performance decreased in relation to the others.
#' @examples
#' # Alternatives' original scores
#' Alt.1 = c(2,30,86,-5)
#' Alt.2 = c(4,26,77,-12)
#' Alt.3 = c(3,22,93,-4)
#' Alt.4 = c(6,34,65,-10)
#' Alt.5 = c(5,31,80,-8)
#' m1 = rbind(Alt.1,Alt.2,Alt.3,Alt.4,Alt.5) # Decision matrix of the previous moment '1'.
#' Alt.1 = c(3,29,82,-3)
#' Alt.2 = c(6,28,70,-8)
#' Alt.3 = c(2,20,99,-8)
#' Alt.4 = c(5,31,62,-14)
#' Alt.5 = c(9,27,73,-5)
#' m2 = rbind(Alt.1,Alt.2,Alt.3,Alt.4,Alt.5) # Decision matrix of the following moment '2'.
#' s = 4 # Shape
#' CPP.Malmquist.Beta(m1,m2,s)
#' @importFrom mc2d dpert ppert
#' @export
CPP.Malmquist.Beta = function(m1,m2,s) {
pre = m1
pos = m2
### Assumption of imprecision/uncertainty with Beta PERT Distributions
### Parameters: dpert(x,"min","mode","max","shape")
### MAX MIN values of moments "pre" and "pos"
min.pre = apply(pre, 2, min) # Minimum value per criterion
max.pre = apply(pre, 2, max) # Maximum value per criterion
min.pos = apply(pos, 2, min) # Minimum value per criterion
max.pos = apply(pos, 2, max) # Maximum value per criterion
#################################
# Malmquist - Conservative (MC) #
#################################
### Variables
MC_pre_t1 = pre
MC_pre_t = pre
MC_pos_t1 = pos
MC_pos_t = pos
##############
### MC PRE ###
##############
### MC PRE T+1
for (j in 1:ncol(pre))
{
for (i in 1:nrow (pre))
{
MC_pre_t1[i,j] = (integrate(Vectorize(function(x) {prod(1-ppert(x,min.pre[j],pre[,j][-i],max.pre[j],s))*dpert(x,min.pos[j],pos[,j][[i]],max.pos[j],s)}),min.pos[j],max.pos[j]))$value
}}
MC_pre_t1 = 1-(MC_pre_t1)
MC_pre_t1 = apply(MC_pre_t1,1,prod)
### MC PRE T
for (j in 1:ncol(pre))
{
for (i in 1:nrow (pre))
{
MC_pre_t[i,j] = (integrate(Vectorize(function(x) {prod(1-ppert(x,min.pre[j],pre[,j][-i],max.pre[j],s))*dpert(x,min.pre[j],pre[,j][[i]],max.pre[j],s)}),min.pre[j],max.pre[j]))$value
}}
MC_pre_t = 1-(MC_pre_t)
MC_pre_t = apply(MC_pre_t,1,prod)
### Result MC PRE
MC_PRE = (MC_pre_t1)/(MC_pre_t)
##############
### MC POS ###
##############
### MC POS T+1
for (j in 1:ncol(pos))
{
for (i in 1:nrow (pos))
{
MC_pos_t1[i,j] = (integrate(Vectorize(function(x) {prod(1-ppert(x,min.pos[j],pos[,j][-i],max.pos[j],s))*dpert(x,min.pos[j],pos[,j][[i]],max.pos[j],s)}),min.pos[j],max.pos[j]))$value
}}
MC_pos_t1 = 1-(MC_pos_t1)
MC_pos_t1 = apply(MC_pos_t1,1,prod)
### MC POS T
for (j in 1:ncol(pos))
{
for (i in 1:nrow (pos))
{
MC_pos_t[i,j] = (integrate(Vectorize(function(x) {prod(1-ppert(x,min.pos[j],pos[,j][-i],max.pos[j],s))*dpert(x,min.pre[j],pre[,j][[i]],max.pre[j],s)}),min.pre[j],max.pre[j]))$value
}}
MC_pos_t = 1-(MC_pos_t)
MC_pos_t = apply(MC_pos_t,1,prod)
### Result MC POS
MC_POS = (MC_pos_t1)/(MC_pos_t)
##################
#### MC Index ####
MC = sqrt((MC_PRE)*(MC_POS))
################################
# Malmquist - Progressive (MP) #
################################
### Variables
MP_pre_t1 = pre
MP_pre_t = pre
MP_pos_t1 = pos
MP_pos_t = pos
##############
### MP PRE ###
##############
### MP PRE T+1
for (j in 1:ncol(pre))
{
for (i in 1:nrow (pre))
{
MP_pre_t1[i,j] = (integrate(Vectorize(function(x) {prod(ppert(x,min.pre[j],pre[,j][-i],max.pre[j],s))*dpert(x,min.pos[j],pos[,j][[i]],max.pos[j],s)}),min.pos[j],max.pos[j]))$value
}}
MP_pre_t1 = 1-(MP_pre_t1)
MP_pre_t1 = apply(MP_pre_t1,1,prod)
### MP PRE T
for (j in 1:ncol(pre))
{
for (i in 1:nrow (pre))
{
MP_pre_t[i,j] = (integrate(Vectorize(function(x) {prod(ppert(x,min.pre[j],pre[,j][-i],max.pre[j],s))*dpert(x,min.pre[j],pre[,j][[i]],max.pre[j],s)}),min.pre[j],max.pre[j]))$value
}}
MP_pre_t = 1-(MP_pre_t)
MP_pre_t = apply(MP_pre_t,1,prod)
### Result MP PRE
MP_PRE = (MP_pre_t1)/(MP_pre_t)
##############
### MP POS ###
##############
### MP POS T+1
for (j in 1:ncol(pos))
{
for (i in 1:nrow (pos))
{
MP_pos_t1[i,j] = (integrate(Vectorize(function(x) {prod(ppert(x,min.pos[j],pos[,j][-i],max.pos[j],s))*dpert(x,min.pos[j],pos[,j][[i]],max.pos[j],s)}),min.pos[j],max.pos[j]))$value
}}
MP_pos_t1 = 1-(MP_pos_t1)
MP_pos_t1 = apply(MP_pos_t1,1,prod)
### MP POS T
for (j in 1:ncol(pos))
{
for (i in 1:nrow (pos))
{
MP_pos_t[i,j] = (integrate(Vectorize(function(x) {prod(ppert(x,min.pos[j],pos[,j][-i],max.pos[j],s))*dpert(x,min.pre[j],pre[,j][[i]],max.pre[j],s)}),min.pre[j],max.pre[j]))$value
}}
MP_pos_t = 1-(MP_pos_t)
MP_pos_t = apply(MP_pos_t,1,prod)
### Result MP POS
MP_POS = (MP_pos_t1)/(MP_pos_t)
#######################
###### MP Index #######
MP = sqrt((MP_PRE)*(MP_POS))
###############################
# PROB. MALMQUIST INDEX (IMP) #
###############################
IMP = sqrt(MC/MP)
rank = rank(-IMP)
IMP = cbind(IMP,rank)
###############
### RESULTS ###
###############
MC = cbind(MC_pre_t,MC_pre_t1,MC_PRE,MC_pos_t,MC_pos_t1,MC_POS,MC)
MP = cbind(MP_pre_t,MP_pre_t1,MP_PRE,MP_pos_t,MP_pos_t1,MP_POS,MP)
Result = list(MC=MC, MP=MP, Index=IMP)
colnames(Result$Index) = c("CPP.Malmquist","Rank")
Result
}
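## Illustrative sketch with assumed numbers: each Malmquist component is the
## geometric mean of its 'pre' and 'pos' ratios, and the final index is the
## square root of the conservative/progressive quotient, as computed above.
MC_PRE.toy = 1.10; MC_POS.toy = 1.06    # conservative ratios (made up)
MP_PRE.toy = 0.95; MP_POS.toy = 0.92    # progressive ratios (made up)
MC.toy = sqrt(MC_PRE.toy * MC_POS.toy)
MP.toy = sqrt(MP_PRE.toy * MP_POS.toy)
sqrt(MC.toy / MP.toy)                   # > 1 suggests relative improvement between the two moments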
/scratch/gouwar.j/cran-all/cranData/CPP/R/cpp_malm_beta.R
#' CPP with multiple perspectives for decision-making, based on the 'Moneyball' principle.
#' @description The algorithm evaluates alternatives by integrating the CPP-Tri, the CPP-Malmquist, the CPP-Gini, the alternatives' market values and the CPP by axes. The CPP-mb was originally applied in sports science to evaluate players' performance.
#' @param t1 Decision matrix of Alternatives (rows) and Criteria (columns) in the moment '1'. Benefit criteria must be positive and cost criteria negative.
#' @param t2 Decision matrix of Alternatives (rows) and Criteria (columns) in the following moment '2'. Benefit criteria must be positive and cost criteria negative.
#' @param m Vector of alternatives' market values.
#' @param q Vector of quantiles, indicating the classes' profiles.
#' @param s Shape of a Beta PERT distribution, as described in the package 'mc2d'. There is no default value, however the higher the shape the higher the kurtosis, which emulates the precision of data.
#' @return Class assigns the alternatives to classes, defined by the indicated profiles. The list of classes also shows the decision matrices to be modeled by CPP-PP. CPP-mb indicates the final scores per class.
#' @references Sant'Anna, Annibal P. (2015). Probabilistic Composition of Preferences: Theory and Applications, Springer.
#' @references Lewis, Michael. (2004) Moneyball: The art of winning an unfair game. WW Norton & Company.
#' @references Gaviao, Luiz O. & Lima, Gilson B.A. (2017) Support decision to player selection: an application of the CPP in soccer, Novas Edições Acadêmicas [in Portuguese].
#' @examples
#' ## Decision matrix of the previous moment '1'.
#' Alt.1 = c(2,30,86,-5)
#' Alt.2 = c(4,26,77,-12)
#' Alt.3 = c(3,22,93,-4)
#' Alt.4 = c(6,34,65,-10)
#' Alt.5 = c(5,31,80,-8)
#' Alt.6 = c(6,29,79,-9)
#' Alt.7 = c(8,37,55,-15)
#' Alt.8 = c(10,21,69,-11)
#' t1 = rbind(Alt.1,Alt.2,Alt.3,Alt.4,Alt.5,Alt.6,Alt.7,Alt.8)
#' ## Decision matrix of the following moment '2'.
#' Alt.1 = c(3,29,82,-3)
#' Alt.2 = c(6,28,70,-8)
#' Alt.3 = c(2,20,99,-8)
#' Alt.4 = c(5,31,62,-14)
#' Alt.5 = c(9,27,73,-5)
#' Alt.6 = c(4,33,85,-13)
#' Alt.7 = c(9,39,59,-10)
#' Alt.8 = c(8,19,77,-9)
#' t2 = rbind(Alt.1,Alt.2,Alt.3,Alt.4,Alt.5,Alt.6,Alt.7,Alt.8)
#' m = c(100,120,150,140,90,70,110,130) # Market values
#' q = c(0.65,0.35) # quantiles of class profiles
#' s = 4 # Shape
#' CPP.mb(t1,t2,m,q,s)
#' @importFrom mc2d dpert ppert
#' @importFrom ineq ineq
#' @export
CPP.mb = function(t1,t2,m,q,s){
rownames(t1) = paste('Alt', 1:(nrow(t1)))
rownames(t2) = paste('Alt', 1:(nrow(t1)))
### CPP Tri
A = t2
min = apply(A,2,min)
max = apply(A,2,max)
Perfil = matrix(0,length(q),ncol(A))
for (a in 1:length(q))
{
Perfil[a,] = apply(A,2,function(x) quantile(x,q[a]))
}
rownames(Perfil) = paste0("Prof",1:length(q))
A = rbind(Perfil,A)
##### Prob.Max OVER profiles
listac = vector("list", length(q))
for (a in 1:length(q))
{
listac[[a]] = matrix(0,nrow(A),ncol(A))
for (j in 1:ncol(A))
{
for (i in 1:nrow (A))
{
listac[[a]][i,j] = (integrate(Vectorize(function(x) {prod(ppert(x,min[j],A[a,j],max[j],s))*dpert(x,min[j],A[,j][[i]],max[j],s)}),min[j],max[j]))$value
}}}
names(listac) = paste0("Profile",1:length(q))
listac
##### Prob.Min UNDER profiles
listab = vector("list", length(q))
for (a in 1:length(q))
{
listab[[a]] = matrix(0,nrow(A),ncol(A))
for (j in 1:ncol(A))
{
for (i in 1:nrow (A))
{
listab[[a]][i,j] = (integrate(Vectorize(function(x) {prod(1-ppert(x,min[j],A[a,j],max[j],s))*dpert(x,min[j],A[,j][[i]],max[j],s)}),min[j],max[j]))$value
}}}
names(listab) = paste0("Profile",1:length(q))
listab
### Sorting procedure
PPacima = lapply(listac,apply,1,prod)
PPabaixo = lapply(listab,apply,1,prod)
Difs = vector("list", length(q))
for (a in 1:length(q))
{
Difs[[a]] = abs(PPacima[[a]]-PPabaixo[[a]])
}
Difs = t(do.call(rbind, Difs))
Classe = as.matrix(apply(Difs,1,which.min))
Classe = Classe[-c(1:length(q)),]
### Original matrices "t1" and "t2"
t1 = cbind(t1,Classe)
t2 = cbind(t2,Classe)
c = ncol(t1)
list1 = vector("list", length(q))
list2 = vector("list", length(q))
for (a in 1:length(q))
{
list1[[a]] = subset(t1[,-c(c)], t1[,c] == a)
list2[[a]] = subset(t2[,-c(c)], t2[,c] == a)
}
### CPP Gini
PMax = vector("list", length(q))
for (a in 1:length(q))
{
b = list2[[a]]
PMax[[a]] = matrix(0,nrow(b),ncol(b))
max = apply(b,2,max)
min = apply(b,2,min)
for (j in 1:ncol(b))
{
for (i in 1:nrow(b))
{
PMax[[a]][i,j] = (integrate(Vectorize(function(x) {prod(ppert(x,min[j],b[,j][-i],max[j],s))*dpert(x,min[j],b[,j][[i]],max[j],s)}),min[j],max[j])) $value
}}}
gin = vector("list", length(q))
for (a in 1:length(q))
{
b = PMax[[a]]
for (i in 1:nrow(b))
{
g = b[i,]
gin[[a]][i] = ineq(g, parameter = NULL, type = "Gini")
}
gin[[a]] = as.matrix(gin[[a]])
rownames(gin[[a]]) = c(rownames(list1[[a]]))
}
### CPP Malmquist
c = ncol(t1)
pre = t1[,-c(c)]
pos = t2[,-c(c)]
min.pre = apply(pre, 2, min) # Minimum value per criterion
max.pre = apply(pre, 2, max) # Maximum value per criterion
min.pos = apply(pos, 2, min) # Minimum value per criterion
max.pos = apply(pos, 2, max) # Maximum value per criterion
# Malmquist - Conservative (MC)
MC_pre_t1 = pre
MC_pre_t = pre
MC_pos_t1 = pos
MC_pos_t = pos
# MC PRE
# MC PRE T+1
for (j in 1:ncol(pre))
{
for (i in 1:nrow (pre))
{
MC_pre_t1[i,j] = (integrate(Vectorize(function(x) {prod(1-ppert(x,min.pre[j],pre[,j][-i],max.pre[j],s))*dpert(x,min.pos[j],pos[,j][[i]],max.pos[j],s)}),min.pos[j],max.pos[j]))$value
}}
MC_pre_t1 = 1-(MC_pre_t1)
MC_pre_t1 = apply(MC_pre_t1,1,prod)
# MC PRE T
for (j in 1:ncol(pre))
{
for (i in 1:nrow (pre))
{
MC_pre_t[i,j] = (integrate(Vectorize(function(x) {prod(1-ppert(x,min.pre[j],pre[,j][-i],max.pre[j],s))*dpert(x,min.pre[j],pre[,j][[i]],max.pre[j],s)}),min.pre[j],max.pre[j]))$value
}}
MC_pre_t = 1-(MC_pre_t)
MC_pre_t = apply(MC_pre_t,1,prod)
MC_PRE = (MC_pre_t1)/(MC_pre_t)
# MC POS
# MC POS T+1
for (j in 1:ncol(pos))
{
for (i in 1:nrow (pos))
{
MC_pos_t1[i,j] = (integrate(Vectorize(function(x) {prod(1-ppert(x,min.pos[j],pos[,j][-i],max.pos[j],s))*dpert(x,min.pos[j],pos[,j][[i]],max.pos[j],s)}),min.pos[j],max.pos[j]))$value
}}
MC_pos_t1 = 1-(MC_pos_t1)
MC_pos_t1 = apply(MC_pos_t1,1,prod)
# MC POS T
for (j in 1:ncol(pos))
{
for (i in 1:nrow (pos))
{
MC_pos_t[i,j] = (integrate(Vectorize(function(x) {prod(1-ppert(x,min.pos[j],pos[,j][-i],max.pos[j],s))*dpert(x,min.pre[j],pre[,j][[i]],max.pre[j],s)}),min.pre[j],max.pre[j]))$value
}}
MC_pos_t = 1-(MC_pos_t)
MC_pos_t = apply(MC_pos_t,1,prod)
MC_POS = (MC_pos_t1)/(MC_pos_t)
# MC Index
MC = sqrt((MC_PRE)*(MC_POS))
# Malmquist - Progressive (MP)
# Variables
MP_pre_t1 = pre
MP_pre_t = pre
MP_pos_t1 = pos
MP_pos_t = pos
# MP PRE
# MP PRE T+1
for (j in 1:ncol(pre))
{
for (i in 1:nrow (pre))
{
MP_pre_t1[i,j] = (integrate(Vectorize(function(x) {prod(ppert(x,min.pre[j],pre[,j][-i],max.pre[j],s))*dpert(x,min.pos[j],pos[,j][[i]],max.pos[j],s)}),min.pos[j],max.pos[j]))$value
}}
MP_pre_t1 = 1-(MP_pre_t1)
MP_pre_t1 = apply(MP_pre_t1,1,prod)
# MP PRE T
for (j in 1:ncol(pre))
{
for (i in 1:nrow (pre))
{
MP_pre_t[i,j] = (integrate(Vectorize(function(x) {prod(ppert(x,min.pre[j],pre[,j][-i],max.pre[j],s))*dpert(x,min.pre[j],pre[,j][[i]],max.pre[j],s)}),min.pre[j],max.pre[j]))$value
}}
MP_pre_t = 1-(MP_pre_t)
MP_pre_t = apply(MP_pre_t,1,prod)
# Result MP PRE
MP_PRE = (MP_pre_t1)/(MP_pre_t)
# MP POS
# MP POS T+1
for (j in 1:ncol(pos))
{
for (i in 1:nrow (pos))
{
MP_pos_t1[i,j] = (integrate(Vectorize(function(x) {prod(ppert(x,min.pos[j],pos[,j][-i],max.pos[j],s))*dpert(x,min.pos[j],pos[,j][[i]],max.pos[j],s)}),min.pos[j],max.pos[j]))$value
}}
MP_pos_t1 = 1-(MP_pos_t1)
MP_pos_t1 = apply(MP_pos_t1,1,prod)
# MP POS T
for (j in 1:ncol(pos))
{
for (i in 1:nrow (pos))
{
MP_pos_t[i,j] = (integrate(Vectorize(function(x) {prod(ppert(x,min.pos[j],pos[,j][-i],max.pos[j],s))*dpert(x,min.pre[j],pre[,j][[i]],max.pre[j],s)}),min.pre[j],max.pre[j]))$value
}}
MP_pos_t = 1-(MP_pos_t)
MP_pos_t = apply(MP_pos_t,1,prod)
MP_POS = (MP_pos_t1)/(MP_pos_t)
# MP Index
MP = sqrt((MP_PRE)*(MP_POS))
# PROB. MALMQUIST INDEX (IMP)
IMP = sqrt(MC/MP)
IMP1 = cbind(IMP,Classe)
c = ncol(IMP1)
IMP2 = vector("list", length(q))
for (a in 1:length(q))
{
IMP2[[a]] = subset(IMP1[,-c(c)], IMP1[,c] == a)
#rownames(IMP2[[a]]) = c(rownames(list1[[a]]))
}
# Market value
mark = cbind(m,Classe)
colnames(mark) = c("Market","Class")
c = ncol(IMP1)
mark2 = vector("list", length(q))
for (a in 1:length(q))
{
mark2[[a]] = subset(mark[,-c(c)], mark[,c] == a)
#rownames(mark2[[a]]) = c(rownames(list1[[a]]))
}
### CPP Moneyball
matrix = vector("list", length(q))
for (a in 1:length(q))
{
matrix[[a]] = cbind(-gin[[a]],IMP2[[a]],-mark2[[a]])
}
for(i in seq_along(matrix))
{
colnames(matrix[[i]]) <- c("CPP.Gini","CPP.Malmquist","Market")
}
PMax.mb = vector("list", length(q))
for (a in 1:length(q))
{
b = matrix[[a]]
PMax.mb[[a]] = matrix(0,nrow(b),ncol(b))
max = c(0,apply(b[,2:3],2,max))
min = c(-1,apply(b[,2:3],2,min))
for (j in 1:ncol(b))
{
for (i in 1:nrow(b))
{
PMax.mb[[a]][i,j] = (integrate(Vectorize(function(x) {prod(ppert(x,min[j],b[,j][-i],max[j],s))*dpert(x,min[j],b[,j][[i]],max[j],s)}),min[j],max[j])) $value
}}}
for(i in seq_along(PMax.mb))
{
colnames(PMax.mb[[i]]) <- c("CPP.Gini","CPP.Malmquist","Market")
}
PP = vector("list", length(q))
PP.r = vector("list", length(q))
mb = vector("list", length(q))
for (a in 1:length(q))
{
PP[[a]] = apply(PMax.mb[[a]],1,prod)
PP.r[[a]] = rank(-PP[[a]])
mb[[a]] = cbind(PMax.mb[[a]],PP[[a]],PP.r[[a]])
rownames(mb[[a]]) = c(rownames(list1[[a]]))
}
for(i in seq_along(mb))
{
colnames(mb[[i]]) <- c("PMax.Gini","PMax.Malmquist","PMax.Market","CPP.PP","Rank")
}
### Results
Results = list(Class = matrix, CPP.mb = mb)
Results
}
/scratch/gouwar.j/cran-all/cranData/CPP/R/cpp_mb.R
#' CPP with multiple perspectives for human resources evaluation
#' @description This function computes the CPP-rh, using Beta PERT distributions to randomize the decision matrices. The CPP-rh is used to evaluate alternatives by integrating the CPP-Tri, the CPP-Malmquist, the CPP-Gini and the CPP by axes. The CPP-rh and the CPP-mb are very similar, but the CPP-rh does not include the alternatives' market values.
#' @param t1 Decision matrix of Alternatives (rows) and Criteria (columns) in the previous moment '1'. Benefit criteria must be positive and cost criteria must be negative.
#' @param t2 Decision matrix of Alternatives (rows) and Criteria (columns) in the following moment '2'. Benefit criteria must be positive and cost criteria must be negative.
#' @param q Vector of quantiles, indicating the classes' profiles.
#' @param s Shape of a Beta PERT distribution, as described in the package 'mc2d'. There is no default value, however the higher the shape the higher the kurtosis, which emulates the precision of data.
#' @return Class identifies the alternatives' classes, according to the selected profiles. CPP-RH returns the alternatives' scores per class.
#' @examples
#' ## Decision matrix of the previous moment '1'.
#' Alt.1 = c(2,30,86,-5)
#' Alt.2 = c(4,26,77,-12)
#' Alt.3 = c(3,22,93,-4)
#' Alt.4 = c(6,34,65,-10)
#' Alt.5 = c(5,31,80,-8)
#' Alt.6 = c(6,29,79,-9)
#' Alt.7 = c(8,37,55,-15)
#' Alt.8 = c(10,21,69,-11)
#' t1 = rbind(Alt.1,Alt.2,Alt.3,Alt.4,Alt.5,Alt.6,Alt.7,Alt.8)
#' ## Decision matrix of the following moment '2'.
#' Alt.1 = c(3,29,82,-3)
#' Alt.2 = c(6,28,70,-8)
#' Alt.3 = c(2,20,99,-8)
#' Alt.4 = c(5,31,62,-14)
#' Alt.5 = c(9,27,73,-5)
#' Alt.6 = c(4,33,85,-13)
#' Alt.7 = c(9,39,59,-10)
#' Alt.8 = c(8,19,77,-9)
#' t2 = rbind(Alt.1,Alt.2,Alt.3,Alt.4,Alt.5,Alt.6,Alt.7,Alt.8)
#' q = c(0.65,0.35) # quantiles of class profiles
#' s = 4 # Shape
#' CPP.rh(t1,t2,q,s)
#' @importFrom ineq ineq
#' @importFrom mc2d dpert ppert
#' @export
CPP.rh = function(t1,t2,q,s){
rownames(t1) = paste('Alt', 1:(nrow(t1)))
rownames(t2) = paste('Alt', 1:(nrow(t1)))
### CPP Tri
A = t2
min = apply(A,2,min)
max = apply(A,2,max)
Perfil = matrix(0,length(q),ncol(A))
for (a in 1:length(q))
{
Perfil[a,] = apply(A,2,function(x) quantile(x,q[a]))
}
A = rbind(Perfil,A)
##### Prob.Max OVER profiles
listac = vector("list", length(q))
for (a in 1:length(q))
{
listac[[a]] = matrix(0,nrow(A),ncol(A))
for (j in 1:ncol(A))
{
for (i in 1:nrow (A))
{
listac[[a]][i,j] = (integrate(Vectorize(function(x) {prod(ppert(x,min[j],A[a,j],max[j],s))*dpert(x,min[j],A[,j][[i]],max[j],s)}),min[j],max[j]))$value
}}}
names(listac) = paste0("Profile",1:length(q))
listac
##### Prob.Min UNDER profiles
listab = vector("list", length(q))
for (a in 1:length(q))
{
listab[[a]] = matrix(0,nrow(A),ncol(A))
for (j in 1:ncol(A))
{
for (i in 1:nrow (A))
{
listab[[a]][i,j] = (integrate(Vectorize(function(x) {prod(1-ppert(x,min[j],A[a,j],max[j],s))*dpert(x,min[j],A[,j][[i]],max[j],s)}),min[j],max[j]))$value
}}}
names(listab) = paste0("Profile",1:length(q))
listab
### Sorting procedure
PPacima = lapply(listac,apply,1,prod)
PPabaixo = lapply(listab,apply,1,prod)
Difs = vector("list", length(q))
for (a in 1:length(q))
{
Difs[[a]] = abs(PPacima[[a]]-PPabaixo[[a]])
}
Difs = t(do.call(rbind, Difs))
Classe = as.matrix(apply(Difs,1,which.min))
Classe = Classe[-c(1:length(q)),]
### Original matrices "t1" and "t2"
t1 = cbind(t1,Classe)
t2 = cbind(t2,Classe)
c = ncol(t1)
list1 = vector("list", length(q))
list2 = vector("list", length(q))
for (a in 1:length(q))
{
list1[[a]] = subset(t1[,-c(c)], t1[,c] == a)
list2[[a]] = subset(t2[,-c(c)], t2[,c] == a)
}
### CPP Gini
PMax = vector("list", length(q))
for (a in 1:length(q))
{
b = list2[[a]]
PMax[[a]] = matrix(0,nrow(b),ncol(b))
max = apply(b,2,max)
min = apply(b,2,min)
for (j in 1:ncol(b))
{
for (i in 1:nrow(b))
{
PMax[[a]][i,j] = (integrate(Vectorize(function(x) {prod(ppert(x,min[j],b[,j][-i],max[j],s))*dpert(x,min[j],b[,j][[i]],max[j],s)}),min[j],max[j])) $value
}}}
gin = vector("list", length(q))
for (a in 1:length(q))
{
b = PMax[[a]]
for (i in 1:nrow(b))
{
g = b[i,]
gin[[a]][i] = ineq(g, parameter = NULL, type = "Gini")
}
gin[[a]] = as.matrix(gin[[a]])
rownames(gin[[a]]) = c(rownames(list1[[a]]))
}
### CPP Malmquist
c = ncol(t1)
pre = t1[,-c(c)]
pos = t2[,-c(c)]
min.pre = apply(pre, 2, min) # Minimum value per criterion
max.pre = apply(pre, 2, max) # Maximum value per criterion
min.pos = apply(pos, 2, min) # Minimum value per criterion
max.pos = apply(pos, 2, max) # Maximum value per criterion
# Malmquist - Conservative (MC)
MC_pre_t1 = pre
MC_pre_t = pre
MC_pos_t1 = pos
MC_pos_t = pos
# MC PRE
# MC PRE T+1
for (j in 1:ncol(pre))
{
for (i in 1:nrow (pre))
{
MC_pre_t1[i,j] = (integrate(Vectorize(function(x) {prod(1-ppert(x,min.pre[j],pre[,j][-i],max.pre[j],s))*dpert(x,min.pos[j],pos[,j][[i]],max.pos[j],s)}),min.pos[j],max.pos[j]))$value
}}
MC_pre_t1 = 1-(MC_pre_t1)
MC_pre_t1 = apply(MC_pre_t1,1,prod)
# MC PRE T
for (j in 1:ncol(pre))
{
for (i in 1:nrow (pre))
{
MC_pre_t[i,j] = (integrate(Vectorize(function(x) {prod(1-ppert(x,min.pre[j],pre[,j][-i],max.pre[j],s))*dpert(x,min.pre[j],pre[,j][[i]],max.pre[j],s)}),min.pre[j],max.pre[j]))$value
}}
MC_pre_t = 1-(MC_pre_t)
MC_pre_t = apply(MC_pre_t,1,prod)
MC_PRE = (MC_pre_t1)/(MC_pre_t)
# MC POS
# MC POS T+1
for (j in 1:ncol(pos))
{
for (i in 1:nrow (pos))
{
MC_pos_t1[i,j] = (integrate(Vectorize(function(x) {prod(1-ppert(x,min.pos[j],pos[,j][-i],max.pos[j],s))*dpert(x,min.pos[j],pos[,j][[i]],max.pos[j],s)}),min.pos[j],max.pos[j]))$value
}}
MC_pos_t1 = 1-(MC_pos_t1)
MC_pos_t1 = apply(MC_pos_t1,1,prod)
# MC POS T
for (j in 1:ncol(pos))
{
for (i in 1:nrow (pos))
{
MC_pos_t[i,j] = (integrate(Vectorize(function(x) {prod(1-ppert(x,min.pos[j],pos[,j][-i],max.pos[j],s))*dpert(x,min.pre[j],pre[,j][[i]],max.pre[j],s)}),min.pre[j],max.pre[j]))$value
}}
MC_pos_t = 1-(MC_pos_t)
MC_pos_t = apply(MC_pos_t,1,prod)
MC_POS = (MC_pos_t1)/(MC_pos_t)
# MC Index
MC = sqrt((MC_PRE)*(MC_POS))
# Malmquist - Progressive (MP)
# Variables
MP_pre_t1 = pre
MP_pre_t = pre
MP_pos_t1 = pos
MP_pos_t = pos
# MP PRE
# MP PRE T+1
for (j in 1:ncol(pre))
{
for (i in 1:nrow (pre))
{
MP_pre_t1[i,j] = (integrate(Vectorize(function(x) {prod(ppert(x,min.pre[j],pre[,j][-i],max.pre[j],s))*dpert(x,min.pos[j],pos[,j][[i]],max.pos[j],s)}),min.pos[j],max.pos[j]))$value
}}
MP_pre_t1 = 1-(MP_pre_t1)
MP_pre_t1 = apply(MP_pre_t1,1,prod)
# MP PRE T
for (j in 1:ncol(pre))
{
for (i in 1:nrow (pre))
{
MP_pre_t[i,j] = (integrate(Vectorize(function(x) {prod(ppert(x,min.pre[j],pre[,j][-i],max.pre[j],s))*dpert(x,min.pre[j],pre[,j][[i]],max.pre[j],s)}),min.pre[j],max.pre[j]))$value
}}
MP_pre_t = 1-(MP_pre_t)
MP_pre_t = apply(MP_pre_t,1,prod)
# Result MP PRE
MP_PRE = (MP_pre_t1)/(MP_pre_t)
# MP POS
# MP POS T+1
for (j in 1:ncol(pos))
{
for (i in 1:nrow (pos))
{
MP_pos_t1[i,j] = (integrate(Vectorize(function(x) {prod(ppert(x,min.pos[j],pos[,j][-i],max.pos[j],s))*dpert(x,min.pos[j],pos[,j][[i]],max.pos[j],s)}),min.pos[j],max.pos[j]))$value
}}
MP_pos_t1 = 1-(MP_pos_t1)
MP_pos_t1 = apply(MP_pos_t1,1,prod)
# MP POS T
for (j in 1:ncol(pos))
{
for (i in 1:nrow (pos))
{
MP_pos_t[i,j] = (integrate(Vectorize(function(x) {prod(ppert(x,min.pos[j],pos[,j][-i],max.pos[j],s))*dpert(x,min.pre[j],pre[,j][[i]],max.pre[j],s)}),min.pre[j],max.pre[j]))$value
}}
MP_pos_t = 1-(MP_pos_t)
MP_pos_t = apply(MP_pos_t,1,prod)
MP_POS = (MP_pos_t1)/(MP_pos_t)
# MP Index
MP = sqrt((MP_PRE)*(MP_POS))
# PROB. MALMQUIST INDEX (IMP)
IMP = sqrt(MC/MP)
IMP1 = cbind(IMP,Classe)
c = ncol(IMP1)
IMP2 = vector("list", length(q))
for (a in 1:length(q))
{
IMP2[[a]] = subset(IMP1[,-c(c)], IMP1[,c] == a)
#rownames(IMP2[[a]]) = c(rownames(list1[[a]]))
}
### CPP RH
matrix = vector("list", length(q))
for (a in 1:length(q))
{
matrix[[a]] = cbind(-gin[[a]],IMP2[[a]])
}
for(i in seq_along(matrix))
{
colnames(matrix[[i]]) <- c("CPP.Gini","CPP.Malmquist")
}
PMax.rh = vector("list", length(q))
for (a in 1:length(q))
{
b = matrix[[a]]
PMax.rh[[a]] = matrix(0,nrow(b),ncol(b))
max = c(0,max(b[,2]))
min = c(-1,min(b[,2]))
for (j in 1:ncol(b))
{
for (i in 1:nrow(b))
{
PMax.rh[[a]][i,j] = (integrate(Vectorize(function(x) {prod(ppert(x,min[j],b[,j][-i],max[j],s))*dpert(x,min[j],b[,j][[i]],max[j],s)}),min[j],max[j])) $value
}}}
for(i in seq_along(PMax.rh))
{
colnames(PMax.rh[[i]]) <- c("CPP.Gini","CPP.Malmquist")
}
PP = vector("list", length(q))
PP.r = vector("list", length(q))
rh = vector("list", length(q))
for (a in 1:length(q))
{
PP[[a]] = apply(PMax.rh[[a]],1,prod)
PP.r[[a]] = rank(-PP[[a]])
rh[[a]] = cbind(PMax.rh[[a]],PP[[a]],PP.r[[a]])
rownames(rh[[a]]) = c(rownames(list1[[a]]))
}
for(i in seq_along(rh))
{
colnames(rh[[i]]) <- c("PMax.Gini","PMax.Malmquist","CPP.PP","Rank")
}
### Results
Results = list(Class = matrix, CPP.RH = rh)
Results
}
/scratch/gouwar.j/cran-all/cranData/CPP/R/cpp_rh.R
#' CPP by weighted sum, with weights informed by the user
#' @description This function computes the CPP-SAW, using Normal distributions and weights defined by the decision maker. The CPP-SAW is used to evaluate alternatives by weighted sum.
#' @param x Decision matrix of Alternatives (rows) and Criteria (columns). Benefit criteria must be positive and cost criteria must be negative.
#' @param w Vector of weights assigned by the decision maker. Weights are normalized in case their sum differs from one.
#' @return Weights repeat the parameter 'w' if they sum to one; otherwise they are normalized. PMax indicates the joint probabilities of each alternative being higher than the others, per criterion. CPP returns the alternatives' scores by weighted sum, indicating the preference ranks for decision making.
#' @references Sant'Anna, Annibal P. (2015). Probabilistic Composition of Preferences: Theory and Applications, Springer.
#' @examples
#' # Decision matrix
#' Alt.1 = c(2,30,86,-5)
#' Alt.2 = c(4,26,77,-12)
#' Alt.3 = c(3,22,93,-4)
#' Alt.4 = c(6,34,65,-10)
#' Alt.5 = c(5,31,80,-8)
#' Alt.6 = c(6,29,79,-9)
#' Alt.7 = c(8,37,55,-15)
#' Alt.8 = c(10,21,69,-11)
#' x = rbind(Alt.1,Alt.2,Alt.3,Alt.4,Alt.5,Alt.6,Alt.7,Alt.8)
#' w = c(0.2,0.3,0.4,0.1)
#' CPP.SAW(x,w)
#' @export
CPP.SAW = function (x,w){
### Decision matrix normalization
y = t(as.matrix(apply(x,2,sum)))
dadosn=x
for (j in 1:ncol(x))
{
for (i in 1:nrow(x))
{
dadosn[i,j] = x[i,j]/y[j]
}}
dadosn = replace(dadosn, dadosn == 0, 0.0000000001)
apply(dadosn,2,sum)
### PMax by Normal distributions
x = dadosn
PMax = x
mat = x
sd = apply(x,2,sd)
for (j in 1:ncol(x))
{
for (i in 1:nrow(x))
{
PMax[i,j] = (integrate(Vectorize(function(x) {prod(pnorm(x,mat[,j][-i],sd[j]))*dnorm(x,mat[,j][[i]],sd[j])}),-2,2)) $value
}}
PMax = PMax[,]
### SAW
z = sum(w)
w = w/z # Normalization of weights
saw = PMax%*%w
rank = rank(-saw)
SAW = cbind(saw, rank)
colnames(SAW) = c("CPP.SAW","Rank")
Result = list(Weights=w, PMax=PMax, CPP=SAW)
Result
}
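## Minimal sketch with toy values: after PMax is computed, the CPP-SAW score
## is a plain matrix product with the normalized weight vector.
PMax.toy = matrix(c(0.6,0.2,0.2, 0.5,0.3,0.2), nrow = 3)  # 3 alternatives, 2 criteria (assumed)
w.toy = c(2, 1); w.toy = w.toy/sum(w.toy)                 # weights normalized to sum to one
saw.toy = PMax.toy %*% w.toy
cbind(saw.toy, rank(-saw.toy))                            # score and preference rank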
/scratch/gouwar.j/cran-all/cranData/CPP/R/cpp_saw.R
#' CPP by weighted sum, with weights computed from Shannon entropy.
#' @description This function computes the CPP-SAW, using Normal distributions to randomize the decision matrix and weights defined by entropy. The CPP-SAW Entropy is used to evaluate alternatives by weighted sum.
#' @param x Decision matrix of Alternatives (rows) and Criteria (columns). Benefit criteria must be positive and cost criteria must be negative.
#' @return Weights are computed by entropy. PMax are the joint probabilities of each alternative being higher than the others, per criterion. CPP returns the alternatives' scores by weighted sum, indicating the preference ranks for decision making.
#' @references Sant'Anna, Annibal P. (2015). Probabilistic Composition of Preferences: Theory and Applications, Springer.
#' @examples
#' ## Decision matrix.
#' Alt.1 = c(2,30,86,-5)
#' Alt.2 = c(4,26,77,-12)
#' Alt.3 = c(3,22,93,-4)
#' Alt.4 = c(6,34,65,-10)
#' Alt.5 = c(5,31,80,-8)
#' Alt.6 = c(6,29,79,-9)
#' Alt.7 = c(8,37,55,-15)
#' Alt.8 = c(10,21,69,-11)
#' x = rbind(Alt.1,Alt.2,Alt.3,Alt.4,Alt.5,Alt.6,Alt.7,Alt.8)
#' CPP.SAW.Entropy(x)
#' @export
CPP.SAW.Entropy = function (x){
### Decision matrix normalization
y = t(as.matrix(apply(x,2,sum)))
dadosn=x
for (j in 1:ncol(x))
{
for (i in 1:nrow(x))
{
dadosn[i,j] = x[i,j]/y[j]
}}
dadosn = replace(dadosn, dadosn == 0, 0.0000000001)
apply(dadosn,2,sum)
# Entropy per criterion (less dispersion indicates a higher entropy and a lower criterion weight)
k = 1/log(nrow(x)) # constant
fun = apply(dadosn, 2, function(x) x*log(x))
ent = apply(fun, 2, sum)
entropia = -k*ent
# Differential factor
d = 1 - entropia
# Normalization by sum
w = as.matrix(d/(sum(d)))
### PMax by Normal distributions
x = dadosn
PMax = x
mat = x
sd = apply(x,2,sd)
for (j in 1:ncol(x))
{
for (i in 1:nrow(x))
{
PMax[i,j] = (integrate(Vectorize(function(x) {prod(pnorm(x,mat[,j][-i],sd[j]))*dnorm(x,mat[,j][[i]],sd[j])}),-2,2)) $value
}}
PMax = PMax[,]
### SAW
saw = PMax%*%w
rank = rank(-saw)
SAW = cbind(saw, rank)
colnames(SAW) = c("CPP.SAW.Ent","Rank")
Result = list(Weights=w, PMax=PMax, CPP=SAW)
Result
}
/scratch/gouwar.j/cran-all/cranData/CPP/R/cpp_saw_ent.R
#' CPP for sorting alternatives in ordinal classes
#' @description This function computes the CPP-Tri, using Beta PERT distributions to randomize the decision matrix. The CPP-Tri is used to classify alternatives, indicating the order of the classes, whose number is defined by the decision maker. The probabilities of each alternative being higher and lower than the classes' profiles are composed by the Progressive-Pessimist (PP) point of view.
#' @param x Decision matrix of Alternatives (rows) and Criteria (columns). Benefit criteria must be positive and cost criteria must be negative.
#' @param q Vector of quantiles, indicating the classes' profiles.
#' @param s Shape of a Beta PERT distribution, as described in the package 'mc2d'. There is no default value, however the higher the shape the higher the kurtosis, which emulates the precision of data.
#' @return Prob.Plus are the probabilities of each alternative being higher than the classes' profiles. Prob.Minus are the probabilities of each alternative being lower than the classes' profiles. CPP.Tri returns the alternatives' classes.
#' @references Sant'Anna, Annibal P. (2015). Probabilistic Composition of Preferences: Theory and Applications, Springer.
#' @references Sant'Anna, Annibal P.; Costa, Helder G.; Pereira, Valdecy (2015). CPP-TRI: a sorting method based on the probabilistic composition of preferences. International Journal of Information and Decision Sciences 7.3, 193-212.
#' @examples
#' Alt.1 = c(2,30,86,-5)
#' Alt.2 = c(4,26,77,-12)
#' Alt.3 = c(3,22,93,-4)
#' Alt.4 = c(6,34,65,-10)
#' Alt.5 = c(5,31,80,-8)
#' Alt.6 = c(6,29,79,-9)
#' Alt.7 = c(8,37,55,-15)
#' Alt.8 = c(10,21,69,-11)
#' x = rbind(Alt.1,Alt.2,Alt.3,Alt.4,Alt.5,Alt.6,Alt.7,Alt.8) # Decision matrix.
#' q = c(0.65,0.35) # quantiles of classes' profiles.
#' s = 4 # Shape
#' CPP.Tri.Beta(x,q,s)
#' @importFrom mc2d dpert ppert
#' @export
CPP.Tri.Beta = function(x,q,s) {
A = x
min = apply(A,2,min)
max = apply(A,2,max)
Perfil = matrix(0,length(q),ncol(A))
for (a in 1:length(q))
{
Perfil[a,] = apply(A,2,function(x) quantile(x,q[a]))
}
A = rbind(Perfil,A)
##### Prob.Max OVER profiles
listac = vector("list", length(q))
for (a in 1:length(q))
{
listac[[a]] = matrix(0,nrow(A),ncol(A))
for (j in 1:ncol(A))
{
for (i in 1:nrow (A))
{
listac[[a]][i,j] = (integrate(Vectorize(function(x) {prod(ppert(x,min[j],A[a,j],max[j],s))*dpert(x,min[j],A[,j][[i]],max[j],s)}),min[j],max[j]))$value
}}}
names(listac) = paste0("Profile",1:length(q))
listac
##### Prob.Min UNDER profiles
listab = vector("list", length(q))
for (a in 1:length(q))
{
listab[[a]] = matrix(0,nrow(A),ncol(A))
for (j in 1:ncol(A))
{
for (i in 1:nrow (A))
{
listab[[a]][i,j] = (integrate(Vectorize(function(x) {prod(1-ppert(x,min[j],A[a,j],max[j],s))*dpert(x,min[j],A[,j][[i]],max[j],s)}),min[j],max[j]))$value
}}}
names(listab) = paste0("Profile",1:length(q))
listab
###################
##### Sorting #####
###################
PPacima = lapply(listac,apply,1,prod)
PPabaixo = lapply(listab,apply,1,prod)
Difs = vector("list", length(q))
for (a in 1:length(q))
{
Difs[[a]] = abs(PPacima[[a]]-PPabaixo[[a]])
}
Difs = t(do.call(rbind, Difs))
Classe = as.matrix(apply(Difs,1,which.min))
Classe = Classe[-c(1:length(q)),]
Classe = matrix(Classe,nrow(x),1)
rownames(Classe) = paste0("Alt",1:nrow(x))
colnames(Classe) = c("Class")
### Results
PPacima = t(do.call(rbind, PPacima))
PPacima = PPacima[-c(1:length(q)),]
PPabaixo = t(do.call(rbind, PPabaixo))
PPabaixo = PPabaixo[-c(1:length(q)),]
Result = list(Prob.Plus = PPacima,Prob.Minus = PPabaixo, CPP.Tri = Classe)
Result
}
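## Minimal sketch with assumed probabilities: each alternative is assigned to
## the class whose profile minimizes |P(above profile) - P(below profile)|,
## which is exactly the which.min() step used above.
PP.above.toy = c(Prof1 = 0.70, Prof2 = 0.20)   # P(alternative above each profile)
PP.below.toy = c(Prof1 = 0.10, Prof2 = 0.60)   # P(alternative below each profile)
which.min(abs(PP.above.toy - PP.below.toy))    # class assigned to this alternative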
/scratch/gouwar.j/cran-all/cranData/CPP/R/cpp_tri_beta.R
#' CPP for sorting alternatives, based on Choquet integrals
#' @description This function computes the CPP-Tri with Choquet integrals, using Beta PERT distributions to randomize the decision matrix. The CPP-Tri is used to classify alternatives, indicating the order of the classes, whose number is defined by the decision maker. The probabilities of each alternative being higher and lower than the classes' profiles are composed by Choquet integrals.
#' @param x Decision matrix of Alternatives (rows) and Criteria (columns). Benefit criteria must be positive and cost criteria negative.
#' @param q Vector of quantiles, indicating the classes' profiles.
#' @param s Shape of a Beta PERT distribution, as described in the package 'mc2d'. There is no default value, however the higher the shape the higher the kurtosis, which emulates the precision of data.
#' @return Choquet.O returns the probabilities of each alternative being higher than the classes' profiles, composed by Choquet integrals. Choquet.U indicates the probabilities of each alternative being lower than the classes' profiles. CPP.Tri.Chq returns the alternatives' classes.
#' @references Sant'Anna, Annibal P. (2015). Probabilistic Composition of Preferences: Theory and Applications, Springer.
#' @references Sant’Anna, Annibal P.; Lima, Gilson B. A.; Gavião, Luiz O. (2018) A Probabilistic approach to the inequality adjustment of the Human Development Index. Pesquisa Operacional 38.1: 99-116.
#' @examples
#' Alt.1 = c(2,30,86,-5)
#' Alt.2 = c(4,26,77,-12)
#' Alt.3 = c(3,22,93,-4)
#' Alt.4 = c(6,34,65,-10)
#' Alt.5 = c(5,31,80,-8)
#' Alt.6 = c(6,29,79,-9)
#' Alt.7 = c(8,37,55,-15)
#' Alt.8 = c(10,21,69,-11)
#' x = rbind(Alt.1,Alt.2,Alt.3,Alt.4,Alt.5,Alt.6,Alt.7,Alt.8) # Decision matrix.
#' q = c(0.65,0.35) # quantiles of classes' profiles.
#' s = 4 # Shape
#' CPP.Tri.Choquet(x,q,s)
#' @importFrom kappalab capacity Choquet.integral Shapley.value
#' @importFrom mc2d dpert ppert
#' @export
CPP.Tri.Choquet = function(x,q,s){
############
### PMax ###
############
m = x
PMax = m
max = apply(m,2,max)
min = apply(m,2,min)
for (j in 1:ncol(m))
{
for (i in 1:nrow(m))
{
PMax[i,j] = (integrate(Vectorize(function(x) {prod(ppert(x,min[j],m[,j][-i],max[j],s))*dpert(x,min[j],m[,j][[i]],max[j],s)}),min[j],max[j])) $value
}}
##############################
### Capacity determination ###
##############################
PMax.m = 1-PMax
n = ncol(m)-1
p = ncol(m)
syn = vector("list", n)
for (i in 2:n)
{
syn[[i]] = 1-(t(apply(PMax.m, 1, function(x){combn(x,i,prod)})))
}
syn = do.call(cbind, syn)
# Subset of all criteria
ult.syn = 1-(apply(PMax.m, 1, function(x){combn(x,p,prod)}))
### MAX value determination
mat.syn = cbind(PMax,syn,ult.syn)
colnames(mat.syn)=NULL
max1.syn = apply(mat.syn,2,max)
max2.syn = max(max1.syn)
### capacities
cap.syn = max1.syn/max2.syn
kappa = c(0,cap.syn)
kappa2= capacity(kappa)
#########################
### Sorting procedure ###
#########################
A = x
min = apply(A,2,min)
max = apply(A,2,max)
Perfil = matrix(0,length(q),ncol(A))
for (a in 1:length(q))
{
Perfil[a,] = apply(m,2,function(x) quantile(x,q[a]))
}
A = rbind(Perfil,A)
##### Probs OVER profiles
listac = vector("list", length(q))
for (a in 1:length(q))
{
listac[[a]] = matrix(0,nrow(A),ncol(A))
for (j in 1:ncol(A))
{
for (i in 1:nrow (A))
{
listac[[a]][i,j] = (integrate(Vectorize(function(x) {prod(ppert(x,min[j],A[a,j],max[j],s))*dpert(x,min[j],A[,j][[i]],max[j],s)}),min[j],max[j]))$value
}}}
names(listac) = paste0("Profile",1:length(q))
listac
##### Probs UNDER profiles
listab = vector("list", length(q))
for (a in 1:length(q))
{
listab[[a]] = matrix(0,nrow(A),ncol(A))
for (j in 1:ncol(A))
{
for (i in 1:nrow (A))
{
listab[[a]][i,j] = (integrate(Vectorize(function(x) {prod(1-ppert(x,min[j],A[a,j],max[j],s))*dpert(x,min[j],A[,j][[i]],max[j],s)}),min[j],max[j]))$value
}}}
names(listab) = paste0("Profile",1:length(q))
listab
#########################
### Choquet integrals ###
#########################
choquet.ac = vector("list", length(q))
for (b in 1:length(q))
{
for (i in 1:nrow(x))
{
choquet.ac[[b]][i] = Choquet.integral(kappa2,listac[[b]][i,])
}}
choquet.ab = vector("list", length(q))
for (b in 1:length(q))
{
for (i in 1:nrow(x))
{
choquet.ab[[b]][i] = Choquet.integral(kappa2,listab[[b]][i,])
}}
Difs = vector("list", length(q))
for (b in 1:length(q))
{
Difs[[b]] = abs(choquet.ac[[b]]-choquet.ab[[b]])
}
Difs = t(do.call(rbind, Difs))
Classe = as.matrix(apply(Difs,1,which.min))
rownames(Classe) = paste0("Alt",1:nrow(x))
colnames(Classe) = c("Class")
Result = list(Choquet.O = choquet.ac, Choquet.U = choquet.ab, CPP.Tri.Chq = Classe)
Result
}
/scratch/gouwar.j/cran-all/cranData/CPP/R/cpp_tri_chq_beta.R
#' Weights by entropy
#' @description This function computes weights by Shannon's entropy.
#' @param x Decision matrix of Alternatives (rows) and Criteria (columns). Benefit criteria must be positive and cost criteria negative.
#' @return Weights for each criterion.
#' @references Pomerol, Jean-Charles & Barba-Romero, Sergio. (2012) Multicriterion Decision in Management: Principles and Practice, Springer.
#' @examples
#' Alt.1 = c(2,30,86,-5)
#' Alt.2 = c(4,26,77,-12)
#' Alt.3 = c(3,22,93,-4)
#' Alt.4 = c(6,34,65,-10)
#' Alt.5 = c(5,31,80,-8)
#' Alt.6 = c(6,29,79,-9)
#' Alt.7 = c(8,37,55,-15)
#' Alt.8 = c(10,21,69,-11)
#' x = rbind(Alt.1,Alt.2,Alt.3,Alt.4,Alt.5,Alt.6,Alt.7,Alt.8) # Decision matrix.
#' Entrop.weights(x)
#' @export
Entrop.weights = function (x) {
### Decision matrix normalization
y = t(as.matrix(apply(x,2,sum)))
dadosn=x
for (j in 1:ncol(x))
{
for (i in 1:nrow(x))
{
dadosn[i,j] = x[i,j]/y[j]
}}
dadosn = replace(dadosn, dadosn == 0, 0.0000000001)
apply(dadosn,2,sum)
# Entropy per criterion (less dispersion indicates a higher entropy and a lower criterion weight)
k = 1/log(nrow(x)) # constant
fun = apply(dadosn, 2, function(x) x*log(x))
ent = apply(fun, 2, sum)
entropia = -k*ent
# Differential factor
d = 1 - entropia
# Normalization by sum
w = d/(sum(d))
w = c(Weights=w)
w
}
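## Illustrative sketch with a toy column-normalized matrix: Shannon entropy
## per criterion and the resulting weights, mirroring the steps above.
p = matrix(c(0.2,0.3,0.5, 0.25,0.25,0.5), nrow = 3)   # assumed normalized columns
k = 1/log(nrow(p))
entropy = -k * colSums(p * log(p))
d = 1 - entropy                                       # differential factor
d/sum(d)                                              # entropy weights (sum to one)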
/scratch/gouwar.j/cran-all/cranData/CPP/R/ent_wei.R
#' Probabilities of maximization, by Beta PERT distributions
#' @description This function computes the probabilities of each alternative maximizing the preference per criterion, using Beta PERT distributions to randomize the decision matrix.
#' @param x Decision matrix of Alternatives (rows) and Criteria (columns). Benefit criteria must be positive and cost criteria must be negative.
#' @param s Shape of a Beta PERT distribution, as described in the package 'mc2d'. There is no default value, however the higher the shape the higher the kurtosis, which emulates the precision of data.
#' @return PMax are the joint probabilities of each alternative being higher than the others, per criterion.
#' @references Sant'Anna, Annibal P. (2015). Probabilistic Composition of Preferences: Theory and Applications, Springer.
#' @examples
#' # Decision matrix
#' Alt.1 = c(2,30,86,-5)
#' Alt.2 = c(4,26,77,-12)
#' Alt.3 = c(3,22,93,-4)
#' Alt.4 = c(6,34,65,-10)
#' Alt.5 = c(5,31,80,-8)
#' x = rbind(Alt.1,Alt.2,Alt.3,Alt.4,Alt.5)
#' s = 4 # Shape
#' PMax.Beta(x,s)
#' @importFrom mc2d dpert ppert
#' @export
PMax.Beta = function (x,s) {
m = x
PMax = x
max = apply(x,2,max)
min = apply(x,2,min)
for (j in 1:ncol(x))
{
for (i in 1:nrow(x))
{
PMax[i,j] = (integrate(Vectorize(function(x) {prod(ppert(x,min[j],m[,j][-i],max[j],s))*dpert(x,min[j],m[,j][[i]],max[j],s)}),min[j],max[j])) $value
}}
PMax[,]
rownames(PMax) = paste0("Alt",1:nrow(m))
colnames(PMax) = paste0("Crit",1:ncol(m))
Result = PMax
Result
}
/scratch/gouwar.j/cran-all/cranData/CPP/R/pmax_beta.R
#' Probabilities of maximization, by Normal distributions
#' @description This function computes the probabilities of each alternative maximizing the preference per criterion, using Normal distributions to randomize the decision matrix.
#' @param x Decision matrix of Alternatives (rows) and Criteria (columns). Benefit criteria must be positive and cost criteria must be negative.
#' @return PMax are the joint probabilities of each alternative being higher than the others, per criterion.
#' @references Sant'Anna, Annibal P. (2015). Probabilistic Composition of Preferences: Theory and Applications, Springer.
#' @examples
#' # Decision matrix
#' Alt.1 = c(2,30,86,-5)
#' Alt.2 = c(4,26,77,-12)
#' Alt.3 = c(3,22,93,-4)
#' Alt.4 = c(6,34,65,-10)
#' Alt.5 = c(5,31,80,-8)
#' x = rbind(Alt.1,Alt.2,Alt.3,Alt.4,Alt.5)
#' PMax.Normal(x)
#' @export
PMax.Normal = function (x) {
### Decision matrix normalization
y = t(as.matrix(apply(x,2,sum)))
dadosn=x
for (j in 1:ncol(x))
{
for (i in 1:nrow(x))
{
dadosn[i,j] = x[i,j]/y[j]
}}
dadosn = replace(dadosn, dadosn == 0, 0.0000000001)
apply(dadosn,2,sum)
# PMax
x = dadosn
PMax = x
mat = x
sd = apply(x,2,sd)
for (j in 1:ncol(x))
{
for (i in 1:nrow(x))
{
PMax[i,j] = (integrate(Vectorize(function(x) {prod(pnorm(x,mat[,j][-i],sd[j]))*dnorm(x,mat[,j][[i]],sd[j])}),-2,2)) $value
}}
PMax[,]
rownames(PMax) = paste0("Alt",1:nrow(mat))
colnames(PMax) = paste0("Crit",1:ncol(mat))
Result = PMax
Result
}
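## Sketch of what the integrand means, under the assumed independent Normal
## model: the probability that alternative 1 has the highest score on a
## criterion, cross-checked by simulation for three made-up alternatives.
mu = c(0.30, 0.35, 0.35); s.d = 0.05
p.analytic = integrate(Vectorize(function(x)
  prod(pnorm(x, mu[-1], s.d)) * dnorm(x, mu[1], s.d)), -2, 2)$value
set.seed(1)
draws = matrix(rnorm(3*1e5, mu, s.d), ncol = 3, byrow = TRUE)
p.montecarlo = mean(apply(draws, 1, which.max) == 1)
c(p.analytic, p.montecarlo)   # the two estimates should be close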
/scratch/gouwar.j/cran-all/cranData/CPP/R/pmax_normal.R
#' Probabilities of minimization, by Beta PERT distributions
#' @description This function computes the probabilities of each alternative minimizing the preference per criterion, using Beta PERT distributions to randomize the decision matrix.
#' @param x Decision matrix of Alternatives (rows) and Criteria (columns). Benefit criteria must be positive and cost criteria must be negative.
#' @param s Shape of a Beta PERT distribution, as described in the package 'mc2d'. There is no default value, however the higher the shape the higher the kurtosis, which emulates the precision of data.
#' @references Sant'Anna, Annibal P. (2015). Probabilistic Composition of Preferences: Theory and Applications, Springer.
#' @return PMin are the joint probabilities of each alternative being lower than the others, per criterion.
#' @examples
#' # Decision matrix
#' Alt.1 = c(2,30,86,-5)
#' Alt.2 = c(4,26,77,-12)
#' Alt.3 = c(3,22,93,-4)
#' Alt.4 = c(6,34,65,-10)
#' Alt.5 = c(5,31,80,-8)
#' x = rbind(Alt.1,Alt.2,Alt.3,Alt.4,Alt.5)
#' s = 4 # Shape
#' PMin.Beta(x,s)
#' @importFrom mc2d dpert ppert
#' @export
PMin.Beta = function (x,s) {
m = x
PMin = x
max = apply(x,2,max)
min = apply(x,2,min)
for (j in 1:ncol(x))
{
for (i in 1:nrow(x))
{
PMin[i,j] = (integrate(Vectorize(function(x) {prod(1-ppert(x,min[j],m[,j][-i],max[j],s))*dpert(x,min[j],m[,j][[i]],max[j],s)}),min[j],max[j])) $value
}}
PMin[,]
rownames(PMin) = paste0("Alt",1:nrow(m))
colnames(PMin) = paste0("Crit",1:ncol(m))
Result = PMin
Result
}
/scratch/gouwar.j/cran-all/cranData/CPP/R/pmin_beta.R
#' Probabilities of minimization, by Normal distributions
#' @description This function computes the probabilities of each alternative minimizing the preference per criterion, using Normal distributions to randomize the decision matrix.
#' @param x Decision matrix of Alternatives (rows) and Criteria (columns). Benefit criteria must be positive and cost criteria must be negative.
#' @references Sant'Anna, Annibal P. (2015). Probabilistic Composition of Preferences: Theory and Applications, Springer.
#' @return PMin are the joint probabilities of each alternative being lower than the others, per criterion.
#' @examples
#' # Decision matrix
#' Alt.1 = c(2,30,86,-5)
#' Alt.2 = c(4,26,77,-12)
#' Alt.3 = c(3,22,93,-4)
#' Alt.4 = c(6,34,65,-10)
#' Alt.5 = c(5,31,80,-8)
#' x = rbind(Alt.1,Alt.2,Alt.3,Alt.4,Alt.5)
#' PMin.Normal(x)
#' @export
PMin.Normal = function (x) {
### Decision matrix normalization
y = t(as.matrix(apply(x,2,sum)))
dadosn=x
for (j in 1:ncol(x))
{
for (i in 1:nrow(x))
{
dadosn[i,j] = x[i,j]/y[j]
}}
dadosn = replace(dadosn, dadosn == 0, 0.0000000001)
apply(dadosn,2,sum)
# PMin
x = dadosn
PMin = x
mat = x
sd = apply(x,2,sd)
for (j in 1:ncol(x))
{
for (i in 1:nrow(x))
{
PMin[i,j] = (integrate(Vectorize(function(x) {prod(1-pnorm(x,mat[,j][-i],sd[j]))*dnorm(x,mat[,j][[i]],sd[j])}),-2,2)) $value
}}
PMin[,]
rownames(PMin) = paste0("Alt",1:nrow(mat))
colnames(PMin) = paste0("Crit",1:ncol(mat))
Result = PMin
Result
}
/scratch/gouwar.j/cran-all/cranData/CPP/R/pmin_normal.R
#' The CPsurv package.
#'
#' A package for nonparametric change point estimation for survival data. See
#' \code{\link{cpsurv}} for the main function.
#'
#' @name CPsurv
#' @docType package
NULL
/scratch/gouwar.j/cran-all/cranData/CPsurv/R/CPsurv-package.R
#' Implements Bootstrap Bias Correction
#'
#' @inheritParams cpest
#' @param changeP Estimated change point.
#' @param censoring Type of right-censoring for simulated data on which the
#' bootstrap bias correction is based. Possible types are "random" for
#' \emph{random censoring} (default), "type1" for \emph{Type I censoring} or
#' "no" for data without censored observations. Because simulated data should
#' be similar to the given data, the censoring type is adapted from the vector
#' 'event' if given, in which case the argument 'censoring' is ignored.
#' @param censpoint Point of \emph{Type I censoring}; if missing, minimum time
#' after which all events are equal to 0 is used. Censpoint is only needed for
#' bootstrap bias correction.
#' @param B.correct Number of bootstrap samples for bias correction; defaults to
#' 49.
#' @param parametric Logical; if \code{TRUE} parametric bootstrap bias
#' correction is used (simulation of bootstrap samples is based on estimated
#' Weibull parameters); otherwise Kaplan-Meier is used for a nonparametric
#' bootstrap bias correction.
#' @param times.int Logical; if \code{TRUE} simulated survival times are
#' integers.
#' @param opt.start Numeric vector of length two; initial values for the Weibull
#' parameters (shape and scale parameters) to be optimized if parametric bootstrap
#' bias correction is used.
#' @return A list with bias-corrected change point and optional estimated shape
#' and scale parameters of the Weibull distribution.
#' @importFrom survival Surv survfit
bootbiascorrect <- function(changeP, time, event, censoring, censpoint, intwd,
cpmax, cpmin, norm.riskset, B.correct, parametric,
times.int, opt.start){
# for parametric bootstrap bias correction estimate shape and scale parameters
# of Weibull distribution
if(parametric){
par.tmp <- nlminb(start = opt.start, objective = neg.loglik.WeibExp,
changeP = changeP, time = time, event = event,
lower = c(1e-08, 1e-08))$par
shape <- par.tmp[1]
scale <- par.tmp[2]
}
# simulate data and estimate change point 'B.correct' times
cp.boot <- vapply(1:B.correct, function(idx){
# simulate data with estimated parameters
data.boot <- sim.survdata(time = time, event = event, changeP = changeP,
censoring = censoring, censpoint = censpoint,
scale = scale, shape = shape,
times.int = times.int, parametric = parametric)
if(sum(data.boot[data.boot$time > cpmax, "event"] == 1) == 0){
# NA is returned, if there are no events above 'cpmax'
NA
} else{
# return estimated change point for current bootstrap-sample
cpest(time = data.boot[,"time"], event = data.boot[,"event"],
intwd = intwd, cpmax = cpmax, cpmin = cpmin,
norm.riskset = norm.riskset)$cp
}
}, numeric(1L))
# indicator, if bc is out of bounds
oob <- logical(2L)
if(any(is.na(cp.boot))){
if(parametric){
return(list(bc = NA, shape = shape, scale = scale, oob = oob))
} else return(list(bc = NA, oob = oob))
}
bias <- median(cp.boot) - changeP
bc <- changeP - bias
if(bc < cpmin){
bc <- cpmin
oob[1] <- TRUE
}
if(bc > cpmax){
bc <- cpmax
oob[2] <- TRUE
}
if(parametric){
list(bc = bc, shape = shape, scale = scale, oob = oob)
} else{
list(bc = bc, oob = oob)
}
}
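## Sketch of the bias-correction step with made-up numbers: the bias is the
## median of the bootstrap estimates minus the original estimate, and it is
## subtracted from the original estimate (then clipped to [cpmin, cpmax]).
changeP.toy = 90
cp.boot.toy = c(96, 88, 101, 93, 95)          # assumed bootstrap change points
bias.toy = median(cp.boot.toy) - changeP.toy  # 95 - 90 = 5
changeP.toy - bias.toy                        # bias-corrected estimate: 85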
/scratch/gouwar.j/cran-all/cranData/CPsurv/R/bootbiascorrect.R
#' Estimates change point using shifted intervals
#'
#' Shifts intervals iteratively and estimates change point at each step. Final
#' change point is calculated by optimization over all estimations.
#'
#' @param time Numeric vector with survival times.
#' @param event Numeric vector indicating censoring status; 0 = alive (censored), 1 =
#' dead (uncensored). If missing, all observations are assumed to be
#' uncensored.
#' @param intwd Width of intervals into which the time period is split; default
#' is \code{ceiling(cpmax/20)}. Has to be an integer value.
#' @param cpmax Upper bound for estimated change point. Time period is split into
#' intervals up to this point. Has to be an integer value.
#' @param cpmin Lower bound for estimated change point; default is
#' \code{cpmin=0}. Has to be an integer value.
#' @param norm.riskset Logical; if \code{TRUE} normalized number of units at
#' risk is used within an interval.
#' @return A list with the estimated change point, the p-values of the exact
#' binomial tests, the mean of the p-values above the estimated change point
#' (part of the regression function), the lower and upper interval limits, and
#' the estimated constant hazard rate above 'cpmax'.
#' @seealso \code{\link{cpsurv}}
cpest <- function(time, event, cpmax, intwd, cpmin, norm.riskset){
nobs <- length(time)
# estimation of lambda (const. hazard rate for t > cpmax)
rate <- sum(event[time > cpmax] == 1) / sum(time[time > cpmax] - cpmax)
#probability for event within an interval
pr = pexp(intwd, rate)
# determine interval limits
limseq <- seq(cpmin, (cpmax + 2 * intwd - 1))
limseq <- limseq[1:(intwd * floor(length(limseq) / intwd))]
lim <- matrix(limseq, nrow = intwd)
results <- matrix(nrow = intwd, ncol = 3)
pvals <- matrix(nrow = intwd, ncol = ncol(lim) - 1)
for(shift in 1:intwd){
# number of events within intervals
x <- table(cut(time[event == 1], lim[shift,]))
if(shift == 1){
cumtab <- c(0, cumsum(table(cut(time, lim[shift,]))))
} else{
cumtab <- cumsum(table(cut(time, c(0, lim[shift,]))))
}
n <- nobs - cumtab[-length(cumtab)]
if(norm.riskset){
# times of right-censored data
tcens <- time[event == 0]
censcut <- cut(tcens, lim[shift, ])
splitted <- split(tcens, censcut)
# correct n by censored observations
n <- vapply(1:(ncol(lim) - 1), function(k){
n[k] - round(sum((lim[shift, k + 1] - splitted[[k]]) / intwd))},
FUN.VALUE = numeric(1L))
}
pv <- vapply(1:(ncol(lim) - 1), function(i){
# pv = P(k >= x) = P(k > x-1)
pbinom(x[i] - 1, n[i], pr, lower.tail = FALSE)
}, FUN.VALUE = numeric(1L))
# means of p-values
mean_pv <- rev(cumsum(rev(pv))) / rev(seq_along(pv))
lower <- lim[shift, -ncol(lim)]
S <- vapply(1:length(lower),
function(i){sum((pv - mean_pv[i] * (lower[i] <= lower))^2)},
FUN.VALUE = numeric(1L))
opt <- which.min(S)
cp <- lim[shift, opt]
if(cp < cpmin) cp <- cpmin
if(cp > cpmax) cp <- cpmax
results[shift, ] <- c(min(S), cp, mean_pv[opt])
pvals[shift, ] <- pv
}
optrow <- which.min(results[ ,1])
list(cp = results[optrow, 2],
p.values = pvals[optrow, ],
pv.mean = results[optrow, 3],
lower.lim = lim[optrow, -ncol(lim)],
upper.lim = lim[optrow, -1],
rate = rate)
}
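
# Illustrative call (not run; assumes the packaged 'survdata' example data):
# estimate the change point directly; cpsurv() wraps this with input checks.
#   cpest(time = survdata$time, event = survdata$event, intwd = 20,
#         cpmax = 360, cpmin = 0, norm.riskset = TRUE)$cp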
|
/scratch/gouwar.j/cran-all/cranData/CPsurv/R/cpest.R
|
#' @name survdata
#' @title Simulated Survival Data
#' @description A simulated dataset with 1500 fake right-censored survival times
#' with a change point at \code{time = 90}.\cr The survival times are Weibull
#' distributed with parameters \code{shape = 0.44} and \code{scale = 100}
#' below the change point and have a constant hazard rate above.
#' @docType data
#' @usage survdata
#' @format \tabular{ll}{ \code{time} \tab survival or censoring time\cr
#' \code{event} \tab censoring status (0 = alive, 1 = dead)}
NULL
|
/scratch/gouwar.j/cran-all/cranData/CPsurv/R/cpsurv-data.R
|
#' @title Nonparametric Change Point Estimation
#'
#' @description Change point estimation for survival data based on exact
#' binomial test.
#'
#' @details Change point is a point in time, from which on the hazard rate is
#' supposed to be constant. For its estimation the timeline up to \code{cpmax}
#' is split into equidistant intervals of width \code{intwd} and exact
#' binomial tests are executed for each interval. The change point is
#' estimated by fitting a regression model on the resulting p-values. See
#' Brazzale \emph{et al} (2017) for details. \cr\cr For bootstrap bias
#' correction the change point is estimated for a given number
#' (\code{B.correct}) of bootstrap samples whereupon the bias is built by
#' subtracting their median from primary estimation. Depending on argument
#' \code{parametric} the data for bootstrapping are simulated either
#' parametric (Weibull distributed with estimated shape and scale parameters)
#' or nonparametric (based on Kaplan-Meier estimation).
#' @inheritParams cpest
#' @param censoring Type of right-censoring for simulated data on which the
#' bootstrap bias correction is based. Possible types are "random" for
#' \emph{random censoring} (default), "type1" for \emph{Type I censoring} or
#' "no" for data without censored observations. Because simulated data should
#' be similar to the given data, the censoring type is adapted from the vector
#' 'event' if it is given; the argument 'censoring' is then ignored.
#' @param censpoint Point of \emph{Type I censoring}; if missing, minimum time
#' after which all events are equal to 0 is used. Censpoint is only needed for
#' bootstrap bias correction.
#' @param biascorrect Logical; if \code{TRUE}, a bootstrap bias correction is
#' performed; see 'Details'.
#' @param parametric Indicator for parametric bias-correction (see Details for
#' more information).
#' @param B.correct Number of bootstrap samples for bias-correction; defaults to
#' 49.
#' @param opt.start Numeric vector of length two; initial values for the Weibull
#' parameters (shape and scale parameters) to be optimized if parametric
#' bootstrap bias correction is used.
#' @param boot.ci Logical; if \code{TRUE}, confidence intervals (and thereby the
#' standard deviation) are calculated by bootstrap sampling. Please note the
#' extended runtime (see 'Examples' for an indication).
#' @param B Number of bootstrap samples for confidence intervals; defaults to
#' 999.
#' @param conf.level Confidence level for bootstrap confidence intervals.
#' @param seed Seed for random number generator (optional).
#' @param parallel Logical; if \code{TRUE}, bootstrap sampling is executed in
#' parallel (based on package 'parallel'); the operating system is identified
#' automatically.
#' @param cores Number of CPU-cores that are used for parallelization; maximum
#' possible value is the detected number of logical CPU cores.
#' @return \tabular{ll}{ \code{cp}\tab estimated change point\cr
#' \code{p.values}\tab p-values resulting from exact binomial test\cr
#' \code{pv.mean}\tab mean of p-values for intervals above the estimated
#' change point\cr \code{lower.lim}\tab lower interval limits\cr
#' \code{upper.lim}\tab upper interval limits\cr \code{cp.bc}\tab bias
#' corrected change point\cr \code{ml.shape}\tab ML estimator of shape
#' parameter for Weibull distribution\cr \code{ml.scale}\tab ML estimator of
#' scale parameter for Weibull distribution\cr \code{cp.boot}\tab estimated
#' change points for bootstrap samples\cr \code{sd}\tab standard deviation
#' estimated by bootstrap sampling\cr \code{ci.normal}\tab confidence interval
#' with normal approximation\cr \code{ci.percent}\tab bootstrap percentile
#' interval\cr \code{conf.level}\tab the \code{conf.level} argument passed to
#' \code{cpsurv}\cr \code{B}\tab the \code{B} argument passed to
#' \code{cpsurv}\cr \code{time}\tab the \code{time} argument passed to
#' \code{cpsurv}\cr \code{event}\tab the \code{event} argument passed to
#' \code{cpsurv}\cr \code{cpmax}\tab the \code{cpmax} argument passed to
#' \code{cpsurv}\cr \code{intwd}\tab the \code{intwd} argument passed to
#' \code{cpsurv}\cr \code{call}\tab matched call}
#' @references Brazzale, A. R. and Küchenhoff, H. and Krügel, S. and Hartl, W.
#' (2017) \emph{Nonparametric change point estimation for survival
#' distributions with a partially constant hazard rate.}
#' @author Stefanie Krügel \email{stefanie.kruegel@@gmail.com}
#' @examples
#' data(survdata)
#' # estimate change point for survdata (random censored)
#' cp <- cpsurv(survdata$time, survdata$event, cpmax = 360, intwd = 20)
#' summary(cp)
#'
#' \dontrun{
#' # estimation with parametric bootstrap bias correction
#' cp_param <- cpsurv(survdata$time, survdata$event, cpmax = 360, intwd = 20,
#' biascorrect = TRUE, parametric = TRUE)
#' summary(cp_param)
#'
#' # with bootstrap confidence intervals and parametric bootstrap bias
#' cp_ci <- cpsurv(survdata$time, survdata$event, cpmax = 360, intwd = 20,
#' biascorrect = TRUE, parametric = FALSE, boot.ci = TRUE, cores = 4, seed = 36020)
#' # runtime: approx. 180 min (with Intel(R) Core(TM) i7 CPU 950 @ 3.07GHz, 4 logical CPUs used)
#' }
#' @import parallel
#' @importFrom stats dweibull median nlminb pbinom pexp predict pweibull qexp
#' qnorm qweibull runif sd smooth.spline spline
#' @export
cpsurv <- function(time, event, cpmax, intwd, cpmin = 0,
censoring = c("random", "type1", "no"), censpoint = NULL,
biascorrect = FALSE, parametric = FALSE, B.correct = 49,
opt.start = c(0.1, 50), boot.ci = FALSE,
B = 999, conf.level = 0.95, norm.riskset = TRUE, seed = NULL,
parallel = TRUE, cores = 4L){
if (!is.null(seed)) {
if(parallel) RNGkind(kind = "L'Ecuyer-CMRG")
set.seed(as.integer(seed))
}
# function for input checking
is_single_num <- function(x, lower = -Inf, upper = Inf){
if(is.na(x) || !is.numeric(x) || (length(x) != 1)){
stop("Argument '", substitute(x), "' is not a single numeric value.")
}
if(x < lower) stop("Value for argument '", substitute(x), "' too low.")
if(x > upper) stop("Value for argument '", substitute(x), "' too high.")
}
stopifnot(is.numeric(time),
all(sapply(time, function(z) z >= 0)),
length(time) >= 1)
if(missing(event))
event <- rep.int(1, length(time))
stopifnot(is.numeric(event), length(event) >= 1)
if(!all(sapply(event, function(z) z %in% c(0,1)))){
stop("Argument 'event' has to be binary.")
}
is_single_num(cpmax, lower = 1L)
is_single_num(cpmin, lower = 0L, upper = cpmax)
if(!is.null(censpoint)) is_single_num(censpoint, lower = 0L, upper = max(time))
is_single_num(B.correct, lower = 1L)
is_single_num(B, lower = 1L)
is_single_num(conf.level, lower = 0L, upper = 1L)
is_single_num(cores, lower = 1L)
if(missing(intwd)) intwd <- ceiling(cpmax / 20)
is_single_num(intwd, lower = 1L)
stopifnot(!is.na(norm.riskset), is.logical(norm.riskset), length(norm.riskset) == 1,
!is.na(biascorrect), is.logical(biascorrect), length(biascorrect) == 1,
!is.na(parametric), is.logical(parametric), length(parametric) == 1,
!is.na(boot.ci), is.logical(boot.ci), length(boot.ci) == 1,
!is.na(parallel), is.logical(parallel), length(parallel) == 1)
if((cpmax%%intwd) != 0){
    cpmax <- cpmax + (intwd - cpmax %% intwd) #round up to next multiple of intwd
warning("'cpmax' is not a multiple of 'intwd'; cpmax corrected to ", cpmax)
}
censoring <- match.arg(censoring)
if(!identical(length(time), length(event)))
stop("Vectors 'time' and 'event' must be of equal length.")
if(sum(event[time > cpmax] == 1) == 0){
stop("No events with 'time' > 'cpmax'; ",
"choose lower value for 'cpmax'.")
}
if(!is.vector(opt.start) || !length(opt.start) == 2 || !all(opt.start > 0)){
stop("Invalid 'opt.start' argument. See documentation.")
}
#----------------------------------------------------------------------------
# check censoring
if(all(event %in% 1)){
censoring <- "no"
}
if(censoring == "type1" && is.null(censpoint)){
# point of type 1 censoring
censpoint <- min(time[sapply(seq_along(time), function(i)
all(event[time >= time[i]] == 0))])
warning("point for Type I censoring missing; 'censpoint' was set to ", censpoint)
}
nobs <- length(time)
# if times of given data are integers, simulated times are also integers
check.integer <- function(x){
x == round(x)
}
if(all(sapply(time, check.integer))){
times.int <- TRUE
} else{
times.int <- FALSE
}
# estimated change point
cp <- cpest(time = time, event = event, intwd = intwd, cpmax = cpmax,
cpmin = cpmin, norm.riskset = norm.riskset)
changeP <- cp$cp
if(biascorrect){
bcresult <- bootbiascorrect(changeP = changeP, time = time, event = event,
censoring = censoring, censpoint = censpoint,
intwd = intwd, opt.start = opt.start,
cpmax = cpmax, cpmin = cpmin,
norm.riskset = norm.riskset,
B.correct = B.correct, parametric = parametric,
times.int = times.int)
if(is.na(bcresult$bc)){
if(parametric == TRUE){
        warning("Parametric bootstrap bias correction not possible; ",
                "maybe times are not Weibull distributed")
      } else{
        warning("Nonparametric bootstrap bias correction not possible ",
                "(no events above 'cpmax' in simulated data)")
}
}
cp$cp.bc <- bcresult$bc
if(parametric){
cp$ml.shape <- bcresult$shape
cp$ml.scale <- bcresult$scale
}
if(bcresult$oob[1] == TRUE){
warning("bias corrected change point out of bounds; it was set to 'cpmin'")
} else if(bcresult$oob[2] == TRUE){
warning("bias corrected change point out of bounds; it was set to 'cpmax'")
}
}
# variance estimation by bootstrap sampling
if(boot.ci){
bootstrap <- function(x){
boot.out <- NA
while(is.na(boot.out)){
samp <- sample(1:nobs, size = nobs, replace=T)
cp.tmp <- cpest(time = time[samp], event = event[samp], intwd = intwd,
cpmax = cpmax, cpmin = cpmin, norm.riskset = norm.riskset)$cp
if(biascorrect){
boot.out <- bootbiascorrect(changeP = cp.tmp, time = time[samp],
event = event[samp], censoring = censoring,
censpoint = censpoint, intwd = intwd,
cpmax = cpmax, cpmin = cpmin,
norm.riskset = norm.riskset, B.correct = B.correct,
parametric = parametric, times.int = times.int,
opt.start = opt.start)$bc
} else{
boot.out <- cp.tmp
}
}
boot.out
}
if(parallel){
if(parallel::detectCores() < cores) cores <- parallel::detectCores()
if(.Platform$OS.type != "windows") {
cpboot <- parallel::mclapply(1:B, FUN = bootstrap, mc.cores = cores)
} else{
cl <- parallel::makePSOCKcluster(rep("localhost", cores))
parallel::clusterExport(cl = cl, varlist = c("bootbiascorrect",
"neg.loglik.WeibExp", "cpest", "sim.survdata",
"km.sim.survtimes", "nobs", "time", "event", "intwd", "cpmax",
"cpmin", "norm.riskset", "censoring", "censpoint", "biascorrect", "B.correct",
"parametric", "times.int", "opt.start"),
envir = environment())
if(!is.null(seed)) parallel::clusterSetRNGStream(cl)
cpboot <- parallel::parLapply(cl, 1:B, fun = bootstrap)
parallel::stopCluster(cl)
}
cpboot <- unlist(cpboot)
} else{
cpboot <- vapply(1:B, bootstrap, FUN.VALUE = numeric(1L))
}
cp$cp.boot <- cpboot
# standard deviation
sd_boot <- sd(cpboot)
cp$sd <- sd_boot
# confidence intervals
quant <- (1 + c(-conf.level, conf.level))/2
#normal approximation
if(biascorrect){
ci.normal <- c(cp$cp.bc - qnorm(quant[2]) * sd_boot,
cp$cp.bc + qnorm(quant[2]) * sd_boot)
} else{
ci.normal <- c(changeP - qnorm(quant[2]) * sd_boot,
changeP + qnorm(quant[2]) * sd_boot)
}
if(ci.normal[1] < 0){
ci.normal[1] <- 0
warning("lower bound of confidence interval was set to 0")
}
if(ci.normal[2] > cpmax){
ci.normal[2] <- cpmax
warning("upper bound of confidence interval was set to 'cpmax'")
}
cp$ci.normal <- ci.normal
#percentile interval
perc <- (B + 1) * quant
ptrunc <- trunc(perc)
cpsort <- sort(cpboot)
percint <- numeric(2L)
percint[1] <- ifelse(ptrunc[1] == 0, cpsort[1L], cpsort[ptrunc[1]])
percint[2] <- ifelse(ptrunc[2] == B, cpsort[B], cpsort[ptrunc[2]])
if(ptrunc[1] == 0 || ptrunc[2] == B){
warning("extreme order statistics used as endpoints for percentile interval")
}
cp$ci.percent <- percint
cp$conf.level <- conf.level
cp$B <- B
}
structure(c(cp, list(time = time, event = event, cpmax = cpmax, intwd = intwd,
call = match.call())), class = "cpsurv")
}
|
/scratch/gouwar.j/cran-all/cranData/CPsurv/R/cpsurv.R
|
#' Simulates Survival Times using Kaplan-Meier
#'
#' @inheritParams cpest
#' @param nobs Number of observations.
#' @param weibexp Logical; if \code{TRUE}, survival times above change point have
#' constant hazard; if \code{FALSE} all survival times are generated by using
#' the estimated survival curve (relevant for generation of censoring times).
#' @param changeP Change point
km.sim.survtimes <- function(nobs, time, event, weibexp, changeP = NULL){
survdata <- survival::Surv(time, event, type = "right")
# Kaplan-Meier estimation
km <- survival::survfit(survdata ~ 1, type = "kaplan-meier")
# fitted survival curve
surv <- summary(km)$surv
survtimes <- summary(km)$time
# smoothed survival function
S <- spline(predict(smooth.spline(round(surv, 4), survtimes),
x = seq(1e-04, 1, by = 1e-04)), n=1e4)
S$x <- round(S$x, 4)
names(S) <- c("Surv", "survtimes")
u <- round(runif(nobs, min = 1e-04, max = 0.9999), 4)
if(weibexp){
rateE <- sum(event[time > changeP] == 1) / sum(time[time > changeP] - changeP)
# Survivalfunction at change point
S.chp <- S$Surv[which.min(abs(S$survtimes - changeP))]
sim.weib.exp <- function(u, S, S.chp){
if(u > S.chp) t <- S$survtimes[which(S$Surv == u)]
else t <- changeP + qexp(p = u/S.chp, rate = rateE, lower.tail = F)
t
}
sapply(u, FUN = sim.weib.exp, S, S.chp)
} else{
sapply(u, function(u) S$survtimes[which(S$Surv == u)])
}
}
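
# Illustrative call (not run; assumes the packaged 'survdata' example data):
# draw 100 survival times from the smoothed Kaplan-Meier curve, with a
# constant hazard above an assumed change point of 90.
#   km.sim.survtimes(nobs = 100, time = survdata$time, event = survdata$event,
#                    weibexp = TRUE, changeP = 90)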
|
/scratch/gouwar.j/cran-all/cranData/CPsurv/R/km.sim.survtimes.R
|
#' @include cpsurv.R
NULL
#' @title Summarize and print cpsurv objects
#' @description Summary and print methods for objects inheriting from a call to
#' \code{\link{cpsurv}}.
#' @seealso \code{\link{cpsurv}}
#' @name summarize.cpsurv
NULL
#' @param x An object of class \code{cpsurv} or \code{summary.cpsurv} to be
#' printed out.
#' @param ... not used
#' @rdname summarize.cpsurv
#' @export
print.cpsurv <- function(x, ...){
cat("Estimated change point:\n")
if(!is.null(x$cp.bc)){
cat(" ", x$cp.bc, "\n")
}else{
cat(" ", x$cp, "\n")
}
}
#' @details The main results from \code{cpsurv} are printed out in a
#' well-arranged format. If the estimated change point is bias corrected, both
#' estimates (the original, and the corrected one) are shown in the summary.
#' If a bootstrap-sampling was executed, the output contains a summary of the
#' resultant bootstrap-estimates.
#' @param object An object of class \code{cpsurv}.
#' @examples
#' data(survdata)
#' cpest <- cpsurv(survdata$time, survdata$event, cpmax = 360)
#' summary(cpest)
#' @rdname summarize.cpsurv
#' @export
summary.cpsurv <- function(object, ...){
if(is.null(object$cp.bc)){
estimate <- object$cp
} else{
estimate <- object$cp.bc
}
if(!is.null(object$sd)){
estimate["std.error"] <- round(object$sd, 3)
qq <- stats::quantile(object$cp.boot)
names(qq) <- c("Min", "1Q", "Median", "3Q", "Max")
quant <- (1 + c(-object$conf.level, object$conf.level))/2
normal <- as.character(round(object$ci.normal, 4))
names(normal) <- c(paste0((quant[1]* 100), "%"), paste0((quant[2] * 100), "%"))
percentile <- as.character(object$ci.percent)
int.out <- rbind(normal, percentile)
colnames(int.out) <- c(paste0((quant[1]* 100), "%"), paste0((quant[2] * 100), "%"))
structure(c(object,
list(estimate = estimate, qq = qq, int.out = int.out, normal = normal,
percentile = percentile)),
class = "summary.cpsurv")
} else{
structure(c(object, list(estimate = estimate)),
class = "summary.cpsurv")
}
}
#' @rdname summarize.cpsurv
#' @export
print.summary.cpsurv <- function(x, ...){
bc <- !is.null(x$cp.bc)
boot <- !is.null(x$sd)
param <- !is.null(x$ml.shape)
cat("\nNONPARAMETRIC CHANGE POINT ESTIMATION\n")
#cat("``````````````````´´´´´´´´´´´´´´´´´´\n")
if(bc){
partext <- ifelse(param, "PARAMETRIC", "NONPARAMETRIC")
cat("- with", partext, "Bootstrap Bias Correction\n")
}
cat("\nCALL: \n")
dput(x$call, control = NULL)
if(boot){
names(x$estimate) <- c("change point", "std.error")
cat("\nEstimation: \n")
print(x$estimate)
} else{
cat("\nestimated change point: ", x$estimate, "\n")
}
cat("(based on exact binomial tests for intervals of width ",
x$intwd, ")\n", sep = "")
if(boot){
cat("\n\nBootstrap Confidence Intervals:\n")
print(x$int.out, quote = FALSE)
cat("\n(based on", x$B, "bootstrap replicates)\n")
cat("\nBootstrap-Estimations:\n")
print(x$qq)
}
if(param){
cat("\n\nEstimated Weibull parameters (used for bias correction):\n")
cat(" shape:", round(x$ml.shape, 3), " scale:", round(x$ml.scale, 3))
}
}
#' Plot method for objects of class cpsurv
#'
#' Plot method for objects of class 'cpsurv' inheriting from a call to
#' \code{\link{cpsurv}}.
#'
#' @details The value \code{type = "pvals"} produces a plot with p-values used
#' to estimate the stump regression model with superimposed least squares
#' regression line. For \code{type = "events"} a barplot is produced with
#' frequency of events per unit at risk for each interval (with length
#' \code{intwd}). For \code{type = "hazard"} the estimated hazard rate (based
#' on \code{\link[muhaz]{muhaz}}) is plotted with optional (normal- or
#' percentile-) confidence intervals and the estimated constant hazard rate.
#'
#' @param x An object of class 'cpsurv' (estimated with \code{cpsurv}).
#' @param type A vector of character strings to select the plots for printing.
#' The value should be any subset of the values c("pvals", "events", "hazard")
#' or simply "all", where all possible plots are shown.
#' @param ci.type Character representing the type of confidence interval to plot
#' (if existing); "perc" for percentile interval and "norm" for CI with normal
#' approximation (default is "perc").
#' @param ci Logical; if \code{TRUE}, a bootstrap confidence interval is plotted
#' (if existing).
#' @param const.haz Logical; if \code{TRUE}, the estimated constant hazard rate
#' is plotted.
#' @param regline Logical; if \code{TRUE}, the regression line is plotted.
#' @param legend Logical; if \code{TRUE}, the plots contain legends.
#' @param xlim Vector with x limits (timeline) for each plot if supplied;
#' default is c(0, x$cpmax).
#' @param ylim Vector with y limits for plots of type "events" and "hazard". For
#' changing ylim for only one of them, plot them separately by use of argument
#' 'type'.
#' @param main Main title for each plot if supplied.
#' @param xlab Character vector used as x label for all plots if supplied.
#' @param ylab Character vector used as y label for all plots if supplied.
#' @param min.time Left bound of time domain used for
#' \code{\link[muhaz]{muhaz}}. If missing, min.time is considered 0.
#' @param max.time Right bound of time domain used for
#' \code{\link[muhaz]{muhaz}}. If missing, value 'cpmax' of object x is used.
#' @param n.est.grid Number of points in the estimation grid, where hazard
#' estimates are computed (used for \code{\link[muhaz]{muhaz}}). Default value
#' is 101.
#' @param ask If \code{TRUE}, the user is asked for input, before a new figure
#' is drawn.
#' @param ... Additional arguments passed through to plotting functions.
#' @seealso \code{\link[muhaz]{muhaz}}
#' @examples
#' data(survdata)
#' cp <- cpsurv(survdata$time, survdata$event, cpmax = 360, intwd = 10)
#' plot(cp, ask = FALSE)
#'
#' \dontrun{
#' cp <- cpsurv(survdata$time, survdata$event, cpmax = 360, intwd = 10,
#' boot.ci = TRUE)
#' plot(cp, type = "pvals", ask = FALSE)
#' }
#' @importFrom muhaz muhaz
#' @importFrom grDevices devAskNewPage
#' @importFrom graphics abline barplot lines plot
#' @method plot cpsurv
#' @export
plot.cpsurv <- function(x, type = "all", ci = TRUE, ci.type = c("perc", "norm"),
const.haz = TRUE, regline = TRUE, legend = TRUE,
xlim = NULL, ylim = NULL, main = NULL, xlab = NULL,
ylab = NULL, min.time, max.time,
n.est.grid = 101, ask = TRUE, ...){
ci.type <- match.arg(ci.type)
stopifnot(is.logical(ci), is.logical(const.haz), is.logical(regline))
if(missing(min.time))
min.time <- 0
if(missing(max.time))
max.time <- x$cpmax
if(is.null(xlim)) xlim <- c(0, x$cpmax)
cp <- ifelse(!is.null(x$cp.bc), x$cp.bc, x$cp)
existci <- !is.null(x$ci.normal)
if (ask) {
oask <- devAskNewPage()
devAskNewPage(TRUE)
on.exit(devAskNewPage(oask))
}
if (any(type == "all" | type == "pvals")){
ylim1 <- c(0,1)
if(is.null(xlab)){
xlab1 <- "follow-up time"
} else{xlab1 <- xlab}
if(is.null(ylab)){
ylab1 <- "p-values"
} else{ylab1 <- ylab}
plot(x$lower.lim, x$p.values, xlim = xlim, ylim = ylim1,
xlab = xlab1, ylab = ylab1, main = main, ...)
if(regline){
lines(x = c(0, cp), y = c(0, 0), lwd = 2)
lines(x = c(cp, x$lower.lim[length(x$lower.lim)]),
y = c(x$pv.mean, x$pv.mean), lwd = 2)
lines(x = c(cp, cp), y = c(0, x$pv.mean), lty = 3)
if(legend)
legend("topleft", "Regression line", col = 1, lwd = 2, bg = "white")
}
}
if (any(type == "all" | type == "events")){
if(is.null(ylab)){
ylab1 <- "events per units at risk"
} else{ylab1 <- ylab}
if(is.null(main)){
main1 <- "Relative frequency of events per interval"
} else{main1 <- main}
if(all(x$event == 1)){
events <- table(x$event, cut(x$time, seq(xlim[1], xlim[2], by=x$intwd)))
} else{
events <- table(x$event, cut(x$time, seq(xlim[1], xlim[2], by=x$intwd)))[2,]
}
riskset <- (length(x$time) - c(0, cumsum(events)[-length(events)]))
relevents <- events / riskset
barplot(relevents, ylab = ylab1, xlab = xlab, main = main1,
ylim = ylim, col = "gray")
}
if (any(type == "all" | type == "hazard")){
mh <- muhaz::muhaz(times = x$time, delta = x$event, min.time = min.time,
max.time = max.time,n.est.grid = n.est.grid)
if(is.null(xlab)){
xlab1 <- "follow-up time"
} else{xlab1 <- xlab}
if(is.null(ylab)){
ylab1 <- "hazard rate"
} else{ylab1 <- ylab}
plot(mh, xlim = xlim, main = main, xlab = xlab1, ylab = ylab1, ylim = ylim, ...)
haztext <- NULL
if(const.haz){
exprate <- sum(x$event[x$time > x$cpmax] == 1) /
sum(x$time[x$time > x$cpmax] - x$cpmax)
abline(h = exprate, col = "grey60")
haztext <- "const.hazard"
lines(mh, mgp = c(2.5,1,0))
}
abline(v=cp)
if(ci && existci){
if(ci.type == "perc"){
abline(v = x$ci.percent[1], lty = 2, col = "grey40")
abline(v = x$ci.percent[2], lty = 2, col = "grey40")
if(legend)
legend("topright", c("changepoint", "perc.int", haztext),
col = c(1,"grey40","grey60"),lty = c(1,2,1), bg = "white")
} else{
abline(v = x$ci.normal[1], lty = 2, col = "grey40")
abline(v = x$ci.normal[2], lty = 2, col = "grey40")
if(legend)
legend("topright", c("changepoint", "normal.ci", haztext),
col = c(1,"grey40","grey60"),lty = c(1,2,1), bg = "white")
}
} else if(legend){
legend("topright", c("changepoint", haztext),
col = c(1,"grey60"),lty = c(1,1), bg = "white")
}
}
}
|
/scratch/gouwar.j/cran-all/cranData/CPsurv/R/methods.R
|
#' Negative Log-Likelihood for Weibull-Exponential Distribution
#'
#' @param param Shape and scale parameter for Weibull distribution.
#' @param changeP Changepoint.
#' @param time Vector of survival times.
#' @param event Vector indicating censoring status; 0 = alive (censored), 1 = dead
#' (uncensored).
#' @return Value of the negative log-likelihood.
neg.loglik.WeibExp <- function(param, changeP, time, event)
{
shape <- param[1]
scale <- param[2]
rateE <- shape/scale * (changeP/scale)^(shape-1)
#
is.W <- as.numeric(time <= changeP)
x.E <- (time - changeP)
#
ll1 <- event * is.W * dweibull(x=time, shape=shape, scale=scale)
ll2 <- (1-event) * is.W * pweibull(q=time, shape=shape, scale=scale, lower.tail=F)
ll3 <- event * (1-is.W) * rateE * pweibull(q=changeP, shape=shape, scale=scale,
lower.tail=F) * pexp(q=x.E, rate=rateE, lower.tail=F)
ll4 <- (1-event) * (1-is.W) * pweibull(q=changeP, shape=shape, scale=scale,
lower.tail=F) * pexp(q=x.E, rate=rateE, lower.tail=F)
ll <- ll1 + ll2 + ll3 + ll4
-sum(log(ll))
}
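
# Illustrative minimisation (not run), mirroring its use in bootbiascorrect();
# the starting values and the change point of 90 are assumptions for the sketch.
#   nlminb(start = c(0.1, 50), objective = neg.loglik.WeibExp, changeP = 90,
#          time = survdata$time, event = survdata$event,
#          lower = c(1e-08, 1e-08))$par   # returns c(shape, scale)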
|
/scratch/gouwar.j/cran-all/cranData/CPsurv/R/neg.loglik.WeibExp.R
|
#' Simulate Survival Data with Change Point
#'
#' Simulates Weibull distributed survival data from a given data set with
#' change point above which hazard rate is constant.
#'
#' @inheritParams cpest
#' @param changeP Change point.
#' @param shape Shape parameter of Weibull distribution.
#' @param scale Scale parameter of Weibull distribution.
#' @param censoring Logical; if \code{TRUE}, censored data are generated.
#' @param censpoint Censoring point for Type I censoring.
#' @param times.int Logical; if \code{TRUE}, returned survival times are
#' integers.
#' @param parametric Logical; if \code{TRUE}, survival times are generated
#' parametrically by inverse transform sampling; otherwise Kaplan-Meier is
#' used for simulation.
#' @return A dataset with survival times and corresponding censoring status
#' ('event').
sim.survdata <-function(time, event, changeP, shape, scale, censoring, censpoint,
times.int, parametric){
nobs <- length(time)
if(parametric){
# rate of exponential distr.
rateE <- shape / scale * (changeP / scale) ^ (shape - 1)
# Survivorfunction at change point
St <- pweibull(q = changeP, shape = shape, scale = scale, lower.tail = FALSE)
u <- runif(nobs)
times.sim <- numeric(nobs)
times.sim[u > St] <- qweibull(p = u[u > St], shape = shape, scale = scale,
lower.tail = FALSE)
times.sim[u <= St] <- changeP + qexp(p = u[u <= St] /
pweibull(q = changeP, shape = shape,
scale = scale,
lower.tail = FALSE),
rate = rateE, lower.tail = FALSE)
} else{
# nonparametric simulation of survivaltimes (by Kaplan-Meier)
times.sim <- km.sim.survtimes(nobs = nobs, time = time, event = event,
weibexp = TRUE, changeP = changeP)
}
data <- data.frame(time = times.sim, event = rep(1, nobs))
#-----------------censoring--------------------
if(censoring == "random"){
censtimes <- time[event == 0]
times.sim.cens <- km.sim.survtimes(nobs = nobs, time = censtimes,
event = rep(1, length(censtimes)),
weibexp = FALSE)
data$event <- as.numeric(data$time <= times.sim.cens)
data$time <- pmin(data$time, times.sim.cens)
}else if(censoring == "type1"){
data$event[data$time >= censpoint] <- 0
data$time[data$time >= censpoint] <- censpoint
}
if(times.int){
data$time <- ceiling(data$time)
}
data
}
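
# Illustrative call (not run): parametric simulation with assumed Weibull
# parameters (shape = 0.44, scale = 100, as in the packaged 'survdata') and
# Type I censoring at an assumed time of 300.
#   sim.survdata(time = survdata$time, event = survdata$event, changeP = 90,
#                shape = 0.44, scale = 100, censoring = "type1",
#                censpoint = 300, times.int = TRUE, parametric = TRUE)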
|
/scratch/gouwar.j/cran-all/cranData/CPsurv/R/sim.survdata.R
|
#' @export
#' @importFrom stats printCoefmat
print.CR2 <- function(x, ...){
cat("\nStandard error type =", x$crtype, '\n')
cat("Degrees of freedom =", x$df, '\n\n')
printCoefmat(x$ttable, x$digits)
}
#' Compute the inverse square root of a matrix
#'
#' From Imbens and Kolesar (2016).
#' @param A The matrix object.
#' @export
#' @return Returns a matrix.
MatSqrtInverse <- function(A) {
ei <- eigen(A, symmetric = TRUE) #obtain eigenvalues and eigenvectors
d <- pmax(ei$values, 10^-12) #set negatives values to zero
#or near zero 10^-12
d2 <- 1/sqrt(d) #get the inverse of the square root
d2[d == 0] <- 0
ei$vectors %*% (if (length(d2)==1) d2 else diag(d2)) %*% t(ei$vectors)
}
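
# Quick sanity check (not run): for a symmetric positive definite A, the
# returned matrix B satisfies B %*% B = solve(A) up to numerical tolerance.
#   A <- crossprod(matrix(rnorm(9), 3))
#   B <- MatSqrtInverse(A)
#   all.equal(B %*% B, solve(A))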
|
/scratch/gouwar.j/cran-all/cranData/CR2/R/S3-methods.R
|
#' Cluster robust standard errors with degrees of freedom adjustments (for lm and glm objects)
#'
#' Function to compute the CR0, CR1, CR2 cluster
#' robust standard errors (SE) with Bell and McCaffrey (2002)
#' degrees of freedom (df) adjustments. Useful when dealing with datasets with a few clusters.
#' Shows output using different CR types and degrees of freedom choices (for comparative purposes only).
#' For linear and logistic regression models (as well as other GLMs). Computes the BRL-S2 variant.
#'
#' @importFrom stats nobs resid residuals var coef pt model.matrix family weights fitted.values
#' @param mod The \code{lm} model object.
#' @param clust The cluster variable (with quotes).
#' @param digits Number of decimal places to display.
#' @param ztest Logical; if \code{TRUE}, a normal approximation (infinite degrees
#' of freedom) is used as the naive degrees of freedom. If \code{FALSE}, the
#' between-within degrees of freedom are used.
#' @return A data frame with the CR adjustments with p-values.
#' \item{estimate}{The regression coefficient.}
#' \item{se.unadj}{The model-based (regular, unadjusted) SE.}
#' \item{CR0}{Cluster robust SE based on Liang & Zeger (1986).}
#' \item{CR1}{Cluster robust SE (using an adjustment based on number of clusters).}
#' \item{CR2}{Cluster robust SE based on Bell and McCaffrey (2002).}
#' \item{tCR2}{t statistic based on CR2.}
#' \item{dfn}{Degrees of freedom (naive): can be infinite (z) or between-within (default). User specified.}
#' \item{dfBM}{Degrees of freedom based on Bell and McCaffrey (2002).}
#' \item{pv.unadj}{p value based on model-based standard errors.}
#' \item{CR0pv}{p value based on CR0 SE with dfBM.}
#' \item{CR0pv.n}{p value based on CR0 SE with naive df.}
#' \item{CR1pv}{p value based on CR1 SE with dfBM.}
#' \item{CR1pv.n}{p value based on CR1 SE with naive df.}
#' \item{CR2pv}{p value based on CR2 SE with dfBM.}
#' \item{CR2pv.n}{p value based on CR2 SE with naive df.}
#'
#' @examples
#' clustSE(lm(mpg ~ am + wt, data = mtcars), 'cyl')
#' data(sch25)
#' clustSE(lm(math ~ ses + minority + mses + mhmwk, data = sch25), 'schid')
#'
#' @references
#' \cite{Bell, R., & McCaffrey, D. (2002). Bias reduction in standard errors for linear regression with multi-stage samples. Survey Methodology, 28, 169-182.
#' (\href{https://www150.statcan.gc.ca/n1/pub/12-001-x/2002002/article/9058-eng.pdf}{link})}
#'
#' Liang, K.Y., & Zeger, S. L. (1986). Longitudinal data analysis using generalized linear models. \emph{Biometrika, 73}(1), 13–22.
#' \doi{10.1093/biomet/73.1.13}
#'
#' @export
clustSE <- function(mod, clust = NULL, digits = 3, ztest = FALSE){
#if (is.null(data))
data <- eval(mod$call$data) #OLD
if (class(mod)[1] != 'glm' & class(mod)[1] != 'lm') stop("Must include a model object of class glm or lm.")
# is(mod, c('lm', 'glm')) {
# stop("Must include a model object of class glm or lm.")
# }
if (is.null(clust)) stop("Must include a cluster name. clust = 'cluster'")
tmp <- summary(mod)
se.n <- tmp$coefficients[,2]
pv.n <- tmp$coefficients[,4]
#X <- model.matrix.lm(mod, data, na.action = "na.pass")
X <- Xo <- model.matrix(mod) #to keep NAs if
n <- nobs(mod) #how many total observations
if (family(mod)[[1]] != 'gaussian') {
wts <- weights(mod, "working")
X <- X * sqrt(wts)
#re <- resid(mod, 'working') * wts
#re <- resid(mod, 'response') #y - pp
re <- resid(mod, 'pearson')
} else {
wts <- rep(1, n)
re <- resid(mod, 'pearson')
}
Wm <- diag(wts) #Weight matrix
if(nrow(X) != nrow(data)) {
#warning("Just a note: Missing data in original data.")
data <- data[names(fitted.values(mod)), ]
}
#data[,clust] <- as.character(data[,clust])
NG <- length(table(data[,clust])) #how many clusters
#cpx <- solve(crossprod(X)) #(X'X)-1 or inverse of the cp of X
cpx <- chol2inv(qr.R(qr(X))) #using QR decomposition, faster, more stable?
#cpx <- chol2inv(chol(t(Xo) %*% Wm %*% Xo))
cnames <- names(table(data[,clust])) #names of the clusters
js <- table(data[,clust]) #how many in each cluster
k <- mod$rank #predictors + intercept
Xj <- function(x){ #inverse of the symmetric square root (p. 709 IK)
index <- which(data[,clust] == x)
Xs <- X[index, , drop = F] #X per cluster [original, unweighted]
#wm <- Wm[index, index]
Hm <- Xs %*% cpx %*% t(Xs) # %*% Wm[index, index] # the Hat matrix, not symmetric
#Hm <- sqrt(wm) %*% Xs %*% cpx %*% t(Xs) %*% sqrt(wm) # the Hat matrix
## This is the orig formulation
IHjj <- diag(nrow(Xs)) - Hm
return(MatSqrtInverse(IHjj)) #I - H
# V3 <- chol(Wm[index, index]) #based on MBB
# Bi <- V3 %*% IHjj %*% Wm[index, index] %*% t(V3)
# t(V3) %*% MatSqrtInverse(Bi) %*% V3
}
ml <- lapply(cnames, Xj) #need these matrices for CR2 computation
#re <- resid(mod, 'pearson') #works for both lm and glm
cdata <- data.frame(data[,clust], re ) #data with cluster and residuals
names(cdata) <- c('cluster', 'r')
gs <- names(table(cdata$cluster))
u1 <- u2 <- matrix(NA, nrow = NG, ncol = k)
dfa <- (NG / (NG - 1)) * ((n - 1)/(n - k)) #for HC1 / Stata
#dfa <- NG / (NG - 1) #used by SAS
## function for Liang and Zeger SEs
uu1 <- function(x){
t(cdata$r[cdata$cluster == x]) %*%
X[cdata$cluster == x, ] #e'X #plain vanilla
}
u1 <- t(sapply(cnames, uu1)) #use as a list?
#have to transpose to get into proper shape
#because of sapply
mt <- crossprod(u1)
# br <- solve(crossprod(X)) #cpx
br <- cpx #just copying, got this earlier
clvc <- br %*% mt %*% br #LZ vcov matrix
uu2 <- function(x){
ind <- which(cnames == x)
t(cdata$r[cdata$cluster == x]) %*% ml[[ind]] %*%
X[cdata$cluster == x, ]
}
u2 <- t(sapply(cnames, uu2)) #have to transpose because of sapply
mt2 <- crossprod(u2)
clvc2 <- br %*% mt2 %*% br #BR LZ2 vcov matrix
#### To compute empirically-based DF
## STEP 1
tXs <- function(s) {
index <- which(cdata$cluster == s)
Xs <- X[index, , drop = F]
IHjj <- diag(nrow(Xs)) - Xs %*% cpx %*% t(Xs)
MatSqrtInverse(IHjj) %*%
Xs
} # A x Xs / Need this first
tX <- lapply(cnames, tXs)
## STEP 2
tHs <- function(s) {
index <- which(cdata$cluster == s)
Xs <- X[index, , drop = F]
ss <- matrix(0, nrow = n, ncol = length(index)) #all 0, n x G
ss[cbind(index, 1:length(index))] <- 1 #indicator
ss - X %*% cpx %*% t(Xs) #overall X x crossprod x Xs'
}
tH <- lapply(cnames, tHs) #per cluster
## STEP 3
id <- diag(k) #number of coefficients // for different df
degf <- numeric(k) #vector for df // container
for (j in 1:k){ #using a loop since it's easier to see
Gt <- sapply(seq(NG), function(i) tH[[i]] %*%
tX[[i]] %*% cpx %*% id[,j])
#already transposed because of sapply: this is G'
#ev <- eigen(Gt %*% t(Gt))$values #eigen values: n x n
ev <- eigen(t(Gt) %*% Gt)$values #much quicker this way, same result: p x p:
degf[j] <- (sum(ev)^2) / sum(ev^2) #final step to compute df
}
### Computing the dofHLM
if (ztest == FALSE){
### figuring out Number of L2 and L1 vars for dof
chk <- function(x){
vrcheck <- sum(tapply(x, data[,clust], var), na.rm = T) #L1,
# na needed if only one observation with var = NA
y <- 1 #assume lev1 by default
if (vrcheck == 0) (y <- 2) #if variation, then L2
return(y)
}
if (family(mod)[[1]] != 'gaussian') X <- model.matrix(mod) ## use original matrix to check for df
## don't need the X matrix after this
levs <- apply(X, 2, chk) #all except intercept
levs[1] <- 1 #intercept
tt <- table(levs)
l1v <- tt['1']
l2v <- tt['2']
l1v[is.na(l1v)] <- 0
l2v[is.na(l2v)] <- 0
####
#ns <- nobs(mod)
df1 <- n - l1v - NG #l2v old HLM
df2 <- NG - l2v - 1
dfn <- rep(df1, length(levs)) #naive
dfn[levs == '2'] <- df2
dfn[1] <- df2 #intercept
} else {
dfn <- rep(Inf, k) #infinite
}
### Putting it all together
CR1 <- sqrt(diag(clvc * dfa))
CR2 <- sqrt(diag(clvc2))
CR0 = sqrt(diag(clvc))
beta <- coef(mod)
tCR0 <- beta / CR0
tCR1 <- beta / CR1
tCR2 <- beta / CR2
CR0pv.n = round(2 * pt(-abs(tCR0), df = dfn), digits) #naive: either inf or HLM df
CR0pv = round(2 * pt(-abs(tCR0), df = degf), digits) #using adjusted df
CR1pv.n = round(2 * pt(-abs(tCR1), df = dfn), digits) #naive: either inf or HLM df
CR1pv = round(2 * pt(-abs(tCR1), df = degf), digits) #using adjusted df
CR2pv.n = round(2 * pt(-abs(tCR2), df = dfn), digits) #naive: either inf or HLM df
CR2pv = round(2 * pt(-abs(tCR2), df = degf), digits) #using adjusted df
####
res <- data.frame(estimate = round(beta, digits),
se.unadj = round(se.n, digits),
CR0 = round(CR0, digits), #LZeger
CR1 = round(CR1, digits), #stata
CR2 = round(CR2, digits),
tCR2 = round(tCR2, digits),
dfn = dfn,
dfBM = round(degf, 2),
pv.unadj = round(pv.n, digits),
CR0pv = round(CR0pv, digits),
CR0pv.n = round(CR0pv.n, digits),
CR1pv,
CR1pv.n,
CR2pv.n,
CR2pv) #BM
return(res)
}
|
/scratch/gouwar.j/cran-all/cranData/CR2/R/clustSE.R
|
#' Simulated data from 18 schools (from a cluster randomized controlled trial)
#'
#' Synthetic dataset used in the manuscript in the Journal of Research on Educational Effectiveness.
#'
#' @usage data(crct)
#' @format A data frame with 4233 rows and 12 variables:
#' \describe{
#' \item{usid}{Unique school identifier (the grouping variable).}
#' \item{stype}{School type (elementary, middle, or high school).}
#' \item{trt}{Treatment indicator. 1 = intervention; 0 = control.}
#' \item{odr_post}{Office disciplinary referral outcome.}
#' \item{odr_pre}{Office disciplinary referral (baseline).}
#' \item{size}{School enrollment size (to the nearest hundred).}
#' \item{female}{Student is female: 1 = yes.}
#' \item{stype_ms}{Dummy code for school type; middle school.}
#' \item{stype_elem}{Dummy code for school type; elementary school.}
#' \item{stype_hs}{Dummy code for school type; high school.}
#' \item{race_Black}{Dummy code for student race/ethnicity; Black student.}
#' \item{race_Hispanic}{Dummy code for student race/ethnicity; Hispanic student.}
#' }
"crct"
|
/scratch/gouwar.j/cran-all/cranData/CR2/R/crct.R
|
#' Get V matrix for merMod objects
#'
#' Function to extract V matrix.
#'
#'
#' @param x lme4 object
#' @return V matrix (weight) for multilevel models
#' @export
#' @importFrom stats sigma
getV <- function(x) {
lam <- data.matrix(getME(x, "Lambdat"))
var.d <- crossprod(lam)
Zt <- data.matrix(getME(x, "Zt"))
vr <- sigma(x)^2
var.b <- vr * (t(Zt) %*% var.d %*% Zt)
sI <- vr * diag(nobs(x))
  var.y <- var.b + sI
  var.y #return the marginal covariance matrix (visibly)
}
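
# Illustrative call (not run; assumes lme4 and the packaged 'sch25' data):
# the returned matrix is the implied marginal covariance of the outcome,
# of dimension nobs(m) x nobs(m).
#   m <- lme4::lmer(math ~ ses + (1 | schid), data = sch25)
#   V <- getV(m)
#   dim(V)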
|
/scratch/gouwar.j/cran-all/cranData/CR2/R/getV.R
|
utils::globalVariables(c("getVarCov", 'bdiag', 'formula', 'Matrix::solve', 'getV', 'getME', 'fixef', 'performance::r2_nakagawa', 'chol2inv', 'chol', 'VarCorr', 'vcov', 'diag', 'Estimate', 'cr.se', 'p.val', 'dplyr::rename', 'tibble::as_tibble', 'dplyr::bind_cols', 'performance::r2_nakagawa'))
|
/scratch/gouwar.j/cran-all/cranData/CR2/R/globals.R
|
#' Grade point average (GPA) data of students from 25 schools
#'
#' For investigating heteroskedasticity.
#' @usage data(gpadat)
#' @format A data frame with 8,956 rows and 18 variables:
#' \describe{
#' \item{gpa}{Grade point average. 1 = D ... 4 = A.}
#' \item{female}{Gender. Female = 1.}
#' \item{race}{Student race/ethnicity (factor).}
#' \item{dis}{Disability status (1 = yes/0 = no).}
#' \item{frpl}{Free/reduced price lunch status.}
#' \item{race_w}{Dummy coded race (White).}
#' \item{race_a}{Dummy coded race (Asian).}
#' \item{race_b}{Dummy coded race (Black).}
#' \item{race_h}{Dummy coded race (Hispanic).}
#' \item{race_o}{Dummy coded race (Other).}
#' \item{per_asian}{Group-aggregated Asian variable.}
#' \item{per_black}{Group-aggregated Black variable.}
#' \item{per_hisp}{Group-aggregated Hispanic variable.}
#' \item{per_other}{Group-aggregated Other variable.}
#' \item{per_fem}{Group-aggregated female variable.}
#' \item{per_dis}{Group-aggregated disability variable.}
#' \item{per_frpl}{Group-aggregated frpl variable.}
#' \item{schoolid}{School identifier (cluster variable).}
#' }
"gpadat"
|
/scratch/gouwar.j/cran-all/cranData/CR2/R/gpadat.R
|
#' Testing for nonconstant variance (ncv)
#'
#' @description{Function to detect heteroscedasticity in two-level random intercept models.
#' Uses a generalization of the Breusch-Pagan-type test (using squared residuals)
#' and of the Levene-type test (using the absolute value of residuals). Note: this will
#' not tell you whether including random slopes is warranted (for that, use the
#' \code{robust_mixed} function and compare differences in model-based and
#' robust standard errors).}
#'
#'
#'
#' @importFrom stats resid anova update
#' @importFrom nlme ranef
#' @importFrom lme4 lmer
#' @param mx The \code{lme} or \code{merMod} model object.
#' @param bp Computes a Breusch-Pagan-type test (\code{TRUE}). If \code{FALSE} computes a Levene-type test.
#' @return A p-value (p < .05 suggests heteroskedasticity).
#'
#' @references
#' \cite{Huang, F., Wiedermann, W., & Zhang, B. (2022). Accounting for Heteroskedasticity Resulting from Between-group Differences in Multilevel Models. Multivariate Behavioral Research.
#' }
#'
#'
#' @examples
#' require(lme4)
#' data(sch25)
#' ncvMLM(lmer(math ~ byhomewk + male + ses + (1|schid), data = sch25)) #supported
#' ncvMLM(lmer(math ~ byhomewk + male + ses + minority + (1|schid), data = sch25)) #hetero
#' @export ncvMLM
ncvMLM <- function(mx, bp = TRUE){
#if (class(mx) != 'lme') (stop("Only for lme objects"))
if(is(mx, 'lme')){
dat <- mx$data
if (length(mx$dims$ngrps) > 3) {stop("Can only be used with two level data.")}
if (ncol(ranef(mx)) > 1) {stop("Can only be used with random intercept models.")}
if (bp == TRUE){
dat$rr <- resid(mx)^2 #squared, these are conditional
} else { #this is Levene's
dat$rr <- abs(resid(mx)) #absolute value, these are conditional
}
tmp0 <- update(mx, rr ~ 1, method = 'ML', data = dat)
tmp1 <- update(mx, rr ~ ., method = 'ML', data = dat)
res <- anova(tmp1, tmp0)
return(as.numeric(res$`p-value`[2]))
} else if(is(mx, 'merMod')){
dat <- mx@frame
Gname <- names(getME(mx, 'l_i')) #name of clustering variable
if (length(Gname) > 1) {
stop("lmer: Can only be used with non cross-classified data.")
}
if (getME(mx, 'p_i') > 1) {
stop("Can only be used with random intercept models.")
}
if (bp == TRUE){
dat$rr <- resid(mx)^2 #squared, these are conditional
} else { #this is Levene's
dat$rr <- abs(resid(mx)) #absolute value, these are conditional
}
groups <- dat[,Gname]
#print(groups)
tmp0 <- lmer(rr ~ 1 + (1|groups), REML = FALSE, data = dat)
tmp1 <- update(mx, rr ~ ., REML = FALSE, data = dat)
res <- anova(tmp1, tmp0)
return(as.numeric(res$`Pr(>Chisq)`[2]))
} else {
stop("Can only be used with lme or merMod objects.")
}
}
|
/scratch/gouwar.j/cran-all/cranData/CR2/R/ncvMLM.R
|
#' Cluster robust standard errors with degrees of freedom adjustments for lmerMod/lme objects
#'
#' Function to compute the CR2/CR0 cluster
#' robust standard errors (SE) with Bell and McCaffrey (2002)
#' degrees of freedom (dof) adjustments. Suitable even with a low number of clusters.
#' The model-based (mb) and cluster robust standard errors are shown for comparison purposes.
#'
#'
#' @importFrom stats nobs resid formula residuals var coef pt model.matrix family weights fitted.values
#' @importFrom methods is
#' @param m1 The \code{lmerMod} or \code{lme} model object.
#' @param digits Number of decimal places to display.
#' @param type Type of cluster robust standard error to use ("CR2" or "CR0").
#' @param satt If Satterthwaite degrees of freedom are to be computed (if not, between-within df are used).
#' @param Gname Group/cluster name if more than two levels of clustering (does not work with lme).
#' @return A data frame (\code{results}) with the cluster robust adjustments with p-values.
#' \item{Estimate}{The regression coefficient.}
#' \item{mb.se}{The model-based (regular, unadjusted) SE.}
#' \item{cr.se}{The cluster robust standard error.}
#' \item{df}{degrees of freedom: Satterthwaite or between-within.}
#' \item{p.val}{p-value using CR0/CR2 standard error.}
#' \item{stars}{stars showing statistical significance.}
#'
#' @references
#' \cite{Bell, R., & McCaffrey, D. (2002). Bias reduction in standard errors for linear regression with multi-stage samples. Survey Methodology, 28, 169-182.
#' (\href{https://www150.statcan.gc.ca/n1/pub/12-001-x/2002002/article/9058-eng.pdf}{link})}
#'
#' \cite{Liang, K.Y., & Zeger, S. L. (1986). Longitudinal data analysis using generalized linear models. \emph{Biometrika, 73}(1), 13-22.
#' (\href{https://academic.oup.com/biomet/article/73/1/13/246001}{link})
#' }
#'
#' @author Francis Huang, \email{[email protected]}
#' @author Bixi Zhang, \email{[email protected]}
#'
#'
#' @examples
#' require(lme4)
#' data(sch25, package = 'CR2')
#' robust_mixed(lmer(math ~ male + minority + mses + mhmwk + (1|schid), data = sch25))
#' @export
robust_mixed <- function(m1, digits = 3, type = 'CR2', satt = TRUE, Gname = NULL){
if(is(m1, 'lmerMod')){
dat <- m1@frame
X <- model.matrix(m1) #X matrix
B <- fixef(m1) #coefficients
y <- m1@resp$y #outcome
Z <- getME(m1, 'Z') #sparse Z matrix
b <- getME(m1, 'b') #random effects
if (is.null(Gname)){
Gname <- names(getME(m1, 'l_i')) #name of clustering variable
if (length(Gname) > 1) {
stop("lmer: Can only be used with non cross-classified data. If more than two levels, specify highest level using Gname = 'clustername'")
}
}
js <- table(dat[, Gname]) #how many observation in each cluster
G <- bdiag(VarCorr(m1)) #G matrix
NG <- getME(m1, 'l_i') #number of groups :: ngrps(m1)
NG <- NG[length(NG)]
gpsv <- dat[, Gname] #data with groups
# { #done a bit later than necessary but that is fine
# if(is.unsorted(gpsv)){
# # stop("Data are not sorted by cluster. Please sort your data first by cluster, run the analysis, and then use the function.\n")
# }
# }
# getV <- function(x) {
# lam <- data.matrix(getME(x, "Lambdat"))
# var.d <- crossprod(lam)
# Zt <- data.matrix(getME(x, "Zt"))
# vr <- sigma(x)^2
# var.b <- vr * (t(Zt) %*% var.d %*% Zt)
# sI <- vr * diag(nobs(x))
# var.y <- var.b + sI
# }
Vm <- getV(m1)
}
if(is(m1, 'lme')){
dat <- m1$data
fml <- formula(m1)
X <- model.matrix(fml, data = dat)
B <- fixef(m1)
NG <- m1$dims$ngrps[[1]]
if (length(m1$dims$ngrps) > 3) {stop("Can only be used with two level data.")}
Gname <- names(m1$groups)
y <- dat[,as.character(m1$terms[[2]])]
gpsv <- dat[,Gname]
js <- table(gpsv)
{#done a bit later than necessary but that is fine
if(is.unsorted(gpsv)){
stop("Data are not sorted by cluster. Please sort your data first by cluster, run the analysis, and then use the function.\n")
}
}
ml <- list()
for (j in 1:NG){
test <- getVarCov(m1, individuals = j, type = 'marginal')
ml[[j]] <- test[[1]]
}
Vm <- as.matrix(Matrix::bdiag(ml)) #to work with other funs
}
### robust computation :: once all elements are extracted
rr <- y - X %*% B #residuals with no random effects
#n <- nobs(m1)
cdata <- data.frame(cluster = gpsv, r = rr)
k <- ncol(X) #number of predictors (inc intercept)
gs <- names(table(cdata$cluster)) #name of the clusters
u <- matrix(NA, nrow = NG, ncol = k) #LZ
uu <- matrix(NA, nrow = NG, ncol = k) #CR2
cnames <- names(table(gpsv))
#cpx <- solve(crossprod(X))
#cpx <- chol2inv(qr.R(qr(X))) #using QR decomposition, faster, more stable?
#cdata <- data.frame(cluster = dat[,Gname])
#NG <- length(cnames)
### quicker way, doing the bread by cluster
tmp <- split(X, cdata$cluster)
XX <- lapply(tmp, function(x) matrix(x, ncol = k)) #X per clust
# to get Vc^-1 per cluster
aa <- function(x){
sel <- which(cdata$cluster == x)
chol2inv(chol(Vm[sel, sel])) #this is V^-1
#solve(Vm[sel, sel])
}
Vm2 <- lapply(cnames, aa) #Vc^-1 used
a2 <- function(x){
sel <- which(cdata$cluster == x)
Vm[sel, sel] #this is V
}
Vm3 <- lapply(cnames, a2) #Vc used
names(Vm2) <- names(Vm3) <- cnames #naming
#Vm2 is the inverse, Vm3 is just the plain V matrix
Vinv <- as.matrix(Matrix::bdiag(Vm2))
# to get X V-1 X per cluster
bb <- function(x){
t(XX[[x]]) %*% Vm2[[x]] %*% XX[[x]]
}
dd <- lapply(cnames, bb)
br <- solve(Reduce("+", dd)) #bread
#rrr <- split(rr, getME(m1, 'flist'))
rrr <- split(rr, cdata$cluster)
#### Meat matrix
if (type == "CR2"){
tXs <- function(s) {
Ijj <- diag(nrow(XX[[s]]))
Hjj <- XX[[s]] %*% br %*% t(XX[[s]]) %*% Vm2[[s]]
IHjj <- Ijj - Hjj
#MatSqrtInverse(Ijj - Hjj) #early adjustment / valid
V3 <- chol(Vm3[[s]]) #based on MBB
Bi <- V3 %*% IHjj %*% Vm3[[s]] %*% t(V3)
t(V3) %*% MatSqrtInverse(Bi) %*% V3
} # A x Xs / Need this first
tX <- lapply(cnames, tXs)
cc2 <- function(x){
rrr[[x]] %*% tX[[x]] %*% Vm2[[x]] %*% XX[[x]]
}
u <- t(sapply(1:NG, cc2)) #using 1:NG instead
} else { ## meat matrix for CR0
# residual x inverse of V matrix x X matrix
cc0 <- function(x){
rrr[[x]] %*% Vm2[[x]] %*% XX[[x]]
}
u <- t(sapply(cnames, cc0))
}
## e'(Vg)-1 Xg ## CR0
## putting the pieces together
#br2 <- solve(t(X) %*% Vinv %*% X) #bread
mt <- t(u) %*% u #meat
if (ncol(X) == 1) {mt <- u %*% t(u)} #updated 2022.12.24
clvc2 <- br %*% mt %*% br #variance covariance matrix
rse <- sqrt(diag(clvc2)) #standard errors
### DEGREES OF FREEDOM :::::::::::::::::
if (satt == TRUE){
dfn <- satdf(m1, Gname = Gname, type = type)
} else {
### HLM dof
chk <- function(x){
vrcheck <- sum(tapply(x, gpsv, var), na.rm = T) #L1,
# na needed if only one observation with var = NA
y <- 1 #assume lev1 by default
if (vrcheck == 0) (y <- 2) #if variation, then L2
return(y)
}
levs <- apply(X, 2, chk) #all except intercept
# levs[1] <- 1 #intercept
tt <- table(levs)
l1v <- tt['1']
l2v <- tt['2']
l1v[is.na(l1v)] <- 0
l2v[is.na(l2v)] <- 0
####
n <- nobs(m1)
df1 <- n - l1v - length(js)
df2 <- NG - l2v
dfn <- rep(df1, length(levs)) #naive
dfn[levs == '2'] <- df2
}
robse <- as.numeric(rse)
FE_auto <- fixef(m1)
cfsnames <- names(FE_auto)
statistic <- FE_auto / rse
p.values = round(2 * pt(abs(statistic), df = dfn, lower.tail = F), digits)
stars <- as.character(cut(p.values, breaks = c(0, 0.001, 0.01, 0.05, 0.1, 1),
labels = c("***", "**", "*", ".", " "), include.lowest = TRUE)
)
robs <- rse
pv <- p.values
vc <- clvc2
rownames(vc) <- colnames(vc) <- cfsnames #names matrix 2022.12.24
#gams <- solve(t(X) %*% solve(Vm) %*% X) %*% (t(X) %*% solve(Vm) %*% y)
#SEm <- as.numeric(sqrt(diag(solve(t(X) %*% solve(Vm) %*% X)))) #X' Vm-1 X
#SE <- as.numeric(sqrt(diag(vcov(m1)))) #compare standard errors
SE <- as.numeric(sqrt(diag(br)))
ttable <- cbind(
Estimate = round(FE_auto, digits),
mb.se = round(SE, digits),
robust.se = round(robs, digits),
t.stat = round(FE_auto / robs, digits),
df = round(dfn, 1),
"Pr(>t)" = pv
)
results <- data.frame(
Estimate = FE_auto,
mb.se = SE,
cr.se = robse,
t.stat = FE_auto / robs,
df = dfn,
p.val = pv,
stars
)
type <- ifelse(type == 'CR2', 'CR2', 'CR0')
dft <- ifelse(satt == TRUE, "Satterthwaite", "Between-within")
res <- list(ttable = ttable,
results = results,
crtype = type,
df = dft,
digits = digits,
vcov = vc,
orig = m1)
class(res) <- 'CR2'
return(res)
}
|
/scratch/gouwar.j/cran-all/cranData/CR2/R/robust_mixed.R
|
#' Compute Satterthwaite degrees of freedom
#'
#' Function to compute empirical degrees of freedom
#' based on Bell and McCaffrey (2002).
#'
#'
#' @importFrom stats nobs resid formula residuals var coef pt model.matrix family weights fitted.values
#' @importFrom methods is
#' @param m1 The \code{lmerMod} or \code{lme} model object.
#' @param type The type of cluster robust correction used (i.e., CR2 or none).
#' @param Vinv2 Inverse of the variance matrix.
#' @param Vm2 The variance matrix.
#' @param br2 The bread component.
#' @param Gname The group (clustering variable) name'
#'
#' @author Francis Huang, \email{[email protected]}
#' @author Bixi Zhang, \email{[email protected]}
#'
#' @return Returns a vector of degrees of freedom.
#' @export
## empirical DOF
satdf <- function(m1, type = 'none', Vinv2, Vm2, br2, Gname = NULL){
#require(Matrix)
#if(class(m1) == 'lme'){ #if nlme
if(is(m1, 'lme')){
dat <- m1$data
fml <- formula(m1)
X <- model.matrix(fml, data = dat)
B <- fixef(m1)
NG <- m1$dims$ngrps[[1]]
if (length(m1$dims$ngrps) > 3) {stop("Can only be used with two level data (for now).")}
Gname <- names(m1$groups)
y <- dat[,as.character(m1$terms[[2]])]
gpsv <- dat[,Gname]
js <- table(gpsv)
K <- ncol(X)
{#done a bit later than necessary but that is fine
if(is.unsorted(gpsv)){
stop("Data are not sorted by cluster. Please sort your data first by cluster, run the analysis, and then use the function.\n")
}
}
ml <- list()
for (j in 1:NG){
test <- getVarCov(m1, individuals = j, type = 'marginal')
ml[[j]] <- test[[1]]
}
Vm <- as.matrix(Matrix::bdiag(ml)) #to work with other funs
}
### for lmer
#if(class(m1) %in% c('lmerMod', 'lmerModLmerTest')){ #if lmer
if(is(m1, 'lmerMod')){
dat <- m1@frame
X <- model.matrix(m1) #X matrix
B <- fixef(m1) #coefficients
y <- m1@resp$y #outcome
Z <- getME(m1, 'Z') #sparse Z matrix
b <- getME(m1, 'b') #random effects
if (is.null(Gname)){
Gname <- names(getME(m1, 'l_i')) #name of clustering variable
if (length(Gname) > 1) {
stop("lmer: Can only be used with non cross-classified data. If more than two levels, specify highest level using Gname = 'clustername'")
}
}
js <- table(dat[, Gname]) #how many observation in each cluster
G <- bdiag(VarCorr(m1)) #G matrix
NG <- getME(m1, 'l_i') #number of groups :: ngrps(m1)
NG <- NG[length(NG)]
gpsv <- dat[, Gname] #data with groups
# { #done a bit later than necessary but that is fine
# if(is.unsorted(gpsv)){
# # stop("Data are not sorted by cluster. Please sort your data first by cluster, run the analysis, and then use the function.\n")
# }
# }
# getV <- function(x) {
# lam <- data.matrix(getME(x, "Lambdat"))
# var.d <- crossprod(lam)
# Zt <- data.matrix(getME(x, "Zt"))
# vr <- sigma(x)^2
# var.b <- vr * (t(Zt) %*% var.d %*% Zt)
# sI <- vr * diag(nobs(x))
# var.y <- var.b + sI
# }
Vm <- getV(m1)
}
Vm <- Matrix::drop0(Vm) #make a sparse matrix, if not already
Vinv <- Matrix::solve(Vm)
#Vinv <- chol2inv(chol(Vm))
cpx <- solve(t(X) %*% Vinv %*% X) #solve(Vm)
ns <- nobs(m1)
Im <- diag(ns) #identity matrix
Hm <- X %*% cpx %*% t(X) %*% Vinv #Overall hat matrix
IH <- as.matrix(Im - Hm) #difference
nms <- names(table(dat[,Gname])) #names of clusters
K <- ncol(X) #number of vars
dd <- diag(K)
NG <- length(nms)
### adjustments
if (type == 'CR2') {
tHs <- function(x) { #working CR2
ind <- which(dat[,Gname] == x)
Xs <- X[ind, ,drop = F]
Vs <- Vm[ind, ind, drop = F]
U <- chol(Vs) #with the cholesky matrix
adj <- Xs %*% cpx %*% t(Xs) %*% chol2inv(U) #solve(Vs)
Ws <- Vinv[ind, ind, drop = F] #Wj, Vinv in clusters
ih <- IH[ind, , drop = F] #asymmetric, need rows(ind) here
ng <- nrow(Xs)
cr <- diag(ng) - adj
t(ih) %*% t(U) %*% MatSqrtInverse(U %*% cr %*% Vs %*% t(U)) %*%
U %*% Ws %*% Xs %*% cpx ### this has the adjustment in the matsqrtinv & U
# A(adjust matrix) is t(U) %*% MatSqrtInverse(U %*% cr %*% Vs %*% t(U)) %*% U
#IHjj <- Ijj - Hjj
#Bi <- chol(V3) %*% IHjj %*% V3 %*% t(chol(V3))
#Ai <- t(chol(V3)) %*% MatSqrtInverse(Bi) %*% chol(V3)
}
} else {
tHs <- function(x) { #CR0
ind <- which(dat[,Gname] == x)
Xs <- X[ind, ,drop = F]
ih <- IH[ind, , drop = F]
Ws <- Vinv[ind, ind, drop = F]
t(ih) %*% Ws %*% Xs %*% cpx ### NO ADJUSTMENT but with Ws
}
}
tmp <- lapply(nms, tHs)
#Gm = do.call('cbind', tmp) #bind them together
degf <- numeric() #container
#Wm <- MatSqrtInverse(Vm) #as per Tipton 2015 -- this is new
Wm <- Vm #W is just Vm or target variance in our case
for (j in 1:K){ #using a loop since it's easier to see
sel <- dd[j, ]
Gt <- lapply(seq(NG), function(i) tmp[[i]] %*% sel)
Gt <- as.matrix(do.call('cbind', Gt))
#ev <- eigen(Wm %*% Gt %*% t(Gt) %*% Wm)$values
#degf[j] <- (sum(ev)^2) / sum(ev^2) #final step to compute df
#GG <- Wm %*% Gt %*% t(Gt) %*% Wm #avoids using eigen; from Kolesar
#degf[j] <- sum(diag(GG))^2 / sum(GG * GG)
GG <- t(Gt) %*% Wm %*% Gt #from Pustejovsky and Tipton 2018 eq.11
GGd <- GG[row(GG) == col(GG)] #just diag(GG)
#degf[j] <- sum(diag(GG))^2 / sum(GG * GG) #lme issues?
degf[j] <- sum(GGd)^2 / sum(GG * GG)
}
degf #manual computation for CR2 dof
}
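
# Illustrative call (not run; assumes lme4 and the packaged 'sch25' data):
# returns one empirical (Satterthwaite-type) df value per fixed-effect
# coefficient, as used internally by robust_mixed().
#   m <- lme4::lmer(math ~ ses + (1 | schid), data = sch25)
#   satdf(m, type = 'CR2')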
|
/scratch/gouwar.j/cran-all/cranData/CR2/R/satdf.R
|
#' Data from 25 schools (based on the NELS dataset)
#'
#' For examining the association between the amount of homework done per week and the math outcome.
#' @usage data(sch25)
#' @format A data frame with 546 rows and 8 variables:
#' \describe{
#' \item{schid}{The school identifier (the grouping variable)}
#' \item{ses}{Student-level socioeconomic status}
#' \item{byhomewk}{Total amount of time the student spent on homework per week. 1 = None, 2 = Less than one hour, 3 = 1 hour, 4 = 2 hours, 5 = 3 hours, 6 = 4-6 hours, 7 = 7 - 9 hours, 8 = 10 or more}
#' \item{math}{Mathematics score.}
#' \item{male}{Dummy coded gender, 1 = male, 0 = female}
#' \item{minority}{Dummy coded minority status, 1 = yes, 0 = no}
#' \item{mses}{Aggregated socioeconomic status at the school level}
#' \item{mhmwk}{Aggregated time spent on homework at the school level}
#' }
#' @source \url{https://nces.ed.gov/pubs92/92030.pdf}
"sch25"
|
/scratch/gouwar.j/cran-all/cranData/CR2/R/sch25.R
|
#' Data from Project SHARE
#'
#' Project SHARE (Sexual Health and Relationships) was a cluster randomized trial (CRT) in
#' Scotland carried out to measure the impact of a school-based
#' sexual health program (Wight et al., 2002).
#'
#' @docType data
#'
#' @usage data(sharedat)
#'
#' @format A data frame with 5399 observations and 7 variables.
#'
#' \describe{
#' \item{\code{school}}{The cluster variable}
#' \item{\code{sex}}{factor indicating F or M}
#' \item{\code{arm}}{treatment arm = 1 vs control = 0}
#' \item{\code{kscore}}{Pupil knowledge of sexual health}
#' \item{\code{idno}}{student id number}
#' \item{\code{sc}}{factor showing the highest social class of the father or mother
#' based on occupation (coded 10: I (highest), 20: II,
#' 31: III non-manual, 32: III manual, 40: IV, 50: V (lowest), 99: not coded).}
#' \item{\code{zscore}}{standardized knowledge score}
#'}
#'
#'
#'
#' @keywords datasets
#'
#' @references
#' \cite{Moulton, L. (2015). readme.txt contains an overall explanation of the data sets. Harvard.
#' \doi{10.7910/DVN/YXMQZM}}
#'
#' \cite{Wight, D., Raab, G. M., Henderson, M., Abraham, C., Buston, K., Hart, G., & Scott, S. (2002). Limits of teacher delivered sex education:
#' Interim behavioural outcomes from randomised trial. BMJ, 324, 1430.
#' \doi{10.1136/bmj.324.7351.1430}}
#'
#'
#' @source \doi{10.7910/DVN/YXMQZM}{Harvard dataverse}
#'
#' @examples
#' data(sharedat)
"sharedat"
|
/scratch/gouwar.j/cran-all/cranData/CR2/R/sharedat.R
|
#' Tidy a CR2 object
#'
#' @param x A `CR2` object.
#' @param conf.int Logical indicating whether or not to include
#' a confidence interval in the tidied output. Defaults to FALSE.
#' @param conf.level The confidence level to use for the confidence
#' interval if conf.int = TRUE. Must be strictly greater than 0
#' and less than 1. Defaults to 0.95, which corresponds to a
#' 95 percent confidence interval.
#' @param ... Unused, included for generic consistency only.
#' @return A tidy [tibble::tibble()] summarizing component-level
#' information about the model
#'
#' @importFrom generics tidy
#' @importFrom stats confint df qt
#' @export
tidy.CR2 <- function(x, conf.int = FALSE, conf.level = 0.95, ...) {
result <- x$results %>%
tibble::as_tibble(rownames = "term") %>%
dplyr::rename(estimate = Estimate,
std.error = `cr.se`,
statistic = `df`, #why the df
p.value = `p.val`)
if (conf.int) {
ci <- confint(x, level = conf.level)
colnames(ci) <- c('conf.low', 'conf.high')
#result <- dplyr::left_join(result, ci, by = "term")
result <- dplyr::bind_cols(result, ci)
}
return(result)
}
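## A minimal usage sketch (assuming `fit` is a CR2 model object produced by this package):
##   tidy(fit)                                     # one row per coefficient
##   tidy(fit, conf.int = TRUE, conf.level = 0.9)  # adds conf.low / conf.high columns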
#' @export
vcov.CR2 <- function(object, ...){
object$vcov
}
#' Glance at goodness-of-fit statistics
#'
#' Helper function used to obtain supporting fit statistics for multilevel models. The R2s are computed using the `performance` package.
#'
#' @param x A `CR2` object.
#' @param ... Unused, included for generic consistency only.
#'
#' @return \code{glance} returns one row with the columns:
#' \item{nobs}{the number of observations}
#' \item{sigma}{the square root of the estimated residual variance}
#' \item{logLik}{the data's log-likelihood under the model}
#' \item{AIC}{Akaike Information Criterion}
#' \item{BIC}{Bayesian Information Criterion}
#' \item{r2.marginal}{marginal R2 based on fixed effects only using method of Nakagawa and Schielzeth (2013)}
#' \item{r2.conditional}{conditional R2 based on fixed and random effects using method of Nakagawa and Schielzeth (2013)}
#'
#' @rawNamespace if(getRversion()>='3.3.0') importFrom(stats, sigma) else importFrom(lme4,sigma)
#'
#' @importFrom broom glance
#' @export
glance.CR2 <- function(x, ...) {
tmp <- data.frame(
sigma = sigma(x$orig),
logLik = as.numeric(stats::logLik(x$orig)),
AIC = stats::AIC(x$orig),
BIC = stats::BIC(x$orig),
nobs = stats::nobs(x$orig),
r2.marginal = as.numeric(performance::r2_nakagawa(x$orig)[2]),
r2.conditional = as.numeric(performance::r2_nakagawa(x$orig)[1])
# )
)
return(tmp)
}
#' @export
confint.CR2 <- function(object, parm, level = 0.95, ...){
z <- object
k <- nrow(z$results)
cf <- z$results$Estimate
parm <- row.names(z$results)
se <- z$results$cr.se
a <- (1 - level) / 2
a <- c(a, 1 - a)
pct <- sprintf("%0.1f%%", a * 100)
ci <- array(NA, dim = c(k, 2),
dimnames = list(parm, pct))
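  # For each coefficient i: CI_i = estimate_i -/+ |t(a, df_i)| * se_i,
  # using the adjusted degrees of freedom stored in results$df.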
for (i in 1:k){
crit <- qt(a, z$results$df[i])
ci[i, 1] <- cf[i] - abs(crit[1]) * se[i]
ci[i, 2] <- cf[i] + abs(crit[2]) * se[i]
}
ci
}
|
/scratch/gouwar.j/cran-all/cranData/CR2/R/tidy_cr2.R
|
#' Pipe operator
#'
#' See \code{magrittr::\link[magrittr:pipe]{\%>\%}} for details.
#'
#' @name %>%
#' @rdname pipe
#' @keywords internal
#' @export
#' @importFrom magrittr %>%
#' @usage lhs \%>\% rhs
#' @param lhs A value or the magrittr placeholder.
#' @param rhs A function call using the magrittr semantics.
#' @return The result of calling `rhs(lhs)`.
NULL
|
/scratch/gouwar.j/cran-all/cranData/CR2/R/utils-pipe.R
|
#' CRABS: Congruent Rate Analyses in Birth-death Scenarios
#'
#'
#'
#'
#'
#'@importFrom magrittr %>%
#'@importFrom latex2exp latex2exp
#'@importFrom tidyr gather
#'@importFrom stats approxfun rlnorm rgamma rcauchy rnorm time quantile rbeta
#'@importFrom utils txtProgressBar setTxtProgressBar head tail read.table
#'@importFrom ggplot2 ggplot aes scale_fill_gradient2 theme_classic ggtitle theme element_text scale_x_continuous
#'@importFrom ggplot2 geom_line scale_x_reverse scale_color_manual element_blank geom_hline geom_tile xlab ylim
#'@importFrom ggplot2 scale_fill_manual ylab labs element_rect facet_grid scale_linetype_manual scale_y_continuous geom_histogram
#'@importFrom graphics matplot par
#'@importFrom dplyr group_map bind_rows filter
#'@importFrom patchwork plot_layout
#'@importFrom colorspace sequential_hcl
#'@importFrom ape branching.times
#'@importFrom pracma fderiv
#'
#'
#'
#' @section References:
#'
#' \itemize{
#' \item Louca, S., & Pennell, M. W. (2020). Extant timetrees are consistent with a myriad of diversification histories. Nature, 580(7804), 502-505. https://doi.org/10.1038/s41586-020-2176-1
#' \item Höhna, S., Kopperud, B. T., & Magee, A. F. (2022). CRABS: Congruent rate analyses in birth–death scenarios. Methods in Ecology and Evolution, 13, 2709– 2718. https://doi.org/10.1111/2041-210X.13997
#' \item Kopperud, B. T., Magee, A. F., & Höhna, S. (2023). Rapidly Changing Speciation and Extinction Rates Can Be Inferred in Spite of Nonidentifiability. Proceedings of the National Academy of Sciences 120 (7): e2208851120. https://doi.org/10.1073/pnas.2208851120
#' \item Andréoletti, J. & Morlon, H. (2023). Exploring congruent diversification histories with flexibility and parsimony. Methods in Ecology and Evolution. https://doi.org/10.1111/2041-210X.14240
#' }
#'
"_PACKAGE"
|
/scratch/gouwar.j/cran-all/cranData/CRABS/R/CRABS.R
|
congruent.extinction <- function( model, func.lambda ) {
times = model$times
v_p_div = sapply(times, model$p.delta)
delta_t = model$delta_t
if (length(func.lambda(times)) == 1){
func.lambda <- Vectorize(func.lambda)
}
## make sure that the initial conditions holds
if ( abs(model$lambda(0) - func.lambda(0)) > 1E-8 ) {
stop("The initial values of the reference and alternative speciation rate functions are not identical")
}
v_spec1 <- sapply(times, func.lambda)
#v_mu1 <- compute.extinction( v_p_div, v_spec1, delta_t )
lambda_1 <- func.lambda
v_mu1 <- compute.extinction( lambda_1, v_spec1, v_p_div, times )
mu_1 <- approxfun( times, v_mu1)
## create the parameter transformations as rate functions
func_div <- function(t) lambda_1(t) - mu_1(t)
func_turn <- function(t) mu_1(t) / lambda_1(t)
res = list(lambda = lambda_1,
mu = mu_1,
delta = func_div,
epsilon = func_turn,
p.delta = model$p.delta,
times = times,
max.t = model$max.t,
delta_t = model$delta_t,
num.intervals = model$num.intervals)
class(res) <- c("CRABS")
return (res)
}
compute.extinction <- function(lambda_1, v_spec1, v_p_div, times) {
back <- pracma::fderiv(lambda_1, times, method = "backward")
forw <- pracma::fderiv(lambda_1, times, method = "forward")
m <- rbind(forw, back)
lambda_deriv <- apply(m, 2, mean, na.rm = TRUE)
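  # Congruence identity (Louca & Pennell 2020): mu(t) = lambda(t) - r_p(t) + lambda'(t) / lambda(t),
  # with r_p the pulled diversification rate; lambda'(t) is approximated above by averaging
  # forward and backward finite differences.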
v_mu1 <- v_spec1 - v_p_div + 1/v_spec1 * lambda_deriv
}
#
# compute.extinction <- function( v_p_div, v_spec1, delta_t ) {
#
# # compute the derivatives
# l <- v_spec1[-length(v_spec1)]
# l_plus_one <- v_spec1[-1]
# l_derivative <- (l_plus_one - l) / delta_t
# l_derivative <- c(l_derivative[1],l_derivative)
#
# # finally, add the 1/lambda * lambda dt to the pulled diversification rate
# v_mu1 <- v_spec1 - v_p_div + 1/v_spec1 * l_derivative
#
# return (v_mu1)
# }
|
/scratch/gouwar.j/cran-all/cranData/CRABS/R/compute.extinction.R
|
congruent.speciation <- function( model, func.mu, ode_solver ) {
lambda0 = model$lambda(0)
times = model$times
v_p_div = sapply(times, model$p.delta )
delta_t = model$delta_t
v_ext1 <- sapply(times, func.mu)
if(ode_solver){
lambda_1 <- compute.speciation.ode(model, func.mu)
}else{
v_lambda1 <- compute.speciation( lambda0, v_p_div, v_ext1, delta_t )
lambda_1 <- approxfun( times, v_lambda1)
}
mu_1 = func.mu
if (length(mu_1(times)) == 1){
mu_1 = Vectorize(mu_1)
}
## create the parameter transformations as rate functions
func_div <- function(t) lambda_1(t) - mu_1(t)
func_turn <- function(t) mu_1(t) / lambda_1(t)
res <- list(lambda = lambda_1,
mu = mu_1,
delta = func_div,
epsilon = func_turn,
p.delta = model$p.delta,
times = times,
max.t = model$max.t,
delta_t = model$delta_t,
num.intervals = model$num.intervals)
class(res) <- c("CRABS")
return (res)
}
compute.speciation <- function( lambda0, v_p_div, v_ext1, delta_t ) {
NUM_TIME_DISCRETIZATIONS = length(v_p_div)
### compute the new lambda
v_lambda1 <- c()
v_lambda1[1] <- lambda0
for (j in 2:NUM_TIME_DISCRETIZATIONS) {
# Finite backward difference
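    # Discretizing r_p = lambda - mu + lambda'/lambda with lambda' ~ (lambda_j - lambda_{j-1})/delta_t
    # gives a quadratic in lambda_j; `tmp` is its discriminant and the positive root is taken below.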
tmp <- 4*v_lambda1[j-1]*delta_t + (v_p_div[j]*delta_t+v_ext1[j]*delta_t-1)^2
v_lambda1[j] <- (sqrt(tmp) + v_p_div[j]*delta_t+v_ext1[j]*delta_t-1) / (2*delta_t)
}
return (v_lambda1)
}
compute.speciation.ode <- function( model, mu ) {
newLambda <- function(t, state, parameters){
mu <- parameters["mu"]
lambda <- parameters["lambda"]
Lambda <- state["Lambda"]
dLambda = -Lambda^2 + Lambda *(model$p.delta(t) + mu(t))
return(list(dLambda))
}
lambda0 <- model$lambda(0.0)
parameters <- c(rp = model$p.delta,
mu = mu,
lambda = model$lambda)
state <- c(Lambda = lambda0)
  res <- as.data.frame(deSolve::radau(y = state, times = model$times, func = newLambda, parms = parameters,
                                      atol = 1e-06, rtol = 1e-06))
lambda <- approxfun(res$time, res$Lambda)
return (lambda)
}
|
/scratch/gouwar.j/cran-all/cranData/CRABS/R/compute.speciation.R
|
#' Create a set of congruent models
#'
#' @param model The reference model. An object of class "CRABS"
#' @param mus A list of extinction-rate functions
#' @param lambdas A list of speciation-rate functions
#' @param keep_ref Whether or not to keep the reference model in the congruent set
#' @param ode_solver Whether to use a numerical ODE solver to solve for lambda
#'
#' @return An object of class "CRABSset"
#' @export
#'
#' @examples
#'
#' data(primates_ebd)
#' lambda <- approxfun(primates_ebd$time, primates_ebd$lambda)
#' mu <- approxfun(primates_ebd$time, primates_ebd$mu)
#'
#' ## A reference model
#' times <- seq(0, max(primates_ebd$time), length.out = 500)
#' model <- create.model(lambda, mu, times = times)
#'
#' mu1 <- lapply(c(0.5, 1.5, 3.0), function(m) function(t) m)
#'
#' model_set1 <- congruent.models(model, mus = mu1)
#'
#' model_set1
#'
#' lambda0 <- lambda(0.0) ## Speciation rates must all be equal at the present
#' bs <- c(0.0, 0.01, 0.02)
#' lambda1 <- lapply(bs, function(b) function(t) lambda0 + b*t)
#'
#' model_set2 <- congruent.models(model, lambdas = lambda1)
#'
#' model_set2
congruent.models <- function(model, mus = NULL, lambdas = NULL, keep_ref = TRUE, ode_solver = TRUE){
lambda0 <- model$lambda(0)
times <- model$times
delta_t <- model$delta_t
models <- list()
if (keep_ref){
models[[1]] <- model
names(models) <- c("reference")
}
if(missing(lambdas)){
if(missing(mus)){
stop("must provide either mu(s) or lambda(s)")
}
}
models1 <- list()
model_idx <- 1
# Use mus to generate model
if (!is.null(mus) && length(mus) > 0){
if(length(mus) == 1 ){
mus <- list(mus)
}
for (i in seq_along(mus)){
models1[[i]] <- congruent.speciation(model, mus[[i]], ode_solver = ode_solver)
if (!is.null(names(mus)[[i]])){
names(models1)[[i]] <- names(mus)[[i]]
}else{
names(models1)[[i]] <- paste0("model", model_idx)
model_idx <- model_idx +1
}
}
}
models2 <- list()
# use lambdas to generate model
if (!is.null(lambdas) && length(lambdas) > 0){
if(length(lambdas) == 1){
lambdas <- list(lambdas)
}
for ( i in seq_along(lambdas)){
lambda <- lambdas[[i]]
      if (abs(lambda(min(times)) - model$lambda(min(times))) >= 1e-4){
stop("Initial speciation rate (at present = tips) must be equal across the congruence set.")
}
models2[[i]] <- congruent.extinction(model, lambdas[[i]])
if (!is.null(names(lambdas)[[i]])){
names(models2)[[i]] <- names(lambdas)[[i]]
}else{
names(models2)[[i]] <- paste0("model", model_idx)
model_idx <- model_idx +1
}
}
}
models <- c(models, models1, models2)
class(models) <- c("list", "CRABSset")
return(models)
}
#' Create a set of congruent models
#'
#' @param model The reference model. An object of class "CRABS"
#' @param mus A list of extinction-rate functions
#' @param lambdas A list of speciation-rate functions
#' @param keep_ref Whether or not to keep the reference model in the congruent set
#'
#' @return An object of class "CRABSset"
#' @export
#'
#' @examples
#'
#' # This function should not have to be used externally.
#' # It is called in the CRABS function `sample.congruence.class` when `rate.type=="joint"`.
joint.congruent.models <- function(model, mus, lambdas, keep_ref = TRUE){
models <- list()
model_idx <- 1
if(length(mus) == 1){
mus <- list(mus)
lambdas <- list(lambdas)
}
for (i in seq_along(mus)){
# define the net diversification and turnover rates based on speciation and extinction
func_div <- function(t) lambdas[[i]](t) - mus[[i]](t)
func_turn <- function(t) mus[[i]](t) / lambdas[[i]](t)
models[[i]] <- list(lambda = lambdas[[i]],
mu = mus[[i]],
delta = func_div,
epsilon = func_turn,
p.delta = model$p.delta,
times = model$times,
max.t = model$max.t,
delta_t = model$delta_t,
num.intervals = model$num.intervals)
class(models[[i]]) <- c("CRABS")
if (!is.null(names(mus)[[i]])){
names(models)[[i]] <- names(mus)[[i]]
}else{
names(models)[[i]] <- paste0("model", model_idx)
model_idx <- model_idx +1
}
}
if (keep_ref){
models <- c(list(reference=model), models)
}
class(models) <- c("list", "CRABSset")
return(models)
}
|
/scratch/gouwar.j/cran-all/cranData/CRABS/R/congruent.models.R
|
#' Computes the congruent class, i.e., the pulled rates.
#'
#' @param func_spec0 The speciation rate function (measured in time before present).
#' @param func_ext0 The extinction rate function (measured in time before present).
#' @param times the time knots for the piecewise-linear rate functions
#' @param func_p_spec the pulled speciation rate function
#' @param func_p_div the pulled net-diversification rate function
#' @return A list of rate functions representing this congruence class.
#' @export
#' @examples
#' lambda1 <- function(t) exp(0.3*t) - 0.5*t + 1
#' mu1 <- function(t) exp(0.3*t) - 0.2*t + 0.2
#'
#' model1 <- create.model(lambda1, mu1, times = seq(0, 5, by = 0.005))
#'
#' model1
#'
#' data("primates_ebd")
#'
#' lambda2 <- approxfun(primates_ebd[["time"]], primates_ebd[["lambda"]])
#' mu2 <- approxfun(primates_ebd[["time"]], primates_ebd[["mu"]])
#' model2 <- create.model(lambda2, mu2, primates_ebd[["time"]])
#'
#' model2
create.model <- function(func_spec0, func_ext0, times = seq(from = 0, to = 5, by = 0.005), func_p_spec = NULL, func_p_div = NULL) {
## create our vector of times (i.e., change-points)
## for the piecewise linear approximation
max.t <- max(times)
num.intervals <- length(times)
delta_t <- times[2] - times[1]
## create the vector of rate values at the change points
v_spec0 <- func_spec0( times )
if(length(v_spec0) == 1){
v_spec0 <- rep(v_spec0, num.intervals)
func_spec0 <- Vectorize(func_spec0)
}
v_ext0 <- func_ext0( times )
if(length(v_ext0) == 1){
v_ext0 <- rep(v_ext0, num.intervals)
func_ext0 <- Vectorize(func_ext0)
}
## create the parameter transformations as rate functions
func_div <- function(t) func_spec0(t) - func_ext0(t)
func_turn <- function(t) func_ext0(t) / func_spec0(t)
## compute the pulled diversification rate
if(missing("func_p_div")){
#v_p_div <- compute.pulled.diversification( v_spec0, v_ext0, delta_t )
back <- pracma::fderiv(func_spec0, times, method = "backward")
forw <- pracma::fderiv(func_spec0, times, method = "forward")
m <- rbind(forw, back)
lambda_deriv <- apply(m, 2, mean, na.rm = TRUE)
v_p_div <- v_spec0 - v_ext0 + (1/v_spec0) * lambda_deriv
func_p_div <- approxfun(times,v_p_div)
}
res = list(lambda=func_spec0,
mu=func_ext0,
delta=func_div,
epsilon=func_turn,
p.delta=func_p_div,
times=times,
max.t = max.t,
delta_t = delta_t,
num.intervals = num.intervals)
class(res) <- c("CRABS")
return (res)
}
|
/scratch/gouwar.j/cran-all/cranData/CRABS/R/create.model.R
|
#' Primates phylogenetic tree
#'
#' The example tree taken from the RevBayes tutorial website
#'
#' @docType data
#'
#' @usage data(primates)
#'
"primates"
#' RevBayes Primates birth-death model
#'
#' The results of a Bayesian horseshoe Markov random field (HSMRF) episodic birth-death model, fitted on the primates tree. One hundred episodes. Each estimate is the posterior median. The time unit is millions of years before the present.
#'
#' @docType data
#'
#' @usage data(primates_ebd)
#'
"primates_ebd"
#' Primates birth-death model
#'
#' See \code{?primates_ebd}, but including posterior samples instead of a summary.
#'
#' @docType data
#'
#' @usage data(primates_ebd_log)
#'
"primates_ebd_log"
#' TESS Primates birth-death model
#'
#' The results of a Bayesian episodic birth-death model in the R-package TESS, fitted on the primates tree. One hundred episodes. Each estimate is the posterior median. The time unit is millions of years before the present.
#'
#' @docType data
#'
#' @usage data(primates_ebd_tess)
#'
"primates_ebd_tess"
#' TreePar Primates birth-death model
#'
#' The results of a birth-death model in the R-package TreePar, fitted on the primates tree. The estimated model has two epochs, with rates that are maximum-likelihood estimates. The time unit is millions of years before the present.
#'
#' @docType data
#'
#' @usage data(primates_ebd_treepar)
#'
"primates_ebd_treepar"
|
/scratch/gouwar.j/cran-all/cranData/CRABS/R/data.R
|
# Filter with increasing regularity thresholds
regularity_filtering <- function(filtering_fraction, samples, samples_penalty){
eligible_models <- names(samples_penalty[samples_penalty<=quantile(samples_penalty, filtering_fraction)])
  # take at most 50 samples among the models below the regularity threshold
n_samples <- min(length(eligible_models), 50)
filtered_samples <- samples[sample(eligible_models, n_samples)]
if (!("reference" %in% names(filtered_samples))){
filtered_samples[[1]] <- samples$reference
names(filtered_samples)[1] <- "reference"
}
class(filtered_samples) <- c("list", "CRABSset")
return(filtered_samples)
}
plot_rate <- function(samples_list, i, rate, xlab="time before present",
main="Congruent diversification histories"){
palettes = c("lambda" = "Blues",
"mu" = "orange",
"delta" = "purple",
"epsilon" = "Greens")
ylabs = c("lambda" = "Speciation rate",
"mu" = "Extinction rate",
"delta" = "Net-diversification rate",
"epsilon" = "Turnover rate")
sample_i <- samples_list[[i]]
sample_i <- sample_i[sort(names(sample_i))]
rate_min <- 0.0
rate_max <- 0.99
for (i in seq_along(samples_list)){
samples_set <- samples_list[[i]]
for (j in seq_along(samples_set)){
rate_min <- min(rate_min, min(get_rates(samples_set[[j]], rate = rate)))
rate_max <- max(rate_max, max(get_rates(samples_set[[j]], rate = rate)))
}
}
rate_max <- rate_max * 1.1
rates_matrix <- sapply(sample_i, get_rates, rate=rate)
cols <- c(head(sequential_hcl(palette=palettes[rate], n=ncol(rates_matrix)),n=-1), "black")
matplot(sample_i$reference$times, rates_matrix, type="l", lty=c(rep(5,ncol(rates_matrix)-1),1),
lwd=c(rep(1,ncol(rates_matrix)-1),1.5),
col = cols, ylim=c(rate_min,rate_max), xlim=rev(range(sample_i$reference$times)),
xlab=xlab, ylab=ylabs[rate], main=main)
}
get_rates <- function(samples, rate) {
res <- samples[[rate]](samples$times)
return(res)
}
#' Plots the rate functions after filtering them according to a given penalty and predefined thresholds.
#'
#' @param samples A list of (congruent) CRABS models
#' @param filtering_fractions A vector of thresholds for filtering, as fractions of the most regular trajectories.
#' @param penalty The choice of penalty, among "L1", "L2" and "L1_derivative" (penalty on derivative shifts).
#' @param rates A vector of rate(s) to be plotted, among "lambda" (speciation), "mu" (extinction), "delta" (net-diversification) and "epsilon" (turnover).
#'
#' @return Plots an array of rate trajectories for the chosen rates and thresholds.
#'
#' @export
#' @examples
#' data("primates_ebd")
#' set.seed(123)
#'
#' l <- approxfun(primates_ebd[["time"]], primates_ebd[["lambda"]])
#' mu <- approxfun(primates_ebd[["time"]], primates_ebd[["mu"]])
#' times <- primates_ebd[["time"]]
#'
#' model <- create.model(l, mu, times)
#'
#' sample.joint.rates <- function(n) {
#' sample.basic.models.joint(times = times,
#' p.delta = model$p.delta,
#' beta.param = c(0.5,0.3),
#' lambda0 = l(0.0),
#' mu0.median = mu(0.0))
#' }
#'
#' joint.samples <- sample.congruence.class(model = model,
#' num.samples = 100,
#' rate.type = "joint",
#' sample.joint.rates = sample.joint.rates)
#'
#' full.plot.regularity.thresholds(joint.samples)
full.plot.regularity.thresholds <- function(samples,
filtering_fractions=c(0.01, 0.05, 0.2, 0.9),
penalty="L1",
rates=c("lambda", "mu")){
# Compute a regularity penalty
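  # L1: sum of absolute first differences of lambda and mu across the time knots;
  # L2: sum of squared first differences; L1_derivative: L1 norm of the second differences,
  # i.e. a penalty on changes in slope.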
if (penalty=="L1"){
penalty <- function(model){sum(abs(diff(model$lambda(model$times)))) + sum(abs(diff(model$mu(model$times))))}
}else if (penalty=="L2"){
penalty <- function(model){sum(diff(model$lambda(model$times))**2) + sum(diff(model$mu(model$times))**2)}
}else if (penalty=="L1_derivative"){
penalty <- function(model){
return (sum(abs(diff(model$lambda(model$times[-1]))-diff(model$lambda(model$times[-length(model$times)])))) +
sum(abs(diff(model$mu(model$times[-1])) -diff(model$mu(model$times[-length(model$times)])))))
}
}else{
stop("Invalid \"penalty\"")
}
samples_penalty <- sapply(samples, penalty)
filtered_samples_list <- lapply(filtering_fractions, regularity_filtering, samples, samples_penalty)
lr = length(rates)
lf = length(filtering_fractions)
par(mfrow=c(lr,lf), mar=c(2.6, 3.6, 2, 0.6), mgp=c(1.5, 0.5, 0))
for (j in 1:lr){
for (i in 1:lf){
main <- ifelse(j==1, paste(filtering_fractions[i]*100, "% most regular trajectories"), "")
plot_rate(filtered_samples_list, i, rates[j], xlab=ifelse(j==lr, "Time (Mya)", ""), main=main)
}
}
}
|
/scratch/gouwar.j/cran-all/cranData/CRABS/R/full.plot.regularity.thresholds.R
|
#' @importFrom utils globalVariables
globalVariables(c("rate", "times", "Time", "name", "delta_rate", "unit", "direction", "value", "count"))
|
/scratch/gouwar.j/cran-all/cranData/CRABS/R/globals.R
|
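## Deterministic (expected) lineage-through-time curve used by crabs.loglikelihood():
## N(t) is the expected number of lineages and E(t) the probability that a lineage alive
## at time t leaves no sampled descendant; the reconstructed LTT is M(t) = N(t) * (1 - E(t)).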
dLTT <- function(model, times, N0, rho = 1.0){
lambda0 <- model$lambda(0.0)
odeN <- function(time, state, parameters, lambda, mu){
l <- lambda(time)
m <- mu(time)
with(as.list(c(state, parameters)), {
dN = N * (m - l)
dE = m - E * (l + m) + E^2 * l
return(list(c(dN, dE), l, m))
})
}
parameters <- list()
state <- c("N" = N0 / rho, "E" = 1 - rho)
res <- as.data.frame(deSolve::radau(y = state, times = times,
func = odeN, parms = parameters,
lambda = model$lambda, mu = model$mu))
M <- res$N * (1 - res$E)
return(M)
}
#' Compute likelihood
#'
#' @param phy an object of class "phylo"
#' @param model an object of class "CRABS"
#' @param rho the taxon sampling fraction
#'
#' @return the log-likelihood of the tree given the model
#' @export
#'
#' @examples
#' library(ape)
#' lambda <- function(t) exp(0.3*t) - 0.5*t
#' mu <- function(t) exp(0.3*t) - 0.2*t - 0.8
#'
#' model <- create.model(lambda, mu, times = seq(0, 3, by = 0.005))
#'
#' set.seed(123)
#' phy <- rcoal(25)
#'
#' crabs.loglikelihood(phy, model)
crabs.loglikelihood <- function(phy, model, rho = 1.0){
times <- model$times
N0 <- length(phy$tip.label)
M <- approxfun(times, dLTT(model, times, N0, rho))
bt <- branching.times(phy)
n <- length(bt)
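  # Log-likelihood from the deterministic LTT M(t):
  # logL = log M(t_root) - (n + 1) * log M(0) + sum_i log(-M'(t_i)) over the non-root branching times.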
dM = tail(fderiv(M, bt), n = -1)
logL <- log(M(bt[1])) - (n+1)*log(M(0.0)) + sum(log(-dM))
return(logL)
}
|
/scratch/gouwar.j/cran-all/cranData/CRABS/R/likelihood.R
|
#' Plots the rate functions including the pulled rates.
#'
#' @param x An object of class "CRABS"
#' @param ... other parameters
#'
#' @return a patchwork object
#'
#' @export
#' @examples
#'
#' data(primates_ebd)
#' lambda <- approxfun(primates_ebd$time, primates_ebd$lambda)
#' mu <- approxfun(primates_ebd$time, primates_ebd$mu)
#' times <- seq(0, max(primates_ebd$time), length.out = 500)
#'
#' model <- create.model(lambda, mu, times = times)
#'
#' plot(model)
plot.CRABS <- function( x, ... ) {
df <- model2df(x)
df_lambda <- df %>% filter(grepl("speciation", rate, ignore.case = TRUE))
df_mu <- df %>% filter(rate %in% c("Extinction", "Pulled extinction"))
df_delta <- df %>% filter(grepl("net-diversification", rate, ignore.case = TRUE))
df_relext <- df %>% filter(rate == "Relative extinction")
ylim <- range(bind_rows(df_lambda, df_mu)[["value"]])
p1 <- df_lambda %>%
ggplot(aes(x = Time, y = value, linetype = rate)) +
theme_classic() +
geom_line(color = "darkblue") +
scale_x_reverse() +
theme(legend.position = c(0.5, 0.6),
legend.background = element_blank(),
legend.title = element_blank(),
axis.title.x = element_blank(),
plot.title = element_text(hjust = 0.5)) +
scale_linetype_manual(breaks=c("Speciation", "Pulled speciation"), values=c(1,5)) +
ylab("rate") +
labs(title = "Speciation") +
ylim(ylim)
p2 <- df_mu %>%
ggplot(aes(x = Time, y = value, linetype = rate)) +
theme_classic() +
geom_line(color = "orange") +
scale_x_reverse() +
theme(legend.position = c(0.5, 0.6),
legend.background = element_blank(),
plot.title = element_text(hjust = 0.5),
axis.title.y = element_blank(),
axis.title.x = element_blank(),
legend.title = element_blank()) +
scale_linetype_manual(breaks=c("Extinction", "Pulled extinction"), values=c(1,5)) +
ylab("rate") +
labs(title = "Extinction") +
ylim(ylim)
p3 <- df_delta %>%
ggplot(aes(x = Time, y = value, linetype = rate)) +
theme_classic() +
geom_line(color = "purple") +
scale_x_reverse() +
theme(legend.position = c(0.5, 0.5),
plot.title = element_text(hjust = 0.5),
legend.background = element_blank(),
legend.title = element_blank()) +
scale_linetype_manual(breaks=c("Net-diversification", "Pulled net-diversification"), values=c(1,5)) +
labs(title = "Net-diversification") +
ylab("rate") +
xlab("time before present")
p4 <- df_relext %>%
ggplot(aes(x = Time, y = value)) +
theme_classic() +
geom_line(color = "darkgreen") +
scale_x_reverse() +
theme(plot.title = element_text(hjust = 0.5),
axis.title.y = element_blank(),
legend.title = element_blank()) +
labs(title = "Relative extinction") +
ylab("rate") +
xlab("time before present")
p <- p1 + p2 +
p3 + p4 +
plot_layout(ncol = 2)
return(p)
}
#' Print method for CRABS object
#'
#' @param x and object of class CRABS
#' @param ... other arguments
#'
#' @return nothing
#'
#' @export
#' @examples
#' data(primates_ebd)
#' lambda <- approxfun(primates_ebd$time, primates_ebd$lambda)
#' mu <- approxfun(primates_ebd$time, primates_ebd$mu)
#' times <- seq(0, max(primates_ebd$time), length.out = 500)
#'
#' model <- create.model(lambda, mu, times = times)
#'
#' print(model)
print.CRABS <- function(x, ...){
cat("Piecewise-linear birth-death model\n")
cat("Knots:", length(x$times), "\n")
cat("Delta-tau:", x$delta_t, "\n")
p <- plot.CRABS(x, ...)
plot(p)
invisible()
}
|
/scratch/gouwar.j/cran-all/cranData/CRABS/R/plot.CRABS.R
|
#' Plots the rate functions
#'
#' @param x A list of congruent birth-death x
#' @param ... other parameters
#'
#' @return a patchwork object object
#'
#' @export
#' @examples
#' data(primates_ebd)
#' lambda <- approxfun(primates_ebd$time, primates_ebd$lambda)
#' mu <- approxfun(primates_ebd$time, primates_ebd$mu)
#' times <- seq(0, max(primates_ebd$time), length.out = 500)
#'
#' model <- create.model(lambda, mu, times = times)
#'
#' mus <- list(function(t) 0.2 + exp(0.01*t),
#' function(t) 0.2 + sin(0.35*t) + 0.1*t,
#' function(t) 1.0,
#' function(t) 0.5 + 0.2*t)
#' models <- congruent.models(model, mus = mus)
#'
#' plot(models)
plot.CRABSset <- function( x, ... ) {
dfs <- lapply(x, model2df)
## Add names columns
for (i in seq_along(dfs)){
df <- dfs[[i]]
df$name <- names(dfs)[i]
dfs[[i]] <- df
}
df <- bind_rows(dfs)
df_lambda <- df %>% filter(rate == "Speciation")
df_mu <- df %>% filter(rate == "Extinction")
df_delta <- df %>% filter(rate == "Net-diversification")
df_relext <- df %>% filter(rate == "Relative extinction")
ylim <- range(bind_rows(df_lambda, df_mu)[["value"]])
## Speciation rate
col_lambda <- c(head(colorspace::sequential_hcl(palette = "Blues", n = length(unique(df_lambda$name))), n = -1), "black")
p1 <- df_lambda %>%
ggplot(aes(x = Time, y = value, color = name)) +
scale_x_reverse() +
theme_classic() +
geom_line(data=subset(df_lambda, name == "reference"), linetype=1) +
geom_line(data=subset(df_lambda, name != "reference"), linetype="longdash") +
labs(title = "Speciation") +
theme(legend.position = "NA",
axis.title.x = element_blank(),
) +
ylim(ylim) +
ylab("rate") +
theme(plot.title = element_text(hjust = 0.5)) +
scale_color_manual(values = col_lambda)
## Extinction rate
col_mu <- c(head(colorspace::sequential_hcl(palette = "orange", n = length(unique(df_mu$name))), n = -1), "black")
p2 <- df_mu %>%
ggplot(aes(x = Time, y = value, color = name)) +
scale_x_reverse() +
theme_classic() +
geom_line(data=subset(df_mu, name == "reference"), linetype=1) +
geom_line(data=subset(df_mu, name != "reference"), linetype="longdash") +
ggtitle("Extinction") +
ylim(ylim) +
theme(legend.position = "NA",
axis.title.y = element_blank(),
axis.title.x = element_blank(),
) +
theme(plot.title = element_text(hjust = 0.5)) +
scale_color_manual(values = col_mu)
## Net-diversification rate
col_delta <- c(head(colorspace::sequential_hcl(palette = "purple", n = length(unique(df_delta$name))), n = -1), "black")
p3 <- df_delta %>%
ggplot(aes(x = Time, y = value, color = name)) +
scale_x_reverse() +
theme_classic() +
geom_line(data=subset(df_delta, name == "reference"), linetype=1) +
geom_line(data=subset(df_delta, name != "reference"), linetype="longdash") +
ggtitle("Net-diversification") +
theme(legend.position = "NA",
) +
ylab("rate") +
xlab("time before present") +
theme(plot.title = element_text(hjust = 0.5)) +
scale_color_manual(values = col_delta)
## Relative-extinction rate
col_relext <- c(head(colorspace::sequential_hcl(palette = "green", n = length(unique(df_relext$name))), n = -1), "black")
  p4 <- df_relext %>%
ggplot(aes(x = Time, y = value, color = name)) +
scale_x_reverse() +
theme_classic() +
geom_line(data=subset(df_relext, name == "reference"), linetype=1) +
geom_line(data=subset(df_relext, name != "reference"), linetype="longdash") +
ggtitle("Relative extinction") +
theme(legend.position = "NA",
axis.title.y = element_blank(),
) +
xlab("time before present") +
theme(plot.title = element_text(hjust = 0.5)) +
scale_color_manual(values = col_relext)
p <- p1 + p2 +
p3 + p4 +
plot_layout(ncol = 2)
return(p)
}
#' Print method for CRABSset object
#'
#' @param x an object of class CRABSset
#' @param ... other arguments
#'
#' @return nothing
#'
#' @export
#' @examples
#' data(primates_ebd)
#' lambda <- approxfun(primates_ebd$time, primates_ebd$lambda)
#' mu <- approxfun(primates_ebd$time, primates_ebd$mu)
#' times <- seq(0, max(primates_ebd$time), length.out = 500)
#'
#' model <- create.model(lambda, mu, times = times)
#'
#' mus <- list(function(t) 0.2 + exp(0.01*t),
#' function(t) 0.2 + sin(0.35*t) + 0.1*t,
#' function(t) 1.0,
#' function(t) 0.5 + 0.2*t)
#' models <- congruent.models(model, mus = mus)
#'
#' print(models)
print.CRABSset <- function(x, ...){
cat("A congruent set of piecewise-linear birth-death models\n")
cat("Knots:", length(x[[1]]$times), "\n")
cat("Delta-tau:", x[[1]]$delta_t, "\n")
cat("n_models: ", length(x), "\n")
if (length(x) <= 50){
p <- plot.CRABSset(x)
plot(p)
}else{
cat("Your set is too large (>50), and won't be plotted.")
}
invisible()
}
|
/scratch/gouwar.j/cran-all/cranData/CRABS/R/plot.CRABSset.R
|
#' print.CRABSsets
#'
#' @param x a list of (congruent) CRABS sets
#' @param ... additional parameters
#'
#' @return nothing
#' @export
#'
#' @examples
#' data(primates_ebd_log)
#'
#' posterior <- read.RevBayes(primates_ebd_log, max_t = 65, n_samples = 10)
#'
#' samples <- sample.congruence.class.posterior(posterior,
#' num.samples = 5,
#' rate.type = "extinction",
#' rate0.median = 0.1,
#' model = "MRF",
#' max.rate = 1.0)
#'
#' print(samples)
print.CRABSsets <- function(x, ...){
cat("A group of ", length(x), "CRABS sets.\n")
cat("Knots:", length(x[[1]][[1]]$times), "\n")
cat("Delta-tau:", x[[1]][[1]]$delta_t, "\n")
invisible()
}
|
/scratch/gouwar.j/cran-all/cranData/CRABS/R/print.CRABSsets.R
|
# compute.pulled.diversification <- function( v_spec0, v_ext0, delta_t ) {
#
# # compute the derivatives
# l <- head(v_spec0, n = -1) #v_spec0[-length(v_ext0)]
# l_plus_one <- tail(v_spec0, n = -1) #v_spec0[-1]
# l_derivative <- (l_plus_one - l) / delta_t
# l_derivative <- c(l_derivative[1], l_derivative)
#
# # finally, add the 1/lambda * lambda dt to the pulled diversification rate
# v_p_div <- v_spec0 - v_ext0 + (1/v_spec0) * l_derivative
#
# return (v_p_div)
# }
|
/scratch/gouwar.j/cran-all/cranData/CRABS/R/pulled.diversification.R
|
## From equation (57) in Louca & Pennell (2020) supplement. Page 10.
pulled.speciation <- function( model, rho = 1.0 ) {
pulled.spec <- function(t, state, parameters){
Lp <- state["Lp"]
rp <- parameters["rp"]
dLp = Lp * (model$p.delta(t) - Lp)
return(list(dLp))
}
lambda0 <- model$lambda(0.0)
parameters <- c(rp = model$p.delta)
state <- c(Lp = rho*lambda0)
  res <- as.data.frame(deSolve::radau(y = state, times = model$times, func = pulled.spec, parms = parameters,
                                      atol = 1e-06, rtol = 1e-06))
Lp <- approxfun(res$time, res$Lp)
return (Lp)
}
|
/scratch/gouwar.j/cran-all/cranData/CRABS/R/pulled.speciation.R
|
#' read RevBayes log file
#'
#' @param x path to log, or data frame
#' @param n_times number of time knots
#' @param max_t tree height
#' @param n_samples first n posterior samples
#' @param summary_type either "none" for all the posterior samples, or "mean" or "median" for the posterior mean/median
#' @param speciation_prefix the prefix string for the speciation rate column names. Must be unique
#' @param extinction_prefix the prefix string for the extinction rate column names. Must be unique
#' @usage read.RevBayes(x, n_times, max_t = 100, n_samples = 20, summary_type = "none",
#' extinction_prefix = "extinction_rate.", speciation_prefix = "speciation_rate.")
#'
#' @return a set of CRABS models, each being a sample in the posterior
#' @export
#'
#' @examples
#' data(primates_ebd_log)
#' posterior <- read.RevBayes(primates_ebd_log, n_times = 500, max_t = 65, n_samples = 20)
read.RevBayes <- function(x,
n_times,
max_t = 100,
n_samples = 20,
summary_type = "none",
extinction_prefix = "extinction_rate.",
speciation_prefix = "speciation_rate."){
if(is.character(x)){
samples <- read.table(file=x,
stringsAsFactors=FALSE,
header=TRUE)
}else if(is.data.frame(x)){
samples <- x
}
## Assume episodes are sorted, i.e. [1] the most recent one comes first, then [2], then [3] etc.
speciation <- samples[, startsWith(names(samples), speciation_prefix)]
extinction <- samples[, startsWith(names(samples), extinction_prefix)]
n_epochs <- ncol(speciation)
n_total <- nrow(speciation)
times_rb <- seq(0, max_t, length.out = n_epochs)
if(missing("n_times")){
n_times <- length(times_rb)
}
iter <- floor(seq(1, nrow(samples), length.out = n_samples))
i <- 1
if (summary_type == "none"){
pb <- txtProgressBar(min = 1, max = length(iter), style = 3)
setTxtProgressBar(pb, 0)
models <- list()
for (it in iter){
setTxtProgressBar(pb, i)
if (any(is.na(speciation[it,])) || any(is.na(extinction[it,])) || any(speciation[it,] < 0) || any(extinction[it,] < 0)){
warning(paste("Posterior sample", it," containing negative or NA rate values. skipping."))
}else{
lambda <- approxfun(times_rb, speciation[it,])
mu <- approxfun(times_rb, extinction[it,])
times <- seq(0, max(times_rb), length.out = n_times)
model <- create.model( lambda, mu, times = times)
models[[i]] <- model
i <- i + 1
}
}
close(pb)
cat("\n")
names(models) <- paste0("posterior", seq_along(models))
class(models) <- c("list", "CRABSposterior")
return(models)
}else{
speciation_summary <- apply(speciation, 2, summary_type)
extinction_summary <- apply(extinction, 2, summary_type)
lambda <- approxfun(times_rb, speciation_summary)
mu <- approxfun(times_rb, extinction_summary)
times <- seq(0, max(times_rb), length.out = n_times)
model <- create.model( lambda, mu, times = times)
return(model)
}
}
#' Print method for CRABSposterior objects
#'
#' @param x a list of CRABS objects
#' @param ... additional parameters
#'
#' @return nothing
#' @export
#'
#' @examples
#' data(primates_ebd_log)
#' posterior <- read.RevBayes(primates_ebd_log, max_t = 65, n_samples = 20)
#' print(posterior)
print.CRABSposterior <- function(x, ...){
cat("Posterior sample\n")
cat("Knots:", length(x[[1]]$times), "\n")
cat("Delta-tau:", x[[1]]$delta_t, "\n")
#p <- plot.CRABS(x, ...)
#plot(p)
invisible()
}
|
/scratch/gouwar.j/cran-all/cranData/CRABS/R/read.RevBayes.R
|
#' Stochastic exploration of congruent models.
#'
#' @param model the reference model, an object of class "CRABS"
#' @param num.samples The number of samples to be drawn
#' @param rate.type either "extinction", "speciation", "both" or "joint"
#' @param sample.speciation.rates a function that when called returns a speciation rate function
#' @param sample.extinction.rates a function that when called returns a extinction rate function
#' @param sample.joint.rates a function that when called returns a list with a speciation rate function and an extinction rate function
#' @return A named list with congruent rates.
#' @export
#' @examples
#' data("primates_ebd")
#'
#' l <- approxfun(primates_ebd[["time"]], primates_ebd[["lambda"]])
#' mu <- approxfun(primates_ebd[["time"]], primates_ebd[["mu"]])
#' times <- primates_ebd[["time"]]
#'
#' model <- create.model(l, mu, primates_ebd[["time"]])
#'
#' # Sampling extinction rates
#'
#' extinction_rate_samples <- function(){
#' res <- sample.basic.models(times = times,
#' rate0 = 0.05,
#' model = "MRF",
#' MRF.type = "HSMRF",
#' fc.mean = 2.0,
#' min.rate = 0.0,
#' max.rate = 1.0)
#' return(res)
#' }
#'
#' samples <- sample.congruence.class(model,
#' num.samples = 8,
#' rate.type = "extinction",
#' sample.extinction.rates = extinction_rate_samples)
#'
#' samples
#'
#' # Jointly sampling speciation and extinction rates
#'
#' sample.joint.rates <- function(n) {
#' sample.basic.models.joint(times = times,
#' p.delta = model$p.delta,
#' beta.param = c(0.5,0.3),
#' lambda0 = l(0.0),
#' mu0.median = mu(0.0))
#' }
#'
#' joint.samples <- sample.congruence.class(model = model,
#' num.samples = 40,
#' rate.type = "joint",
#' sample.joint.rates = sample.joint.rates)
#'
#' joint.samples
sample.congruence.class <- function(model,
num.samples,
rate.type="both",
sample.speciation.rates=NULL,
sample.extinction.rates=NULL,
sample.joint.rates=NULL) {
times <- model$times
v_p_div <- model$p.delta(model$times)
mus <- list()
lambdas <- list()
idx_lambda <- 1
idx_mu <- 1
for (i in 1:num.samples) {
if (rate.type == "joint"){
joint.rates = sample.joint.rates()
lambdas[[idx_lambda]] <- joint.rates$func_lambdas
idx_lambda <- idx_lambda + 1
mus[[idx_mu]] <- joint.rates$func_mus
idx_mu <- idx_mu +1
}
else if (rate.type == "extinction" || (rate.type == "both" && (i <= num.samples/2))) {
mus[[idx_mu]] <- sample.extinction.rates()
idx_mu <- idx_mu +1
} else{
lambdas[[idx_lambda]] <- sample.speciation.rates()
idx_lambda <- idx_lambda + 1
}
}
if (rate.type=="joint"){
models <- joint.congruent.models(model, mus=mus, lambdas=lambdas)
}else{
models <- congruent.models(model, mus=mus, lambdas=lambdas)
}
return (models)
}
|
/scratch/gouwar.j/cran-all/cranData/CRABS/R/sample.congruence.class.R
|
#' Stochastic exploration of congruent models for all samples in the posterior
#'
#' @description
#' This function takes a posterior sample as input: a list of CRABS objects.
#' It will then iterate over the samples, and for each posterior sample it will
#' sample from the posterior class. It will sample using the \code{\link{sample.basic.models}}
#' function, and all additional parameters are passed to \code{\link{sample.basic.models}}.
#'
#' @param posterior a list of CRABS model objects
#' @param mu0.equal whether to propose alternative mu starting at mu0 equal to the posterior sample. default to FALSE
#' @param rate0 rate0 allows the user to fix the extinction rate at the present to a single value. defaults to NULL, for drawing it randomly
#' @inheritParams sample.congruence.class
#' @inheritDotParams sample.basic.models
#'
#' @inherit sample.congruence.class return
#' @export
#'
#' @examples
#' data(primates_ebd_log)
#'
#' posterior <- read.RevBayes(primates_ebd_log, max_t = 65, n_samples = 10)
#'
#' samples <- sample.congruence.class.posterior(posterior,
#' num.samples = 5,
#' rate.type = "extinction",
#' rate0.median = 0.1,
#' model = "MRF",
#' max.rate = 1.0)
#'
#' print(samples)
sample.congruence.class.posterior <- function(posterior,
num.samples,
rate.type="extinction",
mu0.equal = FALSE,
rate0 = NULL,
...){
pb <- txtProgressBar(min = 1, max = length(posterior), style = 3)
res <- list()
for(i in seq_along(posterior)){
times <- posterior[[i]]$times
num.epochs <- length(posterior[[i]]$times)
if (mu0.equal){
rate0 <- posterior[[i]]$mu(0.0)
}
if(rate.type == "speciation"){
sample.extinction.rates <- NULL
sample.speciation.rates <- function () {sample.basic.models(times = times, rate0 = posterior[[i]]$lambda(0.0), ...)}
}else if(rate.type == "extinction"){
sample.extinction.rates <- function() {sample.basic.models(times = times, rate0 = rate0, ...)}
sample.speciation.rates <- NULL
}else if(rate.type == "both"){
sample.extinction.rates <- function() {sample.basic.models(times = times, rate0 = rate0, ...)}
sample.speciation.rates <- function() {sample.basic.models(times = times, rate0 = posterior[[i]]$lambda(0.0), ...)}
}else{
stop("rate.type must be either \"speciation\", \"extinction\", or \"both\".")
}
cg <- sample.congruence.class(posterior[[i]],
num.samples = num.samples,
rate.type = rate.type,
sample.speciation.rates = sample.speciation.rates,
sample.extinction.rates = sample.extinction.rates)
res[[i]] <- cg
setTxtProgressBar(pb, i)
}
close(pb)
names(res) <- paste0("posterior", seq_along(res))
class(res) <- c("CRABSsets", "list")
return(res)
}
|
/scratch/gouwar.j/cran-all/cranData/CRABS/R/sample.congruence.class.posterior.R
|
#' Sample custom functions through time.
#'
#' @param times the time knots
#' @param lambda0 The rate at present
#' @param rsample Function to sample next rate
#' @param rsample0 Function to sample rate at present
#' @param autocorrelated Should rates be autocorrelated?
#' @return Sampled rate vector
#' @export
#' @examples
#' data("primates_ebd")
#'
#' l <- approxfun(primates_ebd[["time"]], primates_ebd[["lambda"]])
#' mu <- approxfun(primates_ebd[["time"]], primates_ebd[["mu"]])
#' times <- primates_ebd[["time"]]
#'
#' model <- create.model(l, mu, times)
#'
#' rsample <- function(n) runif(n, min = 0.0, max = 0.9)
#' mu <- sample.rates(times, 0.5, rsample = rsample)
#'
#'
#' model_set <- congruent.models(model, mus = mu)
#'
#' model_set
sample.rates <- function(times,
lambda0=NULL,
rsample=NULL,
rsample0=NULL,
autocorrelated=FALSE) {
num.epochs <- length(times)
N_SAMPLES = ifelse( is.null(lambda0), num.epochs, num.epochs -1)
if ( autocorrelated == FALSE ) {
# we draw a bunch of iid samples
new_rates = rsample(N_SAMPLES)
if( is.null(lambda0) == FALSE ) {
new_rates = c(lambda0, new_rates)
}
} else {
# we draw autocorrelated rates
if ( is.null(lambda0) == FALSE ) {
new_rates = c( lambda0 )
} else if ( is.null(rsample0) == FALSE ) {
new_rates = c( rsample0() )
}
for ( i in 1:num.epochs ) {
new_rates[i+1] = rsample(new_rates[i])
}
}
func_rates <- approxfun(times, new_rates)
return (func_rates)
}
#' Samples simple increase/decrease models through time with noise.
#'
#' @param times the time knots
#' @param rate0 The rate at present, otherwise drawn randomly.
#' @param model "MRF" for pure MRF model, otherwise MRF has a trend of type "exponential","linear", or "episodic<n>"
#' @param direction "increase" or "decrease" (measured in past to present)
#' @param noisy If FALSE, no MRF noise is added to the trajectory
#' @param MRF.type "HSMRF" or "GMRF", type for stochastic noise.
#' @param monotonic Whether the curve should be forced to always move in one direction.
#' @param fc.mean Determines the average amount of change when drawing from the model.
#' @param rate0.median When not specified, rate at present is drawn from a lognormal distribution with this median.
#' @param rate0.logsd When not specified, rate at present is drawn from a lognormal distribution with this log-scale standard deviation.
#' @param mrf.sd.scale scale the sd of the mrf process up or down. defaults to 1.0
#' @param min.rate The minimum rate (rescaling done after drawing rates).
#' @param max.rate The maximum rate (rescaling done after drawing rates).
#' @return Speciation or extinction rate at a number of timepoints.
#' @export
#' @examples
#' data("primates_ebd")
#'
#' l <- approxfun(primates_ebd[["time"]], primates_ebd[["lambda"]])
#' mu <- approxfun(primates_ebd[["time"]], primates_ebd[["mu"]])
#' times <- primates_ebd[["time"]]
#'
#' model <- create.model(l, mu, times)
#'
#' mus <- sample.basic.models(times = times,
#' rate0 = 0.05,
#' "MRF",
#' MRF.type = "HSMRF",
#' fc.mean = 2.0,
#' min.rate = 0.0,
#' max.rate = 1.0)
#'
#' model_set <- congruent.models(model, mus = mus)
#'
#' model_set
sample.basic.models <- function(times,
rate0=NULL,
model="exponential",
direction="decrease",
noisy=TRUE,
MRF.type="HSMRF",
monotonic=FALSE,
fc.mean=3,
rate0.median=0.1,
rate0.logsd=1.17481,
mrf.sd.scale = 1.0,
min.rate=0,
max.rate=10) {
num.epochs <- length(times)
# recover()
# We use rejection sampling to find a model that fits within minimum and maximum rates
# To speed up sampling, we break this into three rejection sampling steps, handling separately the:
# 1) rate at present
# 2) fold change (given rate at present)
# 3) MRF noise
# rate at present
x0 <- min.rate - 10
if ( !is.null(rate0) ) {
if ( rate0 < min.rate || rate0 > max.rate ) {
stop("User-defined rate0 is outside [min.rate,max.rate].")
}
x0 <- rate0
} else {
while ( x0 < min.rate || x0 > max.rate ) {
x0 <- rlnorm(1,log(rate0.median),rate0.logsd)
}
}
# draw a fold change
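  # fc is 1 + Gamma(shape, rate) with rate = 1.25 and shape chosen so that E[fc] = fc.mean;
  # rejection sampling keeps x0 * fc within [min.rate, max.rate].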
fc_mean_adj <- fc.mean - 1
fc_rate <- 1.25
fc_shape <- fc_mean_adj * fc_rate
fc <- Inf
while ( x0 * fc < min.rate || x0 * fc > max.rate ) {
fc <- 1 + rgamma(1,fc_shape,fc_rate)
}
# cat(fc,"\n")
# cat(fc * x0,"\n")
if ( direction == "increase" ) {
fc <- 1/fc
} else if ( direction != "decrease" ) {
stop("Invalid \"direction\"")
}
# Deterministic component of trajectory
num_deltas <- num.epochs -1
x <- numeric(num.epochs)
x[1] <- x0
if ( model == "exponential" ) {
delta_deterministic <- rep(log(fc)/(num_deltas),num_deltas)
x[2:(num.epochs)] <- x[1] * exp(cumsum(delta_deterministic))
} else if ( model == "linear" ) {
delta_deterministic <- rep(((x0*fc)-x0)/(num_deltas),num_deltas)
x[2:(num.epochs)] <- x[1] + cumsum(delta_deterministic)
} else if ( grepl("episodic",model) ) {
delta_deterministic <- rep(0,num_deltas)
njumps <- as.numeric(gsub("episodic","",model)) - 1
if (njumps < 1) {
stop("Too few episodes in episodic model")
}
if ( njumps == 1 ) {
delta_deterministic[sample.int(num_deltas,1)] <- (fc * x0) - x0
} else {
delta_deterministic[sample.int(num_deltas,njumps)] <- ((fc * x0) - x0) * rdirichlet(njumps,1)
}
x[2:(num.epochs)] <- x[1] + cumsum(delta_deterministic)
} else if ( grepl("MRF",model) ) {
x <- rep(x0,num.epochs)
} else {
stop("Invalid \"model\"")
}
rates <- x
# Add noise
if ( noisy ) {
found_valid_model <- FALSE
while ( !found_valid_model ) {
# Get random component of rate changes
zeta <- 0
delta_stochastic <- rep(Inf,num_deltas)
noise <- rep(Inf,num_deltas)
# Avoid infinities, this loop should rarely trigger
while ( any(!is.finite(noise)) ) {
if ( MRF.type == "HSMRF" || MRF.type == "HSRF") {
zeta <- get.hsmrf.global.scale(num.epochs)
gamma <- min(abs(rcauchy(1,0,1)),1000) # avoid numerical instability
sigma <- abs(rcauchy(num_deltas,0,1))
delta_stochastic <- rnorm(num_deltas,0,sigma*gamma*zeta*mrf.sd.scale)
} else if ( MRF.type == "GMRF" ) {
zeta <- get.gmrf.global.scale(num.epochs)
gamma <- min(abs(rcauchy(1,0,1)),1000) # avoid numerical instability
delta_stochastic <- rnorm(num_deltas,0,gamma*zeta*mrf.sd.scale)
} else {
stop("Invalid \"MRF.type\"")
}
if ( monotonic ) {
delta_stochastic <- abs(delta_stochastic)
if ( direction == "increase" ) {
delta_stochastic <- -delta_stochastic
}
}
noise <- c(1,exp(cumsum(delta_stochastic)))
}
x_prop <- x * noise
if ( all(x_prop <= max.rate) && all(x_prop >= min.rate) ) {
rates <- x * noise
found_valid_model <- TRUE
}
}
}
func_rates <- approxfun(times, rates)
return (func_rates)
}
#' Jointly samples speciation and extinction trajectories through time, with noise.
#'
#' @param times the time knots
#' @param p.delta The pulled diversification rate function (measured in time before present).
#' @param lambda0 The speciation rate at present.
#' @param mu0 The extinction rate at present, otherwise drawn randomly.
#' @param MRF.type "HSMRF" or "GMRF", type for stochastic noise.
#' @param beta.param Parameters of the Beta distribution used to anchor the sampled trajectory (a random point of the trajectory is fixed at a value drawn from this Beta distribution).
#' @param mu0.median When not specified, extinction rate at present is drawn from a lognormal distribution with this median.
#' @param mu0.logsd When not specified, extinction rate at present is drawn from a lognormal distribution with this log-scale standard deviation.
#' @param mrf.sd.scale scale the sd of the mrf process up or down. defaults to 1.0
#' @param min.lambda The minimum speciation rate (rescaling done after drawing rates).
#' @param min.mu The minimum extinction rate (rescaling done after drawing rates).
#' @param max.lambda The maximum speciation rate (rescaling done after drawing rates).
#' @param max.mu The maximum extinction rate (rescaling done after drawing rates).
#' @param min.p The lower bound of parameter p's trajectory.
#' @param max.p The upper bound of parameter p's trajectory.
#' @return Speciation or extinction rate at a number of timepoints.
#' @export
#' @examples
#' data("primates_ebd")
#'
#' l <- approxfun(primates_ebd[["time"]], primates_ebd[["lambda"]])
#' mu <- approxfun(primates_ebd[["time"]], primates_ebd[["mu"]])
#' times <- primates_ebd[["time"]]
#'
#' model <- create.model(l, mu, times)
#'
#' sample.joint.rates <- function(n) {
#' sample.basic.models.joint(times = times,
#' p.delta = model$p.delta,
#' beta.param = c(0.5,0.3),
#' lambda0 = l(0.0),
#' mu0.median = mu(0.0))
#' }
#'
#' joint.samples <- sample.congruence.class(model = model,
#' num.samples = 40,
#' rate.type = "joint",
#' sample.joint.rates = sample.joint.rates)
#'
#' joint.samples
sample.basic.models.joint <- function(times,
p.delta,
lambda0,
mu0=NULL,
MRF.type="HSMRF",
beta.param=c(0.3,0.3),
mu0.median=0.1,
mu0.logsd=1.17481,
mrf.sd.scale = 1.0,
min.lambda=0,
min.mu=0,
max.lambda=10,
max.mu=10,
min.p=-0.05,
max.p=1.05) {
num.epochs <- length(times)
delta_t <- times[2]-times[1]
# rate at present
if ( lambda0 < min.lambda || lambda0 > max.lambda ) {
stop("User-defined lambda0 is outside [min.lambda,max.lambda].")
}
x0 <- lambda0
y0 <- min.mu - 10
if ( !is.null(mu0) ) {
if ( mu0 < min.mu || mu0 > max.mu ) {
stop("User-defined mu0 is outside [min.mu,max.mu].")
}
y0 <- mu0
} else {
while ( y0 < min.mu || y0 > max.mu ) {
y0 <- rlnorm(1,log(mu0.median),mu0.logsd)
}
}
lambdas <- rep(x0, num.epochs)
mus <- rep(y0, num.epochs)
  ### MRF joint trajectories between λ_{i-1} and λ* (:= λ such that μ_i = μ_{i-1}) ###
in_bounds = FALSE
while (!in_bounds){
in_bounds = TRUE
# Generate the MRF trajectory
if (MRF.type == "HSMRF" || MRF.type == "HSRF") {
zeta <- get.hsmrf.global.scale(num.epochs)
gamma <- min(abs(rcauchy(1,0,1)),1000) # avoid numerical instability
sigma <- abs(rcauchy(num.epochs-1,0,1))
delta_stochastic <- rnorm(num.epochs-1,0,sigma*gamma*zeta*mrf.sd.scale)
} else if (MRF.type == "GMRF") {
zeta <- get.gmrf.global.scale(num.epochs)
gamma <- min(abs(rcauchy(1,0,1)),1000) # avoid numerical instability
delta_stochastic <- rnorm(num.epochs-1,0,gamma*zeta*mrf.sd.scale)
} else {
stop("Invalid \"MRF.type\"")
}
trajectory <- cumsum(delta_stochastic)
# Fix a random point in the trajectory at a value drawn from a Beta distribution
trajectory <- trajectory + rbeta(1, beta.param[1], beta.param[2]) - sample(trajectory, 1)
# Rescale or slide the trajectory if it exceeds the allowed bounds
trajectory <- trajectory / max((max(trajectory)-min(trajectory))/(max.p-min.p), 1)
trajectory <- trajectory + min.p - min(min(trajectory), min.p)
trajectory <- trajectory + max.p - max(max(trajectory), max.p)
trajectory <- c(NA, trajectory)
for (i in 2:num.epochs){
# compute the λ_min and λ* parameters
lambda_min <- (p.delta(times[i])-1/delta_t + sqrt((1/delta_t-p.delta(times[i]))**2+4*lambdas[i-1]/delta_t))/2
b = p.delta(times[i]) + mus[i-1] - 1/delta_t
lambda_star = (b + sqrt(b**2+4*lambdas[i-1]/delta_t))/2
# get λ_i between λ_{i-1} and λ* according to the trajectory
lambda_i <- lambdas[i-1] + trajectory[i]*(lambda_star-lambdas[i-1])
# force λ_i to be higher than λ_min
if (lambda_i<lambda_min){
lambda_i <- lambda_min
mu_i <- 0
}else{
mu_i <- lambda_i - p.delta(times[i]) + (lambda_i-lambdas[i-1])/(lambda_i*delta_t)
}
if (mu_i > max.mu || lambda_i > max.lambda){
in_bounds = FALSE
break
}
lambdas[i] <- lambda_i
mus[i] <- mu_i
}
}
func_lambdas <- approxfun(times, lambdas)
func_mus <- approxfun(times, mus)
return (list(func_lambdas=func_lambdas, func_mus=func_mus))
}
|
/scratch/gouwar.j/cran-all/cranData/CRABS/R/sample.rates.R
|
#' Summarize trends in the posterior
#'
#' @param posterior a list of CRABS objects, each one representing a sample from the posterior
#' @inheritParams summarize.trends
#'
#' @return a ggplot object
#' @export summarize.posterior
#' @usage summarize.posterior(posterior, threshold = 0.01, rate_name = "lambda",
#' return_data = FALSE, rm_singleton = FALSE, per_time = TRUE,
#' window_size = 1, relative_deltas = FALSE)
#'
#' @examples
#' data(primates_ebd_log)
#'
#' posterior <- read.RevBayes(primates_ebd_log, max_t = 65, n_samples = 10)
#'
#' samples <- sample.congruence.class.posterior(posterior,
#' num.samples = 5,
#' rate.type = "extinction",
#' rate0.median = 0.1,
#' model = "MRF",
#' max.rate = 1.0)
#'
#' p <- summarize.posterior(samples, threshold = 0.05)
summarize.posterior <- function(posterior,
threshold = 0.01,
rate_name = "lambda",
return_data = FALSE,
rm_singleton = FALSE,
per_time = TRUE,
window_size = 1,
relative_deltas = FALSE){
times <- posterior[[1]][[1]]$times
max_t <- max(times)
n_epochs <- length(times)
res <- list()
for (i in seq_along(posterior)){
df <- summarize.trends(posterior[[i]],
threshold = threshold,
window_size = window_size,
per_time = per_time,
return_data = TRUE)[["heatmap_data"]]
df["posterior"] <- paste0("sample",i)
res[[i]] <- df
}
plotdata <- bind_rows(res)
if(return_data){
return(plotdata)
}
k <- sum(sapply(posterior, length))
#levels_base <-
levels1 <- c("-1", "0", "1")
levels1 <- levels1[levels1 %in% levels(plotdata$direction)]
plotdata$direction <- factor(plotdata$direction, levels = levels1)
p1 <- ggplot(plotdata, aes(x = time, fill = direction)) +
geom_histogram(aes(y = stat(count / sum(count)*(n_epochs-window_size+1))), binwidth = max_t/n_epochs) +
theme_classic() +
scale_fill_manual(values = c("purple","white", "#7fbf7b"), labels = direction_labels) +
theme(legend.position = c(0.3, 0.3),
legend.title = element_blank()) +
scale_x_reverse() +
labs(y = "model coverage", x = "time before present")
return(p1)
}
|
/scratch/gouwar.j/cran-all/cranData/CRABS/R/summarize.posterior.R
|
create_heatmatrix <- function(model,
name,
rate_name,
threshold,
window_size,
method,
per_time,
relative_deltas = FALSE,
gmap = NULL){
times <- model$times
rate <- model[[rate_name]](times)
#method <- "neighbour"
if (method == "neighbour"){
delta_t <- model$times[window_size+1] - model$times[1]
## \Delta\lambda_i = \lambda_{i-k} - \lambda_i
rate_i <- tail(rate, n = -window_size)
rate_i_minus_k <- head(rate, n = -window_size) ## k is the window size
if (relative_deltas){
delta_rate <- (rate_i_minus_k - rate_i) / rate_i
}else{
delta_rate <- (rate_i_minus_k - rate_i)
}
if (per_time){
delta_rate <- delta_rate / delta_t
}
xtimes <- (head(times, n = -window_size) + tail(times, n = -window_size))/2
}else if (is.numeric(method)){
if(per_time){
stop("the option \"per time\" is not supported when not using neighbours. Set \"per time\" to FALSE.")
}
rate_i <- rate
rate_k <- rate[method]
delta_rate <- rate_i - rate_k
xtimes <- times
}else{
stop("method must either be \"neighbour\" or a numeric value")
}
# is increasing
inc <- ifelse(delta_rate > threshold, 1, 0)
# if decreasing
dec <- ifelse(delta_rate <= -threshold, -1, 0)
# No change (or flat) is implicitly the value 0
direction <- factor(inc + dec)
group_name <- gmap[[name]]
if(is.null(group_name)){
group_name <- "other"
}
df <- tibble::tibble(delta_rate = delta_rate,
direction = direction,
time = xtimes,
name = name,
group_name = group_name)
return(df)
}
remove_singletons <- function(df){
for (i in 2:(nrow(df)-1)){
triplet <- df[["direction"]][(i-1):(i+1)] ## Create the 3-window
if (triplet[1] == triplet[3]){ ## Check if ends are equal
if (triplet[2] != triplet[1]){
df[i, "direction"] <- triplet[3] ## If middle is unequal, assign it to the corner values
}
}
}
return(df)
}
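# Illustrative sketch (not exported): remove_singletons() overwrites a lone
# direction value that sits between two identical neighbours. The toy tibble
# below is hypothetical; only the "direction" column is inspected.
if (FALSE) {
  toy <- tibble::tibble(direction = factor(c(1, 1, -1, 1, 0, 0)),
                        time = 1:6)
  remove_singletons(toy)$direction
  # the isolated -1 at position 3 is replaced by the surrounding value 1
}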
direction_labels <- function(x){
ifelse(x == "-1", "Decreasing", ifelse(x == "1", "Increasing", "Flat"))
}
plotdata <- function(model_set,
threshold,
rate_name,
window_size,
method,
per_time,
relative_deltas,
gmap){
model_names <- names(model_set)
l <- list()
for (i in seq_along(model_names)){
name <- model_names[i]
l[[i]] <- create_heatmatrix(model_set[[name]],
name,
rate_name,
threshold,
window_size,
method,
per_time,
relative_deltas,
gmap)
}
df <- do.call(rbind, l)
df$direction <- factor(df$direction, levels = sort(levels(df$direction)))
return(df)
}
#' Summarize trends in the congruence class
#'
#' @param model_set an object of type "CRABSset"
#' @param threshold a threshold for when \eqn{\Delta\lambda_i} should be interpreted as decreasing, flat, or increasing
#' @param rate_name either "lambda" or "mu" or "delta"
#' @param window_size the window size "k" in \eqn{\Delta\lambda_i = \lambda_{i-k} - \lambda_i}
#' @param method defaults to "neighbour", i.e. rate values are compared at neighbouring time points.
#' @param per_time whether to compute \eqn{\Delta\lambda_i} in units of per time, i.e. divided by \eqn{\Delta t}
#' @param return_data instead of plots, return the plotting data frames
#' @param rm_singleton whether or not to remove singletons. The removal pass starts at the present and moves towards the ancient end
#' @param relative_deltas whether to divide \eqn{\Delta\lambda_i} by the local lambda value
#' @param group_names a vector of prefixes, if you want to group the models in a facet. For example 'c("reference", "model")'
#'
#' @importFrom ggplot2 facet_grid stat
#'
#' @return a patchwork object
#' @usage summarize.trends(model_set, threshold = 0.005, rate_name = "lambda",
#' window_size = 1, method = "neighbour", per_time = TRUE, return_data = FALSE,
#' rm_singleton = FALSE, relative_deltas = FALSE, group_names = NULL)
#' @export summarize.trends
#'
#' @examples
#'
#' data(primates_ebd)
#' lambda <- approxfun(primates_ebd$time, primates_ebd$lambda)
#' mu <- approxfun(primates_ebd$time, primates_ebd$mu)
#' times <- seq(0, max(primates_ebd$time), length.out = 500)
#'
#' reference <- create.model(lambda, mu, times = times)
#'
#' mus <- list(function(t) exp(0.01*t) - 0.01*t - 0.9,
#' function(t) exp(-0.02*t) - 0.2,
#' function(t) exp(-0.07*t) + 0.02*t - 0.5,
#' function(t) 0.2 + 0.01*t,
#' function(t) 0.2)
#'
#'
#' model_set <- congruent.models(reference, mus = mus)
#'
#' p <- summarize.trends(model_set, 0.02)
summarize.trends <- function(model_set,
threshold = 0.005,
rate_name = "lambda",
window_size = 1,
method = "neighbour",
per_time = TRUE,
return_data = FALSE,
rm_singleton = FALSE,
relative_deltas = FALSE,
group_names = NULL){
##
if(!is.null(group_names)){
gmap <- list()
for (group_name in group_names){
idx <- which(startsWith(names(model_set), group_name))
if (length(idx) > 0){
for (i in idx){
gmap[[names(model_set)[i]]] <- group_name
}
}
}
}else{
gmap <- NULL
}
df <- plotdata(model_set, threshold, rate_name, window_size, method, per_time, relative_deltas, gmap)
if(!is.null(gmap)){
df$group_name <- factor(df$group_name, levels = group_names)
}
rate_times <- model_set[[1]]$times
if (rm_singleton){
l <- df %>% dplyr::group_by(name) %>%
group_map(~ remove_singletons(.x), .keep = TRUE)
df <- do.call(rbind, l)
}
# plot finite-difference derivative
if(rate_name %in% c("lambda", "mu", "delta")){
if (method == "neighbour"){
lab <- paste0("\\Delta\\", rate_name, " = \\frac{\\", rate_name, "_{i-", window_size, "} - \\", rate_name, "_i}")
}else{
lab <- paste0("\\Delta\\", rate_name, " = \\frac{\\", rate_name, "_{", method,"} - \\", rate_name, "_i}")
}
denum <- ""
if(per_time){
denum <- paste0(denum, "\\Delta t")
}
if(relative_deltas){
denum <- paste0(denum, "\\", rate_name, "_i")
}
if(!per_time && !relative_deltas){
denum <- paste0(denum, "1")
}
denum <- paste0("{", denum, "}")
ylabel <- latex2exp::TeX(paste0("$", lab, denum, "$"))
}else{
stop("rate_name must either be 'lambda' or 'mu' or 'delta'.")
}
col1 <- list("mu" = "orange",
"lambda" = "blue",
"delta" = "purple")[[rate_name]]
cbPalette <- c(head(sequential_hcl(palette = col1,
n = length(model_set)),
n = -1), "black")
p1 <- ggplot(df, aes(time, delta_rate, color = name)) +
geom_line() +
geom_hline(yintercept = threshold, linetype = "dashed", color = "red") +
geom_hline(yintercept = -threshold, linetype = "dashed", color = "red") +
scale_x_reverse(limits = rev(range(rate_times))) +
scale_color_manual(values = cbPalette) +
theme_classic() +
ylab(ylabel) +
theme(axis.title.x=element_blank(),
axis.text.x=element_blank(),
plot.margin = unit(c(t = 0,r = 1,b = 0,l = 1), "pt"),
legend.position = "none")
# plot directions
p2 <- ggplot(df, aes(time, name, fill = direction)) +
geom_tile() +
scale_x_reverse(limits = rev(range(rate_times))) +
scale_fill_manual(values = c("purple","white", "#7fbf7b"), labels = direction_labels) +
theme_classic() +
theme(plot.margin = unit(c(t = 0,r = 0,b = 1,l = 1), "pt"),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank()) +
ylab("Models") +
xlab("time before present")
if(!is.null(gmap)){
p2 <- p2 +
facet_grid(group_name~., scales="free_y", space="free_y", switch = "y") +
theme(panel.spacing = unit(c(0), "lines"),
strip.background = element_rect(fill=NA),
axis.text.y = element_blank())
}
if(return_data){
return(list(heatmap_data = df
))
}else{
p <- p1 + p2 + plot_layout(ncol = 1,
#guides = "collect",
heights = c(0.5, 0.5))
return(p)
}
}
|
/scratch/gouwar.j/cran-all/cranData/CRABS/R/summarize.trends.R
|
rdirichlet <- function(dim,alpha) {
x <- rgamma(dim,alpha,1)
return(x/sum(x))
}
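# Minimal usage sketch: rdirichlet(dim, alpha) draws one point on the
# (dim-1)-simplex, i.e. `dim` non-negative weights that sum to one.
if (FALSE) {
  w <- rdirichlet(5, alpha = 1)  # symmetric Dirichlet(1, ..., 1)
  sum(w)                         # 1, up to floating-point error
}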
#' Global scale for HSMRF using linear interpolation of pre-computed values
#'
#' @param v The number of pieces in the approximation
#' @return Global scale
#' @keywords internal
get.hsmrf.global.scale <- function(v){
return (exp(approxfun(x=log(c(2,10,20,50,100,200,500,1000,2000,5000,10000,100000)),
y=log(c(3.971988,0.04446432,0.01653494,0.005024999,0.002093737,0.0008441697,0.0002137604,4.947984e-05,4.518384e-10,7.099068e-11,3.113958e-11,2.941689e-12)))(log(v))))
}
# Replacing the previous `get.hsmrf.global.scale` function as the linear approximation seems to be more appropriately done in log-scale (see code below).
# get.hsmrf.global.scale.old <- approxfun(x=c(2,10,20,50,100,200,500,1000,2000,5000,10000,100000),
# y=c(3.971988,0.04446432,0.01653494,0.005024999,0.002093737,0.0008441697,0.0002137604,4.947984e-05,4.518384e-10,7.099068e-11,3.113958e-11,2.941689e-12))
# plot(c(2,10,20,50,100,200,500,1000,2000,5000,10000,100000), get.hsmrf.global.scale.old(c(2,10,20,50,100,200,500,1000,2000,5000,10000,100000)), log="xy"); lines(2:100000, get.hsmrf.global.scale.old(2:100000))
# plot(c(2,10,20,50,100,200,500,1000,2000,5000,10000,100000), get.hsmrf.global.scale(c(2,10,20,50,100,200,500,1000,2000,5000,10000,100000)), log="xy"); lines(2:100000, get.hsmrf.global.scale(2:100000))
#' Global scale for GMRF using linear interpolation of pre-computed values
#'
#' @param v The number of pieces in the approximation
#' @return Global scale
#' @keywords internal
get.gmrf.global.scale <- function(v){
return (exp(approxfun(x=log(c(2,10,20,50,100,200,500,1000,2000,5000,10000,100000)),
y=log(c(2.871193,0.1064935,0.04975563,0.01911503,0.009376335,0.004549474,0.001693932,0.0007408964,0.0002640923,0.0001002221,7.352401e-05,4.42448e-05)))(log(v))))
}
# Replacing the previous `get.gmrf.global.scale` function as the linear approximation seems to be more appropriately done in log-scale (see code below).
# get.gmrf.global.scale.old <- approxfun(x=c(2,10,20,50,100,200,500,1000,2000,5000,10000,100000),
# y=c(2.871193,0.1064935,0.04975563,0.01911503,0.009376335,0.004549474,0.001693932,0.0007408964,0.0002640923,0.0001002221,7.352401e-05,4.42448e-05))
# plot(c(2,10,20,50,100,200,500,1000,2000,5000,10000,100000), get.gmrf.global.scale.old(c(2,10,20,50,100,200,500,1000,2000,5000,10000,100000)), log="xy"); lines(2:100000, get.gmrf.global.scale.old(2:100000))
# plot(c(2,10,20,50,100,200,500,1000,2000,5000,10000,100000), get.gmrf.global.scale(c(2,10,20,50,100,200,500,1000,2000,5000,10000,100000)), log="xy"); lines(2:100000, get.gmrf.global.scale(2:100000))
#' model2df
#'
#' @param model an object of class "CRABS"
#' @param gather boolean. Whether to return wide or long data frame
#' @param rho the sampling fraction at the present. Used to calculate the pulled speciation rate
#' @param compute.pulled.rates whether to compute the pulled rates
#'
#' @return a data frame
#' @export
#'
#' @examples
#' lambda <- function(t) 2.0 + sin(0.8*t)
#' mu <- function(t) 1.5 + exp(0.15*t)
#' times <- seq(from = 0, to = 4, length.out = 1000)
#' model <- create.model( lambda, mu, times = times)
#'
#' model2df(model)
model2df <- function(model, gather = TRUE, rho = 1.0, compute.pulled.rates = TRUE){
times <- model$times
l <- model$lambda(times)
ex <- model$mu(times)
if (compute.pulled.rates){
p.lambda <- pulled.speciation(model, rho = rho)(times)
p.delta <- model$p.delta(times)
p.mu <- model$lambda(0.0) - p.delta
}else{
p.lambda <- NULL
p.delta <- NULL
p.mu <- NULL
}
df <- tibble::tibble("Time" = times,
"Speciation" = l,
"Extinction" = ex,
"Net-diversification" = l - ex,
"Relative extinction" = ex / l,
"Pulled net-diversification" = p.delta,
"Pulled speciation" = p.lambda,
"Pulled extinction" = p.mu)
if (gather){
df <- gather(df, "rate", "value", -Time)
}
return(df)
}
|
/scratch/gouwar.j/cran-all/cranData/CRABS/R/utils.R
|
# for R CMD check NOTE about global vars
if(getRversion() >= "2.15.1") utils::globalVariables(c(".", "Package","Published","name",
"Title","Description","term","months_since",
"snapshot_date","cran_inventory"))
## function to get packages
getPackages <- function() {
repo <- ifelse(is.na(getOption("repos")["CRAN"]), getOption("repos")[[1]], getOption("repos")["CRAN"])
description <- sprintf("%s/web/packages/packages.rds", repo)
con <- if(substring(description, 1L, 7L) == "file://") {
file(description, "rb")
} else {
url(description, "rb")
}
on.exit(close(con))
db <- readRDS(gzcon(con))
rownames(db) <- NULL
db[, c("Package", "Version","Title","Description","Published","License")]
}
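# Hedged usage sketch: getPackages() reads the packages.rds index from the
# configured CRAN mirror and returns the selected columns; it needs network
# access (or a file:// repository), so it is not run here.
if (FALSE) {
  pkgs <- getPackages()
  head(pkgs[, c("Package", "Version", "Published")])
}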
#' CRANsearcher
#'
#' Addin for searching packages in CRAN database based on keywords
#' @import dplyr
#' @importFrom curl has_internet
#' @import shiny
#' @import miniUI
#' @importFrom lubridate interval
#' @importFrom shinyjs hide useShinyjs
#' @importFrom stringr str_detect
#' @importFrom utils contrib.url install.packages
#'
#' @examples
#' \dontrun{
#' CRANsearcher()
#' }
#'
#' @export
CRANsearcher <- function(){
ui <- miniPage(
shinyjs::useShinyjs(),
# Loading message
div(
id = "loading-content",
h2("Loading CRAN package database..."),
style = "margin: auto;
position: absolute;
top: 35%;
left: 30%;
text-align: left;"),
gadgetTitleBar(a(href="https://github.com/RhoInc/CRANsearcher", "CRAN Package Searcher"),
left = miniTitleBarCancelButton("close","Close"),
right = uiOutput("install")),
miniContentPanel(
fillCol(
flex=c(1,6),
fillRow(
flex=c(2,1),
textInput("search","Enter search terms separated by commas (e.g. latent class, longitudinal)", width="90%"),
selectInput("dates","Last release date range",choices=c("1 month","3 months","6 months","12 months","All time"), selected="All time", width="80%")
),
div(DT::dataTableOutput("table"), style = "font-size: 90%")
)
),
miniButtonBlock(
div(textOutput("n"), style = "font-weight: bold")
)
)
server <- function(input, output, session){
crandb <- reactiveValues(a=NULL, snapshot_date=NULL)
observeEvent(!is.null(crandb$a),{
shinyjs::hide(id = "loading-content", anim = TRUE, animType = "fade")
})
# determine if internet access & manage data
if(curl::has_internet()){
crandb$a <- getPackages() %>%
data.frame %>%
mutate(Published = as.Date(Published),
months_since = lubridate::interval(Published, Sys.Date())/months(1),
name = Package %>% as.character,
Package = paste0('<a href="','https://cran.r-project.org/web/packages/',Package,'" style="color:#000000">',Package,'</a>',
'<sub> <a href="','http://www.rpackages.io/package/',Package,'" style="color:#000000">',1,'</a></sub>',
'<sub> <a href="','http://rdrr.io/cran/',Package,'" style="color:#000000">',2,'</a></sub>')) %>%
rename(`Last release`=Published)
crandb$snapshot_date <- format(Sys.Date(), "%m/%d/%y")
} else {
a <- cran_inventory %>%
mutate(Published = as.Date(Published),
months_since = lubridate::interval(Published, Sys.Date())/months(1),
name = Package %>% as.character,
Package =paste0('<a href="','https://cran.r-project.org/web/packages/',Package,'" style="color:#000000">',Package,'</a>',
'<sub> <a href="','http://www.rpackages.io/package/',Package,'" style="color:#000000">',1,'</a></sub>',
'<sub> <a href="','http://rdrr.io/cran/',Package,'" style="color:#000000">',2,'</a></sub>')) %>%
rename(`Last release`=Published)
crandb$a <- a
crandb$snapshot_date <- format(a$snapshot_date, "%m/%d/%y")
}
a_sub1 <- reactive({
dat <- crandb$a
if(input$dates=="All time"){
return(dat)
} else {
nmos <- gsub("[^0-9\\.]", "", input$dates)
return(filter(dat, months_since < nmos))
}
})
a_sub2 <- reactive({
search <- input$search %>%
tolower %>%
strsplit(.,",") %>%
unlist %>%
trimws
search2 <- search[which(nchar(search) >1)]
a <- a_sub1()
if(nchar(input$search)<3){
s <- 0
} else{
s <- a %>%
mutate(term = tolower(paste(name, Title, Description, sep=","))) %>%
rowwise %>%
mutate(match = all(stringr::str_detect(term, search2))) %>%
filter(match==TRUE) %>%
select(-c(term, match)) %>%
data.frame
}
return(s)
})
output$table <- DT::renderDataTable({
if(nchar(input$search)<3){
if(!is.null(crandb$a)){
if (input$dates=="All time"){
DT::datatable(crandb$a[c(1:10),c(1:6)],
rownames = FALSE,
escape = FALSE,
style="bootstrap",
class='compact stripe hover row-border order-column',
selection="multiple",
extensions = "Buttons",
options= list(dom = 'Btip',
buttons = I('colvis')))
} else{
DT::datatable(a_sub1()[,c(1:6)],
rownames = FALSE,
escape = FALSE,
style="bootstrap",
class='compact stripe hover row-border order-column',
selection="multiple",
extensions = "Buttons",
options= list(dom = 'Btip',
buttons = I('colvis')))
}
} else{
return()
}
} else{
DT::datatable(a_sub2()[,c(1:6)],
rownames = FALSE,
escape = FALSE,
style="bootstrap",
class='compact stripe hover row-border order-column',
selection="multiple",
extensions = "Buttons",
options= list(dom = 'Btip',
buttons = I('colvis')))
}
})
output$n <- renderText({
note <- ifelse(!is.null(crandb$snapshot_date), paste0(" (as of ", crandb$snapshot_date, ")"), "")
if(nchar(input$search)<3){
if (!is.null(crandb$a)){
if (input$dates=="All time"){
paste0("There are ",dim(crandb$a)[1]," packages on CRAN", note, ". Displaying first 10.")
} else{
paste0("There are ",dim(a_sub1())[1]," packages on CRAN released within the past ",input$dates,note,".")
}
} else{
paste("")
}
} else{
n <- dim(a_sub2())[1]
if (!n==1){
if (input$dates=="All time"){
paste0("There are ",n," packages related to '",input$search,"' on CRAN", note,".")
} else {
paste0("There are ",n," packages related to '",input$search,"' on CRAN released within the past ",input$dates,note,".")
}
} else {
if (input$dates=="All time"){
paste0("There is ",n," package related to '",input$search,"' on CRAN", note, ".")
} else {
paste0("There is ",n," package related to '",input$search,"' on CRAN released within the past ",input$dates,note,".")
}
}
}
})
output$install <- renderUI({
if (!is.null(input$table_rows_selected)){
miniTitleBarButton("install", "Install selected package(s)", primary=TRUE)
} else{
miniTitleBarButton("install", "Install selected package(s)")
}
})
observeEvent(input$install, {
rows <- input$table_rows_selected
pkgs <- as.vector(a_sub2()[rows, "name"])
utils::install.packages(pkgs)
})
observeEvent(input$close,{
stopApp()
})
}
viewer <- dialogViewer("Search packages in CRAN database based on keywords", width = 1200, height = 900)
runGadget(ui, server, viewer = viewer)
}
|
/scratch/gouwar.j/cran-all/cranData/CRANsearcher/R/CRANsearcher.R
|
#' CRAN inventory snapshot
#'
#' Snapshot of CRAN web database inventory of packages for use offline.
#'
#' @format A data frame with 10,000+ rows and 7 columns:
#' \describe{
#' \item{Package}{Package}
#' \item{Version}{Version}
#' \item{Title}{Title}
#' \item{Description}{Description}
#' \item{Published}{Published}
#' \item{License}{License}
#' \item{snapshot_date}{Date that database snapshot was taken.}
#' }
#'
#' @source \url{https://cran.r-project.org/web/packages/}
"cran_inventory"
|
/scratch/gouwar.j/cran-all/cranData/CRANsearcher/R/data.R
|
#' @title
#' The CRE package
#'
#' @description
#' In health and social sciences, it is critically important to
#' identify subgroups of the study population where a treatment
#' has notable heterogeneity in the causal effects with respect
#' to the average treatment effect. Data-driven discovery of
#' heterogeneous treatment effects (HTE) via decision tree methods
#' has been proposed for this task. Despite its high interpretability,
#' the single-tree discovery of HTE tends to be highly unstable and to
#' find an oversimplified representation of treatment heterogeneity.
#' To accommodate these shortcomings, we propose Causal Rule Ensemble
#' (CRE), a new method to discover heterogeneous subgroups through an
#' ensemble-of-trees approach. CRE has the following features:
#' 1) provides an interpretable representation of the HTE; 2) allows
#' extensive exploration of complex heterogeneity patterns; and 3)
#' guarantees high stability in the discovery. The discovered subgroups
#' are defined in terms of interpretable decision rules, and we develop
#' a general two-stage approach for subgroup-specific conditional
#' causal effects estimation, providing theoretical guarantees.
#'
#' @docType package
#' @name CRE-package
#' @aliases CRE
#' @author Naeem Khoshnevis
#' @author Daniela Maria Garcia
#' @author Riccardo Cadei
#' @author Kwonsang Lee
#' @author Falco Joannes Bargagli Stoffi
#' @import xtable
#' @import data.table
#' @import SuperLearner
#' @importFrom RRF RRF
#' @importFrom RRF getTree
#' @importFrom gbm pretty.gbm.tree
#' @importFrom xgboost xgb.model.dt.tree
#' @import stats
#' @importFrom methods as
#'
#' @references
#' Bargagli-Stoffi, F. J., Cadei, R., Lee, K. and Dominici, F. (2023).
#' Causal rule ensemble: Interpretable Discovery and Inference of
#' Heterogeneous Treatment Effects, arXiv preprint arXiv:2009.09036.
#'
NULL
|
/scratch/gouwar.j/cran-all/cranData/CRE/R/CRE_package.R
|
#' @title
#' Check input parameters
#'
#' @description
#' Checks consistency in input (hyper) parameters for the `cre` function.
#'
#' @param X_names The observed covariates names.
#' @param params The list of parameters required to run the function.
#'
#' @keywords internal
#'
#' @return
#' A modified input `params`. A list of parameters that might be changed during
#' the checks.
#'
check_hyper_params <- function(X_names, params) {
logger::log_debug("Checking hyper parameters...")
# Input params checks --------------------------------------------------------
ntrees <- getElement(params, "ntrees")
if (length(ntrees) == 0) {
ntrees <- 20
} else {
if (!inherits(ntrees, "numeric")) {
stop("Invalid 'ntrees' input. Please input a positive integer")
}
if (ntrees<1) {
stop("Invalid 'ntrees' input. Please input a positive integer")
}
}
params[["ntrees"]] <- ntrees
node_size <- getElement(params, "node_size")
if (length(node_size) == 0) {
node_size <- 20
} else {
if (!inherits(node_size, "numeric")) {
stop("Invalid 'node_size' input. Please input a number.")
}
}
params[["node_size"]] <- node_size
max_rules <- getElement(params, "max_rules")
if (length(max_rules) == 0) {
max_rules <- 50
} else {
if (!inherits(max_rules, "numeric")) {
stop("Invalid 'max_rules' input. Please input a number.")
}
}
params[["max_rules"]] <- max_rules
max_depth <- getElement(params, "max_depth")
if (length(max_depth) == 0) {
max_depth <- 3
} else {
if (!inherits(max_depth, "numeric")) {
stop("Invalid 'max_depth' input. Please input a number.")
}
}
params[["max_depth"]] <- max_depth
t_decay <- getElement(params, "t_decay")
if (length(t_decay) == 0) {
t_decay <- 0.025
} else {
if (!inherits(t_decay, "numeric")) {
stop("Invalid 't_decay' input. Please input a number.")
}
}
params[["t_decay"]] <- t_decay
replace <- getElement(params, "replace")
if (length(replace) == 0) {
replace <- TRUE
} else {
if (!(replace %in% c(TRUE, FALSE))) {
stop("Please specify 'TRUE' or 'FALSE' for the replace argument.")
}
}
params[["replace"]] <- replace
t_ext <- getElement(params, "t_ext")
if (length(t_ext) == 0) {
t_ext <- 0.01
} else {
if (!inherits(t_ext, "numeric")) {
stop("Invalid 't_ext' input. Please input a number.")
}
if (t_ext > 0.5 || t_ext < 0){
stop(paste("t_ext should be defind in [0, 0.5) range. ",
"Current provided value: ", t_ext))
}
}
params[["t_ext"]] <- t_ext
t_corr <- getElement(params, "t_corr")
if (length(t_corr) == 0) {
t_corr <- 1
} else {
if (!inherits(t_corr, "numeric")) {
stop("Invalid 't_corr' input. Please input a number.")
}
}
params[["t_corr"]] <- t_corr
stability_selection <- getElement(params, "stability_selection")
if (length(stability_selection) == 0) {
stability_selection <- "vanilla"
} else {
if (!(stability_selection %in% c("error_control", "no","vanilla"))) {
stop(paste0("Invalid `stability_selection` argument. Please input ",
"a value among: {`no`, `vanilla`, `error_control`}."))
}
}
params[["stability_selection"]] <- stability_selection
cutoff <- getElement(params, "cutoff")
if (length(cutoff) == 0) {
cutoff <- 0.9
} else {
if (!inherits(cutoff, "numeric")) {
stop("Invalid 'cutoff' input. Please input a number.")
}
}
params[["cutoff"]] <- cutoff
pfer <- getElement(params, "pfer")
if (length(pfer) == 0) {
pfer <- 1
} else {
if (!inherits(pfer, "numeric")) {
stop("Invalid 'pfer' input. Please input a number.")
}
}
params[["pfer"]] <- pfer
intervention_vars <- getElement(params, "intervention_vars")
if (length(intervention_vars) == 0) {
intervention_vars <- NULL
} else {
for (intervention_var in intervention_vars) {
if (!(intervention_var %in% X_names))
stop(paste(intervention_var,
"variable is not observed. Please select a set of",
"'intervention_vars' included among the observed covariates."))
}
}
params[["intervention_vars"]] <- intervention_vars
# Check for correct offset input
offset <- getElement(params, "offset")
if (!is.null(offset)) {
if (!(offset %in% X_names)) {
stop(paste(offset,
"variable is not observed. Please select a ",
"'offset' included among the observed covariates."))
}
}
params[["offset"]] <- offset
# Check for correct B input
B <- getElement(params, "B")
if (length(B) == 0) {
B <- 20
} else {
if (!inherits(B, "numeric")) {
stop("Invalid 'B' input. Please input an integer.")
}
}
params[["B"]] <- B
# Check for correct subsample input
subsample <- getElement(params, "subsample")
if (length(subsample) == 0) {
subsample <- 0.5
} else {
if (!inherits(subsample, "numeric") || (subsample < 0) || (subsample > 1)) {
stop("Invalid 'subsample' input. Please input a number between 0 and 1.")
}
}
params[["subsample"]] <- subsample
logger::log_debug("Done with checking hyper parameters.")
return(params)
}
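# Illustrative sketch (internal helper): called with an empty list, every
# documented default is filled in (e.g. ntrees = 20, cutoff = 0.9). The
# covariate names below are hypothetical.
if (FALSE) {
  defaults <- check_hyper_params(X_names = c("x1", "x2"), params = list())
  defaults$ntrees   # 20
  defaults$cutoff   # 0.9
}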
|
/scratch/gouwar.j/cran-all/cranData/CRE/R/check_hyper_params.R
|
#' @title
#' Check input data
#'
#' @description
#' Conducts sanity checks for the input data.
#'
#' @param y The observed response vector.
#' @param z The treatment vector.
#' @param X The features matrix.
#' @param ite The estimated ITE vector.
#'
#' @keywords internal
#'
#' @return
#' The number of data samples.
#'
check_input_data <- function(y, z, X, ite = NULL) {
logger::log_debug("Checking input data...")
# Observed Outcome
if (is.matrix(y)) {
if (ncol(y) != 1 || !(is.numeric(y[, 1]) || is.integer(y[, 1]))) {
stop("Observed response vector (y) input values should be a numerical
vector, not a matrix")
}
N <- nrow(y)
} else if (is.vector(y) && (is.numeric(y) || is.integer(y))) {
N <- length(y)
} else {
stop("Observed response vector (y) input values should be a numerical
vector")
}
# Treatment
if (is.matrix(z)) {
if (ncol(z) != 1 || !(is.numeric(z[, 1]) || is.integer(z[, 1]))
|| length(unique(z)) != 2) {
stop("Treatment vector (z) input values should be a numerical binary
vector, not a matrix")
}
N_check <- nrow(z)
} else if (is.vector(z) && (is.numeric(z) || is.integer(z))
&& length(unique(z)) == 2) {
N_check <- length(z)
} else {
stop(paste0("Treatment vector (z) input values should be",
" a numerical binary vector"))
}
if (N != N_check) {
stop(paste("Response and treatment vectors should be the same size.",
"Current values:", N, ",", N_check))
}
# ITE (if provided)
if (!is.null(ite)) {
if (is.matrix(ite)) {
if (ncol(ite) != 1 || !(is.numeric(ite[, 1]) || is.integer(ite[, 1]))) {
stop("ITE vector (ite) input values should be a numerical
vector, not a matrix")
}
N_check <- nrow(ite)
} else if (is.vector(ite) && (is.numeric(ite) || is.integer(ite))) {
N_check <- length(ite)
} else {
stop("ITE vector (ite) input values should be a numerical vector")
}
if (N != N_check) {
stop(paste("Response and ITE vectors should be the same size.",
"Current values:", N, ",", N_check))
}
}
# Covariates
if (is.matrix(X)) {
N_check <- nrow(X)
} else if (is.data.frame(X)) {
N_check <- nrow(X)
} else {
stop(paste("Invalid 'X' input. Please input a matrix or data frame",
" of numeric variables"))
}
if (!all(apply(X, 2, class) %in% c("integer", "numeric"))) {
stop(paste("Invalid 'X' input. Please input a matrix or data frame",
" of numeric variables"))
}
if (N != N_check) {
stop(paste("Response and X dataframe should have the ",
"same number of observations.",
"Current values:", N, ",", N_check))
}
logger::log_debug("Done with checking input data.")
invisible(N)
}
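# Illustrative sketch (internal helper): check_input_data() validates types and
# matching lengths, stopping with an informative error on a size mismatch.
# The toy data below are hypothetical.
if (FALSE) {
  y <- rnorm(10)
  z <- rbinom(10, 1, 0.5)
  X <- matrix(rnorm(10 * 3), nrow = 10)
  check_input_data(y, z, X)       # passes and invisibly returns 10
  check_input_data(y[1:5], z, X)  # stops: vectors are not the same size
}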
|
/scratch/gouwar.j/cran-all/cranData/CRE/R/check_input_data.R
|
#' @title
#' Check method-related parameters
#'
#' @description
#' Checks method-related parameters.
#'
#' @param y The observed response vector.
#' @param ite The estimated ITE vector.
#' @param params The list of parameters required to run the method functions.
#'
#' @keywords internal
#'
#' @return
#' A modified input `params`. A list of parameters that might be changed during
#' the checks.
#'
check_method_params <- function(y, ite, params) {
logger::log_debug("Checking method parameters...")
# Honest Splitting Parameters Check ------------------------------------------
ratio_dis <- getElement(params, "ratio_dis")
if (length(ratio_dis) == 0) {
ratio_dis <- 0.5
} else {
if (!inherits(ratio_dis, "numeric") || (ratio_dis < 0) || (ratio_dis > 1)) {
stop("Invalid 'ratio_dis' input. Please input a number between 0 and 1.")
}
}
params[["ratio_dis"]] <- ratio_dis
# ITE Estimation Parameters Check --------------------------------------------
ite_method <- tolower(getElement(params, "ite_method"))
if (length(ite_method) == 0) {
ite_method <- "aipw"
} else {
if (!(ite_method %in% c("aipw", "slearner", "tlearner", "xlearner",
"bart", "cf", "tpoisson"))) {
stop(paste(
"Invalid ITE method. Please choose from the following:",
"\n", "'aipw', 'bart', 'slearner','tlearner', ",
"'xlearner', 'cf', or 'tpoisson'"
))
}
}
params[["ite_method"]] <- ite_method
# Propensity Score Estimation Parameters Check--------------------------------
learner_ps <- getElement(params, "learner_ps")
if (!(ite_method %in% c("slearner", "tlearner",
"xlearner", "tpoisson"))) {
if (length(learner_ps) == 0) {
learner_ps <- "SL.xgboost"
} else {
if (!(class(learner_ps) %in% c("character", "list"))) {
stop("Please specify a string or list of strings for the learner_ps
argument.")
}
}
} else {
learner_ps <- NA
}
params[["learner_ps"]] <- learner_ps
# Outcome Estimation Parameters Check -----------------------
learner_y <- getElement(params, "learner_y")
if (ite_method %in% c("slearner", "tlearner", "xlearner", "aipw")) {
if (length(learner_y) == 0) {
learner_y <- "SL.xgboost"
} else {
if (!(class(learner_y) %in% c("character", "list"))) {
stop("Please specify a string or list of strings for the learner_y
argument.")
}
}
} else {
learner_y <- NA
}
params[["learner_y"]] <- learner_y
# Discard ITE Parameters if ITE estimates are provided------------------------
if (!is.null(ite)) {
params[["ite_method"]] <- "personalized"
params[["learner_ps"]] <- NULL
params[["learner_y"]] <- NULL
}
logger::log_debug("Done with checking method parameters.")
return(params)
}
|
/scratch/gouwar.j/cran-all/cranData/CRE/R/check_method_params.R
|
#' @title
#' Causal rule ensemble
#'
#' @description
#' Performs the Causal Rule Ensemble on a data set with a response variable,
#' a treatment variable, and various features.
#'
#' @param y An observed response vector.
#' @param z A treatment vector.
#' @param X A covariate matrix (or a data frame). Should be provided as
#' numerical values.
#' @param method_params The list of parameters to define the models used,
#' including:
#' - *Parameters for Honest Splitting*
#' - *ratio_dis*: The ratio of data delegated to rules discovery
#' (default: 0.5).
#' - *Parameters for Discovery and Inference*
#' - *ite_method*: The method for ITE (pseudo-outcome) estimation
#' (default: \code{"aipw"}, options: \code{"aipw"} for Augmented Inverse
#' Probability Weighting, \code{"cf"} for Causal Forest, \code{"bart"} for
#' Causal Bayesian Additive Regression Trees, \code{"slearner"} for S-Learner,
#' \code{"tlearner"} for T-Learner, \code{"xlearner"} for X-Learner,
#' \code{"tpoisson"} for T-Learner with Poisson regression).
#' - *learner_ps*: The model for the propensity score estimation
#' (default: \code{"SL.xgboost"}, options: any SuperLearner prediction model
#' i.e., \code{"SL.lm"}, \code{"SL.svm"}, used only for \code{"aipw"},
#' \code{"bart"}, \code{"cf"} ITE estimators).
#' - *learner_y*: The model for the outcome estimation
#' (default: \code{"SL.xgboost"}, options: any SuperLearner prediction model
#' i.e., \code{"SL.lm"}, \code{"SL.svm"}, used only for \code{"aipw"},
#' \code{"slearner"}, \code{"tlearner"} and \code{"xlearner"} ITE
#' estimators).
#' @param hyper_params The list of hyper parameters to fine-tune the method,
#' including:
#' - *General hyper parameters*
#'   - *intervention_vars*: Array of intervention-able covariate names used
#' for Rules Generation. Empty or null array means that all the covariates
#' are considered as intervention-able (default: `NULL`).
#' - *ntrees*: The number of decision trees for random forest (default: 20).
#' - *node_size*: Minimum size of the trees' terminal nodes (default: 20).
#' - *max_rules*: Maximum number of generated candidates rules (default: 50).
#' - *max_depth*: Maximum rules length (default: 3).
#' - *t_decay*: The decay threshold for rules pruning. Higher values will
#' carry out an aggressive pruning (default: 0.025).
#' - *t_ext*: The threshold to truncate too generic or too specific (extreme)
#' rules (default: 0.01, range: [0, 0.5)).
#' - *t_corr*: The threshold to define correlated rules (default: 1,
#' range: `[0,+inf)`).
#' - *stability_selection*: Method for stability selection for selecting the
#' rules. \code{"vanilla"} for stability selection, \code{"error_control"}
#' for stability selection with error control and \code{"no"} for no
#' stability selection (default: \code{"vanilla"}).
#' - *B*: Number of bootstrap samples for stability selection in rules
#' selection and uncertainty quantification in estimation (default: 20).
#' - *subsample*: Bootstrap ratio subsample for stability selection in rules
#' selection and uncertainty quantification in estimation (default: 0.5).
#' - *Method specific hyper parameters*
#' - *offset*: Name of the covariate to use as offset (i.e., \code{"x1"}) for
#' T-Poisson ITE estimation. Use `NULL` if offset is not used (default:
#' `NULL`).
#' - *cutoff*: Threshold (percentage) defining the minimum cutoff value for
#' the stability scores for Stability Selection (default: 0.9).
#' - *pfer*: Upper bound for the per-family error rate (tolerated amount of
#' falsely selected rules) for Error Control Stability Selection (default: 1).
#'
#' @param ite The estimated ITE vector. If given both the ITE estimation steps
#' in Discovery and Inference are skipped (default: `NULL`).
#'
#'
#' @return
#' An S3 object composed by:
#' \item{M}{the number of Decision Rules extracted at each step,}
#' \item{CATE}{the data.frame of Conditional Average Treatment Effect
#' decomposition estimates with corresponding uncertainty quantification,}
#' \item{method_params}{the list of method parameters,}
#' \item{hyper_params}{the list of hyper parameters,}
#' \item{rules}{the list of rules (implicit form) decomposing the CATE.}
#'
#' @note
#' - If `intervention_vars` are provided, it is important to note that the
#' individual treatment effect will still be computed using all covariates.
#' @export
#'
#' @examples
#'
#' \donttest{
#' set.seed(123)
#' dataset <- generate_cre_dataset(n = 400,
#' rho = 0,
#' n_rules = 2,
#' p = 10,
#' effect_size = 2,
#' binary_covariates = TRUE,
#' binary_outcome = FALSE,
#' confounding = "no")
#' y <- dataset[["y"]]
#' z <- dataset[["z"]]
#' X <- dataset[["X"]]
#'
#' method_params <- list(ratio_dis = 0.5,
#' ite_method ="aipw",
#' learner_ps = "SL.xgboost",
#' learner_y = "SL.xgboost")
#'
#' hyper_params <- list(intervention_vars = NULL,
#' offset = NULL,
#' ntrees = 20,
#' node_size = 20,
#' max_rules = 50,
#' max_depth = 3,
#' t_decay = 0.025,
#' t_ext = 0.025,
#' t_corr = 1,
#' stability_selection = "vanilla",
#' cutoff = 0.6,
#' pfer = 1,
#' B = 20,
#' subsample = 0.5)
#'
#' cre_results <- cre(y, z, X, method_params, hyper_params)
#'}
#'
cre <- function(y, z, X,
method_params = NULL, hyper_params = NULL, ite = NULL) {
"%>%" <- magrittr::"%>%"
# timing the function
st_time_cre <- proc.time()
# Input checks ---------------------------------------------------------------
check_input_data(y, z, X, ite)
method_params <- check_method_params(y = y,
ite = ite,
params = method_params)
hyper_params <- check_hyper_params(X_names = colnames(as.data.frame(X)),
params = hyper_params)
# Honest Splitting -----------------------------------------------------------
subgroups <- honest_splitting(y, z, X,
getElement(method_params, "ratio_dis"), ite)
discovery <- subgroups[["discovery"]]
inference <- subgroups[["inference"]]
y_dis <- discovery$y
z_dis <- discovery$z
X_dis <- discovery$X
ite_dis <- discovery$ite
y_inf <- inference$y
z_inf <- inference$z
X_inf <- inference$X
ite_inf <- inference$ite
intervention_vars <- getElement(hyper_params, "intervention_vars")
# Discovery ------------------------------------------------------------------
logger::log_info("Starting rules discovery...")
st_time_rd <- proc.time()
# Estimate ITE
if (is.null(ite)) {
ite_dis <- estimate_ite(y = y_dis,
z = z_dis,
X = X_dis,
ite_method = getElement(method_params, "ite_method"),
learner_ps = getElement(method_params, "learner_ps"),
learner_y = getElement(method_params, "learner_y"),
offset = getElement(method_params, "offset"))
} else {
logger::log_info("Using the provided ITE estimations...")
}
# Filter only Intervention-able variables
if (!is.null(intervention_vars)) {
X_dis <- X_dis[, intervention_vars, drop = FALSE]
}
# Discover Decision Rules
discovery <- discover_rules(X_dis,
ite_dis,
method_params,
hyper_params)
rules <- discovery[["rules"]]
M <- discovery[["M"]]
en_time_rd <- proc.time()
logger::log_info("Done with rules discovery. ",
"(WC: {g_wc_str(st_time_rd, en_time_rd)}", ".)")
# Inference ------------------------------------------------------------------
logger::log_info("Starting inference...")
st_time_inf <- proc.time()
# Estimate ITE
if (is.null(ite)) {
ite_inf <- estimate_ite(y = y_inf,
z = z_inf,
X = X_inf,
ite_method = getElement(method_params, "ite_method"),
learner_ps = getElement(method_params, "learner_ps"),
learner_y = getElement(method_params, "learner_y"),
offset = getElement(method_params, "offset"))
} else {
logger::log_info("Skipped generating ITE.",
"The provided ITE will be used.")
}
# Filter only Intervention-able variables
if (!is.null(intervention_vars)) {
X_inf <- X_inf[, intervention_vars, drop = FALSE]
}
# Generate rules matrix
if (length(rules) == 0) {
rules_matrix_inf <- NA
rules_explicit <- c()
} else {
rules_matrix_inf <- generate_rules_matrix(X_inf, rules)
if (!is.null(hyper_params$intervention_vars)) {
covariate_names <- hyper_params$intervention_vars
} else {
covariate_names <- colnames(as.data.frame(X))
}
rules_explicit <- interpret_rules(rules, covariate_names)
}
# Estimate CATE
cate_inf <- estimate_cate(rules_matrix_inf,
rules_explicit,
ite_inf,
getElement(hyper_params, "B"),
getElement(hyper_params, "subsample"))
M["select_significant"] <- as.integer(length(cate_inf$Rule)) - 1
# Retain only the significant rules
if (M["select_significant"] > 0) {
rules <- rules[rules_explicit %in% cate_inf$Rule[2:length(cate_inf$Rule)]]
rules_explicit <- cate_inf$Rule[2:length(cate_inf$Rule)]
} else {
rules <- NULL
rules_explicit <- NULL
}
en_time_inf <- proc.time()
logger::log_info("Done with inference. ",
"(WC: {g_wc_str(st_time_inf, en_time_inf)} ", ".)")
# Generate final results S3 object
results <- list("M" = M,
"CATE" = cate_inf,
"method_params" = method_params,
"hyper_params" = hyper_params,
"rules" = rules)
attr(results, "class") <- "cre"
# Return Results -------------------------------------------------------------
end_time_cre <- proc.time()
logger::log_info("Done with running CRE function!",
"(WC: {g_wc_str(st_time_cre, end_time_cre)}", ".)")
return(results)
}
|
/scratch/gouwar.j/cran-all/cranData/CRE/R/cre.R
|
#' @title
#' Discover rules
#'
#' @description
#' Discover the minimal set of rules linearly decomposing the Conditional
#' Average Treatment Effect (CATE).
#'
#' @param X A covariate matrix.
#' @param ite An estimated ITE.
#' @param method_params A vector of method parameters.
#' @param hyper_params A vector of hyper parameters.
#'
#' @return
#' A minimal set of rules linearly decomposing the Conditional Average
#' Treatment Effect (CATE).
#'
#' @keywords internal
#'
discover_rules <- function(X, ite, method_params, hyper_params) {
# Generate rules -------------------------------------------------------------
rules <- generate_rules(X,
ite,
getElement(hyper_params, "ntrees"),
getElement(hyper_params, "node_size"),
getElement(hyper_params, "max_rules"),
getElement(hyper_params, "max_depth"))
M_initial <- length(rules)
# Filtering ------------------------------------------------------------------
# Discard irrelevant variable-value pair from a rule condition ---------------
rules <- filter_irrelevant_rules(rules, X, ite,
getElement(hyper_params, "t_decay"))
M_filter1 <- length(rules)
# Generate rules matrix ------------------------------------------------------
rules_matrix <- generate_rules_matrix(X, rules)
# Discard rules with too few or too many observations rules ------------------
rules_matrix <- filter_extreme_rules(rules_matrix, rules,
getElement(hyper_params, "t_ext"))
rules <- colnames(rules_matrix)
M_filter2 <- length(rules)
# Discard correlated rules ---------------------------------------------------
rules_matrix <- filter_correlated_rules(rules_matrix, rules,
getElement(hyper_params, "t_corr"))
rules <- colnames(rules_matrix)
M_filter3 <- length(rules)
# Select Rules ---------------------------------------------------------------
rules <- select_rules(rules_matrix,
rules,
ite,
getElement(hyper_params, "stability_selection"),
getElement(hyper_params, "cutoff"),
getElement(hyper_params, "pfer"),
getElement(hyper_params, "B"))
rules <- as.character(rules)
M_select1 <- length(rules)
M <- list("initial" = M_initial,
"filter_irrelevant" = M_filter1,
"filter_extreme" = M_filter2,
"filter_correlated" = M_filter3,
"select_LASSO" = M_select1)
return(list(rules = rules, M = M))
}
|
/scratch/gouwar.j/cran-all/cranData/CRE/R/discover_rules.R
|
#' @title
#' Estimate the Conditional Average Treatment Effect
#'
#' @description
#' Estimates the Conditional Average Treatment Effect (CATE) by
#' linearly modeling the Individual Treatment Effect by a set of rules.
#'
#' @param rules_matrix A rules matrix.
#' @param rules_explicit A list of selected rules in terms of covariate names.
#' @param ite The estimated ITEs.
#' @param B The number of bootstrap samples for uncertainty quantification in
#' estimation.
#' @param subsample The bootstrap ratio subsample for uncertainty quantification
#' in estimation.
#'
#' @return
#' A data frame summarizing the CATE linear decomposition, with one row for
#' the ATE and one row per selected rule, and columns:
#' - `Rule`: rule name,
#' - `Estimate`: linear contribution to CATE,
#' - `CI_lower`: lower bound of the 95% confidence interval on the estimate,
#' - `CI_upper`: upper bound of the 95% confidence interval on the estimate,
#' - `P_Value`: p-value (from a t-test).
#'
#' @import stats
#' @keywords internal
#'
#'
estimate_cate <- function(rules_matrix, rules_explicit, ite, B=1, subsample=1) {
logger::log_debug("Estimating CATE ...")
"%>%" <- magrittr::"%>%"
if (B == 1){
# Estimate ATE (if No Rules Selected)
ate_model <- stats::lm(ite ~ 1)
ate_coeff <- summary(ate_model)$coefficients
ate_ci <- stats::confint(ate_model)
ate_summary <- data.frame(Rule = "(ATE)",
Estimate = ate_coeff[1],
CI_lower = ate_ci[1],
CI_upper = ate_ci[2],
P_Value = ate_coeff[2])
if (length(rules_explicit) == 0) {
result <- ate_summary
} else {
# Estimate AATEs
rules_df_inf <- as.data.frame(rules_matrix)
names(rules_df_inf) <- rules_explicit
aate_model <- stats::lm(ite - mean(ite) ~ . -1, data = rules_df_inf)
filter_na <- is.na(aate_model$coefficients)
if (sum(filter_na)) {
rules_matrix <- rules_matrix[, !filter_na]
rules_explicit <- rules_explicit[!filter_na]
return(estimate_cate(rules_matrix, rules_explicit, ite, B=1,
subsample=1))
}
aate_coeff <- summary(aate_model)$coef[, c(1, 4), drop = FALSE] %>%
as.data.frame()
aate_ci <- stats::confint(aate_model) %>% as.data.frame()
aate_summary <- data.frame(Rule = rules_explicit,
Estimate = aate_coeff[, 1],
CI_lower = aate_ci[, 1],
CI_upper = aate_ci[, 2],
P_Value = aate_coeff[, 2])
result <- rbind(ate_summary, aate_summary)
rownames(result) <- 1:nrow(result)
}
}
else {
models <- NULL
for (i in 1:B) {
index <- sample(length(ite),
size = round(length(ite)*subsample),
replace = FALSE)
ite_ <- ite[index]
if (length(rules_explicit) > 0) {
rules_matrix_ <- rules_matrix[index, ]
} else {
rules_matrix_ <- NULL
}
model <- estimate_cate(rules_matrix_, rules_explicit, ite_, B=1,
subsample=1)
models <- rbind(models, model)
}
result <- aggregate(Estimate ~ Rule,
data = models,
FUN = function(x) c(mean = mean(x), sd = sd(x)))
rules_explicit_ <- c("(ATE)", rules_explicit)
result <- result[order(match(result$Rule, rules_explicit_)), ]
Mean_Estimate <- result[,2][,1]
Std_Dev_Estimate <- result[,2][,2]
result$t <- Mean_Estimate / Std_Dev_Estimate
result$p_value <- 2 * (1 - pt(abs(result$t),
length(ite) - nrow(result)))
result$CI_lower <- Mean_Estimate - 1.96 * Std_Dev_Estimate
result$CI_upper <- Mean_Estimate + 1.96 * Std_Dev_Estimate
result <- data.frame(Rule = result$Rule,
Estimate = Mean_Estimate,
CI_lower = result$CI_lower,
CI_upper = result$CI_upper,
P_Value = result$p_value)
}
if (nrow(result)>1 & B>1) {
filter_pvalue <- result$P_Value[2:length(result$P_Value)] <= 0.05
if (sum(filter_pvalue) < length(filter_pvalue)) {
rules_matrix <- rules_matrix[, filter_pvalue, drop = FALSE]
rules_explicit <- rules_explicit[filter_pvalue]
return(estimate_cate(rules_matrix, rules_explicit, ite, B, subsample))
}
}
#logger::log_debug("Done with estimating CATE.")
rownames(result) <- 1:nrow(result)
return(result)
}
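# Illustrative sketch (internal helper): with a single toy rule and simulated
# ITEs, estimate_cate() returns the ATE row plus one AATE row for that rule.
# The rule label "x1>0.5" is hypothetical.
if (FALSE) {
  set.seed(1)
  ite <- rnorm(200, mean = 1)
  rules_matrix <- matrix(rbinom(200, 1, 0.5), ncol = 1)
  estimate_cate(rules_matrix, rules_explicit = "x1>0.5", ite, B = 1)
}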
|
/scratch/gouwar.j/cran-all/cranData/CRE/R/estimate_cate.R
|
#' @title
#' Estimate the Individual Treatment Effect (ITE)
#'
#' @description
#' Estimates the Individual Treatment Effect given a response vector,
#' a treatment vector, a covariate matrix, and a desired algorithm.
#'
#' @param y An observed response vector.
#' @param z An observed treatment vector.
#' @param X A covariate matrix.
#' @param ite_method A method for estimating the Individual Treatment Effect.
#' Some methods requires additional parameters. These parameters are mentioned
#' in the indented blocks for each method and their definitions are provided at
#' the end of this parameters list.
#' - \code{"slearner"}: S-Learner.
#' - `learner_y`
#' - \code{"tlearner"}: T-Learner.
#' - `learner_y`
#' - \code{"tpoisson"}: T-Poisson.
#' - `offset`
#' - \code{"xlearner"}: X-Learner.
#' - `learner_y`
#' - \code{"aipw"}: Augmented Inverse Probability Weighting.
#' - `learner_ps` and `learner_y`
#' - \code{"bart"}: Bayesian Additive Regression Trees.
#' - `learner_ps`
#' - \code{"cf"}: Causal Forest.
#' - `learner_ps`
#' @param ... Additional parameters passed to different models.
#' @details
#' ## Additional parameters
#' - **learner_ps**: An estimation method for the propensity score. This
#' includes libraries for the SuperLearner package.
#' - **learner_y**: An estimation model for the outcome. This includes
#' libraries for the SuperLearner package.
#' - **offset**: Name of the covariate to use as offset (i.e. \code{"x1"}) for
#' Poisson ITE Estimation. `NULL` if offset is not used.
#'
#' @return
#' A list of ITE estimates.
#'
#' @keywords internal
#'
estimate_ite <- function(y, z, X, ite_method, ...) {
logger::log_debug("Estimating ITE...")
st_time <- proc.time()
# Address visible binding error.
offset <- learner_y <- learner_ps <- NULL
## collect additional arguments
dot_args <- list(...)
arg_names <- names(dot_args)
for (i in arg_names){
assign(i, unlist(dot_args[i], use.names = FALSE))
}
check_args <- function(required_args, arg_names) {
for (arg in required_args){
if (!is.element(arg, arg_names)) {
stop(paste("At least one argument is not provided. Missing argument: ",
arg, "."))
}
}
}
if (ite_method == "slearner") {
check_args(c("learner_y"), arg_names)
ite <- estimate_ite_slearner(y, z, X, learner_y)
} else if (ite_method == "tlearner") {
check_args(c("learner_y"), arg_names)
ite <- estimate_ite_tlearner(y, z, X, learner_y)
} else if (ite_method == "xlearner") {
check_args(c("learner_y"), arg_names)
ite <- estimate_ite_xlearner(y, z, X, learner_y)
}else if (ite_method == "aipw") {
check_args(c("learner_ps", "learner_y"), arg_names)
ite <- estimate_ite_aipw(y, z, X, learner_ps, learner_y)
} else if (ite_method == "bart") {
check_args(c("learner_ps"), arg_names)
ite <- estimate_ite_bart(y, z, X, learner_ps)
} else if (ite_method == "cf") {
check_args(c("learner_ps"), arg_names)
ite <- estimate_ite_cf(y, z, X, learner_ps)
} else if (ite_method == "tpoisson") {
check_args(c("offset"), arg_names)
ite <- estimate_ite_tpoisson(y, z, X, offset)
} else {
stop(paste("Invalid ITE method. Please choose from the following:\n",
"'slearner', 'tlearner', 'xlearner', 'aipw', 'bart', ",
"'cf' or 'tpoisson'"))
}
en_time <- proc.time()
logger::log_debug("Done with estimating ITE. ",
"(WC: {g_wc_str(st_time, en_time)}", ".)")
return(ite)
}
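# Hedged usage sketch: additional method-specific arguments travel through `...`
# and are validated by check_args(), e.g. "aipw" requires both learner_ps and
# learner_y. The toy data and the SL.glm learners below are illustrative choices.
if (FALSE) {
  set.seed(1)
  X <- data.frame(x1 = rnorm(100), x2 = rnorm(100))
  z <- rbinom(100, 1, 0.5)
  y <- rnorm(100) + z
  ite <- estimate_ite(y, z, X, ite_method = "aipw",
                      learner_ps = "SL.glm", learner_y = "SL.glm")
}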
|
/scratch/gouwar.j/cran-all/cranData/CRE/R/estimate_ite.R
|
#' @title
#' Estimate the Individual Treatment Effect (ITE) using Augmented Inverse
#' Probability Weighting (AIPW)
#'
#' @description
#' Estimates the Individual Treatment Effect using Augmented Inverse Probability
#' Weighting given a response vector, a treatment vector, a features matrix,
#' an estimation model for the propensity score and estimation model for the
#' outcome.
#'
#' @param y An observed response vector.
#' @param z A treatment vector.
#' @param X A features matrix.
#' @param learner_ps A estimation model for the propensity score.
#' @param learner_y A estimation model for the outcome.
#'
#' @return
#' A list of ITE estimates.
#'
#' @keywords internal
#'
estimate_ite_aipw <- function(y, z, X, learner_ps = "SL.xgboost",
learner_y = "SL.xgboost") {
logger::log_trace("learner_ps: '{learner_ps}' and learner_y: '{learner_y}'",
" were provided.")
ps_hat <- estimate_ps(z, X, learner_ps)
y_model <- SuperLearner::SuperLearner(Y = y,
X = data.frame(X = X, Z = z),
family = gaussian(),
SL.library = learner_y,
cvControl = list(V = 0))
if (sum(y_model$coef) == 0) y_model$coef[1] <- 1
y_0_hat <- predict(y_model,
data.frame(X = X, Z = rep(0, nrow(X))),
onlySL = TRUE)$pred
y_1_hat <- predict(y_model,
data.frame(X = X, Z = rep(1, nrow(X))),
onlySL = TRUE)$pred
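# Doubly robust pseudo-outcomes: each outcome-model prediction is augmented
# with an inverse-probability-weighted residual, so the ITE estimate remains
# consistent if either the outcome model or the propensity model is correct.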
apo_1 <- y_1_hat + z * (y - y_1_hat) / (ps_hat)
apo_0 <- y_0_hat + (1 - z) * (y - y_0_hat) / (1 - ps_hat)
ite <- as.vector(apo_1 - apo_0)
return(ite)
}
|
/scratch/gouwar.j/cran-all/cranData/CRE/R/estimate_ite_aipw.R
|
#' @title
#' Estimate the Individual Treatment Effect (ITE) using Bayesian Additive
#' Regression Trees (BART)
#'
#' @description
#' Estimates the Individual Treatment Effect using Bayesian Additive Regression
#' Trees given a response vector, a treatment vector, and a features matrix.
#'
#' @param y An observed response vector.
#' @param z A treatment vector.
#' @param X A features matrix.
#' @param learner_ps Method for the estimation of the propensity score.
#'
#' @return A list of ITE estimates.
#'
#' @note The number of samples and the number of burn are set by default equal
#' to 500.
#'
#' @keywords internal
#'
estimate_ite_bart <- function(y, z, X, learner_ps) {
logger::log_trace("learner_ps: '{learner_ps}' was selected.")
if (!is.null(learner_ps)) {
est_ps <- estimate_ps(z, X, learner_ps)
X <- cbind(X, est_ps)
}
n_sample <- 500
n_burn <- 500
logger::log_trace("In bartCause::bartc command n.samples: {n_sample} ",
"and n.burn: {n_burn} were used.")
bart_fit <- bartCause::bartc(as.matrix(y), as.matrix(z), as.matrix(X),
n.samples = n_sample, n.burn = n_burn)
pd_ite <- bartCause::extract(bart_fit, type = "ite")
ite <- apply(pd_ite, 2, mean)
return(ite)
}
|
/scratch/gouwar.j/cran-all/cranData/CRE/R/estimate_ite_bart.R
|
#' @title
#' Estimate the Individual Treatment Effect (ITE) using Causal Forest (CF)
#'
#' @description
#' Estimates the Individual Treatment Effect using Causal Forest given a
#' response vector, a treatment vector, and a features matrix.
#'
#' @param y An observed response vector.
#' @param z A treatment vector.
#' @param X A features matrix.
#' @param learner_ps A method for the estimation of the propensity score.
#'
#' @return
#' A list of ITE estimates.
#'
#' @keywords internal
#'
estimate_ite_cf <- function(y, z, X, learner_ps) {
logger::log_trace("learner_ps: '{learner_ps}' was selected.")
if (!requireNamespace("grf", quietly = TRUE)) {
stop(
"Package \"grf\" must be installed to use this function.",
call. = FALSE
)
}
if (!is.null(learner_ps)) {
est_ps <- estimate_ps(z, X, learner_ps)
X <- cbind(X, est_ps)
}
tau_forest <- grf::causal_forest(X, y, z)
ite <- stats::predict(tau_forest)$predictions
return(ite)
}
|
/scratch/gouwar.j/cran-all/cranData/CRE/R/estimate_ite_cf.R
|
#' @title
#' Estimate the Individual Treatment Effect (ITE) using S-Learner
#'
#' @description
#' Estimates the Individual Treatment Effect using S-Learner given a response
#' vector, a treatment vector, a features matrix and estimation model for the
#' outcome.
#'
#' @param y An observed response vector.
#' @param z A treatment vector.
#' @param X A features matrix.
#' @param learner_y An estimation model for the outcome.
#'
#' @return
#' A list of ITE estimates.
#'
#' @keywords internal
#'
estimate_ite_slearner <- function(y, z, X, learner_y = "SL.xgboost") {
logger::log_trace("learner_y: '{learner_y}' was selected.")
y_model <- SuperLearner::SuperLearner(Y = y,
X = data.frame(X = X, Z = z),
family = gaussian(),
SL.library = learner_y,
cvControl = list(V = 0))
if (sum(y_model$coef) == 0) y_model$coef[1] <- 1
y_0_hat <- predict(y_model,
data.frame(X = X, Z = rep(0, nrow(X))),
onlySL = TRUE)$pred
y_1_hat <- predict(y_model,
data.frame(X = X, Z = rep(1, nrow(X))),
onlySL = TRUE)$pred
ite <- as.vector(y_1_hat - y_0_hat)
return(ite)
}
|
/scratch/gouwar.j/cran-all/cranData/CRE/R/estimate_ite_slearner.R
|
#' @title
#' Estimate the Individual Treatment Effect (ITE) using T-Learner
#'
#' @description
#' Estimates the Individual Treatment Effect using T-Learner given a response
#' vector, a treatment vector, a features matrix and estimation model for the
#' outcome.
#'
#' @param y An observed response vector.
#' @param z A treatment vector.
#' @param X A features matrix.
#' @param learner_y A estimation model for the outcome.
#'
#' @return
#' A list of ITE estimates.
#'
#' @keywords internal
#'
estimate_ite_tlearner <- function(y, z, X, learner_y = "SL.xgboost") {
logger::log_trace("learner_y: '{learner_y}' was selected.")
X <- as.data.frame(X)
y_0_model <- SuperLearner::SuperLearner(Y = y[z == 0],
X = X[z == 0, ],
family = gaussian(),
SL.library = learner_y,
cvControl = list(V = 0))
if (sum(y_0_model$coef) == 0) y_0_model$coef[1] <- 1
y_0_hat <- predict(y_0_model, X, onlySL = TRUE)$pred
y_1_model <- SuperLearner::SuperLearner(Y = y[z == 1],
X = X[z == 1, ],
family = gaussian(),
SL.library = learner_y,
cvControl = list(V = 0))
if (sum(y_1_model$coef) == 0) y_1_model$coef[1] <- 1
y_1_hat <- predict(y_1_model, X, onlySL = TRUE)$pred
ite <- as.vector(y_1_hat - y_0_hat)
return(ite)
}
|
/scratch/gouwar.j/cran-all/cranData/CRE/R/estimate_ite_tlearner.R
|
#' @title
#' Estimate the Individual Treatment Effect (ITE) using T-Poisson regression
#'
#' @description
#' Estimates the Individual Treatment Effect using Poisson regression given a
#' response vector, a treatment vector, and a features matrix.
#'
#' @param y An observed response vector.
#' @param z A treatment vector.
#' @param X A features matrix.
#' @param offset A name of the covariate to use as offset (e.g. \dQuote{x1}) to
#' model the corresponding outcome rate. `NULL` to model directly the outcome
#' counts without offset.
#'
#' @return
#' A vector of ITE estimates.
#'
#' @keywords internal
#'
estimate_ite_tpoisson <- function(y, z, X, offset) {
X_names <- names(X)
if (!is.null(offset)) {
if (!(offset %in% X_names)) {
stop("Offset varible is not observed. Please replace `offset` with an
observed varibale.")
} else {
X_names <- X_names[-which(X_names == offset)]
colnames(X)[colnames(X) == offset] <- "offset_var"
y_treated <- data.frame(y = y[z == 1])
X_treated <- as.data.frame(X[z == 1, ])
y_control <- data.frame(y = y[z == 0])
X_control <- as.data.frame(X[z == 0, ])
data_treated <- cbind(y_treated, X_treated)
data_control <- cbind(y_control, X_control)
formula <- as.formula(paste("y ~ ", paste(X_names, collapse = "+"),
"+ offset(log(offset_var))"))
temp1 <- stats::glm(formula,
data = data_treated,
family = stats::poisson(link = "log"))
temp0 <- stats::glm(formula,
data = data_control,
family = stats::poisson(link = "log"))
}
} else {
y_treated <- data.frame(y = y[z == 1])
X_treated <- as.data.frame(X[z == 1, ])
y_control <- data.frame(y = y[z == 0])
X_control <- as.data.frame(X[z == 0, ])
data_treated <- cbind(y_treated, X_treated)
data_control <- cbind(y_control, X_control)
temp1 <- stats::glm(y ~ .,
data = data_treated,
family = stats::poisson(link = "log"))
temp0 <- stats::glm(y ~ .,
data = data_control,
family = stats::poisson(link = "log"))
}
y1hat <- stats::predict(temp1, as.data.frame(X), type = "response")
y0hat <- stats::predict(temp0, as.data.frame(X), type = "response")
ite <- y1hat - y0hat
return(ite)
}
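# The following commented lines are a minimal usage sketch (not run) of
# estimate_ite_tpoisson(); here `y`, `z` and `X` stand for a count outcome,
# a binary treatment and a covariate data frame, and the offset name "x1" is
# only an example taken from the documentation above.
#   ite <- estimate_ite_tpoisson(y, z, X, offset = NULL)  # model the counts directly
#   ite <- estimate_ite_tpoisson(y, z, X, offset = "x1")  # model the rate with a log(x1) offset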
|
/scratch/gouwar.j/cran-all/cranData/CRE/R/estimate_ite_tpoisson.R
|
#' @title
#' Estimate the Individual Treatment Effect (ITE) using X-Learner
#'
#' @description
#' Estimates the Individual Treatment Effect using X-Learner given a response
#' vector, a treatment vector, a features matrix and an estimation model for
#' the outcome.
#'
#' @param y An observed response vector.
#' @param z A treatment vector.
#' @param X A features matrix.
#' @param learner_y An estimation model for the outcome.
#'
#' @return
#' A vector of ITE estimates.
#'
#' @keywords internal
#'
estimate_ite_xlearner <- function(y, z, X, learner_y = "SL.xgboost") {
logger::log_trace("learner_y: '{learner_y}' was selected.")
X <- as.data.frame(X)
y_0_model <- SuperLearner::SuperLearner(Y = y[z == 0],
X = X[z == 0, ],
family = gaussian(),
SL.library = learner_y,
cvControl = list(V = 0))
if (sum(y_0_model$coef) == 0) y_0_model$coef[1] <- 1
y_1_model <- SuperLearner::SuperLearner(Y = y[z == 1],
X = X[z == 1, ],
family = gaussian(),
SL.library = learner_y,
cvControl = list(V = 0))
if (sum(y_1_model$coef) == 0) y_1_model$coef[1] <- 1
ite <- array(0, dim = length(y))
ite[z == 0] <- predict(y_1_model, X[z == 0, ], onlySL = TRUE)$pred - y[z == 0]
ite[z == 1] <- y[z == 1] - predict(y_0_model, X[z == 1, ], onlySL = TRUE)$pred
ite <- as.vector(ite)
return(ite)
}
|
/scratch/gouwar.j/cran-all/cranData/CRE/R/estimate_ite_xlearner.R
|
#' @title
#' Estimate the propensity score
#'
#' @description
#' Estimates the Propensity Score given a treatment vector and
#' features data frame.
#'
#' @param z A treatment vector.
#' @param X A features data frame.
#' @param ps_method An estimation model for the propensity score
#' (default: `SL.xgboost`).
#'
#' @return
#' A vector of propensity score estimates.
#'
#' @import SuperLearner
#'
#' @keywords internal
estimate_ps <- function(z, X, ps_method = "SL.xgboost") {
sl_pscore <- SuperLearner::SuperLearner(Y = z, X = as.data.frame(X),
newX = as.data.frame(X),
family = binomial(),
SL.library = ps_method,
cvControl = list(V = 0))
if (sum(sl_pscore$coef) == 0) sl_pscore$coef[1] <- 1
est_ps <- as.numeric(sl_pscore$SL.predict)
return(est_ps)
}
|
/scratch/gouwar.j/cran-all/cranData/CRE/R/estimate_ps.R
|
#' @title
#' Discovery (performance) evaluation
#'
#' @description
#' Computes different metrics for discovery evaluation.
#'
#' @param ground_truth List of true (CDR/EM) values.
#' @param prediction List of predicted (CDR/EM) values.
#'
#' @keywords internal
#'
#' @return
#' Intersection over union, precision, recall.
#'
evaluate <- function(ground_truth, prediction) {
intersect <- intersect(prediction, ground_truth)
union <- union(prediction, ground_truth)
TP <- length(intersect)
FP <- length(setdiff(prediction, ground_truth))
FN <- length(setdiff(ground_truth, prediction))
recall <- TP / (TP + FN) # quantity
precision <- TP / (TP + FP) # quality
IoU <- length(intersect) / length(union)
evaluate <- list(recall = recall,
precision = precision,
IoU = IoU)
return(evaluate)
}
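# The following commented lines are a minimal usage sketch (not run) of
# evaluate(); the two rule sets are hypothetical and only illustrate the set
# arithmetic behind the reported metrics.
#   ground_truth <- c("x1>0.5 & x2<=0.5", "x5>0.5 & x6<=0.5")
#   prediction <- c("x1>0.5 & x2<=0.5", "x4<=0.5")
#   evaluate(ground_truth, prediction)
#   # TP = 1, FP = 1, FN = 1, so recall = 0.5, precision = 0.5, IoU = 1/3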
|
/scratch/gouwar.j/cran-all/cranData/CRE/R/evaluate.R
|
#' @title
#' Extract effect modifiers
#'
#' @description
#' Extracts the effect modifiers from a list of (causal) decision rules.
#'
#' @param rules_list A list of (causal) decision rules.
#' @param X_names A list of the covariate names.
#'
#' @keywords internal
#'
#' @return
#' A list of the effect modifiers.
#'
extract_effect_modifiers <- function(rules_list, X_names) {
effect_modifiers <- c()
for (X_name in X_names) {
if (any(grepl(X_name, rules_list))) {
effect_modifiers <- append(effect_modifiers, X_name)
}
}
return(effect_modifiers)
}
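# The following commented lines are a minimal usage sketch (not run) of
# extract_effect_modifiers(); the rules and covariate names are hypothetical.
#   rules_list <- c("x1>0.5 & x2<=0.5", "x4<=0.5")
#   X_names <- c("x1", "x2", "x3", "x4", "x5")
#   extract_effect_modifiers(rules_list, X_names)
#   # returns c("x1", "x2", "x4")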
|
/scratch/gouwar.j/cran-all/cranData/CRE/R/extract_effect_modifiers.R
|
#' @title
#' Extract (causal) decision rules
#'
#' @description
#' Extracts causal rules from the random forest or the gradient
#' boosting algorithms.
#'
#' @param treelist A list of decision trees.
#' @param X Features matrix.
#' @param max_depth A number of top levels from each tree considered.
#' @param digits The number of digits for rounding decision rules to extract
#' conditions.
#'
#' @keywords internal
#'
#' @return
#' A vector of (causal) decision rules.
#'
extract_rules <- function(treelist, X, max_depth, digits = 2) {
if (is.numeric(digits)) digits <- as.integer(abs(digits))
levelX <- list()
for (iX in 1:ncol(X)) levelX <- c(levelX, list(levels(X[, iX])))
ntree <- min(treelist$ntree)
allRulesList <- list()
for (iTree in 1:ntree) {
rule <- list()
count <- 0
rowIx <- 1
tree <- treelist$list[[iTree]]
if (nrow(tree) <= 1) next # skip if there is no split
ruleSet <- vector("list", length(which(tree[, "status"] == -1)))
for (max_length in 1:max_depth) {
res <- inTrees::treeVisit(tree,
rowIx = rowIx,
count,
ruleSet,
rule,
levelX,
length = 0,
max_length = max_length,
digits = digits)
allRulesList <- c(allRulesList, res$ruleSet)
}
}
allRulesList <- allRulesList[!unlist(lapply(allRulesList, is.null))]
rules <- inTrees::ruleList2Exec(X, allRulesList)
return(rules)
}
|
/scratch/gouwar.j/cran-all/cranData/CRE/R/extract_rules.R
|
#' @title
#' Filter correlated rules
#'
#' @description
#' Discards highly correlated rules (i.e., \eqn{Cov(rule_1,rule_2) > t_{corr}}).
#'
#' @param rules_matrix A rules matrix.
#' @param rules_list A list of rules (names).
#' @param t_corr A threshold to define correlated rules.
#'
#' @keywords internal
#'
#' @return
#' A rules matrix without the highly correlated rules (columns).
#'
filter_correlated_rules <- function(rules_matrix, rules_list, t_corr) {
logger::log_debug("Filtering correlated rules...")
# Identify correlated rules
nrules <- length(rules_list)
ind <- 1:nrules
C <- stats::cor(rules_matrix)
elim <- c()
for (i in 1:(nrules - 1)) {
elim <- c(elim,
which(round(abs(C[i, (i + 1):nrules]), digits = 4) >= t_corr) + i)
}
if (length(elim) > 0) ind <- ind[-elim]
# Remove correlated rules
rules_matrix <- rules_matrix[, ind, drop = FALSE]
rules_list <- rules_list[ind]
colnames(rules_matrix) <- rules_list
logger::log_debug("Done with filtering correlated rules.")
return(rules_matrix)
}
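# The following commented lines are a minimal usage sketch (not run) of
# filter_correlated_rules(); the rule names and the toy matrix are hypothetical.
#   rules_list <- c("r1", "r2", "r3")
#   rules_matrix <- cbind(c(1, 1, 0, 0),   # r1
#                         c(1, 1, 0, 0),   # r2 (identical to r1)
#                         c(1, 0, 1, 0))   # r3
#   filter_correlated_rules(rules_matrix, rules_list, t_corr = 1)
#   # r1 and r2 have correlation 1, so the later one (r2) is dropped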
|
/scratch/gouwar.j/cran-all/cranData/CRE/R/filter_correlated_rules.R
|
#' @title
#' Filter extreme decision rules
#'
#' @description
#' Discards rules with too few or too many observations.
#'
#' @param rules_matrix A rules matrix.
#' @param rules_list A list of rules (names).
#' @param t_ext A threshold in [0, 0.5) range to truncate too generic or too
#' specific rules.
#'
#' @keywords internal
#'
#' @return
#' A rules matrix without the rare/common rules.
#'
filter_extreme_rules <- function(rules_matrix, rules_list, t_ext) {
logger::log_debug("Filtering extreme rules...")
# Identify rules with too few or too many observations
ind <- 1:dim(rules_matrix)[2]
sup <- apply(rules_matrix, 2, mean)
elim <- which((sup < t_ext) | (sup > (1 - t_ext)))
if (length(elim) > 0) ind <- ind[-elim]
# Remove rules with too few/too many observations
rules_matrix <- rules_matrix[, ind, drop = FALSE]
rules_list <- rules_list[ind]
colnames(rules_matrix) <- rules_list
logger::log_debug("Done with filtering extreme rules.")
return(rules_matrix)
}
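# The following commented lines are a minimal usage sketch (not run) of
# filter_extreme_rules(); the rule names and the toy matrix are hypothetical.
#   rules_list <- c("r1", "r2", "r3")
#   rules_matrix <- cbind(c(rep(1, 9), 0),   # support 0.9 (too common)
#                         rep(c(1, 0), 5),   # support 0.5
#                         c(1, rep(0, 9)))   # support 0.1 (too rare)
#   filter_extreme_rules(rules_matrix, rules_list, t_ext = 0.2)
#   # only r2 survives: supports 0.9 and 0.1 fall outside [0.2, 0.8]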
|
/scratch/gouwar.j/cran-all/cranData/CRE/R/filter_extreme_rules.R
|
#' @title
#' Filter irrelevant decision rules using leave-one-out pruning
#'
#' @description
#' Filters the irrelevant decision rules. The irrelevant rules are interpreted
#' as an error increase after removing a variable-value pair from the decision
#' rules (see \dQuote{Interpreting tree ensembles with the inTrees package} by
#' Houtao Deng, 2019).
#'
#' @param rules A list of rules.
#' @param X A features matrix.
#' @param ite An estimated ITE.
#' @param t_decay The decay threshold for rules pruning.
#'
#' @keywords internal
#'
#' @return
#' A list of \dQuote{relevant} rules.
#'
filter_irrelevant_rules <- function(rules, X, ite, t_decay) {
logger::log_debug("Filtering irrelevant rules...")
ite_ <- ite - mean(ite)
rules_matrix <- matrix(rules)
colnames(rules_matrix) <- "condition"
metric <- inTrees::getRuleMetric(rules_matrix,
X,
ite_)
pruned <- inTrees::pruneRule(rules = metric,
X = X,
target = ite_,
maxDecay = t_decay)
rules <- unique(pruned[, 4])
logger::log_debug("Done with filtering irrelevant rules.")
return(unique(rules))
}
|
/scratch/gouwar.j/cran-all/cranData/CRE/R/filter_irrelevant_rules.R
|
#' @title
#' Generate CRE synthetic data
#'
#' @description
#' Generates synthetic data sets to run simulation for causal inference
#' experiments composed by an outcome vector (`y`), a treatment vector (`z`),
#' a covariates matrix (`X`), and an unobserved individual treatment effects
#' vector (`ite`).
#' The arguments specify the data set characteristic, including the
#' number of individuals (`n`), the number of covariates (`p`), the correlation
#' within the covariates (`rho`), the number of decision rules
#' (`n_rules`) decomposing the Conditional Average Treatment Effect (CATE), the
#' treatment effect magnitude (`effect_size`), the confounding mechanism
#' (`confounding`), and whether the covariates and outcomes are binary or
#' continuous (`binary_covariates`, `binary_outcome`).
#'
#' @details
#' The covariates matrix is generated with the specified correlation among
#' individuals, and each covariate is sampled either from a
#' \code{Bernoulli(0.5)} if binary, or a \code{Gaussian(0,1)} if continuous.
#' The treatment vector is sampled from a
#' \code{Bernoulli}(\eqn{\frac{1}{1+ \exp(1-x_1+x_2-x_3)}}), enforcing the treatment
#' assignment probabilities to be a function of observed covariates.
#' The potential outcomes (\eqn{y(0)} and \eqn{y(1)}) are then sampled from a Bernoulli
#' if binary, or a Gaussian (with standard deviation equal to 1) if continuous.
#' Their mean is equal to a confounding term (null, linear or non-linear and
#' always null for binary outcome) plus 1-4 decision rules weighted by the
#' treatment effect magnitude. The two potential outcomes characterizes the CATE
#' (and then the unobserved individual treatment effects vector) as the sum of
#' different additive contributions for each decision rules considered
#' (plus an intercept).
#' The final expression of the CATE depends on the treatment effect magnitude
#' and the number of decision rules considered.
#'
#' The 4 decision rules are:
#' - Rule 1: \eqn{1\{x_1 > 0.5; x_2 \leq 0.5\}(\textbf{x})}
#' - Rule 2: \eqn{1\{x_5 > 0.5; x_6 \leq 0.5\}(\textbf{x})}
#' - Rule 3: \eqn{1\{x_4 \leq 0.5\}(\textbf{x})}
#' - Rule 4: \eqn{1\{x_5 \leq 0.5; x_7 > 0.5; x_8 \leq 0.5\}(\textbf{x})}
#' with corresponding additive average treatment effect (AATE) equal to:
#' - Rule 1: \eqn{-} `effect_size`,
#' - Rule 2: \eqn{+} `effect_size`,
#' - Rule 3: \eqn{- 0.5 \cdot} `effect_size`,
#' - Rule 4: \eqn{+ 2 \cdot} `effect_size`.
#'
#' For example, setting `effect_size`=4 and `n_rules`=2:
#' \deqn{\text{CATE}(\textbf{x}) = -4 \cdot 1\{x_1 > 0.5; x_2 \leq 0.5\}(\textbf{x}) +
#' 4 \cdot 1\{x_5 > 0.5; x_6 \leq 0.5\}(\textbf{x})}
#'
#' The final outcome vector `y` is finally computed by combining the potential
#' outcomes according to the treatment assignment.
#'
#' @param n An integer number that represents the number of observations.
#' Non-integer values will be converted into an integer number.
#' @param rho A positive double number that represents the correlation
#' within the covariates (default: 0, range: [0,1)).
#' @param n_rules The number of causal rules (default: 2, range: \{1,2,3,4\}).
#' @param effect_size The treatment effect size magnitude (default: 2,
#' range: \eqn{\geq}0).
#' @param p The number of covariates (default: 10).
#' @param binary_covariates Whether to use binary or continuous covariates
#' (default: `TRUE`).
#' @param binary_outcome Whether to use binary or continuous outcomes
#' (default: `TRUE`).
#' @param confounding Only for continuous outcome, add confounding variables:
#' - \code{"lin"} for linear confounding,
#' - \code{"nonlin"} for non-linear confounding,
#' - \code{"no"} for no confounding (default).
#'
#' @return
#' A list, representing the generated synthetic data set, containing:
#' \item{y}{an outcome vector,}
#' \item{z}{a treatment vector,}
#' \item{X}{a covariates matrix,}
#' \item{ite}{an individual treatment vector.}
#'
#' @note
#' Set the covariates domain (`binary_covariates`) and outcome domain
#' (`binary_outcome`) according to the experiment of interest.
#' Increase complexity in heterogeneity discovery:
#' - decreasing the sample size (`n`),
#' - adding correlation among covariates (`rho`),
#' - increasing the number of rules (`n_rules`),
#' - increasing the number of covariates (`p`),
#' - decreasing the absolute value of the causal effect (`effect_size`),
#' - adding linear or not-linear confounders (`confounding`).
#'
#' @examples
#' set.seed(123)
#' dataset <- generate_cre_dataset(n = 1000, rho = 0, n_rules = 2, p = 10,
#' effect_size = 2, binary_covariates = TRUE,
#' binary_outcome = TRUE, confounding = "no")
#'
#' @export
#'
generate_cre_dataset <- function(n = 1000, rho = 0, n_rules = 2, p = 10,
effect_size = 2, binary_covariates = TRUE,
binary_outcome = TRUE, confounding = "no") {
# Check for correct binary input
if (!(binary_outcome %in% c(TRUE, FALSE))) {
stop("Invalid 'binary' input. Please specify TRUE or FALSE.")
}
if (is.numeric(n) && !is.integer(n)) {
n <- as.integer(n)
}
# Generate Covariate Matrix
mu <- rep(0, p)
Sigma <- matrix(rho, nrow = p, ncol = p) + diag(p) * (1 - rho)
rawvars <- MASS::mvrnorm(n = n, mu = mu, Sigma = Sigma)
pvars <- stats::pnorm(rawvars)
if (binary_covariates) {
X <- stats::qbinom(pvars, 1, 0.5)
} else {
X <- pvars
}
colnames(X) <- paste("x", 1:p, sep = "")
X <- as.data.frame(X)
# Generate Treatment Vector
logit_prob <- -1 + X$x1 - X$x2 + X$x3
prob <- exp(logit_prob) / (1 + exp(logit_prob))
z <- stats::rbinom(n, 1, prob = prob)
# Generate Causal Rules and Potential Outcomes
if (binary_outcome == TRUE) {
y0 <- rep(0, n)
y1 <- rep(0, n)
effect_size <- 1
} else {
if (confounding == "lin") {
mean <- X$x1 + X$x3 + X$x4
} else if (confounding == "nonlin") {
mean <- X$x1 + cos(X$x3*X$x4)
} else if (confounding == "no") {
mean <- 0
} else {
stop("Invalid 'confounding' input. Please input:
'lin' for linear confounding,
'nonlin' for non-linear confounding),
'no' for no confounding")
}
y0 <- stats::rnorm(n, mean = mean, sd = 1)
y1 <- y0
}
if (n_rules >= 1) {
y0[X$x1 > 0.5 & X$x2 <= 0.5] <- y0[X$x1 > 0.5 & X$x2 <= 0.5] + effect_size
}
if (n_rules >= 2) {
y1[X$x5 > 0.5 & X$x6 <= 0.5] <- y1[X$x5 > 0.5 & X$x6 <= 0.5] + effect_size
}
if (n_rules >= 3) {
if (binary_outcome) {
stop(paste("Synthtic dataset with binary outcome and ", n_rules,
"rules has not been implemented yet. ",
"Available 'n_rules' options: {1,2}."))
}
y0[X$x4 <= 0.5] <- y0[X$x4 <= 0.5] + 0.5 * effect_size
}
if (n_rules >= 4) {
y1[X$x5 <= 0.5 & X$x7 > 0.5 & X$x8 <= 0.5] <-
y1[X$x5 <= 0.5 & X$x7 > 0.5 & X$x8 <= 0.5] + 2 * effect_size
}
if (n_rules >= 5) {
stop(paste("Synthtic dataset with continuos outcome and ", n_rules,
"rules has not been implemented yet. ",
"Available 'n_rules' options: {1,2,3,4}."))
}
# Generate Outcome
y <- y0 * (1 - z) + y1 * z
# Compute Individual Treatment Effect
ite <- y1 - y0
# Generate data set
dataset <- list(y = y, z = z, X = X, ite = ite)
return(dataset)
}
|
/scratch/gouwar.j/cran-all/cranData/CRE/R/generate_cre_dataset.R
|
#' @title
#' Generate rules
#'
#' @description
#' Generates a list of rules characterizing the heterogeneity in the Conditional
#' Average Treatment Effect (CATE) by tree-based methods (i.e., random forest).
#'
#' @param X A covariate matrix.
#' @param ite A vector of estimated ITE.
#' @param ntrees The number of decision trees for the random forest algorithm.
#' @param node_size Minimum size of the trees' terminal nodes.
#' @param max_rules Maximum number of candidate decision rules.
#' @param max_depth Maximum rules length.
#'
#' @return
#' A list of rules (names).
#'
#' @keywords internal
#'
generate_rules <- function(X, ite, ntrees, node_size, max_rules, max_depth) {
logger::log_debug("Generating (candidate) rules...")
st_time <- proc.time()
# TODO: replace splitting criteria enforcing heterogeneity
if (ntrees > 0) {
N <- dim(X)[1]
sampsize <- 0.5 * N
forest <- randomForest::randomForest(x = X,
y = ite,
sampsize = sampsize,
ntree = ntrees,
maxnodes = 2^max_depth,
nodesize = node_size,
mtry = ncol(X)*2/3)
treelist <- inTrees::RF2List(forest)
rules <- extract_rules(treelist, X, max_depth)
} else {
rules <- NULL
}
rule_counts <- table(unlist(rules))
M <- min(max_rules, length(rule_counts))
rules <- names(sort(rule_counts, decreasing = TRUE)[1:M])
en_time <- proc.time()
logger::log_debug("Done with generating (candidate) rules.. ",
"(WC: {g_wc_str(st_time, en_time)}", ".)")
return(rules)
}
|
/scratch/gouwar.j/cran-all/cranData/CRE/R/generate_rules.R
|
#' @title
#' Generate rules matrix
#'
#' @description
#' Generates the rules matrix from the feature covariate matrix and a vector of
#' rules. The number of rows in rules_matrix is equal to the number of samples
#' in `X`, and the number of columns is equal to the number of rules in
#' `rules_list`. Each element of rules_matrix corresponds to a specific data
#' sample and rule. If the data sample satisfies a rule, the corresponding
#' element in rules_matrix is set to 1. Otherwise, the element is set to 0.
#'
#' @param X Features matrix.
#' @param rules_list A vector of rules.
#'
#'
#' @return
#' A causal rules matrix.
#'
#' @keywords internal
#'
generate_rules_matrix <- function(X, rules_list) {
  # Generate Rules Matrix
samplesize <- dim(X)[1]
nrules <- length(rules_list)
rules_matrix <- matrix(0, nrow = samplesize, ncol = nrules)
for (i in 1:nrules){
rules_matrix[eval(parse(text = rules_list[i]), list(X = X)), i] <- 1
}
return(rules_matrix)
}
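# The following commented lines are a minimal usage sketch (not run) of
# generate_rules_matrix(); the toy data and rule strings are hypothetical and
# use the internal "X[,j]" representation produced by the rule extraction step.
#   X <- data.frame(x1 = c(1, 0, 1), x2 = c(0, 1, 1))
#   rules_list <- c("X[,1]>0.5 & X[,2]<=0.5", "X[,2]>0.5")
#   generate_rules_matrix(X, rules_list)
#   # row 1 satisfies only the first rule, rows 2 and 3 only the second:
#   #      [,1] [,2]
#   # [1,]    1    0
#   # [2,]    0    1
#   # [3,]    0    1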
#' @title
#' Standardize Rules Matrix
#'
#' @description
#' Standardize (i.e. mean=0 and stdev=1) the rules matrix.
#'
#' @param rules_matrix The rules matrix.
#'
#' @return
#' Standardized rules matrix
#'
#' @keywords internal
standardize_rules_matrix <- function(rules_matrix) {
samplesize <- dim(rules_matrix)[1]
nrules <- dim(rules_matrix)[2]
mu_rules_matrix <- apply(rules_matrix, 2, mean)
sd_rules_matrix <- apply(rules_matrix, 2, stats::sd)
rules_matrix_std <- matrix(0, samplesize, nrules)
for (l in 1:ncol(rules_matrix_std)) {
rules_matrix_std[, l] <- ((rules_matrix[, l] - mu_rules_matrix[l]) /
sd_rules_matrix[l])
}
return(rules_matrix_std)
}
|
/scratch/gouwar.j/cran-all/cranData/CRE/R/generate_rules_matrix.R
|
#' @title
#' Honest splitting
#'
#' @description
#' Splits data into discovery and inference sub-samples.
#'
#' @param y An observed response vector.
#' @param z A treatment vector.
#' @param X A features matrix.
#' @param ratio_dis A double number indicating the ratio of data delegated to
#' the discovery sub-sample.
#' @param ite A vector of estimated ITE.
#'
#' @return
#' A list containing the discovery and inference sub-samples.
#'
#' @keywords internal
#'
honest_splitting <- function(y, z, X, ratio_dis, ite = NULL) {
logger::log_debug("(Honest) Splitting the dataset...")
n <- length(y)
index <- sample(1:n, round(n * ratio_dis), replace = FALSE)
X <- as.matrix(X)
y <- as.matrix(y)
z <- as.matrix(z)
  if (!is.null(ite)) {
    ite <- as.matrix(ite)
    ite_dis <- ite[index, ]
    ite_inf <- ite[-index, ]
  } else {
    # Keep the ITE entries NULL when no estimate is provided;
    # subsetting NULL with a two-dimensional index would fail.
    ite_dis <- NULL
    ite_inf <- NULL
  }
  discovery <- list(y = y[index],
                    z = z[index],
                    X = X[index, ],
                    ite = ite_dis)
  inference <- list(y = y[-index],
                    z = z[-index],
                    X = X[-index, ],
                    ite = ite_inf)
logger::log_debug("Done with (Honest) Splitting the dataset.")
return(list(discovery = discovery, inference = inference))
}
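# The following commented lines are a minimal usage sketch (not run) of
# honest_splitting(); `y`, `z` and `X` stand for the outcome, treatment and
# covariates of an observed data set.
#   splits <- honest_splitting(y, z, X, ratio_dis = 0.5)
#   discovery <- splits[["discovery"]]   # sub-sample used to discover rules
#   inference <- splits[["inference"]]   # sub-sample used to estimate rule effects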
|
/scratch/gouwar.j/cran-all/cranData/CRE/R/honest_splitting.R
|
#' @title
#' Interpret rules
#'
#' @description
#' Replaces the column numbers in the rules vector with their real names.
#'
#' @param rules A vector of rules.
#' @param X_names A vector of real names of the covariates.
#'
#' @return
#' A list of explicit (human-readable) rules.
#'
#'
#'@keywords internal
#'
interpret_rules <- function(rules, X_names) {
replacements <- X_names
names(replacements) <- paste("X[,", 1:length(X_names), "]", sep = "")
n_rules <- length(rules)
rules_explicit <- vector(length = n_rules)
for (j in 1:n_rules) {
rules_explicit[j] <- stringr::str_replace_all(rules[j],
stringr::fixed(replacements))
}
return(rules_explicit)
}
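# The following commented lines are a minimal usage sketch (not run) of
# interpret_rules(); the rule strings and covariate names are hypothetical.
#   rules <- c("X[,1]>0.5 & X[,2]<=0.5", "X[,4]<=0.5")
#   X_names <- c("x1", "x2", "x3", "x4")
#   interpret_rules(rules, X_names)
#   # returns c("x1>0.5 & x2<=0.5", "x4<=0.5")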
|
/scratch/gouwar.j/cran-all/cranData/CRE/R/interpret_rules.R
|
#' @title
#' Set Logger settings
#'
#' @description
#' Updates logger settings, including log level and location of the file.
#'
#' @param logger_file_path A path (including file name) to log the messages.
#' (Default: CRE.log)
#' @param logger_level The log level. When a log level is set, all log levels
#' below it are also activated (if implemented). Available levels include:
#' - TRACE: Provides verbose detailed logging, including the steps taken to
#' achieve a result, often used for debugging. Activating TRACE will also
#' enable DEBUG, INFO, SUCCESS, WARN, ERROR, and FATAL logs.
#' - DEBUG: Provides detailed logging about the flow of the application, used
#' mostly by developers to understand potential issues. Activating DEBUG will
#' also enable INFO, SUCCESS, WARN, ERROR, and FATAL logs.
#' - INFO (Default): Standard messages that inform the user about the normal
#' operation of the system. Activating INFO will also enable SUCCESS, WARN,
#' ERROR, and FATAL logs.
#' - SUCCESS: Messages indicating successful completion of a particular
#' operation or task. Activating SUCCESS will also enable WARN, ERROR, and
#' FATAL logs.
#' - WARN: Warning messages about events that might cause problems in the
#' future, but are not yet errors. Activating WARN will also enable ERROR
#' and FATAL logs.
#' - ERROR: Reports an error due to which the system may not be able to
#' achieve its functionality, but the application won't halt. Activating
#' ERROR will also enable FATAL logs.
#' - FATAL: Reports very severe error events that will presumably lead the
#' application to abort.
#'
#' @export
#'
#' @return
#' No return value. This function is called for side effects.
#'
#' @note
#' Log levels are specified by developers during the initial implementation.
#' Future developers or contributors can leverage these log levels to better
#' capture and document the application's processes and events.
#'
#' @examples
#'
#' set_logger("Debug")
#'
set_logger <- function(logger_file_path = "CRE.log",
logger_level = "INFO") {
available_levels <- c("TRACE", "DEBUG", "INFO", "SUCCESS", "WARN",
"ERROR", "FATAL")
if (!is.element(logger_level, available_levels)) {
stop(paste("logger_level: ", logger_level, " is not valid."))
}
logger::log_appender(appender = logger::appender_file(logger_file_path),
index = 1)
set_options("logger_file_path", logger_file_path)
set_options("logger_level", logger_level)
if (!is.null(logger_level)) {
if (is.element(logger_level, available_levels)) {
logger::log_threshold(logger_level)
} else {
stop(paste("Logger level is not valid. Available levels: ",
paste(available_levels, collapse = " ")))
}
} else {
logger::log_threshold(logger::INFO, index = 1)
}
}
#' @title
#' Get Logger settings
#'
#' @description
#' Returns current logger settings.
#'
#' @seealso
#' \code{\link{set_logger}} for information on setting the log level and file path.
#'
#' @return
#' Returns a list that includes **logger_file_path** and **logger_level**.
#'
#' @export
#'
#' @examples
#'
#' set_logger("mylogger.log", "INFO")
#' log_meta <- get_logger()
#'
get_logger <- function() {
return(list(logger_file_path = get_options("logger_file_path"),
logger_level = get_options("logger_level")))
}
|
/scratch/gouwar.j/cran-all/cranData/CRE/R/logger_utils.R
|
#' @title
#' A helper function for cre object
#'
#' @description
#' A helper function to plot cre object using ggplot2 package.
#'
#' @param object A `cre` object.
#' @param ... Additional arguments passed to customize the plot.
#'
#' @return
#' Returns a ggplot object.
#'
#' @keywords internal
#' @importFrom ggplot2 autoplot
#'
autoplot.cre <- function(object, ...) {
`%>%` <- magrittr::`%>%`
Rule <- Estimate <- CI_lower <- CI_upper <- NULL
gg_labs <- gg_title <- NULL
# collect additional arguments
dot_args <- list(...)
arg_names <- names(dot_args)
for (i in arg_names) {
assign(i, unlist(dot_args[i], use.names = FALSE))
}
cate <- object[["CATE"]]
ate <- cate[1, ]
aate <- cate[2:nrow(cate), ]
aate <- aate[order(aate$Estimate, decreasing = TRUE), ]
rownames(aate) <- 1:nrow(aate)
g <- ggplot2::ggplot(data = aate) +
ggplot2::geom_hline(yintercept = 0, color = "dark grey", lty = 2) +
ggplot2::geom_linerange(ggplot2::aes(x = reorder(Rule, Estimate),
ymin = CI_lower,
ymax = CI_upper),
lwd = 1,
position = ggplot2::position_dodge(width = 1 / 2)) +
ggplot2::geom_pointrange(ggplot2::aes(x = reorder(Rule, Estimate),
y = Estimate,
ymin = CI_lower,
ymax = CI_upper),
lwd = 1 / 2,
position = ggplot2::position_dodge(width = 1 / 2),
shape = 21, fill = "WHITE") +
ggplot2::xlab("") +
ggplot2::ylab("AATE") +
ggplot2::coord_flip() +
ggplot2::theme_bw() +
ggplot2::ggtitle(paste("Causal Rule Ensemble: ",
"\nConditional Average Treatment Effect",
"\nLinear Decomposition",
"\n\nATE = ", round(ate[["Estimate"]], 3),
" [", round(ate[["CI_lower"]], 3), ",",
round(ate[["CI_upper"]], 3), "]", sep = "")) +
ggplot2::theme(plot.title = ggplot2::element_text(hjust = 0.5))
return(g)
}
#' @title
#' Extend generic plot functions for cre class
#'
#' @description
#' A wrapper function to extend generic plot functions for cre class.
#'
#' @param x A `cre` object.
#' @param ... Additional arguments passed to customize the plot.
#'
#' @return
#' Returns a ggplot2 object, invisibly. This function is called for side
#' effects.
#'
#' @export
#'
plot.cre <- function(x, ...) {
if (x[["M"]]["select_significant"] == 0) {
message("Visualization not available (0 causal decision rules discovered).")
} else {
g <- ggplot2::autoplot(x, ...)
print(g)
invisible(g)
}
}
|
/scratch/gouwar.j/cran-all/cranData/CRE/R/plot.R
|
#' @title
#' Predict individual treatment effect via causal rule ensemble
#'
#' @description
#' Predicts individual treatment effect via causal rule ensemble algorithm.
#'
#' @param object A `cre` object from running the CRE function.
#' @param X A covariate matrix (or data.frame)
#' @param ... Additional arguments passed to customize the prediction.
#'
#' @return
#' An array with the estimated Individual Treatment Effects
#'
#' @export
#'
predict.cre <- function(object, X, ...) {
  if (is.null(object$rules)) {
    # No decision rules discovered: the prediction reduces to the ATE.
    ite_pred <- rep(object$CATE$Estimate[1], times = nrow(X))
  } else {
    # ITE(x) = ATE + sum of the AATEs of the decision rules satisfied by x.
    rules_matrix <- generate_rules_matrix(X, object$rules)
    rownames(object$CATE) <- object$CATE$Rule
    ite_pred <- object$CATE$Estimate[1] +
      rules_matrix %*% as.matrix(object$CATE[2:nrow(object$CATE), ]["Estimate"])
  }
  return(ite_pred)
}
|
/scratch/gouwar.j/cran-all/cranData/CRE/R/predict.R
|
#' @title
#' Extend print function for the CRE object
#'
#' @description
#' Prints a brief summary of the CRE object
#'
#' @param x A cre object from running the CRE function.
#' @param verbose Set level of results description details: 0 for only results
#' summary, 1 for results and parameters summary, 2 for results and parameters
#' and rules summary (default 2).
#' @param ... Additional arguments passed to customize the results description.
#'
#'
#' @return
#' No return value. This function is called for side effects.
#'
#' @export
#'
print.cre <- function(x, verbose = 2, ...) {
summary(x, verbose, ...)
}
#' @title
#' Print summary of CRE object
#'
#' @description
#' Prints a brief summary of the CRE object
#'
#' @param object A cre object from running the CRE function.
#' @param verbose Set level of results description details: 0 for only results
#' summary, 1 for results and parameters summary, 2 for results, parameters
#' and rules summary (default: 2).
#' @param ... Additional arguments passed to customize the results description.
#'
#' @return
#' A summary of the CRE object
#'
#' @export
summary.cre <- function(object, verbose = 2, ...) {
object <- unclass(object)
summary_options <- c(...)
M <- object[["M"]]
CATE <- object[["CATE"]]
hyper_params <- object[["hyper_params"]]
method_params <- object[["method_params"]]
params <- c(hyper_params, method_params)
cat("CAUSAL RULE ENSAMBLE - Summary")
if (verbose > 0) {
cat("\n\nModel parameters")
cat("\n- Pseudo-Outcome estimation")
if (getElement(params, "ite_method") == "tpoisson") {
cat("\n. - Estimator : tlearner")
cat("\n - Outcome : poisson")
cat("\n - Offset :", getElement(params, "offset"))
} else {
cat("\n - Estimator :", getElement(params, "ite_method"))
cat("\n - Outcome :", getElement(params, "learner_y"))
cat("\n - Propensity Score:", getElement(params, "learner_ps"))
}
cat("\n- Rules Generation")
if (!is.null(getElement(params, "intervention_vars"))) {
cat("\n - Intervention Variables:", getElement(params,
"intervention_vars"))
} else {
cat("\n - Intervention Variables: All")
}
cat("\n - Number of Trees :", getElement(params, "ntrees"))
cat("\n - Node Size :", getElement(params, "node_size"))
cat("\n - Max Rules :", getElement(params, "max_rules"))
cat("\n - Max Depth :", getElement(params, "max_depth"))
cat("\n- Filtering")
cat("\n - Threshold Decay (Irrelevant):", getElement(params,
"t_decay"))
cat("\n - Threshold (Extreme) :", getElement(params,
"t_ext"))
cat("\n - Threshold (Correlated) :", getElement(params,
"t_corr"))
cat("\n - Threshold (p-Value) :", getElement(params,
"t_pvalue"))
stability_selection <- getElement(params, "stability_selection")
if (stability_selection == "error_control") {
cat("\n- Stability Selection with Error Control")
cat("\n - Cutoff:", getElement(params, "cutoff"))
cat("\n - PFER :", getElement(params, "pfer"))
} else if (stability_selection == "vanilla") {
cat("\n- Vanilla Stability Selection")
cat("\n - Cutoff:", getElement(params, "cutoff"))
} else if (stability_selection == "no") {
cat("\n- No Stability Selection (only LASSO)")
}
}
if (verbose > 1) {
cat("\n\nRules")
cat("\n - Intial :", getElement(M, "initial"))
cat("\n - Filter (irrelevant) :", getElement(M, "filter_irrelevant"))
cat("\n - Filter (extreme) :", getElement(M, "filter_extreme"))
cat("\n - Filter (correlated) :", getElement(M, "filter_correlated"))
cat("\n - Select (LASSO) :", getElement(M, "select_LASSO"))
cat("\n - Select (significant):", getElement(M, "select_significant"))
}
cat("\n\nResults\n")
cat("- CATE Linear Decomposition:\n")
print(CATE)
}
|
/scratch/gouwar.j/cran-all/cranData/CRE/R/print.R
|
#' @title
#' Select rules
#'
#' @description
#' Given a set of rules, selects the minimal set linearly decomposing the
#' Conditional Average Treatment Effect (CATE) by LASSO (optionally with
#' Stability Selection).
#'
#' @param rules_matrix The rules matrix.
#' @param rules A list of rules.
#' @param ite The estimated ITE.
#' @param stability_selection Stability selection method.
#' @param cutoff Threshold (percentage) defining the minimum cutoff value for
#' the stability scores. Only for stability selection.
#' @param pfer Upper bound for the per-family error rate (tolerated amount of
#' falsely selected rules). Only for stability selection.
#' @param B Number of bootstrap samples.
#'
#' @return
#' A minimal set of rules linearly decomposing the CATE.
#'
#' @keywords internal
#'
select_rules <- function(rules_matrix, rules, ite,
stability_selection, cutoff, pfer, B) {
logger::log_debug("Selecting rules...")
"%>%" <- magrittr::"%>%"
rules_weight <- c()
for (rule in rules) {
rule_length <- lengths(regmatches(rule, gregexpr("&", rule))) + 1
rules_weight <- append(rules_weight, rule_length)
}
R <- t(t(rules_matrix) / rules_weight)
M <- ncol(R)
if (length(rules) > 1) {
    if (stability_selection == "vanilla") {
# Vanilla Stability Selection
stability_scores <- rep(0, M)
ite_mean <- mean(ite)
for (i in 1:B) {
subsample <- 0.5
indices <- sample(1:nrow(R),
size = round(nrow(R) * subsample),
replace = FALSE)
lasso <- glmnet::cv.glmnet(x = as.matrix(R[indices, ]),
y = ite[indices] - ite_mean,
alpha = 1,
nfolds = 5,
gamma = c(0.01, 0.05, 0.1, 0.5, 1, 5, 10),
intercept = FALSE)
non_zero_indices <- which(as.matrix(coef(lasso)) != 0) - 1
stability_scores[non_zero_indices] <- stability_scores[non_zero_indices] + 1
}
stability_scores <- stability_scores / B
rules <- colnames(R)[stability_scores >= cutoff]
    } else if (stability_selection == "error_control") {
# Stability Selection with Error Control
stab_mod <- tryCatch(
{
stabs::stabsel(x = as.data.frame(R),
y = ite - mean(ite),
intercept = FALSE,
fitfun = "glmnet.lasso",
cutoff = cutoff,
PFER = pfer)
},
error = function(e) {
stop(paste(
"Combination of `cutoff` and `pfer` not allowed. ",
"Try to decrease the `cutoff` or increase the `pfer`. ",
"See Stability Selection documentation for further details.",
"\n\nOriginal Error message:", e))
}
)
rules <- rules[stab_mod$selected]
    } else if (stability_selection == "no") {
# LASSO
cv_lasso <- glmnet::cv.glmnet(x = rules_matrix,
y = ite - mean(ite),
alpha = 1,
intercept = FALSE)
aa <- stats::coef(cv_lasso, s = cv_lasso$lambda.1se)
index_aa <- which(aa[-1, 1] != 0)
rule_LASSO <- data.frame(rules = rules[index_aa],
val = aa[index_aa + 1, 1])
rule_LASSO <- rule_LASSO[order(-rule_LASSO[, 2]), ]
rule_LASSO <- rule_LASSO[!is.na(rule_LASSO$rules), ]
rules <- rule_LASSO$rules
}
} else {
rules <- NULL
}
logger::log_debug("Done with selecting rules.")
return(rules)
}
|
/scratch/gouwar.j/cran-all/cranData/CRE/R/select_rules.R
|
# Keeping logger options
my_options <- new.env(parent = emptyenv())
get_options <- function(k) {
my_options[[k]]
}
set_options <- function(k, v) {
my_options[[k]] <- v
}
list_options <- function() {
names(my_options)
}
g_wc_str <- function(start, end) {
  wc_rd <- (end - start)[[3]]
  wc_string <- paste(sprintf("%.3f", wc_rd), "seconds")
  return(wc_string)
}
|
/scratch/gouwar.j/cran-all/cranData/CRE/R/utils.R
|
.onLoad <- function(libname, pkgname) {
flogger <- logger::layout_glue_generator(format =
paste("{time} {node} {pid} ",
"{namespace} {fn} ",
"{level}: {msg}",
sep = ""))
logger::log_layout(flogger, index = 1)
}
|
/scratch/gouwar.j/cran-all/cranData/CRE/R/zzz.R
|
## ---- include = FALSE---------------------------------------------------------
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
## ---- eval=FALSE--------------------------------------------------------------
# install.packages("CRE")
## ---- eval=FALSE--------------------------------------------------------------
# library(devtools)
# install_github("NSAPH-Software/CRE", ref = "develop")
## ---- eval=FALSE--------------------------------------------------------------
# library("CRE")
|
/scratch/gouwar.j/cran-all/cranData/CRE/inst/doc/CRE.R
|
---
title: "CRE"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{CRE}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
```
# Installation
Installing from CRAN.
```{r, eval=FALSE}
install.packages("CRE")
```
Installing the latest developing version.
```{r, eval=FALSE}
library(devtools)
install_github("NSAPH-Software/CRE", ref = "develop")
```
Import.
```{r, eval=FALSE}
library("CRE")
```
# Arguments
__Data (required)__
**`y`** The observed response/outcome vector (binary or continuous).
**`z`** The treatment/exposure/policy vector (binary).
**`X`** The covariate matrix (binary or continuous).
__Parameters (not required)__
**`method_parameters`** The list of parameters to define the models used, including:
- **`ratio_dis`** The ratio of data delegated to the discovery sub-sample (default: 0.5).
- **`ite_method`** The method to estimate the individual treatment effect (default: "aipw") [1].
- **`learner_ps`** The ([SuperLearner](https://CRAN.R-project.org/package=SuperLearner)) model for the propensity score estimation (default: "SL.xgboost", used only for "aipw","bart","cf" ITE estimators).
- **`learner_y`** The ([SuperLearner](https://CRAN.R-project.org/package=SuperLearner)) model for the outcome estimation (default: "SL.xgboost", used only for "aipw","slearner","tlearner" and "xlearner" ITE estimators).
**`hyper_params`** The list of hyper parameters to fine tune the method, including:
- **`intervention_vars`** Intervention-able variables used for Rules Generation (default: `NULL`).
- **`ntrees`** The number of decision trees for random forest (default: 20).
- **`node_size`** Minimum size of the trees' terminal nodes (default: 20).
- **`max_rules`** Maximum number of candidate decision rules (default: 50).
- **`max_depth`** Maximum rules length (default: 3).
- **`t_decay`** The decay threshold for rules pruning (default: 0.025).
- **`t_ext`** The threshold to define too generic or too specific (extreme) rules (default: 0.01).
- **`t_corr`** The threshold to define correlated rules (default: 1).
- **`stability_selection`** Method for stability selection for selecting the rules. `vanilla` for stability selection, `error_control` for stability selection with error control and `no` for no stability selection (default: `vanilla`).
- **`B`** Number of bootstrap samples for stability selection in rules selection and uncertainty quantification in estimation (default: 20).
- **`subsample`** Bootstrap ratio subsample for stability selection in rules selection and uncertainty quantification in estimation (default: 0.5).
- **`offset`** Name of the covariate to use as offset (i.e. "x1") for T-Poisson ITE Estimation. `NULL` if not used (default: `NULL`).
- **`cutoff`** Threshold defining the minimum cutoff value for the stability scores in Stability Selection (default: 0.9).
- **`pfer`** Upper bound for the per-family error rate (tolerated amount of falsely selected rules) in Error Control Stability Selection (default: 1).
__Additional Estimates (not required)__
**`ite`** The estimated ITE vector. If given, both the ITE estimation steps in Discovery and Inference are skipped (default: `NULL`).
## Notes
### Options for the ITE estimation
**[1]** Options for the ITE estimation are as follows:
- [S-Learner](https://CRAN.R-project.org/package=SuperLearner) (`slearner`).
- [T-Learner](https://CRAN.R-project.org/package=SuperLearner) (`tlearner`)
- T-Poisson (`tpoisson`)
- [X-Learner](https://CRAN.R-project.org/package=SuperLearner) (`xlearner`)
- [Augmented Inverse Probability Weighting](https://CRAN.R-project.org/package=SuperLearner) (`aipw`)
- [Causal Forests](https://CRAN.R-project.org/package=grf) (`cf`)
- [Causal Bayesian Additive Regression Trees](https://CRAN.R-project.org/package=bartCause) (`bart`)
If other estimates of the ITE are provided via the additional `ite` argument, the ITE estimation steps in both discovery and inference are skipped and those estimates are used instead. The ITE estimator also requires an outcome learner and/or a propensity score learner from the [SuperLearner](https://CRAN.R-project.org/package=SuperLearner) package (e.g., "SL.lm", "SL.svm"). Both of these models are simple classifiers/regressors. By default, the XGBoost algorithm is used for both steps.
### Customized wrapper for SuperLearner
One can create a customized wrapper for SuperLearner internal packages. The following is an example of providing the number of cores (e.g., 12) for the xgboost package in a shared memory system.
```R
m_xgboost <- function(nthread = 12, ...) {
SuperLearner::SL.xgboost(nthread = nthread, ...)
}
```
Then use "m_xgboost", instead of "SL.xgboost".
# Examples
Example 1 (*default parameters*)
```R
set.seed(9687)
dataset <- generate_cre_dataset(n = 1000,
rho = 0,
n_rules = 2,
p = 10,
effect_size = 2,
binary_covariates = TRUE,
binary_outcome = FALSE,
confounding = "no")
y <- dataset[["y"]]
z <- dataset[["z"]]
X <- dataset[["X"]]
cre_results <- cre(y, z, X)
summary(cre_results)
plot(cre_results)
ite_pred <- predict(cre_results, X)
```
Example 2 (*personalized ite estimation*)
```R
set.seed(9687)
dataset <- generate_cre_dataset(n = 1000,
rho = 0,
n_rules = 2,
p = 10,
effect_size = 2,
binary_covariates = TRUE,
binary_outcome = FALSE,
confounding = "no")
y <- dataset[["y"]]
z <- dataset[["z"]]
X <- dataset[["X"]]
ite_pred <- ... # personalized ite estimation
cre_results <- cre(y, z, X, ite = ite_pred)
summary(cre_results)
plot(cre_results)
ite_pred <- predict(cre_results, X)
```
Example 3 (*setting parameters*)
```R
set.seed(9687)
dataset <- generate_cre_dataset(n = 1000,
rho = 0,
n_rules = 2,
p = 10,
effect_size = 2,
binary_covariates = TRUE,
binary_outcome = FALSE,
confounding = "no")
y <- dataset[["y"]]
z <- dataset[["z"]]
X <- dataset[["X"]]
method_params <- list(ratio_dis = 0.5,
ite_method ="aipw",
learner_ps = "SL.xgboost",
learner_y = "SL.xgboost")
hyper_params <- list(intervention_vars = c("x1","x2","x3","x4"),
offset = NULL,
ntrees = 20,
node_size = 20,
max_rules = 50,
max_depth = 3,
t_decay = 0.025,
t_ext = 0.025,
t_corr = 1,
stability_selection = "vanilla",
cutoff = 0.8,
pfer = 1,
B = 10,
subsample = 0.5)
cre_results <- cre(y, z, X, method_params, hyper_params)
summary(cre_results)
plot(cre_results)
ite_pred <- predict(cre_results, X)
```
More synthetic data sets can be generated using `generate_cre_dataset()`.
|
/scratch/gouwar.j/cran-all/cranData/CRE/inst/doc/CRE.Rmd
|
## ---- include = FALSE---------------------------------------------------------
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
|
/scratch/gouwar.j/cran-all/cranData/CRE/inst/doc/Contribution.R
|
---
title: "Contribution"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{Contribution}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
```
CRE is an open source package, and contributions are welcome from the open source community in the form of pull requests. Please read the following documents before making changes to the codebase.
## Environment Setup
Please follow these steps to get a copy of _CRE_ on your Github account.
- Navigate to CRE Github [repository](https://github.com/NSAPH-Software/CRE), and at the top right corner, click on the `Fork` button. This will add a clone of the project to your Github account.
- Open your terminal (or Gitbash for Windows, Anaconda prompt, ...) and run the following command (brackets are not included):
```S
git clone [email protected]:[your user name]/CRE.git
```
- If you do not already have an SSH key, you need to generate one. Read more [here](https://docs.github.com/en/github-ae@latest/github/authenticating-to-github/generating-a-new-ssh-key-and-adding-it-to-the-ssh-agent).
- Now, you can modify the codebase and track your modification.
- It is a good idea to create a new branch to work on the codebase. Read the following instructions for git branching.
## Git Branching Model
Although you can pick any branch name in your personal repository, in order to keep consistency and make it clear who is working on what, the following convention is strongly recommended. In this project, we follow the convention that is proposed by Vincent Driessen in his [A successful Git branching model](https://nvie.com/posts/a-successful-git-branching-model/) post.
## Where to submit pull requests?
All pull requests should be submitted to `base repository: fasrc/CRE` and `base: develop` branch.
## Pull request checklist
- Please run `devtools::document()`, `devtools::load_all()` after your final modifications.
- Make sure that your modified code passes all checks and tests (you can run `devtools::check()` in RStudio)
- Your PR should pass all the CI and reviews so we can merge it.
- Add a line(s) about the modification to the NEWS.md file.
- If you are adding new features, please make sure that appropriate documentation is added or updated.
- Please clean up white spaces. Read more [here](https://softwareengineering.stackexchange.com/questions/121555/why-is-trailing-whitespace-a-big-deal).
## Reporting bugs
Please report potential bugs by creating a [new issue](https://github.com/NSAPH-Software/CRE/issues) or sending us an email. Please include the following information in your bug report:
- A brief description of what you are doing, what you expected to happen, and what happened.
- OS that you are using and whether you are using a personal computer or HPC cluster.
- The version of the package that you have installed.
## Style Guide
In this project, we follow the [tidyverse style guide](https://style.tidyverse.org).
### Summary
#### Names
- File names all snake_case and ends with .R (e.g., create_matching.R)
- variable names small letter and separate with _ if need (e.g., delta_n)
- Function names should follow snake_case style (e.g., generate_data)
- Function names follow verb+output convention (e.g., compute_resid)
#### Spaces and Indentation
- Indentations are two spaces (do not use tab)
- Place space around binary operators (e.g., x + y)
```R
#Acceptable:
z <- x + y
#Not recommended:
z<-x+y # (no space)
z<- x+y
z<-x +y
```
- Place space after comma
```R
#Acceptable:
a <- matrix(c(1:100), nrow = 5)
#Not recommended:
a <- matrix(c(1:100),nrow = 5) # (no space after comma)
a <- matrix( c(1:100), nrow = 5 ) # (extra space after and before parentheses)
a<-matrix(c(1:100), nrow = 5) # (no space around unary operator <- )
```
- Place space after # and before commenting and avoid multiple ###
```R
#Acceptable:
# This is a comment
#Not recommended:
#This is a comment
# This is a comment (more than one space after #)
## This is a comment (multiple #)
### This is a comment (multiple # and more than one space)
```
- Do not put space at the opening and closing the parenthesis
```R
#Acceptable:
x <- (z + y)
#Not recommended:
x <- ( z + y ) # (unnecessary space)
x <- (z + y )
x <- ( z + y)
```
- Place a space before and after `()` when used with `if`, `for`, or `while`.
```R
#Acceptable
if (x > 2) {
print(x)
}
# Not recommended
if(x > 2){
print(x)
}
```
#### Other notes
- Maximum line length is 80 characters
- Use explicit returns
- Use explicit tags in documentation (e.g., @title, @description, ...)
|
/scratch/gouwar.j/cran-all/cranData/CRE/inst/doc/Contribution.Rmd
|
## ---- include = FALSE---------------------------------------------------------
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
## ---- warning=FALSE, eval=FALSE-----------------------------------------------
# library(CRE)
#
# # Generate sample data
# set.seed(1358)
# dataset <- generate_cre_dataset(n = 1000,
# rho = 0,
# n_rules = 2,
# p = 10,
# effect_size = 2,
# binary_covariates = TRUE,
# binary_outcome = FALSE,
# confounding = "no")
# y <- dataset[["y"]]
# z <- dataset[["z"]]
# X <- dataset[["X"]]
#
# method_params <- list(ratio_dis = 0.5,
# ite_method = "aipw",
# learner_ps = "SL.xgboost",
# learner_y = "SL.xgboost",
# offset = NULL)
#
# hyper_params <- list(intervention_vars = NULL,
# ntrees = 20,
# node_size = 20,
# max_rules = 50,
# max_depth = 3,
# t_decay = 0.025,
# t_ext = 0.01,
# t_corr = 1,
# t_pvalue = 0.05,
# stability_selection = "vanilla",
# cutoff = 0.6,
# pfer = 1,
# B = 10,
# subsample = 0.5)
#
# # linreg CATE estimation with aipw ITE estimation
# cre_results <- cre(y, z, X, method_params, hyper_params)
# summary(cre_results)
# plot(cre_results)
# ite_pred <- predict(cre_results, X)
|
/scratch/gouwar.j/cran-all/cranData/CRE/inst/doc/Testing-the-Package.R
|
---
title: "Testing the CRE Package"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{Testing the CRE package}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
```
We encourage all developers to test the package in different conditions.
Testing the package is the easiest way to get familiar with the package and its
functionalities.
# Getting the code
To test the package, please install the package on your
system (R (>= 3.5.0)). You can install the package by following one of these
approaches:
- Directly from GitHub
- CRAN (not recommended)
- Source
- Forked Repository (recommended)
## Installing the package directly from Github
In this project, we follow [A successful Git Branching Model](https://nvie.com/posts/a-successful-git-branching-model/).
As a result, the `develop` branch is the most updated branch for developers. Use
`devtools::install_github` to install the package. If you do not specify the `ref`,
it will install the master (or main) branch.
```R
library(devtools)
try(detach("package:CRE", unload = TRUE), silent = TRUE) # if already you have the package, detach and unload it, to have a new install.
install_github("NSAPH-Software/CRE", ref="develop")
library(CRE)
```
Try `?CRE`. It should open the package description page under the help tab
(assuming you are using RStudio).
## Installing the package from CRAN
Installing the package from CRAN for development purposes is not recommended, because the version on CRAN is most probably not the latest version.
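If you only need the released version, you can install it directly from CRAN (this mirrors the standard CRAN installation step shown in the CRE vignette):
```R
install.packages("CRE")
library(CRE)
```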
## Installing the package from the source
In order to install the package from the source, you need to download the source
code into your computer and install it from the source. Here are the steps:
- Go to package [Github repository](https://github.com/NSAPH-Software/CRE) and from the
drop-down menu change the branch to `develop`. Then click on the `Code` tab and
then click on the `Download Zip` tab.
- Open one of the files using RStudio, then change the project directory to the
project directory (`Session > Set Working Directory > To Project Directory`).
- Load `devtools` library and then load CRE.
```R
library(devtools)
load_all()
?CRE
```
## Forking the package
Forking the package under your Github account is the best option if you are
planning on installing, testing, modifying, and contributing to the
project. Go to package [Github repository](https://github.com/NSAPH-Software/CRE) and
click on the `Fork` button at the top right corner. After forking the package,
Open your terminal (or Gitbash for Windows, Anaconda prompt, ...) and run the
following command (brackets are not included):
```S
git clone [email protected]:[your user name]/CRE.git
```
Now, you can modify the codebase and track your modification. Navigate to the
package folder and Install the package following the
**Installing the package from source** steps. It is a good idea
to create a new branch to work on the codebase. Read
[A successful Git Branching Model](https://nvie.com/posts/a-successful-git-branching-model/) for
branching convention.
# Testing the Package
Run the following command to test the package.
```{r, warning=FALSE, eval=FALSE}
library(CRE)
# Generate sample data
set.seed(1358)
dataset <- generate_cre_dataset(n = 1000,
rho = 0,
n_rules = 2,
p = 10,
effect_size = 2,
binary_covariates = TRUE,
binary_outcome = FALSE,
confounding = "no")
y <- dataset[["y"]]
z <- dataset[["z"]]
X <- dataset[["X"]]
method_params <- list(ratio_dis = 0.5,
ite_method = "aipw",
learner_ps = "SL.xgboost",
learner_y = "SL.xgboost",
offset = NULL)
hyper_params <- list(intervention_vars = NULL,
ntrees = 20,
node_size = 20,
max_rules = 50,
max_depth = 3,
t_decay = 0.025,
t_ext = 0.01,
t_corr = 1,
t_pvalue = 0.05,
stability_selection = "vanilla",
cutoff = 0.6,
pfer = 1,
B = 10,
subsample = 0.5)
# linreg CATE estimation with aipw ITE estimation
cre_results <- cre(y, z, X, method_params, hyper_params)
summary(cre_results)
plot(cre_results)
ite_pred <- predict(cre_results, X)
```
|
/scratch/gouwar.j/cran-all/cranData/CRE/inst/doc/Testing-the-Package.Rmd
|