body <- bs4DashBody(
# include CSS
includeCSS(path = "www/css/treatments-app.css"),
# include the script for Hotjar tracking
#tags$head(includeScript("www/hotjar.js")),
# JS interactions
useShinyjs(),
includeScript(path = "www/js/fullscreen.js"),
includeScript(path = "www/js/close.js"),
# script needed to detect the user's web browser
includeScript(path = "www/js/find-navigator.js"),
# print feedback for input
useShinyFeedback(),
setShadow(class = "card"),
setZoom(class = "card", scale = 1.01),
setPulse(class = "timeline-item"),
setPulse(class = "diagnosis-badge"),
setShake(class = "diagnosis-badge"),
setShadow(class = "modal-content"),
setZoom(class = "modal-content"),
#setShake("post"),
chooseSliderSkin(skin = "Flat", color = "#007cfe"),
bs4TabItems(
# Network panel
bs4TabItem(
tabName = "main",
uiOutput("patient_ui")
),
# About section Panel
bs4TabItem(
tabName = "about",
div(
id = "about_us",
HTML(
paste(
"<img style=\"height: 100%; width: 100%; object-fit: contain\"
border=\"0\" align=\"center\" src=\"logos/about_us.jpg\"/>"
)
)#,
#HTML(paste(tags$img(src = "about_us.jpg")))
)
)
)
)
# ---- end of /scratch/gouwar.j/cran-all/cranData/CaPO4Sim/inst/virtual_patient_simulator/bs4/body.R ----
#-------------------------------------------------------------------------
# This code contains the function that calculates the percentage of
# change of each flux, which is then needed to change the color of arrows
# depending on the resulting variation. (see global.R for color change)
#
# David Granjon, the Interface Group, Zurich
# July 10th, 2017
#-------------------------------------------------------------------------
calc_change <- function(out, t_target) {
# change for Ca and PO4 fluxes
# numbers represent the base-case values
# t_target is the time at which to compute the change
Abs_int_change <- 0.5*((out[t_target,"Abs_int_Ca"] - 9.829864e-04)/9.829864e-04*100 +
(out[t_target,"Abs_int_PO4"] - 8.233724e-04)/8.233724e-04*100)
U_Ca_change <- (out[t_target,"U_Ca"] - 3.907788e-05)/3.907788e-05*100
U_PO4_change <- (out[t_target,"U_PO4"] - 3.969683e-04)/3.969683e-04*100
Res_change <- 0.5*((out[t_target,"Res_Ca"] - 3.921871e-04)/3.921871e-04*100 +
(out[t_target,"Res_PO4"] - 1.176561e-04)/1.176561e-04*100)
Ac_Ca_change <- (out[t_target,"Ac_Ca"] - 1.009965e-03)/1.009965e-03*100
Ac_PO4_change <- (out[t_target,"Ac_PO4"] - 2.178550e-04)/2.178550e-04*100
Reabs_Ca_change <- (out[t_target,"Reabs_Ca"] - 2.592522e-03)/2.592522e-03*100
Reabs_PO4_change <- (out[t_target,"Reabs_PO4"] - 4.606232e-03)/4.606232e-03*100
Net_Ca_pf_change <- ((out[t_target,"Ca_pf"] - out[t_target,"Ca_fp"]) -
(5.306840e-03 - 4.296942e-03))/(5.306840e-03 - 4.296942e-03)*100
Net_PO4_pf_change <- (round((out[t_target,"PO4_pf"] - out[t_target,"PO4_fp"]) -
(1.995840e-01 - 1.993571e-01),4))/(1.995840e-01 - 1.993571e-01)*100
# need to round since the order of magnitude of the difference is 1e-7
Net_PO4_pc_change <- (round((out[t_target,"PO4_pc"] - out[t_target,"PO4_cp"]) -
(2.772000e-03 - 2.771900e-03),6))/(2.772000e-03 - 2.771900e-03)*100
# change for PTH fluxes
PTHg_synth_change <- (out[t_target,"PTHg_synth"] - 54.02698)/54.02698*100
PTHg_synth_D3_change <- (out[t_target,"PTHg_synth_D3"] - 0.68025)/0.68025*100
PTHg_synth_PO4_change <- (out[t_target,"PTHg_synth_PO4"] - 0.18945)/0.18945*100
PTHg_exo_CaSR_change <- (out[t_target,"PTHg_exo_CaSR"] - 0.00693)/0.00693*100
PTHg_deg_change <- (out[t_target,"PTHg_deg"] - 45.086650)/45.086650*100
PTHg_exo_change <- (out[t_target,"PTHg_exo"] - 8.936505)/8.936505*100
PTHp_deg_change <- (out[t_target,"PTHp_deg"] - 8.931000)/8.931000*100
# Changes for PTH contribution in the proximal tubule
Reabs_PT_change <- (out[t_target, "Reabs_PT_PTH"] - 0.0098)/0.0098*100
# changes for PTH and CaSR contribution in TAL
Reabs_TAL_CaSR_change <- (out[t_target, "Reabs_TAL_CaSR"] - 0.0104)/0.0104*100
Reabs_TAL_PTH_change <- (out[t_target, "Reabs_TAL_PTH"] - 0.00465)/0.00465*100
# changes for PTH and D3 contributions in DCT
Reabs_DCT_PTH_change <- (out[t_target, "Reabs_DCT_PTH"] - 0.00417)/0.00417*100
Reabs_DCT_D3_change <- (out[t_target, "Reabs_DCT_D3"] - 0.00108)/0.00108*100
# change for intest Ca reabs due to D3
Abs_int_D3_change <- (out[t_target, "Abs_int_D3"] - 0.000433)/0.000433*100
# change for Ca resorption due to PTH and D3
Res_PTH_change <- (out[t_target, "Res_PTH"] - 0.0000669)/0.0000669*100
Res_D3_change <- (out[t_target, "Res_D3"] - 0.000225)/0.000225*100
# Change for PO4 reabsorption due to PTH and FGF23
Reabs_PT_PO4_PTH_change <- (out[t_target, "Reabs_PT_PO4_PTH"] - 0.09952)/0.09952*100
Reabs_PT_PO4_FGF_change <- (out[t_target, "Reabs_PT_PO4_FGF"] - 0.14124)/0.14124*100
# return the data frame of percentage changes
data.frame(
Abs_int_change = Abs_int_change,
U_Ca_change = U_Ca_change,
U_PO4_change = U_PO4_change,
Res_change = Res_change,
Ac_Ca_change = Ac_Ca_change, # 5
Ac_PO4_change = Ac_PO4_change,
Reabs_Ca_change = Reabs_Ca_change,
Reabs_PO4_change = Reabs_PO4_change,
Net_Ca_pf_change = Net_Ca_pf_change,
Net_PO4_pf_change = Net_PO4_pf_change, # 10
Net_PO4_pc_change = Net_PO4_pc_change,
PTHg_synth_change = PTHg_synth_change,
PTHg_synth_D3_change = PTHg_synth_D3_change,
PTHg_synth_PO4_change = PTHg_synth_PO4_change,
PTHg_exo_CaSR_change = PTHg_exo_CaSR_change, # 15
PTHg_deg_change = PTHg_deg_change,
PTHg_exo_change = PTHg_exo_change,
PTHp_deg_change = PTHp_deg_change,
Reabs_PT_change = Reabs_PT_change,
Reabs_TAL_CaSR_change = Reabs_TAL_CaSR_change, # 20
Reabs_TAL_PTH_change = Reabs_TAL_PTH_change,
Reabs_DCT_PTH_change = Reabs_DCT_PTH_change,
Reabs_DCT_D3_change = Reabs_DCT_D3_change,
Abs_int_D3_change = Abs_int_D3_change,
Res_PTH_change = Res_PTH_change, # 25
Res_D3_change = Res_D3_change,
Reabs_PT_PO4_PTH_change = Reabs_PT_PO4_PTH_change,
Reabs_PT_PO4_FGF_change = Reabs_PT_PO4_FGF_change, # 28
stringsAsFactors = FALSE
)
}
# Uncomment if new base-case values need to be set
# c(out()[1,"Abs_int_Ca"],
# out()[1,"Abs_int_PO4"],
# out()[1,"U_Ca"],
# out()[1,"U_PO4"],
# out()[1,"Res_Ca"],
# out()[1,"Res_PO4"],
# out()[1,"Ac_Ca"],
# out()[1,"Ac_PO4"],
# out()[1,"Reabs_Ca"],
# out()[1,"Reabs_PO4"],
# out()[1,"Ca_pf"],
# out()[1,"PO4_pf"],
# out()[1,"Ca_fp"],
# out()[1,"PO4_fp"],
# out()[1,"PO4_pc"],
# out()[1,"PO4_cp"],
# out()[1,"PTHg_synth"],
# out()[1,"PTHg_deg"],
# out()[1,"PTHg_exo"],
# out()[1,"PTHp_deg"])
# ---- end of /scratch/gouwar.j/cran-all/cranData/CaPO4Sim/inst/virtual_patient_simulator/bs4/calc_change.R ----
#-------------------------------------------------------------------------
#
# This is the model core containing all equations and fluxes,
# it is translated from a previous Matlab code
#
# David Granjon, the Interface Group, Zurich
# June 12th, 2017
#-------------------------------------------------------------------------
calcium_phosphate_core <- function(t, state, parameters) {
with(as.list(c(state, parameters)),{
##################
# #
# Lag Setting #
# #
##################
#tau <- 240
#PTH_p_lag <- ifelse((t - tau) <= 0, 0.0683/Vp, lagvalue(t - tau))
##################
# #
# Simulations #
##################
k_inject_P <- 0
k_inject_Ca <- 0
k_inject_D3 <- 0
k_inject_FGF <- 0
k_inject_PTH <- 0
if (t_stop != 0) {
if (t > t_start && t < t_stop) {
if (Ca_inject != 0) {
k_inject_Ca <- Ca_inject
} else if (Ca_food != 0) {
I_Ca <- Ca_food
} else if (D3_inject != 0) {
k_inject_D3 <- D3_inject * Vp
} else if (P_inject != 0) {
k_inject_P <- P_inject
} else if (P_food != 0) {
I_P <- P_food
}
}
}
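# Note: *_inject events add a zero-order infusion rate to the plasma
# compartment, whereas *_food events override the dietary intake rates
# I_Ca / I_P during the interval (t_start, t_stop).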
##################
# #
# Equations #
# #
##################
# Conversion of PTH in concentration
PTH_p <- PTH_p/Vp # be careful when Vp changes
# PTHg #
PTHg_basal_synthesis_norm <- k_prod_PTHg * Vc / PTH_g_norm
PTHg_synthesis_D3_norm <- 1 / (1 + gamma_prod_D3 * D3_norm * D3_p)
PTHg_synthesis_PO4_norm <- PO4_p^n_prod_Pho /
((K_prod_PTH_P / Pho_p_norm)^n_prod_Pho + PO4_p^n_prod_Pho)
PTHg_synthesis_norm <- PTX_coeff * PTHg_basal_synthesis_norm *
PTHg_synthesis_D3_norm * PTHg_synthesis_PO4_norm
PTHg_degradation_norm <- k_deg_PTHg * PTH_g
n_Ca_norm <- n1_exo / (1 + exp(-rho_exo * Ca_p_norm * (R / Ca_p_norm - Ca_p))) + n2_exo
F_Ca_norm <- beta_exo_PTHg - gamma_exo_PTHg * Ca_p^n_Ca_norm /
(Ca_p^n_Ca_norm + (K_Ca / Ca_p_norm)^n_Ca_norm)
PTHg_exocytosis_norm <- F_Ca_norm * PTH_g
# PTHp #
PTHp_influx_norm <- PTHg_exocytosis_norm * PTH_g_norm / PTH_p_norm
PTHp_degradation_norm <- k_deg_PTHp * PTH_p
# D3 #
D3_basal_synthesis_norm <- k_conv_min * D3_inact / D3_norm
# choose PTH_p or PTH_p_lag[1]
D3_conv_PTH_norm <- (delta_conv_max * (D3_inact / D3_norm) * PTH_p^n_conv) /
(PTH_p^n_conv + (K_conv / PTH_p_norm)^n_conv)
D3_conv_Ca_norm <- 1 / (1 + gamma_ca_conv * Ca_p_norm * Ca_p)
D3_conv_D3_norm <- 1 / (1 + gamma_D3_conv * D3_norm * D3_p)
D3_conv_P_norm <- 1 / (1 + gamma_P_conv * Pho_p_norm * PO4_p)
D3_conv_FGF_norm <- 1/(1 + gamma_FGF_conv * FGF_p_norm * FGF_p)
D3_synthesis_norm <- D3_basal_synthesis_norm + D3_conv_PTH_norm * D3_conv_Ca_norm *
D3_conv_D3_norm*D3_conv_P_norm*D3_conv_FGF_norm
D3_degradation_norm <- (k_deg_D3 * (1 + gamma_deg_FGF * FGF_p_norm * FGF_p) * D3_p) /
(1 + gamma_deg_PTH * PTH_p_norm * PTH_p)
# FGF23 #
FGF_basal_synthesis_norm <- k_prod_FGF / FGF_p_norm
FGF_D3_activ_norm <- delta_max_prod_D3 * D3_p^n_prod_FGF /
(D3_p^n_prod_FGF + (K_prod_D3 / D3_norm)^n_prod_FGF)
FGF_P_activ_norm <- PO4_p / (PO4_p + K_prod_P / Pho_p_norm)
FGF_synthesis_norm <- FGF_basal_synthesis_norm *
(1 + FGF_D3_activ_norm * FGF_P_activ_norm)
FGF_degradation_norm <- k_deg_FGF * FGF_p
# Ca Abs_intest #
Abs_intest_basal_norm <- 0.25 * I_Ca
Abs_intest_D3_norm <- (0.45 * I_Ca * D3_p^n_abs) /
(D3_p^n_abs + (K_abs_D3 / D3_norm)^n_abs)
Abs_intest_norm <- (Abs_intest_basal_norm + Abs_intest_D3_norm) / Ca_p_norm
# P Abs_intest #
Abs_intest_basal_P_norm <- 0.4 * I_P
Abs_intest_D3_P_norm <- (0.3 * I_P * D3_p^n_abs) /
(D3_p^n_abs + (K_abs_D3 / D3_norm)^n_abs)
Abs_intest_P_norm <- (Abs_intest_basal_P_norm + Abs_intest_D3_P_norm) /
Pho_p_norm
# Ca Fast bone #
Rapid_storage_Ca <- k_p_Ca * Ca_p * Vp
Rapid_release_Ca <- k_f_Ca * Ca_f
Accretion_norm <- Lambda_ac_Ca * Ca_f
# P Fast Bone #
Rapid_storage_P <- k_p_P * PO4_p * Vp
Rapid_release_P <- k_f_P * PO4_f
Accretion_P_norm <- Lambda_ac_P * PO4_f
# Ca Slow bone #
Resorption_basal <- Lambda_res_min
Resorption_PTH_norm <- (delta_res_max * 0.2 * PTH_p^n_res) /
(PTH_p^n_res + (K_res_PTH / PTH_p_norm)^n_res)
Resorption_D3_norm <- (delta_res_max * 0.8 * D3_p^n_res) /
(D3_p^n_res + (K_res_D3 / D3_norm)^n_res)
Resorption_norm <- Resorption_basal + Resorption_PTH_norm + Resorption_D3_norm
# P Slow Bone #
Resorption_P_norm <- 0.3 * Resorption_norm
# Ca Kidney #
Reabs_PT_basal <- lambda_reabs_PT_0
Reabs_PT_PTH_norm <- delta_PT_max / (1 + (PTH_p*PTH_p_norm / PTH_ref)^n_PT)
Reabs_PT <- Reabs_PT_basal + Reabs_PT_PTH_norm
Reabs_TAL_basal <- lambda_TAL_0
Reabs_TAL_CaSR_norm <- delta_CaSR_max / (1 + (Ca_p * Ca_p_norm / Ca_ref)^n_TAL)
Reabs_TAL_PTH_norm <- delta_PTH_max * PTH_p / (PTH_p + K_TAL_PTH / PTH_p_norm)
Reabs_DCT_basal <- lambda_DCT_0
Reabs_DCT_PTH_norm <- (delta_DCT_max * 0.8 * PTH_p) /
(PTH_p + K_DCT_PTH / PTH_p_norm)
Reabs_DCT_D3_norm <- (delta_DCT_max * 0.2 * D3_p) /
(D3_p + K_DCT_D3 / D3_norm)
Excretion_norm <- (1 - (Reabs_PT + Reabs_TAL_basal + Reabs_TAL_CaSR_norm +
Reabs_TAL_PTH_norm + Reabs_DCT_basal +
Reabs_DCT_PTH_norm + Reabs_DCT_D3_norm)) *
GFR*(Ca_p + CaHPO4_p + CaH2PO4_p)
Reabs_norm <- (Reabs_PT + Reabs_TAL_basal + Reabs_TAL_CaSR_norm +
Reabs_TAL_PTH_norm + Reabs_DCT_basal +
Reabs_DCT_PTH_norm + Reabs_DCT_D3_norm) *
GFR * (Ca_p + CaHPO4_p + CaH2PO4_p)
# P Kidney #
Reabs_PT_basal_P <- lambda_PT_0
Reabs_PT_PTH_P_norm <- (delta_PTH_max_P * (K_PT_PTH / PTH_p_norm)^n_reabs_P) /
(PTH_p^n_reabs_P + (K_PT_PTH / PTH_p_norm)^n_reabs_P)
Reabs_PT_FGF_P_norm <- (delta_FGF_max * (K_PT_FGF / FGF_p_norm)^n_reabs_P) /
(FGF_p^n_reabs_P + (K_PT_FGF / FGF_p_norm)^n_reabs_P)
Reabs_PT_P_norm <- (delta_P_max * (K_PT_P / Pho_p_norm)^n_reabs_P) /
(PO4_p^n_reabs_P + (K_PT_P / Pho_p_norm)^n_reabs_P)
Reabs_DCT_basal_P <- lambda_DCT_P
Excretion_P_norm <- (1 - (Reabs_PT_basal_P + Reabs_PT_PTH_P_norm +
Reabs_PT_FGF_P_norm + Reabs_PT_P_norm +
Reabs_DCT_basal_P)) *
GFR * (PO4_p + CaHPO4_p + CaH2PO4_p + NaPO4_p)
Reabs_P_norm <- (Reabs_PT_basal_P + Reabs_PT_PTH_P_norm +
Reabs_PT_FGF_P_norm + Reabs_PT_P_norm +
Reabs_DCT_basal_P) *
GFR * (PO4_p + CaHPO4_p + CaH2PO4_p + NaPO4_p)
# Intracellular P #
Plasma_intra_Flux_norm <- k_pc * PO4_p * Vp
Intra_plasma_Flux_norm <- k_cp * PO4_c
# Ca/P from HPO42- and H2PO4+ in plasma #
k_form_CaHPO4_norm <- k_f_CaHPO4 * Ca_p * a * PO4_p * f2^2
k_diss_CaHPO4_norm <- k_d_CaHPO4 * CaHPO4_p
k_form_CaH2PO4_norm <- k_f_CaH2PO4 * Ca_p * b * PO4_p * f2 * f1
k_diss_CaH2PO4_norm <- k_d_CaH2PO4 * CaH2PO4_p * f1
# Ca/P from HPO42- and H2PO4+ in the fast bone pool #
k_form_CaHPO4f_norm <- k_f_CaHPO4 * Ca_f * a * PO4_f * f2^2
k_diss_CaHPO4f_norm <- k_d_CaHPO4 * CaHPO4_f
k_form_CaH2PO4f_norm <- k_f_CaH2PO4 * Ca_f * b * PO4_f * f2 * f1
k_diss_CaH2PO4f_norm <- k_d_CaH2PO4 * CaH2PO4_f * f1
# Fetuin-A complexation with CaHPO4 and CaH2PO4 in plasma #
k_fet_CaHPO4_norm <- k_fet * CaHPO4_p
k_fet_CaH2PO4_norm <- k_fet * CaH2PO4_p*f1
# CPP degradation
CPP_degradation <- k_c_CPP * CPP_p
# CaProt formation
k_form_CaProt <- k_f_CaProt * Ca_p * (N_Prot * Prot_tot_p - CaProt_p)
k_diss_CaProt <- k_d_CaProt * CaProt_p
# Na and phosphate reaction in plasma
k_form_NaPO4 <- (a * k_f_NaHPO4 + b * k_f_NaH2PO4) * Na * PO4_p
k_diss_NaPO4 <- (c * k_d_NaHPO4 + d * k_d_NaH2PO4) * NaPO4_p
# EGTA reaction
EGTA_form <- k_on_egta * Ca_p * EGTA_p
EGTA_diss <- k_off_egta * CaEGTA_p
##################
# #
# Rate of #
# Change #
##################
# PTHg
dPTH_g <- PTHg_synthesis_norm - PTHg_degradation_norm - PTHg_exocytosis_norm
# PTHp
dPTH_p <- k_inject_PTH + PTHp_influx_norm - PTHp_degradation_norm
# D3
dD3_p <- k_inject_D3 + D3_synthesis_norm - D3_degradation_norm
# FGF23
dFGF_p <- k_inject_FGF + FGF_synthesis_norm - FGF_degradation_norm
# Plasma Ca
dCa_p <- 1 / Vp * (k_inject_Ca + Abs_intest_norm + Resorption_norm/Ca_p_norm -
Rapid_storage_Ca + Rapid_release_Ca - Excretion_norm) -
k_form_CaProt + k_diss_CaProt - k_form_CaHPO4_norm*HPO4_norm +
k_diss_CaHPO4_norm * CaHPO4_norm / Ca_p_norm -
k_form_CaH2PO4_norm / Ca_p_norm +
k_diss_CaH2PO4_norm * CaH2PO4_norm / Ca_p_norm -
EGTA_form + EGTA_diss / Ca_p_norm
# Rapid Bone Ca
dCa_f <- Rapid_storage_Ca - Rapid_release_Ca - Accretion_norm -
k_form_CaHPO4f_norm * Ca_p_norm * HPO4_norm / CaHPO4_norm +
k_diss_CaHPO4f_norm -
k_form_CaH2PO4f_norm * Ca_p_norm*H2PO4_norm / CaH2PO4_norm +
k_diss_CaH2PO4f_norm
# Slow Bone Ca
dCa_b <- 1 / Ca_b_norm * (Accretion_norm - Resorption_norm)
# plasma PO4
dPO4_p <- 1 / Vp * (k_inject_P + Abs_intest_P_norm + Resorption_P_norm / Pho_p_norm -
Rapid_storage_P + Rapid_release_P - Excretion_P_norm -
Plasma_intra_Flux_norm + Intra_plasma_Flux_norm / Pho_p_norm) -
k_form_CaHPO4_norm * Ca_p_norm +
k_diss_CaHPO4_norm * CaHPO4_norm / Pho_p_norm -
k_form_CaH2PO4_norm * Ca_p_norm +
k_diss_CaH2PO4_norm * CaH2PO4_norm / Pho_p_norm -
k_form_NaPO4 + k_diss_NaPO4
# Rapid Bone PO4
dPO4_f <- Rapid_storage_P - Rapid_release_P - Accretion_P_norm -
k_form_CaHPO4f_norm * Ca_p_norm*HPO4_norm / CaHPO4_norm +
k_diss_CaHPO4f_norm - k_form_CaH2PO4f_norm * Ca_p_norm *
H2PO4_norm / CaH2PO4_norm + k_diss_CaH2PO4f_norm
# Slow bone PO4
dPO4_b <- 1 / Pho_b_norm * (Accretion_P_norm - Resorption_P_norm)
# Intracellular PO4
dPO4_c <- Plasma_intra_Flux_norm / Pho_c_norm - Intra_plasma_Flux_norm
# CaHPO4p
dCaHPO4_p <- k_form_CaHPO4_norm * Ca_p_norm*HPO4_norm / CaHPO4_norm -
k_diss_CaHPO4_norm - k_fet_CaHPO4_norm
# Ca(H2PO4)2p
dCaH2PO4_p <- k_form_CaH2PO4_norm * Ca_p_norm * H2PO4_norm / CaH2PO4_norm -
k_diss_CaH2PO4_norm - k_fet_CaH2PO4_norm
# CPP particles
dCPP_p <- k_fet_CaHPO4_norm + k_fet_CaH2PO4_norm - CPP_degradation
# CaHPO4 fast pool
dCaHPO4_f <- k_form_CaHPO4f_norm * Ca_p_norm * HPO4_norm / CaHPO4_norm -
k_diss_CaHPO4f_norm
# CaH2PO4 fast pool
dCaH2PO4_f <- k_form_CaH2PO4f_norm * Ca_p_norm * H2PO4_norm / CaH2PO4_norm -
k_diss_CaH2PO4f_norm
# CaProt plasma
dCaProt_p <- k_form_CaProt - k_diss_CaProt
# NaPho plasma
dNaPO4_p <- k_form_NaPO4 - k_diss_NaPO4
# calcium total
dCa_tot <- dCa_p + 1 * dCaHPO4_p + 1 * dCaH2PO4_p + dCaProt_p + 1 * dCPP_p
# phosphate total
dPO4_tot <- dPO4_p + 1 * dCaHPO4_p + 1 * dCaH2PO4_p + dNaPO4_p + 1 * dCPP_p
# equation for continuous EGTA injection
dEGTA_p <- 1 / Vp * k_inject_egta / EGTA_norm - EGTA_form + EGTA_diss / EGTA_norm
# kinetic of CaEGTA_p complex
dCaEGTA_p <- EGTA_form / Ca_EGTA_norm - EGTA_diss
##################
# #
# Results #
##################
# return the list of state-variable derivatives, plus the fluxes
# in a second list
list(
list(
dPTH_g, dPTH_p, dD3_p, dFGF_p,
dCa_p, dCa_f, dCa_b, dPO4_p, dPO4_f,
dPO4_b, dPO4_c, dCaHPO4_p, dCaH2PO4_p,
dCPP_p, dCaHPO4_f, dCaH2PO4_f,
dCaProt_p, dNaPO4_p, dCa_tot,
dPO4_tot, dEGTA_p, dCaEGTA_p
),
list(U_Ca = Excretion_norm, # out 24
U_PO4 = Excretion_P_norm,
Abs_int_Ca = Abs_intest_norm,
Abs_int_PO4 = Abs_intest_P_norm,
Res_Ca = Resorption_norm,
Res_PO4 = Resorption_P_norm,
Ac_Ca = Accretion_norm,
Ac_PO4 = Accretion_P_norm,
Reabs_Ca = Reabs_norm,
Reabs_PO4 = Reabs_P_norm,
Ca_pf = Rapid_storage_Ca, # out 34
Ca_fp = Rapid_release_Ca,
PO4_pf = Rapid_storage_P,
PO4_fp = Rapid_release_P,
PO4_pc = Plasma_intra_Flux_norm,
PO4_cp = Intra_plasma_Flux_norm,
PTHg_synth = PTHg_synthesis_norm, # out 40
PTHg_synth_D3 = PTHg_synthesis_D3_norm,
PTHg_synth_PO4 = PTHg_synthesis_PO4_norm,
PTHg_exo_CaSR = F_Ca_norm,
PTHg_deg = PTHg_degradation_norm,
PTHg_exo = PTHg_exocytosis_norm, # 45
PTHp_deg = PTHp_degradation_norm,
Reabs_PT_PTH = Reabs_PT_PTH_norm,
Reabs_TAL_CaSR = Reabs_TAL_CaSR_norm,
Reabs_TAL_PTH = Reabs_TAL_PTH_norm,
Reabs_DCT_PTH = Reabs_DCT_PTH_norm, # 50
Reabs_DCT_D3 = Reabs_DCT_D3_norm,
Abs_int_D3 = Abs_intest_D3_norm,
Res_PTH = Resorption_PTH_norm,
Res_D3 = Resorption_D3_norm,
Reabs_PT_PO4_PTH = Reabs_PT_PTH_P_norm, # 55
Reabs_PT_PO4_FGF = Reabs_PT_FGF_P_norm
)
)
})
}
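# Example integration (sketch; `state` is the initial condition vector and
# `parms` extends `parameters_fixed` with the event/treatment parameters
# used above, e.g. t_start, t_stop, Ca_inject, PTX_coeff, D3_inact, as set
# up in the app's server code):
# library(deSolve)
# out <- ode(y = state, times = 0:500,
#            func = calcium_phosphate_core, parms = parms)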
# ---- end of /scratch/gouwar.j/cran-all/cranData/CaPO4Sim/inst/virtual_patient_simulator/bs4/calcium_phosphate_core.R ----
# Parameters that #
# will not #
# change #
# ############### #
# Commented-out parameters can be changed via sliderInput widgets
parameters_fixed <- c(
# Other parameters
Vp = 1e-002, # Plasma volume for a 250-300g rat (in L)
Vc = 1e-007, # Volume in parathyroid cell (in L)
# PTH #
k_deg_PTHg = 3.5e-002, # PTH degradation rate constant in parathyroid cells (in min^-1)
k_deg_PTHp = 1.3e000, # PTH degradation rate constant in plasma (in min^-1)
K_Ca = 1.16e000, # Binding of Ca2+ to CaSR (in mM)
beta_exo_PTHg = 5.9e-002, # Rate constant for maximal PTHg secretion (in min^-1)
gamma_exo_PTHg = 5.8e-002, # Rate constant for maximal inhibition of PTHg secretion by Ca2+ (in min^-1)
#k_prod_PTHg = 100*4.192e-002, # PTHg synthesis rate (in microM.min^-1)
gamma_prod_D3 = 8.33e005, # Sensitivity of PTHg synthesis to vitamin D3 (nM)
n1_exo = 1e002, #
n2_exo = 3e001, #
rho_exo = 1e001, #
R = 1.1e000, #
K_prod_PTH_P = 2.4e000, # Sensitivity of PTHg production to PO4 (in mM)
n_prod_Pho = 3, #
# vitamin D3
k_conv_min = 8.8e-006, # Rate for minimum production of D3 (in min^-1)
delta_conv_max = 14.04e-005, # Maximal increase in D3 production rate (in min^-1)
gamma_ca_conv = 3e-001, # Sensitivity of vitamin D3 production to Ca2+ (in mM). In the paper take 1/gamma_ca_conv to find K_conv_Ca
gamma_D3_conv = 3e006, # Sensitivity of vitamin D3 production to D3 (in pM). In the paper take 1/gamma_D3_conv to find K_conv_D3
gamma_P_conv = 2e-001, # Sensitivity of vitamin D3 production to PO_4 (in mM). In the paper take 1/gamma_P_conv to find K_conv_PO4
gamma_FGF_conv = 2e007, # Sensitivity of vitamin D3 production to FGF23 (in pM). In the paper take 1/gamma_FGF_conv to find K_conv_FGF
K_conv = 1.575e-008, # Sensitivity of vitamin D3 production to PTH (in pM).
#D3_inact = 2.5e-005 # Plasma concentration of 25(OH)D3, inactive form of D3 (in nM)
n_conv = 6e000, #
k_deg_D3 = 1e-003, # Rate constant of vitamin D3 degradation (in min^-1)
gamma_deg_PTH = 1e009, # Inhibition of Cyp24a1 activity by PTH (in pM^-1)
gamma_deg_FGF = 1.265e008, # Activation of Cyp24a1 activity by FGF23 (in pM^-1)
# FGF-23
k_prod_FGF = 6.902e-011, # Minimal rate of FGF23 synthesis (in fM.min^-1)
delta_max_prod_D3 = 10, # Maximal activation of FGF23 synthesis
K_prod_D3 = 5.64e-007, # Sensitivity of FGF23 synthesis to vitamin D3 (in pM)
n_prod_FGF = 5,
K_prod_P = 1.6e000, # Sensitivity of FGF23 synthesis to PO4 (in mM)
k_deg_FGF = 1.4e-002, # FGF23 degradation rate constant (in min^-1)
# absorption
K_abs_D3 = 6.4e-007, # Sensitivity of Ca absorption to vitamin D3 (in pM)
I_Ca = 2.2e-003, # Calcium intake (in micromol.min^-1)
n_abs = 2e000, #
I_P = 1.55e-003, # Phosphate intake (in micromol.min^-1)
# accretion
Lambda_ac_Ca = 5.5e-004, # Ca accretion rate constant (in min^-1)
Lambda_ac_P = 2.75e-004, # PO4 accretion rate constant (in min^-1)
# resorption
Lambda_res_min = 1e-004, # Minimal Ca resorption rate (in micromol.min^-1)
delta_res_max = 6e-004, # Maximal Ca resorption rate (in micromol.min^-1)
K_res_D3 = 6e-007, # Sensitivity of Ca resorption to vitamin D3 (in pM)
K_res_PTH = 6.12e-009, # Sensitivity of Ca resorption to PTH (in pM)
n_res = 2e000, #
# rapid bone pool
k_p_Ca = 4.4e-001, # Rate constant of Ca2+ transfer from plasma to fast bone pool (in min^-1)
k_f_Ca = 2.34e-003, # Rate constant of Ca2+ transfer from fast bone pool to plasma (in min^-1)
k_p_P = 13.5e-000, # Rate constant of PO_4 transfer from plasma to fast bone pool (in min^-1)
k_f_P = 2.5165e-001, # Rate constant of PO_4 transfer from fast bone pool to plasma (in min^-1)
# intracellular phosphate
k_pc = 1.875e-001, # Rate constant of PO4 transfer from plasma to the intracellular pool (in min^-1)
k_cp = 1e-003, # Rate constant of PO4 transfer from intracellular pool to plasma (in min^-1)
# N_PO4_c, the total quantity of PO4 in cells, is stored in s_eq and
# equals 3 mmol
# reaction with calcium and phosphate
k_f_CaHPO4 = 1.1373e003, # CaHPO4 formation rate constant (in mM^-1.min^-1)
k_d_CaHPO4 = 1.6667e003, # CaHPO4 dissociation rate constant (in min^-1)
k_f_CaH2PO4 = 5.294e001, # CaH2PO4 formation rate constant (in mM^-1.min^-1)
k_d_CaH2PO4 = 1.6667e003, # CaH2PO4 dissociation rate constant (in min^-1)
f1 = 7.6e-001, # ionic activity coefficient of a monovalent ion
f2 = 3.3e-001, # ionic activity coefficient of a divalent ion
f3 = 8e-002, # ionic activity coefficient of a trivalent ion
k_fet = 3e-001, # Fetuin-A binding to CaHPO4 and CaH2PO4+ (in min^-1)
k_c_CPP = 3e000, # CPP degradation rate constant (in min^-1)
# reaction Ca and Proteins
k_f_CaProt = 1.901976e002, # CaProt formation rate constant (in mM^-1.min^-1)
k_d_CaProt = 1.6667e003, # CaProt dissociation constant (in min^-1)
N_Prot = 20, #
Prot_tot_p = 0.6, # Total concentration of plasma proteins (in mM). [Albumin]p is about half so 0.4 mM
# reaction Na and phosphate
k_f_NaHPO4 = 7.8432e000, # NaHPO4- formation rate constant (in mM^-1.min^-1)
k_f_NaH2PO4 = 4.9020e000, # NaH2PO4 formation rate constant (in mM^-1.min^-1)
Na = 142, # Sodium plasma concentration (in mM)
k_d_NaHPO4 = 1.6667e003, # NaHPO4- dissociation rate constant (in min^-1)
k_d_NaH2PO4 = 1.6667e003, # NaH2PO4 dissociation rate constant (in min^-1)
c = 0.62,
d = 0.38,
# renal parameters
GFR = 2e-003, # Glomerular filtration rate (in mL.min^-1)
#---- Ca ----#
lambda_reabs_PT_0 = 0.64, # Minimal fractional reabsorption of Ca2+ in proximal tubule (PT)
delta_PT_max = 0.01, # Stimulation of Ca2+ reabsorption in PT by PTH
PTH_ref = 1.5e-008, # Sensitivity of Ca2+ reabsorption in PT to PTH (in pM)
n_PT = 5,
lambda_TAL_0 = 2.25e-001, # Minimal fractional reabsorption of Ca2+ in the TAL
delta_CaSR_max = 1.75e-002, # Stimulation of Ca2+ reabsorption in TAL by CaSR
Ca_ref = 1.33e000, # Sensitivity of Ca2+ reabsorption in TAL to CaSR (in mM)
n_TAL = 4e000,
K_TAL_PTH = 4.2e-009, # Sensitivity of Ca2+ reabsorption in TAL to PTH (in pM)
delta_PTH_max = 7.5e-003, # Stimulation of Ca2+ reabsorption in TAL by PTH
lambda_DCT_0 = 9e-002, # Minimal fractional reabsorption of Ca2+ in the DCT-CNT
delta_DCT_max = 1e-002, # Stimulation of Ca2+ reabsorption in DCT-CNT by PTH and D3
K_DCT_PTH = 6.3e-009, # Sensitivity of Ca2+ reabsorption in DCT-CNT to PTH (in pM)
K_DCT_D3 = 4.8e-007, # Sensitivity of Ca2+ reabsorption in DCT-CNT to D3 (in pM)
#---- PO4 ----#
n_reabs_P = 5,
lambda_PT_0 = 5.5e-001, # Minimal fractional reabsorption of PO_4 in proximal tubule (PT)
delta_PTH_max_P = 1e-001, # Stimulation of PO_4 reabsorption in PT by PTH
K_PT_PTH = 2e-008, # Sensitivity of PO_4 reabsorption in PT to PTH (in pM)
delta_P_max = 0.5e-001, # Stimulation of PO_4 reabsorption in PT by PO_4
K_PT_P = 1.6e000, # Sensitivity of PO_4 reabsorption in PT to PO_4 (in mM)
delta_FGF_max = 2e-001, # Stimulation of PO_4 reabsorption in PT by FGF23
K_PT_FGF = 2e-008, # Sensitivity of PO_4 reabsorption in PT to FGF23 (in pM)
lambda_DCT_P = 1e-001, # Fractional reabsorption of PO4 in the DCT
# Take the effect of pH into account (proportion of HPO42- VS H2PO4-)
pH = 7.4e000, #
pKa = 6.8e000, # pKA of the pair H2PO4-/HPO42-
# Normalization
D3_norm = 1e-009,
PTH_g_norm = 1e-009,
PTH_p_norm = 1e-009,
FGF_p_norm = 1e-009,
Ca_p_norm = 1e000,
Ca_f_norm = 1e000,
Ca_b_norm = 1e000,
Pho_p_norm = 1e000,
Pho_c_norm = 1e000,
Pho_f_norm = 1e000,
Pho_b_norm = 1e000,
HPO4_norm = 1e000,
H2PO4_norm = 1e000,
CaHPO4_norm = 1e000,
CaH2PO4_norm = 1e000,
CPP_norm = 1e000,
# EGTA constants if any
EGTA_norm = 1e000,
Ca_EGTA_norm = 1e000,
k_on_egta = 0,
k_off_egta = 0,
k_inject_egta = 0,
# Supersaturation
K_sp_DCPD = 1.87e-007 # Solubility product of brushite (DCPD) (in M^2)
)
# calculated parameters based on fixed parameters:
# r is the [HPO4^2-]/[H2PO4^-] ratio given by the Henderson-Hasselbalch
# equation; a and b are the corresponding fractions of total phosphate
r <- unname(10^(parameters_fixed["pH"] - parameters_fixed["pKa"]))
parameters_calc <- c(r = r, a = r / (1 + r), b = 1 / (1 + r))
# merge calculated and fixed parameters
parameters_fixed <- c(parameters_fixed, parameters_calc)
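# Sanity check (rounded): at pH 7.4 and pKa 6.8, r = 10^0.6 ~ 3.98,
# so a ~ 0.80 (fraction of HPO4^2-) and b ~ 0.20 (fraction of H2PO4^-).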
# ---- end of /scratch/gouwar.j/cran-all/cranData/CaPO4Sim/inst/virtual_patient_simulator/bs4/cap_fixed_parameters.R ----
dashboardControlbar <- bs4DashControlbar(
skin = "light",
title = NULL,
width = 250,
h4("Network options", align = "center"), br(),
prettyCheckboxGroup(
inputId = "background_choice",
label = "Network background",
choices = c("human"),
thick = TRUE,
animation = "pulse",
selected = "human",
inline = TRUE
),
prettyCheckboxGroup(
inputId = "network_Ca_choice",
label = "Select a network",
choices = c(
"Ca" = "Ca",
"Pi" = "PO4",
"PTH" = "PTH",
"D3" = "D3",
"FGF23" = "FGF23"
),
thick = TRUE,
animation = "pulse",
selected = "rat",
inline = TRUE
),
prettySwitch(
inputId = "network_hormonal_choice",
label = "Display hormones",
value = TRUE,
slim = TRUE,
bigger = TRUE
),
prettySwitch(
inputId = "network_organ_choice",
label = "Display organs",
value = TRUE,
slim = TRUE,
bigger = TRUE
),
hr(),
fluidRow(
column(
width = 6,
uiOutput(outputId = "size_nodes_organs")
),
column(
width = 6,
uiOutput(outputId = "size_nodes_hormones")
)
),
fluidRow(
column(
width = 6,
uiOutput(outputId = "width_arrows_organs")
),
column(
width = 6,
uiOutput(outputId = "width_arrows_hormones")
)
),
hr(),
h4("Solver options", align = "center"), br(),
numericInput(
inputId = "tmax",
label = "Maximum simulated time",
value = 500,
min = 0
),
sliderInput(
inputId = "t_now",
label = "Time after simulation",
min = 1,
max = 500,
value = 500
) %>%
shinyInput_label_embed(
icon("undo") %>%
actionBttn(
inputId = "reset_t_now",
label = "",
color = "danger",
size = "xs"
)
)
)
# ---- end of /scratch/gouwar.j/cran-all/cranData/CaPO4Sim/inst/virtual_patient_simulator/bs4/dashboardControlbar.R ----
interface_logo <- "logos/interface.jpeg"
uzh_logo <- "logos/uzh.svg"
unil_logo <- "logos/unil.svg"
nccr_logo <- "logos/nccr.svg"
left_footer <- fluidRow(
column(
width = 3,
align = "center",
"The Interface Group",
a(
href = "http://interfacegroup.ch/people/",
target = "_blank",
img(src = interface_logo, height = "30px")
)
),
column(
width = 3,
align = "center",
"With",
a(
href = "https://shiny.rstudio.com",
target = "_blank",
img(src = "https://www.rstudio.com/wp-content/uploads/2014/04/shiny.png", height = "30px")
),
"by",
a(
href = "http://www.rstudio.com",
target = "_blank",
img(src = "https://www.rstudio.com/wp-content/uploads/2014/07/RStudio-Logo-Blue-Gray.png", height = "30px")
)
),
column(
width = 6,
align = "center",
a(
href = "http://www.nccr-kidney.ch",
target = "_blank",
img(src = nccr_logo, height = "50px")
),
a(
href = "http://www.uzh.ch/de.html",
target = "_blank",
img(src = uzh_logo, height = "30px")
),
"and",
a(
href = "https://www.unil.ch/fbm/fr/home.html",
target = "_blank",
img(src = unil_logo, height = "30px")
)
)
)
footer <- bs4DashFooter(
left_footer,
right_text = NULL
)
# ---- end of /scratch/gouwar.j/cran-all/cranData/CaPO4Sim/inst/virtual_patient_simulator/bs4/footer.R ----
generate_questions <- function() {
questions <- c(
"Q1: Summarize all abnormalities",
"Q2: Does this explain all of the patient's symptoms?",
"Q3: Why does the patient have a renal insufficiency?",
"Q4: Establish a differential diagnostic",
"Q5: What additional lab test would you perform?",
"Q6: Comment on the 25-(OH)- and 1,25(OH)2-vitamin D ratio",
"Q7: Why is the PTH undetectable?",
"Q8: Pose your diagnostic"
)
return(questions)
}
# ---- end of /scratch/gouwar.j/cran-all/cranData/CaPO4Sim/inst/virtual_patient_simulator/bs4/generate_questions.R ----
generate_slider_events <- function(input) {
tagList(
# Generate the slider corresponding to the chosen treatment
if (input$treatment_selected == "D3_inject") {
sliderInput(
"D3_inject",
"D3 injection",
value = 0.001,
min = 0,
max = 0.1,
step = 0.001
) %>%
shinyInput_label_embed(
icon("info") %>%
bs_embed_tooltip(title = "D3 injection (pmol/min)"))
} else if (input$treatment_selected == "Ca_food") {
sliderInput(
"Ca_food",
"Ca intake",
value = 0.0022,
min = 0,
max = 0.008,
step = 0.0001
) %>%
shinyInput_label_embed(
icon("info") %>%
bs_embed_tooltip(title = "Calcium intake (μmol/min)"))
} else if (input$treatment_selected == "Ca_inject") {
sliderInput(
"Ca_inject",
"Ca injection",
min = 0,
max = 0.002,
value = 0.001,
step = 0.0001
) %>%
shinyInput_label_embed(
icon("info") %>%
bs_embed_tooltip(title = "Rate of injection of calcium in plasma (μmol/min)"))
} else if (input$treatment_selected == "P_food") {
sliderInput(
"P_food",
"PO4 intake",
value = 1.55e-003,
min = 0,
max = 0.01,
step = 0.0001
) %>%
shinyInput_label_embed(
icon("info") %>%
bs_embed_tooltip(title = "Phosphate intake (μmol/min)"))
} else if (input$treatment_selected == "P_inject") {
sliderInput(
"P_inject",
"PO4 injection",
value = 0.001,
min = 0,
max = 0.01,
step = 0.0001
) %>%
shinyInput_label_embed(
icon("info") %>%
bs_embed_tooltip(title = "PO4 injection (μmol/min)"))
} else if (input$treatment_selected == "D3_intake_reduction") {
sliderInput(
"D3_intake_reduction",
"D3 intake percentage reduction",
value = 50,
min = 0,
max = 100,
step = 1
)
},
# Start, stop and add
if (input$treatment_selected == "PTX") {
NULL
} else {
numericInput(
"t_stop",
"Duration (in minutes, 1440 min = 1 day):",
value = 100,
min = 0,
max = NA,
width = "100%"
)
},
column(
width = 12,
align = "center",
actionBttn(
inputId = "add_treatment",
size = "xs",
label = NULL,
style = "material-circle",
color = "success",
icon = icon("plus")
)
)
)
}
# ---- end of /scratch/gouwar.j/cran-all/cranData/CaPO4Sim/inst/virtual_patient_simulator/bs4/generate_slider_events.R ----
getting_started <- function() {
withMathJax(
HTML(
paste(
"<u><b>Hi there! Need some help?
(Click on the <i class = 'fa fa-info'></i> above to toggle me)</b></u>", br(), br(),
"<b>1) Regulatory mechanisms:</b>", br(),
shiny::tags$ul(
shiny::tags$li(
img(src = "help_img/node_help.svg",
height = "70px", width = "70px"),
"Organs involved in Ca and \\(P_i\\) metabolism"
),
shiny::tags$li(
img(src = "help_img/regulation_help.svg",
height = "60px", width = "60px"),
"Regulatory hormones and ions"
),
shiny::tags$li(
img(src = "help_img/dashed_arrow_help_promotor.svg",
height = "70px", width = "70px"),
"Promotor"
),
shiny::tags$li(
img(src = "help_img/dashed_arrow_help_inhibitor.svg",
height = "70px", width = "70px"),
"Inhibitor"
),
shiny::tags$li(
img(src = "help_img/dashed_arrow_help.svg",
height = "70px", width = "70px"),
"Mixed effect or opposite effects on \\([Ca]_p\\) and \\([P_i]_p\\)."
)
),
br(),
"<b>2) FLuxes and concentrations:</b>", br(),
"\\([...]_p\\)", "Plasma concentrations", br(),
shiny::tags$ul(
shiny::tags$li(
img(src = "help_img/arrow_help.svg",
height = "70px", width = "70px"),
"Ca and \\(P_i\\) fluxes"
)
),
"Visualize changes in regulations: the arrow thickness increases if
the regulation is stronger, decreases if it is weaker:",
shiny::tags$ul(
shiny::tags$li(
img(src = "help_img/red_arrow_help.svg",
height = "70px", width = "70px"),
"if the flux is decreased", ","),
shiny::tags$li(
img(src = "help_img/arrow_help.svg",
height = "70px", width = "70px"),
"if it is unaltered"
),
shiny::tags$li(
img(src = "help_img/green_arrow_help.svg",
height = "70px", width = "70px"),
"if it is increased."
)
)
)
)
)
}
# ---- end of /scratch/gouwar.j/cran-all/cranData/CaPO4Sim/inst/virtual_patient_simulator/bs4/getting_started.R ----
#-------------------------------------------------------------------------
# This code loads all packages needed by the application.
# Moreover, it contains all mandatory UI and server elements.
#
#-------------------------------------------------------------------------
# load packages
library(shiny)
library(plotly)
library(deSolve)
library(visNetwork)
library(shinyjs)
library(shinycssloaders)
library(shinyjqui)
library(bsplus)
library(purrr)
library(shinyWidgets)
library(shinyEffects)
library(stringr)
library(shinyFeedback)
library(bs4Dash)
library(dplyr)
library(CaPO4Sim)
# Load the template components of UI
source("patient_selector.R")
source("getting_started.R")
source("generate_questions.R")
source("navbar.R")
source("sidebar.R")
source("body.R")
source("footer.R")
#-------------------------------------------------------------------------
#
#
# Load server elements and useful functions
#
#
#-------------------------------------------------------------------------
# Load useful scripts
source("dashboardControlbar.R")
source("cap_fixed_parameters.R")
source("calcium_phosphate_core.R") # core model
source("calc_change.R")
source("networks.R")
source("model_utils.R")
source("generate_slider_events.R")
# set the current time zone to Zurich (for shiny server)
Sys.setenv(TZ = "Europe/Zurich")
# compile the C code containing equations
if (.Platform$OS.type == "unix") {
if (!file.exists("compiled_core.so")) {
system("R CMD SHLIB compiled_core.c")
}
} else if (.Platform$OS.type == "windows") {
if (!file.exists("compiled_core.dll")) {
system("R CMD SHLIB compiled_core.c")
}
}
dyn.load(paste("compiled_core", .Platform$dynlib.ext, sep = ""))
#-------------------------------------------------------------------------
#
#
# Other elements: bookmarking, config,...
#
#
#-------------------------------------------------------------------------
users_logs <- "www/users_data"
if (!dir.exists(users_logs)) {
dir.create(users_logs)
}
# Bookmarking
#enableBookmarking(store = "server") # save to the disk
# ---- end of /scratch/gouwar.j/cran-all/cranData/CaPO4Sim/inst/virtual_patient_simulator/bs4/global.R ----
# Function that highlights the graph when an event occurs:
# arrows are shown in yellow to mark perturbations.
# Takes events, edges and network as arguments.
# This function is then called by flux_lighting
# to update the edges at the same time
arrow_lighting <- function(events, edges, network) {
if (network == "network_Ca") {
param_event <- list(
values = events,
# do not use rep(6,2) to have 6,6//12,12 because of a bug
edges_id = list(
4,2,3,2,3,6,6,7,12,12,
c(19,20,21),
c(22,23,24,25,26,27),
c(22,23,24,25,26,27),
c(28,29)
)
)
} else if (network == "network_PTH") {
param_event <- list(
values = events,
edges_id = list(1,3,4,6,6)
)
} else if (network == "network_kidney_PT") {
param_event <- list(
values = events,
edges_id = 1
)
} else if (network == "network_kidney_PT_PO4") {
param_event <- list(
values = events,
edges_id = list(c(3,4), c(1,2))
)
} else if (network == "network_kidney_TAL") {
param_event <- list(
values = events,
edges_id = 1
)
} else if (network == "network_kidney_DCT") {
param_event <- list(
values = events,
edges_id = list(1, c(5,6), c(5,6))
)
} else if (network == "network_intestine") {
param_event <- list(
values = events,
edges_id = list(c(1,2),c(1,2))
)
} else {
param_event <- list(
values = events,
edges_id = list(1,5,5)
)
}
# search for events whose value differs from 1
event_id <- which(param_event$values != 1)
# if a previous event is already active,
# only select the last new event
event_target <- if (length(event_id) > 1) {
event_id[[length(event_id)]]
} else {
event_id
}
# select the related edges on the network
edges_id_network <- as.numeric(unlist(param_event$edges_id[event_target]))
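# return: the edge ids to highlight, the index of the active event,
# all event values, and the full event-to-edges mapping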
return(list(edges_id_network, event_target,
param_event$values, param_event$edges_id))
}
# highlight arrows for dynamic events.
# Takes out (the result of integration by the ode solver),
# edges and session as arguments. Nothing is returned;
# the network is updated as a side effect
arrow_lighting_live <- function(out, edges, session, t_target) {
# restricted to the first 11 fluxes
calc_change_t <- round(calc_change(out, t_target)[1:11])
calc_change_t$X <- NULL # remove column X
# calculate the difference between live fluxes and base-case values
# index of arrows in the graph (which are fluxes and not regulations)
index <- c(1,10,11,6,4,5,8,9,2,3,12)
calc_change_t <- rbind(calc_change_t, index)
# find which fluxes differ from 0 and store their indices
flux_changed_index <- which(calc_change_t[1,] != 0)
# convert to arrow indices in the interactive diagram
arrow_index <- as.numeric(t(calc_change_t[2, flux_changed_index]))
# which() returns integer(0) rather than NULL, so test the length
if (length(flux_changed_index) > 0) {
for (i in seq_along(flux_changed_index)) {
# change edge color according to an increase or decrease of the flux
arrow_index_i <- arrow_index[i]
edges$color.color[arrow_index_i] <-
if (calc_change_t[[flux_changed_index[i]]][1] > 0) "green" else "red"
}
}
visNetworkProxy("network_Ca") %>%
visUpdateEdges(edges = edges)
}
# Function that allows to light the graph when fluxes change:
# arrows are in green when fluxes are increased and in
# red when fluxes are decreased
# takes edges, network (by default set to network_Ca), out and
# events as arguments
flux_lighting <- function(edges, network = "network_Ca", events, out, t_target){
# calculate the difference between live fluxes and base-case values
# depending on the graph selection
if (network == "network_Ca") {
# round to 1 decimal: too much precision might cause problems,
# but so can too little
calc_change_t <- round(calc_change(out, t_target)[1:11],1)
# index of arrows in the Ca network (which are fluxes and not regulations)
# except filtration which is not included
index <- c(1,10,11,6,4,5,8,9,2,3,12)
calc_change_t <- rbind(calc_change_t, index)
# change arrowhead orientation for Ca flux between plasma and rapid bone
edges$from[2] <- ifelse(calc_change_t[1,"Net_Ca_pf_change"] > 0, 2, 3)
edges$to[2] <- ifelse(calc_change_t[1,"Net_Ca_pf_change"] > 0, 3, 2)
# change arrowhead orientation for PO4 flux between plasma and rapid bone
edges$from[3] <- ifelse(calc_change_t[1,"Net_PO4_pf_change"] > 0, 2, 3)
edges$to[3] <- ifelse(calc_change_t[1,"Net_PO4_pf_change"] > 0, 3, 2)
# change arrowhead orientation for PO4 flux between plasma and cells
edges$from[12] <- ifelse(calc_change_t[1,"Net_PO4_pc_change"] > 0, 2, 8)
edges$to[12] <- ifelse(calc_change_t[1,"Net_PO4_pc_change"] > 0, 8, 2)
edges$arrows.to.enabled[c(2,3,12)] <- TRUE
} else if (network == "network_PTH") {# should use else if when other graphs will be added
calc_change_t <- round(calc_change(out, t_target)[c(12:17)])
index <- c(1,6,5,4,3,2) # index arrows in the PTH network
calc_change_t <- rbind(calc_change_t, index)
} else if (network == "network_kidney_PT") {# PT network
# the second arrow of PT is not part of calc_change, so
# treat it as identical to the first arrow
calc_change_t <- as.data.frame(rep(round(calc_change(out, t_target)[19]),2))
index <- c(1,2)
calc_change_t <- rbind(calc_change_t, index)
} else if (network == "network_kidney_PT_PO4") {
# PTH and FGF23 have the same qualitative effect
calc_change_t <- round(calc_change(out, t_target)[c(rep(27,2), rep(28,2))], 2)
index <- c(4,3,2,1)
calc_change_t <- rbind(calc_change_t, index)
} else if (network == "network_kidney_TAL") {
calc_change_t <- round(calc_change(out, t_target)[20:21])
index <- c(2,3)
calc_change_t <- rbind(calc_change_t, index)
} else if (network == "network_kidney_DCT") {
calc_change_t <- round(calc_change(out, t_target)[c(rep(22,4), rep(23,3))])
index <- c(1:7)
calc_change_t <- rbind(calc_change_t, index)
} else if (network == "network_intestine") {
calc_change_t <- round(calc_change(out, t_target)[rep(24,7)])
index <- c(1:7)
calc_change_t <- rbind(calc_change_t, index)
} else {
calc_change_t <- round(calc_change(out, t_target)[c(rep(25,4), rep(26,3))])
index <- c(1:7)
calc_change_t <- rbind(calc_change_t, index)
}
# find which fluxes differ from 0 and store their indices
flux_changed_index <- which(calc_change_t[1,] != 0)
# convert to arrow indices in the interactive diagram
arrow_index <- t(calc_change_t[2,flux_changed_index])
# proceed to perturbation highlighting
selected_edges <- arrow_lighting(events, edges, network)
edges_id_network <- selected_edges[[1]]
event_target <- selected_edges[[2]]
param_event_values <- selected_edges[[3]]
# edge color engine
if (length(flux_changed_index) > 0) {
for (i in seq_along(flux_changed_index)) {
arrow_index_i <- arrow_index[i]
if (is.element(arrow_index_i, edges_id_network)) {
# the edge is part of the current event selection
edges$color.color[arrow_index_i] <- "yellow"
} else {
# change edge color according to an increase or decrease of the flux
edges$color.color[arrow_index_i] <-
if (calc_change_t[[flux_changed_index[i]]][1] > 0) "green" else "red"
}
}
}
# increase/decrease the size of the corresponding edge
if (network == "network_Ca") {
# need to take care when parameters correspond to a
# degradation rate (edges$width is then inverted),
# such as for vitamin D3 degradation
if (param_event_values[13] == 1) {
edges$width[edges_id_network] <- ifelse(param_event_values[event_target] > 1, 12, 2)
} else {
edges$width[edges_id_network] <- ifelse(param_event_values[event_target] < 1, 12, 2)
}
} else if (network == "network_kidney_DCT" | network == "network_bone") {
if (param_event_values[3] == 1) {
edges$width[edges_id_network] <- ifelse(param_event_values[event_target] > 1, 12, 2)
} else {
edges$width[edges_id_network] <- ifelse(param_event_values[event_target] < 1, 12, 2)
}
} else if (network == "network_intestine") {
if (param_event_values[2] == 1) {
edges$width[edges_id_network] <- ifelse(param_event_values[event_target] > 1, 12, 2)
} else {
edges$width[edges_id_network] <- ifelse(param_event_values[event_target] < 1, 12, 2)
}
} else {
edges$width[edges_id_network] <- ifelse(param_event_values[event_target] > 1, 12, 2)
}
# update the network
visNetworkProxy(network) %>%
visSetSelection(edgesId = edges_id_network) %>%
visUpdateEdges(edges = edges)
}
# plot_node plots the concentrations or quantities
# related to the most recently selected node.
# node is typically input$current_node_id, out is out(),
# and parms is the parameter vector
title_size <- list(size = 10)
plot_node <- function(input, node, out, parms) {
if (node == "null") {
p <- plot_ly() %>%
add_annotations(
"Please select a node!",
showarrow = FALSE,
font = list(color = "red", size = 10)
) %>%
config(displayModeBar = FALSE)
} else {
if (!(node %in% c(1, 5:7, 9:10, 12:16))) {
# set the x/y-axis ranges
time <- out[,1]
xvar <- list(
title = "time (min)",
range = c(0, max(time))
)
# plasma compartment
if (node == 2) {
p <- plot_ly(out,
x = time) %>%
add_lines(y = round(out[,"Ca_p"], 3),
ymin = 0.5 * min(out[,"Ca_p"]),
ymax = 1.5 * max(out[,"Ca_p"]),
name = "Ca2+p (mM)",
line = list(color = 'rgb(27, 102, 244)', width = 2),
visible = TRUE) %>%
add_lines(y = round(out[,"PO4_p"], 3),
ymin = 0.5 * min(out[,"PO4_p"]),
ymax = 1.5 * max(out[,"PO4_p"]),
name = "PO4p (mM)",
line = list(color = 'rgb(244, 27, 27)', width = 2),
visible = FALSE) %>%
add_lines(y = round(out[,"PTH_p"]/parms["Vp"], 1),
ymin = 0.5 * min(out[,"PTH_p"]),
ymax = 1.5 * max(out[,"PTH_p"]),
name = "PTHp (pM)",
line = list(color = 'black', width = 2),
visible = FALSE) %>%
# rescale D3
add_lines(y = out[,"D3_p"] / 4,
ymin = 0.5 * min(out[,"D3_p"] / 4),
ymax = 1.5 * max(out[,"D3_p"] / 4),
name = "1,25D3p (pM)",
line = list(color = 'black', width = 2),
visible = FALSE) %>%
# rescale FGF23
add_lines(y = round(out[,"FGF_p"] / 16.8 * 32, 1),
ymin = 0.5 * min(out[,"FGF_p"] / 16.8 * 32),
ymax = 1.5 * max(out[,"FGF_p"] / 16.8 * 32),
name = "FGF23p (pg/mL)",
line = list(color = 'black', width = 2),
visible = FALSE) %>%
layout(
title = "Plasma concentrations",
font = title_size,
xaxis = xvar,
updatemenus = list(
list(
#type = "buttons",
direction = "right",
#xanchor = 'left',
yanchor = "bottom",
x = 0,
y = -0.45,
buttons = list(
list(method = "restyle",
args = list("visible", list(TRUE, FALSE, FALSE, FALSE, FALSE)),
label = "Cap"),
list(method = "restyle",
args = list("visible", list(FALSE, TRUE, FALSE, FALSE, FALSE)),
label = "PO4p"),
list(method = "restyle",
args = list("visible", list(FALSE, FALSE, TRUE, FALSE, FALSE)),
label = "PTHp"),
list(method = "restyle",
args = list("visible", list(FALSE, FALSE, FALSE, TRUE, FALSE)),
label = "D3p"),
list(method = "restyle",
args = list("visible", list(FALSE, FALSE, FALSE, FALSE, TRUE)),
label = "FGFp")
)
)
)
) %>%
config(displayModeBar = FALSE)
} else if (node == 3) {
# rapid bone compartment
p <- plot_ly(out,
x = time,
mode = "lines") %>%
add_lines(y = out[,"Ca_f"],
ymin = 0.5 * min(out[,"Ca_f"]),
ymax = 1.5 * max(out[,"Ca_f"]),
name = "Caf (mmol)",
line = list(color = 'rgb(27, 102, 244)', width = 2),
visible = TRUE) %>%
add_lines(y = out[,"PO4_f"],
ymin = 0.5 * min(out[,"PO4_f"]),
ymax = 1.5 * max(out[,"PO4_f"]),
name = "PO4f (mmol)",
line = list(color = 'rgb(244, 27, 27)', width = 2),
visible = TRUE) %>%
layout(
title = "Rapid bone pool Ca and PO4 content",
font = title_size,
xaxis = xvar,
yaxis = list(title = "Quantities (mmol)"),
updatemenus = list(
list(
type = "buttons",
direction = "right",
xanchor = 'center',
yanchor = "bottom",
#pad = list('r'= 0, 't'= 10, 'b' = 10),
x = 0.5,
y = -0.45,
buttons = list(
list(method = "restyle",
args = list("visible", list(TRUE, FALSE, FALSE)),
label = "Ca fast bone"),
list(method = "restyle",
args = list("visible", list(FALSE, TRUE, FALSE)),
label = "PO4 fast bone"),
list(method = "restyle",
args = list("visible", list(TRUE, TRUE, FALSE)),
label = "Both")))
)
) %>%
config(displayModeBar = FALSE)
} else if (node == 4) {
# deep bone compartment
p <- plot_ly(out,
x = time,
mode = "lines") %>%
add_lines(y = out[,"Ca_b"],
ymin = 0.5 * min(out[,"Ca_b"]),
ymax = 1.5 * max(out[,"Ca_b"]),
name = "Cab (mmol)",
line = list(color = 'rgb(27, 102, 244)', width = 2),
visible = TRUE) %>%
add_lines(y = out[,"PO4_b"],
ymin = 0.5 * min(out[,"PO4_b"]),
ymax = 1.5 * max(out[,"PO4_b"]),
name = "PO4b (mmol)",
line = list(color = 'rgb(244, 27, 27)', width = 2),
visible = TRUE) %>%
layout(
title = "Deep bone pool Ca and PO4 content",
font = title_size,
xaxis = xvar,
yaxis = list(title = "Quantities (mmol"),
updatemenus = list(
list(
type = "buttons",
direction = "right",
xanchor = 'center',
yanchor = "bottom",
#pad = list('r'= 0, 't'= 10, 'b' = 10),
x = 0.5,
y = -0.45,
buttons = list(
list(method = "restyle",
args = list("visible", list(TRUE, FALSE, FALSE)),
label = "Ca bone"),
list(method = "restyle",
args = list("visible", list(FALSE, TRUE, FALSE)),
label = "PO4 bone"),
list(method = "restyle",
args = list("visible", list(TRUE, TRUE, FALSE)),
label = "Both")))
)
) %>%
config(displayModeBar = FALSE)
} else {
# other cases: need to convert graph indexes to the solver indexes,
# which are totally different (and is a big problem!!!).
# 0 corresponds to nodes handled above or of no interest
node_Ca_list <- data.frame(id = c(rep(0,7),11,rep(0,2),2),
names = c(rep("",7),"PO4 quantity in cells",
rep("",2),"PTH quantity in parathyroid glands"),
units = c(rep("",7),"mmol",
rep("",2),"pmol"))
#names(node_Ca_list) <- c(rep("",8),"PTH quantity in parathyroid glands",
# rep("",3),"PO4 quantity in cells")
yvar <- list(title = paste("Quantity", "(", node_Ca_list$units[node], ")"),
range = c(min(out[,node_Ca_list$id[node]]*0.8),
max(out[,node_Ca_list$id[node]]*1.2)))
p <- plot_ly(out,
x = time,
y = out[,node_Ca_list$id[node]],
type = "scatter",
mode = "lines",
line = list(color = 'black', width = 2)) %>%
layout(title = paste(node_Ca_list$names[node]),
font = title_size,
xaxis = xvar,
yaxis = yvar) %>%
config(displayModeBar = FALSE)
}
} else {
# node not allowed to plot
p <- plot_ly() %>%
add_annotations("Please select another node!",
showarrow = FALSE,
font = list(color = "red", size = 10)) %>%
config(displayModeBar = FALSE)
}
}
# return the plotly object
p
}
# plot_edge function will plot the flux
# related to the last selected edge
# edge can be input$current_edge_id and out
# contains all the variables returned by the
# solver
plot_edge <- function(edge, out) {
time <- out[,1]
xvar <- list(title = "time (min)",
range = c(0, max(time)))
# avoid edges that are not fluxes in the network (ids 13 to 29)
# as well as the filtration process (id 7)
if (edge == 7 || edge %in% 13:29) {
p <- plot_ly() %>%
add_annotations("Please select another edge!",
showarrow = FALSE,
font = list(color = "red", size = 10)) %>%
config(displayModeBar = FALSE)
} else {
# select edges where Ca and PO4 fluxes
# have the same regulation
if (edge == "Abs_int" | edge == "Res") {
yvar <- list(title = "Flux (µmol/min)",
range = c(min(out[,paste0(edge,"_Ca")]*1000*0.8,
out[,paste0(edge,"_PO4")]*1000*0.8),
max(out[,paste0(edge,"_Ca")]*1000*1.2,
out[,paste0(edge,"_PO4")]*1000*1.2)))
p <- plot_ly(out,
x = time,
mode = "lines") %>%
add_lines(y = out[,paste0(edge,"_Ca")]*1000,
name = paste0("Ca ",edge),
line = list(color = 'rgb(27, 102, 244)', width = 2),
visible = TRUE) %>%
add_lines(y = out[,paste0(edge,"_PO4")]*1000,
name = paste0("PO4 ",edge),
line = list(color = 'rgb(244, 27, 27)', width = 2),
visible = FALSE) %>%
layout(
title = paste(edge),
font = title_size,
xaxis = xvar,
yaxis = yvar,
updatemenus = list(
list(
type = "buttons",
direction = "right",
xanchor = 'center',
yanchor = "bottom",
#pad = list('r'= 0, 't'= 10, 'b' = 10),
x = 0.5,
y = -0.45,
buttons = list(
list(method = "restyle",
args = list("visible", list(TRUE, FALSE, FALSE)),
label = "Ca"),
list(method = "restyle",
args = list("visible", list(FALSE, TRUE, FALSE)),
label = "PO4"),
list(method = "restyle",
args = list("visible", list(TRUE, TRUE, FALSE)),
label = "Both"))))) %>%
config(displayModeBar = FALSE)
} else if (edge == "Net_Ca_pf" | edge == "Net_PO4_pf") {
# extract the pattern Ca or PO4 from the edge name
elem <- unlist(strsplit(edge,"_"))[[2]]
# Ca and PO4 exchanges between plasma and rapid pool
yvar <- list(title = "Flux (µmol/min)",
range = c(min(out[,paste0(elem,"_pf")]*1000*0.8),
max(out[,paste0(elem,"_pf")]*1000*1.2)))
p <- plot_ly(out,
x = time,
mode = "lines") %>%
add_lines(y = out[,paste0(elem,"_pf")]*1000,
name = paste(elem, "flux between plasma and fast bone pool"),
line = list(color = 'rgb(27, 102, 244)', width = 2),
visible = TRUE) %>%
add_lines(y = out[,paste0(elem,"_fp")]*1000,
name = paste(elem, "flux between fast bone pool and plasma"),
line = list(color = 'rgb(244, 27, 27)', width = 2),
visible = FALSE) %>%
layout(
title = paste("Plasma/fast bone pool",elem,"exchanges"),
font = title_size,
xaxis = xvar,
yaxis = yvar,
updatemenus = list(
list(
type = "buttons",
direction = "right",
xanchor = 'center',
yanchor = "bottom",
#pad = list('r'= 0, 't'= 10, 'b' = 10),
x = 0.5,
y = -0.45,
buttons = list(
list(method = "restyle",
args = list("visible", list(TRUE, FALSE, FALSE)),
label = "Plasma -> bone"),
list(method = "restyle",
args = list("visible", list(FALSE, TRUE, FALSE)),
label = "Bone -> plasma"),
list(method = "restyle",
args = list("visible", list(TRUE, TRUE, FALSE)),
label = "Both"))))) %>%
config(displayModeBar = FALSE)
} else if (edge == "Net_PO4_cells") {
# PO4 exchanges between cells and plasma
yvar <- list(title = "Flux (µmol/min)",
range = c(min(out[,"PO4_pc"]*1000*0.8,
out[,"PO4_cp"]*1000*0.8),
max(out[,"PO4_cp"]*1000*1.2,
out[,"PO4_pc"]*1000*1.2)))
p <- plot_ly(out,
x = time,
mode = "lines") %>%
add_lines(y = out[,"PO4_pc"]*1000,
name = "PO4 flux into cell",
line = list(color = 'rgb(27, 102, 244)', width = 2),
visible = TRUE) %>%
add_lines(y = out[,"PO4_cp"]*1000,
name = "PO4 release from cells",
line = list(color = 'rgb(244, 27, 27)', width = 2),
visible = FALSE) %>%
layout(
title = "Plasma/Cells PO4 exchanges",
font = title_size,
xaxis = xvar,
yaxis = yvar,
updatemenus = list(
list(
type = "buttons",
direction = "right",
xanchor = 'center',
yanchor = "bottom",
#pad = list('r'= 0, 't'= 10, 'b' = 10),
x = 0.5,
y = -0.45,
buttons = list(
list(method = "restyle",
args = list("visible", list(TRUE, FALSE, FALSE)),
label = "Plasma -> Cells"),
list(method = "restyle",
args = list("visible", list(FALSE, TRUE, FALSE)),
label = "Cells -> Plasma"),
list(method = "restyle",
args = list("visible", list(TRUE, TRUE, FALSE)),
label = "Both"))))) %>%
config(displayModeBar = FALSE)
} else {
# other cases
yvar <- list(title = "Flux (µmol/min)",
range = c(min(out[,edge]*1000*0.8),
max(out[,edge]*1000*1.2)))
p <- plot_ly(out,
x = time,
y = out[,edge]*1000,
type = "scatter",
mode = "lines",
line = list(color = 'black', width = 2)) %>%
layout(title = paste(edge),
font = title_size,
xaxis = xvar,
yaxis = yvar) %>%
config(displayModeBar = FALSE)
}
}
# return the plotly object
p
}
# Function to reset slider inputs to their original values.
# Takes button_states and input as arguments.
# button_states contains the state of the reset buttons (0 if
# not yet used) as well as the related slider ids
sliders_reset <- function(button_states, input) {
# store the previous state of the buttons in
# reactiveValues so as to compare it with
# the current state
button_states$values <- append(button_states$values, input$reset_t_now)
# associate each reset button to its related slider
reset_vector <- "t_now"
# store the temp state of buttons
states <- button_states$values
last_state <- states[[length(states)]]
# determine which reset buttons were pressed
if (length(states) <= 1) {
# compare the current state with 0
reset_target <- which(unlist(states) != 0)
} else {
# compare the current state with the previous one
penultimate_state <- states[[length(states) - 1]]
reset_target <- which(penultimate_state != last_state)
}
# reset the corresponding target(s) in the table
shinyjs::reset(reset_vector[reset_target])
}
# Function that determines which parameters have changed.
# Takes parameter values as argument and returns a list
# describing all parameters that currently differ from their
# base-case values
find_parameter_change <- function(parms) {
param_base_case <- c(4.192, 5.9e-002, 5.8e-002, 2.5e-005, 1e-003, 6.902e-011,
2.2e-003, 5.5e-004, 2.75e-004, 1e-004, 6e-004, 0.44,
2.34e-003, 1.55e-003, 0.1875, 1e-003, 13.5,
0.25165, 0.3, 3, 142, 0.6, 0.01, 2e-003)
# determine which parameter ratios differ from 1 (i.e. from the base case)
id <- which(parms/param_base_case != 1)
if (is_empty(id)) {
NULL
} else {
param_name <- names(parms[id])
parms <- unname(parms)
param_val <- parms[id]
param_ratio <- parms[id]/param_base_case[id]
param_col <- ifelse(param_ratio > 1, "success", "danger")
param_variation <- ifelse(param_ratio > 1, "increased", "decreased")
list("value" = param_val, "color" = param_col,
"text" = paste(param_name, "is", param_variation, "by",
param_ratio, sep = " "))
}
}
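# Example (sketch; assumes `parms` holds the 24 tunable parameters in the
# same order as param_base_case):
# find_parameter_change(parms)
# # $text is e.g. "k_prod_PTHg is increased by 2"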
# Recover t_start and t_stop when reading the event_table.
# Takes the event table as argument and returns a vector of the
# corresponding event parameters (t_start, t_stop and the rate of injection/gavage)
generate_event_parms <- function(event_table) {
# read the first row of the event table
if (nrow(event_table) > 0) {
name <- event_table[1, "event"]
rate <- event_table[1, "rate"]
t_start <- event_table[1, "start_time"]
t_stop <- event_table[1, "stop_time"]
Ca_inject <- if (name == "Ca_inject") rate else 0
Ca_food <- if (name == "Ca_food") rate else 0
D3_inject <- if (name == "D3_inject") rate else 0
P_inject <- if (name == "P_inject") rate else 0
P_food <- if (name == "P_food") rate else 0
D3_intake_reduction <- if (name == "D3_intake_reduction") rate else 1
Bispho <- if (name == "bisphosphonate") 0.3 else 1
Furo <- if (name == "furosemide") 6 else 1
Cinacal <- if (name == "cinacalcet") 1 else 0
return(
c(
"t_start" = t_start,
"t_stop" = t_stop,
"Ca_inject" = Ca_inject,
"Ca_food" = Ca_food,
"D3_inject" = D3_inject,
"P_inject" = P_inject,
"P_food" = P_food,
"D3_intake_reduction" = D3_intake_reduction,
"Bispho" = Bispho,
"Furo" = Furo,
"Cinacal" = Cinacal
)
)
} else {
return(
c(
"t_start" = 0,
"t_stop" = 0,
"Ca_inject" = 0,
"Ca_food" = 0,
"D3_inject" = 0,
"P_inject" = 0,
"P_food" = 0,
"D3_intake_reduction" = 1,
"Bispho" = 1,
"Furo" = 1,
"Cinacal" = 0
)
)
}
}
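# Usage sketch (not run): a one-row event table, as produced by the
# treatment timeline, is mapped to the event parameter vector consumed by
# the solver. The column values below are hypothetical.
if (FALSE) {
  demo_event <- data.frame(
    event = "Ca_inject", rate = 0.002,
    start_time = 0, stop_time = 120,
    stringsAsFactors = FALSE
  )
  generate_event_parms(demo_event)
  # -> t_start = 0, t_stop = 120, Ca_inject = 0.002, all other events off
}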
# Function needed to produce cumulative plots
accumulate_by <- function(dat, var) {
var <- lazyeval::f_eval(var, dat)
lvls <- plotly:::getLevels(var)
dats <- lapply(seq_along(lvls), function(x) {
cbind(dat[var %in% lvls[seq(1, x)], ], frame = lvls[[x]])
})
dplyr::bind_rows(dats)
}
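# Small sketch (not run) of what accumulate_by() returns: each frame
# contains all rows up to that level of the accumulating variable, which
# is the layout plotly animations expect for cumulative traces.
if (FALSE) {
  df <- data.frame(time = 1:3, y = c(2, 4, 8))
  accumulate_by(df, ~time)
  # frame 1: row 1; frame 2: rows 1-2; frame 3: rows 1-3
}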
# custom bootstrap 4 panels
bs4TabSetPanel <- function(..., id, side, status = NULL, tabStatus = NULL, .list = NULL) {
# to make tab ids in the namespace of the tabSetPanel
ns <- shiny::NS(id)
tabs <- c(list(...), .list)
found_active <- FALSE
selected <- NULL
tabStatus <- if (!is.null(tabStatus)) rep(tabStatus, length.out = length(tabs))
# handle tabs
tabSetPanelItem <- lapply(seq_along(tabs), FUN = function(i) {
tabName <- tabs[[i]][[1]]
tabsTag <- tabs[[i]][[2]]
tabClass <- tabsTag$attribs$class
    # make sure that if the user sets 2 tabs active at the same time,
    # only the first one is selected
active <- sum(grep(x = tabClass, pattern = "active")) == 1
if (!found_active) {
if (active) {
found_active <<- TRUE
selected <<- i - 1
# if no items are selected, we select the first
} else {
selected <<- 0
}
# do not allow more than 1 active item
} else {
if (active) {
stop("Cannot set 2 active tabs at the same time.")
}
}
id <- tabsTag$attribs$id
shiny::tags$li(
class = if (!is.null(status) & is.null(tabStatus[i])) {
"nav-item bg-light"
} else if (!is.null(tabStatus[i])) {
paste0("nav-item bg-", tabStatus[i])
} else {
"nav-item"
},
shiny::tags$a(
class = if (active) "nav-link active show" else "nav-link",
href = paste0("#", ns(id)),
`data-toggle` = "tab",
tabName
)
)
})
tabSetMenu <- shiny::tags$ul(
id = id,
class = if (side == "right") {
"nav nav-pills ml-auto p-2"
} else {
"nav nav-pills p-2"
}
)
tabSetMenu <- shiny::tagAppendChildren(tabSetMenu, tabSetPanelItem)
# content
tabSetContent <- shiny::tags$div(
class = "tab-content",
lapply(seq_along(tabs), FUN = function(i) {
# put the correct namespace on ids
tabs[[i]][[2]]$attribs$id <- ns(tabs[[i]][[2]]$attribs$id)
tabs[[i]][[2]]
})
)
shiny::tagList(
shiny::singleton(
shiny::tags$head(
shiny::tags$script(
paste0(
"$(function () {
$('#", id," li:eq(", selected,") a').tab('show');
})
"
)
)
)
),
tabSetMenu, tabSetContent
)
}
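# Hypothetical usage sketch (not run): the panel is filled with bs4TabPanel
# items, as done for the summary modal in server.R. Tab names and contents
# below are examples only.
if (FALSE) {
  bs4TabSetPanel(
    id = "demo_tabset",
    side = "left",
    bs4TabPanel(tabName = "Tab 1", active = TRUE, "Content 1"),
    bs4TabPanel(tabName = "Tab 2", active = FALSE, "Content 2")
  )
}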
|
/scratch/gouwar.j/cran-all/cranData/CaPO4Sim/inst/virtual_patient_simulator/bs4/model_utils.R
|
#-------------------------------------------------------------------------
# This code contains the dashboard header. It is modified compared
# to the classic header: some buttons to save, load, reset and download are
# inserted in the header bar. Moreover, users can change the global theme
# by clicking on the theme selector.
#
# David Granjon, the Interface Group, Zurich
# December 4th, 2017
#-------------------------------------------------------------------------
navbar <- bs4DashNavbar(
skin = "light",
status = "white",
border = TRUE,
sidebarIcon = "bars",
controlbarIcon = "th",
leftUi = tagList(
#uiOutput("currentTime"),
fullScreenUI(id = "fullScreenTrigger"),
uiOutput("user_game_status")
#uiOutput("current_calcium")
),
rightUi = tagList(
downloadButton(
label = "logs",
outputId = "download_logs"
)
),
fixed = FALSE
)
|
/scratch/gouwar.j/cran-all/cranData/CaPO4Sim/inst/virtual_patient_simulator/bs4/navbar.R
|
#-------------------------------------------------------------------------
# This code contains all network skeletons for CaPO4, PTH, ...
# For each network, we define two data frames: nodes contains all information
# related to nodes, and edges to edges...
#
#-------------------------------------------------------------------------
# This function is used to generate a network as well
# as basic options such as physics, manipulations,
# selection
generate_network <- function(input, nodes, edges, usephysics = FALSE) {
visNetwork(
nodes,
edges,
width = "100%",
height = "100%"
) %>%
visNodes(
shapeProperties = list(
interpolation = TRUE
)
) %>%
    # set shadow to FALSE
visEdges(
shadow = FALSE,
smooth = TRUE,
font = list(align = "horizontal")
) %>%
# add group selection option
visOptions(
highlightNearest = FALSE,
clickToUse = FALSE,
manipulation = FALSE,
collapse = FALSE,
autoResize = if (input$isMobile) FALSE else TRUE
) %>%
# prevent edge from being selected when a node is selected
visInteraction(
hover = TRUE,
hoverConnectedEdges = FALSE,
selectConnectedEdges = FALSE,
multiselect = FALSE,
dragNodes = FALSE,
dragView = FALSE,
zoomView = FALSE,
navigationButtons = FALSE,
selectable = TRUE,
tooltipStyle =
'position: fixed;
visibility:hidden;
padding: 5px;
padding-right: 10px;
padding-bottom: 10px;
white-space: nowrap;
font-family: verdana;
font-size:14px;
font-color:#000000;
background-color: #FFFFFF;
-moz-border-radius: 3px;
-webkit-border-radius: 3px;
border-radius: 3px;
border: 1px solid #808074;
box-shadow: 3px 3px 10px rgba(0, 0, 0, 0.2);
z-index: 100;
'
) %>%
# stabilization prevents arrows from bouncing
visPhysics(
stabilization = TRUE,
enabled = usephysics
)
}
# % % % % #
# #
# CaPO4 #
# #
# % % % % #
# Generate nodes for the CaPO4 network
generate_nodes_Ca <- function(input) {
data.frame(
id = 1:16,
shape = rep("image", 16),
image = c(
"CaPO4_network/intestine.svg", "CaPO4_network/plasma.svg",
"CaPO4_network/rapid-bone.svg", "CaPO4_network/bone.svg",
"CaPO4_network/kidney.svg", "CaPO4_network/kidney_zoom1.svg",
"CaPO4_network/urine.svg", "CaPO4_network/cells.svg",
"CaPO4_network/Cap.svg", "CaPO4_network/PO4.svg",
if (is.null(input$background_choice)) {
"CaPO4_network/parathyroid_gland.svg"
} else if (input$background_choice == "rat") {
"CaPO4_network/parathyroid_gland.svg"
} else {
"CaPO4_network/parathyroid_gland_human.svg"
}
, "CaPO4_network/PTH.svg", "CaPO4_network/D3.svg",
"CaPO4_network/D3.svg", "CaPO4_network/D3.svg",
"CaPO4_network/FGF23.svg"
),
    label = rep("", 16),
# tooltip to display an image
title = rep(NA, 16),
x = if (is.null(input$background_choice)) {
c(38,-65,-65,-256,180,360,170,-190,290,320,41,-418,330,385,-386,481)
} else if (input$background_choice == "rat") {
c(38,-65,-65,-256,180,360,170,-190,290,320,41,-418,330,385,-386,481)
# for human background
} else {
c(13,-80,-185,-322,157,333,7,-175,290,320,9,-466,330,385,-386,481)
},
y = if (is.null(input$background_choice)) {
c(-150,195,472,460,0,230,506,0,-317,-633,-452,240,-452,0,-106,-452)
} else if (input$background_choice == "rat") {
c(-150,195,472,460,0,230,506,0,-317,-633,-452,240,-452,0,-106,-452)
# for human background
} else {
c(23,320,524,214,189,439,581,88,-317,-633,-449,400,-452,0,-106,-452)
},
color = list(background = "#97C2FC", border = "#97C2FC",
highlight = list(background = "orange", border = "orange")),
size = c(rep(input$size_organs,5), 150, rep(input$size_organs,2),
rep(input$size_hormones,2), input$size_organs, rep(input$size_hormones,5)),
#fixed = list("x" = TRUE, "y" = TRUE),
physics = rep(FALSE,16),
hidden = c(
## organs ##
if (input$network_organ_choice == TRUE) {
c(rep(FALSE, 5),
TRUE,
FALSE,
# PO4 Cells
ifelse(is.element("PO4", input$network_Ca_choice),
ifelse(is.element("Ca", input$network_Ca_choice),FALSE,FALSE),TRUE))
} else {
rep(TRUE, 8)
},
## Hormones ##
# Ca plasma
ifelse(input$network_hormonal_choice,
ifelse(is.element("Ca", input$network_Ca_choice), FALSE, TRUE), TRUE),
# PO4 plasma
ifelse(input$network_hormonal_choice,
ifelse(is.element("PO4", input$network_Ca_choice) &
(is.element("D3", input$network_Ca_choice) |
is.element("PTH", input$network_Ca_choice) |
is.element("FGF23", input$network_Ca_choice)),
FALSE, TRUE), TRUE),
# PTHg
ifelse(input$network_hormonal_choice,
ifelse(is.element("PTH", input$network_Ca_choice), FALSE, TRUE), TRUE),
# PTH plasma
TRUE,
# ifelse(input$network_organ_choice,
# ifelse(input$network_hormonal_choice,
# ifelse(is.element("PTH", input$network_Ca_choice), FALSE, TRUE), TRUE), TRUE),
# D3 regulation
ifelse(input$network_hormonal_choice,
ifelse(is.element("D3", input$network_Ca_choice) &
(is.element("PO4", input$network_Ca_choice) |
is.element("Ca", input$network_Ca_choice) |
is.element("PTH", input$network_Ca_choice) |
is.element("FGF23", input$network_Ca_choice)),
FALSE, TRUE), TRUE),
# D3 plasma
ifelse(input$network_organ_choice,
ifelse(input$network_hormonal_choice,
ifelse(is.element("D3", input$network_Ca_choice), FALSE, TRUE), TRUE), TRUE),
# D3 plasma
ifelse(input$network_organ_choice,
ifelse(input$network_hormonal_choice,
ifelse(is.element("D3", input$network_Ca_choice), FALSE, TRUE), TRUE), TRUE),
# FGF23
ifelse(input$network_hormonal_choice,
ifelse(is.element("FGF23", input$network_Ca_choice), FALSE, TRUE), TRUE)
),
stringsAsFactors = FALSE
)
}
# Generate edges for the CaPO4 network
generate_edges_Ca <- function(input) {
req(input$width_organs, input$width_hormones)
data.frame(
from = c(
1, 2, 3, rep(3, 2), 4, 2, rep(5, 2), rep(5, 2), 8,
rep(9, 3), rep(10, 3), rep(11, 2), 11, rep(13, 2),
rep(14, 2), rep(15, 2), rep(16, 2)
),
to = c(
2, 3, 2, rep(4, 2), 2, 5, rep(2, 2), rep(7, 2),
2, 11, 5, 13, 11, 13, 16, 5, 13, 4, 11, 16,
14, 5, 4, 1, 13, 5
),
arrows = list(
to = list(
enabled = c(
TRUE,
rep(FALSE,2),
rep(TRUE,8),
FALSE,
rep(TRUE,17)
),
scaleFactor = 1,
type = "arrow"
)
),
label = c(
"", "Net Ca", "Net PO4", "Ca", "PO4", rep("", 2), "Ca", "PO4",
"Ca", "PO4", "Net PO4", rep("-", 3), "+", "-", "+",
"", "+", "+", "-", "+", "-", "+", "+", "+", "-", "-"
),
id = c(
"Abs_int", "Net_Ca_pf", "Net_PO4_pf",
"Ac_Ca", "Ac_PO4", "Res", 7, "Reabs_Ca",
"Reabs_PO4", "U_Ca", "U_PO4", "Net_PO4_cells",
13:29
),
width = c(rep(input$width_organs,12), rep(input$width_hormones,17)),
font.size = c(rep(25,12),rep(60,17)),
font.align = c(
"","top","bottom","top","bottom",rep("",4),"bottom",
"top","bottom","bottom",rep("top",2),"top","top",
"top","","bottom","top","top","bottom","bottom","top","top",
rep("top",2),"bottom"
),
    color = list(color = rep("black", 29), highlight = "yellow"),
    dashes = c(rep(FALSE, 12), rep(TRUE, 17)),
    title = rep(NA, 29),
    smooth = rep(TRUE, 29),
length = c(200,rep(300,2),rep(300,2),200,300,
200,rep(300,4),rep(200,8), 1700, rep(200,8)),
# to show either Ca or PO4 or CaPO4 network arrows
hidden = c(
## organ arrows ##
if (input$network_organ_choice == TRUE) {
c(ifelse(is.element("Ca", input$network_Ca_choice) |
is.element("PO4", input$network_Ca_choice), FALSE, TRUE),
ifelse(is.element("Ca", input$network_Ca_choice),
ifelse(is.element("PO4", input$network_Ca_choice),FALSE, FALSE), TRUE),
ifelse(is.element("PO4", input$network_Ca_choice),
ifelse(is.element("Ca", input$network_Ca_choice),FALSE, FALSE), TRUE),
ifelse(is.element("Ca", input$network_Ca_choice),
ifelse(is.element("PO4", input$network_Ca_choice),FALSE, FALSE), TRUE),
ifelse(is.element("PO4", input$network_Ca_choice),
ifelse(is.element("Ca", input$network_Ca_choice),FALSE, FALSE), TRUE),
rep(ifelse(is.element("Ca", input$network_Ca_choice) |
is.element("PO4", input$network_Ca_choice), FALSE, TRUE), 2),
ifelse(is.element("Ca", input$network_Ca_choice),
ifelse(is.element("PO4", input$network_Ca_choice),FALSE, FALSE), TRUE),
ifelse(is.element("PO4", input$network_Ca_choice),
ifelse(is.element("Ca", input$network_Ca_choice),FALSE, FALSE), TRUE),
ifelse(is.element("Ca", input$network_Ca_choice),
ifelse(is.element("PO4", input$network_Ca_choice),FALSE, FALSE), TRUE),
rep(ifelse(is.element("PO4", input$network_Ca_choice),
ifelse(is.element("Ca", input$network_Ca_choice),FALSE, FALSE), TRUE), 2))
} else {
rep(TRUE, 12)
},
## hormonal regulations arrows ##
# Ca regulation to PTH
ifelse(input$network_hormonal_choice,
ifelse(is.element("PTH", input$network_Ca_choice) &
is.element("Ca", input$network_Ca_choice), FALSE, TRUE), TRUE),
# Ca to Kidney
ifelse(input$network_organ_choice,
ifelse(input$network_hormonal_choice,
ifelse(is.element("Ca", input$network_Ca_choice), FALSE, TRUE), TRUE), TRUE),
# Ca regulation to D3
ifelse(input$network_hormonal_choice,
ifelse(is.element("D3", input$network_Ca_choice) &
is.element("Ca", input$network_Ca_choice), FALSE, TRUE), TRUE),
# PO4 regulation to PTH
ifelse(input$network_hormonal_choice,
ifelse(is.element("PTH", input$network_Ca_choice) &
is.element("PO4", input$network_Ca_choice), FALSE, TRUE), TRUE),
# PO4 regulation to D3
ifelse(input$network_hormonal_choice,
ifelse(is.element("D3", input$network_Ca_choice) &
is.element("PO4", input$network_Ca_choice), FALSE, TRUE), TRUE),
# PO4 regulation to FGF23
ifelse(input$network_hormonal_choice,
ifelse(is.element("FGF23", input$network_Ca_choice) &
is.element("PO4", input$network_Ca_choice), FALSE, TRUE), TRUE),
# PTH regulation to kidney
ifelse(input$network_organ_choice,
ifelse(input$network_hormonal_choice,
ifelse(is.element("PTH", input$network_Ca_choice), FALSE, TRUE), TRUE), TRUE),
# PTH regulation to D3
ifelse(input$network_hormonal_choice,
ifelse(is.element("D3", input$network_Ca_choice) &
is.element("PTH", input$network_Ca_choice), FALSE, TRUE), TRUE),
# PTH regulation to bone
ifelse(input$network_organ_choice,
ifelse(input$network_hormonal_choice,
ifelse(is.element("PTH", input$network_Ca_choice), FALSE, TRUE), TRUE), TRUE),
# D3 regulation to PTH
ifelse(input$network_hormonal_choice,
ifelse(is.element("PTH", input$network_Ca_choice) &
is.element("D3", input$network_Ca_choice), FALSE, TRUE), TRUE),
# D3 regulation to FGF23
ifelse(input$network_hormonal_choice,
ifelse(is.element("FGF23", input$network_Ca_choice) &
is.element("D3", input$network_Ca_choice), FALSE, TRUE), TRUE),
# D3 regulation to D3
ifelse(input$network_organ_choice,
ifelse(input$network_hormonal_choice,
ifelse(is.element("D3", input$network_Ca_choice), FALSE, TRUE), TRUE), TRUE),
# D3 regulation to kidney
ifelse(input$network_organ_choice,
ifelse(input$network_hormonal_choice,
ifelse(is.element("D3", input$network_Ca_choice), FALSE, TRUE), TRUE), TRUE),
# D3 regulation to bone
ifelse(input$network_organ_choice,
ifelse(input$network_hormonal_choice,
ifelse(is.element("D3", input$network_Ca_choice), FALSE, TRUE), TRUE), TRUE),
# D3 regulation to intestine
ifelse(input$network_organ_choice,
ifelse(input$network_hormonal_choice,
ifelse(is.element("D3", input$network_Ca_choice), FALSE, TRUE), TRUE), TRUE),
# FGF regulation to D3
ifelse(input$network_hormonal_choice,
ifelse(is.element("D3", input$network_Ca_choice) &
is.element("FGF23", input$network_Ca_choice), FALSE, TRUE), TRUE),
# FGF regulation to kidney
ifelse(input$network_organ_choice,
ifelse(input$network_hormonal_choice,
ifelse(is.element("FGF23", input$network_Ca_choice), FALSE, TRUE), TRUE), TRUE)
),
stringsAsFactors = FALSE
)
}
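# Illustrative sketch (not run): outside Shiny, a plain list exposing the
# input fields read above is enough to build the CaPO4 network. All values
# below are hypothetical.
if (FALSE) {
  mock_input <- list(
    background_choice = "rat",
    size_organs = 100, size_hormones = 60,
    width_organs = 8, width_hormones = 4,
    network_organ_choice = TRUE,
    network_hormonal_choice = TRUE,
    network_Ca_choice = c("Ca", "PO4", "PTH", "D3", "FGF23"),
    isMobile = FALSE
  )
  generate_network(
    mock_input,
    nodes = generate_nodes_Ca(mock_input),
    edges = generate_edges_Ca(mock_input)
  )
}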
|
/scratch/gouwar.j/cran-all/cranData/CaPO4Sim/inst/virtual_patient_simulator/bs4/networks.R
|
# Function needed to generate patients in the /www/patients_datas folder
# id: patient id, must be unique
# name: patient name
# picture: path to image if any
# age: patient age
# height: patient height in cm
# weight: patient weight in kg
# gender: whether the patient is male or female. Used to generate random avatar images
# medical_history -> list containing the following fields: pathologies,
# examination_dates, doctors, doctors_gender, disease_description, disease_image, which are also lists
# disease_id <- php1, hypopara, hypoD3, ... Used to set up the initial conditions
patient_generator <- function(id, name, picture = NULL, age, height, weight, gender,
pathologies = list() , examination_dates = list(),
doctors = list(), doctors_gender = list(),
disease_description = list(), disease_image = list(),
disease_id) {
# raw folder
raw_folder <- paste0(getwd(), "/www/")
# check if the provided id is already used
data_folder <- paste0(raw_folder, "patients_datas")
file_list <- list.files(data_folder)
n_patients <- length(file_list)
if (n_patients > 0) {
test <- lapply(1:n_patients, FUN = function(i) {
temp <- readRDS(file = paste0(data_folder, "/", "patient_", i, ".rds"))
if (id == temp$id) stop("You must choose a unique id number")
})
}
# setup initial conditions depending on the disease_id
state_folder <- paste0(raw_folder, "model_engine")
if (disease_id == "php1") {
state <- read.csv(paste0(state_folder, "/init_php1.csv"), stringsAsFactors = FALSE)
state <- unlist(state[,-1])
} else if (disease_id == "hypopara") {
state <- read.csv(paste0(state_folder, "/init_hypopara.csv"), stringsAsFactors = FALSE)
state <- unlist(state[,-1])
} else if (disease_id == "hypoD3") {
state <- read.csv(paste0(state_folder, "/init_hypoD3.csv"), stringsAsFactors = FALSE)
state <- unlist(state[,-1])
} else if (disease_id == "hyperD3") {
state <- read.csv(paste0(state_folder, "/init_hyperD3.csv"), stringsAsFactors = FALSE)
state <- unlist(state[,-1])
}
# set up random patient image
patient_images_folder <- paste0(raw_folder, "patients_img")
if (gender == "male") {
patient_images_folder <- paste0(patient_images_folder, "/male")
} else {
patient_images_folder <- paste0(patient_images_folder, "/female")
}
random_image_number <- sample(1:12, 1)
patient_avatar <- paste0(
patient_images_folder,
"/",
list.files(patient_images_folder)[[random_image_number]]
)
patient_avatar <- unlist(str_split(string = patient_avatar, pattern = "www/"))[2]
  # set up a random doctor image
doctor_images_folder <- paste0(raw_folder, "doctors_img")
doctors_avatars <- lapply(1:length(doctors), FUN = function(i){
if (doctors_gender[[i]] == "male") {
doctor_images_folder <- paste0(doctor_images_folder, "/male")
} else {
doctor_images_folder <- paste0(doctor_images_folder, "/female")
}
random_image_number <- sample(1:7, 1)
doctor_avatar <- paste0(
doctor_images_folder,
"/",
list.files(doctor_images_folder)[[random_image_number]]
)
doctor_avatar <- unlist(str_split(string = doctor_avatar, pattern = "www/"))[2]
})
  # if the previous id test passed, generate the patient
patient_data <- list(
id = id,
name = name,
picture = patient_avatar,
age = age,
height = paste0(height, " cm"),
weight = paste0(weight, " kg"),
medical_history = list(
pathologies = pathologies,
examination_dates = examination_dates,
doctors = doctors,
doctors_avatars = doctors_avatars,
disease_description = disease_description,
disease_image = disease_image
),
disease_id = disease_id,
initial_conditions = state
)
saveRDS(object = patient_data, file = paste0(raw_folder, "patients_datas/patient_", id, ".rds"))
}
patient_generator(
id = 1,
name = "Patient: John Doe",
age = "50 yrs",
height = 183,
weight = 72,
gender = "male",
disease_id = "hyperD3",
pathologies = list(
"Hyporeactivity, muscle aches, appetite loss, constipation, irritability",
"Preliminary blood and urine analyses",
"Mild left hydronephrosis"
),
examination_dates = list(rep("", 3)),
doctors = list(
"Initial consultation",
"Laboratory Findings",
"Renal sonography"
),
doctors_gender = list("male", "female", "male"),
disease_description = list(
"Mr. Doe presented with tiredness and hyporeactivity.
Over the past few weeks, he had experienced fatigue, excessive thirst,
muscle aches, loss of appetite, constipation and irritability. <br>
Mr. Doe did not have any relevant past clinical history and was
    not taking any medication apart from over-the-counter supplements.
Basic physical parameters were normal: BMI (21.5 kg/m2), blood
pressure (120/70 mmHg) and heart rate (70 bpm).
",
NULL,
"Renal sonography revealed normal sized kidneys (right 10.cm, left 11cm),
no nephrocalcinosis, but a mild left hydronephrosis."
),
disease_image = list(
NULL,
"case_studies_img/patient1-1.svg",
NULL
)
)
|
/scratch/gouwar.j/cran-all/cranData/CaPO4Sim/inst/virtual_patient_simulator/bs4/patient_generator.R
|
patient_selector <- function() {
  # read the data folder and
# find the number of patients
data_folder <- paste0(getwd(), "/www/patients_datas")
file_list <- list.files(data_folder)
n_patients <- length(file_list)
# generate a random number
random_patient <- sample(1:n_patients, 1)
readRDS(file = paste0(data_folder, "/", "patient_", random_patient, ".rds"))
}
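# Usage sketch (not run): draws one of the generated patient files at
# random; the fields follow the structure created by patient_generator().
if (FALSE) {
  patient <- patient_selector()
  patient$name
  patient$disease_id
}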
|
/scratch/gouwar.j/cran-all/cranData/CaPO4Sim/inst/virtual_patient_simulator/bs4/patient_selector.R
|
#-------------------------------------------------------------------------
# This application is an R-Shiny implementation of a calcium and phosphate
# homeostasis model. It aims at being used by medical students but also
# researchers. See https://divadnojnarg.github.io for more information
#
# David Granjon, the Interface Group, Zurich
# June 12th, 2017
#-------------------------------------------------------------------------
server <- function(input, output, session) {
# enable fullscreen
callModule(module = fullScreen, id = "fullScreenTrigger")
#-------------------------------------------------------------------------
  # useful data: initialization. These data are not in global.R since
  # they are sometimes reloaded by the program. In global.R they would not
  # be reloaded, which would corrupt the new session
#
#-------------------------------------------------------------------------
  # all student names for the session
students_names <- paste(rep("Jane Doe", 5), c(1:5))
# load all questions
questions <- generate_questions()
# load patient files
patient_datas <- patient_selector()
# Load state values based on files previously created for each case (php1, hypopara, hypoD3)
patient_state_0 <- patient_datas$initial_conditions
# patient disease
patient_disease <- patient_datas$disease_id
# game answers
if (patient_disease == "php1") {
answer <- c("primary hyperparathyroidism")
} else if (patient_disease == "hypopara") {
answer <- c("hypoparathyroidism")
} else if (patient_disease == "hypoD3") {
answer <- c("vitamin D3 deficiency")
} else if (patient_disease == "hyperD3") {
answer <- c("vitamin D3 intoxication")
}
# disease answer list for students
diseases_list <- c(
"nephrolithiasis",
"primary hyperparathroidism",
"vitamin D3 intoxication",
"hypoparathyroidism",
"ricket",
"oncogenic osteomalacia",
"FGF23 deficiency",
"vitamin D3 deficiency",
"nephrocalcinosis",
"depression",
"nonalcoholic fatty liver disease"
)
  # below is needed to handle treatment events
treatment_choices <- c(
#"PTX",
#"D3_inject",
#"Ca_food",
#"Ca_inject",
#"P_food",
#"P_inject",
"D3_intake_reduction",
"cinacalcet",
"bisphosphonate",
"furosemide"
)
# plot summary list
summary_plot_names <- c(
"Ca_p",
"PO4_p",
"PTH_p",
"D3_p",
"FGF_p"
)
# initialization of the patient feedback observer
patient_feedback <- NULL
  # # initialization of the timer
# minutes_time <- 60 # the application will stop in 60 minutes
start_time <- Sys.time()
# end_time <- start_time + minutes_time * 60
# store the app url
app_url <- reactive({
paste0(
session$clientData$url_protocol, "//",
session$clientData$url_hostname, ":",
session$clientData$url_port
)
})
# store the current user folder
user_folder <- reactive({
paste0(
users_logs, "/",
input$user_name, "_", format(Sys.time(), "%Y-%m-%d_%H%M%S"))
})
#-------------------------------------------------------------------------
# Store times, state and parameters in reactive values that can
# react to user inputs
#
#-------------------------------------------------------------------------
# Basic reactive expressions needed by the solver
times <- reactive({
seq(0, ifelse(parameters()[["t_stop"]] != 0, parameters()[["t_stop"]], input$tmax), by = 1)
})
# initial conditions
states <- reactiveValues(
val = list(),
counter = 1,
name = "start_case"
)
  # storing event parameters from the data frame in a reactive list
parameters_event <- reactive({
c(
# static event parameters
"PTX_coeff" = ifelse(isTRUE(events$PTX), 0, 1),
# dynamic event parameters
generate_event_parms(events$current)
)
})
  # Create parameter sets for all diseases and treatments
parameters_disease <- reactive({
c("k_prod_PTHg" = ifelse(
patient_disease == "php1", 300*4.192,
ifelse(patient_disease == "hypopara", 0, 4.192)
),
"D3_inact" = ifelse(
patient_disease == "hypoD3", 0,
ifelse(patient_disease == "hyperD3", 5e-004, 2.5e-005)
)
)
})
  # make a vector of disease-related parameters,
  # fixed parameters (parameters_fixed) and parameters related to events
parameters <- reactive({
c(parameters_disease(), parameters_fixed, parameters_event())
})
#-------------------------------------------------------------------------
# Render Patient boxes: patient_info,
# medical_history, timeline events as well
# as the graph and CaPO4 network box
#
#-------------------------------------------------------------------------
# patient info box
output$patient_info <- renderUI({
medical_history <- patient_datas$medical_history
len <- length(medical_history$pathologies)
bs4Card(
title = "Past Medical History",
footer = NULL,
status = "primary",
elevation = 4,
solidHeader = TRUE,
headerBorder = TRUE,
gradientColor = NULL,
width = 12,
height = NULL,
collapsible = TRUE,
collapsed = FALSE,
closable = FALSE,
labelStatus = "danger",
labelText = len,
labelTooltip = NULL,
dropdownMenu = NULL,
dropdownIcon = "wrench",
overflow = FALSE,
cardProfile(
src = patient_datas$picture,
title = patient_datas$name,
subtitle = NULL,
cardProfileItemList(
bordered = FALSE,
cardProfileItem(
title = "Age",
description = patient_datas$age
),
cardProfileItem(
title = "Height",
description = patient_datas$height
),
cardProfileItem(
title = "Weight",
description = patient_datas$weight
)
)
),
br(),
lapply(1:len, FUN = function(i){
userPost(
id = i,
collapsed = FALSE,
src = medical_history$doctors_avatars[[i]],
author = medical_history$doctors[[i]],
description = strong(medical_history$pathologies[[i]]),
HTML(paste(medical_history$disease_description[[i]])),
if (!is.null(medical_history$disease_image[[i]])) {
userPostMedia(src = medical_history$disease_image[[i]])
}
)
})
)
})
# the user notebook
output$user_notebook <- renderUI({
if (events$logged) {
comments <- comments$history
len <- nrow(comments)
bs4SocialCard(
closable = FALSE,
width = 12,
title = paste0(input$user_name, "'s notebook"),
subtitle = start_time,
src = "https://image.flaticon.com/icons/svg/305/305983.svg",
if (events$animation >= 8) {
tagList(
column(
width = 12,
align = "center",
actionBttn(
inputId = "diagnosis",
size = "lg",
label = "Diagnosis",
style = "fill",
color = "primary",
icon = icon("search")
)
),
br()
)
},
if (events$animation < 8) {
tagList(
textAreaInput(
inputId = "user_comment",
label = questions[[events$animation + 1]],
value = "I enter here all my observations!"
),
column(
width = 12,
align = "center",
actionBttn(
inputId = "user_add_comment",
size = "sm",
icon = "Next",
style = "fill",
color = "success"
)
)
)
},
comments = if (len > 0) {
tagList(
lapply(1:len, FUN = function(i) {
cardComment(
src = "https://image.flaticon.com/icons/svg/305/305983.svg",
title = questions[[i]],
date = comments$date[[i]],
comments$description[[i]]
)
})
)
} else {
NULL
},
footer = NULL
)
}
})
# Event to be added in the timeLine
output$recent_events <- renderUI({
if (events$logged) {
if (events$animation_started) {
len <- nrow(events$history)
name <- events$history$event
start_time <- events$history$real_time
rate <- events$history$rate
plasma_values <- plasma_analysis$history
withMathJax(
bs4Card(
title = "Recent Events",
footer = NULL,
status = "primary",
elevation = 4,
solidHeader = TRUE,
headerBorder = TRUE,
gradientColor = NULL,
width = 12,
height = NULL,
collapsible = TRUE,
collapsed = FALSE,
closable = FALSE,
labelStatus = "danger",
labelText = len,
labelTooltip = NULL,
dropdownMenu = NULL,
dropdownIcon = "wrench",
overflow = TRUE,
          # treatment inputs are
          # in the event box
if (!is.null(events$answered)) {
tagList(
prettyCheckboxGroup(
inputId = "treatment_selected",
label = "Select a new treatment:",
choices = c(
#"parathyroid surgery" = "PTX",
#"D3 iv injection" = "D3_inject",
#"Ca supplementation" = "Ca_food",
#"Ca iv injection" = "Ca_inject",
#"Pi iv injection" = "P_inject",
#"Pi supplementation" = "P_food",
"D3 intake reduction" = "D3_intake_reduction",
"Cinacalcet" = "cinacalcet",
"Bisphosphonate" = "bisphosphonate",
"Furosemide" = "furosemide"
),
thick = TRUE,
inline = TRUE,
animation = "pulse"
),
uiOutput(outputId = "sliderInject"),
hr()
)
},
if (len > 0) {
items <- lapply(1:len, FUN = function(i){
item <- tagAppendAttributes(
bs4TimelineItem(
title = name[[i]],
icon = "medkit",
status = "orange",
time = bs4Badge(
position = "left",
rounded = FALSE,
status = "warning",
start_time[[i]]
),
bs4TimelineItemMedia(
src = if (name[[i]] %in% c("D3_inject", "Ca_inject", "P_inject")) {
"treatments_img/syringe.svg"
} else if (name[[i]] %in% c("Ca_food", "P_food", "D3_intake_reduction")) {
"treatments_img/medicine.svg"
} else if (name[[i]] == "PTX") {
"treatments_img/surgery.svg"
} else if (name[[i]] %in% c("cinacalcet", "furosemide", "bisphosphonate")) {
"treatments_img/pills.svg"
} else if (name[[i]] == "plasma analysis") {
"treatments_img/test-tube.svg"
},
width = "40",
height = "40"
),
# in case of plasma analysis, display the results next to the logo
if (name[[i]] == "plasma analysis") {
tagList(
paste0("$$[Ca^{2+}_p] = ", round(plasma_values[i, 'Ca_p'], 2), " mM [1.1-1.4 mM]$$"),
paste0("$$[P_i] = ", round(plasma_values[i, "PO4_p"], 2), " mM [0.8-1.6 mM]$$"),
paste0("$$[PTH_p] = ", round(plasma_values[i, "PTH_p"] * 100) * 1.33, " pM [3-16 pM]$$"),
# scale D3
paste0("$$[1,25D3_p] = ", round(plasma_values[i, "D3_p"]) / 4, " pM [36-150 pM]$$"),
# scale FGF23
paste0("$$[FGF23_p] = ", round(plasma_values[i, "FGF_p"] / 25, 2), " pM [0.3-2.1 pM]$$")
)
},
footer = NULL
#if (!is.null(name[[i]])) {
# if (name[[i]] != "PTX")
# if (!(name[[i]] %in% c("PTX", "plasma analysis"))) {
# dashboardLabel(status = "danger", rate[[i]])
# }
# else NULL
#}
),
align = "middle"
)
item$children[[2]]$children[[3]] <- tagAppendAttributes(
item$children[[2]]$children[[3]],
style = "overflow-x: auto;"
)
item
})
bs4Timeline(
width = 12,
style = "height: 400px;",
bs4TimelineStart(status = "danger"),
br(),
items,
br(),
bs4TimelineEnd(status = "gray")
)
}
)
)
}
}
})
# graph box
output$graphs_box <- renderUI({
if (events$logged) {
if (events$animation_started) {
bs4Card(
width = 12,
elevation = 4,
#title = "Click on the plasma node to display concentrations",
solidHeader = TRUE,
status = "primary",
collapsible = TRUE,
closable = FALSE,
withSpinner(
plotlyOutput(
"plot_node",
height = "300px",
width = "100%"
),
size = 2,
type = 8,
color = "#000000"
)
)
}
}
})
# network box
output$network_box <- renderUI({
if (events$logged) {
if (events$animation_started) {
cardTag <- bs4Card(
title = tagList(
actionBttn(
inputId = "run",
size = "lg",
label = "Run",
style = "fill",
color = "primary",
icon = icon("play")
),
actionBttn(
inputId = "summary",
size = "lg",
label = "Summary",
style = "fill",
color = "royal",
icon = icon("tv")
)
),
solidHeader = TRUE,
collapsible = TRUE,
status = "primary",
width = 12,
closable = FALSE,
#enable_sidebar = TRUE,
#sidebar_width = 50,
#sidebar_background = "#888888",
#sidebar_start_open = FALSE,
#sidebar_content = tagList(
# getting_started()
#),
div(
id = "network_cap",
withSpinner(
visNetworkOutput(
"network_Ca",
height = if (input$isMobile) "450px" else "900px"
),
size = 2,
type = 8,
color = "#000000"
)
),
footer = NULL
)
cardTag[[2]]$children[[1]]$children[[2]] <- tagAppendAttributes(
cardTag[[2]]$children[[1]]$children[[2]],
class = "p-0"
)
cardTag
}
}
})
# wrap the whole UI
output$patient_ui <- renderUI({
fluidRow(
# left colum
column(
width = if (events$animation_started) 3 else 6,
style = 'padding:0px;',
# profile box
uiOutput("patient_info"),
# user notebook
uiOutput("user_notebook")
),
# patient operation table
column(
width = 6,
style = 'padding:0px;',
uiOutput("network_box")
),
# event/results column
column(
width = 3,
style = 'padding:0px;',
# results box
uiOutput("graphs_box"),
# timeline event box
uiOutput("recent_events")
)
)
})
#-------------------------------------------------------------------------
  # JavaScript alerts: give instructions to users and handle when the
  # game ends
#
#-------------------------------------------------------------------------
# time <- reactiveValues(switcher = FALSE)
#
# # set up a timer during which user have to finish the game
# # and generate the related progress bar
# countdown <- reactive({
# invalidateLater(1000, session)
# countdown <- end_time - Sys.time()
# })
#
# # switch between minutes and seconds when coutdown < 1 minute
# observe({
# if (countdown()<= 1.02) {
# time$switcher <- TRUE
# }
# })
#
# # convert in percentage for the progress bar
# percent_countdown <- reactive({
# countdown <- countdown()
# if (!time$switcher) {
# countdown / minutes_time * 100
# } else {
# countdown / 60 * 100
# }
# })
#
# # render the progress bar for countdown
# output$currentTime <- renderUI({
# if (!events$stop) {
# countdown <- countdown()
# percent_countdown <- percent_countdown()
# statusClass <- if (!time$switcher) {
# if (66 < percent_countdown & percent_countdown <= 100) {
# "success"
# } else if (30 < percent_countdown & percent_countdown <= 66) {
# "warning"
# } else {
# "danger"
# }
# } else {
# "danger"
# }
# progressBar(
# id = "countdown",
# value = percent_countdown,
# status = statusClass,
# striped = TRUE,
# size = "xs",
# title = paste0("End in ", round(countdown), if (!time$switcher) " min" else " sec")
# )
# }
# })
  # When the counter is equal to 0, each time the session is opened,
  # show the "how to start" sweetAlert.
  # A delay of 5 seconds is set so that the alert is not displayed before
  # the page is fully loaded (in case we use preloaders in the dashboardPagePlus,
  # the preloader lasts around 3 s...)
observe({
if (!events$logged) {
shinyjs::delay(
5000,
confirmSweetAlert(
session,
inputId = "register_user",
title = "How to start?",
text = tagList(
img(src = "interface_img/start.svg", width = "100px", height = "100px"),
br(),
HTML(
"You will be presented with a patient case-study related
to CaPO4 homeostasis. The goal of this activity is to
<b>establish</b> a diagnosis and <b>treat</b>
the patient correctly:
<ol>
          <li> To establish your diagnosis, you can click on any compartment, e.g.
          click on plasma to conduct blood plasma analyses. </li>
          <li> After having established an initial diagnosis, you will be
          offered multiple treatment options. </li>
</ol>"
),
hr(),
column(
align = "center",
width = 12,
selectInput(
inputId = "user_name",
label = "Your name:",
choices = students_names,
selected = NULL,
multiple = FALSE,
selectize = TRUE,
width = NULL,
size = NULL
)
)
),
btn_labels = c(NA, "Confirm"),
type = "warning",
html = TRUE
)
)
}
})
# disable the confirm button if the user name is missing
observe({
if (!is.null(input$user_name)) {
shinyjs::toggleState(
selector = "button.swal-button.swal-button--confirm",
condition = input$user_name != ""
)
}
})
# when the user is registered, set logged to TRUE
observeEvent(input$register_user,{
if (input$user_name != "") {
events$logged <- TRUE
}
})
# # shift stop when countdown is 0
# observe({
# if (countdown() <= 0)
# events$stop <- TRUE
# })
#
# # When the timer is 0 the game is over if the user has no diagnosis
# # and treatment
# observe({
# if (is.null(input$close_app)) {
# if (events$stop) {
# confirmSweetAlert(
# inputId = "close_app",
# danger_mode = TRUE,
# session,
# title = "This is the end!",
# text = tagList(
# img(src = "interface_img/finish.svg", width = "100px", height = "100px"),
# br(),
# HTML(
# paste(
# "It seems that the game is finished.
# You can restart or close the game."
# )
# )
# ),
# btn_labels = c("Restart", "Stop"),
# type = "error",
# html = TRUE
# )
# }
# }
# })
#
# # Handle what happens when the user close or restart the app
# observeEvent(input$close_app, {
# if (input$close_app) {
# sendSweetAlert(
# session,
# title = "Stop in 5 seconds...",
# type = "error"
# )
# shinyjs::delay(5000, {
# js$closeWindow()
# stopApp()
# })
# } else {
# session$reload()
# }
# })
  # init the directory where user data will be saved
observeEvent(input$register_user, {
req(input$register_user)
# create the new folder
dir.create(user_folder())
})
# # give the user the opportunity to load a previous session
# observeEvent(input$register_user, {
# user_folder <- paste0(getwd(), "/www/users_datas/")
# file_list <- as.vector(list.files(user_folder))
#
# confirmSweetAlert(
# session,
# danger_mode = TRUE,
# inputId = "load_previous_session",
# title = "Want to load an older session?",
# text = tagList(
# column(
# width = 12,
# align = "center",
# prettyRadioButtons(
# inputId = "old_session",
# label = "Choose a saved session:",
# choices = file_list,
# animation = "pulse",
# status = "info"
# )
# )
# ),
# btn_labels = c("Cancel", "Load"),
# type = "warning",
# html = TRUE
# )
# })
#
# # load the previous session
# observeEvent(input$load_previous_session, {
# if (input$load_previous_session) {
# user_folder <- paste0(getwd(), "/www/users_datas/")
# temp_folder <- paste0(user_folder, input$old_session)
# file_list <- list.files(temp_folder)
# lapply(1:length(file_list), FUN = function(i) {
# print(paste0(temp_folder, "/", file_list[[i]]))
# readRDS(file = paste0(temp_folder, "/", file_list[[i]]))
# })
#
# # replace start_time by the value of when the folder was first created
# start_time <- unlist(str_split(input$old_session, "-", n = 2))[[2]]
# }
# })
  # handle the case when the user presses the diagnosis button
observeEvent(input$diagnosis, {
confirmSweetAlert(
session,
inputId = "diagnosis_answer",
title = "What is the disease of this patient?",
btn_labels = c("Send"),
type = "warning",
text = tagList(
column(
align = "center",
width = 12,
selectInput(
inputId = "disease_name",
label = "",
choices = diseases_list,
selected = NULL,
multiple = FALSE,
selectize = TRUE,
width = NULL,
size = NULL
)
)
),
html = TRUE
)
})
# treat the diagnosis answer
observeEvent(input$diagnosis_answer, {
user_answer <- input$disease_name
if (user_answer != "") {
test <- str_detect(answer, regex(paste0("\\b", user_answer, "\\b"), ignore_case = TRUE))
if (test) {
events$answered <- TRUE
sendSweetAlert(
session,
title = paste0("Congratulations ", input$user_name, " !"),
text = HTML(
paste0(
"This patient has,", answer,
"It would be better to treat him now. Remember you have
<b>15 minutes</b> to complete this activity."
)
),
type = "success",
html = TRUE
)
} else {
events$answered <- FALSE
sendSweetAlert(
session,
title = "Wasted!",
text = paste0(input$user_name, ", it seems that your answer is wrong!"),
type = "error"
)
}
# save the answer status
saveRDS(
object = c(events$answered, user_answer),
file = paste0(user_folder(), "/user_answer.rds")
)
} else {
sendSweetAlert(
session,
title = "Missing diagnosis!",
text = paste0(input$user_name, ", it seems that your answer is empty!"),
type = "error"
)
}
})
# prevent the user from resubmitting an answer if he correctly guessed
# the patient disease
observe({
if (!is.null(events$answered)) {
if (events$answered) {
shinyjs::disable("diagnosis")
}
}
})
# a label to indicate the user whether the diagnosis is ok or not
# in the header
output$user_game_status <- renderUI({
game_status <- if (!is.null(events$answered)) {
if (events$answered) "success" else "danger"
} else {
"warning"
}
game_text <- if (!is.null(events$answered)) {
if (events$answered)
paste0(input$disease_name, ": successful diagnosis")
else paste0(input$disease_name, ": unsuccessful diagnosis")
} else {
"No diagnosis yet"
}
div(
style = "margin-top: 7.5px; margin-left: 10px;",
class = "diagnosis-badge",
bs4Badge(
game_text,
status = game_status,
rounded = TRUE,
position = "left"
)
)
})
# Give users the opportunity to save data
output$download_logs <- downloadHandler(
filename = function() paste0(input$user_name, "_logs.rds"),
content = function(file) {
saveRDS(
list(
my_events = events$history,
my_comments = comments$history,
my_answer = c(events$answered, input$disease_name)
),
file
)
}
)
# clean all empty folders when the application starts
observe({
dir_list <- list.dirs(users_logs)
if (length(dir_list) > 1) {
lapply(2:length(dir_list), FUN = function(i) {
temp_dir <- dir_list[[i]]
temp_file_list <- list.files(temp_dir)
if (length(temp_file_list) == 0) unlink(x = temp_dir, recursive = TRUE)
})
}
})
#-------------------------------------------------------------------------
  # Calcium/PTH/D3/FGF23 feedback: give the user some feedback
# regarding the current state of the app
#
#-------------------------------------------------------------------------
# how to use the notebook
observe({
if (!is_empty(input$register_user)) {
shinyjs::delay(
1000,
confirmSweetAlert(
session,
inputId = "diagnosis_intro",
title = "How to use the notebook?",
text = tagList(
img(src = "interface_img/notebook.svg", width = "100px", height = "100px"),
br(),
HTML("A serie of questions will help you during
the diagnostic process. Click on <img src='interface_img/next.svg' height='50' width='50'>
to go through the questions. Once you completed all questions,
submit your diagnosis by clicking on
<img src='interface_img/diagnosis.svg' height='70' width='70'>.")
),
btn_labels = c(NULL, "Ok"),
type = "warning",
html = TRUE
)
)
}
})
# Introduction to plasma analysis
observeEvent(input$user_add_comment, {
if (events$animation == 3) {
confirmSweetAlert(
session,
inputId = "plasma_analysis_intro",
title = "How to deal with plasma analysis?",
text = tagList(
img(src = "CaPO4_network/plasma.svg", width = "100px", height = "100px"),
br(),
"You can access any plasma concentration by clicking on the",
img(src = "CaPO4_network/plasma.svg", width = "50px", height = "50px"),
" node. Besides, other compartments are available such as",
img(src = "CaPO4_network/parathyroid_gland_human.svg", width = "50px", height = "50px"),
img(src = "CaPO4_network/cells.svg", width = "50px", height = "50px"),
img(src = "CaPO4_network/bone.svg", width = "50px", height = "50px"),
"and", img(src = "CaPO4_network/rapid-bone.svg", width = "50px", height = "50px")
),
btn_labels = c(NULL, "Ok"),
type = "warning",
html = TRUE
)
}
})
# Introduction to treatments
observeEvent(input$diagnosis_answer, {
if (events$animation == 8) {
shinyjs::delay(
1000,
confirmSweetAlert(
session,
inputId = "treatments_intro",
title = "How to deal with treatments?",
text = tagList(
img(src = "treatments_img/pills.svg", width = "100px", height = "100px"),
br(),
column(
width = 12,
align = "center",
HTML(
"Now that you have posed your initial diagnostic, you may explore different treatment options.
For each:
<ol>
<li> Select the treatment in the timeline </li>
<li> Specify dosage and duration (if relevant) </li>
<li> Click on <img src='interface_img/add_treatment.svg' height='50' width='50'>
to add the treatment</li>
<li> Click on <img src='interface_img/run.svg' height='50' width='50'></li>
<li> You may visualize changes due to your last intervention in the top right panel </li>
<li> To visualize the entire simulation history, click on
<img src='interface_img/summary.svg' height='70' width='70'></li>
</ol>
You can perform several treatments. Note that interventions cannot
be erased from the timeline (i.e. you cannot go back in time).
But you can always start over and explore a different approach.
"
)
)
),
btn_labels = c(NULL, "Ok"),
type = "warning",
html = TRUE
)
)
    # increment by 1 to prevent this alert
    # from being displayed each time, since
    # the button is hidden when the counter equals 8
events$animation <- events$animation + 1
}
})
# increase the animation counter by 1 each time a new comment
# is added by the user
observeEvent(input$user_add_comment, {
events$animation <- events$animation + 1
})
# say that the animation is started when the user has clicked on next
observeEvent(events$animation , {
if (events$animation == 1) {
events$animation_started <- TRUE
}
})
# # warn the user when Calcium, PTH, vitamin D3 are above their physiological ranges
# observe({
# out <- out()
# # event only triggered if the user is logged in
# if (events$logged) {
#
# # Calcium conditions
# Cap_range <- (out[, "Ca_p"] > 1.1 && out[, "Ca_p"] < 1.3)
# # Pi conditions
# PO4p_range <- (out[, "PO4_p"] > 0.8 && out[, "PO4_p"] < 1.5)
# # PTH conditions
# PTHp_range <- (out[, "PTH_p"] > 8 && out[, "PTH_p"] < 51)
# # D3 conditions
# D3p_range <- (out[, "D3_p"] > 80 && out[, "D3_p"] < 700)
# # FGF23 conditions
# FGFp_range <- (out[, "FGF_p"] > 12 && out[, "FGF_p"] < 21)
#
# if (!Cap_range) {
# patient_feedback <- paste0(
# patient_feedback, p(" [Ca2+]p is out of bounds", class = "text-danger")
# )
# }
# if (!PO4p_range) {
# patient_feedback <- paste0(
# patient_feedback, p(" [Pi]p is out of bounds", class = "text-danger")
# )
# }
# if (!PTHp_range) {
# patient_feedback <- paste0(
# patient_feedback, p(" [PTH]p is out of bounds", class = "text-danger")
# )
# }
# if (!D3p_range) {
# patient_feedback <- paste0(
# patient_feedback, p(" [D3]p is out of bounds", class = "text-danger")
# )
# }
# if (!FGFp_range) {
# patient_feedback <- paste0(
# patient_feedback, p(" [FGF23]p is out of bounds", class = "text-danger")
# )
# }
#
# # send the alert message with all feedbacks
# sendSweetAlert(
# session,
# title = paste0("Oups ", input$user_name, " !"),
# text = HTML(paste0(
# "It seems that: ", patient_feedback,
# "You should do something!")
# ),
# type = "warning",
# html = TRUE
# )
# }
# })
# output$current_calcium <- renderUI({
# Ca_p <- round(out()[, "Ca_p"], 2)
# if (Ca_p > 1.1 && Ca_p < 1.3) {
# p(Ca_p)
# } else if (Ca_p < 1.1) {
# p(class = "text-danger", paste0("$$[Ca]$$"))
# } else {
# p(class = "text-success", Ca_p)
# }
# })
#-------------------------------------------------------------------------
# sidebar User panel: print name and date
#
#-------------------------------------------------------------------------
output$user_panel <- renderUI({
    # use invalidateLater to simulate a clock
invalidateLater(1000)
bs4SidebarUserPanel(
text = tags$small(paste(input$user_name, Sys.time())),
img = "https://image.flaticon.com/icons/svg/305/305983.svg"
)
})
#-------------------------------------------------------------------------
# Handle user comments
#
#-------------------------------------------------------------------------
# create the comment dataframe to store all comments
comments <- reactiveValues(
history = data.frame(
description = NULL,
date = NULL,
stringsAsFactors = FALSE
)
)
# each time the user add a new comment, add it to the table
observeEvent(input$user_add_comment, {
if (!is.null(input$user_comment)) {
temp_comment <- data.frame(
description = input$user_comment,
date = Sys.time(),
stringsAsFactors = FALSE
)
comments$history <- rbind(comments$history, temp_comment)
}
})
#-------------------------------------------------------------------------
  # This part handles events and plasma analyses triggered by the user,
  # as well as the export function to save the timeline events
#
#-------------------------------------------------------------------------
  # Set event parameters in reactiveValues so as to modify them later.
  # history stores all events whereas current corresponds to the last
  # event called in the stack
events <- reactiveValues(
history = data.frame(
id = NULL,
real_time = NULL,
event = NULL,
rate = NULL,
start_time = NULL,
stop_time = NULL,
status = NULL,
stringsAsFactors = FALSE
),
current = data.frame(
id = NULL,
real_time = NULL,
event = NULL,
rate = NULL,
start_time = NULL,
stop_time = NULL,
status = NULL,
stringsAsFactors = FALSE
),
counter = 1,
stop = FALSE,
answered = NULL,
PTX = FALSE,
logged = FALSE,
animation = 0,
animation_started = FALSE
)
# handle plasma analysis history
plasma_analysis <- reactiveValues(history = data.frame(stringsAsFactors = FALSE))
observeEvent(input$current_node_id, {
node_id <- input$current_node_id
if (node_id == 2) {
temp_plasma_analysis <- out()[nrow(out()), -1]
plasma_analysis$history <- rbind(plasma_analysis$history, temp_plasma_analysis)
}
})
observeEvent(input$add_treatment, {
if (!is.null(input$add_treatment)) {
# prevent plasma analysis from being done when PTX was already
# performed before
if (input$treatment_selected == "PTX" && isTRUE(events$PTX)) {
NULL
} else {
temp_plasma_analysis <- out()[nrow(out()), -1]
plasma_analysis$history <- rbind(plasma_analysis$history, temp_plasma_analysis)
}
}
})
# generate the slider corresponding to a given treatment
output$sliderInject <- renderUI({
req(input$treatment_selected)
generate_slider_events(input)
})
# plasma analysis events
observeEvent(input$current_node_id, {
node_id <- input$current_node_id
if (node_id == 2) {
if (nrow(events$history) == 0) {
temp_event <- data.frame(
id = events$counter,
real_time = Sys.time(),
event = "plasma analysis",
rate = "undefined",
start_time = "undefined",
stop_time = "undefined",
status = "active",
stringsAsFactors = FALSE
)
} else {
temp_event <- data.frame(
id = events$counter,
real_time = if (events$history[nrow(events$history), "event"] == "PTX" ||
events$history[nrow(events$history), "event"] == "plasma analysis") {
events$history[nrow(events$history), "real_time"]
# need to wait before the end of the previous event
} else {
          # calculate the time difference between the previous event's
          # end and when the user presses the add event button
dt <- difftime(
time1 = Sys.time(),
time2 = events$history[nrow(events$history), "real_time"] +
as.numeric(events$history[nrow(events$history), "stop_time"]),
units = c("mins"),
tz = Sys.timezone(location = TRUE)
)
          # if the user presses before the previous event is finished,
          # we consider that the next event happens just after
if (dt <= 0) {
events$history[nrow(events$history), "real_time"] +
as.numeric(events$history[nrow(events$history), "stop_time"])
            # otherwise, we consider the elapsed time plus the time
            # the event takes (t_stop)
} else {
Sys.time()
}
},
event = "plasma analysis",
rate = "undefined",
start_time = "undefined",
stop_time = "undefined",
status = "active",
stringsAsFactors = FALSE
)
}
events$history <- rbind(events$history, temp_event)
events$counter <- events$counter + 1
}
})
# Add treatments to the event list
observeEvent(input$add_treatment, {
# the same treatment can be added
# multiple times. However, parathyroidectomy
# cannot be performed more than once
if (input$treatment_selected != "PTX") {
if (nrow(events$history) == 0) {
temp_event <- data.frame(
id = events$counter,
real_time = Sys.time(),
event = input$treatment_selected,
rate = if (!(input$treatment_selected %in%
c("bisphosphonate", "furosemide", "cinacalcet"))) {
input[[paste(input$treatment_selected)]]
} else {
"undefined"
},
start_time = 0,
stop_time = input$t_stop,
status = "active",
stringsAsFactors = FALSE
)
} else {
temp_event <- data.frame(
id = events$counter,
# if PTX was performed before, we do not need to wait
real_time = if (events$history[nrow(events$history), "event"] == "PTX" ||
events$history[nrow(events$history), "event"] == "plasma analysis") {
events$history[nrow(events$history), "real_time"]
# need to wait before the end of the previous event
} else {
          # calculate the time difference between the previous event's
          # end and when the user presses the add event button
dt <- difftime(
time1 = Sys.time(),
time2 = events$history[nrow(events$history), "real_time"] +
as.numeric(events$history[nrow(events$history), "stop_time"]),
units = c("mins"),
tz = Sys.timezone(location = TRUE)
)
          # if the user presses before the previous event is finished,
          # we consider that the next event happens just after
if (dt <= 0) {
events$history[nrow(events$history), "real_time"] +
as.numeric(events$history[nrow(events$history), "stop_time"]) +
input$t_stop
            # otherwise, we consider the elapsed time plus the time
            # the event takes (t_stop)
} else {
Sys.time() + input$t_stop
}
},
event = input$treatment_selected,
rate = if (!(input$treatment_selected %in%
c("bisphosphonate", "furosemide", "cinacalcet"))) {
input[[paste(input$treatment_selected)]]
} else {
"undefined"
},
start_time = 0,
stop_time = input$t_stop,
status = "active",
stringsAsFactors = FALSE
)
}
events$history <- rbind(events$history, temp_event)
events$counter <- events$counter + 1
events$current <- temp_event
} else {
if (!isTRUE(events$PTX)) {
if (nrow(events$history) == 0) {
temp_event <- data.frame(
id = events$counter,
real_time = Sys.time(),
event = input$treatment_selected,
rate = "undefined",
start_time = "undefined",
stop_time = "undefined",
status = "active",
stringsAsFactors = FALSE
)
} else {
temp_event <- data.frame(
id = events$counter,
# if PTX was performed before, we do not need to wait
real_time = if (events$history[nrow(events$history), "event"] == "plasma analysis") {
events$history[nrow(events$history), "real_time"]
# need to wait before the end of the previous event
} else {
            # calculate the time difference between the previous event's
            # end and when the user presses the add event button
dt <- difftime(
time1 = Sys.time(),
time2 = events$history[nrow(events$history), "real_time"] +
as.numeric(events$history[nrow(events$history), "stop_time"]),
units = c("mins"),
tz = Sys.timezone(location = TRUE)
)
            # if the user presses before the previous event is finished,
            # we consider that the next event happens just after
if (dt < 0) {
events$history[nrow(events$history), "real_time"] +
as.numeric(events$history[nrow(events$history), "stop_time"])
              # otherwise, we consider the elapsed time plus the time
              # the event takes (t_stop)
} else {
Sys.time()
}
},
event = input$treatment_selected,
rate = "undefined",
start_time = "undefined",
stop_time = "undefined",
status = "active",
stringsAsFactors = FALSE
)
}
events$history <- rbind(events$history, temp_event)
events$counter <- events$counter + 1
events$PTX <- TRUE
} else {
showNotification(
"Cannot perform parathyroidectomy more than once!",
type = "error",
closeButton = TRUE
)
}
}
})
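  # Standalone sketch (not run) of the scheduling rule applied above: a new
  # event is stamped either right after the previous one ends (if the user
  # clicks too early) or at the current time. The 60 s offset is hypothetical.
  if (FALSE) {
    prev_end <- Sys.time() + 60 # previous event ends in 60 seconds
    dt <- difftime(Sys.time(), prev_end, units = "mins")
    new_time <- if (dt <= 0) prev_end else Sys.time()
  }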
  # flush the stack of current events
  # 1 second after the user clicks on run
observeEvent(input$run, {
shinyjs::delay(1000, {
events$current <- data.frame(
id = NULL,
real_time = NULL,
event = NULL,
rate = NULL,
start_time = NULL,
stop_time = NULL,
status = NULL,
stringsAsFactors = FALSE
)
})
})
#-------------------------------------------------------------------------
#
  # Integrate equations using the deSolve package to generate the out table.
  # out is a reactive intermediate component that is called
  # to make plots or other outputs. We use the compiled version of
  # the code to make computations faster
#
#-------------------------------------------------------------------------
  # will be used to save all out elements
out_history <- reactiveValues(
item = list(),
counter = 0,
summary = data.frame()
)
out <- reactive({
input$run
isolate({
parameters <- parameters()
times <- times()
# always solve from the last state
as.data.frame(
ode(
# when opening the application, y will be state_0 since states$val
# is an empty list. However, for the next runs, states$val is
# populated with the last simulated final state and so on
# each time the user press run
y = if (is_empty(states$val)) {
patient_state_0
} else {
states$val[[length(states$val)]]
},
times = times,
func = "derivs",
parms = parameters,
dllname = "compiled_core",
initfunc = "initmod",
nout = 33,
outnames = c(
"U_Ca", "U_PO4", "Abs_int_Ca",
"Abs_int_PO4", "Res_Ca", "Res_PO4",
"Ac_Ca", "Ac_PO4", "Reabs_Ca", "Reabs_PO4",
"Ca_pf", "Ca_fp", "PO4_pf", "PO4_fp",
"PO4_pc", "PO4_cp", "PTHg_synth",
"PTHg_synth_D3", "PTHg_synth_PO4",
"PTHg_exo_CaSR", "PTHg_deg", "PTHg_exo",
"PTHp_deg", "Reabs_PT_PTH",
"Reabs_TAL_CaSR", "Reabs_TAL_PTH",
"Reabs_DCT_PTH", "Reabs_DCT_D3",
"Abs_int_D3", "Res_PTH", "Res_D3",
"Reabs_PT_PO4_PTH", "Reabs_PT_PO4_FGF"
)
)
)
})
})
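  # Shape of the underlying call (a sketch, not run): deSolve integrates the
  # compiled model from a given state. "compiled_core" and "initmod" are the
  # names used above; parms_vector stands for a named parameter vector and is
  # an assumption, since parameters() needs a reactive context.
  if (FALSE) {
    deSolve::ode(
      y = patient_state_0,          # initial state from the patient file
      times = seq(0, 500, by = 1),  # hypothetical time horizon
      func = "derivs",
      parms = parms_vector,
      dllname = "compiled_core",
      initfunc = "initmod",
      nout = 33
    )
  }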
  # update initial conditions to the last state of the system each time an event
  # has occurred. The update needs to be delayed by the time required for the
  # computation, which is not obvious since we don't know exactly how long it will take.
observe({
input$run
shinyjs::delay(1000, {
out <- out()
temp_state <- c(
"PTH_g" = out[nrow(out),"PTH_g"],
"PTH_p" = out[nrow(out),"PTH_p"],
"D3_p" = out[nrow(out),"D3_p"],
"FGF_p" = out[nrow(out),"FGF_p"],
"Ca_p" = out[nrow(out),"Ca_p"],
"Ca_f" = out[nrow(out),"Ca_f"],
"Ca_b" = out[nrow(out),"Ca_b"],
"PO4_p" = out[nrow(out),"PO4_p"],
"PO4_f" = out[nrow(out),"PO4_f"],
"PO4_b" = out[nrow(out),"PO4_b"],
"PO4_c" = out[nrow(out),"PO4_c"],
"CaHPO4_p" = out[nrow(out),"CaHPO4_p"],
"CaH2PO4_p" = out[nrow(out),"CaH2PO4_p"],
"CPP_p" = out[nrow(out),"CPP_p"],
"CaHPO4_f" = out[nrow(out),"CaHPO4_f"],
"CaH2PO4_f" = out[nrow(out),"CaH2PO4_f"],
"CaProt_p" = out[nrow(out),"CaProt_p"],
"NaPO4_p" = out[nrow(out),"NaPO4_p"],
"Ca_tot" = out[nrow(out),"Ca_tot"],
"PO4_tot" = out[nrow(out),"PO4_tot"],
"EGTA_p" = out[nrow(out),"EGTA_p"],
"CaEGTA_p" = out[nrow(out),"CaEGTA_p"]
)
states$counter <- states$counter + 1
states$val[[states$counter]] <- temp_state
states$name <- input$treatment_selected
})
})
  # when the user clicks on summary, display the patient overview modal
observeEvent(input$summary, {
showModal(
modalDialog(
title = fluidRow(
column(
width = 9,
align = "left",
p(style = "text-align: center;", "Overview of your patient")
),
column(
width = 3,
align = "right",
tags$button(
type = "button",
class = "btn btn-default float-right",
`data-dismiss` = "modal",
icon("close"),
"Dismiss"
)
)
),
fluidRow(
column(
width = 12,
align = "center",
bs4TabSetPanel(
id = "tabset1",
side = "left", # generate the 5 plots
.list = lapply(1:length(summary_plot_names), FUN = function(i) {
name <- summary_plot_names[[i]]
bs4TabPanel(
tabName = name,
active = if (i == 1) TRUE else FALSE,
withSpinner(
plotlyOutput(paste0("plot_summary_", name)),
size = 2,
type = 8,
color = "#000000"
)
)
})
)
)
),
size = "m",
footer = NULL
)
)
})
# out_summary <- eventReactive(input$summary, {
# if (nrow(events$history) >= 2) {
# times <- as.list(events$history[, "real_time"])
# len <- length(times)
# delta_t <- lapply(2:len, FUN = function(i) {
# difftime(
# time1 = times[[i]],
# time2 = times[[i - 1]],
# units = c("secs"),
# tz = Sys.timezone(location = TRUE)
# )
# })
#
#
#
# }
#
# })
  # cumulative data
datas_summary <- reactive({
datas <- out_history$summary %>%
filter(time %% 50 == 0) %>%
accumulate_by(~time)
# add bounds for each variable
low_norm_Ca_p <- data.frame(low_norm_Ca_p = rep(1.1, length(datas[, "time"])))
high_norm_Ca_p <- data.frame(high_norm_Ca_p = rep(1.3, length(datas[, "time"])))
low_norm_PO4_p <- data.frame(low_norm_PO4_p = rep(0.8, length(datas[, "time"])))
high_norm_PO4_p <- data.frame(high_norm_PO4_p = rep(1.5, length(datas[, "time"])))
low_norm_PTH_p <- data.frame(low_norm_PTH_p = rep(1.5, length(datas[, "time"])))
high_norm_PTH_p <- data.frame(high_norm_PTH_p = rep(7, length(datas[, "time"])))
low_norm_D3_p <- data.frame(low_norm_D3_p = rep(50, length(datas[, "time"])))
high_norm_D3_p <- data.frame(high_norm_D3_p = rep(180, length(datas[, "time"])))
low_norm_FGF_p <- data.frame(low_norm_FGF_p = rep(8, length(datas[, "time"])))
high_norm_FGF_p <- data.frame(high_norm_FGF_p = rep(51, length(datas[, "time"])))
# bind all values
datas <- cbind(
datas,
low_norm_Ca_p,
high_norm_Ca_p,
low_norm_PO4_p,
high_norm_PO4_p,
low_norm_PTH_p,
high_norm_PTH_p,
low_norm_D3_p,
high_norm_D3_p,
low_norm_FGF_p,
high_norm_FGF_p
)
})
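  # accumulate_by() is defined elsewhere in the app; for reference, a
  # minimal sketch of the usual plotly cumulative-frame helper it is
  # assumed to match (takes a one-sided formula such as ~time and
  # replicates all rows up to each level into a 'frame' column used by
  # the animation):
  if (FALSE) {
    accumulate_by <- function(dat, var) {
      var <- lazyeval::f_eval(var, dat)
      lvls <- sort(unique(var))
      dplyr::bind_rows(lapply(lvls, function(lvl) {
        cbind(dat[var <= lvl, , drop = FALSE], frame = lvl)
      }))
    }
  }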
# cumulative plot (5 plots)
lapply(1:length(summary_plot_names), FUN = function(i) {
name <- summary_plot_names[[i]]
output[[paste0("plot_summary_", name)]] <- renderPlotly({
if (nrow(out_history$summary) >= 1) {
plot_ly(
datas_summary(),
x = datas_summary()[, "time"],
y = if (name == "PTH_p") {
datas_summary()[, name] * 100
} else if (name == "D3_p") {
datas_summary()[, name] / 4
} else if (name == "FGF_p") {
datas_summary()[, name] / 16.8 * 32
} else {
datas_summary()[, name]
},
name = if (name %in% c("Ca_p", "PO4_p")) {
paste0(name, " (mM)")
} else if (name == "FGF_p") {
paste0(name, " (pg/mL)")
} else {
paste0(name, " (pM)")
},
frame = ~frame,
type = 'scatter',
mode = 'lines',
line = list(
              simplify = FALSE,
color = if (name == "Ca_p") {
'rgb(27, 102, 244)'
} else if (name == "PO4_p") {
'rgb(244, 27, 27)'
} else {
'black'
}
)
) %>%
add_lines(
y = datas_summary()[, paste0("low_norm_", name)],
frame = ~frame,
name = if (name %in% c("Ca_p", "PO4_p")) {
paste0("Low ", name, " bound (mM)")
} else {
paste0("Low ", name, " bound (pM)")
},
line = list(
color = 'rgb(169,169,169)',
width = 4,
dash = 'dash'
)
) %>%
add_lines(
y = datas_summary()[, paste0("high_norm_", name)],
frame = ~frame,
name = if (name %in% c("Ca_p", "PO4_p")) {
paste0("High ", name, " bound (mM)")
} else {
paste0("High ", name, " bound (pM)")
},
line = list(
color = 'rgb(169,169,169)',
width = 4,
dash = 'dot'
)
) %>%
layout(
xaxis = list(
title = "time (min)",
zeroline = FALSE
),
yaxis = list(
title = if (name %in% c("Ca_p", "PO4_p")) {
paste0(name, " (mM)")
} else {
paste0(name, " (pM)")
},
zeroline = FALSE
),
            showlegend = !input$isMobile
) %>%
animation_opts(
# animation speed (the lower, the faster)
frame = 5,
transition = 0,
redraw = FALSE
) %>%
animation_slider(
hide = FALSE
) %>%
config(displayModeBar = FALSE)
}
})
})
  # each time the user clicks on run, the history is saved
observeEvent(input$run, {
out <- out()
len <- length(out_history$item)
if (len >= 1) {
# translate all time by the number of time points
# in the previous run + 1
out_history$counter <- out_history$counter + nrow(out_history$item[[len]])
out[, "time"] <- out[, "time"] + out_history$counter
}
out_history$item[[len + 1]] <- out
# merge all dataframe into a big one
out_history$summary <- bind_rows(out_history$item)
})
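  # Illustrative sketch with hypothetical numbers: if run 1 produced times
  # 0..500 (501 rows) and run 2 again returns times 0..500, the counter
  # shifts run 2 to 501..1001 before row-binding, giving the summary a
  # single, strictly increasing time axis:
  if (FALSE) {
    run1 <- data.frame(time = 0:500, Ca_p = rnorm(501))
    run2 <- data.frame(time = 0:500, Ca_p = rnorm(501))
    counter <- nrow(run1)            # 501
    run2$time <- run2$time + counter # times become 501..1001
    summary_df <- dplyr::bind_rows(run1, run2)
  }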
#-------------------------------------------------------------------------
#
  # The network part: make interactive diagrams of Ca and PO4 homeostasis
  # as well as their regulation by hormones such as PTH, vitamin D3 and FGF23
#
#-------------------------------------------------------------------------
# Generate the CaP Graph network
nodes_Ca <- reactive({generate_nodes_Ca(input)})
edges_Ca <- reactive({generate_edges_Ca(input)})
# Generate the output of the Ca graph to be used in body
output$network_Ca <- renderVisNetwork({
nodes_Ca <- nodes_Ca()
edges_Ca <- edges_Ca()
input$network_hormonal_choice
generate_network(
input,
nodes = nodes_Ca,
edges = edges_Ca,
usephysics = TRUE
) %>%
      # simple click event to allow graph plotting
visEvents(
selectNode = "
function(nodes) {
Shiny.onInputChange('current_node_id', nodes.nodes);
}"
) %>%
# unselect node event
visEvents(
deselectNode = "
function(nodes) {
Shiny.onInputChange('current_node_id', 'null');
}"
) %>%
# add the doubleclick function to handle zoom views
visEvents(
doubleClick = "
function(nodes) {
Shiny.onInputChange('current_node_bis_id', nodes.nodes);
}"
) %>%
visEvents(
selectEdge = "
function(edges) {
Shiny.onInputChange('current_edge_id', edges.edges);
}"
) %>%
visEvents(
deselectEdge = "
function(edges) {
Shiny.onInputChange('current_edge_id', 'null');
}"
) %>%
# very important: change the whole graph position after drawing
visEvents(
type = "on",
stabilized = "
function() {
this.moveTo({
position: {x:0, y:-13.43},
offset: {x: 0, y:0}
});
}"
) %>%
      # very important: allows detecting the web browser used by the client;
      # runs before drawing the network. Works with find_navigator.js
visEvents(
type = "on",
initRedraw = paste0("
function() {
this.moveTo({scale:", if (input$isMobile) 0.3 else 0.6, "});
}")
) # to set the initial zoom (1 by default)
})
  # Events for the CaPO4 homeostasis diagram whenever a flux changes.
  # Change arrow colors relative to the flux values for Ca injection,
  # PO4 injection, as well as PO4 gavage
observe({
out <- out()
edges_Ca <- edges_Ca()
arrow_lighting_live(
out,
edges = edges_Ca,
session,
t_target = input$t_now
)
})
# change the selected node size to better highlight it
last <- reactiveValues(selected_node = NULL, selected_edge = NULL)
observeEvent(input$current_node_id, {
req(input$current_node_id)
selected_node <- input$current_node_id
nodes_Ca <- nodes_Ca()
    # JavaScript returns null instead of NULL,
    # so we cannot use is.null()
if (!identical(selected_node, "null")) {
last$selected_node <- selected_node
# organ nodes
if (selected_node %in% c(1:5, 7:8, 11)) {
nodes_Ca$size[selected_node] <- 100
# Kidney zoom node
} else if (selected_node == 6) {
nodes_Ca$size[selected_node] <- 214
# regulation nodes
} else {
nodes_Ca$size[selected_node] <- 57
}
visNetworkProxy("network_Ca") %>%
visUpdateNodes(nodes = nodes_Ca)
# reset the node size when unselected
} else {
if (last$selected_node %in% c(1:5, 7:8, 11)) {
nodes_Ca$size[last$selected_node] <- 70
} else if (last$selected_node == 6) {
nodes_Ca$size[last$selected_node] <- 150
} else {
nodes_Ca$size[last$selected_node] <- 40
}
visNetworkProxy("network_Ca") %>%
visUpdateNodes(nodes = nodes_Ca)
}
})
# change the selected edge size to
# better highlight it
  observeEvent(input$current_edge_id, {
req(input$current_edge_id)
selected_edge <- input$current_edge_id
edges_Ca <- edges_Ca()
edge_id <- match(selected_edge, edges_Ca$id)
if (!identical(selected_edge, "null")) {
last$selected_edge <- edge_id
# organs edges
if (edge_id %in% c(1:12)) {
edges_Ca$width[edge_id] <- 24
# regulations edges
} else {
edges_Ca$width[edge_id] <- 12
}
visNetworkProxy("network_Ca") %>%
visUpdateEdges(edges = edges_Ca)
# reset the edge size when unselected
} else {
if (edge_id %in% c(1:12)) {
edges_Ca$width[edge_id] <- 8
} else {
edges_Ca$width[edge_id] <- 4
}
visNetworkProxy("network_Ca") %>%
visUpdateEdges(edges = edges_Ca)
}
})
# handle the size of organ and hormonal nodes
output$size_nodes_organs <- renderUI({
req(!is.null(input$isMobile))
knobInput(
"size_organs",
"Organs",
min = 50,
max = 100,
value = if (input$isMobile) 85 else 70,
step = 5,
displayPrevious = TRUE,
fgColor = "#A9A9A9",
inputColor = "#A9A9A9",
skin = "tron",
width = if (input$isMobile) "75px" else "100px",
height = if (input$isMobile) "75px" else "100px"
)
})
output$size_nodes_hormones <- renderUI({
req(!is.null(input$isMobile))
knobInput(
"size_hormones",
"Hormones",
min = 20,
max = 60,
value = if (input$isMobile) 60 else 40,
step = 5,
displayPrevious = TRUE,
fgColor = "#A9A9A9",
inputColor = "#A9A9A9",
skin = "tron",
width = if (input$isMobile) "75px" else "100px",
height = if (input$isMobile) "75px" else "100px"
)
})
# control width of arrows
output$width_arrows_organs <- renderUI({
req(!is.null(input$isMobile))
knobInput(
"width_organs",
"Organs",
angleOffset = -90,
angleArc = 180,
min = 4,
max = 14,
value = 8,
step = 1,
displayPrevious = TRUE,
fgColor = "#A9A9A9",
inputColor = "#A9A9A9",
skin = NULL,
width = if (input$isMobile) "75px" else "100px",
height = if (input$isMobile) "75px" else "100px"
)
})
output$width_arrows_hormones <- renderUI({
req(!is.null(input$isMobile))
knobInput(
"width_hormones",
"Hormones",
angleOffset = -90,
angleArc = 180,
min = 1,
max = 8,
value = 4,
step = 1,
displayPrevious = TRUE,
fgColor = "#A9A9A9",
inputColor = "#A9A9A9",
skin = NULL,
width = if (input$isMobile) "75px" else "100px",
height = if (input$isMobile) "75px" else "100px"
)
})
#-------------------------------------------------------------------------
#
# The graph part: calls out(), parameters_bis()
# Interactive graph as a result of click on the diagram
#
#-------------------------------------------------------------------------
# Generate a graph when node is clicked.
# The graph corresponds to the node clicked
output$plot_node <- renderPlotly({
validate(need(input$current_node_id, "Select one node on the graph!"))
out <- out()
plot_node(input, node = input$current_node_id , out, parameters_fixed)
})
output$plot_edge <- renderPlotly({
validate(need(input$current_edge_id, "Select one edge on the graph!"))
out <- out()
plot_edge(edge = input$current_edge_id , out)
})
#-------------------------------------------------------------------------
#
# Handle dangerous parameter values by the user
#
#-------------------------------------------------------------------------
  # prevent the user from entering an unbounded value for the maximum
  # integration time. With compiled code, tmax = 100000 min is acceptable
observeEvent(input$tmax,{
# critical value for tmax
feedbackWarning(
inputId = "tmax",
show = !is.na(input$tmax),
text = "tmax should exist and set between 1 and 100000."
)
    # check whether input tmax is missing or not numeric
if (is.na(input$tmax)) {
sendSweetAlert(
session,
title = "Ooops ...",
text = "Invalid value: tmax should be set correctly.",
type = "error"
)
reset("tmax") # value is reset
} else {
      # otherwise, check whether it is non-positive
if (input$tmax <= 0) {
sendSweetAlert(
session,
title = "Ooops ...",
text = "Invalid value: tmax must be higher than 0.",
type = "error"
)
reset("tmax") # value is reset
# check whether it is too high
} else if (input$tmax > 100000) {
sendSweetAlert(
session,
title = "Ooops ...",
text = "Invalid value: the maximum
time of simulation is too high!",
type = "error"
)
reset("tmax") # value is reset
}
}
})
#-------------------------------------------------------------------------
#
# Useful tasks such as save, reset, load ...
#
#-------------------------------------------------------------------------
# reset parameters individually
button_states <- reactiveValues(values = list())
observeEvent(input$reset_t_now,{
# call the function to reset the given slider
sliders_reset(button_states, input)
})
# disable the summary button as long as input$run is lower than 1
observe({
if (!is.null(input$run)) {
toggleState(id = "summary", condition = input$run >= 1)
}
})
  # make the run button blink when a new event is added
  # and stop blinking when run is pressed
observeEvent(input$add_treatment, {
addClass(id = "run", class = "run_glowing_blue")
})
observeEvent(input$run, {
removeClass(id = "run", class = "run_glowing_blue")
})
  # make the summary button blink once run has been pressed at least once
observeEvent(input$run, {
addClass(id = "summary", class = "run_glowing_purple")
})
observeEvent(input$summary, {
removeClass(id = "summary", class = "run_glowing_purple")
})
  # make the comment button glow until the user adds a comment
observeEvent(input$diagnosis_intro, {
addClass(id = "user_add_comment", class = "run_glowing_green")
})
observeEvent(input$add_user_comment, {
removeClass(id = "user_add_comment", class = "run_glowing_green")
})
  # make diagnosis blink when 5 min remain before the app closes,
  # only if it exists (if the user never clicked on next,
  # diagnosis does not exist!)
# observe({
# if (countdown() <= 5) {
# if (!is_empty(input$diagnosis)) {
# if (input$diagnosis == 0) {
# addClass(id = "diagnosis", class = "run_glowing_blue")
# }
# }
# }
# })
observe({
if (!is_empty(input$diagnosis)) {
if (input$diagnosis > 0) {
removeClass(id = "diagnosis", class = "run_glowing_blue")
}
}
})
  # prevent the user from selecting multiple treatments at the same time
observe({
if (!is.null(input$treatment_selected)) {
treatment <- match.arg(input$treatment_selected, treatment_choices)
idx <- match(input$treatment_selected, treatment_choices)
other_treatments <- treatment_choices[-idx]
lapply(seq_along(other_treatments), FUN = function(j) {
disable(selector = paste0("#treatment_selected input[value='", other_treatments[[j]], "']"))
})
} else {
enable(id = "treatment_selected")
}
})
  # display or hide the network background
observe({
    # invalidateLater ensures the background class is
    # applied after application startup
invalidateLater(1000, session)
if (!is_empty(input$background_choice)) {
if (input$background_choice == "rat") {
addClass(id = "network_cap", class = "network_caprat")
removeClass(id = "network_cap", class = "network_caphuman")
} else {
removeClass(id = "network_cap", class = "network_caprat")
addClass(id = "network_cap", class = "network_caphuman")
}
} else {
addClass(id = "network_cap", class = "network_capnone")
removeClass(id = "network_cap", class = "network_caphuman")
removeClass(id = "network_cap", class = "network_caprat")
}
})
  # prevent the user from selecting multiple backgrounds
observe({
if (is.element("rat", input$background_choice) &&
!is.element("human", input$background_choice)) {
disable(selector = "#background_choice input[value='human']")
} else {
enable(selector = "#background_choice input[value='human']")
}
if (is.element("human", input$background_choice) &&
!is.element("rat", input$background_choice)) {
disable(selector = "#background_choice input[value='rat']")
} else {
enable(selector = "#background_choice input[value='rat']")
}
})
  # when enable regulation is selected, activate all the checkboxes
  # (the reverse case does not work, for an unknown reason)
observeEvent(input$network_hormonal_choice, {
if (input$network_hormonal_choice == TRUE) {
updatePrettyCheckboxGroup(
session,
inputId = "network_Ca_choice",
selected = c("Ca","PO4", "PTH", "D3", "FGF23")
)
}
})
}
|
/scratch/gouwar.j/cran-all/cranData/CaPO4Sim/inst/virtual_patient_simulator/bs4/server.R
|
#-------------------------------------------------------------------------
# This code contains the sidebar of the bs4Dash application.
#
# David Granjon, the Interface Group, Zurich
# December 4th, 2017
#-------------------------------------------------------------------------
sidebar <- bs4DashSidebar(
title = HTML("<small>Virtual Patient</small>"),
skin = "light",
status = "primary",
brandColor = NULL,
url = "http://physiol-seafile.uzh.ch/",
src = "logos/online-learning.png",
elevation = 4,
opacity = 0.8,
# user panel info
uiOutput("user_panel"),
# sidebar menu with 2 tabs
bs4SidebarMenu(
bs4SidebarMenuItem(
"App",
tabName = "main",
icon = "home"
),
bs4SidebarMenuItem(
"About",
tabName = "about",
icon = "info-circle"
)
)
)
|
/scratch/gouwar.j/cran-all/cranData/CaPO4Sim/inst/virtual_patient_simulator/bs4/sidebar.R
|
#-------------------------------------------------------------------------
# This UI code contains the global UI of the application. It assembles the
# navbar, sidebar, body, footer and controlbar, and loads JavaScript
# libraries such as shinyjs, extendShinyjs and MathJax
#
# David Granjon, the Interface Group, Zurich
# December 4th, 2017
#
# bsplus only works with R > 3.3, so be sure to update R before installing
# other packages. On shiny-server, always install R packages by running R in the
# shiny folder. Put the app in src/shiny-server/myApp and access via:
# server_ip:3838/myApp
#
#-------------------------------------------------------------------------
# Define UI
#header_box_network,
ui <- bs4DashPage(
# options
enable_preloader = TRUE,
sidebar_collapsed = TRUE,
controlbar_collapsed = TRUE,
title = "Virtual Patient Simulator",
# content
navbar = navbar,
sidebar = sidebar,
body = body,
footer = footer,
controlbar = dashboardControlbar
)
|
/scratch/gouwar.j/cran-all/cranData/CaPO4Sim/inst/virtual_patient_simulator/bs4/ui.R
|
### Copyright (C) 2004-2007 Simon Urbanek
### License: GPL v2
### mapping of supported type names to canonical type names
### as of 1.3-2 png/png24/png32 are the same (we don't support png8 anyway)
.supported.types <- c(png="png",png24="png",png32="png",jpeg="jpeg",jpg="jpeg",tiff="tiff",tif="tiff",
pdf="pdf",svg="svg",ps="ps",postscript="ps",x11="x11",xlib="x11",
win="win",win32="win",window="win",windows="win",w32="win",raster="raster")
Cairo <- function(width=640, height=480, file="", type="png", pointsize=12, bg="transparent", canvas="white", units="px", dpi="auto", ...) {
ctype <- tolower(type)
if (!ctype %in% names(.supported.types))
stop("Unknown output type `",type,"'.")
ctype <- .supported.types[ctype==names(.supported.types)]
if (is.null(file) || !nchar(file))
file <- if (ctype != 'x11') paste("plot.",ctype,sep='') else Sys.getenv("DISPLAY")
if (is.character(file) && length(file) != 1)
stop("file must be a character vector of length 1 or a connection")
else if (inherits(file,"connection") && (summary(file)$opened != "opened" || summary(file)$"can write" != "yes"))
stop("connection must be open and writeable")
if (is.character(file)) file <- path.expand(file)
if (length(units)!=1 || ! units %in% c("px","pt","in","cm","mm"))
stop("invalid unit (supported are px, pt, in, cm and mm)")
## res is used in bitmap wrappers to set dpi
## the default is NA so we only honor it if it's set to a non-default value
res <- list(...)$res
if (!is.null(res) && all(!is.na(res))) dpi <- res
if (any(dpi=="auto" || dpi=="")) dpi <- 0
if (length(dpi)!=1 || !is.numeric(dpi) || dpi<0)
stop("invalid dpi specification (must be 'auto' or a positive number)")
dpi <- as.double(dpi)
## unit multiplier: >0 mpl to get inches, <0 mpl to get device pixels
umpl <- as.double(c(-1, 1/72, 1, 1/2.54, 1/25.4)[units==c("px","pt","in","cm","mm")])
gdn<-.External("cairo_create_new_device", as.character(ctype), file, width, height, pointsize, bg, canvas, umpl, dpi, ..., PACKAGE="Cairo")
par(bg=bg)
invisible(structure(gdn,class=c("Cairo",paste("Cairo",toupper(ctype),sep='')),type=as.character(ctype),file=file))
}
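## Illustrative usage sketch (assumes a writable working directory and a
## hypothetical output file name):
if (FALSE) {
  Cairo(width = 800, height = 600, file = "demo.png", type = "png", dpi = 96)
  plot(1:10)
  dev.off()
}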
Cairo.capabilities <- function() {
ust <- c(unique(.supported.types), "freetype", "harfbuzz")
cap <- !is.na(match(ust, .Call("Rcairo_supported_types", PACKAGE="Cairo")))
names(cap) <- ust
cap
}
###-------------- supporting functions -----------------
CairoFontMatch <- function(fontpattern="Helvetica",sort=FALSE,verbose=FALSE) {
if (typeof(fontpattern) != "character")
stop("fontname must be a character vector of length 1")
if (typeof(sort) != "logical")
stop("sort option must be a logical")
if (typeof(verbose) != "logical")
stop("verbose option must be a logical")
invisible(.External("cairo_font_match",fontpattern,sort,verbose,PACKAGE="Cairo"))
}
CairoFonts <- function(
regular="Helvetica:style=Regular",
bold="Helvetica:style=Bold",
italic="Helvetica:style=Italic",
bolditalic="Helvetica:style=Bold Italic,BoldItalic",
symbol="Symbol", usePUA=TRUE) {
if (!is.null(regular) && typeof(regular) != "character")
stop("regular option must be a character vector of length 1")
if (!is.null(bold) && typeof(bold) != "character")
stop("bold option must be a character vector of length 1")
if (!is.null(italic) && typeof(italic) != "character")
stop("italic option must be a character vector of length 1")
if (!is.null(bolditalic) && typeof(bolditalic) != "character")
stop("bolditalic option must be a character vector of length 1")
if (!is.null(symbol) && typeof(symbol) != "character")
stop("symbol option must be a character vector of length 1")
invisible(.External("cairo_font_set", regular, bold, italic, bolditalic,
symbol, usePUA, PACKAGE="Cairo"))
}
###-------------- convenience wrapper functions -----------------
CairoX11 <- function(display=Sys.getenv("DISPLAY"), width = 7, height = 7, pointsize = 12,
gamma = getOption("gamma"), bg = "transparent", canvas = "white",
xpos = NA, ypos = NA, ...) {
Cairo(width, height, file=display, type='x11', pointsize=pointsize, bg=bg, units="in", ...)
}
CairoPNG <- function(filename = "Rplot%03d.png", width = 480, height = 480,
pointsize = 12, bg = "white", res = NA, ...) {
Cairo(width, height, type='png', file=filename, pointsize=pointsize, bg=bg, res=res, ...)
}
CairoTIFF <- function(filename = "Rplot%03d.tiff", width = 480, height = 480,
pointsize = 12, bg = "white", res = NA, ...) {
Cairo(width, height, type='tiff', file=filename, pointsize=pointsize, bg=bg, res=res, ...)
}
CairoJPEG <- function(filename = "Rplot%03d.jpeg", width = 480, height = 480,
pointsize = 12, quality = 75, bg = "white", res = NA, ...) {
Cairo(width, height, type='jpeg', file=filename, pointsize=pointsize, bg=bg, quality=quality, res=res, ...)
}
CairoPDF <- function(file = ifelse(onefile, "Rplots.pdf", "Rplot%03d.pdf"),
width = 6, height = 6, onefile = TRUE, family = "Helvetica",
title = "R Graphics Output", fonts = NULL,
paper = "special", encoding, bg, fg, pointsize, pagecentre, ...) {
if (!onefile) stop("Sorry, PDF backend of Cairo supports onefile=TRUE only")
if (missing(pointsize)) pointsize <- 12
if (missing(bg)) bg <- "white"
Cairo(width, height, file, "pdf", pointsize=pointsize, bg=bg, units="in", title = title, ...)
}
CairoSVG <- function(file = ifelse(onefile, "Rplots.svg", "Rplot%03d.svg"),
width = 6, height = 6, onefile = TRUE, bg = "transparent",
pointsize = 12, ...) {
if (!onefile) stop("Sorry, SVG backend of Cairo supports onefile=TRUE only")
Cairo(width, height, type='svg', file=file, pointsize=pointsize, bg=bg, units='in', ...)
}
CairoPS <- function(file = ifelse(onefile, "Rplots.ps", "Rplot%03d.ps"),
onefile = TRUE, family,
title = "R Graphics Output", fonts = NULL,
encoding, bg, fg,
width, height, horizontal, pointsize,
paper, pagecentre, print.it, command, colormodel) {
if (!onefile) stop("Sorry, PostScript backend of Cairo supports onefile=TRUE only")
if (missing(pointsize)) pointsize <- 12
if (missing(bg)) bg <- "white"
# the following are different from R's postscript defaults!
# the PS device uses page dimensions, we don't
if (missing(width)) width <- 8
if (missing(height)) height <- 6
Cairo(width, height, file, "ps", pointsize=pointsize, bg=bg, units="in")
}
CairoWin <- function(width = 7, height = 7, pointsize = 12,
record = getOption("graphics.record"),
rescale = c("R", "fit", "fixed"), xpinch, ypinch,
bg = "transparent", canvas = "white",
gamma = getOption("gamma"), xpos = NA, ypos = NA,
buffered = getOption("windowsBuffered"),
restoreConsole = FALSE, ...) {
Cairo(width, height, '', 'win', pointsize=pointsize, bg=bg, units="in", ...)
}
Cairo.serial <- function(device = dev.cur()) .Call("Cairo_get_serial", device, PACKAGE="Cairo")
Cairo.onSave <- function(device = dev.cur(), onSave) .Call("Cairo_set_onSave", device, onSave, PACKAGE="Cairo")
Cairo.capture <- function(device = dev.cur()) .Call("Rcairo_capture", device, PACKAGE="Cairo")
Cairo.snapshot <- function(device = dev.cur(), last=FALSE) {
res <- if (is.na(last)) {
res <- .Call("Rcairo_snapshot", device, FALSE, PACKAGE="Cairo")
if (is.null(res[[1]])) .Call("Rcairo_snapshot", device, TRUE, PACKAGE="Cairo") else res
} else .Call("Rcairo_snapshot", device, last, PACKAGE="Cairo")
attr(res, "pid") <- Sys.getpid()
class(res) <- "recordedplot"
res
}
|
/scratch/gouwar.j/cran-all/cranData/Cairo/R/Cairo.R
|
.image <- function(device) {
a <- .Call("get_img_backplane", device, PACKAGE="Cairo")
names(a) <- c('ref', 'info')
a$width <- a[[2]][1]
a$height <- a[[2]][2]
a$format <- c("ARGB","RGB","A8","A1","dep","RGB16")[a[[2]][3]+1]
class(a) <- "CairoImageRef"
a
}
.ptr.to.raw <- function(ptr, begin, length)
.Call("ptr_to_raw", ptr, begin, length, PACKAGE="Cairo")
.raw.to.ptr <- function(ptr, offset=0, raw, begin=0, length=length(raw))
invisible(.Call("raw_to_ptr", ptr, offset, raw, begin, length, PACKAGE="Cairo"))
|
/scratch/gouwar.j/cran-all/cranData/Cairo/R/imgtools.R
|
.onLoad <- function(libname, pkgname) {
## add our libs to the PATH
if (.Platform$OS.type=="windows") {
lp<-gsub("/","\\\\",paste(libname,pkgname,"libs",sep="/"))
cp<-strsplit(Sys.getenv("PATH"),";")
if (! lp %in% cp) Sys.setenv(PATH=paste(lp,Sys.getenv("PATH"),sep=";"))
}
library.dynam("Cairo", pkgname, libname)
.Call("Rcairo_initialize", PACKAGE="Cairo")
}
|
/scratch/gouwar.j/cran-all/cranData/Cairo/R/zzz.R
|
#' @title Calibration Simplex
#' @aliases CalSim
#'
#' @description Generates an object of class \code{calibration_simplex} which can be used to assess the calibration
#' of ternary probability forecasts. The Calibration Simplex can be seen as generalization of the reliability diagram
#' for binary probability forecasts. For details on the interpretation of the calibration simplex, see Wilks (2013). Be
#' aware that some minor changes have been made compared to the calibration simplex as suggested by Wilks (2013) (see note below).
#'
#' As a somewhat experimental feature, multinomial p-values can be used for uncertainty quantification, that is, as a tool
#' to judge whether the observed discrepancies may be merely coincidental or whether the predictions may in fact be miscalibrated, see Resin (2020, Section 4.2).
#'
#' @param n A natural number controlling the resolution of the simplex; the simplex is partitioned into n*(n+1)/2 hexagonal bins.
#' @param p1 A vector containing the forecasted probabilities for the first (1) category, e.g. below-normal.
#' @param p2 A vector containing the forecasted probabilities for the second (2) category, e.g. near-normal.
#' @param p3 A vector containing the forecasted probabilities for the third (3) category, e.g. above-normal.
#' @param obs A vector containing the observed outcomes (Categories are encoded as 1 (e.g. below-normal), 2 (e.g. near-normal) and 3 (e.g. above-normal)).
#' @param test_stat A string indicating which test statistic is to be used for the multinomial test in each bin.
#' Options are "LLR" (log-likelihood ratio; default), "Chisq" (Pearson's chi-square) and "Prob" (probability mass statistic). See details
#' @param percentagewise Logical, specifying whether probabilities are percentagewise (summing to 100) or not (summing to 1).
#'
#' @return A list with class "calibration_simplex" containing
#' \item{\code{n}}{As input by user or default.}
#' \item{\code{n_bins}}{Computed from \code{n}. Number of hexagons.}
#' \item{\code{n_obs}}{Total number of observations.}
#' \item{\code{freq}}{Vector of length \code{n_bins} containing the number of observations within each bin.}
#' \item{\code{cond_rel_freq}}{Matrix containing the observed outcome frequencies within each bin.}
#' \item{\code{cond_ave_prob}}{Matrix containing the average forecast probabilities within each bin.}
#' \item{\code{pvals}}{Exact multinomial p-values within each bin. See details.}
#'
#' @rdname calibration_simplex
#' @export
#'
#' @details Only two of the three forecast probability vectors (\code{p1}, \code{p2} and \code{p3}) need to be specified.
#'
#' The p-values are based on multinomial tests comparing the observed frequencies within a bin
#' with the average forecast probabilities within the bin as outlined in Resin (2020, Section 4.2).
#' The p-values are exact and do not rely on asymptotics, however, it is assumed that the true
#' distribution (under the hypothesis of forecast calibration) within each bin
#' is approximated well by the multinomial distribution. If \code{n} is small the
#' approximation may be poor, resulting in unreliable p-values. p-Values less than 0.0001 are not
#' exact but merely indicate a value less than 0.0001.
#'
#' @examples
#' attach(ternary_forecast_example) #see also documentation of sample data
#' #?ternary_forecast_example
#'
#' # Calibrated forecast sample
#' calsim0 = calibration_simplex(p1 = p1, p3 = p3, obs = obs0)
#' plot(calsim0,use_pvals = TRUE) # with multinomial p-values
#'
#' # Overconfident forecast sample
#' calsim1 = calibration_simplex(p1 = p1, p3 = p3, obs = obs1)
#' plot(calsim1)
#'
#' # Underconfident forecast sample
#' calsim2 = calibration_simplex(p1 = p1, p3 = p3, obs = obs2)
#' plot(calsim2,use_pvals = TRUE) # with multinomial p-values
#'
#' # Unconditionally biased forecast sample
#' calsim3 = calibration_simplex(p1 = p1, p3 = p3, obs = obs3)
#' plot(calsim3)
#'
#' # Using a different number of bins
#' calsim = calibration_simplex(n=4, p1 = p1, p3 = p3, obs = obs3)
#' plot(calsim)
#'
#' calsim = calibration_simplex(n=13, p1 = p1, p3 = p3, obs = obs3)
#' plot(calsim, # using some additional plotting parameters:
#' error_scale = 0.5, # errors are less pronounced (smaller shifts)
#' min_bin_freq = 100, # dots are plotted only for bins,
#' # which contain at least 100 forecast-outcome pairs
#' category_labels = c("below-normal","near-normal","above-normal"),
#' main = "Sample calibration simplex")
#'
#' detach(ternary_forecast_example)
calibration_simplex = function(n,p1,p2,p3,obs,test_stat,percentagewise){
UseMethod("calibration_simplex")
}
|
/scratch/gouwar.j/cran-all/cranData/CalSim/R/calibration_simplex.R
|
#' @return Object of class \code{calibration_simplex}.
#'
#' @rdname calibration_simplex
#' @export
#'
#' @note In contrast to the calibration simplex proposed by Daniel S. Wilks, 2013, the simplex has been
#' mirrored at the diagonal through the left bottom hexagon. The miscalibration error is by default calculated
#' precisely (in each bin as the difference of the relative frequencies of each class and the
#' average forecast probabilities) instead of approximately (using Wilks original formula).
#' Approximate errors can be used by setting \code{true_error = FALSE} when using \code{\link{plot.calibration_simplex}}.
#'
#' @references Daniel S. Wilks, 2013, The Calibration Simplex: A Generalization of the Reliability Diagram for Three-Category Probability Forecasts, \emph{Weather and Forecasting}, \strong{28}, 1210-1218
#' @references Resin, J. (2020), A Simple Algorithm for Exact Multinomial Tests, \emph{Preprint} \url{https://arxiv.org/abs/2008.12682}
#'
#' @seealso \code{\link{plot.calibration_simplex}}
#' @seealso \code{\link{ternary_forecast_example}}
#'
#' @importFrom stats aggregate
#' @importFrom ExactMultinom multinom_test_cpp
calibration_simplex.default = function(n = 10,
p1 = NULL,
p2 = NULL,
p3 = NULL,
obs = NULL,
test_stat = "LLR",
percentagewise = FALSE) {
factor_percent = if(percentagewise) 100 else 1 #=div (prev)
if(is.null(obs)) stop("Observations are missing!")
stopifnot(all(obs %in% c(1,2,3)))
eps = 0.01
if(is.null(p3)) {
if(is.null(p1)||is.null(p2)) stop("Probability vectors are missing!")
if(any(p2 < 0)||any(p1 < 0)) stop("Negative probabilities detected!")
if(any(p2+p1>(1 + eps)*factor_percent)) stop("Specified probabilities do not sum to <=1!")
p3 = factor_percent-p1-p2
}
else if(is.null(p1)) {
if(is.null(p3)||is.null(p2)) stop("Probability vectors are missing!")
if(any(p2 < 0)||any(p3 < 0)) stop("Negative probabilities detected!")
if(any(p3+p2>(1 + eps)*factor_percent)) stop("Specified probabilities do not sum to <=1!")
p1 = factor_percent-p3-p2
}
else if(is.null(p2)) {
if(any(p3 < 0)||any(p1 < 0)) stop("Negative probabilities detected!")
if(any(p3+p1>(1 + eps)*factor_percent)) stop("Specified probabilities do not sum to <=1!")
p2 = factor_percent-p1-p3
}
else {
if(any(p3 < 0)||any(p2 < 0)||any(p1 < 0)) stop("Negative probabilities detected!")
if(any((1-eps)*factor_percent>p3+p2+p1|p3+p2+p1>(1 + eps)*factor_percent)) stop("Probabilities do not sum to 1!")
}
stopifnot(length(obs) == length(p1),
length(p3) == length(p1),
(is.null(p2) || length(p2) == length(p1)))
n_bins = n*(n+1)/2 #= n_points (prev)
n_obs = length(obs)
assign_bin = function(p1,p3) { #bins ordered by p3,-p1 (ascending)
p3_bin = floor((n-1)*p3+0.5) #rounding up (on border)
p1_bin = ceiling((n-1)*p1-0.5) #rounding down (on border)
p2_bin = n-1-p3_bin-p1_bin
bin = (n*(p2_bin + 1)) - (p2_bin^2+p2_bin)/2 - p1_bin #=n_bin (prev)
return(bin)
}
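  # Worked example (n = 10, so n_bins = 55): for p1 = 0.62, p3 = 0.17,
  # p3_bin = floor(9*0.17 + 0.5) = 2, p1_bin = ceiling(9*0.62 - 0.5) = 6,
  # p2_bin = 9 - 2 - 6 = 1, hence bin = 10*2 - (1^2 + 1)/2 - 6 = 13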
p1 = p1/factor_percent
p2 = p2/factor_percent
p3 = p3/factor_percent
bin = mapply(assign_bin,p1,p3)
data = data.frame(p1,p2,p3,obs,bin)
out = list(n = n,
n_bins = n_bins,
n_obs = n_obs,
freq = rep(0,n_bins),
cond_rel_freq = matrix(rep(NA,3*n_bins),ncol = 3),
cond_ave_prob = matrix(rep(NA,3*n_bins),ncol = 3),
# cond_rel_freq_1 = rep(NA,n_bins),
# cond_rel_freq_3 = rep(NA,n_bins),
# cond_p3_ave = rep(NA,n_bins),
# cond_p1_ave = rep(NA,n_bins),
pvals = rep(NA,n_bins))
  cond_rel_freq = as.matrix(prop.table(table(rbind(data[,4:5],c(1,0),c(2,0),c(3,0))),2)[,-1]) # fixes an error when obs does not contain all three outcomes
# as.matrix() allows for a single bin
#cond_rel_freq = prop.table(table(data[,3:4]),2) # replaced in 0.4.0
cond_p_ave = aggregate(data[,1:3],list(data[,5]),mean)
bins = cond_p_ave[,1]
out$freq[bins] = margin.table(table(data[,5]),1)
out$cond_rel_freq[bins,] = t(cond_rel_freq)
# out$cond_rel_freq_1[bins] = cond_rel_freq[1,]
# out$cond_rel_freq_3[bins] = cond_rel_freq[3,]
out$cond_ave_prob[bins,] = as.matrix(cond_p_ave[,2:4])
#out$cond_ave_prob[bins,2] = 1 - out$cond_ave_prob[bins,1] - out$cond_ave_prob[bins,3]
# out$cond_p3_ave[bins] = cond_p_ave[,2]
# out$cond_p1_ave[bins] = cond_p_ave[,3]
# Calculate pvalues
stat = which(c("Prob","Chisq","LLR") == test_stat)
if(!length(stat) == 0){
for(bin in 1:n_bins){
if(out$freq[bin] > 0){
x = out$cond_rel_freq[bin,]*out$freq[bin]
p = out$cond_ave_prob[bin,]
if(all(p > 0)) out$pvals[bin] = multinom_test_cpp(x,p)[stat]
else if(sum(p>0) == 2){
if(x[!(p>0)] == 0) out$pvals[bin] = multinom_test_cpp(x[p>0],p[p>0])[stat]
else out$pvals[bin] = -1
}
else if(sum(p>0) == 1){
if(all(x[!(p>0)] == 0)) out$pvals[bin] = 1
else out$pvals[bin] = -1
}
}
}
}
class(out) = append(class(out),"calibration_simplex")
return(out)
}
|
/scratch/gouwar.j/cran-all/cranData/CalSim/R/calibration_simplex.default.R
|
#' Ternary probability forecast and observations.
#'
#' 10,000 realizations of a ternary probability forecast, which exhibits different characteristics,
#' depending on the realizing outcome variable. Idealized forecast example, generated as described in Wilks (2013).
#'
#' @docType data
#'
#' @usage data(ternary_forecast_example)
#'
#' @format A data frame with 10,000 rows and 6 variables.
#' \describe{
#' \item{p1}{forecast probability for outcome 1}
#' \item{p3}{forecast probability for outcome 3}
#' \item{obs0}{outcomes, such that the forecast is well-calibrated}
#' \item{obs1}{outcomes, such that the forecast is overconfident}
#' \item{obs2}{outcomes, such that the forecast is underconfident}
#' \item{obs3}{outcomes, such that the forecast is unconditionally biased}
#' }
#'
#' @references Daniel S. Wilks, 2013, The Calibration Simplex: A Generalization of the Reliability Diagram for Three-Category Probability Forecasts, \emph{Weather and Forecasting}, \strong{28}, 1210-1218
#'
#' @source Data generated by package author.
#'
"ternary_forecast_example"
|
/scratch/gouwar.j/cran-all/cranData/CalSim/R/data.R
|
#Function to compute the miscalibration error in each bin used to construct the
#calibration simplex.
error = function(x,true_error) {
UseMethod("error")
}
error.calibration_simplex = function(x,
true_error = TRUE) {
error = matrix(rep(NA,x$n_bins*3),ncol = 3)
# error = data.frame(c1 = rep(NA,x$n_bins),
# c3 = rep(NA,x$n_bins))
if(true_error) {
error[x$freq > 0,] = x$cond_rel_freq[x$freq > 0,] - x$cond_ave_prob[x$freq > 0,]
# error$c3[x$freq > 0] = x$cond_rel_freq_3[x$freq > 0] - x$cond_p3_ave[x$freq > 0]
# error$c1[x$freq > 0] = x$cond_rel_freq_1[x$freq > 0] - x$cond_p1_ave[x$freq > 0]
}
else {
rounded_forecasts = make_forecasts(x)
error[x$freq > 0,] = x$cond_rel_freq[x$freq > 0,] - rounded_forecasts[x$freq > 0,]
# error$c3[x$freq > 0] = x$cond_rel_freq_3[x$freq > 0] - rounded_forecasts$p3[x$freq > 0]
# error$c1[x$freq > 0] = x$cond_rel_freq_1[x$freq > 0] - rounded_forecasts$p1[x$freq > 0]
}
return(error)
}
|
/scratch/gouwar.j/cran-all/cranData/CalSim/R/error.calibration_simplex.R
|
#Function to construct the forecast vectors for the forecasts
#corresponding to the centers of the hexagons in the calibration simplex.
make_forecasts = function(x) {
UseMethod("make_forecasts")
}
make_forecasts.calibration_simplex = function(x) {
n = x$n
n_bins = x$n_bins
forecasts = matrix(rep(0,n_bins*3),ncol = 3)
# forecasts = data.frame(p1=rep(0,n_bins),p3 = rep(0,n_bins))
k = n-2
p3 = 0
p1 = n-1
for(i in 1:n_bins) {
forecasts[i,] = c(p1,1-p1-p3,p3)
# forecasts[i,] = c(p1,p3) # fixed error introduced in 0.3.2
if(p1>0) {
      p3 = p3 + 1
      p1 = p1 - 1
}
else {
p3=0
p1=k
k=k-1
}
}
return(forecasts/(n-1))
}
|
/scratch/gouwar.j/cran-all/cranData/CalSim/R/make_forecasts.calibration_simplex.R
|
#' Plot Calibration Simplex
#'
#' @param x Object of class \code{calibration_simplex}
#' @param true_error Logical, specifying whether to use true miscalibration errors or approximate miscalibration errors.
#' @param error_scale A number specifying the magnitude of the miscalibration errors (greater 0, usually should be less than 1,
#' cf. note below).
#' @param min_bin_freq A number. Lower bound for (absolute) frequencies, i.e. how many observations have to lie in a bin
#' for it to be plotted.
#' @param plot_error_scale Logical, specifying whether to plot a scale showing the magnitude of miscalibration errors.
#' @param scale_area Optional. A number by which the areas of the points are scaled. Use if points are to small or to big.
#' @param indicate_bins Logical, specifying whether to connect points to their respective bin (center of hexagon).
#' @param category_labels A vector of length 3 containing the category names, e.g. \code{c("1","2","3")} (default)
#' @param use_pvals Logical, determines whether multinomial p-values are used for uncertainty quantification, see details.
#' @param alphas Vector of length 2 with values 1 > \code{alphas[1]} > \code{alphas[2]} >= 0.0001. Only relevant if \code{use_pvals = TRUE}.
#' @param ... Arguments concerning the title (e.g. \code{main}, \code{cex.main}, \code{col.main} and \code{font.main})
#' and subtitle (e.g. \code{sub}, \code{cex.sub}, \code{col.sub} and \code{font.sub}) may be passed here.
#' @details If multinomial p-values are used (\code{use_pvals = TRUE}), the dots are colored in the following way:
#' \itemize{
#' \item Blue: p-value greater \code{alphas[1]} (0.1 by default).
#' \item Orange: p-value between \code{alphas[1]} and \code{alphas[2]} (0.1 and 0.01 by default)
#' \item Red: p-value less than \code{alphas[2]} (0.01 by default)
#' \item Black: p-value is exactly 0. This only happens if a category which is assigned 0 probability realizes.
#' }
#' Many small p-values (orange and red dots) indicate miscalibrated predictions, whereas many blue dots indicate that the predictions
#' may in fact be calibrated. WARNING: The use of the multinomial p-values is more of an experimental feature and may not yield reliable
#' p-values, especially if \code{n} is small.
#' For details regarding the calculation of the p-values see also \code{\link{calibration_simplex}}.
#'
#' @note For details on the meaning of the error scale, cf. Wilks, 2013, especially Fig. 2. Note that the miscalibration error in
#' each category is in "probability units" (as it is the average difference in relative frequency and forecast probability
#' in each bin).
#'
#'
#' @rdname plot.calibration_simplex
#' @export
#'
#' @importFrom graphics arrows axis par plot segments symbols text title
#' @importFrom spatstat.geom coords hexgrid hextess owin square
#' @import spatstat
plot.calibration_simplex = function(x,
#alpha = 0.05,
true_error = TRUE,
error_scale = 0.3,
min_bin_freq = 10,
plot_error_scale = TRUE,
scale_area = NULL,
indicate_bins = TRUE,
category_labels = c("1","2","3"),
use_pvals = FALSE,
alphas = c(0.1,0.01),
...) {
par_old <- par(no.readonly = TRUE)
on.exit(par(par_old))
n = x$n
n_bins = x$n_bins
error = error(x,true_error)
rel_freq = rel_freq(x)
if(max(x$freq)<min_bin_freq) stop("Nothing to plot here. Try reducing min_bin_freq (default = 10).")
if(is.null(scale_area)) scale_area = (n/10)^2
# Testing
# tests = x$pvals > alpha
# test_col = ifelse(tests,"blue","red")
# test_col[x$pvals == -1] = "violet"
#tests = x$pvals > alpha
test_col = ifelse(x$pvals >= alphas[1],"blue",ifelse(x$pvals >= alphas[2],"orange","red"))
test_col[x$pvals == -1] = "black"
test_col[is.na(test_col)] = "black"
# Plotting
  triangle = function(centers = FALSE) {
    a = n - 1
    if (a == 0) a = 0.01 # allows plotting a single bin (n = 1)
s = 1/sqrt(3)
sin60 = sqrt(3)/2
x = c(0,sin60*a,0)
y = c(0,a/2,a)
nodes = data.frame(x,y)
polygon = owin(poly=nodes)
if(centers) return(hexgrid(polygon,s,trim=FALSE,origin=c(0,0)))
else return(hextess(polygon,s,trim=FALSE,origin=c(0,0)))
}
H = triangle()
H_centers = triangle(T)
par(pty='s',mar=c(2,0,2,0),fig=c(0,1,0,1),cex.main = 1.5)
plot(c(-2,n),c(-1,n),col="white",asp=1,bty="n",axes = F,xlab='',ylab='')
if(is.element("main",names(list(...)))) title(...,line =-1)
else title(main = "Calibration Simplex", line = -1,...)
plot(H,border="darkgrey",add = T)
centers = coords(H_centers)
ordered_centers = centers[order(centers$x,centers$y),]
displacement = cbind(-error[,3]-error[,1],0.577*error[,3]-0.577*error[,1])
# displacement = cbind(-error$c3-error$c1,
# 0.577*error$c3-0.577*error$c1)
shifted_centers = ordered_centers + 1/sqrt(3)/error_scale*displacement
if(use_pvals){
symbols(shifted_centers[x$freq>=min_bin_freq,],
circles=sqrt(scale_area*rel_freq[x$freq>=min_bin_freq]/pi),
inches = F,ann=F,fg = test_col[x$freq>=min_bin_freq],bg = test_col[x$freq>=min_bin_freq],add=T)
if(indicate_bins) segments(shifted_centers$x[x$freq>=min_bin_freq],
shifted_centers$y[x$freq>=min_bin_freq],
ordered_centers$x[x$freq>=min_bin_freq],
ordered_centers$y[x$freq>=min_bin_freq],
col=test_col[x$freq>=min_bin_freq])
}
else{
symbols(shifted_centers[x$freq>=min_bin_freq,],
circles=sqrt(scale_area*rel_freq[x$freq>=min_bin_freq]/pi),
inches = F,ann=F,bg = "black",add=T)
if(indicate_bins) segments(shifted_centers$x[x$freq>=min_bin_freq],
shifted_centers$y[x$freq>=min_bin_freq],
ordered_centers$x[x$freq>=min_bin_freq],
ordered_centers$y[x$freq>=min_bin_freq],
col='red')
}
centers_extremes = ordered_centers[c(1,n,n_bins),]
c1 = 3/4
c2 = c1 *sqrt(3)
shift_start = cbind(c(-c2,c2,0),c(c1,c1,-2*c1))
shift_end = -shift_start[c(2,3,1),]
start = centers_extremes+shift_start
end = centers_extremes[c(2,3,1),]+shift_end
arrows(start$x,start$y,end$x,end$y,length = 0.1)
text((start+end+0.5*(shift_start+shift_end))/2,
labels=as.expression(c(bquote(p[.(category_labels[3])]),
bquote(p[.(category_labels[2])]),
bquote(p[.(category_labels[1])]))),
adj = 0.2)
label_coords = rbind(centers_extremes+0.67*shift_end,end-0.33*shift_end)
rot = c(30,0,-30)
for(i in 1:3) {
text(label_coords$x[c(i,i+3)],label_coords$y[c(i,i+3)],
labels = paste(c(0,n-1),'/',n-1,sep=""),
cex = 0.8,srt=rot[i])
}
if(plot_error_scale) {
par(fig = c(0.65,0.95,0.05,0.35),new=T,cex.main=0.8,cex.axis=0.8,mgp=c(3,0.4,0))
L = square(0.1)
hexagon = hextess(L,1,trim=F,origin = c(0.0,0.0))
plot(c(-1,1),c(-2,1),col="white",asp=1,main = "Error Scale",
bty="o",xaxt="n",
yaxt="n", xlab='',ylab='')
plot(hexagon,add=T)
axis(side = 1,at=c(-1,0,1),pos=-1.1,labels = paste(c(-error_scale,0,error_scale)))
}
}
|
/scratch/gouwar.j/cran-all/cranData/CalSim/R/plot.calibration_simplex.R
|
#Function returning the relative frequency in each bin.
rel_freq = function(x) {
UseMethod("rel_freq")
}
rel_freq.calibration_simplex = function(x) {
return(x$freq/x$n_obs)
}
|
/scratch/gouwar.j/cran-all/cranData/CalSim/R/rel_freq.calibration_simplex.R
|
L <-
function(m, m_l, m_r) {
c(m, m_l, m_r, 0.5)
}
|
/scratch/gouwar.j/cran-all/cranData/Calculator.LR.FNs/R/L.R
|
LR <-
function(m, m_l, m_r) {
c(m, m_l, m_r, 0)
}
|
/scratch/gouwar.j/cran-all/cranData/Calculator.LR.FNs/R/LR.R
|
LRFN.plot <-
function(M, Left.fun = NULL, Right.fun = NULL, ...)
{
if ( messages(M) != 1 ) { return( messages(M) ) }
m = M[1]
m_l = M[2]
m_r = M[3]
x <- NULL
if ( M[4] == 0 ) { y = function(x) Left.fun((m-x)/m_l) * (x<=m) + Right.fun((x-m)/m_r) * (m<x) }
else if ( M[4] == 1 ) { y = function(x) Right.fun((m-x)/m_l) * (x<=m) + Left.fun((x-m)/m_r) * (m<x) }
else if ( M[4] == 0.5 ) { y = function(x) Left.fun((m-x)/m_l) * (x<=m) + Left.fun((x-m)/m_r) * (m<x) }
else { return( noquote( paste0("The fourth element of each LR fuzzy number must be 0 or 0.5 or 1!" ) ) ) }
return(curve(y(x) * (0<=y(x) & y(x)<=1), ...) )
}
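# Illustrative usage sketch: the shape functions must accept vectors, e.g.
# a hypothetical triangular membership shape built with pmax():
if (FALSE) {
  Tri <- function(x) pmax(1 - x, 0)  # hypothetical shape function
  M <- LR(5, 2, 3)                   # core 5, left spread 2, right spread 3
  LRFN.plot(M, Left.fun = Tri, Right.fun = Tri, xlim = c(0, 10))
}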
|
/scratch/gouwar.j/cran-all/cranData/Calculator.LR.FNs/R/LRFN.plot.R
|
RL <-
function(m, m_l, m_r) {
c(m, m_l, m_r, 1)
}
|
/scratch/gouwar.j/cran-all/cranData/Calculator.LR.FNs/R/RL.R
|
a <-
function(M, N) {
options(warn = -1)
if ( messages(M) != 1 ) { return( messages(M) ) }
if ( messages(N) != 1 ) { return( messages(N) ) }
if ( M[4] != N[4] )
{
return( noquote( paste0("Addition has NOT a closed form of a LR fuzzy number" ) ) )
}
else
{
a1 = M[1]+N[1]
a2 = M[2]+N[2]
a3 = M[3]+N[3]
a4 = (M[4]+N[4])/2
print( noquote( paste0("the result of addition is (core = ", a1, ", left spread = " , a2, ", right spread = " , a3, ")"
, if ( a4 == 0 ) { paste0(" LR" ) } else if ( a4 == 1 ) { paste0(" RL" ) } else { paste0(" L" ) } ) ) )
return( invisible( c(a1,a2,a3,a4) ) )
}
}
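# Illustrative usage sketch: adding two LR fuzzy numbers of the same type
if (FALSE) {
  M <- LR(2, 1, 1)
  N <- LR(5, 2, 1)
  a(M, N) # (core = 7, left spread = 3, right spread = 2) LR
}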
|
/scratch/gouwar.j/cran-all/cranData/Calculator.LR.FNs/R/a.R
|
d <-
function(M, N) {
options(warn = -1)
if ( messages(M) != 1 ) { return( messages(M) ) }
if ( messages(N) != 1 ) { return( messages(N) ) }
if ( (M[4]==1 & N[4]==0) | (M[4]==0 & N[4]==1) | (M[4]==0.5 & N[4]==0.5) )
{
if ( ( sign(M) == "Positive" ) & ( sign(N) == "Positive" ) )
{
a1 = M[1] / N[1]
a2 = ( ( M[1]*N[3] ) + ( N[1]*M[2] ) ) / (N[1]^2)
a3 = ( ( M[1]*N[2] ) + ( N[1]*M[3] ) ) / (N[1]^2)
a4 = M[4]
print( noquote( paste0("the result of division is approximately (core = ", a1, ", left spread = " , a2, ", right spread = " , a3, ")"
, if ( a4 == 0 ) { paste0(" LR" ) } else if ( a4 == 1 ) { paste0(" RL" ) } else { paste0(" L" ) } ) ) )
return( invisible( c(a1,a2,a3,a4) ) )
}
else
{
return( noquote( paste0("A regular approximation is not defined for division since at least one of LR fuzzy numbers is not positive" ) ) )
}
}
else
{
return( noquote( paste0("Division has NOT a closed form of a LR fuzzy number" ) ) )
}
}
|
/scratch/gouwar.j/cran-all/cranData/Calculator.LR.FNs/R/d.R
|
m <-
function(M, N) {
options(warn = -1)
if ( messages(M) != 1 ) { return( messages(M) ) }
if ( messages(N) != 1 ) { return( messages(N) ) }
if ( M[4] != N[4])
{
return( noquote( paste0("Production has NOT a closed form of a LR fuzzy number" ) ) )
}
else if ( ( sign(M) == "Positive" ) & ( sign(N) == "Positive" ) )
{
a1 = M[1]*N[1]
a2 = ( M[1]*N[2] ) + ( N[1]*M[2] )
a3 = ( M[1]*N[3] ) + ( N[1]*M[3] )
a4 = (M[4]+N[4])/2
print( noquote( paste0("the result of multiplication is approximately (core = ", a1, ", left spread = " , a2, ", right spread = " , a3, ")"
, if ( a4 == 0 ) { paste0(" LR" ) } else if ( a4 == 1 ) { paste0(" RL" ) } else { paste0(" L" ) } ) ) )
return( invisible( c(a1,a2,a3,a4) ) )
}
else if ( ( sign(M) == "Negative" ) & ( sign(N) == "Negative" ) )
{
a1 = M[1]*N[1]
a2 = -( M[1]*N[2] ) - ( N[1]*M[2] )
a3 = -( M[1]*N[3] ) - ( N[1]*M[3] )
a4 = abs( M[4]-1 )
print( noquote( paste0("the result of multiplication is approximately (core = ", a1, ", left spread = " , a2, ", right spread = " , a3, ")"
, if ( a4 == 0 ) { paste0(" LR" ) } else if ( a4 == 1 ) { paste0(" RL" ) } else { paste0(" L" ) } ) ) )
return( invisible( c(a1,a2,a3,a4) ) )
}
else if ( ( sign(M) == "Positive" ) & ( sign(N) == "Negative" ) )
{
a1 = M[1]*N[1]
a2 = ( M[1]*N[2] ) - ( N[1]*M[3] )
a3 = ( M[1]*N[3] ) - ( N[1]*M[2] )
a4 = abs( M[4]-1 )
print( noquote( paste0("the result of multiplication is approximately (core = ", a1, ", left spread = " , a2, ", right spread = " , a3, ")"
, if ( a4 == 0 ) { paste0(" LR" ) } else if ( a4 == 1 ) { paste0(" RL" ) } else { paste0(" L" ) } ) ) )
return( invisible( c(a1,a2,a3,a4) ) )
}
else if ( ( sign(M) == "Negative" ) & ( sign(N) == "Positive" ) )
{
a1 = M[1]*N[1]
a2 = ( N[1]*M[2] ) - ( M[1]*N[3] )
a3 = ( N[1]*M[3] ) - ( M[1]*N[2] )
a4 = abs( N[4]-1 )
print( noquote( paste0("the result of multiplication is approximately (core = ", a1, ", left spread = " , a2, ", right spread = " , a3, ")"
, if ( a4 == 0 ) { paste0(" LR" ) } else if ( a4 == 1 ) { paste0(" RL" ) } else { paste0(" L" ) } ) ) )
return( invisible( c(a1,a2,a3,a4) ) )
}
else
{
return( noquote( paste0("A regular approximation is not defined for multiplication since at least one of LR fuzzy numbers is non-positive and non-negative fuzzy number")))
}
}
|
/scratch/gouwar.j/cran-all/cranData/Calculator.LR.FNs/R/m.R
|
messages <-
function (M) {
  options(warn = -1)
  # error messages that may be produced by the arithmetic operators;
  # if M is one of them, propagate it, otherwise M is a valid LR fuzzy
  # number and 1 is returned
  known_messages <- c(
    "Addition has NOT a closed form of a LR fuzzy number",
    "Subtraction has NOT a closed form of a LR fuzzy number",
    "Production has NOT a closed form of a LR fuzzy number",
    "Division has NOT a closed form of a LR fuzzy number",
    " The fourth element of each LR fuzzy number must be 0 or 0.5 or 1! ",
    " The scalar multiplication is not defined for zero ",
    "A regular approximation is not defined for multiplication since at least one of LR fuzzy numbers is non-positive and non-negative fuzzy number",
    "A regular approximation is not defined for division since at least one of LR fuzzy numbers is not positive"
  )
  if ( M[1] %in% known_messages ) { return( noquote( M[1] ) ) }
  return( 1 )
}
|
/scratch/gouwar.j/cran-all/cranData/Calculator.LR.FNs/R/messages.R
|
s <-
function(M, N) {
options(warn = -1)
if ( messages(M) != 1 ) { return( messages(M) ) }
if ( messages(N) != 1 ) { return( messages(N) ) }
if ( (M[4]==1 & N[4]==0) | (M[4]==0 & N[4]==1) | (M[4]==0.5 & N[4]==0.5) )
{
a1 = M[1]-N[1]
a2 = M[2]+N[3]
a3 = M[3]+N[2]
a4 = M[4]
print( noquote( paste0("the result of subtraction is (core = ", a1, ", left spread = " , a2, ", right spread = " , a3, ")"
, if ( a4 == 0 ) { paste0(" LR" ) } else if ( a4 == 1 ) { paste0(" RL" ) } else { paste0(" L" ) } ) ) )
return( invisible( c(a1,a2,a3,a4) ) )
}
else
{
return( noquote( paste0("Subtraction has NOT a closed form of a LR fuzzy number" ) ) )
}
}
|
/scratch/gouwar.j/cran-all/cranData/Calculator.LR.FNs/R/s.R
|
s.m <-
function(k, N) {
if ( messages(N) != 1 ) { return( messages(N) ) }
if ( messages(k) != 1 ) { return( messages(k) ) }
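  # if the arguments were passed in reverse order (fuzzy number first,
  # scalar second), swap them so that k is the scalar and N the fuzzy number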
if ( length(k)==4 & length(N)==1) {
zarf = N
N[1] = k[1]
N[2] = k[2]
N[3] = k[3]
N[4] = k[4]
k = zarf
}
if (k==0) { return( noquote( paste0(" The scalar multiplication is not defined for zero " ) ) ) }
else {
a1 = k*N[1]
a2 = k* (N[2]*(k>0)-N[3]*(k<0))
a3 = k* (N[3]*(k>0)-N[2]*(k<0))
a4 = N[4]
print( noquote( paste0("the result of scalar multiplication is (core = ", a1, ", left spread = " , a2, ", right spread = " , a3, ")"
, if ( a4 == 0 ) { paste0(" LR" ) } else if ( a4 == 1 ) { paste0(" RL" ) } else { paste0(" L" ) } ) ) )
return( invisible( c(a1,a2,a3,a4) ) )
}
}
|
/scratch/gouwar.j/cran-all/cranData/Calculator.LR.FNs/R/s.m.R
|
sign <-
function(M){
supp = support(M)
if (supp[1] > 0) {return( noquote( paste0("Positive" ) ) )}
else {if (supp[2] < 0) {return( noquote( paste0("Negative" ) ) )}
else {return( noquote( paste0("non-positive and non negative" ) ) )} }
}
|
/scratch/gouwar.j/cran-all/cranData/Calculator.LR.FNs/R/sign.R
|
support <-
function(M, Left.fun = NULL, Right.fun = NULL){
range1 = M[1]-M[2]-M[3]-100
range2 = M[1]+M[2]+M[3]+100
x = seq(range1, range2, len = 200000)
if ( M[4] == 0 ) { y = Left.fun((M[1]-x)/M[2]) * (x<=M[1]) + Right.fun((x-M[1])/M[3]) * (M[1]<x) }
else if ( M[4] == 1 ) { y = Right.fun((M[1]-x)/M[2]) * (x<=M[1]) + Left.fun((x-M[1])/M[3]) * (M[1]<x) }
else if ( M[4] == 0.5 ) { y = Left.fun((M[1]-x)/M[2]) * (x<=M[1]) + Left.fun((x-M[1])/M[3]) * (M[1]<x) }
supp = c()
supp[1] = min(x[0<y & y<1])
supp[2] = max(x[0<y & y<1])
if ( supp[1] == min(x) ) { supp[1] = -Inf }
if ( supp[2] == max(x) ) { supp[2] = +Inf }
#print( noquote( paste("The support of fuzzy number is interval:" ) ) )
return(supp )
  if (Left.fun(2) == Right.fun(2)+100 ) print(2) # a dummy statement, only here because CRAN had forced us to ...
}
|
/scratch/gouwar.j/cran-all/cranData/Calculator.LR.FNs/R/support.R
|
#' @title BBQ_CV
#' @description trains and evaluates the BBQ calibration model using \code{folds}-Cross-Validation (CV).
#' The \code{predicted} values are partitioned into n subsets. A BBQ model is constructed on (n-1) subsets; the remaining set is used
#' for testing the model. All test set predictions are merged and used to compute error metrics for the model.
#' @param actual vector of observed class labels (0/1)
#' @param predicted vector of uncalibrated predictions
#' @param method_for_prediction 0=selection, 1=averaging, Default: 0
#' @param n_folds number of folds in the cross-validation, Default: 10
#' @param seed random seed to alternate the split of data set partitions
#' @param input specify if the input was scaled or transformed, scaled=1, transformed=2
#' @return list object containing the following components:
#' \item{error}{list object that summarizes discrimination and calibration errors obtained during the CV}
#' \item{pred_idx}{which BBQ prediction method was used during CV, 0=selection, 1=averaging}
#' \item{type}{"BBQ"}
#' \item{probs_CV}{vector of calibrated predictions that was used during the CV}
#' \item{actual_CV}{respective vector of true values (0 or 1) that was used during the CV}
#' @examples
#' ## Loading dataset in environment
#' data(example)
#' actual <- example$actual
#' predicted <- example$predicted
#' BBQ_model <- CalibratR:::BBQ_CV(actual, predicted, method_for_prediction=0, n_folds=4, 123, 1)
#' @rdname BBQ_CV
BBQ_CV <- function(actual, predicted, method_for_prediction=0, n_folds=10, seed, input){
set.seed(seed)
if (!(method_for_prediction==0|method_for_prediction==1)){
print("Please set a valid method_for_prediction. Choose 0 for selection and 1 for averaging")
}
x <- data.frame(cbind(actual, predicted))
x_cases <- subset(x, x[,1]==1)
x_controls <- subset(x, x[,1]==0)
fold_cases <- sample(cut(seq(1,nrow(x_cases)),breaks=n_folds,label=FALSE))
fold_controls <- sample(cut(seq(1,nrow(x_controls)),breaks=n_folds,label=FALSE))
y_cal <- list()
y_dis <- list()
list_probs <- c()
list_actual <- c()
bbq_models <- list()
bbq_models_rd <- list()
for(i in 1:n_folds){
trainIndexes_cases <- which(fold_cases!=i, arr.ind = TRUE)
trainIndexes_controls <- which(fold_controls!=i,arr.ind=TRUE)
trainData <- rbind(x_cases[trainIndexes_cases, ], x_controls[trainIndexes_controls,])
x_train <- format_values(x_cases[trainIndexes_cases, ], x_controls[trainIndexes_controls,], input=input)
testIndexes_cases <- which(fold_cases==i,arr.ind=TRUE)
testIndexes_controls <- which(fold_controls==i,arr.ind=TRUE)
testData <- rbind(x_cases[testIndexes_cases, ], x_controls[testIndexes_controls,])
x_test <- format_values(x_cases[testIndexes_cases, ], x_controls[testIndexes_controls,], input=input,
min=x_train$min, max=x_train$max, x_train$mean)
BBQ_model <- build_BBQ(x_train$formated_values[,1], x_train$formated_values[,2]) #build calibration model on training set
calibrated_probs <- predict_BBQ(BBQ_model, x_test$formated_values[,2], method_for_prediction) #calibrate the test set and evaluate ECE etc.
list_probs <- c(list_probs, calibrated_probs$predictions)
list_actual <- c(list_actual,x_test$formated_values[,1])
bbq_models[[i]] <- BBQ_model
}
y <- reliability_diagramm(list_actual, list_probs)
y_cal <- y$calibration_error
y_dis <- y$discrimination_error
error_summary_CV <- list(calibration_error=y_cal, discrimination_error=y_dis,
mean_pred_per_bin=y$mean_pred_per_bin, accuracy_per_bin=y$accuracy_per_bin,
sign=y$sign)
return(list(error=error_summary_CV, pred_idx=method_for_prediction, type="BBQ",
probs_CV=list_probs, actual_CV=list_actual))
}
|
/scratch/gouwar.j/cran-all/cranData/CalibratR/R/BBQ_CV.R
|
#' @title get_CLE_comparison
#' @description visualises how class 1 and class 0 classification error (CLE) differs in each trained calibration model.
#' Comparing class-specific CLE helps to choose a calibration model for applications were classification error is cost-sensitive for one class.
#' See \code{\link{get_CLE_class}} for details on the implementation.
#' @param list_models list object that contains all error values for all trained calibration models. For the specific format, see the calling function \code{\link{visualize_calibratR}}.
#' @return ggplot2
#' @rdname get_CLE_comparison
get_CLE_comparison <- function(list_models){
list_models$original <- NULL
list_errors_0 <- list()
list_errors_1 <- list()
idx <- 1
for (j in list_models){
list_errors_1[[names(list_models)[[idx]]]] <- j$CLE_class_1
list_errors_0[[names(list_models)[[idx]]]] <- j$CLE_class_0
idx <- idx+1
}
df_cle_0 <- cbind(reshape2::melt(list_errors_0), Class="CLE class 0")
df_cle_1 <- cbind(reshape2::melt(list_errors_1), Class="CLE class 1")
df <- rbind(df_cle_0, df_cle_1)
Class <- NULL
value <- NULL
L1 <- NULL
ggplot2::ggplot(df, ggplot2::aes(x=L1, y=value, colour=Class)) +
ggplot2::ggtitle("Class-specific CLE") +
ggplot2::scale_x_discrete(name = NULL) +
ggplot2::geom_boxplot() +
ggplot2::theme(axis.text.x = ggplot2::element_text(angle = 60, hjust = 1))
}
|
/scratch/gouwar.j/cran-all/cranData/CalibratR/R/CLES_in_comparison.R
|
#' @title GUESS_CV
#' @description trains and evaluates the GUESS calibration model using \code{folds}-Cross-Validation (CV).
#' The \code{predicted} values are partitioned into n subsets. A GUESS model is constructed on (n-1) subsets; the remaining set is used
#' for testing the model. All test set predictions are merged and used to compute error metrics for the model.
#' @param actual vector of observed class labels (0/1)
#' @param predicted vector of uncalibrated predictions
#' @param n_folds number of folds for the cross-validation, Default: 10
#' @param method_of_prediction prediction method to be used in \code{\link{predict_GUESS}}, either 1 or 2, Default: 2
#' @param seed random seed to alternate the split of data set partitions
#' @param input specify if the input was scaled or transformed, scaled=1, transformed=2
#' @return list object containing the following components:
#' \item{error}{list object that summarizes discrimination and calibration errors obtained during the CV}
#' \item{type}{"GUESS"}
#' \item{pred_idx}{which prediction method was used during CV}
#' \item{probs_CV}{vector of calibrated predictions that was used during the CV}
#' \item{actual_CV}{respective vector of true values (0 or 1) that was used during the CV}
#' @rdname GUESS_CV
GUESS_CV <- function(actual, predicted, n_folds=10, method_of_prediction=2, seed, input){
set.seed(seed)
x <- data.frame(cbind(actual, predicted))
x_cases <- subset(x, x[,1]==1)
x_controls <- subset(x, x[,1]==0)
fold_cases <- sample(cut(seq(1,nrow(x_cases)),breaks=n_folds,label=FALSE))
fold_controls <- sample(cut(seq(1,nrow(x_controls)),breaks=n_folds,label=FALSE))
y_cal_1 <- list()
y_dis_1 <- list()
list_probs <- c()
list_actual <- c()
GUESS_models <- list()
GUESS_models_rd <- list()
for(i in 1:n_folds){
trainIndexes_cases <- which(fold_cases!=i, arr.ind = TRUE)
trainIndexes_controls <- which(fold_controls!=i,arr.ind=TRUE)
trainData <- rbind(x_cases[trainIndexes_cases, ], x_controls[trainIndexes_controls,])
x_train <- format_values(x_cases[trainIndexes_cases, ], x_controls[trainIndexes_controls,], input=input)
testIndexes_cases <- which(fold_cases==i,arr.ind=TRUE)
testIndexes_controls <- which(fold_controls==i,arr.ind=TRUE)
testData <- rbind(x_cases[testIndexes_cases, ], x_controls[testIndexes_controls,])
x_test <- format_values(x_cases[testIndexes_cases, ], x_controls[testIndexes_controls,], input=input,
min=x_train$min, max=x_train$max, x_train$mean)
GUESS_model <- build_GUESS(x_train$formated_values[,1], x_train$formated_values[,2]) #build calibration model on train set
calibrated_probs <- predict_GUESS(GUESS_model, x_test$formated_values[,2], method_of_prediction) #calibrate the test set and evaluate ECE etc.
list_probs <- c(list_probs, calibrated_probs$predictions)
list_actual <- c(list_actual, x_test$formated_values[,1])
GUESS_models[[i]] <- GUESS_model
}
y <- reliability_diagramm(list_actual, list_probs)
y_cal <- y$calibration_error
y_dis <- y$discrimination_error
error_summary_CV <- list(calibration_error=y_cal, discrimination_error=y_dis,
mean_pred_per_bin=y$mean_pred_per_bin, accuracy_per_bin=y$accuracy_per_bin,
sign=y$sign)
return(list(error=error_summary_CV, type="GUESS", pred_idx=method_of_prediction,
probs_CV=list_probs, actual_CV=list_actual))
}
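# A minimal usage sketch on simulated scores (values chosen for illustration):
# 4-fold CV of the GUESS model, analogous to the BBQ_CV example above.
set.seed(123)
scores_sim <- c(rnorm(100, 1.5), rnorm(100, 0))
labels_sim <- c(rep(1, 100), rep(0, 100))
guess_cv_sim <- GUESS_CV(labels_sim, scores_sim, n_folds = 4,
                         method_of_prediction = 2, seed = 123, input = 0)
guess_cv_sim$error$calibration_error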
|
/scratch/gouwar.j/cran-all/cranData/CalibratR/R/GUESS_CV.R
|
#' @title binom_for_histogram
#' @description p-values from stats::binom.test for each bin; if a bin is empty, a p-value of 2 is returned
#' @param n_x numeric vector of two integers. The first one is the number of cases in the bin; the second the number of instances in the bin
#' @return p-value from stats::binom.test method
#' @rdname binom_for_histogram
#' @importFrom stats binom.test
binom_for_histogram <- function(n_x){
success <- n_x[1]
all <- n_x[2]
if(!(success==0 && all==0)){
return(as.numeric(stats::binom.test(success,all)$p.value))
}
else #if bin is empty -> p-value of 2
return(2)
}
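# A minimal usage sketch: a bin with 7 cases among 10 instances, and the
# sentinel value for an empty bin.
binom_for_histogram(c(7, 10)) # two-sided binomial test, p ~ 0.34
binom_for_histogram(c(0, 0))  # empty bin -> 2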
|
/scratch/gouwar.j/cran-all/cranData/CalibratR/R/binomtest_for_bin.R
|
#' @title build_BBQ
#' @description This method builds a BBQ calibration model using the training set provided.
#' @param actual vector of observed class labels (0/1)
#' @param predicted vector of uncalibrated predictions
#' @return returns the BBQ model which includes models for all evaluated binning schemes; the prunedmodel contains only a selection of BBQ models with the best Bayesian score
#' @details Based on the paper (and Matlab code) "Obtaining Well Calibrated Probabilities Using Bayesian Binning" by Naeini, Cooper and Hauskrecht; https://www.ncbi.nlm.nih.gov/pmc/articles/PMC4410090/
#' @rdname build_BBQ
build_BBQ <- function(actual, predicted){
####local functions###
initlnfact_local <- function(n){
lnfact <- rep(0,n+1)
for (w in 2:(n+1)){
lnfact[w] <- lnfact[w-1] + log(w-1)
}
return(lnfact)
}
get_BDeu_Score2_local <- function(opt){
histModel <- opt$histModel
B <- length(histModel)
N_0 <- 2*B #default in paper
C <- N_0/B # =2
score <- B*lgamma(C)
for (j in 1:B){
nj <- histModel[[j]]$n #number of elements in bin B
nj0 <- histModel[[j]]$n0 #number of y==0 elements in bin B
nj1 <- histModel[[j]]$n1 #number of y==1 elements in bin B
pj <- (histModel[[j]]$min + histModel[[j]]$max)/2 #midpoint of bin B, naive estimate of probability
pj <- min(pj,1-5*10^-3)
pj <- max(pj, 5*10^-3)
score <- score + (lgamma(nj1+C*pj)) + (lgamma(nj0+C*(1-pj))) - (lgamma(nj+C)) - (lgamma(C*pj)) - (lgamma(C*(1-pj))) #log marginal likelihood contribution (test statistic)
}
score <- -2*score
return (score)
}
buildFuncOpt_local <- function(opt, histModel, cutIdx, cutPoints, logLikelihood){
N <- length(opt$PTR) #number of predicted values in the whole histogram
K <- length(histModel) #how many bins where evaluated
funcOpt <- list(histModel=histModel, cutIdx=cutIdx, cutPoints=cutPoints, logLikelihood=logLikelihood, K=K, N=N, PTR=opt$PTR, lnfact=opt$
lnfact, N_0=opt$N_0)
return(funcOpt)
}
hist_calibration_freq_local <- function(predicted, actual, b){
#get the log of the likelihood P(D|M) for the observed data distribution in the bin, given theta as P
#P is approximately the prevalence of positives in the bin, but smoothed; it also depends on the midpoint of the bin and the mean predicted value of the positive cases
N <- length(actual)
logLikelihood <- 0
cutIdx <- c() #b==1 in this case -> no cutIdx
cutPoints <- c()
histModel_all <- list()
#smoothing function for p
if (b==1){ #if binning model contains only 1 bin
min <- 0
max <- 1
m_0 <- (min+max)/2 #mid point of bin
idx <- which(actual==1) #TRUE values
predicted_1 <- predicted[idx] #values predicted to be 1
p_0 <- (sum(predicted_1)+m_0)/(length(predicted_1)+1) #intuitive prior for beta smoothing
n <- length(actual) #how many elements in the bin
n1 <- sum(actual) #how many actual y==1 in the bin
n0 <- n-n1 #how many actual y==0 in the bin
P <- (n1+p_0)/(n+1)
if(n1>0){
logLikelihood <- logLikelihood+n1*log(P)
}
if(n0>0){
logLikelihood<-logLikelihood+n0*log(1-P)
}
histModel_all[[b]] <- list(min=min, max=max, n=n, n1=n1, n0=n0, P=P, midpoint=m_0)
}
else {
Yhat <- predicted
Y <- actual
c <- floor(length(Y)/b) #number of elements per bin b to get equal frequency bins
rest <- length(Y)-(c*b) #how many elements are not counted due to the floor() method
i <- 1
idx <- 1
Tt <- list()
idx2 <- 0
while(i < b){ #i = running index over bin numbers, from 1 to the last bin in the model
#idx1 <- (i-1)*c+1 #lower idx of instance for bin i
idx1 <- idx2+1
if (i <= rest){ #the rest elements are distributed evenly among the first bins
idx2 <- idx1+c #upper idx of instance
}
else
idx2 <- idx1+c-1
j <- i+1
while (j <= b){ #is the next bin j already the last bin b of the model?
if (j < b){ #no!
#Jidx2 <- j*c #lower idx of instance for bin j
if (j <= rest){ #does bin j still get one extra element so that no single bin becomes too full?
Jidx2 <- idx2+c #upper idx of instance
}
else{
Jidx2 <- idx2+c-1
}
if (predicted[Jidx2]==predicted[idx1]){
idx2 <- Jidx2
j <- j+1
}
else
break
}
else{
Jidx2 <- N
if (predicted[Jidx2]==predicted[idx1]){
idx2 <- Jidx2
j <- j+1
}
else
break
}
}
if (idx2<N){
cutIdx$idx <- idx2
}
Tt[[idx]] <- list(Y=Y[idx1:idx2], PTR=predicted[idx1:idx2], Yhat=Yhat[idx1:idx2])
idx <- idx+1
i <- j
}
if (idx2<N){
Tt[[idx]]<- list(Y=Y[(idx2+1):length(Y)], PTR=predicted[(idx2+1):length(Y)], Yhat=Yhat[(idx2+1):length(Y)])
}
b_0 <- b
b <- length(Tt)
histModel_all[[1]] <- list(min=0, max=(Tt[[1]]$Yhat[length(Tt[[1]]$Yhat)]+Tt[[2]]$Yhat[1])/2)
cutPoints[1] <- histModel_all[[1]]$max #first cut point in binning model
#intuitive prior for beta smoothing
m_0 <- (histModel_all[[1]]$min + histModel_all[[1]]$max)/2 #midpoint
idx <- which(Tt[[1]]$Y==1) #Y==1 in bin 1
PTR1 <- Tt[[1]]$PTR[idx] #which PTR have y==1 (in bin 1)
p_0 <- (sum(PTR1)+m_0)/(length(PTR1)+1) #intuitive prior for beta smoothing
#values for the first bin
histModel_all[[1]]$n <- length(Tt[[1]]$Y) #how many values in first bin
histModel_all[[1]]$n1 <- sum(Tt[[1]]$Y) #how many y==1 values in first bin
histModel_all[[1]]$n0 <- histModel_all[[1]]$n-histModel_all[[1]]$n1 #how many y==0 values in first bin
histModel_all[[1]]$P <- (histModel_all[[1]]$n1+p_0)/(histModel_all[[1]]$n+1)
histModel_all[[1]]$P_observed <- (histModel_all[[1]]$n1)/(histModel_all[[1]]$n)
histModel_all[[1]]$midpoint <- m_0
if (histModel_all[[1]]$n1 > 0){
logLikelihood <- logLikelihood + histModel_all[[1]]$n1*log(histModel_all[[1]]$P)
}
if (histModel_all[[1]]$n0 > 0){
logLikelihood <- logLikelihood + histModel_all[[1]]$n0*log(1-histModel_all[[1]]$P)
}
#for second till second to last bin
for (i in 2:(b-1)){
if((b-1)<2){
break
}
else{
histModel_all[[i]] <- list()
histModel_all[[i]]$min <- (Tt[[i]]$Yhat[1]+Tt[[i-1]]$Yhat[length(Tt[[i-1]]$Yhat)])/2 #min value in bin i
histModel_all[[i]]$max <- (Tt[[i]]$Yhat[length(Tt[[i]]$Yhat)]+Tt[[i+1]]$Yhat[1])/2 #max value in bin i
cutPoints[i] <- histModel_all[[i]]$max
#intuitive prior for beta distribution smoothing
m_0 <- (histModel_all[[i]]$min+histModel_all[[i]]$max)/2 #midpoint
idx <- which(Tt[[i]]$Y==1) #Y==1 in bin i
PTR1 <- Tt[[i]]$PTR[idx] #which PTR have y==1 (in bin i)
p_0 <- (sum(PTR1)+m_0)/(length(PTR1)+1) #intuitive prior for beta smoothing
histModel_all[[i]]$n <- length(Tt[[i]]$Y) #how many elements in bin i
histModel_all[[i]]$n1 <- sum(Tt[[i]]$Y) #how many y==1 values in bin i
histModel_all[[i]]$n0 <- histModel_all[[i]]$n-histModel_all[[i]]$n1 #how many y==0 values in bin i
histModel_all[[i]]$P <- (histModel_all[[i]]$n1+p_0)/(histModel_all[[i]]$n+1) #intuitive prior for beta smoothing
histModel_all[[i]]$P_observed <- (histModel_all[[i]]$n1)/(histModel_all[[i]]$n)
histModel_all[[i]]$midpoint <- m_0
if (histModel_all[[i]]$n1 > 0){
logLikelihood <- logLikelihood + histModel_all[[i]]$n1*log(histModel_all[[i]]$P)
}
if (histModel_all[[i]]$n0 > 0){
logLikelihood <- logLikelihood + histModel_all[[i]]$n0*log(1-histModel_all[[i]]$P)
}
}
}
#for last bin b of histModel_all
histModel_all[[b]] <- list()
histModel_all[[b]]$min <- (Tt[[b]]$Yhat[1]+Tt[[b-1]]$Yhat[length(Tt[[b-1]]$Yhat)])/2
histModel_all[[b]]$max <- 1
m_0 <- (histModel_all[[b]]$min + histModel_all[[b]]$max)/2 #midpoint
idx <- which(Tt[[b]]$Y==1)
PTR1 <- Tt[[b]]$PTR[idx]
p_0 <- (sum(PTR1)+m_0)/(length(PTR1)+1) #intuitive prior for beta smoothing
histModel_all[[b]]$n <- length(Tt[[b]]$Y)
histModel_all[[b]]$n1 <- sum(Tt[[b]]$Y)
histModel_all[[b]]$n0 <- histModel_all[[b]]$n - histModel_all[[b]]$n1
histModel_all[[b]]$P <- (histModel_all[[b]]$n1+p_0)/(histModel_all[[b]]$n+1)
histModel_all[[b]]$P_observed <- (histModel_all[[b]]$n1)/(histModel_all[[b]]$n)
histModel_all[[b]]$midpoint <- m_0
if (histModel_all[[b]]$n1 > 0){
logLikelihood <- logLikelihood + histModel_all[[b]]$n1*log(histModel_all[[b]]$P)
}
if (histModel_all[[b]]$n0 > 0){
logLikelihood <- logLikelihood + histModel_all[[b]]$n0*log(1-histModel_all[[b]]$P)
}
}
return(list(histModel_all=histModel_all, cutIdx=cutIdx, cutPoints=cutPoints, logLL=logLikelihood))
}
elbow <- function(scores, alpha){
#Assume R is the sorted Bayesian scores of histogram models
#in decreasing order. We fix a small number alpha > 0 (alpha = 0.001 in our experiments)
#and pick the first k_a associated binning models as the refined set of models,
#where k_a is a defined index in the sorted sequence and sigma2 is the empirical variance of the Bayesian scores.
b <- length(scores)
sigma2 <- (sqrt(mean(scores ^ 2) - mean(scores)^2))^2 #empirical variance of the scores (denominator N, not N-1)
k <- 1 #running index for scanning the scores
idxs <- order(scores, decreasing=TRUE)
R <- scores[idxs] #highest SV = rank 1, highest SV = lowest score
while (R[k]==R[k+1]){ #scores do not differ
k <- k+1
}
while (k < b && ((R[k]-R[k+1]))/sigma2 > alpha){ #the difference between SV[k] and SV[k+1] is large enough for inclusion in the refined set
k <- k+1
}
#for the first k elements of the sorted SV set (lowest score first) the alpha criterion is met;
#those k elements form the refined SV set
if (k > 1){
res <- idxs[1:(k-1)]
}
else #k==1, include only the highest Bayesian score in the averaging procedure
res <- idxs[1]
return(res)
}
processModel_local <- function(inModel, idxs){
outModel <- list()
for (i in 1:length(idxs)){
outModel[[i]] <- inModel[[idxs[i]]]
}
outModel[[1]]$minScoreIdx <- 1 #best model is [[1]]
outModel[[1]]$SV <- inModel[[1]]$SV[idxs]
return(outModel=outModel)
}
#default options from paper are set as fixed values
all <- data.frame(cbind(actual, predicted))
N_0 <- 2 #default
alpha <- 0.001 #default
runSort <- 1 #default
if (runSort==1){
x <- order(predicted)
predicted <- predicted[x]
actual <- actual[x]
}
N <- length(predicted)
lnfact <- initlnfact_local(N+1) #output: array with length+2 elements
maxbinno <- min(ceiling(N/5), ceiling(10*N^(1/3))) #max. number of bins in the model (at least 5 elements per bin)
minbinno <- max(1,floor(N^(1/3)/10)) #min. number of bins in the binning model (at least 1)
MNM <- maxbinno-minbinno+1 #maximum number of possible binning models
model <- list()
model[[1]] <- list()
model[[1]]$scoringFunc <- "BDeu2"
opt1 <- list(PTR=predicted, lnfact=lnfact, N_0=N_0)
for (b in 1:MNM){ #a binning model for each possible bin count b is created and evaluated using its BDeu2 score
output_hist_calibration <- hist_calibration_freq_local(predicted, actual, b+minbinno-1)
funcOpt <- buildFuncOpt_local(opt1, output_hist_calibration$histModel_all, output_hist_calibration$cutIdx,
output_hist_calibration$cutPoints, output_hist_calibration$logLL)
score <- get_BDeu_Score2_local(funcOpt) #should use the respective get_BDeu_Score(or BDeu2 (default)) method
model[[b]] <- list(binNo=output_hist_calibration$histModel, cutIdx=output_hist_calibration$cutIdx,
cutPoints=output_hist_calibration$cutPoints, score=score, logLL=output_hist_calibration$logLL)
}
score <- c()
logLL <- c()
#relationship between score and logLL
for (i in 1:MNM){
score[i]<-model[[i]]$score
logLL[i]<-model[[i]]$logLL
}
#which binning model has the best BDeu2 score?
maxScore <- -Inf
maxScoreIdx <- 0
minScore <- Inf
minScoreIdx <- 0
SV <- rep(0,MNM) #SV vector contains all scores for all evaluated models
for (b in 1:MNM){
SV[b] <- model[[b]]$score
if(model[[b]]$score > maxScore){
maxScoreIdx <- b
maxScore <- model[[b]]$score
}
if (model[[b]]$score < minScore){
minScoreIdx <- b
minScore <- model[[b]]$score
}
}
#SV becomes 1 for min(SV), SV becomes smallest for largest score, model with the logLL closest to 0
SV <- exp((min(SV)-SV)/2)#SV=whole set of BDeu2 scores for each possible MNM
model[[1]]$maxScoreIdx <- maxScoreIdx #first binning model (b=1) stores min and max ScoreIdxs
model[[1]]$minScoreIdx <- minScoreIdx
model[[1]]$SV <- SV
#select only a number of models for averaging over the models
idxs <- elbow(SV, alpha = alpha) #include the indexed SV scores in the refined/pruned model
model2 <- processModel_local(model, idxs = idxs) #refined model
p_observed <- c()
p_calculated <- c()
midpoint <- c()
n <- c()
n_1 <- c()
for (i in 1:length(model2[[1]]$binNo)){
p_observed[i]<- model2[[1]]$binNo[[i]]$P_observed
p_calculated[i] <- model2[[1]]$binNo[[i]]$P #smoothed prevalence value
midpoint[i]<- model2[[1]]$binNo[[i]]$midpoint
n[i] <- model2[[1]]$binNo[[i]]$n
n_1[i] <-model2[[1]]$binNo[[i]]$n1
}
bin_no <- seq(1,length(midpoint))
#significance testing
p_values_binom <- unlist(apply(cbind(n_1, n),1,binom_for_histogram)) #pvalues for single bins, binom.test
binning_scheme <- data.frame(cbind(bin_no,midpoint,cases=n_1,all=n, prob_case=p_calculated,p_value=p_values_binom))
for (i in 1:nrow(binning_scheme)){
if(is.nan(binning_scheme[i,6])){
binning_scheme[i,7] <- "no value"
}
else if(binning_scheme[i,6]<0.001){
binning_scheme[i,7] <- "***"
}
else if(binning_scheme[i,6]<0.01){
binning_scheme[i,7] <- "**"
}
else if(binning_scheme[i,6]<0.05){
binning_scheme[i,7] <- "*"
}
else
binning_scheme[i,7] <- "ns"
}
colnames(binning_scheme)[7] <- c("significance")
#function has to return min/max for scaling
min <- min(predicted)
max <- max(predicted)
#quality markers calibration model
calibration_points <- binning_scheme$prob_case
calibration_points_sign <- binning_scheme$p_value <0.05
calibration_points_number <- length((binning_scheme$prob_case))
calibration_points_number_sign <- length((subset(binning_scheme$prob_case, binning_scheme$p_value<0.05)))
calibration_range <- range(binning_scheme$prob_case)
if(sum(calibration_points_sign) != 0){
calibration_range_sign <- range(subset(binning_scheme$prob_case, binning_scheme$p_value<0.05))
}
else{
calibration_range_sign <- 0
}
return(bbq=list(type="BBQ", model=model, prunedmodel=model2,
binnning_scheme=binning_scheme, min=min, max=max,
calibration_points=list(calibration_points=calibration_points,calibration_points_sign=calibration_points_sign),
calibration_range=list(calibration_range=calibration_range, calibration_range_sign=calibration_range_sign),
calibration_points_number=list(calibration_points_number=calibration_points_number, calibration_points_number_sign=calibration_points_number_sign)))
}
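# A minimal usage sketch on simulated scores (values chosen for illustration);
# note that the returned element is spelled "binnning_scheme" in this package.
set.seed(1)
pred_sim <- runif(200)
act_sim <- rbinom(200, 1, pred_sim)
bbq_sim <- build_BBQ(act_sim, pred_sim)
bbq_sim$binnning_scheme # per-bin smoothed prevalence estimates and p-values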
|
/scratch/gouwar.j/cran-all/cranData/CalibratR/R/build_BBQ.R
|
#' @title build_GUESS
#' @description This method builds a GUESS calibration model using the training set provided.
#' @param actual vector of observed class labels (0/1)
#' @param predicted vector of uncalibrated predictions
#' @return returns the trained GUESS model that can be used to calibrate a test set using the \code{\link{predict_GUESS}} method
#' @seealso
#' \code{\link[fitdistrplus]{denscomp}}
#' @rdname build_GUESS
#' @export
#' @importFrom fitdistrplus denscomp
#' @importFrom stats median
build_GUESS <- function(actual, predicted){
###local functions###
get_class_prob <- function(cases, controls){
#calculates proportion of cases and controls in data set to determine P(C)
n_cases <- length(cases)
n_controls <- length(controls)
all <- n_cases+n_controls
prob_cases <- n_cases/all
prob_controls <- n_controls/all
return(list(cases=prob_cases, controls=prob_controls))
}
get_LL <- function(distributions){
#ranks tested distributions by their logLL
#returns ranked list of distributions, and distribution with maximal LL
ll <- c()
log_list <- c()
extract_ll <- function (distribution){
ll <- distribution$loglik
}
plot_ll <- function (distribution){
fitdistrplus::llplot(distribution)
}
log_list <- lapply(distributions, extract_ll)
max <- which.max(unlist(log_list)) #find max logLL
return(list(distribution=log_list, best_fit=distributions[max][[1]]))
}
distribution_test <- function(data){
bool=0
z_scaled_data <- scale(data, center=TRUE, scale=F)[,1]
data_fit_norm <- fitdistrplus::fitdist(data, "norm") #normal distribution
data_fit_log <- fitdistrplus::fitdist(data, "logis") #logistic distribution
data_fit_t_z_trans <- fitdistrplus::fitdist(z_scaled_data, "t", start=list(df=10)) #t distribution
#test for distributions that only take input >=0
if(sum(data<0) == 0){
data <- data[!data==0] #fitting works better if data does not include 0 value (?)
data_fit_exp <- fitdistrplus::fitdist(data, "exp") #exponential distribution
data_fit_w <- fitdistrplus::fitdist(data, "weibull", start=list(shape=5, scale=0.5)) #weibull distribution
data_fit_g <- fitdistrplus::fitdist(data, "gamma") #gamma distribution
data_fit_ln <- fitdistrplus::fitdist(data, "lnorm") #lognormal distribution
names <- c("exponential", "Weibull","gamma", "lognormal")
bool=1
}
else{
names <- c()
distributions <- c()
}
if(bool==1){
summary <- list(summary(data_fit_norm),summary(data_fit_log), summary(data_fit_exp), summary(data_fit_w), summary(data_fit_g), summary(data_fit_ln), summary(data_fit_t_z_trans))
best_fit <- get_LL(summary)
return(list(distributions=summary, best_fit=best_fit$best_fit))
}
else{
summary <- list(summary(data_fit_norm),summary(data_fit_log), summary(data_fit_t_z_trans))
best_fit <- get_LL(summary)
return(list(distributions=summary, best_fit=best_fit$best_fit))
}
}
all <- data.frame(cbind(actual, predicted))
controls <- subset(all[,2],all[,1]==0)
cases <- subset(all[,2],all[,1]==1)
#function has to return min/max for scaling and mean and sd for Z-transformation for t-distribution fitting
min <- min(predicted)
max <- max(predicted)
mean_cases <- mean(cases)
sd_cases <- sd(cases)
mean_controls <- mean(controls)
sd_controls <- sd(controls)
#evaluate possible distributions for the two classes
distr_cases <- distribution_test(cases)
distr_controls <- distribution_test(controls)
#save distribution with max logLL in best_fit_Xx
best_fit_cases <- distr_cases$best_fit
best_fit_controls <- distr_controls$best_fit
dist_name_cases <- best_fit_cases$qdistname
dist_name_controls <- best_fit_controls$qdistname
#plot best distributions for cases and controls
fit_cases <- fitdistrplus::denscomp(best_fit_cases, main="Best fit: Cases",
legendtext=best_fit_cases$distname, demp=TRUE, datacol="firebrick3", plotstyle="ggplot")
fit_controls <- fitdistrplus::denscomp(best_fit_controls, main="Best fit: Controls",
legendtext=best_fit_controls$distname, demp=TRUE, datacol="darkolivegreen4", plotstyle="ggplot")
#get class probability P(C)
class_probs <- get_class_prob(cases, controls)
#significance testing
estimate_case <- best_fit_cases$estimate
list_estimate_case <- list()
for(i in 1:length(estimate_case)){
list_estimate_case[i] <- estimate_case[i]
}
estimate_control <- best_fit_controls$estimate
list_estimate_control <- list()
for(i in 1:length(estimate_control)){
list_estimate_control[i] <- estimate_control[i]
}
#define critical values (5%, 95%) for both distributions
t_crit_cases_l <- do.call(match.fun(dist_name_cases), c(0.05,list_estimate_case))
t_crit_cases_u <- do.call(match.fun(dist_name_cases), c(0.95,list_estimate_case))
t_crit_controls_l <- do.call(match.fun(dist_name_controls), c(0.05,list_estimate_control))
t_crit_controls_u <- do.call(match.fun(dist_name_controls), c(0.95,list_estimate_control))
#define critical values where both distributions are in their 5% most extreme values
t_crit <- c(crit_case_l=t_crit_cases_l,
crit_control_l=t_crit_controls_l,
crit_case_u=t_crit_cases_u,
crit_control_u=t_crit_controls_u)
#fraction of training predictions lying between the lower critical value of the controls and the upper critical value of the cases
sign_train_set <- sum(predicted>t_crit[[2]]& predicted<t_crit[[3]])/length(predicted)
return(list(type="GUESS", best_fit_cases=best_fit_cases, best_fit_controls=best_fit_controls, class_probs=class_probs, min=min, max=max,
mean_cases=mean_cases, sd_cases=sd_cases, mean_controls=mean_controls, sd_controls=sd_controls, t_crit=t_crit, sign_train_set=sign_train_set,
plot=list(cases=fit_cases,controls=fit_controls)))
}
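# A minimal usage sketch on simulated scores (values chosen for illustration):
set.seed(1)
scores_sim <- c(rnorm(100, 1.5), rnorm(100, 0))
labels_sim <- c(rep(1, 100), rep(0, 100))
guess_sim <- build_GUESS(labels_sim, scores_sim)
guess_sim$best_fit_cases$distname # distribution with the highest logLik for class 1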
|
/scratch/gouwar.j/cran-all/cranData/CalibratR/R/build_GUESS.R
|
#' @title build_hist_binning
#' @description calculate estimated probability per bin, input predicted and real score as numeric vector; builds a histogram binning model which can be used to calibrate uncalibrated predictions using the predict_histogramm_binning method
#' @param actual vector of observed class labels (0/1)
#' @param predicted vector of uncalibrated predictions
#' @param bins number of bins that should be used to build the binning model, Default: decide_on_break estimates optimal number of bins
#' @return returns the trained histogram model that can be used to calibrate a test set using the \code{\link{predict_hist_binning}} method
#' @rdname build_hist_binning
#' @details if the training set is smaller than the threshold (15 bins * 5 elements = 75), the number of bins is decreased
#' @importFrom graphics hist
#' @importFrom stats na.omit
build_hist_binning <- function(actual, predicted, bins=NULL){
##local functions###
decide_on_break <- function(predicted, breaks=15){
#if the training set is smaller than the threshold (15 bins * 5 elements = 75), decrease the number of bins depending on the training set size
if (length(predicted)<=75){
breaks <- floor(length(predicted)/6)
}
#only use 0,1,by breaks, if input is between 0 and 1
if(all(predicted<=1) && all(predicted>=0)){
histogram <- hist(predicted, breaks=seq(0,1,(1/breaks)), plot=FALSE)
#if there are more than 3 non-empty bins with too few elements for significance (fewer than 18) -> the number of breaks is decreased
while(sum(histogram$counts>0 & histogram$counts<18) > 3 && !breaks <= 7){
breaks <- breaks-1
histogram <- hist(predicted, breaks=seq(0,1,(1/breaks)), plot=FALSE)
}}
else {
histogram <- hist(predicted, breaks=breaks, plot=FALSE)
#if there are more than 3 non-empty bins with too few elements for significance (fewer than 18) -> the number of breaks is decreased
while(sum(histogram$counts>0 & histogram$counts<18) > 3 && !breaks <= 7){
breaks <- breaks-1
histogram <- hist(predicted, breaks=breaks, plot=FALSE)
}}
return(suggested_break_number=breaks)
}
if (is.null(bins)){
n_bins <- decide_on_break(predicted)
}
else
n_bins <- bins
predicted_real <- data.frame()
predicted_real[1:length(predicted),1] <- predicted
predicted_real[,2] <- actual
predicted_real <- na.omit(predicted_real)
histogram <- hist(predicted_real[,1], breaks=seq(0,1,(1/n_bins)), plot=FALSE, include.lowest = T)
true_bin <- data.frame()
true_bin[c(1:length(histogram$count)),1] <- seq(1,length(histogram$count))
true_bin[,2] <- 0 #true positives
true_bin[,3] <- 0 #no. of data in bin
true_bin[,4] <- 0 #correct y=1 diagnosis
colnames(predicted_real) <- c("ML score", "real score")
for(i in 1:nrow(predicted_real)){
for (j in 1:(length(histogram$breaks)-1)){
if (predicted_real[i,1]==histogram$breaks[1]){ #values with prob = 0 are put in bin 1
true_bin[1,3] <- true_bin[1,3] + 1
true_bin[1,2] <- true_bin[1,2] + predicted_real[i,2]
break
}
if (histogram$breaks[j] < predicted_real[i,1] && predicted_real[i,1]<= histogram$breaks[j+1]){
true_bin[j,3] <- true_bin[j,3] + 1
true_bin[j,2] <- true_bin[j,2] + predicted_real[i,2]
break
}
}
}
true_bin[,4] <- true_bin[,2]/true_bin[,3] #Probability for correct y=1 diagnosis
true_bin[,4][is.na(true_bin[,4])] <- 0
#significance testing
p_values_binom <- unlist(apply(cbind(true_bin[,2], true_bin[,3]),1,binom_for_histogram)) #pvalues for single bins, binom.test
true_bin[,5] <- p_values_binom
for (i in 1:nrow(true_bin)){
if(is.nan(true_bin[i,4])){
true_bin[i,6] <- "no value"
}
else if(true_bin[i,5]<0.001){
true_bin[i,6] <- "***"
}
else if(true_bin[i,5]<0.01){
true_bin[i,6] <- "**"
}
else if(true_bin[i,5]<0.05){
true_bin[i,6] <- "*"
}
else
true_bin[i,6] <- "ns"
}
colnames(true_bin) <- c("no bin", "true cases", "all", "prob_case", "p_value", "significance")
colnames(predicted_real) <- c("ML score", "real score")
min <- min(predicted)
max <- max(predicted)
#quality markers calibration model
calibration_points <- true_bin[,4]
calibration_points_sign <- true_bin[,5]<0.05
calibration_points_number <- length((true_bin[,4]))
calibration_points_number_sign <- length((subset(true_bin[,4], true_bin[,5]<0.05)))
calibration_range <- range(true_bin[,4])
if(sum(calibration_points_sign) != 0){
calibration_range_sign <- range(true_bin[,4][true_bin[,5]<0.05])
}
else{
calibration_range_sign <- 0
}
return(list(type="hist", histogram=histogram,probs_per_bin=true_bin[,4],
binnning_scheme=true_bin, min=min, max=max,
calibration_points=list(calibration_points=calibration_points,calibration_points_sign=calibration_points_sign),
calibration_range=list(calibration_range=calibration_range, calibration_range_sign=calibration_range_sign),
calibration_points_number=list(calibration_points_number=calibration_points_number, calibration_points_number_sign=calibration_points_number_sign)))
}
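# A minimal usage sketch on simulated [0, 1] scores (values chosen for
# illustration); the bin count is chosen by decide_on_break when bins=NULL.
set.seed(1)
pred_sim <- runif(300)
act_sim <- rbinom(300, 1, pred_sim)
hist_sim <- build_hist_binning(act_sim, pred_sim)
hist_sim$binnning_scheme # per-bin prevalence ("prob_case") with significance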
|
/scratch/gouwar.j/cran-all/cranData/CalibratR/R/build_hist_binning.R
|
#' @title hist_binning_CV
#' @description trains and evaluates the histogram binning calibration model using repeated \code{folds}-Cross-Validation (CV).
#' The \code{predicted} values are partitioned into n subsets. A histogram binning model is constructed on (n-1) subsets; the remaining set is used
#' for testing the model. All test set predictions are merged and used to compute error metrics for the model.
#' @param actual vector of observed class labels (0/1)
#' @param predicted vector of uncalibrated predictions
#' @param n_bins number of bins used in the histogram binning scheme, Default: 15
#' @param n_folds number of folds in the cross-validation, Default: 10
#' @param seed random seed to alternate the split of data set partitions
#' @param input specify if the input was scaled or transformed, scaled=1, transformed=2
#' @return list object containing the following components:
#' \item{error}{list object that summarizes discrimination and calibration errors obtained during the CV}
#' \item{type}{"hist"}
#' \item{probs_CV}{vector of calibrated predictions that was used during the CV}
#' \item{actual_CV}{respective vector of true values (0 or 1) that was used during the CV}
#' @rdname hist_binning_CV
hist_binning_CV <- function(actual, predicted, n_bins=15, n_folds=10, seed, input){
set.seed(seed)
x <- data.frame(cbind(actual, predicted))
x_cases <- subset(x, x[,1]==1)
x_controls <- subset(x, x[,1]==0)
fold_cases <- sample(cut(seq(1,nrow(x_cases)),breaks=n_folds,label=FALSE))
fold_controls <- sample(cut(seq(1,nrow(x_controls)),breaks=n_folds,label=FALSE))
y_cal <- list()
y_dis <- list()
list_calibrated_probs <- c()
list_actual <- c()
error_fold <- c()
hist_models <- list()
hist_models_rd <- list()
for(i in 1:n_folds){
trainIndexes_cases <- which(fold_cases!=i, arr.ind = TRUE)
trainIndexes_controls <- which(fold_controls!=i,arr.ind=TRUE)
trainData <- rbind(x_cases[trainIndexes_cases, ], x_controls[trainIndexes_controls,])
x_train <- format_values(x_cases[trainIndexes_cases, ], x_controls[trainIndexes_controls,], input=input)
testIndexes_cases <- which(fold_cases==i,arr.ind=TRUE)
testIndexes_controls <- which(fold_controls==i,arr.ind=TRUE)
testData <- rbind(x_cases[testIndexes_cases, ], x_controls[testIndexes_controls,])
x_test <- format_values(x_cases[testIndexes_cases, ], x_controls[testIndexes_controls,], input=input,
min=x_train$min, max=x_train$max, mean=x_train$mean)
hist <- build_hist_binning(x_train$formated_values[,1], x_train$formated_values[,2], n_bins)
calibrated_probs <- predict_hist_binning(hist, x_test$formated_values[,2])
list_calibrated_probs <- c(list_calibrated_probs, calibrated_probs$predictions)
list_actual <- c(list_actual,x_test$formated_values[,1])
hist_models[[i]] <- hist
}
y <- reliability_diagramm(list_actual, list_calibrated_probs)
y_cal <- y$calibration_error
y_dis <- y$discrimination_error
error_summary_CV <- list(calibration_error=y_cal, discrimination_error=y_dis,
mean_pred_per_bin=y$mean_pred_per_bin, accuracy_per_bin=y$accuracy_per_bin,
sign=y$sign)
return(list(error=error_summary_CV, type="hist", probs_CV=list_calibrated_probs, actual_CV=list_actual))
}
|
/scratch/gouwar.j/cran-all/cranData/CalibratR/R/build_hist_binning_CV.R
|
#' @title calibrate
#' @description Builds selected calibration models on the supplied trainings values \code{actual} and \code{predicted} and returns them
#' to the user. New test instances can be calibrated using the \code{\link{predict_calibratR}} function.
#' Returns cross-validated calibration and discrimination error values for the models if \code{evaluate_CV_error} is set to TRUE. Repeated cross-Validation can be time-consuming.
#' @author Johanna Schwarz
#' @param actual vector of observed class labels (0/1)
#' @param predicted vector of uncalibrated predictions
#' @param model_idx which calibration models should be implemented, 1=hist_scaled, 2=hist_transformed, 3=BBQ_scaled, 4=BBQ_transformed, 5=GUESS, Default: c(1, 2, 3, 4, 5)
#' @param evaluate_no_CV_error computes internal errors for calibration models that were trained on all available \code{actual}/\code{predicted} tuples. Testing is performed with the same set. Be careful to interpret those error values, as they are not cross-validated. Default: TRUE
#' @param evaluate_CV_error computes cross-validation error. \code{folds} times cross validation is repeated \code{n_seeds} times with changing seeds. The trained models and their calibration and discrimination errors are returned.
#' Evaluation of CV errors can take some time to compute, depending on the number of repetitions specified in \code{n_seeds}, Default: TRUE
#' @param folds number of folds in the cross-validation of the calibration model. If \code{folds} is set to 1, no CV is performed and no \code{summary_CV} can be calculated. Default: 10
#' @param n_seeds \code{n_seeds} determines how often random data set partition is repeated with varying seed. If \code{folds} is 1, \code{n_seeds} should be set to 1, too. Default: 30
#' @param nCores \code{nCores} how many cores should be used during parallelisation. Default: 4
#' @return A list object with the following components:
#' \item{calibration_models}{a list of all trained calibration models, which can be used in the \code{\link{predict_calibratR}} method.}
#' \item{summary_CV}{a list containing information on the CV errors of the implemented models}
#' \item{summary_no_CV}{a list containing information on the internal errors of the implemented models}
#' \item{predictions}{calibrated predictions for the original \code{predicted} values}
#' \item{n_seeds}{number of random data set partitions into training and test set for \code{folds}-times CV}
#' @details parallelised execution of random data set splits for the Cross-Validation procedure over \code{n_seeds}
#' @examples
#' ## Loading dataset in environment
#' data(example)
#' actual <- example$actual
#' predicted <- example$predicted
#'
#' ## Create calibration models
#' calibration_model <- calibrate(actual, predicted,
#' model_idx = c(1,2),
#' FALSE, FALSE, folds = 10, n_seeds = 1, nCores = 2)
#' @rdname calibrate
#' @export
#' @importFrom parallel makeCluster stopCluster
#' @import foreach
#' @importFrom doParallel registerDoParallel
calibrate <- function(actual, predicted,
model_idx=c(1,2,3,4,5),
evaluate_no_CV_error=TRUE,
evaluate_CV_error=TRUE,
folds=10,
n_seeds=30,
nCores = 4
){
set.seed(123)
if (length(actual) != length(predicted)){
stop("Please make sure, that the parameters actual and predicted are of the same length.")
}
if (any((unique(actual)!=1) & (unique(actual)!=0))){
stop("The parameter actual contains values other than 1 or 0. Please code your class labels accordingly.")
}
if(evaluate_CV_error==FALSE & (!is.null(folds)|| !is.null(n_seeds))){
warning("No Cross-Validation is performed, but parameters folds or n_seeds are specified. If you want to perform CV, please set evaluate_CV_error TRUE.")
}
predicted <- unname(predicted)
#original values
original_values <- list(actual=actual, predicted=predicted)
#build selected models on all data and predict for all data (=no CV)
models_final <- list()
cal_models_final <- list()
for (i in model_idx){
models_final <- c(models_final, calibrate_me(actual=actual, predicted=predicted, model_idx=i))
}
cal_models_final <- list(original_values=original_values, models_final=models_final)
predictions <- predict_calibratR(cal_models_final, predicted, nCores)
#performs x-fold CV and returns error values
if(evaluate_CV_error){
error <- c()
t0_error <- list()
t0_error[["calibration"]] <- list()
t0_error[["discrimination"]] <- list()
y <- 1
#uncalibrated
#parallize the foreach loop
NumberOfCluster <- nCores # how many jobs you want the computer to run at the same time
cl <- parallel::makeCluster(NumberOfCluster) # create the cluster; it is closed again with stopCluster() after the loop
doParallel::registerDoParallel(cl)
`%dopar%` <- foreach::`%dopar%`
comb <- function(x, ...) {
lapply(seq_along(x),
function(i) c(x[[i]], lapply(list(...), function(y) y[[i]])))
}
error <- foreach::foreach(i=seq(1,n_seeds,1), .packages = "CalibratR", .combine='comb',
.multicombine=TRUE, .init=list(list(), list(), list()),
.final = function(x) setNames(x, c("original", "scaled", "transformed"))) %dopar% {
original <- uncalibrated_CV(actual, predicted, n_folds=folds, seed=i, input=0)
scaled <- uncalibrated_CV(actual, predicted, n_folds=folds, seed=i, input=1)
transformed <- uncalibrated_CV(actual, predicted, n_folds=folds, seed=i, input=2)
return(list(original=original, scaled=scaled, transformed=transformed))
}
parallel::stopCluster(cl)
##uncalibrated predictions
for (model in error){
calibration_df <- data.frame()
discrimination_df <- data.frame()
for(i in seq(1,length(model),1)){
calibration_df <- rbind(calibration_df,unlist(model[[i]]$error$calibration_error))
discrimination_df <- rbind(discrimination_df,unlist(model[[i]]$error$discrimination_error))
}
colnames(discrimination_df) <- names(model[[1]]$error$discrimination_error)
colnames(calibration_df) <- names(model[[1]]$error$calibration_error)
t0_error$calibration[[names(error)[y]]] <- calibration_df
t0_error$discrimination[[names(error)[y]]] <- discrimination_df
y <- y+1
}
#build calibration models
y <- 1
error_calibrated <- calibrate_me_CV_errors(actual, predicted, model_idx, folds, n_seeds,nCores)
for(model in error_calibrated){
if(!length(model)==0){
calibration_df <- data.frame()
discrimination_df <- data.frame()
for(i in seq(1,length(model),1)){
calibration_df <- rbind(calibration_df,unlist(model[[i]]$error$calibration_error))
discrimination_df <- rbind(discrimination_df,unlist(model[[i]]$error$discrimination_error))
}
colnames(discrimination_df) <- names(model[[1]]$error$discrimination_error)
colnames(calibration_df) <- names(model[[1]]$error$calibration_error)
t0_error$calibration[[names(error_calibrated)[y]]] <- calibration_df
t0_error$discrimination[[names(error_calibrated)[y]]] <- discrimination_df
y <- y+1
}
else{
y <- y+1
}
}}
else{
error <- NULL
error_calibrated <- NULL
t0_error <- NULL
}
#calculates error values on training set
if(evaluate_no_CV_error){
training_error <- c()
y <- 1
error_values_no_CV <- list()
error_values_no_CV[["calibration"]] <- list()
error_values_no_CV[["discrimination"]] <- list()
for (i in predictions){
training_error <- c(training_error, list(reliability_diagramm(actual, i)))
}
names(training_error) <- names(predictions)
for (i in training_error){
error_values_no_CV$calibration[[names(training_error)[y]]] <- unlist(i$calibration_error)
error_values_no_CV$discrimination[[names(training_error)[y]]] <- unlist(i$discrimination_error)
y <- y+1
}
df_calibration_no_CV <- t(data.frame(error_values_no_CV$calibration))
df_discrimination_no_CV <- t(data.frame(error_values_no_CV$discrimination))
}
else{
df_calibration_no_CV <- NULL
df_discrimination_no_CV <- NULL
training_error <- NULL
}
res <- list(calibration_models=cal_models_final,
summary_CV=list(models=list(uncalibrated=error, calibrated=error_calibrated),error_models=t0_error,
folds=folds),
summary_no_CV=list(calibration_error=df_calibration_no_CV,
discrimination_error=df_discrimination_no_CV,
list_errors=training_error),
predictions=predictions,
n_seeds=n_seeds)
return(res)
}
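# A minimal usage sketch (values chosen for illustration): train models without
# CV, then calibrate new scores with predict_calibratR.
set.seed(123)
scores_sim <- c(rnorm(100, 1.5), rnorm(100, 0))
labels_sim <- c(rep(1, 100), rep(0, 100))
models_sim <- calibrate(labels_sim, scores_sim, model_idx = c(1, 3),
                        evaluate_no_CV_error = FALSE, evaluate_CV_error = FALSE,
                        folds = 10, n_seeds = 1, nCores = 1)
predict_calibratR(models_sim$calibration_models, rnorm(5, 0.75), nCores = 1)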
|
/scratch/gouwar.j/cran-all/cranData/CalibratR/R/calibratR.R
|
#' @title calibrate_me
#' @description trains calibration models on the training set of \code{predicted}/\code{actual} value pairs.\code{model_idx} specifies which models should be trained.
#' @param actual vector of observed class labels (0/1)
#' @param predicted vector of uncalibrated predictions
#' @param model_idx a single number from 1 to 5, indicating which calibration model should be implemented, 1=hist_scaled, 2=hist_transformed, 3=BBQ_scaled, 4=BBQ_transformed, 5=GUESS
#' @return depending on the value of \code{model_idx}, the respective calibration model is build on the input from \code{actual} and \code{predicted}
#' @rdname calibrate_me
calibrate_me <- function(actual, predicted, model_idx){
if (length(predicted)<=75){ #if the input set is too small, the bin number in the reliability diagram is decreased for all (!) models
breaks_rd <- floor(length(predicted)/6)
}
else
breaks_rd <- NULL
all <- data.frame(cbind(actual,unname(predicted)))
cases_all <- data.frame(subset(all, all[,1]==1))
control_all <- data.frame(subset(all, all[,1]==0))
x_original <- format_values(cases_all, control_all, 0)
x_scaled <- format_values(cases_all, control_all, 1)
x_transformed <- format_values(cases_all, control_all, 2)
switch(model_idx,
"1"= {
model <- build_hist_binning(x_scaled$formated_values[,1],x_scaled$formated_values[,2])
model$inputtype <- 1
model$model_idx <- model_idx
return(list(hist_scaled=model))
},
"2"= {
model <- build_hist_binning(x_transformed$formated_values[,1],x_transformed$formated_values[,2])
model$inputtype <- 2
model$model_idx <- model_idx
return(list(hist_transformed=model))
},
"3"= {
model <- build_BBQ(x_scaled$formated_values[,1],x_scaled$formated_values[,2])
model$inputtype <- 1
model$model_idx <- model_idx
return(list(BBQ_scaled=model))
},
"4"= {
model <- build_BBQ(x_transformed$formated_values[,1],x_transformed$formated_values[,2])
model$inputtype <- 2
model$model_idx <- model_idx
return(list(BBQ_transformed=model))
},
"5"={
model <- build_GUESS(x_original$formated_values[,1],x_original$formated_values[,2])
model$inputtype <- 0
model$model_idx <- model_idx
return(list(GUESS=model))
})}
|
/scratch/gouwar.j/cran-all/cranData/CalibratR/R/calibrate_me.R
|
#' @title calibrate_me_CV_errors
#' @description trains and evaluates calibration models using \code{n_seeds}-times repeated \code{folds}-Cross-Validation (CV).\code{model_idx} specifies which models should be trained.
#' \cr Model training and evaluation is repeated \code{n_seeds}-times with a different training/test set partition scheme for the CV each time.
#' @param actual vector of observed class labels (0/1)
#' @param predicted vector of uncalibrated predictions
#' @param model_idx which calibration models should be implemented, 1=hist_scaled, 2=hist_transformed, 3=BBQ_scaled, 4=BBQ_transformed, 5=GUESS
#' @param folds number of folds in the cross-validation, Default: 10
#' @param n_seeds \code{n_seeds} determines how often random data set partition is repeated with varying seed
#' @param nCores \code{nCores} how many cores should be used during parallelisation. Default: 4
#' @return returns all trained calibration models that were built during the \code{n_seeds}-times repeated \code{folds}-CV.
#' \cr Error values for each of the \code{n_seeds} CV runs are given.
#' @details parallelised execution over \code{n_seeds}
#' @rdname calibrate_me_CV_errors
#' @importFrom parallel makeCluster stopCluster
#' @import foreach
#' @importFrom stats setNames
#' @importFrom doParallel registerDoParallel
calibrate_me_CV_errors <- function(actual, predicted, model_idx, folds=10, n_seeds, nCores){
#parallize the foreach loop
NumberOfCluster <- nCores # how many jobs you want the computer to run at the same time
cl <- parallel::makeCluster(NumberOfCluster) # create the cluster; it is closed again with stopCluster() after the loop
doParallel::registerDoParallel(cl)
`%dopar%` <- foreach::`%dopar%`
#how many list() do I expect in my output
idx <- 0
names_model <- c()
if(any(model_idx==1)){
idx <- idx +1
names_model <- c(names_model,"hist_scaled")
}
if(any(model_idx==2)){
idx <- idx +1
names_model <- c(names_model,"hist_transformed")
}
if(any(model_idx==3)){
idx <- idx +2
names_model <- c(names_model,"BBQ_scaled_sel", "BBQ_scaled_avg")
}
if(any(model_idx==4)){
idx <- idx +2
names_model <- c(names_model,"BBQ_transformed_sel", "BBQ_transformed_avg")
}
if(any(model_idx==5)){
idx <- idx +2
names_model <- c(names_model,"GUESS_1", "GUESS_2")
}
comb <- function(x, ...) {
lapply(seq_along(x),
function(i) c(x[[i]], lapply(list(...), function(y) y[[i]])))
}
n <- length(seq(1,n_seeds,1))
i <- NULL
hist_scaled <- NULL
hist_transformed <- NULL
BBQ_scaled_sel <- NULL
BBQ_scaled_avg <- NULL
BBQ_transformed_sel <- NULL
BBQ_transformed_avg <- NULL
GUESS_1 <- NULL
GUESS_2 <- NULL
parallized_results <- foreach::foreach(i=seq(1,n_seeds,1), .packages = "CalibratR", .combine='comb',
.multicombine=TRUE, .init=rep(list(list()), idx),
.final = function(x) setNames(x, names_model)) %dopar% {
if(any(model_idx==1)){
hist_scaled <- hist_binning_CV(actual, predicted, n_folds=folds, seed=i, input=1)
}
if(any(model_idx==2)){
hist_transformed <- hist_binning_CV(actual, predicted, n_folds=folds, seed=i, input=2)
}
if(any(model_idx==3)){
BBQ_scaled_sel <- BBQ_CV(actual, predicted,0, n_folds=folds, seed=i, input=1)
BBQ_scaled_avg <- BBQ_CV(actual, predicted,1, n_folds=folds, seed=i, input=1)
}
if(any(model_idx==4)){
BBQ_transformed_sel <- BBQ_CV(actual, predicted,0, n_folds=folds, seed=i, input=2)
BBQ_transformed_avg <- BBQ_CV(actual, predicted,1, n_folds=folds, seed=i, input=2)
}
if(any(model_idx==5)){
GUESS_1 <- GUESS_CV(actual, predicted, n_folds=folds,1, seed=i, input=0)
GUESS_2 <- GUESS_CV(actual, predicted, n_folds=folds,2, seed=i, input=0)
}
list(hist_scaled=hist_scaled, hist_transformed=hist_transformed, BBQ_scaled_sel=BBQ_scaled_sel, BBQ_scaled_avg=BBQ_scaled_avg,
BBQ_transformed_sel=BBQ_transformed_sel, BBQ_transformed_avg=BBQ_transformed_avg, GUESS_1=GUESS_1, GUESS_2=GUESS_2)
}
parallel::stopCluster(cl)
return(error=parallized_results)
}
|
/scratch/gouwar.j/cran-all/cranData/CalibratR/R/calibrate_me_CV_errors_parallel.R
|
#' @title compare_models_visual
#' @description plots the calibration maps of all trained calibration models over a sequence of original ML scores so that the models can be compared visually.
#' @param models list object that contains the trained calibration models and the original values, as returned in the \code{calibration_models} component of \code{\link{calibrate}}
#' @param seq sequence for which the calibrated predictions should be plotted, Default: NULL
#' @return ggplot2 object
#' @details if no \code{seq} is given, about 100 equally spaced scores between the minimum and maximum of the original ML scores are evaluated.
#' @seealso
#' \code{\link[ggplot2]{ggplot}},\code{\link[ggplot2]{geom_line}},\code{\link[ggplot2]{aes}},\code{\link[ggplot2]{ylim}},\code{\link[ggplot2]{theme}},\code{\link[ggplot2]{labs}},\code{\link[ggplot2]{scale_color_brewer}}
#' \code{\link[reshape2]{melt}}
#' @rdname compare_models_visual
#' @importFrom ggplot2 ggplot geom_line aes ylim theme labs scale_color_brewer
#' @importFrom reshape2 melt
compare_models_visual <- function(models, seq=NULL){
max <- max(models$original_values$predicted)
min <- min(models$original_values$predicted)
#default: if no seq is given, evaluate from min to max value of original input score
if(is.null(seq)){
step_size <- (max-min)/100 #evaluate 100 scores
seq <- seq(min, max, step_size)
}
predictions <- predict_calibratR(models, seq, nCores=1)
predictions$original <- NULL
L1 <- NULL
value <- NULL
plot1 <- ggplot2::ggplot(cbind(seq,reshape2::melt(predictions)))+
ggplot2::geom_line(ggplot2::aes(x=seq, y=value, colour=L1), size=1)+
ggplot2::ylim(0, 1)+
ggplot2::theme(legend.position = "bottom")+
ggplot2::labs(title="Comparison of Calibration models", x = "original ML score", y = "calibrated prediction")+
ggplot2::scale_color_brewer(palette = "Paired", name=NULL)
return(plot1)
}
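# A minimal usage sketch: `models_sim` is assumed to be the result of a
# calibrate() run, as in the sketch at the end of calibratR.R.
# compare_models_visual(models_sim$calibration_models)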
|
/scratch/gouwar.j/cran-all/cranData/CalibratR/R/compare_models_visual.R
|
#' @title evaluate_discrimination
#' @description computes various discrimination error values, namely: sensitivity, specificity, accuracy, positive predictive value (ppv), negative predictive value (npv) and AUC
#' @param actual vector of observed class labels (0/1)
#' @param predicted vector of uncalibrated predictions
#' @param cutoff cut-off to be used for the computation of npv, ppv, sensitivity and specificity, Default: value that maximizes sensitivity and specificity (Youden-Index)
#' @return list object with the following components:
#' \item{sens}{sensitivity}
#' \item{spec}{specificity}
#' \item{acc}{accuracy}
#' \item{ppv}{positive predictive value}
#' \item{npv}{negative predictive value}
#' \item{cutoff}{cut-off that was used to compute the error values}
#' \item{auc}{AUC value}
#' @seealso
#' \code{\link[pROC]{roc}}
#' @rdname evaluate_discrimination
#' @importFrom pROC roc
#'
evaluate_discrimination <- function(actual, predicted, cutoff=NULL){
###local functions###
getAUC <- function(actual, predicted){
if (length(unique(actual))!=2||max(unique(actual))!=1){ #actual has to be 0 or 1
warning("strange input")
}
nTarget <- length(which(actual==1)) #how many y==1
nBackground <- length(which(actual!=1)) #how many y==0
#Rank data
R <- rank(predicted, ties.method = "average")
#Calculate AUC using Wilcoxon Signed Rank Test
AUC <- (sum(R[which(actual==1)])-(nTarget^2+nTarget)/2) / (nTarget*nBackground) #Ranksum
AUC <- max (AUC, 1-AUC)
}
discriminate <- function(i, cutoff){
#decides on i's 1/0 class membership by checking whether i is greater than the threshold value cutoff
if (i>cutoff){
class <- 1
}
else
class <- 0
return(class)
}
if(is.null(cutoff)){
roc <- pROC::roc(actual, predicted)
youden <- which.max(roc$sensitivities + roc$specificities-1) #calculate maximum of Youden Index
cutoff <- roc$thresholds[youden]
}
else{
youden <- cutoff
}
output_class <- sapply(predicted, discriminate, cutoff=cutoff)
true_positives <- which(actual==1)
true_negatives <- which(actual==0)
#sensitivity, specificity
sens <- sum(output_class[true_positives]==1)/length(true_positives)
spec <- sum(output_class[true_negatives]==0)/length(true_negatives)
false_positive <- sum(output_class[true_negatives]==1)/length(true_negatives)
false_negative <- sum(output_class[true_positives]==0)/length(true_positives)
ppv <- sum(output_class[true_positives]==1)/(sum(output_class[true_positives]==1)+sum(output_class[true_negatives]==1))
npv <- sum(output_class[true_negatives]==0)/(sum(output_class[true_positives]==0)+sum(output_class[true_negatives]==0))
all <- length(actual)
#AUC
auc <- getAUC(actual, predicted)
#accuracy
acc <- (sum(output_class[true_positives]==1)+sum(output_class[true_negatives]==0))/all
error_list <- list(sens=sens, spec=spec, acc=acc, ppv=ppv, npv=npv, cutoff=cutoff, auc=auc)
rounded_list <- lapply(error_list,FUN=round,3)
return(rounded_list)
}
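# A minimal usage sketch on simulated scores (values chosen for illustration);
# with cutoff=NULL the Youden-optimal cut-off is used.
set.seed(1)
scores_sim <- c(rnorm(100, 1.5), rnorm(100, 0))
labels_sim <- c(rep(1, 100), rep(0, 100))
evaluate_discrimination(labels_sim, scores_sim)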
|
/scratch/gouwar.j/cran-all/cranData/CalibratR/R/evaluate_discrimination.R
|
#' @title example
#' @description list object containing 1) simulated classifier scores for two classes, drawn from Gaussian distributions with
#' Normal(mean=1.5, sd=0) for class 1 and Normal(mean=0, sd=0) for class 0 instances; each class consists of 100 instances;
#' and 2) a test set of 100 instances
#' @name example
#' @format \code{predicted}=vector of 200 simulated classifier values; \code{actual}=their respective true class labels (0/1)
#' @docType data
#' @usage data(example)
#' @keywords datasets
NULL
|
/scratch/gouwar.j/cran-all/cranData/CalibratR/R/example.R
|
#' @title format_values
#' @description returns formatted input.
#' If specified, the uncalibrated input is mapped to the [0;1] range using scaling (\code{\link{scale_me}}) or transforming (\code{\link{transform_me}})
#' @param cases instances from class 1
#' @param control instances from class 0
#' @param input single integer (0, 1 or 2). specify if the input should be formatted (=0), formatted and scaled (=1)
#' or formatted and transformed (=2)
#' @param min min value of the original data set, default=calculated on input
#' @param max max value of the original data set, default=calculated on input
#' @param mean mean value of the original data set, default=calculated on input
#' @return list object with the following components:
#' \item{formated_values}{formatted input. If \code{input} is set to 1 (2), the input is additionally scaled (transformed) using the
#' method \code{\link{scale_me}} (\code{\link{transform_me}})}
#' \item{min}{minimum value among all instances}
#' \item{max}{maximum value among all instances}
#' \item{mean}{mean value among all instances}
#' @rdname format_values
format_values <- function(cases, control, input, min=NULL, max=NULL, mean=NULL){
simulation <- c(cases[,2], control[,2])
simulation_real <- c(cases[,1], control[,1])
#return min/max for scaling
if (is.null(max) || is.null(min)|| is.null(mean)){
min <- min(min(cases), min(control), na.rm=TRUE)
max <- max(max(cases), max(control), na.rm=TRUE)
mean <- mean(simulation, na.rm = TRUE)
}
if (input==0){
output <- cbind(simulation_real, simulation)
}
else
if (input==1){
output <- cbind(simulation_real, scale_me(simulation, min=min, max=max))
}
else
if (input==2){
output <- cbind(simulation_real, transform_me(simulation, mean=mean))
}
return(list(formated_values=output, min=min, max=max, mean=mean))
}
#' @title transform_me
#' @description maps all instances in \code{x_unscaled} to the [0;1] range using the equation:
#' \cr y=exp(x)/(1+exp(x))
#' @param x_unscaled vector of predictions
#' @param mean mean of \code{x_unscaled}
#' @return transformed values of \code{x_unscaled}
#' @details for values of x greater than about 700, exp(x) overflows to Inf and y becomes NaN. To avoid NaN values in the output, they are replaced by max(y) (for positive x) or min(y) (for negative x).
#' @rdname transform_me
transform_me <- function(x_unscaled, mean){
#center first, subtract mean of x_unscaled from all x_unscaled values to center around 0
x <- scale(x_unscaled, center=mean, scale=FALSE)[,]
#transform x
y <- exp(x)/(1+exp(x))
for (i in 1:length(y)){
if (is.nan(y[i]) && x[i]>0){
y[i] <- max(y, na.rm=TRUE)
}
if (is.nan(y[i]) && x[i]<0){
y[i] <- min(y, na.rm=TRUE)
}
}
return(y)
}
#' @title scale_me
#' @description maps all instances in \code{x} to the [0;1] range using the equation:
#' \cr y = (x-min)/(max-min)
#' \cr If no values for min and max are given, they are calculated per default as min=min(x) and max=max(x)
#' @param x vector of predictions
#' @param min minimum of \code{x}, Default: NULL
#' @param max maximum of \code{x}, Default: NULL
#' @return scaled values of \code{x}
#' @details if \code{x} is greater (smaller) than \code{max} (\code{min}), its calibrated prediction is set to 1 (0) and a warning is triggered.
#' @rdname scale_me
scale_me <- function(x, min=NULL, max=NULL){
if (is.null(max) || is.null(min)){
max <- max(x, na.rm=TRUE)
min <- min(x, na.rm=TRUE)
}
y <- (x-min)/(max-min)
  if(any(y<0)){
    y[(y<0)] <- 0
    warning("A new instance fell below the min value of the calibration training set and was set to 0")
  }
  if(any(y>1)){
    y[(y>1)] <- 1
    warning("A new instance exceeded the max value of the calibration training set and was set to 1")
  }
return(y)
}
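# Sketch of the clamping behaviour (hypothetical values): test scores outside
# the training range [min; max] are mapped to 0 or 1 with a warning.
#   train <- c(0.2, 0.5, 0.9)
#   scale_me(c(0.1, 0.5, 1.0), min = min(train), max = max(train))
#   # -> 0.0000000 0.4285714 1.0000000 (plus two warnings)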
|
/scratch/gouwar.j/cran-all/cranData/CalibratR/R/format_values.R
|
#' @title getECE
#' @description Expected Calibration Error (ECE); the predictions are sorted and partitioned into \code{n_bins} equal-frequency bins (10 by default). Per bin, the absolute difference between the mean of the observed class labels (0/1) and the mean of the predictions is computed and weighted by the empirical frequency of elements in bin i
#' @param actual vector of observed class labels (0/1)
#' @param predicted vector of uncalibrated predictions
#' @param n_bins number of bins of the underlying equal-frequency histogram, Default: 10
#' @return equal-frequency ECE value
#' @rdname getECE
#' @export
#' @importFrom graphics hist
getECE <- function(actual, predicted, n_bins=10){ #equal frequency bins
  labels <- actual
idx <- order(predicted)
pred_actual <- (cbind(predicted[idx], labels[idx]))
N <- nrow(pred_actual)
rest <- N%%n_bins
S <- 0
W <- c()
  B <- min(N,n_bins) #if the data set has fewer than n_bins elements, use that number of bins
  for (i in 1:B){ # i from 1 to B
if (i <= rest){ #put rest elements into each bin
group_pred <- (pred_actual[(((i-1)*ceiling(N/n_bins)+1) : (i*ceiling(N/n_bins))),1])
group_actual <- (pred_actual[(((i-1)*ceiling(N/n_bins)+1) : (i*ceiling(N/n_bins))),2])
}
else {
group_pred <- (pred_actual[((rest+(i-1)*floor(N/n_bins)+1) : (rest+i*floor(N/n_bins))),1])#group size=N/B
group_actual <- (pred_actual[((rest+(i-1)*floor(N/n_bins)+1) : (rest+i*floor(N/n_bins))),2])
}
n_ <- length(group_pred)
expected <- mean(group_pred) #mean of predictions in bin b
observed <- mean(group_actual) #true fraction of pos.instances = prevalence in bin b
    S[i] <- abs(observed-expected) #absolute difference between observed and predicted mean in bin i
    W[i] <- n_/N #empirical frequency of instances in bin i; roughly equal across bins with equal-frequency binning
}
res <- t(S)%*%W
return(as.numeric(res))
}
#' @title get_ECE_equal_width
#' @description Expected Calibration Error (ECE); the predictions are partitioned into \code{bins} equal-width bins (10 by default). Per bin, the absolute difference between the mean of the observed class labels (0/1) and the mean of the predictions is computed and weighted by the empirical frequency of elements in bin i
#' @param actual vector of observed class labels (0/1)
#' @param predicted vector of uncalibrated predictions
#' @param bins number of bins for the equal-width binning model
#' @return equal-width ECE value
#' @rdname get_ECE_equal_width
#' @importFrom graphics hist
get_ECE_equal_width <- function(actual, predicted, bins=10){ #equal width bins
pred_actual <- cbind(predicted, actual)
if(all(predicted<=1) && all(predicted>=0)){
hist_x <- hist(pred_actual[,1], breaks=seq(0,1,1/bins), plot=F)
}
else{
hist_x <- hist(pred_actual[,1], breaks=bins, plot=F)
}
breaks_y <- hist_x$breaks
y_true <- hist(subset(pred_actual[,1], pred_actual[,2]=="1"), breaks=breaks_y, plot=F)
divided <- cut(pred_actual[,1], breaks=c(hist_x$breaks), label = seq(1,length(y_true$mids)), include.lowest = T)
prediction_in_bin <- list()
expected <- c()
for (i in as.numeric(levels(divided))){
prediction_in_bin[[i]] <- pred_actual[which(divided==i),1]
expected[i] <- mean(prediction_in_bin[[i]]) #mean prediction in that bin
#expected[i] <- hist_x$mids[i] #hist mids as mean prediction in that bin
}
counts_all <- hist_x$counts
counts_true <- y_true$counts
zeros <- which(counts_all==0)
prevalence <- counts_true/counts_all
prevalence[zeros] <- 0 #set prevalence to 0 when no observations are in the bin
expected[zeros] <- hist_x$mids[zeros] #set expectation to the mid bin point, when no elements are in bin
S_2 <- abs(prevalence-expected)
W_2 <- counts_all/(length(predicted))
return(as.numeric(t(S_2)%*%W_2))
}
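# Sketch comparing the two ECE variants on simulated data (assumes the
# functions above are available, e.g. via a loaded CalibratR):
#   set.seed(1)
#   p <- runif(500)
#   y <- rbinom(500, 1, p)       # labels drawn from p -> p is well calibrated
#   getECE(y, p)                 # equal-frequency ECE, close to 0
#   get_ECE_equal_width(y, p)    # equal-width ECE, close to 0
#   getECE(y, p^3)               # systematically shrunken scores -> larger ECE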
|
/scratch/gouwar.j/cran-all/cranData/CalibratR/R/getECE.R
|
#' @title getMCE
#' @description Maximum Calibration Error (MCE), returns maximum calibration error for equal-frequency binning model
#' @param actual vector of observed class labels (0/1)
#' @param predicted vector of uncalibrated predictions
#' @param n_bins number of bins of the underlying equal-frequency histogram, Default: 10
#' @return equal-frequency MCE value
#' @rdname getMCE
#' @export
#' @importFrom graphics hist
getMCE <- function(actual, predicted, n_bins=10){
idx <- order(predicted)
pred_actual <- (cbind(predicted[idx], actual[idx]))
N <- nrow(pred_actual)
rest <- N%%n_bins
B <- min(N,n_bins)
S <- 0
W <- c()
  for (i in 1:B){ # i from 1 to B
if (i <= rest){ #put rest elements into each bin
group_pred <- (pred_actual[(((i-1)*ceiling(N/n_bins)+1) : (i*ceiling(N/n_bins))),1])
group_actual <- (pred_actual[(((i-1)*ceiling(N/n_bins)+1) : (i*ceiling(N/n_bins))),2])
}
else {
group_pred <- (pred_actual[((rest+(i-1)*floor(N/n_bins)+1) : (rest+i*floor(N/n_bins))),1])#group size=N/B
group_actual <- (pred_actual[((rest+(i-1)*floor(N/n_bins)+1) : (rest+i*floor(N/n_bins))),2])
}
n <- length(group_pred)
expected <- mean(group_pred) #mean of predictions in bin b
observed <- mean(group_actual) #true fraction of pos.instances = prevalence in bin b
    S[i] <- abs(observed-expected) #absolute difference between observed and predicted mean in bin i
    W[i] <- n/N #empirical frequency of instances in bin i; roughly equal across bins with equal-frequency binning
}
res <- max(S*W)
return(res)
}
#' @title get_MCE_equal_width
#' @description Maximum Calibration Error (MCE), returns maximum calibration error for equal-width binning model
#' @param actual vector of observed class labels (0/1)
#' @param predicted vector of uncalibrated predictions
#' @param bins number of bins for the binning model
#' @return equal-width MCE value
#' @rdname get_MCE_equal_width
#' @importFrom graphics hist
get_MCE_equal_width <- function(actual, predicted, bins=10){ #equal width bins
  labels <- actual
idx <- order(predicted)
pred_actual <- (cbind(predicted[idx], labels[idx]))
hist_x <- hist(pred_actual[,1],breaks=bins, plot=F)
breaks_y <- hist_x$breaks
y_true <- hist(subset(pred_actual[,1], pred_actual[,2]=="1"),breaks=breaks_y, plot=F)
divided <- cut(pred_actual[,1], breaks=c(hist_x$breaks),label = seq(1,length(y_true$mids)),include.lowest = T)
prediction_in_bin <- list()
expected <- c()
for (i in as.numeric(levels(divided))){
prediction_in_bin[[i]] <- pred_actual[which(divided==i),1]
#expected[i] <- hist_x$mids[i] #mean prediction in that bin
expected[i] <- mean(pred_actual[which(divided==i),1]) #mean prediction in that bin
}
counts_all <- hist_x$counts
counts_true <- y_true$counts
zeros <- which(counts_all==0)
prevalence <- counts_true/counts_all
prevalence[zeros] <- 0 #set prevalence to 0 when no observations are in the bin
expected[zeros] <- hist_x$mids[zeros] #set expectation to the mid bin point, when no elements are in bin
S_2 <- abs(prevalence-expected)
W_2 <- counts_all/(length(predicted))
return(max(S_2*W_2))
}
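# Sketch: MCE reports the worst weighted bin deviation instead of the
# weighted average over all bins (same simulated data as in the ECE sketch):
#   set.seed(1)
#   p <- runif(500); y <- rbinom(500, 1, p)
#   getMCE(y, p^3)               # equal-frequency variant
#   get_MCE_equal_width(y, p^3)  # equal-width variant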
|
/scratch/gouwar.j/cran-all/cranData/CalibratR/R/getMCE.R
|
#' @title getRMSE
#' @description calculates the root mean squared error (RMSE) between observed class labels and (calibrated) predictions in the test set
#' @param actual vector of observed class labels (0/1)
#' @param predicted vector of uncalibrated predictions
#' @return RMSE value
#' @rdname getRMSE
getRMSE <- function(actual, predicted){
res <- (((actual-predicted)%*%(actual-predicted)/length(actual))^0.5)
return(as.numeric(res))
}
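# The cross-product form above is algebraically the textbook RMSE; numeric
# check with toy vectors:
#   y <- c(0, 1, 1); p <- c(0.2, 0.7, 0.9)
#   all.equal(getRMSE(y, p), sqrt(mean((y - p)^2)))  # TRUE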
|
/scratch/gouwar.j/cran-all/cranData/CalibratR/R/getRMSE.R
|
#' @title get_Brier_score
#' @description calculates the Brier score as the mean squared difference between predictions and observed class labels. Additionally, class-conditional Brier scores are computed over the class 1 and the class 0 instances separately.
#' @param actual vector of observed class labels (0/1)
#' @param predicted vector of uncalibrated predictions
#' @return list object with the components \code{brier} (overall Brier score), \code{brier_1} (Brier score over the class 1 instances) and \code{brier_0} (Brier score over the class 0 instances)
#' @rdname get_Brier_score
get_Brier_score <- function(actual, predicted){
n <- length(actual)
  n_1 <- sum(actual==1) #number of class 1 instances
  n_0 <- sum(actual==0) #number of class 0 instances
sum <- 0
sum_0 <- 0
sum_1 <- 0
for (i in seq(1,n,1)){
diff <- abs((predicted[i]-actual[i]))^2
sum <- sum+diff
if(actual[i]==0){
sum_0 <- sum_0+diff
}
else
if(actual[i]==1){
sum_1 <- sum_1+diff
}
}
return(list(brier=sum/n, brier_1=sum_1/n_1, brier_0=sum_0/n_0))
}
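# Sketch: the overall Brier score equals mean((predicted - actual)^2); the
# class-wise scores condition on the true label (toy vectors):
#   y <- c(0, 0, 1, 1); p <- c(0.1, 0.4, 0.6, 0.9)
#   get_Brier_score(y, p)  # brier = 0.085, brier_1 = 0.085, brier_0 = 0.085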
|
/scratch/gouwar.j/cran-all/cranData/CalibratR/R/get_Brier_score.R
|
#' @title get_CLE_class
#' @description calculates the class-specific classification error CLE in the test set.
#' The method computes the deviation of the calibrated predictions of class 1 instances from their true value 1.
#' For class 0 instances, \code{get_CLE_class} computes the deviation from 0.
#' Class 1 CLE is 0 when all class 1 instances have a calibrated prediction of 1 regardless of potential miscalibration of class 0 instances.
#' CLE calculation is helpful when miscalibration and misclassification are more cost-sensitive for one class than for the other.
#' @param actual vector of observed class labels (0/1)
#' @param predicted vector of uncalibrated predictions
#' @param bins number of bins for the equal-width binning model, default=10
#' @return object of class list containing the following components:
#' \item{class_1}{CLE of class 1 instances}
#' \item{class_0}{CLE of class 0 instances}
#' @seealso
#' \code{\link[reshape2]{melt}}
#' \code{\link[ggplot2]{ggplot}},\code{\link[ggplot2]{geom_line}},\code{\link[ggplot2]{aes}},\code{\link[ggplot2]{position_dodge}},\code{\link[ggplot2]{labs}},\code{\link[ggplot2]{scale_colour_manual}}
#' @rdname get_CLE_class
#' @importFrom reshape2 melt
#' @importFrom ggplot2 ggplot geom_line aes position_dodge labs scale_colour_manual
#' @importFrom graphics hist
get_CLE_class <- function(actual, predicted, bins=10){ #equal width bins
pred_actual <- cbind(predicted, actual)
if(all(predicted<=1) && all(predicted>=0)){
hist_x <- hist(pred_actual[,1], breaks=seq(0,1,1/bins), plot=F)
}
else{
hist_x <- hist(pred_actual[,1], breaks=bins, plot=F)
}
breaks_y <- hist_x$breaks
y_true <- hist(subset(pred_actual[,1], pred_actual[,2]=="1"), breaks=breaks_y, plot=F)
divided <- cut(pred_actual[,1], breaks=c(hist_x$breaks), label = seq(1,length(y_true$mids)), include.lowest = T)
divided_0 <- cut(pred_actual[,1][pred_actual[,2]==0], breaks=c(hist_x$breaks), label = seq(1,length(y_true$mids)), include.lowest = T)
divided_1 <- cut(pred_actual[,1][pred_actual[,2]==1], breaks=c(hist_x$breaks), label = seq(1,length(y_true$mids)), include.lowest = T)
prediction_in_bin <- list()
expected <- c()
prediction_in_bin_0 <- list()
expected_0 <- c()
prediction_in_bin_1 <- list()
expected_1 <- c()
for (i in as.numeric(levels(divided))){
prediction_in_bin[[i]] <- pred_actual[which(divided==i),1]
expected[i] <- mean(prediction_in_bin[[i]]) #mean prediction in that bin
prediction_in_bin_0[[i]] <- subset(pred_actual,pred_actual[,2]==0)[which(divided_0==i),1]
expected_0[i] <- mean(prediction_in_bin_0[[i]]) #mean prediction in that bin
prediction_in_bin_1[[i]] <- subset(pred_actual,pred_actual[,2]==1)[which(divided_1==i),1]
expected_1[i] <- mean(prediction_in_bin_1[[i]]) #mean prediction in that bin
}
counts_all <- hist_x$counts
counts_true <- y_true$counts
zeros <- which(counts_all==0)
prevalence <- counts_true/counts_all
prevalence[zeros] <- 0 #set prevalence to 0 when no observations are in the bin
expected[zeros] <- hist_x$mids[zeros] #set expectation to the mid bin point, when no elements are in bin
S_2 <- abs(prevalence-expected)
W_2 <- counts_all/(length(predicted))
expected_0[!is.finite(expected_0)] <- 0
expected_1[!is.finite(expected_1)] <- 0
S2_1 <- abs(1-expected_1)
S2_0 <- abs(0-expected_0)
#weighing adapted for class 1
W_2_1_all <- counts_true/(sum(pred_actual[,2]=="1")) #add up to 1
#weighing adapted for class 0
W_2_0_all <- (counts_all-counts_true)/(sum(pred_actual[,2]=="0")) #add up to 1
ECE_per_bin <- (S_2*W_2)
ECE <- sum(ECE_per_bin)
CLE_per_bin <- (S2_1*W_2_1_all)+(S2_0*W_2_0_all)
CLE <- sum(CLE_per_bin)
CLE_per_bin_1 <- (S2_1*W_2_1_all)
CLE_1 <- sum(CLE_per_bin_1)
CLE_per_bin_0 <- (S2_0*W_2_0_all)
CLE_0 <- sum(CLE_per_bin_0)
#Visualisation of CLE class errors
bins_1 <- S2_1*W_2_1_all
bins_0 <- S2_0*W_2_0_all
# df <- reshape2::melt(cbind(CLE_class1=bins_1,CLE_class0=bins_0, prop_0=W_2_0_all, prop_1=W_2_1_all, ECE_all=ECE_per_bin))
# plot1 <- ggplot2::ggplot()+
# ggplot2::geom_line(ggplot2::aes(x=df$Var1, y=(df$value), colour=df$Var2), position = ggplot2::position_dodge(width = 0.2))+
# ggplot2::labs(x="bin number", y="CLE")+
# ggplot2::scale_colour_manual(values=c("firebrick3", "darkolivegreen4", "cyan3", "grey", "black"), name = NULL)
#show(plot1)
return(list(class_1=as.numeric(t(S2_1)%*%W_2_1_all),
class_0=as.numeric(t(S2_0)%*%W_2_0_all)))
}
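# Sketch of the asymmetry: if every class 1 instance scores exactly 1, the
# class 1 CLE is 0 no matter how poorly class 0 is calibrated (toy vectors):
#   y <- c(rep(1, 5), rep(0, 5))
#   p <- c(rep(1, 5), rep(0.6, 5))
#   get_CLE_class(y, p)  # class_1 = 0, class_0 = 0.6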
|
/scratch/gouwar.j/cran-all/cranData/CalibratR/R/get_CLE.R
|
#' @title uncalibrated_CV
#' @description performs \code{n_folds}-fold CV in which the test set is only input-preprocessed; no calibration model is trained or evaluated in this method.
#' The \code{predicted} values are partitioned into n subsets. The training set is constructed on (n-1) subsets; the remaining set is used
#' for testing. Since no calibration model is used in this method, the test set predictions are only input-preprocessed (either scaled or transformed, depending on \code{input}).
#' All test set predictions are merged and used to compute error metrics for the input-preprocessing methods.
#' @param actual vector of observed class labels (0/1)
#' @param predicted vector of uncalibrated predictions
#' @param n_folds number of folds for the cross-validation, Default: 10
#' @param seed random seed to alternate the split of data set partitions
#' @param input specify if the input was scaled or transformed, scaled=1, transformed=2
#' @return list object containing the following components:
#' \item{error}{list object that summarizes discrimination and calibration errors obtained during the CV}
#' \item{type}{"uncalibrated"}
#' \item{probs_CV}{vector of input-preprocessed predictions that was used during the CV}
#' \item{actual_CV}{respective vector of true values (0 or 1) that was used during the CV}
#' @rdname uncalibrated_CV
uncalibrated_CV <- function(actual, predicted, n_folds=10, seed, input){
set.seed(seed)
x <- data.frame(cbind(actual, predicted))
x_cases <- subset(x, x[,1]==1)
x_controls <- subset(x, x[,1]==0)
fold_cases <- sample(cut(seq(1,nrow(x_cases)),breaks=n_folds,label=FALSE))
fold_controls <- sample(cut(seq(1,nrow(x_controls)),breaks=n_folds,label=FALSE))
  list_probs <- c()
  list_actual <- c()
for(i in 1:n_folds){
trainIndexes_cases <- which(fold_cases!=i, arr.ind = TRUE)
trainIndexes_controls <- which(fold_controls!=i,arr.ind=TRUE)
x_train <- format_values(x_cases[trainIndexes_cases, ], x_controls[trainIndexes_controls,], input=input)
testIndexes_cases <- which(fold_cases==i,arr.ind=TRUE)
testIndexes_controls <- which(fold_controls==i,arr.ind=TRUE)
x_test <- format_values(x_cases[testIndexes_cases, ], x_controls[testIndexes_controls,], input=input,
min=x_train$min, max=x_train$max, mean=x_train$mean)
list_probs <- c(list_probs, x_test$formated_values[,2])
list_actual <- c(list_actual, x_test$formated_values[,1])
}
y <- reliability_diagramm(list_actual, list_probs)
y_cal <- y$calibration_error
y_dis <- y$discrimination_error
error_summary_CV <- list(calibration_error=y_cal, discrimination_error=y_dis,
mean_pred_per_bin=y$mean_pred_per_bin, accuracy_per_bin=y$accuracy_per_bin,
sign=y$sign)
  return(list(error=error_summary_CV, type="uncalibrated", probs_CV=list_probs, actual_CV=list_actual))
}
|
/scratch/gouwar.j/cran-all/cranData/CalibratR/R/no_calibration_CV.R
|
#' @title plot_class_distributions
#' @description plots the conditional class probabilities P(x|C) returned by the GUESS_1 or GUESS_2 model. Which GUESS model is plotted can be specified in \code{pred_idx}.
#' @param build_guess_object output from build_GUESS()
#' @param pred_idx if \code{pred_idx}=1 GUESS_1 is plotted; if \code{pred_idx}=2 GUESS_2 is plotted
#' @return ggplot object that visualises the estimated class densities returned by GUESS_1 or GUESS_2
#' @seealso
#' \code{\link[reshape2]{melt}}
#' \code{\link[ggplot2]{ggplot}},\code{\link[ggplot2]{geom_line}},\code{\link[ggplot2]{aes}},\code{\link[ggplot2]{scale_colour_manual}},\code{\link[ggplot2]{theme}},\code{\link[ggplot2]{labs}},\code{\link[ggplot2]{geom_vline}},\code{\link[ggplot2]{geom_text}}
#' @rdname plot_class_distributions
#' @importFrom reshape2 melt
#' @importFrom ggplot2 ggplot geom_line aes scale_colour_manual theme labs geom_vline geom_text
plot_class_distributions <- function(build_guess_object, pred_idx){
min <- build_guess_object$min
max <- build_guess_object$max
x <- seq(min, max, 0.01)
density <- predict_GUESS(build_guess_object, x, density_evaluation=pred_idx, TRUE)
density_case <- density$dens_case
density_controls <- density$dens_controls
Var2 <- NULL
value <- NULL
df <- cbind(x,reshape2::melt(cbind(density_controls, density_case)))
if(build_guess_object$t_crit[[1]]>build_guess_object$t_crit[[4]]){
plot1 <- ggplot2::ggplot()+
ggplot2::geom_line(data=data.frame(df),mapping=ggplot2::aes(x, y=value, colour=Var2))+
ggplot2::scale_colour_manual(values=c("darkolivegreen4","firebrick3"), name="Group",labels=c("Control","Case"))+
ggplot2::theme(legend.position = "bottom")+
ggplot2::labs(subtitle="Controls vs. Cases", x = "original ML score", y = "calibrated prediction")+
ggplot2::geom_vline(mapping=ggplot2::aes(xintercept=build_guess_object$t_crit[[2]]), colour="grey", size=1, linetype=2)+
ggplot2::geom_vline(mapping=ggplot2::aes(xintercept=build_guess_object$t_crit[[3]]), colour="grey", size=1, linetype=2)+
ggplot2::geom_vline(mapping=ggplot2::aes(xintercept=build_guess_object$t_crit[[1]]), colour="grey", size=1, linetype=2)+
ggplot2::geom_vline(mapping=ggplot2::aes(xintercept=build_guess_object$t_crit[[4]]), colour="grey", size=1, linetype=2)+
ggplot2::geom_text(ggplot2::aes(x=build_guess_object$t_crit[[2]], y=max(df$value),label=("significance\n boundaries")),
nudge_x=0.05,vjust = "inward", hjust = "inward", size=3)
}
else
plot1 <- ggplot2::ggplot()+
ggplot2::geom_line(data=data.frame(df),mapping=ggplot2::aes(x, y=value, colour=Var2))+
ggplot2::scale_colour_manual(values=c("darkolivegreen4","firebrick3"), name="Group",labels=c("Control","Case"))+
ggplot2::theme(legend.position = "bottom")+
ggplot2::labs(subtitle="Controls vs. Cases", x = "original ML score", y = "calibrated prediction")+
ggplot2::geom_vline(mapping=ggplot2::aes(xintercept=build_guess_object$t_crit[[2]]), colour="grey", size=1, linetype=2)+
ggplot2::geom_vline(mapping=ggplot2::aes(xintercept=build_guess_object$t_crit[[3]]), colour="grey", size=1, linetype=2)+
ggplot2::geom_text(ggplot2::aes(x=build_guess_object$t_crit[[2]], y=max(df$value),label=("significance\n boundaries")),
nudge_x=0.05,vjust = "inward", hjust = "inward", size=3)
return(plot1)
}
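# Usage sketch; the object path below mirrors the structure accessed in
# plot_model() and is an assumption, not a documented API:
#   data(example)
#   guess_model <- example$calibration_model$calibration_models$models$GUESS
#   plot_class_distributions(guess_model, pred_idx = 2)  # densities for GUESS_2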
|
/scratch/gouwar.j/cran-all/cranData/CalibratR/R/plot_class_distributions.R
|
#' @title plot_model
#' @description this method visualises all implemented calibration models as mapping functions between original ML scores (x-axis) and
#' calibrated predictions (y-axis)
#' @param calibration_model output from the \code{\link{calibrate}} method.
#' @param seq sequence of ML scores over which the mapping function should be evaluated, Default: 100 scores from the minimum to the maximum of the original ML scores
#' @return ggplot object
#' @seealso
#' \code{\link[reshape2]{melt}}
#' \code{\link[ggplot2]{ggplot}},\code{\link[ggplot2]{geom_line}},\code{\link[ggplot2]{aes}},\code{\link[ggplot2]{ylim}},\code{\link[ggplot2]{scale_colour_manual}},\code{\link[ggplot2]{theme}},\code{\link[ggplot2]{labs}},\code{\link[ggplot2]{geom_text}},\code{\link[ggplot2]{geom_vline}}
#' @rdname plot_model
#' @importFrom reshape2 melt
#' @importFrom ggplot2 ggplot geom_line aes ylim scale_colour_manual theme labs geom_text geom_vline
plot_model <- function(calibration_model, seq=NULL){
###local function###
back_to_ML_scores <- function(midpoints, seq, score){
idx <- c()
for (i in 1:length(midpoints)){
idx[i] <- which.min(abs(midpoints[i]-score))
}
return(seq[idx])
}
max <- max(calibration_model$original_values$predicted)
min <- min(calibration_model$original_values$predicted)
#default: if no seq is given, evaluate from min to max value of original input score
if(is.null(seq)){
step_size <- (max-min)/100 #evaluate 100 scores
seq <- seq(min, max, step_size)
}
prediction_all <- predict_calibratR(calibration_model, seq, nCores=1)
scaled <- scale_me(seq)
transformed <- transform_me(seq, mean=mean(seq))
idx <- 1
plot.list <- list()
Var2 <- NULL
value <- NULL
for(i in prediction_all){
control <- 1-i
df <- cbind(seq,reshape2::melt(cbind(i, control), measure.vars=c("i","control")))
plot_ori <- ggplot2::ggplot()+
ggplot2::geom_line(data=data.frame(df),mapping=ggplot2::aes(x=seq, y=value, colour=Var2))+
ggplot2::ylim(0, 1.05)+
ggplot2::scale_colour_manual(values=c("firebrick3", "darkolivegreen4"), name="Group",labels=c("Case","Control") )+
ggplot2::theme(legend.position = "right")+
ggplot2::labs(title=names(prediction_all$predictions)[idx], x = "original ML score", y = "calibrated prediction")
    #add model-specific significance values
model_pred <- names(prediction_all)[idx]
if(model_pred=="BBQ_scaled_sel"|model_pred=="BBQ_scaled_avg"){
z <- back_to_ML_scores(calibration_model$models$BBQ_scaled$binnning_scheme$midpoint, seq, scaled)
plot.list[[model_pred]] <- plot_ori +
ggplot2::geom_text(mapping=ggplot2::aes(x=as.numeric(z),
y=as.numeric(calibration_model$models$BBQ_scaled$binnning_scheme$prob_case)+0.03, label=(calibration_model$models$BBQ_scaled$binnning_scheme$significance)))
}
else if(model_pred=="BBQ_transformed_sel"|model_pred=="BBQ_transformed_avg"){
z <- back_to_ML_scores(calibration_model$models$BBQ_transformed$binnning_scheme$midpoint, seq, transformed)
plot.list[[model_pred]] <- plot_ori +
ggplot2::geom_text(mapping=ggplot2::aes(x=as.numeric(z),
y=as.numeric(calibration_model$models$BBQ_transformed$binnning_scheme$prob_case)+0.03, label=calibration_model$models$BBQ_transformed$binnning_scheme$significance))
}
else if (model_pred=="hist_scal"){
z <- back_to_ML_scores(calibration_model$models$hist_scaled$histogram$mids, seq, scaled)
plot.list[[model_pred]] <- plot_ori +
ggplot2::geom_text(mapping=ggplot2::aes(x=as.numeric(z),
y=as.numeric(calibration_model$models$hist_scaled$probs_per_bin)+0.03, label=calibration_model$models$hist_scaled$binnning_scheme$significance))
}
else if (model_pred=="hist_trans"){
z <- back_to_ML_scores(calibration_model$models$hist_transformed$histogram$mids, seq, transformed)
plot.list[[model_pred]] <- plot_ori +
ggplot2::geom_text(mapping=ggplot2::aes(x=as.numeric(z),
y=as.numeric(calibration_model$models$hist_transformed$probs_per_bin)+0.03, label=calibration_model$models$hist_transformed$binnning_scheme$significance))
}
else if (model_pred=="GUESS_1"|model_pred=="GUESS_2"){
if (calibration_model$models$GUESS$t_crit[1]>calibration_model$models$GUESS$t_crit[4]){
plot.list[[model_pred]] <- plot_ori +
ggplot2::geom_vline(mapping=ggplot2::aes(xintercept=calibration_model$models$GUESS$t_crit[2]), size=1, linetype=2)+
ggplot2::geom_vline(mapping=ggplot2::aes(xintercept=calibration_model$models$GUESS$t_crit[3]), size=1, linetype=2)+
ggplot2::geom_vline(mapping=ggplot2::aes(xintercept=calibration_model$models$GUESS$t_crit[1]), size=1, linetype=2)+
ggplot2::geom_vline(mapping=ggplot2::aes(xintercept=calibration_model$models$GUESS$t_crit[4]), size=1, linetype=2)+
ggplot2::geom_text(ggplot2::aes(x=calibration_model$models$GUESS$t_crit[2], y=0.9,label=("significance\n boundaries")), cex=0.6)
}
else
plot.list[[model_pred]] <- plot_ori +
ggplot2::geom_vline(mapping=ggplot2::aes(xintercept=calibration_model$models$GUESS$t_crit[2]), size=1, linetype=2)+
ggplot2::geom_vline(mapping=ggplot2::aes(xintercept=calibration_model$models$GUESS$t_crit[3]), size=1, linetype=2)+
ggplot2::geom_text(ggplot2::aes(x=calibration_model$models$GUESS$t_crit[2], y=0.9,label=("significance\n boundaries")), cex=0.6)
}
else
plot.list[[model_pred]] <- plot_ori
idx <- idx+1
}
return(plot.list)
}
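# Usage sketch (assuming the bundled 'example' data set, analogous to the
# predict_calibratR() example):
#   data(example)
#   plots <- plot_model(example$calibration_model$calibration_models)
#   plots$GUESS_1  # mapping curve of the GUESS_1 model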
|
/scratch/gouwar.j/cran-all/cranData/CalibratR/R/plot_model.R
|
#' @title predict_BBQ
#' @description returns calibrated predictions for the instances \code{new} using the trained BBQ calibration model \code{bbq}. Predictions are obtained either from the single best binning model (selecting) or as a score-weighted average over all binning models (averaging).
#' @param bbq output from the \code{\link{build_BBQ}} method
#' @param new vector of uncalibrated probabilities
#' @param option either 1 or 0; averaging=1, selecting=0
#' @return a list object containing the following components:
#' \item{predictions}{contains a vector of calibrated predictions}
#' \item{pred_idx}{which option was used (averaging or selecting)}
#' \item{significance_test_set}{the percentage of \code{new} instances that was evaluated using significant prediction estimates}
#' \item{pred_per_bin}{number of instances \code{new} in each bin of the selected model}
#' @details Based on the paper (and MATLAB code) "Obtaining Well Calibrated Probabilities Using Bayesian Binning" by Naeini, Cooper and Hauskrecht; https://www.ncbi.nlm.nih.gov/pmc/articles/PMC4410090/
#' @rdname predict_BBQ
predict_BBQ <- function(bbq, new, option){
###local functions###
getHistPr <- function(histModel, cutPoints, new){
N <- length(new) #new elements to be predicted
B <- length(histModel) #how many bins are in the model
cutPoints <- c(0,cutPoints,1)
res <- rep(0,N)
for (i in 1:N){ #for each new element N
x <- new[i]
minIdx <- 1
maxIdx <- B+1
#in which bin does my element belong?
while ((maxIdx - minIdx)>1){
        midIdx <- floor((minIdx+maxIdx)/2) #binary search: start looking in the middle
if(x>cutPoints[midIdx]){
minIdx <- midIdx
}
else
if(x < cutPoints[midIdx]){
maxIdx <- midIdx
}
else{
minIdx <- midIdx
break
}
}
idx <- minIdx
      res[i] <- histModel[[idx]]$P #assign class probability P of bin idx to the result for element i
      #handle duplicate bins: average P over preceding bins with identical boundaries
      cnt <- 1
      k <- idx - 1
      while (k>=1){
        if (histModel[[k]]$min==histModel[[idx]]$min && histModel[[k]]$max==histModel[[idx]]$max){
          res[i] <- res[i] + histModel[[k]]$P
          k <- k-1 #step back to the previous bin
          cnt <- cnt+1
        }
        else
          break
      }
      res[i] <- res[i]/cnt
}
return(res)
}
getMA <- function(BBQ_Model, x){ #get Model average
N <- length(BBQ_Model) #how many models
p <- rep(0,N)
SV <- BBQ_Model[[1]]$SV #all the scores for all models
for(i in 1:N){ #get the probs for the new prediction from all evaluated models
p[i] <- getHistPr(BBQ_Model[[i]]$binNo, BBQ_Model[[i]]$cutPoints, x)
}
#output average p
res <- (t(SV)%*%p)/sum(SV) #transpose and matrix multiplication
}
out <- rep(0, length(new))
BBQ_Model <- bbq$prunedmodel
if (option==1){#option for averaging
for (i in 1:length(new)){
out[i] <- getMA(BBQ_Model, new[i])
}
#percentage of significant predictions for test set if best model is used
sign_test_set <- NULL
new_bin <- NULL
}
if(option==0){#option for selection
for (i in 1:length(new)){
out[i] <- getHistPr(BBQ_Model[[1]]$binNo, BBQ_Model[[1]]$cutPoints, new[i])
}
#percentage of significant predictions for test set if best model is used
significant_bins <- subset(bbq$binnning_scheme$bin_no, bbq$binnning_scheme$p_value<0.05)
new_bin <- cut(new, c(0,BBQ_Model[[1]]$cutPoints,1),labels = seq(1,length(bbq$binnning_scheme$midpoint)),include.lowest = T)
sign_test_set <- sum(table(new_bin)[significant_bins])/(sum(table(new_bin)))
}
return(list(predictions=out, pred_idx=option, significance_test_set=sign_test_set, pred_per_bin=table(new_bin)))
}
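# Note: the while-loop in getHistPr() is a hand-rolled binary search over the
# padded bin boundaries; base R's findInterval() performs the same lookup on
# sorted break points:
#   cutPoints <- c(0.3, 0.7)
#   findInterval(c(0.1, 0.5, 0.9), c(0, cutPoints, 1))  # -> 1 2 3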
|
/scratch/gouwar.j/cran-all/cranData/CalibratR/R/predict_BBQ.R
|
#' @title predict_GUESS
#' @description returns calibrated predictions for the instances \code{new} using the trained GUESS calibration model \code{build_guess_object}.
#' Two different evaluation methods are available.
#' Method 1: returns the p-value for the score \code{new} under the distribution that is handed over in the \code{build_guess_object}
#' Method 2: returns the probability density value for the score \code{new} under the distribution that is handed over in the \code{build_guess_object}
#' @param build_guess_object output from the \code{\link{build_GUESS}} method
#' @param new vector of uncalibrated probabilities
#' @param density_evaluation which density evaluation method should be used to calculate probabilities, Default: 2
#' @param return_class_density if set to TRUE, class densities p(x|class) are returned, Default: FALSE
#' @return a list object containing the following components:
#' \item{predictions}{contains a vector of calibrated predictions}
#' \item{pred_idx}{which density evaluation method was used}
#' \item{significance_test_set}{the percentage of \code{new} instances that was evaluated using significant prediction estimates}
#' \item{dens_case}{a vector containing the p(x|case) values}
#' \item{dens_controls}{a vector containing the p(x|control) values}
#' @details \code{dens_case} and \code{dens_controls} are only returned when \code{return_class_density} is set to TRUE
#' @rdname predict_GUESS
#' @export
predict_GUESS <- function(build_guess_object, new, density_evaluation=2, return_class_density=FALSE){
###local function####
evaluate_density_1 <- function(distr, new){
pdistname <- distr$pdistname #use pdistname
estimate <- distr$estimate
list_estimate <- list()
for(i in 1:length(estimate)){
list_estimate[i] <- estimate[i]
}
p_case <- do.call(match.fun(pdistname), c(new,list_estimate))
return(p_case)
}
evaluate_density_2 <- function(distr, new){
ddistname <- distr$ddistname
estimate <- distr$estimate
list_estimate <- list()
for(i in 1:length(estimate)){
list_estimate[i] <- estimate[i]
}
density <- do.call(match.fun(ddistname), c(new,list_estimate))
return(density)
}
new_1 <- new
new_2 <- new
out <- rep(0, length(new))
dens_cases <- rep(0, length(new))
dens_controls <- rep(0, length(new))
best_fit_cases <- build_guess_object$best_fit_cases
best_fit_controls <- build_guess_object$best_fit_controls
class_probs <- build_guess_object$class_probs
#scale input first if data was z-scaled for t distribution
if(best_fit_cases$distname=="t"){
new_1 <- scale(new, center = build_guess_object$mean_case, scale=F)
}
if(best_fit_controls$distname=="t"){
new_2 <- scale(new, center = build_guess_object$mean_control, scale=F)
}
#calculate P(C)
class_prob_case <- class_probs$cases
class_prob_control <- class_probs$controls
#for rebalancing set both priors to 0.5
#class_prob_case <- 0.5
#class_prob_control <- 0.5
#which evaluation method should be used to determine p-value
method <- switch(density_evaluation,
"1"= evaluate_density_1,
"2"= evaluate_density_2)
for (i in 1:length(new)){
#for evaluation of P(x|Case)
dens_case <- method(best_fit_cases, new_1[i])
if(density_evaluation==1){
#for evaluation of P(x|Control) for GUESS1: 1-pnorm = probability, for x to be a control.
dens_control <- (1-method(best_fit_controls, new_2[i]))
}
else{
dens_control <- method(best_fit_controls, new_2[i])
}
#P(x|C)*P(C)
path_prob_case <- dens_case*class_prob_case
path_prob_control <- dens_control*class_prob_control
#p(x)
evidence <- path_prob_case+path_prob_control
if(evidence==0){ #to avoid dividing by 0
prob_case <- path_prob_case #if evidence is 0: both path_probs are set to 0 by default
prob_control <- path_prob_control
}
else{
#path_prob/p(x)
prob_case <- path_prob_case/evidence
prob_control <- path_prob_control/evidence
}
out[i] <- prob_case
dens_cases[i] <- dens_case
dens_controls[i] <- dens_control
}
#significant results in test set
sign_test_set <- sum(new>build_guess_object$t_crit[[1]]& new<build_guess_object$t_crit[[2]])/length(new)
if(return_class_density==TRUE){
return(list(predictions=out, dens_case=dens_cases, dens_controls=dens_controls,
pred_idx=density_evaluation, significance_test_set=sign_test_set))
}
else
return(list(predictions=out, pred_idx=density_evaluation, significance_test_set=sign_test_set))
}
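# The core of evaluation method 2 is Bayes' rule with the fitted class
# densities: P(case|x) = p(x|case)P(case) / (p(x|case)P(case) + p(x|control)P(control)).
# Minimal numeric sketch with known normal densities (not a fitted GUESS model):
#   x <- 0.2
#   d_case <- dnorm(x, mean = 1); d_ctrl <- dnorm(x, mean = -1)
#   d_case * 0.5 / (d_case * 0.5 + d_ctrl * 0.5)  # posterior P(case|x)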
|
/scratch/gouwar.j/cran-all/cranData/CalibratR/R/predict_GUESS.R
|
#' @title predict_calibratR
#' @description maps the uncalibrated predictions \code{new} into calibrated predictions using the passed over \code{calibration models}
#' @author Johanna Schwarz
#' @param calibration_models list of trained calibration models that were constructed using the \code{\link{calibrate}} method.
#' The list components \code{calibration_models} from the \code{\link{calibrate}} output can be used directly.
#' @param new vector of new uncalibrated instances. Default: 100 scores from the minimum to the maximum of the original ML scores
#' @param nCores number of cores to use during parallelisation. Default: 4
#' @return list object with the following components:
#' \item{predictions}{a list containing the calibrated predictions for each calibration model}
#' \item{significance_test_set}{a list containing the percentage of \code{new} instances for which prediction estimates are statistically significant}
#' \item{pred_per_bin}{a list containing the number of instances in each bin for the binning models}
#' @details if no \code{new} value is given, the function will evaluate a sequence of numbers ranging from the minimum to the maximum of the original values in the training set
#' @examples
#' ## Loading dataset in environment
#' data(example)
#' test_set <- example$test_set
#' calibration_model <- example$calibration_model
#'
#' ## Predict for test set
#' predictions <- predict_calibratR(calibration_model$calibration_models, new=test_set, nCores = 2)
#'
#' @rdname predict_calibratR
#' @export
#' @importFrom parallel makeCluster stopCluster
#' @import foreach
#' @importFrom doParallel registerDoParallel
predict_calibratR <- function(calibration_models, new=NULL, nCores=4){
min <- min(calibration_models$original_values$predicted)
max <- max(calibration_models$original_values$predicted)
mean <- mean(calibration_models$original_values$predicted)
#default: if no seq is given, evaluate from min to max value of original input score
if(is.null(new)){
step_size <- (max-min)/100 #evaluate 100 scores
new <- seq(min, max, step_size)
}
#calibrated predictions, inputtype 1=scaled, 2=transformed, 0=original
  NumberOfCluster <- nCores # how many jobs should run at the same time
  cl <- parallel::makeCluster(NumberOfCluster) # create the worker cluster; stopped via stopCluster(cl) below
doParallel::registerDoParallel(cl)
`%dopar%` <- foreach::`%dopar%`
i <- NULL
predictions_calibrated <- foreach::foreach(i=seq(1, length(calibration_models$models),1), .packages = "CalibratR") %dopar% {
pred <- predict_model(new, calibration_models$models[[i]], min, max, mean, calibration_models$models[[i]]$inputtype)
return(pred)
}
parallel::stopCluster(cl)
names(predictions_calibrated) <- names(calibration_models$models)
#restructure predictions_calibrated
predictions_calibrated[["hist_scaled"]] <- predictions_calibrated$hist_scaled$predictions
predictions_calibrated[["hist_transformed"]] <- predictions_calibrated$hist_transformed$predictions
predictions_calibrated[["BBQ_scaled_sel"]] <- predictions_calibrated$BBQ_scaled$BBQ_sel$predictions
predictions_calibrated[["BBQ_scaled_avg"]] <- predictions_calibrated$BBQ_scaled$BBQ_avg$predictions
predictions_calibrated$BBQ_scaled <- NULL
predictions_calibrated[["BBQ_transformed_sel"]] <- predictions_calibrated$BBQ_transformed$BBQ_sel$predictions
predictions_calibrated[["BBQ_transformed_avg"]] <- predictions_calibrated$BBQ_transformed$BBQ_avg$predictions
predictions_calibrated$BBQ_transformed <- NULL
predictions_calibrated[["GUESS_1"]] <- predictions_calibrated$GUESS$GUESS_1$predictions
predictions_calibrated[["GUESS_2"]] <- predictions_calibrated$GUESS$GUESS_2$predictions
predictions_calibrated$GUESS <- NULL
#uncalibrated predictions
predictions <- list()
predictions[["original"]] <- new
predictions[["scaled"]] <- scale_me(new, min, max)
predictions[["transformed"]] <- transform_me(new, mean)
predictions <- c(predictions, predictions_calibrated)
return(predictions)
}
|
/scratch/gouwar.j/cran-all/cranData/CalibratR/R/predict_calibratR_parallel.R
|
#' @title predict_hist_binning
#' @description predict for a new element using histogram binning
#' @param histogram the output of \code{\link{build_hist_binning}}
#' @param new vector of uncalibrated probabilities
#' @return a list object containing the following components
#' \item{predictions}{contains a vector of calibrated predictions}
#' \item{significance_test_set}{the percentage of \code{new} instances that was evaluated using significant prediction estimates}
#' \item{pred_per_bin}{a table containing the number of instances from \code{new} for each bin of the final binning scheme of \code{histogram}}
#' @rdname predict_hist_binning
predict_hist_binning <- function(histogram, new){
breaks <- histogram$histogram$breaks
bin_probs <- histogram$probs_per_bin
out <- c()
#percentage of significant predictions
significant_bins <- subset(histogram$binnning_scheme$`no bin`, histogram$binnning_scheme$p_value<0.05)
no_per_bin <- cut(new, breaks, labels = histogram$binnning_scheme$`no bin`,include.lowest = T)
sign_test_set <- sum(table(no_per_bin)[significant_bins])/(sum(table(no_per_bin)))
for(i in 1:length(new)){
for (j in 1:(length(breaks)-1)){
if (new[i]==breaks[1]){
out[i] <- bin_probs[1]
}
if (breaks[j] < new[i] && new[i]<= breaks[j+1]){
out[i] <- bin_probs[j]
}
}
}
return(list(predictions=out,significance_test_set=sign_test_set,pred_per_bin=table(no_per_bin)))
}
|
/scratch/gouwar.j/cran-all/cranData/CalibratR/R/predict_hist_binning.R
|
#' @title predict_model
#' @description calibrates the uncalibrated predictions \code{new} using \code{calibration_model}.
#' @param new vector of uncalibrated predictions
#' @param calibration_model calibration model to be used for the calibration. Can be the output of \code{\link{build_BBQ}},\code{\link{build_hist_binning}} or \code{\link{build_GUESS}}.
#' @param min minimum value of the original data set
#' @param max maximum value of the original data set
#' @param mean mean value of the original data set
#' @param inputtype specify if the model was built on original (=0), scaled (=1) or transformed (=2) data
#' @return vector of calibrated predictions
#' @rdname predict_model
predict_model <- function(new, calibration_model, min, max, mean, inputtype){
  ###local function###
prepare_input <- function(new, min, max, mean, inputtype){
if (inputtype==0){ #model uses original scores
output <- new
}
else if (inputtype==1){ #model uses scaled scores
output <- scale_me(new, min, max)
}
else if (inputtype==2){ #model uses transformed scores
output <- transform_me(new, mean)
}
return(output=output)
}
predict <- switch(calibration_model$type,
"hist"= predict_hist_binning,
"BBQ"= predict_BBQ,
"GUESS"= predict_GUESS
)
new <- prepare_input(new, min, max, mean, inputtype)
if(calibration_model$type=="BBQ"){
x_sel <- predict(calibration_model, new, 0)
x_avg <- predict(calibration_model, new, 1)
return(list(BBQ_sel=x_sel,
BBQ_avg=x_avg))
}
else if (calibration_model$type=="hist"){
x <- predict(calibration_model, new)
return(case=x)
}
else if(calibration_model$type=="GUESS"){
x_1 <- predict(calibration_model, new, 1)
x_2 <- predict(calibration_model, new, 2)
return(list(GUESS_1=x_1,
GUESS_2=x_2))
}
}
|
/scratch/gouwar.j/cran-all/cranData/CalibratR/R/predict_model.R
|
#' @title rd_multiple_runs
#' @description This function plots all n reliability diagrams that were constructed during n-times repeated m-fold cross-validation (CV).
#' During calibration model evaluation, CV is repeated n times, so that eventually n reliability diagrams are obtained.
#' @param list_models list object that contains n times the output from the \code{\link{reliability_diagramm}} method.
#' @return a list object that contains a reliability diagram visualising all reliability diagrams constructed during n-times repeated m-fold cross-validation.
#' @seealso
#' \code{\link[reshape2]{melt}}
#' \code{\link[ggplot2]{ggplot}},\code{\link[ggplot2]{geom_line}},\code{\link[ggplot2]{aes}},\code{\link[ggplot2]{geom_abline}},\code{\link[ggplot2]{ylab}},\code{\link[ggplot2]{xlab}},\code{\link[ggplot2]{xlim}},\code{\link[ggplot2]{ylim}},\code{\link[ggplot2]{coord_fixed}},\code{\link[ggplot2]{geom_text}},\code{\link[ggplot2]{scale_color_discrete}},\code{\link[ggplot2]{ggtitle}}
#' @rdname rd_multiple_runs
#' @importFrom reshape2 melt
#' @importFrom ggplot2 ggplot geom_line aes geom_abline ylab xlab xlim ylim coord_fixed geom_text scale_color_discrete ggtitle
#' @importFrom stats median
rd_multiple_runs <- function(list_models){
list_bins <- list()
list_bins[["mean_prediction"]] <- list()
list_bins[["accuracy"]] <- list()
list_bins[["significance"]] <- list()
for (j in list_models){
for (i in seq(1,10,1)){
list_bins[["mean_prediction"]][[as.character(i)]] <- c(list_bins[["mean_prediction"]][[as.character(i)]],j$error$mean_pred_per_bin[[i]])
list_bins[["accuracy"]][[as.character(i)]] <- c(list_bins[["accuracy"]][[as.character(i)]],j$error$accuracy_per_bin[[i]])
list_bins[["significance"]][[as.character(i)]] <- c(list_bins[["significance"]][[as.character(i)]],j$error$sign[[i]])
}
}
mean_pred <- data.frame(list_bins$mean_prediction)
accuracy <- data.frame(list_bins$accuracy)
significance <- data.frame(list_bins$significance)
x <- reshape2::melt(t(mean_pred))
y <- reshape2::melt(t(accuracy))
df <- cbind(x,acc=y[,3])
plot1 <- ggplot2::ggplot()+
ggplot2::geom_line(data=df, ggplot2::aes(x=df$value, y=df$acc, group=df$Var2),
colour="grey70",alpha=0.3, size=1)+
ggplot2::geom_line(ggplot2::aes(apply(mean_pred,2, FUN=median),
apply(accuracy,2, FUN=median)), colour="#0072B2", size=2)+
ggplot2::geom_abline(slope=1, color="#999999", size=1, linetype=2)+
ggplot2::ylab("observed frequency")+
ggplot2::xlab("mean prediction per bin")+
ggplot2::xlim(0, 1) +
ggplot2::ylim(0, 1.01) +
ggplot2::coord_fixed(ratio=1)+
ggplot2::geom_text(ggplot2::aes(x=unlist(mean_pred), y=unlist(accuracy),
label=unlist(significance)), size=3.5, alpha=0.5)+
ggplot2::scale_color_discrete(guide=FALSE)+
ggplot2::ggtitle(paste("Reliability Diagrams from", nrow(accuracy),"partitions"))
return(plot1)
}
|
/scratch/gouwar.j/cran-all/cranData/CalibratR/R/rd_multiple_runs.R
|
#' @title reliability_diagramm
#' @description Reliability curves allow checking if the predicted probabilities of a
#' binary classifier are well calibrated. This function returns two arrays
#' which encode a mapping from predicted probability to empirical probability.
#' For this, the predicted probabilities are partitioned into equally sized
#' bins and the mean predicted probability and the mean empirical probability
#' in each bin are computed. For perfectly calibrated predictions, both
#' quantities would be approximately equal (for sufficiently many test samples).
#' Note: this implementation is restricted to binary classification.
#' @param actual vector of observed class labels (0/1)
#' @param predicted vector of uncalibrated predictions
#' @param bins number of bins in the reliability diagram, Default: 10
#' @param plot_rd should the reliability diagram be plotted, Default: TRUE
#' @return a list object containing the following elements
#' \item{calibration_error}{list of calibration error measures (equal-width and equal-frequency ECE and MCE, RMSE, class-specific CLE, overall and class-specific Brier scores), rounded to 5 digits}
#' \item{discrimination_error}{discrimination error measures (sensitivity, specificity, AUC, ...) as returned by the internal discrimination evaluation}
#' \item{rd_breaks}{number of bins used in the reliability diagram}
#' \item{histogram_plot}{ggplot object showing the histogram of predictions, split by class}
#' \item{diagram_plot}{ggplot object showing the reliability diagram}
#' \item{mean_pred_per_bin}{mean prediction per bin}
#' \item{accuracy_per_bin}{observed frequency of class 1 instances per bin}
#' \item{freq_per_bin}{fraction of all instances that fall into each bin}
#' \item{sign}{significance marker per bin ("*" = significant, "ns" = not significant, "x" = empty bin)}
#' @seealso
#' \code{\link[ggplot2]{ggplot}},\code{\link[ggplot2]{stat_bin}},\code{\link[ggplot2]{aes}},\code{\link[ggplot2]{scale_fill_manual}},\code{\link[ggplot2]{theme}},\code{\link[ggplot2]{labs}},\code{\link[ggplot2]{geom_point}},\code{\link[ggplot2]{xlim}},\code{\link[ggplot2]{ylim}},\code{\link[ggplot2]{geom_abline}},\code{\link[ggplot2]{geom_line}},\code{\link[ggplot2]{geom_text}},\code{\link[ggplot2]{geom_label}},\code{\link[ggplot2]{coord_fixed}}
#' @rdname reliability_diagramm
#' @importFrom ggplot2 ggplot stat_bin aes scale_fill_manual theme labs geom_point xlim ylim geom_abline geom_line geom_text geom_label coord_fixed
#' @importFrom graphics hist
#' @export
reliability_diagramm <- function(actual, predicted, bins=10, plot_rd=TRUE){
plot1 <- NULL
plot2 <- NULL
mean_pred_per_bin_ <- NULL
accuracy_per_bin_ <- NULL
freq_per_bin <- NULL
#error values
ece <- getECE(actual, predicted, bins)
mce <- getMCE(actual, predicted, bins)
rmse <- getRMSE(actual, predicted)
ece_ <- get_ECE_equal_width(actual, predicted, bins)
mce_ <- get_MCE_equal_width(actual, predicted, bins)
cle <- get_CLE_class(actual, predicted, bins)
brier <- get_Brier_score(actual, predicted)
discrimination_error <- evaluate_discrimination(actual, predicted)
  #only plot reliability diagram if all(predicted) lie between 0 and 1; the 1.00001 tolerance guards against floating-point rounding errors
if(all(predicted<=1.00001) && all(predicted>=0)){
all <- data.frame(cbind(actual,predicted))
histogram <- hist(all[,2], breaks=seq(0,1,1/bins),plot=FALSE)
accuracy_per_bin <- rep(0,length(histogram$mids))
mean_pred_per_bin <- rep(0,length(histogram$mids))
#sort predicted
x <- order(predicted)
predicted <- predicted[x]
actual <- actual[x]
for(i in 1:length(predicted)){
for (j in 1:(length(histogram$breaks)-1)){
if (predicted[i]==histogram$breaks[1]){ #values with prob = 0 are put in bin 1
accuracy_per_bin[j] <- accuracy_per_bin[j] + actual[i]
mean_pred_per_bin[j] <- mean_pred_per_bin[j] + predicted[i]
break
}
if (histogram$breaks[j] < predicted[i] && predicted[i]<= histogram$breaks[j+1]){
accuracy_per_bin[j] <- accuracy_per_bin[j] + actual[i]
mean_pred_per_bin[j] <- mean_pred_per_bin[j] + predicted[i]
break
}
}}
    mean_pred_per_bin_ <- mean_pred_per_bin/histogram$counts #mean prediction per bin
    mean_pred_per_bin_[is.nan(mean_pred_per_bin_)] <- 0
    accuracy_per_bin_ <- accuracy_per_bin/histogram$counts #observed fraction of class 1 instances per bin
    accuracy_per_bin_[is.nan(accuracy_per_bin_)] <- 0
pvalue_per_bin <- unlist(apply(cbind(success=accuracy_per_bin, all= histogram$counts),1,binom_for_histogram))
freq_per_bin <- histogram$counts/sum(histogram$counts)
sign <- c()
for (i in (1: length(pvalue_per_bin))){
if (pvalue_per_bin[i]<0.05){
sign[i] <- "*"
}
else if (pvalue_per_bin[i]==2){ #empty bins are indicated with pvalue of 2
sign[i] <- "x"
}
else
sign[i] <- "ns"
}
idx <- sign=="x"
if(plot_rd){
..count.. <- NULL
plot1 <- ggplot2::ggplot(data=all)+
ggplot2::stat_bin(mapping=ggplot2::aes(x=predicted, fill=factor(actual)),color="white",alpha=0.6,breaks=seq(0,1,1/bins), position="identity")+
ggplot2::scale_fill_manual(values=c("darkolivegreen4", "firebrick3"),labels=c("control","case"), name="Group")+
ggplot2::theme(legend.position = "top")+
ggplot2::stat_bin(data=subset(all,actual==0), ggplot2::aes(x=predicted,label=..count..), breaks=seq(0,1,1/bins), geom="text", position="identity", size=4)+
ggplot2::stat_bin(data=subset(all,actual==1), ggplot2::aes(x=predicted,label=..count..), breaks=seq(0,1,1/bins), geom="text", position="identity", size=4)+
ggplot2::labs(title="Constructed Histogram for Reliability Diagram", subtitle=paste("bins:",bins), x = "prediction", y = "observed frequency")
plot2 <- ggplot2::ggplot(data=data.frame(cbind(mean_pred_per_bin_,accuracy_per_bin_)),ggplot2::aes(mean_pred_per_bin_, accuracy_per_bin_))+
ggplot2::geom_point(shape=18,color="black", size=3)+
ggplot2::xlim(0, 1) +
ggplot2::ylim(0, 1.05) +
ggplot2::geom_abline(slope=1, color="#999999", size=1, linetype=2)+
ggplot2::geom_line(data=data.frame(cbind(mean_pred_per_bin_=mean_pred_per_bin_[!idx],accuracy_per_bin_=accuracy_per_bin_[!idx])),
color="#0072B2", size=2)+
ggplot2::geom_text(mapping=ggplot2::aes(mean_pred_per_bin_, accuracy_per_bin_+0.04,label=sign))+
ggplot2::geom_label(mapping=ggplot2::aes(0.2,0.9, label=paste(paste("n:",length(predicted)),"\n",
paste("ECE:",round(ece_,4)),"\n",
"ns = not significant\n",
"x = empty bin")), size=2)+
ggplot2::coord_fixed(ratio=1)+
ggplot2::labs(title ="Reliability Diagram", subtitle=paste("bins:",bins), x = "mean prediction in bin", y = "observed frequency")
# plot2 <- ggplot(data=data.frame(cbind(histogram$mids,accuracy_per_bin_)),aes(V1, accuracy_per_bin_))+
# geom_point(shape=18,color="black", size=3)+
# xlim(0, 1) + ylim(0, 1.05) +
# geom_abline(slope=1, color="#999999", size=1, linetype=2)+
# geom_line(data=data.frame(cbind(histogram$mids[!idx],accuracy_per_bin_=accuracy_per_bin_[!idx])),
# color="#0072B2", size=2)+
# geom_text(mapping=aes(V1, accuracy_per_bin_+0.04,label=sign))+
# geom_label(mapping=aes(0.2,0.9, label=paste(paste("n:",length(predicted)),"\n",
# paste("ECE:",round(ece_,4)),"\n",
# "ns = not significant\n",
# "x = empty bin")), size=2)+
# coord_fixed(ratio=1)+
# labs(title ="Reliability Diagram", subtitle=paste("bins:",breaks), x = "bin midpoint", y = "observed frequency")
#
}}
error_list <- list(ECE_equal_width=ece_, MCE_equal_width=mce_, ECE_equal_freq=ece, MCE_equal_freq=mce,
RMSE=rmse, CLE_class_1=cle$class_1, CLE_class_0=cle$class_0, brier=brier$brier,
brier_class_1=brier$brier_1, brier_class_0=brier$brier_0)
rounded_list <- lapply(error_list,round,5)
return(list(calibration_error=rounded_list, discrimination_error=discrimination_error,
rd_breaks=bins, histogram_plot=plot1, diagram_plot=plot2,
mean_pred_per_bin=mean_pred_per_bin_, accuracy_per_bin=accuracy_per_bin_,
freq_per_bin=freq_per_bin,
sign=sign))
}
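# Usage sketch with simulated, well-calibrated predictions (assumes CalibratR
# is loaded so that the internal error functions are available):
#   set.seed(7)
#   p <- runif(300)
#   y <- rbinom(300, 1, p)
#   rd <- reliability_diagramm(y, p, bins = 10)
#   rd$calibration_error$ECE_equal_width  # small for well-calibrated p
#   rd$diagram_plot                       # ggplot reliability diagram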
|
/scratch/gouwar.j/cran-all/cranData/CalibratR/R/reliability_diagram.R
|
#' @title statistics_calibratR
#' @description this method offers a variety of statistical evaluation methods for the output of the \code{\link{calibrate}} method.
#' All returned error values represent mean error values over the \code{n_seeds} times repeated 10-fold CV.
#' @author Johanna Schwarz
#' @param calibrate_object list that is returned from the \code{\link{calibrate}} function. The parameter \code{n_seeds} is available as a list component of the \code{calibrate_object}
#' @param t.test_partitions performs a paired two-sided t.test over the error values (ECE, CLE1, CLE0, MCE, AUC, sensitivity and specificity) from the
#' random partition splits, testing for significant differences in mean between the calibration models. All models and the original, scaled and transformed values are tested against each other.
#' The p_value and the effect size of the t.test are returned to the user. Can only be performed if the \code{calibrate_object} contains a \code{summary_CV} list object; otherwise, an error is returned. Default: TRUE
#' @param significance_models returns important characteristics of the implemented calibration models, Default: TRUE
#' @return An object of class list, with the following components:
#' \item{mean_calibration}{mean of calibration error values (ECE_equal_width, MCE_equal_width, ECE_equal_freq, MCE_equal_freq, RMSE, Class 1 CLE, Class 0 CLE, Brier Score, Class 1 Brier Score, Class 0 Brier Score) over \code{n_seeds} times repeated 10-fold CV.
#' ECE and MCE are computed once using equal-width and once using equal-frequency binning for the construction of the underlying binning scheme.
#' Only returned, if \code{calibrate_object} contains a summary_CV list object.}
#' \item{standard_deviation}{standard deviation of calibration error values over \code{n_seeds} times repeated 10-fold CV. Only returned, if \code{calibrate_object} contains a summary_CV list object.}
#' \item{var_coeff_calibration}{variation coefficient of calibration error values over \code{n_seeds} times repeated 10-fold CV. Only returned, if \code{calibrate_object} contains a summary_CV list object.}
#' \item{mean_discrimination}{mean of discrimination error (sensitivity, specificity, AUC, positive predictive value, negative predictive value, accuracy) values over \code{n_seeds} times repeated 10-fold CV. The "cut-off" is
#' the cut-off value that maximizes sensitivity and specificity. Only returned, if \code{calibrate_object} contains a summary_CV list object.}
#' \item{sd_discrimination}{standard deviation of discrimination error values over \code{n_seeds} times repeated 10-fold CV. Only returned, if \code{calibrate_object} contains a summary_CV list object.}
#' \item{var_coeff_discrimination}{variation coefficient of discrimination error values over \code{n_seeds} times repeated 10-fold CV. Only returned, if \code{calibrate_object} contains a summary_CV list object.}
#' \item{t.test_calibration}{=list(p_value=t.test.calibration, effect_size=effect_size_calibration), only returned if t.test=TRUE}
#' \item{t.test_discrimination}{=list(p_value=t.test.discrimination, effect_size=effect_size_discrimination), only returned if t.test=TRUE}
#' \item{significance_models}{only returned if significance_models=TRUE}
#' \item{n_seeds}{number of random data set partitions into training and test set for \code{folds}-times CV}
#' \item{original_values}{list object that consists of the \code{actual} and \code{predicted} values of the original scores}
#' @examples
#' ## Loading dataset in environment
#' data(example)
#' calibration_model <- example$calibration_model
#'
#' statistics <- statistics_calibratR(calibration_model)
#' @seealso
#' \code{\link[stats]{t.test}},\code{\link[stats]{friedman.test}}
#' @rdname statistics_calibratR
#' @export
#' @importFrom stats t.test sd
statistics_calibratR <- function(calibrate_object, t.test_partitions=TRUE, significance_models=TRUE){
if(!is.null(calibrate_object$summary_CV$models$calibrated)){
##data preparation
means_calibration <- data.frame()
sd_calibration <- data.frame()
var_coeff_calibration <- data.frame()
compare_ece <- data.frame()
compare_mce <- data.frame()
compare_auc <- data.frame()
compare_rmse <- data.frame()
compare_cle1 <- data.frame()
compare_cle0 <- data.frame()
compare_sens <- data.frame()
compare_spec <- data.frame()
for(i in calibrate_object$summary_CV$error_models$calibration){
compare_ece <- rbind(compare_ece,i$ECE_equal_width)
compare_cle1 <- rbind(compare_cle1,i$CLE_class_1)
compare_cle0 <- rbind(compare_cle0,i$CLE_class_0)
compare_mce <- rbind(compare_mce, i$MCE_equal_width)
compare_rmse <- rbind(compare_rmse, i$RMSE)
means_calibration <- rbind(means_calibration, apply(i,2, mean))
sd_calibration <- rbind(sd_calibration, apply(i,2, sd))
}
model_names <- names(calibrate_object$summary_CV$error_models$calibration)
n_runs <- seq(1,calibrate_object$n_seeds,1)
names_calibration_errors <- names(calibrate_object$summary_CV$error_models$calibration[[1]])
names_discrimination_errors <- names(calibrate_object$summary_CV$error_models$discrimination[[1]])
rownames(compare_ece) <- model_names
colnames(compare_ece) <- n_runs
rownames(compare_rmse) <- model_names
colnames(compare_rmse) <- n_runs
rownames(compare_cle1) <- model_names
colnames(compare_cle1) <- n_runs
rownames(compare_cle0) <- model_names
colnames(compare_cle0) <- n_runs
rownames(compare_mce) <- model_names
colnames(compare_mce) <- n_runs
rownames(means_calibration) <- model_names
colnames(means_calibration) <- names_calibration_errors
rownames(sd_calibration) <- model_names
colnames(sd_calibration) <- names_calibration_errors
var_coeff_calibration <- sd_calibration/means_calibration
means_discrimination <- data.frame()
sd_discrimination <- data.frame()
var_coeff_discrimination <- data.frame()
for(i in calibrate_object$summary_CV$error_models$discrimination){
means_discrimination <- rbind(means_discrimination, apply(i,2, mean))
sd_discrimination <- rbind(sd_discrimination, apply(i,2, sd))
compare_auc <- rbind(compare_auc,i$auc)
compare_sens <- rbind(compare_sens,i$sens)
compare_spec <-rbind(compare_spec,i$spec)
}
rownames(compare_auc) <- model_names
colnames(compare_auc) <- n_runs
rownames(compare_sens) <- model_names
colnames(compare_sens) <- n_runs
rownames(compare_spec) <- model_names
colnames(compare_spec) <- n_runs
rownames(means_discrimination) <- model_names
colnames(means_discrimination) <- names_discrimination_errors
rownames(sd_discrimination) <- model_names
colnames(sd_discrimination) <- names_discrimination_errors
var_coeff_discrimination <- sd_discrimination/means_discrimination
all_calibration_errors <- list(ece=compare_ece, cle0=compare_cle0, cle1=compare_cle1,
mce=compare_mce, rmse=compare_rmse)
all_discrimination_errors <- list(auc=compare_auc, sens=compare_sens, spec=compare_spec)
## perform paired t.test for all models
if(t.test_partitions){
t.test_partitions_cal <- list()
t.test_partitions_dis <- list()
t.test.calibration <- list()
t.test.discrimination <- list()
effect_size_calibration <- list()
effect_size_discrimination <- list()
z <- 1
a <- 1
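# All-vs-all comparison: for each error metric i, w indexes the reference
# model and y the comparison model; paired t-tests over the n CV runs fill a
# p-value matrix column by column. z and a index the error metrics for the
# partition summaries and the t-test matrices. Note that the local variables
# mean and sd shadow the base R functions of the same name inside this loop.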
for (i in all_calibration_errors){
t.test.all <- c()
effect_size_all <- c()
for (w in seq(1, nrow(i),1)){
t.test <- c()
effect_size <-c()
mean <- c()
sd <- c()
for (y in seq(1, nrow(i),1)){
#if all values in i[y,] are identical, skip the t-test (zero variance would cause an error)
if(length(unique(as.numeric(i[y,])))==1){
t.test <- c(t.test, NA)
effect_size <- c(effect_size, NA)
}
else{
t.test_result <- stats::t.test(as.numeric(i[w,]), as.numeric(i[y,]), paired=TRUE)
t.test <- c(t.test, round(t.test_result$p.value, 4))
effect_size <- c(effect_size, round(t.test_result$estimate,4))
}
mean <- c(mean, mean(as.numeric(i[y,])))
sd <- c(sd, sd(as.numeric(i[y,])))
}
t.test.all <- cbind(t.test.all, t.test)
effect_size_all <- cbind(effect_size_all, effect_size)
}
# store the per-metric partition summary once per error metric (outside the w
# loop, so that z advances once per metric to match the names assigned below)
t.test_partitions_cal[[z]] <- cbind(i, mean=mean, sd=sd, rank_mean=rank(mean))
z <- z+1
row.names(t.test.all) <- model_names
colnames(t.test.all) <- model_names
row.names(effect_size_all) <- model_names
colnames(effect_size_all) <- model_names
t.test.calibration[[a]] <- cbind(t.test.all, mean=round(mean,4), rank_mean=rank(mean))
effect_size_calibration[[a]] <- effect_size_all
a <- a+1
}
names(t.test.calibration) <- c("ece", "cle0", "cle1", "mce", "rmse")
names(t.test_partitions_cal) <- c("ece", "cle0", "cle1", "mce", "rmse")
names(effect_size_calibration) <- c("ece", "cle0", "cle1", "mce", "rmse")
z <- 1
a <- 1
for (i in all_discrimination_errors){
t.test.all <- c()
effect_size_all <- c()
for (w in seq(1, nrow(i),1)){
t.test <- c()
effect_size <- c()
mean <- c()
sd <- c()
for (y in seq(1, nrow(i),1)){
#if all values in i[y,] are identical, skip the t-test (zero variance would cause an error)
if(length(unique(as.numeric(i[y,])))==1){
t.test <- c(t.test, NA)
effect_size <- c(effect_size, NA)
}
else{
t.test_result <- stats::t.test(as.numeric(i[w,]), as.numeric(i[y,]), paired=TRUE)
t.test <- c(t.test, round(t.test_result$p.value, 4))
effect_size <- c(effect_size, round(t.test_result$estimate,4))
}
mean <- c(mean, mean(as.numeric(i[y,])))
sd <- c(sd, sd(as.numeric(i[y,])))
}
t.test.all <- cbind(t.test.all, t.test)
effect_size_all <- cbind(effect_size_all, effect_size)
}
# as above: store the per-metric summary once per error metric, outside the w loop
t.test_partitions_dis[[z]] <- cbind(i, mean=mean, sd=sd, rank_mean=rank(-mean))
z <- z+1
row.names(t.test.all) <- model_names
colnames(t.test.all) <- model_names
row.names(effect_size_all) <- model_names
colnames(effect_size_all) <- model_names
t.test.discrimination[[a]] <- cbind(t.test.all, mean=round(mean,4), rank_mean=rank(-mean))
effect_size_discrimination[[a]] <- effect_size_all
a <- a+1
}
names(t.test.discrimination) <- c("auc", "sens","spec")
names(t.test_partitions_dis) <- c("auc", "sens","spec")
names(effect_size_discrimination) <- c("auc", "sens","spec")
}
else {
t.test_partitions_cal <- NULL
t.test_partitions_dis <- NULL
t.test.calibration <- NULL
t.test.discrimination <- NULL
effect_size_calibration <- NULL
effect_size_discrimination <- NULL
}
}
else{
if(t.test_partitions==TRUE){
warning("No error values from repeated CVs are available in the trained calibration models. No t-test can be performed.
Please make sure that the calibrate_object containes a summary_CV list object.")
}
t.test_partitions_cal <- NULL
t.test_partitions_dis <- NULL
t.test.calibration <- NULL
t.test.discrimination <- NULL
effect_size_calibration <- NULL
effect_size_discrimination <- NULL
means_calibration <- NULL
sd_calibration <- NULL
means_discrimination <- NULL
sd_discrimination <- NULL
var_coeff_calibration <- NULL
var_coeff_discrimination <- NULL
}
if(significance_models){
sign_model <- list()
sign_model[["hist_scaled"]] <- list()
sign_model[["hist_scaled"]] <- c(calibrate_object$calibration_models$models_final$hist_scaled$calibration_points,
calibrate_object$calibration_models$models_final$hist_scaled$calibration_points_number,
calibrate_object$calibration_models$models_final$hist_scaled$calibration_range)
sign_model[["hist_transformed"]] <- list()
sign_model[["hist_transformed"]] <- c(calibrate_object$calibration_models$models_final$hist_transformed$calibration_points,
calibrate_object$calibration_models$models_final$hist_transformed$calibration_points_number,
calibrate_object$calibration_models$models_final$hist_transformed$calibration_range)
sign_model[["BBQ_scaled"]] <- list()
sign_model[["BBQ_scaled"]] <- c(calibrate_object$calibration_models$models_final$BBQ_scaled$calibration_points,
calibrate_object$calibration_models$models_final$BBQ_scaled$calibration_points_number,
calibrate_object$calibration_models$models_final$BBQ_scaled$calibration_range)
sign_model[["BBQ_transformed"]] <- list()
sign_model[["BBQ_transformed"]] <- c(calibrate_object$calibration_models$models_final$BBQ_transformed$calibration_points,
calibrate_object$calibration_models$models_final$BBQ_transformed$calibration_points_number,
calibrate_object$calibration_models$models_final$BBQ_transformed$calibration_range)
sign_model[["GUESS"]] <- list()
sign_model[["GUESS"]] <- list(crit_boundaries=calibrate_object$calibration_models$models_final$GUESS$t_crit,
sign_train_set=calibrate_object$calibration_models$models_final$GUESS$sign_train_set)
}
else {
sign_model <- NULL
}
return(list(mean_calibration=means_calibration, sd_calibration=sd_calibration, var_coeff_calibration=var_coeff_calibration,
mean_discrimination=means_discrimination, sd_discrimination=sd_discrimination, var_coeff_discrimination=var_coeff_discrimination,
t.test_calibration=list(p_value=t.test.calibration, effect_size=effect_size_calibration),
t.test_discrimination=list(p_value=t.test.discrimination, effect_size=effect_size_discrimination),
significance_models=sign_model,
n_seeds=calibrate_object$n_seeds,
original_values=calibrate_object$calibration_models$original_values))
}
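# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original package code), mirroring the
# roxygen example above with the bundled `example` dataset:
# data(example)
# stats <- statistics_calibratR(example$calibration_model)
# stats$mean_calibration                  # mean error per model over the CV runs
# stats$t.test_calibration$p_value$ece    # pairwise p-values for the ECE
# stats$t.test_calibration$effect_size$ece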
|
/scratch/gouwar.j/cran-all/cranData/CalibratR/R/statistics_calibratR.R
|
#visualize
#' @title visualize_calibratR
#' @description this method offers a variety of visualisations to compare implemented calibration models
#' @author Johanna Schwarz
#' @param calibrate_object the output object of the \code{\link{calibrate}} method, containing the trained calibration models and evaluation results
#' @param plot_distributions returns density distribution plots of the calibrated predictions after CV (external) or without CV (internal)
#' @param rd_partitions returns a reliability diagram for each model
#' @param training_set_calibrated returns a list of ggplots. Each plot represents the calibrated predictions by the respective calibration model of the training set.
#' If the list object \code{predictions} in the \code{calibrate_object} is empty, \code{training_set_calibrated} is returned as NULL.
#' @param visualize_models returns the list components \code{plot_calibration_models} and \code{plot_single_models}
#' @return An object of class list, with the following components:
#' \item{histogram_distribution}{returns a histogram of the original ML score distribution}
#' \item{density_calibration_internal}{returns a list of density distribution plots for each calibration method, the original
#' and the two input-preprocessing methods scaling and transforming. The plot visualises the density distribution of the calibrated predictions of the training set. In this case, training and test set are identical, so interpret these plots with care.}
#' \item{density_calibration_external}{returns a list of density distribution plots for each calibration method, the original
#' and the two input-preprocessing methods scaling and transforming. The plot visualises the density distribution of the calibrated predictions, that were returned during Cross Validation. If more than one repetition of CV was performed,
#' run number 1 is evaluated}
#' \item{plot_calibration_models}{ maps the original ML scores to their calibrated prediction estimates for each model.
#' This enables easy model comparison over the range of ML scores. See also \code{\link{compare_models_visual}}.}
#' \item{plot_single_models}{returns a list of ggplots for each calibration model, also mapping the original ML scores to their calibrated prediction. Significance values are indicated.
#' See also \code{\link{plot_model}}}
#' \item{rd_plot}{returns a list of reliability diagrams for each of the implemented calibration models and the two input-preprocessing methods "scaled" and "transformed". The returned plot visualises the calibrated predictions that
#' were returned for the test set during each of the n run of the n-times repeated CV. Each grey line represents one of the n runs. The blue line represents the median of all calibrated bin predictions.
#' Insignificant bin estimates are indicated with "ns". If no CV was performed during calibration model building using the \code{\link{calibrate}} method, \code{rd_plot} is returned as NULL}
#' \item{calibration_error}{returns a list of boxplots for the calibration error metrics ECE, MCE, CLE and RMSE. The n values for each model represent the obtained error values during the
#' n times repeated CV. If no CV was performed during calibration model building using the \code{\link{calibrate}} method, \code{calibration_error} is returned as NULL}
#' \item{discrimination_error}{returns a list of boxplots for the discrimination error AUC, sensitivity and specificity. The n values for each model represent the obtained error values during the
#' n times repeated CV. If no CV was performed during calibration model building using the \code{\link{calibrate}} method, \code{discrimination_error} is returned as NULL}
#' \item{cle_class_specific_error}{compares the class-specific classification error (CLE) of the evaluated calibration models. If no CV was performed during calibration model building using the \code{\link{calibrate}} method, \code{cle_class_specific_error} is returned as NULL}
#' \item{training_set_calibrated}{returns a list of ggplots. Each plot represents the calibrated predictions by the respective calibration model of the training set.
#' If the list object \code{predictions} in the \code{calibrate_object} is empty, \code{training_set_calibrated} is returned as NULL.}
#' \item{GUESS_1_final_model}{plots the returned conditional probability p(x|Class) values of the GUESS_1 model}
#' \item{GUESS_2_final_model}{plots the returned conditional probability p(x|Class) values of the GUESS_2 model}
#' @examples
#' ## Loading dataset in environment
#' data(example)
#' calibration_model <- example$calibration_model
#'
#' visualisation <- visualize_calibratR(calibration_model, plot_distributions=FALSE,
#' rd_partitions=FALSE, training_set_calibrated=FALSE)
#' @seealso
#' \code{\link[ggplot2]{ggplot}},\code{\link[ggplot2]{geom_density}},\code{\link[ggplot2]{aes}},\code{\link[ggplot2]{scale_colour_manual}},\code{\link[ggplot2]{scale_fill_manual}},\code{\link[ggplot2]{labs}},\code{\link[ggplot2]{geom_point}},\code{\link[ggplot2]{geom_hline}},\code{\link[ggplot2]{theme}},\code{\link[ggplot2]{element_text}}
#' \code{\link[reshape2]{melt}}
#' @rdname visualize_calibratR
#' @export
#' @importFrom ggplot2 ggplot geom_density aes scale_colour_manual scale_fill_manual labs geom_point geom_hline theme element_text
#' @importFrom reshape2 melt
visualize_calibratR <- function(calibrate_object, visualize_models=FALSE, plot_distributions=FALSE, rd_partitions=FALSE, training_set_calibrated=FALSE){
visualize_distributions <- TRUE
visualize_errors_CV <- TRUE
visualize_cle_class_error <- TRUE
if(is.null(calibrate_object$calibration_models$models_final$GUESS)){
visualize_guess <- FALSE
}
else{
visualize_guess <- TRUE
}
if(length(calibrate_object$calibration_models$models_final)!=5){
warning("Not all calibration models were trained. Certain visualisations may not be available. ")
}
if(is.null(calibrate_object$summary_CV$models$calibrated)){
visualize_errors_CV <- FALSE
plot_distributions <- FALSE
rd_partitions <- FALSE
visualize_cle_class_error <- FALSE
warning("The list object summary_CV of the calibrate_object is empty. Certain visualisations may not be available. ")
}
if(is.null(calibrate_object$predictions)){
plot_distributions <- FALSE
training_set_calibrated <- FALSE
warning("The list object predictions of the calibrate_object is empty. Certain visualisations may not be available.")
}
if(is.null(calibrate_object$summary_no_CV$discrimination_error)){
training_set_calibrated <- FALSE
warning("The list object summary_no_CV of the calibrate_object is empty. Certain visualisations may not be available.")
}
if(visualize_models){
plot_models <- compare_models_visual(calibrate_object$calibration_models)
plot_single_models <- plot_model(calibrate_object$calibration_models)
}
else{
plot_models <- NULL
plot_single_models <- NULL
}
if(plot_distributions){
p0 <- ggplot2::ggplot()+
ggplot2::geom_density(ggplot2::aes(x=calibrate_object$predictions$original[calibrate_object$calibration_models$original_values$actual==0], colour="darkolivegreen4", fill="darkolivegreen4"),alpha=0.2)+
ggplot2::geom_density(ggplot2::aes(x=calibrate_object$predictions$original[calibrate_object$calibration_models$original_values$actual==1], colour="firebrick3", fill="firebrick3"),alpha=0.2)+
ggplot2::scale_colour_manual(values=c("darkolivegreen4", "firebrick3"),labels=c("control", "case"), name="Group")+
ggplot2::scale_fill_manual(values=c("darkolivegreen4", "firebrick3"),labels=c("control", "case"), name="Group")+
ggplot2::labs(title="Uncalibrated", x = "ML score")
p1 <- ggplot2::ggplot()+
ggplot2::geom_density(ggplot2::aes(x=calibrate_object$predictions$scaled[calibrate_object$calibration_models$original_values$actual==0], colour="darkolivegreen4", fill="darkolivegreen4"),alpha=0.2)+
ggplot2::geom_density(ggplot2::aes(x=calibrate_object$predictions$scaled[calibrate_object$calibration_models$original_values$actual==1], colour="firebrick3", fill="firebrick3"),alpha=0.2)+
ggplot2::scale_colour_manual(values=c("darkolivegreen4", "firebrick3"),labels=c("control", "case"), name="Group")+
ggplot2::scale_fill_manual(values=c("darkolivegreen4", "firebrick3"),labels=c("control", "case"), name="Group")+
ggplot2::labs(title="calibrated, internal, scaled", x = "uncalibrated prediction")
p2 <- ggplot2::ggplot()+
ggplot2::geom_density(ggplot2::aes(x=calibrate_object$predictions$transformed[calibrate_object$calibration_models$original_values$actual==0], colour="darkolivegreen4", fill="darkolivegreen4"),alpha=0.2)+
ggplot2::geom_density(ggplot2::aes(x=calibrate_object$predictions$transformed[calibrate_object$calibration_models$original_values$actual==1], colour="firebrick3", fill="firebrick3"),alpha=0.2)+
ggplot2::scale_colour_manual(values=c("darkolivegreen4", "firebrick3"),labels=c("control", "case"), name="Group")+
ggplot2::scale_fill_manual(values=c("darkolivegreen4", "firebrick3"),labels=c("control", "case"), name="Group")+
ggplot2::labs(title="calibrated, internal, transformed", x = "uncalibrated prediction")
p3 <- ggplot2::ggplot()+
ggplot2::geom_density(ggplot2::aes(x=calibrate_object$predictions$hist_scal[calibrate_object$calibration_models$original_values$actual==0], colour="darkolivegreen4", fill="darkolivegreen4"),alpha=0.2)+
ggplot2::geom_density(ggplot2::aes(x=calibrate_object$predictions$hist_scal[calibrate_object$calibration_models$original_values$actual==1], colour="firebrick3", fill="firebrick3"),alpha=0.2)+
ggplot2::scale_colour_manual(values=c("darkolivegreen4", "firebrick3"),labels=c("control", "case"), name="Group")+
ggplot2::scale_fill_manual(values=c("darkolivegreen4", "firebrick3"),labels=c("control", "case"), name="Group")+
ggplot2::labs(title="calibrated, internal, hist_scal", x = "calibrated prediction")
p4 <- ggplot2::ggplot()+
ggplot2::geom_density(ggplot2::aes(x=calibrate_object$predictions$hist_trans[calibrate_object$calibration_models$original_values$actual==0], colour="darkolivegreen4", fill="darkolivegreen4"),alpha=0.2)+
ggplot2::geom_density(ggplot2::aes(x=calibrate_object$predictions$hist_trans[calibrate_object$calibration_models$original_values$actual==1], colour="firebrick3", fill="firebrick3"),alpha=0.2)+
ggplot2::scale_colour_manual(values=c("darkolivegreen4", "firebrick3"),labels=c("control", "case"), name="Group")+
ggplot2::scale_fill_manual(values=c("darkolivegreen4", "firebrick3"),labels=c("control", "case"), name="Group")+
ggplot2::labs(title="calibrated, internal, hist_trans", x = "calibrated prediction")
p5 <- ggplot2::ggplot()+
ggplot2::geom_density(ggplot2::aes(x=calibrate_object$predictions$BBQ_scaled_sel[calibrate_object$calibration_models$original_values$actual==0], colour="darkolivegreen4", fill="darkolivegreen4"),alpha=0.2)+
ggplot2::geom_density(ggplot2::aes(x=calibrate_object$predictions$BBQ_scaled_sel[calibrate_object$calibration_models$original_values$actual==1], colour="firebrick3", fill="firebrick3"),alpha=0.2)+
ggplot2::scale_colour_manual(values=c("darkolivegreen4", "firebrick3"),labels=c("control", "case"), name="Group")+
ggplot2::scale_fill_manual(values=c("darkolivegreen4", "firebrick3"),labels=c("control", "case"), name="Group")+
ggplot2::labs(title="calibrated, internal, BBQ_scaled_sel", x = "calibrated prediction")
p6 <- ggplot2::ggplot()+
ggplot2::geom_density(ggplot2::aes(x=calibrate_object$predictions$BBQ_scaled_avg[calibrate_object$calibration_models$original_values$actual==0], colour="darkolivegreen4", fill="darkolivegreen4"),alpha=0.2)+
ggplot2::geom_density(ggplot2::aes(x=calibrate_object$predictions$BBQ_scaled_avg[calibrate_object$calibration_models$original_values$actual==1], colour="firebrick3", fill="firebrick3"),alpha=0.2)+
ggplot2::scale_colour_manual(values=c("darkolivegreen4", "firebrick3"),labels=c("control", "case"), name="Group")+
ggplot2::scale_fill_manual(values=c("darkolivegreen4", "firebrick3"),labels=c("control", "case"), name="Group")+
ggplot2::labs(title="calibrated, internal, BBQ_scaled_avg", x = "calibrated prediction")
p7 <- ggplot2::ggplot()+
ggplot2::geom_density(ggplot2::aes(x=calibrate_object$predictions$BBQ_transformed_sel[calibrate_object$calibration_models$original_values$actual==0], colour="darkolivegreen4", fill="darkolivegreen4"),alpha=0.2)+
ggplot2::geom_density(ggplot2::aes(x=calibrate_object$predictions$BBQ_transformed_sel[calibrate_object$calibration_models$original_values$actual==1], colour="firebrick3", fill="firebrick3"),alpha=0.2)+
ggplot2::scale_colour_manual(values=c("darkolivegreen4", "firebrick3"),labels=c("control", "case"), name="Group")+
ggplot2::scale_fill_manual(values=c("darkolivegreen4", "firebrick3"),labels=c("control", "case"), name="Group")+
ggplot2::labs(title="calibrated, internal, BBQ_transformed_sel", x = "calibrated prediction")
p8 <- ggplot2::ggplot()+
ggplot2::geom_density(ggplot2::aes(x=calibrate_object$predictions$BBQ_transformed_avg[calibrate_object$calibration_models$original_values$actual==0], colour="darkolivegreen4", fill="darkolivegreen4"),alpha=0.2)+
ggplot2::geom_density(ggplot2::aes(x=calibrate_object$predictions$BBQ_transformed_avg[calibrate_object$calibration_models$original_values$actual==1], colour="firebrick3", fill="firebrick3"),alpha=0.2)+
ggplot2::scale_colour_manual(values=c("darkolivegreen4", "firebrick3"),labels=c("control", "case"), name="Group")+
ggplot2::scale_fill_manual(values=c("darkolivegreen4", "firebrick3"),labels=c("control", "case"), name="Group")+
ggplot2::labs(title="calibrated, internal, BBQ_transformed_avg", x = "calibrated prediction")
p9 <- ggplot2::ggplot()+
ggplot2::geom_density(ggplot2::aes(x=calibrate_object$predictions$GUESS_1[calibrate_object$calibration_models$original_values$actual==0], colour="darkolivegreen4", fill="darkolivegreen4"),alpha=0.2)+
ggplot2::geom_density(ggplot2::aes(x=calibrate_object$predictions$GUESS_1[calibrate_object$calibration_models$original_values$actual==1], colour="firebrick3", fill="firebrick3"),alpha=0.2)+
ggplot2::scale_colour_manual(values=c("darkolivegreen4", "firebrick3"),labels=c("control", "case"), name="Group")+
ggplot2::scale_fill_manual(values=c("darkolivegreen4", "firebrick3"),labels=c("control", "case"), name="Group")+
ggplot2::labs(title="calibrated, internal, GUESS_1", x = "calibrated prediction")
p10 <- ggplot2::ggplot()+
ggplot2::geom_density(ggplot2::aes(x=calibrate_object$predictions$GUESS_2[calibrate_object$calibration_models$original_values$actual==0], colour="darkolivegreen4", fill="darkolivegreen4"),alpha=0.2)+
ggplot2::geom_density(ggplot2::aes(x=calibrate_object$predictions$GUESS_2[calibrate_object$calibration_models$original_values$actual==1], colour="firebrick3", fill="firebrick3"),alpha=0.2)+
ggplot2::scale_colour_manual(values=c("darkolivegreen4", "firebrick3"),labels=c("control", "case"), name="Group")+
ggplot2::scale_fill_manual(values=c("darkolivegreen4", "firebrick3"),labels=c("control", "case"), name="Group")+
ggplot2::labs(title="calibrated, internal, GUESS_2", x = "calibrated prediction")
distribution_list <- list(original=p0,scaled=p1,transformed=p2,hist_scaled=p3,hist_transformed=p4,BBQ_scaled_sel=p5,
BBQ_scaled_avg=p6,BBQ_transformed_sel=p7,BBQ_transformed_avg=p8,GUESS_1=p9,GUESS_2=p10)
p0 <- ggplot2::ggplot()+
ggplot2::geom_density(ggplot2::aes(x=calibrate_object$calibration_models$original_values$predicted[calibrate_object$calibration_models$original_values$actual==0], colour="darkolivegreen4", fill="darkolivegreen4"),alpha=0.2)+
ggplot2::geom_density(ggplot2::aes(x=calibrate_object$calibration_models$original_values$predicted[calibrate_object$calibration_models$original_values$actual==1], colour="firebrick3", fill="firebrick3"),alpha=0.2)+
ggplot2::scale_colour_manual(values=c("darkolivegreen4", "firebrick3"),labels=c("control", "case"), name="Group")+
ggplot2::scale_fill_manual(values=c("darkolivegreen4", "firebrick3"),labels=c("control", "case"), name="Group")+
ggplot2::labs(title="Uncalibrated", x = "ML score")
p1 <- ggplot2::ggplot()+
ggplot2::geom_density(ggplot2::aes(x=calibrate_object$summary_CV$models$uncalibrated$scaled[[1]]$probs_CV[calibrate_object$summary_CV$models$uncalibrated$scaled[[1]]$actual_CV==0], colour="darkolivegreen4", fill="darkolivegreen4"),alpha=0.2)+
ggplot2::geom_density(ggplot2::aes(x=calibrate_object$summary_CV$models$uncalibrated$scaled[[1]]$probs_CV[calibrate_object$summary_CV$models$uncalibrated$scaled[[1]]$actual_CV==1], colour="firebrick3", fill="firebrick3"),alpha=0.2)+
ggplot2::scale_colour_manual(values=c("darkolivegreen4", "firebrick3"),labels=c("control", "case"), name="Group")+
ggplot2::scale_fill_manual(values=c("darkolivegreen4", "firebrick3"),labels=c("control", "case"), name="Group")+
ggplot2::labs(title="calibrated, external, scaled", x = "uncalibrated prediction")
p2 <- ggplot2::ggplot()+
ggplot2::geom_density(ggplot2::aes(x=calibrate_object$summary_CV$models$uncalibrated$transformed[[1]]$probs_CV[calibrate_object$summary_CV$models$uncalibrated$transformed[[1]]$actual_CV==0], colour="darkolivegreen4", fill="darkolivegreen4"),alpha=0.2)+
ggplot2::geom_density(ggplot2::aes(x=calibrate_object$summary_CV$models$uncalibrated$transformed[[1]]$probs_CV[calibrate_object$summary_CV$models$uncalibrated$transformed[[1]]$actual_CV==1], colour="firebrick3", fill="firebrick3"),alpha=0.2)+
ggplot2::scale_colour_manual(values=c("darkolivegreen4", "firebrick3"),labels=c("control", "case"), name="Group")+
ggplot2::scale_fill_manual(values=c("darkolivegreen4", "firebrick3"),labels=c("control", "case"), name="Group")+
ggplot2::labs(title="calibrated, external, transformed", x = "uncalibrated prediction")
p3 <- ggplot2::ggplot()+
ggplot2::geom_density(ggplot2::aes(x=calibrate_object$summary_CV$models$calibrated$hist_scaled[[1]]$probs_CV[calibrate_object$summary_CV$models$calibrated$hist_scaled[[1]]$actual_CV==0], colour="darkolivegreen4", fill="darkolivegreen4"),alpha=0.2)+
ggplot2::geom_density(ggplot2::aes(x=calibrate_object$summary_CV$models$calibrated$hist_scaled[[1]]$probs_CV[calibrate_object$summary_CV$models$calibrated$hist_scaled[[1]]$actual_CV==1], colour="firebrick3", fill="firebrick3"),alpha=0.2)+
ggplot2::scale_colour_manual(values=c("darkolivegreen4", "firebrick3"),labels=c("control", "case"), name="Group")+
ggplot2::scale_fill_manual(values=c("darkolivegreen4", "firebrick3"),labels=c("control", "case"), name="Group")+
ggplot2::labs(title="calibrated, external, hist_scal", x = "calibrated prediction")
p4 <- ggplot2::ggplot()+
ggplot2::geom_density(ggplot2::aes(x=calibrate_object$summary_CV$models$calibrated$hist_transformed[[1]]$probs_CV[calibrate_object$summary_CV$models$calibrated$hist_transformed[[1]]$actual_CV==0], colour="darkolivegreen4", fill="darkolivegreen4"),alpha=0.2)+
ggplot2::geom_density(ggplot2::aes(x=calibrate_object$summary_CV$models$calibrated$hist_transformed[[1]]$probs_CV[calibrate_object$summary_CV$models$calibrated$hist_transformed[[1]]$actual_CV==1], colour="firebrick3", fill="firebrick3"),alpha=0.2)+
ggplot2::scale_colour_manual(values=c("darkolivegreen4", "firebrick3"),labels=c("control", "case"), name="Group")+
ggplot2::scale_fill_manual(values=c("darkolivegreen4", "firebrick3"),labels=c("control", "case"), name="Group")+
ggplot2::labs(title="calibrated, external, hist_trans", x = "calibrated prediction")
p5 <- ggplot2::ggplot()+
ggplot2::geom_density(ggplot2::aes(x=calibrate_object$summary_CV$models$calibrated$BBQ_scaled_sel[[1]]$probs_CV[calibrate_object$summary_CV$models$calibrated$BBQ_scaled_sel[[1]]$actual_CV==0], colour="darkolivegreen4", fill="darkolivegreen4"),alpha=0.2)+
ggplot2::geom_density(ggplot2::aes(x=calibrate_object$summary_CV$models$calibrated$BBQ_scaled_sel[[1]]$probs_CV[calibrate_object$summary_CV$models$calibrated$BBQ_scaled_sel[[1]]$actual_CV==1], colour="firebrick3", fill="firebrick3"),alpha=0.2)+
ggplot2::scale_colour_manual(values=c("darkolivegreen4", "firebrick3"),labels=c("control", "case"), name="Group")+
ggplot2::scale_fill_manual(values=c("darkolivegreen4", "firebrick3"),labels=c("control", "case"), name="Group")+
ggplot2::labs(title="calibrated, external, BBQ_scaled_sel", x = "calibrated prediction")
p6 <- ggplot2::ggplot()+
ggplot2::geom_density(ggplot2::aes(x=calibrate_object$summary_CV$models$calibrated$BBQ_scaled_avg[[1]]$probs_CV[calibrate_object$summary_CV$models$calibrated$BBQ_scaled_avg[[1]]$actual_CV==0], colour="darkolivegreen4", fill="darkolivegreen4"),alpha=0.2)+
ggplot2::geom_density(ggplot2::aes(x=calibrate_object$summary_CV$models$calibrated$BBQ_scaled_avg[[1]]$probs_CV[calibrate_object$summary_CV$models$calibrated$BBQ_scaled_avg[[1]]$actual_CV==1], colour="firebrick3", fill="firebrick3"),alpha=0.2)+
ggplot2::scale_colour_manual(values=c("darkolivegreen4", "firebrick3"),labels=c("control", "case"), name="Group")+
ggplot2::scale_fill_manual(values=c("darkolivegreen4", "firebrick3"),labels=c("control", "case"), name="Group")+
ggplot2::labs(title="calibrated, external, BBQ_scaled_avg", x = "calibrated prediction")
p7 <- ggplot2::ggplot()+
ggplot2::geom_density(ggplot2::aes(x=calibrate_object$summary_CV$models$calibrated$BBQ_transformed_sel[[1]]$probs_CV[calibrate_object$summary_CV$models$calibrated$BBQ_transformed_sel[[1]]$actual_CV==0], colour="darkolivegreen4", fill="darkolivegreen4"),alpha=0.2)+
ggplot2::geom_density(ggplot2::aes(x=calibrate_object$summary_CV$models$calibrated$BBQ_transformed_sel[[1]]$probs_CV[calibrate_object$summary_CV$models$calibrated$BBQ_transformed_sel[[1]]$actual_CV==1], colour="firebrick3", fill="firebrick3"),alpha=0.2)+
ggplot2::scale_colour_manual(values=c("darkolivegreen4", "firebrick3"),labels=c("control", "case"), name="Group")+
ggplot2::scale_fill_manual(values=c("darkolivegreen4", "firebrick3"),labels=c("control", "case"), name="Group")+
ggplot2::labs(title="calibrated, external, BBQ_transformed_sel", x = "calibrated prediction")
p8 <- ggplot2::ggplot()+
ggplot2::geom_density(ggplot2::aes(x=calibrate_object$summary_CV$models$calibrated$BBQ_transformed_avg[[1]]$probs_CV[calibrate_object$summary_CV$models$calibrated$BBQ_transformed_avg[[1]]$actual_CV==0], colour="darkolivegreen4", fill="darkolivegreen4"),alpha=0.2)+
ggplot2::geom_density(ggplot2::aes(x=calibrate_object$summary_CV$models$calibrated$BBQ_transformed_avg[[1]]$probs_CV[calibrate_object$summary_CV$models$calibrated$BBQ_transformed_avg[[1]]$actual_CV==1], colour="firebrick3", fill="firebrick3"),alpha=0.2)+
ggplot2::scale_colour_manual(values=c("darkolivegreen4", "firebrick3"),labels=c("control", "case"), name="Group")+
ggplot2::scale_fill_manual(values=c("darkolivegreen4", "firebrick3"),labels=c("control", "case"), name="Group")+
ggplot2::labs(title="calibrated, external, BBQ_transformed_avg", x = "calibrated prediction")
p9 <- ggplot2::ggplot()+
ggplot2::geom_density(ggplot2::aes(x=calibrate_object$summary_CV$models$calibrated$GUESS_1[[1]]$probs_CV[calibrate_object$summary_CV$models$calibrated$GUESS_1[[1]]$actual_CV==0], colour="darkolivegreen4", fill="darkolivegreen4"),alpha=0.2)+
ggplot2::geom_density(ggplot2::aes(x=calibrate_object$summary_CV$models$calibrated$GUESS_1[[1]]$probs_CV[calibrate_object$summary_CV$models$calibrated$GUESS_1[[1]]$actual_CV==1], colour="firebrick3", fill="firebrick3"),alpha=0.2)+
ggplot2::scale_colour_manual(values=c("darkolivegreen4", "firebrick3"),labels=c("control", "case"), name="Group")+
ggplot2::scale_fill_manual(values=c("darkolivegreen4", "firebrick3"),labels=c("control", "case"), name="Group")+
ggplot2::labs(title="calibrated, external, GUESS_1", x = "calibrated prediction")
p10 <- ggplot2::ggplot()+
ggplot2::geom_density(ggplot2::aes(x=calibrate_object$summary_CV$models$calibrated$GUESS_2[[1]]$probs_CV[calibrate_object$summary_CV$models$calibrated$GUESS_2[[1]]$actual_CV==0], colour="darkolivegreen4", fill="darkolivegreen4"),alpha=0.2)+
ggplot2::geom_density(ggplot2::aes(x=calibrate_object$summary_CV$models$calibrated$GUESS_2[[1]]$probs_CV[calibrate_object$summary_CV$models$calibrated$GUESS_2[[1]]$actual_CV==1], colour="firebrick3", fill="firebrick3"),alpha=0.2)+
ggplot2::scale_colour_manual(values=c("darkolivegreen4", "firebrick3"),labels=c("control", "case"), name="Group")+
ggplot2::scale_fill_manual(values=c("darkolivegreen4", "firebrick3"),labels=c("control", "case"), name="Group")+
ggplot2::labs(title="calibrated, external, GUESS_2", x = "calibrated prediction")
distribution_list_external <- list(original=p0,scaled=p1,transformed=p2,hist_scaled=p3,hist_transformed=p4,BBQ_scaled_sel=p5,
BBQ_scaled_avg=p6,BBQ_transformed_sel=p7,BBQ_transformed_avg=p8,GUESS_1=p9,GUESS_2=p10)
}
else{
distribution_list <- NULL
distribution_list_external <- NULL
}
if (visualize_distributions){
plot_distribution <- visualize_distribution(calibrate_object$calibration_models$original_values$actual,
calibrate_object$calibration_models$original_values$predicted)
}
else{
plot_distribution <- NULL
}
if(rd_partitions){
rd_plot <- list()
rd_plot[["scaled"]] <- rd_multiple_runs(calibrate_object$summary_CV$models$uncalibrated$scaled)
rd_plot[["transformed"]] <- rd_multiple_runs(calibrate_object$summary_CV$models$uncalibrated$transformed)
rd_plot[["hist_scaled"]] <- rd_multiple_runs(calibrate_object$summary_CV$models$calibrated$hist_scaled)
rd_plot[["hist_transformed"]] <- rd_multiple_runs(calibrate_object$summary_CV$models$calibrated$hist_transformed)
rd_plot[["BBQ_scaled_sel"]] <- rd_multiple_runs(calibrate_object$summary_CV$models$calibrated$BBQ_scaled_sel)
rd_plot[["BBQ_scaled_avg"]] <- rd_multiple_runs(calibrate_object$summary_CV$models$calibrated$BBQ_scaled_avg)
rd_plot[["BBQ_transformed_sel"]] <- rd_multiple_runs(calibrate_object$summary_CV$models$calibrated$BBQ_transformed_sel)
rd_plot[["BBQ_transformed_avg"]] <- rd_multiple_runs(calibrate_object$summary_CV$models$calibrated$BBQ_transformed_avg)
rd_plot[["GUESS_1"]] <- rd_multiple_runs(calibrate_object$summary_CV$models$calibrated$GUESS_1)
rd_plot[["GUESS_2"]] <- rd_multiple_runs(calibrate_object$summary_CV$models$calibrated$GUESS_2)
}
else{
rd_plot <- NULL
}
if(visualize_errors_CV){
plots_calibration <- visualize_error_boxplot(calibrate_object$summary_CV$error_models$calibration, discrimination = FALSE)
plots_discrimination <- visualize_error_boxplot(calibrate_object$summary_CV$error_models$discrimination, discrimination = TRUE)
}
else{
plots_calibration <- NULL
plots_discrimination <- NULL
}
if(visualize_cle_class_error){
cle_class_specific <- get_CLE_comparison(calibrate_object$summary_CV$error_models$calibration)
}
else{
cle_class_specific <- NULL
}
if(visualize_guess){
guess1 <- plot_class_distributions(calibrate_object$calibration_models$models_final$GUESS, 1)
guess2 <- plot_class_distributions(calibrate_object$calibration_models$models_final$GUESS, 2)
}
else{
guess1 <- NULL
guess2 <- NULL
}
if(training_set_calibrated){
plot1 <- visualize_calibrated_test_set(calibrate_object$calibration_models$original_values$actual, calibrate_object$predictions,
calibrate_object$summary_no_CV$discrimination_error[,"cutoff"])
}
else{
plot1 <- NULL
}
return(list(histogram_distribution=plot_distribution, density_calibration_internal=distribution_list, density_calibration_external=distribution_list_external, plot_calibration_models=plot_models, plot_single_models=plot_single_models,
rd_plot=rd_plot, calibration_error=plots_calibration,discrimination_error=plots_discrimination, cle_class_specific_error=cle_class_specific,
training_set_calibrated=plot1,
GUESS_1_final_model=guess1, GUESS_2_final_model=guess2))
}
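# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original package code), mirroring the
# roxygen example above with the bundled `example` dataset:
# data(example)
# vis <- visualize_calibratR(example$calibration_model, visualize_models = TRUE)
# vis$histogram_distribution$plot_distribution  # histogram of the raw ML scores
# vis$plot_calibration_models                   # score-to-estimate mapping for all models
# vis$calibration_error$ece                     # ECE boxplots over the CV runs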
|
/scratch/gouwar.j/cran-all/cranData/CalibratR/R/visualize_calibratR.R
|
#' @title visualize_calibrated_test_set
#' @description plots a panel of the calibrated predictions from each calibration model, allowing visual comparison of the models' outputs and their optimal cut-offs
#' @param actual vector of observed class labels (0/1)
#' @param predicted_list predict_calibratR$predictions object (list of calibrated predictions from calibration models)
#' @param cutoffs vector of optimal cut-off thresholds for each calibration model
#' @return ggplot2 element for visual comparison of the evaluated calibration models
#' @seealso
#' \code{\link[ggplot2]{ggplot}},\code{\link[ggplot2]{geom_point}},\code{\link[ggplot2]{scale_colour_manual}},\code{\link[ggplot2]{xlab}},\code{\link[ggplot2]{ylab}},\code{\link[ggplot2]{geom_hline}},\code{\link[ggplot2]{ylim}}
#' @rdname visualize_calibrated_test_set
#' @importFrom ggplot2 ggplot geom_point scale_colour_manual xlab ylab geom_hline ylim
visualize_calibrated_test_set <- function(actual, predicted_list, cutoffs){
plots <- list()
d <- data.frame(predicted_list)
d$original <- NULL
plot1 <- ggplot2::ggplot()+
ggplot2::geom_point(ggplot2::aes(x=seq(1, length(actual)),y=predicted_list$original, colour=as.factor(actual)), show.legend = FALSE)+
ggplot2::scale_colour_manual(values=c("darkolivegreen4","firebrick3"),name="Group",labels=c("Control","Case"))+
ggplot2::xlab(label="idx") +
ggplot2::ylab(label="original")+
ggplot2::geom_hline(yintercept = 0.6, colour="black", linetype=3, size=0.7)+
ggplot2::geom_hline(yintercept = 0.4, colour="black", linetype=3, size=0.7)+
ggplot2::geom_hline(yintercept = cutoffs[[1]], linetype=4, colour="red")
plots$original <- plot1
for (i in names(d)){
plot <- ggplot2::ggplot(data = d)+
ggplot2::geom_point(ggplot2::aes_string(x=seq(1, length(actual)),y=i, colour=as.factor(actual)), show.legend = FALSE)+
ggplot2::scale_colour_manual(values=c("darkolivegreen4","firebrick3"),name="Group",labels=c("Control","Case"))+
ggplot2::xlab(label="idx") +
ggplot2::ylim(c(0,1))+
ggplot2::ylab(label=i)+
ggplot2::geom_hline(yintercept = 0.6, colour="black", linetype=3, size=0.7)+
ggplot2::geom_hline(yintercept = 0.4, colour="black", linetype=3, size=0.7)+
ggplot2::geom_hline(yintercept = cutoffs[[i]], linetype=4, colour="red")
plots[[i]] <- plot
}
if (any(sapply(plots, is.null))){
plots <- plots[-which(sapply(plots, is.null))]
}
return(plots)
}
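# Hedged sketch (helper is not exported): visualize_calibrated_test_set() is
# normally reached via visualize_calibratR(); assuming `cal` is a calibrate()
# result with predictions and a no-CV error summary, a direct call would be
# plots <- visualize_calibrated_test_set(
#   cal$calibration_models$original_values$actual,
#   cal$predictions,
#   cal$summary_no_CV$discrimination_error[, "cutoff"])
# plots$original  # uncalibrated scores with cut-off lines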
|
/scratch/gouwar.j/cran-all/cranData/CalibratR/R/visualize_calibrated_test_set.R
|
#' @title visualize_distribution
#' @description visualises the distribution of the original ML scores for the two observed classes (cases and controls) and summarises the class-wise distribution parameters
#' @param actual vector of observed class labels (0/1)
#' @param predicted vector of uncalibrated predictions
#' @return list object containing the following components:
#' \item{plot_distribution}{ggplot histogram that visualizes the observed class distributions}
#' \item{parameter}{list object that summarizes all relevant parameters (mean, sd, number) of the observed class distributions}
#' @seealso
#' \code{\link[ggplot2]{ggplot}},\code{\link[ggplot2]{geom_histogram}},\code{\link[ggplot2]{aes}},\code{\link[ggplot2]{scale_colour_manual}},\code{\link[ggplot2]{scale_fill_manual}},\code{\link[ggplot2]{labs}}
#' @rdname visualize_distribution
#' @importFrom ggplot2 ggplot geom_histogram aes scale_colour_manual scale_fill_manual labs
#' @importFrom stats sd
visualize_distribution <- function(actual, predicted){
all <- data.frame(cbind(actual, predicted))
n <- nrow(all)
case <- subset(all, all$actual==1)
control <- subset(all, all$actual==0)
mean_cases <- mean(case$predicted)
mean_control <- mean(control$predicted)
sd_case <- sd(case$predicted)
sd_control <- sd(control$predicted)
n_cases <- nrow(case)
n_control <- nrow(control)
total <- n_cases+n_control
plot_distribution <- ggplot2::ggplot()+
ggplot2::geom_histogram(ggplot2::aes(x=control$predicted, colour="darkolivegreen4", fill="darkolivegreen4"),bins=10,alpha=0.4)+
ggplot2::geom_histogram(ggplot2::aes(x=case$predicted, colour="firebrick3", fill="firebrick3"),bins=10,alpha=0.4)+
ggplot2::scale_colour_manual(values=c("darkolivegreen4", "firebrick3"),labels=c("control", "case"), name="Group")+
ggplot2::scale_fill_manual(values=c("darkolivegreen4", "firebrick3"),labels=c("control", "case"), name="Group")+
ggplot2::labs(title="Controls vs. Cases density, Test Set",y="Frequency", subtitle=paste("no. of cases", n_cases,"\n",
"no. of controls", n_control),
x = "original ML score")
parameters <- c(mean_prediction_cases=mean_cases, mean_prediction_controls=mean_control,
sd_prediction_cases=sd_case, sd_prediction_controls=sd_control,
number_cases=n_cases, number_controls=n_control, total=total)
return(list(plot_distribution=plot_distribution, parameter=parameters))
}
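# Hedged sketch (helper is not exported): a direct call with simulated labels
# and scores, purely for illustration:
# set.seed(1)
# dist <- visualize_distribution(actual = rbinom(100, 1, 0.5),
#                                predicted = runif(100))
# dist$parameter          # class-wise means, sds and counts
# dist$plot_distribution  # histogram of the scores by class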
|
/scratch/gouwar.j/cran-all/cranData/CalibratR/R/visualize_distribution.R
|
#' @title visualize_error_boxplot
#' @description compares error values among the different calibration models. A boxplot is created from the n error values that were obtained during the n-times repeated Cross-Validation procedure.
#' Different error values are implemented and can be compared:
#' \cr discrimination error = sensitivity, specificity, accuracy, AUC (when \code{discrimination}=TRUE)
#' \cr calibration error = ece, mce, rmse, class 0 cle, class 1 cle (when \code{discrimination}=FALSE)
#' For the calculation of the errors, see the respective methods listed in the "see also" section.
#' @param list_models list object that contains all error values for all trained calibration models. For the specific format, see the calling function \code{\link{visualize_calibratR}}.
#' @param discrimination boolean (TRUE or FALSE). If TRUE, discrimination errors are compared between models; if FALSE calibration error is compared, Default: TRUE
#' @return An object of class list, with the following components:
#' \cr if \code{discrimination}=TRUE
#' \item{sens}{ggplot2 boxplot that compares all evaluated calibration models with regard to sensitivity.}
#' \item{spec}{ggplot2 boxplot that compares all evaluated calibration models with regard to specificity}
#' \item{acc}{ggplot2 boxplot that compares all evaluated calibration models with regard to accuracy}
#' \item{auc}{ggplot2 boxplot that compares all evaluated calibration models with regard to AUC}
#' \item{list_errors}{list object that contains all discrimination error values that were used to construct the boxplots}
#' \cr if \code{discrimination}=FALSE
#' \item{ece}{ggplot2 boxplot that compares all evaluated calibration models with regard to expected calibration error}
#' \item{mce}{ggplot2 boxplot that compares all evaluated calibration models with regard to maximum expected calibration error (MCE)}
#' \item{rmse}{ggplot2 boxplot that compares all evaluated calibration models with regard to root mean square error (RMSE)}
#' \item{cle_0}{ggplot2 boxplot that compares all evaluated calibration models with regard to class 0 classification error (CLE)}
#' \item{cle_1}{ggplot2 boxplot that compares all evaluated calibration models with regard to class 1 classification error (CLE)}
#' \item{list_errors}{list object that contains all calibration error values that were used to construct the boxplots}
#' @seealso
#' \code{\link[ggplot2]{ggplot}},\code{\link[ggplot2]{aes}},\code{\link[ggplot2]{ggtitle}},\code{\link[ggplot2]{scale_x_discrete}},\code{\link[ggplot2]{geom_boxplot}},\code{\link[ggplot2]{theme}},\code{\link[ggplot2]{element_text}}
#' \code{\link[reshape2]{melt}},\code{\link{get_CLE_class}},\code{\link{getECE}},\code{\link{getMCE}},\code{\link{getRMSE}}, \code{\link{evaluate_discrimination}}
#' @rdname visualize_error_boxplot
#' @importFrom ggplot2 ggplot aes ggtitle scale_x_discrete geom_boxplot theme element_text
#' @importFrom reshape2 melt
visualize_error_boxplot <- function(list_models, discrimination=TRUE){
idx <- 1
list_errors <- list()
if(discrimination){
list_errors[["sensitivity"]] <- list()
list_errors[["specificity"]] <- list()
list_errors[["accuracy"]] <- list()
list_errors[["auc"]] <- list()
for (j in list_models){
list_errors[["sensitivity"]][[names(list_models)[[idx]]]] <- j$sens
list_errors[["specificity"]][[names(list_models)[[idx]]]] <- j$spec
list_errors[["accuracy"]][[names(list_models)[[idx]]]] <- j$acc
list_errors[["auc"]][[names(list_models)[[idx]]]] <- j$auc
idx <- idx+1
}
df_sens <- data.frame(list_errors$sensitivity)
variable <- NULL
value <- NULL
p1 <- ggplot2::ggplot(reshape2::melt(df_sens, id.vars=NULL), ggplot2::aes(x=variable, y=value)) +
ggplot2::ggtitle("Sensitivity") +
ggplot2::scale_x_discrete(name = NULL) +
ggplot2::geom_boxplot(fill="#4271AE", alpha=0.7) +
ggplot2::theme(axis.text.x = ggplot2::element_text(angle = 60, hjust = 1))
df_spec <- data.frame(list_errors$specificity)
p2 <- ggplot2::ggplot(reshape2::melt(df_spec, id.vars=NULL), ggplot2::aes(x=variable, y=value)) +
ggplot2::ggtitle("Specificity") +
ggplot2::scale_x_discrete(name = NULL) +
ggplot2::geom_boxplot(fill="#4271AE", alpha=0.7) +
ggplot2::theme(axis.text.x = ggplot2::element_text(angle = 60, hjust = 1))
df_acc <- data.frame(list_errors$accuracy)
p3 <- ggplot2::ggplot(reshape2::melt(df_acc, id.vars=NULL), ggplot2::aes(x=variable, y=value)) +
ggplot2::ggtitle("Accuracy") +
ggplot2::scale_x_discrete(name = NULL) +
ggplot2::geom_boxplot(fill="#4271AE", alpha=0.7) +
ggplot2::theme(axis.text.x = ggplot2::element_text(angle = 60, hjust = 1))
df_auc <- data.frame(list_errors$auc)
p4 <- ggplot2::ggplot(reshape2::melt(df_auc, id.vars=NULL), ggplot2::aes(x=variable, y=value)) +
ggplot2::ggtitle("AUC") +
ggplot2::scale_x_discrete(name = NULL) +
ggplot2::geom_boxplot(fill="#4271AE", alpha=0.7) +
ggplot2::theme(axis.text.x = ggplot2::element_text(angle = 60, hjust = 1))
return(list(sens=p1, spec=p2, acc=p3, auc=p4, list_errors=list_errors))
}
else{
list_models$original <- NULL
list_errors[["ece"]] <- list()
list_errors[["mce"]] <- list()
list_errors[["rmse"]] <- list()
list_errors[["cle_class1"]] <- list()
list_errors[["cle_class0"]] <- list()
for (j in list_models){
list_errors[["ece"]][[names(list_models)[[idx]]]] <- j$ECE_equal_width
list_errors[["mce"]][[names(list_models)[[idx]]]] <- j$MCE_equal_width
list_errors[["rmse"]][[names(list_models)[[idx]]]] <- j$RMSE
list_errors[["cle_class1"]][[names(list_models)[[idx]]]] <- j$CLE_class_1
list_errors[["cle_class0"]][[names(list_models)[[idx]]]] <- j$CLE_class_0
idx <- idx+1
}
df_ece <- data.frame(list_errors$ece)
variable <- NULL
value <- NULL
p1 <- ggplot2::ggplot(reshape2::melt(df_ece, id.vars=NULL), ggplot2::aes(x=variable, y=value)) +
ggplot2::ggtitle("ECE") +
ggplot2::scale_x_discrete(name = NULL) +
ggplot2::geom_boxplot(fill="#4271AE", alpha=0.7) +
ggplot2::theme(axis.text.x = ggplot2::element_text(angle = 60, hjust = 1))
df_mce <- data.frame(list_errors$mce)
p2 <- ggplot2::ggplot(reshape2::melt(df_mce, id.vars=NULL), ggplot2::aes(x=variable, y=value)) +
ggplot2::ggtitle("MCE") +
ggplot2::scale_x_discrete(name = NULL) +
ggplot2::geom_boxplot(fill="#4271AE", alpha=0.7) +
ggplot2::theme(axis.text.x = ggplot2::element_text(angle = 60, hjust = 1))
df_rmse <- data.frame(list_errors$rmse)
p3 <- ggplot2::ggplot(reshape2::melt(df_rmse, id.vars=NULL), ggplot2::aes(x=variable, y=value)) +
ggplot2::ggtitle("RMSE") +
ggplot2::scale_x_discrete(name = NULL) +
ggplot2::geom_boxplot(fill="#4271AE", alpha=0.7) +
ggplot2::theme(axis.text.x = ggplot2::element_text(angle = 60, hjust = 1))
df_cle_0 <- data.frame(list_errors$cle_class0)
p4 <- ggplot2::ggplot(reshape2::melt(df_cle_0, id.vars=NULL), ggplot2::aes(x=variable, y=value)) +
ggplot2::ggtitle("CLE class 0") +
ggplot2::scale_x_discrete(name = NULL) +
ggplot2::geom_boxplot(fill="#4271AE", alpha=0.7) +
ggplot2::theme(axis.text.x = ggplot2::element_text(angle = 60, hjust = 1))
df_cle_1 <- data.frame(list_errors$cle_class1)
p5 <- ggplot2::ggplot(reshape2::melt(df_cle_1, id.vars=NULL), ggplot2::aes(x=variable, y=value)) +
ggplot2::ggtitle("CLE class 1") +
ggplot2::scale_x_discrete(name = NULL) +
ggplot2::geom_boxplot(fill="#4271AE", alpha=0.7) +
ggplot2::theme(axis.text.x = ggplot2::element_text(angle = 60, hjust = 1))
return(list(ece=p1, mce=p2, rmse=p3, cle_0=p4, cle_1=p5, list_errors=list_errors))
}
}
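# Hedged sketch (helper is not exported): normally reached via
# visualize_calibratR(); assuming `cal` is a calibrate() result with CV
# summaries, a direct call would be
# err <- visualize_error_boxplot(cal$summary_CV$error_models$calibration,
#                                discrimination = FALSE)
# err$ece          # ECE boxplot across the calibration models
# err$list_errors  # the raw error values behind the boxplots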
|
/scratch/gouwar.j/cran-all/cranData/CalibratR/R/visualize_errors.R
|
#' Generate test data
#'
#' Generate test data of eight quarters
#'
#'
#' @param n Number of observations within each quarter.
#' @return A data frame with the following variables: \item{id}{Sample unit
#' identifier} \item{year}{Year} \item{q}{Quarter} \item{month}{Month}
#' \item{R}{Response indicator} \item{age}{Age group} \item{sex}{Sex}
#' \item{edu}{Education group} \item{famid}{Family identifier} \item{unemployed}{Unemployed}
#' \item{workforce}{In workforce}
#' @examples
#'
#' # Generates data - two years
#' z = AkuData(3000) # 3000 in each quarter
#'
#' @export AkuData
AkuData = function(n)
{
#data("testDataBasis",envir=environment())
testDataBasis = getTestDataBasis()
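# Assumed interpretation: columns 1:8 of testDataBasis hold one encoded cell
# per quarter and column 9 holds sampling probabilities; the draw below is
# deliberately oversampled and then trimmed to at most n observations per
# quarter by the NaN filtering further down.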
ix=sample(1:dim(testDataBasis)[1],size=100+round(n/0.4),replace=T,prob=testDataBasis[,9])
x=testDataBasis[ix,1:8]
sstat = (x %% 1000) - 100
sstat[sstat>500] = NaN
sstat[apply(!is.na(sstat),2,cumsum)>n] = NaN
rows = rowSums(!is.na(sstat))>0
x = x[rows,]
sstat = as.vector(sstat[rows,])
age = as.vector(x %/% 1000)
R = as.numeric(sstat>0)
q = as.vector(1+ (col(x)-1) %%4)
year = as.vector(2014+ (col(x)-1) %/%4)
month = as.vector(((row(x)-1) %%3)) + 1 + (q-1)*3
edu = rep(sample(1:4,size=dim(x)[1],replace=T,prob=c(3,6,4,2)),dim(x)[2])
sex = rep(sample(c(0,1),size=dim(x)[1],replace=T),dim(x)[2])
id = 1:dim(x)[1]
famid = rep(sample(1:60,size=dim(x)[1],replace=T),dim(x)[2]) + 100*(id %/%100)
z=data.frame(id,year,q,month,R,age,sex,edu,famid)
z =z[is.finite(sstat),]
sstat = sstat[is.finite(sstat)]
rownames(z) = NULL
z$year = factor(z$year)
z$q = factor(z$q)
z$month = factor(z$month)
z$age = factor(z$age)
z$sex = factor(z$sex)
z$edu = factor(z$edu)
z$unemployed = as.numeric(sstat==200)
z$workforce = as.numeric(sstat==100 | sstat==200)
z
}
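# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original package code):
# z <- AkuData(200)               # 200 observations per quarter, two years
# table(z$year, z$q)              # eight quarters in total
# mean(z$unemployed[z$R == 1])    # unemployment share among respondents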
# stackoverflow questions 30357330
pkgEnvAkuData <- new.env(parent=emptyenv())
if(!exists("testDataBasis", pkgEnvAkuData)) {
data("testDataBasis", package="CalibrateSSB", envir=pkgEnvAkuData)
}
getTestDataBasis <- function() {
pkgEnvAkuData[["testDataBasis"]]
}
#' testDataBasis
#'
#' Data used by \code{\link{AkuData}}
#'
#' @name testDataBasis
#' @docType data
#' @keywords datasets internal
NULL
|
/scratch/gouwar.j/cran-all/cranData/CalibrateSSB/R/AkuData.R
|
CalibratePackageReGenesees = function(netSample,calmodel=NULL,popTotals=NULL,y=NULL,by = NULL,partition=NULL,
popData=NULL,samplingWeights=NULL,bounds=c(-Inf,Inf),calfun="linear",
onlyTotals=FALSE,ids,...){
#warning("The non-CRAN package, ReGenesees, is needed.")
# Add this line to NAMESPACE when ReGenesees is on CRAN
#importFrom(ReGenesees,e.svydesign,pop.template,fill.template,get.residuals,e.calibrate,svystatTM)
# and add ReGenesees to Depends or Suggests in DESCRIPTION
if (requireNamespace("ReGenesees", quietly = TRUE)) {
e.svydesign <- ReGenesees::e.svydesign
pop.template <- ReGenesees::pop.template
fill.template <- ReGenesees::fill.template
e.calibrate <- ReGenesees::e.calibrate
get.residuals <- ReGenesees::get.residuals
svystatTM <- ReGenesees::svystatTM
} else {
stop("The package ReGenesees, is needed.")
}
desReGenesees <- e.svydesign(netSample,ids=asFormula(ids), weights =asFormula(samplingWeights))
if(is.null(popTotals)){
if(is.null(partition))
popTemplate <- pop.template(data=desReGenesees, calmodel=as.formula(calmodel))
else
popTemplate <- pop.template(data=desReGenesees, calmodel=as.formula(calmodel),partition=asFormula(partition))
popTotals <- fill.template(universe=popData,template= popTemplate)
} else
if(!is.null(partition)) warning("Partition as input has no effect when popTotals is specified and ReGenesees is used.")
if(onlyTotals) return(popTotals)
calReGenesees <- e.calibrate(design=desReGenesees, df.population=popTotals,bounds= bounds,calfun=calfun)
w=weights(calReGenesees)  # net sample only
resids = get.residuals(calReGenesees,asFormula(y), scale = "no")
estTM=NULL
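# when y and/or by are lists, the scalar one is expanded below so that
# svystatTM() is applied pairwise over matching y/by specifications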
if(!is.null(y)){
if(is.list(y) | is.list(by)){
if(is.list(y) & is.list(by)) {if(length(y)!=length(by)) stop("length(y)==length(by) must be TRUE")}
else{
if(is.list(y)){
if(is.null(by))
by = vector("list",length(y))
else{
by_ = by
by = y
for(i in 1:length(y)) by[[i]] = by_
}
}else{
y_ = y
y = by
for(i in 1:length(y)) y[[i]] = y_
}
}
estTM = y
for(i in 1:length(y)) estTM[[i]] = svystatTM(calReGenesees,y=asFormula(y[[i]]),by=asFormula(by[[i]]),...)
} else
{
estTM = svystatTM(calReGenesees,y=asFormula(y),by=asFormula(by),...)
}
}
if(is.null(estTM)) return(list(popTotals=popTotals,w=w))
list(popTotals=popTotals,w=w,estTM=estTM,resids=resids)
}
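# Hedged note: this helper is internal and depends on the non-CRAN ReGenesees
# package; it is normally reached via CalibrateSSB(..., usePackage = "ReGenesees")
# rather than being called directly.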
|
/scratch/gouwar.j/cran-all/cranData/CalibrateSSB/R/CalibratePackageReGenesees.r
|
#' Weighting and Estimation for Panel Data with Non-Response
#'
#' CalibrateSSB is an R-package that handles repeated surveys with partially
#' overlapping samples. Initially the samples are weighted by linear
#' calibration using known or estimated population totals. A robust model based
#' covariance matrix for all relevant estimated totals is calculated from the
#' residuals according to the calibration model. Alternatively a design based
#' covariance matrix is calculated in a very similar way. A cluster robust
#' version is also possible. In the case of estimated populations totals the
#' covariance matrix is adjusted by utilizing the theory of Särndal and
#' Lundström (2005). Variances of linear combinations (changes and averages)
#' and ratios are calculated from this covariance matrix. The linear
#' combinations and ratios can involve variables within and/or between sample
#' waves. \cr
#'
#' @name CalibrateSSB-package
#' @docType package
#'
#' @references Langsrud, Ø (2016): \dQuote{A variance estimation R-package for
#' repeated surveys - useful for estimates of changes in quarterly and annual
#' averages}, \emph{Romanian Statistical Review} nr. 2 / 2016, pp. 17-28.
#' CONFERENCE: \emph{New Challenges for Statistical Software - The Use of R in
#' Official Statistics}, Bucharest, Romania, 7-8 April. \cr
#'
#' Särndal, C.-E. and Lundström, S. (2005): \emph{Estimation in Surveys with
#' Nonresponse}, John Wiley and Sons, New York.
#' @keywords calibration
#' @encoding UTF8
NULL
|
/scratch/gouwar.j/cran-all/cranData/CalibrateSSB/R/CalibrateSSB-package.R
|
# wregw in impvekt should be changed ... the computation of "*" between a matrix and a vector
# It appears to come out right because of the order of evaluation, but it should be changed ....
#
#' Calibration weighting and estimation
#'
#' Compute weights by calibration and corresponding estimates, totals and
#' residuals
#'
#' When popTotals as input is NULL, population totals are computed from popData
#' (when available) or from grossSample. Some elements of popTotals may be
#' missing (not allowed when using ReGenesees). When using "ReGenesees", both
#' weighting and estimation are done by that package. When using "survey", only
#' the calibration weighting is done by that package.
#' The parameters \code{wave}, \code{id} and \code{extra} have no effect on the
#' computations, but result in extra elements in output
#' (to be used by WideFromCalibrate() later).
#'
#'
#' @encoding UTF8
#'
#' @param grossSample Data frame.
#' @param calmodel Formula defining the linear structure of the calibration
#' model.
#' @param response Variable name of response indicator (net sample when 1).
#' @param popTotals Population totals (similar to population totals as output).
#' @param y Names of variables of interest. Can be a list similar to "by"
#' below.
#' @param by Names of the variables that define the "estimation domains". If
#' NULL (the default option) or NA estimates refer to the whole population. Use
#' list for multiple specifications (resulting in list as output).
#' @param partition Names of the variables that define the "calibration
#' domains" for the model. NULL (the default) implies no calibration domains.
#' @param lRegmodel Formula defining the linear structure of a logistic
#' regression model.
#' @param popData Data frame of population data.
#' @param samplingWeights Name of the variable with initial weights for the
#' sampling units.
#' @param usePackage Specifying the package to be used: "survey" (the default),
#' "ReGenesees" or "none".
#' @param bounds Bounds for the calibration weights. When ReGenesees: Allowed
#' range for the ratios between calibrated and initial weights. The default is
#' c(-Inf,Inf).
#' @param calfun The distance function for the calibration process; the default
#' is 'linear'.
#' @param onlyTotals When TRUE: Only population totals are returned.
#' @param onlyw When TRUE: Only the calibrated weights are returned.
#' @param uselRegWeights When TRUE: Weighted logistic regression is performed
#' as a first calibration step.
#' @param ids Name of sampling unit identifier variable.
#' @param residOutput Residuals in output when TRUE. FALSE is default.
#' @param leverageOutput Leverages in output when TRUE. FALSE is default.
#' @param yOutput y in output when TRUE. FALSE is default.
#' @param samplingWeightsOutput samplingWeights in output when TRUE. FALSE is
#' default.
#' @param dropResid2 When TRUE (default) and when there are no missing population
#' totals, only one set of residuals is included in the output.
#' @param wGrossOutput wGross in output when TRUE (default) and when NA
#' popTotals.
#' @param wave Time or another repeat variable (to be included in output).
#' @param id Identifier variable (to be included in output).
#' @param extra Variables for the extra dataset (to be included in output).
#' @param allowNApopTotals When TRUE missing population totals are allowed.
#' Results in error when FALSE and warning when NULL.
#' @param partitionPrint When TRUE, progress over the calibration partitions is printed.
#' When NULL, printing is decided automatically (switched on when the total computing time is expected to exceed about one minute).
#'
#' @param ... Further arguments sent to underlying functions.
#' @return Unless onlyTotals or onlyw is TRUE, the output is an object of class calSSB. That is, a list with
#' elements: \item{popTotals}{Population totals.} \item{w}{The calibrated
#' weights.} \item{wGross}{Calibrated gross sample weights when NA popTotals.}
#' \item{estTM}{Estimates (with standard error).} \item{resids}{Residuals,
#' reduced model when NA popTotals.} \item{resids2}{Residuals, full model.}
#' \item{leverages}{Diagonal elements of hat-matrix, reduced model when NA
#' popTotals.} \item{leverages2}{Diagonal elements of hat-matrix, full model.}
#' \item{y}{as input} \item{samplingWeights}{as input}
#' \item{wave}{as input or via CrossStrata}
#' \item{id}{as input}
#' \item{extra}{as input}
#'
#' @export
#' @importFrom methods formalArgs
#' @importFrom stats aggregate as.formula binomial glm lm lm.influence model.frame model.matrix predict resid update weights
#' @importFrom utils head tail capture.output
#' @importFrom survey svydesign calibrate
#'
#' @seealso \code{\link{CalSSBobj}}, \code{\link{WideFromCalibrate}}, \code{\link{PanelEstimation}}, \code{\link{CalibrateSSBpanel}}.
#'
#' @examples
#'
#' # Generates data - two years
#' z <- AkuData(3000) # 3000 in each quarter
#' zPop <- AkuData(10000)[,1:7]
#'
#' # Calibration using "survey"
#' a <- CalibrateSSB(z, calmodel = "~ sex*age",
#' partition = c("year","q"), # calibrate within quarter
#' popData = zPop, y = c("unemployed","workforce"),
#' by = c("year","q")) # Estimate within quarter
#' head(a$w) # calibrated weights
#' a$estTM # estimates
#' a$popTotals # popTotals used as input below
#'
#'
#' # Calibration, no package, popTotals as input
#' b <- CalibrateSSB(z, popTotals=a$popTotals, calmodel="~ sex*age",
#' partition = c("year","q"), usePackage = "none", y = c("unemployed","workforce"))
#' max(abs(a$w-b$w)) # Same weights as above
#'
#' print(a)
#' print(b)
#'
#' \dontrun{
#' require(ReGenesees)
#' # Calibration and estimation via ReGenesees
#' CalibrateSSB(z, calmodel = "~ sex*age",
#' partition = c("year","q"), # calibrate within quarter
#' popData = zPop, usePackage = "ReGenesees",
#' y = c("unemployed","workforce"),
#' by = c("year","q")) # Estimate within quarter
#' }
#'
CalibrateSSB = function(grossSample,calmodel=NULL,response="R",popTotals=NULL,y=NULL,by = NULL,partition=NULL,lRegmodel=NULL,
popData=NULL,samplingWeights=NULL,
usePackage="survey",bounds=c(-Inf,Inf),calfun="linear",
onlyTotals=FALSE,onlyw=FALSE,uselRegWeights=FALSE,ids=NULL,
residOutput = TRUE,
leverageOutput = FALSE,
yOutput = TRUE,
samplingWeightsOutput = FALSE,
dropResid2 = TRUE,
wGrossOutput = TRUE,
wave=NULL,
id=NULL,
extra=NULL,
allowNApopTotals = NULL,
partitionPrint = NULL,
...){
#if(hasArg("residOutput"))
# warning("residOutput is an old argument not in use")
timeLimit = 60
if(!is.null(allowNApopTotals)){
checkAnyNApopTotals = !allowNApopTotals
} else{
checkAnyNApopTotals = TRUE
}
if(leverageOutput) residOutput = TRUE
if(is.null(y)) residOutput = FALSE
if(residOutput & is.list(y)) stop("residOutput & is.list(y) NOT IMPLEMENTED")
if(yOutput & is.list(y)) stop("yOutput & is.list(y) NOT IMPLEMENTED")
n = NROW(grossSample)
R = grossSample[,response]
if(is.null(samplingWeights)){
samplingWeights = "Wei5652503017"
grossSample[,samplingWeights] = 1
samplingW = NULL
} else samplingW = grossSample[[samplingWeights]]
if(is.null(popData)){
popData = grossSample
wPop = samplingW
} else wPop=NULL # Avoid copy?
######## START ReGenesees
if(tolower(usePackage)=="regenesees"){
if(!is.null(lRegmodel))
stop("lRegmodel when ReGenesees NOT IMPLEMENTED")
#if(residOutput)
# stop("residOutput when ReGenesees NOT IMPLEMENTED")
#if(yOutput)
# stop("yOutput when ReGenesees NOT IMPLEMENTED")
#if(samplingWeightsOutput)
# stop("samplingWeightsOutput when ReGenesees NOT IMPLEMENTED")
if(is.null(ids)){
ids = "ids5652503017"
grossSample[,ids] = 1:n
}
retur =CalibratePackageReGenesees(netSample=grossSample[R==1,],calmodel=calmodel,popTotals=popTotals,y=y,
by=by,partition=partition,
popData=popData,samplingWeights=samplingWeights,bounds=bounds,calfun=calfun,
onlyTotals=onlyTotals,ids=ids,...)
w = retur$w
retur$w = rep(0,n)
retur$w[R==1] = w
if(onlyw) return(retur$w)
resids = retur$resids
retur$resids = matrix(NaN,n,dim(resids)[2])
retur$resids[R==1,] = resids
#return(retur)
if(yOutput) retur$y = grossSample[,y,drop=FALSE]
if(samplingWeightsOutput) retur$samplingWeights = samplingWeights
#if(!residOutput | !yOutput)
# return(retur)
#return(structure(retur, class = "calSSB", n=n, nY=length(y)))
}######## END ReGenesees
else {
bigStrataLevels=NULL
createPopTotals = is.null(popTotals)
partitionPop =FALSE
bigStrataPop = rep(1,NROW(popData))
if(!is.null(partition)) {
partitionPop = sum(partition %in% names(popData))>0
if(partitionPop & createPopTotals){
cs = CrossStrata(grossSample[,partition],returnb=TRUE,asNumeric=TRUE,byExtra=popData[,partition])
bigStrataPop = cs$aExtra
} else {
cs = CrossStrata(grossSample[,partition],returnb=TRUE,asNumeric=TRUE)
}
bigStrata = cs$a
bigStrataLevels = cs$b
      if(length(bigStrataLevels)==1) names(bigStrataLevels) = partition # Prevent the name from becoming "by"
}
else bigStrata = rep(1,n)
nBig = max(bigStrata)
calModel=NULL
lRegModel=NULL
if(!is.null(calmodel)) calModel = update(as.formula(calmodel),paste(response,"~."))
if(!is.null(lRegmodel)) lRegModel = update(as.formula(lRegmodel),paste(response,"~."))
if(!createPopTotals){
if(is.vector(popTotals)) popTotals = matrix(popTotals,nrow = 1,dimnames=list(" ",names(popTotals)))
popTotalsInput = popTotals
if(nBig>1 & NROW(popTotals)>1) popTotalsInput = mergeSort(popTotals,bigStrataLevels,remov=TRUE)
popTotalsInput = as.matrix(popTotalsInput)
popTotals=NULL
}
w = rep(NaN,n)
if(wGrossOutput) wGross = rep(NaN,n)
if(residOutput){
e1 = NaN+grossSample[,y,drop=FALSE]
e2=e1
etos = etos_e1_e2
if(leverageOutput){
h1=e1[,1]
h2=h1
etos = etos_e1_e2_by_lm
}
}
if(is.null(partitionPrint))
pPrint = TRUE
else
pPrint = partitionPrint
if(nBig<=1)
pPrint = FALSE
if(pPrint)
printi = capture.output(print(bigStrataLevels,row.names=FALSE))
for(i in 1:nBig)
{
if(i==1 & pPrint){
if(is.null(partitionPrint)){
timeStart = Sys.time()
} else {
cat(printi[1],"\n")
}
}
rows = bigStrata ==i
lm_model = lm(c(calModel,as.formula(paste(response,"~1")))[[1]],data=grossSample[rows,]) # ~1 when calModel=NULL
if(createPopTotals)
{
if(partitionPop|i==1)
{
rowsPop = bigStrataPop==i
if(is.null(wPop)) wPopi = NULL
else wPopi = wPop[rowsPop]
tp = getTotal(popData[rowsPop,,drop=FALSE],lm_model,wPopi)
popTotals_ = tp$N*tp$colSum/tp$colN
} else
popTotals_ = NULL
}
else
{
popTotals_=setTotal(popTotalsInput[min(i,dim(popTotalsInput)[1]),,drop=TRUE],lm_model)
}
popTotals = rbind(popTotals,popTotals_)
if(checkAnyNApopTotals){
if(anyNA(popTotals[dim(popTotals)[1],])){
if(!is.null(allowNApopTotals))
stop("Missing population totals not allowed according to parameter allowNApopTotals.")
else
warning("Special methodology since missing population totals (warning according to parameter allowNApopTotals)")
checkAnyNApopTotals = FALSE
}
}
if(!onlyTotals & !(tolower(usePackage)=="nocalibration")){
a = calibrateSSB(grossSample[rows,],calModel, popTotals[dim(popTotals)[1],],response=NULL,lRegModel,
popData=NULL,samplingW[rows],tolower(usePackage)=="survey",bounds=bounds,calfun= calfun, ##### "linear",
totalReturn=0,uselRegWeights=uselRegWeights,...)
w[rows] = a$w
if(wGrossOutput & !is.null(a$wGross)) wGross[rows] = a$wGross
}
if(residOutput){
xFromModel = model.matrix(lm_model)
rFromModel = model.frame(lm_model)[1]
rowsNetto = rows
rowsNetto[rowsNetto][!rFromModel==1] = FALSE
e1_e2=etos(xFromModel[rFromModel==1,],
data.matrix(grossSample[rowsNetto,y]),
is.finite(popTotals[dim(popTotals)[1],]),
w=(samplingW[rowsNetto]))
e1[rowsNetto,]=e1_e2$e1
e2[rowsNetto,]=e1_e2$e2
if(leverageOutput){
h1[rowsNetto]=e1_e2$h1
h2[rowsNetto]=e1_e2$h2
}
}
if(i==1 & pPrint & is.null(partitionPrint)){
if(difftime(Sys.time(),timeStart,units = "secs")>timeLimit/nBig)
cat(printi[1],"\n")
else
pPrint = FALSE
}
if(pPrint)
cat(printi[1+i],"\n")
}
if(wGrossOutput) if(sum(is.finite(wGross))==0) wGross = NULL
rownames(popTotals) =NULL
popTotals = cbind(bigStrataLevels,popTotals)
if(onlyTotals) return(popTotals)
if(onlyw) return(w)
############## Almost same as in CalibratePackageReGenesees
estTM=NULL
if(!is.null(y)){
if(is.list(y) | is.list(by)){
if(is.list(y) & is.list(by)) {if(length(y)!=length(by)) stop("length(y)==length(by) must be TRUE")}
else{
if(is.list(y)){
if(is.null(by))
by = vector("list",length(y))
else{
by_ = by
by = y
for(i in 1:length(y)) by[[i]] = by_
}
}else{
y_ = y
y = by
for(i in 1:length(y)) y[[i]] = y_
}
}
estTM = y
for(i in 1:length(y)) estTM[[i]] = MYestTM(grossSample,y[[i]],w,by[[i]])
} else
{
estTM = MYestTM(grossSample,y,w,by)
}
}
dropResid2 = dropResid2 & (sum(is.na(popTotals))==0)
retur=list(popTotals=popTotals,w=w)
if(!is.null(estTM)) retur$estTM = estTM
if(residOutput) {
retur$resids=e1
if(!dropResid2) retur$resids2=e2
if(leverageOutput){
retur$leverages=h1
if(!dropResid2) retur$leverages2=h2
}
}
if(yOutput) retur$y = grossSample[,y,drop=FALSE]
if(samplingWeightsOutput) retur$samplingWeights = samplingWeights
if(wGrossOutput) retur$wGross = wGross
} ######## END NOT ReGenesees
if(!is.null(wave)){
if(length(wave)>1)
retur$wave = CrossStrata(grossSample[,wave])
else
retur$wave = grossSample[,wave, drop=TRUE]
}
if(!is.null(id)) retur$id = grossSample[,id, drop=TRUE]
if(!is.null(extra)) retur$extra = grossSample[,extra,drop=FALSE]
if(!residOutput | !yOutput)
return(retur)
return(structure(retur, class = "calSSB", n=n, nY=length(y)))
}
# Create new levels by crossing levels in "by"
# When returnb=TRUE an overview of the original variables according to the new levels is also returned
# byExtra contains the same variables as by and represents another data set.
#' Crossing several factor variables
#'
#' Create new factor variable by crossing levels in several variables
#'
#'
#' @param by Dataframe or matrix with several variables
#' @param sep Used to create new level names
#' @param returnb When TRUE an overview of the original variables according to
#' the new levels is also returned.
#' @param asNumeric When TRUE the new variable is numeric.
#' @param byExtra Contains the same variables as by and represents another data
#' set.
#' @return \item{a}{The new variable} \item{aExtra}{New variable according to
#' byExtra} \item{b}{Overview of the original variables according to the new levels}
#' @examples
#'
#' CrossStrata(cbind(factor(rep(1:3,2)),c('A',rep('B',5)) ))
#'
#' @export CrossStrata
CrossStrata = function(by,sep = "-",returnb=FALSE,asNumeric=FALSE,byExtra=NULL){
by = as.data.frame(by)
byList = as.list(by)
b = sortrows(as.data.frame(aggregate(byList[[1]],byList,length)[,1:length(byList)]))
rownames(b)=NULL
names(b) = names(by)
levels = apply(b,1,paste,collapse=sep)
a = factor(apply(by,1,paste,collapse=sep),levels = levels)
if(asNumeric){
levels_ = levels(a)
a=as.numeric(a)
attr(a,"levels") = levels_
}
if(!is.null(byExtra)) {
aExtra = factor(apply(as.data.frame(byExtra),1,paste,collapse=sep),levels = levels)
if(asNumeric){
levels_ = levels(aExtra)
aExtra=as.numeric(aExtra)
attr(aExtra,"levels") = levels_
}
if(returnb) return(list(a=a,aExtra=aExtra,b=b))
return(list(a=a,aExtra=aExtra))
}
if(returnb) return(list(a=a,b=b))
a
}
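# A minimal sketch (hypothetical data) of the asNumeric and byExtra options:
# cs <- CrossStrata(data.frame(year = c(1, 1, 2), q = c("Q1", "Q2", "Q1")),
#                   returnb = TRUE, asNumeric = TRUE,
#                   byExtra = data.frame(year = 1, q = "Q2"))
# cs$a      # 1, 2, 3 with attr(,"levels") = c("1-Q1", "1-Q2", "2-Q1")
# cs$aExtra # 2, the code of "1-Q2" within the same level set
# cs$b      # overview of the original variables per new level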
#' Rearrange output from CalibrateSSB (calSSB object). Ready for input to PanelEstimation.
#'
#' One row for each id and one column for each wave.
#'
#' When wave, id or extra is NULL, corresponding elements in the input object (\code{a}) will be used if available.
#'
#'
#' @param a A calSSB object. That is, output from CalibrateSSB() or CalSSBobj().
#' @param wave Time or another repeat variable.
#' @param id Identifier variable.
#' @param subSet Grouping variable for splitting output.
#' @param extra Dataset with extra variables not in \code{a}.
#' @return Output has the same elements (+ extra) as input (a), but rearranged.
#' When subSet is given as input, the output is a list according to the subSet levels.
#'
#' @export
#'
#' @seealso \code{\link{CalibrateSSB}}, \code{\link{CalSSBobj}}, \code{\link{PanelEstimation}}.
#'
#'
#' @examples
#'
#' # See examples in PanelEstimation and CalSSBobj
#'
WideFromCalibrate = function(a,wave=NULL,id=NULL,subSet=NULL,extra=NULL){ # wave instead of bigStrata, extra is list
if(class(a)[1] != "calSSB")
stop("a must be an object of class calSSB")
if(is.null(wave))
wave = a$wave
if(is.null(wave))
stop("wave neeed")
if(is.null(id))
id = a$id
if(is.null(id))
stop("id neeed")
x=structure(list(),class = "calSSBwide")
if(!is.null(a$y)) x$y = wideDataMatrix(a$y,wave,id,asList=TRUE)
if(!is.null(a$w)) x$w = wideDataMatrix(a$w,wave,id,asList=FALSE)
if(!is.null(a$resids)) x$resids = wideDataMatrix(a$resids,wave,id,asList=TRUE)
if(!is.null(a$resids2)) x$resids2 = wideDataMatrix(a$resids2,wave,id,asList=TRUE)
if(!is.null(a$leverages)) x$leverages = wideDataMatrix(a$leverages,wave,id,asList=FALSE)
if(!is.null(a$leverages2)) x$leverages2 = wideDataMatrix(a$leverages2,wave,id,asList=FALSE)
if(!is.null(a$samplingWeights)) x$samplingWeights = wideDataMatrix(a$samplingWeights,wave,id,asList=FALSE)
if(!is.null(a$wGross)) x$wGross = wideDataMatrix(a$wGross,wave,id,asList=FALSE)
if(!is.null(extra)) x$extra = wideDataMatrix(extra,wave,id,asList=TRUE)
else
if(!is.null(a$extra)) x$extra = wideDataMatrix(a$extra,wave,id,asList=TRUE)
if(is.null(subSet)) return(x)
s123 = make123(subSet)
s = wideDataMatrix(s123,wave,id,asList=FALSE)
k = vector("list",max(s123))
  k = structure(k,class = "calSSBwideList") # New here
names(k) = levels(s123)
for(i in 1:max(s123)){
si = s==i
rowsi = rowSums(si,na.rm=TRUE)>0
k[[i]] = x
for(j in 1:length(k[[i]]$y)) k[[i]]$y[[j]] = MakeSubSet(x$y[[j]],si,rowsi)
for(j in 1:length(k[[i]]$resids)) k[[i]]$resids[[j]] = MakeSubSet(x$resids[[j]],si,rowsi)
for(j in 1:length(k[[i]]$resids2)) k[[i]]$resids2[[j]] = MakeSubSet(x$resids2[[j]],si,rowsi)
for(j in 1:length(k[[i]]$extra)) k[[i]]$extra[[j]] = MakeSubSet(x$extra[[j]],si,rowsi)
k[[i]]$w = MakeSubSet(x$w,si,rowsi)
k[[i]]$leverages = MakeSubSet(x$leverages,si,rowsi)
k[[i]]$leverages2 = MakeSubSet(x$leverages2,si,rowsi)
k[[i]]$samplingWeights = MakeSubSet(x$samplingWeights,si,rowsi)
k[[i]]$wGross = MakeSubSet(x$wGross,si,rowsi)
}
k
}
# Old name bigData
# Can be simplified
wideDataMatrix = function(data,bigStrata=rep(1,n),id,nameSep="-",dropSingleName=TRUE,asList=FALSE)
{
n <- NROW(data)
if(!is.matrix(data)) data = data.matrix(data)
if(!is.null(colnames(data))) varNames = colnames(data)
else varNames = paste("y",1:dim(data)[2],sep="")
if(asList){
k = vector("list",dim(data)[2])
names(k) = varNames
for(i in 1:dim(data)[2])
k[[i]] = wideDataMatrix(data=data[,i,drop=FALSE],bigStrata=bigStrata,id=id,dropSingleName=TRUE)
return(k)
}
dropName = (dim(data)[2]==1 & dropSingleName)
if(dropName) nameSep=""
bigStrata = make123(bigStrata)
id=make123(id)
nBig = max(bigStrata)
bigNames = paste(nameSep,levels(bigStrata),sep="")
x0 = matrix(NaN,nrow=max(id),ncol=dim(data)[2])
z=NULL
for(i in 1:nBig)
{
x=x0
datai = data[bigStrata==i,,drop=FALSE]
x[id[bigStrata==i],] = datai
if(dropName) colnames(x) = bigNames[i]
else colnames(x) = paste(varNames,bigNames[i],sep="")
z=cbind(z,x)
}
z
}
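# A hedged sketch (hypothetical input) of the reshaping done by wideDataMatrix:
# one row per id, one column (block of columns) per bigStrata level, and NaN
# where an id is absent from a stratum.
# wideDataMatrix(data = matrix(1:4, 4, 1), bigStrata = c(1, 1, 2, 2),
#                id = c(1, 2, 1, 2))
# #      1 2
# # [1,] 1 3
# # [2,] 2 4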
make123 = function(x)
{
x=as.factor(x)
levels_x = levels(x)
x=as.numeric(x)
attr(x,"levels") = levels_x
x
}
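# Example: make123(c("b", "a", "b")) gives c(2, 1, 2) with
# attr(,"levels") = c("a", "b") - numeric codes with the level names kept.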
rBind = function(x,y) rbind(data.frame(x),data.frame(y))[[1]]
uniqueIndex = function(x,useRev=FALSE){
if(useRev) x = rev(x)
ix = unique(match(x,x))
if(!useRev) return(ix)
sort((length(x):1)[ix])
}
uniqueCol= function(x,useRev=FALSE){
if(is.null(dim(x))) {
namesx=names(x)
x=matrix(x,nrow=1)
colnames(x)=namesx
}
x[,uniqueIndex(colnames(x),useRev),drop=FALSE]
}
sortrows <- function(m, cols = 1:dim(m)[2], index.return = FALSE) {
ix <- eval(parse(text = paste("order(", paste("m[[", cols, "]]", sep = "", collapse = ","), ")")))
if (index.return)
return(ix)
m[ix, , drop = FALSE]
}
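# Example (hypothetical data frame): rows are ordered by all columns in turn,
# here first by x and then by y.
# sortrows(data.frame(x = c(2, 1, 1), y = c(1, 2, 1)))
# # returns the rows in the order (1,1), (1,2), (2,1)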
sortrowsOld = function(m,cols=1:dim(m)[2],index.return=FALSE)
{
ix=eval(parse(text=paste("order(",paste("m[,",cols,"]",sep="", collapse=","),")")))
if(index.return) return(ix)
m[ix, ,drop=FALSE]
}
MYestTMold = function(grossSample,y,w,by){
if(!is.null(by)) if(is.na(by[1])) by = NULL
estTM =aggregate(grossSample[,y]*w,grossSample[,by,drop=FALSE],sum_)
if(!is.null(by)) estTM = sortrows(estTM,1:length(by))
if(length(y)==1) names(estTM)[length(names(estTM))] = y
estTM
}
MYestTM = function(grossSample,y,w,by){
if(!is.null(by)) if(is.na(by[1])) by = NULL
#grossSample <<- grossSample
#w <<- w
#y <<- y
#estTM =aggregate(grossSample[,y, drop = TRUE]*w,grossSample[,by,drop=FALSE],sum_)
if(!is.null(by)){
#cat("#")
estTM = sortrows(aggregate(grossSample[,y, drop = TRUE]*w,grossSample[,by,drop=FALSE],sum_),1:length(by))
} else {
#cat("0")
estTM =aggregate(grossSample[,y, drop = TRUE]*w,grossSample[,integer(0),drop=FALSE],sum_)
}
if(length(y)==1) names(estTM)[length(names(estTM))] = y
estTM
}
mergeSort = function(x,y,useRev=FALSE,remov=FALSE){
x=as.data.frame(x)
y=as.data.frame(y)
if(NROW(x)!=NROW(y)) stop("Not exact match")
colnamesx = colnames(x)
colnames(x) = paste(colnamesx,"_",sep="")
uniqueIndex_ = uniqueIndex(colnamesx,useRev)
colnames(x)[uniqueIndex_] = colnamesx[uniqueIndex_]
mergeSortNr123 = 1:NROW(x)
mergeSortNr345 = 1:NROW(x)
names_y=names(y)
z=merge(cbind(x,mergeSortNr345),cbind(y,mergeSortNr123),names_y)
if(NROW(x)!=NROW(z)) stop("Not exact match")
k=dim(z)[2]
if(remov) cols = !(colnames(x) %in% names_y)
else cols = rep(TRUE,length(colnamesx))
colnames(x) = colnamesx
x[z[order(z[,"mergeSortNr123"]),"mergeSortNr345"],cols]
}
MakeSubSet = function(x,subSet,rows){
if(is.null(x)) return(x)
x[!subSet] = NaN
x[rows,,drop=FALSE]
}
sum_ = function(x) sum(x,na.rm=TRUE)
asFormula = function(s) {
if(is.null(s)) return(NULL)
if(class(s)[1]=="formula") return(s)
if(is.na(s)[1]) return(NULL)
as.formula(paste("~",paste(s,collapse="+"),sep=""))
}
CalibratePackageReGeneseesCRANversion = function(netSample,calmodel=NULL,popTotals=NULL,y=NULL,by = NULL,partition=NULL,
popData=NULL,samplingWeights=NULL,bounds=c(-Inf,Inf),calfun="linear",
onlyTotals=FALSE,ids,...){
stop("Use of ReGenesees is not implemented in this version since ReGenesees is not on CRAN.")
}
calibrateSSB = function(grossSample,calModel=NULL,
popTotals=NULL,response=NULL,lRegModel=NULL,
popData=NULL,samplingWeights=NULL,
usePackageSurvey=TRUE,bounds=c(-Inf,Inf),calfun="linear",
                        totalReturn=0,uselRegWeights=FALSE,...) ### Note: uselRegWeights=FALSE
{
n = NROW(grossSample)
w=samplingWeights
if(uselRegWeights) lRegWeights=samplingWeights
else lRegWeights=NULL
if(!is.null(lRegModel))
{
if(is.null(lRegWeights)) glm_modell = glm(as.formula(lRegModel),data=grossSample,family = binomial())
else glm_modell = glm(as.formula(lRegModel),data=grossSample,family = binomial(),weights=lRegWeights)
lRegW = 1/predict(glm_modell,type = "response")
lRegW[model.frame(glm_modell)[1]==0] = 0
if(is.null(w)) w=1
w = w*lRegW
if(length(popTotals)>0)
{
w = w*popTotals[1]/sum(w)
}
}
a = list(w=w,wGross=NULL)
if(!is.null(calModel))
{
if(usePackageSurvey)
{
a=calibratePackageSurvey(grossSample=grossSample,modelformula=calModel,
popTotals=popTotals,response=response,popData=popData,samplingWeights=w,
bounds=bounds,calfun=calfun,totalReturn=totalReturn,returnwGross=TRUE,...)
} else
{
if(!(calfun=="linear")) stop("Method not implemented")
if(!is.null(popTotals)) popData=popTotals
a=lagVekter(calModel,grossSample,popData,min_w = bounds[1],
max_w = bounds[2],totalReturn=totalReturn,samplingWeights=w,returnwGross=TRUE)
}
}
a
}
|
/scratch/gouwar.j/cran-all/cranData/CalibrateSSB/R/CalibrateSSB.R
|
#' Variance estimation for panel data
#'
#' Variance estimation of linear combinations of totals and ratios based on
#' output from wideFromCalibrate
#'
#' When denominator=NULL, only estimates for a single y-variable (numerator)
#' are calculated. When denominator is specified, estimates for numerator,
#' denominator and ratio are calculated. The default estimation type parameter,
#' "robustModel", is equation (12) in paper. "ssbAKU" is (16), "robustModelww"
#' is (9) and "robustModelGroup" and "robustModelGroupww" are cluster robust
#' variants based on \eqn{(w-1)^2} and \eqn{w^2} .
#'
#' @param x Output from wideFromCalibrate.
#' @param numerator y variable name or number.
#' @param denominator y variable name or number.
#' @param linComb Matrix defining linear combinations of waves.
#' @param linComb0 Linear combination matrix to be used prior to ratio
#' calculations.
#' @param estType Estimation type: "robustModel" (default), "ssbAKU",
#' "robustModelww", "robustModelGroup" or "robustModelGroupww" (see below)
#' @param leveragePower Power used when adjusting residuals using leverages.
#' @param group Extra variable name or number for cluster robust estimation.
#' @param returnCov Return covariance matrices instead of variance vectors.
#' @param usewGross Use wGross (if available) instead of design weights to
#' adjust the covariance matrix in the case of NA popTotals.
#' @return \item{wTot}{Sum of weights} \item{estimates}{Ordinary estimates}
#' \item{linCombs}{Estimates of linear combinations}
#' \item{varEstimates}{Variance of estimates} \item{varLinCombs}{Variance of
#' estimates of linear combinations} When denominator is specified the above
#' output refer to ratios. Then, similar output for numerator and denominator
#' are also included.
#'
#' @seealso \code{\link{CalibrateSSB}}, \code{\link{CalSSBobj}}, \code{\link{WideFromCalibrate}}, \code{\link{CalibrateSSBpanel}}.
#'
#' @examples
#'
#' # Generates data - two years
#' z = AkuData(3000) # 3000 in each quarter
#' zPop = AkuData(10000)[,1:7]
#'
#' # Calibration and "WideFromCalibrate"
#' b = CalibrateSSB(z,calmodel="~ sex*age", partition=c("year","q"),
#' popData=zPop, y=c("unemployed","workforce"))
#' bWide = WideFromCalibrate(b,CrossStrata(z[,c("year","q")]),z$id)
#'
#' # Define linear combination matrix
#' lc = rbind(LagDiff(8,4),PeriodDiff(8,4))
#' rownames(lc) = c("diffQ1","diffQ2","diffQ3","diffQ4","diffYearMean")
#' colnames(lc) = colnames(head(bWide$y[[1]]))
#' lc
#'
#' # Unemployed: Totals and linear combinations
#' d1=PanelEstimation(bWide,"unemployed",linComb=lc) #
#'
#' # Table of output
#' cbind(tot=d1$estimates,se=sqrt(d1$varEstimates))
#' cbind(tot=d1$linCombs,se=sqrt(d1$varLinCombs))
#'
#' # Ratio: Totals and linear combinations
#' d=PanelEstimation(bWide,numerator="unemployed",denominator="workforce",linComb=lc)
#' cbind(tot=d$estimates,se=sqrt(d$varEstimates))
#' cbind(tot=d$linCombs,se=sqrt(d$varLinCombs))
#'
#' \dontrun{
#' # Calibration when some population totals are unknown (edu)
#' # Leverages in output (will be used to adjust residuals)
#' # Cluster robust estimation (families/famid)
#' b2 = CalibrateSSB(z,popData=zPop,calmodel="~ edu*sex + sex*age",
#' partition=c("year","q"), y=c("unemployed","workforce"),
#' leverageOutput=TRUE)
#' b2Wide = WideFromCalibrate(b2,CrossStrata(z[,c("year","q")]),z$id,extra=z$famid)
#' d2 = PanelEstimation(b2Wide,"unemployed",linComb=lc,group=1,estType = "robustModelGroup")
#' cbind(tot=d2$linCombs,se=sqrt(d2$varLinCombs))
#' }
#'
#'
#' # Yearly mean before ratio calculation (linComb0)
#' # and difference between years (linComb)
#' g=PanelEstimation(bWide,numerator="unemployed",denominator="workforce",
#' linComb= LagDiff(2),linComb0=Period(8,4))
#' cbind(tot=g$linCombs,se=sqrt(g$varLinCombs))
#'
#' @export PanelEstimation
PanelEstimation = function(x,numerator,denominator=NULL,linComb=matrix(0,0,n),linComb0=NULL,
estType="robustModel",leveragePower=1/2,group=NULL,returnCov=FALSE,usewGross=TRUE){
if(class(x$w)[1]=="NULL"){
z = vector("list",length(x))
names(z) = names(x)
n = dim(ListCbind(x[[1]]$y,numerator))[2]
for(i in 1:length(x))
z[[i]] = PanelEstimation(x[[i]],numerator,denominator,linComb,linComb0,estType,leveragePower,group,returnCov,usewGross)
return(z)
}
if(is.null(denominator)){ # Enklere versjon av koden under
m = length(numerator)
y = ListCbind(x$y,numerator)
n = dim(y)[2]
nlc = dim(linComb)[1]
w = RepCbind(x$w,m)
if(usewGross & !is.null(x$wGross)){
samplingWeights = RepCbind(x$wGross,m) # **** wGross -> samplingWeights
} else {
if(!is.null(x$samplingWeights))
samplingWeights = RepCbind(x$samplingWeights,m)
else
samplingWeights=NULL
}
if(!is.null(x$leverages))
leverages = RepCbind(x$leverages,m)
else
leverages = 0
if(!is.null(x$leverages2))
leverages2 = RepCbind(x$leverages2,m)
else
leverages2 = 0
resids = ListCbind(x$resids,numerator)/(1-leverages)^leveragePower
if(!is.null(x$resids2))
resids2 = ListCbind(x$resids2,numerator)/(1-leverages2)^leveragePower
else
resids2 = NULL
} else{
n = dim(x$y[[numerator]])[2]
nlc = dim(linComb)[1]
y = cbind(x$y[[numerator]],x$y[[denominator]])
w = cbind(x$w,x$w)
if(usewGross & !is.null(x$wGross)){
samplingWeights = cbind(x$wGross,x$wGross) # **** wGross -> samplingWeights
} else {
if(!is.null(x$samplingWeights))
samplingWeights = cbind(x$samplingWeights,x$samplingWeights)
else
samplingWeights=NULL
}
if(!is.null(x$leverages))
leverages = cbind(x$leverages,x$leverages)
else
leverages = 0
if(!is.null(x$leverages2))
leverages2 = cbind(x$leverages2,x$leverages2)
else
leverages2 = 0
resids = cbind(x$resids[[numerator]],x$resids[[denominator]])/(1-leverages)^leveragePower
if(!is.null(x$resids2))
resids2 = cbind(x$resids2[[numerator]],x$resids2[[denominator]])/(1-leverages2)^leveragePower
else
resids2 = NULL
}
if(!is.null(group)){
gr = group
group = rowNoNA(data.matrix(x$extra[[gr]])) ##### First element.
group2 = rowNoNA(data.matrix(x$extra[[gr]]),max) ##### Last element.
    eq = sum(!(group==group2), na.rm=TRUE)
if(eq>0)
warning(sprintf("Non-unique group detected. Last not equal first in %d cases. First used.",eq))
}
covTotals = TotalsWithCov(y,resids,w,estType,resids2,
samplingWeights=samplingWeights,group=group)
a=NULL
a$wTot = colSums(x$w,na.rm = TRUE)
if(!is.null(samplingWeights))
a$samplingWeightsTot = colSums(samplingWeights,na.rm = TRUE)
if(is.null(denominator)){
if(is.null(linComb0)) A = linComb
else A = linComb %*% linComb0
pEst = PanelEst(covTotals$totals,covTotals$covTotals,
A = A,returnCov=returnCov)
a$estimates = pEst$totals[,1,drop=TRUE]
if(nlc) a$linCombs = pEst$Atotals[,1,drop=TRUE]
a$varEstimates = pEst$varTotals
if(nlc) a$varLinCombs = pEst$varAtotals
return(a)
}
nn=n
diag2n = diag(1,2*n)
rownames(diag2n) = colnames(y)
if(is.null(linComb0)) {
A = rbind(diag2n,cbind(linComb,matrix(0,dim(linComb)[1],n)),cbind(matrix(0,dim(linComb)[1],n),linComb))
numerator=1:n
names(numerator) = colnames(y)[1:n]
} else {
A2 = rbind(cbind(linComb0,matrix(0,dim(linComb0)[1],n)),cbind(matrix(0,dim(linComb0)[1],n),linComb0))
linComb2 = linComb %*% linComb0
AlinComb2 = rbind(cbind(linComb2,matrix(0,dim(linComb2)[1],n)),cbind(matrix(0,dim(linComb2)[1],n),linComb2))
A = rbind(A2,AlinComb2)
n = dim(linComb0)[1]
nlc = dim(linComb2)[1]
numerator=1:n
names(numerator) = rownames(linComb0)
}
pEst = PanelEst(covTotals$totals,covTotals$covTotals,
A = A,
numerator=numerator,denominator=(n+1):(2*n), B=linComb,
returnCov=returnCov)
a$estimates = pEst$ratios[,1,drop=TRUE]
a$estimatesNum = pEst$totals[1:nn,1,drop=TRUE]
a$estimatesDen = pEst$totals[(nn+1):(2*nn),1,drop=TRUE]
if(nlc) a$linCombs = pEst$Bratios[,1,drop=TRUE]
if(nlc) a$linCombsNum = pEst$Atotals[(2*n+1):(2*n+nlc),1,drop=TRUE]
if(nlc) a$linCombsDen = pEst$Atotals[(2*n+nlc+1):(2*n+2*nlc),1,drop=TRUE]
a$varEstimates = pEst$varRatios
a$varEstimatesNum = TakeIndexBoth(pEst$varTotals,1:nn)
a$varEstimatesDen = TakeIndexBoth(pEst$varTotals,(nn+1):(2*nn))
if(nlc) a$varLinCombs = pEst$varBratios
if(nlc) a$varLinCombsNum = TakeIndexBoth(pEst$varAtotals,(2*n+1):(2*n+nlc))
if(nlc) a$varLinCombsDen = TakeIndexBoth(pEst$varAtotals,(2*n+nlc+1):(2*n+2*nlc))
a
}
PanelEst = function(totals,covTotals,A=matrix(0,0,length(totals)),numerator=integer(0),denominator=integer(0),
B=diag(1,length(numerator)),
returnCov=FALSE,
rationames=names(numerator))
{
#### Level 1: Input variables
#### Level 2: Lin.comb of input
Atotals = A %*% totals
covAtotals = A %*% covTotals %*% t(A)
#### Level 3: Ratios of lin.comb
  # The variance of X/Y is computed as the variance of X/y - Yx/yy
  rNum = Atotals[numerator,,drop=FALSE]   # numerator estimates (x)
  rDen = Atotals[denominator,,drop=FALSE] # denominator estimates (y)
  D = matrix(0,nrow=length(rNum),ncol=dim(A)[1]) # D generates "X/y - Yx/yy"
  if(dim(D)[1]) for(i in 1:dim(D)[1]){
    D[i,numerator[i]] = 1/rDen[i]              # X is multiplied by "1/y"
    D[i,denominator[i]] = -rNum[i]/(rDen[i])^2 # Y is multiplied by "x/yy"
}
ratios = rNum/rDen
rownames(ratios) = rationames
rownames(D) = rationames
covRatios = D %*% covAtotals %*% t(D)
#### Level 4: Lin.comb of ratios
Bratios = B %*% ratios
covBratios = B %*% covRatios %*% t(B)
if(returnCov) return(list(totals=totals,Atotals=Atotals,ratios=ratios,Bratios=Bratios,
            varTotals=covTotals,varAtotals=covAtotals,varRatios=covRatios,varBratios=covBratios))
list(totals=totals,Atotals=Atotals,ratios=ratios,Bratios=Bratios,varTotals=diag(covTotals),varAtotals=diag(covAtotals),
varRatios=diag(covRatios),varBratios=diag(covBratios))
}
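# Note on the ratio variance in PanelEst above: this is the standard delta
# method. For R = X/Y, Var(R) is approximated by D %*% Cov(c(X, Y)) %*% t(D)
# with D = (1/Y, -X/Y^2), which is exactly the D matrix built row by row above.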
TotalsWithCov = function(y,resids,w,estType="robustModel", #dummy=!is.na(w)
resids2=NULL,returnNr=FALSE,returnNr1=FALSE,
samplingWeights=NULL,dummyGross=!is.na(w),dummyNet=!is.na(resids), ...){
force(dummyGross) # dummy is created (lazy evaluation)
force(dummyNet) # dummy is created (lazy evaluation)
a=NULL
y[is.na(y)]=0
w[is.na(w)]=0
resids[is.na(resids)]=0
a$totals = matrix(colSums(w*y),nrow=dim(y)[2]) # T is column vector
rownames(a$totals) = colnames(y)
a$covTotals = MakeCovTotals(resids,w,dummyNet,estType,...)
if(!is.null(resids2)){
resids2[is.na(resids2)]=0
if(is.null(samplingWeights)){
samplingWeights = t(matrix(colSums(w)/colSums(dummyGross),dim(w)[2],dim(w)[1]))
}
else samplingWeights[is.na(samplingWeights)] = 0
v=w/samplingWeights
v[is.na(v)]=0
if(estType=="ssbAKU") estType = "robustModel" # Obs here
covTotalsNr = MakeCovTotals(resids2*samplingWeights,v,dummyNet,estType,...)
covTotalsNr1 = MakeCovTotals(resids*samplingWeights,v,dummyNet,estType,...)
a$covTotals = a$covTotals - covTotalsNr1 + covTotalsNr
if(returnNr) a$covTotalsNr = covTotalsNr
if(returnNr1) a$covTotalsNr1 = covTotalsNr1
}
a
}
MakeCovTotals = function(e,w,dummy,estType,group=NULL){ # e instead of resids
if(estType=="ssbAKU") {
n = t(dummy) %*% dummy
We = w*e
mWe = matrix(colSums(We)/diag(n),nrow=1)
covTotals = n/(n-1) * (t(We)%*%We - n*t(mWe)%*%mWe)
}
if(estType=="robustModel"){
we = (w-1)*e
covTotals = t(we)%*%we
    we = sqrt(pmax(w-1,0))*e ### Negative (w-1) is set to 0
covTotals = covTotals + t(we)%*%we
}
if(estType=="robustModelww"){
we = w*e
covTotals = t(we)%*%we
}
if(estType=="robustModelGroup"|estType=="robustModelGroupww"){
we = data.matrix(aggregate(w*e,list(group),sum)[,-1,drop=FALSE])
if(estType=="robustModelGroupww"){
covTotals = t(we)%*%we
} else{
we1 = data.matrix(aggregate((w-1)*e,list(group),sum)[,-1,drop=FALSE])
covTotals = t(we)%*%we1
}
}
covTotals[!is.finite(covTotals)]=0 # Avoid problems when "n-1=0" and n=0
covTotals
}
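# Mapping of estType to the equations referred to in the PanelEstimation
# documentation: "robustModel" is eq. (12), "ssbAKU" is eq. (16) and
# "robustModelww" is eq. (9); the "Group" variants are cluster robust versions
# where w*e (and (w-1)*e) are first summed within each group. As read from the
# code, "robustModel" computes sum_i ((w_i-1)^2 + max(w_i-1, 0)) e_i e_i'.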
rowNoNA = function(x,maxmin=min){
colx = col(x)
colx[is.na(x)] = NA
element = apply(colx,1,function(x) maxmin(x,na.rm=T))
x[cbind(1:dim(x)[1],element)]
}
TakeIndexBoth = function(x,index){
if(is.matrix(x)) return(x[index,index,drop=FALSE])
x[index]
}
seq_ = function(a,b) seq(a,b,length = max(0,b-a+1))
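# seq_ is a safe variant of seq(a, b): it returns an empty vector when b < a,
# e.g. seq_(3, 1) is empty whereas seq(3, 1) would count down 3 2 1.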
MyDiag = function(n,k=0){
x = diag(n)
z = matrix(0,n,n)
a = max(1+k,1)
b = min(n+k,n)
a2 = max(1-k,1)
b2 = min(n-k,n)
z[,seq_(a,b)] = x[,seq_(a2,b2)]
z
}
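# MyDiag(n, k) is the n x n matrix with ones on the k-th superdiagonal
# (subdiagonal for negative k), e.g. MyDiag(3, 1) is
# 0 1 0
# 0 0 1
# 0 0 0
# LagDiff below builds lag contrasts as MyDiag(n, lag) - diag(n).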
#' Creation of linear combination matrices
#'
#' Create matrices for changes (LagDiff), means (Period) and mean changes
#' (PeriodDiff).
#'
#'
#' @aliases LinCombMatrix PeriodDiff Period LagDiff
#' @param n Number of variables
#' @param period Number of variables involved in each period
#' @param lag Lag used for difference calculation
#' @param k Shift the start of each period
#' @param takeMean Calculate mean over each period (sum when FALSE)
#' @param removerows Remove incomplete rows
#' @param overlap Overlap between periods (moving averages)
#' @return Linear combination matrix
#' @note It can be useful to add row names to the resulting matrix before
#' further use.
#' @examples
#'
#' # We assume two years of four quarters (n=8)
#'
#' # Quarter to quarter differences
#' LagDiff(8)
#'
#' # Changes from same quarter last year
#' LagDiff(8,4)
#'
#' # Yearly averages
#' Period(8,4)
#'
#' # Moving yearly averages
#' Period(8,4,overlap=TRUE)
#'
#' # Difference between yearly averages
#' PeriodDiff(8,4) # Also try n=16 with overlap=TRUE/FALSE
#'
#' # Combine two variants and add row names
#' lc = rbind(LagDiff(8,4),PeriodDiff(8,4))
#' rownames(lc) = c("diffQ1","diffQ2","diffQ3","diffQ4","diffYearMean")
#' lc
#'
#' @export LinCombMatrix
LinCombMatrix = function(n,period=NULL,lag=NULL,k=0,takeMean=TRUE,removerows=TRUE,overlap=FALSE){
if(is.null(period)){
if(is.null(lag)) x = diag(n)
else x = LagDiff(n,lag=lag,removerows=removerows)
} else{
if(is.null(lag)) x = Period(n=n,period=period,k=k,takeMean=takeMean,removerows=removerows,overlap=overlap)
else x = PeriodDiff(n=n,period=period,lag=lag,k=k,takeMean=takeMean,removerows=removerows,overlap=overlap)
}
x
}
#' @rdname LinCombMatrix
#' @encoding UTF8
#' @export
#'
LagDiff= function(n,lag=1,removerows=TRUE){
m=MyDiag(n,lag) - diag(n)
if(removerows) m = m[rowSums(m)==0 & rowSums(abs(m))==2 , ,drop=FALSE]
m
}
#' @rdname LinCombMatrix
#' @encoding UTF8
#' @export
#'
Period = function(n,period=1,k=0,takeMean=TRUE,removerows=TRUE,overlap=FALSE){
x =matrix(0,n,n)
for(i in seq_(1,period) ) x = x+MyDiag(n,k+i-1)
if(removerows) x = x[rowSums(x)==period, ,drop=FALSE]
if(!overlap) x = x[((-1+1:dim(x)[1])%%period)==0, ,drop=FALSE]
if(takeMean) x = x/period
x
}
#' @rdname LinCombMatrix
#' @encoding UTF8
#' @export
#'
PeriodDiff = function(n,period=1,lag=period,k=0,takeMean=TRUE,removerows=TRUE,overlap=FALSE){
a = Period(n=n,period=period,k=k,takeMean=FALSE,removerows=FALSE,overlap=TRUE)
b = Period(n=n,period=period,k=k+lag,takeMean=FALSE,removerows=FALSE,overlap=TRUE)
x = b-a
if(removerows) x = x[rowSums(x)==0 & rowSums(abs(x))==2*period , ,drop=FALSE]
if(!overlap) x = x[((-1+1:dim(x)[1])%%period)==0, ,drop=FALSE]
if(takeMean) x = x/period
x
}
RepCbind = function(x,n){
z = NULL
for(i in seq_len(n)) z=cbind(z,x)
z
}
ListCbind = function(x,elements,sep="-"){
z = NULL
n = length(elements)
for(i in seq_len(n)){
z1 = x[[elements[i]]]
colnames(z1) = paste(elements[i],colnames(z1),sep=sep)
z = cbind(z,z1)
}
z
}
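# RepCbind repeats one matrix n times column-wise; ListCbind picks the given
# elements from a list of matrices, prefixes their column names with the
# element name (e.g. "unemployed-1-Q1"), and binds them column-wise.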
|
/scratch/gouwar.j/cran-all/cranData/CalibrateSSB/R/PanelEstimation.R
|
#' Print method for calSSB
#'
#' @param x calSSB object
#' @param digits positive integer. Minimum number of significant digits to be used for printing most numbers.
#' @param \dots further arguments sent to the underlying summary method.
#'
#' @return Invisibly returns the original object.
#' @keywords print
#' @export
print.calSSB <- function(x, digits = max(getOption("digits") - 3, 3), ...) {
if(!is.null(x$estTM)){
cat("---- estTM ----\n")
print(x$estTM)
cat("\n\n")
}
cat("---- n =", attr(x,"n")," ny =",attr(x,"nY"),"\n\n")
if(!is.null(colnames(x$y)))
cat("---- y names ----\n",head(colnames(x$y),21),"\n\n")
cat("---- Calibrated weights, w ----\n",head(x$w),"...",tail(x$w,1),"\n\n")
cat("---- Summary of calSSB object ----\n")
print(summary(x,digits = digits,...))
invisible(x)
}
#' Print method for calSSBwide
#'
#' @param x calSSBwide object
#' @param digits positive integer. Minimum number of significant digits to be used for printing most numbers.
#' @param \dots further arguments sent to the underlying summary method.
#'
#' @return Invisibly returns the original object.
#' @keywords print
#' @export
print.calSSBwide <- function(x, digits = max(getOption("digits") - 3, 3), ...) {
cat("---- ",NCOL(x$w)," waves of ",NROW(x$w)," units ---- \n\n")
if(!is.null(colnames(x$w)))
cat("---- wave names ----\n",head(colnames(x$w),21),"\n")
cat("\n---- Summary of calSSBwide object ----\n")
print(summary(x,digits = digits,...))
cat("\n---- Summary of y ----\n")
print(summary(x$y,digits = digits,...))
if(!is.null(x$extra)){
cat("\n---- Summary of extra ----\n")
print(summary(x$extra,digits = digits,...))
}
invisible(x)
}
CheckDim <- function(x,nRow,nCol=1,nam="x",allowNULL=TRUE){
if(allowNULL & is.null(x)) return(NULL)
if(NROW(x) !=nRow | NCOL(x) !=nCol)
stop(paste("Wrong dimension of",nam))
NULL
}
#' Create or modify a CalSSB object
#'
#' The elements of the CalSSB object are taken directly from the input parameters.
#'
#' @param x NULL or an existing calSSB object
#' @param y y
#' @param w w
#' @param wGross wGross
#' @param resids resids
#' @param resids2 resids2
#' @param leverages leverages
#' @param leverages2 leverages2
#' @param samplingWeights samplingWeights
#' @param extra extra
#' @param id id
#' @param wave wave
#'
#' @return A CalSSB object. That is, an object of the type returned by \code{\link{CalibrateSSB}}.
#'
#' @note If x is a ReGenesees/cal.analytic object, this function is a wrapper to \code{\link{CalSSBobjReGenesees}}.
#'
#' @export
#'
#' @seealso \code{\link{CalibrateSSB}}, \code{\link{CalSSBobjReGenesees}}, \code{\link{WideFromCalibrate}}, \code{\link{PanelEstimation}}.
#'
#' @examples
#' # Generates data - two years
#' z <- AkuData(3000) # 3000 in each quarter
#' zPop <- AkuData(10000)[, 1:7]
#'
#' # Create a CalSSB object by CalibrateSSB
#' b <- CalibrateSSB(z, calmodel = "~ sex*age", partition = c("year", "q"), popData = zPop,
#' y = c("unemployed", "workforce"))
#'
#' # Modify the CalSSB object
#' a <- CalSSBobj(b, w = 10*b$w, wave = CrossStrata(z[, c("year", "q")]), id = z$id)
#'
#' # Use the CalSSB object as input ...
#' PanelEstimation(WideFromCalibrate(a), "unemployed", linComb = PeriodDiff(8, 4))
#'
#' # Create CalSSB object without x as input
#' CalSSBobj(y = b$y, w = 10*b$w, resids = b$resids, wave = CrossStrata(z[, c("year", "q")]),
#' id = z$id)
#'
CalSSBobj <- function(x=NULL,y=NULL,w=NULL,wGross=NULL,resids=NULL,resids2=NULL,
leverages=NULL,leverages2=NULL,samplingWeights=NULL,extra=NULL,
id=NULL, wave=NULL){
if(!is.null(x)){
if(class(x)[1]=="cal.analytic"){
if(!is.null(w)) warning("Input w ignored when ReGenesees")
if(!is.null(wGross)) warning("Input wGross ignored when ReGenesees")
if(!is.null(resids)) warning("Input resids ignored when ReGenesees")
if(!is.null(resids2)) warning("Input resids2 ignored when ReGenesees")
if(!is.null(leverages)) warning("Input leverages ignored when ReGenesees")
if(!is.null(leverages)) warning("Input leverages2 ignored when ReGenesees")
return(CalSSBobjReGenesees(x,
y=y,samplingWeights=samplingWeights,extra=extra,id=id,wave=wave))
}
if(class(x)[1] != "calSSB")
stop("x must be an object of class calSSB")
n = attr(x,"n")
nY = attr(x,"nY")
CheckDim(y,n,1,"y")
if(!is.null(y)) x$y = y
}
else{
if(is.null(y)) stop("y needed in input when x=NULL")
if(is.null(w)) stop("w needed in input when x=NULL")
if(is.null(resids)) stop("resids needed in input when x=NULL")
n = dim(y)[1]
nY= dim(y)[2]
x = structure(list(y=y), class = "calSSB", n=n, nY=nY)
}
CheckDim(w,n,1,"w")
CheckDim(wGross,n,1,"wGross")
CheckDim(resids,n,nY,"resids")
CheckDim(resids2,n,nY,"resids2")
CheckDim(leverages,n,1,"leverages")
CheckDim(leverages2,n,1,"leverages2")
CheckDim(samplingWeights,n,1,"samplingWeights")
if(!is.null(extra))
if(NROW(extra) != n)
stop(paste("extra must have ",n," rows"))
CheckDim(id,n,1,"id")
if(!NCOL(wave)==1)
wave = CrossStrata(wave)
CheckDim(wave,n,1,"wave")
if(!is.null(w)) x$w = w
if(!is.null(wGross)) x$wGross = wGross
if(!is.null(resids)) x$resids = resids
if(!is.null(resids2)) x$resids2 = resids2
if(!is.null(leverages)) x$leverages = leverages
if(!is.null(leverages2)) x$leverages2 = leverages2
if(!is.null(samplingWeights)) x$samplingWeights = samplingWeights
if(!is.null(extra)) x$extra = extra
if(!is.null(id)) x$id = id
if(!is.null(wave)) x$wave = wave
x
}
#' Create a CalSSB object from a ReGenesees/cal.analytic object
#'
#' @param x Output from ReGenesees::e.calibrate() (object of class cal.analytic)
#' @param y formula or variable names
#' @param samplingWeights NULL, TRUE (capture from x), formula, variable name or vector of data
#' @param extra NULL, formula, variable names or matrix of data
#' @param id NULL, TRUE (ids from x), formula, variable name or vector of data
#' @param wave NULL, formula, variable name or vector of data
#'
#' @return A CalSSB object. That is, an object of the type returned by \code{\link{CalibrateSSB}}.
#' @export
#'
#' @seealso \code{\link{CalibrateSSB}}, \code{\link{CalSSBobj}}, \code{\link{WideFromCalibrate}}, \code{\link{PanelEstimation}}.
#'
#' @examples
#' \dontrun{
#' # Generates data - two years
#' z <- AkuData(3000) # 3000 in each quarter
#' zPop <- AkuData(10000)[, 1:7]
#' z$samplingWeights <- 1
#' z$ids <- 1:NROW(z)
#'
#' # Create a ReGenesees/cal.analytic object
#' library("ReGenesees")
#' desReGenesees <- e.svydesign(z[z$R == 1, ], ids = ~ids, weights = ~samplingWeights)
#' popTemplate <- pop.template(data = desReGenesees, calmodel = ~sex * age, partition = ~year + q)
#' popTotals <- fill.template(universe = zPop, template = popTemplate)
#' calReGenesees <- e.calibrate(design = desReGenesees, df.population = popTotals)
#'
#' # Create CalSSB objects from a ReGenesees/cal.analytic object
#' CalSSBobjReGenesees(calReGenesees, y = ~unemployed + workforce, id = TRUE,
#' samplingWeights = TRUE, extra = ~famid)
#' a <- CalSSBobjReGenesees(calReGenesees, y = c("unemployed", "workforce"),
#' id = "id", extra = "famid", wave = c("year", "q"))
#'
#' # Use the CalSSB object as input ...
#' PanelEstimation(WideFromCalibrate(a), "unemployed", linComb = PeriodDiff(8, 4))
#'
#' }
CalSSBobjReGenesees <- function(x,y, samplingWeights=NULL,extra=NULL,
id=NULL, wave=NULL){
if (requireNamespace("ReGenesees", quietly = TRUE)) {
get.residuals <- ReGenesees::get.residuals
} else {
stop("The package ReGenesees, is needed.")
}
z = NULL
z$w <- weights(x)
z$resids = get.residuals(x,asFormula(y), scale = "no")
z$y = model.frame(asFormula(y), data=x$variables)
n = dim(z$y)[1]
nY= dim(z$y)[2]
if(!is.null(samplingWeights)){
if(is.logical(samplingWeights)){
if(samplingWeights){
wei <- attr(x, "weights")
wei.char <- all.vars(wei)
samplingWeights <- substr(wei.char, 0, nchar(wei.char) - 4)
} else
samplingWeights <- NULL
}
}
if(!is.null(samplingWeights))
if(class(samplingWeights)[1]=="formula" | (is.character(samplingWeights) & length(samplingWeights)==1 ))
samplingWeights = model.frame(asFormula(samplingWeights), data=x$variables)[,]
z$samplingWeights = samplingWeights
if(!is.null(extra))
if(class(extra)[1]=="formula" | (is.character(extra) & length(extra)<min(dim(x$variables)) ))
extra = model.frame(asFormula(extra), data=x$variables)
z$extra = extra
if(!is.null(id))
if(is.logical(id)){
if(id)
id = model.frame(attr(x,"ids"),data=x$variables)[,]
else
id = NULL
}
if(!is.null(id))
if(class(id)[1]=="formula" | (is.character(id) & length(id)==1))
id = model.frame(asFormula(id), data=x$variables)[,]
z$id = id
if(!is.null(wave))
if(class(wave)[1]=="formula" | (is.character(wave) & length(wave)<min(dim(x$variables)) ))
wave = model.frame(asFormula(wave), data=x$variables)[,]
if(NCOL(wave)==1)
z$wave = wave
else
z$wave = CrossStrata(wave)
n = dim(z$y)[1]
nY= dim(z$y)[2]
structure(z, class = "calSSB", n=n, nY=nY)
}
GetSubset <- function(x, subset) {
if (is.null(x))
return(x)
if (is.null(subset))
return(x)
if(is.null(ncol(x))) x=as.vector(x) ## Handle problem with AsIs-class in special cases
if (is.vector(x))
return(x[subset])
return(x[subset, , drop = FALSE])
}
RemoveZeroCalSSB <- function(x){
s = x$w!=0
for(i in 1: length(x))
    x[[i]] = GetSubset(x[[i]],s) ## Avoid doing this for popTotals, estTM
  x ################ Must also update the nY attribute
}
#' Calibration weighting and variance estimation for panel data
#'
#' @encoding UTF8
#'
#' @param ... Input to CalibrateSSB() and PanelEstimation()
#'
#' @return Output from PanelEstimation()
#' @export
#'
#' @seealso \code{\link{CalibrateSSB}}, \code{\link{PanelEstimation}}.
#'
#' @examples
#' z = AkuData(3000) # 3000 in each quarter
#' zPop = AkuData(10000)[,1:7]
#' lc = rbind(LagDiff(8,4),PeriodDiff(8,4))
#' rownames(lc) = c("diffQ1","diffQ2","diffQ3","diffQ4","diffYearMean")
#' CalibrateSSBpanel(grossSample=z,calmodel="~ sex*age", partition=c("year","q"),popData=zPop,
#' y=c("unemployed","workforce"),id="id",wave=c("year","q"),
#' numerator="unemployed",linComb=lc)
CalibrateSSBpanel = function(...){
sysCall <- sys.call()
panelArgs <- names(sysCall) %in% formalArgs(PanelEstimation)
CalibrateArgs <- !panelArgs
sysCallPanel <- as.call(c(list(as.name("PanelEstimation"),x="x"),as.list(sysCall[panelArgs])) )
sysCall <- sysCall[!panelArgs]
sysCall[[1]] <- as.name("CalibrateSSB")
parentFrame = parent.frame()
sysCallPanel$x = WideFromCalibrate(eval(sysCall, envir=parentFrame))
eval(sysCallPanel, envir=parentFrame)
}
#bB=RemoveZeroCalSSB(CalSSBobj(b,id=z$id))
#bWideB = WideFromCalibrate(bB,CrossStrata(z[b$w!=0 ,c("year","q")]))
|
/scratch/gouwar.j/cran-all/cranData/CalibrateSSB/R/calSSB.R
|
calibratePackageSurvey = function(grossSample,modelformula,popTotals=NULL,response=NULL,
popData=NULL,samplingWeights=NULL,calfun="linear",totalReturn=0,returnwGross=FALSE,...)
{
if(returnwGross) calWeights1 = NULL
modelformula =as.formula(modelformula)
lm_model = lm(modelformula,data=grossSample)
xFromModel = model.matrix(lm_model)
rFromModel = model.frame(lm_model)[1]
if(is.null(popTotals))
{
if(is.null(popData)) tp = getTotal(grossSample,lm_model,samplingWeights) # Ny
else tp = getTotal(popData,lm_model)
if(totalReturn==2) return(tp)
popTotals = tp$N*tp$colSum/tp$colN
}
else
{
popTotals=setTotal(popTotals,lm_model)
}
if(totalReturn==1) return(popTotals)
if(sum(is.na(popTotals))>0)
{
# not netDesign but same name
netDesign = svydesign(ids=~1,data=data.frame(rFromModel,xFromModel),weights=samplingWeights)
col1= !is.na(popTotals)
    col1[1] = FALSE # Assume the Intercept is no. 1 and remove it
    varNames1 = colnames(xFromModel)[col1] # Variables for which totals exist
modelformula1 = update(modelformula,paste("~ ",paste(varNames1,collapse="+")))
calWeights1 = try(weights(calibrate(netDesign,modelformula1 ,popTotals[!is.na(popTotals)],calfun=calfun,...)),silent=FALSE)
if(class(calWeights1)[1]=="try-error")
stop ("The function calibrate in package survey did not succeed. Maybe change the parameter usePackage?")
popTotals[is.na(popTotals)] = colSums(calWeights1*xFromModel[,is.na(popTotals)])
}
if(!(totalReturn==0)) return(popTotals)
netDesign = svydesign(ids=~1,data=grossSample[rFromModel==1,],weights=samplingWeights[rFromModel==1])
calWeights = rep(0,dim(grossSample)[1])
survey_weights = try(weights(calibrate(netDesign,modelformula ,popTotals,calfun=calfun,...)),silent=FALSE)
if(class(survey_weights)[1]=="try-error")
stop ("The function calibrate in package survey did not succeed. Maybe change the parameter usePackage?")
calWeights[rFromModel==1] = survey_weights
if(returnwGross){
return(list(w=calWeights,wGross=calWeights1))
}
calWeights
}
|
/scratch/gouwar.j/cran-all/cranData/CalibrateSSB/R/calibratePackageSurvey.R
|
# (5.3.14) in ETOS manual (without "- -" typing error)
# (11.8) in Sarndal-Lundstrom-2005
# Without summing over strata
etos_5.3.14 = function(N,n,g,e)
{
ge=g*e
(N^2*(1-n/N)/(n*(n-1))) * (sum(ge^2) - (sum(ge))^2/n ) - (N/n)*(N/n-1)*sum(ge*(g-1)*e)
}
# (5.3.15) in ETOS manual
# (11.9) in Sarndal-Lundstrom-2005
# Without summing over strata
etos_5.3.15 = function(N,n,g,e)
{
(N/n)^2*sum(g*(g-1)*e^2)
}
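# In math notation (as read directly from the two functions above), with g the
# g-weights and e the residuals:
# V1 = N^2 (1 - n/N) / (n (n-1)) * ( sum((g e)^2) - (sum(g e))^2 / n )
#      - (N/n) (N/n - 1) * sum( g (g-1) e^2 )
# V2 = (N/n)^2 * sum( g (g-1) e^2 )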
# Computes residuals from the model matrix and a matrix of y's
# isTotPop is a vector indicating which variables have known population totals
# FALSE is appended if the vector is too short
# The default is that only N is known in the population
# w are the design weights
etos_e1_e2 = function(mNetto,yMatrix,isTotPop=TRUE,ginvtol=1e-06,w=NULL)
{
inv = function(x) my_ginv(x,tol=ginvtol)
if(is.null(w)) w = 1
sqrt_w = sqrt(w)
isTotPop_input = isTotPop
isTotPop = rep(FALSE,dim(mNetto)[2])
for(i in 1:length(isTotPop_input))
isTotPop[i] = isTotPop_input[i]
reswreg = function(y,x,sqrt_w)
y-(x %*% inv(x*sqrt_w)*sqrt_w) %*% y
a=NULL
a$e1=reswreg(yMatrix,mNetto[,isTotPop,drop=FALSE],sqrt(w))
if(sum(!isTotPop)==0) a$e2=a$e1
else a$e2 = reswreg(yMatrix,mNetto,sqrt(w))
a
}
etos_e1_e2_by_lm = function(mNetto,yMatrix,isTotPop=TRUE,lmInfluence=TRUE,w=NULL)
{
isTotPop_input = isTotPop
isTotPop = rep(FALSE,dim(mNetto)[2])
for(i in 1:length(isTotPop_input))
isTotPop[i] = isTotPop_input[i]
a=NULL
m = lm(yMatrix~mNetto,weights=w)
a$e1 = resid(m)
a$e2 = a$e1
if(lmInfluence){
a$h1 = lm.influence(m)$hat
a$h2 = a$h1
}
if(!(sum(!isTotPop)==0)){
mNetto = mNetto[,isTotPop,drop=FALSE]
m = lm(yMatrix~mNetto,weights=w)
a$e1 = resid(m)
if(lmInfluence) a$h1 = lm.influence(m)$hat
}
a
}
etosV1V2 = function(e1,e2,w,samplingWeights=NULL,R=is.finite(e1), N = sum(w,na.rm=T),n = sum(is.finite(w))){
if(is.null(samplingWeights)) g=w*(n/N)
else g = w/samplingWeights
V1 = etos_5.3.14(N,n,g[R],e1[R])
V2 = etos_5.3.15(N,n,g[R],e2[R])
list(V1=V1,V2=V2)
}
|
/scratch/gouwar.j/cran-all/cranData/CalibrateSSB/R/etosFunctions.R
|
ImpVekt = function(mBrutto,r,
totPop=dim(mBrutto)[1],N_brutto=TRUE,totPopReturn=FALSE,singularReturn=FALSE,ginvtol=1e-06,w=NULL)
{
wGross = NULL
if(singularReturn) totPopReturn=TRUE
inv = function(x) my_ginv(x,tol=ginvtol)
if(is.null(w)) w = 1
sqrt_w = sqrt(w)
totPop_input = totPop
totPop = rep(NaN,dim(mBrutto)[2])
for(i in 1:length(totPop_input))
totPop[i] = totPop_input[i]
totPop_input = totPop
tp = !is.na(totPop)
wregw = function(tot,x,sqrt_w)
{
(tot %*% inv(x*sqrt_w)*sqrt_w)[,]
}
if(sum(!tp)>0)
wGross = wregw(totPop[tp],mBrutto[,tp,drop=FALSE],sqrt_w)
totPop[!tp] = colSums(mBrutto[,!tp,drop=FALSE]*wGross)
if(length(sqrt_w)>1) sqrt_w=sqrt_w[r==1]
vekt = wregw(totPop,mBrutto[r==1,,drop=FALSE],sqrt_w)
if(N_brutto)
{
vekt_netto = vekt
vekt = r
vekt[r==1] = vekt_netto
}
vekt = as.vector(unlist(vekt,use.names=FALSE))
if(totPopReturn)
{
totP = totPop
totPop = totPop_input
if(singularReturn)
return(list(vekt=vekt,totPop=totPop,wGross=wGross))
#singular1=svd(R,nv=0,nu=0)$d, # R gir samme svar som mBrutto
#singular2=svd(qtw %*%q,nv=0,nu=0)$d))
return(list(vekt=vekt,totPop=totPop,wGross=wGross))
}
vekt
}
glmR2ImpVekt = function(glmR,...)
{
ImpVekt(model.matrix(glmR),glmR$y==1,...)
}
ImpVektFixed = function(mNetto,totPop,wFixed=NULL,useginv=TRUE,ginvtol=1e-06)
{
if(useginv)
inv = function(x) my_ginv(x,tol=ginvtol)
else inv = function(x) solve(x)
qr_mNetto = qr(mNetto) # mNetto = Q*R
Q = qr.Q(qr_mNetto)
R = qr.R(qr_mNetto)
invR = inv(R)
totPop_invR = totPop %*% invR
vekt = as.vector(totPop_invR %*% t(Q))
if(is.null(wFixed)) return(vekt)
z = matrix(wFixed - vekt,length(vekt),1)
za = z[!is.na(wFixed),,drop=FALSE]
Qa = Q[!is.na(wFixed),,drop=FALSE]
Qb = Q[is.na(wFixed),,drop=FALSE]
d = - t(Qa) %*% za
zb = (Qb %*% inv(t(Qb)%*%Qb)) %*% d
z[is.na(z)] = zb
nyVekt = as.vector(z) + vekt
return(nyVekt)
}
|
/scratch/gouwar.j/cran-all/cranData/CalibrateSSB/R/impVekt.R
|
lagVekter = function(
modellen,
brutto,
pop = NULL,
min_w = -Inf,
max_w = Inf,
totalReturn=0,
maxiter=10,
samplingWeights=NULL,
returnwGross=FALSE)
{
z = brutto
lm_modell = lm(as.formula(modellen),data=z)
if(is.numeric(pop) & is.vector(pop))
{
if(length(pop)==1)
{
totPop=pop
}
else
{
totPop=setTotal(pop,lmObject=lm_modell)
}
}
else
{
if(is.null(pop))
{
tp = getTotal(z,lm_modell,samplingWeights)
}
else
{
tp = getTotal(pop,lm_modell)
}
if(totalReturn>1) return(tp)
totPop = tp$N*tp$colSum/tp$colN
}
if(totalReturn>0) return(totPop)
x_model = model.matrix(lm_modell)
r_model = model.frame(lm_modell)[1][,]
a = ImpVekt(x_model,r_model ,totPop=totPop,totPopReturn=TRUE,w=samplingWeights)
w0=a$vekt
w = w0[r_model==1]
x_netto = x_model[r_model==1,]
minw_eps = 1E-6 * max(min_w,1)
maxwPlusEps = (1+1E-6) * max_w # Avoid Inf-Inf
fortsett = (sum(w<(min_w+minw_eps)|w>maxwPlusEps )>0)
iter=0
if(fortsett & !is.null(samplingWeights)) stop("Limits&SamplingWeights not implemented")
while(fortsett)
{
iter=iter+1
wFixed = NA+w
wFixed[wFixed>min_w] = NA
wFixed[w<min_w] = min_w
wFixed[w>max_w] = max_w
w = ImpVektFixed(x_netto,totPop=a$totPop,wFixed=wFixed)
fortsett = (sum(w<(min_w-minw_eps)|w>maxwPlusEps )>0)
if(fortsett)
if((iter>=maxiter)|(sum(w[!is.na(wFixed)]<(min_w-minw_eps)|w[!is.na(wFixed)]>maxwPlusEps )>0))
{
w = w0[r_model==1]
fortsett = FALSE
}
}
w1 = w0
w1[r_model==1] = w
if(returnwGross){
return(list(w=w1,wGross=a$wGross))
}
w1
}
setTotalOld = function(total,lmObject)
{
  setTotal0 <- get0("setTotalFromCalibrateSSBuser", ifnotfound = FALSE) # Hack that makes it possible to swap out ...
if(is.function(setTotal0)){
return(setTotal0(total,lmObject))
}
x = model.matrix(lmObject)[1,]
x[1:length(x)] = NA
varnames = names(total)[names(total) %in% names(x)]
for(i in 1:length(varnames))
x[names(x)==varnames[i]] = total[names(total)==varnames[i]]
x
}
getTotalOld = function(data,lmObject,w=NULL)
{
  getTotal0 <- get0("getTotalFromCalibrateSSBuser", ifnotfound = FALSE) # Hack that makes it possible to swap out ...
if(is.function(getTotal0)){
return(getTotal0(data,lmObject,w))
}
x=model.frame(lmObject)
x1=dim(x)[1]
x2=dim(x)[2]
d1=dim(data)[1]
while(dim(x)[1]<x1+d1)
{
diffn = x1+d1-dim(x)[1]
if(diffn>dim(x)[1]) x=rbind(x,x)
else x=rbind(x,x[1:diffn,])
}
x[(x1+1):(x1+d1),]=NA
varnames =names(data)[names(data) %in% names(x)]
if(length(varnames)>0) for(i in 1:length(varnames))
x[(x1+1):(x1+d1),names(x)==varnames[i]] = data[,names(data)==varnames[i],drop=FALSE]
m=model.matrix(lmObject,data=x,na.action=NULL)
m=m[(x1+1):(x1+d1), ,drop=FALSE]
x=NULL
if(is.null(w))
{
x$colSum = colSums(m,na.rm=TRUE)
x$colN = colSums(!is.na(m))
x$N = dim(m)[1]
} else
{
x$colSum = colSums(w*m,na.rm=TRUE)
x$colN = colSums(w*(!is.na(m)))
x$N = sum(w)
}
x
}
sumList = function(x,xNew)
{
for(i in 1:length(x)) x[[i]] = x[[i]]+ xNew[[i]]
x
}
|
/scratch/gouwar.j/cran-all/cranData/CalibrateSSB/R/lagVekter.R
|
#
# Copy of ginv from package "MASS"
# Only difference: Possibility to return singular values
#
# + override input by ginv_tol_FromUser
#
my_ginv = function (X, tol = sqrt(.Machine$double.eps),singularReturn=FALSE)
{
tolFromUser <- get0("ginv_tol_FromUser", ifnotfound = FALSE)
if(tolFromUser){
if(is.logical(tolFromUser))
tol = sqrt(.Machine$double.eps)
else{
if(is.numeric(tolFromUser))
tol = tolFromUser
}
#cat("ginv-tol set to",tol,"\n")
#flush.console()
}
if (length(dim(X)) > 2L || !(is.numeric(X) || is.complex(X)))
stop("'X' must be a numeric or complex matrix")
if (!is.matrix(X))
X <- as.matrix(X)
Xsvd <- svd(X)
if (is.complex(X))
Xsvd$u <- Conj(Xsvd$u)
Positive <- Xsvd$d > max(tol * Xsvd$d[1L], 0)
if (all(Positive))
invX = Xsvd$v %*% (1/Xsvd$d * t(Xsvd$u)) # new: "invX ="
else if (!any(Positive))
invX = array(0, dim(X)[2L:1L]) # new: "invX ="
else invX = Xsvd$v[, Positive, drop = FALSE] %*% ((1/Xsvd$d[Positive]) *
t(Xsvd$u[, Positive, drop = FALSE])) # new: "invX ="
if(!singularReturn) return(invX)
list(invX=invX,singular=Xsvd$d)
}
|
/scratch/gouwar.j/cran-all/cranData/CalibrateSSB/R/my_ginv.R
|
#' OrderedVarNames
#'
#' @param x input
#' @param sep
#'
#' @return output
#' @keywords internal
#' @export
#'
#' @examples
#' z <- data.frame(A = factor(c("a", "b", "c")), B = factor(1:2), C = 1:6)
#' x <- colnames(model.matrix(~B * C * A, z))
#' OrderedVarNames(x)
OrderedVarNames <- function(x, sep = ":") {
unlist(lapply(strsplit(x, sep), function(x) paste(sort(x), collapse = sep)))
}
#' MatchVarNames
#'
#' @param x x
#' @param y y
#' @param sep sep
#' @param makeWarning Warning when matching by reordering
#'
#' @return An integer vector giving the position in y of the first match if there is a match, otherwise NA.
#' @keywords internal
#' @export
#'
#' @examples
#' z <- data.frame(A = factor(c("a", "b", "c")), B = factor(1:2), C = 1:6)
#' x <- colnames(model.matrix(~B * C * A, z))
#' y <- colnames(model.matrix(~A * B + A:B:C, z))
#' MatchVarNames(x, y)
MatchVarNames <- function(x, y, sep = ":", makeWarning = FALSE) {
matchNames <- match(x, y)
noMatch <- is.na(matchNames)
if (!any(noMatch))
return(matchNames)
matchNames[noMatch] <- match(OrderedVarNames(x[noMatch]), OrderedVarNames(y))
if (makeWarning)
if (any(!is.na(matchNames[noMatch])))
warning("Matching by reordering")
matchNames
}
setTotal <- function(total, lmObject) {
x <- model.matrix(lmObject)[1, ]
x[] <- NA
matchNames <- MatchVarNames(names(x), names(total), makeWarning = FALSE)
if (any(duplicated(matchNames[!is.na(matchNames)])))
stop("Duplicates when matching")
x[] <- total[matchNames]
x
}
getTotal <- function(data, lmObject, w = NULL) {
mfCal <- model.frame(lmObject)
mfCalNA <- mfCal[c(NA, 1), ][1, , drop = FALSE]
rownames(mfCalNA) <- NULL
mfCalNames <- colnames(mfCal)
okNames <- mfCalNames %in% colnames(data)
if (any(!okNames))
data <- cbind(data[, mfCalNames[okNames], drop = FALSE], mfCalNA[, mfCalNames[!okNames], drop = FALSE])
m <- model.matrix(lmObject, data = data, na.action = NULL)
x <- NULL
if (is.null(w)) {
x$colSum <- colSums(m, na.rm = TRUE)
x$N <- dim(m)[1]
if (anyNA(m)) {
x$colN <- colSums(!is.na(m))
} else {
x$colN <- rep(x$N, length(x$colSum))
names(x$colN) <- names(x$colSum)
}
} else {
x$colSum <- colSums(w * m, na.rm = TRUE)
x$N <- sum(w)
if (anyNA(m)) {
# will not happen
x$colN <- colSums(w * (!is.na(m)))
} else {
x$colN <- rep(x$N, length(x$colSum))
names(x$colN) <- names(x$colSum)
}
}
x
}
|
/scratch/gouwar.j/cran-all/cranData/CalibrateSSB/R/setAndGetTotal.R
|
BT.samples <- function(y, p, to.pred){
Df = cbind.data.frame(y, p)
# REPEAT TO PREVENT BT SAMPLES WITH NA'S
repeat {
BT.sample = Df[sample(1:nrow(Df), replace = T), ]
loess.BT = loess(y ~ p, BT.sample)
pred.loess = predict(loess.BT, to.pred, type = "fitted")
if (!any(is.na(pred.loess)))
break
}
return(pred.loess)
}
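# A hedged usage sketch (hypothetical data; names chosen for illustration):
# one bootstrap replicate of the smoothed calibration curve, evaluated on a
# grid of predicted probabilities.
# set.seed(1)
# p <- runif(200); y <- rbinom(200, 1, p)
# grid <- seq(0.1, 0.9, by = 0.1)
# BT.samples(y, p, grid)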
|
/scratch/gouwar.j/cran-all/cranData/CalibrationCurves/R/BT.samples.R
|
#get the auc through Mann-Whitney statistics
#References:
#Comparing the Areas under Two or More
#Correlated Receiver Operating Characteristic Curves:
#A Nonparametric Approach
#Author(s): Elizabeth R. DeLong, David M. DeLong and Daniel L. Clarke-Pearson
#Source: Biometrics, Vol. 44, No. 3 (Sep., 1988), pp. 837-845
#STATISTICS IN MEDICINE
#Statist. Med. 2006; 25:559-573
#Confidence intervals for an effect size measure based on
#the Mann-Whitney statistic. Part 2: Asymptotic methods
#and evaluation
#Robert G. Newcombe
#x: the scores of subjects from class P
#y: the scores of subjects from class N
#alpha: type I error
#NOTE: the larger the score, the more likely a subject is from class P
getAUCmw <- function(x, y){
xy <- expand.grid(x, y)
mean(ifelse(xy[,2] < xy[,1], 1, (ifelse(xy[,2] == xy[,1], 1/2, 0))))
}
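# Note (assumed equivalence, up to tie handling): getAUCmw(x, y) should equal
# wilcox.test(x, y)$statistic / (length(x) * length(y)), i.e. the Mann-Whitney
# estimate of P(X > Y) + P(X = Y)/2.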
#get the variance V_2(\theta) from method 4 of Newcombe 2006
getVARmw <- function(theta, m, n){
nstar <- mstar <- (m+n)/2 - 1
theta *
(1-theta) *
(1 + nstar*(1-theta)/(2-theta) + mstar*theta/(1+theta)) /
(m*n)
}
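# As implemented above (method 4 of Newcombe 2006), with nstar = mstar = (m+n)/2 - 1:
# V_2(theta) = theta*(1-theta) *
#              (1 + nstar*(1-theta)/(2-theta) + mstar*theta/(1+theta)) / (m*n)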
#note that here x < y
#m is the length of x and n is the length of y
getS2mw <- function(x, y, m, n){
xi <- sort(x)
yj <- sort(y)
rxy <- rank(c(xi, yj))
Ri <- rxy[1:m]
Sj <- rxy[m+1:n]
S102 <- 1/((m-1)*n^2) * (sum((Ri-1:m)^2) - m*(mean(Ri)-(m+1)/2)^2)
S012 <- 1/((n-1)*m^2) * (sum((Sj-1:n)^2) - n*(mean(Sj)-(n+1)/2)^2)
S2 <- (m*S012 + n*S102) / (m + n)
S2
}
#get the CI by method 5 of Newcombe 2006
getAUCCImwu <- function(hat.theta, zalpha, m, n){
nstar <- mstar <- (m+n)/2 - 1
a <- -1 - nstar - mstar
b <- 1 + 2*mstar
c <- 2 + nstar
d <- 1 + 2*hat.theta
ht2 <- hat.theta^2
e <- 2 - 2*hat.theta - ht2
f <- ht2 - 4*hat.theta
g <- 2*ht2
z2 <- zalpha^2
mn <- m*n
z5 <- -mn + z2*a
z4 <- mn*d - z2*(a-b)
z3 <- mn*e - z2*(b-c)
z2 <- mn*f - z2*c
z1 <- mn*g
roots <- polyroot(c(z1, z2, z3, z4, z5))
  real <- Re(roots[sapply(seq_along(roots), function(i) isTRUE(all.equal(Im(roots[i]), 0)))])
real <- real[real > 0 & real < 1]
ci <- sort(real)
  if(length(ci) > 2)
    warning("More than two roots meet the requirement when computing the confidence interval.")
else{
if(length(real) == 1){
if(real <= hat.theta){
ci <- c(real, 1)
}
else{
ci <- c(0, real)
}
}
if(length(real) == 0){
ci <- c(0, 1)
}
}
ci
}
#get the CI by method 5 of Newcombe 2006
auc.mw.newcombe <- function(x, y, alpha){
point <- getAUCmw(x, y)
nx <- length(x)
ny <- length(y)
zalpha <- qnorm(1-alpha/2)
ci <- getAUCCImwu(point, zalpha, ny, nx)
c(point, ci)
}
auc.mw.zhou <- function(x, y, alpha){
if(max(y) < min(x)){
c(1, 1, 1)
}
else{
point <- getAUCmw(x, y)
nx <- length(x)
ny <- length(y)
zalpha <- qnorm(1-alpha/2)
varHatTheta <- getVARmw(point,nx, ny)
Z <- 1/2 * log((1+point)/(1-point))
varZ <- 4 / (1-point^2)^2 * varHatTheta
LL <- Z - zalpha*sqrt(varZ)
UL <- Z + zalpha*sqrt(varZ)
ci <- c((exp(2*LL) - 1)/ (exp(2*LL) + 1), (exp(2*UL) - 1)/ (exp(2*UL) + 1))
c(point,ci)
}
}
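# Note: the back-transformation (exp(2*L) - 1) / (exp(2*L) + 1) used above equals
# tanh(L), the inverse of Fisher's z-transform Z = atanh(theta) = 0.5*log((1+theta)/(1-theta)).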
auc.mw.pepe <- function(x, y, alpha){
if(max(y) < min(x)){
c(1, 1, 1)
}
else{
point <- getAUCmw(x, y)
nx <- length(x)
ny <- length(y)
zalpha <- qnorm(1-alpha/2)
varHatTheta <- (nx+ny) * getS2mw(y, x, ny, nx) / (nx*ny)
#varHatTheta <- getVARmw(point,nx, ny)
LL <- log(point/(1-point)) - zalpha*sqrt(varHatTheta)/(point*(1-point))
UL <- log(point/(1-point)) + zalpha*sqrt(varHatTheta)/(point*(1-point))
ci <- c(exp(LL) / (1+exp(LL)), exp(UL) / (1+exp(UL)))
c(point,ci)
}
}
auc.mw.delong <- function(x, y, alpha){
point <- getAUCmw(x, y)
nx <- length(x)
ny <- length(y)
zalpha <- qnorm(1-alpha/2)
D10 <- sapply(1:ny, function(i)
mean(ifelse(x > y[i], 1, ifelse(x == y[i], 1/2, 0))))
D01 <- sapply(1:nx, function(i)
mean(ifelse(x[i] > y, 1, ifelse(x[i] == y, 1/2, 0))))
varDhatTheta <- 1/(ny*(ny-1))*sum((D10-point)^2) +
1/(nx*(nx-1))*sum((D01-point)^2)
ci <- c(point - zalpha*sqrt(varDhatTheta),
point + zalpha*sqrt(varDhatTheta))
c(point, ci)
}
mw.jackknife <- function(x, y){
nx <- length(x)
ny <- length(y)
n <- nx + ny
hatThetaPartial <- rep(0, n)
for(i in 1:nx){
hatThetaPartial[i] <- getAUCmw(x[-i], y)
}
for(i in 1:ny){
hatThetaPartial[i+nx] <- getAUCmw(x, y[-i])
}
hatThetaPartial
}
auc.mw.jackknife <- function(x, y, alpha){
nx <- length(x)
ny <- length(y)
n <- nx + ny
hatTheta <- getAUCmw(x, y)
hatThetaPseudo <- rep(0, n)
hatThetaPartial <- mw.jackknife(x, y)
for(i in 1:n){
hatThetaPseudo[i] <- n*hatTheta - (n-1)*hatThetaPartial[i]
}
point <- mean(hatThetaPseudo)
ST2 <- mean((hatThetaPseudo - point)^2) / (n-1)
ST <- sqrt(ST2)
z.alpha2 <- qt(1 - alpha/2, df=n-1)
ci <- c(point - z.alpha2*ST, point + z.alpha2*ST)
c(point, ci)
}
auc.mw.boot <- function(x, y, alpha, nboot=1000, method){
if(max(y) < min(x)){
c(1, 1, 1)
}
else{
nx <- length(x)
ny <- length(y)
point <- getAUCmw(x, y)
index.x <- matrix(sample.int(nx, size = nx*nboot, replace = TRUE),
nboot, nx)
index.y <- matrix(sample.int(ny, size = ny*nboot, replace = TRUE),
nboot, ny)
mw.boot <- sapply(1:nboot, function(i) getAUCmw(x[index.x[i,]],
y[index.y[i,]]))
if(method=="P"){
ci <- as.vector(quantile(mw.boot, c(alpha/2, 1-alpha/2), type=6))
}
else{
hatZ0 <- qnorm(mean(mw.boot < point))
partial <- mw.jackknife(x, y)
mpartial <- mean(partial)
hatA <- sum((mpartial - partial)^3) /
(6 * (sum((mpartial - partial)^2))^(3/2))
alpha1 <- pnorm(hatZ0 + (hatZ0 + qnorm(alpha/2)) /
(1 - hatA*(hatZ0 + qnorm(alpha/2))))
alpha2 <- pnorm(hatZ0 + (hatZ0 + qnorm(1-alpha/2)) /
(1 - hatA*(hatZ0 + qnorm(1-alpha/2))))
ci <- as.vector(quantile(mw.boot, c(alpha1, alpha2), type=6))
}
c(point, ci)
}
}
auc.nonpara.mw <- function(x, y, conf.level=0.95,
method=c("newcombe", "pepe", "delong", "jackknife", "bootstrapP", "bootstrapBCa"),
nboot){
alpha <- 1 - conf.level
method <- match.arg(method)
estimate <- switch(method,
newcombe=auc.mw.newcombe(x, y, alpha),
pepe=auc.mw.pepe(x, y, alpha),
delong=auc.mw.delong(x, y, alpha),
jackknife=auc.mw.jackknife(x, y, alpha),
bootstrapP=auc.mw.boot(x, y, alpha, nboot, method="P"),
bootstrapBCa=auc.mw.boot(x, y, alpha, nboot, method="BCa"))
estimate
}
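# Sketch of assumed usage (not run): point estimate with confidence limits for
# the AUC, x being the scores of class P and y the scores of class N.
#   x <- rnorm(40, mean = 1); y <- rnorm(60)
#   auc.nonpara.mw(x, y, conf.level = 0.95, method = "delong")
#   # returns c(point estimate, lower limit, upper limit)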
|
/scratch/gouwar.j/cran-all/cranData/CalibrationCurves/R/auc.nonpara.mw.R
|
ci.auc <- function(crit, pred, conf.level = 0.95, method = "pepe"){
  tmp <- cbind.data.frame(crit = crit, pred = pred)
  # healthy (non-diseased) subjects
  nondis <- tmp[which(tmp$crit == 0), ]
  # diseased subjects
  dis <- tmp[which(tmp$crit == 1), ]
  # confidence interval for the AUC
  if (!grepl("bootstrap", method)) {
    result <- auc.nonpara.mw(dis$pred, nondis$pred, conf.level, method)
  } else {
    warning(
      "Bootstrap-based methods are not supported by this package. Method will be set to 'pepe'. \n\n"
      , immediate. = TRUE)
    result <- auc.nonpara.mw(dis$pred, nondis$pred, conf.level, method = "pepe")
  }
  return(result)
}
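# Sketch of assumed usage (not run): crit is the binary outcome (0/1) and pred
# the predicted probability; bootstrap-based methods fall back to "pepe".
#   crit <- rbinom(100, 1, 0.3); pred <- runif(100)
#   ci.auc(crit, pred, conf.level = 0.95, method = "pepe")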
|
/scratch/gouwar.j/cran-all/cranData/CalibrationCurves/R/ci.auc.R
|
#' Calibration performance using the generalized calibration framework
#'
#' Function to assess the calibration performance of a prediction model where the outcome's distribution is a member of the exponential family (De Cock Campo, 2023).
#' The function plots the generalized calibration curve and computes the generalized calibration slope and intercept.
#'
#'
#' @param y a vector with the values for the response variable
#' @param yHat a vector with the predicted values
#' @param family a description of the type of distribution and link function in the model. This can be a character string naming a family function, a family function or the result of a call to a family function.
#' (See family for details of family functions.)
#' @param plot logical, indicating if a plot should be made or not.
#' @param Smooth logical, indicating if the flexible calibration curve should be estimated.
#' @param GLMCal logical, indicating if the GLM calibration curve has to be estimated.
#' @param lwdIdeal the line width of the ideal line.
#' @param colIdeal the color of the ideal line.
#' @param ltyIdeal the line type of the ideal line.
#' @param lwdSmooth the line width of the flexible calibration curve.
#' @param colSmooth the color of the flexible calibration curve.
#' @param ltySmooth the line type of the flexible calibration curve.
#' @param argzSmooth arguments passed to \code{\link{loess}}.
#' @param lwdGLMCal the line width of the GLM calibration curve.
#' @param colGLMCal the color of the GLM calibration curve.
#' @param ltyGLMCal the line type of the GLM calibration curve.
#' @param AddStats logical, indicating whether to add the values of the generalized calibration slope and intercept to the plot.
#' @param Digits the number of digits of the generalized calibration slope and intercept.
#' @param cexStats the font size of the statistics shown on the plot.
#' @param lwdLeg the line width in the legend.
#' @param Legend logical, indicating whether the legend has to be added.
#' @param legendPos the position of the legend on the plot.
#' @param xLim,yLim numeric vectors of length 2, giving the x and y coordinates ranges (see \code{\link{plot.window}})
#' @param posStats numeric vector of length 2, specifying the x and y coordinates of the statistics (generalized calibration curve and intercept) printed on the plot. Default is \code{NULL}
#' which places the statistics in the top left corner of the plot.
#' @param confLimitsSmooth character vector to indicate if and how the confidence limits for the flexible calibration curve have to be computed. \code{"none"} omits the confidence limits,
#' \code{"bootstrap"} uses 2000 bootstrap samples to calculate the 95\% confidence limits and \code{"pointwise"} uses the pointwise confidence limits.
#' @param confLevel the confidence level for the calculation of the pointwise confidence limits of the flexible calibration curve.
#' @param Title the title of the plot
#' @param xlab x-axis label, default is \code{"Predicted value"}.
#' @param ylab y-axis label, default is \code{"Empirical average"}.
#' @param EmpiricalDistribution logical, indicating if the empirical distribution of the predicted values has to be added to the bottom of the plot.
#' @param length.seg controls the length of the histogram lines. Default is \code{1}.
#' @param ... arguments to be passed to \code{\link{plot}}, see \code{\link{par}}
#'
#' @return An object of type \code{GeneralizedCalibrationCurve} with the following slots:
#' @return \item{call}{the matched call.}
#' @return \item{stats}{a vector containing performance measures of calibration.}
#' @return \item{cl.level}{the confidence level used.}
#' @return \item{Calibration}{contains the calibration intercept and slope, together with their confidence intervals.}
#' @return \item{warningMessages}{if any, the warning messages that were printed while running the function.}
#' @return \item{CalibrationCurves}{the coordinates for plotting the calibration curves.}
#' @export
#'
#' @references De Cock Campo, B. (2023). Towards reliable predictive analytics: a generalized calibration framework. arXiv:2309.08559, available at \url{https://arxiv.org/abs/2309.08559}.
#'
#' @examples
#' library(CalibrationCurves)
#' library(mgcv)
#' data("poissontraindata")
#' data("poissontestdata")
#'
#' glmFit = glm(Y ~ ., data = poissontraindata, family = poisson)
#'
#' # Example of a well calibrated poisson prediction model
#' yOOS = poissontestdata$Y
#' yHat = predict(glmFit, newdata = poissontestdata, type = "response")
#' genCalCurve(yOOS, yHat, family = "poisson", plot = TRUE)
#'
#' # Example of an overfit poisson prediction model
#' gamFit = gam(Y ~ x1 + x3 + x1:x3 + s(x5), data = poissontraindata, family = poisson)
#' yHat = as.vector(predict(gamFit, newdata = poissontestdata, type = "response"))
#' genCalCurve(yOOS, yHat, family = "poisson", plot = TRUE)
#'
#' # Example of an underfit poisson prediction model
#' glmFit = glm(Y ~ x2, data = poissontraindata, family = poisson)
#' yOOS = poissontestdata$Y
#' yHat = predict(glmFit, newdata = poissontestdata, type = "response")
#' genCalCurve(yOOS, yHat, family = "poisson", plot = TRUE)
genCalCurve <- function(y, yHat, family, plot = TRUE, Smooth = FALSE, GLMCal = TRUE, lwdIdeal = 2, colIdeal = "gray", ltyIdeal = 1,
lwdSmooth = 1, colSmooth = "blue", ltySmooth = 1, argzSmooth = alist(degree = 2),
lwdGLMCal = 1, colGLMCal = "red", ltyGLMCal = 1,
                        AddStats = TRUE, Digits = 3, cexStats = 1, lwdLeg = 1.5, Legend = TRUE, legendPos = "bottomright",
xLim = NULL, yLim = NULL, posStats = NULL,
confLimitsSmooth = c("none", "bootstrap", "pointwise"), confLevel = 0.95,
Title = "Calibration plot",
xlab = "Predicted value", ylab = "Empirical average",
EmpiricalDistribution = TRUE, length.seg = 1, ...) {
bootSamples <- NULL
call = match.call()
if (is.character(family))
family <- get(family, mode = "function", envir = parent.frame())
if (is.function(family))
family <- family()
if (is.null(family$family)) {
print(family)
stop("'family' not recognized")
}
if(!is.null(posStats)) {
if(!is.vector(posStats))
stop("Has to be of type vector.")
if(length(posStats) != 2)
stop("Length of the vector has to be equal to 2.")
}
confLimitsSmooth = match.arg(confLimitsSmooth)
a = 1 - confLevel
wmess = NULL
Eta = family$linkfun(yHat)
ClInt = tryCatch(
glm(
y ~ offset(Eta),
family = family,
control = glm.control(maxit = 1e2)
),
error = function(e)
T,
warning = function(w)
T
)
if(is.logical(ClInt)) {
# https://stackoverflow.com/questions/8212063/glm-starting-values-not-accepted-log-link
ClInt = glm(I(y + .Machine$double.eps) ~ offset(Eta), family = family, control = glm.control(maxit = 1e2))
}
ClSl =
tryCatch(
glm(y ~ Eta, family = family, control = glm.control(maxit = 1e2)),
error = function(e)
T,
warning = function(w)
T
)
if(is.logical(ClSl)) {
lmFit = lm(y ~ Eta)
ClSl = glm(y ~ Eta, family = family, control = glm.control(maxit = 1e2), start = coef(lmFit))
}
ClSl2 = tryCatch(
glm(y ~ Eta - 1, family = family),
error = function(e)
T,
warning = function(w)
T
)
if(is.logical(ClSl2)) {
lmFit = lm(y ~ Eta - 1)
ClSl2 = glm(y ~ Eta - 1, family = family, control = glm.control(maxit = 1e2), start = coef(lmFit))
}
CalibrStats = c("Calibration intercept" = unname(coef(ClInt)), "Calibration slope" = unname(coef(ClSl)[2]))
ClIntCL = confint(ClInt, level = confLevel)
ClSlCL = confint(ClSl, level = confLevel)[2, ]
y = y[order(yHat)]
Eta = Eta[order(yHat)]
yHat = sort(yHat)
calCurves = list()
if(GLMCal) {
glmFit = glm(y ~ Eta, family = family)
rangeY = range(glmFit$fitted)
glmCal = data.frame(x = yHat, y = fitted(glmFit))
calCurves$GLMCalibration = glmCal
}
if(Smooth) {
argzSmooth$formula = y ~ yHat
SmFit <- Sm <- do.call("loess", argzSmooth)
Sm = data.frame(Sm$x, Sm$fitted)
rangeY = if(GLMCal) c(min(rangeY, SmFit$fitted), max(rangeY, SmFit$fitted)) else range(SmFit$fitted)
calCurves$FlexibleCalibration = Sm
}
xLim = if(is.null(xLim)) range(yHat) else xLim
yLim = if(is.null(yLim)) c(min(c(xLim, rangeY)), max(c(xLim, rangeY))) else yLim
yLim[1] =
if(yLim[1] <= 0.5) {
0 - 0.1 * diff(range(yLim))
} else {
yLim[1] * 0.9
}
if(plot) {
plot(mean(xLim), mean(yLim), col = "white", pch = 1, xlab = xlab, ylab = ylab,
xlim = xLim, ylim = yLim, main = Title, ...)
clip(min(c(xLim, yHat)), max(c(xLim, yHat)), min(c(yLim, rangeY)), max(c(yLim, rangeY)))
abline(0, 1, col = colIdeal, lwd = lwdIdeal, lty = ltyIdeal)
}
labLeg = "Ideal"
colLeg = colIdeal
ltyLeg = ltyIdeal
lwdLeg = lwdIdeal
if(Smooth) {
if(plot)
lines(Sm, lty = ltySmooth, lwd = lwdSmooth, col = colSmooth)
if(confLimitsSmooth != "none") {
if(confLimitsSmooth == "bootstrap") {
yHatGrid = seq(min(yHat), max(yHat), length = 200)
resBoot = replicate(2000, bootSamples(y, yHat, yHatGrid))
clBoot = apply(resBoot, 1, quantile, c(0.025, 0.975))
dfCL = data.frame(x = yHatGrid, ymin = clBoot[1, ], ymax = clBoot[2, ])
rownames(dfCL) = NULL
} else {
cl.loess = predict(SmFit, type = "fitted", se = TRUE)
dfCL = data.frame(x = yHat, ymin = with(cl.loess, fit - qnorm(1 - a / 2) * se.fit),
ymax = with(cl.loess, fit + qnorm(1 - a / 2) * se.fit))
}
if(plot)
with(dfCL,
polygon(
x = c(x, rev(x)),
y = c(ymax,
rev(ymin)),
col = rgb(177, 177, 177, 177, maxColorValue = 255),
border = NA
)
)
}
labLeg = c(labLeg, "Flexible calibration")
colLeg = c(colLeg, colSmooth)
ltyLeg = c(ltyLeg, ltySmooth)
lwdLeg = c(lwdLeg, lwdSmooth)
}
if(GLMCal) {
if(plot)
lines(yHat, fitted(glmFit), lty = ltyGLMCal, lwd = lwdGLMCal, col = colGLMCal)
labLeg = c(labLeg, "GLM calibration")
colLeg = c(colLeg, colGLMCal)
ltyLeg = c(ltyLeg, ltyGLMCal)
lwdLeg = c(lwdLeg, lwdGLMCal)
}
if(plot)
do.call("clip", as.list(par()$usr))
if(EmpiricalDistribution) {
x <- yHat
bins <- seq(min(x), max(x), length = 101)
f0 <- table(cut(x, bins))
bins <- (bins[-101])
maxf <- max(f0)
f0 <- (0.1 * f0) / maxf
if(plot) {
segments(bins, yLim[1], bins, yLim[1] + length.seg * f0)
lines(c(min(bins) - 0.01, max(bins) + 0.01), c(yLim[1], yLim[1]))
}
}
if(AddStats & plot) {
StatsPlot = paste0('Calibration\n',
'...intercept: ',
sprintf(paste0("%.", Digits, "f"), CalibrStats[1]), '\n',
'...slope: ',
sprintf(paste0("%.", Digits, "f"), CalibrStats[2]), '\n')
if(is.null(posStats))
      text(xLim[1], yLim[2] * 0.85, StatsPlot, pos = 4, cex = cexStats)
else
text(posStats[1], posStats[2], StatsPlot, pos = 4, cex = cexStats)
}
if(plot) {
if(Legend)
if(is.character(legendPos))
legend(legendPos, legend = labLeg, col = colLeg, lty = ltyLeg, bty = "n", lwd = lwdLeg)
else
legend(legendPos[1], legendPos[2], legend = labLeg, col = colLeg, lty = ltyLeg, bty = "n", lwd = lwdLeg)
}
Results =
structure(
list(
call = call,
stats = CalibrStats,
cl.level = confLevel,
Calibration = list(
Intercept = c("Point estimate" = CalibrStats[1],
"Lower confidence limit" = ClIntCL[1],
"Upper confidence limit" = ClIntCL[2]),
Slope = c("Point estimate" = CalibrStats[2],
"Lower confidence limit" = ClSlCL[1],
"Upper confidence limit" = ClSlCL[2])
),
warningMessages = wmess,
CalibrationCurves = calCurves
), class = "GeneralizedCalibrationCurve"
)
  return(Results)
}
|
/scratch/gouwar.j/cran-all/cranData/CalibrationCurves/R/genCalCurve.R
|
#' Print function for a CalibrationCurve object
#'
#' Prints the call, confidence level and values for the performance measures.
#'
#' @param x an object of type CalibrationCurve, resulting from \code{\link{val.prob.ci.2}}.
#' @param ... arguments passed to \code{\link{print}}
#'
#' @seealso \code{\link{val.prob.ci.2}}
#' @return The original \code{CalibrationCurve} object is returned.
print.CalibrationCurve <- function(x, ...) {
cat("Call:\n",
paste(deparse(x$call), sep = "\n", collapse = "\n"),
"\n\n", sep = "")
cat(
paste(
"A ",
x$cl.level * 100,
"% confidence interval is given for the calibration intercept, calibration slope and c-statistic. \n\n",
sep = ""
)
)
print(x$stats, ...)
if(!is.null(x$warningMessages))
for(w in x$warningMessages)
warning(paste0(w, "\n"), immediate. = TRUE)
invisible(x)
}
#' Print function for a ggplotCalibrationCurve object
#'
#' Prints the ggplot, call, confidence level and values for the performance measures.
#'
#' @param x an object of type ggplotCalibrationCurve, resulting from \code{\link{valProbggplot}}.
#' @param ... arguments passed to \code{\link{print}}
#'
#' @seealso \code{\link{valProbggplot}}
#' @return The original \code{ggplotCalibrationCurve} object is returned.
print.ggplotCalibrationCurve <- function(x, ...) {
print(x$ggPlot)
cat("Call:\n",
paste(deparse(x$call), sep = "\n", collapse = "\n"),
"\n\n", sep = "")
cat(
paste(
"A ",
x$cl.level * 100,
"% confidence interval is given for the calibration intercept, calibration slope and c-statistic. \n\n",
sep = ""
)
)
print(x$stats, ...)
if(!is.null(x$warningMessages))
for(w in x$warningMessages)
warning(paste0(w, "\n"), immediate. = TRUE)
invisible(x)
}
#' Print function for a GeneralizedCalibrationCurve object
#'
#' Prints the call, confidence level and values for the performance measures.
#'
#' @param x an object of type GeneralizedCalibrationCurve, resulting from \code{\link{genCalCurve}}.
#' @param ... arguments passed to \code{\link{print}}
#'
#' @seealso \code{\link{genCalCurve}}
#' @return The original \code{GeneralizedCalibrationCurve} object is returned.
print.GeneralizedCalibrationCurve <- function(x, ...) {
cat("Call:\n",
paste(deparse(x$call), sep = "\n", collapse = "\n"),
"\n\n", sep = "")
cat(
paste(
"A ",
x$cl.level * 100,
"% confidence interval is given for the calibration intercept and calibration slope. \n\n",
sep = ""
)
)
print(x$stats, ...)
if(!is.null(x$warningMessages))
for(w in x$warningMessages)
warning(paste0(w, "\n"), immediate. = TRUE)
invisible(x)
}
|
/scratch/gouwar.j/cran-all/cranData/CalibrationCurves/R/printFunction.R
|
#' Internal function
#'
#' Adjusted version of the \code{\link[Hmisc]{rcspline.plot}} function where only the output is returned and no plot is made
#'
#'
#' @param x a numeric predictor
#' @param y a numeric response. For binary logistic regression, \code{y} should be either 0 or 1.
#' @param model \code{"logistic"} or \code{"cox"}. For \code{"cox"}, uses the \code{coxph.fit} function with \code{method="efron"} argument set.
#' @param xrange range for evaluating \code{x}, default is \eqn{f} and \eqn{1 - f} quantiles of \code{x},
#' where \eqn{f = \frac{10}{\max{(n, 200)}}}{f = 10/max(\code{n}, 200)} and \eqn{n} the number of observations
#' @param event event/censoring indicator if \code{model="cox"}. If \code{event} is present, \code{model} is assumed to be \code{"cox"}
#' @param nk number of knots
#' @param knots knot locations, default based on quantiles of \code{x} (by \code{\link[Hmisc]{rcspline.eval}})
#' @param show \code{"xbeta"} or \code{"prob"} - what is plotted on \verb{y}-axis
#' @param adj optional matrix of adjustment variables
#' @param xlab \verb{x}-axis label, default is the \dQuote{label} attribute of \code{x}
#' @param ylab \verb{y}-axis label, default is the \dQuote{label} attribute of \code{y}
#' @param ylim \verb{y}-axis limits for logit or log hazard
#' @param plim \verb{y}-axis limits for probability scale
#' @param plotcl plot confidence limits
#' @param showknots show knot locations with arrows
#' @param add add this plot to an already existing plot
#' @param plot logical to indicate whether a plot has to be made. \code{FALSE} suppresses the plot.
#' @param subset subset of observations to process, e.g. \code{sex == "male"}
#' @param lty line type for plotting estimated spline function
#' @param noprint suppress printing regression coefficients and standard errors
#' @param m for \code{model="logistic"}, plot grouped estimates with triangles. Each group contains \code{m} ordered observations on \code{x}.
#' @param smooth plot nonparametric estimate if \code{model="logistic"} and \code{adj} is not specified
#' @param bass smoothing parameter (see \code{supsmu})
#' @param main main title, default is \code{"Estimated Spline Transformation"}
#' @param statloc location of summary statistics. Default positioning by clicking left mouse button where upper left corner of statistics should appear.
#' Alternative is \code{"ll"} to place below the graph on the lower left, or the actual \code{x} and \code{y} coordinates. Use \code{"none"} to suppress statistics.
#'
#' @return list with components (\samp{knots}, \samp{x}, \samp{xbeta}, \samp{lower}, \samp{upper}) which are respectively the knot locations, design matrix,
#' linear predictor, and lower and upper confidence limits
#' @seealso \code{\link[rms]{lrm}}, \code{\link[rms]{cph}}, \code{\link[Hmisc]{rcspline.eval}}, \code{\link[graphics]{plot}}, \code{\link[stats]{supsmu}},
#' \code{\link[survival:survival-internal]{coxph.fit}}, \code{\link[rms]{lrm.fit}}
.rcspline.plot <- function(x, y, model=c("logistic","cox","ols"), xrange,
event, nk=5, knots=NULL, show=c("xbeta", "prob"),
adj=NULL, xlab, ylab, ylim, plim=c(0,1),
plotcl=TRUE, showknots=TRUE, add=FALSE, plot = TRUE, subset,
lty=1, noprint=FALSE, m, smooth=FALSE, bass=1,
main="auto", statloc)
{
model <- match.arg(model)
show <- match.arg(show)
if(plot) {
oldpar = par(no.readonly = TRUE)
on.exit(par(oldpar))
}
if(! missing(event))
model<-"cox"
if(model == "cox" & missing(event))
stop('event must be given for model="cox"')
if(show == "prob" & ! missing(adj))
stop('show="prob" cannot be used with adj')
if(show == "prob" & model != "logistic")
stop('show="prob" can only be used with model="logistic"')
if(length(x) != length(y))
stop('x and y must have the same length')
if(! missing(event) && length(event) != length(y))
stop('y and event must have the same length')
if(! missing(adj)) {
if(! is.matrix(adj)) adj <- as.matrix(adj)
if(dim(adj)[1] != length(x))
stop('x and adj must have the same length')
}
if(missing(xlab))
xlab <- label(x)
if(missing(ylab))
ylab <- label(y)
isna <- is.na(x) | is.na(y)
if(! missing(event))
isna <- isna | is.na(event)
nadj <- 0
if(! missing(adj)) {
nadj <- ncol(adj)
isna <- isna | apply(is.na(adj), 1, sum) > 0
}
if(! missing(subset))
isna <- isna | (! subset)
x <- x[! isna]
y <- y[! isna]
if(! missing(event))
event <- event[! isna]
if(! missing(adj))
adj <- adj[! isna, ]
n <- length(x)
if(n<6)
stop('fewer than 6 non-missing observations')
if(missing(xrange)) {
frac<-10./max(n, 200)
xrange<-quantile(x, c(frac, 1.-frac))
}
if(missing(knots))
xx <- rcspline.eval(x, nk=nk)
else xx <- rcspline.eval(x, knots)
knots <- attr(xx, "knots")
nk <- length(knots)
df1 <- nk-2
if(model == "logistic") {
b <- rms::lrm.fit(cbind(x, xx, adj), y)
beta <- b$coef
cov <- b$var
model.lr <- b$stats["Model L.R."]
offset <- 1 #to skip over intercept parameter
ylabl <-
if(show == "prob")
"Probability"
else "log Odds"
sampled <- paste("Logistic Regression Model, n=", n," d=", sum(y), sep="")
}
if(model == "cox") {
if(! existsFunction('coxph.fit'))
coxph.fit <- getFromNamespace('coxph.fit', 'survival')
##11mar04
## added coxph.control around iter.max, eps 11mar04
lllin <- coxph.fit(cbind(x, adj), cbind(y, event), strata=NULL,
offset=NULL, init=NULL,
control=coxph.control(iter.max=10, eps=.0001),
method="efron", rownames=NULL)$loglik[2]
b <- coxph.fit(cbind(x, xx, adj), cbind(y, event), strata=NULL,
offset=NULL, init=NULL,
control=coxph.control(iter.max=10, eps=.0001),
method="efron", rownames=NULL)
    beta <- b$coef
    if(! noprint) {
      print(beta)
      print(b$loglik)
    }
cov <- b$var
model.lr<-2*(b$loglik[2]-b$loglik[1])
offset <- 0
ylabl <- "log Relative Hazard"
sampled <- paste("Cox Regression Model, n=",n," events=",sum(event),
sep="")
}
if(model == "logistic"|model == "cox") {
model.df <- nk - 1 + nadj
model.aic <- model.lr-2.*model.df
v <- solve(cov[(1 + offset) : (nk + offset - 1), (1 + offset) : (nk + offset - 1)])
assoc.chi <- beta[(1 + offset) : (nk + offset - 1)] %*% v %*%
beta[(1 + offset) : (nk + offset - 1)]
assoc.df <- nk - 1 #attr(v,"rank")
assoc.p <- 1.-pchisq(assoc.chi, nk - 1)
v <- solve(cov[(2 + offset) : (nk + offset - 1), (2 + offset) : (nk + offset - 1)])
linear.chi <- beta[(2 + offset) : (nk + offset - 1)] %*% v %*%
beta[(2 + offset) : (nk + offset - 1)]
linear.df <- nk - 2 #attr(v,"rank")
linear.p <- 1. - pchisq(linear.chi, linear.df)
if(nadj > 0) {
ntot <- offset + nk - 1 + nadj
v <- solve(cov[(nk + offset) : ntot, (nk + offset) : ntot])
adj.chi <- beta[(nk + offset) : ntot] %*% v %*%
beta[(nk + offset) : ntot]
adj.df <- ncol(v) #attr(v,"rank")
adj.p <- 1. - pchisq(adj.chi, adj.df)
} else {
adj.chi <- 0
adj.p <- 0
}
}
## Evaluate xbeta for expanded x at desired range
xe <- seq(xrange[1], xrange[2], length=600)
if(model == "cox")
xx <- rcspline.eval(xe, knots, inclx=TRUE)
else
xx<- cbind(rep(1, length(xe)), rcspline.eval(xe, knots, inclx=TRUE))
xbeta <- xx %*% beta[1 : (nk - 1 + offset)]
var <- drop(((xx %*% cov[1 : (nk - 1 + offset), 1 : (nk - 1 + offset)])*xx) %*%
rep(1, ncol(xx)))
lower <- xbeta - 1.96*sqrt(var)
upper <- xbeta + 1.96*sqrt(var)
if(show == "prob") {
xbeta <- 1./(1. + exp(-xbeta))
lower <- 1./(1. + exp(-lower))
upper <- 1./(1. + exp(-upper))
}
xlim <- range(pretty(xe))
if(missing(ylim))
ylim <- range(pretty(c(xbeta, if(plotcl) lower, if(plotcl) upper)))
if(main == "auto") {
if(show == "xbeta")
main <- "Estimated Spline Transformation"
else main <- "Spline Estimate of Prob{Y=1}"
}
if(! interactive() & missing(statloc))
statloc<-"ll"
if(plot) {
if(! add) {
oldmar<-par("mar")
if(! missing(statloc) && statloc[1] == "ll")
oldmar[1]<- 11
plot(xe, xbeta, type="n", main=main, xlab=xlab, ylab=ylabl,
xlim=xlim, ylim=ylim)
lines(xe, xbeta, lty=lty)
ltext<-function(z, line, label, cex=.8, adj=0)
{
zz<-z
zz$y<-z$y-(line - 1)*1.2*cex*par("csi")*(par("usr")[4]-par("usr")[3])/
(par("fin")[2]) #was 1.85
text(zz, label, cex=cex, adj=adj)
}
sl<-0
if(missing(statloc)) {
message("Click left mouse button at upper left corner for statistics\n")
z<-locator(1)
statloc<-"l"
} else if(statloc[1] != "none") {
if(statloc[1] == "ll") {
z<-list(x=par("usr")[1], y=par("usr")[3])
sl<-3
} else z<-list(x=statloc[1], y=statloc[2])
}
if(statloc[1] != "none" & (model == "logistic" | model == "cox")) {
rnd <- function(x, r=2) as.single(round(x, r))
ltext(z, 1 + sl, sampled)
ltext(z, 2 + sl, " Statistic X2 df")
chistats<-format(as.single(round(c(model.lr, model.aic,
assoc.chi, linear.chi, adj.chi), 2)))
pvals<-format(as.single(round(c(assoc.p, linear.p, adj.p), 4)))
ltext(z, 3 + sl, paste("Model L.R. ", chistats[1], model.df,
" AIC=", chistats[2]))
ltext(z, 4 + sl, paste("Association Wald ", chistats[3], assoc.df,
" p= ", pvals[1]))
ltext(z, 5 + sl, paste("Linearity Wald ", chistats[4], linear.df,
" p= ", pvals[2]))
if(nadj > 0)ltext(z, 6 + sl, paste("Adjustment Wald " , chistats[5],
adj.df, " p= ", pvals[3]))}
} else lines(xe, xbeta, lty=lty)
if(plotcl) {
#prn(cbind(xe, lower, upper))
lines(xe, lower, lty=2)
lines(xe, upper, lty=2)
}
if(showknots) {
bot.arrow <- par("usr")[3]
top.arrow <- bot.arrow + .05 * (par("usr")[4]-par("usr")[3])
for(i in 1 : nk)
arrows(knots[i], top.arrow, knots[i], bot.arrow, length=.1)
}
if(model == "logistic" & nadj == 0) {
if(smooth) {
z<-supsmu(x, y, bass=bass)
if(show == "xbeta") z$y <- logb(z$y/(1.-z$y))
points(z, cex=.4)
}
if(! missing(m)) {
z<-groupn(x, y, m=m)
if(show == "xbeta") z$y <- logb(z$y/(1.-z$y))
points(z, pch=2, mkh=.05)}
}
}
invisible(list(
knots = knots,
x = xe,
xbeta = xbeta,
lower = lower,
upper = upper
))
}
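# Sketch of assumed internal usage (not run), mirroring the call built in
# val.prob.ci.2() for the rcs-based calibration curve:
#   .rcspline.plot(x = p, y = y, model = "logistic", nk = 5, show = "prob",
#                  statloc = "none", plot = FALSE, showknots = FALSE,
#                  xrange = range(na.omit(p)), lty = 1)
#   # returns list(knots, x, xbeta, lower, upper)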
|
/scratch/gouwar.j/cran-all/cranData/CalibrationCurves/R/rcspline.plot.noprint.R
|
#' Calibration performance
#'
#' The function \code{val.prob.ci.2} is an adaptation of \code{\link{val.prob}} from Frank Harrell's rms package,
#' \url{https://cran.r-project.org/package=rms}. Hence, the descriptions of some of the functions of \code{val.prob.ci.2}
#' come from the original \code{\link{val.prob}}.
#' \cr \cr The key feature of \code{val.prob.ci.2} is the generation of logistic and flexible calibration curves and related statistics.
#' When using this code, please cite: Van Calster, B., Nieboer, D., Vergouwe, Y., De Cock, B., Pencina, M.J., Steyerberg,
#' E.W. (2016). A calibration hierarchy for risk models was defined: from utopia to empirical data. \emph{Journal of Clinical Epidemiology},
#' \bold{74}, pp. 167-176
#'
#' @inheritParams rms::val.prob
#' @param smooth \code{"loess"} generates a flexible calibration curve based on \code{\link{loess}},
#' \code{"rcs"} generates a calibration curves based on restricted cubic splines (see \code{\link{rcs}} and
#' \code{\link[Hmisc]{rcspline.plot}}), \code{"none"} suppresses the flexible curve. We recommend to use loess unless N is large,
#' for example N>5000. Default is \code{"loess"}.
#' @param CL.smooth \code{"fill"} shows pointwise 95\% confidence limits for the flexible calibration curve with a gray
#' area between the lower and upper limits, \code{TRUE} shows pointwise 95\% confidence limits for the flexible calibration curve
#' with dashed lines, \code{FALSE} suppresses the confidence limits. Default is \code{"fill"}.
#' @param CL.BT \code{TRUE} uses confidence limits based on 2000 bootstrap samples, \code{FALSE} uses closed form confidence limits.
#' Default is \code{FALSE}.
#' @param nr.knots specifies the number of knots for rcs-based calibration curve. The default as well as the highest allowed value is 5.
#' In case the specified number of knots leads to estimation problems, then the number of knots is automatically reduced to the closest
#' value without estimation problems.
#' @param dostats specifies whether and which performance measures are shown in the figure.
#' \code{TRUE} shows the \code{"abc"} of model performance (Steyerberg et al., 2011): calibration intercept, calibration slope,
#' and c-statistic. \code{TRUE} is default.
#' \code{FALSE} suppresses the presentation of statistics in the figure. A \code{c()} list of specific stats shows the specified
#' stats. The key stats which are also mentioned in this paper are \code{"C (ROC)"} for the c statistic, \code{"Intercept"} for the
#' calibration intercept, \code{"Slope"} for the calibration slope, and \code{"ECI"} for the estimated calibration index
#' (Van Hoorde et al, 2015). The full list of possible statistics is taken from \code{\link{val.prob}}
#' and augmented with the estimated calibration index: \code{"Dxy", "C (ROC)", "R2", "D", "D:Chi-sq", "D:p", "U", "U:Chi-sq",
#' "U:p", "Q", "Brier", "Intercept", "Slope", "Emax", "Brier scaled", "Eavg", "ECI"}. These statistics are always returned by the function.
#' @param xlim,ylim numeric vectors of length 2, giving the x and y coordinates ranges (see \code{\link{plot.window}})
#' @param cex,cex.leg controls the font size of the statistics (\code{cex}) or plot legend (\code{cex.leg}). Default is 0.75
#' @param roundstats specifies the number of decimals to which the statistics are rounded when shown in the plot. Default is 2.
#' @param d0lab,d1lab controls the labels for events and non-events (i.e. outcome y) for the histograms.
#' Defaults are \code{d1lab="1"} for events and \code{d0lab="0"} for non-events.
#' @param cex.d01 controls the size of the labels for events and non-events. Default is 0.7.
#' @param dist.label controls the horizontal position of the labels for events and non-events. Default is 0.04.
#' @param dist.label2 controls the vertical distance between the labels for events and non-events. Default is 0.03.
#' @param line.bins controls the horizontal (y-axis) position of the histograms. Default is -0.05.
#' @param cutoff puts an arrow at the specified risk cut-off(s). Default is none.
#' @param las controls whether y-axis values are shown horizontally (1) or vertically (0).
#' @param length.seg controls the length of the histogram lines. Default is \code{1}.
#' @param ... arguments to be passed to \code{\link{plot}}, see \code{\link{par}}
#' @param y.intersp character interspacing for vertical line distances of the legend (\code{\link{legend}})
#' @param col.ideal controls the color of the ideal line on the plot. Default is \code{"red"}.
#' @param lwd.ideal controls the line width of the ideal line on the plot. Default is \code{1}.
#' @param lty.ideal linetype of the ideal line. Default is \code{1}.
#' @param logistic.cal \code{TRUE} plots the logistic calibration curve, \code{FALSE} suppresses this curve.
#' Default is \code{FALSE}.
#' @param xlab x-axis label, default is \code{"Predicted Probability"}.
#' @param ylab y-axis label, default is \code{"Observed proportion"}.
#' @param statloc the "abc" of model performance (Steyerberg et al., 2011)-calibration intercept, calibration slope,
#' and c statistic-will be added to the plot, using statloc as the upper left corner of a box (default is c(0,.85)).
#' You can specify a list or a vector. Use locator(1) for the mouse, \code{FALSE} to suppress statistics. This is plotted after
#' the curve legends.
#' @param pl \code{TRUE} to plot the calibration curve(s). If \code{FALSE} no calibration curves will be plotted,
#' but statistics will still be computed and outputted.
#' @param connect.smooth Defaults to \code{TRUE} to draw smoothed estimates using a line. Set to \code{FALSE} to instead use dots at individual estimates
#' @param legendloc if \code{pl=TRUE}, list with components \code{x,y} or vector \code{c(x,y)} for bottom right corner of legend for
#' curves and points. Default is \code{c(.50, .27)} scaled to lim. Use \code{locator(1)} to use the mouse, \code{FALSE} to suppress legend.
#' @param col.log if \code{logistic.cal=TRUE}, the color of the logistic calibration curve. Default is \code{"black"}.
#' @param lty.log if \code{logistic.cal=TRUE}, the linetype of the logistic calibration curve. Default is \code{1}.
#' @param lwd.log if \code{logistic.cal=TRUE}, the line width of the logistic calibration curve. Default is \code{1}.
#' @param col.smooth the color of the flexible calibration curve. Default is \code{"black"}.
#' @param lty.smooth the linetype of the flexible calibration curve. Default is \code{1}.
#' @param lwd.smooth the line width of the flexible calibration curve. Default is \code{1}.
#' @param allowPerfectPredictions Logical, indicates whether perfect predictions (i.e. values of either 0 or 1) are allowed. Default is \code{FALSE}, since we transform
#' the predictions using the logit transformation to calculate the calibration measures. In case of 0 and 1, this results in minus infinity and infinity, respectively. if
#' \code{allowPerfectPredictions = TRUE}, 0 and 1 are replaced by 1e-8 and 1 - 1e-8, respectively.
#' @param argzLoess a list with arguments passed to the \code{\link{loess}} function
#'
#' @param cl.level if \code{dostats=TRUE}, the confidence level for the calculation of the confidence intervals of the calibration intercept,
#' calibration slope and c-statistic. Default is \code{0.95}.
#' @param method.ci method to calculate the confidence interval of the c-statistic. The argument is passed to \code{\link{auc.nonpara.mw}} from
#' the auRoc-package and possible methods to compute the confidence interval are \code{"newcombe"}, \code{"pepe"}, \code{"delong"} or
#' \code{"jackknife"}. Bootstrap-based methods are not available. The default method is \code{"pepe"} and here, the confidence interval is
#' the logit-transformation-based confidence interval as documented in Qin and Hotilovac (2008). See \code{\link{auc.nonpara.mw}} for
#' more information on the other methods.
#'
#' @return An object of type \code{CalibrationCurve} with the following slots:
#' @return \item{call}{the matched call.}
#' @return \item{stats}{a vector containing performance measures of calibration.}
#' @return \item{cl.level}{the confidence level used.}
#' @return \item{Calibration}{contains the calibration intercept and slope, together with their confidence intervals.}
#' @return \item{Cindex}{the value of the c-statistic, together with its confidence interval.}
#' @return \item{warningMessages}{if any, the warning messages that were printed while running the function.}
#' @return \item{CalibrationCurves}{The coordinates for plotting the calibration curves. }
#'
#' @note In order to make use (of the functions) of the package auRoc, the user needs to install JAGS. However, since our package only uses the
#' \code{auc.nonpara.mw} function which does not depend on the use of JAGS, we therefore copied the code and slightly adjusted it when
#' \code{method="pepe"}.
#'
#' @details When using the predicted probabilities of an uninformative model (i.e. equal probabilities for all observations), the model has no predictive value.
#' Consequently, where applicable, the value of the performance measure corresponds to the worst possible theoretical value. For the ECI, for example, this equals 1 (Edlinger et al., 2022).
#'
#' @references Edlinger, M, van Smeden, M, Alber, HF, Wanitschek, M, Van Calster, B. (2022). Risk prediction models for discrete ordinal outcomes: Calibration and the impact of the proportional odds assumption. \emph{Statistics in Medicine}, \bold{41( 8)}, pp. 1334– 1360
#' @references Qin, G., & Hotilovac, L. (2008). Comparison of non-parametric confidence intervals for the area under the ROC curve of a continuous-scale diagnostic test. \emph{Statistical Methods in Medical Research}, \bold{17(2)}, pp. 207-21
#' @references Steyerberg, E.W., Van Calster, B., Pencina, M.J. (2011). Performance measures for prediction models and markers : evaluation of predictions and classifications. \emph{Revista Espanola de Cardiologia}, \bold{64(9)}, pp. 788-794
#' @references Van Calster, B., Nieboer, D., Vergouwe, Y., De Cock, B., Pencina M., Steyerberg E.W. (2016). A calibration hierarchy for risk models was defined: from utopia to empirical data. \emph{Journal of Clinical Epidemiology}, \bold{74}, pp. 167-176
#' @references Van Hoorde, K., Van Huffel, S., Timmerman, D., Bourne, T., Van Calster, B. (2015). A spline-based tool to assess and visualize the calibration of multiclass risk predictions. \emph{Journal of Biomedical Informatics}, \bold{54}, pp. 283-93
#'
#' @importFrom Hmisc cut2
#'
#' @examples
#'
#' # Load package
#' library(CalibrationCurves)
#' set.seed(1783)
#'
#' # Simulate training data
#' X = replicate(4, rnorm(5e2))
#' p0true = binomial()$linkinv(cbind(1, X) %*% c(0.1, 0.5, 1.2, -0.75, 0.8))
#' y = rbinom(5e2, 1, p0true)
#' Df = data.frame(y, X)
#'
#' # Fit logistic model
#' FitLog = lrm(y ~ ., Df)
#'
#' # Simulate validation data
#' Xval = replicate(4, rnorm(5e2))
#' p0true = binomial()$linkinv(cbind(1, Xval) %*% c(0.1, 0.5, 1.2, -0.75, 0.8))
#' yval = rbinom(5e2, 1, p0true)
#' Pred = binomial()$linkinv(cbind(1, Xval) %*% coef(FitLog))
#'
#' # Default calibration plot
#' val.prob.ci.2(Pred, yval)
#'
#' # Adding logistic calibration curves and other additional features
#' val.prob.ci.2(Pred, yval, CL.smooth = TRUE, logistic.cal = TRUE, lty.log = 2,
#' col.log = "red", lwd.log = 1.5)
#'
#' val.prob.ci.2(Pred, yval, CL.smooth = TRUE, logistic.cal = TRUE, lty.log = 9,
#' col.log = "red", lwd.log = 1.5, col.ideal = colors()[10], lwd.ideal = 0.5)
val.prob.ci.2 <- function(p, y, logit, group,
weights = rep(1, length(y)), normwt = FALSE, pl = TRUE,
smooth = c("loess", "rcs", "none"), CL.smooth = "fill",
CL.BT = FALSE, lty.smooth = 1, col.smooth = "black", lwd.smooth = 1,
nr.knots = 5, logistic.cal = FALSE, lty.log = 1,
col.log = "black", lwd.log = 1, xlab = "Predicted probability", ylab = "Observed proportion",
xlim = c(-0.02, 1), ylim = c(-0.15, 1), m, g, cuts, emax.lim = c(0, 1),
legendloc = c(0.50 , 0.27), statloc = c(0, .85), dostats = TRUE, cl.level = 0.95, method.ci = "pepe",
roundstats = 2, riskdist = "predicted", cex = 0.75, cex.leg = 0.75, connect.group = FALSE, connect.smooth = TRUE,
g.group = 4, evaluate = 100, nmin = 0, d0lab = "0", d1lab = "1", cex.d01 = 0.7,
dist.label = 0.04, line.bins = -.05, dist.label2 = .03, cutoff, las = 1, length.seg = 1,
y.intersp = 1, lty.ideal = 1, col.ideal = "red", lwd.ideal = 1, allowPerfectPredictions = FALSE,
argzLoess = alist(degree = 2), ...)
{
call = match.call()
oldpar = par(no.readonly = TRUE)
on.exit(par(oldpar))
smooth <- match.arg(smooth)
if (smooth == "none") {
smooth <- "F"
}
  if (!missing(p)) {
    if (allowPerfectPredictions && any(p > 1 | p < 0, na.rm = TRUE))
      stop("Probabilities can not be > 1 or < 0.")
    else if (!allowPerfectPredictions && any(p >= 1 | p <= 0, na.rm = TRUE))
      stop("Probabilities can not be >= 1 or <= 0.")
  }
if(allowPerfectPredictions) {
if(all(p %in% 0:1))
stop("All predicted values are equal to 0 or 1, implying that the underlying process is deterministic. Please check your model or the input.")
if(any(p %in% c(0, 1))) {
p = sapply(p, function(x) {
if(is.na(x) | is.nan(x))
x
else if(x == 0)
1e-8
else if(x == 1)
1 - 1e-8
else
x
})
wmess = paste0("There are predictions with value 0 or 1! These are replaced by values 1e-8 and 1 - 1e-8, respectively. ",
"Take this into account when interpreting the performance measures, as these are not calculated with the original values.",
"\n\nPlease check your model, as this may be an indication of overfitting. Predictions of 0 or 1 imply that these predicted values are deterministic.\n\n",
"We observe this in the following cases:\n - logistic regression: with quasi-complete separation, the coefficients tend to infinity;\n",
" - tree-based methods: one of the leaf nodes contains only observations with either 0 or 1;\n",
" - neural networks: the weights tend to infinity and this is known as weight/gradient explosion.")
warning(wmess, immediate. = TRUE)
} else {
wmess = NULL
}
} else {
wmess = NULL
}
a = 1 - cl.level
if (missing(p))
p <- 1 / (1 + exp(-logit))
else
logit <- log(p / (1 - p))
if (!all(y %in% 0:1)) {
stop("The vector with the binary outcome can only contain the values 0 and 1.")
}
if (length(p) != length(y))
stop("lengths of p or logit and y do not agree")
names(p) <- names(y) <- names(logit) <- NULL
if (!missing(group)) {
if (length(group) == 1 && is.logical(group) && group)
group <- rep("", length(y))
if (!is.factor(group))
group <-
if (is.logical(group) || is.character(group))
as.factor(group)
else
cut2(group, g = g.group)
names(group) <- NULL
nma <- !(is.na(p + y + weights) | is.na(group))
ng <- length(levels(group))
} else {
nma <- !is.na(p + y + weights)
ng <- 0
}
if(any(nma == FALSE)) {
tmpmess = "There are observations with missing values. These are removed."
warning(tmpmess, immediate. = TRUE)
wmess = c(wmess, tmpmess)
}
if (!is.numeric(nr.knots)) {
stop("Nr.knots must be numeric.")
}
logit <- logit[nma]
y <- y[nma]
p <- p[nma]
if(ng > 0) {
group <- group[nma]
weights <- weights[nma]
    return(val.probg(p, y, group, evaluate, weights, normwt, nmin))
}
# Sort vector with probabilities
y <- y[order(p)]
logit <- logit[order(p)]
p <- p[order(p)]
if (length(p) > 5000 & smooth == "loess") {
warning("Number of observations > 5000, RCS is recommended.",
immediate. = TRUE)
}
if (length(p) > 1000 & CL.BT == TRUE) {
warning("Number of observations is > 1000, this could take a while...",
immediate. = TRUE)
}
if(length(unique(p)) == 1) {
# Adjusted 2022-09-26
P <- mean(y)
Intc <- log(P/(1 - P))
n <- length(y)
D <- -1/n
L01 <- -2 * sum(y * logit - log(1 + exp(logit)), na.rm = TRUE)
L.cal <- -2 * sum(y * Intc - log(1 + exp(Intc)), na.rm = TRUE)
U.chisq <- L01 - L.cal
U.p <- 1 - pchisq(U.chisq, 1)
U <- (U.chisq - 1)/n
Q <- D - U
cl.auc <- ci.auc(y, p, cl.level, method.ci)
stats <- c(0, 0.5, 0, D, 0, 1, U, U.chisq, U.p, Q, mean((y - p[1])^2), Intc, 0, rep(abs(p[1] - P), 2), 1)
names(stats) <- c("Dxy", "C (ROC)", "R2", "D", "D:Chi-sq",
"D:p", "U", "U:Chi-sq", "U:p", "Q", "Brier",
"Intercept", "Slope", "Emax", "Eavg", "ECI")
Results =
structure(
list(
call = call,
stats = stats,
cl.level = cl.level,
Calibration = list(
Intercept = c("Point estimate" = unname(stats["Intercept"]),
"Lower confidence limit" = NA,
"Upper confidence limit" = NA),
Slope = c("Point estimate" = unname(stats["Slope"]),
"Lower confidence limit" = NA,
"Upper confidence limit" = NA)
),
Cindex = c("Point estimate" = unname(stats["C (ROC)"]),
"Lower confidence limit" = cl.auc[2],
"Upper confidence limit" = cl.auc[3])
), class = "CalibrationCurve"
)
return(Results)
}
i <- !is.infinite(logit)
nm <- sum(!i)
if(nm > 0)
warning(paste(nm, "observations deleted from logistic calibration due to probs. of 0 or 1"))
i.2 <- i
f.or <- glm(y[i] ~ logit[i], family = binomial) # lrm(y[i] ~ logit[i])
f <- lrm.fit(logit[i], y[i])
cl.slope <- confint(f, level = cl.level)[2, ]
f2 <- lrm.fit(offset = logit[i], y = y[i])
  if (f2$fail) {
    warning("The lrm function did not converge when computing the calibration intercept!", immediate. = TRUE)
    f2 <- list()
    f2$coef <- NA
    cl.interc <- rep(NA, 2)
  } else {
    cl.interc <- confint(f2, level = cl.level)
  }
stats <- f$stats
cl.auc <- ci.auc(y, p, cl.level, method.ci)
n <- stats["Obs"]
predprob <- seq(emax.lim[1], emax.lim[2], by = 0.0005)
lt <- f$coef[1] + f$coef[2] * log(predprob/(1 - predprob))
calp <- 1/(1 + exp( - lt))
emax <- max(abs(predprob - calp))
if (pl) {
plot(0.5, 0.5, xlim = xlim, ylim = ylim, type = "n", xlab = xlab,
ylab = ylab, las=las,...)
clip(0,1,0,1)
abline(0, 1, lty = lty.ideal,col=col.ideal,lwd=lwd.ideal)
do.call("clip", as.list(par()$usr))
calCurves = list()
lt <- lty.ideal
lw.d <- lwd.ideal
all.col <- col.ideal
leg <- "Ideal"
marks <- -1
if (logistic.cal) {
lt <- c(lt, lty.log)
lw.d <- c(lw.d, lwd.log)
all.col <- c(all.col, col.log)
leg <- c(leg, "Logistic calibration")
marks <- c(marks,-1)
}
if (smooth != "F") {
all.col <- c(all.col, col.smooth)
}
if (smooth == "loess") {
#Sm <- lowess(p,y,iter=0)
argzLoess$formula = y ~ p
Sm <- do.call("loess", argzLoess)
Sm <- data.frame(Sm$x, Sm$fitted)
Sm.01 <- Sm
if (connect.smooth == TRUE & CL.smooth != "fill") {
clip(0, 1, 0, 1)
lines(Sm,
lty = lty.smooth,
lwd = lwd.smooth,
col = col.smooth)
do.call("clip", as.list(par()$usr))
lt <- c(lt, lty.smooth)
lw.d <- c(lw.d, lwd.smooth)
marks <- c(marks,-1)
} else if (connect.smooth == FALSE & CL.smooth != "fill") {
clip(0, 1, 0, 1)
points(Sm, col = col.smooth)
do.call("clip", as.list(par()$usr))
lt <- c(lt, 0)
lw.d <- c(lw.d, 1)
marks <- c(marks, 1)
}
if (CL.smooth == TRUE | CL.smooth == "fill") {
to.pred <- seq(min(p), max(p), length = 200)
if (CL.BT == TRUE) {
res.BT = replicate(2000, BT.samples(y, p, to.pred))
CL.BT = apply(res.BT, 1, quantile, c(0.025, 0.975))
colnames(CL.BT) = to.pred
dfCL = data.frame(x = to.pred, y = apply(res.BT, 1, quantile, 0.5), ymin = CL.BT[1, ], ymax = CL.BT[2, ])
rownames(dfCL) = NULL
if (CL.smooth == "fill") {
clip(0, 1, 0, 1)
polygon(
x = c(to.pred, rev(to.pred)),
y = c(CL.BT[2, ],
rev(CL.BT[1, ])),
col = rgb(177, 177, 177, 177, maxColorValue = 255),
border = NA
)
if (connect.smooth == T) {
lines(Sm,
lty = lty.smooth,
lwd = lwd.smooth,
col = col.smooth)
lt <- c(lt, lty.smooth)
lw.d <- c(lw.d, lwd.smooth)
marks <- c(marks,-1)
} else if (connect.smooth == FALSE) {
points(Sm, col = col.smooth)
lt <- c(lt, 0)
lw.d <- c(lw.d, 1)
marks <- c(marks, 1)
}
do.call("clip", as.list(par()$usr))
leg <- c(leg, "Flexible calibration (Loess)")
} else{
clip(0, 1, 0, 1)
lines(to.pred,
CL.BT[1, ],
lty = 2,
lwd = 1,
col = col.smooth)
clip(0, 1, 0, 1)
lines(to.pred,
CL.BT[2, ],
lty = 2,
lwd = 1,
col = col.smooth)
do.call("clip", as.list(par()$usr))
leg <-
c(leg, "Flexible calibration (Loess)", "CL flexible")
lt <- c(lt, 2)
lw.d <- c(lw.d, 1)
all.col <- c(all.col, col.smooth)
marks <- c(marks, -1)
}
} else{
Sm.0 = loess(y ~ p, degree = 2)
cl.loess = predict(Sm.0, type = "fitted", se = TRUE)
dfCL = data.frame(x = p, ymin = with(cl.loess, fit - qnorm(1 - a / 2) * se.fit), ymax = with(cl.loess, fit + qnorm(1 - a / 2) * se.fit))
clip(0, 1, 0, 1)
if (CL.smooth == "fill") {
polygon(
x = c(Sm.0$x, rev(Sm.0$x)),
y = c(
dfCL$ymax,
rev(dfCL$ymin)
),
col = rgb(177, 177, 177, 177, maxColorValue = 255),
border = NA
)
if (connect.smooth == TRUE) {
lines(Sm,
lty = lty.smooth,
lwd = lwd.smooth,
col = col.smooth)
lt <- c(lt, lty.smooth)
lw.d <- c(lw.d, lwd.smooth)
marks <- c(marks,-1)
} else if (connect.smooth == FALSE) {
points(Sm, col = col.smooth)
lt <- c(lt, 0)
lw.d <- c(lw.d, 1)
marks <- c(marks, 1)
}
do.call("clip", as.list(par()$usr))
leg <- c(leg, "Flexible calibration (Loess)")
} else{
lines(
Sm.0$x,
dfCL$ymax,
lty = 2,
lwd = 1,
col = col.smooth
)
lines(
Sm.0$x,
dfCL$ymin,
lty = 2,
lwd = 1,
col = col.smooth
)
do.call("clip", as.list(par()$usr))
leg <-
c(leg, "Flexible calibration (Loess)", "CL flexible")
lt <- c(lt, 2)
lw.d <- c(lw.d, 1)
all.col <- c(all.col, col.smooth)
marks <- c(marks, -1)
}
}
dfCL[dfCL$ymax < 0, "ymax"] <- dfCL[dfCL$ymin < 0, "ymin"] <- 0
dfCL[dfCL$ymax > 1, "ymax"] <- dfCL[dfCL$ymin > 1, "ymin"] <- 1
} else{
leg <- c(leg, "Flexible calibration (Loess)")
}
cal.smooth <- approx(Sm.01, xout = p, ties = "ordered")$y
eavg <- mean(abs(p - cal.smooth))
ECI <- mean((p - cal.smooth) ^ 2) * 100
if(any(Sm$y < 0)) {
sel = which(Sm$y < 0)
sel = c(sel[length(sel)], sel[length(sel)] + 1)
tmp = Sm[sel, ]
Sm = Sm[Sm$y >= 0 & Sm$y <= 1, ]
Sm = rbind.data.frame(
data.frame(x = predict(lm(x ~ y, data = tmp), data.frame(y = 0)), y = 0),
Sm
)
}
colnames(Sm) = c("x", "y")
if(exists("dfCL", envir = environment())) {
flexCal = if("CL.BT" %in% names(call) && call$CL.BT) list(loessFit = Sm, BootstrapConfidenceLimits = dfCL) else merge(Sm, dfCL, by = "x")
} else {
flexCal = Sm
}
calCurves$FlexibleCalibration = flexCal
}
if (smooth == "rcs") {
par(lwd = lwd.smooth, bty = "n", col = col.smooth)
argzRCS = alist(x = p,
y = y,
model = "logistic",
nk = nr.knots,
show = "prob",
statloc = "none",
plot = TRUE,
add = TRUE,
showknots = FALSE,
xrange = c(min(na.omit(p)), max(na.omit(p))),
lty = lty.smooth)
    nkDecrease <- function(Argz) {
      tryCatch(
        do.call(".rcspline.plot", Argz),
        error = function(e) {
          nk = eval(Argz$nk)
          if(nk <= 3)
            stop("nk = 3 led to estimation problems.")
          warning(paste0("The number of knots led to estimation problems, nk will be set to ", nk - 1), immediate. = TRUE)
          Argz$nk = nk - 1
          nkDecrease(Argz)
        }
      )
    }
rcsFit = nkDecrease(argzRCS)
rcsDf = as.data.frame(rcsFit)
calCurves$RCS = rcsDf
par(lwd = 1, bty = "o", col = "black")
leg <- c(leg, "Flexible calibration (RCS)", "CL flexible")
lt <- c(lt, lty.smooth, 2)
lw.d <- c(lw.d, rep(lwd.smooth, 2))
all.col <- c(all.col, col.smooth)
marks <- c(marks, -1, -1)
}
if (!missing(m) | !missing(g) | !missing(cuts)) {
if (!missing(m))
q <- cut2(p,
m = m,
levels.mean = TRUE,
digits = 7)
else if (!missing(g))
q <- cut2(p,
g = g,
levels.mean = TRUE,
digits = 7)
else if (!missing(cuts))
q <- cut2(p,
cuts = cuts,
levels.mean = TRUE,
digits = 7)
means <- as.single(levels(q))
prop <- tapply(y, q, function(x)
mean(x, na.rm = TRUE))
points(means, prop, pch = 2, cex = 1)
#18.11.02: CI triangles
ng <- tapply(y, q, length)
og <- tapply(y, q, sum)
ob <- og / ng
se.ob <- sqrt(ob * (1 - ob) / ng)
g <- length(as.single(levels(q)))
for (i in 1:g)
lines(c(means[i], means[i]), c(prop[i], min(1, prop[i] + 1.96 * se.ob[i])), type =
"l")
for (i in 1:g)
lines(c(means[i], means[i]), c(prop[i], max(0, prop[i] - 1.96 * se.ob[i])), type =
"l")
if (connect.group) {
lines(means, prop)
lt <- c(lt, 1)
lw.d <- c(lw.d, 1)
}
else {
lt <- c(lt, 0)
lw.d <- c(lw.d, 0)
}
leg <- c(leg, "Grouped observations")
all.col <- c(all.col, col.smooth)
marks <- c(marks, 2)
}
}
lr <- stats["Model L.R."]
p.lr <- stats["P"]
D <- (lr - 1) / n
L01 <- -2 * sum(y * logit - logb(1 + exp(logit)), na.rm = TRUE)
U.chisq <- L01 - f$deviance[2]
p.U <- 1 - pchisq(U.chisq, 2)
U <- (U.chisq - 2) / n
Q <- D - U
Dxy <- stats["Dxy"]
C <- stats["C"]
R2 <- stats["R2"]
B <- sum((p - y) ^ 2) / n
# ES 15dec08 add Brier scaled
Bmax <- mean(y) * (1 - mean(y)) ^ 2 + (1 - mean(y)) * mean(y) ^ 2
Bscaled <- 1 - B / Bmax
stats <- c(Dxy,
C,
R2,
D,
lr,
p.lr,
U,
U.chisq,
p.U,
Q,
B,
f2$coef[1],
f$coef[2],
emax,
Bscaled)
names(stats) <- c(
"Dxy",
"C (ROC)",
"R2",
"D",
"D:Chi-sq",
"D:p",
"U",
"U:Chi-sq",
"U:p",
"Q",
"Brier",
"Intercept",
"Slope",
"Emax",
"Brier scaled"
)
if (smooth == "loess")
stats <- c(stats, c(Eavg = eavg), c(ECI = ECI))
# Cut off definition
if(!missing(cutoff)) {
arrows(x0=cutoff,y0=.1,x1=cutoff,y1=-0.025,length=.15)
}
if(pl) {
if (min(p) > plogis(-7) | max(p) < plogis(7)) {
lrm.fit.1 = lrm(y[i.2] ~ qlogis(p[i.2]))
logCal = data.frame(x = p[i.2], y = plogis(lrm.fit.1$linear.predictors))
if(logistic.cal) {
lines(
p[i.2],
plogis(lrm.fit.1$linear.predictors),
lwd = lwd.log,
lty = lty.log,
col = col.log
)
calCurves$LogisticCalibration = logCal
}
} else {
logit = seq(-7, 7, length = 200)
prob = 1 / (1 + exp(-logit))
pHat = binomial()$linkinv(cbind(1, logit) %*% coef(f))
logCal = data.frame(x = prob, y = pHat)
if (logistic.cal) {
lines(prob,
pHat,
lty = lty.log,
lwd = lwd.log,
col = col.log)
calCurves$LogisticCalibration = logCal
}
}
lp <- legendloc
if (!is.logical(lp)) {
if (!is.list(lp))
lp <- list(x = lp[1], y = lp[2])
legend(lp, leg, lty = lt, pch = marks, cex = cex.leg, bty = "n",lwd=lw.d,
col=all.col,y.intersp = y.intersp)
}
if(!is.logical(statloc)) {
if(dostats[1] == TRUE){
stats.2 <- paste('Calibration\n',
'...intercept: '
, sprintf(paste("%.", roundstats, "f", sep = ""), stats["Intercept"]), " (",
sprintf(paste("%.", roundstats, "f", sep = ""), cl.interc[1]), " to ",
sprintf(paste("%.", roundstats, "f", sep = ""), cl.interc[2]), ")", '\n',
'...slope: '
, sprintf(paste("%.", roundstats, "f", sep = ""), stats["Slope"]), " (",
sprintf(paste("%.", roundstats, "f", sep = ""), cl.slope[1]), " to ",
sprintf(paste("%.", roundstats, "f", sep = ""), cl.slope[2]), ")", '\n',
'Discrimination\n',
'...c-statistic: '
, sprintf(paste("%.", roundstats, "f", sep = ""), stats["C (ROC)"]), " (",
sprintf(paste("%.", roundstats, "f", sep = ""), cl.auc[2]), " to ",
sprintf(paste("%.", roundstats, "f", sep = ""), cl.auc[3]), ")"
, sep = '')
text(statloc[1], statloc[2], stats.2, pos = 4, cex = cex)
} else {
dostats <- dostats
leg <- format(names(stats)[dostats]) #constant length
leg <- paste(leg, ":", format(stats[dostats], digits=roundstats), sep =
"")
if(!is.list(statloc))
statloc <- list(x = statloc[1], y = statloc[2])
text(statloc, paste(format(names(stats[dostats])),
collapse = "\n"), adj = 0, cex = cex)
text(statloc$x + (xlim[2]-xlim[1])/3 , statloc$y, paste(
format(round(stats[dostats], digits=roundstats)), collapse =
"\n"), adj = 1, cex = cex)
}
}
if(is.character(riskdist)) {
if (riskdist == "calibrated") {
x <- f$coef[1] + f$coef[2] * log(p / (1 - p))
x <- 1 / (1 + exp(-x))
x[p == 0] <- 0
x[p == 1] <- 1
}
else
x <- p
bins <- seq(0, min(1, max(xlim)), length = 101)
x <- x[x >= 0 & x <= 1]
#08.04.01,yvon: distribution of predicted prob according to outcome
f0 <- table(cut(x[y == 0], bins))
f1 <- table(cut(x[y == 1], bins))
j0 <- f0 > 0
j1 <- f1 > 0
bins0 <- (bins[-101])[j0]
bins1 <- (bins[-101])[j1]
f0 <- f0[j0]
f1 <- f1[j1]
maxf <- max(f0, f1)
f0 <- (0.1 * f0) / maxf
f1 <- (0.1 * f1) / maxf
segments(bins1, line.bins, bins1, length.seg * f1 + line.bins)
segments(bins0, line.bins, bins0, length.seg * -f0 + line.bins)
lines(c(min(bins0, bins1) - 0.01, max(bins0, bins1) + 0.01), c(line.bins, line.bins))
text(max(bins0, bins1) + dist.label,
line.bins + dist.label2,
d1lab,
cex = cex.d01)
text(max(bins0, bins1) + dist.label,
line.bins - dist.label2,
d0lab,
cex = cex.d01)
}
}
Results =
structure(
list(
call = call,
stats = stats,
cl.level = cl.level,
Calibration = list(
Intercept = c("Point estimate" = unname(stats["Intercept"]),
"Lower confidence limit" = cl.interc[1],
"Upper confidence limit" = cl.interc[2]),
Slope = c("Point estimate" = unname(stats["Slope"]),
"Lower confidence limit" = cl.slope[1],
"Upper confidence limit" = cl.slope[2])
),
Cindex = c("Point estimate" = unname(stats["C (ROC)"]),
"Lower confidence limit" = cl.auc[2],
"Upper confidence limit" = cl.auc[3]),
warningMessages = wmess,
CalibrationCurves = calCurves
), class = "CalibrationCurve"
)
return(Results)
}
|
/scratch/gouwar.j/cran-all/cranData/CalibrationCurves/R/val.prob.ci.2.R
|
#' Calibration performance: ggplot version
#'
#' The function \code{valProbggplot} is an adaptation of \code{\link{val.prob}} from Frank Harrell's rms package,
#' \url{https://cran.r-project.org/package=rms}. Hence, the description of some of the functions of \code{valProbggplot}
#' come from the original \code{\link{val.prob}}.
#' \cr \cr The key feature of \code{valProbggplot} is the generation of logistic and flexible calibration curves and related statistics.
#' When using this code, please cite: Van Calster, B., Nieboer, D., Vergouwe, Y., De Cock, B., Pencina, M.J., Steyerberg,
#' E.W. (2016). A calibration hierarchy for risk models was defined: from utopia to empirical data. \emph{Journal of Clinical Epidemiology},
#' \bold{74}, pp. 167-176
#'
#' @inheritParams rms::val.prob
#' @param smooth \code{"loess"} generates a flexible calibration curve based on \code{\link{loess}},
#' \code{"rcs"} generates a calibration curves based on restricted cubic splines (see \code{\link{rcs}} and
#' \code{\link[Hmisc]{rcspline.plot}}), \code{"none"} suppresses the flexible curve. We recommend using loess unless N is
#' large (for example, N > 5000). Default is \code{"loess"}.
#' @param CL.smooth \code{"fill"} shows pointwise 95\% confidence limits for the flexible calibration curve with a gray
#' area between the lower and upper limits, \code{TRUE} shows pointwise 95\% confidence limits for the flexible calibration curve
#' with dashed lines, \code{FALSE} suppresses the confidence limits. Default is \code{"fill"}.
#' @param CL.BT \code{TRUE} uses confidence limits based on 2000 bootstrap samples, \code{FALSE} uses closed form confidence limits.
#' Default is \code{FALSE}.
#' @param nr.knots specifies the number of knots for rcs-based calibration curve. The default as well as the highest allowed value is 5.
#' In case the specified number of knots leads to estimation problems, then the number of knots is automatically reduced to the closest
#' value without estimation problems.
#' @param dostats specifies whether and which performance measures are shown in the figure.
#' \code{TRUE} shows the \code{"abc"} of model performance (Steyerberg et al., 2011): calibration intercept, calibration slope,
#' and c-statistic. \code{TRUE} is default.
#' \code{FALSE} suppresses the presentation of statistics in the figure. A \code{c()} list of specific stats shows the specified
#' stats. The key stats which are also mentioned in this paper are \code{"C (ROC)"} for the c statistic, \code{"Intercept"} for the
#' calibration intercept, \code{"Slope"} for the calibration slope, and \code{"ECI"} for the estimated calibration index
#' (Van Hoorde et al, 2015). The full list of possible statistics is taken from \code{\link{val.prob}}
#' and augmented with the estimated calibration index: \code{"Dxy", "C (ROC)", "R2", "D", "D:Chi-sq", "D:p", "U", "U:Chi-sq",
#' "U:p", "Q", "Brier", "Intercept", "Slope", "Emax", "Brier scaled", "Eavg", "ECI"}. These statistics are always returned by the function.
#' @param xlim,ylim numeric vectors of length 2, giving the x and y coordinates ranges (see \code{\link{xlim}} and \code{\link{ylim}}).
#' @param size,size.leg controls the font size of the statistics (\code{size}) or plot legend (\code{size.leg}). Default is 3 and 5, respectively.
#' @param roundstats specifies the number of decimals to which the statistics are rounded when shown in the plot. Default is 2.
#' @param d0lab,d1lab controls the labels for events and non-events (i.e. outcome y) for the histograms.
#' Defaults are \code{d1lab="1"} for events and \code{d0lab="0"} for non-events.
#' @param size.d01 controls the size of the labels for events and non-events. Default is 5.
#' @param dist.label controls the horizontal position of the labels for events and non-events. Default is 0.01.
#' @param dist.label2 controls the vertical distance between the labels for events and non-events. Default is 0.04.
#' @param line.bins controls the horizontal (y-axis) position of the histograms. Default is -0.05.
#' @param cutoff puts an arrow at the specified risk cut-off(s). Default is none.
#' @param length.seg controls the length of the histogram lines. Default is \code{0.85}.
#' @param col.ideal controls the color of the ideal line on the plot. Default is \code{"red"}.
#' @param lwd.ideal controls the line width of the ideal line on the plot. Default is \code{1}.
#' @param lty.ideal linetype of the ideal line. Default is \code{1}.
#' @param logistic.cal \code{TRUE} plots the logistic calibration curve, \code{FALSE} suppresses this curve. Default is \code{FALSE}.
#' @param xlab x-axis label, default is \code{"Predicted probability"}.
#' @param ylab y-axis label, default is \code{"Observed proportion"}.
#' @param statloc the "abc" of model performance (Steyerberg et al., 2011)-calibration intercept, calibration slope,
#' and c statistic-will be added to the plot, using statloc as the upper left corner of a box (default is \code{c(0, .85)}).
#' You can specify a list or a vector. Use \code{locator(1)} for the mouse, \code{FALSE} to suppress statistics. This is plotted after
#' the curve legends.
#' @param pl \code{TRUE} to plot the calibration curve(s). If \code{FALSE} no calibration curves will be plotted,
#' but statistics will still be computed and outputted.
#' @param connect.smooth Defaults to \code{TRUE} to draw the smoothed estimates as a line. Set to \code{FALSE} to instead use dots at the individual estimates.
#' @param legendloc if \code{pl=TRUE}, list with components \code{x,y} or vector \code{c(x,y)} for bottom right corner of legend for
#' curves and points. Default is \code{c(.50, .27)} scaled to lim. Use \code{locator(1)} to use the mouse, \code{FALSE} to suppress legend.
#' @param col.log if \code{logistic.cal=TRUE}, the color of the logistic calibration curve. Default is \code{"black"}.
#' @param lty.log if \code{logistic.cal=TRUE}, the linetype of the logistic calibration curve. Default is \code{1}.
#' @param lwd.log if \code{logistic.cal=TRUE}, the line width of the logistic calibration curve. Default is \code{1}.
#' @param col.smooth the color of the flexible calibration curve. Default is \code{"black"}.
#' @param lty.smooth the linetype of the flexible calibration curve. Default is \code{1}.
#' @param lwd.smooth the line width of the flexible calibration curve. Default is \code{1}.
#' @param cl.level if \code{dostats=TRUE}, the confidence level for the calculation of the confidence intervals of the calibration intercept,
#' calibration slope and c-statistic. Default is \code{0.95}.
#' @param method.ci method to calculate the confidence interval of the c-statistic. The argument is passed to \code{\link{auc.nonpara.mw}} from
#' the auRoc-package and possible methods to compute the confidence interval are \code{"newcombe"}, \code{"pepe"}, \code{"delong"} or
#' \code{"jackknife"}. Bootstrap-based methods are not available. The default method is \code{"pepe"} and here, the confidence interval is
#' the logit-transformation-based confidence interval as documented in Qin and Hotilovac (2008). See \code{\link{auc.nonpara.mw}} for
#' more information on the other methods.
#' @param allowPerfectPredictions Logical, indicates whether perfect predictions (i.e. values of either 0 or 1) are allowed. Default is \code{FALSE}, since we transform
#' the predictions using the logit transformation to calculate the calibration measures. In case of 0 and 1, this results in minus infinity and infinity, respectively. If
#' \code{allowPerfectPredictions = TRUE}, 0 and 1 are replaced by 1e-8 and 1 - 1e-8, respectively.
#' @param argzLoess a list with arguments passed to the \code{\link{loess}} function
#'
#' @return An object of type \code{ggplotCalibrationCurve} with the following slots:
#' @return \item{call}{the matched call.}
#' @return \item{ggPlot}{the ggplot object.}
#' @return \item{stats}{a vector containing performance measures of calibration.}
#' @return \item{cl.level}{the confidence level used.}
#' @return \item{Calibration}{contains the calibration intercept and slope, together with their confidence intervals.}
#' @return \item{Cindex}{the value of the c-statistic, together with its confidence interval.}
#' @return \item{warningMessages}{if any, the warning messages that were printed while running the function.}
#' @return \item{CalibrationCurves}{The coordinates for plotting the calibration curves. }
#'
#' @note In order to make use (of the functions) of the package auRoc, the user needs to install JAGS. However, since our package only uses the
#' \code{auc.nonpara.mw} function which does not depend on the use of JAGS, we therefore copied the code and slightly adjusted it when
#' \code{method="pepe"}.
#'
#' @details When using the predicted probabilities of an uninformative model (i.e. equal probabilities for all observations), the model has no predictive value.
#' Consequently, where applicable, the value of the performance measure corresponds to the worst possible theoretical value. For the ECI, for example, this equals 1 (Edlinger et al., 2022).
#'
#' @references Edlinger, M, van Smeden, M, Alber, HF, Wanitschek, M, Van Calster, B. (2022). Risk prediction models for discrete ordinal outcomes: Calibration and the impact of the proportional odds assumption. \emph{Statistics in Medicine}, \bold{41( 8)}, pp. 1334– 1360
#' @references Qin, G., & Hotilovac, L. (2008). Comparison of non-parametric confidence intervals for the area under the ROC curve of a continuous-scale diagnostic test. \emph{Statistical Methods in Medical Research}, \bold{17(2)}, pp. 207-21
#' @references Steyerberg, E.W., Van Calster, B., Pencina, M.J. (2011). Performance measures for prediction models and markers : evaluation of predictions and classifications. \emph{Revista Espanola de Cardiologia}, \bold{64(9)}, pp. 788-794
#' @references Van Calster, B., Nieboer, D., Vergouwe, Y., De Cock, B., Pencina M., Steyerberg E.W. (2016). A calibration hierarchy for risk models was defined: from utopia to empirical data. \emph{Journal of Clinical Epidemiology}, \bold{74}, pp. 167-176
#' @references Van Hoorde, K., Van Huffel, S., Timmerman, D., Bourne, T., Van Calster, B. (2015). A spline-based tool to assess and visualize the calibration of multiclass risk predictions. \emph{Journal of Biomedical Informatics}, \bold{54}, pp. 283-93
#'
#' @importFrom Hmisc cut2
#' @import ggplot2
#'
#' @examples
#'
#' # Load package
#' library(CalibrationCurves)
#' set.seed(1783)
#'
#' # Simulate training data
#' X = replicate(4, rnorm(5e2))
#' p0true = binomial()$linkinv(cbind(1, X) %*% c(0.1, 0.5, 1.2, -0.75, 0.8))
#' y = rbinom(5e2, 1, p0true)
#' Df = data.frame(y, X)
#'
#' # Fit logistic model
#' FitLog = lrm(y ~ ., Df)
#'
#' # Simulate validation data
#' Xval = replicate(4, rnorm(5e2))
#' p0true = binomial()$linkinv(cbind(1, Xval) %*% c(0.1, 0.5, 1.2, -0.75, 0.8))
#' yval = rbinom(5e2, 1, p0true)
#' Pred = binomial()$linkinv(cbind(1, Xval) %*% coef(FitLog))
#'
#' # Default calibration plot
#' valProbggplot(Pred, yval)
#'
#' # Adding logistic calibration curves and other additional features
#' valProbggplot(Pred, yval, CL.smooth = TRUE, logistic.cal = TRUE, lty.log = 2,
#' col.log = "red", lwd.log = 1.5)
#'
#' valProbggplot(Pred, yval, CL.smooth = TRUE, logistic.cal = TRUE, lty.log = 9,
#' col.log = "red", lwd.log = 1.5, col.ideal = colors()[10], lwd.ideal = 0.5)
valProbggplot <- function(p, y, logit, group,
weights = rep(1, length(y)), normwt = FALSE, pl = TRUE,
smooth = c("loess", "rcs", "none"), CL.smooth = "fill",
CL.BT = FALSE, lty.smooth = 1, col.smooth = "black", lwd.smooth = 1,
nr.knots = 5, logistic.cal = FALSE, lty.log = 1,
col.log = "black", lwd.log = 1, xlab = "Predicted probability", ylab = "Observed proportion",
xlim = c(-0.02, 1), ylim = c(-0.15, 1), m, g, cuts, emax.lim = c(0, 1),
legendloc = c(0.50 , 0.27), statloc = c(0, .85), dostats = TRUE, cl.level = 0.95, method.ci = "pepe",
roundstats = 2, riskdist = "predicted", size = 3, size.leg = 5, connect.group = FALSE, connect.smooth = TRUE,
g.group = 4, evaluate = 100, nmin = 0, d0lab = "0", d1lab = "1", size.d01 = 5,
dist.label = 0.01, line.bins = -.05, dist.label2 = .04, cutoff, length.seg = 0.85,
lty.ideal = 1, col.ideal = "red", lwd.ideal = 1, allowPerfectPredictions = FALSE, argzLoess = alist(degree = 2))
{
call = match.call()
smooth = match.arg(smooth)
if (smooth == "none")
smooth <- "F"
if(!is.logical(connect.smooth))
stop("Argument connect.smooth has to be of type logical.")
if (!is.numeric(nr.knots))
stop("Nr.knots must be numeric.")
  if(nr.knots > 5 | nr.knots < 3)
    stop(paste0("Number of knots = ", nr.knots, ", only 5 >= nk >= 3 is allowed."))
  # Range check: predictions must lie in [0, 1]. Values of exactly 0 or 1 are
  # handled further down (replaced when allowPerfectPredictions = TRUE,
  # otherwise dropped from the logistic calibration with a warning).
  if (!missing(p) && any(p < 0 | p > 1, na.rm = TRUE))
    stop("Probabilities can not be > 1 or < 0.")
if(allowPerfectPredictions) {
if(all(p %in% 0:1))
stop("All predicted values are equal to 0 or 1, implying that the underlying process is deterministic. Please check your model or the input.")
if(any(p %in% c(0, 1))) {
p = sapply(p, function(x) {
if(is.na(x) | is.nan(x))
x
else if(x == 0)
1e-8
else if(x == 1)
1 - 1e-8
else
x
})
wmess = paste0("There are predictions with value 0 or 1! These are replaced by values 1e-8 and 1 - 1e-8, respectively. ",
"Take this into account when interpreting the performance measures, as these are not calculated with the original values.",
"\n\nPlease check your model, as this may be an indication of overfitting. Predictions of 0 or 1 imply that these predicted values are deterministic.\n\n",
"We observe this in the following cases:\n - logistic regression: with quasi-complete separation, the coefficients tend to infinity;\n",
" - tree-based methods: one of the leaf nodes contains only observations with either 0 or 1;\n",
" - neural networks: the weights tend to infinity and this is known as weight/gradient explosion.")
warning(wmess, immediate. = TRUE)
} else {
wmess = NULL
}
} else {
wmess = NULL
}
a = 1 - cl.level
if (missing(p))
p <- 1 / (1 + exp(-logit))
else
logit <- log(p / (1 - p))
if (!all(y %in% 0:1)) {
stop("The vector with the binary outcome can only contain the values 0 and 1.")
}
if (length(p) != length(y))
stop("lengths of p or logit and y do not agree")
names(p) <- names(y) <- names(logit) <- NULL
if (!missing(group)) {
if (length(group) == 1 && is.logical(group) && group)
group <- rep("", length(y))
if (!is.factor(group))
group <-
if (is.logical(group) || is.character(group))
as.factor(group)
else
cut2(group, g = g.group)
names(group) <- NULL
nma <- !(is.na(p + y + weights) | is.na(group))
ng <- length(levels(group))
} else {
nma <- !is.na(p + y + weights)
ng <- 0
}
if(any(nma == FALSE)) {
tmpmess = "There are observations with missing values. These are removed."
warning(tmpmess, immediate. = TRUE)
wmess = c(wmess, tmpmess)
}
logit <- logit[nma]
y <- y[nma]
p <- p[nma]
if(ng > 0) {
group <- group[nma]
weights <- weights[nma]
return(val.probg(p, y, group, evaluate, weights, normwt, nmin))
}
# Fix 'No visible global binding for global variable' note
# https://stackoverflow.com/questions/9439256/how-can-i-handle-r-cmd-check-no-visible-binding-for-global-variable-notes-when
ymin <- ymax <- xbeta <- lower <- upper <- xend <- yend <- NULL
# Sort vector with probabilities
y <- y[order(p)]
logit <- logit[order(p)]
p <- p[order(p)]
if (length(p) > 5000 & smooth == "loess") {
warning("Number of observations > 5000, RCS is recommended.",
immediate. = TRUE)
}
if (length(p) > 1000 & CL.BT == TRUE) {
warning("Number of observations is > 1000, this could take a while...",
immediate. = TRUE)
}
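  # Degenerate case: all predicted probabilities are identical (an
  # uninformative model), so no calibration curve can be estimated and only
  # the summary statistics are returned.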
if(length(unique(p)) == 1) {
# Adjusted 2022-09-26
P <- mean(y)
Intc <- log(P/(1 - P))
n <- length(y)
D <- -1/n
L01 <- -2 * sum(y * logit - log(1 + exp(logit)), na.rm = TRUE)
L.cal <- -2 * sum(y * Intc - log(1 + exp(Intc)), na.rm = TRUE)
U.chisq <- L01 - L.cal
U.p <- 1 - pchisq(U.chisq, 1)
U <- (U.chisq - 1)/n
Q <- D - U
cl.auc <- ci.auc(y, p, cl.level, method.ci)
stats <- c(0, 0.5, 0, D, 0, 1, U, U.chisq, U.p, Q, mean((y - p[1])^2), Intc, 0, rep(abs(p[1] - P), 2), 1)
names(stats) <- c("Dxy", "C (ROC)", "R2", "D", "D:Chi-sq",
"D:p", "U", "U:Chi-sq", "U:p", "Q", "Brier",
"Intercept", "Slope", "Emax", "Eavg", "ECI")
Results =
structure(
list(
call = call,
stats = stats,
cl.level = cl.level,
Calibration = list(
Intercept = c("Point estimate" = unname(stats["Intercept"]),
"Lower confidence limit" = NA,
"Upper confidence limit" = NA),
Slope = c("Point estimate" = unname(stats["Slope"]),
"Lower confidence limit" = NA,
"Upper confidence limit" = NA)
),
Cindex = c("Point estimate" = unname(stats["C (ROC)"]),
"Lower confidence limit" = cl.auc[2],
"Upper confidence limit" = cl.auc[3])
), class = "CalibrationCurve"
)
return(Results)
}
i <- !is.infinite(logit)
nm <- sum(!i)
if(nm > 0)
warning(paste(nm, "observations deleted from logistic calibration due to probs. of 0 or 1"))
i.2 <- i
f.or <- glm(y[i] ~ logit[i], family = binomial) # lrm(y[i] ~ logit[i])
f <- lrm.fit(logit[i], y[i])
# glm(y ~ offset(Eta), family = binomial, control = glm.control(maxit = 1e2))
# glm(y ~ Eta, family = binomial)
cl.slope <- confint(f, level = cl.level)[2, ]
f2 <- lrm.fit(offset = logit[i], y = y[i])
if(f2$fail){
warning("The lrm function did not converge when computing the calibration intercept!",immediate.=TRUE)
f2 <- list()
f2$coef <- NA
cl.interc <- rep(NA,2)
} else{
cl.interc <- confint(f2, level = cl.level)
}
stats <- f$stats
cl.auc <- ci.auc(y, p, cl.level, method.ci)
n <- stats["Obs"]
predprob <- seq(emax.lim[1], emax.lim[2], by = 0.0005)
lt <- f$coef[1] + f$coef[2] * log(predprob/(1 - predprob))
calp <- 1/(1 + exp( - lt))
emax <- max(abs(predprob - calp))
if (pl) {
gg = ggplot(data.frame()) +
geom_line(data = data.frame(x = 0:1, y = 0:1), aes(x = x, y = y, colour = "Ideal"), linewidth = lwd.ideal, show.legend = TRUE) +
labs(x = xlab, y = ylab)
legCol = c("Ideal" = col.ideal)
lt = lty.ideal
lw.d = lwd.ideal
marks = NA
calCurves = list()
if (logistic.cal) {
if (min(p) > plogis(-7) | max(p) < plogis(7)) {
lrm.fit.1 = lrm(y[i.2] ~ qlogis(p[i.2]))
logCal = data.frame(x = p[i.2], y = plogis(lrm.fit.1$linear.predictors))
gg = gg + geom_line(data = logCal, show.legend = TRUE,
aes(x = x, y = y, color = "Logistic calibration"), linewidth = lwd.log, linetype = lty.log)
} else {
logit = seq(-7, 7, length = 200)
prob = 1 / (1 + exp(-logit))
pHat = binomial()$linkinv(cbind(1, logit) %*% coef(f))
logCal = data.frame(x = prob, y = pHat)
gg = gg + geom_line(data = logCal, aes(x = x, y = y, color = "Logistic calibration"), linewidth = lwd.log, linetype = lty.log)
}
calCurves$LogisticCalibration = logCal
legCol = c(legCol, "Logistic calibration" = col.log)
lt <- c(lt, lty.log)
lw.d <- c(lw.d, lwd.log)
marks <- c(marks, NA)
}
if (smooth == "loess") {
argzLoess$formula = y ~ p
SmFit = do.call("loess", argzLoess)
Sm = data.frame(x = unname(SmFit$x), y = SmFit$fitted)
Sm.01 = Sm
if(any(Sm$y < 0)) {
sel = which(Sm$y < 0)
sel = c(sel[length(sel)], sel[length(sel)] + 1)
tmp = Sm[sel, ]
Sm = Sm[Sm$y >= 0 & Sm$y <= 1, ]
Sm = rbind.data.frame(
data.frame(x = predict(lm(x ~ y, data = tmp), data.frame(y = 0)), y = 0),
Sm
)
}
if (connect.smooth) {
gg = gg + geom_line(data = Sm, aes(x = x, y = y, color = "Flexible calibration (Loess)"), linetype = lty.smooth, linewidth = lwd.smooth)
legCol = c(legCol, "Flexible calibration (Loess)" = col.smooth)
lt <- c(lt, lty.smooth)
lw.d <- c(lw.d, lwd.smooth)
marks <- c(marks, NA)
} else {
gg = gg + geom_point(data = Sm, aes(x = x, y = y, color = "Flexible calibration (Loess)"))
        legCol = c(legCol, "Flexible calibration (Loess)" = col.smooth)
lt <- c(lt, 0)
lw.d <- c(lw.d, 1)
marks <- c(marks, 1)
}
if(CL.smooth != FALSE) {
if(CL.BT) {
to.pred = seq(min(p), max(p), length = 200)
res.BT = replicate(2000, BT.samples(y, p, to.pred))
CL.BT = apply(res.BT, 1, quantile, c(0.025, 0.975))
colnames(CL.BT) = to.pred
dfCL = data.frame(x = to.pred, y = apply(res.BT, 1, quantile, 0.5), ymin = CL.BT[1, ], ymax = CL.BT[2, ])
rownames(dfCL) = NULL
} else {
cl.loess = predict(SmFit, type = "fitted", se = TRUE)
dfCL = data.frame(x = p, ymin = with(cl.loess, fit - qnorm(1 - a / 2) * se.fit),
ymax = with(cl.loess, fit + qnorm(1 - a / 2) * se.fit))
}
dfCL[dfCL$ymax < 0, "ymax"] <- dfCL[dfCL$ymin < 0, "ymin"] <- 0
dfCL[dfCL$ymax > 1, "ymax"] <- dfCL[dfCL$ymin > 1, "ymin"] <- 1
if (CL.smooth == "fill") {
gg = gg + geom_ribbon(data = dfCL, aes(x = x, ymin = ymin, ymax = ymax),
fill = rgb(177, 177, 177, 177, maxColorValue = 255))
} else{
gg =
gg +
geom_line(data = dfCL[dfCL$ymin > 0, ], aes(x = x, y = ymin, color = "CL flexible"), linetype = 2, linewidth = 1) +
geom_line(data = dfCL[dfCL$ymax < 1, ], aes(x = x, y = ymax), linetype = 2, linewidth = 1, col = col.smooth)
legCol = c(legCol, "CL flexible" = col.smooth)
lt <- c(lt, 2)
lw.d <- c(lw.d, 1)
marks <- c(marks, NA)
}
}
colnames(Sm) = c("x", "y")
if(exists("dfCL", envir = environment())) {
flexCal = if("CL.BT" %in% names(call) && call$CL.BT) list(loessFit = Sm, BootstrapConfidenceLimits = dfCL) else merge(Sm, dfCL, by = "x")
} else {
flexCal = Sm
}
calCurves$FlexibleCalibration = flexCal
cal.smooth <- approx(Sm.01, xout = p, ties = "ordered")$y
eavg <- mean(abs(p - cal.smooth))
ECI <- mean((p - cal.smooth) ^ 2) * 100
} else if (smooth == "rcs") {
argzRCS = alist(x = p,
y = y,
model = "logistic",
nk = nr.knots,
show = "prob",
statloc = "none",
plot = FALSE,
showknots = FALSE,
xrange = c(min(na.omit(p)), max(na.omit(p))),
lty = lty.smooth)
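    # Recursively reduce the number of knots until .rcspline.plot fits
    # without estimation problems.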
nkDecrease <- function(Argz) {
tryCatch(
do.call(".rcspline.plot", Argz),
error = function(e) {
nk = eval(Argz$nk)
warning(paste0("The number of knots led to estimation problems, nk will be set to ", nk - 1), immediate. = TRUE)
if(nk < 3)
stop("Nk = 3 led to estimation problems.")
Argz$nk = nk - 1
nkDecrease(Argz)
}
)
}
rcsFit = nkDecrease(argzRCS)
rcsDf = as.data.frame(rcsFit)
calCurves$RCS = rcsDf
gg = gg +
geom_line(data = rcsDf, aes(x = x, y = xbeta, color = "Flexible calibration (RCS)"), linetype = lty.smooth, linewidth = lwd.smooth) +
geom_line(data = rcsDf, aes(x = x, y = lower, color = "CL flexible"), linetype = 2, linewidth = 1) +
geom_line(data = rcsDf, aes(x = x, y = upper), linetype = 2, linewidth = 1, col = col.smooth)
legCol = c(legCol, "Flexible calibration (RCS)" = col.smooth, "CL flexible" = col.smooth)
lt <- c(lt, lty.smooth, 2)
lw.d <- c(lw.d, rep(lwd.smooth, 2))
marks <- c(marks, NA, NA)
}
if (!missing(m) | !missing(g) | !missing(cuts)) {
if (!missing(m))
q <- cut2(p,
m = m,
levels.mean = TRUE,
digits = 7)
else if (!missing(g))
q <- cut2(p,
g = g,
levels.mean = TRUE,
digits = 7)
else if (!missing(cuts))
q <- cut2(p,
cuts = cuts,
levels.mean = TRUE,
digits = 7)
means <- as.single(levels(q))
prop <- tapply(y, q, function(x) mean(x, na.rm = TRUE))
gg = gg + geom_point(data = data.frame(x = means, y = prop), aes(x = x, y = y, color = "Grouped observations"), shape = 2, size = 3)
#18.11.02: CI triangles
ng <- tapply(y, q, length)
og <- tapply(y, q, sum)
ob <- og / ng
se.ob <- sqrt(ob * (1 - ob) / ng)
g <- length(as.single(levels(q)))
for (i in 1:g)
gg = gg + geom_line(data = data.frame(x = c(means[i], means[i]), y = c(prop[i], min(1, prop[i] + 1.96 * se.ob[i]))), aes(x = x, y = y))
for (i in 1:g)
gg = gg + geom_line(data = data.frame(x = c(means[i], means[i]), y = c(prop[i], max(0, prop[i] - 1.96 * se.ob[i]))), aes(x = x, y = y))
if (connect.group) {
gg = gg + geom_line(data = data.frame(x = means, y = prop), aes(x = x, y = y))
lt <- c(lt, 1)
lw.d <- c(lw.d, 1)
} else {
lt <- c(lt, 0)
lw.d <- c(lw.d, 0)
}
legCol = c(legCol, "Grouped observations" = "black")
marks <- c(marks, 2)
}
}
lr = stats["Model L.R."]
p.lr = stats["P"]
D = (lr - 1) / n
L01 = -2 * sum(y * logit - logb(1 + exp(logit)), na.rm = TRUE)
U.chisq = L01 - f$deviance[2]
p.U = 1 - pchisq(U.chisq, 2)
U = (U.chisq - 2) / n
Q = D - U
Dxy = stats["Dxy"]
C = stats["C"]
R2 = stats["R2"]
B = sum((p - y) ^ 2) / n
Bmax = mean(y) * (1 - mean(y)) ^ 2 + (1 - mean(y)) * mean(y) ^ 2
Bscaled = 1 - B / Bmax
stats = c(Dxy,
C,
R2,
D,
lr,
p.lr,
U,
U.chisq,
p.U,
Q,
B,
f2$coef[1],
f$coef[2],
emax,
Bscaled)
names(stats) = c(
"Dxy",
"C (ROC)",
"R2",
"D",
"D:Chi-sq",
"D:p",
"U",
"U:Chi-sq",
"U:p",
"Q",
"Brier",
"Intercept",
"Slope",
"Emax",
"Brier scaled"
)
if (smooth == "loess")
stats <- c(stats, c(Eavg = eavg), c(ECI = ECI))
# Cut off definition
if(!missing(cutoff)) {
gg = gg + geom_segment(aes(x = cutoff, y = .1, xend = cutoff, yend = -0.025), arrow = arrow(length = unit(.15, "npc")))
}
if(pl) {
if(!is.logical(statloc)) {
if(dostats[1] == TRUE){
stats.2 <- paste('Calibration\n',
'...intercept: '
, sprintf(paste("%.", roundstats, "f", sep = ""), stats["Intercept"]), " (",
sprintf(paste("%.", roundstats, "f", sep = ""), cl.interc[1]), " to ",
sprintf(paste("%.", roundstats, "f", sep = ""), cl.interc[2]), ")", '\n',
'...slope: '
, sprintf(paste("%.", roundstats, "f", sep = ""), stats["Slope"]), " (",
sprintf(paste("%.", roundstats, "f", sep = ""), cl.slope[1]), " to ",
sprintf(paste("%.", roundstats, "f", sep = ""), cl.slope[2]), ")", '\n',
'Discrimination\n',
'...c-statistic: '
, sprintf(paste("%.", roundstats, "f", sep = ""), stats["C (ROC)"]), " (",
sprintf(paste("%.", roundstats, "f", sep = ""), cl.auc[2]), " to ",
sprintf(paste("%.", roundstats, "f", sep = ""), cl.auc[3]), ")"
, sep = '')
gg = gg + annotate("text", x = statloc[1], y = statloc[2], label = stats.2, hjust = 0, size = size)
} else {
leg <- format(names(stats)[dostats]) #constant length
leg <- paste0(leg, ":", format(stats[dostats], digits=roundstats))
if(!is.list(statloc))
statloc <- list(x = statloc[1], y = statloc[2])
gg =
gg +
annotate("text", x = statloc$x, y = statloc$y, label = paste(format(names(stats[dostats])), collapse = "\n"),
hjust = 0, size = size) +
annotate("text", x = statloc$x + (xlim[2] - xlim[1])/3, y = statloc$y, label = paste(format(round(stats[dostats], digits = roundstats)), collapse = "\n"),
hjust = 0, size = size)
}
}
if(is.character(riskdist)) {
if (riskdist == "calibrated") {
x <- f$coef[1] + f$coef[2] * log(p / (1 - p))
x <- 1 / (1 + exp(-x))
x[p == 0] <- 0
x[p == 1] <- 1
} else {
x <- p
}
bins <- seq(0, min(1, max(xlim)), length = 101)
x <- x[x >= 0 & x <= 1]
#08.04.01,yvon: distribution of predicted prob according to outcome
f0 <- table(cut(x[y == 0], bins))
f1 <- table(cut(x[y == 1], bins))
j0 <- f0 > 0
j1 <- f1 > 0
bins0 <- (bins[-101])[j0]
bins1 <- (bins[-101])[j1]
f0 <- f0[j0]
f1 <- f1[j1]
maxf <- max(f0, f1)
f0 <- (0.1 * f0) / maxf
f1 <- (0.1 * f1) / maxf
gg =
gg +
geom_segment(data = data.frame(x = bins1, xend = bins1, y = rep(line.bins, length(bins1)), yend = c(length.seg * f1 + line.bins)),
aes(x = x, y = y, xend = xend, yend = yend)) +
geom_segment(data = data.frame(x = bins0, xend = bins0, y = rep(line.bins, length(bins0)), yend = c(length.seg * -f0 + line.bins)),
aes(x = x, y = y, xend = xend, yend = yend)) +
geom_line(data = data.frame(x = c(min(bins0, bins1) - 0.01, max(bins0, bins1) + 0.01), y = c(line.bins, line.bins)),
aes(x = x, y = y)) +
annotate(geom = "text", x = max(bins0, bins1) + dist.label, y = line.bins + dist.label2, label = d1lab, size = size.d01) +
annotate(geom = "text", x = max(bins0, bins1) + dist.label, y = line.bins - dist.label2, label = d0lab, size = size.d01)
}
}
gg =
gg +
scale_color_manual("", values = legCol, breaks = names(legCol)) +
guides(colour = guide_legend(override.aes = list(linetype = lt, shape = marks, linewidth = lw.d * 0.5, size = 7))) +
theme_bw() +
theme(plot.background=element_blank(),
panel.border = element_rect(colour = "black", fill = NA, linewidth = 1),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
axis.text = element_text(size = 12),
axis.title = element_text(size = 14),
plot.margin = margin(11, 11, 5.5, 5.5, "points"), legend.position = "bottom")
gg =
gg + coord_cartesian(xlim = xlim, ylim = ylim)
Results =
structure(
list(
call = call,
ggPlot = gg,
stats = stats,
cl.level = cl.level,
Calibration = list(
Intercept = c("Point estimate" = unname(stats["Intercept"]),
"Lower confidence limit" = cl.interc[1],
"Upper confidence limit" = cl.interc[2]),
Slope = c("Point estimate" = unname(stats["Slope"]),
"Lower confidence limit" = cl.slope[1],
"Upper confidence limit" = cl.slope[2])
),
Cindex = c("Point estimate" = unname(stats["C (ROC)"]),
"Lower confidence limit" = cl.auc[2],
"Upper confidence limit" = cl.auc[3]),
warningMessages = wmess,
CalibrationCurves = calCurves
), class = "ggplotCalibrationCurve"
)
return(Results)
}
|
/scratch/gouwar.j/cran-all/cranData/CalibrationCurves/R/valProbggplot.R
|
## ----setup, include = FALSE---------------------------------------------------
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
options(rmarkdown.html_vignette.check_title = FALSE)
## ----logo, echo=FALSE, out.width="25%"----------------------------------------
knitr::include_graphics("./CalibrationCurves.png")
## ----PerfectCalibration, fig.align = 'center', fig.cap = "Example of a perfectly calibrated model", fig.topcaption = TRUE, echo = FALSE, out.width="100%"----
knitr::include_graphics("PerfectCalibration.png")
## ----Overfitted, fig.align = 'center', fig.cap = "Example of a miscalibrated model due to overfitting", fig.topcaption = TRUE, echo = FALSE, out.width="100%"----
knitr::include_graphics("Overfitted.png")
## ----Underfitted, fig.align = 'center', fig.cap = "Example of a miscalibrated model due to underfitting", fig.topcaption = TRUE, echo = FALSE, out.width="100%"----
knitr::include_graphics("Underfitted.png")
## -----------------------------------------------------------------------------
library(CalibrationCurves)
data("traindata")
## -----------------------------------------------------------------------------
head(traindata)
## -----------------------------------------------------------------------------
glmFit = glm(y ~ . , data = traindata, family = binomial)
summary(glmFit)
## -----------------------------------------------------------------------------
data("testdata")
pHat = predict(glmFit, newdata = testdata, type = "response")
## -----------------------------------------------------------------------------
yTest = testdata$y
## ---- out.width="100%"-------------------------------------------------------
calPerf = val.prob.ci.2(pHat, yTest)
## -----------------------------------------------------------------------------
calPerf
## -----------------------------------------------------------------------------
str(calPerf)
## -----------------------------------------------------------------------------
flexCal = calPerf$CalibrationCurves$FlexibleCalibration
plot(flexCal[, 1:2], type = "l", xlab = "Predicted probability", ylab = "Observed proportion", lwd = 2, xlim = 0:1, ylim = 0:1)
polygon(
x = c(flexCal$x, rev(flexCal$x)),
y = c(
flexCal$ymax,
rev(flexCal$ymin)
),
col = rgb(177, 177, 177, 177, maxColorValue = 255),
border = NA
)
## ---- out.width="100%"-------------------------------------------------------
invisible(val.prob.ci.2(pHat, yTest, smooth = "rcs"))
## ---- out.width="100%"-------------------------------------------------------
invisible(val.prob.ci.2(pHat, yTest, logistic.cal = TRUE, smooth = "none"))
## ---- out.width="100%"-------------------------------------------------------
invisible(val.prob.ci.2(pHat, yTest, logistic.cal = TRUE, col.log = "orange"))
## ---- out.width="100%"-------------------------------------------------------
invisible(val.prob.ci.2(pHat, yTest, col.ideal = "black", col.smooth = "red", CL.smooth = TRUE,
legendloc = c(0, 1), statloc = c(0.6, 0.25)))
## ---- out.width="100%"-------------------------------------------------------
invisible(val.prob.ci.2(pHat, yTest, dostats = c("C (ROC)", "Intercept", "Slope", "ECI")))
## ---- out.width="100%"-------------------------------------------------------
valProbggplot(pHat, yTest)
## -----------------------------------------------------------------------------
data("poissontraindata")
## -----------------------------------------------------------------------------
head(poissontraindata)
## -----------------------------------------------------------------------------
glmFit = glm(Y ~ . , data = poissontraindata, family = poisson)
summary(glmFit)
## -----------------------------------------------------------------------------
data("poissontestdata")
yHat = predict(glmFit, newdata = poissontestdata, type = "response")
## -----------------------------------------------------------------------------
yTest = poissontestdata$Y
## ---- out.width="100%"-------------------------------------------------------
calPerf = genCalCurve(yTest, yHat, family = poisson)
## -----------------------------------------------------------------------------
calPerf
## -----------------------------------------------------------------------------
str(calPerf)
## -----------------------------------------------------------------------------
GLMCal = calPerf$CalibrationCurves$GLMCalibration
plot(GLMCal[, 1:2], type = "l", xlab = "Predicted value", ylab = "Empirical average", lwd = 2, xlim = 0:1, ylim = 0:1,
col = "red", lty = 2)
abline(0, 1, lty = 1)
## ---- out.width="100%"-------------------------------------------------------
set.seed(1)
yTest = testdata$y
pHat[sample(1:length(pHat), 5, FALSE)] = sample(0:1, 5, TRUE)
x = val.prob.ci.2(pHat, yTest, allowPerfectPredictions = TRUE)
|
/scratch/gouwar.j/cran-all/cranData/CalibrationCurves/inst/doc/CalibrationCurves.R
|
---
title: "Introduction to the CalibrationCurves package"
author: "Bavo De Cock Campo"
date: "`r Sys.Date()`"
output:
bookdown::html_document2:
toc: true
rmarkdown::html_vignette:
fig_caption: yes
bibliography: references.bib
biblio-style: authoryear
latex_engine: xelatex
vignette: >
%\VignetteIndexEntry{CalibrationCurves}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
pkgdown:
as_is: true
---
```{r setup, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
options(rmarkdown.html_vignette.check_title = FALSE)
```
<div>
```{r logo, echo=FALSE, out.width="25%"}
knitr::include_graphics("./CalibrationCurves.png")
```
</div>
<br clear="right">
In this document, we give you a brief overview of the basic functionality of the `CalibrationCurves` package. In addition, we present the theoretical framework behind calibration and provide some illustrative examples to give the reader a better insight into the calibration assessment of a predictive model. We advise you to also consult the help-pages of the functions to get an exhaustive overview of the functionality.
We tried to tailor the explanation of the concepts to professionals with different backgrounds. Please, do contact me if you feel that something is unclear so that I can adjust (and hopefully improve) it. In addition, don't hesitate to send any suggestions you might have and bug reports to the package author.
# Assessing the performance of risk prediction models
## Risk prediction models
In this package, we focus on risk prediction models that estimate the probability $\pi_i$ of observing an event. We use $y_i \in \{0, 1\}$ to denote the variable that captures this outcome, which takes on the value 0 in case of a non-event and 1 in case of an event. Here, $i$ serves as an index for the observations (mostly the patient within medical predictive analytics) with $i = (1, \dots, n)$, where $n$ denotes the total number of observations. We assume that the response variable $y_i$ follows a Bernoulli distribution $y_i \sim \text{Bern}(\pi_i)$.
For example, we could be interested in estimating the probability $\pi_i$ of observing a malignant tumour for patient $i$. In this case, the event $y_i = 1$ is the tumour being malignant and $y_i = 0$ when the tumour is benign. With no available information on the patient characteristics, we might rely on the prevalence in the general population to estimate this probability.
Using risk prediction models, we model the outcome as a function of the observed risk/patient characteristics. The risk characteristics are contained in the covariate vector $\boldsymbol{x}_i$. This vector contains all observed information for patient $i$ (e.g. maximum diameter of the lesion, proportion of solid tissue, ...). This allows us to obtain a more accurate prediction that is based on the relation between the patient characteristics and the outcome. To construct a clinical prediction model, we rely either on statistical models such as logistic regression or on machine learning methods. A general expression that encompasses both types of models is
\begin{align*}
E[y_i | \boldsymbol{x}_i] = f(\boldsymbol{x}_i).
\end{align*}
This expression states that we model the response $y_i$ as a function of the observed risk characteristics $\boldsymbol{x}_i$.
### Mathematical details on existing predictive models
To construct a risk prediction model, we could rely on a logistic regression model
\begin{align*}
E[y_i | \boldsymbol{x}_i] = \pi_i(\boldsymbol{\beta}) = \frac{e^{\boldsymbol{x}_i^\top \boldsymbol{\beta}}}{1 + e^{\boldsymbol{x}_i^\top \boldsymbol{\beta}}}
\end{align*}
where $\boldsymbol{\beta}$ denotes the parameter vector. $\pi_i(\boldsymbol{\beta}) = P(y_i = 1| \boldsymbol{x}_i)$ denotes the probability of observing the event, given the covariate vector $\boldsymbol{x}_i$. We can rewrite the equation to its more well-known form
\begin{align*}
\log\left( \frac{\pi_i(\boldsymbol{\beta})}{1 - \pi_i(\boldsymbol{\beta})} \right) &= \boldsymbol{x}_i^\top \boldsymbol{\beta}\\[0.5em]
\text{logit}(\pi_i(\boldsymbol{\beta})) &= \eta_i
\end{align*}
where $\eta_i$ denotes the linear predictor. Here, we have the well-known logit function at the left side of the equation.
With machine learning methods, $f(\cdot)$ depends on the specific algorithm. With tree-based methods, for example, this corresponds to the observed proportion in the leaf nodes. For neural networks, $f(\cdot)$ is determined by the weights in the layers and the chosen activation functions.
## Different aspects of the predictive performance
To assess how well the model is able to predict (the probability of) the outcome, we assess two different aspects of the model [@VanCalster2016;@VanCalster2019;@Alba2017]:
a) *discrimination*;
b) *calibration*.
With *discrimination*, we refer to the model's ability to differentiate between observations that have the event and observations that have not. In this context, this translates to giving higher risk estimates for patients with the event than patients without the event. We commonly assess this using the area under the receiver operating characteristic curve. However, discrimination performance does not tell us how accurate the predictions are. The estimated risk may result in good discrimination and can be inaccurate at the same time. We refer to the accuracy of the predictions as the *calibration*. Hence, hereby we assess the agreement between the estimated and observed number of events [@VanCalster2016]. We say that a prediction model is calibrated if the predicted risks correspond to the observed proportions of the event.
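As a concrete aside, the c-statistic (the area under the ROC curve) has a simple rank-based form. Below is a minimal sketch in base R; `pHat` and `yTest` are hypothetical vectors holding the predicted risks and the observed outcomes, not objects defined at this point in the document.
```{r, eval = FALSE}
# Mann-Whitney estimate of the c-statistic (hypothetical pHat and yTest)
cmp <- outer(pHat[yTest == 1], pHat[yTest == 0], "-")
auc <- mean((cmp > 0) + 0.5 * (cmp == 0))
```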
## Assessing the calibration performance of a risk prediction model
### A mathematical perspective
One way to examine the calibration of risk predictions, is by using calibration curves [@VanCalster2016;@VanCalster2019;@ClinicalPredictionModels;@Campo2023GCF]. A calibration curve maps the predicted probabilities $f(\boldsymbol{x}_i)$ to the actual event probabilities $P(y_i = 1| f(\boldsymbol{x}_i))$ and visualizes the correspondence between the model's predicted risks and the true probabilities. For perfectly calibrated predictions, the calibration curve equals the diagonal, i.e. $P(y_i = 1 | f(\boldsymbol{x}_i)) = f(\boldsymbol{x}_i) \ \forall \ i$ where $\forall \ i$ denotes for all $i$.
### A practical perspective
In practice, we typically assess the model's calibration on a validation set. In this setting, a calibration curve visualizes the correspondence between the model's predicted risks and the observed proportion. When we have a perfect agreement between the observed and predicted proportion the calibration curve coincides with the ideal curve (a diagonal line). This scenario is visualized in Figure \@ref(fig:PerfectCalibration).
```{r PerfectCalibration, fig.align = 'center', fig.cap = "Example of a perfectly calibrated model", fig.topcaption = TRUE, echo = FALSE, out.width="100%"}
knitr::include_graphics("PerfectCalibration.png")
```
By assessing the calibration performance on a data set other than the training set, we obtain an indication of how well our risk prediction is able to generalize to other data sets and how accurate its out-of-sample predictions are. In general, the prediction model will show some miscalibration and the calibration curve gives us a visual depiction of how badly the model is miscalibrated. The further from the diagonal line, the worse the calibration. Figure \@ref(fig:Overfitted) depicts an example of a model that is miscalibrated and is a typical example of a model that is overfitted to the training data. This particular model has predictions that are too extreme: high risks are overestimated and low risks are underestimated.
```{r Overfitted, fig.align = 'center', fig.cap = "Example of a miscalibrated model due to overfitting", fig.topcaption = TRUE, echo = FALSE, out.width="100%"}
knitr::include_graphics("Overfitted.png")
```
Its counterpart, an underfitted model, occurs less frequently. Figure \@ref(fig:Underfitted) shows the calibration curve of an underfitted model. Here, there is an overestimation of the low risks and an underestimation of the high risks.
```{r Underfitted, fig.align = 'center', fig.cap = "Example of a miscalibrated model due to underfitting", fig.topcaption = TRUE, echo = FALSE, out.width="100%"}
knitr::include_graphics("Underfitted.png")
```
### How do we construct a calibration curve?
Fitting a logistic regression model to the training data results in an estimate for the parameter vector $\boldsymbol{\beta}$, which we denote as $\widehat{\boldsymbol{\beta}}$. The latter contains the estimated effects of the included covariates (e.g. proportion of solid tissue). To obtain a risk estimate for patient $i$, we multiply the covariate vector $\boldsymbol{x}_i$ (which contains all the patient-specific characteristics) with the estimated parameter vector $\widehat{\boldsymbol{\beta}}$ to obtain the linear predictor $\widehat{\eta}_i$
\begin{align*}
\widehat{\eta}_i = \boldsymbol{x}_i^\top \widehat{\boldsymbol{\beta}}.
\end{align*}
To differentiate between the training and test set, we append the subscript $*$ to the quantities of the test set. Hence, ${}_{*} y_i$ denotes the outcome in the test set. Similarly, we use ${}_{*} \boldsymbol{x}_i$ to denote the covariate vector for patient $i$ in the test set. We then calculate the linear predictor on the test set as
\begin{align*}
{}_{*} \widehat{\eta}_i = {}_{*} \boldsymbol{x}_i^\top \widehat{\boldsymbol{\beta}} \tag{1}.
\end{align*}
Similarly, we can predict the probability $\widehat{f}({}_{*} \boldsymbol{x}_i)$ for patient $i$ in the test set using machine learning methods. We use
\begin{align*}
{}_{*} \widehat{\pi}_i = \widehat{f}({}_{*} \boldsymbol{x}_i)
\end{align*}
as a general notation to denote the predicted probability of the risk prediction model.
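As a minimal sketch of equation (1), and not code taken from the package, the test-set linear predictor and predicted probabilities can be computed as follows; `fit` and `Xtest` are hypothetical objects (a logistic model fitted with `glm` and a numeric matrix with the test-set covariates).
```{r, eval = FALSE}
# Hypothetical objects: `fit` (fitted logistic model) and `Xtest` (covariate matrix)
etaTest <- cbind(1, Xtest) %*% coef(fit) # linear predictor, equation (1)
piTest  <- plogis(etaTest)               # predicted probabilities
```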
One way to compute the calibration curve, is by using a logistic regression model
\begin{align*}
\text{logit}(P({}_{*} y_i = 1| {}_{*} \widehat{\pi}_i)) &= \alpha + \zeta \ \text{logit}({}_{*} \widehat{\pi}_i)
(\#eq:logcal)
\end{align*}
where we estimate the observed proportions as a function of the predicted probabilities. This model fit yields a logistic calibration curve. Note that $\text{logit}({}_{*} \widehat{\pi}_i) = {}_{*} \widehat{\eta}_i$ when ${}_{*} \widehat{\pi}_i$ is estimated using a logistic regression model (see \@ref(eq:logcal)).
Alternatively, we can obtain a flexible, nonlinear calibration curve using a non-parametric smoother such as loess or restricted cubic splines. In our package, we provide both types of calibration curves.
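As an illustrative sketch (this is not the package's internal implementation), both types of curves can be estimated with a few lines of base R; `pHat` and `yTest` are hypothetical vectors with the test-set predictions and outcomes.
```{r, eval = FALSE}
# Logistic calibration model: estimates alpha (intercept) and zeta (slope)
calFit  <- glm(yTest ~ qlogis(pHat), family = binomial)
coef(calFit)
# Flexible alternative: a loess smoother of the outcome on the predictions
flexFit <- loess(yTest ~ pHat, degree = 2)
```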
### Calibration intercept and slope
In addition to the calibration curve, we have two measures that summarize different aspects of the calibration performance:
- the calibration intercept $\alpha_c$ (calibration-in-the-large);
- the calibration slope $\zeta$.
We have a perfectly calibrated model when the calibration curve coincides with the diagonal line or when $\alpha = \alpha_c = 0$ and $\zeta = 1$.
To compute the calibration slope $\zeta$, we rely on the model used to obtain the logistic calibration curve (see equation \@ref(eq:logcal)). The value of the calibration slope $\zeta$ tells us whether the model is over- or underfitted. When $\zeta < 1$ the model is overfitted: ${}_{*} \widehat{\eta}_i$ is too extreme and has to be shrunk to make the predicted risks coincide with the observed risks. Conversely, we have a model that is underfitted when $\zeta > 1$.
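A practical consequence: once $\alpha$ and $\zeta$ have been estimated (e.g. with the `glm()` sketch above), overly extreme predictions can be recalibrated by shrinking the linear predictor. Again, this is a sketch with the same hypothetical objects.
```{r, eval = FALSE}
# Recalibrated probabilities: shrink the logit using the estimated slope
ab     <- coef(calFit)
pRecal <- plogis(ab[1] + ab[2] * qlogis(pHat))
```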
To calculate the calibration intercept or calibration-in-the-large, we fix the calibration slope at $1$ and denote this as $\alpha|\zeta = 1$ or the short-hand notation $\alpha_c$. To estimate $\alpha_c$, we fit the model
\begin{align*}
\text{logit}(P({}_{*} y_i = 1| {}_{*} \widehat{\pi}_i)) &= \alpha_c + \text{offset}(\text{logit}({}_{*} \widehat{\pi}_i))
(\#eq:calintercept)
\end{align*}
where we enter $\text{logit}({}_{*} \widehat{\pi}_i)$ as an offset variable. Hereby, we fix $\zeta = 1$. The calibration intercept tells us whether the risks are overestimated $(\alpha_c < 0)$ or underestimated $(\alpha_c > 0)$ **on average**.
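A minimal sketch of \@ref(eq:calintercept), with the same hypothetical `pHat` and `yTest` as before: entering the logit of the predictions as an offset fixes $\zeta$ at 1.
```{r, eval = FALSE}
# Calibration-in-the-large: the offset term fixes the slope at 1
calInt <- glm(yTest ~ 1 + offset(qlogis(pHat)), family = binomial)
coef(calInt) # alpha_c
```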
## Illustration of the CalibrationCurves package
### Training the model
To illustrate the functionality, the package has two example data sets: `traindata` and `testdata`. These are two synthetically generated data sets (using the same underlying process/settings to generate the data) to illustrate the functionality of the `CalibrationCurves` package.
The `traindata` data frame represents the data that we will use to develop our risk prediction model
```{r}
library(CalibrationCurves)
data("traindata")
```
In this data frame, we have four covariates and one response variable `y`.
```{r}
head(traindata)
```
Next, we fit a logistic regression model to obtain the estimated parameter vector $\widehat{\boldsymbol{\beta}}$.
```{r}
glmFit = glm(y ~ . , data = traindata, family = binomial)
summary(glmFit)
```
### Assessing the calibration performance
Hereafter, we assess the calibration performance on the `testdata` set. Hereto, we first have to compute the predicted probabilities on this data set.
```{r}
data("testdata")
pHat = predict(glmFit, newdata = testdata, type = "response")
```
We then store the response in the `testdata` in a separate vector `yTest`.
```{r}
yTest = testdata$y
```
Now we have everything we need to assess the calibration performance of our prediction model. We can either use `val.prob.ci.2` or `valProbggplot` to visualize the calibration performance and to obtain the statistics. `val.prob.ci.2` makes the plot using `base` R and `valProbggplot` uses the `ggplot2` package.
By default, the flexible calibration curve (based on a loess smoother) will be plotted.
```{r, out.width="100%"}
calPerf = val.prob.ci.2(pHat, yTest)
```
In addition to the plot, the function returns an object of the class `CalibrationCurve`.
```{r}
calPerf
```
This object contains the calculated statistics as well as the calculated coordinates of the calibration curve.
```{r}
str(calPerf)
```
The coordinates are stored in the `CalibrationCurves` slot and can be extracted as follows.
```{r}
flexCal = calPerf$CalibrationCurves$FlexibleCalibration
plot(flexCal[, 1:2], type = "l", xlab = "Predicted probability", ylab = "Observed proportion", lwd = 2, xlim = 0:1, ylim = 0:1)
polygon(
x = c(flexCal$x, rev(flexCal$x)),
y = c(
flexCal$ymax,
rev(flexCal$ymin)
),
col = rgb(177, 177, 177, 177, maxColorValue = 255),
border = NA
)
```
Alternatively, we can use restricted cubic splines to obtain the flexible calibration curve.
```{r, out.width="100%"}
invisible(val.prob.ci.2(pHat, yTest, smooth = "rcs"))
```
We obtain the logistic calibration curve using the following code.
```{r, out.width="100%"}
invisible(val.prob.ci.2(pHat, yTest, logistic.cal = TRUE, smooth = "none"))
```
We can plot both using
```{r, out.width="100%"}
invisible(val.prob.ci.2(pHat, yTest, logistic.cal = TRUE, col.log = "orange"))
```
The package also allows you to change the colors, reposition the legend and much more. Check out the help-function to see what other arguments the functions have.
```{r, out.width="100%"}
invisible(val.prob.ci.2(pHat, yTest, col.ideal = "black", col.smooth = "red", CL.smooth = TRUE,
legendloc = c(0, 1), statloc = c(0.6, 0.25)))
```
Finally, we can also decide which statistics appear on the plot.
```{r, out.width="100%"}
invisible(val.prob.ci.2(pHat, yTest, dostats = c("C (ROC)", "Intercept", "Slope", "ECI")))
```
### ggplot version
The ggplot version (i.e. `valProbggplot`) uses virtually the same arguments. Hence, we can easily obtain a ggplot using the same code.
```{r, out.width="100%"}
valProbggplot(pHat, yTest)
```
# Assessing the performance of other types of prediction models
In my recent paper [@Campo2023GCF], I propose an extension of the logistic calibration framework to distributions that belong to the exponential family with probability density function (pdf)
\begin{align*}
f(y_i; \theta_i, \phi, w_i) = \exp\left( \frac{y_i \theta_i - b(\theta_i)}{\phi} w_i + c(y_i, \phi, w_i)\right).
\end{align*}
\noindent
Here, $\theta_i$ is the natural parameter, $\phi$ the dispersion parameter and $w_i$ the weight. $b(\cdot)$ and $c(\cdot)$ are known functions. Similar to before, we assume that there is an unknown regression function $r(\boldsymbol{x}_i) = E[y_i | \boldsymbol{x}_i]$. To approximate this unknown function, we rely on prediction models with the following functional form
\begin{align*}
E[y_i | \boldsymbol{x}_i] = \mu_i = f(\boldsymbol{x}_i).
(\#eq:PredModel)
\end{align*}
\noindent
To estimate \@ref(eq:PredModel), we can use a generalized linear model
\begin{align*}
g(E[y_i | \boldsymbol{x}_i]) = \boldsymbol{x}_i^\top \boldsymbol{\beta} = \eta_i.
(\#eq:GLM)
\end{align*}
where $g(\cdot)$ denotes the link function. Alternatively, we can estimate \@ref(eq:PredModel) using machine learning methods. Using the model fit, we obtain the predictions $\widehat{\mu}_i = \widehat{f}(\boldsymbol{x}_i)$.
## Generalized calibration curves
To examine the calibration of prediction models where the outcome is a member of the exponential family, we redefine the framework in more general terms. In this context, a calibration curve maps the predicted values $f(\boldsymbol{x}_i)$ to $E[y_i| f(\boldsymbol{x}_i)]$, the actual conditional mean of $y_i$ given $f(\boldsymbol{x}_i)$. As before, a model is perfectly calibrated if the calibration curve equals the diagonal, i.e. $E[y_i | f(\boldsymbol{x}_i)] = f(\boldsymbol{x}_i) \ \forall \ i$. Hence, in this context, the calibration curve captures the correspondence between the predicted values and the conditional mean.
We propose two methods to estimate the calibration curve. Firstly, we can estimate the calibration curve using a generalized linear model
\begin{align*}
g(E[{}_{*} y_i | {}_{*} \widehat{\mu}_i]) = \alpha + \zeta \ g({}_{*} \widehat{\mu}_i).
(\#eq:CalibrationGLM)
\end{align*}
By transforming ${}_{*} \widehat{\mu}_i$ using the appropriate $g(\cdot)$, we map ${}_{*} \widehat{\mu}_i$ to the whole real line to better fit the model. If ${}_{*} \widehat{\mu}_i$ is estimated using a generalized linear model with the same link function (i.e. $g(\cdot)$ is identical in \@ref(eq:GLM) and \@ref(eq:CalibrationGLM)), it follows that $g({}_{*} \widehat{\mu}_i) = {}_{*} \widehat{\eta}_i$. Using equation \@ref(eq:CalibrationGLM), we estimate the empirical average as a function of the predicted values. Further, similarly to \@ref(eq:logcal), $\zeta$ tells us whether the model is over- ($\zeta < 1$) or underfitted ($\zeta >1$). We estimate the calibration-in-the-large $\alpha_c$ as
\begin{align*}
g(E[{}_{*} y_i | {}_{*} \widehat{\mu}_i]) = \alpha_c + \text{offset}(g({}_{*} \widehat{\mu}_i)).
(\#eq:CITLGLM)
\end{align*}
Hereby, we assess to which extent the observed empirical average equals the average predicted value. Secondly, as with the logistic regression model, we can employ non-parametric smoothers to estimate the calibration curve.
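For instance, for a Poisson model with log link, a sketch of \@ref(eq:CalibrationGLM) and \@ref(eq:CITLGLM) reads as follows; `yHat` and `yTest` are hypothetical vectors with the predicted means and the observed counts.
```{r, eval = FALSE}
# Hypothetical inputs: yHat = predicted means, yTest = observed counts
calGLM  <- glm(yTest ~ log(yHat), family = poisson)         # alpha and zeta
calCITL <- glm(yTest ~ offset(log(yHat)), family = poisson) # alpha_c
```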
## Illustration of the generalized calibration framework
### Training the model
To illustrate the functionality, the package has two example data sets with a Poisson-distributed outcome variable: `poissontraindata` and `poissontestdata`. These are two synthetically generated data sets (using the same underlying process/settings to generate the data) to illustrate the functionality of the `CalibrationCurves` package.
The `poissontraindata` data frame represents the data that we will use to develop our prediction model.
```{r}
data("poissontraindata")
```
In this data frame, we have five covariates and one response variable `Y`.
```{r}
head(poissontraindata)
```
Next, we fit a Poisson GLM with log link to obtain the estimated parameter vector $\widehat{\boldsymbol{\beta}}$.
```{r}
glmFit = glm(Y ~ . , data = poissontraindata, family = poisson)
summary(glmFit)
```
### Assessing the calibration performance
Hereafter, we assess the calibration performance on the `poissontestdata` set. Hereto, we first have to compute the predicted values on this data set.
```{r}
data("poissontestdata")
yHat = predict(glmFit, newdata = poissontestdata, type = "response")
```
We then store the response in the `poissontestdata` in a separate vector `yTest`.
```{r}
yTest = poissontestdata$Y
```
Now we have everything we need to assess the calibration performance of our prediction model. We can use `genCalCurve` to visualize the calibration performance and to obtain the statistics. `genCalCurve` makes the plot using `base` R and a ggplot version will be included in one of the next updates.
By default, the calibration curve as estimated by a GLM will be plotted. Further, in addition to the outcome and the predicted values, we have to specify the distribution of the response variable.
```{r, out.width="100%"}
calPerf = genCalCurve(yTest, yHat, family = poisson)
```
In addition to the plot, the function returns an object of the class `GeneralizedCalibrationCurve`.
```{r}
calPerf
```
This object contains the calculated statistics as well as the calculated coordinates of the calibration curve.
```{r}
str(calPerf)
```
The coordinates are stored in the `CalibrationCurves` slot and can be extracted as follows.
```{r}
GLMCal = calPerf$CalibrationCurves$GLMCalibration
plot(GLMCal[, 1:2], type = "l", xlab = "Predicted value", ylab = "Empirical average", lwd = 2, xlim = 0:1, ylim = 0:1,
col = "red", lty = 2)
abline(0, 1, lty = 1)
```
# FAQ
## Why is the calibration intercept different in the rms package?
To construct the logistic calibration curve (see [How do we construct a calibration curve?]), we fit the model
\begin{align*}
\text{logit}(E[{}_{*} y_i | {}_{*} \widehat{\pi}_i]) = \alpha + \zeta \ \text{logit}({}_{*} \widehat{\pi}_i)
\end{align*}
Here, $\zeta$ corresponds to the calibration slope. The calibration intercept from the `val.prob` function in the `rms` package corresponds to $\alpha$, which generally differs from $\alpha_c$.
In the `CalibrationCurves` package, the calibration intercept corresponds to $\alpha_c$ which assesses the calibration in the large. Using this formulation, the calibration intercept indicates whether the predicted risks are under- or overestimated on average and this is conform with the definition of the calibration intercept in the article 'A calibration hierarchy for risk models was defined: from utopia to empirical data' (and other articles published on this topic) [@VanCalster2016;@VanCalster2019]. We compute $\alpha_c$ using
\begin{align*}
\text{logit}(E[{}_{*} y_i | {}_{*} \widehat{\pi}_i]) = \alpha_c + \text{offset}(\text{logit}({}_{*} \widehat{\pi}_i)).
\end{align*}
where we fix $\zeta = 1$ by including $\text{logit}({}_{*} \widehat{\pi}_i)$ as an offset variable.
Consequently, both types of calibration intercepts need to be interpreted differently:
- $\alpha$:
- this corresponds to the constant you have to add after multiplying the linear predictor by the ‘correction’ factor (i.e. the calibration slope) to get the predicted probabilities to correspond to the observed ones. In essence: once we have multiplied the linear predictor by a correction factor, what is the constant that we still have to add to make the predicted probabilities correspond to the observed ones?
- $\alpha_c$:
- $> 0$: ${}_{*} \widehat{\pi}_i$ is too low on average and hence, on average the risks are underestimated. You have to increase it to make it correspond to the observed probabilities.
- $< 0$: ${}_{*} \widehat{\pi}_i$ is too high on average and hence, on average the risks are overestimated. You have to decrease it to make it correspond to the observed probabilities.
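The difference between the two intercepts also becomes tangible when both are computed by hand. The following minimal sketch (not the internal code of `val.prob` or `val.prob.ci.2`) fits both calibration models with `glm`, using the `testdata` outcome and the predictions `pHat` from the logistic example above.
```{r}
# Hedged sketch, not the package's internal code: alpha and zeta stem from the
# logistic calibration model; alpha_c stems from the offset model with zeta = 1
yTest = testdata$y                                          # outcome of the binary example
logitHat = qlogis(pHat)                                     # logit of the predicted risks
calFit  = glm(yTest ~ logitHat, family = binomial)          # alpha + zeta * logit(pHat)
citlFit = glm(yTest ~ offset(logitHat), family = binomial)  # fixes zeta = 1
c(alpha = unname(coef(calFit)[1]), zeta = unname(coef(calFit)[2]),
  alpha_c = unname(coef(citlFit)[1]))
```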
## I have predicted probabilities of 0 or 1. Why is this not allowed by default and why do I get these annoying warning messages?
Predicted probabilities of 0 or 1 imply that there is no more randomness and that the process is deterministic. If the process was truly deterministic, we would not have to model it. In most cases, the presence of perfect predictions signifies that something went wrong when fitting the model or that the model is severely overfitted. We therefore make sure that this is not allowed by default and delete these observations. We observe this behavior in the following cases:
<br /> - logistic regression: with quasi-complete separation, the coefficients tend to infinity;
<br /> - tree-based methods: one of the leaf nodes contains only observations with outcome 0 or only observations with outcome 1;
<br /> - neural networks: the weights tend to infinity and this is known as weight/gradient explosion.
If you are confident that nothing is wrong with the model fit, then you can obtain a calibration curve by setting the argument `allowPerfectPredictions` to `TRUE`. In this case, predictions of 0 and 1 are replaced by values 1e-8 and 1 - 1e-8, respectively. Do take this into account when interpreting the performance measures, as these are not calculated with the original values.
```{r, out.width="100%"}
set.seed(1)
yTest = testdata$y                                    # outcome of the binary example
# randomly overwrite five predicted probabilities with hard 0/1 values
pHat[sample(1:length(pHat), 5, FALSE)] = sample(0:1, 5, TRUE)
x = val.prob.ci.2(pHat, yTest, allowPerfectPredictions = TRUE)
```
# References
<div id="refs"></div>
|
/scratch/gouwar.j/cran-all/cranData/CalibrationCurves/inst/doc/CalibrationCurves.Rmd
|
## usethis namespace: start
#' @useDynLib CamelUp, .registration = TRUE
## usethis namespace: end
NULL
## usethis namespace: start
#' @importFrom Rcpp sourceCpp
## usethis namespace: end
NULL
|
/scratch/gouwar.j/cran-all/cranData/CamelUp/R/CamelUp-package.R
|
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
#' @name Board
#' @title Encapsulates a double
#' @description Type the name of the class to see its methods
#' @field new Constructor
#' @field mult Multiply by another Double object \itemize{
#' \item Parameter: other - The other Double object
#' \item Returns: product of the values
#' }
#' @export
NULL
#' @name Camel
#' @title Encapsulates a double
#' @description Type the name of the class to see its methods
#' @field new Constructor
#' @field mult Multiply by another Double object \itemize{
#' \item Parameter: other - The other Double object
#' \item Returns: product of the values
#' }
#' @export
NULL
#' @name Die
#' @title Encapsulates a double
#' @description Type the name of the class to see its methods
#' @field new Constructor
#' @field mult Multiply by another Double object \itemize{
#' \item Parameter: other - The other Double object
#' \item Returns: product of the values
#' }
#' @export
#'
NULL
#' @name Game
#' @title Encapsulates a double
#' @description Type the name of the class to see its methods
#' @field new Constructor
#' @field mult Multiply by another Double object \itemize{
#' \item Parameter: other - The other Double object
#' \item Returns: product of the values
#' }
#' @export
NULL
#' @name LegBet
#' @title Encapsulates a double
#' @description Type the name of the class to see its methods
#' @field new Constructor
#' @field mult Multiply by another Double object \itemize{
#' \item Parameter: other - The other Double object
#' \item Returns: product of the values
#' }
#' @export
NULL
#' @name Player
#' @title Encapsulates a double
#' @description Type the name of the class to see its methods
#' @field new Constructor
#' @field mult Multiply by another Double object \itemize{
#' \item Parameter: other - The other Double object
#' \item Returns: product of the values
#' }
#' @export
#'
NULL
#' @name Simulator
#' @title Encapsulates a double
#' @description Type the name of the class to see its methods
#' @field new Constructor
#' @field mult Multiply by another Double object \itemize{
#' \item Parameter: other - The other Double object
#' \item Returns: product of the values
#' }
#' @export
NULL
#' @name Space
#' @title Encapsulates a double
#' @description Type the name of the class to see its methods
#' @field new Constructor
#' @field mult Multiply by another Double object \itemize{
#' \item Parameter: other - The other Double object
#' \item Returns: product of the values
#' }
#' @export
NULL
|
/scratch/gouwar.j/cran-all/cranData/CamelUp/R/RcppExports.R
|