#' @title mae
#' @description Calculates the Mean absolute error (MAE) from observed and
#' predicted values.
#' @author Kristin Piikki, Johanna Wetterlind, Mats Soderstrom and Bo Stenberg,
#' E-mail: \email{kristin.piikki@@slu.se}
#' @param o A numeric vector. Observed values.
#'
#' @param p A numeric vector. Predicted values.
#' @return Mean absolute error (MAE).
#'
#' @details Interpretation: smaller is better. Similar to RMSE but less sensitive
#' to large errors.
#' @references Piikki K., Wetterlind J., Soderstrom M., Stenberg B. (2021). Perspectives
#' on validation in digital soil mapping of continuous attributes. A review.
#' Soil Use and Management. \doi{10.1111/sum.12694}
#' @examples
#' obs<-c(1:10)
#' pred<-c(1, 1, 3, 2, 4, 5, 6, 8, 7, 10)
#' mae(o=obs, p=pred)
#'
#' @export
mae<-function(o, p) return(mean(abs(o-p)))
/scratch/gouwar.j/cran-all/cranData/valmetrics/R/mae.R
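A quick numerical illustration (not part of the package) of the claim above that MAE is less sensitive to large errors than RMSE; it assumes the mae() and rmse() functions from this package are sourced:

obs  <- 1:10
pred <- c(1:9, 30)   # perfect except one large error
mae(o=obs, p=pred)   # 2    : the single error of 20 adds 20/10 to the mean
rmse(o=obs, p=pred)  # ~6.32: squaring lets the single error dominate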
#' @title mape
#' @description Calculates the Mean absolute percentage error (MAPE) from observed
#' and predicted values.
#' @inherit mae return author
#' @inheritParams mae
#' @return Mean absolute percentage error (MAPE)
#' @details Interpretation: smaller is better.
#' @inherit mae return references
#' @examples
#' obs<-c(1:10)
#' pred<-c(1, 1, 3, 2, 4, 5, 6, 8, 7, 10)
#' mape(o=obs, p=pred)
#'
#' @export
mape<-function(o, p){
  a<-abs(o-p)
  b<-o
  mean(a/b)
}
/scratch/gouwar.j/cran-all/cranData/valmetrics/R/mape.R
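A usage caveat (my addition, not from the package documentation): mape() divides each absolute error by the corresponding observed value, so any observed value of zero yields Inf and negative observed values flip the sign. A minimal sketch with mape() sourced:

obs  <- c(0, 2, 4)
pred <- c(1, 2, 5)
mape(o=obs, p=pred)           # Inf: the first observed value is zero
mape(o=obs[-1], p=pred[-1])   # 0.125 after dropping the zero observation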
#' @title mare
#' @description Calculates the Median absolute relative error (MARE) from observed
#' and predicted values.
#' @inherit mae return author
#' @inheritParams mae
#' @return Median absolute relative error (MARE)
#' @details Interpretation: smaller is better.
#' @inherit mae return references
#' @examples
#' obs<-c(1:10)
#' pred<-c(1, 1, 3, 2, 4, 5, 6, 8, 7, 10)
#' mare(o=obs, p=pred)
#'
#' @export
mare<-function(o, p){
  a<-abs(o-p)
  b<-o
  stats::median(a/b)
}
/scratch/gouwar.j/cran-all/cranData/valmetrics/R/mare.R
#' @title mde
#' @description Calculates the Median error (MdE) from observed and predicted
#' values.
#' @inherit mae return author
#' @inheritParams mae
#' @return Median error (MdE)
#' @details Interpretation: the MdE is signed, so a value closer to zero is better.
#' Similar to mean error (bias) but less sensitive to large errors. Sometimes
#' called bias.
#' @inherit mae return references
#' @examples
#' obs<-c(1:10)
#' pred<-c(1, 1, 3, 2, 4, 5, 6, 8, 7, 10)
#' mde(o=obs, p=pred)
#'
#' @export
mde<-function(o, p){
  stats::median(p-o)
}
/scratch/gouwar.j/cran-all/cranData/valmetrics/R/mde.R
#' @title mdse
#' @description Calculates the Median squared error (MdSE) from observed and
#' predicted values.
#' @inherit mae return author
#' @inheritParams mae
#' @return Median squared error (MdSE).
#'
#' @details Interpretation: smaller is better.
#' @inherit mae return references
#' @examples
#' obs<-c(1:10)
#' pred<-c(1, 1, 3, 2, 4, 5, 6, 8, 7, 10)
#' mdse(o=obs, p=pred)
#'
#' @export
mdse<-function(o, p){
  stats::median((p-o)^2)
}
/scratch/gouwar.j/cran-all/cranData/valmetrics/R/mdse.R
#' @title me
#' @description Calculates the Mean error (ME) from observed and
#' predicted values.
#' @inherit mae return author
#' @inheritParams mae
#' @return Mean error (ME).
#' @details Interpretation: the ME is signed, so a value closer to zero is better.
#' Sometimes called bias.
#' @inherit mae return references
#' @examples
#' obs<-c(1:10)
#' pred<-c(1, 1, 3, 2, 4, 5, 6, 8, 7, 10)
#' me(o=obs, p=pred)
#'
#' @export
me<-function(o, p) return(mean(p-o))
/scratch/gouwar.j/cran-all/cranData/valmetrics/R/me.R
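A small sketch (not part of the package) showing that the sign of me() indicates the direction of the systematic error, since it averages p - o; assumes me() is sourced:

obs <- 1:10
me(o=obs, p=obs + 2)    #  2: predictions systematically too high
me(o=obs, p=obs - 2)    # -2: predictions systematically too low
me(o=obs, p=rev(obs))   #  0: large errors but no net bias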
#' @title msdr
#' @description Calculates the Mean squared deviation ratio (MSDR) from observed
#' and predicted values.
#' @inherit mae return author
#' @inheritParams mae
#' @return Mean squared deviation ratio (MSDR)
#' @details Interpretation: closer to 1 is better. Sometimes called standardised
#' squared predictor error (SSPE) or scaled root mean squared error (SRMSE).
#' @references Piikki K., Wetterlind J., Soderstrom M., Stenberg B. (2021). Perspectives
#' on validation in digital soil mapping of continuous attributes. A review.
#' Soil Use and Management. \doi{10.1111/sum.12694}
#' @references Voltz, M., & Webster, R. (1990). A comparison of kriging, cubic
#' splines and classification for predicting soil properties from sample
#' information. Journal of Soil Science, 41(3), 473-490. (there called:
#' standardized square deviation).
#' @examples
#' obs<-c(1:10)
#' pred<-c(1, 1, 3, 2, 4, 5, 6, 8, 7, 10)
#' msdr(o=obs, p=pred)
#'
#' @export
msdr<-function(o, p){
  mse(o,p)/stats::var(p)
}
/scratch/gouwar.j/cran-all/cranData/valmetrics/R/msdr.R
#' @title mse
#' @description Calculates the Mean squared error (MSE) from observed and
#' predicted values.
#' @inherit mae return author
#' @inheritParams mae
#' @return Mean squared error (MSE).
#'
#' @details Interpretation: smaller is better. This metric is sometimes called mean squared
#' deviation (MSD or RMSD2).
#'
#' @inherit mae return references
#'
#' @examples
#' obs<-c(1:10)
#' pred<-c(1, 1, 3, 2, 4, 5, 6, 8, 7, 10)
#' mse(o=obs, p=pred)
#'
#' @export
mse<-function(o, p){
  mean((p-o)^2)
}
/scratch/gouwar.j/cran-all/cranData/valmetrics/R/mse.R
#' @title nmse
#' @description Calculates the Normalized mean squared error (NMSE) from observed
#' and predicted values.
#' @inherit mae return author
#' @inheritParams mae
#' @return Normalized mean squared error (NMSE)
#' @details Interpretation: smaller is better.
#'
#' @references Park S. J., Vlek P. L. G. 2002. Environmental correlation of
#' three-dimensional soil spatial variability: a comparison of three adaptive
#' techniques. Geoderma, 109(1-2), 117-140.
#' @inherit mae return references
#' @examples
#' obs<-c(1:10)
#' pred<-c(1, 1, 3, 2, 4, 5, 6, 8, 7, 10)
#' nmse(o=obs, p=pred)
#' @export
nmse<-function(o, p){
  mse(o,p)/stats::var(o)
}
/scratch/gouwar.j/cran-all/cranData/valmetrics/R/nmse.R
#' @title nrmse
#' @description Calculates the Normalised RMSE (NRMSE) from observed
#' and predicted values.
#' @inherit mae return author
#' @inheritParams mae
#' @return Normalised RMSE (NRMSE)
#' @details Interpretation: smaller is better. Normalised RMSE (NRMSE) is computed
#' as the RMSE divided by the mean of the observed values. NRMSE is sometimes
#' called Relative RMSE (rRMSE) or Root mean square standardized (RMSS).
#' @inherit mae return references
#' @examples
#' obs<-c(1:10)
#' pred<-c(1, 1, 3, 2, 4, 5, 6, 8, 7, 10)
#' nrmse(o=obs, p=pred)
#'
#' @export
nrmse<-function(o, p){
  rmse(o,p)/mean(o)
}
/scratch/gouwar.j/cran-all/cranData/valmetrics/R/nrmse.R
#' @title nu
#' @description Calculates the Non-unity slope (NU) from observed
#' and predicted values.
#' @inherit mae return author
#' @inheritParams mae
#' @return Non-unity slope (NU)
#' @details Interpretation: a regression slope closer to 1 is better, i.e. smaller
#' NU values are better; NU is zero when the slope of predicted versus observed
#' values equals 1.
#' @inherit mae return references
#' @examples
#' obs<-c(1:10)
#' pred<-c(1, 1, 3, 2, 4, 5, 6, 8, 7, 10)
#' nu(o=obs, p=pred)
#'
#' @export
nu<-function(o, p){
  b<-sum((p-mean(p))*(o-mean(o)))/sst(o)
  ((1-b)^2)*sum((o^2)/length(o))
}
/scratch/gouwar.j/cran-all/cranData/valmetrics/R/nu.R
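The quantity b inside nu() equals the ordinary least-squares slope of regressing predicted on observed values, so NU measures the squared departure of that slope from 1, scaled by the mean squared observation. A check of this reading (my gloss, not from the package docs), assuming nu() and sst() are sourced:

obs  <- 1:10
pred <- c(1, 1, 3, 2, 4, 5, 6, 8, 7, 10)
b <- sum((pred-mean(pred))*(obs-mean(obs)))/sst(obs)
all.equal(b, unname(coef(lm(pred ~ obs))["obs"]))   # TRUE: b is the regression slope
nu(o=obs, p=pred)                                   # equals ((1-b)^2)*mean(obs^2)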
#' @title precision
#' @description Calculates the Precision from observed and predicted values.
#' @inherit mae return author
#' @inheritParams mae
#' @return Precision
#' @details Interpretation: smaller is better.
#' @inherit mae return references
#' @examples
#' obs<-c(1:10)
#' pred<-c(1, 1, 3, 2, 4, 5, 6, 8, 7, 10)
#' precision(o=obs, p=pred)
#'
#' @export
precision<-function(o, p){
  sqrt((rmse(o,p)^2)+(me(o,p)^2))
}
/scratch/gouwar.j/cran-all/cranData/valmetrics/R/precision.R
#' @title r
#' @description Calculates the Pearson product moment correlation coefficient (r)
#' from observed and predicted values.
#' @inherit mae return author
#' @inheritParams mae
#' @return Pearson product moment correlation coefficient (r).
#'
#' @details Interpretation: larger absolute value is better.
#'
#' @examples
#' obs<-c(1:10)
#' pred<-c(1, 1, 3, 2, 4, 5, 6, 8, 7, 10)
#' r(o=obs, p=pred)
#'
#' @export
r<-function(o, p){
  sum((o-mean(o))*(p-mean(p)))/sqrt(sum((o-mean(o))^2)*sum((p-mean(p))^2))
}
/scratch/gouwar.j/cran-all/cranData/valmetrics/R/r.R
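The formula in r() is the sample Pearson correlation, so it should agree with base R's cor(), whose default method is Pearson. A quick check (not part of the package), with r() sourced:

obs  <- 1:10
pred <- c(1, 1, 3, 2, 4, 5, 6, 8, 7, 10)
all.equal(r(o=obs, p=pred), cor(obs, pred))   # TRUE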
#' @title r2
#' @description Calculates the Coefficient of determination (R2) from observed
#' and predicted values.
#' @inherit mae return author
#' @inheritParams mae
#' @return Coefficient of determination (R2)
#' @details Interpretation: larger is better.
#' @examples
#' obs<-c(1:10)
#' pred<-c(1, 1, 3, 2, 4, 5, 6, 8, 7, 10)
#' r2(o=obs, p=pred)
#'
#' @export
r2<-function(o, p){
  #rsq in Excel
  (sum((o-mean(o))*(p-mean(p)))/sqrt(sum((o-mean(o))^2)*sum((p-mean(p))^2)))^2
}
/scratch/gouwar.j/cran-all/cranData/valmetrics/R/r2.R
#' @title rmdse
#' @description Calculates the Root median squared error (RMdSE) from observed
#' and predicted values.
#' @inherit mae return author
#' @inheritParams mae
#' @return Root median squared error (RMdSE).
#' @details Interpretation: smaller is better.
#' @inherit mae return references
#' @examples
#' obs<-c(1:10)
#' pred<-c(1, 1, 3, 2, 4, 5, 6, 8, 7, 10)
#' rmdse(o=obs, p=pred)
#'
#' @export
rmdse<-function(o, p){
  sqrt(stats::median((p-o)^2))
}
/scratch/gouwar.j/cran-all/cranData/valmetrics/R/rmdse.R
#' @title rmse
#' @description Calculates the Root mean square error (RMSE) from observed and
#' predicted values.
#' @inherit mae return author
#' @inheritParams mae
#' @return Root mean square error (RMSE)
#'
#' @details Interpretation: smaller is better. RMSE is sometimes abbreviated RMS, RMSD
#' or RMSEP. A smaller value means a smaller error. RMSE is similar to mean
#' absolute error (MAE), median absolute deviation (MAD) and root median squared
#' error (RMdSE) but is more sensitive to large errors.
#' @inherit mae return references
#' @examples
#' obs<-c(1:10)
#' pred<-c(1, 1, 3, 2, 4, 5, 6, 8, 7, 10)
#' rmse(o=obs, p=pred)
#'
#' @export
rmse<-function(o, p){
  sqrt(mean((p-o)^2))
}
/scratch/gouwar.j/cran-all/cranData/valmetrics/R/rmse.R
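Two quick checks (not part of the package) of the relationships stated above, with rmse(), mse() and rmdse() sourced: RMSE is the square root of MSE, and the median-based RMdSE is far less affected by a single large error.

obs  <- 1:10
pred <- c(1:9, 30)                                        # one large error
all.equal(rmse(o=obs, p=pred), sqrt(mse(o=obs, p=pred)))  # TRUE
rmse(o=obs, p=pred)    # ~6.32, dominated by the outlier
rmdse(o=obs, p=pred)   # 0, the median squared error ignores it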
#' @title rpd
#' @description Calculates the Ratio of performance to deviation (RPD) from observed
#' and predicted values.
#' @author Kristin Piikki, Johanna Wetterlind, Mats Soderstrom & Bo Stenberg
#' \email{kristin.piikki@@slu.se}
#' @inheritParams mae
#' @return Ratio of performance to deviation (RPD).
#' @details Interpretation: larger is better.
#' @inherit mae return references
#' @examples
#' obs<-c(1:10)
#' pred<-c(1, 1, 3, 2, 4, 5, 6, 8, 7, 10)
#' rpd(o=obs, p=pred)
#'
#' @export
rpd<-function(o, p){
  stats::sd(o)/rmse(o,p)
}
/scratch/gouwar.j/cran-all/cranData/valmetrics/R/rpd.R
#' @title rpiq
#' @description Calculates the Ratio of interquartile to RMSE (RPIQ) from observed
#' and predicted values.
#' @author Kristin Piikki, Johanna Wetterlind, Mats Soderstrom & Bo Stenberg
#' \email{kristin.piikki@@slu.se}
#' @inheritParams mae
#' @return Ratio of interquartile to RMSE (RPIQ)
#' @details Interpretation: larger is better.
#' @references Bellon-Maurel V., Fernandez-Ahumada E., Palagos B., Roger J. M.,
#' McBratney, A. 2010. Critical review of chemometric indicators commonly used
#' for assessing the quality of the prediction of soil attributes by NIR
#' spectroscopy. TrAC Trends in Analytical Chemistry, 29(9), 1073-1081.
#' @references Piikki K., Wetterlind J., Soderstrom M., Stenberg B. (2021). Perspectives
#' on validation in digital soil mapping of continuous attributes. A review.
#' Soil Use and Management. \doi{10.1111/sum.12694}
#' @examples
#' obs<-c(1:10)
#' pred<-c(1, 1, 3, 2, 4, 5, 6, 8, 7, 10)
#' rpiq(o=obs, p=pred)
#'
#' @export
rpiq<-function(o, p){
  iqr(o)/rmse(o,p)
}
/scratch/gouwar.j/cran-all/cranData/valmetrics/R/rpiq.R
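A small sketch (not part of the package) illustrating the corrected interpretation above that larger RPIQ is better; it assumes the package's iqr() helper, called inside rpiq(), returns the interquartile range of the observed values.

set.seed(1)
obs  <- 1:10
good <- obs + rnorm(10, sd=0.1)   # small errors -> small RMSE -> large RPIQ
bad  <- obs + rnorm(10, sd=3)     # large errors -> large RMSE -> small RPIQ
rpiq(o=obs, p=good) > rpiq(o=obs, p=bad)   # TRUE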
#' @title sde
#' @description Calculates the Standard deviation of the error (SDE) from observed and
#' predicted values.
#' @inherit mae return author
#' @inheritParams mae
#' @return Standard deviation of the error (SDE).
#'
#' @details Interpretation: smaller is better.
#'
#' @inherit mae return references
#'
#' @examples
#' obs<-c(1:10)
#' pred<-c(1, 1, 3, 2, 4, 5, 6, 8, 7, 10)
#' sde(o=obs, p=pred)
#'
#' @export
sde<-function(o, p){
  stats::sd(p-o)
}
/scratch/gouwar.j/cran-all/cranData/valmetrics/R/sde.R
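The bias, spread and total-error metrics above are linked: the MSE decomposes into the squared mean error plus the population variance of the residuals. A numerical check of this identity (my addition, not from the package docs), with me(), mse() and sde() sourced:

obs  <- 1:10
pred <- c(1, 1, 3, 2, 4, 5, 6, 8, 7, 10)
n <- length(obs)
all.equal(mse(o=obs, p=pred),
          me(o=obs, p=pred)^2 + sde(o=obs, p=pred)^2*(n-1)/n)   # TRUE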
#' @title skew
#' @description Calculates the Skewness of residuals from observed and
#' predicted values.
#' @inherit mae return author
#' @inheritParams mae
#' @return Skewness of residuals.
#'
#' @details Interpretation: the skewness is signed, so a value closer to zero
#' (symmetric residuals) is better.
#'
#' @inherit mae return references
#'
#' @examples
#' obs<-c(1:10)
#' pred<-c(1, 1, 3, 2, 4, 5, 6, 8, 7, 10)
#' skew(o=obs, p=pred)
#'
#' @export
skew<-function(o, p){
  res<-p-o
  mean(((res-mean(res))/stats::sd(res))^3)
}
/scratch/gouwar.j/cran-all/cranData/valmetrics/R/skew.R
#' @title smape
#' @description Calculates the Symmetric mean absolute percentage error (SMAPE)
#' from observed and predicted values.
#' @inherit mae return author
#' @inheritParams mae
#' @return Symmetric mean absolute percentage error (SMAPE)
#' @details Interpretation: smaller is better.
#' @references Forkuor G., Hounkpatin O. K., Welp G., Thiel, M. 2017. High
#' resolution mapping of soil properties using remote sensing variables in
#' south-western Burkina Faso: a comparison of machine learning and multiple
#' linear regression models. PloS one, 12(1), e0170478.
#' @references Piikki K., Wetterlind J., Soderstrom M., Stenberg B. (2021). Perspectives
#' on validation in digital soil mapping of continuous attributes. A review.
#' Soil Use and Management. \doi{10.1111/sum.12694}
#' @examples
#' obs<-c(1:10)
#' pred<-c(1, 1, 3, 2, 4, 5, 6, 8, 7, 10)
#' smape(o=obs, p=pred)
#'
#' @export
smape<-function(o, p){
  a<-abs(o-p)
  b<-o+p
  mean(2*a/b)
}
/scratch/gouwar.j/cran-all/cranData/valmetrics/R/smape.R
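A brief check (not part of the package) of the symmetry implied by the name: because the denominator is o + p, swapping observed and predicted values leaves SMAPE unchanged; note that the data should be strictly positive for the ratio to be meaningful. With smape() sourced:

obs  <- 1:10
pred <- c(1, 1, 3, 2, 4, 5, 6, 8, 7, 10)
all.equal(smape(o=obs, p=pred), smape(o=pred, p=obs))   # TRUE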
#' @title sse
#' @description Calculates the sum of squares for error (SSE) from observed and
#' predicted values.
#' @inherit mae return author
#' @inheritParams mae
#' @return Sum of squares for error (SSE).
#' @details Interpretation: smaller is better.
#' @examples
#' obs<-c(1:10)
#' pred<-c(1, 1, 3, 2, 4, 5, 6, 8, 7, 10)
#' sse(o=obs, p=pred)
#'
#' @export
sse<-function(o, p){
  sum((o-p)^2)
}
/scratch/gouwar.j/cran-all/cranData/valmetrics/R/sse.R
#' @title sst
#' @description Calculates the Total sums of squares (SST) from a vector of
#' observed values.
#' @inherit mae return author
#' @inheritParams iqr
#' @return Total sums of squares (SST).
#'
#' @details SST quantifies the total variation in the observed values. It is not a
#' validation metric in itself, but it is used in the computation of other metrics.
#'
#' @examples
#' obs<-c(1:10)
#' sst(o=obs)
#'
#' @export
sst<-function(o){
  sum((o-mean(o))^2)
}
/scratch/gouwar.j/cran-all/cranData/valmetrics/R/sst.R
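SST serves as the denominator of several normalised metrics; for example, the Nash-Sutcliffe modelling efficiency (the package's e(), mentioned in the vignette below) is conventionally computed as 1 - SSE/SST. A sketch of that relation (my assumption about e(); verify against the package), with sse() and sst() sourced:

obs  <- 1:10
pred <- c(1, 1, 3, 2, 4, 5, 6, 8, 7, 10)
1 - sse(o=obs, p=pred)/sst(o=obs)   # ~0.867, the Nash-Sutcliffe efficiency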
#' @title valplot
#' @description Plots predicted values versus observed values in a
#' coordinate system with the same range of both axes.
#' @inherit mae return author
#' @inheritParams mae
#' @param main A character value. The main title of the plot.
#'
#' @param sub A character value. The subtitle of the plot.
#'
#' @param xlab A character value. The x axis label.
#'
#' @param ylab A character value. The y axis label.
#' @return A scatter plot of observed and predicted values.
#' @details Circles represent the data, the dashed line represents observed =
#' predicted and the solid line represents a linear regression of the predicted
#' values on the observed values (stats::lm(p ~ o)).
#' @examples
#' obs<-c(1:10)
#' pred<-c(1, 1, 3, 5, 4, 5, 6, 8, 11, 10)
#' t1='Measured variable (unit)'
#' evalue<-round(e(o=obs, p=pred),2)
#' maevalue<-round(mae(o=obs, p=pred),1)
#' t2=paste('E = ', evalue, '; MAE = ', maevalue, ' units')
#' valplot(o=obs, p=pred, main=t1, sub=t2)
#' @export
valplot<-function(o, p, main=NA, sub=NA, xlab='Observed value',
                  ylab='Predicted value'){
  oldpar <- graphics::par(no.readonly = TRUE)
  on.exit(graphics::par(oldpar))
  graphics::par(pty="s")
  plot(o, p,
       xlab=xlab,
       ylab=ylab,
       xlim=range(pretty(c(o,p))),
       ylim=range(pretty(c(o,p))),
       xaxt="n",
       yaxt="n",
       pch=16,
       cex.lab=1)
  graphics::abline(stats::lm(p ~ o), lty = "solid")
  graphics::abline(a=0, b=1, lty = "dashed")
  if(!is.na(main)&is.na(sub)) graphics::mtext(side=3, line=0.6, adj=0, cex=1, main)
  if(!is.na(main)&!is.na(sub)) graphics::mtext(side=3, line=2, adj=0, cex=1, main)
  if(!is.na(sub)) graphics::mtext(side=3, line=0.6, adj=0, cex=0.75, sub)
  graphics::axis(1, at=pretty(c(o,p)), cex.axis=0.75, font=1, tcl=0.3)
  graphics::axis(2, at=pretty(c(o,p)), cex.axis=0.75, font=1, tcl=0.3)
}
/scratch/gouwar.j/cran-all/cranData/valmetrics/R/valplot.R
## ----setup, include = FALSE---------------------------------------------------
knitr::opts_chunk$set(
  collapse = TRUE,
  comment = "#>"
)
/scratch/gouwar.j/cran-all/cranData/valmetrics/inst/doc/notes.R
---
title: "Notes on the valmetrics package"
author: "Kristin Piikki, Johanna Wetterlind, Mats Soderstrom and Bo Stenberg"
date: "`r Sys.Date()`"
output:
  rmarkdown::html_vignette:
    toc: true
vignette: >
  %\VignetteIndexEntry{Notes on the valmetrics package}
  %\VignetteEngine{knitr::knitr}
  %\VignetteEncoding{UTF-8}
  %\usepackage[utf8]{inputenc}
  %\DeclareUnicodeCharacter{2010}{-}
  %\DeclareUnicodeCharacter{00A0}{ }
---

```{r setup, include = FALSE}
knitr::opts_chunk$set(
  collapse = TRUE,
  comment = "#>"
)
```

## 1 Background

When models or maps are evaluated, validation metrics of model or map performance are commonly computed from vectors of observed values and their corresponding predicted values. In a systematic review of validation practices in the scientific literature on digital soil mapping, about thirty different validation metrics were found to be in use [Piikki et al., 2021](https://doi.org/10.1111/sum.12694). These measures are sensitive to different aspects of model performance. Some are sensitive to random errors, others to systematic errors, and yet others to both (the total error). In addition, some metrics are sensitive to the range of the observed data and some to the number of observations in the dataset. Finally, a few are constructed to be sensitive to the number of model parameters, in order to penalize model complexity. What a validation metric is sensitive to determines whether it is suitable to compare it between datasets or response variables (the modelled or mapped entity). Functions to compute the validation metrics listed by Piikki et al. (2021) are provided in the R package valmetrics.

The present document is a simulation study aiming to demonstrate the sensitivities of the validation metrics to: i) different types of error (random errors, systematic errors and their combinations), and ii) different dataset properties (spread in observed values, number of observations (n) and their combinations). The demonstration is based on synthetic datasets of predicted and observed values with different levels of random and systematic errors, different ranges in observed values and different numbers of observations.

## 2 Materials and methods

### 2.1 A synthetic dataset for simulation of sensitivities to different types of error

First, a dataset of synthetic observed values was defined as all integers from 5 to 15. Then twenty-five different prediction sets were constructed, one for each of the orthogonal combinations of five levels (weights) of systematic errors and five levels of random errors. The synthetic predictions were computed according to:

$$p_{ijk} = o_{ijk} + w_{ri} \times e_{rk} + w_{sj} \times e_{sk}$$

where p is a vector of predicted values, o is a vector of observed values, w_r is a vector of weights of the random error, w_s is a vector of weights of the systematic error, e_r is a vector of random errors and e_s is a vector of systematic errors (a constant bias). The vectors o, e_r and e_s, all of length 11, are:

$$o = [5, 6, 7, \ldots, 15]$$

$$w_r = w_s = [0, 0.25, 0.5, 0.75, 1]$$

$$e_r = [0, -2, 4, 2, -8, 6, -4, -6, 10, -10, 8]$$

$$e_s \approx [11, 11, 11, \ldots, 11]$$

The error weights were thus 0 %, 25 %, 50 %, 75 % and 100 %. The vector e_r was obtained by sampling an ordered vector of integers between -5 and 5 without replacement and multiplying by 2. The constant bias of 11 was chosen such that the systematic errors would be of the same magnitude as the random errors: a constant offset of 2 times the mean of the absolute random errors,

$$2 \times \mathrm{mean}(|e_r|) = 10.90909$$

### 2.2 A synthetic dataset for simulation of sensitivities to dataset properties

The synthetic dataset for simulation of sensitivities to dataset properties was constructed for one selected level of systematic and random errors (w_r = w_s = 0.25). The vector of observed data (o) was linearly scaled to the ranges [9, 11], [7, 13], [5, 15], [3, 17] and [1, 19], i.e. to ranges that are 20 %, 60 %, 100 %, 140 % and 180 % of the original range in observed values:

$$ranges = [0.2, 0.6, 1, 1.4, 1.8]$$

Then, predicted values were computed according to equation 1. The resulting dataset was replicated 1, 2, 3, 4 or 5 times to obtain different numbers of observations, n:

$$n = [11, 22, 33, 44, 55]$$

In this way, 25 datasets were constructed, one for each combination of the five ranges and the five numbers of observations.

## 3 Results

### 3.1 The synthetic datasets

Plots of predicted values versus observed values in the synthetic datasets are presented in Figures 1 and 2.

![Figure 1. Synthetic data where five levels of random error and five levels of systematic error are added to the observed dataset to derive the predicted values. The plot denoted with an asterisk is the same as the plot denoted with an asterisk in Figure 2.](fig1.jpg){width=85%}

![Figure 2. Synthetic data with five different ranges and five different numbers of observations. The random and systematic error weights are 25 % in all cases. The plot denoted with an asterisk is the same as the plot denoted with an asterisk in Figure 1. Note that a small random error was added to the observed and random errors in order to make it visible that the numbers of observations differ. These errors were not added to the data used for the simulation/demonstration.](fig2.jpg){width=85%}

### 3.2 Simulation results

Figures 3 and 4 show 28 validation metrics for the 25 datasets in Figure 1 and the 25 datasets in Figure 2. In Figure 3, it is evident that ac, adjr2, aic, e, lc, lccc, mad, mae, mape, mare, msdr, mse, nmse, nrmse, nu, precision, rmdse, rmse, rpd, rpiq, smape and sse are sensitive to both random and systematic errors, while mde, mdse and me (also called bias) are sensitive only to systematic error and nu, r, r2 and sde are sensitive only to random error. The metrics are denoted by their function names in the valmetrics R package. Equations are given by Piikki et al. (2021).

![Figure 3. Validation metric values in relation to added levels (weights) of systematic and random errors. Color scales are stretched for each plot. The 25 circles within each plot are arranged in the same order as the plots in Figure 1. Missing circles mean that the validation metric is infinite because there is no error (i.e. when observed values are equal to the predicted values).](fig3.jpg){width=85%}

The metrics ac, adjr2, e, lc, lccc, mape, msdr, nmse, nu, r, r2, rpd, rpiq and smape are sensitive only to the spread in observed values (or sensitive to the spread and only to a very limited degree to n), while aic, sse and sde are sensitive to the size of the dataset (the number of observations, n). Eleven of the metrics were insensitive to dataset range and size.

![Figure 4. Validation metric values in relation to dataset range and number of observations. Color scales are stretched for each plot. The 25 circles within each plot are arranged in the same order as the plots in Figure 2.](fig4.jpg){width=85%}

## 4 Discussion

Validation metrics that are sensitive to the number of samples or to the spread in observed values cannot be compared directly between datasets with different value ranges or different numbers of observations, or at least this should be borne in mind. For example, if a digital soil map of soil organic carbon is made for a continent, say Africa, and another soil organic carbon map is made for a country, say Tanzania, and the Nash-Sutcliffe modelling efficiency (e) is higher for the continental map than for the national map, that does not necessarily mean that at a specific location in Tanzania the continental map is better than the national map. The national map may well have a smaller error (e.g. a lower mae) at that location. The reason the e values are not directly comparable is that the continent presumably has a larger range in soil organic carbon values than the country.

It is evident from Figure 3 that several validation metrics show similar sensitivity patterns to random and systematic errors. We hope that the present simulation study will help guide modellers in choosing a set of validation metrics that are suitable for the comparisons to be made and that show different aspects of model performance, rather than being too similar. One validation metric (skewness of residuals, skew) was excluded from this study as it reflects a type of systematic error that was not tested here.

## 5 Reference

Piikki K., Wetterlind J., Soderstrom M., Stenberg B. 2021. Perspectives on validation in digital soil mapping of continuous attributes – a review. Soil Use and Management. [link](https://doi.org/10.1111/sum.12694)
/scratch/gouwar.j/cran-all/cranData/valmetrics/inst/doc/notes.Rmd
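A minimal sketch (mine, not shipped with the package) reproducing the synthetic-data construction in section 2.1 of the vignette above; the e_r values are copied verbatim from the vignette and the bias is 2*mean(abs(e_r)) as stated there:

o   <- 5:15                                        # synthetic observed values
e_r <- c(0, -2, 4, 2, -8, 6, -4, -6, 10, -10, 8)   # fixed "random" errors
e_s <- rep(2*mean(abs(e_r)), length(o))            # constant bias, ~10.909
w   <- c(0, 0.25, 0.5, 0.75, 1)                    # error weights
# one predicted vector for each of the 25 weight combinations (equation 1)
preds <- lapply(w, function(wr) lapply(w, function(ws) o + wr*e_r + ws*e_s))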
---
title: "Notes on the valmetrics package"
author: "Kristin Piikki, Johanna Wetterlind, Mats Soderstrom and Bo Stenberg"
date: "`r Sys.Date()`"
output:
  rmarkdown::html_vignette:
    toc: true
vignette: >
  %\VignetteIndexEntry{Notes on the valmetrics package}
  %\VignetteEngine{knitr::knitr}
  %\VignetteEncoding{UTF-8}
  %\usepackage[utf8]{inputenc}
  %\DeclareUnicodeCharacter{2010}{-}
  %\DeclareUnicodeCharacter{00A0}{ }
---

```{r setup, include = FALSE}
knitr::opts_chunk$set(
  collapse = TRUE,
  comment = "#>"
)
```

## 1 Background

When models or maps are evaluated, validation metrics of model or map performance are commonly computed from vectors of observed values and their corresponding predicted values. In a systematic review of validation practices in the scientific literature on digital soil mapping, about thirty different validation metrics were found to be in use [Piikki et al., 2021](https://doi.org/10.1111/sum.12694). These measures are sensitive to different aspects of model performance. Some are sensitive to random errors, others to systematic errors, and yet others to both (the total error). In addition, some metrics are sensitive to the range of the observed data and some to the number of observations in the dataset. Finally, a few are constructed to be sensitive to the number of model parameters, in order to penalize model complexity. What a validation metric is sensitive to determines whether it is suitable to compare it between datasets or response variables (the modelled or mapped entity). Functions to compute the validation metrics listed by Piikki et al. (2021) are provided in the R package valmetrics.

The present document is a simulation study aiming to demonstrate the sensitivities of the validation metrics to: i) different types of error (random errors, systematic errors and their combinations), and ii) different dataset properties (spread in observed values, number of observations (n) and their combinations). The demonstration is based on synthetic datasets of predicted and observed values with different levels of random and systematic errors, different ranges in observed values and different numbers of observations.

## 2 Materials and methods

### 2.1 A synthetic dataset for simulation of sensitivities to different types of error

First, a dataset of synthetic observed values was defined as all integers from 5 to 15. Then twenty-five different prediction sets were constructed, one for each of the orthogonal combinations of five levels (weights) of systematic errors and five levels of random errors. The synthetic predictions were computed according to:

$$p_{ijk} = o_{ijk} + w_{ri} \times e_{rk} + w_{sj} \times e_{sk}$$

where p is a vector of predicted values, o is a vector of observed values, w_r is a vector of weights of the random error, w_s is a vector of weights of the systematic error, e_r is a vector of random errors and e_s is a vector of systematic errors (a constant bias). The vectors o, e_r and e_s, all of length 11, are:

$$o = [5, 6, 7, \ldots, 15]$$

$$w_r = w_s = [0, 0.25, 0.5, 0.75, 1]$$

$$e_r = [0, -2, 4, 2, -8, 6, -4, -6, 10, -10, 8]$$

$$e_s \approx [11, 11, 11, \ldots, 11]$$

The error weights were thus 0 %, 25 %, 50 %, 75 % and 100 %. The vector e_r was obtained by sampling an ordered vector of integers between -5 and 5 without replacement and multiplying by 2. The constant bias of 11 was chosen such that the systematic errors would be of the same magnitude as the random errors: a constant offset of 2 times the mean of the absolute random errors,

$$2 \times \mathrm{mean}(|e_r|) = 10.90909$$

### 2.2 A synthetic dataset for simulation of sensitivities to dataset properties

The synthetic dataset for simulation of sensitivities to dataset properties was constructed for one selected level of systematic and random errors (w_r = w_s = 0.25). The vector of observed data (o) was linearly scaled to the ranges [9, 11], [7, 13], [5, 15], [3, 17] and [1, 19], i.e. to ranges that are 20 %, 60 %, 100 %, 140 % and 180 % of the original range in observed values:

$$ranges = [0.2, 0.6, 1, 1.4, 1.8]$$

Then, predicted values were computed according to equation 1. The resulting dataset was replicated 1, 2, 3, 4 or 5 times to obtain different numbers of observations, n:

$$n = [11, 22, 33, 44, 55]$$

In this way, 25 datasets were constructed, one for each combination of the five ranges and the five numbers of observations.

## 3 Results

### 3.1 The synthetic datasets

Plots of predicted values versus observed values in the synthetic datasets are presented in Figures 1 and 2.

![Figure 1. Synthetic data where five levels of random error and five levels of systematic error are added to the observed dataset to derive the predicted values. The plot denoted with an asterisk is the same as the plot denoted with an asterisk in Figure 2.](fig1.jpg){width=85%}

![Figure 2. Synthetic data with five different ranges and five different numbers of observations. The random and systematic error weights are 25 % in all cases. The plot denoted with an asterisk is the same as the plot denoted with an asterisk in Figure 1. Note that a small random error was added to the observed and random errors in order to make it visible that the numbers of observations differ. These errors were not added to the data used for the simulation/demonstration.](fig2.jpg){width=85%}

### 3.2 Simulation results

Figures 3 and 4 show 28 validation metrics for the 25 datasets in Figure 1 and the 25 datasets in Figure 2. In Figure 3, it is evident that ac, adjr2, aic, e, lc, lccc, mad, mae, mape, mare, msdr, mse, nmse, nrmse, nu, precision, rmdse, rmse, rpd, rpiq, smape and sse are sensitive to both random and systematic errors, while mde, mdse and me (also called bias) are sensitive only to systematic error and nu, r, r2 and sde are sensitive only to random error. The metrics are denoted by their function names in the valmetrics R package. Equations are given by Piikki et al. (2021).

![Figure 3. Validation metric values in relation to added levels (weights) of systematic and random errors. Color scales are stretched for each plot. The 25 circles within each plot are arranged in the same order as the plots in Figure 1. Missing circles mean that the validation metric is infinite because there is no error (i.e. when observed values are equal to the predicted values).](fig3.jpg){width=85%}

The metrics ac, adjr2, e, lc, lccc, mape, msdr, nmse, nu, r, r2, rpd, rpiq and smape are sensitive only to the spread in observed values (or sensitive to the spread and only to a very limited degree to n), while aic, sse and sde are sensitive to the size of the dataset (the number of observations, n). Eleven of the metrics were insensitive to dataset range and size.

![Figure 4. Validation metric values in relation to dataset range and number of observations. Color scales are stretched for each plot. The 25 circles within each plot are arranged in the same order as the plots in Figure 2.](fig4.jpg){width=85%}

## 4 Discussion

Validation metrics that are sensitive to the number of samples or to the spread in observed values cannot be compared directly between datasets with different value ranges or different numbers of observations, or at least this should be borne in mind. For example, if a digital soil map of soil organic carbon is made for a continent, say Africa, and another soil organic carbon map is made for a country, say Tanzania, and the Nash-Sutcliffe modelling efficiency (e) is higher for the continental map than for the national map, that does not necessarily mean that at a specific location in Tanzania the continental map is better than the national map. The national map may well have a smaller error (e.g. a lower mae) at that location. The reason the e values are not directly comparable is that the continent presumably has a larger range in soil organic carbon values than the country.

It is evident from Figure 3 that several validation metrics show similar sensitivity patterns to random and systematic errors. We hope that the present simulation study will help guide modellers in choosing a set of validation metrics that are suitable for the comparisons to be made and that show different aspects of model performance, rather than being too similar. One validation metric (skewness of residuals, skew) was excluded from this study as it reflects a type of systematic error that was not tested here.

## 5 Reference

Piikki K., Wetterlind J., Soderstrom M., Stenberg B. 2021. Perspectives on validation in digital soil mapping of continuous attributes – a review. Soil Use and Management. [link](https://doi.org/10.1111/sum.12694)
/scratch/gouwar.j/cran-all/cranData/valmetrics/vignettes/notes.Rmd
# v3.07 - Bug fixes in valorate.comb, valorate.plot.empirical, plots saying "Mutations", "Events". y.limit
# v3.06 - Many small changes due to package distribution
# v3.05 - Add scaled plot of empirical distributions
# v3.04 - Add adjustment of p-values for two-tails tests
# v3.03 - Add TIE estimations (same event type) for pValue and statistic
# v3.02 - Add estimation of all combinations
# v3.01 - Add valorate.plot.kaplan, valorate.risk, added kullback-leibler in valorate.plot.diff.empirical and correction of maximum
# v3.00 - Implements weights (should be included in the SUM OF V, see Peto, Tarone-Ware, Flemington weights in Kleinbaum : Survival Analysis - A self learning text)
# v2.31 - default for min.sampling.size raised to 1000
# v2.3  - Bug corrections for TIES estimations (also speed improvements)
# v2.2  - alpha function missing from scales package
# v2.1  - IMPLEMENTATION OF TIES
# v2.1  - Implementation of shades in valorate.plot.empirical and addition of valorate.plot.diff.empirical
# v2.0  - C implementation
# v2.0  - options to avoid (beta, weibull, etc) parameter estimations; estimated only if needed (to save processing time)
# v2.0  - vjx and vjcx matrices now only 1 matrix (only the division term)
# v1.2  - Removed "&& k.dens[k+1]" (not so sure since it will be futile)
# v1.1  - Added min.sampling.size
# v1.?  - sampling.size is now used to express "total sampling" instead of sampling in any number of events.
# PENDING:
# Reverted changes:
# v2.31 - pvalues are now 0-1 range, p-value in sampling is now estimated as V > sampling if V >= 0 or V < sampling when V < 0

#require(methods)
#require(survival)

setClass("valorate", representation(
  s="numeric", n="numeric", events="numeric", parameters="list",
  sampling.size="numeric", min.sampling.size="numeric", verbose="logical",
  save.sampling="logical", wcensored="numeric", wevents="numeric",
  order="numeric", subpop="environment", time="character", samplings="list",
  ties="list", sampling.ties="numeric", tiesame="list", tiesame.pos="numeric",
  tiesame.sampling="numeric", method="character",
  estimate.distribution.parameters="character", weights="numeric",
  weights.method="character", tails="numeric"
))

#valorate.libfile <- paste("valorate",.Platform$dynlib.ext,sep="")
#if (file.exists(valorate.libfile)) {
#  if (!is.loaded("valorate_samplings",PACKAGE="valorate")) {
#    oklib <- NULL
#    try(oklib <- dyn.load(valorate.libfile))
#    if (is.null(oklib)) {
#      cat("There are problems loading C methods library [",valorate.libfile,"].\n")
#    }
#  }
#}

# Create valorate object
new.valorate <- function(time, status, censored, rank,
    sampling.size=max(10000,200000/events), min.sampling.size=1000,
    tails=2, sampling.ties=30,
    weights.method=c("logrank","Wilcoxon","Tarone-Ware","Peto","Flemington-Harrington","Trevino","user")[1],
    weights.parameters=list(p=1,q=1,t=3), weights=NULL,
    verbose=FALSE, save.sampling=TRUE, method="C",
    estimate.distribution.parameters=c("empirical","gaussian","beta","weibull")[1]
    ) {
  # Input:
  # (1) time=Survival Times and censored=censored indicator (1=censored, 0=dead)
  # (2) time=Survival Times with censoring indicator "+" (censored=missing)
  # (3) rank=0/1 ranked subjects, 0 is censored, 1 is event
  # (4) time=Survival Times and status=status indicator (1=dead, 0=censored)
  # Create valorate object
  cPath <- "" # this may be an argument but was removed when setting up the R package
  libfile <- paste(cPath, "valorate_samplings",.Platform$dynlib.ext,sep="")
  loaded <- is.loaded("valorate_samplings",PACKAGE="valorate")
  if (method == "C" && !loaded) {
    if (!file.exists(libfile)) {
      cat("C methods library has not been loaded and does not seem to be present on [",libfile,"]. Setting to R methods.\n")
      method <- "R"
    } else {
      oklib <- NULL
      try(oklib <- dyn.load(libfile))
      if (is.null(oklib)) {
        cat("There are problems loading C methods library [",libfile,"], setting to R methods.\n")
        method <- "R"
      }
    }
  }
  vro <- new("valorate")
  vro@time <- character(0)
  ties <- list()
  tiesame <- list()
  if (!missing(status)) censored <- 1 - 1*as.numeric(status)
  if (!missing(time) && (!missing(censored) || !missing(status)) && missing(rank)) {
    if (is.numeric(time) && (is.numeric(censored) || is.logical(censored)) && length(time) == length(censored)) {
      o <- order(time)
      s <- 1-(1*censored[o])
      vro@time <- paste(time[o],ifelse(censored[o],"+",""),sep="")
      vro@order <- o
    } else {
      stop("time needs to be numeric, censored needs to be numeric (0/1) or logical.")
    }
  } else if (!missing(time) && missing(censored) && missing(status) && missing(rank)) {
    if (is.character(time)) {
      newtime <- gsub("[^0-9Ee\\.\\+\\-]", "", time)
      wc <- grep("\\+$",newtime)
      s <- rep(1,length(time))
      s[wc] <- 0
      thetimes <- as.numeric(gsub("\\+","",newtime))
      if (any(!is.finite(thetimes))) {
        stop(paste("problems with time parameter:",paste(newtime,collapse=", ")))
      }
      o <- order(thetimes)
      s <- s[o]
      vro@order <- o
      vro@time <- paste(thetimes[o],ifelse(s,"","+"),sep="")
    } else {
      stop("time needs to be character when no censored parameter is provided.")
    }
  } else if (missing(time) && missing(censored) && missing(status) && !missing(rank)) {
    if (is.numeric(rank) || is.logical(rank)) {
      s <- 1*(rank==1)
      vro@order <- 1:length(s)
      vro@time <- paste(1:length(s), ifelse(s,"","+"),sep="")
    } else {
      stop("rank needs to be numeric (0/1) or logical.")
    }
  } else {
    stop("Invalid input.\nPossible inputs:\n\t(1) time:numeric & censored:numeric/logical,\n\t(2) time:character using '+' as censoring at the end,\n\t(3) rank:numeric/logical,\n\t(4) time:numeric & status:numeric/logical.")
  }
  weights.method <- match.arg(weights.method, c("logrank","Wilcoxon","Tarone-Ware","Peto","Flemington-Harrington","Trevino","user"))
  if (!is.null(weights)) {
    if (!is.numeric(weights) || !all(is.finite(weights)) || length(weights) != length(s)) {
      stop("Weights need to be valid numeric values with length equal to time or rank.")
    }
    #weights <- as.double(weights * length(time) / sum(weights))
    weights <- as.double(weights)
    weights.method <- "user"
  } else {
    # Survival Analysis - A Self Learning Text - Kleinbaum - Klein Springer 2005 pg 64
    if (weights.method == "Wilcoxon") {
      weights <- as.double(length(vro@time):1)
    } else if (weights.method == "Tarone-Ware") {
      weights <- as.double(sqrt(length(vro@time):1))
    } else if (weights.method == "Peto") {
      sf <- as.double(length(vro@time):1 / length(vro@time))
      weights <- sf
    } else if (weights.method == "Flemington-Harrington") {
      sf <- as.double(length(vro@time):1 / length(vro@time))
      weights <- as.double(sf^weights.parameters$p + (1-sf)^weights.parameters$q)
    } else if (weights.method == "Trevino") {
      # This method is mine (Victor Trevino).
      # It gives more weight to high density times
      if (missing(time)) {
        stop("Trevino method needs time values instead of rank values.")
      }
      xh <- hist(time, breaks=seq(min(time),max(time),length=length(time)/weights.parameters$t), plot=FALSE)
      weights <- numeric(length(time))
      for (i in 1:length(time)) {
        wr <- which(xh$mids > time[i])
        if (length(wr) == 0) wr <- length(xh$mids)
        wr <- wr[1]
        wl <- rev(which(xh$mids < time[i]))
        if (length(wl) == 0) wl <- 1
        wl <- wl[1]
        if (wl == wr) {
          if (wl == 1) {
            wr <- 2
          } else {
            wl <- wr-1
          }
        }
        m <- (xh$counts[wr]-xh$counts[wl]) / (xh$mids[wr]-xh$mids[wl])
        b <- xh$counts[wl]
        x <- (time[i] - xh$mids[wl])
        weights[i] <- m*x + b
      }
      weights <- as.double(weights[vro@order]*length(time)/sum(weights))
    } else {
      weights <- as.double(rep(1,len=length(vro@time)))
    }
  }
  # set parameters
  if (!missing(time)) vro@parameters$time <- time
  if (!missing(censored)) vro@parameters$censored <- censored
  if (!missing(rank)) vro@parameters$rank <- rank
  events <- sum(s)
  vro@s <- s                          # subjects ranked by time, event indicated by 1 (0 for censoring)
  vro@events <- as.integer(events)    # the total number of events
  vro@n <- as.integer(length(s))      # the total number of subjects
  vro@verbose <- verbose              # verbose while running?
  vro@save.sampling <- save.sampling  # save sampling in object?
  vro@wcensored <- as.integer(which(s == 0))
  vro@wevents <- as.integer(which(s == 1))
  vro@subpop <- new.env()
  vro@sampling.size <- sampling.size  # the number of samples needed for sampling the distribution
  vro@min.sampling.size <- min.sampling.size
  vro@method <- ifelse(method=="C","C","R")
  vro@estimate.distribution.parameters <- estimate.distribution.parameters
  vro@tails <- ifelse(tails==1, 1, 2)
  if (length(vro@time)) {
    ## Check for ties
    newtime <- gsub("[^0-9Ee\\.\\+\\-]", "", vro@time)
    thetimes <- as.numeric(gsub("\\+","",newtime))
    ttimes <- table(thetimes)
    if (any(ttimes > 1)) {
      rept <- as.numeric(names(ttimes[ttimes > 1]))
      for (i in rept) {
        w <- which(thetimes == i)
        if (length(unique(s[w])) > 1) {
          # There are events and censoring, create tie record.
          ties[[length(ties)+1]] <- w
        } else {
          tiesame[[length(tiesame)+1]] <- w
        }
      }
    }
    valorate.cat(vro, "There were",length(ties),"ties having events and censoring at the same time point.\n")
    valorate.cat(vro, "There were",length(tiesame),"time points having ties of events or censoring.\n")
  }
  vro@ties <- ties
  vro@tiesame <- tiesame
  vro@tiesame.pos <- if (length(tiesame) == 0) 0 else unlist(tiesame)
  vro@tiesame.sampling <- max(1,sampling.ties*(length(tiesame) > 0))
  vro@sampling.ties <- 0
  if (length(ties)) {
    lties <- unlist(lapply(ties, length))
    pties <- unlist(lapply(lties, function(x) valorate.perm(x,x)))
    # the permutations are:
    # elements in ties: 2, 3,  4,   5,   6
    # permutations    : 2, 6, 24, 120, 720
    ncombs <- max(cumprod(pties))
    ncomb <- min(sampling.ties, ncombs*4) ## maximum number of matrices
    vro@sampling.ties <- ncomb
  }
  vro@weights <- weights
  vro@weights.method <- weights.method
  return ((vro))
}

# utility function
valorate.p.overlap <- function(balls, white, drawn, observed.white, lower.tail=FALSE) {
  black <- balls - white
  phyper(observed.white, white, black, drawn, lower.tail=lower.tail)
}

# utility function
valorate.hyper.density <- function(npop, nevents, nmut, kmut) {
  valorate.p.overlap(npop,nevents,nmut,kmut,lower.tail=TRUE)-valorate.p.overlap(npop,nevents,nmut,kmut-1,lower.tail=TRUE)
}

# utility function
valorate.comb <- function(n, x) {
  if (n < x) {
    return (0)
  }
  return ( exp(lfactorial(n) - (lfactorial(x)+lfactorial(n-x))) )
}

# utility function
valorate.perm <- function(n,x) {
  return (exp(lfactorial(n)-lfactorial(n-x)))
}

# utility function
valorate.cat <- function(vro, ...) {
  if (vro@verbose) {
    cat(...)
    flush.console()
  }
}

# taken from http://druedin.com/2012/08/11/moving-averages-in-r/
# moving average utility function
valorate.mav <- function(x,n=5,avoid.na=FALSE){
  y <- stats::filter(x,rep(1/n,n), sides=2)
  if (avoid.na) {
    #VT: adapted to avoid NA
    wna <- which(is.na(y))
    if (length(wna)) {
      y[wna] <- x[wna]
    }
  }
  y
}

# Prepare object to estimate n1 population
prepare.n1 <- function(vro, n1) {
  nxname <- paste("subpop", n1, sep="")
  if (nxname %in% ls(vro@subpop)) {
    # already exists, it doesn't need to be recomputed
    return (invisible(vro))
  }
  ##### Prepare object to add this n1 population
  # load parameters from object
  n <- vro@n
  s <- vro@s
  events <- vro@events
  nx <- n1 <- as.integer(n1)
  ne <- min(n1, events) # n events to be used
  sampling.size <- vro@sampling.size
  min.sampling.size <- vro@min.sampling.size
  k.dens <- valorate.hyper.density(n,events,nx,0:ne)
  valorate.cat(vro, "[[[ Estimating Log-Rank Distribution for a Risk Group of", n1, " subjects]]]\n")
  valorate.cat(vro, "Densities:\n", k.dens,"\n")
  valorate.cat(vro, "Densities Log:\n", round(log10(k.dens),2),"\n")
  build.vjx <- function(s, n, nx) {
    # build V matrix,
    # rows (i): the number of samples that still are in n1
    # cols (j): the time ("n" different times = samples)
    vjx <- matrix(NA, ncol=n, nrow=nx) #array(NA, dim=c(nx,n,2))
    for (j in 1:n) {
      if (s[j]) {
        den <- n - j + 1
        vjx[,j] <- 1:nx/den
        #for (i in 1:nx) {
        #  num = i
        #  div = num/den
        #  vjx[i,j] = div;
        #  ####if (i <= n-j && nx-i < j) {
        #  #  vjx[i,j,1] = -div;  # xj=0, sample is not included in n1
        #  ####}
        #  ####if (i <= n-j && nx-i < j) {
        #  #  vjx[i,j,2] = 1-div; # xj=1, sample is included in n1
        #  ####}
        #}
      }
    }
    vcjx <- vjx[,s[1:n]==1,drop=FALSE] #vjx[,s[1:n]==1,,drop=FALSE]
    return (list(vjx=vjx, vcjx=vcjx))
  }
  vjm <- build.vjx(s, n, nx)
  #vjx <- vjm$vjx # Not Needed
  vcjx <- vjm$vcjx
  vcjx.n <- as.integer(1)
  ties <- vro@ties
  if (length(ties) > 0 && vro@sampling.ties > 0) {
    lties <- unlist(lapply(ties, length))
    pties <- unlist(lapply(lties, function(x) valorate.perm(x,x)))
    # the permutations are:
    # elements in ties: 2, 3,  4,   5,   6
    # permutations    : 2, 6, 24, 120, 720
    ncombs <- max(cumprod(pties))
    ncomb <- vro@sampling.ties #min(vro@sampling.ties, ncombs*4) ## maximum number of matrices
    valorate.cat(vro, "Estimating",ncomb,ifelse(is.finite(ncombs),paste("of",ncombs,sep=" "),""),"combinations for",length(ties),"ties in",ne,"events: ")
    for (i in 1:ncomb) {
      if (i %% 10 == 0) {
        valorate.cat(vro, ".")
        if (i %% 100 == 0) valorate.cat(vro, " ")
      }
      s <- vro@s
      for (j in 1:length(ties)) {
        s[ties[[j]]] <- sample(s[ties[[j]]])
        #vjx <- cbind(vjx, vjm$vjx) # Not needed
      }
      vjm <- build.vjx(s, n, nx)
      vcjx <- cbind(vcjx, vjm$vcjx)
    }
    valorate.cat(vro, "Done.\n")
    s <- vro@s
    vcjx.n <- as.integer(vcjx.n + ncomb)
  }
  # Estimate densities for each combination of events:censored for n1 subjects
  dens <- list()
  v <- numeric(min.sampling.size)
  wcensored <- vro@wcensored #which(s[1:n] == 0)
  wevents <- vro@wevents #which(s[1:n] == 1)
  ncensored <- as.integer(length(wcensored))
  nevents <- as.integer(length(wevents)) # should be equal to events
  combinations <- numeric(ne+1)
  verbose <- as.integer(vro@verbose)
  weightev <- as.double(vro@weights[wevents])
  inn1 <- integer(n)
  ldx <- integer(n)
  valorate.cat(vro, "Simulating taking",nx,"samples of",n,"having",events,"events:\n")
  first <- as.integer(1)
  for (k in 0:ne) {
    allComb <- NULL
    allCombMatrix <- 0
    ncomb <- valorate.comb(n-events,nx-k) * valorate.comb(events,k)
    combinations[k+1] <- ncomb
    sim <- as.integer(round(max(min.sampling.size, min(ncomb*4, sampling.size*k.dens[k+1]))))
    #as.integer(min(ncomb*2, sampling.size))
    #round(max(min.sampling.size, min(ncomb*2, sampling.size*k.dens[k+1])))
    if (sim*2 > ncomb && ncomb > 0) {
      mcens <- combn(wcensored, nx-k)
      mevnt <- combn(wevents, k)
      if (k > 0 && nx-k > 0) {
        allComb <- matrix(as.integer(0), nrow=nx, ncol=ncol(mcens)*ncol(mevnt))
        mk <- 0
        for (mi in 1:ncol(mcens)) {
          for (mj in 1:ncol(mevnt)) {
            mk <- mk + 1
            allComb[,mk] <- c(mcens[,mi],mevnt[,mj])
          }
        }
      } else if (k > 0) {
        allComb <- mevnt
      } else {
        allComb <- mcens
      }
      storage.mode(allComb) <- "integer"
      sim <- as.integer(round(ncomb,0))
    }
    if (sim != length(v)) {
      v <- numeric(sim)
    }
    valorate.cat(vro, "Sampling",k,"of",ne,"events: P(L|k=",k,"), comb=",ncomb,"=",format(k.dens[k+1]*100,digits=3),"%, Size=")
    if (ncomb > 0) { ## && k.dens[k+1]
      valorate.cat(vro, sim,":")
      doAllCombinations <- (!is.null(allComb))
      if (doAllCombinations) {
        valorate.cat(vro,"[*All Combinations*]",dim(allComb)) #,class(allComb),mode(allComb),is.integer(allComb)
        allCombMatrix <- allComb
      }
      if (vro@method == "R") {
        for (i in 1:sim) {
          if (i %% 1000 == 0) {
            valorate.cat(vro, ".")
            if (i %% 10000 == 0) valorate.cat(vro, " ")
          }
          # Generate inn1
          inn1[] <- 0
          if (doAllCombinations) {
            inn1[allComb[,i]] <- 1
          } else {
            if (k < nx) {
              ### censored
              inn1[sample(wcensored,nx-k)] <- 1
            }
            if (k > 0) {
              ### events
              inn1[sample(wevents,k)] <- 1
            }
          }
          # Calculate the V statistic
          #vcjx <- vro@subpop[[nxname]]$vcjx
          einn1 <- inn1[wevents] #1+inn1[wevents]
          ldx <- nx - cumsum(c(0,inn1))[wevents]
          os <- if (vcjx.n == 1) 0 else round(runif(1)*(vcjx.n-1))*events # offset of the matrix of ties, each of n columns
          V <- 0
          for (j in 1:events) {
            if (ldx[j] == 0) break
            V <- V + weightev[j] * (einn1[j]-vcjx[ldx[j],j+os]) #vcjx[ldx[j],j,einn1[j]]
          }
          v[i] <- V
          #v[i] <- sum(weightev*(einn1-vcjx[ldx,1:events+os]))
        }
        dens[[k+1]] <- v
      } else {
        v <- .C("valorate_samplings", v=v, sim, n, k, nx, wcensored, ncensored,
                wevents, nevents, weightev, vcjx, vcjx.n, inn1, ldx, first,
                verbose, allCombMatrix, PACKAGE="valorate")$v
        dens[[k+1]] <- v
      }
      first <- as.integer(0)
    } else {
      valorate.cat(vro, 0,":")
      dens[[k+1]] <- 0
    }
    valorate.cat(vro, "\n")
  }
  # summarize the samples in an empirical distribution considering the weights per number of events
  xmin <- unlist(lapply(dens,min,na.rm=TRUE))
  xmax <- unlist(lapply(dens,max,na.rm=TRUE))
  nmx <- max(xmax)
  nmn <- min(xmin)
  empirical <- 0
  empirical.breaks <- seq(nmn*ifelse(nmn<0,1.001,.999),nmx*ifelse(nmx>0,1.001,.999),len=1001)
  emp.hist <- list()
  for (k in 0:ne) {
    dens.hist <- hist(pmax(pmin(nmx,dens[[k+1]]),nmn),plot=FALSE,breaks=empirical.breaks)
    smv <- valorate.mav(dens.hist$count/sum(dens.hist$count), 10)
    smv[is.na(smv)] <- 0
    empirical <- empirical + k.dens[k+1] * smv
    #empirical <- empirical + k.dens[k+1] * dens.hist$count/sum(dens.hist$count)
    emp.hist[[k+1]] <- hist(dens[[k+1]],plot=FALSE,breaks=1001)
  }
  vro@subpop[[nxname]] <- list(
    nx=nx, ne=ne, n1=n1,
    #vjx=vjx, # Not Needed
    vcjx=vcjx, vcjx.n=vcjx.n,
    m=unlist(lapply(dens,mean,na.rm=TRUE)),
    sd=unlist(lapply(dens,sd,na.rm=TRUE)),
    min=xmin, max=xmax,
    k.density=k.dens, combinations=combinations,
    empirical=empirical, empirical.breaks=empirical.breaks,
    emp.hist=emp.hist)
  if (vro@save.sampling) {
    vro@subpop[[nxname]]$sampling <- dens
  }
  # Gaussian fitting
  # These are always estimated since they are stored anyway
  vro@subpop[[nxname]]$gaussian <- list(mean=vro@subpop[[nxname]]$m, sd=vro@subpop[[nxname]]$sd)
  if ("gaussian" %in% vro@estimate.distribution.parameters) {
    valorate.cat(vro, "Estimated Gaussian parameters for",nx,"samples:\n")
    valorate.cat(vro, "mean=",vro@subpop[[nxname]]$gaussian$mean,"\n")
    valorate.cat(vro, "sd=",vro@subpop[[nxname]]$gaussian$sd,"\n")
  }
  # Beta fitting
  if ("beta" %in% vro@estimate.distribution.parameters) {
    valorate.estimate.beta.parameters(vro, vro@subpop[[nxname]])
  }
  # Weibull fitting
  if ("weibull" %in% vro@estimate.distribution.parameters) {
    valorate.estimate.weibull.parameters(vro, vro@subpop[[nxname]])
  }
  return (invisible(vro))
}

valorate.p.value.normal <- function(vro, vrsubo, lrv, z) {
  if (length(z) > 1) {
    unlist(sapply(unlist(z),function(x) valorate.p.value.normal(vro, vrsubo, 0, x)))
  } else {
    p <- 1-pnorm(z)
    min(p,1-p) * vro@tails
    #p <- if (z >= 0) 1-pnorm(z) else pnorm(z)
    #p
    #both are the same, the first code is clearer
  }
}

valorate.p.value.chisq <- function(vro, vrsubo, lrv, z) {
  if (length(z) > 1) {
    unlist(sapply(unlist(z),function(x) valorate.p.value.chisq(vro, vrsubo, 0, x)))
  } else {
    #p <- 1-pchisq(z^2,df=1)
    #min(p,1-p)
    p <- 1-pchisq(z^2,df=1)
    p # here there is no adjustment
  }
}

valorate.p.value.gaussian <- function(vro, vrsubo, lrv, z) {
  if (is.numeric(vrsubo)) vrsubo <- vro@subpop[[paste0("subpop", vrsubo)]]
  if (length(lrv) > 1) {
    unlist(sapply(unlist(lrv),function(x) valorate.p.value.gaussian(vro, vrsubo, x)))
  } else {
    ncomb <- valorate.comb(vro@n, vrsubo$nx)
    p <- sum((1-pnorm(lrv,mean=vrsubo$gaussian$mean,sd=vrsubo$gaussian$sd)) * vrsubo$k.density * ncomb, na.rm=TRUE) / ncomb
    min(p,1-p) * vro@tails
  }
}

valorate.p.value.weibull <- function(vro, vrsubo, lrv, z) {
  if (length(lrv) > 1) {
    unlist(sapply(unlist(lrv),function(x) valorate.p.value.weibull(vro, vrsubo, x)))
  } else {
    if (is.numeric(vrsubo)) vrsubo <- vro@subpop[[paste0("subpop", vrsubo)]]
    nx <- vrsubo$nx
    nxname <- paste("subpop", nx, sep="")
    valorate.estimate.weibull.parameters(vro, vrsubo)
    vrsubo <- vro@subpop[[nxname]] # in case the vro object has been updated
    ncomb <- valorate.comb(vro@n, vrsubo$nx)
    x <- (lrv-vrsubo$min)/(vrsubo$max-vrsubo$min)
    p <- sum((1-pweibull(x,vrsubo$weibull$k,vrsubo$weibull$l)) * vrsubo$k.density * ncomb, na.rm=TRUE) / ncomb
    min(p,1-p) * vro@tails
  }
}

valorate.p.value.beta <- function(vro, vrsubo, lrv, z) {
  if (length(lrv) > 1) {
    unlist(sapply(unlist(lrv),function(x) valorate.p.value.beta(vro, vrsubo, x)))
  } else {
    if (is.numeric(vrsubo)) vrsubo <- vro@subpop[[paste0("subpop", vrsubo)]]
    nx <- vrsubo$nx
    nxname <- paste("subpop", nx, sep="")
    valorate.estimate.beta.parameters(vro, vrsubo)
    vrsubo <- vro@subpop[[nxname]]
    ncomb <- valorate.comb(vro@n, vrsubo$nx)
    x <- (lrv-vrsubo$min)/(vrsubo$max-vrsubo$min)
    p <- sum((1-pbeta(x,vrsubo$beta$alpha,vrsubo$beta$beta)) * vrsubo$k.density * ncomb, na.rm=TRUE) / ncomb
    min(p,1-p) * vro@tails
  }
}

valorate.estimate.beta.parameters <- function(vro, vrsubo) {
  if (is.numeric(vrsubo)) vrsubo <- vro@subpop[[paste0("subpop", vrsubo)]]
  if (is.null(vrsubo$beta)) {
    nx <- vrsubo$nx
    combinations <- vrsubo$combinations
    nxname <- paste("subpop", nx, sep="")
    valorate.cat(vro, "Estimating Beta parameters for",nx,"samples: ")
    estBetaParams <- function(mu, var) {
      alpha <- ((1 - mu) / var - 1 / mu) * mu ^ 2
      beta <- alpha * (1 / mu - 1)
      return(params = list(alpha = alpha, beta = beta))
    }
    dens <- vrsubo$sampling
    e.b.a <- c()
    e.b.b <- c()
    for (i in 1:length(dens)) {
      ## Scale dens[[i]] to 0-1
      valorate.cat(vro, i, " ")
      e.b.a[i] <- NA
      e.b.b[i] <- NA
      if (combinations[i] > 2) {
        x <- (dens[[i]]-min(dens[[i]]))/(max(dens[[i]])-min(dens[[i]]))
        xb <- estBetaParams(mean(x),var(x))
        e.b.a[i] <- xb$alpha
        e.b.b[i] <- xb$beta
      }
    }
    valorate.cat(vro, "done\n")
    vro@subpop[[nxname]]$beta <- list(alpha=e.b.a, beta=e.b.b)
    valorate.cat(vro, "alpha=",vro@subpop[[nxname]]$beta$alpha,"\n")
    valorate.cat(vro, "beta=",vro@subpop[[nxname]]$beta$beta,"\n")
  }
}

valorate.estimate.weibull.parameters <- function(vro, vrsubo) {
  if (is.numeric(vrsubo)) vrsubo <- vro@subpop[[paste0("subpop", vrsubo)]]
  if (is.null(vrsubo$weibull)) {
    nx <- vrsubo$nx
    nxname <- paste("subpop", nx, sep="")
    dens <- vrsubo$sampling
    combinations <- vrsubo$combinations
    valorate.cat(vro, "Estimating Weibull parameters for",nx,"samples: ")
    e.w.k <- c()
    e.w.l <- c()
    for (i in 1:length(dens)) {
      valorate.cat(vro, i, " ")
      e.w.k[i] <- NA
      e.w.l[i] <- NA
      if (combinations[i] > 2) {
        ## Scale dens[[i]] to 0-1
        x <- sort( (dens[[i]]-min(dens[[i]]))/(max(dens[[i]])-min(dens[[i]])) )
        #x <- x + x[2]/2 ## to avoid NA
        # first estimate
        # weibull plot: https://en.wikipedia.org/wiki/Weibull_distribution
        ly <- log(-log(1-cumsum(x)/(sum(x)))) #+x[1] to avoid NA
        lx <- log(x)
        wok <- which(!is.finite(lx) | !is.finite(ly))
        if (length(wok)) {
          lx <- lx[-wok]
          ly <- ly[-wok]
          x <- x[-wok]
        }
        if (length(x)) {
          xlm <- lm(ly~lx)
          k <- xlm$coefficients[2] ## initial approximation
          if (is.na(k)) {
            ## This is an empirical observation, when almost all x are the same
            ## Almost never happens
            l <- median(x)
            k <- l*100+length(dens[[i]])*0.05
          } else {
            l <- exp(-xlm$coefficients[1]/k)
            kweibullest <- function(x,k) 1 / (sum(x^k*log(x))/sum(x^k)-mean(log(x)))
            dk <- 1
            # subsequent approximations
            j <- 1
            while(abs(dk) > 0.001 && j < 100) {
              knew <- kweibullest(x,k)
              dk <- knew-k
              k <- knew
              j <- j + 1
            }
            l <- mean(x^k)^(1/k)
          }
          e.w.k[i] <- k
          e.w.l[i] <- l
        }
      }
    }
    valorate.cat(vro, "done\n")
    vro@subpop[[nxname]]$weibull <- list(k=e.w.k, l=e.w.l)
    valorate.cat(vro, "k=",vro@subpop[[nxname]]$weibull$k,"\n")
    valorate.cat(vro, "l=",vro@subpop[[nxname]]$weibull$l,"\n")
  }
}

valorate.p.value.sampling <- function(vro, vrsubo, lrv, z) {
  if (is.numeric(vrsubo)) vrsubo <- vro@subpop[[paste0("subpop", vrsubo)]]
  if (length(lrv) > 1) {
    unlist(sapply(unlist(lrv),function(x) valorate.p.value.sampling(vro, vrsubo, x)))
  } else {
    ncomb <- valorate.comb(vro@n, vrsubo$nx)
    if (is.null(vrsubo$sampling)) {
      warning("Sampling is better for estimation but it was not saved. Estimated by histogram.")
      nplus <- unlist(lapply(vrsubo$emp.hist, function(x) {
        a <- sum(c(0,x$counts[x$mids > lrv]), na.rm=TRUE)
        b <- sum(c(0,x$counts[rev(which(x$mids <= lrv))[1]]/2), na.rm=TRUE)
        (a + b*(a > 0)) / sum(x$counts)
      } ))
      nminus <- unlist(lapply(vrsubo$emp.hist, function(x) {
        a <- sum(c(0,x$counts[x$mids < lrv]), na.rm=TRUE)
        b <- sum(c(0,x$counts[which(x$mids >= lrv)[1]]/2), na.rm=TRUE)
        (a + b*(a > 0)) / sum(x$counts)
      } ))
    } else {
      nminus <- pmin(1,unlist(lapply(vrsubo$sampling, function(x) (1*0+sum(x <= lrv, na.rm=TRUE)) / length(x))))
      nplus <- pmin(1,unlist(lapply(vrsubo$sampling, function(x) (1*0+sum(x >= lrv, na.rm=TRUE)) / length(x))))
    }
    pleft <- sum(nminus * vrsubo$k.density, na.rm=TRUE)
    pright <- sum(nplus * vrsubo$k.density, na.rm=TRUE)
    p <- min(pleft, pright)
    if (p==0) {
      p <- 1 / sum(vrsubo$combinations, na.rm=TRUE)
    }
    min(p, 1-p) * vro@tails
  }
}

valorate.p.value.all <- function(vro, vrsubo, lrv, z=NULL) {
  if (is.numeric(vrsubo)) vrsubo <- vro@subpop[[paste0("subpop", vrsubo)]]
  p <- list(
    normal=valorate.p.value.normal(vro, vrsubo, lrv, z),
    gaussian=valorate.p.value.gaussian(vro, vrsubo, lrv, z),
    weibull=valorate.p.value.weibull(vro, vrsubo, lrv, z),
    beta=valorate.p.value.beta(vro, vrsubo, lrv, z)
  )
  if (!is.null(z)) {
    p$chisq <- 1
    p$chisq[1] <- valorate.p.value.chisq(vro, vrsubo, lrv, z)
  }
  if (!is.null(vrsubo$sampling)) {
    p$sampling <- valorate.p.value.sampling(vro, vrsubo, lrv, z)
    p$valorate <- p$sampling
  }
  return (p)
}

#setGeneric("survdiff", function(vro, clusters, p.func) standardGeneric("survdiff"))
#setMethod("survdiff", signature("valorate", "numeric", "function"),
valorate.survdiff <- function(vro, clusters, p.func=valorate.p.value.sampling) {
  uc <- unique(clusters)
  if (length(uc) != 2) {
    warning("Clusters need to be exactly 2.")
    p <- 1
    attr(p, c("Statistic(LR,Z,X2,LRunw,pZ,pX2)")) <- c(LR=0,Z=0,X2=0,LRunw=0,pZ=1,pX2=1)
    return (p)
  }
  if (length(clusters) != vro@n) {
    stop(paste("Cluster length=",length(clusters),"is different to n=",vro@n))
  }
  if (FALSE && any(table(clusters) == 1)) {
    warning("A cluster has only 1 sample.")
    p <- 1
    attr(p, c("Statistic(LR,Z,X2,LRunw,pZ,pX2)")) <- c(LR=0,Z=0,X2=0,LRunw=0,pZ=1,pX2=1)
    return (p)
  }
  clusters <- clusters[vro@order]
  c1 <- sum(clusters==uc[1])
  c2 <- length(clusters)-c1
  nx <- min(c1, c2)
  prepare.n1(vro, nx)
  nxname <- paste("subpop", nx, sep="")
  if (c1 <= c2) {
    inn1 <- (clusters == uc[1])*1
  } else {
    inn1 <- (clusters == uc[2])*1
  }
  #n <- vro@n
  #x <- inn1
  #s <- vro@s
  #sx <- c(0,cumsum(x))[1:n]
  #J <- 1:n
  #L <- sum(s*(x-(nx-sx)/(n-J+1)))
  #print(L)
  # Calculate the V statistic
  vcjx <- vro@subpop[[nxname]]$vcjx
  events <- vro@events
  wevents <- vro@wevents
  nsamp <- vro@tiesame.sampling
  inties <- which(inn1 %in% vro@tiesame.pos)
  if (length(inties) && length(unique(inn1[inties])) > 1) {
    # Look at tie positions to see which need to be resampled
    tiepos <- list()
    for (tp in 1:length(vro@tiesame)) {
      w <- vro@tiesame[[tp]]
      if (length(unique(inn1[w])) > 1) {
        # these positions do need resampling
        tiepos[[length(tiepos)+1]] <- w
      }
    }
  } else {
    nsamp <- 1
  }
  VR <- numeric(nsamp)
  VRZ <- numeric(nsamp)
  for (ivr in 1:nsamp) {
    if (ivr > 1) {
      # resample only active tie positions
      for (tp in 1:length(tiepos)) {
        w <- tiepos[[tp]]
        inn1[w] <- sample(inn1[w])
      }
    }
    V <- 0
    VZ <- 0
    einn1 <- inn1[wevents] #1+inn1[wevents]
    weightev <- vro@weights[wevents]
    ldx <- nx - cumsum(c(0,inn1))[wevents]
    for (j in 1:events) {
      if (ldx[j] == 0) break
      V <- V + weightev[j]*(einn1[j] - vcjx[ldx[j],j]) #vcjx[ldx[j],j,einn1[j]]
      VZ <- VZ + (einn1[j] - vcjx[ldx[j],j]) #vcjx[ldx[j],j,einn1[j]]
    }
    if (vro@sampling.ties > 0) {
      VV <- numeric(vro@sampling.ties+1)
      for (i in 1:vro@sampling.ties) {
        offset <- i*events
        v <- 0
        for (j in 1:events) {
          if (ldx[j] == 0) break
          v <- v + weightev[j]*(einn1[j] - vcjx[ldx[j],j+offset]) #vcjx[ldx[j],j,einn1[j]]
        }
        VV[i] <- v
      }
      VV[vro@sampling.ties+1] <- V
      #V <- V / (vro@sampling.ties + 1)
      V <- mean(VV)
    }
    VR[ivr] <- V
    VRZ[ivr] <- VZ
  }
  V <- mean(VR)
  VZ <- mean(VRZ)
  # Calculate variance if needed in normal test
  n <- vro@n
  n1j <- (sum(inn1)-(c(0,cumsum(inn1))[1:n]))[wevents]
  nj <- (n:1)[wevents]
  oj <- (vro@s)[wevents]
  vj <- oj*(n1j/nj)*(1-n1j/nj)*(nj-oj) / pmax(1,nj-1)
  names(V) <- NULL
  zv <- VZ/sqrt(sum(vj))
  # Estimate p-value
  p <- p.func(vro, vro@subpop[[nxname]], V, zv)
  pn <- 1-pnorm(zv)
  attr(p, c("Statistic(LR,Z,X2,LRunw,pZ,pX2,var)")) <- c(LR=V,Z=zv,X2=zv^2,LR.unweigthed=VZ,pZ=min(pn,1-pn) * vro@tails,pX2=1-pchisq(zv^2,df=1),var=sum(vj))
  if (nsamp == 1 && vro@sampling.ties > 0) {
    xp <- p.func(vro, vro@subpop[[nxname]], VV, zv)
    attr(p, c("diff.ties_CI(VRmin,VRmax,pMin,pMax,n)")) <- c(VRmin=min(VV),VRmax=max(VV),pMin=min(xp),pMax=max(xp),n=length(VV))
  }
  if (nsamp > 1) {
    xp <- p.func(vro, vro@subpop[[nxname]], VR, zv)
    attr(p, c("ties_CI(VRmin,VRmax,pMin,pMax,n)")) <- c(VRmin=min(VR),VRmax=max(VR),pMin=min(xp),pMax=max(xp),n=length(VR))
  }
  return (p)
}

valorate.psurvdiff <- function(vro, n1, v, z=NULL, p.func=valorate.p.value.sampling) {
  nx <- n1
  prepare.n1(vro, nx)
  nxname <- paste("subpop", nx, sep="")
  # Estimate p-value
  p <- p.func(vro, vro@subpop[[nxname]], v, z)
  attr(p, "statistic") <- v
  p
}

valorate.plot.empirical <- function(vro, n1, vstat=NULL, type="l", log="", add=FALSE,
    include=c("none","gaussian","beta","weibull","all")[1],
    xlab="valorate LR", ylab="density",
    main=paste("Empirical Density: n1=",n1,ifelse(is.null(vstat),"",paste0("\n(marked statistic at ",vstat,")"))),
    samp=vro@sampling.size, smooth=10, legends=FALSE, shades=c(6,8),
    transparency=0.25, lwd=2,
    xlim=range(c(mids,vstat))+(c(-0.05,+0.05)*abs(range(c(mids,vstat)))),
    minL=NA, minR=NA, ...) {
  if (length(n1) > 1) {
    if (all(n1 %in% c(0,1)) || length(n1) == vro@n) {
      ##### n1 represents risk groups, estimate statistic and real n1
      vstat <- valorate.survdiff(vro, n1)
      vstat <- attributes(vstat)[[1]][1]
      n1 <- min(length(n1)-sum(n1), sum(n1))
    } else {
      n1 <- n1[1]
    }
  }
  nx <- n1
  prepare.n1(vro, nx)
  nxname <- paste("subpop", nx, sep="")
  sp <- vro@subpop[[nxname]]
  f <- if (add) points else plot
  mids <- sp$empirical.breaks[-1]-diff(sp$empirical.breaks)/2
  empv <- valorate.mav(sp$empirical,smooth)
  #if (any(empv == 0 | is.na(empv))) empv[empv == 0 | is.na(empv)] <- min(empv[empv > 0], na.rm=TRUE)
  if (is.finite(minL) & any((empv == 0 | is.na(empv)) & mids < 0)) empv[(empv == 0 | is.na(empv)) & mids < 0] <- minL
  if (is.finite(minR) & any((empv == 0 | is.na(empv)) & mids > 0)) empv[(empv == 0 | is.na(empv)) & mids > 0] <- minR
  f(mids, empv, type=type, log=log, xlab=xlab, ylab=ylab, main=main, lwd=lwd, xlim=xlim, ...)
  res <- NULL
  if (!is.null(vstat)) {
    if (length(vstat) > 1) {
      res <- numeric(length(vstat))
      for (i in 1:length(vstat)) {
        res[i] <- empv[which.min(abs(mids-vstat[i]))[1]]
      }
      rug(res)
    } else {
      avstat <- attributes(vstat)
      if (length(avstat)==1 && names(avstat)=="Statistic(LR,Z,X2,LRunw,pZ,pX2)" && names(avstat[[1]])[1] == "LR") {
        vstat <- avstat[[1]][1]
      }
      v <- min(max(par("usr")[1],vstat), par("usr")[2])
      #rug(v, col=shades[1])
      miny <- min(empv[empv > 0],na.rm=TRUE)
      maxy <- max(empv,na.rm=TRUE)
      abline(v=v, lty=2, col=shades[1])
      left.x <- c(mids[mids < vstat],vstat,vstat,mids[1])
      left.y <- c(empv[mids < vstat],empv[mids >= vstat][1],miny,miny)
      left.y[!is.finite(left.y) | left.y == 0] <- miny
      right.x <- c(vstat, mids[mids > vstat], mids[length(mids)])
      right.y <- c(miny, empv[mids > vstat], miny)
      right.y[!is.finite(right.y) | right.y == 0] <- miny
      p <- valorate.p.value.sampling(vro, sp, vstat, 0) / vro@tails
      left <- sum(empv[mids < vstat], na.rm=TRUE)
      right <- sum(empv[mids > vstat], na.rm=TRUE)
      if (right > left) { right <- 1-p; left <- p } else { right <- p; left <- 1-p }
      left.col <- 2-1*(left < right)
      right.col <- 3-left.col
      # Taken from the scales package
      alpha <- function (colour, alpha = NA) {
        col <- grDevices::col2rgb(colour, TRUE)/255
        if (length(colour) != length(alpha)) {
          if (length(colour) > 1 && length(alpha) > 1) {
            stop("Only one of colour and alpha can be vectorised")
          }
          if (length(colour) > 1) {
            alpha <- rep(alpha, length.out = length(colour))
          } else if (length(alpha) > 1) {
            col <- col[, rep(1, length(alpha)), drop = FALSE]
          }
        }
        alpha[is.na(alpha)] <- col[4, ][is.na(alpha)]
        new_col <- grDevices::rgb(col[1, ], col[2, ], col[3, ], alpha)
        new_col[is.na(colour)] <- NA
        new_col
      }
      polygon(left.x, left.y, col=alpha(shades[left.col],transparency))
      polygon(right.x, right.y, col=alpha(shades[right.col],transparency))
      l <- format(left,digits=4)
      r <- format(right,digits=4)
      mc <- max(nchar(c(l,r)))
      if (left < 1 && left > 0 && nchar(l) < mc && length(grep("e",l)) == 0 && length(grep("\\.",l)) > 0) l <- format(1-right, digits=nchar(r)-2)
      #l <- paste(l,paste(rep("0",mc-nchar(l)),collapse=""),sep="")
      if (right < 1 && right > 0 && nchar(r) < mc && length(grep("e",r)) == 0 && length(grep("\\.",r)) > 0) r <- format(1-left, digits=nchar(l)-2)
      #r <- paste(r,paste(rep("0",mc-nchar(r)),collapse=""),sep="")
      srt <- ifelse(((vstat-strwidth(l)) < (par("usr")[1])) || ((vstat+strwidth(r)) > (par("usr")[2])),90,0)
      if (l=="1" && right > 1e-3) l <- format(1-right)
      if (r=="1" && left > 1e-3) r <- format(1-left)
      if (l=="1") {
        l <- expression(phantom() %=~% 1)
      } else {
        if (l=="0") {
          l <- expression(phantom() %=~% 0)
        }
      }
      if (r=="1") {
        r <- expression(phantom() %=~% 1)
      } else {
        if (r=="0") {
          r <- expression(phantom() %=~% 0)
        }
      }
      text(vstat,maxy,l,adj=if(srt == 0) c(1.10,1.3) else c(1,-0.25), col=shades[left.col], cex=1, srt=srt)
      text(vstat,maxy,r,adj=if(srt == 0) c(-0.10,1.3) else c(1,1.25), col=shades[right.col], cex=1, srt=srt)
    }
  }
  if (is.null(vstat)) {
    res <- data.matrix(data.frame(x=mids, y=empv))
  }
  if (length(include) > 0 && any(include %in% c("gaussian","beta","weibull","all"))) {
    ns <- samp
    xc <- 2
    brks <- sp$empirical.breaks #c(-Inf,sp$empirical.breaks,Inf)
    mp <- NULL
    minmax <- function(x, min=0, max=1, quantiles=FALSE) {
      if (quantiles) {
        min <- quantile(x, min, na.rm=TRUE)
        max <- quantile(x, max, na.rm=TRUE)
      }
      x[x < min] <- min
      x[x > max] <- max
      x
    }
    if (any(include == "all")) include <- c("gaussian","beta","weibull")
    for (i in include) {
      rs <- list()
      ec <- 0
      for (j in 1:length(sp$k.density)) {
        if (i == "gaussian") {
          r <- rnorm(ns, 
mean=sp$gaussian$mean[j], sd=sp$gaussian$sd[j]) } else if (i == "beta") { valorate.estimate.beta.parameters(vro, sp) sp <- vro@subpop[[nxname]] r <- rbeta(ns, sp$beta$alpha[j], sp$beta$beta[j]) } else if (i == "weibull") { valorate.estimate.weibull.parameters(vro, sp) sp <- vro@subpop[[nxname]] r <- rweibull(ns, sp$weibull$k[j], sp$weibull$l[j]) } else r <- NULL if (!is.null(r)) { r <- if(i == "gaussian") r else (r*(sp$max[j]-sp$min[j]) + sp$min[j]) h <- hist(pmax(pmin(r,max(sp$max)),min(sp$min)), breaks=brks, plot=FALSE) if (is.null(mp)) mp <- h$mids ec <- ec + h$counts * sp$k.density[j] } } ex <- ec*sum(empv,na.rm=TRUE)/sum(ec) # *max(empv, na.rm=TRUE)/max(exs, na.rm=TRUE) # minmax(ex,0.02,0.98,TRUE) exs <- valorate.mav(ex,smooth) points(mp, exs, type="l", col=xc, lwd=lwd[min(length(lwd),2)]) # ec*par("usr")[4]/max(ec) ... xc <- xc + 1 } if (legends) legend("topleft", legend=c("empirical",include), lty=1, col=0:length(include)+1) } invisible(res) } plot.kaplan.valorate <- function(data, time, status, logrank=NULL, plogrank=NULL, main= "", cluster, risk.groups=length(unique(cluster)), draw.main=FALSE, short.names=FALSE, mark.cex=1, mark=3, margins=TRUE, col=1:risk.groups+1, col.main=16, pName="p") { #library(survival) ocox <- NULL try(ocox <- coxph(Surv(time, status) ~ ., data.frame(t(data)))) if (is.null(ocox)) { shr <- list(conf.int=matrix(0,ncol=4,nrow=1),coefficients=matrix(0,ncol=10,nrow=1)) } else { shr <- summary(ocox) } hr <- round(shr$conf.int[1,1],2) hrst <- paste(format(round(shr$conf.int[1,1],1),digits=4)," [",format(round(shr$conf.int[1,3],1),digits=4),"~",format(round(shr$conf.int[1,4],1),digits=4),"], p=",format(shr$coefficients[1,5], digits=4),sep="") censxrisk <- rep(0,risk.groups) ##cuanta cuantos censored hay en cada grupo toview <- 1:(risk.groups+ifelse(draw.main,1,0)) ## las poblaciones que se van a mostrar en la leyenda for(i in 1:risk.groups){ w <- which(cluster == i) censxrisk[i] <- sum(status[w]) ##hace el conteo de censored por grupo } if (margins) { pp = par(mar=c(5.1+max(0,risk.groups-2), 2.5, 5.1, 1.6)) on.exit(par(pp)) } lrLabel <- ifelse(short.names,", LR=",", Log-Rank=") hrLabel <- ifelse(short.names,"\nHR=","\nHazard Ratio= ") if (is.null(plogrank)) { xsd <- survdiff(Surv(time, status) ~ cluster) plogrank <- 1-pchisq(xsd$chisq,df=1) } plot(survfit(Surv(time, status) ~ cluster, data=data.frame(time=time, status=status, cluster=cluster)), col=col, lty=1, lwd=2, conf.int=FALSE, main=paste(main,lrLabel,round(logrank,1),", ",pName,"=",format(plogrank, digits=4),hrLabel,hrst,sep=""), ylim=c(0, 1.05), cex.main=1.25, cex=mark.cex, mark=mark) pos <- c("bottomleft", "bottomright", "top", "right") xt <- axTicks(1) xt <- sort(c(xt, xt[-length(xt)]+diff(xt)/2)) if (draw.main) { ##Plots the whole population line (gray) lines(survfit(Surv(time, status) ~ 1), mark.time=TRUE, col=col.main, conf.int=FALSE, lty=1, lwd=1, mark=mark, cex=mark.cex, cex.main=1.25) lines(survfit(Surv(time, status) ~ cluster, data=data.frame(time=time, status=status, cluster=cluster)), col=col, lty=1, lwd=2, conf.int=FALSE, cex.main=1.25, cex=mark.cex, mark=mark) } uk <- sort(unique(cluster)) for (k in 1:length(uk)) { ##Genera y plotea el numero de paciente por cada grupo con relacion al tiempo. 
w <- which(cluster == uk[k]) axis(1, xt, labels=sapply(xt, function(x) sum(time[w] >= x)), tick=FALSE, line=k, col.axis=col[which(uk[k] == uk[order(uk)])]) } if (draw.main) { axis(1, xt, labels=sapply(xt, function(x) sum(time >= x)), tick=FALSE, line=k+1, col.axis=col.main) } } valorate.plot.kaplan <- function(vro, clusters, p=valorate.survdiff(vro, clusters), main="", short.names=TRUE, draw.all=FALSE, mark="|", mark.cex=0.75, margins=TRUE, col=2:3, col.all="skyblue") { time <- as.numeric(sub("+","",vro@time,fixed=TRUE)) status <- rep(1,length(time)) status[grep("\\+",vro@time)] <- 0 plot.kaplan.valorate(data=matrix(clusters[vro@order],nrow=1), plogrank=p, logrank=attributes(p)[[1]][1], time=time, status=status, main=main, short.names=short.names, draw.main=draw.all, cluster=1+as.vector(clusters[vro@order]), mark=mark, mark.cex=mark.cex, margins=margins, col=col, col.main=col.all, pName="pValorate") } valorate.risk <- function(vro, clusters) { time <- as.numeric(sub("+","",vro@time,fixed=TRUE)) status <- rep(1,length(time)) status[grep("\\+",vro@time)] <- 0 clusters <- as.vector(clusters[vro@order]) ocox <- NULL try(ocox <- coxph(Surv(time, status) ~ ., data.frame(data=clusters))) if (is.null(ocox)) { shr <- list(conf.int=matrix(0,ncol=4,nrow=1),coefficients=matrix(0,ncol=10,nrow=1)) } else { shr <- summary(ocox) } hr <- shr$conf.int[1,1] attr(hr, "confidence.interval") <- shr$conf.int[1,3:4] attr(hr, "p.value") <- shr$coefficients[1,5] attr(hr, "hazard.ratio") <- shr hr } valorate.plot.diff.empirical <- function(vro, n1, type="l", log="", include=c("gaussian","beta","weibull","all")[4], xlab="valorate LR", ylab="density", main=paste("Differences of Densities: n1=",n1), [email protected], smooth=10, ylim=c(min(miny,-maxy),max(-miny,maxy)), legends=TRUE, ...) { nx <- n1 prepare.n1(vro, nx) nxname <- paste("subpop", nx, sep="") sp <- vro@subpop[[nxname]] ns <- samp #xc <- 2 brks <- sp$empirical.breaks #c(-Inf,sp$empirical.breaks,Inf) mp <- NULL minmax <- function(x, min=0, max=1, quantiles=FALSE) { if (quantiles) { min = quantile(x, min, na.rm=TRUE) max = quantile(x, max, na.rm=TRUE) } x[x < min] <- min x[x > max] <- max x } if (any(include == "all")) include <- c("gaussian","beta","weibull") s <- c() xp <- list() miny <- 0 maxy <- 0 for (i in include) { rs <- list() ec <- 0 for (j in 1:length(sp$k.density)) { if (i == "gaussian") { r <- rnorm(ns, mean=sp$gaussian$mean[j], sd=sp$gaussian$sd[j]) } else if (i == "beta") { valorate.estimate.beta.parameters(vro, sp) sp <- vro@subpop[[nxname]] r <- rbeta(ns, sp$beta$alpha[j], sp$beta$beta[j]) } else if (i == "weibull") { valorate.estimate.weibull.parameters(vro, sp) sp <- vro@subpop[[nxname]] r <- rweibull(ns, sp$weibull$k[j], sp$weibull$l[j]) } else r <- NULL if (!is.null(r)) { r <- if(i == "gaussian") r else (r*(sp$max[j]-sp$min[j]) + sp$min[j]) h <- hist(pmax(pmin(r,max(sp$max)),min(sp$min)), breaks=brks, plot=FALSE) if (is.null(mp)) mp <- h$mids ec <- ec + h$counts * sp$k.density[j] } } ex <- ec/sum(ec) d <- sp$empirical - ex #(ex*max(sp$empirical)/max(minmax(ex,0.02,0.98,TRUE))) #points(mp, valorate.mav(d,smooth), type="l", col=xc) # ec*par("usr")[4]/max(ec) ... 
ds <- valorate.mav(d,smooth) xp[[length(xp)+1]] <- ds s[i] <- sum(abs(d)) KL.div <- function(p, q) { kld <- p*log(p/q) sum(kld[p > 0 & q > 0]) } s[i] <- KL.div(sp$empirical, ex) #(ex*max(sp$empirical)/max(minmax(ex,0.02,0.98,TRUE)))) miny <- min(miny, ds[is.finite(ds)]) maxy <- max(maxy, ds[is.finite(ds)]) } plot(0,0,xlim=range(sp$empirical.breaks), ylim=ylim, xlab=xlab, ylab=ylab, main=main, type="n", ...) abline(h=0, col=1) for (i in 1:length(xp)) { points(mp, xp[[i]], type="l", col=i+1) } if (legends) legend("topleft", legend=c("empirical",paste(include,round(s,5))), lty=1, col=0:length(include)+1, ncol=2) } valorate.plot.subpop.empirical <- function(vro, which=NULL, type="l", log="", xlim=NULL, smooth=10, legends=3, density=TRUE, ylim=c(ymin, ymax), ...) { xsp <- sort(as.numeric(gsub("subpop","",ls(vro@subpop)))) if (!is.null(which)) { xsp <- xsp[xsp %in% which] } xmin <- Inf xmax <- -Inf ymax <- 0 ymin <- Inf dmin <- Inf dmax <- -Inf for (nx in xsp) { nxname <- paste("subpop", nx, sep="") sp <- vro@subpop[[nxname]] dmin <- min(dmin, median(diff(sp$empirical.breaks))) dmax <- max(dmax, median(diff(sp$empirical.breaks))) } smoothing <- if (smooth > 0) function(x) valorate.mav(x, smooth) else function(x) x for (nx in xsp) { nxname <- paste("subpop", nx, sep="") sp <- vro@subpop[[nxname]] d <- median(diff(sp$empirical.breaks)) y <- sp$empirical*(dmax/d)/sum(sp$empirical) #*(1+(dmax-d)/(dmax-dmin)) xmin <- min(xmin, sp$min) xmax <- max(xmax, sp$max) ymax <- max(ymax, y) ymin <- min(ymin, y[y > 0]) } if (is.null(xlim)) { xlim <- c(xmin, xmax) } f <- plot icol <- 1 if (!density) { ymax <- 1 femp <- function(y, factor) y / max(y) } else { femp <- function(y, factor) y * factor / sum(y) } for (nx in xsp) { nxname <- paste("subpop", nx, sep="") sp <- vro@subpop[[nxname]] d <- median(diff(sp$empirical.breaks)) y <- femp(sp$empirical, dmax/d) f(sp$empirical.breaks[-1]-diff(sp$empirical.breaks)/2, smoothing(y), type=type, log=log, xlim=xlim, ylim=ylim, col=icol, xlab="Log-Rank Statistic", ylab=paste(ifelse(density,"Normalized Densities","Scaled Densities")," (smooth=",smooth,")",sep=""),...) f <- points icol <- icol + 1 } if (legends > 0) { legend("topleft", legend=xsp, lty=1, col=1:length(xsp), ncol=legends, cex=0.75) } } valorate.plot.subpop.empirical.to.0 <- function(vro, which=NULL, type="l", log="", xlim=NULL, smooth=10, legends=3, density=TRUE, ylim=c(ymin, ymax), ...) 
{ xsp <- sort(as.numeric(gsub("subpop","",ls(vro@subpop)))) if (!is.null(which)) { xsp <- xsp[xsp %in% which] } xmin <- Inf xmax <- -Inf ymax <- 0 ymin <- Inf dmin <- Inf dmax <- -Inf for (nx in xsp) { nxname <- paste("subpop", nx, sep="") sp <- vro@subpop[[nxname]] dmin <- min(dmin, median(diff(sp$empirical.breaks))) dmax <- max(dmax, median(diff(sp$empirical.breaks))) } smoothing <- if (smooth > 0) function(x) valorate.mav(x, smooth) else function(x) x w0 <- c() wL <- c() for (nx in xsp) { nxname <- paste("subpop", nx, sep="") sp <- vro@subpop[[nxname]] d <- median(diff(sp$empirical.breaks)) y <- sp$empirical*(dmax/d)/sum(sp$empirical) #*(1+(dmax-d)/(dmax-dmin)) xmin <- min(xmin, sp$min) xmax <- max(xmax, sp$max) ymax <- max(ymax, y) ymin <- min(ymin, y[y > 0]) w0[nx] <- which(sp$empirical.breaks >= 0)[1] wL[nx] <- length(sp$empirical.breaks) } if (is.null(xlim)) { xlim <- c(xmin, xmax) } f <- plot icol <- 1 if (!density) { ymax <- 1 femp <- function(y, factor) y / max(y) } else { femp <- function(y, factor) y * factor / sum(y) } neg <- max(w0-1, na.rm=TRUE) pos <- max(wL-w0, na.rm=TRUE) wLm <- median(w0, na.rm=TRUE) #cat("w0=",w0,"\n") #cat("neg=",neg,",pos=",pos,",wLm=",wLm,"\n") plot(0,0,type="n",xlim=c(wLm-neg,wLm*2+(pos-wLm)),ylim=ylim,xlab="'Scaled' Log-Rank Statistic", ylab=paste(ifelse(density,"Normalized Densities","Scaled Densities")," (smooth=",smooth,")",sep=""), xaxt="n", ...) for (nx in xsp) { nxname <- paste("subpop", nx, sep="") sp <- vro@subpop[[nxname]] d <- median(diff(sp$empirical.breaks)) y <- femp(sp$empirical, dmax/d) sy <- smoothing(y) points(1:length(sy)+wLm-w0[nx], sy, type=type, col=icol, ...) icol <- icol + 1 } abline(v=wLm,col=1,lwd=0.5,lty=3) wlmed <- median(wL, na.rm=TRUE) ats <- wLm + seq(-1,1,len=9)*wlmed axis(side=1, at=ats, labels=round((ats-wLm)*2/wlmed,2)) #axis(side=1) if (legends > 0) { legend("topleft", legend=xsp, lty=1, col=1:length(xsp), ncol=legends, cex=0.75) } } valorate.plot.subpop.empirical.scaled <- function(vro, which=NULL, type="l", log="", xlim=NULL, smooth=10, legends=3, density=FALSE, ylim=c(ymin, ymax), scale.point=0.01, ...) 
{ nsp <- as.numeric(gsub("subpop","",ls(vro@subpop))) xsp <- sort(nsp) if (!is.null(which)) { xsp <- xsp[xsp %in% which] } xmin <- Inf xmax <- -Inf ymax <- 0 ymin <- Inf dmin <- Inf dmax <- -Inf ty <- c() ty2 <- c() for (nx in xsp) { nxname <- paste("subpop", nx, sep="") sp <- vro@subpop[[nxname]] dmin <- min(dmin, median(diff(sp$empirical.breaks))) dmax <- max(dmax, median(diff(sp$empirical.breaks))) ty[length(ty)+1] <- sp$empirical.breaks[c(which(cumsum(sp$empirical) >= scale.point),length(sp$empirical.breaks))[1]] ty2[length(ty2)+1] <- sp$empirical.breaks[c(which(cumsum(sp$empirical) >= (1-scale.point)),length(sp$empirical.breaks))[1]] } #x <- xsp #xd <- data.frame(x=x,x1_2=x^(1/2),x1_3=x^(1/3),y=ty) #slm <- summary(lm(y~x+x1_2+x1_3,xd)) # ##print(slm) #co <- c(slm$coefficients[-1,1],0) #scalefactor <- abs(apply(t(xd)*co+slm$coefficients[1,1],2,sum)) scalefactor <- abs(ty) #abs(apply(t(xd)*co+slm$coefficients[1,1],2,sum)) scalefactor <- abs(ty-ty2)/2 #abs(apply(t(xd)*co+slm$coefficients[1,1],2,sum)) names(scalefactor) <- paste0("subpop",xsp) #print(scalefactor) smoothing <- if (smooth > 0) function(x) valorate.mav(x, smooth) else function(x) x for (nx in xsp) { nxname <- paste("subpop", nx, sep="") sp <- vro@subpop[[nxname]] d <- median(diff(sp$empirical.breaks)) y <- sp$empirical*(dmax/d)/sum(sp$empirical) #*(1+(dmax-d)/(dmax-dmin)) xmin <- min(xmin, sp$min/scalefactor[nxname]) xmax <- max(xmax, sp$max/scalefactor[nxname]) ymax <- max(ymax, y) ymin <- min(ymin, y[y > 0]) } if (is.null(xlim)) { xlim <- c(xmin, xmax) } f <- plot icol <- 1 if (!density) { ymax <- 1 femp <- function(y, factor) y / max(y) } else { femp <- function(y, factor) y * factor / sum(y) } for (nx in xsp) { nxname <- paste("subpop", nx, sep="") sp <- vro@subpop[[nxname]] d <- median(diff(sp$empirical.breaks)) y <- femp(sp$empirical, dmax/d) f((sp$empirical.breaks[-1]-diff(sp$empirical.breaks)/2)/scalefactor[nxname], smoothing(y), type=type, log=log, xlim=xlim, ylim=ylim, col=icol, xlab="Log-Rank Statistic", ylab=paste(ifelse(density,"Normalized Densities","Scaled Densities")," (smooth=",smooth,")",sep=""),...) f <- points icol <- icol + 1 } if (legends > 0) { legend("topleft", legend=xsp, lty=1, col=1:length(xsp), ncol=legends, cex=0.75) } } valorate.plot.sampling.densities <- function(vro, n1, type="l", log="", xlim=c(minx, maxx), ylim=c(miny, maxy), ncol=max(1,round(sqrt(n1)/2)), main="Sampling Densities Per Event (k) and Weighted Sum", rug=TRUE, add=FALSE, w.sum=TRUE, sampling=FALSE, weighted=FALSE, legends.cex=1, weights.cex=1, weights.pos=c("middle","left","right")[3], w.sum.lwd=3, y.limit=1e-13, ...) 
{ nx <- n1 prepare.n1(vro, nx) nxname <- paste("subpop", nx, sep="") sp <- vro@subpop[[nxname]] w <- which(unlist(lapply(sp$sampling, length)) == 1) if (length(w)) { for (i in w) sp$sampling[[i]] <- rep(sp$sampling[[i]],2) } dens <- lapply(sp$sampling, density) #minx <- min(unlist(lapply(dens, function(x) min(x$x)))) #maxx <- max(unlist(lapply(dens, function(x) max(x$x)))) propdens <- sp$k.density/max(sp$k.density) h <- (if (weighted) propdens else 1) minx <- min(sp$min) maxx <- max(sp$max) miny <- min(min(unlist(lapply(dens, function(x) min(x$y)))), min(sp$empirical))+y.limit my <- unlist(lapply(dens, function(x) max(x$y)))*1+y.limit maxy <- max(my*h) minxdens <- min(unlist(lapply(dens, function(x) min(x$x)))) maxxdens <- max(unlist(lapply(dens, function(x) max(x$x)))) dx <- maxxdens-minxdens L <- length(dens[[1]]$x)+1 #/2 xaprx <- seq(minxdens,maxxdens,length=L) yaprx <- numeric(length(xaprx)) mxy <- numeric(length(dens)) if (!add) plot(1,1, type="n", log=log, xlim=xlim, ylim=ylim, main=main, xlab="", ylab="", ...) for (i in (1:length(dens))[order(sp$k.density)]) { w <- ifelse(weighted,sp$k.density[i]/max(sp$k.density),1) points(dens[[i]]$x, dens[[i]]$y*w, type=type, col=(i+1)) idx <- round(L * (dens[[i]]$x-minxdens) / dx + 1) w <- which(diff(idx) > 0) yaprx[idx[w]] <- yaprx[idx[w]] + dens[[i]]$y[w] * sp$k.density[i] #for (j in idx[idx > 0]) yaprx[idx[j]] <- yaprx[idx[j]] + dens[[i]]$y[j] * sp$k.density[i] if (rug) rug(sp$sampling[[i]], col=i+1, side=(i %% 2)*2+1) mxy[i] <- max(dens[[i]]$y, na.rm=TRUE) } is.logy <- (log=="y" || log=="xy") leg <- c(0:sp$ne,"W. Sum","Sampling") lok <- 1:length(leg) if (!sampling) lok <- lok[-length(lok)] if (!w.sum) lok <- lok[-grep("W. Sum",leg)] if (!add && legends.cex > 0) legend(if(is.logy) "bottomleft" else "topleft",legend=leg[lok],lty=c(rep(1,sp$ne+1),2,3)[lok],col=c(2+0:sp$ne,1+0*(sp$ne+3),8)[lok], ncol=ncol, text.width=strwidth("0"),box.col=0, cex=legends.cex) if (!add && weights.cex > 0) { yl <- cumsum(rep(strheight("Gg"),length(sp$k.density))) if (weights.pos=="middle") pos <- list(x=sp$m, y=pmin(my,maxy*.75), adj=c(ifelse(is.logy,1,0),1), srt=90) if (weights.pos=="left") pos <- list(x=par("usr")[1], y=mean(par("usr")[3:4])-yl+max(yl)/2, adj=c(-0.05,0.5), srt=0) if (weights.pos=="right") pos <- list(x=par("usr")[2], y=mean(par("usr")[3:4])-yl+max(yl)/2, adj=c(1.05,0.5), srt=0) text(pos$x, pos$y, format(sp$k.density,digits=2), srt=pos$srt, col=2+0:sp$ne, cex=weights.cex, adj=pos$adj) } if (sampling) points(sp$empirical.breaks[-1]-diff(sp$empirical.breaks)/2, sp$empirical*mean(mxy)/max(sp$empirical, na.rm=TRUE), type=type, lwd=2, lty=2, col=8, ...) if (w.sum) points(xaprx, yaprx*maxy/max(yaprx,na.rm=TRUE), type=type, col=1+0*(sp$ne+3), lwd=w.sum.lwd, lty=3) axis(4,at=axTicks(2),round(max(sp$empirical)*axTicks(2)/max(axTicks(2)),5)) invisible(list(x=xaprx, y=yaprx)) #yaprx*maxy*1.01/max(yaprx,na.rm=TRUE) } valorate.plot.sampling.densities.figure <- function(vro, n1, type="l", log="", xlim=c(minx, maxx), ylim=c(miny, maxy), main="", rug=1, rug.size=1, sub="P(L|k) Events k=", w.sum=TRUE, sampling=FALSE, ncol=1, y.limit=1e-13, ...) 
{ nx <- n1 prepare.n1(vro, nx) nxname <- paste("subpop", nx, sep="") sp <- vro@subpop[[nxname]] dens <- lapply(sp$sampling, density) lens <- lapply(sp$sampling, length) #minx <- min(unlist(lapply(dens, function(x) min(x$x)))) #maxx <- max(unlist(lapply(dens, function(x) max(x$x)))) minx <- min(sp$min) maxx <- max(sp$max) miny <- min(min(unlist(lapply(dens, function(x) min(x$y)))), min(sp$empirical))+y.limit my <- unlist(lapply(dens, function(x) max(x$y)))*1+y.limit maxy <- max(my) minxdens <- min(unlist(lapply(dens, function(x) min(x$x)))) maxxdens <- max(unlist(lapply(dens, function(x) max(x$x)))) dx <- maxxdens-minxdens L <- length(dens[[1]]$x)+1 #/2 xaprx <- seq(minxdens,maxxdens,length=L) yaprx <- numeric(length(xaprx)) mxy <- numeric(length(dens)) par(mfrow=c(ceiling((length(dens)+sampling*1+w.sum*1)/ncol),ncol)) mar <- par("mar") par(mar=c(0,mar[2],1.2,mar[4])) brks <- seq(minx,maxx,len=round(par("pin")[1]*96)) mids <- brks[-1]+diff(brks)/2 rside <- par("usr")[rug+2] rsign <- ifelse(rug==1,1,-1) for (i in (1:length(dens))) { #[order(sp$k.density)] plot(1,1, type="n", log=log, xlim=c(minx, maxx), ylim=ylim, main=paste0(main,"(",sub,i-1," x ",sp$k.density[i],", size=",lens[[i]],")"), xlab="", ylab="", xaxt="n", ...) points(dens[[i]], type=type, col=(i+1)) idx <- round(L * (dens[[i]]$x-minxdens) / dx + 1) w <- which(diff(idx) > 0) yaprx[idx[w]] <- yaprx[idx[w]] + dens[[i]]$y[w] * sp$k.density[i] #if (rug) rug(sp$sampling[[i]], col=i+1, side=rug) if (rug) { h <- hist(sp$sampling[[i]],breaks=brks,plot=FALSE)$counts segments(mids[h > 0], rside, mids[h > 0], rside+rsign*h[h > 0]*abs(par("usr")[3])*rug.size/max(h), col=i+1) } mxy[i] <- max(dens[[i]]$y, na.rm=TRUE) } is.logy <- (log=="y" || log=="xy") leg <- c(0:sp$ne,"W. Sum","Sampling") lok <- 1:length(leg) if (!sampling) lok <- lok[-length(lok)] if (!w.sum) lok <- lok[-grep("W. Sum",leg)] #if (!add) legend(if(is.logy) "bottomleft" else "topleft",legend=leg[lok],lty=1,col=c(2+0:sp$ne,sp$ne+3,1)[lok], ncol=ncol, text.width=strwidth("0"),box.col=0) #if (!add) text(sp$m, pmin(my,maxy*.75), paste("x ",format(sp$k.density,digits=2)), srt=90, col=2+0:sp$ne, adj=c(ifelse(is.logy,1,0),1)) if (sampling) { par(mar=c(2,mar[2],2,mar[4])) plot(1,1, type="n", log=log, xlim=c(minx, maxx), ylim=ylim, main=paste0(main,"Sampling"), xlab="", ylab="", ...) points(sp$empirical.breaks[-1]-diff(sp$empirical.breaks)/2, sp$empirical*mean(mxy)/max(sp$empirical, na.rm=TRUE), type=type, lwd=2, col=8, ...) } if (w.sum) { par(mar=c(2,mar[2],2,mar[4])) plot(1,1, type="n", log=log, xlim=c(minx, maxx), ylim=ylim, main=paste0(main,"W.Sum"), xlab="", ylab="", ...) points(xaprx, yaprx*mean(mxy)/max(yaprx,na.rm=TRUE), type=type, col=1+0*(sp$ne+3), lwd=3) } axis(4,at=axTicks(2),round(max(sp$empirical)*axTicks(2)/max(axTicks(2)),5)) par(mar=mar) invisible(list(x=xaprx, y=yaprx)) }
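
## --- Illustrative sketch (not part of the original package) ---
## A minimal, self-contained demo of the fixed-point iteration used in
## valorate.estimate.weibull.parameters() above to solve the Weibull shape
## MLE equation; `demo_weibull_fit` and its arguments are hypothetical names.
demo_weibull_fit <- function(x, k = 1, tol = 0.001, max.iter = 100) {
  # MLE fixed point for the shape parameter k
  kweibullest <- function(x, k) 1 / (sum(x^k * log(x)) / sum(x^k) - mean(log(x)))
  dk <- 1
  j <- 1
  while (abs(dk) > tol && j < max.iter) {
    knew <- kweibullest(x, k)
    dk <- knew - k
    k <- knew
    j <- j + 1
  }
  l <- mean(x^k)^(1 / k)  # scale given the converged shape
  list(k = k, l = l, iterations = j)
}
# Usage: set.seed(1); demo_weibull_fit(rweibull(1000, shape = 2, scale = 0.5))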
/scratch/gouwar.j/cran-all/cranData/valorate/R/valorate.R
#' Mega Millions (Before 10/22/13)
#'
#' Historical data for the Mega Millions game. On October 22, 2013, the format changed
#' from 5/56 + 1/46 to the current 5/75 + 1/15 format. Game play: Pick five different
#' numbers from 1 through 56; then select one Mega Ball number from 1 through 46.
#' @format A data frame with 1,713 rows and 7 variables:
#' \describe{
#'   \item{date}{date of draw}
#'   \item{megaball}{megaball result}
#'   \item{N1}{1st number in order}
#'   \item{N2}{2nd number in order}
#'   \item{N3}{3rd number in order}
#'   \item{N4}{4th number in order}
#'   \item{N5}{5th number in order}
#' }
#' @source \url{https://www.valottery.com}
#'
"mega.mill.1"

#' Mega Millions (10/22/13 and beyond)
#'
#' Historical data for the Mega Millions game. On October 22, 2013, the format changed
#' from 5/56 + 1/46 to the current 5/75 + 1/15 format. Game play: Pick five different
#' numbers from 1 through 75; then select one Mega Ball number from 1 through 15.
#' @format A data frame with 194 rows and 7 variables:
#' \describe{
#'   \item{date}{date of draw}
#'   \item{megaball}{megaball result}
#'   \item{N1}{1st number in order}
#'   \item{N2}{2nd number in order}
#'   \item{N3}{3rd number in order}
#'   \item{N4}{4th number in order}
#'   \item{N5}{5th number in order}
#' }
#' @source \url{https://www.valottery.com}
#'
"mega.mill.2"

#' Power Ball
#'
#' Historical data for the Power Ball game. Game play: Pick
#' five different numbers from 1 through 59; then select one
#' Powerball number from 1 through 35.
#' @format A data frame with 582 rows and 7 variables:
#' \describe{
#'   \item{date}{date of draw}
#'   \item{powerball}{powerball result}
#'   \item{N1}{1st number in order}
#'   \item{N2}{2nd number in order}
#'   \item{N3}{3rd number in order}
#'   \item{N4}{4th number in order}
#'   \item{N5}{5th number in order}
#' }
#' @source \url{https://www.valottery.com}
#' @examples
#' ## According to game rules, the powerball is numbered 1 - 35,
#' ## but apparently there were times when it went up to 39
#' i <- power.ball$powerball > 35
#' any(i)
#' sum(i)
#' power.ball$powerball[i]
"power.ball"

#' CASH4LIFE
#'
#' Historical data for the CASH4LIFE game. Game play: Pick five different numbers from 1 through 60;
#' then select one Cash Ball number from 1 through 4.
#' @format A data frame with 34 rows and 7 variables:
#' \describe{
#'   \item{date}{date of draw}
#'   \item{cashball}{cash ball result}
#'   \item{N1}{1st number in order}
#'   \item{N2}{2nd number in order}
#'   \item{N3}{3rd number in order}
#'   \item{N4}{4th number in order}
#'   \item{N5}{5th number in order}
#' }
#' @source \url{https://www.valottery.com}
#' @examples
#' ## Check numbers drawn are uniformly distributed
#' x <- qunif(ppoints(nrow(cash.4.life)*5),1,60)
#' y <- sort(unlist(cash.4.life[,3:7]))
#' qqplot(x,y)
"cash.4.life"

#' $1,000,000 Money Ball
#'
#' Historical data for the $1,000,000 Money Ball game. Game Play: pick five numbers
#' 1 - 35, the Lottery then selects five numbered balls. If the Gold Million Dollar
#' Money Ball is drawn before all five numbers have been selected, the top prize
#' jumps to $1,000,000. Note: This game was discontinued 8/29/15.
#' @format A data frame with 100 rows and 7 variables:
#' \describe{
#'   \item{date}{date of draw}
#'   \item{moneyball}{money ball result: yes or no}
#'   \item{N1}{1st number in order}
#'   \item{N2}{2nd number in order}
#'   \item{N3}{3rd number in order}
#'   \item{N4}{4th number in order}
#'   \item{N5}{5th number in order}
#' }
#' @source \url{https://www.valottery.com}
#' @examples
#' ## probability of drawing the money ball within the first 5 draws:
#' ## the money ball is equally likely to occupy any of 36 positions
#' 5/36
#' ## observed money ball results
#' prop.table(table(money.ball$moneyball))
#' ## simulate money ball draws before first 5 draws
#' set.seed(123)
#' mean(replicate(1000, any(sample(c(1:35,"mb"),5)=="mb")))
"money.ball"

#' Decades of Dollars
#'
#' Historical data for the Decades of Dollars game. You pick six (6) numbers,
#' the Lottery will then select six (6) numbered balls.
#' Note: This game was discontinued in favor of CASH4LIFE.
#' @format A data frame with 443 rows and 7 variables:
#' \describe{
#'   \item{date}{date of draw}
#'   \item{N1}{1st number in order}
#'   \item{N2}{2nd number in order}
#'   \item{N3}{3rd number in order}
#'   \item{N4}{4th number in order}
#'   \item{N5}{5th number in order}
#'   \item{N6}{6th number in order}
#' }
#' @source \url{https://www.valottery.com}
#'
"decades.of.dollars"

#' Cash 5 (once daily)
#'
#' Historical data for the Cash 5 once daily game. Game Play: Pick five
#' numbers from 1 through 34. Note: On April 11, 1999, Cash 5 switched
#' to twice daily drawings.
#' @format A data frame with 1187 rows and 6 variables:
#' \describe{
#'   \item{date}{date of draw}
#'   \item{N1}{1st number in order}
#'   \item{N2}{2nd number in order}
#'   \item{N3}{3rd number in order}
#'   \item{N4}{4th number in order}
#'   \item{N5}{5th number in order}
#' }
#' @source \url{https://www.valottery.com}
#'
"cash.5.1xday"

#' Cash 5 (twice daily)
#'
#' Historical data for the Cash 5 twice daily game. Game Play: Pick five
#' numbers from 1 through 34. Note: On April 11, 1999, Cash 5 switched
#' to twice daily drawings.
#' @format A data frame with 11,164 rows and 7 variables:
#' \describe{
#'   \item{date}{date of draw}
#'   \item{time}{time of drawing: day or night}
#'   \item{N1}{1st number in order}
#'   \item{N2}{2nd number in order}
#'   \item{N3}{3rd number in order}
#'   \item{N4}{4th number in order}
#'   \item{N5}{5th number in order}
#' }
#' @source \url{https://www.valottery.com}
#' @examples
#' max.days <- apply(subset(cash.5.2xday,time=="day",-(1:2)),1,max)
#' max.nights <- apply(subset(cash.5.2xday,time=="night",-(1:2)),1,max)
#' op <- par(mfrow=c(1,2))
#' hist(max.days)
#' hist(max.nights)
#' par(op)
"cash.5.2xday"

#' Pick 4 (once daily)
#'
#' Historical data for the Pick 4 once daily game. Game play: Pick
#' a four digit number from 0000 through 9999. Note: On January 30, 1995,
#' Pick 4 switched to twice daily drawings.
#' @format A data frame with 1,041 rows and 5 variables:
#' \describe{
#'   \item{date}{date of draw}
#'   \item{N1}{1st digit}
#'   \item{N2}{2nd digit}
#'   \item{N3}{3rd digit}
#'   \item{N4}{4th digit}
#' }
#' @source \url{https://www.valottery.com}
#' @examples
#' ## Any Pick 4 happen more than once?
#' results <- apply(pick.4.1xday[,-1],1,function(x)paste(x,collapse = ""))
#' any(table(results) > 1)
#' ## Which numbers?
#' i <- which(table(results) > 1,useNames = FALSE)
#' sort(table(results)[i],decreasing = TRUE)
"pick.4.1xday"

#' Pick 4 (twice daily)
#'
#' Historical data for the Pick 4 twice daily game. Game play: Pick
#' a four digit number from 0000 through 9999. Note: On January 30, 1995,
#' Pick 4 switched to twice daily drawings.
#' @format A data frame with 13,788 rows and 6 variables:
#' \describe{
#'   \item{date}{date of draw}
#'   \item{time}{time of drawing: day or night}
#'   \item{N1}{1st digit}
#'   \item{N2}{2nd digit}
#'   \item{N3}{3rd digit}
#'   \item{N4}{4th digit}
#' }
#' @source \url{https://www.valottery.com}
#'
"pick.4.2xday"

#' Pick 3 (once daily)
#'
#' Historical data for the Pick 3 once daily game. Game Play:
#' Pick a three digit number from 000 through 999. Note: On January 30, 1995,
#' Pick 3 switched to twice daily drawings.
#' @format A data frame with 1,777 rows and 4 variables:
#' \describe{
#'   \item{date}{date of draw}
#'   \item{N1}{1st digit}
#'   \item{N2}{2nd digit}
#'   \item{N3}{3rd digit}
#' }
#' @source \url{https://www.valottery.com}
#' @examples
#' lapply(pick.3.1xday[,-1],function(x)round(prop.table(table(x)),2))
"pick.3.1xday"

#' Pick 3 (twice daily)
#'
#' Historical data for the Pick 3 twice daily game. Game Play:
#' Pick a three digit number from 000 through 999. Note: On January 30, 1995,
#' Pick 3 switched to twice daily drawings.
#' @format A data frame with 13,790 rows and 5 variables:
#' \describe{
#'   \item{date}{date of draw}
#'   \item{time}{time of drawing: day, night1 or night2 (Note: two nightly drawings were held
#'   on 10/30/08 and 11/09/08)}
#'   \item{N1}{1st digit}
#'   \item{N2}{2nd digit}
#'   \item{N3}{3rd digit}
#' }
#' @source \url{https://www.valottery.com}
#'
"pick.3.2xday"
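
## --- Illustrative sketch (not part of the original package) ---
## A minimal example of checking whether the pick.3.2xday digits documented
## above look uniform; assumes the package's data objects are loaded, and
## `digit_uniformity` is a hypothetical helper name.
digit_uniformity <- function(d, cols = c("N1", "N2", "N3")) {
  # chi-square goodness-of-fit against a uniform distribution of digits 0-9
  lapply(d[cols], function(x) chisq.test(table(factor(x, levels = 0:9))))
}
# Usage: digit_uniformity(pick.3.2xday)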
/scratch/gouwar.j/cran-all/cranData/valottery/R/data.R
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393

bed12toexons_impl <- function(x) {
    .Call(`_valr_bed12toexons_impl`, x)
}

closest_impl <- function(x, y, grp_idx_x, grp_idx_y, suffix_x, suffix_y) {
    .Call(`_valr_closest_impl`, x, y, grp_idx_x, grp_idx_y, suffix_x, suffix_y)
}

complement_impl <- function(gdf, genome) {
    .Call(`_valr_complement_impl`, gdf, genome)
}

coverage_impl <- function(x, y, x_grp_indexes, y_grp_indexes) {
    .Call(`_valr_coverage_impl`, x, y, x_grp_indexes, y_grp_indexes)
}

dist_impl <- function(x, y, x_grp_indexes, y_grp_indexes, distcalc) {
    .Call(`_valr_dist_impl`, x, y, x_grp_indexes, y_grp_indexes, distcalc)
}

flank_impl <- function(df, genome, both = 0, left = 0, right = 0, fraction = FALSE, stranded = FALSE, trim = FALSE) {
    .Call(`_valr_flank_impl`, df, genome, both, left, right, fraction, stranded, trim)
}

intersect_impl <- function(x, y, x_grp_indexes, y_grp_indexes, invert = FALSE, suffix_x = ".x", suffix_y = ".y") {
    .Call(`_valr_intersect_impl`, x, y, x_grp_indexes, y_grp_indexes, invert, suffix_x, suffix_y)
}

makewindows_impl <- function(df, win_size = 0L, num_win = 0L, step_size = 0L, reverse = FALSE) {
    .Call(`_valr_makewindows_impl`, df, win_size, num_win, step_size, reverse)
}

merge_impl <- function(gdf, max_dist = 0L, collapse = TRUE) {
    .Call(`_valr_merge_impl`, gdf, max_dist, collapse)
}

partition_impl <- function(gdf, max_dist = -1L) {
    .Call(`_valr_partition_impl`, gdf, max_dist)
}

random_impl <- function(genome, length, n, seed = 0L) {
    .Call(`_valr_random_impl`, genome, length, n, seed)
}

shuffle_impl <- function(df, incl, within = FALSE, max_tries = 1000L, seed = 0L) {
    .Call(`_valr_shuffle_impl`, df, incl, within, max_tries, seed)
}

subtract_impl <- function(gdf_x, gdf_y, x_grp_indexes, y_grp_indexes) {
    .Call(`_valr_subtract_impl`, gdf_x, gdf_y, x_grp_indexes, y_grp_indexes)
}
/scratch/gouwar.j/cran-all/cranData/valr/R/RcppExports.R
#' Convert BED12 to individual exons in BED6.
#'
#' After conversion to BED6 format, the `score` column contains the exon
#' number, with respect to strand (i.e., the first exon for `-` strand
#' genes will have larger start and end coordinates).
#'
#' @param x [ivl_df]
#'
#' @family utilities
#'
#' @examples
#' x <- read_bed12(valr_example("mm9.refGene.bed.gz"))
#'
#' bed12_to_exons(x)
#'
#' @export
bed12_to_exons <- function(x) {
  check_required(x)

  x <- check_interval(x)

  if (!ncol(x) == 12) {
    cli::cli_abort("expected 12 column input")
  }

  res <- bed12toexons_impl(x)
  res <- res[, c("chrom", "start", "end", "name", "score", "strand")]
  res <- bed_sort(res)

  res
}
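
## --- Illustrative sketch (not part of the original package) ---
## A toy BED12 record (two exons on the minus strand) showing the strand-aware
## exon numbering described above. The column names follow read_bed12() and
## are an assumption here, so the conversion call is left commented.
x12 <- tibble::tribble(
  ~chrom, ~start, ~end, ~name, ~score, ~strand, ~cds_start, ~cds_end,
  ~item_rgb, ~exon_count, ~exon_sizes, ~exon_starts,
  "chr1", 100, 500, "tx1", 0, "-", 100, 500, ".", 2, "50,100,", "0,300,"
)
# bed12_to_exons(x12)  # `score` should number exons 5' to 3', i.e. the
#                      # first exon of this `-` strand record starts at 400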
/scratch/gouwar.j/cran-all/cranData/valr/R/bed12_to_exons.r
#' Compute absolute distances between intervals.
#'
#' Computes the absolute distance between the midpoint of each `x` interval and
#' the midpoints of each closest `y` interval.
#'
#' @details Absolute distances are scaled by the inter-reference gap for the
#'   chromosome as follows. For `Q` query points and `R` reference
#'   points on a chromosome, scale the distance for each query point `i` to
#'   the closest reference point by the inter-reference gap for each chromosome.
#'   If an `x` interval has no matching `y` chromosome,
#'   `.absdist` is `NA`.
#'
#' \deqn{d_i(x,y) = min_k(|q_i - r_k|)\frac{R}{Length\ of\ chromosome}}
#'
#' Both absolute and scaled distances are reported as `.absdist` and
#' `.absdist_scaled`.
#'
#' @param x [ivl_df]
#' @param y [ivl_df]
#' @param genome [genome_df]
#'
#' @return
#' [ivl_df] with `.absdist` and `.absdist_scaled` columns.
#'
#' @template stats
#'
#' @family interval statistics
#'
#' @seealso
#' \url{https://journals.plos.org/ploscompbiol/article?id=10.1371/journal.pcbi.1002529}
#'
#' @examples
#' genome <- read_genome(valr_example("hg19.chrom.sizes.gz"))
#'
#' x <- bed_random(genome, seed = 1010486)
#' y <- bed_random(genome, seed = 9203911)
#'
#' bed_absdist(x, y, genome)
#'
#' @export
bed_absdist <- function(x, y, genome) {
  check_required(x)
  check_required(y)
  check_required(genome)

  x <- check_interval(x)
  y <- check_interval(y)
  genome <- check_genome(genome)

  # establish grouping with shared groups (and chrom)
  groups_xy <- shared_groups(x, y)
  groups_xy <- unique(as.character(c("chrom", groups_xy)))
  groups_vars <- rlang::syms(groups_xy)

  # type convert grouping factors to characters if necessary and ungroup
  x <- convert_factors(x, groups_xy)
  y <- convert_factors(y, groups_xy)

  x <- group_by(x, !!!groups_vars)
  y <- group_by(y, !!!groups_vars)

  grp_indexes <- shared_group_indexes(x, y)

  res <- dist_impl(
    x, y,
    grp_indexes$x, grp_indexes$y,
    distcalc = "absdist"
  )

  # convert groups_xy to character vector
  if (!is.null(groups_xy)) {
    groups_xy <- as.character(groups_xy)
  }

  # calculate reference sizes
  genome <- filter(genome, genome$chrom %in% res$chrom)
  if (utils::packageVersion("dplyr") > "1.0.10") {
    genome <- inner_join(genome, get_labels(y),
      by = c("chrom"),
      multiple = "all"
    )
  } else {
    genome <- inner_join(genome, get_labels(y), by = c("chrom"))
  }

  ref_points <- summarize(y, .ref_points = n())
  genome <- inner_join(genome, ref_points, by = c(groups_xy))
  genome <- mutate(genome, .ref_gap = .ref_points / size)
  genome <- select(genome, -size, -.ref_points)

  # calculate scaled reference sizes
  res <- full_join(res, genome, by = c(groups_xy))
  res <- mutate(res, .absdist_scaled = .absdist * .ref_gap)
  res <- select(res, -.ref_gap)

  # report back original x intervals not found
  x_missing <- anti_join(x, res, by = c(groups_xy))
  x_missing <- ungroup(x_missing)
  x_missing <- mutate(x_missing, .absdist = NA, .absdist_scaled = NA)

  res <- bind_rows(res, x_missing)
  res <- bed_sort(res)

  res
}
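
## --- Illustrative sketch (not part of the original package) ---
## A worked toy case for the scaling above, with hypothetical coordinates:
## x midpoint = 150, nearest y midpoint = 300, so .absdist = 150; with 2
## reference intervals on a 1000 bp chromosome, .ref_gap = 2/1000 = 0.002
## and .absdist_scaled = 150 * 0.002 = 0.3.
toy_genome <- tibble::tribble(~chrom, ~size, "chr1", 1000)
toy_x <- tibble::tribble(~chrom, ~start, ~end, "chr1", 100, 200)
toy_y <- tibble::tribble(
  ~chrom, ~start, ~end,
  "chr1", 250, 350,
  "chr1", 800, 900
)
# bed_absdist(toy_x, toy_y, toy_genome)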
/scratch/gouwar.j/cran-all/cranData/valr/R/bed_absdist.r
#' Identify closest intervals.
#'
#' @param x [ivl_df]
#' @param y [ivl_df]
#' @param overlap report overlapping intervals
#' @param suffix colname suffixes in output
#'
#' @note For each interval in x `bed_closest()` returns overlapping intervals from y
#'   and the closest non-intersecting y interval. Setting `overlap = FALSE` will
#'   report the closest non-intersecting y intervals, ignoring any overlapping y
#'   intervals.
#'
#' @template groups
#'
#' @return
#' [ivl_df] with additional columns:
#'   - `.overlap` amount of overlap with overlapping interval. Non-overlapping
#'   or adjacent intervals have an overlap of 0. `.overlap` will not be included
#'   in the output if `overlap = FALSE`.
#'   - `.dist` distance to closest interval. Negative distances
#'   denote upstream intervals. Book-ended intervals have a distance of 1.
#'
#' @family multiple set operations
#'
#' @seealso \url{https://bedtools.readthedocs.io/en/latest/content/tools/closest.html}
#'
#' @examples
#' x <- tibble::tribble(
#'   ~chrom, ~start, ~end,
#'   "chr1", 100, 125
#' )
#'
#' y <- tibble::tribble(
#'   ~chrom, ~start, ~end,
#'   "chr1", 25, 50,
#'   "chr1", 140, 175
#' )
#'
#' bed_glyph(bed_closest(x, y))
#'
#' x <- tibble::tribble(
#'   ~chrom, ~start, ~end,
#'   "chr1", 500, 600,
#'   "chr2", 5000, 6000
#' )
#'
#' y <- tibble::tribble(
#'   ~chrom, ~start, ~end,
#'   "chr1", 100, 200,
#'   "chr1", 150, 200,
#'   "chr1", 550, 580,
#'   "chr2", 7000, 8500
#' )
#'
#' bed_closest(x, y)
#'
#' bed_closest(x, y, overlap = FALSE)
#'
#' # Report distance based on strand
#' x <- tibble::tribble(
#'   ~chrom, ~start, ~end, ~name, ~score, ~strand,
#'   "chr1", 10, 20, "a", 1, "-"
#' )
#'
#' y <- tibble::tribble(
#'   ~chrom, ~start, ~end, ~name, ~score, ~strand,
#'   "chr1", 8, 9, "b", 1, "+",
#'   "chr1", 21, 22, "b", 1, "-"
#' )
#'
#' res <- bed_closest(x, y)
#'
#' # convert distance based on strand
#' res$.dist_strand <- ifelse(res$strand.x == "+", res$.dist, -(res$.dist))
#' res
#'
#' # report absolute distances
#' res$.abs_dist <- abs(res$.dist)
#' res
#'
#' @export
bed_closest <- function(x, y, overlap = TRUE, suffix = c(".x", ".y")) {
  x <- check_interval(x)
  y <- check_interval(y)
  check_suffix(suffix)

  x <- bed_sort(x)
  y <- bed_sort(y)

  id <- get_id_col(x)
  x[[id]] <- seq_len(nrow(x))
  x_id_out <- paste0(id, suffix[1])

  # establish grouping with shared groups (and chrom)
  groups_xy <- shared_groups(x, y)
  groups_xy <- unique(as.character(c("chrom", groups_xy)))
  groups_vars <- rlang::syms(groups_xy)

  # type convert grouping factors to characters if necessary and ungroup
  x <- convert_factors(x, groups_xy)
  y <- convert_factors(y, groups_xy)

  x <- group_by(x, !!!groups_vars)
  y <- group_by(y, !!!groups_vars)

  ol_ivls <- bed_intersect(x, y, suffix = suffix)

  grp_indexes <- shared_group_indexes(x, y)

  res <- closest_impl(
    x, y,
    grp_indexes$x, grp_indexes$y,
    suffix[1], suffix[2]
  )

  res$.overlap <- 0L
  ol_ivls <- mutate(ol_ivls,
    .dist = case_when(
      .overlap > 0 ~ 0L,
      ol_ivls$end.y <= ol_ivls$start.x ~ -1L,
      TRUE ~ 1L
    )
  )

  res <- res[colnames(ol_ivls)]
  res <- bind_rows(ol_ivls, res)

  # get x ivls from groups not found in y
  x <- add_colname_suffix(x, suffix[1])
  mi <- get_no_group_ivls(x, res, x_id_out)

  if (nrow(mi) > 0) {
    res <- bind_rows(res, mi)
  }

  if (!overlap) {
    res <- res[which(res$.overlap <= 0 | is.na(res$.overlap)), ]
    res[[".overlap"]] <- NULL
  }

  # reorder by input x ivls
  res <- res[order(res[[x_id_out]], method = "radix"), ]
  res[[x_id_out]] <- NULL

  res
}

add_colname_suffix <- function(x, s) {
  cidx <- which(names(x) != "chrom")
  new_ids <- str_c(colnames(x), s)
  colnames(x)[cidx] <- new_ids[cidx]
  x
}

get_no_group_ivls <- function(x, y, cid) {
  x <- ungroup(x)
  mi <- x[!x[[cid]] %in% y[[cid]], ]
  mi
}
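
## --- Illustrative sketch (not part of the original package) ---
## The sign of .dist encodes direction: a toy case with one upstream and one
## book-ended y interval, using hypothetical coordinates.
x1 <- tibble::tribble(~chrom, ~start, ~end, "chr1", 100, 200)
y1 <- tibble::tribble(
  ~chrom, ~start, ~end,
  "chr1", 50, 99,    # upstream of x: reports a negative .dist
  "chr1", 200, 250   # book-ended with x: .overlap = 0 and .dist = 1
)
# bed_closest(x1, y1)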
/scratch/gouwar.j/cran-all/cranData/valr/R/bed_closest.r
#' Cluster neighboring intervals.
#'
#' The output `.id` column can be used in downstream grouping operations. Default
#' `max_dist = 0` means that both overlapping and book-ended intervals will be
#' clustered.
#'
#' @param x [ivl_df]
#' @param max_dist maximum distance between clustered intervals.
#'
#' @template groups
#'
#' @return [ivl_df] with `.id` column specifying sets of clustered intervals.
#'
#' @family single set operations
#'
#' @seealso
#' \url{https://bedtools.readthedocs.io/en/latest/content/tools/cluster.html}
#'
#' @examples
#' x <- tibble::tribble(
#'   ~chrom, ~start, ~end,
#'   "chr1", 100, 200,
#'   "chr1", 180, 250,
#'   "chr1", 250, 500,
#'   "chr1", 501, 1000,
#'   "chr2", 1, 100,
#'   "chr2", 150, 200
#' )
#'
#' bed_cluster(x)
#'
#' # glyph illustrating clustering of overlapping and book-ended intervals
#' x <- tibble::tribble(
#'   ~chrom, ~start, ~end,
#'   "chr1", 1, 10,
#'   "chr1", 5, 20,
#'   "chr1", 30, 40,
#'   "chr1", 40, 50,
#'   "chr1", 80, 90
#' )
#'
#' bed_glyph(bed_cluster(x), label = ".id")
#'
#' @export
bed_cluster <- function(x, max_dist = 0) {
  check_required(x)

  x <- check_interval(x)

  groups <- rlang::syms(unique(c("chrom", group_vars(x))))
  res <- group_by(x, !!!groups)

  res <- bed_sort(res)
  res <- merge_impl(res, max_dist, collapse = FALSE)
  res <- mutate(res, .id = .id_merge)
  res <- select(res, !!quo(-one_of(".id_merge")))
  res <- ungroup(res)
  res <- mutate(
    res,
    .id = match(.id, unique(.id))
  )

  res
}
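
## --- Illustrative sketch (not part of the original package) ---
## Because clustering respects existing groups (see group_vars() above),
## grouping by strand should keep overlapping intervals on opposite strands
## in different clusters; a minimal sketch with hypothetical data.
xs <- tibble::tribble(
  ~chrom, ~start, ~end, ~strand,
  "chr1", 1, 10, "+",
  "chr1", 5, 20, "-"
)
# bed_cluster(dplyr::group_by(xs, strand))
# the two intervals overlap but fall in different strand groups,
# so they should receive different .id values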
/scratch/gouwar.j/cran-all/cranData/valr/R/bed_cluster.r
#' Identify intervals in a genome not covered by a query.
#'
#' @param x [ivl_df]
#' @param genome [ivl_df]
#'
#' @family single set operations
#'
#' @return [ivl_df]
#'
#' @examples
#' x <- tibble::tribble(
#'   ~chrom, ~start, ~end,
#'   "chr1", 0, 10,
#'   "chr1", 75, 100
#' )
#'
#' genome <- tibble::tribble(
#'   ~chrom, ~size,
#'   "chr1", 200
#' )
#'
#' bed_glyph(bed_complement(x, genome))
#'
#' genome <- tibble::tribble(
#'   ~chrom, ~size,
#'   "chr1", 500,
#'   "chr2", 600,
#'   "chr3", 800
#' )
#'
#' x <- tibble::tribble(
#'   ~chrom, ~start, ~end,
#'   "chr1", 100, 300,
#'   "chr1", 200, 400,
#'   "chr2", 0, 100,
#'   "chr2", 200, 400,
#'   "chr3", 500, 600
#' )
#'
#' # intervals not covered by x
#' bed_complement(x, genome)
#'
#' @export
bed_complement <- function(x, genome) {
  check_required(x)
  check_required(genome)

  x <- check_interval(x)
  genome <- check_genome(genome)

  res <- bed_merge(x)

  # non-overlapping chroms
  chroms_no_overlaps <- anti_join(genome, res, by = "chrom")
  chroms_no_overlaps <- mutate(chroms_no_overlaps, start = 0)
  chroms_no_overlaps <- select(chroms_no_overlaps, chrom, start, end = size)

  # remove rows from x that are not in genome
  res <- semi_join(res, genome, by = "chrom")

  res <- group_by(res, chrom)
  res <- complement_impl(res, genome)

  res <- bind_rows(res, as_tibble(chroms_no_overlaps))
  res <- bed_sort(res)

  res
}
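
## --- Illustrative sketch (not part of the original package) ---
## A property check on tiny hypothetical data: the complement and the merged
## input should partition each chromosome, so their lengths sum to the
## genome size (assuming x lies within the genome bounds).
g <- tibble::tribble(~chrom, ~size, "chr1", 500)
xc <- tibble::tribble(~chrom, ~start, ~end, "chr1", 100, 300)
ivl_len <- function(df) sum(df$end - df$start)  # hypothetical helper
# ivl_len(bed_complement(xc, g)) + ivl_len(bed_merge(xc)) == sum(g$size)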
/scratch/gouwar.j/cran-all/cranData/valr/R/bed_complement.r
#' Compute coverage of intervals.
#'
#' @param x [ivl_df]
#' @param y [ivl_df]
#' @param ... extra arguments (not used)
#'
#' @note Book-ended intervals are included in coverage calculations.
#'
#' @template groups
#'
#' @family multiple set operations
#'
#' @return
#' [ivl_df] with the following additional columns:
#'
#'   - `.ints` number of `x` intersections
#'   - `.cov` per-base coverage of `x` intervals
#'   - `.len` total length of `y` intervals covered by `x` intervals
#'   - `.frac` `.len` scaled by the number of `y` intervals
#'
#' @examples
#' x <- tibble::tribble(
#'   ~chrom, ~start, ~end, ~strand,
#'   "chr1", 100, 500, "+",
#'   "chr2", 200, 400, "+",
#'   "chr2", 300, 500, "-",
#'   "chr2", 800, 900, "-"
#' )
#'
#' y <- tibble::tribble(
#'   ~chrom, ~start, ~end, ~value, ~strand,
#'   "chr1", 150, 400, 100, "+",
#'   "chr1", 500, 550, 100, "+",
#'   "chr2", 230, 430, 200, "-",
#'   "chr2", 350, 430, 300, "-"
#' )
#'
#' bed_coverage(x, y)
#'
#' @seealso \url{https://bedtools.readthedocs.io/en/latest/content/tools/coverage.html}
#'
#' @export
bed_coverage <- function(x, y, ...) {
  check_required(x)
  check_required(y)

  x <- check_interval(x)
  y <- check_interval(y)

  x <- bed_sort(x)
  y <- bed_sort(y)

  ## add integer .id to track each input x ivl
  .id_col <- get_id_col(x)
  x[[.id_col]] <- seq_len(nrow(x))

  # establish grouping with shared groups (and chrom)
  groups_xy <- shared_groups(x, y)
  groups_xy <- unique(as.character(c("chrom", groups_xy)))
  groups_vars <- rlang::syms(groups_xy)

  # type convert grouping factors to characters if necessary and ungroup
  x <- convert_factors(x, groups_xy)
  y <- convert_factors(y, groups_xy)

  x <- group_by(x, !!!groups_vars)
  y <- group_by(y, !!!groups_vars)

  grp_indexes <- shared_group_indexes(x, y)

  res <- coverage_impl(
    x, y,
    grp_indexes$x, grp_indexes$y
  )

  # get x ivls from groups not found in y
  mi <- get_missing_ivls(res, x, .id_col)
  res <- bind_rows(res, mi)

  # reorder by index
  res <- res[order(res[[.id_col]]), ]
  res[[.id_col]] <- NULL

  res
}

get_missing_ivls <- function(res, x, .id_col) {
  x <- ungroup(x)
  mi <- x[!x[[.id_col]] %in% res[[.id_col]], ]
  mi[[".ints"]] <- 0L
  mi[[".cov"]] <- 0L
  mi[[".len"]] <- mi[["end"]] - mi[["start"]]
  mi[[".frac"]] <- 0
  mi
}
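
## --- Illustrative sketch (not part of the original package) ---
## The zero-coverage fill-in performed by get_missing_ivls() above: an x
## interval whose group has no y intervals is returned with zeroed columns
## and .len set to the interval's own length. Hypothetical data.
xz <- tibble::tribble(~chrom, ~start, ~end, "chr3", 0, 100)
yz <- tibble::tribble(~chrom, ~start, ~end, "chr1", 0, 50)
# bed_coverage(xz, yz)
# chr3 has no y intervals, so the row should come back with
# .ints = 0, .cov = 0, .len = 100, .frac = 0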
/scratch/gouwar.j/cran-all/cranData/valr/R/bed_coverage.R
#' Fisher's test to measure overlap between two sets of intervals.
#'
#' Calculate Fisher's test on number of intervals that are shared and unique
#' between two sets of `x` and `y` intervals.
#'
#' @param x [ivl_df]
#' @param y [ivl_df]
#' @param genome [genome_df]
#'
#' @template stats
#'
#' @family interval statistics
#'
#' @seealso
#' \url{https://bedtools.readthedocs.io/en/latest/content/tools/fisher.html}
#'
#' @return [ivl_df]
#'
#' @examples
#' genome <- read_genome(valr_example("hg19.chrom.sizes.gz"))
#'
#' x <- bed_random(genome, n = 1e4, seed = 1010486)
#' y <- bed_random(genome, n = 1e4, seed = 9203911)
#'
#' bed_fisher(x, y, genome)
#'
#' @export
bed_fisher <- function(x, y, genome) {
  check_required(x)
  check_required(y)
  check_required(genome)

  x <- check_interval(x)
  y <- check_interval(y)
  genome <- check_genome(genome)

  # number of intervals
  n_x <- nrow(x)
  n_y <- nrow(y)

  # union of intervals (i.e. total bases covered)
  union_x <- interval_union(x)
  union_y <- interval_union(y)

  # mean interval sizes
  mean_x <- union_x / n_x
  mean_y <- union_y / n_y

  # heuristic from bedtools fisher.cpp
  mean_total <- mean_x + mean_y

  # number of intersections (`n11` in fisher.cpp)
  isect <- bed_intersect(x, y)
  n_i <- nrow(isect)

  # x, not y (`n12` in fisher.cpp)
  n_x_only <- max(0, n_x - n_i)
  # y, not x (`n21` in fisher.cpp)
  n_y_only <- max(0, n_y - n_i)

  genome_size <- sum(as.numeric(genome$size))

  # estimated total intervals (`n22_full`)
  total_est <- round(max(
    n_i + n_x_only + n_y_only,
    genome_size / mean_total
  ))

  # estimate n for neither x nor y (`n22`)
  not_est <- total_est - n_i - n_x_only - n_y_only

  fisher_mat <- matrix(
    c(n_i, n_x_only, n_y_only, not_est),
    nrow = 2,
    dimnames = list(
      "in y?" = c("yes", "no"),
      "in x?" = c("yes", "no")
    )
  )

  stat <- stats::fisher.test(fisher_mat)

  broom::tidy(stat)
}

#' @noRd
interval_union <- function(x) {
  res <- bed_merge(x)
  res <- mutate(res, .size = end - start)
  sum(res$.size)
}
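
## --- Illustrative sketch (not part of the original package) ---
## The 2x2 contingency table handed to fisher.test() above, built with
## hypothetical counts; column-wise fill puts the shared count in [1,1]
## and the "neither" estimate in [2,2], as in bed_fisher().
n_i <- 500; n_x_only <- 1500; n_y_only <- 1200; not_est <- 96800
m <- matrix(
  c(n_i, n_x_only, n_y_only, not_est),
  nrow = 2,
  dimnames = list("in y?" = c("yes", "no"), "in x?" = c("yes", "no"))
)
# stats::fisher.test(m)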
/scratch/gouwar.j/cran-all/cranData/valr/R/bed_fisher.r
#' Create flanking intervals from input intervals.
#'
#' @param x [ivl_df]
#' @param genome [genome_df]
#' @param both number of bases on both sides
#' @param left number of bases on left side
#' @param right number of bases on right side
#' @param fraction define flanks based on fraction of interval length
#' @param strand define `left` and `right` based on strand
#' @param trim adjust coordinates for out-of-bounds intervals
#' @param ... extra arguments (not used)
#'
#' @return [ivl_df]
#'
#' @family single set operations
#' @seealso
#' \url{https://bedtools.readthedocs.io/en/latest/content/tools/flank.html}
#'
#' @examples
#' x <- tibble::tribble(
#'   ~chrom, ~start, ~end,
#'   "chr1", 25, 50,
#'   "chr1", 100, 125
#' )
#'
#' genome <- tibble::tribble(
#'   ~chrom, ~size,
#'   "chr1", 130
#' )
#'
#' bed_glyph(bed_flank(x, genome, both = 20))
#'
#' x <- tibble::tribble(
#'   ~chrom, ~start, ~end, ~name, ~score, ~strand,
#'   "chr1", 500, 1000, ".", ".", "+",
#'   "chr1", 1000, 1500, ".", ".", "-"
#' )
#'
#' genome <- tibble::tribble(
#'   ~chrom, ~size,
#'   "chr1", 5000
#' )
#'
#' bed_flank(x, genome, left = 100)
#'
#' bed_flank(x, genome, right = 100)
#'
#' bed_flank(x, genome, both = 100)
#'
#' bed_flank(x, genome, both = 0.5, fraction = TRUE)
#'
#' @export
bed_flank <- function(x, genome,
                      both = 0, left = 0, right = 0,
                      fraction = FALSE, strand = FALSE,
                      trim = FALSE, ...) {
  check_required(x)
  check_required(genome)

  x <- check_interval(x)
  genome <- check_genome(genome)

  if (!any(c(both, left, right) > 0)) {
    cli::cli_abort("one of {.var both}, {.var left}, or {.var right} must be a positive value")
  }

  if (strand && !"strand" %in% colnames(x)) {
    cli::cli_abort("expected {.var strand} column in {.var x}")
  }

  if (both != 0 && (left != 0 || right != 0)) {
    cli::cli_abort("ambiguous side spec for bed_flank")
  }

  if (both) left <- right <- both

  res <- flank_impl(
    x, genome,
    both, left, right,
    fraction, strand, trim
  )

  res <- bed_sort(res)

  res
}
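
## --- Illustrative sketch (not part of the original package) ---
## Out-of-bounds behavior: with trim = TRUE a flank that would extend past
## the chromosome end should be clipped to the genome bounds. Hypothetical
## coordinates.
gf <- tibble::tribble(~chrom, ~size, "chr1", 120)
xf <- tibble::tribble(~chrom, ~start, ~end, "chr1", 100, 110)
# bed_flank(xf, gf, right = 50, trim = TRUE)
# the right flank [110, 160) should be trimmed to end at 120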
/scratch/gouwar.j/cran-all/cranData/valr/R/bed_flank.r
#' Create example glyphs for valr functions.
#'
#' Used to illustrate the output of valr functions with small examples.
#'
#' @param expr expression to evaluate
#' @param label column name to use for label values. should be present in the
#'   result of the call.
#'
#' @return [ggplot2::ggplot()]
#'
#' @examples
#' x <- tibble::tribble(
#'   ~chrom, ~start, ~end,
#'   "chr1", 25, 50,
#'   "chr1", 100, 125
#' )
#'
#' y <- tibble::tribble(
#'   ~chrom, ~start, ~end, ~value,
#'   "chr1", 30, 75, 50
#' )
#'
#' bed_glyph(bed_intersect(x, y))
#'
#' x <- tibble::tribble(
#'   ~chrom, ~start, ~end,
#'   "chr1", 30, 75,
#'   "chr1", 50, 90,
#'   "chr1", 91, 120
#' )
#'
#' bed_glyph(bed_merge(x))
#'
#' bed_glyph(bed_cluster(x), label = ".id")
#'
#' @export
bed_glyph <- function(expr, label = NULL) {
  expr <- substitute(expr)
  # assign `expr <- quote(bed_intersect(x, y))` at this point to debug

  args_all <- formals(match.fun(expr[[1]]))

  # get required args i.e. those without defaults
  args_req <- names(args_all[sapply(args_all, is.name)])

  # for bed_intersect replace ... with y
  if (expr[[1]] == "bed_intersect") args_req[args_req == "..."] <- "y"

  args_excl <- c("genome", "...")
  args_req <- args_req[!args_req %in% args_excl]

  nargs <- length(args_req)

  # evaluate the expression in the environment context
  env <- parent.frame()
  res <- eval(expr, envir = env)

  # bail if the result is too big
  max_rows <- 100
  if (nrow(res) > max_rows) {
    cli::cli_abort("max_rows exceeded in bed_glyph.")
  }

  # get default columns
  cols_default <- c("chrom")
  if ("start" %in% names(res)) cols_default <- c(cols_default, "start")
  if ("end" %in% names(res)) cols_default <- c(cols_default, "end")

  cols_vars <- rlang::syms(cols_default)
  cols_out <- select(res, !!!cols_vars)

  # get cols that are now suffixed in the result. This is a reasonable default
  # for bed_intersect and functions that call bed_intersect.
  suffix_default <- stringr::fixed(".x")
  cols_out <- bind_cols(cols_out, select(res, ends_with(suffix_default)))

  # get any named columns from the expr
  expr_names <- names(expr)
  expr_names <- expr_names[expr_names != ""]
  expr_names <- intersect(expr_names, names(res))
  if (length(expr_names) > 0) {
    cols_out <- bind_cols(cols_out, select(res, starts_with(expr_names)))
  }

  # get dot cols from result, e.g. `.overlap`
  dot_fixed <- stringr::fixed(".")
  cols_out <- bind_cols(cols_out, select(res, starts_with(dot_fixed)))

  # strip suffixes from names, assumes suffixes are dot-character, e.g. `.x`
  names_strip <- stringr::str_replace(names(cols_out), "\\.[:alnum:]$", "")
  names(cols_out) <- names_strip

  res <- cols_out

  name_result <- "result"
  res <- mutate(res, .facet = name_result)

  # these are the equivalent of the `x` and `y` formals, except are the names
  # of the args in the quoted call.
  expr_vars <- all.vars(expr)

  # this fetches the `x` and `y` rows from the environment
  for (i in 1:nargs) {
    env_i <- get(expr_vars[i], env)
    rows <- mutate(env_i, .facet = expr_vars[i])
    res <- bind_rows(res, as_tibble(rows))
  }

  # assign `.y` values in the result based on clustering
  ys <- group_by(res, .facet)
  ys <- bed_cluster(ys)
  ys <- group_by(ys, .facet, .id)
  ys <- mutate(ys, .y = row_number(.id))
  ys <- ungroup(ys)
  ys <- arrange(ys, .facet, chrom, start)

  res <- arrange(res, .facet, chrom, start)
  res <- mutate(res, .y = ys$.y)

  # make name_result col appear last in the facets
  fct_names <- c(expr_vars, name_result)
  res <- mutate(res, .facet = factor(.facet, levels = fct_names))

  # plot title
  title <- deparse(substitute(expr))

  glyph_plot(res, title, label) + glyph_theme()
}

#' plot for bed_glyph
#' @noRd
glyph_plot <- function(.data, title = NULL, label = NULL) {
  # Colorbrewer 3-class `Greys`
  fill_colors <- c("#f0f0f0", "#bdbdbd", "#636363")

  glyph <- ggplot(.data) +
    geom_rect(
      aes(
        xmin = .data[["start"]],
        xmax = .data[["end"]],
        ymin = .data[[".y"]],
        ymax = .data[[".y"]] + 0.5,
        fill = .data[[".facet"]]
      ),
      color = "black",
      alpha = 0.75
    ) +
    facet_grid(
      .facet ~ .,
      switch = "y",
      scales = "free_y",
      space = "free_y"
    ) +
    scale_fill_manual(values = fill_colors) +
    labs(title = title, x = NULL, y = NULL)

  if (!is.null(label)) {
    label <- as.name(label)
    aes_label <- aes(
      x = (.data[["end"]] - .data[["start"]]) / 2 + .data[["start"]],
      y = .data[[".y"]] + 0.25,
      label = !!label
    )
    glyph <- glyph + geom_label(aes_label, na.rm = TRUE)
  }

  glyph
}

#' theme for bed_glyph
#' @noRd
glyph_theme <- function(base_size = 12, base_family = "Helvetica") {
  theme_bw(base_size = base_size, base_family = base_family) %+replace%
    theme(
      axis.line.y = element_blank(),
      axis.ticks.y = element_blank(),
      axis.text.y = element_blank(),
      legend.position = "none",
      panel.grid = element_blank(),
      panel.background = element_blank(),
      plot.background = element_blank(),
      panel.border = element_blank()
    )
}
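
## --- Illustrative sketch (not part of the original package) ---
## `label` must name a column present in the evaluated expression's result;
## for bed_intersect() the computed `.overlap` column is a natural choice.
## Hypothetical data.
xg <- tibble::tribble(~chrom, ~start, ~end, "chr1", 25, 50)
yg <- tibble::tribble(~chrom, ~start, ~end, "chr1", 30, 75)
# bed_glyph(bed_intersect(xg, yg), label = ".overlap")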
/scratch/gouwar.j/cran-all/cranData/valr/R/bed_glyph.r
#' Identify intersecting intervals.
#'
#' Report intersecting intervals from `x` and `y` tbls. Book-ended intervals
#' have `.overlap` values of `0` in the output.
#'
#' @param x [ivl_df]
#' @param ... one or more (e.g. a list of) `y` [ivl_df()]s
#' @param invert report `x` intervals not in `y`
#' @param suffix colname suffixes in output
#'
#' @return
#'
#' [ivl_df] with original columns from `x` and `y` suffixed with `.x`
#' and `.y`, and a new `.overlap` column with the extent of overlap for the
#' intersecting intervals.
#'
#' If multiple `y` tbls are supplied, the `.source` column contains the
#' variable names associated with each interval. All original columns from
#' the `y` tbls are suffixed with `.y` in the output.
#'
#' If `...` contains named inputs (i.e. `a = y, b = z` or `list(a = y, b = z)`),
#' then `.source` will contain the supplied names (see examples).
#'
#' @template groups
#'
#' @examples
#' x <- tibble::tribble(
#'   ~chrom, ~start, ~end,
#'   "chr1", 25, 50,
#'   "chr1", 100, 125
#' )
#'
#' y <- tibble::tribble(
#'   ~chrom, ~start, ~end,
#'   "chr1", 30, 75
#' )
#'
#' bed_glyph(bed_intersect(x, y))
#'
#' bed_glyph(bed_intersect(x, y, invert = TRUE))
#'
#' x <- tibble::tribble(
#'   ~chrom, ~start, ~end,
#'   "chr1", 100, 500,
#'   "chr2", 200, 400,
#'   "chr2", 300, 500,
#'   "chr2", 800, 900
#' )
#'
#' y <- tibble::tribble(
#'   ~chrom, ~start, ~end, ~value,
#'   "chr1", 150, 400, 100,
#'   "chr1", 500, 550, 100,
#'   "chr2", 230, 430, 200,
#'   "chr2", 350, 430, 300
#' )
#'
#' bed_intersect(x, y)
#'
#' bed_intersect(x, y, invert = TRUE)
#'
#' # start and end of each overlapping interval
#' res <- bed_intersect(x, y)
#' dplyr::mutate(res,
#'   start = pmax(start.x, start.y),
#'   end = pmin(end.x, end.y)
#' )
#'
#' z <- tibble::tribble(
#'   ~chrom, ~start, ~end, ~value,
#'   "chr1", 150, 400, 100,
#'   "chr1", 500, 550, 100,
#'   "chr2", 230, 430, 200,
#'   "chr2", 750, 900, 400
#' )
#'
#' bed_intersect(x, y, z)
#'
#' bed_intersect(x, exons = y, introns = z)
#'
#' # a list of tbl_intervals can also be passed
#' bed_intersect(x, list(exons = y, introns = z))
#'
#' @family multiple set operations
#'
#' @seealso
#' \url{https://bedtools.readthedocs.io/en/latest/content/tools/intersect.html}
#'
#' @export
bed_intersect <- function(x, ..., invert = FALSE, suffix = c(".x", ".y")) {
  check_required(x)

  if (dots_n(...) == 0) {
    cli::cli_abort("One or more tbls required in {.var ...}")
  }

  y_tbl <- parse_dots(...)
  multiple_tbls <- FALSE
  if (length(y_tbl) > 1) {
    multiple_tbls <- TRUE
    # bind_rows preserves grouping
    y <- bind_rows(y_tbl, .id = ".source")
    select_vars <- list(quo(-one_of(".source")), quo(everything()), ".source")
    y <- select(y, !!!select_vars)
  } else {
    # only one tbl supplied, so extract out single tbl from list
    y <- y_tbl[[1]]
  }

  x <- check_interval(x)
  y <- check_interval(y)

  check_suffix(suffix)

  # establish grouping with shared groups (and chrom)
  groups_xy <- shared_groups(x, y)
  groups_xy <- unique(as.character(c("chrom", groups_xy)))
  groups_vars <- rlang::syms(groups_xy)

  # type convert grouping factors to characters if necessary and ungroup
  x <- convert_factors(x, groups_xy)
  y <- convert_factors(y, groups_xy)

  x <- group_by(x, !!!groups_vars)
  y <- group_by(y, !!!groups_vars)

  suffix <- list(x = suffix[1], y = suffix[2])

  grp_indexes <- shared_group_indexes(x, y)

  res <- intersect_impl(
    x, y,
    grp_indexes$x,
    grp_indexes$y,
    invert,
    suffix$x,
    suffix$y
  )

  if (invert) {
    res <- filter(res, is.na(.overlap))
    res <- select(
      res,
      chrom,
      ends_with(".x")
    )
    names(res) <- str_replace(names(res), fixed(".x"), "")
    return(res)
  }

  if (multiple_tbls) {
    # rename .source.y to .source
    source_col <- paste0(".source", suffix$y)
    replace_col <- str_replace(
      source_col,
      fixed(suffix$y),
      ""
    )
    cols <- colnames(res)
    colnames(res) <- ifelse(cols == source_col, replace_col, cols)
  }

  res
}

# handle objects passed to ... in bed_intersect
parse_dots <- function(...) {
  # determine if list supplied to ... or series of variables
  n_inputs <- ...length()
  res <- list(...)
  if (typeof(substitute(...)) == "symbol") {
    if (length(res) == 1 && inherits(res[[1]], "list")) {
      # list was passed to ...
      res <- res[[1]]
    } else if (n_inputs > 1 && is.null(names(res))) {
      # multiple objects passed, e.g. ... == x, y or a = x, b = y
      # name each tbl based on the supplied variable
      dots <- eval(substitute(alist(...)))
      names(res) <- dots
    }
  } else if (n_inputs == 1) {
    # handles list initialized in ...
    # e.g. ... = list(y, z) or lst[1]
    res <- res[[1]]
    if (is.null(names(res))) {
      # name each tbl based on supplied variable if list initialized in dots
      dots <- eval(substitute(alist(...)))[[1]]
      # extract out variables from language object list(a, b, c)
      dots <- as.character(dots)
      if (dots[1] == "list") {
        dots <- dots[2:length(dots)]
        names(res) <- as.character(dots)
      }
    }
  }
  res
}
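# Sketch (not part of valr) of the dots-naming trick in parse_dots(): when
# unnamed objects come through `...`, `substitute(alist(...))` recovers the
# unevaluated argument symbols, and deparsing them yields names. `f`, `a`,
# and `b` are hypothetical.
if (FALSE) {
  f <- function(...) {
    res <- list(...)
    names(res) <- as.character(eval(substitute(alist(...))))
    res
  }
  a <- 1
  b <- 2
  names(f(a, b))
  #> [1] "a" "b"
}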
/scratch/gouwar.j/cran-all/cranData/valr/R/bed_intersect.r
#' Calculate the Jaccard statistic for two sets of intervals.
#'
#' Quantifies the extent of overlap between two sets of intervals in terms of
#' base-pairs. Groups shared between the inputs are used to calculate the
#' statistic for subsets of data.
#'
#' @details The Jaccard statistic takes values of `[0,1]` and is measured as:
#'
#' \deqn{ J(x,y) = \frac{\mid x \bigcap y \mid}
#'                      {\mid x \bigcup y \mid} =
#'                 \frac{\mid x \bigcap y \mid}
#'                      {\mid x \mid + \mid y \mid -
#'                       \mid x \bigcap y \mid} }
#'
#' @param x [ivl_df]
#' @param y [ivl_df]
#'
#' @template stats
#'
#' @family interval statistics
#'
#' @return
#' tibble with the following columns:
#'
#'   - `len_i` length of the intersection in base-pairs
#'   - `len_u` length of the union in base-pairs
#'   - `jaccard` value of jaccard statistic
#'   - `n` number of intersecting intervals between `x` and `y`
#'
#' If inputs are grouped, the return value will contain one set of values per group.
#'
#' @seealso
#' \url{https://bedtools.readthedocs.io/en/latest/content/tools/jaccard.html}
#'
#' @examples
#' genome <- read_genome(valr_example("hg19.chrom.sizes.gz"))
#'
#' x <- bed_random(genome, seed = 1010486)
#' y <- bed_random(genome, seed = 9203911)
#'
#' bed_jaccard(x, y)
#'
#' # calculate jaccard per chromosome
#' bed_jaccard(
#'   dplyr::group_by(x, chrom),
#'   dplyr::group_by(y, chrom)
#' )
#'
#' @export
bed_jaccard <- function(x, y) {
  check_required(x)
  check_required(y)

  x <- check_interval(x)
  y <- check_interval(y)

  groups_shared <- shared_groups(x, y)

  x <- bed_merge(x)
  y <- bed_merge(y)

  res_intersect <- bed_intersect(x, y)

  if (!is.null(groups_shared)) {
    x <- group_by(x, !!!syms(groups_shared))
    y <- group_by(y, !!!syms(groups_shared))
    res_intersect <- group_by(res_intersect, !!!syms(groups_shared))
  }

  res_intersect <- summarize(
    res_intersect,
    sum_overlap = sum(as.numeric(.overlap)),
    n_int = as.numeric(n())
  )

  res_x <- mutate(x, .size = end - start)
  res_x <- summarize(res_x, sum_x = sum(as.numeric(.size)))

  res_y <- mutate(y, .size = end - start)
  res_y <- summarize(res_y, sum_y = sum(as.numeric(.size)))

  if (!is.null(groups_shared)) {
    res <- left_join(res_intersect, res_x, by = as.character(groups_shared))
    res <- left_join(res, res_y, by = as.character(groups_shared))

    res <- mutate(res, sum_xy = sum_x + sum_y)

    group_cols <- select(res, !!!syms(groups_shared))

    res <- transmute(
      res,
      len_i = sum_overlap,
      len_u = sum_xy,
      jaccard = sum_overlap / (sum_xy - sum_overlap),
      n = n_int
    )

    res <- bind_cols(group_cols, res)
  } else {
    n_i <- res_intersect$sum_overlap
    n_u <- res_x$sum_x + res_y$sum_y
    jaccard <- n_i / (n_u - n_i)

    res <- tibble(
      len_i = n_i,
      len_u = n_u,
      jaccard = jaccard,
      n = res_intersect$n_int
    )
  }

  res
}
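# Worked check (not part of valr) of the statistic: x covers [100, 200) and
# y covers [150, 250), so the intersection is 50 bp and the union is
# 100 + 100 - 50 = 150 bp, giving J = 50 / 150.
if (FALSE) {
  x <- tibble::tribble(~chrom, ~start, ~end, "chr1", 100, 200)
  y <- tibble::tribble(~chrom, ~start, ~end, "chr1", 150, 250)
  bed_jaccard(x, y)$jaccard
  #> [1] 0.3333333
}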
/scratch/gouwar.j/cran-all/cranData/valr/R/bed_jaccard.r
#' Divide intervals into new sub-intervals ("windows").
#'
#' @param x [ivl_df]
#' @param win_size divide intervals into fixed-size windows
#' @param step_size size to step before next window
#' @param num_win divide intervals into a fixed number of windows
#' @param reverse reverse window numbers
#'
#' @note The `name` and `.win_id` columns can be used to create new
#'   interval names (see 'namenum' example below) or in subsequent
#'   `group_by` operations (see vignette and the sketch after this function).
#'
#' @family utilities
#'
#' @return [ivl_df] with `.win_id` column that contains a numeric
#'   identifier for the window.
#'
#' @examples
#' x <- tibble::tribble(
#'   ~chrom, ~start, ~end, ~name, ~score, ~strand,
#'   "chr1", 100, 200, "A", ".", "+"
#' )
#'
#' bed_glyph(bed_makewindows(x, num_win = 10), label = ".win_id")
#'
#' # Fixed number of windows
#' bed_makewindows(x, num_win = 10)
#'
#' # Fixed window size
#' bed_makewindows(x, win_size = 10)
#'
#' # Fixed window size with overlaps
#' bed_makewindows(x, win_size = 10, step_size = 5)
#'
#' # reverse win_id
#' bed_makewindows(x, win_size = 10, reverse = TRUE)
#'
#' # bedtools 'namenum'
#' wins <- bed_makewindows(x, win_size = 10)
#' dplyr::mutate(wins, namenum = stringr::str_c(name, "_", .win_id))
#'
#' @export
bed_makewindows <- function(x,
                            win_size = 0,
                            step_size = 0,
                            num_win = 0,
                            reverse = FALSE) {
  check_required(x)

  x <- check_interval(x)

  if (win_size == 0 && num_win == 0) {
    cli::cli_abort("specify either {.var win_size} or {.var num_win}")
  }

  if (win_size < 0 || num_win < 0) {
    cli::cli_abort("{.var win_size} and {.var num_win} must be >= 0")
  }

  if (any(x$end - x$start < num_win)) {
    cli::cli_alert_warning("intervals with lengths < {.var num_win} will be skipped.")
  }

  # dummy win_ids
  x <- mutate(x, .win_id = 0)

  res <- makewindows_impl(x, win_size, num_win, step_size, reverse)
  res
}
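# Sketch (not part of valr) of the `group_by` workflow mentioned in the note
# above: intersect windows with a signal tbl, then summarize per `.win_id`.
# The tbls here are hypothetical.
if (FALSE) {
  x <- tibble::tribble(~chrom, ~start, ~end, "chr1", 0, 100)
  y <- tibble::tribble(~chrom, ~start, ~end, "chr1", 20, 60)
  wins <- bed_makewindows(x, win_size = 25)
  res <- bed_intersect(wins, y)
  dplyr::summarize(dplyr::group_by(res, .win_id.x), bp = sum(.overlap))
}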
/scratch/gouwar.j/cran-all/cranData/valr/R/bed_makewindows.r
#' Calculate summaries from overlapping intervals. #' #' Apply functions like [min()] and [count()] to intersecting intervals. #' [bed_map()] uses [bed_intersect()] to identify intersecting intervals, so #' output columns will be suffixed with `.x` and `.y`. Expressions that refer to #' input columns from `x` and `y` columns must take these suffixes into account. #' #' Book-ended intervals can be included by setting `min_overlap = 0`. #' #' Non-intersecting intervals from `x` are included in the result with `NA` #' values. #' #' @param x [ivl_df] #' @param y [ivl_df] #' @param ... name-value pairs specifying column names and expressions to apply #' @param min_overlap minimum overlap for intervals. #' #' @template groups #' #' @return [ivl_df] #' #' @family multiple set operations #' #' @seealso #' \url{https://bedtools.readthedocs.io/en/latest/content/tools/map.html} #' #' @example inst/example/bed_map.r #' #' @export bed_map <- function(x, y, ..., min_overlap = 1) { check_required(x) check_required(y) if (rlang::dots_n(...) == 0) { cli::cli_alert_info("{.fun bed_map} expects {.var name=expression} pairs passed in {.var ...}") } x <- check_interval(x) y <- check_interval(y) ## add suffixes to all x columns except `chrom` x_nms <- str_c(names(x)[!names(x) %in% "chrom"], ".x") ## add integer .id to track each input x ivl .id_col <- get_id_col(x) x[[.id_col]] <- seq_len(nrow(x)) .id_col_out <- str_c(.id_col, ".x") # establish grouping with shared groups (and chrom) groups_xy <- shared_groups(x, y) groups_xy <- unique(as.character(c("chrom", groups_xy))) groups_vars <- rlang::syms(groups_xy) # type convert grouping factors to characters if necessary and ungroup x <- convert_factors(x, groups_xy) y <- convert_factors(y, groups_xy) x <- group_by(x, !!!groups_vars) y <- group_by(y, !!!groups_vars) grp_indexes <- shared_group_indexes(x, y) res <- intersect_impl(x, y, grp_indexes$x, grp_indexes$y, invert = TRUE, suffix_x = ".x", suffix_y = "" ) ## filter for rows that don't intersect. The `duplicated` call is required ## because book-ended intervals in the intersect_impl result can ## book-end multiple `y` intervals, causing them to be duplicated after the ## `select`. base::duplicated is ~10x faster than dplyr::distinct res_noint <- filter(res, is.na(.overlap) | .overlap < min_overlap) res_noint <- select(res_noint, chrom, ends_with(".x")) res_noint <- res_noint[!duplicated(res_noint[[.id_col_out]]), ] ## identify intersecting intervals res_int <- filter(res, !is.na(.overlap) & .overlap >= min_overlap) ## drop non-intersecting intervals that are also in the intersecting set ## this prevents duplicate reporting of an x interval if it both bookends ## and overlaps y intervals. Using base R logical indexing here is ~ 7x faster ## than dplyr::anti_join() res_noint <- res_noint[!res_noint[[.id_col_out]] %in% res_int[[.id_col_out]], ] res_noint <- select(res_noint, -contains(.id_col_out)) ## map supplied functions to each set of intersecting intervals ## group_by .id_col_out to ensure that duplicated input x intervals are reported # res_int <- group_by(res_int, !!! 
syms(c("chrom", x_nms, .id_col_out))) res_int <- group_by(res_int, !!!syms(.id_col_out)) res_int <- summarize(res_int, !!!quos(...)) res_int <- ungroup(res_int) ## join summarize data with intervals based on .id ## avoids grouping by many unnecessary columns join_grps <- .id_col_out names(join_grps) <- .id_col x <- ungroup(x) res_int <- inner_join(x, res_int, by = join_grps) res_int <- select(res_int, -contains(.id_col)) ## rename to match input columns colnames(res_noint) <- stringr::str_replace(colnames(res_noint), "[.]x$", "") res <- bind_rows(res_int, res_noint) res <- bed_sort(res) res } #' @export #' #' @param .data data #' @param sep separator character #' #' @rdname bed_map concat <- function(.data, sep = ",") { paste0(.data, collapse = sep) } #' @export #' @rdname bed_map values_unique <- function(.data, sep = ",") { concat(unique(.data), sep = sep) } #' @export #' @rdname bed_map values <- function(.data, sep = ",") { concat(rle(.data)$values, sep = sep) }
/scratch/gouwar.j/cran-all/cranData/valr/R/bed_map.r
#' Merge overlapping intervals.
#'
#' Operations can be performed on merged intervals by specifying name-value
#' pairs. Default `max_dist` of `0` means book-ended intervals are
#' merged.
#'
#' @param x [ivl_df]
#' @param max_dist maximum distance between intervals to merge
#' @param ... name-value pairs that specify operations on merged intervals
#'
#' @template groups
#'
#' @return [ivl_df]
#'
#' @family single set operations
#'
#' @seealso
#' \url{https://bedtools.readthedocs.io/en/latest/content/tools/merge.html}
#'
#' @examples
#' x <- tibble::tribble(
#'   ~chrom, ~start, ~end,
#'   "chr1", 1, 50,
#'   "chr1", 10, 75,
#'   "chr1", 100, 120
#' )
#'
#' bed_glyph(bed_merge(x))
#'
#' x <- tibble::tribble(
#'   ~chrom, ~start, ~end, ~value, ~strand,
#'   "chr1", 1, 50, 1, "+",
#'   "chr1", 100, 200, 2, "+",
#'   "chr1", 150, 250, 3, "-",
#'   "chr2", 1, 25, 4, "+",
#'   "chr2", 200, 400, 5, "-",
#'   "chr2", 400, 500, 6, "+",
#'   "chr2", 450, 550, 7, "+"
#' )
#'
#' bed_merge(x)
#'
#' bed_merge(x, max_dist = 100)
#'
#' # merge intervals on same strand
#' bed_merge(dplyr::group_by(x, strand))
#'
#' bed_merge(x, .value = sum(value))
#'
#' @export
bed_merge <- function(x, max_dist = 0, ...) {
  check_required(x)

  x <- check_interval(x)

  if (max_dist < 0) {
    cli::cli_abort("{.var max_dist} must be 0 or greater")
  }

  groups_x <- group_vars(x)

  res <- bed_sort(x)

  group_vars <- rlang::syms(unique(c("chrom", groups_x)))
  res <- group_by(res, !!!group_vars)

  # if dots are passed, summarize operations on merged intervals;
  # otherwise use the fast internal merge
  if (!is.null(substitute(...))) {
    res <- merge_impl(res, max_dist, collapse = FALSE)
    group_vars <- rlang::syms(unique(c("chrom", ".id_merge", groups_x)))
    res <- group_by(res, !!!group_vars)
    res <- summarize(res, !!!rlang::quos(
      .start = min(start),
      .end = max(end),
      ...
    ))
    res <- select(res, everything(), start = .start, end = .end)
    res <- ungroup(res)
    res <- select(res, !!quo(-one_of(".id_merge")))
  } else {
    res <- merge_impl(res, max_dist, collapse = TRUE)
    res <- select(res, !!!rlang::syms(c("chrom", "start", "end", groups_x)))
  }

  res <- ungroup(res)

  # restore original grouping
  if (!is.null(groups_x)) {
    res <- group_by(res, !!!rlang::syms(groups_x))
  }

  res <- reorder_names(res, x)
  res
}
/scratch/gouwar.j/cran-all/cranData/valr/R/bed_merge.r
#' Partition intervals into elemental intervals #' #' Convert a set of intervals into elemental intervals that contain each start #' and end position in the set. #' #' Summary operations, such as [min()] or [count()] can be performed #' on elemental intervals by specifying name-value pairs. #' #' This function is useful for calculating summaries across overlapping intervals #' without merging the intervals. #' #' @param x [ivl_df] #' @param ... name-value pairs specifying column names and expressions to apply #' #' @template groups #' #' @return [ivl_df()] #' #' @family single set operations #' #' @seealso #' \url{https://bedops.readthedocs.io/en/latest/content/reference/set-operations/bedops.html#partition-p-partition} #' #' @examples #' x <- tibble::tribble( #' ~chrom, ~start, ~end, ~value, ~strand, #' "chr1", 100, 500, 10, "+", #' "chr1", 200, 400, 20, "-", #' "chr1", 300, 550, 30, "+", #' "chr1", 550, 575, 2, "+", #' "chr1", 800, 900, 5, "+" #' ) #' #' #' bed_glyph(bed_partition(x)) #' bed_glyph(bed_partition(x, value = sum(value)), label = "value") #' #' bed_partition(x) #' #' # compute summary over each elemental interval #' bed_partition(x, value = sum(value)) #' #' # partition and compute summaries based on group #' x <- dplyr::group_by(x, strand) #' bed_partition(x, value = sum(value)) #' #' # combine values across multiple tibbles #' y <- tibble::tribble( #' ~chrom, ~start, ~end, ~value, ~strand, #' "chr1", 10, 500, 100, "+", #' "chr1", 250, 420, 200, "-", #' "chr1", 350, 550, 300, "+", #' "chr1", 550, 555, 20, "+", #' "chr1", 800, 900, 50, "+" #' ) #' #' x <- dplyr::bind_rows(x, y) #' bed_partition(x, value = sum(value)) #' #' @export bed_partition <- function(x, ...) { check_required(x) x <- check_interval(x) groups_df <- group_vars(x) x <- bed_sort(x) groups <- rlang::syms(unique(c("chrom", groups_df))) x <- group_by(x, !!!groups) res <- partition_impl(x) res <- tibble::as_tibble(res) # drop non-grouped cols as values no longer match ivls res <- select(res, chrom, start, end, one_of(groups_df)) # if dots are passed then map values if (!is.null(substitute(...))) { res <- group_by(res, !!!syms(groups_df)) res <- bed_map(res, x, ...) } res }
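# Contrast sketch (not part of valr): two overlapping intervals merge into
# one interval, but partition into three elemental intervals broken at every
# start and end: [100,150), [150,200), [200,250).
if (FALSE) {
  x <- tibble::tribble(
    ~chrom, ~start, ~end,
    "chr1", 100, 200,
    "chr1", 150, 250
  )
  bed_merge(x) # one interval: 100-250
  bed_partition(x) # three intervals: 100-150, 150-200, 200-250
}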
/scratch/gouwar.j/cran-all/cranData/valr/R/bed_partition.r
#' Projection test for query interval overlap.
#'
#' @param x [ivl_df]
#' @param y [ivl_df]
#' @param genome [genome_df]
#' @param by_chrom compute test per chromosome
#'
#' @template stats
#'
#' @family interval statistics
#'
#' @return
#' [ivl_df] with the following columns:
#'
#'   - `chrom` the name of chromosome tested if `by_chrom = TRUE`,
#'     otherwise has a value of `whole_genome`
#'
#'   - `p.value` p-value from a binomial test. p-values > 0.5
#'     are converted to `1 - p-value` and `lower_tail` is `FALSE`
#'
#'   - `obs_exp_ratio` ratio of observed to expected overlap frequency
#'
#'   - `lower_tail` `TRUE` indicates the observed overlaps are in the lower tail
#'     of the distribution (e.g., less overlap than expected). `FALSE` indicates
#'     that the observed overlaps are in the upper tail of the distribution
#'     (e.g., more overlap than expected)
#'
#' @seealso
#' \url{https://journals.plos.org/ploscompbiol/article?id=10.1371/journal.pcbi.1002529}
#'
#' @examples
#' genome <- read_genome(valr_example("hg19.chrom.sizes.gz"))
#'
#' x <- bed_random(genome, seed = 1010486)
#' y <- bed_random(genome, seed = 9203911)
#'
#' bed_projection(x, y, genome)
#'
#' bed_projection(x, y, genome, by_chrom = TRUE)
#'
#' @export
bed_projection <- function(x, y, genome, by_chrom = FALSE) {
  check_required(x)
  check_required(y)
  check_required(genome)

  x <- check_interval(x)
  y <- check_interval(y)
  genome <- check_genome(genome)

  # find midpoints
  x <- mutate(
    x,
    .midpoint = round((end + start) / 2),
    start = .midpoint,
    end = .midpoint + 1
  )
  x <- select(x, -.midpoint)

  # flatten y intervals
  y <- bed_merge(y)

  # find overlapping intervals
  obs_counts <- bed_intersect(x, y)

  # count overlaps per chromosome
  obs_counts <- group_by(obs_counts, chrom)
  obs_counts <- summarize(obs_counts, .obs_counts = n())

  # total x intervals tested
  total_counts <- group_by(x, chrom)
  total_counts <- summarize(total_counts, .total_trials = n())

  obs_counts <- full_join(obs_counts, total_counts, by = "chrom")
  obs_counts <- mutate(
    obs_counts,
    .obs_counts = if_else(is.na(.obs_counts),
      as.integer(0),
      .obs_counts
    )
  )

  # calculate probability of overlap by chance
  y <- mutate(y, .length = end - start)
  y <- group_by(y, chrom)
  y <- summarize(y, .reference_coverage = sum(.length))

  # add in any missing chromosomes
  y <- full_join(y, genome, by = "chrom")
  null_dist <- mutate(y, .exp_prob = .reference_coverage / size)

  res <- inner_join(obs_counts, null_dist, by = "chrom")

  # binomial test and obs/exp
  if (by_chrom) {
    res <- group_by(res, chrom)
    res <- summarize(
      res,
      p.value = stats::pbinom(
        q = .obs_counts,
        size = .total_trials,
        prob = .exp_prob
      ),
      obs_exp_ratio = (.obs_counts / .total_trials) / .exp_prob
    )
  } else {
    res <- ungroup(res)
    res <- summarize(
      res,
      .obs_counts = sum(.obs_counts),
      .total_trials = sum(.total_trials),
      .exp_prob = sum(.reference_coverage) / sum(as.numeric(size))
    )
    res <- summarize(
      res,
      chrom = "whole_genome",
      p.value = stats::pbinom(
        q = .obs_counts,
        size = .total_trials,
        prob = .exp_prob
      ),
      obs_exp_ratio = (.obs_counts / .total_trials) / .exp_prob
    )
  }

  res <- mutate(
    res,
    lower_tail = if_else(p.value < .5,
      "TRUE",
      "FALSE"
    ),
    p.value = if_else(p.value < .5,
      p.value,
      1 - p.value
    )
  )
  res
}
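# The null model above in miniature (not part of valr): if the merged `y`
# intervals cover 10% of a chromosome, a random `x` midpoint lands in `y`
# with probability 0.1, and k overlaps out of n midpoints are scored with
# the binomial lower tail.
if (FALSE) {
  stats::pbinom(q = 4, size = 20, prob = 0.1) # P(X <= 4) when 2 are expected
  #> [1] 0.9568255
}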
/scratch/gouwar.j/cran-all/cranData/valr/R/bed_projection.r
#' Generate randomly placed intervals on a genome. #' #' @param genome [genome_df] #' @param length length of intervals #' @param n number of intervals to generate #' @param seed seed RNG for reproducible intervals #' @param sorted return sorted output #' #' @details Sorting can be suppressed with `sorted = FALSE`. #' #' @return [ivl_df] #' #' @family randomizing operations #' #' @seealso \url{https://bedtools.readthedocs.io/en/latest/content/tools/random.html} #' #' @examples #' genome <- tibble::tribble( #' ~chrom, ~size, #' "chr1", 10000000, #' "chr2", 50000000, #' "chr3", 60000000, #' "chrX", 5000000 #' ) #' #' bed_random(genome, seed = 10104) #' #' # sorting can be suppressed #' bed_random(genome, sorted = FALSE, seed = 10104) #' #' # 500 random intervals of length 500 #' bed_random(genome, length = 500, n = 500, seed = 10104) #' #' @export bed_random <- function(genome, length = 1000, n = 1e6, seed = 0, sorted = TRUE) { check_required(genome) genome <- check_genome(genome) if (!all(genome$size > length)) { cli::cli_abort("{.var length} must be smaller than all chrom sizes") } out <- random_impl(genome, length, n, seed) if (sorted) { out <- bed_sort(out) } as_tibble(out) }
/scratch/gouwar.j/cran-all/cranData/valr/R/bed_random.r
#' Compute relative distances between intervals. #' #' @param x [ivl_df] #' @param y [ivl_df] #' @param detail report relative distances for each `x` interval. #' #' @family interval statistics #' #' @return #' If `detail = FALSE`, a [ivl_df] that summarizes #' calculated `.reldist` values with the following columns: #' #' - `.reldist` relative distance metric #' - `.counts` number of metric observations #' - `.total` total observations #' - `.freq` frequency of observation #' #' If `detail = TRUE`, the `.reldist` column reports the relative #' distance for each input `x` interval. #' #' @template stats #' #' @seealso \url{https://bedtools.readthedocs.io/en/latest/content/tools/reldist.html} #' #' @examples #' genome <- read_genome(valr_example("hg19.chrom.sizes.gz")) #' #' x <- bed_random(genome, seed = 1010486) #' y <- bed_random(genome, seed = 9203911) #' #' bed_reldist(x, y) #' #' bed_reldist(x, y, detail = TRUE) #' #' @export bed_reldist <- function(x, y, detail = FALSE) { check_required(x) check_required(y) x <- check_interval(x) y <- check_interval(y) # establish grouping with shared groups (and chrom) groups_xy <- shared_groups(x, y) groups_xy <- unique(as.character(c("chrom", groups_xy))) groups_vars <- rlang::syms(groups_xy) # type convert grouping factors to characters if necessary and ungroup x <- convert_factors(x, groups_xy) y <- convert_factors(y, groups_xy) x <- group_by(x, !!!groups_vars) y <- group_by(y, !!!groups_vars) grp_indexes <- shared_group_indexes(x, y) res <- dist_impl(x, y, grp_indexes$x, grp_indexes$y, distcalc = "reldist" ) if (detail) { return(res) } res[[".reldist"]] <- floor(res[[".reldist"]] * 100) / 100 nr <- nrow(res) res <- group_by(res, .reldist) res <- summarize( res, .counts = n(), .total = nr, .freq = .counts / .total ) res }
/scratch/gouwar.j/cran-all/cranData/valr/R/bed_reldist.r
#' Adjust intervals by a fixed size.
#'
#' Out-of-bounds intervals are removed by default.
#'
#' @param x [ivl_df]
#' @param genome [ivl_df]
#' @param size number of bases to shift. Positive numbers shift right,
#'   negative numbers shift left.
#' @param fraction define `size` as a fraction of interval
#' @param trim adjust coordinates for out-of-bounds intervals
#'
#' @return [ivl_df]
#'
#' @family single set operations
#' @seealso \url{https://bedtools.readthedocs.io/en/latest/content/tools/shift.html}
#'
#' @examples
#' x <- tibble::tribble(
#'   ~chrom, ~start, ~end,
#'   "chr1", 25, 50,
#'   "chr1", 100, 125
#' )
#'
#' genome <- tibble::tribble(
#'   ~chrom, ~size,
#'   "chr1", 125
#' )
#'
#' bed_glyph(bed_shift(x, genome, size = -20))
#'
#' x <- tibble::tribble(
#'   ~chrom, ~start, ~end, ~strand,
#'   "chr1", 100, 150, "+",
#'   "chr1", 200, 250, "+",
#'   "chr2", 300, 350, "+",
#'   "chr2", 400, 450, "-",
#'   "chr3", 500, 550, "-",
#'   "chr3", 600, 650, "-"
#' )
#'
#' genome <- tibble::tribble(
#'   ~chrom, ~size,
#'   "chr1", 1000,
#'   "chr2", 2000,
#'   "chr3", 3000
#' )
#'
#' bed_shift(x, genome, 100)
#'
#' bed_shift(x, genome, fraction = 0.5)
#'
#' # shift with respect to strand
#' stranded <- dplyr::group_by(x, strand)
#' bed_shift(stranded, genome, 100)
#'
#' @export
bed_shift <- function(x, genome, size = 0, fraction = 0, trim = FALSE) {
  check_required(x)
  check_required(genome)

  x <- check_interval(x)
  genome <- check_genome(genome)

  stranded <- "strand" %in% groups(x)

  # shift intervals
  if (!stranded && !fraction) {
    res <- mutate(
      x,
      start = start + size,
      end = end + size
    )
  }

  # shift by percent of interval size
  if (!stranded && fraction) {
    res <- mutate(x, .size = end - start)
    res <- mutate(
      res,
      start = start + round(.size * fraction),
      end = end + round(.size * fraction)
    )
    res <- select(res, -.size)
  }

  # shift by strand
  if (stranded && !fraction) {
    res <- mutate(
      x,
      start = ifelse(strand == "+",
        start + size,
        start - size
      ),
      end = ifelse(strand == "+",
        end + size,
        end - size
      )
    )
  }

  # shift by strand and percent
  if (stranded && fraction) {
    res <- mutate(x, .size = end - start)
    res <- mutate(
      res,
      start = ifelse(strand == "+",
        start + round(.size * fraction),
        start - round(.size * fraction)
      ),
      end = ifelse(strand == "+",
        end + round(.size * fraction),
        end - round(.size * fraction)
      )
    )
    res <- select(res, -.size)
  }

  res <- bound_intervals(res, genome, trim)

  res
}
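# Quick check (not part of valr) of the fraction arithmetic above: a 50 bp
# interval shifted by fraction = 0.5 moves round(50 * 0.5) = 25 bp right.
if (FALSE) {
  x <- tibble::tribble(~chrom, ~start, ~end, "chr1", 100, 150)
  genome <- tibble::tribble(~chrom, ~size, "chr1", 1000)
  bed_shift(x, genome, fraction = 0.5) # start 125, end 175
}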
/scratch/gouwar.j/cran-all/cranData/valr/R/bed_shift.r
#' Shuffle input intervals.
#'
#' @param x [ivl_df]
#' @param genome [genome_df]
#' @param incl [ivl_df] of included intervals
#' @param excl [ivl_df] of excluded intervals
#' @param max_tries maximum tries to identify a bounded interval
#' @param within shuffle within chromosomes
#' @param seed seed for reproducible intervals
#'
#' @return [ivl_df]
#'
#' @family randomizing operations
#'
#' @seealso \url{https://bedtools.readthedocs.io/en/latest/content/tools/shuffle.html}
#'
#' @examples
#' genome <- tibble::tribble(
#'   ~chrom, ~size,
#'   "chr1", 1e6,
#'   "chr2", 2e6,
#'   "chr3", 4e6
#' )
#'
#' x <- bed_random(genome, seed = 1010486)
#'
#' bed_shuffle(x, genome, seed = 9830491)
#'
#' @export
bed_shuffle <- function(x, genome, incl = NULL, excl = NULL,
                        max_tries = 1000, within = FALSE, seed = 0) {
  check_required(x)
  check_required(genome)

  x <- check_interval(x)
  genome <- check_genome(genome)

  # flatten incl and excl
  if (!is.null(incl)) {
    incl <- bed_merge(incl)
  }
  if (!is.null(excl)) {
    excl <- bed_merge(excl)
  }

  # make genome into an interval tbl
  genome_incl <- mutate(genome, start = 0, end = size)
  genome_incl <- select(genome_incl, chrom, start, end)

  # find the included interval bounds. The case where only incl intervals are
  # defined is not handled explicitly because it is the default.
  if (is.null(incl) && is.null(excl)) {
    incl <- genome_incl
  } else if (is.null(incl) && !is.null(excl)) {
    incl <- bed_subtract(genome_incl, excl)
  } else if (!is.null(incl) && !is.null(excl)) {
    incl <- bed_subtract(incl, excl)
  }

  if (is.null(incl) || nrow(incl) == 0) {
    cli::cli_abort("no intervals to sample from.")
  }

  res <- shuffle_impl(x, incl, within, max_tries, seed)

  # bind original x column data to result (#81)
  res <- bind_cols(res, as_tibble(x[, !colnames(x) %in% colnames(res)]))

  as_tibble(res)
}
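# Placement sketch (not part of valr): with an `incl` tbl, shuffled intervals
# can only land inside `incl` (minus any `excl`), per the bookkeeping above.
if (FALSE) {
  genome <- tibble::tribble(~chrom, ~size, "chr1", 1e6)
  x <- bed_random(genome, length = 100, n = 5, seed = 1010486)
  incl <- tibble::tribble(~chrom, ~start, ~end, "chr1", 0, 1e5)
  bed_shuffle(x, genome, incl = incl, seed = 9830491)
}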
/scratch/gouwar.j/cran-all/cranData/valr/R/bed_shuffle.r
#' Increase the size of input intervals. #' #' @inheritParams bed_flank #' #' @return [ivl_df] #' #' @family single set operations #' #' @seealso #' \url{https://bedtools.readthedocs.io/en/latest/content/tools/slop.html} #' #' @examples #' x <- tibble::tribble( #' ~chrom, ~start, ~end, #' "chr1", 110, 120, #' "chr1", 225, 235 #' ) #' #' genome <- tibble::tribble( #' ~chrom, ~size, #' "chr1", 400 #' ) #' #' bed_glyph(bed_slop(x, genome, both = 20, trim = TRUE)) #' #' genome <- tibble::tribble( #' ~chrom, ~size, #' "chr1", 5000 #' ) #' #' x <- tibble::tribble( #' ~chrom, ~start, ~end, ~name, ~score, ~strand, #' "chr1", 500, 1000, ".", ".", "+", #' "chr1", 1000, 1500, ".", ".", "-" #' ) #' #' bed_slop(x, genome, left = 100) #' #' bed_slop(x, genome, right = 100) #' #' bed_slop(x, genome, both = 100) #' #' bed_slop(x, genome, both = 0.5, fraction = TRUE) #' #' @export bed_slop <- function(x, genome, both = 0, left = 0, right = 0, fraction = FALSE, strand = FALSE, trim = FALSE, ...) { check_required(x) check_required(genome) x <- check_interval(x) genome <- check_genome(genome) if (strand && !"strand" %in% colnames(x)) { cli::cli_abort("expected {.var strand} in {.var x}") } if (both != 0 && (left != 0 || right != 0)) { cli::cli_abort("ambiguous side spec for bed_slop") } if (fraction) x <- mutate(x, .size = end - start) if (both != 0) { if (fraction) { res <- mutate( x, start = start - round(both * .size), end = end + round(both * .size) ) } else { res <- mutate( x, start = start - both, end = end + both ) } } else { # calc left and right based on strand if (strand) { if (fraction) { res <- mutate( x, start = ifelse(strand == "+", start - round(left * .size), start - round(right * .size) ), end = ifelse(strand == "+", end + round(right * .size), end + round(left * .size) ) ) } else { res <- mutate( x, start = ifelse(strand == "+", start - left, start - right ), end = ifelse(strand == "+", end + right, end + left ) ) } } else { if (fraction) { res <- mutate( x, start = start - round(left * .size), end = end + round(right * .size) ) } else { res <- mutate( x, start = start - left, end = end + right ) } } } if (fraction) res <- select(res, -.size) res <- bound_intervals(res, genome, trim) res <- bed_sort(res) res <- mutate(res, temp_start = start, temp_end = end ) res <- mutate(res, start = ifelse(temp_start - temp_end < 0, temp_start, temp_end ), end = ifelse(temp_start - temp_end < 0, temp_end, temp_start ) ) res <- select(res, -temp_start, -temp_end) res }
/scratch/gouwar.j/cran-all/cranData/valr/R/bed_slop.r
#' Sort a set of intervals.
#'
#' @param x [ivl_df]
#' @param by_size sort by interval size
#' @param by_chrom sort within chromosome
#' @param reverse reverse sort order
#'
#' @seealso
#' \url{https://bedtools.readthedocs.io/en/latest/content/tools/sort.html}
#'
#' @examples
#' x <- tibble::tribble(
#'   ~chrom, ~start, ~end,
#'   "chr8", 500, 1000,
#'   "chr8", 1000, 5000,
#'   "chr8", 100, 200,
#'   "chr1", 100, 300,
#'   "chr1", 100, 200
#' )
#'
#' # sort by chrom and start
#' bed_sort(x)
#'
#' # reverse sort order
#' bed_sort(x, reverse = TRUE)
#'
#' # sort by interval size
#' bed_sort(x, by_size = TRUE)
#'
#' # sort by decreasing interval size
#' bed_sort(x, by_size = TRUE, reverse = TRUE)
#'
#' # sort by interval size within chrom
#' bed_sort(x, by_size = TRUE, by_chrom = TRUE)
#'
#' @export
bed_sort <- function(x, by_size = FALSE, by_chrom = FALSE, reverse = FALSE) {
  check_required(x)

  x <- check_interval(x)

  if (by_size) {
    res <- mutate(x, .size = end - start)

    if (by_chrom) {
      if (reverse) {
        res <- res[order(res$chrom, -res$.size, method = "radix"), ]
      } else {
        res <- res[order(res$chrom, res$.size, method = "radix"), ]
      }
    } else {
      if (reverse) {
        res <- res[order(-res$.size, method = "radix"), ]
      } else {
        res <- res[order(res$.size, method = "radix"), ]
      }
    }

    # remove .size column from result
    res <- select(res, -.size)
  } else {
    # sort by coordinate
    if (reverse) {
      res <- x[order(x$chrom,
        -x$start,
        method = "radix"
      ), ]
    } else {
      res <- x[order(x$chrom,
        x$start,
        x$end,
        method = "radix"
      ), ]
    }
  }

  res
}
/scratch/gouwar.j/cran-all/cranData/valr/R/bed_sort.r
#' Subtract two sets of intervals. #' #' Subtract `y` intervals from `x` intervals. #' #' @param x [ivl_df] #' @param y [ivl_df] #' @param any remove any `x` intervals that overlap `y` #' #' @template groups #' #' @family multiple set operations #' #' @seealso \url{https://bedtools.readthedocs.io/en/latest/content/tools/subtract.html} #' #' @examples #' x <- tibble::tribble( #' ~chrom, ~start, ~end, #' "chr1", 1, 100 #' ) #' #' y <- tibble::tribble( #' ~chrom, ~start, ~end, #' "chr1", 50, 75 #' ) #' #' bed_glyph(bed_subtract(x, y)) #' #' x <- tibble::tribble( #' ~chrom, ~start, ~end, #' "chr1", 100, 200, #' "chr1", 250, 400, #' "chr1", 500, 600, #' "chr1", 1000, 1200, #' "chr1", 1300, 1500 #' ) #' #' y <- tibble::tribble( #' ~chrom, ~start, ~end, #' "chr1", 150, 175, #' "chr1", 510, 525, #' "chr1", 550, 575, #' "chr1", 900, 1050, #' "chr1", 1150, 1250, #' "chr1", 1299, 1501 #' ) #' #' bed_subtract(x, y) #' #' bed_subtract(x, y, any = TRUE) #' #' @export bed_subtract <- function(x, y, any = FALSE) { check_required(x) check_required(y) x <- check_interval(x) y <- check_interval(y) # establish grouping with shared groups (and chrom) groups_xy <- shared_groups(x, y) groups_xy <- unique(as.character(c("chrom", groups_xy))) groups_vars <- rlang::syms(groups_xy) # type convert grouping factors to characters if necessary and ungroup x <- convert_factors(x, groups_xy) y <- convert_factors(y, groups_xy) x <- group_by(x, !!!groups_vars) y <- group_by(y, !!!groups_vars) # find groups not in y not_y_grps <- setdiff(get_labels(x), get_labels(y)) # keep x ivls from groups not found in y res_no_y <- semi_join(x, not_y_grps, by = colnames(not_y_grps)) grp_indexes <- shared_group_indexes(x, y) if (any) { # collect and return x intervals without overlaps res <- intersect_impl(x, y, grp_indexes$x, grp_indexes$y, invert = TRUE ) anti <- filter(res, is.na(.overlap)) anti <- select(anti, chrom, start = start.x, end = end.x) return(anti) } res <- subtract_impl( x, y, grp_indexes$x, grp_indexes$y ) res <- ungroup(res) res <- bind_rows(res, res_no_y) res <- bed_sort(res) res }
/scratch/gouwar.j/cran-all/cranData/valr/R/bed_subtract.r
#' Identify intervals within a specified distance. #' #' @param x [ivl_df] #' @param y [ivl_df] #' @param ... params for bed_slop and bed_intersect #' @inheritParams bed_slop #' @inheritParams bed_intersect #' #' @template groups #' #' @family multiple set operations #' @examples #' x <- tibble::tribble( #' ~chrom, ~start, ~end, #' "chr1", 25, 50, #' "chr1", 100, 125 #' ) #' #' y <- tibble::tribble( #' ~chrom, ~start, ~end, #' "chr1", 60, 75 #' ) #' #' genome <- tibble::tribble( #' ~chrom, ~size, #' "chr1", 125 #' ) #' #' bed_glyph(bed_window(x, y, genome, both = 15)) #' #' x <- tibble::tribble( #' ~chrom, ~start, ~end, #' "chr1", 10, 100, #' "chr2", 200, 400, #' "chr2", 300, 500, #' "chr2", 800, 900 #' ) #' #' y <- tibble::tribble( #' ~chrom, ~start, ~end, #' "chr1", 150, 400, #' "chr2", 230, 430, #' "chr2", 350, 430 #' ) #' #' genome <- tibble::tribble( #' ~chrom, ~size, #' "chr1", 500, #' "chr2", 1000 #' ) #' #' bed_window(x, y, genome, both = 100) #' #' @seealso \url{https://bedtools.readthedocs.io/en/latest/content/tools/window.html} #' #' @export bed_window <- function(x, y, genome, ...) { check_required(x) check_required(y) check_required(genome) x <- check_interval(x) y <- check_interval(y) genome <- check_genome(genome) x <- mutate(x, .start = start, .end = end) # capture command line args cmd_args <- list(...) # get arguments for bed_slop and bed_intersect slop_arg_names <- names(formals(bed_slop)) intersect_arg_names <- names(formals(bed_intersect)) # parse supplied args into those for bed_slop or bed_intersect slop_args <- cmd_args[names(cmd_args) %in% slop_arg_names] intersect_args <- cmd_args[names(cmd_args) %in% intersect_arg_names] # pass new list of args to bed_slop slop_x <- do.call( bed_slop, c(list("x" = x, "genome" = genome), slop_args) ) # pass new list of args to bed_intersect res <- do.call( bed_intersect, c(list("x" = slop_x, "y" = y), intersect_args) ) res <- mutate(res, start.x = .start.x, end.x = .end.x) res <- ungroup(res) res <- select(res, -.start.x, -.end.x) res }
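# Equivalence sketch (not part of valr), grounded in the implementation
# above: bed_window(x, y, genome, both = b) is a slop of `x` by `b`,
# intersected with `y`, with the original `x` coordinates restored.
if (FALSE) {
  x <- tibble::tribble(~chrom, ~start, ~end, "chr1", 25, 50)
  y <- tibble::tribble(~chrom, ~start, ~end, "chr1", 60, 75)
  genome <- tibble::tribble(~chrom, ~size, "chr1", 125)
  slopped <- bed_slop(x, genome, both = 15)
  bed_intersect(slopped, y) # same hit reported by bed_window(..., both = 15)
}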
/scratch/gouwar.j/cran-all/cranData/valr/R/bed_window.r
#' Fetch data from remote databases.
#'
#' Currently `db_ucsc` and `db_ensembl` are available for connections.
#'
#' @name db
#'
#' @param dbname name of database
#' @param host hostname
#' @param user username
#' @param password password
#' @param port MySQL connection port
#' @param ... params for connection
NULL

#' @rdname db
#' @seealso \url{https://genome.ucsc.edu/goldenpath/help/mysql.html}
#'
#' @examples
#' \dontrun{
#' if (require(RMariaDB)) {
#'   library(dplyr)
#'   ucsc <- db_ucsc("hg38")
#'
#'   # fetch the `refGene` tbl
#'   tbl(ucsc, "refGene")
#'
#'   # the `chromInfo` tbls have size information
#'   tbl(ucsc, "chromInfo")
#' }
#' }
#' @export
db_ucsc <- function(dbname,
                    host = "genome-mysql.cse.ucsc.edu",
                    user = "genomep",
                    password = "password",
                    port = 3306,
                    ...) {
  check_db_packages()
  DBI::dbConnect(RMariaDB::MariaDB(),
    dbname = dbname,
    user = user,
    password = password,
    host = host,
    port = port,
    ...
  ) # nocov
}

#' @rdname db
#' @seealso \url{https://www.ensembl.org/info/data/mysql.html}
#'
#' @examples
#' \dontrun{
#' if (require(RMariaDB)) {
#'   library(dplyr)
#'   # squirrel genome
#'   ensembl <- db_ensembl("spermophilus_tridecemlineatus_core_67_2")
#'
#'   tbl(ensembl, "gene")
#' }
#' }
#'
#' @export
db_ensembl <- function(dbname,
                       host = "ensembldb.ensembl.org",
                       user = "anonymous",
                       password = "",
                       port = 3306,
                       ...) {
  check_db_packages()
  DBI::dbConnect(RMariaDB::MariaDB(),
    dbname = dbname,
    user = user,
    password = password,
    host = host,
    port = port,
    ...
  ) # nocov
}

check_db_packages <- function() {
  db_pkgs <- c("dbplyr", "DBI", "RMariaDB")
  pkgs_found <- sapply(db_pkgs, requireNamespace, quietly = TRUE)

  if (!all(pkgs_found)) {
    missing <- db_pkgs[!pkgs_found]
    n <- length(missing)
    cli::cli_abort("install {n} package{?s} {.pkg {missing}} for db functions.")
  }
}
/scratch/gouwar.j/cran-all/cranData/valr/R/db.r
#' Create intron features. #' #' Numbers in the `score` column are intron numbers from 5' to 3' independent of #' strand. I.e., the first introns for `+` and `-` strand genes both have `score` #' values of `1`. #' #' @param x [ivl_df] in BED12 format #' #' @family feature functions #' #' @examples #' x <- read_bed12(valr_example("mm9.refGene.bed.gz")) #' #' create_introns(x) #' #' @export create_introns <- function(x) { res <- bed12_to_exons(x) res <- group_by(res, name) res <- mutate( res, .start = end, .end = lead(start), score = ifelse(strand == "+", score, score - 1), start = .start, end = .end ) res <- select(res, -.start, -.end) res <- ungroup(res) res <- na.omit(res) # remove zero length intervals res <- filter(res, start < end) res } #' Create 5' UTR features. #' #' @param x [ivl_df] in BED12 format #' #' @family feature functions #' #' @examples #' x <- read_bed12(valr_example("mm9.refGene.bed.gz")) #' #' create_utrs5(x) #' #' @export create_utrs5 <- function(x) { res <- group_by(x, name) res <- mutate( res, start = ifelse(strand == "+", start, cds_end), end = ifelse(strand == "+", cds_start, end) ) res <- ungroup(res) res <- select(res, chrom:strand) # remove zero length intervals res <- filter(res, start < end) res } #' Create 3' UTR features. #' #' @param x [ivl_df] in BED12 format #' #' @family feature functions #' #' @examples #' x <- read_bed12(valr_example("mm9.refGene.bed.gz")) #' #' create_utrs3(x) #' #' @export create_utrs3 <- function(x) { res <- group_by(x, name) res <- mutate( res, start = ifelse(strand == "+", cds_end, start), end = ifelse(strand == "+", end, cds_start) ) res <- ungroup(res) res <- select(res, chrom:strand) # remove zero length intervals res <- filter(res, start < end) res } #' Create transcription start site features. #' #' @param x [ivl_df] in BED format #' #' @family feature functions #' #' @examples #' x <- read_bed12(valr_example("mm9.refGene.bed.gz")) #' #' create_tss(x) #' #' @export create_tss <- function(x) { res <- group_by(x, name) res <- mutate( res, start = ifelse(strand == "+", start, end - 1), end = ifelse(strand == "+", start + 1, end) ) res <- ungroup(res) res <- select(res, chrom:strand) res }
/scratch/gouwar.j/cran-all/cranData/valr/R/features.r
globalVariables(c(
  "chrom", "start", "end", "strand", ".exon_num", ".exon_size",
  ".exon_start", "name", ".prev_end", "size", "max.end", "size_mean",
  "int_est", ".starts", ".ends", ".row", ".start", ".end", ".size",
  ".strand", "start.x", "end.x", "strand.x", "start.y", "strand.y",
  ".overlap", ".id", ".value", ".win_num", ".facet", ".id_merge",
  ".overlap_merge", ".end.x", ".start.x", "CHROM", "POS", "REF",
  ".counts", "exon_sizes", ".absdist", ".ref_gap", ".ref_points",
  "exon_starts", "key", "left_end", "left_start", "pos", ".reldist",
  "right_end", "right_start", ".total", "value", ".midpoint",
  ".obs_counts", ".length", ".reference_coverage", ".total_trials",
  ".exp_prob", "p.value", ".win_size", ".row_id", "cds_start",
  "cds_end", "score", "n_int", "sum_overlap", "sum_x", "sum_xy",
  "sum_y", "temp_start", "temp_end", "seqnames"
))
/scratch/gouwar.j/cran-all/cranData/valr/R/globals.r
#' sniff number of fields in a bed file
#' @noRd
sniff_fields <- function(filename) {
  ncol(
    readr::read_tsv(
      filename,
      n_max = 10,
      comment = "#",
      show_col_types = FALSE,
      name_repair = "minimal"
    )
  )
}

#' @title Read BED and related files.
#'
#' @description read functions for BED and related formats. Filenames can be
#'   local files or URLs. The read functions load data into tbls with
#'   consistent `chrom`, `start` and `end` colnames.
#'
#' @param filename file or URL
#' @param col_types column type spec for [readr::read_tsv()]
#' @param sort sort the tbl by chrom and start
#' @param ... options to pass to [readr::read_tsv()]
#' @param n_fields `r lifecycle::badge("deprecated")`
#'
#' @return [ivl_df]
#'
#' @family read functions
#'
#' @details \url{https://genome.ucsc.edu/FAQ/FAQformat.html#format1}
#'
#' @examples
#' # read_bed assumes 3 field BED format.
#' read_bed(valr_example("3fields.bed.gz"))
#'
#' # result is sorted by chrom and start unless `sort = FALSE`
#' read_bed(valr_example("3fields.bed.gz"), sort = FALSE)
#'
#' @export
read_bed <- function(filename,
                     col_types = bed12_coltypes,
                     sort = TRUE,
                     ...,
                     n_fields = NULL) {
  check_required(filename)

  if (!is.null(n_fields)) {
    lifecycle::deprecate_warn(
      "0.6.9",
      "read_bed(n_fields)",
      details = "fields are now determined automatically from the file"
    )
  }

  n_fields <- sniff_fields(filename)
  coltypes <- col_types[1:n_fields]
  colnames <- names(coltypes)

  out <- readr::read_tsv(
    filename,
    col_names = colnames,
    col_types = coltypes,
    ...
  )

  if (sort) out <- bed_sort(out)

  out
}

#' @rdname read_bed
#'
#' @details \url{https://genome.ucsc.edu/FAQ/FAQformat.html#format1}
#'
#' @examples
#'
#' read_bed12(valr_example("mm9.refGene.bed.gz"))
#'
#' @export
read_bed12 <- function(filename, ...) {
  check_required(filename)
  n_fields <- sniff_fields(filename)
  if (n_fields != 12) {
    cli::cli_abort("expected 12 columns in bed12")
  }
  bed12_tbl <- read_bed(filename, ...)
  bed12_tbl
}

#' @rdname read_bed
#'
#' @details \url{https://genome.ucsc.edu/goldenPath/help/bedgraph.html}
#'
#' @examples
#'
#' read_bedgraph(valr_example("test.bg.gz"))
#'
#' @export
read_bedgraph <- function(filename, ...) {
  # load as bed4, rename `value` column and convert to double
  check_required(filename)
  n_fields <- sniff_fields(filename)
  if (n_fields != 4) {
    cli::cli_abort("expected 4 columns in bedgraph")
  }
  out <- read_bed(filename, sort = FALSE, ...)
  out <- select(out, everything(), value = name)
  out <- mutate(out, value = as.double(value))
  out
}

#' @rdname read_bed
#'
#' @details \url{https://genome.ucsc.edu/FAQ/FAQformat.html#format12}
#'
#' @examples
#'
#' read_narrowpeak(valr_example("sample.narrowPeak.gz"))
#'
#' @export
read_narrowpeak <- function(filename, ...) {
  check_required(filename)
  n_fields <- sniff_fields(filename)
  colnames <- names(peak_coltypes)
  out <- readr::read_tsv(
    filename,
    col_types = peak_coltypes,
    col_names = colnames,
    ...
  )
  out
}

#' @rdname read_bed
#'
#' @details \url{https://genome.ucsc.edu/FAQ/FAQformat.html#format13}
#'
#' @examples
#'
#' read_broadpeak(valr_example("sample.broadPeak.gz"))
#'
#' @export
read_broadpeak <- function(filename, ...) {
  check_required(filename)
  n_fields <- sniff_fields(filename)
  # drop the trailing `peak` column type; broadPeak files lack that field
  coltypes <- peak_coltypes[-length(peak_coltypes)]
  colnames <- names(coltypes)
  out <- readr::read_tsv(
    filename,
    col_names = colnames,
    col_types = coltypes,
    ...
  )
  out
}

peak_coltypes <- list(
  chrom = readr::col_character(),
  start = readr::col_integer(),
  end = readr::col_integer(),
  name = readr::col_character(),
  score = readr::col_integer(),
  strand = readr::col_character(),
  signal = readr::col_double(),
  pvalue = readr::col_double(),
  qvalue = readr::col_double(),
  peak = readr::col_integer()
)

bed12_coltypes <- list(
  chrom = readr::col_character(),
  start = readr::col_integer(),
  end = readr::col_integer(),
  name = readr::col_character(),
  score = readr::col_character(),
  strand = readr::col_character(),
  cds_start = readr::col_integer(),
  cds_end = readr::col_integer(),
  item_rgb = readr::col_character(),
  exon_count = readr::col_integer(),
  exon_sizes = readr::col_character(),
  exon_starts = readr::col_character()
)

#' Import and convert a bigwig file into a valr compatible tbl
#'
#' @description This function will output a 5 column tibble with
#'   zero-based chrom, start, end, score, and strand columns.
#'
#' @param path path to bigWig file
#' @param set_strand strand to add to output (defaults to "+")
#'
#' @note This function uses \code{rtracklayer} to import bigwigs, which
#'   has unstable support for the windows platform and therefore may error
#'   for windows users (particularly 32-bit windows users).
#'
#' @examples
#' \dontrun{
#' if (.Platform$OS.type != "windows") {
#'   bw <- read_bigwig(valr_example("hg19.dnase1.bw"))
#'   head(bw)
#' }
#' }
#' @importFrom rtracklayer import
#' @export
read_bigwig <- function(path, set_strand = "+") {
  check_required(path)
  # note that rtracklayer will produce a one-based GRanges object
  res <- rtracklayer::import(path)
  res <- dplyr::as_tibble(res)
  res <- dplyr::mutate(res,
    chrom = as.character(seqnames),
    start = start - 1L,
    strand = set_strand
  )
  dplyr::select(res, chrom, start, end, score, strand)
}

#' Import and convert a GTF/GFF file into a valr compatible bed tbl format
#'
#' @description This function will output a tibble with the
#'   required chrom, start, and end columns, as well as other columns
#'   depending on content in the GTF/GFF file.
#'
#' @param path path to gtf or gff file
#' @param zero_based if TRUE, convert to zero based
#'
#' @examples
#'
#' gtf <- read_gtf(valr_example("hg19.gencode.gtf.gz"))
#' head(gtf)
#'
#' @importFrom rtracklayer import
#' @export
read_gtf <- function(path, zero_based = TRUE) {
  gtf <- rtracklayer::import(path)
  gtf <- as.data.frame(gtf)
  gtf <- dplyr::mutate_if(gtf, is.factor, as.character)
  res <- dplyr::rename(gtf, chrom = seqnames)

  if (zero_based) {
    res <- dplyr::mutate(res, start = start - 1L)
  }

  tibble::as_tibble(res)
}
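# Coordinate sketch (not part of valr) for the conversions above: GRanges
# and GTF are 1-based and inclusive, BED is 0-based and half-open, so a
# feature spanning bases 1..100 becomes start = 0, end = 100 after
# subtracting 1 from the start.
if (FALSE) {
  one_based <- tibble::tibble(start = 1L, end = 100L)
  dplyr::mutate(one_based, start = start - 1L) # start 0, end 100
}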
/scratch/gouwar.j/cran-all/cranData/valr/R/read_bed.r
#' Read genome files.
#'
#' Genome files (UCSC "chromSize" files) contain chromosome name and size
#' information. These sizes are used by downstream functions to identify
#' computed intervals that have coordinates outside of the genome bounds.
#'
#' @param path path to file containing chrom/contig names and sizes,
#'   one-pair-per-line, tab-delimited
#'
#' @return [genome_df], sorted by descending `size`
#'
#' @note URLs to genome files can also be used.
#'
#' @family read functions
#'
#' @examples
#' read_genome(valr_example("hg19.chrom.sizes.gz"))
#'
#' \dontrun{
#' # `read_genome` accepts a URL
#' read_genome("https://genome.ucsc.edu/goldenpath/help/hg19.chrom.sizes")
#' }
#'
#' @export
read_genome <- function(path) {
  check_required(path)
  colnames <- c("chrom", "size")
  genome <- readr::read_tsv(path, col_names = colnames, show_col_types = FALSE)
  genome <- arrange(genome, desc(size))
  genome
}

#' Select intervals bounded by a genome.
#'
#' Used to remove out-of-bounds intervals, or trim interval coordinates using a
#' `genome`.
#'
#' @param x [ivl_df]
#' @param genome [genome_df]
#' @param trim adjust coordinates for out-of-bounds intervals
#'
#' @return [ivl_df]
#'
#' @family utilities
#'
#' @examples
#' x <- tibble::tribble(
#'   ~chrom, ~start, ~end,
#'   "chr1", -100, 500,
#'   "chr1", 100, 1e9,
#'   "chr1", 500, 1000
#' )
#'
#' genome <- read_genome(valr_example("hg19.chrom.sizes.gz"))
#'
#' # out-of-bounds are removed by default ...
#' bound_intervals(x, genome)
#'
#' # ... or can be trimmed within the bounds of a genome
#' bound_intervals(x, genome, trim = TRUE)
#'
#' @export
bound_intervals <- function(x, genome, trim = FALSE) {
  x <- check_interval(x)
  genome <- check_genome(genome)

  x <- ungroup(x)

  res <- left_join(x, genome, by = "chrom")
  if (trim) {
    res <- mutate(
      res,
      start = ifelse(start < 0,
        0,
        pmin(start, size - 1)
      ),
      end = ifelse(end > size,
        size,
        pmax(1, end)
      )
    )
    res <- select(res, -size)
  } else {
    res <- filter(res, start >= 0 & start < size & end <= size & end > 0)
    res <- select(res, -size)
  }

  if (any(res$start == res$end)) {
    n <- sum(res$start == res$end)
    cli::cli_warn(
      "{n} interval{?s} discarded with same start and end after bounding"
    )
  }
  res <- res[res$start != res$end, ]

  res
}
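# Trim arithmetic check (not part of valr): with trim = TRUE a negative
# start is clamped to 0 and an end beyond the chrom size is clamped to size.
if (FALSE) {
  x <- tibble::tribble(~chrom, ~start, ~end, "chr1", -10, 1500)
  genome <- tibble::tribble(~chrom, ~size, "chr1", 1000)
  bound_intervals(x, genome, trim = TRUE) # start 0, end 1000
}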
/scratch/gouwar.j/cran-all/cranData/valr/R/read_genome.r
#' Read a VCF file. #' #' @param vcf vcf filename #' #' @family read functions #' @return `data_frame` #' #' @note return value has `chrom`, `start` and `end` columns. #' Interval lengths are the size of the 'REF' field. #' #' @examples #' vcf_file <- valr_example("test.vcf.gz") #' read_vcf(vcf_file) #' #' @export read_vcf <- function(vcf) { check_required(vcf) res <- suppressMessages(readr::read_tsv(vcf, comment = "##")) colnames(res) <- stringr::str_replace(colnames(res), "^#", "") res <- mutate( res, chrom = stringr::str_c("chr", CHROM), start = POS, end = POS + stringr::str_length(REF) ) res }
/scratch/gouwar.j/cran-all/cranData/valr/R/read_vcf.r
#' Calculate interval spacing. #' #' Spacing for the first interval of each chromosome is undefined (`NA`). The #' leading interval of an overlapping interval pair has a negative value. #' #' @param x [ivl_df] #' #' @return [ivl_df] with `.spacing` column. #' #' @family utilities #' #' @examples #' x <- tibble::tribble( #' ~chrom, ~start, ~end, #' "chr1", 1, 100, #' "chr1", 150, 200, #' "chr2", 200, 300 #' ) #' #' interval_spacing(x) #' #' @export interval_spacing <- function(x) { x <- check_interval(x) res <- bed_sort(x) gx <- groups(x) res <- group_by(res, chrom) res <- mutate(res, .spacing = start - lag(end)) res <- group_by(res, !!!gx) res }
/scratch/gouwar.j/cran-all/cranData/valr/R/spacing.r
#' Flip strands in intervals. #' #' Flips positive (`+`) stranded intervals to negative (`-`) strands, #' and vice-versa. Facilitates comparisons among intervals on opposing strands. #' #' @param x [ivl_df] #' #' @family utilities #' #' @examples #' x <- tibble::tribble( #' ~chrom, ~start, ~end, ~strand, #' "chr1", 1, 100, "+", #' "chr2", 1, 100, "-" #' ) #' #' flip_strands(x) #' #' @export flip_strands <- function(x) { if (!"strand" %in% colnames(x)) { cli::cli_abort("{.var strand} not found in {.var x}") } x <- check_interval(x) # remove existing groups groups_x <- groups(x) res <- ungroup(x) res <- mutate(res, .strand = ifelse(strand == "+", "-", "+")) res <- select(res, -strand) res <- select(res, everything(), strand = .strand) res <- group_by(res, !!!groups_x) res }
/scratch/gouwar.j/cran-all/cranData/valr/R/strands.r
# Validity checks --------------------------------------------------- #' Bed-like data.frame requirements for valr functions #' #' @name ivl_df #' @docType package NULL #' Bed-like data.frame requirements for valr functions #' @rdname ivl_df #' @name genome_df #' @docType package NULL #' Check bed-like data.frame requirements for valr compatibility #' #' Required column names for interval dataframes are #' `chrom`, `start` and `end`. Internally interval dataframes are #' validated using `check_interval()` #' #' @param x A `data.frame` or `tibble::tibble` #' @rdname ivl_df #' #' @examples #' # using tibble #' x <- tibble::tribble( #' ~chrom, ~start, ~end, #' "chr1", 1, 50, #' "chr1", 10, 75, #' "chr1", 100, 120 #' ) #' #' check_interval(x) #' #' # using base R data.frame #' x <- data.frame( #' chrom = "chr1", #' start = 0, #' end = 100, #' stringsAsFactors = FALSE #' ) #' #' check_interval(x) #' #' @export check_interval <- function(x) { expect_names <- c("chrom", "start", "end") check_names(x, expect_names) if (!tibble::is_tibble(x)) { x <- tibble::as_tibble(x) } x } #' Check genome file data.frame requirements for valr compatibility #' #' Required column names for genome dataframes are #' `chrom` and `size`. Internally genome dataframes are #' validated using `check_genome()`. #' #' @param x A `data.frame` or `tibble::tibble` #' @rdname ivl_df #' #' @examples #' # example genome input #' #' x <- tibble::tribble( #' ~chrom, ~size, #' "chr1", 1e6 #' ) #' #' check_genome(x) #' #' @export check_genome <- function(x) { expect_names <- c("chrom", "size") check_names(x, expect_names) # check for unique refs chroms <- x[["chrom"]] dups <- unique(chroms[duplicated(chroms)]) if (length(dups) > 0) { cli::cli_abort( "duplicate chroms in genome: {dups}" ) } if (!tibble::is_tibble(x)) { x <- tibble::as_tibble(x) } x } check_names <- function(x, expected) { missing <- setdiff(expected, names(x)) if (length(missing) != 0) { n <- length(expected) cli::cli_abort( "expected {n} required names, missing: {missing}", ) } } #' Convert Granges to bed tibble #' #' @param x GRanges object to convert to bed tibble. #' #' @return [tibble::tibble()] #' #' @examples #' \dontrun{ #' gr <- GenomicRanges::GRanges( #' seqnames = S4Vectors::Rle( #' c("chr1", "chr2", "chr1", "chr3"), #' c(1, 1, 1, 1) #' ), #' ranges = IRanges::IRanges( #' start = c(1, 10, 50, 100), #' end = c(100, 500, 1000, 2000), #' names = head(letters, 4) #' ), #' strand = S4Vectors::Rle( #' c("-", "+"), c(2, 2) #' ) #' ) #' #' gr_to_bed(gr) #' #' # There are two ways to convert a bed-like data.frame to GRanges: #' #' gr <- GenomicRanges::GRanges( #' seqnames = S4Vectors::Rle(x$chrom), #' ranges = IRanges::IRanges( #' start = x$start + 1, #' end = x$end, #' names = x$name #' ), #' strand = S4Vectors::Rle(x$strand) #' ) #' # or: #' #' gr <- GenomicRanges::makeGRangesFromDataFrame(dplyr::mutate(x, start = start + 1)) #' } #' #' @export gr_to_bed <- function(x) { # https://www.biostars.org/p/89341/ res <- tibble( chrom = as.character(x@seqnames), start = x@ranges@start - 1, end = x@ranges@start - 1 + x@ranges@width, name = rep(".", length(x)), score = rep(".", length(x)), strand = as.character(x@strand) ) res <- mutate(res, strand = ifelse(strand == "*", ".", strand)) res }
/scratch/gouwar.j/cran-all/cranData/valr/R/tbls.r
#' Plyr function id, packaged here because plyr is retired.
#'
#' Compute a unique numeric id for each unique row in a data frame.
#'
#' Properties:
#' \itemize{
#'   \item \code{order(id)} is equivalent to \code{do.call(order, df)}
#'   \item rows containing the same data have the same value
#'   \item if \code{drop = FALSE} then room for all possibilities
#' }
#'
#' @param .variables list of variables
#' @param drop drop unused factor levels?
#' @return a numeric vector with attribute n, giving total number of
#'   possibilities
#' @keywords internal
#' @seealso \code{\link{id_var}}
id <- function(.variables, drop = FALSE) {
  # Drop all zero length inputs
  lengths <- vapply(.variables, length, integer(1))
  .variables <- .variables[lengths != 0]

  if (length(.variables) == 0) {
    # inlined "%||%" logic to avoid adding a dependency
    nvars <- nrow(.variables)
    n <- if (is.null(nvars)) {
      0L
    } else {
      nvars
    }
    return(structure(seq_len(n), n = n))
  }

  # Special case for single variable
  if (length(.variables) == 1) {
    return(id_var(.variables[[1]], drop = drop))
  }

  # Calculate individual ids
  ids <- rev(lapply(.variables, id_var, drop = drop))
  p <- length(ids)

  # Calculate dimensions
  ndistinct <- vapply(ids, attr, "n",
    FUN.VALUE = numeric(1),
    USE.NAMES = FALSE
  )
  n <- prod(ndistinct)
  if (n > 2^31) {
    # Too big for integers, have to use strings, which will be much slower :(
    char_id <- do.call("paste", c(ids, sep = "\r"))
    res <- match(char_id, unique(char_id))
  } else {
    combs <- c(1, cumprod(ndistinct[-p]))
    mat <- do.call("cbind", ids)
    res <- c((mat - 1L) %*% combs + 1L) # nolint
  }
  attr(res, "n") <- n

  if (drop) {
    id_var(res, drop = TRUE)
  } else {
    structure(as.integer(res), n = attr(res, "n"))
  }
}

#' Plyr function id_var, packaged here because plyr is retired.
#'
#' Numeric id for a vector.
#' @keywords internal
id_var <- function(x, drop = FALSE) {
  if (length(x) == 0) {
    return(structure(integer(), n = 0L))
  }
  if (!is.null(attr(x, "n")) && !drop) {
    return(x)
  }

  if (is.factor(x) && !drop) {
    x <- addNA(x, ifany = TRUE)
    id <- as.integer(x)
    n <- length(levels(x))
  } else {
    levels <- sort(unique(x), na.last = TRUE)
    id <- match(x, levels)
    n <- max(id)
  }
  structure(id, n = n)
}
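# Worked check (not part of valr) of id(): per-variable codes are combined
# with mixed-radix arithmetic, so identical rows get identical ids and the
# `n` attribute counts all code combinations (2 levels x 2 levels = 4 here).
if (FALSE) {
  id(list(c("a", "a", "b"), c(1, 2, 1)))
  #> [1] 1 2 3  (attr n = 4)
}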
/scratch/gouwar.j/cran-all/cranData/valr/R/utils-external.r
#' Provide the path to a valr example file.
#'
#' @param path path to file
#'
#' @examples
#' valr_example("hg19.chrom.sizes.gz")
#'
#' @export
valr_example <- function(path) {
  # https://twitter.com/JennyBryan/status/780150538654527488
  system.file("extdata", path, package = "valr", mustWork = TRUE)
}

#' Reorder tbl columns based upon another tbl
#'
#' `reorder_names` returns a tbl whose columns are ordered by another tbl.
#' The `x` tbl columns are reordered based on the `y` columns
#' ordering. `x` columns that do not exist in `y` are moved to the
#' end.
#'
#' @param x [ivl_df]
#' @param y [ivl_df]
#'
#' @examples
#' # names out of order
#' x <- tibble::tribble(
#'   ~end, ~chrom, ~start, ~value,
#'   75, "chr1", 125, 10
#' )
#'
#' y <- tibble::tribble(
#'   ~chrom, ~start, ~end, ~scores,
#'   "chr1", 50, 100, 1.2,
#'   "chr1", 100, 150, 2.4
#' )
#'
#' reorder_names(x, y)
#' @noRd
reorder_names <- function(x, y) {
  names_x <- names(x)
  names_y <- names(y)

  names_x <- names_x[order(match(names_x, names_y))]

  x <- select(x, one_of(names_x))
  x
}

#' Identify groups shared between two tbls
#'
#' Identify minimum shared groups between `x` and `y` tbls. Returns
#' `NULL` if there are no shared groups.
#'
#' @param x [ivl_df]
#' @param y [ivl_df]
#'
#' @return `list` of groups or `NULL`
#'
#' @examples
#' x <- tibble::tribble(
#'   ~chrom, ~start, ~end, ~value,
#'   "chr1", 150, 400, 100,
#'   "chr2", 230, 430, 200
#' )
#'
#' y <- tibble::tribble(
#'   ~chrom, ~start, ~end, ~value,
#'   "chr1", 50, 100, 1,
#'   "chr1", 100, 150, 2
#' )
#'
#' x <- dplyr::group_by(x, chrom, value)
#' y <- dplyr::group_by(y, chrom, value)
#' shared_groups(x, y)
#'
#' y <- dplyr::group_by(y, chrom)
#' shared_groups(x, y)
#'
#' y <- dplyr::ungroup(y)
#' shared_groups(x, y)
#' @noRd
shared_groups <- function(x, y) {
  groups_x <- groups(x)
  groups_y <- groups(y)

  groups_xy <- intersect(groups_x, groups_y)
  if (length(groups_xy) == 0) {
    groups_xy <- NULL
  }
  groups_xy
}

# dplyr::check_suffix
check_suffix <- function(suffix) {
  if (!is.character(suffix) || length(suffix) != 2) {
    cli::cli_abort("{.var suffix} must be a character vector of length 2.")
  }
}

#' Return group labels from tbl_df
#' @param grp_tbl grouped tbl_df
#' @return `tibble` of grouping labels or `NULL` if no groups present
#' @noRd
get_labels <- function(grp_tbl) {
  grp_df <- attr(grp_tbl, "groups")
  grp_df <- grp_df[, !colnames(grp_df) %in% ".rows"]
  grp_df
}

#' Type convert factors if they are grouping columns
#' @param x data frame
#' @param group_cols group columns to type convert if factors
#' @return ungrouped dataframe
#' @noRd
convert_factors <- function(x, group_cols) {
  contains_factor <- sapply(x[, group_cols], is.factor)
  if (any(contains_factor)) {
    fcts <- group_cols[contains_factor]
    cli::cli_warn("Factors are not allowed for grouping. {.val {fcts}} will be treated as characters.")
    x <- ungroup(x)
    convert_cols <- group_cols[contains_factor]
    x <- mutate_at(x, .vars = convert_cols, as.character)
  } else {
    x <- ungroup(x)
  }
  x
}

#' Get indexes of groups in each grouped data.frame that are found in the other data.frame
#'
#' @param x grouped data.frame
#' @param y grouped data.frame
#' @return named list with integer vector of indexes of groups shared between data.frames
#' @noRd
shared_group_indexes <- function(x, y) {
  x <- get_group_data(x)
  y <- get_group_data(y)
  shared_rows(x, y)
}

#' Get indexes of rows in each data.frame that are found in the other data.frame
#'
#' By default only columns with the same names in the two data.frames will
#' be compared
#'
#' @param x data.frame
#' @param y data.frame
#' @return named list with integer vector of indexes shared between data.frames
#' @noRd
shared_rows <- function(x, y) {
  # based on plyr::match_df
  shared_cols <- intersect(colnames(x), colnames(y))

  combined_df <- bind_rows(x[shared_cols], y[shared_cols])
  keys <- id(combined_df, drop = TRUE)
  n_x <- nrow(x)
  n_y <- nrow(y)
  keys <- list(
    x = keys[seq_len(n_x)],
    y = keys[n_x + seq_len(n_y)],
    n = attr(keys, "n")
  )

  x_indexes <- which(keys$x %in% keys$y)
  y_indexes <- which(keys$y %in% keys$x)
  list(
    x = x_indexes,
    y = y_indexes
  )
}

#' Get data.frame from groups attribute without .rows column
#' @param df data.frame
#' @return data.frame without the .rows column
#' @noRd
get_group_data <- function(df) {
  grps <- attr(df, "groups")
  grps[, -ncol(grps)]
}

#' Get a unique column id
#' @param df data.frame
#' @param col desired column name
#' @return unique column name
#' @noRd
get_id_col <- function(df, col = ".id") {
  make.unique(c(colnames(df), col))[ncol(df) + 1]
}
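# A small illustration (guarded, never run at load) of `shared_rows()`:
# row 2 of `x` matches row 1 of `y` on their shared columns. Toy data only.
if (FALSE) {
  x <- data.frame(chrom = c("chr1", "chr2"), val = 1:2)
  y <- data.frame(chrom = c("chr2", "chr3"), val = c(2L, 9L))
  shared_rows(x, y) # list(x = 2L, y = 1L)
}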
/scratch/gouwar.j/cran-all/cranData/valr/R/utils.r
#' valr: genome interval arithmetic in R #' #' valr provides tools to read and manipulate intervals and signals on a genome #' reference. valr was developed to facilitate interactive analysis of #' genome-scale data sets, leveraging the power of dplyr and piping. #' #' To learn more about valr, start with the vignette: #' `browseVignettes(package = "valr")` #' #' @author Jay Hesselberth <jay.hesselberth@@gmail.com> #' @author Kent Riemondy <kent.riemondy@@gmail.com> #' #' @docType package #' @name valr #' #' @seealso Report bugs at \url{https://github.com/rnabioco/valr/issues} #' #' @useDynLib valr, .registration = TRUE #' @importFrom Rcpp sourceCpp #' @importFrom tibble tribble as_tibble is_tibble #' @importFrom readr read_tsv col_integer col_character col_double #' @importFrom stringr str_replace str_split str_c str_length fixed #' @importFrom rlang quos sym syms dots_n check_required #' @importFrom stats fisher.test na.omit #' @importFrom utils head tail packageVersion #' @importFrom broom tidy #' @import ggplot2 #' @import dplyr "_PACKAGE"
/scratch/gouwar.j/cran-all/cranData/valr/R/valr-package.r
## -----------------------------------------------------------------------------
knitr::opts_chunk$set(
  collapse = TRUE,
  comment = "#>",
  fig.align = "center"
)

## -----------------------------------------------------------------------------
library(valr)
library(dplyr)
library(ggplot2)
library(tibble)

## -----------------------------------------------------------------------------
library(valr)
library(dplyr)

snps <- read_bed(valr_example("hg19.snps147.chr22.bed.gz"))
genes <- read_bed(valr_example("genes.hg19.chr22.bed.gz"))

# find snps in intergenic regions
intergenic <- bed_subtract(snps, genes)

# distance from intergenic snps to nearest gene
nearby <- bed_closest(intergenic, genes)

nearby |>
  select(starts_with("name"), .overlap, .dist) |>
  filter(abs(.dist) < 1000)

## -----------------------------------------------------------------------------
bed_file <- valr_example("3fields.bed.gz")
read_bed(bed_file) # accepts filepaths or URLs

## -----------------------------------------------------------------------------
bed <- tribble(
  ~chrom, ~start, ~end,
  "chr1", 1657492, 2657492,
  "chr2", 2501324, 3094650
)

bed

## -----------------------------------------------------------------------------
# a chromosome 100 basepairs in length
chrom <- tribble(
  ~chrom, ~start, ~end,
  "chr1", 0, 100
)

chrom

# single base-pair intervals
bases <- tribble(
  ~chrom, ~start, ~end,
  "chr1", 0, 1, # first base of chromosome
  "chr1", 1, 2, # second base of chromosome
  "chr1", 99, 100 # last base of chromosome
)

bases

## -----------------------------------------------------------------------------
# # access the `refGene` tbl on the `hg38` assembly.
# if (require(RMariaDB)) {
#   ucsc <- db_ucsc("hg38")
#   tbl(ucsc, "refGene")
# }

## -----------------------------------------------------------------------------
x <- tribble(
  ~chrom, ~start, ~end,
  "chr1", 25, 50,
  "chr1", 100, 125
)

y <- tribble(
  ~chrom, ~start, ~end,
  "chr1", 30, 75
)

bed_glyph(bed_intersect(x, y))

## -----------------------------------------------------------------------------
x <- tribble(
  ~chrom, ~start, ~end,
  "chr1", 1, 50,
  "chr1", 10, 75,
  "chr1", 100, 120
)

bed_glyph(bed_merge(x))

## -----------------------------------------------------------------------------
x <- tribble(
  ~chrom, ~start, ~end, ~strand,
  "chr1", 1, 100, "+",
  "chr1", 50, 150, "+",
  "chr2", 100, 200, "-"
)

y <- tribble(
  ~chrom, ~start, ~end, ~strand,
  "chr1", 50, 125, "+",
  "chr1", 50, 150, "-",
  "chr2", 50, 150, "+"
)

# intersect tbls by strand
x <- group_by(x, strand)
y <- group_by(y, strand)

bed_intersect(x, y)

## -----------------------------------------------------------------------------
x <- group_by(x, strand)
y <- flip_strands(y)
y <- group_by(y, strand)

bed_intersect(x, y)

## -----------------------------------------------------------------------------
# # calculate the mean and variance for a `value` column
# bed_map(a, b, .mean = mean(value), .var = var(value))
#
# # report concatenated and max values for merged intervals
# bed_merge(a, .concat = concat(value), .max = max(value))

## -----------------------------------------------------------------------------
# `valr_example()` identifies the path of example files
bedfile <- valr_example("genes.hg19.chr22.bed.gz")
genomefile <- valr_example("hg19.chrom.sizes.gz")
bgfile <- valr_example("hela.h3k4.chip.bg.gz")

genes <- read_bed(bedfile)
genome <- read_genome(genomefile)
y <- read_bedgraph(bgfile)

## -----------------------------------------------------------------------------
# generate 1 bp TSS intervals, `+` strand only
tss <- genes |>
  filter(strand == "+") |>
  mutate(end = start + 1)

# 1000 bp up and downstream
region_size <- 1000
# 50 bp windows
win_size <- 50

# add slop to the TSS, break into windows and add a group
x <- tss |>
  bed_slop(genome, both = region_size) |>
  bed_makewindows(win_size)

x

## -----------------------------------------------------------------------------
# map signals to TSS regions and calculate summary statistics.
res <- bed_map(x, y, win_sum = sum(value, na.rm = TRUE)) |>
  group_by(.win_id) |>
  summarize(
    win_mean = mean(win_sum, na.rm = TRUE),
    win_sd = sd(win_sum, na.rm = TRUE)
  )

res

## -----------------------------------------------------------------------------
x_labels <- seq(
  -region_size, region_size,
  by = win_size * 5
)

x_breaks <- seq(1, 41, by = 5)

sd_limits <- aes(
  ymax = win_mean + win_sd,
  ymin = win_mean - win_sd
)

ggplot(
  res,
  aes(
    x = .win_id,
    y = win_mean
  )
) +
  geom_point() +
  geom_pointrange(sd_limits) +
  scale_x_continuous(
    labels = x_labels,
    breaks = x_breaks
  ) +
  labs(
    x = "Position (bp from TSS)",
    y = "Signal",
    title = "Human H3K4me3 signal near transcription start sites"
  ) +
  theme_classic()
/scratch/gouwar.j/cran-all/cranData/valr/inst/doc/valr.R
---
title: 'valr overview'
date: '`r Sys.Date()`'
output:
  rmarkdown::html_vignette:
    toc: true
    toc_depth: 3
vignette: >
  %\VignetteIndexEntry{valr-overview}
  %\VignetteEngine{knitr::rmarkdown}
  %\VignetteEncoding{UTF-8}
---

```{r}
#| label: knitr-opts
#| echo: false
knitr::opts_chunk$set(
  collapse = TRUE,
  comment = "#>",
  fig.align = "center"
)
```

```{r}
#| label: init
#| echo: false
#| message: false
library(valr)
library(dplyr)
library(ggplot2)
library(tibble)
```

### Familiar tools, natively in R

The functions in `valr` have similar names to their `BEDtools` counterparts, and so will be familiar to users coming from the `BEDtools` suite. Similar to [`pybedtools`](https://daler.github.io/pybedtools/#why-pybedtools), `valr` has a terse syntax:

```{r}
#| label: valr-demo
#| message: false
library(valr)
library(dplyr)

snps <- read_bed(valr_example("hg19.snps147.chr22.bed.gz"))
genes <- read_bed(valr_example("genes.hg19.chr22.bed.gz"))

# find snps in intergenic regions
intergenic <- bed_subtract(snps, genes)

# distance from intergenic snps to nearest gene
nearby <- bed_closest(intergenic, genes)

nearby |>
  select(starts_with("name"), .overlap, .dist) |>
  filter(abs(.dist) < 1000)
```

### Input data

`valr` assigns common column names to facilitate comparisons between tbls. All tbls will have `chrom`, `start`, and `end` columns, and some tbls from multi-column formats will have additional pre-determined column names. See the `read_bed()` documentation for details.

```{r}
#| label: file-io
bed_file <- valr_example("3fields.bed.gz")
read_bed(bed_file) # accepts filepaths or URLs
```

`valr` can also operate on BED-like data.frames already constructed in R, provided that columns named `chrom`, `start` and `end` are present. New tbls can also be constructed as either `tibbles` or base R `data.frames`.

```{r}
#| label: trbl-ivls
bed <- tribble(
  ~chrom, ~start, ~end,
  "chr1", 1657492, 2657492,
  "chr2", 2501324, 3094650
)

bed
```

### Interval coordinates

`valr` adheres to the BED [format](https://genome.ucsc.edu/FAQ/FAQformat#format1), which specifies that the start position for an interval is zero-based and the end position is one-based. The first position in a chromosome is 0. The end position for a chromosome is one position past the last base, and is not included in the interval. For example:

```{r}
#| label: zero-based
# a chromosome 100 basepairs in length
chrom <- tribble(
  ~chrom, ~start, ~end,
  "chr1", 0, 100
)

chrom

# single base-pair intervals
bases <- tribble(
  ~chrom, ~start, ~end,
  "chr1", 0, 1, # first base of chromosome
  "chr1", 1, 2, # second base of chromosome
  "chr1", 99, 100 # last base of chromosome
)

bases
```

### Remote databases

Remote databases can be accessed with `db_ucsc()` (to access the UCSC Browser) and `db_ensembl()` (to access Ensembl databases).

```{r}
#| label: db
#| eval: false
# access the `refGene` tbl on the `hg38` assembly.
if (require(RMariaDB)) {
  ucsc <- db_ucsc("hg38")
  tbl(ucsc, "refGene")
}
```

### Visual documentation

The `bed_glyph()` tool illustrates the results of operations in `valr`, similar to those found in the `BEDtools` documentation.
This glyph shows the result of intersecting `x` and `y` intervals with `bed_intersect()`:

```{r}
#| label: intersect-glyph
x <- tribble(
  ~chrom, ~start, ~end,
  "chr1", 25, 50,
  "chr1", 100, 125
)

y <- tribble(
  ~chrom, ~start, ~end,
  "chr1", 30, 75
)

bed_glyph(bed_intersect(x, y))
```

And this glyph illustrates `bed_merge()`:

```{r}
#| label: merge-glyph
x <- tribble(
  ~chrom, ~start, ~end,
  "chr1", 1, 50,
  "chr1", 10, 75,
  "chr1", 100, 120
)

bed_glyph(bed_merge(x))
```

### Grouping data

The `group_by` function in dplyr can be used to apply functions to subsets of single and multiple `data_frame`s. Functions in `valr` leverage grouping to enable a variety of comparisons. For example, intervals can be grouped by `strand` to perform comparisons among intervals on the same strand.

```{r}
#| label: group-strand
x <- tribble(
  ~chrom, ~start, ~end, ~strand,
  "chr1", 1, 100, "+",
  "chr1", 50, 150, "+",
  "chr2", 100, 200, "-"
)

y <- tribble(
  ~chrom, ~start, ~end, ~strand,
  "chr1", 50, 125, "+",
  "chr1", 50, 150, "-",
  "chr2", 50, 150, "+"
)

# intersect tbls by strand
x <- group_by(x, strand)
y <- group_by(y, strand)

bed_intersect(x, y)
```

Comparisons between intervals on opposite strands are done using the `flip_strands()` function:

```{r}
#| label: strand-opp
x <- group_by(x, strand)
y <- flip_strands(y)
y <- group_by(y, strand)

bed_intersect(x, y)
```

Both single-set (e.g. `bed_merge()`) and multi-set operations will respect groupings in the input intervals.

### Column specification

Columns in `BEDtools` are referred to by position:

``` bash
# calculate the mean of column 6 for intervals in `b` that overlap with `a`
bedtools map -a a.bed -b b.bed -c 6 -o mean
```

In `valr`, columns are referred to by name and can be used in multiple name/value expressions for summaries.

```{r}
#| label: tidy-eval
#| eval: false
# calculate the mean and variance for a `value` column
bed_map(a, b, .mean = mean(value), .var = var(value))

# report concatenated and max values for merged intervals
bed_merge(a, .concat = concat(value), .max = max(value))
```

## Getting started

### Meta-analysis

This demonstration illustrates how to use `valr` tools to perform a "meta-analysis" of signals relative to genomic features. Here we analyze the distribution of histone marks surrounding transcription start sites.

First we load libraries and relevant data.

```{r}
#| label: tss-demo
#| warning: false
#| message: false
# `valr_example()` identifies the path of example files
bedfile <- valr_example("genes.hg19.chr22.bed.gz")
genomefile <- valr_example("hg19.chrom.sizes.gz")
bgfile <- valr_example("hela.h3k4.chip.bg.gz")

genes <- read_bed(bedfile)
genome <- read_genome(genomefile)
y <- read_bedgraph(bgfile)
```

Then we generate 1 bp intervals to represent transcription start sites (TSSs). We focus on `+` strand genes, but `-` genes are easily accommodated by filtering them and using `bed_makewindows()` with `reversed` window numbers.

```{r}
#| label: make-tss
# generate 1 bp TSS intervals, `+` strand only
tss <- genes |>
  filter(strand == "+") |>
  mutate(end = start + 1)

# 1000 bp up and downstream
region_size <- 1000
# 50 bp windows
win_size <- 50

# add slop to the TSS, break into windows and add a group
x <- tss |>
  bed_slop(genome, both = region_size) |>
  bed_makewindows(win_size)

x
```

Now we use the `.win_id` group with `bed_map()` to calculate a sum by mapping `y` signals onto the intervals in `x`. These data are regrouped by `.win_id` and a summary with `mean` and `sd` values is calculated.

```{r}
#| label: bed-map
# map signals to TSS regions and calculate summary statistics.
res <- bed_map(x, y, win_sum = sum(value, na.rm = TRUE)) |>
  group_by(.win_id) |>
  summarize(
    win_mean = mean(win_sum, na.rm = TRUE),
    win_sd = sd(win_sum, na.rm = TRUE)
  )

res
```

Finally, these summary statistics are used to construct a plot that illustrates histone density surrounding TSSs.

```{r}
#| label: plot-tss
#| warning: false
#| message: false
x_labels <- seq(
  -region_size, region_size,
  by = win_size * 5
)

x_breaks <- seq(1, 41, by = 5)

sd_limits <- aes(
  ymax = win_mean + win_sd,
  ymin = win_mean - win_sd
)

ggplot(
  res,
  aes(
    x = .win_id,
    y = win_mean
  )
) +
  geom_point() +
  geom_pointrange(sd_limits) +
  scale_x_continuous(
    labels = x_labels,
    breaks = x_breaks
  ) +
  labs(
    x = "Position (bp from TSS)",
    y = "Signal",
    title = "Human H3K4me3 signal near transcription start sites"
  ) +
  theme_classic()
```

## Related work

* Command-line tools [BEDtools][1] and [bedops][5].
* The Python library [pybedtools][4] wraps BEDtools.
* The R packages [GenomicRanges][6], [bedr][7], [IRanges][8] and [GenometriCorr][9] provide similar capability with a different philosophy.

[1]: https://bedtools.readthedocs.io/en/latest/
[2]: https://github.com/hadley/dplyr
[3]: https://www.rcpp.org/
[4]: https://pythonhosted.org/pybedtools/
[5]: https://bedops.readthedocs.io/en/latest/index.html
[6]: https://bioconductor.org/packages/release/bioc/html/GenomicRanges.html
[7]: https://CRAN.R-project.org/package=bedr
[8]: https://bioconductor.org/packages/release/bioc/html/IRanges.html
[9]: https://journals.plos.org/ploscompbiol/article?id=10.1371/journal.pcbi.1002529
[10]: https://rmarkdown.rstudio.com/
[12]: https://bitbucket.org/snakemake/snakemake/wiki/Home
[13]: https://shiny.posit.co/
/scratch/gouwar.j/cran-all/cranData/valr/inst/doc/valr.Rmd
x <- tibble::tribble(
  ~chrom, ~start, ~end,
  'chr1', 100, 250,
  'chr2', 250, 500
)

y <- tibble::tribble(
  ~chrom, ~start, ~end, ~value,
  'chr1', 100, 250, 10,
  'chr1', 150, 250, 20,
  'chr2', 250, 500, 500
)

bed_glyph(bed_map(x, y, value = sum(value)), label = 'value')

# summary examples
bed_map(x, y, .sum = sum(value))

bed_map(x, y, .min = min(value), .max = max(value))

# identify non-intersecting intervals to include in the result
res <- bed_map(x, y, .sum = sum(value))
x_not <- bed_intersect(x, y, invert = TRUE)
dplyr::bind_rows(res, x_not)

# create a list-column
bed_map(x, y, .values = list(value))

# use `nth` family from dplyr
bed_map(x, y, .first = dplyr::first(value))

bed_map(x, y, .absmax = abs(max(value)))

bed_map(x, y, .count = length(value))

bed_map(x, y, .vals = values(value))

# count defaults are NA not 0; differs from bedtools2 ...
bed_map(x, y, .counts = dplyr::n())

# ... but NA counts can be converted to 0's
dplyr::mutate(bed_map(x, y, .counts = dplyr::n()), .counts = ifelse(is.na(.counts), 0, .counts))
/scratch/gouwar.j/cran-all/cranData/valr/inst/example/bed_map.r
#' @include generateXY.R #' @include EMGLLF.R #' @include EMGrank.R #' @include initSmallEM.R #' @include computeGridLambda.R #' @include constructionModelesLassoMLE.R #' @include constructionModelesLassoRank.R #' @include selectVariables.R #' @include main.R #' @include plot_valse.R #' #' @useDynLib valse #' #' @importFrom parallel makeCluster parLapply stopCluster clusterExport #' @importFrom MASS ginv NULL
/scratch/gouwar.j/cran-all/cranData/valse/R/A_NAMESPACE.R
#' EMGLLF
#'
#' Run a generalized EM algorithm developed for mixture of Gaussian regression
#' models with variable selection by an extension of the Lasso estimator (regularization parameter lambda).
#' Reparametrization is done to ensure invariance by homothetic transformation.
#' It returns a collection of models, varying the number of clusters and the sparsity in the regression mean.
#'
#' @param phiInit an initialization for phi
#' @param rhoInit an initialization for rho
#' @param piInit an initialization for pi
#' @param gamInit initialization for the a posteriori probabilities
#' @param mini integer, minimum number of iterations in the EM algorithm, by default = 10
#' @param maxi integer, maximum number of iterations in the EM algorithm, by default = 100
#' @param gamma integer for the power in the penalty, by default = 1
#' @param lambda regularization parameter in the Lasso estimation
#' @param X matrix of covariates (of size n*p)
#' @param Y matrix of responses (of size n*m)
#' @param eps real, threshold to say the EM algorithm converges, by default = 1e-4
#' @param fast boolean to enable or not the C function call
#'
#' @return A list (corresponding to the model collection) defined by (phi,rho,pi,llh,S,affec):
#'   phi : regression mean for each cluster, an array of size p*m*k
#'   rho : variance (homothetic) for each cluster, an array of size m*m*k
#'   pi : proportion for each cluster, a vector of size k
#'   llh : log likelihood with respect to the training set
#'   S : selected variables indexes, an array of size p*m*k
#'   affec : cluster affectation for each observation (of the training set)
#'
#' @export
EMGLLF <- function(phiInit, rhoInit, piInit, gamInit, mini, maxi, gamma, lambda,
  X, Y, eps, fast) {
  if (!fast) {
    # Function in R
    return(.EMGLLF_R(phiInit, rhoInit, piInit, gamInit, mini, maxi, gamma, lambda,
      X, Y, eps))
  }

  # Function in C
  .Call("EMGLLF", phiInit, rhoInit, piInit, gamInit, mini, maxi, gamma, lambda,
    X, Y, eps, PACKAGE = "valse")
}

# R version - slow but easy to read
.EMGLLF_R <- function(phiInit, rhoInit, piInit, gamInit, mini, maxi, gamma, lambda,
  X, Y, eps) {
  # Matrix dimensions
  n <- nrow(X)
  p <- ncol(X)
  m <- ncol(Y)
  k <- length(piInit)

  # Adjustments required when p==1 or m==1 (var.sel. or output dim 1)
  if (p == 1 || m == 1)
    phiInit <- array(phiInit, dim = c(p, m, k))
  if (m == 1)
    rhoInit <- array(rhoInit, dim = c(m, m, k))

  # Outputs
  phi <- phiInit
  rho <- rhoInit
  pi <- piInit
  llh <- -Inf
  S <- array(0, dim = c(p, m, k))

  # Algorithm variables
  gam <- gamInit
  Gram2 <- array(0, dim = c(p, p, k))
  ps2 <- array(0, dim = c(p, m, k))
  X2 <- array(0, dim = c(n, p, k))
  Y2 <- array(0, dim = c(n, m, k))

  for (ite in 1:maxi) {
    # Remember last pi,rho,phi values for exit condition in the end of loop
    Phi <- phi
    Rho <- rho
    Pi <- pi

    # Computations associated to X and Y
    for (r in 1:k) {
      for (mm in 1:m)
        Y2[, mm, r] <- sqrt(gam[, r]) * Y[, mm]
      for (i in 1:n)
        X2[i, , r] <- sqrt(gam[i, r]) * X[i, ]
      for (mm in 1:m)
        ps2[, mm, r] <- crossprod(X2[, , r], Y2[, mm, r])
      for (j in 1:p) {
        for (s in 1:p)
          Gram2[j, s, r] <- crossprod(X2[, j, r], X2[, s, r])
      }
    }

    ## M step

    # For pi
    b <- sapply(1:k, function(r) sum(abs(phi[, , r])))
    gam2 <- colSums(gam)
    a <- sum(gam %*% log(pi))

    # While the proportions are nonpositive
    kk <- 0
    pi2AllPositive <- FALSE
    while (!pi2AllPositive) {
      pi2 <- pi + 0.1^kk * ((1/n) * gam2 - pi)
      pi2AllPositive <- all(pi2 >= 0)
      kk <- kk + 1
    }

    # t is the largest value in the grid 0.1^kk such that the penalized
    # criterion is nonincreasing
    while (kk < 1000 && -a/n + lambda * sum(pi^gamma * b) <
      # na.rm=TRUE to handle 0*log(0)
      -sum(gam2 * log(pi2), na.rm = TRUE)/n + lambda * sum(pi2^gamma * b)) {
      pi2 <- pi + 0.1^kk * (1/n * gam2 - pi)
      kk <- kk + 1
    }
    t <- 0.1^kk
    pi <- (pi + t * (pi2 - pi))/sum(pi + t * (pi2 - pi))

    # For phi and rho
    for (r in 1:k) {
      for (mm in 1:m) {
        ps <- 0
        for (i in 1:n)
          ps <- ps + Y2[i, mm, r] * sum(X2[i, , r] * phi[, mm, r])
        nY2 <- sum(Y2[, mm, r]^2)
        rho[mm, mm, r] <- (ps + sqrt(ps^2 + 4 * nY2 * gam2[r]))/(2 * nY2)
      }
    }

    for (r in 1:k) {
      for (j in 1:p) {
        for (mm in 1:m) {
          S[j, mm, r] <- -rho[mm, mm, r] * ps2[j, mm, r] +
            sum(phi[-j, mm, r] * Gram2[j, -j, r])
          if (abs(S[j, mm, r]) <= n * lambda * (pi[r]^gamma)) {
            phi[j, mm, r] <- 0
          } else if (S[j, mm, r] > n * lambda * (pi[r]^gamma)) {
            phi[j, mm, r] <- (n * lambda * (pi[r]^gamma) - S[j, mm, r])/Gram2[j, j, r]
          } else {
            phi[j, mm, r] <- -(n * lambda * (pi[r]^gamma) + S[j, mm, r])/Gram2[j, j, r]
          }
        }
      }
    }

    ## E step

    # Precompute det(rho[,,r]) for r in 1...k
    detRho <- sapply(1:k, function(r) gdet(rho[, , r]))
    sumLogLLH <- 0
    for (i in 1:n) {
      # Update gam[,]; use log to avoid numerical problems
      logGam <- sapply(1:k, function(r) {
        log(pi[r]) + log(detRho[r]) - 0.5 *
          sum((Y[i, ] %*% rho[, , r] - X[i, ] %*% phi[, , r])^2)
      })

      logGam <- logGam - max(logGam) # adjust without changing proportions
      gam[i, ] <- exp(logGam)
      norm_fact <- sum(gam[i, ])
      gam[i, ] <- gam[i, ] / norm_fact
      sumLogLLH <- sumLogLLH + log(norm_fact) - log((2 * base::pi)^(m/2))
    }

    sumPen <- sum(pi^gamma * b)
    last_llh <- llh
    llh <- -sumLogLLH/n #+ lambda * sumPen
    dist <- ifelse(ite == 1, llh, (llh - last_llh)/(1 + abs(llh)))
    Dist1 <- max((abs(phi - Phi))/(1 + abs(phi)))
    Dist2 <- max((abs(rho - Rho))/(1 + abs(rho)))
    Dist3 <- max((abs(pi - Pi))/(1 + abs(pi)))
    dist2 <- max(Dist1, Dist2, Dist3)

    if (ite >= mini && (dist >= eps || dist2 >= sqrt(eps)))
      break
  }

  affec <- apply(gam, 1, which.max)
  list(phi = phi, rho = rho, pi = pi, llh = llh, S = S, affec = affec)
}
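# Usage sketch (guarded, never run at package load; toy sizes, pure-R path via
# fast = FALSE). All objects below are illustrative, not part of the package API.
if (FALSE) {
  n <- 30; p <- 2; m <- 2; k <- 2
  beta <- array(0, dim = c(p, m, k)); beta[, , 1] <- 1; beta[, , 2] <- -2
  data <- generateXY(n, c(0.5, 0.5), rep(0, p), beta, diag(p), diag(m))
  init <- initSmallEM(k, data$X, data$Y, fast = FALSE)
  res <- EMGLLF(init$phiInit, init$rhoInit, init$piInit, init$gamInit,
    mini = 10, maxi = 50, gamma = 1, lambda = 0.1,
    X = data$X, Y = data$Y, eps = 1e-4, fast = FALSE)
  res$pi    # estimated mixture proportions
  res$affec # cluster assignment of each observation
}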
/scratch/gouwar.j/cran-all/cranData/valse/R/EMGLLF.R
#' EMGrank
#'
#' Run a generalized EM algorithm developed for mixture of Gaussian regression
#' models with variable selection by an extension of the low rank estimator.
#' Reparametrization is done to ensure invariance by homothetic transformation.
#' It returns a collection of models, varying the number of clusters and the rank of the regression mean.
#'
#' @param Pi An initialization for pi
#' @param Rho An initialization for rho, the variance parameter
#' @param mini integer, minimum number of iterations in the EM algorithm, by default = 10
#' @param maxi integer, maximum number of iterations in the EM algorithm, by default = 100
#' @param X matrix of covariates (of size n*p)
#' @param Y matrix of responses (of size n*m)
#' @param eps real, threshold to say the EM algorithm converges, by default = 1e-4
#' @param rank vector of possible ranks
#' @param fast boolean to enable or not the C function call
#'
#' @return A list (corresponding to the model collection) defined by (phi,LLF):
#'   phi : regression mean for each cluster, an array of size p*m*k
#'   LLF : log likelihood with respect to the training set
#'
#' @export
EMGrank <- function(Pi, Rho, mini, maxi, X, Y, eps, rank, fast) {
  if (!fast) {
    # Function in R
    return(.EMGrank_R(Pi, Rho, mini, maxi, X, Y, eps, rank))
  }

  # Function in C
  .Call("EMGrank", Pi, Rho, mini, maxi, X, Y, eps, as.integer(rank), PACKAGE = "valse")
}

# helper to always have matrices as arg
# (TODO: put this elsewhere? improve? We should use by-columns storage
# everywhere... [later!])
matricize <- function(X) {
  if (!is.matrix(X))
    return(t(as.matrix(X)))
  X
}

# R version - slow but easy to read
.EMGrank_R <- function(Pi, Rho, mini, maxi, X, Y, eps, rank) {
  # matrix dimensions
  n <- nrow(X)
  p <- ncol(X)
  m <- ncol(Y)
  k <- length(Pi)

  # init outputs
  phi <- array(0, dim = c(p, m, k))
  Z <- rep(1, n)
  LLF <- 0

  # local variables
  Phi <- array(0, dim = c(p, m, k))
  deltaPhi <- c()
  sumDeltaPhi <- 0
  deltaPhiBufferSize <- 20

  # main loop
  ite <- 1
  while (ite <= mini || (ite <= maxi && sumDeltaPhi > eps)) {
    # M step: update for Beta (and then phi)
    for (r in 1:k) {
      Z_indice <- seq_len(n)[Z == r] # indices where Z == r
      if (length(Z_indice) == 0)
        next

      # U,S,V = SVD of (t(Xr)Xr)^{-1} * t(Xr) * Yr
      s <- svd(MASS::ginv(crossprod(matricize(X[Z_indice, ]))) %*%
        crossprod(matricize(X[Z_indice, ]), matricize(Y[Z_indice, ])))
      S <- s$d

      # Set m-rank(r) singular values to zero, and recompose best rank(r)
      # approximation of the initial product
      if (rank[r] < length(S))
        S[(rank[r] + 1):length(S)] <- 0
      phi[, , r] <- s$u %*% diag(S) %*% t(s$v) %*% Rho[, , r]
    }

    # Step E and computation of the loglikelihood
    sumLogLLF2 <- 0
    for (i in seq_len(n)) {
      sumLLF1 <- 0
      maxLogGamIR <- -Inf
      for (r in seq_len(k)) {
        dotProduct <- tcrossprod(Y[i, ] %*% Rho[, , r] - X[i, ] %*% phi[, , r])
        logGamIR <- log(Pi[r]) + log(gdet(Rho[, , r])) - 0.5 * dotProduct
        # Z[i] = index of max (gam[i,])
        if (logGamIR > maxLogGamIR) {
          Z[i] <- r
          maxLogGamIR <- logGamIR
        }
        sumLLF1 <- sumLLF1 + exp(logGamIR)/(2 * pi)^(m/2)
      }
      sumLogLLF2 <- sumLogLLF2 + log(sumLLF1)
    }

    LLF <- -1/n * sumLogLLF2

    # update distance parameter to check algorithm convergence (delta(phi, Phi))
    deltaPhi <- c(deltaPhi, max((abs(phi - Phi))/(1 + abs(phi)))) # TODO: explain?
    if (length(deltaPhi) > deltaPhiBufferSize)
      deltaPhi <- deltaPhi[2:length(deltaPhi)]
    sumDeltaPhi <- sum(abs(deltaPhi))

    # update other local variables
    Phi <- phi
    ite <- ite + 1
  }
  list(phi = phi, LLF = LLF)
}
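# Usage sketch (guarded, never run at package load; toy sizes, pure-R path via
# fast = FALSE): a rank-constrained fit with uniform proportions and identity
# variance parameters. All objects below are illustrative only.
if (FALSE) {
  n <- 30; p <- 2; m <- 2; k <- 2
  beta <- array(0, dim = c(p, m, k)); beta[, , 1] <- 1; beta[, , 2] <- -2
  data <- generateXY(n, c(0.5, 0.5), rep(0, p), beta, diag(p), diag(m))
  Rho <- array(diag(m), dim = c(m, m, k)) # one identity slice per cluster
  res <- EMGrank(Pi = rep(1/k, k), Rho = Rho, mini = 10, maxi = 50,
    X = data$X, Y = data$Y, eps = 1e-4, rank = c(1, 1), fast = FALSE)
  res$LLF # log-likelihood of the rank-constrained fit
}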
/scratch/gouwar.j/cran-all/cranData/valse/R/EMGrank.R
#' computeGridLambda
#'
#' Construct the data-driven grid for the regularization parameters used for the Lasso estimator
#'
#' @param phiInit an initialization for phi
#' @param rhoInit an initialization for rho
#' @param piInit an initialization for pi
#' @param gamInit an initialization for the a posteriori probabilities
#' @param X matrix of covariates (of size n*p)
#' @param Y matrix of responses (of size n*m)
#' @param gamma power of weights in the penalty
#' @param mini minimum number of iterations in EM algorithm
#' @param maxi maximum number of iterations in EM algorithm
#' @param eps threshold to stop EM algorithm
#' @param fast boolean to enable or not the C function call
#'
#' @return the grid of regularization parameters for the Lasso estimator: a vector
#'   of nonnegative values, each being the smallest regularization parameter that
#'   sets the corresponding regression coefficient to 0, so all values are relevant
#'   candidates.
#'
#' @export
computeGridLambda <- function(phiInit, rhoInit, piInit, gamInit, X, Y, gamma, mini,
  maxi, eps, fast) {
  n <- nrow(X)
  p <- ncol(X)
  m <- ncol(Y)
  k <- length(piInit)

  list_EMG <- EMGLLF(phiInit, rhoInit, piInit, gamInit, mini, maxi, gamma,
    lambda = 0, X, Y, eps, fast)

  grid <- array(0, dim = c(p, m, k))
  for (j in 1:p) {
    for (mm in 1:m)
      grid[j, mm, ] <- abs(list_EMG$S[j, mm, ])/(n * list_EMG$pi^gamma)
  }
  sort(unique(grid))
}
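# Usage sketch (guarded, never run at package load; toy data, pure-R path via
# fast = FALSE). All objects below are illustrative only.
if (FALSE) {
  n <- 30; p <- 2; m <- 2; k <- 2
  beta <- array(0, dim = c(p, m, k)); beta[, , 1] <- 1; beta[, , 2] <- -2
  data <- generateXY(n, c(0.5, 0.5), rep(0, p), beta, diag(p), diag(m))
  P <- initSmallEM(k, data$X, data$Y, fast = FALSE)
  grid <- computeGridLambda(P$phiInit, P$rhoInit, P$piInit, P$gamInit,
    data$X, data$Y, gamma = 1, mini = 10, maxi = 50, eps = 1e-4, fast = FALSE)
  head(grid) # each value is the threshold that zeroes one coefficient
}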
/scratch/gouwar.j/cran-all/cranData/valse/R/computeGridLambda.R
#' constructionModelesLassoMLE
#'
#' Construct a collection of models with the Lasso-MLE procedure.
#'
#' @param phiInit an initialization for phi, obtained from initSmallEM.R
#' @param rhoInit an initialization for rho, obtained from initSmallEM.R
#' @param piInit an initialization for pi, obtained from initSmallEM.R
#' @param gamInit an initialization for gam, obtained from initSmallEM.R
#' @param mini integer, minimum number of iterations in the EM algorithm, by default = 10
#' @param maxi integer, maximum number of iterations in the EM algorithm, by default = 100
#' @param gamma integer for the power in the penalty, by default = 1
#' @param X matrix of covariates (of size n*p)
#' @param Y matrix of responses (of size n*m)
#' @param eps real, threshold to say the EM algorithm converges, by default = 1e-4
#' @param S output of selectVariables.R
#' @param ncores Number of cores, by default = 3
#' @param fast TRUE to use compiled C code, FALSE for R code only
#' @param verbose TRUE to show some execution traces
#'
#' @return a list with several models, defined by phi (the regression parameter reparametrized),
#'   rho (the covariance parameter reparametrized), pi (the proportion parameter in the mixture model), llh
#'   (the value of the loglikelihood function for this estimator on the training dataset). The list is given
#'   for several levels of sparsity, given by several regularization parameters computed automatically.
#'
#' @export
constructionModelesLassoMLE <- function(phiInit, rhoInit, piInit, gamInit, mini,
  maxi, gamma, X, Y, eps, S, ncores, fast, verbose) {
  if (ncores > 1) {
    cl <- parallel::makeCluster(ncores, outfile = "")
    parallel::clusterExport(cl, envir = environment(), varlist = c("phiInit",
      "rhoInit", "gamInit", "mini", "maxi", "gamma", "X", "Y", "eps", "S",
      "ncores", "fast", "verbose"))
  }

  # Individual model computation
  computeAtLambda <- function(lambda) {
    if (ncores > 1)
      require("valse") # nodes start with an empty environment

    if (verbose)
      print(paste("Computations for lambda=", lambda))

    n <- nrow(X)
    p <- ncol(X)
    m <- ncol(Y)
    k <- length(piInit)
    sel.lambda <- S[[lambda]]$selected
    # col.sel = which(colSums(sel.lambda)!=0) #if boolean matrix
    col.sel <- which(sapply(sel.lambda, length) > 0) # if list of selected vars
    if (length(col.sel) == 0)
      return(NULL)

    # lambda == 0 because we compute the MLE: no penalization here
    res <- EMGLLF(array(phiInit[col.sel, , ], dim = c(length(col.sel), m, k)),
      rhoInit, piInit, gamInit, mini, maxi, gamma, 0,
      as.matrix(X[, col.sel]), Y, eps, fast)

    # Eval dimension from the result + selected
    phiLambda2 <- res$phi
    rhoLambda <- res$rho
    piLambda <- res$pi
    phiLambda <- array(0, dim = c(p, m, k))
    for (j in seq_along(col.sel))
      phiLambda[col.sel[j], sel.lambda[[j]], ] <- phiLambda2[j, sel.lambda[[j]], ]
    dimension <- length(unlist(sel.lambda))

    ## Affectations
    Gam <- matrix(0, ncol = length(piLambda), nrow = n)
    for (i in 1:n) {
      for (r in 1:length(piLambda)) {
        sqNorm2 <- sum((Y[i, ] %*% rhoLambda[, , r] - X[i, ] %*% phiLambda[, , r])^2)
        Gam[i, r] <- piLambda[r] * exp(-0.5 * sqNorm2) * det(rhoLambda[, , r])
      }
    }
    Gam2 <- Gam/rowSums(Gam)
    affec <- apply(Gam2, 1, which.max)
    proba <- Gam2
    LLH <- c(sum(log(apply(Gam, 1, sum))), (dimension + m + 1) * k - 1)

    # ## Computation of the loglikelihood
    # # Precompute det(rhoLambda[,,r]) for r in 1...k
    # detRho <- sapply(1:k, function(r) gdet(rhoLambda[, , r]))
    # sumLogLLH <- 0
    # for (i in 1:n)
    # {
    #   # Update gam[,]; use log to avoid numerical problems
    #   logGam <- sapply(1:k, function(r) {
    #     log(piLambda[r]) + log(detRho[r]) - 0.5 *
    #       sum((Y[i, ] %*% rhoLambda[, , r] - X[i, ] %*% phiLambda[, , r])^2)
    #   })
    #
    #   #logGam <- logGam - max(logGam) #adjust without changing proportions -> change the LLH
    #   gam <- exp(logGam)
    #   norm_fact <- sum(gam)
    #   sumLogLLH <- sumLogLLH + log(norm_fact) - m/2 * log(2 * base::pi)
    # }
    # llhLambda <- c(-sumLogLLH/n, (dimension + m + 1) * k - 1)

    list(phi = phiLambda, rho = rhoLambda, pi = piLambda, llh = LLH,
      affec = affec, proba = proba)
  }

  # For each lambda, computation of the parameters
  out <- if (ncores > 1) {
    parallel::parLapply(cl, 1:length(S), computeAtLambda)
  } else {
    lapply(1:length(S), computeAtLambda)
  }

  if (ncores > 1)
    parallel::stopCluster(cl)

  out
}
/scratch/gouwar.j/cran-all/cranData/valse/R/constructionModelesLassoMLE.R
#' constructionModelesLassoRank
#'
#' Construct a collection of models with the Lasso-Rank procedure.
#'
#' @param S output of selectVariables.R
#' @param k number of components
#' @param mini integer, minimum number of iterations in the EM algorithm, by default = 10
#' @param maxi integer, maximum number of iterations in the EM algorithm, by default = 100
#' @param X matrix of covariates (of size n*p)
#' @param Y matrix of responses (of size n*m)
#' @param eps real, threshold to say the EM algorithm converges, by default = 1e-4
#' @param rank.min integer, minimum rank in the low rank procedure, by default = 1
#' @param rank.max integer, maximum rank in the low rank procedure, by default = 5
#' @param ncores Number of cores, by default = 3
#' @param fast TRUE to use compiled C code, FALSE for R code only
#' @param verbose TRUE to show some execution traces
#'
#' @return a list with several models, defined by phi (the regression parameter reparametrized),
#'   rho (the covariance parameter reparametrized), pi (the proportion parameter in the mixture model), llh
#'   (the value of the loglikelihood function for this estimator on the training dataset). The list is given
#'   for several levels of sparsity, given by several regularization parameters computed automatically,
#'   and several ranks (between rank.min and rank.max).
#'
#' @export
constructionModelesLassoRank <- function(S, k, mini, maxi, X, Y, eps, rank.min,
  rank.max, ncores, fast, verbose) {
  n <- nrow(X)
  p <- ncol(X)
  m <- ncol(Y)
  L <- length(S)

  # Possible interesting ranks
  deltaRank <- rank.max - rank.min + 1
  Size <- deltaRank^k
  RankLambda <- matrix(0, nrow = Size * L, ncol = k + 1)
  for (r in 1:k) {
    # We want the table of all possible rank combinations, paired with the
    # lambdas. In the first column, each rank value is repeated
    # (rank.max-rank.min)^(k-1) times, which fills the column. In the second,
    # each value is repeated (rank.max-rank.min)^(k-2) times, and the pattern
    # is repeated (rank.max-rank.min)^2 times... In the last column, each
    # value appears once, and the pattern is repeated (rank.max-rank.min)^(k-1)
    # times.
    RankLambda[, r] <- rep(rank.min + rep(0:(deltaRank - 1), deltaRank^(r - 1),
      each = deltaRank^(k - r)), each = L)
  }
  RankLambda[, k + 1] <- rep(1:L, times = Size)

  if (ncores > 1) {
    cl <- parallel::makeCluster(ncores, outfile = "")
    parallel::clusterExport(cl, envir = environment(), varlist = c("RankLambda",
      "S", "k", "p", "m", "mini", "maxi", "X", "Y", "eps", "fast", "ncores",
      "verbose"))
  }

  computeAtLambda <- function(index) {
    lambdaIndex <- RankLambda[index, k + 1]
    rankIndex <- RankLambda[index, 1:k]
    if (ncores > 1)
      require("valse") # workers start with an empty environment

    # 'relevant' will be the set of relevant columns
    selected <- S[[lambdaIndex]]$selected
    relevant <- c()
    for (j in 1:p) {
      if (length(selected[[j]]) > 0)
        relevant <- c(relevant, j)
    }
    if (max(rankIndex) < length(relevant)) {
      phi <- array(0, dim = c(p, m, k))
      if (length(relevant) > 0) {
        res <- EMGrank(S[[lambdaIndex]]$Pi, S[[lambdaIndex]]$Rho, mini, maxi,
          X[, relevant], Y, eps, rankIndex, fast)
        llh <- c(res$LLF, sum(rankIndex * (length(relevant) - rankIndex + m)))
        phi[relevant, , ] <- res$phi
      }
      list(llh = llh, phi = phi, pi = S[[lambdaIndex]]$Pi, rho = S[[lambdaIndex]]$Rho)
    }
  }

  # For each lambda in the grid we compute the estimators
  out <- if (ncores > 1) {
    parallel::parLapply(cl, seq_len(length(S) * Size), computeAtLambda)
  } else {
    lapply(seq_len(length(S) * Size), computeAtLambda)
  }

  if (ncores > 1)
    parallel::stopCluster(cl)

  out
}
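# Small illustration (guarded, never run at load) of the rank/lambda design
# above: for k = 2 clusters, rank.min = 1, rank.max = 2 and L = 2 lambda
# indices, every rank combination is paired with every lambda index.
if (FALSE) {
  k <- 2; rank.min <- 1; rank.max <- 2; L <- 2
  deltaRank <- rank.max - rank.min + 1
  Size <- deltaRank^k
  RankLambda <- matrix(0, nrow = Size * L, ncol = k + 1)
  for (r in 1:k) {
    RankLambda[, r] <- rep(rank.min + rep(0:(deltaRank - 1), deltaRank^(r - 1),
      each = deltaRank^(k - r)), each = L)
  }
  RankLambda[, k + 1] <- rep(1:L, times = Size)
  RankLambda
  # rows: (1,1,1), (1,1,2), (1,2,1), (1,2,2), (2,1,1), (2,1,2), (2,2,1), (2,2,2)
  # i.e. (rank for cluster 1, rank for cluster 2, lambda index)
}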
/scratch/gouwar.j/cran-all/cranData/valse/R/constructionModelesLassoRank.R
#' generateXY
#'
#' Generate a sample of (X,Y) of size n
#'
#' @param n sample size
#' @param prop proportion for each cluster
#' @param meanX vector of means for the covariates (of size p)
#' @param beta regression matrix, of size p*m*k
#' @param covX covariance for covariates (of size p*p)
#' @param covY covariance for the response vector (of size m*m)
#'
#' @return list with X (of size n*p), Y (of size n*m) and class (the true
#'   cluster label of each observation, a vector of size n)
#'
#' @export
generateXY <- function(n, prop, meanX, beta, covX, covY) {
  p <- dim(covX)[1]
  m <- dim(covY)[1]
  k <- dim(beta)[3]

  X <- matrix(nrow = 0, ncol = p)
  Y <- matrix(nrow = 0, ncol = m)

  # random generation of the size of each population in X~Y (unordered)
  sizePop <- stats::rmultinom(1, n, prop)
  class <- c() # map i in 1:n --> index of class in 1:k

  for (i in 1:k) {
    class <- c(class, rep(i, sizePop[i]))
    newBlockX <- MASS::mvrnorm(sizePop[i], meanX, covX)
    X <- rbind(X, newBlockX)
    Y <- rbind(Y, t(apply(newBlockX, 1, function(row)
      MASS::mvrnorm(1, row %*% beta[, , i], covY))))
  }

  shuffle <- sample(n)
  list(X = X[shuffle, ], Y = Y[shuffle, ], class = class[shuffle])
}
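# A small illustration (guarded, never run at load) of a two-cluster draw and
# the returned pieces. Toy dimensions only.
if (FALSE) {
  p <- 2; m <- 2; k <- 2
  beta <- array(0, dim = c(p, m, k)); beta[, , 1] <- 1; beta[, , 2] <- -2
  data <- generateXY(20, c(0.5, 0.5), rep(0, p), beta, diag(p), diag(m))
  dim(data$X)       # 20 x 2
  dim(data$Y)       # 20 x 2
  table(data$class) # cluster sizes, drawn from rmultinom
}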
/scratch/gouwar.j/cran-all/cranData/valse/R/generateXY.R
#' initSmallEM
#'
#' initialization of the EM algorithm
#'
#' @param k number of components
#' @param X matrix of covariates (of size n*p)
#' @param Y matrix of responses (of size n*m)
#' @param fast boolean to enable or not the C function call
#'
#' @return a list with phiInit (the regression parameter reparametrized),
#'   rhoInit (the covariance parameter reparametrized), piInit (the proportion
#'   parameter of the mixture model), gamInit (the conditional expectation)
#'
#' @importFrom stats cutree dist hclust runif
#'
#' @export
initSmallEM <- function(k, X, Y, fast) {
  n <- nrow(X)
  p <- ncol(X)
  m <- ncol(Y)
  nIte <- 20
  Zinit1 <- array(0, dim = c(n, nIte))
  betaInit1 <- array(0, dim = c(p, m, k, nIte))
  sigmaInit1 <- array(0, dim = c(m, m, k, nIte))
  phiInit1 <- array(0, dim = c(p, m, k, nIte))
  rhoInit1 <- array(0, dim = c(m, m, k, nIte))
  Gam <- matrix(0, n, k)
  piInit1 <- matrix(0, nIte, k)
  gamInit1 <- array(0, dim = c(n, k, nIte))
  LLFinit1 <- list()

  # require(MASS) #Moore-Penrose generalized inverse of matrix
  for (repet in 1:nIte) {
    distance_clus <- dist(cbind(X, Y))
    tree_hier <- hclust(distance_clus)
    Zinit1[, repet] <- cutree(tree_hier, k)

    for (r in 1:k) {
      Z <- Zinit1[, repet]
      Z_indice <- seq_len(n)[Z == r] # indices where Z == r
      if (length(Z_indice) == 1) {
        betaInit1[, , r, repet] <- MASS::ginv(crossprod(t(X[Z_indice, ]))) %*%
          crossprod(t(X[Z_indice, ]), Y[Z_indice, ])
      } else {
        betaInit1[, , r, repet] <- MASS::ginv(crossprod(X[Z_indice, ])) %*%
          crossprod(X[Z_indice, ], Y[Z_indice, ])
      }
      sigmaInit1[, , r, repet] <- diag(m)
      phiInit1[, , r, repet] <- betaInit1[, , r, repet] #/ sigmaInit1[,,r,repet]
      rhoInit1[, , r, repet] <- solve(sigmaInit1[, , r, repet])
      piInit1[repet, r] <- mean(Z == r)
    }

    for (i in 1:n) {
      for (r in 1:k) {
        dotProduct <- tcrossprod(Y[i, ] %*% rhoInit1[, , r, repet] -
          X[i, ] %*% phiInit1[, , r, repet])
        Gam[i, r] <- piInit1[repet, r] * det(rhoInit1[, , r, repet]) * exp(-0.5 * dotProduct)
      }
      sumGamI <- sum(Gam[i, ])
      # TODO: next line is a division by zero if dotProduct is big
      gamInit1[i, , repet] <- Gam[i, ]/sumGamI
    }

    miniInit <- 10
    maxiInit <- 11

    init_EMG <- EMGLLF(phiInit1[, , , repet], rhoInit1[, , , repet], piInit1[repet, ],
      gamInit1[, , repet], miniInit, maxiInit, gamma = 1, lambda = 0, X, Y,
      eps = 1e-04, fast)
    LLFinit1[[repet]] <- init_EMG$llh
  }
  b <- which.min(unlist(LLFinit1))
  phiInit <- phiInit1[, , , b]
  rhoInit <- rhoInit1[, , , b]
  piInit <- piInit1[b, ]
  gamInit <- gamInit1[, , b]

  list(phiInit = phiInit, rhoInit = rhoInit, piInit = piInit, gamInit = gamInit)
}
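# A small illustration (guarded, never run at load) of the initializer's
# output shapes on toy data.
if (FALSE) {
  n <- 30; p <- 2; m <- 2; k <- 2
  beta <- array(0, dim = c(p, m, k)); beta[, , 1] <- 1; beta[, , 2] <- -2
  data <- generateXY(n, c(0.5, 0.5), rep(0, p), beta, diag(p), diag(m))
  init <- initSmallEM(k, data$X, data$Y, fast = FALSE)
  dim(init$phiInit)   # p x m x k
  dim(init$rhoInit)   # m x m x k
  length(init$piInit) # k
  dim(init$gamInit)   # n x k
}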
/scratch/gouwar.j/cran-all/cranData/valse/R/initSmallEM.R
#' runValse
#'
#' Main function
#'
#' @param X matrix of covariates (of size n*p)
#' @param Y matrix of responses (of size n*m)
#' @param procedure among 'LassoMLE' or 'LassoRank'
#' @param selecMod method to select a model among 'DDSE', 'Djump', 'BIC' or 'AIC'
#' @param gamma integer for the power in the penalty, by default = 1
#' @param mini integer, minimum number of iterations in the EM algorithm, by default = 10
#' @param maxi integer, maximum number of iterations in the EM algorithm, by default = 50
#' @param eps real, threshold to say the EM algorithm converges, by default = 1e-4
#' @param kmin integer, minimum number of clusters, by default = 2
#' @param kmax integer, maximum number of clusters, by default = 3
#' @param rank.min integer, minimum rank in the low rank procedure, by default = 1
#' @param rank.max integer, maximum rank in the low rank procedure, by default = 5
#' @param ncores_outer Number of cores for the outer loop on k
#' @param ncores_inner Number of cores for the inner loop on lambda
#' @param thresh real, threshold to say a variable is relevant, by default = 1e-8
#' @param grid_lambda a vector with regularization parameters if known, by default numeric(0)
#' @param size_coll_mod (Maximum) size of a collection of models, by default 50
#' @param fast TRUE to use compiled C code, FALSE for R code only
#' @param verbose TRUE to show some execution traces
#' @param plot TRUE to plot the selected models after run
#'
#' @return
#' The selected model, except if the collection contains fewer than 11 models:
#' in that case the function returns the whole collection, as capushe cannot
#' select one.
#'
#' @examples
#' n = 50; m = 10; p = 5
#' beta = array(0, dim=c(p,m,2))
#' beta[,,1] = 1
#' beta[,,2] = 2
#' data = generateXY(n, c(0.4,0.6), rep(0,p), beta, diag(0.5, p), diag(0.5, m))
#' X = data$X
#' Y = data$Y
#' res = runValse(X, Y, kmax = 5, plot=FALSE)
#' X <- matrix(runif(100), nrow=50)
#' Y <- matrix(runif(100), nrow=50)
#' res = runValse(X, Y, plot=FALSE)
#'
#' @export
runValse <- function(X, Y, procedure = "LassoMLE", selecMod = "DDSE", gamma = 1,
  mini = 10, maxi = 50, eps = 1e-04, kmin = 2, kmax = 3, rank.min = 1,
  rank.max = 5, ncores_outer = 1, ncores_inner = 1, thresh = 1e-08,
  grid_lambda = numeric(0), size_coll_mod = 50, fast = TRUE, verbose = FALSE,
  plot = TRUE) {
  n <- nrow(X)
  p <- ncol(X)
  m <- ncol(Y)

  if (verbose)
    print("main loop: over all k and all lambda")

  if (ncores_outer > 1) {
    cl <- parallel::makeCluster(ncores_outer, outfile = "")
    parallel::clusterExport(cl = cl, envir = environment(), varlist = c("X",
      "Y", "procedure", "selecMod", "gamma", "mini", "maxi", "eps", "kmin",
      "kmax", "rank.min", "rank.max", "ncores_outer", "ncores_inner", "thresh",
      "size_coll_mod", "verbose", "p", "m"))
  }

  # Compute models with k components
  computeModels <- function(k) {
    if (ncores_outer > 1)
      require("valse") # nodes start with an empty environment

    if (verbose)
      print(paste("Parameters initialization for k =", k))
    # smallEM initializes parameters by hierarchical clustering, fitting a
    # regression model in each component; it does this 20 times, and keeps the
    # values maximizing the likelihood after 10 iterations of the EM algorithm.
P <- initSmallEM(k, X, Y, fast) if (length(grid_lambda) == 0) { grid_lambda <- computeGridLambda(P$phiInit, P$rhoInit, P$piInit, P$gamInit, X, Y, gamma, mini, maxi, eps, fast) } if (length(grid_lambda) > size_coll_mod) grid_lambda <- grid_lambda[seq(1, length(grid_lambda), length.out = size_coll_mod)] if (verbose) print("Compute relevant parameters") # select variables according to each regularization parameter from the grid: # S$selected corresponding to selected variables S <- selectVariables(P$phiInit, P$rhoInit, P$piInit, P$gamInit, mini, maxi, gamma, grid_lambda, X, Y, thresh, eps, ncores_inner, fast) if (procedure == "LassoMLE") { if (verbose) print("run the procedure Lasso-MLE") # compute parameter estimations, with the Maximum Likelihood Estimator, # restricted on selected variables. models <- constructionModelesLassoMLE(P$phiInit, P$rhoInit, P$piInit, P$gamInit, mini, maxi, gamma, X, Y, eps, S, ncores_inner, fast, verbose) } else { if (verbose) print("run the procedure Lasso-Rank") # compute parameter estimations, with the Low Rank Estimator, restricted on # selected variables. models <- constructionModelesLassoRank(S, k, mini, maxi, X, Y, eps, rank.min, rank.max, ncores_inner, fast, verbose) } # warning! Some models are NULL after running selectVariables models <- models[sapply(models, function(cell) !is.null(cell))] models } # List (index k) of lists (index lambda) of models models_list <- if (ncores_outer > 1) { parallel::parLapply(cl, kmin:kmax, computeModels) } else { lapply(kmin:kmax, computeModels) } if (ncores_outer > 1) parallel::stopCluster(cl) if (!requireNamespace("capushe", quietly = TRUE)) { warning("'capushe' not available: returning all models") return(models_list) } # Get summary 'tableauRecap' from models tableauRecap <- do.call(rbind, lapply(seq_along(models_list), function(i) { models <- models_list[[i]] # For a collection of models (same k, several lambda): LLH <- sapply(models, function(model) model$llh[1]) k <- length(models[[1]]$pi) sumPen <- sapply(models, function(model) k * (dim(model$rho)[1] + sum(model$phi[,,1] != 0) + 1) - 1) data.frame(model = paste(i, ".", seq_along(models), sep = ""), pen = sumPen/n, complexity = sumPen, contrast = -LLH) })) tableauRecap <- tableauRecap[which(tableauRecap[, 4] != Inf), ] if (verbose) print(tableauRecap) if (nrow(tableauRecap) > 10) { modSel <- capushe::capushe(tableauRecap, n) indModSel <- if (selecMod == "DDSE") { as.numeric(modSel@DDSE@model) } else if (selecMod == "Djump") { as.numeric(modSel@Djump@model) } else if (selecMod == "BIC") { modSel@BIC_capushe$model } else if (selecMod == "AIC") { modSel@AIC_capushe$model } listMod <- as.integer(unlist(strsplit(as.character(indModSel), "[.]"))) modelSel <- models_list[[listMod[1]]][[listMod[2]]] modelSel$models <- tableauRecap if (plot) plot_valse(X, Y, modelSel) return(modelSel) } tableauRecap }
/scratch/gouwar.j/cran-all/cranData/valse/R/main.R
utils::globalVariables(c("Var1", "Var2", "X1", "X2", "value")) #, package="valse")

#' Plot
#'
#' A function which plots relevant parameters.
#'
#' @param X matrix of covariates (of size n*p)
#' @param Y matrix of responses (of size n*m)
#' @param model the model constructed by valse procedure
#' @param comp TRUE to enable pairwise clusters comparison
#' @param k1 index of the first cluster to be compared
#' @param k2 index of the second cluster to be compared
#'
#' @importFrom ggplot2 ggplot aes ggtitle geom_tile geom_line scale_fill_gradient2 geom_boxplot theme
#' @importFrom cowplot background_grid
#' @importFrom reshape2 melt
#'
#' @return No return value (only plotting).
#'
#' @export
plot_valse <- function(X, Y, model, comp = FALSE, k1 = NA, k2 = NA)
{
  n <- nrow(X)
  K <- length(model$pi)

  ## regression matrices
  gReg <- list()
  for (r in 1:K) {
    Melt <- reshape2::melt(t((model$phi[, , r])))
    gReg[[r]] <- ggplot2::ggplot(data = Melt, ggplot2::aes(x = Var1, y = Var2, fill = value)) +
      ggplot2::geom_tile() +
      ggplot2::scale_fill_gradient2(low = "blue", high = "red", mid = "white",
        midpoint = 0, space = "Lab") +
      ggplot2::ggtitle(paste("Regression matrices in cluster", r))
  }
  print(gReg)

  ## Differences between two clusters
  if (comp) {
    if (is.na(k1) || is.na(k2))
      stop("k1 and k2 must be integers, representing the clusters you want to compare")
    Melt <- reshape2::melt(t(model$phi[, , k1] - model$phi[, , k2]))
    gDiff <- ggplot2::ggplot(data = Melt, ggplot2::aes(x = Var1, y = Var2, fill = value)) +
      ggplot2::geom_tile() +
      ggplot2::scale_fill_gradient2(low = "blue", high = "red", mid = "white",
        midpoint = 0, space = "Lab") +
      ggplot2::ggtitle(paste("Difference between regression matrices in cluster",
        k1, "and", k2))
    print(gDiff)
  }

  ### Covariance matrices
  matCov <- matrix(NA, nrow = dim(model$rho[, , 1])[1], ncol = K)
  for (r in 1:K)
    matCov[, r] <- diag(model$rho[, , r])
  MeltCov <- reshape2::melt(matCov)
  gCov <- ggplot2::ggplot(data = MeltCov, ggplot2::aes(x = Var1, y = Var2, fill = value)) +
    ggplot2::geom_tile() +
    ggplot2::scale_fill_gradient2(low = "blue", high = "red", mid = "white",
      midpoint = 0, space = "Lab") +
    ggplot2::ggtitle("Covariance matrices (diag., one row per cluster)")
  print(gCov)

  ### Proportions
  gam2 <- matrix(NA, ncol = 2, nrow = n)
  for (i in 1:n)
    gam2[i, ] <- c(model$proba[i, model$affec[i]], model$affec[i])
  bp <- ggplot2::ggplot(data.frame(gam2), ggplot2::aes(x = X2, y = X1, color = X2, group = X2)) +
    ggplot2::geom_boxplot() +
    ggplot2::theme(legend.position = "none") +
    cowplot::background_grid(major = "xy", minor = "none") +
    ggplot2::ggtitle("Assignment boxplot per cluster")
  print(bp)
}
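# Illustrative sketch (not part of the package): the heatmap pattern used in
# plot_valse() above. reshape2::melt() turns a matrix into (Var1, Var2, value)
# triples that ggplot2::geom_tile() can draw. Toy example on a random matrix:
if (requireNamespace("reshape2", quietly = TRUE) &&
    requireNamespace("ggplot2", quietly = TRUE)) {
  M <- matrix(rnorm(12), nrow = 3)
  longM <- reshape2::melt(M)  # columns Var1, Var2, value
  g <- ggplot2::ggplot(longM, ggplot2::aes(x = Var1, y = Var2, fill = value)) +
    ggplot2::geom_tile() +
    ggplot2::scale_fill_gradient2(low = "blue", mid = "white", high = "red",
                                  midpoint = 0)
  # print(g)  # uncomment to draw the toy heatmap
}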
/scratch/gouwar.j/cran-all/cranData/valse/R/plot_valse.R
#' selectVariables
#'
#' For a given lambda, construct the sets of relevant variables for each cluster.
#'
#' @param phiInit an initial estimator for phi (size: p*m*k)
#' @param rhoInit an initial estimator for rho (size: m*m*k)
#' @param piInit an initial estimator for pi (size: k)
#' @param gamInit an initial estimator for gamma
#' @param mini minimum number of iterations in EM algorithm
#' @param maxi maximum number of iterations in EM algorithm
#' @param gamma power in the penalty
#' @param glambda grid of regularization parameters
#' @param X matrix of regressors
#' @param Y matrix of responses
#' @param thresh real, threshold to say a variable is relevant, by default = 1e-8
#' @param eps threshold to say that EM algorithm has converged
#' @param ncores Number of cores for parallel execution (1 to disable)
#' @param fast boolean to enable or not the C function call
#'
#' @return a list, varying lambda in a grid, with selected (the indices of variables that are selected),
#'   Rho (the covariance parameter, reparametrized), Pi (the proportion parameter)
#'
#' @export
selectVariables <- function(phiInit, rhoInit, piInit, gamInit, mini, maxi, gamma,
  glambda, X, Y, thresh = 1e-08, eps, ncores = 3, fast)
{
  if (ncores > 1) {
    cl <- parallel::makeCluster(ncores, outfile = "")
    parallel::clusterExport(cl = cl, varlist = c("phiInit", "rhoInit", "gamInit",
      "mini", "maxi", "glambda", "X", "Y", "thresh", "eps"), envir = environment())
  }

  # Computation for a fixed lambda
  computeCoefs <- function(lambda)
  {
    params <- EMGLLF(phiInit, rhoInit, piInit, gamInit, mini, maxi, gamma, lambda,
      X, Y, eps, fast)
    p <- ncol(X)
    m <- ncol(Y)
    # selectedVariables: list where element j contains the vector of selected
    # variables in [1,m]
    selectedVariables <- lapply(1:p, function(j) {
      # from the boolean matrix mxk of selected variables, obtain the
      # corresponding boolean m-vector, and finally return the corresponding indices
      if (m > 1) {
        seq_len(m)[apply(abs(params$phi[j, , ]) > thresh, 1, any)]
      } else {
        if (any(params$phi[j, , ] > thresh)) 1 else numeric(0)
      }
    })
    list(selected = selectedVariables, Rho = params$rho, Pi = params$pi)
  }

  # For each lambda in the grid, we compute the coefficients
  out <- if (ncores > 1) {
    parallel::parLapply(cl, glambda, computeCoefs)
  } else {
    lapply(glambda, computeCoefs)
  }
  if (ncores > 1) parallel::stopCluster(cl)

  # Suppress models which are computed twice
  # (an alternative would be hashing each model, e.g. with digest::sha1,
  # and dropping the duplicated hashes)
  selec <- lapply(out, function(model) model$selected)
  ind_dup <- duplicated(selec)
  ind_uniq <- which(!ind_dup)
  out2 <- list()
  for (l in 1:length(ind_uniq))
    out2[[l]] <- out[[ind_uniq[l]]]
  out2
}
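# Illustrative sketch (not part of the package): how the deduplication step
# above works. duplicated() on a list of selected-variable sets flags models
# whose selections coincide, so each distinct selection is kept exactly once.
sel_sets <- list(c(1, 3), c(2), c(1, 3), c(2, 5))
keep <- !duplicated(sel_sets)
sel_sets[keep]  # drops the second c(1, 3)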
/scratch/gouwar.j/cran-all/cranData/valse/R/selectVariables.R
# Compute the determinant of a matrix, which can be 1x1 (scalar)
gdet <- function(M)
{
  if (is.matrix(M)) det(M) else M[1]
}
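# Illustrative usage (not part of the package): gdet() falls back to the
# value itself when the input is a "1x1 matrix" stored as a plain number.
gdet(matrix(c(2, 0, 0, 3), nrow = 2))  # 6, ordinary determinant
gdet(5)                                # 5, scalar case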
/scratch/gouwar.j/cran-all/cranData/valse/R/util.R
#############################################################################
#' Function to throw an error on an invalid directory or file, or if the file
#' is not readable
#' @param filename name of a file or directory
#' @return 0 if success; stops with an error on failure
#' @examples
#' test_file_exist_read(system.file("extdata", "blank.txt",
#'   package = "valueEQ5D"))
#' @export
test_file_exist_read <- function(filename) {
  ## Checking if the file exists
  if (file.exists(filename)) {
    ## Checking if the file is accessible to read
    if (file.access(filename, 0) != 0) {
      stop(" Error reading file ")
    }
    return(0)
  } else {
    stop(" Invalid directory or file ")
  }
}
###############################################################################
#' Function to check that the given column exists
#' @param column_name a column name
#' @param data data frame
#' @return 0 if success, -1 if failure
#' @examples
#' check_column_exist("age", data.frame(
#'   age = rep(20, 4), sex = rep("male", 4),
#'   stringsAsFactors = FALSE
#' ))
#' @export
check_column_exist <- function(column_name, data) {
  one <- toupper(colnames(data))
  two <- toupper(column_name)
  if (any(one == two)) {
    return(0)
  } else {
    return(-1)
  }
}
###############################################################################
#' Function to return the column number for a column name
#' @param data a data frame
#' @param column_name column name of the data frame
#' @return the column number if success; stops with an error if the column
#'   does not exist
#' @examples
#' get_column_no_colnames(data.frame(age = rep(20, 4),
#'   sex = rep("male", 4)), "sex")
#' @export
get_column_no_colnames <- function(data, column_name) {
  data_column_names <- toupper(colnames(data))
  if (any(data_column_names == toupper(column_name))) {
    column_no <- which(data_column_names == toupper(column_name))
    return(column_no)
  } else {
    stop("Column name does not exist")
  }
}
################################################################################
#' Function to return a frequency table
#' @param v a vector
#' @return frequency table with absolute, cumulative and relative frequencies
#' @examples
#' get_frequency_table(c(1, 1, 1, 12, 2))
#' @export
get_frequency_table <- function(v) {
  if (!is.null(v)) {
    res <- cbind(Freq = table(v), Cumul = cumsum(table(v)),
      relative = prop.table(table(v)))
    scores <- rownames(res)
    res <- cbind(scores, res)
    return(res)
  } else {
    stop("Null vector")
  }
}
################################################################################
#' Function to return the mode
#' @param v a vector
#' @return the mode if success; stops with an error for non-numeric data
#' @examples
#' get_mode_for_vec(c(1, 1, 2, 3))
#' @export
get_mode_for_vec <- function(v) {
  if (is.numeric(v)) {
    uniqv <- unique(v)
    uniqv[which.max(tabulate(match(v, uniqv)))]
  } else {
    stop("Non numeric data")
  }
}
###############################################################################
#' Function to check the format of a numeric column when the values are not
#' bounded
#' @param vec a column vector
#' @param nrcode non response code corresponding to the column
#' @return 0 if success; stops with an error on failure
#' @examples
#' test_data_num_norange(c(1, 2, 3, 4, -99), -99)
#' @export
test_data_num_norange <- function(vec, nrcode = NA) {
  entry <- vec
  if (is.na(nrcode)) {
    no_nrcode_entries <- entry[!is.na(entry)]
  } else {
    no_nrcode_entries <- entry[entry != nrcode & !is.na(entry)]
  }
  if (is.numeric(no_nrcode_entries)) {
    return(0)
  } else {
    stop("Some values other than the NR code are not numeric")
  }
}
################################################################################
#' Function to return descriptive statistics: sum, number of observations,
#' mean, mode, median, range, standard deviation and standard error
#' @param colum column
#' @param column_name the column name
#' @param nrcode non response code corresponding to the column
#' @return the descriptive statistics if success; stops with an error on
#'   non-numeric data
#' @examples
#' descriptive_stat_data_column(c(1, 2, 3, 4, NA), "scores", NA)
#' @import stats
#' @export
descriptive_stat_data_column <- function(colum, column_name, nrcode = NA) {
  vec <- colum
  if (test_data_num_norange(vec, nrcode) == 0) {
    this_column <- colum
    if (is.na(nrcode)) {
      this_column <- this_column[!is.na(colum)]
    } else {
      this_column <- this_column[colum != nrcode & !is.na(colum)]
    }
    this_sum <- sum(this_column)
    this_av <- mean(this_column)
    this_med <- median(this_column)
    this_mode <- get_mode_for_vec(this_column)
    this_range_low <- min(this_column)
    this_range_high <- max(this_column)
    this_sd <- sd(this_column)
    this_se <- this_sd / sqrt(length(this_column))
    results <- matrix(c(this_sum, this_av, this_sd, this_med, this_mode,
      this_se, this_range_low, this_range_high, length(this_column)),
      byrow = TRUE, nrow = 1)
    colnames(results) <- c("Sum", "Mean", "SD", "Median", "Mode", "SE",
      "Minimum", "Maximum", "Count")
    rownames(results) <- column_name
    return(results)
  }
}
################################################################################
#' Function to convert a number to individual digits
#' @param this_number a number
#' @return digits
#' @examples
#' convert_number_to_digits(234)
#' @export
convert_number_to_digits <- function(this_number) {
  string_number <- toString(this_number)
  result <- suppressWarnings(as.numeric(strsplit(string_number, "")[[1]]))
  if (any(is.na(result))) {
    stop("The responses are not valid")
  } else {
    return(result)
  }
}
################################################################################
#' Function to return the column number for a given column name (from a list
#' of possible column names that may have been used) in a data frame
#' @param column_names column names in a data frame
#' @param data a data frame
#' @return the column number
#' @examples
#' get_colno_existing_colnames(c("age"), data.frame(age = rep(20, 4),
#'   gender = rep("male", 4)))
#' @export
get_colno_existing_colnames <- function(column_names, data) {
  ans_columns <- unlist(lapply(column_names, check_column_exist, data))
  if (sum(ans_columns == 0) > 0) {
    this_col <- which(ans_columns == 0)
    colnum <- get_column_no_colnames(data, column_names[this_col])
    return(colnum)
  } else {
    stop("No column exists with specified column names")
  }
}
################################################################################
#' Function to subset the data based on the values in the gender and age
#' columns
#' @param data a data frame
#' @param gender groupby gender, either male or female expected
#' @param agelimit list of ages, e.g. c(10, 20)
#' @return the subsetted data frame
#' @examples
#' subset_gender_age_to_group(data.frame(age = rep(20, 4), gender =
#'   rep("male", 4)), "male", c(10, 70))
#' @export
subset_gender_age_to_group <- function(data, gender, agelimit) {
  if (is.null(gender) || toupper(gender) == "NA" || is.na(gender)) {
    working_data <- data # if no groupby option given
  } else { # groupby option is given
    # groupby is male or female
    if (toupper(gender) == "MALE" || toupper(gender) == "FEMALE") {
      gendercolumn <- c("sex", "gender", "male", "female", "f", "m")
      colnum <- get_colno_existing_colnames(gendercolumn, data)
      data_gender <- unlist(data[colnum])
      if (toupper(gender) == "MALE") { # groupby is male
        malech <- c("M", "m", "male", "MALE", "Male")
        charinccol <- malech[malech %in% data_gender]
        working_data <- data[is.element(data_gender, charinccol), ]
      } else { # groupby is female
        femalech <- c("F", "f", "female", "FEMALE", "Female")
        charinccol <- femalech[femalech %in% data_gender]
        working_data <- data[is.element(data_gender, charinccol), ]
      }
    } else {
      stop("Group by should be either male or female")
    }
  }
  if (is.null(agelimit) || sum(toupper(agelimit) == "NA") != 0 ||
      sum(is.na(agelimit)) != 0) { # no agelimit option given
    working_data <- working_data
  } else { # agelimit option given
    lowerlimit <- agelimit[1]
    upperlimit <- agelimit[2]
    age_columns <- c("age")
    colnum <- get_colno_existing_colnames(age_columns, working_data)
    working_data <- working_data[working_data[colnum] >= lowerlimit &
      working_data[colnum] <= upperlimit, ]
  }
  return(working_data)
}
###############################################################################
#' Function to add an underscore for texts with spaces in between
#' @param this_string a string
#' @return string where the spaces are replaced by "_"
#' @examples
#' replace_space_underscore("Sri Lanka")
#' @export
replace_space_underscore <- function(this_string) {
  sep_string <- unlist(strsplit(this_string, " "))
  if (length(sep_string) < 1) {
    stop("Error in separating the string")
  } else {
    new_string <- sep_string[1]
    if (length(sep_string) > 1) {
      for (i in 2:length(sep_string)) {
        new_string <- cbind(new_string, sep_string[i])
      }
      new_string <- paste(new_string, collapse = "_")
    } else {
      new_string <- sep_string
    }
    return(new_string)
  }
}
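# Illustrative sketch (not part of this file's exports): chaining the helpers
# above. Subset a toy data set by gender and age, then summarise a column.
toy <- data.frame(age = c(15, 25, 35, 45),
                  sex = c("M", "F", "M", "F"),
                  score = c(0.2, 0.4, 0.6, 0.8),
                  stringsAsFactors = FALSE)
sub <- subset_gender_age_to_group(toy, "male", c(10, 40))
descriptive_stat_data_column(sub$score, "score", NA)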
/scratch/gouwar.j/cran-all/cranData/valueEQ5D/R/basic_checks.R
#' EQ-5D-3L tariffs using TTO for different countries
#' @format A 38 by 28 dataframe
#' @source \strong{Argentina}: Table 3 column 5 page 560 in Augustovski et al (2009) <doi:10.1111/j.1524-4733.2008.00468.x>
#' @source \strong{Australia}: Table 4 column 6 page 933 in Viney et al (2011) <doi:10.1016/j.jval.2011.04.009>
#' @source \strong{Brazil}: Table 2 column 8 page 21 in Santos et al (2016) <doi:10.1177/0272989X15613521>
#' @source \strong{Canada}: Table 4 column 2 page 8 in Bansback et al (2012) <https://doi.org/10.1371/journal.pone.0031115>
#' @source \strong{Chile}: Table 2 column 5 page 1137 in Zarate et al (2011) <doi:10.1016/j.jval.2011.09.002>
#' @source \strong{China}: Table 4 column 4 page 603 in Liu et al (2014) <doi:10.1016/j.jval.2014.05.007>
#' @source \strong{Denmark}: Table 4 column 2 page 463 in Wittrup-Jensen et al (2009) <doi:10.1177/1403494809105287>
#' @source \strong{France}: Equation page 61 in Chevalier et al (2013) <doi:10.1007/s10198-011-0351-x>
#' @source \strong{Germany}: Table 4 column 2 page 129 in Greiner et al (2005) <doi:10.1007/s10198-004-0264-z>
#' @source \strong{Hungary}: Table 2 column 11 page 1238 in Rencz et al (2020) <doi:10.1016/j.jval.2020.03.019>
#' @source \strong{Iran}: Table 3 column 8 page 174 in Goudarzi et al (2019) <doi:10.1016/j.vhri.2019.01.007>
#' @source \strong{Italy}: Table 4 column 5 page 820 in Scalone et al (2013) <http://dx.doi.org/10.1016/j.jval.2013.04.008>
#' @source \strong{Japan}: Table 4 column 1 page 41 in Tsuchiya et al (2002) <https://doi.org/10.1002/hec.673>
#' @source \strong{South Korea}: Table 3 column 4 page 1191 in Lee et al <doi:10.1111/j.1524-4733.2009.00579.x>
#' @source \strong{Malaysia}: Table 4 column 5 page 588 in Aryani et al <doi:10.1016/j.jval.2011.11.024>
#' @source \strong{Netherlands}: Table 5 column 3 page 1128 in Lamers et al <doi:10.1002/hec.1124>
#' @source \strong{Poland}: Table 5 column 2 page 293 in Golicki et al <https://doi.org/10.1111/j.1524-4733.2009.00596.x>
#' @source \strong{Portugal}: Table 4 column 6 page 418 in Ferreira et al <doi:10.1007/s11136-013-0448-z>
#' @source \strong{Singapore}: Equation page 504 in Nan Luo et al <doi:10.1007/s40273-014-0142-1>
#' @source \strong{Spain}: Table 3 column 4 page 13 in Badia et al (2001) <doi:10.1177/0272989X0102100102>
#' @source \strong{Sri Lanka}: Table 2 column 8 page 1791 in Kularatna et al (2015) <doi:10.1007/s11136-014-0906-2>
#' @source \strong{Sweden}: Table 2 column 8 page 436 in Burström et al (2014) <doi:10.1007/s11136-013-0496-4>
#' @source \strong{Taiwan}: Table 2 column 4 page 702 in Lee et al (2013) <http://dx.doi.org/10.1016/j.jfma.2012.12.015>
#' @source \strong{Thailand}: Table 1 column 2 page 1144 (parameters like MO3 are calculated) Tongsiri et al (2011) <doi:10.1016/j.jval.2011.06.005>
#' @source \strong{Trinidad and Tobago}: Table 4 page 65 in Bailey et al (2016) <http://dx.doi.org/10.1016/j.vhri.2016.07.010>
#' @source \strong{UK}: Table 1 column 2 page 1103 in Dolan et al (1997) <http://dx.doi.org/10.1097/00005650-199711000-00002>
#' @source \strong{USA}: Table 5 column 2 page 214 in Shaw et al (2005) <doi:10.1097/00005650-200503000-00003>
#' @source \strong{Zimbabwe}: Table 5 column 3 page 7 in Jelsma et al (2003) <https://doi.org/10.1186/1478-7954-1-11>
"EQ5D3L_tariffs_TTO.df"

#' EQ-5D-3L tariffs using VAS for different countries
#' @format A 34 by 12 dataframe
#' @source \strong{Argentina}: Table 3 column 2 page 560 in Augustovski et al (2009) <doi:10.1111/j.1524-4733.2008.00468.x>
#' @source \strong{Belgium}: Equation 2 page 208 in Cleemput et al (2010) <doi:10.1007/s10198-009-0167-0>
#' @source \strong{Denmark}: Table 2.3 page 14 in Szende et al (2014) <doi:10.1007/978-94-007-7596-1>
#' @source \strong{Europe}: Table 2.3 page 14 in Szende et al (2014) <doi:10.1007/978-94-007-7596-1>
#' @source \strong{Finland}: Table 2.3 page 14 in Szende et al (2014) <doi:10.1007/978-94-007-7596-1>
#' @source \strong{Germany}: Table 2.3 page 14 in Szende et al (2014) <doi:10.1007/978-94-007-7596-1>
#' @source \strong{Malaysia}: Table 4 column 6 page S88 in Yusof et al (2019) <doi:10.1016/j.jval.2011.11.024>
#' @source \strong{New Zealand}: Equation 2 page 541 in Devlin et al <doi:10.1002/hec.741>
#' @source \strong{Slovenia}: Table 2.3 page 14 in Szende et al (2014) <doi:10.1007/978-94-007-7596-1>
#' @source \strong{Spain}: Table 2.3 page 14 in Szende et al (2014) <doi:10.1007/978-94-007-7596-1> (this is not shown in euroqol website)
#' @note Sweden VAS values could not be obtained
#' @source \strong{UK}: Table 2.3 page 14 in Szende et al (2014) <doi:10.1007/978-94-007-7596-1>
"EQ5D3L_tariffs_VAS.df"

#' EQ-5D-5L tariffs for different countries
#' @format A 34 by 22 data frame
#' @source \strong{Canada}: Table 2 column 5 page 103 in Xie et al (2016) <doi:10.1097/MLR.0000000000000447>
#' @source \strong{China}: Table 4 column 4 page 667 in Luo et al (2017) <doi:10.1016/j.jval.2016.11.016>
#' @source \strong{England}: Table 2 column 2 page 17 in Devlin et al (2018) <doi:10.1002/hec.3564>
#' @source \strong{Ethiopia}: Table 3 column 8 page 12 in Welie et al (2019) <doi:10.1016/j.vhri.2019.08.475>
#' @source \strong{France}: Table 3 column 2-6 page 12 in Andrade et al (2019) <doi:10.1007/s40273-019-00876-4>
#' @source \strong{Germany}: Table column 9 page 670 in Ludwig et al (2018) <doi:10.1007/s40273-018-0615-8>
#' @source \strong{Hong Kong}: Table 3 column 8 page 244 in Wong et al (2018) <doi:10.1007/s40271-017-0278-0>
#' @source \strong{Hungary}: Table 3 column 14 page 1241 in Rencz et al (2020) <doi:10.1016/j.jval.2020.03.019>
#' @source \strong{Indonesia}: Table 3 column 8 page 1162 in Purba et al (2017) <doi:10.1007/s40273-017-0538-9>
#' @source \strong{Ireland}: Table 2 column 2 page 1348 in Hobbins et al (2016) <doi:10.1007/s40273-018-0690-x>
#' @source \strong{Japan}: Table 2 column 7 page 651 in Shiroiwa et al (2016) <doi:10.1016/j.jval.2016.03.1834>
#' @source \strong{Korea}: Table 5 column 6 page 1851 in Kim et al (2016) <doi:10.1007/s11136-015-1205-2>
#' @source \strong{Malaysia}: Table 2 column 9 page 720 in Shafie et al (2019) <doi:10.1007/s40273-018-0758-7>
#' @source \strong{Netherlands}: Table 4 column 8 page 350 in Versteegh et al (2016) <doi:10.1016/j.jval.2016.01.003>
#' @source \strong{Poland}: Table 2 column 7 in Golicki et al <doi:10.1007/s40273-019-00811-7>
#' @source \strong{Portugal}: Table 3 column 4 in Ferreira et al (2014) <doi:10.1007/s11136-019-02226-5>
#' @source \strong{Spain}: Table 1 column 9 page 5 in Ramos-Goñi et al (2018) <https://doi.org/10.1016/j.jval.2017.10.023>
#' @source \strong{Taiwan}: Table 2 column 4 page 9 in Lin et al (2018) <https://doi.org/10.1371/journal.pone.0209344>
#' @source \strong{Thailand}: Table 3 column 6 page 4 in Pattanaphesaj et al (2018) <doi:10.1080/14737167.2018>
#' @source \strong{Uruguay}: Table 2.3 column 5 page 29 in Augustovski et al (2016) <doi:10.1007/s11136-015-1086-4>
#' @source \strong{USA}: Table 2 column 2 page 939 in Pickard et al (2019) <doi:10.1016/j.jval.2019.02.009>
#' @source \strong{Vietnam}: Table 3 column 5 in Mai et al (2020) <doi:10.1007/s11136-020-02469-7>
"EQ5D5L_tariffs.df"

#' Probability matrix for the cross walk
#' @format A dataframe with 3125 rows and 243 columns
#' @source https://euroqol.org/wp-content/uploads/2018/02/EQ-5D-5L_Crosswalk_model_and__methodology2.pdf
#' @source Van Hout et al (2012) <doi:10.1016/j.jval.2012.02.008>
"Probability_matrix_crosswalk.df"

#' EQ-5D-3L index values (for each set of responses of 3L) for different countries
#' @note For testing purposes - not required by users
#' @note The VAS value for state 3333 was reported as -0.022 but -0.034 was
#'   obtained; this needs to be checked with the authors
#' @note There were some implausible orderings; these were hard coded only for
#'   the Australian value sets
#' @format A 243 by 38 dataframe
#' @source \strong{Argentina}: TTO - Appendix A in Augustovski et al (2009) <doi:10.1111/j.1524-4733.2008.00468.x>
#' @source \strong{Argentina}: VAS - Appendix A in Augustovski et al (2009) <doi:10.1111/j.1524-4733.2008.00468.x>
#' @source \strong{Australia}: Supplementary in Viney et al (2011) <doi:10.1016/j.jval.2011.04.009>
#' @source \strong{Belgium}: VAS - Selected example page 209 in Cleemput et al (2010) <doi:10.1007/s10198-009-0167-0>
#' @source \strong{Brazil}: Appendix 1 in Santos et al (2016) <doi:10.1177/0272989X15613521>
#' @source \strong{Canada}: Supplementary material Table S2 in Bansback et al (2012) <https://doi.org/10.1371/journal.pone.0031115>
#' @source \strong{Chile}: Table 4 page 1139 in Zarate et al (2011) <doi:10.1016/j.jval.2011.09.002>
#' @source \strong{China}: Supplementary materials Appendix 2 in Liu et al (2014) <doi:10.1016/j.jval.2014.05.007>
#' @source \strong{Denmark}: TTO - Appendix in Wittrup-Jensen et al (2009) <doi:10.1177/1403494809105287>
#' @source \strong{Denmark}: VAS - Table 2.3 page 14 in Szende et al (2014) <doi:10.1007/978-94-007-7596-1>
#' @source \strong{Europe}: VAS - Table 2.3 page 14 in Szende et al (2014) <doi:10.1007/978-94-007-7596-1>
#' @source \strong{Finland}: VAS - Table 2.3 page 14 in Szende et al (2014) <doi:10.1007/978-94-007-7596-1>
#' @source \strong{France}: Selected example page 61 in Chevalier et al (2013) <doi:10.1007/s10198-011-0351-x>
#' @source \strong{Germany}: TTO - Selected examples Table 6 page 130 in Greiner et al (2005) <doi:10.1007/s10198-004-0264-z>
#' @source \strong{Germany}: VAS - Table 2.3 page 14 in Szende et al (2014) <doi:10.1007/978-94-007-7596-1>
#' @source \strong{Iran}: Selected example page 173 in Goudarzi et al (2019) <doi:10.1016/j.vhri.2019.01.007>
#' @source \strong{Italy}: Supplementary materials Appendix 2 in Scalone et al (2013) <http://dx.doi.org/10.1016/j.jval.2013.04.008>
#' @source \strong{Japan}: Tsuchiya et al (2002) <https://doi.org/10.1002/hec.673>
#' @source \strong{Korea}: Selected example page 1191 in Lee et al <doi:10.1111/j.1524-4733.2009.00579.x>
#' @source \strong{Malaysia}: VAS - Supplementary material Appendix 3 in Yusof et al (2019) <doi:10.1016/j.jval.2011.11.024>
#' @source \strong{Netherlands}: Lamers et al <doi:10.1002/hec.1124>
#' @source \strong{New Zealand}: VAS - Selected examples Table 7 column 5 page 542 in Devlin et al <doi:10.1002/hec.741>
#' @source \strong{Poland}: Table 6 page 294 in Golicki et al <https://doi.org/10.1111/j.1524-4733.2009.00596.x>
#' @source \strong{Portugal}: Supplementary Material 1 in Ferreira et al <doi:10.1007/s11136-013-0448-z>
#' @source \strong{Slovenia}: VAS - Table 2.3 page 14 in Szende et al (2014) <doi:10.1007/978-94-007-7596-1>
#' @source \strong{Singapore}: Selected examples in Nan Luo et al <doi:10.1007/s40273-014-0142-1>
#' @source \strong{Spain}: TTO - Badia et al (2001) <doi:10.1177/0272989X0102100102>
#' @source \strong{Spain}: VAS - Table 2.3 page 14 in Szende et al (2014) <doi:10.1007/978-94-007-7596-1>
#' @source \strong{Sri Lanka}: Selected example page 1789 in Kularatna et al (2015) <doi:10.1007/s11136-014-0906-2>
#' @source \strong{Sweden}: Supplementary Table 3 in Burström et al (2014) <doi:10.1007/s11136-013-0496-4>
#' @source \strong{Taiwan}: Table 3 page 703 in Lee et al (2013) <http://dx.doi.org/10.1016/j.jfma.2012.12.015>
#' @source \strong{Thailand}: Tongsiri et al (2011) <doi:10.1016/j.jval.2011.06.005>
#' @source \strong{Trinidad and Tobago}: Table 5 page 66 in Bailey et al (2016) <http://dx.doi.org/10.1016/j.vhri.2016.07.010>
#' @source \strong{UK}: TTO - Selected examples Table 3 page 1105 in Dolan et al (1997) <http://dx.doi.org/10.1097/00005650-199711000-00002>
#' @source \strong{UK}: VAS - Table 2.3 page 14 in Szende et al (2014) <doi:10.1007/978-94-007-7596-1>
#' @source \strong{USA}: Appendix 1 page 218 in Shaw et al (2005) <doi:10.1097/00005650-200503000-00003>
#' @source \strong{Zimbabwe}: Jelsma et al (2003) <https://doi.org/10.1186/1478-7954-1-11>
"EQ5D3L_indexvalues.df"

#' EQ-5D-5L index values
#' @note For testing purposes - not required by users
#' @format A 3125 by 22 dataframe
#' @source \strong{Canada}: Selected example Table A3 in Xie et al (2016) <doi:10.1097/MLR.0000000000000447>
#' @source \strong{China}: Supplementary Material 1 in Luo et al (2017) <doi:10.1016/j.jval.2016.11.016>
#' @source \strong{England}: Selected example Table 3 page 18 and supporting information in Devlin et al (2018) <doi:10.1002/hec.3564>
#' @source \strong{Ethiopia}: Table 3 column 8 page 12 and supporting information in Welie et al (2019) <doi:10.1016/j.vhri.2019.08.475>
#' @source \strong{France}: Table 3 column 2-6 page 12 and supporting information in Andrade et al (2020) <doi:10.1007/s40273-019-00876-4>
#' @source \strong{Germany}: Supplementary Material 1 in Ludwig et al (2018) <doi:10.1007/s40273-018-0615-8>
#' @source \strong{Hong Kong}: Selected examples Table 3 page 244 in Wong et al (2018) <doi:10.1007/s40271-017-0278-0>
#' @source \strong{Indonesia}: Selected examples page 1162 in Purba et al (2017) <doi:10.1007/s40273-017-0538-9>
#' @source \strong{Ireland}: Selected example Table 2 page 1348 in Hobbins et al (2016) <doi:10.1007/s40273-018-0690-x>
#' @source \strong{Japan}: Shiroiwa et al (2016) <doi:10.1016/j.jval.2016.03.1834>
#' @source \strong{Korea}: Selected example page 1848 in Kim et al (2016) <doi:10.1007/s11136-015-1205-2>
#' @source \strong{Malaysia}: Shafie et al (2019) <doi:10.1007/s40273-018-0758-7>
#' @source \strong{Netherlands}: Versteegh et al (2016) <doi:10.1016/j.jval.2016.01.003>
#' @source \strong{Poland}: Supplementary Material 3 in Golicki et al <doi:10.1007/s40273-019-00811-7>
#' @source \strong{Portugal}: Ferreira et al (2014) <doi:10.1007/s11136-019-02226-5>
#' @source \strong{Spain}: Selected examples in Table 1 in Ramos-Goñi et al (2018) <https://doi.org/10.1016/j.jval.2017.10.023>
#' @source \strong{Taiwan}: Table 3 page 10 in Lin et al (2018) <https://doi.org/10.1371/journal.pone.0209344>
#' @source \strong{Thailand}: Pattanaphesaj et al (2018) <doi:10.1080/14737167.2018>
#' @source \strong{Uruguay}: Augustovski et al (2016) <doi:10.1007/s11136-015-1086-4>
#' @source \strong{USA}: Pickard et al (2019) <doi:10.1016/j.jval.2019.02.009>
#' @source \strong{Vietnam}: Mai et al (2020) <doi:10.1007/s11136-020-02469-7>
"EQ5D5L_indexvalues.df"

#' EQ-5D-5L crosswalk value sets for 10 countries
#' @note For testing purposes - not required by users
#' @source https://euroqol.org/eq-5d-instruments/eq-5d-5l-about/valuation-standard-value-sets/crosswalk-index-value-calculator/ (accessed on Aug 03, 2019)
#' @source Van Hout et al (2012) <doi:10.1016/j.jval.2012.02.008>
#' @format A 3125 by 11 dataframe
"EQ5D5L_crosswalk_indexvalues.df"
/scratch/gouwar.j/cran-all/cranData/valueEQ5D/R/data.R
###############################################################################
#' Function to check the EQ-5D-3L scores
#' @param dimen a mandatory input: the response for EQ-5D-3L mobility, the
#' 5 digit response, or the vector of responses, e.g. 11111, c(1, 1, 1, 1, 1) or 1
#' @param dimen2 response for EQ-5D-3L self care, or NA if the responses
#' are given through dimen
#' @param dimen3 response for EQ-5D-3L usual activities, or NA if the
#' responses are given through dimen
#' @param dimen4 response for EQ-5D-3L pain/discomfort, or NA if the
#' responses are given through dimen
#' @param dimen5 response for EQ-5D-3L anxiety/depression, or NA if
#' the responses are given through dimen
#' @examples
#' check_scores_3L(c(1, 2, 3, 3, 3))
#' check_scores_3L(1, 2, 3, 3, 3)
#' check_scores_3L(1, 2, 3, 2, 3)
#' check_scores_3L(12323)
#' @export
check_scores_3L <- function(dimen, dimen2 = NA, dimen3 = NA, dimen4 = NA,
                            dimen5 = NA) {
  responses <- c(dimen, dimen2, dimen3, dimen4, dimen5)
  # the first argument should not be NA and should not contain NA
  if (sum(is.na(dimen)) > 0) {
    return(NA)
  } else {
    if (length(dimen) != 5 && length(dimen) != 1) {
      stop("Invalid EQ-5D-3L responses-check the responses to each question")
    } else {
      if (length(dimen) == 5) { # first argument is a vector
        this_score <- paste(dimen, collapse = "")
        responses <- dimen
      } else {
        if (length(dimen) == 1) {
          this_score <- paste(responses[!is.na(responses)], collapse = "")
          # first argument is a 5 digit number or the actual response for mobility
          responses <- convert_number_to_digits(this_score)
        }
      }
    }
  }
  if (!all(responses %in% 1:3)) {
    stop("Responses not valid for EQ-5D-3L scores")
  } else {
    this_score <- as.numeric(this_score)
    if (this_score < 11111) {
      return(NA)
    } else {
      return(responses)
    }
  }
}
################################################################################
#' Function to check the EQ-5D-5L scores
#' @param dimen a mandatory input: the response for EQ-5D-5L mobility, the
#' 5 digit response, or the vector of responses, e.g. 11111, c(1, 1, 1, 1, 1) or 1
#' @param dimen2 response for EQ-5D-5L self care, or NA if the responses are
#' given through dimen
#' @param dimen3 response for EQ-5D-5L usual activities, or NA if the responses
#' are given through dimen
#' @param dimen4 response for EQ-5D-5L pain/discomfort, or NA if the responses
#' are given through dimen
#' @param dimen5 response for EQ-5D-5L anxiety/depression, or NA if the
#' responses are given through dimen
#' @examples
#' check_scores_5L(c(1, 2, 3, 5, 3))
#' check_scores_5L(1, 2, 3, 4, 3)
#' check_scores_5L(12323)
#' @export
check_scores_5L <- function(dimen, dimen2 = NA, dimen3 = NA, dimen4 = NA,
                            dimen5 = NA) {
  responses <- c(dimen, dimen2, dimen3, dimen4, dimen5)
  # the first argument should not be NA and should not contain NA
  if (sum(is.na(dimen)) > 0) {
    return(NA)
  } else {
    if (length(dimen) != 5 && length(dimen) != 1) {
      stop("Invalid EQ-5D-5L responses-check the responses to each question")
    } else {
      if (length(dimen) == 5) { # first argument is a vector
        this_score <- paste(dimen, collapse = "")
        responses <- dimen
      } else {
        if (length(dimen) == 1) {
          this_score <- paste(responses[!is.na(responses)], collapse = "")
          # first argument is a 5 digit number or the actual response for mobility
          responses <- convert_number_to_digits(this_score)
        }
      }
    }
  }
  if (!all(responses %in% 1:5)) {
    stop("Responses not valid for EQ-5D-5L scores")
  } else {
    this_score <- as.numeric(this_score)
    if (this_score < 11111) {
      return(NA)
    } else {
      return(responses)
    }
  }
}
#################################################################################
#' Function to value EQ-5D-5L scores for various countries
#' @param country a country name from the list Canada, China, England,
#' Ethiopia, France, Germany, Hong Kong, Hungary, Indonesia, Ireland, Japan,
#' Korea, Malaysia, Netherlands, Poland, Portugal, Spain, Taiwan, Thailand,
#' Uruguay, USA and Vietnam
#' @param dimen a mandatory input: the response for EQ-5D-5L mobility, the
#' 5 digit response, or the vector of responses, e.g. 11111, c(1, 1, 1, 1, 1) or 1
#' @param dimen2 response for EQ-5D-5L self care, or NA if the responses are
#' given through dimen
#' @param dimen3 response for EQ-5D-5L usual activities, or NA if the responses
#' are given through dimen
#' @param dimen4 response for EQ-5D-5L pain/discomfort, or NA if the responses
#' are given through dimen
#' @param dimen5 response for EQ-5D-5L anxiety/depression, or NA if the
#' responses are given through dimen
#' @return the index value if success; NA when responses are missing
#' @examples
#' value_5L_Ind("England", 23434)
#' value_5L_Ind("China", 2, 3, 4, 3, 4)
#' value_5L_Ind("Poland", c(1, 2, 3, 4, 3))
#' @export
value_5L_Ind <- function(country, dimen, dimen2 = NA, dimen3 = NA, dimen4 = NA,
                         dimen5 = NA) {
  country_list <- c(
    "Canada", "China", "England", "Ethiopia", "France", "Germany",
    "Hong_Kong", "Hungary", "Indonesia", "Ireland", "Japan", "Korea",
    "Malaysia", "Netherlands", "Poland", "Portugal", "Spain", "Taiwan",
    "Thailand", "Uruguay", "USA", "Vietnam"
  )
  country <- replace_space_underscore(country)
  if (country %in% country_list) {
    scores <- check_scores_5L(dimen, dimen2, dimen3, dimen4, dimen5)
    if (sum(is.na(scores)) > 0)
      return(NA)
    if (sum(scores) > 0) {
      eq5d_valueset <- EQ5D5L_tariffs.df
      names(scores) <- c("MO", "SC", "UA", "PD", "AD")
      rows <- paste0(names(scores), scores)
      rownum1 <- which(row.names(eq5d_valueset) == rows[1])
      rownum2 <- which(row.names(eq5d_valueset) == rows[2])
      rownum3 <- which(row.names(eq5d_valueset) == rows[3])
      rownum4 <- which(row.names(eq5d_valueset) == rows[4])
      rownum5 <- which(row.names(eq5d_valueset) == rows[5])
      rownumfh <- which(row.names(eq5d_valueset) == "fullHealth")
      rownuminter <- which(row.names(eq5d_valueset) == "intercept")
      rownumn4 <- which(row.names(eq5d_valueset) == "N4")
      rownumn45 <- which(row.names(eq5d_valueset) == "Num45sq")
      inter_value <- NA
      if (any(scores > 1) && !is.na(eq5d_valueset[rownuminter, country])) {
        inter_value <- eq5d_valueset[rownuminter, country]
      }
      n4value <- NA
      if (any(scores >= 4) && !is.na(eq5d_valueset[rownumn4, country])) {
        n4value <- eq5d_valueset[rownumn4, country]
      }
      n45 <- which(scores %in% c(4, 5))
      n45value <- NA
      if (length(n45) >= 1 & !is.na(eq5d_valueset[rownumn45, country])) {
        n45value <- (length(n45) - 1)^2 * eq5d_valueset[rownumn45, country]
      }
      n45sall <- 0
      if (length(n45) >= 1) {
        for (i in seq_len(length(n45))) {
          names45row <- paste0(names(scores)[n45[i]], "45")
          rownumn45r <- which(row.names(eq5d_valueset) == names45row)
          if (!is.na(eq5d_valueset[rownumn45r, country])) {
            n45rvalue <- eq5d_valueset[rownumn45r, country]
            n45sall <- n45sall + n45rvalue
          } else {
            n45rvalue <- 0
            n45sall <- n45sall + n45rvalue
          }
        }
      }
      dim_response <- c(
        eq5d_valueset[rownum1, country], eq5d_valueset[rownum2, country],
        eq5d_valueset[rownum3, country], eq5d_valueset[rownum4, country],
        eq5d_valueset[rownum5, country]
      )
      sum_response <- sum(dim_response, na.rm = TRUE)
      values <- c(
        eq5d_valueset[rownumfh, country], inter_value, sum_response,
        n4value, n45value, n45sall
      )
      values_state <- sum(values, na.rm = TRUE)
      return(values_state)
    }
  } else {
    stop("No tariffs found for the country you specified for EQ-5D-5L")
  }
}
################################################################################
#' Function to value EQ-5D-5L scores for any country and group by gender and age
#' @param eq5dresponse_data the data containing eq5d responses
#' @param mo column name for EQ-5D-5L mobility
#' @param sc column name for response for EQ-5D-5L self care
#' @param ua column name for response for EQ-5D-5L usual activities
#' @param pd column name for response for EQ-5D-5L pain/discomfort
#' @param ad column name for response for EQ-5D-5L anxiety/depression
#' @param country country of interest, by default England
#' @param groupby male or female - grouping by gender, default NULL
#' @param agelimit vector of ages to show upper and lower limits, default NULL
#' @return a list with the descriptive statistics, frequency table, histogram
#' and the modified data where the last column holds the index values
#' @examples
#' data <- data.frame(
#'   age = c(10, 20), sex = c("M", "F"),
#'   mo = c(1, 2), sc = c(1, 2), ua = c(3, 4), pd = c(3, 4), ad = c(3, 4)
#' )
#' value_5L(data, "mo", "sc", "ua", "pd", "ad", "England", NULL, c(10, 70))
#' @export
#' @description Function to value the EQ-5D-5L descriptive system to index values.
value_5L <- function(eq5dresponse_data, mo, sc, ua, pd, ad, country = "England",
                     groupby = NULL, agelimit = NULL) {
  country <- replace_space_underscore(country)
  eq5d_colnames <- c(mo, sc, ua, pd, ad)
  ans_eq5d_colnames <- sapply(eq5d_colnames, check_column_exist, eq5dresponse_data)
  if (all(ans_eq5d_colnames == 0)) { # if the eq5d column names match
    working_data <- subset_gender_age_to_group(eq5dresponse_data, groupby, agelimit)
    scores <- c()
    if (nrow(working_data) < 1) {
      stop("no entries with the given criteria - Please check the contents or the criteria")
    } else {
      for (j in 1:nrow(working_data)) {
        res1 <- working_data[j, mo]
        res2 <- working_data[j, sc]
        res3 <- working_data[j, ua]
        res4 <- working_data[j, pd]
        res5 <- working_data[j, ad]
        this_score <- value_5L_Ind(country, c(res1, res2, res3, res4, res5))
        scores <- c(scores, this_score)
      }
      new_data <- cbind(working_data, scores)
      colnames(new_data) <- c(colnames(working_data), "EQ-5D-5L scores")
      scores_noNA <- scores[!is.na(scores)]
      if (length(scores_noNA) >= 1) {
        stats <- descriptive_stat_data_column(scores_noNA, "EQ-5D-5L")
        freq_table <- get_frequency_table(scores_noNA)
        first <- is.null(groupby) || toupper(groupby) == "NA" || is.na(groupby)
        second <- is.null(agelimit) || sum(toupper(agelimit) == "NA") != 0 ||
          sum(is.na(agelimit)) != 0
        if (first & second) {
          title <- "Histogram of EQ-5D-5L index values"
        } else {
          if (first & !second) {
            title <- paste("Histogram of EQ-5D-5L index values",
              " with ages between ", agelimit[1], " and ", agelimit[2], sep = "")
          } else {
            if (!first & second) {
              title <- paste("Histogram of EQ-5D-5L index values for ",
                groupby, sep = "")
            } else {
              title <- paste("Histogram of EQ-5D-5L index values for ",
                groupby, " with ages between ", agelimit[1], " and ",
                agelimit[2], sep = "")
            }
          }
        }
        oldpar <- graphics::par(no.readonly = TRUE)
        on.exit(graphics::par(oldpar)) # restore par on exit (was unreachable after return)
        graphics::par(mar = c(4, 4, 2, 2))
        hist_plot <- graphics::hist(scores_noNA, main = title)
        results <- list("stats" = stats, "freq_table" = freq_table,
          "histogram" = hist_plot, "modified_data" = new_data)
        return(results)
      } else {
        print("No relevant rows with non NA scores")
      }
    }
  } else {
    stop("EQ-5D column names do not match")
  }
}
################################################################################
#' Function to value EQ-5D-3L scores for various countries
#' @param country a country name from the list Argentina, Australia, Belgium,
#' Brazil, Canada, Chile, China, Denmark, Europe, Finland, France, Germany,
#' Hungary, Iran, Italy, Japan, Korea, Malaysia, Netherlands, New Zealand,
#' Poland, Portugal, Singapore, Slovenia, Spain, Sri Lanka, Sweden, Taiwan,
#' Thailand, Trinidad and Tobago, UK, USA and Zimbabwe
#' @param method method name, either TTO or VAS
#' @param dimen a mandatory input: the response for EQ-5D-3L mobility, the
#' 5 digit response, or the vector of responses, e.g. 11111, c(1, 1, 1, 1, 1) or 1
#' @param dimen2 response for EQ-5D-3L self care, or NA if the responses are
#' given through dimen
#' @param dimen3 response for EQ-5D-3L usual activities, or NA if the responses
#' are given through dimen
#' @param dimen4 response for EQ-5D-3L pain/discomfort, or NA if the responses
#' are given through dimen
#' @param dimen5 response for EQ-5D-3L anxiety/depression, or NA if the
#' responses are given through dimen
#' @return the index value if success; NA when responses are missing
#' @examples
#' value_3L_Ind("UK", "TTO", 23131)
#' value_3L_Ind("Spain", "TTO", 2, 3, 1, 3, 1)
#' value_3L_Ind("Denmark", "VAS", c(1, 2, 3, 1, 3))
#' @export
value_3L_Ind <- function(country, method, dimen, dimen2 = NA, dimen3 = NA,
                         dimen4 = NA, dimen5 = NA) {
  countrylist <- c(
    "Argentina", "Australia", "Belgium", "Brazil", "Canada", "Chile", "China",
    "Denmark", "Europe", "Finland", "France", "Germany", "Hungary", "Iran",
    "Italy", "Japan", "Korea", "Malaysia", "Netherlands", "New_Zealand",
    "Poland", "Portugal", "Singapore", "Slovenia", "Spain", "Sri_Lanka",
    "Sweden", "Taiwan", "Thailand", "Trinidad_and_Tobago", "UK", "USA",
    "Zimbabwe"
  )
  VAS_countrylist <- c(
    "Argentina", "Belgium", "Denmark", "Europe", "Finland", "Germany",
    "Malaysia", "New_Zealand", "Slovenia", "Spain", "UK"
  )
  TTO_countrylist <- c(
    "Argentina", "Australia", "Brazil", "Canada", "Chile", "China", "Denmark",
    "France", "Germany", "Hungary", "Iran", "Italy", "Japan", "Korea",
    "Netherlands", "Poland", "Portugal", "Singapore", "Spain", "Sri_Lanka",
    "Sweden", "Taiwan", "Thailand", "Trinidad_and_Tobago", "UK", "USA",
    "Zimbabwe"
  )
  australia.implausibleordering.scores <- c(33132, 12133, 13133, 22133, 23133,
    32133, 33133, 12233, 13233, 22233, 23233, 32233, 33233, 33232, 33323,
    13332, 13333, 23332, 23333, 32333, 33332, 33333)
  country <- replace_space_underscore(country)
  if (country %in% countrylist) {
    scores <- check_scores_3L(dimen, dimen2, dimen3, dimen4, dimen5)
    if (sum(is.na(scores)) > 0)
      return(NA)
    if (sum(scores) > 0) {
      if (method == "TTO" && country %in% TTO_countrylist) {
        eq5d_valueset <- EQ5D3L_tariffs_TTO.df
      } else {
        if (method == "VAS" && country %in% VAS_countrylist) {
          eq5d_valueset <- EQ5D3L_tariffs_VAS.df
        } else {
          stop("No tariff found")
        }
      }
      score_num <- as.numeric(paste(scores, collapse = ""))
      if (country == "Australia" &
          sum(score_num %in% australia.implausibleordering.scores) > 0) {
        values_state <- .correctImplausibleOrdering(scores)
      } else {
        names(scores) <- c("MO", "SC", "UA", "PD", "AD")
        rows <- paste0(names(scores), scores)
        col <- check_column_exist(country, eq5d_valueset)
        if (col == 0) {
          min2or3 <- which(scores %in% c(2, 3))
          if (length(min2or3) == 5) {
            all_equals2or3 <- 1
          } else {
            all_equals2or3 <- c()
          }
          which3 <- which(scores %in% c(3))
          which2 <- which(scores %in% c(2))
          rownums <- c()
          dim_response <- NA
          min3_value <- NA
          all_equals2or3_value <- NA
          min2or3_value <- NA
          c3sq_value <- NA
          d1_value <- NA
          i2_value <- NA
          i2_sq_value <- NA
          i3_value <- NA
          i3_sq_value <- NA
          only1sand2s_value <- NA
          only1sand3s_value <- NA
          atleast2andatleast3_value <- NA
          nos2withatleast3_value <- NA
          nos2Sq_value <- NA
          nos3Sq_value <- NA
          mo3sc3_value <- NA
          mo3ua3_value <- NA
          mo3pd3_value <- NA
          mo3ad3_value <- NA
          sc3ua3_value <- NA
          sc3pd3_value <- NA
          sc3ad3_value <- NA
          ua3pd3_value <- NA
          ua3ad3_value <- NA
          pd3ad3_value <- NA
          mo2ua2_value <- NA
          sc3ua2_value <- NA
          rownumfh <- which(row.names(eq5d_valueset) == "FullHealth")
          rownum_min2or3 <- which(row.names(eq5d_valueset) == "Constant")
          rownumn_min3 <- which(row.names(eq5d_valueset) == "N3")
          rownum_only1sand2s <- which(row.names(eq5d_valueset) == "Only1sand2s")
          rownum_only1sand3s <- which(row.names(eq5d_valueset) == "Only1sand3s")
          rownum_atleast2andatleast3 <- which(row.names(eq5d_valueset) == "Atleast2andatleast3")
          rownum_nos2withatleast3 <- which(row.names(eq5d_valueset) == "Nos2withatleast3")
          rownum_nos2Sq <- which(row.names(eq5d_valueset) == "Nos2Sq")
          rownum_nos3Sq <- which(row.names(eq5d_valueset) == "Nos3Sq")
          if (method == "TTO") {
            rownum_all_equals2or3 <- which(row.names(eq5d_valueset) == "X5")
            rownum_C3sq <- which(row.names(eq5d_valueset) == "C3sq")
            rownumn_D1 <- which(row.names(eq5d_valueset) == "D1")
            rownumn_I2 <- which(row.names(eq5d_valueset) == "I2")
            rownumn_I2_sq <- which(row.names(eq5d_valueset) == "I2_sq")
            rownumn_I3 <- which(row.names(eq5d_valueset) == "I3")
            rownumn_I3_sq <- which(row.names(eq5d_valueset) == "I3_sq")
            rownum_MO3SC3 <- which(row.names(eq5d_valueset) == "MO3SC3")
            rownum_MO3UA3 <- which(row.names(eq5d_valueset) == "MO3UA3")
            rownum_MO3PD3 <- which(row.names(eq5d_valueset) == "MO3PD3")
            rownum_MO3AD3 <- which(row.names(eq5d_valueset) == "MO3AD3")
            rownum_SC3UA3 <- which(row.names(eq5d_valueset) == "SC3UA3")
            rownum_SC3PD3 <- which(row.names(eq5d_valueset) == "SC3PD3")
            rownum_SC3AD3 <- which(row.names(eq5d_valueset) == "SC3AD3")
            rownum_UA3PD3 <- which(row.names(eq5d_valueset) == "UA3PD3")
            rownum_UA3AD3 <- which(row.names(eq5d_valueset) == "UA3AD3")
            rownum_PD3AD3 <- which(row.names(eq5d_valueset) == "PD3AD3")
            rownum_MO2UA2 <- which(row.names(eq5d_valueset) == "MO2UA2")
            rownum_SC3UA2 <- which(row.names(eq5d_valueset) == "SC3UA2")
          } else {
            rownum_all_equals2or3 <- NA
            rownum_C3sq <- NA
            rownumn_D1 <- NA
            rownumn_I2 <- NA
            rownumn_I2_sq <- NA
            rownumn_I3 <- NA
            rownumn_I3_sq <- NA
            rownum_MO3SC3 <- NA
            rownum_MO3UA3 <- NA
            rownum_MO3PD3 <- NA
            rownum_MO3AD3 <- NA
            rownum_SC3UA3 <- NA
            rownum_SC3PD3 <- NA
            rownum_SC3AD3 <- NA
            rownum_UA3PD3 <- NA
            rownum_UA3AD3 <- NA
            rownum_PD3AD3 <- NA
            rownum_MO2UA2 <- NA
            rownum_SC3UA2 <- NA
          }
          if (length(min2or3) > 0) {
            for (i in seq_len(length(min2or3))) {
              rownams <- row.names(eq5d_valueset)
              ro <- which(rownams == rows[min2or3[i]])
              rownums <- cbind(rownums, ro)
            }
            dim_response <- eq5d_valueset[rownums, country]
          }
          if (any(scores >= 3) && !is.na(eq5d_valueset[rownumn_min3, country])) {
            min3_value <- eq5d_valueset[rownumn_min3, country]
          }
          if (length(which3) >= 1 & sum(is.na(rownum_C3sq) == 0)) {
            if (!is.na(eq5d_valueset[rownum_C3sq, country])) {
              c3sq_value <- (length(which3))^2 * eq5d_valueset[rownum_C3sq, country]
            }
          }
          if (length(all_equals2or3) >= 1 & sum(is.na(rownum_all_equals2or3) == 0)) {
            if (!is.na(eq5d_valueset[rownum_all_equals2or3, country])) {
              all_equals2or3_value <- eq5d_valueset[rownum_all_equals2or3, country]
            }
          }
          if (sum(scores) > 5 & length(min2or3) >= 1 & sum(is.na(rownum_min2or3) == 0)) {
            if (!is.na(eq5d_valueset[rownum_min2or3, country])) {
              min2or3_value <- eq5d_valueset[rownum_min2or3, country]
            }
          }
          if (sum(scores) > 5 & length(min2or3) >= 1 & sum(is.na(rownumn_D1) == 0)) {
            if (!is.na(eq5d_valueset[rownumn_D1, country])) {
              d1_value <- (length(min2or3) - 1) * eq5d_valueset[rownumn_D1, country]
            }
          }
          if (sum(scores) > 5 & length(which2) >= 1 & sum(is.na(rownumn_I2) == 0)) {
            if (!is.na(eq5d_valueset[rownumn_I2, country])) {
              i2_value <- (length(which2) - 1) * eq5d_valueset[rownumn_I2, country]
            }
          }
          if (sum(scores) > 5 & length(which2) >= 1 & sum(is.na(rownumn_I2_sq) == 0)) {
            if (!is.na(eq5d_valueset[rownumn_I2_sq, country])) {
              i2_sq_value <- (length(which2) - 1)^2 * eq5d_valueset[rownumn_I2_sq, country]
            }
          }
          if (sum(scores) > 5 & length(which3) >= 1 & sum(is.na(rownumn_I3) == 0)) {
            if (!is.na(eq5d_valueset[rownumn_I3, country])) {
              i3_value <- (length(which3) - 1) * eq5d_valueset[rownumn_I3, country]
            }
          }
          if (sum(scores) > 5 & length(which3) >= 1 & sum(is.na(rownumn_I3_sq) == 0)) {
            if (!is.na(eq5d_valueset[rownumn_I3_sq, country])) {
              i3_sq_value <- (length(which3) - 1)^2 * eq5d_valueset[rownumn_I3_sq, country]
            }
          }
          if (all(scores <= 2) & !all(scores == 1) & sum(is.na(rownum_only1sand2s) == 0)) {
            if (!is.na(eq5d_valueset[rownum_only1sand2s, country])) {
              only1sand2s_value <- eq5d_valueset[rownum_only1sand2s, country]
            }
          }
          ## !all(scores==3) & need ??
          if (!any(scores == 2) & !all(scores == 1) & sum(is.na(rownum_only1sand3s) == 0)) {
            if (!is.na(eq5d_valueset[rownum_only1sand3s, country])) {
              only1sand3s_value <- eq5d_valueset[rownum_only1sand3s, country]
            }
          }
          if (any(scores == 2) & any(scores == 3) & sum(is.na(rownum_atleast2andatleast3) == 0)) {
            if (!is.na(eq5d_valueset[rownum_atleast2andatleast3, country])) {
              atleast2andatleast3_value <- eq5d_valueset[rownum_atleast2andatleast3, country]
            }
          }
          if (any(scores == 2) & any(scores == 3) & sum(is.na(rownum_nos2withatleast3) == 0)) {
            if (!is.na(eq5d_valueset[rownum_nos2withatleast3, country])) {
              nos2withatleast3_value <- length(which(scores == 2)) *
                eq5d_valueset[rownum_nos2withatleast3, country]
            }
          }
          if (any(scores == 2) & sum(is.na(rownum_nos2Sq) == 0)) {
            if (!is.na(eq5d_valueset[rownum_nos2Sq, country])) {
              nos2Sq_value <- (length(which(scores == 2)))^2 *
                eq5d_valueset[rownum_nos2Sq, country]
            }
          }
          if (any(scores == 3) & sum(is.na(rownum_nos3Sq) == 0)) {
            if (!is.na(eq5d_valueset[rownum_nos3Sq, country])) {
              nos3Sq_value <- (length(which(scores == 3)))^2 *
                eq5d_valueset[rownum_nos3Sq, country]
            }
          }
          if (scores[["MO"]] == 3 & scores[["SC"]] == 3 & sum(is.na(rownum_MO3SC3) == 0)) {
            if (!is.na(eq5d_valueset[rownum_MO3SC3, country])) {
              mo3sc3_value <- eq5d_valueset[rownum_MO3SC3, country]
            }
          }
          if (scores[["MO"]] == 3 & scores[["UA"]] == 3 & sum(is.na(rownum_MO3UA3) == 0)) {
            if (!is.na(eq5d_valueset[rownum_MO3UA3, country])) {
              mo3ua3_value <- eq5d_valueset[rownum_MO3UA3, country]
            }
          }
          if (scores[["MO"]] == 3 & scores[["PD"]] == 3 & sum(is.na(rownum_MO3PD3) == 0)) {
            if (!is.na(eq5d_valueset[rownum_MO3PD3, country])) {
              mo3pd3_value <- eq5d_valueset[rownum_MO3PD3, country]
            }
          }
          if (scores[["MO"]] == 3 & scores[["AD"]] == 3 & sum(is.na(rownum_MO3AD3) == 0)) {
            if (!is.na(eq5d_valueset[rownum_MO3AD3, country])) {
              mo3ad3_value <- eq5d_valueset[rownum_MO3AD3, country]
            }
          }
          if (scores[["SC"]] == 3 & scores[["UA"]] == 3 & sum(is.na(rownum_SC3UA3) == 0)) {
            if (!is.na(eq5d_valueset[rownum_SC3UA3, country])) {
              sc3ua3_value <- eq5d_valueset[rownum_SC3UA3, country]
            }
          }
          if (scores[["SC"]] == 3 & scores[["PD"]] == 3 & sum(is.na(rownum_SC3PD3) == 0)) {
            if (!is.na(eq5d_valueset[rownum_SC3PD3, country])) {
              sc3pd3_value <- eq5d_valueset[rownum_SC3PD3, country]
            }
          }
          if (scores[["SC"]] == 3 & scores[["AD"]] == 3 & sum(is.na(rownum_SC3AD3) == 0)) {
            if (!is.na(eq5d_valueset[rownum_SC3AD3, country])) {
              sc3ad3_value <- eq5d_valueset[rownum_SC3AD3, country]
            }
          }
          if (scores[["UA"]] == 3 & scores[["PD"]] == 3 & sum(is.na(rownum_UA3PD3) == 0)) {
            if (!is.na(eq5d_valueset[rownum_UA3PD3, country])) {
              ua3pd3_value <- eq5d_valueset[rownum_UA3PD3, country]
            }
          }
          if (scores[["UA"]] == 3 & scores[["AD"]] == 3 & sum(is.na(rownum_UA3AD3) == 0)) {
            if (!is.na(eq5d_valueset[rownum_UA3AD3, country])) {
              ua3ad3_value <- eq5d_valueset[rownum_UA3AD3, country]
            }
          }
          if (scores[["PD"]] == 3 & scores[["AD"]] == 3 & sum(is.na(rownum_PD3AD3) == 0)) {
            if (!is.na(eq5d_valueset[rownum_PD3AD3, country])) {
              pd3ad3_value <- eq5d_valueset[rownum_PD3AD3, country]
            }
          }
          if (scores[["MO"]] == 2 & scores[["UA"]] == 2 & sum(is.na(rownum_MO2UA2) == 0)) {
            if (!is.na(eq5d_valueset[rownum_MO2UA2, country])) {
              mo2ua2_value <- eq5d_valueset[rownum_MO2UA2, country]
            }
          }
          if (scores[["SC"]] == 3 & scores[["UA"]] == 2 & sum(is.na(rownum_SC3UA2) == 0)) {
            if (!is.na(eq5d_valueset[rownum_SC3UA2, country])) {
              sc3ua2_value <- eq5d_valueset[rownum_SC3UA2, country]
            }
          }
          if (country == "Germany" && method == "VAS") {
            prod.response <- prod(dim_response, na.rm = TRUE)
            values <- c(
              eq5d_valueset[rownumfh, country], prod.response, min2or3_value,
              min3_value, all_equals2or3_value, c3sq_value, d1_value,
              i2_value, i2_sq_value, i3_value, i3_sq_value,
              only1sand2s_value, only1sand3s_value, atleast2andatleast3_value,
              nos2withatleast3_value, nos2Sq_value, nos3Sq_value
            )
            values_state <- prod(values, na.rm = TRUE)
          } else {
            sum_response <- sum(dim_response, na.rm = TRUE)
            values <- c(
              eq5d_valueset[rownumfh, country], sum_response, min2or3_value,
              min3_value, all_equals2or3_value, c3sq_value, d1_value,
              i2_value, i2_sq_value, i3_value, i3_sq_value,
              only1sand2s_value, only1sand3s_value, atleast2andatleast3_value,
              nos2withatleast3_value, nos2Sq_value, nos3Sq_value,
              mo3sc3_value, mo3ua3_value, mo3pd3_value, mo3ad3_value,
              sc3ua3_value, sc3pd3_value, sc3ad3_value, ua3pd3_value,
              ua3ad3_value, pd3ad3_value, mo2ua2_value, sc3ua2_value
            )
            values_state <- sum(values, na.rm = TRUE)
          }
        } else {
          stop("No country tariffs on valueset")
        }
      }
      return(values_state)
    }
  } else {
    stop("No country tariffs found for the country you specified for EQ-5D-3L")
  }
}
################################################################################
#' Function to value EQ-5D-3L columns to index values for any country and group
#' by gender and age
#' @param eq5dresponse_data the data containing eq5d responses
#' @param mo column name for EQ-5D-3L mobility
#' @param sc column name for response for EQ-5D-3L self care
#' @param ua column name for response for EQ-5D-3L usual activities
#' @param pd column name for response for EQ-5D-3L pain/discomfort
#' @param ad column name for response for EQ-5D-3L anxiety/depression
#' @param country country of interest (e.g. UK); required when groupby is used
#' @param method either "TTO" or "VAS"
#' @param groupby male or female - grouping by gender, default NULL
#' @param agelimit vector of ages to show upper and lower limits
#' @return the descriptive statistics of index values, frequency table and
#' the modified data where the last column will be the index values
#' @examples
#' data <- data.frame(
#'   age = c(10, 20), sex = c("M", "F"),
#'   mo = c(1, 2), sc = c(1, 2), ua = c(3, 4), pd = c(3, 1), ad = c(3, 1)
#' )
#' value_3L(data, "mo", "sc", "ua", "pd", "ad", "UK", "TTO", NULL, c(10, 70))
#' @export
#' @description Main function to value the EQ-5D-3L descriptive system to
#' index values.
value_3L <- function(eq5dresponse_data, mo, sc, ua, pd, ad, country, method, groupby, agelimit) { country <- replace_space_underscore(country) eq5d_colnames <- c(mo, sc, ua, pd, ad) ans_eq5d_colnames <- sapply(eq5d_colnames, check_column_exist, eq5dresponse_data) if (all(ans_eq5d_colnames == 0)) { # if the eq5d column names match working_data <- subset_gender_age_to_group(eq5dresponse_data, groupby, agelimit) if (nrow(working_data) < 1) { stop("no entries with the given criteria - Please check the contents or the criteria") } else { scores <- c() for (j in 1:nrow(working_data)) { res1 <- working_data[j, mo] res2 <- working_data[j, sc] res3 <- working_data[j, ua] res4 <- working_data[j, pd] res5 <- working_data[j, ad] this_score <- value_3L_Ind(country, method, res1, res2, res3, res4, res5) scores <- c(scores, this_score) } new_data <- cbind(working_data, scores) colnames(new_data) <- c(colnames(working_data), "EQ-5D-3L scores") scores_noNA <- scores[!is.na(scores)] if (length(scores_noNA) >= 1) { stats <- descriptive_stat_data_column(scores_noNA, "EQ-5D-3L") freq_table <- get_frequency_table(scores_noNA) first <- is.null(groupby) || toupper(groupby) == "NA" || is.na(groupby) second <- is.null(agelimit) || sum(toupper(agelimit) == "NA") != 0 || sum(is.na(agelimit)) != 0 if (first & second) { title <- paste("Histogram of EQ-5D-3L index values", sep = "") } else { if (first & !second) { title <- paste("Histogram of EQ-5D-3L index values", " with ages between ", agelimit[1], " and ", agelimit[2], sep = "" ) } else { if (!first & second) { title <- paste("Histogram of EQ-5D-3L index values for ", groupby, sep = "" ) } else { title <- paste("Histogram of EQ-5D-3L index values for ", groupby, " with ages between ", agelimit[1], " and ", agelimit[2], sep = "" ) } } } hist_plot <- graphics::hist(scores_noNA, main = title) results <- list("stats" = stats, "frequency_table" = freq_table, "histogram" = hist_plot, "modified_data" = new_data) return(results) } else { print("No relevant rows with non NA scores") } } } else {# if the eq 5d column names do not match stop("EQ-5D column names do not match") } } ################################################################################ #' Function to map EQ-5D-5L descriptive system to 3L index value #' @param country default is "UK" #' @param method CW cross walk #' @param dimen response for EQ-5D-5L mobility or the 5 digit response, or #' the vector of responses, e.g. 
11111, c(1,1,1,1,1) or 1 #' @param dimen2 response for EQ-5D-5L self care, or NA if the responses are #' given as dimen #' @param dimen3 response for EQ-5D-5L usual activities,or NA if the responses #' are given as dimen #' @param dimen4 response for EQ-5D-5L pain/discomfort, or NA if the responses #' are given as dimen #' @param dimen5 response for EQ-5D-5L anxiety/depression, or NA if the #' responses are given as dimen #' @return index value of EQ-5D-3L, -1 if any error #' @examples #' map_5Lto3L_Ind("UK", "CW", 11125) #' map_5Lto3L_Ind("UK", "CW", c(1, 1, 1, 2, 5)) #' map_5Lto3L_Ind("UK", "CW", 1, 1, 1, 2, 5) #' @export #' @description Function to map EQ-5D-5L descriptive system to 3L index value #'(ref:Van Hout et al 2012 and code inspired from #'https://github.com/brechtdv/eq5d-mapping) map_5Lto3L_Ind <- function(country = "UK", method = "CW", dimen, dimen2 = NA, dimen3 = NA, dimen4 = NA, dimen5 = NA) { country_list <- c("Denmark", "France", "Germany", "Japan", "Netherlands", "Spain", "Thailand", "UK", "USA", "Zimbabwe") country <- replace_space_underscore(country) if (country %in% country_list) { responses <- c(dimen, dimen2, dimen3, dimen4, dimen5) if (sum(is.na(dimen)) > 0) { # first value should be not be a NA, do not contain NA this_score_5L <- NA values_state <- NA return(values_state) } else { # check first value should be a vector containing responses or a #5digit number if (length(dimen) != 5 && length(dimen) != 1) { stop("Expecting the full response as5 digit number or just the response for mobilty") } else {# first value a vector or a 5 figit number if (length(dimen) == 5) {# first value a vector if (any(dimen < 1) || any(dimen > 5)) { stop("Invalid EQ-5D-5L responses-check the responses to each question") } this_score_5L <- as.numeric(paste(dimen, collapse = "")) } else {# first value 5 digit number or actual response for mobility if (length(dimen) == 1) { if (dimen >= 11111 && dimen <= 55555) { # valid 5 digit number this_score_5L <- dimen } else { if (dimen <= 5 && dimen > 0) { # valid response to mobility four_res <- c(dimen2, dimen3, dimen4, dimen5) if (sum(is.na(four_res)) == 0) { if (all(responses <= 5) && all(responses > 0)) { this_score_5L <- paste(responses, collapse = "") # all valid and generate the score } else {# error values stop("Invalid EQ-5D-5L responses-check the responses to each question") } } else { # missing values this_score_5L <- NA values_state <- NA return(values_state) } } else { stop("Invalid EQ-5D-5L response to mobility") } } } } } } ## create a vector of all possible 3L index values (length == 3^5) index_3L <- numeric(243) ## create a dataframe of all possible 3L scores scores_3L <- expand.grid( AD = seq(3), PD = seq(3), UA = seq(3), SC = seq(3), MO = seq(3) ) ## calculate the index value for each score ## using function EQ5D_be based on Cleemput et al, 2010 for (i in seq(243)) { index_3L[i] <- value_3L_Ind( country, "TTO", scores_3L[i, "MO"], scores_3L[i, "SC"], scores_3L[i, "UA"], scores_3L[i, "PD"], scores_3L[i, "AD"] ) } ## create a dataframe of all possible 5L scores scores_5L <- expand.grid( AD = seq(5), PD = seq(5), UA = seq(5), SC = seq(5), MO = seq(5) ) ## 5L to 3L CROSSWALK ## load 'probability matrix' from 'EQ-5D-5L_Crosswalk_Value_Sets' ## this is saved as dataframe 'm' if (toupper(method) == "CW") { prob.matrix <- Probability_matrix_crosswalk.df m <- prob.matrix rows_m <- nrow(m) cols_m <- ncol(m) if (rows_m != 3125 || cols_m != 243) { stop("Error in number of cols or rows of probability matrix") } ## multiply each row of 't(m)' 
      m_prod <- t(t(m) * index_3L)
      ## obtain sum per row
      ## crosswalking index value for each 5L score
      m_sums <- rowSums(m_prod)
      ## reorder columns and convert to matrix
      scores_5L <- with(scores_5L, cbind(MO, SC, UA, PD, AD))
      ## create 5L score labels
      scores_5L_chr <- apply(scores_5L, 1, paste, collapse = "")
      this_score <- which(scores_5L_chr == paste(this_score_5L, collapse = ""))
      if (country == "Zimbabwe" & this_score_5L == "11111") {
        return(0.9)
      } else {
        return(m_sums[this_score])
      }
    } else {
      stop("The specified method is not implemented")
    }
  } else {
    stop("Crosswalk for the country specified is not implemented")
  }
}
###############################################################################
#' Function to map EQ-5D-5L scores to EQ-5D-3L index values as per the
#' specific country, grouped by gender and age
#' @param eq5dresponse_data the data containing EQ-5D-5L responses
#' @param mobility column name for EQ-5D-5L mobility
#' @param self_care column name for the response for EQ-5D-5L self care
#' @param usual_activities column name for the response for EQ-5D-5L usual
#' activities
#' @param pain_discomfort column name for the response for EQ-5D-5L
#' pain/discomfort
#' @param anxiety column name for the response for EQ-5D-5L anxiety/depression
#' @param country country of interest, by default UK; if groupby is to be
#' used, the country should be specified
#' @param method CW, cross walk
#' @param groupby male or female - grouping by gender, default NULL
#' @param agelimit vector of ages to show upper and lower limits
#' @return index value if success, negative values for failure
#' @examples
#' map_5Lto3L(data.frame(
#'   mo = c(1), sc = c(4), ua = c(4), pd = c(3),
#'   ad = c(3)
#' ), "mo", "sc", "ua", "pd", "ad")
#' @export
#' @description Function to map EQ-5D-5L scores to EQ-5D-3L index values
map_5Lto3L <- function(eq5dresponse_data, mobility, self_care,
                       usual_activities, pain_discomfort, anxiety,
                       country = "UK", method = "CW", groupby = NULL,
                       agelimit = NULL) {
  country <- replace_space_underscore(country)
  eq5d_colnames <- c(mobility, self_care, usual_activities, pain_discomfort,
                     anxiety)
  ans_eq5d_colnames <- sapply(eq5d_colnames, check_column_exist,
                              eq5dresponse_data)
  if (all(ans_eq5d_colnames == 0)) { # the EQ-5D column names match
    working_data <- subset_gender_age_to_group(eq5dresponse_data, groupby,
                                               agelimit)
    scores <- c()
    if (nrow(working_data) < 1) {
      stop("No entries with the given criteria - please check the contents or the criteria")
    } else {
      for (j in 1:nrow(working_data)) {
        res1 <- working_data[j, mobility]
        res2 <- working_data[j, self_care]
        res3 <- working_data[j, usual_activities]
        res4 <- working_data[j, pain_discomfort]
        res5 <- working_data[j, anxiety]
        this_score <- map_5Lto3L_Ind(country, method,
                                     c(res1, res2, res3, res4, res5))
        scores <- c(scores, this_score)
      }
      new_data <- cbind(working_data, scores)
      colnames(new_data) <- c(colnames(working_data), "Mapped EQ-5D-3L scores")
      scores_noNA <- scores[!is.na(scores)]
      if (length(scores_noNA) >= 1) {
        stats <- descriptive_stat_data_column(scores_noNA, "EQ-5D-3L")
        freq_table <- get_frequency_table(scores_noNA)
        first <- is.null(groupby) || toupper(groupby) == "NA" || is.na(groupby)
        second <- is.null(agelimit) || sum(toupper(agelimit) == "NA") != 0 ||
          sum(is.na(agelimit)) != 0
        if (first & second) {
          title <- "Histogram of EQ-5D-3L index values"
        } else {
          if (first & !second) {
            title <- paste("Histogram of EQ-5D-3L index values",
                           " with ages between ", agelimit[1], " and ",
                           agelimit[2], sep = "")
          } else {
            if (!first & second) {
              title <- paste("Histogram of EQ-5D-3L index values for ",
                             groupby, sep = "")
            } else {
              title <- paste("Histogram of EQ-5D-3L index values for ",
                             groupby, " with ages between ", agelimit[1],
                             " and ", agelimit[2], sep = "")
            }
          }
        }
        hist_plot <- graphics::hist(scores, main = title)
        results <- list("stats" = stats, "freq_table" = freq_table,
                        "histogram" = hist_plot, "modified_data" = new_data)
        return(results)
      } else {
        print("No relevant rows with non-NA scores")
      }
    }
  } else { # the EQ-5D column names do not match
    stop("EQ-5D column names do not match")
  }
}
################################################################################
#' Function to correct implausible ordering in the Australian value set for
#' EQ-5D-3L
#' @param scores EQ-5D-3L scores as a number
#' @return the value read from the stored dataframe
#' @examples
#' .correctImplausibleOrdering(11121)
#' @export
#' @description Correcting the implausible ordering
.correctImplausibleOrdering <- function(scores) {
  value <- 0
  score_num <- as.numeric(paste(scores, collapse = ""))
  australia_implausibleordering_scores <- c(
    33132, 12133, 13133, 22133, 23133, 32133, 33133, 12233, 13233, 22233,
    23233, 32233, 33233, 33232, 33323, 13332, 13333, 23332, 23333, 32333,
    33332, 33333
  )
  australia_implausibleordering_values <- c(
    -0.045, 0.154, 0.154, 0.086, 0.086, -0.083, -0.083, 0.101, 0.101, 0.033,
    0.033, -0.136, -0.136, -0.098, -0.199, 0.020, 0.020, -0.048, -0.048,
    -0.206, -0.217, -0.217
  )
  if (sum(score_num %in% australia_implausibleordering_scores) > 0) {
    index <- which(score_num == australia_implausibleordering_scores)
    value <- australia_implausibleordering_values[index]
  }
  return(value)
}
################################################################################
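# Illustration (assumed toy numbers, not part of the package API): the
# crosswalk step in map_5Lto3L_Ind() computes, for each EQ-5D-5L state, the
# probability-weighted average of the EQ-5D-3L index values via
# rowSums(t(t(m) * index_3L)). A minimal sketch with 2 hypothetical 5L states
# and 3 hypothetical 3L states, wrapped in a function so nothing runs at
# package load:
.crosswalkToyExample <- function() {
  toy_prob <- matrix(c(0.7, 0.2, 0.1,
                       0.1, 0.3, 0.6),
                     nrow = 2, byrow = TRUE)  # each row sums to 1
  toy_index_3L <- c(1.0, 0.8, 0.5)            # assumed 3L index values
  # expected 3L index per 5L state: 0.91 and 0.64
  rowSums(t(t(toy_prob) * toy_index_3L))
}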
/scratch/gouwar.j/cran-all/cranData/valueEQ5D/R/eq5d.R
## Required to avoid the note/warning with R CMD check "no visible
## binding for global variable"
#' @import utils
utils::globalVariables(names = c(
  "Probability_matrix_crosswalk.df", "EQ5D3L_indexvalues.df",
  "EQ5D5L_indexvalues.df", "EQ5D5L_crosswalk_indexvalues.df",
  "EQ5D5L_tariffs.df", "EQ5D3L_tariffs_TTO.df", "EQ5D3L_tariffs_VAS.df"
), package = "valueEQ5D", add = TRUE)
/scratch/gouwar.j/cran-all/cranData/valueEQ5D/R/globals.R
## ---- include = FALSE--------------------------------------------------------- knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ## ----setup, include = FALSE--------------------------------------------------- library(valueEQ5D) ## ---- echo = FALSE------------------------------------------------------------ ## EQ-5D-3L data set.seed(17) EQ5D3Ldata <- data.frame(age = abs(rnorm(10, 60, 20)), sex = factor(sample(c("M", "F"), 10, replace = T)), arm = factor(sample(c("Control", "Intervention"), 10, replace = T)), eq5d3L.q1 = (sample(c(1, 2, 3), 10, replace = T)), eq5d3L.q2 = (sample(c(1, 2, 3), 10, replace = T)), eq5d3L.q3 = (sample(c(1, 2, 3), 10, replace = T)), eq5d3L.q4 = (sample(c(1, 2, 3), 10, replace = T)), eq5d3L.q5 = (sample(c(1, 2, 3), 10, replace = T))) ## EQ-5D-5L data set.seed(17) EQ5D5Ldata <- data.frame(age = abs(rnorm(10, 60, 20)), sex = factor(sample(c("M", "F"), 10, replace = T)), arm = factor(sample(c("Control", "Intervention"), 10, replace = T)), eq5d5L.q1 = (sample(c(1, 2, 3, 4, 5), 10, replace = T)), eq5d5L.q2 = (sample(c(1, 2, 3, 4, 5), 10, replace = T)), eq5d5L.q3 = (sample(c(1, 2, 3, 4, 5), 10, replace = T)), eq5d5L.q4 = (sample(c(1, 2, 3, 4, 5), 10, replace = T)), eq5d5L.q5 = (sample(c(1, 2, 3, 4, 5), 10, replace = T))) ## ---- echo = FALSE------------------------------------------------------------ ## Valuing EQ-5D-3L individual score value_3L_Ind("UK", "TTO", 1, 2, 3, 2, 2) value_3L_Ind("UK", "VAS", c(1, 2, 3, 2, 2)) value_3L_Ind("UK", "TTO", 12322) ## ---- echo = FALSE------------------------------------------------------------ result1 <- value_3L(EQ5D3Ldata, "eq5d3L.q1", "eq5d3L.q2", "eq5d3L.q3", "eq5d3L.q4", "eq5d3L.q5", "UK", "TTO", NULL, NULL) ## ---- echo = FALSE------------------------------------------------------------ result1$stats result1$frequencyTable result1$histogram result1$modifiedData ## ---- echo = FALSE------------------------------------------------------------ result2 <- value_3L(EQ5D3Ldata, "eq5d3L.q1", "eq5d3L.q2", "eq5d3L.q3", "eq5d3L.q4", "eq5d3L.q5", "UK", "TTO", "male", c(10, 70)) result3 <- value_3L(EQ5D3Ldata, "eq5d3L.q1", "eq5d3L.q2", "eq5d3L.q3", "eq5d3L.q4", "eq5d3L.q5", "UK", "TTO", "male", NULL) result4 <- value_3L(EQ5D3Ldata, "eq5d3L.q1", "eq5d3L.q2", "eq5d3L.q3", "eq5d3L.q4", "eq5d3L.q5", "UK", "TTO", NULL, c(10, 70)) ## ---- echo = FALSE------------------------------------------------------------ ## Valuing EQ-5D-5L individual score value_5L_Ind("England", 1, 2, 3, 4, 5) value_5L_Ind("England", c(1, 2, 3, 4, 5)) value_5L_Ind("England", 12345) value_5L_Ind("Germany", 12345) value_5L_Ind("Spain", 12345) value_5L_Ind("Indonesia", 12345) ## ---- echo = FALSE------------------------------------------------------------ value_5L(EQ5D5Ldata, "eq5d5L.q1", "eq5d5L.q2", "eq5d5L.q3", "eq5d5L.q4", "eq5d5L.q5", "England", NULL, NULL) value_5L(EQ5D5Ldata, "eq5d5L.q1", "eq5d5L.q2", "eq5d5L.q3", "eq5d5L.q4", "eq5d5L.q5", "England", "male", c(10, 70)) value_5L(EQ5D5Ldata, "eq5d5L.q1", "eq5d5L.q2", "eq5d5L.q3", "eq5d5L.q4", "eq5d5L.q5", "Indonesia", "male", NULL) value_5L(EQ5D5Ldata, "eq5d5L.q1", "eq5d5L.q2", "eq5d5L.q3", "eq5d5L.q4", "eq5d5L.q5", "Ireland", NULL, c(10, 70)) ## ---- echo = FALSE------------------------------------------------------------ ## Valuing EQ-5D-5L individual score map_5Lto3L_Ind("UK", "CW", 1, 2, 3, 4, 5) map_5Lto3L_Ind("UK", "CW", c(1, 2, 3, 4, 5)) map_5Lto3L_Ind("Denmark", "CW", 12345) ## ---- echo = FALSE------------------------------------------------------------ map_5Lto3L(EQ5D5Ldata, "eq5d5L.q1", 
"eq5d5L.q2", "eq5d5L.q3", "eq5d5L.q4", "eq5d5L.q5", "UK", "CW", NULL, NULL) map_5Lto3L(EQ5D5Ldata, "eq5d5L.q1", "eq5d5L.q2", "eq5d5L.q3", "eq5d5L.q4", "eq5d5L.q5", "UK", "CW", "male", c(10, 70))
/scratch/gouwar.j/cran-all/cranData/valueEQ5D/inst/doc/User_Guide.R
---
title: "User Guide"
author: "Sheeja Manchira Krishnan"
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
  %\VignetteIndexEntry{User Guide}
  %\VignetteEngine{knitr::rmarkdown}
  %\VignetteEncoding{UTF-8}
---

```{r, include = FALSE}
knitr::opts_chunk$set(
  collapse = TRUE,
  comment = "#>"
)
```

```{r setup, include = FALSE}
library(valueEQ5D)
```

## valueEQ5D

EQ-5D is a standardized instrument developed by the EuroQol(R) Group as a measure of health-related quality of life that can be used in a wide range of health conditions and treatments (https://euroqol.org/eq-5d-instruments/). The EQ-5D consists of a descriptive system and a visual analog scale (VAS). The descriptive system comprises five dimensions: mobility, self-care, usual activities, pain/discomfort and anxiety/depression. The EQ-5D VAS records the patients' self-rated health on a vertical visual analogue scale. This can be used as a quantitative measure of health outcome that reflects the patients' own judgment. The scores on these five dimensions can be presented as a health profile or can be converted to a single summary index number (utility) reflecting how preferable the health state is compared to other health profiles.

Currently three versions of EQ-5D exist:

EQ-5D with 3 levels of severity for each of the 5 dimensions: EQ-5D-3L
EQ-5D with 5 levels of severity for each of the 5 dimensions: EQ-5D-5L
EQ-5D for use in children: EQ-5D-Y

This package can be used for valuing the adult EQ-5D descriptive system scores - both 5L and 3L for different countries. EQ-5D-5L scores can be valued for the following countries: Canada, China, England, Ethiopia, France, Germany, Hong Kong, Hungary, Indonesia, Ireland, Japan, Korea, Malaysia, Netherlands, Poland, Portugal, Spain, Taiwan, Thailand, Uruguay, USA and Vietnam.

Canada: Xie et al (2016) doi:10.1097/MLR.0000000000000447
China: Luo et al (2017) doi:10.1016/j.jval.2016.11.016
England: Devlin et al (2018) doi:10.1002/hec.3564
Ethiopia: Welie et al (2019) doi:10.1016/j.vhri.2019.08.475
France: Andrade et al (2019) doi:10.1007/s40273-019-00876-4
Germany: Ludwig et al (2018) doi:10.1007/s40273-018-0615-8
Hong Kong: Wong et al (2018) doi:10.1007/s40271-017-0278-0
Hungary: Rencz et al (2020) doi:10.1016/j.jval.2020.03.019
Indonesia: Purba et al (2017) doi:10.1007/s40273-017-0538-9
Ireland: Hobbins et al (2016) doi:10.1007/s40273-018-0690-x
Japan: Shiroiwa et al (2016) doi:10.1016/j.jval.2016.03.1834
Korea: Kim et al (2016) doi:10.1007/s11136-015-1205-2
Malaysia: Shafie et al (2019) doi:10.1007/s40273-018-0758-7
Netherlands: Versteegh et al (2016) doi:10.1016/j.jval.2016.01.003
Poland: Golicki et al doi:10.1007/s40273-019-00811-7
Portugal: Ferreira et al (2014) doi:10.1007/s11136-019-02226-5
Spain: Ramos-Goñi et al (2018) doi:10.1016/j.jval.2017.10.023
Taiwan: Lin et al (2018) doi:10.1371/journal.pone.0209344
Thailand: Pattanaphesaj et al (2018) doi:10.1080/14737167.2018
Uruguay: Augustovski et al (2016) doi:10.1007/s11136-015-1086-4
USA: Pickard et al (2019) doi:10.1016/j.jval.2019.02.009
Vietnam: Mai et al (2020) doi:10.1007/s11136-020-02469-7

EQ-5D-3L scores can be valued for the countries Argentina, Australia, Belgium, Brazil, Canada, Chile, China, Denmark, Europe, Finland, France, Germany, Hungary, Iran, Italy, Japan, Korea, Malaysia, Netherlands, New Zealand, Poland, Portugal, Singapore, Slovenia, Spain, Sri Lanka, Sweden, Taiwan, Thailand, Trinidad and Tobago, UK, USA, and Zimbabwe.
Argentina: Augustovski et al (2009) doi:10.1111/j.1524-4733.2008.00468.x
Australia: Viney et al (2011) doi:10.1016/j.jval.2011.04.009
Belgium: Cleemput et al (2010) doi:10.1007/s10198-009-0167-0
Brazil: Santos et al (2016) doi:10.1177/0272989X15613521
Canada: Bansback et al (2012) doi:10.1371/journal.pone.0031115
Chile: Zarate et al (2011) doi:10.1016/j.jval.2011.09.002
China: Liu et al (2014) doi:10.1016/j.jval.2014.05.007
Denmark TTO: Wittrup-Jensen et al (2009) doi:10.1177/1403494809105287
Denmark VAS: Szende et al (2014) doi:10.1007/978-94-007-7596-1
Europe: Szende et al (2014) doi:10.1007/978-94-007-7596-1
Finland: Szende et al (2014) doi:10.1007/978-94-007-7596-1
France: Chevalier et al (2013) doi:10.1007/s10198-011-0351-x
Germany (TTO): Greiner et al (2005) doi:10.1007/s10198-004-0264-z
Germany (VAS): Szende et al (2014) doi:10.1007/978-94-007-7596-1
Hungary (TTO): Rencz et al (2020) doi:10.1016/j.jval.2020.03.019
Iran: Goudarzi et al (2019) doi:10.1016/j.vhri.2019.01.007
Italy: Scalone et al (2013) doi:10.1016/j.jval.2013.04.008
Japan: Tsuchiya et al (2002) doi:10.1002/hec.673
Korea: Lee et al doi:10.1111/j.1524-4733.2009.00579.x
Malaysia: Yusof et al (2019) doi:10.1016/j.jval.2011.11.024
Netherlands: Lamers et al doi:10.1002/hec.1124
New Zealand: Devlin et al doi:10.1002/hec.741
Poland: Golicki et al doi:10.1111/j.1524-4733.2009.00596.x
Portugal: Ferreira et al doi:10.1007/s11136-013-0448-z
Singapore: Nan Luo et al doi:10.1007/s40273-014-0142-1
Slovenia: Szende et al (2014) doi:10.1007/978-94-007-7596-1
Spain (TTO): Badia et al (2001) doi:10.1177/0272989X0102100102
Spain (VAS): Szende et al (2014) doi:10.1007/978-94-007-7596-1
Sri Lanka: Kularatna et al (2015) doi:10.1007/s11136-014-0906-2
Sweden: Burström et al (2014) doi:10.1007/s11136-013-0496-4
Taiwan: Lee et al (2013) doi:10.1016/j.jfma.2012.12.015
Thailand: Tongsiri et al (2011) doi:10.1016/j.jval.2011.06.005
Trinidad and Tobago: Bailey et al (2016) doi:10.1016/j.vhri.2016.07.010
UK (TTO): Dolan et al (1997) doi:10.1097/00005650-199711000-00002
UK (VAS): Szende et al (2014) doi:10.1007/978-94-007-7596-1
USA: Shaw et al (2005) doi:10.1097/00005650-200503000-00003
Zimbabwe: Jelsma et al (2003) doi:10.1186/1478-7954-1-11

The 5L descriptive scores can be mapped to 3L index values for 10 countries using the NICE recommended Van Hout et al. method.

If the individual responses are in column formats (e.g. in csv) they can be used as arguments in the methods. In brief, for valuing EQ-5D-3L responses from individual responses to the descriptive system, use "value_3L_Ind"; for valuing EQ-5D-5L responses from the descriptive system, use "value_5L_Ind"; and for mapping EQ-5D-5L responses from the descriptive system to EQ-5D-3L index values, use "map_5Lto3L_Ind". The arguments for all three of these functions are the country name followed by the five individual responses.

If the requirement is to get the summary statistics of collected EQ-5D responses from many individuals, with conditions on gender and age, use these methods: for valuing EQ-5D-3L responses use "value_3L"; for valuing EQ-5D-5L responses, use "value_5L"; and for mapping EQ-5D-5L responses to EQ-5D-3L index values, use "map_5Lto3L". The arguments for all three of these functions are the country name followed by the five column names of the EQ-5D responses and the data containing these EQ-5D responses.
EQ-5D-5L responses for England are converted to index values using the Devlin et al. method. EQ-5D-3L responses for England are converted to index values using the Dolan et al. method. EQ-5D-5L responses for England are mapped to EQ-5D-3L index values using the Van Hout et al. method.

Whenever the EQ-5D-5L responses are taken as input parameters, the code checks whether the input values are within the bounds, i.e. for 3L they have to be between 1 and 3 and for 5L between 1 and 5, and throws an error otherwise.

## Data

For demonstration purposes, a simulated data set representing the treatment and control arms of a randomised controlled trial will be used. If any of the responses are invalid, i.e. other than 1 to 5 for EQ-5D-5L or 1 to 3 for EQ-5D-3L, it will throw an error and return -1.

```{r, echo = FALSE}
## EQ-5D-3L data
set.seed(17)
EQ5D3Ldata <- data.frame(age = abs(rnorm(10, 60, 20)),
                         sex = factor(sample(c("M", "F"), 10, replace = T)),
                         arm = factor(sample(c("Control", "Intervention"), 10, replace = T)),
                         eq5d3L.q1 = (sample(c(1, 2, 3), 10, replace = T)),
                         eq5d3L.q2 = (sample(c(1, 2, 3), 10, replace = T)),
                         eq5d3L.q3 = (sample(c(1, 2, 3), 10, replace = T)),
                         eq5d3L.q4 = (sample(c(1, 2, 3), 10, replace = T)),
                         eq5d3L.q5 = (sample(c(1, 2, 3), 10, replace = T)))

## EQ-5D-5L data
set.seed(17)
EQ5D5Ldata <- data.frame(age = abs(rnorm(10, 60, 20)),
                         sex = factor(sample(c("M", "F"), 10, replace = T)),
                         arm = factor(sample(c("Control", "Intervention"), 10, replace = T)),
                         eq5d5L.q1 = (sample(c(1, 2, 3, 4, 5), 10, replace = T)),
                         eq5d5L.q2 = (sample(c(1, 2, 3, 4, 5), 10, replace = T)),
                         eq5d5L.q3 = (sample(c(1, 2, 3, 4, 5), 10, replace = T)),
                         eq5d5L.q4 = (sample(c(1, 2, 3, 4, 5), 10, replace = T)),
                         eq5d5L.q5 = (sample(c(1, 2, 3, 4, 5), 10, replace = T)))
```

## Examples- Valuing EQ-5D-3L

Each of the below calls will give the same answer when valuing the EQ-5D-3L individual score 1, 2, 3, 2, 2 for mobility, self care, usual activities, pain/discomfort, and anxiety respectively.

```{r, echo = FALSE }
## Valuing EQ-5D-3L individual score
value_3L_Ind("UK", "TTO", 1, 2, 3, 2, 2)
value_3L_Ind("UK", "VAS", c(1, 2, 3, 2, 2))
value_3L_Ind("UK", "TTO", 12322)
```

When the data is in column format as in the example below, use 'value_3L' to get the summary statistics while returning the modified data. Use conditions if the results need to be based on a particular gender or particular age group as in the examples below. This will provide the summary statistics, frequency table, histogram and modified data.

```{r, echo = FALSE }
result1 <- value_3L(EQ5D3Ldata, "eq5d3L.q1", "eq5d3L.q2", "eq5d3L.q3",
                    "eq5d3L.q4", "eq5d3L.q5", "UK", "TTO", NULL, NULL)
```

The results can be accessed via stats, frequencyTable, histogram and modifiedData as given below.

```{r, echo = FALSE }
result1$stats
result1$frequencyTable
result1$histogram
result1$modifiedData
```

Similarly, we can use the options to get the results for a particular gender with given age ranges.

```{r, echo = FALSE }
result2 <- value_3L(EQ5D3Ldata, "eq5d3L.q1", "eq5d3L.q2", "eq5d3L.q3",
                    "eq5d3L.q4", "eq5d3L.q5", "UK", "TTO", "male", c(10, 70))
result3 <- value_3L(EQ5D3Ldata, "eq5d3L.q1", "eq5d3L.q2", "eq5d3L.q3",
                    "eq5d3L.q4", "eq5d3L.q5", "UK", "TTO", "male", NULL)
result4 <- value_3L(EQ5D3Ldata, "eq5d3L.q1", "eq5d3L.q2", "eq5d3L.q3",
                    "eq5d3L.q4", "eq5d3L.q5", "UK", "TTO", NULL, c(10, 70))
```

## Examples- Valuing EQ-5D-5L

Similarly, each of the below calls values the EQ-5D-5L individual score 1, 2, 3, 4, 5 for mobility, self care, usual activities, pain/discomfort, and anxiety respectively.
For EQ-5D-5L, no method needs to be given explicitly.

```{r, echo = FALSE }
## Valuing EQ-5D-5L individual score
value_5L_Ind("England", 1, 2, 3, 4, 5)
value_5L_Ind("England", c(1, 2, 3, 4, 5))
value_5L_Ind("England", 12345)
value_5L_Ind("Germany", 12345)
value_5L_Ind("Spain", 12345)
value_5L_Ind("Indonesia", 12345)
```

When the data is in column format as in the example below, use 'value_5L' to get the summary statistics while returning the modified data. Use conditions if the results need to be based on a particular gender or particular age group as in the examples below. This will provide the summary statistics, frequency table, histogram and modified data.

```{r, echo = FALSE }
value_5L(EQ5D5Ldata, "eq5d5L.q1", "eq5d5L.q2", "eq5d5L.q3", "eq5d5L.q4",
         "eq5d5L.q5", "England", NULL, NULL)
value_5L(EQ5D5Ldata, "eq5d5L.q1", "eq5d5L.q2", "eq5d5L.q3", "eq5d5L.q4",
         "eq5d5L.q5", "England", "male", c(10, 70))
value_5L(EQ5D5Ldata, "eq5d5L.q1", "eq5d5L.q2", "eq5d5L.q3", "eq5d5L.q4",
         "eq5d5L.q5", "Indonesia", "male", NULL)
value_5L(EQ5D5Ldata, "eq5d5L.q1", "eq5d5L.q2", "eq5d5L.q3", "eq5d5L.q4",
         "eq5d5L.q5", "Ireland", NULL, c(10, 70))
```

## Examples- Mapping EQ-5D-5L scores to EQ-5D-3L index values for UK and other countries

Each of the below calls will give the same EQ-5D-3L index value when valuing the EQ-5D-5L individual score 1, 2, 3, 4, 5 for mobility, self care, usual activities, pain/discomfort, and anxiety respectively.

```{r, echo = FALSE }
## Mapping EQ-5D-5L individual scores to EQ-5D-3L index values
map_5Lto3L_Ind("UK", "CW", 1, 2, 3, 4, 5)
map_5Lto3L_Ind("UK", "CW", c(1, 2, 3, 4, 5))
map_5Lto3L_Ind("Denmark", "CW", 12345)
```

When the data is in column format as in the example below, use 'map_5Lto3L' to get the summary statistics while returning the modified data. Use conditions if the results need to be based on a particular gender or particular age group as in the examples below.

```{r, echo = FALSE }
map_5Lto3L(EQ5D5Ldata, "eq5d5L.q1", "eq5d5L.q2", "eq5d5L.q3", "eq5d5L.q4",
           "eq5d5L.q5", "UK", "CW", NULL, NULL)
map_5Lto3L(EQ5D5Ldata, "eq5d5L.q1", "eq5d5L.q2", "eq5d5L.q3", "eq5d5L.q4",
           "eq5d5L.q5", "UK", "CW", "male", c(10, 70))
```
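## Examples- Invalid responses

As noted above, responses outside the valid range raise an error. The call below is a hypothetical illustration (6 is not a valid EQ-5D-5L level); it is wrapped in `try()` so the vignette still renders.

```{r, echo = FALSE }
## Invalid EQ-5D-5L response: level 6 does not exist
try(value_5L_Ind("England", c(1, 2, 3, 4, 6)))
```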
/scratch/gouwar.j/cran-all/cranData/valueEQ5D/vignettes/User_Guide.Rmd
#' Polygons of 25 administration areas of Seoul, Republic of Korea.
#'
#' A dataset containing polygons in WGS84 coordinates and other attributes.
#'
#' @format A sf with 25 rows and 3 variables:
#' \describe{
#'   \item{name}{4-digit ID code of the administration area}
#'   \item{value}{number of sub-administration areas}
#'   \item{geometry}{polygons in WGS84 coordinates}
#' }
#' @source \url{https://github.com/vuski/admdongkor}
"seoul"

#' H3 addresses within Seoul, Republic of Korea.
#'
#' A dataset containing the H3 resolution level 8 addresses and other attributes.
#'
#' @format A data.frame with 1329 rows and 2 variables:
#' \describe{
#'   \item{name}{H3 resolution 8 address}
#'   \item{value}{an arbitrary number, for demonstration}
#' }
#' @source \url{https://github.com/vuski/admdongkor}
"seoul_h3"
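# For illustration: a minimal sketch (wrapped in a function so nothing runs at
# package load) of how these datasets plug into the plotting functions. The
# column names follow the documentation above.
.datasetsDemo <- function() {
  str(seoul)              # sf: name (chr), value (num), geometry (polygons)
  str(seoul_h3)           # data.frame: name (H3 address), value (num)
  valuemap(seoul)         # choropleth from sf polygons
  valuemap_h3(seoul_h3)   # choropleth from H3 addresses
}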
/scratch/gouwar.j/cran-all/cranData/valuemap/R/data.R
#' @import sf #' @import leaflet #' @import htmltools #' @import dplyr #' @import utils #' @import devtools #' @import h3jsr NULL
/scratch/gouwar.j/cran-all/cranData/valuemap/R/imports.R
#' Making choropleth map with sf polygons
#'
#' This function makes a leaflet object.
#' You can easily visualize your sf polygons based on the "value" column.
#' You have options:
#'   background map (= map)
#'   color legend boundary values (= legend.cut)
#'   color palette for color legend (= palette)
#'   showing the "value" number on the center of polygons (= show.text)
#'   color for the "value" number text on the center of polygons (= text.color)
#' @param data A sf object of polygons that has "name" & "value" columns ("value" column must be numeric type)
#' @param map A map name of leaflet::providers
#' @param legend.cut A numeric vector of color legend boundary values
#' @param palette A color name of RColorBrewer palettes
#' @param show.text A boolean that determines whether the "value" number is shown on the center of polygons
#' @param text.color A color name for the "value" number text on the center of polygons
#' @param text.format A format function for the "value" number text on the center of polygons
#' @return
#' A leaflet object.
#' @export
#'
#' @examples
#' # Only run this example in interactive R sessions
#' if (interactive()) valuemap(seoul)
#'
#' # Emphasize polygons with value greater than or equal to 20
#' if (interactive()) valuemap(seoul, legend.cut=c(20))
#'
#' # Visualize without center number on polygons
#' if (interactive()) valuemap(seoul, legend.cut=c(15,17,20), show.text=FALSE)
#'
#' # Change color palette, center number text color & format, and background map
#' if (interactive())
#'   valuemap(
#'     seoul, map=providers$Stamen.Toner, palette='YlOrRd',
#'     text.color='blue', text.format=function(x) paste(x,'EA')
#'   )
valuemap <- function(data, map=providers$OpenStreetMap, legend.cut=NULL, palette='Blues',
                     show.text=TRUE, text.color='black', text.format=function(x) x){
  # label & highlight setting
  popup.labels <- sprintf('<strong>%s</strong><br/>value: %s', data$name, text.format(data$value)) %>%
    lapply(HTML)
  popup.label.options <- labelOptions(style=list(padding='3px 8px'), textsize='15px')
  highlight.options <- highlightOptions(weight=5, color='white', dashArray='',
                                        fillOpacity=.7, bringToFront=TRUE)

  # color setting
  bins <- if(is.null(data)){
    stop('is.null(data) == TRUE')
  }else if(nrow(data) == 0){
    stop('nrow(data) == 0')
  }else if(is.null(legend.cut) & nrow(data) == 1) {
    data$value
  }else if(is.null(legend.cut) & nrow(data) > 1){
    data$value %>% summary %>% unclass %>% unique
  }else{
    c(-Inf, legend.cut, Inf)
  }
  pals <- colorBin(palette, domain=data$value, bins=bins)

  # base plot
  base.map <- leaflet(data) %>% addProviderTiles(provider=map)

  # plot detail
  if(show.text){
    centers <- suppressWarnings(st_centroid(data))
    labels <- sprintf('<strong>%s</strong>', text.format(data$value)) %>% lapply(HTML)
    label.options <- labelOptions(noHide=TRUE, direction='center', textOnly=TRUE,
                                  textsize='12px', style=list(color=text.color))
    base.map %>%
      addPolygons(
        color='white', weight=2, opacity=1, dashArray=3,
        fillColor=~pals(value), fillOpacity=.7,
        highlightOptions=highlight.options,
        label=popup.labels, labelOptions=popup.label.options
      ) %>%
      addLabelOnlyMarkers(data=centers, label=labels, labelOptions=label.options) %>%
      addLegend(pal=pals, values=data$value, opacity=.7, title=NULL, position='bottomright')
  }else{
    base.map %>%
      addPolygons(
        color='white', weight=2, opacity=1, dashArray=3,
        fillColor=~pals(value), fillOpacity=.7,
        highlightOptions=highlight.options,
        label=popup.labels, labelOptions=popup.label.options
      ) %>%
      addLegend(pal=pals, values=data$value, opacity=.7, title=NULL, position='bottomright')
  }
}

#' Making choropleth map with data.frame of h3 address
#'
#' This function makes a leaflet object.
#' You can easily visualize your data.frame with an h3 address "name" column based on the "value" column.
#' You have options:
#'   background map (= map)
#'   color legend boundary values (= legend.cut)
#'   color palette for color legend (= palette)
#'   showing the "value" number on the center of polygons (= show.text)
#'   color for the "value" number text on the center of polygons (= text.color)
#' @param data A data.frame object that has "name" (h3 address) & "value" columns ("value" column must be numeric type)
#' @param map A map name of leaflet::providers
#' @param legend.cut A numeric vector of color legend boundary values
#' @param palette A color name of RColorBrewer palettes
#' @param show.text A boolean that determines whether the "value" number is shown on the center of polygons
#' @param text.color A color name for the "value" number text on the center of polygons
#' @param text.format A format function for the "value" number text on the center of polygons
#' @return
#' A leaflet object.
#' @export
#'
#' @examples
#' if (interactive()){
#'   valuemap_h3(seoul_h3, legend.cut=1:6, show.text=FALSE)
#' }
valuemap_h3 <- function(data, map=providers$OpenStreetMap, legend.cut=NULL, palette='Blues',
                        show.text=TRUE, text.color='black', text.format=function(x) x){
  # convert the H3 addresses in the "name" column to polygons,
  # then delegate to valuemap()
  sf_data <- data %>%
    mutate(geometry = h3jsr::cell_to_polygon(data$name)) %>%
    st_as_sf
  valuemap(
    sf_data, map=map, legend.cut=legend.cut, palette=palette,
    show.text=show.text, text.color=text.color, text.format=text.format
  )
}
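# Illustrative sketch (assumes a recent h3jsr; the point coordinates are made
# up): how an H3 address is produced from a point and turned back into a
# hexagon polygon, which is the conversion valuemap_h3() performs via
# cell_to_polygon(). Wrapped in a function so nothing runs at package load.
.h3ConversionDemo <- function() {
  pt <- sf::st_sfc(sf::st_point(c(127.0, 37.5)), crs = 4326) # a point near Seoul
  addr <- h3jsr::point_to_cell(pt, res = 8)                  # H3 resolution 8 address
  h3jsr::cell_to_polygon(addr)                               # hexagon polygon (sfc)
}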
/scratch/gouwar.j/cran-all/cranData/valuemap/R/valuemap.R
# ------------------------------------------------------------------------------
# ----- Step1_YieldCurveGeneration.R -------------------------------------------
# ------------------------------------------------------------------------------
checkDCC <- function(dcc){
  if (!(dcc == "Thirty360" || dcc == "ACT360" || dcc == "ACT365" || dcc == "ACTACT")){
    errstr <- gsub("[\r\n]", "", sprintf("dcc should be one of Thirty360, ACT360, ACT365, or ACTACT. Given %s.", dcc))
    stop (errstr)
  }
}

checkBDC <- function(bdc){
  # Check if the business day convention bdc is a valid input ----------------
  if (bdc != "Actual" && bdc != "Following" && bdc != "Preceding" &&
      bdc != "Modified_Prec" && bdc != "Modified_Foll") {
    errstr <- sprintf("Business day convention should be one of Actual, Following, Preceding, Modified_Prec, or Modified_Foll. Given %s.", bdc)
    stop (errstr)
  }
}

checkCalendar <- function(calendar){
  # Check if the holiday calendar is a valid input -----------------
  if (calendar != "General" && calendar != "NY"){
    errstr <- sprintf("calendar should be one of General or NY. Given %s.", calendar)
    stop (errstr)
  }
}

checkHolidays <- function(holidays){
  # Check if the user-defined holidays is a valid input ------------------
  if ((!is.null(holidays)) && (!inherits(holidays, "Date")) ) {
    errstr <- sprintf("Holidays should be date type, given %s type.", typeof(holidays))
    stop (errstr)
  }
}
# ------------------------------------------------------------------------------
isLeapYear <- function(date){
  # Indicate if the given date is in a leap year ----------------------------
  # Input -------------------------------------------------------------------
  # -- date: string "YYYY-MM-DD" or a Date data type -------------------------
  # Output -------------------------------------------------------------------
  # -- Binary, TRUE if date is in a leap year or FALSE otherwise -------------
  date <- as.Date(date)
  year <- as.numeric(format(date, format = "%Y"))
  bres <- FALSE
  if (year %% 4 == 0 && year %% 100 != 0) {
    bres <- TRUE
  }
  if (year %% 400 == 0) {
    bres <- TRUE
  }
  return(bres)
}
# ------------------------------------------------------------------------------
leapYearDays <- function(dateA, dateB){
  # Calculate the number of days in leap years between dateA and dateB
  # Input
  # -- dateA: string "YYYY-MM-DD" or a Date data type
  # -- dateB: string "YYYY-MM-DD" or a Date data type
  # Output
  # -- Integer number of days in leap years between dateA and dateB
  dateA <- as.Date(dateA)
  dateB <- as.Date(dateB)
  if (dateA > dateB) {
    return(leapYearDays(dateB, dateA))
  }
  # Then we can assume dateA is before dateB
  yearA <- as.numeric(format(dateA, format = "%Y"))
  yearB <- as.numeric(format(dateB, format = "%Y"))
  days <- 0
  for (y in yearA:yearB) {
    dateTemp <- as.Date(paste(y, 1, 1, sep = "-"))
    if (isLeapYear(dateTemp)) {
      days <- days + as.Date(paste(y + 1, 1, 1, sep = "-")) - dateTemp
    }
  }
  if (isLeapYear(dateA)) {
    days <- days + (as.Date(paste(yearA - 1, 12, 31, sep = "-")) - dateA)
  }
  if (isLeapYear(dateB)) {
    days <- days + (dateB - as.Date(paste(yearB, 12, 31, sep = "-")))
  }
  return(as.numeric(days))
}
# ------------------------------------------------------------------------------
fracYear <- function(dateA, dateB, dcc){
  # Calculates the year fraction between dateA and dateB according to day
  # count convention dcc
  # Input
  # -- dateA: string "YYYY-MM-DD" or a Date data type
  # -- dateB: string "YYYY-MM-DD" or a Date data type
  # -- dcc: string, day count convention
  #    -- Thirty360: 30 days a month, 360 days a year
  #    -- ACT360: actual day count, 360 days a year
  #    -- ACT365: actual day count, 365 days a year
  #    -- ACTACT: actual day count, actual days in a year
  # Output
  # -- double, year fraction between two dates
  dateA <- as.Date(dateA)
  dateB <- as.Date(dateB)
  checkDCC(dcc)
  if (dateB < dateA) {
    return (fracYear(dateB, dateA, dcc))
  }
  yearA <- as.numeric(format(dateA, format = "%Y"))
  yearB <- as.numeric(format(dateB, format = "%Y"))
  monthA <- as.numeric(format(dateA, format = "%m"))
  monthB <- as.numeric(format(dateB, format = "%m"))
  dayA <- as.numeric(format(dateA, format = "%d"))
  dayB <- as.numeric(format(dateB, format = "%d"))
  if (dcc == "Thirty360") {
    dayA <- min(dayA, 30)
    if (dayB == 31 && dayA == 30) {
      dayB <- 30
    }
    frac <- (yearB - yearA) + (monthB - monthA) / 12 + (dayB - dayA) / 360
  } else if (dcc == "ACT360") {
    frac <- as.numeric((dateB - dateA)) / 360
  } else if (dcc == "ACT365") {
    frac <- as.numeric((dateB - dateA)) / 365
  } else if (dcc == "ACTACT") {
    leapDays <- leapYearDays(dateA, dateB)
    nonLeapDays <- abs(as.numeric(dateB - dateA)) - leapDays
    frac <- leapDays / 366 + nonLeapDays / 365
  }
  return (frac)
}
# ------------------------------------------------------------------------------
isWeekend <- function(date){
  # Indicate whether the given date is a weekend
  # Input:
  # -- date: string "YYYY-MM-DD" or a Date data type
  # Output:
  # -- binary output, TRUE if date is a weekend or FALSE otherwise
  date <- as.Date(date)
  w <- weekdays(date)
  if (w == "Saturday" || w == "Sunday"){
    return (TRUE)
  }else{
    return (FALSE)
  }
}
# ------------------------------------------------------------------------------
isHolidayNY <- function(date){
  # Indicate whether the given date is a holiday based on the New York holiday
  # schedule
  # Input:
  # -- date: string "YYYY-MM-DD" or a Date data type
  # Output:
  # -- binary output, TRUE if holiday or FALSE otherwise
  date <- as.Date(date)
  w <- weekdays(date)                           # day of the week as string
  d <- as.numeric(format(date, format = "%d"))  # day in a month as numeric
  m <- as.numeric(format(date, format = "%m"))  # month in a year as numeric
  if (# MLK Day, third Mon in Jan
      ((d >= 15 && d <= 21) && w == "Monday" && m == 1)
      # Washington's birthday, 3rd Mon in Feb
      || ((d >= 15 && d <= 21) && w == "Monday" && m == 2)
      # Memorial Day, last Mon in May
      || (d >= 25 && w == "Monday" && m == 5)
      # Independence Day (observed Fri/Mon if on a weekend)
      || ((d == 4 && m == 7) || (d == 5 && w == "Monday" && m == 7)
          || (d == 3 && w == "Friday" && m == 7))
      # Labor Day, 1st Mon in Sep
      || (d <= 7 && w == "Monday" && m == 9)
      # Columbus Day, 2nd Mon in Oct
      || ((d >= 8 && d <= 14) && w == "Monday" && m == 10)
      # Veterans Day (observed Fri/Mon if on a weekend)
      || ((d == 11 && m == 11) || (d == 12 && w == "Monday" && m == 11)
          || (d == 10 && w == "Friday" && m == 11))
      # US Thanksgiving, 4th Thu in Nov
      || ((d >= 22 && d <= 28) && w == "Thursday" && m == 11)
      # Christmas (observed Fri/Mon if on a weekend)
      || ((d == 25 && m == 12) || (d == 26 && w == "Monday" && m == 12)
          || (d == 24 && w == "Friday" && m == 12))
      # New Year (observed Fri/Mon if on a weekend)
      || ((d == 1 && m == 1) || (d == 2 && w == "Monday" && m == 1)
          || ((d == 31 && w == "Friday" && m == 12)))
      ){
    return (TRUE)
  } else {
    return (FALSE)
  }
}
# ------------------------------------------------------------------------------
isBusinessDay <- function(date, calendar, holidays){
  # Indicates whether date is a business day based on calendar
  # Input
  # -- date: string "YYYY-MM-DD" or a Date data type
  # -- calendar: string, indicate the desired calendar
  #    -- NY: New York holiday calendar
  #    -- General: all weekdays are business days
  # Output
  # -- Binary output, indicating whether the given date is a business day
  date <- as.Date(date)
  checkCalendar(calendar)
  if(!is.null(holidays) && date >= min(holidays) && date <= max(holidays)){
    # within the range covered by the user-defined holidays, a date is a
    # business day only if it is not listed as a holiday
    return(!(date %in% holidays))
  } else if (calendar == "NY") {
    return(!isWeekend(date) && !isHolidayNY(date))
  } else if (calendar == "General") {
    return(!isWeekend(date))
  }
}
# ------------------------------------------------------------------------------
rollDate <- function(date, bdc, calendar, holidays){
  # Roll the given date to the nearest business day based on a given
  # business day convention bdc and holiday calendar calendar
  # Input
  # -- date: string "YYYY-MM-DD" or a Date data type
  # -- bdc: string, business day convention
  #    -- Actual: No rolling on the date applied even if it is a non-business day
  #    -- Preceding: 1st business day before holiday
  #    -- Following: 1st business day after holiday
  #    -- Modified_Prec: Same as "Preceding" unless it belongs to a different
  #       month, in which case 1st business day after holiday
  #    -- Modified_Foll: Same as "Following" unless it belongs to a different
  #       month, in which case 1st business day before holiday
  # -- calendar: string, indicate the desired calendar
  #    -- NY: New York holiday calendar
  #    -- General: all weekdays are business days
  date <- as.Date(date)
  checkCalendar(calendar)
  checkBDC(bdc)
  dateTemp <- date
  while (bdc != "Actual" && !isBusinessDay(dateTemp, calendar, holidays)) {
    if (bdc == "Following") {
      dateTemp <- dateTemp + 1
    } else if (bdc == "Preceding") {
      dateTemp <- dateTemp - 1
    } else if (bdc == "Modified_Prec") {
      dateTemp <- rollDate(date, bdc = "Preceding", calendar, holidays)
      if (format(dateTemp, format = "%m") != format(date, format = "%m")){
        dateTemp <- rollDate(date, bdc = "Following", calendar, holidays)
      }
    } else if (bdc == "Modified_Foll") {
      dateTemp <- rollDate(date, bdc = "Following", calendar, holidays)
      if (format(dateTemp, format = "%m") != format(date, format = "%m")){
        dateTemp <- rollDate(date, bdc = "Preceding", calendar, holidays)
      }
    }
  }
  return(dateTemp)
}
# -----------------------------------------------------------------------------
genSchedule <- function(settleDate, freq, tenor, calendar, bdc, holidays){
  # Calculates an array of dates, spaced by freq months until tenor years.
  # All dates are business days according to holiday calendar calendar and
  # adjusted according to business day convention bdc.
  # Input
  # -- settleDate: string "YYYY-MM-DD" or a Date data type,
  #    the settlement date
  # -- freq: integer, frequency of payment in months, e.g., 3 for quarterly
  # -- tenor: integer, number of years until maturity
  # -- bdc: string, business day convention
  #    -- Actual: No rolling on the date applied even if it is a non-business day
  #    -- Preceding: 1st business day before holiday
  #    -- Following: 1st business day after holiday
  #    -- Modified_Prec: Same as "Preceding" unless it belongs to a different
  #       month, in which case 1st business day after holiday
  #    -- Modified_Foll: Same as "Following" unless it belongs to a different
  #       month, in which case 1st business day before holiday
  # -- calendar: string, indicate the desired calendar
  #    -- NY: New York holiday calendar
  #    -- General: all weekdays are business days
  settleDate <- as.Date(settleDate)
  checkCalendar(calendar)
  checkBDC(bdc)
  count <- tenor * 12 / freq
  month <- paste(toString(freq), "months", sep = " ")
  schedule <- seq.Date(settleDate, length.out = count + 1, by = month)
  for (i in seq_along(schedule)){
    schedule[i] <- rollDate(schedule[i], bdc = bdc, calendar = calendar,
                            holidays = holidays)
  }
  return (schedule)
}
# ------------------------------------------------------------------------------
Initialize <- function(rate, tenor, fixFreq, fixDCC, fltFreq, fltDCC, calendar,
                       bdc, curveDate, numSetDay, yieldCurveDCC, holidays){
  # Rolls the settlement date based on the yield curve observation date
  # curveDate and the number of settlement days numSetDay, and initializes
  # the first entry of the discount vector to 1.
  # Input
  # -- rate: vector of doubles of zero coupon rates
  # -- tenor: vector of integers of corresponding tenors
  # -- fixFreq: integer, fixed leg frequency of payment in months
  # -- fixDCC: string, fixed leg DCC
  # -- fltFreq: integer, floating leg frequency of payment in months
  # -- fltDCC: string, floating leg DCC
  # -- bdc: string, business day convention
  #    -- Actual: No rolling on the date applied even if it is a non-business day
  #    -- Preceding: 1st business day before holiday
  #    -- Following: 1st business day after holiday
  #    -- Modified_Prec: Same as "Preceding" unless it belongs to a different
  #       month, in which case 1st business day after holiday
  #    -- Modified_Foll: Same as "Following" unless it belongs to a different
  #       month, in which case 1st business day before holiday
  # -- calendar: string, indicate the desired calendar
  #    -- NY: New York holiday calendar
  #    -- General: all weekdays are business days
  # -- curveDate: string "YYYY-MM-DD" or a Date data type, yield curve date
  # -- numSetDay: integer, number of settlement days from yield curve date
  # -- yieldCurveDCC: Yield curve DCC
  checkBDC(bdc)
  checkCalendar(calendar)
  checkDCC(fixDCC)
  checkDCC(fltDCC)
  checkDCC(yieldCurveDCC)
  curveDate <- as.Date(curveDate)
  settleDate <- curveDate
  for (i in 1:numSetDay) {
    settleDate <- rollDate(settleDate + 1, bdc = "Following",
                           calendar = calendar, holidays)
  }
  # initialize the first date to curveDate and the discount factor df to 1
  curveDate <- c(curveDate)
  df <- c(1)
  return (data.frame(curveDate, df, settleDate))
}
# ------------------------------------------------------------------------------
logLinear <- function(dateIn, obsDate, discountFac, curveDate, yieldCurveDCC){
  # Calculates the log-linearly interpolated/extrapolated discount factor at
  # dateIn given the yield curve specified by the vector of discount factors
  # discountFac and their corresponding tenors. The yield curve date is
  # curveDate and the yield curve day count convention is yieldCurveDCC.
  # Input
  # -- dateIn: string "YYYY-MM-DD" or a Date data type, date of interest
  # -- obsDate: vector of dates, tenors on the observed yield curve
  # -- discountFac: vector of doubles, discount factors on the observed yield curve
  # -- curveDate: string "YYYY-MM-DD" or a Date data type, yield curve date
  # -- yieldCurveDCC: Yield curve DCC
  checkDCC(yieldCurveDCC)
  dateIn <- as.Date(dateIn)
  obsDate <- as.Date(obsDate)
  curveDate <- as.Date(curveDate)
  N <- length(obsDate)
  if (N < 2){
    stop("Need at least two observed values to interpolate")
  }
  if (dateIn < min(obsDate)){
    stop("Date is earlier than the curve date")
  }
  # if dateIn is already in obsDate, return its discount factor
  if (dateIn %in% obsDate){
    return (discountFac[which(obsDate == dateIn)])
  }
  # coding convenience: assume dateIn is in the last interval unless
  # identified otherwise
  date1 <- obsDate[N - 1]
  date2 <- obsDate[N]
  dDF1 <- discountFac[N - 1]
  dDF2 <- discountFac[N]
  for (i in 1:(N - 1)) {
    if (dateIn > obsDate[i] && dateIn < obsDate[i + 1]){
      date1 <- obsDate[i]
      date2 <- obsDate[i + 1]
      dDF1 <- discountFac[i]
      dDF2 <- discountFac[i + 1]
      break
    }
  }
  date1Frac <- fracYear(curveDate, date1, yieldCurveDCC)
  date2Frac <- fracYear(curveDate, date2, yieldCurveDCC)
  dInFrac <- fracYear(curveDate, dateIn, yieldCurveDCC)
  # log-scale linear interpolation of discount factors
  temp <- log(dDF2) * (dInFrac - date1Frac) + log(dDF1) * (date2Frac - dInFrac)
  return (exp(temp / (date2Frac - date1Frac)))
}
# ------------------------------------------------------------------------------
pvSwap <- function(rate, tenor, fixFreq, fixDCC, fltFreq, fltDCC, calendar,
                   bdc, curveDate, numSetDay, yieldCurveDCC, paymentDate,
                   discountFac, settleDate, holidays){
  # Calculates the present value of an interest rate swap (IRS) by subtracting
  # the present value of the floating leg from that of the fixed leg under the
  # specified day count convention and holiday calendar.
  # Input
  # -- rate: double, given swap rate (fixed leg)
  # -- tenor: integer, given tenor of swap
  # -- fixFreq: integer, fixed leg frequency of payment in months
  # -- fixDCC: string, fixed leg DCC
  # -- fltFreq: integer, floating leg frequency of payment in months
  # -- fltDCC: string, floating leg DCC
  # -- bdc: string, business day convention
  #    -- Actual: No rolling on the date applied even if it is a non-business day
  #    -- Preceding: 1st business day before holiday
  #    -- Following: 1st business day after holiday
  #    -- Modified_Prec: Same as "Preceding" unless it belongs to a different
  #       month, in which case 1st business day after holiday
  #    -- Modified_Foll: Same as "Following" unless it belongs to a different
  #       month, in which case 1st business day before holiday
  # -- calendar: string, indicate the desired calendar
  #    -- NY: New York holiday calendar
  #    -- General: all weekdays are business days
  # -- curveDate: string "YYYY-MM-DD" or a Date data type, yield curve date
  # -- numSetDay: integer, number of settlement days from yield curve date
  # -- yieldCurveDCC: Yield curve DCC
  # -- paymentDate: payment dates
  # -- discountFac: discount factors on payment dates
  # -- settleDate: settlement date
  checkBDC(bdc)
  checkCalendar(calendar)
  checkDCC(fixDCC)
  checkDCC(fltDCC)
  checkDCC(yieldCurveDCC)
  curveDate <- as.Date(curveDate)
  settleDate <- as.Date(settleDate)
  # Calculates the present value of the fixed leg
  fixSchedule <- genSchedule(settleDate, fixFreq, tenor, calendar, bdc, holidays)
  fixPV <- 0
  numFix <- length(fixSchedule)
  if (numFix < 2) {
    stop("Fixed leg has only one payment date.")
  }
  for (i in 2:numFix){
    df <- logLinear(fixSchedule[i], paymentDate, discountFac, curveDate,
                    yieldCurveDCC)
    fixPV <- fixPV + fracYear(fixSchedule[i - 1], fixSchedule[i], fixDCC) *
      rate * df
  }
  # Calculates the present value of the floating leg
  fltSchedule <- genSchedule(settleDate, fltFreq, tenor, calendar, bdc, holidays)
  fltPV <- 0
  numFlt <- length(fltSchedule)
  if (numFlt < 2) {
    stop("Floating leg has only one payment date.")
  }
  for (i in 2:numFlt){
    df <- logLinear(fltSchedule[i], paymentDate, discountFac, curveDate,
                    yieldCurveDCC)
    dt <- fracYear(fltSchedule[i - 1], fltSchedule[i], fltDCC)
    # simple forward rate implied by the discount factors over the period
    dfr <- (logLinear(fltSchedule[i - 1], paymentDate, discountFac, curveDate,
                      yieldCurveDCC) /
            logLinear(fltSchedule[i], paymentDate, discountFac, curveDate,
                      yieldCurveDCC) - 1) / dt
    fltPV <- fltPV + dt * dfr * df
  }
  return (fixPV - fltPV)
}
# ------------------------------------------------------------------------------
solve_rate <- function(index, swapRates, tenors, fixFreq, fixDCC, fltFreq,
                       fltDCC, calendar, bdc, curveDate, numSetDay,
                       yieldCurveDCC, paymentDate, discountFac, settleDate,
                       holidays){
  # Secant method to solve for the discount factor such that the present value
  # of an IRS equals zero
  # Input
  # -- index: integer, index of the input swap rate
  # -- swapRates: vector of doubles of swap rates
  # -- tenors: vector of integers of corresponding tenors
  # -- fixFreq: integer, fixed leg frequency of payment in months
  # -- fixDCC: string, fixed leg DCC
  # -- fltFreq: integer, floating leg frequency of payment in months
  # -- fltDCC: string, floating leg DCC
  # -- bdc: string, business day convention
  # -- calendar: string, indicate the desired calendar
  # -- curveDate: string "YYYY-MM-DD" or a Date data type, yield curve date
  # -- numSetDay: integer, number of settlement days from yield curve date
  # -- yieldCurveDCC: Yield curve DCC
  # -- paymentDate: payment dates
  # -- discountFac: discount factors on payment dates
  # -- settleDate: settlement date
  # -- holidays: user-defined holidays
solve_rate <- function(index, swapRates, tenors, fixFreq, fixDCC, fltFreq,
                       fltDCC, calendar, bdc, curveDate, numSetDay,
                       yieldCurveDCC, paymentDate, discountFac, settleDate,
                       holidays){
  # Secant method to solve for the discount rate such that the present value
  # of an IRS equals zero
  # Input
  # -- index: integer, index of the input swap rate
  # -- swapRates: vector of doubles of swap rates
  # -- tenors: vector of integers of corresponding tenors
  # -- fixFreq: integer, fixed leg frequency of payment in months
  # -- fixDCC: string, fixed leg DCC
  # -- fltFreq: integer, floating leg frequency of payment in months
  # -- fltDCC: string, floating leg DCC
  # -- bdc: string, business day convention
  # -- calendar: string, indicate the desired calendar
  # -- curveDate: string "YYYY-MM-DD" or a Date data type, yield curve date
  # -- numSetDay: integer, number of settlement days from yield curve date
  # -- yieldCurveDCC: Yield curve DCC
  # -- paymentDate: payment dates
  # -- discountFac: discount factors on payment dates
  # -- settleDate: settlement date
  # -- holidays: user-defined holidays

  checkBDC(bdc)
  checkCalendar(calendar)
  checkDCC(fixDCC)
  checkDCC(fltDCC)
  checkDCC(yieldCurveDCC)

  curveDate <- as.Date(curveDate)
  settleDate <- as.Date(settleDate)

  # pick the first initial discount rate rate0
  rate0 <- exp(-swapRates[index] * fracYear(as.Date(curveDate),
                                            as.Date(paymentDate[index + 1]),
                                            yieldCurveDCC))
  discountFac[index + 1] <- rate0
  df0 <- pvSwap(swapRates[index], tenors[index], fixFreq, fixDCC, fltFreq,
                fltDCC, calendar, bdc, curveDate, numSetDay, yieldCurveDCC,
                paymentDate, discountFac, settleDate, holidays)

  # pick the second initial discount rate rate1
  rate1 <- rate0 + 0.001
  discountFac[index + 1] <- rate1
  df1 <- pvSwap(swapRates[index], tenors[index], fixFreq, fixDCC, fltFreq,
                fltDCC, calendar, bdc, curveDate, numSetDay, yieldCurveDCC,
                paymentDate, discountFac, settleDate, holidays)

  # apply Secant method with threshold 1e-10
  while (abs(rate1 - rate0) >= 1e-10){
    dx <- rate0 - df0 * (rate1 - rate0) / (df1 - df0)
    discountFac[index + 1] <- dx
    df <- pvSwap(swapRates[index], tenors[index], fixFreq, fixDCC, fltFreq,
                 fltDCC, calendar, bdc, curveDate, numSetDay, yieldCurveDCC,
                 paymentDate, discountFac, settleDate, holidays)
    rate0 <- rate1
    df0 <- df1
    rate1 <- dx
    df1 <- df
  }

  return (discountFac[index + 1])
}

# ------------------------------------------------------------------------------
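# A standalone sketch of the secant iteration that solve_rate() applies
# (illustrative only; the package root-solves pvSwap() in the bootstrapped
# discount factor, whereas f here is a generic function):
secant <- function(f, x0, x1, tol = 1e-10) {
  f0 <- f(x0)
  f1 <- f(x1)
  while (abs(x1 - x0) >= tol) {
    x2 <- x0 - f0 * (x1 - x0) / (f1 - f0)   # root of the secant line
    x0 <- x1; f0 <- f1                      # rotate the two iterates
    x1 <- x2; f1 <- f(x2)
  }
  x1
}
secant(function(x) x ^ 2 - 2, 1, 2)   # converges to sqrt(2)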
#' Build Curve
#'
#' @description Bootstrap discount factors from a yield curve.
#'
#' @param swapRates A vector of doubles of swap rates.
#' @param tenors A vector of integers of corresponding tenors.
#' @param fixFreq An integer of fixed leg frequency of payment in months.
#'
#' Default is 6, semi-annual payments.
#' @param fixDCC A string of fixed leg day count convention from four options:
#' "Thirty360", "ACT360", "ACT365", or "ACTACT".
#'
#' Default is "Thirty360".
#' @param fltFreq An integer of floating leg frequency of payment in months.
#'
#' Default is 6, semi-annual payments.
#' @param fltDCC A string of floating leg day count convention from four options:
#' "Thirty360", "ACT360", "ACT365", or "ACTACT".
#'
#' Default is "Thirty360".
#' @param bdc A string of business day convention from five options:
#' \itemize{
#'   \item "Actual": No rolling on the date applied even if it is a non-business day
#'   \item "Preceding": 1st business day before holiday
#'   \item "Following": 1st business day after holiday
#'   \item "Modified_Prec": Same as "Preceding" unless it belongs to a different
#'   month, in which case 1st business day after holiday
#'   \item "Modified_Foll": Same as "Following" unless it belongs to a different
#'   month, in which case 1st business day before holiday
#' }
#' Default is "Actual".
#' @param calendar A string of the desired calendar convention
#' from two options:
#' \itemize{
#'   \item "NY": New York holiday calendar
#'   \item "General": all weekdays are business days
#' }
#' @param curveDate A string in the format of "YYYY-MM-DD" of yield curve date.
#' @param numSetDay An integer of settlement days from yield curve date.
#' @param yieldCurveDCC A string of yield curve day count convention from four options:
#' "Thirty360", "ACT360", "ACT365", or "ACTACT". Default is "Thirty360".
#' @param holidays An optional vector of dates of user-defined holidays.
#' If provided, dates within the range of the given holidays are handled by
#' the user-defined holidays rather than by the calendar named in the
#' "calendar" parameter; dates outside that range still follow the "calendar"
#' parameter.
#'
#' @return Outputs a data frame of strings of discount dates and doubles of
#' discount factors.
#' @examples
#' rate <- c(0.69, 0.77, 0.88, 1.01, 1.14, 1.38, 1.66, 2.15) * 0.01
#' tenor <- c(1, 2, 3, 4, 5, 7, 10, 30)
#' fixFreq <- 6
#' fixDCC <- "Thirty360"
#' fltFreq <- 6
#' fltDCC <- "ACT360"
#' calendar <- "NY"
#' bdc <- "Modified_Foll"
#' curveDate <- "2016-02-08"
#' numSetDay <- 2
#' yieldCurveDCC <- "Thirty360"
#' holidays <- NULL
#' buildCurve(rate, tenor, fixFreq, fixDCC, fltFreq, fltDCC, calendar, bdc,
#'            curveDate, numSetDay, yieldCurveDCC, holidays)
#' @export
buildCurve <- function(swapRates, tenors, fixFreq = 6, fixDCC = "Thirty360",
                       fltFreq = 6, fltDCC = "Thirty360",
                       calendar = "General",
                       bdc = c("Actual", "Preceding", "Following",
                               "Modified_Prec", "Modified_Foll"),
                       curveDate, numSetDay, yieldCurveDCC = "Thirty360",
                       holidays = NULL){

  checkBDC(bdc)
  checkCalendar(calendar)
  checkDCC(fixDCC)
  checkDCC(fltDCC)
  checkDCC(yieldCurveDCC)

  curveDate <- as.Date(curveDate)
  checkHolidays(holidays)

  if (length(swapRates) != length(tenors)){
    stop("Number of swap rates is different from the number of tenors")
  }

  # temporary output: yield curve date dates, discount factor discountFac,
  # and settlement date settleDate ---------------------------------------
  outTemp <- Initialize(swapRates, tenors, fixFreq, fixDCC, fltFreq, fltDCC,
                        calendar, bdc, curveDate, numSetDay, yieldCurveDCC,
                        holidays)
  settleDate <- outTemp$settleDate
  numTenor <- length(tenors)
  obsDate <- rep(outTemp$curveDate, numTenor + 1)
  discountFac <- rep(outTemp$df, numTenor + 1)

  # Set up maturity dates in the form of "YYYY-MM-DD"
  for (i in 1:numTenor){
    posixDate <- as.POSIXlt(settleDate)
    posixDate$year <- posixDate$year + tenors[i]
    obsDate[i + 1] <- as.Date(posixDate)
  }

  # Discount factors
  for (i in 1:numTenor){
    discountFac[i + 1] <- solve_rate(i, swapRates, tenors, fixFreq, fixDCC,
                                     fltFreq, fltDCC, calendar, bdc,
                                     curveDate, numSetDay, yieldCurveDCC,
                                     obsDate[1:(i + 1)],
                                     discountFac[1:(i + 1)], settleDate,
                                     holidays)
  }

  # Zero rate
  zeroRate <- mat.or.vec(nr = numTenor + 1, nc = 1)
  dayCount <- mat.or.vec(nr = numTenor + 1, nc = 1)
  for (i in 1:(numTenor + 1)){
    dayCount[i] <- fracYear(curveDate, obsDate[i], yieldCurveDCC)
    if (dayCount[i] == 0) {
      zeroRate[i] <- 0
    } else {
      zeroRate[i] <- -log(discountFac[i]) / dayCount[i]
    }
  }

  # Forward Curve
  forwardCurve <- mat.or.vec(nr = numTenor + 1, nc = 1)
  for (i in 1:numTenor){
    forwardCurve[i] <- (logLinear(obsDate[i], obsDate, discountFac, curveDate,
                                  yieldCurveDCC) /
                        logLinear(obsDate[i + 1], obsDate, discountFac,
                                  curveDate, yieldCurveDCC) - 1) /
                       fracYear(obsDate[i], obsDate[i + 1], yieldCurveDCC)
  }
  forwardCurve[numTenor + 1] <- forwardCurve[numTenor]

  return (data.frame(obsDate, discountFac, zeroRate, forwardCurve, dayCount))
}
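# A minimal numeric sketch (hypothetical discount factors, not package API)
# of the quantities buildCurve() reports: the continuously compounded zero
# rate is -log(df) / t, and the simple forward rate between t1 and t2 is
# (df1 / df2 - 1) / (t2 - t1), mirroring the two loops above.
yrs <- c(1, 2)
dfs <- c(0.99, 0.975)
zeroRates <- -log(dfs) / yrs                          # zero rates at 1y, 2y
fwd1y2y <- (dfs[1] / dfs[2] - 1) / (yrs[2] - yrs[1])  # simple 1y-into-2y forward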
/scratch/gouwar.j/cran-all/cranData/vamc/R/Step1_YieldCurveGeneration.R
# ------------------------------------------------------------------------------
# ----- Step2_ScenarioGeneration.R ---------------------------------------------
# ------------------------------------------------------------------------------

#' Generate Index Scenario
#'
#' @description Simulate a 3D array, numScen-by-numStep-by-numIndex, of Black-Scholes return
#' factors for numIndex indices in each of numStep time steps and each of
#' numScen scenarios. Covariances among indices are specified in covMatrix.
#' Stepsize is given in dT and interpolated forward rates are given in
#' forwardCurve. Random seed is optional for reproducibility.
#'
#' @param covMatrix A numIndex-by-numIndex matrix of doubles of covariances
#' among numIndex indices.
#' @param numScen An integer of number of scenario (sample paths) to be
#' simulated.
#' @param numStep An integer of number of periods to be simulated.
#' @param indexNames A vector of strings containing index names.
#' @param dT A double of stepsize in years; dT = 1 / 12 would be monthly.
#' @param forwardCurve A vector of doubles of discount rates at each time step.
#' @param seed An integer of the deterministic seed for random sampling.
#' @return Outputs a 3D array (numScen-by-numStep-by-numIndex) of index
#' scenarios
#' @examples
#' genIndexScen(mCov, 100, 360, indexNames, 1 / 12, cForwardCurve, 1)
#' @export
#' @importFrom stats rnorm
genIndexScen <- function(covMatrix, numScen, numStep, indexNames, dT = 1 / 12,
                         forwardCurve, seed){

  if (!missing(seed)) {
    set.seed(seed)    # fix random seed if it is given
  }
  if (numScen < 1 || numStep < 1) {
    stop("Please input a numScen and numStep greater than zero")
  }

  numIndex <- length(indexNames)    # no. of indices

  # Cholesky decomposed covariance matrix among indices
  cd <- chol(covMatrix)
  stdFactor <- rowSums(cd ^ 2) / 2    # std.dev factor in BS growth factor
  dtSqrt <- sqrt(dT)

  # construct matrices for faster computations
  stdFactorMat <- matrix(rep(stdFactor, each = numStep), nrow = numStep)
  fcMat <- matrix(rep(forwardCurve, times = numIndex), ncol = numIndex)
  mmu <- (fcMat - stdFactorMat) * dT

  # 3D array of scenarios, numScen-by-numStep-by-numIndex
  indexScen <- array(0, dim = c(numScen, numStep, numIndex))

  for (i in 1:numScen) {
    # numStep-by-numIndex matrix of temporary standard normal rv's
    temp <- matrix(rnorm(numIndex * numStep), nrow = numStep, ncol = numIndex)
    # Black-Scholes return factors, vectorized implementation for speed
    tempMat <- exp(mmu + dtSqrt * (temp %*% cd))
    indexScen[i, , ] <- tempMat
  }

  return (indexScen)
}

#----- random covariance matrix
#' @importFrom stats runif
dim <- 5
covMatrix <- matrix(runif(dim ^ 2), nrow = dim)
covMatrix <- t(covMatrix) %*% covMatrix
while (min(eigen(covMatrix)$values) < 1e-6) {
  covMatrix <- matrix(runif(dim ^ 2), nrow = dim)
  covMatrix <- t(covMatrix) %*% covMatrix
}

# ------------------------------------------------------------------------------
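# A minimal sketch (a toy 2-by-2 covariance, illustrative only) of the
# correlation mechanism genIndexScen() relies on: multiplying iid standard
# normals by the upper Cholesky factor of covMatrix yields draws with the
# desired covariance.
toyCov <- matrix(c(0.04, 0.01, 0.01, 0.09), nrow = 2)
cdToy  <- chol(toyCov)                        # upper triangular factor
z      <- matrix(rnorm(2 * 10000), ncol = 2)  # iid N(0, 1) draws
x      <- z %*% cdToy                         # correlated draws
# cov(x) is approximately toyCov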
#' @importFrom stats runif
rFundMap <- function(indexNames, numFund){
  # Simulate a random fund map that maps the indices to numFund funds.
  # The first numIndex rows of the fund map form an identity matrix.
  # Inputs:
  # -- indexNames: vector of strings, names of indices
  # -- numFund: integer, number of funds
  # Outputs:
  # -- fundMap: (numIndex + numFund)-by-numIndex of doubles

  numIndex <- length(indexNames)

  # placeholder of fundMap
  fundMap <- mat.or.vec(nr = numFund, nc = numIndex)
  colnames(fundMap) <- indexNames

  # randomly assign the number of indices that a fund will be mapped with
  indexMap <- ceiling(runif(numFund, min = 1, max = numIndex))

  # randomly pick indexMap from the pool of indices and assign random weights
  for (i in 1:numFund) {
    # select index
    select <- sample(indexNames, indexMap[i], replace = FALSE)
    # generate weight
    fundMap[i, select] <- sample(c(1:10), length(select), replace = TRUE)
    fundMap[i, ] <- fundMap[i, ] / sum(fundMap[i, ])
  }

  # combine identity matrix to fund map
  return (fundMap = rbind(diag(numIndex), fundMap))
}

# ------------------------------------------------------------------------------
#'
#' Generate Fund Scenario
#'
#' @description Calculate numScen-by-numStep-by-numFund fund scenarios based on given index
#' scenarios indexScen and fund map fundMap that maps indices to funds.
#'
#' @param fundMap A numFund-by-numIndex matrix of doubles,
#' mapping indices to funds.
#' @param indexScen A numScen-by-numStep-by-numIndex array of doubles,
#' index scenarios.
#' @return Outputs a numScen-by-numStep-by-numFund array of doubles of
#' fund scenarios.
#' @examples
#' genFundScen(fundMap, indexScen)
#' @export
genFundScen <- function(fundMap, indexScen){
  # Calculate numScen-by-numStep-by-numFund fund scenarios based on
  # given index scenarios indexScen and fund map fundMap that maps indices
  # to funds

  # extract dimensions from given input
  scenDim <- dim(indexScen)
  numFund <- nrow(fundMap)

  if (length(scenDim) == 3) {
    if (dim(fundMap)[2] != dim(indexScen)[3]) {
      stop("Indices from indexScen should align with indices in fundMap")
    }
    numScen <- scenDim[1]
    numStep <- scenDim[2]

    # placeholder for the fund scenarios, numScen-by-numStep-by-numFund
    fundScen <- array(0, dim = c(numScen, numStep, numFund))
    for (j in 1:numScen) {
      # the j-th scenario for all indices
      scenJ <- indexScen[j, , ]
      # convert an index scen. to fund scen.
      fundScen[j, , ] <- t(fundMap %*% t(scenJ))
    }
  } else if (length(scenDim) == 2) {
    if (dim(fundMap)[2] != dim(indexScen)[2]) {
      stop("Indices from indexScen should align with indices in fundMap")
    }
    # convert an index scen. to fund scen.
    fundScen <- t(fundMap %*% t(indexScen))
  }

  return (fundScen)
}
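# A minimal end-to-end sketch (two toy indices, a toy covariance, and a flat
# 2% forward curve, all illustrative only) chaining the generators above:
idxNames <- c("idxA", "idxB")
toyCov2  <- matrix(c(0.04, 0.01, 0.01, 0.09), nrow = 2)
idxScen  <- genIndexScen(toyCov2, numScen = 10, numStep = 12, idxNames,
                         dT = 1 / 12, forwardCurve = rep(0.02, 12), seed = 1)
fMap     <- rFundMap(idxNames, numFund = 3)   # (2 + 3)-by-2 fund map
fScen    <- genFundScen(fMap, idxScen)        # 10-by-12-by-5 fund scenarios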
/scratch/gouwar.j/cran-all/cranData/vamc/R/Step2_ScenarioGeneration.R
# ------------------------------------------------------------------------------
# ----- Step3_PolicyGeneration.R -----------------------------------------------
# ------------------------------------------------------------------------------

#' Generate Portfolio at Inception
#'
#' @description Generate a portfolio of VA contracts at inception based on given attribute
#' ranges and investment fund information.
#'
#' @param birthDayRng A vector of two strings in 'YYYY-MM-DD' of birthday range.
#' @param issueRng A vector of two strings in 'YYYY-MM-DD' of issue date range.
#' @param matRng A vector of two integers, range of policy maturity.
#' @param acctValueRng A vector of two doubles, range of initial account values.
#' @param femPct A double, percentage of female policyholders in the
#' portfolio.
#' @param fundFee A vector of doubles, fees charged by each fund in bps.
#' @param baseFee A double, base fee for all funds in bps.
#' @param prodPct A vector of non-negative doubles, proportions of rider types.
#' @param prodType A vector of strings, names of different rider types.
#' @param riderFee A vector of doubles, rider fees for different riders in bps.
#' @param rollUpRate A vector of doubles, roll up rates for different rider
#' types in bps.
#' @param withdrawalRate A vector of doubles, withdrawal rates for different
#' rider types in bps.
#' @param numPolicy An integer, number of each type of policies to be generated.
#' @return Outputs a data frame of 45 columns of attributes in an annuity
#' contract.
#' @examples
#' genPortInception(c("1980-01-01", "1990-01-01"), c("2001-08-01", "2014-01-01"),
#' c(15, 30), c(5e4, 5e5), 0.4, c(30, 50, 60, 80, 10, 38, 45, 55, 47, 46),
#' 200, rep(1 / 4, 4), c("WBRP", "WBRU", "WBSU", "DBWB"),
#' riderFee = c(25, 35, 35, 50), rep(5, 4), rep(5, 4), 100)
#' \dontrun{
#' genPortInception()
#' }
#' @importFrom stats runif
#' @export
genPortInception <- function(birthDayRng = c("1950-01-01", "1980-01-01"),
                             issueRng = c("2001-08-01", "2014-01-01"),
                             matRng = c(15, 30), acctValueRng = c(5e4, 5e5),
                             femPct = 0.4,
                             fundFee = c(30, 50, 60, 80, 10, 38, 45, 55, 47,
                                         46),
                             baseFee = 200, prodPct = rep(1 / 19, 19),
                             prodType = c("DBRP", "DBRU", "DBSU", "ABRP",
                                          "ABRU", "ABSU", "IBRP", "IBRU",
                                          "IBSU", "MBRP", "MBRU", "MBSU",
                                          "WBRP", "WBRU", "WBSU", "DBAB",
                                          "DBIB", "DBMB", "DBWB"),
                             riderFee = c(25, 35, 35, 50, 60, 60, 60, 70, 70,
                                          50, 60, 60, 65, 75, 75, 75, 85, 75,
                                          90),
                             rollUpRate = rep(5, 19),
                             withdrawalRate = rep(5, 19), numPolicy = 10){

  if (matRng[1] < 0 || matRng[1] > matRng[2]) {
    stop("Please input a positive and increasing maturity range")
  }
  if (acctValueRng[1] < 0 || acctValueRng[1] > acctValueRng[2]) {
    stop("Please input a positive and increasing account value range")
  }
  if (femPct < 0 || femPct > 1) {
    stop("Please input a female percentage between 0 and 1")
  }
  if (sum(prodPct) != 1 || (TRUE %in% (prodPct < 0))) {
    stop("Please input a vector of valid product percentages that sum up to 1")
  }
  if ((length(prodType) == length(rollUpRate) &&
       length(rollUpRate) == length(riderFee) &&
       length(riderFee) == length(withdrawalRate)) == FALSE){
    msg <- gsub("[\r\n]", "", "Please input prodType, rollUpRate, riderFee, and withdrawalRate in the same length")
    stop(msg)
  }
  if (numPolicy < 1){
    stop("Please input numPolicy > 0")
  }
  numFund <- length(fundFee)
  numType <- length(prodType)
  portSize <- numPolicy * numType

  # recordID, survivorShip, gender, productType, baseFee
  recordID <- 1:portSize
  survivorShip <- rep(1, portSize)
  gender <- sample(c("F", "M"), portSize, replace = T,
                   prob = c(femPct, 1 - femPct))
  productType <- rep(prodType, each = numPolicy)

  # Random issue dates
  issueDate1 <- as.Date(issueRng[1])
  issueDate2 <- as.Date(issueRng[2])
  # sample random dates within range then set them to beginning of month
  issueDate <- sample(seq.Date(issueDate1, issueDate2, by = "day"), portSize)
  issueDate <- as.Date(cut(issueDate, "month"))

  # Random maturity dates
  matDate <- issueDate
  matYear <- sample(seq(matRng[1], matRng[2]), size = portSize,
                    replace = TRUE)
  datePOS <- as.POSIXlt(matDate)
  datePOS$year <- datePOS$year + matYear
  matDate <- as.Date(datePOS)

  # Random birth dates
  birDate1 <- as.Date(birthDayRng[1])
  birDate2 <- as.Date(birthDayRng[2])
  # sample random dates within range then set them to beginning of month
  birthDate <- sample(seq.Date(birDate1, birDate2, by = "day"), portSize)
  birthDate <- as.Date(cut(birthDate, "month"))

  currentDate <- issueDate    # at inception, current date is the issue date

  baseFee <- rep(baseFee, portSize) / 1e4    # constant M&E base fee

  # riderFee, rollUpRate, wbWithdrawalRate all depend on product type
  names(riderFee) <- prodType
  riderFee <- riderFee[productType] / 1e4
  names(rollUpRate) <- prodType
  rollUpRate <- rollUpRate[productType] / 1e4
  names(withdrawalRate) <- prodType
  wbWithdrawalRate <- withdrawalRate[productType] / 1e4

  # Initial values at inception
  gbAmt <- rep(0, portSize)
  gmwbBalance <- rep(0, portSize)
  withdrawal <- rep(0, portSize)

  portfolio <- data.frame(recordID, survivorShip, gender, productType,
                          issueDate, matDate, birthDate, currentDate, baseFee,
                          riderFee, rollUpRate, gbAmt, gmwbBalance,
                          wbWithdrawalRate, withdrawal)

  # Fund info calculation
  # -- fundNum, unclear what the usage is at the moment
  fundNum <- matrix(rep(1:numFund, each = portSize), nrow = portSize)
  colnames(fundNum) <- paste0("fundNum", sprintf("%d", 1:numFund))

  # -- fundValue, at inception, equally allocate account value to
  #    selected funds
  fundValue <- matrix(0, nrow = portSize, ncol = numFund)
  colnames(fundValue) <- paste0("fundValue", sprintf("%d", 1:numFund))
  acctVal <- runif(portSize, min = acctValueRng[1], max = acctValueRng[2])
  # no. of funds to be invested
  nSelected <- ceiling(runif(portSize, max = numFund))
  for (i in 1:portSize) {
    selectedFunds <- sample(1:numFund, nSelected[i], replace = FALSE)
    fundValue[i, selectedFunds] <- acctVal[i] / nSelected[i]
  }

  # -- fundFee, same for all policies
  fundFee <- matrix(rep(fundFee, each = portSize), nrow = portSize) / 1e4
  colnames(fundFee) <- paste0("fundFee", sprintf("%d", 1:numFund))

  return(data.frame(cbind(portfolio, fundNum, fundValue, fundFee)))
}
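# A minimal sketch (mirroring the roxygen example's four-rider setup) showing
# that each generated policy's fund allocations add back up to its sampled
# account value:
port <- genPortInception(prodPct = rep(1 / 4, 4),
                         prodType = c("WBRP", "WBRU", "WBSU", "DBWB"),
                         riderFee = c(25, 35, 35, 50), rollUpRate = rep(5, 4),
                         withdrawalRate = rep(5, 4), numPolicy = 2)
acctVal <- rowSums(port[, grep("fundValue", colnames(port))])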
/scratch/gouwar.j/cran-all/cranData/vamc/R/Step3_PolicyGeneration.R
# ------------------------------------------------------------------------------
# ----- Step4_MonteCarloValuation.R --------------------------------------------
# ------------------------------------------------------------------------------

#' @importFrom utils read.csv
readMortTable <- function(dirFileName, header = T){
  # reads mortality table from .csv file specified by dirFileName
  # header indicates whether column names are included in the file
  # By default, the first column is age x, the second and third columns are
  # mortality rates qx for female & male
  # Inputs:
  # -- dirFileName: string of directory and filename
  # -- header: binary indicator for whether column names are given in the file
  # Output:
  # -- mortTable: a dataframe with three columns, x and qx for female & male
  # -- ALSO need to overwrite existing default mortTable in the workspace

  mortTable <- read.csv(file = dirFileName, header = header, sep = ",")
  colnames(mortTable) <- c("age", "F", "M")

  # mortTable is a dataframe with three columns, x and qx for female & male
  return(mortTable)
}

# ------------------------------------------------------------------------------
#' Calculate Mortality Factors
#'
#' @description Calculates the mortality factors (t - 1)px q(x + t - 1) and tpx required to
#' valuate the inPolicy. Extract gender, age (birth date & current date),
#' valuation date (current date), and maturity date from inPolicy, mortality
#' rates from mortTable.
#'
#' @param inPolicy A vector containing 45 attributes of a VA policy,
#' usually a row of a VA portfolio dataframe.
#' @param mortTable A dataframe with three columns of doubles representing the
#' mortality table.
#' @param dT A double of stepsize in years; dT = 1 / 12 would be monthly.
#' @return Outputs a two-column data frame of doubles of mortFactors (t - 1)px
#' q(x + t - 1) and tpx.
#' @examples
#' exPolicy <- VAPort[1, ]
#' calcMortFactors(exPolicy, mortTable, dT = 1 / 12)
#' @export
calcMortFactors <- function(inPolicy, mortTable, dT = 1 / 12){

  birDate <- as.POSIXlt(inPolicy[1, "birthDate"])
  curDate <- as.POSIXlt(inPolicy[1, "currentDate"])
  matDate <- as.POSIXlt(inPolicy[1, "matDate"])

  if (curDate < birDate) stop("Current date is prior to birth date.")
  if (matDate < birDate) stop("Maturity date is prior to birth date.")
  if (matDate < curDate) stop("Maturity date is prior to current date.")

  # no. of months since birth to current date,
  # all dates are first of the month
  monBirCur <- 12 * (curDate$year - birDate$year) +
    (curDate$mon - birDate$mon)
  # no. of months since birth to maturity date,
  # all dates are first of the month
  monMatCur <- 12 * (matDate$year - birDate$year) +
    (matDate$mon - birDate$mon)

  ageCur <- monBirCur / 12       # age at current date
  x0 <- floor(ageCur)            # integer age at current date
  t0 <- ageCur - x0              # fractional age at current date
  xT <- floor(monMatCur / 12)    # integer age after numStep periods

  gender <- ifelse(inPolicy[1, "gender"] == "F", "female", "male")

  # annual mortality rates from current age to maturity age
  maxRng <- c(x0:mortTable$age[length(mortTable$age)])
  ageRngLen <- xT - x0 + 1
  annualQ <- mortTable[mortTable$age %in% maxRng, gender]
  # append some 1's if xMat > max age in mortality table
  annualQ <- c(annualQ, rep(1, as.numeric(ageRngLen > length(maxRng)) *
                               (ageRngLen - length(annualQ))))

  p <- rep(1)     # placeholder for the p factors, default 1 for 0px
  pq <- rep(1)    # placeholder for the pq factors

  # loop over for numStep with stepsize dT
  xCur <- x0
  tCur <- t0
  dP <- 1
  step <- 1
  while (dP > 0.00001) {
    baseQ <- annualQ[xCur - x0 + 1]    # base qx in current loop
    # prob. of death within next dT at age xCur+tCur
    dtQxt <- (dT * baseQ) / (1 - tCur * baseQ)
    # prob. of surviving the current dT
    p[step + 1] <- p[step] * (1 - dtQxt)
    dP <- p[step + 1]
    pq[step] <- p[step] * dtQxt    # prob. of death in the current dT

    # update age for next period
    # age at current date
    ageCur <- ageCur + dT
    # integer age at current date
    xCur <- floor(ageCur + 1e-10)
    # fractional age at current date
    tCur <- ifelse(abs(ageCur - xCur) < 1e-10, 0, ageCur - xCur)
    step <- step + 1
  }

  return(mortFactors = data.frame(pq = pq, p = p[-1]))
}
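# A minimal sketch (assuming the package's bundled mortTable and VAPort data;
# the flat 2% discounting and the $1000 cash flow are hypothetical) of how
# the factors are used downstream: death cash flows are weighted by
# (t-1)px q(x+t-1) ("pq"), living cash flows by tpx ("p"), then discounted.
exPolicy <- VAPort[1, ]
mf <- calcMortFactors(exPolicy, mortTable, dT = 1 / 12)
dfSketch <- exp(-0.02 * (1:nrow(mf)) / 12)      # hypothetical discount factors
pvDeathBenefit <- sum(mf$pq * dfSketch * 1000)  # PV of a constant $1000 death CF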
# ------------------------------------------------------------------------------
# Remarks:
# 1. projectDBRP is 1 of the 19 cash flow projection functions
# 2. Each cash flow projection function follows calculations in
#    Appendix A of the paper
# 3. User-defined VA policies should have an associated projectXXXX function

# Project a DBRP policy whose attributes are specified in inPolicy based on
# one fund scenario oneFundScen and steplength dT.
projectDBRP <- function(inPolicy, oneFundScen, dT = 1 / 12, pq, p, df){

  # projection will always start from the current date for numStep periods
  numStep <- dim(oneFundScen)[1]
  if (((length(pq) >= numStep) && (length(p) >= numStep) &&
       (length(df) >= numStep)) == FALSE) {
    stop("df, pq, and p must have length >= numStep from oneFundScen")
  }

  # Initialize DA, LA, RC
  DA <- rep(0, numStep)
  LA <- rep(0, numStep)
  RC <- rep(0, numStep)
  # These two variables are defined within the function only
  dAV <- rep(0, numStep)
  dFee <- rep(0, numStep)

  # The main for loop to do the calculations, hard to parallelize
  for (step in 1:numStep) {
    # one suggestion for partial account evolution to avoid fund_helper
    dPartialAV <- inPolicy[1, grep("fundValue", colnames(inPolicy))] *
      oneFundScen[step, ] *
      (1 - inPolicy[1, grep("fundFee", colnames(inPolicy))] * dT)
    # anniversary month: deduct rider and base fees from the account
    if (as.POSIXlt(inPolicy[1, "currentDate"])$mon ==
        as.POSIXlt(inPolicy[1, "issueDate"])$mon) {
      raw <- sum(dPartialAV)
      dFee[step] <- sum(raw * inPolicy[1, "riderFee"])
      # update individual fund value
      dPartialAV <- dPartialAV - dPartialAV * inPolicy[1, "riderFee"] -
        dPartialAV * inPolicy[1, "baseFee"]
      dAV[step] <- sum(dPartialAV)
      inPolicy[1, grep("fundValue", colnames(inPolicy))] <- dPartialAV
    } else {
      dAV[step] <- sum(dPartialAV)
      inPolicy[1, grep("fundValue", colnames(inPolicy))] <- dPartialAV
    }

    # update policy info
    DA[step] <- max(0, inPolicy[1, "gbAmt"] - dAV[step])
    LA[step] <- 0
    RC[step] <- dFee[step]

    newDate <- as.POSIXlt(inPolicy[1, "currentDate"])
    newDate$mon <- newDate$mon + 1
    inPolicy[1, "currentDate"] <- as.Date(newDate)
  }

  # return death benefit, living benefit, and risk charge
  # outPolicy is used in aging, but not in valuation
  pq <- pq[1:numStep] * df[1:numStep]
  p <- p[1:numStep] * df[1:numStep]
  return(list(DA = pq %*% DA, LA = p %*% LA, RC = p %*% RC,
              outPolicy = inPolicy))
}

# ------------------------------------------------------------------------------
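# A minimal sketch of one projection call (illustrative only: projectDBRP is
# internal, the flat 0.5% monthly scenario and unit discount factors are
# hypothetical, and the bundled VAPort/mortTable data are assumed available):
onePolicy <- VAPort[1, ]
oneScen <- matrix(1.005, nrow = 12,
                  ncol = length(grep("fundValue", colnames(onePolicy))))
mfOne <- calcMortFactors(onePolicy, mortTable, dT = 1 / 12)
outOne <- projectDBRP(onePolicy, oneScen, 1 / 12, mfOne$pq, mfOne$p,
                      rep(1, 12))
# outOne$DA, outOne$LA, outOne$RC hold the PV of death benefit, living
# benefit, and risk charge along this single scenario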
projectDBRU <- function(inPolicy, oneFundScen, dT = 1 / 12, pq, p, df){

  # projection will always start from the current date for numStep periods
  numStep <- dim(oneFundScen)[1]
  if (((length(pq) >= numStep) && (length(p) >= numStep) &&
       (length(df) >= numStep)) == FALSE) {
    stop("df, pq, and p must have length >= numStep from oneFundScen")
  }

  # Initialize DA, LA, RC
  DA <- rep(0, numStep)
  LA <- rep(0, numStep)
  RC <- rep(0, numStep)
  # These two variables are defined within the function only
  dAV <- rep(0, numStep)
  dFee <- rep(0, numStep)

  # The main for loop to do the calculations, hard to parallelize
  for (step in 1:numStep) {
    # one suggestion for partial account evolution to avoid fund_helper
    dPartialAV <- inPolicy[1, grep("fundValue", colnames(inPolicy))] *
      oneFundScen[step, ] *
      (1 - inPolicy[1, grep("fundFee", colnames(inPolicy))] * dT)
    # anniversary month: deduct rider and base fees from the account
    if (as.POSIXlt(inPolicy[1, "currentDate"])$mon ==
        as.POSIXlt(inPolicy[1, "issueDate"])$mon) {
      raw <- sum(dPartialAV)
      dFee[step] <- sum(raw * inPolicy[1, "riderFee"])
      # update individual fund value
      dPartialAV <- dPartialAV - dPartialAV * inPolicy[1, "riderFee"] -
        dPartialAV * inPolicy[1, "baseFee"]
      dAV[step] <- sum(dPartialAV)
      inPolicy[1, grep("fundValue", colnames(inPolicy))] <- dPartialAV
      # anniversary: annual roll-up
      inPolicy[1, "gbAmt"] <- inPolicy[1, "gbAmt"] *
        (1 + inPolicy[1, "rollUpRate"])
    } else {
      dAV[step] <- sum(dPartialAV)
      inPolicy[1, grep("fundValue", colnames(inPolicy))] <- dPartialAV
    }

    # update policy info
    DA[step] <- max(0, inPolicy[1, "gbAmt"] - dAV[step])
    LA[step] <- 0
    RC[step] <- dFee[step]

    newDate <- as.POSIXlt(inPolicy[1, "currentDate"])
    newDate$mon <- newDate$mon + 1
    inPolicy[1, "currentDate"] <- as.Date(newDate)
  }

  # return death benefit, living benefit, and risk charge
  # outPolicy is used in aging, but not in valuation
  pq <- pq[1:numStep] * df[1:numStep]
  p <- p[1:numStep] * df[1:numStep]
  return(list(DA = pq %*% DA, LA = p %*% LA, RC = p %*% RC,
              outPolicy = inPolicy))
}

# ------------------------------------------------------------------------------
projectDBSU <- function(inPolicy, oneFundScen, dT = 1 / 12, pq, p, df){

  # projection will always start from the current date for numStep periods
  numStep <- dim(oneFundScen)[1]
  if (((length(pq) >= numStep) && (length(p) >= numStep) &&
       (length(df) >= numStep)) == FALSE) {
    stop("df, pq, and p must have length >= numStep from oneFundScen")
  }

  # Initialize DA, LA, RC
  DA <- rep(0, numStep)
  LA <- rep(0, numStep)
  RC <- rep(0, numStep)
  # These two variables are defined within the function only
  dAV <- rep(0, numStep)
  dFee <- rep(0, numStep)

  # The main for loop to do the calculations, hard to parallelize
  for (step in 1:numStep) {
    # one suggestion for partial account evolution to avoid fund_helper
    dPartialAV <- inPolicy[1, grep("fundValue", colnames(inPolicy))] *
      oneFundScen[step, ] *
      (1 - inPolicy[1, grep("fundFee", colnames(inPolicy))] * dT)
    # anniversary month: deduct rider and base fees from the account
    if (as.POSIXlt(inPolicy[1, "currentDate"])$mon ==
        as.POSIXlt(inPolicy[1, "issueDate"])$mon){
      raw <- sum(dPartialAV)
      dFee[step] <- sum(raw * inPolicy[1, "riderFee"])
      # update individual fund value
      dPartialAV <- dPartialAV - dPartialAV * inPolicy[1, "riderFee"] -
        dPartialAV * inPolicy[1, "baseFee"]
      dAV[step] <- sum(dPartialAV)
      inPolicy[1, grep("fundValue", colnames(inPolicy))] <- dPartialAV
      # anniversary: step up the guarantee to the account value
      inPolicy[1, "gbAmt"] <- max(dAV[step], inPolicy[1, "gbAmt"])
    } else {
      dAV[step] <- sum(dPartialAV)
      inPolicy[1, grep("fundValue", colnames(inPolicy))] <- dPartialAV
    }

    # update policy info
    DA[step] <- max(0, inPolicy[1, "gbAmt"] - dAV[step])
    LA[step] <- 0
    RC[step] <- dFee[step]

    newDate <- as.POSIXlt(inPolicy[1, "currentDate"])
    newDate$mon <- newDate$mon + 1
    inPolicy[1, "currentDate"] <- as.Date(newDate)
  }

  # return death benefit, living benefit, and risk charge
  # outPolicy is used in aging, but not in valuation
  pq <- pq[1:numStep] * df[1:numStep]
  p <- p[1:numStep] * df[1:numStep]
  return(list(DA = pq %*% DA, LA = p %*% LA, RC = p %*% RC,
              outPolicy = inPolicy))
}

# ------------------------------------------------------------------------------
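# Note on naming, summarizing the pattern across the projection functions
# below: the variants differ only in how the guarantee base gbAmt evolves and
# when benefits are paid. "RP" keeps the guarantee flat (return of premium),
# "RU" rolls it up annually by rollUpRate, and "SU" steps it up (ratchets) to
# the account value on each policy anniversary. "DB" pays a death benefit,
# "AB"/"IB"/"MB" pay a living benefit at renewal or maturity, and "WB"
# supports guaranteed withdrawals.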
projectABRP <- function(inPolicy, oneFundScen, dT = 1 / 12, pq, p, df){

  # projection will always start from the current date for numStep periods
  numStep <- dim(oneFundScen)[1]
  if (((length(pq) >= numStep) && (length(p) >= numStep) &&
       (length(df) >= numStep)) == FALSE) {
    stop("df, pq, and p must have length >= numStep from oneFundScen")
  }

  # Initialize DA, LA, RC
  DA <- rep(0, numStep)
  LA <- rep(0, numStep)
  RC <- rep(0, numStep)
  # These two variables are defined within the function only
  dAV <- rep(0, numStep)
  dFee <- rep(0, numStep)

  # The main for loop to do the calculations, hard to parallelize
  for (step in 1:numStep) {
    # one suggestion for partial account evolution to avoid fund_helper
    dPartialAV <- inPolicy[1, grep("fundValue", colnames(inPolicy))] *
      oneFundScen[step, ] *
      (1 - inPolicy[1, grep("fundFee", colnames(inPolicy))] * dT)
    # anniversary month: deduct rider and base fees from the account
    if (as.POSIXlt(inPolicy[1, "currentDate"])$mon ==
        as.POSIXlt(inPolicy[1, "issueDate"])$mon) {
      raw <- sum(dPartialAV)
      dFee[step] <- sum(raw * inPolicy[1, "riderFee"])
      # update individual fund value
      dPartialAV <- dPartialAV - dPartialAV * inPolicy[1, "riderFee"] -
        dPartialAV * inPolicy[1, "baseFee"]
      dAV[step] <- sum(dPartialAV)
      inPolicy[1, grep("fundValue", colnames(inPolicy))] <- dPartialAV
    } else {
      dAV[step] <- sum(dPartialAV)
      inPolicy[1, grep("fundValue", colnames(inPolicy))] <- dPartialAV
    }

    DA[step] <- 0
    LA[step] <- 0
    RC[step] <- dFee[step]

    # renewal
    if (inPolicy[1, "matDate"] == inPolicy[1, "currentDate"]) {
      if (inPolicy[1, "gbAmt"] < dAV[step]) inPolicy[1, "gbAmt"] <- dAV[step]
      else {
        LA[step] <- max(0, inPolicy[1, "gbAmt"] - dAV[step])
        numFund <- length(inPolicy[1, grep("fundValue", colnames(inPolicy))])
        if (dAV[step] > 0.00001) {
          inPolicy[1, grep("fundValue", colnames(inPolicy))] <-
            inPolicy[1, grep("fundValue", colnames(inPolicy))] *
            inPolicy[1, "gbAmt"] / dAV[step]
        } else {
          inPolicy[1, grep("fundValue", colnames(inPolicy))] <-
            inPolicy[1, "gbAmt"] / numFund
        }
      }
    }
    newDate <- as.POSIXlt(inPolicy[1, "currentDate"])
    newDate$mon <- newDate$mon + (as.POSIXlt(inPolicy[1, "currentDate"]) -
                                  as.POSIXlt(inPolicy[1, "issueDate"]))
    inPolicy[1, "issueDate"] <- inPolicy[1, "currentDate"]
    inPolicy[1, "matDate"] <- as.Date(newDate)

    # update currentDate
    newDate <- as.POSIXlt(inPolicy[1, "currentDate"])
    newDate$mon <- newDate$mon + 1
    inPolicy[1, "currentDate"] <- as.Date(newDate)
  }

  # return death benefit, living benefit, and risk charge
  # outPolicy is used in aging, but not in valuation
  pq <- pq[1:numStep] * df[1:numStep]
  p <- p[1:numStep] * df[1:numStep]
  return(list(DA = pq %*% DA, LA = p %*% LA, RC = p %*% RC,
              outPolicy = inPolicy))
}

# ------------------------------------------------------------------------------
projectABRU <- function(inPolicy, oneFundScen, dT = 1 / 12, pq, p, df){

  # projection will always start from the current date for numStep periods
  numStep <- dim(oneFundScen)[1]
  if (((length(pq) >= numStep) && (length(p) >= numStep) &&
       (length(df) >= numStep)) == FALSE) {
    stop("df, pq, and p must have length >= numStep from oneFundScen")
  }

  # Initialize DA, LA, RC
  DA <- rep(0, numStep)
  LA <- rep(0, numStep)
  RC <- rep(0, numStep)
  # These two variables are defined within the function only
  dAV <- rep(0, numStep)
  dFee <- rep(0, numStep)

  # The main for loop to do the calculations, hard to parallelize
  for (step in 1:numStep) {
    # one suggestion for partial account evolution to avoid fund_helper
    dPartialAV <- inPolicy[1, grep("fundValue", colnames(inPolicy))] *
      oneFundScen[step, ] *
      (1 - inPolicy[1, grep("fundFee", colnames(inPolicy))] * dT)
    # anniversary month: deduct rider and base fees from the account
    if (as.POSIXlt(inPolicy[1, "currentDate"])$mon ==
        as.POSIXlt(inPolicy[1, "issueDate"])$mon) {
      raw <- sum(dPartialAV)
      dFee[step] <- sum(raw * inPolicy[1, "riderFee"])
      # update individual fund value
      dPartialAV <- dPartialAV - dPartialAV * inPolicy[1, "riderFee"] -
        dPartialAV * inPolicy[1, "baseFee"]
      dAV[step] <- sum(dPartialAV)
      inPolicy[1, grep("fundValue", colnames(inPolicy))] <- dPartialAV
      # anniversary: annual roll-up
      inPolicy[1, "gbAmt"] <- inPolicy[1, "gbAmt"] *
        (1 + inPolicy[1, "rollUpRate"])
    } else {
      dAV[step] <- sum(dPartialAV)
      inPolicy[1, grep("fundValue", colnames(inPolicy))] <- dPartialAV
    }

    DA[step] <- 0
    LA[step] <- 0
    RC[step] <- dFee[step]

    # renewal
    if (inPolicy[1, "matDate"] == inPolicy[1, "currentDate"]) {
      if (inPolicy[1, "gbAmt"] < dAV[step]) inPolicy[1, "gbAmt"] <- dAV[step]
      else {
        LA[step] <- max(0, inPolicy[1, "gbAmt"] - dAV[step])
        numFund <- length(inPolicy[1, grep("fundValue", colnames(inPolicy))])
        if (dAV[step] > 0.00001) {
          inPolicy[1, grep("fundValue", colnames(inPolicy))] <-
            inPolicy[1, grep("fundValue", colnames(inPolicy))] *
            inPolicy[1, "gbAmt"] / dAV[step]
        } else {
          inPolicy[1, grep("fundValue", colnames(inPolicy))] <-
            inPolicy[1, "gbAmt"] / numFund
        }
      }
    }
    newDate <- as.POSIXlt(inPolicy[1, "currentDate"])
    newDate$mon <- newDate$mon + (as.POSIXlt(inPolicy[1, "currentDate"]) -
                                  as.POSIXlt(inPolicy[1, "issueDate"]))
    inPolicy[1, "issueDate"] <- inPolicy[1, "currentDate"]
    inPolicy[1, "matDate"] <- as.Date(newDate)

    # update currentDate
    newDate <- as.POSIXlt(inPolicy[1, "currentDate"])
    newDate$mon <- newDate$mon + 1
    inPolicy[1, "currentDate"] <- as.Date(newDate)
  }

  # return death benefit, living benefit, and risk charge
  # outPolicy is used in aging, but not in valuation
  pq <- pq[1:numStep] * df[1:numStep]
  p <- p[1:numStep] * df[1:numStep]
  return(list(DA = pq %*% DA, LA = p %*% LA, RC = p %*% RC,
              outPolicy = inPolicy))
}

# ------------------------------------------------------------------------------
projectABSU <- function(inPolicy, oneFundScen, dT = 1 / 12, pq, p, df){

  # projection will always start from the current date for numStep periods
  numStep <- dim(oneFundScen)[1]
  if (((length(pq) >= numStep) && (length(p) >= numStep) &&
       (length(df) >= numStep)) == FALSE) {
    stop("df, pq, and p must have length >= numStep from oneFundScen")
  }

  # Initialize DA, LA, RC
  DA <- rep(0, numStep)
  LA <- rep(0, numStep)
  RC <- rep(0, numStep)
  # These two variables are defined within the function only
  dAV <- rep(0, numStep)
  dFee <- rep(0, numStep)

  # The main for loop to do the calculations, hard to parallelize
  for (step in 1:numStep) {
    # one suggestion for partial account evolution to avoid fund_helper
    dPartialAV <- inPolicy[1, grep("fundValue", colnames(inPolicy))] *
      oneFundScen[step, ] *
      (1 - inPolicy[1, grep("fundFee", colnames(inPolicy))] * dT)
    # anniversary month: deduct rider and base fees from the account
    if (as.POSIXlt(inPolicy[1, "currentDate"])$mon ==
        as.POSIXlt(inPolicy[1, "issueDate"])$mon){
      raw <- sum(dPartialAV)
      dFee[step] <- sum(raw * inPolicy[1, "riderFee"])
      # update individual fund value
      dPartialAV <- dPartialAV - dPartialAV * inPolicy[1, "riderFee"] -
        dPartialAV * inPolicy[1, "baseFee"]
      dAV[step] <- sum(dPartialAV)
      inPolicy[1, grep("fundValue", colnames(inPolicy))] <- dPartialAV
      # anniversary: step up the guarantee to the account value
      inPolicy[1, "gbAmt"] <- max(dAV[step], inPolicy[1, "gbAmt"])
    } else {
      dAV[step] <- sum(dPartialAV)
      inPolicy[1, grep("fundValue", colnames(inPolicy))] <- dPartialAV
    }

    DA[step] <- 0
    LA[step] <- 0
    RC[step] <- dFee[step]

    # renewal
    if (inPolicy[1, "matDate"] == inPolicy[1, "currentDate"]){
      if (inPolicy[1, "gbAmt"] < dAV[step]) inPolicy[1, "gbAmt"] <- dAV[step]
      else {
        LA[step] <- max(0, inPolicy[1, "gbAmt"] - dAV[step])
        numFund <- length(inPolicy[1, grep("fundValue", colnames(inPolicy))])
        if (dAV[step] > 0.00001){
          inPolicy[1, grep("fundValue", colnames(inPolicy))] <-
            inPolicy[1, grep("fundValue", colnames(inPolicy))] *
            inPolicy[1, "gbAmt"] / dAV[step]
        } else {
          inPolicy[1, grep("fundValue", colnames(inPolicy))] <-
            inPolicy[1, "gbAmt"] / numFund
        }
      }
    }
    newDate <- as.POSIXlt(inPolicy[1, "currentDate"])
    newDate$mon <- newDate$mon + (as.POSIXlt(inPolicy[1, "currentDate"]) -
                                  as.POSIXlt(inPolicy[1, "issueDate"]))
    inPolicy[1, "issueDate"] <- inPolicy[1, "currentDate"]
    inPolicy[1, "matDate"] <- as.Date(newDate)

    # update currentDate
    newDate <- as.POSIXlt(inPolicy[1, "currentDate"])
    newDate$mon <- newDate$mon + 1
    inPolicy[1, "currentDate"] <- as.Date(newDate)
  }

  # return death benefit, living benefit, and risk charge
  # outPolicy is used in aging, but not in valuation
  pq <- pq[1:numStep] * df[1:numStep]
  p <- p[1:numStep] * df[1:numStep]
  return(list(DA = pq %*% DA, LA = p %*% LA, RC = p %*% RC,
              outPolicy = inPolicy))
}

# ------------------------------------------------------------------------------
projectIBRP <- function(inPolicy, oneFundScen, dT = 1 / 12, pq, p, df){

  # projection will always start from the current date for numStep periods
  numStep <- dim(oneFundScen)[1]
  if (((length(pq) >= numStep) && (length(p) >= numStep) &&
       (length(df) >= numStep)) == FALSE) {
    stop("df, pq, and p must have length >= numStep from oneFundScen")
  }

  # Initialize DA, LA, RC
  DA <- rep(0, numStep)
  LA <- rep(0, numStep)
  RC <- rep(0, numStep)
  # These two variables are defined within the function only
  dAV <- rep(0, numStep)
  dFee <- rep(0, numStep)

  # calculate guaranteed price of an annuity with payments of $1 per annum
  # with r = 5%
  ag <- 0
  dP <- 1
  nY <- 0
  r <- 0.05
  while (dP > 0.00001) {
    # fetch p at j+1th step
    if (nY * 12 < length(p)) dP <- p[nY * 12 + 1]
    else dP <- 0
    ag <- ag + dP * exp(-r * nY)
    nY <- nY + 1
  }

  # calculate market price of an annuity with payments of $1 per annum
  # beginning at time T
  aT <- 0
  dP <- 1
  nY <- 0
  while (dP > 0.00001) {
    dFR <- 0
    if (nY * 12 < numStep) dFR <- df[nY * 12 + 1]
    else dFR <- df[numStep - 1]
    # fetch p at j+1th step
    if (nY * 12 < length(p)) dP <- p[nY * 12 + 1]
    else dP <- 0
    aT <- aT + dP * exp(-dFR * nY)
    nY <- nY + 1
  }

  # The main for loop to do the calculations, hard to parallelize
  for (step in 1:numStep) {
    # one suggestion for partial account evolution to avoid fund_helper
    dPartialAV <- inPolicy[1, grep("fundValue", colnames(inPolicy))] *
      oneFundScen[step, ] *
      (1 - inPolicy[1, grep("fundFee", colnames(inPolicy))] * dT)
    # anniversary month: deduct rider and base fees from the account
    if (as.POSIXlt(inPolicy[1, "currentDate"])$mon ==
        as.POSIXlt(inPolicy[1, "issueDate"])$mon){
      raw <- sum(dPartialAV)
      dFee[step] <- sum(raw * inPolicy[1, "riderFee"])
      # update individual fund value
      dPartialAV <- dPartialAV - dPartialAV * inPolicy[1, "riderFee"] -
        dPartialAV * inPolicy[1, "baseFee"]
      dAV[step] <- sum(dPartialAV)
      inPolicy[1, grep("fundValue", colnames(inPolicy))] <- dPartialAV
    } else {
      dAV[step] <- sum(dPartialAV)
      inPolicy[1, grep("fundValue", colnames(inPolicy))] <- dPartialAV
    }

    # update policy info
    DA[step] <- 0
    LA[step] <- 0
    RC[step] <- dFee[step]

    # at maturity date
    if (inPolicy[1, "matDate"] == inPolicy[1, "currentDate"]) {
      LA[step] <- max(inPolicy[1, "gbAmt"] * aT / ag - dAV[step], 0)
    }

    # update currentDate
    newDate <- as.POSIXlt(inPolicy[1, "currentDate"])
    newDate$mon <- newDate$mon + 1
    inPolicy[1, "currentDate"] <- as.Date(newDate)
  }

  # return death benefit, living benefit, and risk charge
  # outPolicy is used in aging, but not in valuation
  pq <- pq[1:numStep] * df[1:numStep]
  p <- p[1:numStep] * df[1:numStep]
  return(list(DA = pq %*% DA, LA = p %*% LA, RC = p %*% RC,
              outPolicy = inPolicy))
}

# ------------------------------------------------------------------------------
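# In the IB projections, ag approximates the guaranteed annuity factor
# sum_n (npx * exp(-0.05 * n)) at the fixed 5% rate, and aT the analogous
# annuity factor using the supplied df vector, so the income benefit paid at
# maturity is max(gbAmt * aT / ag - accountValue, 0).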
projectIBRU <- function(inPolicy, oneFundScen, dT = 1 / 12, pq, p, df){

  # projection will always start from the current date for numStep periods
  numStep <- dim(oneFundScen)[1]
  if (((length(pq) >= numStep) && (length(p) >= numStep) &&
       (length(df) >= numStep)) == FALSE) {
    stop("df, pq, and p must have length >= numStep from oneFundScen")
  }

  # Initialize DA, LA, RC
  DA <- rep(0, numStep)
  LA <- rep(0, numStep)
  RC <- rep(0, numStep)
  # These two variables are defined within the function only
  dAV <- rep(0, numStep)
  dFee <- rep(0, numStep)

  # calculate guaranteed price of an annuity with payments of $1 per annum
  # with r = 5%
  ag <- 0
  dP <- 1
  nY <- 0
  r <- 0.05
  while (dP > 0.00001) {
    # fetch p at j+1th step
    if (nY * 12 < length(p)) dP <- p[nY * 12 + 1]
    else dP <- 0
    ag <- ag + dP * exp(-r * nY)
    nY <- nY + 1
  }

  # calculate market price of an annuity with payments of $1 per annum
  # beginning at time T
  aT <- 0
  dP <- 1
  nY <- 0
  while (dP > 0.00001){
    dFR <- 0
    if (nY * 12 < numStep) dFR <- df[nY * 12 + 1]
    else dFR <- df[numStep - 1]
    # fetch p at j+1th step
    if (nY * 12 < length(p)) dP <- p[nY * 12 + 1]
    else dP <- 0
    aT <- aT + dP * exp(-dFR * nY)
    nY <- nY + 1
  }

  # The main for loop to do the calculations, hard to parallelize
  for (step in 1:numStep) {
    # one suggestion for partial account evolution to avoid fund_helper
    dPartialAV <- inPolicy[1, grep("fundValue", colnames(inPolicy))] *
      oneFundScen[step, ] *
      (1 - inPolicy[1, grep("fundFee", colnames(inPolicy))] * dT)
    # anniversary month: deduct rider and base fees from the account
    if (as.POSIXlt(inPolicy[1, "currentDate"])$mon ==
        as.POSIXlt(inPolicy[1, "issueDate"])$mon) {
      raw <- sum(dPartialAV)
      dFee[step] <- sum(raw * inPolicy[1, "riderFee"])
      # update individual fund value
      dPartialAV <- dPartialAV - dPartialAV * inPolicy[1, "riderFee"] -
        dPartialAV * inPolicy[1, "baseFee"]
      dAV[step] <- sum(dPartialAV)
      inPolicy[1, grep("fundValue", colnames(inPolicy))] <- dPartialAV
      # anniversary: annual roll-up
      inPolicy[1, "gbAmt"] <- inPolicy[1, "gbAmt"] *
        (1 + inPolicy[1, "rollUpRate"])
    } else {
      dAV[step] <- sum(dPartialAV)
      inPolicy[1, grep("fundValue", colnames(inPolicy))] <- dPartialAV
    }

    # update policy info
    DA[step] <- 0
    LA[step] <- 0
    RC[step] <- dFee[step]

    # at maturity date
    if (inPolicy[1, "matDate"] == inPolicy[1, "currentDate"]) {
      LA[step] <- max(inPolicy[1, "gbAmt"] * aT / ag - dAV[step], 0)
    }

    # update currentDate
    newDate <- as.POSIXlt(inPolicy[1, "currentDate"])
    newDate$mon <- newDate$mon + 1
    inPolicy[1, "currentDate"] <- as.Date(newDate)
  }

  # return death benefit, living benefit, and risk charge
  # outPolicy is used in aging, but not in valuation
  pq <- pq[1:numStep] * df[1:numStep]
  p <- p[1:numStep] * df[1:numStep]
  return(list(DA = pq %*% DA, LA = p %*% LA, RC = p %*% RC,
              outPolicy = inPolicy))
}

# ------------------------------------------------------------------------------
projectIBSU <- function(inPolicy, oneFundScen, dT = 1 / 12, pq, p, df){

  # projection will always start from the current date for numStep periods
  numStep <- dim(oneFundScen)[1]
  if (((length(pq) >= numStep) && (length(p) >= numStep) &&
       (length(df) >= numStep)) == FALSE) {
    stop("df, pq, and p must have length >= numStep from oneFundScen")
  }

  # Initialize DA, LA, RC
  DA <- rep(0, numStep)
  LA <- rep(0, numStep)
  RC <- rep(0, numStep)
  # These two variables are defined within the function only
  dAV <- rep(0, numStep)
  dFee <- rep(0, numStep)

  # calculate guaranteed price of an annuity with payments of $1 per annum
  # with r = 5%
  ag <- 0
  dP <- 1
  nY <- 0
  r <- 0.05
  while (dP > 0.00001) {
    # fetch p at j+1th step
    if (nY * 12 < length(p)) dP <- p[nY * 12 + 1]
    else dP <- 0
    ag <- ag + dP * exp(-r * nY)
    nY <- nY + 1
  }

  # calculate market price of an annuity with payments of $1 per annum
  # beginning at time T
  aT <- 0
  dP <- 1
  nY <- 0
  while (dP > 0.00001) {
    dFR <- 0
    if (nY * 12 < numStep) dFR <- df[nY * 12 + 1]
    else dFR <- df[numStep - 1]
    # fetch p at j+1th step
    if (nY * 12 < length(p)) dP <- p[nY * 12 + 1]
    else dP <- 0
    aT <- aT + dP * exp(-dFR * nY)
    nY <- nY + 1
  }

  # The main for loop to do the calculations, hard to parallelize
  for (step in 1:numStep) {
    # one suggestion for partial account evolution to avoid fund_helper
    dPartialAV <- inPolicy[1, grep("fundValue", colnames(inPolicy))] *
      oneFundScen[step, ] *
      (1 - inPolicy[1, grep("fundFee", colnames(inPolicy))] * dT)
    # anniversary month: deduct rider and base fees from the account
    if (as.POSIXlt(inPolicy[1, "currentDate"])$mon ==
        as.POSIXlt(inPolicy[1, "issueDate"])$mon) {
      raw <- sum(dPartialAV)
      dFee[step] <- sum(raw * inPolicy[1, "riderFee"])
      # update individual fund value
      dPartialAV <- dPartialAV - dPartialAV * inPolicy[1, "riderFee"] -
        dPartialAV * inPolicy[1, "baseFee"]
      dAV[step] <- sum(dPartialAV)
      inPolicy[1, grep("fundValue", colnames(inPolicy))] <- dPartialAV
      # anniversary: step up the guarantee to the account value
      inPolicy[1, "gbAmt"] <- max(dAV[step], inPolicy[1, "gbAmt"])
    } else {
      dAV[step] <- sum(dPartialAV)
      inPolicy[1, grep("fundValue", colnames(inPolicy))] <- dPartialAV
    }

    # update policy info
    DA[step] <- 0
    LA[step] <- 0
    RC[step] <- dFee[step]

    # at maturity date
    if (inPolicy[1, "matDate"] == inPolicy[1, "currentDate"]) {
      LA[step] <- max(inPolicy[1, "gbAmt"] * aT / ag - dAV[step], 0)
    }

    # update currentDate
    newDate <- as.POSIXlt(inPolicy[1, "currentDate"])
    newDate$mon <- newDate$mon + 1
    inPolicy[1, "currentDate"] <- as.Date(newDate)
  }

  # return death benefit, living benefit, and risk charge
  # outPolicy is used in aging, but not in valuation
  pq <- pq[1:numStep] * df[1:numStep]
  p <- p[1:numStep] * df[1:numStep]
  return(list(DA = pq %*% DA, LA = p %*% LA, RC = p %*% RC,
              outPolicy = inPolicy))
}

# ------------------------------------------------------------------------------
projectMBRP <- function(inPolicy, oneFundScen, dT = 1 / 12, pq, p, df){

  # projection will always start from the current date for numStep periods
  numStep <- dim(oneFundScen)[1]
  if (((length(pq) >= numStep) && (length(p) >= numStep) &&
       (length(df) >= numStep)) == FALSE) {
    stop("df, pq, and p must have length >= numStep from oneFundScen")
  }

  # Initialize DA, LA, RC
  DA <- rep(0, numStep)
  LA <- rep(0, numStep)
  RC <- rep(0, numStep)
  # These two variables are defined within the function only
  dAV <- rep(0, numStep)
  dFee <- rep(0, numStep)

  # The main for loop to do the calculations, hard to parallelize
  for (step in 1:numStep) {
    # one suggestion for partial account evolution to avoid fund_helper
    dPartialAV <- inPolicy[1, grep("fundValue", colnames(inPolicy))] *
      oneFundScen[step, ] *
      (1 - inPolicy[1, grep("fundFee", colnames(inPolicy))] * dT)
    # anniversary month: deduct rider and base fees from the account
    if (as.POSIXlt(inPolicy[1, "currentDate"])$mon ==
        as.POSIXlt(inPolicy[1, "issueDate"])$mon){
      raw <- sum(dPartialAV)
      dFee[step] <- sum(raw * inPolicy[1, "riderFee"])
      # update individual fund value
      dPartialAV <- dPartialAV - dPartialAV * inPolicy[1, "riderFee"] -
        dPartialAV * inPolicy[1, "baseFee"]
      dAV[step] <- sum(dPartialAV)
      inPolicy[1, grep("fundValue", colnames(inPolicy))] <- dPartialAV
    } else {
      dAV[step] <- sum(dPartialAV)
      inPolicy[1, grep("fundValue", colnames(inPolicy))] <- dPartialAV
    }

    # update policy info
    DA[step] <- 0
    LA[step] <- 0
    RC[step] <- dFee[step]

    # at maturity date
    if (inPolicy[1, "matDate"] == inPolicy[1, "currentDate"]) {
      LA[step] <- max(inPolicy[1, "gbAmt"] - dAV[step], 0)
    }

    # update currentDate
    newDate <- as.POSIXlt(inPolicy[1, "currentDate"])
    newDate$mon <- newDate$mon + 1
    inPolicy[1, "currentDate"] <- as.Date(newDate)
  }

  # return death benefit, living benefit, and risk charge
  # outPolicy is used in aging, but not in valuation
  pq <- pq[1:numStep] * df[1:numStep]
  p <- p[1:numStep] * df[1:numStep]
  return(list(DA = pq %*% DA, LA = p %*% LA, RC = p %*% RC,
              outPolicy = inPolicy))
}

# ------------------------------------------------------------------------------
projectMBRU <- function(inPolicy, oneFundScen, dT = 1 / 12, pq, p, df){

  # projection will always start from the current date for numStep periods
  numStep <- dim(oneFundScen)[1]
  if (((length(pq) >= numStep) && (length(p) >= numStep) &&
       (length(df) >= numStep)) == FALSE) {
    stop("df, pq, and p must have length >= numStep from oneFundScen")
  }

  # Initialize DA, LA, RC
  DA <- rep(0, numStep)
  LA <- rep(0, numStep)
  RC <- rep(0, numStep)
  # These two variables are defined within the function only
  dAV <- rep(0, numStep)
  dFee <- rep(0, numStep)

  # The main for loop to do the calculations, hard to parallelize
  for (step in 1:numStep) {
    # one suggestion for partial account evolution to avoid fund_helper
    dPartialAV <- inPolicy[1, grep("fundValue", colnames(inPolicy))] *
      oneFundScen[step, ] *
      (1 - inPolicy[1, grep("fundFee", colnames(inPolicy))] * dT)
    # anniversary month: deduct rider and base fees from the account
    if (as.POSIXlt(inPolicy[1, "currentDate"])$mon ==
        as.POSIXlt(inPolicy[1, "issueDate"])$mon) {
      raw <- sum(dPartialAV)
      dFee[step] <- sum(raw * inPolicy[1, "riderFee"])
      # update individual fund value
      dPartialAV <- dPartialAV - dPartialAV * inPolicy[1, "riderFee"] -
        dPartialAV * inPolicy[1, "baseFee"]
      dAV[step] <- sum(dPartialAV)
      inPolicy[1, grep("fundValue", colnames(inPolicy))] <- dPartialAV
      # anniversary: annual roll-up
      inPolicy[1, "gbAmt"] <- inPolicy[1, "gbAmt"] *
        (1 + inPolicy[1, "rollUpRate"])
    } else {
      dAV[step] <- sum(dPartialAV)
      inPolicy[1, grep("fundValue", colnames(inPolicy))] <- dPartialAV
    }

    # update policy info
    DA[step] <- 0
    LA[step] <- 0
    RC[step] <- dFee[step]

    # at maturity date
    if (inPolicy[1, "matDate"] == inPolicy[1, "currentDate"]) {
      LA[step] <- max(inPolicy[1, "gbAmt"] - dAV[step], 0)
    }

    # update currentDate
    newDate <- as.POSIXlt(inPolicy[1, "currentDate"])
    newDate$mon <- newDate$mon + 1
    inPolicy[1, "currentDate"] <- as.Date(newDate)
  }

  # return death benefit, living benefit, and risk charge
  # outPolicy is used in aging, but not in valuation
  pq <- pq[1:numStep] * df[1:numStep]
  p <- p[1:numStep] * df[1:numStep]
  return(list(DA = pq %*% DA, LA = p %*% LA, RC = p %*% RC,
              outPolicy = inPolicy))
}

# ------------------------------------------------------------------------------
projectMBSU <- function(inPolicy, oneFundScen, dT = 1 / 12, pq, p, df){

  # projection will always start from the current date for numStep periods
  numStep <- dim(oneFundScen)[1]
  if (((length(pq) >= numStep) && (length(p) >= numStep) &&
       (length(df) >= numStep)) == FALSE) {
    stop("df, pq, and p must have length >= numStep from oneFundScen")
  }

  # Initialize DA, LA, RC
  DA <- rep(0, numStep)
  LA <- rep(0, numStep)
  RC <- rep(0, numStep)
  # These two variables are defined within the function only
  dAV <- rep(0, numStep)
  dFee <- rep(0, numStep)

  # The main for loop to do the calculations, hard to parallelize
  for (step in 1:numStep) {
    # one suggestion for partial account evolution to avoid fund_helper
    dPartialAV <- inPolicy[1, grep("fundValue", colnames(inPolicy))] *
      oneFundScen[step, ] *
      (1 - inPolicy[1, grep("fundFee", colnames(inPolicy))] * dT)
    # anniversary month: deduct rider and base fees from the account
    if (as.POSIXlt(inPolicy[1, "currentDate"])$mon ==
        as.POSIXlt(inPolicy[1, "issueDate"])$mon){
      raw <- sum(dPartialAV)
      dFee[step] <- sum(raw * inPolicy[1, "riderFee"])
      # update individual fund value
      dPartialAV <- dPartialAV - dPartialAV * inPolicy[1, "riderFee"] -
        dPartialAV * inPolicy[1, "baseFee"]
      dAV[step] <- sum(dPartialAV)
      inPolicy[1, grep("fundValue", colnames(inPolicy))] <- dPartialAV
      # anniversary: step up the guarantee to the account value
      inPolicy[1, "gbAmt"] <- max(dAV[step], inPolicy[1, "gbAmt"])
    } else {
      dAV[step] <- sum(dPartialAV)
      inPolicy[1, grep("fundValue", colnames(inPolicy))] <- dPartialAV
    }

    # update policy info
    DA[step] <- 0
    LA[step] <- 0
    RC[step] <- dFee[step]

    # at maturity date
    if (inPolicy[1, "matDate"] == inPolicy[1, "currentDate"]){
      LA[step] <- max(inPolicy[1, "gbAmt"] - dAV[step], 0)
    }

    # update currentDate
    newDate <- as.POSIXlt(inPolicy[1, "currentDate"])
    newDate$mon <- newDate$mon + 1
    inPolicy[1, "currentDate"] <- as.Date(newDate)
  }

  # return death benefit, living benefit, and risk charge
  # outPolicy is used in aging, but not in valuation
  pq <- pq[1:numStep] * df[1:numStep]
  p <- p[1:numStep] * df[1:numStep]
  return(list(DA = pq %*% DA, LA = p %*% LA, RC = p %*% RC,
              outPolicy = inPolicy))
}

# ------------------------------------------------------------------------------
projectWBRP <- function(inPolicy, oneFundScen, dT = 1 / 12, pq, p, df){

  # projection will always start from the current date for numStep periods
  numStep <- dim(oneFundScen)[1]
  if (((length(pq) >= numStep) && (length(p) >= numStep) &&
       (length(df) >= numStep)) == FALSE) {
    stop("df, pq, and p must have length >= numStep from oneFundScen")
  }
  numFund <- dim(oneFundScen)[2]

  # Initialize DA, LA, RC
  DA <- rep(0, numStep)
  LA <- rep(0, numStep)
  RC <- rep(0, numStep)
  # These two variables are defined within the function only
  dAV <- rep(0, numStep)
  dFee <- rep(0, numStep)

  # The main for loop to do the calculations, hard to parallelize
  for (step in 1:numStep) {
    # one suggestion for partial account evolution to avoid fund_helper
    dPartialAV <- inPolicy[1, grep("fundValue", colnames(inPolicy))] *
      oneFundScen[step, ] *
      (1 - inPolicy[1, grep("fundFee", colnames(inPolicy))] * dT)
    # anniversary month: deduct fees, then take the annual withdrawal
    if (as.POSIXlt(inPolicy[1, "currentDate"])$mon ==
        as.POSIXlt(inPolicy[1, "issueDate"])$mon){
      raw <- sum(dPartialAV)
      dFee[step] <- sum(raw * inPolicy[1, "riderFee"])
      # update individual fund value
      dPartialAV <- dPartialAV - dPartialAV * inPolicy[1, "riderFee"] -
        dPartialAV * inPolicy[1, "baseFee"]
      dAV[step] <- sum(dPartialAV)
      inPolicy[1, grep("fundValue", colnames(inPolicy))] <- dPartialAV
      # update annual withdrawal
      dWAG <- inPolicy[1, "gbAmt"] * inPolicy[1, "wbWithdrawalRate"]
      dWA <- min(dWAG, inPolicy[1, "gmwbBalance"])
      inPolicy[1, "gmwbBalance"] <- inPolicy[1, "gmwbBalance"] - dWA
      inPolicy[1, "withdrawal"] <- inPolicy[1, "withdrawal"] + dWA
      if (inPolicy[1, "gmwbBalance"] < 0.0001) inPolicy[1, "gbAmt"] <- 0
      dAV[step] <- max(0, dAV[step] - dWA)
      if (dAV[step] > 0.00001) {
        inPolicy[1, grep("fundValue", colnames(inPolicy))] <-
          inPolicy[1, grep("fundValue", colnames(inPolicy))] *
          (dAV[step] / (dAV[step] + dWA))
      } else {
        inPolicy[1, grep("fundValue", colnames(inPolicy))] <- rep(0, numFund)
      }
    } else {
      dAV[step] <- sum(dPartialAV)
      inPolicy[1, grep("fundValue", colnames(inPolicy))] <- dPartialAV
    }

    # update policy info
    DA[step] <- 0
    LA[step] <- max(0, dWA - dAV[step])
    RC[step] <- dFee[step]

    newDate <- as.POSIXlt(inPolicy[1, "currentDate"])
    newDate$mon <- newDate$mon + 1
    inPolicy[1, "currentDate"] <- as.Date(newDate)
  }

  # return death benefit, living benefit, and risk charge
  # outPolicy is used in aging, but not in valuation
  pq <- pq[1:numStep] * df[1:numStep]
  p <- p[1:numStep] * df[1:numStep]
  return(list(DA = pq %*% DA, LA = p %*% LA, RC = p %*% RC,
              outPolicy = inPolicy))
}

# ------------------------------------------------------------------------------
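# A minimal numeric sketch (hypothetical values) of the annual GMWB withdrawal
# update used in the WB projections: the policyholder draws the lesser of the
# guaranteed annual amount and the remaining benefit base.
gbAmtEx <- 100000
wbRateEx <- 0.05
gmwbBalanceEx <- 40000
dWAG <- gbAmtEx * wbRateEx           # guaranteed annual withdrawal = 5000
dWA <- min(dWAG, gmwbBalanceEx)      # actual withdrawal, capped by the base
gmwbBalanceEx <- gmwbBalanceEx - dWA # remaining benefit base = 35000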
projectWBRU <- function(inPolicy, oneFundScen, dT = 1 / 12, pq, p, df){

  # projection will always start from the current date for numStep periods
  numStep <- dim(oneFundScen)[1]
  if (((length(pq) >= numStep) && (length(p) >= numStep) &&
       (length(df) >= numStep)) == FALSE) {
    stop("df, pq, and p must have length >= numStep from oneFundScen")
  }
  numFund <- dim(oneFundScen)[2]

  # Initialize DA, LA, RC
  DA <- rep(0, numStep)
  LA <- rep(0, numStep)
  RC <- rep(0, numStep)
  # These two variables are defined within the function only
  dAV <- rep(0, numStep)
  dFee <- rep(0, numStep)

  # The main for loop to do the calculations, hard to parallelize
  for (step in 1:numStep) {
    # one suggestion for partial account evolution to avoid fund_helper
    dPartialAV <- inPolicy[1, grep("fundValue", colnames(inPolicy))] *
      oneFundScen[step, ] *
      (1 - inPolicy[1, grep("fundFee", colnames(inPolicy))] * dT)
    # anniversary month: deduct fees, roll up, then take the withdrawal
    if (as.POSIXlt(inPolicy[1, "currentDate"])$mon ==
        as.POSIXlt(inPolicy[1, "issueDate"])$mon) {
      raw <- sum(dPartialAV)
      dFee[step] <- sum(raw * inPolicy[1, "riderFee"])
      # update individual fund value
      dPartialAV <- dPartialAV - dPartialAV * inPolicy[1, "riderFee"] -
        dPartialAV * inPolicy[1, "baseFee"]
      dAV[step] <- sum(dPartialAV)
      inPolicy[1, grep("fundValue", colnames(inPolicy))] <- dPartialAV
      # anniversary: annual roll-up
      inPolicy[1, "gbAmt"] <- inPolicy[1, "gbAmt"] *
        (1 + inPolicy[1, "rollUpRate"])
      # update annual withdrawal
      dWAG <- inPolicy[1, "gbAmt"] * inPolicy[1, "wbWithdrawalRate"]
      dWA <- min(dWAG, inPolicy[1, "gmwbBalance"])
      inPolicy[1, "gmwbBalance"] <- inPolicy[1, "gmwbBalance"] - dWA
      inPolicy[1, "withdrawal"] <- inPolicy[1, "withdrawal"] + dWA
      if (inPolicy[1, "gmwbBalance"] < 0.0001) inPolicy[1, "gbAmt"] <- 0
      dAV[step] <- max(0, dAV[step] - dWA)
      if (dAV[step] > 0.00001) {
        inPolicy[1, grep("fundValue", colnames(inPolicy))] <-
          inPolicy[1, grep("fundValue", colnames(inPolicy))] *
          (dAV[step] / (dAV[step] + dWA))
      } else {
        inPolicy[1, grep("fundValue", colnames(inPolicy))] <- rep(0, numFund)
      }
    } else {
      dAV[step] <- sum(dPartialAV)
      inPolicy[1, grep("fundValue", colnames(inPolicy))] <- dPartialAV
    }

    # update policy info
    DA[step] <- 0
    LA[step] <- max(0, dWA - dAV[step])
    RC[step] <- dFee[step]

    newDate <- as.POSIXlt(inPolicy[1, "currentDate"])
    newDate$mon <- newDate$mon + 1
    inPolicy[1, "currentDate"] <- as.Date(newDate)
  }

  # return death benefit, living benefit, and risk charge
  # outPolicy is used in aging, but not in valuation
  pq <- pq[1:numStep] * df[1:numStep]
  p <- p[1:numStep] * df[1:numStep]
  return(list(DA = pq %*% DA, LA = p %*% LA, RC = p %*% RC,
              outPolicy = inPolicy))
}

# ------------------------------------------------------------------------------
dAV[step] <- max(0, dAV[step] - dWA) if (dAV[step] > 0.00001) { inPolicy[1, grep("fundValue", colnames(inPolicy))] <- inPolicy[1, grep("fundValue", colnames(inPolicy))] * (dAV[step] / (dAV[step] + dWA)) } else { inPolicy[1, grep("fundValue", colnames(inPolicy))] <- rep(0, numFund) } } else { dAV[step] <- sum(dPartialAV) inPolicy[1, grep("fundValue", colnames(inPolicy))] <- dPartialAV } # update policy info DA[step] <- 0 LA[step] <- max(0, dWA - dAV[step]) RC[step] <- dFee[step] newDate <- as.POSIXlt(inPolicy[1, "currentDate"]) newDate$mon <- newDate$mon + 1 inPolicy[1, "currentDate"] <- as.Date(newDate) } # return death benefit, living benefit, and risk charge # outPolicy is used in aging, but not in valuation pq <- pq[1:numStep] * df[1:numStep] p <- p[1:numStep] * df[1:numStep] return(list(DA = pq %*% DA, LA = p %*% LA, RC = p %*% RC, outPolicy = inPolicy)) } # ------------------------------------------------------------------------------ projectDBAB <- function(inPolicy, oneFundScen, dT = 1 / 12, pq, p, df){ # projection will always start from the current date for numStep periods numStep <- dim(oneFundScen)[1] if (((length(pq) >= numStep) && (length(p) >= numStep) && (length(df) >= numStep)) == FALSE) { stop("df, pq, and p must have length > numStep from oneFundScen") } # Initialize DA, LA, RC DA <- rep(0, numStep) LA <- rep(0, numStep) RC <- rep(0, numStep) # These two variables are defined within the function only dAV <- rep(0, numStep) dFee <- rep(0, numStep) # The main for loop to do the calculations, hard to parallelize for (step in 1:numStep) { # one suggestion for partial account evolution to avoid fund_helper dPartialAV <- inPolicy[1, grep("fundValue", colnames(inPolicy))] * oneFundScen[step, ] * (1 - inPolicy[1, grep("fundFee", colnames(inPolicy))] * dT) # if (as.POSIXlt(inPolicy[1, "currentDate"])$mon == as.POSIXlt(inPolicy[1, "issueDate"])$mon) { raw <- sum(dPartialAV) dFee[step] <- sum(raw * inPolicy[1, "riderFee"]) #update individual fund value dPartialAV <- dPartialAV - dPartialAV * inPolicy[1, "riderFee"] - dPartialAV * inPolicy[1, "baseFee"] dAV[step] <- sum(dPartialAV) inPolicy[1, grep("fundValue", colnames(inPolicy))] <- dPartialAV # anniversary inPolicy[1, "gbAmt"] <- max(dAV[step], inPolicy[1, "gbAmt"]) } else { dAV[step] <- sum(dPartialAV) inPolicy[1, grep("fundValue", colnames(inPolicy))] <- dPartialAV } DA[step] <- max(0, inPolicy[1, "gbAmt"] - dAV[step]) LA[step] <- 0 RC[step] <- dFee[step] # renewal if (inPolicy[1, "matDate"] == inPolicy[1, "currentDate"]) { if (inPolicy[1, "gbAmt"] < dAV[step]) inPolicy[1, "gbAmt"] <- dAV[step] else { LA[step] <- max(0, inPolicy[1, "gbAmt"] - dAV[step]) numFund <- length(inPolicy[1, grep("fundValue", colnames(inPolicy))]) if (dAV[step] > 0.00001) { inPolicy[1, grep("fundValue", colnames(inPolicy))] <- inPolicy[1, grep("fundValue", colnames(inPolicy))] * inPolicy[1, "gbAmt"] / dAV[step] } else { inPolicy[1, grep("fundValue", colnames(inPolicy))] <- inPolicy[1, "gbAmt"] / numFund } } } newDate <- as.POSIXlt(inPolicy[1, "currentDate"]) newDate$mon <- newDate$mon + (as.POSIXlt(inPolicy[1, "currentDate"]) - as.POSIXlt(inPolicy[1, "issueDate"])) inPolicy[1, "issueDate"] <- inPolicy[1, "currentDate"] inPolicy[1, "matDate"] <- as.Date(newDate) # update currentDate newDate <- as.POSIXlt(inPolicy[1, "currentDate"]) newDate$mon <- newDate$mon + 1 inPolicy[1, "currentDate"] <- as.Date(newDate) } # return death benefit, living benefit, and risk charge # outPolicy is used in aging, but not in valuation pq <- pq[1:numStep] * df[1:numStep] p <- 
p[1:numStep] * df[1:numStep] return(list(DA = pq %*% DA, LA = p %*% LA, RC = p %*% RC, outPolicy = inPolicy)) } # ------------------------------------------------------------------------------ projectDBIB <- function(inPolicy, oneFundScen, dT = 1 / 12, pq, p, df){ # projection will always start from the current date for numStep periods numStep <- dim(oneFundScen)[1] if (((length(pq) >= numStep) && (length(p) >= numStep) && (length(df) >= numStep)) == FALSE) { stop("df, pq, and p must have length > numStep from oneFundScen") } # Initialize DA, LA, RC DA <- rep(0, numStep) LA <- rep(0, numStep) RC <- rep(0, numStep) # These two variables are defined within the function only dAV <- rep(0, numStep) dFee <- rep(0, numStep) # calculate guarateed price of an annuity with payments of $1 per annum # with r = 5% ag <- 0 dP <- 1 nY <- 0 r <- 0.05 while (dP > 0.00001) { # fetch p at j+1th step if (nY * 12 < length(p)) dP <- p[nY * 12 + 1] else dP <- 0 ag <- ag + dP * exp(-r * nY) nY <- nY + 1 } # calculate market price of an annuity with payments of $1 per annum # beginning at time T aT <- 0 dP <- 1 nY <- 0 while (dP > 0.00001) { dFR <- 0 if (nY * 12 < numStep) dFR <- df[nY * 12 + 1] else dFR <- df[numStep - 1] # fetch p at j+1th step if (nY * 12 < length(p)) dP <- p[nY * 12 + 1] else dP <- 0 aT <- aT + dP * exp(-dFR * nY) nY <- nY + 1 } # The main for loop to do the calculations, hard to parallelize for (step in 1:numStep) { # one suggestion for partial account evolution to avoid fund_helper dPartialAV <- inPolicy[1, grep("fundValue", colnames(inPolicy))] * oneFundScen[step, ] * (1 - inPolicy[1, grep("fundFee", colnames(inPolicy))] * dT) # if (as.POSIXlt(inPolicy[1, "currentDate"])$mon == as.POSIXlt(inPolicy[1, "issueDate"])$mon){ raw <- sum(dPartialAV) dFee[step] <- sum(raw * inPolicy[1, "riderFee"]) #update individual fund value dPartialAV <- dPartialAV - dPartialAV * inPolicy[1, "riderFee"] - dPartialAV * inPolicy[1, "baseFee"] dAV[step] <- sum(dPartialAV) inPolicy[1, grep("fundValue", colnames(inPolicy))] <- dPartialAV # anniversary inPolicy[1, "gbAmt"] <- max(dAV[step], inPolicy[1, "gbAmt"]) } else{ dAV[step] <- sum(dPartialAV) inPolicy[1, grep("fundValue", colnames(inPolicy))] <- dPartialAV } # update policy info DA[step] <- max(0, inPolicy[1, "gbAmt"] - dAV[step]) LA[step] <- 0 RC[step] <- dFee[step] # at maturity date if (inPolicy[1, "matDate"] == inPolicy[1, "currentDate"]) { LA[step] <- max(inPolicy[1, "gbAmt"] * aT / ag - dAV[step], 0) } # update currentDate newDate <- as.POSIXlt(inPolicy[1, "currentDate"]) newDate$mon <- newDate$mon + 1 inPolicy[1, "currentDate"] <- as.Date(newDate) } # return death benefit, living benefit, and risk charge # outPolicy is used in aging, but not in valuation pq <- pq[1:numStep] * df[1:numStep] p <- p[1:numStep] * df[1:numStep] return(list(DA = pq %*% DA, LA = p %*% LA, RC = p %*% RC, outPolicy = inPolicy)) } # ------------------------------------------------------------------------------ projectDBMB <- function(inPolicy, oneFundScen, dT = 1 / 12, pq, p, df){ # projection will always start from the current date for numStep periods numStep <- dim(oneFundScen)[1] if (((length(pq) >= numStep) && (length(p) >= numStep) && (length(df) >= numStep)) == FALSE) { stop("df, pq, and p must have length > numStep from oneFundScen") } # Initialize DA, LA, RC DA <- rep(0, numStep) LA <- rep(0, numStep) RC <- rep(0, numStep) # These two variables are defined within the function only dAV <- rep(0, numStep) dFee <- rep(0, numStep) # The main for loop to do the calculations, 
hard to parallelize for (step in 1:numStep) { # one suggestion for partial account evolution to avoid fund_helper dPartialAV <- inPolicy[1, grep("fundValue", colnames(inPolicy))] * oneFundScen[step, ] * (1 - inPolicy[1, grep("fundFee", colnames(inPolicy))] * dT) # if (as.POSIXlt(inPolicy[1, "currentDate"])$mon == as.POSIXlt(inPolicy[1, "issueDate"])$mon) { raw <- sum(dPartialAV) dFee[step] <- sum(raw * inPolicy[1, "riderFee"]) #update individual fund value dPartialAV <- dPartialAV - dPartialAV * inPolicy[1, "riderFee"] - dPartialAV * inPolicy[1, "baseFee"] dAV[step] <- sum(dPartialAV) inPolicy[1, grep("fundValue", colnames(inPolicy))] <- dPartialAV # anniversary inPolicy[1, "gbAmt"] <- max(dAV[step], inPolicy[1, "gbAmt"]) } else { dAV[step] <- sum(dPartialAV) inPolicy[1, grep("fundValue", colnames(inPolicy))] <- dPartialAV } # update policy info DA[step] <- max(0, inPolicy[1, "gbAmt"] - dAV[step]) LA[step] <- 0 RC[step] <- dFee[step] # at maturity date if (inPolicy[1, "matDate"] == inPolicy[1, "currentDate"]) { LA[step] <- max(inPolicy[1, "gbAmt"] - dAV, 0) } # update currentDate newDate <- as.POSIXlt(inPolicy[1, "currentDate"]) newDate$mon <- newDate$mon + 1 inPolicy[1, "currentDate"] <- as.Date(newDate) } # return death benefit, living benefit, and risk charge # outPolicy is used in aging, but not in valuation pq <- pq[1:numStep] * df[1:numStep] p <- p[1:numStep] * df[1:numStep] return(list(DA = pq %*% DA, LA = p %*% LA, RC = p %*% RC, outPolicy = inPolicy)) } # ------------------------------------------------------------------------------ projectDBWB <- function(inPolicy, oneFundScen, dT = 1 / 12, pq, p, df){ # projection will always start from the current date for numStep periods numStep <- dim(oneFundScen)[1] if (((length(pq) >= numStep) && (length(p) >= numStep) && (length(df) >= numStep)) == FALSE) { stop("df, pq, and p must have length > numStep from oneFundScen") } numFund <- dim(oneFundScen)[2] # Initialize DA, LA, RC DA <- rep(0, numStep) LA <- rep(0, numStep) RC <- rep(0, numStep) # These two variables are defined within the function only dAV <- rep(0, numStep) dFee <- rep(0, numStep) # The main for loop to do the calculations, hard to parallelize for (step in 1:numStep) { # one suggestion for partial account evolution to avoid fund_helper dPartialAV <- inPolicy[1, grep("fundValue", colnames(inPolicy))] * oneFundScen[step, ] * (1 - inPolicy[1, grep("fundFee", colnames(inPolicy))] * dT) # if (as.POSIXlt(inPolicy[1, "currentDate"])$mon == as.POSIXlt(inPolicy[1, "issueDate"])$mon) { raw <- sum(dPartialAV) dFee[step] <- sum(raw * inPolicy[1, "riderFee"]) #update individual fund value dPartialAV <- dPartialAV - dPartialAV * inPolicy[1, "riderFee"] - dPartialAV * inPolicy[1, "baseFee"] dAV[step] <- sum(dPartialAV) inPolicy[1, grep("fundValue", colnames(inPolicy))] <- dPartialAV # anniversary inPolicy[1, "gbAmt"] <- max(dAV[step], inPolicy[1, "gbAmt"]) #update annual withdrawal dWAG <- inPolicy[1, "gbAmt"] * inPolicy[1, "wbWithdrawalRate"] dWA <- min(dWAG, inPolicy[1, "gmwbBalance"]) inPolicy[1, "gmwbBalance"] <- inPolicy[1, "gmwbBalance"] - dWA inPolicy[1, "withdrawal"] <- inPolicy[1, "withdrawal"] + dWA if (inPolicy[1, "gmwbBalance"] < 0.0001) inPolicy[1, "gbAmt"] <- 0 dAV[step] <- max(0, dAV[step] - dWA) if (dAV[step] > 0.00001) { inPolicy[1, grep("fundValue", colnames(inPolicy))] <- inPolicy[1, grep("fundValue", colnames(inPolicy))] * (dAV[step] / (dAV[step] + dWA)) } else { inPolicy[1, grep("fundValue", colnames(inPolicy))] <- rep(0, numFund) } } else { dAV[step] <- 
sum(dPartialAV) inPolicy[1, grep("fundValue", colnames(inPolicy))] <- dPartialAV } # update policy info DA[step] <- max(0, inPolicy[1, "gbAmt"] - dAV[step]) LA[step] <- max(0, dWA - dAV[step]) RC[step] <- dFee[step] newDate <- as.POSIXlt(inPolicy[1, "currentDate"]) newDate$mon <- newDate$mon + 1 inPolicy[1, "currentDate"] <- as.Date(newDate) } # return death benefit, living benefit, and risk charge # outPolicy is used in aging, but not in valuation pq <- pq[1:numStep] * df[1:numStep] p <- p[1:numStep] * df[1:numStep] return(list(DA = pq %*% DA, LA = p %*% LA, RC = p %*% RC, outPolicy = inPolicy)) } # ------------------------------------------------------------------------------ #' Valuate One Policy #' #' @description Valuate a VA policy specified in inPolicy based on the simulated fund #' scenarios fundScen. The time step length is specified in dT and the #' discount rate for each period is specified in df. #' @param inPolicy A vector containing 45 attributes of a VA policy, #' usually a row of a VA portfolio dataframe. #' @param mortTable A dataframe with three columns of doubles representing the #' mortality table. #' @param fundScen A numScen-by-numStep-by-numFund array of doubles of #' return factors (i.e., exp(mu_t dt)) in each period. #' @param dT A double of stepsize in years; dT = 1 / 12 would be monthly. #' @param df A vector of doubles of risk-free discount rates of different tenor #' (not forward rates), should have length being numStep. #' @return Outputs a list of doubles of policyValue, the average discounted #' payoff of the VA, and riskCharge, the average discounted risk charges. #' @examples #' fundScen <- genFundScen(fundMap, indexScen)[1, , ] #' exPolicy <- VAPort[1, ] #' valuateOnePolicy(exPolicy, mortTable, fundScen, 1 / 12, cForwardCurve) #' @export valuateOnePolicy <- function(inPolicy, mortTable, fundScen, dT = 1 / 12, df){ if (inPolicy[1, "matDate"] <= inPolicy[1, "currentDate"]) { return(policyValue = 0) } # If the policy is inforce, select the risk project function to calculate # the death benefits, living benefits, and risk charges for each period Type <- inPolicy[1, "productType"] # projectFun is projectXXXX where XXXX is the product type of the policy projectFun <- get(paste0("project", Type)) # In valuation, project from current date to maturity date startDate <- as.POSIXlt(inPolicy[1, "currentDate"]) endDate <- as.POSIXlt(inPolicy[1, "matDate"]) numStep <- 12 * (endDate$year - startDate$year) + (endDate$mon - startDate$mon) scenDim <- dim(fundScen) # Check if only one scenario is provided if (length(scenDim) == 2){ numScen <- 1 } else { numScen <- scenDim[1] } # Calculate actuarial discount factors mortFactors <- calcMortFactors(inPolicy, mortTable, dT) pq <- matrix(mortFactors[, "pq"], nrow = 1) # enforced row vector p <- matrix(mortFactors[, "p"], nrow = 1) # enforced row vector # Placeholders for results DA <- c() LA <- c() RC <- c() # this for loop is parallelizable for (scen in 1:numScen) { if (numScen == 1) { curScen <- matrix(fundScen[1:numStep, ], nrow = numStep) VABenefits <- projectFun(inPolicy, curScen, dT, pq, p, df) DA[scen] <- VABenefits$DA LA[scen] <- VABenefits$LA RC[scen] <- VABenefits$RC } else { # curScen is numStep-by-numFund curScen <- matrix(fundScen[scen, 1:numStep, ], nrow = numStep) VABenefits <- projectFun(inPolicy, curScen, dT, pq, p, df) DA[scen] <- VABenefits$DA LA[scen] <- VABenefits$LA RC[scen] <- VABenefits$RC } } # Calculate discounted payoffs for all scenarios (1-by-numScen vectors) DAs <- sum(DA) LAs <- sum(LA) RCs <- 
sum(RC) return(list(policyValue = (DAs + LAs) / numScen, riskCharge = RCs / numScen)) } # ------------------------------------------------------------------------------ #' Age One Policy #' #' @description Age a VA policy specified in inPolicy from currentDate (specified in #' inPolicy) to targetDate. The againg scenario is given in fundScen. #' The time step length is specified in dT. #' Here we input a rather irrelevant parameter df to "hack" for a more #' flexible user-defined projection function. #' @param inPolicy A vector containing 45 attributes of a VA policy, #' usually a row of a VA portfolio dataframe. #' @param mortTable A dataframe with three columns of doubles representing the #' mortality table. #' @param fundScen A numScen-by-numStep-by-numFund array of doubles of #' return factors (i.e., exp(mu_t dt)) in each period. #' @param scenDates A vector containing strings in the format of "YYYY-MM-DD" #' of dates corresponding to each period in fundScen. #' @param dT A double of stepsize in years; dT = 1 / 12 would be monthly. #' @param targetDate A string in the format of "YYYY-MM-DD" of valuation date #' of the portfolio. #' @param df A vector of doubles of risk-free discount rates of different tenor #' (not forward rates), should have length being numStep. #' @return Outputs a vector containing 45 attributes of a VA policy, where #' currentDate, gbAmt, GMWBbalance, withdrawal, & fundValue could be updated #' as a result of aging. Usually a row of a VA portfolio dataframe. #' @examples #' exPolicy <- VAPort[1, ] #' targetDate <- "2016-01-01" #' histFundScen <- genFundScen(fundMap, histIdxScen) #' ageOnePolicy(exPolicy, mortTable, histFundScen, histDates, dT = 1 / 12, #' targetDate, cForwardCurve) #' \dontrun{ #' targetDate <- "2001-01-01" #' histFundScen <- genFundScen(fundMap, histIdxScen) #' ageOnePolicy(exPolicy, mortTable, histFundScen, histDates, dT = 1 / 12, #' targetDate, cForwardCurve) #' } #' \dontrun{ #' exPolicy <- VAPort[1, ] #' exPolicy[1, c("currentDate", "issueDate")] <- c("2001-01-01", "2001-01-01") #' histFundScen <- genFundScen(fundMap, histIdxScen) #' ageOnePolicy(exPolicy, mortTable, histFundScen, histDates, dT = 1 / 12, #' targetDate, cForwardCurve) #' } #' @section Note: #' Target date MUST be PRIOR to the last date of historical scenario date, #' Current date MUST be LATER than the first date of historical scenario date. #' @export ageOnePolicy <- function(inPolicy, mortTable, fundScen, scenDates, dT = 1 / 12, targetDate, df){ scenStartDate <- as.POSIXlt(scenDates[1]) targetDate <- as.Date(targetDate) if (targetDate <= inPolicy[1, "currentDate"]) { warning("No aging required. Original policy returned.") return(inPolicy) } if (targetDate > max(scenDates)) { msg <- "No aging performed. Target date beyond the last date of historical scenario date 2016-02-01" msg <- gsub("[\r\n]", " ", msg) stop(msg) } if (min(scenDates) > inPolicy[1, "currentDate"]) { msg <- "No aging performed. 
Current date before the first date of historical scenario date 2001-08-01" msg <- gsub("[\r\n]", " ", msg) stop(msg) } rollEndDate <- as.POSIXlt(targetDate) rollEndDate$mon <- rollEndDate$mon - 1 # If the policy is inforce, select the risk project function to calculate # the death benefits, living benefits, and risk charges for each period type <- inPolicy[1, "productType"] # projectFun is projectXXXX where XXXX is the product type of the policy projectFun <- get(paste0("project", type)) # In valuation, project from current date to maturity date startDate <- as.POSIXlt(inPolicy[1, "currentDate"]) endDate <- as.POSIXlt(inPolicy[1, "matDate"]) numStep <- 12 * (endDate$year - startDate$year) + (endDate$mon - startDate$mon) agingStartIndx <- 12 * (startDate$year - scenStartDate$year) + (startDate$mon - scenStartDate$mon) agingEndIndx <- 12 * (rollEndDate$year - scenStartDate$year) + (rollEndDate$mon - scenStartDate$mon) rngScenIndx <- c(agingStartIndx:agingEndIndx) curScen <- fundScen[rngScenIndx, ] # Calculate actuarial discount factors mortFactors <- calcMortFactors(inPolicy, mortTable, dT) actFactors <- mortFactors * df[1:numStep] pq <- matrix(actFactors[, "pq"], nrow = 1) # enforced row vector p <- matrix(actFactors[, "p"], nrow = 1) # enforced row vector VABenefits <- projectFun(inPolicy, curScen, dT, pq, p, df) # Update different fields in the output policy outPolicy <- VABenefits$outPolicy return(outPolicy) } # ------------------------------------------------------------------------------ #' Valuate a Portfolio #' #' @description Valuate a portfolio VA policies specified in each curPolicy of inPortfolio #' based on the simulated fund scenarios fundScen. #' The time step length is specified in dT and the discount rate for each period #' is specified in df. #' @param inPortfolio A dataframe containing numPolicy rows and 45 attributes #' of each VA policy. #' @param mortTable A dataframe with three columns of doubles representing the #' mortality table. #' @param fundScen A numScen-by-numStep-by-numFund array of doubles of #' return factors (i.e., exp(mu_t dt)) in each period. #' @param dT A double of stepsize in years; dT = 1 / 12 would be monthly. #' @param df A vector of doubles of risk-free discount rates of different tenor #' (not forward rates), should have length being numStep. #' @return Outputs a list of doubles of portVal, the sum of average discounted #' payoff of the VAs in inPortfolio, portRC, the sum of average discounted #' risk charges of the VAs in inPortfolio, and vectors of doubles of these #' average discounted values for each policy. #' @examples #' fundScen <- genFundScen(fundMap, indexScen)[1, , ] #' valuatePortfolio(VAPort[1:2, ], mortTable, fundScen, 1 / 12, cForwardCurve) #' @export valuatePortfolio <- function(inPortfolio, mortTable, fundScen, dT = 1 / 12, df){ numPolicy <- nrow(inPortfolio) vecVal <- rep(0, numPolicy) vecRC <- rep(0, numPolicy) for (i in 1:numPolicy){ curPolicy <- inPortfolio[i, ] outTemp <- valuateOnePolicy(curPolicy, mortTable, fundScen, dT, df) vecVal[i] <- outTemp$policyValue vecRC[i] <- outTemp$riskCharge } return (list(portVal = sum(vecVal), portRC = sum(vecRC), vecVal = vecVal, vecRC = vecRC)) } # ------------------------------------------------------------------------------ #' Age a Portfolio #' #' @description Age a portfolio of VA policies specified in each inPolicy of inPortfolio from #' currentDate (specified in inPolicy) to targetDate. The againg scenario is #' given in fundScen. The time step length is specified in dT. 
#' Here we input a rather irrelevant parameter df to "hack" for a more flexible #' user-defined projection function. #' @param inPortfolio A dataframe containing numPolicy rows and 45 attributes #' of each VA policy. #' @param mortTable A dataframe with three columns of doubles representing the #' mortality table. #' @param fundScen A numScen-by-numStep-by-numFund array of doubles of #' return factors (i.e., exp(mu_t dt)) in each period. #' @param scenDates A vector containing strings in the format of "YYYY-MM-DD" #' of dates corresponding to each period in fundScen. #' @param dT A double of stepsize in years; dT = 1 / 12 would be monthly. #' @param targetDate A string in the format of "YYYY-MM-DD" of valuation date of #' the portfolio. #' @param df A vector of doubles of risk-free discount rates of different tenor #' (not forward rates), should have length being numStep. #' @return Outputs a dataframe containing numPolicy rows and 45 attributes of #' each VA policy, where currentDate, gbAmt, GMWBbalance, withdrawal, #' & fundValue of each policy could be updated as a result of aging. #' @examples #' targetDate <- "2016-01-01" #' histFundScen <- genFundScen(fundMap, histIdxScen) #' agePortfolio(VAPort[1:2, ], mortTable, histFundScen, histDates, dT = 1 / 12, #' targetDate, cForwardCurve) #' \dontrun{ #' targetDate <- "2001-01-01" #' histFundScen <- genFundScen(fundMap, histIdxScen) #' agePortfolio(VAPort, mortTable, histFundScen, histDates, dT = 1 / 12, #' targetDate, cForwardCurve) #' } #' \dontrun{ #' VAPort[1, c("currentDate", "issueDate")] <- c("2001-01-01", "2001-01-01") #' histFundScen <- genFundScen(fundMap, histIdxScen) #' agePortfolio(VAPort, mortTable, histFundScen, histDates, dT = 1 / 12, #' targetDate, cForwardCurve) #' } #' @section Note: #' Target date MUST be PRIOR to the last date of historical scenario date, #' Current date MUST be LATER than the first date of historical scenario date. #' @export agePortfolio <- function(inPortfolio, mortTable, fundScen, scenDates, dT = 1 / 12, targetDate, df){ numPolicy <- nrow(inPortfolio) for (i in 1:numPolicy){ inPolicy <- inPortfolio[i, ] inPortfolio[i, ] <- ageOnePolicy(inPolicy, mortTable, fundScen, scenDates, dT = 1 / 12, targetDate, df) } return (outPortfolio = inPortfolio) }
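# ------------------------------------------------------------------------------
# Illustrative note (not part of the original package source): each projectXXXX
# function above collapses its per-period benefit vectors into present values
# with a crossproduct, e.g. DA = pq %*% DA after pq has been scaled
# element-wise by df. A minimal sketch of that identity with made-up numbers:
#   w <- c(0.1, 0.2, 0.3)   # hypothetical per-period weights (pq * df)
#   cf <- c(100, 110, 120)  # hypothetical per-period death benefits
#   sum(w * cf) == as.numeric(matrix(w, nrow = 1) %*% cf)  # TRUE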
/scratch/gouwar.j/cran-all/cranData/vamc/R/Step4_MonteCarloValuation.R
# ------------------------------------------------------------------------------
#' Constant Forward Curve
#'
#' A dataset containing 2 percent continuously compounded annual interest rate
#' for illustration purposes.
#'
#' @format A vector with 360 elements:
#' \describe{
#'   \item{rate}{discount rate}
#'   ...
#' }
"cForwardCurve"

# ------------------------------------------------------------------------------
#' Fund Map for 10 Funds
#'
#' A dataset containing a default mapping from five indices to ten different
#' funds.
#'
#' @format A matrix with 10 rows and 5 columns:
#' \describe{
#'   \item{index name}{name for each index}
#'   \item{fund number}{proportion of fund allocated to a particular index}
#'   ...
#' }
"fundMap"

# ------------------------------------------------------------------------------
#' Covariance Matrix for 5 Indices
#'
#' A dataset containing the covariance matrix among the returns of five indices.
#'
#' @format A matrix with 5 rows and 5 columns:
#' \describe{
#'   \item{index number}{number for each index}
#'   ...
#' }
"mCov"

# ------------------------------------------------------------------------------
#' 5 Indices for 10 Scenarios over 360 Months
#'
#' A dataset containing a 3D array, number of scenarios (10) by
#' number of time steps (360) by number of indices (5), of Black-Scholes return
#' factors for each index in each time step and each scenario.
#'
#' @format A 3D array with dimensions 10x360x5:
#' \describe{
#'   \item{scenario}{scenario number}
#'   \item{month}{month since valuation date}
#'   \item{index number}{monthly return for a particular index in one scenario
#'   one month}
#'   ...
#' }
"indexScen"

# ------------------------------------------------------------------------------
#' Historical Scenario Dates
#'
#' A dataset containing the dates at which historical returns for different
#' indices were observed.
#'
#' @format A vector with 175 elements:
#' \describe{
#'   \item{date}{each observation date of the historical scenarios}
#'   ...
#' }
"histDates"

# ------------------------------------------------------------------------------
#' Historical Index Scenario for 5 Indices over 175 Months
#'
#' A dataset containing a matrix, number of time steps (175) by number of
#' indices (5), of observed historical returns for each index in each time
#' step in the past.
#'
#' @format A data frame with 175 rows and 5 columns:
#' \describe{
#'   \item{FIXED}{historical return for index "FIXED" in one month}
#'   \item{INT}{historical return for index "INT" in one month}
#'   \item{MONEY}{historical return for index "MONEY" in one month}
#'   \item{SMALL}{historical return for index "SMALL" in one month}
#'   \item{US}{historical return for index "US" in one month}
#'   ...
#' }
#' @source \url{http://www.math.uconn.edu/~gan/software.html}
#' @section Remark:
#' These historical index scenarios were assessed on 2008-09-12
"histIdxScen"

# ------------------------------------------------------------------------------
#' Index Names
#'
#' A dataset containing names for each index.
#'
#' @format A vector with 5 elements:
#' \describe{
#'   \item{name}{name of the index}
#'   ...
#' }
"indexNames"

# ------------------------------------------------------------------------------
#' Mortality Rate for Male and Female from Ages 5 to 115
#'
#' A dataset containing the mortality rates for male and female from ages 5
#' to 115 (table IAM 1996 from the Society of Actuaries).
#'
#' @format A data frame with 110 rows and 3 columns:
#' \describe{
#'   \item{age}{individual's age}
#'   \item{male}{mortality of a male at a particular age ranging from 5 to 115}
#'   \item{female}{mortality of a female at a particular age ranging
#'   from 5 to 115}
#'   ...
#' }
#' @source \url{https://mort.soa.org}
"mortTable"

# ------------------------------------------------------------------------------
#' Swap Rates across 30 Years
#'
#' A dataset containing US swap rates for various maturities.
#'
#' @format A vector with 8 elements:
#' \describe{
#'   \item{rate}{swap rate}
#'   ...
#' }
#' @source \url{http://www.federalreserve.gov}
#' @section Remark:
#' These swap rates were assessed on 2016-02-08
"swapRate"

# ------------------------------------------------------------------------------
#' A Randomly Generated Pool of Variable Annuities
#'
#' A dataset containing information of the policy and the policy holder.
#'
#' @format A data frame with 19 rows and 45 columns:
#' \describe{
#'   \item{recordID}{Unique identifier of the policy}
#'   \item{survivorShip}{Positive weighting number}
#'   \item{gender}{Gender of the policyholder}
#'   \item{productType}{Product type}
#'   \item{issueDate}{Issue date}
#'   \item{matDate}{Maturity date}
#'   \item{birthDate}{Birth date of the policyholder}
#'   \item{currentDate}{Current date}
#'   \item{baseFee}{M&E (Mortality & Expense) fee}
#'   \item{riderFee}{Rider fee}
#'   \item{rollUpRate}{Roll-up rate}
#'   \item{gbAmt}{Guaranteed benefit}
#'   \item{gmwbBalance}{GMWB balance}
#'   \item{wbWithdrawalRate}{Guaranteed withdrawal rate}
#'   \item{withdrawal}{Withdrawal so far}
#'   \item{fundNum1}{Fund number of the 1st investment fund}
#'   \item{fundNum2}{Fund number of the 2nd investment fund}
#'   \item{fundNum3}{Fund number of the 3rd investment fund}
#'   \item{fundNum4}{Fund number of the 4th investment fund}
#'   \item{fundNum5}{Fund number of the 5th investment fund}
#'   \item{fundNum6}{Fund number of the 6th investment fund}
#'   \item{fundNum7}{Fund number of the 7th investment fund}
#'   \item{fundNum8}{Fund number of the 8th investment fund}
#'   \item{fundNum9}{Fund number of the 9th investment fund}
#'   \item{fundNum10}{Fund number of the 10th investment fund}
#'   \item{fundValue1}{Fund value of the 1st investment fund}
#'   \item{fundValue2}{Fund value of the 2nd investment fund}
#'   \item{fundValue3}{Fund value of the 3rd investment fund}
#'   \item{fundValue4}{Fund value of the 4th investment fund}
#'   \item{fundValue5}{Fund value of the 5th investment fund}
#'   \item{fundValue6}{Fund value of the 6th investment fund}
#'   \item{fundValue7}{Fund value of the 7th investment fund}
#'   \item{fundValue8}{Fund value of the 8th investment fund}
#'   \item{fundValue9}{Fund value of the 9th investment fund}
#'   \item{fundValue10}{Fund value of the 10th investment fund}
#'   \item{fundFee1}{Fund management fee of the 1st investment fund}
#'   \item{fundFee2}{Fund management fee of the 2nd investment fund}
#'   \item{fundFee3}{Fund management fee of the 3rd investment fund}
#'   \item{fundFee4}{Fund management fee of the 4th investment fund}
#'   \item{fundFee5}{Fund management fee of the 5th investment fund}
#'   \item{fundFee6}{Fund management fee of the 6th investment fund}
#'   \item{fundFee7}{Fund management fee of the 7th investment fund}
#'   \item{fundFee8}{Fund management fee of the 8th investment fund}
#'   \item{fundFee9}{Fund management fee of the 9th investment fund}
#'   \item{fundFee10}{Fund management fee of the 10th investment fund}
#'   ...
#' }
"VAPort"
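# ------------------------------------------------------------------------------
# Illustrative note (not part of the original package source): once the package
# is attached, the lazy-data objects documented above can be inspected
# directly; the dimensions in the comments are taken from the @format fields
# above.
#   library(vamc)
#   dim(indexScen)    # 10 scenarios x 360 months x 5 indices
#   dim(histIdxScen)  # 175 monthly observations of 5 indices
#   head(mortTable)   # columns: age, male, female
#   dim(VAPort)       # 19 policies x 45 attributes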
/scratch/gouwar.j/cran-all/cranData/vamc/R/data.R
#' vamc: A package for pricing a pool of variable annuities.
#'
#' The vamc package provides a Monte Carlo engine for valuating a pool of
#' variable annuities. The key steps are:
#' YieldCurveGeneration, ScenarioGeneration, PolicyGeneration,
#' and MonteCarloValuation.
#'
#' @section YieldCurveGeneration functions:
#' YieldCurveGeneration generates a forward curve from swap rates.
#' The forward curve is obtained by solving for the forward rates that equate
#' the values of the floating and fixed legs of each swap.
#' @section ScenarioGeneration functions:
#' ScenarioGeneration generates a random fund scenario under Black-Scholes.
#' After simulating random index scenarios, a fundMap is used to allocate
#' returns of indices to each fund according to the proportion of investment.
#' @section PolicyGeneration functions:
#' PolicyGeneration randomly generates a pool of variable annuities for
#' user-input birthday range, issue-date range, maturity range, account value
#' range, female percentage, fund management fee, fund base fee, product types,
#' rider fee of each type, roll-up rate for roll-up featured guarantees,
#' withdrawal rate for GMWB, and the number of policies to be generated for
#' each type.
#' @section MonteCarloValuation functions:
#' MonteCarloValuation discounts cash flows from living and death benefits, as
#' well as risk charges, for each policy in the portfolio.
#'
#' @docType package
#' @name vamc
#' @importFrom Rdpack reprompt
#' @references
#' \insertRef{gan2017vabd}{vamc}
NULL
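# ------------------------------------------------------------------------------
# A minimal end-to-end sketch (not part of the original package source) of the
# four steps described above, assembled from the calls demonstrated in the
# package vignette; it assumes the bundled swapRate quotes correspond to the
# tenors listed below.
#   curve <- buildCurve(swapRate, c(1, 2, 3, 4, 5, 7, 10, 30), 6, "Thirty360",
#                       6, "ACT360", "General", "Modified_Foll", "2016-02-08",
#                       2, "Thirty360", NULL)
#   indexScen <- genIndexScen(mCov, 100, 360, indexNames, 1 / 12,
#                             cForwardCurve, 1)
#   fundScen <- genFundScen(fundMap, indexScen)
#   port <- genPortInception(issueRng = c("2001-08-01", "2014-01-01"),
#                            numPolicy = 1)
#   valuatePortfolio(port, mortTable, fundScen, 1 / 12, cForwardCurve)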
/scratch/gouwar.j/cran-all/cranData/vamc/R/vamc.R
## ---- echo = FALSE------------------------------------------------------------
knitr::opts_chunk$set(collapse = TRUE, comment = "#>")
library(vamc)

## -----------------------------------------------------------------------------
# Initialize required inputs to bootstrap a curve
swap <- c(0.69, 0.77, 0.88, 1.01, 1.14, 1.38, 1.66, 2.15) * 0.01
tenor <- c(1, 2, 3, 4, 5, 7, 10, 30)
fixFreq <- 6
fixDCC <- "Thirty360"
fltFreq <- 6
fltDCC <- "ACT360"
calendar <- "General"
bdc <- "Modified_Foll"
curveDate <- "2016-02-08"
numSetDay <- 2
yieldCurveDCC <- "Thirty360"
holidays <- NULL

# Bootstrap a forward curve
buildCurve(swap, tenor, fixFreq, fixDCC, fltFreq, fltDCC, calendar, bdc,
           curveDate, numSetDay, yieldCurveDCC, holidays)

## ---- echo=FALSE, results='asis'----------------------------------------------
# Default randomly generated covariance matrix
knitr::kable(mCov, col.names = indexNames)

## ---- echo=FALSE, results='asis'----------------------------------------------
# Default index names
knitr::kable(indexNames, col.names = c("Index Names"))

## -----------------------------------------------------------------------------
# We will show the index simulated path for five months of the first scenario
indexScen <- genIndexScen(mCov, 100, 360, indexNames, 1 / 12, cForwardCurve, 1)
indexScen[1, 1:5, ]

## -----------------------------------------------------------------------------
# Again, we show the fund simulated path for five months of the first scenario
fundScen <- genFundScen(fundMap, indexScen)
fundScen[1, 1:5, ]

## -----------------------------------------------------------------------------
# For illustration purposes, we will only simulate one guarantee contract for each of
# the 19 guarantee types. Please note that due to randomness the generated portfolio
# under this code block may not align with the default VAPort under lazy data.
if (capabilities("long.double")) {
  VAport <- genPortInception(issueRng = c("2001-08-01", "2014-01-01"),
                             numPolicy = 1)
}

## -----------------------------------------------------------------------------
# In this vignette, we will arbitrarily use the first scenario from fundScen to
# valuate a portfolio of five guarantees to speed up the execution of the example.
# The input cForwardCurve is a vector of 0.02 with dimension 360.
valuatePortfolio(VAPort[1:5, ], mortTable, fundScen[1, , ], 1 / 12, cForwardCurve)

## -----------------------------------------------------------------------------
# Again, we will arbitrarily age a portfolio of two guarantees to speed up the execution.
targetDate <- "2016-01-01"

# Here we generate historical fund scenarios using default index data stored under "histIdxScen"
histFundScen <- genFundScen(fundMap, histIdxScen)

# Perform aging
agePortfolio(VAPort[1:2, ], mortTable, histFundScen, histDates, dT = 1 / 12,
             targetDate, cForwardCurve)

## ----setup, include = FALSE----------------------------------------------------
knitr::opts_chunk$set(
  collapse = TRUE,
  comment = "#>"
)

## -----------------------------------------------------------------------------
exPolicy <- VAPort[1, ]
valuateOnePolicy(exPolicy, mortTable, fundScen[1:2, , ], 1 / 12, cForwardCurve)

## -----------------------------------------------------------------------------
# Similarly, users can age this single policy before pricing it. We use the same
# target date and historical fund scenario as generated before
exPolicy <- VAPort[1, ]
ageOnePolicy(exPolicy, mortTable, histFundScen, histDates, dT = 1 / 12,
             targetDate, cForwardCurve)
/scratch/gouwar.j/cran-all/cranData/vamc/inst/doc/my-vignette.R
---
title: "A Comprehensive Monte Carlo Valuation of Variable Annuities"
author: "Hengxin Li, Mingbin Feng, Mingyi Jiang"
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
  %\VignetteIndexEntry{A Comprehensive Monte Carlo Valuation of Variable Annuities}
  %\VignetteEngine{knitr::rmarkdown}
  %\VignetteEncoding{UTF-8}
---

# Package Info

This package uses Monte Carlo simulation to estimate the fair market value of a large portfolio of synthetic variable annuities. The portfolio of variable annuities under consideration is generated based on realistic features of common types of guarantee riders in North America. The Monte Carlo simulation engine generates sample paths of asset prices based on the Black-Scholes model. In this vignette, we will demonstrate the functionalities provided in this package.

For illustrative purposes, we will use only a few scenarios to valuate small pools of variable annuities. Users may obtain a more robust valuation result by increasing the number of risk-neutral scenarios.

## Yield Curve Generation

In this step, we use the Secant method to calculate discount factors and forward rates at different tenors from the given swap rates using `buildCurve()`.

```{r, echo = FALSE}
knitr::opts_chunk$set(collapse = TRUE, comment = "#>")
library(vamc)
```

```{r}
# Initialize required inputs to bootstrap a curve
swap <- c(0.69, 0.77, 0.88, 1.01, 1.14, 1.38, 1.66, 2.15) * 0.01
tenor <- c(1, 2, 3, 4, 5, 7, 10, 30)
fixFreq <- 6
fixDCC <- "Thirty360"
fltFreq <- 6
fltDCC <- "ACT360"
calendar <- "General"
bdc <- "Modified_Foll"
curveDate <- "2016-02-08"
numSetDay <- 2
yieldCurveDCC <- "Thirty360"
holidays <- NULL

# Bootstrap a forward curve
buildCurve(swap, tenor, fixFreq, fixDCC, fltFreq, fltDCC, calendar, bdc,
           curveDate, numSetDay, yieldCurveDCC, holidays)
```

## Generate index scenarios and fund scenarios

In the following example, we first simulate the index movements using `genIndexScen()`. Three of the inputs to `genIndexScen()` are stored as default data under the variable names "mCov", "indexNames", and "cForwardCurve" respectively. For illustration purposes, we will simulate 100 scenarios for 360 steps with a step length dT = 1/12 and seed = 1. The underlying model is the __multivariate Black-Scholes model__. All the simulated index movements are stored in a __3D-array__ with dimensions [number of Scenarios, number of Steps, number of Indices].

### Default covariance matrix

```{r, echo=FALSE, results='asis'}
# Default randomly generated covariance matrix
knitr::kable(mCov, col.names = indexNames)
```

### Default index names

```{r, echo=FALSE, results='asis'}
# Default index names
knitr::kable(indexNames, col.names = c("Index Names"))
```

### Risk-neutral path simulation for 5 indices

```{r}
# We will show the index simulated path for five months of the first scenario
indexScen <- genIndexScen(mCov, 100, 360, indexNames, 1 / 12, cForwardCurve, 1)
indexScen[1, 1:5, ]
```

Then we use `genFundScen()` to map the index movements to funds according to different allocations of capital using a fund map (stored as default data under the variable "fundMap"). The fund movements are also stored in a 3D-array with dimensions [number of Scenarios, number of Steps, number of Funds].

### Risk-neutral path simulation for 10 funds

```{r}
# Again, we show the fund simulated path for five months of the first scenario
fundScen <- genFundScen(fundMap, indexScen)
fundScen[1, 1:5, ]
```

## Generate a synthetic portfolio of variable annuities

Perhaps the most value-added step in this package is the generation of a synthetic portfolio of variable annuities with realistic characteristic features. Using the function `genPortInception()`, users can generate a synthetic variable annuity portfolio of the desired size. The function `genPortInception()` has certain predetermined default values based on the research in the package reference. We recommend that users change these default values, such as the maturity and issue ranges, to match their portfolio characteristics. In the current version, there are a few constraints on the portfolio being generated: the issue range must be later than the first date of the historical scenarios, and the maturity range should be set after the valuation date to be meaningful.

```{r}
# For illustration purposes, we will only simulate one guarantee contract for each of
# the 19 guarantee types. Please note that due to randomness the generated portfolio
# under this code block may not align with the default VAPort under lazy data.
if (capabilities("long.double")) {
  VAport <- genPortInception(issueRng = c("2001-08-01", "2014-01-01"),
                             numPolicy = 1)
}
```

## Monte Carlo Valuation

After generating the above required elements for Monte Carlo valuation, we can now proceed to calculate the fair market price of the portfolio by calling the function `valuatePortfolio()`. Under the current version of the package, all the annuity contracts in the portfolio are assumed to be valuated on the same date, i.e. the first date of our simulated fund scenario. Users can either use the default mortality table by calling "mortTable", or input a mortality table to project liability cash flows.

```{r}
# In this vignette, we will arbitrarily use the first scenario from fundScen to
# valuate a portfolio of five guarantees to speed up the execution of the example.
# The input cForwardCurve is a vector of 0.02 with dimension 360.
valuatePortfolio(VAPort[1:5, ], mortTable, fundScen[1, , ], 1 / 12, cForwardCurve)
```

Note that users can also "age" the portfolio, by calling the function `agePortfolio()`, to a particular valuation date by incorporating the historical fund movements prior to that date.

```{r}
# Again, we will arbitrarily age a portfolio of two guarantees to speed up the execution.
targetDate <- "2016-01-01"

# Here we generate historical fund scenarios using default index data stored under "histIdxScen"
histFundScen <- genFundScen(fundMap, histIdxScen)

# Perform aging
agePortfolio(VAPort[1:2, ], mortTable, histFundScen, histDates, dT = 1 / 12,
             targetDate, cForwardCurve)
```

```{r setup, include = FALSE}
knitr::opts_chunk$set(
  collapse = TRUE,
  comment = "#>"
)
```

## Closing Remarks

Though the primary purpose of this package is to valuate a portfolio of variable annuities, users can also use the `valuateOnePolicy()` and the `ageOnePolicy()` functions to perform fair market valuation on a single variable annuity, as demonstrated below.

### Valuation of one variable annuity

```{r}
exPolicy <- VAPort[1, ]
valuateOnePolicy(exPolicy, mortTable, fundScen[1:2, , ], 1 / 12, cForwardCurve)
```

### Aging of one variable annuity

```{r}
# Similarly, users can age this single policy before pricing it. We use the same
# target date and historical fund scenario as generated before
exPolicy <- VAPort[1, ]
ageOnePolicy(exPolicy, mortTable, histFundScen, histDates, dT = 1 / 12,
             targetDate, cForwardCurve)
```
/scratch/gouwar.j/cran-all/cranData/vamc/inst/doc/my-vignette.Rmd