######################################################################################
#' @title unitary alpha
#'
#' @md
#'
#' @description The function 'unitaryAlpha()' computes
#'      the unitary alpha (\insertCite{lc23}{ANOPA}). This
#'      quantity is a novel way to compute correlation in a matrix
#'      where each column is a measure and each row, a subject.
#'      This measure is based on Cronbach's alpha (which could be
#'      labeled a 'global alpha').
#'
#' @usage unitaryAlpha( m )
#'
#' @param m A data matrix for a group of observations.
#'
#' @return A measure of correlation between -1 and +1.
#'
#' @details This measure is derived from Cronbach's measure of
#'   reliability as shown by \insertCite{lc23;textual}{ANOPA}.
#'
#' @references
#' \insertAllCited{}
#'
#' @examples
#'
#' # Generate a random matrix (here with uniform entries)
#' set.seed(42)
#' N <- M <- 10
#' m <- matrix( runif(N*M), N, M)
#'
#' # compute the unitary alpha from that random matrix
#' unitaryAlpha(m)
#'
######################################################################################
#'
#' @export unitaryAlpha
#
######################################################################################

unitaryAlpha <- function(m) {
    k <- dim(m)[2]
    V <- var(apply(m, 1, FUN=sum))
    S <- sum(apply(m, 2, FUN=var))
    (V-S)/((k-1)*S)
}
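# Small numerical illustration (a sketch, not part of the package API): unitary
# alpha is related to Cronbach's alpha (the 'global alpha') by
# a = alpha / (k - (k-1)*alpha), which can be checked against unitaryAlpha():
#     set.seed(42); m <- matrix( runif(100), 10, 10 ); k <- ncol(m)
#     alpha <- (k/(k-1)) * (1 - sum(apply(m, 2, var)) / var(rowSums(m)))
#     alpha / (k - (k-1)*alpha)    # same result as unitaryAlpha(m)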
/scratch/gouwar.j/cran-all/cranData/ANOPA/R/ANOPA-unitaryAlpha.R
#' Arrington et al. (2002) dataset
#'
#' The data, taken from \insertCite{a02;textual}{ANOPA}, is a dataset examining
#' the distribution of fishes with empty stomachs, classified over
#' three factors:
#' 'Collection location' (3 levels: Africa, Central/South America, North America),
#' 'Diel feeding behavior' (2 levels: diurnal, nocturnal),
#' 'Trophic category' (4 levels: Detritivore, Invertivore, Omnivore, Piscivore).
#' It is therefore a 3 × 2 × 4 design with 24 cells.
#' The original data set also contains Order, Family and Species of the observed
#' fishes and can be obtained from
#' https://figshare.com/collections/HOW_OFTEN_DO_FISHES_RUN_ON_EMPTY_/3297635
#' It was discussed in \insertCite{wh11;textual}{ANOPA}.
#'
#' @md
#'
#' @docType data
#'
#' @usage ArringtonEtAl2002
#'
#' @format A data frame.
#'
#' @keywords datasets
#'
#' @references
#' \insertAllCited{}
#'
#' @source \doi{10.1890/0012-9658(2002)083[2145:HODFRO]2.0.CO;2}
#'
#' @examples
#'
#' # see the dataset
#' ArringtonEtAl2002
#'
#' # The columns s and n indicate the number of fishes with
#' # empty stomachs (the "success") and the total number
#' # of fishes observed, respectively. Thus s/n is the proportion.
#'
#' # run the ANOPA analysis
#' w <- anopa( {s; n} ~ Location * Diel * Trophism, ArringtonEtAl2002)
#'
#' # make a plot with all the factors
#' anopaPlot(w)
#'
#' # ... or with a subset of factors, with
#' anopaPlot(w, ~ Location * Trophism)
#'
#' # Because of the three-way interaction, extract simple effects for each Diel
#' e <- emProportions( w, {s;n} ~ Location * Trophism | Diel )
#'
#' # As the two-way simple interaction Location * Trophism is close to
#' # significant for the nocturnal fishes, we extract the second-order
#' # simple effects for each Diel and each Location
#' e <- emProportions(w, {s;n} ~ Trophism | Location * Diel )
#' # As seen, Trophism is significant for the nocturnal fishes of
#' # Central/South America.
#'
"ArringtonEtAl2002"
/scratch/gouwar.j/cran-all/cranData/ANOPA/R/ArringtonEtAll2002.R
###################################################################################
#' @title ArticleExample1
#'
#' @name ArticleExample1
#'
#' @description These are the data from the first example reported in
#'    \insertCite{lc23}{ANOPA}. It shows fictitious data with regard to
#'    the proportion of illuminations as a function of the distracting task.
#'    The design is a between-subject design with 4 groups.
#'
#' @md
#'
#' @docType data
#'
#' @format An object of class data.frame.
#'
#' @keywords datasets
#'
#' @references
#' \insertAllCited{}
#'
#' @source \doi{10.20982/tqmp.19.2.p173}
#'
#' @examples
#' library(ANOPA)
#'
#' # the ArticleExample1 data shows an effect of the type of distracting task
#' ArticleExample1
#'
#' # We perform an anopa on this dataset
#' w <- anopa( {nSuccess; nParticipants} ~ DistractingTask, ArticleExample1)
#'
#' # We finish with post-hoc Tukey tests
#' e <- posthocProportions( w )
#'
#' # a small plot is *always* a good idea
#' anopaPlot(w)
#'
"ArticleExample1"
/scratch/gouwar.j/cran-all/cranData/ANOPA/R/ArticleExample1.R
###################################################################################
#' @title ArticleExample2
#'
#' @name ArticleExample2
#'
#' @description These are the data from the second example reported in
#'    \insertCite{lc23}{ANOPA}. It shows fictitious data with regard to
#'    the proportion of graduation for persons with dyslexia as a function
#'    of the moment of diagnostic (early or late) and the socio-economic status (SES).
#'    The design is a between-subject design with 2 x 3 = 6 groups.
#'
#' @md
#'
#' @docType data
#'
#' @format An object of class data.frame.
#'
#' @keywords datasets
#'
#' @references
#' \insertAllCited{}
#'
#' @source \doi{10.20982/tqmp.19.2.p173}
#'
#' @examples
#' library(ANOPA)
#'
#' # the ArticleExample2 data shows an effect on the success to graduate as a function of
#' # socio-economic status and moment of diagnostic:
#' ArticleExample2
#'
#' # perform an anopa on this dataset
#' w <- anopa( {s;n} ~ MofDiagnostic * SES, ArticleExample2)
#'
#' # a small plot is *always* a good idea
#' anopaPlot(w)
#' # here the plot is only for the main effect of SES.
#' anopaPlot(w, ~ SES)
#'
"ArticleExample2"
/scratch/gouwar.j/cran-all/cranData/ANOPA/R/ArticleExample2.R
###################################################################################
#' @title ArticleExample3
#'
#' @name ArticleExample3
#'
#' @description These are the data from the third example reported in
#'    \insertCite{lc23}{ANOPA}. It shows fictitious data with regard to
#'    the proportion of patients suffering delirium tremens as a function
#'    of the drug administered (cBau, eaPoe, R&V, Placebo).
#'    The design is a within-subject design with 4 measurements (order of
#'    administration randomized).
#'
#' @md
#'
#' @docType data
#'
#' @format An object of class data.frame.
#'
#' @keywords datasets
#'
#' @references
#' \insertAllCited{}
#'
#' @source \doi{10.20982/tqmp.19.2.p173}
#'
#' @examples
#' library(ANOPA)
#'
#' # the ArticleExample3 data shows an effect of the drug administered on the
#' # proportion of participants who had an episode of delirium tremens
#' ArticleExample3
#'
#' # perform an anopa on this dataset
#' w <- anopa( cbind(cBau,eaPoe,RnV,Placebo) ~ ., ArticleExample3, WSFactors = "Drug(4)")
#'
#' # We finish with post-hoc Tukey tests
#' e <- posthocProportions( w )
#'
#' # a small plot is *always* a good idea
#' anopaPlot(w)
#'
"ArticleExample3"
/scratch/gouwar.j/cran-all/cranData/ANOPA/R/ArticleExample3.R
###################################################################################
#' @title A collection of minimal Examples from various designs with one or two factors.
#'
#' @name minimalExamples
#'
#' @md
#'
#' @description The datasets present minimal examples that are analyzed with an
#'   analysis of proportions method (described in \insertCite{lc23;textual}{ANOPA}).
#'   The five datasets are
#'   - 'minimalBSExample': an example with a single factor (state of residency)
#'   - 'twoWayExample': an example with two factors, Class and Difficulty
#'   - 'minimalWSExample': an example with a within-subject design (three measurements)
#'   - 'twoWayWithinExample': an example with two within-subject factors
#'   - 'minimalMxExample': a mixed design having one within-subject and one between-subject factor
#'
#' @docType data
#'
#' @format Objects of class data.frame.
#'
#' @keywords datasets
#'
#' @references
#' \insertAllCited{}
#'
#' @examples
#' library(ANOPA)
#'
#' # the twoWayExample data with proportions per Classes and Difficulty levels
#' twoWayExample
#'
#' # perform an anopa on this dataset
#' w <- anopa( {success;total} ~ Difficulty * Class, twoWayExample)
#'
#' # We analyse the proportions by Difficulty for each Class
#' e <- emProportions(w, ~ Difficulty | Class)
#'
#' @rdname minimalExamples
"minimalBSExample"

#' @rdname minimalExamples
"twoWayExample"

#' @rdname minimalExamples
"minimalWSExample"

#' @rdname minimalExamples
"twoWayWithinExample"

#' @rdname minimalExamples
"minimalMxExample"
/scratch/gouwar.j/cran-all/cranData/ANOPA/R/minimalExamples.R
## ----echo = FALSE, message = FALSE, results = 'hide', warning = FALSE---------
cat("this will be hidden; use for general initializations.\n")
library(ANOPA)
library(ggplot2)
library(superb)

## ----message = FALSE, warning = FALSE-----------------------------------------
ArticleExample1

## -----------------------------------------------------------------------------
ArticleExample1$nSuccess / ArticleExample1$nParticipants

## -----------------------------------------------------------------------------
w <- anopa( {nSuccess; nParticipants} ~ DistractingTask, ArticleExample1)

## ----message=FALSE, warning=FALSE, fig.width=4, fig.height=3, fig.cap="**Figure 1**. The proportion of illumination as a function of the distracting task. Error bars show difference-adjusted 95% confidence intervals."----
anopaPlot(w)

## -----------------------------------------------------------------------------
summarize(w)

## -----------------------------------------------------------------------------
corrected(w)

## -----------------------------------------------------------------------------
# posthocProportions( w ) ## not yet bundled in the library
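## -----------------------------------------------------------------------------
## (mirrors the sketch added to the vignette; the helper 'ans' is ours for
## illustration and is not part of the package API)
ans <- function(s, n) asin( sqrt( (s + 3/8) / (n + 3/4) ) )
ans( ArticleExample1$nSuccess, ArticleExample1$nParticipants )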
/scratch/gouwar.j/cran-all/cranData/ANOPA/inst/doc/A-WhatIsANOPA.R
---
title: "What is an Analysis of Proportions using the Anscombe Transform?"
bibliography: "../inst/REFERENCES.bib"
csl: "../inst/apa-6th.csl"
output: rmarkdown::html_vignette
description: >
  This vignette describes what an analysis of proportions is.
vignette: >
  %\VignetteIndexEntry{What is an Analysis of Proportions using the Anscombe Transform?}
  %\VignetteEngine{knitr::rmarkdown}
  \usepackage[utf8]{inputenc}
---

The _ANalysis Of Proportion using the Anscombe transform_ (ANOPA) is a framework
for analyzing proportions (often written as percentages) across groups or across
measurements. This framework is similar to the well-known ANOVA and uses the same
general approach. It allows analyzing _main effects_ and _interaction effects_.
It also allows analyzing _simple effects_ (in case of interactions) as well as
_orthogonal contrasts_ and _post-hoc_ tests. Further, ANOPA makes it easy to
generate proportion plots which include confidence intervals, and to compute
_eta-squared_ as a measure of effect size. Finally, power planning is easy within
ANOPA.

```{r, echo = FALSE, message = FALSE, results = 'hide', warning = FALSE}
cat("this will be hidden; use for general initializations.\n")
library(ANOPA)
library(ggplot2)
library(superb)
```

## A basic example

As an example, suppose a study where four groups of participants are tested on
their ability to have an illumination according to the nature of a distracting
task. This example is found in @lc23. The data can be given with 1s for those
participants who experienced an illumination and with 0s for those who didn't.
Thus, a table having one line per participant giving the observations would look
like:

| Condition of distraction | Illumination? |
|:----------------|-------------------------|
| Doing Crosswords | 1 |
| Doing Crosswords | 0 |
| Doing Crosswords | 0 |
| ... | ... |
| Doing Crosswords | 1 |
| Solving Sudokus | 0 |
| Solving Sudokus | 1 |
| Solving Sudokus | 1 |
| ... | ... |
| Solving Sudokus | 0 |
| Performing chants | 0 |
| Performing chants | 1 |
| ... | ... |
| Performing chants | 0 |
| Controlling breath | 1 |
| Controlling breath | 1 |
| ... | ... |
| Controlling breath | 0 |

This long table can easily be reduced by "compiling" the results, that is, by
counting the number of participants per group who experienced an illumination.
Because the group sizes may not be equal, counting the number of participants in
each group is also needed. We would then observe

| Condition of distraction | Number of illumination | Group size |
|:----------------|-------------------------|---------------------------|
| Doing Crosswords | 10 | 30 |
| Solving Sudokus | 14 | 22 |
| Performing chants | 7 | 18 |
| Controlling breath | 5 | 27 |

From these data, we may wonder if the four interventions are equally likely to
result in an illumination. Transforming the numbers of illuminations into
percentages provides some indication that this may not be the case:

| Condition of distraction | Percentage of illumination |
|:----------------|---------------------------|
| Doing Crosswords | 33.3% |
| Solving Sudokus | 63.6% |
| Performing chants | 38.9% |
| Controlling breath | 18.5% |

In all likelihood, solving Sudokus puts participants in a better mental
disposition to have an illumination whereas controlling one's breath might be
the worst intervention to favor illuminations. But how can we be confident of
the reliability of this observation?
The sample is fairly large (total sample size of 97) and the effect seems
important (percentages ranging from 18% to 64% are not showing trivially small
differences) so that we can expect decent statistical power. How do we proceed
to formally test this? This is the purpose of ANOPA.

## The rationale behind the test (optional)

ANOPA makes the following operations transparent. Hence, if you are not
interested in the internals of an ANOPA, you can just skip to the next section.

The general idea is to have an ANOVA-like procedure to analyse proportions. One
critical assumption in ANOVA is that the variances are homogeneous, that is,
constant across conditions. Sadly, this is not the case for proportions. Indeed,
proportions close to 0% or close to 100% (floor and ceiling) are obtained when,
in the population, the true proportions are small (or large; we consider the
former scenario hereafter, but the rationale is symmetrical for large population
proportions). When this is the case, there is very little room to observe in a
sample a proportion deviating much from the population proportion. For example,
if the population proportion is, say, 5%, then in a sample of 20 participants,
you cannot expect to observe proportions very far from 5%. By contrast, if the
population's true proportion is 50%, then in a sample of 20 participants, a
larger range of observed proportions is possible.

This simple illustration shows that the possible variance in the scores is not
homogeneous: little variance is expected for extreme proportions and more
variance is expected for proportions in the middle of the range (near 50%).
Because the purpose of the analysis is to see if the proportions might be
different, it means that we envision that they occupy some range, and therefore,
we cannot maintain that variances are homogeneous.

We therefore need a "variance-stabilizing" approach. The purpose of the Anscombe
transform (an extension of the arcsine transform) is precisely this: replace the
proportions with an alternate measure which has the same expected variance
irrespective of the population proportion [@a48]. Anscombe showed that the
variance of the transformed proportions is a constant, $1/(4(n+1/2))$,
determined only by the number of observations. Thus, we have a
variance-stabilizing transformation.

As an added bonus, not only are the variances stabilized, but we actually know
their values. Hence, it is no longer necessary to estimate the "error term" in
an ANOVA. As the error term is known, the denominator of the ANOVA is calculated
without degrees of freedom (we set them to $\infty$ to denote this).

Recent work (see the last section) confirms that this transformation is actually
the most accurate approximation we know to this day and that there is very
little room to find a more accurate transformation.

# Analyzing the data

The dataset above can be found in a compiled format in the dataframe
``ArticleExample1``:

```{r, message = FALSE, warning = FALSE}
ArticleExample1
```

(There are alternate formats for the data, discussed in the vignette
[DataFormatsForProportions](../articles/B-DataFormatsForProportions.html).)
As seen, the group labels are given in column ``DistractingTask`` whereas the
observations are described in ``nSuccess`` (the number of 1s) and
``nParticipants`` (the number of observations, i.e., the number of 0s and 1s).
To see the results as proportions, divide the number of successes by the number
of observations, for example

```{r}
ArticleExample1$nSuccess / ArticleExample1$nParticipants
```

(multiply by 100 to have percentages rather than proportions.)

The analysis is very simply triggered by the following

```{r}
w <- anopa( {nSuccess; nParticipants} ~ DistractingTask, ArticleExample1)
```

The first argument is a formula which describes how the data are presented
(before the ~) and what are the factors in the design (after the ~). Here,
because the observations are actually described over two columns (the number of
1s and the total number of participants in each group), we use the ``{s;n}``
notation which can be read as "s over n" (note the curly braces and the
semi-colon, which is not standard notation in R). The second argument is the
data frame, here in compiled form.

You are done!

Please start (always start) with a plot.

```{r, message=FALSE, warning=FALSE, fig.width=4, fig.height=3, fig.cap="**Figure 1**. The proportion of illumination as a function of the distracting task. Error bars show difference-adjusted 95% confidence intervals."}
anopaPlot(w)
```

This plot shows confidence intervals that are "difference adjusted" [@b12]. Such
confidence intervals allow comparing between conditions using the golden rule:
_if a result is not included in the confidence interval of another score, then
the two conditions are likely significantly different_. In the above plot, we
see that the Breath condition is not included in the Sudoku condition's
interval, so that we can expect these two conditions to differ significantly,
and as such, the ANOPA to show a significant rejection of the null hypothesis
that all the proportions are equal.

The ANOPA table is obtained as usual with ``summary()`` or ``summarize()``:

```{r}
summarize(w)
```

or, if you just want the corrected statistics (recommended), with

```{r}
corrected(w)
```

As seen, the (uncorrected) effect of the _Distracting Task_ is significant
($F(3, \infty) = 3.51$, $p = .014$). Because, for small samples, the _F_
distribution is biased up, an adjusted version can be consulted (last three
columns). The result is nearly the same here ($F(3, \infty) = 3.39$,
$p = .017$) because this sample is far from being small. The correction is
obtained with Williams' method [@w76] and reduces the _F_ by 3.6% (column
`correction` shows 1.0357).

# Post-hoc test

The proportions can be further analyzed using a post-hoc test to determine
which pairs of distracting tasks have different proportions of illumination. To
that end, we use Tukey's Honestly Significant Difference (HSD) procedure.

```{r}
# posthocProportions( w ) ## not yet bundled in the library
```

As seen, the Breath condition differs significantly from the Sudoku condition.
The Crosswords condition also differs from the Sudoku condition. These are the
only two differences that seem statistically warranted.

This is it. Enjoy!

The vignette [ArringtonExample](../articles/D-ArringtonExample.html) examines a
real dataset where more than one factor is present.

# A common confusion

A common confusion with regard to proportions is to believe that a _mean
proportion_ is a proportion. In @wh11, one also finds _median proportions_. All
these express confusion as to what a proportion is. A proportion *must* be
based on 1s and 0s. Thus, if a group's score is a proportion, it means that all
the members of that group have been observed once, and were coded as 0 or 1.
If you have multiple observations per subject, and if the group's score is the
mean of the subjects' proportions, then you are in an impure scenario: your
primary data (the subjects' proportions) are *not* 0s and 1s and therefore this
situation cannot be analyzed with ANOPA. If, on the other hand, you consider
that the repeated measurements of each participant constitute a factor, then
you can analyze the results with ANOPA, treating the factor "repetition of the
measurement" as a within-subject factor.

In the worst-case situation, if the participants were measured multiple times
but you do not have access to the individual measurements, then you may treat
the proportions as being _means_ and run a standard ANOVA. However, keep in
mind that this approach is only warranted if you have a lot of measurements
(owing to the central limit theorem). With just a handful of measurements,
well, no one can help you...

# Why infinite degrees of freedom? (optional)

For some, this notation may seem bizarre, or arbitrary. However, it is formally
an exact notation. An equivalent notation relates the $t$ tests and the $z$
tests. As is well known, the $t$ test is used when the population variance is
unknown and estimated from the sample's variance. In this test, this variance
can be seen as the "error term". However, when the population variance is
known, we can use this information and the test then becomes a $z$ test. Yet,
the $t$ distribution (and the critical value of this test) is identical to a
standardized Normal distribution when the degrees of freedom in the $t$
distribution tend to infinity. In other words, a $z$ test is the same as a $t$
test when there is no uncertainty in the error term. And when there is no
uncertainty in the error term, we can replace the degrees of freedom with
infinity.

This rationale is the same in the ANOPA, which explains why we note the
denominator's degrees of freedom with infinity.

# Why the arcsine transform? (optional)

This transformation may seem quite arbitrary. Its origin shows indeed that this
solution was found by intuition. Fisher was the first to propose trigonometric
transformations for the study of statistics, in 1915. This approach was found
fertile when applied to correlation testing, where the arctanh transform
(formally, the inverse hyperbolic tangent transformation) provided an excellent
approximation [@f21]. When Fisher considered proportions, his first attempt was
to suggest a cosine transform [@f22]. Zubin later refined the approach by
suggesting the arcsine transform [@z35]. The basic form of the arcsine
transform was later refined by Anscombe to the form we use in the ANOPA [@a48].
Anscombe's modifications, the addition of 3/8 to the number of successes and
3/4 to the number of trials, led to a theoretical variance equal to
$1/(4(n+1/2))$.

Formidable developments in the early 90s showed that this transform has other
important characteristics. For example, @c90 and @ll90 derived that this
transform will either underestimate the true probability or overestimate it.
More importantly, Chen showed that no other transformation is known to
fluctuate less than the arcsine transform around the exact probability. This
transformation is therefore the best option when analyzing proportions.

You can read more in @lc23; also check @c90 or @ll90 for mathematical
demonstrations showing the robustness of the ANOPA.
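To make the transform concrete, here is a minimal sketch (the helper ``ans()``
is ours for illustration and is not part of the package, which applies the
transform internally):

```{r}
# Anscombe transform of s successes out of n observations; its theoretical
# variance, 1/(4*(n + 1/2)), depends only on n
ans <- function(s, n) asin( sqrt( (s + 3/8) / (n + 3/4) ) )

# the transformed scores of the four groups of the example
ans( ArticleExample1$nSuccess, ArticleExample1$nParticipants )
```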
Finally, @w76 explains the correction factor and its purpose.

# References
/scratch/gouwar.j/cran-all/cranData/ANOPA/inst/doc/A-WhatIsANOPA.Rmd
## ----echo = FALSE, message = FALSE, results = 'hide', warning = FALSE---------
cat("this is hidden; general initializations.\n")
library(ANOPA)
w1 <- anopa( {success;total} ~ Class * Difficulty, twoWayExample)
dataWide1     <- toWide(w1)
dataCompiled1 <- toCompiled(w1)
dataLong1     <- toLong(w1)
rownames(dataLong1) <- NULL
w2 <- anopa( cbind(bpre, bpost, b1week, b5week) ~ Status, minimalMxExample, WSFactors = "Moment(4)")
dataWide2     <- toWide(w2)
dataCompiled2 <- toCompiled(w2)
dataLong2     <- toLong(w2)
rownames(dataLong2) <- NULL

## ----message=FALSE, warning=FALSE, echo=TRUE, eval=TRUE-----------------------
library(ANOPA)

## ----message=FALSE, warning=FALSE, echo=TRUE, eval=TRUE-----------------------
dataWide1

## ----message=FALSE, warning=FALSE, echo=TRUE, eval=TRUE-----------------------
w1 <- anopa( success ~ Class * Difficulty, dataWide1)

## ----message=FALSE, warning=FALSE, echo=TRUE, eval=TRUE-----------------------
dataWide2

## ----message=FALSE, warning=FALSE, echo=TRUE, eval=TRUE-----------------------
w2 <- anopa( cbind(bpre, bpost, b1week, b5week) ~ Status, dataWide2, WSFactors = "Moment(4)" )

## ----message=FALSE, warning=FALSE, echo=FALSE, eval=TRUE----------------------
head(dataLong1)

## ----message=FALSE, warning=FALSE, echo=TRUE, eval=TRUE-----------------------
w1Long <- anopa( Value ~ Class * Difficulty * Variable | Id, dataLong1 )

## ----message=FALSE, warning=FALSE, echo=TRUE, eval=TRUE-----------------------
head(dataLong2)

## ----message=FALSE, warning=FALSE, echo=TRUE, eval=TRUE-----------------------
w2Long <- anopa( Value ~ Status * Variable | Id, dataLong2, WSFactors="Moment(4)" )

## ----message=FALSE, warning=FALSE, echo=FALSE, eval=TRUE----------------------
dataCompiled1

## ----message=FALSE, warning=FALSE, echo=TRUE, eval=TRUE-----------------------
w1Compiled <- anopa( {success; Count} ~ Class * Difficulty, dataCompiled1 )

## ----message=FALSE, warning=FALSE, echo=FALSE, eval=TRUE----------------------
dataCompiled2

## ----message=FALSE, warning=FALSE, echo=TRUE, eval=TRUE-----------------------
toCompiled(w1)
toCompiled(w2)

## -----------------------------------------------------------------------------
w1 <- anopa( {success;total} ~ Class * Difficulty, twoWayExample)
dataWide1     <- toWide(w1)
dataCompiled1 <- toCompiled(w1)
dataLong1     <- toLong(w1)
w2 <- anopa( cbind(bpre, bpost, b1week, b5week) ~ Status, minimalMxExample, WSFactors = "Moment(4)")
dataWide2     <- toWide(w2)
dataCompiled2 <- toCompiled(w2)
dataLong2     <- toLong(w2)

## ----message=TRUE, warning=FALSE, echo=TRUE, eval=TRUE------------------------
w3 <- anopa( cbind(r11,r12,r13,r21,r22,r23) ~ . , twoWayWithinExample, WSFactors = c("A(3)","B(2)") )
toCompiled(w3)

## ----message=TRUE, warning=FALSE, echo=TRUE, eval=TRUE------------------------
options("ANOPA.feedback" = "none")
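## -----------------------------------------------------------------------------
## to restore the feedback messages afterwards (sketch; "all" is the value used
## for this option in the type-I error vignette of this package):
# options("ANOPA.feedback" = "all")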
/scratch/gouwar.j/cran-all/cranData/ANOPA/inst/doc/B-DataFormatsForProportions.R
---
title: "Data formats for proportions"
bibliography: "../inst/REFERENCES.bib"
csl: "../inst/apa-6th.csl"
output: rmarkdown::html_vignette
description: >
  This vignette describes the various ways that proportions can be entered in a data.frame.
vignette: >
  %\VignetteIndexEntry{Data formats for proportions}
  %\VignetteEngine{knitr::rmarkdown}
  \usepackage[utf8]{inputenc}
---

```{r, echo = FALSE, message = FALSE, results = 'hide', warning = FALSE}
cat("this is hidden; general initializations.\n")
library(ANOPA)
w1 <- anopa( {success;total} ~ Class * Difficulty, twoWayExample)
dataWide1     <- toWide(w1)
dataCompiled1 <- toCompiled(w1)
dataLong1     <- toLong(w1)
rownames(dataLong1) <- NULL
w2 <- anopa( cbind(bpre, bpost, b1week, b5week) ~ Status, minimalMxExample, WSFactors = "Moment(4)")
dataWide2     <- toWide(w2)
dataCompiled2 <- toCompiled(w2)
dataLong2     <- toLong(w2)
rownames(dataLong2) <- NULL
```

# Data formats for proportions

Proportions are actually not raw data: they are the proportion of one response
(typically called a `success`) over all the responses (the other responses
being called collectively a `failure`). As such, a proportion is a _summary
statistic_, a bit like the mean is a summary statistic of continuous data. Very
often, the successes are coded using the digit `1` and the failures, with the
digit `0`. When this is the case, computing the mean is actually the same as
computing the proportion of successes.

However, it is a conceptual mistake to think of proportions as means, because
they must be processed completely differently from averages. For example,
standard errors and confidence intervals for proportions are obtained using
very different procedures than standard errors and confidence intervals for
the mean.

In this vignette, we review various ways that data can be coded in a data
frame. In a nutshell, there are three ways to represent successes and failures:
wide, long, and compiled. The first two show raw scores whereas the last shows
a summary of the data.

Before we begin, we load the package ``ANOPA`` (if it is not present on your
computer, first install it from CRAN or from the source repository with
``devtools::install_github("dcousin3/ANOPA")``):

```{r, message=FALSE, warning=FALSE, echo=TRUE, eval=TRUE}
library(ANOPA)
```

## First format: Wide data format

In this format, there is one line per _subject_ and one column for each
measurement. The columns contain only 1s (`success`) or 0s (`failure`). If the
participant was measured multiple times, there are one or more within-subject
factors resulting in multiple columns of measurements. In between-group
designs, there is only a single column of scores.

As an example, consider the following data for a between-subject design with
two factors: Class (2 levels) and Difficulty (3 levels), for 6 groups. There is
an identical number of participants in each, 12, for a total of 72
participants.

```{r, message=FALSE, warning=FALSE, echo=TRUE, eval=TRUE}
dataWide1
```

When the data are in a wide format, the formula in ``anopa()`` must provide the
column(s) where the successes/failures are stored, and the conditions after the
usual ~, as in

```{r, message=FALSE, warning=FALSE, echo=TRUE, eval=TRUE}
w1 <- anopa( success ~ Class * Difficulty, dataWide1)
```

(how dataWide1 was obtained is shown in the Section *Converting between
formats* below.)

As another example, consider the following data obtained in a mixed, within-
and between-subject design.
It has a factor `Status` with 8, 9 and 7 participants per group respectively.
It also has four repeated measures, `bpre`, `bpost`, `b1week` and `b5week`,
which represent four different moments of measurement. The data frame is

```{r, message=FALSE, warning=FALSE, echo=TRUE, eval=TRUE}
dataWide2
```

The formula for analyzing these data in this format is

```{r, message=FALSE, warning=FALSE, echo=TRUE, eval=TRUE}
w2 <- anopa( cbind(bpre, bpost, b1week, b5week) ~ Status, dataWide2, WSFactors = "Moment(4)" )
```

It is necessary to (a) group all the measurement columns using `cbind()`;
(b) indicate the within-subject factor(s) using the argument `WSFactors`, along
with the number of levels of each in a string.

## Second format: Long data format

This format may be preferred by linear modelers (but it may rapidly become
_very_ long!). There are always at least these columns: one Id column, one
column to indicate a within-subject level, and one column to indicate the
observed score. On the other hand, this format has fewer columns in repeated
measure designs.

This example shows the first 6 lines of the 2-factor between design data above,
stored in the long format.

```{r, message=FALSE, warning=FALSE, echo=FALSE, eval=TRUE}
head(dataLong1)
```

To analyse such a data format within ``anopa()``, use

```{r, message=FALSE, warning=FALSE, echo=TRUE, eval=TRUE}
w1Long <- anopa( Value ~ Class * Difficulty * Variable | Id, dataLong1 )
```

The vertical line symbol indicates that the observations are nested within
``Id`` (i.e., all the lines with the same Id are actually the same subject).

With the mixed design described above, the data begin as:

```{r, message=FALSE, warning=FALSE, echo=TRUE, eval=TRUE}
head(dataLong2)
```

and are analyzed with the formula:

```{r, message=FALSE, warning=FALSE, echo=TRUE, eval=TRUE}
w2Long <- anopa( Value ~ Status * Variable | Id, dataLong2, WSFactors="Moment(4)" )
```

## Third format: Compiled data format

This format is compiled, in the sense that the 0s and 1s have been replaced by
a single count of successes for each cell of the design. Hence, we no longer
have access to the raw data. This format however has the advantage of being
very compact, requiring few lines.

Here is the data for the 2 between-subject factors example

```{r, message=FALSE, warning=FALSE, echo=FALSE, eval=TRUE}
dataCompiled1
```

To use a compiled format in `anopa()`, use

```{r, message=FALSE, warning=FALSE, echo=TRUE, eval=TRUE}
w1Compiled <- anopa( {success; Count} ~ Class * Difficulty, dataCompiled1 )
```

where ``success`` identifies the column in which the total number of successes
is stored. The column ``Count`` indicates the total number of observations in
that cell. The notation {s;n} is read "s over n" (note the curly braces and
semicolon).

For the mixed design presented earlier, the data look like:

```{r, message=FALSE, warning=FALSE, echo=FALSE, eval=TRUE}
dataCompiled2
```

where there are columns for the number of successes for each repeated measure.
A new column appears, ``uAlpha``. This column (called _unitary alpha_) is a
measure of correlation (between -1 and +1). In this fictitious example, the
correlations are near zero (negative actually) by chance, as the data were
generated randomly.

It is not possible to run an ANOPA analysis at this time on compiled data when
there are repeated measures (but this may change in a future version).

## Converting between formats

Once entered in an ``anopa()`` structure, it is possible to convert to any
format using ``toWide()``, ``toCompiled()`` and ``toLong()``.
For example:

```{r, message=FALSE, warning=FALSE, echo=TRUE, eval=TRUE}
toCompiled(w1)
toCompiled(w2)
```

The compiled format is probably the most compact format, but the wide format is
the most explicit format (as we see all the subjects and their scores on a
single line, one subject per line).

## Getting the example data frames

Above, we used two examples. They are available in this package under the names
``twoWayExample`` and ``minimalMxExample``. The first is available in compiled
form, the second in wide form. We converted these data sets to the other
formats using:

```{r}
w1 <- anopa( {success;total} ~ Class * Difficulty, twoWayExample)
dataWide1     <- toWide(w1)
dataCompiled1 <- toCompiled(w1)
dataLong1     <- toLong(w1)
w2 <- anopa( cbind(bpre, bpost, b1week, b5week) ~ Status, minimalMxExample, WSFactors = "Moment(4)")
dataWide2     <- toWide(w2)
dataCompiled2 <- toCompiled(w2)
dataLong2     <- toLong(w2)
```

## Multiple repeated-measure factors

One limitation is with regard to repeated measures: it is not possible to guess
the names of the within-subject factors from the names of the columns. This is
why, as soon as there is more than one measurement, the argument ``WSFactors``
must be added.

Suppose a two-way within-subject design with 3 x 2 levels. The data set
``twoWayWithinExample`` has 6 columns; the first three contain the measurements
for level 1 of the second factor (B), and the last three, for level 2 of that
factor. Within each triplet of columns, the first factor (A) goes from 1 to 3.

```{r, message=TRUE, warning=FALSE, echo=TRUE, eval=TRUE}
w3 <- anopa( cbind(r11,r12,r13,r21,r22,r23) ~ . , twoWayWithinExample, WSFactors = c("A(3)","B(2)") )
toCompiled(w3)
```

A "fyi" message is shown which lets you see how the variables are interpreted.
Such messages can be inhibited by changing the option

```{r, message=TRUE, warning=FALSE, echo=TRUE, eval=TRUE}
options("ANOPA.feedback" = "none")
```

To know more about analyzing proportions with ANOPA, refer to @lc23 or to
[What is an ANOPA?](../articles/A-WhatIsANOPA.html).

# References
/scratch/gouwar.j/cran-all/cranData/ANOPA/inst/doc/B-DataFormatsForProportions.Rmd
## ----echo = FALSE, message = FALSE, results = 'hide', warning = FALSE---------
cat("this will be hidden; use for general initializations.\n")
library(ANOPA)

## ----message=FALSE, warning=FALSE, fig.width=5, fig.height=3, fig.cap="**Figure 1**. The proportions as a function of class and Difficulty. Error bars show difference-adjusted 95% confidence intervals."----
library(ANOPA)
w <- anopa( {success;total} ~ Class * Difficulty, twoWayExample)
anopaPlot(w)

## ----message=FALSE, warning=FALSE, fig.width=4, fig.height=3, fig.cap="**Figure 2**. The proportions as a function of Difficulty only. Error bars show difference-adjusted 95% confidence intervals."----
anopaPlot(w, ~ Difficulty )

## ----message=FALSE, warning=FALSE, fig.width=4, fig.height=3, fig.cap="**Figure 3**. Same as Figure 2 with some visual improvements."----
library(ggplot2)
anopaPlot(w, ~ Difficulty) + theme_bw() + # change theme
    scale_x_discrete(limits = c("Easy", "Moderate", "Difficult")) #change order
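## -----------------------------------------------------------------------------
## Not generated from the vignette: a minimal sketch of the difference-adjusted
## interval for a single cell, following the formulas given in the vignette. The
## cell values (s successes out of n observations) are made up for illustration.
s <- 10; n <- 30; gamma <- 0.95
A  <- asin( sqrt( (s + 3/8) / (n + 3/4) ) )               # Anscombe transform
SE <- 1 / sqrt( 4 * (n + 1/2) )                           # theoretical standard error
ci <- A + c(-1, 1) * sqrt(2) * qnorm(0.5 + gamma/2) * SE  # sqrt(2): difference adjustment
sin(ci)^2                                                 # back to the proportion scale (approximately)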
/scratch/gouwar.j/cran-all/cranData/ANOPA/inst/doc/C-ConfidenceIntervals.R
---
title: "Confidence intervals with proportions"
bibliography: "../inst/REFERENCES.bib"
csl: "../inst/apa-6th.csl"
output: rmarkdown::html_vignette
description: >
  This vignette describes how to plot confidence intervals with proportions.
vignette: >
  %\VignetteIndexEntry{Confidence intervals with proportions}
  %\VignetteEngine{knitr::rmarkdown}
  \usepackage[utf8]{inputenc}
---

```{r, echo = FALSE, message = FALSE, results = 'hide', warning = FALSE}
cat("this will be hidden; use for general initializations.\n")
library(ANOPA)
```

Probably the most useful tool for data analysis is a plot with suitable error
bars [@cgh21]. In this vignette, we show how to make confidence intervals for
proportions.

## Theory behind Confidence intervals for proportions

For proportions, ANOPA is based on the Anscombe transform [@a48]. This measure
has a known theoretical standard error which depends only on the sample size
$n$:

$$SE_{A}(n) = 1/\sqrt{4(n+1/2)}.$$

Consequently, when the groups' sizes are similar, homogeneity of variances
holds. From this, we can decompose the total test statistic $F$ into a
component for each cell of the design. We thus get, for each cell,

$$\left[ A + z_{0.5-\gamma/2} \times SE_{A}(n), \; A + z_{0.5+\gamma/2} \times SE_{A}(n) \right]$$

in which $SE_{A}(n)$ is the theoretical standard error based only on $n$, and
$\gamma$ is the desired confidence level (often .95).

This technique returns _stand-alone_ confidence intervals, that is, intervals
which can be used to compare the proportion to a fixed point. However, such
_stand-alone_ intervals cannot be used to compare one proportion to another
proportion [@cgh21]. To compare an observed proportion to another observed
proportion, it is necessary to adjust the intervals for pair-wise differences
[@b12]. This is achieved by increasing the width of the intervals by a factor
of $\sqrt{2}$.

Also, in repeated measure designs, the correlation is beneficial to improve
estimates. As such, the interval width can be reduced when correlation is
positive by multiplying its length by $\sqrt{1-\alpha_1}$, where $\alpha_1$ is
a measure of correlation in a matrix containing repeated measures (based on
the unitary alpha measure).

Finally, the above returns confidence intervals for the *transformed* scores.
However, when used in a plot, it is typically more convenient to plot
proportions (from 0 to 1) rather than Anscombe scores (from 0 to $\pi/2
\approx$ 1.57). Thus, it is possible to rescale the vertical axis using the
inverse Anscombe transform so that proportions are shown.

This is it.

## Complicated? Well, not really:

```{r, message=FALSE, warning=FALSE, fig.width=5, fig.height=3, fig.cap="**Figure 1**. The proportions as a function of class and Difficulty. Error bars show difference-adjusted 95% confidence intervals."}
library(ANOPA)
w <- anopa( {success;total} ~ Class * Difficulty, twoWayExample)
anopaPlot(w)
```

Because the analysis ``summary(w)`` suggests that only the factor `Difficulty`
has a significant effect, you may select only that factor for plotting, with
e.g.,

```{r, message=FALSE, warning=FALSE, fig.width=4, fig.height=3, fig.cap="**Figure 2**. The proportions as a function of Difficulty only. Error bars show difference-adjusted 95% confidence intervals."}
anopaPlot(w, ~ Difficulty )
```

As is the case with any ``ggplot2`` figure, you can customize it at will. For
example,

```{r, message=FALSE, warning=FALSE, fig.width=4, fig.height=3, fig.cap="**Figure 3**. Same as Figure 2 with some visual improvements."}
library(ggplot2)
anopaPlot(w, ~ Difficulty) + theme_bw() + # change theme
    scale_x_discrete(limits = c("Easy", "Moderate", "Difficult")) #change order
```

As you can see from this plot, Difficulty has a marked effect, and the most
different conditions are Easy vs. Difficult.

Here you go.

# References
/scratch/gouwar.j/cran-all/cranData/ANOPA/inst/doc/C-ConfidenceIntervals.Rmd
## ----echo = FALSE, message = FALSE, results = 'hide', warning = FALSE---------
cat("this is hidden; general initializations.\n")
library(ANOPA)

## -----------------------------------------------------------------------------
ArringtonEtAl2002

## -----------------------------------------------------------------------------
getOption("ANOPA.zeros")

## -----------------------------------------------------------------------------
w <- anopa( {s; n} ~ Location * Diel * Trophism, ArringtonEtAl2002)

## -----------------------------------------------------------------------------
uncorrected(w)

## ----message=FALSE, warning=FALSE, fig.width=5, fig.height=5, fig.cap="**Figure 1**. The proportions in the Arrington et al. 2002 data. Error bars show difference-adjusted 95% confidence intervals."----
anopaPlot(w)

## ----message=FALSE, warning=FALSE, fig.width=5, fig.height=3, fig.cap="**Figure 2**. The proportions as a function of Trophism and Location. Error bars show difference-adjusted 95% confidence intervals."----
anopaPlot(w, ~ Trophism * Location)

## -----------------------------------------------------------------------------
e <- emProportions( w, ~ Location * Trophism | Diel )
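## -----------------------------------------------------------------------------
## As noted in the vignette, the fyi and warning messages about the imputed
## missing cells can be suppressed with:
# options("ANOPA.feedback" = "none")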
/scratch/gouwar.j/cran-all/cranData/ANOPA/inst/doc/D-ArringtonExample.R
---
title: "Analyzing proportions with the Arrington et al. 2002 example"
bibliography: "../inst/REFERENCES.bib"
csl: "../inst/apa-6th.csl"
output: rmarkdown::html_vignette
description: >
  This vignette describes how a real dataset with three factors can be analyzed.
vignette: >
  %\VignetteIndexEntry{Analyzing proportions with the Arrington et al. 2002 example}
  %\VignetteEngine{knitr::rmarkdown}
  \usepackage[utf8]{inputenc}
---

```{r, echo = FALSE, message = FALSE, results = 'hide', warning = FALSE}
cat("this is hidden; general initializations.\n")
library(ANOPA)
```

@a02 published a data set available from the web. It presents species of fish
and what proportion of them had empty stomachs when caught. The dataset
contains 36000+ catches, which were classified by their Location (Africa,
Central/South America, North America), by their Trophism (their diet:
Detritivore, Invertivore, Omnivore, Piscivore) and by the moment of feeding
(Diel: Diurnal or Nocturnal).

The compiled scores can be consulted with

```{r}
ArringtonEtAl2002
```

One first difficulty with this dataset is that some of the cells are missing
(e.g., African fishes that are Detritivores feeding at night). As is the case
for other sorts of analyses (e.g., ANOVAs), data with missing cells cannot be
analyzed because the error terms cannot be computed.

One solution adopted by @wh11 was to impute the missing values. We do not know
whether this is an adequate solution, and if so, what imputation would be
acceptable. Consider the following with adequate care.

Warton imputed the missing cells with a very small proportion. In ANOPA, both
the proportions and the group sizes are required. We implemented a procedure
that imputes a count of 0.05 (fractional counts are not possible from
observations, but are not forbidden in ANOPA) obtained from a single
observation. Consult the default option with

```{r}
getOption("ANOPA.zeros")
```

The analysis is obtained with

```{r}
w <- anopa( {s; n} ~ Location * Diel * Trophism, ArringtonEtAl2002)
```

The `fyi` message lets you know that cells are missing; the `Warning` message
lets you know that these cells were imputed (you can suppress messages with
`options("ANOPA.feedback"="none")`).

To see the results, use `summary(w)` (which shows the corrected and uncorrected
statistics) or `uncorrected(w)` (as the sample is quite large, the correction
will be immaterial...),

```{r}
uncorrected(w)
```

These results suggest an interaction Diel : Trophism close to significant. You
can easily make a plot with all the factors using

```{r, message=FALSE, warning=FALSE, fig.width=5, fig.height=5, fig.cap="**Figure 1**. The proportions in the Arrington et al. 2002 data. Error bars show difference-adjusted 95% confidence intervals."}
anopaPlot(w)
```

The missing cells are absent from the plot. To highlight the interaction,
restrict the plot to

```{r, message=FALSE, warning=FALSE, fig.width=5, fig.height=3, fig.cap="**Figure 2**. The proportions as a function of Trophism and Location. Error bars show difference-adjusted 95% confidence intervals."}
anopaPlot(w, ~ Trophism * Location)
```

which clearly shows massive differences between Trophisms, and small
differences between Omnivorous and Piscivorous fishes with regard to Location.

This can be confirmed by examining simple effects (a.k.a. expected marginal
analyses):

```{r}
e <- emProportions( w, ~ Location * Trophism | Diel )
```

(but it will have to wait for the next version of ANOPA ;-)

# References
/scratch/gouwar.j/cran-all/cranData/ANOPA/inst/doc/D-ArringtonExample.Rmd
## ----echo = FALSE, message = FALSE, results = 'hide', warning = FALSE---------
cat("this will be hidden; use for general initializations.\n")
library(ANOPA)
library(ggplot2)

## -----------------------------------------------------------------------------
warton <- data.frame( grp = c(1,2), s = c(3,1), n = c(3,3) )

## -----------------------------------------------------------------------------
w <- anopa( {s;n}~grp, warton)
summary( w )

## ----message=FALSE, warning=FALSE, fig.width=5, fig.height=3, fig.cap="**Figure 1**. The proportions for two groups. Error bars show difference-adjusted 95% confidence intervals."----
anopaPlot(w)

## -----------------------------------------------------------------------------
laurencelle <- data.frame( grp = c(1,2), s = c(3,0), n = c(3,3) )

## -----------------------------------------------------------------------------
l <- anopa( {s;n}~grp, laurencelle)
summary(l)

## ----message=FALSE, warning=FALSE, fig.width=5, fig.height=3, fig.cap="**Figure 2**. The proportions for two extremely different groups. Error bars show difference-adjusted 95% confidence intervals."----
anopaPlot(l)
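## -----------------------------------------------------------------------------
## Not generated from the vignette: a sketch of its enumeration argument. With 3
## participants per group, there are 2^6 = 64 patterns of 0s and 1s (all
## equiprobable under a null where the true proportion is 1/2); only 2 patterns
## give all successes in one group and all failures in the other, whence p = 2/64.
patterns <- expand.grid(rep(list(0:1), 6))          # the 64 patterns
sA <- rowSums(patterns[, 1:3])                      # successes in group 1
sB <- rowSums(patterns[, 4:6])                      # successes in group 2
mean( (sA == 3 & sB == 0) | (sA == 0 & sB == 3) )   # 2/64 = .03125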
/scratch/gouwar.j/cran-all/cranData/ANOPA/inst/doc/E-ArcsineIsAsinine.R
---
title: "Is the ArcSine transformation so asinine in the end?"
bibliography: "../inst/REFERENCES.bib"
csl: "../inst/apa-6th.csl"
output: rmarkdown::html_vignette
description: >
  This vignette examines how the arcsine transform behaves with very small samples of proportions.
vignette: >
  %\VignetteIndexEntry{Is the ArcSine transformation so asinine in the end?}
  %\VignetteEngine{knitr::rmarkdown}
  \usepackage[utf8]{inputenc}
---

Some authors documented that analyses of proportions can be performed with as
few as 3 participants per group [e.g., @wh11]. Some also reported finding
multiple configurations with significant results using logistic regressions. We
do not think this is sensible.

```{r, echo = FALSE, message = FALSE, results = 'hide', warning = FALSE}
cat("this will be hidden; use for general initializations.\n")
library(ANOPA)
library(ggplot2)
```

Let's generate compiled data with two groups. In the `warton` scenario, the
first group has only successes and, in the second group, 2 out of 3
participants failed.

```{r}
warton <- data.frame( grp = c(1,2), s = c(3,1), n = c(3,3) )
```

Analyzing this, the ANOPA suggests no difference...

```{r}
w <- anopa( {s;n}~grp, warton)
summary( w )
```

... something quite evident from the plot:

```{r, message=FALSE, warning=FALSE, fig.width=5, fig.height=3, fig.cap="**Figure 1**. The proportions for two groups. Error bars show difference-adjusted 95% confidence intervals."}
anopaPlot(w)
```

(one tip of the confidence intervals is so off the scale that it is missing.)

Because the sample is so small, it is actually possible to enumerate all the
possible results (there are 64 of them). If we count the results at least as
extreme as this one (the two group totals differing by two successes or more),
there are 14 cases. 14 out of 64 is far from being exceptional, and thus, there
is no significant result here, congruent with the result of the ANOPA analysis
(and contradicting the results from a logistic regression).

Let's consider a more extreme result: the first group has only successes and
the second, only failures (there are two such cases out of 64):

```{r}
laurencelle <- data.frame( grp = c(1,2), s = c(3,0), n = c(3,3) )
```

The analysis using ANOPA says:

```{r}
l <- anopa( {s;n}~grp, laurencelle)
summary(l)
```

that is, a significant result (and note that 2 out of 64 is indeed rare at the
.05 threshold, with a p of .031 = 2/64). The plot is congruent with this
result:

```{r, message=FALSE, warning=FALSE, fig.width=5, fig.height=3, fig.cap="**Figure 2**. The proportions for two extremely different groups. Error bars show difference-adjusted 95% confidence intervals."}
anopaPlot(l)
```

The logistic regression, when applied to proportions, has very inflated type-I
error rates, so that this technique should be avoided. The reason is quite
simple: the logit transformation is not variance-stabilizing. In fact, it
exaggerates the variances across levels of population proportions.

# References
/scratch/gouwar.j/cran-all/cranData/ANOPA/inst/doc/E-ArcsineIsAsinine.Rmd
## ----echo = FALSE, message = FALSE, results = 'hide', warning = FALSE---------
cat("this is hidden; general initializations.\n")

## ----warning=FALSE, message=FALSE---------------------------------------------
options("ANOPA.feedback" = 'none')
library(ANOPA)
library(testthat)
nsim <- 1000   # increase for more reliable simulations.
theN <- 20     # number of simulated participants

## ----message=FALSE, warning=FALSE, echo=TRUE, eval=FALSE----------------------
# frm <- s ~ grp # the formula
# BSDesign <- list(grp = c("ctrl","plcbo")) # one factor, two groups
# thePs <- c(0.3, 0.3) # the true proportions, equal
# 
# # test type-I error rate when no effect as is the case for factor 2
# set.seed(41)
# res <- c()
# for (i in 1:nsim) {
#     smp <- GRP( thePs, theN, BSDesign )
#     w <- anopa(frm, smp[,2:3] )
#     res <- c(res, if(summarize(w)[1,4]<.05) 1 else 0)
# }
# typeI <- mean(res)
# cat( "Design B, testing B: ", typeI, "\n")
# 
# # tolerance is large as the number of simulations is small
# expect_equal( typeI, .05, tolerance = 0.035)

## ----message=FALSE, warning=FALSE, echo=TRUE, eval=FALSE----------------------
# frm <- cbind(s.early, s.middle, s.late) ~ .
# WSDesign <- list(moment = c("early","middle","late"))
# thePs <- c(0.3, 0.3, 0.3)
# 
# # test type-I error rate when no effect as is the case for factor 2
# set.seed(42)
# res <- c()
# for (i in 1:nsim) {
#     smp <- GRP( thePs, theN, NULL, WSDesign )
#     w <- anopa(frm, smp[,2:4] , WSFactors = "M(3)" )
#     res <- c(res, if(summarize(w)[1,4]<.05) 1 else 0)
# }
# typeI <- mean(res)
# cat( "Design W, testing W: ", typeI, "\n")
# 
# # tolerance is large as the number of simulations is small
# expect_equal( typeI, .05, tolerance = 0.035)

## ----message=FALSE, warning=FALSE, echo=TRUE, eval=FALSE----------------------
# frm <- s ~ grp * eta
# WSDesign <- list()
# BSDesign <- list(eta = c("repue","ajun"), grp = c("early","middle","late"))
# thePs <- c(0.3, 0.3, 0.5, 0.5, 0.7, 0.7)
# 
# # test type-I error rate when no effect as is the case for factor 2
# set.seed(41)
# res <- c()
# for (i in 1:nsim) {
#     smp <- GRP( thePs, theN, BSDesign )
#     w <- anopa(frm, smp )
#     res <- c(res, if(summarize(w)[2,4]<.05) 1 else 0)
# }
# typeI <- mean(res)
# cat( "Design BxB, testing B: ", typeI, "\n")
# 
# # tolerance is large as the number of simulations is small
# expect_equal( typeI, .05, tolerance = 0.035)

## ----message=FALSE, warning=FALSE, echo=TRUE, eval=FALSE----------------------
# frm <- cbind(s.repue.early, s.ajun.early,
#              s.repue.middle, s.ajun.middle,
#              s.repue.late, s.ajun.late) ~ .
# BSDesign <- list()
# WSDesign <- list(eta = c("repue","ajun"), moment = c("early","middle","late"))
# thePs <- c(0.3, 0.3, 0.5, 0.5, 0.7, 0.7)
# # thePs <- c(0.3, 0.7, 0.3, 0.7, 0.3, 0.7) # or no effect on factor 1
# 
# # test type-I error rate when no effect as is the case for factor 2
# set.seed(41)
# res <- c()
# for (i in 1:nsim) {
#     smp <- GRP( thePs, theN, NULL, WSDesign )
#     w <- anopa(frm, smp, WSFactors = c("e(2)", "m(3)") )
#     res <- c(res, if(summarize(w)[2,4]<.05) 1 else 0)
# }
# typeI <- mean(res)
# cat( "Design WxW, testing W: ", typeI, "\n")
# 
# # tolerance is large as the number of simulations is small
# expect_equal( typeI, .05, tolerance = 0.035)

## ----message=FALSE, warning=FALSE, echo=TRUE, eval=FALSE----------------------
# frm <- cbind(s.early, s.middle, s.late) ~ grp
# BSDesign <- list(grp = c("ctrl","plcbo"))
# WSDesign <- list(moment = c("early","middle","late"))
# thePs <- c(0.3, 0.3, 0.5, 0.5, 0.7, 0.7)
# # thePs <- c(0.3, 0.7, 0.3, 0.7, 0.3, 0.7) # or no effect on factor 1
# 
# # test type-I error rate when no effect as is the case for factor 2
# set.seed(41)
# res <- c()
# for (i in 1:nsim) {
#     smp <- GRP( thePs, theN, BSDesign, WSDesign )
#     w <- anopa(frm, smp[,2:5] , WSFactors = "M(3)")
#     res <- c(res, if(summarize(w)[2,4]<.05) 1 else 0)
# }
# typeI <- mean(res)
# cat( "Design WxB, testing B: ", typeI, "\n")
# 
# # tolerance is large as the number of simulations is small
# expect_equal( typeI, .05, tolerance = 0.035)

## ----message=FALSE, warning=FALSE, echo=TRUE, eval=FALSE----------------------
# frm <- s ~ grp * eta * a
# BSDesign <- list(eta = c("repue","ajun"),
#                  grp = c("early","middle","late"), a = c("1","2","3","4"))
# thePs <- rep(0.3, 24)
# 
# # test type-I error rate when no effect as is the case for factor 2
# set.seed(41)
# res <- c()
# for (i in 1:nsim) {
#     smp <- GRP( thePs, theN, BSDesign )
#     w <- anopa(frm, smp )
#     res <- c(res, if(summarize(w)[2,4]<.05) 1 else 0)
# }
# typeI <- mean(res)
# cat( "Design BxBxB, testing B: ", typeI, "\n")
# 
# # tolerance is large as the number of simulations is small
# expect_equal( typeI, .05, tolerance = 0.035)

## ----message=FALSE, warning=FALSE, echo=TRUE, eval=FALSE----------------------
# frm <- cbind(s.repue.early.1, s.ajun.early.1, s.repue.middle.1,
#     s.ajun.middle.1, s.repue.late.1, s.ajun.late.1, s.repue.early.2,
#     s.ajun.early.2, s.repue.middle.2, s.ajun.middle.2, s.repue.late.2,
#     s.ajun.late.2, s.repue.early.3, s.ajun.early.3, s.repue.middle.3,
#     s.ajun.middle.3, s.repue.late.3, s.ajun.late.3, s.repue.early.4,
#     s.ajun.early.4, s.repue.middle.4, s.ajun.middle.4, s.repue.late.4,
#     s.ajun.late.4 ) ~ .
# WSDesign <- list(eta = c("repue","ajun"), grp = c("early","middle","late"), a = c("1","2","3","4"))
# thePs <- rep(0.3, 24)
# 
# # test type-I error rate when no effect as is the case for factor 2
# set.seed(43)
# res <- c()
# for (i in 1:nsim) {
#     smp <- GRP( thePs, theN, NULL, WSDesign )
#     w <- anopa(frm, smp, WSFactors = c("e(2)","g(3)", "a(4)") )
#     res <- c(res, if(summarize(w)[2,4]<.05) 1 else 0)
# }
# typeI <- mean(res)
# cat( "Design WxWxW, testing W: ", typeI, "\n")
# 
# # tolerance is large as the number of simulations is small
# expect_equal( typeI, .05, tolerance = 0.035)

## ----message=FALSE, warning=FALSE, echo=TRUE, eval=FALSE----------------------
# frm <- cbind(s.repue.early, s.ajun.early, s.repue.middle,
#     s.ajun.middle, s.repue.late, s.ajun.late ) ~ a
# BSDesign <- list( a = c("1","2","3","4") )
# WSDesign <- list(eta = c("repue","ajun"), grp = c("early","middle","late") )
# thePs <- rep(0.3, 24)
# 
# # test type-I error rate when no effect as is the case for factor 2
# set.seed(43)
# res <- c()
# for (i in 1:nsim) {
#     smp <- GRP( thePs, theN, BSDesign, WSDesign )
#     w <- anopa(frm, smp, WSFactors = c("e(2)","g(3)") )
#     res <- c(res, if(summarize(w)[1,4]<.05) 1 else 0)
# }
# typeI <- mean(res)
# cat( "Design BxWxW, testing W: ", typeI, "\n")
# 
# # tolerance is large as the number of simulations is small
# expect_equal( typeI, .05, tolerance = 0.035)

## ----message=FALSE, warning=FALSE, echo=TRUE, eval=FALSE----------------------
# frm <- cbind(s.repue.early, s.ajun.early, s.repue.middle,
#     s.ajun.middle, s.repue.late, s.ajun.late ) ~ a
# BSDesign <- list( a = c("1","2","3","4") )
# WSDesign <- list(eta = c("repue","ajun"), grp = c("early","middle","late") )
# thePs <- rep(0.3, 24)
# 
# # test type-I error rate when no effect as is the case for factor 2
# set.seed(42)
# res <- c()
# for (i in 1:nsim) {
#     smp <- GRP( thePs, theN, BSDesign, WSDesign )
#     w <- anopa(frm, smp, WSFactors = c("e(2)","g(3)") )
#     res <- c(res, if(summarize(w)[3,4]<.05) 1 else 0)
# }
# typeI <- mean(res)
# cat( "Design BxWxW, testing B: ", typeI, "\n")
# 
# # tolerance is large as the number of simulations is small
# expect_equal( typeI, .05, tolerance = 0.035)

## -----------------------------------------------------------------------------
options("ANOPA.feedback" = 'all')
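## -----------------------------------------------------------------------------
## Not generated from the vignette: the simulation loops above could be
## parallelized, e.g., with the base 'parallel' package (a sketch, assuming frm,
## thePs, theN and BSDesign are defined as in the first chunk; for reproducible
## parallel streams, see ?RNGkind about "L'Ecuyer-CMRG"):
# library(parallel)
# res <- mclapply(1:nsim, function(i) {
#     smp <- GRP( thePs, theN, BSDesign )
#     w   <- anopa(frm, smp[,2:3] )
#     if (summarize(w)[1,4] < .05) 1 else 0
# }, mc.cores = detectCores() - 1)
# typeI <- mean(unlist(res))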
/scratch/gouwar.j/cran-all/cranData/ANOPA/inst/doc/F-TestingTypeIError.R
--- title: "Testing type-I error rates" bibliography: "../inst/REFERENCES.bib" csl: "../inst/apa-6th.csl" output: rmarkdown::html_vignette description: > This vignette describes how to test type-I error rates in the ANOPA. vignette: > %\VignetteIndexEntry{Testing type-I error rates} %\VignetteEngine{knitr::rmarkdown} \usepackage[utf8]{inputenc} --- ```{r, echo = FALSE, message = FALSE, results = 'hide', warning = FALSE} cat("this is hidden; general initializations.\n") ``` **A valid statistical test is one where the amount of type-I errors (rejecting the null when it should not) does not exceed the test threshold (often 5%).** We say it in bold because it seems to be commonly forgotten. Logistic regressions applied to proportions deviates massively from this rule, becoming very liberal tests. ANOPA respect very closely this rule when the corrected statistics are consulted. The following code shows how to test the type-I error rate for ANOPA.It uses simulated data with no differences. The only limitations herein is that no correlations is added when repeated measures are used. We suggests here to shut down error and feedback messages: ```{r, warning=FALSE, message=FALSE} options("ANOPA.feedback" = 'none') library(ANOPA) library(testthat) nsim <- 1000 # increase for more reliable simulations. theN <- 20 # number of simulated participants ``` Note that the simulations are actually not run in this vignette, as they take times. We wished to provide code in case you wished to test type-I error rate by yourself. The present code is also not optimized for speed (and in particular, is not parallelized); we wished to keep the code as simple as possible for readability. In all the following, the number of simulated participants per group is small (20) but can be varied. # Simulations with a single factor ## Simulation with one between factor ```{r, message=FALSE, warning=FALSE, echo=TRUE, eval=FALSE} frm <- s ~ grp # the formula BSDesign <- list(grp = c("ctrl","plcbo")) #one factor, two groups thePs <- c(0.3, 0.3) # the true proportions, equal # test type-I error rate when no effect as is the case for factor 2 set.seed(41) res <- c() for (i in 1:nsim) { smp <- GRP( thePs, theN, BSDesign ) w <- anopa(frm, smp[,2:3] ) res <- c(res, if(summarize(w)[1,4]<.05) 1 else 0) } typeI <- mean(res) cat( "Design B, testing B: ", typeI, "\n") # tolerance is large as the number of simulations is small expect_equal( typeI, .05, tolerance = 0.035) ``` ## Simulation with one within factor ```{r, message=FALSE, warning=FALSE, echo=TRUE, eval=FALSE} frm <- cbind(s.early, s.middle, s.late) ~ . 
WSDesign <- list(moment = c("early","middle","late")) thePs <- c(0.3, 0.3, 0.3) # test type-I error rate when no effect as is the case for factor 2 set.seed(42) res <- c() for (i in 1:nsim) { smp <- GRP( thePs, theN, NULL, WSDesign ) w <- anopa(frm, smp[,2:4] , WSFactors = "M(3)" ) res <- c(res, if(summarize(w)[1,4]<.05) 1 else 0) } typeI <- mean(res) cat( "Design W, testing W: ", typeI, "\n") # tolerance is large as the number of simulations is small expect_equal( typeI, .05, tolerance = 0.035) ``` # Simulations with two factors ## Simulation with two factors, between design ```{r, message=FALSE, warning=FALSE, echo=TRUE, eval=FALSE} frm <- s ~ grp * eta WSDesign <- list() BSDesign <- list(eta = c("repue","ajun"), grp = c("early","middle","late")) thePs <- c(0.3, 0.3, 0.5, 0.5, 0.7, 0.7) # test type-I error rate when no effect as is the case for factor 2 set.seed(41) res <- c() for (i in 1:nsim) { smp <- GRP( thePs, theN, BSDesign ) w <- anopa(frm, smp ) res <- c(res, if(summarize(w)[2,4]<.05) 1 else 0) } typeI <- mean(res) cat( "Design BxB, testing B: ", typeI, "\n") # tolerance is large as the number of simulations is small expect_equal( typeI, .05, tolerance = 0.035) ``` ## Simulation with two factors, within design ```{r, message=FALSE, warning=FALSE, echo=TRUE, eval=FALSE} frm <- cbind(s.repue.early, s.ajun.early, s.repue.middle, s.ajun.middle, s.repue.late, s.ajun.late) ~ . BSDesign <- list() WSDesign <- list(eta = c("repue","ajun"), moment = c("early","middle","late")) thePs <- c(0.3, 0.3, 0.5, 0.5, 0.7, 0.7) # thePs <- c(0.3, 0.7, 0.3, 0.7, 0.3, 0.7) # or no effect on factor 1 # test type-I error rate when no effect as is the case for factor 2 set.seed(41) res <- c() for (i in 1:nsim) { smp <- GRP( thePs, theN, NULL, WSDesign ) w <- anopa(frm, smp, WSFactors = c("e(2)", "m(3)") ) res <- c(res, if(summarize(w)[2,4]<.05) 1 else 0) } typeI <- mean(res) cat( "Design WxW, testing W: ", typeI, "\n") # tolerance is large as the number of simulations is small expect_equal( typeI, .05, tolerance = 0.035) ``` ## Simulation with two factors, mixed design ```{r, message=FALSE, warning=FALSE, echo=TRUE, eval=FALSE} frm <- cbind(s.early, s.middle, s.late) ~ grp BSDesign <- list(grp = c("ctrl","plcbo")) WSDesign <- list(moment = c("early","middle","late")) thePs <- c(0.3, 0.3, 0.5, 0.5, 0.7, 0.7) # thePs <- c(0.3, 0.7, 0.3, 0.7, 0.3, 0.7) # or no effect on factor 1 # test type-I error rate when no effect as is the case for factor 2 set.seed(41) res <- c() for (i in 1:nsim) { smp <- GRP( thePs, theN, BSDesign, WSDesign ) w <- anopa(frm, smp[,2:5] , WSFactors = "M(3)") res <- c(res, if(summarize(w)[2,4]<.05) 1 else 0) } typeI <- mean(res) cat( "Design WxB, testing B: ", typeI, "\n") # tolerance is large as the number of simulations is small expect_equal( typeI, .05, tolerance = 0.035) ``` # Simulations with three factors ## Simulation with three factors, all between design ```{r, message=FALSE, warning=FALSE, echo=TRUE, eval=FALSE} frm <- s ~ grp * eta * a BSDesign <- list(eta = c("repue","ajun"), grp = c("early","middle","late"), a = c("1","2","3","4")) thePs <- rep(0.3, 24) # test type-I error rate when no effect as is the case for factor 2 set.seed(41) res <- c() for (i in 1:nsim) { smp <- GRP( thePs, theN, BSDesign ) w <- anopa(frm, smp ) res <- c(res, if(summarize(w)[2,4]<.05) 1 else 0) } typeI <- mean(res) cat( "Design BxBxB, testing B: ", typeI, "\n") # tolerance is large as the number of simulations is small expect_equal( typeI, .05, tolerance = 0.035) ``` ## Simulation with three factors, 
within design ```{r, message=FALSE, warning=FALSE, echo=TRUE, eval=FALSE} frm <- cbind(s.repue.early.1, s.ajun.early.1, s.repue.middle.1, s.ajun.middle.1, s.repue.late.1, s.ajun.late.1, s.repue.early.2, s.ajun.early.2, s.repue.middle.2, s.ajun.middle.2, s.repue.late.2, s.ajun.late.2, s.repue.early.3, s.ajun.early.3, s.repue.middle.3, s.ajun.middle.3, s.repue.late.3, s.ajun.late.3, s.repue.early.4, s.ajun.early.4, s.repue.middle.4, s.ajun.middle.4, s.repue.late.4, s.ajun.late.4 ) ~ . WSDesign <- list(eta = c("repue","ajun"), grp = c("early","middle","late"), a = c("1","2","3","4")) thePs <- rep(0.3, 24) # test type-I error rate when no effect as is the case for factor 2 set.seed(43) res <- c() for (i in 1:nsim) { smp <- GRP( thePs, theN, NULL, WSDesign ) w <- anopa(frm, smp, WSFactors = c("e(2)","g(3)", "a(4)") ) res <- c(res, if(summarize(w)[2,4]<.05) 1 else 0) } typeI <- mean(res) cat( "Design WxWxW, testing W: ", typeI, "\n") # tolerance is large as the number of simulations is small expect_equal( typeI, .05, tolerance = 0.035) ``` ## Simulation with three factors, mixed design, testing within ```{r, message=FALSE, warning=FALSE, echo=TRUE, eval=FALSE} frm <- cbind(s.repue.early, s.ajun.early, s.repue.middle, s.ajun.middle, s.repue.late, s.ajun.late ) ~ a BSDesign <- list( a = c("1","2","3","4") ) WSDesign <- list(eta = c("repue","ajun"), grp = c("early","middle","late") ) thePs <- rep(0.3, 24) # test type-I error rate when no effect as is the case for factor 2 set.seed(43) res <- c() for (i in 1:nsim) { smp <- GRP( thePs, theN, BSDesign, WSDesign ) w <- anopa(frm, smp, WSFactors = c("e(2)","g(3)") ) res <- c(res, if(summarize(w)[1,4]<.05) 1 else 0) } typeI <- mean(res) cat( "Design BxWxW, testing W: ", typeI, "\n") # tolerance is large as the number of simulations is small expect_equal( typeI, .05, tolerance = 0.035) ``` ## Simulation with three factors, mixed design, testing between ```{r, message=FALSE, warning=FALSE, echo=TRUE, eval=FALSE} frm <- cbind(s.repue.early, s.ajun.early, s.repue.middle, s.ajun.middle, s.repue.late, s.ajun.late ) ~ a BSDesign <- list( a = c("1","2","3","4") ) WSDesign <- list(eta = c("repue","ajun"), grp = c("early","middle","late") ) thePs <- rep(0.3, 24) # test type-I error rate when no effect as is the case for factor 2 set.seed(42) res <- c() for (i in 1:nsim) { smp <- GRP( thePs, theN, BSDesign, WSDesign ) w <- anopa(frm, smp, WSFactors = c("e(2)","g(3)") ) res <- c(res, if(summarize(w)[3,4]<.05) 1 else 0) } typeI <- mean(res) cat( "Design BxWxW, testing B: ", typeI, "\n") # tolerance is large as the number of simulations is small expect_equal( typeI, .05, tolerance = 0.035) ``` # The end Let's restore the warnings and messages before leaving: ```{r} options("ANOPA.feedback" = 'all') ```
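As a final note, for larger values of `nsim` the simulation loops above can easily be parallelized. Below is a minimal sketch for the first design (one between-subject factor), using base R's `parallel` package; it is an illustration we added (it assumes the objects `frm`, `thePs`, `theN` and `BSDesign` of that first simulation) and, like the code above, it is not run:

```{r, message=FALSE, warning=FALSE, echo=TRUE, eval=FALSE}
library(parallel)
cl <- makeCluster(detectCores() - 1)
clusterSetRNGStream(cl, 41)
clusterEvalQ(cl, { options("ANOPA.feedback" = 'none'); library(ANOPA) })
clusterExport(cl, c("frm", "thePs", "theN", "BSDesign"))
res <- parSapply(cl, 1:nsim, function(i) {
    smp <- GRP( thePs, theN, BSDesign )
    w <- anopa(frm, smp[,2:3] )
    if (summarize(w)[1,4] < .05) 1 else 0
})
stopCluster(cl)
mean(res)   # the type-I error rate; should remain close to .05
```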
/scratch/gouwar.j/cran-all/cranData/ANOPA/inst/doc/F-TestingTypeIError.Rmd
--- title: "What is an Analysis of Proportions using the Anscombe Transform?" bibliography: "../inst/REFERENCES.bib" csl: "../inst/apa-6th.csl" output: rmarkdown::html_vignette description: > This vignette describes what an analysis of proportion is. vignette: > %\VignetteIndexEntry{What is an Analysis of Proportions using the Anscombe Transform?} %\VignetteEngine{knitr::rmarkdown} \usepackage[utf8]{inputenc} --- The _ANalysis Of Proportion using the Anscombe transform_ (ANOPA) is a framework for analyzing proportions (often written as percentages) across groups or across measurements. This framework is similar to the well-known ANOVA and uses the same general approach. It allows analyzing _main effects_ and _interaction effects._ It also allow analyzing _simple effects_ (in case of interactions) as well as _orthogonal contrats_ and _post-hoc_ tests. Further, ANOPA makes it easy to generate proportion plots which includes confidence intervals, and to compute _eta-square_ as a measure of effect size. Finally, power planning is easy within ANOPA. ```{r, echo = FALSE, message = FALSE, results = 'hide', warning = FALSE} cat("this will be hidden; use for general initializations.\n") library(ANOPA) library(ggplot2) library(superb) ``` ## A basic example As an example, suppose a study where three groups of participants are tested on their ability to have an illumination according to the nature of a distracting task. This example is found in \insertCite{lc23;textual}. The data can be given with 1s for those participants who experienced an illumination and with 0s for those who didn't. Thus, a table having one line per participant giving the observations would look like: | Condition of distraction | Illumination? | |:----------------|-------------------------| | Doing Crosswords | 1 | | Doing Crosswords | 0 | | Doing Crosswords | 0 | | ... | ... | | Doing Crosswords | 1 | | Solving Sudokus | 0 | | Solving Sudokus | 1 | | Solving Sudokus | 1 | | ... | ... | | Solving Sudokus | 0 | | Performing chants | 0 | | Performing chants | 1 | | ... | ... | | Performing chants | 0 | | Controlling breath | 1 | | Controlling breath | 1 | | ... | ... | | Controlling breath | 0 | This long table can easily be reduced by "compiling" the results, that is, by counting the numer of participants per group who experienced and illumination. Because the group sizes may not be equal, counting the number of participants in each group is also needed. We would then observe | Condition of distraction | Number of illumination | Group size | |:----------------|-------------------------|---------------------------| | Doing Crosswords | 10 | 30 | | Solving Sudokus | 14 | 22 | | Performing chants | 7 | 18 | | Controlling breath | 5 | 27 | From these data, we may wonder if the four interventions are equally likely to result in an illumination. Transforming the number of illumination in percentage provide some indications that this may not be the case: | Condition of distraction | Percentage of illumination | |:----------------|---------------------------| | Doing Crosswords | 33.3% | | Solving Sudokus | 63.6% | | Performing chants | 38.9% | | Controlling breath | 18.5% | In all likelihood, solving Sudokos puts participants in a better mental disposition to have an illumination whereas controlling ones' breath might be the worst intervention to favor illuminations. But how can we be confident of the reliability of this observation? 
The sample is fairly large (total sample size of 97) and the effect seems important (percentages ranging from 18% to 64% are not showing trivially small differences), so that we can expect decent statistical power. How do we proceed to formally test this? This is the purpose of ANOPA.

## The rationale behind the test (optional)

ANOPA makes the following operations transparent. Hence, if you are not interested in the internals of an ANOPA, you can just skip to the next section.

The general idea is to have an ANOVA-like procedure to analyse proportions. One critical assumption in ANOVA is that the variances are homogeneous, that is, constant across conditions. Sadly, this is not the case for proportions. Indeed, proportions close to 0% or close to 100% (floor and ceiling) are obtained when, in the population, the true proportions are small (or large; we consider the former scenario hereafter, but the rationale is symmetrical for large population proportions). When this is the case, there is very little room to observe in a sample a proportion much deviant from the population proportion. For example, if the population proportion is, say, 5%, then in a sample of 20 participants, you cannot expect to observe frequencies very far from 5%. A contrario, if the population true proportion is 50%, then in a sample of 20 participants, a larger range of observed proportions is possible.

This simple illustration shows that the possible variance in the scores is not homogeneous: little variance is expected for extreme proportions and more variance is expected for proportions in the middle of the range (near 50%). Because the purpose of the analysis is to see if the proportions might be different, it means that we envision that they occupy some range, and therefore, we cannot maintain that variances are homogeneous.

We therefore need a "variance-stabilizing" approach. The purpose of the Anscombe transform (an extension of the arcsine transform) is precisely this: replace the proportions with an alternate measure which has the same expected variance irrespective of the population proportion \insertCite{a48}{ANOPA}. Anscombe showed that the variance of these transformed proportions is a constant, $1/(4 (n+1/2))$, determined only by the number of observations. Thus, we have a variance-stabilizing transformation. As an added bonus, not only are the variances stabilized, but we actually know their values. Hence, it is no longer necessary to estimate the "error term" in an ANOVA. As the error term is known, the denominator of the ANOVA is calculated without degrees of freedom (we set them to $\infty$ to denote this). Recent works (see the last section) confirm that this transformation is actually the most accurate approximation we know to this day and that there is very little room to find a more accurate transformation.

# Analyzing the data

The dataset above can be found in a compiled format in the dataframe ``ArticleExample1``:

```{r, message = FALSE, warning = FALSE}
ArticleExample1
```

(There are alternate formats for the data, discussed in the vignette [DataFormatsForProportions](../articles/B-DataFormatsForProportions.html).) As seen, the group labels are given in column ``DistractingTask`` whereas the observations are described in ``nSuccess`` (the number of 1s) and ``nParticipants`` (the number of observations, i.e., the number of 0s and 1s).
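For illustration, the Anscombe transform can also be computed by hand. Here is a minimal sketch we added (it is not part of the ANOPA workflow); it applies the formula given above, adding 3/8 to the numbers of successes and 3/4 to the numbers of observations:

```{r}
# the Anscombe transform of each group: asin( sqrt( (s+3/8) / (n+3/4) ) )
asin( sqrt( (ArticleExample1$nSuccess + 3/8) / (ArticleExample1$nParticipants + 3/4) ) )
# the theoretical variance of each transformed score depends only on n: 1/(4(n+1/2))
1 / (4 * (ArticleExample1$nParticipants + 1/2))
```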
To see the results as proportions, divide the number of successes by the number of observations, for example

```{r}
ArticleExample1$nSuccess / ArticleExample1$nParticipants
```

(multiply by 100 to have percentages rather than proportions.)

The analysis is very simply triggered by the following

```{r}
w <- anopa( {nSuccess; nParticipants} ~ DistractingTask, ArticleExample1)
```

The first argument is a formula which describes how the data are presented (before the ~) and what the factors in the design are (after the ~). Here, because the observations are actually described over two columns (the number of 1s and the total number of participants in each group), we use the ``{s;n}`` notation, which can be read as "s over n" (note the curly braces and the semi-colon, which are not standard notation in R). The second argument is the data frame, here in compiled form.

You are done! Please start (always start) with a plot.

```{r, message=FALSE, warning=FALSE, fig.width=4, fig.height=3, fig.cap="**Figure 1**. The proportion of illumination as a function of the distracting task. Error bars show difference-adjusted 95% confidence intervals."}
anopaPlot(w)
```

This plot shows confidence intervals that are "difference adjusted" \insertCite{b12}{ANOPA}. Such confidence intervals allow comparing between conditions using the golden rule: _if a result is not included in the confidence interval of another score, then the two conditions are likely significantly different_. In the above plot, we see that the Breath condition is not included in the Sudoku condition, so that we can expect these two conditions to differ significantly, and as such, the ANOPA to show a significant rejection of the null hypothesis that all the proportions are equal.

The ANOPA table is obtained as usual with ``summary()`` or ``summarize()``:

```{r}
summarize(w)
```

or, if you just want the corrected statistics (recommended), with

```{r}
corrected(w)
```

As seen, the (uncorrected) effect of the _Distracting Task_ is significant ($F(3, \infty) = 3.51$, $p = .014$). Because for small samples the _F_ distribution is biased up, an adjusted version can be consulted (last three columns). The result is nearly the same here ($F(3, \infty) = 3.39$, $p = 0.017$) because this sample is far from being small. The correction is obtained with Williams' method \insertCite{w76}{ANOPA} and reduces the _F_ by 3.6% (column `correction` shows 1.0357).

# Post-hoc test

The proportions can be further analyzed using a post-hoc test to determine which pairs of distracting tasks have different proportions of illumination. To that end, we use Tukey's Honestly Significant Difference (HSD) procedure.

```{r}
# posthocProportions( w )   ## not yet bundled in the library
```

As seen, the Breath condition differs significantly from the Sudoku condition. The Crosswords condition also differs from the Sudoku condition. These are the only two pairs of conditions for which a difference seems statistically warranted.

This is it. Enjoy! The vignette [ArringtonExample](../articles/D-ArringtonExample.html) examines a real dataset where more than one factor is present.

# A common confusion

A common confusion with regards to proportions is to believe that a _mean proportion_ is a proportion. In @wh11, we also find _median proportions_. All these express confusion as to what a proportion is. A proportion *must* be based on 1s and 0s. Thus, if a group's score is a proportion, it means that all the members of that group have been observed once, and were coded as 0 or 1.
If you have multiple observations per subject, and if the group's score is the mean of the subjects' proportions, then you are in an impure scenario: your primary data (the subjects' proportions) are *not* 0s and 1s, and therefore this situation cannot be analyzed with ANOPA. If, on the other hand, you consider that the repeated measurements of each participant constitute a factor, then you can analyze the results with ANOPA, treating the "repetition of the measurement" as a within-subject factor.

In the worst-case situation, if the participants were measured multiple times but you do not have access to the individual measurements, then you may treat the proportions as being _means_ and run a standard ANOVA. However, keep in mind that this approach is only warranted if you have a lot of measurements (owing to the central limit theorem). With just a handful of measurements, well, no one can help you...

# Why infinite degrees of freedom? (optional)

For some, this notation may seem bizarre, or arbitrary. However, it is formally an exact notation. An equivalent notation relates the $t$ tests to the $z$ tests. As is well known, the $t$ test is used when the population variance is unknown and estimated from the sample's variance. In this test, this variance can be seen as the "error term". However, when the population variance is known, we can use this information and the test then becomes a $z$ test. Yet, the $t$ distribution (and the critical value of this test) is identical to a standardized normal distribution when the degrees of freedom in the $t$ distribution tend to infinity. In other words, a $z$ test is the same as a $t$ test when there is no uncertainty in the error term. And when there is no uncertainty in the error term, we can replace the degrees of freedom with infinity. This rationale is the same in the ANOPA, which explains why we note the denominator's degrees of freedom with infinity.

# Why the arcsine transform? (optional)

This transformation may seem quite arbitrary. Its origin shows that this solution was indeed found by a vague intuition. Fisher was the first to propose trigonometric transformations for the study of statistics, in 1915. This approach was found fertile when applied to correlation testing, where the arctanh transform (formally, the inverse hyperbolic tangent transformation) provided an excellent approximation \insertCite{f21}{ANOPA}. When Fisher considered proportions, his first attempt was to suggest a cosine transform \insertCite{f22}{ANOPA}. Zubin later refined the approach by suggesting the arcsine transform \insertCite{z35}{ANOPA}. The basic form of the arcsine transform was later refined by Anscombe to the form we use in the ANOPA \insertCite{a48}{ANOPA}. Anscombe's modifications, the addition of 3/8 to the number of successes and 3/4 to the number of trials, led to a theoretical variance of $1/(4 (n+1/2))$, as given above.

Formidable developments in the early 90s showed that this transform has other important characteristics. For example, \insertCite{c90;textual}{ANOPA} and \insertCite{ll90;textual}{ANOPA} derived that this transform will either underestimate the true probability or overestimate it. More importantly, Chen showed that no other transformation is known to fluctuate less than the arcsine transform around the exact probability. This transformation is therefore the best option when analyzing proportions.

You can read more in @lc23; also check @c90 or @ll90 for mathematical demonstrations showing the robustness of the ANOPA.
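To see the variance stabilization at work, here is a small simulation we added (a sketch; it simply applies the transform to binomial samples): whatever the population proportion, the variance of the transformed scores remains close to the theoretical $1/(4(n+1/2))$.

```{r}
set.seed(42)
n <- 20
for (p in c(0.1, 0.3, 0.5, 0.7, 0.9)) {
    x <- rbinom(10000, n, p)                   # numbers of successes in 10000 samples
    A <- asin( sqrt( (x + 3/8) / (n + 3/4) ) ) # their Anscombe-transformed scores
    cat("p =", p, "  var(A) =", round(var(A), 5), "\n")
}
1 / (4 * (n + 1/2))                            # the theoretical variance
```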
Finally, @w76 explains the correction factor and its purpose.

# References
/scratch/gouwar.j/cran-all/cranData/ANOPA/vignettes/A-WhatIsANOPA.Rmd
--- title: "Data formats for proportions" bibliography: "../inst/REFERENCES.bib" csl: "../inst/apa-6th.csl" output: rmarkdown::html_vignette description: > This vignette describes the various ways that proportions can be entered in a data.frame. vignette: > %\VignetteIndexEntry{Data formats for proportions} %\VignetteEngine{knitr::rmarkdown} \usepackage[utf8]{inputenc} --- ```{r, echo = FALSE, message = FALSE, results = 'hide', warning = FALSE} cat("this is hidden; general initializations.\n") library(ANOPA) w1 <- anopa( {success;total} ~ Class * Difficulty, twoWayExample) dataWide1 <- toWide(w1) dataCompiled1 <-toCompiled(w1) dataLong1 <- toLong(w1) rownames(dataLong1) <- NULL w2 <- anopa( cbind(bpre, bpost, b1week, b5week) ~ Status, minimalMxExample, WSFactors = "Moment(4)") dataWide2 <- toWide(w2) dataCompiled2 <-toCompiled(w2) dataLong2 <- toLong(w2) rownames(dataLong2) <- NULL ``` # Data formats for proportions proportions are actually not raw data: they are the proportion of one response (typically called a `success`) over all the responses (the other responses being called collectively a `failure`). As such, a proportion is a _summary statistic_, a bit like the mean is a summary statistic of continuous data. Very often, the `success` are coded using the digit `1` and the `failure`, with the digit `0`. When this is the case, computing the mean is actually the same as computing the proportion of successes. However, it is a conceptual mistake to think of proportions as means, because they must the processed completely differently from averages. For example, standard error and confidence intervals for proportions are obtained using very different procedures than standard error and confidence intervals for the mean. In this vignette, we review various ways that data can be coded in a data frame. In a nutshell, there are three ways to represent success or failures, Wide, Long, and Compiled. The first two shows raw scores whereas the last shows a summary of the data. Before we begin, we load the package ``ANOPA`` (if is not present on your computer, first upload it to your computer from CRAN or from the source repository ``devtools::install_github("dcousin3/ANOPA")``): ```{r, message=FALSE, warning=FALSE, echo=TRUE, eval=TRUE} library(ANOPA) ``` ## First format: Wide data format In this format, there is one line per _subject_ and one column for each measurements. The columns contain only 1s (`success`) or 0s (`failure). If the particpant was measured multiple times, there is one (or some) within-subject factor(s) resulting in multiple columns of measurements. In between-group design, there is only a single column of scores. As an example, consider the following data for a between-subject factor design with two factors: Class (2 levels) and Difficulty (3 levels) for 6 groups. There is an identical number of participants in each, 12, for a total of 72 participants. ```{r, message=FALSE, warning=FALSE, echo=TRUE, eval=TRUE} dataWide1 ``` When the data are in a wide format, the formula in ``anopa()`` must provide the columns where the success/failure are stored, and the conditions after the usual ~, as in ```{r, message=FALSE, warning=FALSE, echo=TRUE, eval=TRUE} w1 <- anopa( success ~ Class * Difficulty, dataWide1) ``` (how dataWide1 was obtained is shown below in the Section *Converting between formats* below.) As another example, consider the following example obtained in a mixed, within- and between- subject design. 
It has a factor `Status` with 8, 9 and 7 participants per group respectively. It also has four repeated measures, `bpre`, `bpost`, `b1week` and `b5week`, which represent four different moments of measurement. The data frame is

```{r, message=FALSE, warning=FALSE, echo=TRUE, eval=TRUE}
dataWide2
```

The formula for analyzing these data in this format is

```{r, message=FALSE, warning=FALSE, echo=TRUE, eval=TRUE}
w2 <- anopa( cbind(bpre, bpost, b1week, b5week) ~ Status, dataWide2, WSFactors = "Moment(4)" )
```

It is necessary to (a) group all the measurement columns using `cbind()`; (b) indicate the within-subject factor(s) using the argument `WSFactors`, along with the number of levels of each, in a string.

## Second format: Long data format

This format may be preferred by linear modelers (but it may rapidly become _very_ long!). There are always at least these columns: one Id column, one column to indicate a within-subject level, and one column to indicate the observed score. On the other hand, this format has fewer columns in repeated-measure designs.

This example shows the first 6 lines of the 2-factor between design data above, stored in the long format.

```{r, message=FALSE, warning=FALSE, echo=FALSE, eval=TRUE}
head(dataLong1)
```

To analyze data in this format with ``anopa()``, use

```{r, message=FALSE, warning=FALSE, echo=TRUE, eval=TRUE}
w1Long <- anopa( Value ~ Class * Difficulty * Variable | Id, dataLong1 )
```

The vertical line symbol indicates that the observations are nested within ``Id`` (i.e., all the lines with the same Id belong to the same subject).

With the mixed design described above, the data begin as:

```{r, message=FALSE, warning=FALSE, echo=TRUE, eval=TRUE}
head(dataLong2)
```

and are analyzed with the formula:

```{r, message=FALSE, warning=FALSE, echo=TRUE, eval=TRUE}
w2Long <- anopa( Value ~ Status * Variable | Id, dataLong2, WSFactors="Moment(4)" )
```

## Third format: Compiled data format

This format is compiled, in the sense that the 0s and 1s have been replaced by a single count of successes for each cell of the design. Hence, we no longer have access to the raw data. This format however has the advantage of being very compact, requiring few lines. Here are the data for the 2 between-subject factors example

```{r, message=FALSE, warning=FALSE, echo=FALSE, eval=TRUE}
dataCompiled1
```

To use a compiled format in `anopa()`, use

```{r, message=FALSE, warning=FALSE, echo=TRUE, eval=TRUE}
w1Compiled <- anopa( {success; Count} ~ Class * Difficulty, dataCompiled1 )
```

where ``success`` identifies the column in which the numbers of successes are stored. The column Count indicates the total number of observations in that cell. The notation {s;n} is read ``s over n`` (note the curly braces and semicolon).

For the mixed design presented earlier, the data look like:

```{r, message=FALSE, warning=FALSE, echo=FALSE, eval=TRUE}
dataCompiled2
```

where there are columns with the number of successes for each repeated measure. A new column appears, ``uAlpha``. This column (called _unitary alpha_) is a measure of correlation (between -1 and +1). In this fictitious example, the correlations are near zero (negative, actually) by chance, as the data were generated randomly. It is not possible at this time to run an ANOPA analysis on compiled data when there are repeated measures (but this may change in a future version).

## Converting between formats

Once entered in an ``anopa()`` structure, it is possible to convert to any format using ``toWide()``, ``toCompiled()`` and ``toLong()``.
For example:

```{r, message=FALSE, warning=FALSE, echo=TRUE, eval=TRUE}
toCompiled(w1)
toCompiled(w2)
```

The compiled format is probably the most compact format, but the wide format is the most explicit format (as we see all the subjects and their scores on a single line, one subject per line).

## Getting the example data frames

Above, we used two examples. They are available in this package under the names ``twoWayExample`` and ``minimalMxExample``. The first is available in compiled form, the second in wide form. We converted these data sets into the other formats using:

```{r}
w1 <- anopa( {success;total} ~ Class * Difficulty, twoWayExample)
dataWide1 <- toWide(w1)
dataCompiled1 <-toCompiled(w1)
dataLong1 <- toLong(w1)
w2 <- anopa( cbind(bpre, bpost, b1week, b5week) ~ Status, minimalMxExample, WSFactors = "Moment(4)")
dataWide2 <- toWide(w2)
dataCompiled2 <-toCompiled(w2)
dataLong2 <- toLong(w2)
```

## Multiple repeated-measure factors

One limitation is with regards to repeated measures: it is not possible to guess the names of the within-subject factors from the names of the columns. This is why, as soon as there is more than one measurement, the argument ``WSFactors`` must be added.

Suppose a two-way within-subject design with 2 x 3 levels. The data set ``twoWayWithinExample`` has 6 columns; the first three columns contain the measurements under level 1 of the second factor, and the last three, under its level 2. Within each triplet of columns, the first factor goes from level 1 to level 3.

```{r, message=TRUE, warning=FALSE, echo=TRUE, eval=TRUE}
w3 <- anopa( cbind(r11,r12,r13,r21,r22,r23) ~ . , twoWayWithinExample,
             WSFactors = c("A(3)","B(2)") )
toCompiled(w3)
```

A "fyi" message is shown which lets you see how the variables were interpreted. Such messages can be inhibited by changing the option

```{r, message=TRUE, warning=FALSE, echo=TRUE, eval=TRUE}
options("ANOPA.feedback" = "none")
```

To know more about analyzing proportions with ANOPA, refer to @lc23 or to [What is an ANOPA?](../articles/A-WhatIsANOPA.html).

# References
/scratch/gouwar.j/cran-all/cranData/ANOPA/vignettes/B-DataFormatsForProportions.Rmd
--- title: "Confidence intervals with proportions" bibliography: "../inst/REFERENCES.bib" csl: "../inst/apa-6th.csl" output: rmarkdown::html_vignette description: > This vignette describes how to plot confidence intervals with proportions. vignette: > %\VignetteIndexEntry{Confidence intervals with proportions} %\VignetteEngine{knitr::rmarkdown} \usepackage[utf8]{inputenc} --- ```{r, echo = FALSE, message = FALSE, results = 'hide', warning = FALSE} cat("this will be hidden; use for general initializations.\n") library(ANOPA) ``` Probably the most useful tools for data analysis is a plot with suitable error bars [@cgh21]. In this vignette, we show how to make confidence intervals for proportions. ## Theory behind Confidence intervals for proportions For proportions, ANOPA is based on the Anscombe transform \insertCite{a48}{ANOPA}. This measure has a known theoretical standard error which depends only on sampe size $n$: $$SE_{A}(n) = 1/\sqrt{4(n+1/2)}.$$ Consequently, when the groups' sizes are similar, homogeneity of variances holds. From this, we can decomposed the total test statistic $F$ into a component for each cell of the design. We thus get $$\left[ A + z_{0.5-\gamma/2} \times SE_{A}(n), \; A + z_{0.5+\gamma/2} \times SE_{A}(n) \right]$$ in which $SE_{A}(n)$ is the theoretical standard error based only on $n$, and $\gamma$ is the desired confidence level (often .95). This technique returns _stand-alone_ confidence intervals, that is, intervals which can be used to compare the proportion to a fixed point. However, such _stand-alone_ intervals cannot be used to compare one proportion to another proportion [@cgh21]. To compare an observed proportion to another observed proportion, it is necessary to adjust them for pair-wise differences [@b12]. This is achieved by increasing the wide of the intervals by $\sqrt{2}$. Also, in repeated measure designs, the correlation is beneficial to improve estimates. As such, the interval wide can be reduced when correlation is positive by multiplying its length by $\sqrt{1-\alpha_1}$, where $\alpha_1$ is a measure of correlation in a matrix containing repeated measures (based on the unitary alpha measure). Finally, the above returns confidence intervals for the *transformed* scores. However, when used in a plot, it is typically more convenient to plot proportions (from 0 to 1) rather than Anscombe-scores (from 0 to $\pi/2 \approx$ 1.57). Thus, it is possible to rescale the vertical axis using the inverse Anscombe transform and be shown proportions. This is it. ## Complicated? Well, not really: ```{r, message=FALSE, warning=FALSE, fig.width=5, fig.height=3, fig.cap="**Figure 1**. The proportions as a function of class and Difficulty. Error bars show difference-adjusted 95% confidence intervals."} library(ANOPA) w <- anopa( {success;total} ~ Class * Difficulty, twoWayExample) anopaPlot(w) ``` Because the analyses ``summary(w)`` suggests that only the factor `Difficulty` has a significant effect, you may select only that factors for plotting, with e.g., ```{r, message=FALSE, warning=FALSE, fig.width=4, fig.height=3, fig.cap="**Figure 2**. The proportions as a function of Difficulty only. Error bars show difference-adjusted 95% confidence intervals."} anopaPlot(w, ~ Difficulty ) ``` As is the case with any ``ggplot2`` figure, you can customize it at will. For example, ```{r, message=FALSE, warning=FALSE, fig.width=4, fig.height=3, fig.cap="**Figure 3**. 
Same as Figure 2 with some visual improvements."} library(ggplot2) anopaPlot(w, ~ Difficulty) + theme_bw() + # change theme scale_x_discrete(limits = c("Easy", "Moderate", "Difficult")) #change order ``` As you can see from this plot, Difficulty is very significant, and the most different conditions are Easy vs. Difficult. Here you go. # References
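As a closing illustration, the intervals can also be computed by hand. Here is a minimal sketch we added (the counts are arbitrary) computing a difference-adjusted 95% confidence interval for a single group, using the Anscombe transform $\arcsin\sqrt{(s+3/8)/(n+3/4)}$ described in [What is an ANOPA?](../articles/A-WhatIsANOPA.html), then mapping it back to the proportion scale:

```{r}
s <- 14; n <- 22                                   # 14 successes out of 22 observations
A  <- asin( sqrt( (s + 3/8) / (n + 3/4) ) )        # the Anscombe-transformed score
se <- 1 / sqrt( 4 * (n + 1/2) )                    # its theoretical standard error
ci <- A + qnorm( c(0.025, 0.975) ) * sqrt(2) * se  # difference-adjusted bounds
sin(ci)^2                                          # back to the proportion scale
```

# References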
/scratch/gouwar.j/cran-all/cranData/ANOPA/vignettes/C-ConfidenceIntervals.Rmd
--- title: "Analyzing proportions with the Arrington et al. 2002 example" bibliography: "../inst/REFERENCES.bib" csl: "../inst/apa-6th.csl" output: rmarkdown::html_vignette description: > This vignette describes how a real dataset with 4 factors can be analyzed. vignette: > %\VignetteIndexEntry{Analyzing proportions with the Arrington et al. 2002 example} %\VignetteEngine{knitr::rmarkdown} \usepackage[utf8]{inputenc} --- ```{r, echo = FALSE, message = FALSE, results = 'hide', warning = FALSE} cat("this is hidden; general initializations.\n") library(ANOPA) ``` @a02 published a data set available from the web. It presents species of fish and what proportion of them were empty stomached when catched. The dataset contained 36000+ catches, which where identified by their Location (Africa, North America, rest of America), by their Trophism (their diet, Detritivore, Invertivore, Omnivore, Piscivore) and by the moment of feeding (Diel: Diurnal or Nocturnal). The compiled scores can be consulted with ```{r} ArringtonEtAl2002 ``` One first difficulty with this dataset is that some of the cells are missing (e.g., African fish that are Detrivore during the night). As is the case for other sorts of analyses (e.g., ANOVAs), data with missing cells cannot be analyzed because the error terms cannot be computed. One solution adopted by @wh11 was to impute the missing value. We are not aware if this is an adequate solution, and if so, what imputation would be acceptable. Consider the following with adequate care. Warton imputed the missing cells with a very small proportion. In ANOPA, both the proportions and the group sizes are required. We implemented a procedure that impute a count of 0.05 (fractional counts are not possible from observations, but are not forbidden in ANOPA) obtained from a single observation. Consult the default option with ```{r} getOption("ANOPA.zeros") ``` The analysis is obtained with ```{r} w <- anopa( {s; n} ~ Location * Diel * Trophism, ArringtonEtAl2002) ``` The `fyi` message lets you know that cells are missing; the `Warning` message lets you know that these cells were imputed (you can suppress messages with `options("ANOPA.feedback"="none")`. To see the result, use `summary(w)` (which shows the corrected and uncorrected statistics) or `uncorrected(w)` (as the sample is quite large, the correction will be immaterial...), ```{r} uncorrected(w) ``` These suggests an interaction Diel : Trophism close to significant. You can easily make a plot with all the factors using ```{r, message=FALSE, warning=FALSE, fig.width=5, fig.height=5, fig.cap="**Figure 1**. The proportions in the Arrington et al. 2002 data. Error bars show difference-adjusted 95% confidence intervals."} anopaPlot(w) ``` The missing cells are absent from the plot. To highlight the interaction, restrict the plot to ```{r, message=FALSE, warning=FALSE, fig.width=5, fig.height=3, fig.cap="**Figure 1**. The proportions as a function of class and Difficulty. Error bars show difference-adjusted 95% confidence intervals."} anopaPlot(w, ~ Trophism * Location) ``` which shows clearly massive difference between Trophism, and small differences between Omnivorous and Piscivorous fishes with regards to Location. This can be confirmed by examining simple effects (a.k.a. expected marginal analyzes): ```{r} e <- emProportions( w, ~ Location * Trophism | Diel ) ``` (but it will have to wait for the next version of ANOPA ;-) # References
/scratch/gouwar.j/cran-all/cranData/ANOPA/vignettes/D-ArringtonExample.Rmd
--- title: "Is the ArcSine transformation so asinine in the end?" bibliography: "../inst/REFERENCES.bib" csl: "../inst/apa-6th.csl" output: rmarkdown::html_vignette description: > This vignette describes what an analysis of frequency data is. vignette: > %\VignetteIndexEntry{Is the ArcSine transformation so asinine in the end?} %\VignetteEngine{knitr::rmarkdown} \usepackage[utf8]{inputenc} --- Some authors documented that analyses of proportions can be performed with as few as 3 participants per group [e.g., @wh11]. Some also reported finiding multiple configurations with significant results using logistic regressions. We do not think this is sensible. ```{r, echo = FALSE, message = FALSE, results = 'hide', warning = FALSE} cat("this will be hidden; use for general initializations.\n") library(ANOPA) library(ggplot2) ``` Let's generate compiled data with two groups. In the `warton` scenario, the first group has only successes and in the second group, 2 out of 3 participants have failure. ```{r} warton <- data.frame( grp = c(1,2), s = c(3,1), n = c(3,3) ) ``` Analyzing this, the ANOPA suggests no difference... ```{r} w <- anopa( {s;n}~grp, warton) summary( w ) ``` ... something quite evident from the plot: ```{r, message=FALSE, warning=FALSE, fig.width=5, fig.height=3, fig.cap="**Figure 1**. The proportions for two groups. Error bars show difference-adjusted 95% confidence intervals."} anopaPlot(w) ``` (one tip of the confidence intervals is so off the scale that it is missing.) Because the sample is so small, it is actually possible to enumerate all the possible results (there are 64 of them). If we allow no success or a single success in one group, and all success in the other group, there are 14 cases. 14 out of 64 is far from being exceptional, and thus, there is no significant result here, congruent with the result of the ANOPA analysis (and contradicting the results from a logistic regression). Lets consider a more extreme result: The first group has only successes and the second, only failures (there is two such cases out of 64): ```{r} laurencelle <- data.frame( grp = c(1,2), s = c(3,0), n = c(3,3) ) ``` The analyse using ANOPA says: ```{r} l <- anopa( {s;n}~grp, laurencelle) summary(l) ``` that is, a significant result (and note that 2 out of 64 is indeed rare at the .05 threshold with a p of .031 = 2/64). The plot is congruent with this result: ```{r, message=FALSE, warning=FALSE, fig.width=5, fig.height=3, fig.cap="**Figure 2**. The proportions for two extremely different groups. Error bars show difference-adjusted 95% confidence intervals."} anopaPlot(l) ``` The logistic regression, when applied to proportions, has very inflated type-I error rates so that this technique should be avoided. The reason is quite simple: the logit transformation is not variance-stabilizing. In fact, it exaggerate the variances across levels of population proportions. # References
/scratch/gouwar.j/cran-all/cranData/ANOPA/vignettes/E-ArcsineIsAsinine.Rmd
--- title: "Testing type-I error rates" bibliography: "../inst/REFERENCES.bib" csl: "../inst/apa-6th.csl" output: rmarkdown::html_vignette description: > This vignette describes how to test type-I error rates in the ANOPA. vignette: > %\VignetteIndexEntry{Testing type-I error rates} %\VignetteEngine{knitr::rmarkdown} \usepackage[utf8]{inputenc} --- ```{r, echo = FALSE, message = FALSE, results = 'hide', warning = FALSE} cat("this is hidden; general initializations.\n") ``` **A valid statistical test is one where the amount of type-I errors (rejecting the null when it should not) does not exceed the test threshold (often 5%).** We say it in bold because it seems to be commonly forgotten. Logistic regressions applied to proportions deviates massively from this rule, becoming very liberal tests. ANOPA respect very closely this rule when the corrected statistics are consulted. The following code shows how to test the type-I error rate for ANOPA.It uses simulated data with no differences. The only limitations herein is that no correlations is added when repeated measures are used. We suggests here to shut down error and feedback messages: ```{r, warning=FALSE, message=FALSE} options("ANOPA.feedback" = 'none') library(ANOPA) library(testthat) nsim <- 1000 # increase for more reliable simulations. theN <- 20 # number of simulated participants ``` Note that the simulations are actually not run in this vignette, as they take times. We wished to provide code in case you wished to test type-I error rate by yourself. The present code is also not optimized for speed (and in particular, is not parallelized); we wished to keep the code as simple as possible for readability. In all the following, the number of simulated participants per group is small (20) but can be varied. # Simulations with a single factor ## Simulation with one between factor ```{r, message=FALSE, warning=FALSE, echo=TRUE, eval=FALSE} frm <- s ~ grp # the formula BSDesign <- list(grp = c("ctrl","plcbo")) #one factor, two groups thePs <- c(0.3, 0.3) # the true proportions, equal # test type-I error rate when no effect as is the case for factor 2 set.seed(41) res <- c() for (i in 1:nsim) { smp <- GRP( thePs, theN, BSDesign ) w <- anopa(frm, smp[,2:3] ) res <- c(res, if(summarize(w)[1,4]<.05) 1 else 0) } typeI <- mean(res) cat( "Design B, testing B: ", typeI, "\n") # tolerance is large as the number of simulations is small expect_equal( typeI, .05, tolerance = 0.035) ``` ## Simulation with one within factor ```{r, message=FALSE, warning=FALSE, echo=TRUE, eval=FALSE} frm <- cbind(s.early, s.middle, s.late) ~ . 
WSDesign <- list(moment = c("early","middle","late")) thePs <- c(0.3, 0.3, 0.3) # test type-I error rate when no effect as is the case for factor 2 set.seed(42) res <- c() for (i in 1:nsim) { smp <- GRP( thePs, theN, NULL, WSDesign ) w <- anopa(frm, smp[,2:4] , WSFactors = "M(3)" ) res <- c(res, if(summarize(w)[1,4]<.05) 1 else 0) } typeI <- mean(res) cat( "Design W, testing W: ", typeI, "\n") # tolerance is large as the number of simulations is small expect_equal( typeI, .05, tolerance = 0.035) ``` # Simulations with two factors ## Simulation with two factors, between design ```{r, message=FALSE, warning=FALSE, echo=TRUE, eval=FALSE} frm <- s ~ grp * eta WSDesign <- list() BSDesign <- list(eta = c("repue","ajun"), grp = c("early","middle","late")) thePs <- c(0.3, 0.3, 0.5, 0.5, 0.7, 0.7) # test type-I error rate when no effect as is the case for factor 2 set.seed(41) res <- c() for (i in 1:nsim) { smp <- GRP( thePs, theN, BSDesign ) w <- anopa(frm, smp ) res <- c(res, if(summarize(w)[2,4]<.05) 1 else 0) } typeI <- mean(res) cat( "Design BxB, testing B: ", typeI, "\n") # tolerance is large as the number of simulations is small expect_equal( typeI, .05, tolerance = 0.035) ``` ## Simulation with two factors, within design ```{r, message=FALSE, warning=FALSE, echo=TRUE, eval=FALSE} frm <- cbind(s.repue.early, s.ajun.early, s.repue.middle, s.ajun.middle, s.repue.late, s.ajun.late) ~ . BSDesign <- list() WSDesign <- list(eta = c("repue","ajun"), moment = c("early","middle","late")) thePs <- c(0.3, 0.3, 0.5, 0.5, 0.7, 0.7) # thePs <- c(0.3, 0.7, 0.3, 0.7, 0.3, 0.7) # or no effect on factor 1 # test type-I error rate when no effect as is the case for factor 2 set.seed(41) res <- c() for (i in 1:nsim) { smp <- GRP( thePs, theN, NULL, WSDesign ) w <- anopa(frm, smp, WSFactors = c("e(2)", "m(3)") ) res <- c(res, if(summarize(w)[2,4]<.05) 1 else 0) } typeI <- mean(res) cat( "Design WxW, testing W: ", typeI, "\n") # tolerance is large as the number of simulations is small expect_equal( typeI, .05, tolerance = 0.035) ``` ## Simulation with two factors, mixed design ```{r, message=FALSE, warning=FALSE, echo=TRUE, eval=FALSE} frm <- cbind(s.early, s.middle, s.late) ~ grp BSDesign <- list(grp = c("ctrl","plcbo")) WSDesign <- list(moment = c("early","middle","late")) thePs <- c(0.3, 0.3, 0.5, 0.5, 0.7, 0.7) # thePs <- c(0.3, 0.7, 0.3, 0.7, 0.3, 0.7) # or no effect on factor 1 # test type-I error rate when no effect as is the case for factor 2 set.seed(41) res <- c() for (i in 1:nsim) { smp <- GRP( thePs, theN, BSDesign, WSDesign ) w <- anopa(frm, smp[,2:5] , WSFactors = "M(3)") res <- c(res, if(summarize(w)[2,4]<.05) 1 else 0) } typeI <- mean(res) cat( "Design WxB, testing B: ", typeI, "\n") # tolerance is large as the number of simulations is small expect_equal( typeI, .05, tolerance = 0.035) ``` # Simulations with three factors ## Simulation with three factors, all between design ```{r, message=FALSE, warning=FALSE, echo=TRUE, eval=FALSE} frm <- s ~ grp * eta * a BSDesign <- list(eta = c("repue","ajun"), grp = c("early","middle","late"), a = c("1","2","3","4")) thePs <- rep(0.3, 24) # test type-I error rate when no effect as is the case for factor 2 set.seed(41) res <- c() for (i in 1:nsim) { smp <- GRP( thePs, theN, BSDesign ) w <- anopa(frm, smp ) res <- c(res, if(summarize(w)[2,4]<.05) 1 else 0) } typeI <- mean(res) cat( "Design BxBxB, testing B: ", typeI, "\n") # tolerance is large as the number of simulations is small expect_equal( typeI, .05, tolerance = 0.035) ``` ## Simulation with three factors, 
within design ```{r, message=FALSE, warning=FALSE, echo=TRUE, eval=FALSE} frm <- cbind(s.repue.early.1, s.ajun.early.1, s.repue.middle.1, s.ajun.middle.1, s.repue.late.1, s.ajun.late.1, s.repue.early.2, s.ajun.early.2, s.repue.middle.2, s.ajun.middle.2, s.repue.late.2, s.ajun.late.2, s.repue.early.3, s.ajun.early.3, s.repue.middle.3, s.ajun.middle.3, s.repue.late.3, s.ajun.late.3, s.repue.early.4, s.ajun.early.4, s.repue.middle.4, s.ajun.middle.4, s.repue.late.4, s.ajun.late.4 ) ~ . WSDesign <- list(eta = c("repue","ajun"), grp = c("early","middle","late"), a = c("1","2","3","4")) thePs <- rep(0.3, 24) # test type-I error rate when no effect as is the case for factor 2 set.seed(43) res <- c() for (i in 1:nsim) { smp <- GRP( thePs, theN, NULL, WSDesign ) w <- anopa(frm, smp, WSFactors = c("e(2)","g(3)", "a(4)") ) res <- c(res, if(summarize(w)[2,4]<.05) 1 else 0) } typeI <- mean(res) cat( "Design WxWxW, testing W: ", typeI, "\n") # tolerance is large as the number of simulations is small expect_equal( typeI, .05, tolerance = 0.035) ``` ## Simulation with three factors, mixed design, testing within ```{r, message=FALSE, warning=FALSE, echo=TRUE, eval=FALSE} frm <- cbind(s.repue.early, s.ajun.early, s.repue.middle, s.ajun.middle, s.repue.late, s.ajun.late ) ~ a BSDesign <- list( a = c("1","2","3","4") ) WSDesign <- list(eta = c("repue","ajun"), grp = c("early","middle","late") ) thePs <- rep(0.3, 24) # test type-I error rate when no effect as is the case for factor 2 set.seed(43) res <- c() for (i in 1:nsim) { smp <- GRP( thePs, theN, BSDesign, WSDesign ) w <- anopa(frm, smp, WSFactors = c("e(2)","g(3)") ) res <- c(res, if(summarize(w)[1,4]<.05) 1 else 0) } typeI <- mean(res) cat( "Design BxWxW, testing W: ", typeI, "\n") # tolerance is large as the number of simulations is small expect_equal( typeI, .05, tolerance = 0.035) ``` ## Simulation with three factors, mixed design, testing between ```{r, message=FALSE, warning=FALSE, echo=TRUE, eval=FALSE} frm <- cbind(s.repue.early, s.ajun.early, s.repue.middle, s.ajun.middle, s.repue.late, s.ajun.late ) ~ a BSDesign <- list( a = c("1","2","3","4") ) WSDesign <- list(eta = c("repue","ajun"), grp = c("early","middle","late") ) thePs <- rep(0.3, 24) # test type-I error rate when no effect as is the case for factor 2 set.seed(42) res <- c() for (i in 1:nsim) { smp <- GRP( thePs, theN, BSDesign, WSDesign ) w <- anopa(frm, smp, WSFactors = c("e(2)","g(3)") ) res <- c(res, if(summarize(w)[3,4]<.05) 1 else 0) } typeI <- mean(res) cat( "Design BxWxW, testing B: ", typeI, "\n") # tolerance is large as the number of simulations is small expect_equal( typeI, .05, tolerance = 0.035) ``` # The end Let's restore the warnings and messages before leaving: ```{r} options("ANOPA.feedback" = 'all') ```
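As a final note, for larger values of `nsim` the simulation loops above can easily be parallelized. Below is a minimal sketch for the first design (one between-subject factor), using base R's `parallel` package; it is an illustration we added (it assumes the objects `frm`, `thePs`, `theN` and `BSDesign` of that first simulation) and, like the code above, it is not run:

```{r, message=FALSE, warning=FALSE, echo=TRUE, eval=FALSE}
library(parallel)
cl <- makeCluster(detectCores() - 1)
clusterSetRNGStream(cl, 41)
clusterEvalQ(cl, { options("ANOPA.feedback" = 'none'); library(ANOPA) })
clusterExport(cl, c("frm", "thePs", "theN", "BSDesign"))
res <- parSapply(cl, 1:nsim, function(i) {
    smp <- GRP( thePs, theN, BSDesign )
    w <- anopa(frm, smp[,2:3] )
    if (summarize(w)[1,4] < .05) 1 else 0
})
stopCluster(cl)
mean(res)   # the type-I error rate; should remain close to .05
```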
/scratch/gouwar.j/cran-all/cranData/ANOPA/vignettes/F-TestingTypeIError.Rmd
#' Start ANOVAIREVA
#' @title Launch 'ANOVAIREVA' Interface
#' @return Nothing
#' @description ANOVAIREVA() loads an interactive user interface built using R 'shiny'.
#' @details The interactive user interface provides an easy way to perform ANOVA analyses and to download the relevant plots.
#' @keywords ANOVAIREVA
#' @examples
#' if(interactive()){
#'     library(rmarkdown)
#'     ANOVAIREVA()
#' }
ANOVAIREVA <- function() {
    rmarkdown::run(system.file("img", "ANOVAIREVA.Rmd", package = "ANOVAIREVA"))
    Sys.setenv("R_TESTS" = "")
}
/scratch/gouwar.j/cran-all/cranData/ANOVAIREVA/R/ANOVAIREVA.R
--- title: "ANOVA Interactive" output: html_document runtime: shiny --- ```{r setup, include=FALSE} knitr::opts_chunk$set(echo = TRUE) library(dplyr) library(car)# for Anova library(ggplot2)# for plots library(plotly)# plots library(datasets) ``` ANOVA (short of Analysis of Variance) is a statistical tool for determining differences/Variances between two or more groups of independent variables. One way ANOVA checks for one independent variable whereas Two way ANOVA checks for two independent variables. They can also be checked for interaction effect i.e. The presence of one causes changes in the other. Further t test is performed to find which level of independent variable shows variation. Independent variable and Dependent variable are Categorical and Metric respectively. ```{r,echo=FALSE} cat(sprintf("\n ")) #inputpanel sidebarPanel( inputPanel( checkboxInput("Choice","Check to choose your own file",value=FALSE), fileInput("file","Upload a file in *.csv format with headers",placeholder = "No file selected"), uiOutput("Var1"), uiOutput("Var2"), uiOutput("Var3"), selectInput("way", " select the anova technique to be used", choices= c("one way","two way")), uiOutput("int"), downloadButton("dataset"," Download dataset"), downloadButton("plot","Download Plot") ) ) #selection ofvariable 1 output$Var1<- renderUI({ if (input$Choice==FALSE) { data("iris") data= iris data=select_if(data,is.numeric) selectInput("var1"," Select the dependent variable",choices = colnames(data)) } else { file1=input$file if (is.null(file1)){return()} data = read.table(file = file1$datapath,sep = ",",header = TRUE) if (is.null(data())){return()} data=select_if(data,is.numeric) selectInput("var1"," Select the dependent variable",choices = colnames(data)) } }) #selection of variable 2 output$Var2<- renderUI({ if (input$Choice==FALSE) { data("iris") data= iris data=select_if(data,is.factor) selectInput("var2","Select the independent variable 1",choices = colnames(data)) } else{ file1=input$file if (is.null(file1)){return()} data = read.table(file = file1$datapath,sep = ",",header = TRUE) if (is.null(data())){return()} data=select_if(data,is.factor) selectInput("var2","Select the independent variable 1",choices = colnames(data)) } }) #selection of third variable for two way output$Var3<- renderUI({ if (input$way=="two way") { if (input$Choice==FALSE) { data("iris") data= iris data=select_if(data,is.factor) data=select(data,-c(input$var2)) selectInput("var3","Select the independent variable 2",choices = colnames(data)) } else{ file1=input$file if (is.null(file1)){return()} data = read.table(file = file1$datapath,sep = ",",header = TRUE) if (is.null(data())){return()} data=select_if(data,is.factor) data=select(data,-c(input$var2)) selectInput("var3","Select the independent variable 2",choices = colnames(data)) } } }) #selection of whether or not to check for interaction effect output$int<-renderUI({ if (input$way=="two way") { selectInput("inter","Please select whether to check for interaction or not",choices = c("no","yes")) } }) # tabs mainPanel( tabsetPanel( tabPanel("Summary",verbatimTextOutput("summ")), tabPanel("Anova",verbatimTextOutput("ANOVA")), tabPanel("Post Hoc(for single variables) ",verbatimTextOutput("PH")), tabPanel("Visualization",plotlyOutput("Plot")) ), h6("Designed and Developed by : Ms. 
Revathi Kumar", tags$img(src ="R.JPG", height= 200, width=200)), h6("Mentored by :", tags$img(src ="K.JPG", height= 300, width=300)) ) #for the summary of the data output$summ<- renderPrint({ if (input$Choice==FALSE) { data("iris") data= iris str(data) summary(data) cat(sprintf("\n The summary of the dataset is as follows:\n")) print(summary(data)) } else{ file1=input$file if (is.null(file1)){return()} data = read.table(file = file1$datapath,sep = ",",header = TRUE) if (is.null(data())){return()} str(data) summary(data) cat(sprintf("\n The summary of the dataset is as follows:\n")) print(summary(data)) } }) #for ANOVA test output$ANOVA<- renderPrint({ if (input$Choice==FALSE) { data("iris") data= iris if (input$way=="one way") { cat(sprintf("\n Null Hypothesis : means of %s is uniform across all groups of %s\n",input$var1,input$var2)) cat(sprintf("\n Alternate Hypothesis : means of %s is not uniform across all groups of %s\n",input$var1,input$var2)) mod= paste(input$var1,input$var2,sep = "~") model=lm(formula = as.formula(mod),data = data) a<- aov(model) summary(a) } else { if (input$inter=="no") { cat(sprintf("\n Hypothesis 1\n")) cat(sprintf("\n Null Hypothesis : means of %s is uniform across all groups of %s\n",input$var1,input$var2)) cat(sprintf("\n Alternate Hypothesis : means of %s is not uniform across all groups of %s\n",input$var1,input$var2)) cat(sprintf("\n Hypothesis 2\n")) cat(sprintf("\n Null Hypothesis : means of %s is uniform across all groups of %s\n",input$var1,input$var3)) cat(sprintf("\n Alternate Hypothesis : means of %s is not uniform across all groups of %s\n",input$var1,input$var3)) mo=paste(input$var2,input$var3,sep = "+") mod1= paste(input$var1,mo,sep = "~") model=lm(formula = as.formula(mod1),data = data) a<- Anova(model) } else{ cat(sprintf("\n Hypothesis 1\n")) cat(sprintf("\n Null Hypothesis : means of %s is uniform across all groups of %s\n",input$var1,input$var2)) cat(sprintf("\n Alternate Hypothesis : means of %s is not uniform across all groups of %s\n",input$var1,input$var2)) cat(sprintf("\n Hypothesis 2\n")) cat(sprintf("\n Null Hypothesis : means of %s is uniform across all groups of %s\n",input$var1,input$var3)) cat(sprintf("\n Alternate Hypothesis : means of %s is not uniform across all groups of %s\n",input$var1,input$var3)) cat(sprintf("\n Hypothesis 3\n")) cat(sprintf("\n Null Hypothesis : there is no interaction between %s and %s",input$var2,input$var3)) cat(sprintf("\n Alternate Hypothesis : there is interaction between %s and %s\n",input$var2,input$var3)) m=paste(input$var2,input$var3,sep = "*") mo=paste(input$var2,input$var3,m,sep = "+") mod1= paste(input$var1,mo,sep = "~") model=lm(formula = as.formula(mod1),data = data) a<- Anova(model) } print(a) } } else { file1=input$file if (is.null(file1)){return()} data = read.table(file = file1$datapath,sep = ",",header = TRUE) if (is.null(data())){return()} if (input$way=="one way") { cat(sprintf("\n Null Hypothesis : means of %s is uniform across all groups of %s\n",input$var1,input$var2)) cat(sprintf("\n Alternate Hypothesis : means of %s is not uniform across all groups of %s\n",input$var1,input$var2)) mod= paste(input$var1,input$var2,sep = "~") model=lm(formula = as.formula(mod),data = data) a<- aov(model) summary(a) } else { if (input$inter=="no") { cat(sprintf("\n Hypothesis 1\n")) cat(sprintf("\n Null Hypothesis : means of %s is uniform across all groups of %s\n",input$var1,input$var2)) cat(sprintf("\n Alternate Hypothesis : means of %s is not uniform across all groups of 
%s\n",input$var1,input$var2)) cat(sprintf("\n Hypothesis 2\n")) cat(sprintf("\n Null Hypothesis : means of %s is uniform across all groups of %s\n",input$var1,input$var3)) cat(sprintf("\n Alternate Hypothesis : means of %s is not uniform across all groups of %s\n",input$var1,input$var3)) mo=paste(input$var2,input$var3,sep = "+") mod1= paste(input$var1,mo,sep = "~") model=lm(formula = as.formula(mod1),data = data) a<- Anova(model) } else{ cat(sprintf("\n Hypothesis 1\n")) cat(sprintf("\n Null Hypothesis : means of %s is uniform across all groups of %s\n",input$var1,input$var2)) cat(sprintf("\n Alternate Hypothesis : means of %s is not uniform across all groups of %s\n",input$var1,input$var2)) cat(sprintf("\n Hypothesis 2\n")) cat(sprintf("\n Null Hypothesis : means of %s is uniform across all groups of %s\n",input$var1,input$var3)) cat(sprintf("\n Alternate Hypothesis : means of %s is not uniform across all groups of %s\n",input$var1,input$var3)) cat(sprintf("\n Hypothesis 3\n")) cat(sprintf("\n Null Hypothesis : there is no interaction between %s and %s",input$var2,input$var3)) cat(sprintf("\n Alternate Hypothesis : there is interaction between %s and %s\n",input$var2,input$var3)) m=paste(input$var2,input$var3,sep = "*") mo=paste(input$var2,input$var3,m,sep = "+") mod1= paste(input$var1,mo,sep = "~") model=lm(formula = as.formula(mod1),data = data) a<- Anova(model) } print(a) } } }) #for Post Hoc test output$PH<- renderPrint({ if(input$Choice==FALSE){ data("iris") data= iris mod= paste(input$var1,input$var2,sep = "~") model=lm(formula = as.formula(mod),data = data) a<- aov(model) TukeyHSD(a) } else{ file1=input$file if (is.null(file1)){return()} data = read.table(file = file1$datapath,sep = ",",header = TRUE) if (is.null(data())){return()} mod= paste(input$var1,input$var2,sep = "~") model=lm(formula = as.formula(mod),data = data) a<- aov(model) TukeyHSD(a) } }) #plot output$Plot <- renderPlotly({ if(input$Choice==FALSE){ data("iris") data= iris if(input$way=="one way"){ b<-ggplot(data = data, aes_string(x= input$var2,y= input$var1, fill = input$var2 ))+labs(y=input$var1,x=input$var2)+ geom_boxplot() b<- ggplotly(b) print(b)} else { w= aggregate(data[,input$var1],by=list(k= data[,input$var2],l= data[,input$var3]),mean) b<-ggplot(data= w, aes_string(x= w$k, y= w$x,fill= w$l ))+labs(y=input$var1,x=input$var2,fill= input$var3 )+theme(legend.position = "top")+geom_line(aes_string(group= w$l))+ geom_point() b<- ggplotly(b) print(b) } } else{ file1=input$file if (is.null(file1)){return()} data = read.table(file = file1$datapath,sep = ",",header = TRUE) if (is.null(data())){return()} if(input$way=="one way"){ b<-ggplot(data = data, aes_string(x= input$var2,y= input$var1, fill = input$var2 ))+labs(y=input$var1,x=input$var2)+ geom_boxplot() b<- ggplotly(b) print(b)} else { w= aggregate(data[,input$var1],by=list(k= data[,input$var2],l= data[,input$var3]),mean) b<-ggplot(data= w, aes_string(x= w$k, y= w$x,fill= w$l ))+labs(y=input$var1,x=input$var2,fill= input$var3 )+theme(legend.position = "top")+geom_line(aes_string(group= w$l))+ geom_point() b<- ggplotly(b) print(b) } } }) #for downloading the dataset datasetInput1<- reactive({ if(input$Choice==FALSE){ data("iris") data= iris data1<- data.frame(data) } else { file1=input$file if (is.null(file1)){return()} data = read.table(file = file1$datapath,sep = ",",header = TRUE) if (is.null(data())){return()} data1<-data.frame(data) } }) output$dataset<-downloadHandler( filename = function(){ paste("dataset",".csv",sep = "") }, content = 
function(file){write.csv(datasetInput1(),file,row.names = FALSE)} ) plotInput1<-reactive({ if (input$Choice==FALSE) { data("iris") data= iris } else{ file1=input$file if (is.null(file1)){return()} data = read.table(file = file1$datapath,sep = ",",header = TRUE) if (is.null(data())){return()} } if(input$way=="one way"){ b<-ggplot(data = data, aes_string(x= input$var2,y= input$var1, fill = input$var2 ))+labs(y=input$var1,x=input$var2)+ geom_boxplot() print(b) } else { w= aggregate(data[,input$var1],by=list(k= data[,input$var2],l= data[,input$var3]),mean) b<-ggplot(data= w, aes_string(x= w$k, y= w$x,fill= w$l ))+labs(y=input$var1,x=input$var2,fill= input$var3 )+theme(legend.position = "top")+geom_line(aes_string(group= w$l))+ geom_point() print(b) } }) output$plot<-downloadHandler( filename = function() { paste("PLOT", '.png', sep='') }, content = function(file) { ggsave(file,plotInput1()) }) ```
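The app above builds its models from reactive inputs; for readers who want the same models outside Shiny, here is a minimal static sketch. It uses the app's default `iris` data for the one-way case and the built-in `ToothGrowth` data for the two-way case (iris has only one factor), so the variable names are illustrative rather than taken from the app.

```{r, eval=FALSE}
# One-way ANOVA: does mean Sepal.Length differ across Species?
fit1 <- aov(Sepal.Length ~ Species, data = iris)
summary(fit1)    # F test for the Species effect
TukeyHSD(fit1)   # post-hoc pairwise comparisons, as in the "Post Hoc" tab

# Two-way ANOVA with interaction (ToothGrowth has two factors once dose is recoded)
tg <- transform(ToothGrowth, dose = factor(dose))
fit2 <- lm(len ~ supp * dose, data = tg)  # supp * dose expands to supp + dose + supp:dose
car::Anova(fit2)                          # Anova() from car, as used by the app
```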
/scratch/gouwar.j/cran-all/cranData/ANOVAIREVA/inst/ANOVAIREVA.Rmd
--- title: "ANOVA Interactive" output: html_document runtime: shiny --- ```{r setup, include=FALSE} knitr::opts_chunk$set(echo = TRUE) library(dplyr) library(car)# for Anova library(ggplot2)# for plots library(plotly)# plots library(datasets) ``` ANOVA (short of Analysis of Variance) is a statistical tool for determining differences/Variances between two or more groups of independent variables. One way ANOVA checks for one independent variable whereas Two way ANOVA checks for two independent variables. They can also be checked for interaction effect i.e. The presence of one causes changes in the other. Further t test is performed to find which level of independent variable shows variation. Independent variable and Dependent variable are Categorical and Metric respectively. ```{r,echo=FALSE} cat(sprintf("\n ")) #inputpanel sidebarPanel( inputPanel( checkboxInput("Choice","Check to choose your own file",value=FALSE), fileInput("file","Upload a file in *.csv format with headers",placeholder = "No file selected"), uiOutput("Var1"), uiOutput("Var2"), uiOutput("Var3"), selectInput("way", " select the anova technique to be used", choices= c("one way","two way")), uiOutput("int"), downloadButton("dataset"," Download dataset"), downloadButton("plot","Download Plot") ) ) #selection ofvariable 1 output$Var1<- renderUI({ if (input$Choice==FALSE) { data("iris") data= iris data=select_if(data,is.numeric) selectInput("var1"," Select the dependent variable",choices = colnames(data)) } else { file1=input$file if (is.null(file1)){return()} data = read.table(file = file1$datapath,sep = ",",header = TRUE) if (is.null(data())){return()} data=select_if(data,is.numeric) selectInput("var1"," Select the dependent variable",choices = colnames(data)) } }) #selection of variable 2 output$Var2<- renderUI({ if (input$Choice==FALSE) { data("iris") data= iris data=select_if(data,is.factor) selectInput("var2","Select the independent variable 1",choices = colnames(data)) } else{ file1=input$file if (is.null(file1)){return()} data = read.table(file = file1$datapath,sep = ",",header = TRUE) if (is.null(data())){return()} data=select_if(data,is.factor) selectInput("var2","Select the independent variable 1",choices = colnames(data)) } }) #selection of third variable for two way output$Var3<- renderUI({ if (input$way=="two way") { if (input$Choice==FALSE) { data("iris") data= iris data=select_if(data,is.factor) data=select(data,-c(input$var2)) selectInput("var3","Select the independent variable 2",choices = colnames(data)) } else{ file1=input$file if (is.null(file1)){return()} data = read.table(file = file1$datapath,sep = ",",header = TRUE) if (is.null(data())){return()} data=select_if(data,is.factor) data=select(data,-c(input$var2)) selectInput("var3","Select the independent variable 2",choices = colnames(data)) } } }) #selection of whether or not to check for interaction effect output$int<-renderUI({ if (input$way=="two way") { selectInput("inter","Please select whether to check for interaction or not",choices = c("no","yes")) } }) # tabs mainPanel( tabsetPanel( tabPanel("Summary",verbatimTextOutput("summ")), tabPanel("Anova",verbatimTextOutput("ANOVA")), tabPanel("Post Hoc(for single variables) ",verbatimTextOutput("PH")), tabPanel("Visualization",plotlyOutput("Plot")) ), h6("Designed and Developed by : Ms. 
Revathi Kumar", tags$img(src ="R.JPG", height= 200, width=200)), h6("Mentored by :", tags$img(src ="K.JPG", height= 300, width=300)) ) #for the summary of the data output$summ<- renderPrint({ if (input$Choice==FALSE) { data("iris") data= iris str(data) summary(data) cat(sprintf("\n The summary of the dataset is as follows:\n")) print(summary(data)) } else{ file1=input$file if (is.null(file1)){return()} data = read.table(file = file1$datapath,sep = ",",header = TRUE) if (is.null(data())){return()} str(data) summary(data) cat(sprintf("\n The summary of the dataset is as follows:\n")) print(summary(data)) } }) #for ANOVA test output$ANOVA<- renderPrint({ if (input$Choice==FALSE) { data("iris") data= iris if (input$way=="one way") { cat(sprintf("\n Null Hypothesis : means of %s is uniform across all groups of %s\n",input$var1,input$var2)) cat(sprintf("\n Alternate Hypothesis : means of %s is not uniform across all groups of %s\n",input$var1,input$var2)) mod= paste(input$var1,input$var2,sep = "~") model=lm(formula = as.formula(mod),data = data) a<- aov(model) summary(a) } else { if (input$inter=="no") { cat(sprintf("\n Hypothesis 1\n")) cat(sprintf("\n Null Hypothesis : means of %s is uniform across all groups of %s\n",input$var1,input$var2)) cat(sprintf("\n Alternate Hypothesis : means of %s is not uniform across all groups of %s\n",input$var1,input$var2)) cat(sprintf("\n Hypothesis 2\n")) cat(sprintf("\n Null Hypothesis : means of %s is uniform across all groups of %s\n",input$var1,input$var3)) cat(sprintf("\n Alternate Hypothesis : means of %s is not uniform across all groups of %s\n",input$var1,input$var3)) mo=paste(input$var2,input$var3,sep = "+") mod1= paste(input$var1,mo,sep = "~") model=lm(formula = as.formula(mod1),data = data) a<- Anova(model) } else{ cat(sprintf("\n Hypothesis 1\n")) cat(sprintf("\n Null Hypothesis : means of %s is uniform across all groups of %s\n",input$var1,input$var2)) cat(sprintf("\n Alternate Hypothesis : means of %s is not uniform across all groups of %s\n",input$var1,input$var2)) cat(sprintf("\n Hypothesis 2\n")) cat(sprintf("\n Null Hypothesis : means of %s is uniform across all groups of %s\n",input$var1,input$var3)) cat(sprintf("\n Alternate Hypothesis : means of %s is not uniform across all groups of %s\n",input$var1,input$var3)) cat(sprintf("\n Hypothesis 3\n")) cat(sprintf("\n Null Hypothesis : there is no interaction between %s and %s",input$var2,input$var3)) cat(sprintf("\n Alternate Hypothesis : there is interaction between %s and %s\n",input$var2,input$var3)) m=paste(input$var2,input$var3,sep = "*") mo=paste(input$var2,input$var3,m,sep = "+") mod1= paste(input$var1,mo,sep = "~") model=lm(formula = as.formula(mod1),data = data) a<- Anova(model) } print(a) } } else { file1=input$file if (is.null(file1)){return()} data = read.table(file = file1$datapath,sep = ",",header = TRUE) if (is.null(data())){return()} if (input$way=="one way") { cat(sprintf("\n Null Hypothesis : means of %s is uniform across all groups of %s\n",input$var1,input$var2)) cat(sprintf("\n Alternate Hypothesis : means of %s is not uniform across all groups of %s\n",input$var1,input$var2)) mod= paste(input$var1,input$var2,sep = "~") model=lm(formula = as.formula(mod),data = data) a<- aov(model) summary(a) } else { if (input$inter=="no") { cat(sprintf("\n Hypothesis 1\n")) cat(sprintf("\n Null Hypothesis : means of %s is uniform across all groups of %s\n",input$var1,input$var2)) cat(sprintf("\n Alternate Hypothesis : means of %s is not uniform across all groups of 
%s\n",input$var1,input$var2)) cat(sprintf("\n Hypothesis 2\n")) cat(sprintf("\n Null Hypothesis : means of %s is uniform across all groups of %s\n",input$var1,input$var3)) cat(sprintf("\n Alternate Hypothesis : means of %s is not uniform across all groups of %s\n",input$var1,input$var3)) mo=paste(input$var2,input$var3,sep = "+") mod1= paste(input$var1,mo,sep = "~") model=lm(formula = as.formula(mod1),data = data) a<- Anova(model) } else{ cat(sprintf("\n Hypothesis 1\n")) cat(sprintf("\n Null Hypothesis : means of %s is uniform across all groups of %s\n",input$var1,input$var2)) cat(sprintf("\n Alternate Hypothesis : means of %s is not uniform across all groups of %s\n",input$var1,input$var2)) cat(sprintf("\n Hypothesis 2\n")) cat(sprintf("\n Null Hypothesis : means of %s is uniform across all groups of %s\n",input$var1,input$var3)) cat(sprintf("\n Alternate Hypothesis : means of %s is not uniform across all groups of %s\n",input$var1,input$var3)) cat(sprintf("\n Hypothesis 3\n")) cat(sprintf("\n Null Hypothesis : there is no interaction between %s and %s",input$var2,input$var3)) cat(sprintf("\n Alternate Hypothesis : there is interaction between %s and %s\n",input$var2,input$var3)) m=paste(input$var2,input$var3,sep = "*") mo=paste(input$var2,input$var3,m,sep = "+") mod1= paste(input$var1,mo,sep = "~") model=lm(formula = as.formula(mod1),data = data) a<- Anova(model) } print(a) } } }) #for Post Hoc test output$PH<- renderPrint({ if(input$Choice==FALSE){ data("iris") data= iris mod= paste(input$var1,input$var2,sep = "~") model=lm(formula = as.formula(mod),data = data) a<- aov(model) TukeyHSD(a) } else{ file1=input$file if (is.null(file1)){return()} data = read.table(file = file1$datapath,sep = ",",header = TRUE) if (is.null(data())){return()} mod= paste(input$var1,input$var2,sep = "~") model=lm(formula = as.formula(mod),data = data) a<- aov(model) TukeyHSD(a) } }) #plot output$Plot <- renderPlotly({ if(input$Choice==FALSE){ data("iris") data= iris if(input$way=="one way"){ b<-ggplot(data = data, aes_string(x= input$var2,y= input$var1, fill = input$var2 ))+labs(y=input$var1,x=input$var2)+ geom_boxplot() b<- ggplotly(b) print(b)} else { w= aggregate(data[,input$var1],by=list(k= data[,input$var2],l= data[,input$var3]),mean) b<-ggplot(data= w, aes_string(x= w$k, y= w$x,fill= w$l ))+labs(y=input$var1,x=input$var2,fill= input$var3 )+theme(legend.position = "top")+geom_line(aes_string(group= w$l))+ geom_point() b<- ggplotly(b) print(b) } } else{ file1=input$file if (is.null(file1)){return()} data = read.table(file = file1$datapath,sep = ",",header = TRUE) if (is.null(data())){return()} if(input$way=="one way"){ b<-ggplot(data = data, aes_string(x= input$var2,y= input$var1, fill = input$var2 ))+labs(y=input$var1,x=input$var2)+ geom_boxplot() b<- ggplotly(b) print(b)} else { w= aggregate(data[,input$var1],by=list(k= data[,input$var2],l= data[,input$var3]),mean) b<-ggplot(data= w, aes_string(x= w$k, y= w$x,fill= w$l ))+labs(y=input$var1,x=input$var2,fill= input$var3 )+theme(legend.position = "top")+geom_line(aes_string(group= w$l))+ geom_point() b<- ggplotly(b) print(b) } } }) #for downloading the dataset datasetInput1<- reactive({ if(input$Choice==FALSE){ data("iris") data= iris data1<- data.frame(data) } else { file1=input$file if (is.null(file1)){return()} data = read.table(file = file1$datapath,sep = ",",header = TRUE) if (is.null(data())){return()} data1<-data.frame(data) } }) output$dataset<-downloadHandler( filename = function(){ paste("dataset",".csv",sep = "") }, content = 
function(file){write.csv(datasetInput1(),file,row.names = FALSE)} ) plotInput1<-reactive({ if (input$Choice==FALSE) { data("iris") data= iris } else{ file1=input$file if (is.null(file1)){return()} data = read.table(file = file1$datapath,sep = ",",header = TRUE) if (is.null(data())){return()} } if(input$way=="one way"){ b<-ggplot(data = data, aes_string(x= input$var2,y= input$var1, fill = input$var2 ))+labs(y=input$var1,x=input$var2)+ geom_boxplot() print(b) } else { w= aggregate(data[,input$var1],by=list(k= data[,input$var2],l= data[,input$var3]),mean) b<-ggplot(data= w, aes_string(x= w$k, y= w$x,fill= w$l ))+labs(y=input$var1,x=input$var2,fill= input$var3 )+theme(legend.position = "top")+geom_line(aes_string(group= w$l))+ geom_point() print(b) } }) output$plot<-downloadHandler( filename = function() { paste("PLOT", '.png', sep='') }, content = function(file) { ggsave(file,plotInput1()) }) ```
/scratch/gouwar.j/cran-all/cranData/ANOVAIREVA/inst/img/ANOVAIREVA.Rmd
#' Start ANOVAShiny
#' @title Launch 'ANOVAShiny' Interface
#' @return Nothing
#' @description ANOVAShiny() loads an interactive user interface built using R 'shiny'.
#' @details The interactive user interface provides an easy way to run ANOVA analyses and download the relevant plots.
#' @keywords ANOVAShiny
#' @examples
#' if(interactive()){
#'   library(rmarkdown)
#'   ANOVAShiny()
#' }
ANOVAShiny <- function() {
  rmarkdown::run(system.file("img", "ANOVAShiny.Rmd", package = "ANOVAShiny"))
  Sys.setenv("R_TESTS" = "")
}
/scratch/gouwar.j/cran-all/cranData/ANOVAShiny/R/ANOVAShiny.R
--- title: "ANOVA One-way and Two-way" output: html_document runtime: shiny --- ```{r setup, include=FALSE} knitr::opts_chunk$set(echo = TRUE) library(dplyr) library(rhandsontable) library(DescTools) ``` ```{r,echo=FALSE} sidebarPanel( inputPanel( checkboxInput("ex","Uncheck for using your own file",value = TRUE), fileInput("file", "Upload the *.csv file with headers"), uiOutput("vx"), selectInput("antype","Select Type of ANOVA",choices = c("One-way","Two-way","Factorial"),selected = "One-way"), downloadButton("downloadPlot", "Download Plot"), downloadButton("downloaddata2", "Download Example Dataset") ), inputPanel( h6("Inputs" ), uiOutput("vy"), uiOutput("vxd"), uiOutput("vxd2") ), inputPanel( h6("Mean,SD,n Inputs",width =300), rHandsontableOutput("testdata"), downloadButton("downloaddata", "Download Dataset") ) ) mainPanel( tabsetPanel(type = "tab", tabPanel("Model", verbatimTextOutput("AD")), tabPanel("Visualization", plotOutput("VP")), tabPanel("Model on aggregate(One-way)",verbatimTextOutput("AN")) ), h6("", tags$img(src ="K.JPG", height= 400, width=400)) ) output$AD<-renderPrint({ if(input$ex == TRUE) {data("ToothGrowth") data = ToothGrowth data$dose = factor(data$dose)} else{ file1 = input$file if(is.null(file1)){return()} data = read.table(file = file1$datapath,sep =",",header = TRUE) if(is.null(data())){return()} } ds = data ds = select(ds,input$variablex) cat(sprintf("\nSnapshot of the dataset\n")) print(head(ds)) options(scipen = 999) if(input$antype == "One-way") { cat(sprintf("\nHypotheses for One-way ANOVA\n")) cat(sprintf("\nHo :%s of all categories of %s are same\n",input$variabley,input$variablexd)) cat(sprintf("\nHa :%s of not all categories of %s are same\n\n",input$variabley,input$variablexd)) mod = paste(input$variabley,"~") mod = paste(mod,input$variablexd) fit <- aov(as.formula(mod), data=ds) print(summary(fit)) cat(sprintf("\nPosthoc tests of Least Significant Difference(LSD)\n")) print(PostHocTest(fit, method = "lsd", conf.level=NA)) } if(input$antype == "Two-way") { cat(sprintf("\nHypotheses for Two-way ANOVA\n")) cat(sprintf("\nHo :%s is not influenced by the treatments %s and %s\n",input$variabley,input$variablexd,input$variablexd2)) cat(sprintf("\nHa:%s is influenced by either of the treatments %s and %s\n",input$variabley,input$variablexd,input$variablexd2)) mod = paste(input$variabley,"~") mod = paste(mod,input$variablexd) mod = paste(mod,"+") mod = paste(mod,input$variablexd2) fit <- aov(as.formula(mod), data=ds) print(summary(fit)) } if(input$antype == "Factorial") { cat(sprintf("\nHypotheses for Factorial Design(Two way ANOVA with interaction)\n")) cat(sprintf("\nHo :%s is not influenced by the treatments %s and %s and their interaction\n ",input$variabley,input$variablexd,input$variablexd2)) cat(sprintf("\nHa:%s is influenced by either of the treatments %s and %s and their interaction\n",input$variabley,input$variablexd,input$variablexd2)) mod = paste(input$variabley,"~") mod = paste(mod,input$variablexd) mod = paste(mod,"*") mod = paste(mod,input$variablexd2) fit <- aov(as.formula(mod), data=ds) print(summary(fit)) } }) output$VP<-renderPlot({ if(input$ex == TRUE) {data("ToothGrowth") data = ToothGrowth data$dose = factor(data$dose)} else{ file1 = input$file if(is.null(file1)){return()} data = read.table(file = file1$datapath,sep =",",header = TRUE) if(is.null(data())){return()} } ds = data ds = select(ds,input$variablex) options(scipen = 999) if(input$antype == "One-way") { mod = paste(input$variabley,"~") mod = paste(mod,input$variablexd) 
boxplot(as.formula(mod),data =ds,col = "red",ylab = input$variabley,xlab =input$variablexd,main = "One-way ANOVA Visualization") } if(input$antype == "Two-way") { mod = paste(input$variabley,"~") mod = paste(mod,input$variablexd) mod = paste(mod,"+") mod = paste(mod,input$variablexd2) boxplot(as.formula(mod),data =ds,col = "red",xlab ="Treatments",ylab = input$variabley,main = "Two-way ANOVA Visualization ") } if(input$antype == "Factorial") { mod = paste(input$variabley,"~") mod = paste(mod,input$variablexd) mod = paste(mod,"*") mod = paste(mod,input$variablexd2) # attach(ds) # interaction.plot(get(input$variablexd),get(input$variablexd2),get(input$variabley#),type ="b",col= c("red","blue"),main = "Interaction between Treatments",xlab = #input$variablexd,ylab = input$variabley,trace.label = input$variablexd2) HH::interaction2wt(as.formula(mod),data = ds) } }) output$vx <- renderUI({ if(input$ex == TRUE) {data("ToothGrowth") data = ToothGrowth data$dose = factor(data$dose)} else { file1 = input$file if(is.null(file1)){return()} data = read.table(file = file1$datapath,sep =",",header = TRUE) if(is.null(data())){return()} } checkboxGroupInput("variablex","Select the variables",choices = colnames(data),selected = colnames(data)) }) output$vy <- renderUI({ if(input$ex == TRUE) {data("ToothGrowth") data = ToothGrowth data$dose = factor(data$dose)} else { file1 = input$file if(is.null(file1)){return()} data = read.table(file = file1$datapath,sep =",",header = TRUE) if(is.null(data())){return()} } ds = data ds = select(ds,input$variablex) ds = select_if(ds,is.numeric) selectInput("variabley","Select the dependent variable",choices = colnames(ds),selected = "" ) }) output$vxd <- renderUI({ if(input$ex == TRUE) {data("ToothGrowth") data = ToothGrowth data$dose = factor(data$dose)} else { file1 = input$file if(is.null(file1)){return()} data = read.table(file = file1$datapath,sep =",",header = TRUE) if(is.null(data())){return()} } ds = data ds = select(ds,input$variablex) ds = select_if(ds,is.factor) selectInput("variablexd","Select the independent variable",choices = colnames(ds),selected = "" ) }) output$vxd2 <- renderUI({ if(input$ex == TRUE) {data("ToothGrowth") data = ToothGrowth data$dose = factor(data$dose)} else { file1 = input$file if(is.null(file1)){return()} data = read.table(file = file1$datapath,sep =",",header = TRUE) if(is.null(data())){return()} } ds = data ds = select(ds,input$variablex) ds = select_if(ds,is.factor) selectInput("variablexd2","Select the 2nd independent variable",choices = colnames(ds),selected = colnames(ds)[2] ) }) output$testdata <- renderRHandsontable({ DF = data.frame(Group=c(1,2,3), mean= c(62,52,60), sd = c(6,5,7), n= c(8,8,7)) rhandsontable(DF) }) output$AN <- renderPrint({ data_frame = data.frame(hot_to_r(input$testdata)) levels = length(unique(data_frame$n)) data_frame1 = select(data_frame,c(n,mean,sd)) data_frame1$mean = as.numeric(data_frame$mean) data_frame1$sd = as.numeric(data_frame$sd) data_frame1$n = as.numeric(data_frame$n) rnorm2 = function(n,mean,sd) { mean+ sd*scale(rnorm(n)) } df = mapply(rnorm2,data_frame1[,1],data_frame1[,2],data_frame[,3]) if(levels > 1) { attributes(df) = list( names = names(df), row.names=1:max(data_frame$n), class = 'data.frame') colnames(df) = 1:ncol(df) df2 =data.frame(values = unlist(df)) row.names(df2)= 1: nrow(df2) data_frame2 = select(data_frame,c(Group,n)) data_frame2$Group = as.factor(data_frame2$Group) dataset1 = data_frame2 dataset1 = data.frame(ind = dataset1[rep(seq_len(nrow(dataset1)), dataset1$n) ,1 ]) 
row.names(dataset1) = 1:nrow(dataset1) df2$ind = as.factor(dataset1$ind) } else { df = data.frame(df) df2 = data.frame(stack(df[1:ncol(df)])) #print(df2) } dataset = df2 print(summary(aov(as.formula(values~ind),data = dataset))) cat(sprintf("\nPost Hoc tests\n")) print(PostHocTest(aov(as.formula(values~ind),data = dataset), method = "lsd", conf.level=NA)) }) datasetInput1 <- reactive({ data_frame = data.frame(hot_to_r(input$testdata)) levels = length(unique(data_frame$n)) data_frame1 = select(data_frame,c(n,mean,sd)) data_frame1$mean = as.numeric(data_frame$mean) data_frame1$sd = as.numeric(data_frame$sd) data_frame1$n = as.numeric(data_frame$n) rnorm2 = function(n,mean,sd) { mean+ sd*scale(rnorm(n)) } df = mapply(rnorm2,data_frame1[,1],data_frame1[,2],data_frame[,3]) if(levels > 1) { attributes(df) = list( names = names(df), row.names=1:max(data_frame$n), class = 'data.frame') colnames(df) = 1:ncol(df) df2 =data.frame(values = unlist(df)) row.names(df2)= 1: nrow(df2) data_frame2 = select(data_frame,c(Group,n)) data_frame2$Group = as.factor(data_frame2$Group) dataset1 = data_frame2 dataset1 = data.frame(ind = dataset1[rep(seq_len(nrow(dataset1)), dataset1$n) ,1 ]) row.names(dataset1) = 1:nrow(dataset1) df2$ind = as.factor(dataset1$ind) } else { df = data.frame(df) df2 = data.frame(stack(df[1:ncol(df)])) #print(df2) } data = df2 }) output$downloaddata <- downloadHandler( filename = function() { filetitle = paste("dataset") paste(filetitle, ".csv", sep = "") }, content = function(file) { write.csv(datasetInput1(), file, row.names = FALSE) } ) output$downloadPlot<- downloadHandler( filename = function() { paste("ANOVAplot", ".png", sep = "") }, content = function(file) { png(file) if(input$ex == TRUE) {data("ToothGrowth") data = ToothGrowth data$dose = factor(data$dose)} else{ file1 = input$file if(is.null(file1)){return()} data = read.table(file = file1$datapath,sep =",",header = TRUE) if(is.null(data())){return()} } ds = data ds = select(ds,input$variablex) options(scipen = 999) if(input$antype == "One-way") { mod = paste(input$variabley,"~") mod = paste(mod,input$variablexd) boxplot(as.formula(mod),data =ds,col = "red",ylab = input$variabley,xlab =input$variablexd,main = "One-way ANOVA Visualization") } if(input$antype == "Two-way") { mod = paste(input$variabley,"~") mod = paste(mod,input$variablexd) mod = paste(mod,"+") mod = paste(mod,input$variablexd2) boxplot(as.formula(mod),data =ds,col = "red",xlab ="Treatments",ylab = input$variabley,main = "Two-way ANOVA Visualization ") } if(input$antype == "Factorial") { mod = paste(input$variabley,"~") mod = paste(mod,input$variablexd) mod = paste(mod,"*") mod = paste(mod,input$variablexd2) attach(ds) interaction.plot(get(input$variablexd),get(input$variablexd2),get(input$variabley),type ="b",col= c("red","blue"),main = "Interaction between Treatments",xlab = input$variablexd,ylab = input$variabley,trace.label = input$variablexd2) } dev.off() }) datasetInput2 <- reactive({ data("ToothGrowth") data = ToothGrowth data$dose = factor(data$dose) data = data }) output$downloaddata2 <- downloadHandler( filename = function() { filetitle = paste("Exampledataset") paste(filetitle, ".csv", sep = "") }, content = function(file) { write.csv(datasetInput2(), file, row.names = FALSE) } )
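The `rnorm2()` helper above is what lets the "Model on aggregate" tab run a full one-way ANOVA from nothing but group means, SDs, and sizes: rescaling a standard-normal draw reproduces the requested mean and SD exactly. A minimal standalone sketch, with group statistics mirroring the app's default grid:

```{r, eval=FALSE}
# scale() centers to mean 0 and sd 1, so the result has exactly `mean` and `sd`
rnorm2 <- function(n, mean, sd) { mean + sd * scale(rnorm(n)) }

set.seed(1)
g <- rnorm2(8, mean = 62, sd = 6)
c(mean(g), sd(g))  # 62 and 6, up to floating-point error

# rebuild a long-format dataset for three groups and fit the one-way model
dat <- data.frame(
  values = c(rnorm2(8, 62, 6), rnorm2(8, 52, 5), rnorm2(7, 60, 7)),
  ind    = factor(rep(1:3, times = c(8, 8, 7)))
)
summary(aov(values ~ ind, data = dat))
```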
/scratch/gouwar.j/cran-all/cranData/ANOVAShiny/inst/ANOVAShiny.Rmd
--- title: "ANOVA One-way and Two-way" output: html_document runtime: shiny --- ```{r setup, include=FALSE} knitr::opts_chunk$set(echo = TRUE) library(dplyr) library(rhandsontable) library(DescTools) ``` ```{r,echo=FALSE} sidebarPanel( inputPanel( checkboxInput("ex","Uncheck for using your own file",value = TRUE), fileInput("file", "Upload the *.csv file with headers"), uiOutput("vx"), selectInput("antype","Select Type of ANOVA",choices = c("One-way","Two-way","Factorial"),selected = "One-way"), downloadButton("downloadPlot", "Download Plot"), downloadButton("downloaddata2", "Download Example Dataset") ), inputPanel( h6("Inputs" ), uiOutput("vy"), uiOutput("vxd"), uiOutput("vxd2") ), inputPanel( h6("Mean,SD,n Inputs",width =300), rHandsontableOutput("testdata"), downloadButton("downloaddata", "Download Dataset") ) ) mainPanel( tabsetPanel(type = "tab", tabPanel("Model", verbatimTextOutput("AD")), tabPanel("Visualization", plotOutput("VP")), tabPanel("Model on aggregate(One-way)",verbatimTextOutput("AN")) ), h6("", tags$img(src ="K.JPG", height= 400, width=400)) ) output$AD<-renderPrint({ if(input$ex == TRUE) {data("ToothGrowth") data = ToothGrowth data$dose = factor(data$dose)} else{ file1 = input$file if(is.null(file1)){return()} data = read.table(file = file1$datapath,sep =",",header = TRUE) if(is.null(data())){return()} } ds = data ds = select(ds,input$variablex) cat(sprintf("\nSnapshot of the dataset\n")) print(head(ds)) options(scipen = 999) if(input$antype == "One-way") { cat(sprintf("\nHypotheses for One-way ANOVA\n")) cat(sprintf("\nHo :%s of all categories of %s are same\n",input$variabley,input$variablexd)) cat(sprintf("\nHa :%s of not all categories of %s are same\n\n",input$variabley,input$variablexd)) mod = paste(input$variabley,"~") mod = paste(mod,input$variablexd) fit <- aov(as.formula(mod), data=ds) print(summary(fit)) cat(sprintf("\nPosthoc tests of Least Significant Difference(LSD)\n")) print(PostHocTest(fit, method = "lsd", conf.level=NA)) } if(input$antype == "Two-way") { cat(sprintf("\nHypotheses for Two-way ANOVA\n")) cat(sprintf("\nHo :%s is not influenced by the treatments %s and %s\n",input$variabley,input$variablexd,input$variablexd2)) cat(sprintf("\nHa:%s is influenced by either of the treatments %s and %s\n",input$variabley,input$variablexd,input$variablexd2)) mod = paste(input$variabley,"~") mod = paste(mod,input$variablexd) mod = paste(mod,"+") mod = paste(mod,input$variablexd2) fit <- aov(as.formula(mod), data=ds) print(summary(fit)) } if(input$antype == "Factorial") { cat(sprintf("\nHypotheses for Factorial Design(Two way ANOVA with interaction)\n")) cat(sprintf("\nHo :%s is not influenced by the treatments %s and %s and their interaction\n ",input$variabley,input$variablexd,input$variablexd2)) cat(sprintf("\nHa:%s is influenced by either of the treatments %s and %s and their interaction\n",input$variabley,input$variablexd,input$variablexd2)) mod = paste(input$variabley,"~") mod = paste(mod,input$variablexd) mod = paste(mod,"*") mod = paste(mod,input$variablexd2) fit <- aov(as.formula(mod), data=ds) print(summary(fit)) } }) output$VP<-renderPlot({ if(input$ex == TRUE) {data("ToothGrowth") data = ToothGrowth data$dose = factor(data$dose)} else{ file1 = input$file if(is.null(file1)){return()} data = read.table(file = file1$datapath,sep =",",header = TRUE) if(is.null(data())){return()} } ds = data ds = select(ds,input$variablex) options(scipen = 999) if(input$antype == "One-way") { mod = paste(input$variabley,"~") mod = paste(mod,input$variablexd) 
boxplot(as.formula(mod),data =ds,col = "red",ylab = input$variabley,xlab =input$variablexd,main = "One-way ANOVA Visualization") } if(input$antype == "Two-way") { mod = paste(input$variabley,"~") mod = paste(mod,input$variablexd) mod = paste(mod,"+") mod = paste(mod,input$variablexd2) boxplot(as.formula(mod),data =ds,col = "red",xlab ="Treatments",ylab = input$variabley,main = "Two-way ANOVA Visualization ") } if(input$antype == "Factorial") { mod = paste(input$variabley,"~") mod = paste(mod,input$variablexd) mod = paste(mod,"*") mod = paste(mod,input$variablexd2) # attach(ds) # interaction.plot(get(input$variablexd),get(input$variablexd2),get(input$variabley#),type ="b",col= c("red","blue"),main = "Interaction between Treatments",xlab = #input$variablexd,ylab = input$variabley,trace.label = input$variablexd2) HH::interaction2wt(as.formula(mod),data = ds) } }) output$vx <- renderUI({ if(input$ex == TRUE) {data("ToothGrowth") data = ToothGrowth data$dose = factor(data$dose)} else { file1 = input$file if(is.null(file1)){return()} data = read.table(file = file1$datapath,sep =",",header = TRUE) if(is.null(data())){return()} } checkboxGroupInput("variablex","Select the variables",choices = colnames(data),selected = colnames(data)) }) output$vy <- renderUI({ if(input$ex == TRUE) {data("ToothGrowth") data = ToothGrowth data$dose = factor(data$dose)} else { file1 = input$file if(is.null(file1)){return()} data = read.table(file = file1$datapath,sep =",",header = TRUE) if(is.null(data())){return()} } ds = data ds = select(ds,input$variablex) ds = select_if(ds,is.numeric) selectInput("variabley","Select the dependent variable",choices = colnames(ds),selected = "" ) }) output$vxd <- renderUI({ if(input$ex == TRUE) {data("ToothGrowth") data = ToothGrowth data$dose = factor(data$dose)} else { file1 = input$file if(is.null(file1)){return()} data = read.table(file = file1$datapath,sep =",",header = TRUE) if(is.null(data())){return()} } ds = data ds = select(ds,input$variablex) ds = select_if(ds,is.factor) selectInput("variablexd","Select the independent variable",choices = colnames(ds),selected = "" ) }) output$vxd2 <- renderUI({ if(input$ex == TRUE) {data("ToothGrowth") data = ToothGrowth data$dose = factor(data$dose)} else { file1 = input$file if(is.null(file1)){return()} data = read.table(file = file1$datapath,sep =",",header = TRUE) if(is.null(data())){return()} } ds = data ds = select(ds,input$variablex) ds = select_if(ds,is.factor) selectInput("variablexd2","Select the 2nd independent variable",choices = colnames(ds),selected = colnames(ds)[2] ) }) output$testdata <- renderRHandsontable({ DF = data.frame(Group=c(1,2,3), mean= c(62,52,60), sd = c(6,5,7), n= c(8,8,7)) rhandsontable(DF) }) output$AN <- renderPrint({ data_frame = data.frame(hot_to_r(input$testdata)) levels = length(unique(data_frame$n)) data_frame1 = select(data_frame,c(n,mean,sd)) data_frame1$mean = as.numeric(data_frame$mean) data_frame1$sd = as.numeric(data_frame$sd) data_frame1$n = as.numeric(data_frame$n) rnorm2 = function(n,mean,sd) { mean+ sd*scale(rnorm(n)) } df = mapply(rnorm2,data_frame1[,1],data_frame1[,2],data_frame[,3]) if(levels > 1) { attributes(df) = list( names = names(df), row.names=1:max(data_frame$n), class = 'data.frame') colnames(df) = 1:ncol(df) df2 =data.frame(values = unlist(df)) row.names(df2)= 1: nrow(df2) data_frame2 = select(data_frame,c(Group,n)) data_frame2$Group = as.factor(data_frame2$Group) dataset1 = data_frame2 dataset1 = data.frame(ind = dataset1[rep(seq_len(nrow(dataset1)), dataset1$n) ,1 ]) 
row.names(dataset1) = 1:nrow(dataset1) df2$ind = as.factor(dataset1$ind) } else { df = data.frame(df) df2 = data.frame(stack(df[1:ncol(df)])) #print(df2) } dataset = df2 print(summary(aov(as.formula(values~ind),data = dataset))) cat(sprintf("\nPost Hoc tests\n")) print(PostHocTest(aov(as.formula(values~ind),data = dataset), method = "lsd", conf.level=NA)) }) datasetInput1 <- reactive({ data_frame = data.frame(hot_to_r(input$testdata)) levels = length(unique(data_frame$n)) data_frame1 = select(data_frame,c(n,mean,sd)) data_frame1$mean = as.numeric(data_frame$mean) data_frame1$sd = as.numeric(data_frame$sd) data_frame1$n = as.numeric(data_frame$n) rnorm2 = function(n,mean,sd) { mean+ sd*scale(rnorm(n)) } df = mapply(rnorm2,data_frame1[,1],data_frame1[,2],data_frame[,3]) if(levels > 1) { attributes(df) = list( names = names(df), row.names=1:max(data_frame$n), class = 'data.frame') colnames(df) = 1:ncol(df) df2 =data.frame(values = unlist(df)) row.names(df2)= 1: nrow(df2) data_frame2 = select(data_frame,c(Group,n)) data_frame2$Group = as.factor(data_frame2$Group) dataset1 = data_frame2 dataset1 = data.frame(ind = dataset1[rep(seq_len(nrow(dataset1)), dataset1$n) ,1 ]) row.names(dataset1) = 1:nrow(dataset1) df2$ind = as.factor(dataset1$ind) } else { df = data.frame(df) df2 = data.frame(stack(df[1:ncol(df)])) #print(df2) } data = df2 }) output$downloaddata <- downloadHandler( filename = function() { filetitle = paste("dataset") paste(filetitle, ".csv", sep = "") }, content = function(file) { write.csv(datasetInput1(), file, row.names = FALSE) } ) output$downloadPlot<- downloadHandler( filename = function() { paste("ANOVAplot", ".png", sep = "") }, content = function(file) { png(file) if(input$ex == TRUE) {data("ToothGrowth") data = ToothGrowth data$dose = factor(data$dose)} else{ file1 = input$file if(is.null(file1)){return()} data = read.table(file = file1$datapath,sep =",",header = TRUE) if(is.null(data())){return()} } ds = data ds = select(ds,input$variablex) options(scipen = 999) if(input$antype == "One-way") { mod = paste(input$variabley,"~") mod = paste(mod,input$variablexd) boxplot(as.formula(mod),data =ds,col = "red",ylab = input$variabley,xlab =input$variablexd,main = "One-way ANOVA Visualization") } if(input$antype == "Two-way") { mod = paste(input$variabley,"~") mod = paste(mod,input$variablexd) mod = paste(mod,"+") mod = paste(mod,input$variablexd2) boxplot(as.formula(mod),data =ds,col = "red",xlab ="Treatments",ylab = input$variabley,main = "Two-way ANOVA Visualization ") } if(input$antype == "Factorial") { mod = paste(input$variabley,"~") mod = paste(mod,input$variablexd) mod = paste(mod,"*") mod = paste(mod,input$variablexd2) attach(ds) interaction.plot(get(input$variablexd),get(input$variablexd2),get(input$variabley),type ="b",col= c("red","blue"),main = "Interaction between Treatments",xlab = input$variablexd,ylab = input$variabley,trace.label = input$variablexd2) } dev.off() }) datasetInput2 <- reactive({ data("ToothGrowth") data = ToothGrowth data$dose = factor(data$dose) data = data }) output$downloaddata2 <- downloadHandler( filename = function() { filetitle = paste("Exampledataset") paste(filetitle, ".csv", sep = "") }, content = function(file) { write.csv(datasetInput2(), file, row.names = FALSE) } )
/scratch/gouwar.j/cran-all/cranData/ANOVAShiny/inst/img/ANOVAShiny.Rmd
#' Start ANOVAShiny2
#' @title Launch 'ANOVAShiny2' Interface
#' @return Nothing
#' @description ANOVAShiny2() loads an interactive user interface built using R 'shiny'.
#' @details The interactive user interface provides an easy way to run ANOVA analyses and download the relevant plots.
#' @keywords ANOVAShiny2
#' @examples
#' if(interactive()){
#'   library(rmarkdown)
#'   ANOVAShiny2()
#' }
ANOVAShiny2 <- function() {
  rmarkdown::run(system.file("img", "ANOVAShiny2.Rmd", package = "ANOVAShiny2"))
  Sys.setenv("R_TESTS" = "")
}
/scratch/gouwar.j/cran-all/cranData/ANOVAShiny2/R/ANOVAShiny2.R
--- title: "ANOVA One-way and Two-way" output: html_document runtime: shiny --- ```{r setup, include=FALSE} knitr::opts_chunk$set(echo = TRUE) library(dplyr) library(rhandsontable) library(DescTools) ``` ```{r,echo=FALSE} options(shiny.maxRequestSize = 100 * 1024^2) sidebarPanel( inputPanel( checkboxInput("ex","Uncheck for using your own file",value = TRUE), fileInput("file", "Upload the *.csv file with headers"), uiOutput("vx"), selectInput("antype","Select Type of ANOVA",choices = c("One-way","Two-way","Factorial"),selected = "One-way"), downloadButton("downloadPlot", "Download Plot"), downloadButton("downloaddata2", "Download Example Dataset") ), inputPanel( h6("Inputs" ), uiOutput("vy"), uiOutput("vxd"), uiOutput("vxd2") ), inputPanel( h6("Mean,SD,n Inputs",width =300), rHandsontableOutput("testdata"), downloadButton("downloaddata", "Download Dataset") ) ) mainPanel( tabsetPanel(type = "tab", tabPanel("Model", verbatimTextOutput("AD")), tabPanel("Visualization", plotOutput("VP")), tabPanel("Model on aggregate(One-way)",verbatimTextOutput("AN")) ), h6("", tags$img(src ="K.JPG", height= 400, width=400)) ) output$AD<-renderPrint({ if(input$ex == TRUE) {data("ToothGrowth") data = ToothGrowth data$dose = factor(data$dose)} else{ file1 = input$file if(is.null(file1)){return()} data = read.table(file = file1$datapath,sep =",",header = TRUE) if(is.null(data())){return()} } ds = data ds = select(ds,input$variablex) cat(sprintf("\nSnapshot of the dataset\n")) print(head(ds)) options(scipen = 999) if(input$antype == "One-way") { cat(sprintf("\nHypotheses for One-way ANOVA\n")) cat(sprintf("\nHo :%s of all categories of %s are same\n",input$variabley,input$variablexd)) cat(sprintf("\nHa :%s of not all categories of %s are same\n\n",input$variabley,input$variablexd)) mod = paste(input$variabley,"~") mod = paste(mod,input$variablexd) fit <- aov(as.formula(mod), data=ds) print(summary(fit)) cat(sprintf("\nPosthoc tests of Least Significant Difference(LSD)\n")) print(PostHocTest(fit, method = "lsd", conf.level=NA)) } if(input$antype == "Two-way") { cat(sprintf("\nHypotheses for Two-way ANOVA\n")) cat(sprintf("\nHo :%s is not influenced by the treatments %s and %s\n",input$variabley,input$variablexd,input$variablexd2)) cat(sprintf("\nHa:%s is influenced by either of the treatments %s and %s\n",input$variabley,input$variablexd,input$variablexd2)) mod = paste(input$variabley,"~") mod = paste(mod,input$variablexd) mod = paste(mod,"+") mod = paste(mod,input$variablexd2) fit <- aov(as.formula(mod), data=ds) print(summary(fit)) } if(input$antype == "Factorial") { cat(sprintf("\nHypotheses for Factorial Design(Two way ANOVA with interaction)\n")) cat(sprintf("\nHo :%s is not influenced by the treatments %s and %s and their interaction\n ",input$variabley,input$variablexd,input$variablexd2)) cat(sprintf("\nHa:%s is influenced by either of the treatments %s and %s and their interaction\n",input$variabley,input$variablexd,input$variablexd2)) mod = paste(input$variabley,"~") mod = paste(mod,input$variablexd) mod = paste(mod,"*") mod = paste(mod,input$variablexd2) fit <- aov(as.formula(mod), data=ds) print(summary(fit)) } }) output$VP<-renderPlot({ if(input$ex == TRUE) {data("ToothGrowth") data = ToothGrowth data$dose = factor(data$dose)} else{ file1 = input$file if(is.null(file1)){return()} data = read.table(file = file1$datapath,sep =",",header = TRUE) if(is.null(data())){return()} } ds = data ds = select(ds,input$variablex) options(scipen = 999) if(input$antype == "One-way") { mod = paste(input$variabley,"~") 
mod = paste(mod,input$variablexd) boxplot(as.formula(mod),data =ds,col = "red",ylab = input$variabley,xlab =input$variablexd,main = "One-way ANOVA Visualization") } if(input$antype == "Two-way") { mod = paste(input$variabley,"~") mod = paste(mod,input$variablexd) mod = paste(mod,"+") mod = paste(mod,input$variablexd2) boxplot(as.formula(mod),data =ds,col = "red",xlab ="Treatments",ylab = input$variabley,main = "Two-way ANOVA Visualization ") } if(input$antype == "Factorial") { mod = paste(input$variabley,"~") mod = paste(mod,input$variablexd) mod = paste(mod,"*") mod = paste(mod,input$variablexd2) # attach(ds) # interaction.plot(get(input$variablexd),get(input$variablexd2),get(input$variabley#),type ="b",col= c("red","blue"),main = "Interaction between Treatments",xlab = #input$variablexd,ylab = input$variabley,trace.label = input$variablexd2) HH::interaction2wt(as.formula(mod),data = ds) } }) output$vx <- renderUI({ if(input$ex == TRUE) {data("ToothGrowth") data = ToothGrowth data$dose = factor(data$dose)} else { file1 = input$file if(is.null(file1)){return()} data = read.table(file = file1$datapath,sep =",",header = TRUE) if(is.null(data())){return()} quantdata = select_if(data,is.numeric) qualdata = select_if(data,is.character) qualdata = data.frame(lapply(qualdata,as.factor)) data = data.frame(cbind(quantdata,qualdata)) } checkboxGroupInput("variablex","Select the variables",choices = colnames(data),selected = colnames(data)) }) output$vy <- renderUI({ if(input$ex == TRUE) {data("ToothGrowth") data = ToothGrowth data$dose = factor(data$dose)} else { file1 = input$file if(is.null(file1)){return()} data = read.table(file = file1$datapath,sep =",",header = TRUE) if(is.null(data())){return()} } ds = data ds = select(ds,input$variablex) ds = select_if(ds,is.numeric) selectInput("variabley","Select the dependent variable",choices = colnames(ds),selected = "" ) }) output$vxd <- renderUI({ if(input$ex == TRUE) {data("ToothGrowth") data = ToothGrowth data$dose = factor(data$dose)} else { file1 = input$file if(is.null(file1)){return()} data = read.table(file = file1$datapath,sep =",",header = TRUE) if(is.null(data())){return()} quantdata = select_if(data,is.numeric) qualdata = select_if(data,is.character) qualdata = data.frame(lapply(qualdata,as.factor)) data = data.frame(cbind(quantdata,qualdata)) } ds = data ds = select(ds,input$variablex) ds = select_if(ds,is.factor) selectInput("variablexd","Select the independent variable",choices = colnames(ds),selected = "" ) }) output$vxd2 <- renderUI({ if(input$ex == TRUE) {data("ToothGrowth") data = ToothGrowth data$dose = factor(data$dose)} else { file1 = input$file if(is.null(file1)){return()} data = read.table(file = file1$datapath,sep =",",header = TRUE) if(is.null(data())){return()} quantdata = select_if(data,is.numeric) qualdata = select_if(data,is.character) qualdata = data.frame(lapply(qualdata,as.factor)) data = data.frame(cbind(quantdata,qualdata)) } ds = data ds = select(ds,input$variablex) ds = select_if(ds,is.factor) selectInput("variablexd2","Select the 2nd independent variable",choices = colnames(ds),selected = colnames(ds)[2] ) }) output$testdata <- renderRHandsontable({ DF = data.frame(Group=c(1,2,3), mean= c(62,52,60), sd = c(6,5,7), n= c(8,8,7)) rhandsontable(DF) }) output$AN <- renderPrint({ data_frame = data.frame(hot_to_r(input$testdata)) levels = length(unique(data_frame$n)) data_frame1 = select(data_frame,c(n,mean,sd)) data_frame1$mean = as.numeric(data_frame$mean) data_frame1$sd = as.numeric(data_frame$sd) data_frame1$n = 
as.numeric(data_frame$n) rnorm2 = function(n,mean,sd) { mean+ sd*scale(rnorm(n)) } df = mapply(rnorm2,data_frame1[,1],data_frame1[,2],data_frame[,3]) if(levels > 1) { attributes(df) = list( names = names(df), row.names=1:max(data_frame$n), class = 'data.frame') colnames(df) = 1:ncol(df) df2 =data.frame(values = unlist(df)) row.names(df2)= 1: nrow(df2) data_frame2 = select(data_frame,c(Group,n)) data_frame2$Group = as.factor(data_frame2$Group) dataset1 = data_frame2 dataset1 = data.frame(ind = dataset1[rep(seq_len(nrow(dataset1)), dataset1$n) ,1 ]) row.names(dataset1) = 1:nrow(dataset1) df2$ind = as.factor(dataset1$ind) } else { df = data.frame(df) df2 = data.frame(stack(df[1:ncol(df)])) #print(df2) } dataset = df2 print(summary(aov(as.formula(values~ind),data = dataset))) cat(sprintf("\nPost Hoc tests\n")) print(PostHocTest(aov(as.formula(values~ind),data = dataset), method = "lsd", conf.level=NA)) }) datasetInput1 <- reactive({ data_frame = data.frame(hot_to_r(input$testdata)) levels = length(unique(data_frame$n)) data_frame1 = select(data_frame,c(n,mean,sd)) data_frame1$mean = as.numeric(data_frame$mean) data_frame1$sd = as.numeric(data_frame$sd) data_frame1$n = as.numeric(data_frame$n) rnorm2 = function(n,mean,sd) { mean+ sd*scale(rnorm(n)) } df = mapply(rnorm2,data_frame1[,1],data_frame1[,2],data_frame[,3]) if(levels > 1) { attributes(df) = list( names = names(df), row.names=1:max(data_frame$n), class = 'data.frame') colnames(df) = 1:ncol(df) df2 =data.frame(values = unlist(df)) row.names(df2)= 1: nrow(df2) data_frame2 = select(data_frame,c(Group,n)) data_frame2$Group = as.factor(data_frame2$Group) dataset1 = data_frame2 dataset1 = data.frame(ind = dataset1[rep(seq_len(nrow(dataset1)), dataset1$n) ,1 ]) row.names(dataset1) = 1:nrow(dataset1) df2$ind = as.factor(dataset1$ind) } else { df = data.frame(df) df2 = data.frame(stack(df[1:ncol(df)])) #print(df2) } data = df2 }) output$downloaddata <- downloadHandler( filename = function() { filetitle = paste("dataset") paste(filetitle, ".csv", sep = "") }, content = function(file) { write.csv(datasetInput1(), file, row.names = FALSE) } ) output$downloadPlot<- downloadHandler( filename = function() { paste("ANOVAplot", ".png", sep = "") }, content = function(file) { png(file) if(input$ex == TRUE) {data("ToothGrowth") data = ToothGrowth data$dose = factor(data$dose)} else{ file1 = input$file if(is.null(file1)){return()} data = read.table(file = file1$datapath,sep =",",header = TRUE) if(is.null(data())){return()} } ds = data ds = select(ds,input$variablex) options(scipen = 999) if(input$antype == "One-way") { mod = paste(input$variabley,"~") mod = paste(mod,input$variablexd) boxplot(as.formula(mod),data =ds,col = "red",ylab = input$variabley,xlab =input$variablexd,main = "One-way ANOVA Visualization") } if(input$antype == "Two-way") { mod = paste(input$variabley,"~") mod = paste(mod,input$variablexd) mod = paste(mod,"+") mod = paste(mod,input$variablexd2) boxplot(as.formula(mod),data =ds,col = "red",xlab ="Treatments",ylab = input$variabley,main = "Two-way ANOVA Visualization ") } if(input$antype == "Factorial") { mod = paste(input$variabley,"~") mod = paste(mod,input$variablexd) mod = paste(mod,"*") mod = paste(mod,input$variablexd2) attach(ds) interaction.plot(get(input$variablexd),get(input$variablexd2),get(input$variabley),type ="b",col= c("red","blue"),main = "Interaction between Treatments",xlab = input$variablexd,ylab = input$variabley,trace.label = input$variablexd2) } dev.off() }) datasetInput2 <- reactive({ data("ToothGrowth") data = 
ToothGrowth data$dose = factor(data$dose) data = data }) output$downloaddata2 <- downloadHandler( filename = function() { filetitle = paste("Exampledataset") paste(filetitle, ".csv", sep = "") }, content = function(file) { write.csv(datasetInput2(), file, row.names = FALSE) } )
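The `convert` branch above exists because, since R 4.0, `read.table()` defaults to `stringsAsFactors = FALSE`: text columns in an uploaded CSV arrive as character vectors and would otherwise be invisible to `select_if(..., is.factor)`. A standalone sketch of the same coercion pattern (the file name here is illustrative):

```{r, eval=FALSE}
data <- read.table("mydata.csv", sep = ",", header = TRUE)
str(data)  # text columns are character, not factor, in R >= 4.0

quantdata <- select_if(data, is.numeric)
qualdata  <- select_if(data, is.character)
qualdata  <- data.frame(lapply(qualdata, as.factor))  # character -> factor
data      <- data.frame(cbind(quantdata, qualdata))
str(data)  # text columns are now factors, usable as ANOVA grouping variables
```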
/scratch/gouwar.j/cran-all/cranData/ANOVAShiny2/inst/ANOVAShiny2.Rmd
--- title: "ANOVA One-way and Two-way" output: html_document runtime: shiny --- ```{r setup, include=FALSE} knitr::opts_chunk$set(echo = TRUE) library(dplyr) library(rhandsontable) library(DescTools) ``` ```{r,echo=FALSE} options(shiny.maxRequestSize = 100 * 1024^2) sidebarPanel( inputPanel( checkboxInput("ex","Uncheck for using your own file",value = TRUE), fileInput("file", "Upload the *.csv file with headers"), uiOutput("vx"), selectInput("antype","Select Type of ANOVA",choices = c("One-way","Two-way","Factorial"),selected = "One-way"), downloadButton("downloadPlot", "Download Plot"), downloadButton("downloaddata2", "Download Example Dataset") ), inputPanel( h6("Inputs" ), uiOutput("vy"), uiOutput("vxd"), uiOutput("vxd2") ), inputPanel( h6("Mean,SD,n Inputs",width =300), rHandsontableOutput("testdata"), downloadButton("downloaddata", "Download Dataset") ) ) mainPanel( tabsetPanel(type = "tab", tabPanel("Model", verbatimTextOutput("AD")), tabPanel("Visualization", plotOutput("VP")), tabPanel("Model on aggregate(One-way)",verbatimTextOutput("AN")) ), h6("", tags$img(src ="K.JPG", height= 400, width=400)) ) output$AD<-renderPrint({ if(input$ex == TRUE) {data("ToothGrowth") data = ToothGrowth data$dose = factor(data$dose)} else{ file1 = input$file if(is.null(file1)){return()} data = read.table(file = file1$datapath,sep =",",header = TRUE) if(is.null(data())){return()} } ds = data ds = select(ds,input$variablex) cat(sprintf("\nSnapshot of the dataset\n")) print(head(ds)) options(scipen = 999) if(input$antype == "One-way") { cat(sprintf("\nHypotheses for One-way ANOVA\n")) cat(sprintf("\nHo :%s of all categories of %s are same\n",input$variabley,input$variablexd)) cat(sprintf("\nHa :%s of not all categories of %s are same\n\n",input$variabley,input$variablexd)) mod = paste(input$variabley,"~") mod = paste(mod,input$variablexd) fit <- aov(as.formula(mod), data=ds) print(summary(fit)) cat(sprintf("\nPosthoc tests of Least Significant Difference(LSD)\n")) print(PostHocTest(fit, method = "lsd", conf.level=NA)) } if(input$antype == "Two-way") { cat(sprintf("\nHypotheses for Two-way ANOVA\n")) cat(sprintf("\nHo :%s is not influenced by the treatments %s and %s\n",input$variabley,input$variablexd,input$variablexd2)) cat(sprintf("\nHa:%s is influenced by either of the treatments %s and %s\n",input$variabley,input$variablexd,input$variablexd2)) mod = paste(input$variabley,"~") mod = paste(mod,input$variablexd) mod = paste(mod,"+") mod = paste(mod,input$variablexd2) fit <- aov(as.formula(mod), data=ds) print(summary(fit)) } if(input$antype == "Factorial") { cat(sprintf("\nHypotheses for Factorial Design(Two way ANOVA with interaction)\n")) cat(sprintf("\nHo :%s is not influenced by the treatments %s and %s and their interaction\n ",input$variabley,input$variablexd,input$variablexd2)) cat(sprintf("\nHa:%s is influenced by either of the treatments %s and %s and their interaction\n",input$variabley,input$variablexd,input$variablexd2)) mod = paste(input$variabley,"~") mod = paste(mod,input$variablexd) mod = paste(mod,"*") mod = paste(mod,input$variablexd2) fit <- aov(as.formula(mod), data=ds) print(summary(fit)) } }) output$VP<-renderPlot({ if(input$ex == TRUE) {data("ToothGrowth") data = ToothGrowth data$dose = factor(data$dose)} else{ file1 = input$file if(is.null(file1)){return()} data = read.table(file = file1$datapath,sep =",",header = TRUE) if(is.null(data())){return()} } ds = data ds = select(ds,input$variablex) options(scipen = 999) if(input$antype == "One-way") { mod = paste(input$variabley,"~") 
          mod = paste(mod, input$variablexd)
          boxplot(as.formula(mod), data = ds, col = "red", ylab = input$variabley,
                  xlab = input$variablexd, main = "One-way ANOVA Visualization")
        }
        if(input$antype == "Two-way")
        {
          mod = paste(input$variabley, "~")
          mod = paste(mod, input$variablexd)
          mod = paste(mod, "+")
          mod = paste(mod, input$variablexd2)
          boxplot(as.formula(mod), data = ds, col = "red", xlab = "Treatments",
                  ylab = input$variabley, main = "Two-way ANOVA Visualization ")
        }
        if(input$antype == "Factorial")
        {
          mod = paste(input$variabley, "~")
          mod = paste(mod, input$variablexd)
          mod = paste(mod, "*")
          mod = paste(mod, input$variablexd2)
          # attach(ds)
          # interaction.plot(get(input$variablexd), get(input$variablexd2), get(input$variabley),
          #                  type = "b", col = c("red","blue"),
          #                  main = "Interaction between Treatments",
          #                  xlab = input$variablexd, ylab = input$variabley,
          #                  trace.label = input$variablexd2)
          HH::interaction2wt(as.formula(mod), data = ds)
        }
      })

    # dynamic UI: pick candidate variables from the example or uploaded data
    output$vx <- renderUI({
      if(input$ex == TRUE)
      {
        data("ToothGrowth")
        data = ToothGrowth
        data$dose = factor(data$dose)
      } else {
        file1 = input$file
        if(is.null(file1)){return()}
        data = read.table(file = file1$datapath, sep = ",", header = TRUE)
        if(is.null(data)){return()}
        quantdata = select_if(data, is.numeric)
        qualdata = select_if(data, is.character)
        qualdata = data.frame(lapply(qualdata, as.factor))
        data = data.frame(cbind(quantdata, qualdata))
      }
      checkboxGroupInput("variablex", "Select the variables",
                         choices = colnames(data), selected = colnames(data))
    })

    output$vy <- renderUI({
      if(input$ex == TRUE)
      {
        data("ToothGrowth")
        data = ToothGrowth
        data$dose = factor(data$dose)
      } else {
        file1 = input$file
        if(is.null(file1)){return()}
        data = read.table(file = file1$datapath, sep = ",", header = TRUE)
        if(is.null(data)){return()}
      }
      ds = data
      ds = select(ds, input$variablex)
      ds = select_if(ds, is.numeric)
      selectInput("variabley", "Select the dependent variable",
                  choices = colnames(ds), selected = "")
    })

    output$vxd <- renderUI({
      if(input$ex == TRUE)
      {
        data("ToothGrowth")
        data = ToothGrowth
        data$dose = factor(data$dose)
      } else {
        file1 = input$file
        if(is.null(file1)){return()}
        data = read.table(file = file1$datapath, sep = ",", header = TRUE)
        if(is.null(data)){return()}
        quantdata = select_if(data, is.numeric)
        qualdata = select_if(data, is.character)
        qualdata = data.frame(lapply(qualdata, as.factor))
        data = data.frame(cbind(quantdata, qualdata))
      }
      ds = data
      ds = select(ds, input$variablex)
      ds = select_if(ds, is.factor)
      selectInput("variablexd", "Select the independent variable",
                  choices = colnames(ds), selected = "")
    })

    output$vxd2 <- renderUI({
      if(input$ex == TRUE)
      {
        data("ToothGrowth")
        data = ToothGrowth
        data$dose = factor(data$dose)
      } else {
        file1 = input$file
        if(is.null(file1)){return()}
        data = read.table(file = file1$datapath, sep = ",", header = TRUE)
        if(is.null(data)){return()}
        quantdata = select_if(data, is.numeric)
        qualdata = select_if(data, is.character)
        qualdata = data.frame(lapply(qualdata, as.factor))
        data = data.frame(cbind(quantdata, qualdata))
      }
      ds = data
      ds = select(ds, input$variablex)
      ds = select_if(ds, is.factor)
      selectInput("variablexd2", "Select the 2nd independent variable",
                  choices = colnames(ds), selected = colnames(ds)[2])
    })

    # editable table of group summary statistics
    output$testdata <- renderRHandsontable({
      DF = data.frame(Group = c(1,2,3), mean = c(62,52,60), sd = c(6,5,7), n = c(8,8,7))
      rhandsontable(DF)
    })

    # ANOVA from summary statistics: simulate raw data reproducing the entered
    # group means and sds exactly, then fit aov() and run post hoc tests
    output$AN <- renderPrint({
      data_frame = data.frame(hot_to_r(input$testdata))
      levels = length(unique(data_frame$n))
      data_frame1 = select(data_frame, c(n, mean, sd))
      data_frame1$mean = as.numeric(data_frame$mean)
      data_frame1$sd = as.numeric(data_frame$sd)
      data_frame1$n = as.numeric(data_frame$n)
      rnorm2 = function(n, mean, sd) { mean + sd*scale(rnorm(n)) }
      df = mapply(rnorm2, data_frame1[,1], data_frame1[,2], data_frame[,3])
      if(levels > 1)
      {
        attributes(df) = list(names = names(df),
                              row.names = 1:max(data_frame$n),
                              class = 'data.frame')
        colnames(df) = 1:ncol(df)
        df2 = data.frame(values = unlist(df))
        row.names(df2) = 1:nrow(df2)
        data_frame2 = select(data_frame, c(Group, n))
        data_frame2$Group = as.factor(data_frame2$Group)
        dataset1 = data_frame2
        dataset1 = data.frame(ind = dataset1[rep(seq_len(nrow(dataset1)), dataset1$n), 1])
        row.names(dataset1) = 1:nrow(dataset1)
        df2$ind = as.factor(dataset1$ind)
      } else {
        df = data.frame(df)
        df2 = data.frame(stack(df[1:ncol(df)]))
        # print(df2)
      }
      dataset = df2
      print(summary(aov(as.formula(values~ind), data = dataset)))
      cat(sprintf("\nPost Hoc tests\n"))
      print(PostHocTest(aov(as.formula(values~ind), data = dataset),
                        method = "lsd", conf.level = NA))
    })

    datasetInput1 <- reactive({
      data_frame = data.frame(hot_to_r(input$testdata))
      levels = length(unique(data_frame$n))
      data_frame1 = select(data_frame, c(n, mean, sd))
      data_frame1$mean = as.numeric(data_frame$mean)
      data_frame1$sd = as.numeric(data_frame$sd)
      data_frame1$n = as.numeric(data_frame$n)
      rnorm2 = function(n, mean, sd) { mean + sd*scale(rnorm(n)) }
      df = mapply(rnorm2, data_frame1[,1], data_frame1[,2], data_frame[,3])
      if(levels > 1)
      {
        attributes(df) = list(names = names(df),
                              row.names = 1:max(data_frame$n),
                              class = 'data.frame')
        colnames(df) = 1:ncol(df)
        df2 = data.frame(values = unlist(df))
        row.names(df2) = 1:nrow(df2)
        data_frame2 = select(data_frame, c(Group, n))
        data_frame2$Group = as.factor(data_frame2$Group)
        dataset1 = data_frame2
        dataset1 = data.frame(ind = dataset1[rep(seq_len(nrow(dataset1)), dataset1$n), 1])
        row.names(dataset1) = 1:nrow(dataset1)
        df2$ind = as.factor(dataset1$ind)
      } else {
        df = data.frame(df)
        df2 = data.frame(stack(df[1:ncol(df)]))
        # print(df2)
      }
      data = df2
    })

    output$downloaddata <- downloadHandler(
      filename = function() {
        filetitle = paste("dataset")
        paste(filetitle, ".csv", sep = "")
      },
      content = function(file) {
        write.csv(datasetInput1(), file, row.names = FALSE)
      }
    )

    # downloadable copy of the current plot
    output$downloadPlot <- downloadHandler(
      filename = function() {
        paste("ANOVAplot", ".png", sep = "")
      },
      content = function(file) {
        png(file)
        if(input$ex == TRUE)
        {
          data("ToothGrowth")
          data = ToothGrowth
          data$dose = factor(data$dose)
        } else {
          file1 = input$file
          if(is.null(file1)){return()}
          data = read.table(file = file1$datapath, sep = ",", header = TRUE)
          if(is.null(data)){return()}
        }
        ds = data
        ds = select(ds, input$variablex)
        options(scipen = 999)
        if(input$antype == "One-way")
        {
          mod = paste(input$variabley, "~")
          mod = paste(mod, input$variablexd)
          boxplot(as.formula(mod), data = ds, col = "red", ylab = input$variabley,
                  xlab = input$variablexd, main = "One-way ANOVA Visualization")
        }
        if(input$antype == "Two-way")
        {
          mod = paste(input$variabley, "~")
          mod = paste(mod, input$variablexd)
          mod = paste(mod, "+")
          mod = paste(mod, input$variablexd2)
          boxplot(as.formula(mod), data = ds, col = "red", xlab = "Treatments",
                  ylab = input$variabley, main = "Two-way ANOVA Visualization ")
        }
        if(input$antype == "Factorial")
        {
          mod = paste(input$variabley, "~")
          mod = paste(mod, input$variablexd)
          mod = paste(mod, "*")
          mod = paste(mod, input$variablexd2)
          attach(ds)
          interaction.plot(get(input$variablexd), get(input$variablexd2), get(input$variabley),
                           type = "b", col = c("red","blue"),
                           main = "Interaction between Treatments",
                           xlab = input$variablexd, ylab = input$variabley,
                           trace.label = input$variablexd2)
        }
        dev.off()
      })

    datasetInput2 <- reactive({
      data("ToothGrowth")
      data = ToothGrowth
      data$dose = factor(data$dose)
      data = data
    })

    output$downloaddata2 <- downloadHandler(
      filename = function() {
        filetitle = paste("Exampledataset")
        paste(filetitle, ".csv", sep = "")
      },
      content = function(file) {
        write.csv(datasetInput2(), file, row.names = FALSE)
      }
    )
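# The summary-statistics ANOVA above rests on one trick worth isolating:
# scale() makes each simulated group match the entered mean and sd exactly,
# not just in expectation. A standalone sketch (illustrative, not app code):
#   rnorm2 <- function(n, mean, sd) mean + sd * scale(rnorm(n))
#   x <- rnorm2(8, 62, 6)
#   c(mean(x), sd(x))  # exactly 62 and 6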
/scratch/gouwar.j/cran-all/cranData/ANOVAShiny2/inst/img/ANOVAShiny2.Rmd
#' @name aov1r
#' @rdname aov1r
#' @title One-way random effect ANOVA
#' @description Fits a one-way random effect ANOVA model.
#'
#' @param formula a formula of the form \code{y~group}
#' @param data optional dataframe
#' @param x output of \code{summary}
#' @param object an \code{aov1r} object (output of an \code{aov1r} call)
#' @param ... ignored
#'
#' @return \code{aov1r} returns an object of class \code{aov1r}.
#' @import data.table
#' @importFrom lazyeval f_eval_lhs f_eval_rhs f_lhs f_rhs
#'
#' @examples
#' dat <- simAOV1R(I=2, J=3, mu=10, sigmab=1, sigmaw=1)
#' fit <- aov1r(y ~ group, data=dat)
#' summary(fit)
NULL

#' @rdname aov1r
#' @export
aov1r <- function(formula, data=NULL){
  DT <- data.table(y = lazyeval::f_eval_lhs(formula, data=data),
                   group = lazyeval::f_eval_rhs(formula, data=data))
  DT[, means := mean(y), by="group"]
  ssw <- with(DT, crossprod(y-means)[1L,1L])
  DT2 <- DT[, list(means = means[1L], Ji = .N), by="group"]
  DT2[, Mean := mean(means)]
  balanced <- all(DT2[["Ji"]][1L] == DT2[["Ji"]][-1L])
  I <- nrow(DT2)
  Jh <- I/sum(1/DT2[["Ji"]])  # harmonic mean of the group sizes
  ssb <- Jh*with(DT2, crossprod(Mean-means)[1L,1L])
  terms <- c(y = as.character(lazyeval::f_lhs(formula)),
             group = as.character(lazyeval::f_rhs(formula)))
  N <- nrow(DT)
  out <- list(
    "Sums of squares" = c(ssw=ssw, ssb=ssb),
    "Variance components" = c(sigma2w = ssw/(N-I),
                              sigma2b = (ssb/(I-1)-ssw/(N-I))/Jh),
    "Design" = c(I=I, Jh=Jh, N=N),
    "Balanced" = balanced,
    "grandMean" = DT2$Mean[1L],
    "groupMeans" = setNames(
      as.data.frame(DT2[, .SD, .SDcols=c("group", "means")]),
      c("group", "mean")),
    "data" = data[, terms],
    "terms" = terms
  )
  class(out) <- "aov1r"
  out
}

#' @rdname aov1r
#' @export
summary.aov1r <- function(object, ...){
  out <- list()
  class(out) <- "summary.aov1r"
  out[["Response"]] <- object$terms[["y"]]
  out[["Factor"]] <- object$terms[["group"]]
  attr(out, "Balanced") <- object[["Balanced"]]
  out
}

#' @rdname aov1r
#' @export
print.summary.aov1r <- function(x, ...){
  for(foo in names(x)){
    cat(foo, ": ", x[[foo]], "\n", sep="")
  }
  if(attr(x, "Balanced")){
    cat("Design is balanced.\n")
  }else{
    cat("Design is *not* balanced.\n")
  }
}

#' @title Prediction interval for one-way random effect ANOVA
#' @description Prediction interval for the one-way random effect ANOVA model,
#'   based on a Satterthwaite approximation of the degrees of freedom.
#'
#' @param object an output of \code{\link{aov1r}}
#' @param level confidence level
#' @param ... ignored
#'
#' @return A vector of length two, the bounds of the prediction interval.
#' @export
#' @importFrom stats qt
#'
#' @references T. Y. Lin, C. T. Liao.
#' \emph{Prediction intervals for general balanced linear random models}.
#' Journal of Statistical Planning and Inference 138 (2008), 3164-3175.
#' <doi:10.1016/j.jspi.2008.01.001>
#'
#' @examples
#' dat <- simAOV1R(I=2, J=3, mu=10, sigmab=1, sigmaw=1)
#' fit <- aov1r(y ~ group, data=dat)
#' predict(fit)
predict.aov1r <- function(object, level=0.95, ...){
  I <- object[["Design"]][["I"]]
  J <- object[["Design"]][["Jh"]]
  N <- object[["Design"]][["N"]]
  SSb <- object[["Sums of squares"]][["ssb"]]
  SSw <- object[["Sums of squares"]][["ssw"]]
  a <- (1/J*(1+1/I))/(I-1)
  b <- (J-1)/J/(N-I)
  v <- a*SSb+b*SSw  # estimates the variance of (Ynew-Ybar)
  nu <- v^2/((a*SSb)^2/(I-1)+(b*SSw)^2/(N-I))  # Satterthwaite degrees of freedom
  alpha.over.two <- (1-level)/2
  bounds <- object[["grandMean"]] + c(-1,1)*sqrt(v)*qt(1-alpha.over.two, nu)
  names(bounds) <- paste0(100*c(alpha.over.two, 1-alpha.over.two), "%")
  attr(bounds, "std.error") <- sqrt(v)
  attr(bounds, "df") <- nu
  bounds
}

#' @title Confidence intervals
#' @description Confidence intervals for the one-way random effect ANOVA.
#'
#' @param object an output of \code{\link{aov1r}}
#' @param parm ignored
#' @param level confidence level
#' @param SDs logical, whether to return confidence intervals about the
#'   standard deviations or about the variances
#' @param x an output of \code{confint} applied to an \code{aov1r} object
#' @param ... ignored
#'
#' @return A dataframe providing the bounds of the confidence
#'   intervals.
#'
#' @references Richard K. Burdick, Franklin A. Graybill.
#' \emph{Confidence Intervals on Variance Components}.
#' CRC Press; 1st edition (1992).
#' ISBN-13: 978-0824786441.
#'
#' @export
#' @importFrom stats qf qt sd
#'
#' @examples
#' dat <- simAOV1R(I=2, J=3, mu=10, sigmab=1, sigmaw=1)
#' fit <- aov1r(y ~ group, data=dat)
#' confint(fit)
confint.aov1r <- function(object, parm, level = 0.95, SDs = TRUE, ...){
  I <- object[["Design"]][["I"]]
  J <- object[["Design"]][["Jh"]]
  balanced <- object[["Balanced"]]
  if(!balanced){
    warning(
      "Design is not balanced - confidence intervals are not valid."
    )
  }
  SSb <- object[["Sums of squares"]][["ssb"]]
  SSw <- object[["Sums of squares"]][["ssw"]]
  sigma2w <- object[["Variance components"]][["sigma2w"]]
  sigma2b <- object[["Variance components"]][["sigma2b"]]
  DFb <- I - 1        # between df
  DFw <- I * (J - 1)  # within df
  MSSb <- SSb/DFb; MSSw <- SSw/DFw  # mean sums of squares
  a <- (1 - level) / 2
  ## grandMean confidence interval
  tstar <- qt(1-a, DFb)
  stdev <- sd(object[["groupMeans"]][["mean"]])
  muLCB <- object[["grandMean"]] - tstar * stdev / sqrt(I)
  muUCB <- object[["grandMean"]] + tstar * stdev / sqrt(I)
  ## Within variance confidence interval
  withinLCB <- sigma2w / qf(1-a, DFw, Inf)  # within lwr
  withinUCB <- sigma2w / qf(a, DFw, Inf)    # within upr
  ## Between variance confidence interval
  G1 <- 1 - (1 / qf(1-a, DFb, Inf))
  G2 <- 1 - (1 / qf(1-a, DFw, Inf))
  H1 <- (1 / qf(a, DFb, Inf)) - 1
  H2 <- (1 / qf(a, DFw, Inf)) - 1
  G12 <- ((qf(1-a, DFb, DFw) - 1)^2 - (G1^2 * qf(1-a, DFb, DFw)^2) - (H2^2)) /
    qf(1-a, DFb, DFw)
  H12 <- ((1 - qf(a, DFb, DFw))^2 - H1^2 * qf(a, DFb, DFw)^2 - G2^2) /
    qf(a, DFb, DFw)
  Vu <- H1^2 * MSSb^2 + G2^2 * MSSw^2 + H12 * MSSb * MSSw
  Vl <- G1^2 * MSSb^2 + H2^2 * MSSw^2 + G12 * MSSw * MSSb
  betweenLCB <- (MSSb - MSSw - sqrt(Vl)) / J  # between lwr
  betweenUCB <- (MSSb - MSSw + sqrt(Vu)) / J  # between upr
  ## Total variance confidence interval
  sigma2tot <- sigma2w + sigma2b  # estimate
  totalLCB <- sigma2tot -
    (sqrt(G1^2 * MSSb^2 + G2^2 * (J - 1)^2 * MSSw^2) / J)  # total lwr
  totalUCB <- sigma2tot +
    (sqrt(H1^2 * MSSb^2 + H2^2 * (J - 1)^2 * MSSw^2) / J)  # total upr
  # Output
  estimate <- c(sigma2w, sigma2b, sigma2tot)
  lwr <- c(withinLCB, betweenLCB, totalLCB)
  upr <- c(withinUCB, betweenUCB, totalUCB)
  if(SDs){
    estimate <- sign(estimate) * sqrt(abs(estimate))
    lwr <- sign(lwr) * sqrt(abs(lwr))
    upr <- sign(upr) * sqrt(abs(upr))
  }
  out <- data.frame(
    estimate = c(object[["grandMean"]], estimate),
    lwr = c(muLCB, lwr),
    upr = c(muUCB, upr)
  )
  rownames(out) <- c("grandMean", "within", "between", "total")
  attr(out, "confidence level") <- level
  attr(out, "standard deviations") <- SDs
  class(out) <- c("confint.aov1r", class(out))
  out
}

#' @rdname confint.aov1r
#' @importFrom utils capture.output
#' @export
print.confint.aov1r <- function(x, ...){
  cat(capture.output(print.data.frame(x)), sep = "\n")
  cat('\nattr(,"confidence level")\n')
  cat(capture.output(attr(x,"confidence level")))
  cat('\nattr(,"standard deviations")\n')
  cat(capture.output(attr(x,"standard deviations")), "\n")
}
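# A small usage sketch with the packaged KM41 data (balanced: five batches of
# five measurements); kept as comments since it is illustration, not API:
#   data(KM41)
#   fit <- aov1r(y ~ Batch, data = KM41)
#   summary(fit)
#   confint(fit)               # CIs for the standard deviations
#   confint(fit, SDs = FALSE)  # CIs for the variances
#   predict(fit)               # 95% prediction interval for a new observation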
/scratch/gouwar.j/cran-all/cranData/AOV1R/R/aov1r.R
#' @title Krishnamoorthy & Mathew's example 4.1
#' @description The dataset used in Krishnamoorthy & Mathew's example 4.1.
#' @name KM41
#' @docType data
#' @references Krishnamoorthy and Mathew, Statistical Tolerance Regions, Wiley 2009.
#' @keywords data
#' @usage data(KM41)
#' @format A data frame with 25 rows and 2 columns.
#' @examples
#' data(KM41)
#' str(KM41)
#' table(KM41$Batch)
NULL
/scratch/gouwar.j/cran-all/cranData/AOV1R/R/datasets.R
/scratch/gouwar.j/cran-all/cranData/AOV1R/R/internal.R
#' Generalized pivotal quantities
#'
#' Simulates from the generalized pivotal quantities.
#'
#' @param fit an \code{\link{aov1r}} object
#' @param n number of simulations
#'
#' @return The simulations in a dataframe.
#'
#' @references Samaradasa Weerahandi.
#' \emph{Exact Statistical Methods for Data Analysis}.
#' Springer, New York, NY (1995).
#' <doi:10.1007/978-1-4612-0825-9>
#'
#' @importFrom stats rchisq
#' @export
#'
#' @examples
#' dat <- simAOV1R(I=20, J=5, mu=10, sigmab=1, sigmaw=1)
#' fit <- aov1r(y ~ group, data=dat)
#' nsims <- 20000
#' pivsims <- rGPQ(fit, nsims)
#' pivsims$GPQ_sigma2tot <- pivsims$GPQ_sigma2b + pivsims$GPQ_sigma2w
#' # Generalized confidence intervals:
#' lapply(pivsims, quantile, probs = c(0.025, 0.975))
#' # compare with the frequentist confidence intervals:
#' confint(fit, SDs = FALSE)
#' # Generalized prediction interval:
#' with(
#'   pivsims,
#'   quantile(rnorm(nsims, GPQ_mu, sqrt(GPQ_sigma2tot)),
#'            probs = c(0.025, 0.975))
#' )
#' # compare with the frequentist prediction interval:
#' predict(fit)
rGPQ <- function(fit, n=10000){
  I <- fit[["Design"]][["I"]]
  J <- fit[["Design"]][["Jh"]]
  N <- fit[["Design"]][["N"]]
  ssb <- fit[["Sums of squares"]][["ssb"]]
  ssw <- fit[["Sums of squares"]][["ssw"]]
  Z <- rnorm(n)
  U2b <- rchisq(n, I-1)
  U2w <- rchisq(n, N-I)
  data.frame(
    GPQ_mu = fit[["grandMean"]] - Z/sqrt(U2b)*sqrt(ssb/I/J),
    GPQ_sigma2b = 1/J*(ssb/U2b - ssw/U2w),
    GPQ_sigma2w = ssw/U2w
  )
}

# pivotal0 <- function(fit, Z, U2b, U2w){
#   I <- fit[["Design"]][["I"]]
#   J <- fit[["Design"]][["Jh"]]
#   ssb <- fit[["Sums of squares"]][["ssb"]]
#   ssw <- fit[["Sums of squares"]][["ssw"]]
#   list(
#     G_mu = fit[["grandmean"]] - Z/sqrt(U2b)*sqrt(ssb/I/J),
#     G_sigma2b = 1/J*(ssb/U2b - ssw/U2w),
#     G_sigma2w = ssw/U2w
#   )
# }
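# A minimal sketch of a derived GPQ (not a package function): any smooth
# function of the parameters inherits a GPQ by plug-in, e.g. the intraclass
# correlation sigma2b/(sigma2b + sigma2w). Truncating GPQ_sigma2b at zero is an
# assumption made here, since it can be negative:
#   fit <- aov1r(y ~ group, data = simAOV1R(I=20, J=5, mu=10, sigmab=1, sigmaw=1))
#   gpq <- rGPQ(fit, 10000)
#   s2b <- pmax(gpq$GPQ_sigma2b, 0)
#   quantile(s2b / (s2b + gpq$GPQ_sigma2w), probs = c(0.025, 0.975))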
/scratch/gouwar.j/cran-all/cranData/AOV1R/R/pivotal.R
#' @title Simulation of one-way random effect ANOVA
#' @description Simulates a balanced one-way random effect ANOVA model.
#'
#' @param I integer, number of groups
#' @param J integer, number of replicates per group
#' @param mu numeric, overall mean
#' @param sigmab positive number, the between standard deviation
#' @param sigmaw positive number, the within standard deviation
#'
#' @return A dataframe.
#'
#' @export
#' @importFrom utils stack
#' @importFrom stats setNames rnorm
#' @importFrom purrr map
#' @importFrom cellranger num_to_letter
#'
#' @examples
#' simAOV1R(I=2, J=3, mu=10, sigmab=1, sigmaw=1)
simAOV1R <- function(I, J, mu, sigmab, sigmaw){
  setNames(
    stack(
      setNames(
        purrr::map(rnorm(I, mu, sigmab), ~ rnorm(J, .x, sigmaw)),
        cellranger::num_to_letter(1:I))),
    c("y", "group"))
}
/scratch/gouwar.j/cran-all/cranData/AOV1R/R/sampling.R
utils::globalVariables(c("y", "means", "Mean"))
/scratch/gouwar.j/cran-all/cranData/AOV1R/R/zzz.R
--- title: "Satterwaithe" author: "Stéphane Laurent" date: "7 mai 2018" output: html_document --- ```{r setup, include=FALSE} knitr::opts_chunk$set(echo = TRUE) ``` Consider the one-way ANOVA with random effect. ```{r} library(AOV1R) set.seed(666) I=3; J=4 dat <- simAV1R(I, J, mu=0, sigmab=2, sigmaw=3) fit <- aov1r(y ~ group, data=dat) ssb <- fit[["Sums of squares"]][["ssb"]] ssw <- fit[["Sums of squares"]][["ssw"]] total_variance <- sum(fit[["Variance components"]]) ``` Commpute the Sattewaithe degrees of freedom of the total variance by the ordinary method: ```{r} # Satterwaithe degrees of freedom of the total variance a <- 1/J/(I-1) b <- (1-1/J) * 1/I/(J-1) (a*ssb+b*ssw)^2/((a*ssb)^2/(I-1) + (b*ssw)^2/(I*(J-1))) ``` Now, here is another way to get these degrees of freedom. ```{r} # other way to get the Satterwaithe df library(VCA) vca <- anovaMM(y ~ (group), Data=dat) # estimated variance of total variance var_total_var <- sum(vcovVC(vca)) # Satterwaithe df 2*total_variance^2 / var_total_var # = Satterwaithe df ```
/scratch/gouwar.j/cran-all/cranData/AOV1R/inst/GSatterwaithe.Rmd
KM41 <- data.frame(
  Batch = rep(c("B1","B2","B3","B4","B5"), each=5),
  y = c(379, 357, 390, 376, 376,
        363, 367, 382, 381, 359,
        401, 402, 407, 402, 396,
        402, 387, 392, 395, 394,
        415, 405, 396, 390, 395)
)
# saved: data(KM41)

fit <- aov1r(y~Batch, KM41)
I <- fit[["Design"]][["I"]]
J <- fit[["Design"]][["Jh"]]
ssb <- fit[["Sums of squares"]][["ssb"]]
ssw <- fit[["Sums of squares"]][["ssw"]]

n <- 1000000
Z <- rnorm(n)
U2b <- rchisq(n, I-1)
U2w <- rchisq(n, I*(J-1))
X <- Z*sqrt(pmax(0, 1/J*(1+1/I)*ssb/U2b - 1/J*ssw/U2w))  # p 315
quantile(X, .975)
/scratch/gouwar.j/cran-all/cranData/AOV1R/inst/KM41/KM41.R
--- title: "Prediction interval for the AV1R model" author: "Stéphane Laurent" date: "8 novembre 2017" output: html_document: toc: yes editor_options: chunk_output_type: console --- ```{r setup, include=FALSE} knitr::opts_chunk$set(echo = TRUE) ``` # The balanced case Consider the balanced one-way random effect ANOVA model: $$ Y_{ij} = \mu + A_i + G_{ij}, \quad i=1, \ldots, I, \quad j=1, \ldots, J, $$ where $A_i \sim \mathcal{N}(0, \sigma^2_b)$ and $G_{ij} \sim \mathcal{N}(0, \sigma^2_w)$. Denote by $Y^{\text{new}} \sim \mathcal{N}(\mu, \sigma^2_b + \sigma^2_w)$ a future observation. One has $$ \overline{Y}_{\bullet\bullet} \sim \mathcal{N}\left(\mu, \frac{J\sigma^2_b+\sigma^2_w}{IJ}\right) $$ hence $$ \overline{Y}_{\bullet\bullet} - Y^{\text{new}} \sim \mathcal{N}\left(0, \left(1+\frac{1}{I}\right)\sigma^2_b + \left(1+\frac{1}{IJ}\right)\sigma^2_w\right). $$ Recall that $I(J-1)\sigma^2_w$ is estimated by $SS_w \sim \sigma^2_w \chi^2_{I(J-1)}$ and $(I-1)(J\sigma^2_b+\sigma^2_w)$ is estimated by $SS_b \sim (J\sigma^2_b+\sigma^2_w)\chi^2_{I-1}$, hence $\sigma^2_w$ is estimated by $\frac{1}{I(J-1)}SS_w$ and $\sigma^2_b$ is estimated by $$ \frac{1}{J}\left(\frac{SS_b}{I-1} - \frac{SS_w}{I(J-1)}\right). $$ Therefore, the variance of $\overline{Y}_{\bullet\bullet} - Y^{\text{new}}$ is estimated by $a SS_b + b SS_w$ where $$ a = \frac{1}{J(I-1)}\left(1+\frac{1}{I}\right) $$ and $$ b = \left(1+\frac{1}{IJ}\right)\frac{1}{I(J-1)} - \left(1+\frac{1}{I}\right)\frac{1}{JI(J-1)} = \frac{1}{IJ}. $$ Using the Satterthwaite approximation, we find that $$ \frac{\overline{Y}_{\bullet\bullet} - Y^{\text{new}}}{\sqrt{a SS_b + b SS_w}} \approx t_{\hat\nu} $$ with $$ \hat\nu = \frac{{(a SS_b + b SS_w)}^2}{\dfrac{{(a SS_b)}^2}{I-1} + \dfrac{{(b SS_w)}^2}{I(J-1)}}. $$ This yields an approximate prediction interval. # The unbalanced case Now consider the general case $$ Y_{ij} = \mu + A_i + G_{ij}, \quad i=1, \ldots, I, \quad j=1, \ldots, J_i. $$ We define $N = \sum_{i=1}^I J_i$ and $\widetilde{J}$ as the harmonic mean of the $J_i$'s. One can check that $SS_w \sim \sigma^2_w \chi^2_{N-I}$ and setting $\overline{\overline{Y}} = \frac{1}{I}\sum_{i=1}^I\overline{Y}_{i\bullet}$, $$ \overline{\overline{Y}} \sim \mathcal{N}\left(\mu, \frac{\widetilde{J}\sigma^2_b + \sigma^2_w}{I\widetilde{J}}\right). $$ Now, set $$ \widetilde{SS}_b = \widetilde{J}\sum_{i=1}^I(\overline{Y}_{i\bullet} - \overline{\overline{Y}}). $$ It is known that $$ \widetilde{SS}_b \approx (\widetilde{J}\sigma^2_b + \sigma^2_w) \chi^2_{I-1}, $$ and that $\widetilde{SS}_b$ is independent of $SS_w$. In addition, $SS_w$ is independent of $\overline{\overline{Y}}$, but $\overline{\overline{Y}}$ is not independent of $\widetilde{SS}_b$. Ignoring this dependence and proceeding as in the balanced case, we find the Satterthwaite degrees of freedom $$ \hat\nu = \frac{{(a \widetilde{SS}_b + b SS_w)}^2}{\dfrac{{(a \widetilde{SS}_b)}^2}{I-1} + \dfrac{{(b SS_w)}^2}{N-I}} $$ with $$ a = \frac{1}{\widetilde{J}(I-1)}\left(1+\frac{1}{I}\right) $$ and $$ b = \left(1+\frac{1}{I\widetilde{J}}\right)\frac{1}{N-I} - \left(1+\frac{1}{I}\right)\frac{1}{\widetilde{J}(N-I)} = \frac{\widetilde{J}-1}{\widetilde{J}(N-I)}. $$
/scratch/gouwar.j/cran-all/cranData/AOV1R/inst/Notes/AOV1Rpredict.Rmd
--- title: "Prediction interval for the AV1R model" author: "Stéphane Laurent" date: "8 novembre 2017" output: html_document editor_options: chunk_output_type: console --- ```{r setup, include=FALSE} knitr::opts_chunk$set(echo = TRUE) ``` Consider the balanced one-way random effect ANOVA model: $$ Y_{ij} = \mu + A_i + G_{ij}, \quad i=1, \ldots, I, \quad j=1, \ldots, J, $$ where $A_i \sim \mathcal{N}(0, \sigma^2_b)$ and $G_{ij} \sim \mathcal{N}(0, \sigma^2_w)$. Denote by $Y^{\text{new}} \sim \mathcal{N}(\mu, \sigma^2_b + \sigma^2_w)$ a future observation. One has $$ \bar{Y}_{\bullet\bullet} \sim \mathcal{N}\left(\mu, \frac{J\sigma^2_b+\sigma^2_w}{IJ}\right) $$ hence $$ \bar{Y}_{\bullet\bullet} - Y^{\text{new}} \sim \mathcal{N}\left(0, \left(1+\frac{1}{I}\right)\sigma^2_b + \left(1+\frac{1}{IJ}\right)\sigma^2_w\right). $$ Recall that $I(J-1)\sigma^2_w$ is estimated by $SS_w \sim \sigma^2_w \chi^2_{I(J-1)}$ and $(I-1)(J\sigma^2_b+\sigma^2_w)$ is estimated by $SS_b \sim (J\sigma^2_b+\sigma^2_w)\chi^2_{I-1}$, hence $\sigma^2_w$ is estimated by $\frac{1}{I(J-1)}SS_w$ and $\sigma^2_b$ is estimated by $$ \frac{1}{J}\left(\frac{SS_b}{I-1} - \frac{SS_w}{I(J-1)}\right). $$ Therefore, the variance of $\bar{Y}_{\bullet\bullet} - Y^{\text{new}}$ is estimated by $a SS_b + b SS_w$ where $$ a = \frac{1}{J(I-1)}\left(1+\frac{1}{I}\right) $$ and $$ b = \left(1+\frac{1}{IJ}\right)\frac{1}{I(J-1)} - \left(1+\frac{1}{I}\right)\frac{1}{JI(J-1)} = \frac{1}{IJ}. $$ Using the Satterthwaite approximation, we find that $$ \frac{\bar{Y}_{\bullet\bullet} - Y^{\text{new}}}{\sqrt{a SS_b + b SS_w}} \approx t_{\hat\nu} $$ with $$ \hat\nu = \frac{{(a SS_b + b SS_w)}^2}{\dfrac{{(a SS_b)}^2}{I-1} + \dfrac{{(b SS_w)}^2}{I(J-1)}}. $$ This yields an approximate prediction interval. ```{r} knitr::knit_exit() ``` ```{r} library(AOV1R) I = 1000; J = 10 dat <- simAV1R(I, J, mu=0, sigmab=2, sigmaw=3) av1r <- aov(y ~ Error(group), data=dat) sav1r <- summary(av1r) ``` ```{r} ssb <- sav1r$`Error: group`[[1]]$`Sum Sq` ssw <- sav1r$`Error: Within`[[1]]$`Sum Sq` 1/J * (ssb/(I-1) - ssw/I/(J-1)) ``` ```{r} library(VCA) anovaMM(y ~ (group), Data=dat) ``` ```{r} I=5; J=3 dat <- simAV1R(I, J, mu=0, sigmab=3, sigmaw=2) dat <- dat[-1,] fit <- aov1r(y ~ group, data=dat) fit$`Sums of squares` aov(y ~ Error(group), data=dat) anovaMM(y ~ (group), Data=dat) remlMM(y ~ (group), Data=dat) fit$`Variance components` ```
/scratch/gouwar.j/cran-all/cranData/AOV1R/inst/Notes/LinLiao00.Rmd
library(AOV1R)

n <- 15000
Z <- rnorm(n)
I <- 2; J <- 3
U2b <- rchisq(n, I-1)
U2w <- rchisq(n, I*(J-1))

mu <- 10; sigmab <- 1; sigmaw <- 1
nsims <- 1000
test_mu <- test_sigmab <- test_sigmaw <- logical(nsims)
for(i in 1:nsims){
  dat <- simAOV1R(I=2, J=3, mu=mu, sigmab=sigmab, sigmaw=sigmaw)
  fit <- aov1r(y ~ group, dat)
  pivots <- AOV1R:::pivotal0(fit, Z, U2b, U2w)
  confint_mu <- quantile(pivots[["G_mu"]], c(0.025, 0.975))
  test_mu[i] <- mu > confint_mu[1] && mu < confint_mu[2]
  confint_sigma2b <- quantile(pivots[["G_sigma2b"]], c(0.025, 0.975))
  test_sigmab[i] <- sigmab^2 > confint_sigma2b[1] && sigmab^2 < confint_sigma2b[2]
  confint_sigma2w <- quantile(pivots[["G_sigma2w"]], c(0.025, 0.975))
  test_sigmaw[i] <- sigmaw^2 > confint_sigma2w[1] && sigmaw^2 < confint_sigma2w[2]
}
mean(test_mu)
mean(test_sigmab)
mean(test_sigmaw)

####
set.seed(666)
dat <- simAOV1R(I=6, J=2, mu=10, sigmab=2, sigmaw=2)
fit <- aov1r(y~group, dat)
predict(fit)

library(rstanarm)
options(mc.cores = parallel::detectCores())
sfit <- stan_lmer(y ~ (1|group), data=dat,
                  prior_covariance = decov(1, 1, 0.01, 100),
                  iter = 3500, warmup=1000, adapt_delta = 0.98, prior_PD=FALSE)
predictive_interval(sfit, newdata=data.frame(group="xxx"), prob=0.95)
predictive_interval(sfit, newdata=data.frame(group="xxx"), re.form=NA, prob=0.95)
samples <- rstan::extract(sfit$stanfit)
# aux is sigma and theta_L is sigma²_b
psims <- rnorm(10000, samples$alpha, sqrt(samples$theta_L[,1]+samples$aux^2))
quantile(psims, c(0.025, 0.975))
pivotals <- AOV1R:::pivotal(fit)
plot(density(pivotals$G_mu))
lines(density(samples$alpha), col="red")
plot(density(pivotals$G_sigma2b))
lines(density(samples$theta_L[,1]), col="red")
plot(density(pivotals$G_sigma2w))
lines(density(samples$aux^2), col="red")

library(brms)
options(mc.cores = parallel::detectCores())
bfit <- brm(y ~ (1|group), data = dat,
            control = list(adapt_delta = 0.95),
            prior = c(prior(cauchy(0,5),class="sd")),
            iter = 3500, warmup = 1000)
pred <- posterior_predict(bfit, newdata=data.frame(group="xxx"), allow_new_levels=TRUE)
quantile(pred, c(0.025, 0.975))
samples <- posterior_samples(bfit)
names(samples)
psims <- rnorm(10000, samples$b_Intercept,
               sqrt(samples$sd_group__Intercept^2 + samples$sigma^2))
quantile(psims, c(0.025, 0.975))
pivotals <- AOV1R:::pivotal(fit)
plot(density(pivotals$G_mu))
lines(density(samples$b_Intercept), col="red")
plot(density(pivotals$G_sigma2b))
lines(density(samples$sd_group__Intercept^2), col="red")
plot(density(pivotals$G_sigma2w))
lines(density(samples$sigma^2), col="red")
plot(density(pivotals$G_sigma2b+pivotals$G_sigma2w, from=0, to=200))
lines(density(samples$sd_group__Intercept^2+samples$sigma^2), col="red")
# pretty much spot on!
plot(pivotals$G_mu[1:2000], pivotals$G_sigma2w[1:2000])
points(samples$b_Intercept, samples$sigma^2, col="red")

library(lme4)
lfit <- lmer(y ~ (1|group), dat)
/scratch/gouwar.j/cran-all/cranData/AOV1R/inst/coverage/coverage.R
## ---- include = FALSE---------------------------------------------------------
knitr::opts_chunk$set(
  collapse = FALSE,
  comment = "#>"
)

## ----confidenceIntervals------------------------------------------------------
library(AOV1R)
dat <- simAOV1R(I = 20, J = 5, mu = 10, sigmab = 1, sigmaw = 1)
fit <- aov1r(y ~ group, data = dat)
nsims <- 50000L
gpq <- rGPQ(fit, nsims)
gpq[["GPQ_sigma2tot"]] <- with(gpq, GPQ_sigma2b + GPQ_sigma2w)
# Generalized confidence intervals:
t(vapply(gpq, quantile, numeric(2L), probs = c(2.5, 97.5)/100))

## ----predictiveDistribution---------------------------------------------------
ypred <- with(gpq, rnorm(nsims, GPQ_mu, sqrt(GPQ_sigma2tot)))

## ----predictionInterval-------------------------------------------------------
quantile(ypred, probs = c(2.5, 97.5)/100)

## -----------------------------------------------------------------------------
p <- 90/100
alpha <- 2.5/100
z <- qnorm(p)
GPQ_lowerQuantile <- with(gpq, GPQ_mu - z*sqrt(GPQ_sigma2tot))
GPQ_upperQuantile <- with(gpq, GPQ_mu + z*sqrt(GPQ_sigma2tot))
c(
  quantile(GPQ_lowerQuantile, probs = alpha),
  quantile(GPQ_upperQuantile, probs = 1-alpha)
)
/scratch/gouwar.j/cran-all/cranData/AOV1R/inst/doc/using-gpq.R
--- title: "Using the generalized pivotal quantities" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{Using the generalized pivotal quantities} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r, include = FALSE} knitr::opts_chunk$set( collapse = FALSE, comment = "#>" ) ``` The *generalized pivotal quantities* were introduced by Weerahandi. These are random variables, which are simulated by the function `rGPQ`. Statistical inference based on the generalized pivotal quantities is similar to Bayesian posterior inference. For example, a generalized confidence interval of a parameter is obtained by taking the quantiles of the generalized pivotal quantity associated to this parameter. ## Generalized confidence interval Below is an example. We derive generalized confidence intervals for the three parameters defining the ANOVA model as well as for the total variance. ```{r confidenceIntervals} library(AOV1R) dat <- simAOV1R(I = 20, J = 5, mu = 10, sigmab = 1, sigmaw = 1) fit <- aov1r(y ~ group, data = dat) nsims <- 50000L gpq <- rGPQ(fit, nsims) gpq[["GPQ_sigma2tot"]] <- with(gpq, GPQ_sigma2b + GPQ_sigma2w) # Generalized confidence intervals: t(vapply(gpq, quantile, numeric(2L), probs = c(2.5, 97.5)/100)) ``` ## Generalized prediction interval Here we generate simulations of the generalized predictive distribution: ```{r predictiveDistribution} ypred <- with(gpq, rnorm(nsims, GPQ_mu, sqrt(GPQ_sigma2tot))) ``` And then we get the generalized prediction interval by taking quantiles: ```{r predictionInterval} quantile(ypred, probs = c(2.5, 97.5)/100) ``` ## One-sided generalized tolerance intervals To get the bound of a one-sided generalized tolerance interval with tolerance level $p$ and confidence level $1-\alpha$, generate the simulations of the generalized pivotal quantity associated to the $100p\%$-quantile of the distribution of the response, then take the $100\alpha\%$-quantile of these simulations for the right-sided tolerance interval and the $100(1-\alpha)\%$-quantile for the left-sided tolerance interval: ```{r} p <- 90/100 alpha <- 2.5/100 z <- qnorm(p) GPQ_lowerQuantile <- with(gpq, GPQ_mu - z*sqrt(GPQ_sigma2tot)) GPQ_upperQuantile <- with(gpq, GPQ_mu + z*sqrt(GPQ_sigma2tot)) c( quantile(GPQ_lowerQuantile, probs = alpha), quantile(GPQ_upperQuantile, probs = 1-alpha) ) ```
/scratch/gouwar.j/cran-all/cranData/AOV1R/inst/doc/using-gpq.Rmd
library(AOV1R)
dat <- simAOV1R(I = 5, J = 3, mu = 10, sigmab = 1, sigmaw = 1)
fit <- aov1r(y ~ group, dat)
pivots <- pivotal(fit)
sims_qupp <- qnorm(0.975, pivots$G_mu, sqrt(pivots$G_sigma2b+pivots$G_sigma2w))
sims_qlow <- qnorm(0.025, pivots$G_mu, sqrt(pivots$G_sigma2b+pivots$G_sigma2w))
median(sims_qlow); median(sims_qupp)
predict(fit)
/scratch/gouwar.j/cran-all/cranData/AOV1R/inst/predictive/estimate_quantile.R
library(AOV1R)

I <- 2; J <- 3
dat <- simAOV1R(I=I, J=J, mu=10, sigmab=1, sigmaw=1)
fit <- aov1r(y ~ group, dat)
pivots <- pivotal(fit)
predict(fit)
quantile(pivots$G_mu, c(0.025, 0.975))

I <- 2; J <- 3
dat <- simAOV1R(I=I, J=J, mu=10, sigmab=1, sigmaw=1)
fit <- aov1r(y ~ group, dat)
pivots <- pivotal(fit)
ybar <- fit$grandMean
sigma2 <- median((pivots$G_sigma2b + pivots$G_sigma2w) +
                   (J*pivots$G_sigma2b + pivots$G_sigma2w)/I/J)
ybar + c(-1,1)*qnorm(.975, mean=0, sd=sqrt(sigma2))  # seems too small to me
predict(fit)
ybar + c(-1,1)*median(qnorm(.975, mean=0,
                            sd = sqrt((pivots$G_sigma2b + pivots$G_sigma2w) +
                                        (J*pivots$G_sigma2b + pivots$G_sigma2w)/I/J)))
predict(fit)
/scratch/gouwar.j/cran-all/cranData/AOV1R/inst/predictive/methodIII.R
library(AOV1R)

nsims <- 1000
test <- logical(nsims)
for(i in 1:nsims){
  dat <- simAOV1R(I=2, J=3, mu=10, sigmab=1, sigmaw=1)
  fit <- aov1r(y ~ group, dat)
  pred <- predict(fit)
  newy <- simAOV1R(I=1, J=1, mu=10, sigmab=1, sigmaw=1)$y
  test[i] <- newy > pred[1] && newy < pred[2]
}
mean(test)

# unbalanced
nsims <- 1000
test <- logical(nsims)
for(i in 1:nsims){
  dat <- simAOV1R(I=3, J=4, mu=10, sigmab=1, sigmaw=1)[-c(1,2),]
  fit <- aov1r(y ~ group, dat)
  pred <- predict(fit)
  newy <- simAOV1R(I=1, J=1, mu=10, sigmab=1, sigmaw=1)$y
  test[i] <- newy > pred[1] && newy < pred[2]
}
mean(test)

n <- 20000
Z <- rnorm(n)
I <- 6; J <- 4
U2b <- rchisq(n, I-1)
U2w <- rchisq(n, I*(J-1))
nsims <- 1000
test <- logical(nsims)
for(i in 1:nsims){
  dat <- simAOV1R(I=I, J=J, mu=10, sigmab=1, sigmaw=1)
  fit <- aov1r(y ~ group, dat)
  pivots <- AOV1R:::pivotal0(fit, Z, U2b, U2w)
  # sims <- numeric(n)
  # for(j in 1:n){
  #   sims[j] <- simAOV1R(I=1, J=1,
  #                       mu=pivots$G_mu[j],
  #                       sigmab=sqrt(max(0,pivots$G_sigma2b[j])),
  #                       sigmaw=sqrt(pivots$G_sigma2w[j]))$y
  # }
  sims <- rnorm(n, pivots$G_mu, sqrt(pivots$G_sigma2b+pivots$G_sigma2w))
  pred <- quantile(sims, c(0.025, 0.975))
  newy <- simAOV1R(I=1, J=1, mu=10, sigmab=1, sigmaw=1)$y
  test[i] <- newy > pred[1] && newy < pred[2]
}
mean(test)
# 0.99 with the first method, for nsims=100
# 0.985 with the second for large nsims (I=2, J=3)

####
set.seed(666)
dat <- simAOV1R(I=6, J=2, mu=10, sigmab=2, sigmaw=2)
fit <- aov1r(y~group, dat)
predict(fit)

library(rstanarm)
options(mc.cores = parallel::detectCores())
sfit <- stan_lmer(y ~ (1|group), data=dat,
                  prior_covariance = decov(1, 1, 0.01, 100),
                  iter = 3500, warmup=1000, adapt_delta = 0.98, prior_PD=FALSE)
predictive_interval(sfit, newdata=data.frame(group="xxx"), prob=0.95)
predictive_interval(sfit, newdata=data.frame(group="xxx"), re.form=NA, prob=0.95)
samples <- rstan::extract(sfit$stanfit)
# aux is sigma and theta_L is sigma²_b
psims <- rnorm(10000, samples$alpha, sqrt(samples$theta_L[,1]+samples$aux^2))
quantile(psims, c(0.025, 0.975))
pivotals <- AOV1R:::pivotal(fit)
plot(density(pivotals$G_mu))
lines(density(samples$alpha), col="red")
plot(density(pivotals$G_sigma2b))
lines(density(samples$theta_L[,1]), col="red")
plot(density(pivotals$G_sigma2w))
lines(density(samples$aux^2), col="red")

library(brms)
options(mc.cores = parallel::detectCores())
bfit <- brm(y ~ (1|group), data = dat,
            control = list(adapt_delta = 0.95),
            prior = c(prior(cauchy(0,5),class="sd")),
            iter = 3500, warmup = 1000)
pred <- posterior_predict(bfit, newdata=data.frame(group="xxx"), allow_new_levels=TRUE)
quantile(pred, c(0.025, 0.975))
samples <- posterior_samples(bfit)
names(samples)
psims <- rnorm(10000, samples$b_Intercept,
               sqrt(samples$sd_group__Intercept^2 + samples$sigma^2))
quantile(psims, c(0.025, 0.975))
pivotals <- AOV1R:::pivotal(fit)
plot(density(pivotals$G_mu))
lines(density(samples$b_Intercept), col="red")
plot(density(pivotals$G_sigma2b))
lines(density(samples$sd_group__Intercept^2), col="red")
plot(density(pivotals$G_sigma2w))
lines(density(samples$sigma^2), col="red")
plot(density(pivotals$G_sigma2b+pivotals$G_sigma2w, from=0, to=200))
lines(density(samples$sd_group__Intercept^2+samples$sigma^2), col="red")
# pretty much spot on!
plot(pivotals$G_mu[1:2000], pivotals$G_sigma2w[1:2000])
points(samples$b_Intercept, samples$sigma^2, col="red")

library(lme4)
lfit <- lmer(y ~ (1|group), dat)
/scratch/gouwar.j/cran-all/cranData/AOV1R/inst/predictive/predictive.R
library(AOV1R)
set.seed(666)
I=3; J=4
dat <- simAOV1R(I, J, mu=0, sigmab=2, sigmaw=3)
fit <- aov1r(y ~ group, data=dat)
ssb <- fit[["Sums of squares"]][["ssb"]]
ssw <- fit[["Sums of squares"]][["ssw"]]
sigma2b <- fit[["Variance components"]][["sigma2b"]]
sigma2w <- fit[["Variance components"]][["sigma2w"]]
total_variance <- sum(fit[["Variance components"]])

# standard error of the fixed effect
( stderr <- sqrt(ssb/(I-1)/I/J) )
lfit <- lme4::lmer(y ~ (1|group), data=dat)
summary(lfit)$coefficients

I=3; J=4
nsims <- 3000
sims_stderr <- numeric(nsims)
sims_totalVariance <- numeric(nsims)
for(i in 1:nsims){
  dat <- simAOV1R(I, J, mu=0, sigmab=2, sigmaw=3)
  fit <- aov1r(y ~ group, data=dat)
  ssb <- fit[["Sums of squares"]][["ssb"]]
  sims_stderr[i] <- sqrt(ssb/(I-1)/I/J)
  sims_totalVariance[i] <- sum(fit[["Variance components"]])
}
mean(sims_stderr^2)  # a/b if Gamma(a,b)
var(sims_stderr^2)   # a/b^2
( b <- mean(sims_stderr^2) / var(sims_stderr^2) )
( a <- b * mean(sims_stderr^2) )  # (I-1)/2
plot(sims_stderr, sims_totalVariance,
     xlab="standard error of intercept",
     ylab="estimate of total variance")
plot(sims_stderr^2, sims_totalVariance,
     xlab="squared standard error of intercept (constant times SSb)",
     ylab="estimate of total variance")

library(lmerTest)
lfit <- lmerTest::lmer(y ~ (1|group), data=dat)

library(nlme)
lmefit <- lme(y ~ 1, random = list(group = ~ 1), data=dat)
/scratch/gouwar.j/cran-all/cranData/AOV1R/inst/satterwaithe_prediction.R
library(AOV1R)
set.seed(666)
I=3; J=4
dat <- simAOV1R(I, J, mu=0, sigmab=2, sigmaw=3)
fit <- aov1r(y ~ group, data=dat)
ssb <- fit[["Sums of squares"]][["ssb"]]
ssw <- fit[["Sums of squares"]][["ssw"]]
total_variance <- sum(fit[["Variance components"]])

# Satterthwaite degrees of freedom of the total variance
a <- 1/J/(I-1)
b <- (1-1/J) * 1/I/(J-1)
(a*ssb+b*ssw)^2/((a*ssb)^2/(I-1) + (b*ssw)^2/(I*(J-1)))

# other way to get the Satterthwaite df
library(VCA)
vca <- anovaMM(y ~ (group), Data=dat)
# estimated variance of the total variance
var_total_var <- sum(vcovVC(vca))
2*total_variance^2 / var_total_var  # = Satterthwaite df
/scratch/gouwar.j/cran-all/cranData/AOV1R/inst/satterwaithe_totalVariance.R
library(AOV1R)

# check law SSb (error on my blog!)
I <- 2; J <- 5
mu <- 10; sigmab <- 2; sigmaw <- 3
sigma2 <- J*sigmab^2 + sigmaw^2
nsims <- 1000
result <- numeric(nsims)
for(i in 1:nsims){
  dat <- simAOV1R(I=I, J=J, mu=mu, sigmab=sigmab, sigmaw=sigmaw)
  fit <- aov1r(y ~ group, dat)
  result[i] <- fit$`Sums of squares`[["ssb"]]
}
ssbs <- result/sigma2/(I-1)
curve(ecdf(ssbs)(x), from=0, to=2)
curve(pchisq(x, I-1), add=TRUE, col="red")

# check estimates
I <- 2; J <- 3
mu <- 10; sigmab <- 2; sigmaw <- 3
nsims <- 50000
sigma2b <- sigma2w <- numeric(nsims)
for(i in 1:nsims){
  dat <- simAOV1R(I=I, J=J, mu=mu, sigmab=sigmab, sigmaw=sigmaw)
  fit <- aov1r(y ~ group, dat)
  estimates <- fit$`Variance components`
  sigma2w[i] <- estimates[["sigma2w"]]
  sigma2b[i] <- estimates[["sigma2b"]]
}
mean(sigma2w)
mean(sigma2b)
/scratch/gouwar.j/cran-all/cranData/AOV1R/inst/sumsofsquares.R
library(AOV1R)

dat <- simAOV1R(I=3, J=4, mu=0, sigmab=2, sigmaw=3)
dat <- dat[-c(1,2),]
means <- aggregate(y~group, data=dat, FUN=mean)[["y"]]
freqs <- aggregate(y~group, data=dat, FUN=length)[["y"]]
SWb <- function(rho, means, freqs){
  w <- freqs/(1+rho*freqs)
  sum(w*(means-sum(w*means)/sum(w))^2)
}
SWb(0.5, means, freqs)
rho <- seq(0, 5, length.out = 50)
swb <- sapply(rho, function(x) SWb(x, means, freqs))
plot(rho, swb, type="l")

nsims <- 2000
sims <- numeric(nsims)
for(i in 1:nsims){
  dat <- simAOV1R(I=3, J=4, mu=0, sigmab=2, sigmaw=3)
  dat <- dat[-c(1,2),]
  means <- aggregate(y~group, data=dat, FUN=mean)[["y"]]
  freqs <- aggregate(y~group, data=dat, FUN=length)[["y"]]
  sims[i] <- SWb(4/9, means, freqs)
}
curve(ecdf(sims/9)(x), from=0, to=6)
curve(pchisq(x, 2), add=TRUE, col="red")
/scratch/gouwar.j/cran-all/cranData/AOV1R/inst/unbalanced/unbalanced00.R
--- title: "Using the generalized pivotal quantities" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{Using the generalized pivotal quantities} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r, include = FALSE} knitr::opts_chunk$set( collapse = FALSE, comment = "#>" ) ``` The *generalized pivotal quantities* were introduced by Weerahandi. These are random variables, which are simulated by the function `rGPQ`. Statistical inference based on the generalized pivotal quantities is similar to Bayesian posterior inference. For example, a generalized confidence interval of a parameter is obtained by taking the quantiles of the generalized pivotal quantity associated to this parameter. ## Generalized confidence interval Below is an example. We derive generalized confidence intervals for the three parameters defining the ANOVA model as well as for the total variance. ```{r confidenceIntervals} library(AOV1R) dat <- simAOV1R(I = 20, J = 5, mu = 10, sigmab = 1, sigmaw = 1) fit <- aov1r(y ~ group, data = dat) nsims <- 50000L gpq <- rGPQ(fit, nsims) gpq[["GPQ_sigma2tot"]] <- with(gpq, GPQ_sigma2b + GPQ_sigma2w) # Generalized confidence intervals: t(vapply(gpq, quantile, numeric(2L), probs = c(2.5, 97.5)/100)) ``` ## Generalized prediction interval Here we generate simulations of the generalized predictive distribution: ```{r predictiveDistribution} ypred <- with(gpq, rnorm(nsims, GPQ_mu, sqrt(GPQ_sigma2tot))) ``` And then we get the generalized prediction interval by taking quantiles: ```{r predictionInterval} quantile(ypred, probs = c(2.5, 97.5)/100) ``` ## One-sided generalized tolerance intervals To get the bound of a one-sided generalized tolerance interval with tolerance level $p$ and confidence level $1-\alpha$, generate the simulations of the generalized pivotal quantity associated to the $100p\%$-quantile of the distribution of the response, then take the $100\alpha\%$-quantile of these simulations for the right-sided tolerance interval and the $100(1-\alpha)\%$-quantile for the left-sided tolerance interval: ```{r} p <- 90/100 alpha <- 2.5/100 z <- qnorm(p) GPQ_lowerQuantile <- with(gpq, GPQ_mu - z*sqrt(GPQ_sigma2tot)) GPQ_upperQuantile <- with(gpq, GPQ_mu + z*sqrt(GPQ_sigma2tot)) c( quantile(GPQ_lowerQuantile, probs = alpha), quantile(GPQ_upperQuantile, probs = 1-alpha) ) ```
/scratch/gouwar.j/cran-all/cranData/AOV1R/vignettes/using-gpq.Rmd
#' Calculate cohort deviation
#'
#' Calculate cohort deviation
#'
#' @inheritParams apci
#' @param A,P,C The numbers of age groups, period groups, and cohort groups separately.
#' @param model A generalized linear regression model generated from
#' the internal function temp_model
#'
#'
#' @return A list containing:
#' \item{cohort_average}{The estimated inter-cohort average deviations from age
#' and period main effects.}
#' \item{cohort_slope}{The estimated intra-cohort life-course linear slopes.}
#' \item{int_matrix}{A matrix containing the estimated coefficients for
#' age-by-period interactions.}
#' \item{cohort_index}{Indices indicating different cohorts.}
#'
#' @export

# change column names (cohort slope)
cohortdeviation <- function(A,P,C,
                            model = temp6,
                            weight = "wt",
                            covariate,
                            gee=FALSE,
                            unequal_interval = FALSE,
                            age_range = NULL,
                            period_range = NULL,
                            age_interval = NULL,
                            period_interval = NULL,
                            age_group = NULL,
                            period_group = NULL,
                            ...){
  # library("magrittr")
  r6 = model$coefficients[stringr::str_detect(names(model$coefficients),
                                              "acc|pcc|(Intercept)")]
  # r6se = summary(model)$coef[stringr::str_detect(names(model$coefficients),
  #                                                "acc|pcc|(Intercept)"),"Std. Error"]
  r6se = summary(model)$coef[stringr::str_detect(names(model$coefficients),
                                                 "acc|pcc|(Intercept)"),
                             stringr::str_detect(colnames(summary(model)$coef),
                                                 "Std. Error|Robust S.E.")]
  r6p = summary(model)$coef[stringr::str_detect(names(model$coefficients),
                                                "acc|pcc|(Intercept)"),
                            stringr::str_detect(colnames(summary(model)$coef),
                                                "Pr(>|t|)|Robust z")]

  ############# computing "full" interactions index ##############
  T = array(rep(0, A*P*(A-1)*(P-1)), dim=c(A*P, (A-1)*(P-1)))
  ind1 = A*1:(P-1)
  ind2 = (A*(P-1)+1):(A*P-1)
  ind3 = A*P
  ind = c(ind1,ind2,ind3)
  newind = 1:(A*P)
  newind = newind[-ind]
  T[newind,] = diag((A-1)*(P-1))
  T[ind1,] = -diag(P-1)[,rep(1:(P-1),each=A-1)]
  T[ind2,] = -diag(A-1)[,rep(1:(A-1),P-1)]
  T[ind3,] = rep(1,(A-1)*(P-1))

  ############# computing "full" interactions contrast ##############
  # iatemp = vcov(model)[(covn+A+P): length(r6), (covn+A+P): length(r6)]
  if(gee==TRUE){
    row_ind <- stringr::str_detect(rownames(model$robust.variance),
                                   "^acc([0-9])*:pcc([0-9])*$")
    col_ind <- stringr::str_detect(colnames(model$robust.variance),
                                   "^acc([0-9])*:pcc([0-9])*$")
    iatemp = model$robust.variance[row_ind,col_ind]
  }else{
    row_ind <- stringr::str_detect(rownames(vcov(model)),
                                   "^acc([0-9])*:pcc([0-9])*$")
    col_ind <- stringr::str_detect(colnames(vcov(model)),
                                   "^acc([0-9])*:pcc([0-9])*$")
    iatemp = vcov(model)[row_ind,col_ind]
  }
  row_ind_r6 <- stringr::str_detect(names(r6), "^acc([0-9])*:pcc([0-9])*$")
  col_ind_r6 <- stringr::str_detect(names(r6), "^acc([0-9])*:pcc([0-9])*$")
  iavcov = T%*%iatemp%*%t(T)
  # df = model$df.residual # nrow(data)-length(model$coefficients)
  if(gee==TRUE){
    df = model$nobs-length(model$coefficients)
  }else{
    df = nrow(model$data)-length(model$coefficients)
  }
  iaesti = as.vector(T%*%r6[row_ind_r6])
  iase = sqrt(diag(iavcov))
  if(df ==0 ){
    iap = 2 * pnorm(-abs(iaesti/iase))
  }else{
    iap = pt(-abs(iaesti/iase), df)*2
  }

  # cindex comes from function ageperiod_group
  # cindex <- sapply(1:P,function(j){
  #   seq((A+j-1),j, -1)
  # })
  if(unequal_interval==TRUE){
    cindex <- ageperiod_group(age_range = age_range,
                              period_range = period_range,
                              age_interval = age_interval,
                              period_interval = period_interval,
                              age_group = age_group, period_group = period_group)
  }else{
    cindex <- sapply(1:P,function(j){
      seq((A+j-1),j, -1)
    })
  }

  sig = rep(' ', (A*P))
  sig[iap<.05] = '* '
  sig[iap<.01] = '** '
  sig[iap<.001] = '***'
  iasig = sig

  cohortindex = as.vector(cindex)
  ia = as.data.frame(cbind(iaesti,iase,iap,iasig, cohortindex))

  ####################### inter-cohort changes #######################
  # cohortint <- sapply(1:C,function(k){
  cohortint <- sapply(1:max(cindex),function(k){
    O = sum(cindex == k)
    k1 = rep(1/O, O)
    k2 = rep(0, A*P)
    k2[cindex == k] = k1
    contresti = k2%*%iaesti
    contrse = sqrt(t(k2)%*%iavcov%*%k2)
    t = contresti/contrse
    if (t > 0){
      if(df ==0 ){
        p = 2 * pnorm(-abs(t))
      }else{
        p = 2*pt(t, df, lower.tail=F)
      }
    } else {
      if(df ==0 ){
        p = 2 * pnorm(-abs(t))
      }else{
        p = 2*pt(t, df, lower.tail=T)
      }
    }
    sig <- ' '
    if (p<.05){
      sig <- '* '
    }
    if(p<.01){
      sig <- '** '
    }
    if(p<.001){
      sig <- '***'
    }
    c(contresti,contrse,t,p,sig)
  })%>%t%>%
    as.data.frame%>%
    # `colnames<-`(c("cint","cintse","cintt","cintp","sig"))
    `colnames<-`(c("cohort_average","cohort_average_se",
                   "cohort_average_t","cohort_average_p","sig"))
  cohortint$cohort_group = seq(1, max(cindex))
  cohortint = cohortint[,c("cohort_group",
                           "cohort_average","cohort_average_se",
                           "cohort_average_t","cohort_average_p",
                           "sig")]

  ####################### intra-cohort changes #######################
  poly = 1
  srange <- as.data.frame(table(cindex))
  srangen <- as.numeric(as.character(srange$cindex[srange$Freq>1]))
  # cohortslope <- sapply((poly+1):(C-poly),function(k){
  cohortslope <- sapply(sort(srangen),function(k){
    o = sum(cindex == k)
    k1 = contr.poly(o)
    k2 = rep(0, A*P)
    k2[cindex == k] = k1[,poly]
    contresti = k2%*%iaesti
    contrse = sqrt(t(k2)%*%iavcov%*%k2)
    t = contresti/contrse
    if (t > 0){
      if(df ==0 ){
        p = 2 * pnorm(-abs(t))
      }else{
        p = 2*pt(t, df, lower.tail=F)
      }
    } else {
      if(df ==0 ){
        p = 2 * pnorm(-abs(t))
      }else{
        p = 2*pt(t, df, lower.tail=T)
      }
    }
    sig <- ' '
    if (p<.05){
      sig <- '* '
    }
    if(p<.01){
      sig <- '** '
    }
    if(p<.001){
      sig <- '***'
    }
    c(k,contresti,contrse,t,p,sig)
  })%>%t%>%
    as.data.frame%>%
    # `colnames<-`(c("cslope","cslopese","cslopet","cslopep","sig"))
    `colnames<-`(c("cindex","cohort_slope","cohort_slope_se","cohort_slope_t",
                   "cohort_slope_p","sig"))
  cohortslope <- merge(srange,cohortslope,all.x = TRUE)
  cohortslope <- cohortslope[order(as.numeric(as.character(cohortslope$cindex))),]
  # cohortslope$cohort_group = seq(1, max(cindex))
  cohortslope$cohort_group <- cohortslope$cindex
  cohortslope$cindex <- NULL
  # sigintra = sig
  cohortslope = cohortslope[,c("cohort_group",
                               "cohort_slope","cohort_slope_se","cohort_slope_t",
                               "cohort_slope_p",
                               "sig")]

  # message("Cohortint")
  # print(cohortint)
  # message("Cohortslope")
  # print(cohortslope)

  list(cohort_average = cohortint, cohort_slope = cohortslope,
       int_matrix = ia, cohort_index = cindex)
}
# cohortdeviation(A,P,C)
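# Worked illustration of the default cohort indexing used above (values follow
# from the sapply() call; nothing here is executed by the package): with A = 3
# age groups and P = 2 periods,
#   sapply(1:2, function(j) seq(3 + j - 1, j, -1))
# returns
#      [,1] [,2]
# [1,]    3    4
# [2,]    2    3
# [3,]    1    2
# i.e. cohort k collects the age-by-period cells lying on the k-th diagonal,
# giving A + P - 1 = 4 cohorts in the equal-interval case.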
/scratch/gouwar.j/cran-all/cranData/APCI/R/CohortDeviation.R
#' Estimate age effect and period effect
#'
#' Estimate age and period effect from APCI model
#'
#' @inheritParams apci
#' @inheritParams cohortdeviation
#'
#' @return A list containing:
#' \item{intercept}{The overall intercept.}
#' \item{age_effect}{The estimated age main effect.}
#' \item{period_effect}{The estimated period main effect.}
#'
#' @export

maineffect <- function(A,P,C, model = temp6, data, gee=FALSE, ...){
  r6 = model$coefficients[stringr::str_detect(names(model$coefficients),
                                              "acc|pcc|(Intercept)")]
  r6se = summary(model)$coef[stringr::str_detect(names(model$coefficients),
                                                 "acc|pcc|(Intercept)"),
                             stringr::str_detect(colnames(summary(model)$coef),
                                                 "Std. Error|Robust S.E.")]
  r6p = summary(model)$coef[stringr::str_detect(names(model$coefficients),
                                                "acc|pcc|(Intercept)"),
                            stringr::str_detect(colnames(summary(model)$coef),
                                                "Pr(>|t|)|Robust z")]

  ############# computing "full" age, period, and covariance effects ##############
  fullae = array(rep(0, A), dim=c(A, 1))
  fullas = array(rep(0, A), dim=c(A, 1))
  S1 = array(rep(0, A*(A-1)), dim=c(A, (A-1)))
  ind = A*1:(A-1)
  newind = 1:(A*(A-1))
  newind = newind[-ind]
  S1[newind] = diag(A-1)
  S1[ind] = rep(-1,(A-1))
  # fullae = as.vector(S1%*%model$coef[(1+covn+1):(1+covn+A-1)])
  fullae = as.vector(S1%*%model$coef[stringr::str_detect(names(model$coef),
                                                         "^acc([0-9])*$")])
  if(gee==TRUE){
    df = model$nobs-length(model$coefficients)
    row_ind <- stringr::str_detect(rownames(model$robust.variance), "^acc([0-9])*$")
    col_ind <- stringr::str_detect(colnames(model$robust.variance), "^acc([0-9])*$")
    fullas = sqrt(diag(S1%*%model$robust.variance[row_ind, col_ind]%*%t(S1)))
    fullat = fullae/fullas
    # df = nrow(data)-length(model$coefficients)
    df = model$nobs-length(model$coefficients)
  }else{
    row_ind <- stringr::str_detect(rownames(vcov(model)), "^acc([0-9])*$")
    col_ind <- stringr::str_detect(colnames(vcov(model)), "^acc([0-9])*$")
    fullas = sqrt(diag(S1%*%vcov(model)[row_ind, col_ind]%*%t(S1)))
    fullat = fullae/fullas
    # df = nrow(model$data)-length(model$coefficients)
    df = model$df.residual
  }
  if(df ==0 ){
    fullap = 2 * pnorm(-abs(fullat))
  }else{
    fullap = pt(-abs(fullat),df)*2
  }
  sig = rep(' ', A)
  sig[fullap<.05] = '* '
  sig[fullap<.01] = '** '
  sig[fullap<.001] = '***'
  fullasig = sig
  fulla = cbind(fullae, fullas, fullap, fullasig)

  fullpe = array(rep(0, P), dim=c(P, 1))
  fullps = array(rep(0, P), dim=c(P, 1))
  S2 = array(rep(0, P*(P-1)), dim=c(P, (P-1)))
  ind = P*1:(P-1)
  newind = 1:(P*(P-1))
  newind = newind[-ind]
  S2[newind] = diag(P-1)
  S2[ind] = rep(-1,(P-1))
  fullpe = as.vector(S2%*%model$coef[stringr::str_detect(names(model$coef),
                                                         "^pcc([0-9])*$")])
  if(gee==TRUE){
    row_ind <- stringr::str_detect(rownames(model$robust.variance), "^pcc([0-9])*$")
    col_ind <- stringr::str_detect(colnames(model$robust.variance), "^pcc([0-9])*$")
    fullps = sqrt(diag(S2%*%model$robust.variance[row_ind,col_ind]%*%t(S2)))
  }else{
    row_ind <- stringr::str_detect(rownames(vcov(model)), "^pcc([0-9])*$")
    col_ind <- stringr::str_detect(colnames(vcov(model)), "^pcc([0-9])*$")
    fullps = sqrt(diag(S2%*%vcov(model)[row_ind,col_ind]%*%t(S2)))
  }
  fullpt = fullpe/fullps
  if(df ==0 ){
    fullpp = 2 * pnorm(-abs(fullpt))
  }else{
    fullpp = pt(-abs(fullpt),df)*2
  }
  sig = rep(' ', P)
  sig[fullpp<.05] = '* '
  sig[fullpp<.01] = '** '
  sig[fullpp<.001] = '***'
  fullpsig = sig
  fullp = cbind(fullpe, fullps, fullpp, fullpsig)

  inte = as.vector(r6[1])
  intse = r6se[1]
  # if GEE, generate p-values with manual test, modification: 2022-11-10
  if(gee==TRUE){
    if(df ==0 ){
      intp = 2 * pnorm(-abs(inte/intse))
    }else{
      intp = pt(-abs(inte/intse),df)*2
    }
  }else{
    intp = r6p[1]
  }
  intsig = rep(' ', 1)
  intsig[r6p[1]<.05] = '* '
  intsig[r6p[1]<.01] = '** '
  intsig[r6p[1]<.001] = '***'
  fullint = cbind(inte,intse,intp,intsig)

  maineff = rbind(fullint,fulla,fullp)
  colnames(maineff) = c("estimate", "se", "p", "sig")
  rownames(maineff) = c()

  age_results <- maineff[2:(A+1),]
  colnames(age_results) = c("age_estimate", "age_se", "age_p", "sig")
  rownames(age_results) = c()

  period_results <- maineff[(A+2):(A+P+1),]
  colnames(period_results) = c("period_estimate", "period_se", "period_p", "sig")
  rownames(period_results) = c()

  list(intercept = maineff[1,],
       age_effect = cbind(`age_group` = seq(1,A), age_results),
       period_effect = cbind(`period_group` = seq(1,P), period_results))
}
# maineffect(A,P,C,model = temp6)
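# Worked illustration of the contrast matrix built above (values follow from
# the S1 construction; nothing here is executed by the package): for A = 3,
#   S1 = [  1  0
#           0  1
#          -1 -1 ]
# so the "full" age effects are the A - 1 estimated coefficients plus a last
# effect equal to minus their sum, which makes the effects sum to zero.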
/scratch/gouwar.j/cran-all/cranData/APCI/R/MainEffect.R
#' Get the cohort index matrix for any age and period groups
#'
#' This function returns the cohort index matrix for any age and period
#' groups. The cohort index matrix will then be used to extract the cohort
#' effects.
#'
#' @param age_range,period_range Numeric vector indicating the actual
#' age and period range (e.g., 10 to 59 years old from 2000 to 2019).
#' @param age_interval,period_interval,age_group,period_group Numeric
#' values or character vectors indicating how age and period are
#' grouped. \code{age_interval} and \code{period_interval} are numbers
#' indicating the width of age and period groups respectively.
#' \code{age_group} and \code{period_group} are character vectors
#' explicitly listing all potential age and period groups. Either
#' \code{age_interval}(\code{period_interval}) or \code{age_group}
#' (\code{period_group}) have to be defined when \code{unequal_interval}
#' is \code{TRUE}.
#'
#' @return A matrix representing the relationship among age, period,
#' and cohort groups under the current setting.
#'
#' @examples
#' ## age and period groups have equal width
#' ageperiod_group(age_range = 10:59, period_range = 2000:2019,
#'                 age_interval = 5, period_interval = 5)
#' ageperiod_group(age_range = 10:59, period_range = 2000:2019,
#'                 age_group = c("10-14","15-19","20-24","25-29",
#'                               "30-34","35-39","40-44","45-49",
#'                               "50-54","55-59"),
#'                 period_group = c("2000-2004","2005-2009","2010-2014",
#'                                  "2015-2019"))
#'
#' ## age and period groups have unequal width
#' ageperiod_group(age_range = 10:59, period_range = 2000:2019,
#'                 age_interval = 10, period_interval = 5)
#' ageperiod_group(age_range = 10:59, period_range = 2000:2019,
#'                 age_group = c("10-19","20-29","30-39","40-49","50-59"),
#'                 period_group = c("2000-2004","2005-2009",
#'                                  "2010-2014","2015-2019"))
#'
#' @export

# Define age and period groups
# One drawback of this function: it does not allow for setting up the initial
# grouping point.
# Also, if the intervals are unequal, the number of cohort groups will not be
# A + P - 1; therefore, calculating the cohort average can be a problem, since
# argument C should be changed accordingly.
# If users define groups that are not all of equal width, this function should
# allow for a customized grouping strategy to make everything more flexible.
ageperiod_group <- function(age_range, period_range,
                            age_interval = NULL, period_interval = NULL,
                            age_group = NULL, period_group = NULL){
  # define expanded data frame for specific age and period ranges
  df <- expand.grid(A = age_range, P = period_range)
  if(!is.null(age_group)){
    # age_group <- c("10-19","20-29","30-39","40-49","50-59")
    age_group_split <- stringr::str_extract_all(age_group, "[0-9]+", simplify = T)
    df$acc <- cut(df$A,
                  breaks = as.numeric(c(age_group_split[1], age_group_split[,2])),
                  right = T, include.lowest = T,
                  labels = 1:length(age_group))
  }else{
    df$acc <- floor((df$A-min(age_range))/age_interval) + 1
  }
  if(!is.null(period_group)){
    # period_group <- c("2000-2004","2005-2009","2010-2014","2015-2019")
    period_group_split <- stringr::str_extract_all(period_group, "[0-9]+", simplify = T)
    df$pcc <- cut(df$P,
                  breaks = as.numeric(c(period_group_split[1], period_group_split[,2])),
                  right = T, include.lowest = T,
                  labels = 1:length(period_group))
  }else{
    df$pcc <- floor((df$P-min(period_range))/period_interval) + 1
  }
  # define cohort
  df$cohort <- df$P - df$A
  df <- dplyr::mutate(dplyr::group_by(df, acc, pcc),
                      crange = paste(min(cohort), max(cohort)))
  df <- dplyr::ungroup(df)
  df$ccc <- dplyr::group_indices(dplyr::group_by(df, crange))
  matrix(unique(dplyr::select(.data = df, acc, pcc, ccc))$ccc,
         nrow = length(unique(df$acc)),
         ncol = length(unique(df$pcc)))
}
/scratch/gouwar.j/cran-all/cranData/APCI/R/ageperiod_group.R
#' Run APC-I model
#'
#' Run APC-I model
#' @inheritParams temp_model
#' @inheritParams ageperiod_group
#' @param dev.test Logical, specifying if the global F test should be
#' implemented before fitting the APC-I model. If \code{TRUE}, apci will first run the
#' global F test and report the test results; otherwise, apci will skip this
#' step and return NULL. The default setting is \code{TRUE}. However, users should be
#' aware that the algorithm will not automatically stop even if there are no
#' significant age-by-period interactions based on the global F test.
#' @param print Logical, specifying if the intermediate results should be
#' displayed in the console when fitting the model. The default setting is
#' \code{TRUE} to display the results of each procedure.
#' @param unequal_interval Logical, indicating if age and period groups are
#' of the same interval width. The default is set as \code{FALSE}.
#'
#'
#' @return A list containing:
#' \item{model}{The fitted generalized linear model.}
#' \item{intercept}{The overall intercept.}
#' \item{age_effect}{The estimated age main effect.}
#' \item{period_effect}{The estimated period main effect.}
#' \item{cohort_average}{The estimated inter-cohort average deviations from age
#' and period main effects.}
#' \item{cohort_slope}{The estimated intra-cohort life-course linear slopes.}
#' \item{int_matrix}{A matrix containing the estimated coefficients for
#' age-by-period interactions.}
#' \item{cohort_index}{Indices indicating different cohorts.}
#' \item{data}{Data used for fitting APC-I model.}
#'
#' @examples
#' # load package
#' library("APCI")
#' # load data
#' test_data <- APCI::women9017
#' test_data$acc <- as.factor(test_data$acc)
#' test_data$pcc <- as.factor(test_data$pcc)
#' test_data$educc <- as.factor(test_data$educc)
#' test_data$educr <- as.factor(test_data$educr)
#'
#' # fit APC-I model
#' APC_I <- APCI::apci(outcome = "inlfc",
#'                     age = "acc",
#'                     period = "pcc",
#'                     cohort = "ccc",
#'                     weight = "wt",
#'                     data = test_data, dev.test = FALSE,
#'                     print = TRUE,
#'                     family = "gaussian")
#' summary(APC_I)
#'
#' # explore the raw data pattern
#' apci.plot.raw(data = test_data, outcome_var = "inlfc", age = "acc",
#'               period = "pcc")
#' ## alternatively,
#' apci.plot(data = test_data, outcome_var = "inlfc", age = "acc", model = APC_I,
#'           period = "pcc", type = "explore")
#'
#' # visualize estimated cohort effects with bar plot
#' apci.bar(model = APC_I, age = "acc",
#'          period = "pcc", outcome_var = "inlfc")
#'
#' # visualize estimated cohort effects with heatmap plot
#' apci.plot.heatmap(model = APC_I, age = "acc", period = "pcc")
#' ## alternatively,
#' apci.plot(data = test_data, outcome_var = "inlfc", age = "acc", model = APC_I,
#'           period = "pcc")
#'
#' @export

# APCI Model
apci <- function(outcome = "inlfc",
                 age = "acc",
                 period = "pcc",
                 cohort = NULL,
                 weight = NULL,
                 covariate = NULL,
                 data,
                 family = "quasibinomial",
                 dev.test = TRUE,
                 print = TRUE,
                 gee = FALSE,
                 id = NULL,
                 corstr = "exchangeable",
                 unequal_interval = FALSE,
                 age_range = NULL,
                 period_range = NULL,
                 age_interval = NULL,
                 period_interval = NULL,
                 age_group = NULL,
                 period_group = NULL,
                 ...){
  # change family name if the input is "binomial"
  if(family=="binomial" & gee==FALSE){
    family <- "quasibinomial"
  }
  data <- as.data.frame(data)
  # prepare data
  if(unequal_interval==TRUE){
    if(!is.null(age_group)){
      age_group_split <- stringr::str_extract_all(age_group, "[0-9]+", simplify = T)
      data$acc <- cut(data[,age],
                      breaks = as.numeric(c(age_group_split[1], age_group_split[,2])),
                      right = T, include.lowest = T,
                      labels = 1:length(age_group))
      data[,age] <- as.factor(data$acc)
    }else{
      data$acc <- floor((data[,age]-min(age_range))/age_interval) + 1
      data[,age] <- as.factor(data$acc)
    }
    if(!is.null(period_group)){
      period_group_split <- stringr::str_extract_all(period_group, "[0-9]+", simplify = T)
      data$pcc <- cut(data[,period],
                      breaks = as.numeric(c(period_group_split[1], period_group_split[,2])),
                      right = T, include.lowest = T,
                      labels = 1:length(period_group))
      data[,period] <- as.factor(data$pcc)
    }else{
      data$pcc <- floor((data[,period]-min(period_range))/period_interval) + 1
      data[,period] <- as.factor(data$pcc)
    }
  }
  pre <- temp_model(outcome = outcome,
                    age = age,
                    period = period,
                    cohort = cohort,
                    weight = weight,
                    covariate = covariate,
                    data = data,
                    family = family,
                    gee = gee,
                    id = id,
                    corstr = corstr)
  A <- pre$A
  P <- pre$P
  C <- pre$C
  temp6 <- pre$model
  age. <- age; period. <- period; cohort. <- cohort; outcome. <- outcome
  family. <- family; weight. <- weight; gee. <- gee
  # F test in Step 1 and Step 2
  if(dev.test==TRUE){
    Tests <- tests(model = temp6, A=A, P=P, C=C, data = data, weight = weight.,
                   age = age., period = period., cohort = cohort.,
                   outcome = outcome., family = family.)
  }else{
    Tests <- NULL
  }
  # main effect
  MainEffect <- maineffect(A=A, P=P, C=C, model = temp6, gee=gee.)
  # cohort deviation
  CohortDeviation <- cohortdeviation(A=A, P=P, C=C, model = temp6, gee=gee.)
  # CohortDeviation <- cohortdeviation(A=A, P=P, C=C, model = temp6, gee=gee.,
  #                                    unequal_interval = unequal_interval,
  #                                    age_range = age_range,
  #                                    period_range = period_range,
  #                                    age_interval = age_interval,
  #                                    period_interval = period_interval,
  #                                    age_group = age_group,
  #                                    period_group = period_group)
  if(print=="TRUE"){
    # Main Effect
    message("Intercept: \n")
    # print(MainEffect$intercept)
    print(data.frame(estimate = sprintf("%.3f", as.numeric(MainEffect$intercept[1])),
                     se = sprintf("%.3f", as.numeric(MainEffect$intercept[2])),
                     p = sprintf("%.3f", as.numeric(MainEffect$intercept[3])),
                     sig = sprintf("%.3s", MainEffect$intercept[4])))
    message("")
    message("Age Effect: \n")
    # print(MainEffect$age_effect)
    print(data.frame(age_group = MainEffect$age_effect[,1],
                     age_estimate = sprintf("%.3f", as.numeric(MainEffect$age_effect[,2])),
                     age_se = sprintf("%.3f", as.numeric(MainEffect$age_effect[,3])),
                     age_p = sprintf("%.3f", as.numeric(MainEffect$age_effect[,4])),
                     age_sig = MainEffect$age_effect[,5]))
    message("")
    message("Period Effect: \n")
    # print(MainEffect$period_effect)
    print(data.frame(period_group = MainEffect$period_effect[,1],
                     period_estimate = sprintf("%.3f", as.numeric(MainEffect$period_effect[,2])),
                     period_se = sprintf("%.3f", as.numeric(MainEffect$period_effect[,3])),
                     period_p = sprintf("%.3f", as.numeric(MainEffect$period_effect[,4])),
                     period_sig = MainEffect$period_effect[,5]))
    message("")
    # Cohort Deviation
    message("Cohort Deviation: \n")
    # print(CohortDeviation$cohort_average)
    print(data.frame(cohort_average_group = CohortDeviation$cohort_average[,1],
                     cohort_average = sprintf("%.3f", as.numeric(CohortDeviation$cohort_average[,2])),
                     cohort_average_se = sprintf("%.3f", as.numeric(CohortDeviation$cohort_average[,3])),
                     cohort_average_t = sprintf("%.3f", as.numeric(CohortDeviation$cohort_average[,4])),
                     cohort_average_p = sprintf("%.3f", as.numeric(CohortDeviation$cohort_average[,5])),
                     cohort_average_sig = CohortDeviation$cohort_average[,6]))
    message("")
    message("Cohort Life Course Dynamics: \n")
    # print(CohortDeviation$cohort_slope)
    print(data.frame(cohort_slope_group = CohortDeviation$cohort_slope[,1],
                     cohort_slope = sprintf("%.3f", as.numeric(CohortDeviation$cohort_slope[,2])),
                     cohort_slope_se = sprintf("%.3f", as.numeric(CohortDeviation$cohort_slope[,3])),
                     cohort_slope_t = sprintf("%.3f", as.numeric(CohortDeviation$cohort_slope[,4])),
                     cohort_slope_p = sprintf("%.3f", as.numeric(CohortDeviation$cohort_slope[,5])),
                     cohort_slope_sig = CohortDeviation$cohort_slope[,6]))
    message("")
  }
  # output:
  list(model = pre$model, dev_global = Tests$dev_global,
       # dev_local = Tests$dev_local,
       intercept = MainEffect$intercept,
       age_effect = MainEffect$age_effect,
       period_effect = MainEffect$period_effect,
       cohort_average = CohortDeviation$cohort_average,
       cohort_slope = CohortDeviation$cohort_slope,
       int_matrix = CohortDeviation$int_matrix,
       cohort_index = CohortDeviation$cohort_index,
       data = data)
}
/scratch/gouwar.j/cran-all/cranData/APCI/R/apci.R
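## Usage sketch: the shipped examples only cover data that are already grouped
## into equal-width age and period categories. Below is a minimal, hedged
## sketch of the unequal-interval path; 'mydata' and the group labels are
## illustrative placeholders, not package data.
# library(APCI)
# # 'mydata' holds single-year age, calendar year, an outcome y, and weights wt
# fit <- apci(outcome = "y", age = "age", period = "year",
#             weight = "wt", data = mydata, family = "gaussian",
#             unequal_interval = TRUE,
#             # group boundaries are parsed with stringr::str_extract_all("[0-9]+")
#             age_group    = c("20-34", "35-49", "50-64"),
#             period_group = c("1990-1999", "2000-2004", "2005-2009"))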
utils::globalVariables(c("group_id", "cohort_average", "cohort_group", "star",
                         "age_group", "age_estimate", "period_group",
                         "period_estimate", "value", "temp6", "gee", "acc",
                         "pcc", "cohort", "crange", "ccc", "pnorm", "glm", "."))

# . age_estimate age_group as_tibble cohort_average cohort_group
# coord_flip element_blank geom_abline geom_line geom_path geom_point
# geom_text ggplot group_by group_id labs period_estimate period_group
# star summarise temp6 theme_bw theme_void value ylim
/scratch/gouwar.j/cran-all/cranData/APCI/R/globals.R
#' Estimate APC-I model
#'
#' Estimate the original APC-I model, a generalized linear regression model.
#'
#' @param data A data frame containing the outcome variable, age group
#' indicator, period group indicator, and covariates to be used in the model.
#' If the variable(s) are not found in data, there will be an error message
#' reminding the users to check the input data again.
#' @param outcome An object of class character containing the name of the
#' outcome variable. The outcome variable can be continuous, categorical,
#' or count.
#' @param age An object of class character representing the age group index
#' taking on a small number of distinct values in the data. Usually, the vector
#' should be converted to a factor (i.e., a "categorical" or "enumerated"
#' variable).
#' @param period An object of class character, similar to the argument of age,
#' representing the time period index in the data.
#' @param cohort An optional object of class character representing cohort
#' membership index in the data. Usually, the cohort index can be generated
#' from the age group index and time period index in the data because of the
#' intrinsic relationship among these three time-related indices.
#' @param weight An optional vector of sample weights to be used in the model
#' fitting process. If non-NULL, the weights will be used in the first step to
#' estimate the model. Observations with negative weights will be automatically
#' dropped in modeling.
#' @param covariate An optional vector of characters, representing the name(s)
#' of the user-specified covariate(s) to be used in the model. If the
#' variable(s) are not found in data, there will be an error message reminding
#' the users to check the data again.
#' @param family Used to specify the statistical distribution of the error
#' term and the link function to be used in the model. Usually, it is a
#' character string naming a family function. For example, family can be
#' "binomial", "multinomial", or "gaussian". Users could also check the R
#' function glm for more details of family functions.
#' @param gee Logical, indicating if the data is cross-sectional data or
#' longitudinal/panel data. If \code{TRUE}, the generalized estimating equation
#' will be used to correct the standard error estimates. The default is
#' \code{FALSE}, indicating that the data are cross-sectional.
#' @param id A vector of character, specifying the cluster index in longitudinal
#' data. It is required when \code{gee} is \code{TRUE}. The length of the vector
#' should be the same as the number of observations.
#' @param corstr A character string, specifying a possible correlation
#' structure in the error terms when \code{gee} is \code{TRUE}. The following
#' are allowed: \code{independence}, \code{fixed}, \code{stat\_M\_dep},
#' \code{non\_stat\_M\_dep}, \code{exchangeable}, \code{AR-M} and
#' \code{unstructured}. The default value is \code{exchangeable}.
#' @param \dots Additional arguments to be passed to the function.
#'
#' @return A list containing:
#' \item{A}{Age group index.}
#' \item{P}{Period group index.}
#' \item{C}{Cohort group index.}
#' \item{model}{Fitted APC-I models of outcome on predictors.}
#'
#' @export

# load packages: if a package is not successfully loaded, install the
# corresponding package
# pkgs <- c("haven","survey","tidyverse","magrittr",'data.table')
# installpackages <- lapply(pkgs,function(x){
#   if(x %in% rownames(installed.packages()) == FALSE) {install.packages(`x`)}
# })
# loadpackages <- lapply(pkgs,function(x){
#   library(`x`,character.only = T)
# })
# rm(list = c("installpackages","loadpackages","pkgs"))

# change variable names ()
# get temp6
temp_model <- function(data,
                       outcome = "inlfc",
                       age = "acc",
                       period = "pcc",
                       cohort = NULL,
                       weight = NULL,
                       covariate = NULL,
                       family = "quasibinomial",
                       gee = FALSE,
                       id = NULL,
                       corstr = "exchangeable",
                       ...){
  ###############
  # no missing data
  ###############
  data2 <- as.data.frame(data)

  if(family == "binomial" & gee == FALSE){
    family <- "quasibinomial"
  }

  # data2 = na.omit( data[ , unique(c(outcome,age,period,cohort,weight,covariate,
  #                                   id)[!is.null(c(outcome,age,period,cohort,
  #                                   weight,covariate,id))]) ] )

  if(is.null(weight)){
    weight <- 1
  }

  # A = nlevels(data2[,age])
  # P = nlevels(data2[,period])
  A = length(unique(data2[, age]))
  P = length(unique(data2[, period]))
  C = A + P - 1

  data2$acc <- data2[, age]
  data2$pcc <- data2[, period]
  data2$acc <- as.factor(data2$acc)
  data2$pcc <- as.factor(data2$pcc)

  # if(age!='acc'&length(str_which(covariate,age))==0 ){
  #   data2[,age] <- NULL
  # }
  # if(period!='pcc'&length(str_which(covariate,period))==0){
  #   data2[,period] <- NULL
  # }
  #
  # if(length(str_which(covariate,age))>0){
  #   covariate <- covariate[-str_which(covariate,age)]
  # }
  # if(length(str_which(covariate,period))>0){
  #   covariate <- covariate[-str_which(covariate,period)]
  # }

  options(contrasts = c("contr.sum", "contr.poly"), na.action = na.omit)

  wtdata2 = survey::svydesign(id = ~1, strata = NULL, data = data2,
                              weights = as.formula(paste0("~", weight)))

  # formula in terms of the user-supplied variable names (printed for reference)
  if(!is.null(covariate) & length(covariate) > 0){
    temp6_formula <- as.formula(paste0(outcome, "~",
                                       paste(c(paste(covariate, collapse = "+"),
                                               paste0(age, "*", period)),
                                             collapse = "+")))
  }else{
    temp6_formula <- as.formula(paste0(outcome, "~",
                                       paste(c(paste0(age, "*", period)),
                                             collapse = "+")))
  }
  print(temp6_formula)

  # formula actually used for fitting, based on the standardized acc/pcc factors
  if(!is.null(covariate) & length(covariate) > 0){
    temp6_formula <- as.formula(paste0(outcome, "~",
                                       paste(c(paste(covariate, collapse = "+"),
                                               paste0('acc', "*", 'pcc')),
                                             collapse = "+")))
  }else{
    temp6_formula <- as.formula(paste0(outcome, "~",
                                       paste(c(paste0('acc', "*", 'pcc')),
                                             collapse = "+")))
  }

  if(gee == TRUE){
    temp6 = gee::gee(temp6_formula,
                     id = id,
                     # id = get(id),
                     # id = eval(parse(text = id)),
                     data = data2,
                     family = get(family),
                     corstr = corstr)
    temp6$model <- data2
  }else{
    temp6 = survey::svyglm(temp6_formula, wtdata2, family = get(family))
    if(temp6$df.residual == 0){
      temp6 = glm(temp6_formula, data2, family = get(family))
    }
  }

  # output
  list(A = A, P = P, C = C, model = temp6)
}
/scratch/gouwar.j/cran-all/cranData/APCI/R/temp6_model.R
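## Usage sketch: temp_model() is the first estimation step that apci() runs
## internally, but it is exported and can be called directly to inspect the
## full age-by-period interaction model before the APC-I decomposition.
# library(APCI)
# test_data <- APCI::women9017
# test_data$acc <- as.factor(test_data$acc)
# test_data$pcc <- as.factor(test_data$pcc)
# step1 <- temp_model(data = test_data, outcome = "inlfc",
#                     age = "acc", period = "pcc",
#                     weight = "wt", family = "gaussian")
# step1$A; step1$P; step1$C   # numbers of age, period, and cohort groups
# summary(step1$model)        # the underlying survey::svyglm fit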
#' Local and global F test
#'
#' Implement the local and global F tests for the APC-I model.
#'
#' @inheritParams apci
#' @inheritParams cohortdeviation
#'
#' @return A list containing the global F test results.
#'
#' @export

tests <- function(model, age = "acc", period = "pcc", cohort = "ccc",
                  A, P, C, data, weight = "wt", family, outcome, ...){

  if(family == "binomial"){
    family <- "quasibinomial"
  }

  ## global test ##
  # x = survey::regTermTest(model, "acc:pcc", method="Wald")
  x = survey::regTermTest(model, paste0(age, ":", period), method = "Wald")
  step1gf = c(x$Ftest, x$df, x$ddf, x$p)
  names(step1gf) = c("GlobalF", "df1", "df2", "p")
  # message("Global F test: \n")
  # print(step1gf)
  # message("")

  # global test results
  if(step1gf["p"] > 0.05){
    warning("\n no significant overall age-by-period interaction effect")
  }

  ## local F test (currently commented out) ##
  # ia.index = array(seq(1:(A*P)), dim=c(A,P))
  # data$ia <- sapply(1:nrow(data),function(i){
  #   ia.index[data$ac[i], data$pc[i]]
  # })
  # wald.test = array(rep(0, C*5), dim=c(C, 5))
  #
  # step2 <- lapply(1:C, function(k){
  #   data$ia_sel = 0
  #   data$ia_sel[data$cc==k] = data$ia[data$cc==k]
  #   data$ia_sel = as.factor(data$ia_sel)
  #
  #   wtdata = survey::svydesign(id=~1, strata=NULL, data=data,
  #                              weights=as.formula(paste0("~",weight)))
  #
  #   # m2 = survey::svyglm(inlfc ~ acc + pcc + ia_sel, wtdata, family = quasibinomial)
  #   m2 = survey::svyglm(as.formula(paste0(outcome,"~",age,"+",period,"+","ia_sel")),
  #                       wtdata, family = get(family))
  #
  #   wald = survey::regTermTest(m2, "ia_sel", method="Wald")
  #   wald.sig = rep(' ', 1)
  #   wald.sig[wald$p<0.05] = '* '
  #   wald.sig[wald$p<0.01] = '** '
  #   wald.sig[wald$p<0.001] = '***'
  #   wald.test[k,] <<- cbind(wald$Ftest, wald$df, wald$ddf, wald.sig, wald$p)
  # })
  #
  # cohort_index = 1:C
  # step2lf = cbind(cohort_index, wald.test[,c(1,2,3,5,4)])
  # colnames(step2lf) = c('cohort_index', 'df1', 'df2', 'Ftest', "p-val", "Significance")
  # message("Local F test: \n")
  # print(step2lf)
  # message("")

  # list(dev_global=step1gf, dev_local=step2lf)
  list(dev_global = step1gf)
}

# tests(model = temp6, A, P, C, data = data, age = "acc", period = "pcc", cohort = "ccc")
/scratch/gouwar.j/cran-all/cranData/APCI/R/tests.R
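## Usage sketch: tests() is normally invoked through apci(..., dev.test = TRUE);
## the global F test then comes back in the 'dev_global' slot of the fitted
## object as a named vector (F statistic, the two degrees of freedom, p-value).
# fit <- apci(outcome = "inlfc", age = "acc", period = "pcc",
#             weight = "wt", data = test_data,
#             dev.test = TRUE, family = "gaussian")
# fit$dev_global   # named vector: GlobalF, df1, df2, p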
#' Plot the hexagram heatmap
#'
#' Plot the cohort effect in the style of a hexagram.
#'
#' @param model A list recording the results from function \code{apci}.
#' @inheritParams apci
#' @param color_scale A vector including two numbers
#' indicating the limit of the values to be plotted. The
#' first number is the minimum value to be visualized and the
#' second is the maximum value to be visualized. If NULL, the
#' algorithm will automatically select the limits from the data
#' (estimation results) to set up the scale.
#' @param color_map A vector, representing the color palettes to
#' be used in the figure. The default setting is greys if color_map is
#' \code{NULL}. Alternatives, for example, can be c("blue", "yellow"),
#' blues, etc.
#' @param first_age The first age group.
#' @param first_period The first period group.
#' @param interval The width of age and period groups.
#' @param first_age_isoline Isoline for the first age group.
#' @param first_period_isoline Isoline for the first period group.
#' @param isoline_interval Interval of isolines.
#' @param line_width Width of lines. Default is 0.5.
#' @param line_color Line color. Default is grey.
#' @param label_size Axis label size. Default is 0.5.
#' @param label_color Axis label color. Default is black.
#' @param scale_units Units of scales.
#' @param wrap_cohort_labels Display the cohort labels or not. The default is
#' \code{TRUE}.
#' @param quantile A number valued between 0 and 1, representing the
#' desired percentiles to be used in visualizing the data or model.
#' If \code{NULL}, the original scale of the outcome variable will be used.
#'
#' @return A hexagram visualizing the APC-I model results.
#'
#' @examples
#' # load package
#' library("APCI")
#' # load data
#' test_data <- APCI::women9017
#' test_data$acc <- as.factor(test_data$acc)
#' test_data$pcc <- as.factor(test_data$pcc)
#' test_data$educc <- as.factor(test_data$educc)
#' test_data$educr <- as.factor(test_data$educr)
#'
#' # fit APC-I model
#' APC_I <- APCI::apci(outcome = "inlfc",
#'                     age = "acc",
#'                     period = "pcc",
#'                     cohort = "ccc",
#'                     weight = "wt",
#'                     data = test_data, dev.test = FALSE,
#'                     print = TRUE,
#'                     family = "gaussian")
#' summary(APC_I)
#'
#' # plot hexagram
#' apci.plot.hexagram(model = APC_I, age = "acc", period = "pcc", first_age = 20,
#'                    first_period = 1940, interval = 5)
#' @export

# hexagram ####
# matrix: age as rows, period as columns
apci.plot.hexagram <- function(model,
                               age,
                               period,
                               first_age,
                               first_period,
                               interval,
                               first_age_isoline = NULL,
                               first_period_isoline = NULL,
                               isoline_interval = NULL,
                               color_scale = NULL,
                               color_map = NULL,
                               line_width = .5,
                               line_color = "grey",
                               label_size = .5,
                               label_color = "black",
                               scale_units = "Quintile",
                               wrap_cohort_labels = TRUE,
                               quantile = NULL){

  data <- model$int_matrix
  data.raw <- as.data.frame(model$model$model)
  data.raw[, age] <- data.raw$acc
  data.raw[, period] <- data.raw$pcc

  data$period <- rep(1:nlevels(data.raw[, period]),
                     each = nlevels(data.raw[, age])) %>% as.factor
  data$age <- rep(1:nlevels(data.raw[, age]),
                  nlevels(data.raw[, period])) %>% as.factor
  data$value <- data$iaesti %>% as.character %>% as.numeric
  data <- data.table::dcast(data.table::as.data.table(data),
                            age ~ period, value.var = "value") %>%
    as.data.frame %>% .[, -1] %>% as.matrix

  nnrow <- nrow(data)
  nncol <- ncol(data)

  if(!is.null(quantile)){
    data <- cut(data, quantile(data, probs = seq(0, 1, quantile)),
                include.lowest = TRUE,
                labels = quantile(data, probs = seq(0, 1, quantile))[-1])
  }
  data <- as.numeric(data)
  data <- matrix(data, nrow = nnrow, ncol = nncol)

  # setting default values for missing parameters
  if(is.null(first_age_isoline)){
    first_age_isoline = first_age
  }
  if(is.null(first_period_isoline)){
    first_period_isoline = first_period
  }
  if(is.null(isoline_interval)){
    isoline_interval = 2 * interval
  }
  if(is.null(color_scale)){
    # if color scale is missing, use the min and max of data
    color_scale[1] <- min(data)
    color_scale[2] <- max(data)
  }
  if(is.null(color_map)){
    # define jet colormap
    jet.colors <- colorRampPalette(c("black", "#00007F", "blue", "#007FFF",
                                     "cyan", "#7FFF7F", "yellow", "#FF7F00",
                                     "red", "#7F0000"))
    color_map = jet.colors(100)
  }else{
    jet.colors <- colorRampPalette(c(color_map[1], color_map[2]))(100)
    color_map = jet.colors
  }
  # end of default values

  m <- dim(data)[1]
  n <- dim(data)[2]
  last_age = first_age + (m - 1) * interval
  last_period = first_period + (n - 1) * interval
  first_cohort = first_period - last_age
  last_cohort = last_period - first_age
  age_isolines = seq(from = first_age_isoline, to = last_age,
                     by = isoline_interval)
  period_isolines = seq(from = first_period_isoline, to = last_period,
                        by = isoline_interval)
  last_age_isoline = tail(age_isolines, 1)
  first_cohort_isoline = first_period_isoline - last_age_isoline
  cohort_isolines = seq(from = first_cohort_isoline, to = last_cohort,
                        by = isoline_interval)

  periods <- seq(from = first_period, to = last_period, by = interval)
  ages <- seq(from = first_age, to = last_age, by = interval)
  cohorts <- seq(from = first_cohort, to = last_cohort, by = interval)

  n_ages <- length(ages)
  n_periods <- length(periods)
  n_cohorts <- length(cohorts)
  n_age_isolines <- length(age_isolines)
  n_period_isolines <- length(period_isolines)
  n_cohort_isolines <- length(cohort_isolines)

  # apply the limits to the data by truncating it
  data[data < color_scale[1]] = color_scale[1]
  data[data > color_scale[2]] = color_scale[2]

  # === plotting ====
  ncol <- length(color_map)
  not_nan_data <- !is.nan(data)
  v_data <- as.vector(data[not_nan_data])
  datac = cut(data[not_nan_data],  # discretize the data
              seq(from = color_scale[1], to = color_scale[2], length.out = ncol),
              include.lowest = TRUE, labels = FALSE)

  a <- interval / sqrt(3)  # radius of the hexagon (distance from center to a vertex)
  b <- sqrt(3)/2 * a       # half height of the hexagon (distance from the center
                           # perpendicular to the middle of the top edge)
  yv <- c(0, b, b, 0, -b, -b, 0)
  xv <- c(-a, -a/2, a/2, a, a/2, -a/2, -a)

  # compute the center of each hexagon by creating an a*p grid
  # for each age-period combination
  P0 <- matrix(periods, nrow = n_ages, ncol = n_periods, byrow = TRUE)
  A0 <- t(matrix(ages, nrow = n_periods, ncol = n_ages, byrow = TRUE))
  # convert the grid to the X-Y coordinates
  X <- compute_xcoordinate(P0)
  Y <- compute_ycoordinate(P0, A0)
  # only keep those that have non-NA values
  X <- X[not_nan_data]
  Y <- Y[not_nan_data]

  # get the color for each level
  color_map2 <- color_map[datac]

  Xvec <- as.vector(X)
  Yvec <- as.vector(Y)
  n_hexagons <- length(Xvec)

  # compute the X and Y coordinates for each hexagon -
  # each hexagon is a row and each point is a column
  Xhex <- outer(Xvec, xv, '+')
  Yhex <- outer(Yvec, yv, '+')

  minX <- min(Xhex) - interval
  maxX <- max(Xhex) + interval
  if (wrap_cohort_labels){
    minY <- min(Yhex) - interval
  } else {
    minY <- compute_ycoordinate(p = first_period,
                                a = first_age - (last_period - first_period)) - interval
  }
  maxY <- max(Yhex) + interval

  # two columns - one for the plot, the other for the colorbar
  layout(t(1:2), widths = c(4, 1))
  par(mar = c(.5, .5, .5, .5))
  plot(x = NULL, y = NULL, xlim = c(minX, maxX), ylim = c(minY, maxY),
       axes = FALSE, frame.plot = FALSE,
       xaxt = 'n', yaxt = 'n', type = 'n', asp = 1)

  for (i in 1:n_hexagons){
    polygon(x = Xhex[i, ], y = Yhex[i, ],
            col = color_map2[i],
            border = NA,  # color of polygon border
            lwd = 1)
  }

  # age-isolines
  y1 <- compute_ycoordinate(first_period, age_isolines)
  y2 <- compute_ycoordinate(last_period + interval, age_isolines)
  x1 <- compute_xcoordinate(first_period)
  x2 <- compute_xcoordinate(last_period + interval)
  for (i in 1:n_age_isolines){
    lines(x = c(x1, x2), y = c(y1[i], y2[i]),
          col = line_color, lwd = line_width)
    text(x = x2, y = y2[i], labels = paste("A:", age_isolines[i]),
         col = label_color, cex = label_size, srt = -30, adj = c(0, 0.5))
  }

  # period-isolines
  x <- compute_xcoordinate(period_isolines)
  y1 <- compute_ycoordinate(period_isolines, first_age)
  y2 <- compute_ycoordinate(period_isolines, last_age + interval)
  for (i in 1:n_period_isolines){
    lines(x = c(x[i], x[i]), y = c(y1[i], y2[i]),
          col = line_color, lwd = line_width)
    text(x = x[i], y = y2[i], labels = paste("P:", period_isolines[i]),
         col = label_color, cex = label_size, srt = 90, adj = c(0, .5))  # pos = 4
  }

  # cohort-isolines (need some more processing!)
  # determine the periods where the cohort isolines cross the last age
  p_top <- cohort_isolines + last_age
  p_top <- p_top[p_top < last_period]
  n_top <- length(p_top)
  # and the periods where they cross the first age
  p_bottom <- cohort_isolines + first_age
  p_bottom <- p_bottom[p_bottom > first_period]
  n_bottom <- length(p_bottom)
  # and the ages where they cross the first period
  a_left <- first_period - cohort_isolines
  if (wrap_cohort_labels){
    a_left <- a_left[a_left >= first_age]
  }
  n_left <- length(a_left)
  # and the ages where they cross the last period
  a_right <- last_period - cohort_isolines
  a_right <- a_right[a_right <= last_age]
  n_right <- length(a_right)

  # combine the periods and ages initial and final points on the a*p coordinates
  # first the left-bottom edge
  if (wrap_cohort_labels){
    p1 <- c(rep(first_period, n_left), p_bottom)
    a1 <- c(a_left, rep(first_age, n_bottom))
  } else {
    p1 <- c(rep(first_period, n_left))
    a1 <- c(a_left)
  }
  # then the top-right edge
  p2 <- c(p_top, rep(last_period, n_right))
  a2 <- c(rep(last_age, n_top), a_right)

  # convert the a*p coordinates to x-y coordinates
  x1 <- compute_xcoordinate(p1 - interval)  # , a1-1)
  x2 <- compute_xcoordinate(p2)             # , a2)
  y1 <- compute_ycoordinate(p1 - interval, a1 - interval)
  y2 <- compute_ycoordinate(p2, a2)

  # finally draw the lines
  for (i in 1:n_cohort_isolines){
    lines(x = c(x1[i], x2[i]), y = c(y1[i], y2[i]),
          col = line_color, lwd = line_width)
    text(x = x1[i], y = y1[i],
         labels = paste("C:", cohort_isolines[i] + n_age_isolines),
         col = label_color, cex = label_size, srt = 30, adj = c(1, 0.5))
  }

  # create the colorbar
  par(las = 2)
  par(mar = c(10, 2, 10, 2.5))
  cb_range <- seq(from = color_scale[1], to = color_scale[2], length.out = ncol)
  image(y = cb_range, z = t(cb_range), col = color_map,
        axes = FALSE, main = scale_units, cex.main = .8)
  axis(4, cex.axis = label_size, mgp = c(0, .5, 0))
}

#' Calculate x coordinate value
#'
#' Calculate the x coordinate value for plotting the hexagram when visualizing
#' APC-I results.
#'
#' @param p Period value.
#' @return The coordinate value for the x axis.
#' @export
compute_xcoordinate <- function(p) {
  x <- p * sqrt(3) / 2
  return(x)
}

#' Calculate y coordinate value
#'
#' Calculate the y coordinate value for plotting the hexagram when visualizing
#' APC-I results.
#'
#' @param p Period value.
#' @param a Age value.
#' @return The coordinate value for the y axis.
#' @export
compute_ycoordinate <- function(p, a){
  y <- a - p / 2
  return(y)
}

# heatmap ####
#' Plot the heatmap for APC-I model
#'
#' Plot the heatmap to visualize cohort effects estimated by the APC-I model.
#'
#' @inheritParams apci.plot.hexagram
#' @param \dots Additional arguments to be passed to the function.
#'
#' @return A heatmap visualizing cohort effects estimated by the APC-I model.
#' @examples
#' # load package
#' library("APCI")
#' # load data
#' test_data <- APCI::women9017
#' test_data$acc <- as.factor(test_data$acc)
#' test_data$pcc <- as.factor(test_data$pcc)
#' test_data$educc <- as.factor(test_data$educc)
#' test_data$educr <- as.factor(test_data$educr)
#'
#' # fit APC-I model
#' APC_I <- APCI::apci(outcome = "inlfc",
#'                     age = "acc",
#'                     period = "pcc",
#'                     cohort = "ccc",
#'                     weight = "wt",
#'                     data = test_data, dev.test = FALSE,
#'                     print = TRUE,
#'                     family = "gaussian")
#' summary(APC_I)
#'
#' # plot heatmap
#' apci.plot.heatmap(model = APC_I, age = "acc", period = "pcc", first_age = 20,
#'                   first_period = 1940, interval = 5)
#' @export
apci.plot.heatmap <- function(model,
                              age,
                              period,
                              color_map = NULL,
                              color_scale = NULL,
                              quantile = NULL,
                              ...){

  data <- model$int_matrix
  data.raw <- as.data.frame(model$model$model)
  data.raw[, age] <- data.raw$acc
  data.raw[, period] <- data.raw$pcc

  data$period <- rep(1:nlevels(data.raw[, period]),
                     each = nlevels(data.raw[, age])) %>% as.factor
  data$age <- rep(1:nlevels(data.raw[, age]),
                  nlevels(data.raw[, period])) %>% as.factor
  data$value <- data$iaesti %>% as.character %>% as.numeric

  if(!is.null(quantile)){
    data$value <- cut(data$value,
                      quantile(data$value, probs = seq(0, 1, quantile)),
                      include.lowest = TRUE,
                      labels = quantile(data$value, probs = seq(0, 1, quantile))[-1])
    data$value <- as.numeric(data$value)
    color_scale <- c(min(data$value, na.rm = TRUE), max(data$value, na.rm = TRUE))
    color_scale[1] <- round(color_scale[1], 2)  # - 0.01
    color_scale[2] <- round(color_scale[2], 2)  # + 0.01
    bk <- seq(1, 1/quantile, 1)
    # nm <- "Age-Period Interaction\nQuantile"
    nm <- "Deviation (Quantile)"
  }else{
    color_scale <- c(min(data$value, na.rm = TRUE), max(data$value, na.rm = TRUE))
    color_scale[1] <- round(color_scale[1], 2) - 0.01
    color_scale[2] <- round(color_scale[2], 2) + 0.01
    bk <- seq(color_scale[1], color_scale[2], (color_scale[2] - color_scale[1])/5)
    # nm <- "Age-Period Interaction"
    nm <- "Deviation"
  }

  if(is.null(color_map)){
    color_map <- colorRampPalette(c('white', 'black'))(100)
  }else{
    color_map <- colorRampPalette(c(color_map[1], color_map[2]))(100)
  }

  g <- ggplot2::ggplot(data, ggplot2::aes(x = period, y = age, fill = value)) +
    ggplot2::geom_tile() +
    # geom_text(label = data$iasig, color = "green", size = 5) +  # remove the stars
    ggplot2::coord_equal() +
    ggplot2::theme_bw() +
    ggplot2::scale_fill_gradientn(colors = color_map,
                                  # low = color_map[1],
                                  # high = color_map[length(color_map)],
                                  name = nm,
                                  breaks = bk,
                                  limits = color_scale) +
    ggplot2::theme(legend.title = ggplot2::element_text(size = 8)) +
    ggplot2::labs(x = 'Period Group', y = 'Age Group')

  model$cohort_average$cohort_index <- seq(nlevels(data.raw[, age]) - 1,
                                           -nlevels(data.raw[, period]) + 1, -1)

  # run <- lapply(model$cohort_average$cohort_index[model$cohort_average$sig!=" "],
  run <- lapply((1:nrow(model$cohort_average))[model$cohort_average$sig != " "],
                function(i){
    if(is.na(model$cohort_slope[i, "sig"])){
      intercept <- model$cohort_average$cohort_index[i]
      g <<- g + ggplot2::geom_abline(intercept = intercept, slope = 1,
                                     color = "green", linetype = 'dotted')
    }else{
      if(model$cohort_slope[i, "sig"] == " "){
        intercept <- model$cohort_average$cohort_index[i]
        g <<- g + ggplot2::geom_abline(intercept = intercept, slope = 1,
                                       color = "green", linetype = 'dotted')
      }else{
        if((model$cohort_average[i, "cohort_average"] %>% as.character %>% as.numeric) > 0){
          intercept <- model$cohort_average$cohort_index[i]
          g <<- g + ggplot2::geom_abline(intercept = intercept, slope = 1,
                                         color = "green", linetype = 'solid')
        }else{
          intercept <- model$cohort_average$cohort_index[i]
          g <<- g + ggplot2::geom_abline(intercept = intercept, slope = 1,
                                         color = "green", linetype = 'dashed')
        }
      }
    }
  })

  g +
    ggplot2::labs(caption = "Line:\n average cohort effect is significantly different from the main effect
Intra-cohort change:\n solid: positive; dashed: negative; dotted: no change")
}

# model <- APC_I
# age <- "acc"
# period <- "pcc"
#
# apci.plot.heatmap(model = APC_I,
#                   age = "acc", period = 'pcc')

# line.raw ####
#' Plotting age and period patterns
#'
#' Visualize the age and period patterns by plotting the
#' raw scores in each age and period square.
#'
#' @inheritParams apci.plot.hexagram
#' @inheritParams apci
#' @param outcome_var An object of class character indicating
#' the name of the outcome variable used in the model. The
#' outcome variable can be a continuous, binary, categorical, or count variable.
#' @param \dots Additional arguments to be passed to the function.
#'
#' @return A plot with two panels showing the age and period trends separately.
#' @examples
#' # load package
#' library("APCI")
#' # load data
#' test_data <- APCI::women9017
#' test_data$acc <- as.factor(test_data$acc)
#' test_data$pcc <- as.factor(test_data$pcc)
#' test_data$educc <- as.factor(test_data$educc)
#' test_data$educr <- as.factor(test_data$educr)
#'
#' # fit APC-I model
#' APC_I <- APCI::apci(outcome = "inlfc",
#'                     age = "acc",
#'                     period = "pcc",
#'                     cohort = "ccc",
#'                     weight = "wt",
#'                     data = test_data, dev.test = FALSE,
#'                     print = TRUE,
#'                     family = "gaussian")
#' summary(APC_I)
#'
#' # plot the raw pattern
#' apci.plot.raw(data = test_data, outcome_var = "inlfc", age = "acc",
#'               period = "pcc")
#' @export
apci.plot.raw <- function(data, outcome_var, age, period, ...){

  data <- as.data.frame(data)
  data$outcome_var <- data[, outcome_var]
  data$age <- data[, age] %>% as.factor
  data$period <- data[, period] %>% as.factor

  g1 <- ggplot2::ggplot(data %>%
                          dplyr::group_by(age, period) %>%
                          dplyr::summarize(outcome_var = mean(outcome_var, na.rm = TRUE),
                                           .groups = 'drop'),
                        ggplot2::aes(x = period, group = age,
                                     y = outcome_var, col = age)) +
    ggplot2::geom_point() +
    ggplot2::geom_path() +
    ggplot2::theme_bw() +
    ggplot2::labs(x = "Period Group", names = "Age", y = as.character(outcome_var)) +
    ggplot2::geom_point(data = data %>%
                          dplyr::group_by(period) %>%
                          dplyr::summarise(outcome_var = mean(outcome_var, na.rm = TRUE)),
                        mapping = ggplot2::aes(x = period, group = NA,
                                               y = outcome_var, col = NA),
                        size = 3, shape = 8, color = "black")

  g2 <- ggplot2::ggplot(data %>%
                          dplyr::group_by(age, period) %>%
                          dplyr::summarize(outcome_var = mean(outcome_var, na.rm = TRUE),
                                           .groups = 'drop'),
                        ggplot2::aes(x = age, group = period,
                                     y = outcome_var, col = period)) +
    ggplot2::geom_point() +
    ggplot2::geom_path() +
    ggplot2::theme_bw() +
    ggplot2::labs(x = "Age Group", names = "Period", y = as.character(outcome_var)) +
    ggplot2::geom_point(data = data %>%
                          dplyr::group_by(age) %>%
                          dplyr::summarise(outcome_var = mean(outcome_var, na.rm = TRUE)),
                        mapping = ggplot2::aes(x = age, group = NA,
                                               y = outcome_var, col = NA),
                        size = 3, shape = 8, color = "black")

  ggpubr::ggarrange(g1, g2,
                    labels = c("A", "B"),
                    ncol = 2, nrow = 1)
}

# combine ####
#' Plotting age and period raw scores and APC-I model results
#'
#' Arrange the data exploration plots and the model result plots
#' in a harmonized layout.
#'
#' @inheritParams apci.plot.heatmap
#' @inheritParams apci
#' @param outcome_var An object of class character indicating
#' the name of the outcome variable used in the model. The
#' outcome variable can be a continuous, binary, categorical, or count variable.
#' @param type Character, "explore" or "model". If type is "explore",
#' plots for age and period raw scores will be generated. If type is
#' "model", model results will be plotted. The default setting is "model".
#' @param \dots Additional arguments to be passed to the function.
#'
#' @return A plot with three panels showing the raw scores or APC-I
#' model results.
#' @examples
#' # load package
#' library("APCI")
#' # load data
#' test_data <- APCI::women9017
#' test_data$acc <- as.factor(test_data$acc)
#' test_data$pcc <- as.factor(test_data$pcc)
#' test_data$educc <- as.factor(test_data$educc)
#' test_data$educr <- as.factor(test_data$educr)
#'
#' # fit APC-I model
#' APC_I <- APCI::apci(outcome = "inlfc",
#'                     age = "acc",
#'                     period = "pcc",
#'                     cohort = "ccc",
#'                     weight = "wt",
#'                     data = test_data, dev.test = FALSE,
#'                     print = TRUE,
#'                     family = "gaussian")
#' summary(APC_I)
#'
#' ## plot the raw pattern
#' apci.plot(data = test_data, outcome_var = "inlfc", age = "acc", model = APC_I,
#'           period = "pcc", type = "explore")
#' ## plot the model results
#' apci.plot(data = test_data, outcome_var = "inlfc", age = "acc", model = APC_I,
#'           period = "pcc", type = "model")
#' @export
apci.plot <- function(model,
                      age,
                      period,
                      outcome_var,
                      type = "model",
                      quantile = NULL,
                      ...){

  if(type == "explore"){
    data <- as.data.frame(model$model$data)
    data$outcome_var <- data[, outcome_var]
    data$age <- data[, age]
    data$period <- data[, period]

    data <- dplyr::group_by(.data = data, age, period)
    data <- dplyr::summarise(.data = data,
                             outcome_var = mean(outcome_var, na.rm = TRUE))
    # data <- data %>%
    #   group_by(age, period) %>%
    #   summarise(outcome_var = mean(outcome_var, na.rm = T))

    g1 <- ggplot2::ggplot(data,
                          ggplot2::aes(x = period, group = age,
                                       y = outcome_var, col = age)) +
      ggplot2::geom_point() +
      ggplot2::geom_path() +
      ggplot2::theme_bw() +
      ggplot2::labs(x = "Period Group", names = "Age", y = as.character(outcome_var)) +
      ggplot2::geom_point(data = data %>%
                            dplyr::group_by(period) %>%
                            dplyr::summarise(outcome_var = mean(outcome_var, na.rm = TRUE)),
                          mapping = ggplot2::aes(x = period, group = NA,
                                                 y = outcome_var, col = NA),
                          size = 3, shape = 8, color = "black")

    g2 <- ggplot2::ggplot(data,
                          ggplot2::aes(x = age, group = period,
                                       y = outcome_var, col = period)) +
      ggplot2::geom_point() +
      ggplot2::geom_path() +
      ggplot2::theme_bw() +
      ggplot2::labs(x = "Age Group", names = "Period", y = as.character(outcome_var)) +
      ggplot2::geom_point(data = data %>%
                            dplyr::group_by(age) %>%
                            dplyr::summarise(outcome_var = mean(outcome_var, na.rm = TRUE)),
                          mapping = ggplot2::aes(x = age, group = NA,
                                                 y = outcome_var, col = NA),
                          size = 3, shape = 8, color = "black")

    g3 <- ggplot2::ggplot(data, ggplot2::aes(x = period, y = age, fill = outcome_var)) +
      ggplot2::geom_tile() +
      ggplot2::coord_equal() +
      ggplot2::theme_bw() +
      ggplot2::theme(legend.title = ggplot2::element_blank()) +
      ggplot2::labs(x = 'Period Group', y = 'Age Group')

    g4 <- ggplot2::ggplot(data, ggplot2::aes(x = age, y = outcome_var)) +
      ggplot2::theme_void() +
      ggplot2::geom_text(x = 0.5, y = 0.5, label = c("Data Exploration"))

    g <- ggpubr::ggarrange(g3, g1 + ggplot2::coord_flip(), g2, g4,
                           labels = c("C", "A", "P"),
                           ncol = 2, nrow = 2)
  }

  if(type == "model"){
    g3 <- apci.plot.heatmap(model = model, age = age, period = period,
                            quantile = quantile,
                            color_map = c('blue', 'yellow'))
    g3 <- g3 + ggplot2::theme(legend.title = ggplot2::element_blank()) +
      ggplot2::labs(caption = "")

    # define the scales
    data <- model$int_matrix
    data.raw <- as.data.frame(model$model$model)
    data.raw[, age] <- data.raw$acc
    data.raw[, period] <- data.raw$pcc
    data$period <- as.factor(rep(1:nlevels(data.raw[, period]),
                                 each = nlevels(data.raw[, age])))
    data$age <- as.factor(rep(1:nlevels(data.raw[, age]),
                              nlevels(data.raw[, period])))
    data$value <- data$iaesti %>% as.character %>% as.numeric
    color_scale <- c(min(data$value, na.rm = TRUE), max(data$value, na.rm = TRUE))
    color_scale[1] <- round(color_scale[1], 2) - 0.01
    color_scale[2] <- round(color_scale[2], 2) + 0.01
    bk <- seq(color_scale[1], color_scale[2], (color_scale[2] - color_scale[1])/5)
    # define the scales ===

    data <- model$age_effect %>% as.data.frame
    data$age_estimate <- data$age_estimate %>% as.character %>% as.numeric
    data$age_group <- data$age_group %>% as.character %>% as.numeric %>% as.factor

    g4 <- ggplot2::ggplot(data,
                          ggplot2::aes(x = age_group, group = NA, y = age_estimate)) +
      ggplot2::theme_void() +
      ggplot2::geom_text(x = 0.5, y = 0.5, label = c("APC-I Model"))

    g5 <- ggplot2::ggplot(data,
                          ggplot2::aes(x = age_group, group = NA, y = age_estimate)) +
      ggplot2::geom_point() +
      ggplot2::geom_line() +
      ggplot2::labs(x = "Age Group", y = "Estimated Age Effect") +
      ggplot2::theme_bw()

    data <- model$period_effect %>% as.data.frame
    data$period_estimate <- data$period_estimate %>% as.character %>% as.numeric
    data$period_group <- data$period_group %>% as.character %>% as.numeric %>% as.factor
    color_scale <- c(min(data$period_estimate, na.rm = TRUE),
                     max(data$period_estimate, na.rm = TRUE))
    color_scale[1] <- round(color_scale[1], 2) - 0.01
    color_scale[2] <- round(color_scale[2], 2) + 0.01

    g6 <- ggplot2::ggplot(data,
                          ggplot2::aes(x = period_group, group = NA, y = period_estimate)) +
      ggplot2::geom_point() +
      ggplot2::geom_line() +
      ggplot2::labs(x = "Period Group", y = "Estimated Period Effect") +
      ggplot2::ylim(color_scale) +
      ggplot2::theme_bw()

    g <- ggpubr::ggarrange(g3, g5 + ggplot2::coord_flip(), g6, g4,
                           labels = c("C", "A", "P"),
                           ncol = 2, nrow = 2)
  }
  g
}

# barplot ----
#' Make barplot for cohort effect
#'
#' Visualize cohort effects estimated by the APC-I model with bar plots.
#'
#' @inheritParams apci.plot
#' @param outcome_var An object of class character indicating
#' the name of the outcome variable used in the model. The
#' outcome variable can be a continuous, binary, categorical, or count variable.
#' @param cohort_label An optional vector, representing the labels of
#' cohort groups on the x axis.
#' @param \dots Additional arguments to be passed to the function.
#'
#' @return A bar plot visualizing the cohort effects estimated by the APC-I model.
#' @examples
#' # load package
#' library("APCI")
#' # load data
#' test_data <- APCI::women9017
#' test_data$acc <- as.factor(test_data$acc)
#' test_data$pcc <- as.factor(test_data$pcc)
#' test_data$educc <- as.factor(test_data$educc)
#' test_data$educr <- as.factor(test_data$educr)
#'
#' # fit APC-I model
#' APC_I <- APCI::apci(outcome = "inlfc",
#'                     age = "acc",
#'                     period = "pcc",
#'                     cohort = "ccc",
#'                     weight = "wt",
#'                     data = test_data, dev.test = FALSE,
#'                     print = TRUE,
#'                     family = "gaussian")
#' summary(APC_I)
#'
#' ## visualizing estimated cohort effects with bar plot
#' apci.bar(model = APC_I, age = "acc", period = "pcc")
#' @export
apci.bar <- function(model,
                     age,
                     period,
                     outcome_var,
                     cohort_label = NULL,
                     # type = "model",
                     # quantile = NULL,
                     ...){

  df <- as.data.frame(model$cohort_average)
  df$cohort_group <- if(is.null(cohort_label)){
    factor(df$cohort_group)
  }else{
    factor(df$cohort_group, labels = cohort_label)
  }
  df$group_id <- NA
  df$cohort_average <- as.numeric(as.character(df$cohort_average))
  df$star <- ifelse(df$sig != " ", df$cohort_average, NA)
  # df$cohort_average_exp <- exp(df$cohort_average)

  p <- ggplot2::ggplot(df, ggplot2::aes(group = group_id, y = cohort_average)) +
    ggplot2::geom_bar(ggplot2::aes(x = cohort_group, fill = group_id),
                      stat = "identity",
                      position = ggplot2::position_dodge(),
                      col = "black") +
    ggplot2::scale_fill_brewer(palette = "Greys", name = "Region") +
    ggplot2::theme_minimal() +
    ggplot2::labs(x = "cohort group", names = "", y = "cohort deviation") +
    ggplot2::theme(axis.text.x = ggplot2::element_text(angle = 90, vjust = 0.5)) +
    ggplot2::geom_text(ggplot2::aes(x = cohort_group, y = star * 1.1),
                       label = "*", color = "red", size = 7)
  p
}

# model <- APC_I
# age <- "acc"
# period <- "pcc"
#
# apci.bar(model = APC_I,
#          age = "acc", period = 'pcc')
/scratch/gouwar.j/cran-all/cranData/APCI/R/visualization.R
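## Usage sketch: the 'quantile' argument of the plotting functions bins the
## estimated age-by-period deviations into equal-probability groups before
## drawing, which can make weak patterns easier to see; quantile = 0.2 below
## requests quintiles.
# apci.plot.heatmap(model = APC_I, age = "acc", period = "pcc", quantile = 0.2)
# apci.plot.hexagram(model = APC_I, age = "acc", period = "pcc",
#                    first_age = 20, first_period = 1940, interval = 5,
#                    quantile = 0.2)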
EERpenalties <- function(n, k = n - 1, m = min(n - 2, k), eer = .20,
                         reps = 50000, rnd = 3){

  # Checks on conditions that must be satisfied
  if(!isTRUE(all.equal(n %% 4, 0))) stop("n must be a multiple of 4")
  if(!isTRUE(all.equal(k %% 1, 0))) stop("k must be an integer")
  if(!isTRUE(all.equal(m %% 1, 0))) stop("m must be an integer")
  if(k > n - 1) stop("k cannot be greater than n-1")
  if(k < 1) stop("k cannot be less than 1")
  if(m > k) stop("m cannot be greater than k")
  if(m < 1) stop("m cannot be less than 1")
  if(m > (n - 2)) stop("m cannot be greater than n-2")
  if(eer <= 0) stop("EER must be greater than 0")
  if(eer >= 1) stop("EER must be less than 1")

  # The algorithm calculates differences between penalties,
  # starting with diff(m-1) = pen(m) - pen(m-1).
  cs <- NULL
  startj <- m - 1

  # The value of diff(m-1) can (under a certain condition) be calculated analytically.
  if(qf((1 - eer / (k + 1 - m)), 1, (n - 1 - m)) > n - 1 - m){
    cs <- log(qf((1 - eer / (k + 1 - m)), 1, (n - 1 - m)) / (n - 1 - m) + 1)
    startj <- m - 2
  }

  # Stop if m=1 and diff(0) was calculated above.
  if(startj < 0){
    cs <- round(c(0, cs), rnd)
    return(cs)
  }

  # Loop that estimates diff(j) for j = startj, startj-1, ..., 1, 0.
  # The estimate of diff(j) is based on assuming j large active effects.
  for(j in startj:0){

    # Create a matrix of squared random N(0,1) observations. The number of
    # columns is n-1-j, which equals the inactive columns (k-j) plus the
    # unused columns (n-1-k).
    sqres <- matrix(rnorm(reps * (n - 1 - j))^2, reps, n - 1 - j)

    # If there is more than one inactive column (k != j+1), sort the entries
    # for the inactive columns. The inactive columns are the last (k-j) columns.
    if((n - k) != (n - 1 - j))
      sqres[ , (n - k):(n - 1 - j)] <- t(apply(sqres[ , (n - k):(n - 1 - j)], 1, sort))

    # Find the RSS for models containing just the j active effects,
    # the j-effect model + 1, ..., the j-effect model + m-j.
    lRSS <- log(apply(sqres, 1, cumsum)[(n - m - 1):(n - 1 - j), ])
    d1 <- dim(lRSS)[1]

    # If d1==2 then m = j+1. In this case at most one variable is being added.
    # The differences in log(RSS) are found and the relevant quantile taken to
    # estimate diff(j).
    if(d1 == 2){
      out <- lRSS[2, ] - lRSS[1, ]
      cs <- as.numeric(quantile(out, 1 - eer))
    }

    # If m > j+1 then the maximum number of additional variables is >= 2.
    # The models that add >= 1 variable are compared and the one that will
    # minimize APC* is identified. For this model, the difference in log(RSS)
    # between this model and the j-variable model, plus its current penalty,
    # is recorded in out. The value of diff(j) that allows the specified EER
    # to be achieved is estimated (newc) and the current list of penalties
    # is updated.
    if(d1 > 2){
      out <- lRSS[d1, ] - apply((lRSS[-d1, ] + c(cs, 0)), 2, min)
      newc <- quantile(out, 1 - eer)
      cs <- c(cs + newc, newc)
    }
  }

  cs <- round(c(0, cs[length(cs):1]), rnd)
  attributes(cs) <- NULL
  return(cs)
}
/scratch/gouwar.j/cran-all/cranData/APCanalysis/R/EERpenalties.R
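## Usage sketch on a small design: penalties for a 12-run experiment with up
## to m = 6 active effects, controlling the experimentwise error rate at 0.20.
## 'reps' is lowered for speed; the returned penalties are Monte Carlo
## estimates and vary slightly between seeds.
# set.seed(1)
# EERpenalties(n = 12, k = 11, m = 6, eer = 0.20, reps = 10000)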
FDRpenalties <- function(n, k = n - 1, m = min(n - 2, k), fdr = .1,
                         reps = 50000, rnd = 3){

  # Checks on conditions that must be satisfied
  if(!isTRUE(all.equal(n %% 4, 0))) stop("n must be a multiple of 4")
  if(!isTRUE(all.equal(k %% 1, 0))) stop("k must be an integer")
  if(!isTRUE(all.equal(m %% 1, 0))) stop("m must be an integer")
  if(k > n - 1) stop("k cannot be greater than n-1")
  if(k < 1) stop("k cannot be less than 1")
  if(m > k) stop("m cannot be greater than k")
  if(m < 1) stop("m cannot be less than 1")
  if(m > (n - 2)) stop("m cannot be greater than n-2")
  if(fdr <= 0) stop("fdr must be greater than 0")
  if(fdr >= 1) stop("fdr must be less than 1")
  if(fdr > 1/m) warning("fdr > 1/m which results in some penalties being equal")

  # The algorithm calculates differences between penalties,
  # starting with diff(m-1) = pen(m) - pen(m-1).
  cs <- NULL
  startj <- m - 1

  # If fdr*m >= 1 then diff(j) = 0 for sufficiently large j.
  if((fdr * m) >= 1){
    num <- floor(fdr * m)
    cs <- rep(0, num)
    startj <- m - 1 - num
  }

  # Loop that estimates diff(j) for j = startj, startj-1, ..., 1, 0.
  # The estimate of diff(j) is based on assuming j large active effects.
  for(j in startj:0){

    # Create a matrix of squared random N(0,1) observations. The number of
    # columns is n-1-j, which equals the inactive columns (k-j) plus the
    # unused columns (n-1-k).
    sqres <- matrix(rnorm(reps * (n - 1 - j))^2, reps, n - 1 - j)

    # If there is more than one inactive column (k != j+1), sort the entries
    # for the inactive columns. The inactive columns are the last (k-j) columns.
    if((n - k) != (n - 1 - j))
      sqres[ , (n - k):(n - 1 - j)] <- t(apply(sqres[ , (n - k):(n - 1 - j)], 1, sort))

    # Find the RSS for models containing just the j active effects,
    # the j-effect model + 1, ..., the j-effect model + m-j.
    lRSS <- log(apply(sqres, 1, cumsum)[(n - m - 1):(n - 1 - j), ])
    d1 <- dim(lRSS)[1]

    # If d1==2 then m = j+1. In this case at most one variable is being added.
    # The differences in log(RSS) are found and the relevant quantile taken to
    # estimate diff(j).
    if(d1 == 2){
      out <- lRSS[2, ] - lRSS[1, ]
      cs <- as.numeric(quantile(out, 1 - (fdr * m)))
    }

    # If m > j+1 then the maximum number of additional variables is >= 2.
    # The models that add >= 1 variable are compared and the one that will
    # minimize APC* is identified. For this model, the difference in log(RSS)
    # between this model and the j-variable model, plus its current penalty,
    # is recorded in out, and the number of additional variables in wts.
    # The number of allowable errors is calculated (toterrs), the value of
    # diff(j) that allows this to be achieved is estimated (newc), and the
    # current list of penalties is updated.
    if(d1 > 2){
      out <- lRSS[d1, ] - apply((lRSS[-d1, ] + c(cs, 0)), 2, min)
      wts <- d1 - apply((lRSS[-d1, ] + c(cs, 0)), 2, order)[1, ]
      wts <- wts / (wts + j)
      ord <- order(out, decreasing = TRUE)
      oout <- out[ord]
      owts <- cumsum(wts[ord])
      toterrs <- reps * fdr
      newc <- min(oout[owts <= toterrs])
      cs <- c(cs + newc, newc)
    }
  }

  # The set of estimated penalties is returned.
  cs <- round(c(0, cs[length(cs):1]), rnd)
  attributes(cs) <- NULL
  return(cs)
}
/scratch/gouwar.j/cran-all/cranData/APCanalysis/R/FDRpenalties.R
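## Usage sketch: the same 12-run design, now controlling the false discovery
## rate at 0.10 (note that fdr must stay below 1/m to keep all penalties
## distinct; here 0.10 < 1/6).
# set.seed(1)
# FDRpenalties(n = 12, k = 11, m = 6, fdr = 0.10, reps = 10000)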
IERpenalties <- function(n, k = n - 1, m = min(n - 2, k), ier = .05,
                         reps = 50000, rnd = 3){

  n = as.numeric(n)
  k = as.numeric(k)
  m = as.numeric(m)
  ier = as.numeric(ier)

  # Checks on conditions that must be satisfied
  if(!isTRUE(all.equal(n %% 4, 0))) stop("n must be a multiple of 4")
  if(!isTRUE(all.equal(k %% 1, 0))) stop("k must be an integer")
  if(!isTRUE(all.equal(m %% 1, 0))) stop("m must be an integer")
  if(k > n - 1) stop("k cannot be greater than n-1")
  if(k < 1) stop("k cannot be less than 1")
  if(m > k) stop("m cannot be greater than k")
  if(m < 1) stop("m cannot be less than 1")
  if(m > (n - 2)) stop("m cannot be greater than n-2")
  if(ier <= 0) stop("IER must be greater than 0")
  if(ier >= 1) stop("IER must be less than 1")
  if((k - m + 1) * ier >= 1) stop("(k-m+1)*ier must be less than 1")

  # The algorithm calculates differences between penalties,
  # starting with diff(m-1) = pen(m) - pen(m-1).
  cs <- NULL
  startj <- m - 1

  # The value of diff(m-1) can be calculated analytically under the condition
  # being tested.
  if(qf((1 - ier), 1, (n - 1 - m)) > n - 1 - m){
    cs <- log(qf((1 - ier), 1, (n - 1 - m)) / (n - 1 - m) + 1)
    startj <- m - 2
  }

  # Stop if m=1 and diff(0) was calculated above.
  if(startj < 0){
    cs <- round(c(0, cs), rnd)
    return(cs)
  }

  # Loop that estimates diff(j) for j = startj, startj-1, ..., 1, 0.
  # The estimate of diff(j) is based on assuming j large active effects.
  for(j in startj:0){

    # Create a matrix of squared random N(0,1) observations. The number of
    # columns is n-1-j, which equals the inactive columns (k-j) plus the
    # unused columns (n-1-k).
    sqres <- matrix(rnorm(reps * (n - 1 - j))^2, reps, n - 1 - j)

    # If there is more than one inactive column (k != j+1), sort the entries
    # for the inactive columns. The inactive columns are the last (k-j) columns.
    if((n - k) != (n - 1 - j))
      sqres[ , (n - k):(n - 1 - j)] <- t(apply(sqres[ , (n - k):(n - 1 - j)], 1, sort))

    # Find the RSS for models containing just the j active effects,
    # the j-effect model + 1, ..., the j-effect model + m-j.
    lRSS <- log(apply(sqres, 1, cumsum)[(n - m - 1):(n - 1 - j), ])
    d1 <- dim(lRSS)[1]

    # If d1==2 then m = j+1. In this case at most one variable is being added.
    # The differences in log(RSS) are found and the relevant quantile taken to
    # estimate diff(j).
    if(d1 == 2){
      out <- lRSS[2, ] - lRSS[1, ]
      cs <- as.numeric(quantile(out, 1 - ier * (k - j)))
    }

    # If m > j+1 then the maximum number of additional variables is >= 2.
    # The models that add >= 1 variable are compared and the one that will
    # minimize APC* is identified. For this model, the difference in log(RSS)
    # between this model and the j-variable model, plus its current penalty,
    # is recorded in out, and the number of additional variables in wts.
    # The number of allowable errors is calculated (toterrs), the value of
    # diff(j) that allows this to be achieved is estimated (newc), and the
    # current list of penalties is updated.
    if(d1 > 2){
      out <- lRSS[d1, ] - apply((lRSS[-d1, ] + c(cs, 0)), 2, min)
      wts <- d1 - apply((lRSS[-d1, ] + c(cs, 0)), 2, order)[1, ]
      ord <- order(out, decreasing = TRUE)
      oout <- out[ord]
      owts <- cumsum(wts[ord])
      toterrs <- reps * ier * (k - j)
      newc <- min(oout[owts <= toterrs])
      cs <- c(cs + newc, newc)
    }
  }

  # The set of estimated penalties is returned.
  cs <- round(c(0, cs[length(cs):1]), rnd)
  attributes(cs) <- NULL
  return(cs)
}
/scratch/gouwar.j/cran-all/cranData/APCanalysis/R/IERpenalties.R
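## Usage sketch: individual error rate control for the same design; the
## argument check requires (k - m + 1) * ier < 1, which holds here since
## (11 - 6 + 1) * 0.05 = 0.3.
# set.seed(1)
# IERpenalties(n = 12, k = 11, m = 6, ier = 0.05, reps = 10000)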
apc <- function(y, x, maxsize, level = 0.05, method = 1, data = NULL,
                effnames = NULL, reps = 50000, dp = 4){

  if(is.element("formula", class(y))){
    formula <- y
    rm(y)
    options(contrasts = c('contr.sum', 'contr.poly'))
    mod <- model.frame(formula, data = data)
    x <- model.matrix(formula, data = data)[ , -1]
    y <- model.extract(mod, "response")
  }

  m <- as.integer(maxsize)
  method <- as.integer(method)
  nr <- nrow(x)
  k <- ncol(x)
  xtx <- t(x) %*% x
  if(!isTRUE(all.equal(xtx[lower.tri(xtx)], (rep(0, ((k * (k - 1))/2))))))
    stop("model matrix is not orthogonal")

  # only fall back to the column names (and then to generic labels) when the
  # user has not supplied effect names
  if (is.null(effnames)) effnames <- colnames(x)
  if (is.null(effnames)) effnames <- paste("C", 1:k, sep = "")

  if (method == 1) cs <- IERpenalties(n = nr, k = k, m = m, ier = level, reps = reps)
  if (method == 2) cs <- EERpenalties(n = nr, k = k, m = m, eer = level, reps = reps)
  if (method == 3) cs <- FDRpenalties(n = nr, k = k, m = m, fdr = level, reps = reps)
  # cts <- cs[(m+1):1]

  bhat <- lm(y ~ x)$coefficients
  rss0 <- sum((lm(y ~ 1)$residuals)^2)
  rssE <- (bhat[-1]^2) * diag(xtx)
  ord.rssE <- order(rssE, decreasing = TRUE)
  Ests <- bhat[c(1, 1 + ord.rssE)]
  Col <- as.integer(c(0, ord.rssE))
  APC <- c(log(c(rss0, rss0 - cumsum(rssE[ord.rssE])[1:m])) + cs, rep(NA, k - m))
  K <- as.integer(which.min(APC))
  Active <- rep(" ", m + 1)
  Active[1:K] <- "* "
  apc <- APC[K]

  Results <- data.frame(Model = c("intercept", paste("+", effnames[ord.rssE[1:m]])),
                        Size = 0:m,
                        RSS = round(c(rss0, rss0 - cumsum(rssE[ord.rssE])[1:m]), dp),
                        APC = round(APC[1:(m + 1)], dp),
                        Active = Active)

  effnames.ord <- effnames[ord.rssE]
  if(isTRUE(identical(K, as.integer(1)))) active <- "none"
  else active <- paste(sort(effnames.ord[1:(K - 1)]), collapse = ", ")
  if(isTRUE(identical(K, as.integer(k + 1)))) nonactive <- "none"
  else nonactive <- paste(sort(effnames.ord[K:length(effnames)]), collapse = ", ")

  out <- list(Results = Results, Penalties = cs, Level = level,
              ErrorType = c("IER", "EER", "FDR")[method],
              k = k, maxsize = maxsize, apc = apc, Ests = Ests,
              ActEffs = active, NonActEffs = nonactive)
  class(out) = "apc"
  return(out)
}
/scratch/gouwar.j/cran-all/cranData/APCanalysis/R/apc.R
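## Worked sketch on simulated data: an 8-run 2^3 full factorial with all
## interaction columns gives the orthogonal model matrix that apc() requires
## (t(X) %*% X is diagonal). The response is simulated here, so the effects
## flagged as active should be A and B up to Monte Carlo noise.
# set.seed(2)
# d <- expand.grid(A = c(-1, 1), B = c(-1, 1), C = c(-1, 1))
# X <- with(d, cbind(A, B, C, AB = A*B, AC = A*C, BC = B*C, ABC = A*B*C))
# y <- 10 + 3*d$A - 2*d$B + rnorm(8, sd = 0.5)
# fit <- apc(y, X, maxsize = 4, level = 0.05, method = 1, reps = 10000)
# summary(fit)
# plot(fit)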
plot.apc <- function(x, elabs = TRUE, ...){
  m <- x$maxsize  # maximum model size stored in the 'apc' object
  plot(c(-.5, m + .5),
       c(min(x$Results[, 4], na.rm = TRUE), max(x$Results[, 4], na.rm = TRUE)),
       type = "n", xlab = "Model size", ylab = "APC*")
  if (isTRUE(elabs))
    text(0:m, x$Results[1:(m + 1), 4], labels = names(x$Ests)[1:(m + 1)])
  else
    points(0:m, x$Results[1:(m + 1), 4], pch = 19)
}
/scratch/gouwar.j/cran-all/cranData/APCanalysis/R/plot.apc.R
summary.apc <- function(object, ...){
  cat("Error control: ", object$ErrorType, " at ", object$Level, "\n")
  print(object$Results, quote = FALSE, row.names = FALSE)
  cat("---", "\n")
  cat("Minimum APC:", object$apc, "\n")
  cat("Penalties:", object$Penalties, "\n")
  cat("Active Effects:", object$ActEffs, "\n")
  cat("Non-active Effects:", object$NonActEffs, "\n")
}
/scratch/gouwar.j/cran-all/cranData/APCanalysis/R/summary.apc.R
#' Create a summary table for multiple estimated GAM models
#'
#' Create a table to summarize the overall effect strengths of the age, period
#' and cohort effects for models fitted with \code{\link[mgcv]{gam}} or
#' \code{\link[mgcv]{bam}}. The output format can be adjusted by passing
#' arguments to \code{\link[knitr]{kable}} via the \code{...} argument.
#'
#' If the model was estimated with a log or logit link, the function
#' automatically performs an exponential transformation of the effect.
#'
#' @inheritParams plot_jointMarginalAPCeffects
#' @param digits Number of digits for numeric columns. Defaults to 2.
#' @param apc_range Optional list with one or multiple elements with names
#' \code{"age","period","cohort"} to filter the data. Each element should
#' contain a numeric vector of values for the respective variable that should
#' be kept in the data. All other values are deleted before producing the table.
#' @param ... Optional additional arguments passed to \code{\link[knitr]{kable}}.
#'
#' @return Table created with \code{\link[knitr]{kable}}.
#'
#' @import checkmate dplyr
#' @importFrom knitr kable
#' @export
#'
#' @author Alexander Bauer \email{alexander.bauer@@stat.uni-muenchen.de}
#'
#' @examples
#' library(APCtools)
#' library(mgcv)
#'
#' data(travel)
#'
#' # create the summary table for one model
#' model_pure <- gam(mainTrip_distance ~ te(age, period), data = travel)
#' create_APCsummary(model_pure, dat = travel)
#'
#' # create the summary table for multiple models
#' model_cov <- gam(mainTrip_distance ~ te(age, period) + s(household_income),
#'                  data = travel)
#' model_list <- list("pure model" = model_pure,
#'                    "covariate model" = model_cov)
#' create_APCsummary(model_list, dat = travel)
#'
create_APCsummary <- function(model_list, dat, digits = 2, apc_range = NULL,
                              ...) {

  checkmate::assert_choice(class(model_list)[1], choices = c("list","gam"))
  if (class(model_list)[1] == "list") {
    checkmate::assert_list(model_list, types = "gam")
  }
  checkmate::assert_data_frame(dat)
  checkmate::assert_number(digits, lower = 0)
  checkmate::assert_list(apc_range, types = "numeric", max.len = 3,
                         null.ok = TRUE, any.missing = FALSE)
  checkmate::assert_subset(names(apc_range), choices = c("age","period","cohort"))

  # some NULL definitions to appease CRAN checks regarding use of dplyr/ggplot2
  model <- NULL

  # reformat 'model_list' to a list, if only one model object was specified
  if (class(model_list)[1] == "gam") {
    model_list <- list(model_list)
  }

  # retrieve model labels
  if (!is.null(names(model_list))) {
    model_labels <- names(model_list)
  } else {
    model_labels <- paste("model", 1:length(model_list))
  }

  # create the summary table
  tab <- lapply(1:length(model_list), function(i) {
    create_oneAPCsummaryTable(model_list[[i]], dat, apc_range) %>%
      mutate(model = model_labels[i]) %>%
      select(model, everything())
  }) %>% dplyr::bind_rows()

  # remove the 'model' column if only one model is in the table
  if (length(model_list) == 1) {
    tab <- tab %>% select(-model)
  }

  return(knitr::kable(tab, digits = digits, ...))
}


#' Internal helper to create a summary table for one estimated GAM model
#'
#' Internal helper function to be called in \code{\link{create_APCsummary}}.
#' This function creates the summary table for one model estimated with
#' \code{\link[mgcv]{gam}} or \code{\link[mgcv]{bam}}.
#'
#' @inheritParams plot_APCheatmap
#' @inheritParams create_APCsummary
#' @return \code{data.frame} containing aggregated information on the
#' individual effects.
#'
#' @import checkmate dplyr
#'
create_oneAPCsummaryTable <- function(model, dat, apc_range = NULL) {

  checkmate::assert_class(model, classes = "gam")
  checkmate::assert_data_frame(dat)
  checkmate::assert_list(apc_range, types = "numeric", max.len = 3,
                         null.ok = TRUE, any.missing = FALSE)
  checkmate::assert_subset(names(apc_range), choices = c("age","period","cohort"))

  # some NULL definitions to appease CRAN checks regarding use of dplyr/ggplot2
  value <- max_effect <- min_effect <- NULL

  # retrieve datasets with the marginal effects
  dat_list <- plot_marginalAPCeffects(model, dat, return_plotData = TRUE)

  used_logLink <- (model$family[[2]] %in% c("log","logit")) |
    grepl("Ordered Categorical", model$family[[1]])

  vars <- c("age","period","cohort")

  summary_tab <- lapply(vars, function(var) {

    dat_var <- dat_list[[paste0("dat_", var)]]

    if (var %in% names(apc_range)) { # filter the dataset
      dat_var <- dat_var %>% filter(value %in% apc_range[[var]])
    }

    tab <- data.frame(effect = var,
                      value_withMaxEffect = dat_var$value[which.max(dat_var$effect)],
                      value_withMinEffect = dat_var$value[which.min(dat_var$effect)],
                      max_effect = max(dat_var$effect),
                      min_effect = min(dat_var$effect))

    if (!used_logLink) { # identity link
      tab <- tab %>% mutate(difference = max_effect - min_effect)
    } else {             # log or logit link
      tab <- tab %>% mutate(ratio = max_effect / min_effect)
    }

    return(tab)
  }) %>% dplyr::bind_rows()

  return(summary_tab)
}
/scratch/gouwar.j/cran-all/cranData/APCtools/R/create_APCsummary.R
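## Usage sketch: 'apc_range' restricts the summary table to a subset of the
## three APC dimensions, e.g. to working-age respondents and to survey years
## from 1990 onwards (the age and period values below are illustrative).
# library(APCtools)
# library(mgcv)
# data(travel)
# model <- gam(mainTrip_distance ~ te(age, period), data = travel)
# create_APCsummary(model, dat = travel,
#                   apc_range = list(age = 25:60, period = 1990:2018))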
#' Create model summary tables for multiple estimated GAM models
#'
#' Create publication-ready summary tables of all linear and nonlinear effects
#' for models fitted with \code{\link[mgcv]{gam}} or \code{\link[mgcv]{bam}}.
#' The output format of the tables can be adjusted by passing arguments to
#' \code{\link[knitr]{kable}} via the \code{...} argument.
#'
#' If the model was estimated with a log or logit link, the function
#' automatically performs an exponential transformation of the effects.
#'
#' The table for linear coefficients includes the estimated coefficient
#' (\code{coef}), the corresponding standard error (\code{se}), lower and upper
#' limits of 95\% confidence intervals (\code{CI_lower}, \code{CI_upper}) and
#' the p-values for all coefficients apart from the intercept.
#'
#' The table for nonlinear coefficients includes the estimated degrees of freedom
#' (\code{edf}) and the p-value for each estimate.
#'
#' @param model_list list of APC models
#' @param digits number of displayed digits
#' @param ... additional arguments to \code{\link[knitr]{kable}}
#' @inheritParams extract_summary_linearEffects
#'
#' @return List of tables created with \code{\link[knitr]{kable}}.
#'
#' @import checkmate dplyr
#' @importFrom mgcv summary.gam
#' @export
#'
#' @author Alexander Bauer \email{alexander.bauer@@stat.uni-muenchen.de}
#'
#' @examples
#' library(APCtools)
#' library(mgcv)
#'
#' data(travel)
#' model <- gam(mainTrip_distance ~ te(age, period) + residence_region +
#'                household_size + s(household_income), data = travel)
#'
#' create_modelSummary(list(model))
#'
create_modelSummary <- function(model_list, digits = 2,
                                method_expTransform = "simple", ...) {

  checkmate::assert_list(model_list, types = "gam")
  checkmate::assert_number(digits, lower = 0)

  # some NULL definitions to appease CRAN checks regarding use of dplyr/ggplot2
  model <- param <- edf <- pvalue <- NULL

  # retrieve model labels
  if (!is.null(names(model_list))) {
    model_labels <- names(model_list)
  } else {
    model_labels <- paste("model", 1:length(model_list))
  }

  # create the summary table for all linear effects
  tab_linear <- lapply(1:length(model_list), function(i) {
    extract_summary_linearEffects(model_list[[i]],
                                  method_expTransform = method_expTransform) %>%
      mutate(model = model_labels[i]) %>%
      select(model, everything()) %>%
      mutate(pvalue = case_when(param == "(Intercept)" ~ "-",
                                pvalue < 0.0001       ~ "<.0001",
                                TRUE                  ~ as.character(round(pvalue, 4))))
  }) %>% dplyr::bind_rows()

  # create the summary table for all nonlinear effects
  tab_nonlinear <- lapply(1:length(model_list), function(i) {
    tab_raw <- mgcv::summary.gam(model_list[[i]])$s.table %>% as.data.frame()
    tab <- tab_raw %>%
      mutate(param = row.names(tab_raw)) %>%
      dplyr::rename(pvalue = "p-value") %>%
      mutate(model = model_labels[i]) %>%
      select(model, param, edf, pvalue) %>%
      mutate(pvalue = case_when(pvalue < 0.0001 ~ "<.0001",
                                TRUE            ~ as.character(round(pvalue, 4))))
    row.names(tab) <- NULL
    return(tab)
  }) %>% dplyr::bind_rows()

  tab_list <- list(knitr::kable(tab_linear, digits = digits, ...),
                   knitr::kable(tab_nonlinear, digits = digits, ...))

  return(tab_list)
}


#' Internal helper to extract summary of linear effects in a gam model
#'
#' Internal helper function to create a \code{data.frame} containing the linear
#' effects summary of a model fitted with \code{\link[mgcv]{gam}} or
#' \code{\link[mgcv]{bam}}.
#'
#' If the model was estimated with a log or logit link, the function
#' automatically performs an exponential transformation of the effect,
#' see argument \code{method_expTransform}.
#'
#' @param model Model fitted with \code{\link[mgcv]{gam}} or \code{\link[mgcv]{bam}}.
#' @param method_expTransform One of \code{c("simple","delta")}, stating if
#' standard errors and confidence interval limits should be transformed by
#' a simple exp transformation or using the delta method. The delta method can
#' be unstable in some situations and lead to negative confidence interval limits.
#' Only used when the model was estimated with a log or logit link.
#'
#' @import checkmate dplyr
#' @importFrom mgcv summary.gam
#'
extract_summary_linearEffects <- function(model, method_expTransform = "simple") {

  checkmate::assert_class(model, classes = "gam")
  checkmate::assert_choice(method_expTransform, choices = c("simple","delta"))

  # some NULL definitions to appease CRAN checks regarding use of dplyr/ggplot2
  param <- coef <- se <- coef_exp <- se_exp <- CI_lower <- CI_upper <-
    CI_lower_exp <- CI_upper_exp <- pvalue <- NULL

  used_logLink <- (model$family[[2]] %in% c("log","logit")) |
    grepl("Ordered Categorical", model$family[[1]])

  x <- mgcv::summary.gam(model)$p.table
  dat <- data.frame(param = row.names(x),
                    coef = unname(x[,1]),
                    se = unname(x[,2]),
                    CI_lower = unname(x[,1] - qnorm(0.975) * x[,2]),
                    CI_upper = unname(x[,1] + qnorm(0.975) * x[,2]),
                    pvalue = unname(x[,4]),
                    stringsAsFactors = FALSE) %>%
    mutate(param = factor(param, levels = row.names(x)))

  if (used_logLink) {

    if (method_expTransform == "simple") {
      dat <- dat %>%
        mutate(coef_exp = exp(coef),
               se_exp = exp(se),
               CI_lower_exp = exp(CI_lower),
               CI_upper_exp = exp(CI_upper)) %>%
        select(param, coef, se, CI_lower, CI_upper, coef_exp, se_exp,
               CI_lower_exp, CI_upper_exp, pvalue)

    } else { # method_expTransform == "delta"
      # confidence intervals on exp scale are computed based on the delta method
      dat <- dat %>%
        mutate(coef_exp = exp(coef),
               se_exp = sqrt(se^2 * exp(coef)^2)) %>%
        mutate(CI_lower_exp = coef_exp - qnorm(0.975) * se_exp,
               CI_upper_exp = coef_exp + qnorm(0.975) * se_exp) %>%
        select(param, coef, se, CI_lower, CI_upper, coef_exp, se_exp,
               CI_lower_exp, CI_upper_exp, pvalue)
    }
  }

  return(dat)
}
/scratch/gouwar.j/cran-all/cranData/APCtools/R/create_modelSummary.R
#' Data from the German Reiseanalyse survey
#'
#' This dataset from the
#' \href{https://reiseanalyse.de/en/}{Reiseanalyse survey} comprises travel
#' information on German travelers between 1971 and 2018. Data were collected
#' in a yearly repeated cross-sectional survey of German pleasure travels,
#' based on a sample representative of (West) German citizens (until 2009)
#' or of all German-speaking residents (starting 2010). Travelers from former
#' East Germany are only included since 1990. Note that the sample only contains
#' trips with at least five days of trip length. For details see
#' \href{https://journals.sagepub.com/doi/10.1177/1354816620987198}{Weigert et al. (2021)}.
#'
#' The data are a 10\% random sample of all respondents who undertook at least
#' one trip in the respective year, between 1971 and 2018. We thank the
#' \href{https://reiseanalyse.de/en/}{Forschungsgemeinschaft Urlaub und Reisen e.V.}
#' for allowing us to publish this sample.
#'
#' @docType data
#'
#' @usage data(travel)
#'
#' @format A dataframe containing
#' \describe{
#' \item{period}{Year in which the respondent traveled.}
#' \item{age}{Age of the respondent.}
#' \item{sampling_weight}{Individual weight of each respondent to account for
#' a not perfectly representative sample and project the sample results to
#' the population of German citizens (until 2009) or of German-speaking
#' residents (starting 2010). Only available since 1974.}
#' \item{german_citizenship}{Indicator if the respondent is a German citizen or
#' not. Only available since 2010. Until 2009, all respondents were German
#' citizens.}
#' \item{residence_region}{Indicator if the respondent's main residence is in a
#' federal state in the former area of West Germany or in the former area of
#' East Germany.}
#' \item{household_size}{Categorized size of the respondent's household.}
#' \item{household_income}{Joint income (in €) of the respondent's household.}
#' \item{mainTrip_duration}{Categorized trip length of the respondent's
#' \emph{main trip}. The main trip is the trip which the respondent stated was
#' his/her most important trip in the respective year.}
#' \item{mainTrip_distance}{Distance (in km) between the center of the
#' respondent's federal state and the center of the country of destination,
#' for the \emph{main trip}. The main trip is the trip which the respondent
#' stated was his/her most important trip in the respective year.}
#' }
#'
#' @references Weigert, M., Bauer, A., Gernert, J., Karl, M., Nalmpatian, A., Küchenhoff,
#' H., and Schmude, J. (2021). Semiparametric APC analysis of destination choice
#' patterns: Using generalized additive models to quantify the impact of age,
#' period, and cohort on travel distances. \emph{Tourism Economics}.
#' \doi{10.1177/1354816620987198}.
#'
#' Forschungsgemeinschaft Urlaub und Reisen e.V. (FUR) (2020b) \emph{Survey of
#' tourist demand in Germany for holiday travel and short breaks}. Available at:
#' \href{https://reiseanalyse.de/wp-content/uploads/2022/11/RA2020_First-results_EN.pdf}{https://reiseanalyse.de/wp-content/uploads/2022/11/RA2020_First-results_EN.pdf}
#' (accessed 13 January 2023).
#'
#' @keywords datasets
#'
"travel"

#' Drug deaths of white men in the United States
#'
#' Dataset on the number of unintentional drug overdose deaths in the United
#' States for each age group between 1999 and 2019, retrieved from the CDC
#' WONDER Online Database. The data only cover white men.
#' #' The data were exported from the CDC WONDER Online Database (see link in #' references down below), based on the following settings: #' * Group by \emph{Year} and by \emph{Single-Year Ages} #' * Demographics: Gender \emph{Male}; Ethnicity \emph{White} #' * Cause of death: \emph{Drug / Alcohol Induced Causes}. Then select the more #' specific category \emph{Drug poisonings (overdose) Unintentional (X40-X44)}. #' #' @docType data #' #' @usage data(drug_deaths) #' #' @format A dataframe containing #' \describe{ #' \item{period}{Calendar year} #' \item{age}{Age group.} #' \item{deaths}{Number of observed unintentional drug overdose deaths in the #' respective age group and calendar year.} #' \item{population}{Number of white men in the respective age group and #' calendar year in the U.S. population.} #' \item{mortality_rate}{Drug overdose mortality rate for the respective age #' group and calendar year, reported as the number of deaths per 100,000 #' people. Calculated as \code{100000 * deaths / population}.} #' } #' #' @references #' Jalal, H., & Burke, D. S. (2020). Hexamaps for Age-Period-Cohort Data #' Visualization and Implementation in R. \emph{Epidemiology (Cambridge, Mass.)}, #' 31(6), e47. #' \doi{10.1097/EDE.0000000000001236}. #' #' Centers for Disease Control and Prevention, National Center for Health #' Statistics. Underlying Cause of Death 1999-2019 on CDC WONDER Online Database, #' released in 2020. Data are from the Multiple Cause of Death Files, 1999-2019, #' as compiled from data provided by the 57 vital statistics jurisdictions #' through the Vital Statistics Cooperative Program. Accessed at #' \href{http://wonder.cdc.gov/ucd-icd10.html}{wonder.cdc.gov/ucd-icd10.html} #' on 6 December 2021. #' #' @keywords datasets #' "drug_deaths"
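# Illustrative sketch (not part of the documented examples): the documented
# relation mortality_rate = 100000 * deaths / population can be verified
# directly on the data.
#
#   library(dplyr)
#   data(drug_deaths)
#   drug_deaths %>%
#     mutate(rate_check = 100000 * deaths / population) %>%
#     summarize(max_abs_diff = max(abs(rate_check - mortality_rate), na.rm = TRUE))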
/scratch/gouwar.j/cran-all/cranData/APCtools/R/data.R
#' Heatmap of an APC surface #' #' Plot the heatmap of an APC structure. The function can be used in two ways: #' Either to plot the observed mean structure of a metric variable, by #' specifying \code{dat} and the variable \code{y_var}, or by specifying #' \code{dat} and the \code{model} object, to plot some mean structure #' represented by an estimated two-dimensional tensor product surface. The model #' must be estimated with \code{\link[mgcv]{gam}} or \code{\link[mgcv]{bam}}. #' #' See also \code{\link{plot_APChexamap}} to plot a hexagonal heatmap with #' adapted axes. #' #' If the plot is created based on the \code{model} object and the model was #' estimated with a log or logit link, the function automatically performs an #' exponential transformation of the effect. #' #' @param dat Dataset with columns \code{period} and \code{age}. If \code{y_var} #' is specified, the dataset must contain the respective column. If \code{model} #' is specified, the dataset must have been used for model estimation with #' \code{gam} or \code{bam}. #' @param y_var Optional character name of a metric variable to be plotted. #' @param model Optional regression model estimated with \code{\link[mgcv]{gam}} #' or \code{\link[mgcv]{bam}} to estimate a smoothed APC surface. Only used if #' \code{y_var} is not #' specified. #' @param dimensions Character vector specifying the two APC dimensions that #' should be visualized along the x-axis and y-axis. Defaults to #' \code{c("period","age")}. #' @param apc_range Optional list with one or multiple elements with names #' \code{"age","period","cohort"} to filter the data. Each element should #' contain a numeric vector of values for the respective variable that should #' be kept in the data. All other values are deleted. #' @param bin_heatmap,bin_heatmapGrid_list \code{bin_heatmap} indicates if the #' heatmap surface should be binned. Defaults to TRUE. If TRUE, the binning #' grid borders are defined by \code{bin_heatmapGrid_list}. This is a list with #' each element a numeric vector and a name out of #' \code{c("age","period","cohort")}. Can maximally have three elements. Defaults #' to NULL, where the heatmap is binned in 5 year steps along the x-axis and the #' y-axis. #' @param markLines_list Optional list that can be used to highlight the borders #' of specific age groups, time intervals or cohorts. Each element must be a #' numeric vector of values where horizontal, vertical or diagonal lines should #' be drawn (depends on which APC dimension is displayed on which axis). #' The list can maximally have three elements and must have names out of #' \code{c("age","period","cohort")}. #' @param markLines_displayLabels Optional character vector defining for which #' dimensions the lines defined through \code{markLines_list} should be marked #' by a respective label. The vector should be a subset of #' \code{c("age","period","cohort")}, or NULL to suppress all labels. #' Defaults to \code{c("age","period","cohort")}. #' @param y_var_logScale Indicator if \code{y_var} should be log10 transformed. #' Only used if \code{y_var} is specified. Defaults to FALSE. #' @param plot_CI Indicator if the confidence intervals should be plotted. #' Only used if \code{y_var} is not specified. Defaults to TRUE. #' @param legend_limits Optional numeric vector passed as argument \code{limits} #' to \code{\link[ggplot2]{scale_fill_gradient2}}. 
#' #' @return Plot grid created with \code{\link[ggpubr]{ggarrange}} (if #' \code{plot_CI} is TRUE) or a \code{ggplot2} object (if \code{plot_CI} is #' FALSE). #' #' @import checkmate dplyr ggplot2 #' @importFrom ggpubr ggarrange #' @importFrom mgcv predict.gam #' @export #' #' @references Weigert, M., Bauer, A., Gernert, J., Karl, M., Nalmpatian, A., #' Küchenhoff, H., and Schmude, J. (2021). Semiparametric APC analysis of #' destination choice patterns: Using generalized additive models to quantify #' the impact of age, period, and cohort on travel distances. #' \emph{Tourism Economics}. doi:10.1177/1354816620987198. #' #' @author Alexander Bauer \email{alexander.bauer@@stat.uni-muenchen.de}, #' Maximilian Weigert \email{maximilian.weigert@@stat.uni-muenchen.de} #' #' @seealso plot_APChexamap #' #' @examples #' library(APCtools) #' library(mgcv) #' #' data(travel) #' #' # variant A: plot observed mean structures #' # observed heatmap #' plot_APCheatmap(dat = travel, y_var = "mainTrip_distance", #' bin_heatmap = FALSE, y_var_logScale = TRUE) #' #' # with binning #' plot_APCheatmap(dat = travel, y_var = "mainTrip_distance", #' bin_heatmap = TRUE, y_var_logScale = TRUE) #' #' # variant B: plot some smoothed, estimated mean structure #' model <- gam(mainTrip_distance ~ te(age, period) + residence_region + #' household_size + s(household_income), data = travel) #' #' # plot the smooth tensor product surface #' plot_APCheatmap(dat = travel, model = model, bin_heatmap = FALSE, plot_CI = FALSE) #' #' # ... same plot including the confidence intervals #' plot_APCheatmap(dat = travel, model = model, bin_heatmap = FALSE) #' #' # the APC dimensions can be flexibly assigned to the x-axis and y-axis #' plot_APCheatmap(dat = travel, model = model, dimensions = c("age","cohort"), #' bin_heatmap = FALSE, plot_CI = FALSE) #' #' # add some reference lines #' plot_APCheatmap(dat = travel, model = model, bin_heatmap = FALSE, plot_CI = FALSE, #' markLines_list = list(cohort = c(1910,1939,1955,1980))) #' #' # default binning of the tensor product surface in 5-year-blocks #' plot_APCheatmap(dat = travel, model = model, plot_CI = FALSE) #' #' # manual binning #' manual_binning <- list(period = seq(min(travel$period, na.rm = TRUE) - 1, #' max(travel$period, na.rm = TRUE), by = 5), #' cohort = seq(min(travel$period - travel$age, na.rm = TRUE) - 1, #' max(travel$period - travel$age, na.rm = TRUE), by = 10)) #' plot_APCheatmap(dat = travel, model = model, plot_CI = FALSE, #' bin_heatmapGrid_list = manual_binning) #' plot_APCheatmap <- function(dat, y_var = NULL, model = NULL, dimensions = c("period","age"), apc_range = NULL, bin_heatmap = TRUE, bin_heatmapGrid_list = NULL, markLines_list = NULL, markLines_displayLabels = c("age","period","cohort"), y_var_logScale = FALSE, plot_CI = TRUE, legend_limits = NULL) { checkmate::assert_data_frame(dat) checkmate::assert_true(!is.null(y_var) | !is.null(model)) checkmate::assert_character(y_var, len = 1, null.ok = TRUE) checkmate::assert_choice(y_var, choices = colnames(dat), null.ok = TRUE) checkmate::assert_class(model, classes = "gam", null.ok = TRUE) checkmate::assert_character(dimensions, len = 2) checkmate::assert_subset(dimensions, choices = c("age","period","cohort")) checkmate::assert_list(apc_range, types = "numeric", max.len = 3, null.ok = TRUE, any.missing = FALSE) checkmate::assert_subset(names(apc_range), choices = c("age","period","cohort")) checkmate::assert_logical(bin_heatmap, len = 1) checkmate::assert_list(bin_heatmapGrid_list, min.len = 1, max.len = 3, 
types = "numeric", null.ok = TRUE) checkmate::assert_subset(names(bin_heatmapGrid_list), choices = c("age","period","cohort")) checkmate::assert_list(markLines_list, min.len = 1, max.len = 3, types = "numeric", null.ok = TRUE) checkmate::assert_subset(names(markLines_list), choices = c("age","period","cohort")) checkmate::assert_character(markLines_displayLabels, null.ok = TRUE) checkmate::assert_subset(markLines_displayLabels, choices = c("age","period","cohort")) checkmate::assert_logical(y_var_logScale, len = 1) checkmate::assert_logical(plot_CI, len = 1) checkmate::assert_numeric(legend_limits, len = 2, null.ok = TRUE) # some NULL definitions to appease CRAN checks regarding use of dplyr/ggplot2 period <- age <- effect <- se <- exp_effect <- exp_se <- upper <- lower <- exp_upper <- exp_lower <- cohort <- x <- y <- plot_effect <- plot_lower <- plot_upper <- NULL if (!is.null(y_var)) { # plot observed structures dat <- dat %>% mutate(cohort = period - age) %>% dplyr::rename(effect = y_var) %>% # rename 'y_var' for easier handling filter(!is.na(effect)) plot_dat <- dat # if 'y_var' is not binned, take the average of observations with the same # age and period, to prevent overplotting if (!bin_heatmap) { plot_dat <- plot_dat %>% group_by(period, age) %>% summarize(effect = mean(effect)) %>% ungroup() } # create some variables and objects, to re-use the model-based code plot_dat <- plot_dat %>% mutate(cohort = period - age, upper = effect, lower = effect) dat_predictionGrid <- plot_dat if (y_var_logScale) { plot_dat <- plot_dat %>% mutate(effect = log10(effect)) } plot_CI <- FALSE used_logLink <- FALSE legend_title <- ifelse(!y_var_logScale, paste0("average ",y_var), paste0("average log10(",y_var,")")) y_trans <- "identity" } else { # plot smoothed, model-based structures # create a dataset for predicting the values of the APC surface grid_age <- min(dat$age, na.rm = TRUE):max(dat$age, na.rm = TRUE) grid_period <- min(dat$period, na.rm = TRUE):max(dat$period, na.rm = TRUE) dat_predictionGrid <- expand.grid(age = grid_age, period = grid_period) %>% mutate(cohort = period - age) # add random values for all further covariates in the model, # necessary for calling mgcv:::predict.gam covars <- attr(model$terms, "term.labels") covars <- covars[!(covars %in% c("age","period","cohort"))] if (length(covars) > 0) { dat_predictionGrid[,covars] <- dat[1, covars] } # create a dataset containing the estimated values of the APC surface terms_model <- sapply(model$smooth, function(x) { x$label }) terms_index_APC <- which(grepl("age", terms_model) | grepl("period", terms_model)) term_APCsurface <- terms_model[terms_index_APC] prediction <- mgcv::predict.gam(object = model, newdata = dat_predictionGrid, type = "terms", terms = term_APCsurface, se.fit = TRUE) plot_dat <- dat_predictionGrid %>% mutate(effect = as.vector(prediction$fit), se = as.vector(prediction$se.fit)) %>% mutate(effect = effect - mean(effect)) %>% mutate(lower = effect - qnorm(0.95) * se, upper = effect + qnorm(0.95) * se) used_logLink <- (model$family[[2]] %in% c("log","logit")) | grepl("Ordered Categorical", model$family[[1]]) legend_title <- ifelse(used_logLink, "Mean exp effect", "Mean effect") y_trans <- ifelse(used_logLink, "log", "identity") if (used_logLink) { plot_dat <- plot_dat %>% mutate(exp_effect = exp(effect), exp_se = sqrt((se^2) * (exp_effect^2))) %>% mutate(exp_lower = exp_effect - qnorm(0.975) * exp_se, exp_upper = exp_effect + qnorm(0.975) * exp_se) %>% select(-effect, -se, -upper, -lower) %>% dplyr::rename(effect = 
exp_effect, se = exp_se, upper = exp_upper, lower = exp_lower) } } # filter the data if (!is.null(apc_range)) { if (!is.null(apc_range$age)) { plot_dat <- plot_dat %>% filter(age %in% apc_range$age) } if (!is.null(apc_range$period)) { plot_dat <- plot_dat %>% filter(period %in% apc_range$period) } if (!is.null(apc_range$cohort)) { plot_dat <- plot_dat %>% filter(cohort %in% apc_range$cohort) } } # bin the heatmap surface, if necessary if (!bin_heatmap) { # no binning plot_dat <- plot_dat %>% dplyr::rename(plot_effect = effect, plot_upper = upper, plot_lower = lower) } else { # bin the heatmap # define the binning grid, if still necessary if (is.null(bin_heatmapGrid_list)) { bin_heatmapGrid_list <- list(seq(min(dat_predictionGrid[[dimensions[1]]], na.rm = TRUE) - 1, max(dat_predictionGrid[[dimensions[1]]], na.rm = TRUE), by = 5), seq(min(dat_predictionGrid[[dimensions[2]]], na.rm = TRUE) - 1, max(dat_predictionGrid[[dimensions[2]]], na.rm = TRUE), by = 5)) names(bin_heatmapGrid_list) <- dimensions } dims_toBin <- names(bin_heatmapGrid_list) dims_catVarNames <- paste0(dims_toBin, "_cat") for (i in 1:length(dims_toBin)) { plot_dat[[dims_catVarNames[i]]] <- cut(plot_dat[[dims_toBin[i]]], breaks = bin_heatmapGrid_list[[dims_toBin[i]]]) } plot_dat <- plot_dat %>% group_by_at(vars(dims_catVarNames)) %>% mutate(plot_effect = mean(effect), plot_lower = mean(lower), plot_upper = mean(upper)) } # create variables x, y, z additional to the APC variables, for easier handling plot_dat$x <- plot_dat[[dimensions[1]]] plot_dat$y <- plot_dat[[dimensions[2]]] dim_3 <- ifelse(!("age" %in% dimensions), "age", ifelse(!("period" %in% dimensions), "period", "cohort")) plot_dat$z <- plot_dat[[dim_3]] x_lab <- capitalize_firstLetter(dimensions[1]) y_lab <- capitalize_firstLetter(dimensions[2]) # overall theme gg_theme <- theme(plot.title = element_text(hjust = 0.5), legend.position = "bottom", legend.key.width = unit(1.2, "cm")) # create the base heatmap plot gg_effect <- ggplot() + geom_tile(data = plot_dat, aes(x = x, y = y, fill = plot_effect)) + ggtitle(ifelse(!is.null(y_var), "", "Effect")) + xlab(x_lab) + ylab(y_lab) + gg_theme if (!plot_CI) { # no confidence intervals to be plotted limits_color <- c(NA,NA) gg_list <- list(gg_effect) } else { # add heatmaps for the confidence interval borders to the plot limits_color <- c(min(floor(plot_dat$plot_lower * 1000) / 1000), max(ceiling(plot_dat$plot_upper * 1000) / 1000)) gg_lower <- ggplot() + geom_tile(data = plot_dat, aes(x = x, y = y, fill = plot_lower)) + ggtitle("Lower 95% CI boundary") + xlab(x_lab) + gg_theme + theme(axis.title.y = element_blank()) gg_upper <- ggplot() + geom_tile(data = plot_dat, aes(x = x, y = y, fill = plot_upper)) + ggtitle("Upper 95% CI boundary") + xlab(x_lab) + gg_theme + theme(axis.title.y = element_blank()) gg_list <- list(gg_effect, gg_lower, gg_upper) } # color scale scale_midpoint <- ifelse(!is.null(model), 0, mean(plot_dat$plot_effect)) gg_list <- lapply(gg_list, function(gg) { gg + scale_fill_gradient2(legend_title, limits = legend_limits, trans = y_trans, low = "dodgerblue4", mid = "white", high = "firebrick3", midpoint = scale_midpoint) }) # mark some age groups / periods / cohorts in each plot if (!is.null(markLines_list)) { gg_list <- gg_addReferenceLines(gg_list = gg_list, dimensions = dimensions, plot_dat = plot_dat, markLines_list = markLines_list, markLines_displayLabels = markLines_displayLabels) } # create final plot output if (!plot_CI) { plot <- gg_list[[1]] + theme(plot.title = element_blank()) } else { plot <- 
ggpubr::ggarrange(plotlist = gg_list, legend = "bottom",
                              common.legend = TRUE, ncol = 3,
                              widths = c(.34,.32,.32))
  }

  return(plot)
}

#' Internal helper to add reference lines in an APC heatmap
#'
#' Internal helper function to add reference lines in an APC heatmap
#' (vertically, horizontally or diagonally). The function takes an existing list
#' of ggplot objects, adds the specified reference lines in each plot and
#' returns the edited ggplot list again. To be called from within
#' \code{\link{plot_APCheatmap}}.
#'
#' @inheritParams plot_APCheatmap
#' @param gg_list Existing list of ggplot objects where the reference lines
#' should be marked in each individual ggplot.
#' @param plot_dat Dataset used for creating the heatmap.
#'
#' @import dplyr ggplot2
#'
gg_addReferenceLines <- function(gg_list, dimensions, plot_dat, markLines_list,
                                 markLines_displayLabels) {

  # some NULL definitions to appease CRAN checks regarding use of dplyr/ggplot2
  x <- y <- x_start <- x_end <- y_start <- y_end <- group <- NULL

  dim_3 <- ifelse(!("age" %in% dimensions), "age",
                  ifelse(!("period" %in% dimensions), "period", "cohort"))

  # add vertical lines
  if (dimensions[1] %in% names(markLines_list)) {
    gg_list <- lapply(gg_list, function(gg) {
      gg + geom_vline(xintercept = markLines_list[[dimensions[1]]],
                      col = gray(0.3), lty = 2)
    })

    # add labels
    if (dimensions[1] %in% markLines_displayLabels) {
      dim1Labels_dat <- data.frame(x = markLines_list[[dimensions[1]]],
                                   y = max(plot_dat$y))
      gg_list <- lapply(gg_list, function(gg) {
        gg + geom_label(data = dim1Labels_dat, aes(x = x, y = y, label = x),
                        hjust = 0, nudge_x = 1, nudge_y = 1)
      })
    }
  }

  # add horizontal lines
  if (dimensions[2] %in% names(markLines_list)) {
    gg_list <- lapply(gg_list, function(gg) {
      gg + geom_hline(yintercept = markLines_list[[dimensions[2]]],
                      col = gray(0.3), lty = 2)
    })

    # add labels
    if (dimensions[2] %in% markLines_displayLabels) {
      dim2Labels_dat <- data.frame(x = max(plot_dat$x),
                                   y = markLines_list[[dimensions[2]]])
      gg_list <- lapply(gg_list, function(gg) {
        gg + geom_label(data = dim2Labels_dat, aes(x = x, y = y, label = y),
                        vjust = 0, nudge_x = 1, nudge_y = 1)
      })
    }
  }

  # add diagonal lines
  if (dim_3 %in% names(markLines_list)) {

    # create a dataset for the line segments
    dat_segments <- lapply(markLines_list[[dim_3]], function(z) {
      data.frame(x_start = case_when(dim_3 == "cohort" & dimensions[1] == "period" ~ min(plot_dat$y) + z,
                                     dim_3 == "cohort" & dimensions[1] == "age" ~ min(plot_dat$y) - z,
                                     dim_3 == "period" ~ z - min(plot_dat$y),
                                     dim_3 == "age" & dimensions[1] == "cohort" ~ min(plot_dat$y) - z,
                                     dim_3 == "age" & dimensions[1] == "period" ~ min(plot_dat$y) + z),
                 x_end = case_when(dim_3 == "cohort" & dimensions[1] == "period" ~ max(plot_dat$y) + z,
                                   dim_3 == "cohort" & dimensions[1] == "age" ~ max(plot_dat$y) - z,
                                   dim_3 == "period" ~ z - max(plot_dat$y),
                                   dim_3 == "age" & dimensions[1] == "cohort" ~ max(plot_dat$y) - z,
                                   dim_3 == "age" & dimensions[1] == "period" ~ max(plot_dat$y) + z),
                 y_start = min(plot_dat$y),
                 y_end = max(plot_dat$y),
                 group = ifelse(match(z, markLines_list[[dim_3]]) == 1,
                                paste(capitalize_firstLetter(dim_3), z),
                                as.character(z)))
    }) %>% dplyr::bind_rows()

    # if necessary, cut each segment, s.t.
it doesn't exceed the plot limits dat_segments <- ensure_segmentsInPlotRange(dat_segments, plot_dat) # add the segments to the plots gg_list <- lapply(gg_list, function(gg) { gg + geom_segment(data = dat_segments, aes(x = x_start, xend = x_end, y = y_start, yend = y_end, group = group), col = gray(0.3), lty = 2) }) # add labels if (dim_3 %in% markLines_displayLabels) { gg_list <- lapply(gg_list, function(gg) { gg + geom_label(data = dat_segments, aes(x = x_end, y = y_end, label = group)) }) } } return(gg_list) } #' Internal helper for gg_addReferenceLines to keep diagonal lines in the plot range #' #' Internal helper function to be called from within #' \code{\link{gg_addReferenceLines}}. This function takes the dataset prepared #' for adding diagonal reference lines in the plot, checks if some diagonals #' exceed the plot limits, cuts them accordingly, if necessary, and again #' returns the corrected dataset. #' #' @inheritParams gg_addReferenceLines #' @param dat_segments Dataset containing information on the diagonal reference #' lines. #' ensure_segmentsInPlotRange <- function(dat_segments, plot_dat) { x_range <- range(plot_dat$x) slopes <- (dat_segments$y_end - dat_segments$y_start) / (dat_segments$x_end - dat_segments$x_start) for (i in 1:nrow(dat_segments)) { dat_i <- dat_segments[i,] # since the lines in dat_segments sometimes have negative slope, flexibly # retrieve the start and end values from the columns x_start_var <- ifelse(dat_i$x_start < dat_i$x_end, "x_start", "x_end") y_start_var <- ifelse(dat_i$x_start < dat_i$x_end, "y_start", "y_end") x_end_var <- ifelse(dat_i$x_end > dat_i$x_start, "x_end", "x_start") y_end_var <- ifelse(dat_i$x_end > dat_i$x_start, "y_end", "y_start") # check the start of the line if (dat_i[[x_start_var]] < x_range[1]) { dat_segments[i, x_start_var] <- x_range[1] dat_segments[i, y_start_var] <- dat_i[[y_end_var]] - slopes[i] * (dat_i[[x_end_var]] - x_range[1]) } # check the end of the line if (dat_i[[x_end_var]] > x_range[2]) { dat_segments[i, x_end_var] <- x_range[2] dat_segments[i, y_end_var] <- dat_i[[y_start_var]] + slopes[i] * (x_range[2] - dat_i[[x_start_var]]) } } return(dat_segments) }
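# Illustrative sketch of the truncation logic above (made-up values, not part
# of the documented examples): a diagonal entering the plot region from the
# left is cut at the x-axis limit and its start point is shifted along the
# line's slope.
#
#   dat_seg  <- data.frame(x_start = 1985, x_end = 2005,
#                          y_start = 20, y_end = 40, group = "Cohort 1965")
#   plot_dat <- data.frame(x = c(1990, 2010), y = c(20, 40))
#   ensure_segmentsInPlotRange(dat_seg, plot_dat)
#   # x_start becomes 1990 and y_start becomes 25 (slope 1, shifted by 5 years)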
/scratch/gouwar.j/cran-all/cranData/APCtools/R/plot_APCheatmap.R
#' Hexamap of an APC surface #' #' Plot the heatmap of an APC structure using a hexagon-based plot with adapted #' axes. In this way, the one temporal dimension that is represented by the #' diagonal structure is visually not underrepresented compared to the other two #' dimensions on the x-axis and y-axis. \cr #' The function can be used in two ways: Either to plot the observed mean #' structure of a metric variable, by specifying \code{dat} and the variable #' \code{y_var}, or by specifying \code{dat} and the \code{model} object, to #' plot some mean structure represented by an estimated two-dimensional tensor #' product surface. The model must be estimated with \code{\link[mgcv]{gam}} or #' \code{\link[mgcv]{bam}}. #' #' See also \code{\link{plot_APCheatmap}} to plot a regular heatmap. #' #' If the plot is created based on the \code{model} object and the model was #' estimated with a log or logit link, the function automatically performs an #' exponential transformation of the effect. #' #' @inheritParams plot_APCheatmap #' @param obs_interval Numeric specifying the interval width based on which the #' data is spaced. Only used if \code{y_var} is specified. Defaults to 1, i.e. #' observations each year. #' @param iso_interval Numeric specifying the interval width between the #' isolines along each axis. Defaults to 5. #' @param color_vec Optional character vector of color names, specifying the #' color continuum. #' @param color_range Optional numeric vector with two elements, specifying the #' ends of the color scale in the legend. #' @param line_width Line width of the isolines. Defaults to 0.5. #' @param line_color Character color name for the isolines. Defaults to gray. #' @param label_size Size of the labels along the axes. Defaults to 0.5. #' @param label_color Character color name for the labels along the axes. #' @param legend_title Optional character title for the legend. #' #' @return Creates a plot with base R functions (not \code{ggplot2}). #' #' @import checkmate dplyr graphics #' @importFrom grDevices colorRampPalette #' @importFrom mgcv predict.gam #' @importFrom tidyr pivot_wider #' @export #' #' @references Jalal, H., Burke, D. (2020). Hexamaps for Age–Period–Cohort #' Data Visualization and Implementation in R. #' \emph{Epidemiology}, 31 (6), e47-e49. doi: 10.1097/EDE.0000000000001236. 
#' #' @author Hawre Jalal \email{hjalal@@pitt.edu}, #' Alexander Bauer \email{alexander.bauer@@stat.uni-muenchen.de} #' #' @seealso \code{\link{plot_APCheatmap}} #' #' @examples #' library(APCtools) #' library(mgcv) #' library(dplyr) #' #' data(drug_deaths) #' #' # restrict to data where the mortality rate is available #' drug_deaths <- drug_deaths %>% filter(!is.na(mortality_rate)) #' #' # hexamap of an observed structure #' plot_APChexamap(dat = drug_deaths, #' y_var = "mortality_rate", #' color_range = c(0,40)) #' #' # hexamap of a smoothed structure #' model <- gam(mortality_rate ~ te(age, period, bs = "ps", k = c(8,8)), #' data = drug_deaths) #' #' plot_APChexamap(dat = drug_deaths, model = model) #' plot_APChexamap <- function (dat, y_var = NULL, model = NULL, apc_range = NULL, y_var_logScale = FALSE, obs_interval = 1, iso_interval = 5, color_vec = NULL, color_range = NULL, line_width = .5, line_color = gray(0.5), label_size = .5, label_color = "black", legend_title = NULL) { checkmate::assert_data_frame(dat) checkmate::assert_true(!is.null(y_var) | !is.null(model)) checkmate::assert_character(y_var, len = 1, null.ok = TRUE) checkmate::assert_choice(y_var, choices = colnames(dat), null.ok = TRUE) checkmate::assert_class(model, classes = "gam", null.ok = TRUE) checkmate::assert_list(apc_range, types = "numeric", max.len = 3, null.ok = TRUE, any.missing = FALSE) checkmate::assert_subset(names(apc_range), choices = c("age","period","cohort")) checkmate::assert_logical(y_var_logScale, len = 1) checkmate::assert_numeric(obs_interval, lower = 0, len = 1) checkmate::assert_numeric(iso_interval, lower = 0, len = 1) checkmate::assert_character(color_vec, null.ok = TRUE) checkmate::assert_numeric(color_range, len = 2, null.ok = TRUE) checkmate::assert_numeric(line_width, lower = 0, len = 1) checkmate::assert_character(line_color, len = 1) checkmate::assert_numeric(label_size, lower = 0, len = 1) checkmate::assert_character(label_color, len = 1) checkmate::assert_character(legend_title, len = 1, null.ok = TRUE) # some NULL definitions to appease CRAN checks regarding use of dplyr/ggplot2 period <- age <- effect <- cohort <- NULL if (!is.null(y_var)) { # plot observed structures plot_dat <- dat %>% mutate(cohort = period - age) %>% dplyr::rename(effect = y_var) %>% # rename 'y_var' for easier handling filter(!is.na(effect)) # if a period-age combination is appearing multiple times, take the average if (max(table(paste(plot_dat$period, plot_dat$age))) > 1) { plot_dat <- plot_dat %>% group_by(period, age) %>% summarize(effect = mean(effect)) %>% ungroup() } if (y_var_logScale) { plot_dat <- plot_dat %>% mutate(effect = log10(effect)) if (!is.null(color_range)) { color_range <- log10(color_range) } } used_logLink <- FALSE if (is.null(legend_title)) { legend_title <- y_var if (y_var_logScale) { legend_title <- paste0("log10(", y_var, ")") } } } else { # plot smoothed, model-based structures # create a dataset for predicting the values of the APC surface grid_age <- min(dat$age, na.rm = TRUE):max(dat$age, na.rm = TRUE) grid_period <- min(dat$period, na.rm = TRUE):max(dat$period, na.rm = TRUE) dat_predictionGrid <- expand.grid(age = grid_age, period = grid_period) %>% mutate(cohort = period - age) # add random values for all further covariates in the model, # necessary for calling mgcv:::predict.gam covars <- attr(model$terms, "term.labels") covars <- covars[!(covars %in% c("age","period","cohort"))] if (length(covars) > 0) { dat_predictionGrid[,covars] <- dat[1, covars] } # create a dataset 
containing the estimated values of the APC surface terms_model <- sapply(model$smooth, function(x) { x$label }) terms_index_APC <- which(grepl("age", terms_model) | grepl("period", terms_model)) term_APCsurface <- terms_model[terms_index_APC] prediction <- mgcv::predict.gam(object = model, newdata = dat_predictionGrid, type = "terms", terms = term_APCsurface, se.fit = TRUE) plot_dat <- dat_predictionGrid %>% mutate(effect = as.vector(prediction$fit)) %>% mutate(effect = effect - mean(effect)) used_logLink <- (model$family[[2]] %in% c("log","logit")) | grepl("Ordered Categorical", model$family[[1]]) if (is.null(legend_title)) { legend_title <- ifelse(used_logLink, "Mean exp effect", "Mean effect") } if (used_logLink) { plot_dat <- plot_dat %>% mutate(effect = exp(effect)) } } # filter the dataset if (!is.null(apc_range)) { if (!is.null(apc_range$age)) { plot_dat <- plot_dat %>% filter(age %in% apc_range$age) } if (!is.null(apc_range$period)) { plot_dat <- plot_dat %>% filter(period %in% apc_range$period) } if (!is.null(apc_range$cohort)) { plot_dat <- plot_dat %>% filter(cohort %in% apc_range$cohort) } } # reformat the data to wide format mat <- plot_dat %>% select(period, age, effect) %>% tidyr::pivot_wider(id_cols = age, names_from = period, values_from = effect) %>% arrange(age) %>% select(-1) %>% as.matrix() row.names(mat) <- sort(unique(plot_dat$age)) # setting default values for missing parameters if (is.null(color_range)) { color_range <- range(mat, na.rm = TRUE) # use a symmetric color scale if the value range spans zero (or one with # log or logit link) if (!used_logLink) { if (color_range[1] < 0 & color_range[2] > 0) { color_range <- c(-1,1) * max(abs(range(mat, na.rm = TRUE))) } } if (used_logLink) { if (color_range[1] < 1 & color_range[2] > 1) { max_scale <- max(color_range, 1 / color_range) color_range <- c(1 / max_scale, max_scale) } } } if (is.null(color_vec)) { color_palette <- grDevices::colorRampPalette(c("dodgerblue4", gray(0.95), "firebrick3")) color_vec <- color_palette(100) } # end of default values nA <- 1 + diff(range(as.numeric(row.names(mat)))) nP <- 1 + diff(range(as.numeric(colnames(mat)))) first_age <- min(plot_dat$age) last_age <- first_age + (nA - 1) * obs_interval first_period <- min(plot_dat$period) last_period <- first_period + (nP - 1) * obs_interval first_cohort <- first_period - last_age last_cohort <- last_period - first_age age_isolines <- seq(from = first_age, to = last_age, by = iso_interval) period_isolines <- seq(from = first_period, to = last_period, by = iso_interval) cohort_isolines <- seq(from = first_cohort, to = last_cohort, by = iso_interval) ages <- seq(from = first_age, to = last_age, by = obs_interval) periods <- seq(from = first_period, to = last_period, by = obs_interval) cohorts <- seq(from = first_cohort, to = last_cohort, by = obs_interval) ages <- ages[ages %in% row.names(mat)] periods <- periods[periods %in% colnames(mat)] n_ages <- length(ages) n_periods <- length(periods) n_cohorts <- length(cohorts) n_age_isolines <- length(age_isolines) n_period_isolines <- length(period_isolines) n_cohort_isolines <- length(cohort_isolines) # apply the limits to the data by truncating it mat[mat < color_range[1]] <- color_range[1] mat[mat > color_range[2]] <- color_range[2] ### plotting ncol <- length(color_vec) not_nan_mat <- !is.na(mat) & !is.nan(mat) v_mat <- as.vector(mat[not_nan_mat]) # Define color sequence: if (!used_logLink) { color_seq <- seq(from = color_range[1], to = color_range[2], length.out = ncol + 1) } if (used_logLink) { 
color_seq <- exp(seq(from = log(color_range[1]),
                         to = log(color_range[2]), length.out = ncol + 1))
  }

  matc <- cut(mat[not_nan_mat], # discretize the data
              breaks = color_seq,
              include.lowest = TRUE,
              labels = FALSE)

  a <- obs_interval / sqrt(3) # radius of the hexagon (distance from center to a vertex).
  b <- sqrt(3)/2 * a # half height of the hexagon (distance from the center perpendicular to the middle of the top edge)

  yv <- c(0, b, b, 0, -b, -b, 0)
  xv <- c(-a, -a/2, a/2, a, a/2, -a/2, -a)

  # compute the center of each hexagon by creating an a*p grid for each age-period combination
  P0 <- matrix(periods, nrow = n_ages, ncol = n_periods, byrow = TRUE)
  A0 <- t(matrix(ages, nrow = n_periods, ncol = n_ages, byrow = TRUE))

  # convert the grid to the X-Y Coordinate
  X <- compute_xCoordinate(P0)
  Y <- compute_yCoordinate(P0, A0)

  minX <- min(X) - 2*obs_interval
  maxX <- max(X) + 2*obs_interval
  minY <- min(Y) - 2*obs_interval
  maxY <- max(Y) + 2*obs_interval

  # only keep those that have non-NA values
  X <- X[not_nan_mat]
  Y <- Y[not_nan_mat]

  # get the color for each level
  color_vec2 <- color_vec[matc]

  Xvec <- as.vector(X)
  Yvec <- as.vector(Y)
  n_hexagons <- length(Xvec)

  # compute the X and Y coordinate for each hexagon - each hexagon is a row and
  # each polygon point is a column
  Xhex <- outer(Xvec, xv, '+')
  Yhex <- outer(Yvec, yv, '+')

  # make sure to reset the plot layout when the function exits
  oldpar <- par(no.readonly = TRUE)
  on.exit(par(oldpar))

  # plot layout with two columns - for the plot and the colorbar
  layout(t(1:2), widths = c(4,1))
  par(mar = c(.5,.5,.5,.5))

  plot(x = NULL, y = NULL, xlim = c(minX,maxX), ylim = c(minY,maxY),
       axes = FALSE, frame.plot = FALSE, xaxt = 'n', yaxt = 'n', type = 'n',
       asp = 1)

  for (i in 1:n_hexagons) {
    polygon(x = Xhex[i,], # X-Coordinates of polygon
            y = Yhex[i,], # Y-Coordinates of polygon
            col = color_vec2[i], # Color of polygon
            border = NA, # Color of polygon border
            lwd = 1)
  }

  # age-isolines
  y1 <- compute_yCoordinate(first_period, age_isolines)
  y2 <- compute_yCoordinate(last_period + obs_interval, age_isolines)
  x1 <- compute_xCoordinate(first_period)
  x2 <- compute_xCoordinate(last_period + obs_interval)
  for (i in 1:n_age_isolines) {
    lines(x = c(x1,x2), y = c(y1[i],y2[i]), col = line_color, lwd = line_width)
    text(x = x2, y = y2[i], labels = paste("A:",age_isolines[i]),
         col = label_color, cex = label_size, srt = -30, adj = c(0, 0.5))
  }

  # period-isolines
  x <- compute_xCoordinate(period_isolines)
  y1 <- compute_yCoordinate(period_isolines, first_age)
  y2 <- compute_yCoordinate(period_isolines, last_age + obs_interval)
  for (i in 1:n_period_isolines) {
    lines(x = c(x[i], x[i]), y = c(y1[i],y2[i]), col = line_color, lwd = line_width)
    text(x = x[i], y = y2[i], labels = paste("P:",period_isolines[i]),
         col = label_color, cex = label_size, srt = 90, adj = c(0, .5))
  }

  # cohort-isolines (need some more processing!)
# determine the periods where the cohort isolines cross the last age p_top <- cohort_isolines + last_age p_top <- p_top[p_top < last_period] n_top <- length(p_top) # and the periods where they cross the first age p_bottom <- cohort_isolines + first_age p_bottom <- p_bottom[p_bottom > first_period] n_bottom <- length(p_bottom) # and the ages where they cross the first period a_left <- first_period - cohort_isolines a_left <- a_left[a_left >= first_age] n_left <- length(a_left) # and the ages where they cross the last period a_right <- last_period - cohort_isolines a_right <- a_right[a_right <= last_age] n_right <- length(a_right) # combine the periods and ages initial and final points on the a*p coordinates # first the left-bottom edge p1 <- c(rep(first_period, n_left), p_bottom) a1 <- c(a_left, rep(first_age, n_bottom)) # then the top-right edge p2 <- c(p_top, rep(last_period, n_right)) a2 <- c(rep(last_age, n_top), a_right) # convert the a*p Coordinates to x-y coordinates x1 <- compute_xCoordinate(p1 - obs_interval) x2 <- compute_xCoordinate(p2) y1 <- compute_yCoordinate(p1 - obs_interval, a1 - obs_interval) y2 <- compute_yCoordinate(p2, a2) # finally draw the lines for (i in 1:n_cohort_isolines) { lines(x = c(x1[i], x2[i]), y = c(y1[i],y2[i]), col = line_color, lwd = line_width) text(x = x1[i], y = y1[i], labels = paste("C:",cohort_isolines[i]), col = label_color, cex = label_size, srt = 30, adj = c(1,.5)) } # create the colorbar par(las = 2) par(mar = c(10,2,10,2.5)) if (!used_logLink) { image(y = color_seq, z = t(color_seq), breaks = color_seq, col = color_vec, axes = FALSE, main = legend_title, cex.main = .8) axis(4, cex.axis = label_size, mgp = c(0,.5,0)) } if (used_logLink) { image(y = color_seq, z = t(color_seq), breaks = color_seq, col = color_vec, axes = FALSE, main = legend_title, cex.main = .8, log = "y") axis(4, cex.axis = label_size, mgp = c(0,.5,0)) } invisible(NULL) }
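# Geometry note (illustrative sketch, mirroring the internals above): with
# obs_interval = 1, each hexagon has circumradius a = 1/sqrt(3) and apothem
# b = 1/2, so the centers of neighboring cells are exactly one observation
# interval apart and the hexagons tile the plane without gaps.
#
#   a  <- 1 / sqrt(3)
#   b  <- sqrt(3) / 2 * a                      # = 1/2
#   xv <- c(-a, -a/2, a/2, a, a/2, -a/2, -a)   # vertex x-offsets around a center
#   yv <- c( 0,    b,   b, 0,  -b,   -b,  0)   # vertex y-offsets around a center
#   plot(NULL, xlim = c(-1, 1), ylim = c(-1, 1), asp = 1)
#   polygon(0 + xv, 0 + yv)                    # one hexagon centered at the origin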
/scratch/gouwar.j/cran-all/cranData/APCtools/R/plot_APChexamap.R
#' Internal helper to compute the tilted x-coordinate for the hexamap plot
#'
#' Internal helper function to be called in \code{\link{plot_APChexamap}},
#' to compute the x-coordinate in the tilted hexamap coordinate system.
#'
#' @param period_vec Numeric vector of period values.
#'
compute_xCoordinate <- function(period_vec) {
  x <- period_vec * sqrt(3) / 2
  return(x)
}

#' Internal helper to compute the tilted y-coordinate for the hexamap plot
#'
#' Internal helper function to be called in \code{\link{plot_APChexamap}},
#' to compute the y-coordinate in the tilted hexamap coordinate system.
#'
#' @param period_vec Numeric vector of period values.
#' @param age_vec Numeric vector of age values.
#'
compute_yCoordinate <- function(period_vec, age_vec){
  y <- age_vec - period_vec / 2
  return(y)
}
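# Illustrative sketch of the tilted coordinate system (not exported): one step
# in period moves a cell diagonally, one step in age moves it vertically, so
# cohorts (period - age) run along the third hexagon axis.
#
#   compute_xCoordinate(2000)        # 2000 * sqrt(3) / 2 = 1732.051
#   compute_yCoordinate(2000, 50)    # 50 - 2000 / 2 = -950
#   # same period, age + 1:   x unchanged, y + 1      (age axis is vertical)
#   # period + 1, age + 1:    x + sqrt(3)/2, y + 1/2  (constant cohort, diagonal)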
/scratch/gouwar.j/cran-all/cranData/APCtools/R/plot_APChexamap_helpers.R
#' Plot the density of one metric or categorical variable
#'
#' Create a density plot or a boxplot of one metric variable or a barplot
#' of one categorical variable, based on a specific subset of the data.
#'
#' If \code{plot_density} is called internally from within
#' \code{\link{plot_densityMatrix}} (i.e., if the dataset contains some of the
#' columns \code{c("age_group","period_group","cohort_group")}), this function
#' will calculate the metric densities individually for these groups.
#'
#' @param dat Dataset with columns \code{period} and \code{age} and the
#' main variable specified through argument \code{y_var}.
#' @param y_var Character name of the main variable to be plotted.
#' @param plot_type One of \code{c("density","boxplot")}. Only used if the
#' \code{y_var} column is metric.
#' @param apc_range Optional list with one or multiple elements with names
#' \code{"age","period","cohort"} to filter the data. Each element should
#' contain a numeric vector of values for the respective variable that should
#' be kept in the data. All other values are deleted.
#' @param highlight_diagonals Optional internal parameter which is only
#' specified when \code{plot_density} is called from within
#' \code{plot_densityMatrix}. See \code{\link{plot_densityMatrix}} for details.
#' @param y_var_cat_breaks Optional numeric vector of breaks to categorize
#' \code{y_var} based on calling function \code{\link{cut}}. Only used to
#' highlight the categories based on different colors, and only if the
#' \code{y_var} column is numeric.
#' @param y_var_cat_labels Optional character vector for the names of the
#' categories that were defined based on \code{y_var_cat_breaks}. The length of
#' this vector must be one shorter than \code{length(y_var_cat_breaks)}. Only
#' used if the \code{y_var} column is numeric.
#' @param weights_var Optional character name of a weights variable used to
#' project the results in the sample to some population.
#' @param log_scale Indicator if the main variable should be log10 transformed.
#' Only used if the \code{y_var} column is numeric. Defaults to FALSE.
#' @param xlab,ylab,legend_title Optional plot annotations.
#' @param ... Additional arguments passed to \code{\link[stats]{density}}.
#'
#' @return ggplot object
#'
#' @import checkmate dplyr
#' @export
#'
#' @author Alexander Bauer \email{alexander.bauer@@stat.uni-muenchen.de},
#' Maximilian Weigert \email{maximilian.weigert@@stat.uni-muenchen.de}
#'
#' @examples
#' library(APCtools)
#' data(travel)
#'
#' plot_density(dat = travel, y_var = "mainTrip_distance")
#'
#' plot_density(dat = travel, y_var = "mainTrip_distance", plot_type = "boxplot")
#'
plot_density <- function(dat, y_var, plot_type = "density",
                         apc_range = NULL, highlight_diagonals = NULL,
                         y_var_cat_breaks = NULL, y_var_cat_labels = NULL,
                         weights_var = NULL, log_scale = FALSE,
                         xlab = NULL, ylab = NULL, legend_title = NULL, ...)
{ checkmate::assert_data_frame(dat) checkmate::assert_character(y_var, len = 1) checkmate::assert_choice(plot_type, choices = c("density","boxplot")) checkmate::assert_list(apc_range, types = "numeric", max.len = 3, null.ok = TRUE, any.missing = FALSE) checkmate::assert_subset(names(apc_range), choices = c("age","period","cohort")) checkmate::assert_list(highlight_diagonals, types = "numeric", null.ok = TRUE) checkmate::assert_numeric(y_var_cat_breaks, null.ok = TRUE) checkmate::assert_character(y_var_cat_labels, len = length(y_var_cat_breaks) - 1, null.ok = TRUE) checkmate::assert_character(weights_var, max.len = 1, null.ok = TRUE) checkmate::assert_logical(log_scale, len = 1) checkmate::assert_character(xlab, len = 1, null.ok = TRUE) checkmate::assert_character(ylab, len = 1, null.ok = TRUE) checkmate::assert_character(legend_title, max.len = 1, null.ok = TRUE) # some NULL definitions to appease CRAN checks regarding use of dplyr/ggplot2 age <- period <- cohort <- NULL dat$cohort <- dat$period - dat$age # remove NA measurements if (any(is.na(dat[[y_var]]))) { message("Excluding ",sum(is.na(dat[[y_var]])), " missing observations of ",y_var,"...") dat <- dat[!is.na(dat[[y_var]]),] } # filter the dataset if (!is.null(apc_range)) { if (!is.null(apc_range$age)) { dat <- dat %>% filter(age %in% apc_range$age) } if (!is.null(apc_range$period)) { dat <- dat %>% filter(period %in% apc_range$period) } if (!is.null(apc_range$cohort)) { dat <- dat %>% filter(cohort %in% apc_range$cohort) } } # create a dataset to highlight specific diagonals if (!is.null(highlight_diagonals)) { dat_diag <- create_highlightDiagonalData(dat, highlight_diagonals) } else { dat_diag <- NULL } # main plot if (is.numeric(dat[[y_var]])) { # metric variable gg <- plot_density_metric(dat = dat, y_var = y_var, plot_type = plot_type, dat_highlightDiagonals = dat_diag, y_var_cat_breaks = y_var_cat_breaks, y_var_cat_labels = y_var_cat_labels, weights_var = weights_var, log_scale = log_scale, xlab = xlab, ylab = ylab, legend_title = legend_title, ...) } else { # categorical variable gg <- plot_density_categorical(dat = dat, y_var = y_var, dat_highlightDiagonals = dat_diag, weights_var = weights_var, xlab = xlab, ylab = ylab, ...) } return(gg) } #' Internal helper to plot a metric density #' #' Internal helper function to plot one metric density, to be called from within #' \code{\link{plot_density}}. #' #' @inheritParams plot_density #' @param dat_highlightDiagonals Optional dataset created by #' \code{\link{create_highlightDiagonalData}} to highlight specific diagonals #' in a density matrix. #' #' @import dplyr ggplot2 #' plot_density_metric <- function(dat, y_var, plot_type = "density", dat_highlightDiagonals = NULL, y_var_cat_breaks = NULL, y_var_cat_labels = NULL, weights_var = NULL, log_scale = FALSE, xlab = NULL, ylab = NULL, legend_title = NULL, ...) 
{ # some NULL definitions to appease CRAN checks regarding use of dplyr/ggplot2 x <- y <- x_cat <- weight <- NULL # delete potential NA's from y_var, since these mess with stats::density() dat <- dat[!is.na(dat[[y_var]]),] # log10 transform the main variable, and create a function to accordingly # adjust the labels on the x axis (the function is passed to scale_x_continuous()) if (log_scale) { dat[[y_var]] <- log10(dat[[y_var]]) if (!is.null(y_var_cat_breaks)) { y_var_cat_breaks <- log10(y_var_cat_breaks) } label_function <- function(x) { paste0("10^",x) } } else { # no log transformation label_function <- function(x) { x } # identity function } # general plot preparations if (is.null(xlab)) { xlab <- ifelse(!log_scale, y_var, paste(y_var, "on log10 scale")) } if (is.null(ylab)) { ylab <- "Density" } # base plot gg <- ggplot() if (!is.null(dat_highlightDiagonals)) { gg <- gg_highlightDiagonals(gg, dat, dat_highlightDiagonals) } # final plot type-specific preparations if (plot_type == "density") { dat_dens <- calc_density(dat = dat, y_var = y_var, weights_var = weights_var, ...) # categorize y_var if (!is.null(y_var_cat_breaks)) { dat_dens <- dat_dens %>% mutate(x_cat = cut(x, breaks = y_var_cat_breaks, labels = y_var_cat_labels, dig.lab = 6)) } # final plot preparations xlim <- range(dat_dens$x) # main plot gg <- gg + geom_line(data = dat_dens, aes(x = x, y = y), col = gray(0.4)) if (!is.null(y_var_cat_breaks)) { gg <- gg + geom_ribbon(data = dat_dens, aes(x = x, ymin = 0, ymax = y, fill = x_cat)) + scale_fill_brewer(palette = "Blues", direction = -1) } else { gg <- gg + geom_ribbon(data = dat_dens, aes(x = x, ymin = 0, ymax = y), fill = gray(0.4)) } gg <- gg + ylab(ylab) + labs(fill = legend_title) + scale_x_continuous(xlab, labels = label_function, limits = xlim, guide = guide_axis(check.overlap = TRUE)) + theme(legend.position = "bottom") } else { # plot_type == "boxplot" # preparations of the weights if (!is.null(weights_var)) { dat <- dat %>% dplyr::rename(weight = weights_var) } else { dat$weight <- 1 } # rename the main variable for easier handling dat <- dat %>% dplyr::rename(x = y_var) # main plot gg <- gg + geom_boxplot(data = dat, aes(x = x, weight = weight), col = gray(0.3), outlier.color = gray(0.3), outlier.alpha = 0.2) + scale_x_continuous(xlab, labels = label_function, guide = guide_axis(check.overlap = TRUE)) + ylim(c(-1,1)) } # general theme gg <- gg + theme(axis.text.y = element_blank(), axis.ticks.y = element_blank()) return(gg) } #' Internal helper to plot a categorical density #' #' Internal helper function to plot one categorical density, to be called from #' within \code{\link{plot_density}}. #' #' @inheritParams plot_density_metric #' @param xlab,ylab Optional plot annotations. #' #' @import dplyr ggplot2 #' plot_density_categorical <- function(dat, y_var, dat_highlightDiagonals = NULL, weights_var = NULL, xlab = NULL, ylab = NULL) { # some NULL definitions to appease CRAN checks regarding use of dplyr/ggplot2 x <- weight <- ..count.. <- NULL # make sure the main variable is encoded as factor dat <- dat %>% dplyr::rename(x = y_var) %>% mutate(x = factor(x)) # preparations of the weights if (!is.null(weights_var)) { dat <- dat %>% dplyr::rename(weight = weights_var) } else { dat$weight <- 1 } # final plot preparations if (is.null(xlab)) { xlab <- y_var } if (is.null(ylab)) { ylab <- "Rel. 
frequency" } # base plot gg <- ggplot() if (!is.null(dat_highlightDiagonals)) { gg <- gg_highlightDiagonals(gg, dat, dat_highlightDiagonals) } # main plot gg <- gg + geom_bar(data = dat, aes(x = x, y = ..count../sum(..count..), weight = weight, fill = x)) + scale_fill_brewer(y_var, palette = "Set2") + xlab(xlab) + ylab(ylab) + theme(axis.text.x = element_blank(), axis.ticks.x = element_blank()) return(gg) }
/scratch/gouwar.j/cran-all/cranData/APCtools/R/plot_density.R
#' Create a matrix of density plots
#'
#' This function creates a matrix of individual density plots
#' (i.e., a \emph{ridgeline matrix}) or boxplots (both for metric variables),
#' or of individual barplots (for categorical variables).
#' The age, period or cohort information can each either be plotted on the
#' x-axis or the y-axis.
#'
#' @inheritParams plot_density
#' @param dimensions Character vector specifying the two APC dimensions that
#' should be visualized along the x-axis and y-axis. Defaults to
#' \code{c("period","age")}.
#' @param age_groups,period_groups,cohort_groups Each a list. Either containing
#' purely scalar values or with each element specifying the two borders of one
#' row or column in the density matrix. E.g., if the period should be visualized
#' in decade columns from 1980 to 2009, specify
#' \code{period_groups = list(c(1980,1989), c(1990,1999), c(2000,2009))}.
#' The list can be named to specify labels for the categories. Only the two
#' arguments corresponding to the dimensions specified in the \code{dimensions}
#' argument must be passed.
#' @param highlight_diagonals Optional list to define diagonals in the density
#' matrix that should be highlighted with different colors. Each list element
#' should be a numeric vector stating the indices of the diagonals (counted from
#' the top left) that should be highlighted in the same color. If the list is
#' named, the names are used as legend labels.
#' @param legend_title Optional plot annotation.
#' @param ... Additional arguments passed to \code{\link{plot_density}}.
#'
#' @return ggplot object
#'
#' @import checkmate dplyr ggplot2
#' @importFrom stats as.formula
#' @export
#'
#' @references Weigert, M., Bauer, A., Gernert, J., Karl, M., Nalmpatian, A.,
#' Küchenhoff, H., and Schmude, J. (2021). Semiparametric APC analysis of
#' destination choice patterns: Using generalized additive models to quantify
#' the impact of age, period, and cohort on travel distances.
#' \emph{Tourism Economics}. doi:10.1177/1354816620987198.
#' #' @author Alexander Bauer \email{alexander.bauer@@stat.uni-muenchen.de}, #' Maximilian Weigert \email{maximilian.weigert@@stat.uni-muenchen.de} #' #' @examples #' library(APCtools) #' #' # define categorizations for the main trip distance #' dist_cat_breaks <- c(1,500,1000,2000,6000,100000) #' dist_cat_labels <- c("< 500 km","500 - 1,000 km", "1,000 - 2,000 km", #' "2,000 - 6,000 km", "> 6,000 km") #' #' age_groups <- list(c(80,89),c(70,79),c(60,69),c(50,59),c(40,49),c(30,39),c(20,29)) #' period_groups <- list(c(1970,1979),c(1980,1989),c(1990,1999),c(2000,2009),c(2010,2019)) #' cohort_groups <- list(c(1980,1989),c(1970,1979),c(1960,1969),c(1950,1959),c(1940,1949), #' c(1930,1939),c(1920,1929)) #' #' plot_densityMatrix(dat = travel, #' y_var = "mainTrip_distance", #' age_groups = age_groups, #' period_groups = period_groups, #' log_scale = TRUE) #' #' \donttest{ #' # highlight two cohorts #' plot_densityMatrix(dat = travel, #' y_var = "mainTrip_distance", #' age_groups = age_groups, #' period_groups = period_groups, #' highlight_diagonals = list(8, 10), #' log_scale = TRUE) #' #' # also mark different distance categories #' plot_densityMatrix(dat = travel, #' y_var = "mainTrip_distance", #' age_groups = age_groups, #' period_groups = period_groups, #' log_scale = TRUE, #' y_var_cat_breaks = dist_cat_breaks, #' y_var_cat_labels = dist_cat_labels, #' highlight_diagonals = list(8, 10), #' legend_title = "Distance category") #' #' # flexibly assign the APC dimensions to the x-axis and y-axis #' plot_densityMatrix(dat = travel, #' y_var = "mainTrip_distance", #' dimensions = c("period","cohort"), #' period_groups = period_groups, #' cohort_groups = cohort_groups, #' log_scale = TRUE, #' y_var_cat_breaks = dist_cat_breaks, #' y_var_cat_labels = dist_cat_labels, #' legend_title = "Distance category") #' #' # use boxplots instead of densities #' plot_densityMatrix(dat = travel, #' y_var = "mainTrip_distance", #' plot_type = "boxplot", #' age_groups = age_groups, #' period_groups = period_groups, #' log_scale = TRUE, #' highlight_diagonals = list(8, 10)) #' #' # plot categorical variables instead of metric ones #' plot_densityMatrix(dat = travel, #' y_var = "household_size", #' age_groups = age_groups, #' period_groups = period_groups, #' highlight_diagonals = list(8, 10)) #' } #' plot_densityMatrix <- function(dat, y_var, dimensions = c("period","age"), age_groups = NULL, period_groups = NULL, cohort_groups = NULL, plot_type = "density", highlight_diagonals = NULL, y_var_cat_breaks = NULL, y_var_cat_labels = NULL, weights_var = NULL, log_scale = FALSE, legend_title = NULL, ...) 
{
  checkmate::assert_data_frame(dat)
  checkmate::assert_character(y_var, len = 1)
  checkmate::assert_character(dimensions, len = 2)
  checkmate::assert_subset(dimensions, choices = c("age","period","cohort"))
  if ("age" %in% dimensions) {
    checkmate::assert_list(age_groups, null.ok = FALSE)
  } else {
    checkmate::assert_null(age_groups)
  }
  if ("period" %in% dimensions) {
    checkmate::assert_list(period_groups, null.ok = FALSE)
  } else {
    checkmate::assert_null(period_groups)
  }
  if ("cohort" %in% dimensions) {
    checkmate::assert_list(cohort_groups, null.ok = FALSE)
  } else {
    checkmate::assert_null(cohort_groups)
  }
  checkmate::assert_choice(plot_type, choices = c("density","boxplot"))
  checkmate::assert_list(highlight_diagonals, types = "numeric", null.ok = TRUE)
  checkmate::assert_numeric(y_var_cat_breaks, null.ok = TRUE)
  checkmate::assert_character(y_var_cat_labels, len = length(y_var_cat_breaks) - 1,
                              null.ok = TRUE)
  checkmate::assert_character(weights_var, max.len = 1, null.ok = TRUE)
  checkmate::assert_logical(log_scale)
  checkmate::assert_character(legend_title, len = 1, null.ok = TRUE)

  # some NULL definitions to appease CRAN checks regarding use of dplyr/ggplot2
  age_group <- period_group <- cohort_group <- NULL

  dat$cohort <- dat$period - dat$age

  dimX_groups <- get(paste0(dimensions[1], "_groups"))
  dimY_groups <- get(paste0(dimensions[2], "_groups"))

  # if intervals were specified for both main dimensions: check if same size
  if (length(dimX_groups[[1]]) == 2 | length(dimY_groups[[1]]) == 2) {
    interval_sizes <- c()
    if (length(dimX_groups[[1]]) == 2) {
      interval_sizes <- sapply(dimX_groups, function(x) x[2] - x[1])
    }
    if (length(dimY_groups[[1]]) == 2) {
      interval_sizes <- append(interval_sizes,
                               sapply(dimY_groups, function(x) x[2] - x[1]))
    }
    if (length(unique(interval_sizes)) > 1) {
      stop("All intervals specified in '",dimensions[1],"_groups' and '",
           dimensions[2],"_groups' must have the same size.")
    }
  }

  # if diagonals should be highlighted: calculate the diagonal labels
  if (!is.null(highlight_diagonals)) {
    operation_sign <- ifelse("period" %in% dimensions, -1, +1)
    for (i in 1:length(highlight_diagonals)) {
      d <- highlight_diagonals[[i]]
      dimY_refGroup <- min(c(length(dimY_groups), d))
      dimX_refGroup <- ifelse(d <= length(dimY_groups), 1,
                              d - length(dimY_groups) + 1)
      # label calculation, depending on whether the X and Y groups are scalars
      # or intervals
      if (length(dimX_groups[[1]]) == length(dimY_groups[[1]])) { # case 1: two intervals or two scalars
        d_label <- abs(dimX_groups[[dimX_refGroup]][1] +
                         operation_sign * dimY_groups[[dimY_refGroup]][1])
      } else { # case 2: one of X / Y is scalar, the other interval
        d_label <- abs(dimX_groups[[dimX_refGroup]] +
                         operation_sign * dimY_groups[[dimY_refGroup]])
      }
      names(highlight_diagonals)[i] <- paste(sort(d_label), collapse = " - ")
    }
  }

  # define the APC groups
  if ("age" %in% dimensions) {
    dat <- dat %>%
      mutate(age_group = create_groupVariable(dat, "age",
                                              groups_list = age_groups)) %>%
      filter(!is.na(age_group))
  }
  if ("period" %in% dimensions) {
    dat <- dat %>%
      mutate(period_group = create_groupVariable(dat, "period",
                                                 groups_list = period_groups)) %>%
      filter(!is.na(period_group))
  }
  if ("cohort" %in% dimensions) {
    dat <- dat %>%
      mutate(cohort_group = create_groupVariable(dat, "cohort",
                                                 groups_list = cohort_groups)) %>%
      filter(!is.na(cohort_group))
  }

  # define axis labels and facets
  main_lab <- ifelse(!log_scale, y_var, paste(y_var, "on log10 scale"))
  x_lab <- capitalize_firstLetter(dimensions[1])
  y_lab <- capitalize_firstLetter(dimensions[2])
  facet_formula <-
stats::as.formula(paste(paste0(dimensions[2],"_group"), "~", paste0(dimensions[1],"_group"))) legend.position <- ifelse(is.numeric(dat[[y_var]]) & is.null(highlight_diagonals), "none", "right") # create density matrix gg <- plot_density(dat = dat, y_var = y_var, plot_type = plot_type, highlight_diagonals = highlight_diagonals, y_var_cat_breaks = y_var_cat_breaks, y_var_cat_labels = y_var_cat_labels, weights_var = weights_var, log_scale = log_scale, legend_title = legend_title, ...) + facet_grid(facets = facet_formula, switch = "y") + labs(subtitle = x_lab, x = main_lab, y = y_lab) + theme(axis.text.y = element_blank(), axis.ticks.y = element_blank(), plot.subtitle = element_text(hjust = 0.5), strip.text.y.left = element_text(angle = 0), legend.position = legend.position, panel.grid.minor = element_blank()) return(gg) } #' Internal helper to create a group variable as base for a density matrix #' #' Internal helper function to create a group variable based on the #' categorization of either age, period or cohort. To be called from within #' \code{\link{plot_densityMatrix}}. #' #' @param dat Dataset with a column \code{"age"}, \code{"period"} or #' \code{"cohort"}, dependent on the specified \code{APC_var}. #' @param APC_var One of \code{c("age","period","cohort")}. #' @param groups_list A list with each element specifying the borders of one #' row or column in the density matrix. E.g., if the period should be visualized #' in decade columns from 1980 to 2009, specify #' \code{groups_list = list(c(1980,1989), c(1990,1999), c(2000,2009))}. #' The list can be named to specify labels for the categories. #' #' @return Vector for the grouping that can be added as additional column to #' the data. #' #' @import dplyr #' create_groupVariable <- function(dat, APC_var, groups_list) { # some NULL definitions to appease CRAN checks regarding use of dplyr/ggplot2 group_var <- NULL # rename the 'APC_var' column, for easier handling dat <- dat %>% dplyr::rename(var = APC_var) # create the group labels, if not specified if (is.null(names(groups_list))) { names(groups_list) <- sapply(groups_list, function(x) { paste(x, collapse = " - ") }) } # add a variable 'group_var' to the data dat$group_var <- "not categorized" for (i in 1:length(groups_list)) { if (length(groups_list[[i]]) == 1) { # groups are only specific scalar values dat <- dat %>% mutate(group_var = case_when(var == groups_list[[i]] ~ names(groups_list)[i], TRUE ~ group_var)) } else { # groups are intervals dat <- dat %>% mutate(group_var = case_when(var >= groups_list[[i]][1] & var <= groups_list[[i]][2] ~ names(groups_list)[i], TRUE ~ group_var)) } } dat <- dat %>% mutate(group_var = factor(group_var, levels = names(groups_list))) return(dat$group_var) }
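# Brief illustrative sketch (kept as a comment so nothing runs at package
# load; the toy data frame and groups below are hypothetical): it shows how
# unnamed groups receive "from - to" labels and how values are assigned.
#   dat <- data.frame(age = c(25, 47, 63))
#   create_groupVariable(dat, "age",
#                        groups_list = list(c(20,39), c(40,59), c(60,79)))
#   # -> factor with levels "20 - 39", "40 - 59", "60 - 79"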
## file: R/plot_densityMatrix.R
#' Internal helper to calculate the (group-specific) density of a variable
#'
#' Internal helper function that is called in \code{\link{plot_density}} to
#' calculate the density of a metric variable. If \code{plot_density} is called
#' from within \code{\link{plot_densityMatrix}} (i.e., when some of the columns
#' \code{c("age_group","period_group","cohort_group")} are part of the dataset),
#' the density is computed individually for all respective APC groups.
#'
#' @inheritParams plot_density
#'
#' @return Dataset with the calculated densities.
#'
#' @import dplyr
#' @importFrom stats density
#'
calc_density <- function(dat, y_var, weights_var = NULL, ...) {

  # some NULL definitions to appease CRAN checks regarding use of dplyr/ggplot2
  dim1 <- dim2 <- NULL

  # remove potential NA values from 'y_var'
  dat <- dat[!is.na(dat[[y_var]]),]

  # retrieve the weights vector
  weights_vector <- NULL
  if (!is.null(weights_var)) {
    # make sure the weights are not NA
    if (any(is.na(dat[[weights_var]]))) {
      warning("Deleting ",sum(is.na(dat[[weights_var]])),
              " observations where the weights variable is NA.")
      dat <- dat[!is.na(dat[[weights_var]]),]
    }
    weights_vector <- dat[[weights_var]]
  }

  # calculate the densities
  if (all(!(c("age_group","period_group","cohort_group") %in% names(dat)))) {
    # calculate one global density
    dens <- stats::density(x = dat[[y_var]], weights = weights_vector, ...)
    dat_dens <- data.frame(x = dens$x, y = dens$y)

  } else {
    # calculate one density for each APC subgroup
    dimensions <- c()
    if ("age_group" %in% names(dat)) {
      dimensions <- append(dimensions, "age_group")
    }
    if ("period_group" %in% names(dat)) {
      dimensions <- append(dimensions, "period_group")
    }
    if ("cohort_group" %in% names(dat)) {
      dimensions <- append(dimensions, "cohort_group")
    }
    # 'dimensions' always has exactly two elements when called from within 'plot_densityMatrix'
    dim1_categories <- levels(dat[[dimensions[1]]])
    dim2_categories <- levels(dat[[dimensions[2]]])

    dat_list1 <- lapply(dim1_categories, function(dim1_cat) {
      dat_list12 <- lapply(dim2_categories, function(dim2_cat) {
        dim12_rows <- which(dat[[dimensions[1]]] == dim1_cat &
                              dat[[dimensions[2]]] == dim2_cat)
        if (length(dim12_rows) < 2) {
          # return nothing if the combination is not part of the data
          return(NULL)
        }
        dens <- stats::density(x = dat[dim12_rows, y_var, drop = TRUE],
                               weights = weights_vector[dim12_rows], ...)
        dat_dens12 <- data.frame(x = dens$x, y = dens$y,
                                 dim1 = dim1_cat, dim2 = dim2_cat)
        return(dat_dens12)
      })
      dat_dens1 <- dplyr::bind_rows(dat_list12)
      return(dat_dens1)
    })
    dat_dens <- dplyr::bind_rows(dat_list1) %>%
      mutate(dim1 = factor(dim1, levels = dim1_categories),
             dim2 = factor(dim2, levels = dim2_categories))
    colnames(dat_dens)[colnames(dat_dens) == "dim1"] <- dimensions[1]
    colnames(dat_dens)[colnames(dat_dens) == "dim2"] <- dimensions[2]
  }

  return(dat_dens)
}


#' Internal helper to add the diagonal highlighting to a ggplot
#'
#' Internal helper function to highlight diagonals in a density matrix. The
#' function takes an existing ggplot object, adds the diagonal highlighting
#' and returns the edited ggplot object again.
#'
#' @inheritParams plot_density
#' @param gg Existing ggplot object to which the diagonal highlighting should
#' be added.
#' @param dat_highlightDiagonals Dataset created by
#' \code{\link{create_highlightDiagonalData}} to highlight specific diagonals
#' in a density matrix.
#' #' @import dplyr ggplot2 #' @importFrom scales hue_pal #' gg_highlightDiagonals <- function(gg, dat, dat_highlightDiagonals) { # some NULL definitions to appease CRAN checks regarding use of dplyr/ggplot2 col_group <- NULL diag_dimension <- ifelse(!("age_group" %in% names(dat)), "Age groups", ifelse(!("period_group" %in% names(dat)), "Periods", "Cohorts")) ncols_highlight <- length(unique(dat_highlightDiagonals$col_group)) - 1 col_vector <- c(scales::hue_pal()(ncols_highlight), gray(0.9)) gg <- gg + geom_rect(data = dat_highlightDiagonals, aes(col = col_group), size = 2, fill = "transparent", xmin = -Inf, xmax = Inf, ymin = -Inf, ymax = Inf) + scale_color_manual(diag_dimension, values = col_vector) return(gg) } #' Internal helper to create a dataset for ggplot2 to highlight diagonals #' #' Internal helper function to create a dataset for \code{ggplot2} that can #' be used to highlight specific diagonals in a density matrix. #' #' @inheritParams plot_density #' #' @import dplyr #' create_highlightDiagonalData <- function(dat, highlight_diagonals) { # some NULL definitions to appease CRAN checks regarding use of dplyr/ggplot2 dim1 <- dim2 <- col_group <- NULL dimensions <- c() if ("age_group" %in% names(dat)) { dimensions <- append(dimensions, "age_group") } if ("period_group" %in% names(dat)) { dimensions <- append(dimensions, "period_group") } if ("cohort_group" %in% names(dat)) { dimensions <- append(dimensions, "cohort_group") } diag_dimension <- ifelse(!("age_group" %in% names(dat)), "age groups", ifelse(!("period_group" %in% names(dat)), "periods", "cohorts")) # create a data.frame from 'highlight_diagonals' for easier handling diag_dat <- data.frame(diagonal = unlist(highlight_diagonals, use.names = FALSE)) diag_dat$label <- sapply(1:length(highlight_diagonals), function(i) { label <- ifelse(!is.null(names(highlight_diagonals)), names(highlight_diagonals)[i], paste("Diagonal",LETTERS[i])) return(rep(label, times = length(highlight_diagonals[[i]]))) }) %>% unlist() # 'dimensions' always has two elements only when called from within 'plot_densityMatrix' dim1_categories <- levels(dat[[dimensions[1]]]) dim2_categories <- levels(dat[[dimensions[2]]]) dat_list1 <- lapply(1:length(dim1_categories), function(i1) { dat_list12 <- lapply(1:length(dim2_categories), function(i2) { dat_highlight12 <- data.frame(dim1 = dim1_categories[i1], dim2 = dim2_categories[i2], col_group = paste("other",diag_dimension)) col_group_row <- match(i1 + i2 - 1, diag_dat$diagonal) if (!is.na(col_group_row)) { dat_highlight12$col_group <- diag_dat$label[col_group_row] } return(dat_highlight12) }) dat_highlight1 <- dplyr::bind_rows(dat_list12) return(dat_highlight1) }) dat_highlight <- dplyr::bind_rows(dat_list1) %>% mutate(dim1 = factor(dim1, levels = dim1_categories), dim2 = factor(dim2, levels = dim2_categories)) colnames(dat_highlight)[colnames(dat_highlight) == "dim1"] <- dimensions[1] colnames(dat_highlight)[colnames(dat_highlight) == "dim2"] <- dimensions[2] # use 'other age groups / periods / cohorts' as last level for the 'highlight_diagonals' legend dat_highlight <- dat_highlight %>% mutate(col_group = factor(col_group, levels = c(unique(diag_dat$label), paste("other",diag_dimension)))) return(dat_highlight) }
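# Sketch of the diagonal indexing used above (toy numbers, nothing here is
# package API): a matrix cell in column i1 and row i2 lies on diagonal
# d = i1 + i2 - 1, which is the value matched against 'highlight_diagonals'.
# With 3 x 3 categories:
#   outer(1:3, 1:3, FUN = function(i1, i2) i1 + i2 - 1)
#   #      [,1] [,2] [,3]
#   # [1,]    1    2    3
#   # [2,]    2    3    4
#   # [3,]    3    4    5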
## file: R/plot_density_helpers.R
#' Joint plot to compare the marginal APC effects of multiple models
#'
#' This function creates a joint plot of the marginal APC effects of multiple
#' estimated models. It creates a plot with one pane per age, period and
#' cohort effect, each containing one line for each estimated model.
#'
#' If the model was estimated with a log or logit link, the function
#' automatically performs an exponential transformation of the effect.
#'
#' Since the plot output created by the function is not a \code{ggplot2} object,
#' but an object created with \code{ggpubr::ggarrange}, the overall theme
#' of the plot cannot be changed by adding the theme in the form of
#' '\code{plot_jointMarginalAPCeffects(...) + theme_minimal(...)}'.
#' Instead, you can call \code{theme_set(theme_minimal(...))} as an individual
#' call before calling \code{plot_jointMarginalAPCeffects(...)}. The latter
#' function will then use this global plotting theme.
#'
#' @inheritParams plot_APCheatmap
#' @param model_list A list of regression models estimated with
#' \code{\link[mgcv]{gam}} or \code{\link[mgcv]{bam}}. If the list is named, the
#' names are used as labels. Can also be a single model object instead of a list.
#' @param vlines_list Optional list that can be used to highlight the borders of
#' specific age groups, time intervals or cohorts. Each element must be a
#' numeric vector of values on the x-axis where vertical lines should be drawn.
#' The list can have at most three elements and must have names out of
#' \code{c("age","period","cohort")}.
#' @param ylab,ylim Optional ggplot2 styling arguments.
#'
#' @return Plot grid created with \code{\link[ggpubr]{ggarrange}}.
#'
#' @import checkmate dplyr ggplot2
#' @importFrom ggpubr ggarrange
#' @export
#'
#' @author Alexander Bauer \email{alexander.bauer@@stat.uni-muenchen.de},
#' Maximilian Weigert \email{maximilian.weigert@@stat.uni-muenchen.de}
#'
#' @examples
#' library(APCtools)
#' library(mgcv)
#'
#' data(travel)
#'
#' # plot marginal effects of one model
#' model_pure <- gam(mainTrip_distance ~ te(age, period), data = travel)
#' plot_jointMarginalAPCeffects(model_pure, dat = travel)
#'
#' # plot marginal effects of multiple models
#' model_cov <- gam(mainTrip_distance ~ te(age, period) + s(household_income),
#'                  data = travel)
#' model_list <- list("pure model"      = model_pure,
#'                    "covariate model" = model_cov)
#' plot_jointMarginalAPCeffects(model_list, dat = travel)
#'
#' # mark specific cohorts
#' plot_jointMarginalAPCeffects(model_list, dat = travel,
#'                              vlines_list = list("cohort" = c(1966.5,1982.5,1994.5)))
#'
plot_jointMarginalAPCeffects <- function(model_list, dat, vlines_list = NULL,
                                         ylab = NULL, ylim = NULL) {

  checkmate::assert_choice(class(model_list)[1], choices = c("list","gam"))
  if (class(model_list)[1] == "list") {
    checkmate::assert_list(model_list, types = "gam")
  }
  checkmate::assert_data_frame(dat)
  checkmate::assert_list(vlines_list, min.len = 1, max.len = 3,
                         types = "numeric", null.ok = TRUE)
  checkmate::assert_subset(names(vlines_list), choices = c("age","period","cohort"))
  checkmate::assert_character(ylab, len = 1, null.ok = TRUE)
  checkmate::assert_numeric(ylim, len = 2, null.ok = TRUE)

  # some NULL definitions to appease CRAN checks regarding use of dplyr/ggplot2
  effect <- type <- value <- NULL

  # reformat 'model_list' to a list, if only one model object was specified
  if (class(model_list)[1] == "gam") {
    model_list <- list(model_list)
  }

  # retrieve model labels
  if (!is.null(names(model_list))) {
    model_labels <- names(model_list)
  } else {
    model_labels
<- paste("model", 1:length(model_list)) } # retrieve datasets with the marginal effects datList_list <- lapply(model_list, function(x) { plot_marginalAPCeffects(x, dat, return_plotData = TRUE) }) if (is.null(ylim)) { ylim <- lapply(datList_list, function(x) { dplyr::bind_rows(x) }) %>% dplyr::bind_rows() %>% pull(effect) %>% range() } used_logLink <- (model_list[[1]]$family[[2]] %in% c("log","logit")) | grepl("Ordered Categorical", model_list[[1]]$family[[1]]) if (is.null(ylab)) { ylab <- ifelse(used_logLink, "exp(Effect)", "Effect") } # base plots gg_age <- gg_period <- gg_cohort <- ggplot() # marginal age effect dat_age <- lapply(1:length(datList_list), function(i) { datList_list[[i]]$dat_age %>% mutate(type = model_labels[i]) }) %>% dplyr::bind_rows() %>% mutate(type = factor(type, levels = model_labels)) if ("age" %in% names(vlines_list)) { gg_age <- gg_age + geom_vline(xintercept = vlines_list$age, col = gray(0.5), lty = 2) } gg_age <- gg_age + geom_hline(yintercept = ifelse(used_logLink, 1, 0), col = gray(0.3), lty = 2) + geom_line(data = dat_age, aes(x = value, y = effect, col = type)) + xlab("Age") + scale_y_continuous(trans = ifelse(used_logLink, "log2", "identity"), name = ylab, limits = ylim) + theme(legend.title = element_blank()) # marginal period effect dat_period <- lapply(1:length(datList_list), function(i) { datList_list[[i]]$dat_period %>% mutate(type = model_labels[i]) }) %>% dplyr::bind_rows() %>% mutate(type = factor(type, levels = model_labels)) if ("period" %in% names(vlines_list)) { gg_period <- gg_period + geom_vline(xintercept = vlines_list$period, col = gray(0.5), lty = 2) } gg_period <- gg_period + geom_hline(yintercept = ifelse(used_logLink, 1, 0), col = gray(0.3), lty = 2) + geom_line(data = dat_period, aes(x = value, y = effect, col = type)) + xlab("Period") + scale_y_continuous(trans = ifelse(used_logLink, "log2", "identity"), name = ylab, limits = ylim) + theme(legend.title = element_blank(), axis.title.y = element_blank(), axis.text.y = element_blank(), axis.ticks.y = element_blank()) # marginal cohort effect dat_cohort <- lapply(1:length(datList_list), function(i) { datList_list[[i]]$dat_cohort %>% mutate(type = model_labels[i]) }) %>% dplyr::bind_rows() %>% mutate(type = factor(type, levels = model_labels)) if ("cohort" %in% names(vlines_list)) { gg_cohort <- gg_cohort + geom_vline(xintercept = vlines_list$cohort, col = gray(0.5), lty = 2) } gg_cohort <- gg_cohort + geom_hline(yintercept = ifelse(used_logLink, 1, 0), col = gray(0.3), lty = 2) + geom_line(data = dat_cohort, aes(x = value, y = effect, col = type)) + xlab("Cohort") + scale_y_continuous(trans = ifelse(used_logLink, "log2", "identity"), name = ylab, limits = ylim) + theme(legend.title = element_blank(), axis.title.y = element_blank(), axis.text.y = element_blank(), axis.ticks.y = element_blank()) # no color coding when only one model is plotted if (length(model_list) == 1) { gg_age <- gg_age + scale_color_manual(values = gray(0.2)) gg_period <- gg_period + scale_color_manual(values = gray(0.2)) gg_cohort <- gg_cohort + scale_color_manual(values = gray(0.2)) } # joint plot ggpubr::ggarrange(plotlist = list(gg_age, gg_period, gg_cohort), legend = ifelse(length(model_list) == 1, "none", "bottom"), common.legend = TRUE, ncol = 3, widths = c(0.36, 0.32, 0.32)) }
## file: R/plot_jointMarginalAPCeffects.R
#' Plot linear effects of a gam in an effect plot #' #' Create an effect plot of linear effects of a model fitted with #' \code{\link[mgcv]{gam}} or \code{\link[mgcv]{bam}}. #' #' If the model was estimated with a log or logit link, the function #' automatically performs an exponential transformation of the effect. #' #' @inheritParams extract_summary_linearEffects #' @param variables Optional character vector of variable names specifying which #' effects should be plotted. The order of the vector corresponds to #' the order in the effect plot. If the argument is not specified, all linear #' effects are plotted according to the order of their appearance in the model #' output. #' @param return_plotData If TRUE, the dataset prepared for plotting is #' returned. Defaults to FALSE. #' @param refCat If TRUE, reference categories are added to the output for #' categorical covariates. Defaults to FALSE. #' @param ... Additional arguments passed to #' \code{\link{extract_summary_linearEffects}}. #' #' #' @return ggplot object #' #' @import checkmate dplyr #' @importFrom colorspace scale_colour_discrete_qualitative #' @export #' #' @author Alexander Bauer \email{alexander.bauer@@stat.uni-muenchen.de} #' #' @examples #' library(APCtools) #' library(mgcv) #' #' data(travel) #' model <- gam(mainTrip_distance ~ te(age, period) + residence_region + #' household_size + s(household_income), data = travel) #' #' plot_linearEffects(model) #' plot_linearEffects <- function(model, variables = NULL, return_plotData = FALSE, refCat = FALSE, ...) { checkmate::assert_class(model, classes = "gam") checkmate::assert_character(variables, null.ok = TRUE) # some NULL definitions to appease CRAN checks regarding use of dplyr/ggplot2 coef <- CI_lower <- CI_upper <- coef_exp <- CI_lower_exp <- CI_upper_exp <- param <- vargroup <- varnames <- vars <- var_classes <- ref <- new_row <- min_index <- se <- pvalue <- se_exp <- NULL used_logLink <- (model$family[[2]] %in% c("log","logit")) | grepl("Ordered Categorical", model$family[[1]]) ylab <- ifelse(used_logLink, "exp(Effect)", "Effect") # extract model information plot_dat <- extract_summary_linearEffects(model, ...) 
%>% mutate(param = as.character(param))
  plot_dat$vargroup <- NA

  # categorize the coefficients in groups (one for each variable)
  var_classes <- attributes(model$pterms)$dataClasses[-1]
  vars <- names(var_classes)
  for (i in vars) {
    # potentially more than one coefficient for factor or character variables:
    if (var_classes[i] %in% c("character", "factor")) {
      varnames <- paste0(i, unlist(unname(model$xlevels[i])))
      plot_dat$vargroup[which(plot_dat$param %in% varnames)] <- i
      # Add information about the reference category:
      if (refCat == TRUE) {
        ref <- varnames[!(varnames %in% plot_dat$param)]
        min_index <- which(plot_dat$param %in% varnames)[1]
        if (used_logLink == FALSE) {
          new_row <- c(ref, 0, 0, 0, 0, 0, i)
          plot_dat <- rbind(plot_dat[1:(min_index - 1), ], new_row,
                            plot_dat[min_index:nrow(plot_dat), ])
          plot_dat <- plot_dat %>%
            mutate(coef = as.numeric(coef), se = as.numeric(se),
                   CI_lower = as.numeric(CI_lower),
                   CI_upper = as.numeric(CI_upper),
                   pvalue = as.numeric(pvalue))
        } else {
          new_row <- c(ref, 0, 0, 0, 0, 1, 0, 1, 1, 0, i)
          plot_dat <- rbind(plot_dat[1:(min_index - 1), ], new_row,
                            plot_dat[min_index:nrow(plot_dat), ])
          plot_dat <- plot_dat %>%
            mutate(coef = as.numeric(coef), se = as.numeric(se),
                   CI_lower = as.numeric(CI_lower),
                   CI_upper = as.numeric(CI_upper),
                   coef_exp = as.numeric(coef_exp),
                   se_exp = as.numeric(se_exp),
                   CI_lower_exp = as.numeric(CI_lower_exp),
                   CI_upper_exp = as.numeric(CI_upper_exp),
                   pvalue = as.numeric(pvalue))
        }
        row.names(plot_dat) <- 1:nrow(plot_dat)
      }
    }
    # only a single coefficient for numeric variables
    else {
      plot_dat$vargroup[which(plot_dat$param == i)] <- i
    }
  }

  # remove the intercept
  plot_dat <- plot_dat[-1,]

  # select variables to plot:
  if (!is.null(variables)) {
    plot_dat <- plot_dat %>% filter(vargroup %in% variables)
  }

  # remove the vargroup label from the coefficient labels for categorical variables
  cat_coefs <- which(nchar(plot_dat$param) > nchar(plot_dat$vargroup))
  if (length(cat_coefs) > 0) {
    plot_dat$param[cat_coefs] <- substr(plot_dat$param[cat_coefs],
                                        nchar(plot_dat$vargroup[cat_coefs]) + 1, 100)
  }

  # reorder dataset according to specified variable vector
  var_levels <- if(is.null(variables)) unique(plot_dat$vargroup) else variables
  plot_dat <- plot_dat %>%
    mutate(vargroup = factor(x = vargroup, levels = var_levels)) %>%
    arrange(vargroup)
  plot_dat$param <- factor(plot_dat$param, levels = unique(plot_dat$param))

  # final preparations
  if (used_logLink) {
    if (any(plot_dat$CI_lower_exp < 0)) {
      warning("Note: After the delta method transformation some values of the lower confidence interval border were negative.
These values were set to 0.01.")
      plot_dat$CI_lower_exp[plot_dat$CI_lower_exp < 0] <- 0.01
    }
    plot_dat <- plot_dat %>% select(-coef, -se, -CI_lower, -CI_upper)
  }

  if (return_plotData) {
    return(plot_dat)
  }

  # rename variables in case of log link:
  if (used_logLink) {
    plot_dat <- plot_dat %>%
      dplyr::rename(coef = coef_exp, CI_lower = CI_lower_exp,
                    CI_upper = CI_upper_exp)
  }

  # create plot
  gg <- ggplot(plot_dat, mapping = aes(x = param, y = coef)) +
    geom_hline(yintercept = ifelse(used_logLink, 1, 0), col = gray(0.3), lty = 2) +
    geom_pointrange(mapping = aes(ymin = CI_lower, ymax = CI_upper, col = vargroup),
                    size = 1) +
    geom_point(mapping = aes(col = vargroup), size = 1) +
    scale_y_continuous(trans = ifelse(used_logLink, "log2", "identity"),
                       name = ylab) +
    colorspace::scale_colour_discrete_qualitative(palette = "Dark 3") +
    facet_wrap(~ vargroup, scales = "free_x", nrow = 1) +
    theme(legend.position = "none",
          axis.title.x = element_blank(),
          axis.text.x = element_text(angle = 45, hjust = 1))

  return(gg)
}
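# Usage sketch (covariate names follow the roxygen example above and are
# otherwise just illustrative): restrict and order the panels via 'variables',
# and show the reference categories of factor covariates via 'refCat':
#   plot_linearEffects(model, variables = c("household_size", "residence_region"),
#                      refCat = TRUE)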
## file: R/plot_linearEffects.R
#' Plot of marginal APC effects based on an estimated GAM model #' #' Plot the marginal effect of age, period or cohort, based on an APC model #' estimated as a semiparametric additive regression model with \code{\link[mgcv]{gam}} #' or \code{\link[mgcv]{bam}}. #' This function is a simple wrapper to \code{\link{plot_partialAPCeffects}}, #' called with argument \code{hide_partialEffects = TRUE}. #' #' @inheritParams plot_partialAPCeffects #' #' @return ggplot object #' #' @export #' #' @references Weigert, M., Bauer, A., Gernert, J., Karl, M., Nalmpatian, A., #' Küchenhoff, H., and Schmude, J. (2021). Semiparametric APC analysis of #' destination choice patterns: Using generalized additive models to quantify #' the impact of age, period, and cohort on travel distances. #' \emph{Tourism Economics}. doi:10.1177/1354816620987198. #' #' @author Alexander Bauer \email{alexander.bauer@@stat.uni-muenchen.de}, #' Maximilian Weigert \email{maximilian.weigert@@stat.uni-muenchen.de} #' #' @examples #' library(APCtools) #' library(mgcv) #' #' data(travel) #' model <- gam(mainTrip_distance ~ te(age, period), data = travel) #' #' plot_marginalAPCeffects(model, dat = travel, variable = "age") #' #' # mark specific cohorts #' plot_marginalAPCeffects(model, dat = travel, variable = "cohort", #' vlines_vec = c(1966.5,1982.5,1994.5)) #' plot_marginalAPCeffects <- function(model, dat, variable = "age", vlines_vec = NULL, return_plotData = FALSE) { plot_partialAPCeffects(model = model, dat = dat, variable = variable, hide_partialEffects = TRUE, vlines_vec = vlines_vec, return_plotData = return_plotData) } #' Partial APC plots based on an estimated GAM model #' #' Create the partial APC plots based on an APC model estimated as a semiparametric #' additive regression model with \code{\link[mgcv]{gam}} or \code{\link[mgcv]{bam}}. #' #' If the model was estimated with a log or logit link, the function #' automatically performs an exponential transformation of the effect. #' #' @inheritParams plot_APCheatmap #' @param variable One of \code{c("age","period","cohort")}, specifying the #' temporal dimension for which the partial effect plots should be created. #' @param hide_partialEffects If TRUE, only the marginal effect will be plotted. #' Defaults to FALSE. #' @param vlines_vec Optional numeric vector of values on the x-axis where #' vertical lines should be drawn. Can be used to highlight the borders of #' specific age groups, time intervals or cohorts. #' @param return_plotData If TRUE, a list of the datasets prepared for plotting #' is returned instead of the ggplot object. The list contains one dataset each #' for the overall effect (= evaluations of the APC surface to plot the partial #' effects) and for each marginal APC effect (no matter the specified value of #' the argument \code{variable}). Defaults to FALSE. #' #' @return ggplot object (if \code{hide_partialEffects} is TRUE) or a plot grid #' created with \code{\link[ggpubr]{ggarrange}} (if FALSE). #' #' @import checkmate dplyr ggplot2 stringr #' @importFrom ggpubr ggarrange #' @importFrom mgcv predict.gam #' @export #' #' @references Weigert, M., Bauer, A., Gernert, J., Karl, M., Nalmpatian, A., #' Küchenhoff, H., and Schmude, J. (2021). Semiparametric APC analysis of #' destination choice patterns: Using generalized additive models to quantify #' the impact of age, period, and cohort on travel distances. #' \emph{Tourism Economics}. doi:10.1177/1354816620987198. 
#' #' @author Alexander Bauer \email{alexander.bauer@@stat.uni-muenchen.de}, #' Maximilian Weigert \email{maximilian.weigert@@stat.uni-muenchen.de} #' #' @examples #' library(APCtools) #' library(mgcv) #' #' data(travel) #' model <- gam(mainTrip_distance ~ te(age, period), data = travel) #' #' plot_partialAPCeffects(model, dat = travel, variable = "age") #' #' # mark specific cohorts #' plot_partialAPCeffects(model, dat = travel, variable = "cohort", #' vlines_vec = c(1966.5,1982.5,1994.5)) #' plot_partialAPCeffects <- function(model, dat, variable = "age", hide_partialEffects = FALSE, vlines_vec = NULL, return_plotData = FALSE) { checkmate::assert_class(model, classes = "gam") checkmate::assert_data_frame(dat) checkmate::assert_choice(variable, choices = c("age","period","cohort")) checkmate::assert_logical(hide_partialEffects) checkmate::assert_numeric(vlines_vec, min.len = 1, null.ok = TRUE) checkmate::assert_logical(return_plotData) # some NULL definitions to appease CRAN checks regarding use of dplyr/ggplot2 period <- age <- effect <- cohort <- exp_effect <- value <- NULL # create a dataset for predicting the values of the APC surface grid_age <- min(dat$age, na.rm = TRUE):max(dat$age, na.rm = TRUE) grid_period <- min(dat$period, na.rm = TRUE):max(dat$period, na.rm = TRUE) dat_predictionGrid <- expand.grid(age = grid_age, period = grid_period) %>% mutate(cohort = period - age) # add random values for all further covariates in the model, # necessary for calling mgcv:::predict.gam covars <- attr(model$terms, "term.labels") covars <- covars[!(covars %in% c("age","period","cohort"))] covars <- if_else(stringr::str_detect(string = covars, pattern = "(?<=\\().*(?=\\))"), stringr::str_extract(string = covars, pattern = "(?<=\\().*(?=\\))"), covars) if (length(covars) > 0) { dat_cov <- dat[,covars, drop = FALSE] row <- which(apply(dat_cov, 1, function(x) { all(!is.na(x)) }))[1] dat_predictionGrid[,covars] <- dat[row, covars] } # create a dataset containing the estimated values of the APC surface terms_model <- sapply(model$smooth, function(x) { x$label }) terms_index_APC <- which(grepl("age", terms_model) | grepl("period", terms_model)) term_APCsurface <- terms_model[terms_index_APC] dat_overallEffect <- dat_predictionGrid %>% mutate(effect = rowSums(mgcv::predict.gam(object = model, newdata = dat_predictionGrid, type = "terms", terms = term_APCsurface))) %>% mutate(effect = effect - mean(effect)) used_logLink <- (model$family[[2]] %in% c("log","logit")) | grepl("Ordered Categorical", model$family[[1]]) if (used_logLink) { dat_overallEffect <- dat_overallEffect %>% mutate(exp_effect = exp(effect)) } # calculate the mean effects per age / period / cohort dat_age <- dat_overallEffect %>% group_by(age) %>% summarize(effect = mean(effect)) %>% ungroup() %>% mutate(variable = "Age") %>% dplyr::rename(value = age) dat_period <- dat_overallEffect %>% group_by(period) %>% summarize(effect = mean(effect)) %>% ungroup() %>% mutate(variable = "Period") %>% dplyr::rename(value = period) dat_cohort <- dat_overallEffect %>% group_by(cohort) %>% summarize(effect = mean(effect)) %>% ungroup() %>% mutate(variable = "Cohort") %>% dplyr::rename(value = cohort) if (used_logLink) { dat_age <- dat_age %>% mutate(exp_effect = exp(effect)) dat_period <- dat_period %>% mutate(exp_effect = exp(effect)) dat_cohort <- dat_cohort %>% mutate(exp_effect = exp(effect)) } # define the theme theme <- theme(text = element_text(size = 16), axis.title = element_text(size = 16), axis.text = element_text(size = 16), legend.text = 
element_text(size = 12),
                 legend.key.width = unit(1.2, "cm"),
                 plot.title = element_text(hjust = 0.5, size = 18, face = "bold"),
                 strip.text.y = element_text(size = 16),
                 strip.placement = "outside",
                 strip.background = element_blank(),
                 axis.title.y = element_text(margin = margin(0, 10, 0, 0)),
                 axis.title.x = element_text(margin = margin(10, 0, 0, 0)))

  # final preparations
  if (used_logLink) {
    dat_overallEffect <- dat_overallEffect %>% select(-effect) %>%
      dplyr::rename(effect = exp_effect)
    dat_age <- dat_age %>% select(-effect) %>% dplyr::rename(effect = exp_effect)
    dat_period <- dat_period %>% select(-effect) %>% dplyr::rename(effect = exp_effect)
    dat_cohort <- dat_cohort %>% select(-effect) %>% dplyr::rename(effect = exp_effect)
  }
  y_lab <- ifelse(used_logLink, "Exp effect", "Effect")

  # return the plot data instead of the ggplot object, if specified
  if (return_plotData) {
    return(list("dat_overallEffect" = dat_overallEffect,
                "dat_age"           = dat_age,
                "dat_period"        = dat_period,
                "dat_cohort"        = dat_cohort))
  }

  # base plot
  gg <- ggplot()
  if (!is.null(vlines_vec)) {
    gg <- gg + geom_vline(xintercept = vlines_vec, col = gray(0.5), lty = 2)
  }

  # main plot
  if (variable == "age") {

    if (hide_partialEffects) { # plot with only the marginal age effect
      gg_final <- gg +
        geom_line(data = dat_age, mapping = aes(x = value, y = effect)) +
        scale_x_continuous(guide = guide_axis(check.overlap = TRUE)) +
        ylab(y_lab) + xlab("Age") + theme + ggtitle("Marginal age effect")

    } else { # plots including the partial effects

      # age versus period
      gg_AP <- gg +
        geom_line(data = dat_overallEffect,
                  mapping = aes(x = age, y = effect, group = period, col = period)) +
        scale_x_continuous(guide = guide_axis(check.overlap = TRUE)) +
        scale_color_continuous(low = "grey90", high = "grey10", name = "Period") +
        geom_line(data = dat_age, mapping = aes(x = value, y = effect),
                  size = 1.5, col = "RoyalBlue3") +
        ylab(y_lab) + xlab("Age") + theme + ggtitle("Age effect by periods")

      # age versus cohort
      gg_AC <- gg +
        geom_line(data = dat_overallEffect,
                  mapping = aes(x = age, y = effect, group = cohort, col = cohort)) +
        scale_x_continuous(guide = guide_axis(check.overlap = TRUE)) +
        scale_color_continuous(low = "grey90", high = "grey10", name = "Cohort") +
        geom_line(data = dat_age, mapping = aes(x = value, y = effect),
                  size = 1.5, col = "RoyalBlue3") +
        theme + theme(axis.text.y = element_blank()) +
        ylab(" ") + xlab("Age") + ggtitle("Age effect by cohorts")

      if (used_logLink) {
        gg_AP <- gg_AP + scale_y_continuous(trans = "log2")
        gg_AC <- gg_AC + scale_y_continuous(trans = "log2")
      }

      # combine both plots
      gg_final <- ggpubr::ggarrange(plotlist = list(gg_AP, gg_AC), ncol = 2,
                                    legend = "bottom")
    }

  } else if (variable == "period") {

    if (hide_partialEffects) { # plot with only the marginal period effect
      gg_final <- gg +
        geom_line(data = dat_period, mapping = aes(x = value, y = effect)) +
        scale_x_continuous(guide = guide_axis(check.overlap = TRUE)) +
        ylab(y_lab) + xlab("Period") + theme + ggtitle("Marginal period effect")

    } else { # plots including the partial effects

      # period versus age
      gg_PA <- gg +
        geom_line(data = dat_overallEffect,
                  mapping = aes(x = period, y = effect, group = age, col = age)) +
        scale_x_continuous(guide = guide_axis(check.overlap = TRUE)) +
        scale_color_continuous(low = "grey90", high = "grey10", name = "Age") +
        geom_line(data = dat_period, mapping = aes(x = value, y = effect),
                  size = 1.5, col = "RoyalBlue3") +
        ylab(y_lab) + xlab("Period") + theme + ggtitle("Period effect by age")

      # period versus cohort
      gg_PC <- gg +
        geom_line(data = dat_overallEffect,
mapping = aes(x = period, y = effect, group = cohort, col = cohort)) +
        scale_x_continuous(guide = guide_axis(check.overlap = TRUE)) +
        scale_color_continuous(low = "grey90", high = "grey10", name = "Cohort") +
        geom_line(data = dat_period, mapping = aes(x = value, y = effect),
                  size = 1.5, col = "RoyalBlue3") +
        theme + theme(axis.text.y = element_blank()) +
        ylab(" ") + xlab("Period") + ggtitle("Period effect by cohorts")

      if (used_logLink) {
        gg_PA <- gg_PA + scale_y_continuous(trans = "log2")
        gg_PC <- gg_PC + scale_y_continuous(trans = "log2")
      }

      # combine both plots
      gg_final <- ggpubr::ggarrange(plotlist = list(gg_PA, gg_PC), ncol = 2,
                                    legend = "bottom")
    }

  } else if (variable == "cohort") {

    if (hide_partialEffects) { # plot with only the marginal cohort effect
      gg_final <- gg +
        geom_line(data = dat_cohort, mapping = aes(x = value, y = effect)) +
        scale_x_continuous(guide = guide_axis(check.overlap = TRUE)) +
        ylab(y_lab) + xlab("Cohort") + theme + ggtitle("Marginal cohort effect")

    } else { # plots including the partial effects

      # cohort versus age
      gg_CA <- gg +
        geom_line(data = dat_overallEffect,
                  mapping = aes(x = cohort, y = effect, group = age, col = age)) +
        scale_x_continuous(guide = guide_axis(check.overlap = TRUE)) +
        scale_color_continuous(low = "grey90", high = "grey10", name = "Age") +
        geom_line(data = dat_cohort, mapping = aes(x = value, y = effect),
                  size = 1.5, col = "RoyalBlue3") +
        ylab(y_lab) + xlab("Cohort") + theme + ggtitle("Cohort effect by age")

      # cohort versus period
      gg_CP <- gg +
        geom_line(data = dat_overallEffect,
                  mapping = aes(x = cohort, y = effect, group = period, col = period)) +
        scale_color_continuous(low = "grey90", high = "grey10", name = "Period") +
        scale_x_continuous(guide = guide_axis(check.overlap = TRUE)) +
        geom_line(data = dat_cohort, mapping = aes(x = value, y = effect),
                  size = 1.5, col = "RoyalBlue3") +
        theme + theme(axis.text.y = element_blank()) +
        ylab(" ") + xlab("Cohort") + ggtitle("Cohort effect by periods")

      if (used_logLink) {
        gg_CA <- gg_CA + scale_y_continuous(trans = "log2")
        gg_CP <- gg_CP + scale_y_continuous(trans = "log2")
      }

      # combine both plots
      gg_final <- ggpubr::ggarrange(plotlist = list(gg_CA, gg_CP), ncol = 2,
                                    legend = "bottom")
    }
  }

  return(gg_final)
}
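# Usage sketch ('model' and 'travel' as in the roxygen example above): with
# 'return_plotData = TRUE' the function returns the four underlying datasets
# instead of a plot, e.g. as a basis for custom graphics:
#   plot_list <- plot_partialAPCeffects(model, dat = travel, variable = "age",
#                                       return_plotData = TRUE)
#   names(plot_list)
#   # "dat_overallEffect" "dat_age" "dat_period" "dat_cohort"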
## file: R/plot_partialAPCeffects.R
#' Plot 1D smooth effects for \code{\link[mgcv]{gam}} models
#'
#' Plots 1D smooth effects for a GAM model fitted with \code{\link[mgcv]{gam}}
#' or \code{\link[mgcv]{bam}}.
#'
#' If the model was estimated with a log or logit link, the function
#' automatically performs an exponential transformation of the effect,
#' see argument \code{method_expTransform}.
#'
#' @param model GAM model fitted with \code{\link[mgcv]{gam}} or
#' \code{\link[mgcv]{bam}}.
#' @param plot_ci If \code{TRUE}, confidence intervals are plotted. Defaults to
#' \code{TRUE}.
#' @param select Index of smooth term to be plotted.
#' @param alpha \code{(1-alpha)} CIs are calculated. The default 0.05 leads to
#' 95\% CIs.
#' @param ylim Optional limits of the y-axis.
#' @param method_expTransform One of \code{c("simple","delta")}, stating if
#' standard errors and confidence interval limits should be transformed by
#' a simple exp transformation or using the delta method. The delta method can
#' be unstable in some situations and lead to negative confidence interval
#' limits. Only used when the model was estimated with a log or logit link.
#' @param return_plotData If TRUE, the dataset prepared for plotting is
#' returned. Defaults to FALSE.
#'
#' @return ggplot object
#'
#' @importFrom grDevices gray
#' @importFrom stats qnorm
#' @import checkmate dplyr ggplot2
#' @export
#'
#' @author Alexander Bauer \email{alexander.bauer@@stat.uni-muenchen.de}
#'
#' @examples
#' library(APCtools)
#' library(mgcv)
#'
#' data(travel)
#' model <- gam(mainTrip_distance ~ te(age, period) + residence_region +
#'              household_size + s(household_income), data = travel)
#'
#' plot_1Dsmooth(model, select = 2)
#'
plot_1Dsmooth <- function(model, plot_ci = TRUE, select, alpha = 0.05,
                          ylim = NULL, method_expTransform = "simple",
                          return_plotData = FALSE) {

  checkmate::assert_class(model, classes = "gam")
  checkmate::assert_logical(plot_ci)
  checkmate::assert_numeric(select, lower = 1)
  checkmate::assert_numeric(alpha, lower = 0, upper = 1)
  checkmate::assert_numeric(ylim, len = 2, null.ok = TRUE)
  checkmate::assert_choice(method_expTransform, choices = c("simple","delta"))
  checkmate::assert_logical(return_plotData, len = 1)

  # some NULL definitions to appease CRAN checks regarding use of dplyr/ggplot2
  fit <- se <- fit_exp <- se_exp <- CI_lower <- CI_upper <- CI_lower_exp <-
    CI_upper_exp <- x <- y <- y_exp <- NULL

  used_logLink <- (model$family[[2]] %in% c("log","logit")) |
    grepl("Ordered Categorical", model$family[[1]])
  ylab <- ifelse(used_logLink, "exp(Effect)", "Effect")

  plotObject <- get_plotGAMobject(model)
  plotObject <- plotObject[[select]]

  plot_dat <- data.frame(x = plotObject$x,
                         y = plotObject$fit,
                         se = plotObject$se / plotObject$se.mult) %>%
    mutate(CI_lower = y - qnorm(1 - alpha/2)*se,
           CI_upper = y + qnorm(1 - alpha/2)*se)

  if (used_logLink) {
    # transform the point estimates
    plot_dat <- plot_dat %>%
      mutate(y_exp = exp(y)) %>%
      select(-y) %>%
      dplyr::rename(y = y_exp)
    # transform the confidence intervals
    if (plot_ci) {
      if (method_expTransform == "simple") {
        plot_dat <- plot_dat %>%
          mutate(se_exp = exp(se),
                 CI_lower_exp = exp(CI_lower),
                 CI_upper_exp = exp(CI_upper)) %>%
          select(-se, -CI_lower, -CI_upper) %>%
          dplyr::rename(se = se_exp, CI_lower = CI_lower_exp,
                        CI_upper = CI_upper_exp)
      } else { # method_expTransform == "delta"
        plot_dat <- plot_dat %>%
          mutate(se_exp = sqrt(se^2 * y^2)) %>%
          mutate(CI_lower_exp = y - qnorm(1 - alpha/2) * se_exp,
                 CI_upper_exp = y + qnorm(1 - alpha/2) * se_exp) %>%
          select(-se, -CI_lower, -CI_upper) %>%
          dplyr::rename(se = se_exp, CI_lower =
CI_lower_exp, CI_upper = CI_upper_exp)
        # correct negative CI_lower borders
        if (any(plot_dat$CI_lower < 0)) {
          warning("Note: After the delta method transformation some values of the lower confidence interval border were negative. These values were set to 0.01.")
          plot_dat$CI_lower[plot_dat$CI_lower < 0] <- 0.01
        }
      }
    }
  }

  if (return_plotData) {
    return(plot_dat)
  }

  # if 'ylim' is set and the CIs exceed it, trim them accordingly
  if (!is.null(ylim)) {
    plot_dat$CI_lower[plot_dat$CI_lower < ylim[1]] <- ylim[1]
    plot_dat$CI_upper[plot_dat$CI_upper > ylim[2]] <- ylim[2]
  }

  # plot
  gg <- ggplot(plot_dat, aes(x = x, y = y))
  if (plot_ci) {
    gg <- gg +
      geom_ribbon(aes(ymin = CI_lower, ymax = CI_upper, fill = "")) +
      scale_fill_manual(values = gray(0.75))
  }
  gg <- gg +
    geom_hline(yintercept = ifelse(used_logLink, 1, 0), col = gray(0.3), lty = 2) +
    geom_line(aes(col = "")) +
    xlab(plotObject$xlab) +
    scale_y_continuous(trans = ifelse(used_logLink, "log2", "identity"),
                       name = ylab, limits = ylim) +
    scale_color_manual(values = "black") +
    theme(legend.position = "none")

  return(gg)
}


#' Extract returned values of plot.gam() while suppressing creation of the plot
#'
#' Internal helper function to extract the values returned by
#' \code{\link[mgcv]{plot.gam}} while suppressing creation of the plot.
#'
#' @inheritParams plot_1Dsmooth
#'
#' @importFrom grDevices png dev.off
#' @importFrom mgcv plot.gam
#' @import checkmate
#'
get_plotGAMobject <- function(model) {

  checkmate::assert_class(model, classes = "gam")

  # idea: save the plot in a temporary png file, which is deleted right
  # afterwards
  png("temp.png")
  # plot.gam returns all terms by default; select and rug are set only to
  # decrease the evaluation time
  plot.df <- mgcv::plot.gam(model, select = 1, rug = FALSE)
  dev.off()
  unlink("temp.png", recursive = TRUE)

  # delete 'raw' elements as they are very large but not necessary for
  # plotting the effects
  plot.df <- lapply(plot.df, function(x) { x$raw <- NULL; x })

  return(invisible(plot.df))
}
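# Internal usage sketch (hypothetical 'model' object fitted with mgcv::gam):
# the returned list has one element per smooth term, each containing the
# evaluated fit, standard errors and axis labels used by plot_1Dsmooth():
#   plt_list <- get_plotGAMobject(model)
#   str(plt_list[[1]], max.level = 1)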
## file: R/plot_smoothEffects.R
#' Distribution plot of one variable against one APC dimension #' #' Plot the distribution of one variable in the data against age, period or #' cohort. Creates a bar plot for categorical variables (see argument #' \code{geomBar_position}) and boxplots or a line plot of median values for #' metric variables (see \code{plot_type}). #' #' @param dat Dataset containing columns \code{age} and \code{period}. #' @param y_var Character name of the variable to plot. #' @param apc_dimension One of \code{c("age","period","cohort")}. Defaults to #' \code{"period"}. #' @param log_scale Indicator if the visualized variable should be log10 #' transformed. Only used if the variable is numeric. Defaults to FALSE. #' @param plot_type One of \code{c("boxplot","line","line-points")}, specifying #' if boxplots or a line plot of median values should be drawn for metric #' variables. \code{"line-points"} adds points to the line plot where #' observations are available. #' @param geomBar_position Value passed to \code{\link[ggplot2]{geom_bar}} as #' \code{position} argument. Only used if the visualized variable is categorical. #' Defaults to \code{"fill"}. #' @param legend_title Optional character title for the legend which is drawn #' for categorical variables. #' @param ylab,ylim Optional arguments for styling the ggplot. #' #' @return ggplot object #' #' @import checkmate dplyr ggplot2 #' @importFrom stats median #' @export #' #' @author Alexander Bauer \email{alexander.bauer@@stat.uni-muenchen.de} #' #' @examples #' library(APCtools) #' data(travel) #' #' # plot a metric variable #' plot_variable(dat = travel, y_var = "mainTrip_distance", #' apc_dimension = "period", log_scale = TRUE) #' plot_variable(dat = travel, y_var = "mainTrip_distance", #' apc_dimension = "period", log_scale = TRUE, plot_type = "line") #' #' # plot a categorical variable #' plot_variable(dat = travel, y_var = "household_size", apc_dimension = "period") #' plot_variable(dat = travel, y_var = "household_size", apc_dimension = "period", #' geomBar_position = "stack") #' plot_variable <- function(dat, y_var, apc_dimension = "period", log_scale = FALSE, plot_type = "boxplot", geomBar_position = "fill", legend_title = NULL, ylab = NULL, ylim = NULL) { checkmate::assert_data_frame(dat) checkmate::assert_choice("age", colnames(dat)) checkmate::assert_choice("period", colnames(dat)) checkmate::assert_choice(y_var, choices = colnames(dat)) checkmate::assert_choice(apc_dimension, choices = c("age","period","cohort")) checkmate::assert_logical(log_scale, len = 1) checkmate::assert_choice(plot_type, choices = c("boxplot","line", "line-points"), null.ok = TRUE) checkmate::assert_character(geomBar_position, len = 1) checkmate::assert_character(legend_title, len = 1, null.ok = TRUE) checkmate::assert_character(ylab, len = 1, null.ok = TRUE) checkmate::assert_numeric(ylim, len = 2, null.ok = TRUE) # some NULL definitions to appease CRAN checks regarding use of dplyr/ggplot2 period <- age <- x <- y <- NULL dat <- dat %>% mutate(cohort = period - age) var_class <- ifelse(class(dat[[y_var]]) %in% c("character","factor"), "categorical", "metric") # remove NA measurements if (any(is.na(dat[[y_var]]))) { message("Excluding ",sum(is.na(dat[[y_var]])), " missing observations of ",y_var,"...") dat <- dat[!is.na(dat[[y_var]]),] } # rename the variables for easier handling dat <- dat %>% dplyr::rename(x = apc_dimension, y = y_var) # create plot if (var_class == "categorical") { if (is.null(ylab)) { ylab <- ifelse(geomBar_position == "fill", "Rel. 
frequency", "Frequency") } if (is.null(legend_title)) { legend_title <- y_var } gg <- ggplot(dat, aes(x = factor(x), fill = y)) + geom_bar(position = geomBar_position) + scale_fill_brewer(legend_title, palette = "Set2") + scale_y_continuous(ylab, limits = ylim) + scale_x_discrete(guide = guide_axis(check.overlap = TRUE)) } else { # var_class == "metric" # compute the median values if (plot_type %in% c("line","line-points")) { dat <- dat %>% group_by(x) %>% summarize(y = stats::median(y, na.rm = TRUE)) %>% ungroup() } # log10 transform the main variable, and create a function to accordingly # adjust the labels on the y axis (the function is passed to scale_y_continuous()) if (log_scale) { dat <- dat %>% mutate(y = log10(y)) label_function <- function(x) { paste0("10^",x) } } else { # no log transformation label_function <- function(x) { x } # identity function } # main plot if (plot_type == "boxplot") { gg <- ggplot(dat, aes(x = factor(x), y = y)) + geom_boxplot(col = gray(0.3), outlier.color = gray(0.3), outlier.alpha = 0.2) + scale_x_discrete(guide = guide_axis(check.overlap = TRUE)) if (is.null(ylab)) { ylab <- y_var } } else { # plot_type %in% c("line","line-points") gg <- ggplot(dat, aes(x = x, y = y), col = gray(0.3)) + geom_line() if (plot_type == "line-points") { gg <- gg + geom_point() } if (is.null(ylab)) { ylab <- paste0("median(",y_var,")") } } gg <- gg + scale_y_continuous(ylab, labels = label_function, limits = ylim) } # final theme adjustments gg <- gg + xlab(capitalize_firstLetter(apc_dimension)) return(gg) }
## file: R/plot_variable.R
#' Internal function to capitalize the first letter of a character #' #' Internal helper function to capitalize the first letter of a character value. #' The use case is to create a plot label like 'Age' from a variable name like #' 'age'. #' #' @param char Character value whose first letter should be capitalized #' #' @import checkmate #' capitalize_firstLetter <- function(char) { checkmate::assert_character(char, len = 1) char_cap <- paste0(toupper(substr(char, 1, 1)), substr(char, 2, nchar(char))) return(char_cap) }
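# Example (illustrative):
#   capitalize_firstLetter("age")     # "Age"
#   capitalize_firstLetter("cohort")  # "Cohort"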
## file: R/small_helpers.R
## ---- echo=FALSE-------------------------------------------------------------- # global settings knitr::opts_chunk$set(fig.width = 10) ## ----packages, message = FALSE------------------------------------------------ library(APCtools) library(dplyr) # general data handling library(mgcv) # estimation of generalized additive regression models (GAMs) library(ggplot2) # data visualization library(ggpubr) # arranging multiple ggplots in a grid with ggarrange() # set the global theme of all plots theme_set(theme_minimal()) ## ----data preparation--------------------------------------------------------- data(travel) ## ---- message=FALSE, warning=FALSE, fig.height=2.2---------------------------- gg1 <- plot_density(dat = travel, y_var = "mainTrip_distance", log_scale = TRUE) gg2 <- plot_density(dat = travel, y_var = "mainTrip_distance", log_scale = TRUE, plot_type = "boxplot") gg3 <- plot_density(dat = travel, y_var = "household_size") ggpubr::ggarrange(gg1, gg2, gg3, nrow = 1) ## ---- message = FALSE, fig.height=4------------------------------------------- plot_variable(dat = travel, y_var = "mainTrip_distance", apc_dimension = "period", plot_type = "line", ylim = c(0,1000)) plot_variable(dat = travel, y_var = "household_size", apc_dimension = "period") ## ---- fig.width=8, message=FALSE---------------------------------------------- age_groups <- list(c(80,89),c(70,79),c(60,69),c(50,59), c(40,49),c(30,39),c(20,29)) period_groups <- list(c(1970,1979),c(1980,1989),c(1990,1999), c(2000,2009),c(2010,2019)) plot_densityMatrix(dat = travel, y_var = "mainTrip_distance", age_groups = age_groups, period_groups = period_groups, log_scale = TRUE) ## ---- fig.height=6.5, message=FALSE------------------------------------------- plot_densityMatrix(dat = travel, y_var = "mainTrip_distance", age_groups = age_groups, period_groups = period_groups, highlight_diagonals = list("born 1950 - 1959" = 8, "born 1970 - 1979" = 10), log_scale = TRUE) ## ---- fig.height=6.5, message=FALSE------------------------------------------- dist_cat_breaks <- c(1,500,1000,2000,6000,100000) dist_cat_labels <- c("< 500 km","500 - 1,000 km", "1,000 - 2,000 km", "2,000 - 6,000 km", "> 6,000 km") plot_densityMatrix(dat = travel, y_var = "mainTrip_distance", age_groups = age_groups, period_groups = period_groups, log_scale = TRUE, y_var_cat_breaks = dist_cat_breaks, y_var_cat_labels = dist_cat_labels, highlight_diagonals = list("born 1950 - 1959" = 8, "born 1970 - 1979" = 10), legend_title = "Distance category") ## ---- fig.height=6.5, message=FALSE------------------------------------------- plot_densityMatrix(dat = travel, y_var = "household_size", age_groups = age_groups, period_groups = period_groups, highlight_diagonals = list("born 1950 - 1959" = 8, "born 1970 - 1979" = 10)) ## ---- fig.height=6.5, fig.width=8--------------------------------------------- plot_APCheatmap(dat = travel, y_var = "mainTrip_distance", y_var_logScale = TRUE, bin_heatmap = FALSE, markLines_list = list(cohort = c(1900,1920,1939,1946, 1966,1982,1994))) ## ---- fig.height=6.5, fig.width=8--------------------------------------------- plot_APCheatmap(dat = travel, y_var = "mainTrip_distance", y_var_logScale = TRUE, markLines_list = list(cohort = c(1900,1920,1939,1946, 1966,1982,1994))) ## ---- fig.height=6.5---------------------------------------------------------- plot_APChexamap(dat = travel, y_var = "mainTrip_distance", y_var_logScale = TRUE) ## ----------------------------------------------------------------------------- # GAM without covariates model_pure 
<- gam(mainTrip_distance ~ te(age, period, bs = "ps", k = c(8,8)), data = travel) # GAM including covariates model_cov <- gam(mainTrip_distance ~ te(age, period, bs = "ps", k = c(8,8)) + residence_region + household_size + s(household_income), data = travel) # create a named list of the two models, useful for some functions model_list <- list("pure model" = model_pure, "covariate model" = model_cov) ## ---- fig.height=3, fig.width=8----------------------------------------------- plot_APCheatmap(dat = travel, model = model_pure) ## ---- fig.height=6.5---------------------------------------------------------- plot_APChexamap(dat = travel, model = model_pure) ## ----------------------------------------------------------------------------- plot_marginalAPCeffects(model = model_pure, dat = travel, variable = "age") ## ----------------------------------------------------------------------------- plot_jointMarginalAPCeffects(model_list = model_list, dat = travel, vlines_list = list("cohort" = c(1900,1920,1939, 1946,1966,1982, 1994))) ## ----------------------------------------------------------------------------- plot_partialAPCeffects(model = model_pure, dat = travel, variable = "period") ## ----------------------------------------------------------------------------- create_APCsummary(model_list = model_list, dat = travel) ## ----------------------------------------------------------------------------- APCtools::plot_linearEffects(model_cov) APCtools::plot_1Dsmooth(model_cov, select = 2) ## ----------------------------------------------------------------------------- summary_list <- create_modelSummary(model_list) summary_list[[1]] summary_list[[2]]
## file: inst/doc/main_functionality.R
--- title: "APC Analysis with APCtools" author: "Alexander Bauer, Maximilian Weigert" date: "`r format(Sys.time(), '%d.%B %Y')`" output: html_document: toc: yes vignette: > %\VignetteIndexEntry{APC Analysis with APCtools} %\usepackage[utf8]{inputenc} %\VignetteEngine{knitr::rmarkdown} rmarkdown::html_vignette: default editor_options: chunk_output_type: console --- ```{r, echo=FALSE} # global settings knitr::opts_chunk$set(fig.width = 10) ``` This document gives an overview of the functionality provided by the R package `APCtools`. Age-Period-Cohort (APC) analysis is used to disentangle observed trends (e.g. of social, economic, medical or epidemiological data) to enable conclusions about the developments over three temporal dimensions: * Age, representing the developments associated with chronological age over someones life cycle. * Period, representing the developments over calendar time which affect all age groups simultaneously. * Cohort, representing the developments observed over different birth cohorts and generations. The critical challenge in APC analysis is that these main components are linearly dependent: $$ cohort = period - age $$ Accordingly, flexible methods and visualization techniques are needed to properly disentagle observed temporal association structures. The `APCtools` package comprises different methods that tackle this problem and aims to cover all steps of an APC analysis. This includes state-of-the-art descriptive visualizations as well as visualization and summary functions based on the estimation of a generalized additive regression model (GAM). The main functionalities of the package are highlighted in the following. For details on the statistical methodology see [Weigert et al. (2021)](https://doi.org/10.1177/1354816620987198) or our corresponding [research poster](https://www.researchgate.net/publication/353852226_Visualization_techniques_for_semiparametric_APC_analysis_Using_Generalized_Additive_Models_to_examine_touristic_travel_distances). The *hexamaps* (hexagonally binned heatmaps) are outlined in [Jalal & Burke (2020)](https://doi.org/10.1097/EDE.0000000000001236). ## Load relevant packages Before we start, let's load the relevant packages for the following analyses. ```{r packages, message = FALSE} library(APCtools) library(dplyr) # general data handling library(mgcv) # estimation of generalized additive regression models (GAMs) library(ggplot2) # data visualization library(ggpubr) # arranging multiple ggplots in a grid with ggarrange() # set the global theme of all plots theme_set(theme_minimal()) ``` ## Example data APC analyses require long-term panel or repeated cross-sectional data. The package includes two exemplary datasets on the travel behavior of German tourists (dataset `travel`) and the number of unintentional drug overdose deaths in the United States (`drug_deaths`). See the respective help pages `?travel` and `?drug_deaths` for details. In the following, we will use the `travel` dataset to investigate if travel distances of the main trip of German travelers mainly change over the life cycle of a person (age effect), macro-level developments like decreasing air travel prices in the last decades (period effect) or the generational membership of a person, which is shaped by similar socialization and historical experiences (cohort effect). ```{r data preparation} data(travel) ``` ## Descriptive visualizations Different functions are available for descriptively visualizing observed structures. 
This includes plots for the marginal distribution of some variable of interest, 1D plots for the development of some variable over age, period or cohort, as well as density matrices that visualize the development over all temporal dimensions. ### Marginal distribution of one variable The marginal distribution of a variable can be visualized using `plot_density`. Metric variables can be plotted using a density plot or a boxplot, while categorical variables can be plotted using a bar chart. ```{r, message=FALSE, warning=FALSE, fig.height=2.2} gg1 <- plot_density(dat = travel, y_var = "mainTrip_distance", log_scale = TRUE) gg2 <- plot_density(dat = travel, y_var = "mainTrip_distance", log_scale = TRUE, plot_type = "boxplot") gg3 <- plot_density(dat = travel, y_var = "household_size") ggpubr::ggarrange(gg1, gg2, gg3, nrow = 1) ``` ### 1D: One variable against age, period or cohort Plotting the distribution of a variable against age, period or cohort is possible with function `plot_variable`. The distribution of metric and categorical variables is visualized using boxplots or line charts (see argument `plot_type`) and bar charts, respectively. The latter by default show relative frequencies, but can be changed to show absolute numbers by specifying argument `geomBar_position = "stack"`. ```{r, message = FALSE, fig.height=4} plot_variable(dat = travel, y_var = "mainTrip_distance", apc_dimension = "period", plot_type = "line", ylim = c(0,1000)) plot_variable(dat = travel, y_var = "household_size", apc_dimension = "period") ``` ### 2D: Density matrices To include all temporal dimensions in one plot, `APCtools` contains function `plot_densityMatrix`. In Weigert et al. (2021), this plot type was referred to as *ridgeline matrix* when plotting multiple density plots for a metric variable. The basic principle of a density matrix is to (i) visualize two of the temporal dimensions on the x- and y-axis (specified using the argument `dimensions`), s.t. the third temporal dimension is represented on the diagonals of the matrix, and (ii) to categorize the respective variables on the x- and y-axis in meaningful groups. The function then creates a grid, where each cell contains the distribution of the selected `y_var` variable in the respective category. By default, age and period are depicted on the x- and y-axis, respectively, and cohort on the diagonals. The categorization is defined by specifying two of the arguments `age_groups`, `period_groups` and `cohort_groups`. ```{r, fig.width=8, message=FALSE} age_groups <- list(c(80,89),c(70,79),c(60,69),c(50,59), c(40,49),c(30,39),c(20,29)) period_groups <- list(c(1970,1979),c(1980,1989),c(1990,1999), c(2000,2009),c(2010,2019)) plot_densityMatrix(dat = travel, y_var = "mainTrip_distance", age_groups = age_groups, period_groups = period_groups, log_scale = TRUE) ``` To highlight the effect of the variable depicted on the diagonal (here: cohort), different diagonals can be highlighted using argument `highlight_diagonals`. ```{r, fig.height=6.5, message=FALSE} plot_densityMatrix(dat = travel, y_var = "mainTrip_distance", age_groups = age_groups, period_groups = period_groups, highlight_diagonals = list("born 1950 - 1959" = 8, "born 1970 - 1979" = 10), log_scale = TRUE) ``` For metric variables it is further possible to use a color scale for the density function (arguments `y_var_cat_breaks` and `y_var_cat_labels`) to highlight local developments over the APC dimensions. 
```{r, fig.height=6.5, message=FALSE} dist_cat_breaks <- c(1,500,1000,2000,6000,100000) dist_cat_labels <- c("< 500 km","500 - 1,000 km", "1,000 - 2,000 km", "2,000 - 6,000 km", "> 6,000 km") plot_densityMatrix(dat = travel, y_var = "mainTrip_distance", age_groups = age_groups, period_groups = period_groups, log_scale = TRUE, y_var_cat_breaks = dist_cat_breaks, y_var_cat_labels = dist_cat_labels, highlight_diagonals = list("born 1950 - 1959" = 8, "born 1970 - 1979" = 10), legend_title = "Distance category") ``` As an alternative to plotting a conditional density in each matrix cell, metric variables can also be visualized using boxplots by specifying `plot_type = "boxplot"`. Categorical variables are automatically plotted using conditional bar plots. ```{r, fig.height=6.5, message=FALSE} plot_densityMatrix(dat = travel, y_var = "household_size", age_groups = age_groups, period_groups = period_groups, highlight_diagonals = list("born 1950 - 1959" = 8, "born 1970 - 1979" = 10)) ``` ### 2D: Heatmaps Similar to density matrices, heatmaps simultaneously visualize all three APC dimensions. Instead of individual distributions in separated categories, however, a color scale is used to depict the mean value in a specific region. When setting `bin_heatmap = FALSE`, the function `plot_APCheatmap` shows the average observed values of a metric variable for each observed combination of age and period values (if `dimensions = c("period","age")`). The borders of groups along the diagonal can be marked using argument `markLines_list`. ```{r, fig.height=6.5, fig.width=8} plot_APCheatmap(dat = travel, y_var = "mainTrip_distance", y_var_logScale = TRUE, bin_heatmap = FALSE, markLines_list = list(cohort = c(1900,1920,1939,1946, 1966,1982,1994))) ``` By default, the heatmap is binned in five-year blocks along the x- and y-axis to focus on larger-scale developments. Note that the grid used for binning can flexibly be defined with the argument `bin_heatmapGrid_list`. ```{r, fig.height=6.5, fig.width=8} plot_APCheatmap(dat = travel, y_var = "mainTrip_distance", y_var_logScale = TRUE, markLines_list = list(cohort = c(1900,1920,1939,1946, 1966,1982,1994))) ``` ### 2D: Hexamaps As an alternative to classical heatmaps, observed developments can also be visualized using *hexamaps*, i.e. hexagonally binned heatmaps with rotated axes to give all three APC dimensions similar visual weight. This resolves the central problem of classical heatmaps that developments along the diagonal dimension are visually much harder to grasp than developments along the x- or y-axis. Function `plot_APChexamap` can be called similarly to `plot_APCheatmap`. Note, however, that hexamaps are currently implemented using the base R plot functions and not based on `ggplot2`. ```{r, fig.height=6.5} plot_APChexamap(dat = travel, y_var = "mainTrip_distance", y_var_logScale = TRUE) ``` ## Model-based analyses After getting a descriptive overview of the data with the functions outlined above, an established approach to analyze APC structures in more detail is to use the semiparametric approach offered by generalized additive regression models (GAMs). This regression-based approach offers several benefits: * Compared to alternative regression-based APC approaches, it circumvents the issue of linear dependency of the APC dimensions by estimating a flexible two-dimensional tensor product surface. 
* The model separates the underlying smooth effects of age, period and cohort from random variation in the data and allows for the subsequent visualization of marginal age, period and cohort effects. * While the above heatmaps and hexamaps can only be used to visualize the observed distribution of *metric* variables, the regression-based approach also allows for estimating and plotting the mean structure in settings with a categorical response variable, for example by estimating a logistic regression model. * The regression-based approach allows for accounting for further control variables in the model. E.g., when analyzing how travel distances developed over the last decades, each person's income can be included as a covariate in the model estimation to account for income differences between travelers in different decades. For estimating a regression model we utilize the function `gam` implemented in package `mgcv`. For further methodological details see Weigert et al. (2021). Let's fit two GAMs, one without and one with further control variables. See `?travel` for an explanation of the used variables. The tensor product surface is estimated with two marginal P-spline bases with eight basis functions each. ```{r} # GAM without covariates model_pure <- gam(mainTrip_distance ~ te(age, period, bs = "ps", k = c(8,8)), data = travel) # GAM including covariates model_cov <- gam(mainTrip_distance ~ te(age, period, bs = "ps", k = c(8,8)) + residence_region + household_size + s(household_income), data = travel) # create a named list of the two models, useful for some functions model_list <- list("pure model" = model_pure, "covariate model" = model_cov) ``` ### Heatmaps Similarly to the descriptive visualization outlined above, a heatmap of the tensor product surface estimated with the regression model can be created with `plot_APCheatmap`, by passing the model object as argument `model` to the function. ```{r, fig.height=3, fig.width=8} plot_APCheatmap(dat = travel, model = model_pure) ``` ### Hexamaps The same applies to the hexamaps. By calling `plot_APChexamap` and specifying the `model` argument, a hexamap of the estimated mean structure is created. ```{r, fig.height=6.5} plot_APChexamap(dat = travel, model = model_pure) ``` ### Marginal APC effects Based on the tensor product surface estimated through the regression model, marginal effects can be extracted by taking the average of all values on the surface along one dimension. Marginal effects can be visualized using `plot_marginalAPCeffects`. ```{r} plot_marginalAPCeffects(model = model_pure, dat = travel, variable = "age") ``` The marginal effects of multiple models can also be visualized in one plot using `plot_jointMarginalAPCeffects`. ```{r} plot_jointMarginalAPCeffects(model_list = model_list, dat = travel, vlines_list = list("cohort" = c(1900,1920,1939, 1946,1966,1982, 1994))) ``` ### Partial APC plots Partial APC plots can be used to get deeper insights into a specific temporal effect and the interrelations between the temporal dimensions. These plots show the estimated effect for one temporal dimension dependent on the remaining two dimensions. The mean marginal effect is marked as a bold blue line, and one grayscale line is added for each partial effect, that is, for the estimated differences when just focusing on a specific age, period or cohort group. In the following example, the period effect is visualized, dependent on different age groups and cohorts. See [Weigert et al. 
(2021)](https://doi.org/10.1177/1354816620987198) for details on the interpretation of partial APC plots. ```{r} plot_partialAPCeffects(model = model_pure, dat = travel, variable = "period") ``` ### APC summary table The function `create_APCsummary` can be used to get a compact overview of the magnitude of the individual APC effects. It extracts information about the minimum and maximum values of the marginal APC effects and computes the overall size of each effect. ```{r} create_APCsummary(model_list = model_list, dat = travel) ``` ### Covariate effect plots Control variables in the regression models can be visualized using the functions `plot_linearEffects` and `plot_1Dsmooth`, which create an effect plot of all linear effects in a model and a line plot of a one-dimensional smooth effect, respectively. ```{r} APCtools::plot_linearEffects(model_cov) APCtools::plot_1Dsmooth(model_cov, select = 2) ``` ### Model summary tables The function `create_modelSummary` can be used to create publication-ready tables of the full model results. It can be applied either to a list of multiple models or to a list containing a single model, and returns a list with two tables: one for all linear coefficients and one for all nonlinear estimates. ```{r} summary_list <- create_modelSummary(model_list) summary_list[[1]] summary_list[[2]] ```
/scratch/gouwar.j/cran-all/cranData/APCtools/inst/doc/main_functionality.Rmd
library(dplyr) library(ggplot2) # helper function to get points on an ellipse ----------------------------- # Note: angles must be specified in units of pi. get_ellipsePointsDat <- function(center_x, center_y, radius_x, radius_y, angle_min, angle_max) { ellipsis_dat <- data.frame( x = center_x + radius_x * cos(seq(angle_min, angle_max, length.out = 500)), y = center_y + radius_y * sin(seq(angle_min, angle_max, length.out = 500)) ) return(ellipsis_dat) } # define data.frame objects for drawing all lines ------------------------- mainCoordinates_dat <- data.frame( x = c(0,2.15, 0,0, 0,1.1), y = c(0,0, 0,1.15, 0,1.1), group = c(1,1, 2,2, 3,3) ) diagonals_dat <- data.frame( x = c(0,.6, 0,.2, .4,1.4, .8,1.8, 1.2,2, 1.6,2), y = c(.4,1, .8,1, 0,1, 0,1, 0,.8, 0,.4), group = c(4,4, 5,5, 6,6, 7,7, 8,8, 9,9) ) horizontals_dat <- data.frame( x = rep(c(0,2), times = 2), y = rep(c(.4,.8), each = 2), group = rep(1:2, each = 2) ) verticals_dat <- data.frame( x = rep(c(.4,.8,1.2,1.6,2), each = 2), y = rep(c(0,1), times = 5), group = rep(1:5, each = 2) ) A_dat <- data.frame( x = c(0,.4, 0,.15, .15,.3, .3,.49, .5,.4, .5,.6, .49,.51, .51,.6), y = c(.4,.8, .4,.4, .4,.55, .55,.55, .8,.8, .8,.6, .55,.51, .51,.6), group = c(1,1, 8,8, 7,7, 6,6, 2,2, 3,3, 5,5, 4,4), type = 1 # "1" for defining the main area of the polygon ) %>% arrange(desc(group)) A_hole_dat <- data.frame( x = c(.35,.47, .35,.44, .47,.44), y = c(.6,.6, .6,.695, .6,.695), group = c(1,1, 2,2, 3,3), type = 2 # "2" for defining a hole in the polygon ) %>% arrange(y) P_dat <- data.frame( x = c(.8,.8, .95,.8, .8,1.05, .95,.95, 1.05,.95), y = c(.1,.7, .1,.1, .7,.7, .4,.1, .4,.4), group = c(1,1, 5,5, 2,2, 4,4, 3,3) ) %>% arrange(group) C_dat <- data.frame( x = c(1.9,1.9, 1.9,1.9, 1.8,1.9, 1.8,1.9, 1.9,1.85, 1.9,1.85) - .07, y = c(.4,.32, 0,.08, .4,.4, 0,0, .32,.32, .08,.08), group = c(2,2, 5,5, 1,1, 4,4, 3,3, 6,6) ) %>% arrange(group) P_outerCircle_dat <- get_ellipsePointsDat(center_x = 1.05, center_y = .55, radius_x = .25, radius_y = .15, angle_min = -.5*pi, angle_max = .5*pi) P_hole_dat <- get_ellipsePointsDat(center_x = 1.02, center_y = .55, radius_x = .18, radius_y = .09, angle_min = -.5*pi, angle_max = .5*pi) %>% filter(y <= x - 0.4) %>% dplyr::bind_rows(data.frame(x = .92, y = .52)) %>% dplyr::bind_rows(data.frame(x = .92, y = .46)) %>% dplyr::bind_rows(data.frame(x = .92, y = .46), .) C_outerCircle_dat <- get_ellipsePointsDat(center_x = 1.8-.07, center_y = .2, radius_x = .4, radius_y = .2, angle_min = .5*pi, angle_max = 1.5*pi) %>% filter(y <= x - 1.2) %>% arrange(y) C_innerCircle_dat <- get_ellipsePointsDat(center_x = 1.85-.07, center_y = .2, radius_x = .28, radius_y = .12, angle_min = .5*pi, angle_max = 1.5*pi) A_poly_dat <- A_dat %>% dplyr::bind_rows(A_hole_dat) P_poly_dat <- P_dat[P_dat$group <= 2,] %>% dplyr::bind_rows(P_outerCircle_dat) %>% dplyr::bind_rows(P_dat[P_dat$group > 2,]) %>% mutate(type = 1) # "1" for defining the main area of the polygon P_poly_dat <- P_hole_dat %>% mutate(type = 2) %>% # "2" for defining a hole in the polygon dplyr::bind_rows(P_poly_dat, .) 
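# How the *_poly_dat objects above work: geom_polygon() can punch a hole into
# a filled shape through its `subgroup` aesthetic (available in recent
# ggplot2 versions). Rows with type = 1 trace the outer outline of a letter
# and rows with type = 2 trace its hole, so binding both into one data.frame
# lets a single geom_polygon(aes(x, y, subgroup = type)) call draw the letter
# with the hole cut out.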
C_poly_dat <- C_dat[C_dat$group <= 3,] %>% dplyr::bind_rows(C_innerCircle_dat) %>% dplyr::bind_rows(C_dat[C_dat$group > 3,]) %>% dplyr::bind_rows(C_outerCircle_dat) # main plot --------------------------------------------------------------- ggplot() + geom_line(data = diagonals_dat, aes(x, y, group = group), lineend = "round", lty = 2, size = 1, col = "#3E78B2") + geom_line(data = horizontals_dat, aes(x, y, group = group), lineend = "round", lty = 2, size = 1, col = "#3E78B2") + geom_line(data = verticals_dat, aes(x, y, group = group), lineend = "round", lty = 2, size = 1, col = "#3E78B2") + geom_line(data = A_dat, aes(x, y, group = group), size = 2, lineend = "round") + geom_line(data = A_hole_dat, aes(x, y, group = group), size = 2, lineend = "round") + geom_polygon(data = A_poly_dat, aes(x, y, subgroup = type), fill = gray(0.3)) + geom_line(data = mainCoordinates_dat, aes(x, y, group = group), lineend = "round", size = 4, col = "#3E78B2", arrow = arrow(angle = 15, ends = "last", type = "closed")) + geom_line(data = P_dat, aes(x, y, group = group), size = 2, lineend = "round") + geom_line(data = C_dat, aes(x, y, group = group), size = 2, lineend = "round") + geom_path(data = P_outerCircle_dat, aes(x, y), size = 2, lineend = "round") + geom_path(data = P_hole_dat, aes(x, y), size = 2, lineend = "round") + geom_path(data = C_outerCircle_dat, aes(x, y), size = 2, lineend = "round") + geom_path(data = C_innerCircle_dat, aes(x, y), size = 2, lineend = "round") + geom_polygon(data = P_poly_dat, aes(x, y, subgroup = type), fill = gray(0.3)) + geom_polygon(data = C_poly_dat, aes(x, y), fill = gray(0.3)) + theme(axis.title = element_blank(), axis.text = element_blank(), axis.ticks = element_blank(), panel.background = element_blank(), legend.position = "none", plot.background = element_rect(fill = "transparent", color = NA), panel.grid = element_blank()) # ggsave("main_plot.pdf", width = 7, height = 5)
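# Optional sanity check of the get_ellipsePointsDat() helper (a hypothetical
# demo, not needed for the sticker; kept commented out, like ggsave() above,
# so it does not replace the sticker plot when the script is sourced):
# demo_ellipse <- get_ellipsePointsDat(center_x = 0, center_y = 0,
#                                      radius_x = 1, radius_y = .5,
#                                      angle_min = 0, angle_max = 2 * pi)
# ggplot(demo_ellipse) + geom_path(aes(x, y)) + coord_fixed()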
/scratch/gouwar.j/cran-all/cranData/APCtools/man/figures/hex-sticker/individual_images/main_plot.R
--- title: "APC Analysis with APCtools" author: "Alexander Bauer, Maximilian Weigert" date: "`r format(Sys.time(), '%d.%B %Y')`" output: html_document: toc: yes vignette: > %\VignetteIndexEntry{APC Analysis with APCtools} %\usepackage[utf8]{inputenc} %\VignetteEngine{knitr::rmarkdown} rmarkdown::html_vignette: default editor_options: chunk_output_type: console --- ```{r, echo=FALSE} # global settings knitr::opts_chunk$set(fig.width = 10) ``` This document gives an overview of the functionality provided by the R package `APCtools`. Age-Period-Cohort (APC) analysis is used to disentangle observed trends (e.g. of social, economic, medical or epidemiological data) to enable conclusions about the developments over three temporal dimensions: * Age, representing the developments associated with chronological age over someones life cycle. * Period, representing the developments over calendar time which affect all age groups simultaneously. * Cohort, representing the developments observed over different birth cohorts and generations. The critical challenge in APC analysis is that these main components are linearly dependent: $$ cohort = period - age $$ Accordingly, flexible methods and visualization techniques are needed to properly disentagle observed temporal association structures. The `APCtools` package comprises different methods that tackle this problem and aims to cover all steps of an APC analysis. This includes state-of-the-art descriptive visualizations as well as visualization and summary functions based on the estimation of a generalized additive regression model (GAM). The main functionalities of the package are highlighted in the following. For details on the statistical methodology see [Weigert et al. (2021)](https://doi.org/10.1177/1354816620987198) or our corresponding [research poster](https://www.researchgate.net/publication/353852226_Visualization_techniques_for_semiparametric_APC_analysis_Using_Generalized_Additive_Models_to_examine_touristic_travel_distances). The *hexamaps* (hexagonally binned heatmaps) are outlined in [Jalal & Burke (2020)](https://doi.org/10.1097/EDE.0000000000001236). ## Load relevant packages Before we start, let's load the relevant packages for the following analyses. ```{r packages, message = FALSE} library(APCtools) library(dplyr) # general data handling library(mgcv) # estimation of generalized additive regression models (GAMs) library(ggplot2) # data visualization library(ggpubr) # arranging multiple ggplots in a grid with ggarrange() # set the global theme of all plots theme_set(theme_minimal()) ``` ## Example data APC analyses require long-term panel or repeated cross-sectional data. The package includes two exemplary datasets on the travel behavior of German tourists (dataset `travel`) and the number of unintentional drug overdose deaths in the United States (`drug_deaths`). See the respective help pages `?travel` and `?drug_deaths` for details. In the following, we will use the `travel` dataset to investigate if travel distances of the main trip of German travelers mainly change over the life cycle of a person (age effect), macro-level developments like decreasing air travel prices in the last decades (period effect) or the generational membership of a person, which is shaped by similar socialization and historical experiences (cohort effect). ```{r data preparation} data(travel) ``` ## Descriptive visualizations Different functions are available for descriptively visualizing observed structures. 
This includes plots for the marginal distribution of some variable of interest, 1D plots for the development of some variable over age, period or cohort, as well as density matrices that visualize the development over all temporal dimensions. ### Marginal distribution of one variable The marginal distribution of a variable can be visualized using `plot_density`. Metric variables can be plotted using a density plot or a boxplot, while categorical variables can be plotted using a bar chart. ```{r, message=FALSE, warning=FALSE, fig.height=2.2} gg1 <- plot_density(dat = travel, y_var = "mainTrip_distance", log_scale = TRUE) gg2 <- plot_density(dat = travel, y_var = "mainTrip_distance", log_scale = TRUE, plot_type = "boxplot") gg3 <- plot_density(dat = travel, y_var = "household_size") ggpubr::ggarrange(gg1, gg2, gg3, nrow = 1) ``` ### 1D: One variable against age, period or cohort Plotting the distribution of a variable against age, period or cohort is possible with function `plot_variable`. The distribution of metric and categorical variables is visualized using boxplots or line charts (see argument `plot_type`) and bar charts, respectively. The latter by default show relative frequencies, but can be changed to show absolute numbers by specifying argument `geomBar_position = "stack"`. ```{r, message = FALSE, fig.height=4} plot_variable(dat = travel, y_var = "mainTrip_distance", apc_dimension = "period", plot_type = "line", ylim = c(0,1000)) plot_variable(dat = travel, y_var = "household_size", apc_dimension = "period") ``` ### 2D: Density matrices To include all temporal dimensions in one plot, `APCtools` contains function `plot_densityMatrix`. In Weigert et al. (2021), this plot type was referred to as *ridgeline matrix* when plotting multiple density plots for a metric variable. The basic principle of a density matrix is to (i) visualize two of the temporal dimensions on the x- and y-axis (specified using the argument `dimensions`), s.t. the third temporal dimension is represented on the diagonals of the matrix, and (ii) to categorize the respective variables on the x- and y-axis in meaningful groups. The function then creates a grid, where each cell contains the distribution of the selected `y_var` variable in the respective category. By default, age and period are depicted on the x- and y-axis, respectively, and cohort on the diagonals. The categorization is defined by specifying two of the arguments `age_groups`, `period_groups` and `cohort_groups`. ```{r, fig.width=8, message=FALSE} age_groups <- list(c(80,89),c(70,79),c(60,69),c(50,59), c(40,49),c(30,39),c(20,29)) period_groups <- list(c(1970,1979),c(1980,1989),c(1990,1999), c(2000,2009),c(2010,2019)) plot_densityMatrix(dat = travel, y_var = "mainTrip_distance", age_groups = age_groups, period_groups = period_groups, log_scale = TRUE) ``` To highlight the effect of the variable depicted on the diagonal (here: cohort), different diagonals can be highlighted using argument `highlight_diagonals`. ```{r, fig.height=6.5, message=FALSE} plot_densityMatrix(dat = travel, y_var = "mainTrip_distance", age_groups = age_groups, period_groups = period_groups, highlight_diagonals = list("born 1950 - 1959" = 8, "born 1970 - 1979" = 10), log_scale = TRUE) ``` For metric variables it is further possible to use a color scale for the density function (arguments `y_var_cat_breaks` and `y_var_cat_labels`) to highlight local developments over the APC dimensions. 
```{r, fig.height=6.5, message=FALSE} dist_cat_breaks <- c(1,500,1000,2000,6000,100000) dist_cat_labels <- c("< 500 km","500 - 1,000 km", "1,000 - 2,000 km", "2,000 - 6,000 km", "> 6,000 km") plot_densityMatrix(dat = travel, y_var = "mainTrip_distance", age_groups = age_groups, period_groups = period_groups, log_scale = TRUE, y_var_cat_breaks = dist_cat_breaks, y_var_cat_labels = dist_cat_labels, highlight_diagonals = list("born 1950 - 1959" = 8, "born 1970 - 1979" = 10), legend_title = "Distance category") ``` Alternatively to plotting a conditional density in each matrix cell, metric variables can also be visualized using boxplots by specifying `plot_type = "boxplot"`. Categorical variables are automatically plotted using conditional bar plots. ```{r, fig.height=6.5, message=FALSE} plot_densityMatrix(dat = travel, y_var = "household_size", age_groups = age_groups, period_groups = period_groups, highlight_diagonals = list("born 1950 - 1959" = 8, "born 1970 - 1979" = 10)) ``` ### 2D: Heatmaps Similar to density matrices, heatmaps simultaneously visualize all three APC dimensions. Instead of individual distributions in separated categories, however, a color scale is used to depict the mean value in a specific region. When setting `bin_heatmap = FALSE`, the function `plot_APCheatmap` shows the average observed values of a metric variable for each observed combination of age and period values (if `dimensions = c("period","age")`). The borders of groups along the diagonal can be marked using argument `markLines_list`. ```{r, fig.height=6.5, fig.width=8} plot_APCheatmap(dat = travel, y_var = "mainTrip_distance", y_var_logScale = TRUE, bin_heatmap = FALSE, markLines_list = list(cohort = c(1900,1920,1939,1946, 1966,1982,1994))) ``` By default, the heatmap is binned in five year blocks along the x- and y-axis to focus on larger-scale developments. Note that the grid used for binning can flexibly be defined with the argument `bin_heatmapGrid_list`. ```{r, fig.height=6.5, fig.width=8} plot_APCheatmap(dat = travel, y_var = "mainTrip_distance", y_var_logScale = TRUE, markLines_list = list(cohort = c(1900,1920,1939,1946, 1966,1982,1994))) ``` ### 2D: Hexamaps As an alternative to classical heatmaps observed developments can also be visualized using *hexamaps*, i.e. hexagonally binned heatmaps with rotated axes to give all three APC dimensions similar visual weight. This resolves the central problem of classical heatmaps that developments along the diagonal dimension are visually much harder to grasp than developments along the x- or y-axis. Function `plot_APChexamap` can be called similarly to `plot_APCheatmap`. Note, however, that hexamaps are currently implemented using the base R plot functions and not based on `ggplot2`. ```{r, fig.height=6.5} plot_APChexamap(dat = travel, y_var = "mainTrip_distance", y_var_logScale = TRUE) ``` ## Model-based analyses After getting a descriptive overview of the data with the functions outlined above, an established approach to analyze APC structures in more detail is to use the semiparametric approach offered by generalized additive regression models (GAMs). This regression-based approach offers several benefits: * Compared to alternative regression-based APC approaches it circumvents the issue of linear dependency of the APC dimensions by estimating a flexible two-dimensional tensor product surface. 
* The model separates the underlying smooth effects of age, period and cohort from random variation in the data and allows for the subsequent visualization of marginal age, period and cohort effects. * While the above heatmaps and hexamaps can only be used to visualize the observed distribution of *metric* variables, the regression-based approach also allows for estimating and plotting the mean structure in settings with a categorical response variable, for example by estimating a logistic regression model. * The regression-based approach allows for accounting for further control variables in the model. E.g., when analyzing how travel distances developed over the last decades, each persons income can be included as a covariate in the model estimation to account for income differences between travelers in different decades. For estimating a regression model we utilize the function `gam` implemented in package `mgcv`. For further methodological details see Weigert et al. (2021). Let's fit two GAM models, one without and one with further control variables. See `?travel` for an explanation of the used variables. The tensor product surface is estimated with two marginal P-spline bases with eight basis functions each. ```{r} # GAM without covariates model_pure <- gam(mainTrip_distance ~ te(age, period, bs = "ps", k = c(8,8)), data = travel) # GAM including covariates model_cov <- gam(mainTrip_distance ~ te(age, period, bs = "ps", k = c(8,8)) + residence_region + household_size + s(household_income), data = travel) # create a named list of the two models, useful for some functions model_list <- list("pure model" = model_pure, "covariate model" = model_cov) ``` ### Heatmaps Similary to the descriptive visualization outlined above, the tensor product surface estimated with the regression model can be created with `plot_APCheatmap`, by passing the model object as argument `model` to the function. ```{r, fig.height=3, fig.width=8} plot_APCheatmap(dat = travel, model = model_pure) ``` ### Hexamaps The same applies to the hexamaps. By calling `plot_APChexamap` and specifying the `model` argument, a hexamap of the estimated mean structure is created. ```{r, fig.height=6.5} plot_APChexamap(dat = travel, model = model_pure) ``` ### Marginal APC effects Based on the tensor product surface estimated through the regression model, marginal effects can be extracted by taking the average of all values on the surface along one dimension. Marginal effects can be visualized using `plot_marginalAPCeffects`. ```{r} plot_marginalAPCeffects(model = model_pure, dat = travel, variable = "age") ``` The marginal effects of multiple models can also be visualized in one plot using `plot_jointMarginalAPCeffects`. ```{r} plot_jointMarginalAPCeffects(model_list = model_list, dat = travel, vlines_list = list("cohort" = c(1900,1920,1939, 1946,1966,1982, 1994))) ``` ### Partial APC plots partial APC plots can be used to get deeper insights into a specific temporal effect and the interrelations between the temporal dimensions. These plots show the estimated effect for one temporal dimension dependent on the remaining two dimensions. The mean marginal effect is marked as a bold blue line, and one grayscale line is added for each partial effect, that is, for the estimated differences when just focusing on a specific age, period or cohort group. In the following example, the period effect is visualized, dependent on different age groups and cohorts. See [Weigert et al. 
(2021)](https://doi.org/10.1177/1354816620987198) for details on the interpretation of partial APC plots. ```{r} plot_partialAPCeffects(model = model_pure, dat = travel, variable = "period") ``` ### APC summary table The function `create_APCsummary` can be used to get a compact overview of the magnitude of the individual APC effects. It extracts information about the minimum and maximum values of the marginal APC effects and computes the overall size of each effect. ```{r} create_APCsummary(model_list = model_list, dat = travel) ``` ### Covariate effect plots Control variables in the regression models can be visualized using the functions `plot_linearEffects` and `plot_1Dsmooth` which create an effect plot of all linear effects in a model and a line plot of a one-dimensional smooth effect, respectively. ```{r} APCtools::plot_linearEffects(model_cov) APCtools::plot_1Dsmooth(model_cov, select = 2) ``` ### Model summary tables The function `create_modelSummary` can be used to create publication-ready tables of the full model results. It can either be applied to a list of multiple models or a list of a single model, and returns a list with both a table for all linear coefficients and all nonlinear estimates. ```{r} summary_list <- create_modelSummary(model_list) summary_list[[1]] summary_list[[2]] ```
/scratch/gouwar.j/cran-all/cranData/APCtools/vignettes/main_functionality.Rmd
# #' Implementation of APF and FDR robust estimation #' #' \code{apf_fdr} returns robust estimates of the Average Power Function (APF) #' and Bayes False Discovery Rate (FDR) for each value of the threshold Gamma #' on the p-value and Tau on the correlation coefficient. #' #' @param data Either a vector, matrix or dataframe. #' @param type Set \code{"datf"} if \code{data} is a matrix or dataframe containing #' the raw data (columns); \code{"pvl"} for a vector of p-values. #' @param corr The type of correlation to use when \code{type = "datf"}. It can be #' set to either \code{"spearman"} or \code{"pearson"}. #' @param lobs When \code{type = "pvl"}, it indicates the number of datapoints used to #' compute the correlations. #' @param seed A seed (natural number) for the resampling. #' @param gamm The threshold gamma on the p-values to explore (typically \eqn{\le} 0.05 or #' 0.1). A min, max and step length value need to be set. #' #' @return A list with four elements. A vector \code{APF_gamma} containing the robust #' estimates of APF (5th quantiles) for all the gamma values set in \code{gamm}. A vector #' \code{FDR_gamma} with the robust estimates of Bayes FDR (95th quantiles). A #' vector \code{tau_gamma} with the correlation coefficients \emph{tau} for each gamma #' value explored and another vector with the relative values gamma (\code{gammaval}). #' #' @examples #' \donttest{data("Ex1") #' APF_lst <- apf_fdr(Ex1,"pvl",lobs=100)} #' # The example uses the dataset Ex1 (in the APFr package) which is #' # a vector of 100 p-values. The number of datapoints used to #' # compute each p-value in this example is set to 100. As a result, #' # a list of 4 objects is returned. #' \dontshow{ #' prova <- runif(3) #' apf_fdr(prova,"pvl",lobs=15,gamm=c(0.01,0.05,0.02))} #' #' @references Quatto, P, Margaritella, N, et al. Brain networks #' construction using Bayes FDR and average power function. \emph{Stat Methods Med Res}. #' Published online May 14th, 2019; DOI:10.1177/0962280219844288. #' #' @export # apf_fdr <- function(data, type="datf", corr="spearman", lobs=0, seed=111, gamm=c(0.0001, 0.1, 0.002)) { # #--ERRORS: if (type == "datf") { if (is.data.frame(data) == FALSE && is.matrix(data) == FALSE) { stop("data are not in matrix nor data frame form") } if (dim(data)[1] < 3) { stop("not enough observations to compute tests") } } if (type == "pvl" && is.vector(data) == FALSE) { stop("data are not in vector form") } if (gamm[1] < 0 || gamm[2] > 1 || gamm[1] > gamm[2] || gamm[3] < 0 || gamm[3] > 1) { stop("gamm values are not in [0,1] or gamm[1] > gamm[2]") } if (seed < 1 || all.equal(seed, as.integer(seed)) != TRUE){ stop("seed should be a natural positive number") } #------- if (type == "datf") { # Perform multiple correlation tests lobs <- dim(data)[1] corre <- stats::cor(as.matrix(data), method = corr, use = "all.obs") vect_cor <- corre[upper.tri(corre)] vect_t <- vect_cor * sqrt( (lobs - 2) / (1 - (vect_cor ^ 2))) samplep <- (2 - 2 * stats::pt(abs(vect_t), lobs - 2)) # number of tests nt <- (ncol(data)) * (ncol(data) - 1) / 2 # rm(vect_cor, vect_t) } else { samplep <- data nt <- length(samplep) } if (lobs == 0) { stop("no. 
datapoints (lobs) must be specified when type=pvl") } #NEW if (length(which(samplep <= gamm[1])) == length(samplep)){ stop("all p-values <= gamm[1]") } # message("Creating Bayes FDR...") #1) Resample 1000 times + original data -> lst_p ------------------------------- # set.seed(seed) resamples <- lapply(1:1000, function(i) sample(samplep, replace = T)) hh <- length(resamples) + 1 vuoto <- rep(NA, length(samplep)) lst_p <- as.list(replicate(hh, vuoto, simplify = F))# lst_p[[1]] <- samplep for (k in 2:length(lst_p)) { lst_p[[k]] <- resamples[[k - 1]] } rm(k, hh, vuoto) #2) Count # p-values below LAMBDA (lambd) which varies between 0.01 and 0.98 #NEW if (length(which(samplep <= 0.0001)) == length(samplep)){ lambd <- seq(from = 0.0000001, to = 0.0001, by = 0.000001) } else { lambd <- seq(from = 0.0001, to = 0.9901, by = 0.01) } R_lambd <- rep(NA, length(lambd)) lst_R_lambd <- as.list(replicate(length(lst_p), R_lambd, simplify = F)) for (q in 1:length(lst_p)) { for (i in 1:length(lambd)) { lst_R_lambd[[q]][i] <- max(sum(as.numeric(lst_p[[q]] <= lambd[i]), na.rm = TRUE), 1) } #NEW if (length(which(lst_R_lambd[[q]] == length(samplep))) > 0 && min(which(lst_R_lambd[[q]] == length(samplep))) == 1){ stop("all p-values <= 1e-07") } } rm(R_lambd, i, q) #3) Avoid pi_0=0 by carrying over the last R_lambd<length(samplep) for (q in 1:length(lst_p)){ if (length(which(lst_R_lambd[[q]] == length(samplep))) > 0){ lst_R_lambd[[q]][which(lst_R_lambd[[q]] == length(samplep))] <- lst_R_lambd[[q]][min(which(lst_R_lambd[[q]] == length(samplep))) - 1] } } # #5) From lst_R_lambd to lst_W_lambd lst_W_lambd <- lapply(lst_R_lambd, function(x) nt - x) # #6) Estimate pi_0(lambd) lst_pg0_lambd <- lapply(lst_W_lambd, function(x) x / ( (1 - lambd) * nt)) # #7) R_gamm: no. p-values <= threshold gammval # (gammaval can be changed a posteriori to focus on a specific # subset of the gamma range) # gammaval <- seq(from = gamm[1], to = gamm[2], by = gamm[3]) R_gamm <- rep(NA, length(gammaval) ) lst_R_gamm <- as.list(replicate(length(lst_p), R_gamm, simplify = F)) for (q in 1:length(lst_p) ) { for (i in 1:length(gammaval) ) { lst_R_gamm[[q]][i] <- max(sum(as.numeric(lst_p[[q]] <= gammaval[i]), na.rm = TRUE), 1) } } rm(R_gamm, i, q) # # Estimate P(p-values<= gammaval). 
lst_Pr_P_le_gamm <- lapply(lst_R_gamm, function(x) x / nt) # # 8) FDR----------------------------------------------------------------------- # create a list with FDR matrices of size lambd X gammaval mtx <- matrix(0, nrow = length(lambd), ncol = length(gammaval)) lst_fdr <- as.list(replicate(length(lst_p), mtx, simplify = F)) for (q in 1:length(lst_p) ) { for (i in 1:length(lambd) ) { for (j in 1:length(gammaval) ) { lst_fdr[[q]][i, j] <- min( ( ( (lst_pg0_lambd[[q]][i]) * gammaval[j]) / lst_Pr_P_le_gamm[[q]][j]), 1) } } } rm(i, j, q, mtx) # 9) bootstrap plugin FDR------------------------------------------------------ # compare lst_fdr[[i]], i=2,...,1001 with lst_fdr[[1]] to obtain MSE # Find FDR that minimises MSE for every gammaval # # MSE matrix hh <- length(resamples) + 1 vect <- rep(0, length(resamples)) MSE <- matrix(0, length(lambd), length(gammaval)) for (i in 1:length(lambd)) { for (j in 1:length(gammaval)) { vect <- sapply(lst_fdr[2 : hh], "[[", i, j) MSE[i, j] <- (1 / length(resamples)) * sum( (vect - lst_fdr[[1]][i, j]) ^ 2) } } rm(hh, vect, i, j) # # fdr_g <- rep(0, length(lst_p)) lst_fdr_gamm1 <- as.list(replicate(length(fdr_g), rep(0, length(gammaval)), simplify = F)) lst_1 <- as.list(replicate(length(gammaval), matrix(0, nrow = length(lambd), ncol = 3), simplify = F)) lst_MSE_fdr <- as.list(replicate(length(lst_p), lst_1, simplify = F)) lst_MSE_fdr_ord <- rep(lst_MSE_fdr, 1) cont_lambd <- c(1 : length(lambd)) cont_lambd_mat <- matrix(cont_lambd, nrow = length(lambd), ncol = length(gammaval)) # for (j in 1 : length(lst_p)) { for (i in 1 : length(gammaval)) { lst_MSE_fdr[[j]][[i]] <- as.matrix(cbind(MSE[, i], lst_fdr[[j]][, i], cont_lambd_mat[, i])) } } for (j in 1 : length(lst_p)) { for (i in 1 : length(gammaval)) { lst_MSE_fdr_ord[[j]][[i]] <- lst_MSE_fdr[[j]][[i]][order(lst_MSE_fdr[[j]][[i]][, 1]), ] } } for (j in 1 : length(lst_p)) { lst_fdr_gamm1[[j]] <- sapply(lst_MSE_fdr_ord[[j]], "[[", 1, 2) } # lst_fdr_gamm2 <- as.list(replicate(length(gammaval), fdr_g, simplify = F)) # for (i in 1 : length(gammaval)) { lst_fdr_gamm2[[i]] <- sapply(lst_fdr_gamm1, "[[", i) } # # For every lst_fdr_gamm2[[i]] there are 1001 resamples of FDR. # Extract the 95th quantile. 
FDR_gamma <- rep(0, length(gammaval)) for (i in 1 : length(gammaval)) { FDR_gamma[i] <- stats::quantile(sort(lst_fdr_gamm2[[i]]), .95) } # rm(lst_MSE_fdr, lst_MSE_fdr_ord, lst_fdr_gamm1, lst_fdr_gamm2, lst_1, fdr_g) # #### FDR_gamma contains 95th FDR percentile for every gammaval### # # 10) Average Power Function (APF) -------------------------------------------- # message("Creating APF...") # APF numerator lst_APF1 <- as.list(replicate(length(lst_p), matrix(0, nrow = length(lambd), ncol = length(gammaval)), simplify = F)) for (q in 1 : length(lst_p)) { for (j in 1 : length(gammaval)) { for (i in 1 : length(lambd)) { lst_APF1[[q]][i, j] <- (lst_R_gamm[[q]][j] - (gammaval[j] * lst_pg0_lambd[[q]][i] * nt)) } } } # # APF denominator lst_APF2 <- lapply(lst_pg0_lambd, function(x) (1 - x) * nt) # lst_APF <- as.list(replicate(length(lst_p), matrix(0, nrow = length(lambd), ncol = length(gammaval)), simplify = F)) for (q in 1 : length(lst_p)) { for (i in 1 : length(lambd)) { for (j in 1: length(gammaval)) { lst_APF[[q]][i, j] <- ( (lst_APF1[[q]][i, j]) / (lst_APF2[[q]][i])) } } } for (q in 1 : length(lst_p)) { for (i in 1 : length(lambd)) { for (j in 1: length(gammaval)) { lst_APF[[q]][i, j] <- min(lst_APF[[q]][i, j], 1) } } } rm(lst_APF1, lst_APF2) # # 11) bootstrap plugin APF------------------------------------------------------ # # MSE matrix hh <- length(resamples) + 1 vect <- rep(0, length(resamples)) MSE2 <- matrix(0, length(lambd), length(gammaval)) for (i in 1:length(lambd)) { for (j in 1:length(gammaval)) { vect <- sapply(lst_APF[2 : hh], "[[", i, j) MSE2[i, j] <- (1 / length(resamples)) * sum( (vect - lst_APF[[1]][i, j]) ^ 2) } } rm(hh, vect, i, j) # # APF_g <- rep(0, length(lst_p)) lst_APF_gamm1 <- as.list(replicate(length(APF_g), rep(0, length(gammaval)), simplify = F)) lst_1 <- as.list(replicate(length(gammaval), matrix(0, nrow = length(lambd), ncol = 3), simplify = F)) lst_MSE2APF <- as.list(replicate(length(lst_p), lst_1, simplify = F)) lst_MSE2APF_ord <- rep(lst_MSE2APF, 1) # for (j in 1 : length(lst_p)) { for (i in 1 : length(gammaval)) { lst_MSE2APF[[j]][[i]] <- as.matrix(cbind(MSE2[, i], lst_APF[[j]][, i], cont_lambd_mat[, i])) } } for (j in 1 : length(lst_p)) { for (i in 1 : length(gammaval)) { lst_MSE2APF_ord[[j]][[i]] <- lst_MSE2APF[[j]][[i]][order(lst_MSE2APF[[j]][[i]][, 1]), ] } } for (j in 1 : length(lst_p)) { lst_APF_gamm1[[j]] <- sapply(lst_MSE2APF_ord[[j]], "[[", 1, 2) } # lst_APF_gamm2 <- as.list(replicate(length(gammaval), APF_g, simplify = F)) # for (i in 1 : length(gammaval)) { lst_APF_gamm2[[i]] <- sapply(lst_APF_gamm1, "[[", i) } # # For each lst_APF_gamm2[[i]] there are 1001 resamples of APF # Extract the 5th percentile APF_gamma <- rep(0, length(gammaval)) for (i in 1 : length(gammaval)) { APF_gamma[i] <- stats::quantile(sort(lst_APF_gamm2[[i]]), .05) } rm(APF_g, lst_MSE2APF, lst_MSE2APF_ord, lst_APF_gamm1, lst_APF_gamm2, lst_1) # #### APF_gamma contains 5th APF percentile for every gammaval### # # 12) compute tau from gamma values. tau_gamma <- abs(stats::qt(gammaval / 2, lobs - 2) / sqrt(lobs - 2 + (stats::qt(gammaval / 2, lobs - 2)) ^ 2)) # # 13) create the final list APF_lst <- list(APF_gamma, FDR_gamma, tau_gamma, gammaval) names(APF_lst) <- c("APF_gamma", "FDR_gamma", "tau_gamma", "gammaval") message("Done!") return(APF_lst) }
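# Standalone illustration of step 12 above: every p-value threshold gamma
# maps to an absolute correlation threshold tau for tests based on `lobs`
# data points. The helper below is a hedged sketch for readers; the name
# gamma_to_tau is illustrative and not part of the APFr API.
gamma_to_tau <- function(gamma, lobs) {
  t_crit <- stats::qt(gamma / 2, df = lobs - 2)
  abs(t_crit / sqrt(lobs - 2 + t_crit ^ 2))
}
# e.g. gamma_to_tau(0.05, lobs = 100) returns the |correlation| matching a
# two-sided p-value of 0.05 with 100 observations (~0.197).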
/scratch/gouwar.j/cran-all/cranData/APFr/R/apf_fdr.R
# #' Generate smooth graphs for the APF and FDR estimates #' #' \code{apf_plot} returns a graph with Average Power Function (APF), #' Bayes False Discovery Rate (FDR) and APF vs. FDR. In addition, when #' \code{tab = TRUE}, a table containing APF, FDR, tau and gamma values #' for a selected subset of APF and FDR is printed. #' #' @param APF_lst The output from the \code{apf_fdr} function. #' @param tab If \code{TRUE}, a table with relevant values of APF, FDR, #' tau and gamma is printed. #' @param APF_inf Sets the minimum value of APF to appear in the table #' when \code{tab = TRUE}. #' @param FDR_sup Sets the maximum value of Bayes FDR to appear in the #' table when \code{tab = TRUE}. #' #' @return Smooth graphs for APF vs Gamma (left), FDR vs Gamma (centre) #' and APF vs FDR (right). Regions where FDR \eqn{\le} \code{FDR_sup} and #' APF \eqn{\ge} \code{APF_inf} (if present) are highlighted in yellow #' and printed in a table (if \code{tab = TRUE}) together with the relative #' values of \emph{gamma} and \emph{tau}. #' #' @examples #' data("Ex2") #' apf_plot(Ex2) #' # Ex2 is an example of output obtained #' # from the apf_fdr() function. #' #' @references Quatto, P, Margaritella, N, et al. Brain networks #' construction using Bayes FDR and average power function. \emph{Stat Methods Med Res}. #' Published online May 14th, 2019; DOI:10.1177/0962280219844288. #' #' @export # apf_plot <- function(APF_lst, tab = TRUE, APF_inf = 0.5, FDR_sup = 0.05){ graphics::par(mfrow = c(1, 3)) graphics::plot(APF_lst$gammaval, stats::smooth.spline(APF_lst$APF_gamma, spar = 0.8)$y, type = "l", lwd = 2, main = "Average Power Function (APF)", ylab = "APF", xlab = "Gamma") graphics::plot(APF_lst$gammaval, stats::smooth.spline(APF_lst$FDR_gamma, spar = 0.8)$y, type = "l", lwd = 2, main = "Bayes FDR", ylab = "FDR", xlab = "Gamma") graphics::plot(stats::smooth.spline(APF_lst$FDR_gamma, spar = 0.8)$y, stats::smooth.spline(APF_lst$APF_gamma, spar = 0.8)$y, type = "n", lwd = 2, main = "APF vs. Bayes FDR", xlab = "Bayes FDR", ylab = "APF") graphics::rect(0, APF_inf, FDR_sup, 1, col = "yellow", border = NA, density = 4) graphics::lines(stats::smooth.spline(APF_lst$FDR_gamma, spar = 0.8)$y, stats::smooth.spline(APF_lst$APF_gamma, spar = 0.8)$y, lwd = 2) graphics::abline(h = 0.5, lwd = 2, lty = 2, col = "blue") graphics::abline(v = c(0.05, 0.1), lwd = 2, lty = 2, col = c("red", "orange")) # if (tab == TRUE) { sel <- which(stats::smooth.spline(APF_lst$APF_gamma, spar = 0.8)$y >= APF_inf & stats::smooth.spline(APF_lst$FDR_gamma, spar = 0.8)$y <= FDR_sup) if ( length(sel) > 0) { gamma_sel <- APF_lst$gammaval[sel] tau_sel <- APF_lst$tau_gamma[sel] APF_sel <- stats::smooth.spline(APF_lst$APF_gamma, spar = 0.8)$y[sel] FDR_sel <- stats::smooth.spline(APF_lst$FDR_gamma, spar = 0.8)$y[sel] results_tab <- as.data.frame(round(cbind(gamma_sel, tau_sel, FDR_sel, APF_sel), 3)) colnames(results_tab) <- c("Gamma", "tau", "95th FDR", "5th APF") return(results_tab) } } }
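# Hedged end-to-end sketch combining apf_fdr() and apf_plot() (Ex1 ships with
# APFr, as in the apf_fdr() examples; kept commented out so that no code runs
# when the package file is sourced):
# data("Ex1")
# APF_lst <- apf_fdr(Ex1, type = "pvl", lobs = 100)
# apf_plot(APF_lst, tab = TRUE, APF_inf = 0.5, FDR_sup = 0.05)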
/scratch/gouwar.j/cran-all/cranData/APFr/R/apf_plot.R
#' Example dam genotypes #' #' @format A matrix with 14 rows (one row = one dam) and 100 columns (one column = one marker) "APIS_dam"
/scratch/gouwar.j/cran-all/cranData/APIS/R/APIS_dam.R
#' Example offspring genotypes #' #' @format A matrix with 500 rows (one row = one offspring) and 100 columns (one column = one marker) "APIS_offspring"
/scratch/gouwar.j/cran-all/cranData/APIS/R/APIS_offspring.R
#' Example offspring 3n genotypes #' #' @format A matrix with 50 rows (one row = one offspring) and 100 columns (one column = one marker) "APIS_offspring3n"
/scratch/gouwar.j/cran-all/cranData/APIS/R/APIS_offspring3n.R
#' Example sire genotypes #' #' @format A matrix with 39 rows (one row = one sire) and 100 columns (one column = one marker) "APIS_sire"
/scratch/gouwar.j/cran-all/cranData/APIS/R/APIS_sire.R
#' Shiny App for interactive session of APIS #' #' Launch the shiny interface to use APIS interactively #' #' @import dplyr #' @import ggplot2 #' @import cowplot #' @import shinyBS #' @import shinythemes #' @import htmltools #' @rawNamespace import(shiny, except=c(dataTableOutput, renderDataTable)) #' @importFrom plotly plotlyOutput renderPlotly ggplotly #' @importFrom parallel detectCores #' @importFrom utils read.table #' @importFrom DT datatable dataTableOutput renderDataTable #' @importFrom gridExtra grid.table #' @importFrom grDevices dev.off pdf #' @importFrom graphics plot.new #' @importFrom utils head write.table read.table #' @importFrom rlang .data #' #' @return void : most results are automatically saved #' #' @export launch_APIShiny = function(){ addResourcePath("www",system.file("www",package = "APIS")) ui <- fluidPage( theme = shinytheme("spacelab"), # Application title fluidRow(column(12,titlePanel(title = div("Parentage assignment with APIS",style="font-family:Arial;font-weight:bold;margin-bottom:-1em", img(src="/www/sysaaf.png",height=70,width=70), img(src="/www/inrae.png",height=50,width=120), img(src="/www/Ifremer.png",height=70,width=120), img(src="/www/feamp2.png",height=70,width=110))))), fluidRow(column(12,titlePanel(title = div("By J.Roche",style="color:#000000;font-size:10px;height:20px;")))), fluidRow( column(6, h2("Choose your step",style="font-size:20px;font-weight:bold"), navlistPanel(widths = c(3,9),id = 'nav',selected = 0, tabPanel("Formatting",value=0, fileInput(inputId = "to_format", label = div("File of genotype to format for APIS (.ped from AXAS or .vcf)", bsButton(inputId = "qx1",label = "",icon = icon("question"), style = "info", size = "extra-small")), accept = c(".txt",".ped",".vcf")), conditionalPanel(condition = "output.colFormat", uiOutput(outputId = 'uiNoColSN'), uiOutput(outputId = 'uiNoColGeno'), selectInput(inputId = "what", label = div("Which individuals are in the file ?", bsButton(inputId = "qx2",label = "",icon = icon("question"), style = "info", size = "extra-small")), choices = c("Only parents","Only offspring","Both"),selected = "Only parents",multiple = FALSE)), conditionalPanel(condition = "input.what=='Both' | input.what=='Only offspring'", selectInput(inputId = "ploidy_format", label = div("Choose the ploidy level of the offspring", bsButton(inputId = "q52",label = "",icon = icon("question"), style = "info", size = "extra-small")), choices = c(2,3),selected = 2)), conditionalPanel(condition = "input.what=='Both'", fileInput(inputId = "list_par", label = div("File with the name of the Parents", bsButton(inputId = "qx3",label = "",icon = icon("question"), style = "info", size = "extra-small")), accept = c(".txt")), selectInput(inputId = 'header_list_par', label = div("Is there a header in the file with the parent names ?", bsButton(inputId = "q111",label = "",icon = icon("question"), style = "info", size = "extra-small")), choices = c('Yes','No'),selected = 'No'), uiOutput(outputId = 'col_head0')), fileInput(inputId = "snp_map", label = div("File with the marker names (ex : .map from AXAS) (optional)", bsButton(inputId = "qx4",label = "",icon = icon("question"), style = "info", size = "extra-small")), accept = c(".txt",".map")), conditionalPanel(condition = "output.colMap", selectInput(inputId = 'header_snp_map', label = div("Is there a header in the file with the marker names ?", bsButton(inputId = "q1111",label = "",icon = icon("question"), style = "info", size = "extra-small")), choices = c('Yes','No'),selected = 'No'), 
uiOutput(outputId = 'uiNoColMap')), selectInput(inputId = "markerType", label = div("Choose the type of marker", bsButton(inputId = "q53",label = "",icon = icon("question"), style = "info", size = "extra-small")), choices = c("SNP","microsat"),selected = "SNP"), uiOutput(outputId = "go_format") ), tabPanel("APIS",value = 1, conditionalPanel(condition = "output.APIS_launched == 0", # checkboxInput(inputId = "try_recom",value = FALSE, # label = div("Find the most suitable recombinaison rate", # bsButton(inputId = "q13",label = "",icon = icon("question"), style = "info", size = "extra-small"))), checkboxInput(inputId = "both_parents_in",value = TRUE, label = div("Male and female parents are in the same dataset", bsButton(inputId = "q131",label = "",icon = icon("question"), style = "info", size = "extra-small"))), selectInput(inputId = "ploidy", label = div("Choose the ploidy level of the offspring", bsButton(inputId = "q5",label = "",icon = icon("question"), style = "info", size = "extra-small")), choices = c(2,3),selected = 2), fileInput(inputId = "data_off", label = div("Dataset of OFFSPRING", bsButton(inputId = "q1",label = "",icon = icon("question"), style = "info", size = "extra-small")), accept = c(".Rdata",".txt")), conditionalPanel(condition = "input.both_parents_in==true", fileInput(inputId = "data_par", label = div("Dataset of PARENTS", bsButton(inputId = "q2",label = "",icon = icon("question"), style = "info", size = "extra-small")), accept = c(".Rdata",".txt")), fileInput(inputId = "sexe_par", label = div("File (.txt) with the sex of each parents", bsButton(inputId = "q3",label = "",icon = icon("question"), style = "info", size = "extra-small")), accept = ".txt"), conditionalPanel(condition = "output.dta_sex_load", uiOutput(outputId = "uiChangeSN")), conditionalPanel(condition = "output.dta_sex_load", uiOutput(outputId = "uiChangeSe"))), conditionalPanel(condition = "input.both_parents_in!=true", fileInput(inputId = "data_par1", label = div("Dataset of male (sire)", bsButton(inputId = "q2.1",label = "",icon = icon("question"), style = "info", size = "extra-small")), accept = c(".Rdata",".txt")), fileInput(inputId = "data_par2", label = div("Dataset of female (dam)", bsButton(inputId = "q2.2",label = "",icon = icon("question"), style = "info", size = "extra-small")), accept = c(".Rdata",".txt"))), fileInput(inputId = "snp_in", label = div("File (.txt) with the markers to use (optional)", bsButton(inputId = "q4",label = "",icon = icon("question"), style = "info", size = "extra-small")), accept = ".txt"), uiOutput(outputId = "nb_snp_poss"), selectInput(inputId = "markerType2", label = div("Choose the type of marker", bsButton(inputId = "q54",label = "",icon = icon("question"), style = "info", size = "extra-small")), choices = c("SNP","microsat"),selected = "SNP"), sliderInput(inputId = "core", label = div("Choose number of cores for parallelization", bsButton(inputId = "q7",label = "",icon = icon("question"), style = "info", size = "extra-small")), min = 1,max = detectCores() - 1,value = detectCores() - 2,step = 1), uiOutput(outputId = "go_apis")), conditionalPanel(condition = "output.APIS_launched == 1", selectInput(inputId = "method", label = div("Choose the method for assignment", bsButton(inputId = "q6",label = "",icon = icon("question"), style = "info", size = "extra-small")), choices = c("exclusion","likelihood"),selected = "likelihood"), conditionalPanel(condition = "input.method=='exclusion'", radioButtons(inputId = "exclu_thres", label = div("Choose threshold method for 
exclusion", bsButton(inputId = "q61",label = "",icon = icon("question"), style = "info", size = "extra-small")), choices = c("Mismatch number","Error rate"), selected = "Error rate") ), conditionalPanel(condition = "input.method=='likelihood' || (input.method=='exclusion' && input.exclu_thres=='Error rate')", sliderInput(inputId = "acceptError",label = "Error rate allowed",min = 0,max = 0.25,value = 0.05,step = 0.005) ), conditionalPanel(condition = "input.method=='exclusion' && input.exclu_thres=='Mismatch number'", uiOutput(outputId = "acceptMismatch0") # sliderInput(inputId = "acceptMismatch",label = "Number of mismatch allowed",min = 0,max = 50,value = 5,step = 1) ), # sliderInput(inputId = "acceptError",label = "Error rate allowed",min = 0,max = 0.25,value = 0.05,step = 0.01), textInput(inputId = "save_name", label = div("Name of the file to save (click the '?' for help)", bsButton(inputId = "q8",label = "",icon = icon("question"), style = "info", size = "extra-small"))), actionButton(inputId = "SaveAPIS",label = "Save pedigree & log") ) ), tabPanel("Verification",value = 2, fileInput(inputId = "data_res", label = div("Dataset created after the assignment with APIS in the first part (.Rdata)", bsButton(inputId = "q9",label = "",icon = icon("question"), style = "info", size = "extra-small")), accept = ".Rdata"), fileInput(inputId = "tab_accou", label = div("The file with the mating plan", bsButton(inputId = "q10",label = "",icon = icon("question"), style = "info", size = "extra-small")), accept = c(".txt")), conditionalPanel(condition = "output.dta_fac_load", uiOutput(outputId = "uiChangeSN2")), conditionalPanel(condition = "output.dta_fac_load", uiOutput(outputId = "uiChangeFa")), textInput(inputId = "save_name2", label = div("Name of the file to save (optional)", bsButton(inputId = "q18",label = "",icon = icon("question"), style = "info", size = "extra-small"))), actionButton(inputId = "launch_verif",label = "Launch the verification"), conditionalPanel(condition = "input.launch_verif>0", actionButton(inputId = "SavePlot",label = "Save output")) # fileInput(inputId = "id_off", # label = div("A .txt file with ID and barecode of offspring for INFAQUA", # bsButton(inputId = "q11",label = "",icon = icon("question"), style = "info", size = "extra-small")), # accept = c(".txt")), # conditionalPanel(condition = "output.changeSN3", # uiOutput(outputId = "uiChangeSN3")), # conditionalPanel(condition = "output.changeID", # uiOutput(outputId = "uiChangeID")), # actionButton(inputId = "SaveInf",label = "Save output for INFAQUA") ) ), ), column(5, h5(div("Warnings : Make sure your app is launched on the desired directory. If not, quit the app and use setwd() !", style="color:red;font-weight:bold;margin-bottom:3em")), conditionalPanel(condition = "input.nav==0", h5(div("Warnings : this step is only required if you did not use the genotyping shiny application AND works only for diploid genotype. 
Otherwise the format is already correct OR you should use the genotyping application for your triploids.", style="color:red;font-weight:bold;margin-bottom:3em")), a("Input .ped format example",href="/www/Ped_Example.txt",target="_blank"), # for hyperlink a("Input .vcf format example",href="/www/Vcf_Example.txt",target="_blank"), # for hyperlink h6("You can find 'log_Formating.txt' in the './log' directory for more info."), # a("log Formating",href="/www/log/log_Formating.txt",target="_blank"), # for hyperlink dataTableOutput(outputId = "head"), dataTableOutput(outputId = "headLpar"), dataTableOutput(outputId = "headMap"), textOutput(outputId = "end_format")), conditionalPanel(condition = "input.nav==1", a("Input .Rdata or .txt format example",href="/www/Data_Example.txt",target="_blank"), # for hyperlink h6("You can find 'log_APIS.txt' in the './log' directory for more info."), # a("log APIS",href="/www/log/log_APIS.txt",target="_blank"), # for hyperlink dataTableOutput(outputId = "head_sex"), textOutput(outputId = "WarningMarker"), tags$head(tags$style("#WarningMarker{color: red;font-size: 20px;}")), conditionalPanel(condition = "output.APIS_launched == 1", h4("Plot result from APIS : Select accepted error or mismatch threshold"), plotOutput(outputId = "graph_res"))), conditionalPanel(condition = "input.nav==2", dataTableOutput(outputId = "head_fac"), navbarPage("Choose section", tabPanel(title = "Summary", textOutput(outputId = "txt1"), textOutput(outputId = "txt2"), textOutput(outputId = "txt3"), textOutput(outputId = "txt4"), dataTableOutput(outputId = "tab"), plotOutput(outputId = "plot")), tabPanel(title = "Parents fertility", textOutput(outputId = "sent1.1"), textOutput(outputId = "sent1.2"), textOutput(outputId = "sent2.1"), textOutput(outputId = "sent2.2"), plotlyOutput(outputId = "barplot")), tabPanel(title = "Mating plan", plotlyOutput(outputId = "heatmap")))) ), column(1) ), bsTooltip(id = "qx1",title = "File with individuals in rows and genotypes in columns. 
Each allele has to be separated by a space if .txt<br/>Else, file created by AXAS with the extension .ped<br/>.vcf format also accepted.<br/>See examples for more details"), bsTooltip(id = "qx2",title = "Select whether there is only parents/offspring or if there is both in the .ped file<br/>If it is both, a file with the parent names will be requested to separate parents from offspring"), bsTooltip(id = "qx3",title = "The .txt file with the parent names must be a single column as follows (no header) :<br/>Indiv1<br/>Indiv2<br/>Indiv3<br/>..."), bsTooltip(id = "qx4",title = "File with MarkerName in the same order as the genotype in the input file<br/>Can be a .txt file with all marker names as a single column (no header) or the file created by AXAS (for example) with the marker names with the extension .map<br/>It is used to get the names of the markers (not provided in the .ped file)"), bsTooltip(id = "q1",title = "File created by the genotyping application (or during the formatting phase) with extension _genoAPIS.Rdata<br/>Can also be a .txt file with markers as columns and individuals as rows with genotype format as A/A/A or A/A.<br/>This is the offspring dataset"), bsTooltip(id = "q2",title = "File created by the genotyping application (or during the formatting phase) with extension _genoAPIS.Rdata<br/>Can also be a .txt file with markers as columns and individuals as rows with genotype format as A/A.<br/>This is the parents dataset"), bsTooltip(id = "q3",title = "A file containing at least two columns : SampleName (or CodeBarre) & Sex like so :<br/>SampleName Sex<br/>Name1 Sex1<br/>Name2 Sex2<br/>Name3 Sex3<br/>... ...<br/>The SampleName variable is the name of the sample as in the genotyping application.<br/>The Sex variable should take values from 1 to 4 : 1, male ; 2, female ; 3, neo-male ; 4, neo-female<br/>It is used to separate males from females"), bsTooltip(id = "q4",title = "A file containing one column with NO header like so :<br/>Marker1<br/>Marker2<br/>Marker3<br/>... ...<br/>Only those markers will be used for assignment, on the condition that they are in the dataset."), bsTooltip(id = "q5",title = "Select 2 if offspring are diploids and 3 if they are triploids"), bsTooltip(id = "q131",title = "Tick if male (sire) and female (dam) parents are in the same dataset"), bsTooltip(id = "q2.1",title = "File created by the genotyping application (or during the formatting phase) with extension _genoAPIS.Rdata<br/>File with only the male (sire) parents.<br/>Can also be a .txt file with markers as columns and individuals as rows with genotype format as A/A."), bsTooltip(id = "q2.2",title = "File created by the genotyping application (or during the formatting phase) with extension _genoAPIS.Rdata<br/>File with only the female (dam) parents.<br/>Can also be a .txt file with markers as columns and individuals as rows with genotype format as A/A."), bsTooltip(id = "q111",title = "If the loaded file has a header, select Yes. If it is a single column file with no header, select No."), bsTooltip(id = "q1111",title = "If the loaded file has a header, select Yes. 
If it is a single column file with no header, select No."),
bsTooltip(id = "q52",title = "Select 2 if individuals are diploids and 3 if they are triploids"),
bsTooltip(id = "q53",title = "Select SNP if the markers in your dataset are SNPs and microsat if they are microsatellite markers"),
bsTooltip(id = "q54",title = "Select SNP if the markers in your dataset are SNPs and microsat if they are microsatellite markers"),
bsTooltip(id = "q6",title = "APIS can use two different methods for assignment: likelihood and exclusion.<br/>It selects the best pair of parents based on different criteria<br/>See the documentation of APIS for more details"),
bsTooltip(id = "q61",title = "APIS can use two thresholds for the exclusion method.<br/>Error rate: automatically estimate the threshold given the accepted error rate.<br/>Mismatch number: select the maximum number of mismatches."),
bsTooltip(id = "q7",title = "APIS uses parallelization to reduce the running time. Select the number of cores<br/>Max is the number of cores of the computer minus 1 so you cannot freeze your computer<br/>Max-1 recommended (default)"),
bsTooltip(id = "q8",title = "Type the name you want for saving;<br/>The following will automatically be appended: _MethodSelected.Rdata for the file that contains the result of APIS and the list of markers selected; and _ped.txt for the pedigree result",trigger = "click"),
bsTooltip(id = "q18",title = "Type the name you want for saving the plots<br/>An extension will automatically be added to identify each plot<br/>The same name will be used if you want an output for INFAQUA"),
bsTooltip(id = "q9",title = "Select the file ending with _MethodSelected.Rdata created in the APIS part"),
bsTooltip(id = "q10",title = "A file containing at least two columns: SampleName (or CodeBarre) & Facto, like so:<br/>SampleName Facto<br/>Name1 Facto1<br/>Name2 Facto2<br/>Name3 Facto3<br/>... ...<br/>The SampleName (or CodeBarre) variable is the name of the sample as in the genotyping application.<br/>The Facto variable means factorial. It is used to verify whether the parents of an individual have met or not."),
bsTooltip(id = "q11",title = "A file containing at least two columns: ID & SampleName (or CodeBarre), like so:<br/>ID SampleName<br/>Id1 Name1<br/>Id2 Name2<br/>Id3 Name3<br/>... 
...<br/>The SampleName (or CodeBarre) variable is the name of the sample as in the genotyping application.<br/>The ID variable refers to the animal identifier."),
bsTooltip(id = "q12",title = "Select the number of the column corresponding to marker names.<br/>Ignored if .txt file with a single column."),
bsTooltip(id = "q13",title = "Use this to find an optimum for the recombination rate (default 0.5).<br/>If selected, will perform multiple APIS assignments with different recombination rates to find an optimum.<br/>This operation takes more time but can improve the assignment.")
)

# Define server logic
server <- function(input, output,session) {
  ##### Event from APIS #####
  dataset = reactiveValues(off=data.frame(),par=data.frame(),sexe=data.frame(),snp_off=data.frame(),snp_par=data.frame(),snp_to_keep=c(),snp_kept=c(), nbMarker=c(),AlertNbMarker=NULL, APIS_launched=0, apis_likelihood=data.frame(),apis_exclusion=data.frame(), dta_sex_load=FALSE,tmp_sexe=data.frame(),choices=NULL,displayed=data.frame(), path_log='')

  #---Load the offspring dataset and store the data
  observeEvent(input$data_off,{
    if (!is.null(input$data_off$datapath)){
      if (grepl(pattern = ".txt",x = input$data_off$name)){
        dataset$off = read.table(file=input$data_off$datapath)
        allele_freq = as.data.frame(get_allele_frequencies(dataset$off,ploidy_level = as.numeric(input$ploidy)))
        min_non_0 = function(x){ x[x==min(x[x!=0])][1] }
        dataset$snp_off = data.frame(MarkerName=rownames(allele_freq),toKeep=TRUE,MAF=apply(X = allele_freq %>% select(.data$Freq_A,.data$Freq_T,.data$Freq_C,.data$Freq_G),MARGIN = 1,FUN = min_non_0,simplify = TRUE))
      } else if (grepl(pattern = ".Rdata",x = input$data_off$name)){
        tmp=load(file = input$data_off$datapath)
        eval(parse(text = paste0("dataset$off = ",tmp[1])))
        eval(parse(text = paste0("dataset$snp_off = ",tmp[2])))
      } else {
        stop("File extension not supported !")
      }
    }
  })

  #---Load the parents dataset and store the data
  observeEvent(input$data_par,{
    if (!is.null(input$data_par$datapath)){
      if (grepl(pattern = ".txt",x = input$data_par$name)){
        dataset$par = read.table(file=input$data_par$datapath)
        allele_freq = as.data.frame(get_allele_frequencies(dataset$par,ploidy_level = 2))
        if (!is.null(allele_freq$Freq_NA)){
          dataset$snp_par = data.frame(MarkerName=rownames(allele_freq),toKeep=TRUE,CR=1-allele_freq$Freq_NA)
        } else {
          dataset$snp_par = data.frame(MarkerName=rownames(allele_freq),toKeep=TRUE,CR=1)
        }
      } else if (grepl(pattern = ".Rdata",x = input$data_par$name)){
        tmp=load(file = input$data_par$datapath)
        eval(parse(text = paste0("dataset$par = ",tmp[1])))
        eval(parse(text = paste0("dataset$snp_par = ",tmp[2])))
      } else {
        stop("File extension not supported !")
      }
    }
  })

  observeEvent(c(input$data_par1,input$data_par2),{
    if (!is.null(input$data_par1$datapath) & !is.null(input$data_par2$datapath)){
      if (grepl(pattern = ".txt",x = input$data_par1$name)){
        tmp1.1 = read.table(file=input$data_par1$datapath)
        allele_freq1 = as.data.frame(get_allele_frequencies(tmp1.1,ploidy_level = 2))
        if (!is.null(allele_freq1$Freq_NA)){
          tmp1.2 = data.frame(MarkerName=rownames(allele_freq1),toKeep=TRUE,CR=1-allele_freq1$Freq_NA)
        } else {
          tmp1.2 = data.frame(MarkerName=rownames(allele_freq1),toKeep=TRUE,CR=1)
        }
      } else if (grepl(pattern = ".Rdata",x = input$data_par1$name)){
        tmp=load(file = input$data_par1$datapath)
        eval(parse(text = paste0("tmp1.1 = ",tmp[1])))
        eval(parse(text = paste0("tmp1.2 = ",tmp[2])))
      } else {
        stop("File extension not supported !")
      }
      if (grepl(pattern = ".txt",x = input$data_par2$name)){
        tmp2.1 = 
read.table(file=input$data_par2$datapath)
        allele_freq2 = as.data.frame(get_allele_frequencies(tmp2.1,ploidy_level = 2))
        if (!is.null(allele_freq2$Freq_NA)){
          tmp2.2 = data.frame(MarkerName=rownames(allele_freq2),toKeep=TRUE,CR=1-allele_freq2$Freq_NA)
        } else {
          tmp2.2 = data.frame(MarkerName=rownames(allele_freq2),toKeep=TRUE,CR=1)
        }
      } else if (grepl(pattern = ".Rdata",x = input$data_par2$name)){
        tmp=load(file = input$data_par2$datapath)
        eval(parse(text = paste0("tmp2.1 = ",tmp[1])))
        eval(parse(text = paste0("tmp2.2 = ",tmp[2])))
      } else {
        stop("File extension not supported !")
      }
      marker_shared = tmp1.2$MarkerName[which(tmp1.2$MarkerName %in% tmp2.2$MarkerName)]
      dataset$snp_par = data.frame(MarkerName=marker_shared, CR=(tmp1.2$CR[tmp1.2$MarkerName %in% marker_shared]*nrow(tmp1.1)+tmp2.2$CR[tmp2.2$MarkerName %in% marker_shared]*nrow(tmp2.1))/(nrow(tmp1.1)+nrow(tmp2.1)), toKeep=TRUE)
      dataset$par = rbind(tmp1.1 %>% select(all_of(marker_shared)),tmp2.1 %>% select(all_of(marker_shared)))
      dataset$sexe = rbind(data.frame(SampleName=rownames(tmp1.1),Sexe=1),data.frame(SampleName=rownames(tmp2.1),Sexe=2))
    }
  })

  #---Load the dataset of parents with their sex and store the data
  # Verify that 'SampleName' and 'Sexe' are variables: if not, let the user pick the column holding the sample names so it can be renamed to SampleName
  observeEvent(input$sexe_par,{
    if (!is.null(input$sexe_par$datapath)){
      dataset$dta_sex_load=TRUE
      dataset$sexe=read.table(file = input$sexe_par$datapath,header=T)
      if (length(dataset$sexe)[1]==1){
        delim=find_delim(readLines(con=input$sexe_par$datapath,n = 1))
        dataset$sexe=read.table(file = input$sexe_par$datapath,header=T,sep=delim)
      }
      dataset$choices = colnames(dataset$sexe)
      dataset$tmp_sexe=dataset$sexe
    }
  })

  #--- Change SampleName and Sex column -----
  output$dta_sex_load <- reactive({ dataset$dta_sex_load })
  outputOptions(output, 'dta_sex_load', suspendWhenHidden=FALSE)

  #---SelectInput with the names of the different columns: select the sample name
  output$uiChangeSN = renderUI({
    if (dataset$dta_sex_load){
      selectInput(inputId = "newSN", label = div("Choose the variable corresponding to SampleName", tipify(el = bsButton(inputId = "1",label = "",icon = icon("question"), style = "info", size = "extra-small"), title = "Please select the corresponding SampleName column so that APIS could run. See the table nearby to help you.")), choices = dataset$choices,selected = dataset$choices[1],multiple = FALSE)
    }
  })

  #---SelectInput with the names of the different columns: select the sex
  output$uiChangeSe = renderUI({
    if (dataset$dta_sex_load){
      selectInput(inputId = "newSe", label = div("Choose the variable corresponding to Sex", tipify(el = bsButton(inputId = "2",label = "",icon = icon("question"), style = "info", size = "extra-small"), title = "Please select the corresponding Sex column so that APIS could run. 
See the table nearby to help you.")), choices = dataset$choices,selected = dataset$choices[2],multiple = FALSE) } }) #---Change the name in regard to what is selected as samplename / sexe observeEvent(c(input$newSN,input$newSe),{ if (!is.null(input$newSN) & !is.null(input$newSe)){ dataset$sexe = dataset$tmp_sexe dataset$displayed = head(dataset$sexe) colnames(dataset$sexe)[which(colnames(dataset$sexe)==input$newSN)]='SampleName' colnames(dataset$sexe)[which(colnames(dataset$sexe)==input$newSe)]='Sexe' colnames(dataset$displayed)[which(colnames(dataset$displayed)==input$newSN)]=paste0(input$newSN,' (SampleName)') colnames(dataset$displayed)[which(colnames(dataset$displayed)==input$newSe)]=paste0(input$newSe,' (Sex)') } }) #---Head of the dataset to help select corresponding columns output$head_sex = renderDataTable({ datatable(head(dataset$displayed),rownames = FALSE,options = list(dom = 't'), caption = htmltools::tags$caption( style = 'caption-side: top; text-align: center; color:black; font-size:150% ;','Head data sex')) }) #---Find the SNPs in common between parents and offspring => keeps only good snp !! observeEvent(c(dataset$snp_off,dataset$snp_par),{ if (length(dataset$snp_off)>0 & length(dataset$snp_par)>0){ m_par_true = dataset$snp_par$MarkerName[dataset$snp_par$toKeep] m_off_true = dataset$snp_off$MarkerName[dataset$snp_off$toKeep] dataset$snp_to_keep = m_off_true[which(m_off_true %in% m_par_true)] } }) #---textInput to select the number of SNP for assignment output$nb_snp_poss = renderUI({ if (length(dataset$snp_to_keep)>0 & is.null(input$snp_in$datapath)){ textInput(inputId = "nb_snp",value = paste0(min(100,length(dataset$snp_to_keep))), label = div("Choose the number of markers to be used for the assignment", tipify(el = bsButton(inputId = "3",label = "",icon = icon("question"), style = "info", size = "extra-small"), title = "Number of markers to select for the assignment after ordering them by call rate of parents and MAF of offspring (only for SNP markers)."))) } }) #---Stock the number of markers asked by user and display warning message if necessary observeEvent(input$nb_snp,{ suppressWarnings(a <- as.numeric(input$nb_snp)) if (is.na(a)){ dataset$AlertNbMarker = "Warning : The input must be a numeric !" } else { if (a>length(dataset$snp_to_keep)){ # a>nb marker shared between par and off if (length(dataset$snp_to_keep)>1500){ dataset$AlertNbMarker = paste0("Warning : There is only ",length(dataset$snp_to_keep)," markers shared between parents and offspring ! Number max of markers will be used. Additional warning : The computation time might be high ! 100 to 500 markers is usually enough for assignment.") } else { dataset$AlertNbMarker = paste0("Warning : There is only ",length(dataset$snp_to_keep)," markers shared between parents and offspring ! Number max of markers will be used.") } dataset$nbMarker = length(dataset$snp_to_keep) } else if (a>1500){ dataset$AlertNbMarker = "Warning : The computation time might be high ! 100 to 500 markers is usually enough for assignment." dataset$nbMarker = a } else if (a<1){ dataset$AlertNbMarker = "Warning : The number of marker cannot be null or negative. Automatically set to 10 and less (if 10 not possible)." 
        dataset$nbMarker = ifelse(length(dataset$snp_to_keep)>9,10,1)
      } else {
        dataset$AlertNbMarker = NULL
        dataset$nbMarker = a
      }
    }
  })

  #--- Warning message due to the marker number
  output$WarningMarker = renderText({ dataset$AlertNbMarker })

  # #---RenderUI for exclusion threshold: to control the max value according to the dataset length
  # output$exclusionErrorUI = renderUI({
  #   if (length(dataset$snp_kept)>0){
  #     sliderInput(inputId = "exclusionError",
  #                 label = div("Choose the threshold for mismatch",
  #                             tipify(el = bsButton(inputId = "4",label = "",icon = icon("question"), style = "info", size = "extra-small"),
  #                                    title = "Choose the number of mismatch allowed for the assignment for the best pair of parents.")),
  #                 min = 0,max = max(10,floor(0.2*length(dataset$snp_kept))),value = 0,step = 1)
  #   }
  # })

  graphApis = reactiveValues(p1=ggplot(),p2=ggplot(),p3=ggplot(),tot=ggplot())

  ##### Graph APIS #####
  output$graph_res = renderPlot({
    if (length(dataset$apis_likelihood)>0){
      p1=graphApis$p1
      if (input$method=='likelihood'){
        p3=graphApis$p3
        THRESHOLD=estimate_mendel_threshold(dataset$apis_likelihood,as.numeric(input$acceptError))
        p2=graphApis$p2 + geom_vline(xintercept=THRESHOLD)
      } else if (input$method=='exclusion'){
        p2=graphApis$p2
        if (input$exclu_thres=='Error rate'){
          THRESHOLD=estimate_exclusion_threshold(dataset$apis_exclusion,as.numeric(input$acceptError))
        } else {
          THRESHOLD=as.numeric(input$acceptMismatch)
        }
        p3=graphApis$p3 + geom_vline(xintercept=THRESHOLD+0.5)
      }
      graphApis$tot = plot_grid(plot_grid(p1,p2),p3,nrow = 2)
      graphApis$tot
    }
  })

  #---To specify that APIS has been launched and that the rest of the UI can be displayed
  output$APIS_launched <- reactive({ dataset$APIS_launched })
  outputOptions(output, 'APIS_launched', suspendWhenHidden=FALSE)

  #---To launch APIS while being sure that everything required is there
  output$go_apis = renderUI({
    if (!is.null(input$data_off) & (!is.null(input$data_par) | (!is.null(input$data_par1) & !is.null(input$data_par2))) & (length(dataset$nbMarker)==1 | !is.null(input$snp_in$datapath)) & !is.null(input$save_name)){
      actionButton(inputId = "launch1",label = "Launch APIS assignment")
    }
  })

  output$acceptMismatch0 = renderUI({
    if (input$exclu_thres=='Mismatch number' & input$method=='exclusion' & !is.null(dataset$apis_exclusion)){
      sliderInput(inputId = "acceptMismatch",label = "Number of mismatches allowed",min = 0,max = max(dataset$apis_exclusion$mismatch_2,na.rm = TRUE)+5,value = 5,step = 1)
    }
  })

  ##### Launch APIS #####
  observeEvent(input$launch1,{
    if (length(dataset$off)>0 & length(dataset$par)>0 & (length(dataset$nbMarker)==1 | !is.null(input$snp_in$datapath)) & !is.null(input$save_name)){
      if (!dir.exists("./log")){ dir.create("./log") }
      date_time = Sys.time()
      date_time=gsub(pattern = "-",replacement = "",x = date_time)
      date_time=gsub(pattern = ":",replacement = "",x = date_time)
      # date_time=gsub(pattern = " CEST",replacement = "_",x = date_time)
      date_time=gsub(pattern = " ",replacement = "_",x = date_time)
      date_time=substr(x = date_time,start = 3,stop = 16)
      path_log = paste0("./log/",date_time,"_log_APIS.txt")
      dataset$path_log=path_log
      write(x = "-----Launching of APIS-----",file = path_log)
      print("-----Launching of APIS-----")
      write(x = paste0("Offspring file : ",input$data_off$name),file = path_log,append = TRUE)
      if (input$both_parents_in){
        if (is.null(input$sexe_par$datapath)){ stop("Must provide a dataset with the sex of each parent!") }
        write(x = paste0("Parents file : ",input$data_par$name),file = path_log,append = TRUE)
        write(x = paste0("Parental sex file : 
",input$sexe_par$name),file = path_log,append = TRUE) } else { write(x = paste0("Parents file sire : ",input$data_par1$name),file = path_log,append = TRUE) write(x = paste0("Parents file dam : ",input$data_par2$name),file = path_log,append = TRUE) } if (!is.null(input$snp_in$datapath)){ snp_kept = read.table(file = input$snp_in$datapath,header = FALSE) snp_kept = snp_kept[,1] write(x = paste0("Marker file : ",input$snp_in$name),file = path_log,append = TRUE) snp_kept=snp_kept[which(snp_kept %in% colnames(dataset$off) & snp_kept %in% colnames(dataset$par))] } else { if (input$markerType2=='SNP'){ snp_kept = dataset$snp_off %>% select(.data$MarkerName,.data$MAF) %>% left_join(dataset$snp_par %>% select(.data$MarkerName,.data$CR),by=c("MarkerName"="MarkerName")) %>% arrange(desc(.data$MAF),desc(.data$CR)) %>% filter(!is.na(.data$MAF),!is.na(.data$CR)) %>% select(.data$MarkerName) } else { # input$markerType2=='microsat' snp_kept = dataset$snp_off %>% select(.data$MarkerName) %>% left_join(dataset$snp_par %>% select(.data$MarkerName,.data$CR),by=c("MarkerName"="MarkerName")) %>% arrange(desc(.data$CR)) %>% filter(!is.na(.data$CR)) %>% select(.data$MarkerName) } snp_kept = snp_kept$MarkerName[1:min(as.numeric(dataset$nbMarker),length(snp_kept$MarkerName))] } if (length(snp_kept) < 1){ write(x = "Error : 0 marker kept !",file = path_log,append = TRUE) write(x = "If a list is provided: dataset doesnt have those markers ; if not, dataset doesnt share marker !",file = path_log,append = TRUE) stop("If a list is provided: dataset doesnt have those markers ; if not, dataset doesnt share marker !") } write(x = paste0("Marker type : ",input$markerType2),file = path_log,append = TRUE) write(x = paste0("Number of snp kept : ",length(snp_kept)),file = path_log,append = TRUE) offspring = dataset$off %>% as.data.frame() %>% select(all_of(snp_kept)) %>% as.matrix() sire0 = dataset$par %>% as.data.frame() %>% select(all_of(snp_kept)) sire = sire0[which(toupper(rownames(sire0)) %in% toupper(dataset$sexe$SampleName[dataset$sexe$Sexe==1 | dataset$sexe$Sexe==3])),] %>% as.matrix() dam0 = dataset$par %>% as.data.frame() %>% select(all_of(snp_kept)) dam = dam0[which(toupper(rownames(dam0)) %in% toupper(dataset$sexe$SampleName[dataset$sexe$Sexe==2 | dataset$sexe$Sexe==4])),] %>% as.matrix() par_nam_tot=toupper(rownames(dataset$par)) par_nam_rest=toupper(c(rownames(sire),rownames(dam))) par_nam_list=toupper(dataset$sexe$SampleName) no_sex = which(! par_nam_tot %in% par_nam_rest) sex_but_no = which(! 
par_nam_list %in% par_nam_rest) if (length(no_sex)>0){ warning("Some parent(s) does not have a sex assigned !") print(par_nam_tot[no_sex]) write(x = paste0("WARNING -- Sex missing for some parents : ",par_nam_tot[no_sex]),file = path_log,append = TRUE) } if (length(sex_but_no)>0){ warning("Some parent(s) are missing in the dataset !") print(par_nam_list[sex_but_no]) write(x = paste0("WARNING -- Parents missing : ",par_nam_list[sex_but_no]),file = path_log,append = TRUE) } write(x = paste0("Offspring ploidy : ",input$ploidy),file = path_log,append = TRUE) write(x = paste0("Number of cores (parallelization) : ",input$core),file = path_log,append = TRUE) write(x = paste0("Launch time : ",Sys.time()),file = path_log,append = TRUE) if (as.numeric(input$ploidy)==3){ if (FALSE){ # input$try_recom v_recom = seq(0.1,0.9,0.1) v_eff = NULL m_re = 0 for (v in v_recom){ to_save=APIS_3n(offspring_genotype = offspring[,1:min(100,ncol(offspring))], sire_genotype = sire[,1:min(100,ncol(offspring))], dam_genotype = dam[,1:min(100,ncol(offspring))], number_cores = 10, verbose=TRUE, simulation_if_small = FALSE, t_recom=v) if (m_re<mean(to_save$log_file$delta_1_2,na.rm = T)){ v_eff = v m_re = mean(to_save$log_file$delta_1_2,na.rm = T) } } res_APIS=APIS_3n(offspring_genotype = offspring, sire_genotype = sire, dam_genotype = dam, number_cores = as.numeric(input$core), verbose=TRUE, simulation_if_small = FALSE, t_recom = v_eff, method = "mendel") } else { res_APIS=APIS_3n(offspring_genotype = offspring, sire_genotype = sire, dam_genotype = dam, number_cores = as.numeric(input$core), verbose=TRUE, simulation_if_small = FALSE, t_recom = 0.5, method = "") } } else if(as.numeric(input$ploidy)==2){ res_APIS=APIS_2n(offspring_genotype = offspring, sire_genotype = sire, dam_genotype = dam, number_cores = as.numeric(input$core), verbose=TRUE, simulation_if_small = FALSE, method = "") } else { stop("Incorrect number of ploidy !") } write(x = paste0("End time : ",Sys.time()),file = path_log,append = TRUE) dataset$snp_kept = snp_kept dataset$apis_likelihood = res_APIS$log_file_likelihood dataset$apis_exclusion = res_APIS$log_file_exclusion dataset$APIS_launched = 1 dataset$displayed = data.frame() # reinitialisation so that it does not overcharge the user experience #---Generate graph tmp = data.frame(Value1 = c(dataset$apis_likelihood$probability_1,dataset$apis_likelihood$probability_2), Value2 = c(dataset$apis_likelihood$delta_1_2,dataset$apis_likelihood$delta_2_3), Which = as.factor(rep(c(1,2),each=length(dataset$apis_likelihood$probability_1)))) graphApis$p1=ggplot(data=tmp,aes(x = .data$Value1,color = .data$Which,fill = .data$Which))+ geom_histogram(alpha = 0.25,position = "identity")+ scale_fill_manual(values = c("1"="#56B4E9","2"="#D55E00"))+ scale_color_manual(values = c("1"="#56B4E9","2"="#D55E00"))+ theme_bw()+ labs(x = "Mendelian probability",y="Count",fill="Assignment",col="Assignment") graphApis$p2=ggplot(data=tmp,aes(x = .data$Value2,color = .data$Which,fill = .data$Which))+ geom_histogram(alpha = 0.25,position = "identity")+ scale_fill_manual(values = c("1"="#56B4E9","2"="#D55E00"))+ scale_color_manual(values = c("1"="#56B4E9","2"="#D55E00"))+ theme_bw()+ labs(x = "Delta of mendelian probability",y="Count",fill="Assignment",col="Assignment") tmp = data.frame(Value = c(dataset$apis_exclusion$mismatch_1,dataset$apis_exclusion$mismatch_2), Which = as.factor(rep(c(1,2),each=length(dataset$apis_exclusion$mismatch_1)))) graphApis$p3=ggplot(data=tmp,aes(x = .data$Value,color = .data$Which,fill = .data$Which))+ 
geom_histogram(alpha = 0.25,position = "identity")+ scale_fill_manual(values = c("1"="#56B4E9","2"="#D55E00"))+ scale_color_manual(values = c("1"="#56B4E9","2"="#D55E00"))+ theme_bw()+ labs(x = "Number of mismatch",y="Count",fill="Assignment",col="Assignment") print("-----End APIS-----") } }) observeEvent(input$SaveAPIS,{ if (length(dataset$apis_likelihood)>0){ write(x = "----Saving APIS assignment----",file = dataset$path_log,append = TRUE) write(x = paste0("Method : ",input$method),file = dataset$path_log,append = TRUE) mismatch_error=FALSE if (input$method=='exclusion'){ if (input$exclu_thres=='Error rate'){ THRESHOLD=as.numeric(input$acceptError) estiThreshold=estimate_exclusion_threshold(dataset$apis_exclusion,as.numeric(input$acceptError)) write(x = paste0("Error accepted : ",as.numeric(input$acceptError*100),"%"),file = dataset$path_log,append = TRUE) write(x = paste0("Mismatch threshold : ",estiThreshold),file = dataset$path_log,append = TRUE) } else { mismatch_error=TRUE THRESHOLD=as.numeric(input$acceptMismatch) estiThreshold=THRESHOLD write(x = paste0("Mismatch threshold : ",estiThreshold),file = dataset$path_log,append = TRUE) } ped = dataset$apis_exclusion %>% select(.data$offspring,.data$sire_1,.data$dam_1) %>% rename(sire=.data$sire_1,dam=.data$dam_1) ind_na=unique(which(dataset$apis_exclusion$mismatch_1>estiThreshold | dataset$apis_exclusion$mismatch_1==dataset$apis_exclusion$mismatch_2)) ped[ind_na,2:3]=c(NA,NA) log_APIS = dataset$apis_exclusion } else if (input$method=='likelihood'){ THRESHOLD=as.numeric(input$acceptError) estiThreshold=estimate_mendel_threshold(dataset$apis_likelihood,as.numeric(input$acceptError)) ped = dataset$apis_likelihood %>% select(.data$offspring,.data$sire_1,.data$dam_1) %>% rename(sire=.data$sire_1,dam=.data$dam_1) ind_na=unique(which(dataset$apis_likelihood$delta_1_2<estiThreshold)) ped[ind_na,2:3]=c(NA,NA) write(x = paste0("Error accepted : ",as.numeric(input$acceptError*100),"%"),file = dataset$path_log,append = TRUE) write(x = paste0("Threshold of delta probability estimated : ",estiThreshold),file = dataset$path_log,append = TRUE) log_APIS = dataset$apis_likelihood } write(x = paste0("Saving name (in './Results_APIS' folder) : ",input$save_name),file = dataset$path_log,append = TRUE) if (!dir.exists("./Results_APIS")){ dir.create("./Results_APIS") } snp_kept = dataset$snp_kept df_par = dataset$sexe save(log_APIS,ped,snp_kept,df_par,THRESHOLD,estiThreshold,mismatch_error,file = paste0("./Results_APIS/",input$save_name,"_",input$method,".Rdata")) ggsave(graphApis$tot,filename = paste0("./Results_APIS/",input$save_name,"_",input$method,".png"),width = 7,height = 7) write.table(x = ped,file = paste0("./Results_APIS/",input$save_name,"_ped.txt"),quote = FALSE,row.names = FALSE) write.table(x = log_APIS,file = paste0("./Results_APIS/",input$save_name,"_logfile.csv"),sep=";",quote = FALSE,row.names = FALSE) print("-----APIS files saved ! 
-----") } }) ##### Event from Verification ##### verif = reactiveValues(out1=NULL,out2=NULL,out3=NULL,out4=NULL,tab=data.frame(), threshold=NULL, sentence_dam1=NULL,sentence_sire1=NULL,sentence_dam2=NULL,sentence_sire2=NULL, data=data.frame(),accou=data.frame(),ped=data.frame(),accou_tmp=data.frame(), dta_fac_load=FALSE,choices=NULL,displayed=data.frame()) #---Summary of the assignment output$txt1 = renderText({ verif$out1 }) output$txt2 = renderText({ verif$out2 }) output$txt3 = renderText({ verif$out3 }) output$txt4 = renderText({ verif$out4 }) #---Summary number of offspring by sexe output$sent1.1 = renderText({ verif$sentence_dam1 }) output$sent2.1 = renderText({ verif$sentence_sire1 }) output$sent1.2 = renderText({ verif$sentence_dam2 }) output$sent2.2 = renderText({ verif$sentence_sire2 }) #---Table with assignment where parents should not have met output$tab = renderDataTable({ if (is.null(input$tab_accou$datapath)){ datatable(verif$tab,rownames = FALSE,#options = list(dom = 't'), caption = htmltools::tags$caption( style = 'caption-side: top; text-align: center; color:black; font-size:150% ;','Un-assigned offspring')) } else { datatable(verif$tab,rownames = FALSE,#options = list(dom = 't'), caption = htmltools::tags$caption( style = 'caption-side: top; text-align: center; color:black; font-size:150% ;','Out-of-plan assignment')) } }) #---Load dataset of parents with their sexe and stock the data # Verify that 'SampleName' is a variable : if not, detection to change the colname of the samples name to match with SampleName observeEvent(input$tab_accou,{ if (!is.null(input$tab_accou$datapath)){ verif$dta_fac_load=TRUE verif$accou=read.table(file = input$tab_accou$datapath,header=T) if (length(verif$accou)[1]==1){ delim = find_delim(readLines(con=input$tab_accou$datapath,n=1)) verif$accou=read.table(file = input$tab_accou$datapath,header=T,sep=delim) } verif$choices = colnames(verif$accou) verif$tmp_accou=verif$accou } }) #--- Change SampleName and Facto when data loaded VERIF ----- output$dta_fac_load <- reactive({ verif$dta_fac_load }) outputOptions(output, 'dta_fac_load', suspendWhenHidden=FALSE) #--- SelectInput with the names of the different columns : select the sample name output$uiChangeSN2 = renderUI({ if (verif$dta_fac_load){ selectInput(inputId = "newSN2", label = div("Choose the variable corresponding to SampleName", tipify(el = bsButton(inputId = "6",label = "",icon = icon("question"), style = "info", size = "extra-small"), title = "Please select the corresponding SampleName column so that APIS could run. See the table nearby to help you.")), choices = verif$choices,selected = verif$choices[1],multiple = FALSE) } }) #--- SelectInput with the names of the different columns : select the Facto output$uiChangeFa = renderUI({ if (verif$dta_fac_load){ selectInput(inputId = "newFa", label = div("Choose the variable corresponding to Facto", tipify(el = bsButton(inputId = "7",label = "",icon = icon("question"), style = "info", size = "extra-small"), title = "Please select the corresponding Facto column so that APIS could run. 
See the table nearby to help you.")), choices = verif$choices,selected = verif$choices[2],multiple = FALSE) } }) #--- Change the name in regard to what is selected as samplename and facto observeEvent(c(input$newSN2,input$newFa,input$tab_accou),{ if (!is.null(input$newSN2) & !is.null(input$newFa)){ verif$accou = verif$tmp_accou verif$displayed = verif$accou colnames(verif$accou)[which(colnames(verif$accou)==input$newSN2)]='SampleName' colnames(verif$accou)[which(colnames(verif$accou)==input$newFa)]='Facto' colnames(verif$displayed)[which(colnames(verif$displayed)==input$newSN2)]=paste0(input$newSN2,' (SampleName)') colnames(verif$displayed)[which(colnames(verif$displayed)==input$newFa)]=paste0(input$newFa,' (Facto)') } }) #---Head of the dataset to help select corresponding columns output$head_fac = renderDataTable({ datatable(head(verif$displayed),rownames = FALSE,options = list(dom = 't'), caption = htmltools::tags$caption( style = 'caption-side: top; text-align: center; color:black; font-size:150% ;','Head data factorial')) }) ##### Graph Verification ##### #--- Plot of proba/mismatch between couple 1 and 2 output$plot = renderPlot({ if (length(verif$data)>0){ if (regexpr("likelihood",input$data_res$name)==-1){ # si il ny a pas likelihood dans le nom => donc exclusion tmp = data.frame(Value = c(verif$data$mismatch_1,verif$data$mismatch_2), Which = as.factor(rep(c(1,2),each=length(verif$data$mismatch_1)))) ggplot(data=tmp,aes(x = .data$Value,color = .data$Which,fill = .data$Which))+ geom_histogram(alpha = 0.25,position = "identity")+ scale_fill_manual(values = c("1"="#56B4E9","2"="#D55E00"))+ scale_color_manual(values = c("1"="#56B4E9","2"="#D55E00"))+ geom_vline(xintercept = verif$threshold)+ theme_bw()+ labs(x = "Number of mismatch",y="Count",fill="Assignment",col="Assignment") } else { tmp = data.frame(Value1 = c(verif$data$probability_1,verif$data$probability_2), Value2 = c(verif$data$delta_1_2,verif$data$delta_2_3), Which = as.factor(rep(c(1,2),each=length(verif$data$probability_1)))) p1=ggplot(data=tmp,aes(x = .data$Value1,color = .data$Which,fill = .data$Which))+ geom_histogram(alpha = 0.25,position = "identity")+ scale_fill_manual(values = c("1"="#56B4E9","2"="#D55E00"))+ scale_color_manual(values = c("1"="#56B4E9","2"="#D55E00"))+ theme_bw()+ labs(x = "Mendelian probability",y="Count",fill="Assignment",col="Assignment") p2=ggplot(data=tmp,aes(x = .data$Value2,color = .data$Which,fill = .data$Which))+ geom_histogram(alpha = 0.25,position = "identity")+ scale_fill_manual(values = c("1"="#56B4E9","2"="#D55E00"))+ scale_color_manual(values = c("1"="#56B4E9","2"="#D55E00"))+ theme_bw()+ geom_vline(xintercept = verif$threshold)+ labs(x = "Delta of mendelian probability",y="Count",fill="Assignment",col="Assignment") plot_grid(p1,p2) } } }) #--- Barplot with the number of offspring for a given parents output$barplot = renderPlotly({ if(length(to_plot$ggbar)>0){ ggplotly(to_plot$ggbar) } }) #--- Heatmap with color for the number of offspring between sireX and damX output$heatmap = renderPlotly({ if (length(to_plot$ggheat)>0){ ggplotly(p=to_plot$ggheat,tooltip=c('x','y','fill','text')) } }) observeEvent(input$SavePlot,{ if (length(to_plot$ggbar$data)>0 & length(to_plot$ggheat$data)>0){ if (!dir.exists("./Results_verif")){ dir.create("./Results_verif") } ggsave(plot = to_plot$ggbar,filename = paste0("./Results_verif/",input$save_name2,"_barplot.png"),width = 18,height = 9) ggsave(plot = to_plot$ggheat+coord_fixed(),filename = paste0("./Results_verif/",input$save_name2,"_heatmap.png"),width 
= 12,height = 12) txt=ggplot(data=data.frame(x=0,y=0),aes(x=.data$x,y=.data$y))+ geom_text(x=0,y=0.9,label="Summary of the APIS assignment",size=10)+ geom_text(x=0,y=0,label=paste(verif$out1,verif$out2,verif$out3,verif$out4,paste0("File name : ",input$data_res$name),sep = "\n"),size=3)+ geom_text(x=0,y=-0.9,label=paste(verif$sentence_dam1,verif$sentence_dam2,verif$sentence_sire1,verif$sentence_sire2,sep = "\n"),size=3)+ xlim(-1,1)+ylim(-1,1)+ theme(axis.line = element_blank(), axis.ticks = element_blank(), axis.title = element_blank(), panel.background = element_blank(), axis.text = element_blank()) plot1 = plot_grid(txt,to_plot$ggbar,nrow=2) if (is.null(input$tab_accou$datapath)){ txt2=ggplot(data=data.frame(x=0,y=0),aes(x=.data$x,y=.data$y))+ geom_text(x=0,y=0.9,label="Un-assigned offspring",size=5)+ xlim(-1,1)+ylim(-1,1)+ theme(axis.line = element_blank(), axis.ticks = element_blank(), axis.title = element_blank(), panel.background = element_blank(), axis.text = element_blank()) } else { txt2=ggplot(data=data.frame(x=0,y=0),aes(x=.data$x,y=.data$y))+ geom_text(x=0,y=0.9,label="Out-of-plan assignment",size=5)+ xlim(-1,1)+ylim(-1,1)+ theme(axis.line = element_blank(), axis.ticks = element_blank(), axis.title = element_blank(), panel.background = element_blank(), axis.text = element_blank()) } nMAX = 15 pdf(paste0("./Results_verif/",input$save_name2,"_summary.pdf")) print(plot1) print(to_plot$ggheat) if (nrow(verif$tab)==0 & !is.null(input$tab_accou$datapath)){ print("No out-of-plan assignment") } else if (nrow(verif$tab)==0 & is.null(input$tab_accou$datapath)){ print("No un-assigned offspring") } else if (nrow(verif$tab)<=nMAX){ print(txt2) grid.table(verif$tab,rows=NULL) } else { print(txt2) grid.table(verif$tab[1:nMAX,],rows=NULL) n = (nrow(verif$tab)-nMAX)%/%nMAX leftover = (nrow(verif$tab)-nMAX)%%nMAX for (k in 1:n){ plot.new() grid.table(verif$tab[(nMAX*k+1):(nMAX*(k+1)),],rows=NULL) } if (leftover>0){ plot.new() grid.table(verif$tab[(nMAX*(n+1)+1):nrow(verif$tab),],rows=NULL) } } dev.off() print("----- Plot saved ! 
-----") } }) ##### INFAQUA ##### # infaqua = reactiveValues(file_off=data.frame(),tmp_off=data.frame(),changeSN=FALSE,changeID=FALSE,choices=NULL,choices2=NULL) # observeEvent(input$id_off,{ # if (!is.null(input$id_off$datapath)){ # infaqua$changeSN=FALSE # infaqua$changeID=FALSE # infaqua$file_off = read.table(file = input$id_off$datapath,header = TRUE,sep = "\t") # if (length(infaqua$file_off)==1){ # infaqua$file_off = read.table(file = input$id_off$datapath,header = TRUE,sep = ";") # } # if (is.null(infaqua$file_off$SampleName) & is.null(infaqua$file_off$CodeBarre)){ # infaqua$changeSN=TRUE # infaqua$tmp_off=infaqua$file_off # infaqua$choices = colnames(infaqua$file_off) # } else if (is.null(infaqua$file_off$SampleName)){ # so there is CodeBarre # colnames(infaqua$file_off)[colnames(infaqua$file_off)=="CodeBarre"]="SampleName" # } # if(is.null(infaqua$file_off$ID)){ # infaqua$changeID=TRUE # infaqua$tmp_off=infaqua$file_off # infaqua$choices2 = colnames(infaqua$file_off) # } # } # }) # #--- Change SampleName INFAQUA ----- # output$changeSN3 <- reactive({ # infaqua$changeSN # }) # outputOptions(output, 'changeSN3', suspendWhenHidden=FALSE) # # #--- SelectInput with the names of the different columns : select the sample name # output$uiChangeSN3 = renderUI({ # if (infaqua$changeSN){ # selectInput(inputId = "newSN3", # label = div("Choose the variable corresponding to SampleName/CodeBarre", # tipify(el = bsButton(inputId = "8",label = "",icon = icon("question"), style = "info", size = "extra-small"), # title = "Apparently, no column in your file has the name SampleName as it should have. So please select the corresponding SampleName column so that the formating for INFAQUA could run.")), # choices = infaqua$choices,selected = 1,multiple = FALSE) # } # }) # #--- Change ID INFAQUA ----- # output$changeID <- reactive({ # infaqua$changeID # }) # outputOptions(output, 'changeID', suspendWhenHidden=FALSE) # # #--- SelectInput with the names of the different columns : select the sample name # output$uiChangeID = renderUI({ # if (infaqua$changeID){ # selectInput(inputId = "newID", # label = div("Choose the variable corresponding to ID", # tipify(el = bsButton(inputId = "9",label = "",icon = icon("question"), style = "info", size = "extra-small"), # title = "Apparently, no column in your file has the name ID as it should have. So please select the corresponding ID column so that the formating for INFAQUA could run.")), # choices = infaqua$choices,selected = 1,multiple = FALSE) # } # }) # #---Change the name in regard to what is selected as SampleName/ID # observeEvent(c(input$newSN3,input$newID),{ # if (!is.null(input$newSN3) & !is.null(input$newID)){ # if (input$newSN3 != input$newID){ # infaqua$file_off = infaqua$tmp_off # colnames(infaqua$file_off)[which(colnames(infaqua$file_off)==input$newSN3)]='SampleName' # colnames(infaqua$file_off)[which(colnames(infaqua$file_off)==input$newID)]='ID' # } # } else if (!is.null(input$newSN3)){ # infaqua$file_off = infaqua$tmp_off # colnames(infaqua$file_off)[which(colnames(infaqua$file_off)==input$newSN3)]='SampleName' # } else if (!is.null(input$newID)){ # infaqua$file_off = infaqua$tmp_off # colnames(infaqua$file_off)[which(colnames(infaqua$file_off)==input$newID)]='ID' # } # }) # #--- Launch Save INFAQUA ----- # observeEvent(input$SaveInf,{ # if (length(to_plot$ggbar$data)>0 & ! 
is.null(input$id_off$datapath)){ # Must launch verif first to load other dataset # if (!is.null(infaqua$file_off$SampleName)){ # if (!dir.exists("./forINFAQUA")){ # dir.create("./forINFAQUA") # } # verif$ped %>% # filter(.,!is.na(sire))%>% # left_join(.,infaqua$file_off,by=c("offspring"="SampleName"))%>% # mutate(.,IDoff=ID)%>% # select(.,-c("ID"))%>% # left_join(.,verif$accou%>%select(.,c("ID","SampleName")),by=c("sire"="SampleName"))%>% # mutate(.,IDsire=ID)%>% # select(.,-c("ID"))%>% # left_join(.,verif$accou%>%select(.,c("ID","SampleName")),by=c("dam"="SampleName"))%>% # mutate(.,IDdam=ID)%>% # select(.,-c("ID"))%>% # select(.,c("IDoff","offspring","IDdam","dam","IDsire","sire"))%>% # pour reordonner # rename(.,identifiant_adn=IDoff,adn_labo=offspring, # identifiant_adn_mere=IDdam,adn_labo_mere=dam, # identifiant_adn_pere=IDsire,adn_labo_pere=sire) %>% # write.table(.,file = paste0("./forINFAQUA/",input$save_name2,"_INFAQUA.csv"), quote = FALSE, row.names=FALSE,sep = ";",append=FALSE) # } # } # }) ##### Fin INFAQUA ##### to_plot=reactiveValues(heatmap=data.frame(),ggbar=ggplot(),ggheat=ggplot()) ##### Launch Verification ##### observeEvent(input$launch_verif,{ if (!is.null(input$data_res)){ load(file = input$data_res$datapath) # log_APIS , ped , snp_kept , df_par , THRESHOLD , estiThreshold , mismatch_error verif$data = log_APIS verif$ped = ped verif$threshold = estiThreshold # Barplot nb_sire = verif$ped %>% group_by(.data$sire) %>% count() nb_dam = verif$ped %>% group_by(.data$dam) %>% count() tmp=data.frame(par=c(nb_sire$sire,nb_dam$dam),n=c(nb_sire$n,nb_dam$n)) tmp$par=toupper(tmp$par) df_par$SampleName=toupper(df_par$SampleName) barplot = left_join(df_par,tmp,by=c("SampleName"="par")) barplot$n[is.na(barplot$n)]=0 count0 = function(n){ length(which(n==0)) } countDif0 = function(n){ length(which(n!=0)) } suma_dam = barplot %>% filter(.data$Sexe %in% c(2,4)) %>% select(.data$n) %>% summarise(min=min(.data$n,na.rm=T),max=max(.data$n,na.rm=T),mean=mean(.data$n,na.rm=T), median=median(.data$n,na.rm=T),nb0=count0(.data$n),nbdif0=countDif0(.data$n)) %>% round(digits = 2) suma_sire = barplot %>% filter(.data$Sexe %in% c(1,3)) %>% select(.data$n) %>% summarise(min=min(.data$n,na.rm=T),max=max(.data$n,na.rm=T),mean=mean(.data$n,na.rm=T), median=median(.data$n,na.rm=T),nb0=count0(.data$n),nbdif0=countDif0(.data$n)) %>% round(digits = 2) verif$sentence_dam1 = paste0("Dam Number of Offspring -- Min : ",suma_dam$min," ; Max : ",suma_dam$max," ; Mean : ",suma_dam$mean," ; Median : ",suma_dam$median) verif$sentence_dam2 = paste0("Nb no off : ",suma_dam$nb0," -- Nb with off : ",suma_dam$nbdif0) verif$sentence_sire1 = paste0("Sire Number of Offspring -- Min : ",suma_sire$min," ; Max : ",suma_sire$max," ; Mean : ",suma_sire$mean," ; Median : ",suma_sire$median) verif$sentence_sire2 = paste0("Nb no off : ",suma_sire$nb0," -- Nb with off : ",suma_sire$nbdif0) # Def ggbar order_ind = barplot %>% arrange(.data$Sexe,desc(.data$n))%>% select(SampleName) barplot$SampleName=factor(barplot$SampleName,levels=as.character(order_ind$SampleName)) barplot$Sexe[barplot$Sexe==1 | barplot$Sexe==3]="Sire" barplot$Sexe[barplot$Sexe==2 | barplot$Sexe==4]="Dam" barplot$Sexe=as.factor(barplot$Sexe) to_plot$ggbar=ggplot(data=barplot,aes(x=.data$SampleName,y=.data$n,fill=.data$Sexe))+ geom_bar(stat='identity')+ scale_fill_manual(values = c("Dam" = "#009E73", "Sire" = "#56B4E9"))+ theme_bw()+ geom_hline(yintercept = 0)+ labs(x="Individuals",y="Number of offspring")+ theme(panel.grid.major.x = element_blank(), axis.ticks.x = 
element_blank(),
                               # axis.text.x = element_blank(),
                               axis.text.x = element_text(angle = 90, vjust = 0.5, size=5,face = "bold"))

      # Heatmap
      tmp=verif$ped %>% group_by(sire,dam) %>% count() %>% arrange(desc(.data$n))
      ind_sire=df_par$SampleName[df_par$Sexe==1 | df_par$Sexe==3]
      ind_dam=df_par$SampleName[df_par$Sexe==2 | df_par$Sexe==4]
      tmp2=expand.grid(Sire=ind_sire,Dam=ind_dam)
      heatmap = left_join(tmp2,tmp,by=c("Sire"="sire","Dam"="dam"))

      # Def ggheat
      if (!is.null(input$tab_accou)){
        verif$accou$SampleName=toupper(verif$accou$SampleName)
        tmpSire = df_par %>% filter(.data$Sexe==1 | .data$Sexe==3) %>% select(.data$SampleName) %>% left_join(verif$accou,by=c("SampleName"="SampleName"),multiple="all") %>% arrange(.data$Facto) %>% select(.data$SampleName)
        tmpDam = df_par %>% filter(.data$Sexe==2 | .data$Sexe==4) %>% select(.data$SampleName) %>% left_join(verif$accou,by=c("SampleName"="SampleName"),multiple="all") %>% arrange(.data$Facto) %>% select(.data$SampleName)
        f_sire=data.frame(SN=unique(tmpSire$SampleName),Fa_sire=NA)
        for (k in 1:length(f_sire$SN)){
          sire_k=f_sire$SN[k]
          f_k=paste0(sort(verif$accou$Facto[verif$accou$SampleName==sire_k]),collapse = "/")
          f_sire$Fa_sire[k]=f_k
        }
        f_dam=data.frame(SN=unique(tmpDam$SampleName),Fa_dam=NA)
        for (k in 1:length(f_dam$SN)){
          dam_k=f_dam$SN[k]
          f_k=paste0(sort(verif$accou$Facto[verif$accou$SampleName==dam_k]),collapse = "/")
          f_dam$Fa_dam[k]=f_k
        }
        heatmap = heatmap %>% left_join(f_sire,by=c("Sire"="SN")) %>% left_join(f_dam,by=c("Dam"="SN"))
        heatmap$Sire=factor(heatmap$Sire,levels = unique(as.character(tmpSire$SampleName)))
        heatmap$Dam=factor(heatmap$Dam,levels = unique(as.character(tmpDam$SampleName)))
        heatmap$Same = heatmap$Fa_sire==heatmap$Fa_dam
        to_plot$ggheat=ggplot(data = heatmap,aes(x = .data$Sire,y = .data$Dam,fill = .data$n,text = paste0("Fac Sire : ",.data$Fa_sire,"<br>Fac Dam : ",.data$Fa_dam)))+ geom_tile(col=heatmap$Same,linewidth=0.05,width=0.8,height=0.8)+ scale_fill_gradient2(low = "#66CCFF", high = "#D55E00", mid="#F0E442", na.value="white",name="Number of \noffspring",midpoint =5,space = "Lab")+ theme_bw()+ theme(panel.grid = element_blank())+ theme(axis.text.x = element_text(angle = 90, vjust = 0.5, size = 5, hjust = 1,face = "bold"))+ theme(axis.text.y = element_text(angle = 0, vjust = 0.5, size = 5, hjust = 1,face="bold"))

        # Out-of-plan individuals
        if (length(verif$accou$SampleName)==length(unique(verif$accou$SampleName))){
          tab_assign2 = left_join(verif$ped,verif$accou,by=c('sire'='SampleName')) %>% rename(FactoSire=.data$Facto) %>% select(.data$offspring,.data$sire,.data$dam,.data$FactoSire)
          # names(tab_assign2)[6]='FactoSire'
          # tab_assign2 = tab_assign2[,-c(4,5)]
          tab_assign3 = left_join(tab_assign2,verif$accou,by=c('dam'='SampleName')) %>% rename(FactoDam=.data$Facto) %>% select(.data$offspring,.data$sire,.data$dam,.data$FactoSire,.data$FactoDam)
          # names(tab_assign3)[7]='FactoDam'
          # tab_assign3 = tab_assign3[,-c(5,6)]
          indiv_pb=tab_assign3$offspring[which(tab_assign3$FactoSire!=tab_assign3$FactoDam)]
          n_pb=length(indiv_pb)
          n_na=length(which(is.na(tab_assign3$sire)))
          n_tot=length(verif$ped$offspring)
        } else { # if some individuals belong to several factorials
          tab_assign2 = left_join(verif$ped,verif$accou,by=c('sire'='SampleName'),multiple = "all") %>% rename(FactoSire=.data$Facto) %>% select(.data$offspring,.data$sire,.data$dam,.data$FactoSire)
          tab_assign3 = left_join(tab_assign2,verif$accou,by=c('dam'='SampleName'),multiple = "all") %>% rename(FactoDam=.data$Facto) %>% select(.data$offspring,.data$sire,.data$dam,.data$FactoSire,.data$FactoDam)
          n_pb=0
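          # Scan every candidate (sire, dam) row per offspring below; a pair is flagged out-of-plan only if no row shares a factorial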
          n_na=0
          indiv_pb=c()
          n_tot=length(verif$ped$offspring)
          for (off in unique(verif$ped$offspring)){
            tmp=tab_assign3[tab_assign3$offspring==off,]
            n.row=nrow(tmp)
            if (n.row==1){ # parents have only 1 factorial
              if (!is.na(tmp$sire)){
                if (tmp$FactoSire!=tmp$FactoDam){
                  n_pb=n_pb+1
                  indiv_pb=c(indiv_pb,off)
                }
              } else {
                n_na=n_na+1
              }
            } else {
              # at least one parent belongs to several factorials
              # parents cannot be NA if there are several rows (by construction)
              same_facto = FALSE
              for (k in 1:n.row){
                if (tmp$FactoDam[k]==tmp$FactoSire[k]){ same_facto = TRUE }
              }
              if (!same_facto){
                n_pb=n_pb+1
                indiv_pb=c(indiv_pb,off)
              }
            }
          }
        }
        verif$out1=paste0("There is/are ",n_na," unassigned offspring (",round((n_na/n_tot)*100,2),"%)\nand among assigned offspring, ",n_pb," have parents that are not in the same factorial (",round((n_pb/n_tot)*100,2),"%).")
        verif$out2=paste0("Real assignment rate : ",n_tot-n_pb-n_na,"/",n_tot,"=",round((n_tot-n_pb-n_na)*100/n_tot,2),"%.")
        verif$out3=paste0("The assignment was done using ",length(snp_kept)," markers.")
        if (!mismatch_error){
          verif$out4=paste0("The maximum theoretical error rate for this assignment is ",round(THRESHOLD,2)*100,"%.")
        }
        verif$tab=tab_assign3[tab_assign3$offspring %in% indiv_pb,]
      } else {
        to_plot$ggheat=ggplot(data = heatmap,aes(x = .data$Sire,y = .data$Dam,fill = .data$n))+ geom_tile(linewidth=0.05,width=0.8,height=0.8)+ scale_fill_gradient2(low = "#66CCFF", high = "#D55E00", mid="#F0E442", na.value="white",name="Number of \noffspring",midpoint =5,space = "Lab")+ theme_bw()+ theme(panel.grid = element_blank())+ theme(axis.text.x = element_text(angle = 90, vjust = 0.5, size = 5, hjust = 1,face = "bold"))+ theme(axis.text.y = element_text(angle = 0, vjust = 0.5, size = 5, hjust = 1,face="bold"))
        n_na=length(which(is.na(verif$ped$sire)))
        n_tot=length(verif$ped$sire)
        verif$out1=paste0("There is/are ",n_na," unassigned offspring (",round((n_na/n_tot)*100,2),"%).")
        verif$out2=paste0("Real assignment rate : ",n_tot-n_na,"/",n_tot,"=",round((n_tot-n_na)*100/n_tot,2),"%.")
        verif$out3=paste0("The assignment was done using ",length(snp_kept)," markers.")
        if (!mismatch_error){
          verif$out4=paste0("The maximum theoretical error rate for this assignment is ",round(THRESHOLD,2)*100,"%.")
        }
        verif$tab=verif$ped[which(is.na(verif$ped$sire)),]
      }
      verif$displayed = data.frame() # reset so that it does not clutter the user experience
    }
  })

  ##### Event from Formatting #####
  formating=reactiveValues(data=NULL,head=NULL,end=NULL, dataMap=NULL, colFormat=FALSE,colMap=FALSE, Lpar=NULL,import_vcf = FALSE)

  #---Display the launch button when ready
  output$go_format = renderUI({
    if (length(formating$head)!=0 | formating$import_vcf){
      actionButton(inputId = "format",label = "Launch formatting")
    }
  })

  #---Load the list with the parents
  observeEvent(c(input$list_par,input$header_list_par),{
    if (!is.null(input$list_par$datapath)){
      if (input$header_list_par=='Yes'){
        formating$Lpar = read.table(file = input$list_par$datapath,header = TRUE)
      } else {
        formating$Lpar = read.table(file = input$list_par$datapath,header = FALSE)
      }
    }
  })

  #---Check if there is a header and ask for the right column
  output$col_head0 = renderUI({
    if (!is.null(formating$Lpar)){
      sliderInput(inputId = "col_head", label = div("Choose the number of the column corresponding to sample names", tipify(el = bsButton(inputId = "5",label = "",icon = icon("question"), style = "info", size = "extra-small"), title = "Select the number corresponding to the column with the sample names. 
Look at the table displayed to be sure.")), min = 1,max = min(20,ncol(formating$Lpar)),value = 1,step = 1) } }) #---Display head of the list with parents # output$headLpar = renderTable({ # head(formating$Lpar) # }) output$headLpar = renderDataTable({ datatable(head(formating$Lpar),rownames = FALSE,options = list(dom = 't'), caption = htmltools::tags$caption( style = 'caption-side: top; text-align: center; color:black; font-size:150% ;','Head list parents')) }) #---Load genotype file to format and display the head of df to help select the columns observeEvent(input$to_format,{ if (grepl(pattern = ".vcf",x = input$to_format$datapath)){ formating$import_vcf = TRUE } else { formating$import_vcf = FALSE dta = read.table(file = input$to_format$datapath,header = FALSE,comment.char = "#") colnames(dta) = 1:ncol(dta) formating$data = dta formating$colFormat=TRUE if (ncol(formating$data)>20){ formating$head = head(formating$data)[,1:20] } else { formating$head = head(formating$data) } } }) # output$head = renderTable({ # formating$head # }) output$head = renderDataTable({ datatable(formating$head,rownames = FALSE,options = list(dom = 't'), caption = htmltools::tags$caption( style = 'caption-side: top; text-align: center; color:black; font-size:150% ;','Head dataset to format')) }) output$uiNoColSN = renderUI({ if (length(formating$data)>0){ sliderInput(inputId = "col_SN", label = div("Choose the number of the column corresponding to sample names", tipify(el = bsButton(inputId = "10",label = "",icon = icon("question"), style = "info", size = "extra-small"), title = "Select the number corresponding to the column with the sample names. Look at the table displayed to be sure.")), min = 1,max = min(20,ncol(formating$data)),value = 1,step = 1) } }) output$uiNoColGeno = renderUI({ if (length(formating$data)>0){ sliderInput(inputId = "col_geno", label = div("Choose the number of the first column with genotype", tipify(el = bsButton(inputId = "11",label = "",icon = icon("question"), style = "info", size = "extra-small"), title = "Select the number corresponding to the first column genotype. Look at the table displayed to be sure.")), min = 1,max = min(20,ncol(formating$data)),value = 2,step = 1) } }) output$colFormat <- reactive({ formating$colFormat }) outputOptions(output, 'colFormat', suspendWhenHidden=FALSE) #---Load map or txt file with marker names and display the head of df to help select the column observeEvent(c(input$snp_map,input$header_snp_map),{ if (!is.null(input$snp_map) & !is.null(input$header_snp_map)){ if (input$header_snp_map=='Yes'){ formating$dataMap = read.table(file = input$snp_map$datapath,header = TRUE,comment.char = "#") } else { formating$dataMap = read.table(file = input$snp_map$datapath,header = FALSE,comment.char = "#") } formating$colMap=TRUE } }) # output$headMap = renderTable({ # head(formating$dataMap) # }) output$headMap = renderDataTable({ datatable(head(formating$dataMap),rownames = FALSE,options = list(dom = 't'), caption = htmltools::tags$caption( style = 'caption-side: top; text-align: center; color:black; font-size:150% ;','Head list markers')) }) output$uiNoColMap = renderUI({ if (length(formating$dataMap)>0){ sliderInput(inputId = "col_marker", label = div("Choose the number of the column correspoonding to marker names", tipify(el = bsButton(inputId = "12",label = "",icon = icon("question"), style = "info", size = "extra-small"), title = "Select the number corresponding to the first column genotype. 
Look at the table displayed to be sure.")),
                 min = 1, max = min(20, ncol(formating$dataMap)), value = 1, step = 1)
    }
  })

  output$colMap <- reactive({
    formating$colMap
  })
  outputOptions(output, 'colMap', suspendWhenHidden = FALSE)

  ###### Launch Formatting #####
  observeEvent(input$format, {
    if (!is.null(input$to_format$name)){
      if (!dir.exists("./log")){
        dir.create("./log")
      }
      # Set the saving name
      indice1 = regexpr(pattern = ".ped", text = input$to_format$name, fixed = TRUE)
      indice2 = regexpr(pattern = ".txt", text = input$to_format$name, fixed = TRUE)
      indice_vcf = regexpr(pattern = ".vcf", text = input$to_format$name, fixed = TRUE)
      if (indice1 != -1 | indice2 != -1 | indice_vcf != -1){
        saving_name = substr(x = input$to_format$name, start = 1, stop = nchar(input$to_format$name)-4)
      } else {
        indice = gregexpr(pattern = ".", text = input$to_format$name, fixed = TRUE)[[1]]
        if (indice[1] != -1){
          saving_name = substr(x = input$to_format$name, start = 1, stop = indice[length(indice)]-1)
        } else {
          saving_name = input$to_format$name
        }
      }
      print("-----Formatting dataset-----")
      date_time = Sys.time()
      date_time = gsub(pattern = "-", replacement = "", x = date_time)
      date_time = gsub(pattern = ":", replacement = "", x = date_time)
      # date_time = gsub(pattern = " CEST", replacement = "_", x = date_time)
      date_time = gsub(pattern = " ", replacement = "_", x = date_time)
      date_time = substr(x = date_time, start = 3, stop = 16)
      path_log = paste0("./log/", date_time, "_log_Formatting.txt")
      write(x = "-----Formatting dataset-----", file = path_log)
      if (!dir.exists("./data")){ # result folder
        dir.create("./data")
      }
      write(x = paste0("Dataset : ", input$to_format$name), file = path_log, append = TRUE)

      if (! formating$import_vcf){
        dta = read.table(file = input$to_format$datapath, header = FALSE, comment.char = "#")
        if (!is.null(input$snp_map$name)){
          write(x = paste0("Marker file : ", input$snp_map$name), file = path_log, append = TRUE)
          marker_name = formating$dataMap[, as.numeric(input$col_marker)]
        } else {
          marker_name = paste0("Marker", seq(1, (ncol(dta)-1)/as.numeric(input$ploidy_format), 1))
          write(x = "No marker file provided : marker names are Marker1, Marker2, ...", file = path_log, append = TRUE)
        }
      }

      if (indice_vcf != -1){
        dta = import_from_vcf(input$to_format$datapath)
        rownames(dta) = toupper(rownames(dta))
        SampleName = rownames(dta)
      } else {
        SampleName = dta[, as.numeric(input$col_SN)]
        if (indice1 != -1){
          indice1 = regexpr(pattern = "_[A-Z][0-9][0-9].CEL$", SampleName)
          indice2 = regexpr(pattern = "_[A-Z][0-9].CEL$", SampleName)
          SampleName[indice1 != -1] = substr(x = SampleName[indice1 != -1], start = 1, stop = (indice1[indice1 != -1]-1))
          SampleName[indice2 != -1] = substr(x = SampleName[indice2 != -1], start = 1, stop = (indice2[indice2 != -1]-1))
        }
        SampleName = toupper(SampleName) # in case some names are in lowercase
        dta = dta[, -c(1:(as.numeric(input$col_geno)-1))]
        dta[dta == 0] = NA
        if (ncol(dta) %% as.numeric(input$ploidy_format) != 0){
          stop(paste0("Invalid number of columns! There should be ", as.numeric(input$ploidy_format), " columns per marker"))
        }
      }

      if (input$what == 'Both'){
        write(x = "Parents and offspring are in the same dataset", file = path_log, append = TRUE)
        if (!is.null(input$list_par)){
          write(x = paste0("File with parent names : ", input$list_par$name), file = path_log, append = TRUE)
          par_nam = formating$Lpar[, as.numeric(input$col_head)]
          indice1 = regexpr(pattern = "_[A-Z][0-9][0-9].CEL$", par_nam)
          indice2 = regexpr(pattern = "_[A-Z][0-9].CEL$", par_nam)
          par_nam[indice1 != -1] = substr(x = par_nam[indice1 != -1], start = 1, stop = (indice1[indice1 != -1]-1))
          par_nam[indice2 != -1] = substr(x = par_nam[indice2 != -1], start = 1, stop = (indice2[indice2 != -1]-1))
          par_nam = toupper(par_nam) # in case some names are in lowercase
          data_par = dta[SampleName %in% par_nam, ]
          data_off = dta[! SampleName %in% par_nam, ]

          # For parents
          if (indice_vcf == -1){
            res = Run_formating(data = data_par,
                                SampleName = SampleName[SampleName %in% par_nam],
                                marker_name = marker_name,
                                ploidy = as.numeric(input$ploidy_format),
                                marker_type = input$markerType)
            data_par = res[[1]]
            df_SNP = res[[2]]
          } else {
            allele_freq = as.data.frame(get_allele_frequencies(data_par, ploidy_level = 2))
            if (!is.null(allele_freq$Freq_NA)){
              df_SNP = data.frame(MarkerName = rownames(allele_freq), toKeep = TRUE, CR = 1-allele_freq$Freq_NA)
            } else {
              df_SNP = data.frame(MarkerName = rownames(allele_freq), toKeep = TRUE, CR = 1)
            }
          }
          save(data_par, df_SNP, file = paste0("./data/", saving_name, "_Parents_genoAPIS.Rdata"))

          # For offspring
          if (indice_vcf == -1){
            res = Run_formating(data = data_off,
                                SampleName = SampleName[! SampleName %in% par_nam],
                                marker_name = marker_name,
                                ploidy = as.numeric(input$ploidy_format),
                                marker_type = input$markerType)
            data_off = res[[1]]
            df_SNP = res[[2]]
          } else {
            allele_freq = as.data.frame(get_allele_frequencies(data_off, ploidy_level = as.numeric(input$ploidy)))
            min_non_0 = function(x){
              x[x == min(x[x != 0])][1]
            }
            df_SNP = data.frame(MarkerName = rownames(allele_freq), toKeep = TRUE,
                                MAF = apply(X = allele_freq %>% select(.data$Freq_0, .data$Freq_1), MARGIN = 1, FUN = min_non_0, simplify = TRUE))
          }
          save(data_off, df_SNP, file = paste0("./data/", saving_name, "_Offspring_genoAPIS.Rdata"))
        } else {
          stop("There must be a list of parent names!")
        }
      } else { # If only parents or only offspring
        if (indice_vcf == -1){
          res = Run_formating(data = dta,
                              SampleName = SampleName,
                              marker_name = marker_name,
                              ploidy = as.numeric(input$ploidy_format),
                              marker_type = input$markerType)
          new_data = res[[1]]
          df_SNP = res[[2]]
        } else {
          allele_freq = as.data.frame(get_allele_frequencies(dta, ploidy_level = as.numeric(input$ploidy)))
          min_non_0 = function(x){
            x[x == min(x[x != 0])][1]
          }
          if (is.null(allele_freq$Freq_NA)){
            df_SNP = data.frame(MarkerName = rownames(allele_freq), toKeep = TRUE,
                                MAF = apply(X = allele_freq %>% select(.data$Freq_0, .data$Freq_1), MARGIN = 1, FUN = min_non_0, simplify = TRUE),
                                CR = 1)
          } else {
            df_SNP = data.frame(MarkerName = rownames(allele_freq), toKeep = TRUE,
                                MAF = apply(X = allele_freq %>% select(.data$Freq_0, .data$Freq_1), MARGIN = 1, FUN = min_non_0, simplify = TRUE),
                                CR = 1-allele_freq$Freq_NA)
          }
          new_data = dta
        }
        save(new_data, df_SNP, file = paste0("./data/", saving_name, "_genoAPIS.Rdata"))
      }

      write(x = "-----End formatting-----", file = path_log, append = TRUE)
      write(x = paste0("Saving name : ", saving_name, " (same name as input file but with extension _genoAPIS.Rdata => in ./data folder)"), file = path_log, append = TRUE)
      formating$end = "Format OK"
      print("-----End formatting-----")
    }
  })

  output$end_format = renderText({
    formating$end
  })
}

options(shiny.maxRequestSize = 10000*1024^2) # 10000 for ~10 GB => increase if needed

shinyApp(ui = ui, server = server)
}
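
# Note on the upload limit above (not in the original file): shiny.maxRequestSize
# is expressed in bytes, so 10000 * 1024^2 = 10,485,760,000 bytes, i.e. roughly
# 10 GB. A hedged example of raising the limit to ~20 GB before launching the app:
# options(shiny.maxRequestSize = 20000 * 1024^2)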
/scratch/gouwar.j/cran-all/cranData/APIS/R/App_APIS_Shiny.R
#' Format genotype data into the format recommended by APIS
#'
#' @param data genotype data
#' @param SampleName names of the individuals in the data (in order)
#' @param marker_name name of each marker (in order)
#' @param ploidy ploidy level of the genotype data
#' @param marker_type marker type (SNP or microsatellite)
#'
#' @return a list of 2: a data frame with the formatted data and a data frame with metrics for each marker
#'
#' @keywords internal
#' @noRd
Run_formating = function(data, SampleName, marker_name, ploidy, marker_type){
  new_data = as.data.frame(matrix(nrow = length(SampleName), ncol = length(marker_name)))
  rownames(new_data) = SampleName
  colnames(new_data) = marker_name

  MAF = c()
  CR = c()
  for (k in 1:(ncol(data)/ploidy)){
    new_data[, k] = apply(X = data[, (ploidy*(k-1)+1):(ploidy*k)], MARGIN = 1, FUN = paste, collapse = "/")
    x = as.vector(as.matrix(data[, (ploidy*(k-1)+1):(ploidy*k)]))
    if (marker_type == "SNP"){
      g = unique(x)[!is.na(unique(x))]
      if (length(g) == 2){ # number of alleles == 2
        f1 = length(x[x == g[1] & !is.na(x)])/length(x[!is.na(x)])
        f2 = length(x[x == g[2] & !is.na(x)])/length(x[!is.na(x)])
        MAF = c(MAF, min(f1, f2))
      } else if (length(g) > 2){
        warning("A maximum of 2 alleles is expected and this marker has more: MAF will not be calculated!")
        MAF = c(MAF, NA)
      } else {
        MAF = c(MAF, 0)
      }
    } else { # marker_type == 'microsat'
      # Nothing equivalent except heterozygosity rate?
    }
    CR = c(CR, length(which(!is.na(x)))/length(x))
  }

  # Remove duplicated individuals by keeping the copy with the best call rate (CR)
  count_na = function(x){
    round(1-(length(which(x == paste0(rep("NA", ploidy), collapse = "/")))/length(x)), 3)
  }
  CR_ind = data.frame(SampleName = SampleName, CR = apply(X = new_data, MARGIN = 1, FUN = count_na))
  db_ind = c()
  db_nam = c()
  for (pat in c("_2", ".2", "_BIS", ".BIS")){ # add more patterns if necessary
    # fixed = TRUE so that '.' is not treated as a regex wildcard
    indice = regexpr(pattern = pat, text = CR_ind$SampleName, fixed = TRUE)
    db_ind = c(db_ind, CR_ind$SampleName[which(indice != -1)])
    db_nam = c(db_nam, CR_ind$SampleName[which(CR_ind$SampleName %in% substr(x = CR_ind$SampleName[which(indice != -1)],
                                                                             start = 1,
                                                                             stop = nchar(CR_ind$SampleName[which(indice != -1)])-nchar(pat)))])
  }
  if (length(db_nam) > 0){ # at least one duplicated individual was found
    for (k in 1:length(db_ind)){
      cr1 = CR_ind$CR[CR_ind$SampleName == db_ind[k]]
      cr2 = CR_ind$CR[CR_ind$SampleName == db_nam[k]]
      if (cr1 > cr2){
        new_data = new_data[-which(rownames(new_data) == db_nam[k]), ] # remove the copy with the lower call rate
        rownames(new_data)[which(rownames(new_data) == db_ind[k])] = db_nam[k] # rename: drop the "_2" (or similar) suffix
      } else {
        new_data = new_data[-which(rownames(new_data) == db_ind[k]), ] # just remove; the original name is already present
      }
    }
  }

  if (marker_type == "SNP"){
    df_SNP = data.frame(MarkerName = marker_name, MAF = MAF, CR = CR, toKeep = TRUE) # toKeep defaults to TRUE: filtering thresholds are left to the user
  } else { # marker_type == 'microsat'
    df_SNP = data.frame(MarkerName = marker_name, CR = CR, toKeep = TRUE)
  }
  return(list(new_data, df_SNP))
}
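
# Hedged usage sketch (not in the original file): Run_formating() is internal,
# so it is reached via APIS:::Run_formating() when experimenting. The toy
# matrix below assumes a diploid SNP panel with 2 columns per marker; all
# names and values are invented for the illustration.
# geno <- matrix(c("A", "A", "A", "B",
#                  "B", "B", "A", "A"),
#                nrow = 2, byrow = TRUE)
# res <- APIS:::Run_formating(data = geno,
#                             SampleName = c("IND1", "IND2"),
#                             marker_name = c("M1", "M2"),
#                             ploidy = 2, marker_type = "SNP")
# res[[1]] # genotypes collapsed to "A/A", "A/B", ...
# res[[2]] # per-marker metrics: MarkerName, MAF, CR, toKeep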
/scratch/gouwar.j/cran-all/cranData/APIS/R/function_formating.R
#' Format log_file
#'
#' Gives names to columns and formats variables as numeric
#'
#' @param log_file the log_file
#'
#' @return log_file updated
#'
#' @keywords internal
#' @noRd
format_logfile = function(log_file){
  colnames(log_file) = c('offspring',
                         'sire_1', 'dam_1', 'probability_1', 'mismatch_1',
                         'sire_2', 'dam_2', 'probability_2', 'mismatch_2',
                         'sire_3', 'dam_3', 'probability_3', 'mismatch_3',
                         'delta_1_2', 'delta_2_3')
  log_file$probability_1 = as.numeric(log_file$probability_1)
  log_file$probability_2 = as.numeric(log_file$probability_2)
  log_file$probability_3 = as.numeric(log_file$probability_3)
  log_file$delta_1_2 = as.numeric(log_file$delta_1_2)
  log_file$delta_2_3 = as.numeric(log_file$delta_2_3)
  log_file$mismatch_1 = as.integer(log_file$mismatch_1)
  log_file$mismatch_2 = as.integer(log_file$mismatch_2)
  log_file$mismatch_3 = as.integer(log_file$mismatch_3)
  return(log_file)
}
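
# Hedged sketch (not in the original file): a raw row out of the probability
# step is an unnamed character vector of 15 fields; format_logfile() names the
# columns and restores numeric/integer types. The values below are invented toys.
# raw <- as.data.frame(matrix(c("OFF1", "S1", "D1", "-12.3", "1",
#                               "S2", "D2", "-15.8", "3",
#                               "S3", "D3", "-16.1", "4",
#                               "3.5", "0.3"), nrow = 1),
#                      stringsAsFactors = FALSE)
# log_file <- APIS:::format_logfile(raw)
# str(log_file) # probabilities and deltas numeric, mismatches integer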
/scratch/gouwar.j/cran-all/cranData/APIS/R/function_logfile.R
#' Simulate offspring
#'
#' @param sire_genotype sire genotype
#' @param dam_genotype dam genotype
#' @param number_offspring number of offspring to simulate
#' @param ploidy_level ploidy level of the offspring
#' @param sire_contribution sire contribution
#' @param dam_contribution dam contribution
#' @param recombination_rate recombination rate (only relevant for tri/tetraploid offspring)
#' @param genotyping_error genotyping error
#'
#' @return a list with the matrix of simulated offspring genotypes and the pedigree
#'
#' @examples
#' data("APIS_sire")
#' data("APIS_dam")
#'
#' # For diploid offspring
#' simulate_offspring(sire_genotype=APIS_sire, dam_genotype=APIS_dam,
#'                    number_offspring=10,
#'                    ploidy_level = 2,
#'                    sire_contribution = 1, dam_contribution = 1,
#'                    recombination_rate = 0.5,
#'                    genotyping_error = 0.01)
#'
#' # For triploid offspring
#' simulate_offspring(sire_genotype=APIS_sire, dam_genotype=APIS_dam,
#'                    number_offspring=10,
#'                    ploidy_level = 3,
#'                    sire_contribution = 1, dam_contribution = 2,
#'                    recombination_rate = 0.5,
#'                    genotyping_error = 0.01)
#'
#' @export
simulate_offspring = function(sire_genotype, dam_genotype, number_offspring,
                              ploidy_level = 2,
                              sire_contribution = 1, dam_contribution = 1,
                              recombination_rate = 0.5,
                              genotyping_error = 0.01) {
  if (sire_contribution + dam_contribution != ploidy_level) {
    stop('sire_contribution + dam_contribution must be equal to the ploidy level')
  }
  if (sire_contribution > 2 | dam_contribution > 2){
    stop('sire_contribution and dam_contribution must be less than or equal to 2. The method is not implemented above that.')
  }

  n_sire = nrow(sire_genotype)
  n_dam = nrow(dam_genotype)
  n_marker = ncol(sire_genotype)

  mat_off = matrix(nrow = number_offspring, ncol = n_marker)
  colnames(mat_off) = colnames(sire_genotype)
  rownames(mat_off) = paste0("Simulated_off_", 1:number_offspring)

  simulated_pedigree = matrix(NA, nrow = number_offspring, ncol = 3)
  colnames(simulated_pedigree) = c('individual', 'sire', 'dam')
  simulated_pedigree[, 1] = rownames(mat_off)

  # List of the possible alleles at each marker (NA excluded)
  all_allele = list()
  for (a in 1:n_marker){
    all_allele[[a]] = unique(unlist(strsplit(x = c(sire_genotype[, a], dam_genotype[, a]), split = "/", fixed = TRUE)))
    test_na = which(all_allele[[a]] == 'NA')
    if (length(test_na) > 0){
      all_allele[[a]] = all_allele[[a]][-test_na]
    }
  }

  id_selected_sire = sample(x = 1:n_sire, size = number_offspring, replace = TRUE)
  id_selected_dam = sample(x = 1:n_dam, size = number_offspring, replace = TRUE)
  simulated_pedigree[, 2] = rownames(sire_genotype)[id_selected_sire]
  simulated_pedigree[, 3] = rownames(dam_genotype)[id_selected_dam]

  for (k in 1:number_offspring){
    print(k)
    id_sire = id_selected_sire[k]
    id_dam = id_selected_dam[k]
    sire_selected = strsplit(x = sire_genotype[id_sire, ], split = "/", fixed = TRUE)
    dam_selected = strsplit(x = dam_genotype[id_dam, ], split = "/", fixed = TRUE)
    which_allele_sire = sample(x = 1:2, size = n_marker, replace = TRUE)
    which_allele_dam = sample(x = 1:2, size = n_marker, replace = TRUE)

    # Second transmitted allele: same chromatid with probability 1 - recombination_rate
    if (sire_contribution == 2){
      which_allele_sire2 = c()
      for (l in 1:n_marker){
        if (which_allele_sire[l] == 1){
          id2 = sample(1:2, size = 1, replace = FALSE, prob = c(1-recombination_rate, recombination_rate))
        } else {
          id2 = sample(1:2, size = 1, replace = FALSE, prob = c(recombination_rate, 1-recombination_rate))
        }
        which_allele_sire2 = c(which_allele_sire2, id2)
      }
    }
    if (dam_contribution == 2){
      which_allele_dam2 = c()
      for (l in 1:n_marker){
        if (which_allele_dam[l] == 1){
          id2 = sample(1:2, size = 1, replace = FALSE, prob = c(1-recombination_rate, recombination_rate))
        } else {
          id2 = sample(1:2, size = 1, replace = FALSE, prob = c(recombination_rate, 1-recombination_rate))
        }
        which_allele_dam2 = c(which_allele_dam2, id2)
      }
    }

    offspring_genotype = c()
    if (dam_contribution == 1 & sire_contribution == 1){
      for (n in 1:n_marker){
        sire_allele = mutate_allele(allele = sire_selected[[n]][which_allele_sire[n]], possib_allele = all_allele[[n]], error_rate = genotyping_error)
        dam_allele = mutate_allele(allele = dam_selected[[n]][which_allele_dam[n]], possib_allele = all_allele[[n]], error_rate = genotyping_error)
        off_allele = paste0(sort(c(sire_allele, dam_allele)), collapse = "/")
        if (length(grep(pattern = "NA", x = off_allele)) > 0){
          off_allele = "NA/NA"
        }
        offspring_genotype[n] = off_allele
      }
    } else if (dam_contribution == 2 & sire_contribution == 1){
      for (n in 1:n_marker){
        sire_allele = mutate_allele(allele = sire_selected[[n]][which_allele_sire[n]], possib_allele = all_allele[[n]], error_rate = genotyping_error)
        dam_allele = mutate_allele(allele = dam_selected[[n]][which_allele_dam[n]], possib_allele = all_allele[[n]], error_rate = genotyping_error)
        dam_allele2 = mutate_allele(allele = dam_selected[[n]][which_allele_dam2[n]], possib_allele = all_allele[[n]], error_rate = genotyping_error)
        off_allele = paste0(sort(c(sire_allele, dam_allele, dam_allele2)), collapse = "/")
        if (length(grep(pattern = "NA", x = off_allele)) > 0){
          off_allele = "NA/NA/NA"
        }
        offspring_genotype[n] = off_allele
      }
    } else if (dam_contribution == 1 & sire_contribution == 2){
      for (n in 1:n_marker){
        sire_allele = mutate_allele(allele = sire_selected[[n]][which_allele_sire[n]], possib_allele = all_allele[[n]], error_rate = genotyping_error)
        sire_allele2 = mutate_allele(allele = sire_selected[[n]][which_allele_sire2[n]], possib_allele = all_allele[[n]], error_rate = genotyping_error)
        dam_allele = mutate_allele(allele = dam_selected[[n]][which_allele_dam[n]], possib_allele = all_allele[[n]], error_rate = genotyping_error)
        off_allele = paste0(sort(c(sire_allele, sire_allele2, dam_allele)), collapse = "/")
        if (length(grep(pattern = "NA", x = off_allele)) > 0){
          off_allele = "NA/NA/NA"
        }
        offspring_genotype[n] = off_allele
      }
    } else if (dam_contribution == 2 & sire_contribution == 2){
      for (n in 1:n_marker){
        sire_allele = mutate_allele(allele = sire_selected[[n]][which_allele_sire[n]], possib_allele = all_allele[[n]], error_rate = genotyping_error)
        sire_allele2 = mutate_allele(allele = sire_selected[[n]][which_allele_sire2[n]], possib_allele = all_allele[[n]], error_rate = genotyping_error)
        dam_allele = mutate_allele(allele = dam_selected[[n]][which_allele_dam[n]], possib_allele = all_allele[[n]], error_rate = genotyping_error)
        dam_allele2 = mutate_allele(allele = dam_selected[[n]][which_allele_dam2[n]], possib_allele = all_allele[[n]], error_rate = genotyping_error)
        off_allele = paste0(sort(c(sire_allele, sire_allele2, dam_allele, dam_allele2)), collapse = "/")
        if (length(grep(pattern = "NA", x = off_allele)) > 0){
          off_allele = "NA/NA/NA/NA"
        }
        offspring_genotype[n] = off_allele
      }
    }
    mat_off[k, ] = offspring_genotype
  }
  return(list(genotypes = mat_off, pedigree = simulated_pedigree))
}

#' Simulate mutation
#'
#' @param allele parental allele
#' @param possib_allele possible alleles for this marker
#' @param error_rate error rate
#'
#' @return the allele, mutated with probability error_rate
#'
#' @importFrom stats runif
#'
#' @keywords internal
#' @noRd
mutate_allele = function(allele, possib_allele, error_rate = 0.1){
  if (length(possib_allele) > 0){
    if (runif(1, 0, 1) < error_rate){
      new_allele = sample(x = possib_allele, size = 1)
      return(new_allele)
    } else {
      return(allele)
    }
  } else {
    return(allele)
  }
}

#' Remove simulated offspring from results
#'
#' @param log_file log_file
#' @param pedigree pedigree
#' @param simulated_individuals simulated offspring
#'
#' @return list of 2 elements: the new log file and pedigree without the simulated individuals
#'
#' @keywords internal
#' @noRd
remove_simulation <- function(log_file, pedigree, simulated_individuals) {
  # DESCRIPTION
  # remove simulated individuals from the APIS outputs
  #
  # INPUTS
  # log_file : the log file
  # pedigree : the pedigree
  # simulated_individuals : names of the simulated individuals
  #
  # OUTPUTS
  # list of 2 elements : the new log file and pedigree without the simulated individuals

  new_log = subset(log_file, !(log_file$offspring %in% simulated_individuals))
  new_pedigree = subset(pedigree, !(pedigree$offspring %in% simulated_individuals))
  return(list(log_file = new_log, pedigree = new_pedigree))
}
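
# Hedged illustration (not in the original file) of the mutate_allele() error
# model: the input allele is kept with probability 1 - error_rate, otherwise
# one of the possible alleles is redrawn uniformly (so the original allele can
# be drawn again).
# set.seed(1)
# draws <- replicate(10000, APIS:::mutate_allele("A", possib_allele = c("A", "B"),
#                                                error_rate = 0.1))
# table(draws) / 10000 # expected: ~0.95 "A" and ~0.05 "B"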
/scratch/gouwar.j/cran-all/cranData/APIS/R/function_simulate_offspring.R
# THIS FILE CONTAINS THE FUNCTIONS FOR THRESHOLD ESTIMATION
#
# =====================================================================================
#' Estimate threshold
#'
#' @param log_file log file
#' @param error error
#'
#' @return threshold for delta
#'
#' @importFrom stats quantile
#'
#' @keywords internal
#' @noRd
estimate_mendel_threshold <- function(log_file, error) {
  # DESCRIPTION
  # estimate the delta mendel threshold for assignment
  #
  # INPUTS
  # log_file : the log file
  # error : percent of error allowed in "mendel" method
  #
  # OUTPUTS
  # threshold_delta : value of the delta threshold

  missing_range <- estimate_missing_parents(log_file = log_file)

  # If the number of individuals with missing parents is lower than the accepted error
  if ((missing_range[1]) <= round(error * nrow(log_file))) {
    # All the individuals are assigned to their most likely parent pair
    threshold_delta = min(log_file$delta_1_2)
  } else {
    sorted_delta_2_3 <- sort(log_file$delta_2_3, decreasing = TRUE)
    threshold_delta <- quantile(sorted_delta_2_3[1:(nrow(log_file) - (missing_range[2]))],
                                probs = (1 - error), type = 5, na.rm = TRUE)
  }
  return(threshold_delta)
}

#' Estimate missing parents
#'
#' @param log_file log file
#'
#' @return range of individuals with missing parents
#'
#' @importFrom stats median
#'
#' @keywords internal
#' @noRd
estimate_missing_parents <- function(log_file) {
  # DESCRIPTION
  # estimate the number of individuals with missing parents
  #
  # INPUTS
  # log_file : the log file
  #
  # OUTPUTS
  # vector of 2 elements : estimated number of offspring with missing parents with strict and relaxed method

  # median_p2 <- median(log_file$probability_2)
  #
  # probability_2_sort <- sort(log_file$probability_2)
  #
  # missing_0 <- 2 * length(log_file$probability_1[which(log_file$probability_1 <= median_p2)])
  # number_individual <- length(log_file$probability_1)
  #
  # threshold <- min(log_file$probability_2)
  #
  # N2_l <- round(length(log_file$probability_2[which(log_file$probability_2 <= threshold)]) -
  #   (length(log_file$probability_3[which(log_file$probability_3 <= threshold)]) * ((missing_0) / number_individual)))
  #
  # cpt <- 1
  # while(N2_l < ((number_individual - (missing_0)) / 2) & cpt <= nrow(log_file)) {
  #   threshold <- probability_2_sort[cpt]
  #
  #   N2_l <- round(length(log_file$probability_2[which(log_file$probability_2 <= threshold)]) -
  #     (length(log_file$probability_3[which(log_file$probability_3 <= threshold)]) * ((missing_0) / number_individual)))
  #
  #   cpt <- cpt+1
  # }
  #
  # median_1 <- threshold
  # missing_1 <- 2 * length(log_file$probability_1[which(log_file$probability_1 <= median_1)])

  N1_1min <- 2 * length(which(log_file$probability_1 <= median(log_file$probability_2)))
  # N1_1 <- ifelse(test = missing_1 > round(nrow(log_file)/2), yes = round(nrow(log_file)/2), no = missing_1)
  N1_1 <- ifelse(test = N1_1min > nrow(log_file), yes = nrow(log_file), no = N1_1min)

  return(c(N1_1min, N1_1))
}

# ggplot(data = log_file) + geom_histogram(aes(x = probability_2), fill = 'blue', alpha = 0.5) +
#   geom_histogram(aes(x = probability_1), fill = 'pink', alpha = 0.5) +
#   geom_histogram(aes(x = probability_3), fill = 'green', alpha = 0.5) +
#   geom_vline(xintercept = threshold) + geom_vline(xintercept = median_p2, col = 'red')

#' Estimate threshold
#'
#' @param log_file log file
#' @param error error
#'
#' @return threshold for exclusion
#'
#' @importFrom stats quantile median
#'
#' @keywords internal
#' @noRd
estimate_exclusion_threshold <- function(log_file, error) {
  # DESCRIPTION
  # estimate the exclusion (mismatch) threshold for assignment
  #
  # INPUTS
  # log_file : the log file
  # error : percent of error allowed in "mendel" method
  #
  # OUTPUTS
  # threshold_exclu : value of the exclusion threshold

  missing_0 = 2 * length(which(log_file$mismatch_1 >= median(log_file$mismatch_2)))
  missing_range = ifelse(test = missing_0 > nrow(log_file), yes = nrow(log_file), no = missing_0)

  # If the number of individuals with missing parents is lower than the accepted error
  if (missing_range <= round(error * nrow(log_file))) {
    # All the individuals are assigned to their most likely parent pair
    threshold_exclu = max(log_file$mismatch_1)
  } else {
    sorted_miss_2 <- sort(log_file$mismatch_2, decreasing = FALSE) # unlike the probabilities, the mismatch axis is inverted
    threshold_exclu <- quantile(sorted_miss_2[1:(nrow(log_file) - (missing_range))],
                                probs = error, type = 5, na.rm = TRUE)
  }
  return(threshold_exclu)
}
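
# Hedged worked example (not in the original file): with a toy log file where
# every offspring clearly has both parents present (probability_1 far above
# probability_2), estimate_missing_parents() returns c(0, 0), so the threshold
# falls back to min(delta_1_2) and all offspring get assigned.
# toy <- data.frame(probability_1 = c(-5, -4, -6),
#                   probability_2 = c(-20, -19, -21),
#                   delta_1_2     = c(15, 15, 15),
#                   delta_2_3     = c(1, 2, 1))
# APIS:::estimate_mendel_threshold(toy, error = 0.05) # returns 15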
/scratch/gouwar.j/cran-all/cranData/APIS/R/function_threshold_error.R
# THIS FILE CONTAINS ALL THE FUNCTIONS FOR DIPLOID ASSIGNMENT USING PARALLEL PROGRAMMING
#
# =====================================================================================
#' Get probabilities
#'
#' @param offspring_genotype matrix of the offspring genotypes recoded (after recode_genotypes_for_assignment() function)
#' @param sire_genotype matrix of the sire genotypes recoded (after recode_genotypes_for_assignment() function)
#' @param dam_genotype matrix of the dam genotypes recoded (after recode_genotypes_for_assignment() function)
#' @param allele_frequencies allele frequencies recoded (after recode_allele_frequencies_for_assignment() function)
#' @param method method of assignment ("mendel" or "exclusion")
#' @param ploidy_level ploidy level
#' @param number_cores number of cores
#'
#' @return list of probabilities and mismatches
#'
#' @useDynLib APIS, .registration = TRUE
#' @importFrom parallel makeCluster stopCluster
#' @importFrom doParallel registerDoParallel
#' @importFrom foreach %dopar% foreach
#'
#' @keywords internal
#' @noRd
get_probabilities_parallel_2n <- function(offspring_genotype, sire_genotype, dam_genotype,
                                          allele_frequencies, method = "mendel",
                                          ploidy_level, number_cores) {
  # DESCRIPTION
  # Get the Mendelian transmission probabilities and mismatches for all the parent pairs
  # for all the diploid offspring individuals using parallel programming
  #
  # INPUTS
  # offspring_genotype : genotype matrix of the offspring after recoding
  # sire_genotype : genotype matrix of the sires after recoding
  # dam_genotype : genotype matrix of the dams after recoding
  # allele_frequencies : allele frequency matrix after recoding
  # method : method of assignment
  # ploidy_level : ploidy level of the offspring
  # number_cores : number of cores for parallel processing
  #
  # OUTPUTS
  # output : the log file

  i = NULL
  iterations = nrow(offspring_genotype)
  new_genotype = NULL

  cl = parallel::makeCluster(number_cores)
  doParallel::registerDoParallel(cl)

  output <- foreach(i = 1:iterations, .combine = rbind, .export = "get_individual_probability_2n") %dopar% {
    tmp <- get_individual_probability_2n(offspring_genotype = offspring_genotype[i, , drop = FALSE],
                                         sire_genotype = sire_genotype,
                                         dam_genotype = dam_genotype,
                                         allele_frequencies = allele_frequencies,
                                         ploidy_level = ploidy_level)
    all_values = data.frame(sire = rep(rownames(sire_genotype), each = nrow(dam_genotype)),
                            dam = rep(rownames(dam_genotype), times = nrow(sire_genotype)),
                            probability = tmp[[1]],
                            mismatch = tmp[[2]])
    if (method == "mendel" | method == "exclusion") {
      if (method == "mendel"){
        all_values = all_values[order(all_values$probability, decreasing = TRUE), ]
      } else {
        all_values = all_values[order(all_values$mismatch, decreasing = FALSE), ]
      }
      log_file = c(rownames(offspring_genotype)[i],
                   all_values$sire[1], all_values$dam[1], all_values$probability[1], all_values$mismatch[1],
                   all_values$sire[2], all_values$dam[2], all_values$probability[2], all_values$mismatch[2],
                   all_values$sire[3], all_values$dam[3], all_values$probability[3], all_values$mismatch[3],
                   all_values$probability[1] - all_values$probability[2],
                   all_values$probability[2] - all_values$probability[3])
    } else {
      all_values1 = all_values[order(all_values$probability, decreasing = TRUE), ]
      all_values2 = all_values[order(all_values$mismatch, decreasing = FALSE), ]
      log_file = c(rownames(offspring_genotype)[i],
                   all_values1$sire[1], all_values1$dam[1], all_values1$probability[1], all_values1$mismatch[1],
                   all_values1$sire[2], all_values1$dam[2], all_values1$probability[2], all_values1$mismatch[2],
                   all_values1$sire[3], all_values1$dam[3], all_values1$probability[3], all_values1$mismatch[3],
                   all_values1$probability[1] - all_values1$probability[2],
                   all_values1$probability[2] - all_values1$probability[3],
                   all_values2$sire[1], all_values2$dam[1], all_values2$probability[1], all_values2$mismatch[1],
                   all_values2$sire[2], all_values2$dam[2], all_values2$probability[2], all_values2$mismatch[2],
                   all_values2$sire[3], all_values2$dam[3], all_values2$probability[3], all_values2$mismatch[3],
                   all_values2$probability[1] - all_values2$probability[2],
                   all_values2$probability[2] - all_values2$probability[3])
    }
    return(log_file)
  }
  parallel::stopCluster(cl)
  return(output)
}

# =====================================================================================
#' APIS for diploids
#'
#' @param offspring_genotype matrix of the offspring genotypes
#' @param sire_genotype matrix of the sire genotypes
#' @param dam_genotype matrix of the dam genotypes
#' @param method method : "mendel" i.e. likelihood or "exclusion" (default : "mendel"). Can also be "" to select the method a posteriori.
#' @param exclusion_threshold threshold for "exclusion" method (default : NULL). Overrides the error parameter if not NULL
#' @param error error accepted (default : 0.05)
#' @param simulation_if_small simulate individuals (TRUE or FALSE) (default : FALSE)
#' @param number_offspring_simulated number of offspring simulated (default : 500)
#' @param number_cores number of cores
#' @param verbose verbose
#'
#' @return list of 2 elements : a pedigree file and the log file
#'
#' @importFrom cowplot plot_grid
#' @useDynLib APIS, .registration = TRUE
#'
#' @examples
#' data("APIS_offspring")
#' data("APIS_sire")
#' data("APIS_dam")
#'
#' assignment <- APIS_2n(offspring_genotype = APIS_offspring[1:35,1:50],
#'                       sire_genotype = APIS_sire[ ,1:50],
#'                       dam_genotype = APIS_dam[ ,1:50],
#'                       simulation_if_small = FALSE)
#'
#' @export
APIS_2n <- function(offspring_genotype, sire_genotype, dam_genotype,
                    method = "mendel",
                    exclusion_threshold = NULL, error = 0.05,
                    simulation_if_small = FALSE,
                    number_offspring_simulated = max(0, 500 - nrow(offspring_genotype)),
                    number_cores = 2, verbose = FALSE) {
  # DESCRIPTION
  # APIS process for diploid offspring
  #
  # INPUTS
  # offspring_genotype : genotype matrix of the offspring in APIS format
  # sire_genotype : genotype matrix of the sires in APIS format
  # dam_genotype : genotype matrix of the dams in APIS format
  # method : method of assignment
  # exclusion_threshold : number of mismatches allowed in "exclusion" method
  # error : percent of error allowed in "mendel" method
  # simulation_if_small : simulate individuals
  # number_offspring_simulated : number of offspring to simulate
  # number_cores : number of cores for parallel programming
  # verbose : display information on the screen
  #
  # OUTPUTS
  # list of 2 elements : the pedigree and the log file

  if (verbose) {
    print_title()
  }

  # Parental ploidy level
  parental_ploidy_level = length(strsplit(sire_genotype[1, 1], split = "/")[[1]])

  # Check inputs
  check_matrices_format(offspring_genotype, verbose = verbose)
  check_matrices_format(sire_genotype, verbose = verbose)
  check_matrices_format(dam_genotype, verbose = verbose)

  genotypes_checked = check_genotypes(offspring_genotype, sire_genotype, dam_genotype,
                                      offspring_ploidy_level = 2,
                                      parental_ploidy_level = parental_ploidy_level,
                                      verbose = verbose)
  offspring_genotype = genotypes_checked[[1]]
  sire_genotype = genotypes_checked[[2]]
  dam_genotype = genotypes_checked[[3]]

  if (!is(offspring_genotype, "matrix")) {
    offspring_genotype = t(as.matrix(offspring_genotype))
  }

  check_input_parameters(exclusion_threshold = exclusion_threshold,
                         number_marker = ncol(offspring_genotype),
                         error = error, verbose = verbose)

  # Theoretical assignment power
  P = assignment_power(sire_genotype = sire_genotype, dam_genotype = dam_genotype,
                       ploidy_level = parental_ploidy_level, verbose = verbose)

  # Add simulation
  if (simulation_if_small & number_offspring_simulated > 0) {
    simulation = simulate_offspring(sire_genotype = sire_genotype,
                                    dam_genotype = dam_genotype,
                                    number_offspring = number_offspring_simulated)
    offspring_genotype = rbind(offspring_genotype, simulation$genotypes)
  }

  # Recode genotype matrices
  prepared_data = prepare_for_assignment(offspring_genotype = offspring_genotype,
                                         sire_genotype = sire_genotype,
                                         dam_genotype = dam_genotype,
                                         ploidy_level = 2,
                                         number_cores = number_cores,
                                         verbose = verbose)
  offspring_genotype_recoded = prepared_data[[1]]
  sire_genotype_recoded = prepared_data[[2]]
  dam_genotype_recoded = prepared_data[[3]]
  allele_frequency_recoded = prepared_data[[4]]

  # Calculate probabilities and mismatches
  if (verbose) {
    cat("estimation of mendelian transmission probabilities and mismatches", sep = "\n")
  }
  log_file = get_probabilities_parallel_2n(offspring_genotype = offspring_genotype_recoded,
                                           sire_genotype = sire_genotype_recoded,
                                           dam_genotype = dam_genotype_recoded,
                                           allele_frequencies = allele_frequency_recoded,
                                           method = method,
                                           ploidy_level = 2,
                                           number_cores = number_cores)
  if (is(log_file, "vector")) {
    log_file = as.data.frame(t(log_file))
  } else {
    log_file = as.data.frame(log_file)
  }
  if (method %in% c("mendel", "exclusion")){
    log_file = format_logfile(log_file)
  } else {
    log_file_likelihood = log_file[, 1:15]
    log_file_likelihood = format_logfile(log_file_likelihood)
    log_file_exclusion = log_file[, c(1, 16:29)]
    log_file_exclusion = format_logfile(log_file_exclusion)
  }

  if (verbose) {
    cat("assignment ...", sep = "\n")
  }

  # if method = mendel
  if (method == "mendel") {
    # Create pedigree
    threshold_mendel = estimate_mendel_threshold(log_file = log_file, error = error)
    pedigree = create_pedigree_delta(log_file = log_file, threshold_delta = threshold_mendel)
    # Plot graphs
    if (simulation_if_small & number_offspring_simulated > 0) {
      simulated_individuals = simulation$pedigree[, 1]
    } else {
      simulated_individuals = NULL
    }
    p1 = plot_delta(log_file = log_file, threshold = threshold_mendel, simulated_individuals = simulated_individuals)
    p2 = plot_probabilities(log_file = log_file, threshold = NULL, simulated_individuals = simulated_individuals)
    p3 = plot_mismatches(log_file = log_file, threshold = NULL, simulated_individuals = simulated_individuals)

  # if method = exclusion
  } else if (method == "exclusion") {
    # Create pedigree
    if (is.null(exclusion_threshold)){
      exclusion_threshold = estimate_exclusion_threshold(log_file = log_file, error = error)
    } else if (verbose){
      cat("error parameter overridden as exclusion_threshold was provided", sep = "\n")
    }
    pedigree = create_pedigree_exclusion(log_file = log_file, threshold_exclusion = exclusion_threshold)
    # Plot graphs
    if (simulation_if_small & number_offspring_simulated > 0) {
      simulated_individuals = simulation$pedigree[, 1]
    } else {
      simulated_individuals = NULL
    }
    p1 = plot_delta(log_file = log_file, threshold = NULL, simulated_individuals = simulated_individuals)
    p2 = plot_probabilities(log_file = log_file, threshold = NULL, simulated_individuals = simulated_individuals)
    p3 = plot_mismatches(log_file = log_file, threshold = exclusion_threshold, simulated_individuals = simulated_individuals)

  } else { # method "" : selection of the method a posteriori, used by the shiny app
    threshold_mendel = estimate_mendel_threshold(log_file = log_file_likelihood, error = error)
    pedigree_likelihood = create_pedigree_delta(log_file = log_file_likelihood, threshold_delta = threshold_mendel)
    if (is.null(exclusion_threshold)){
      exclusion_threshold = estimate_exclusion_threshold(log_file = log_file_exclusion, error = error)
    } else if (verbose){
      cat("error parameter overridden as exclusion_threshold was provided", sep = "\n")
    }
    pedigree_exclusion = create_pedigree_exclusion(log_file = log_file_exclusion, threshold_exclusion = exclusion_threshold)
    # Fix: define simulated_individuals in this branch too, as it is used below
    if (simulation_if_small & number_offspring_simulated > 0) {
      simulated_individuals = simulation$pedigree[, 1]
    } else {
      simulated_individuals = NULL
    }
  }

  if (verbose) {
    cat("assignment completed !", sep = "\n")
    if (method %in% c("mendel", "exclusion")){
      print_summary(theoretical_assignment_power = P,
                    assignment_rate = get_assignment_rate(pedigree))
    }
  }

  if (method %in% c("mendel", "exclusion")) {
    print(cowplot::plot_grid(p1, p2, p3, nrow = 3))
    if (simulation_if_small & number_offspring_simulated > 0) {
      output = remove_simulation(log_file = log_file, pedigree = pedigree, simulated_individuals = simulated_individuals)
      return(output)
    } else {
      return(list(pedigree = pedigree, log_file = log_file))
    }
  } else {
    if (simulation_if_small & number_offspring_simulated > 0) {
      output1 = remove_simulation(log_file = log_file_likelihood, pedigree = pedigree_likelihood, simulated_individuals = simulated_individuals)
      output2 = remove_simulation(log_file = log_file_exclusion, pedigree = pedigree_exclusion, simulated_individuals = simulated_individuals)
      return(c(output1, output2))
    } else {
      return(list(pedigree_likelihood = pedigree_likelihood, pedigree_exclusion = pedigree_exclusion,
                  log_file_likelihood = log_file_likelihood, log_file_exclusion = log_file_exclusion))
    }
  }
}
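
# Hedged usage sketch (not in the original file): with method = "", APIS_2n()
# computes both the likelihood-based and the exclusion-based pedigrees in one
# pass, which is how the shiny app lets the user pick the method a posteriori.
# res <- APIS_2n(offspring_genotype = APIS_offspring[1:35, 1:50],
#                sire_genotype = APIS_sire[, 1:50],
#                dam_genotype = APIS_dam[, 1:50],
#                method = "", simulation_if_small = FALSE)
# names(res)
# # "pedigree_likelihood" "pedigree_exclusion" "log_file_likelihood" "log_file_exclusion"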
/scratch/gouwar.j/cran-all/cranData/APIS/R/functions_APIS_2n.R
# THIS FILE CONTAINS ALL THE FUNCTIONS FOR TRIPLOID ASSIGNMENT USING PARALLEL PROGRAMMING
#
# =====================================================================================
#' Get probabilities
#'
#' @param offspring_genotype matrix of the offspring genotypes recoded (after recode_genotypes_for_assignment() function)
#' @param sire_genotype matrix of the sire genotypes recoded (after recode_genotypes_for_assignment() function)
#' @param dam_genotype matrix of the dam genotypes recoded (after recode_genotypes_for_assignment() function)
#' @param allele_frequencies allele frequencies recoded (after recode_allele_frequencies_for_assignment() function)
#' @param method method of assignment ("mendel" or "exclusion")
#' @param ploidy_level ploidy level of the offspring
#' @param number_cores number of cores
#' @param t_recom recombination rate
#'
#' @return list of probabilities and mismatches
#'
#' @useDynLib APIS, .registration = TRUE
#' @importFrom parallel makeCluster stopCluster
#' @importFrom doParallel registerDoParallel
#' @importFrom foreach %dopar% foreach
#'
#' @keywords internal
#' @noRd
get_probabilities_parallel_3n <- function(offspring_genotype, sire_genotype, dam_genotype,
                                          allele_frequencies, method = "mendel",
                                          ploidy_level, number_cores, t_recom) {
  # DESCRIPTION
  # Get the Mendelian transmission probabilities and mismatches for all the parent pairs
  # for all the triploid offspring individuals using parallel programming
  #
  # INPUTS
  # offspring_genotype : genotype matrix of the offspring after recoding
  # sire_genotype : genotype matrix of the sires after recoding
  # dam_genotype : genotype matrix of the dams after recoding
  # allele_frequencies : allele frequency matrix after recoding
  # method : method of assignment
  # ploidy_level : ploidy level of the offspring
  # number_cores : number of cores for parallel processing
  # t_recom : recombination rate
  #
  # OUTPUTS
  # output : the log file

  i = NULL
  iterations = nrow(offspring_genotype)
  new_genotype = NULL

  cl = parallel::makeCluster(number_cores)
  doParallel::registerDoParallel(cl)

  output <- foreach(i = 1:iterations, .combine = rbind, .export = "get_individual_probability_3n") %dopar% {
    # dyn.load(x = "./src_test/APIS.dll", local = TRUE, now = TRUE) # added 06/02/2023 -> to run without the package
    # dyn.load(x = "./APIS_input_Recom.dll", local = TRUE, now = TRUE)
    tmp <- get_individual_probability_3n(offspring_genotype = offspring_genotype[i, , drop = FALSE],
                                         sire_genotype = sire_genotype,
                                         dam_genotype = dam_genotype,
                                         allele_frequencies = allele_frequencies,
                                         ploidy_level = ploidy_level,
                                         t_recom = t_recom)
    all_values = data.frame(sire = rep(rownames(sire_genotype), each = nrow(dam_genotype)),
                            dam = rep(rownames(dam_genotype), times = nrow(sire_genotype)),
                            probability = tmp[[1]],
                            mismatch = tmp[[2]])
    if (method == "mendel" | method == "exclusion") {
      if (method == "mendel"){
        all_values = all_values[order(all_values$probability, decreasing = TRUE), ]
      } else {
        all_values = all_values[order(all_values$mismatch, decreasing = FALSE), ]
      }
      log_file = c(rownames(offspring_genotype)[i],
                   all_values$sire[1], all_values$dam[1], all_values$probability[1], all_values$mismatch[1],
                   all_values$sire[2], all_values$dam[2], all_values$probability[2], all_values$mismatch[2],
                   all_values$sire[3], all_values$dam[3], all_values$probability[3], all_values$mismatch[3],
                   all_values$probability[1] - all_values$probability[2],
                   all_values$probability[2] - all_values$probability[3])
    } else {
      all_values1 = all_values[order(all_values$probability, decreasing = TRUE), ]
      all_values2 = all_values[order(all_values$mismatch, decreasing = FALSE), ]
      log_file = c(rownames(offspring_genotype)[i],
                   all_values1$sire[1], all_values1$dam[1], all_values1$probability[1], all_values1$mismatch[1],
                   all_values1$sire[2], all_values1$dam[2], all_values1$probability[2], all_values1$mismatch[2],
                   all_values1$sire[3], all_values1$dam[3], all_values1$probability[3], all_values1$mismatch[3],
                   all_values1$probability[1] - all_values1$probability[2],
                   all_values1$probability[2] - all_values1$probability[3],
                   all_values2$sire[1], all_values2$dam[1], all_values2$probability[1], all_values2$mismatch[1],
                   all_values2$sire[2], all_values2$dam[2], all_values2$probability[2], all_values2$mismatch[2],
                   all_values2$sire[3], all_values2$dam[3], all_values2$probability[3], all_values2$mismatch[3],
                   all_values2$probability[1] - all_values2$probability[2],
                   all_values2$probability[2] - all_values2$probability[3])
    }
    return(log_file)
  }
  parallel::stopCluster(cl)
  return(output)
}

# =====================================================================================
#' APIS for triploids
#'
#' @param offspring_genotype matrix of the offspring genotypes
#' @param sire_genotype matrix of the sire genotypes
#' @param dam_genotype matrix of the dam genotypes
#' @param method method : "mendel" i.e. likelihood or "exclusion" (default : "mendel"). Can also be "" to select the method a posteriori.
#' @param exclusion_threshold threshold for "exclusion" method (default : NULL). Overrides the error parameter if not NULL
#' @param error error accepted (default : 0.05)
#' @param simulation_if_small simulate individuals (TRUE or FALSE) (default : FALSE)
#' @param number_offspring_simulated number of offspring simulated (default : 500)
#' @param number_cores number of cores
#' @param verbose verbose
#' @param t_recom recombination rate
#'
#' @return list of 2 elements : a pedigree file and the log file
#'
#' @importFrom cowplot plot_grid
#' @useDynLib APIS, .registration = TRUE
#'
#' @examples
#' data("APIS_offspring3n")
#' data("APIS_sire")
#' data("APIS_dam")
#'
#' assignment <- APIS_3n(offspring_genotype = APIS_offspring3n[1:35,1:50],
#'                       sire_genotype = APIS_sire[ ,1:50],
#'                       dam_genotype = APIS_dam[ ,1:50],
#'                       simulation_if_small = FALSE)
#'
#' @export
APIS_3n <- function(offspring_genotype, sire_genotype, dam_genotype,
                    method = "mendel",
                    exclusion_threshold = NULL, error = 0.05,
                    simulation_if_small = FALSE,
                    number_offspring_simulated = max(0, 500 - nrow(offspring_genotype)),
                    number_cores = 2, verbose = FALSE,
                    t_recom = 0.5) {
  # DESCRIPTION
  # APIS process for triploid offspring
  #
  # INPUTS
  # offspring_genotype : genotype matrix of the offspring in APIS format
  # sire_genotype : genotype matrix of the sires in APIS format
  # dam_genotype : genotype matrix of the dams in APIS format
  # method : method of assignment
  # exclusion_threshold : number of mismatches allowed in "exclusion" method
  # error : percent of error allowed in "mendel" method
  # simulation_if_small : simulate individuals
  # number_offspring_simulated : number of offspring to simulate
  # number_cores : number of cores for parallel programming
  # verbose : display information on the screen
  #
  # OUTPUTS
  # list of 2 elements : the pedigree and the log file

  if (verbose) {
    print_title()
  }

  # Parental ploidy level
  parental_ploidy_level = length(strsplit(sire_genotype[1, 1], split = "/")[[1]])

  # Check inputs
  offspring_names = rownames(offspring_genotype)
  sire_names = rownames(sire_genotype)
  dam_names = rownames(dam_genotype)

  check_matrices_format(offspring_genotype, verbose = verbose)
  check_matrices_format(sire_genotype, verbose = verbose)
  check_matrices_format(dam_genotype, verbose = verbose)

  genotypes_checked = check_genotypes(offspring_genotype, sire_genotype, dam_genotype,
                                      offspring_ploidy_level = 3,
                                      parental_ploidy_level = parental_ploidy_level,
                                      verbose = verbose)
  offspring_genotype = genotypes_checked[[1]]
  sire_genotype = genotypes_checked[[2]]
  dam_genotype = genotypes_checked[[3]]

  if (!is(offspring_genotype, "matrix")) {
    offspring_genotype = t(as.matrix(offspring_genotype))
    rownames(offspring_genotype) = offspring_names
  }
  if (!is(sire_genotype, "matrix")) {
    sire_genotype = t(as.matrix(sire_genotype))
    rownames(sire_genotype) = sire_names
  }
  if (!is(dam_genotype, "matrix")) {
    dam_genotype = t(as.matrix(dam_genotype))
    rownames(dam_genotype) = dam_names
  }

  check_input_parameters(exclusion_threshold = exclusion_threshold,
                         number_marker = ncol(offspring_genotype),
                         error = error, verbose = verbose)

  # Theoretical assignment power
  P = assignment_power(sire_genotype = sire_genotype, dam_genotype = dam_genotype,
                       ploidy_level = parental_ploidy_level, verbose = verbose)

  # Add simulation
  if (simulation_if_small & number_offspring_simulated > 0) {
    simulation = simulate_offspring(sire_genotype = sire_genotype,
                                    dam_genotype = dam_genotype,
                                    number_offspring = number_offspring_simulated,
                                    ploidy_level = 3,
                                    recombination_rate = t_recom,
                                    sire_contribution = 1, dam_contribution = 2)
    offspring_genotype = rbind(offspring_genotype, simulation$genotypes)
  }

  # Recode genotype matrices
  prepared_data = prepare_for_assignment(offspring_genotype = offspring_genotype,
                                         sire_genotype = sire_genotype,
                                         dam_genotype = dam_genotype,
                                         ploidy_level = 3,
                                         number_cores = number_cores,
                                         verbose = verbose)
  offspring_genotype_recoded = prepared_data[[1]]
  sire_genotype_recoded = prepared_data[[2]]
  dam_genotype_recoded = prepared_data[[3]]
  allele_frequency_recoded = prepared_data[[4]]

  # Calculate probabilities and mismatches
  if (verbose) {
    cat("estimation of mendelian transmission probabilities and mismatches", sep = "\n")
  }
  log_file = get_probabilities_parallel_3n(offspring_genotype = offspring_genotype_recoded,
                                           sire_genotype = sire_genotype_recoded,
                                           dam_genotype = dam_genotype_recoded,
                                           allele_frequencies = allele_frequency_recoded,
                                           method = method,
                                           ploidy_level = 3,
                                           number_cores = number_cores,
                                           t_recom = t_recom)
  if (is(log_file, "vector")) {
    log_file = as.data.frame(t(log_file))
  } else {
    log_file = as.data.frame(log_file)
  }
  if (method %in% c("mendel", "exclusion")){
    log_file = format_logfile(log_file)
  } else {
    log_file_likelihood = log_file[, 1:15]
    log_file_likelihood = format_logfile(log_file_likelihood)
    log_file_exclusion = log_file[, c(1, 16:29)]
    log_file_exclusion = format_logfile(log_file_exclusion)
  }

  if (verbose) {
    cat("assignment ...", sep = "\n")
  }

  # if method = mendel
  if (method == "mendel") {
    # Create pedigree
    threshold_mendel = estimate_mendel_threshold(log_file = log_file, error = error)
    pedigree = create_pedigree_delta(log_file = log_file, threshold_delta = threshold_mendel)
    # Plot graphs
    if (simulation_if_small & number_offspring_simulated > 0) {
      simulated_individuals = simulation$pedigree[, 1]
    } else {
      simulated_individuals = NULL
    }
    p1 = plot_delta(log_file = log_file, threshold = threshold_mendel, simulated_individuals = simulated_individuals)
    p2 = plot_probabilities(log_file = log_file, threshold = NULL, simulated_individuals = simulated_individuals)
    p3 = plot_mismatches(log_file = log_file, threshold = NULL, simulated_individuals = simulated_individuals)

  # if method = exclusion
  } else if (method == "exclusion") {
    # Create pedigree
    if (is.null(exclusion_threshold)){
      exclusion_threshold = estimate_exclusion_threshold(log_file = log_file, error = error)
    } else if (verbose){
      cat("error parameter overridden as exclusion_threshold was provided", sep = "\n")
    }
    pedigree = create_pedigree_exclusion(log_file = log_file, threshold_exclusion = exclusion_threshold)
    # Plot graphs
    if (simulation_if_small & number_offspring_simulated > 0) {
      simulated_individuals = simulation$pedigree[, 1]
    } else {
      simulated_individuals = NULL
    }
    p1 = plot_delta(log_file = log_file, threshold = NULL, simulated_individuals = simulated_individuals)
    p2 = plot_probabilities(log_file = log_file, threshold = NULL, simulated_individuals = simulated_individuals)
    p3 = plot_mismatches(log_file = log_file, threshold = exclusion_threshold, simulated_individuals = simulated_individuals)

  } else { # method "" : selection of the method a posteriori, used by the shiny app
    threshold_mendel = estimate_mendel_threshold(log_file = log_file_likelihood, error = error)
    pedigree_likelihood = create_pedigree_delta(log_file = log_file_likelihood, threshold_delta = threshold_mendel)
    if (is.null(exclusion_threshold)){
      exclusion_threshold = estimate_exclusion_threshold(log_file = log_file_exclusion, error = error)
    } else if (verbose){
      cat("error parameter overridden as exclusion_threshold was provided", sep = "\n")
    }
    pedigree_exclusion = create_pedigree_exclusion(log_file = log_file_exclusion, threshold_exclusion = exclusion_threshold)
    # Fix: define simulated_individuals in this branch too, as it is used below
    if (simulation_if_small & number_offspring_simulated > 0) {
      simulated_individuals = simulation$pedigree[, 1]
    } else {
      simulated_individuals = NULL
    }
  }

  if (verbose) {
    cat("assignment completed !", sep = "\n")
    if (method %in% c("mendel", "exclusion")){
      print_summary(theoretical_assignment_power = P,
                    assignment_rate = get_assignment_rate(pedigree))
    }
  }

  if (method %in% c("mendel", "exclusion")) {
    print(cowplot::plot_grid(p1, p2, p3, nrow = 3))
    if (simulation_if_small & number_offspring_simulated > 0) {
      output = remove_simulation(log_file = log_file, pedigree = pedigree, simulated_individuals = simulated_individuals)
      return(output)
    } else {
      return(list(pedigree = pedigree, log_file = log_file))
    }
  } else {
    if (simulation_if_small & number_offspring_simulated > 0) {
      output1 = remove_simulation(log_file = log_file_likelihood, pedigree = pedigree_likelihood, simulated_individuals = simulated_individuals)
      output2 = remove_simulation(log_file = log_file_exclusion, pedigree = pedigree_exclusion, simulated_individuals = simulated_individuals)
      return(c(output1, output2))
    } else {
      return(list(pedigree_likelihood = pedigree_likelihood, pedigree_exclusion = pedigree_exclusion,
                  log_file_likelihood = log_file_likelihood, log_file_exclusion = log_file_exclusion))
    }
  }
}
/scratch/gouwar.j/cran-all/cranData/APIS/R/functions_APIS_3n.R